QNX IPC: Vayu - Add DSP1 MMU1 Programming
1 /*
2  *  @file  VAYUDspEnabler.c
3  *
4  *  @brief  MMU programming module
5  *
6  *
7  *  ============================================================================
8  *
9  *  Copyright (c) 2013, Texas Instruments Incorporated
10  *
11  *  Redistribution and use in source and binary forms, with or without
12  *  modification, are permitted provided that the following conditions
13  *  are met:
14  *
15  *  *  Redistributions of source code must retain the above copyright
16  *     notice, this list of conditions and the following disclaimer.
17  *
18  *  *  Redistributions in binary form must reproduce the above copyright
19  *     notice, this list of conditions and the following disclaimer in the
20  *     documentation and/or other materials provided with the distribution.
21  *
22  *  *  Neither the name of Texas Instruments Incorporated nor the names of
23  *     its contributors may be used to endorse or promote products derived
24  *     from this software without specific prior written permission.
25  *
26  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
27  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
28  *  THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29  *  PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
30  *  CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
31  *  EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
32  *  PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
33  *  OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
34  *  WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
35  *  OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
36  *  EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37  *  Contact information for paper mail:
38  *  Texas Instruments
39  *  Post Office Box 655303
40  *  Dallas, Texas 75265
41  *  Contact information:
42  *  http://www-k.ext.ti.com/sc/technical-support/product-information-centers.htm?
43  *  DCMP=TIHomeTracking&HQS=Other+OT+home_d_contact
44  *  ============================================================================
45  *
46  */
48 #include <errno.h>
49 #include <unistd.h>
50 #include <ti/syslink/Std.h>
52 /* OSAL and utils headers */
53 #include <ti/syslink/utils/List.h>
54 #include <ti/syslink/utils/Trace.h>
55 #include <ti/syslink/utils/OsalPrint.h>
57 /* Module level headers */
58 #include <OsalDrv.h>
59 #include <_ProcDefs.h>
60 #include <Processor.h>
61 #include <hw/inout.h>
62 #include <sys/mman.h>
64 #include <hw_defs.h>
65 #include <hw_mmu.h>
66 #include <VAYUDspHal.h>
67 #include <VAYUDspHalMmu.h>
68 #include <VAYUDspPhyShmem.h>
69 #include <VAYUDspEnabler.h>
70 #include <stdbool.h>
71 #include <stdint.h>
74 #define PAGE_SIZE 0x1000
76 /* Attributes of L2 page tables for DSP MMU.*/
77 struct page_info {
78     /* Number of valid PTEs in the L2 PT*/
79     UInt32 num_entries;
80 };
83 /* Attributes used to manage the DSP MMU page tables */
84 struct pg_table_attrs {
85     struct sync_cs_object *hcs_object;/* Critical section object handle */
86     UInt32 l1_base_pa; /* Physical address of the L1 PT */
87     UInt32 l1_base_va; /* Virtual  address of the L1 PT */
88     UInt32 l1_size; /* Size of the L1 PT */
89     UInt32 l1_tbl_alloc_pa;
90     /* Physical address of Allocated mem for L1 table. May not be aligned */
91     UInt32 l1_tbl_alloc_va;
92     /* Virtual address of Allocated mem for L1 table. May not be aligned */
93     UInt32 l1_tbl_alloc_sz;
94     /* Size of consistent memory allocated for L1 table.
95      * May not be aligned */
96     UInt32 l2_base_pa;        /* Physical address of the L2 PT */
97     UInt32 l2_base_va;        /* Virtual  address of the L2 PT */
98     UInt32 l2_size;        /* Size of the L2 PT */
99     UInt32 l2_tbl_alloc_pa;
100     /* Physical address of Allocated mem for L2 table. May not be aligned */
101     UInt32 l2_tbl_alloc_va;
102     /* Virtual address of Allocated mem for L2 table. May not be aligned */
103     UInt32 ls_tbl_alloc_sz;
104     /* Size of consistent memory allocated for L2 table.
105      * May not be aligned */
106     UInt32 l2_num_pages;    /* Number of allocated L2 PT */
107     struct page_info *pg_info;
108 };
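/* A rough sketch of the layout produced by init_mmu_page_attribs() below
 * (not spelled out in the original comments): a single physically contiguous
 * block is mmap'd, the L1 table is placed at the first l1_size-aligned
 * address inside it, and the coarse L2 tables are carved out of the same
 * block at a fixed 0x80000 offset from the aligned L1 base:
 *
 *   l1_tbl_alloc_pa/va  ->  [ padding up to the l1_size alignment      ]
 *   l1_base_pa/va       ->  [ L1 page table, l1_size bytes             ]
 *   l1_base + 0x80000   ->  [ l2_num_pages coarse L2 tables,           ]
 *                           [ HW_MMU_COARSE_PAGE_SIZE bytes each       ]
 */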
111 enum pagetype {
112     SECTION = 0,
113     LARGE_PAGE = 1,
114     SMALL_PAGE = 2,
115     SUPER_SECTION  = 3
116 };
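/* How these map onto MMU entries in get_mmu_entry_size() below:
 *   SUPER_SECTION -> 16MB entry (requires PAGE_SIZE_16MB alignment)
 *   SECTION       ->  1MB entry (requires PAGE_SIZE_1MB alignment)
 *   LARGE_PAGE    -> 64KB entry (requires PAGE_SIZE_64KB alignment)
 *   SMALL_PAGE    ->  4KB entry (requires PAGE_SIZE_4KB alignment)
 */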
118 static UInt32 shm_phys_addr;
119 static UInt32 shm_phys_addr_dsp;
121 #define INREG32(x) in32((uintptr_t)x)
122 #define OUTREG32(x, y) out32((uintptr_t)x, y)
123 #define SIZE 0x4
125 static UInt32 iotlb_dump_cr (struct cr_regs *cr, char *buf);
126 static Int load_iotlb_entry (UInt32 mmuBase, struct iotlb_entry *e);
127 static Int iotlb_cr_valid (struct cr_regs *cr);
129 static Int rproc_mem_map (UInt32 mmuBase, struct pg_table_attrs * p_pt_attrs,
130                           UInt32 mpu_addr, UInt32 ul_virt_addr,
131                           UInt32 num_bytes, UInt32 map_attr);
132 static Int rproc_mem_unmap (UInt32 mmuBase, struct pg_table_attrs * p_pt_attrs,
133                             UInt32 da, UInt32 num_bytes);
136 static Void iotlb_cr_to_e (struct cr_regs *cr, struct iotlb_entry *e)
138     e->da       = cr->cam & MMU_CAM_VATAG_MASK;
139     e->pa       = cr->ram & MMU_RAM_PADDR_MASK;
140     e->valid    = cr->cam & MMU_CAM_V;
141     e->prsvd    = cr->cam & MMU_CAM_P;
142     e->pgsz     = cr->cam & MMU_CAM_PGSZ_MASK;
143     e->endian   = cr->ram & MMU_RAM_ENDIAN_MASK;
144     e->elsz     = cr->ram & MMU_RAM_ELSZ_MASK;
145     e->mixed    = cr->ram & MMU_RAM_MIXED;
148 static Void iotlb_getLock (UInt32 mmuBase, struct iotlb_lock *l)
150     ULONG reg;
151     VAYUDsp_MMURegs * mmuRegs = (VAYUDsp_MMURegs *)mmuBase;
153     reg = INREG32(&mmuRegs->LOCK);
154     l->base = MMU_LOCK_BASE(reg);
155     l->vict = MMU_LOCK_VICT(reg);
158 static Void iotlb_setLock (UInt32 mmuBase, struct iotlb_lock *l)
160     ULONG reg;
161     VAYUDsp_MMURegs * mmuRegs = (VAYUDsp_MMURegs *)mmuBase;
163     reg = (l->base << MMU_LOCK_BASE_SHIFT);
164     reg |= (l->vict << MMU_LOCK_VICT_SHIFT);
165     OUTREG32(&mmuRegs->LOCK, reg);
168 static void omap4_tlb_read_cr (UInt32 mmuBase, struct cr_regs *cr)
170     VAYUDsp_MMURegs * mmuRegs = (VAYUDsp_MMURegs *)mmuBase;
172     cr->cam = INREG32(&mmuRegs->READ_CAM);
173     cr->ram = INREG32(&mmuRegs->READ_RAM);
176 /* only used for iotlb iteration in for-loop */
177 static struct cr_regs __iotlb_read_cr (UInt32 mmuBase, int n)
179      struct cr_regs cr;
180      struct iotlb_lock l;
181      iotlb_getLock(mmuBase, &l);
182      l.vict = n;
183      iotlb_setLock(mmuBase, &l);
184      omap4_tlb_read_cr(mmuBase, &cr);
185      return cr;
188 #define for_each_iotlb_cr(n, __i, cr)                \
189     for (__i = 0;                            \
190          (__i < (n)) && (cr = __iotlb_read_cr(mmuBase, __i), TRUE);    \
191          __i++)
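/* Usage sketch (mirrors save_tlbs() below): the macro reads entry __i through
 * __iotlb_read_cr(mmuBase, __i), so it relies on a local variable named
 * mmuBase being in scope at the point of use:
 *
 *   UInt32 mmuBase = halObject->mmu0Base;
 *   struct cr_regs cr;
 *   Int i;
 *   for_each_iotlb_cr(halObject->mmu0Obj.nrTlbs, i, cr) {
 *       iotlb_cr_to_e(&cr, &halObject->mmu0Obj.tlbs[i]);
 *   }
 */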
193 static Int save_tlbs (VAYUDSP_HalObject * halObject, UInt32 procId)
195     Int i = 0;
196     struct cr_regs cr_tmp;
197     struct iotlb_lock l;
198     UInt32 mmuBase;
200     iotlb_getLock(halObject->mmu0Base, &l);
202     halObject->mmu0Obj.nrTlbs = l.base;
203     mmuBase = halObject->mmu0Base;
205     for_each_iotlb_cr(halObject->mmu0Obj.nrTlbs, i, cr_tmp) {
206         iotlb_cr_to_e(&cr_tmp, &halObject->mmu0Obj.tlbs[i]);
207     }
209     iotlb_getLock(halObject->mmu1Base, &l);
211     halObject->mmu1Obj.nrTlbs = l.base;
212     mmuBase = halObject->mmu1Base;
214     for_each_iotlb_cr(halObject->mmu1Obj.nrTlbs, i, cr_tmp) {
215         iotlb_cr_to_e(&cr_tmp, &halObject->mmu1Obj.tlbs[i]);
216     }
218     return 0;
222 static Int restore_tlbs (VAYUDSP_HalObject * halObject, UInt32 procId)
224     Int i = 0;
225     Int status = -1;
226     struct iotlb_lock save;
228     /* Reset the base and victim values */
229     save.base = 0;
230     save.vict = 0;
231     iotlb_setLock(halObject->mmu0Base, &save);
232     iotlb_setLock(halObject->mmu1Base, &save);
234     for (i = 0; i < halObject->mmu0Obj.nrTlbs; i++) {
235         status = load_iotlb_entry(halObject->mmu0Base, &halObject->mmu0Obj.tlbs[i]);
236         if (status < 0) {
237             GT_setFailureReason (curTrace,
238                                  GT_4CLASS,
239                                  "restore_tlbs",
240                                  status,
241                                  "Error restoring the mmu0 tlbs");
242             goto err;
243         }
244     }
246     for (i = 0; i < halObject->mmu1Obj.nrTlbs; i++) {
247         status = load_iotlb_entry(halObject->mmu1Base, &halObject->mmu1Obj.tlbs[i]);
248         if (status < 0) {
249             GT_setFailureReason (curTrace,
250                                  GT_4CLASS,
251                                  "restore_tlbs",
252                                  status,
253                                  "Error restoring the mmu1 tlbs");
254             goto err;
255         }
256     }
258     return 0;
260 err:
261     return status;
264 static Int save_mmu_regs (VAYUDSP_HalObject * halObject, UInt32 procId)
266     UInt32 i = 0;
268     if (halObject == NULL) {
269         GT_setFailureReason (curTrace,
270                              GT_4CLASS,
271                              "save_mmu_regs",
272                              -ENOMEM,
273                              "halObject is NULL");
274         return -ENOMEM;
275     }
277     if (halObject->mmu0Base == 0 || halObject->mmu1Base == 0) {
278         GT_setFailureReason (curTrace,
279                              GT_4CLASS,
280                              "save_mmu_regs",
281                              -ENOMEM,
282                              "halObject->mmuBase is 0");
283         return -ENOMEM;
284     }
286     for (i = 0; i < MMU_REGS_SIZE; i++) {
287         halObject->mmu0Obj.mmuRegs[i] = INREG32(halObject->mmu0Base + (i * 4));
288         halObject->mmu1Obj.mmuRegs[i] = INREG32(halObject->mmu1Base + (i * 4));
289     }
291     return 0;
294 static Int restore_mmu_regs (VAYUDSP_HalObject * halObject,
295                              UInt32 procId)
297     UInt32 i = 0;
299     if (halObject == NULL) {
300         GT_setFailureReason (curTrace,
301                              GT_4CLASS,
302                              "restore_mmu_regs",
303                              -ENOMEM,
304                              "halObject is NULL");
305         return -ENOMEM;
306     }
308     if (halObject->mmu0Base == 0 || halObject->mmu1Base == 0) {
309         GT_setFailureReason (curTrace,
310                              GT_4CLASS,
311                              "restore_mmu_regs",
312                              -ENOMEM,
313                              "halObject->mmuBase is 0");
314         return -ENOMEM;
315     }
317     for (i = 0; i < MMU_REGS_SIZE; i++) {
318         OUTREG32(halObject->mmu0Base + (i * 4), halObject->mmu0Obj.mmuRegs[i]);
319         OUTREG32(halObject->mmu1Base + (i * 4), halObject->mmu1Obj.mmuRegs[i]);
320     }
322     return 0;
325 Int save_dsp_mmu_ctxt (VAYUDSP_HalObject * halObject, UInt32 procId)
327     Int status = -1;
329     status = save_mmu_regs(halObject, procId);
330     if (status < 0) {
331         GT_setFailureReason (curTrace,
332                              GT_4CLASS,
333                              "save_mmu_ctxt",
334                              status,
335                              "Unable to save MMU Regs");
336         return status;
337     }
339     status = save_tlbs(halObject, procId);
340     if (status < 0) {
341         GT_setFailureReason (curTrace,
342                              GT_4CLASS,
343                              "save_mmu_ctxt",
344                              status,
345                              "Unable to save TLBs");
346         return status;
347     }
348     return status;
352 Int restore_dsp_mmu_ctxt (VAYUDSP_HalObject * halObject, UInt32 procId)
354     Int status = -1;
356     status = restore_mmu_regs(halObject, procId);
357     if (status < 0) {
358         GT_setFailureReason (curTrace,
359                              GT_4CLASS,
360                              "restore_mmu_ctxt",
361                              status,
362                              "Unable to restore MMU Regs");
363         return status;
364     }
366     status = restore_tlbs(halObject, procId);
367     if (status < 0) {
368         GT_setFailureReason (curTrace,
369                              GT_4CLASS,
370                              "restore_mmu_ctxt",
371                              status,
372                              "Unable to restore TLBS");
373         return status;
374     }
376     return status;
380  /*=========================================
381  * Decides a TLB entry size
382  *
383  */
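/* Worked example, assuming the usual PAGE_SIZE_* values (4KB/64KB/1MB/16MB):
 * pa = 0x9F000000 and size = 0x00200000 gives a 16MB-aligned address but a
 * size below 16MB, so the entry chosen is a SECTION with *entry_size set to
 * PAGE_SIZE_1MB; the caller advances pa and the remaining size and calls
 * again for the rest of the region. */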
384 static Int get_mmu_entry_size (UInt32 pa, UInt32 size, enum pagetype *size_tlb,
385                                UInt32 *entry_size)
387     Int     status = 0;
388     Bool    page_align_4kb  = false;
389     Bool    page_align_64kb = false;
390     Bool    page_align_1mb = false;
391     Bool    page_align_16mb = false;
392     UInt32  phys_addr = pa;
395     /*  First check the page alignment*/
396     if ((phys_addr % PAGE_SIZE_4KB)  == 0)
397         page_align_4kb  = true;
398     if ((phys_addr % PAGE_SIZE_64KB) == 0)
399         page_align_64kb = true;
400     if ((phys_addr % PAGE_SIZE_1MB)  == 0)
401         page_align_1mb  = true;
402     if ((phys_addr % PAGE_SIZE_16MB)  == 0)
403         page_align_16mb  = true;
405     if ((!page_align_64kb) && (!page_align_1mb)  && (!page_align_4kb)) {
406         status = -EINVAL;
407         GT_setFailureReason (curTrace,
408                              GT_4CLASS,
409                              "get_mmu_entry_size",
410                              status,
411                              "phys_addr is not properly aligned");
412         goto error_exit;
413     }
415     /*  Now decide the entry size */
416     if (size >= PAGE_SIZE_16MB) {
417         if (page_align_16mb) {
418             *size_tlb   = SUPER_SECTION;
419             *entry_size = PAGE_SIZE_16MB;
420         } else if (page_align_1mb) {
421             *size_tlb   = SECTION;
422             *entry_size = PAGE_SIZE_1MB;
423         } else if (page_align_64kb) {
424             *size_tlb   = LARGE_PAGE;
425             *entry_size = PAGE_SIZE_64KB;
426         } else if (page_align_4kb) {
427             *size_tlb   = SMALL_PAGE;
428             *entry_size = PAGE_SIZE_4KB;
429         } else {
430             status = -EINVAL;
431             GT_setFailureReason (curTrace,
432                                  GT_4CLASS,
433                                  "get_mmu_entry_size",
434                                  status,
435                                  "size and alignment are invalid");
436             goto error_exit;
437         }
438     } else if (size >= PAGE_SIZE_1MB && size < PAGE_SIZE_16MB) {
439         if (page_align_1mb) {
440             *size_tlb   = SECTION;
441             *entry_size = PAGE_SIZE_1MB;
442         } else if (page_align_64kb) {
443             *size_tlb   = LARGE_PAGE;
444             *entry_size = PAGE_SIZE_64KB;
445         } else if (page_align_4kb) {
446             *size_tlb   = SMALL_PAGE;
447             *entry_size = PAGE_SIZE_4KB;
448         } else {
449             status = -EINVAL;
450             GT_setFailureReason (curTrace,
451                                  GT_4CLASS,
452                                  "get_mmu_entry_size",
453                                  status,
454                                  "size and alignment are invalid");
455             goto error_exit;
456         }
457     } else if (size > PAGE_SIZE_4KB && size < PAGE_SIZE_1MB) {
458         if (page_align_64kb) {
459             *size_tlb   = LARGE_PAGE;
460             *entry_size = PAGE_SIZE_64KB;
461         } else if (page_align_4kb) {
462             *size_tlb   = SMALL_PAGE;
463             *entry_size = PAGE_SIZE_4KB;
464         } else {
465             status = -EINVAL;
466             GT_setFailureReason (curTrace,
467                                  GT_4CLASS,
468                                  "get_mmu_entry_size",
469                                  status,
470                                  "size and alignment are invalid");
471             goto error_exit;
472         }
473     } else if (size == PAGE_SIZE_4KB) {
474         if (page_align_4kb) {
475             *size_tlb   = SMALL_PAGE;
476             *entry_size = PAGE_SIZE_4KB;
477         } else {
478             status = -EINVAL;
479             GT_setFailureReason (curTrace,
480                                  GT_4CLASS,
481                                  "get_mmu_entry_size",
482                                  status,
483                                  "size and alignment are invalid");
484             goto error_exit;
485         }
486     } else {
487         status = -EINVAL;
488         GT_setFailureReason (curTrace,
489                              GT_4CLASS,
490                              "get_mmu_entry_size",
491                              status,
492                              "size is invalid");
493         goto error_exit;
494     }
495     return 0;
497 error_exit:
498     return status;
501 /*
502  * Note: add_dsp_mmu_entry is left here, commented out, so that it remains
503  * available in case static TLB entries ever need to be added outside of
504  * the translation table for faster access.
505  */
506 #if 0
507 /*=========================================
508  * Add DSP MMU entries corresponding to given MPU-Physical address
509  * and DSP-virtual address
510  */
511 static Int add_dsp_mmu_entry (VAYUDSP_HalObject * halObject,
512                               UInt32 *phys_addr, UInt32 *dsp_addr, UInt32 size)
514     UInt32 mapped_size = 0;
515     enum pagetype size_tlb = SECTION;
516     UInt32 entry_size = 0;
517     int status = 0;
518     struct iotlb_entry tlb_entry;
519     int retval = 0;
521     while ((mapped_size < size) && (status == 0)) {
522         status = get_mmu_entry_size(*phys_addr, (size - mapped_size),
523                                     &size_tlb, &entry_size);
524         if (status < 0) {
525             GT_setFailureReason (curTrace,
526                                  GT_4CLASS,
527                                  "add_dsp_mmu_entry",
528                                  status,
529                                  "get_mmu_entry_size failed");
530             goto error_exit;
531         }
533         if (size_tlb == SUPER_SECTION)
534             tlb_entry.pgsz = MMU_CAM_PGSZ_16M;
536         else if (size_tlb == SECTION)
537             tlb_entry.pgsz = MMU_CAM_PGSZ_1M;
539         else if (size_tlb == LARGE_PAGE)
540             tlb_entry.pgsz = MMU_CAM_PGSZ_64K;
542         else if (size_tlb == SMALL_PAGE)
543             tlb_entry.pgsz = MMU_CAM_PGSZ_4K;
545         tlb_entry.elsz = MMU_RAM_ELSZ_16;
546         tlb_entry.endian = MMU_RAM_ENDIAN_LITTLE;
547         tlb_entry.mixed = MMU_RAM_MIXED;
548         tlb_entry.prsvd = MMU_CAM_P;
549         tlb_entry.valid = MMU_CAM_V;
551         tlb_entry.da = *dsp_addr;
552         tlb_entry.pa = *phys_addr;
553         retval = load_iotlb_entry(halObject, &tlb_entry);
554         if (retval < 0) {
555             GT_setFailureReason (curTrace,
556                                  GT_4CLASS,
557                                  "add_dsp_mmu_entry",
558                                  retval,
559                                  "load_iotlb_entry failed");
560             goto error_exit;
561         }
562         mapped_size  += entry_size;
563         *phys_addr   += entry_size;
564         *dsp_addr   += entry_size;
565     }
567     return 0;
569 error_exit:
570     printf("pte set failure retval = 0x%x, status = 0x%x \n",
571                             retval, status);
573     return retval;
575 #endif
577 static Int add_entry_ext (VAYUDSP_HalObject * halObject,
578                           UInt32 *phys_addr, UInt32 *dsp_addr, UInt32 size)
580     UInt32 mapped_size = 0;
581     enum pagetype     size_tlb = SECTION;
582     UInt32 entry_size = 0;
583     Int status = 0;
584     UInt32 page_size = HW_PAGE_SIZE_1MB;
585     UInt32 flags = 0;
587     flags = (DSP_MAPELEMSIZE32 | DSP_MAPLITTLEENDIAN |
588                     DSP_MAPPHYSICALADDR);
589     while ((mapped_size < size) && (status == 0)) {
591         /*  get_mmu_entry_size fills size_tlb and entry_size based on the
592             alignment and the remaining size of the memory to be mapped
593             to the DSP */
594         status = get_mmu_entry_size (*phys_addr,
595                                      (size - mapped_size),
596                                      &size_tlb,
597                                      &entry_size);
598         if (status < 0) {
599             GT_setFailureReason (curTrace,
600                                  GT_4CLASS,
601                                  "add_entry_ext",
602                                  status,
603                                  "get_mmu_entry_size failed");
604             break;
605         }
606         else {
607             if (size_tlb == SUPER_SECTION)
608                 page_size = HW_PAGE_SIZE_16MB;
609             else if (size_tlb == SECTION)
610                 page_size = HW_PAGE_SIZE_1MB;
611             else if (size_tlb == LARGE_PAGE)
612                 page_size = HW_PAGE_SIZE_64KB;
613             else if (size_tlb == SMALL_PAGE)
614                 page_size = HW_PAGE_SIZE_4KB;
616             if (status == 0) {
617                 status = rproc_mem_map (halObject->mmu0Base,
618                                         halObject->mmu0Obj.pPtAttrs,
619                                         *phys_addr,
620                                         *dsp_addr,
621                                         page_size,
622                                         flags);
623                 if (status < 0) {
624                     GT_setFailureReason (curTrace,
625                                          GT_4CLASS,
626                                          "add_entry_ext",
627                                          status,
628                                          "rproc_mem_map failed");
629                     break;
630                 }
631                 status = rproc_mem_map (halObject->mmu1Base,
632                                         halObject->mmu1Obj.pPtAttrs,
633                                         *phys_addr,
634                                         *dsp_addr,
635                                         page_size,
636                                         flags);
637                 if (status < 0) {
638                     GT_setFailureReason (curTrace,
639                                          GT_4CLASS,
640                                          "add_entry_ext",
641                                          status,
642                                          "rproc_mem_map failed");
643                     break;
644                 }
645                 mapped_size  += entry_size;
646                 *phys_addr   += entry_size;
647                 *dsp_addr   += entry_size;
648             }
649         }
650     }
651     return status;
654 static Int __dump_tlb_entries (UInt32 mmuBase, struct cr_regs *crs, int num)
656     int i;
657     struct iotlb_lock saved;
658     struct cr_regs tmp;
659     struct cr_regs *p = crs;
661     iotlb_getLock(mmuBase, &saved);
662     for_each_iotlb_cr(num, i, tmp) {
663         if (!iotlb_cr_valid(&tmp))
664             continue;
665         *p++ = tmp;
666     }
667     iotlb_setLock(mmuBase, &saved);
668     return  p - crs;
671 UInt32 get_DspVirtAdd(VAYUDSP_HalObject * halObject, UInt32 physAdd)
673     int i, num;
674     struct cr_regs *cr;
675     struct cr_regs *p = NULL;
676     //DWORD dwPhys;
677     UInt32 lRetVal = 0;
678     num = 32;
679     if(shm_phys_addr_dsp == 0)
680         return 0;
681     cr = mmap(NULL,
682               sizeof(struct cr_regs) * num,
683               PROT_NOCACHE | PROT_READ | PROT_WRITE,
684               MAP_ANON | MAP_PHYS | MAP_PRIVATE,
685               NOFD,
686               0);
687     if (cr == MAP_FAILED)
688     {
689         return 0;
690     }
692     memset(cr, 0, sizeof(struct cr_regs) * num);
694     /* Since MMU0 and MMU1 are programmed with the same entries, can just check MMU0 */
695     num = __dump_tlb_entries(halObject->mmu0Base, cr, num);
696     for (i = 0; i < num; i++)
697     {
698         p = cr + i;
699         if(physAdd >= (p->ram & 0xFFFFF000) &&  physAdd < ((p + 1)->ram & 0xFFFFF000))
700         {
701             lRetVal = ((p->cam & 0xFFFFF000) + (physAdd - (p->ram & 0xFFFFF000)));
702         }
703     }
704     munmap(cr, sizeof(struct cr_regs) * num);
706     return lRetVal;
710 /**
711  * dump_tlb_entries - dump cr arrays to given buffer
712  * @mmuBase: register base of the target MMU
713  * @buf:    output buffer
714  **/
715 static UInt32 dump_tlb_entries (UInt32 mmuBase, char *buf, UInt32 bytes)
717     Int i, num;
718     struct cr_regs *cr;
719     Char *p = buf;
721     num = bytes / sizeof(*cr);
722     num = min(32, num);
723     cr = mmap(NULL,
724             sizeof(struct cr_regs) * num,
725               PROT_NOCACHE | PROT_READ | PROT_WRITE,
726               MAP_ANON | MAP_PHYS | MAP_PRIVATE,
727               NOFD,
728               0);
729     if (cr == MAP_FAILED)
730     {
731         return 0;
733     }
734     memset(cr, 0, sizeof(struct cr_regs) * num);
736     num = __dump_tlb_entries(mmuBase, cr, num);
737     for (i = 0; i < num; i++)
738         p += iotlb_dump_cr(cr + i, p);
739     munmap(cr, sizeof(struct cr_regs) * num);
740     return p - buf;
744 static Void rproc_tlb_dump (VAYUDSP_HalObject * halObject)
746     Char *p;
748     p = mmap(NULL,
749              1000,
750              PROT_NOCACHE | PROT_READ | PROT_WRITE,
751              MAP_ANON | MAP_PHYS | MAP_PRIVATE,
752              NOFD,
753              0);
754     if (MAP_FAILED != p)
755     {
756         dump_tlb_entries(halObject->mmu0Base, p, 1000);
757         dump_tlb_entries(halObject->mmu1Base, p, 1000);
758         munmap(p, 1000);
759     }
761     return;
765 /*================================
766  * Initialize the Dsp MMU.
767  *===============================*/
769 static Int rproc_mmu_init (VAYUDSP_HalObject * halObject,
770                            ProcMgr_AddrInfo * memEntries,
771                            UInt32 numMemEntries)
773     Int ret_val = 0;
774     UInt32 phys_addr = 0;
775     UInt32 i = 0;
776     UInt32 virt_addr = 0;
777     UInt32 reg;
778     VAYUDsp_MMURegs * mmuRegs0 = NULL;
779     VAYUDsp_MMURegs * mmuRegs1 = NULL;
781     if (halObject == NULL) {
782         ret_val = -ENOMEM;
783         GT_setFailureReason (curTrace,
784                              GT_4CLASS,
785                              "rproc_mmu_init",
786                              ret_val,
787                              "halObject is NULL");
788         goto error_exit;
789     }
791     if (halObject->mmu0Base == 0 || halObject->mmu1Base == 0) {
792         ret_val = -ENOMEM;
793         GT_setFailureReason (curTrace,
794                              GT_4CLASS,
795                              "rproc_mmu_init",
796                              ret_val,
797                              "halObject->mmuBase is 0");
798         goto error_exit;
799     }
800     mmuRegs0 = (VAYUDsp_MMURegs *)halObject->mmu0Base;
801     mmuRegs1 = (VAYUDsp_MMURegs *)halObject->mmu1Base;
803     /*  Disable the MMU & TWL */
804     hw_mmu_disable(halObject->mmu0Base);
805     hw_mmu_twl_disable(halObject->mmu0Base);
807     /*  Disable the MMU & TWL */
808     hw_mmu_disable(halObject->mmu1Base);
809     hw_mmu_twl_disable(halObject->mmu1Base);
811     printf("  Programming Dsp memory regions\n");
812     printf("=========================================\n");
814     for (i = 0; i < numMemEntries; i++) {
815         phys_addr = memEntries[i].addr[ProcMgr_AddrType_MasterPhys];
816         if (phys_addr == (UInt32)(-1) || phys_addr == 0) {
817             GT_setFailureReason (curTrace,
818                                  GT_4CLASS,
819                                  "rproc_mmu_init",
820                                  ret_val,
821                                  "phys_addr is invalid");
822             goto error_exit;
823         }
824         printf( "VA = [0x%x] of size [0x%x] at PA = [0x%x]\n",
825                 memEntries[i].addr[ProcMgr_AddrType_SlaveVirt],
826                 memEntries[i].size,
827                 (unsigned int)phys_addr);
829         /* VAYU SDC code */
830         /* Adjust below logic if using cacheable shared memory */
831         shm_phys_addr = 1;
832         virt_addr = memEntries[i].addr[ProcMgr_AddrType_SlaveVirt];
834         ret_val = add_entry_ext(halObject, &phys_addr, &virt_addr,
835                                     (memEntries[i].size));
836         if (ret_val < 0) {
837             GT_setFailureReason (curTrace,
838                                  GT_4CLASS,
839                                  "rproc_mmu_init",
840                                  ret_val,
841                                  "add_entry_ext failed");
842             goto error_exit;
843         }
844     }
846     /* Set the TTB to point to the L1 page table's physical address */
847     OUTREG32(&mmuRegs0->TTB,
848            ((struct pg_table_attrs *)(halObject->mmu0Obj.pPtAttrs))->l1_base_pa);
849     OUTREG32(&mmuRegs1->TTB,
850            ((struct pg_table_attrs *)(halObject->mmu1Obj.pPtAttrs))->l1_base_pa);
852     /* Enable the TWL */
853     hw_mmu_twl_enable(halObject->mmu0Base);
854     hw_mmu_twl_enable(halObject->mmu1Base);
856     hw_mmu_enable(halObject->mmu0Base);
857     hw_mmu_enable(halObject->mmu1Base);
859     rproc_tlb_dump(halObject);
861     // Set the SYSCONFIG
862     reg = INREG32(halObject->mmu0Base + 0x10);
863     reg &= 0xFFFFFFEF;
864     reg |= 0x11;
865     OUTREG32(halObject->mmu0Base + 0x10, reg);
867     reg = INREG32(halObject->mmu1Base + 0x10);
868     reg &= 0xFFFFFFEF;
869     reg |= 0x11;
870     OUTREG32(halObject->mmu1Base + 0x10, reg);
872     return 0;
873 error_exit:
874     return ret_val;
878 /****************************************************
880 *  Function which sets the TWL of the remote core
883 *****************************************************/
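/* Background note on what this controls (behaviour assumed by this file, see
 * rproc_mmu_init() above): with the table walking logic (TWL) enabled, the
 * MMU resolves TLB misses by walking the page tables rooted at the TTB
 * register; with it disabled, only entries loaded directly into the TLB via
 * load_iotlb_entry() can translate addresses. */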
885 static Int rproc_set_twl (UInt32 mmuBase, Bool on)
887     Int status = 0;
888     VAYUDsp_MMURegs * mmuRegs = NULL;
889     ULONG reg;
891     if (mmuBase == 0) {
892         status = -ENOMEM;
893         GT_setFailureReason (curTrace,
894                              GT_4CLASS,
895                              "rproc_set_twl",
896                              status,
897                              "mmuBase is NULL");
898     }
899     else {
900         mmuRegs = (VAYUDsp_MMURegs *)mmuBase;
902         /* Setting MMU to Smart Idle Mode */
903         reg = INREG32(&mmuRegs->SYSCONFIG);
904         reg &= ~MMU_SYS_IDLE_MASK;
905         reg |= (MMU_SYS_IDLE_SMART | MMU_SYS_AUTOIDLE);
906         OUTREG32(&mmuRegs->SYSCONFIG, reg);
908         /* Enabling MMU */
909         reg =  INREG32(&mmuRegs->CNTL);
911         if (on)
912             OUTREG32(&mmuRegs->IRQENABLE, MMU_IRQ_TWL_MASK);
913         else
914             OUTREG32(&mmuRegs->IRQENABLE, MMU_IRQ_TLB_MISS_MASK);
916         reg &= ~MMU_CNTL_MASK;
917         if (on)
918             reg |= (MMU_CNTL_MMU_EN | MMU_CNTL_TWL_EN);
919         else
920             reg |= (MMU_CNTL_MMU_EN);
922         OUTREG32(&mmuRegs->CNTL, reg);
923     }
925     return status;
929 /*========================================
930  * This sets up the Dsp processor MMU Page tables
931  *
932  */
933 static struct pg_table_attrs * init_mmu_page_attribs (UInt32 l1_size,
934                                                       UInt32 l1_allign,
935                                                       UInt32 ls_num_of_pages)
937     struct pg_table_attrs * p_pt_attrs = NULL;
938     UInt32 pg_tbl_pa = 0;
939     off64_t offset = 0;
940     UInt32 pg_tbl_va = 0;
941     UInt32 align_size = 0;
942     UInt32 len = 0;
943     int status = 0;
945     p_pt_attrs = Memory_alloc (NULL, sizeof(struct pg_table_attrs), 0, NULL);
946     if (p_pt_attrs)
947         Memory_set (p_pt_attrs, 0, sizeof(struct pg_table_attrs));
948     else {
949         status = -ENOMEM;
950         GT_setFailureReason (curTrace,
951                              GT_4CLASS,
952                              "init_mmu_page_attribs",
953                              status,
954                              "Memory_alloc failed");
955         goto error_exit;
956     }
958     p_pt_attrs->l1_size = l1_size;
959     align_size = p_pt_attrs->l1_size;
960     p_pt_attrs->l1_tbl_alloc_sz = 0x100000;
961     /* Align sizes are expected to be power of 2 */
962     /* we like to get aligned on L1 table size */
963     pg_tbl_va = (UInt32) mmap64 (NULL,
964                                  p_pt_attrs->l1_tbl_alloc_sz,
965                                  PROT_NOCACHE | PROT_READ | PROT_WRITE,
966                                  MAP_ANON | MAP_PHYS | MAP_PRIVATE,
967                                  NOFD,
968                                  0x0);
969     if (pg_tbl_va == (UInt32)MAP_FAILED) {
970         pg_tbl_va = 0;
971         status = -ENOMEM;
972         GT_setFailureReason (curTrace,
973                              GT_4CLASS,
974                              "init_mmu_page_attribs",
975                              status,
976                              "mmap64 failed");
977         goto error_exit;
978     }
979     else {
980         /* Make sure the memory is contiguous */
981         status = mem_offset64 ((void *)pg_tbl_va, NOFD,
982                                p_pt_attrs->l1_tbl_alloc_sz, &offset, &len);
983         pg_tbl_pa = (UInt32)offset;
984         if (len != p_pt_attrs->l1_tbl_alloc_sz) {
985             status = -ENOMEM;
986             GT_setFailureReason (curTrace,
987                                  GT_4CLASS,
988                                  "init_mmu_page_attribs",
989                                  status,
990                                  "phys mem is not contiguous");
991         }
992         if (status != 0) {
993             GT_setFailureReason (curTrace,
994                                  GT_4CLASS,
995                                  "init_mmu_page_attribs",
996                                  status,
997                                  "mem_offset64 failed");
998             goto error_exit;
999         }
1000     }
1001     /* Check if the PA is aligned for us */
1002     if ((pg_tbl_pa) & (align_size-1)) {
1003         /* PA not aligned to page table size ,*/
1004         /* try with more allocation and align */
1005         munmap((void *)pg_tbl_va, p_pt_attrs->l1_tbl_alloc_sz);
1006         p_pt_attrs->l1_tbl_alloc_sz = p_pt_attrs->l1_tbl_alloc_sz*2;
1007         /* we like to get aligned on L1 table size */
1008         pg_tbl_va = (UInt32) mmap64 (NULL,
1009                                      p_pt_attrs->l1_tbl_alloc_sz,
1010                                      PROT_NOCACHE | PROT_READ | PROT_WRITE,
1011                                      MAP_ANON | MAP_PHYS | MAP_PRIVATE,
1012                                      NOFD,
1013                                      0);
1014         if (pg_tbl_va == (UInt32)MAP_FAILED) {
1015             pg_tbl_va = 0;
1016             status = -ENOMEM;
1017             GT_setFailureReason (curTrace,
1018                                  GT_4CLASS,
1019                                  "init_mmu_page_attribs",
1020                                  status,
1021                                  "mmap64 failed");
1022             goto error_exit;
1023         }
1024         else {
1025             /* Make sure the memory is contiguous */
1026             status = mem_offset64 ((void *)pg_tbl_va, NOFD,
1027                                    p_pt_attrs->l1_tbl_alloc_sz, &offset, &len);
1028             pg_tbl_pa = (UInt32)offset;
1029             if (len != p_pt_attrs->l1_tbl_alloc_sz) {
1030                 status = -ENOMEM;
1031                 GT_setFailureReason (curTrace,
1032                                      GT_4CLASS,
1033                                      "init_mmu_page_attribs",
1034                                      status,
1035                                      "phys mem is not contiguous");
1036             }
1037             if (status != 0) {
1038                 GT_setFailureReason (curTrace,
1039                                      GT_4CLASS,
1040                                      "init_mmu_page_attribs",
1041                                      status,
1042                                      "mem_offset64 failed");
1043                 goto error_exit;
1044             }
1045         }
1046         /* We should be able to get aligned table now */
1047         p_pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
1048         p_pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
1049         /* Align the PA to the next 'align'  boundary */
1050         p_pt_attrs->l1_base_pa = ((pg_tbl_pa) + (align_size-1)) &
1051                             (~(align_size-1));
1052         p_pt_attrs->l1_base_va = pg_tbl_va + (p_pt_attrs->l1_base_pa -
1053                                 pg_tbl_pa);
1054     } else {
1055         /* We got aligned PA, cool */
1056         p_pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
1057         p_pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
1058         p_pt_attrs->l1_base_pa = pg_tbl_pa;
1059         p_pt_attrs->l1_base_va = pg_tbl_va;
1060     }
1062     if (p_pt_attrs->l1_base_va)
1063         memset((UInt8*)p_pt_attrs->l1_base_va, 0x00, p_pt_attrs->l1_size);
1064     p_pt_attrs->l2_num_pages = ls_num_of_pages;
1065     p_pt_attrs->l2_size = HW_MMU_COARSE_PAGE_SIZE * p_pt_attrs->l2_num_pages;
1066     align_size = 4; /* Make it UInt32 aligned  */
1067     /* we like to get aligned on L1 table size */
1068     pg_tbl_va = p_pt_attrs->l1_base_va + 0x80000;
1069     pg_tbl_pa = p_pt_attrs->l1_base_pa + 0x80000;
1070     p_pt_attrs->l2_tbl_alloc_pa = pg_tbl_pa;
1071     p_pt_attrs->l2_tbl_alloc_va = pg_tbl_va;
1072     p_pt_attrs->ls_tbl_alloc_sz = p_pt_attrs->l2_size;
1073     p_pt_attrs->l2_base_pa = pg_tbl_pa;
1074     p_pt_attrs->l2_base_va = pg_tbl_va;
1075     if (p_pt_attrs->l2_base_va)
1076         memset((UInt8*)p_pt_attrs->l2_base_va, 0x00, p_pt_attrs->l2_size);
1078     p_pt_attrs->pg_info = Memory_alloc(NULL, sizeof(struct page_info), 0, NULL);
1079     if (p_pt_attrs->pg_info)
1080         Memory_set (p_pt_attrs->pg_info, 0, sizeof(struct page_info));
1081     else {
1082         status = -ENOMEM;
1083         GT_setFailureReason (curTrace,
1084                              GT_4CLASS,
1085                              "init_mmu_page_attribs",
1086                              status,
1087                              "Memory_alloc failed");
1088         goto error_exit;
1089     }
1090     return p_pt_attrs;
1092 error_exit:
1093     if (p_pt_attrs) {
1094         if (p_pt_attrs->pg_info)
1095             Memory_free (NULL, p_pt_attrs->pg_info, sizeof(struct page_info));
1096         if (p_pt_attrs->l1_tbl_alloc_va) {
1097             munmap ((void *)p_pt_attrs->l1_tbl_alloc_va,
1098                     p_pt_attrs->l1_tbl_alloc_sz);
1099         }
1100         Memory_free (NULL, p_pt_attrs, sizeof(struct pg_table_attrs));
1101         p_pt_attrs = NULL;
1102     }
1104     return NULL;
1108 /*========================================
1109  * This destroys the Dsp processor MMU Page tables
1110  *
1111  */
1112 static Void deinit_mmu_page_attribs (struct pg_table_attrs * p_pt_attrs)
1114     if (p_pt_attrs) {
1115         if (p_pt_attrs->pg_info)
1116             Memory_free (NULL, p_pt_attrs->pg_info, sizeof(struct page_info));
1117         if (p_pt_attrs->l1_tbl_alloc_va) {
1118             munmap ((void *)p_pt_attrs->l1_tbl_alloc_va,
1119                     p_pt_attrs->l1_tbl_alloc_sz);
1120         }
1121         Memory_free (NULL, p_pt_attrs, sizeof(struct pg_table_attrs));
1122         p_pt_attrs = NULL;
1123     }
1127 /*============================================
1128  * This function calculates PTE address (MPU virtual) to be updated
1129  *  It also manages the L2 page tables
1130  */
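/* Example flow for a 4KB mapping whose L1 PTE is still invalid: a free coarse
 * L2 table (pg_info[i].num_entries == 0) is picked, the L1 PTE is pointed at
 * it with hw_mmu_pte_set(..., HW_MMU_COARSE_PAGE_SIZE, ...), and only then is
 * the 4KB PTE written into that L2 table; num_entries tracks how many 4KB
 * slots of the coarse table are in use (a 64KB entry counts as 16). */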
1131 static Int pte_set (UInt32 pa, UInt32 va, UInt32 size,
1132                     struct hw_mmu_map_attrs_t *attrs, struct pg_table_attrs *pt_Table)
1134     UInt32 i;
1135     UInt32 pte_val;
1136     UInt32 pte_addr_l1;
1137     UInt32 pte_size;
1138     UInt32 pg_tbl_va; /* Base address of the PT that will be updated */
1139     UInt32 l1_base_va;
1140      /* Compiler warns that the next three variables might be used
1141      * uninitialized in this function. Doesn't seem so. Working around,
1142      * anyways.  */
1143     UInt32 l2_base_va = 0;
1144     UInt32 l2_base_pa = 0;
1145     UInt32 l2_page_num = 0;
1146     struct pg_table_attrs *pt = pt_Table;
1147     struct iotlb_entry    *mapAttrs;
1148     int status = 0;
1149     VAYUDSP_HalMmuEntryInfo setPteInfo;
1150     mapAttrs = Memory_alloc(0, sizeof(struct iotlb_entry), 0, NULL);
1152     l1_base_va = pt->l1_base_va;
1153     pg_tbl_va = l1_base_va;
1154     if ((size == HW_PAGE_SIZE_64KB) || (size == HW_PAGE_SIZE_4KB)) {
1155         /* Find whether the L1 PTE points to a valid L2 PT */
1156         pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va);
1157         if (pte_addr_l1 <= (pt->l1_base_va + pt->l1_size)) {
1158             pte_val = *(UInt32 *)pte_addr_l1;
1159             pte_size = hw_mmu_pte_sizel1(pte_val);
1160         } else {
1161             return -EINVAL;
1162         }
1163         /* FIX ME */
1164         /* TODO: Add synchronization element */
1165         /*        sync_enter_cs(pt->hcs_object);*/
1166         if (pte_size == HW_MMU_COARSE_PAGE_SIZE) {
1167             /* Get the L2 PA from the L1 PTE, and find
1168              * corresponding L2 VA */
1169             l2_base_pa = hw_mmu_pte_coarsel1(pte_val);
1170             l2_base_va = l2_base_pa - pt->l2_base_pa +
1171             pt->l2_base_va;
1172             l2_page_num = (l2_base_pa - pt->l2_base_pa) /
1173                     HW_MMU_COARSE_PAGE_SIZE;
1174         } else if (pte_size == 0) {
1175             /* L1 PTE is invalid. Allocate a L2 PT and
1176              * point the L1 PTE to it */
1177             /* Find a free L2 PT. */
1178             for (i = 0; (i < pt->l2_num_pages) &&
1179                 (pt->pg_info[i].num_entries != 0); i++)
1180                 ;
1181             if (i < pt->l2_num_pages) {
1182                 l2_page_num = i;
1183                 l2_base_pa = pt->l2_base_pa + (l2_page_num *
1184                        HW_MMU_COARSE_PAGE_SIZE);
1185                 l2_base_va = pt->l2_base_va + (l2_page_num *
1186                        HW_MMU_COARSE_PAGE_SIZE);
1187                 /* Endianness attributes are ignored for
1188                  * HW_MMU_COARSE_PAGE_SIZE */
1189                 mapAttrs->endian = attrs->endianism;
1190                 mapAttrs->mixed = attrs->mixedSize;
1191                 mapAttrs->elsz= attrs->element_size;
1192                 mapAttrs->da = va;
1193                 mapAttrs->pa = pa;
1194                 status = hw_mmu_pte_set(pg_tbl_va, l2_base_pa, va,
1195                                         HW_MMU_COARSE_PAGE_SIZE, attrs);
1196             } else {
1197                 status = -ENOMEM;
1198             }
1199         } else {
1200             /* Found valid L1 PTE of another size.
1201              * Should not overwrite it. */
1202             status = -EINVAL;
1203         }
1204         if (status == 0) {
1205             pg_tbl_va = l2_base_va;
1206             if (size == HW_PAGE_SIZE_64KB)
1207                 pt->pg_info[l2_page_num].num_entries += 16;
1208             else
1209                 pt->pg_info[l2_page_num].num_entries++;
1210         }
1211     }
1212     if (status == 0) {
1213         mapAttrs->endian = attrs->endianism;
1214         mapAttrs->mixed = attrs->mixedSize;
1215         mapAttrs->elsz= attrs->element_size;
1216         mapAttrs->da = va;
1217         mapAttrs->pa = pa;
1218         mapAttrs->pgsz = MMU_CAM_PGSZ_16M;
1219         setPteInfo.elementSize = attrs->element_size;
1220         setPteInfo.endianism = attrs->endianism;
1221         setPteInfo.masterPhyAddr = pa;
1222         setPteInfo.mixedSize = attrs->mixedSize;
1223         setPteInfo.size = size;
1224         setPteInfo.slaveVirtAddr = va;
1226         status = hw_mmu_pte_set(pg_tbl_va, pa, va, size, attrs);
1227         if (status == RET_OK)
1228             status = 0;
1229     }
1230     Memory_free(0, mapAttrs, sizeof(struct iotlb_entry));
1231     return status;
1235 /*=============================================
1236  * This function calculates the optimum page-aligned addresses and sizes
1237  * Caller must pass page-aligned values
1238  */
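/* Worked example: pa = 0x80000000, va = 0x20000000, size = 0x101000. Both
 * addresses are 1MB-aligned and size >= 1MB, so the first pass maps a 1MB
 * section; the remaining 0x1000 bytes are then mapped with a single 4KB
 * small page, i.e. pte_set() is called twice for this region. */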
1239 static Int pte_update (UInt32 pa, UInt32 va, UInt32 size,
1240                        struct hw_mmu_map_attrs_t *map_attrs, struct pg_table_attrs *pt_Table)
1242     UInt32 i;
1243     UInt32 all_bits;
1244     UInt32 pa_curr = pa;
1245     UInt32 va_curr = va;
1246     UInt32 num_bytes = size;
1247     Int status = 0;
1248     UInt32 pg_size[] = {HW_PAGE_SIZE_16MB, HW_PAGE_SIZE_1MB,
1249                HW_PAGE_SIZE_64KB, HW_PAGE_SIZE_4KB};
1250     while (num_bytes && (status == 0)) {
1251         /* To find the max. page size with which both PA & VA are
1252          * aligned */
1253         all_bits = pa_curr | va_curr;
1254         for (i = 0; i < 4; i++) {
1255             if ((num_bytes >= pg_size[i]) && ((all_bits &
1256                (pg_size[i] - 1)) == 0)) {
1257                 status = pte_set(pa_curr,
1258                     va_curr, pg_size[i], map_attrs, pt_Table);
1259                 pa_curr += pg_size[i];
1260                 va_curr += pg_size[i];
1261                 num_bytes -= pg_size[i];
1262                  /* Don't try smaller sizes. Hopefully we have
1263                  * reached an address aligned to a bigger page
1264                  * size */
1265                 break;
1266             }
1267         }
1268     }
1269     return status;
1273 /*============================================
1274  * This function maps MPU buffer to the DSP address space. It performs
1275  * linear to physical address translation if required. It translates each
1276  * page since linear addresses can be physically non-contiguous.
1277  * All address & size arguments are assumed to be page aligned (in proc.c)
1278  *
1279  */
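/* Sketch of the attribute decoding below, with the flags add_entry_ext()
 * passes (DSP_MAPPHYSICALADDR | DSP_MAPLITTLEENDIAN | DSP_MAPELEMSIZE32):
 * hw_attrs.endianism becomes HW_LITTLE_ENDIAN, hw_attrs.mixedSize is 0 and
 * hw_attrs.element_size is HW_ELEM_SIZE_32BIT, after which pte_update()
 * programs the page tables and the TLB is flushed once at the end. */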
1280 static Int rproc_mem_map (UInt32 mmuBase, struct pg_table_attrs * p_pt_attrs,
1281                           UInt32 mpu_addr, UInt32 ul_virt_addr,
1282                           UInt32 num_bytes, UInt32 map_attr)
1284     UInt32 attrs;
1285     Int status = 0;
1286     struct hw_mmu_map_attrs_t hw_attrs;
1287     Int pg_i = 0;
1289     if (mmuBase == 0) {
1290         status = -ENOMEM;
1291         GT_setFailureReason (curTrace,
1292                              GT_4CLASS,
1293                              "rproc_mem_map",
1294                              status,
1295                              "mmuBase is 0");
1296     }
1297     else if (num_bytes == 0) {
1298         status = -EINVAL;
1299         GT_setFailureReason (curTrace,
1300                              GT_4CLASS,
1301                              "rproc_mem_map",
1302                              status,
1303                              "num_bytes is 0");
1304     }
1305     else {
1306         if (map_attr != 0) {
1307             attrs = map_attr;
1308             attrs |= DSP_MAPELEMSIZE32;
1309         } else {
1310             /* Assign default attributes */
1311             attrs = DSP_MAPVIRTUALADDR | DSP_MAPELEMSIZE32;
1312         }
1313         /* Take mapping properties */
1314         if (attrs & DSP_MAPBIGENDIAN)
1315             hw_attrs.endianism = HW_BIG_ENDIAN;
1316         else
1317             hw_attrs.endianism = HW_LITTLE_ENDIAN;
1319         hw_attrs.mixedSize = (enum hw_mmu_mixed_size_t)
1320                      ((attrs & DSP_MAPMIXEDELEMSIZE) >> 2);
1321         /* Ignore element_size if mixedSize is enabled */
1322         if (hw_attrs.mixedSize == 0) {
1323             if (attrs & DSP_MAPELEMSIZE8) {
1324                 /* Size is 8 bit */
1325                 hw_attrs.element_size = HW_ELEM_SIZE_8BIT;
1326             } else if (attrs & DSP_MAPELEMSIZE16) {
1327                 /* Size is 16 bit */
1328                 hw_attrs.element_size = HW_ELEM_SIZE_16BIT;
1329             } else if (attrs & DSP_MAPELEMSIZE32) {
1330                 /* Size is 32 bit */
1331                 hw_attrs.element_size = HW_ELEM_SIZE_32BIT;
1332             } else if (attrs & DSP_MAPELEMSIZE64) {
1333                 /* Size is 64 bit */
1334                 hw_attrs.element_size = HW_ELEM_SIZE_64BIT;
1335             } else {
1336                 /* Mixedsize isn't enabled, so size can't be
1337                  * zero here */
1338                 status = -EINVAL;
1339                 GT_setFailureReason (curTrace,
1340                                      GT_4CLASS,
1341                                      "rproc_mem_map",
1342                                      status,
1343                                      "MMU element size is zero");
1344             }
1345         }
1347         if (status >= 0) {
1348             /*
1349              * Do OS-specific user-va to pa translation.
1350              * Combine physically contiguous regions to reduce TLBs.
1351              * Pass the translated pa to PteUpdate.
1352              */
1353             if ((attrs & DSP_MAPPHYSICALADDR)) {
1354                 status = pte_update(mpu_addr, ul_virt_addr, num_bytes,
1355                            &hw_attrs,
1356                            (struct pg_table_attrs *)p_pt_attrs);
1357             }
1359             /* Don't propagate Linux or HW status to upper layers */
1360             if (status < 0) {
1361                 /*
1362                  * Roll back the mapped pages in case mapping failed in the
1363                  * middle
1364                  */
1365                 if (pg_i) {
1366                     rproc_mem_unmap(mmuBase, p_pt_attrs, ul_virt_addr,
1367                                     (pg_i * PAGE_SIZE));
1368                 }
1369             }
1371             /* In any case, flush the TLB
1372              * This is called from here instead of from pte_update to avoid
1373              * unnecessary repetition while mapping non-contiguous physical
1374              * regions of a virtual region */
1375             hw_mmu_tlb_flushAll(mmuBase);
1376         }
1377     }
1378     return status;
1383 /*
1384  *  ======== rproc_mem_unmap ========
1385  *      Invalidate the PTEs for the DSP VA block to be unmapped.
1386  *
1387  *      PTEs of a mapped memory block are contiguous in any page table
1388  *      So, instead of looking up the PTE address for every 4K block,
1389  *      we clear consecutive PTEs until we unmap all the bytes
1390  */
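/* Example: unmapping a 128KB block that was mapped with 4KB small pages
 * (and that sits within one coarse table) clears 32 consecutive L2 PTEs;
 * if that leaves the owning coarse table with num_entries == 0, the L1 PTE
 * pointing to it is cleared as well, and the TLB is flushed once at
 * EXIT_LOOP. */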
1391 static Int rproc_mem_unmap (UInt32 mmuBase, struct pg_table_attrs * p_pt_attrs,
1392                             UInt32 da, UInt32 num_bytes)
1394     UInt32 L1_base_va;
1395     UInt32 L2_base_va;
1396     UInt32 L2_base_pa;
1397     UInt32 L2_page_num;
1398     UInt32 pte_val;
1399     UInt32 pte_size;
1400     UInt32 pte_count;
1401     UInt32 pte_addr_l1;
1402     UInt32 pte_addr_l2 = 0;
1403     UInt32 rem_bytes;
1404     UInt32 rem_bytes_l2;
1405     UInt32 vaCurr;
1406     Int status = 0;
1407     UInt32 temp;
1408     UInt32 pAddr;
1409     UInt32 numof4Kpages = 0;
1411     if (mmuBase == 0) {
1412         status = -ENOMEM;
1413         GT_setFailureReason (curTrace,
1414                              GT_4CLASS,
1415                              "rproc_mem_unmap",
1416                              status,
1417                              "mmuBase is 0");
1418     }
1419     else if (p_pt_attrs == NULL) {
1420         status = -ENOMEM;
1421         GT_setFailureReason (curTrace,
1422                              GT_4CLASS,
1423                              "rproc_mem_unmap",
1424                              status,
1425                              "p_pt_attrs is NULL");
1426     }
1427     else {
1428         vaCurr = da;
1429         rem_bytes = num_bytes;
1430         rem_bytes_l2 = 0;
1431         L1_base_va = p_pt_attrs->l1_base_va;
1432         pte_addr_l1 = hw_mmu_pte_addr_l1(L1_base_va, vaCurr);
1433         while (rem_bytes) {
1434             UInt32 vaCurrOrig = vaCurr;
1435             /* Find whether the L1 PTE points to a valid L2 PT */
1436             pte_addr_l1 = hw_mmu_pte_addr_l1(L1_base_va, vaCurr);
1437             pte_val = *(UInt32 *)pte_addr_l1;
1438             pte_size = hw_mmu_pte_sizel1(pte_val);
1439             if (pte_size == HW_MMU_COARSE_PAGE_SIZE) {
1440                 /*
1441                  * Get the L2 PA from the L1 PTE, and find
1442                  * corresponding L2 VA
1443                  */
1444                 L2_base_pa = hw_mmu_pte_coarsel1(pte_val);
1445                 L2_base_va = L2_base_pa - p_pt_attrs->l2_base_pa
1446                             + p_pt_attrs->l2_base_va;
1447                 L2_page_num = (L2_base_pa - p_pt_attrs->l2_base_pa) /
1448                         HW_MMU_COARSE_PAGE_SIZE;
1449                 /*
1450                  * Find the L2 PTE address from which we will start
1451                  * clearing, the number of PTEs to be cleared on this
1452                  * page, and the size of VA space that needs to be
1453                  * cleared on this L2 page
1454                  */
1455                 pte_addr_l2 = hw_mmu_pte_addr_l2(L2_base_va, vaCurr);
1456                 pte_count = pte_addr_l2 & (HW_MMU_COARSE_PAGE_SIZE - 1);
1457                 pte_count = (HW_MMU_COARSE_PAGE_SIZE - pte_count) /
1458                         sizeof(UInt32);
1459                 if (rem_bytes < (pte_count * PAGE_SIZE))
1460                     pte_count = rem_bytes / PAGE_SIZE;
1462                 rem_bytes_l2 = pte_count * PAGE_SIZE;
1463                 /*
1464                  * Unmap the VA space on this L2 PT. A quicker way
1465                  * would be to clear pte_count entries starting from
1466                  * pte_addr_l2. However, below code checks that we don't
1467                  * clear invalid entries or less than 64KB for a 64KB
1468                  * entry. Similar checking is done for L1 PTEs too
1469                  * below
1470                  */
1471                 while (rem_bytes_l2) {
1472                     pte_val = *(UInt32 *)pte_addr_l2;
1473                     pte_size = hw_mmu_pte_sizel2(pte_val);
1474                     /* vaCurr aligned to pte_size? */
1475                     if ((pte_size != 0) && (rem_bytes_l2
1476                         >= pte_size) &&
1477                         !(vaCurr & (pte_size - 1))) {
1478                         /* Collect Physical addresses from VA */
1479                         pAddr = (pte_val & ~(pte_size - 1));
1480                         if (pte_size == HW_PAGE_SIZE_64KB)
1481                             numof4Kpages = 16;
1482                         else
1483                             numof4Kpages = 1;
1484                         temp = 0;
1486                         if (hw_mmu_pte_clear(pte_addr_l2,
1487                             vaCurr, pte_size) == RET_OK) {
1488                             rem_bytes_l2 -= pte_size;
1489                             vaCurr += pte_size;
1490                             pte_addr_l2 += (pte_size >> 12)
1491                                 * sizeof(UInt32);
1492                         } else {
1493                             status = -EFAULT;
1494                             goto EXIT_LOOP;
1495                         }
1496                     } else
1497                         status = -EFAULT;
1498                 }
1499                 if (rem_bytes_l2 != 0) {
1500                     status = -EFAULT;
1501                     goto EXIT_LOOP;
1502                 }
1503                 p_pt_attrs->pg_info[L2_page_num].num_entries -=
1504                             pte_count;
1505                 if (p_pt_attrs->pg_info[L2_page_num].num_entries
1506                                     == 0) {
1507                     /*
1508                      * Clear the L1 PTE pointing to the
1509                      * L2 PT
1510                      */
1511                     if (RET_OK != hw_mmu_pte_clear(L1_base_va,
1512                         vaCurrOrig, HW_MMU_COARSE_PAGE_SIZE)) {
1513                         status = -EFAULT;
1514                         goto EXIT_LOOP;
1515                     }
1516                 }
1517                 rem_bytes -= pte_count * PAGE_SIZE;
1518             } else
1519                 /* vaCurr aligned to pte_size? */
1520                 /* pte_size = 1 MB or 16 MB */
1521                 if ((pte_size != 0) && (rem_bytes >= pte_size) &&
1522                    !(vaCurr & (pte_size - 1))) {
1523                     if (pte_size == HW_PAGE_SIZE_1MB)
1524                         numof4Kpages = 256;
1525                     else
1526                         numof4Kpages = 4096;
1527                     temp = 0;
1528                     /* Collect Physical addresses from VA */
1529                     pAddr = (pte_val & ~(pte_size - 1));
1530                     if (hw_mmu_pte_clear(L1_base_va, vaCurr,
1531                             pte_size) == RET_OK) {
1532                         rem_bytes -= pte_size;
1533                         vaCurr += pte_size;
1534                     } else {
1535                         status = -EFAULT;
1536                         goto EXIT_LOOP;
1537                     }
1538             } else {
1539                 status = -EFAULT;
1540             }
1541         }
1542     }
1543     /*
1544      * Flush the TLB here so that any stale entries are removed, on both
1545      * the success and the error paths.
1546      */
1547 EXIT_LOOP:
1548     hw_mmu_tlb_flushAll(mmuBase);
1549     return status;
1550 }
1553 /*========================================
1554  * Sets up the DSP processor: initializes the page table attributes and
1555  * programs both DSP MMUs (MMU0 and MMU1) with the given memory entries.
1556  */
1557 Int rproc_dsp_setup (VAYUDSP_HalObject * halObject,
1558                      ProcMgr_AddrInfo * memEntries,
1559                      UInt32 numMemEntries)
1560 {
1561     Int ret_val = 0;
1562     struct pg_table_attrs * p_pt_attrs_0 = NULL;
1563     struct pg_table_attrs * p_pt_attrs_1 = NULL;
1565     p_pt_attrs_0 = init_mmu_page_attribs(0x10000, 14, 128);
1566     if (!p_pt_attrs_0) {
1567         GT_setFailureReason (curTrace,
1568                              GT_4CLASS,
1569                              "rproc_dsp_setup",
1570                              ret_val,
1571                              "init_mmu_page_attribs failed");
1572     }
1573     else {
1574         halObject->mmu0Obj.pPtAttrs = p_pt_attrs_0;
1575         p_pt_attrs_1 = init_mmu_page_attribs(0x10000, 14, 128);
1576         if (!p_pt_attrs_1) {
1577             GT_setFailureReason (curTrace,
1578                                  GT_4CLASS,
1579                                  "rproc_dsp_setup",
1580                                  ret_val,
1581                                  "init_mmu_page_attribs failed");
1582         }
1583         else {
1584             halObject->mmu1Obj.pPtAttrs = p_pt_attrs_1;
1586             /* Disable TWL  */
1587             ret_val = rproc_set_twl(halObject->mmu0Base, FALSE);
1588             if (ret_val < 0) {
1589                 GT_setFailureReason (curTrace,
1590                                      GT_4CLASS,
1591                                      "rproc_dsp_setup",
1592                                      ret_val,
1593                                      "rproc_set_twl to FALSE failed");
1594             }
1595             else {
1596                 ret_val = rproc_set_twl(halObject->mmu1Base, FALSE);
1597                 if (ret_val < 0) {
1598                     GT_setFailureReason (curTrace,
1599                                          GT_4CLASS,
1600                                          "rproc_dsp_setup",
1601                                          ret_val,
1602                                          "rproc_set_twl to FALSE failed");
1603                 }
1604                 else {
1605                     ret_val = rproc_mmu_init (halObject, memEntries,
1606                                               numMemEntries);
1607                     if (ret_val < 0) {
1608                         GT_setFailureReason (curTrace,
1609                                              GT_4CLASS,
1610                                              "rproc_dsp_setup",
1611                                              ret_val,
1612                                              "rproc_mmu_init failed");
1613                     }
1614                     else {
1615                         ret_val = rproc_set_twl(halObject->mmu0Base, TRUE);
1616                         if (ret_val < 0) {
1617                             GT_setFailureReason (curTrace,
1618                                                  GT_4CLASS,
1619                                                  "rproc_dsp_setup",
1620                                                  ret_val,
1621                                                  "rproc_set_twl to TRUE failed");
1622                         }
1623                         else {
1624                             ret_val = rproc_set_twl(halObject->mmu1Base, TRUE);
1625                             if (ret_val < 0) {
1626                                 GT_setFailureReason (curTrace,
1627                                                      GT_4CLASS,
1628                                                      "rproc_dsp_setup",
1629                                                      ret_val,
1630                                                      "rproc_set_twl to TRUE failed");
1631                             }
1632                         }
1633                     }
1634                 }
1635             }
1636         }
1637     }
1639     if ((ret_val < 0) || (p_pt_attrs_0 == NULL) || (p_pt_attrs_1 == NULL)) {
1640         if (ret_val == 0) ret_val = -ENOMEM; /* attrs allocation failed */
1641         if (p_pt_attrs_0) deinit_mmu_page_attribs(p_pt_attrs_0);
1642         if (p_pt_attrs_1) deinit_mmu_page_attribs(p_pt_attrs_1);
1643         halObject->mmu0Obj.pPtAttrs = NULL;
1644         halObject->mmu1Obj.pPtAttrs = NULL;
1645     }
1646     return ret_val;
1647 }
1651 Void rproc_dsp_destroy(VAYUDSP_HalObject * halObject)
1652 {
1653     shm_phys_addr_dsp = 0;
1655     if (halObject->mmu0Obj.pPtAttrs) {
1656         deinit_mmu_page_attribs(halObject->mmu0Obj.pPtAttrs);
1657         halObject->mmu0Obj.pPtAttrs = NULL;
1658     }
1660     if (halObject->mmu1Obj.pPtAttrs) {
1661         deinit_mmu_page_attribs(halObject->mmu1Obj.pPtAttrs);
1662         halObject->mmu1Obj.pPtAttrs = NULL;
1663     }
1664 }
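/*
 * Illustrative call sequence (a sketch only; halObject and the memEntries
 * table are assumed to be populated by the calling Processor module before
 * this is invoked):
 *
 *     Int status = rproc_dsp_setup(halObject, memEntries, numMemEntries);
 *     if (status < 0) {
 *         // setup failed; the page table attributes have already been freed
 *     }
 *     ...
 *     // at shutdown, release the page table attributes for both DSP MMUs
 *     rproc_dsp_destroy(halObject);
 */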
1667 static Void iotlb_load_cr (UInt32 mmuBase, struct cr_regs *cr)
1668 {
1669     ULONG reg;
1670     VAYUDsp_MMURegs * mmuRegs = (VAYUDsp_MMURegs *)mmuBase;
1672     reg = cr->cam | MMU_CAM_V;
1673     OUTREG32(&mmuRegs->CAM, reg);
1675     reg = cr->ram;
1676     OUTREG32(&mmuRegs->RAM, reg);
1678     reg = 1;
1679     OUTREG32(&mmuRegs->FLUSH_ENTRY, reg);
1681     reg = 1;
1682     OUTREG32(&mmuRegs->LD_TLB, reg);
1683 }
1686 /**
1687  * iotlb_dump_cr - Dump an iommu tlb entry into buf
1688  * @cr:     contents of the cam and ram registers for the entry
1689  * @buf:    output buffer
1690  **/
1692 static UInt32 iotlb_dump_cr (struct cr_regs *cr, char *buf)
1693 {
1694     Char *p = buf;
1696     if(!cr || !buf)
1697         return 0;
1699     /* FIXME: Need more detail analysis of cam/ram */
1700     p += sprintf(p, "%08x %08x %01x\n", (unsigned int)cr->cam,
1701                     (unsigned int)cr->ram,
1702                     (cr->cam & MMU_CAM_P) ? 1 : 0);
1703     return (p - buf);
1704 }
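/*
 * For illustration only (the values below are made up): each dumped entry is
 * a single line of the form "<cam> <ram> <p>", e.g.
 *
 *     95000005 9cf00312 0
 *
 * where the first two fields are the raw CAM and RAM words in hex and the
 * last field is 1 if the entry is preserved (MMU_CAM_P set), 0 otherwise.
 */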
1708 static Int iotlb_cr_valid (struct cr_regs *cr)
1709 {
1710     if (!cr)
1711         return -EINVAL;
1713     return (cr->cam & MMU_CAM_V);
1714 }
1718 static struct cr_regs *omap4_alloc_cr (struct iotlb_entry *e)
1719 {
1720     struct cr_regs *cr;
1722     if (e->da & ~(get_cam_va_mask(e->pgsz))) {
1723         GT_setFailureReason (curTrace,
1724                              GT_4CLASS,
1725                              "omap4_alloc_cr",
1726                              -EINVAL,
1727                              "failed mask check");
1728         return NULL;
1729     }
1731     cr = mmap(NULL,
1732               sizeof(struct cr_regs),
1733               PROT_NOCACHE | PROT_READ | PROT_WRITE,
1734               MAP_ANON | MAP_PHYS | MAP_PRIVATE,
1735               NOFD,
1736               0);
1738     if (MAP_FAILED == cr)
1739     {
1740         GT_setFailureReason (curTrace,
1741                              GT_4CLASS,
1742                              "omap4_alloc_cr",
1743                              -EINVAL,
1744                              "mmap failed");
1745         return NULL;
1746     }
1748     cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid;
1749     cr->ram = e->pa | e->endian | e->elsz | e->mixed;
1750     return cr;
1751 }
1755 static struct cr_regs *iotlb_alloc_cr (struct iotlb_entry *e)
1756 {
1757     if (!e) {
1758         GT_setFailureReason (curTrace,
1759                              GT_4CLASS,
1760                              "iotlb_alloc_cr",
1761                              -EINVAL,
1762                              "e is NULL");
1763         return NULL;
1764     }
1766     return omap4_alloc_cr(e);
1767 }
1771 /**
1772  * load_iotlb_entry - Set an iommu tlb entry
1773  * @mmuBase:    base address of the target MMU
1774  * @e:          an iommu tlb entry info
1775  **/
1776 static Int load_iotlb_entry (UInt32 mmuBase, struct iotlb_entry *e)
1777 {
1778     Int err = 0;
1779     struct iotlb_lock l;
1780     struct cr_regs *cr;
1782     if (mmuBase == NULL) {
1783         err = -EINVAL;
1784         GT_setFailureReason (curTrace,
1785                              GT_4CLASS,
1786                              "load_iotlb_entry",
1787                              err,
1788                              "mmuBase is NULL");
1789         goto out;
1790     }
1792     if (!e) {
1793         err = -EINVAL;
1794         GT_setFailureReason (curTrace,
1795                              GT_4CLASS,
1796                              "load_iotlb_entry",
1797                              err,
1798                              "e is NULL");
1799         goto out;
1800     }
1802     iotlb_getLock(mmuBase, &l);
1804     if (l.base == 32) {
1805         err = -EBUSY;
1806         GT_setFailureReason (curTrace,
1807                              GT_4CLASS,
1808                              "load_iotlb_entry",
1809                              err,
1810                              "l.base is full");
1811         goto out;
1812     }
1813     if (!e->prsvd) {
1814         int i;
1815         struct cr_regs tmp;
1817         for_each_iotlb_cr(32, i, tmp)
1818             if (!iotlb_cr_valid(&tmp))
1819                 break;
1821         if (i == 32) {
1822             err = -EBUSY;
1823             GT_setFailureReason (curTrace,
1824                                  GT_4CLASS,
1825                                  "load_iotlb_entry",
1826                                  err,
1827                                  "i == 32");
1828             goto out;
1829         }
1831         iotlb_getLock(mmuBase, &l);
1832     } else {
1833         l.vict = l.base;
1834         iotlb_setLock(mmuBase, &l);
1835     }
1837     cr = iotlb_alloc_cr(e);
1838     if (!cr){
1839         err = -ENOMEM;
1840         GT_setFailureReason (curTrace,
1841                              GT_4CLASS,
1842                              "load_iotlb_entry",
1843                              err,
1844                              "iotlb_alloc_cr failed");
1845         goto out;
1846     }
1848     iotlb_load_cr(mmuBase, cr);
1849     munmap(cr, sizeof(struct cr_regs));
1851     if (e->prsvd)
1852         l.base++;
1853     /* increment victim for next tlb load */
1854     if (++l.vict == 32)
1855         l.vict = l.base;
1856     iotlb_setLock(mmuBase, &l);
1858 out:
1859     return err;
1860 }
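/*
 * Example use (a sketch; the field values are hypothetical and the page size
 * constant is assumed to be one of the MMU_CAM_PGSZ_* definitions from the
 * hardware headers used here):
 *
 *     struct iotlb_entry e = {0};
 *     e.da    = 0x60000000;        // DSP-side virtual address
 *     e.pa    = 0x9cf00000;        // physical address backing it
 *     e.pgsz  = MMU_CAM_PGSZ_1M;   // page size bits for the CAM word
 *     e.valid = MMU_CAM_V;         // mark the entry valid
 *     e.prsvd = MMU_CAM_P;         // preserve (lock) the entry in the TLB
 *     if (load_iotlb_entry(halObject->mmu0Base, &e) < 0) {
 *         // the TLB entry could not be programmed
 *     }
 */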