/*************************************************************************/ /*!
@Title          MMU Management
@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
@Description    Implements basic low level control of MMU.
@License        Dual MIT/GPLv2

The contents of this file are subject to the MIT license as set out below.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

Alternatively, the contents of this file may be used under the terms of
the GNU General Public License Version 2 ("GPL") in which case the provisions
of GPL are applicable instead of those above.

If you wish to allow use of your version of this file only under the terms of
GPL, and not to allow others to use your version of this file under the terms
of the MIT license, indicate your decision by deleting the provisions above
and replace them with the notice and other provisions required by GPL as set
out in the file called "GPL-COPYING" included in this distribution. If you do
not delete the provisions above, a recipient may use your version of this file
under the terms of either the MIT license or GPL.

This License is also included in this distribution in the file called
"MIT-COPYING".

EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/ /**************************************************************************/
#include "sgxdefs.h"
#include "sgxmmu.h"
#include "services_headers.h"
#include "buffer_manager.h"
#include "hash.h"
#include "ra.h"
#include "pdump_km.h"
#include "sgxapi_km.h"
#include "sgxinfo.h"
#include "sgxinfokm.h"
#include "mmu.h"
#include "sgxconfig.h"
#include "sgx_bridge_km.h"
#include "pdump_osfunc.h"

#define UINT32_MAX_VALUE	0xFFFFFFFFUL
/*
	MMU performs device virtual to physical translation.
	terminology:
	page directory (PD)
	pagetable (PT)
	data page (DP)

	Incoming 32bit Device Virtual Addresses are deconstructed into 3 fields:
	-----------------------------------------------------
	| PD Index/tag:   | PT Index:     | DP offset:      |
	| bits 31:22      | bits 21:n     | bits (n-1):0    |
	-----------------------------------------------------
		where typically n=12 for a standard 4k DP
		but n=16 for a 64k DP

	MMU page directory (PD), pagetable (PT) and data page (DP) config:
	PD:
	- always one page per address space
	- up to 4k in size to span 4Gb (32bit)
	- contains up to 1024 32bit entries
	- entries are indexed by the top 10 bits of an incoming 32bit device virtual address
	- the PD entry selected contains the physical address of the PT to
	  perform the next stage of the V to P translation

	PT:
	- size depends on the DP size, e.g. 4k DPs have 4k PTs but 16k DPs have 1k PTs
	- each PT always spans 4Mb of device virtual address space irrespective of DP size
	- the number of entries in a PT depends on the DP size and ranges from 1024 to 4 entries
	- entries are indexed by the PT Index field of the device virtual address (21:n)
	- the PT entry selected contains the physical address of the DP to access

	DP:
	- size varies from 4k to 4M in x4 steps
	- the DP offset field of the device virtual address ((n-1):0) is used as a byte offset
	  to address into the DP itself
*/
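/*
	Illustrative sketch (not part of the driver): decomposing a device
	virtual address for the common 4k data page case (n == 12), following
	the field layout described above. The helper name is hypothetical.
*/
#if 0
static INLINE IMG_VOID ExampleDecomposeDevVAddr(IMG_UINT32 ui32DevVAddr,
												IMG_UINT32 *pui32PDIndex,
												IMG_UINT32 *pui32PTIndex,
												IMG_UINT32 *pui32DPOffset)
{
	*pui32PDIndex  = ui32DevVAddr >> 22;			/* bits 31:22 - selects one of up to 1024 PDEs */
	*pui32PTIndex  = (ui32DevVAddr >> 12) & 0x3FF;	/* bits 21:12 - selects one of 1024 PTEs */
	*pui32DPOffset = ui32DevVAddr & 0xFFF;			/* bits 11:0  - byte offset within the 4k DP */
}
#endif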
#define SGX_MAX_PD_ENTRIES	(1<<(SGX_FEATURE_ADDRESS_SPACE_SIZE - SGX_MMU_PT_SHIFT - SGX_MMU_PAGE_SHIFT))
#if defined(FIX_HW_BRN_31620)
/* Sim doesn't use the address mask */
#define SGX_MMU_PDE_DUMMY_PAGE		(0)//(0x00000020U)
#define SGX_MMU_PTE_DUMMY_PAGE		(0)//(0x00000020U)

/* 4MB address range per page table */
#define BRN31620_PT_ADDRESS_RANGE_SHIFT		22
#define BRN31620_PT_ADDRESS_RANGE_SIZE		(1 << BRN31620_PT_ADDRESS_RANGE_SHIFT)

/* 64MB address range per PDE cache line */
#define BRN31620_PDE_CACHE_FILL_SHIFT		26
#define BRN31620_PDE_CACHE_FILL_SIZE		(1 << BRN31620_PDE_CACHE_FILL_SHIFT)
#define BRN31620_PDE_CACHE_FILL_MASK		(BRN31620_PDE_CACHE_FILL_SIZE - 1)

/* Page Directory Entries per cache line */
#define BRN31620_PDES_PER_CACHE_LINE_SHIFT	(BRN31620_PDE_CACHE_FILL_SHIFT - BRN31620_PT_ADDRESS_RANGE_SHIFT)
#define BRN31620_PDES_PER_CACHE_LINE_SIZE	(1 << BRN31620_PDES_PER_CACHE_LINE_SHIFT)
#define BRN31620_PDES_PER_CACHE_LINE_MASK	(BRN31620_PDES_PER_CACHE_LINE_SIZE - 1)

/* Macros for working out the offset of the dummy page */
#define BRN31620_DUMMY_PAGE_OFFSET	(1 * SGX_MMU_PAGE_SIZE)
#define BRN31620_DUMMY_PDE_INDEX	(BRN31620_DUMMY_PAGE_OFFSET / BRN31620_PT_ADDRESS_RANGE_SIZE)
#define BRN31620_DUMMY_PTE_INDEX	((BRN31620_DUMMY_PAGE_OFFSET - (BRN31620_DUMMY_PDE_INDEX * BRN31620_PT_ADDRESS_RANGE_SIZE))/SGX_MMU_PAGE_SIZE)

/* Number of PDE cache lines covering the address space */
#define BRN31620_CACHE_FLUSH_SHIFT		(32 - BRN31620_PDE_CACHE_FILL_SHIFT)
#define BRN31620_CACHE_FLUSH_SIZE		(1 << BRN31620_CACHE_FLUSH_SHIFT)

/* Cache line bits in a UINT32 */
#define BRN31620_CACHE_FLUSH_BITS_SHIFT		5
#define BRN31620_CACHE_FLUSH_BITS_SIZE		(1 << BRN31620_CACHE_FLUSH_BITS_SHIFT)
#define BRN31620_CACHE_FLUSH_BITS_MASK		(BRN31620_CACHE_FLUSH_BITS_SIZE - 1)

/* Cache line index in array */
#define BRN31620_CACHE_FLUSH_INDEX_BITS		(BRN31620_CACHE_FLUSH_SHIFT - BRN31620_CACHE_FLUSH_BITS_SHIFT)
#define BRN31620_CACHE_FLUSH_INDEX_SIZE		(1 << BRN31620_CACHE_FLUSH_INDEX_BITS)

#define BRN31620_DUMMY_PAGE_SIGNATURE	0xFEEBEE01
#endif
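/*
	Worked values for the macros above, assuming SGX_MMU_PAGE_SIZE == 4k:
	- BRN31620_PT_ADDRESS_RANGE_SIZE    = 1 << 22 = 4MB of address space per PT
	- BRN31620_PDE_CACHE_FILL_SIZE      = 1 << 26 = 64MB per PDE cache line
	- BRN31620_PDES_PER_CACHE_LINE_SIZE = 1 << (26 - 22) = 16 PDEs per cache line
	- BRN31620_DUMMY_PAGE_OFFSET = 4096, giving BRN31620_DUMMY_PDE_INDEX
	  = 4096 / 4MB = 0 and BRN31620_DUMMY_PTE_INDEX = 4096 / 4096 = 1
	- BRN31620_CACHE_FLUSH_SIZE = 1 << (32 - 26) = 64 cache lines span the
	  4GB address space, tracked as two 32bit words
	  (BRN31620_CACHE_FLUSH_INDEX_SIZE = 1 << (6 - 5) = 2)
*/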
typedef struct _MMU_PT_INFO_
{
	/* note: may need a union here to accommodate a PT page address for local memory */
	IMG_VOID *hPTPageOSMemHandle;
	IMG_CPU_VIRTADDR PTPageCpuVAddr;
	/* Map of reserved PTEs.
	 * Reserved PTEs are like "valid" PTEs in that they (and the DevVAddrs they represent)
	 * cannot be assigned to another allocation, but their "reserved" status persists through
	 * any amount of mapping and unmapping, until the allocation is finally destroyed.
	 *
	 * Reserved and Valid are independent.
	 * When a PTE is first reserved, it will have Reserved=1 and Valid=0.
	 * When the PTE is actually mapped, it will have Reserved=1 and Valid=1.
	 * When the PTE is unmapped, it will have Reserved=1 and Valid=0.
	 * At this point, the PT cannot be destroyed because, although there is
	 * no active mapping on the PT, a PTE is known to be reserved for use.
	 *
	 * The above sequence of mapping and unmapping may repeat any number of times
	 * until the allocation is unmapped and destroyed, which causes the PTE to have
	 * Valid=0 and Reserved=0.
	 */
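	/* Summary of the (Reserved, Valid) lifecycle described above:
	 *   Reserved=0, Valid=0 : PTE free
	 *   Reserved=1, Valid=0 : PTE reserved but not currently mapped
	 *   Reserved=1, Valid=1 : PTE reserved and mapped
	 */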
	/* Number of PTEs set up.
	 * i.e. have a valid SGX Phys Addr and the "VALID" PTE bit == 1
	 */
	IMG_UINT32 ui32ValidPTECount;
} MMU_PT_INFO;
#define MMU_CONTEXT_NAME_SIZE	50
struct _MMU_CONTEXT_
{
	/* the device node */
	PVRSRV_DEVICE_NODE *psDeviceNode;

	/* Page Directory CPUVirt and DevPhys Addresses */
	IMG_CPU_VIRTADDR pvPDCpuVAddr;
	IMG_DEV_PHYADDR sPDDevPAddr;

	IMG_VOID *hPDOSMemHandle;

	/* information about dynamically allocated pagetables */
	MMU_PT_INFO *apsPTInfoList[SGX_MAX_PD_ENTRIES];

	PVRSRV_SGXDEV_INFO *psDevInfo;

#if defined(PDUMP)
	IMG_UINT32 ui32PDumpMMUContextID;
#if defined(SUPPORT_PDUMP_MULTI_PROCESS)
	IMG_BOOL bPDumpActive;
#endif
#endif

	IMG_UINT32 ui32PID;
	IMG_CHAR szName[MMU_CONTEXT_NAME_SIZE];

#if defined (FIX_HW_BRN_31620)
	IMG_UINT32 ui32PDChangeMask[BRN31620_CACHE_FLUSH_INDEX_SIZE];
	IMG_UINT32 ui32PDCacheRangeRefCount[BRN31620_CACHE_FLUSH_SIZE];
	MMU_PT_INFO *apsPTInfoListSave[SGX_MAX_PD_ENTRIES];
#endif
	struct _MMU_CONTEXT_ *psNext;
};
struct _MMU_HEAP_
{
	/* MMU context */
	MMU_CONTEXT			*psMMUContext;

	/*
		heap specific details:
	*/
	/* the Base PD index for the heap */
	IMG_UINT32			ui32PDBaseIndex;
	/* number of pagetables in this heap */
	IMG_UINT32			ui32PageTableCount;
	/* total number of pagetable entries in this heap which may be mapped to data pages */
	IMG_UINT32			ui32PTETotalUsable;
	/* PD entry DP size control field */
	IMG_UINT32			ui32PDEPageSizeCtrl;

	/*
		Data Page (DP) Details:
	*/
	/* size in bytes of a data page */
	IMG_UINT32			ui32DataPageSize;
	/* bit width of the data page offset addressing field */
	IMG_UINT32			ui32DataPageBitWidth;
	/* bit mask of the data page offset addressing field */
	IMG_UINT32			ui32DataPageMask;

	/*
		PageTable (PT) Details:
	*/
	/* bit shift to base of PT addressing field */
	IMG_UINT32			ui32PTShift;
	/* bit width of the PT addressing field */
	IMG_UINT32			ui32PTBitWidth;
	/* bit mask of the PT addressing field */
	IMG_UINT32			ui32PTMask;
	/* size in bytes of a pagetable */
	IMG_UINT32			ui32PTSize;
	/* Allocated PT Entries per PT */
	IMG_UINT32			ui32PTNumEntriesAllocated;
	/* Usable PT Entries per PT (may be different to num allocated for 4MB data page) */
	IMG_UINT32			ui32PTNumEntriesUsable;

	/*
		PageDirectory Details:
	*/
	/* bit shift to base of PD addressing field */
	IMG_UINT32			ui32PDShift;
	/* bit width of the PD addressing field */
	IMG_UINT32			ui32PDBitWidth;
	/* bit mask of the PD addressing field */
	IMG_UINT32			ui32PDMask;

	/*
		Arena Info:
	*/
	RA_ARENA *psVMArena;
	DEV_ARENA_DESCRIPTOR *psDevArena;

	/* If we have sparse mappings then we can't do PT level sanity checks */
	IMG_BOOL bHasSparseMappings;
#if defined(PDUMP)
	PDUMP_MMU_ATTRIB sMMUAttrib;
#endif
};
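/*
	Illustrative MMU_HEAP field values for the common 4k data page case
	(derived from the address layout described at the top of this file;
	the driver computes the real values when the heap is created):
	- ui32DataPageSize = 0x1000, ui32DataPageBitWidth = 12, ui32DataPageMask = 0x00000FFF
	- ui32PTShift = 12, ui32PTBitWidth = 10, ui32PTMask = 0x003FF000,
	  ui32PTSize = 0x1000 (1024 x 32bit PTEs)
	- ui32PDShift = 22, ui32PDBitWidth = 10, ui32PDMask = 0xFFC00000
*/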
#if defined (SUPPORT_SGX_MMU_DUMMY_PAGE)
#define DUMMY_DATA_PAGE_SIGNATURE	0xDEADBEEF
#endif

/* local prototypes: */
static IMG_VOID
_DeferredFreePageTable (MMU_HEAP *pMMUHeap, IMG_UINT32 ui32PTIndex, IMG_BOOL bOSFreePT);

#if defined (MEM_TRACK_INFO_DEBUG)
IMG_IMPORT IMG_VOID PVRSRVPrintMemTrackInfo(IMG_UINT32 ui32FaultAddr);
#endif

#if defined(PDUMP)
static IMG_VOID
MMU_PDumpPageTables	(MMU_HEAP *pMMUHeap,
					 IMG_DEV_VIRTADDR DevVAddr,
					 IMG_SIZE_T uSize,
					 IMG_BOOL bForUnmap,
					 IMG_HANDLE hUniqueTag);
#endif /* #if defined(PDUMP) */

/* This option tests page table memory, for use during device bring-up. */
#define PAGE_TEST					0
#if PAGE_TEST
static IMG_VOID PageTest(IMG_VOID* pMem, IMG_DEV_PHYADDR sDevPAddr);
#endif
/* This option dumps out the PT if an assert fails */
#define PT_DUMP 1

/* This option sanity checks that the page table PTE valid count matches the active PTEs */
#define PT_DEBUG 0
#if (PT_DEBUG || PT_DUMP) && defined(PVRSRV_NEED_PVR_DPF)
static IMG_VOID DumpPT(MMU_PT_INFO *psPTInfoList)
{
	IMG_UINT32 *p = (IMG_UINT32*)psPTInfoList->PTPageCpuVAddr;
	IMG_UINT32 i;

	/* 1024 entries in a 4K page table */
	for(i = 0; i < 1024; i += 8)
	{
		PVR_LOG(("%08X %08X %08X %08X %08X %08X %08X %08X",
				 p[i + 0], p[i + 1], p[i + 2], p[i + 3],
				 p[i + 4], p[i + 5], p[i + 6], p[i + 7]));
	}
}
#else /* (PT_DEBUG || PT_DUMP) && defined(PVRSRV_NEED_PVR_DPF) */
static INLINE IMG_VOID DumpPT(MMU_PT_INFO *psPTInfoList)
{
	PVR_UNREFERENCED_PARAMETER(psPTInfoList);
}
#endif /* (PT_DEBUG || PT_DUMP) && defined(PVRSRV_NEED_PVR_DPF) */
#if PT_DEBUG
static IMG_VOID CheckPT(MMU_PT_INFO *psPTInfoList)
{
	IMG_UINT32 *p = (IMG_UINT32*) psPTInfoList->PTPageCpuVAddr;
	IMG_UINT32 i, ui32Count = 0;

	/* 1024 entries in a 4K page table */
	for(i = 0; i < 1024; i++)
		if(p[i] & SGX_MMU_PTE_VALID)
			ui32Count++;

	if(psPTInfoList->ui32ValidPTECount != ui32Count)
	{
		PVR_DPF((PVR_DBG_ERROR, "ui32ValidPTECount: %u ui32Count: %u",
				 psPTInfoList->ui32ValidPTECount, ui32Count));
		DumpPT(psPTInfoList);
		PVR_DBG_BREAK;
	}
}
#else /* PT_DEBUG */
static INLINE IMG_VOID CheckPT(MMU_PT_INFO *psPTInfoList)
{
	PVR_UNREFERENCED_PARAMETER(psPTInfoList);
}
#endif /* PT_DEBUG */
/*
	Debug functionality that allows us to make the CPU
	mapping of pagetable memory readonly and only make
	it read/write when we alter it. This allows us
	to check that our memory isn't being overwritten
*/
#if defined(__linux__) && defined(PVRSRV_MMU_MAKE_READWRITE_ON_DEMAND)

#include <linux/version.h>

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38))
#ifndef AUTOCONF_INCLUDED
#include <linux/config.h>
#endif
#else
#include <generated/autoconf.h>
#endif

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
static IMG_VOID MakeKernelPageReadWrite(IMG_PVOID ulCPUVAddr)
{
    pgd_t *psPGD;
    pud_t *psPUD;
    pmd_t *psPMD;
    pte_t *psPTE;
    pte_t ptent;
    IMG_UINT32 ui32CPUVAddr = (IMG_UINT32) ulCPUVAddr;

    psPGD = pgd_offset_k(ui32CPUVAddr);
    if (pgd_none(*psPGD) || pgd_bad(*psPGD))
    {
        PVR_ASSERT(0);
    }

    psPUD = pud_offset(psPGD, ui32CPUVAddr);
    if (pud_none(*psPUD) || pud_bad(*psPUD))
    {
        PVR_ASSERT(0);
    }

    psPMD = pmd_offset(psPUD, ui32CPUVAddr);
    if (pmd_none(*psPMD) || pmd_bad(*psPMD))
    {
        PVR_ASSERT(0);
    }
    psPTE = (pte_t *)pte_offset_kernel(psPMD, ui32CPUVAddr);

    ptent = ptep_modify_prot_start(&init_mm, ui32CPUVAddr, psPTE);
    ptent = pte_mkwrite(ptent);
    ptep_modify_prot_commit(&init_mm, ui32CPUVAddr, psPTE, ptent);

    flush_tlb_all();
}

static IMG_VOID MakeKernelPageReadOnly(IMG_PVOID ulCPUVAddr)
{
    pgd_t *psPGD;
    pud_t *psPUD;
    pmd_t *psPMD;
    pte_t *psPTE;
    pte_t ptent;
    IMG_UINT32 ui32CPUVAddr = (IMG_UINT32) ulCPUVAddr;

    OSWriteMemoryBarrier();

    psPGD = pgd_offset_k(ui32CPUVAddr);
    if (pgd_none(*psPGD) || pgd_bad(*psPGD))
    {
        PVR_ASSERT(0);
    }

    psPUD = pud_offset(psPGD, ui32CPUVAddr);
    if (pud_none(*psPUD) || pud_bad(*psPUD))
    {
        PVR_ASSERT(0);
    }

    psPMD = pmd_offset(psPUD, ui32CPUVAddr);
    if (pmd_none(*psPMD) || pmd_bad(*psPMD))
    {
        PVR_ASSERT(0);
    }

    psPTE = (pte_t *)pte_offset_kernel(psPMD, ui32CPUVAddr);

    ptent = ptep_modify_prot_start(&init_mm, ui32CPUVAddr, psPTE);
    ptent = pte_wrprotect(ptent);
    ptep_modify_prot_commit(&init_mm, ui32CPUVAddr, psPTE, ptent);

    flush_tlb_all();
}

#else /* defined(PVRSRV_MMU_MAKE_READWRITE_ON_DEMAND) */

static INLINE IMG_VOID MakeKernelPageReadWrite(IMG_PVOID ulCPUVAddr)
{
	PVR_UNREFERENCED_PARAMETER(ulCPUVAddr);
}

static INLINE IMG_VOID MakeKernelPageReadOnly(IMG_PVOID ulCPUVAddr)
{
	PVR_UNREFERENCED_PARAMETER(ulCPUVAddr);
}

#endif /* defined(PVRSRV_MMU_MAKE_READWRITE_ON_DEMAND) */
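/*
	Typical usage of the helpers above, as seen throughout this file: a
	pagetable (or PD) page is made writable only for the duration of an
	update (ui32Index and ui32NewEntry below are placeholders), e.g.

		MakeKernelPageReadWrite(psPTInfoList->PTPageCpuVAddr);
		pui32Tmp[ui32Index] = ui32NewEntry;
		MakeKernelPageReadOnly(psPTInfoList->PTPageCpuVAddr);
*/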
/*___________________________________________________________________________

	Information for SUPPORT_PDUMP_MULTI_PROCESS feature.

	The client marked for pdumping will set the bPDumpActive flag in
	the MMU Context (see MMU_Initialise).

	Shared heap allocations should be persistent so all apps which
	are pdumped will see the allocation. The persistent flag overrides
	the bPDumpActive flag (see pdump_common.c/DbgWrite function).

	The idea is to dump PT,DP for shared heap allocations, but only
	dump the PDE if the allocation is mapped into the kernel or active
	client context. This ensures that if a background app allocates on a
	shared heap then all clients can access it in the pdump toolchain.

	PD      PT      DP
	+-+
	| |---> +-+
	+-+     | |---> +-+
	        +-+     + +
	                +-+

	PD allocation/free: pdump flags are 0 (only need PD for active apps)
	PT allocation/free: pdump flags are 0,
						unless the PT is for a shared heap, in which case persistent is set
	PD entries (MMU init/insert shared heap):
						only pdump if the PDE is on the active MMU context, flags are 0
	PD entries (PT alloc):
						pdump flags are 0 if kernel heap
						pdump flags are 0 if shared heap and the PDE is on the active MMU context
						otherwise ignore.
	PT entries:			pdump flags are 0,
						unless the PTE is for a shared heap, in which case persistent is set

	NOTE: PDump common code:-
	PDumpMallocPages and PDumpMemKM also set the persistent flag for
	shared heap allocations.

  ___________________________________________________________________________
*/
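/*
	In code, the policy above reduces to the flag selection used on the PT
	alloc/free paths later in this file:

		ui32Flags |= ( MMU_IsHeapShared(pMMUHeap) ) ? PDUMP_FLAGS_PERSISTENT : 0;
*/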
/*!
******************************************************************************
	FUNCTION:   MMU_IsHeapShared

	PURPOSE:    Is this heap shared?
	PARAMETERS: In: pMMU_Heap
	RETURNS:    true if heap is shared
******************************************************************************/
IMG_BOOL MMU_IsHeapShared(MMU_HEAP* pMMUHeap)
{
	switch(pMMUHeap->psDevArena->DevMemHeapType)
	{
		case DEVICE_MEMORY_HEAP_SHARED :
		case DEVICE_MEMORY_HEAP_SHARED_EXPORTED :
			return IMG_TRUE;
		case DEVICE_MEMORY_HEAP_PERCONTEXT :
		case DEVICE_MEMORY_HEAP_KERNEL :
			return IMG_FALSE;
		default:
		{
			PVR_DPF((PVR_DBG_ERROR, "MMU_IsHeapShared: ERROR invalid heap type"));
			return IMG_FALSE;
		}
	}
}
#ifdef SUPPORT_SGX_MMU_BYPASS
/*!
******************************************************************************
	FUNCTION:   EnableHostAccess

	PURPOSE:    Enables Host accesses to device memory, bypassing the device
				MMU address translation

	PARAMETERS: In: psMMUContext
	RETURNS:    None
******************************************************************************/
IMG_VOID
EnableHostAccess (MMU_CONTEXT *psMMUContext)
{
	IMG_UINT32 ui32RegVal;
	IMG_VOID *pvRegsBaseKM = psMMUContext->psDevInfo->pvRegsBaseKM;

	/*
		bypass the MMU for the host port requestor,
		conserving bypass state of other requestors
	*/
	ui32RegVal = OSReadHWReg(pvRegsBaseKM, EUR_CR_BIF_CTRL);

	OSWriteHWReg(pvRegsBaseKM,
				EUR_CR_BIF_CTRL,
				ui32RegVal | EUR_CR_BIF_CTRL_MMU_BYPASS_HOST_MASK);
	/* assume we're not wiping-out any other bits */
	PDUMPREG(SGX_PDUMPREG_NAME, EUR_CR_BIF_CTRL, EUR_CR_BIF_CTRL_MMU_BYPASS_HOST_MASK);
}

/*!
******************************************************************************
	FUNCTION:   DisableHostAccess

	PURPOSE:    Disables Host accesses to device memory, restoring the device
				MMU address translation

	PARAMETERS: In: psMMUContext
	RETURNS:    None
******************************************************************************/
IMG_VOID
DisableHostAccess (MMU_CONTEXT *psMMUContext)
{
	IMG_UINT32 ui32RegVal;
	IMG_VOID *pvRegsBaseKM = psMMUContext->psDevInfo->pvRegsBaseKM;

	/*
		disable MMU-bypass for the host port requestor,
		conserving bypass state of other requestors
		and flushing all caches/tlbs
	*/
	/* read the current state so other requestors' bypass bits are preserved */
	ui32RegVal = OSReadHWReg(pvRegsBaseKM, EUR_CR_BIF_CTRL);

	OSWriteHWReg(pvRegsBaseKM,
				EUR_CR_BIF_CTRL,
				ui32RegVal & ~EUR_CR_BIF_CTRL_MMU_BYPASS_HOST_MASK);
	/* assume we're not wiping-out any other bits */
	PDUMPREG(SGX_PDUMPREG_NAME, EUR_CR_BIF_CTRL, 0);
}
#endif
#if defined(SGX_FEATURE_SYSTEM_CACHE)
/*!
******************************************************************************
	FUNCTION:   MMU_InvalidateSystemLevelCache

	PURPOSE:    Invalidates the System Level Cache to purge stale PDEs and PTEs

	PARAMETERS: In: psDevInfo
	RETURNS:    None

******************************************************************************/
static IMG_VOID MMU_InvalidateSystemLevelCache(PVRSRV_SGXDEV_INFO *psDevInfo)
{
	#if defined(SGX_FEATURE_MP)
	psDevInfo->ui32CacheControl |= SGXMKIF_CC_INVAL_BIF_SL;
	#else
	/* The MMU always bypasses the SLC */
	PVR_UNREFERENCED_PARAMETER(psDevInfo);
	#endif /* SGX_FEATURE_MP */
}
#endif /* SGX_FEATURE_SYSTEM_CACHE */

/*!
******************************************************************************
	FUNCTION:   MMU_InvalidateDirectoryCache

	PURPOSE:    Invalidates the page directory cache + page table cache + requestor TLBs

	PARAMETERS: In: psDevInfo
	RETURNS:    None

******************************************************************************/
IMG_VOID MMU_InvalidateDirectoryCache(PVRSRV_SGXDEV_INFO *psDevInfo)
{
	psDevInfo->ui32CacheControl |= SGXMKIF_CC_INVAL_BIF_PD;
	#if defined(SGX_FEATURE_SYSTEM_CACHE)
	MMU_InvalidateSystemLevelCache(psDevInfo);
	#endif /* SGX_FEATURE_SYSTEM_CACHE */
}

/*!
******************************************************************************
	FUNCTION:   MMU_InvalidatePageTableCache

	PURPOSE:    Invalidates the page table cache + requestor TLBs

	PARAMETERS: In: psDevInfo
	RETURNS:    None

******************************************************************************/
static IMG_VOID MMU_InvalidatePageTableCache(PVRSRV_SGXDEV_INFO *psDevInfo)
{
	psDevInfo->ui32CacheControl |= SGXMKIF_CC_INVAL_BIF_PT;
	#if defined(SGX_FEATURE_SYSTEM_CACHE)
	MMU_InvalidateSystemLevelCache(psDevInfo);
	#endif /* SGX_FEATURE_SYSTEM_CACHE */
}
#if defined(FIX_HW_BRN_31620)
/*!
******************************************************************************
	FUNCTION:   BRN31620InvalidatePageTableEntry

	PURPOSE:    Invalidates a single page table entry, wiring in the dummy
	            page when the entry covers the dummy page location

	PARAMETERS: In: psMMUContext, ui32PDIndex, ui32PTIndex
	RETURNS:    None

******************************************************************************/
static IMG_VOID BRN31620InvalidatePageTableEntry(MMU_CONTEXT *psMMUContext, IMG_UINT32 ui32PDIndex, IMG_UINT32 ui32PTIndex, IMG_UINT32 *pui32PTE)
{
	PVRSRV_SGXDEV_INFO *psDevInfo = psMMUContext->psDevInfo;

	/*
	 * Note: We can't tell at this stage if this PT will be freed before
	 * the end of the function so we always wire up the dummy page to
	 * the PT.
	 */
	if (((ui32PDIndex % (BRN31620_PDE_CACHE_FILL_SIZE/BRN31620_PT_ADDRESS_RANGE_SIZE)) == BRN31620_DUMMY_PDE_INDEX)
		&& (ui32PTIndex == BRN31620_DUMMY_PTE_INDEX))
	{
		*pui32PTE = (psDevInfo->sBRN31620DummyPageDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
								| SGX_MMU_PTE_DUMMY_PAGE
								| SGX_MMU_PTE_READONLY
								| SGX_MMU_PTE_VALID;
	}
	else
	{
		*pui32PTE = 0;
	}
}
/*!
******************************************************************************
	FUNCTION:   BRN31620FreePageTable

	PURPOSE:    Frees page tables in PDE cache line chunks, re-wiring the
	            dummy page when required

	PARAMETERS: In: psMMUContext, ui32PDIndex
	RETURNS:    IMG_TRUE if we freed any PTs

******************************************************************************/
static IMG_BOOL BRN31620FreePageTable(MMU_HEAP *psMMUHeap, IMG_UINT32 ui32PDIndex)
{
	MMU_CONTEXT *psMMUContext = psMMUHeap->psMMUContext;
	PVRSRV_SGXDEV_INFO *psDevInfo = psMMUContext->psDevInfo;
	IMG_UINT32 ui32PDCacheLine = ui32PDIndex >> BRN31620_PDES_PER_CACHE_LINE_SHIFT;
	IMG_BOOL bFreePTs = IMG_FALSE;
	IMG_UINT32 *pui32Tmp;

	PVR_ASSERT(psMMUHeap != IMG_NULL);

	/*
	 * Clear the PT info for this PD index so even if we don't
	 * free the memory here apsPTInfoList[PDIndex] will trigger
	 * an "allocation" in _DeferredAllocPagetables which
	 * bumps up the refcount.
	 */
	PVR_ASSERT(psMMUContext->apsPTInfoListSave[ui32PDIndex] == IMG_NULL);

	psMMUContext->apsPTInfoListSave[ui32PDIndex] = psMMUContext->apsPTInfoList[ui32PDIndex];
	psMMUContext->apsPTInfoList[ui32PDIndex] = IMG_NULL;

	/* Check if this was the last PT in the cache line */
	if (--psMMUContext->ui32PDCacheRangeRefCount[ui32PDCacheLine] == 0)
	{
		IMG_UINT32 i;
		IMG_UINT32 ui32PDIndexStart = ui32PDCacheLine * BRN31620_PDES_PER_CACHE_LINE_SIZE;
		IMG_UINT32 ui32PDIndexEnd = ui32PDIndexStart + BRN31620_PDES_PER_CACHE_LINE_SIZE;
		IMG_UINT32 ui32PDBitMaskIndex, ui32PDBitMaskShift;

		/* Free all PTs in the cache line */
		for (i=ui32PDIndexStart;i<ui32PDIndexEnd;i++)
		{
			/* This PT is _really_ being freed now */
			psMMUContext->apsPTInfoList[i] = psMMUContext->apsPTInfoListSave[i];
			psMMUContext->apsPTInfoListSave[i] = IMG_NULL;
			_DeferredFreePageTable(psMMUHeap, i - psMMUHeap->ui32PDBaseIndex, IMG_TRUE);
		}

		ui32PDBitMaskIndex = ui32PDCacheLine >> BRN31620_CACHE_FLUSH_BITS_SHIFT;
		ui32PDBitMaskShift = ui32PDCacheLine & BRN31620_CACHE_FLUSH_BITS_MASK;

		/* Check if this is a shared heap */
		if (MMU_IsHeapShared(psMMUHeap))
		{
			/* Mark the removal of the Page Table in all memory contexts */
			MMU_CONTEXT *psMMUContextWalker = (MMU_CONTEXT*) psMMUHeap->psMMUContext->psDevInfo->pvMMUContextList;

			while(psMMUContextWalker)
			{
				psMMUContextWalker->ui32PDChangeMask[ui32PDBitMaskIndex] |= 1 << ui32PDBitMaskShift;

				/*
				 * We've just cleared a cache line's worth of PDEs so we need
				 * to wire up the dummy PT
				 */
				MakeKernelPageReadWrite(psMMUContextWalker->pvPDCpuVAddr);
				pui32Tmp = (IMG_UINT32 *) psMMUContextWalker->pvPDCpuVAddr;
				pui32Tmp[ui32PDIndexStart + BRN31620_DUMMY_PDE_INDEX] = (psDevInfo->sBRN31620DummyPTDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
												| SGX_MMU_PDE_PAGE_SIZE_4K
												| SGX_MMU_PDE_DUMMY_PAGE
												| SGX_MMU_PDE_VALID;
				MakeKernelPageReadOnly(psMMUContextWalker->pvPDCpuVAddr);

				PDUMPCOMMENT("BRN31620 Re-wire dummy PT due to releasing PT allocation block");
				PDUMPPDENTRIES(&psMMUHeap->sMMUAttrib, psMMUContextWalker->hPDOSMemHandle, (IMG_VOID*)&pui32Tmp[ui32PDIndexStart + BRN31620_DUMMY_PDE_INDEX], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PT_UNIQUETAG, PDUMP_PT_UNIQUETAG);
				psMMUContextWalker = psMMUContextWalker->psNext;
			}
		}
		else
		{
			psMMUContext->ui32PDChangeMask[ui32PDBitMaskIndex] |= 1 << ui32PDBitMaskShift;

			/*
			 * We've just cleared a cache line's worth of PDEs so we need
			 * to wire up the dummy PT
			 */
			MakeKernelPageReadWrite(psMMUContext->pvPDCpuVAddr);
			pui32Tmp = (IMG_UINT32 *) psMMUContext->pvPDCpuVAddr;
			pui32Tmp[ui32PDIndexStart + BRN31620_DUMMY_PDE_INDEX] = (psDevInfo->sBRN31620DummyPTDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
											| SGX_MMU_PDE_PAGE_SIZE_4K
											| SGX_MMU_PDE_DUMMY_PAGE
											| SGX_MMU_PDE_VALID;
			MakeKernelPageReadOnly(psMMUContext->pvPDCpuVAddr);

			PDUMPCOMMENT("BRN31620 Re-wire dummy PT due to releasing PT allocation block");
			PDUMPPDENTRIES(&psMMUHeap->sMMUAttrib, psMMUContext->hPDOSMemHandle, (IMG_VOID*)&pui32Tmp[ui32PDIndexStart + BRN31620_DUMMY_PDE_INDEX], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PT_UNIQUETAG, PDUMP_PT_UNIQUETAG);
		}
		/* We've freed a cache line's worth of PDEs so trigger a PD cache flush */
		bFreePTs = IMG_TRUE;
	}

	return bFreePTs;
}
#endif
/*!
******************************************************************************
	FUNCTION:   _AllocPageTableMemory

	PURPOSE:    Allocate physical memory for a page table

	PARAMETERS: In: pMMUHeap - the mmu
				In: psPTInfoList - PT info
				Out: psDevPAddr - device physical address for new PT
	RETURNS:    IMG_TRUE - Success
	            IMG_FALSE - Failed
******************************************************************************/
static IMG_BOOL
_AllocPageTableMemory (MMU_HEAP *pMMUHeap,
						MMU_PT_INFO *psPTInfoList,
						IMG_DEV_PHYADDR *psDevPAddr)
{
	IMG_DEV_PHYADDR sDevPAddr;
	IMG_CPU_PHYADDR sCpuPAddr;

	/*
		depending on the specific system, pagetables are allocated from system memory
		or device local memory.  For now, just look for at least a valid local heap/arena
	*/
	if(pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->psLocalDevMemArena == IMG_NULL)
	{
		//FIXME: replace with an RA, this allocator only handles 4k allocs
		if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
						 pMMUHeap->ui32PTSize,
						 SGX_MMU_PAGE_SIZE,//FIXME: assume 4K page size for now (wastes memory for smaller pagetables)
						 IMG_NULL,
						 0,
						 IMG_NULL,
						 (IMG_VOID **)&psPTInfoList->PTPageCpuVAddr,
						 &psPTInfoList->hPTPageOSMemHandle) != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR, "_AllocPageTableMemory: ERROR call to OSAllocPages failed"));
			return IMG_FALSE;
		}

		/*
			Force the page to read only, we will make it read/write as
			and when we need to
		*/
		MakeKernelPageReadOnly(psPTInfoList->PTPageCpuVAddr);

		/* translate address to device physical */
		if(psPTInfoList->PTPageCpuVAddr)
		{
			sCpuPAddr = OSMapLinToCPUPhys(psPTInfoList->hPTPageOSMemHandle,
										  psPTInfoList->PTPageCpuVAddr);
		}
		else
		{
			/* This isn't used in all cases since not all ports currently support
			 * OSMemHandleToCpuPAddr() */
			sCpuPAddr = OSMemHandleToCpuPAddr(psPTInfoList->hPTPageOSMemHandle, 0);
		}

		sDevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
	}
	else
	{
		/*
		   We cannot use IMG_SYS_PHYADDR here, as that is 64-bit for 32-bit PAE builds.
		   The physical address in this call to RA_Alloc is specifically the SysPAddr
		   of local (card) space, and it is highly unlikely we would ever need to
		   support > 4GB of local (card) memory (this does assume that such local
		   memory will be mapped into System physical memory space at a low address so
		   that any and all local memory exists within the 4GB SYSPAddr range).
		 */
		IMG_UINTPTR_T uiLocalPAddr;
		IMG_SYS_PHYADDR sSysPAddr;

		/*
			just allocate from the first local memory arena
			(unlikely to be more than one local mem area(?))
		*/
		//FIXME: just allocate a 4K page for each PT for now
		if(RA_Alloc(pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->psLocalDevMemArena,
					SGX_MMU_PAGE_SIZE,//pMMUHeap->ui32PTSize,
					IMG_NULL,
					IMG_NULL,
					0,
					SGX_MMU_PAGE_SIZE,//pMMUHeap->ui32PTSize,
					0,
					IMG_NULL,
					0,
					&uiLocalPAddr)!= IMG_TRUE)
		{
			PVR_DPF((PVR_DBG_ERROR, "_AllocPageTableMemory: ERROR call to RA_Alloc failed"));
			return IMG_FALSE;
		}

		/* Munge the local PAddr back into the SysPAddr */
		sSysPAddr.uiAddr = uiLocalPAddr;

		/* derive the CPU virtual address */
		sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
		/* note: the actual amount is pMMUHeap->ui32PTSize but it must be a multiple of 4k pages */
		psPTInfoList->PTPageCpuVAddr = OSMapPhysToLin(sCpuPAddr,
													SGX_MMU_PAGE_SIZE,
													PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
													&psPTInfoList->hPTPageOSMemHandle);
		if(!psPTInfoList->PTPageCpuVAddr)
		{
			PVR_DPF((PVR_DBG_ERROR, "_AllocPageTableMemory: ERROR failed to map page tables"));
			return IMG_FALSE;
		}

		/* translate address to device physical */
		sDevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);

		#if PAGE_TEST
		PageTest(psPTInfoList->PTPageCpuVAddr, sDevPAddr);
		#endif
	}

	PVR_ASSERT(psPTInfoList->PTPageCpuVAddr != IMG_NULL);

	MakeKernelPageReadWrite(psPTInfoList->PTPageCpuVAddr);
#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
	{
		IMG_UINT32 *pui32Tmp;
		IMG_UINT32 i;

		pui32Tmp = (IMG_UINT32*)psPTInfoList->PTPageCpuVAddr;
		/* point the new PT entries at the dummy data page */
		for(i=0; i<pMMUHeap->ui32PTNumEntriesUsable; i++)
		{
			pui32Tmp[i] = (pMMUHeap->psMMUContext->psDevInfo->sDummyDataDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
						| SGX_MMU_PTE_VALID;
		}
		/* zero the remaining allocated entries, if any */
		for(; i<pMMUHeap->ui32PTNumEntriesAllocated; i++)
		{
			pui32Tmp[i] = 0;
		}
	}
#else
	/* Zero the page table. */
	OSMemSet(psPTInfoList->PTPageCpuVAddr, 0, pMMUHeap->ui32PTSize);
#endif
	MakeKernelPageReadOnly(psPTInfoList->PTPageCpuVAddr);

#if defined(PDUMP)
	{
		IMG_UINT32 ui32Flags = 0;
#if defined(SUPPORT_PDUMP_MULTI_PROCESS)
		/* make sure shared heap PT allocs are always pdumped */
		ui32Flags |= ( MMU_IsHeapShared(pMMUHeap) ) ? PDUMP_FLAGS_PERSISTENT : 0;
#endif
		/* pdump the PT malloc */
		PDUMPMALLOCPAGETABLE(&pMMUHeap->psMMUContext->psDeviceNode->sDevId, psPTInfoList->hPTPageOSMemHandle, 0, psPTInfoList->PTPageCpuVAddr, pMMUHeap->ui32PTSize, ui32Flags, PDUMP_PT_UNIQUETAG);
		/* pdump the PT Pages */
		PDUMPMEMPTENTRIES(&pMMUHeap->sMMUAttrib, psPTInfoList->hPTPageOSMemHandle, psPTInfoList->PTPageCpuVAddr, pMMUHeap->ui32PTSize, ui32Flags, IMG_TRUE, PDUMP_PT_UNIQUETAG, PDUMP_PT_UNIQUETAG);
	}
#endif

	/* return the DevPAddr */
	*psDevPAddr = sDevPAddr;

	return IMG_TRUE;
}
/*!
******************************************************************************
	FUNCTION:   _FreePageTableMemory

	PURPOSE:    Free physical memory for a page table

	PARAMETERS: In: pMMUHeap - the mmu
				In: psPTInfoList - PT info to free
	RETURNS:    NONE
******************************************************************************/
static IMG_VOID
_FreePageTableMemory (MMU_HEAP *pMMUHeap, MMU_PT_INFO *psPTInfoList)
{
	/*
		free the PT page:
		depending on the specific system, pagetables are allocated from system memory
		or device local memory.  For now, just look for at least a valid local heap/arena
	*/
	if(pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->psLocalDevMemArena == IMG_NULL)
	{
		/* Force the page to read/write before we free it */
		MakeKernelPageReadWrite(psPTInfoList->PTPageCpuVAddr);

		//FIXME: replace with an RA, this allocator only handles 4k allocs
		OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
					  pMMUHeap->ui32PTSize,
					  psPTInfoList->PTPageCpuVAddr,
					  psPTInfoList->hPTPageOSMemHandle);
	}
	else
	{
		IMG_SYS_PHYADDR sSysPAddr;
		IMG_CPU_PHYADDR sCpuPAddr;

		/* derive the system physical address */
		sCpuPAddr = OSMapLinToCPUPhys(psPTInfoList->hPTPageOSMemHandle,
									  psPTInfoList->PTPageCpuVAddr);
		sSysPAddr = SysCpuPAddrToSysPAddr (sCpuPAddr);

		/* unmap the CPU mapping */
		/* note: the actual amount is pMMUHeap->ui32PTSize but it must be a multiple of 4k pages */
		OSUnMapPhysToLin(psPTInfoList->PTPageCpuVAddr,
				 SGX_MMU_PAGE_SIZE,
				 PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
				 psPTInfoList->hPTPageOSMemHandle);

		/*
			just free from the first local memory arena
			(unlikely to be more than one local mem area(?))
			Note that the cast to IMG_UINTPTR_T is ok as we're local mem.
		*/
		RA_Free (pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->psLocalDevMemArena, (IMG_UINTPTR_T)sSysPAddr.uiAddr, IMG_FALSE);
	}
}
/*!
******************************************************************************
	FUNCTION:   _DeferredFreePageTable

	PURPOSE:    Free one page table associated with an MMU.

	PARAMETERS: In:  pMMUHeap - the mmu heap
				In:  ui32PTIndex - index of the page table to free, relative
								   to the base of the heap.
	RETURNS:    None
******************************************************************************/
static IMG_VOID
_DeferredFreePageTable (MMU_HEAP *pMMUHeap, IMG_UINT32 ui32PTIndex, IMG_BOOL bOSFreePT)
{
	IMG_UINT32 *pui32PDEntry;
	IMG_UINT32 i;
	IMG_UINT32 ui32PDIndex;
	SYS_DATA *psSysData;
	MMU_PT_INFO **ppsPTInfoList;

	SysAcquireData(&psSysData);

	/* find the index/offset in PD entries */
	ui32PDIndex = pMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> pMMUHeap->ui32PDShift;

	/* set the base PT info */
	ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];

	{
#if PT_DEBUG
		if(ppsPTInfoList[ui32PTIndex] && ppsPTInfoList[ui32PTIndex]->ui32ValidPTECount > 0)
		{
			DumpPT(ppsPTInfoList[ui32PTIndex]);
			/* Fall-through, will fail assert */
		}
#endif

		/* Assert that all mappings have gone */
		PVR_ASSERT(ppsPTInfoList[ui32PTIndex] == IMG_NULL || ppsPTInfoList[ui32PTIndex]->ui32ValidPTECount == 0);
	}

#if defined(PDUMP)
	{
		IMG_UINT32 ui32Flags = 0;
#if defined(SUPPORT_PDUMP_MULTI_PROCESS)
		ui32Flags |= ( MMU_IsHeapShared(pMMUHeap) ) ? PDUMP_FLAGS_PERSISTENT : 0;
#endif
		/* pdump the PT free */
		PDUMPCOMMENT("Free page table (page count == %08X)", pMMUHeap->ui32PageTableCount);
		if(ppsPTInfoList[ui32PTIndex] && ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr)
		{
			PDUMPFREEPAGETABLE(&pMMUHeap->psMMUContext->psDeviceNode->sDevId, ppsPTInfoList[ui32PTIndex]->hPTPageOSMemHandle, ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr, pMMUHeap->ui32PTSize, ui32Flags, PDUMP_PT_UNIQUETAG);
		}
	}
#endif

	switch(pMMUHeap->psDevArena->DevMemHeapType)
	{
		case DEVICE_MEMORY_HEAP_SHARED :
		case DEVICE_MEMORY_HEAP_SHARED_EXPORTED :
		{
			/* Remove Page Table from all memory contexts */
			MMU_CONTEXT *psMMUContext = (MMU_CONTEXT*)pMMUHeap->psMMUContext->psDevInfo->pvMMUContextList;

			while(psMMUContext)
			{
				/* get the PD CPUVAddr base and advance to the first entry */
				MakeKernelPageReadWrite(psMMUContext->pvPDCpuVAddr);
				pui32PDEntry = (IMG_UINT32*)psMMUContext->pvPDCpuVAddr;
				pui32PDEntry += ui32PDIndex;

#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
				/* point the PD entry to the dummy PT */
				pui32PDEntry[ui32PTIndex] = (psMMUContext->psDevInfo->sDummyPTDevPAddr.uiAddr
											>>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
											| SGX_MMU_PDE_PAGE_SIZE_4K
											| SGX_MMU_PDE_VALID;
#else
				/* free the entry */
				if(bOSFreePT)
				{
					pui32PDEntry[ui32PTIndex] = 0;
				}
#endif
				MakeKernelPageReadOnly(psMMUContext->pvPDCpuVAddr);
			#if defined(PDUMP)
				/* pdump the PD Page modifications */
			#if defined(SUPPORT_PDUMP_MULTI_PROCESS)
				if(psMMUContext->bPDumpActive)
			#endif
				{
					PDUMPPDENTRIES(&pMMUHeap->sMMUAttrib, psMMUContext->hPDOSMemHandle, (IMG_VOID*)&pui32PDEntry[ui32PTIndex], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PT_UNIQUETAG, PDUMP_PT_UNIQUETAG);
				}
			#endif
				/* advance to next context */
				psMMUContext = psMMUContext->psNext;
			}
			break;
		}
		case DEVICE_MEMORY_HEAP_PERCONTEXT :
		case DEVICE_MEMORY_HEAP_KERNEL :
		{
			MakeKernelPageReadWrite(pMMUHeap->psMMUContext->pvPDCpuVAddr);
			/* Remove Page Table from this memory context only */
			pui32PDEntry = (IMG_UINT32*)pMMUHeap->psMMUContext->pvPDCpuVAddr;
			pui32PDEntry += ui32PDIndex;

#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
			/* point the PD entry to the dummy PT */
			pui32PDEntry[ui32PTIndex] = (pMMUHeap->psMMUContext->psDevInfo->sDummyPTDevPAddr.uiAddr
										>>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
										| SGX_MMU_PDE_PAGE_SIZE_4K
										| SGX_MMU_PDE_VALID;
#else
			/* free the entry */
			if(bOSFreePT)
			{
				pui32PDEntry[ui32PTIndex] = 0;
			}
#endif
			MakeKernelPageReadOnly(pMMUHeap->psMMUContext->pvPDCpuVAddr);

			/* pdump the PD Page modifications */
			PDUMPPDENTRIES(&pMMUHeap->sMMUAttrib, pMMUHeap->psMMUContext->hPDOSMemHandle, (IMG_VOID*)&pui32PDEntry[ui32PTIndex], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
			break;
		}
		default:
		{
			PVR_DPF((PVR_DBG_ERROR, "_DeferredFreePageTable: ERROR invalid heap type"));
			return;
		}
	}

	/* clear the PT entries in each PT page */
	if(ppsPTInfoList[ui32PTIndex] != IMG_NULL)
	{
		if(ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr != IMG_NULL)
		{
			IMG_PUINT32 pui32Tmp;

			MakeKernelPageReadWrite(ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr);
			pui32Tmp = (IMG_UINT32*)ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr;

			/* clear the entries */
			for(i=0;
				(i<pMMUHeap->ui32PTETotalUsable) && (i<pMMUHeap->ui32PTNumEntriesUsable);
				 i++)
			{
				/* over-allocated PT entries for the 4MB data page case should never be non-zero */
				pui32Tmp[i] = 0;
			}
			MakeKernelPageReadOnly(ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr);

			/*
				free the pagetable memory
			*/
			if(bOSFreePT)
			{
				_FreePageTableMemory(pMMUHeap, ppsPTInfoList[ui32PTIndex]);
			}

			/*
				decrement the PT Entry Count by the number
				of entries we've cleared in this pass
			*/
			pMMUHeap->ui32PTETotalUsable -= i;
		}
		else
		{
			/* decrement the PT Entry Count by a page's worth of entries */
			pMMUHeap->ui32PTETotalUsable -= pMMUHeap->ui32PTNumEntriesUsable;
		}

		if(bOSFreePT)
		{
			/* free the pt info */
			OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
						sizeof(MMU_PT_INFO),
						ppsPTInfoList[ui32PTIndex],
						IMG_NULL);
			ppsPTInfoList[ui32PTIndex] = IMG_NULL;
		}
	}
	else
	{
		/* decrement the PT Entry Count by a page's worth of usable entries */
		pMMUHeap->ui32PTETotalUsable -= pMMUHeap->ui32PTNumEntriesUsable;
	}

	PDUMPCOMMENT("Finished free page table (page count == %08X)", pMMUHeap->ui32PageTableCount);
}
1206 /*!
1207 ******************************************************************************
1208         FUNCTION:   _DeferredFreePageTables
1210         PURPOSE:    Free the page tables associated with an MMU.
1212         PARAMETERS: In:  pMMUHeap - the mmu
1213         RETURNS:    None
1214 ******************************************************************************/
1215 static IMG_VOID
1216 _DeferredFreePageTables (MMU_HEAP *pMMUHeap)
1218         IMG_UINT32 i;
1219 #if defined(FIX_HW_BRN_31620)
1220         MMU_CONTEXT *psMMUContext = pMMUHeap->psMMUContext;
1221         IMG_BOOL bInvalidateDirectoryCache = IMG_FALSE;
1222         IMG_UINT32 ui32PDIndex;
1223         IMG_UINT32 *pui32Tmp;
1224         IMG_UINT32 j;
1225 #endif
1226 #if defined(PDUMP)
1227         PDUMPCOMMENT("Free PTs (MMU Context ID == %u, PDBaseIndex == %u, PT count == 0x%x)",
1228                         pMMUHeap->psMMUContext->ui32PDumpMMUContextID,
1229                         pMMUHeap->ui32PDBaseIndex,
1230                         pMMUHeap->ui32PageTableCount);
1231 #endif
1232 #if defined(FIX_HW_BRN_31620)
1233         for(i=0; i<pMMUHeap->ui32PageTableCount; i++)
1234         {
1235                 ui32PDIndex = (pMMUHeap->ui32PDBaseIndex + i);
1237                 if (psMMUContext->apsPTInfoList[ui32PDIndex])
1238                 {
1239                         if (psMMUContext->apsPTInfoList[ui32PDIndex]->PTPageCpuVAddr)
1240                         {
1241                                 /*
1242                                  * We have to do this to set up the dummy page,
1243                                  * as not all heaps are PD-cache-line sized or aligned
1244                                  */
1245                                 for (j=0;j<SGX_MMU_PT_SIZE;j++)
1246                                 {
1247                                         pui32Tmp = (IMG_UINT32 *) psMMUContext->apsPTInfoList[ui32PDIndex]->PTPageCpuVAddr;
1248                                         BRN31620InvalidatePageTableEntry(psMMUContext, ui32PDIndex, j, &pui32Tmp[j]);
1249                                 }
1250                         }
1251                         /* Free the PT and NULL out the PTInfo */
1252                         if (BRN31620FreePageTable(pMMUHeap, ui32PDIndex) == IMG_TRUE)
1253                         {
1254                                 bInvalidateDirectoryCache = IMG_TRUE;
1255                         }
1256                 }
1257         }
1259         /*
1260          * Because PTs are freed in chunks we might only need to flush the PT cache
1261          * rather than the directory cache
1262          */
1263         if (bInvalidateDirectoryCache)
1264         {
1265                 MMU_InvalidateDirectoryCache(pMMUHeap->psMMUContext->psDevInfo);
1266         }
1267         else
1268         {
1269                 MMU_InvalidatePageTableCache(pMMUHeap->psMMUContext->psDevInfo);
1270         }
1271 #else
1272         for(i=0; i<pMMUHeap->ui32PageTableCount; i++)
1273         {
1274                 _DeferredFreePageTable(pMMUHeap, i, IMG_TRUE);
1275         }
1276         MMU_InvalidateDirectoryCache(pMMUHeap->psMMUContext->psDevInfo);
1277 #endif
1278 }
1281 /*!
1282 ******************************************************************************
1283         FUNCTION:   _DeferredAllocPagetables
1285         PURPOSE:    Allocates the page tables covering an allocation, at allocation time
1287         PARAMETERS: In:  pMMUHeap - the mmu heap
1288                                          DevVAddr - devVAddr of allocation
1289                                          ui32Size - size of allocation
1290         RETURNS:    IMG_TRUE - Success
1291                     IMG_FALSE - Failed
1292 ******************************************************************************/
1293 static IMG_BOOL
1294 _DeferredAllocPagetables(MMU_HEAP *pMMUHeap, IMG_DEV_VIRTADDR DevVAddr, IMG_UINT32 ui32Size)
1295 {
1296         IMG_UINT32 ui32PageTableCount;
1297         IMG_UINT32 ui32PDIndex;
1298         IMG_UINT32 i;
1299         IMG_UINT32 *pui32PDEntry;
1300         MMU_PT_INFO **ppsPTInfoList;
1301         SYS_DATA *psSysData;
1302         IMG_DEV_VIRTADDR sHighDevVAddr;
1303 #if defined(FIX_HW_BRN_31620)
1304         IMG_BOOL bFlushSystemCache = IMG_FALSE;
1305         IMG_BOOL bSharedPT = IMG_FALSE;
1306         IMG_DEV_VIRTADDR sDevVAddrRequestStart;
1307         IMG_DEV_VIRTADDR sDevVAddrRequestEnd;
1308         IMG_UINT32 ui32PDRequestStart;
1309         IMG_UINT32 ui32PDRequestEnd;
1310         IMG_UINT32 ui32ModifiedCachelines[BRN31620_CACHE_FLUSH_INDEX_SIZE];
1311 #endif
1313         /* Check device linear address */
1314 #if SGX_FEATURE_ADDRESS_SPACE_SIZE < 32
1315         PVR_ASSERT(DevVAddr.uiAddr < (1<<SGX_FEATURE_ADDRESS_SPACE_SIZE));
1316 #endif
1318         /* get the sysdata */
1319         SysAcquireData(&psSysData);
1321         /* find the index/offset in PD entries  */
1322         ui32PDIndex = DevVAddr.uiAddr >> pMMUHeap->ui32PDShift;
1324         /* how many PDs does the allocation occupy? */
1325         /* first check for overflows */
1326         if((UINT32_MAX_VALUE - DevVAddr.uiAddr)
1327                 < (ui32Size + pMMUHeap->ui32DataPageMask + pMMUHeap->ui32PTMask))
1328         {
1329                 /* detected overflow, clamp to highest address, reserve all PDs */
1330                 sHighDevVAddr.uiAddr = UINT32_MAX_VALUE;
1331                 ui32PageTableCount = 1024;
1332         }
1333         else
1334         {
1335                 sHighDevVAddr.uiAddr = DevVAddr.uiAddr
1336                                                                 + ui32Size
1337                                                                 + pMMUHeap->ui32DataPageMask
1338                                                                 + pMMUHeap->ui32PTMask;
1340                 ui32PageTableCount = sHighDevVAddr.uiAddr >> pMMUHeap->ui32PDShift;
1341         }
1344         /* Fix allocation of last 4MB */
1345         if (ui32PageTableCount == 0)
1346                 ui32PageTableCount = 1024;
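/*
 * Illustrative example (not from the original source) of the PD range
 * computation above, assuming 4kB data pages, i.e. ui32PDShift == 22,
 * ui32DataPageMask == 0xFFF and ui32PTMask == 0x3FFFFF (each PDE covers 4MB):
 *
 *   DevVAddr == 0x12345678, ui32Size == 0x00800000 (8MB)
 *   sHighDevVAddr == 0x12345678 + 0x00800000 + 0xFFF + 0x3FFFFF == 0x12F46676
 *   ui32PDIndex        == 0x12345678 >> 22 == 72
 *   ui32PageTableCount == 0x12F46676 >> 22 == 75
 *
 * After the later "ui32PageTableCount -= ui32PDIndex" this yields 3 page
 * tables, covering PDs 72..74. The overflow branch instead clamps to all
 * 1024 PDEs of the 4GB device address space.
 */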
1348 #if defined(FIX_HW_BRN_31620)
1349         for (i=0;i<BRN31620_CACHE_FLUSH_INDEX_SIZE;i++)
1350         {
1351                 ui32ModifiedCachelines[i] = 0;
1352         }
1354         /*****************************************************************/
1355         /* Save off requested data and round allocation to PD cache line */
1356         /*****************************************************************/
1357         sDevVAddrRequestStart = DevVAddr;
1358         ui32PDRequestStart = ui32PDIndex;
1359         sDevVAddrRequestEnd = sHighDevVAddr;
1360         ui32PDRequestEnd = ui32PageTableCount - 1;
1362         /* Round allocations down to the PD cacheline */
1363         DevVAddr.uiAddr = DevVAddr.uiAddr & (~BRN31620_PDE_CACHE_FILL_MASK);
1365         /* Round the end address of the PD allocation to cacheline */
1366         if (UINT32_MAX_VALUE - sHighDevVAddr.uiAddr < (BRN31620_PDE_CACHE_FILL_SIZE - 1))
1367         {
1368                 sHighDevVAddr.uiAddr = UINT32_MAX_VALUE;
1369                 ui32PageTableCount = 1024;
1370         }
1371         else
1372         {
1373                 sHighDevVAddr.uiAddr = ((sHighDevVAddr.uiAddr + (BRN31620_PDE_CACHE_FILL_SIZE - 1)) & (~BRN31620_PDE_CACHE_FILL_MASK));
1374                 ui32PageTableCount = sHighDevVAddr.uiAddr >> pMMUHeap->ui32PDShift;
1375         }
1377         ui32PDIndex = DevVAddr.uiAddr >> pMMUHeap->ui32PDShift;
1379         /* Fix allocation of last 4MB */
1380         if (ui32PageTableCount == 0)
1381                 ui32PageTableCount = 1024;
1382 #endif
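/*
 * Illustrative example (assumed constants) of the BRN31620 rounding above,
 * taking a PD cache line to cover 16 PDEs, i.e.
 * BRN31620_PDE_CACHE_FILL_SIZE == 0x04000000 (64MB of device VA) and
 * BRN31620_PDE_CACHE_FILL_MASK == 0x03FFFFFF:
 *
 *   requested range:  0x12345678 .. 0x12F46676  (PDs 72..74)
 *   rounded range:    0x10000000 .. 0x14000000  (PDs 64..79)
 *
 * The whole 16-PDE cache line is made valid so a PD cache fetch can never
 * pull in stale entries, while only PDs inside the requested range are
 * recorded as "live" PT infos.
 */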
1384         ui32PageTableCount -= ui32PDIndex;
1386         /* get the PD CPUVAddr base and advance to the first entry */
1387         pui32PDEntry = (IMG_UINT32*)pMMUHeap->psMMUContext->pvPDCpuVAddr;
1388         pui32PDEntry += ui32PDIndex;
1390         /* and advance to the first PT info list */
1391         ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
1393 #if defined(PDUMP)
1394         {
1395                 IMG_UINT32 ui32Flags = 0;
1396                 
1397                 /* pdump the PD Page modifications */
1398                 if( MMU_IsHeapShared(pMMUHeap) )
1399                 {
1400                         ui32Flags |= PDUMP_FLAGS_CONTINUOUS;
1401                 }
1402                 PDUMPCOMMENTWITHFLAGS(ui32Flags, "Alloc PTs (MMU Context ID == %u, PDBaseIndex == %u, Size == 0x%x, Shared = %s)",
1403                                 pMMUHeap->psMMUContext->ui32PDumpMMUContextID,
1404                                 pMMUHeap->ui32PDBaseIndex,
1405                                 ui32Size,
1406                                 MMU_IsHeapShared(pMMUHeap)?"True":"False");
1407                 PDUMPCOMMENTWITHFLAGS(ui32Flags, "Alloc page table (page count == %08X)", ui32PageTableCount);
1408                 PDUMPCOMMENTWITHFLAGS(ui32Flags, "Page directory mods (page count == %08X)", ui32PageTableCount);
1409         }
1410 #endif
1411         /* walk the psPTInfoList to see what needs allocating: */
1412         for(i=0; i<ui32PageTableCount; i++)
1413         {
1414                 if(ppsPTInfoList[i] == IMG_NULL)
1415                 {
1416 #if defined(FIX_HW_BRN_31620)
1417                         /* Check if we have a saved PT (i.e. this PDE cache line is still live) */
1418                         if (pMMUHeap->psMMUContext->apsPTInfoListSave[ui32PDIndex + i])
1419                         {
1420                                 /* Only make this PTInfo "live" if it's requested */
1421                                 if (((ui32PDIndex + i) >= ui32PDRequestStart) && ((ui32PDIndex + i) <= ui32PDRequestEnd))
1422                                 {
1423                                         IMG_UINT32 ui32PDCacheLine = (ui32PDIndex + i) >> BRN31620_PDES_PER_CACHE_LINE_SHIFT;
1425                                         ppsPTInfoList[i] = pMMUHeap->psMMUContext->apsPTInfoListSave[ui32PDIndex + i];
1426                                         pMMUHeap->psMMUContext->apsPTInfoListSave[ui32PDIndex + i] = IMG_NULL;
1428                                         pMMUHeap->psMMUContext->ui32PDCacheRangeRefCount[ui32PDCacheLine]++;
1429                                 }
1430                         }
1431                         else
1432                         {
1433 #endif
1434                         OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
1435                                                  sizeof (MMU_PT_INFO),
1436                                                  (IMG_VOID **)&ppsPTInfoList[i], IMG_NULL,
1437                                                  "MMU Page Table Info");
1438                         if (ppsPTInfoList[i] == IMG_NULL)
1439                         {
1440                                 PVR_DPF((PVR_DBG_ERROR, "_DeferredAllocPagetables: ERROR call to OSAllocMem failed"));
1441                                 return IMG_FALSE;
1442                         }
1443                         OSMemSet (ppsPTInfoList[i], 0, sizeof(MMU_PT_INFO));
1444 #if defined(FIX_HW_BRN_31620)
1445                         }
1446 #endif
1447                 }
1448 #if defined(FIX_HW_BRN_31620)
1449                 /* Only try to allocate if ppsPTInfoList[i] is valid */
1450                 if (ppsPTInfoList[i])
1451                 {
1452 #endif
1453                 if(ppsPTInfoList[i]->hPTPageOSMemHandle == IMG_NULL
1454                 && ppsPTInfoList[i]->PTPageCpuVAddr == IMG_NULL)
1455                 {
1456                         IMG_DEV_PHYADDR sDevPAddr = { 0 };
1457 #if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
1458                         IMG_UINT32 *pui32Tmp;
1459                         IMG_UINT32 j;
1460 #else
1461 #if !defined(FIX_HW_BRN_31620)
1462                         /* no page table has been allocated so allocate one */
1463                         PVR_ASSERT(pui32PDEntry[i] == 0);
1464 #endif
1465 #endif
1466                         if(_AllocPageTableMemory (pMMUHeap, ppsPTInfoList[i], &sDevPAddr) != IMG_TRUE)
1467                         {
1468                                 PVR_DPF((PVR_DBG_ERROR, "_DeferredAllocPagetables: ERROR call to _AllocPageTableMemory failed"));
1469                                 return IMG_FALSE;
1470                         }
1471 #if defined(FIX_HW_BRN_31620)
1472                         bFlushSystemCache = IMG_TRUE;
1473                         /* Bump up the page table count if required */
1474                         {
1475                                 IMG_UINT32 ui32PD;
1476                                 IMG_UINT32 ui32PDCacheLine;
1477                                 IMG_UINT32 ui32PDBitMaskIndex;
1478                                 IMG_UINT32 ui32PDBitMaskShift;
1480                                 ui32PD = ui32PDIndex + i;
1481                                 ui32PDCacheLine = ui32PD >> BRN31620_PDES_PER_CACHE_LINE_SHIFT;
1482                                 ui32PDBitMaskIndex = ui32PDCacheLine >> BRN31620_CACHE_FLUSH_BITS_SHIFT;
1483                                 ui32PDBitMaskShift = ui32PDCacheLine & BRN31620_CACHE_FLUSH_BITS_MASK;
1484                                 ui32ModifiedCachelines[ui32PDBitMaskIndex] |= 1 << ui32PDBitMaskShift;
1486                                 /* Add 1 to ui32PD as we want a count, not an index */
1487                                 if ((pMMUHeap->ui32PDBaseIndex + pMMUHeap->ui32PageTableCount) < (ui32PD + 1))
1488                                 {
1489                                         pMMUHeap->ui32PageTableCount = (ui32PD + 1) - pMMUHeap->ui32PDBaseIndex;
1490                                 }
1492                                 if (((ui32PDIndex + i) >= ui32PDRequestStart) && ((ui32PDIndex + i) <= ui32PDRequestEnd))
1493                                 {
1494                                         pMMUHeap->psMMUContext->ui32PDCacheRangeRefCount[ui32PDCacheLine]++;
1495                                 }
1496                         }
1497 #endif
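/*
 * Illustrative example (assumed constants) of the flush-mask bookkeeping
 * above, taking 16 PDEs per cache line (BRN31620_PDES_PER_CACHE_LINE_SHIFT
 * == 4) and 32 cachelines tracked per mask word
 * (BRN31620_CACHE_FLUSH_BITS_SHIFT == 5, BRN31620_CACHE_FLUSH_BITS_MASK == 0x1F):
 *
 *   ui32PD             == 72
 *   ui32PDCacheLine    == 72 >> 4    == 4
 *   ui32PDBitMaskIndex == 4 >> 5     == 0
 *   ui32PDBitMaskShift == 4 & 0x1F   == 4
 *   ui32ModifiedCachelines[0] |= (1 << 4)
 *
 * i.e. one bit per dirty PD cache line, packed into an array of 32-bit words.
 */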
1498                         switch(pMMUHeap->psDevArena->DevMemHeapType)
1499                         {
1500                                 case DEVICE_MEMORY_HEAP_SHARED :
1501                                 case DEVICE_MEMORY_HEAP_SHARED_EXPORTED :
1502                                 {
1503                                         /* insert Page Table into all memory contexts */
1504                                         MMU_CONTEXT *psMMUContext = (MMU_CONTEXT*)pMMUHeap->psMMUContext->psDevInfo->pvMMUContextList;
1505 #if defined(SUPPORT_PDUMP_MULTI_PROCESS)
1506                                         PVRSRV_SGXDEV_INFO *psDevInfo = psMMUContext->psDevInfo;
1507 #endif
1508                                         while(psMMUContext)
1509                                         {
1510                                                 MakeKernelPageReadWrite(psMMUContext->pvPDCpuVAddr);
1511                                                 /* get the PD CPUVAddr base and advance to the first entry */
1512                                                 pui32PDEntry = (IMG_UINT32*)psMMUContext->pvPDCpuVAddr;
1513                                                 pui32PDEntry += ui32PDIndex;
1515                                                 /* insert the page, specify the data page size and make the pde valid */
1516                                                 pui32PDEntry[i] = (IMG_UINT32)(sDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
1517                                                                                 | pMMUHeap->ui32PDEPageSizeCtrl
1518                                                                                 | SGX_MMU_PDE_VALID;
1519                                                 MakeKernelPageReadOnly(psMMUContext->pvPDCpuVAddr);
1520                                                 #if defined(PDUMP)
1521                                                 /* pdump the PD Page modifications */
1522                                                 #if defined(SUPPORT_PDUMP_MULTI_PROCESS)
1523                                                 if(psMMUContext->bPDumpActive)
1524                                                 #endif
1525                                                 {
1526 #if defined(SUPPORT_PDUMP_MULTI_PROCESS)
1527                                                         /*
1528                                                                 Any modification of the uKernel memory context
1529                                                                 needs to be PDumped when we're multi-process
1530                                                          */
1531                                                         IMG_UINT32 ui32HeapFlags = ( psMMUContext->sPDDevPAddr.uiAddr == psDevInfo->sKernelPDDevPAddr.uiAddr ) ? PDUMP_FLAGS_PERSISTENT : 0;
1532 #else
1533                                                         IMG_UINT32 ui32HeapFlags = 0;
1534 #endif
1535                                                         PDUMPPDENTRIES(&pMMUHeap->sMMUAttrib, psMMUContext->hPDOSMemHandle, (IMG_VOID*)&pui32PDEntry[i], sizeof(IMG_UINT32), ui32HeapFlags, IMG_FALSE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
1536                                                 }
1537                                                 #endif /* PDUMP */
1538                                                 /* advance to next context */
1539                                                 psMMUContext = psMMUContext->psNext;
1540                                         }
1541 #if defined(FIX_HW_BRN_31620)
1542                                         bSharedPT = IMG_TRUE;
1543 #endif
1544                                         break;
1545                                 }
1546                                 case DEVICE_MEMORY_HEAP_PERCONTEXT :
1547                                 case DEVICE_MEMORY_HEAP_KERNEL :
1548                                 {
1549                                         MakeKernelPageReadWrite(pMMUHeap->psMMUContext->pvPDCpuVAddr);
1550                                         /* insert Page Table into only this memory context */
1551                                         pui32PDEntry[i] = (IMG_UINT32)(sDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
1552                                                                         | pMMUHeap->ui32PDEPageSizeCtrl
1553                                                                         | SGX_MMU_PDE_VALID;
1554                                         MakeKernelPageReadOnly(pMMUHeap->psMMUContext->pvPDCpuVAddr);
1555                                         /* pdump the PD Page modifications */
1556                                         PDUMPPDENTRIES(&pMMUHeap->sMMUAttrib, pMMUHeap->psMMUContext->hPDOSMemHandle, (IMG_VOID*)&pui32PDEntry[i], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
1557                                         break;
1558                                 }
1559                                 default:
1560                                 {
1561                                         PVR_DPF((PVR_DBG_ERROR, "_DeferredAllocPagetables: ERROR invalid heap type"));
1562                                         return IMG_FALSE;
1563                                 }
1564                         }
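/*
 * Sketch of the PDE value written in both heap cases above (field positions
 * are symbolic; the concrete shifts and masks come from sgxdefs.h for the
 * particular build):
 *
 *   PDE = (PT device physical address >> SGX_MMU_PDE_ADDR_ALIGNSHIFT)
 *       | ui32PDEPageSizeCtrl     -- encodes the heap's data page size
 *       | SGX_MMU_PDE_VALID       -- marks the entry as present
 *
 * For shared heaps the same PDE is stamped into every context on
 * psDevInfo->pvMMUContextList; for per-context/kernel heaps only the owning
 * context's PD is touched.
 */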
1566 #if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
1567                         /* This is not actually to do with multiple mem contexts, but with the directory cache.
1568                            In the single-context implementation of the MMU, the directory "cache" is actually a copy of the
1569                            page directory memory, and must be updated whenever the page directory changes, even if
1570                            a particular entry had no previous value.
1571                          */
1572                         MMU_InvalidateDirectoryCache(pMMUHeap->psMMUContext->psDevInfo);
1573 #endif
1574 #if defined(FIX_HW_BRN_31620)
1575                         /* If this PT is not in the requested range then save it and null out the main PTInfo */
1576                         if (((ui32PDIndex + i) < ui32PDRequestStart) || ((ui32PDIndex + i) > ui32PDRequestEnd))
1577                         {
1578                                         pMMUHeap->psMMUContext->apsPTInfoListSave[ui32PDIndex + i] = ppsPTInfoList[i];
1579                                         ppsPTInfoList[i] = IMG_NULL;
1580                         }
1581 #endif
1582                 }
1583                 else
1584                 {
1585 #if !defined(FIX_HW_BRN_31620)
1586                         /* already have an allocated PT */
1587                         PVR_ASSERT(pui32PDEntry[i] != 0);
1588 #endif
1589                 }
1590 #if defined(FIX_HW_BRN_31620)
1591                 }
1592 #endif
1593         }
1595         #if defined(SGX_FEATURE_SYSTEM_CACHE)
1596         #if defined(FIX_HW_BRN_31620)
1597         /* This function might not allocate any new PTs, so check before flushing */
1598         if (bFlushSystemCache)
1599         #endif
1600         {
1601                 MMU_InvalidateSystemLevelCache(pMMUHeap->psMMUContext->psDevInfo);
1602         }
1603         #endif /* SGX_FEATURE_SYSTEM_CACHE */
1605         #if defined(FIX_HW_BRN_31620)
1606         /* Handle the last 4MB roll over */
1607         sHighDevVAddr.uiAddr = sHighDevVAddr.uiAddr - 1;
1609         /* Update our PD flush mask if required */
1610         if (bFlushSystemCache)
1611         {
1612                 MMU_CONTEXT *psMMUContext;
1614                 if (bSharedPT)
1615                 {
1616                         MMU_CONTEXT *psMMUContext = (MMU_CONTEXT*)pMMUHeap->psMMUContext->psDevInfo->pvMMUContextList;
1618                         while(psMMUContext)
1619                         {
1620                                 for (i=0;i<BRN31620_CACHE_FLUSH_INDEX_SIZE;i++)
1621                                 {
1622                                         psMMUContext->ui32PDChangeMask[i] |= ui32ModifiedCachelines[i];
1623                                 }
1625                                 /* advance to next context */
1626                                 psMMUContext = psMMUContext->psNext;
1627                         }
1628                 }
1629                 else
1630                 {
1631                         for (i=0;i<BRN31620_CACHE_FLUSH_INDEX_SIZE;i++)
1632                         {
1633                                 pMMUHeap->psMMUContext->ui32PDChangeMask[i] |= ui32ModifiedCachelines[i];
1634                         }
1635                 }
1637                 /*
1638                  * Always hook up the dummy page when we allocate a new range of PTs.
1639                  * This may be overwritten before SGX ever accesses the dummy page,
1640                  * but we don't care: it's a lot simpler to add this logic here.
1641                  */
1642                 psMMUContext = pMMUHeap->psMMUContext;
1643                 for (i=0;i<BRN31620_CACHE_FLUSH_INDEX_SIZE;i++)
1644                 {
1645                         IMG_UINT32 j;
1647                         for(j=0;j<BRN31620_CACHE_FLUSH_BITS_SIZE;j++)
1648                         {
1649                                 if (ui32ModifiedCachelines[i] & (1 << j))
1650                                 {
1651                                         PVRSRV_SGXDEV_INFO *psDevInfo = psMMUContext->psDevInfo;
1652                                         MMU_PT_INFO *psTempPTInfo = IMG_NULL;
1653                                         IMG_UINT32 *pui32Tmp;
1655                                         ui32PDIndex = (((i * BRN31620_CACHE_FLUSH_BITS_SIZE) + j) * BRN31620_PDES_PER_CACHE_LINE_SIZE) + BRN31620_DUMMY_PDE_INDEX;
1657                                         /* The PT for the dummy page might not be "live". If not, get it from the saved pointer */
1658                                         if (psMMUContext->apsPTInfoList[ui32PDIndex])
1659                                         {
1660                                                 psTempPTInfo = psMMUContext->apsPTInfoList[ui32PDIndex];
1661                                         }
1662                                         else
1663                                         {
1664                                                 psTempPTInfo = psMMUContext->apsPTInfoListSave[ui32PDIndex];
1665                                         }
1667                                         PVR_ASSERT(psTempPTInfo != IMG_NULL);
1669                                         MakeKernelPageReadWrite(psTempPTInfo->PTPageCpuVAddr);
1670                                         pui32Tmp = (IMG_UINT32 *) psTempPTInfo->PTPageCpuVAddr;
1671                                         PVR_ASSERT(pui32Tmp != IMG_NULL);
1672                                         pui32Tmp[BRN31620_DUMMY_PTE_INDEX] = (psDevInfo->sBRN31620DummyPageDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
1673                                                                                                                         | SGX_MMU_PTE_DUMMY_PAGE
1674                                                                                                                         | SGX_MMU_PTE_READONLY
1675                                                                                                                         | SGX_MMU_PTE_VALID;
1676                                         MakeKernelPageReadOnly(psTempPTInfo->PTPageCpuVAddr);
1677                                         PDUMPCOMMENT("BRN31620 Dump PTE for dummy page after wiring up new PT");
1678                                         PDUMPMEMPTENTRIES(&pMMUHeap->sMMUAttrib, psTempPTInfo->hPTPageOSMemHandle, (IMG_VOID *) &pui32Tmp[BRN31620_DUMMY_PTE_INDEX], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PT_UNIQUETAG, PDUMP_PT_UNIQUETAG);
1679                                 }
1680                         }
1681                 }
1682         }
1683         #endif
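/*
 * Illustrative note on why the dummy hookup above is safe: the PTE written
 * at BRN31620_DUMMY_PTE_INDEX is
 *
 *   (dummy page phys >> SGX_MMU_PTE_ADDR_ALIGNSHIFT)
 *       | SGX_MMU_PTE_DUMMY_PAGE | SGX_MMU_PTE_READONLY | SGX_MMU_PTE_VALID
 *
 * so any speculative fetch through a newly wired PD cache line lands on a
 * read-only, signature-filled page rather than faulting or aliasing real
 * data. A later real mapping may overwrite this entry, which is harmless.
 */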
1685         return IMG_TRUE;
1686 }
1689 #if defined(PDUMP)
1690 /*!
1691  *      FUNCTION:       MMU_GetPDumpContextID
1692  *
1693  *      RETURNS:        pdump MMU context ID
1694  */
1695 IMG_UINT32 MMU_GetPDumpContextID(IMG_HANDLE hDevMemContext)
1696 {
1697         BM_CONTEXT *pBMContext = hDevMemContext;
1698         PVR_ASSERT(pBMContext);
1699         /* PRQA S 0505 1 */ /* PVR_ASSERT should catch NULL ptr */
1700         return pBMContext->psMMUContext->ui32PDumpMMUContextID;
1701 }
1703 /*!
1704  *      FUNCTION:       MMU_SetPDumpAttribs
1705  *
1706  *      PURPOSE:        Called from MMU_Initialise and MMU_Create.
1707  *                              Sets up device-specific attributes for pdumping.
1708  *                              FIXME: breaks variable size PTs. Really need separate per context
1709  *                              and per heap attribs.
1710  *
1711  *      INPUT:          psDeviceNode - used to access deviceID
1712  *      INPUT:          ui32DataPageMask - data page mask
1713  *      INPUT:          ui32PTSize - PT size
1714  *
1715  *      OUTPUT:         psMMUAttrib - pdump MMU attributes
1716  *
1717  *      RETURNS:        none
1718  */
1719 #if defined(SGX_FEATURE_VARIABLE_MMU_PAGE_SIZE)
1720 # error "FIXME: breaks variable size pagetables"
1721 #endif
1722 static IMG_VOID MMU_SetPDumpAttribs(PDUMP_MMU_ATTRIB *psMMUAttrib,
1723         PVRSRV_DEVICE_NODE *psDeviceNode,
1724         IMG_UINT32 ui32DataPageMask,
1725         IMG_UINT32 ui32PTSize)
1726 {
1727         /* Sets up device ID, contains pdump memspace name */
1728         psMMUAttrib->sDevId = psDeviceNode->sDevId;
1729         
1730         psMMUAttrib->pszPDRegRegion = IMG_NULL;
1731         psMMUAttrib->ui32DataPageMask = ui32DataPageMask;
1732         
1733         psMMUAttrib->ui32PTEValid = SGX_MMU_PTE_VALID;
1734         psMMUAttrib->ui32PTSize = ui32PTSize;
1735         psMMUAttrib->ui32PTEAlignShift = SGX_MMU_PTE_ADDR_ALIGNSHIFT;
1736         
1737         psMMUAttrib->ui32PDEMask = SGX_MMU_PDE_ADDR_MASK;
1738         psMMUAttrib->ui32PDEAlignShift = SGX_MMU_PDE_ADDR_ALIGNSHIFT;
1739 }
1740 #endif /* PDUMP */
1742 /*!
1743 ******************************************************************************
1744         FUNCTION:   MMU_Initialise
1746         PURPOSE:        Called from BM_CreateContext.
1747                                 Allocates the top level Page Directory 4k Page for the new context.
1749         PARAMETERS: In: psDeviceNode; Out: ppsMMUContext, psPDDevPAddr
1750         RETURNS:    PVRSRV_ERROR
1751 ******************************************************************************/
1752 PVRSRV_ERROR
1753 MMU_Initialise (PVRSRV_DEVICE_NODE *psDeviceNode, MMU_CONTEXT **ppsMMUContext, IMG_DEV_PHYADDR *psPDDevPAddr)
1754 {
1755         IMG_UINT32 *pui32Tmp;
1756         IMG_UINT32 i;
1757         IMG_CPU_VIRTADDR pvPDCpuVAddr = IMG_NULL;
1758         IMG_DEV_PHYADDR sPDDevPAddr = {0};
1759         IMG_CPU_PHYADDR sCpuPAddr;
1760         MMU_CONTEXT *psMMUContext;
1761         IMG_HANDLE hPDOSMemHandle = IMG_NULL;
1762         SYS_DATA *psSysData;
1763         PVRSRV_SGXDEV_INFO *psDevInfo;
1764         PVRSRV_ERROR eError = PVRSRV_OK;
1765 #if defined(PDUMP)
1766         PDUMP_MMU_ATTRIB sMMUAttrib;
1767 #endif
1768         PVR_DPF ((PVR_DBG_MESSAGE, "MMU_Initialise"));
1770         SysAcquireData(&psSysData);
1771 #if defined(PDUMP)
1772         /* Note: these attribs are on the stack, used only to pdump the MMU context
1773          * creation. */
1774         MMU_SetPDumpAttribs(&sMMUAttrib, psDeviceNode,
1775                                                 SGX_MMU_PAGE_MASK,
1776                                                 SGX_MMU_PT_SIZE * sizeof(IMG_UINT32));
1777 #endif
1779         OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
1780                                  sizeof (MMU_CONTEXT),
1781                                  (IMG_VOID **)&psMMUContext, IMG_NULL,
1782                                  "MMU Context");
1783         if (psMMUContext == IMG_NULL)
1784         {
1785                 PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to OSAllocMem failed"));
1786                 return PVRSRV_ERROR_OUT_OF_MEMORY;
1787         }
1788         OSMemSet (psMMUContext, 0, sizeof(MMU_CONTEXT));
1790         /* stick the devinfo in the context for subsequent use */
1791         psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice;
1792         psMMUContext->psDevInfo = psDevInfo;
1794         /* record device node for subsequent use */
1795         psMMUContext->psDeviceNode = psDeviceNode;
1797         /* allocate 4k page directory page for the new context */
1798         if(psDeviceNode->psLocalDevMemArena == IMG_NULL)
1799         {
1800                 if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
1801                                                  SGX_MMU_PAGE_SIZE,
1802                                                  SGX_MMU_PAGE_SIZE,
1803                                                  IMG_NULL,
1804                                                  0,
1805                                                  IMG_NULL,
1806                                                  &pvPDCpuVAddr,
1807                                                  &hPDOSMemHandle) != PVRSRV_OK)
1808                 {
1809                         PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to OSAllocPages failed"));
1810                         eError = PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES;
1811                         goto exit_setting_values;
1812                 }
1814                 if(pvPDCpuVAddr)
1815                 {
1816                         sCpuPAddr = OSMapLinToCPUPhys(hPDOSMemHandle,
1817                                                                                   pvPDCpuVAddr);
1818                 }
1819                 else
1820                 {
1821                         /* This is not used in all cases, since not all ports currently
1822                          * support OSMemHandleToCpuPAddr */
1823                         sCpuPAddr = OSMemHandleToCpuPAddr(hPDOSMemHandle, 0);
1824                 }
1825                 sPDDevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
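/*
 * The translation chain above, in brief (names as used in this file):
 *
 *   pvPDCpuVAddr --OSMapLinToCPUPhys / OSMemHandleToCpuPAddr--> sCpuPAddr
 *   sCpuPAddr    --SysCpuPAddrToDevPAddr(SGX)-->                sPDDevPAddr
 *
 * sPDDevPAddr is what eventually gets programmed as the SGX directory base;
 * the CPU virtual address is only used by the driver to fill in entries.
 */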
1827                 #if PAGE_TEST
1828                 PageTest(pvPDCpuVAddr, sPDDevPAddr);
1829                 #endif
1831 #if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
1832                 /* Allocate dummy PT and Data pages for the first context to be created */
1833                 if(!psDevInfo->pvMMUContextList)
1834                 {
1835                         /* Dummy PT page */
1836                         if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
1837                                                          SGX_MMU_PAGE_SIZE,
1838                                                          SGX_MMU_PAGE_SIZE,
1839                                                          IMG_NULL,
1840                                                          0,
1841                                                          IMG_NULL,
1842                                                          &psDevInfo->pvDummyPTPageCpuVAddr,
1843                                                          &psDevInfo->hDummyPTPageOSMemHandle) != PVRSRV_OK)
1844                         {
1845                                 PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to OSAllocPages failed"));
1846                                 eError = PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES;
1847                                 goto exit_setting_values;
1848                         }
1850                         if(psDevInfo->pvDummyPTPageCpuVAddr)
1851                         {
1852                                 sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->hDummyPTPageOSMemHandle,
1853                                                                                           psDevInfo->pvDummyPTPageCpuVAddr);
1854                         }
1855                         else
1856                         {
1857                                 /* This is not used in all cases, since not all ports currently
1858                                  * support OSMemHandleToCpuPAddr */
1859                                 sCpuPAddr = OSMemHandleToCpuPAddr(psDevInfo->hDummyPTPageOSMemHandle, 0);
1860                         }
1861                         psDevInfo->sDummyPTDevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
1863                         /* Dummy Data page */
1864                         if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
1865                                                          SGX_MMU_PAGE_SIZE,
1866                                                          SGX_MMU_PAGE_SIZE,
1867                                                          IMG_NULL,
1868                                                          0,
1869                                                          IMG_NULL,
1870                                                          &psDevInfo->pvDummyDataPageCpuVAddr,
1871                                                          &psDevInfo->hDummyDataPageOSMemHandle) != PVRSRV_OK)
1872                         {
1873                                 PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to OSAllocPages failed"));
1874                                 eError = PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES;
1875                                 goto exit_setting_values;
1876                         }
1878                         if(psDevInfo->pvDummyDataPageCpuVAddr)
1879                         {
1880                                 sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->hDummyDataPageOSMemHandle,
1881                                                                                           psDevInfo->pvDummyDataPageCpuVAddr);
1882                         }
1883                         else
1884                         {
1885                                 sCpuPAddr = OSMemHandleToCpuPAddr(psDevInfo->hDummyDataPageOSMemHandle, 0);
1886                         }
1887                         psDevInfo->sDummyDataDevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
1888                 }
1889 #endif /* #if defined(SUPPORT_SGX_MMU_DUMMY_PAGE) */
1890 #if defined(FIX_HW_BRN_31620)
1891                 /* Allocate dummy Data pages for the first context to be created */
1892                 if(!psDevInfo->pvMMUContextList)
1893                 {
1894                         IMG_UINT32 j;
1895                         /* Allocate dummy page */
1896                         if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
1897                                                          SGX_MMU_PAGE_SIZE,
1898                                                          SGX_MMU_PAGE_SIZE,
1899                                                          IMG_NULL,
1900                                                          0,
1901                                                          IMG_NULL,
1902                                                          &psDevInfo->pvBRN31620DummyPageCpuVAddr,
1903                                                          &psDevInfo->hBRN31620DummyPageOSMemHandle) != PVRSRV_OK)
1904                         {
1905                                 PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to OSAllocPages failed"));
1906                                 eError = PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES;
1907                                 goto exit_setting_values;
1908                         }                               
1910                         /* Get a physical address */
1911                         if(psDevInfo->pvBRN31620DummyPageCpuVAddr)
1912                         {
1913                                 sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->hBRN31620DummyPageOSMemHandle,
1914                                                                                           psDevInfo->pvBRN31620DummyPageCpuVAddr);
1915                         }
1916                         else
1917                         {
1918                                 sCpuPAddr = OSMemHandleToCpuPAddr(psDevInfo->hBRN31620DummyPageOSMemHandle, 0);
1919                         }
1921                         pui32Tmp = (IMG_UINT32 *)psDevInfo->pvBRN31620DummyPageCpuVAddr;
1922                         for(j=0; j<(SGX_MMU_PAGE_SIZE/4); j++)
1923                         {
1924                                 pui32Tmp[j] = BRN31620_DUMMY_PAGE_SIGNATURE;
1925                         }
1927                         psDevInfo->sBRN31620DummyPageDevPAddr = SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
1928                         PDUMPMALLOCPAGETABLE(&psDeviceNode->sDevId, psDevInfo->hBRN31620DummyPageOSMemHandle, 0, psDevInfo->pvBRN31620DummyPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, PDUMP_PT_UNIQUETAG);
1930                         /* Allocate dummy PT */
1931                         if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
1932                                                          SGX_MMU_PAGE_SIZE,
1933                                                          SGX_MMU_PAGE_SIZE,
1934                                                          IMG_NULL,
1935                                                          0,
1936                                                          IMG_NULL,
1937                                                          &psDevInfo->pvBRN31620DummyPTCpuVAddr,
1938                                                          &psDevInfo->hBRN31620DummyPTOSMemHandle) != PVRSRV_OK)
1939                         {
1940                                 PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to OSAllocPages failed"));
1941                                 eError = PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES;
1942                                 goto exit_setting_values;
1943                         }                               
1945                         /* Get a physical address */
1946                         if(psDevInfo->pvBRN31620DummyPTCpuVAddr)
1947                         {
1948                                 sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->hBRN31620DummyPTOSMemHandle,
1949                                                                                           psDevInfo->pvBRN31620DummyPTCpuVAddr);
1950                         }
1951                         else
1952                         {
1953                                 sCpuPAddr = OSMemHandleToCpuPAddr(psDevInfo->hBRN31620DummyPTOSMemHandle, 0);
1954                         }
1956                         OSMemSet(psDevInfo->pvBRN31620DummyPTCpuVAddr,0,SGX_MMU_PAGE_SIZE);
1957                         psDevInfo->sBRN31620DummyPTDevPAddr = SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
1958                         PDUMPMALLOCPAGETABLE(&psDeviceNode->sDevId, psDevInfo->hBRN31620DummyPTOSMemHandle, 0, psDevInfo->pvBRN31620DummyPTCpuVAddr, SGX_MMU_PAGE_SIZE, 0, PDUMP_PT_UNIQUETAG);
1959                 }
1960 #endif
1961         }
1962         else
1963         {
1964                 /* 
1965                    We cannot use IMG_SYS_PHYADDR here, as that is 64-bit for 32-bit PAE builds.
1966                    The physical address in this call to RA_Alloc is specifically the SysPAddr 
1967                    of local (card) space, and it is highly unlikely we would ever need to 
1968                    support > 4GB of local (card) memory (this does assume that such local
1969                    memory will be mapped into System physical memory space at a low address so
1970                    that any and all local memory exists within the 4GB SYSPAddr range).
1971                  */
1972                 IMG_UINTPTR_T uiLocalPAddr;
1973                 IMG_SYS_PHYADDR sSysPAddr;
1975                 /* allocate from the device's local memory arena */
1976                 if(RA_Alloc(psDeviceNode->psLocalDevMemArena,
1977                                         SGX_MMU_PAGE_SIZE,
1978                                         IMG_NULL,
1979                                         IMG_NULL,
1980                                         0,
1981                                         SGX_MMU_PAGE_SIZE,
1982                                         0,
1983                                         IMG_NULL,
1984                                         0,
1985                                         &uiLocalPAddr)!= IMG_TRUE)
1986                 {
1987                         PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to RA_Alloc failed"));
1988                         eError = PVRSRV_ERROR_FAILED_TO_ALLOC_VIRT_MEMORY;
1989                         goto exit_setting_values;
1990                 }
1992                 /* Munge the local PAddr back into the SysPAddr */
1993                 sSysPAddr.uiAddr = uiLocalPAddr;
1995                 /* derive the CPU virtual address */
1996                 sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
1997                 sPDDevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysPAddr);
1998                 pvPDCpuVAddr = OSMapPhysToLin(sCpuPAddr,
1999                                                                                 SGX_MMU_PAGE_SIZE,
2000                                                                                 PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
2001                                                                                 &hPDOSMemHandle);
2002                 if(!pvPDCpuVAddr)
2003                 {
2004                         PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR failed to map page tables"));
2005                         eError = PVRSRV_ERROR_FAILED_TO_MAP_PAGE_TABLE;
2006                         goto exit_setting_values;
2007                 }
2009                 #if PAGE_TEST
2010                 PageTest(pvPDCpuVAddr, sPDDevPAddr);
2011                 #endif
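/*
 * The local-memory path above runs the chain in the other direction
 * (names as used in this file):
 *
 *   RA_Alloc  -->  uiLocalPAddr (card-local offset, munged into sSysPAddr)
 *   sSysPAddr --SysSysPAddrToCpuPAddr--> sCpuPAddr --OSMapPhysToLin--> pvPDCpuVAddr
 *   sSysPAddr --SysSysPAddrToDevPAddr--> sPDDevPAddr
 *
 * i.e. the PD page is carved out of the device's local memory arena and
 * then mapped into the kernel so the driver can write its entries.
 */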
2013 #if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
2014                 /* Allocate dummy PT and Data pages for the first context to be created */
2015                 if(!psDevInfo->pvMMUContextList)
2016                 {
2017                         /* Dummy PT page */
2018                         if(RA_Alloc(psDeviceNode->psLocalDevMemArena,
2019                                                 SGX_MMU_PAGE_SIZE,
2020                                                 IMG_NULL,
2021                                                 IMG_NULL,
2022                                                 0,
2023                                                 SGX_MMU_PAGE_SIZE,
2024                                                 0,
2025                                                 IMG_NULL,
2026                                                 0,
2027                                                 &uiLocalPAddr)!= IMG_TRUE)
2028                         {
2029                                 PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to RA_Alloc failed"));
2030                                 eError = PVRSRV_ERROR_FAILED_TO_ALLOC_VIRT_MEMORY;
2031                                 goto exit_setting_values;
2032                         }
2034                         /* Munge the local PAddr back into the SysPAddr */
2035                         sSysPAddr.uiAddr = uiLocalPAddr;
2037                         /* derive the CPU virtual address */
2038                         sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
2039                         psDevInfo->sDummyPTDevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysPAddr);
2040                         psDevInfo->pvDummyPTPageCpuVAddr = OSMapPhysToLin(sCpuPAddr,
2041                                                                                                                                 SGX_MMU_PAGE_SIZE,
2042                                                                                                                                 PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
2043                                                                                                                                 &psDevInfo->hDummyPTPageOSMemHandle);
2044                         if(!psDevInfo->pvDummyPTPageCpuVAddr)
2045                         {
2046                                 PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR failed to map page tables"));
2047                                 eError = PVRSRV_ERROR_FAILED_TO_MAP_PAGE_TABLE;
2048                                 goto exit_setting_values;
2049                         }
2051                         /* Dummy Data page */
2052                         if(RA_Alloc(psDeviceNode->psLocalDevMemArena,
2053                                                 SGX_MMU_PAGE_SIZE,
2054                                                 IMG_NULL,
2055                                                 IMG_NULL,
2056                                                 0,
2057                                                 SGX_MMU_PAGE_SIZE,
2058                                                 0,
2059                                                 IMG_NULL,
2060                                                 0,
2061                                                 &uiLocalPAddr)!= IMG_TRUE)
2062                         {
2063                                 PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to RA_Alloc failed"));
2064                                 eError = PVRSRV_ERROR_FAILED_TO_ALLOC_VIRT_MEMORY;
2065                                 goto exit_setting_values;
2066                         }
2068                         /* Munge the local PAddr back into the SysPAddr */
2069                         sSysPAddr.uiAddr = uiLocalPAddr;
2071                         /* derive the CPU virtual address */
2072                         sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
2073                         psDevInfo->sDummyDataDevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysPAddr);
2074                         psDevInfo->pvDummyDataPageCpuVAddr = OSMapPhysToLin(sCpuPAddr,
2075                                                                                                                                 SGX_MMU_PAGE_SIZE,
2076                                                                                                                                 PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
2077                                                                                                                                 &psDevInfo->hDummyDataPageOSMemHandle);
2078                         if(!psDevInfo->pvDummyDataPageCpuVAddr)
2079                         {
2080                                 PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR failed to map page tables"));
2081                                 eError = PVRSRV_ERROR_FAILED_TO_MAP_PAGE_TABLE;
2082                                 goto exit_setting_values;
2083                         }
2084                 }
2085 #endif /* #if defined(SUPPORT_SGX_MMU_DUMMY_PAGE) */
2086 #if defined(FIX_HW_BRN_31620)
2087                 /* Allocate dummy PT and Data pages for the first context to be created */
2088                 if(!psDevInfo->pvMMUContextList)
2089                 {
2090                         IMG_UINT32 j;
2091                         /* Allocate dummy page */
2092                         if(RA_Alloc(psDeviceNode->psLocalDevMemArena,
2093                                                 SGX_MMU_PAGE_SIZE,
2094                                                 IMG_NULL,
2095                                                 IMG_NULL,
2096                                                 0,
2097                                                 SGX_MMU_PAGE_SIZE,
2098                                                 0,
2099                                                 IMG_NULL,
2100                                                 0,
2101                                                 &uiLocalPAddr)!= IMG_TRUE)
2102                         {
2103                                 PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to RA_Alloc failed"));
2104                                 eError = PVRSRV_ERROR_FAILED_TO_ALLOC_VIRT_MEMORY;
2105                                 goto exit_setting_values;
2106                         }
2108                         /* Munge the local PAddr back into the SysPAddr */
2109                         sSysPAddr.uiAddr = uiLocalPAddr;
2111                         /* derive the CPU virtual address */
2112                         sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
2113                         psDevInfo->sBRN31620DummyPageDevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysPAddr);
2114                         psDevInfo->pvBRN31620DummyPageCpuVAddr = OSMapPhysToLin(sCpuPAddr,
2115                                                                                                                                 SGX_MMU_PAGE_SIZE,
2116                                                                                                                                 PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
2117                                                                                                                                 &psDevInfo->hBRN31620DummyPageOSMemHandle);
2118                         if(!psDevInfo->pvBRN31620DummyPageCpuVAddr)
2119                         {
2120                                 PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR failed to map page tables"));
2121                                 eError = PVRSRV_ERROR_FAILED_TO_MAP_PAGE_TABLE;
2122                                 goto exit_setting_values;
2123                         }
2125                         MakeKernelPageReadWrite(psDevInfo->pvBRN31620DummyPageCpuVAddr);
2126                         pui32Tmp = (IMG_UINT32 *)psDevInfo->pvBRN31620DummyPageCpuVAddr;
2127                         for(j=0; j<(SGX_MMU_PAGE_SIZE/4); j++)
2128                         {
2129                                 pui32Tmp[j] = BRN31620_DUMMY_PAGE_SIGNATURE;
2130                         }
2131                         MakeKernelPageReadOnly(psDevInfo->pvBRN31620DummyPageCpuVAddr);
2132                         PDUMPMALLOCPAGETABLE(&psDeviceNode->sDevId, psDevInfo->hBRN31620DummyPageOSMemHandle, 0, psDevInfo->pvBRN31620DummyPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, PDUMP_PT_UNIQUETAG);
2134                         /* Allocate dummy PT */
2135                         if(RA_Alloc(psDeviceNode->psLocalDevMemArena,
2136                                                 SGX_MMU_PAGE_SIZE,
2137                                                 IMG_NULL,
2138                                                 IMG_NULL,
2139                                                 0,
2140                                                 SGX_MMU_PAGE_SIZE,
2141                                                 0,
2142                                                 IMG_NULL,
2143                                                 0,
2144                                                 &uiLocalPAddr)!= IMG_TRUE)
2145                         {
2146                                 PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to RA_Alloc failed"));
2147                                 eError = PVRSRV_ERROR_FAILED_TO_ALLOC_VIRT_MEMORY;
2148                                 goto exit_setting_values;
2149                         }
2151                         /* Munge the local PAddr back into the SysPAddr */
2152                         sSysPAddr.uiAddr = uiLocalPAddr;
2154                         /* derive the CPU virtual address */
2155                         sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
2156                         psDevInfo->sBRN31620DummyPTDevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysPAddr);
2157                         psDevInfo->pvBRN31620DummyPTCpuVAddr = OSMapPhysToLin(sCpuPAddr,
2158                                                                                                                                 SGX_MMU_PAGE_SIZE,
2159                                                                                                                                 PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
2160                                                                                                                                 &psDevInfo->hBRN31620DummyPTOSMemHandle);
2162                         if(!psDevInfo->pvBRN31620DummyPTCpuVAddr)
2163                         {
2164                                 PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR failed to map page tables"));
2165                                 eError = PVRSRV_ERROR_FAILED_TO_MAP_PAGE_TABLE;
2166                                 goto exit_setting_values;
2167                         }
2169                         OSMemSet(psDevInfo->pvBRN31620DummyPTCpuVAddr,0,SGX_MMU_PAGE_SIZE);             
2170                         PDUMPMALLOCPAGETABLE(&psDeviceNode->sDevId, psDevInfo->hBRN31620DummyPTOSMemHandle, 0, psDevInfo->pvBRN31620DummyPTCpuVAddr, SGX_MMU_PAGE_SIZE, 0, PDUMP_PT_UNIQUETAG);
2171                 }
2172 #endif /* #if defined(FIX_HW_BRN_31620) */
2173         }
2175 #if defined(FIX_HW_BRN_31620)
2176         if (!psDevInfo->pvMMUContextList)
2177         {
2178                 /* Save the kernel MMU context which is always the 1st to be created */
2179                 psDevInfo->hKernelMMUContext = psMMUContext;
2180                 PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: saving kernel mmu context: %p", psMMUContext));
2181         }
2182 #endif
2184 #if defined(PDUMP)
2185 #if defined(SUPPORT_PDUMP_MULTI_PROCESS)
2186         /* Find out if this context is for the active pdump client.
2187          * If it is, need to ensure PD entries are pdumped whenever another
2188          * process allocates from a shared heap. */
2189         {
2190                 PVRSRV_PER_PROCESS_DATA* psPerProc = PVRSRVFindPerProcessData();
2191                 if(psPerProc == IMG_NULL)
2192                 {
2193                         /* changes to the kernel context PD/PTs should be pdumped */
2194                         psMMUContext->bPDumpActive = IMG_TRUE;
2195                 }
2196                 else
2197                 {
2198                         psMMUContext->bPDumpActive = psPerProc->bPDumpActive;
2199                 }
2200         }
2201 #endif /* SUPPORT_PDUMP_MULTI_PROCESS */
2202         /* pdump the PD malloc */
2203         PDUMPCOMMENT("Alloc page directory for new MMU context (PDDevPAddr == 0x" DEVPADDR_FMT ")", sPDDevPAddr.uiAddr);
2204         PDUMPMALLOCPAGETABLE(&psDeviceNode->sDevId, hPDOSMemHandle, 0, pvPDCpuVAddr, SGX_MMU_PAGE_SIZE, 0, PDUMP_PD_UNIQUETAG);
2205 #endif /* PDUMP */
2207 #ifdef SUPPORT_SGX_MMU_BYPASS
2208         EnableHostAccess(psMMUContext);
2209 #endif
2211         if (pvPDCpuVAddr)
2212         {
2213                 pui32Tmp = (IMG_UINT32 *)pvPDCpuVAddr;
2214         }
2215         else
2216         {
2217                 PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: pvPDCpuVAddr invalid"));
2218                 eError = PVRSRV_ERROR_INVALID_CPU_ADDR;
2219                 goto exit_setting_values;
2220         }
2223 #if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
2224         MakeKernelPageReadWrite(pvPDCpuVAddr);
2225         /* wire up the new PD to the dummy PT */
2226         for(i=0; i<SGX_MMU_PD_SIZE; i++)
2227         {
2228                 pui32Tmp[i] = (psDevInfo->sDummyPTDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
2229                                         | SGX_MMU_PDE_PAGE_SIZE_4K
2230                                         | SGX_MMU_PDE_VALID;
2231         }
2232         MakeKernelPageReadOnly(pvPDCpuVAddr);
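/*
 * Illustrative note on the wiring above, assuming SGX_MMU_PD_SIZE == 1024
 * and SGX_MMU_PT_SIZE == 1024 (4kB data pages): every one of the 1024 PDEs
 * points at the single dummy PT and, below for the first context, every one
 * of its 1024 PTEs points at the single dummy data page. Any stray device
 * access anywhere in the 4GB VA space therefore resolves to the
 * signature-filled dummy page instead of raising an MMU fault.
 */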
2234         if(!psDevInfo->pvMMUContextList)
2235         {
2236                 /*
2237                         if we've just allocated the dummy pages,
2238                         wire up the dummy PT to the dummy data page
2239                 */
2240                 MakeKernelPageReadWrite(psDevInfo->pvDummyPTPageCpuVAddr);
2241                 pui32Tmp = (IMG_UINT32 *)psDevInfo->pvDummyPTPageCpuVAddr;
2242                 for(i=0; i<SGX_MMU_PT_SIZE; i++)
2243                 {
2244                         pui32Tmp[i] = (psDevInfo->sDummyDataDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
2245                                                 | SGX_MMU_PTE_VALID;
2246                 }
2247                 MakeKernelPageReadOnly(psDevInfo->pvDummyPTPageCpuVAddr);
2248                 /* pdump the Dummy PT Page */
2249                 PDUMPCOMMENT("Dummy Page table contents");
2250                 PDUMPMEMPTENTRIES(&sMMUAttrib, psDevInfo->hDummyPTPageOSMemHandle, psDevInfo->pvDummyPTPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
2252                 /*
2253                         write a signature to the dummy data page
2254                 */
2255                 MakeKernelPageReadWrite(psDevInfo->pvDummyDataPageCpuVAddr);
2256                 pui32Tmp = (IMG_UINT32 *)psDevInfo->pvDummyDataPageCpuVAddr;
2257                 for(i=0; i<(SGX_MMU_PAGE_SIZE/4); i++)
2258                 {
2259                         pui32Tmp[i] = DUMMY_DATA_PAGE_SIGNATURE;
2260                 }
2261                 MakeKernelPageReadOnly(psDevInfo->pvDummyDataPageCpuVAddr);
2262                 /* pdump the Dummy Data Page */
2263                 PDUMPCOMMENT("Dummy Data Page contents");
2264                 PDUMPMEMPTENTRIES(&sMMUAttrib, psDevInfo->hDummyDataPageOSMemHandle, psDevInfo->pvDummyDataPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
2265         }
2266 #else /* #if defined(SUPPORT_SGX_MMU_DUMMY_PAGE) */
2267         /* initialise the PD to invalid address state */
2268         MakeKernelPageReadWrite(pvPDCpuVAddr);
2269         for(i=0; i<SGX_MMU_PD_SIZE; i++)
2270         {
2271                 /* invalid, no read, no write, no cache consistency */
2272                 pui32Tmp[i] = 0;
2273         }
2274         MakeKernelPageReadOnly(pvPDCpuVAddr);
2275 #endif /* #if defined(SUPPORT_SGX_MMU_DUMMY_PAGE) */
2277 #if defined(PDUMP)
2278 #if defined(SUPPORT_PDUMP_MULTI_PROCESS)
2279         if(psMMUContext->bPDumpActive)
2280 #endif /* SUPPORT_PDUMP_MULTI_PROCESS */
2281         {
2282                 /* pdump the PD Page */
2283                 PDUMPCOMMENT("Page directory contents");
2284                 PDUMPPDENTRIES(&sMMUAttrib, hPDOSMemHandle, pvPDCpuVAddr, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
2285         }
2286 #endif
2287 #if defined(FIX_HW_BRN_31620)
2288         {
2289                 IMG_UINT32 i;
2290                 IMG_UINT32 ui32PDCount = 0;
2291                 IMG_UINT32 *pui32PT;
2292                 pui32Tmp = (IMG_UINT32 *)pvPDCpuVAddr;
2294                 PDUMPCOMMENT("BRN31620 Set up dummy PT");
2296                 MakeKernelPageReadWrite(psDevInfo->pvBRN31620DummyPTCpuVAddr);
2297                 pui32PT = (IMG_UINT32 *) psDevInfo->pvBRN31620DummyPTCpuVAddr;
2298                 pui32PT[BRN31620_DUMMY_PTE_INDEX] = (psDevInfo->sBRN31620DummyPageDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
2299                                                                 | SGX_MMU_PTE_DUMMY_PAGE
2300                                                                 | SGX_MMU_PTE_READONLY
2301                                                                 | SGX_MMU_PTE_VALID;
2302                 MakeKernelPageReadOnly(psDevInfo->pvBRN31620DummyPTCpuVAddr);
2304 #if defined(PDUMP)
2305                 /* Dump initial contents */
2306                 PDUMPCOMMENT("BRN31620 Dump dummy PT contents");
2307                 PDUMPMEMPTENTRIES(&sMMUAttrib,  psDevInfo->hBRN31620DummyPTOSMemHandle, psDevInfo->pvBRN31620DummyPTCpuVAddr, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
2308                 PDUMPCOMMENT("BRN31620 Dump dummy page contents");
2309                 PDUMPMEMPTENTRIES(&sMMUAttrib,  psDevInfo->hBRN31620DummyPageOSMemHandle, psDevInfo->pvBRN31620DummyPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
2311                 /* Dump the wiring */           
2312                 for(i=0;i<SGX_MMU_PT_SIZE;i++)
2313                 {
2314                         PDUMPMEMPTENTRIES(&sMMUAttrib, psDevInfo->hBRN31620DummyPTOSMemHandle, &pui32PT[i], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
2315                 }
2316 #endif
2317                 PDUMPCOMMENT("BRN31620 Dump PDE wire up");
2318                 /* Walk the PD wireing up the PT's */
2319                 for(i=0;i<SGX_MMU_PD_SIZE;i++)
2320                 {
2321                         pui32Tmp[i] = 0;
2323                         if (ui32PDCount == BRN31620_DUMMY_PDE_INDEX)
2324                         {
2325                                 MakeKernelPageReadWrite(pvPDCpuVAddr);
2326                                 pui32Tmp[i] = (psDevInfo->sBRN31620DummyPTDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
2327                                                 | SGX_MMU_PDE_PAGE_SIZE_4K
2328                                                 | SGX_MMU_PDE_DUMMY_PAGE
2329                                                 | SGX_MMU_PDE_VALID;
2330                                 MakeKernelPageReadOnly(pvPDCpuVAddr);
2331                         }
2332                                 PDUMPMEMPTENTRIES(&sMMUAttrib, hPDOSMemHandle, (IMG_VOID *) &pui32Tmp[i], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PT_UNIQUETAG, PDUMP_PT_UNIQUETAG);
2333                         ui32PDCount++;
2334                         if (ui32PDCount == BRN31620_PDES_PER_CACHE_LINE_SIZE)
2335                         {
2336                                 /* Reset PT count */
2337                                 ui32PDCount = 0;
2338                         }
2339                 }
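
		/*
		 * Illustration (constants hypothetical): if a PD cache line held
		 * BRN31620_PDES_PER_CACHE_LINE_SIZE == 8 PDEs and
		 * BRN31620_DUMMY_PDE_INDEX == 7, the walk above would plant the
		 * dummy PDE at directory entries 7, 15, 23, ... so that every PD
		 * cache line contains at least one valid (dummy) entry, which is
		 * what the BRN31620 workaround relies on.
		 */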

		/* pdump the Dummy Page (now wired up via the dummy PT) */
		PDUMPCOMMENT("BRN31620 dummy Page table contents");
		PDUMPMEMPTENTRIES(&sMMUAttrib, psDevInfo->hBRN31620DummyPageOSMemHandle, psDevInfo->pvBRN31620DummyPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
	}
#endif
#if defined(PDUMP)
	/* pdump set MMU context */
	{
		/* default MMU type is 1, 4k page */
		IMG_UINT32 ui32MMUType = 1;

		#if defined(SGX_FEATURE_36BIT_MMU)
			ui32MMUType = 3;
		#else
			#if defined(SGX_FEATURE_VARIABLE_MMU_PAGE_SIZE)
				ui32MMUType = 2;
			#endif
		#endif

		eError = PDumpSetMMUContext(PVRSRV_DEVICE_TYPE_SGX,
									psDeviceNode->sDevId.pszPDumpDevName,
									&psMMUContext->ui32PDumpMMUContextID,
									ui32MMUType,
									PDUMP_PT_UNIQUETAG,
									hPDOSMemHandle,
									pvPDCpuVAddr);
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to PDumpSetMMUContext failed"));
			goto exit_setting_values;
		}
	}

	/* PDump the context ID */
	PDUMPCOMMENT("Set MMU context complete (MMU Context ID == %u)", psMMUContext->ui32PDumpMMUContextID);
#endif

#if defined(FIX_HW_BRN_31620)
	for(i=0;i<BRN31620_CACHE_FLUSH_INDEX_SIZE;i++)
	{
		psMMUContext->ui32PDChangeMask[i] = 0;
	}

	for(i=0;i<BRN31620_CACHE_FLUSH_SIZE;i++)
	{
		psMMUContext->ui32PDCacheRangeRefCount[i] = 0;
	}

	for(i=0;i<SGX_MAX_PD_ENTRIES;i++)
	{
		psMMUContext->apsPTInfoListSave[i] = IMG_NULL;
	}
#endif

exit_setting_values:

	/* store PD info in the MMU context */
	psMMUContext->pvPDCpuVAddr = pvPDCpuVAddr;
	psMMUContext->sPDDevPAddr = sPDDevPAddr;
	psMMUContext->hPDOSMemHandle = hPDOSMemHandle;

	/* Get some process information to aid debug */
	psMMUContext->ui32PID = OSGetCurrentProcessIDKM();
	psMMUContext->szName[0] = '\0';
	OSGetCurrentProcessNameKM(psMMUContext->szName, MMU_CONTEXT_NAME_SIZE);

	/* return context */
	*ppsMMUContext = psMMUContext;

	/* return the PD DevPAddr */
	*psPDDevPAddr = sPDDevPAddr;

	/* add the new MMU context onto the list of MMU contexts */
	psMMUContext->psNext = (MMU_CONTEXT*)psDevInfo->pvMMUContextList;
	psDevInfo->pvMMUContextList = (IMG_VOID*)psMMUContext;

#ifdef SUPPORT_SGX_MMU_BYPASS
	DisableHostAccess(psMMUContext);
#endif

	return eError;
}

/*!
******************************************************************************
	FUNCTION:   MMU_Finalise

	PURPOSE:    Finalise the mmu module, deallocate all resources.

	PARAMETERS: In: psMMUContext - MMU context to deallocate
	RETURNS:    None.
******************************************************************************/
IMG_VOID
MMU_Finalise (MMU_CONTEXT *psMMUContext)
{
	IMG_UINT32 *pui32Tmp, i;
	SYS_DATA *psSysData;
	MMU_CONTEXT **ppsMMUContext;
#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE) || defined(FIX_HW_BRN_31620)
	PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO*)psMMUContext->psDevInfo;
	MMU_CONTEXT *psMMUContextList = (MMU_CONTEXT*)psDevInfo->pvMMUContextList;
#endif

	SysAcquireData(&psSysData);

#if defined(PDUMP)
	{
		IMG_UINT32 ui32Flags = PDUMP_FLAGS_CONTINUOUS;
		/* pdump the MMU context clear */
		PDUMPCOMMENT("Clear MMU context (MMU Context ID == %u)", psMMUContext->ui32PDumpMMUContextID);
		PDUMPCLEARMMUCONTEXT(PVRSRV_DEVICE_TYPE_SGX, psMMUContext->psDeviceNode->sDevId.pszPDumpDevName, psMMUContext->ui32PDumpMMUContextID, 2);

		/* pdump the PD free */
		PDUMPCOMMENT("Free page directory (PDDevPAddr == 0x" DEVPADDR_FMT ")",
				psMMUContext->sPDDevPAddr.uiAddr);

		PDUMPFREEPAGETABLE(&psMMUContext->psDeviceNode->sDevId, psMMUContext->hPDOSMemHandle, psMMUContext->pvPDCpuVAddr, SGX_MMU_PAGE_SIZE, ui32Flags, PDUMP_PT_UNIQUETAG);
#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
		PDUMPFREEPAGETABLE(&psMMUContext->psDeviceNode->sDevId, psDevInfo->hDummyPTPageOSMemHandle, psDevInfo->pvDummyPTPageCpuVAddr, SGX_MMU_PAGE_SIZE, ui32Flags, PDUMP_PT_UNIQUETAG);
		PDUMPFREEPAGETABLE(&psMMUContext->psDeviceNode->sDevId, psDevInfo->hDummyDataPageOSMemHandle, psDevInfo->pvDummyDataPageCpuVAddr, SGX_MMU_PAGE_SIZE, ui32Flags, PDUMP_PT_UNIQUETAG);
#endif
	}
#endif /* PDUMP */

	pui32Tmp = (IMG_UINT32 *)psMMUContext->pvPDCpuVAddr;

	if (pui32Tmp)
	{
		MakeKernelPageReadWrite(psMMUContext->pvPDCpuVAddr);
		/* initialise the PD to invalid address state */
		for(i=0; i<SGX_MMU_PD_SIZE; i++)
		{
			/* invalid, no read, no write, no cache consistency */
			pui32Tmp[i] = 0;
		}
		MakeKernelPageReadOnly(psMMUContext->pvPDCpuVAddr);
	}

	/*
		free the PD:
		depending on the specific system, the PD is allocated from system memory
		or device local memory.  For now, just look for at least a valid local heap/arena
	*/
	if(psMMUContext->psDeviceNode->psLocalDevMemArena == IMG_NULL)
	{
#if defined(FIX_HW_BRN_31620)
		PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO*)psMMUContext->psDevInfo;
#endif
		MakeKernelPageReadWrite(psMMUContext->pvPDCpuVAddr);
		OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
						SGX_MMU_PAGE_SIZE,
						psMMUContext->pvPDCpuVAddr,
						psMMUContext->hPDOSMemHandle);

#if defined(FIX_HW_BRN_31620)
		/* If this is the _last_ MMU context it must be the uKernel */
		if (!psMMUContextList->psNext)
		{
			PDUMPFREEPAGETABLE(&psMMUContext->psDeviceNode->sDevId, psDevInfo->hBRN31620DummyPageOSMemHandle, psDevInfo->pvBRN31620DummyPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, PDUMP_PT_UNIQUETAG);
			OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
							SGX_MMU_PAGE_SIZE,
							psDevInfo->pvBRN31620DummyPageCpuVAddr,
							psDevInfo->hBRN31620DummyPageOSMemHandle);

			PDUMPFREEPAGETABLE(&psMMUContext->psDeviceNode->sDevId, psDevInfo->hBRN31620DummyPTOSMemHandle, psDevInfo->pvBRN31620DummyPTCpuVAddr, SGX_MMU_PAGE_SIZE, 0, PDUMP_PT_UNIQUETAG);
			OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
							SGX_MMU_PAGE_SIZE,
							psDevInfo->pvBRN31620DummyPTCpuVAddr,
							psDevInfo->hBRN31620DummyPTOSMemHandle);
		}
#endif
#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
		/* if this is the last context free the dummy pages too */
		if(!psMMUContextList->psNext)
		{
			OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
							SGX_MMU_PAGE_SIZE,
							psDevInfo->pvDummyPTPageCpuVAddr,
							psDevInfo->hDummyPTPageOSMemHandle);
			OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
							SGX_MMU_PAGE_SIZE,
							psDevInfo->pvDummyDataPageCpuVAddr,
							psDevInfo->hDummyDataPageOSMemHandle);
		}
#endif
	}
	else
	{
		IMG_SYS_PHYADDR sSysPAddr;
		IMG_CPU_PHYADDR sCpuPAddr;

		/* derive the system physical address */
		sCpuPAddr = OSMapLinToCPUPhys(psMMUContext->hPDOSMemHandle,
									  psMMUContext->pvPDCpuVAddr);
		sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);

		/* unmap the CPU mapping */
		OSUnMapPhysToLin(psMMUContext->pvPDCpuVAddr,
							SGX_MMU_PAGE_SIZE,
							PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
							psMMUContext->hPDOSMemHandle);
		/* and free the memory. Note that the cast to IMG_UINTPTR_T is OK as we're local mem. */
		RA_Free (psMMUContext->psDeviceNode->psLocalDevMemArena, (IMG_UINTPTR_T)sSysPAddr.uiAddr, IMG_FALSE);

#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
		/* if this is the last context free the dummy pages too */
		if(!psMMUContextList->psNext)
		{
			/* free the Dummy PT Page */
			sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->hDummyPTPageOSMemHandle,
										  psDevInfo->pvDummyPTPageCpuVAddr);
			sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);

			/* unmap the CPU mapping */
			OSUnMapPhysToLin(psDevInfo->pvDummyPTPageCpuVAddr,
								SGX_MMU_PAGE_SIZE,
								PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
								psDevInfo->hDummyPTPageOSMemHandle);
			/* and free the memory */
			RA_Free (psMMUContext->psDeviceNode->psLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);

			/* free the Dummy Data Page */
			sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->hDummyDataPageOSMemHandle,
										  psDevInfo->pvDummyDataPageCpuVAddr);
			sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);

			/* unmap the CPU mapping */
			OSUnMapPhysToLin(psDevInfo->pvDummyDataPageCpuVAddr,
								SGX_MMU_PAGE_SIZE,
								PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
								psDevInfo->hDummyDataPageOSMemHandle);
			/* and free the memory */
			RA_Free (psMMUContext->psDeviceNode->psLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);
		}
#endif
#if defined(FIX_HW_BRN_31620)
		/* if this is the last context free the dummy pages too */
		if(!psMMUContextList->psNext)
		{
			/* free the Page */
			PDUMPFREEPAGETABLE(&psMMUContext->psDeviceNode->sDevId, psDevInfo->hBRN31620DummyPageOSMemHandle, psDevInfo->pvBRN31620DummyPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, PDUMP_PT_UNIQUETAG);

			sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->hBRN31620DummyPageOSMemHandle,
										  psDevInfo->pvBRN31620DummyPageCpuVAddr);
			sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);

			/* unmap the CPU mapping */
			OSUnMapPhysToLin(psDevInfo->pvBRN31620DummyPageCpuVAddr,
								SGX_MMU_PAGE_SIZE,
								PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
								psDevInfo->hBRN31620DummyPageOSMemHandle);
			/* and free the memory */
			RA_Free (psMMUContext->psDeviceNode->psLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);

			/* free the Dummy PT */
			PDUMPFREEPAGETABLE(&psMMUContext->psDeviceNode->sDevId, psDevInfo->hBRN31620DummyPTOSMemHandle, psDevInfo->pvBRN31620DummyPTCpuVAddr, SGX_MMU_PAGE_SIZE, 0, PDUMP_PT_UNIQUETAG);

			sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->hBRN31620DummyPTOSMemHandle,
										  psDevInfo->pvBRN31620DummyPTCpuVAddr);
			sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);

			/* unmap the CPU mapping */
			OSUnMapPhysToLin(psDevInfo->pvBRN31620DummyPTCpuVAddr,
								SGX_MMU_PAGE_SIZE,
								PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
								psDevInfo->hBRN31620DummyPTOSMemHandle);
			/* and free the memory */
			RA_Free (psMMUContext->psDeviceNode->psLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);
		}
#endif
	}

	PVR_DPF ((PVR_DBG_MESSAGE, "MMU_Finalise"));
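
	/*
	 * The unlink below uses the pointer-to-pointer idiom: walking a
	 * MMU_CONTEXT** lets one assignment remove the matching node with no
	 * special case for the list head. A minimal sketch of the same idiom
	 * on a hypothetical list type:
	 *
	 *     NODE **pp;
	 *     for (pp = &psHead; *pp != IMG_NULL; pp = &(*pp)->psNext)
	 *     {
	 *         if (*pp == psVictim) { *pp = psVictim->psNext; break; }
	 *     }
	 */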

	/* remove the MMU context from the list of MMU contexts */
	ppsMMUContext = (MMU_CONTEXT**)&psMMUContext->psDevInfo->pvMMUContextList;
	while(*ppsMMUContext)
	{
		if(*ppsMMUContext == psMMUContext)
		{
			/* remove item from the list */
			*ppsMMUContext = psMMUContext->psNext;
			break;
		}

		/* advance to the next context */
		ppsMMUContext = &((*ppsMMUContext)->psNext);
	}

	/* free the context itself. */
	OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(MMU_CONTEXT), psMMUContext, IMG_NULL);
	/* not nulling pointer, copy on stack */
}

/*!
******************************************************************************
	FUNCTION:   MMU_InsertHeap

	PURPOSE:    Copies PDEs from a shared/exported heap into the current MMU context.

	PARAMETERS: In:  psMMUContext - the mmu
	            In:  psMMUHeap - a shared/exported heap

	RETURNS:    None
******************************************************************************/
IMG_VOID
MMU_InsertHeap(MMU_CONTEXT *psMMUContext, MMU_HEAP *psMMUHeap)
{
	IMG_UINT32 *pui32PDCpuVAddr = (IMG_UINT32 *) psMMUContext->pvPDCpuVAddr;
	IMG_UINT32 *pui32KernelPDCpuVAddr = (IMG_UINT32 *) psMMUHeap->psMMUContext->pvPDCpuVAddr;
	IMG_UINT32 ui32PDEntry;
#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
	IMG_BOOL bInvalidateDirectoryCache = IMG_FALSE;
#endif

	/* advance to the first entry */
	pui32PDCpuVAddr += psMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> psMMUHeap->ui32PDShift;
	pui32KernelPDCpuVAddr += psMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> psMMUHeap->ui32PDShift;
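
	/*
	 * Illustration (address hypothetical): with the common 4KB page
	 * configuration ui32PDShift is 22, so a shared heap based at device
	 * virtual address 0xD0000000 advances both pointers by
	 * 0xD0000000 >> 22 == 832 entries; the copy loop below then indexes
	 * both directories from PDE 832.
	 */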

	/*
		update the PD range relating to the heap's
		device virtual address range
	*/
#if defined(PDUMP)
	PDUMPCOMMENT("Page directory shared heap range copy");
	PDUMPCOMMENT("  (Source heap MMU Context ID == %u, PT count == 0x%x)",
			psMMUHeap->psMMUContext->ui32PDumpMMUContextID,
			psMMUHeap->ui32PageTableCount);
	PDUMPCOMMENT("  (Destination MMU Context ID == %u)", psMMUContext->ui32PDumpMMUContextID);
#endif /* PDUMP */
#ifdef SUPPORT_SGX_MMU_BYPASS
	EnableHostAccess(psMMUContext);
#endif

	for (ui32PDEntry = 0; ui32PDEntry < psMMUHeap->ui32PageTableCount; ui32PDEntry++)
	{
#if (!defined(SUPPORT_SGX_MMU_DUMMY_PAGE)) && (!defined(FIX_HW_BRN_31620))
		/* check we have invalidated target PDEs */
		PVR_ASSERT(pui32PDCpuVAddr[ui32PDEntry] == 0);
#endif
		MakeKernelPageReadWrite(psMMUContext->pvPDCpuVAddr);
		/* copy over the PDEs */
		pui32PDCpuVAddr[ui32PDEntry] = pui32KernelPDCpuVAddr[ui32PDEntry];
		MakeKernelPageReadOnly(psMMUContext->pvPDCpuVAddr);
		if (pui32PDCpuVAddr[ui32PDEntry])
		{
			/* Ensure the shared heap allocation is mapped into the context/PD
			 * for the active pdump process/app. The PTs and backing physical
			 * memory should also be pdumped (elsewhere):
			 *		MALLOC (PT)
			 *		LDB (init PT)
			 *		MALLOC (data page)
			 *		WRW (PTE->data page)
			 *		LDB (init data page) -- could be useful to ensure page is initialised
			 */
		#if defined(PDUMP)
			//PDUMPCOMMENT("MMU_InsertHeap: Mapping shared heap to new context %d (%s)", psMMUContext->ui32PDumpMMUContextID, (psMMUContext->bPDumpActive) ? "active" : "");
		#if defined(SUPPORT_PDUMP_MULTI_PROCESS)
			if(psMMUContext->bPDumpActive)
		#endif /* SUPPORT_PDUMP_MULTI_PROCESS */
			{
				PDUMPPDENTRIES(&psMMUHeap->sMMUAttrib, psMMUContext->hPDOSMemHandle, (IMG_VOID *) &pui32PDCpuVAddr[ui32PDEntry], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
			}
		#endif
#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
			bInvalidateDirectoryCache = IMG_TRUE;
#endif
		}
	}

#ifdef SUPPORT_SGX_MMU_BYPASS
	DisableHostAccess(psMMUContext);
#endif

#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
	if (bInvalidateDirectoryCache)
	{
		/* This is actually nothing to do with multiple mem contexts, but with
			the directory cache. In the single-context implementation of the MMU,
			the directory "cache" is actually a copy of the page directory memory,
			and requires updating whenever the page directory changes, even if
			there was no previous value in a particular entry.
		*/
		MMU_InvalidateDirectoryCache(psMMUContext->psDevInfo);
	}
#endif
}

/*!
******************************************************************************
	FUNCTION:   MMU_UnmapPagesAndFreePTs

	PURPOSE:    Unmap pages, invalidate their virtual addresses and try to
	            free the PTs.

	PARAMETERS: In:  psMMUHeap - the mmu.
	            In:  sDevVAddr - the device virtual address.
	            In:  ui32PageCount - page count
	            In:  hUniqueTag - A unique ID for use as a tag identifier

	RETURNS:    None
******************************************************************************/
static IMG_VOID
MMU_UnmapPagesAndFreePTs (MMU_HEAP *psMMUHeap,
						  IMG_DEV_VIRTADDR sDevVAddr,
						  IMG_UINT32 ui32PageCount,
						  IMG_HANDLE hUniqueTag)
{
	IMG_DEV_VIRTADDR	sTmpDevVAddr;
	IMG_UINT32			i;
	IMG_UINT32			ui32PDIndex;
	IMG_UINT32			ui32PTIndex;
	IMG_UINT32			*pui32Tmp;
	IMG_BOOL			bInvalidateDirectoryCache = IMG_FALSE;

#if !defined (PDUMP)
	PVR_UNREFERENCED_PARAMETER(hUniqueTag);
#endif
	/* setup tmp devvaddr to base of allocation */
	sTmpDevVAddr = sDevVAddr;

	for(i=0; i<ui32PageCount; i++)
	{
		MMU_PT_INFO **ppsPTInfoList;

		/* find the index/offset in PD entries */
		ui32PDIndex = sTmpDevVAddr.uiAddr >> psMMUHeap->ui32PDShift;

		/* and advance to the first PT info list */
		ppsPTInfoList = &psMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];

		{
			/* find the index/offset of the first PT in the first PT page */
			ui32PTIndex = (sTmpDevVAddr.uiAddr & psMMUHeap->ui32PTMask) >> psMMUHeap->ui32PTShift;
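
			/*
			 * Worked example (address hypothetical): with 4KB data pages
			 * (ui32PDShift == 22, ui32PTMask == 0x003FF000,
			 * ui32PTShift == 12) a device virtual address of 0x00C02000
			 * gives ui32PDIndex = 0x00C02000 >> 22 = 3 and
			 * ui32PTIndex = (0x00C02000 & 0x003FF000) >> 12 = 2.
			 */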

			/* Is the PT page valid? */
			if (!ppsPTInfoList[0])
			{
				/*
					With sparse mappings we expect that the PT could be freed
					before we reach the end of it, as the unmapped pages don't
					bump ui32ValidPTECount, so it can reach zero before we reach
					the end of the PT.
				*/
				if (!psMMUHeap->bHasSparseMappings)
				{
					PVR_DPF((PVR_DBG_MESSAGE, "MMU_UnmapPagesAndFreePTs: Invalid PT for alloc at VAddr:0x%08X (VaddrIni:0x%08X AllocPage:%u) PDIdx:%u PTIdx:%u", sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr, i, ui32PDIndex, ui32PTIndex));
				}

				/* advance the sTmpDevVAddr by one page */
				sTmpDevVAddr.uiAddr += psMMUHeap->ui32DataPageSize;

				/* Try to unmap the remaining allocation pages */
				continue;
			}

			/* setup pointer to the first entry in the PT page */
			pui32Tmp = (IMG_UINT32*)ppsPTInfoList[0]->PTPageCpuVAddr;

			/* Is PTPageCpuVAddr valid? */
			if (!pui32Tmp)
			{
				continue;
			}

			CheckPT(ppsPTInfoList[0]);

			/* Decrement the valid page count only if the current page is valid */
			if (pui32Tmp[ui32PTIndex] & SGX_MMU_PTE_VALID)
			{
				ppsPTInfoList[0]->ui32ValidPTECount--;
			}
			else
			{
				if (!psMMUHeap->bHasSparseMappings)
				{
					PVR_DPF((PVR_DBG_MESSAGE, "MMU_UnmapPagesAndFreePTs: Page is already invalid for alloc at VAddr:0x%08X (VAddrIni:0x%08X AllocPage:%u) PDIdx:%u PTIdx:%u", sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr, i, ui32PDIndex, ui32PTIndex));
				}
			}

			/* The valid PTE count should not go below zero */
			PVR_ASSERT((IMG_INT32)ppsPTInfoList[0]->ui32ValidPTECount >= 0);
			MakeKernelPageReadWrite(ppsPTInfoList[0]->PTPageCpuVAddr);
#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
			/* point the PT entry to the dummy data page */
			pui32Tmp[ui32PTIndex] = (psMMUHeap->psMMUContext->psDevInfo->sDummyDataDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
									| SGX_MMU_PTE_VALID;
#else
			/* invalidate entry */
#if defined(FIX_HW_BRN_31620)
			BRN31620InvalidatePageTableEntry(psMMUHeap->psMMUContext, ui32PDIndex, ui32PTIndex, &pui32Tmp[ui32PTIndex]);
#else
			pui32Tmp[ui32PTIndex] = 0;
#endif
#endif
			MakeKernelPageReadOnly(ppsPTInfoList[0]->PTPageCpuVAddr);
			CheckPT(ppsPTInfoList[0]);
		}

		/*
			Free the page table if we can.
		*/
		if (ppsPTInfoList[0] && (ppsPTInfoList[0]->ui32ValidPTECount == 0))
		{
#if defined(FIX_HW_BRN_31620)
			if (BRN31620FreePageTable(psMMUHeap, ui32PDIndex) == IMG_TRUE)
			{
				bInvalidateDirectoryCache = IMG_TRUE;
			}
#else
			_DeferredFreePageTable(psMMUHeap, ui32PDIndex - psMMUHeap->ui32PDBaseIndex, IMG_TRUE);
			bInvalidateDirectoryCache = IMG_TRUE;
#endif
		}

		/* advance the sTmpDevVAddr by one page */
		sTmpDevVAddr.uiAddr += psMMUHeap->ui32DataPageSize;
	}

	if(bInvalidateDirectoryCache)
	{
		MMU_InvalidateDirectoryCache(psMMUHeap->psMMUContext->psDevInfo);
	}
	else
	{
		MMU_InvalidatePageTableCache(psMMUHeap->psMMUContext->psDevInfo);
	}

#if defined(PDUMP)
	MMU_PDumpPageTables(psMMUHeap,
						sDevVAddr,
						psMMUHeap->ui32DataPageSize * ui32PageCount,
						IMG_TRUE,
						hUniqueTag);
#endif /* #if defined(PDUMP) */
}

/*!
******************************************************************************
	FUNCTION:   MMU_FreePageTables

	PURPOSE:    Callback from RA_Free to zero the page table entries used by
	            freed spans.

	PARAMETERS: In: pvMMUHeap
	            In: uStart
	            In: uEnd
	            In: hUniqueTag - A unique ID for use as a tag identifier
	RETURNS:    None
******************************************************************************/
static IMG_VOID MMU_FreePageTables(IMG_PVOID pvMMUHeap,
                                   IMG_SIZE_T uStart,
                                   IMG_SIZE_T uEnd,
                                   IMG_HANDLE hUniqueTag)
{
	MMU_HEAP *pMMUHeap = (MMU_HEAP*)pvMMUHeap;
	IMG_DEV_VIRTADDR Start;

	Start.uiAddr = (IMG_UINT32)uStart;
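
	/*
	 * The span is given in bytes; shifting by ui32PTShift (the data page
	 * shift) converts it to a page count. For example (span hypothetical),
	 * a freed span of 0x20000 bytes with 4KB data pages unmaps
	 * 0x20000 >> 12 == 32 pages.
	 */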
	MMU_UnmapPagesAndFreePTs(pMMUHeap, Start, (IMG_UINT32)((uEnd - uStart) >> pMMUHeap->ui32PTShift), hUniqueTag);
}

/*!
******************************************************************************
	FUNCTION:   MMU_Create

	PURPOSE:    Create an mmu device virtual heap.

	PARAMETERS: In:  psMMUContext - MMU context
	            In:  psDevArena - device memory resource arena
	            Out: ppsVMArena - virtual mapping arena
	            Out: ppsMMUAttrib - per-heap PDump MMU attributes
	RETURNS:    MMU_HEAP* - the new heap, or IMG_NULL on failure
******************************************************************************/
MMU_HEAP *
MMU_Create (MMU_CONTEXT *psMMUContext,
			DEV_ARENA_DESCRIPTOR *psDevArena,
			RA_ARENA **ppsVMArena,
			PDUMP_MMU_ATTRIB **ppsMMUAttrib)
{
	MMU_HEAP *pMMUHeap;
	IMG_UINT32 ui32ScaleSize;

	PVR_UNREFERENCED_PARAMETER(ppsMMUAttrib);

	PVR_ASSERT (psDevArena != IMG_NULL);

	if (psDevArena == IMG_NULL)
	{
		PVR_DPF((PVR_DBG_ERROR, "MMU_Create: invalid parameter"));
		return IMG_NULL;
	}

	OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
				 sizeof (MMU_HEAP),
				 (IMG_VOID **)&pMMUHeap, IMG_NULL,
				 "MMU Heap");
	if (pMMUHeap == IMG_NULL)
	{
		PVR_DPF((PVR_DBG_ERROR, "MMU_Create: ERROR call to OSAllocMem failed"));
		return IMG_NULL;
	}

	pMMUHeap->psMMUContext = psMMUContext;
	pMMUHeap->psDevArena = psDevArena;

	/*
		generate page table and data page mask and shift values
		based on the data page size
	*/
	switch(pMMUHeap->psDevArena->ui32DataPageSize)
	{
		case 0x1000:
			ui32ScaleSize = 0;
			pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_4K;
			break;
#if defined(SGX_FEATURE_VARIABLE_MMU_PAGE_SIZE)
		case 0x4000:
			ui32ScaleSize = 2;
			pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_16K;
			break;
		case 0x10000:
			ui32ScaleSize = 4;
			pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_64K;
			break;
		case 0x40000:
			ui32ScaleSize = 6;
			pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_256K;
			break;
		case 0x100000:
			ui32ScaleSize = 8;
			pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_1M;
			break;
		case 0x400000:
			ui32ScaleSize = 10;
			pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_4M;
			break;
#endif /* #if defined(SGX_FEATURE_VARIABLE_MMU_PAGE_SIZE) */
		default:
			PVR_DPF((PVR_DBG_ERROR, "MMU_Create: invalid data page size"));
			goto ErrorFreeHeap;
	}

	/* number of bits of address offset into the data page */
	pMMUHeap->ui32DataPageSize = psDevArena->ui32DataPageSize;
	pMMUHeap->ui32DataPageBitWidth = SGX_MMU_PAGE_SHIFT + ui32ScaleSize;
	pMMUHeap->ui32DataPageMask = pMMUHeap->ui32DataPageSize - 1;
	/* number of bits of address indexing into a pagetable */
	pMMUHeap->ui32PTShift = pMMUHeap->ui32DataPageBitWidth;
	pMMUHeap->ui32PTBitWidth = SGX_MMU_PT_SHIFT - ui32ScaleSize;
	pMMUHeap->ui32PTMask = SGX_MMU_PT_MASK & (SGX_MMU_PT_MASK<<ui32ScaleSize);
	pMMUHeap->ui32PTSize = (IMG_UINT32)(1UL<<pMMUHeap->ui32PTBitWidth) * sizeof(IMG_UINT32);

	/* note: PT size must be at least 4 entries, even for 4MB data page size */
	if(pMMUHeap->ui32PTSize < 4 * sizeof(IMG_UINT32))
	{
		pMMUHeap->ui32PTSize = 4 * sizeof(IMG_UINT32);
	}
	pMMUHeap->ui32PTNumEntriesAllocated = pMMUHeap->ui32PTSize >> 2;

	/* find the number of actual PT entries per PD entry range. For 4MB data
	 * pages we only use the first entry, although the PT has 16 byte
	 * allocation/alignment (because the 4 LSBs of the PDE are reserved for
	 * control) */
	pMMUHeap->ui32PTNumEntriesUsable = (IMG_UINT32)(1UL << pMMUHeap->ui32PTBitWidth);

	/* number of bits of address indexing into a page directory */
	pMMUHeap->ui32PDShift = pMMUHeap->ui32PTBitWidth + pMMUHeap->ui32PTShift;
	pMMUHeap->ui32PDBitWidth = SGX_FEATURE_ADDRESS_SPACE_SIZE - pMMUHeap->ui32PTBitWidth - pMMUHeap->ui32DataPageBitWidth;
	pMMUHeap->ui32PDMask = SGX_MMU_PD_MASK & (SGX_MMU_PD_MASK>>(32-SGX_FEATURE_ADDRESS_SPACE_SIZE));
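
	/*
	 * Worked example (assuming the usual SGX constants SGX_MMU_PAGE_SHIFT
	 * == 12, SGX_MMU_PT_SHIFT == 10 and a 32-bit address space): for 4KB
	 * data pages ui32ScaleSize is 0, giving
	 *
	 *     ui32DataPageBitWidth = 12       (bits 11:0  - offset in page)
	 *     ui32PTShift          = 12
	 *     ui32PTBitWidth       = 10       (bits 21:12 - PT index)
	 *     ui32PTSize           = 1024 * 4 (one 4KB page per PT)
	 *     ui32PDShift          = 22       (bits 31:22 - PD index)
	 *     ui32PDBitWidth       = 10
	 *
	 * matching the PD/PT/DP address decomposition described at the top of
	 * this file.
	 */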

	/* External system cache violates this rule */
#if !defined (SUPPORT_EXTERNAL_SYSTEM_CACHE)
	/*
		The heap must start on a PT boundary to avoid PT sharing across heaps.
		The only exception is the first heap, which can start at any address
		from 0 to the end of the first PT boundary.
	*/
	if(psDevArena->BaseDevVAddr.uiAddr > (pMMUHeap->ui32DataPageMask | pMMUHeap->ui32PTMask))
	{
		/*
			if for some reason the first heap starts after the end of the first PT boundary
			but is not aligned to a PT boundary then the assert will trigger unnecessarily
		*/
		PVR_ASSERT ((psDevArena->BaseDevVAddr.uiAddr
						& (pMMUHeap->ui32DataPageMask
							| pMMUHeap->ui32PTMask)) == 0);
	}
#endif
	/* how many PT entries do we need? */
	pMMUHeap->ui32PTETotalUsable = pMMUHeap->psDevArena->ui32Size >> pMMUHeap->ui32PTShift;

	/* calculate the PD base index for the heap (required for page mapping) */
	pMMUHeap->ui32PDBaseIndex = (pMMUHeap->psDevArena->BaseDevVAddr.uiAddr & pMMUHeap->ui32PDMask) >> pMMUHeap->ui32PDShift;

	/*
		how many page tables?
		round the entry count up to the nearest page-table-sized block
	*/
	pMMUHeap->ui32PageTableCount = (pMMUHeap->ui32PTETotalUsable + pMMUHeap->ui32PTNumEntriesUsable - 1)
										>> pMMUHeap->ui32PTBitWidth;
	PVR_ASSERT(pMMUHeap->ui32PageTableCount > 0);
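
	/*
	 * Worked example (heap size hypothetical): a 256MB heap with 4KB data
	 * pages needs 0x10000000 >> 12 == 65536 PT entries; at 1024 usable
	 * entries per PT that rounds up to (65536 + 1023) >> 10 == 64 page
	 * tables.
	 */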

	/* Create the arena */
	pMMUHeap->psVMArena = RA_Create(psDevArena->pszName,
									psDevArena->BaseDevVAddr.uiAddr,
									psDevArena->ui32Size,
									IMG_NULL,
									MIN(HOST_PAGESIZE(), pMMUHeap->ui32DataPageSize),
									IMG_NULL,
									IMG_NULL,
									&MMU_FreePageTables,
									pMMUHeap);

	if (pMMUHeap->psVMArena == IMG_NULL)
	{
		PVR_DPF((PVR_DBG_ERROR, "MMU_Create: ERROR call to RA_Create failed"));
		goto ErrorFreePagetables;
	}

#if defined(PDUMP)
	/* setup per-heap PDUMP MMU attributes */
	MMU_SetPDumpAttribs(&pMMUHeap->sMMUAttrib,
						psMMUContext->psDeviceNode,
						pMMUHeap->ui32DataPageMask,
						pMMUHeap->ui32PTSize);
	*ppsMMUAttrib = &pMMUHeap->sMMUAttrib;

	PDUMPCOMMENT("Create MMU device from arena %s (Size == 0x%x, DataPageSize == 0x%x, BaseDevVAddr == 0x%x)",
			psDevArena->pszName,
			psDevArena->ui32Size,
			pMMUHeap->ui32DataPageSize,
			psDevArena->BaseDevVAddr.uiAddr);
#endif /* PDUMP */

	/*
		And return the RA for VM arena management
	*/
	*ppsVMArena = pMMUHeap->psVMArena;

	return pMMUHeap;

	/* drop into here if errors */
ErrorFreePagetables:
	_DeferredFreePageTables (pMMUHeap);

ErrorFreeHeap:
	OSFreeMem (PVRSRV_OS_PAGEABLE_HEAP, sizeof(MMU_HEAP), pMMUHeap, IMG_NULL);
	/* not nulling pointer, out of scope */

	return IMG_NULL;
}

/*!
******************************************************************************
	FUNCTION:   MMU_Delete

	PURPOSE:    Delete an MMU device virtual heap.

	PARAMETERS: In:  pMMUHeap - The MMU heap to delete.
	RETURNS:    None
******************************************************************************/
IMG_VOID
MMU_Delete (MMU_HEAP *pMMUHeap)
{
	if (pMMUHeap != IMG_NULL)
	{
		PVR_DPF ((PVR_DBG_MESSAGE, "MMU_Delete"));

		if(pMMUHeap->psVMArena)
		{
			RA_Delete (pMMUHeap->psVMArena);
		}

#if defined(PDUMP)
		PDUMPCOMMENT("Delete MMU device from arena %s (BaseDevVAddr == 0x%x, PT count for deferred free == 0x%x)",
				pMMUHeap->psDevArena->pszName,
				pMMUHeap->psDevArena->BaseDevVAddr.uiAddr,
				pMMUHeap->ui32PageTableCount);
#endif /* PDUMP */

#ifdef SUPPORT_SGX_MMU_BYPASS
		EnableHostAccess(pMMUHeap->psMMUContext);
#endif
		_DeferredFreePageTables (pMMUHeap);
#ifdef SUPPORT_SGX_MMU_BYPASS
		DisableHostAccess(pMMUHeap->psMMUContext);
#endif

		OSFreeMem (PVRSRV_OS_PAGEABLE_HEAP, sizeof(MMU_HEAP), pMMUHeap, IMG_NULL);
		/* not nulling pointer, copy on stack */
	}
}

/*!
******************************************************************************
	FUNCTION:   MMU_Alloc
	PURPOSE:    Allocate space in an mmu's virtual address space.
	PARAMETERS: In:  pMMUHeap - MMU to allocate on.
	            In:  uSize - Size in bytes to allocate.
	            Out: pActualSize - If non null receives actual size allocated.
	            In:  uFlags - Allocation flags.
	            In:  uDevVAddrAlignment - Required alignment.
	            Out: psDevVAddr - Receives base address of allocation.
	RETURNS:    IMG_TRUE - Success
	            IMG_FALSE - Failure
******************************************************************************/
IMG_BOOL
MMU_Alloc (MMU_HEAP *pMMUHeap,
		   IMG_SIZE_T uSize,
		   IMG_SIZE_T *pActualSize,
		   IMG_UINT32 uFlags,
		   IMG_UINT32 uDevVAddrAlignment,
		   IMG_DEV_VIRTADDR *psDevVAddr)
{
	IMG_BOOL bStatus;

	PVR_DPF ((PVR_DBG_MESSAGE,
		"MMU_Alloc: uSize=0x%" SIZE_T_FMT_LEN "x, flags=0x%x, align=0x%x",
		uSize, uFlags, uDevVAddrAlignment));

	/*
		Only allocate a VM address if the caller did not supply one
	*/
	if((uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR) == 0)
	{
		IMG_UINTPTR_T uiAddr;

		bStatus = RA_Alloc (pMMUHeap->psVMArena,
							uSize,
							pActualSize,
							IMG_NULL,
							0,
							uDevVAddrAlignment,
							0,
							IMG_NULL,
							0,
							&uiAddr);
		if(!bStatus)
		{
			IMG_CHAR asCurrentProcessName[128];

			PVR_DPF((PVR_DBG_ERROR,"MMU_Alloc: RA_Alloc of VMArena failed"));
			OSGetCurrentProcessNameKM(asCurrentProcessName, 128);
			PVR_DPF((PVR_DBG_ERROR,"MMU_Alloc: Alloc of DevVAddr failed from heap %s ID%d, pid: %d, task: %s",
											pMMUHeap->psDevArena->pszName,
											pMMUHeap->psDevArena->ui32HeapID,
											OSGetCurrentProcessIDKM(),
											asCurrentProcessName));
		#if defined (MEM_TRACK_INFO_DEBUG)
			PVRSRVPrintMemTrackInfo(0);
		#endif

			return bStatus;
		}

		psDevVAddr->uiAddr = IMG_CAST_TO_DEVVADDR_UINT(uiAddr);
	}

	#ifdef SUPPORT_SGX_MMU_BYPASS
	EnableHostAccess(pMMUHeap->psMMUContext);
	#endif

	/* allocate page tables to cover the allocation as required */
	bStatus = _DeferredAllocPagetables(pMMUHeap, *psDevVAddr, (IMG_UINT32)uSize);

	#ifdef SUPPORT_SGX_MMU_BYPASS
	DisableHostAccess(pMMUHeap->psMMUContext);
	#endif

	if (!bStatus)
	{
		PVR_DPF((PVR_DBG_ERROR,"MMU_Alloc: _DeferredAllocPagetables failed"));
		PVR_DPF((PVR_DBG_ERROR,"MMU_Alloc: Failed to alloc pagetable(s) for DevVAddr 0x%8.8x from heap %s ID%d",
										psDevVAddr->uiAddr,
										pMMUHeap->psDevArena->pszName,
										pMMUHeap->psDevArena->ui32HeapID));
		if((uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR) == 0)
		{
			/* free the VM address */
			RA_Free (pMMUHeap->psVMArena, psDevVAddr->uiAddr, IMG_FALSE);
		}
	}

	return bStatus;
}

/*!
******************************************************************************
	FUNCTION:   MMU_Free
	PURPOSE:    Free space in an mmu's virtual address space.
	PARAMETERS: In:  pMMUHeap - MMU to deallocate on.
	            In:  DevVAddr - Base address to deallocate.
	            In:  ui32Size - Size of the allocation in bytes.
	RETURNS:    None
******************************************************************************/
IMG_VOID
MMU_Free (MMU_HEAP *pMMUHeap, IMG_DEV_VIRTADDR DevVAddr, IMG_UINT32 ui32Size)
{
	PVR_ASSERT (pMMUHeap != IMG_NULL);

	if (pMMUHeap == IMG_NULL)
	{
		PVR_DPF((PVR_DBG_ERROR, "MMU_Free: invalid parameter"));
		return;
	}

	PVR_DPF((PVR_DBG_MESSAGE, "MMU_Free: Freeing DevVAddr 0x%08X from heap %s ID%d",
								DevVAddr.uiAddr,
								pMMUHeap->psDevArena->pszName,
								pMMUHeap->psDevArena->ui32HeapID));

	if((DevVAddr.uiAddr >= pMMUHeap->psDevArena->BaseDevVAddr.uiAddr) &&
		(DevVAddr.uiAddr + ui32Size <= pMMUHeap->psDevArena->BaseDevVAddr.uiAddr + pMMUHeap->psDevArena->ui32Size))
	{
		RA_Free (pMMUHeap->psVMArena, DevVAddr.uiAddr, IMG_TRUE);
		return;
	}

	PVR_DPF((PVR_DBG_ERROR,"MMU_Free: Couldn't free DevVAddr %08X from heap %s ID%d (not in range of heap)",
									DevVAddr.uiAddr,
									pMMUHeap->psDevArena->pszName,
									pMMUHeap->psDevArena->ui32HeapID));
}

/*!
******************************************************************************
	FUNCTION:   MMU_Enable

	PURPOSE:    Enable an mmu. Establishes page tables, takes the mmu out
	            of bypass and waits for the mmu to acknowledge enabled.

	PARAMETERS: In:  pMMUHeap - the mmu
	RETURNS:    None
******************************************************************************/
IMG_VOID
MMU_Enable (MMU_HEAP *pMMUHeap)
{
	PVR_UNREFERENCED_PARAMETER(pMMUHeap);
	/* SGX mmu is always enabled (stub function) */
}

/*!
******************************************************************************
	FUNCTION:   MMU_Disable

	PURPOSE:    Disable an mmu, takes the mmu into bypass.

	PARAMETERS: In:  pMMUHeap - the mmu
	RETURNS:    None
******************************************************************************/
IMG_VOID
MMU_Disable (MMU_HEAP *pMMUHeap)
{
	PVR_UNREFERENCED_PARAMETER(pMMUHeap);
	/* SGX mmu is always enabled (stub function) */
}

#if defined(FIX_HW_BRN_31620)
/*!
******************************************************************************
	FUNCTION:   MMU_GetCacheFlushRange

	PURPOSE:    Gets the PD cache flush range mask for an mmu context and
	            resets it for the next set of allocations.

	PARAMETERS: In:  pMMUContext - the mmu context
	            Out: pui32RangeMask - Bit mask showing which PD cache
	                 lines have changed
	RETURNS:    None
******************************************************************************/
IMG_VOID MMU_GetCacheFlushRange(MMU_CONTEXT *pMMUContext, IMG_UINT32 *pui32RangeMask)
{
	IMG_UINT32 i;

	for (i=0;i<BRN31620_CACHE_FLUSH_INDEX_SIZE;i++)
	{
		pui32RangeMask[i] = pMMUContext->ui32PDChangeMask[i];

		/* Clear bit mask for the next set of allocations */
		pMMUContext->ui32PDChangeMask[i] = 0;
	}
}

/*!
******************************************************************************
	FUNCTION:   MMU_GetPDPhysAddr

	PURPOSE:    Gets the device physical address of the mmu context's PD.

	PARAMETERS: In:  pMMUContext - the mmu context
	            Out: psDevPAddr - Address of PD
	RETURNS:    None
******************************************************************************/
IMG_VOID MMU_GetPDPhysAddr(MMU_CONTEXT *pMMUContext, IMG_DEV_PHYADDR *psDevPAddr)
{
	*psDevPAddr = pMMUContext->sPDDevPAddr;
}

#endif
3346 #if defined(PDUMP)
3347 /*!
3348 ******************************************************************************
3349         FUNCTION:   MMU_PDumpPageTables
3351         PURPOSE:    PDump the linear mapping for a range of pages at a specified
3352                     virtual address.
3354         PARAMETERS: In:  pMMUHeap - the mmu.
3355                     In:  DevVAddr - the device virtual address.
3356                     In:  uSize - size of memory range in bytes
3357                     In:  hUniqueTag - A unique ID for use as a tag identifier
3358         RETURNS:    None
3359 ******************************************************************************/
3360 static IMG_VOID
3361 MMU_PDumpPageTables     (MMU_HEAP *pMMUHeap,
3362                                          IMG_DEV_VIRTADDR DevVAddr,
3363                                          IMG_SIZE_T uSize,
3364                                          IMG_BOOL bForUnmap,
3365                                          IMG_HANDLE hUniqueTag)
3367         IMG_UINT32      ui32NumPTEntries;
3368         IMG_UINT32      ui32PTIndex;
3369         IMG_UINT32      *pui32PTEntry;
3371         MMU_PT_INFO **ppsPTInfoList;
3372         IMG_UINT32 ui32PDIndex;
3373         IMG_UINT32 ui32PTDumpCount;
3375 #if defined(FIX_HW_BRN_31620)
3376         PVRSRV_SGXDEV_INFO *psDevInfo = pMMUHeap->psMMUContext->psDevInfo;
3377 #endif
3378         /* find number of PT entries to dump */
3379         ui32NumPTEntries = (IMG_UINT32)((uSize + pMMUHeap->ui32DataPageMask) >> pMMUHeap->ui32PTShift);
3381         /* find the index/offset in PD entries  */
3382         ui32PDIndex = DevVAddr.uiAddr >> pMMUHeap->ui32PDShift;
3384         /* set the base PT info */
3385         ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
3387         /* find the index/offset of the first PT entry in the first PT page */
3388         ui32PTIndex = (DevVAddr.uiAddr & pMMUHeap->ui32PTMask) >> pMMUHeap->ui32PTShift;
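        /* Worked example (illustrative, assuming 4kB data pages so
           ui32PDShift == 22, ui32PTShift == 12 and ui32PTMask == 0x003FF000):
           DevVAddr 0x0A123456 gives ui32PDIndex = 0x0A123456 >> 22 = 0x28 and
           ui32PTIndex = (0x0A123456 & 0x003FF000) >> 12 = 0x123. */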
3390         /* pdump the PT Page modification */
3391         PDUMPCOMMENT("Page table mods (num entries == %08X) %s", ui32NumPTEntries, bForUnmap ? "(for unmap)" : "");
3393         /* walk the PT pages, dumping as we go */
3394         while(ui32NumPTEntries > 0)
3395         {
3396                 MMU_PT_INFO* psPTInfo = *ppsPTInfoList++;
3398                 if(ui32NumPTEntries <= pMMUHeap->ui32PTNumEntriesUsable - ui32PTIndex)
3399                 {
3400                         ui32PTDumpCount = ui32NumPTEntries;
3401                 }
3402                 else
3403                 {
3404                         ui32PTDumpCount = pMMUHeap->ui32PTNumEntriesUsable - ui32PTIndex;
3405                 }
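                /* i.e. ui32PTDumpCount = min(ui32NumPTEntries, entries left in
                   this PT page after ui32PTIndex), so at most one PT page is
                   dumped per iteration */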
3407                 if (psPTInfo)
3408                 {
3409 #if defined(FIX_HW_BRN_31620)
3410                         IMG_UINT32 i;
3411 #endif
3412                         IMG_UINT32 ui32Flags = 0;
3413 #if defined(SUPPORT_PDUMP_MULTI_PROCESS)
3414                         ui32Flags |= ( MMU_IsHeapShared(pMMUHeap) ) ? PDUMP_FLAGS_PERSISTENT : 0;
3415 #endif
3416                         pui32PTEntry = (IMG_UINT32*)psPTInfo->PTPageCpuVAddr;
3417 #if defined(FIX_HW_BRN_31620)
3418                         if ((ui32PDIndex % (BRN31620_PDE_CACHE_FILL_SIZE/BRN31620_PT_ADDRESS_RANGE_SIZE)) == BRN31620_DUMMY_PDE_INDEX)
3419                         {
3420                                 for (i=ui32PTIndex;i<(ui32PTIndex + ui32PTDumpCount);i++)
3421                                 {
3422                                         if (pui32PTEntry[i] == ((psDevInfo->sBRN31620DummyPageDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
3423                                                                                         | SGX_MMU_PTE_DUMMY_PAGE
3424                                                                                         | SGX_MMU_PTE_READONLY
3425                                                                                         | SGX_MMU_PTE_VALID))
3426                                         {
3427                                                 PDUMPMEMPTENTRIES(&pMMUHeap->sMMUAttrib, psPTInfo->hPTPageOSMemHandle, (IMG_VOID *) &pui32PTEntry[i], sizeof(IMG_UINT32), ui32Flags, IMG_FALSE, PDUMP_PT_UNIQUETAG, PDUMP_PD_UNIQUETAG);
3428                                         }
3429                                         else
3430                                         {
3431                                                 PDUMPMEMPTENTRIES(&pMMUHeap->sMMUAttrib, psPTInfo->hPTPageOSMemHandle, (IMG_VOID *) &pui32PTEntry[i], sizeof(IMG_UINT32), ui32Flags, IMG_FALSE, PDUMP_PT_UNIQUETAG, hUniqueTag);
3432                                         }
3433                                 }
3434                         }
3435                         else
3436 #endif
3437                         {
3438                                 PDUMPMEMPTENTRIES(&pMMUHeap->sMMUAttrib, psPTInfo->hPTPageOSMemHandle, (IMG_VOID *) &pui32PTEntry[ui32PTIndex], ui32PTDumpCount * sizeof(IMG_UINT32), ui32Flags, IMG_FALSE, PDUMP_PT_UNIQUETAG, hUniqueTag);
3439                         }
3440                 }
3442                 /* decrement PT entries left */
3443                 ui32NumPTEntries -= ui32PTDumpCount;
3445                 /* reset offset in page */
3446                 ui32PTIndex = 0;
3448 #if defined(FIX_HW_BRN_31620)
3449                 /* For 31620 we need to know which PD index we're working on */
3450                 ui32PDIndex++;
3451 #endif
3452         }
3454         PDUMPCOMMENT("Finished page table mods %s", bForUnmap ? "(for unmap)" : "");
3455 }
3456 #endif /* #if defined(PDUMP) */
3459 /*!
3460 ******************************************************************************
3461         FUNCTION:   MMU_MapPage
3463         PURPOSE:    Create a mapping for one page at a specified virtual address.
3465         PARAMETERS: In:  pMMUHeap - the mmu.
3466                     In:  DevVAddr - the device virtual address.
3467                     In:  DevPAddr - the device physical address of the page to map.
3468                     In:  ui32MemFlags - BM r/w/cache flags
3469         RETURNS:    None
3470 ******************************************************************************/
3471 static IMG_VOID
3472 MMU_MapPage (MMU_HEAP *pMMUHeap,
3473                          IMG_DEV_VIRTADDR DevVAddr,
3474                          IMG_DEV_PHYADDR DevPAddr,
3475                          IMG_UINT32 ui32MemFlags)
3476 {
3477         IMG_UINT32 ui32Index;
3478         IMG_UINT32 *pui32Tmp;
3479         IMG_UINT32 ui32MMUFlags = 0;
3480         MMU_PT_INFO **ppsPTInfoList;
3482         /* check the physical alignment of the memory to map */
3483         PVR_ASSERT((DevPAddr.uiAddr & pMMUHeap->ui32DataPageMask) == 0);
3485         /*
3486                 unravel the read/write/cache flags
3487         */
3488         if(((PVRSRV_MEM_READ|PVRSRV_MEM_WRITE) & ui32MemFlags) == (PVRSRV_MEM_READ|PVRSRV_MEM_WRITE))
3489         {
3490                 /* read/write */
3491                 ui32MMUFlags = 0;
3492         }
3493         else if(PVRSRV_MEM_READ & ui32MemFlags)
3494         {
3495                 /* read only */
3496                 ui32MMUFlags |= SGX_MMU_PTE_READONLY;
3497         }
3498         else if(PVRSRV_MEM_WRITE & ui32MemFlags)
3499         {
3500                 /* write only */
3501                 ui32MMUFlags |= SGX_MMU_PTE_WRITEONLY;
3502         }
3504         /* cache coherency */
3505         if(PVRSRV_MEM_CACHE_CONSISTENT & ui32MemFlags)
3506         {
3507                 ui32MMUFlags |= SGX_MMU_PTE_CACHECONSISTENT;
3508         }
3510 #if !defined(FIX_HW_BRN_25503)
3511         /* EDM protection */
3512         if(PVRSRV_MEM_EDM_PROTECT & ui32MemFlags)
3513         {
3514                 ui32MMUFlags |= SGX_MMU_PTE_EDMPROTECT;
3515         }
3516 #endif
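        /* Summary of the flag translation above (illustrative):
               READ | WRITE                -> no extra PTE bits (full access)
               READ only                   -> SGX_MMU_PTE_READONLY
               WRITE only                  -> SGX_MMU_PTE_WRITEONLY
               + CACHE_CONSISTENT          -> SGX_MMU_PTE_CACHECONSISTENT
               + EDM_PROTECT (no BRN25503) -> SGX_MMU_PTE_EDMPROTECT */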
3518         /*
3519                 we receive a device physical address for the page that is to be mapped
3520                 and a device virtual address representing where it should be mapped to
3521         */
3523         /* find the index/offset in PD entries  */
3524         ui32Index = DevVAddr.uiAddr >> pMMUHeap->ui32PDShift;
3526         /* and advance to the first PT info list */
3527         ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32Index];
3529         CheckPT(ppsPTInfoList[0]);
3531         /* find the index/offset of the first PT in the first PT page */
3532         ui32Index = (DevVAddr.uiAddr & pMMUHeap->ui32PTMask) >> pMMUHeap->ui32PTShift;
3534         /* setup pointer to the first entry in the PT page */
3535         pui32Tmp = (IMG_UINT32*)ppsPTInfoList[0]->PTPageCpuVAddr;
3537 #if !defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
3538         {
3539                 IMG_UINT32 uTmp = pui32Tmp[ui32Index];
3540                 
3541                 /* Is the current page already valid? (should not be unless it was allocated and not deallocated) */
3542 #if defined(FIX_HW_BRN_31620)
3543                 if ((uTmp & SGX_MMU_PTE_VALID) && ((DevVAddr.uiAddr & BRN31620_PDE_CACHE_FILL_MASK) != BRN31620_DUMMY_PAGE_OFFSET))
3544 #else
3545                 if ((uTmp & SGX_MMU_PTE_VALID) != 0)
3546 #endif
3548                 {
3549                         PVR_DPF((PVR_DBG_ERROR, "MMU_MapPage: Page is already valid for alloc at VAddr:0x%08X PDIdx:%u PTIdx:%u",
3550                                                                         DevVAddr.uiAddr,
3551                                                                         DevVAddr.uiAddr >> pMMUHeap->ui32PDShift,
3552                                                                         ui32Index ));
3553                         PVR_DPF((PVR_DBG_ERROR, "MMU_MapPage: Page table entry value: 0x%08X", uTmp));
3555                         PVR_DPF((PVR_DBG_ERROR, "MMU_MapPage: Physical page to map: 0x" DEVPADDR_FMT,
3556                                                 DevPAddr.uiAddr));
3558 #if PT_DUMP
3559                         DumpPT(ppsPTInfoList[0]);
3560 #endif
3561                 }
3562 #if !defined(FIX_HW_BRN_31620)
3563                 PVR_ASSERT((uTmp & SGX_MMU_PTE_VALID) == 0);
3564 #endif
3565         }
3566 #endif
3568         /* One more valid entry in the page table. */
3569         ppsPTInfoList[0]->ui32ValidPTECount++;
3571         MakeKernelPageReadWrite(ppsPTInfoList[0]->PTPageCpuVAddr);
3572         /* map in the physical page */
3573         pui32Tmp[ui32Index] = ((IMG_UINT32)(DevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
3574                                                 & ((~pMMUHeap->ui32DataPageMask)>>SGX_MMU_PTE_ADDR_ALIGNSHIFT))
3575                                                 | SGX_MMU_PTE_VALID
3576                                                 | ui32MMUFlags;
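        /* Worked example (illustrative, assuming 4kB data pages and
           SGX_MMU_PTE_ADDR_ALIGNSHIFT == 0): for DevPAddr 0x87654000 the entry
           becomes (0x87654000 & ~0xFFF) | SGX_MMU_PTE_VALID | ui32MMUFlags,
           i.e. the page frame address in the high bits and the control bits
           in the low bits. */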
3577         MakeKernelPageReadOnly(ppsPTInfoList[0]->PTPageCpuVAddr);
3578         CheckPT(ppsPTInfoList[0]);
3579 }
3582 /*!
3583 ******************************************************************************
3584         FUNCTION:   MMU_MapScatter
3586         PURPOSE:    Create a linear mapping for a range of pages at a specified
3587                     virtual address.
3589         PARAMETERS: In:  pMMUHeap - the mmu.
3590                     In:  DevVAddr - the device virtual address.
3591                     In:  psSysAddr - the device physical address of the page to
3592                          map.
3593                     In:  uSize - size of memory range in bytes
3594                 In:  ui32MemFlags - page table flags.
3595                     In:  hUniqueTag - A unique ID for use as a tag identifier
3596         RETURNS:    None
3597 ******************************************************************************/
3598 IMG_VOID
3599 MMU_MapScatter (MMU_HEAP *pMMUHeap,
3600                                 IMG_DEV_VIRTADDR DevVAddr,
3601                                 IMG_SYS_PHYADDR *psSysAddr,
3602                                 IMG_SIZE_T uSize,
3603                                 IMG_UINT32 ui32MemFlags,
3604                                 IMG_HANDLE hUniqueTag)
3605 {
3606 #if defined(PDUMP)
3607         IMG_DEV_VIRTADDR MapBaseDevVAddr;
3608 #endif /*PDUMP*/
3609         IMG_UINT32 uCount, i, j;
3610         IMG_UINT32 ui32NumDevicePages;
3611         IMG_DEV_PHYADDR DevPAddr;
3613         PVR_ASSERT (pMMUHeap != IMG_NULL);
3615 #if defined(PDUMP)
3616         MapBaseDevVAddr = DevVAddr;
3617 #else
3618         PVR_UNREFERENCED_PARAMETER(hUniqueTag);
3619 #endif /*PDUMP*/
3621         PVR_ASSERT((HOST_PAGESIZE() % pMMUHeap->ui32DataPageSize) == 0);
3623         ui32NumDevicePages = HOST_PAGESIZE() / pMMUHeap->ui32DataPageSize;
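        /* e.g. with a 4kB HOST_PAGESIZE() and 4kB device data pages this is 1
           device page per host page (illustrative; the assert above guarantees
           the device page size divides the host page size) */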
3625         for (i=0, uCount=0; uCount<uSize; i++, uCount+=HOST_PAGESIZE())
3626         {
3627                 IMG_SYS_PHYADDR sSysAddr;
3629                 sSysAddr = psSysAddr[i];
3632                 /* check the physical alignment of the memory to map */
3633                 PVR_ASSERT((sSysAddr.uiAddr & pMMUHeap->ui32DataPageMask) == 0);
3635                 for(j=0; j< ui32NumDevicePages; j++)
3636                 {
3637                         DevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysAddr);
3639                         MMU_MapPage (pMMUHeap, DevVAddr, DevPAddr, ui32MemFlags);
3641                         PVR_DPF ((PVR_DBG_MESSAGE,
3642                                                 "MMU_MapScatter: devVAddr=%x, SysAddr=" SYSPADDR_FMT ", size=0x%x/0x%" SIZE_T_FMT_LEN "x",
3643                                                 DevVAddr.uiAddr, sSysAddr.uiAddr, (uCount + j*pMMUHeap->ui32DataPageSize), uSize));
3645                         DevVAddr.uiAddr += pMMUHeap->ui32DataPageSize;
3646                         sSysAddr.uiAddr += pMMUHeap->ui32DataPageSize;
3647                 }
3648         }
3650 #if (SGX_FEATURE_PT_CACHE_ENTRIES_PER_LINE > 1)
3651         MMU_InvalidatePageTableCache(pMMUHeap->psMMUContext->psDevInfo);
3652 #endif
3654 #if defined(PDUMP)
3655         MMU_PDumpPageTables (pMMUHeap, MapBaseDevVAddr, uSize, IMG_FALSE, hUniqueTag);
3656 #endif /* #if defined(PDUMP) */
3657 }
3659 /*!
3660 ******************************************************************************
3661         FUNCTION:   MMU_MapPages
3663         PURPOSE:    Create a linear mapping for a range of pages at a specified
3664                     virtual address.
3666         PARAMETERS: In:  pMMUHeap - the mmu.
3667                     In:  DevVAddr - the device virtual address.
3668                     In:  SysPAddr - the system physical address of the page to
3669                          map.
3670                     In:  uSize - size of memory range in bytes
3671                 In:  ui32MemFlags - page table flags.
3672                     In:  hUniqueTag - A unique ID for use as a tag identifier
3673         RETURNS:    None
3674 ******************************************************************************/
3675 IMG_VOID
3676 MMU_MapPages (MMU_HEAP *pMMUHeap,
3677                           IMG_DEV_VIRTADDR DevVAddr,
3678                           IMG_SYS_PHYADDR SysPAddr,
3679                           IMG_SIZE_T uSize,
3680                           IMG_UINT32 ui32MemFlags,
3681                           IMG_HANDLE hUniqueTag)
3682 {
3683         IMG_DEV_PHYADDR DevPAddr;
3684 #if defined(PDUMP)
3685         IMG_DEV_VIRTADDR MapBaseDevVAddr;
3686 #endif /*PDUMP*/
3687         IMG_UINT32 uCount;
3688         IMG_UINT32 ui32VAdvance;
3689         IMG_UINT32 ui32PAdvance;
3691         PVR_ASSERT (pMMUHeap != IMG_NULL);
3693         PVR_DPF ((PVR_DBG_MESSAGE, "MMU_MapPages: heap:%s, heap_id:%d devVAddr=%08X, SysPAddr=" SYSPADDR_FMT ", size=0x%" SIZE_T_FMT_LEN "x",
3694                                                                 pMMUHeap->psDevArena->pszName,
3695                                                                 pMMUHeap->psDevArena->ui32HeapID,
3696                                                                 DevVAddr.uiAddr, 
3697                                                                 SysPAddr.uiAddr,
3698                                                                 uSize));
3700         /* set the virtual and physical advance */
3701         ui32VAdvance = pMMUHeap->ui32DataPageSize;
3702         ui32PAdvance = pMMUHeap->ui32DataPageSize;
3704 #if defined(PDUMP)
3705         MapBaseDevVAddr = DevVAddr;
3706 #else
3707         PVR_UNREFERENCED_PARAMETER(hUniqueTag);
3708 #endif /*PDUMP*/
3710         DevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, SysPAddr);
3712         /* check the physical alignment of the memory to map */
3713         PVR_ASSERT((DevPAddr.uiAddr & pMMUHeap->ui32DataPageMask) == 0);
3715         /*
3716                 for dummy allocations there is only one physical
3717                 page backing the virtual range
3718         */
3719         if(ui32MemFlags & PVRSRV_MEM_DUMMY)
3720         {
3721                 ui32PAdvance = 0;
3722         }
3724         for (uCount=0; uCount<uSize; uCount+=ui32VAdvance)
3725         {
3726                 MMU_MapPage (pMMUHeap, DevVAddr, DevPAddr, ui32MemFlags);
3727                 DevVAddr.uiAddr += ui32VAdvance;
3728                 DevPAddr.uiAddr += ui32PAdvance;
3729         }
3731 #if (SGX_FEATURE_PT_CACHE_ENTRIES_PER_LINE > 1)
3732         MMU_InvalidatePageTableCache(pMMUHeap->psMMUContext->psDevInfo);
3733 #endif
3735 #if defined(PDUMP)
3736         MMU_PDumpPageTables (pMMUHeap, MapBaseDevVAddr, uSize, IMG_FALSE, hUniqueTag);
3737 #endif /* #if defined(PDUMP) */
3738 }
3741 /*!
3742 ******************************************************************************
3743         FUNCTION:   MMU_MapPagesSparse
3745         PURPOSE:    Create a sparse mapping for a range of pages at a specified
3746                     virtual address.
3748         PARAMETERS: In:  pMMUHeap - the mmu.
3749                     In:  DevVAddr - the device virtual address.
3750                     In:  SysPAddr - the system physical address of the page to
3751                          map.
3752                                 In:  ui32ChunkSize - Size of the chunk (must be page multiple)
3753                                 In:  ui32NumVirtChunks - Number of virtual chunks
3754                                 In:  ui32NumPhysChunks - Number of physical chunks
3755                                 In:  pabMapChunk - Mapping array
3756                 In:  ui32MemFlags - page table flags.
3757                     In:  hUniqueTag - A unique ID for use as a tag identifier
3758         RETURNS:    None
3759 ******************************************************************************/
3760 IMG_VOID
3761 MMU_MapPagesSparse (MMU_HEAP *pMMUHeap,
3762                                         IMG_DEV_VIRTADDR DevVAddr,
3763                                         IMG_SYS_PHYADDR SysPAddr,
3764                                         IMG_UINT32 ui32ChunkSize,
3765                                         IMG_UINT32 ui32NumVirtChunks,
3766                                         IMG_UINT32 ui32NumPhysChunks,
3767                                         IMG_BOOL *pabMapChunk,
3768                                         IMG_UINT32 ui32MemFlags,
3769                                         IMG_HANDLE hUniqueTag)
3770 {
3771         IMG_DEV_PHYADDR DevPAddr;
3772 #if defined(PDUMP)
3773         IMG_DEV_VIRTADDR MapBaseDevVAddr;
3774 #endif /*PDUMP*/
3775         IMG_UINT32 uCount;
3776         IMG_UINT32 ui32VAdvance;
3777         IMG_UINT32 ui32PAdvance;
3778         IMG_SIZE_T uSizeVM = ui32ChunkSize * ui32NumVirtChunks;
3779 #if !defined(PVRSRV_NEED_PVR_DPF)
3780         PVR_UNREFERENCED_PARAMETER(ui32NumPhysChunks);
3781 #endif
3783         PVR_ASSERT (pMMUHeap != IMG_NULL);
3785         PVR_DPF ((PVR_DBG_MESSAGE, "MMU_MapPagesSparse: heap:%s, heap_id:%d devVAddr=%08X, SysPAddr=" SYSPADDR_FMT ", VM space=0x%" SIZE_T_FMT_LEN "x, PHYS space=0x%x",
3786                                                                 pMMUHeap->psDevArena->pszName,
3787                                                                 pMMUHeap->psDevArena->ui32HeapID,
3788                                                                 DevVAddr.uiAddr, 
3789                                                                 SysPAddr.uiAddr,
3790                                                                 uSizeVM,
3791                                                                 ui32ChunkSize * ui32NumPhysChunks));
3793         /* set the virtual and physical advance */
3794         ui32VAdvance = pMMUHeap->ui32DataPageSize;
3795         ui32PAdvance = pMMUHeap->ui32DataPageSize;
3797 #if defined(PDUMP)
3798         MapBaseDevVAddr = DevVAddr;
3799 #else
3800         PVR_UNREFERENCED_PARAMETER(hUniqueTag);
3801 #endif /*PDUMP*/
3803         DevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, SysPAddr);
3805         /* check the physical alignment of the memory to map */
3806         PVR_ASSERT((DevPAddr.uiAddr & pMMUHeap->ui32DataPageMask) == 0);
3808         /*
3809                 for dummy allocations there is only one physical
3810                 page backing the virtual range
3811         */
3812         if(ui32MemFlags & PVRSRV_MEM_DUMMY)
3813         {
3814                 ui32PAdvance = 0;
3815         }
3817         for (uCount=0; uCount<uSizeVM; uCount+=ui32VAdvance)
3818         {
3819                 if (pabMapChunk[uCount/ui32ChunkSize])
3820                 {
3821                         MMU_MapPage (pMMUHeap, DevVAddr, DevPAddr, ui32MemFlags);
3822                         DevPAddr.uiAddr += ui32PAdvance;
3823                 }
3824                 DevVAddr.uiAddr += ui32VAdvance;
3825         }
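        /* Illustrative example: with ui32ChunkSize == 64kB, 4kB data pages and
           pabMapChunk == {IMG_TRUE, IMG_FALSE, IMG_TRUE}, virtual chunk 0 maps
           the first 64kB of physical memory, chunk 1 stays unmapped and chunk 2
           maps the next 64kB: DevPAddr only advances for pages that are
           actually mapped, so the physical pages pack densely. */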
3826         pMMUHeap->bHasSparseMappings = IMG_TRUE;
3828 #if (SGX_FEATURE_PT_CACHE_ENTRIES_PER_LINE > 1)
3829         MMU_InvalidatePageTableCache(pMMUHeap->psMMUContext->psDevInfo);
3830 #endif
3832 #if defined(PDUMP)
3833         MMU_PDumpPageTables (pMMUHeap, MapBaseDevVAddr, uSizeVM, IMG_FALSE, hUniqueTag);
3834 #endif /* #if defined(PDUMP) */
3835 }
3837 /*!
3838 ******************************************************************************
3839         FUNCTION:   MMU_MapShadow
3841         PURPOSE:    Create a mapping for a range of pages from either a CPU
3842                                 virtual address (or, if NULL, an hOSMemHandle) to a specified
3843                                 device virtual address.
3845         PARAMETERS: In:  pMMUHeap - the mmu.
3846                 In:  MapBaseDevVAddr - A page aligned device virtual address
3847                                        to start mapping from.
3848                 In:  uByteSize - A page aligned mapping length in bytes.
3849                 In:  CpuVAddr - A page aligned CPU virtual address.
3850                 In:  hOSMemHandle - An alternative OS specific memory handle
3851                                     for mapping RAM without a CPU virtual
3852                                     address
3853                 Out: pDevVAddr - deprecated - It used to return a byte aligned
3854                                  device virtual address corresponding to the
3855                                  cpu virtual address (When CpuVAddr wasn't
3856                                  constrained to be page aligned.) Now it just
3857                                  returns MapBaseDevVAddr. Unaligned semantics
3858                                  can easily be handled above this API if required.
3859                 In: hUniqueTag - A unique ID for use as a tag identifier
3860                 In: ui32MemFlags - page table flags.
3861         RETURNS:    None
3862 ******************************************************************************/
3863 IMG_VOID
3864 MMU_MapShadow (MMU_HEAP          *pMMUHeap,
3865                            IMG_DEV_VIRTADDR   MapBaseDevVAddr,
3866                            IMG_SIZE_T         uByteSize,
3867                            IMG_CPU_VIRTADDR   CpuVAddr,
3868                            IMG_HANDLE         hOSMemHandle,
3869                            IMG_DEV_VIRTADDR  *pDevVAddr,
3870                            IMG_UINT32         ui32MemFlags,
3871                            IMG_HANDLE         hUniqueTag)
3872 {
3873         IMG_UINT32                      i;
3874         IMG_UINT32                      uOffset = 0;
3875         IMG_DEV_VIRTADDR        MapDevVAddr;
3876         IMG_UINT32                      ui32VAdvance;
3877         IMG_UINT32                      ui32PAdvance;
3879 #if !defined (PDUMP)
3880         PVR_UNREFERENCED_PARAMETER(hUniqueTag);
3881 #endif
3883         PVR_DPF ((PVR_DBG_MESSAGE,
3884                         "MMU_MapShadow: DevVAddr:%08X, Bytes:0x%" SIZE_T_FMT_LEN "x, CPUVAddr:%p",
3885                         MapBaseDevVAddr.uiAddr,
3886                         uByteSize,
3887                         CpuVAddr));
3889         /* set the virtual and physical advance */
3890         ui32VAdvance = pMMUHeap->ui32DataPageSize;
3891         ui32PAdvance = pMMUHeap->ui32DataPageSize;
3893         /* note: can't do a useful check on the CPU address other than that it is at least 4k aligned */
3894         PVR_ASSERT(((IMG_UINTPTR_T)CpuVAddr & (SGX_MMU_PAGE_SIZE - 1)) == 0);
3895         PVR_ASSERT(((IMG_UINT32)uByteSize & pMMUHeap->ui32DataPageMask) == 0);
3896         pDevVAddr->uiAddr = MapBaseDevVAddr.uiAddr;
3898         /*
3899                 for dummy allocations there is only one physical
3900                 page backing the virtual range
3901         */
3902         if(ui32MemFlags & PVRSRV_MEM_DUMMY)
3903         {
3904                 ui32PAdvance = 0;
3905         }
3907         /* Loop through cpu memory and map page by page */
3908         MapDevVAddr = MapBaseDevVAddr;
3909         for (i=0; i<uByteSize; i+=ui32VAdvance)
3910         {
3911                 IMG_CPU_PHYADDR CpuPAddr;
3912                 IMG_DEV_PHYADDR DevPAddr;
3914                 if(CpuVAddr)
3915                 {
3916                         CpuPAddr = OSMapLinToCPUPhys (hOSMemHandle,
3917                                                                                   (IMG_VOID *)((IMG_UINTPTR_T)CpuVAddr + uOffset));
3918                 }
3919                 else
3920                 {
3921                         CpuPAddr = OSMemHandleToCpuPAddr(hOSMemHandle, uOffset);
3922                 }
3923                 DevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, CpuPAddr);
3925                 /* check the physical alignment of the memory to map */
3926                 PVR_ASSERT((DevPAddr.uiAddr & pMMUHeap->ui32DataPageMask) == 0);
3928                 PVR_DPF ((PVR_DBG_MESSAGE,
3929                                 "Offset=0x%x: CpuVAddr=%p, CpuPAddr=" CPUPADDR_FMT ", DevVAddr=%08X, DevPAddr=" DEVPADDR_FMT,
3930                                 uOffset,
3931                                 (IMG_PVOID)((IMG_UINTPTR_T)CpuVAddr + uOffset),
3932                                 CpuPAddr.uiAddr,
3933                                 MapDevVAddr.uiAddr,
3934                                 DevPAddr.uiAddr));
3936                 MMU_MapPage (pMMUHeap, MapDevVAddr, DevPAddr, ui32MemFlags);
3938                 /* loop update */
3939                 MapDevVAddr.uiAddr += ui32VAdvance;
3940                 uOffset += ui32PAdvance;
3941         }
3943 #if (SGX_FEATURE_PT_CACHE_ENTRIES_PER_LINE > 1)
3944         MMU_InvalidatePageTableCache(pMMUHeap->psMMUContext->psDevInfo);
3945 #endif
3947 #if defined(PDUMP)
3948         MMU_PDumpPageTables (pMMUHeap, MapBaseDevVAddr, uByteSize, IMG_FALSE, hUniqueTag);
3949 #endif /* #if defined(PDUMP) */
3950 }
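/*
   Illustrative usage only (not part of the original source; psHeap,
   sDevVAddrBase, pvLinAddr, ui32Bytes and hTag are hypothetical names): a
   caller such as the buffer manager might shadow-map a page aligned kernel
   allocation roughly as:

        IMG_DEV_VIRTADDR sOut;
        MMU_MapShadow(psHeap, sDevVAddrBase, ui32Bytes, pvLinAddr,
                      hOSMemHandle, &sOut,
                      PVRSRV_MEM_READ | PVRSRV_MEM_WRITE, hTag);
*/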
3952 /*!
3953 ******************************************************************************
3954         FUNCTION:   MMU_MapShadowSparse
3956         PURPOSE:    Create a mapping for a range of pages from either a CPU
3957                                 virtual address (or, if NULL, an hOSMemHandle) to a specified
3958                                 device virtual address.
3960         PARAMETERS: In:  pMMUHeap - the mmu.
3961                 In:  MapBaseDevVAddr - A page aligned device virtual address
3962                                        to start mapping from.
3963                                 In:  ui32ChunkSize - Size of the chunk (must be page multiple)
3964                                 In:  ui32NumVirtChunks - Number of virtual chunks
3965                                 In:  ui32NumPhysChunks - Number of physical chunks
3966                                 In:  pabMapChunk - Mapping array
3967                 In:  CpuVAddr - A page aligned CPU virtual address.
3968                 In:  hOSMemHandle - An alternative OS specific memory handle
3969                                     for mapping RAM without a CPU virtual
3970                                     address
3971                 Out: pDevVAddr - deprecated - It used to return a byte aligned
3972                                  device virtual address corresponding to the
3973                                  cpu virtual address (When CpuVAddr wasn't
3974                                  constrained to be page aligned.) Now it just
3975                                  returns MapBaseDevVAddr. Unaligned semantics
3976                                  can easily be handled above this API if required.
3977                 In: hUniqueTag - A unique ID for use as a tag identifier
3978                 In: ui32MemFlags - page table flags.
3979         RETURNS:    None
3980 ******************************************************************************/
3981 IMG_VOID
3982 MMU_MapShadowSparse (MMU_HEAP          *pMMUHeap,
3983                                          IMG_DEV_VIRTADDR   MapBaseDevVAddr,
3984                                          IMG_UINT32         ui32ChunkSize,
3985                                          IMG_UINT32         ui32NumVirtChunks,
3986                                          IMG_UINT32         ui32NumPhysChunks,
3987                                          IMG_BOOL          *pabMapChunk,
3988                                          IMG_CPU_VIRTADDR   CpuVAddr,
3989                                          IMG_HANDLE         hOSMemHandle,
3990                                          IMG_DEV_VIRTADDR  *pDevVAddr,
3991                                          IMG_UINT32         ui32MemFlags,
3992                                          IMG_HANDLE         hUniqueTag)
3993 {
3994         IMG_UINT32                      i;
3995         IMG_UINT32                      uOffset = 0;
3996         IMG_DEV_VIRTADDR        MapDevVAddr;
3997         IMG_UINT32                      ui32VAdvance;
3998         IMG_UINT32                      ui32PAdvance;
3999         IMG_SIZE_T                      uiSizeVM = ui32ChunkSize * ui32NumVirtChunks;
4000         IMG_UINT32                      ui32ChunkIndex = 0;
4001         IMG_UINT32                      ui32ChunkOffset = 0;
4002 #if !defined(PVRSRV_NEED_PVR_DPF)
4003         PVR_UNREFERENCED_PARAMETER(ui32NumPhysChunks);
4004 #endif
4005 #if !defined (PDUMP)
4006         PVR_UNREFERENCED_PARAMETER(hUniqueTag);
4007 #endif
4009         PVR_DPF ((PVR_DBG_MESSAGE,
4010                         "MMU_MapShadowSparse: DevVAddr:%08X, VM space:0x%" SIZE_T_FMT_LEN "x, CPUVAddr:%p PHYS space:0x%x",
4011                         MapBaseDevVAddr.uiAddr,
4012                         uiSizeVM,
4013                         CpuVAddr,
4014                         ui32ChunkSize * ui32NumPhysChunks));
4016         /* set the virtual and physical advance */
4017         ui32VAdvance = pMMUHeap->ui32DataPageSize;
4018         ui32PAdvance = pMMUHeap->ui32DataPageSize;
4020         /* note: can't do a useful check on the CPU address other than that it is at least 4k aligned */
4021         PVR_ASSERT(((IMG_UINTPTR_T)CpuVAddr & (SGX_MMU_PAGE_SIZE - 1)) == 0);
4022         PVR_ASSERT(((IMG_UINT32)uiSizeVM & pMMUHeap->ui32DataPageMask) == 0);
4023         pDevVAddr->uiAddr = MapBaseDevVAddr.uiAddr;
4025         /* Shouldn't come through the sparse interface */
4026         PVR_ASSERT((ui32MemFlags & PVRSRV_MEM_DUMMY) == 0);
4028         /* Loop through cpu memory and map page by page */
4029         MapDevVAddr = MapBaseDevVAddr;
4030         for (i=0; i<uiSizeVM; i+=ui32VAdvance)
4031         {
4032                 IMG_CPU_PHYADDR CpuPAddr;
4033                 IMG_DEV_PHYADDR DevPAddr;
4035                 if (pabMapChunk[i/ui32ChunkSize])
4036                 /*if (pabMapChunk[ui32ChunkIndex])*/
4037                 {
4038                         if(CpuVAddr)
4039                         {
4040                                 CpuPAddr = OSMapLinToCPUPhys (hOSMemHandle,
4041                                                                                           (IMG_VOID *)((IMG_UINTPTR_T)CpuVAddr + uOffset));
4042                         }
4043                         else
4044                         {
4045                                 CpuPAddr = OSMemHandleToCpuPAddr(hOSMemHandle, uOffset);
4046                         }
4047                         DevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, CpuPAddr);
4048         
4049                         /* check the physical alignment of the memory to map */
4050                         PVR_ASSERT((DevPAddr.uiAddr & pMMUHeap->ui32DataPageMask) == 0);
4051         
4052                         PVR_DPF ((PVR_DBG_MESSAGE,
4053                                         "Offset=0x%x: CpuVAddr=%p, CpuPAddr=" CPUPADDR_FMT ", DevVAddr=%08X, DevPAddr=" DEVPADDR_FMT,
4054                                         uOffset,
4055                                         (void *)((IMG_UINTPTR_T)CpuVAddr + uOffset),
4056                                         CpuPAddr.uiAddr,
4057                                         MapDevVAddr.uiAddr,
4058                                         DevPAddr.uiAddr));
4059         
4060                         MMU_MapPage (pMMUHeap, MapDevVAddr, DevPAddr, ui32MemFlags);
4061                         uOffset += ui32PAdvance;
4062                 }
4064                 /* loop update */
4065                 MapDevVAddr.uiAddr += ui32VAdvance;
4066                 ui32ChunkOffset += ui32VAdvance;
4067                 if (ui32ChunkOffset == ui32ChunkSize)
4068                 {
4069                         ui32ChunkIndex++;
4070                         ui32ChunkOffset = 0;
4071                 }
4072         }
4074         pMMUHeap->bHasSparseMappings = IMG_TRUE;
4076 #if (SGX_FEATURE_PT_CACHE_ENTRIES_PER_LINE > 1)
4077         MMU_InvalidatePageTableCache(pMMUHeap->psMMUContext->psDevInfo);
4078 #endif
4080 #if defined(PDUMP)
4081         MMU_PDumpPageTables (pMMUHeap, MapBaseDevVAddr, uiSizeVM, IMG_FALSE, hUniqueTag);
4082 #endif /* #if defined(PDUMP) */
4083 }
4085 /*!
4086 ******************************************************************************
4087         FUNCTION:   MMU_UnmapPages
4089         PURPOSE:    Unmap pages and invalidate their device virtual addresses.
4091         PARAMETERS:     In:  psMMUHeap - the mmu.
4092                     In:  sDevVAddr - the device virtual address.
4093                     In:  ui32PageCount - page count
4094                     In:  hUniqueTag - A unique ID for use as a tag identifier
4096         RETURNS:        None
4097 ******************************************************************************/
4098 IMG_VOID
4099 MMU_UnmapPages (MMU_HEAP *psMMUHeap,
4100                                 IMG_DEV_VIRTADDR sDevVAddr,
4101                                 IMG_UINT32 ui32PageCount,
4102                                 IMG_HANDLE hUniqueTag)
4103 {
4104         IMG_UINT32                      uPageSize = psMMUHeap->ui32DataPageSize;
4105         IMG_DEV_VIRTADDR        sTmpDevVAddr;
4106         IMG_UINT32                      i;
4107         IMG_UINT32                      ui32PDIndex;
4108         IMG_UINT32                      ui32PTIndex;
4109         IMG_UINT32                      *pui32Tmp;
4111 #if !defined (PDUMP)
4112         PVR_UNREFERENCED_PARAMETER(hUniqueTag);
4113 #endif
4115         /* setup tmp devvaddr to base of allocation */
4116         sTmpDevVAddr = sDevVAddr;
4118         for(i=0; i<ui32PageCount; i++)
4119         {
4120                 MMU_PT_INFO **ppsPTInfoList;
4122                 /* find the index/offset in PD entries  */
4123                 ui32PDIndex = sTmpDevVAddr.uiAddr >> psMMUHeap->ui32PDShift;
4125                 /* and advance to the first PT info list */
4126                 ppsPTInfoList = &psMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
4128                 /* find the index/offset of the first PT in the first PT page */
4129                 ui32PTIndex = (sTmpDevVAddr.uiAddr & psMMUHeap->ui32PTMask) >> psMMUHeap->ui32PTShift;
4131                 /* Is the PT page valid? */
4132                 if ((!ppsPTInfoList[0]) && (!psMMUHeap->bHasSparseMappings))
4133                 {
4134                         PVR_DPF((PVR_DBG_ERROR, "MMU_UnmapPages: ERROR Invalid PT for alloc at VAddr:0x%08X (VaddrIni:0x%08X AllocPage:%u) PDIdx:%u PTIdx:%u",
4135                                                                         sTmpDevVAddr.uiAddr,
4136                                                                         sDevVAddr.uiAddr,
4137                                                                         i,
4138                                                                         ui32PDIndex,
4139                                                                         ui32PTIndex));
4141                         /* advance the sTmpDevVAddr by one page */
4142                         sTmpDevVAddr.uiAddr += uPageSize;
4144                         /* Try to unmap the remaining allocation pages */
4145                         continue;
4146                 }
4148                 CheckPT(ppsPTInfoList[0]);
4150                 /* setup pointer to the first entry in the PT page */
4151                 pui32Tmp = (IMG_UINT32*)ppsPTInfoList[0]->PTPageCpuVAddr;
4153                 /* Decrement the valid page count only if the current page is valid*/
4154                 if (pui32Tmp[ui32PTIndex] & SGX_MMU_PTE_VALID)
4155                 {
4156                         ppsPTInfoList[0]->ui32ValidPTECount--;
4157                 }
4158                 else
4159                 {
4160                         PVR_DPF((PVR_DBG_ERROR, "MMU_UnmapPages: Page is already invalid for alloc at VAddr:0x%08X (VAddrIni:0x%08X AllocPage:%u) PDIdx:%u PTIdx:%u",
4161                                                                         sTmpDevVAddr.uiAddr,
4162                                                                         sDevVAddr.uiAddr,
4163                                                                         i,
4164                                                                         ui32PDIndex,
4165                                                                         ui32PTIndex));
4166                         PVR_DPF((PVR_DBG_ERROR, "MMU_UnmapPages: Page table entry value: 0x%08X", pui32Tmp[ui32PTIndex]));
4167                 }
4169                 /* The page table count should not go below zero */
4170                 PVR_ASSERT((IMG_INT32)ppsPTInfoList[0]->ui32ValidPTECount >= 0);
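                /* Three invalidation strategies follow: with
                   SUPPORT_SGX_MMU_DUMMY_PAGE the PTE is redirected to the dummy
                   data page (kept valid so stray device accesses do not fault);
                   with FIX_HW_BRN_31620 a workaround helper patches the entry;
                   otherwise the PTE is simply zeroed. */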
4172                 MakeKernelPageReadWrite(ppsPTInfoList[0]->PTPageCpuVAddr);
4173 #if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
4174                 /* point the PT entry to the dummy data page */
4175                 pui32Tmp[ui32PTIndex] = (psMMUHeap->psMMUContext->psDevInfo->sDummyDataDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
4176                                                                 | SGX_MMU_PTE_VALID;
4177 #else
4178                 /* invalidate entry */
4179 #if defined(FIX_HW_BRN_31620)
4180                 BRN31620InvalidatePageTableEntry(psMMUHeap->psMMUContext, ui32PDIndex, ui32PTIndex, &pui32Tmp[ui32PTIndex]);
4181 #else
4182                 pui32Tmp[ui32PTIndex] = 0;
4183 #endif
4184 #endif
4185                 MakeKernelPageReadOnly(ppsPTInfoList[0]->PTPageCpuVAddr);
4187                 CheckPT(ppsPTInfoList[0]);
4189                 /* advance the sTmpDevVAddr by one page */
4190                 sTmpDevVAddr.uiAddr += uPageSize;
4191         }
4193         MMU_InvalidatePageTableCache(psMMUHeap->psMMUContext->psDevInfo);
4195 #if defined(PDUMP)
4196         MMU_PDumpPageTables (psMMUHeap, sDevVAddr, uPageSize*ui32PageCount, IMG_TRUE, hUniqueTag);
4197 #endif /* #if defined(PDUMP) */
4198 }
4201 /*!
4202 ******************************************************************************
4203         FUNCTION:   MMU_GetPhysPageAddr
4205         PURPOSE:    extracts physical address from MMU page tables
4207         PARAMETERS: In:  pMMUHeap - the mmu
4208         PARAMETERS: In:  sDevVPageAddr - the virtual address to extract physical
4209                                         page mapping from
4210         RETURNS:    None
4211 ******************************************************************************/
4212 IMG_DEV_PHYADDR
4213 MMU_GetPhysPageAddr(MMU_HEAP *pMMUHeap, IMG_DEV_VIRTADDR sDevVPageAddr)
4214 {
4215         IMG_UINT32 *pui32PageTable;
4216         IMG_UINT32 ui32Index;
4217         IMG_DEV_PHYADDR sDevPAddr;
4218         MMU_PT_INFO **ppsPTInfoList;
4220         /* find the index/offset in PD entries  */
4221         ui32Index = sDevVPageAddr.uiAddr >> pMMUHeap->ui32PDShift;
4223         /* and advance to the first PT info list */
4224         ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32Index];
4225         if (!ppsPTInfoList[0])
4226         {
4227                 /* Heaps with sparse mappings are allowed invalid pages */
4228                 if (!pMMUHeap->bHasSparseMappings)
4229                 {
4230                         PVR_DPF((PVR_DBG_ERROR,"MMU_GetPhysPageAddr: Not mapped in at 0x%08x", sDevVPageAddr.uiAddr));
4231                 }
4232                 sDevPAddr.uiAddr = 0;
4233                 return sDevPAddr;
4234         }
4236         /* find the index/offset of the first PT in the first PT page */
4237         ui32Index = (sDevVPageAddr.uiAddr & pMMUHeap->ui32PTMask) >> pMMUHeap->ui32PTShift;
4239         /* setup pointer to the first entry in the PT page */
4240         pui32PageTable = (IMG_UINT32*)ppsPTInfoList[0]->PTPageCpuVAddr;
4242         /* read back physical page */
4243         sDevPAddr.uiAddr = pui32PageTable[ui32Index];
4245         /* Mask off non-address bits */
4246         sDevPAddr.uiAddr &= ~(pMMUHeap->ui32DataPageMask>>SGX_MMU_PTE_ADDR_ALIGNSHIFT);
4248         /* and align the address */
4249         sDevPAddr.uiAddr <<= SGX_MMU_PTE_ADDR_ALIGNSHIFT;
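        /* Worked example (illustrative, assuming 4kB data pages and
           SGX_MMU_PTE_ADDR_ALIGNSHIFT == 0): a PTE of 0x87654003 yields
           0x87654000 - the flag bits in the low 12 bits are discarded, leaving
           only the page frame address. */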
4251         return sDevPAddr;
4252 }
4255 IMG_DEV_PHYADDR MMU_GetPDDevPAddr(MMU_CONTEXT *pMMUContext)
4256 {
4257         return (pMMUContext->sPDDevPAddr);
4258 }
4261 /*!
4262 ******************************************************************************
4263         FUNCTION:   SGXGetPhysPageAddr
4265         PURPOSE:    Gets DEV and CPU physical address of sDevVAddr
4267         PARAMETERS: In:  hDevMemHeap - device mem heap handle
4268         PARAMETERS: In:  sDevVAddr - the base virtual address to unmap from
4269         PARAMETERS: Out: pDevPAddr - DEV physical address
4270         PARAMETERS: Out: pCpuPAddr - CPU physical address
4271         RETURNS:    None
4272 ******************************************************************************/
4273 IMG_EXPORT
4274 PVRSRV_ERROR SGXGetPhysPageAddrKM (IMG_HANDLE hDevMemHeap,
4275                                                                    IMG_DEV_VIRTADDR sDevVAddr,
4276                                                                    IMG_DEV_PHYADDR *pDevPAddr,
4277                                                                    IMG_CPU_PHYADDR *pCpuPAddr)
4278 {
4279         MMU_HEAP *pMMUHeap;
4280         IMG_DEV_PHYADDR DevPAddr;
4282         /*
4283                 Get MMU Heap From hDevMemHeap
4284         */
4285         pMMUHeap = (MMU_HEAP*)BM_GetMMUHeap(hDevMemHeap);
4287         DevPAddr = MMU_GetPhysPageAddr(pMMUHeap, sDevVAddr);
4288         pCpuPAddr->uiAddr = DevPAddr.uiAddr; /* SysDevPAddrToCPUPAddr(DevPAddr) */
4289         pDevPAddr->uiAddr = DevPAddr.uiAddr;
4291         return (pDevPAddr->uiAddr != 0) ? PVRSRV_OK : PVRSRV_ERROR_INVALID_PARAMS;
4292 }
4295 /*!
4296 ******************************************************************************
4297     FUNCTION:   SGXGetMMUPDAddrKM
4299     PURPOSE:    Gets PD device physical address of hDevMemContext
4301     PARAMETERS: In:  hDevCookie - device cookie
4302         PARAMETERS: In:  hDevMemContext - memory context
4303         PARAMETERS: Out: psPDDevPAddr - MMU PD address
4304     RETURNS:    None
4305 ******************************************************************************/
4306 PVRSRV_ERROR SGXGetMMUPDAddrKM(IMG_HANDLE               hDevCookie,
4307                                                                 IMG_HANDLE              hDevMemContext,
4308                                                                 IMG_DEV_PHYADDR *psPDDevPAddr)
4309 {
4310         if (!hDevCookie || !hDevMemContext || !psPDDevPAddr)
4311         {
4312                 return PVRSRV_ERROR_INVALID_PARAMS;
4313         }
4315         /* return the address */
4316         *psPDDevPAddr = ((BM_CONTEXT*)hDevMemContext)->psMMUContext->sPDDevPAddr;
4318         return PVRSRV_OK;
4319 }
4321 /*!
4322 ******************************************************************************
4323         FUNCTION:   MMU_BIFResetPDAlloc
4325         PURPOSE:    Allocate a dummy Page Directory, Page Table and Page which can
4326                                 be used for dynamic dummy page mapping during SGX reset.
4327                                 Note: since this is only used for hardware recovery, no
4328                                 pdumping is performed.
4330         PARAMETERS: In:  psDevInfo - device info
4331         RETURNS:    PVRSRV_OK or error
4332 ******************************************************************************/
4333 PVRSRV_ERROR MMU_BIFResetPDAlloc(PVRSRV_SGXDEV_INFO *psDevInfo)
4334 {
4335         PVRSRV_ERROR eError;
4336         SYS_DATA *psSysData;
4337         RA_ARENA *psLocalDevMemArena;
4338         IMG_HANDLE hOSMemHandle = IMG_NULL;
4339         IMG_BYTE *pui8MemBlock = IMG_NULL;
4340         IMG_SYS_PHYADDR sMemBlockSysPAddr;
4341         IMG_CPU_PHYADDR sMemBlockCpuPAddr;
4343         SysAcquireData(&psSysData);
4345         psLocalDevMemArena = psSysData->apsLocalDevMemArena[0];
4347         /* allocate 3 pages - for the PD, PT and dummy page */
4348         if(psLocalDevMemArena == IMG_NULL)
4349         {
4350                 /* UMA system */
4351                 eError = OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
4352                                                       3 * SGX_MMU_PAGE_SIZE,
4353                                                       SGX_MMU_PAGE_SIZE,
4354                                                           IMG_NULL,
4355                                                           0,
4356                                                           IMG_NULL,
4357                                                       (IMG_VOID **)&pui8MemBlock,
4358                                                       &hOSMemHandle);
4359                 if (eError != PVRSRV_OK)
4360                 {
4361                         PVR_DPF((PVR_DBG_ERROR, "MMU_BIFResetPDAlloc: ERROR call to OSAllocPages failed"));
4362                         return eError;
4363                 }
4365                 /* translate address to device physical */
4366                 if(pui8MemBlock)
4367                 {
4368                         sMemBlockCpuPAddr = OSMapLinToCPUPhys(hOSMemHandle,
4369                                                                                                   pui8MemBlock);
4370                 }
4371                 else
4372                 {
4373                         /* This isn't used in all cases since not all ports currently support
4374                          * OSMemHandleToCpuPAddr() */
4375                         sMemBlockCpuPAddr = OSMemHandleToCpuPAddr(hOSMemHandle, 0);
4376                 }
4377         }
4378         else
4379         {
4380                 /* non-UMA system */
4382                 /* 
4383                    We cannot use IMG_SYS_PHYADDR here, as that is 64-bit for 32-bit PAE builds.
4384                    The physical address in this call to RA_Alloc is specifically the SysPAddr 
4385                    of local (card) space, and it is highly unlikely we would ever need to 
4386                    support > 4GB of local (card) memory (this does assume that such local
4387                    memory will be mapped into System physical memory space at a low address so
4388                    that any and all local memory exists within the 4GB SYSPAddr range).
4389                  */
4390                 IMG_UINTPTR_T uiLocalPAddr;
4392                 if(RA_Alloc(psLocalDevMemArena,
4393                                         3 * SGX_MMU_PAGE_SIZE,
4394                                         IMG_NULL,
4395                                         IMG_NULL,
4396                                         0,
4397                                         SGX_MMU_PAGE_SIZE,
4398                                         0,
4399                                         IMG_NULL,
4400                                         0,
4401                                         &uiLocalPAddr) != IMG_TRUE)
4402                 {
4403                         PVR_DPF((PVR_DBG_ERROR, "MMU_BIFResetPDAlloc: ERROR call to RA_Alloc failed"));
4404                         return PVRSRV_ERROR_OUT_OF_MEMORY;
4405                 }
4407                 /* Munge the local PAddr back into the SysPAddr */
4408                 sMemBlockSysPAddr.uiAddr = uiLocalPAddr;
4410                 /* derive the CPU virtual address */
4411                 sMemBlockCpuPAddr = SysSysPAddrToCpuPAddr(sMemBlockSysPAddr);
4412                 pui8MemBlock = OSMapPhysToLin(sMemBlockCpuPAddr,
4413                                                                           SGX_MMU_PAGE_SIZE * 3,
4414                                                                           PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
4415                                                                           &hOSMemHandle);
4416                 if(!pui8MemBlock)
4417                 {
4418                         PVR_DPF((PVR_DBG_ERROR, "MMU_BIFResetPDAlloc: ERROR failed to map page tables"));
4419                         return PVRSRV_ERROR_BAD_MAPPING;
4420                 }
4421         }
4423         PVR_ASSERT(pui8MemBlock != IMG_NULL);
4425         psDevInfo->hBIFResetPDOSMemHandle = hOSMemHandle;
4426         psDevInfo->sBIFResetPDDevPAddr = SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sMemBlockCpuPAddr);
4427         psDevInfo->sBIFResetPTDevPAddr.uiAddr = psDevInfo->sBIFResetPDDevPAddr.uiAddr + SGX_MMU_PAGE_SIZE;
4428         psDevInfo->sBIFResetPageDevPAddr.uiAddr = psDevInfo->sBIFResetPTDevPAddr.uiAddr + SGX_MMU_PAGE_SIZE;
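        /* The three contiguous pages allocated above are laid out as:
               page 0 - dummy page directory (pui32BIFResetPD)
               page 1 - dummy page table     (pui32BIFResetPT)
               page 2 - dummy data page      (filled with 0xDB markers below) */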
4429         /* override pointer cast warnings */
4430         /* PRQA S 3305,509 2 */
4431         psDevInfo->pui32BIFResetPD = (IMG_UINT32 *)pui8MemBlock;
4432         psDevInfo->pui32BIFResetPT = (IMG_UINT32 *)(pui8MemBlock + SGX_MMU_PAGE_SIZE);
4434         /* Invalidate entire PD and PT. */
4435         OSMemSet(psDevInfo->pui32BIFResetPD, 0, SGX_MMU_PAGE_SIZE);
4436         OSMemSet(psDevInfo->pui32BIFResetPT, 0, SGX_MMU_PAGE_SIZE);
4437         /* Fill dummy page with markers. */
4438         OSMemSet(pui8MemBlock + (2 * SGX_MMU_PAGE_SIZE), 0xDB, SGX_MMU_PAGE_SIZE);
4440         return PVRSRV_OK;
4441 }
4443 /*!
4444 ******************************************************************************
4445         FUNCTION:   MMU_BIFResetPDFree
4447         PURPOSE:    Free resources allocated in MMU_BIFResetPDAlloc.
4449         PARAMETERS: In:  psDevInfo - device info
4450         RETURNS:
4451 ******************************************************************************/
4452 IMG_VOID MMU_BIFResetPDFree(PVRSRV_SGXDEV_INFO *psDevInfo)
4453 {
4454         SYS_DATA *psSysData;
4455         RA_ARENA *psLocalDevMemArena;
4456         IMG_SYS_PHYADDR sPDSysPAddr;
4458         SysAcquireData(&psSysData);
4460         psLocalDevMemArena = psSysData->apsLocalDevMemArena[0];
4462         /* free the page directory */
4463         if(psLocalDevMemArena == IMG_NULL)
4464         {
4465                 OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
4466                                         3 * SGX_MMU_PAGE_SIZE,
4467                                         psDevInfo->pui32BIFResetPD,
4468                                         psDevInfo->hBIFResetPDOSMemHandle);
4469         }
4470         else
4471         {
4472                 OSUnMapPhysToLin(psDevInfo->pui32BIFResetPD,
4473                          3 * SGX_MMU_PAGE_SIZE,
4474                          PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
4475                          psDevInfo->hBIFResetPDOSMemHandle);
4477                 sPDSysPAddr = SysDevPAddrToSysPAddr(PVRSRV_DEVICE_TYPE_SGX, psDevInfo->sBIFResetPDDevPAddr);
4478                 /* Note that the cast to IMG_UINTPTR_T is ok as we're local mem. */
4479                 RA_Free(psLocalDevMemArena, (IMG_UINTPTR_T)sPDSysPAddr.uiAddr, IMG_FALSE);
4480         }
4481 }
4483 IMG_VOID MMU_CheckFaultAddr(PVRSRV_SGXDEV_INFO *psDevInfo, IMG_UINT32 ui32PDDevPAddr, IMG_UINT32 ui32FaultAddr)
4484 {
4485         MMU_CONTEXT *psMMUContext = psDevInfo->pvMMUContextList;
4487         while (psMMUContext && (psMMUContext->sPDDevPAddr.uiAddr != ui32PDDevPAddr))
4488         {
4489                 psMMUContext = psMMUContext->psNext;
4490         }
4492         if (psMMUContext)
4493         {
4494                 IMG_UINT32 ui32PTIndex;
4495                 IMG_UINT32 ui32PDIndex;
4497                 PVR_LOG(("Found MMU context for page fault 0x%08x", ui32FaultAddr));
4498                 PVR_LOG(("GPU memory context is for PID=%d (%s)", psMMUContext->ui32PID, psMMUContext->szName));
4500                 ui32PTIndex = (ui32FaultAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
4501                 ui32PDIndex = (ui32FaultAddr & SGX_MMU_PD_MASK) >> (SGX_MMU_PT_SHIFT + SGX_MMU_PAGE_SHIFT);
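                /* e.g. a fault at 0x0A123456 with the standard 4kB layout
                   (SGX_MMU_PAGE_SHIFT == 12, SGX_MMU_PT_SHIFT == 10) gives
                   ui32PDIndex = bits 31:22 = 0x28 and ui32PTIndex =
                   bits 21:12 = 0x123 (illustrative values) */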
4503                 if (psMMUContext->apsPTInfoList[ui32PDIndex])
4504                 {
4505                         if (psMMUContext->apsPTInfoList[ui32PDIndex]->PTPageCpuVAddr)
4506                         {
4507                                 IMG_UINT32 *pui32Ptr = psMMUContext->apsPTInfoList[ui32PDIndex]->PTPageCpuVAddr;
4508                                 IMG_UINT32 ui32PTE = pui32Ptr[ui32PTIndex];
4510                                 PVR_LOG(("PDE valid: PTE = 0x%08x (PhysAddr = 0x%08x, %s)",
4511                                                   ui32PTE,
4512                                                   ui32PTE & SGX_MMU_PTE_ADDR_MASK,
4513                                                   ui32PTE & SGX_MMU_PTE_VALID?"valid":"Invalid"));
4514                         }
4515                         else
4516                         {
4517                                 PVR_LOG(("Found PT info but no CPU address"));
4518                         }
4519                 }
4520                 else
4521                 {
4522                         PVR_LOG(("No PDE found"));
4523                 }
4524         }
4525 }
4527 #if defined(SUPPORT_EXTERNAL_SYSTEM_CACHE)
4528 /*!
4529 ******************************************************************************
4530         FUNCTION:   MMU_MapExtSystemCacheRegs
4532         PURPOSE:    maps external system cache control registers into SGX MMU
4534         PARAMETERS: In:  psDeviceNode - device node
4535         RETURNS:
4536 ******************************************************************************/
4537 PVRSRV_ERROR MMU_MapExtSystemCacheRegs(PVRSRV_DEVICE_NODE *psDeviceNode)
4538 {
4539         IMG_UINT32 *pui32PT;
4540         PVRSRV_SGXDEV_INFO *psDevInfo;
4541         IMG_UINT32 ui32PDIndex;
4542         IMG_UINT32 ui32PTIndex;
4543         PDUMP_MMU_ATTRIB sMMUAttrib;
4545         psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice;
4547         sMMUAttrib = psDevInfo->sMMUAttrib;
4548 #if defined(PDUMP)
4549         MMU_SetPDumpAttribs(&sMMUAttrib, psDeviceNode,
4550                                                 SGX_MMU_PAGE_MASK,
4551                                                 SGX_MMU_PT_SIZE * sizeof(IMG_UINT32));
4552 #endif
4554 #if defined(PDUMP)
4555         {
4556                 IMG_CHAR                szScript[128];
4558                 sprintf(szScript, "MALLOC :EXTSYSCACHE:PA_%08X%08X %u %u 0x%p\r\n", 0, psDevInfo->sExtSysCacheRegsDevPBase.uiAddr, SGX_MMU_PAGE_SIZE, SGX_MMU_PAGE_SIZE, psDevInfo->sExtSysCacheRegsDevPBase.uiAddr);
4559                 PDumpOSWriteString2(szScript, PDUMP_FLAGS_CONTINUOUS);
4560         }
4561 #endif
4563         ui32PDIndex = (SGX_EXT_SYSTEM_CACHE_REGS_DEVVADDR_BASE & SGX_MMU_PD_MASK) >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
4564         ui32PTIndex = (SGX_EXT_SYSTEM_CACHE_REGS_DEVVADDR_BASE & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
4566         pui32PT = (IMG_UINT32 *) psDeviceNode->sDevMemoryInfo.pBMKernelContext->psMMUContext->apsPTInfoList[ui32PDIndex]->PTPageCpuVAddr;
4568         MakeKernelPageReadWrite(pui32PT);
4569         /* map the PT to the registers */
4570         pui32PT[ui32PTIndex] = (psDevInfo->sExtSysCacheRegsDevPBase.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
4571                                                         | SGX_MMU_PTE_VALID;
4572         MakeKernelPageReadOnly(pui32PT);
4573 #if defined(PDUMP)
4574         /* Add the entry to the PT */
4575         {
4576                 IMG_DEV_PHYADDR sDevPAddr;
4577                 IMG_CPU_PHYADDR sCpuPAddr;
4578                 IMG_UINT32 ui32PageMask;
4579                 IMG_UINT32 ui32PTE;
4580                 PVRSRV_ERROR eErr;
4582                 PDUMP_GET_SCRIPT_AND_FILE_STRING();
4584                 ui32PageMask = sMMUAttrib.ui32PTSize - 1;
4585                 sCpuPAddr = OSMapLinToCPUPhys(psDeviceNode->sDevMemoryInfo.pBMKernelContext->psMMUContext->apsPTInfoList[ui32PDIndex]->hPTPageOSMemHandle, &pui32PT[ui32PTIndex]);
4586                 sDevPAddr = SysCpuPAddrToDevPAddr(sMMUAttrib.sDevId.eDeviceType, sCpuPAddr);
4587                 ui32PTE = *((IMG_UINT32 *) (&pui32PT[ui32PTIndex]));
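		/*
		 * The WRW below records this PTE write in the pdump script: the
		 * destination is the PTE's offset within its PT page, and the source
		 * is reconstructed from the PTE's address field and flag bits so that
		 * playback can relocate the physical address. (A hedged reading of
		 * the format string below, not a normative description of the pdump
		 * WRW syntax.)
		 */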
		eErr = PDumpOSBufprintf(hScript,
								ui32MaxLenScript,
								"WRW :%s:PA_%p%p:0x%08X :%s:PA_%p%08X:0x%08X\r\n",
								sMMUAttrib.sDevId.pszPDumpDevName,
								PDUMP_PT_UNIQUETAG,
								(IMG_PVOID)((sDevPAddr.uiAddr) & ~ui32PageMask),
								(sDevPAddr.uiAddr) & ui32PageMask,
								"EXTSYSCACHE",
								PDUMP_PD_UNIQUETAG,
								(ui32PTE & sMMUAttrib.ui32PDEMask) << sMMUAttrib.ui32PTEAlignShift,
								ui32PTE & ~sMMUAttrib.ui32PDEMask);
		if(eErr != PVRSRV_OK)
		{
			return eErr;
		}
		PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
	}
#endif
	return PVRSRV_OK;
}


/*!
******************************************************************************
	FUNCTION:   MMU_UnmapExtSystemCacheRegs

	PURPOSE:    Unmaps the external system cache control registers

	PARAMETERS: In:  psDeviceNode - device node
	RETURNS:    PVRSRV_ERROR
******************************************************************************/
PVRSRV_ERROR MMU_UnmapExtSystemCacheRegs(PVRSRV_DEVICE_NODE *psDeviceNode)
{
	SYS_DATA *psSysData;
	RA_ARENA *psLocalDevMemArena;
	PVRSRV_SGXDEV_INFO *psDevInfo;
	IMG_UINT32 ui32PDIndex;
	IMG_UINT32 ui32PTIndex;
	IMG_UINT32 *pui32PT;
	PDUMP_MMU_ATTRIB sMMUAttrib;

	psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice;
	sMMUAttrib = psDevInfo->sMMUAttrib;

#if defined(PDUMP)
	MMU_SetPDumpAttribs(&sMMUAttrib, psDeviceNode,
						SGX_MMU_PAGE_MASK,
						SGX_MMU_PT_SIZE * sizeof(IMG_UINT32));
#endif

	SysAcquireData(&psSysData);

	psLocalDevMemArena = psSysData->apsLocalDevMemArena[0];

	/* unmap the MMU page table from the PD */
	ui32PDIndex = (SGX_EXT_SYSTEM_CACHE_REGS_DEVVADDR_BASE & SGX_MMU_PD_MASK) >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
	ui32PTIndex = (SGX_EXT_SYSTEM_CACHE_REGS_DEVVADDR_BASE & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
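	/*
	 * These are necessarily the same index values computed in
	 * MMU_MapExtSystemCacheRegs above; see the worked example there for how
	 * the device virtual base decomposes into PD and PT indices.
	 */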
	/* Only clear the PTE if the PT hasn't already been freed */
	pui32PT = IMG_NULL;
	if (psDeviceNode->sDevMemoryInfo.pBMKernelContext->psMMUContext->apsPTInfoList[ui32PDIndex])
	{
		if (psDeviceNode->sDevMemoryInfo.pBMKernelContext->psMMUContext->apsPTInfoList[ui32PDIndex]->PTPageCpuVAddr)
		{
			pui32PT = (IMG_UINT32 *) psDeviceNode->sDevMemoryInfo.pBMKernelContext->psMMUContext->apsPTInfoList[ui32PDIndex]->PTPageCpuVAddr;
		}
	}

	/* Guard the write: the original code dereferenced pui32PT even when it was never assigned */
	if (pui32PT != IMG_NULL)
	{
		MakeKernelPageReadWrite(pui32PT);
		pui32PT[ui32PTIndex] = 0;
		MakeKernelPageReadOnly(pui32PT);

		PDUMPMEMPTENTRIES(&sMMUAttrib, psDeviceNode->sDevMemoryInfo.pBMKernelContext->psMMUContext->hPDOSMemHandle, &pui32PT[ui32PTIndex], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
	}
	return PVRSRV_OK;
}
#endif /* SUPPORT_EXTERNAL_SYSTEM_CACHE */
#if PAGE_TEST
/*!
******************************************************************************
	FUNCTION:   PageTest

	PURPOSE:    Tests page table memory; for use during device bring-up.

	PARAMETERS: In:  void* pMem - page address (CPU mapped)
	PARAMETERS: In:  IMG_DEV_PHYADDR sDevPAddr - page device physical address
	RETURNS:    None; provides debug output and breaks if an error is detected.
******************************************************************************/
static IMG_VOID PageTest(IMG_VOID* pMem, IMG_DEV_PHYADDR sDevPAddr)
{
	volatile IMG_UINT32 ui32WriteData;
	volatile IMG_UINT32 ui32ReadData;
	volatile IMG_UINT32 *pMem32 = (volatile IMG_UINT32 *)pMem;
	IMG_INT n;
	IMG_BOOL bOK = IMG_TRUE;

	ui32WriteData = 0xffffffff;
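	/*
	 * Two passes over 1024 32-bit words (4kB, i.e. one MMU page assuming the
	 * 4kB page configuration): write and read back all-ones, then all-zeros.
	 * This catches stuck-at faults on the data lines but is deliberately
	 * simple; it is not an exhaustive (e.g. address-line or march) memory
	 * test.
	 */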
	for (n = 0; n < 1024; n++)
	{
		pMem32[n] = ui32WriteData;
		ui32ReadData = pMem32[n];

		if (ui32WriteData != ui32ReadData)
		{
			/* Memory fault */
			PVR_DPF ((PVR_DBG_ERROR, "Error - memory page test failed at device phys address 0x" DEVPADDR_FMT, sDevPAddr.uiAddr + (n<<2)));
			PVR_DBG_BREAK;
			bOK = IMG_FALSE;
		}
	}
	ui32WriteData = 0;

	for (n = 0; n < 1024; n++)
	{
		pMem32[n] = ui32WriteData;
		ui32ReadData = pMem32[n];

		if (ui32WriteData != ui32ReadData)
		{
			/* Memory fault */
			PVR_DPF ((PVR_DBG_ERROR, "Error - memory page test failed at device phys address 0x" DEVPADDR_FMT, sDevPAddr.uiAddr + (n<<2)));
			PVR_DBG_BREAK;
			bOK = IMG_FALSE;
		}
	}
	if (bOK)
	{
		PVR_DPF ((PVR_DBG_VERBOSE, "MMU Page 0x" DEVPADDR_FMT " is OK", sDevPAddr.uiAddr));
	}
	else
	{
		PVR_DPF ((PVR_DBG_VERBOSE, "MMU Page 0x" DEVPADDR_FMT " *** FAILED ***", sDevPAddr.uiAddr));
	}
}
#endif /* PAGE_TEST */
/******************************************************************************
 End of file (mmu.c)
******************************************************************************/