[android-sdk/device-ti-proprietary-open.git] / omap5 / sgx_src / eurasia_km / services4 / srvkm / env / linux / mmap.c
1 /*************************************************************************/ /*!
2 @Title Linux mmap interface
3 @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
4 @License Dual MIT/GPLv2
6 The contents of this file are subject to the MIT license as set out below.
8 Permission is hereby granted, free of charge, to any person obtaining a copy
9 of this software and associated documentation files (the "Software"), to deal
10 in the Software without restriction, including without limitation the rights
11 to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 copies of the Software, and to permit persons to whom the Software is
13 furnished to do so, subject to the following conditions:
15 The above copyright notice and this permission notice shall be included in
16 all copies or substantial portions of the Software.
18 Alternatively, the contents of this file may be used under the terms of
19 the GNU General Public License Version 2 ("GPL") in which case the provisions
20 of GPL are applicable instead of those above.
22 If you wish to allow use of your version of this file only under the terms of
23 GPL, and not to allow others to use your version of this file under the terms
24 of the MIT license, indicate your decision by deleting the provisions above
25 and replace them with the notice and other provisions required by GPL as set
26 out in the file called "GPL-COPYING" included in this distribution. If you do
27 not delete the provisions above, a recipient may use your version of this file
28 under the terms of either the MIT license or GPL.
30 This License is also included in this distribution in the file called
31 "MIT-COPYING".
33 EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
34 PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
35 BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
36 PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
37 COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
38 IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
39 CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
40 */ /**************************************************************************/
42 #include <linux/version.h>
44 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38))
45 #ifndef AUTOCONF_INCLUDED
46 #include <linux/config.h>
47 #endif
48 #endif
50 #include <linux/mm.h>
51 #include <linux/module.h>
52 #include <linux/vmalloc.h>
53 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
54 #include <linux/wrapper.h>
55 #endif
56 #include <linux/slab.h>
57 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
58 #include <linux/highmem.h>
59 #endif
60 #include <asm/io.h>
61 #include <asm/page.h>
62 #include <asm/shmparam.h>
63 #include <asm/pgtable.h>
64 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22))
65 #include <linux/sched.h>
66 #include <asm/current.h>
67 #endif
68 #if defined(SUPPORT_DRI_DRM)
69 #include <drm/drmP.h>
70 #endif
72 #include "services_headers.h"
74 #include "pvrmmap.h"
75 #include "mutils.h"
76 #include "mmap.h"
77 #include "mm.h"
78 #include "proc.h"
79 #include "mutex.h"
80 #include "handle.h"
81 #include "perproc.h"
82 #include "env_perproc.h"
83 #include "bridged_support.h"
84 #if defined(SUPPORT_DRI_DRM)
85 #include "pvr_drm.h"
86 #endif
88 #if !defined(PVR_SECURE_HANDLES) && !defined (SUPPORT_SID_INTERFACE)
89 #error "The mmap code requires PVR_SECURE_HANDLES"
90 #endif
92 /* WARNING:
93 * The mmap code has its own mutex, to prevent a possible deadlock,
94 * when using gPVRSRVLock.
95 * The Linux kernel takes the mm->mmap_sem before calling the mmap
96 * entry points (PVRMMap, MMapVOpen, MMapVClose), but the ioctl
97 * entry point may take mm->mmap_sem during fault handling, or
98 * before calling get_user_pages. If gPVRSRVLock was used in the
99 * mmap entry points, a deadlock could result, due to the ioctl
100 * and mmap code taking the two locks in different orders.
101 * As a corollary to this, the mmap entry points must not call
 * any driver code that relies on gPVRSRVLock being held.
103 */
/* Protects the mmap bookkeeping below. Deliberately separate from
 * gPVRSRVLock to avoid lock-order inversion against mm->mmap_sem
 * (see the WARNING comment above). */
PVRSRV_LINUX_MUTEX g_sMMapMutex;

/* Allocation cache for PKV_OFFSET_STRUCT records. */
static LinuxKMemCache *g_psMemmapCache = NULL;
/* List of registered memory areas (maintained by registration code
 * outside this excerpt). */
static LIST_HEAD(g_sMMapAreaList);
/* Offset structures created for a process but not yet mmap(2)'d;
 * entries are removed again by PVRMMap once the mapping is made. */
static LIST_HEAD(g_sMMapOffsetStructList);
#if defined(DEBUG_LINUX_MMAP_AREAS)
/* Statistics for the proc debug entry. */
static IMG_UINT32 g_ui32RegisteredAreas = 0;
static IMG_UINT32 g_ui32TotalByteSize = 0;
#endif


#if defined(DEBUG_LINUX_MMAP_AREAS)
/* proc filesystem entry exposing mmap debug information. */
static struct proc_dir_entry *g_ProcMMap;
#endif /* defined(DEBUG_LINUX_MMAP_AREAS) */
119 #if !defined(PVR_MAKE_ALL_PFNS_SPECIAL)
120 /*
121 * Now that we are using mmap2 in srvclient, almost (*) the full 32
122 * bit offset is available. The range of values is divided into two.
123 * The first part of the range, from FIRST_PHYSICAL_PFN to
124 * LAST_PHYSICAL_PFN, is for raw page mappings (VM_PFNMAP). The
125 * resulting 43 bit (*) physical address range should be enough for
126 * the current range of processors we support.
127 *
128 * NB: (*) -- the above figures assume 4KB page size. The offset
129 * argument to mmap2() is in units of 4,096 bytes regardless of page
130 * size. Thus, we lose (PAGE_SHIFT-12) bits of resolution on other
131 * architectures.
132 *
133 * The second part of the range, from FIRST_SPECIAL_PFN to LAST_SPECIAL_PFN,
134 * is used for all other mappings. These other mappings will always
135 * consist of pages with associated page structures, and need not
136 * represent a contiguous range of physical addresses.
137 *
138 */
/* Usable bits in an mmap2() page offset: 32 bits of pgoff, minus the
 * (PAGE_SHIFT-12) bits lost because pgoff units are always 4KB. */
#define MMAP2_PGOFF_RESOLUTION (32-PAGE_SHIFT+12)
/* One bit is reserved to split the offset space into the physical
 * and special halves described above. */
#define RESERVED_PGOFF_BITS 1
#define MAX_MMAP_HANDLE ((1UL<<(MMAP2_PGOFF_RESOLUTION-RESERVED_PGOFF_BITS))-1)

/* Lower half of the range: raw page (VM_PFNMAP) mappings. */
#define FIRST_PHYSICAL_PFN 0
#define LAST_PHYSICAL_PFN (FIRST_PHYSICAL_PFN + MAX_MMAP_HANDLE)
/* Upper half of the range: all other ("special") mappings. */
#define FIRST_SPECIAL_PFN (LAST_PHYSICAL_PFN + 1)
#define LAST_SPECIAL_PFN (FIRST_SPECIAL_PFN + MAX_MMAP_HANDLE)
148 #else /* !defined(PVR_MAKE_ALL_PFNS_SPECIAL) */
150 #if PAGE_SHIFT != 12
151 #error This build variant has not yet been made non-4KB page-size aware
152 #endif
154 /*
155 * Since we no longer have to worry about clashes with the mmap
156 * offsets used for pure PFN mappings (VM_PFNMAP), there is greater
157 * freedom in choosing the mmap handles. This is useful if the
158 * mmap offset space has to be shared with another driver component.
159 */
/* Base of the mmap offset range; overridable at build time so the
 * offset space can be shared with another driver component. */
#if defined(PVR_MMAP_OFFSET_BASE)
#define FIRST_SPECIAL_PFN PVR_MMAP_OFFSET_BASE
#else
#define FIRST_SPECIAL_PFN 0x80000000UL
#endif

/* Number of mmap handles available; also overridable at build time. */
#if defined(PVR_NUM_MMAP_HANDLES)
#define MAX_MMAP_HANDLE PVR_NUM_MMAP_HANDLES
#else
#define MAX_MMAP_HANDLE 0x7fffffffUL
#endif
173 #endif /* !defined(PVR_MAKE_ALL_PFNS_SPECIAL) */
175 #if !defined(PVR_MAKE_ALL_PFNS_SPECIAL)
176 static inline IMG_BOOL
177 PFNIsPhysical(IMG_UINT32 pfn)
178 {
179 /* Unsigned, no need to compare >=0 */
180 return (/*(pfn >= FIRST_PHYSICAL_PFN) &&*/ (pfn <= LAST_PHYSICAL_PFN)) ? IMG_TRUE : IMG_FALSE;
181 }
183 static inline IMG_BOOL
184 PFNIsSpecial(IMG_UINT32 pfn)
185 {
186 /* Unsigned, no need to compare <=MAX_UINT */
187 return ((pfn >= FIRST_SPECIAL_PFN) /*&& (pfn <= LAST_SPECIAL_PFN)*/) ? IMG_TRUE : IMG_FALSE;
188 }
189 #endif
#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL)
/*
 * Convert an mmap pgoff in the "special" range back into the memory
 * handle it encodes. PFNs in the physical range do not encode a
 * handle; IMG_NULL is returned for them.
 */
static inline IMG_HANDLE
MMapOffsetToHandle(IMG_UINT32 pfn)
{
	if (PFNIsPhysical(pfn))
	{
		/* NOTE(review): this assert is a tautology inside this branch
		 * (the condition was just tested true, so it can never fire);
		 * the intent was presumably to flag this error path in debug
		 * builds — confirm against upstream. */
		PVR_ASSERT(PFNIsPhysical(pfn));
		return IMG_NULL;
	}
	return (IMG_HANDLE)(pfn - FIRST_SPECIAL_PFN);
}
#endif
/*
 * Convert a memory handle into the mmap pgoff that identifies the
 * mapping, by biasing the handle value into the "special" PFN range.
 * Returns 0 if the handle is too large to encode (i.e. it would
 * already land in the special range before biasing).
 */
static inline IMG_UINT32
#if defined (SUPPORT_SID_INTERFACE)
HandleToMMapOffset(IMG_SID hHandle)
#else
HandleToMMapOffset(IMG_HANDLE hHandle)
#endif
{
	IMG_UINT32 ulHandle = (IMG_UINT32)hHandle;

#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL)
	if (PFNIsSpecial(ulHandle))
	{
		/* NOTE(review): tautological assert (condition just tested
		 * true, so it can never fire); likely meant to flag this
		 * failure path — confirm against upstream. */
		PVR_ASSERT(PFNIsSpecial(ulHandle));
		return 0;
	}
#endif
	return ulHandle + FIRST_SPECIAL_PFN;
}
223 #if !defined(PVR_MAKE_ALL_PFNS_SPECIAL)
224 /*
225 * Determine whether physical or special mappings will be used for
226 * a given memory area. At present, this decision is made on
227 * whether the mapping represents a contiguous range of physical
228 * addresses, which is a requirement for raw page mappings (VM_PFNMAP).
229 * In the VMA structure for such a mapping, vm_pgoff is the PFN
230 * (page frame number, the physical address divided by the page size)
231 * of the first page in the VMA. The second page is assumed to have
232 * PFN (vm_pgoff + 1), the third (vm_pgoff + 2) and so on.
233 */
static inline IMG_BOOL
LinuxMemAreaUsesPhysicalMap(LinuxMemArea *psLinuxMemArea)
{
	/* Physically contiguous areas qualify for raw PFN (VM_PFNMAP)
	 * mappings; everything else takes the "special" path. */
	return LinuxMemAreaPhysIsContig(psLinuxMemArea);
}
239 #endif
#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL)
/* Return the calling thread's ID. */
static inline IMG_UINT32
GetCurrentThreadID(IMG_VOID)
{
	/*
	 * The PID is the thread ID, as each thread is a
	 * separate process.
	 */
	return (IMG_UINT32)current->pid;
}
#endif
253 /*
254 * Create an offset structure, which is used to hold per-process
255 * mmap data.
256 */
257 static PKV_OFFSET_STRUCT
258 CreateOffsetStruct(LinuxMemArea *psLinuxMemArea, IMG_UINT32 ui32Offset, IMG_UINT32 ui32RealByteSize)
259 {
260 PKV_OFFSET_STRUCT psOffsetStruct;
261 #if defined(DEBUG) || defined(DEBUG_LINUX_MMAP_AREAS)
262 const IMG_CHAR *pszName = LinuxMemAreaTypeToString(LinuxMemAreaRootType(psLinuxMemArea));
263 #endif
265 #if defined(DEBUG) || defined(DEBUG_LINUX_MMAP_AREAS)
266 PVR_DPF((PVR_DBG_MESSAGE,
267 "%s(%s, psLinuxMemArea: 0x%p, ui32AllocFlags: 0x%8x)",
268 __FUNCTION__, pszName, psLinuxMemArea, psLinuxMemArea->ui32AreaFlags));
269 #endif
271 PVR_ASSERT(psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC || LinuxMemAreaRoot(psLinuxMemArea)->eAreaType != LINUX_MEM_AREA_SUB_ALLOC);
273 PVR_ASSERT(psLinuxMemArea->bMMapRegistered);
275 psOffsetStruct = KMemCacheAllocWrapper(g_psMemmapCache, GFP_KERNEL);
276 if(psOffsetStruct == IMG_NULL)
277 {
278 PVR_DPF((PVR_DBG_ERROR,"PVRMMapRegisterArea: Couldn't alloc another mapping record from cache"));
279 return IMG_NULL;
280 }
282 psOffsetStruct->ui32MMapOffset = ui32Offset;
284 psOffsetStruct->psLinuxMemArea = psLinuxMemArea;
286 psOffsetStruct->ui32RealByteSize = ui32RealByteSize;
288 /*
289 * We store the TID in case two threads within a process
290 * generate the same offset structure, and both end up on the
291 * list of structures waiting to be mapped, at the same time.
292 * This could happen if two sub areas within the same page are
293 * being mapped at the same time.
294 * The TID allows the mmap entry point to distinguish which
295 * mapping is being done by which thread.
296 */
297 #if !defined(PVR_MAKE_ALL_PFNS_SPECIAL)
298 psOffsetStruct->ui32TID = GetCurrentThreadID();
299 #endif
300 psOffsetStruct->ui32PID = OSGetCurrentProcessIDKM();
302 #if defined(DEBUG_LINUX_MMAP_AREAS)
303 /* Extra entries to support proc filesystem debug info */
304 psOffsetStruct->pszName = pszName;
305 #endif
307 list_add_tail(&psOffsetStruct->sAreaItem, &psLinuxMemArea->sMMapOffsetStructList);
309 return psOffsetStruct;
310 }
/*
 * Free an offset structure: unlink it from its memory area's list and,
 * if it is still queued for mapping, from the global mmap list, then
 * return it to the cache. Caller holds g_sMMapMutex.
 */
static IMG_VOID
DestroyOffsetStruct(PKV_OFFSET_STRUCT psOffsetStruct)
{
#ifdef DEBUG
	IMG_CPU_PHYADDR CpuPAddr;
	CpuPAddr = LinuxMemAreaToCpuPAddr(psOffsetStruct->psLinuxMemArea, 0);
#endif

	list_del(&psOffsetStruct->sAreaItem);

	/* Only on the global list if PVRMMap hasn't consumed it yet. */
	if (psOffsetStruct->bOnMMapList)
	{
		list_del(&psOffsetStruct->sMMapItem);
	}

#ifdef DEBUG
	PVR_DPF((PVR_DBG_MESSAGE, "%s: Table entry: "
	         "psLinuxMemArea=%p, CpuPAddr=0x%08X", __FUNCTION__,
	         psOffsetStruct->psLinuxMemArea,
	         CpuPAddr.uiAddr));
#endif

	KMemCacheFreeWrapper(g_psMemmapCache, psOffsetStruct);
}
339 /*
340 * There are no alignment constraints for mapping requests made by user
341 * mode Services. For this, and potentially other reasons, the
342 * mapping created for a users request may look different to the
343 * original request in terms of size and alignment.
344 *
345 * This function determines an offset that the user can add to the mapping
346 * that is _actually_ created which will point to the memory they are
347 * _really_ interested in.
348 *
349 */
/*
 * Compute the page-aligned size of the mapping that covers the whole
 * memory area (*pui32RealByteSize) and the byte offset into that
 * mapping at which the area's data actually starts (*pui32ByteOffset).
 */
static inline IMG_VOID
DetermineUsersSizeAndByteOffset(LinuxMemArea *psLinuxMemArea,
                                IMG_UINT32 *pui32RealByteSize,
                                IMG_UINT32 *pui32ByteOffset)
{
	IMG_UINT32 ui32PageAlignmentOffset;
	IMG_CPU_PHYADDR CpuPAddr;

	CpuPAddr = LinuxMemAreaToCpuPAddr(psLinuxMemArea, 0);
	/* Distance of the area's start from the preceding page boundary. */
	ui32PageAlignmentOffset = ADDR_TO_PAGE_OFFSET(CpuPAddr.uiAddr);

	*pui32ByteOffset = ui32PageAlignmentOffset;

	*pui32RealByteSize = PAGE_ALIGN(psLinuxMemArea->ui32ByteSize + ui32PageAlignmentOffset);
}
367 /*!
368 *******************************************************************************
370 @Function PVRMMapOSMemHandleToMMapData
372 @Description
374 Determine various parameters needed to mmap a memory area, and to
375 locate the memory within the mapped area.
377 @input psPerProc : Per-process data.
378 @input hMHandle : Memory handle.
379 @input pui32MMapOffset : pointer to location for returned mmap offset.
380 @input pui32ByteOffset : pointer to location for returned byte offset.
381 @input pui32RealByteSize : pointer to location for returned real byte size.
382 @input pui32UserVaddr : pointer to location for returned user mode address.
384 @output pui32MMapOffset : points to mmap offset to be used in mmap2 sys call.
385 @output pui32ByteOffset : points to byte offset of start of memory
386 within mapped area returned by mmap2.
387 @output pui32RealByteSize : points to size of area to be mapped.
388 @output pui32UserVAddr : points to user mode address of start of
389 mapping, or 0 if it hasn't been mapped yet.
391 @Return PVRSRV_ERROR : PVRSRV_OK, or error code.
393 ******************************************************************************/
PVRSRV_ERROR
PVRMMapOSMemHandleToMMapData(PVRSRV_PER_PROCESS_DATA *psPerProc,
#if defined (SUPPORT_SID_INTERFACE)
                             IMG_SID hMHandle,
#else
                             IMG_HANDLE hMHandle,
#endif
                             IMG_UINT32 *pui32MMapOffset,
                             IMG_UINT32 *pui32ByteOffset,
                             IMG_UINT32 *pui32RealByteSize,
                             IMG_UINT32 *pui32UserVAddr)
{
	LinuxMemArea *psLinuxMemArea;
	PKV_OFFSET_STRUCT psOffsetStruct;
	IMG_HANDLE hOSMemHandle;
	PVRSRV_ERROR eError;

	/* Note: g_sMMapMutex, not gPVRSRVLock — see file-top WARNING. */
	LinuxLockMutex(&g_sMMapMutex);

	PVR_ASSERT(PVRSRVGetMaxHandle(psPerProc->psHandleBase) <= MAX_MMAP_HANDLE);

	/* Resolve the services handle to the underlying memory area. */
	eError = PVRSRVLookupOSMemHandle(psPerProc->psHandleBase, &hOSMemHandle, hMHandle);
	if (eError != PVRSRV_OK)
	{
#if defined (SUPPORT_SID_INTERFACE)
		PVR_DPF((PVR_DBG_ERROR, "%s: Lookup of handle %x failed", __FUNCTION__, hMHandle));
#else
		PVR_DPF((PVR_DBG_ERROR, "%s: Lookup of handle %p failed", __FUNCTION__, hMHandle));
#endif

		goto exit_unlock;
	}

	psLinuxMemArea = (LinuxMemArea *)hOSMemHandle;

	/* Size/offset determination differs per area type. */
	if (psLinuxMemArea && (psLinuxMemArea->eAreaType == LINUX_MEM_AREA_ION))
	{
		*pui32RealByteSize = psLinuxMemArea->ui32ByteSize;
		*pui32ByteOffset = psLinuxMemArea->uData.sIONTilerAlloc.planeOffsets[0];
		/* The offsets for the subsequent planes must be co-aligned for user
		 * space mapping and sgx 544 and later. I.e.
		 * psLinuxMemArea->uData.sIONTilerAlloc.planeOffsets[n];
		 */
	}
	else
	{

		/* Sparse mappings have to ask the BM for the virtual size */
		if (psLinuxMemArea->hBMHandle)
		{
			*pui32RealByteSize = BM_GetVirtualSize(psLinuxMemArea->hBMHandle);
			*pui32ByteOffset = 0;
		}
		else
		{
			DetermineUsersSizeAndByteOffset(psLinuxMemArea,
			                                pui32RealByteSize,
			                                pui32ByteOffset);
		}
	}

	/* Check whether this memory area has already been mapped */
	list_for_each_entry(psOffsetStruct, &psLinuxMemArea->sMMapOffsetStructList, sAreaItem)
	{
		if (psPerProc->ui32PID == psOffsetStruct->ui32PID)
		{
			if (!psLinuxMemArea->hBMHandle)
			{
				PVR_ASSERT(*pui32RealByteSize == psOffsetStruct->ui32RealByteSize);
			}
			/*
			 * User mode locking is required to stop two threads racing to
			 * map the same memory area. The lock should prevent a
			 * second thread retrieving mmap data for a given handle,
			 * before the first thread has done the mmap.
			 * Without locking, both threads may attempt the mmap,
			 * and one of them will fail.
			 */
			*pui32MMapOffset = psOffsetStruct->ui32MMapOffset;
			*pui32UserVAddr = psOffsetStruct->ui32UserVAddr;
			PVRSRVOffsetStructIncRef(psOffsetStruct);

			eError = PVRSRV_OK;
			goto exit_unlock;
		}
	}

	/* Memory area won't have been mapped yet */
	*pui32UserVAddr = 0;

	/* Choose the mmap offset: the raw PFN for contiguous (physical)
	 * areas, or an encoding of the handle for everything else. */
#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL)
	if (LinuxMemAreaUsesPhysicalMap(psLinuxMemArea))
	{
		*pui32MMapOffset = LinuxMemAreaToCpuPFN(psLinuxMemArea, 0);
		PVR_ASSERT(PFNIsPhysical(*pui32MMapOffset));
	}
	else
#endif
	{
		*pui32MMapOffset = HandleToMMapOffset(hMHandle);
#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL)
		PVR_ASSERT(PFNIsSpecial(*pui32MMapOffset));
#endif
	}

	psOffsetStruct = CreateOffsetStruct(psLinuxMemArea, *pui32MMapOffset, *pui32RealByteSize);
	if (psOffsetStruct == IMG_NULL)
	{
		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
		goto exit_unlock;
	}

	/*
	 * Offset structures representing physical mappings are added to
	 * a list, so that they can be located when the memory area is mapped.
	 */
	list_add_tail(&psOffsetStruct->sMMapItem, &g_sMMapOffsetStructList);

	psOffsetStruct->bOnMMapList = IMG_TRUE;

	PVRSRVOffsetStructIncRef(psOffsetStruct);

	eError = PVRSRV_OK;

	/* Need to scale up the offset to counter the shifting that
	   is done in the mmap2() syscall, as it expects the pgoff
	   argument to be in units of 4,096 bytes irrespective of
	   page size */
	*pui32MMapOffset = *pui32MMapOffset << (PAGE_SHIFT - 12);

exit_unlock:
	LinuxUnLockMutex(&g_sMMapMutex);

	return eError;
}
531 /*!
532 *******************************************************************************
534 @Function PVRMMapReleaseMMapData
536 @Description
538 Release mmap data.
540 @input psPerProc : Per-process data.
541 @input hMHandle : Memory handle.
542 @input pbMUnmap : pointer to location for munmap flag.
543 @input pui32UserVAddr : pointer to location for user mode address of mapping.
544 @input pui32ByteSize : pointer to location for size of mapping.
546 @Output pbMUnmap : points to flag that indicates whether an munmap is
547 required.
548 @output pui32UserVAddr : points to user mode address to munmap.
550 @Return PVRSRV_ERROR : PVRSRV_OK, or error code.
552 ******************************************************************************/
PVRSRV_ERROR
PVRMMapReleaseMMapData(PVRSRV_PER_PROCESS_DATA *psPerProc,
#if defined (SUPPORT_SID_INTERFACE)
                       IMG_SID hMHandle,
#else
                       IMG_HANDLE hMHandle,
#endif
                       IMG_BOOL *pbMUnmap,
                       IMG_UINT32 *pui32RealByteSize,
                       IMG_UINT32 *pui32UserVAddr)
{
	LinuxMemArea *psLinuxMemArea;
	PKV_OFFSET_STRUCT psOffsetStruct;
	IMG_HANDLE hOSMemHandle;
	PVRSRV_ERROR eError;
	IMG_UINT32 ui32PID = OSGetCurrentProcessIDKM();

	/* Note: g_sMMapMutex, not gPVRSRVLock — see file-top WARNING. */
	LinuxLockMutex(&g_sMMapMutex);

	PVR_ASSERT(PVRSRVGetMaxHandle(psPerProc->psHandleBase) <= MAX_MMAP_HANDLE);

	/* Resolve the services handle to the underlying memory area. */
	eError = PVRSRVLookupOSMemHandle(psPerProc->psHandleBase, &hOSMemHandle, hMHandle);
	if (eError != PVRSRV_OK)
	{
#if defined (SUPPORT_SID_INTERFACE)
		PVR_DPF((PVR_DBG_ERROR, "%s: Lookup of handle %x failed", __FUNCTION__, hMHandle));
#else
		PVR_DPF((PVR_DBG_ERROR, "%s: Lookup of handle %p failed", __FUNCTION__, hMHandle));
#endif

		goto exit_unlock;
	}

	psLinuxMemArea = (LinuxMemArea *)hOSMemHandle;

	/* Find the offset structure */
	list_for_each_entry(psOffsetStruct, &psLinuxMemArea->sMMapOffsetStructList, sAreaItem)
	{
		if (psOffsetStruct->ui32PID == ui32PID)
		{
			if (psOffsetStruct->ui32RefCount == 0)
			{
				PVR_DPF((PVR_DBG_ERROR, "%s: Attempt to release mmap data with zero reference count for offset struct 0x%p, memory area %p", __FUNCTION__, psOffsetStruct, psLinuxMemArea));
				eError = PVRSRV_ERROR_STILL_MAPPED;
				goto exit_unlock;
			}

			PVRSRVOffsetStructDecRef(psOffsetStruct);

			/* Tell the caller to munmap only when this was the last
			 * reference and a user mapping actually exists. */
			*pbMUnmap = (IMG_BOOL)((psOffsetStruct->ui32RefCount == 0) && (psOffsetStruct->ui32UserVAddr != 0));

			*pui32UserVAddr = (*pbMUnmap) ? psOffsetStruct->ui32UserVAddr : 0;
			*pui32RealByteSize = (*pbMUnmap) ? psOffsetStruct->ui32RealByteSize : 0;

			eError = PVRSRV_OK;
			goto exit_unlock;
		}
	}

	/* MMap data not found */
#if defined (SUPPORT_SID_INTERFACE)
	PVR_DPF((PVR_DBG_ERROR, "%s: Mapping data not found for handle %x (memory area %p)", __FUNCTION__, hMHandle, psLinuxMemArea));
#else
	PVR_DPF((PVR_DBG_ERROR, "%s: Mapping data not found for handle %p (memory area %p)", __FUNCTION__, hMHandle, psLinuxMemArea));
#endif

	eError = PVRSRV_ERROR_MAPPING_NOT_FOUND;

exit_unlock:
	LinuxUnLockMutex(&g_sMMapMutex);

	return eError;
}
/*
 * Find the offset structure, queued for mapping by the current
 * process, that matches the given mmap offset and size.
 * Caller holds g_sMMapMutex. Returns IMG_NULL if none matches.
 */
static inline PKV_OFFSET_STRUCT
FindOffsetStructByOffset(IMG_UINT32 ui32Offset, IMG_UINT32 ui32RealByteSize)
{
	PKV_OFFSET_STRUCT psOffsetStruct;
#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL)
	IMG_UINT32 ui32TID = GetCurrentThreadID();
#endif
	IMG_UINT32 ui32PID = OSGetCurrentProcessIDKM();

	list_for_each_entry(psOffsetStruct, &g_sMMapOffsetStructList, sMMapItem)
	{
		/* Match offset, size and owning process. */
		if (ui32Offset == psOffsetStruct->ui32MMapOffset && ui32RealByteSize == psOffsetStruct->ui32RealByteSize && psOffsetStruct->ui32PID == ui32PID)
		{
#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL)
			/*
			 * If the offset is physical, make sure the thread IDs match,
			 * as different threads may be mapping different memory areas
			 * with the same offset.
			 */
			if (!PFNIsPhysical(ui32Offset) || psOffsetStruct->ui32TID == ui32TID)
#endif
			{
				return psOffsetStruct;
			}
		}
	}

	return IMG_NULL;
}
658 /*
659 * Map a memory area into user space.
660 * Note, the ui32ByteOffset is _not_ implicitly page aligned since
661 * LINUX_MEM_AREA_SUB_ALLOC LinuxMemAreas have no alignment constraints.
662 */
/*
 * Map a memory area into the user address range described by ps_vma.
 * Sub-allocations recurse onto their root area with an adjusted byte
 * offset. Contiguous areas are mapped in one IO_REMAP_PFN_RANGE call;
 * otherwise pages are inserted one at a time. Returns IMG_TRUE on
 * success. Caller holds g_sMMapMutex (called from the mmap path).
 */
static IMG_BOOL
DoMapToUser(LinuxMemArea *psLinuxMemArea,
            struct vm_area_struct* ps_vma,
            IMG_UINT32 ui32ByteOffset)
{
	IMG_UINT32 ui32ByteSize;

	if ((psLinuxMemArea->hBMHandle) && (ui32ByteOffset != 0))
	{
		/* Partial mapping of sparse allocations should never happen */
		return IMG_FALSE;
	}

	if (psLinuxMemArea->eAreaType == LINUX_MEM_AREA_SUB_ALLOC)
	{
		return DoMapToUser(LinuxMemAreaRoot(psLinuxMemArea), /* PRQA S 3670 */ /* allow recursion */
		                   ps_vma,
		                   psLinuxMemArea->uData.sSubAlloc.ui32ByteOffset + ui32ByteOffset);
	}

	/*
	 * Note that ui32ByteSize may be larger than the size of the memory
	 * area being mapped, as the former is a multiple of the page size.
	 */
	ui32ByteSize = ps_vma->vm_end - ps_vma->vm_start;
	PVR_ASSERT(ADDR_TO_PAGE_OFFSET(ui32ByteSize) == 0);

#if defined (__sparc__)
	/*
	 * For LINUX_MEM_AREA_EXTERNAL_KV, we don't know where the address range
	 * we are being asked to map has come from, that is, whether it is memory
	 * or I/O. For all architectures other than SPARC, there is no distinction.
	 * Since we don't currently support SPARC, we won't worry about it.
	 */
#error "SPARC not supported"
#endif

#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL)
	if (PFNIsPhysical(ps_vma->vm_pgoff))
	{
		IMG_INT result;

		PVR_ASSERT(LinuxMemAreaPhysIsContig(psLinuxMemArea));
		PVR_ASSERT(LinuxMemAreaToCpuPFN(psLinuxMemArea, ui32ByteOffset) == ps_vma->vm_pgoff);

		/*
		 * Since the memory is contiguous, we can map the whole range in one
		 * go .
		 */

		PVR_ASSERT(psLinuxMemArea->hBMHandle == IMG_NULL);

		result = IO_REMAP_PFN_RANGE(ps_vma, ps_vma->vm_start, ps_vma->vm_pgoff, ui32ByteSize, ps_vma->vm_page_prot);

		if(result == 0)
		{
			return IMG_TRUE;
		}

		/* Fall through to the page-by-page path below. */
		PVR_DPF((PVR_DBG_MESSAGE, "%s: Failed to map contiguous physical address range (%d), trying non-contiguous path", __FUNCTION__, result));
	}
#endif

	{
		/*
		 * Memory may be non-contiguous, so we map the range page,
		 * by page. Since VM_PFNMAP mappings are assumed to be physically
		 * contiguous, we can't legally use REMAP_PFN_RANGE (that is, we
		 * could, but the resulting VMA may confuse other bits of the kernel
		 * that attempt to interpret it).
		 * The only alternative is to use VM_INSERT_PAGE, which requires
		 * finding the page structure corresponding to each page, or
		 * if mixed maps are supported (VM_MIXEDMAP), vm_insert_mixed.
		 */
		IMG_UINT32 ulVMAPos;
		IMG_UINT32 ui32ByteEnd = ui32ByteOffset + ui32ByteSize;
		IMG_UINT32 ui32PA;
		IMG_UINT32 ui32AdjustedPA = ui32ByteOffset; /* skips unmapped sparse pages */
#if defined(PVR_MAKE_ALL_PFNS_SPECIAL)
		IMG_BOOL bMixedMap = IMG_FALSE;
#endif
		/* First pass, validate the page frame numbers */
		for(ui32PA = ui32ByteOffset; ui32PA < ui32ByteEnd; ui32PA += PAGE_SIZE)
		{
			IMG_UINT32 pfn;
			IMG_BOOL bMapPage = IMG_TRUE;

			if (psLinuxMemArea->hBMHandle)
			{
				if (!BM_MapPageAtOffset(psLinuxMemArea->hBMHandle, ui32PA))
				{
					bMapPage = IMG_FALSE;
				}
			}

			if (bMapPage)
			{
				pfn = LinuxMemAreaToCpuPFN(psLinuxMemArea, ui32AdjustedPA);
				if (!pfn_valid(pfn))
				{
#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL)
					PVR_DPF((PVR_DBG_ERROR,"%s: Error - PFN invalid: 0x%x", __FUNCTION__, pfn));
					return IMG_FALSE;
#else
					/* A PFN without a page struct forces a mixed map. */
					bMixedMap = IMG_TRUE;
#endif
				}
				ui32AdjustedPA += PAGE_SIZE;
			}
		}

#if defined(PVR_MAKE_ALL_PFNS_SPECIAL)
		if (bMixedMap)
		{
			ps_vma->vm_flags |= VM_MIXEDMAP;
		}
#endif
		/* Second pass, get the page structures and insert the pages */
		ulVMAPos = ps_vma->vm_start;
		ui32AdjustedPA = ui32ByteOffset;
		for(ui32PA = ui32ByteOffset; ui32PA < ui32ByteEnd; ui32PA += PAGE_SIZE)
		{
			IMG_UINT32 pfn;
			IMG_INT result;
			IMG_BOOL bMapPage = IMG_TRUE;

			if (psLinuxMemArea->hBMHandle)
			{
				/* We have a sparse allocation, check if this page should be mapped */
				if (!BM_MapPageAtOffset(psLinuxMemArea->hBMHandle, ui32PA))
				{
					bMapPage = IMG_FALSE;
				}
			}

			if (bMapPage)
			{
				pfn = LinuxMemAreaToCpuPFN(psLinuxMemArea, ui32AdjustedPA);

#if defined(PVR_MAKE_ALL_PFNS_SPECIAL)
				if (bMixedMap)
				{
					result = vm_insert_mixed(ps_vma, ulVMAPos, pfn);
					if(result != 0)
					{
						PVR_DPF((PVR_DBG_ERROR,"%s: Error - vm_insert_mixed failed (%d)", __FUNCTION__, result));
						return IMG_FALSE;
					}
				}
				else
#endif
				{
					struct page *psPage;

					PVR_ASSERT(pfn_valid(pfn));

					psPage = pfn_to_page(pfn);

					result = VM_INSERT_PAGE(ps_vma, ulVMAPos, psPage);
					if(result != 0)
					{
						PVR_DPF((PVR_DBG_ERROR,"%s: Error - VM_INSERT_PAGE failed (%d)", __FUNCTION__, result));
						return IMG_FALSE;
					}
				}
				ui32AdjustedPA += PAGE_SIZE;
			}
			/* User address always advances, even over unmapped pages. */
			ulVMAPos += PAGE_SIZE;
		}
	}

	return IMG_TRUE;
}
/*
 * Unlocked worker for the VMA open entry point: account for a new
 * mapping of the offset structure attached to the VMA.
 * Caller holds g_sMMapMutex.
 */
static IMG_VOID
MMapVOpenNoLock(struct vm_area_struct* ps_vma)
{
	PKV_OFFSET_STRUCT psOffsetStruct = (PKV_OFFSET_STRUCT)ps_vma->vm_private_data;

	PVR_ASSERT(psOffsetStruct != IMG_NULL);
	PVR_ASSERT(!psOffsetStruct->bOnMMapList);

	PVRSRVOffsetStructIncMapped(psOffsetStruct);
	if (psOffsetStruct->ui32Mapped > 1)
	{
		/* A second mapping means the VMA was duplicated (e.g. fork). */
		PVR_DPF((PVR_DBG_WARNING, "%s: Offset structure 0x%p is being shared across processes (psOffsetStruct->ui32Mapped: %u)", __FUNCTION__, psOffsetStruct, psOffsetStruct->ui32Mapped));
		PVR_ASSERT((ps_vma->vm_flags & VM_DONTCOPY) == 0);
	}

#if defined(DEBUG_LINUX_MMAP_AREAS)

	PVR_DPF((PVR_DBG_MESSAGE,
	         "%s: psLinuxMemArea 0x%p, KVAddress 0x%p MMapOffset %d, ui32Mapped %d",
	         __FUNCTION__,
	         psOffsetStruct->psLinuxMemArea,
	         LinuxMemAreaToCpuVAddr(psOffsetStruct->psLinuxMemArea),
	         psOffsetStruct->ui32MMapOffset,
	         psOffsetStruct->ui32Mapped));
#endif
}
866 /*
867 * Linux mmap open entry point.
868 */
static void
MMapVOpen(struct vm_area_struct* ps_vma)
{
	/* Serialise against the other mmap entry points. */
	LinuxLockMutex(&g_sMMapMutex);

	MMapVOpenNoLock(ps_vma);

	LinuxUnLockMutex(&g_sMMapMutex);
}
/*
 * Unlocked worker for the VMA close entry point: drop the mapping
 * count and destroy the offset structure when the last mapping goes.
 * Caller holds g_sMMapMutex.
 */
static IMG_VOID
MMapVCloseNoLock(struct vm_area_struct* ps_vma)
{
	PKV_OFFSET_STRUCT psOffsetStruct = (PKV_OFFSET_STRUCT)ps_vma->vm_private_data;
	PVR_ASSERT(psOffsetStruct != IMG_NULL);

#if defined(DEBUG_LINUX_MMAP_AREAS)
	PVR_DPF((PVR_DBG_MESSAGE,
	         "%s: psLinuxMemArea %p, CpuVAddr %p ui32MMapOffset %d, ui32Mapped %d",
	         __FUNCTION__,
	         psOffsetStruct->psLinuxMemArea,
	         LinuxMemAreaToCpuVAddr(psOffsetStruct->psLinuxMemArea),
	         psOffsetStruct->ui32MMapOffset,
	         psOffsetStruct->ui32Mapped));
#endif

	PVR_ASSERT(!psOffsetStruct->bOnMMapList);
	PVRSRVOffsetStructDecMapped(psOffsetStruct);
	if (psOffsetStruct->ui32Mapped == 0)
	{
		if (psOffsetStruct->ui32RefCount != 0)
		{
			PVR_DPF((PVR_DBG_MESSAGE, "%s: psOffsetStruct %p has non-zero reference count (ui32RefCount = %u). User mode address of start of mapping: 0x%x", __FUNCTION__, psOffsetStruct, psOffsetStruct->ui32RefCount, psOffsetStruct->ui32UserVAddr));
		}

		DestroyOffsetStruct(psOffsetStruct);
	}

	ps_vma->vm_private_data = NULL;
}
911 /*
912 * Linux mmap close entry point.
913 */
static void
MMapVClose(struct vm_area_struct* ps_vma)
{
	/* Serialise against the other mmap entry points. */
	LinuxLockMutex(&g_sMMapMutex);

	MMapVCloseNoLock(ps_vma);

	LinuxUnLockMutex(&g_sMMapMutex);
}
924 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
925 /*
926 * This vma operation is used to read data from mmap regions. It is called
927 * by access_process_vm, which is called to handle PTRACE_PEEKDATA ptrace
928 * requests and reads from /proc/<pid>/mem.
929 */
930 static int MMapVAccess(struct vm_area_struct *ps_vma, unsigned long addr,
931 void *buf, int len, int write)
932 {
933 PKV_OFFSET_STRUCT psOffsetStruct;
934 LinuxMemArea *psLinuxMemArea;
935 unsigned long ulOffset;
936 int iRetVal = -EINVAL;
937 IMG_VOID *pvKernelAddr;
939 LinuxLockMutex(&g_sMMapMutex);
941 psOffsetStruct = (PKV_OFFSET_STRUCT)ps_vma->vm_private_data;
942 psLinuxMemArea = psOffsetStruct->psLinuxMemArea;
943 ulOffset = addr - ps_vma->vm_start;
945 if (ulOffset+len > psLinuxMemArea->ui32ByteSize)
946 /* Out of range. We shouldn't get here, because the kernel will do
947 the necessary checks before calling access_process_vm. */
948 goto exit_unlock;
950 pvKernelAddr = LinuxMemAreaToCpuVAddr(psLinuxMemArea);
952 if (pvKernelAddr)
953 {
954 memcpy(buf, pvKernelAddr+ulOffset, len);
955 iRetVal = len;
956 }
957 else
958 {
959 IMG_UINT32 pfn, ui32OffsetInPage;
960 struct page *page;
962 pfn = LinuxMemAreaToCpuPFN(psLinuxMemArea, ulOffset);
964 if (!pfn_valid(pfn))
965 goto exit_unlock;
967 page = pfn_to_page(pfn);
968 ui32OffsetInPage = ADDR_TO_PAGE_OFFSET(ulOffset);
970 if (ui32OffsetInPage+len > PAGE_SIZE)
971 /* The region crosses a page boundary */
972 goto exit_unlock;
974 pvKernelAddr = kmap(page);
975 memcpy(buf, pvKernelAddr+ui32OffsetInPage, len);
976 kunmap(page);
978 iRetVal = len;
979 }
981 exit_unlock:
982 LinuxUnLockMutex(&g_sMMapMutex);
983 return iRetVal;
984 }
985 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26) */
/* VMA operations installed on mappings created by this driver. */
static struct vm_operations_struct MMapIOOps =
{
	.open=MMapVOpen,
	.close=MMapVClose,
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
	.access=MMapVAccess,
#endif
};
997 /*!
998 *******************************************************************************
1000 @Function PVRMMap
1002 @Description
1004 Driver mmap entry point.
1006 @input pFile : unused.
1007 @input ps_vma : pointer to linux memory area descriptor.
1009 @Return 0, or Linux error code.
1011 ******************************************************************************/
int
PVRMMap(struct file* pFile, struct vm_area_struct* ps_vma)
{
	LinuxMemArea *psFlushMemArea = IMG_NULL;
	PKV_OFFSET_STRUCT psOffsetStruct;
	IMG_UINT32 ui32ByteSize;
	IMG_VOID *pvBase = IMG_NULL;
	int iRetVal = 0;
	IMG_UINT32 ui32ByteOffset = 0; /* Keep compiler happy */
	IMG_UINT32 ui32FlushSize = 0;

	PVR_UNREFERENCED_PARAMETER(pFile);

	LinuxLockMutex(&g_sMMapMutex);

	ui32ByteSize = ps_vma->vm_end - ps_vma->vm_start;

	PVR_DPF((PVR_DBG_MESSAGE, "%s: Received mmap(2) request with ui32MMapOffset 0x%08lx,"
			" and ui32ByteSize %d(0x%08x)",
			__FUNCTION__,
			ps_vma->vm_pgoff,
			ui32ByteSize, ui32ByteSize));

	/* The (vm_pgoff, size) pair identifies an offset structure registered
	   earlier by the bridge when mmap data was requested. */
	psOffsetStruct = FindOffsetStructByOffset(ps_vma->vm_pgoff, ui32ByteSize);

	if (psOffsetStruct == IMG_NULL)
	{
#if defined(SUPPORT_DRI_DRM)
		LinuxUnLockMutex(&g_sMMapMutex);

#if !defined(SUPPORT_DRI_DRM_EXT)
		/* Pass unknown requests onto the DRM module */
		return drm_mmap(pFile, ps_vma);
#else
		/*
		 * Indicate to caller that the request is not for us.
		 * Do not return this error elsewhere in this function, as the
		 * caller may use it as a clue as to whether the mmap request
		 * should be passed on to another component (e.g. drm_mmap).
		 */
		return -ENOENT;
#endif
#else
		PVR_UNREFERENCED_PARAMETER(pFile);

		PVR_DPF((PVR_DBG_ERROR,
		     "%s: Attempted to mmap unregistered area at vm_pgoff 0x%lx",
		     __FUNCTION__, ps_vma->vm_pgoff));
		iRetVal = -EINVAL;
#endif
		goto unlock_and_return;
	}

	/* This mapping now owns the offset struct; remove it from the list of
	   structs waiting to be mapped so it cannot be matched a second time.
	   Note: on any later failure path, unlock_and_return destroys it. */
	list_del(&psOffsetStruct->sMMapItem);
	psOffsetStruct->bOnMMapList = IMG_FALSE;

	/* Only support shared writeable mappings */
	if (((ps_vma->vm_flags & VM_WRITE) != 0) &&
	    ((ps_vma->vm_flags & VM_SHARED) == 0))
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: Cannot mmap non-shareable writable areas", __FUNCTION__));
		iRetVal = -EINVAL;
		goto unlock_and_return;
	}

	PVR_DPF((PVR_DBG_MESSAGE, "%s: Mapped psLinuxMemArea 0x%p\n",
		 __FUNCTION__, psOffsetStruct->psLinuxMemArea));

	/* Keep the pages out of core dumps / swap and mark as device memory */
	ps_vma->vm_flags |= VM_RESERVED;
	ps_vma->vm_flags |= VM_IO;

	/*
	 * Disable mremap because our nopage handler assumes all
	 * page requests have already been validated.
	 */
	ps_vma->vm_flags |= VM_DONTEXPAND;

	/* Don't allow mapping to be inherited across a process fork */
	ps_vma->vm_flags |= VM_DONTCOPY;

	/* Stashed for MMapVOpen/MMapVClose/MMapVAccess */
	ps_vma->vm_private_data = (void *)psOffsetStruct;

	/* Select page protection from the area's cache-type flags */
	switch(psOffsetStruct->psLinuxMemArea->ui32AreaFlags & PVRSRV_HAP_CACHETYPE_MASK)
	{
		case PVRSRV_HAP_CACHED:
			/* This is the default, do nothing. */
			break;
		case PVRSRV_HAP_WRITECOMBINE:
			ps_vma->vm_page_prot = PGPROT_WC(ps_vma->vm_page_prot);
			break;
		case PVRSRV_HAP_UNCACHED:
			ps_vma->vm_page_prot = PGPROT_UC(ps_vma->vm_page_prot);
			break;
		default:
			PVR_DPF((PVR_DBG_ERROR, "%s: unknown cache type", __FUNCTION__));
			iRetVal = -EINVAL;
			goto unlock_and_return;
	}

#if defined(SGX544) && defined(SGX_FEATURE_MP)
	/* In OMAP5, the A15 no longer masks an issue with the interconnect.
	   writecombined access to the Tiler 2D memory will encounter errors due to
	   interconnect bus accesses. This will result in a SIGBUS error with a
	   "non-line fetch abort". The workaround is to use a shared device
	   access. */
	if (psOffsetStruct->psLinuxMemArea->eAreaType == LINUX_MEM_AREA_ION)
		ps_vma->vm_page_prot = __pgprot_modify(ps_vma->vm_page_prot,
					L_PTE_MT_MASK, L_PTE_MT_DEV_SHARED);
#endif

	/* Install open and close handlers for ref-counting */
	ps_vma->vm_ops = &MMapIOOps;

	if(!DoMapToUser(psOffsetStruct->psLinuxMemArea, ps_vma, 0))
	{
		iRetVal = -EAGAIN;
		goto unlock_and_return;
	}

	PVR_ASSERT(psOffsetStruct->ui32UserVAddr == 0);

	psOffsetStruct->ui32UserVAddr = ps_vma->vm_start;

	/* Invalidate for the ION memory is performed during the mapping */
	if(psOffsetStruct->psLinuxMemArea->eAreaType == LINUX_MEM_AREA_ION)
		psOffsetStruct->psLinuxMemArea->bNeedsCacheInvalidate = IMG_FALSE;

	/* Compute the flush region (if necessary) inside the mmap mutex */
	if(psOffsetStruct->psLinuxMemArea->bNeedsCacheInvalidate)
	{
		psFlushMemArea = psOffsetStruct->psLinuxMemArea;

		/* Sparse mappings have to ask the BM for the virtual size */
		if (psFlushMemArea->hBMHandle)
		{
			pvBase = (IMG_VOID *)ps_vma->vm_start;
			ui32ByteOffset = 0;
			ui32FlushSize = BM_GetVirtualSize(psFlushMemArea->hBMHandle);
		}
		else
		{
			IMG_UINT32 ui32DummyByteSize;

			DetermineUsersSizeAndByteOffset(psFlushMemArea,
							&ui32DummyByteSize,
							&ui32ByteOffset);

			pvBase = (IMG_VOID *)ps_vma->vm_start + ui32ByteOffset;
			ui32FlushSize = psFlushMemArea->ui32ByteSize;
		}

		/* Clear the flag under the mutex; the actual invalidate happens
		   below, after the mutex is dropped. */
		psFlushMemArea->bNeedsCacheInvalidate = IMG_FALSE;
	}

	/* Call the open routine to increment the usage count */
	MMapVOpenNoLock(ps_vma);

	PVR_DPF((PVR_DBG_MESSAGE, "%s: Mapped area at offset 0x%08lx\n",
		 __FUNCTION__, ps_vma->vm_pgoff));

unlock_and_return:
	/* On failure the offset struct is no longer on any list; free it */
	if (iRetVal != 0 && psOffsetStruct != IMG_NULL)
	{
		DestroyOffsetStruct(psOffsetStruct);
	}

	LinuxUnLockMutex(&g_sMMapMutex);

	/* Cache maintenance is deliberately done outside the mutex */
	if(psFlushMemArea)
	{
		OSInvalidateCPUCacheRangeKM(psFlushMemArea, ui32ByteOffset, pvBase,
									ui32FlushSize);
	}

	return iRetVal;
}
1190 #if defined(DEBUG_LINUX_MMAP_AREAS)
1192 /*
1193 * Lock MMap regions list (called on page start/stop while reading /proc/mmap)
1195 * sfile : seq_file that handles /proc file
1196 * start : TRUE if it's start, FALSE if it's stop
1197 *
1198 */
1199 static void ProcSeqStartstopMMapRegistations(struct seq_file *sfile,IMG_BOOL start)
1200 {
1201 if(start)
1202 {
1203 LinuxLockMutex(&g_sMMapMutex);
1204 }
1205 else
1206 {
1207 LinuxUnLockMutex(&g_sMMapMutex);
1208 }
1209 }
1212 /*
1213 * Convert offset (index from KVOffsetTable) to element
1214 * (called when reading /proc/mmap file)
1216 * sfile : seq_file that handles /proc file
1217 * off : index into the KVOffsetTable from which to print
1218 *
1219 * returns void* : Pointer to element that will be dumped
1220 *
1221 */
static void* ProcSeqOff2ElementMMapRegistrations(struct seq_file *sfile, loff_t off)
{
	LinuxMemArea *psLinuxMemArea;

	/* Offset 0 is the header row, not a real record */
	if(!off)
	{
		return PVR_PROC_SEQ_START_TOKEN;
	}

	/* Walk every offset structure of every registered memory area;
	   the off'th one (1-based) is the record to display. */
	list_for_each_entry(psLinuxMemArea, &g_sMMapAreaList, sMMapItem)
	{
		PKV_OFFSET_STRUCT psOffsetStruct;

		list_for_each_entry(psOffsetStruct, &psLinuxMemArea->sMMapOffsetStructList, sAreaItem)
		{
			off--;
			if (off == 0)
			{
				PVR_ASSERT(psOffsetStruct->psLinuxMemArea == psLinuxMemArea);
				return (void*)psOffsetStruct;
			}
		}
	}
	/* Past the last record: NULL ends the seq_file iteration */
	return (void*)0;
}
1247 /*
1248 * Gets next MMap element to show. (called when reading /proc/mmap file)
1250 * sfile : seq_file that handles /proc file
1251 * el : actual element
1252 * off : index into the KVOffsetTable from which to print
1253 *
1254 * returns void* : Pointer to element to show (0 ends iteration)
1255 */
/* seq_file "next" hook: the element is found purely from the offset,
 * so just re-run the offset-to-element lookup. */
static void* ProcSeqNextMMapRegistrations(struct seq_file *sfile,void* el,loff_t off)
{
	return ProcSeqOff2ElementMMapRegistrations(sfile,off);
}
1262 /*
1263 * Show MMap element (called when reading /proc/mmap file)
1265 * sfile : seq_file that handles /proc file
1266 * el : actual element
1267 *
1268 */
static void ProcSeqShowMMapRegistrations(struct seq_file *sfile, void *el)
{
	KV_OFFSET_STRUCT *psOffsetStruct = (KV_OFFSET_STRUCT*)el;
	LinuxMemArea *psLinuxMemArea;
	IMG_UINT32 ui32RealByteSize;
	IMG_UINT32 ui32ByteOffset;

	/* The start token prints the header (plain text or XML depending on
	   DEBUG_LINUX_XML_PROC_FILES) */
	if(el == PVR_PROC_SEQ_START_TOKEN)
	{
		seq_printf( sfile,
#if !defined(DEBUG_LINUX_XML_PROC_FILES)
						  "Allocations registered for mmap: %u\n"
						  "In total these areas correspond to %u bytes\n"
						  "psLinuxMemArea "
						  "UserVAddr "
						  "KernelVAddr "
						  "CpuPAddr "
						  "MMapOffset "
						  "ByteLength "
						  "LinuxMemType             "
						  "Pid   Name     Flags\n",
#else
						  "<mmap_header>\n"
						  "\t<count>%u</count>\n"
						  "\t<bytes>%u</bytes>\n"
						  "</mmap_header>\n",
#endif
						  g_ui32RegisteredAreas,
						  g_ui32TotalByteSize
						  );
		return;
	}

	psLinuxMemArea = psOffsetStruct->psLinuxMemArea;

	/* Sparse areas report a user-visible size/offset differing from the
	   raw area; query the canonical values */
	DetermineUsersSizeAndByteOffset(psLinuxMemArea,
									&ui32RealByteSize,
									&ui32ByteOffset);

	/* One record per offset structure; argument order matches both the
	   plain-text and XML format strings */
	seq_printf( sfile,
#if !defined(DEBUG_LINUX_XML_PROC_FILES)
					   "%-8p       %08x %-8p %08x %08x   %-8d   %-24s %-5u %-8s %08x(%s)\n",
#else
					   "<mmap_record>\n"
					   "\t<pointer>%-8p</pointer>\n"
					   "\t<user_virtual>%-8x</user_virtual>\n"
					   "\t<kernel_virtual>%-8p</kernel_virtual>\n"
					   "\t<cpu_physical>%08x</cpu_physical>\n"
					   "\t<mmap_offset>%08x</mmap_offset>\n"
					   "\t<bytes>%-8d</bytes>\n"
					   "\t<linux_mem_area_type>%-24s</linux_mem_area_type>\n"
					   "\t<pid>%-5u</pid>\n"
					   "\t<name>%-8s</name>\n"
					   "\t<flags>%08x</flags>\n"
					   "\t<flags_string>%s</flags_string>\n"
					   "</mmap_record>\n",
#endif
					   psLinuxMemArea,
					   psOffsetStruct->ui32UserVAddr + ui32ByteOffset,
					   LinuxMemAreaToCpuVAddr(psLinuxMemArea),
					   LinuxMemAreaToCpuPAddr(psLinuxMemArea,0).uiAddr,
					   psOffsetStruct->ui32MMapOffset,
					   psLinuxMemArea->ui32ByteSize,
					   LinuxMemAreaTypeToString(psLinuxMemArea->eAreaType),
					   psOffsetStruct->ui32PID,
					   psOffsetStruct->pszName,
					   psLinuxMemArea->ui32AreaFlags,
					   HAPFlagsToString(psLinuxMemArea->ui32AreaFlags));
}
1339 #endif
1342 /*!
1343 *******************************************************************************
1345 @Function PVRMMapRegisterArea
1347 @Description
1349 Register a memory area with the mmap code.
1351 @input psLinuxMemArea : pointer to memory area.
1353 @Return PVRSRV_OK, or PVRSRV_ERROR.
1355 ******************************************************************************/
1356 PVRSRV_ERROR
1357 PVRMMapRegisterArea(LinuxMemArea *psLinuxMemArea)
1358 {
1359 PVRSRV_ERROR eError;
1360 #if defined(DEBUG) || defined(DEBUG_LINUX_MMAP_AREAS)
1361 const IMG_CHAR *pszName = LinuxMemAreaTypeToString(LinuxMemAreaRootType(psLinuxMemArea));
1362 #endif
1364 LinuxLockMutex(&g_sMMapMutex);
1366 #if defined(DEBUG) || defined(DEBUG_LINUX_MMAP_AREAS)
1367 PVR_DPF((PVR_DBG_MESSAGE,
1368 "%s(%s, psLinuxMemArea 0x%p, ui32AllocFlags 0x%8x)",
1369 __FUNCTION__, pszName, psLinuxMemArea, psLinuxMemArea->ui32AreaFlags));
1370 #endif
1372 PVR_ASSERT(psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC || LinuxMemAreaRoot(psLinuxMemArea)->eAreaType != LINUX_MEM_AREA_SUB_ALLOC);
1374 /* Check this mem area hasn't already been registered */
1375 if(psLinuxMemArea->bMMapRegistered)
1376 {
1377 PVR_DPF((PVR_DBG_ERROR, "%s: psLinuxMemArea 0x%p is already registered",
1378 __FUNCTION__, psLinuxMemArea));
1379 eError = PVRSRV_ERROR_INVALID_PARAMS;
1380 goto exit_unlock;
1381 }
1383 list_add_tail(&psLinuxMemArea->sMMapItem, &g_sMMapAreaList);
1385 psLinuxMemArea->bMMapRegistered = IMG_TRUE;
1387 #if defined(DEBUG_LINUX_MMAP_AREAS)
1388 g_ui32RegisteredAreas++;
1389 /*
1390 * Sub memory areas are excluded from g_ui32TotalByteSize so that we
1391 * don't count memory twice, once for the parent and again for sub
1392 * allocationis.
1393 */
1394 if (psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC)
1395 {
1396 g_ui32TotalByteSize += psLinuxMemArea->ui32ByteSize;
1397 }
1398 #endif
1400 eError = PVRSRV_OK;
1402 exit_unlock:
1403 LinuxUnLockMutex(&g_sMMapMutex);
1405 return eError;
1406 }
1409 /*!
1410 *******************************************************************************
@Function      PVRMMapRemoveRegisteredArea
1414 @Description
1416 Unregister a memory area with the mmap code.
1418 @input psLinuxMemArea : pointer to memory area.
1420 @Return PVRSRV_OK, or PVRSRV_ERROR.
1422 ******************************************************************************/
1423 PVRSRV_ERROR
1424 PVRMMapRemoveRegisteredArea(LinuxMemArea *psLinuxMemArea)
1425 {
1426 PVRSRV_ERROR eError;
1427 PKV_OFFSET_STRUCT psOffsetStruct, psTmpOffsetStruct;
1429 LinuxLockMutex(&g_sMMapMutex);
1431 PVR_ASSERT(psLinuxMemArea->bMMapRegistered);
1433 list_for_each_entry_safe(psOffsetStruct, psTmpOffsetStruct, &psLinuxMemArea->sMMapOffsetStructList, sAreaItem)
1434 {
1435 if (psOffsetStruct->ui32Mapped != 0)
1436 {
1437 PVR_DPF((PVR_DBG_ERROR, "%s: psOffsetStruct 0x%p for memory area 0x0x%p is still mapped; psOffsetStruct->ui32Mapped %u", __FUNCTION__, psOffsetStruct, psLinuxMemArea, psOffsetStruct->ui32Mapped));
1438 dump_stack();
1439 PVRSRVDumpRefCountCCB();
1440 eError = PVRSRV_ERROR_STILL_MAPPED;
1441 goto exit_unlock;
1442 }
1443 else
1444 {
1445 /*
1446 * An offset structure is created when a call is made to get
1447 * the mmap data for a physical mapping. If the data is never
1448 * used for mmap, we will be left with an umapped offset
1449 * structure.
1450 */
1451 PVR_DPF((PVR_DBG_WARNING, "%s: psOffsetStruct 0x%p was never mapped", __FUNCTION__, psOffsetStruct));
1452 }
1454 PVR_ASSERT((psOffsetStruct->ui32Mapped == 0) && psOffsetStruct->bOnMMapList);
1456 DestroyOffsetStruct(psOffsetStruct);
1457 }
1459 list_del(&psLinuxMemArea->sMMapItem);
1461 psLinuxMemArea->bMMapRegistered = IMG_FALSE;
1463 #if defined(DEBUG_LINUX_MMAP_AREAS)
1464 g_ui32RegisteredAreas--;
1465 if (psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC)
1466 {
1467 g_ui32TotalByteSize -= psLinuxMemArea->ui32ByteSize;
1468 }
1469 #endif
1471 eError = PVRSRV_OK;
1473 exit_unlock:
1474 LinuxUnLockMutex(&g_sMMapMutex);
1475 return eError;
1476 }
1479 /*!
1480 *******************************************************************************
1482 @Function LinuxMMapPerProcessConnect
1484 @Description
1486 Per-process mmap initialisation code.
1488 @input psEnvPerProc : pointer to OS specific per-process data.
1490 @Return PVRSRV_OK, or PVRSRV_ERROR.
1492 ******************************************************************************/
PVRSRV_ERROR
LinuxMMapPerProcessConnect(PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc)
{
	/* No per-process mmap state to set up; nothing to do here. */
	PVR_UNREFERENCED_PARAMETER(psEnvPerProc);

	return PVRSRV_OK;
}
1501 /*!
1502 *******************************************************************************
1504 @Function LinuxMMapPerProcessDisconnect
1506 @Description
1508 Per-process mmap deinitialisation code.
1510 @input psEnvPerProc : pointer to OS specific per-process data.
1512 ******************************************************************************/
IMG_VOID
LinuxMMapPerProcessDisconnect(PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc)
{
	PKV_OFFSET_STRUCT psOffsetStruct, psTmpOffsetStruct;
	IMG_BOOL bWarn = IMG_FALSE;
	IMG_UINT32 ui32PID = OSGetCurrentProcessIDKM();

	PVR_UNREFERENCED_PARAMETER(psEnvPerProc);

	LinuxLockMutex(&g_sMMapMutex);

	/* Sweep the global list of never-mapped offset structures and destroy
	   those belonging to the disconnecting process (_safe: entries are
	   removed during iteration). */
	list_for_each_entry_safe(psOffsetStruct, psTmpOffsetStruct, &g_sMMapOffsetStructList, sMMapItem)
	{
		if (psOffsetStruct->ui32PID == ui32PID)
		{
			if (!bWarn)
			{
				/* Warn once per disconnect, not once per leftover struct */
				PVR_DPF((PVR_DBG_WARNING, "%s: process has unmapped offset structures. Removing them", __FUNCTION__));
				bWarn = IMG_TRUE;
			}
			PVR_ASSERT(psOffsetStruct->ui32Mapped == 0);
			PVR_ASSERT(psOffsetStruct->bOnMMapList);

			DestroyOffsetStruct(psOffsetStruct);
		}
	}

	LinuxUnLockMutex(&g_sMMapMutex);
}
1544 /*!
1545 *******************************************************************************
1547 @Function LinuxMMapPerProcessHandleOptions
1549 @Description
1551 Set secure handle options required by mmap code.
1553 @input psHandleBase : pointer to handle base.
1555 @Return PVRSRV_OK, or PVRSRV_ERROR.
1557 ******************************************************************************/
1558 PVRSRV_ERROR LinuxMMapPerProcessHandleOptions(PVRSRV_HANDLE_BASE *psHandleBase)
1559 {
1560 PVRSRV_ERROR eError;
1562 eError = PVRSRVSetMaxHandle(psHandleBase, MAX_MMAP_HANDLE);
1563 if (eError != PVRSRV_OK)
1564 {
1565 PVR_DPF((PVR_DBG_ERROR,"%s: failed to set handle limit (%d)", __FUNCTION__, eError));
1566 return eError;
1567 }
1569 return eError;
1570 }
1573 /*!
1574 *******************************************************************************
1576 @Function PVRMMapInit
1578 @Description
1580 MMap initialisation code
1582 ******************************************************************************/
1583 IMG_VOID
1584 PVRMMapInit(IMG_VOID)
1585 {
1586 LinuxInitMutex(&g_sMMapMutex);
1588 g_psMemmapCache = KMemCacheCreateWrapper("img-mmap", sizeof(KV_OFFSET_STRUCT), 0, 0);
1589 if (!g_psMemmapCache)
1590 {
1591 PVR_DPF((PVR_DBG_ERROR,"%s: failed to allocate kmem_cache", __FUNCTION__));
1592 goto error;
1593 }
1595 #if defined(DEBUG_LINUX_MMAP_AREAS)
1596 g_ProcMMap = CreateProcReadEntrySeq("mmap", NULL,
1597 ProcSeqNextMMapRegistrations,
1598 ProcSeqShowMMapRegistrations,
1599 ProcSeqOff2ElementMMapRegistrations,
1600 ProcSeqStartstopMMapRegistations
1601 );
1602 #endif /* defined(DEBUG_LINUX_MMAP_AREAS) */
1603 return;
1605 error:
1606 PVRMMapCleanup();
1607 return;
1608 }
1611 /*!
1612 *******************************************************************************
1614 @Function PVRMMapCleanup
1616 @Description
1618 Mmap deinitialisation code
1620 ******************************************************************************/
IMG_VOID
PVRMMapCleanup(IMG_VOID)
{
	PVRSRV_ERROR eError;

	/* Any areas still registered at cleanup time indicate a leak; report
	   them, then forcibly unregister and free each one. */
	if (!list_empty(&g_sMMapAreaList))
	{
		LinuxMemArea *psLinuxMemArea, *psTmpMemArea;

		PVR_DPF((PVR_DBG_ERROR, "%s: Memory areas are still registered with MMap", __FUNCTION__));
		
		PVR_TRACE(("%s: Unregistering memory areas", __FUNCTION__));
		list_for_each_entry_safe(psLinuxMemArea, psTmpMemArea, &g_sMMapAreaList, sMMapItem)
		{
			eError = PVRMMapRemoveRegisteredArea(psLinuxMemArea);
			if (eError != PVRSRV_OK)
			{
				PVR_DPF((PVR_DBG_ERROR, "%s: PVRMMapRemoveRegisteredArea failed (%d)", __FUNCTION__, eError));
			}
			PVR_ASSERT(eError == PVRSRV_OK);

			LinuxMemAreaDeepFree(psLinuxMemArea);
		}
	}
	PVR_ASSERT(list_empty((&g_sMMapAreaList)));

#if defined(DEBUG_LINUX_MMAP_AREAS)
	RemoveProcEntrySeq(g_ProcMMap);
#endif /* defined(DEBUG_LINUX_MMAP_AREAS) */

	if(g_psMemmapCache)
	{
		KMemCacheDestroyWrapper(g_psMemmapCache);
		g_psMemmapCache = NULL;
	}
}