Diffstat (limited to 'sgx_km/eurasia_km/services4/srvkm/env/linux/mmap.c')
-rw-r--r-- sgx_km/eurasia_km/services4/srvkm/env/linux/mmap.c | 1659
1 file changed, 1659 insertions(+), 0 deletions(-)
diff --git a/sgx_km/eurasia_km/services4/srvkm/env/linux/mmap.c b/sgx_km/eurasia_km/services4/srvkm/env/linux/mmap.c
new file mode 100644
index 0000000..3a2a16b
--- /dev/null
+++ b/sgx_km/eurasia_km/services4/srvkm/env/linux/mmap.c
@@ -0,0 +1,1659 @@
1/*************************************************************************/ /*!
2@Title Linux mmap interface
3@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
4@License Dual MIT/GPLv2
5
6The contents of this file are subject to the MIT license as set out below.
7
8Permission is hereby granted, free of charge, to any person obtaining a copy
9of this software and associated documentation files (the "Software"), to deal
10in the Software without restriction, including without limitation the rights
11to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12copies of the Software, and to permit persons to whom the Software is
13furnished to do so, subject to the following conditions:
14
15The above copyright notice and this permission notice shall be included in
16all copies or substantial portions of the Software.
17
18Alternatively, the contents of this file may be used under the terms of
19the GNU General Public License Version 2 ("GPL") in which case the provisions
20of GPL are applicable instead of those above.
21
22If you wish to allow use of your version of this file only under the terms of
23GPL, and not to allow others to use your version of this file under the terms
24of the MIT license, indicate your decision by deleting the provisions above
25and replace them with the notice and other provisions required by GPL as set
26out in the file called "GPL-COPYING" included in this distribution. If you do
27not delete the provisions above, a recipient may use your version of this file
28under the terms of either the MIT license or GPL.
29
30This License is also included in this distribution in the file called
31"MIT-COPYING".
32
33EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
34PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
35BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
36PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
37COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
38IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
39CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
40*/ /**************************************************************************/
41
42#include <linux/version.h>
43
44#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38))
45#ifndef AUTOCONF_INCLUDED
46#include <linux/config.h>
47#endif
48#endif
49
50#include <linux/mm.h>
51#include <linux/module.h>
52#include <linux/vmalloc.h>
53#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
54#include <linux/wrapper.h>
55#endif
56#include <linux/slab.h>
57#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
58#include <linux/highmem.h>
59#endif
60#include <asm/io.h>
61#include <asm/page.h>
62#include <asm/shmparam.h>
63#include <asm/pgtable.h>
64#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22))
65#include <linux/sched.h>
66#include <asm/current.h>
67#endif
68#if defined(SUPPORT_DRI_DRM)
69#include <drm/drmP.h>
70#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,18,0))
71#include <drm/drm_legacy.h>
72#endif
73#endif
74
75#ifdef CONFIG_ARCH_OMAP5
76#ifdef CONFIG_DSSCOMP
77#include <../drivers/staging/omapdrm/omap_dmm_tiler.h>
78#endif
79#endif
80
81#include "services_headers.h"
82
83#include "pvrmmap.h"
84#include "mutils.h"
85#include "mmap.h"
86#include "mm.h"
87#include "proc.h"
88#include "mutex.h"
89#include "handle.h"
90#include "perproc.h"
91#include "env_perproc.h"
92#include "bridged_support.h"
93#if defined(SUPPORT_DRI_DRM)
94#include "pvr_drm.h"
95#endif
96
97#if !defined(PVR_SECURE_HANDLES)
98#error "The mmap code requires PVR_SECURE_HANDLES"
99#endif
100
101#if defined(SUPPORT_DRI_DRM) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3,18,0))
102static inline int drm_mmap(struct file *filp, struct vm_area_struct *vma)
103{
104 return drm_legacy_mmap(filp, vma);
105}
106#endif
107
108/* WARNING:
109 * The mmap code has its own mutex, to prevent a possible deadlock
110 * when using gPVRSRVLock.
111 * The Linux kernel takes the mm->mmap_sem before calling the mmap
112 * entry points (PVRMMap, MMapVOpen, MMapVClose), but the ioctl
113 * entry point may take mm->mmap_sem during fault handling, or
114 * before calling get_user_pages. If gPVRSRVLock was used in the
115 * mmap entry points, a deadlock could result, due to the ioctl
116 * and mmap code taking the two locks in different orders.
117 * As a corollary to this, the mmap entry points must not call
118 * any driver code that relies on gPVRSRVLock being held.
119 */
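/*
 * Illustrative sketch of the lock ordering problem described above
 * (not real code):
 *
 *   ioctl path:                    mmap path, if it took gPVRSRVLock:
 *     take gPVRSRVLock               kernel takes mm->mmap_sem
 *     take mm->mmap_sem              take gPVRSRVLock
 *
 * The two paths would acquire the same pair of locks in opposite
 * orders, the classic AB-BA deadlock. The dedicated mutex below
 * avoids this by keeping the mmap entry points off gPVRSRVLock.
 */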
120PVRSRV_LINUX_MUTEX g_sMMapMutex;
121
122static LinuxKMemCache *g_psMemmapCache = NULL;
123static LIST_HEAD(g_sMMapAreaList);
124static LIST_HEAD(g_sMMapOffsetStructList);
125#if defined(DEBUG_LINUX_MMAP_AREAS)
126static IMG_UINT32 g_ui32RegisteredAreas = 0;
127static IMG_SIZE_T g_uiTotalByteSize = 0;
128#endif
129
130
131#if defined(DEBUG_LINUX_MMAP_AREAS)
132static struct pvr_proc_dir_entry *g_ProcMMap;
133#endif /* defined(DEBUG_LINUX_MMAP_AREAS) */
134
135#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL)
136/*
137 * Now that we are using mmap2 in srvclient, almost (*) the full 32
138 * bit offset is available. The range of values is divided into two.
139 * The first part of the range, from FIRST_PHYSICAL_PFN to
140 * LAST_PHYSICAL_PFN, is for raw page mappings (VM_PFNMAP). The
141 * resulting 43 bit (*) physical address range should be enough for
142 * the current range of processors we support.
143 *
144 * NB: (*) -- the above figures assume 4KB page size. The offset
145 * argument to mmap2() is in units of 4,096 bytes regardless of page
146 * size. Thus, we lose (PAGE_SHIFT-12) bits of resolution on other
147 * architectures.
148 *
149 * The second part of the range, from FIRST_SPECIAL_PFN to LAST_SPECIAL_PFN,
150 * is used for all other mappings. These other mappings will always
151 * consist of pages with associated page structures, and need not
152 * represent a contiguous range of physical addresses.
153 *
154 */
155#define MMAP2_PGOFF_RESOLUTION (32-PAGE_SHIFT+12)
156#define RESERVED_PGOFF_BITS 1
157#define MAX_MMAP_HANDLE ((1UL<<(MMAP2_PGOFF_RESOLUTION-RESERVED_PGOFF_BITS))-1)
158
159#define FIRST_PHYSICAL_PFN 0
160#define LAST_PHYSICAL_PFN (FIRST_PHYSICAL_PFN + MAX_MMAP_HANDLE)
161#define FIRST_SPECIAL_PFN (LAST_PHYSICAL_PFN + 1)
162#define LAST_SPECIAL_PFN (FIRST_SPECIAL_PFN + MAX_MMAP_HANDLE)
163
164#else /* !defined(PVR_MAKE_ALL_PFNS_SPECIAL) */
165
166/*
167 * Since we no longer have to worry about clashes with the mmap
168 * offsets used for pure PFN mappings (VM_PFNMAP), there is greater
169 * freedom in choosing the mmap handles. This is useful if the
170 * mmap offset space has to be shared with another driver component.
171 */
172
173#if defined(PVR_MMAP_OFFSET_BASE)
174#define FIRST_SPECIAL_PFN PVR_MMAP_OFFSET_BASE
175#else
176#define FIRST_SPECIAL_PFN_BASE 0x80000000UL
177#define FIRST_SPECIAL_PFN (FIRST_SPECIAL_PFN_BASE >> (PAGE_SHIFT - 12))
178#endif
179
180#if defined(PVR_NUM_MMAP_HANDLES)
181#define MAX_MMAP_HANDLE PVR_NUM_MMAP_HANDLES
182#else
183#define MAX_MMAP_HANDLE_BASE 0x7fffffffUL
184#define MAX_MMAP_HANDLE (MAX_MMAP_HANDLE_BASE >> (PAGE_SHIFT - 12))
185#endif
186
187#endif /* !defined(PVR_MAKE_ALL_PFNS_SPECIAL) */
188
189#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL)
190static inline IMG_BOOL
191PFNIsPhysical(IMG_UINT32 pfn)
192{
193 /* Unsigned, no need to compare >=0 */
194 return (/*(pfn >= FIRST_PHYSICAL_PFN) &&*/ (pfn <= LAST_PHYSICAL_PFN)) ? IMG_TRUE : IMG_FALSE;
195}
196
197static inline IMG_BOOL
198PFNIsSpecial(IMG_UINT32 pfn)
199{
200 /* Unsigned, no need to compare <=MAX_UINT */
201 return ((pfn >= FIRST_SPECIAL_PFN) /*&& (pfn <= LAST_SPECIAL_PFN)*/) ? IMG_TRUE : IMG_FALSE;
202}
203#endif
204
205#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL)
206static inline IMG_HANDLE
207MMapOffsetToHandle(IMG_UINT32 pfn)
208{
209 if (PFNIsPhysical(pfn))
210 {
211 PVR_ASSERT(PFNIsPhysical(pfn));
212 return IMG_NULL;
213 }
214 return (IMG_HANDLE)(pfn - FIRST_SPECIAL_PFN);
215}
216#endif
217
218static inline IMG_UINTPTR_T
219HandleToMMapOffset(IMG_HANDLE hHandle)
220{
221 IMG_UINTPTR_T ulHandle = (IMG_UINTPTR_T)hHandle;
222
223#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL)
224 if (PFNIsSpecial(ulHandle))
225 {
226 PVR_ASSERT(PFNIsSpecial(ulHandle));
227 return 0;
228 }
229#endif
230 return ulHandle + FIRST_SPECIAL_PFN;
231}
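/*
 * In the !PVR_MAKE_ALL_PFNS_SPECIAL case, the two helpers above are
 * inverses over the special range: HandleToMMapOffset() adds
 * FIRST_SPECIAL_PFN and MMapOffsetToHandle() subtracts it, so for
 * example handle 5 becomes offset FIRST_SPECIAL_PFN + 5 and maps
 * back to handle 5 again.
 */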
232
233#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL)
234/*
235 * Determine whether physical or special mappings will be used for
236 * a given memory area. At present, this decision is made on
237 * whether the mapping represents a contiguous range of physical
238 * addresses, which is a requirement for raw page mappings (VM_PFNMAP).
239 * In the VMA structure for such a mapping, vm_pgoff is the PFN
240 * (page frame number, the physical address divided by the page size)
241 * of the first page in the VMA. The second page is assumed to have
242 * PFN (vm_pgoff + 1), the third (vm_pgoff + 2) and so on.
243 */
244static inline IMG_BOOL
245LinuxMemAreaUsesPhysicalMap(LinuxMemArea *psLinuxMemArea)
246{
247 return LinuxMemAreaPhysIsContig(psLinuxMemArea);
248}
249#endif
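/*
 * For example, a physically contiguous buffer starting at 0x80000000
 * with 4KB pages would take the physical path and be mapped with
 * vm_pgoff = 0x80000 (its first PFN); the page backing
 * vm_start + (n * PAGE_SIZE) is then implicitly PFN 0x80000 + n.
 */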
250
251#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL)
252static inline IMG_UINT32
253GetCurrentThreadID(IMG_VOID)
254{
255 /*
256 * The PID is the thread ID, as each thread is a
257 * separate process.
258 */
259 return (IMG_UINT32)current->pid;
260}
261#endif
262
263/*
264 * Create an offset structure, which is used to hold per-process
265 * mmap data.
266 */
267static PKV_OFFSET_STRUCT
268CreateOffsetStruct(LinuxMemArea *psLinuxMemArea, IMG_UINTPTR_T uiOffset, IMG_SIZE_T uiRealByteSize)
269{
270 PKV_OFFSET_STRUCT psOffsetStruct;
271#if defined(DEBUG) || defined(DEBUG_LINUX_MMAP_AREAS)
272 const IMG_CHAR *pszName = LinuxMemAreaTypeToString(LinuxMemAreaRootType(psLinuxMemArea));
273#endif
274
275#if defined(DEBUG) || defined(DEBUG_LINUX_MMAP_AREAS)
276 PVR_DPF((PVR_DBG_MESSAGE,
277 "%s(%s, psLinuxMemArea: 0x%p, ui32AllocFlags: 0x%8x)",
278 __FUNCTION__, pszName, psLinuxMemArea, psLinuxMemArea->ui32AreaFlags));
279#endif
280
281 PVR_ASSERT(psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC || LinuxMemAreaRoot(psLinuxMemArea)->eAreaType != LINUX_MEM_AREA_SUB_ALLOC);
282
283 PVR_ASSERT(psLinuxMemArea->bMMapRegistered);
284
285 psOffsetStruct = KMemCacheAllocWrapper(g_psMemmapCache, GFP_KERNEL);
286 if(psOffsetStruct == IMG_NULL)
287 {
288 PVR_DPF((PVR_DBG_ERROR,"%s: Couldn't alloc another mapping record from cache", __FUNCTION__));
289 return IMG_NULL;
290 }
291
292 psOffsetStruct->uiMMapOffset = uiOffset;
293
294 psOffsetStruct->psLinuxMemArea = psLinuxMemArea;
295
296 psOffsetStruct->uiRealByteSize = uiRealByteSize;
297
298 /*
299 * We store the TID in case two threads within a process
300 * generate the same offset structure, and both end up on the
301 * list of structures waiting to be mapped, at the same time.
302 * This could happen if two sub areas within the same page are
303 * being mapped at the same time.
304 * The TID allows the mmap entry point to distinguish which
305 * mapping is being done by which thread.
306 */
307#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL)
308 psOffsetStruct->ui32TID = GetCurrentThreadID();
309#endif
310 psOffsetStruct->ui32PID = OSGetCurrentProcessIDKM();
311
312#if defined(DEBUG_LINUX_MMAP_AREAS)
313 /* Extra entries to support proc filesystem debug info */
314 psOffsetStruct->pszName = pszName;
315#endif
316
317 list_add_tail(&psOffsetStruct->sAreaItem, &psLinuxMemArea->sMMapOffsetStructList);
318
319 return psOffsetStruct;
320}
321
322
323static IMG_VOID
324DestroyOffsetStruct(PKV_OFFSET_STRUCT psOffsetStruct)
325{
326#ifdef DEBUG
327 IMG_CPU_PHYADDR CpuPAddr;
328 CpuPAddr = LinuxMemAreaToCpuPAddr(psOffsetStruct->psLinuxMemArea, 0);
329#endif
330
331 list_del(&psOffsetStruct->sAreaItem);
332
333 if (psOffsetStruct->bOnMMapList)
334 {
335 list_del(&psOffsetStruct->sMMapItem);
336 }
337
338#ifdef DEBUG
339 PVR_DPF((PVR_DBG_MESSAGE, "%s: Table entry: "
340 "psLinuxMemArea=%p, CpuPAddr=0x" CPUPADDR_FMT,
341 __FUNCTION__,
342 psOffsetStruct->psLinuxMemArea,
343 CpuPAddr.uiAddr));
344#endif
345
346 KMemCacheFreeWrapper(g_psMemmapCache, psOffsetStruct);
347}
348
349
350/*
351 * There are no alignment constraints for mapping requests made by user
352 * mode Services. For this, and potentially other reasons, the
353 * mapping created for a user's request may look different to the
354 * original request in terms of size and alignment.
355 *
356 * This function determines an offset that the user can add to the mapping
357 * that is _actually_ created which will point to the memory they are
358 * _really_ interested in.
359 *
360 */
361static inline IMG_VOID
362DetermineUsersSizeAndByteOffset(LinuxMemArea *psLinuxMemArea,
363 IMG_SIZE_T *puiRealByteSize,
364 IMG_UINTPTR_T *puiByteOffset)
365{
366 IMG_UINTPTR_T uiPageAlignmentOffset;
367 IMG_CPU_PHYADDR CpuPAddr;
368
369 CpuPAddr = LinuxMemAreaToCpuPAddr(psLinuxMemArea, 0);
370 uiPageAlignmentOffset = ADDR_TO_PAGE_OFFSET(CpuPAddr.uiAddr);
371
372 *puiByteOffset = uiPageAlignmentOffset;
373
374 *puiRealByteSize = PAGE_ALIGN(psLinuxMemArea->uiByteSize + uiPageAlignmentOffset);
375}
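/*
 * Worked example, assuming 4KB pages: for an area of 0x100 bytes at
 * CPU physical address 0x12345678, the page alignment offset is
 * 0x678, so *puiByteOffset = 0x678 and *puiRealByteSize =
 * PAGE_ALIGN(0x100 + 0x678) = 0x1000. The caller maps one whole page
 * and adds 0x678 to the returned base address to reach the memory it
 * actually asked for.
 */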
376
377
378/*!
379 *******************************************************************************
380
381 @Function PVRMMapOSMemHandleToMMapData
382
383 @Description
384
385 Determine various parameters needed to mmap a memory area, and to
386 locate the memory within the mapped area.
387
388 @input psPerProc : Per-process data.
389 @input hMHandle : Memory handle.
390 @input puiMMapOffset : pointer to location for returned mmap offset.
391 @input puiByteOffset : pointer to location for returned byte offset.
392 @input puiRealByteSize : pointer to location for returned real byte size.
394 @input puiUserVAddr : pointer to location for returned user mode address.
394
395 @output puiMMapOffset : points to mmap offset to be used in mmap2 sys call.
396 @output puiByteOffset : points to byte offset of start of memory
397 within mapped area returned by mmap2.
398 @output puiRealByteSize : points to size of area to be mapped.
399 @output puiUserVAddr : points to user mode address of start of
400 mapping, or 0 if it hasn't been mapped yet.
401
402 @Return PVRSRV_ERROR : PVRSRV_OK, or error code.
403
404 ******************************************************************************/
405PVRSRV_ERROR
406PVRMMapOSMemHandleToMMapData(PVRSRV_PER_PROCESS_DATA *psPerProc,
407 IMG_HANDLE hMHandle,
408 IMG_UINTPTR_T *puiMMapOffset,
409 IMG_UINTPTR_T *puiByteOffset,
410 IMG_SIZE_T *puiRealByteSize,
411 IMG_UINTPTR_T *puiUserVAddr)
412{
413 LinuxMemArea *psLinuxMemArea;
414 PKV_OFFSET_STRUCT psOffsetStruct;
415 IMG_HANDLE hOSMemHandle;
416 PVRSRV_ERROR eError;
417
418 LinuxLockMutexNested(&g_sMMapMutex, PVRSRV_LOCK_CLASS_MMAP);
419
420 PVR_ASSERT(PVRSRVGetMaxHandle(psPerProc->psHandleBase) <= MAX_MMAP_HANDLE);
421
422 eError = PVRSRVLookupOSMemHandle(psPerProc->psHandleBase, &hOSMemHandle, hMHandle);
423 if (eError != PVRSRV_OK)
424 {
425 PVR_DPF((PVR_DBG_ERROR, "%s: Lookup of handle %p failed", __FUNCTION__, hMHandle));
426
427 goto exit_unlock;
428 }
429
430 psLinuxMemArea = (LinuxMemArea *)hOSMemHandle;
431
432 /* Sparse mappings have to ask the BM for the virtual size */
433 if (psLinuxMemArea->hBMHandle)
434 {
435 *puiRealByteSize = BM_GetVirtualSize(psLinuxMemArea->hBMHandle);
436 *puiByteOffset = 0;
437 }
438 else
439 {
440 DetermineUsersSizeAndByteOffset(psLinuxMemArea,
441 puiRealByteSize,
442 puiByteOffset);
443 }
444
445 /* Check whether this memory area has already been mapped */
446 list_for_each_entry(psOffsetStruct, &psLinuxMemArea->sMMapOffsetStructList, sAreaItem)
447 {
448 if (psPerProc->ui32PID == psOffsetStruct->ui32PID)
449 {
450 if (!psLinuxMemArea->hBMHandle)
451 {
452 PVR_ASSERT(*puiRealByteSize == psOffsetStruct->uiRealByteSize);
453 }
454 /*
455 * User mode locking is required to stop two threads racing to
456 * map the same memory area. The lock should prevent a
457 * second thread retrieving mmap data for a given handle,
458 * before the first thread has done the mmap.
459 * Without locking, both threads may attempt the mmap,
460 * and one of them will fail.
461 */
462 *puiMMapOffset = psOffsetStruct->uiMMapOffset;
463 *puiUserVAddr = psOffsetStruct->uiUserVAddr;
464 PVRSRVOffsetStructIncRef(psOffsetStruct);
465
466 eError = PVRSRV_OK;
467 goto exit_unlock;
468 }
469 }
470
471 /* Memory area won't have been mapped yet */
472 *puiUserVAddr = 0;
473
474#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL)
475 if (LinuxMemAreaUsesPhysicalMap(psLinuxMemArea))
476 {
477 *puiMMapOffset = LinuxMemAreaToCpuPFN(psLinuxMemArea, 0);
478 PVR_ASSERT(PFNIsPhysical(*puiMMapOffset));
479 }
480 else
481#endif
482 {
483 *puiMMapOffset = HandleToMMapOffset(hMHandle);
484#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL)
485 PVR_ASSERT(PFNIsSpecial(*puiMMapOffset));
486#endif
487 }
488
489 psOffsetStruct = CreateOffsetStruct(psLinuxMemArea, *puiMMapOffset, *puiRealByteSize);
490 if (psOffsetStruct == IMG_NULL)
491 {
492 eError = PVRSRV_ERROR_OUT_OF_MEMORY;
493 goto exit_unlock;
494 }
495
496 /*
497 * Offset structures representing physical mappings are added to
498 * a list, so that they can be located when the memory area is mapped.
499 */
500 list_add_tail(&psOffsetStruct->sMMapItem, &g_sMMapOffsetStructList);
501
502 psOffsetStruct->bOnMMapList = IMG_TRUE;
503
504 PVRSRVOffsetStructIncRef(psOffsetStruct);
505
506 eError = PVRSRV_OK;
507
508 /* Need to scale up the offset to counter the shifting that
509 is done in the mmap2() syscall, as it expects the pgoff
510 argument to be in units of 4,096 bytes irrespective of
511 page size */
512 *puiMMapOffset = *puiMMapOffset << (PAGE_SHIFT - 12);
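 /*
  * Example: with 16KB pages (PAGE_SHIFT == 14), an internal page
  * offset of 0x100 is returned to user mode as 0x400 (4,096 byte
  * units); the kernel's mmap2() handling shifts it back down by
  * (PAGE_SHIFT - 12) bits, so ps_vma->vm_pgoff reads 0x100 again.
  * With 4KB pages the shift is zero and the value is unchanged.
  */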
513
514exit_unlock:
515 LinuxUnLockMutex(&g_sMMapMutex);
516
517 return eError;
518}
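/*
 * Taken together, the expected user mode sequence is: call the bridge
 * to obtain *puiMMapOffset, pass it as the offset argument to mmap2(),
 * then add *puiByteOffset to the address mmap2() returns. PVRMMap()
 * below matches the incoming request back to the offset structure
 * queued above via FindOffsetStructByOffset().
 */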
519
520
521/*!
522 *******************************************************************************
523
524 @Function PVRMMapReleaseMMapData
525
526 @Description
527
528 Release mmap data.
529
530 @input psPerProc : Per-process data.
531 @input hMHandle : Memory handle.
532 @input pbMUnmap : pointer to location for munmap flag.
533 @input puiUserVAddr : pointer to location for user mode address of mapping.
535 @input puiRealByteSize : pointer to location for size of mapping.
535
536 @Output pbMUnmap : points to flag that indicates whether an munmap is
537 required.
539 @output puiUserVAddr : points to user mode address to munmap.
 @output puiRealByteSize : points to size of area to munmap.
539
540 @Return PVRSRV_ERROR : PVRSRV_OK, or error code.
541
542 ******************************************************************************/
543PVRSRV_ERROR
544PVRMMapReleaseMMapData(PVRSRV_PER_PROCESS_DATA *psPerProc,
545 IMG_HANDLE hMHandle,
546 IMG_BOOL *pbMUnmap,
547 IMG_SIZE_T *puiRealByteSize,
548 IMG_UINTPTR_T *puiUserVAddr)
549{
550 LinuxMemArea *psLinuxMemArea;
551 PKV_OFFSET_STRUCT psOffsetStruct;
552 IMG_HANDLE hOSMemHandle;
553 PVRSRV_ERROR eError;
554 IMG_UINT32 ui32PID = OSGetCurrentProcessIDKM();
555
556 LinuxLockMutexNested(&g_sMMapMutex, PVRSRV_LOCK_CLASS_MMAP);
557
558 PVR_ASSERT(PVRSRVGetMaxHandle(psPerProc->psHandleBase) <= MAX_MMAP_HANDLE);
559
560 eError = PVRSRVLookupOSMemHandle(psPerProc->psHandleBase, &hOSMemHandle, hMHandle);
561 if (eError != PVRSRV_OK)
562 {
563 PVR_DPF((PVR_DBG_ERROR, "%s: Lookup of handle %p failed", __FUNCTION__, hMHandle));
564
565 goto exit_unlock;
566 }
567
568 psLinuxMemArea = (LinuxMemArea *)hOSMemHandle;
569
570 /* Find the offset structure */
571 list_for_each_entry(psOffsetStruct, &psLinuxMemArea->sMMapOffsetStructList, sAreaItem)
572 {
573 if (psOffsetStruct->ui32PID == ui32PID)
574 {
575 if (psOffsetStruct->ui32RefCount == 0)
576 {
577 PVR_DPF((PVR_DBG_ERROR, "%s: Attempt to release mmap data with zero reference count for offset struct 0x%p, memory area %p", __FUNCTION__, psOffsetStruct, psLinuxMemArea));
578 eError = PVRSRV_ERROR_STILL_MAPPED;
579 goto exit_unlock;
580 }
581
582 PVRSRVOffsetStructDecRef(psOffsetStruct);
583
584 *pbMUnmap = (IMG_BOOL)((psOffsetStruct->ui32RefCount == 0) && (psOffsetStruct->uiUserVAddr != 0));
585
586 *puiUserVAddr = (*pbMUnmap) ? psOffsetStruct->uiUserVAddr : 0;
587 *puiRealByteSize = (*pbMUnmap) ? psOffsetStruct->uiRealByteSize : 0;
588
589 eError = PVRSRV_OK;
590 goto exit_unlock;
591 }
592 }
593
594 /* MMap data not found */
595 PVR_DPF((PVR_DBG_ERROR, "%s: Mapping data not found for handle %p (memory area %p)", __FUNCTION__, hMHandle, psLinuxMemArea));
596
597 eError = PVRSRV_ERROR_MAPPING_NOT_FOUND;
598
599exit_unlock:
600 LinuxUnLockMutex(&g_sMMapMutex);
601
602 return eError;
603}
604
605static inline PKV_OFFSET_STRUCT
606FindOffsetStructByOffset(IMG_UINTPTR_T uiOffset, IMG_SIZE_T uiRealByteSize)
607{
608 PKV_OFFSET_STRUCT psOffsetStruct;
609#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL)
610 IMG_UINT32 ui32TID = GetCurrentThreadID();
611#endif
612 IMG_UINT32 ui32PID = OSGetCurrentProcessIDKM();
613
614 list_for_each_entry(psOffsetStruct, &g_sMMapOffsetStructList, sMMapItem)
615 {
616 if (uiOffset == psOffsetStruct->uiMMapOffset && uiRealByteSize == psOffsetStruct->uiRealByteSize && psOffsetStruct->ui32PID == ui32PID)
617 {
618#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL)
619 /*
620 * If the offset is physical, make sure the thread IDs match,
621 * as different threads may be mapping different memory areas
622 * with the same offset.
623 */
624 if (!PFNIsPhysical(uiOffset) || psOffsetStruct->ui32TID == ui32TID)
625#endif
626 {
627 return psOffsetStruct;
628 }
629 }
630 }
631
632 return IMG_NULL;
633}
634
635
636/*
637 * Map a memory area into user space.
638 * Note, uiByteOffset is _not_ implicitly page aligned since
639 * LINUX_MEM_AREA_SUB_ALLOC LinuxMemAreas have no alignment constraints.
640 */
641static IMG_BOOL
642DoMapToUser(LinuxMemArea *psLinuxMemArea,
643 struct vm_area_struct* ps_vma,
644 IMG_UINTPTR_T uiByteOffset)
645{
646 IMG_SIZE_T uiByteSize;
647
648 if ((psLinuxMemArea->hBMHandle) && (uiByteOffset != 0))
649 {
650 /* Partial mapping of sparse allocations should never happen */
651 return IMG_FALSE;
652 }
653
654 if (psLinuxMemArea->eAreaType == LINUX_MEM_AREA_SUB_ALLOC)
655 {
656 return DoMapToUser(LinuxMemAreaRoot(psLinuxMemArea), /* PRQA S 3670 */ /* allow recursion */
657 ps_vma,
658 psLinuxMemArea->uData.sSubAlloc.uiByteOffset + uiByteOffset);
659 }
660
661 /*
662 * Note that uiByteSize may be larger than the size of the memory
663 * area being mapped, as the former is a multiple of the page size.
664 */
665 uiByteSize = ps_vma->vm_end - ps_vma->vm_start;
666 PVR_ASSERT(ADDR_TO_PAGE_OFFSET(uiByteSize) == 0);
667
668#if defined (__sparc__)
669 /*
670 * For LINUX_MEM_AREA_EXTERNAL_KV, we don't know where the address range
671 * we are being asked to map has come from, that is, whether it is memory
672 * or I/O. For all architectures other than SPARC, there is no distinction.
673 * Since we don't currently support SPARC, we won't worry about it.
674 */
675#error "SPARC not supported"
676#endif
677
678#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL)
679 if (PFNIsPhysical(ps_vma->vm_pgoff))
680 {
681 IMG_INT result;
682
683 PVR_ASSERT(LinuxMemAreaPhysIsContig(psLinuxMemArea));
684 PVR_ASSERT(LinuxMemAreaToCpuPFN(psLinuxMemArea, uiByteOffset) == ps_vma->vm_pgoff);
685 /*
686 * Since the memory is contiguous, we can map the whole range
687 * in one go.
688 */
689
690 PVR_ASSERT(psLinuxMemArea->hBMHandle == IMG_NULL);
691
692 result = IO_REMAP_PFN_RANGE(ps_vma, ps_vma->vm_start, ps_vma->vm_pgoff, uiByteSize, ps_vma->vm_page_prot);
693
694 if(result == 0)
695 {
696 return IMG_TRUE;
697 }
698
699 PVR_DPF((PVR_DBG_MESSAGE, "%s: Failed to map contiguous physical address range (%d), trying non-contiguous path", __FUNCTION__, result));
700 }
701#endif
702
703 {
704 /*
705 * Memory may be non-contiguous, so we map the range page
706 * by page. Since VM_PFNMAP mappings are assumed to be physically
707 * contiguous, we can't legally use REMAP_PFN_RANGE (that is, we
708 * could, but the resulting VMA may confuse other bits of the kernel
709 * that attempt to interpret it).
710 * The only alternative is to use VM_INSERT_PAGE, which requires
711 * finding the page structure corresponding to each page, or
712 * if mixed maps are supported (VM_MIXEDMAP), vm_insert_mixed.
713 */
714 IMG_UINTPTR_T ulVMAPos;
715 IMG_UINTPTR_T uiByteEnd = uiByteOffset + uiByteSize;
716 IMG_UINTPTR_T uiPA;
717 IMG_UINTPTR_T uiAdjustedPA = uiByteOffset;
718#if defined(PVR_MAKE_ALL_PFNS_SPECIAL)
719 IMG_BOOL bMixedMap = IMG_FALSE;
720#endif
721 /* First pass, validate the page frame numbers */
722 for(uiPA = uiByteOffset; uiPA < uiByteEnd; uiPA += PAGE_SIZE)
723 {
724 IMG_UINTPTR_T pfn;
725 IMG_BOOL bMapPage = IMG_TRUE;
726
727 if (psLinuxMemArea->hBMHandle)
728 {
729 if (!BM_MapPageAtOffset(psLinuxMemArea->hBMHandle, uiPA))
730 {
731 bMapPage = IMG_FALSE;
732 }
733 }
734
735 if (bMapPage)
736 {
737 pfn = LinuxMemAreaToCpuPFN(psLinuxMemArea, uiAdjustedPA);
738 if (!pfn_valid(pfn))
739 {
740#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL)
741 PVR_DPF((PVR_DBG_ERROR,"%s: Error - PFN invalid: 0x" UINTPTR_FMT, __FUNCTION__, pfn));
742 return IMG_FALSE;
743#else
744 bMixedMap = IMG_TRUE;
745#endif
746 }
747 else if (0 == page_count(pfn_to_page(pfn)))
748 {
749#if defined(PVR_MAKE_ALL_PFNS_SPECIAL)
750 bMixedMap = IMG_TRUE;
751#endif
752 }
753 uiAdjustedPA += PAGE_SIZE;
754 }
755 }
756
757#if defined(PVR_MAKE_ALL_PFNS_SPECIAL)
758 if (bMixedMap)
759 {
760 ps_vma->vm_flags |= VM_MIXEDMAP;
761 }
762#endif
763 /* Second pass, get the page structures and insert the pages */
764 ulVMAPos = ps_vma->vm_start;
765 uiAdjustedPA = uiByteOffset;
766 for(uiPA = uiByteOffset; uiPA < uiByteEnd; uiPA += PAGE_SIZE)
767 {
768 IMG_UINTPTR_T pfn;
769 IMG_INT result;
770 IMG_BOOL bMapPage = IMG_TRUE;
771
772 if (psLinuxMemArea->hBMHandle)
773 {
774 /* We have a sparse allocation, check if this page should be mapped */
775 if (!BM_MapPageAtOffset(psLinuxMemArea->hBMHandle, uiPA))
776 {
777 bMapPage = IMG_FALSE;
778 }
779 }
780
781 if (bMapPage)
782 {
783 pfn = LinuxMemAreaToCpuPFN(psLinuxMemArea, uiAdjustedPA);
784
785#if defined(PVR_MAKE_ALL_PFNS_SPECIAL)
786 if (bMixedMap)
787 {
788#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,5,0))
789 pfn_t pfns = { pfn };
790
791 result = vm_insert_mixed(ps_vma, ulVMAPos, pfns);
792#else
793 result = vm_insert_mixed(ps_vma, ulVMAPos, pfn);
794#endif
795 if(result != 0)
796 {
797 PVR_DPF((PVR_DBG_ERROR,"%s: Error - vm_insert_mixed failed (%d)", __FUNCTION__, result));
798 return IMG_FALSE;
799 }
800 }
801 else
802#endif
803 {
804 struct page *psPage;
805
806 PVR_ASSERT(pfn_valid(pfn));
807
808 psPage = pfn_to_page(pfn);
809
810 result = VM_INSERT_PAGE(ps_vma, ulVMAPos, psPage);
811 if(result != 0)
812 {
813 PVR_DPF((PVR_DBG_ERROR,"%s: Error - VM_INSERT_PAGE failed (%d)", __FUNCTION__, result));
814 return IMG_FALSE;
815 }
816 }
817 uiAdjustedPA += PAGE_SIZE;
818 }
819 ulVMAPos += PAGE_SIZE;
820 }
821 }
822
823 return IMG_TRUE;
824}
825
826
827static IMG_VOID
828MMapVOpenNoLock(struct vm_area_struct* ps_vma)
829{
830 PKV_OFFSET_STRUCT psOffsetStruct = (PKV_OFFSET_STRUCT)ps_vma->vm_private_data;
831
832 PVR_ASSERT(psOffsetStruct != IMG_NULL);
833 PVR_ASSERT(!psOffsetStruct->bOnMMapList);
834
835 PVRSRVOffsetStructIncMapped(psOffsetStruct);
836
837 if (psOffsetStruct->ui32Mapped > 1)
838 {
839 PVR_DPF((PVR_DBG_WARNING, "%s: Offset structure 0x%p is being shared across processes (psOffsetStruct->ui32Mapped: %u)", __FUNCTION__, psOffsetStruct, psOffsetStruct->ui32Mapped));
840 PVR_ASSERT((ps_vma->vm_flags & VM_DONTCOPY) == 0);
841 }
842
843#if defined(DEBUG_LINUX_MMAP_AREAS)
844
845 PVR_DPF((PVR_DBG_MESSAGE,
846 "%s: psLinuxMemArea 0x%p, KVAddress 0x%p MMapOffset " UINTPTR_FMT ", ui32Mapped %d",
847 __FUNCTION__,
848 psOffsetStruct->psLinuxMemArea,
849 LinuxMemAreaToCpuVAddr(psOffsetStruct->psLinuxMemArea),
850 psOffsetStruct->uiMMapOffset,
851 psOffsetStruct->ui32Mapped));
852#endif
853}
854
855
856/*
857 * Linux mmap open entry point.
858 */
859static void
860MMapVOpen(struct vm_area_struct* ps_vma)
861{
862 LinuxLockMutexNested(&g_sMMapMutex, PVRSRV_LOCK_CLASS_MMAP);
863
864 MMapVOpenNoLock(ps_vma);
865
866 LinuxUnLockMutex(&g_sMMapMutex);
867}
868
869
870static IMG_VOID
871MMapVCloseNoLock(struct vm_area_struct* ps_vma)
872{
873 PKV_OFFSET_STRUCT psOffsetStruct = (PKV_OFFSET_STRUCT)ps_vma->vm_private_data;
874 PVR_ASSERT(psOffsetStruct != IMG_NULL);
875
876#if defined(DEBUG_LINUX_MMAP_AREAS)
877 PVR_DPF((PVR_DBG_MESSAGE,
878 "%s: psLinuxMemArea %p, CpuVAddr %p uiMMapOffset " UINTPTR_FMT ", ui32Mapped %d",
879 __FUNCTION__,
880 psOffsetStruct->psLinuxMemArea,
881 LinuxMemAreaToCpuVAddr(psOffsetStruct->psLinuxMemArea),
882 psOffsetStruct->uiMMapOffset,
883 psOffsetStruct->ui32Mapped));
884#endif
885
886 PVR_ASSERT(!psOffsetStruct->bOnMMapList);
887 PVRSRVOffsetStructDecMapped(psOffsetStruct);
888 if (psOffsetStruct->ui32Mapped == 0)
889 {
890 if (psOffsetStruct->ui32RefCount != 0)
891 {
892 PVR_DPF((
893 PVR_DBG_MESSAGE,
894 "%s: psOffsetStruct %p has non-zero reference count (ui32RefCount = %u). User mode address of start of mapping: 0x" UINTPTR_FMT,
895 __FUNCTION__,
896 psOffsetStruct,
897 psOffsetStruct->ui32RefCount,
898 psOffsetStruct->uiUserVAddr));
899 }
900
901 DestroyOffsetStruct(psOffsetStruct);
902 }
903
904 ps_vma->vm_private_data = NULL;
905}
906
907/*
908 * Linux mmap close entry point.
909 */
910static void
911MMapVClose(struct vm_area_struct* ps_vma)
912{
913 LinuxLockMutexNested(&g_sMMapMutex, PVRSRV_LOCK_CLASS_MMAP);
914
915 MMapVCloseNoLock(ps_vma);
916
917 LinuxUnLockMutex(&g_sMMapMutex);
918}
919
920#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
921/*
922 * This vma operation is used to read data from mmap regions. It is called
923 * by access_process_vm, which is called to handle PTRACE_PEEKDATA ptrace
924 * requests and reads from /proc/<pid>/mem.
925 */
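/*
 * For example, a debugger attached with ptrace reaches this handler
 * with something like (illustrative user mode snippet):
 *
 *   errno = 0;
 *   long word = ptrace(PTRACE_PEEKDATA, pid, addr, NULL);
 *
 * which the kernel services through access_process_vm. Only reads
 * are supported here; write accesses are rejected.
 */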
926static int MMapVAccess(struct vm_area_struct *ps_vma, unsigned long addr,
927 void *buf, int len, int write)
928{
929 PKV_OFFSET_STRUCT psOffsetStruct;
930 LinuxMemArea *psLinuxMemArea;
931 unsigned long ulOffset;
932 int iRetVal = -EINVAL;
933 IMG_VOID *pvKernelAddr;
934
935 LinuxLockMutexNested(&g_sMMapMutex, PVRSRV_LOCK_CLASS_MMAP);
936
937 psOffsetStruct = (PKV_OFFSET_STRUCT)ps_vma->vm_private_data;
938 psLinuxMemArea = psOffsetStruct->psLinuxMemArea;
939 ulOffset = addr - ps_vma->vm_start;
940
941 if (write || ulOffset+len > psLinuxMemArea->uiByteSize)
942 /* Writes are not supported; out of range reads shouldn't get here,
943 as the kernel checks the range before calling access_process_vm. */
944 goto exit_unlock;
945
946 pvKernelAddr = LinuxMemAreaToCpuVAddr(psLinuxMemArea);
947
948 if (pvKernelAddr)
949 {
950 memcpy(buf, pvKernelAddr+ulOffset, len);
951 iRetVal = len;
952 }
953 else
954 {
955 IMG_UINTPTR_T pfn, uiOffsetInPage;
956 struct page *page;
957
958 pfn = LinuxMemAreaToCpuPFN(psLinuxMemArea, ulOffset);
959
960 if (!pfn_valid(pfn))
961 goto exit_unlock;
962
963 page = pfn_to_page(pfn);
964 uiOffsetInPage = ADDR_TO_PAGE_OFFSET(ulOffset);
965
966 if (uiOffsetInPage + len > PAGE_SIZE)
967 /* The region crosses a page boundary */
968 goto exit_unlock;
969
970 pvKernelAddr = kmap(page);
971 memcpy(buf, pvKernelAddr + uiOffsetInPage, len);
972 kunmap(page);
973
974 iRetVal = len;
975 }
976
977exit_unlock:
978 LinuxUnLockMutex(&g_sMMapMutex);
979 return iRetVal;
980}
981#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26) */
982
983static struct vm_operations_struct MMapIOOps =
984{
985 .open=MMapVOpen,
986 .close=MMapVClose,
987#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
988 .access=MMapVAccess,
989#endif
990};
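/*
 * Note: the kernel calls .open whenever a VMA using these ops is
 * duplicated (e.g. on fork without VM_DONTCOPY, or when mprotect()
 * splits the region) and .close when each VMA is destroyed, so the
 * ui32Mapped count maintained in the handlers above relies on these
 * calls always arriving in pairs.
 */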
991
992
993/*!
994 *******************************************************************************
995
996 @Function PVRMMap
997
998 @Description
999
1000 Driver mmap entry point.
1001
1002 @input pFile : unused.
1003 @input ps_vma : pointer to linux memory area descriptor.
1004
1005 @Return 0, or Linux error code.
1006
1007 ******************************************************************************/
1008int
1009PVRMMap(struct file* pFile, struct vm_area_struct* ps_vma)
1010{
1011 LinuxMemArea *psFlushMemArea = IMG_NULL;
1012 PKV_OFFSET_STRUCT psOffsetStruct;
1013 IMG_SIZE_T uiByteSize;
1014 IMG_VOID *pvBase = IMG_NULL;
1015 int iRetVal = 0;
1016 IMG_UINTPTR_T uiByteOffset = 0; /* Keep compiler happy */
1017 IMG_SIZE_T uiFlushSize = 0;
1018
1019 PVR_UNREFERENCED_PARAMETER(pFile);
1020
1021 LinuxLockMutexNested(&g_sMMapMutex, PVRSRV_LOCK_CLASS_MMAP);
1022
1023 uiByteSize = ps_vma->vm_end - ps_vma->vm_start;
1024
1025 PVR_DPF((PVR_DBG_MESSAGE, "%s: Received mmap(2) request with ui32MMapOffset 0x" UINTPTR_FMT ","
1026 " and uiByteSize %" SIZE_T_FMT_LEN "u(0x%" SIZE_T_FMT_LEN "x)",
1027 __FUNCTION__,
1028 ps_vma->vm_pgoff,
1029 uiByteSize,
1030 uiByteSize));
1031
1032 psOffsetStruct = FindOffsetStructByOffset(ps_vma->vm_pgoff, uiByteSize);
1033
1034 if (psOffsetStruct == IMG_NULL)
1035 {
1036#if defined(SUPPORT_DRI_DRM)
1037 LinuxUnLockMutex(&g_sMMapMutex);
1038
1039#if !defined(SUPPORT_DRI_DRM_EXT)
1040 /* Pass unknown requests onto the DRM module */
1041 return drm_mmap(pFile, ps_vma);
1042#else
1043 /*
1044 * Indicate to caller that the request is not for us.
1045 * Do not return this error elsewhere in this function, as the
1046 * caller may use it as a clue as to whether the mmap request
1047 * should be passed on to another component (e.g. drm_mmap).
1048 */
1049 return -ENOENT;
1050#endif
1051#else
1052 PVR_UNREFERENCED_PARAMETER(pFile);
1053
1054 PVR_DPF((PVR_DBG_ERROR,
1055 "%s: Attempted to mmap unregistered area at vm_pgoff 0x%lx",
1056 __FUNCTION__, ps_vma->vm_pgoff));
1057 iRetVal = -EINVAL;
1058#endif
1059 goto unlock_and_return;
1060 }
1061
1062 list_del(&psOffsetStruct->sMMapItem);
1063 psOffsetStruct->bOnMMapList = IMG_FALSE;
1064
1065 /* Only support shared writable mappings */
1066 if (((ps_vma->vm_flags & VM_WRITE) != 0) &&
1067 ((ps_vma->vm_flags & VM_SHARED) == 0))
1068 {
1069 PVR_DPF((PVR_DBG_ERROR, "%s: Cannot mmap non-shareable writable areas", __FUNCTION__));
1070 iRetVal = -EINVAL;
1071 goto unlock_and_return;
1072 }
1073
1074 PVR_DPF((PVR_DBG_MESSAGE, "%s: Mapped psLinuxMemArea 0x%p\n",
1075 __FUNCTION__, psOffsetStruct->psLinuxMemArea));
1076
1077#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0))
1078 /* This is probably superfluous and implied by VM_IO */
1079 ps_vma->vm_flags |= VM_RESERVED;
1080#else
1081 ps_vma->vm_flags |= VM_DONTDUMP;
1082#endif
1083 ps_vma->vm_flags |= VM_IO;
1084
1085 /*
1086 * Disable mremap because our nopage handler assumes all
1087 * page requests have already been validated.
1088 */
1089 ps_vma->vm_flags |= VM_DONTEXPAND;
1090
1091 /* Don't allow mapping to be inherited across a process fork */
1092 ps_vma->vm_flags |= VM_DONTCOPY;
1093
1094 ps_vma->vm_private_data = (void *)psOffsetStruct;
1095
1096 switch(psOffsetStruct->psLinuxMemArea->ui32AreaFlags & PVRSRV_HAP_CACHETYPE_MASK)
1097 {
1098 case PVRSRV_HAP_CACHED:
1099 /* This is the default, do nothing. */
1100 break;
1101 case PVRSRV_HAP_WRITECOMBINE:
1102 ps_vma->vm_page_prot = PGPROT_WC(ps_vma->vm_page_prot);
1103 break;
1104 case PVRSRV_HAP_UNCACHED:
1105 ps_vma->vm_page_prot = PGPROT_UC(ps_vma->vm_page_prot);
1106 break;
1107 default:
1108 PVR_DPF((PVR_DBG_ERROR, "%s: unknown cache type", __FUNCTION__));
1109 iRetVal = -EINVAL;
1110 goto unlock_and_return;
1111 }
1112
1113#ifdef CONFIG_ARCH_OMAP5
1114 {
1115 IMG_BOOL bModPageProt = IMG_FALSE;
1116
1117#ifdef CONFIG_DSSCOMP
1118 bModPageProt |= is_tiler_addr(LinuxMemAreaToCpuPAddr(psOffsetStruct->psLinuxMemArea, 0).uiAddr);
1119#endif /* CONFIG_DSSCOMP */
1120
1121 if (bModPageProt)
1122 {
1123 ps_vma->vm_page_prot = __pgprot_modify(ps_vma->vm_page_prot,
1124 L_PTE_MT_MASK,
1125 L_PTE_MT_DEV_SHARED);
1126 }
1127 }
1128#endif /* CONFIG_ARCH_OMAP5 */
1129
1130 /* Install open and close handlers for ref-counting */
1131 ps_vma->vm_ops = &MMapIOOps;
1132
1133 if(!DoMapToUser(psOffsetStruct->psLinuxMemArea, ps_vma, 0))
1134 {
1135 iRetVal = -EAGAIN;
1136 goto unlock_and_return;
1137 }
1138
1139 PVR_ASSERT(psOffsetStruct->uiUserVAddr == 0);
1140
1141 psOffsetStruct->uiUserVAddr = ps_vma->vm_start;
1142
1143 /* Compute the flush region (if necessary) inside the mmap mutex */
1144 if(psOffsetStruct->psLinuxMemArea->bNeedsCacheInvalidate)
1145 {
1146 psFlushMemArea = psOffsetStruct->psLinuxMemArea;
1147
1148 /* Sparse mappings have to ask the BM for the virtual size */
1149 if (psFlushMemArea->hBMHandle)
1150 {
1151 pvBase = (IMG_VOID *)ps_vma->vm_start;
1152 uiByteOffset = 0;
1153 uiFlushSize = BM_GetVirtualSize(psFlushMemArea->hBMHandle);
1154 }
1155 else
1156 {
1157 IMG_SIZE_T uiDummyByteSize;
1158
1159 DetermineUsersSizeAndByteOffset(psFlushMemArea,
1160 &uiDummyByteSize,
1161 &uiByteOffset);
1162
1163 pvBase = (IMG_VOID *)ps_vma->vm_start + uiByteOffset;
1164 uiFlushSize = psFlushMemArea->uiByteSize;
1165 }
1166
1167 psFlushMemArea->bNeedsCacheInvalidate = IMG_FALSE;
1168 }
1169
1170 /* Call the open routine to increment the usage count */
1171 MMapVOpenNoLock(ps_vma);
1172
1173 PVR_DPF((PVR_DBG_MESSAGE, "%s: Mapped area at offset 0x" UINTPTR_FMT "\n",
1174 __FUNCTION__, (IMG_UINTPTR_T)ps_vma->vm_pgoff));
1175
1176unlock_and_return:
1177 if (iRetVal != 0 && psOffsetStruct != IMG_NULL)
1178 {
1179 DestroyOffsetStruct(psOffsetStruct);
1180 }
1181
1182 LinuxUnLockMutex(&g_sMMapMutex);
1183
1184 if(psFlushMemArea && uiFlushSize)
1185 {
1186 OSInvalidateCPUCacheRangeKM(psFlushMemArea, uiByteOffset, pvBase,
1187 uiFlushSize);
1188 }
1189
1190 return iRetVal;
1191}
1192
1193
1194#if defined(DEBUG_LINUX_MMAP_AREAS)
1195
1196/*
1197 * Lock MMap regions list (called on page start/stop while reading /proc/mmap)
1198
1199 * sfile : seq_file that handles /proc file
1200 * start : TRUE if it's start, FALSE if it's stop
1201 *
1202*/
1203static void ProcSeqStartstopMMapRegistrations(struct seq_file *sfile, IMG_BOOL start)
1204{
1205 if(start)
1206 {
1207 LinuxLockMutexNested(&g_sMMapMutex, PVRSRV_LOCK_CLASS_MMAP);
1208 }
1209 else
1210 {
1211 LinuxUnLockMutex(&g_sMMapMutex);
1212 }
1213}
1214
1215
1216/*
1217 * Convert offset (index from KVOffsetTable) to element
1218 * (called when reading /proc/mmap file)
1219
1220 * sfile : seq_file that handles /proc file
1221 * off : index into the KVOffsetTable from which to print
1222 *
1223 * returns void* : Pointer to element that will be dumped
1224 *
1225*/
1226static void* ProcSeqOff2ElementMMapRegistrations(struct seq_file *sfile, loff_t off)
1227{
1228 LinuxMemArea *psLinuxMemArea;
1229 if(!off)
1230 {
1231 return PVR_PROC_SEQ_START_TOKEN;
1232 }
1233
1234 list_for_each_entry(psLinuxMemArea, &g_sMMapAreaList, sMMapItem)
1235 {
1236 PKV_OFFSET_STRUCT psOffsetStruct;
1237
1238 list_for_each_entry(psOffsetStruct, &psLinuxMemArea->sMMapOffsetStructList, sAreaItem)
1239 {
1240 off--;
1241 if (off == 0)
1242 {
1243 PVR_ASSERT(psOffsetStruct->psLinuxMemArea == psLinuxMemArea);
1244 return (void*)psOffsetStruct;
1245 }
1246 }
1247 }
1248 return (void*)0;
1249}
1250
1251/*
1252 * Gets next MMap element to show. (called when reading /proc/mmap file)
1253
1254 * sfile : seq_file that handles /proc file
1255 * el : actual element
1256 * off : index into the KVOffsetTable from which to print
1257 *
1258 * returns void* : Pointer to element to show (0 ends iteration)
1259*/
1260static void* ProcSeqNextMMapRegistrations(struct seq_file *sfile,void* el,loff_t off)
1261{
1262 return ProcSeqOff2ElementMMapRegistrations(sfile,off);
1263}
1264
1265/*
1266 * Show MMap element (called when reading /proc/mmap file)
1267
1268 * sfile : seq_file that handles /proc file
1269 * el : actual element
1270 *
1271*/
1272static void ProcSeqShowMMapRegistrations(struct seq_file *sfile, void *el)
1273{
1274 KV_OFFSET_STRUCT *psOffsetStruct = (KV_OFFSET_STRUCT*)el;
1275 LinuxMemArea *psLinuxMemArea;
1276 IMG_SIZE_T uiRealByteSize;
1277 IMG_UINTPTR_T uiByteOffset;
1278
1279 if(el == PVR_PROC_SEQ_START_TOKEN)
1280 {
1281 seq_printf( sfile,
1282#if !defined(DEBUG_LINUX_XML_PROC_FILES)
1283 "Allocations registered for mmap: %u\n"
1284 "In total these areas correspond to %" SIZE_T_FMT_LEN "u bytes\n"
1285 "psLinuxMemArea "
1286 "UserVAddr "
1287 "KernelVAddr "
1288 "CpuPAddr "
1289 "MMapOffset "
1290 "ByteLength "
1291 "LinuxMemType "
1292 "Pid Name Flags\n",
1293#else
1294 "<mmap_header>\n"
1295 "\t<count>%u</count>\n"
1296 "\t<bytes>%" SIZE_T_FMT_LEN "u</bytes>\n"
1297 "</mmap_header>\n",
1298#endif
1299 g_ui32RegisteredAreas,
1300 g_uiTotalByteSize
1301 );
1302 return;
1303 }
1304
1305 psLinuxMemArea = psOffsetStruct->psLinuxMemArea;
1306
1307 DetermineUsersSizeAndByteOffset(psLinuxMemArea,
1308 &uiRealByteSize,
1309 &uiByteOffset);
1310
1311 seq_printf( sfile,
1312#if !defined(DEBUG_LINUX_XML_PROC_FILES)
1313 "%p %p %p " CPUPADDR_FMT " " UINTPTR_FMT " %" SIZE_T_FMT_LEN "u %-24s %-5u %-8s %08x(%s)\n",
1314#else
1315 "<mmap_record>\n"
1316 "\t<pointer>%p</pointer>\n"
1317 "\t<user_virtual>%p</user_virtual>\n"
1318 "\t<kernel_virtual>%p</kernel_virtual>\n"
1319 "\t<cpu_physical>" CPUPADDR_FMT "</cpu_physical>\n"
1320 "\t<mmap_offset>" UINTPTR_FMT "</mmap_offset>\n"
1321 "\t<bytes>%" SIZE_T_FMT_LEN "u</bytes>\n"
1322 "\t<linux_mem_area_type>%-24s</linux_mem_area_type>\n"
1323 "\t<pid>%-5u</pid>\n"
1324 "\t<name>%-8s</name>\n"
1325 "\t<flags>%08x</flags>\n"
1326 "\t<flags_string>%s</flags_string>\n"
1327 "</mmap_record>\n",
1328#endif
1329 psLinuxMemArea,
1330 (IMG_PVOID)(psOffsetStruct->uiUserVAddr + uiByteOffset),
1331 LinuxMemAreaToCpuVAddr(psLinuxMemArea),
1332 LinuxMemAreaToCpuPAddr(psLinuxMemArea,0).uiAddr,
1333 (IMG_UINTPTR_T)psOffsetStruct->uiMMapOffset,
1334 psLinuxMemArea->uiByteSize,
1335 LinuxMemAreaTypeToString(psLinuxMemArea->eAreaType),
1336 psOffsetStruct->ui32PID,
1337 psOffsetStruct->pszName,
1338 psLinuxMemArea->ui32AreaFlags,
1339 HAPFlagsToString(psLinuxMemArea->ui32AreaFlags));
1340}
1341
1342#endif
1343
1344
1345/*!
1346 *******************************************************************************
1347
1348 @Function PVRMMapRegisterArea
1349
1350 @Description
1351
1352 Register a memory area with the mmap code.
1353
1354 @input psLinuxMemArea : pointer to memory area.
1355
1356 @Return PVRSRV_OK, or PVRSRV_ERROR.
1357
1358 ******************************************************************************/
1359PVRSRV_ERROR
1360PVRMMapRegisterArea(LinuxMemArea *psLinuxMemArea)
1361{
1362 PVRSRV_ERROR eError;
1363#if defined(DEBUG) || defined(DEBUG_LINUX_MMAP_AREAS)
1364 const IMG_CHAR *pszName = LinuxMemAreaTypeToString(LinuxMemAreaRootType(psLinuxMemArea));
1365#endif
1366
1367 LinuxLockMutexNested(&g_sMMapMutex, PVRSRV_LOCK_CLASS_MMAP);
1368
1369#if defined(DEBUG) || defined(DEBUG_LINUX_MMAP_AREAS)
1370 PVR_DPF((PVR_DBG_MESSAGE,
1371 "%s(%s, psLinuxMemArea 0x%p, ui32AllocFlags 0x%8x)",
1372 __FUNCTION__, pszName, psLinuxMemArea, psLinuxMemArea->ui32AreaFlags));
1373#endif
1374
1375 PVR_ASSERT(psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC || LinuxMemAreaRoot(psLinuxMemArea)->eAreaType != LINUX_MEM_AREA_SUB_ALLOC);
1376
1377 /* Check this mem area hasn't already been registered */
1378 if(psLinuxMemArea->bMMapRegistered)
1379 {
1380 PVR_DPF((PVR_DBG_ERROR, "%s: psLinuxMemArea 0x%p is already registered",
1381 __FUNCTION__, psLinuxMemArea));
1382 eError = PVRSRV_ERROR_INVALID_PARAMS;
1383 goto exit_unlock;
1384 }
1385
1386 list_add_tail(&psLinuxMemArea->sMMapItem, &g_sMMapAreaList);
1387
1388 psLinuxMemArea->bMMapRegistered = IMG_TRUE;
1389
1390#if defined(DEBUG_LINUX_MMAP_AREAS)
1391 g_ui32RegisteredAreas++;
1392 /*
1393 * Sub memory areas are excluded from g_uiTotalByteSize so that we
1394 * don't count memory twice, once for the parent and again for sub
1395 * allocations.
1396 */
1397 if (psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC)
1398 {
1399 g_uiTotalByteSize += psLinuxMemArea->uiByteSize;
1400 }
1401#endif
1402
1403 eError = PVRSRV_OK;
1404
1405exit_unlock:
1406 LinuxUnLockMutex(&g_sMMapMutex);
1407
1408 return eError;
1409}
1410
1411
1412/*!
1413 *******************************************************************************
1414
 1415 @Function PVRMMapRemoveRegisteredArea
1416
1417 @Description
1418
1419 Unregister a memory area with the mmap code.
1420
1421 @input psLinuxMemArea : pointer to memory area.
1422
1423 @Return PVRSRV_OK, or PVRSRV_ERROR.
1424
1425 ******************************************************************************/
1426PVRSRV_ERROR
1427PVRMMapRemoveRegisteredArea(LinuxMemArea *psLinuxMemArea)
1428{
1429 PVRSRV_ERROR eError;
1430 PKV_OFFSET_STRUCT psOffsetStruct, psTmpOffsetStruct;
1431
1432 LinuxLockMutexNested(&g_sMMapMutex, PVRSRV_LOCK_CLASS_MMAP);
1433
1434 PVR_ASSERT(psLinuxMemArea->bMMapRegistered);
1435
1436 list_for_each_entry_safe(psOffsetStruct, psTmpOffsetStruct, &psLinuxMemArea->sMMapOffsetStructList, sAreaItem)
1437 {
1438 if (psOffsetStruct->ui32Mapped != 0)
1439 {
1440 PVR_DPF((PVR_DBG_ERROR, "%s: psOffsetStruct 0x%p for memory area 0x%p is still mapped; psOffsetStruct->ui32Mapped %u", __FUNCTION__, psOffsetStruct, psLinuxMemArea, psOffsetStruct->ui32Mapped));
1441 dump_stack();
1442 PVRSRVDumpRefCountCCB();
1443 eError = PVRSRV_ERROR_STILL_MAPPED;
1444 goto exit_unlock;
1445 }
1446 else
1447 {
1448 /*
1449 * An offset structure is created when a call is made to get
1450 * the mmap data for a physical mapping. If the data is never
1451 * used for mmap, we will be left with an unmapped offset
1452 * structure.
1453 */
1454 PVR_DPF((PVR_DBG_WARNING, "%s: psOffsetStruct 0x%p was never mapped", __FUNCTION__, psOffsetStruct));
1455 }
1456
1457 PVR_ASSERT((psOffsetStruct->ui32Mapped == 0) && psOffsetStruct->bOnMMapList);
1458
1459 DestroyOffsetStruct(psOffsetStruct);
1460 }
1461
1462 list_del(&psLinuxMemArea->sMMapItem);
1463
1464 psLinuxMemArea->bMMapRegistered = IMG_FALSE;
1465
1466#if defined(DEBUG_LINUX_MMAP_AREAS)
1467 g_ui32RegisteredAreas--;
1468 if (psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC)
1469 {
1470 g_uiTotalByteSize -= psLinuxMemArea->uiByteSize;
1471 }
1472#endif
1473
1474 eError = PVRSRV_OK;
1475
1476exit_unlock:
1477 LinuxUnLockMutex(&g_sMMapMutex);
1478 return eError;
1479}
1480
1481
1482/*!
1483 *******************************************************************************
1484
1485 @Function LinuxMMapPerProcessConnect
1486
1487 @Description
1488
1489 Per-process mmap initialisation code.
1490
1491 @input psEnvPerProc : pointer to OS specific per-process data.
1492
1493 @Return PVRSRV_OK, or PVRSRV_ERROR.
1494
1495 ******************************************************************************/
1496PVRSRV_ERROR
1497LinuxMMapPerProcessConnect(PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc)
1498{
1499 PVR_UNREFERENCED_PARAMETER(psEnvPerProc);
1500
1501 return PVRSRV_OK;
1502}
1503
1504/*!
1505 *******************************************************************************
1506
1507 @Function LinuxMMapPerProcessDisconnect
1508
1509 @Description
1510
1511 Per-process mmap deinitialisation code.
1512
1513 @input psEnvPerProc : pointer to OS specific per-process data.
1514
1515 ******************************************************************************/
1516IMG_VOID
1517LinuxMMapPerProcessDisconnect(PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc)
1518{
1519 PKV_OFFSET_STRUCT psOffsetStruct, psTmpOffsetStruct;
1520 IMG_BOOL bWarn = IMG_FALSE;
1521 IMG_UINT32 ui32PID = OSGetCurrentProcessIDKM();
1522
1523 PVR_UNREFERENCED_PARAMETER(psEnvPerProc);
1524
1525 LinuxLockMutexNested(&g_sMMapMutex, PVRSRV_LOCK_CLASS_MMAP);
1526
1527 list_for_each_entry_safe(psOffsetStruct, psTmpOffsetStruct, &g_sMMapOffsetStructList, sMMapItem)
1528 {
1529 if (psOffsetStruct->ui32PID == ui32PID)
1530 {
1531 if (!bWarn)
1532 {
1533 PVR_DPF((PVR_DBG_WARNING, "%s: process has unmapped offset structures. Removing them", __FUNCTION__));
1534 bWarn = IMG_TRUE;
1535 }
1536 PVR_ASSERT(psOffsetStruct->ui32Mapped == 0);
1537 PVR_ASSERT(psOffsetStruct->bOnMMapList);
1538
1539 DestroyOffsetStruct(psOffsetStruct);
1540 }
1541 }
1542
1543 LinuxUnLockMutex(&g_sMMapMutex);
1544}
1545
1546
1547/*!
1548 *******************************************************************************
1549
1550 @Function LinuxMMapPerProcessHandleOptions
1551
1552 @Description
1553
1554 Set secure handle options required by mmap code.
1555
1556 @input psHandleBase : pointer to handle base.
1557
1558 @Return PVRSRV_OK, or PVRSRV_ERROR.
1559
1560 ******************************************************************************/
1561PVRSRV_ERROR LinuxMMapPerProcessHandleOptions(PVRSRV_HANDLE_BASE *psHandleBase)
1562{
1563 PVRSRV_ERROR eError;
1564
1565 eError = PVRSRVSetMaxHandle(psHandleBase, MAX_MMAP_HANDLE);
1566 if (eError != PVRSRV_OK)
1567 {
1568 PVR_DPF((PVR_DBG_ERROR,"%s: failed to set handle limit (%d)", __FUNCTION__, eError));
1569 return eError;
1570 }
1571
1572 return eError;
1573}
1574
1575
1576/*!
1577 *******************************************************************************
1578
1579 @Function PVRMMapInit
1580
1581 @Description
1582
1583 MMap initialisation code
1584
1585 ******************************************************************************/
1586IMG_VOID
1587PVRMMapInit(IMG_VOID)
1588{
1589 LinuxInitMutex(&g_sMMapMutex);
1590
1591 g_psMemmapCache = KMemCacheCreateWrapper("img-mmap", sizeof(KV_OFFSET_STRUCT), 0, 0);
1592 if (!g_psMemmapCache)
1593 {
1594 PVR_DPF((PVR_DBG_ERROR,"%s: failed to allocate kmem_cache", __FUNCTION__));
1595 goto error;
1596 }
1597
1598#if defined(DEBUG_LINUX_MMAP_AREAS)
1599 g_ProcMMap = CreateProcReadEntrySeq("mmap", NULL,
1600 ProcSeqNextMMapRegistrations,
1601 ProcSeqShowMMapRegistrations,
1602 ProcSeqOff2ElementMMapRegistrations,
1603 ProcSeqStartstopMMapRegistrations
1604 );
1605#endif /* defined(DEBUG_LINUX_MMAP_AREAS) */
1606 return;
1607
1608error:
1609 PVRMMapCleanup();
1610 return;
1611}
1612
1613
1614/*!
1615 *******************************************************************************
1616
1617 @Function PVRMMapCleanup
1618
1619 @Description
1620
1621 Mmap deinitialisation code
1622
1623 ******************************************************************************/
1624IMG_VOID
1625PVRMMapCleanup(IMG_VOID)
1626{
1627 PVRSRV_ERROR eError;
1628
1629 if (!list_empty(&g_sMMapAreaList))
1630 {
1631 LinuxMemArea *psLinuxMemArea, *psTmpMemArea;
1632
1633 PVR_DPF((PVR_DBG_ERROR, "%s: Memory areas are still registered with MMap", __FUNCTION__));
1634
1635 PVR_TRACE(("%s: Unregistering memory areas", __FUNCTION__));
1636 list_for_each_entry_safe(psLinuxMemArea, psTmpMemArea, &g_sMMapAreaList, sMMapItem)
1637 {
1638 eError = PVRMMapRemoveRegisteredArea(psLinuxMemArea);
1639 if (eError != PVRSRV_OK)
1640 {
1641 PVR_DPF((PVR_DBG_ERROR, "%s: PVRMMapRemoveRegisteredArea failed (%d)", __FUNCTION__, eError));
1642 }
1643 PVR_ASSERT(eError == PVRSRV_OK);
1644
1645 LinuxMemAreaDeepFree(psLinuxMemArea);
1646 }
1647 }
1648 PVR_ASSERT(list_empty(&g_sMMapAreaList));
1649
1650#if defined(DEBUG_LINUX_MMAP_AREAS)
1651 RemoveProcEntrySeq(g_ProcMMap);
1652#endif /* defined(DEBUG_LINUX_MMAP_AREAS) */
1653
1654 if(g_psMemmapCache)
1655 {
1656 KMemCacheDestroyWrapper(g_psMemmapCache);
1657 g_psMemmapCache = NULL;
1658 }
1659}