[android-sdk/device-ti-proprietary-open.git] / jacinto6 / sgx_src / eurasia_km / services4 / srvkm / env / linux / mm.c
1 /*************************************************************************/ /*!
2 @Title Misc memory management utility functions for Linux
3 @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
4 @License Dual MIT/GPLv2
6 The contents of this file are subject to the MIT license as set out below.
8 Permission is hereby granted, free of charge, to any person obtaining a copy
9 of this software and associated documentation files (the "Software"), to deal
10 in the Software without restriction, including without limitation the rights
11 to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 copies of the Software, and to permit persons to whom the Software is
13 furnished to do so, subject to the following conditions:
15 The above copyright notice and this permission notice shall be included in
16 all copies or substantial portions of the Software.
18 Alternatively, the contents of this file may be used under the terms of
19 the GNU General Public License Version 2 ("GPL") in which case the provisions
20 of GPL are applicable instead of those above.
22 If you wish to allow use of your version of this file only under the terms of
23 GPL, and not to allow others to use your version of this file under the terms
24 of the MIT license, indicate your decision by deleting the provisions above
25 and replace them with the notice and other provisions required by GPL as set
26 out in the file called "GPL-COPYING" included in this distribution. If you do
27 not delete the provisions above, a recipient may use your version of this file
28 under the terms of either the MIT license or GPL.
30 This License is also included in this distribution in the file called
31 "MIT-COPYING".
33 EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
34 PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
35 BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
36 PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
37 COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
38 IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
39 CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
40 */ /**************************************************************************/
42 #include <linux/version.h>
44 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38))
45 #ifndef AUTOCONF_INCLUDED
46 #include <linux/config.h>
47 #endif
48 #endif
50 #if !defined(PVR_LINUX_MEM_AREA_POOL_MAX_PAGES)
51 #define PVR_LINUX_MEM_AREA_POOL_MAX_PAGES 0
52 #endif
54 #include <linux/kernel.h>
55 #include <asm/atomic.h>
56 #include <linux/list.h>
57 #include <linux/mutex.h>
58 #include <linux/mm.h>
59 #include <linux/vmalloc.h>
60 #include <asm/io.h>
61 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
62 #include <linux/wrapper.h>
63 #endif
64 #include <linux/slab.h>
65 #include <linux/highmem.h>
66 #include <linux/sched.h>
68 #if defined(PVR_LINUX_MEM_AREA_POOL_ALLOW_SHRINK)
69 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0))
70 #include <linux/shrinker.h>
71 #endif
72 #endif
74 #include "img_defs.h"
75 #include "services.h"
76 #include "servicesint.h"
77 #include "syscommon.h"
78 #include "mutils.h"
79 #include "mm.h"
80 #include "pvrmmap.h"
81 #include "mmap.h"
82 #include "osfunc.h"
83 #include "pvr_debug.h"
84 #include "proc.h"
85 #include "mutex.h"
86 #include "lock.h"
88 #if defined(DEBUG_LINUX_MEM_AREAS) || defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
89 #include "lists.h"
90 #endif
92 /* If there is no explicit definition
93 * for the minimum DMM alignment size,
94 * then set it to "0" and let ION/DMM
95 * set the minimum value. */
96 #ifndef CONFIG_TILER_GRANULARITY
97 #define CONFIG_TILER_GRANULARITY 0
98 #endif
100 /*
101 * The page pool entry count is an atomic int so that the shrinker function
102 * can return it even when we can't take the lock that protects the page pool
103 * list.
104 */
105 static atomic_t g_sPagePoolEntryCount = ATOMIC_INIT(0);
107 #if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
/* Categories of tracked allocations. Used to tag DEBUG_MEM_ALLOC_REC entries
 * and to index the g_WaterMarkData/g_HighWaterMarkData arrays.
 * NOTE: the order must match the string table in
 * DebugMemAllocRecordTypeToString(). */
typedef enum {
	DEBUG_MEM_ALLOC_TYPE_KMALLOC,
	DEBUG_MEM_ALLOC_TYPE_VMALLOC,
	DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES,
	DEBUG_MEM_ALLOC_TYPE_IOREMAP,
	DEBUG_MEM_ALLOC_TYPE_IO,
	DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE,
	DEBUG_MEM_ALLOC_TYPE_ION,
#if defined(PVR_LINUX_MEM_AREA_USE_VMAP)
	DEBUG_MEM_ALLOC_TYPE_VMAP,
#endif
	DEBUG_MEM_ALLOC_TYPE_COUNT	/* Number of types; sizes the water-mark arrays */
} DEBUG_MEM_ALLOC_TYPE;
/* One tracked allocation. Records live on the g_MemoryRecords doubly-linked
 * list (psNext/ppsThis linkage managed by the IMPLEMENT_LIST_* helpers) and
 * are protected by g_sDebugMutex. */
typedef struct _DEBUG_MEM_ALLOC_REC
{
	DEBUG_MEM_ALLOC_TYPE	eAllocType;
	IMG_VOID		*pvKey; /* Some unique value (private to the eAllocType) */
	IMG_VOID		*pvCpuVAddr;	/* Kernel virtual address, where applicable */
	IMG_UINT32		ulCpuPAddr;	/* CPU physical address, 0 if not known */
	IMG_VOID		*pvPrivateData;
	IMG_UINT32		ui32Bytes;	/* Size of the allocation being tracked */
	pid_t			pid;		/* Process that made the allocation */
	IMG_CHAR		*pszFileName;	/* Call-site file, for diagnostics */
	IMG_UINT32		ui32Line;	/* Call-site line, for diagnostics */

	struct _DEBUG_MEM_ALLOC_REC	*psNext;
	struct _DEBUG_MEM_ALLOC_REC	**ppsThis;
} DEBUG_MEM_ALLOC_REC;
138 static IMPLEMENT_LIST_ANY_VA_2(DEBUG_MEM_ALLOC_REC, IMG_BOOL, IMG_FALSE)
139 static IMPLEMENT_LIST_ANY_VA(DEBUG_MEM_ALLOC_REC)
140 static IMPLEMENT_LIST_FOR_EACH(DEBUG_MEM_ALLOC_REC)
141 static IMPLEMENT_LIST_INSERT(DEBUG_MEM_ALLOC_REC)
142 static IMPLEMENT_LIST_REMOVE(DEBUG_MEM_ALLOC_REC)
145 static DEBUG_MEM_ALLOC_REC *g_MemoryRecords;
147 static IMG_UINT32 g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_COUNT];
148 static IMG_UINT32 g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_COUNT];
150 /* vmalloc + kmalloc + alloc_pages + kmem_cache */
151 static IMG_UINT32 g_SysRAMWaterMark; /* Doesn't include page pool */
152 static IMG_UINT32 g_SysRAMHighWaterMark; /* *DOES* include page pool */
154 static inline IMG_UINT32
155 SysRAMTrueWaterMark(void)
156 {
157 return g_SysRAMWaterMark + PAGES_TO_BYTES(atomic_read(&g_sPagePoolEntryCount));
158 }
160 /* ioremap + io */
161 static IMG_UINT32 g_IOMemWaterMark;
162 static IMG_UINT32 g_IOMemHighWaterMark;
164 static IMG_VOID DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE eAllocType,
165 IMG_VOID *pvKey,
166 IMG_VOID *pvCpuVAddr,
167 IMG_UINT32 ulCpuPAddr,
168 IMG_VOID *pvPrivateData,
169 IMG_UINT32 ui32Bytes,
170 IMG_CHAR *pszFileName,
171 IMG_UINT32 ui32Line);
173 static IMG_VOID DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE eAllocType, IMG_VOID *pvKey, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line);
175 static IMG_CHAR *DebugMemAllocRecordTypeToString(DEBUG_MEM_ALLOC_TYPE eAllocType);
178 static struct proc_dir_entry *g_SeqFileMemoryRecords;
179 static void* ProcSeqNextMemoryRecords(struct seq_file *sfile,void* el,loff_t off);
180 static void ProcSeqShowMemoryRecords(struct seq_file *sfile,void* el);
181 static void* ProcSeqOff2ElementMemoryRecords(struct seq_file * sfile, loff_t off);
183 #endif
186 #if defined(DEBUG_LINUX_MEM_AREAS)
/* Debug record tracking one LinuxMemArea. Records live on the
 * g_LinuxMemAreaRecords list, protected by g_sDebugMutex. */
typedef struct _DEBUG_LINUX_MEM_AREA_REC
{
	LinuxMemArea	*psLinuxMemArea;	/* The area being tracked */
	IMG_UINT32	ui32Flags;		/* Area flags at creation time */
	pid_t		pid;			/* Process that created the area */

	struct _DEBUG_LINUX_MEM_AREA_REC	*psNext;
	struct _DEBUG_LINUX_MEM_AREA_REC	**ppsThis;
}DEBUG_LINUX_MEM_AREA_REC;
198 static IMPLEMENT_LIST_ANY_VA(DEBUG_LINUX_MEM_AREA_REC)
199 static IMPLEMENT_LIST_FOR_EACH(DEBUG_LINUX_MEM_AREA_REC)
200 static IMPLEMENT_LIST_INSERT(DEBUG_LINUX_MEM_AREA_REC)
201 static IMPLEMENT_LIST_REMOVE(DEBUG_LINUX_MEM_AREA_REC)
206 static DEBUG_LINUX_MEM_AREA_REC *g_LinuxMemAreaRecords;
207 static IMG_UINT32 g_LinuxMemAreaCount;
208 static IMG_UINT32 g_LinuxMemAreaWaterMark;
209 static IMG_UINT32 g_LinuxMemAreaHighWaterMark;
212 static struct proc_dir_entry *g_SeqFileMemArea;
214 static void* ProcSeqNextMemArea(struct seq_file *sfile,void* el,loff_t off);
215 static void ProcSeqShowMemArea(struct seq_file *sfile,void* el);
216 static void* ProcSeqOff2ElementMemArea(struct seq_file *sfile, loff_t off);
218 #endif
220 #if defined(DEBUG_LINUX_MEM_AREAS) || defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
221 static PVRSRV_LINUX_MUTEX g_sDebugMutex;
222 #endif
224 #if (defined(DEBUG_LINUX_MEM_AREAS) || defined(DEBUG_LINUX_MEMORY_ALLOCATIONS))
225 static void ProcSeqStartstopDebugMutex(struct seq_file *sfile,IMG_BOOL start);
226 #endif
/* One free page held in the uncached-page pool. Entries are allocated from
 * g_PsLinuxPagePoolCache and linked on g_sPagePoolList (LRU order),
 * protected by the page pool mutex. */
typedef struct
{
	/* Linkage for page pool LRU list */
	struct list_head sPagePoolItem;

	struct page *psPage;	/* The pooled page itself */
} LinuxPagePoolEntry;
236 static LinuxKMemCache *g_PsLinuxMemAreaCache;
237 static LinuxKMemCache *g_PsLinuxPagePoolCache;
239 static LIST_HEAD(g_sPagePoolList);
240 static int g_iPagePoolMaxEntries;
242 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
243 static IMG_VOID ReservePages(IMG_VOID *pvAddress, IMG_UINT32 ui32Length);
244 static IMG_VOID UnreservePages(IMG_VOID *pvAddress, IMG_UINT32 ui32Length);
245 #endif
247 static LinuxMemArea *LinuxMemAreaStructAlloc(IMG_VOID);
248 static IMG_VOID LinuxMemAreaStructFree(LinuxMemArea *psLinuxMemArea);
249 #if defined(DEBUG_LINUX_MEM_AREAS)
250 static IMG_VOID DebugLinuxMemAreaRecordAdd(LinuxMemArea *psLinuxMemArea, IMG_UINT32 ui32Flags);
251 static DEBUG_LINUX_MEM_AREA_REC *DebugLinuxMemAreaRecordFind(LinuxMemArea *psLinuxMemArea);
252 static IMG_VOID DebugLinuxMemAreaRecordRemove(LinuxMemArea *psLinuxMemArea);
253 #endif
256 static inline IMG_BOOL
257 AreaIsUncached(IMG_UINT32 ui32AreaFlags)
258 {
259 return (ui32AreaFlags & (PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_UNCACHED)) != 0;
260 }
262 static inline IMG_BOOL
263 CanFreeToPool(LinuxMemArea *psLinuxMemArea)
264 {
265 return AreaIsUncached(psLinuxMemArea->ui32AreaFlags) && !psLinuxMemArea->bNeedsCacheInvalidate;
266 }
268 IMG_VOID *
269 _KMallocWrapper(IMG_UINT32 ui32ByteSize, gfp_t uFlags, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line)
270 {
271 IMG_VOID *pvRet;
272 pvRet = kmalloc(ui32ByteSize, uFlags);
273 #if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
274 if (pvRet)
275 {
276 DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_KMALLOC,
277 pvRet,
278 pvRet,
279 0,
280 NULL,
281 ui32ByteSize,
282 pszFileName,
283 ui32Line
284 );
285 }
286 #else
287 PVR_UNREFERENCED_PARAMETER(pszFileName);
288 PVR_UNREFERENCED_PARAMETER(ui32Line);
289 #endif
290 return pvRet;
291 }
294 IMG_VOID
295 _KFreeWrapper(IMG_VOID *pvCpuVAddr, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line)
296 {
297 #if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
298 DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_KMALLOC, pvCpuVAddr, pszFileName, ui32Line);
299 #else
300 PVR_UNREFERENCED_PARAMETER(pszFileName);
301 PVR_UNREFERENCED_PARAMETER(ui32Line);
302 #endif
303 kfree(pvCpuVAddr);
304 }
307 #if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
308 static IMG_VOID
309 DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE eAllocType,
310 IMG_VOID *pvKey,
311 IMG_VOID *pvCpuVAddr,
312 IMG_UINT32 ulCpuPAddr,
313 IMG_VOID *pvPrivateData,
314 IMG_UINT32 ui32Bytes,
315 IMG_CHAR *pszFileName,
316 IMG_UINT32 ui32Line)
317 {
318 DEBUG_MEM_ALLOC_REC *psRecord;
320 LinuxLockMutexNested(&g_sDebugMutex, PVRSRV_LOCK_CLASS_MM_DEBUG);
322 psRecord = kmalloc(sizeof(DEBUG_MEM_ALLOC_REC), GFP_KERNEL);
324 psRecord->eAllocType = eAllocType;
325 psRecord->pvKey = pvKey;
326 psRecord->pvCpuVAddr = pvCpuVAddr;
327 psRecord->ulCpuPAddr = ulCpuPAddr;
328 psRecord->pvPrivateData = pvPrivateData;
329 psRecord->pid = OSGetCurrentProcessIDKM();
330 psRecord->ui32Bytes = ui32Bytes;
331 psRecord->pszFileName = pszFileName;
332 psRecord->ui32Line = ui32Line;
334 List_DEBUG_MEM_ALLOC_REC_Insert(&g_MemoryRecords, psRecord);
336 g_WaterMarkData[eAllocType] += ui32Bytes;
337 if (g_WaterMarkData[eAllocType] > g_HighWaterMarkData[eAllocType])
338 {
339 g_HighWaterMarkData[eAllocType] = g_WaterMarkData[eAllocType];
340 }
342 if (eAllocType == DEBUG_MEM_ALLOC_TYPE_KMALLOC
343 || eAllocType == DEBUG_MEM_ALLOC_TYPE_VMALLOC
344 || eAllocType == DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES
345 || eAllocType == DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE)
346 {
347 IMG_UINT32 ui32SysRAMTrueWaterMark;
349 g_SysRAMWaterMark += ui32Bytes;
350 ui32SysRAMTrueWaterMark = SysRAMTrueWaterMark();
352 if (ui32SysRAMTrueWaterMark > g_SysRAMHighWaterMark)
353 {
354 g_SysRAMHighWaterMark = ui32SysRAMTrueWaterMark;
355 }
356 }
357 else if (eAllocType == DEBUG_MEM_ALLOC_TYPE_IOREMAP
358 || eAllocType == DEBUG_MEM_ALLOC_TYPE_IO)
359 {
360 g_IOMemWaterMark += ui32Bytes;
361 if (g_IOMemWaterMark > g_IOMemHighWaterMark)
362 {
363 g_IOMemHighWaterMark = g_IOMemWaterMark;
364 }
365 }
367 LinuxUnLockMutex(&g_sDebugMutex);
368 }
/* List-walk callback used by DebugMemAllocRecordRemove. The va_list carries
 * (DEBUG_MEM_ALLOC_TYPE eAllocType, IMG_VOID *pvKey), in that order — it must
 * match the List_..._Any_va call site. On a match the record is unlinked,
 * the relevant water marks are rolled back, the record freed and IMG_TRUE
 * returned (stopping the walk). Caller holds g_sDebugMutex. */
static IMG_BOOL DebugMemAllocRecordRemove_AnyVaCb(DEBUG_MEM_ALLOC_REC *psCurrentRecord, va_list va)
{
	DEBUG_MEM_ALLOC_TYPE eAllocType;
	IMG_VOID *pvKey;

	/* Extract the search key; order is part of the protocol with the caller */
	eAllocType = va_arg(va, DEBUG_MEM_ALLOC_TYPE);
	pvKey = va_arg(va, IMG_VOID*);

	if (psCurrentRecord->eAllocType == eAllocType
		&& psCurrentRecord->pvKey == pvKey)
	{
		eAllocType = psCurrentRecord->eAllocType;
		/* Reverse the accounting done in DebugMemAllocRecordAdd */
		g_WaterMarkData[eAllocType] -= psCurrentRecord->ui32Bytes;

		if (eAllocType == DEBUG_MEM_ALLOC_TYPE_KMALLOC
			|| eAllocType == DEBUG_MEM_ALLOC_TYPE_VMALLOC
			|| eAllocType == DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES
			|| eAllocType == DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE)
		{
			g_SysRAMWaterMark -= psCurrentRecord->ui32Bytes;
		}
		else if (eAllocType == DEBUG_MEM_ALLOC_TYPE_IOREMAP
				|| eAllocType == DEBUG_MEM_ALLOC_TYPE_IO)
		{
			g_IOMemWaterMark -= psCurrentRecord->ui32Bytes;
		}

		List_DEBUG_MEM_ALLOC_REC_Remove(psCurrentRecord);
		kfree(psCurrentRecord);

		return IMG_TRUE;
	}
	else
	{
		return IMG_FALSE;
	}
}
410 static IMG_VOID
411 DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE eAllocType, IMG_VOID *pvKey, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line)
412 {
413 /* DEBUG_MEM_ALLOC_REC **ppsCurrentRecord;*/
415 LinuxLockMutexNested(&g_sDebugMutex, PVRSRV_LOCK_CLASS_MM_DEBUG);
417 /* Locate the corresponding allocation entry */
418 if (!List_DEBUG_MEM_ALLOC_REC_IMG_BOOL_Any_va(g_MemoryRecords,
419 DebugMemAllocRecordRemove_AnyVaCb,
420 eAllocType,
421 pvKey))
422 {
423 PVR_DPF((PVR_DBG_ERROR, "%s: couldn't find an entry for type=%s with pvKey=%p (called from %s, line %d\n",
424 __FUNCTION__, DebugMemAllocRecordTypeToString(eAllocType), pvKey,
425 pszFileName, ui32Line));
426 }
428 LinuxUnLockMutex(&g_sDebugMutex);
429 }
432 static IMG_CHAR *
433 DebugMemAllocRecordTypeToString(DEBUG_MEM_ALLOC_TYPE eAllocType)
434 {
435 IMG_CHAR *apszDebugMemoryRecordTypes[] = {
436 "KMALLOC",
437 "VMALLOC",
438 "ALLOC_PAGES",
439 "IOREMAP",
440 "IO",
441 "KMEM_CACHE_ALLOC",
442 #if defined(PVR_LINUX_MEM_AREA_USE_VMAP)
443 "VMAP"
444 #endif
445 };
446 return apszDebugMemoryRecordTypes[eAllocType];
447 }
448 #endif
451 static IMG_BOOL
452 AllocFlagsToPGProt(pgprot_t *pPGProtFlags, IMG_UINT32 ui32AllocFlags)
453 {
454 pgprot_t PGProtFlags;
456 switch (ui32AllocFlags & PVRSRV_HAP_CACHETYPE_MASK)
457 {
458 case PVRSRV_HAP_CACHED:
459 PGProtFlags = PAGE_KERNEL;
460 break;
461 case PVRSRV_HAP_WRITECOMBINE:
462 PGProtFlags = PGPROT_WC(PAGE_KERNEL);
463 break;
464 case PVRSRV_HAP_UNCACHED:
465 PGProtFlags = PGPROT_UC(PAGE_KERNEL);
466 break;
467 default:
468 PVR_DPF((PVR_DBG_ERROR,
469 "%s: Unknown mapping flags=0x%08x",
470 __FUNCTION__, ui32AllocFlags));
471 dump_stack();
472 return IMG_FALSE;
473 }
475 *pPGProtFlags = PGProtFlags;
477 return IMG_TRUE;
478 }
480 IMG_VOID *
481 _VMallocWrapper(IMG_UINT32 ui32Bytes,
482 IMG_UINT32 ui32AllocFlags,
483 IMG_CHAR *pszFileName,
484 IMG_UINT32 ui32Line)
485 {
486 pgprot_t PGProtFlags;
487 IMG_VOID *pvRet;
489 if (!AllocFlagsToPGProt(&PGProtFlags, ui32AllocFlags))
490 {
491 return NULL;
492 }
494 /* Allocate virtually contiguous pages */
495 pvRet = __vmalloc(ui32Bytes, GFP_KERNEL | __GFP_HIGHMEM, PGProtFlags);
497 #if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
498 if (pvRet)
499 {
500 DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_VMALLOC,
501 pvRet,
502 pvRet,
503 0,
504 NULL,
505 PAGE_ALIGN(ui32Bytes),
506 pszFileName,
507 ui32Line
508 );
509 }
510 #else
511 PVR_UNREFERENCED_PARAMETER(pszFileName);
512 PVR_UNREFERENCED_PARAMETER(ui32Line);
513 #endif
515 return pvRet;
516 }
519 IMG_VOID
520 _VFreeWrapper(IMG_VOID *pvCpuVAddr, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line)
521 {
522 #if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
523 DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_VMALLOC, pvCpuVAddr, pszFileName, ui32Line);
524 #else
525 PVR_UNREFERENCED_PARAMETER(pszFileName);
526 PVR_UNREFERENCED_PARAMETER(ui32Line);
527 #endif
528 vfree(pvCpuVAddr);
529 }
532 #if defined(PVR_LINUX_MEM_AREA_USE_VMAP)
533 static IMG_VOID *
534 _VMapWrapper(struct page **ppsPageList, IMG_UINT32 ui32NumPages, IMG_UINT32 ui32AllocFlags, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line)
535 {
536 pgprot_t PGProtFlags;
537 IMG_VOID *pvRet;
539 if (!AllocFlagsToPGProt(&PGProtFlags, ui32AllocFlags))
540 {
541 return NULL;
542 }
544 pvRet = vmap(ppsPageList, ui32NumPages, GFP_KERNEL | __GFP_HIGHMEM, PGProtFlags);
546 #if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
547 if (pvRet)
548 {
549 DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_VMAP,
550 pvRet,
551 pvRet,
552 0,
553 NULL,
554 PAGES_TO_BYTES(ui32NumPages),
555 pszFileName,
556 ui32Line
557 );
558 }
559 #else
560 PVR_UNREFERENCED_PARAMETER(pszFileName);
561 PVR_UNREFERENCED_PARAMETER(ui32Line);
562 #endif
564 return pvRet;
565 }
567 #if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
568 #define VMapWrapper(ppsPageList, ui32Bytes, ui32AllocFlags) _VMapWrapper(ppsPageList, ui32Bytes, ui32AllocFlags, __FILE__, __LINE__)
569 #else
570 #define VMapWrapper(ppsPageList, ui32Bytes, ui32AllocFlags) _VMapWrapper(ppsPageList, ui32Bytes, ui32AllocFlags, NULL, 0)
571 #endif
574 static IMG_VOID
575 _VUnmapWrapper(IMG_VOID *pvCpuVAddr, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line)
576 {
577 #if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
578 DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_VMAP, pvCpuVAddr, pszFileName, ui32Line);
579 #else
580 PVR_UNREFERENCED_PARAMETER(pszFileName);
581 PVR_UNREFERENCED_PARAMETER(ui32Line);
582 #endif
583 vunmap(pvCpuVAddr);
584 }
586 #if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
587 #define VUnmapWrapper(pvCpuVAddr) _VUnmapWrapper(pvCpuVAddr, __FILE__, __LINE__)
588 #else
589 #define VUnmapWrapper(pvCpuVAddr) _VUnmapWrapper(pvCpuVAddr, NULL, 0)
590 #endif
592 #endif /* defined(PVR_LINUX_MEM_AREA_USE_VMAP) */
595 IMG_VOID
596 _KMemCacheFreeWrapper(LinuxKMemCache *psCache, IMG_VOID *pvObject, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line)
597 {
598 #if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
599 DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE, pvObject, pszFileName, ui32Line);
600 #else
601 PVR_UNREFERENCED_PARAMETER(pszFileName);
602 PVR_UNREFERENCED_PARAMETER(ui32Line);
603 #endif
605 kmem_cache_free(psCache, pvObject);
606 }
609 const IMG_CHAR *
610 KMemCacheNameWrapper(LinuxKMemCache *psCache)
611 {
612 PVR_UNREFERENCED_PARAMETER(psCache);
614 /* In this case kmem_cache_t is an incomplete typedef,
615 * so we can't even de-reference to get the name member. It is also a GPL export symbol */
616 return "";
617 }
620 static LinuxPagePoolEntry *
621 LinuxPagePoolEntryAlloc(IMG_VOID)
622 {
623 return KMemCacheAllocWrapper(g_PsLinuxPagePoolCache, GFP_KERNEL);
624 }
626 static IMG_VOID
627 LinuxPagePoolEntryFree(LinuxPagePoolEntry *psPagePoolEntry)
628 {
629 KMemCacheFreeWrapper(g_PsLinuxPagePoolCache, psPagePoolEntry);
630 }
633 static struct page *
634 AllocPageFromLinux(void)
635 {
636 struct page *psPage;
638 psPage = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, 0);
639 if (!psPage)
640 {
641 return NULL;
643 }
644 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
645 /* Reserve those pages to allow them to be re-mapped to user space */
646 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0))
647 SetPageReserved(psPage);
648 #else
649 mem_map_reserve(psPage);
650 #endif
651 #endif
652 return psPage;
653 }
656 static IMG_VOID
657 FreePageToLinux(struct page *psPage)
658 {
659 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
660 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0))
661 ClearPageReserved(psPage);
662 #else
663 mem_map_reserve(psPage);
664 #endif
665 #endif
666 __free_pages(psPage, 0);
667 }
#if (PVR_LINUX_MEM_AREA_POOL_MAX_PAGES != 0)
/* Real lock implementation: the pool list is shared, so guard it with a
 * mutex. PagePoolTrylock exists for the shrinker path, which must not
 * block (it can be called from reclaim context). */
static DEFINE_MUTEX(g_sPagePoolMutex);

static inline void
PagePoolLock(void)
{
	mutex_lock(&g_sPagePoolMutex);
}

static inline void
PagePoolUnlock(void)
{
	mutex_unlock(&g_sPagePoolMutex);
}

static inline int
PagePoolTrylock(void)
{
	return mutex_trylock(&g_sPagePoolMutex);
}

#else	/* (PVR_LINUX_MEM_AREA_POOL_MAX_PAGES != 0) */
/* Pool disabled at build time: no shared state, so the lock operations
 * compile away to nothing (trylock always "succeeds"). */
static inline void
PagePoolLock(void)
{
}

static inline void
PagePoolUnlock(void)
{
}

static inline int
PagePoolTrylock(void)
{
	return 1;
}
#endif	/* (PVR_LINUX_MEM_AREA_POOL_MAX_PAGES != 0) */
710 static inline void
711 AddEntryToPool(LinuxPagePoolEntry *psPagePoolEntry)
712 {
713 list_add_tail(&psPagePoolEntry->sPagePoolItem, &g_sPagePoolList);
714 atomic_inc(&g_sPagePoolEntryCount);
715 }
717 static inline void
718 RemoveEntryFromPool(LinuxPagePoolEntry *psPagePoolEntry)
719 {
720 list_del(&psPagePoolEntry->sPagePoolItem);
721 atomic_dec(&g_sPagePoolEntryCount);
722 }
724 static inline LinuxPagePoolEntry *
725 RemoveFirstEntryFromPool(void)
726 {
727 LinuxPagePoolEntry *psPagePoolEntry;
729 if (list_empty(&g_sPagePoolList))
730 {
731 PVR_ASSERT(atomic_read(&g_sPagePoolEntryCount) == 0);
733 return NULL;
734 }
736 PVR_ASSERT(atomic_read(&g_sPagePoolEntryCount) > 0);
738 psPagePoolEntry = list_first_entry(&g_sPagePoolList, LinuxPagePoolEntry, sPagePoolItem);
740 RemoveEntryFromPool(psPagePoolEntry);
742 return psPagePoolEntry;
743 }
/* Allocate one page, preferring the page pool for uncached areas.
 * *pbFromPagePool is set to IMG_TRUE when the page came from the pool (in
 * which case no CPU cache invalidate is needed), IMG_FALSE otherwise.
 * Returns NULL only if Linux itself is out of pages. */
static struct page *
AllocPage(IMG_UINT32 ui32AreaFlags, IMG_BOOL *pbFromPagePool)
{
	struct page *psPage = NULL;

	/*
	 * Only uncached allocations can come from the page pool.
	 * The page pool is currently used to reduce the cost of
	 * invalidating the CPU cache when uncached memory is allocated.
	 */
	if (AreaIsUncached(ui32AreaFlags) && atomic_read(&g_sPagePoolEntryCount) != 0)
	{
		LinuxPagePoolEntry *psPagePoolEntry;

		PagePoolLock();
		psPagePoolEntry = RemoveFirstEntryFromPool();
		PagePoolUnlock();

		/* List may have changed since we checked the counter
		 * (the counter read above is an unlocked fast path) */
		if (psPagePoolEntry)
		{
			psPage = psPagePoolEntry->psPage;
			/* Bookkeeping entry no longer needed once we own the page */
			LinuxPagePoolEntryFree(psPagePoolEntry);
			*pbFromPagePool = IMG_TRUE;
		}
	}

	/* Fall back to the kernel allocator (cached areas, or empty pool) */
	if (!psPage)
	{
		psPage = AllocPageFromLinux();
		if (psPage)
		{
			*pbFromPagePool = IMG_FALSE;
		}
	}

	return psPage;

}
/* Free one page: park it in the page pool when allowed (bToPagePool) and
 * the pool is below its limit, otherwise return it to Linux. The unlocked
 * counter check against g_iPagePoolMaxEntries is a best-effort limit — the
 * pool may transiently exceed it slightly under concurrency. If the
 * bookkeeping entry can't be allocated we simply free to Linux. */
static IMG_VOID
FreePage(IMG_BOOL bToPagePool, struct page *psPage)
{
	/* Only uncached allocations can be freed to the page pool */
	if (bToPagePool && atomic_read(&g_sPagePoolEntryCount) < g_iPagePoolMaxEntries)
	{
		LinuxPagePoolEntry *psPagePoolEntry = LinuxPagePoolEntryAlloc();
		if (psPagePoolEntry)
		{
			psPagePoolEntry->psPage = psPage;

			PagePoolLock();
			AddEntryToPool(psPagePoolEntry);
			PagePoolUnlock();

			return;
		}
	}

	FreePageToLinux(psPage);
}
/* Drain the entire page pool back to Linux. Called at driver teardown.
 * When the pool is compiled out (max pages == 0) the list must already be
 * empty — asserted rather than logged. */
static IMG_VOID
FreePagePool(IMG_VOID)
{
	LinuxPagePoolEntry *psPagePoolEntry, *psTempPoolEntry;

	PagePoolLock();

#if (PVR_LINUX_MEM_AREA_POOL_MAX_PAGES != 0)
	PVR_DPF((PVR_DBG_MESSAGE,"%s: Freeing %d pages from pool", __FUNCTION__, atomic_read(&g_sPagePoolEntryCount)));
#else
	PVR_ASSERT(atomic_read(&g_sPagePoolEntryCount) == 0);
	PVR_ASSERT(list_empty(&g_sPagePoolList));
#endif

	/* _safe variant: entries are freed while walking */
	list_for_each_entry_safe(psPagePoolEntry, psTempPoolEntry, &g_sPagePoolList, sPagePoolItem)
	{
		RemoveEntryFromPool(psPagePoolEntry);

		FreePageToLinux(psPagePoolEntry->psPage);
		LinuxPagePoolEntryFree(psPagePoolEntry);
	}

	/* Pool must be fully drained at this point */
	PVR_ASSERT(atomic_read(&g_sPagePoolEntryCount) == 0);

	PagePoolUnlock();
}
834 #if defined(PVR_LINUX_MEM_AREA_POOL_ALLOW_SHRINK)
835 #if defined(PVRSRV_NEED_PVR_ASSERT)
836 static struct shrinker g_sShrinker;
837 #endif
/* Memory-pressure callback (old-style shrinker API): free up to nr_to_scan
 * pooled pages back to Linux and return the remaining pool size. Uses
 * trylock because this can be invoked from reclaim context — returning -1
 * tells the shrinker core to back off when the lock is contended. */
static int
ShrinkPagePool(struct shrinker *psShrinker, struct shrink_control *psShrinkControl)
{
	unsigned long uNumToScan = psShrinkControl->nr_to_scan;

	PVR_ASSERT(psShrinker == &g_sShrinker);
	(void)psShrinker;

	/* nr_to_scan == 0 means "just report the pool size" */
	if (uNumToScan != 0)
	{
		LinuxPagePoolEntry *psPagePoolEntry, *psTempPoolEntry;

		PVR_DPF((PVR_DBG_MESSAGE,"%s: Number to scan: %ld", __FUNCTION__, uNumToScan));
		PVR_DPF((PVR_DBG_MESSAGE,"%s: Pages in pool before scan: %d", __FUNCTION__, atomic_read(&g_sPagePoolEntryCount)));

		if (!PagePoolTrylock())
		{
			PVR_TRACE(("%s: Couldn't get page pool lock", __FUNCTION__));
			return -1;
		}

		/* _safe variant: entries are freed while walking */
		list_for_each_entry_safe(psPagePoolEntry, psTempPoolEntry, &g_sPagePoolList, sPagePoolItem)
		{
			RemoveEntryFromPool(psPagePoolEntry);

			FreePageToLinux(psPagePoolEntry->psPage);
			LinuxPagePoolEntryFree(psPagePoolEntry);

			if (--uNumToScan == 0)
			{
				break;
			}
		}

		if (list_empty(&g_sPagePoolList))
		{
			PVR_ASSERT(atomic_read(&g_sPagePoolEntryCount) == 0);
		}

		PagePoolUnlock();

		PVR_DPF((PVR_DBG_MESSAGE,"%s: Pages in pool after scan: %d", __FUNCTION__, atomic_read(&g_sPagePoolEntryCount)));
	}

	return atomic_read(&g_sPagePoolEntryCount);
}
885 #endif
/* Allocate ui32NumPages pages (pool-first for uncached areas) plus the
 * page-pointer array that holds them. On success writes the array and its
 * OSAllocMem handle through pppsPageList/phBlockPageList; *pbFromPagePool
 * is IMG_TRUE only if EVERY page came from the pool (so the caller can skip
 * the cache invalidate). Returns IMG_FALSE on any failure, with everything
 * already allocated unwound. */
static IMG_BOOL
AllocPages(IMG_UINT32 ui32AreaFlags, struct page ***pppsPageList, IMG_HANDLE *phBlockPageList, IMG_UINT32 ui32NumPages, IMG_BOOL *pbFromPagePool)
{
	struct page **ppsPageList;
	IMG_HANDLE hBlockPageList;
	IMG_INT32 i;	/* Must be signed; see "for" loop conditions */
	PVRSRV_ERROR eError;
	IMG_BOOL bFromPagePool = IMG_FALSE;

	eError = OSAllocMem(0, sizeof(*ppsPageList) * ui32NumPages, (IMG_VOID **)&ppsPageList, &hBlockPageList,
						"Array of pages");
	if (eError != PVRSRV_OK)
	{
		goto failed_page_list_alloc;
	}

	/* Starts TRUE; AND-ed with each page's origin so one Linux-allocated
	 * page makes the whole area "not from pool" */
	*pbFromPagePool = IMG_TRUE;
	for(i = 0; i < (IMG_INT32)ui32NumPages; i++)
	{
		ppsPageList[i] = AllocPage(ui32AreaFlags, &bFromPagePool);
		if (!ppsPageList[i])
		{
			goto failed_alloc_pages;
		}
		*pbFromPagePool &= bFromPagePool;
	}

	*pppsPageList = ppsPageList;
	*phBlockPageList = hBlockPageList;

#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
	/* The page-list pointer doubles as the tracking key */
	DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES,
						   ppsPageList,
						   0,
						   0,
						   NULL,
						   PAGES_TO_BYTES(ui32NumPages),
						   "unknown",
						   0
						   );
#endif

	return IMG_TRUE;

failed_alloc_pages:
	/* Unwind the pages allocated so far (i is the first failed index) */
	for(i--; i >= 0; i--)
	{
		FreePage(*pbFromPagePool, ppsPageList[i]);
	}
	(IMG_VOID) OSFreeMem(0, sizeof(*ppsPageList) * ui32NumPages, ppsPageList, hBlockPageList);

failed_page_list_alloc:
	return IMG_FALSE;
}
943 static IMG_VOID
944 FreePages(IMG_BOOL bToPagePool, struct page **ppsPageList, IMG_HANDLE hBlockPageList, IMG_UINT32 ui32NumPages)
945 {
946 IMG_INT32 i;
948 for(i = 0; i < (IMG_INT32)ui32NumPages; i++)
949 {
950 FreePage(bToPagePool, ppsPageList[i]);
951 }
953 #if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
954 DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES, ppsPageList, __FILE__, __LINE__);
955 #endif
957 (IMG_VOID) OSFreeMem(0, sizeof(*ppsPageList) * ui32NumPages, ppsPageList, hBlockPageList);
958 }
/* Create a LINUX_MEM_AREA_VMALLOC memory area of ui32Bytes bytes.
 * Two build-time strategies:
 *  - PVR_LINUX_MEM_AREA_USE_VMAP: allocate individual pages (pool-first for
 *    uncached areas) and vmap them;
 *  - otherwise: a single __vmalloc via VMallocWrapper.
 * Returns the new area, or NULL on failure (everything unwound). */
LinuxMemArea *
NewVMallocLinuxMemArea(IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AreaFlags)
{
	LinuxMemArea *psLinuxMemArea = NULL;
	IMG_VOID *pvCpuVAddr;
#if defined(PVR_LINUX_MEM_AREA_USE_VMAP)
	IMG_UINT32 ui32NumPages = 0;
	struct page **ppsPageList = NULL;
	IMG_HANDLE hBlockPageList;
#endif
	IMG_BOOL bFromPagePool = IMG_FALSE;

	psLinuxMemArea = LinuxMemAreaStructAlloc();
	if (!psLinuxMemArea)
	{
		goto failed;
	}

#if defined(PVR_LINUX_MEM_AREA_USE_VMAP)
	ui32NumPages = RANGE_TO_PAGES(ui32Bytes);

	if (!AllocPages(ui32AreaFlags, &ppsPageList, &hBlockPageList, ui32NumPages, &bFromPagePool))
	{
		goto failed;
	}

	/* NOTE: a NULL return here is not checked before use below; presumably
	 * relied on downstream — review if vmap failure is possible here */
	pvCpuVAddr = VMapWrapper(ppsPageList, ui32NumPages, ui32AreaFlags);
#else	/* defined(PVR_LINUX_MEM_AREA_USE_VMAP) */
	pvCpuVAddr = VMallocWrapper(ui32Bytes, ui32AreaFlags);
	if (!pvCpuVAddr)
	{
		goto failed;
	}
/* PG_reserved was deprecated in linux-2.6.15 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
	/* Reserve those pages to allow them to be re-mapped to user space */
	ReservePages(pvCpuVAddr, ui32Bytes);
#endif
#endif	/* defined(PVR_LINUX_MEM_AREA_USE_VMAP) */

	psLinuxMemArea->eAreaType = LINUX_MEM_AREA_VMALLOC;
	psLinuxMemArea->uData.sVmalloc.pvVmallocAddress = pvCpuVAddr;
#if defined(PVR_LINUX_MEM_AREA_USE_VMAP)
	psLinuxMemArea->uData.sVmalloc.ppsPageList = ppsPageList;
	psLinuxMemArea->uData.sVmalloc.hBlockPageList = hBlockPageList;
#endif
	psLinuxMemArea->ui32ByteSize = ui32Bytes;
	psLinuxMemArea->ui32AreaFlags = ui32AreaFlags;
	INIT_LIST_HEAD(&psLinuxMemArea->sMMapOffsetStructList);

#if defined(DEBUG_LINUX_MEM_AREAS)
	DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags);
#endif

	/* This works around a problem where Linux will not invalidate
	 * the cache for physical memory it frees that is direct mapped.
	 *
	 * As a result, cache entries remain that may be subsequently flushed
	 * to these physical pages after they have been allocated for another
	 * purpose. For a subsequent cached use of this memory, that is not a
	 * problem, but if we are allocating uncached or write-combined memory,
	 * and bypassing the cache, it can cause subsequent uncached writes to
	 * the memory to be replaced with junk from the cache.
	 *
	 * If the pages are from our page cache, no cache invalidate is needed.
	 *
	 * This just handles the __vmalloc() case (when we have a kernel virtual
	 * address range). The alloc_pages() path is handled in mmap.c.
	 */
	if (AreaIsUncached(ui32AreaFlags) && !bFromPagePool)
	{
		OSInvalidateCPUCacheRangeKM(psLinuxMemArea, 0, pvCpuVAddr, ui32Bytes);
	}

	return psLinuxMemArea;

failed:
	PVR_DPF((PVR_DBG_ERROR, "%s: failed!", __FUNCTION__));
#if defined(PVR_LINUX_MEM_AREA_USE_VMAP)
	if (ppsPageList)
	{
		FreePages(bFromPagePool, ppsPageList, hBlockPageList, ui32NumPages);
	}
#endif
	if (psLinuxMemArea)
	{
		LinuxMemAreaStructFree(psLinuxMemArea);
	}

	return NULL;
}
/* Destroy a LINUX_MEM_AREA_VMALLOC area created by NewVMallocLinuxMemArea:
 * unmap/free the kernel virtual range, return the pages (pool-eligible only
 * if CanFreeToPool allows), and free the area struct. */
IMG_VOID
FreeVMallocLinuxMemArea(LinuxMemArea *psLinuxMemArea)
{
#if defined(PVR_LINUX_MEM_AREA_USE_VMAP)
	IMG_UINT32 ui32NumPages;
	struct page **ppsPageList;
	IMG_HANDLE hBlockPageList;
#endif

	PVR_ASSERT(psLinuxMemArea);
	PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_VMALLOC);
	PVR_ASSERT(psLinuxMemArea->uData.sVmalloc.pvVmallocAddress);

#if defined(DEBUG_LINUX_MEM_AREAS)
	DebugLinuxMemAreaRecordRemove(psLinuxMemArea);
#endif

	PVR_DPF((PVR_DBG_MESSAGE,"%s: pvCpuVAddr: %p",
			 __FUNCTION__, psLinuxMemArea->uData.sVmalloc.pvVmallocAddress));

#if defined(PVR_LINUX_MEM_AREA_USE_VMAP)
	/* Unmap first, then release the underlying pages */
	VUnmapWrapper(psLinuxMemArea->uData.sVmalloc.pvVmallocAddress);

	ui32NumPages = RANGE_TO_PAGES(psLinuxMemArea->ui32ByteSize);
	ppsPageList = psLinuxMemArea->uData.sVmalloc.ppsPageList;
	hBlockPageList = psLinuxMemArea->uData.sVmalloc.hBlockPageList;

	FreePages(CanFreeToPool(psLinuxMemArea), ppsPageList, hBlockPageList, ui32NumPages);
#else
/* PG_reserved was deprecated in linux-2.6.15 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
	/* Un-reserve before vfree so the kernel will actually release the pages */
	UnreservePages(psLinuxMemArea->uData.sVmalloc.pvVmallocAddress,
					psLinuxMemArea->ui32ByteSize);
#endif

	VFreeWrapper(psLinuxMemArea->uData.sVmalloc.pvVmallocAddress);
#endif	/* defined(PVR_LINUX_MEM_AREA_USE_VMAP) */

	LinuxMemAreaStructFree(psLinuxMemArea);
}
1096 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
1097 /* Reserve pages of memory in order that they're not automatically
1098 deallocated after the last user reference dies. */
/* Reserve pages of memory in order that they're not automatically
   deallocated after the last user reference dies.
   pvAddress must be a vmalloc'd kernel virtual address; walks the range
   page by page and marks each backing page reserved. Pre-2.6.15 only. */
static IMG_VOID
ReservePages(IMG_VOID *pvAddress, IMG_UINT32 ui32Length)
{
	IMG_VOID *pvPage;
	IMG_VOID *pvEnd = pvAddress + ui32Length;

	for(pvPage = pvAddress; pvPage < pvEnd; pvPage += PAGE_SIZE)
	{
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0))
		SetPageReserved(vmalloc_to_page(pvPage));
#else
		mem_map_reserve(vmalloc_to_page(pvPage));
#endif
	}
}
1116 /* Un-reserve pages of memory in order that they can be freed. */
1117 static IMG_VOID
1118 UnreservePages(IMG_VOID *pvAddress, IMG_UINT32 ui32Length)
1119 {
1120 IMG_VOID *pvPage;
1121 IMG_VOID *pvEnd = pvAddress + ui32Length;
1123 for(pvPage = pvAddress; pvPage < pvEnd; pvPage += PAGE_SIZE)
1124 {
1125 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0))
1126 ClearPageReserved(vmalloc_to_page(pvPage));
1127 #else
1128 mem_map_unreserve(vmalloc_to_page(pvPage));
1129 #endif
1130 }
1131 }
1132 #endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) */
1135 IMG_VOID *
1136 _IORemapWrapper(IMG_CPU_PHYADDR BasePAddr,
1137 IMG_UINT32 ui32Bytes,
1138 IMG_UINT32 ui32MappingFlags,
1139 IMG_CHAR *pszFileName,
1140 IMG_UINT32 ui32Line)
1141 {
1142 IMG_VOID *pvIORemapCookie;
1144 switch (ui32MappingFlags & PVRSRV_HAP_CACHETYPE_MASK)
1145 {
1146 case PVRSRV_HAP_CACHED:
1147 pvIORemapCookie = (IMG_VOID *)IOREMAP(BasePAddr.uiAddr, ui32Bytes);
1148 break;
1149 case PVRSRV_HAP_WRITECOMBINE:
1150 pvIORemapCookie = (IMG_VOID *)IOREMAP_WC(BasePAddr.uiAddr, ui32Bytes);
1151 break;
1152 case PVRSRV_HAP_UNCACHED:
1153 pvIORemapCookie = (IMG_VOID *)IOREMAP_UC(BasePAddr.uiAddr, ui32Bytes);
1154 break;
1155 default:
1156 PVR_DPF((PVR_DBG_ERROR, "IORemapWrapper: unknown mapping flags"));
1157 return NULL;
1158 }
1160 #if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
1161 if (pvIORemapCookie)
1162 {
1163 DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_IOREMAP,
1164 pvIORemapCookie,
1165 pvIORemapCookie,
1166 BasePAddr.uiAddr,
1167 NULL,
1168 ui32Bytes,
1169 pszFileName,
1170 ui32Line
1171 );
1172 }
1173 #else
1174 PVR_UNREFERENCED_PARAMETER(pszFileName);
1175 PVR_UNREFERENCED_PARAMETER(ui32Line);
1176 #endif
1178 return pvIORemapCookie;
1179 }
/*!
 * Undo an _IORemapWrapper mapping: remove the debug allocation record
 * (debug builds only) and iounmap the cookie.
 */
IMG_VOID
_IOUnmapWrapper(IMG_VOID *pvIORemapCookie, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line)
{
#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
	DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_IOREMAP, pvIORemapCookie, pszFileName, ui32Line);
#else
	PVR_UNREFERENCED_PARAMETER(pszFileName);
	PVR_UNREFERENCED_PARAMETER(ui32Line);
#endif
	iounmap(pvIORemapCookie);
}
1195 LinuxMemArea *
1196 NewIORemapLinuxMemArea(IMG_CPU_PHYADDR BasePAddr,
1197 IMG_UINT32 ui32Bytes,
1198 IMG_UINT32 ui32AreaFlags)
1199 {
1200 LinuxMemArea *psLinuxMemArea;
1201 IMG_VOID *pvIORemapCookie;
1203 psLinuxMemArea = LinuxMemAreaStructAlloc();
1204 if (!psLinuxMemArea)
1205 {
1206 return NULL;
1207 }
1209 pvIORemapCookie = IORemapWrapper(BasePAddr, ui32Bytes, ui32AreaFlags);
1210 if (!pvIORemapCookie)
1211 {
1212 LinuxMemAreaStructFree(psLinuxMemArea);
1213 return NULL;
1214 }
1216 psLinuxMemArea->eAreaType = LINUX_MEM_AREA_IOREMAP;
1217 psLinuxMemArea->uData.sIORemap.pvIORemapCookie = pvIORemapCookie;
1218 psLinuxMemArea->uData.sIORemap.CPUPhysAddr = BasePAddr;
1219 psLinuxMemArea->ui32ByteSize = ui32Bytes;
1220 psLinuxMemArea->ui32AreaFlags = ui32AreaFlags;
1221 INIT_LIST_HEAD(&psLinuxMemArea->sMMapOffsetStructList);
1223 #if defined(DEBUG_LINUX_MEM_AREAS)
1224 DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags);
1225 #endif
1227 return psLinuxMemArea;
1228 }
/*!
 * Free a LINUX_MEM_AREA_IOREMAP area: unmap the ioremap cookie and
 * release the LinuxMemArea struct.
 */
IMG_VOID
FreeIORemapLinuxMemArea(LinuxMemArea *psLinuxMemArea)
{
	PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_IOREMAP);

#if defined(DEBUG_LINUX_MEM_AREAS)
	DebugLinuxMemAreaRecordRemove(psLinuxMemArea);
#endif

	IOUnmapWrapper(psLinuxMemArea->uData.sIORemap.pvIORemapCookie);

	LinuxMemAreaStructFree(psLinuxMemArea);
}
1246 #if !defined(PVR_MAKE_ALL_PFNS_SPECIAL)
1247 /*
1248 * Avoid using remap_pfn_range on RAM, if possible. On x86 systems, with
1249 * PAT enabled, remap_pfn_range checks the page attributes requested by
1250 * remap_pfn_range against those of the direct kernel mapping for those
1251 * pages (if any). This is rather annoying if the pages have been obtained
1252 * with alloc_pages, where we just ask for raw pages; we don't care about
1253 * the direct mapping. This latter issue arises when device memory is
1254 * exported from one process to another. Services implements this
1255 * using memory wrapping, which ends up creating an external KV memory area.
1256 */
1257 static IMG_BOOL
1258 TreatExternalPagesAsContiguous(IMG_SYS_PHYADDR *psSysPhysAddr, IMG_UINT32 ui32Bytes, IMG_BOOL bPhysContig)
1259 {
1260 IMG_UINT32 ui32;
1261 IMG_UINT32 ui32AddrChk;
1262 IMG_UINT32 ui32NumPages = RANGE_TO_PAGES(ui32Bytes);
1264 /*
1265 * If bPhysContig is IMG_TRUE, we must assume psSysPhysAddr points
1266 * to the address of the first page, not an array of page addresses.
1267 */
1268 for (ui32 = 0, ui32AddrChk = psSysPhysAddr[0].uiAddr;
1269 ui32 < ui32NumPages;
1270 ui32++, ui32AddrChk = (bPhysContig) ? (ui32AddrChk + PAGE_SIZE) : psSysPhysAddr[ui32].uiAddr)
1271 {
1272 if (!pfn_valid(PHYS_TO_PFN(ui32AddrChk)))
1273 {
1274 break;
1275 }
1276 }
1277 if (ui32 == ui32NumPages)
1278 {
1279 return IMG_FALSE;
1280 }
1282 if (!bPhysContig)
1283 {
1284 for (ui32 = 0, ui32AddrChk = psSysPhysAddr[0].uiAddr;
1285 ui32 < ui32NumPages;
1286 ui32++, ui32AddrChk += PAGE_SIZE)
1287 {
1288 if (psSysPhysAddr[ui32].uiAddr != ui32AddrChk)
1289 {
1290 return IMG_FALSE;
1291 }
1292 }
1293 }
1295 return IMG_TRUE;
1296 }
1297 #endif
/*!
 * Create a LinuxMemArea of type LINUX_MEM_AREA_EXTERNAL_KV wrapping
 * memory supplied by an external party (kernel virtual address plus its
 * physical address(es)).  If bPhysContig is IMG_TRUE, pBasePAddr is the
 * address of the first page only; otherwise it points at an array of
 * per-page addresses, which this area keeps a reference to (not a copy) --
 * the caller must keep that array alive for the lifetime of the area.
 */
LinuxMemArea *NewExternalKVLinuxMemArea(IMG_SYS_PHYADDR *pBasePAddr, IMG_VOID *pvCPUVAddr, IMG_UINT32 ui32Bytes, IMG_BOOL bPhysContig, IMG_UINT32 ui32AreaFlags)
{
	LinuxMemArea *psLinuxMemArea;

	psLinuxMemArea = LinuxMemAreaStructAlloc();
	if (!psLinuxMemArea)
	{
		return NULL;
	}

	psLinuxMemArea->eAreaType = LINUX_MEM_AREA_EXTERNAL_KV;
	psLinuxMemArea->uData.sExternalKV.pvExternalKV = pvCPUVAddr;
	/* Unless PVR_MAKE_ALL_PFNS_SPECIAL, a page array that happens to be
	 * physically consecutive and not all valid RAM is promoted to
	 * "contiguous" so it can be mapped with remap_pfn_range. */
	psLinuxMemArea->uData.sExternalKV.bPhysContig =
#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL)
		(bPhysContig || TreatExternalPagesAsContiguous(pBasePAddr, ui32Bytes, bPhysContig))
		? IMG_TRUE : IMG_FALSE;
#else
		bPhysContig;
#endif
	if (psLinuxMemArea->uData.sExternalKV.bPhysContig)
	{
		/* Contiguous: store the base address by value. */
		psLinuxMemArea->uData.sExternalKV.uPhysAddr.SysPhysAddr = *pBasePAddr;
	}
	else
	{
		/* Discontiguous: keep a pointer to the caller's page array. */
		psLinuxMemArea->uData.sExternalKV.uPhysAddr.pSysPhysAddr = pBasePAddr;
	}
	psLinuxMemArea->ui32ByteSize = ui32Bytes;
	psLinuxMemArea->ui32AreaFlags = ui32AreaFlags;
	INIT_LIST_HEAD(&psLinuxMemArea->sMMapOffsetStructList);

#if defined(DEBUG_LINUX_MEM_AREAS)
	DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags);
#endif

	return psLinuxMemArea;
}
/*!
 * Free a LINUX_MEM_AREA_EXTERNAL_KV area.  The wrapped memory itself is
 * owned by the external party, so only the struct is released here.
 */
IMG_VOID
FreeExternalKVLinuxMemArea(LinuxMemArea *psLinuxMemArea)
{
	PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_EXTERNAL_KV);

#if defined(DEBUG_LINUX_MEM_AREAS)
	DebugLinuxMemAreaRecordRemove(psLinuxMemArea);
#endif

	LinuxMemAreaStructFree(psLinuxMemArea);
}
1351 LinuxMemArea *
1352 NewIOLinuxMemArea(IMG_CPU_PHYADDR BasePAddr,
1353 IMG_UINT32 ui32Bytes,
1354 IMG_UINT32 ui32AreaFlags)
1355 {
1356 LinuxMemArea *psLinuxMemArea = LinuxMemAreaStructAlloc();
1357 if (!psLinuxMemArea)
1358 {
1359 return NULL;
1360 }
1362 /* Nothing to activly do. We just keep a record of the physical range. */
1363 psLinuxMemArea->eAreaType = LINUX_MEM_AREA_IO;
1364 psLinuxMemArea->uData.sIO.CPUPhysAddr.uiAddr = BasePAddr.uiAddr;
1365 psLinuxMemArea->ui32ByteSize = ui32Bytes;
1366 psLinuxMemArea->ui32AreaFlags = ui32AreaFlags;
1367 INIT_LIST_HEAD(&psLinuxMemArea->sMMapOffsetStructList);
1369 #if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
1370 DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_IO,
1371 (IMG_VOID *)BasePAddr.uiAddr,
1372 0,
1373 BasePAddr.uiAddr,
1374 NULL,
1375 ui32Bytes,
1376 "unknown",
1377 0
1378 );
1379 #endif
1381 #if defined(DEBUG_LINUX_MEM_AREAS)
1382 DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags);
1383 #endif
1385 return psLinuxMemArea;
1386 }
/*!
 * Free a LINUX_MEM_AREA_IO area.  No mapping was ever created for this
 * type, so only the debug records and the struct itself are released.
 */
IMG_VOID
FreeIOLinuxMemArea(LinuxMemArea *psLinuxMemArea)
{
	PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_IO);

#if defined(DEBUG_LINUX_MEM_AREAS)
	DebugLinuxMemAreaRecordRemove(psLinuxMemArea);
#endif

#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
	DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_IO,
                              (IMG_VOID *)psLinuxMemArea->uData.sIO.CPUPhysAddr.uiAddr, __FILE__, __LINE__);
#endif

	/* Nothing more to do than free the LinuxMemArea struct */

	LinuxMemAreaStructFree(psLinuxMemArea);
}
1409 LinuxMemArea *
1410 NewAllocPagesLinuxMemArea(IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AreaFlags)
1411 {
1412 LinuxMemArea *psLinuxMemArea;
1413 IMG_UINT32 ui32NumPages;
1414 struct page **ppsPageList;
1415 IMG_HANDLE hBlockPageList;
1416 IMG_BOOL bFromPagePool;
1418 psLinuxMemArea = LinuxMemAreaStructAlloc();
1419 if (!psLinuxMemArea)
1420 {
1421 goto failed_area_alloc;
1422 }
1424 ui32NumPages = RANGE_TO_PAGES(ui32Bytes);
1426 if (!AllocPages(ui32AreaFlags, &ppsPageList, &hBlockPageList, ui32NumPages, &bFromPagePool))
1427 {
1428 goto failed_alloc_pages;
1429 }
1431 psLinuxMemArea->eAreaType = LINUX_MEM_AREA_ALLOC_PAGES;
1432 psLinuxMemArea->uData.sPageList.ppsPageList = ppsPageList;
1433 psLinuxMemArea->uData.sPageList.hBlockPageList = hBlockPageList;
1434 psLinuxMemArea->ui32ByteSize = ui32Bytes;
1435 psLinuxMemArea->ui32AreaFlags = ui32AreaFlags;
1436 INIT_LIST_HEAD(&psLinuxMemArea->sMMapOffsetStructList);
1438 /* We defer the cache flush to the first user mapping of this memory */
1439 psLinuxMemArea->bNeedsCacheInvalidate = AreaIsUncached(ui32AreaFlags) && !bFromPagePool;
1441 #if defined(DEBUG_LINUX_MEM_AREAS)
1442 DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags);
1443 #endif
1445 return psLinuxMemArea;
1447 failed_alloc_pages:
1448 LinuxMemAreaStructFree(psLinuxMemArea);
1449 failed_area_alloc:
1450 PVR_DPF((PVR_DBG_ERROR, "%s: failed", __FUNCTION__));
1452 return NULL;
1453 }
1456 IMG_VOID
1457 FreeAllocPagesLinuxMemArea(LinuxMemArea *psLinuxMemArea)
1458 {
1459 IMG_UINT32 ui32NumPages;
1460 struct page **ppsPageList;
1461 IMG_HANDLE hBlockPageList;
1463 PVR_ASSERT(psLinuxMemArea);
1464 PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_ALLOC_PAGES);
1466 #if defined(DEBUG_LINUX_MEM_AREAS)
1467 DebugLinuxMemAreaRecordRemove(psLinuxMemArea);
1468 #endif
1470 ui32NumPages = RANGE_TO_PAGES(psLinuxMemArea->ui32ByteSize);
1471 ppsPageList = psLinuxMemArea->uData.sPageList.ppsPageList;
1472 hBlockPageList = psLinuxMemArea->uData.sPageList.hBlockPageList;
1474 FreePages(CanFreeToPool(psLinuxMemArea), ppsPageList, hBlockPageList, ui32NumPages);
1476 LinuxMemAreaStructFree(psLinuxMemArea);
1477 }
1479 #if defined(CONFIG_ION_OMAP)
1481 #include "env_perproc.h"
1483 #include <linux/ion.h>
1484 #include <linux/omap_ion.h>
1485 #include <linux/scatterlist.h>
1487 extern struct ion_client *gpsIONClient;
/*!
 * Create a LinuxMemArea of type LINUX_MEM_AREA_ION backed by one to
 * three ION allocations (YUV planes).  pvPrivData carries one
 * omap_ion_tiler_alloc_data record per plane (minus the trailing handle
 * field); ui32PrivDataLength must be an exact 1x, 2x or 3x multiple of
 * that record size or the kernel BUGs.  On success the per-plane page
 * addresses are glued into a single physical page list and the plane
 * offsets recorded.  Returns NULL on failure.
 */
LinuxMemArea *
NewIONLinuxMemArea(IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AreaFlags,
                   IMG_PVOID pvPrivData, IMG_UINT32 ui32PrivDataLength)
{
	const IMG_UINT32 ui32AllocDataLen =
		offsetof(struct omap_ion_tiler_alloc_data, handle);
	struct omap_ion_tiler_alloc_data asAllocData[PVRSRV_MAX_NUMBER_OF_MM_BUFFER_PLANES];
	u32 *pu32PageAddrs[PVRSRV_MAX_NUMBER_OF_MM_BUFFER_PLANES] = { NULL, NULL, NULL};
	IMG_UINT32 i, j, ui32NumHandlesPerFd;
	IMG_BYTE *pbPrivData = pvPrivData;
	IMG_CPU_PHYADDR *pCPUPhysAddrs;
	IMG_UINT32 iNumPages[PVRSRV_MAX_NUMBER_OF_MM_BUFFER_PLANES] = { 0, 0, 0};
	LinuxMemArea *psLinuxMemArea;
	IMG_UINT32 ui32ProcID;
	IMG_UINT32 ui32TotalPagesSizeInBytes = 0, ui32TotalPages = 0;

	psLinuxMemArea = LinuxMemAreaStructAlloc();
	if (!psLinuxMemArea)
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate LinuxMemArea struct", __func__));
		goto err_out;
	}

	/* Depending on the UM config, userspace might give us info for
	 * one, two or three ION allocations. Divide the total size of data we
	 * were given by this ui32AllocDataLen, and check it's 1 or 2.
	 * Otherwise abort.
	 */
	BUG_ON(ui32PrivDataLength != ui32AllocDataLen &&
	       ui32PrivDataLength != ui32AllocDataLen * 2 &&
	       ui32PrivDataLength != ui32AllocDataLen * 3);
	/* This is bad !- change this logic to pass in the size or
	 * use uniformed API */
	ui32NumHandlesPerFd = ui32PrivDataLength / ui32AllocDataLen;

	/* Token ties the tiler allocations to the calling process. */
	ui32ProcID = OSGetCurrentProcessIDKM();

	memset(asAllocData, 0x00, sizeof(asAllocData));

	/* We do not care about what the first (Y) buffer offset would be,
	 * but we do care for the UV buffers to be co-aligned with Y
	 * This for SGX to find the UV offset solely based on the height
	 * and stride of the YUV buffer.This is very important for OMAP4470
	 * and later chipsets, where SGX version is 544. 544 and later use
	 * non-shader based YUV to RGB conversion unit that require
	 * contiguous GPU virtual space */
	for(i = 0; i < ui32NumHandlesPerFd; i++)
	{
		/* Copy the caller's per-plane request, then fill in the fields
		 * userspace must not control. */
		memcpy(&asAllocData[i], &pbPrivData[i * ui32AllocDataLen], ui32AllocDataLen);
		asAllocData[i].token = ui32ProcID;

#ifndef SGX_DISABLE_DMM_OFFSET_BUFFER_ALLOCATIONS
		if(i == 0)
		{
			/* Tiler API says:
			 * Allocate first buffer with the required alignment
			 * and an offset of 0 ... */
			asAllocData[i].out_align = CONFIG_TILER_GRANULARITY;
			asAllocData[i].offset = 0;
		}
		else
		{	/* .. Then for the second buffer, use the offset from the first
			 * buffer with alignment of PAGE_SIZE */
			asAllocData[i].out_align = PAGE_SIZE;
			asAllocData[i].offset = asAllocData[0].offset;
		}
#else
		asAllocData[i].offset = 0;
		asAllocData[i].out_align = PAGE_SIZE;
#endif

		if(asAllocData[i].fmt == TILER_PIXEL_FMT_PAGE)
		{
			/* 1D DMM Buffers */
			struct scatterlist *sg;
			struct sg_table *sgtable;
			IMG_UINT32 ui32Num1dPages;

			asAllocData[i].handle = ion_alloc (gpsIONClient,
			                                   ui32Bytes,
			                                   PAGE_SIZE, (1 << OMAP_ION_HEAP_SYSTEM), 0);

			if (asAllocData[i].handle == NULL)
			{
				PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate via ion_alloc",
				         __func__));
				goto err_free;
			}

			sgtable = ion_sg_table(gpsIONClient, asAllocData[i].handle);
			if (sgtable == NULL)
			{
				PVR_DPF((PVR_DBG_ERROR, "%s: Failed to compute pages",
				         __func__));
				goto err_free;
			}

			ui32Num1dPages = (ui32Bytes >> PAGE_SHIFT);
			/* NOTE(review): this kmalloc'd page-address array appears to
			 * be copied into pCPUPhysAddrs below but never kfree'd on
			 * either the success or error path -- looks like a leak for
			 * 1D buffers; confirm against FreeIONLinuxMemArea. */
			pu32PageAddrs[i] = kmalloc (sizeof(u32) * ui32Num1dPages, GFP_KERNEL);
			if (pu32PageAddrs[i] == NULL)
			{
				PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate page array",
				         __func__));
				goto err_free;
			}

			for_each_sg (sgtable->sgl, sg, ui32Num1dPages, j)
			{
				pu32PageAddrs[i][j] = sg_phys (sg);
			}

			iNumPages[i] = ui32Num1dPages;
		}
		else /* 2D DMM Buffers */
		{
			if (omap_ion_tiler_alloc(gpsIONClient, &asAllocData[i]) < 0)
			{
				PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate via ion_tiler",
				         __func__));
				goto err_free;
			}

			if (omap_tiler_pages(gpsIONClient, asAllocData[i].handle, &iNumPages[i],
			                     &pu32PageAddrs[i]) < 0)
			{
				PVR_DPF((PVR_DBG_ERROR, "%s: Failed to compute tiler pages",
				         __func__));
				goto err_free;
			}
		}
	}

	/* Basic sanity check on plane co-alignment */
	if((ui32NumHandlesPerFd > 1) &&
	   (asAllocData[0].offset != asAllocData[1].offset))
	{
		pr_err("%s: Y and UV offsets do not match for tiler handles "
		       "%p,%p: %d != %d \n "
		       "Expect issues with SGX544xx and later chipsets\n",
		       __func__, asAllocData[0].handle, asAllocData[1].handle,
		       (int)asAllocData[0].offset, (int)asAllocData[1].offset);
	}

	/* Assume the user-allocator has already done the tiler math and that the
	 * number of tiler pages allocated matches any other allocation type.
	 */
	for(i = 0; i < ui32NumHandlesPerFd; i++)
	{
		ui32TotalPages += iNumPages[i];
	}

	BUG_ON(ui32Bytes != (ui32TotalPages * PAGE_SIZE));
	BUG_ON(sizeof(IMG_CPU_PHYADDR) != sizeof(int));

	/* Glue the page lists together */
	pCPUPhysAddrs = vmalloc(sizeof(IMG_CPU_PHYADDR) * ui32TotalPages);
	if (!pCPUPhysAddrs)
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate page list", __func__));
		goto err_free;
	}

	j = 0;
	for(i = 0; i < ui32NumHandlesPerFd; i++)
	{
		IMG_UINT32 ui32PageIndx;
		for(ui32PageIndx = 0; ui32PageIndx < iNumPages[i]; ui32PageIndx++)
		{
			pCPUPhysAddrs[j++].uiAddr = pu32PageAddrs[i][ui32PageIndx];
		}

		psLinuxMemArea->uData.sIONTilerAlloc.psIONHandle[i] =
			asAllocData[i].handle;
		/* Plane offset = bytes consumed by preceding planes plus the
		 * tiler offset inside this plane. */
		psLinuxMemArea->uData.sIONTilerAlloc.planeOffsets[i] =
			ui32TotalPagesSizeInBytes + asAllocData[i].offset;
		/* Add the number of pages this plane consists of */
		ui32TotalPagesSizeInBytes += (iNumPages[i] * PAGE_SIZE);
	}

	psLinuxMemArea->eAreaType = LINUX_MEM_AREA_ION;
	psLinuxMemArea->uData.sIONTilerAlloc.pCPUPhysAddrs = pCPUPhysAddrs;
	psLinuxMemArea->uData.sIONTilerAlloc.ui32NumValidPlanes =
		ui32NumHandlesPerFd;
	psLinuxMemArea->ui32ByteSize = ui32TotalPagesSizeInBytes;
	psLinuxMemArea->ui32AreaFlags = ui32AreaFlags;
	INIT_LIST_HEAD(&psLinuxMemArea->sMMapOffsetStructList);

	/* We defer the cache flush to the first user mapping of this memory */
	psLinuxMemArea->bNeedsCacheInvalidate = AreaIsUncached(ui32AreaFlags);

#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
	DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_ION,
	                       asAllocData[0].handle,
	                       0,
	                       0,
	                       NULL,
	                       PAGE_ALIGN(ui32Bytes),
	                       "unknown",
	                       0
	                       );
#endif

#if defined(DEBUG_LINUX_MEM_AREAS)
	DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags);
#endif

err_out:
	return psLinuxMemArea;

err_free:
	/* NOTE(review): ION handles already allocated in earlier loop
	 * iterations do not appear to be ion_free'd here -- verify whether
	 * the caller cleans them up on NULL return. */
	LinuxMemAreaStructFree(psLinuxMemArea);
	psLinuxMemArea = IMG_NULL;
	goto err_out;
}
1704 IMG_INT32
1705 GetIONLinuxMemAreaInfo(LinuxMemArea *psLinuxMemArea, IMG_UINT32* pui32AddressOffsets,
1706 IMG_UINT32* ui32NumAddrOffsets)
1707 {
1708 IMG_UINT32 i;
1710 if(!ui32NumAddrOffsets)
1711 return -1;
1713 if(*ui32NumAddrOffsets < psLinuxMemArea->uData.sIONTilerAlloc.ui32NumValidPlanes)
1714 {
1715 *ui32NumAddrOffsets = psLinuxMemArea->uData.sIONTilerAlloc.ui32NumValidPlanes;
1716 return -1;
1717 }
1719 if(!pui32AddressOffsets)
1720 return -1;
1722 for(i = 0; i < psLinuxMemArea->uData.sIONTilerAlloc.ui32NumValidPlanes; i++)
1723 {
1724 if(psLinuxMemArea->uData.sIONTilerAlloc.psIONHandle[i])
1725 pui32AddressOffsets[i] =
1726 psLinuxMemArea->uData.sIONTilerAlloc.planeOffsets[i];
1727 }
1729 *ui32NumAddrOffsets = psLinuxMemArea->uData.sIONTilerAlloc.ui32NumValidPlanes;
1731 return psLinuxMemArea->ui32ByteSize;
1732 }
/*!
 * Free a LINUX_MEM_AREA_ION area: release every valid ION handle, free
 * the glued copy of the page list, and release the struct.
 */
IMG_VOID
FreeIONLinuxMemArea(LinuxMemArea *psLinuxMemArea)
{
	IMG_UINT32 i;

#if defined(DEBUG_LINUX_MEM_AREAS)
	DebugLinuxMemAreaRecordRemove(psLinuxMemArea);
#endif

#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
	DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_ION,
	                          psLinuxMemArea->uData.sIONTilerAlloc.psIONHandle[0],
	                          __FILE__, __LINE__);
#endif

	/* Handles are packed from index 0; stop at the first NULL. */
	for(i = 0; i < psLinuxMemArea->uData.sIONTilerAlloc.ui32NumValidPlanes; i++)
	{
		if (!psLinuxMemArea->uData.sIONTilerAlloc.psIONHandle[i])
			break;
		ion_free(gpsIONClient, psLinuxMemArea->uData.sIONTilerAlloc.psIONHandle[i]);
		psLinuxMemArea->uData.sIONTilerAlloc.psIONHandle[i] = IMG_NULL;
	}

	/* free copy of page list, originals are freed by ion_free */
	vfree(psLinuxMemArea->uData.sIONTilerAlloc.pCPUPhysAddrs);
	psLinuxMemArea->uData.sIONTilerAlloc.pCPUPhysAddrs = IMG_NULL;

	LinuxMemAreaStructFree(psLinuxMemArea);
}
1764 #endif /* defined(CONFIG_ION_OMAP) */
1766 struct page*
1767 LinuxMemAreaOffsetToPage(LinuxMemArea *psLinuxMemArea,
1768 IMG_UINT32 ui32ByteOffset)
1769 {
1770 IMG_UINT32 ui32PageIndex;
1771 IMG_CHAR *pui8Addr;
1773 switch (psLinuxMemArea->eAreaType)
1774 {
1775 case LINUX_MEM_AREA_ALLOC_PAGES:
1776 ui32PageIndex = PHYS_TO_PFN(ui32ByteOffset);
1777 return psLinuxMemArea->uData.sPageList.ppsPageList[ui32PageIndex];
1779 case LINUX_MEM_AREA_VMALLOC:
1780 pui8Addr = psLinuxMemArea->uData.sVmalloc.pvVmallocAddress;
1781 pui8Addr += ui32ByteOffset;
1782 return vmalloc_to_page(pui8Addr);
1784 case LINUX_MEM_AREA_SUB_ALLOC:
1785 /* PRQA S 3670 3 */ /* ignore recursive warning */
1786 return LinuxMemAreaOffsetToPage(psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea,
1787 psLinuxMemArea->uData.sSubAlloc.ui32ByteOffset
1788 + ui32ByteOffset);
1789 default:
1790 PVR_DPF((PVR_DBG_ERROR,
1791 "%s: Unsupported request for struct page from LinuxMemArea with type=%s",
1792 __FUNCTION__, LinuxMemAreaTypeToString(psLinuxMemArea->eAreaType)));
1793 return NULL;
1794 }
1795 }
/*!
 * Thin wrapper around kmem_cache_create, handling the pre-2.6.23 extra
 * destructor argument and, in slab-debug builds, forcing poison and
 * red-zone checking on.
 */
LinuxKMemCache *
KMemCacheCreateWrapper(IMG_CHAR *pszName,
                       size_t Size,
                       size_t Align,
                       IMG_UINT32 ui32Flags)
{
#if defined(DEBUG_LINUX_SLAB_ALLOCATIONS)
	ui32Flags |= SLAB_POISON|SLAB_RED_ZONE;
#endif
	return kmem_cache_create(pszName, Size, Align, ui32Flags, NULL
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,22))
	                         , NULL
#endif	/* (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,22) */
	                        );
}
/*! Thin wrapper around kmem_cache_destroy. */
IMG_VOID
KMemCacheDestroyWrapper(LinuxKMemCache *psCache)
{
	kmem_cache_destroy(psCache);
}
/*!
 * Allocate a zeroed object from a kmem cache, recording the allocation
 * in debug builds.  pszFileName/ui32Line identify the caller for the
 * debug record and are unused otherwise.  The gfp_t type only exists
 * from 2.6.14 onward, hence the conditional parameter type.
 */
IMG_VOID *
_KMemCacheAllocWrapper(LinuxKMemCache *psCache,
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14))
                       gfp_t Flags,
#else
                       IMG_INT Flags,
#endif
                       IMG_CHAR *pszFileName,
                       IMG_UINT32 ui32Line)
{
	IMG_VOID *pvRet;

	pvRet = kmem_cache_zalloc(psCache, Flags);

#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
	DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE,
	                       pvRet,
	                       pvRet,
	                       0,
	                       psCache,
	                       kmem_cache_size(psCache),
	                       pszFileName,
	                       ui32Line
	                       );
#else
	PVR_UNREFERENCED_PARAMETER(pszFileName);
	PVR_UNREFERENCED_PARAMETER(ui32Line);
#endif

	return pvRet;
}
1855 LinuxMemArea *
1856 NewSubLinuxMemArea(LinuxMemArea *psParentLinuxMemArea,
1857 IMG_UINT32 ui32ByteOffset,
1858 IMG_UINT32 ui32Bytes)
1859 {
1860 LinuxMemArea *psLinuxMemArea;
1862 PVR_ASSERT((ui32ByteOffset+ui32Bytes) <= psParentLinuxMemArea->ui32ByteSize);
1864 psLinuxMemArea = LinuxMemAreaStructAlloc();
1865 if (!psLinuxMemArea)
1866 {
1867 return NULL;
1868 }
1870 psLinuxMemArea->eAreaType = LINUX_MEM_AREA_SUB_ALLOC;
1871 psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea = psParentLinuxMemArea;
1872 psLinuxMemArea->uData.sSubAlloc.ui32ByteOffset = ui32ByteOffset;
1873 psLinuxMemArea->ui32ByteSize = ui32Bytes;
1874 psLinuxMemArea->ui32AreaFlags = psParentLinuxMemArea->ui32AreaFlags;
1875 psLinuxMemArea->bNeedsCacheInvalidate = psParentLinuxMemArea->bNeedsCacheInvalidate;
1876 INIT_LIST_HEAD(&psLinuxMemArea->sMMapOffsetStructList);
1878 #if defined(DEBUG_LINUX_MEM_AREAS)
1879 {
1880 DEBUG_LINUX_MEM_AREA_REC *psParentRecord;
1881 psParentRecord = DebugLinuxMemAreaRecordFind(psParentLinuxMemArea);
1882 DebugLinuxMemAreaRecordAdd(psLinuxMemArea, psParentRecord->ui32Flags);
1883 }
1884 #endif
1886 return psLinuxMemArea;
1887 }
/*!
 * Free a LINUX_MEM_AREA_SUB_ALLOC area.  The underlying storage belongs
 * to the parent area, so only the struct is released.
 */
static IMG_VOID
FreeSubLinuxMemArea(LinuxMemArea *psLinuxMemArea)
{
	PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_SUB_ALLOC);

#if defined(DEBUG_LINUX_MEM_AREAS)
	DebugLinuxMemAreaRecordRemove(psLinuxMemArea);
#endif

	/* Nothing more to do than free the LinuxMemArea structure */

	LinuxMemAreaStructFree(psLinuxMemArea);
}
/*!
 * Allocate a LinuxMemArea struct from the dedicated kmem cache.
 * The #if 0 branch is a debug variant that also dumps the call stack.
 */
static LinuxMemArea *
LinuxMemAreaStructAlloc(IMG_VOID)
{
/* debug */
#if 0
	LinuxMemArea *psLinuxMemArea;
	psLinuxMemArea = kmem_cache_alloc(g_PsLinuxMemAreaCache, GFP_KERNEL);
	printk(KERN_ERR "%s: psLinuxMemArea=%p\n", __FUNCTION__, psLinuxMemArea);
	dump_stack();
	return psLinuxMemArea;
#else
	return KMemCacheAllocWrapper(g_PsLinuxMemAreaCache, GFP_KERNEL);
#endif
}
/*! Return a LinuxMemArea struct to its kmem cache. */
static IMG_VOID
LinuxMemAreaStructFree(LinuxMemArea *psLinuxMemArea)
{
	KMemCacheFreeWrapper(g_PsLinuxMemAreaCache, psLinuxMemArea);
	/* debug */
	//printk(KERN_ERR "%s(%p)\n", __FUNCTION__, psLinuxMemArea);
}
1930 IMG_VOID
1931 LinuxMemAreaDeepFree(LinuxMemArea *psLinuxMemArea)
1932 {
1933 switch (psLinuxMemArea->eAreaType)
1934 {
1935 case LINUX_MEM_AREA_VMALLOC:
1936 FreeVMallocLinuxMemArea(psLinuxMemArea);
1937 break;
1938 case LINUX_MEM_AREA_ALLOC_PAGES:
1939 FreeAllocPagesLinuxMemArea(psLinuxMemArea);
1940 break;
1941 case LINUX_MEM_AREA_IOREMAP:
1942 FreeIORemapLinuxMemArea(psLinuxMemArea);
1943 break;
1944 case LINUX_MEM_AREA_EXTERNAL_KV:
1945 FreeExternalKVLinuxMemArea(psLinuxMemArea);
1946 break;
1947 case LINUX_MEM_AREA_IO:
1948 FreeIOLinuxMemArea(psLinuxMemArea);
1949 break;
1950 case LINUX_MEM_AREA_SUB_ALLOC:
1951 FreeSubLinuxMemArea(psLinuxMemArea);
1952 break;
1953 case LINUX_MEM_AREA_ION:
1954 FreeIONLinuxMemArea(psLinuxMemArea);
1955 break;
1956 default:
1957 PVR_DPF((PVR_DBG_ERROR, "%s: Unknown are type (%d)\n",
1958 __FUNCTION__, psLinuxMemArea->eAreaType));
1959 break;
1960 }
1961 }
1964 #if defined(DEBUG_LINUX_MEM_AREAS)
/*!
 * Record a newly created LinuxMemArea in the debug list and update the
 * global water marks.  Sub-allocations don't add to the water mark
 * because their bytes are already counted by the parent area.
 * Runs under g_sDebugMutex.
 */
static IMG_VOID
DebugLinuxMemAreaRecordAdd(LinuxMemArea *psLinuxMemArea, IMG_UINT32 ui32Flags)
{
	DEBUG_LINUX_MEM_AREA_REC *psNewRecord;
	const IMG_CHAR *pi8FlagsString;

	LinuxLockMutexNested(&g_sDebugMutex, PVRSRV_LOCK_CLASS_MM_DEBUG);

	if (psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC)
	{
		g_LinuxMemAreaWaterMark += psLinuxMemArea->ui32ByteSize;
		if (g_LinuxMemAreaWaterMark > g_LinuxMemAreaHighWaterMark)
		{
			g_LinuxMemAreaHighWaterMark = g_LinuxMemAreaWaterMark;
		}
	}
	g_LinuxMemAreaCount++;

	/* Create a new memory allocation record */
	psNewRecord = kmalloc(sizeof(DEBUG_LINUX_MEM_AREA_REC), GFP_KERNEL);
	if (psNewRecord)
	{
		/* Record the allocation */
		psNewRecord->psLinuxMemArea = psLinuxMemArea;
		psNewRecord->ui32Flags = ui32Flags;
		psNewRecord->pid = OSGetCurrentProcessIDKM();

		List_DEBUG_LINUX_MEM_AREA_REC_Insert(&g_LinuxMemAreaRecords, psNewRecord);
	}
	else
	{
		/* Allocation failure is tolerated: the area simply goes
		 * unrecorded (callers such as DebugLinuxMemAreaRecordFind may
		 * then return NULL for this area). */
		PVR_DPF((PVR_DBG_ERROR,
		         "%s: failed to allocate linux memory area record.",
		         __FUNCTION__));
	}

	/* Sanity check the flags */
	pi8FlagsString = HAPFlagsToString(ui32Flags);
	if (strstr(pi8FlagsString, "UNKNOWN"))
	{
		PVR_DPF((PVR_DBG_ERROR,
		         "%s: Unexpected flags (0x%08x) associated with psLinuxMemArea @ %p",
		         __FUNCTION__,
		         ui32Flags,
		         psLinuxMemArea));
		//dump_stack();
	}

	LinuxUnLockMutex(&g_sDebugMutex);
}
2018 static IMG_VOID* MatchLinuxMemArea_AnyVaCb(DEBUG_LINUX_MEM_AREA_REC *psCurrentRecord,
2019 va_list va)
2020 {
2021 LinuxMemArea *psLinuxMemArea;
2023 psLinuxMemArea = va_arg(va, LinuxMemArea*);
2024 if (psCurrentRecord->psLinuxMemArea == psLinuxMemArea)
2025 {
2026 return psCurrentRecord;
2027 }
2028 else
2029 {
2030 return IMG_NULL;
2031 }
2032 }
/*!
 * Look up the debug record for a LinuxMemArea.  Returns NULL if no
 * record exists (e.g. the record allocation failed at add time).
 * Takes g_sDebugMutex internally.
 */
static DEBUG_LINUX_MEM_AREA_REC *
DebugLinuxMemAreaRecordFind(LinuxMemArea *psLinuxMemArea)
{
	DEBUG_LINUX_MEM_AREA_REC *psCurrentRecord;

	LinuxLockMutexNested(&g_sDebugMutex, PVRSRV_LOCK_CLASS_MM_DEBUG);
	psCurrentRecord = List_DEBUG_LINUX_MEM_AREA_REC_Any_va(g_LinuxMemAreaRecords,
	                                                       MatchLinuxMemArea_AnyVaCb,
	                                                       psLinuxMemArea);

/*exit_unlock:*/
	LinuxUnLockMutex(&g_sDebugMutex);

	return psCurrentRecord;
}
/*!
 * Remove and free the debug record for a LinuxMemArea and update the
 * water marks (sub-allocations were never counted, so they aren't
 * subtracted).  A missing record is logged, not fatal.  Runs under
 * g_sDebugMutex.
 */
static IMG_VOID
DebugLinuxMemAreaRecordRemove(LinuxMemArea *psLinuxMemArea)
{
	DEBUG_LINUX_MEM_AREA_REC *psCurrentRecord;

	LinuxLockMutexNested(&g_sDebugMutex, PVRSRV_LOCK_CLASS_MM_DEBUG);

	if (psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC)
	{
		g_LinuxMemAreaWaterMark -= psLinuxMemArea->ui32ByteSize;
	}
	g_LinuxMemAreaCount--;

	/* Locate the corresponding allocation entry */
	psCurrentRecord = List_DEBUG_LINUX_MEM_AREA_REC_Any_va(g_LinuxMemAreaRecords,
	                                                       MatchLinuxMemArea_AnyVaCb,
	                                                       psLinuxMemArea);
	if (psCurrentRecord)
	{
		/* Unlink the allocation record */
		List_DEBUG_LINUX_MEM_AREA_REC_Remove(psCurrentRecord);
		kfree(psCurrentRecord);
	}
	else
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: couldn't find an entry for psLinuxMemArea=%p\n",
		         __FUNCTION__, psLinuxMemArea));
	}

	LinuxUnLockMutex(&g_sDebugMutex);
}
2083 #endif
2086 IMG_VOID *
2087 LinuxMemAreaToCpuVAddr(LinuxMemArea *psLinuxMemArea)
2088 {
2089 switch (psLinuxMemArea->eAreaType)
2090 {
2091 case LINUX_MEM_AREA_VMALLOC:
2092 return psLinuxMemArea->uData.sVmalloc.pvVmallocAddress;
2093 case LINUX_MEM_AREA_IOREMAP:
2094 return psLinuxMemArea->uData.sIORemap.pvIORemapCookie;
2095 case LINUX_MEM_AREA_EXTERNAL_KV:
2096 return psLinuxMemArea->uData.sExternalKV.pvExternalKV;
2097 case LINUX_MEM_AREA_SUB_ALLOC:
2098 {
2099 IMG_CHAR *pAddr =
2100 LinuxMemAreaToCpuVAddr(psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea); /* PRQA S 3670 */ /* ignore recursive warning */
2101 if (!pAddr)
2102 {
2103 return NULL;
2104 }
2105 return pAddr + psLinuxMemArea->uData.sSubAlloc.ui32ByteOffset;
2106 }
2107 default:
2108 return NULL;
2109 }
2110 }
/*!
 * Translate a byte offset within a LinuxMemArea into a CPU physical
 * address.  For page-list backed types the offset selects a page and
 * the in-page offset is re-applied; for contiguous types the offset is
 * simply added to the base.  Unknown types assert and return 0.
 */
IMG_CPU_PHYADDR
LinuxMemAreaToCpuPAddr(LinuxMemArea *psLinuxMemArea, IMG_UINT32 ui32ByteOffset)
{
	IMG_CPU_PHYADDR CpuPAddr;

	CpuPAddr.uiAddr = 0;

	switch (psLinuxMemArea->eAreaType)
	{
		case LINUX_MEM_AREA_IOREMAP:
		{
			CpuPAddr = psLinuxMemArea->uData.sIORemap.CPUPhysAddr;
			CpuPAddr.uiAddr += ui32ByteOffset;
			break;
		}
		case LINUX_MEM_AREA_EXTERNAL_KV:
		{
			if (psLinuxMemArea->uData.sExternalKV.bPhysContig)
			{
				/* Contiguous: single base address plus offset. */
				CpuPAddr = SysSysPAddrToCpuPAddr(psLinuxMemArea->uData.sExternalKV.uPhysAddr.SysPhysAddr);
				CpuPAddr.uiAddr += ui32ByteOffset;
			}
			else
			{
				/* Discontiguous: index the page array, then re-apply
				 * the offset within the page. */
				IMG_UINT32 ui32PageIndex = PHYS_TO_PFN(ui32ByteOffset);
				IMG_SYS_PHYADDR SysPAddr = psLinuxMemArea->uData.sExternalKV.uPhysAddr.pSysPhysAddr[ui32PageIndex];

				CpuPAddr = SysSysPAddrToCpuPAddr(SysPAddr);
				CpuPAddr.uiAddr += ADDR_TO_PAGE_OFFSET(ui32ByteOffset);
			}
			break;
		}
		case LINUX_MEM_AREA_IO:
		{
			CpuPAddr = psLinuxMemArea->uData.sIO.CPUPhysAddr;
			CpuPAddr.uiAddr += ui32ByteOffset;
			break;
		}
		case LINUX_MEM_AREA_VMALLOC:
		{
			/* Resolve through the kernel page tables. */
			IMG_CHAR *pCpuVAddr;
			pCpuVAddr =
				(IMG_CHAR *)psLinuxMemArea->uData.sVmalloc.pvVmallocAddress;
			pCpuVAddr += ui32ByteOffset;
			CpuPAddr.uiAddr = VMallocToPhys(pCpuVAddr);
			break;
		}
		case LINUX_MEM_AREA_ION:
		{
			IMG_UINT32 ui32PageIndex = PHYS_TO_PFN(ui32ByteOffset);
			CpuPAddr = psLinuxMemArea->uData.sIONTilerAlloc.pCPUPhysAddrs[ui32PageIndex];
			CpuPAddr.uiAddr += ADDR_TO_PAGE_OFFSET(ui32ByteOffset);
			break;
		}
		case LINUX_MEM_AREA_ALLOC_PAGES:
		{
			struct page *page;
			IMG_UINT32 ui32PageIndex = PHYS_TO_PFN(ui32ByteOffset);
			page = psLinuxMemArea->uData.sPageList.ppsPageList[ui32PageIndex];
			CpuPAddr.uiAddr = page_to_phys(page);
			CpuPAddr.uiAddr += ADDR_TO_PAGE_OFFSET(ui32ByteOffset);
			break;
		}
		case LINUX_MEM_AREA_SUB_ALLOC:
		{
			/* Delegate to the parent with the combined offset. */
			CpuPAddr =
				OSMemHandleToCpuPAddr(psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea,
				                      psLinuxMemArea->uData.sSubAlloc.ui32ByteOffset
				                      + ui32ByteOffset);
			break;
		}
		default:
		{
			PVR_DPF((PVR_DBG_ERROR, "%s: Unknown LinuxMemArea type (%d)\n",
			         __FUNCTION__, psLinuxMemArea->eAreaType));
			PVR_ASSERT(CpuPAddr.uiAddr);
			break;
		}
	}

	return CpuPAddr;
}
2197 IMG_BOOL
2198 LinuxMemAreaPhysIsContig(LinuxMemArea *psLinuxMemArea)
2199 {
2200 switch (psLinuxMemArea->eAreaType)
2201 {
2202 case LINUX_MEM_AREA_IOREMAP:
2203 case LINUX_MEM_AREA_IO:
2204 return IMG_TRUE;
2206 case LINUX_MEM_AREA_EXTERNAL_KV:
2207 return psLinuxMemArea->uData.sExternalKV.bPhysContig;
2209 case LINUX_MEM_AREA_ION:
2210 case LINUX_MEM_AREA_VMALLOC:
2211 case LINUX_MEM_AREA_ALLOC_PAGES:
2212 return IMG_FALSE;
2214 case LINUX_MEM_AREA_SUB_ALLOC:
2215 /* PRQA S 3670 1 */ /* ignore recursive warning */
2216 return LinuxMemAreaPhysIsContig(psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea);
2218 default:
2219 PVR_DPF((PVR_DBG_ERROR, "%s: Unknown LinuxMemArea type (%d)\n",
2220 __FUNCTION__, psLinuxMemArea->eAreaType));
2221 break;
2222 }
2223 return IMG_FALSE;
2224 }
2227 const IMG_CHAR *
2228 LinuxMemAreaTypeToString(LINUX_MEM_AREA_TYPE eMemAreaType)
2229 {
2230 /* Note we explicitly check the types instead of e.g.
2231 * using the type to index an array of strings so
2232 * we remain orthogonal to enum changes */
2233 switch (eMemAreaType)
2234 {
2235 case LINUX_MEM_AREA_IOREMAP:
2236 return "LINUX_MEM_AREA_IOREMAP";
2237 case LINUX_MEM_AREA_EXTERNAL_KV:
2238 return "LINUX_MEM_AREA_EXTERNAL_KV";
2239 case LINUX_MEM_AREA_IO:
2240 return "LINUX_MEM_AREA_IO";
2241 case LINUX_MEM_AREA_VMALLOC:
2242 return "LINUX_MEM_AREA_VMALLOC";
2243 case LINUX_MEM_AREA_SUB_ALLOC:
2244 return "LINUX_MEM_AREA_SUB_ALLOC";
2245 case LINUX_MEM_AREA_ALLOC_PAGES:
2246 return "LINUX_MEM_AREA_ALLOC_PAGES";
2247 case LINUX_MEM_AREA_ION:
2248 return "LINUX_MEM_AREA_ION";
2249 default:
2250 PVR_ASSERT(0);
2251 }
2253 return "";
2254 }
2257 #if defined(DEBUG_LINUX_MEM_AREAS) || defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
2258 static void ProcSeqStartstopDebugMutex(struct seq_file *sfile, IMG_BOOL start)
2259 {
2260 if (start)
2261 {
2262 LinuxLockMutexNested(&g_sDebugMutex, PVRSRV_LOCK_CLASS_MM_DEBUG);
2263 }
2264 else
2265 {
2266 LinuxUnLockMutex(&g_sDebugMutex);
2267 }
2268 }
2269 #endif /* defined(DEBUG_LINUX_MEM_AREAS) || defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) */
2271 #if defined(DEBUG_LINUX_MEM_AREAS)
2273 static IMG_VOID* DecOffMemAreaRec_AnyVaCb(DEBUG_LINUX_MEM_AREA_REC *psNode, va_list va)
2274 {
2275 off_t *pOff = va_arg(va, off_t*);
2276 if (--(*pOff))
2277 {
2278 return IMG_NULL;
2279 }
2280 else
2281 {
2282 return psNode;
2283 }
2284 }
2286 /* seq_file version of generating output, for reference check proc.c:CreateProcReadEntrySeq */
2287 static void* ProcSeqNextMemArea(struct seq_file *sfile,void* el,loff_t off)
2288 {
2289 DEBUG_LINUX_MEM_AREA_REC *psRecord;
2290 psRecord = (DEBUG_LINUX_MEM_AREA_REC*)
2291 List_DEBUG_LINUX_MEM_AREA_REC_Any_va(g_LinuxMemAreaRecords,
2292 DecOffMemAreaRec_AnyVaCb,
2293 &off);
2294 return (void*)psRecord;
2295 }
2297 static void* ProcSeqOff2ElementMemArea(struct seq_file * sfile, loff_t off)
2298 {
2299 DEBUG_LINUX_MEM_AREA_REC *psRecord;
2300 if (!off)
2301 {
2302 return PVR_PROC_SEQ_START_TOKEN;
2303 }
2305 psRecord = (DEBUG_LINUX_MEM_AREA_REC*)
2306 List_DEBUG_LINUX_MEM_AREA_REC_Any_va(g_LinuxMemAreaRecords,
2307 DecOffMemAreaRec_AnyVaCb,
2308 &off);
2309 return (void*)psRecord;
2310 }
/* seq_file 'show' handler for the mem_areas /proc entry.
 * For the start token it emits the header (area count plus current/high
 * water marks); for any other element it emits one row describing that
 * memory area record.  Plain-text vs XML output is selected at build
 * time by DEBUG_LINUX_XML_PROC_FILES. */
static void ProcSeqShowMemArea(struct seq_file *sfile,void* el)
{
	DEBUG_LINUX_MEM_AREA_REC *psRecord = (DEBUG_LINUX_MEM_AREA_REC*)el;
	if (el == PVR_PROC_SEQ_START_TOKEN)
	{
#if !defined(DEBUG_LINUX_XML_PROC_FILES)
		/* Human-readable header: global counters, then column titles. */
		seq_printf(sfile,
				   "Number of Linux Memory Areas: %u\n"
				   "At the current water mark these areas correspond to %u bytes (excluding SUB areas)\n"
				   "At the highest water mark these areas corresponded to %u bytes (excluding SUB areas)\n"
				   "\nDetails for all Linux Memory Areas:\n"
				   "%s %-24s %s %s %-8s %-5s %s\n",
				   g_LinuxMemAreaCount,
				   g_LinuxMemAreaWaterMark,
				   g_LinuxMemAreaHighWaterMark,
				   "psLinuxMemArea",
				   "LinuxMemType",
				   "CpuVAddr",
				   "CpuPAddr",
				   "Bytes",
				   "Pid",
				   "Flags"
				   );
#else
		/* XML header: same counters as above, as stable-keyed watermarks. */
		seq_printf(sfile,
				   "<mem_areas_header>\n"
				   "\t<count>%u</count>\n"
				   "\t<watermark key=\"mar0\" description=\"current\" bytes=\"%u\"/>\n" /* (excluding SUB areas) */
				   "\t<watermark key=\"mar1\" description=\"high\" bytes=\"%u\"/>\n" /* (excluding SUB areas) */
				   "</mem_areas_header>\n",
				   g_LinuxMemAreaCount,
				   g_LinuxMemAreaWaterMark,
				   g_LinuxMemAreaHighWaterMark
				   );
#endif
		return;
	}

	/* One record: pointer, type, virtual/physical address, size, owning
	 * pid and the HAP flags (raw hex plus decoded string). */
	seq_printf(sfile,
#if !defined(DEBUG_LINUX_XML_PROC_FILES)
			   "%8p       %-24s %8p %08x %-8d %-5u %08x=(%s)\n",
#else
			   "<linux_mem_area>\n"
			   "\t<pointer>%8p</pointer>\n"
			   "\t<type>%s</type>\n"
			   "\t<cpu_virtual>%8p</cpu_virtual>\n"
			   "\t<cpu_physical>%08x</cpu_physical>\n"
			   "\t<bytes>%d</bytes>\n"
			   "\t<pid>%u</pid>\n"
			   "\t<flags>%08x</flags>\n"
			   "\t<flags_string>%s</flags_string>\n"
			   "</linux_mem_area>\n",
#endif
			   psRecord->psLinuxMemArea,
			   LinuxMemAreaTypeToString(psRecord->psLinuxMemArea->eAreaType),
			   LinuxMemAreaToCpuVAddr(psRecord->psLinuxMemArea),
			   LinuxMemAreaToCpuPAddr(psRecord->psLinuxMemArea,0).uiAddr,
			   psRecord->psLinuxMemArea->ui32ByteSize,
			   psRecord->pid,
			   psRecord->ui32Flags,
			   HAPFlagsToString(psRecord->ui32Flags)
			   );

}
2379 #endif /* DEBUG_LINUX_MEM_AREAS */
2382 #if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
2384 static IMG_VOID* DecOffMemAllocRec_AnyVaCb(DEBUG_MEM_ALLOC_REC *psNode, va_list va)
2385 {
2386 off_t *pOff = va_arg(va, off_t*);
2387 if (--(*pOff))
2388 {
2389 return IMG_NULL;
2390 }
2391 else
2392 {
2393 return psNode;
2394 }
2395 }
2398 /* seq_file version of generating output, for reference check proc.c:CreateProcReadEntrySeq */
2399 static void* ProcSeqNextMemoryRecords(struct seq_file *sfile,void* el,loff_t off)
2400 {
2401 DEBUG_MEM_ALLOC_REC *psRecord;
2402 psRecord = (DEBUG_MEM_ALLOC_REC*)
2403 List_DEBUG_MEM_ALLOC_REC_Any_va(g_MemoryRecords,
2404 DecOffMemAllocRec_AnyVaCb,
2405 &off);
2406 #if defined(DEBUG_LINUX_XML_PROC_FILES)
2407 if (!psRecord)
2408 {
2409 seq_printf(sfile, "</meminfo>\n");
2410 }
2411 #endif
2413 return (void*)psRecord;
2414 }
2416 static void* ProcSeqOff2ElementMemoryRecords(struct seq_file *sfile, loff_t off)
2417 {
2418 DEBUG_MEM_ALLOC_REC *psRecord;
2419 if (!off)
2420 {
2421 return PVR_PROC_SEQ_START_TOKEN;
2422 }
2424 psRecord = (DEBUG_MEM_ALLOC_REC*)
2425 List_DEBUG_MEM_ALLOC_REC_Any_va(g_MemoryRecords,
2426 DecOffMemAllocRec_AnyVaCb,
2427 &off);
2429 #if defined(DEBUG_LINUX_XML_PROC_FILES)
2430 if (!psRecord)
2431 {
2432 seq_printf(sfile, "</meminfo>\n");
2433 }
2434 #endif
2436 return (void*)psRecord;
2437 }
2439 static void ProcSeqShowMemoryRecords(struct seq_file *sfile,void* el)
2440 {
2441 DEBUG_MEM_ALLOC_REC *psRecord = (DEBUG_MEM_ALLOC_REC*)el;
2442 if (el == PVR_PROC_SEQ_START_TOKEN)
2443 {
2444 #if !defined(DEBUG_LINUX_XML_PROC_FILES)
2445 /* NOTE: If you update this code, please also update the XML varient below
2446 * too! */
2448 seq_printf(sfile, "%-60s: %d bytes\n",
2449 "Current Water Mark of bytes allocated via kmalloc",
2450 g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMALLOC]);
2451 seq_printf(sfile, "%-60s: %d bytes\n",
2452 "Highest Water Mark of bytes allocated via kmalloc",
2453 g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMALLOC]);
2454 seq_printf(sfile, "%-60s: %d bytes\n",
2455 "Current Water Mark of bytes allocated via vmalloc",
2456 g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMALLOC]);
2457 seq_printf(sfile, "%-60s: %d bytes\n",
2458 "Highest Water Mark of bytes allocated via vmalloc",
2459 g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMALLOC]);
2460 seq_printf(sfile, "%-60s: %d bytes\n",
2461 "Current Water Mark of bytes allocated via alloc_pages",
2462 g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]);
2463 seq_printf(sfile, "%-60s: %d bytes\n",
2464 "Highest Water Mark of bytes allocated via alloc_pages",
2465 g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]);
2466 seq_printf(sfile, "%-60s: %d bytes\n",
2467 "Current Water Mark of bytes allocated via ioremap",
2468 g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IOREMAP]);
2469 seq_printf(sfile, "%-60s: %d bytes\n",
2470 "Highest Water Mark of bytes allocated via ioremap",
2471 g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_IOREMAP]);
2472 seq_printf(sfile, "%-60s: %d bytes\n",
2473 "Current Water Mark of bytes reserved for \"IO\" memory areas",
2474 g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]);
2475 seq_printf(sfile, "%-60s: %d bytes\n",
2476 "Highest Water Mark of bytes allocated for \"IO\" memory areas",
2477 g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]);
2478 seq_printf(sfile, "%-60s: %d bytes\n",
2479 "Current Water Mark of bytes allocated via kmem_cache_alloc",
2480 g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]);
2481 seq_printf(sfile, "%-60s: %d bytes\n",
2482 "Highest Water Mark of bytes allocated via kmem_cache_alloc",
2483 g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]);
2484 #if defined(PVR_LINUX_MEM_AREA_USE_VMAP)
2485 seq_printf(sfile, "%-60s: %d bytes\n",
2486 "Current Water Mark of bytes mapped via vmap",
2487 g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMAP]);
2488 seq_printf(sfile, "%-60s: %d bytes\n",
2489 "Highest Water Mark of bytes mapped via vmap",
2490 g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMAP]);
2491 #endif
2492 #if (PVR_LINUX_MEM_AREA_POOL_MAX_PAGES != 0)
2493 seq_printf(sfile, "%-60s: %d pages\n",
2494 "Number of pages in page pool",
2495 atomic_read(&g_sPagePoolEntryCount));
2496 #endif
2497 seq_printf( sfile, "\n");
2498 seq_printf(sfile, "%-60s: %d bytes\n",
2499 "The Current Water Mark for memory allocated from system RAM",
2500 SysRAMTrueWaterMark());
2501 seq_printf(sfile, "%-60s: %d bytes\n",
2502 "The Highest Water Mark for memory allocated from system RAM",
2503 g_SysRAMHighWaterMark);
2504 seq_printf(sfile, "%-60s: %d bytes\n",
2505 "The Current Water Mark for memory allocated from IO memory",
2506 g_IOMemWaterMark);
2507 seq_printf(sfile, "%-60s: %d bytes\n",
2508 "The Highest Water Mark for memory allocated from IO memory",
2509 g_IOMemHighWaterMark);
2511 seq_printf( sfile, "\n");
2513 seq_printf(sfile, "Details for all known allocations:\n"
2514 "%-16s %-8s %-8s %-10s %-5s %-10s %s\n",
2515 "Type",
2516 "CpuVAddr",
2517 "CpuPAddr",
2518 "Bytes",
2519 "PID",
2520 "PrivateData",
2521 "Filename:Line");
2523 #else /* DEBUG_LINUX_XML_PROC_FILES */
2525 /* Note: If you want to update the description property of a watermark
2526 * ensure that the key property remains unchanged so that watermark data
2527 * logged over time from different driver revisions may remain comparable
2528 */
2529 seq_printf(sfile, "<meminfo>\n<meminfo_header>\n");
2530 seq_printf(sfile,
2531 "<watermark key=\"mr0\" description=\"kmalloc_current\" bytes=\"%d\"/>\n",
2532 g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMALLOC]);
2533 seq_printf(sfile,
2534 "<watermark key=\"mr1\" description=\"kmalloc_high\" bytes=\"%d\"/>\n",
2535 g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMALLOC]);
2536 seq_printf(sfile,
2537 "<watermark key=\"mr2\" description=\"vmalloc_current\" bytes=\"%d\"/>\n",
2538 g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMALLOC]);
2539 seq_printf(sfile,
2540 "<watermark key=\"mr3\" description=\"vmalloc_high\" bytes=\"%d\"/>\n",
2541 g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMALLOC]);
2542 seq_printf(sfile,
2543 "<watermark key=\"mr4\" description=\"alloc_pages_current\" bytes=\"%d\"/>\n",
2544 g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]);
2545 seq_printf(sfile,
2546 "<watermark key=\"mr5\" description=\"alloc_pages_high\" bytes=\"%d\"/>\n",
2547 g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]);
2548 seq_printf(sfile,
2549 "<watermark key=\"mr6\" description=\"ioremap_current\" bytes=\"%d\"/>\n",
2550 g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IOREMAP]);
2551 seq_printf(sfile,
2552 "<watermark key=\"mr7\" description=\"ioremap_high\" bytes=\"%d\"/>\n",
2553 g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_IOREMAP]);
2554 seq_printf(sfile,
2555 "<watermark key=\"mr8\" description=\"io_current\" bytes=\"%d\"/>\n",
2556 g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]);
2557 seq_printf(sfile,
2558 "<watermark key=\"mr9\" description=\"io_high\" bytes=\"%d\"/>\n",
2559 g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]);
2560 seq_printf(sfile,
2561 "<watermark key=\"mr10\" description=\"kmem_cache_current\" bytes=\"%d\"/>\n",
2562 g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]);
2563 seq_printf(sfile,
2564 "<watermark key=\"mr11\" description=\"kmem_cache_high\" bytes=\"%d\"/>\n",
2565 g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]);
2566 #if defined(PVR_LINUX_MEM_AREA_USE_VMAP)
2567 seq_printf(sfile,
2568 "<watermark key=\"mr12\" description=\"vmap_current\" bytes=\"%d\"/>\n",
2569 g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMAP]);
2570 seq_printf(sfile,
2571 "<watermark key=\"mr13\" description=\"vmap_high\" bytes=\"%d\"/>\n",
2572 g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMAP]);
2573 #endif
2574 seq_printf(sfile,
2575 "<watermark key=\"mr14\" description=\"system_ram_current\" bytes=\"%d\"/>\n",
2576 SysRAMTrueWaterMark());
2577 seq_printf(sfile,
2578 "<watermark key=\"mr15\" description=\"system_ram_high\" bytes=\"%d\"/>\n",
2579 g_SysRAMHighWaterMark);
2580 seq_printf(sfile,
2581 "<watermark key=\"mr16\" description=\"system_io_current\" bytes=\"%d\"/>\n",
2582 g_IOMemWaterMark);
2583 seq_printf(sfile,
2584 "<watermark key=\"mr17\" description=\"system_io_high\" bytes=\"%d\"/>\n",
2585 g_IOMemHighWaterMark);
2587 #if (PVR_LINUX_MEM_AREA_POOL_MAX_PAGES != 0)
2588 seq_printf(sfile,
2589 "<watermark key=\"mr18\" description=\"page_pool_current\" bytes=\"%d\"/>\n",
2590 PAGES_TO_BYTES(atomic_read(&g_sPagePoolEntryCount)));
2591 #endif
2592 seq_printf(sfile, "</meminfo_header>\n");
2594 #endif /* DEBUG_LINUX_XML_PROC_FILES */
2595 return;
2596 }
2598 if (psRecord->eAllocType != DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE)
2599 {
2600 seq_printf(sfile,
2601 #if !defined(DEBUG_LINUX_XML_PROC_FILES)
2602 "%-16s %-8p %08x %-10d %-5d %-10s %s:%d\n",
2603 #else
2604 "<allocation>\n"
2605 "\t<type>%s</type>\n"
2606 "\t<cpu_virtual>%-8p</cpu_virtual>\n"
2607 "\t<cpu_physical>%08x</cpu_physical>\n"
2608 "\t<bytes>%d</bytes>\n"
2609 "\t<pid>%d</pid>\n"
2610 "\t<private>%s</private>\n"
2611 "\t<filename>%s</filename>\n"
2612 "\t<line>%d</line>\n"
2613 "</allocation>\n",
2614 #endif
2615 DebugMemAllocRecordTypeToString(psRecord->eAllocType),
2616 psRecord->pvCpuVAddr,
2617 psRecord->ulCpuPAddr,
2618 psRecord->ui32Bytes,
2619 psRecord->pid,
2620 "NULL",
2621 psRecord->pszFileName,
2622 psRecord->ui32Line);
2623 }
2624 else
2625 {
2626 seq_printf(sfile,
2627 #if !defined(DEBUG_LINUX_XML_PROC_FILES)
2628 "%-16s %-8p %08x %-10d %-5d %-10s %s:%d\n",
2629 #else
2630 "<allocation>\n"
2631 "\t<type>%s</type>\n"
2632 "\t<cpu_virtual>%-8p</cpu_virtual>\n"
2633 "\t<cpu_physical>%08x</cpu_physical>\n"
2634 "\t<bytes>%d</bytes>\n"
2635 "\t<pid>%d</pid>\n"
2636 "\t<private>%s</private>\n"
2637 "\t<filename>%s</filename>\n"
2638 "\t<line>%d</line>\n"
2639 "</allocation>\n",
2640 #endif
2641 DebugMemAllocRecordTypeToString(psRecord->eAllocType),
2642 psRecord->pvCpuVAddr,
2643 psRecord->ulCpuPAddr,
2644 psRecord->ui32Bytes,
2645 psRecord->pid,
2646 KMemCacheNameWrapper(psRecord->pvPrivateData),
2647 psRecord->pszFileName,
2648 psRecord->ui32Line);
2649 }
2650 }
2652 #endif /* defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) */
2655 #if defined(DEBUG_LINUX_MEM_AREAS) || defined(DEBUG_LINUX_MMAP_AREAS)
2656 /* This could be moved somewhere more general */
2657 const IMG_CHAR *
2658 HAPFlagsToString(IMG_UINT32 ui32Flags)
2659 {
2660 static IMG_CHAR szFlags[50];
2661 IMG_INT32 i32Pos = 0;
2662 IMG_UINT32 ui32CacheTypeIndex, ui32MapTypeIndex;
2663 IMG_CHAR *apszCacheTypes[] = {
2664 "UNCACHED",
2665 "CACHED",
2666 "WRITECOMBINE",
2667 "UNKNOWN"
2668 };
2669 IMG_CHAR *apszMapType[] = {
2670 "KERNEL_ONLY",
2671 "SINGLE_PROCESS",
2672 "MULTI_PROCESS",
2673 "FROM_EXISTING_PROCESS",
2674 "NO_CPU_VIRTUAL",
2675 "UNKNOWN"
2676 };
2678 /* FIXME create an enum for the cache type that we can
2679 * cast and select so we get compiler warnings when
2680 * when this code isn't complete due to new flags */
2681 if (ui32Flags & PVRSRV_HAP_UNCACHED) {
2682 ui32CacheTypeIndex = 0;
2683 } else if (ui32Flags & PVRSRV_HAP_CACHED) {
2684 ui32CacheTypeIndex = 1;
2685 } else if (ui32Flags & PVRSRV_HAP_WRITECOMBINE) {
2686 ui32CacheTypeIndex = 2;
2687 } else {
2688 ui32CacheTypeIndex = 3;
2689 PVR_DPF((PVR_DBG_ERROR, "%s: unknown cache type (%u)",
2690 __FUNCTION__, (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK)));
2691 }
2693 /* FIXME create an enum for the map type that we can
2694 * cast and select so we get compiler warnings when
2695 * when this code isn't complete due to new flags */
2696 if (ui32Flags & PVRSRV_HAP_KERNEL_ONLY) {
2697 ui32MapTypeIndex = 0;
2698 } else if (ui32Flags & PVRSRV_HAP_SINGLE_PROCESS) {
2699 ui32MapTypeIndex = 1;
2700 } else if (ui32Flags & PVRSRV_HAP_MULTI_PROCESS) {
2701 ui32MapTypeIndex = 2;
2702 } else if (ui32Flags & PVRSRV_HAP_FROM_EXISTING_PROCESS) {
2703 ui32MapTypeIndex = 3;
2704 } else if (ui32Flags & PVRSRV_HAP_NO_CPU_VIRTUAL) {
2705 ui32MapTypeIndex = 4;
2706 } else {
2707 ui32MapTypeIndex = 5;
2708 PVR_DPF((PVR_DBG_ERROR, "%s: unknown map type (%u)",
2709 __FUNCTION__, (ui32Flags & PVRSRV_HAP_MAPTYPE_MASK)));
2710 }
2712 i32Pos = sprintf(szFlags, "%s|", apszCacheTypes[ui32CacheTypeIndex]);
2713 if (i32Pos <= 0)
2714 {
2715 PVR_DPF((PVR_DBG_ERROR, "%s: sprintf for cache type %u failed (%d)",
2716 __FUNCTION__, ui32CacheTypeIndex, i32Pos));
2717 szFlags[0] = 0;
2718 }
2719 else
2720 {
2721 sprintf(szFlags + i32Pos, "%s", apszMapType[ui32MapTypeIndex]);
2722 }
2724 return szFlags;
2725 }
2726 #endif
2728 #if defined(DEBUG_LINUX_MEM_AREAS)
2729 static IMG_VOID LinuxMMCleanup_MemAreas_ForEachCb(DEBUG_LINUX_MEM_AREA_REC *psCurrentRecord)
2730 {
2731 LinuxMemArea *psLinuxMemArea;
2733 psLinuxMemArea = psCurrentRecord->psLinuxMemArea;
2734 PVR_DPF((PVR_DBG_ERROR, "%s: BUG!: Cleaning up Linux memory area (%p), type=%s, size=%d bytes",
2735 __FUNCTION__,
2736 psCurrentRecord->psLinuxMemArea,
2737 LinuxMemAreaTypeToString(psCurrentRecord->psLinuxMemArea->eAreaType),
2738 psCurrentRecord->psLinuxMemArea->ui32ByteSize));
2739 /* Note this will also remove psCurrentRecord from g_LinuxMemAreaRecords
2740 * but that's ok since we have already got a pointer to the next area. */
2741 LinuxMemAreaDeepFree(psLinuxMemArea);
2742 }
2743 #endif
2745 #if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
/* Driver-unload sweeper for raw allocation records: any record still
 * present is a leak.  Logs the allocation site, then releases the
 * allocation with the free routine matching its type. */
static IMG_VOID LinuxMMCleanup_MemRecords_ForEachVa(DEBUG_MEM_ALLOC_REC *psCurrentRecord)

{
	/* It's a bug if anything remains allocated at this point. We
	 * report an error, and simply brute force free anything we find. */
	PVR_DPF((PVR_DBG_ERROR, "%s: BUG!: Cleaning up memory: "
							"type=%s "
							"CpuVAddr=%p "
							"CpuPAddr=0x%08x, "
							"allocated @ file=%s,line=%d",
			__FUNCTION__,
			DebugMemAllocRecordTypeToString(psCurrentRecord->eAllocType),
			psCurrentRecord->pvCpuVAddr,
			psCurrentRecord->ulCpuPAddr,
			psCurrentRecord->pszFileName,
			psCurrentRecord->ui32Line));
	switch (psCurrentRecord->eAllocType)
	{
		case DEBUG_MEM_ALLOC_TYPE_KMALLOC:
			/* Wrappers below also remove the debug record themselves. */
			KFreeWrapper(psCurrentRecord->pvCpuVAddr);
			break;
		case DEBUG_MEM_ALLOC_TYPE_IOREMAP:
			IOUnmapWrapper(psCurrentRecord->pvCpuVAddr);
			break;
		case DEBUG_MEM_ALLOC_TYPE_IO:
			/* Nothing needed except to free the record */
			DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_IO, psCurrentRecord->pvKey, __FILE__, __LINE__);
			break;
		case DEBUG_MEM_ALLOC_TYPE_VMALLOC:
			VFreeWrapper(psCurrentRecord->pvCpuVAddr);
			break;
		case DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES:
			/* Pages themselves are freed via their LinuxMemArea; only the
			 * record needs removing here. */
			DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES, psCurrentRecord->pvKey, __FILE__, __LINE__);
			break;
		case DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE:
			/* pvPrivateData holds the kmem_cache the object came from. */
			KMemCacheFreeWrapper(psCurrentRecord->pvPrivateData, psCurrentRecord->pvCpuVAddr);
			break;
#if defined(PVR_LINUX_MEM_AREA_USE_VMAP)
		case DEBUG_MEM_ALLOC_TYPE_VMAP:
			VUnmapWrapper(psCurrentRecord->pvCpuVAddr);
			break;
#endif
		default:
			PVR_ASSERT(0);
	}
}
2793 #endif
#if defined(PVR_LINUX_MEM_AREA_POOL_ALLOW_SHRINK)
/* Kernel memory-pressure hook: allows the MM to shrink our page pool
 * on demand via ShrinkPagePool. */
static struct shrinker g_sShrinker =
{
	.shrink = ShrinkPagePool,
	.seeks = DEFAULT_SEEKS
};

/* Set once register_shrinker() has run, so LinuxMMCleanup knows whether
 * it must unregister. */
static IMG_BOOL g_bShrinkerRegistered;
#endif
/* Tear down the Linux MM layer at driver unload: report and force-free
 * any leaked areas/allocations (debug builds), unregister the shrinker,
 * drain the page pool, remove the /proc entries and destroy the
 * kmem_caches.  Also used by LinuxMMInit to unwind partial init, so
 * every step tolerates its resource never having been created. */
IMG_VOID
LinuxMMCleanup(IMG_VOID)
{
#if defined(DEBUG_LINUX_MEM_AREAS)
	{
		/* Any mem area still recorded here is a leak. */
		if (g_LinuxMemAreaCount)
		{
			PVR_DPF((PVR_DBG_ERROR, "%s: BUG!: There are %d LinuxMemArea allocation unfreed (%d bytes)",
					__FUNCTION__, g_LinuxMemAreaCount, g_LinuxMemAreaWaterMark));
		}

		List_DEBUG_LINUX_MEM_AREA_REC_ForEach(g_LinuxMemAreaRecords, LinuxMMCleanup_MemAreas_ForEachCb);

		if (g_SeqFileMemArea)
		{
			RemoveProcEntrySeq(g_SeqFileMemArea);
		}
	}
#endif

#if defined(PVR_LINUX_MEM_AREA_POOL_ALLOW_SHRINK)
	/* Only unregister if LinuxMMInit actually registered the shrinker. */
	if (g_bShrinkerRegistered)
	{
		unregister_shrinker(&g_sShrinker);
	}
#endif

	/*
	 * The page pool must be freed after any remaining mem areas, but before
	 * the remaining memory resources.
	 */
	FreePagePool();

#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
	{
		/*
		 * It's a bug if anything remains allocated at this point. We
		 * report an error, and simply brute force free anything we find.
		 */
		List_DEBUG_MEM_ALLOC_REC_ForEach(g_MemoryRecords, LinuxMMCleanup_MemRecords_ForEachVa);

		if (g_SeqFileMemoryRecords)
		{
			RemoveProcEntrySeq(g_SeqFileMemoryRecords);
		}
	}
#endif

	/* Caches may be NULL if LinuxMMInit failed part-way through. */
	if (g_PsLinuxMemAreaCache)
	{
		KMemCacheDestroyWrapper(g_PsLinuxMemAreaCache);
	}

	if (g_PsLinuxPagePoolCache)
	{
		KMemCacheDestroyWrapper(g_PsLinuxPagePoolCache);
	}
}
/* Initialise the Linux MM layer at driver load: debug mutex and /proc
 * entries (debug builds), the mem-area and page-pool kmem_caches, and
 * the page-pool shrinker.  On any failure it unwinds via LinuxMMCleanup
 * and returns PVRSRV_ERROR_OUT_OF_MEMORY; on success, PVRSRV_OK. */
PVRSRV_ERROR
LinuxMMInit(IMG_VOID)
{
#if defined(DEBUG_LINUX_MEM_AREAS) || defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
	/* Mutex guarding the debug record lists during /proc iteration. */
	LinuxInitMutex(&g_sDebugMutex);
#endif

#if defined(DEBUG_LINUX_MEM_AREAS)
	{
		/* /proc entry listing all live LinuxMemAreas. */
		g_SeqFileMemArea = CreateProcReadEntrySeq(
										"mem_areas",
										NULL,
										ProcSeqNextMemArea,
										ProcSeqShowMemArea,
										ProcSeqOff2ElementMemArea,
										ProcSeqStartstopDebugMutex
									  );
		if (!g_SeqFileMemArea)
		{
			goto failed;
		}
	}
#endif


#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
	{
		/* /proc entry listing raw allocation records and water marks. */
		g_SeqFileMemoryRecords = CreateProcReadEntrySeq(
										"meminfo",
										NULL,
										ProcSeqNextMemoryRecords,
										ProcSeqShowMemoryRecords,
										ProcSeqOff2ElementMemoryRecords,
										ProcSeqStartstopDebugMutex
									  );
		if (!g_SeqFileMemoryRecords)
		{
			goto failed;
		}
	}
#endif

	/* Slab cache for LinuxMemArea structures. */
	g_PsLinuxMemAreaCache = KMemCacheCreateWrapper("img-mm", sizeof(LinuxMemArea), 0, 0);
	if (!g_PsLinuxMemAreaCache)
	{
		PVR_DPF((PVR_DBG_ERROR,"%s: failed to allocate mem area kmem_cache", __FUNCTION__));
		goto failed;
	}

#if (PVR_LINUX_MEM_AREA_POOL_MAX_PAGES != 0)
	/* Clamp the configured pool limit to a sane range; out-of-range
	 * values effectively mean "no limit" (INT_MAX/2). */
	g_iPagePoolMaxEntries = PVR_LINUX_MEM_AREA_POOL_MAX_PAGES;
	if (g_iPagePoolMaxEntries <= 0 || g_iPagePoolMaxEntries > INT_MAX/2)
	{
		g_iPagePoolMaxEntries = INT_MAX/2;
		PVR_TRACE(("%s: No limit set for page pool size", __FUNCTION__));
	}
	else
	{
		PVR_TRACE(("%s: Maximum page pool size: %d", __FUNCTION__, g_iPagePoolMaxEntries));
	}

	/* Slab cache for page-pool entry bookkeeping structures. */
	g_PsLinuxPagePoolCache = KMemCacheCreateWrapper("img-mm-pool", sizeof(LinuxPagePoolEntry), 0, 0);
	if (!g_PsLinuxPagePoolCache)
	{
		PVR_DPF((PVR_DBG_ERROR,"%s: failed to allocate page pool kmem_cache", __FUNCTION__));
		goto failed;
	}
#endif

#if defined(PVR_LINUX_MEM_AREA_POOL_ALLOW_SHRINK)
	/* Let the kernel reclaim pool pages under memory pressure; the flag
	 * tells LinuxMMCleanup to unregister. */
	register_shrinker(&g_sShrinker);
	g_bShrinkerRegistered = IMG_TRUE;
#endif

	return PVRSRV_OK;

failed:
	/* Unwind whatever was set up above; cleanup tolerates NULL handles. */
	LinuxMMCleanup();
	return PVRSRV_ERROR_OUT_OF_MEMORY;
}