Diffstat (limited to 'omap5/sgx_src/eurasia_km/services4/srvkm/common/buffer_manager.c')
 omap5/sgx_src/eurasia_km/services4/srvkm/common/buffer_manager.c | 3573 -------
 1 file changed, 0 insertions(+), 3573 deletions(-)
diff --git a/omap5/sgx_src/eurasia_km/services4/srvkm/common/buffer_manager.c b/omap5/sgx_src/eurasia_km/services4/srvkm/common/buffer_manager.c
deleted file mode 100644
index 9ce7a11..0000000
--- a/omap5/sgx_src/eurasia_km/services4/srvkm/common/buffer_manager.c
+++ /dev/null
@@ -1,3573 +0,0 @@
/*************************************************************************/ /*!
@Title          Buffer management functions for Linux
@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
@Description    Manages buffers mapped into two memory spaces - cpu and device,
                either of which can be virtual or physical.
@License        Dual MIT/GPLv2

The contents of this file are subject to the MIT license as set out below.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

Alternatively, the contents of this file may be used under the terms of
the GNU General Public License Version 2 ("GPL") in which case the provisions
of GPL are applicable instead of those above.

If you wish to allow use of your version of this file only under the terms of
GPL, and not to allow others to use your version of this file under the terms
of the MIT license, indicate your decision by deleting the provisions above
and replace them with the notice and other provisions required by GPL as set
out in the file called "GPL-COPYING" included in this distribution. If you do
not delete the provisions above, a recipient may use your version of this file
under the terms of either the MIT license or GPL.

This License is also included in this distribution in the file called
"MIT-COPYING".

EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/ /**************************************************************************/
43
44#include "services_headers.h"
45
46#include "sysconfig.h"
47#include "hash.h"
48#include "ra.h"
49#include "pdump_km.h"
50#include "lists.h"
51
52static IMG_BOOL
53ZeroBuf(BM_BUF *pBuf, BM_MAPPING *pMapping, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui32Flags);
54static IMG_VOID
55BM_FreeMemory (IMG_VOID *pH, IMG_UINTPTR_T base, BM_MAPPING *psMapping);
56static IMG_BOOL
57BM_ImportMemory(IMG_VOID *pH, IMG_SIZE_T uSize,
58 IMG_SIZE_T *pActualSize, BM_MAPPING **ppsMapping,
59 IMG_UINT32 uFlags, IMG_PVOID pvPrivData,
60 IMG_UINT32 ui32PrivDataLength, IMG_UINTPTR_T *pBase);
61
62static IMG_INT32
63DevMemoryAlloc (BM_CONTEXT *pBMContext,
64 BM_MAPPING *pMapping,
65 IMG_SIZE_T *pActualSize,
66 IMG_UINT32 uFlags,
67 IMG_UINT32 dev_vaddr_alignment,
68 IMG_DEV_VIRTADDR *pDevVAddr);
69static IMG_INT32
70DevMemoryFree (BM_MAPPING *pMapping);
71
/*!
******************************************************************************

    @Function   AllocMemory

    @Description    Allocate a buffer mapped into both cpu and device virtual
                    address spaces. This is now quite simple:

                    1. Choose whence to get the memory;
                    2. Obtain memory from that source;
                    3. Work out the actual buffer addresses in other spaces.

                    In choosing whence to get the memory we work like this:

                    1. If an import arena exists, use it unless BP_CONTIGUOUS is set;
                    2. Use a contiguous pool.

    @Input      pBMContext - BM context
    @Input      psBMHeap - BM heap
    @Input      psDevVAddr - device virtual address (optional)
    @Input      uSize - requested buffer size in bytes.
    @Input      uFlags - property flags for the buffer.
    @Input      uDevVAddrAlignment - required device virtual address
                    alignment, or 0.
    @Input      pvPrivData - opaque private data passed through to allocator
    @Input      ui32PrivDataLength - length of opaque private data

    @Output     pBuf - receives a pointer to a descriptor of the allocated
                    buffer.
    @Return     IMG_TRUE - Success
                IMG_FALSE - Failed.

 *****************************************************************************/
static IMG_BOOL
AllocMemory (BM_CONTEXT        *pBMContext,
             BM_HEAP           *psBMHeap,
             IMG_DEV_VIRTADDR  *psDevVAddr,
             IMG_SIZE_T        uSize,
             IMG_UINT32        uFlags,
             IMG_UINT32        uDevVAddrAlignment,
             IMG_PVOID         pvPrivData,
             IMG_UINT32        ui32PrivDataLength,
             IMG_UINT32        ui32ChunkSize,
             IMG_UINT32        ui32NumVirtChunks,
             IMG_UINT32        ui32NumPhysChunks,
             IMG_BOOL          *pabMapChunk,
             BM_BUF            *pBuf)
{
    BM_MAPPING      *pMapping;
    IMG_UINTPTR_T   uOffset;
    RA_ARENA        *pArena = IMG_NULL;

    PVR_DPF ((PVR_DBG_MESSAGE,
              "AllocMemory (uSize=0x%x, uFlags=0x%x, align=0x%x)",
              uSize, uFlags, uDevVAddrAlignment));

    /*
        what to do depends on combination of DevVaddr generation
        and backing RAM requirement
    */
    if(uFlags & PVRSRV_MEM_RAM_BACKED_ALLOCATION)
    {
        if(uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR)
        {
            /* user supplied DevVAddr, RAM backing */
            PVR_DPF ((PVR_DBG_ERROR, "AllocMemory: combination of DevVAddr management and RAM backing mode unsupported"));
            return IMG_FALSE;
        }

        /* BM supplied DevVAddr, RAM Backing */

        /* check heap attributes */
        if(psBMHeap->ui32Attribs
           & (PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG
             |PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG))
        {
            /* specify arena (VM+RAM) */
            pArena = psBMHeap->pImportArena;
            PVR_ASSERT(psBMHeap->sDevArena.psDeviceMemoryHeapInfo->ui32Attribs & PVRSRV_MEM_RAM_BACKED_ALLOCATION);
        }
        else
        {
            PVR_DPF ((PVR_DBG_ERROR, "AllocMemory: backing store type doesn't match heap"));
            return IMG_FALSE;
        }

        /* Now allocate from the arena we chose above. */
        /* in case of a pageable buffer, we must bypass RA which could
         * combine/split individual mappings between buffers:
         */
        if (uFlags & (PVRSRV_MEM_SPARSE | PVRSRV_HAP_GPU_PAGEABLE))
        {
            IMG_BOOL bSuccess;
            IMG_SIZE_T puiActualSize;
            IMG_SIZE_T uRequestSize = uSize;

            if(uFlags & PVRSRV_MEM_SPARSE)
            {
                uRequestSize = ui32ChunkSize * ui32NumPhysChunks;
                uSize = ui32ChunkSize * ui32NumVirtChunks;
            }

            /* Allocate physical memory */
            if (!BM_ImportMemory(psBMHeap,
                                 uRequestSize,
                                 &puiActualSize,
                                 &pMapping,
                                 uFlags,
                                 pvPrivData,
                                 ui32PrivDataLength,
                                 (IMG_UINTPTR_T *)&(pBuf->DevVAddr.uiAddr)))
            {
                PVR_DPF((PVR_DBG_ERROR,
                         "AllocMemory: BM_ImportMemory failed to allocate device memory"));
                return IMG_FALSE;
            }
            pBuf->hOSMemHandle = pMapping->hOSMemHandle;

            /* We allocate VM space for sparse area */
            if(uFlags & PVRSRV_MEM_SPARSE)
            {
                if (puiActualSize != ui32ChunkSize * ui32NumPhysChunks)
                {
                    /*
                     * Most likely the chunk size was not a host page multiple,
                     * so return with an error
                     */
                    PVR_DPF((PVR_DBG_ERROR, "AllocMemory: Failed to allocate "
                             "memory for sparse allocation"));
                    BM_FreeMemory(pArena, IMG_NULL, pMapping);
                    return IMG_FALSE;
                }

                pMapping->uSizeVM = uSize;
                pMapping->ui32ChunkSize = ui32ChunkSize;
                pMapping->ui32NumVirtChunks = ui32NumVirtChunks;
                pMapping->ui32NumPhysChunks = ui32NumPhysChunks;
                pMapping->pabMapChunk = pabMapChunk;

                if (!(uFlags & PVRSRV_HAP_NO_GPU_VIRTUAL_ON_ALLOC))
                {
                    /* Allocate VA space and map in the physical memory */
                    bSuccess = DevMemoryAlloc (pBMContext,
                                               pMapping,
                                               IMG_NULL,
                                               uFlags,
                                               (IMG_UINT32)uDevVAddrAlignment,
                                               &pMapping->DevVAddr);
                    if (!bSuccess)
                    {
                        PVR_DPF((PVR_DBG_ERROR,
                                 "AllocMemory: Failed to allocate device memory"));
                        BM_FreeMemory(pArena, IMG_NULL, pMapping);
                        return IMG_FALSE;
                    }

                    /* uDevVAddrAlignment is currently set to zero so QAC
                     * generates warning which we override */
                    /* PRQA S 3356,3358 1 */
                    PVR_ASSERT (uDevVAddrAlignment>1?(pMapping->DevVAddr.uiAddr%uDevVAddrAlignment)==0:1);
                    pBuf->DevVAddr.uiAddr = pMapping->DevVAddr.uiAddr;
                }
            }
        }
        else
        {
            if (!RA_Alloc(pArena,
                          uSize,
                          IMG_NULL,
                          (IMG_VOID*) &pMapping,
                          uFlags,
                          uDevVAddrAlignment,
                          0,
                          pvPrivData,
                          ui32PrivDataLength,
                          (IMG_UINTPTR_T *)&(pBuf->DevVAddr.uiAddr)))
            {
                PVR_DPF((PVR_DBG_ERROR, "AllocMemory: RA_Alloc(0x%x) hOSMemHandle %p, flags 0x%08x FAILED",
                         uSize, pMapping->hOSMemHandle, uFlags));
                return IMG_FALSE;
            }
        }

        uOffset = pBuf->DevVAddr.uiAddr - pMapping->DevVAddr.uiAddr;
        if(pMapping->CpuVAddr)
        {
            pBuf->CpuVAddr = (IMG_VOID*) ((IMG_UINTPTR_T)pMapping->CpuVAddr + uOffset);
        }
        else
        {
            pBuf->CpuVAddr = IMG_NULL;
        }

        if(uSize == pMapping->uSizeVM)
        {
            pBuf->hOSMemHandle = pMapping->hOSMemHandle;
        }
        else
        {
            if(OSGetSubMemHandle(pMapping->hOSMemHandle,
                                 uOffset,
                                 uSize,
                                 psBMHeap->ui32Attribs,
                                 &pBuf->hOSMemHandle)!=PVRSRV_OK)
            {
                PVR_DPF((PVR_DBG_ERROR, "AllocMemory: OSGetSubMemHandle FAILED"));
                return IMG_FALSE;
            }
        }

        /* for hm_contiguous and hm_wrapped memory, the pMapping
         * will have a physical address, else 0 */
        pBuf->CpuPAddr.uiAddr = pMapping->CpuPAddr.uiAddr + uOffset;

        if(uFlags & PVRSRV_MEM_ZERO)
        {
            if(!ZeroBuf(pBuf, pMapping, uSize, psBMHeap->ui32Attribs | uFlags))
            {
                return IMG_FALSE;
            }
        }
    }
    else
    {
        if(uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR)
        {
            /* user supplied DevVAddr, no RAM backing */
            PVR_ASSERT(psDevVAddr != IMG_NULL);

            if (psDevVAddr == IMG_NULL)
            {
                PVR_DPF((PVR_DBG_ERROR, "AllocMemory: invalid parameter - psDevVAddr"));
                return IMG_FALSE;
            }

            /* just make space in the pagetables */
            pBMContext->psDeviceNode->pfnMMUAlloc (psBMHeap->pMMUHeap,
                                                   uSize,
                                                   IMG_NULL,
                                                   PVRSRV_MEM_USER_SUPPLIED_DEVVADDR,
                                                   uDevVAddrAlignment,
                                                   psDevVAddr);

            /* setup buf */
            pBuf->DevVAddr = *psDevVAddr;
        }
        else
        {
            IMG_BOOL bResult;
            /* BM supplied DevVAddr, no RAM Backing */

            /* just make space in the pagetables */
            bResult = pBMContext->psDeviceNode->pfnMMUAlloc (psBMHeap->pMMUHeap,
                                                             uSize,
                                                             IMG_NULL,
                                                             0,
                                                             uDevVAddrAlignment,
                                                             &pBuf->DevVAddr);

            if(!bResult)
            {
                PVR_DPF((PVR_DBG_ERROR, "AllocMemory: MMUAlloc failed"));
                return IMG_FALSE;
            }
        }

        /* allocate a mocked-up mapping */
        if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
                       sizeof (struct _BM_MAPPING_),
                       (IMG_PVOID *)&pMapping, IMG_NULL,
                       "Buffer Manager Mapping") != PVRSRV_OK)
        {
            PVR_DPF((PVR_DBG_ERROR, "AllocMemory: OSAllocMem(0x%x) FAILED", sizeof(*pMapping)));
            return IMG_FALSE;
        }

        /* setup buf */
        pBuf->CpuVAddr = IMG_NULL;
        pBuf->hOSMemHandle = 0;
        pBuf->CpuPAddr.uiAddr = 0;

        /* setup mapping */
        pMapping->CpuVAddr = IMG_NULL;
        pMapping->CpuPAddr.uiAddr = 0;
        pMapping->DevVAddr = pBuf->DevVAddr;
        pMapping->ui32MappingCount = 1;
        pMapping->psSysAddr = IMG_NULL;
        pMapping->uSize = uSize;
        pMapping->hOSMemHandle = 0;
    }

    /* Record the arena pointer in the mapping. */
    pMapping->pArena = pArena;
    pMapping->ui32DevVAddrAlignment = uDevVAddrAlignment;

    /* record the heap */
    pMapping->pBMHeap = psBMHeap;
    pBuf->pMapping = pMapping;

    /* output some stats */
    PVR_DPF ((PVR_DBG_MESSAGE,
              "AllocMemory: pMapping=%08x: DevV=%08X CpuV=%08x CpuP=%08X uSize=0x%x",
              (IMG_UINTPTR_T)pMapping,
              pMapping->DevVAddr.uiAddr,
              (IMG_UINTPTR_T)pMapping->CpuVAddr,
              pMapping->CpuPAddr.uiAddr,
              pMapping->uSize));

    PVR_DPF ((PVR_DBG_MESSAGE,
              "AllocMemory: pBuf=%08x: DevV=%08X CpuV=%08x CpuP=%08X uSize=0x%x",
              (IMG_UINTPTR_T)pBuf,
              pBuf->DevVAddr.uiAddr,
              (IMG_UINTPTR_T)pBuf->CpuVAddr,
              pBuf->CpuPAddr.uiAddr,
              uSize));

    /* Verify virtual device address alignment */
    PVR_ASSERT(((pBuf->DevVAddr.uiAddr) & (uDevVAddrAlignment - 1)) == 0);

    return IMG_TRUE;
}

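/*
 * Worked example of the sparse sizing above (numbers are illustrative, not
 * taken from this driver's headers): with ui32ChunkSize = 0x10000,
 * ui32NumPhysChunks = 2 and ui32NumVirtChunks = 8, AllocMemory only asks
 * BM_ImportMemory for the physical backing while reserving the full virtual
 * range:
 *
 *     uRequestSize = ui32ChunkSize * ui32NumPhysChunks;   // 0x20000 backed
 *     uSize        = ui32ChunkSize * ui32NumVirtChunks;   // 0x80000 of VA
 *
 * BM_ImportMemory must return exactly uRequestSize, which is why a chunk
 * size that is not a host page multiple fails the puiActualSize check.
 */
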
/*!
******************************************************************************

    @Function   WrapMemory

    @Description    Allocate a buffer mapped into both cpu and device virtual
                    address spaces.

    @Input      psBMHeap - BM heap
    @Input      uSize - requested buffer size in bytes.
    @Input      ui32BaseOffset - Offset from page of wrap.
    @Input      bPhysContig - Is the wrap physically contiguous.
    @Input      psAddr - List of pages to wrap.
    @Input      pvCPUVAddr - Optional CPU Kernel virtual address (page aligned) of memory to wrap
    @Input      uFlags - property flags for the buffer.
    @Output     pBuf - receives a pointer to a descriptor of the allocated
                    buffer.
    @Return     IMG_TRUE - Success
                IMG_FALSE - Failed.

 *****************************************************************************/
static IMG_BOOL
WrapMemory (BM_HEAP *psBMHeap,
            IMG_SIZE_T uSize,
            IMG_SIZE_T ui32BaseOffset,
            IMG_BOOL bPhysContig,
            IMG_SYS_PHYADDR *psAddr,
            IMG_VOID *pvCPUVAddr,
            IMG_UINT32 uFlags,
            BM_BUF *pBuf)
{
    IMG_DEV_VIRTADDR DevVAddr = {0};
    BM_MAPPING *pMapping;
    IMG_INT32 bResult;
    IMG_SIZE_T const ui32PageSize = HOST_PAGESIZE();

    PVR_DPF ((PVR_DBG_MESSAGE,
              "WrapMemory(psBMHeap=%08X, size=0x%x, offset=0x%x, bPhysContig=0x%x, pvCPUVAddr = 0x%08x, flags=0x%x)",
              (IMG_UINTPTR_T)psBMHeap, uSize, ui32BaseOffset, bPhysContig, (IMG_UINTPTR_T)pvCPUVAddr, uFlags));

    PVR_ASSERT((psAddr->uiAddr & (ui32PageSize - 1)) == 0);
    /* Only need lower 12 bits of the cpu addr - don't care what size a void* is */
    PVR_ASSERT(((IMG_UINTPTR_T)pvCPUVAddr & (ui32PageSize - 1)) == 0);

    uSize += ui32BaseOffset;
    uSize = HOST_PAGEALIGN (uSize);

    /* allocate a mocked-up mapping */
    if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
                   sizeof(*pMapping),
                   (IMG_PVOID *)&pMapping, IMG_NULL,
                   "Mocked-up mapping") != PVRSRV_OK)
    {
        PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSAllocMem(0x%x) FAILED", sizeof(*pMapping)));
        return IMG_FALSE;
    }

    OSMemSet(pMapping, 0, sizeof (*pMapping));

    pMapping->uSize = uSize;
    pMapping->uSizeVM = uSize;
    pMapping->pBMHeap = psBMHeap;

    if(pvCPUVAddr)
    {
        pMapping->CpuVAddr = pvCPUVAddr;

        if (bPhysContig)
        {
            pMapping->eCpuMemoryOrigin = hm_wrapped_virtaddr;
            pMapping->CpuPAddr = SysSysPAddrToCpuPAddr(psAddr[0]);

            if(OSRegisterMem(pMapping->CpuPAddr,
                             pMapping->CpuVAddr,
                             pMapping->uSize,
                             uFlags,
                             &pMapping->hOSMemHandle) != PVRSRV_OK)
            {
                PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSRegisterMem(Phys=0x%08X, Size=%d) failed",
                         pMapping->CpuPAddr.uiAddr, pMapping->uSize));
                goto fail_cleanup;
            }
        }
        else
        {
            pMapping->eCpuMemoryOrigin = hm_wrapped_scatter_virtaddr;
            pMapping->psSysAddr = psAddr;

            if(OSRegisterDiscontigMem(pMapping->psSysAddr,
                                      pMapping->CpuVAddr,
                                      pMapping->uSize,
                                      uFlags,
                                      &pMapping->hOSMemHandle) != PVRSRV_OK)
            {
                PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSRegisterDiscontigMem(Size=%d) failed",
                         pMapping->uSize));
                goto fail_cleanup;
            }
        }
    }
    else
    {
        if (bPhysContig)
        {
            pMapping->eCpuMemoryOrigin = hm_wrapped;
            pMapping->CpuPAddr = SysSysPAddrToCpuPAddr(psAddr[0]);

            if(OSReservePhys(pMapping->CpuPAddr,
                             pMapping->uSize,
                             uFlags,
                             IMG_NULL,
                             &pMapping->CpuVAddr,
                             &pMapping->hOSMemHandle) != PVRSRV_OK)
            {
                PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSReservePhys(Phys=0x%08X, Size=%d) failed",
                         pMapping->CpuPAddr.uiAddr, pMapping->uSize));
                goto fail_cleanup;
            }
        }
        else
        {
            pMapping->eCpuMemoryOrigin = hm_wrapped_scatter;
            pMapping->psSysAddr = psAddr;

            if(OSReserveDiscontigPhys(pMapping->psSysAddr,
                                      pMapping->uSize,
                                      uFlags,
                                      &pMapping->CpuVAddr,
                                      &pMapping->hOSMemHandle) != PVRSRV_OK)
            {
                PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSReserveDiscontigPhys(Size=%d) failed",
                         pMapping->uSize));
                goto fail_cleanup;
            }
        }
    }

    /*
     * Allocate device memory for this buffer. Map wrapped pages as read/write
     */
    bResult = DevMemoryAlloc(psBMHeap->pBMContext,
                             pMapping,
                             IMG_NULL,
                             uFlags | PVRSRV_MEM_READ | PVRSRV_MEM_WRITE,
                             IMG_CAST_TO_DEVVADDR_UINT(ui32PageSize),
                             &DevVAddr);
    if (bResult <= 0)
    {
        PVR_DPF((PVR_DBG_ERROR,
                 "WrapMemory: DevMemoryAlloc(0x%x) failed",
                 pMapping->uSize));
        goto fail_cleanup;
    }

    /*
     * Determine the offset of this allocation within the underlying
     * dual mapped chunk of memory; we can assume that all three
     * addresses associated with this allocation are placed at the same
     * offset within the underlying chunk.
     */
    pBuf->CpuPAddr.uiAddr = pMapping->CpuPAddr.uiAddr + ui32BaseOffset;
    if(!ui32BaseOffset)
    {
        pBuf->hOSMemHandle = pMapping->hOSMemHandle;
    }
    else
    {
        if(OSGetSubMemHandle(pMapping->hOSMemHandle,
                             ui32BaseOffset,
                             (pMapping->uSize-ui32BaseOffset),
                             uFlags,
                             &pBuf->hOSMemHandle)!=PVRSRV_OK)
        {
            PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSGetSubMemHandle failed"));
            goto fail_cleanup;
        }
    }
    if(pMapping->CpuVAddr)
    {
        pBuf->CpuVAddr = (IMG_VOID*) ((IMG_UINTPTR_T)pMapping->CpuVAddr + ui32BaseOffset);
    }
    pBuf->DevVAddr.uiAddr = pMapping->DevVAddr.uiAddr + IMG_CAST_TO_DEVVADDR_UINT(ui32BaseOffset);

    if(uFlags & PVRSRV_MEM_ZERO)
    {
        if(!ZeroBuf(pBuf, pMapping, uSize, uFlags))
        {
            return IMG_FALSE;
        }
    }

    PVR_DPF ((PVR_DBG_MESSAGE, "DevVaddr.uiAddr=%08X", DevVAddr.uiAddr));
    PVR_DPF ((PVR_DBG_MESSAGE,
              "WrapMemory: DevV=%08X CpuP=%08X uSize=0x%x",
              pMapping->DevVAddr.uiAddr, pMapping->CpuPAddr.uiAddr, pMapping->uSize));
    PVR_DPF ((PVR_DBG_MESSAGE,
              "WrapMemory: DevV=%08X CpuP=%08X uSize=0x%x",
              pBuf->DevVAddr.uiAddr, pBuf->CpuPAddr.uiAddr, uSize));

    pBuf->pMapping = pMapping;
    return IMG_TRUE;

fail_cleanup:
    if(ui32BaseOffset && pBuf->hOSMemHandle)
    {
        OSReleaseSubMemHandle(pBuf->hOSMemHandle, uFlags);
    }

    if(pMapping && (pMapping->CpuVAddr || pMapping->hOSMemHandle))
    {
        switch(pMapping->eCpuMemoryOrigin)
        {
            case hm_wrapped:
                OSUnReservePhys(pMapping->CpuVAddr, pMapping->uSize, uFlags, pMapping->hOSMemHandle);
                break;
            case hm_wrapped_virtaddr:
                OSUnRegisterMem(pMapping->CpuVAddr, pMapping->uSize, uFlags, pMapping->hOSMemHandle);
                break;
            case hm_wrapped_scatter:
                OSUnReserveDiscontigPhys(pMapping->CpuVAddr, pMapping->uSize, uFlags, pMapping->hOSMemHandle);
                break;
            case hm_wrapped_scatter_virtaddr:
                OSUnRegisterDiscontigMem(pMapping->CpuVAddr, pMapping->uSize, uFlags, pMapping->hOSMemHandle);
                break;
            default:
                break;
        }
    }

    OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), pMapping, IMG_NULL);
    /*not nulling pointer, out of scope*/

    return IMG_FALSE;
}

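/*
 * Summary of how WrapMemory classifies a wrap, derived from the branches
 * above; each origin pairs with the OS call shown (and its inverse on the
 * fail_cleanup path):
 *
 *     pvCPUVAddr  bPhysContig  eCpuMemoryOrigin             registered via
 *     ----------  -----------  ---------------------------  ----------------------
 *     non-NULL    IMG_TRUE     hm_wrapped_virtaddr          OSRegisterMem
 *     non-NULL    IMG_FALSE    hm_wrapped_scatter_virtaddr  OSRegisterDiscontigMem
 *     IMG_NULL    IMG_TRUE     hm_wrapped                   OSReservePhys
 *     IMG_NULL    IMG_FALSE    hm_wrapped_scatter           OSReserveDiscontigPhys
 */
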
static IMG_BOOL
ZeroBuf(BM_BUF *pBuf, BM_MAPPING *pMapping, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui32Flags)
{
    IMG_VOID *pvCpuVAddr;

    if(pBuf->CpuVAddr)
    {
        OSMemSet(pBuf->CpuVAddr, 0, ui32Bytes);
    }
    else if(pMapping->eCpuMemoryOrigin == hm_contiguous
            || pMapping->eCpuMemoryOrigin == hm_wrapped)
    {
        pvCpuVAddr = OSMapPhysToLin(pBuf->CpuPAddr,
                                    ui32Bytes,
                                    PVRSRV_HAP_KERNEL_ONLY
                                    | (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK),
                                    IMG_NULL);
        if(!pvCpuVAddr)
        {
            PVR_DPF((PVR_DBG_ERROR, "ZeroBuf: OSMapPhysToLin for contiguous buffer failed"));
            return IMG_FALSE;
        }
        OSMemSet(pvCpuVAddr, 0, ui32Bytes);
        OSUnMapPhysToLin(pvCpuVAddr,
                         ui32Bytes,
                         PVRSRV_HAP_KERNEL_ONLY
                         | (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK),
                         IMG_NULL);
    }
    else
    {
        IMG_SIZE_T ui32BytesRemaining = ui32Bytes;
        IMG_SIZE_T ui32CurrentOffset = 0;
        IMG_CPU_PHYADDR CpuPAddr;

        /* Walk through the pBuf one page at a time and use
         * transient mappings to zero the memory */

        PVR_ASSERT(pBuf->hOSMemHandle);

        while(ui32BytesRemaining > 0)
        {
            IMG_SIZE_T ui32BlockBytes = MIN(ui32BytesRemaining, HOST_PAGESIZE());
            CpuPAddr = OSMemHandleToCpuPAddr(pBuf->hOSMemHandle, ui32CurrentOffset);
            /* If the CpuPAddr isn't page aligned then start by writing up to the next page
             * boundary (or ui32BytesRemaining if less), so that subsequent iterations can
             * zero full physical pages. */
            if(CpuPAddr.uiAddr & (HOST_PAGESIZE() -1))
            {
                ui32BlockBytes =
                    MIN(ui32BytesRemaining, (IMG_UINT32)(HOST_PAGEALIGN(CpuPAddr.uiAddr) - CpuPAddr.uiAddr));
            }

            pvCpuVAddr = OSMapPhysToLin(CpuPAddr,
                                        ui32BlockBytes,
                                        PVRSRV_HAP_KERNEL_ONLY
                                        | (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK),
                                        IMG_NULL);
            if(!pvCpuVAddr)
            {
                PVR_DPF((PVR_DBG_ERROR, "ZeroBuf: OSMapPhysToLin while zeroing non-contiguous memory FAILED"));
                return IMG_FALSE;
            }
            OSMemSet(pvCpuVAddr, 0, ui32BlockBytes);
            OSUnMapPhysToLin(pvCpuVAddr,
                             ui32BlockBytes,
                             PVRSRV_HAP_KERNEL_ONLY
                             | (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK),
                             IMG_NULL);

            ui32BytesRemaining -= ui32BlockBytes;
            ui32CurrentOffset += ui32BlockBytes;
        }
    }

    return IMG_TRUE;
}

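/*
 * Example of the alignment handling in ZeroBuf's page walk (hypothetical
 * address, assuming a 4KB host page): if OSMemHandleToCpuPAddr returns
 * 0x80001300, the first iteration maps and zeroes only
 *
 *     HOST_PAGEALIGN(0x80001300) - 0x80001300 = 0x80002000 - 0x80001300 = 0xD00
 *
 * bytes, so every subsequent iteration starts page aligned and can zero a
 * full HOST_PAGESIZE() block per transient mapping.
 */
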
/*!
******************************************************************************

    @Function   FreeBuf

    @Description    Free a buffer previously allocated with BM_Alloc() or
                    unwrap one previously wrapped with BM_Wrap().
                    The buffer is identified by the buffer descriptor pBuf
                    returned at allocation.

    @Input      pBuf - buffer descriptor to free.
    @Input      ui32Flags - flags
    @Input      bFromAllocator - Is this being called by the
                    allocator?

    @Return     None.

 *****************************************************************************/
static IMG_VOID
FreeBuf (BM_BUF *pBuf, IMG_UINT32 ui32Flags, IMG_BOOL bFromAllocator)
{
    BM_MAPPING *pMapping;
    PVRSRV_DEVICE_NODE *psDeviceNode;

    PVR_DPF ((PVR_DBG_MESSAGE,
              "FreeBuf: pBuf=0x%x: DevVAddr=%08X CpuVAddr=0x%x CpuPAddr=%08X",
              (IMG_UINTPTR_T)pBuf, pBuf->DevVAddr.uiAddr,
              (IMG_UINTPTR_T)pBuf->CpuVAddr, pBuf->CpuPAddr.uiAddr));

    /* record mapping */
    pMapping = pBuf->pMapping;

    psDeviceNode = pMapping->pBMHeap->pBMContext->psDeviceNode;
    if (psDeviceNode->pfnCacheInvalidate)
    {
        psDeviceNode->pfnCacheInvalidate(psDeviceNode);
    }

    if(ui32Flags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR)
    {
        /* Submemhandle is required by exported mappings */
        if ((pBuf->ui32ExportCount == 0) && (pBuf->ui32RefCount == 0))
        {
            /* user supplied Device Virtual Address */
            if(ui32Flags & PVRSRV_MEM_RAM_BACKED_ALLOCATION)
            {
                /* RAM backed allocation */
                PVR_DPF ((PVR_DBG_ERROR, "FreeBuf: combination of DevVAddr management and RAM backing mode unsupported"));
            }
            else
            {
                /* free the mocked-up mapping */
                OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), pMapping, IMG_NULL);
                pBuf->pMapping = IMG_NULL; /*nulling pointer alias*/
            }
        }
    }
    else
    {
        /* BM supplied Device Virtual Address */
        if(pBuf->hOSMemHandle != pMapping->hOSMemHandle)
        {
            /* Submemhandle is required by exported mappings */
            if ((pBuf->ui32ExportCount == 0) && (pBuf->ui32RefCount == 0))
            {
                OSReleaseSubMemHandle(pBuf->hOSMemHandle, ui32Flags);
            }
        }

        if(ui32Flags & PVRSRV_MEM_RAM_BACKED_ALLOCATION)
        {
            /* Submemhandle is required by exported mappings */

            if ((pBuf->ui32ExportCount == 0) && (pBuf->ui32RefCount == 0))
            {
                /*
                    RAM backed allocation
                    Note: currently no need to distinguish between hm_env and hm_contiguous
                */
                PVR_ASSERT(pBuf->ui32ExportCount == 0);
                if (pBuf->pMapping->ui32Flags & (PVRSRV_MEM_SPARSE | PVRSRV_HAP_GPU_PAGEABLE))
                {
                    IMG_UINT32 ui32FreeSize = 0;
                    IMG_PVOID pvFreePtr = IMG_NULL;

                    if(pBuf->pMapping->ui32Flags & PVRSRV_MEM_SPARSE)
                    {
                        ui32FreeSize = sizeof(IMG_BOOL) * pBuf->pMapping->ui32NumVirtChunks;
                        pvFreePtr = pBuf->pMapping->pabMapChunk;
                    }

                    /* With sparse and page-able allocations we don't go through the sub-alloc RA */
                    BM_FreeMemory(pBuf->pMapping->pBMHeap, pBuf->DevVAddr.uiAddr, pBuf->pMapping);

                    if(pvFreePtr)
                    {
                        OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
                                  ui32FreeSize,
                                  pvFreePtr,
                                  IMG_NULL);
                    }
                }
                else
                {
                    RA_Free (pBuf->pMapping->pArena, pBuf->DevVAddr.uiAddr, IMG_FALSE);
                }
            }
        }
        else
        {
            if ((pBuf->ui32ExportCount == 0) && (pBuf->ui32RefCount == 0))
            {
                switch (pMapping->eCpuMemoryOrigin)
                {
                    case hm_wrapped:
                        OSUnReservePhys(pMapping->CpuVAddr, pMapping->uSize, ui32Flags, pMapping->hOSMemHandle);
                        break;
                    case hm_wrapped_virtaddr:
                        OSUnRegisterMem(pMapping->CpuVAddr, pMapping->uSize, ui32Flags, pMapping->hOSMemHandle);
                        break;
                    case hm_wrapped_scatter:
                        OSUnReserveDiscontigPhys(pMapping->CpuVAddr, pMapping->uSize, ui32Flags, pMapping->hOSMemHandle);
                        break;
                    case hm_wrapped_scatter_virtaddr:
                        OSUnRegisterDiscontigMem(pMapping->CpuVAddr, pMapping->uSize, ui32Flags, pMapping->hOSMemHandle);
                        break;
                    default:
                        break;
                }
            }
            if (bFromAllocator)
                DevMemoryFree (pMapping);

            if ((pBuf->ui32ExportCount == 0) && (pBuf->ui32RefCount == 0))
            {
                /* free the mocked-up mapping */
                OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), pMapping, IMG_NULL);
                pBuf->pMapping = IMG_NULL; /*nulling pointer alias*/
            }
        }
    }

    if ((pBuf->ui32ExportCount == 0) && (pBuf->ui32RefCount == 0))
    {
        OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_BUF), pBuf, IMG_NULL);
        /*not nulling pointer, copy on stack*/
    }
}

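/*
 * Note on the bookkeeping above: FreeBuf only releases the BM_MAPPING and
 * the BM_BUF itself once both pBuf->ui32ExportCount and pBuf->ui32RefCount
 * have dropped to zero, while a call that merely undoes an export
 * (bFromAllocator == IMG_FALSE) skips DevMemoryFree so the device mapping
 * survives for the remaining references.
 */
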
/*!
******************************************************************************

    @Function   BM_DestroyContext_AnyCb

    @Description    Check whether a buffer manager heap can be destroyed.

    @Input      psBMHeap

    @Return     PVRSRV_ERROR

 *****************************************************************************/
static PVRSRV_ERROR BM_DestroyContext_AnyCb(BM_HEAP *psBMHeap)
{
    if(psBMHeap->ui32Attribs
       & (PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG
         |PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG))
    {
        if (psBMHeap->pImportArena)
        {
            IMG_BOOL bTestDelete = RA_TestDelete(psBMHeap->pImportArena);
            if (!bTestDelete)
            {
                PVR_DPF ((PVR_DBG_ERROR, "BM_DestroyContext_AnyCb: RA_TestDelete failed"));
                return PVRSRV_ERROR_UNABLE_TO_DESTROY_BM_HEAP;
            }
        }
    }
    return PVRSRV_OK;
}

/*!
******************************************************************************

    @Function   BM_DestroyContext

    @Description    Destroy a buffer manager context. All allocated buffers
                    must be freed before calling this function. This function
                    is also called to perform cleanup during aborted
                    initialisations, so it is careful not to assume any given
                    resource has really been created/allocated.

    @Return     PVRSRV_ERROR

 *****************************************************************************/
PVRSRV_ERROR
BM_DestroyContext(IMG_HANDLE hBMContext,
                  IMG_BOOL *pbDestroyed)
{
    PVRSRV_ERROR eError;
    BM_CONTEXT *pBMContext = (BM_CONTEXT*)hBMContext;

    PVR_DPF ((PVR_DBG_MESSAGE, "BM_DestroyContext"));

    if (pbDestroyed != IMG_NULL)
    {
        *pbDestroyed = IMG_FALSE;
    }

    /*
        Exit straight away if it's an invalid context handle
    */
    if (pBMContext == IMG_NULL)
    {
        PVR_DPF ((PVR_DBG_ERROR, "BM_DestroyContext: Invalid handle"));
        return PVRSRV_ERROR_INVALID_PARAMS;
    }

    pBMContext->ui32RefCount--;

    if (pBMContext->ui32RefCount > 0)
    {
        /* Just return if there are more references to this context */
        return PVRSRV_OK;
    }

    /*
        Check whether there is a bug in the client which brought it here before
        all the allocations have been freed.
    */
    eError = List_BM_HEAP_PVRSRV_ERROR_Any(pBMContext->psBMHeap, &BM_DestroyContext_AnyCb);
    if(eError != PVRSRV_OK)
    {
        PVR_DPF ((PVR_DBG_ERROR, "BM_DestroyContext: List_BM_HEAP_PVRSRV_ERROR_Any failed"));
        return eError;
    }
    else
    {
        /* free the device memory context */
        eError = ResManFreeResByPtr(pBMContext->hResItem, CLEANUP_WITH_POLL);
        if(eError != PVRSRV_OK)
        {
            PVR_DPF ((PVR_DBG_ERROR, "BM_DestroyContext: ResManFreeResByPtr failed %d", eError));
            return eError;
        }

        /* mark context as destroyed */
        if (pbDestroyed != IMG_NULL)
        {
            *pbDestroyed = IMG_TRUE;
        }
    }

    return PVRSRV_OK;
}

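/*
 * Minimal caller sketch (hypothetical handle name): destruction is
 * refcounted, so only the call that drops the last reference reports
 * bDestroyed = IMG_TRUE.
 *
 *     IMG_BOOL bDestroyed;
 *     if (BM_DestroyContext(hBMContext, &bDestroyed) == PVRSRV_OK && bDestroyed)
 *     {
 *         // last reference released; resman has freed the context
 *     }
 */
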
/*!
******************************************************************************

    @Function   BM_DestroyContextCallBack_AnyVaCb

    @Description    Destroy Device memory context

    @Input      psBMHeap - heap to be freed.
    @Input      va - list of variable arguments with the following contents:
                    - psDeviceNode
    @Return     PVRSRV_ERROR

 *****************************************************************************/
static PVRSRV_ERROR BM_DestroyContextCallBack_AnyVaCb(BM_HEAP *psBMHeap, va_list va)
{
    PVRSRV_DEVICE_NODE *psDeviceNode;
    psDeviceNode = va_arg(va, PVRSRV_DEVICE_NODE*);

    /* Free up the import arenas */
    if(psBMHeap->ui32Attribs
       & (PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG
         |PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG))
    {
        if (psBMHeap->pImportArena)
        {
            RA_Delete (psBMHeap->pImportArena);
        }
    }
    else
    {
        PVR_DPF((PVR_DBG_ERROR, "BM_DestroyContext: backing store type unsupported"));
        return PVRSRV_ERROR_UNSUPPORTED_BACKING_STORE;
    }

    /* Free up the MMU Heaps */
    psDeviceNode->pfnMMUDelete(psBMHeap->pMMUHeap);

    /* Free Heap memory */
    OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_HEAP), psBMHeap, IMG_NULL);
    /*not nulling pointer, copy on stack*/

    return PVRSRV_OK;
}

/*!
******************************************************************************

    @Function   BM_DestroyContextCallBack

    @Description    Destroy Device memory context

    @Input      pvParam - opaque void ptr param
    @Input      ui32Param - opaque unsigned long param

    @Return     PVRSRV_ERROR

 *****************************************************************************/
static PVRSRV_ERROR BM_DestroyContextCallBack(IMG_PVOID pvParam,
                                              IMG_UINT32 ui32Param,
                                              IMG_BOOL bDummy)
{
    BM_CONTEXT *pBMContext = pvParam;
    PVRSRV_DEVICE_NODE *psDeviceNode;
    PVRSRV_ERROR eError;
/*  BM_CONTEXT **ppBMContext;
    BM_HEAP *psBMHeap, *psTmpBMHeap; */

    PVR_UNREFERENCED_PARAMETER(ui32Param);
    PVR_UNREFERENCED_PARAMETER(bDummy);

    /*
        Get DeviceNode from BMcontext
    */
    psDeviceNode = pBMContext->psDeviceNode;

    /*
        Free the import arenas and heaps
    */
    eError = List_BM_HEAP_PVRSRV_ERROR_Any_va(pBMContext->psBMHeap,
                                              &BM_DestroyContextCallBack_AnyVaCb,
                                              psDeviceNode);
    if (eError != PVRSRV_OK)
    {
        return eError;
    }
    /*
        'Finalise' the MMU
    */
    if (pBMContext->psMMUContext)
    {
        psDeviceNode->pfnMMUFinalise(pBMContext->psMMUContext);
    }

    /*
        Free up generic, useful resources - if they were allocated.
    */
    if (pBMContext->pBufferHash)
    {
        HASH_Delete(pBMContext->pBufferHash);
    }

    if (pBMContext == psDeviceNode->sDevMemoryInfo.pBMKernelContext)
    {
        /* Freeing the kernel context */
        psDeviceNode->sDevMemoryInfo.pBMKernelContext = IMG_NULL;
    }
    else
    {
        if (pBMContext->ppsThis != IMG_NULL)
        {
            /*
             * Remove context from the linked list
             */
            List_BM_CONTEXT_Remove(pBMContext);
        }
    }

    OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_CONTEXT), pBMContext, IMG_NULL);
    /*not nulling pointer, copy on stack*/

    return PVRSRV_OK;
}

static IMG_HANDLE BM_CreateContext_IncRefCount_AnyVaCb(BM_CONTEXT *pBMContext, va_list va)
{
    PRESMAN_CONTEXT hResManContext;
    hResManContext = va_arg(va, PRESMAN_CONTEXT);
    if(ResManFindResourceByPtr(hResManContext, pBMContext->hResItem) == PVRSRV_OK)
    {
        /* just increment the refcount and return the memory context found for this process */
        pBMContext->ui32RefCount++;
        return pBMContext;
    }
    return IMG_NULL;
}

static IMG_VOID BM_CreateContext_InsertHeap_ForEachVaCb(BM_HEAP *psBMHeap, va_list va)
{
    PVRSRV_DEVICE_NODE *psDeviceNode;
    BM_CONTEXT *pBMContext;
    psDeviceNode = va_arg(va, PVRSRV_DEVICE_NODE*);
    pBMContext = va_arg(va, BM_CONTEXT*);
    switch(psBMHeap->sDevArena.DevMemHeapType)
    {
        case DEVICE_MEMORY_HEAP_SHARED:
        case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
        {
            /* insert the heap into the device's MMU page directory/table */
            psDeviceNode->pfnMMUInsertHeap(pBMContext->psMMUContext, psBMHeap->pMMUHeap);
            break;
        }
    }
}

/*!
******************************************************************************

    @Function   BM_CreateContext

    @Description    Creates and initialises a buffer manager context. This
                    function must be called before any other buffer manager
                    functions.

    @Return     valid BM context handle - Success
                IMG_NULL - Failed

 *****************************************************************************/
IMG_HANDLE
BM_CreateContext(PVRSRV_DEVICE_NODE *psDeviceNode,
                 IMG_DEV_PHYADDR *psPDDevPAddr,
                 PVRSRV_PER_PROCESS_DATA *psPerProc,
                 IMG_BOOL *pbCreated)
{
    BM_CONTEXT *pBMContext;
/*  BM_HEAP *psBMHeap; */
    DEVICE_MEMORY_INFO *psDevMemoryInfo;
    IMG_BOOL bKernelContext;
    PRESMAN_CONTEXT hResManContext;

    PVR_DPF((PVR_DBG_MESSAGE, "BM_CreateContext"));

    if (psPerProc == IMG_NULL)
    {
        bKernelContext = IMG_TRUE;
        hResManContext = psDeviceNode->hResManContext;
    }
    else
    {
        bKernelContext = IMG_FALSE;
        hResManContext = psPerProc->hResManContext;
    }

    if (pbCreated != IMG_NULL)
    {
        *pbCreated = IMG_FALSE;
    }

    /* setup the device memory info. */
    psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;

    if (bKernelContext == IMG_FALSE)
    {
        IMG_HANDLE res = (IMG_HANDLE) List_BM_CONTEXT_Any_va(psDevMemoryInfo->pBMContext,
                                                             &BM_CreateContext_IncRefCount_AnyVaCb,
                                                             hResManContext);
        if (res)
        {
            return res;
        }
    }

    /* allocate a BM context */
    if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
                   sizeof (struct _BM_CONTEXT_),
                   (IMG_PVOID *)&pBMContext, IMG_NULL,
                   "Buffer Manager Context") != PVRSRV_OK)
    {
        PVR_DPF ((PVR_DBG_ERROR, "BM_CreateContext: Alloc failed"));
        return IMG_NULL;
    }
    OSMemSet(pBMContext, 0, sizeof (BM_CONTEXT));

    /* store the associated devicenode */
    pBMContext->psDeviceNode = psDeviceNode;

    /* This hash table is used to store BM_Wraps in a global way */
    /* INTEGRATION_POINT: 32 is an arbitrary limit on the number of hashed BM_wraps */
    pBMContext->pBufferHash = HASH_Create(32);
    if (pBMContext->pBufferHash==IMG_NULL)
    {
        PVR_DPF ((PVR_DBG_ERROR, "BM_CreateContext: HASH_Create failed"));
        goto cleanup;
    }

    if((IMG_NULL == psDeviceNode->pfnMMUInitialise) || (psDeviceNode->pfnMMUInitialise(psDeviceNode,
                                                        &pBMContext->psMMUContext,
                                                        psPDDevPAddr) != PVRSRV_OK))
    {
        PVR_DPF((PVR_DBG_ERROR, "BM_CreateContext: MMUInitialise failed"));
        goto cleanup;
    }

    if(bKernelContext)
    {
        /* just save the kernel context */
        PVR_ASSERT(psDevMemoryInfo->pBMKernelContext == IMG_NULL);
        psDevMemoryInfo->pBMKernelContext = pBMContext;
    }
    else
    {
        /*
            On the creation of each new context we must
            insert the kernel context's 'shared' and 'shared_exported'
            heaps into the new context
            - check the kernel context and heaps exist
        */
        PVR_ASSERT(psDevMemoryInfo->pBMKernelContext);

        if (psDevMemoryInfo->pBMKernelContext == IMG_NULL)
        {
            PVR_DPF((PVR_DBG_ERROR, "BM_CreateContext: psDevMemoryInfo->pBMKernelContext invalid"));
            goto cleanup;
        }

        PVR_ASSERT(psDevMemoryInfo->pBMKernelContext->psBMHeap);

        /*
            insert the kernel heaps structures into the new context's shared heap list
            Note. this will include the kernel only heaps but these will not actually
            be imported into the context nor returned to the client
        */
        pBMContext->psBMSharedHeap = psDevMemoryInfo->pBMKernelContext->psBMHeap;

        /*
            insert the shared heaps into the MMU page directory/table
            for the new context
        */
        List_BM_HEAP_ForEach_va(pBMContext->psBMSharedHeap,
                                &BM_CreateContext_InsertHeap_ForEachVaCb,
                                psDeviceNode,
                                pBMContext);

        /* Finally, insert the new context into the list of BM contexts */
        List_BM_CONTEXT_Insert(&psDevMemoryInfo->pBMContext, pBMContext);
    }

    /* Increment the refcount, as creation is successful */
    pBMContext->ui32RefCount++;

    /* register with resman */
    pBMContext->hResItem = ResManRegisterRes(hResManContext,
                                             RESMAN_TYPE_DEVICEMEM_CONTEXT,
                                             pBMContext,
                                             0,
                                             &BM_DestroyContextCallBack);
    if (pBMContext->hResItem == IMG_NULL)
    {
        PVR_DPF ((PVR_DBG_ERROR, "BM_CreateContext: ResManRegisterRes failed"));
        goto cleanup;
    }

    if (pbCreated != IMG_NULL)
    {
        *pbCreated = IMG_TRUE;
    }
    return (IMG_HANDLE)pBMContext;

cleanup:
    (IMG_VOID)BM_DestroyContextCallBack(pBMContext, 0, CLEANUP_WITH_POLL);

    return IMG_NULL;
}

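/*
 * Typical pairing (hypothetical caller, names assumed): a per-process
 * context is created against that process's resman context and later torn
 * down through BM_DestroyContext, which undoes the reference taken here.
 *
 *     IMG_BOOL bCreated;
 *     IMG_HANDLE hBMContext = BM_CreateContext(psDeviceNode, &sPDDevPAddr,
 *                                              psPerProc, &bCreated);
 *     ...
 *     BM_DestroyContext(hBMContext, IMG_NULL);
 */
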
static IMG_VOID *BM_CreateHeap_AnyVaCb(BM_HEAP *psBMHeap, va_list va)
{
    DEVICE_MEMORY_HEAP_INFO *psDevMemHeapInfo;
    psDevMemHeapInfo = va_arg(va, DEVICE_MEMORY_HEAP_INFO*);
    if (psBMHeap->sDevArena.ui32HeapID == psDevMemHeapInfo->ui32HeapID)
    {
        /* Match - just return already created heap */
        return psBMHeap;
    }
    else
    {
        return IMG_NULL;
    }
}

/*!
******************************************************************************

    @Function   BM_CreateHeap

    @Description    Creates and initialises a BM heap for a given BM context.

    @Return     valid heap handle - success
                IMG_NULL - failure

 *****************************************************************************/
IMG_HANDLE
BM_CreateHeap (IMG_HANDLE hBMContext,
               DEVICE_MEMORY_HEAP_INFO *psDevMemHeapInfo)
{
    BM_CONTEXT *pBMContext = (BM_CONTEXT*)hBMContext;
    PVRSRV_DEVICE_NODE *psDeviceNode;
    BM_HEAP *psBMHeap;

    PVR_DPF((PVR_DBG_MESSAGE, "BM_CreateHeap"));

    if(!pBMContext)
    {
        PVR_DPF((PVR_DBG_ERROR, "BM_CreateHeap: BM_CONTEXT null"));
        return IMG_NULL;
    }

    psDeviceNode = pBMContext->psDeviceNode;

    /*
     * Ensure that the heap size is a multiple of the data page size.
     */
    PVR_ASSERT((psDevMemHeapInfo->ui32HeapSize & (psDevMemHeapInfo->ui32DataPageSize - 1)) == 0);
    PVR_ASSERT(psDevMemHeapInfo->ui32HeapSize > 0);

    /*
        We may be asked to create a heap in a context which already has one.
        Test for refcount > 0 because PVRSRVGetDeviceMemHeapInfoKM doesn't increment the refcount.
        This does mean that the first call to PVRSRVCreateDeviceMemContextKM will first try to find
        heaps that we already know don't exist
    */
    if(pBMContext->ui32RefCount > 0)
    {
        psBMHeap = (BM_HEAP*)List_BM_HEAP_Any_va(pBMContext->psBMHeap,
                                                 &BM_CreateHeap_AnyVaCb,
                                                 psDevMemHeapInfo);

        if (psBMHeap)
        {
            return psBMHeap;
        }
    }

    if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
                   sizeof (BM_HEAP),
                   (IMG_PVOID *)&psBMHeap, IMG_NULL,
                   "Buffer Manager Heap") != PVRSRV_OK)
    {
        PVR_DPF((PVR_DBG_ERROR, "BM_CreateHeap: Alloc failed"));
        return IMG_NULL;
    }

    OSMemSet (psBMHeap, 0, sizeof (BM_HEAP));

    psBMHeap->sDevArena.ui32HeapID = psDevMemHeapInfo->ui32HeapID;
    psBMHeap->sDevArena.pszName = psDevMemHeapInfo->pszName;
    psBMHeap->sDevArena.BaseDevVAddr = psDevMemHeapInfo->sDevVAddrBase;
    psBMHeap->sDevArena.ui32Size = psDevMemHeapInfo->ui32HeapSize;
    psBMHeap->sDevArena.DevMemHeapType = psDevMemHeapInfo->DevMemHeapType;
    psBMHeap->sDevArena.ui32DataPageSize = psDevMemHeapInfo->ui32DataPageSize;
    psBMHeap->sDevArena.psDeviceMemoryHeapInfo = psDevMemHeapInfo;
    psBMHeap->ui32Attribs = psDevMemHeapInfo->ui32Attribs;
#if defined(SUPPORT_MEMORY_TILING)
    psBMHeap->ui32XTileStride = psDevMemHeapInfo->ui32XTileStride;
#endif

    /* tie the heap to the context */
    psBMHeap->pBMContext = pBMContext;

    psBMHeap->pMMUHeap = psDeviceNode->pfnMMUCreate (pBMContext->psMMUContext,
                                                     &psBMHeap->sDevArena,
                                                     &psBMHeap->pVMArena,
                                                     &psBMHeap->psMMUAttrib);
    if (!psBMHeap->pMMUHeap)
    {
        PVR_DPF((PVR_DBG_ERROR, "BM_CreateHeap: MMUCreate failed"));
        goto ErrorExit;
    }

    /* memory is allocated from the OS as required */
    psBMHeap->pImportArena = RA_Create (psDevMemHeapInfo->pszBSName,
                                        0, 0, IMG_NULL,
                                        MAX(HOST_PAGESIZE(), psBMHeap->sDevArena.ui32DataPageSize),
                                        &BM_ImportMemory,
                                        &BM_FreeMemory,
                                        IMG_NULL,
                                        psBMHeap);
    if(psBMHeap->pImportArena == IMG_NULL)
    {
        PVR_DPF((PVR_DBG_ERROR, "BM_CreateHeap: RA_Create failed"));
        goto ErrorExit;
    }

    if(psBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG)
    {
        /*
            memory comes from a device memory contiguous allocator (ra)
            Note: these arenas are shared across the system so don't delete
            as part of heap destroy
        */
        psBMHeap->pLocalDevMemArena = psDevMemHeapInfo->psLocalDevMemArena;
        if(psBMHeap->pLocalDevMemArena == IMG_NULL)
        {
            PVR_DPF((PVR_DBG_ERROR, "BM_CreateHeap: LocalDevMemArena null"));
            goto ErrorExit;
        }
    }

    /* insert heap into head of the heap list */
    List_BM_HEAP_Insert(&pBMContext->psBMHeap, psBMHeap);

    return (IMG_HANDLE)psBMHeap;

    /* handle error case */
ErrorExit:

    /* Free up the MMU if we created one */
    if (psBMHeap->pMMUHeap != IMG_NULL)
    {
        psDeviceNode->pfnMMUDelete (psBMHeap->pMMUHeap);
        /* don't finalise psMMUContext as we don't own it */
    }

    /* Free the Heap memory */
    OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_HEAP), psBMHeap, IMG_NULL);
    /*not nulling pointer, out of scope*/

    return IMG_NULL;
}

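/*
 * Sketch of the DEVICE_MEMORY_HEAP_INFO fields BM_CreateHeap consumes
 * (values purely illustrative, not taken from any real platform config):
 *
 *     sHeapInfo.ui32HeapID           = 1;
 *     sHeapInfo.pszName              = "General";
 *     sHeapInfo.pszBSName            = "General BS";
 *     sHeapInfo.sDevVAddrBase.uiAddr = 0x01000000;
 *     sHeapInfo.ui32HeapSize         = 0x00C00000;  // data-page multiple
 *     sHeapInfo.ui32DataPageSize     = 0x1000;
 *     sHeapInfo.ui32Attribs          = PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG;
 *     hDevMemHeap = BM_CreateHeap(hBMContext, &sHeapInfo);
 */
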
/*!
******************************************************************************

    @Function   BM_DestroyHeap

    @Description    Destroys a BM heap

    @Input      hDevMemHeap - heap handle

    @Return     None.

 *****************************************************************************/
IMG_VOID
BM_DestroyHeap (IMG_HANDLE hDevMemHeap)
{
    BM_HEAP* psBMHeap = (BM_HEAP*)hDevMemHeap;

    PVR_DPF((PVR_DBG_MESSAGE, "BM_DestroyHeap"));

    if(psBMHeap)
    {
        /* only dereference the heap once the handle has been validated */
        PVRSRV_DEVICE_NODE *psDeviceNode = psBMHeap->pBMContext->psDeviceNode;

        /* Free up the import arenas */
        if(psBMHeap->ui32Attribs
           & (PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG
             |PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG))
        {
            if (psBMHeap->pImportArena)
            {
                RA_Delete (psBMHeap->pImportArena);
            }
        }
        else
        {
            PVR_DPF((PVR_DBG_ERROR, "BM_DestroyHeap: backing store type unsupported"));
            return;
        }

        /* Free up the MMU Heap */
        psDeviceNode->pfnMMUDelete (psBMHeap->pMMUHeap);

        /* remove from the heap list */
        List_BM_HEAP_Remove(psBMHeap);
        OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_HEAP), psBMHeap, IMG_NULL);
    }
    else
    {
        PVR_DPF ((PVR_DBG_ERROR, "BM_DestroyHeap: invalid heap handle"));
    }
}

/*!
******************************************************************************

    @Function   BM_Reinitialise

    @Description    Reinitialise the buffer manager after a power down event.

    @Return     IMG_TRUE - Success
                IMG_FALSE - Failed

 *****************************************************************************/
IMG_BOOL
BM_Reinitialise (PVRSRV_DEVICE_NODE *psDeviceNode)
{

    PVR_DPF((PVR_DBG_MESSAGE, "BM_Reinitialise"));
    PVR_UNREFERENCED_PARAMETER(psDeviceNode);

    /* FIXME: Need to reenable all contexts
    List_BM_CONTEXT_ForEach(psDeviceNode->sDevMemoryInfo.pBMContext, MMU_Enable);
    */

    return IMG_TRUE;
}

/*!
******************************************************************************

    @Function   BM_Alloc

    @Description    Allocate a buffer mapped into both cpu and device virtual
                    memory maps.

    @Input      hDevMemHeap
    @Input      psDevVAddr - device virtual address specified by caller (optional)
    @Input      uSize - required size in bytes of the buffer.
    @Input      pui32Flags - bit mask of buffer property flags.
    @Input      uDevVAddrAlignment - required alignment in bytes, or 0.
    @Input      pvPrivData - opaque private data passed through to allocator
    @Input      ui32PrivDataLength - length of opaque private data

    @Output     phBuf - receives buffer handle
    @Output     pui32Flags - bit mask of heap property flags.

    @Return     IMG_TRUE - Success
                IMG_FALSE - Failure

 *****************************************************************************/
IMG_BOOL
BM_Alloc (  IMG_HANDLE          hDevMemHeap,
            IMG_DEV_VIRTADDR    *psDevVAddr,
            IMG_SIZE_T          uSize,
            IMG_UINT32          *pui32Flags,
            IMG_UINT32          uDevVAddrAlignment,
            IMG_PVOID           pvPrivData,
            IMG_UINT32          ui32PrivDataLength,
            IMG_UINT32          ui32ChunkSize,
            IMG_UINT32          ui32NumVirtChunks,
            IMG_UINT32          ui32NumPhysChunks,
            IMG_BOOL            *pabMapChunk,
            BM_HANDLE           *phBuf)
{
    BM_BUF *pBuf;
    BM_CONTEXT *pBMContext;
    BM_HEAP *psBMHeap;
    SYS_DATA *psSysData;
    IMG_UINT32 uFlags;

    if (pui32Flags == IMG_NULL)
    {
        PVR_DPF((PVR_DBG_ERROR, "BM_Alloc: invalid parameter"));
        PVR_DBG_BREAK;
        return IMG_FALSE;
    }

    uFlags = *pui32Flags;

    PVR_DPF ((PVR_DBG_MESSAGE,
              "BM_Alloc (uSize=0x%x, uFlags=0x%x, uDevVAddrAlignment=0x%x)",
              uSize, uFlags, uDevVAddrAlignment));

    SysAcquireData(&psSysData);

    psBMHeap = (BM_HEAP*)hDevMemHeap;
    pBMContext = psBMHeap->pBMContext;

    if(uDevVAddrAlignment == 0)
    {
        uDevVAddrAlignment = 1;
    }

    /*
     * Allocate something in which to record the allocation's details.
     */
    if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
                   sizeof (BM_BUF),
                   (IMG_PVOID *)&pBuf, IMG_NULL,
                   "Buffer Manager buffer") != PVRSRV_OK)
    {
        PVR_DPF((PVR_DBG_ERROR, "BM_Alloc: BM_Buf alloc FAILED"));
        return IMG_FALSE;
    }
    OSMemSet(pBuf, 0, sizeof (BM_BUF));

    /*
     * Allocate the memory itself now.
     */
    if (AllocMemory(pBMContext,
                    psBMHeap,
                    psDevVAddr,
                    uSize,
                    uFlags,
                    uDevVAddrAlignment,
                    pvPrivData,
                    ui32PrivDataLength,
                    ui32ChunkSize,
                    ui32NumVirtChunks,
                    ui32NumPhysChunks,
                    pabMapChunk,
                    pBuf) != IMG_TRUE)
    {
        OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof (BM_BUF), pBuf, IMG_NULL);
        /* not nulling pointer, out of scope */
        PVR_DPF((PVR_DBG_ERROR, "BM_Alloc: AllocMemory FAILED"));
        return IMG_FALSE;
    }

    PVR_DPF ((PVR_DBG_MESSAGE,
              "BM_Alloc (uSize=0x%x, uFlags=0x%x)",
              uSize, uFlags));

    /*
     * Assign the handle and return.
     */
    pBuf->ui32RefCount = 1;
    *phBuf = (BM_HANDLE)pBuf;
    *pui32Flags = uFlags | psBMHeap->ui32Attribs;

    /*
     * If the user has specified heap CACHETYPE flags themselves,
     * override any CACHETYPE flags inherited from the heap.
     */
    if(uFlags & PVRSRV_HAP_CACHETYPE_MASK)
    {
        *pui32Flags &= ~PVRSRV_HAP_CACHETYPE_MASK;
        *pui32Flags |= (uFlags & PVRSRV_HAP_CACHETYPE_MASK);
    }

    return IMG_TRUE;
}

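/*
 * Minimal allocation sketch (hypothetical flags and size): note the flags
 * word is in/out - the heap's attribute bits are OR'd into it on success.
 *
 *     BM_HANDLE  hBuf;
 *     IMG_UINT32 ui32Flags = PVRSRV_MEM_RAM_BACKED_ALLOCATION |
 *                            PVRSRV_MEM_READ | PVRSRV_MEM_WRITE;
 *     if (BM_Alloc(hDevMemHeap, IMG_NULL, 0x1000, &ui32Flags, 0,
 *                  IMG_NULL, 0, 0, 0, 0, IMG_NULL, &hBuf))
 *     {
 *         // ui32Flags now also carries the heap attribute bits
 *     }
 */
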
#if defined(PVR_LMA)
/*!
******************************************************************************

    @Function   ValidSysPAddrArrayForDev

    @Description    Verify the array of system addresses is accessible
                    by the given device.

    @Input      psDeviceNode
    @Input      psSysPAddr - system address array
    @Input      ui32PageCount - number of pages in the array
    @Input      ui32PageSize - size of each page

    @Return     IMG_BOOL

 *****************************************************************************/
static IMG_BOOL
ValidSysPAddrArrayForDev(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_SYS_PHYADDR *psSysPAddr, IMG_UINT32 ui32PageCount, IMG_SIZE_T ui32PageSize)
{
    IMG_UINT32 i;

    for (i = 0; i < ui32PageCount; i++)
    {
        IMG_SYS_PHYADDR sStartSysPAddr = psSysPAddr[i];
        IMG_SYS_PHYADDR sEndSysPAddr;

        if (!SysVerifySysPAddrToDevPAddr(psDeviceNode->sDevId.eDeviceType, sStartSysPAddr))
        {
            return IMG_FALSE;
        }

        sEndSysPAddr.uiAddr = sStartSysPAddr.uiAddr + ui32PageSize;

        if (!SysVerifySysPAddrToDevPAddr(psDeviceNode->sDevId.eDeviceType, sEndSysPAddr))
        {
            return IMG_FALSE;
        }
    }

    return IMG_TRUE;
}

/*!
******************************************************************************

    @Function   ValidSysPAddrRangeForDev

    @Description    Verify a system address range is accessible
                    by the given device.

    @Input      psDeviceNode
    @Input      sStartSysPAddr - starting system address
    @Input      ui32Range - length of address range

    @Return     IMG_BOOL

 *****************************************************************************/
static IMG_BOOL
ValidSysPAddrRangeForDev(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_SYS_PHYADDR sStartSysPAddr, IMG_SIZE_T ui32Range)
{
    IMG_SYS_PHYADDR sEndSysPAddr;

    if (!SysVerifySysPAddrToDevPAddr(psDeviceNode->sDevId.eDeviceType, sStartSysPAddr))
    {
        return IMG_FALSE;
    }

    sEndSysPAddr.uiAddr = sStartSysPAddr.uiAddr + ui32Range;

    if (!SysVerifySysPAddrToDevPAddr(psDeviceNode->sDevId.eDeviceType, sEndSysPAddr))
    {
        return IMG_FALSE;
    }

    return IMG_TRUE;
}

#define WRAP_MAPPING_SIZE(ui32ByteSize, ui32PageOffset) HOST_PAGEALIGN((ui32ByteSize) + (ui32PageOffset))

#define WRAP_PAGE_COUNT(ui32ByteSize, ui32PageOffset, ui32HostPageSize) (WRAP_MAPPING_SIZE(ui32ByteSize, ui32PageOffset) / (ui32HostPageSize))

#endif

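/*
 * Worked example of the two macros above (assuming a 4KB host page):
 * wrapping ui32ByteSize = 0x2100 bytes that begin ui32PageOffset = 0xF00
 * into their first page gives
 *
 *     WRAP_MAPPING_SIZE(0x2100, 0xF00)       = HOST_PAGEALIGN(0x3000) = 0x3000
 *     WRAP_PAGE_COUNT(0x2100, 0xF00, 0x1000) = 0x3000 / 0x1000        = 3
 *
 * i.e. three system pages must be validated for the device.
 */
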
1733/*!
1734******************************************************************************
1735
1736 @Function BM_Wrap
1737
1738 @Description Create a buffer which wraps user provided system physical
1739 memory.
1740 The wrapped memory must be page aligned. BM_Wrap will
1741 roundup the size to a multiple of cpu pages.
1742
1743 @Input ui32Size - size of memory to wrap.
1744 @Input ui32Offset - Offset into page of memory to wrap.
1745 @Input bPhysContig - Is the wrap physically contiguous.
1746 @Input psSysAddr - list of system physical page addresses of memory to wrap.
1747 @Input pvCPUVAddr - optional CPU kernel virtual address (Page aligned) of memory to wrap.
1748 @Input uFlags - bit mask of buffer property flags.
1749 @output phBuf - receives the buffer handle.
1750
1751 @Return IMG_TRUE - Success.
1752 IMG_FALSE - Failed
1753
1754 *****************************************************************************/
1755IMG_BOOL
1756BM_Wrap ( IMG_HANDLE hDevMemHeap,
1757 IMG_SIZE_T ui32Size,
1758 IMG_SIZE_T ui32Offset,
1759 IMG_BOOL bPhysContig,
1760 IMG_SYS_PHYADDR *psSysAddr,
1761 IMG_VOID *pvCPUVAddr,
1762 IMG_UINT32 *pui32Flags,
1763 BM_HANDLE *phBuf)
1764{
1765 BM_BUF *pBuf;
1766 BM_CONTEXT *psBMContext;
1767 BM_HEAP *psBMHeap;
1768 SYS_DATA *psSysData;
1769 IMG_SYS_PHYADDR sHashAddress;
1770 IMG_UINT32 uFlags;
1771
1772 psBMHeap = (BM_HEAP*)hDevMemHeap;
1773 psBMContext = psBMHeap->pBMContext;
1774
1775 uFlags = psBMHeap->ui32Attribs & (PVRSRV_HAP_CACHETYPE_MASK | PVRSRV_HAP_MAPTYPE_MASK | PVRSRV_HAP_MAPPING_CTRL_MASK);
1776
1777 if ((pui32Flags != IMG_NULL) && ((*pui32Flags & PVRSRV_HAP_CACHETYPE_MASK) != 0))
1778 {
1779 uFlags &= ~PVRSRV_HAP_CACHETYPE_MASK;
1780 uFlags |= *pui32Flags & PVRSRV_HAP_CACHETYPE_MASK;
1781 }
1782
1783 PVR_DPF ((PVR_DBG_MESSAGE,
1784 "BM_Wrap (uSize=0x%x, uOffset=0x%x, bPhysContig=0x%x, pvCPUVAddr=0x%x, uFlags=0x%x)",
1785 ui32Size, ui32Offset, bPhysContig, (IMG_UINTPTR_T)pvCPUVAddr, uFlags));
1786
1787 SysAcquireData(&psSysData);
1788
1789#if defined(PVR_LMA)
1790 if (bPhysContig)
1791 {
1792 if (!ValidSysPAddrRangeForDev(psBMContext->psDeviceNode, *psSysAddr, WRAP_MAPPING_SIZE(ui32Size, ui32Offset)))
1793 {
1794 PVR_DPF((PVR_DBG_ERROR, "BM_Wrap: System address range invalid for device"));
1795 return IMG_FALSE;
1796 }
1797 }
1798 else
1799 {
1800 IMG_SIZE_T ui32HostPageSize = HOST_PAGESIZE();
1801
1802 if (!ValidSysPAddrArrayForDev(psBMContext->psDeviceNode, psSysAddr, WRAP_PAGE_COUNT(ui32Size, ui32Offset, ui32HostPageSize), ui32HostPageSize))
1803 {
1804 PVR_DPF((PVR_DBG_ERROR, "BM_Wrap: Array of system addresses invalid for device"));
1805 return IMG_FALSE;
1806 }
1807 }
1808#endif
1809 /*
1810 * Insert the System Physical Address of the first page into the hash so we can optimise multiple wraps of the
1811 * same memory.
1812 */
1813 sHashAddress = psSysAddr[0];
1814
1815 /* Add the in-page offset to ensure a unique hash */
1816 sHashAddress.uiAddr += ui32Offset;
1817
1818 /* See if this address has already been wrapped */
1819 pBuf = (BM_BUF *)HASH_Retrieve(psBMContext->pBufferHash, sHashAddress.uiAddr);
1820
1821 if(pBuf)
1822 {
1823 IMG_SIZE_T ui32MappingSize = HOST_PAGEALIGN (ui32Size + ui32Offset);
1824
1825 /* Check base address, size and contiguity type match */
1826 if(pBuf->pMapping->uSize == ui32MappingSize && (pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped ||
1827 pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped_virtaddr))
1828 {
1829 PVR_DPF((PVR_DBG_MESSAGE,
1830 "BM_Wrap (Matched previous Wrap! uSize=0x%x, uOffset=0x%x, SysAddr=%08X)",
1831 ui32Size, ui32Offset, sHashAddress.uiAddr));
1832
1833 PVRSRVBMBufIncRef(pBuf);
1834 *phBuf = (BM_HANDLE)pBuf;
1835 if(pui32Flags)
1836 *pui32Flags = uFlags;
1837
1838 return IMG_TRUE;
1839 }
1840 else
1841 {
1842 /* Otherwise removed that item from the hash table
1843 (a workaround for buffer device class) */
1844 HASH_Remove(psBMContext->pBufferHash, (IMG_UINTPTR_T)sHashAddress.uiAddr);
1845 }
1846 }
1847
1848 /*
1849 * Allocate something in which to record the allocation's details.
1850 */
1851 if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
1852 sizeof (BM_BUF),
1853 (IMG_PVOID *)&pBuf, IMG_NULL,
1854 "Buffer Manager buffer") != PVRSRV_OK)
1855 {
1856 PVR_DPF((PVR_DBG_ERROR, "BM_Wrap: BM_Buf alloc FAILED"));
1857 return IMG_FALSE;
1858 }
1859 OSMemSet(pBuf, 0, sizeof (BM_BUF));
1860
1861 /*
1862 * Actually perform the memory wrap.
1863 */
1864 if (WrapMemory (psBMHeap, ui32Size, ui32Offset, bPhysContig, psSysAddr, pvCPUVAddr, uFlags, pBuf) != IMG_TRUE)
1865 {
1866 PVR_DPF((PVR_DBG_ERROR, "BM_Wrap: WrapMemory FAILED"));
1867 OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof (BM_BUF), pBuf, IMG_NULL);
1868 /*not nulling pointer, out of scope*/
1869 return IMG_FALSE;
1870 }
1871
1872 /* Only insert the buffer in the hash table if it is contiguous - allows for optimisation of multiple wraps
1873 * of the same contiguous buffer.
1874 */
1875 if(pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped || pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped_virtaddr)
1876 {
1877		/* Have we calculated the right hash key? */
1878 PVR_ASSERT(SysSysPAddrToCpuPAddr(sHashAddress).uiAddr == pBuf->CpuPAddr.uiAddr);
1879
1880 if (!HASH_Insert (psBMContext->pBufferHash, sHashAddress.uiAddr, (IMG_UINTPTR_T)pBuf))
1881 {
1882 FreeBuf (pBuf, uFlags, IMG_TRUE);
1883 PVR_DPF((PVR_DBG_ERROR, "BM_Wrap: HASH_Insert FAILED"));
1884 return IMG_FALSE;
1885 }
1886 }
1887
1888 PVR_DPF ((PVR_DBG_MESSAGE,
1889 "BM_Wrap (uSize=0x%x, uFlags=0x%x, devVAddr=%08X)",
1890 ui32Size, uFlags, pBuf->DevVAddr.uiAddr));
1891
1892 /*
1893 * Assign the handle and return.
1894 */
1895 pBuf->ui32RefCount = 1;
1896 *phBuf = (BM_HANDLE)pBuf;
1897 if(pui32Flags)
1898 {
1899		/* need to override the heap attributes from SINGLE_PROCESS to MULTI_PROCESS. */
1900 *pui32Flags = (uFlags & ~PVRSRV_HAP_MAPTYPE_MASK) | PVRSRV_HAP_MULTI_PROCESS;
1901 }
1902
1903 return IMG_TRUE;
1904}
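
/*
 * Illustrative sketch (not part of the original driver): wrapping the same
 * physically contiguous page twice. Because BM_Wrap hashes the first page's
 * system physical address plus the in-page offset, the second call should hit
 * the pBufferHash fast path above and hand back the original handle with its
 * reference count bumped. The heap handle and page address are assumed to be
 * supplied by the caller.
 */
static IMG_BOOL ExampleDoubleWrap(IMG_HANDLE hDevMemHeap, IMG_SYS_PHYADDR sPageAddr)
{
	BM_HANDLE hFirst, hSecond;
	IMG_UINT32 ui32Flags = 0;

	if (!BM_Wrap(hDevMemHeap, HOST_PAGESIZE(), 0, IMG_TRUE,
				 &sPageAddr, IMG_NULL, &ui32Flags, &hFirst))
	{
		return IMG_FALSE;
	}

	/* Same page, same offset: expected to match the previous wrap */
	if (!BM_Wrap(hDevMemHeap, HOST_PAGESIZE(), 0, IMG_TRUE,
				 &sPageAddr, IMG_NULL, &ui32Flags, &hSecond))
	{
		BM_Free(hFirst, ui32Flags);
		return IMG_FALSE;
	}

	PVR_ASSERT(hFirst == hSecond);

	BM_Free(hSecond, ui32Flags);	/* refcount 2 -> 1 */
	BM_Free(hFirst, ui32Flags);		/* refcount 1 -> 0, buffer freed */

	return IMG_TRUE;
}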
1905
1906/*!
1907******************************************************************************
1908
1909 @Function BM_Export
1910
1911 @Description Export a buffer previously allocated via BM_Alloc.
1912
1913 @Input   hBuf - buffer handle.
1914
1915
1916 @Return None.
1917
1918 *****************************************************************************/
1919
1920IMG_VOID
1921BM_Export (BM_HANDLE hBuf)
1922{
1923 BM_BUF *pBuf = (BM_BUF *)hBuf;
1924
1925 PVRSRVBMBufIncExport(pBuf);
1926}
1927
1928/*!
1929******************************************************************************
1930 @Function	BM_FreeExport
1931
1932 @Description	Release an export reference previously taken via BM_Export.
1933
1934 @Input	hBuf - buffer handle.
1935 @Input	ui32Flags - flags
1936 @Return	None.
1937**************************************************************************/
1938IMG_VOID
1939BM_FreeExport(BM_HANDLE hBuf,
1940 IMG_UINT32 ui32Flags)
1941{
1942 BM_BUF *pBuf = (BM_BUF *)hBuf;
1943
1944 PVRSRVBMBufDecExport(pBuf);
1945 FreeBuf (pBuf, ui32Flags, IMG_FALSE);
1946}
1947
1948/*!
1949******************************************************************************
1950 @Function	BM_Free
1951
1952 @Description	Free a buffer previously allocated via BM_Alloc or wrapped via BM_Wrap.
1953
1954 @Input hBuf - buffer handle.
1955 @Input ui32Flags - flags
1956
1957 @Return None.
1958**************************************************************************/
1959IMG_VOID
1960BM_Free (BM_HANDLE hBuf,
1961 IMG_UINT32 ui32Flags)
1962{
1963 BM_BUF *pBuf = (BM_BUF *)hBuf;
1964 SYS_DATA *psSysData;
1965 IMG_SYS_PHYADDR sHashAddr;
1966
1967 PVR_DPF ((PVR_DBG_MESSAGE, "BM_Free (h=0x%x)", (IMG_UINTPTR_T)hBuf));
1968 PVR_ASSERT (pBuf!=IMG_NULL);
1969
1970 if (pBuf == IMG_NULL)
1971 {
1972 PVR_DPF((PVR_DBG_ERROR, "BM_Free: invalid parameter"));
1973 return;
1974 }
1975
1976 SysAcquireData(&psSysData);
1977
1978 PVRSRVBMBufDecRef(pBuf);
1979 if(pBuf->ui32RefCount == 0)
1980 {
1981 if(pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped || pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped_virtaddr)
1982 {
1983 sHashAddr = SysCpuPAddrToSysPAddr(pBuf->CpuPAddr);
1984
1985 HASH_Remove (pBuf->pMapping->pBMHeap->pBMContext->pBufferHash, (IMG_UINTPTR_T)sHashAddr.uiAddr);
1986 }
1987 FreeBuf (pBuf, ui32Flags, IMG_TRUE);
1988 }
1989}
1990
1991
1992/*!
1993******************************************************************************
1994
1995 @Function BM_HandleToCpuVaddr
1996
1997 @Description	Retrieve the CPU virtual address associated with a buffer.
1998
1999 @Input		hBuf - buffer handle.
2000
2001 @Return		The buffer's CPU virtual address, or NULL if none exists.
2002
2003 *****************************************************************************/
2004IMG_CPU_VIRTADDR
2005BM_HandleToCpuVaddr (BM_HANDLE hBuf)
2006{
2007 BM_BUF *pBuf = (BM_BUF *)hBuf;
2008
2009 PVR_ASSERT (pBuf != IMG_NULL);
2010 if (pBuf == IMG_NULL)
2011 {
2012 PVR_DPF((PVR_DBG_ERROR, "BM_HandleToCpuVaddr: invalid parameter"));
2013 return IMG_NULL;
2014 }
2015
2016 PVR_DPF ((PVR_DBG_MESSAGE,
2017 "BM_HandleToCpuVaddr(h=0x%x)=0x%x",
2018 (IMG_UINTPTR_T)hBuf, (IMG_UINTPTR_T)pBuf->CpuVAddr));
2019 return pBuf->CpuVAddr;
2020}
2021
2022
2023/*!
2024******************************************************************************
2025
2026 @Function BM_HandleToDevVaddr
2027
2028 @Description	Retrieve the device virtual address associated with a buffer.
2029
2030 @Input		hBuf - buffer handle.
2031
2032 @Return		The buffer's device virtual address.
2033
2034 *****************************************************************************/
2035IMG_DEV_VIRTADDR
2036BM_HandleToDevVaddr (BM_HANDLE hBuf)
2037{
2038 BM_BUF *pBuf = (BM_BUF *)hBuf;
2039
2040 PVR_ASSERT (pBuf != IMG_NULL);
2041 if (pBuf == IMG_NULL)
2042 {
2043 IMG_DEV_VIRTADDR DevVAddr = {0};
2044 PVR_DPF((PVR_DBG_ERROR, "BM_HandleToDevVaddr: invalid parameter"));
2045 return DevVAddr;
2046 }
2047
2048 PVR_DPF ((PVR_DBG_MESSAGE, "BM_HandleToDevVaddr(h=0x%x)=%08X", (IMG_UINTPTR_T)hBuf, pBuf->DevVAddr.uiAddr));
2049 return pBuf->DevVAddr;
2050}
2051
2052
2053/*!
2054******************************************************************************
2055
2056 @Function BM_HandleToSysPaddr
2057
2058 @Description	Retrieve the system physical address associated with a buffer.
2059
2060 @Input		hBuf - buffer handle.
2061
2062 @Return		The buffer's system physical address.
2063
2064 *****************************************************************************/
2065IMG_SYS_PHYADDR
2066BM_HandleToSysPaddr (BM_HANDLE hBuf)
2067{
2068 BM_BUF *pBuf = (BM_BUF *)hBuf;
2069
2070 PVR_ASSERT (pBuf != IMG_NULL);
2071
2072 if (pBuf == IMG_NULL)
2073 {
2074 IMG_SYS_PHYADDR PhysAddr = {0};
2075 PVR_DPF((PVR_DBG_ERROR, "BM_HandleToSysPaddr: invalid parameter"));
2076 return PhysAddr;
2077 }
2078
2079 PVR_DPF ((PVR_DBG_MESSAGE, "BM_HandleToSysPaddr(h=0x%x)=%08X", (IMG_UINTPTR_T)hBuf, pBuf->CpuPAddr.uiAddr));
2080 return SysCpuPAddrToSysPAddr (pBuf->CpuPAddr);
2081}
2082
2083/*!
2084******************************************************************************
2085
2086 @Function	BM_HandleToOSMemHandle
2087
2088 @Description	Retrieve the underlying memory handle associated with a buffer.
2089
2090 @Input		hBuf - buffer handle.
2091
2092 @Return		OS-specific memory handle.
2093
2094 *****************************************************************************/
2095IMG_HANDLE
2096BM_HandleToOSMemHandle(BM_HANDLE hBuf)
2097{
2098 BM_BUF *pBuf = (BM_BUF *)hBuf;
2099
2100 PVR_ASSERT (pBuf != IMG_NULL);
2101
2102 if (pBuf == IMG_NULL)
2103 {
2104 PVR_DPF((PVR_DBG_ERROR, "BM_HandleToOSMemHandle: invalid parameter"));
2105 return IMG_NULL;
2106 }
2107
2108 PVR_DPF ((PVR_DBG_MESSAGE,
2109 "BM_HandleToOSMemHandle(h=0x%x)=0x%x",
2110 (IMG_UINTPTR_T)hBuf, (IMG_UINTPTR_T)pBuf->hOSMemHandle));
2111 return pBuf->hOSMemHandle;
2112}
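
/*
 * Illustrative sketch (not part of the original driver): the accessors above
 * resolve a single BM_HANDLE into its addresses in each memory space. All
 * four values refer to the same underlying allocation.
 */
static IMG_VOID ExampleQueryBuffer(BM_HANDLE hBuf)
{
	IMG_CPU_VIRTADDR pvCpuVAddr = BM_HandleToCpuVaddr(hBuf);
	IMG_DEV_VIRTADDR sDevVAddr = BM_HandleToDevVaddr(hBuf);
	IMG_SYS_PHYADDR sSysPAddr = BM_HandleToSysPaddr(hBuf);
	IMG_HANDLE hOSMemHandle = BM_HandleToOSMemHandle(hBuf);

	PVR_DPF((PVR_DBG_MESSAGE,
			 "buffer: cpu=0x%x dev=%08X sys=%08X os=0x%x",
			 (IMG_UINTPTR_T)pvCpuVAddr, sDevVAddr.uiAddr,
			 sSysPAddr.uiAddr, (IMG_UINTPTR_T)hOSMemHandle));
}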
2113
2114/*----------------------------------------------------------------------------
2115<function>
2116 FUNCTION: BM_UnmapFromDev
2117
2118    PURPOSE:    Unmaps a buffer from GPU virtual address space, but otherwise
2119                leaves the buffer intact (i.e. not changing any CPU virtual space
2120                mappings, etc.). This, in conjunction with BM_RemapToDev(), can
2121                be used to migrate buffers in and out of GPU virtual address
2122                space to deal with fragmentation and/or the limited size of the
2123                GPU MMU.
2124
2125    PARAMETERS: In: hBuf - buffer handle.
2126    RETURNS:    Remaining mapping count (>= 0) - Success
2127                Negative PVRSRV_ERROR code - Failure
2128</function>
2129-----------------------------------------------------------------------------*/
2130IMG_INT32
2131BM_UnmapFromDev(BM_HANDLE hBuf)
2132{
2133 BM_BUF *pBuf = (BM_BUF *)hBuf;
2134 BM_MAPPING *pMapping;
2135 IMG_INT32 result;
2136
2137 PVR_ASSERT (pBuf != IMG_NULL);
2138
2139 if (pBuf == IMG_NULL)
2140 {
2141 PVR_DPF((PVR_DBG_ERROR, "BM_UnmapFromDev: invalid parameter"));
2142 return -(PVRSRV_ERROR_INVALID_PARAMS);
2143 }
2144
2145 pMapping = pBuf->pMapping;
2146
2147 if ((pMapping->ui32Flags & PVRSRV_HAP_GPU_PAGEABLE) == 0)
2148 {
2149 PVR_DPF((PVR_DBG_ERROR, "BM_UnmapFromDev: cannot unmap non-pageable buffer"));
2150 return -(PVRSRV_ERROR_STILL_MAPPED);
2151 }
2152
2153 result = DevMemoryFree(pMapping);
2154
2155 if(result == 0)
2156 pBuf->DevVAddr.uiAddr = PVRSRV_BAD_DEVICE_ADDRESS;
2157
2158 return result;
2159}
2160
2161/*----------------------------------------------------------------------------
2162<function>
2163 FUNCTION: BM_RemapToDev
2164
2165 PURPOSE: Maps a buffer back into GPU virtual address space, after it
2166 has been BM_UnmapFromDev()'d. After this operation, the GPU
2167 virtual address may have changed, so BM_HandleToDevVaddr()
2168 should be called to get the new address.
2169
2170 PARAMETERS: In: hBuf - buffer handle.
2171    RETURNS:    New mapping count (> 0) - Success
2172                Negative PVRSRV_ERROR code - Failure
2173</function>
2174-----------------------------------------------------------------------------*/
2175IMG_INT32
2176BM_RemapToDev(BM_HANDLE hBuf)
2177{
2178 BM_BUF *pBuf = (BM_BUF *)hBuf;
2179 BM_MAPPING *pMapping;
2180 IMG_INT32 mapCount;
2181
2182 PVR_ASSERT (pBuf != IMG_NULL);
2183
2184 if (pBuf == IMG_NULL)
2185 {
2186 PVR_DPF((PVR_DBG_ERROR, "BM_RemapToDev: invalid parameter"));
2187 return -PVRSRV_ERROR_INVALID_PARAMS;
2188 }
2189
2190 pMapping = pBuf->pMapping;
2191
2192 if ((pMapping->ui32Flags & PVRSRV_HAP_GPU_PAGEABLE) == 0)
2193 {
2194 PVR_DPF((PVR_DBG_ERROR, "BM_RemapToDev: cannot remap non-pageable buffer"));
2195 return -PVRSRV_ERROR_BAD_MAPPING;
2196 }
2197
2198 mapCount = DevMemoryAlloc(pMapping->pBMHeap->pBMContext, pMapping, IMG_NULL,
2199 pMapping->ui32Flags, pMapping->ui32DevVAddrAlignment, &pBuf->DevVAddr);
2200
2201 if(mapCount <= 0)
2202 {
2203 PVR_DPF((PVR_DBG_WARNING, "BM_RemapToDev: failed to allocate device memory"));
2204 }
2205
2206 return mapCount;
2207}
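
/*
 * Illustrative sketch (not part of the original driver): the intended
 * unmap/remap migration pattern for GPU-pageable buffers described above.
 * After a successful BM_RemapToDev() the buffer may live at a different GPU
 * virtual address, so it must be re-queried via BM_HandleToDevVaddr().
 */
static IMG_INT32 ExampleMigrateBuffer(BM_HANDLE hBuf)
{
	IMG_INT32 result;
	IMG_DEV_VIRTADDR sNewDevVAddr;

	/* Release the GPU virtual mapping; CPU mappings are left intact */
	result = BM_UnmapFromDev(hBuf);
	if (result < 0)
	{
		return result;
	}

	/* ...GPU virtual address space may be compacted or reused here... */

	/* Map back in; a positive return value is the new mapping count */
	result = BM_RemapToDev(hBuf);
	if (result <= 0)
	{
		return result;
	}

	sNewDevVAddr = BM_HandleToDevVaddr(hBuf);
	PVR_ASSERT(sNewDevVAddr.uiAddr != PVRSRV_BAD_DEVICE_ADDRESS);

	return result;
}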
2208
2209/*!
2210******************************************************************************
2211
2212 @Function DevMemoryAlloc
2213
2214 @Description Allocate device memory for a given physical/virtual memory
2215 mapping. We handle the main cases where device MMU mappings
2216 are required - these are the dynamic cases: all wrappings of
2217 host OS memory and host OS imports for SYS_MMU_NORMAL mode.
2218
2219 If no MMU support is required then we simply map device virtual
2220 space as device physical space.
2221
2222 @Input      pBMContext - the buffer manager context to allocate from.
2223 @Output pMapping - the mapping descriptor to be filled in for this
2224 allocation.
2225 @Output pActualSize - the actual size of the block allocated in
2226 bytes.
2227 @Input uFlags - allocation flags
2228 @Input dev_vaddr_alignment - required device virtual address
2229 alignment, or 0.
2230 @Output pDevVAddr - receives the device virtual base address of the
2231 allocated block.
2232 @Return 	IMG_INT32 - Reference count (>= 1) - Success
2233             Negative PVRSRV_ERROR code - Failed.
2234
2235 *****************************************************************************/
2236static IMG_INT32
2237DevMemoryAlloc (BM_CONTEXT *pBMContext,
2238 BM_MAPPING *pMapping,
2239 IMG_SIZE_T *pActualSize,
2240 IMG_UINT32 uFlags,
2241 IMG_UINT32 dev_vaddr_alignment,
2242 IMG_DEV_VIRTADDR *pDevVAddr)
2243{
2244 PVRSRV_DEVICE_NODE *psDeviceNode;
2245#ifdef PDUMP
2246 IMG_UINT32 ui32PDumpSize = (IMG_UINT32)pMapping->uSize;
2247#endif
2248
2249 if(pMapping->ui32MappingCount > 0)
2250 {
2251 pMapping->ui32MappingCount++;
2252 *pDevVAddr = pMapping->DevVAddr;
2253 return pMapping->ui32MappingCount;
2254 }
2255
2256 psDeviceNode = pBMContext->psDeviceNode;
2257
2258 pMapping->ui32DevVAddrAlignment = dev_vaddr_alignment;
2259
2260 if(uFlags & PVRSRV_MEM_INTERLEAVED)
2261 {
2262 /* double the size */
2263		/* don't continue to alter the size each time the buffer is remapped;
2264		 * we only want to do this the first time
2265		 */
2266 /* TODO: FIXME: There is something wrong with this logic */
2267 if (pMapping->ui32MappingCount == 0)
2268 pMapping->uSize *= 2;
2269 }
2270
2271#ifdef PDUMP
2272 if(uFlags & PVRSRV_MEM_DUMMY)
2273 {
2274 /* only one page behind a dummy allocation */
2275 ui32PDumpSize = pMapping->pBMHeap->sDevArena.ui32DataPageSize;
2276 }
2277#endif
2278
2279	/* Check we haven't fallen through a gap */
2280 PVR_ASSERT(pMapping->uSizeVM != 0);
2281 /* allocate device linear space */
2282 if (!psDeviceNode->pfnMMUAlloc (pMapping->pBMHeap->pMMUHeap,
2283 pMapping->uSizeVM,
2284 pActualSize,
2285 0,
2286 dev_vaddr_alignment,
2287 &(pMapping->DevVAddr)))
2288 {
2289 PVR_DPF((PVR_DBG_ERROR, "DevMemoryAlloc ERROR MMU_Alloc"));
2290 pDevVAddr->uiAddr = PVRSRV_BAD_DEVICE_ADDRESS;
2291 return -(PVRSRV_ERROR_FAILED_TO_ALLOC_VIRT_MEMORY);
2292 }
2293
2294#ifdef SUPPORT_SGX_MMU_BYPASS
2295 EnableHostAccess(pBMContext->psMMUContext);
2296#endif
2297
2298#if defined(PDUMP)
2299	/* pdump the memory allocation */
2300 PDUMPMALLOCPAGES(&psDeviceNode->sDevId,
2301 pMapping->DevVAddr.uiAddr,
2302 pMapping->CpuVAddr,
2303 pMapping->hOSMemHandle,
2304 ui32PDumpSize,
2305 pMapping->pBMHeap->sDevArena.ui32DataPageSize,
2306#if defined(SUPPORT_PDUMP_MULTI_PROCESS)
2307 psDeviceNode->pfnMMUIsHeapShared(pMapping->pBMHeap->pMMUHeap),
2308#else
2309 IMG_FALSE, // unused
2310#endif /* SUPPORT_PDUMP_MULTI_PROCESS */
2311 (IMG_HANDLE)pMapping);
2312#endif
2313
2314 switch (pMapping->eCpuMemoryOrigin)
2315 {
2316 case hm_wrapped:
2317 case hm_wrapped_virtaddr:
2318 case hm_contiguous:
2319 {
2320 if (uFlags & PVRSRV_MEM_SPARSE)
2321 {
2322 /* Check if this device supports sparse mappings */
2323 PVR_ASSERT(psDeviceNode->pfnMMUMapPagesSparse != IMG_NULL);
2324 psDeviceNode->pfnMMUMapPagesSparse(pMapping->pBMHeap->pMMUHeap,
2325 pMapping->DevVAddr,
2326 SysCpuPAddrToSysPAddr (pMapping->CpuPAddr),
2327 pMapping->ui32ChunkSize,
2328 pMapping->ui32NumVirtChunks,
2329 pMapping->ui32NumPhysChunks,
2330 pMapping->pabMapChunk,
2331 uFlags,
2332 (IMG_HANDLE)pMapping);
2333 }
2334 else
2335 {
2336 psDeviceNode->pfnMMUMapPages ( pMapping->pBMHeap->pMMUHeap,
2337 pMapping->DevVAddr,
2338 SysCpuPAddrToSysPAddr (pMapping->CpuPAddr),
2339 pMapping->uSize,
2340 uFlags,
2341 (IMG_HANDLE)pMapping);
2342 }
2343 *pDevVAddr = pMapping->DevVAddr;
2344 break;
2345 }
2346 case hm_env:
2347 {
2348 if (uFlags & PVRSRV_MEM_SPARSE)
2349 {
2350 /* Check if this device supports sparse mappings */
2351 PVR_ASSERT(psDeviceNode->pfnMMUMapShadowSparse != IMG_NULL);
2352 psDeviceNode->pfnMMUMapShadowSparse(pMapping->pBMHeap->pMMUHeap,
2353 pMapping->DevVAddr,
2354 pMapping->ui32ChunkSize,
2355 pMapping->ui32NumVirtChunks,
2356 pMapping->ui32NumPhysChunks,
2357 pMapping->pabMapChunk,
2358 pMapping->CpuVAddr,
2359 pMapping->hOSMemHandle,
2360 pDevVAddr,
2361 uFlags,
2362 (IMG_HANDLE)pMapping);
2363 }
2364 else
2365 {
2366 psDeviceNode->pfnMMUMapShadow ( pMapping->pBMHeap->pMMUHeap,
2367 pMapping->DevVAddr,
2368 pMapping->uSize,
2369 pMapping->CpuVAddr,
2370 pMapping->hOSMemHandle,
2371 pDevVAddr,
2372 uFlags,
2373 (IMG_HANDLE)pMapping);
2374 }
2375 break;
2376 }
2377 case hm_wrapped_scatter:
2378 case hm_wrapped_scatter_virtaddr:
2379 {
2380 psDeviceNode->pfnMMUMapScatter (pMapping->pBMHeap->pMMUHeap,
2381 pMapping->DevVAddr,
2382 pMapping->psSysAddr,
2383 pMapping->uSize,
2384 uFlags,
2385 (IMG_HANDLE)pMapping);
2386
2387 *pDevVAddr = pMapping->DevVAddr;
2388 break;
2389 }
2390 default:
2391 PVR_DPF((PVR_DBG_ERROR,
2392 "Illegal value %d for pMapping->eCpuMemoryOrigin",
2393 pMapping->eCpuMemoryOrigin));
2394 return -(PVRSRV_ERROR_FAILED_TO_MAP_PAGE_TABLE);
2395 }
2396
2397#ifdef SUPPORT_SGX_MMU_BYPASS
2398 DisableHostAccess(pBMContext->psMMUContext);
2399#endif
2400
2401 pMapping->ui32MappingCount = 1;
2402
2403 return pMapping->ui32MappingCount;
2404}
2405
2406static IMG_INT32
2407DevMemoryFree (BM_MAPPING *pMapping)
2408{
2409 PVRSRV_DEVICE_NODE *psDeviceNode;
2410 IMG_DEV_PHYADDR sDevPAddr;
2411#ifdef PDUMP
2412 IMG_UINT32 ui32PSize;
2413#endif
2414
2415 if(pMapping->ui32MappingCount > 1)
2416 {
2417 pMapping->ui32MappingCount--;
2418
2419 /* Nothing else to do for now */
2420 return pMapping->ui32MappingCount;
2421 }
2422
2423 if (pMapping->ui32MappingCount == 0)
2424 {
2425		/* already unmapped from the GPU - bail */
2426 return -(PVRSRV_ERROR_MAPPING_NOT_FOUND);
2427 }
2428
2429	/* At this point pMapping->ui32MappingCount is 1, so we are
2430	 * ready to release the GPU mapping */
2431
2432 psDeviceNode = pMapping->pBMHeap->pBMContext->psDeviceNode;
2433 sDevPAddr = psDeviceNode->pfnMMUGetPhysPageAddr(pMapping->pBMHeap->pMMUHeap, pMapping->DevVAddr);
2434
2435 if (sDevPAddr.uiAddr != 0)
2436 {
2437#ifdef PDUMP
2438 /* pdump the memory free */
2439 if(pMapping->ui32Flags & PVRSRV_MEM_DUMMY)
2440 {
2441 /* physical memory size differs in the case of Dummy allocations */
2442 ui32PSize = pMapping->pBMHeap->sDevArena.ui32DataPageSize;
2443 }
2444 else
2445 {
2446 ui32PSize = (IMG_UINT32)pMapping->uSize;
2447 }
2448
2449 PDUMPFREEPAGES(pMapping->pBMHeap,
2450 pMapping->DevVAddr,
2451 ui32PSize,
2452 pMapping->pBMHeap->sDevArena.ui32DataPageSize,
2453 (IMG_HANDLE)pMapping,
2454 (pMapping->ui32Flags & PVRSRV_MEM_INTERLEAVED) ? IMG_TRUE : IMG_FALSE,
2455 (pMapping->ui32Flags & PVRSRV_MEM_SPARSE) ? IMG_TRUE : IMG_FALSE);
2456#endif
2457 }
2458 PVR_ASSERT(pMapping->uSizeVM != 0);
2459 psDeviceNode->pfnMMUFree (pMapping->pBMHeap->pMMUHeap, pMapping->DevVAddr, IMG_CAST_TO_DEVVADDR_UINT(pMapping->uSizeVM));
2460
2461 pMapping->ui32MappingCount = 0;
2462
2463 return pMapping->ui32MappingCount;
2464}
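
/*
 * Illustrative trace (not part of the original driver): DevMemoryAlloc() and
 * DevMemoryFree() reference count the single GPU mapping behind a BM_MAPPING.
 * Under that scheme ui32MappingCount evolves as follows:
 *
 *   DevMemoryAlloc()  0 -> 1   page tables written, DevVAddr assigned
 *   DevMemoryAlloc()  1 -> 2   fast path: the existing DevVAddr is returned
 *   DevMemoryFree()   2 -> 1   nothing is unmapped yet
 *   DevMemoryFree()   1 -> 0   pfnMMUFree tears the mapping down
 *   DevMemoryFree()   0 -> -(PVRSRV_ERROR_MAPPING_NOT_FOUND)
 */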
2465
2466/* If this array grows larger, it might be preferable to use a hashtable rather than an array. */
2467#ifndef XPROC_WORKAROUND_NUM_SHAREABLES
2468#define XPROC_WORKAROUND_NUM_SHAREABLES 500
2469#endif
2470
2471#define XPROC_WORKAROUND_BAD_SHAREINDEX 0773407734
2472
2473#define XPROC_WORKAROUND_UNKNOWN 0
2474#define XPROC_WORKAROUND_ALLOC 1
2475#define XPROC_WORKAROUND_MAP 2
2476
2477static IMG_UINT32 gXProcWorkaroundShareIndex = XPROC_WORKAROUND_BAD_SHAREINDEX;
2478static IMG_UINT32 gXProcWorkaroundState = XPROC_WORKAROUND_UNKNOWN;
2479
2480/* PRQA S 0686 10 */ /* force compiler to init structure */
2481XPROC_DATA gXProcWorkaroundShareData[XPROC_WORKAROUND_NUM_SHAREABLES] = {{0}};
2482
2483IMG_INT32 BM_XProcGetShareDataRefCount(IMG_UINT32 ui32Index)
2484{
2485 if(ui32Index >= XPROC_WORKAROUND_NUM_SHAREABLES)
2486 return -1;
2487
2488 return gXProcWorkaroundShareData[ui32Index].ui32RefCount;
2489}
2490
2491PVRSRV_ERROR BM_XProcWorkaroundSetShareIndex(IMG_UINT32 ui32Index)
2492{
2493 /* if you fail this assertion - did you acquire the mutex?
2494 did you call "set" exactly once?
2495 did you call "unset" exactly once per set?
2496 */
2497 if (gXProcWorkaroundShareIndex != XPROC_WORKAROUND_BAD_SHAREINDEX)
2498 {
2499 PVR_DPF((PVR_DBG_ERROR, "No, it's already set!"));
2500 return PVRSRV_ERROR_INVALID_PARAMS;
2501 }
2502
2503 gXProcWorkaroundShareIndex = ui32Index;
2504 gXProcWorkaroundState = XPROC_WORKAROUND_MAP;
2505
2506 return PVRSRV_OK;
2507}
2508
2509PVRSRV_ERROR BM_XProcWorkaroundUnsetShareIndex(IMG_UINT32 ui32Index)
2510{
2511 /* if you fail this assertion - did you acquire the mutex?
2512 did you call "set" exactly once?
2513 did you call "unset" exactly once per set?
2514 */
2515 if (gXProcWorkaroundShareIndex == XPROC_WORKAROUND_BAD_SHAREINDEX)
2516 {
2517 PVR_DPF((PVR_DBG_ERROR, "huh? how can it be bad??"));
2518 return PVRSRV_ERROR_INVALID_PARAMS;
2519 }
2520 if (gXProcWorkaroundShareIndex != ui32Index)
2521 {
2522 PVR_DPF((PVR_DBG_ERROR, "gXProcWorkaroundShareIndex == 0x%08x != 0x%08x == ui32Index", gXProcWorkaroundShareIndex, ui32Index));
2523 return PVRSRV_ERROR_INVALID_PARAMS;
2524 }
2525
2526 gXProcWorkaroundShareIndex = XPROC_WORKAROUND_BAD_SHAREINDEX;
2527 gXProcWorkaroundState = XPROC_WORKAROUND_UNKNOWN;
2528
2529 return PVRSRV_OK;
2530}
2531
2532PVRSRV_ERROR BM_XProcWorkaroundFindNewBufferAndSetShareIndex(IMG_UINT32 *pui32Index)
2533{
2534 /* if you fail this assertion - did you acquire the mutex?
2535 did you call "set" exactly once?
2536 did you call "unset" exactly once per set?
2537 */
2538 if (gXProcWorkaroundShareIndex != XPROC_WORKAROUND_BAD_SHAREINDEX)
2539 {
2540 return PVRSRV_ERROR_INVALID_PARAMS;
2541 }
2542
2543 for (*pui32Index = 0; *pui32Index < XPROC_WORKAROUND_NUM_SHAREABLES; (*pui32Index)++)
2544 {
2545 if (gXProcWorkaroundShareData[*pui32Index].ui32RefCount == 0)
2546 {
2547 gXProcWorkaroundShareIndex = *pui32Index;
2548 gXProcWorkaroundState = XPROC_WORKAROUND_ALLOC;
2549 return PVRSRV_OK;
2550 }
2551 }
2552
2553 PVR_DPF((PVR_DBG_ERROR, "ran out of shared buffers"));
2554 return PVRSRV_ERROR_OUT_OF_MEMORY;
2555}
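
/*
 * Illustrative sketch (not part of the original driver): the share-index
 * protocol expected by the three functions above. The caller is assumed to
 * hold the services mutex for the whole sequence and must pair exactly one
 * "set" with exactly one "unset".
 */
static PVRSRV_ERROR ExampleShareNewAllocation(IMG_VOID)
{
	IMG_UINT32 ui32Index;
	PVRSRV_ERROR eError;

	/* (mutex is assumed to be held by the caller here) */

	eError = BM_XProcWorkaroundFindNewBufferAndSetShareIndex(&ui32Index);
	if (eError != PVRSRV_OK)
	{
		return eError;
	}

	/* ...perform the PVRSRV_MEM_XPROC allocation; it lands in
	   gXProcWorkaroundShareData[ui32Index]... */

	return BM_XProcWorkaroundUnsetShareIndex(ui32Index);
}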
2556
2557static PVRSRV_ERROR
2558XProcWorkaroundAllocShareable(RA_ARENA *psArena,
2559 IMG_UINT32 ui32AllocFlags,
2560 IMG_UINT32 ui32Size,
2561 IMG_UINT32 ui32PageSize,
2562 IMG_PVOID pvPrivData,
2563 IMG_UINT32 ui32PrivDataLength,
2564 IMG_VOID **ppvCpuVAddr,
2565 IMG_HANDLE *phOSMemHandle)
2566{
2567 if ((ui32AllocFlags & PVRSRV_MEM_XPROC) == 0)
2568 {
2569 PVR_DPF((PVR_DBG_VERBOSE, "XProcWorkaroundAllocShareable: bad flags"));
2570 return PVRSRV_ERROR_INVALID_PARAMS;
2571 }
2572
2573 if (gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].ui32RefCount > 0)
2574 {
2575 PVR_DPF((PVR_DBG_VERBOSE,
2576 "XProcWorkaroundAllocShareable: re-using previously allocated pages"));
2577
2578 ui32AllocFlags &= ~PVRSRV_HAP_MAPTYPE_MASK;
2579 ui32AllocFlags |= PVRSRV_HAP_SINGLE_PROCESS;
2580
2581 if (ui32AllocFlags != gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].ui32AllocFlags)
2582 {
2583 PVR_DPF((PVR_DBG_ERROR,
2584 "%s ERROR: Flags don't match (Shared 0x%08x, Requested 0x%08x)!",
2585 __FUNCTION__,
2586 gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].ui32AllocFlags,
2587 ui32AllocFlags));
2588 return PVRSRV_ERROR_INVALID_PARAMS;
2589 }
2590
2591 if (ui32Size != gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].ui32Size)
2592 {
2593 PVR_DPF((PVR_DBG_ERROR,
2594 "%s ERROR: Size doesn't match (Shared %d, Requested %d) with flags 0x%08x - 0x%08x!",
2595 __FUNCTION__,
2596 gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].ui32Size,
2597 ui32Size,
2598 gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].ui32AllocFlags,
2599 ui32AllocFlags));
2600 return PVRSRV_ERROR_INVALID_PARAMS;
2601 }
2602
2603 if (ui32PageSize != gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].ui32PageSize)
2604 {
2605 PVR_DPF((PVR_DBG_ERROR,
2606 "%s ERROR: Page Size doesn't match (Shared %d, Requested %d) with flags 0x%08x - 0x%08x!",
2607 __FUNCTION__,
2608 gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].ui32PageSize,
2609 ui32PageSize,
2610 gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].ui32AllocFlags,
2611 ui32AllocFlags));
2612 return PVRSRV_ERROR_INVALID_PARAMS;
2613 }
2614
2615 *ppvCpuVAddr = gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].pvCpuVAddr;
2616 *phOSMemHandle = gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].hOSMemHandle;
2617
2618 BM_XProcIndexAcquire(gXProcWorkaroundShareIndex);
2619
2620 return PVRSRV_OK;
2621 }
2622 else
2623 {
2624 if (gXProcWorkaroundState != XPROC_WORKAROUND_ALLOC)
2625 {
2626 PVR_DPF((PVR_DBG_ERROR,
2627 "XPROC workaround in bad state! About to allocate memory from non-alloc state! (%d)",
2628 gXProcWorkaroundState));
2629 }
2630 PVR_ASSERT(gXProcWorkaroundState == XPROC_WORKAROUND_ALLOC);
2631
2632 if (psArena != IMG_NULL)
2633 {
2634 IMG_CPU_PHYADDR sCpuPAddr;
2635 IMG_SYS_PHYADDR sSysPAddr;
2636
2637 PVR_DPF((PVR_DBG_VERBOSE,
2638 "XProcWorkaroundAllocShareable: making a NEW allocation from local mem"));
2639
2640 if (!RA_Alloc (psArena,
2641 ui32Size,
2642 IMG_NULL,
2643 IMG_NULL,
2644 0,
2645 ui32PageSize,
2646 0,
2647 pvPrivData,
2648 ui32PrivDataLength,
2649 (IMG_UINTPTR_T *)&sSysPAddr.uiAddr))
2650 {
2651 PVR_DPF((PVR_DBG_ERROR, "XProcWorkaroundAllocShareable: RA_Alloc(0x%x) FAILED", ui32Size));
2652 return PVRSRV_ERROR_OUT_OF_MEMORY;
2653 }
2654
2655 sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
2656 if(OSReservePhys(sCpuPAddr,
2657 ui32Size,
2658 ui32AllocFlags,
2659 IMG_NULL,
2660 (IMG_VOID **)&gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].pvCpuVAddr,
2661 &gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].hOSMemHandle) != PVRSRV_OK)
2662 {
2663 PVR_DPF((PVR_DBG_ERROR, "XProcWorkaroundAllocShareable: OSReservePhys failed"));
2664 return PVRSRV_ERROR_OUT_OF_MEMORY;
2665 }
2666 gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].sSysPAddr = sSysPAddr;
2667 }
2668 else
2669 {
2670 PVR_DPF((PVR_DBG_VERBOSE,
2671 "XProcWorkaroundAllocShareable: making a NEW allocation from OS"));
2672
2673 ui32AllocFlags &= ~PVRSRV_HAP_MAPTYPE_MASK;
2674 ui32AllocFlags |= PVRSRV_HAP_SINGLE_PROCESS;
2675
2676 /* allocate pages from the OS RAM */
2677 if (OSAllocPages(ui32AllocFlags,
2678 ui32Size,
2679 ui32PageSize,
2680 pvPrivData,
2681 ui32PrivDataLength,
2682 IMG_NULL, /* FIXME: to support cross process sparse allocations */
2683 (IMG_VOID **)&gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].pvCpuVAddr,
2684 &gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].hOSMemHandle) != PVRSRV_OK)
2685 {
2686 PVR_DPF((PVR_DBG_ERROR,
2687 "XProcWorkaroundAllocShareable: OSAllocPages(0x%x) failed",
2688 ui32PageSize));
2689 return PVRSRV_ERROR_OUT_OF_MEMORY;
2690 }
2691 }
2692
2693 gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].psArena = psArena;
2694 gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].ui32AllocFlags = ui32AllocFlags;
2695 gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].ui32Size = ui32Size;
2696 gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].ui32PageSize = ui32PageSize;
2697
2698 *ppvCpuVAddr = gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].pvCpuVAddr;
2699 *phOSMemHandle = gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].hOSMemHandle;
2700
2701 BM_XProcIndexAcquire(gXProcWorkaroundShareIndex);
2702
2703 return PVRSRV_OK;
2704 }
2705}
2706
2707static PVRSRV_ERROR XProcWorkaroundHandleToSI(IMG_HANDLE hOSMemHandle, IMG_UINT32 *pui32SI)
2708{
2709 IMG_UINT32 ui32SI;
2710 IMG_BOOL bFound;
2711 IMG_BOOL bErrorDups;
2712
2713 bFound = IMG_FALSE;
2714 bErrorDups = IMG_FALSE;
2715
2716 for (ui32SI = 0; ui32SI < XPROC_WORKAROUND_NUM_SHAREABLES; ui32SI++)
2717 {
2718 if (gXProcWorkaroundShareData[ui32SI].ui32RefCount>0 && gXProcWorkaroundShareData[ui32SI].hOSMemHandle == hOSMemHandle)
2719 {
2720 if (bFound)
2721 {
2722 bErrorDups = IMG_TRUE;
2723 }
2724 else
2725 {
2726 *pui32SI = ui32SI;
2727 bFound = IMG_TRUE;
2728 }
2729 }
2730 }
2731
2732 if (bErrorDups || !bFound)
2733 {
2734 return PVRSRV_ERROR_BM_BAD_SHAREMEM_HANDLE;
2735 }
2736
2737 return PVRSRV_OK;
2738}
2739
2740#if defined(PVRSRV_REFCOUNT_DEBUG)
2741IMG_VOID _BM_XProcIndexAcquireDebug(const IMG_CHAR *pszFile, IMG_INT iLine, IMG_UINT32 ui32Index)
2742#else
2743IMG_VOID _BM_XProcIndexAcquire(IMG_UINT32 ui32Index)
2744#endif
2745{
2746#if defined(PVRSRV_REFCOUNT_DEBUG)
2747 PVRSRVBMXProcIncRef2(pszFile, iLine, ui32Index);
2748#else
2749 PVRSRVBMXProcIncRef(ui32Index);
2750#endif
2751}
2752
2753#if defined(PVRSRV_REFCOUNT_DEBUG)
2754IMG_VOID _BM_XProcIndexReleaseDebug(const IMG_CHAR *pszFile, IMG_INT iLine, IMG_UINT32 ui32Index)
2755#else
2756IMG_VOID _BM_XProcIndexRelease(IMG_UINT32 ui32Index)
2757#endif
2758{
2759#if defined(PVRSRV_REFCOUNT_DEBUG)
2760 PVRSRVBMXProcDecRef2(pszFile, iLine, ui32Index);
2761#else
2762 PVRSRVBMXProcDecRef(ui32Index);
2763#endif
2764
2765 PVR_DPF((PVR_DBG_VERBOSE, "Reduced refcount of SI[%d] from %d to %d",
2766 ui32Index, gXProcWorkaroundShareData[ui32Index].ui32RefCount+1, gXProcWorkaroundShareData[ui32Index].ui32RefCount));
2767
2768 if (gXProcWorkaroundShareData[ui32Index].ui32RefCount == 0)
2769 {
2770 if (gXProcWorkaroundShareData[ui32Index].psArena != IMG_NULL)
2771 {
2772 IMG_SYS_PHYADDR sSysPAddr;
2773
2774 if (gXProcWorkaroundShareData[ui32Index].pvCpuVAddr != IMG_NULL)
2775 {
2776 OSUnReservePhys(gXProcWorkaroundShareData[ui32Index].pvCpuVAddr,
2777 gXProcWorkaroundShareData[ui32Index].ui32Size,
2778 gXProcWorkaroundShareData[ui32Index].ui32AllocFlags,
2779 gXProcWorkaroundShareData[ui32Index].hOSMemHandle);
2780 }
2781 sSysPAddr = gXProcWorkaroundShareData[ui32Index].sSysPAddr;
2782 RA_Free (gXProcWorkaroundShareData[ui32Index].psArena,
2783 sSysPAddr.uiAddr,
2784 IMG_FALSE);
2785 }
2786 else
2787 {
2788 PVR_DPF((PVR_DBG_VERBOSE, "freeing OS memory"));
2789 OSFreePages(gXProcWorkaroundShareData[ui32Index].ui32AllocFlags,
2790 gXProcWorkaroundShareData[ui32Index].ui32PageSize,
2791 gXProcWorkaroundShareData[ui32Index].pvCpuVAddr,
2792 gXProcWorkaroundShareData[ui32Index].hOSMemHandle);
2793 }
2794 }
2795}
2796
2797static IMG_VOID XProcWorkaroundFreeShareable(IMG_HANDLE hOSMemHandle)
2798{
2799 IMG_UINT32 ui32SI = (IMG_UINT32)((IMG_UINTPTR_T)hOSMemHandle & 0xffffU);
2800 PVRSRV_ERROR eError;
2801
2802 eError = XProcWorkaroundHandleToSI(hOSMemHandle, &ui32SI);
2803 if (eError != PVRSRV_OK)
2804 {
2805 PVR_DPF((PVR_DBG_ERROR, "bad handle"));
2806 return;
2807 }
2808
2809 BM_XProcIndexRelease(ui32SI);
2810}
2811
2812
2813/*!
2814******************************************************************************
2815
2816 @Function BM_ImportMemory
2817
2818 @Description Provide a resource allocator with a source of pages of memory
2819 from the Host OS's own allocation. Allocates a block of pages
2820 larger than requested, allowing the resource allocator to
2821                operate a small cache of pre-allocated pages.
2822
2823 @Input      pH - buffer manager handle; note the void type is dictated
2824					by the generic nature of the resource allocator interface.
2825 @Input uRequestSize - requested size in bytes
2826 @Output pActualSize - receives the actual size allocated in bytes
2827 which may be >= requested size
2828 @Output ppsMapping - receives the arbitrary user reference
2829 associated with the underlying storage.
2830 @Input uFlags - bit mask of allocation flags
2831 @Input pvPrivData - opaque private data passed through to allocator
2832 @Input ui32PrivDataLength - length of opaque private data
2833 @Output pBase - receives a pointer to the allocated storage.
2834
2835 @Return IMG_TRUE - success
2836 IMG_FALSE - failed
2837
2838 *****************************************************************************/
2839static IMG_BOOL
2840BM_ImportMemory (IMG_VOID *pH,
2841 IMG_SIZE_T uRequestSize,
2842 IMG_SIZE_T *pActualSize,
2843 BM_MAPPING **ppsMapping,
2844 IMG_UINT32 uFlags,
2845 IMG_PVOID pvPrivData,
2846 IMG_UINT32 ui32PrivDataLength,
2847 IMG_UINTPTR_T *pBase)
2848{
2849 BM_MAPPING *pMapping;
2850 BM_HEAP *pBMHeap = pH;
2851 BM_CONTEXT *pBMContext = pBMHeap->pBMContext;
2852 IMG_INT32 uResult;
2853 IMG_SIZE_T uSize;
2854 IMG_SIZE_T uPSize;
2855 IMG_SIZE_T uDevVAddrAlignment = 0; /* ? */
2856
2857 PVR_DPF ((PVR_DBG_MESSAGE,
2858 "BM_ImportMemory (pBMContext=0x%x, uRequestSize=0x%x, uFlags=0x%x, uAlign=0x%x)",
2859 (IMG_UINTPTR_T)pBMContext, uRequestSize, uFlags, uDevVAddrAlignment));
2860
2861 PVR_ASSERT (ppsMapping != IMG_NULL);
2862 PVR_ASSERT (pBMContext != IMG_NULL);
2863
2864 if (ppsMapping == IMG_NULL)
2865 {
2866 PVR_DPF((PVR_DBG_ERROR, "BM_ImportMemory: invalid parameter"));
2867 goto fail_exit;
2868 }
2869
2870 uSize = HOST_PAGEALIGN (uRequestSize);
2871 PVR_ASSERT (uSize >= uRequestSize);
2872
2873 if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
2874 sizeof (BM_MAPPING),
2875 (IMG_PVOID *)&pMapping, IMG_NULL,
2876 "Buffer Manager Mapping") != PVRSRV_OK)
2877 {
2878 PVR_DPF ((PVR_DBG_ERROR, "BM_ImportMemory: failed BM_MAPPING alloc"));
2879 goto fail_exit;
2880 }
2881
2882 pMapping->hOSMemHandle = 0;
2883 pMapping->CpuVAddr = 0;
2884 pMapping->DevVAddr.uiAddr = 0;
2885 pMapping->ui32MappingCount = 0;
2886 pMapping->CpuPAddr.uiAddr = 0;
2887 pMapping->uSize = uSize;
2888 if ((uFlags & PVRSRV_MEM_SPARSE) == 0)
2889 {
2890 pMapping->uSizeVM = uSize;
2891 }
2892 pMapping->pBMHeap = pBMHeap;
2893 pMapping->ui32Flags = uFlags;
2894
2895 /*
2896	 * If anyone wants to know, pass back the actual size of our allocation.
2897 * There could be up to an extra page's worth of memory which will be marked
2898 * as free in the RA.
2899 */
2900 if (pActualSize)
2901 {
2902 *pActualSize = uSize;
2903 }
2904
2905 /* if it's a dummy allocation only use one physical page */
2906 if(pMapping->ui32Flags & PVRSRV_MEM_DUMMY)
2907 {
2908 uPSize = pBMHeap->sDevArena.ui32DataPageSize;
2909 }
2910 else
2911 {
2912 uPSize = pMapping->uSize;
2913 }
2914
2915 if (uFlags & PVRSRV_MEM_XPROC)
2916 {
2917 IMG_UINT32 ui32Attribs = pBMHeap->ui32Attribs | PVRSRV_MEM_XPROC;
2918 IMG_BOOL bBadBackingStoreType;
2919
2920 if(uFlags & PVRSRV_MEM_ION)
2921 {
2922 ui32Attribs |= PVRSRV_MEM_ION;
2923 }
2924
2925 bBadBackingStoreType = IMG_TRUE;
2926
2927 if ((ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG) != 0)
2928 {
2929 uDevVAddrAlignment = MAX(pBMHeap->sDevArena.ui32DataPageSize, HOST_PAGESIZE());
2930
2931
2932 if (uPSize % uDevVAddrAlignment != 0)
2933 {
2934				PVR_DPF((PVR_DBG_ERROR, "Cannot use this memory sharing workaround with allocations that might be suballocated"));
2935 goto fail_mapping_alloc;
2936 }
2937 uDevVAddrAlignment = 0; /* FIXME: find out why it doesn't work if alignment is specified */
2938
2939 /* If the user has specified heap CACHETYPE flags, use them to
2940 * override the flags inherited from the heap.
2941 */
2942 if (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK)
2943 {
2944 ui32Attribs &= ~PVRSRV_HAP_CACHETYPE_MASK;
2945 ui32Attribs |= (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK);
2946 }
2947
2948 /* allocate "shared" pages. */
2949 if (XProcWorkaroundAllocShareable(IMG_NULL,
2950 ui32Attribs,
2951 (IMG_UINT32)uPSize,
2952 pBMHeap->sDevArena.ui32DataPageSize,
2953 pvPrivData,
2954 ui32PrivDataLength,
2955 (IMG_VOID **)&pMapping->CpuVAddr,
2956 &pMapping->hOSMemHandle) != PVRSRV_OK)
2957 {
2958 PVR_DPF((PVR_DBG_ERROR,
2959 "BM_ImportMemory: XProcWorkaroundAllocShareable(0x%x) failed",
2960 uPSize));
2961 goto fail_mapping_alloc;
2962 }
2963
2964 /* specify how page addresses are derived */
2965 /* it works just like "env" now - no need to record
2966 it as shareable, as we use the actual hOSMemHandle
2967 and only divert to our wrapper layer based on Attribs */
2968 pMapping->eCpuMemoryOrigin = hm_env;
2969 bBadBackingStoreType = IMG_FALSE;
2970 }
2971
2972 if ((ui32Attribs & PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG) != 0)
2973 {
2974 uDevVAddrAlignment = pBMHeap->sDevArena.ui32DataPageSize;
2975
2976 if (uPSize % uDevVAddrAlignment != 0)
2977 {
2978				PVR_DPF((PVR_DBG_ERROR, "Cannot use this memory sharing workaround with allocations that might be suballocated"));
2979 goto fail_mapping_alloc;
2980 }
2981 uDevVAddrAlignment = 0; /* FIXME: find out why it doesn't work if alignment is specified */
2982
2983 /* If the user has specified heap CACHETYPE flags, use them to
2984 * override the flags inherited from the heap.
2985 */
2986 if (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK)
2987 {
2988 ui32Attribs &= ~PVRSRV_HAP_CACHETYPE_MASK;
2989 ui32Attribs |= (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK);
2990 }
2991
2992 /* allocate "shared" pages. */
2993 if (XProcWorkaroundAllocShareable(pBMHeap->pLocalDevMemArena,
2994 ui32Attribs,
2995 (IMG_UINT32)uPSize,
2996 pBMHeap->sDevArena.ui32DataPageSize,
2997 pvPrivData,
2998 ui32PrivDataLength,
2999 (IMG_VOID **)&pMapping->CpuVAddr,
3000 &pMapping->hOSMemHandle) != PVRSRV_OK)
3001 {
3002 PVR_DPF((PVR_DBG_ERROR,
3003 "BM_ImportMemory: XProcWorkaroundAllocShareable(0x%x) failed",
3004 uPSize));
3005 goto fail_mapping_alloc;
3006 }
3007
3008 /* specify how page addresses are derived */
3009 /* it works just like "env" now - no need to record
3010 it as shareable, as we use the actual hOSMemHandle
3011 and only divert to our wrapper layer based on Attribs */
3012 pMapping->eCpuMemoryOrigin = hm_env;
3013 bBadBackingStoreType = IMG_FALSE;
3014 }
3015
3016 if (bBadBackingStoreType)
3017 {
3018 PVR_DPF((PVR_DBG_ERROR, "Cannot use this memory sharing workaround with this type of backing store"));
3019 goto fail_mapping_alloc;
3020 }
3021 }
3022 else
3023
3024 /*
3025 What type of backing store do we have?
3026 */
3027 if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG)
3028 {
3029 IMG_UINT32 ui32Attribs = pBMHeap->ui32Attribs;
3030
3031 /* The allocation code needs to know this is a sparse mapping */
3032 if (pMapping->ui32Flags & PVRSRV_MEM_SPARSE)
3033 {
3034 ui32Attribs |= PVRSRV_MEM_SPARSE;
3035 }
3036
3037 /* If the user has specified heap CACHETYPE flags, use them to
3038 * override the flags inherited from the heap.
3039 */
3040 if (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK)
3041 {
3042 ui32Attribs &= ~PVRSRV_HAP_CACHETYPE_MASK;
3043 ui32Attribs |= (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK);
3044 }
3045
3046 if (pMapping->ui32Flags & PVRSRV_MEM_ALLOCATENONCACHEDMEM)
3047 {
3048 ui32Attribs &= ~PVRSRV_MEM_ALLOCATENONCACHEDMEM;
3049 ui32Attribs |= (pMapping->ui32Flags & PVRSRV_MEM_ALLOCATENONCACHEDMEM);
3050 }
3051
3052 /* allocate pages from the OS RAM */
3053 if (OSAllocPages(ui32Attribs,
3054 uPSize,
3055 pBMHeap->sDevArena.ui32DataPageSize,
3056 pvPrivData,
3057 ui32PrivDataLength,
3058 pMapping,
3059 (IMG_VOID **)&pMapping->CpuVAddr,
3060 &pMapping->hOSMemHandle) != PVRSRV_OK)
3061 {
3062 PVR_DPF((PVR_DBG_ERROR,
3063 "BM_ImportMemory: OSAllocPages(0x%x) failed",
3064 uPSize));
3065 goto fail_mapping_alloc;
3066 }
3067
3068 /* specify how page addresses are derived */
3069 pMapping->eCpuMemoryOrigin = hm_env;
3070 }
3071 else if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG)
3072 {
3073 IMG_SYS_PHYADDR sSysPAddr;
3074 IMG_UINT32 ui32Attribs = pBMHeap->ui32Attribs;
3075
3076 /* The allocation code needs to know this is a sparse mapping */
3077 if (pMapping->ui32Flags & PVRSRV_MEM_SPARSE)
3078 {
3079 ui32Attribs |= PVRSRV_MEM_SPARSE;
3080 }
3081
3082 /* allocate pages from the local device memory allocator */
3083 PVR_ASSERT(pBMHeap->pLocalDevMemArena != IMG_NULL);
3084
3085 /* If the user has specified heap CACHETYPE flags, use them to
3086 * override the flags inherited from the heap.
3087 */
3088 if (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK)
3089 {
3090 ui32Attribs &= ~PVRSRV_HAP_CACHETYPE_MASK;
3091 ui32Attribs |= (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK);
3092 }
3093
3094 if (!RA_Alloc (pBMHeap->pLocalDevMemArena,
3095 uPSize,
3096 IMG_NULL,
3097 IMG_NULL,
3098 0,
3099 pBMHeap->sDevArena.ui32DataPageSize,
3100 0,
3101 pvPrivData,
3102 ui32PrivDataLength,
3103 (IMG_UINTPTR_T *)&sSysPAddr.uiAddr))
3104 {
3105 PVR_DPF((PVR_DBG_ERROR, "BM_ImportMemory: RA_Alloc(0x%x) FAILED", uPSize));
3106 goto fail_mapping_alloc;
3107 }
3108
3109		/* derive the CPU physical address and reserve a CPU virtual mapping */
3110 pMapping->CpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
3111 if(OSReservePhys(pMapping->CpuPAddr,
3112 uPSize,
3113 ui32Attribs,
3114 pMapping,
3115 &pMapping->CpuVAddr,
3116 &pMapping->hOSMemHandle) != PVRSRV_OK)
3117 {
3118 PVR_DPF((PVR_DBG_ERROR, "BM_ImportMemory: OSReservePhys failed"));
3119 goto fail_dev_mem_alloc;
3120 }
3121
3122 /* specify how page addresses are derived */
3123 pMapping->eCpuMemoryOrigin = hm_contiguous;
3124 }
3125 else
3126 {
3127 PVR_DPF((PVR_DBG_ERROR, "BM_ImportMemory: Invalid backing store type"));
3128 goto fail_mapping_alloc;
3129 }
3130
3131 if(uFlags & PVRSRV_MEM_ION)
3132 {
3133 IMG_UINT32 ui32AddressOffsets[PVRSRV_MAX_NUMBER_OF_MM_BUFFER_PLANES];
3134 IMG_UINT32 ui32NumAddrOffsets = PVRSRV_MAX_NUMBER_OF_MM_BUFFER_PLANES;
3135
3136 IMG_INT32 retSize = OSGetMemMultiPlaneInfo(pMapping->hOSMemHandle,
3137 ui32AddressOffsets, &ui32NumAddrOffsets);
3138
3139 if(retSize > 0 && pActualSize)
3140 {
3141 *pActualSize = pMapping->uSize = retSize;
3142 }
3143 }
3144
3145 /*
3146 * Allocate some device memory for what we just allocated.
3147 */
3148 /*
3149 * Do not allocate GPU mapping if NO_GPU_VIRTUAL_ON_ALLOC is requested.
3150 * In the case where CBI is enabled, this allows for late
3151	 * GPU mapping. Otherwise, this flag is used in cases where only
3152	 * the memory management feature of the driver is utilized, without
3153	 * a need for GPU rendering.
3154 */
3155 if ((uFlags & (PVRSRV_MEM_SPARSE | PVRSRV_HAP_NO_GPU_VIRTUAL_ON_ALLOC)) == 0)
3156 {
3157 uResult = DevMemoryAlloc (pBMContext,
3158 pMapping,
3159 IMG_NULL,
3160 uFlags,
3161 (IMG_UINT32)uDevVAddrAlignment,
3162 &pMapping->DevVAddr);
3163 if (uResult <= 0)
3164 {
3165 PVR_DPF((PVR_DBG_ERROR,
3166 "BM_ImportMemory: DevMemoryAlloc(0x%x) failed",
3167 pMapping->uSize));
3168 goto fail_dev_mem_alloc;
3169 }
3170
3171 /* uDevVAddrAlignment is currently set to zero so QAC generates warning which we override */
3172 /* PRQA S 3356,3358 1 */
3173 PVR_ASSERT (uDevVAddrAlignment>1?(pMapping->DevVAddr.uiAddr%uDevVAddrAlignment)==0:1);
3174 PVR_ASSERT(pBase);
3175 }
3176
3177 if(pBase)
3178 *pBase = pMapping->DevVAddr.uiAddr;
3179 *ppsMapping = pMapping;
3180
3181 PVR_DPF ((PVR_DBG_MESSAGE, "BM_ImportMemory: IMG_TRUE"));
3182 return IMG_TRUE;
3183
3184fail_dev_mem_alloc:
3185 if (pMapping && (pMapping->CpuVAddr || pMapping->hOSMemHandle))
3186 {
3187 /* the size is double the actual size for interleaved allocations */
3188 if(pMapping->ui32Flags & PVRSRV_MEM_INTERLEAVED)
3189 {
3190 pMapping->uSize /= 2;
3191 }
3192
3193 if(pMapping->ui32Flags & PVRSRV_MEM_DUMMY)
3194 {
3195 uPSize = pBMHeap->sDevArena.ui32DataPageSize;
3196 }
3197 else
3198 {
3199 uPSize = pMapping->uSize;
3200 }
3201
3202 if (uFlags & PVRSRV_MEM_XPROC)
3203 {
3204 XProcWorkaroundFreeShareable(pMapping->hOSMemHandle);
3205 }
3206 else
3207 if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG)
3208 {
3209 OSFreePages(pBMHeap->ui32Attribs,
3210 uPSize,
3211 (IMG_VOID *)pMapping->CpuVAddr,
3212 pMapping->hOSMemHandle);
3213 }
3214 else
3215 {
3216 IMG_SYS_PHYADDR sSysPAddr;
3217
3218 if(pMapping->CpuVAddr)
3219 {
3220 OSUnReservePhys(pMapping->CpuVAddr,
3221 uPSize,
3222 pBMHeap->ui32Attribs,
3223 pMapping->hOSMemHandle);
3224 }
3225 sSysPAddr = SysCpuPAddrToSysPAddr(pMapping->CpuPAddr);
3226 RA_Free (pBMHeap->pLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);
3227 }
3228 }
3229fail_mapping_alloc:
3230 OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), pMapping, IMG_NULL);
3231 /*not nulling pointer, out of scope*/
3232fail_exit:
3233 return IMG_FALSE;
3234}
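
/*
 * Illustrative sketch (not part of the original driver): BM_ImportMemory and
 * BM_FreeMemory have the shape of the resource allocator's import/free
 * callbacks, so a device arena can grow on demand in host-page-aligned
 * blocks. For example, a request of 0x1234 bytes on a 4KB-page host is
 * rounded up by HOST_PAGEALIGN to 0x2000, and the surplus is simply marked
 * free in the RA. The RA_Create call below is abbreviated and its parameter
 * list is hypothetical; only the callback pairing is the point:
 *
 *   pArena = RA_Create("device arena",
 *                      ...base/quantum parameters elided...,
 *                      BM_ImportMemory,     import callback: grows the arena
 *                      BM_FreeMemory,       free callback: returns blocks
 *                      ...,
 *                      (IMG_VOID *)pBMHeap);   handed back as pH / h
 */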
3235
3236
3237/*!
3238******************************************************************************
3239
3240 @Function BM_FreeMemory
3241
3242 @Description Free a block of pages previously allocated via
3243 BM_ImportMemory.
3244
3245 @Input      h - buffer manager handle; note the void type is dictated by
3246					the generic nature of the resource allocator interface.
3247 @Input _base - base address of blocks to free.
3248 @Input psMapping - arbitrary user reference associated with the
3249 underlying storage provided by BM_ImportMemory
3250 @Return None
3251
3252 *****************************************************************************/
3253static IMG_VOID
3254BM_FreeMemory (IMG_VOID *h, IMG_UINTPTR_T _base, BM_MAPPING *psMapping)
3255{
3256 BM_HEAP *pBMHeap = h;
3257 IMG_SIZE_T uPSize;
3258
3259 PVR_UNREFERENCED_PARAMETER (_base);
3260
3261 PVR_DPF ((PVR_DBG_MESSAGE,
3262 "BM_FreeMemory (h=0x%x, base=0x%x, psMapping=0x%x)",
3263 (IMG_UINTPTR_T)h, _base, (IMG_UINTPTR_T)psMapping));
3264
3265 PVR_ASSERT (psMapping != IMG_NULL);
3266
3267 if (psMapping == IMG_NULL)
3268 {
3269 PVR_DPF((PVR_DBG_ERROR, "BM_FreeMemory: invalid parameter"));
3270 return;
3271 }
3272
3273 /*
3274		Only free the virtual memory if we got as far as allocating it.
3275 This NULL check should be safe as we always have a guard page
3276 at virtual address 0x00000000
3277 */
3278 if (psMapping->DevVAddr.uiAddr)
3279 {
3280 DevMemoryFree (psMapping);
3281 }
3282
3283 /* the size is double the actual for interleaved */
3284 if((psMapping->ui32Flags & PVRSRV_MEM_INTERLEAVED) != 0)
3285 {
3286 psMapping->uSize /= 2;
3287 }
3288
3289 if(psMapping->ui32Flags & PVRSRV_MEM_DUMMY)
3290 {
3291 uPSize = psMapping->pBMHeap->sDevArena.ui32DataPageSize;
3292 }
3293 else
3294 {
3295 uPSize = psMapping->uSize;
3296 }
3297
3298 if (psMapping->ui32Flags & PVRSRV_MEM_XPROC)
3299 {
3300 XProcWorkaroundFreeShareable(psMapping->hOSMemHandle);
3301 }
3302 else
3303 if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG)
3304 {
3305 OSFreePages(pBMHeap->ui32Attribs,
3306 uPSize,
3307 (IMG_VOID *) psMapping->CpuVAddr,
3308 psMapping->hOSMemHandle);
3309 }
3310 else if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG)
3311 {
3312 IMG_SYS_PHYADDR sSysPAddr;
3313
3314 OSUnReservePhys(psMapping->CpuVAddr, uPSize, pBMHeap->ui32Attribs, psMapping->hOSMemHandle);
3315
3316 sSysPAddr = SysCpuPAddrToSysPAddr(psMapping->CpuPAddr);
3317
3318 RA_Free (pBMHeap->pLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);
3319 }
3320 else
3321 {
3322 PVR_DPF((PVR_DBG_ERROR, "BM_FreeMemory: Invalid backing store type"));
3323 }
3324
3325 OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), psMapping, IMG_NULL);
3326 /*not nulling pointer, copy on stack*/
3327
3328 PVR_DPF((PVR_DBG_MESSAGE,
3329 "..BM_FreeMemory (h=0x%x, base=0x%x)",
3330 (IMG_UINTPTR_T)h, _base));
3331}
3332
3333/*!
3334******************************************************************************
3335
3336 @Function BM_GetPhysPageAddr
3337
3338 @Description	Retrieve the device physical page address backing the given device virtual page address.
3339
3340 @Input psMemInfo
3341
3342 @Input sDevVPageAddr
3343
3344 @Output psDevPAddr
3345
3346 @Return IMG_VOID
3347
3348******************************************************************************/
3349
3350IMG_VOID BM_GetPhysPageAddr(PVRSRV_KERNEL_MEM_INFO *psMemInfo,
3351 IMG_DEV_VIRTADDR sDevVPageAddr,
3352 IMG_DEV_PHYADDR *psDevPAddr)
3353{
3354 PVRSRV_DEVICE_NODE *psDeviceNode;
3355
3356 PVR_DPF((PVR_DBG_MESSAGE, "BM_GetPhysPageAddr"));
3357
3358 PVR_ASSERT(psMemInfo && psDevPAddr);
3359
3360 /* check it's a page address */
3361 PVR_ASSERT((sDevVPageAddr.uiAddr & 0xFFF) == 0);
3362
3363 /* PRQA S 0505 4 */ /* PVR_ASSERT should catch NULL ptrs */
3364 psDeviceNode = ((BM_BUF*)psMemInfo->sMemBlk.hBuffer)->pMapping->pBMHeap->pBMContext->psDeviceNode;
3365
3366 *psDevPAddr = psDeviceNode->pfnMMUGetPhysPageAddr(((BM_BUF*)psMemInfo->sMemBlk.hBuffer)->pMapping->pBMHeap->pMMUHeap,
3367 sDevVPageAddr);
3368}
3369
3370
3371/*!
3372******************************************************************************
3373 @Function BM_GetMMUContext
3374
3375 @Description utility function to return the MMU context
3376
3377 @Input hDevMemHeap - the Dev mem heap handle
3378
3379 @Return MMU context, else NULL
3380**************************************************************************/
3381MMU_CONTEXT* BM_GetMMUContext(IMG_HANDLE hDevMemHeap)
3382{
3383 BM_HEAP *pBMHeap = (BM_HEAP*)hDevMemHeap;
3384
3385 PVR_DPF((PVR_DBG_VERBOSE, "BM_GetMMUContext"));
3386
3387 return pBMHeap->pBMContext->psMMUContext;
3388}
3389
3390/*!
3391******************************************************************************
3392 @Function BM_GetMMUContextFromMemContext
3393
3394 @Description utility function to return the MMU context
3395
3396 @Input hDevMemContext - the Dev mem context handle
3397
3398 @Return MMU context, else NULL
3399**************************************************************************/
3400MMU_CONTEXT* BM_GetMMUContextFromMemContext(IMG_HANDLE hDevMemContext)
3401{
3402 BM_CONTEXT *pBMContext = (BM_CONTEXT*)hDevMemContext;
3403
3404 PVR_DPF ((PVR_DBG_VERBOSE, "BM_GetMMUContextFromMemContext"));
3405
3406 return pBMContext->psMMUContext;
3407}
3408
3409/*!
3410******************************************************************************
3411 @Function BM_GetMMUHeap
3412
3413 @Description utility function to return the MMU heap handle
3414
3415 @Input hDevMemHeap - the Dev mem heap handle
3416
3417 @Return MMU heap handle, else NULL
3418**************************************************************************/
3419IMG_HANDLE BM_GetMMUHeap(IMG_HANDLE hDevMemHeap)
3420{
3421 PVR_DPF((PVR_DBG_VERBOSE, "BM_GetMMUHeap"));
3422
3423 return (IMG_HANDLE)((BM_HEAP*)hDevMemHeap)->pMMUHeap;
3424}
3425
3426
3427/*!
3428******************************************************************************
3429 @Function BM_GetDeviceNode
3430
3431 @Description	utility function to return the device node from the BM Context
3432
3433 @Input	hDevMemContext - the Dev Mem Context
3434
3435 @Return	device node, else NULL
3436**************************************************************************/
3437PVRSRV_DEVICE_NODE* BM_GetDeviceNode(IMG_HANDLE hDevMemContext)
3438{
3439 PVR_DPF((PVR_DBG_VERBOSE, "BM_GetDeviceNode"));
3440
3441 return ((BM_CONTEXT*)hDevMemContext)->psDeviceNode;
3442}
3443
3444
3445/*!
3446******************************************************************************
3447 @Function BM_GetMappingHandle
3448
3449 @Description utility function to return the mapping handle from a meminfo
3450
3451 @Input psMemInfo - kernel meminfo
3452
3453 @Return mapping handle, else NULL
3454**************************************************************************/
3455IMG_HANDLE BM_GetMappingHandle(PVRSRV_KERNEL_MEM_INFO *psMemInfo)
3456{
3457 PVR_DPF((PVR_DBG_VERBOSE, "BM_GetMappingHandle"));
3458
3459 return ((BM_BUF*)psMemInfo->sMemBlk.hBuffer)->pMapping->hOSMemHandle;
3460}
3461
3462/*!
3463******************************************************************************
3464 @Function BM_MappingHandleFromBuffer
3465
3466 @Description utility function to get the BM mapping handle from a BM buffer
3467
3468 @Input hBuffer - Handle to BM buffer
3469
3470 @Return BM mapping handle
3471**************************************************************************/
3472IMG_HANDLE BM_MappingHandleFromBuffer(IMG_HANDLE hBuffer)
3473{
3474 BM_BUF *psBuffer;
3475
3476 PVR_ASSERT(hBuffer != IMG_NULL);
3477 psBuffer = hBuffer;
3478 return psBuffer->pMapping;
3479}
3480
3481/*!
3482******************************************************************************
3483 @Function BM_GetVirtualSize
3484
3485 @Description utility function to get the VM size of a BM mapping
3486
3487 @Input hBMHandle - Handle to BM mapping
3488
3489 @Return VM size of mapping
3490**************************************************************************/
3491IMG_UINT32 BM_GetVirtualSize(IMG_HANDLE hBMHandle)
3492{
3493 BM_MAPPING *psMapping;
3494
3495 PVR_ASSERT(hBMHandle != IMG_NULL);
3496 psMapping = hBMHandle;
3497 return psMapping->ui32ChunkSize * psMapping->ui32NumVirtChunks;
3498}
3499
3500/*!
3501******************************************************************************
3502 @Function BM_MapPageAtOffset
3503
3504 @Description	utility function to check if the specified offset in a BM mapping
3505				is a page that needs to be mapped
3506
3507 @Input hBMHandle - Handle to BM mapping
3508
3509 @Input ui32Offset - Offset into allocation
3510
3511 @Return IMG_TRUE if the page should be mapped
3512**************************************************************************/
3513IMG_BOOL BM_MapPageAtOffset(IMG_HANDLE hBMHandle, IMG_UINT32 ui32Offset)
3514{
3515 BM_MAPPING *psMapping;
3516 IMG_UINT32 ui32ChunkIndex;
3517
3518 PVR_ASSERT(hBMHandle != IMG_NULL);
3519 psMapping = hBMHandle;
3520
3521 ui32ChunkIndex = ui32Offset / psMapping->ui32ChunkSize;
3522	/* Check for overrun (valid chunk indices are 0..ui32NumVirtChunks-1) */
3523	PVR_ASSERT(ui32ChunkIndex < psMapping->ui32NumVirtChunks);
3524 return psMapping->pabMapChunk[ui32ChunkIndex];
3525}
3526
3527/*!
3528******************************************************************************
3529 @Function	BM_VirtOffsetToPhysical
3530
3531 @Description	utility function to find the physical offset of a sparse allocation
3532				from its virtual offset.
3533
3534 @Input hBMHandle - Handle to BM mapping
3535
3536 @Input ui32VirtOffset - Virtual offset into allocation
3537
3538 @Output pui32PhysOffset - Physical offset
3539
3540 @Return IMG_TRUE if the virtual offset is physically backed
3541**************************************************************************/
3542IMG_BOOL BM_VirtOffsetToPhysical(IMG_HANDLE hBMHandle,
3543 IMG_UINT32 ui32VirtOffset,
3544 IMG_UINT32 *pui32PhysOffset)
3545{
3546 BM_MAPPING *psMapping;
3547 IMG_UINT32 ui32ChunkOffset;
3548 IMG_UINT32 ui32PhysOffset = 0;
3549 IMG_UINT32 i;
3550
3551 PVR_ASSERT(hBMHandle != IMG_NULL);
3552 psMapping = hBMHandle;
3553
3554 ui32ChunkOffset = ui32VirtOffset / psMapping->ui32ChunkSize;
3555 if (!psMapping->pabMapChunk[ui32ChunkOffset])
3556 {
3557 return IMG_FALSE;
3558 }
3559
3560 for (i=0;i<ui32ChunkOffset;i++)
3561 {
3562 if (psMapping->pabMapChunk[i])
3563 {
3564 ui32PhysOffset += psMapping->ui32ChunkSize;
3565 }
3566 }
3567 *pui32PhysOffset = ui32PhysOffset;
3568
3569 return IMG_TRUE;
3570}
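
/*
 * Illustrative worked example (not part of the original driver) for the
 * sparse translation above: with ui32ChunkSize = 0x1000 and pabMapChunk =
 * {1, 0, 1, 1}, a virtual offset of 0x3000 falls in chunk 3, which is
 * mapped; the mapped chunks below it are 0 and 2, so the function writes a
 * physical offset of 0x2000 and returns IMG_TRUE. A virtual offset of
 * 0x1000 falls in unmapped chunk 1, so the function returns IMG_FALSE.
 */
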
3571/******************************************************************************
3572 End of file (buffer_manager.c)
3573******************************************************************************/