author    Jammy Zhou    2015-08-16 22:09:09 -0500
committer Alex Deucher  2015-08-17 15:29:26 -0500
commit    56d8dd6a9c03680700e0b0043cb56e0af7e3e3de (patch)
tree      d9f049c3c8d8ce7010436add5ad76c6f36e32943 /amdgpu/amdgpu_vamgr.c
parent    ffa305d0fc926418e4dff432381ead8907dc18d9 (diff)
amdgpu: make vamgr per device v2
Each device can have its own vamgr, so make it per device now. This
fixes the failure seen when multiple GPUs are used in a single process.

v2: rebase

Signed-off-by: Jammy Zhou <Jammy.Zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
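In short, the patch drops the process-wide, refcounted vamgr singleton and embeds one manager in each device. A minimal sketch of the resulting shape follows; the types are simplified stand-ins for the real definitions in amdgpu_internal.h, and device_init_vamgr is a hypothetical helper standing in for wherever device creation wires up the init call:

	#include <stdint.h>

	/* Simplified stand-ins for the real structures in amdgpu_internal.h. */
	struct amdgpu_bo_va_mgr {
		uint64_t va_offset;     /* start of the managed VA range */
		uint64_t va_max;        /* end of the managed VA range */
		uint32_t va_alignment;  /* minimum allocation alignment */
		/* hole list, bo_va_mutex, ... */
	};

	struct amdgpu_device_info {
		uint64_t virtual_address_offset;
		uint64_t virtual_address_max;
		uint32_t virtual_address_alignment;
	};

	struct amdgpu_device {
		struct amdgpu_device_info dev_info;
		struct amdgpu_bo_va_mgr vamgr;  /* embedded: one manager per device */
		/* fd, handle tables, ... */
	};

	/* Provided by amdgpu_vamgr.c; sets up the hole list and mutex. */
	void amdgpu_vamgr_init(struct amdgpu_bo_va_mgr *mgr, uint64_t start,
			       uint64_t max, uint64_t alignment);

	/* Hypothetical helper showing where per-device init now happens:
	 * the manager lives and dies with its device, so the old
	 * atomic_inc_return() refcounting on a global is unnecessary. */
	static void device_init_vamgr(struct amdgpu_device *dev)
	{
		amdgpu_vamgr_init(&dev->vamgr,
				  dev->dev_info.virtual_address_offset,
				  dev->dev_info.virtual_address_max,
				  dev->dev_info.virtual_address_alignment);
	}

Because the manager's lifetime now matches the device's, amdgpu_vamgr_get_global() and amdgpu_vamgr_reference() can be deleted outright, as the diff below shows.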
Diffstat (limited to 'amdgpu/amdgpu_vamgr.c')
-rw-r--r--  amdgpu/amdgpu_vamgr.c | 26 +-
1 file changed, 1 insertion(+), 25 deletions(-)
diff --git a/amdgpu/amdgpu_vamgr.c b/amdgpu/amdgpu_vamgr.c
index 507a73a2..04d28817 100644
--- a/amdgpu/amdgpu_vamgr.c
+++ b/amdgpu/amdgpu_vamgr.c
@@ -33,8 +33,6 @@
 #include "amdgpu_internal.h"
 #include "util_math.h"
 
-static struct amdgpu_bo_va_mgr vamgr = {{0}};
-
 int amdgpu_va_range_query(amdgpu_device_handle dev,
 			  enum amdgpu_gpu_va_range type, uint64_t *start, uint64_t *end)
 {
@@ -67,28 +65,6 @@ drm_private void amdgpu_vamgr_deinit(struct amdgpu_bo_va_mgr *mgr)
 	pthread_mutex_destroy(&mgr->bo_va_mutex);
 }
 
-drm_private struct amdgpu_bo_va_mgr *
-amdgpu_vamgr_get_global(struct amdgpu_device *dev)
-{
-	int ref;
-	ref = atomic_inc_return(&vamgr.refcount);
-
-	if (ref == 1)
-		amdgpu_vamgr_init(&vamgr, dev->dev_info.virtual_address_offset,
-				  dev->dev_info.virtual_address_max,
-				  dev->dev_info.virtual_address_alignment);
-	return &vamgr;
-}
-
-drm_private void
-amdgpu_vamgr_reference(struct amdgpu_bo_va_mgr **dst,
-		       struct amdgpu_bo_va_mgr *src)
-{
-	if (update_references(&(*dst)->refcount, NULL))
-		amdgpu_vamgr_deinit(*dst);
-	*dst = src;
-}
-
 drm_private uint64_t
 amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, uint64_t size,
 		     uint64_t alignment, uint64_t base_required)
@@ -105,7 +81,7 @@ amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, uint64_t size,
 	pthread_mutex_lock(&mgr->bo_va_mutex);
 	/* TODO: using more appropriate way to track the holes */
 	/* first look for a hole */
-	LIST_FOR_EACH_ENTRY_SAFE(hole, n, &vamgr.va_holes, list) {
+	LIST_FOR_EACH_ENTRY_SAFE(hole, n, &mgr->va_holes, list) {
 		if (base_required) {
 			if(hole->offset > base_required ||
 			   (hole->offset + hole->size) < (base_required + size))
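The last hunk is the only behavioral line change: amdgpu_vamgr_find_va() walks the hole list first-fit, and it now walks the mgr argument's list instead of the old global's. A self-contained sketch of that first-fit pattern, using a hypothetical singly linked hole list in place of libdrm's util list macros and assuming power-of-two alignment (the real code also honors base_required and carves the chosen hole under bo_va_mutex):

	#include <stdint.h>
	#include <stddef.h>

	/* Hypothetical, simplified hole record; libdrm's version sits on a
	 * util list and is walked with LIST_FOR_EACH_ENTRY_SAFE. */
	struct va_hole {
		uint64_t offset;
		uint64_t size;
		struct va_hole *next;
	};

	/* First-fit search: return the start of the first hole that can hold
	 * an aligned allocation of `size` bytes, or 0 on failure (0 is taken
	 * as an invalid address in this sketch). */
	static uint64_t find_va_first_fit(struct va_hole *holes,
					  uint64_t size, uint64_t alignment)
	{
		for (struct va_hole *h = holes; h; h = h->next) {
			/* round the hole start up to the requested alignment
			 * (valid only for power-of-two alignments) */
			uint64_t start = (h->offset + alignment - 1) &
					 ~(alignment - 1);
			uint64_t waste = start - h->offset;

			if (h->size >= waste + size)
				return start; /* caller shrinks/splits the hole */
		}
		return 0;
	}

With one manager per device, two GPUs in one process each run this search over their own hole list, so allocations on one device can no longer collide with or exhaust the VA range of another.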