aboutsummaryrefslogtreecommitdiffstats
path: root/amdgpu
diff options
context:
space:
mode:
authorJammy Zhou2015-08-16 22:09:08 -0500
committerAlex Deucher2015-08-17 15:26:26 -0500
commitffa305d0fc926418e4dff432381ead8907dc18d9 (patch)
tree1581c8b5ba358942fe6a376bf85e7750af6d3d3a /amdgpu
parent102ab6f0049c2c85857fd19f098bc5b51e2a8a60 (diff)
downloadexternal-libdrm-ffa305d0fc926418e4dff432381ead8907dc18d9.tar.gz
external-libdrm-ffa305d0fc926418e4dff432381ead8907dc18d9.tar.xz
external-libdrm-ffa305d0fc926418e4dff432381ead8907dc18d9.zip
amdgpu: add flag to support 32bit VA address v4
The AMDGPU_VA_RANGE_32_BIT flag is added to request VA range in the 32bit address space for amdgpu_va_range_alloc. The 32bit address space is reserved at initialization time, and managed with a separate VAMGR as part of the global VAMGR. And if not enough VA space is available in the range above 4GB, this reserved range can be used as fallback. v2: add comment for AMDGPU_VA_RANGE_32_BIT, and add vamgr to va_range v3: rebase to Emil's drm_private series v4: fix one warning Signed-off-by: Jammy Zhou <Jammy.Zhou@amd.com> Reviewed-by: Christian König <christian.koenig@amd.com>
Diffstat (limited to 'amdgpu')
-rw-r--r--amdgpu/amdgpu.h5
-rw-r--r--amdgpu/amdgpu_device.c20
-rw-r--r--amdgpu/amdgpu_internal.h9
-rw-r--r--amdgpu/amdgpu_vamgr.c32
4 files changed, 59 insertions, 7 deletions
diff --git a/amdgpu/amdgpu.h b/amdgpu/amdgpu.h
index a3eea84a..e44d802b 100644
--- a/amdgpu/amdgpu.h
+++ b/amdgpu/amdgpu.h
@@ -1075,6 +1075,11 @@ int amdgpu_read_mm_registers(amdgpu_device_handle dev, unsigned dword_offset,
1075 uint32_t *values); 1075 uint32_t *values);
1076 1076
1077/** 1077/**
1078 * Flag to request VA address range in the 32bit address space
1079*/
1080#define AMDGPU_VA_RANGE_32_BIT 0x1
1081
1082/**
1078 * Allocate virtual address range 1083 * Allocate virtual address range
1079 * 1084 *
1080 * \param dev - [in] Device handle. See #amdgpu_device_initialize() 1085 * \param dev - [in] Device handle. See #amdgpu_device_initialize()
diff --git a/amdgpu/amdgpu_device.c b/amdgpu/amdgpu_device.c
index c6bbae81..e16cd24b 100644
--- a/amdgpu/amdgpu_device.c
+++ b/amdgpu/amdgpu_device.c
@@ -43,6 +43,7 @@
43#include "amdgpu_drm.h" 43#include "amdgpu_drm.h"
44#include "amdgpu_internal.h" 44#include "amdgpu_internal.h"
45#include "util_hash_table.h" 45#include "util_hash_table.h"
46#include "util_math.h"
46 47
47#define PTR_TO_UINT(x) ((unsigned)((intptr_t)(x))) 48#define PTR_TO_UINT(x) ((unsigned)((intptr_t)(x)))
48#define UINT_TO_PTR(x) ((void *)((intptr_t)(x))) 49#define UINT_TO_PTR(x) ((void *)((intptr_t)(x)))
@@ -173,6 +174,7 @@ int amdgpu_device_initialize(int fd,
173 int flag_auth = 0; 174 int flag_auth = 0;
174 int flag_authexist=0; 175 int flag_authexist=0;
175 uint32_t accel_working = 0; 176 uint32_t accel_working = 0;
177 uint64_t start, max;
176 178
177 *device_handle = NULL; 179 *device_handle = NULL;
178 180
@@ -251,6 +253,19 @@ int amdgpu_device_initialize(int fd,
251 253
252 dev->vamgr = amdgpu_vamgr_get_global(dev); 254 dev->vamgr = amdgpu_vamgr_get_global(dev);
253 255
256 max = MIN2(dev->dev_info.virtual_address_max, 0xffffffff);
257 start = amdgpu_vamgr_find_va(dev->vamgr,
258 max - dev->dev_info.virtual_address_offset,
259 dev->dev_info.virtual_address_alignment, 0);
260 if (start > 0xffffffff)
261 goto free_va; /* shouldn't get here */
262
263 dev->vamgr_32 = calloc(1, sizeof(struct amdgpu_bo_va_mgr));
264 if (dev->vamgr_32 == NULL)
265 goto free_va;
266 amdgpu_vamgr_init(dev->vamgr_32, start, max,
267 dev->dev_info.virtual_address_alignment);
268
254 *major_version = dev->major_version; 269 *major_version = dev->major_version;
255 *minor_version = dev->minor_version; 270 *minor_version = dev->minor_version;
256 *device_handle = dev; 271 *device_handle = dev;
@@ -259,6 +274,11 @@ int amdgpu_device_initialize(int fd,
259 274
260 return 0; 275 return 0;
261 276
277free_va:
278 r = -ENOMEM;
279 amdgpu_vamgr_free_va(dev->vamgr, start,
280 max - dev->dev_info.virtual_address_offset);
281
262cleanup: 282cleanup:
263 if (dev->fd >= 0) 283 if (dev->fd >= 0)
264 close(dev->fd); 284 close(dev->fd);
diff --git a/amdgpu/amdgpu_internal.h b/amdgpu/amdgpu_internal.h
index 4b07aff8..3ce0969e 100644
--- a/amdgpu/amdgpu_internal.h
+++ b/amdgpu/amdgpu_internal.h
@@ -66,6 +66,7 @@ struct amdgpu_va {
66 uint64_t address; 66 uint64_t address;
67 uint64_t size; 67 uint64_t size;
68 enum amdgpu_gpu_va_range range; 68 enum amdgpu_gpu_va_range range;
69 struct amdgpu_bo_va_mgr *vamgr;
69}; 70};
70 71
71struct amdgpu_device { 72struct amdgpu_device {
@@ -83,7 +84,10 @@ struct amdgpu_device {
83 pthread_mutex_t bo_table_mutex; 84 pthread_mutex_t bo_table_mutex;
84 struct drm_amdgpu_info_device dev_info; 85 struct drm_amdgpu_info_device dev_info;
85 struct amdgpu_gpu_info info; 86 struct amdgpu_gpu_info info;
87 /** The global VA manager for the whole virtual address space */
86 struct amdgpu_bo_va_mgr *vamgr; 88 struct amdgpu_bo_va_mgr *vamgr;
89 /** The VA manager for the 32bit address space */
90 struct amdgpu_bo_va_mgr *vamgr_32;
87}; 91};
88 92
89struct amdgpu_bo { 93struct amdgpu_bo {
@@ -128,6 +132,11 @@ drm_private void
128amdgpu_vamgr_reference(struct amdgpu_bo_va_mgr **dst, 132amdgpu_vamgr_reference(struct amdgpu_bo_va_mgr **dst,
129 struct amdgpu_bo_va_mgr *src); 133 struct amdgpu_bo_va_mgr *src);
130 134
135drm_private void amdgpu_vamgr_init(struct amdgpu_bo_va_mgr *mgr, uint64_t start,
136 uint64_t max, uint64_t alignment);
137
138drm_private void amdgpu_vamgr_deinit(struct amdgpu_bo_va_mgr *mgr);
139
131drm_private uint64_t 140drm_private uint64_t
132amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, uint64_t size, 141amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, uint64_t size,
133 uint64_t alignment, uint64_t base_required); 142 uint64_t alignment, uint64_t base_required);
diff --git a/amdgpu/amdgpu_vamgr.c b/amdgpu/amdgpu_vamgr.c
index eef8a71a..507a73a2 100644
--- a/amdgpu/amdgpu_vamgr.c
+++ b/amdgpu/amdgpu_vamgr.c
@@ -46,7 +46,7 @@ int amdgpu_va_range_query(amdgpu_device_handle dev,
46 return -EINVAL; 46 return -EINVAL;
47} 47}
48 48
49static void amdgpu_vamgr_init(struct amdgpu_bo_va_mgr *mgr, uint64_t start, 49drm_private void amdgpu_vamgr_init(struct amdgpu_bo_va_mgr *mgr, uint64_t start,
50 uint64_t max, uint64_t alignment) 50 uint64_t max, uint64_t alignment)
51{ 51{
52 mgr->va_offset = start; 52 mgr->va_offset = start;
@@ -57,7 +57,7 @@ static void amdgpu_vamgr_init(struct amdgpu_bo_va_mgr *mgr, uint64_t start,
57 pthread_mutex_init(&mgr->bo_va_mutex, NULL); 57 pthread_mutex_init(&mgr->bo_va_mutex, NULL);
58} 58}
59 59
60static void amdgpu_vamgr_deinit(struct amdgpu_bo_va_mgr *mgr) 60drm_private void amdgpu_vamgr_deinit(struct amdgpu_bo_va_mgr *mgr)
61{ 61{
62 struct amdgpu_bo_va_hole *hole; 62 struct amdgpu_bo_va_hole *hole;
63 LIST_FOR_EACH_ENTRY(hole, &mgr->va_holes, list) { 63 LIST_FOR_EACH_ENTRY(hole, &mgr->va_holes, list) {
@@ -255,23 +255,39 @@ int amdgpu_va_range_alloc(amdgpu_device_handle dev,
255 amdgpu_va_handle *va_range_handle, 255 amdgpu_va_handle *va_range_handle,
256 uint64_t flags) 256 uint64_t flags)
257{ 257{
258 va_base_alignment = MAX2(va_base_alignment, dev->vamgr->va_alignment); 258 struct amdgpu_bo_va_mgr *vamgr;
259 size = ALIGN(size, vamgr.va_alignment);
260 259
261 *va_base_allocated = amdgpu_vamgr_find_va(dev->vamgr, size, 260 if (flags & AMDGPU_VA_RANGE_32_BIT)
261 vamgr = dev->vamgr_32;
262 else
263 vamgr = dev->vamgr;
264
265 va_base_alignment = MAX2(va_base_alignment, vamgr->va_alignment);
266 size = ALIGN(size, vamgr->va_alignment);
267
268 *va_base_allocated = amdgpu_vamgr_find_va(vamgr, size,
269 va_base_alignment, va_base_required);
270
271 if (!(flags & AMDGPU_VA_RANGE_32_BIT) &&
272 (*va_base_allocated == AMDGPU_INVALID_VA_ADDRESS)) {
273 /* fallback to 32bit address */
274 vamgr = dev->vamgr_32;
275 *va_base_allocated = amdgpu_vamgr_find_va(vamgr, size,
262 va_base_alignment, va_base_required); 276 va_base_alignment, va_base_required);
277 }
263 278
264 if (*va_base_allocated != AMDGPU_INVALID_VA_ADDRESS) { 279 if (*va_base_allocated != AMDGPU_INVALID_VA_ADDRESS) {
265 struct amdgpu_va* va; 280 struct amdgpu_va* va;
266 va = calloc(1, sizeof(struct amdgpu_va)); 281 va = calloc(1, sizeof(struct amdgpu_va));
267 if(!va){ 282 if(!va){
268 amdgpu_vamgr_free_va(dev->vamgr, *va_base_allocated, size); 283 amdgpu_vamgr_free_va(vamgr, *va_base_allocated, size);
269 return -ENOMEM; 284 return -ENOMEM;
270 } 285 }
271 va->dev = dev; 286 va->dev = dev;
272 va->address = *va_base_allocated; 287 va->address = *va_base_allocated;
273 va->size = size; 288 va->size = size;
274 va->range = va_range_type; 289 va->range = va_range_type;
290 va->vamgr = vamgr;
275 *va_range_handle = va; 291 *va_range_handle = va;
276 } else { 292 } else {
277 return -EINVAL; 293 return -EINVAL;
@@ -284,7 +300,9 @@ int amdgpu_va_range_free(amdgpu_va_handle va_range_handle)
284{ 300{
285 if(!va_range_handle || !va_range_handle->address) 301 if(!va_range_handle || !va_range_handle->address)
286 return 0; 302 return 0;
287 amdgpu_vamgr_free_va(va_range_handle->dev->vamgr, va_range_handle->address, 303
304 amdgpu_vamgr_free_va(va_range_handle->vamgr,
305 va_range_handle->address,
288 va_range_handle->size); 306 va_range_handle->size);
289 free(va_range_handle); 307 free(va_range_handle);
290 return 0; 308 return 0;