diff options
author | Jammy Zhou | 2015-08-16 22:09:08 -0500 |
---|---|---|
committer | Alex Deucher | 2015-08-17 15:26:26 -0500 |
commit | ffa305d0fc926418e4dff432381ead8907dc18d9 (patch) | |
tree | 1581c8b5ba358942fe6a376bf85e7750af6d3d3a /amdgpu/amdgpu_vamgr.c | |
parent | 102ab6f0049c2c85857fd19f098bc5b51e2a8a60 (diff) | |
download | external-libdrm-ffa305d0fc926418e4dff432381ead8907dc18d9.tar.gz external-libdrm-ffa305d0fc926418e4dff432381ead8907dc18d9.tar.xz external-libdrm-ffa305d0fc926418e4dff432381ead8907dc18d9.zip |
amdgpu: add flag to support 32bit VA address v4
The AMDGPU_VA_RANGE_32_BIT flag is added to request VA range in the
32bit address space for amdgpu_va_range_alloc.
The 32bit address space is reserved at initialization time, and managed
with a separate VAMGR as part of the global VAMGR. If not enough VA
space is available in the range above 4GB, this reserved range can be used
as a fallback.
v2: add comment for AMDGPU_VA_RANGE_32_BIT, and add vamgr to va_range
v3: rebase to Emil's drm_private series
v4: fix one warning
Signed-off-by: Jammy Zhou <Jammy.Zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Diffstat (limited to 'amdgpu/amdgpu_vamgr.c')
-rw-r--r-- | amdgpu/amdgpu_vamgr.c | 32 |
1 file changed, 25 insertions, 7 deletions
diff --git a/amdgpu/amdgpu_vamgr.c b/amdgpu/amdgpu_vamgr.c index eef8a71a..507a73a2 100644 --- a/amdgpu/amdgpu_vamgr.c +++ b/amdgpu/amdgpu_vamgr.c | |||
@@ -46,7 +46,7 @@ int amdgpu_va_range_query(amdgpu_device_handle dev, | |||
46 | return -EINVAL; | 46 | return -EINVAL; |
47 | } | 47 | } |
48 | 48 | ||
49 | static void amdgpu_vamgr_init(struct amdgpu_bo_va_mgr *mgr, uint64_t start, | 49 | drm_private void amdgpu_vamgr_init(struct amdgpu_bo_va_mgr *mgr, uint64_t start, |
50 | uint64_t max, uint64_t alignment) | 50 | uint64_t max, uint64_t alignment) |
51 | { | 51 | { |
52 | mgr->va_offset = start; | 52 | mgr->va_offset = start; |
@@ -57,7 +57,7 @@ static void amdgpu_vamgr_init(struct amdgpu_bo_va_mgr *mgr, uint64_t start, | |||
57 | pthread_mutex_init(&mgr->bo_va_mutex, NULL); | 57 | pthread_mutex_init(&mgr->bo_va_mutex, NULL); |
58 | } | 58 | } |
59 | 59 | ||
60 | static void amdgpu_vamgr_deinit(struct amdgpu_bo_va_mgr *mgr) | 60 | drm_private void amdgpu_vamgr_deinit(struct amdgpu_bo_va_mgr *mgr) |
61 | { | 61 | { |
62 | struct amdgpu_bo_va_hole *hole; | 62 | struct amdgpu_bo_va_hole *hole; |
63 | LIST_FOR_EACH_ENTRY(hole, &mgr->va_holes, list) { | 63 | LIST_FOR_EACH_ENTRY(hole, &mgr->va_holes, list) { |
@@ -255,23 +255,39 @@ int amdgpu_va_range_alloc(amdgpu_device_handle dev, | |||
255 | amdgpu_va_handle *va_range_handle, | 255 | amdgpu_va_handle *va_range_handle, |
256 | uint64_t flags) | 256 | uint64_t flags) |
257 | { | 257 | { |
258 | va_base_alignment = MAX2(va_base_alignment, dev->vamgr->va_alignment); | 258 | struct amdgpu_bo_va_mgr *vamgr; |
259 | size = ALIGN(size, vamgr.va_alignment); | ||
260 | 259 | ||
261 | *va_base_allocated = amdgpu_vamgr_find_va(dev->vamgr, size, | 260 | if (flags & AMDGPU_VA_RANGE_32_BIT) |
261 | vamgr = dev->vamgr_32; | ||
262 | else | ||
263 | vamgr = dev->vamgr; | ||
264 | |||
265 | va_base_alignment = MAX2(va_base_alignment, vamgr->va_alignment); | ||
266 | size = ALIGN(size, vamgr->va_alignment); | ||
267 | |||
268 | *va_base_allocated = amdgpu_vamgr_find_va(vamgr, size, | ||
269 | va_base_alignment, va_base_required); | ||
270 | |||
271 | if (!(flags & AMDGPU_VA_RANGE_32_BIT) && | ||
272 | (*va_base_allocated == AMDGPU_INVALID_VA_ADDRESS)) { | ||
273 | /* fallback to 32bit address */ | ||
274 | vamgr = dev->vamgr_32; | ||
275 | *va_base_allocated = amdgpu_vamgr_find_va(vamgr, size, | ||
262 | va_base_alignment, va_base_required); | 276 | va_base_alignment, va_base_required); |
277 | } | ||
263 | 278 | ||
264 | if (*va_base_allocated != AMDGPU_INVALID_VA_ADDRESS) { | 279 | if (*va_base_allocated != AMDGPU_INVALID_VA_ADDRESS) { |
265 | struct amdgpu_va* va; | 280 | struct amdgpu_va* va; |
266 | va = calloc(1, sizeof(struct amdgpu_va)); | 281 | va = calloc(1, sizeof(struct amdgpu_va)); |
267 | if(!va){ | 282 | if(!va){ |
268 | amdgpu_vamgr_free_va(dev->vamgr, *va_base_allocated, size); | 283 | amdgpu_vamgr_free_va(vamgr, *va_base_allocated, size); |
269 | return -ENOMEM; | 284 | return -ENOMEM; |
270 | } | 285 | } |
271 | va->dev = dev; | 286 | va->dev = dev; |
272 | va->address = *va_base_allocated; | 287 | va->address = *va_base_allocated; |
273 | va->size = size; | 288 | va->size = size; |
274 | va->range = va_range_type; | 289 | va->range = va_range_type; |
290 | va->vamgr = vamgr; | ||
275 | *va_range_handle = va; | 291 | *va_range_handle = va; |
276 | } else { | 292 | } else { |
277 | return -EINVAL; | 293 | return -EINVAL; |
@@ -284,7 +300,9 @@ int amdgpu_va_range_free(amdgpu_va_handle va_range_handle) | |||
284 | { | 300 | { |
285 | if(!va_range_handle || !va_range_handle->address) | 301 | if(!va_range_handle || !va_range_handle->address) |
286 | return 0; | 302 | return 0; |
287 | amdgpu_vamgr_free_va(va_range_handle->dev->vamgr, va_range_handle->address, | 303 | |
304 | amdgpu_vamgr_free_va(va_range_handle->vamgr, | ||
305 | va_range_handle->address, | ||
288 | va_range_handle->size); | 306 | va_range_handle->size); |
289 | free(va_range_handle); | 307 | free(va_range_handle); |
290 | return 0; | 308 | return 0; |