author     Jammy Zhou     2015-07-13 07:57:44 -0500
committer  Alex Deucher   2015-08-05 12:47:52 -0500
commit     8aeffcc1cf3360fddd97f4a6b6f7300f401142ae
tree       1196cf4db93a65d76bead1b4e6d221d85cdc180a /amdgpu
parent     95d0f35dafff6c588da47c28332c252881f2e07c
amdgpu: add amdgpu_bo_va_op for va map/unmap support v3
The following interfaces are changed accordingly:
- amdgpu_bo_alloc
- amdgpu_create_bo_from_user_mem

v2: update the interfaces
v3: remove virtual_mc_base_address from amdgpu_bo

Signed-off-by: Jammy Zhou <Jammy.Zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
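With this series a UMD reserves its own GPU virtual address range and maps the buffer explicitly, instead of receiving a VA from amdgpu_bo_alloc(). A minimal caller-side sketch follows, assuming the amdgpu_va_range_alloc()/amdgpu_va_range_free() helpers already present in this header; the helper name alloc_and_map() and its parameters are illustrative only, not part of the patch:

/*
 * Illustrative sketch only: allocate a BO, reserve a GPU VA range and map it
 * with the new amdgpu_bo_va_op().  amdgpu_va_range_alloc()/amdgpu_va_range_free()
 * are assumed available; alloc_and_map() itself is hypothetical.
 */
#include "amdgpu.h"
#include "amdgpu_drm.h"

static int alloc_and_map(amdgpu_device_handle dev, uint64_t size,
			 amdgpu_bo_handle *bo, uint64_t *gpu_va,
			 amdgpu_va_handle *va_handle)
{
	struct amdgpu_bo_alloc_request req = {
		.alloc_size = size,
		.phys_alignment = 4096,
		.preferred_heap = AMDGPU_GEM_DOMAIN_VRAM,
	};
	int r;

	/* v3: amdgpu_bo_alloc() returns only the handle, no VA. */
	r = amdgpu_bo_alloc(dev, &req, bo);
	if (r)
		return r;

	/* The caller now owns VA management. */
	r = amdgpu_va_range_alloc(dev, amdgpu_gpu_va_range_general, size,
				  4096, 0, gpu_va, va_handle, 0);
	if (r)
		goto err_bo;

	/* Map the whole BO; flags are currently unused by amdgpu_bo_va_op(). */
	r = amdgpu_bo_va_op(*bo, 0, size, *gpu_va, 0, AMDGPU_VA_OP_MAP);
	if (r)
		goto err_va;

	return 0;

err_va:
	amdgpu_va_range_free(*va_handle);
err_bo:
	amdgpu_bo_free(*bo);
	return r;
}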
Diffstat (limited to 'amdgpu')
-rw-r--r--  amdgpu/amdgpu.h            54
-rw-r--r--  amdgpu/amdgpu_bo.c        130
-rw-r--r--  amdgpu/amdgpu_internal.h    1
3 files changed, 59 insertions(+), 126 deletions(-)
diff --git a/amdgpu/amdgpu.h b/amdgpu/amdgpu.h
index bc6751ae..f14b7f45 100644
--- a/amdgpu/amdgpu.h
+++ b/amdgpu/amdgpu.h
@@ -156,19 +156,6 @@ struct amdgpu_bo_alloc_request {
 };
 
 /**
- * Structure describing memory allocation request
- *
- * \sa amdgpu_bo_alloc()
-*/
-struct amdgpu_bo_alloc_result {
-	/** Assigned virtual MC Base Address */
-	uint64_t virtual_mc_base_address;
-
-	/** Handle of allocated memory to be used by the given process only. */
-	amdgpu_bo_handle buf_handle;
-};
-
-/**
  * Special UMD specific information associated with buffer.
  *
  * It may be need to pass some buffer charactersitic as part
@@ -213,13 +200,6 @@ struct amdgpu_bo_info {
 	 */
 	uint64_t phys_alignment;
 
-	/**
-	 * Assigned virtual MC Base Address.
-	 * \note This information will be returned only if this buffer was
-	 * allocated in the same process otherwise 0 will be returned.
-	 */
-	uint64_t virtual_mc_base_address;
-
 	/** Heap where to allocate memory. */
 	uint32_t preferred_heap;
 
@@ -242,9 +222,6 @@ struct amdgpu_bo_import_result {
 
 	/** Buffer size */
 	uint64_t alloc_size;
-
-	/** Assigned virtual MC Base Address */
-	uint64_t virtual_mc_base_address;
 };
 
 /**
@@ -558,8 +535,7 @@ int amdgpu_device_deinitialize(amdgpu_device_handle device_handle);
 * See #amdgpu_device_initialize()
 * \param alloc_buffer - \c [in] Pointer to the structure describing an
 *			 allocation request
-* \param info		- \c [out] Pointer to structure which return
-*			  information about allocated memory
+* \param buf_handle	- \c [out] Allocated buffer handle
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
@@ -568,7 +544,7 @@ int amdgpu_device_deinitialize(amdgpu_device_handle device_handle);
 */
 int amdgpu_bo_alloc(amdgpu_device_handle dev,
 		    struct amdgpu_bo_alloc_request *alloc_buffer,
-		    struct amdgpu_bo_alloc_result *info);
+		    amdgpu_bo_handle *buf_handle);
 
 /**
  * Associate opaque data with buffer to be queried by another UMD
@@ -652,7 +628,7 @@ int amdgpu_bo_import(amdgpu_device_handle dev,
 *		      want to map to GPU address space (make GPU accessible)
 *		      (This address must be correctly aligned).
 * \param size	    - [in] Size of allocation (must be correctly aligned)
-* \param amdgpu_bo_alloc_result - [out] Handle of allocation to be passed as
+* \param buf_handle - [out] Buffer handle for the userptr memory
 *			    resource on submission and be used in other operations.
 *
 *
@@ -677,7 +653,7 @@ int amdgpu_bo_import(amdgpu_device_handle dev,
 */
 int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
 				    void *cpu, uint64_t size,
-				    struct amdgpu_bo_alloc_result *info);
+				    amdgpu_bo_handle *buf_handle);
 
 /**
  * Free previosuly allocated memory
@@ -1173,4 +1149,26 @@ int amdgpu_va_range_query(amdgpu_device_handle dev,
 			  uint64_t *start,
 			  uint64_t *end);
 
+/**
+ *  VA mapping/unmapping for the buffer object
+ *
+ * \param  bo		- \c [in] BO handle
+ * \param  offset	- \c [in] Start offset to map
+ * \param  size		- \c [in] Size to map
+ * \param  addr		- \c [in] Start virtual address.
+ * \param  flags	- \c [in] Supported flags for mapping/unmapping
+ * \param  ops		- \c [in] AMDGPU_VA_OP_MAP or AMDGPU_VA_OP_UNMAP
+ *
+ * \return   0 on success\n
+ *          <0 - Negative POSIX Error code
+ *
+*/
+
+int amdgpu_bo_va_op(amdgpu_bo_handle bo,
+		    uint64_t offset,
+		    uint64_t size,
+		    uint64_t addr,
+		    uint64_t flags,
+		    uint32_t ops);
+
 #endif /* #ifdef _AMDGPU_H_ */
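The reverse path is symmetric. A hedged teardown sketch, matching the mapping example above (the helper name unmap_and_free() is hypothetical, and this version of amdgpu_bo_va_op() ignores the flags argument):

/* Hypothetical teardown for a mapping made with AMDGPU_VA_OP_MAP above. */
static void unmap_and_free(amdgpu_bo_handle bo, uint64_t gpu_va,
			   uint64_t size, amdgpu_va_handle va_handle)
{
	/* Errors are ignored here for brevity; real code should check them. */
	amdgpu_bo_va_op(bo, 0, size, gpu_va, 0, AMDGPU_VA_OP_UNMAP);
	amdgpu_va_range_free(va_handle);
	amdgpu_bo_free(bo);
}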
diff --git a/amdgpu/amdgpu_bo.c b/amdgpu/amdgpu_bo.c
index 1ef15162..a17bd0f5 100644
--- a/amdgpu/amdgpu_bo.c
+++ b/amdgpu/amdgpu_bo.c
@@ -52,72 +52,6 @@ static void amdgpu_close_kms_handle(amdgpu_device_handle dev,
 	drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &args);
 }
 
-/* map the buffer to the GPU virtual address space */
-static int amdgpu_bo_map(amdgpu_bo_handle bo, uint32_t alignment)
-{
-	amdgpu_device_handle dev = bo->dev;
-	struct drm_amdgpu_gem_va va;
-	int r;
-
-	memset(&va, 0, sizeof(va));
-
-	bo->virtual_mc_base_address = amdgpu_vamgr_find_va(dev->vamgr,
-					bo->alloc_size, alignment, 0);
-
-	if (bo->virtual_mc_base_address == AMDGPU_INVALID_VA_ADDRESS)
-		return -ENOSPC;
-
-	va.handle = bo->handle;
-	va.operation = AMDGPU_VA_OP_MAP;
-	va.flags = AMDGPU_VM_PAGE_READABLE |
-		   AMDGPU_VM_PAGE_WRITEABLE |
-		   AMDGPU_VM_PAGE_EXECUTABLE;
-	va.va_address = bo->virtual_mc_base_address;
-	va.offset_in_bo = 0;
-	va.map_size = ALIGN(bo->alloc_size, getpagesize());
-
-	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));
-	if (r) {
-		amdgpu_bo_free_internal(bo);
-		return r;
-	}
-
-	return 0;
-}
-
-/* unmap the buffer from the GPU virtual address space */
-static void amdgpu_bo_unmap(amdgpu_bo_handle bo)
-{
-	amdgpu_device_handle dev = bo->dev;
-	struct drm_amdgpu_gem_va va;
-	int r;
-
-	if (bo->virtual_mc_base_address == AMDGPU_INVALID_VA_ADDRESS)
-		return;
-
-	memset(&va, 0, sizeof(va));
-
-	va.handle = bo->handle;
-	va.operation = AMDGPU_VA_OP_UNMAP;
-	va.flags = AMDGPU_VM_PAGE_READABLE |
-		   AMDGPU_VM_PAGE_WRITEABLE |
-		   AMDGPU_VM_PAGE_EXECUTABLE;
-	va.va_address = bo->virtual_mc_base_address;
-	va.offset_in_bo = 0;
-	va.map_size = ALIGN(bo->alloc_size, getpagesize());
-
-	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));
-	if (r) {
-		fprintf(stderr, "amdgpu: VA_OP_UNMAP failed with %d\n", r);
-		return;
-	}
-
-	amdgpu_vamgr_free_va(bo->dev->vamgr, bo->virtual_mc_base_address,
-			     bo->alloc_size);
-
-	bo->virtual_mc_base_address = AMDGPU_INVALID_VA_ADDRESS;
-}
-
 void amdgpu_bo_free_internal(amdgpu_bo_handle bo)
 {
 	/* Remove the buffer from the hash tables. */
@@ -136,7 +70,6 @@ void amdgpu_bo_free_internal(amdgpu_bo_handle bo)
 		amdgpu_bo_cpu_unmap(bo);
 	}
 
-	amdgpu_bo_unmap(bo);
 	amdgpu_close_kms_handle(bo->dev, bo->handle);
 	pthread_mutex_destroy(&bo->cpu_access_mutex);
 	free(bo);
@@ -144,7 +77,7 @@ void amdgpu_bo_free_internal(amdgpu_bo_handle bo)
 
 int amdgpu_bo_alloc(amdgpu_device_handle dev,
 		    struct amdgpu_bo_alloc_request *alloc_buffer,
-		    struct amdgpu_bo_alloc_result *info)
+		    amdgpu_bo_handle *buf_handle)
 {
 	struct amdgpu_bo *bo;
 	union drm_amdgpu_gem_create args;
@@ -183,14 +116,7 @@ int amdgpu_bo_alloc(amdgpu_device_handle dev,
 
 	pthread_mutex_init(&bo->cpu_access_mutex, NULL);
 
-	r = amdgpu_bo_map(bo, alloc_buffer->phys_alignment);
-	if (r) {
-		amdgpu_bo_free_internal(bo);
-		return r;
-	}
-
-	info->buf_handle = bo;
-	info->virtual_mc_base_address = bo->virtual_mc_base_address;
+	*buf_handle = bo;
 	return 0;
 }
 
@@ -255,7 +181,6 @@ int amdgpu_bo_query_info(amdgpu_bo_handle bo,
 	memset(info, 0, sizeof(*info));
 	info->alloc_size = bo_info.bo_size;
 	info->phys_alignment = bo_info.alignment;
-	info->virtual_mc_base_address = bo->virtual_mc_base_address;
 	info->preferred_heap = bo_info.domains;
 	info->alloc_flags = bo_info.domain_flags;
 	info->metadata.flags = metadata.data.flags;
@@ -421,8 +346,6 @@ int amdgpu_bo_import(amdgpu_device_handle dev,
 
 		output->buf_handle = bo;
 		output->alloc_size = bo->alloc_size;
-		output->virtual_mc_base_address =
-			bo->virtual_mc_base_address;
 		return 0;
 	}
 
@@ -484,19 +407,11 @@ int amdgpu_bo_import(amdgpu_device_handle dev,
 	bo->dev = dev;
 	pthread_mutex_init(&bo->cpu_access_mutex, NULL);
 
-	r = amdgpu_bo_map(bo, 1 << 20);
-	if (r) {
-		pthread_mutex_unlock(&dev->bo_table_mutex);
-		amdgpu_bo_reference(&bo, NULL);
-		return r;
-	}
-
 	util_hash_table_set(dev->bo_handles, (void*)(uintptr_t)bo->handle, bo);
 	pthread_mutex_unlock(&dev->bo_table_mutex);
 
 	output->buf_handle = bo;
 	output->alloc_size = bo->alloc_size;
-	output->virtual_mc_base_address = bo->virtual_mc_base_address;
 	return 0;
 }
 
@@ -615,7 +530,7 @@ int amdgpu_bo_wait_for_idle(amdgpu_bo_handle bo,
 int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
 				    void *cpu,
 				    uint64_t size,
-				    struct amdgpu_bo_alloc_result *info)
+				    amdgpu_bo_handle *buf_handle)
 {
 	int r;
 	struct amdgpu_bo *bo;
@@ -647,15 +562,7 @@ int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
 	bo->alloc_size = size;
 	bo->handle = args.handle;
 
-	r = amdgpu_bo_map(bo, 1 << 12);
-	if (r) {
-		amdgpu_bo_free_internal(bo);
-		return r;
-	}
-
-	info->buf_handle = bo;
-	info->virtual_mc_base_address = bo->virtual_mc_base_address;
-	info->virtual_mc_base_address += off;
+	*buf_handle = bo;
 
 	return r;
 }
@@ -766,3 +673,32 @@ int amdgpu_bo_list_update(amdgpu_bo_list_handle handle,
 	free(list);
 	return r;
 }
+
+int amdgpu_bo_va_op(amdgpu_bo_handle bo,
+		     uint64_t offset,
+		     uint64_t size,
+		     uint64_t addr,
+		     uint64_t flags,
+		     uint32_t ops)
+{
+	amdgpu_device_handle dev = bo->dev;
+	struct drm_amdgpu_gem_va va;
+	int r;
+
+	if (ops != AMDGPU_VA_OP_MAP && ops != AMDGPU_VA_OP_UNMAP)
+		return -EINVAL;
+
+	memset(&va, 0, sizeof(va));
+	va.handle = bo->handle;
+	va.operation = ops;
+	va.flags = AMDGPU_VM_PAGE_READABLE |
+		   AMDGPU_VM_PAGE_WRITEABLE |
+		   AMDGPU_VM_PAGE_EXECUTABLE;
+	va.va_address = addr;
+	va.offset_in_bo = offset;
+	va.map_size = ALIGN(size, getpagesize());
+
+	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));
+
+	return r;
+}
diff --git a/amdgpu/amdgpu_internal.h b/amdgpu/amdgpu_internal.h
index bf7788dd..526a93f8 100644
--- a/amdgpu/amdgpu_internal.h
+++ b/amdgpu/amdgpu_internal.h
@@ -88,7 +88,6 @@ struct amdgpu_bo {
 	struct amdgpu_device *dev;
 
 	uint64_t alloc_size;
-	uint64_t virtual_mc_base_address;
 
 	uint32_t handle;
 	uint32_t flink_name;