author    Marek Olšák    2015-05-29 10:13:12 -0500
committer Alex Deucher   2015-08-05 12:47:50 -0500
commit    2a344a8d8a7af0b242b262866742c253cd55d334 (patch)
tree      48b4d6fa070c9b708e7e4973eac45e0e52a09937 /amdgpu
parent    1041cfdc38692721364557ce61d7abe71a1a9cbf (diff)
amdgpu: don't use amdgpu_cs_create_ib for allocation of the fence BO
amdgpu_cs_create_ib will go away.

Reviewed-by: Jammy Zhou <Jammy.Zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
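In place of amdgpu_cs_create_ib, the context now allocates its 4 KiB user-fence buffer directly with amdgpu_bo_alloc and CPU-maps it with amdgpu_bo_cpu_map. Below is a minimal sketch of that pattern, using only the calls that appear in the diff; the helper name alloc_fence_bo and its signature are illustrative, not part of the patch.

/* Sketch: allocate a small CPU-visible GTT buffer and map it, mirroring
 * the sequence amdgpu_cs_ctx_create uses after this patch.
 * 'dev' is assumed to be a valid amdgpu_device_handle. */
#include <string.h>
#include "amdgpu.h"
#include "amdgpu_drm.h"

static int alloc_fence_bo(amdgpu_device_handle dev,
			  amdgpu_bo_handle *bo, void **cpu)
{
	struct amdgpu_bo_alloc_request req = {};
	struct amdgpu_bo_alloc_result info = {};
	int r;

	req.alloc_size = 4 * 1024;                  /* one 4 KiB page */
	req.phys_alignment = 4 * 1024;
	req.preferred_heap = AMDGPU_GEM_DOMAIN_GTT; /* CPU-accessible heap */

	r = amdgpu_bo_alloc(dev, &req, &info);
	if (r)
		return r;

	r = amdgpu_bo_cpu_map(info.buf_handle, cpu);
	if (r) {
		amdgpu_bo_free(info.buf_handle);
		return r;
	}

	*bo = info.buf_handle;
	return 0;
}

Teardown runs in the reverse order, amdgpu_bo_cpu_unmap followed by amdgpu_bo_free, which is exactly what the reworked amdgpu_cs_ctx_free does below.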
Diffstat (limited to 'amdgpu')
-rw-r--r--  amdgpu/amdgpu_cs.c        34
-rw-r--r--  amdgpu/amdgpu_internal.h   3
2 files changed, 28 insertions, 9 deletions
diff --git a/amdgpu/amdgpu_cs.c b/amdgpu/amdgpu_cs.c
index 1f153adb..326e3d32 100644
--- a/amdgpu/amdgpu_cs.c
+++ b/amdgpu/amdgpu_cs.c
@@ -165,6 +165,8 @@ int amdgpu_cs_alloc_ib(amdgpu_context_handle context,
 int amdgpu_cs_ctx_create(amdgpu_device_handle dev,
			 amdgpu_context_handle *context)
 {
+	struct amdgpu_bo_alloc_request alloc_buffer = {};
+	struct amdgpu_bo_alloc_result info = {};
 	struct amdgpu_context *gpu_context;
 	union drm_amdgpu_ctx args;
 	int r;
@@ -184,12 +186,21 @@ int amdgpu_cs_ctx_create(amdgpu_device_handle dev,
 	if (r)
 		goto error_mutex;
 
-	r = amdgpu_cs_create_ib(gpu_context, amdgpu_cs_ib_size_4K,
-				&gpu_context->fence_ib);
+	/* Create the fence BO */
+	alloc_buffer.alloc_size = 4 * 1024;
+	alloc_buffer.phys_alignment = 4 * 1024;
+	alloc_buffer.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;
+
+	r = amdgpu_bo_alloc(dev, &alloc_buffer, &info);
 	if (r)
-		goto error_fence_ib;
+		goto error_fence_alloc;
+	gpu_context->fence_bo = info.buf_handle;
 
+	r = amdgpu_bo_cpu_map(gpu_context->fence_bo, &gpu_context->fence_cpu);
+	if (r)
+		goto error_fence_map;
 
+	/* Create the context */
 	memset(&args, 0, sizeof(args));
 	args.in.op = AMDGPU_CTX_OP_ALLOC_CTX;
 	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_CTX, &args, sizeof(args));
@@ -202,9 +213,12 @@ int amdgpu_cs_ctx_create(amdgpu_device_handle dev,
 	return 0;
 
 error_kernel:
-	amdgpu_cs_free_ib(gpu_context->fence_ib);
+	amdgpu_bo_cpu_unmap(gpu_context->fence_bo);
 
-error_fence_ib:
+error_fence_map:
+	amdgpu_bo_free(gpu_context->fence_bo);
+
+error_fence_alloc:
 	pthread_mutex_destroy(&gpu_context->sequence_mutex);
 
 error_mutex:
@@ -228,7 +242,11 @@ int amdgpu_cs_ctx_free(amdgpu_context_handle context)
 	if (NULL == context)
 		return -EINVAL;
 
-	r = amdgpu_cs_free_ib(context->fence_ib);
+	r = amdgpu_bo_cpu_unmap(context->fence_bo);
+	if (r)
+		return r;
+
+	r = amdgpu_bo_free(context->fence_bo);
 	if (r)
 		return r;
 
@@ -351,7 +369,7 @@ static int amdgpu_cs_submit_one(amdgpu_context_handle context,
 		chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];
 
 		/* fence bo handle */
-		chunk_data[i].fence_data.handle = context->fence_ib->buf_handle->handle;
+		chunk_data[i].fence_data.handle = context->fence_bo->handle;
 		/* offset */
 		chunk_data[i].fence_data.offset = amdgpu_cs_fence_index(
 			ibs_request->ip_type, ibs_request->ring);
@@ -480,7 +498,7 @@ int amdgpu_cs_query_fence_status(struct amdgpu_cs_query_fence *fence,
 	ip_type = fence->ip_type;
 	ip_instance = fence->ip_instance;
 	ring = fence->ring;
-	signaled_fence = context->fence_ib->cpu;
+	signaled_fence = context->fence_cpu;
 	signaled_fence += amdgpu_cs_fence_index(ip_type, ring);
 	expired_fence = &context->expired_fences[ip_type][ip_instance][ring];
 	*expired = false;
diff --git a/amdgpu/amdgpu_internal.h b/amdgpu/amdgpu_internal.h
index e5a457ab..a4c29894 100644
--- a/amdgpu/amdgpu_internal.h
+++ b/amdgpu/amdgpu_internal.h
@@ -103,7 +103,8 @@ struct amdgpu_context {
 	   in good sequence. */
 	pthread_mutex_t sequence_mutex;
 	/** Buffer for user fences */
-	struct amdgpu_ib *fence_ib;
+	struct amdgpu_bo *fence_bo;
+	void *fence_cpu;
 	/** The newest expired fence for the ring of the ip blocks. */
 	uint64_t expired_fences[AMDGPU_HW_IP_NUM][AMDGPU_HW_IP_INSTANCE_MAX_COUNT][AMDGPU_CS_MAX_RINGS];
 	/* context id*/
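With the two new fields in struct amdgpu_context, a signaled user fence is just a 64-bit slot inside the CPU mapping of fence_bo. A rough sketch of the lookup amdgpu_cs_query_fence_status performs after this patch; the wrapper name read_signaled_fence and the parameter types are illustrative, while amdgpu_cs_fence_index is the existing internal helper already used in the diff above.

/* Sketch: read the current signaled fence value for one ring from the
 * CPU-mapped fence BO, as amdgpu_cs_query_fence_status does above. */
static uint64_t read_signaled_fence(struct amdgpu_context *context,
				    unsigned ip_type, unsigned ring)
{
	uint64_t *signaled_fence = context->fence_cpu;

	/* one 64-bit fence slot per (ip_type, ring) pair */
	signaled_fence += amdgpu_cs_fence_index(ip_type, ring);
	return *signaled_fence;
}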