author	Christian König	2015-04-22 05:21:13 -0500
committer	Alex Deucher	2015-08-05 12:47:49 -0500
commit	9c2afffedb773da27fd7506b31fc2164f329d3a8 (patch)
tree	0b99a1763fdc9fc977d3575bc1dba584d955d7e0 /amdgpu
parent	9c3bec246e5f1f7d2fcb2bd7e7cc0deab4d8b20d (diff)
download	external-libgbm-9c2afffedb773da27fd7506b31fc2164f329d3a8.tar.gz
	external-libgbm-9c2afffedb773da27fd7506b31fc2164f329d3a8.tar.xz
	external-libgbm-9c2afffedb773da27fd7506b31fc2164f329d3a8.zip
amdgpu: cleanup public interface v2
Remove the mostly unused device parameter, for the few cases where we
really need it keep a copy in the context structure.

v2: rebased on internal branch

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Jammy Zhou <Jammy.Zhou@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
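Illustration (not part of the commit message): a minimal sketch of how a caller changes with this cleanup. Only the function signatures come from the patch below; the handle variables and their initialization are assumed.

	amdgpu_device_handle dev;		/* assumed initialized */
	amdgpu_context_handle context;		/* assumed created */
	struct amdgpu_cs_ib_alloc_result ib_result;
	int r;

	/* Before this patch: the device handle accompanied every call. */
	r = amdgpu_cs_alloc_ib(dev, context, amdgpu_cs_ib_size_4K, &ib_result);
	r = amdgpu_cs_free_ib(dev, context, ib_result.handle);
	r = amdgpu_cs_ctx_free(dev, context);

	/* After: the context (or IB) handle alone suffices; the device is
	 * reachable internally through the new context->dev back-pointer. */
	r = amdgpu_cs_alloc_ib(context, amdgpu_cs_ib_size_4K, &ib_result);
	r = amdgpu_cs_free_ib(ib_result.handle);
	r = amdgpu_cs_ctx_free(context);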
Diffstat (limited to 'amdgpu')
-rw-r--r--	amdgpu/amdgpu.h	|  24
-rw-r--r--	amdgpu/amdgpu_cs.c	| 115
-rw-r--r--	amdgpu/amdgpu_internal.h	|   2
3 files changed, 56 insertions, 85 deletions
diff --git a/amdgpu/amdgpu.h b/amdgpu/amdgpu.h
index 11a86eff..d010d99c 100644
--- a/amdgpu/amdgpu.h
+++ b/amdgpu/amdgpu.h
@@ -882,7 +882,6 @@ int amdgpu_cs_ctx_create(amdgpu_device_handle dev,
  *
  * Destroy GPU execution context when not needed any more
  *
- * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
  * \param context - \c [in] GPU Context handle
  *
  * \return 0 on success\n
@@ -892,13 +891,11 @@ int amdgpu_cs_ctx_create(amdgpu_device_handle dev,
  * \sa amdgpu_cs_ctx_create()
  *
 */
-int amdgpu_cs_ctx_free(amdgpu_device_handle dev,
-		       amdgpu_context_handle context);
+int amdgpu_cs_ctx_free(amdgpu_context_handle context);
 
 /**
  * Query reset state for the specific GPU Context
  *
- * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
  * \param context - \c [in] GPU Context handle
  * \param state - \c [out] Reset state status
  *
@@ -909,8 +906,7 @@ int amdgpu_cs_ctx_free(amdgpu_device_handle dev,
  * \sa amdgpu_cs_ctx_create()
  *
 */
-int amdgpu_cs_query_reset_state(amdgpu_device_handle dev,
-				amdgpu_context_handle context,
+int amdgpu_cs_query_reset_state(amdgpu_context_handle context,
 				enum amdgpu_cs_ctx_reset_state *state);
 
 
@@ -924,7 +920,6 @@ int amdgpu_cs_query_reset_state(amdgpu_device_handle dev,
  * Allocate memory to be filled with PM4 packets and be served as the first
  * entry point of execution (a.k.a. Indirect Buffer)
  *
- * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
  * \param context - \c [in] GPU Context which will use IB
  * \param ib_size - \c [in] Size of allocation
  * \param output - \c [out] Pointer to structure to get information about
@@ -937,8 +932,7 @@ int amdgpu_cs_query_reset_state(amdgpu_device_handle dev,
  * \sa amdgpu_cs_free_ib()
  *
 */
-int amdgpu_cs_alloc_ib(amdgpu_device_handle dev,
-		       amdgpu_context_handle context,
+int amdgpu_cs_alloc_ib(amdgpu_context_handle context,
 		       enum amdgpu_cs_ib_size ib_size,
 		       struct amdgpu_cs_ib_alloc_result *output);
 
@@ -946,8 +940,6 @@ int amdgpu_cs_alloc_ib(amdgpu_device_handle dev,
  * If UMD has allocates IBs which doesn’t need any more than those IBs must
  * be explicitly freed
  *
- * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
- * \param context - \c [in] GPU Context containing IB
  * \param handle - \c [in] IB handle
  *
  * \return 0 on success\n
@@ -960,9 +952,7 @@ int amdgpu_cs_alloc_ib(amdgpu_device_handle dev,
  * \sa amdgpu_cs_alloc_ib()
  *
 */
-int amdgpu_cs_free_ib(amdgpu_device_handle dev,
-		      amdgpu_context_handle context,
-		      amdgpu_ib_handle handle);
+int amdgpu_cs_free_ib(amdgpu_ib_handle handle);
 
 /**
  * Send request to submit command buffers to hardware.
@@ -1011,8 +1001,7 @@ int amdgpu_cs_free_ib(amdgpu_device_handle dev,
  *	amdgpu_cs_query_fence_status()
  *
 */
-int amdgpu_cs_submit(amdgpu_device_handle dev,
-		     amdgpu_context_handle context,
+int amdgpu_cs_submit(amdgpu_context_handle context,
 		     uint64_t flags,
 		     struct amdgpu_cs_request *ibs_request,
 		     uint32_t number_of_requests,
@@ -1038,8 +1027,7 @@ int amdgpu_cs_submit(amdgpu_device_handle dev,
  *
  * \sa amdgpu_cs_submit()
 */
-int amdgpu_cs_query_fence_status(amdgpu_device_handle dev,
-				 struct amdgpu_cs_query_fence *fence,
+int amdgpu_cs_query_fence_status(struct amdgpu_cs_query_fence *fence,
 				 uint32_t *expired);
 
 
diff --git a/amdgpu/amdgpu_cs.c b/amdgpu/amdgpu_cs.c
index 91e6bcfc..4d5b3ecf 100644
--- a/amdgpu/amdgpu_cs.c
+++ b/amdgpu/amdgpu_cs.c
@@ -42,8 +42,7 @@
  *
  * \return 0 on success otherwise POSIX Error code
 */
-static int amdgpu_cs_create_ib(amdgpu_device_handle dev,
-			       amdgpu_context_handle context,
+static int amdgpu_cs_create_ib(amdgpu_context_handle context,
 			       enum amdgpu_cs_ib_size ib_size,
 			       amdgpu_ib_handle *ib)
 {
@@ -79,7 +78,7 @@ static int amdgpu_cs_create_ib(amdgpu_device_handle dev,
 
 	alloc_buffer.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;
 
-	r = amdgpu_bo_alloc(dev,
+	r = amdgpu_bo_alloc(context->dev,
 			    &alloc_buffer,
 			    &info);
 	if (r)
@@ -98,6 +97,7 @@ static int amdgpu_cs_create_ib(amdgpu_device_handle dev,
 		return -ENOMEM;
 	}
 
+	new_ib->context = context;
 	new_ib->buf_handle = info.buf_handle;
 	new_ib->cpu = cpu;
 	new_ib->virtual_mc_base_address = info.virtual_mc_base_address;
@@ -114,10 +114,10 @@ static int amdgpu_cs_create_ib(amdgpu_device_handle dev,
  *
  * \return 0 on success otherwise POSIX Error code
 */
-static int amdgpu_cs_destroy_ib(amdgpu_device_handle dev,
-				amdgpu_ib_handle ib)
+static int amdgpu_cs_destroy_ib(amdgpu_ib_handle ib)
 {
 	int r;
+
 	r = amdgpu_bo_cpu_unmap(ib->buf_handle);
 	if (r)
 		return r;
@@ -162,8 +162,7 @@ static int amdgpu_cs_init_ib_pool(amdgpu_context_handle context)
  *
  * \return 0 on success otherwise POSIX Error code
 */
-static int amdgpu_cs_alloc_from_ib_pool(amdgpu_device_handle dev,
-					 amdgpu_context_handle context,
+static int amdgpu_cs_alloc_from_ib_pool(amdgpu_context_handle context,
 					 enum amdgpu_cs_ib_size ib_size,
 					 amdgpu_ib_handle *ib)
 {
@@ -210,21 +209,19 @@ static void amdgpu_cs_free_to_ib_pool(amdgpu_context_handle context,
  *
  * \return 0 on success otherwise POSIX Error code
 */
-static int amdgpu_cs_destroy_ib_pool(amdgpu_device_handle dev,
-				     amdgpu_context_handle context)
+static int amdgpu_cs_destroy_ib_pool(amdgpu_context_handle context)
 {
-	int i;
-	int r;
 	struct list_head *head;
 	struct amdgpu_ib *next;
 	struct amdgpu_ib *storage;
+	int i, r;
 
 	r = 0;
 	pthread_mutex_lock(&context->pool_mutex);
 	for (i = 0; i < AMDGPU_CS_IB_SIZE_NUM; i++) {
 		head = &context->ib_pools[i];
 		LIST_FOR_EACH_ENTRY_SAFE(next, storage, head, list_node) {
-			r = amdgpu_cs_destroy_ib(dev, next);
+			r = amdgpu_cs_destroy_ib(next);
 			if (r)
 				break;
 		}
@@ -268,8 +265,7 @@ static int amdgpu_cs_init_pendings(amdgpu_context_handle context)
  *
  * \return 0 on success otherwise POSIX Error code
 */
-static int amdgpu_cs_destroy_pendings(amdgpu_device_handle dev,
-				      amdgpu_context_handle context)
+static int amdgpu_cs_destroy_pendings(amdgpu_context_handle context)
 {
 	int ip, inst;
 	uint32_t ring;
@@ -285,7 +281,7 @@ static int amdgpu_cs_destroy_pendings(amdgpu_device_handle dev,
 		for (ring = 0; ring < AMDGPU_CS_MAX_RINGS; ring++) {
 			head = &context->pendings[ip][inst][ring];
 			LIST_FOR_EACH_ENTRY_SAFE(next, s, head, list_node) {
-				r = amdgpu_cs_destroy_ib(dev, next);
+				r = amdgpu_cs_destroy_ib(next);
 				if (r)
 					break;
 			}
@@ -293,7 +289,7 @@ static int amdgpu_cs_destroy_pendings(amdgpu_device_handle dev,
 
 	head = &context->freed;
 	LIST_FOR_EACH_ENTRY_SAFE(next, s, head, list_node) {
-		r = amdgpu_cs_destroy_ib(dev, next);
+		r = amdgpu_cs_destroy_ib(next);
 		if (r)
 			break;
 	}
@@ -441,39 +437,35 @@ static void amdgpu_cs_all_pending_gc(amdgpu_context_handle context)
  *
  * \return 0 on success otherwise POSIX Error code
 */
-static int amdgpu_cs_alloc_ib_local(amdgpu_device_handle dev,
-				    amdgpu_context_handle context,
+static int amdgpu_cs_alloc_ib_local(amdgpu_context_handle context,
 				    enum amdgpu_cs_ib_size ib_size,
 				    amdgpu_ib_handle *ib)
 {
 	int r;
 
-	r = amdgpu_cs_alloc_from_ib_pool(dev, context, ib_size, ib);
+	r = amdgpu_cs_alloc_from_ib_pool(context, ib_size, ib);
 	if (!r)
 		return r;
 
 	amdgpu_cs_all_pending_gc(context);
 
 	/* Retry to allocate from free IB pools after garbage collector. */
-	r = amdgpu_cs_alloc_from_ib_pool(dev, context, ib_size, ib);
+	r = amdgpu_cs_alloc_from_ib_pool(context, ib_size, ib);
 	if (!r)
 		return r;
 
 	/* There is no suitable IB in free pools. Create one. */
-	r = amdgpu_cs_create_ib(dev, context, ib_size, ib);
+	r = amdgpu_cs_create_ib(context, ib_size, ib);
 	return r;
 }
 
-int amdgpu_cs_alloc_ib(amdgpu_device_handle dev,
-		       amdgpu_context_handle context,
+int amdgpu_cs_alloc_ib(amdgpu_context_handle context,
 		       enum amdgpu_cs_ib_size ib_size,
 		       struct amdgpu_cs_ib_alloc_result *output)
 {
 	int r;
 	amdgpu_ib_handle ib;
 
-	if (NULL == dev)
-		return -EINVAL;
 	if (NULL == context)
 		return -EINVAL;
 	if (NULL == output)
@@ -481,7 +473,7 @@ int amdgpu_cs_alloc_ib(amdgpu_device_handle dev,
 	if (ib_size >= AMDGPU_CS_IB_SIZE_NUM)
 		return -EINVAL;
 
-	r = amdgpu_cs_alloc_ib_local(dev, context, ib_size, &ib);
+	r = amdgpu_cs_alloc_ib_local(context, ib_size, &ib);
 	if (!r) {
 		output->handle = ib;
 		output->cpu = ib->cpu;
@@ -491,17 +483,14 @@ int amdgpu_cs_alloc_ib(amdgpu_device_handle dev,
 	return r;
 }
 
-int amdgpu_cs_free_ib(amdgpu_device_handle dev,
-		      amdgpu_context_handle context,
-		      amdgpu_ib_handle handle)
+int amdgpu_cs_free_ib(amdgpu_ib_handle handle)
 {
-	if (NULL == dev)
-		return -EINVAL;
-	if (NULL == context)
-		return -EINVAL;
+	amdgpu_context_handle context;
+
 	if (NULL == handle)
 		return -EINVAL;
 
+	context = handle->context;
 	pthread_mutex_lock(&context->pendings_mutex);
 	LIST_ADD(&handle->list_node, &context->freed);
 	pthread_mutex_unlock(&context->pendings_mutex);
@@ -532,6 +521,8 @@ int amdgpu_cs_ctx_create(amdgpu_device_handle dev,
 	if (NULL == gpu_context)
 		return -ENOMEM;
 
+	gpu_context->dev = dev;
+
 	r = pthread_mutex_init(&gpu_context->sequence_mutex, NULL);
 	if (r)
 		goto error_mutex;
@@ -544,7 +535,7 @@ int amdgpu_cs_ctx_create(amdgpu_device_handle dev,
 	if (r)
 		goto error_pendings;
 
-	r = amdgpu_cs_alloc_ib_local(dev, gpu_context, amdgpu_cs_ib_size_4K,
+	r = amdgpu_cs_alloc_ib_local(gpu_context, amdgpu_cs_ib_size_4K,
 				     &gpu_context->fence_ib);
 	if (r)
 		goto error_fence_ib;
@@ -562,13 +553,13 @@ int amdgpu_cs_ctx_create(amdgpu_device_handle dev,
 	return 0;
 
 error_kernel:
-	amdgpu_cs_free_ib(dev, gpu_context, gpu_context->fence_ib);
+	amdgpu_cs_free_ib(gpu_context->fence_ib);
 
 error_fence_ib:
-	amdgpu_cs_destroy_pendings(dev, gpu_context);
+	amdgpu_cs_destroy_pendings(gpu_context);
 
 error_pendings:
-	amdgpu_cs_destroy_ib_pool(dev, gpu_context);
+	amdgpu_cs_destroy_ib_pool(gpu_context);
 
 error_pool:
 	pthread_mutex_destroy(&gpu_context->sequence_mutex);
@@ -586,26 +577,23 @@ error_mutex:
  *
  * \return 0 on success otherwise POSIX Error code
 */
-int amdgpu_cs_ctx_free(amdgpu_device_handle dev,
-		       amdgpu_context_handle context)
+int amdgpu_cs_ctx_free(amdgpu_context_handle context)
 {
-	int r;
 	union drm_amdgpu_ctx args;
+	int r;
 
-	if (NULL == dev)
-		return -EINVAL;
 	if (NULL == context)
 		return -EINVAL;
 
-	r = amdgpu_cs_free_ib(dev, context, context->fence_ib);
+	r = amdgpu_cs_free_ib(context->fence_ib);
 	if (r)
 		return r;
 
-	r = amdgpu_cs_destroy_pendings(dev, context);
+	r = amdgpu_cs_destroy_pendings(context);
 	if (r)
 		return r;
 
-	r = amdgpu_cs_destroy_ib_pool(dev, context);
+	r = amdgpu_cs_destroy_ib_pool(context);
 	if (r)
 		return r;
 
@@ -615,15 +603,15 @@ int amdgpu_cs_ctx_free(amdgpu_device_handle dev,
 	memset(&args, 0, sizeof(args));
 	args.in.op = AMDGPU_CTX_OP_FREE_CTX;
 	args.in.ctx_id = context->id;
-	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_CTX, &args, sizeof(args));
+	r = drmCommandWriteRead(context->dev->fd, DRM_AMDGPU_CTX,
+				&args, sizeof(args));
 
 	free(context);
 
 	return r;
 }
 
-static int amdgpu_cs_create_bo_list(amdgpu_device_handle dev,
-				    amdgpu_context_handle context,
+static int amdgpu_cs_create_bo_list(amdgpu_context_handle context,
 				    struct amdgpu_cs_request *request,
 				    amdgpu_ib_handle fence_ib,
 				    uint32_t *handle)
@@ -663,7 +651,7 @@ static int amdgpu_cs_create_bo_list(amdgpu_device_handle dev,
 	if (fence_ib)
 		list[i].bo_handle = fence_ib->buf_handle->handle;
 
-	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
+	r = drmCommandWriteRead(context->dev->fd, DRM_AMDGPU_BO_LIST,
 				&args, sizeof(args));
 	if (r)
 		return r;
@@ -672,7 +660,7 @@ static int amdgpu_cs_create_bo_list(amdgpu_device_handle dev,
 	return 0;
 }
 
-static int amdgpu_cs_free_bo_list(amdgpu_device_handle dev, uint32_t handle)
+static int amdgpu_cs_free_bo_list(amdgpu_context_handle context, uint32_t handle)
 {
 	union drm_amdgpu_bo_list args;
 	int r;
@@ -684,7 +672,7 @@ static int amdgpu_cs_free_bo_list(amdgpu_device_handle dev, uint32_t handle)
 	args.in.operation = AMDGPU_BO_LIST_OP_DESTROY;
 	args.in.list_handle = handle;
 
-	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
+	r = drmCommandWriteRead(context->dev->fd, DRM_AMDGPU_BO_LIST,
 				&args, sizeof(args));
 
 	return r;
@@ -705,8 +693,7 @@ static uint32_t amdgpu_cs_fence_index(unsigned ip, unsigned ring)
  * \return 0 on success otherwise POSIX Error code
  * \sa amdgpu_cs_submit()
 */
-static int amdgpu_cs_submit_one(amdgpu_device_handle dev,
-				amdgpu_context_handle context,
+static int amdgpu_cs_submit_one(amdgpu_context_handle context,
 				struct amdgpu_cs_request *ibs_request,
 				uint64_t *fence)
 {
@@ -763,7 +750,7 @@ static int amdgpu_cs_submit_one(amdgpu_device_handle dev,
 		chunk_data[i].ib_data.flags = AMDGPU_IB_FLAG_CE;
 	}
 
-	r = amdgpu_cs_create_bo_list(dev, context, ibs_request, NULL,
+	r = amdgpu_cs_create_bo_list(context, ibs_request, NULL,
 				     &bo_list_handle);
 	if (r)
 		goto error_unlock;
@@ -789,7 +776,7 @@ static int amdgpu_cs_submit_one(amdgpu_device_handle dev,
 		chunk_data[i].fence_data.offset *= sizeof(uint64_t);
 	}
 
-	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_CS,
+	r = drmCommandWriteRead(context->dev->fd, DRM_AMDGPU_CS,
 				&cs, sizeof(cs));
 	if (r)
 		goto error_unlock;
@@ -816,7 +803,7 @@ static int amdgpu_cs_submit_one(amdgpu_device_handle dev,
 
 	pthread_mutex_unlock(&context->sequence_mutex);
 
-	r = amdgpu_cs_free_bo_list(dev, bo_list_handle);
+	r = amdgpu_cs_free_bo_list(context, bo_list_handle);
 	if (r)
 		goto error_free;
 
@@ -831,18 +818,15 @@ error_free:
 	return r;
 }
 
-int amdgpu_cs_submit(amdgpu_device_handle dev,
-		     amdgpu_context_handle context,
+int amdgpu_cs_submit(amdgpu_context_handle context,
 		     uint64_t flags,
 		     struct amdgpu_cs_request *ibs_request,
 		     uint32_t number_of_requests,
 		     uint64_t *fences)
 {
-	int r;
 	uint32_t i;
+	int r;
 
-	if (NULL == dev)
-		return -EINVAL;
 	if (NULL == context)
 		return -EINVAL;
 	if (NULL == ibs_request)
@@ -852,7 +836,7 @@ int amdgpu_cs_submit(amdgpu_device_handle dev,
 
 	r = 0;
 	for (i = 0; i < number_of_requests; i++) {
-		r = amdgpu_cs_submit_one(dev, context, ibs_request, fences);
+		r = amdgpu_cs_submit_one(context, ibs_request, fences);
 		if (r)
 			break;
 		fences++;
@@ -915,8 +899,7 @@ static int amdgpu_ioctl_wait_cs(amdgpu_device_handle dev,
 	return 0;
 }
 
-int amdgpu_cs_query_fence_status(amdgpu_device_handle dev,
-				 struct amdgpu_cs_query_fence *fence,
+int amdgpu_cs_query_fence_status(struct amdgpu_cs_query_fence *fence,
 				 uint32_t *expired)
 {
 	amdgpu_context_handle context;
@@ -927,8 +910,6 @@ int amdgpu_cs_query_fence_status(amdgpu_device_handle dev,
 	bool busy = true;
 	int r;
 
-	if (NULL == dev)
-		return -EINVAL;
 	if (NULL == fence)
 		return -EINVAL;
 	if (NULL == expired)
@@ -969,7 +950,7 @@ int amdgpu_cs_query_fence_status(amdgpu_device_handle dev,
 
 	pthread_mutex_unlock(&context->sequence_mutex);
 
-	r = amdgpu_ioctl_wait_cs(dev, ip_type, ip_instance, ring,
+	r = amdgpu_ioctl_wait_cs(context->dev, ip_type, ip_instance, ring,
 				 fence->fence, fence->timeout_ns, &busy);
 	if (!r && !busy) {
 		*expired = true;
diff --git a/amdgpu/amdgpu_internal.h b/amdgpu/amdgpu_internal.h
index 19bc7e18..c91452ef 100644
--- a/amdgpu/amdgpu_internal.h
+++ b/amdgpu/amdgpu_internal.h
@@ -94,6 +94,7 @@ struct amdgpu_bo {
  * sequence_mutex -> pendings_mutex -> pool_mutex.
 */
 struct amdgpu_context {
+	struct amdgpu_device *dev;
 	/** Mutex for accessing fences and to maintain command submissions
 	    and pending lists in good sequence. */
 	pthread_mutex_t sequence_mutex;
@@ -116,6 +117,7 @@ struct amdgpu_context {
 };
 
 struct amdgpu_ib {
+	amdgpu_context_handle context;
 	struct list_head list_node;
 	amdgpu_bo_handle buf_handle;
 	void *cpu;
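Illustration (not part of the commit): the two back-pointers added above are what let the public entry points drop their dev argument. A sketch of the lookup chain with hypothetical helper names; the patched code in amdgpu_cs.c performs the same dereferences inline.

	/* Hypothetical helper: recover the DRM fd from a context, as
	 * amdgpu_cs_ctx_free() and amdgpu_cs_submit_one() now do inline.
	 * context->dev is stored once in amdgpu_cs_ctx_create(). */
	static int example_ctx_fd(amdgpu_context_handle context)
	{
		return context->dev->fd;
	}

	/* From an IB handle it is one more hop through handle->context,
	 * which amdgpu_cs_create_ib() now fills in. */
	static int example_ib_fd(amdgpu_ib_handle handle)
	{
		return example_ctx_fd(handle->context);
	}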