aboutsummaryrefslogtreecommitdiffstats
path: root/tests
diff options
context:
space:
mode:
authorChunming Zhou2018-02-08 01:03:01 -0600
committerChunming Zhou2018-02-08 01:03:47 -0600
commit09642c073e8af71127cf98b48fe1b2a376c606cf (patch)
tree10b2861ed4abd2bf3610a5f6bcfba8f417db189b /tests
parent41b94a3fb6e87d057fad78568d920d29489e5060 (diff)
downloadexternal-libgbm-09642c073e8af71127cf98b48fe1b2a376c606cf.tar.gz
external-libgbm-09642c073e8af71127cf98b48fe1b2a376c606cf.tar.xz
external-libgbm-09642c073e8af71127cf98b48fe1b2a376c606cf.zip
tests/amdgpu: add bo eviction test
for(( i=1; i < 100; i++)) do echo "Hello, Welcome $i times " sudo ./amdgpu_test -s 1 -t 5 done Running the above script in two terminals will reproduce Felix's swap leaking issue. Signed-off-by: Chunming Zhou <david1.zhou@amd.com> Acked-by: Christian König <christian.koenig@amd.com>
Diffstat (limited to 'tests')
-rw-r--r--tests/amdgpu/amdgpu_test.h23
-rw-r--r--tests/amdgpu/basic_tests.c160
2 files changed, 182 insertions, 1 deletions
diff --git a/tests/amdgpu/amdgpu_test.h b/tests/amdgpu/amdgpu_test.h
index 1db803c6..7397dea0 100644
--- a/tests/amdgpu/amdgpu_test.h
+++ b/tests/amdgpu/amdgpu_test.h
@@ -252,6 +252,29 @@ static inline int gpu_mem_free(amdgpu_bo_handle bo,
252} 252}
253 253
254static inline int 254static inline int
255amdgpu_bo_alloc_wrap(amdgpu_device_handle dev, unsigned size,
256 unsigned alignment, unsigned heap, uint64_t flags,
257 amdgpu_bo_handle *bo)
258{
259 struct amdgpu_bo_alloc_request request = {};
260 amdgpu_bo_handle buf_handle;
261 int r;
262
263 request.alloc_size = size;
264 request.phys_alignment = alignment;
265 request.preferred_heap = heap;
266 request.flags = flags;
267
268 r = amdgpu_bo_alloc(dev, &request, &buf_handle);
269 if (r)
270 return r;
271
272 *bo = buf_handle;
273
274 return 0;
275}
276
277static inline int
255amdgpu_bo_alloc_and_map(amdgpu_device_handle dev, unsigned size, 278amdgpu_bo_alloc_and_map(amdgpu_device_handle dev, unsigned size,
256 unsigned alignment, unsigned heap, uint64_t flags, 279 unsigned alignment, unsigned heap, uint64_t flags,
257 amdgpu_bo_handle *bo, void **cpu, uint64_t *mc_address, 280 amdgpu_bo_handle *bo, void **cpu, uint64_t *mc_address,
diff --git a/tests/amdgpu/basic_tests.c b/tests/amdgpu/basic_tests.c
index 354b0157..0ea010a8 100644
--- a/tests/amdgpu/basic_tests.c
+++ b/tests/amdgpu/basic_tests.c
@@ -51,14 +51,22 @@ static void amdgpu_command_submission_sdma(void);
51static void amdgpu_userptr_test(void); 51static void amdgpu_userptr_test(void);
52static void amdgpu_semaphore_test(void); 52static void amdgpu_semaphore_test(void);
53static void amdgpu_sync_dependency_test(void); 53static void amdgpu_sync_dependency_test(void);
54static void amdgpu_bo_eviction_test(void);
54 55
55static void amdgpu_command_submission_write_linear_helper(unsigned ip_type); 56static void amdgpu_command_submission_write_linear_helper(unsigned ip_type);
56static void amdgpu_command_submission_const_fill_helper(unsigned ip_type); 57static void amdgpu_command_submission_const_fill_helper(unsigned ip_type);
57static void amdgpu_command_submission_copy_linear_helper(unsigned ip_type); 58static void amdgpu_command_submission_copy_linear_helper(unsigned ip_type);
58 59static void amdgpu_test_exec_cs_helper(amdgpu_context_handle context_handle,
60 unsigned ip_type,
61 int instance, int pm4_dw, uint32_t *pm4_src,
62 int res_cnt, amdgpu_bo_handle *resources,
63 struct amdgpu_cs_ib_info *ib_info,
64 struct amdgpu_cs_request *ibs_request);
65
59CU_TestInfo basic_tests[] = { 66CU_TestInfo basic_tests[] = {
60 { "Query Info Test", amdgpu_query_info_test }, 67 { "Query Info Test", amdgpu_query_info_test },
61 { "Userptr Test", amdgpu_userptr_test }, 68 { "Userptr Test", amdgpu_userptr_test },
69 { "bo eviction Test", amdgpu_bo_eviction_test },
62 { "Command submission Test (GFX)", amdgpu_command_submission_gfx }, 70 { "Command submission Test (GFX)", amdgpu_command_submission_gfx },
63 { "Command submission Test (Compute)", amdgpu_command_submission_compute }, 71 { "Command submission Test (Compute)", amdgpu_command_submission_compute },
64 { "Command submission Test (Multi-Fence)", amdgpu_command_submission_multi_fence }, 72 { "Command submission Test (Multi-Fence)", amdgpu_command_submission_multi_fence },
@@ -516,6 +524,156 @@ static void amdgpu_command_submission_gfx_cp_copy_data(void)
516 amdgpu_command_submission_copy_linear_helper(AMDGPU_HW_IP_GFX); 524 amdgpu_command_submission_copy_linear_helper(AMDGPU_HW_IP_GFX);
517} 525}
518 526
527static void amdgpu_bo_eviction_test(void)
528{
529 const int sdma_write_length = 1024;
530 const int pm4_dw = 256;
531 amdgpu_context_handle context_handle;
532 amdgpu_bo_handle bo1, bo2, vram_max[2], gtt_max[2];
533 amdgpu_bo_handle *resources;
534 uint32_t *pm4;
535 struct amdgpu_cs_ib_info *ib_info;
536 struct amdgpu_cs_request *ibs_request;
537 uint64_t bo1_mc, bo2_mc;
538 volatile unsigned char *bo1_cpu, *bo2_cpu;
539 int i, j, r, loop1, loop2;
540 uint64_t gtt_flags[2] = {0, AMDGPU_GEM_CREATE_CPU_GTT_USWC};
541 amdgpu_va_handle bo1_va_handle, bo2_va_handle;
542 struct amdgpu_heap_info vram_info, gtt_info;
543
544 pm4 = calloc(pm4_dw, sizeof(*pm4));
545 CU_ASSERT_NOT_EQUAL(pm4, NULL);
546
547 ib_info = calloc(1, sizeof(*ib_info));
548 CU_ASSERT_NOT_EQUAL(ib_info, NULL);
549
550 ibs_request = calloc(1, sizeof(*ibs_request));
551 CU_ASSERT_NOT_EQUAL(ibs_request, NULL);
552
553 r = amdgpu_cs_ctx_create(device_handle, &context_handle);
554 CU_ASSERT_EQUAL(r, 0);
555
556 /* prepare resource */
557 resources = calloc(4, sizeof(amdgpu_bo_handle));
558 CU_ASSERT_NOT_EQUAL(resources, NULL);
559
560 r = amdgpu_query_heap_info(device_handle, AMDGPU_GEM_DOMAIN_VRAM,
561 0, &vram_info);
562 CU_ASSERT_EQUAL(r, 0);
563
564 r = amdgpu_bo_alloc_wrap(device_handle, vram_info.max_allocation, 4096,
565 AMDGPU_GEM_DOMAIN_VRAM, 0, &vram_max[0]);
566 CU_ASSERT_EQUAL(r, 0);
567 r = amdgpu_bo_alloc_wrap(device_handle, vram_info.max_allocation, 4096,
568 AMDGPU_GEM_DOMAIN_VRAM, 0, &vram_max[1]);
569 CU_ASSERT_EQUAL(r, 0);
570
571 r = amdgpu_query_heap_info(device_handle, AMDGPU_GEM_DOMAIN_GTT,
572 0, &gtt_info);
573 CU_ASSERT_EQUAL(r, 0);
574
575 r = amdgpu_bo_alloc_wrap(device_handle, gtt_info.max_allocation, 4096,
576 AMDGPU_GEM_DOMAIN_GTT, 0, &gtt_max[0]);
577 CU_ASSERT_EQUAL(r, 0);
578 r = amdgpu_bo_alloc_wrap(device_handle, gtt_info.max_allocation, 4096,
579 AMDGPU_GEM_DOMAIN_GTT, 0, &gtt_max[1]);
580 CU_ASSERT_EQUAL(r, 0);
581
582
583
584 loop1 = loop2 = 0;
585 /* run 9 circle to test all mapping combination */
586 while(loop1 < 2) {
587 while(loop2 < 2) {
588 /* allocate UC bo1for sDMA use */
589 r = amdgpu_bo_alloc_and_map(device_handle,
590 sdma_write_length, 4096,
591 AMDGPU_GEM_DOMAIN_GTT,
592 gtt_flags[loop1], &bo1,
593 (void**)&bo1_cpu, &bo1_mc,
594 &bo1_va_handle);
595 CU_ASSERT_EQUAL(r, 0);
596
597 /* set bo1 */
598 memset((void*)bo1_cpu, 0xaa, sdma_write_length);
599
600 /* allocate UC bo2 for sDMA use */
601 r = amdgpu_bo_alloc_and_map(device_handle,
602 sdma_write_length, 4096,
603 AMDGPU_GEM_DOMAIN_GTT,
604 gtt_flags[loop2], &bo2,
605 (void**)&bo2_cpu, &bo2_mc,
606 &bo2_va_handle);
607 CU_ASSERT_EQUAL(r, 0);
608
609 /* clear bo2 */
610 memset((void*)bo2_cpu, 0, sdma_write_length);
611
612 resources[0] = bo1;
613 resources[1] = bo2;
614 resources[2] = vram_max[loop2];
615 resources[3] = gtt_max[loop2];
616
617 /* fulfill PM4: test DMA copy linear */
618 i = j = 0;
619 if (family_id == AMDGPU_FAMILY_SI) {
620 pm4[i++] = SDMA_PACKET_SI(SDMA_OPCODE_COPY_SI, 0, 0, 0,
621 sdma_write_length);
622 pm4[i++] = 0xffffffff & bo2_mc;
623 pm4[i++] = 0xffffffff & bo1_mc;
624 pm4[i++] = (0xffffffff00000000 & bo2_mc) >> 32;
625 pm4[i++] = (0xffffffff00000000 & bo1_mc) >> 32;
626 } else {
627 pm4[i++] = SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0);
628 if (family_id >= AMDGPU_FAMILY_AI)
629 pm4[i++] = sdma_write_length - 1;
630 else
631 pm4[i++] = sdma_write_length;
632 pm4[i++] = 0;
633 pm4[i++] = 0xffffffff & bo1_mc;
634 pm4[i++] = (0xffffffff00000000 & bo1_mc) >> 32;
635 pm4[i++] = 0xffffffff & bo2_mc;
636 pm4[i++] = (0xffffffff00000000 & bo2_mc) >> 32;
637 }
638
639 amdgpu_test_exec_cs_helper(context_handle,
640 AMDGPU_HW_IP_DMA, 0,
641 i, pm4,
642 4, resources,
643 ib_info, ibs_request);
644
645 /* verify if SDMA test result meets with expected */
646 i = 0;
647 while(i < sdma_write_length) {
648 CU_ASSERT_EQUAL(bo2_cpu[i++], 0xaa);
649 }
650 r = amdgpu_bo_unmap_and_free(bo1, bo1_va_handle, bo1_mc,
651 sdma_write_length);
652 CU_ASSERT_EQUAL(r, 0);
653 r = amdgpu_bo_unmap_and_free(bo2, bo2_va_handle, bo2_mc,
654 sdma_write_length);
655 CU_ASSERT_EQUAL(r, 0);
656 loop2++;
657 }
658 loop2 = 0;
659 loop1++;
660 }
661 amdgpu_bo_free(vram_max[0]);
662 amdgpu_bo_free(vram_max[1]);
663 amdgpu_bo_free(gtt_max[0]);
664 amdgpu_bo_free(gtt_max[1]);
665 /* clean resources */
666 free(resources);
667 free(ibs_request);
668 free(ib_info);
669 free(pm4);
670
671 /* end of test */
672 r = amdgpu_cs_ctx_free(context_handle);
673 CU_ASSERT_EQUAL(r, 0);
674}
675
676
519static void amdgpu_command_submission_gfx(void) 677static void amdgpu_command_submission_gfx(void)
520{ 678{
521 /* write data using the CP */ 679 /* write data using the CP */