aboutsummaryrefslogtreecommitdiffstats
path: root/tests
diff options
context:
space:
mode:
authorHawking Zhang2018-01-07 21:20:30 -0600
committerAlex Deucher2018-01-12 11:17:24 -0600
commitf115de819bc10bb36565f344a66fa619219911b9 (patch)
tree7a4c82c225c2b92187bb8f8bd4042dad98ef708d /tests
parentcbbb8a332d972a4ab18622f1c53de21324735fef (diff)
downloadexternal-libdrm-f115de819bc10bb36565f344a66fa619219911b9.tar.gz
external-libdrm-f115de819bc10bb36565f344a66fa619219911b9.tar.xz
external-libdrm-f115de819bc10bb36565f344a66fa619219911b9.zip
tests/amdgpu: execute copy linear on all the available rings
Signed-off-by: Hawking Zhang <Hawking.Zhang@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'tests')
-rw-r--r--tests/amdgpu/basic_tests.c200
1 file changed, 104 insertions, 96 deletions
diff --git a/tests/amdgpu/basic_tests.c b/tests/amdgpu/basic_tests.c
index 0a198e48..0f75e896 100644
--- a/tests/amdgpu/basic_tests.c
+++ b/tests/amdgpu/basic_tests.c
@@ -1114,9 +1114,10 @@ static void amdgpu_command_submission_copy_linear_helper(unsigned ip_type)
1114 struct amdgpu_cs_request *ibs_request; 1114 struct amdgpu_cs_request *ibs_request;
1115 uint64_t bo1_mc, bo2_mc; 1115 uint64_t bo1_mc, bo2_mc;
1116 volatile unsigned char *bo1_cpu, *bo2_cpu; 1116 volatile unsigned char *bo1_cpu, *bo2_cpu;
1117 int i, j, r, loop1, loop2; 1117 int i, j, r, loop1, loop2, ring_id;
1118 uint64_t gtt_flags[2] = {0, AMDGPU_GEM_CREATE_CPU_GTT_USWC}; 1118 uint64_t gtt_flags[2] = {0, AMDGPU_GEM_CREATE_CPU_GTT_USWC};
1119 amdgpu_va_handle bo1_va_handle, bo2_va_handle; 1119 amdgpu_va_handle bo1_va_handle, bo2_va_handle;
1120 struct drm_amdgpu_info_hw_ip hw_ip_info;
1120 1121
1121 pm4 = calloc(pm4_dw, sizeof(*pm4)); 1122 pm4 = calloc(pm4_dw, sizeof(*pm4));
1122 CU_ASSERT_NOT_EQUAL(pm4, NULL); 1123 CU_ASSERT_NOT_EQUAL(pm4, NULL);
@@ -1127,6 +1128,9 @@ static void amdgpu_command_submission_copy_linear_helper(unsigned ip_type)
1127 ibs_request = calloc(1, sizeof(*ibs_request)); 1128 ibs_request = calloc(1, sizeof(*ibs_request));
1128 CU_ASSERT_NOT_EQUAL(ibs_request, NULL); 1129 CU_ASSERT_NOT_EQUAL(ibs_request, NULL);
1129 1130
1131 r = amdgpu_query_hw_ip_info(device_handle, ip_type, 0, &hw_ip_info);
1132 CU_ASSERT_EQUAL(r, 0);
1133
1130 r = amdgpu_cs_ctx_create(device_handle, &context_handle); 1134 r = amdgpu_cs_ctx_create(device_handle, &context_handle);
1131 CU_ASSERT_EQUAL(r, 0); 1135 CU_ASSERT_EQUAL(r, 0);
1132 1136
@@ -1134,107 +1138,111 @@ static void amdgpu_command_submission_copy_linear_helper(unsigned ip_type)
1134 resources = calloc(2, sizeof(amdgpu_bo_handle)); 1138 resources = calloc(2, sizeof(amdgpu_bo_handle));
1135 CU_ASSERT_NOT_EQUAL(resources, NULL); 1139 CU_ASSERT_NOT_EQUAL(resources, NULL);
1136 1140
1137 loop1 = loop2 = 0; 1141 for (ring_id = 0; (1 << ring_id) & hw_ip_info.available_rings; ring_id++) {
1138 /* run 9 circle to test all mapping combination */ 1142 loop1 = loop2 = 0;
1139 while(loop1 < 2) { 1143 /* run 9 circle to test all mapping combination */
1140 while(loop2 < 2) { 1144 while(loop1 < 2) {
1141 /* allocate UC bo1for sDMA use */ 1145 while(loop2 < 2) {
1142 r = amdgpu_bo_alloc_and_map(device_handle, 1146 /* allocate UC bo1for sDMA use */
1143 sdma_write_length, 4096, 1147 r = amdgpu_bo_alloc_and_map(device_handle,
1144 AMDGPU_GEM_DOMAIN_GTT, 1148 sdma_write_length, 4096,
1145 gtt_flags[loop1], &bo1, 1149 AMDGPU_GEM_DOMAIN_GTT,
1146 (void**)&bo1_cpu, &bo1_mc, 1150 gtt_flags[loop1], &bo1,
1147 &bo1_va_handle); 1151 (void**)&bo1_cpu, &bo1_mc,
1148 CU_ASSERT_EQUAL(r, 0); 1152 &bo1_va_handle);
1149 1153 CU_ASSERT_EQUAL(r, 0);
1150 /* set bo1 */ 1154
1151 memset((void*)bo1_cpu, 0xaa, sdma_write_length); 1155 /* set bo1 */
1152 1156 memset((void*)bo1_cpu, 0xaa, sdma_write_length);
1153 /* allocate UC bo2 for sDMA use */ 1157
1154 r = amdgpu_bo_alloc_and_map(device_handle, 1158 /* allocate UC bo2 for sDMA use */
1155 sdma_write_length, 4096, 1159 r = amdgpu_bo_alloc_and_map(device_handle,
1156 AMDGPU_GEM_DOMAIN_GTT, 1160 sdma_write_length, 4096,
1157 gtt_flags[loop2], &bo2, 1161 AMDGPU_GEM_DOMAIN_GTT,
1158 (void**)&bo2_cpu, &bo2_mc, 1162 gtt_flags[loop2], &bo2,
1159 &bo2_va_handle); 1163 (void**)&bo2_cpu, &bo2_mc,
1160 CU_ASSERT_EQUAL(r, 0); 1164 &bo2_va_handle);
1161 1165 CU_ASSERT_EQUAL(r, 0);
1162 /* clear bo2 */ 1166
1163 memset((void*)bo2_cpu, 0, sdma_write_length); 1167 /* clear bo2 */
1164 1168 memset((void*)bo2_cpu, 0, sdma_write_length);
1165 resources[0] = bo1; 1169
1166 resources[1] = bo2; 1170 resources[0] = bo1;
1167 1171 resources[1] = bo2;
1168 /* fulfill PM4: test DMA copy linear */ 1172
1169 i = j = 0; 1173 /* fulfill PM4: test DMA copy linear */
1170 if (ip_type == AMDGPU_HW_IP_DMA) { 1174 i = j = 0;
1171 if (family_id == AMDGPU_FAMILY_SI) { 1175 if (ip_type == AMDGPU_HW_IP_DMA) {
1172 pm4[i++] = SDMA_PACKET_SI(SDMA_OPCODE_COPY_SI, 0, 0, 0, 1176 if (family_id == AMDGPU_FAMILY_SI) {
1173 sdma_write_length); 1177 pm4[i++] = SDMA_PACKET_SI(SDMA_OPCODE_COPY_SI,
1174 pm4[i++] = 0xffffffff & bo2_mc; 1178 0, 0, 0,
1175 pm4[i++] = 0xffffffff & bo1_mc; 1179 sdma_write_length);
1176 pm4[i++] = (0xffffffff00000000 & bo2_mc) >> 32; 1180 pm4[i++] = 0xffffffff & bo2_mc;
1177 pm4[i++] = (0xffffffff00000000 & bo1_mc) >> 32; 1181 pm4[i++] = 0xffffffff & bo1_mc;
1178 } else { 1182 pm4[i++] = (0xffffffff00000000 & bo2_mc) >> 32;
1179 pm4[i++] = SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0); 1183 pm4[i++] = (0xffffffff00000000 & bo1_mc) >> 32;
1180 if (family_id >= AMDGPU_FAMILY_AI) 1184 } else {
1181 pm4[i++] = sdma_write_length - 1; 1185 pm4[i++] = SDMA_PACKET(SDMA_OPCODE_COPY,
1182 else 1186 SDMA_COPY_SUB_OPCODE_LINEAR,
1187 0);
1188 if (family_id >= AMDGPU_FAMILY_AI)
1189 pm4[i++] = sdma_write_length - 1;
1190 else
1191 pm4[i++] = sdma_write_length;
1192 pm4[i++] = 0;
1193 pm4[i++] = 0xffffffff & bo1_mc;
1194 pm4[i++] = (0xffffffff00000000 & bo1_mc) >> 32;
1195 pm4[i++] = 0xffffffff & bo2_mc;
1196 pm4[i++] = (0xffffffff00000000 & bo2_mc) >> 32;
1197 }
1198 } else if ((ip_type == AMDGPU_HW_IP_GFX) ||
1199 (ip_type == AMDGPU_HW_IP_COMPUTE)) {
1200 if (family_id == AMDGPU_FAMILY_SI) {
1201 pm4[i++] = PACKET3(PACKET3_DMA_DATA_SI, 4);
1202 pm4[i++] = 0xfffffffc & bo1_mc;
1203 pm4[i++] = PACKET3_DMA_DATA_SI_ENGINE(0) |
1204 PACKET3_DMA_DATA_SI_DST_SEL(0) |
1205 PACKET3_DMA_DATA_SI_SRC_SEL(0) |
1206 PACKET3_DMA_DATA_SI_CP_SYNC |
1207 (0xffff00000000 & bo1_mc) >> 32;
1208 pm4[i++] = 0xfffffffc & bo2_mc;
1209 pm4[i++] = (0xffffffff00000000 & bo2_mc) >> 32;
1183 pm4[i++] = sdma_write_length; 1210 pm4[i++] = sdma_write_length;
1184 pm4[i++] = 0; 1211 } else {
1185 pm4[i++] = 0xffffffff & bo1_mc; 1212 pm4[i++] = PACKET3(PACKET3_DMA_DATA, 5);
1186 pm4[i++] = (0xffffffff00000000 & bo1_mc) >> 32; 1213 pm4[i++] = PACKET3_DMA_DATA_ENGINE(0) |
1187 pm4[i++] = 0xffffffff & bo2_mc; 1214 PACKET3_DMA_DATA_DST_SEL(0) |
1188 pm4[i++] = (0xffffffff00000000 & bo2_mc) >> 32; 1215 PACKET3_DMA_DATA_SRC_SEL(0) |
1189 } 1216 PACKET3_DMA_DATA_CP_SYNC;
1190 1217 pm4[i++] = 0xfffffffc & bo1_mc;
1191 } else if ((ip_type == AMDGPU_HW_IP_GFX) || 1218 pm4[i++] = (0xffffffff00000000 & bo1_mc) >> 32;
1192 (ip_type == AMDGPU_HW_IP_COMPUTE)) { 1219 pm4[i++] = 0xfffffffc & bo2_mc;
1193 if (family_id == AMDGPU_FAMILY_SI) { 1220 pm4[i++] = (0xffffffff00000000 & bo2_mc) >> 32;
1194 pm4[i++] = PACKET3(PACKET3_DMA_DATA_SI, 4); 1221 pm4[i++] = sdma_write_length;
1195 pm4[i++] = 0xfffffffc & bo1_mc; 1222 }
1196 pm4[i++] = PACKET3_DMA_DATA_SI_ENGINE(0) |
1197 PACKET3_DMA_DATA_SI_DST_SEL(0) |
1198 PACKET3_DMA_DATA_SI_SRC_SEL(0) |
1199 PACKET3_DMA_DATA_SI_CP_SYNC |
1200 (0xffff00000000 & bo1_mc) >> 32;
1201 pm4[i++] = 0xfffffffc & bo2_mc;
1202 pm4[i++] = (0xffffffff00000000 & bo2_mc) >> 32;
1203 pm4[i++] = sdma_write_length;
1204 } else {
1205 pm4[i++] = PACKET3(PACKET3_DMA_DATA, 5);
1206 pm4[i++] = PACKET3_DMA_DATA_ENGINE(0) |
1207 PACKET3_DMA_DATA_DST_SEL(0) |
1208 PACKET3_DMA_DATA_SRC_SEL(0) |
1209 PACKET3_DMA_DATA_CP_SYNC;
1210 pm4[i++] = 0xfffffffc & bo1_mc;
1211 pm4[i++] = (0xffffffff00000000 & bo1_mc) >> 32;
1212 pm4[i++] = 0xfffffffc & bo2_mc;
1213 pm4[i++] = (0xffffffff00000000 & bo2_mc) >> 32;
1214 pm4[i++] = sdma_write_length;
1215 } 1223 }
1216 }
1217 1224
1218 amdgpu_test_exec_cs_helper(context_handle, 1225 amdgpu_test_exec_cs_helper(context_handle,
1219 ip_type, 0, 1226 ip_type, ring_id,
1220 i, pm4, 1227 i, pm4,
1221 2, resources, 1228 2, resources,
1222 ib_info, ibs_request); 1229 ib_info, ibs_request);
1223 1230
1224 /* verify if SDMA test result meets with expected */ 1231 /* verify if SDMA test result meets with expected */
1225 i = 0; 1232 i = 0;
1226 while(i < sdma_write_length) { 1233 while(i < sdma_write_length) {
1227 CU_ASSERT_EQUAL(bo2_cpu[i++], 0xaa); 1234 CU_ASSERT_EQUAL(bo2_cpu[i++], 0xaa);
1235 }
1236 r = amdgpu_bo_unmap_and_free(bo1, bo1_va_handle, bo1_mc,
1237 sdma_write_length);
1238 CU_ASSERT_EQUAL(r, 0);
1239 r = amdgpu_bo_unmap_and_free(bo2, bo2_va_handle, bo2_mc,
1240 sdma_write_length);
1241 CU_ASSERT_EQUAL(r, 0);
1242 loop2++;
1228 } 1243 }
1229 r = amdgpu_bo_unmap_and_free(bo1, bo1_va_handle, bo1_mc, 1244 loop1++;
1230 sdma_write_length);
1231 CU_ASSERT_EQUAL(r, 0);
1232 r = amdgpu_bo_unmap_and_free(bo2, bo2_va_handle, bo2_mc,
1233 sdma_write_length);
1234 CU_ASSERT_EQUAL(r, 0);
1235 loop2++;
1236 } 1245 }
1237 loop1++;
1238 } 1246 }
1239 /* clean resources */ 1247 /* clean resources */
1240 free(resources); 1248 free(resources);