-rw-r--r--	include/drm/amdgpu_drm.h	396
1 file changed, 251 insertions, 145 deletions
diff --git a/include/drm/amdgpu_drm.h b/include/drm/amdgpu_drm.h
index d8f24976..5797283c 100644
--- a/include/drm/amdgpu_drm.h
+++ b/include/drm/amdgpu_drm.h
@@ -50,6 +50,7 @@ extern "C" {
 #define DRM_AMDGPU_WAIT_CS 0x09
 #define DRM_AMDGPU_GEM_OP 0x10
 #define DRM_AMDGPU_GEM_USERPTR 0x11
+#define DRM_AMDGPU_WAIT_FENCES 0x12

 #define DRM_IOCTL_AMDGPU_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create)
 #define DRM_IOCTL_AMDGPU_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap)
@@ -63,6 +64,7 @@ extern "C" {
 #define DRM_IOCTL_AMDGPU_WAIT_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_CS, union drm_amdgpu_wait_cs)
 #define DRM_IOCTL_AMDGPU_GEM_OP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_OP, struct drm_amdgpu_gem_op)
 #define DRM_IOCTL_AMDGPU_GEM_USERPTR DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_USERPTR, struct drm_amdgpu_gem_userptr)
+#define DRM_IOCTL_AMDGPU_WAIT_FENCES DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_FENCES, union drm_amdgpu_wait_fences)

 #define AMDGPU_GEM_DOMAIN_CPU 0x1
 #define AMDGPU_GEM_DOMAIN_GTT 0x2
@@ -79,22 +81,26 @@ extern "C" {
 #define AMDGPU_GEM_CREATE_CPU_GTT_USWC (1 << 2)
 /* Flag that the memory should be in VRAM and cleared */
 #define AMDGPU_GEM_CREATE_VRAM_CLEARED (1 << 3)
+/* Flag that create shadow bo(GTT) while allocating vram bo */
+#define AMDGPU_GEM_CREATE_SHADOW (1 << 4)
+/* Flag that allocating the BO should use linear VRAM */
+#define AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS (1 << 5)

 struct drm_amdgpu_gem_create_in {
 	/** the requested memory size */
-	uint64_t bo_size;
+	__u64 bo_size;
 	/** physical start_addr alignment in bytes for some HW requirements */
-	uint64_t alignment;
+	__u64 alignment;
 	/** the requested memory domains */
-	uint64_t domains;
+	__u64 domains;
 	/** allocation flags */
-	uint64_t domain_flags;
+	__u64 domain_flags;
 };

 struct drm_amdgpu_gem_create_out {
 	/** returned GEM object handle */
-	uint32_t handle;
-	uint32_t _pad;
+	__u32 handle;
+	__u32 _pad;
 };

 union drm_amdgpu_gem_create {
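
For reference, a minimal usage sketch (not part of this diff) of the new AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS flag through the existing DRM_IOCTL_AMDGPU_GEM_CREATE path. The render-node path, buffer size, and error handling are illustrative assumptions, not anything this patch defines.

/*
 * Usage sketch: allocate a 4 MiB linear (contiguous) VRAM BO.
 * Assumptions: libdrm's drmIoctl(), an amdgpu render node at
 * /dev/dri/renderD128, AMDGPU_GEM_DOMAIN_VRAM from the same header.
 * Build: gcc sketch_gem_create.c $(pkg-config --cflags --libs libdrm)
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <xf86drm.h>
#include <amdgpu_drm.h>

int main(void)
{
	union drm_amdgpu_gem_create req;
	int fd = open("/dev/dri/renderD128", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&req, 0, sizeof(req));
	req.in.bo_size = 4 << 20;                /* 4 MiB buffer object */
	req.in.alignment = 4096;                 /* page-aligned placement */
	req.in.domains = AMDGPU_GEM_DOMAIN_VRAM; /* VRAM-only placement */
	req.in.domain_flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; /* new flag: linear VRAM */

	if (drmIoctl(fd, DRM_IOCTL_AMDGPU_GEM_CREATE, &req) == 0)
		printf("created GEM handle %u\n", req.out.handle);
	return 0;
}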
@@ -111,28 +117,28 @@ union drm_amdgpu_gem_create {

 struct drm_amdgpu_bo_list_in {
 	/** Type of operation */
-	uint32_t operation;
+	__u32 operation;
 	/** Handle of list or 0 if we want to create one */
-	uint32_t list_handle;
+	__u32 list_handle;
 	/** Number of BOs in list */
-	uint32_t bo_number;
+	__u32 bo_number;
 	/** Size of each element describing BO */
-	uint32_t bo_info_size;
+	__u32 bo_info_size;
 	/** Pointer to array describing BOs */
-	uint64_t bo_info_ptr;
+	__u64 bo_info_ptr;
 };

 struct drm_amdgpu_bo_list_entry {
 	/** Handle of BO */
-	uint32_t bo_handle;
+	__u32 bo_handle;
 	/** New (if specified) BO priority to be used during migration */
-	uint32_t bo_priority;
+	__u32 bo_priority;
 };

 struct drm_amdgpu_bo_list_out {
 	/** Handle of resource list */
-	uint32_t list_handle;
-	uint32_t _pad;
+	__u32 list_handle;
+	__u32 _pad;
 };

 union drm_amdgpu_bo_list {
@@ -156,26 +162,26 @@ union drm_amdgpu_bo_list {

 struct drm_amdgpu_ctx_in {
 	/** AMDGPU_CTX_OP_* */
-	uint32_t op;
+	__u32 op;
 	/** For future use, no flags defined so far */
-	uint32_t flags;
-	uint32_t ctx_id;
-	uint32_t _pad;
+	__u32 flags;
+	__u32 ctx_id;
+	__u32 _pad;
 };

 union drm_amdgpu_ctx_out {
 	struct {
-		uint32_t ctx_id;
-		uint32_t _pad;
+		__u32 ctx_id;
+		__u32 _pad;
 	} alloc;

 	struct {
 		/** For future use, no flags defined so far */
-		uint64_t flags;
+		__u64 flags;
 		/** Number of resets caused by this context so far. */
-		uint32_t hangs;
+		__u32 hangs;
 		/** Reset status since the last call of the ioctl. */
-		uint32_t reset_status;
+		__u32 reset_status;
 	} state;
 };

@@ -195,12 +201,12 @@ union drm_amdgpu_ctx {
 #define AMDGPU_GEM_USERPTR_REGISTER (1 << 3)

 struct drm_amdgpu_gem_userptr {
-	uint64_t addr;
-	uint64_t size;
+	__u64 addr;
+	__u64 size;
 	/* AMDGPU_GEM_USERPTR_* */
-	uint32_t flags;
+	__u32 flags;
 	/* Resulting GEM handle */
-	uint32_t handle;
+	__u32 handle;
 };

 /* same meaning as the GB_TILE_MODE and GL_MACRO_TILE_MODE fields */
@@ -232,28 +238,28 @@ struct drm_amdgpu_gem_userptr {
 /** The same structure is shared for input/output */
 struct drm_amdgpu_gem_metadata {
 	/** GEM Object handle */
-	uint32_t handle;
+	__u32 handle;
 	/** Do we want get or set metadata */
-	uint32_t op;
+	__u32 op;
 	struct {
 		/** For future use, no flags defined so far */
-		uint64_t flags;
+		__u64 flags;
 		/** family specific tiling info */
-		uint64_t tiling_info;
-		uint32_t data_size_bytes;
-		uint32_t data[64];
+		__u64 tiling_info;
+		__u32 data_size_bytes;
+		__u32 data[64];
 	} data;
 };

 struct drm_amdgpu_gem_mmap_in {
 	/** the GEM object handle */
-	uint32_t handle;
-	uint32_t _pad;
+	__u32 handle;
+	__u32 _pad;
 };

 struct drm_amdgpu_gem_mmap_out {
 	/** mmap offset from the vma offset manager */
-	uint64_t addr_ptr;
+	__u64 addr_ptr;
 };

 union drm_amdgpu_gem_mmap {
@@ -263,18 +269,18 @@ union drm_amdgpu_gem_mmap {

 struct drm_amdgpu_gem_wait_idle_in {
 	/** GEM object handle */
-	uint32_t handle;
+	__u32 handle;
 	/** For future use, no flags defined so far */
-	uint32_t flags;
+	__u32 flags;
 	/** Absolute timeout to wait */
-	uint64_t timeout;
+	__u64 timeout;
 };

 struct drm_amdgpu_gem_wait_idle_out {
 	/** BO status: 0 - BO is idle, 1 - BO is busy */
-	uint32_t status;
+	__u32 status;
 	/** Returned current memory domain */
-	uint32_t domain;
+	__u32 domain;
 };

 union drm_amdgpu_gem_wait_idle {
@@ -284,18 +290,18 @@ union drm_amdgpu_gem_wait_idle {

 struct drm_amdgpu_wait_cs_in {
 	/** Command submission handle */
-	uint64_t handle;
+	__u64 handle;
 	/** Absolute timeout to wait */
-	uint64_t timeout;
-	uint32_t ip_type;
-	uint32_t ip_instance;
-	uint32_t ring;
-	uint32_t ctx_id;
+	__u64 timeout;
+	__u32 ip_type;
+	__u32 ip_instance;
+	__u32 ring;
+	__u32 ctx_id;
 };

 struct drm_amdgpu_wait_cs_out {
 	/** CS status: 0 - CS completed, 1 - CS still busy */
-	uint64_t status;
+	__u64 status;
 };

 union drm_amdgpu_wait_cs {
@@ -303,17 +309,43 @@ union drm_amdgpu_wait_cs {
 	struct drm_amdgpu_wait_cs_out out;
 };

+struct drm_amdgpu_fence {
+	__u32 ctx_id;
+	__u32 ip_type;
+	__u32 ip_instance;
+	__u32 ring;
+	__u64 seq_no;
+};
+
+struct drm_amdgpu_wait_fences_in {
+	/** This points to uint64_t * which points to fences */
+	__u64 fences;
+	__u32 fence_count;
+	__u32 wait_all;
+	__u64 timeout_ns;
+};
+
+struct drm_amdgpu_wait_fences_out {
+	__u32 status;
+	__u32 first_signaled;
+};
+
+union drm_amdgpu_wait_fences {
+	struct drm_amdgpu_wait_fences_in in;
+	struct drm_amdgpu_wait_fences_out out;
+};
+
 #define AMDGPU_GEM_OP_GET_GEM_CREATE_INFO 0
 #define AMDGPU_GEM_OP_SET_PLACEMENT 1

 /* Sets or returns a value associated with a buffer. */
 struct drm_amdgpu_gem_op {
 	/** GEM object handle */
-	uint32_t handle;
+	__u32 handle;
 	/** AMDGPU_GEM_OP_* */
-	uint32_t op;
+	__u32 op;
 	/** Input or return value */
-	uint64_t value;
+	__u64 value;
 };

 #define AMDGPU_VA_OP_MAP 1
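
For reference (not part of this diff): a sketch of how the new wait-fences ioctl might be driven from userspace. The drm_amdgpu_fence entries (ctx_id, ip_type, ring, seq_no) would come from earlier context-create and CS submissions, and the meaning of out.status is assumed to mirror drm_amdgpu_wait_cs_out, i.e. non-zero while the wait did not complete.

/*
 * Wait-fences usage sketch. Assumptions: libdrm's drmIoctl(), an open
 * amdgpu fd, and fences[] filled in from earlier command submissions.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <xf86drm.h>
#include <amdgpu_drm.h>

static int wait_for_any_fence(int fd, struct drm_amdgpu_fence *fences,
			      uint32_t count, uint64_t timeout_ns)
{
	union drm_amdgpu_wait_fences args;

	memset(&args, 0, sizeof(args));
	args.in.fences = (uint64_t)(uintptr_t)fences; /* user pointer passed as u64 */
	args.in.fence_count = count;
	args.in.wait_all = 0;            /* 0: return when any fence signals */
	args.in.timeout_ns = timeout_ns;

	if (drmIoctl(fd, DRM_IOCTL_AMDGPU_WAIT_FENCES, &args))
		return -1;

	/* status assumed non-zero on timeout, like drm_amdgpu_wait_cs_out */
	if (args.out.status == 0)
		printf("fence index %u signaled first\n", args.out.first_signaled);
	return 0;
}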
@@ -332,18 +364,18 @@ struct drm_amdgpu_gem_op {

 struct drm_amdgpu_gem_va {
 	/** GEM object handle */
-	uint32_t handle;
-	uint32_t _pad;
+	__u32 handle;
+	__u32 _pad;
 	/** AMDGPU_VA_OP_* */
-	uint32_t operation;
+	__u32 operation;
 	/** AMDGPU_VM_PAGE_* */
-	uint32_t flags;
+	__u32 flags;
 	/** va address to assign . Must be correctly aligned.*/
-	uint64_t va_address;
+	__u64 va_address;
 	/** Specify offset inside of BO to assign. Must be correctly aligned.*/
-	uint64_t offset_in_bo;
+	__u64 offset_in_bo;
 	/** Specify mapping size. Must be correctly aligned. */
-	uint64_t map_size;
+	__u64 map_size;
 };

 #define AMDGPU_HW_IP_GFX 0
@@ -360,24 +392,24 @@ struct drm_amdgpu_gem_va {
 #define AMDGPU_CHUNK_ID_DEPENDENCIES 0x03

 struct drm_amdgpu_cs_chunk {
-	uint32_t chunk_id;
-	uint32_t length_dw;
-	uint64_t chunk_data;
+	__u32 chunk_id;
+	__u32 length_dw;
+	__u64 chunk_data;
 };

 struct drm_amdgpu_cs_in {
 	/** Rendering context id */
-	uint32_t ctx_id;
+	__u32 ctx_id;
 	/** Handle of resource list associated with CS */
-	uint32_t bo_list_handle;
-	uint32_t num_chunks;
-	uint32_t _pad;
-	/** this points to uint64_t * which point to cs chunks */
-	uint64_t chunks;
+	__u32 bo_list_handle;
+	__u32 num_chunks;
+	__u32 _pad;
+	/** this points to __u64 * which point to cs chunks */
+	__u64 chunks;
 };

 struct drm_amdgpu_cs_out {
-	uint64_t handle;
+	__u64 handle;
 };

 union drm_amdgpu_cs {
@@ -394,32 +426,32 @@ union drm_amdgpu_cs {
 #define AMDGPU_IB_FLAG_PREAMBLE (1<<1)

 struct drm_amdgpu_cs_chunk_ib {
-	uint32_t _pad;
+	__u32 _pad;
 	/** AMDGPU_IB_FLAG_* */
-	uint32_t flags;
+	__u32 flags;
 	/** Virtual address to begin IB execution */
-	uint64_t va_start;
+	__u64 va_start;
 	/** Size of submission */
-	uint32_t ib_bytes;
+	__u32 ib_bytes;
 	/** HW IP to submit to */
-	uint32_t ip_type;
+	__u32 ip_type;
 	/** HW IP index of the same type to submit to */
-	uint32_t ip_instance;
+	__u32 ip_instance;
 	/** Ring index to submit to */
-	uint32_t ring;
+	__u32 ring;
 };

 struct drm_amdgpu_cs_chunk_dep {
-	uint32_t ip_type;
-	uint32_t ip_instance;
-	uint32_t ring;
-	uint32_t ctx_id;
-	uint64_t handle;
+	__u32 ip_type;
+	__u32 ip_instance;
+	__u32 ring;
+	__u32 ctx_id;
+	__u64 handle;
 };

 struct drm_amdgpu_cs_chunk_fence {
-	uint32_t handle;
-	uint32_t offset;
+	__u32 handle;
+	__u32 offset;
 };

 struct drm_amdgpu_cs_chunk_data {
@@ -434,6 +466,7 @@ struct drm_amdgpu_cs_chunk_data {
  *
  */
 #define AMDGPU_IDS_FLAGS_FUSION 0x1
+#define AMDGPU_IDS_FLAGS_PREEMPTION 0x2

 /* indicate if acceleration can be working */
 #define AMDGPU_INFO_ACCEL_WORKING 0x00
@@ -483,6 +516,20 @@ struct drm_amdgpu_cs_chunk_data {
 #define AMDGPU_INFO_DEV_INFO 0x16
 /* visible vram usage */
 #define AMDGPU_INFO_VIS_VRAM_USAGE 0x17
+/* number of TTM buffer evictions */
+#define AMDGPU_INFO_NUM_EVICTIONS 0x18
+/* Query memory about VRAM and GTT domains */
+#define AMDGPU_INFO_MEMORY 0x19
+/* Query vce clock table */
+#define AMDGPU_INFO_VCE_CLOCK_TABLE 0x1A
+/* Query vbios related information */
+#define AMDGPU_INFO_VBIOS 0x1B
+	/* Subquery id: Query vbios size */
+	#define AMDGPU_INFO_VBIOS_SIZE 0x1
+	/* Subquery id: Query vbios image */
+	#define AMDGPU_INFO_VBIOS_IMAGE 0x2
+/* Query UVD handles */
+#define AMDGPU_INFO_NUM_HANDLES 0x1C

 #define AMDGPU_INFO_MMR_SE_INDEX_SHIFT 0
 #define AMDGPU_INFO_MMR_SE_INDEX_MASK 0xff
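
For reference (not part of this diff): the new query ids above are consumed through the pre-existing AMDGPU_INFO ioctl. A sketch of reading AMDGPU_INFO_NUM_EVICTIONS follows, assuming the kernel writes a single 64-bit counter into the caller's buffer; the return layout is not spelled out in this header.

/*
 * INFO-query usage sketch. Assumptions: libdrm's drmIoctl(), the existing
 * DRM_IOCTL_AMDGPU_INFO request, and a single __u64 result for this query.
 */
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <amdgpu_drm.h>

static int query_num_evictions(int fd, uint64_t *evictions)
{
	struct drm_amdgpu_info request;

	memset(&request, 0, sizeof(request));
	request.return_pointer = (uint64_t)(uintptr_t)evictions; /* where the kernel writes */
	request.return_size = sizeof(*evictions);                /* limits the kernel write */
	request.query = AMDGPU_INFO_NUM_EVICTIONS;               /* new query id 0x18 */

	return drmIoctl(fd, DRM_IOCTL_AMDGPU_INFO, &request);
}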
@@ -491,86 +538,119 @@ struct drm_amdgpu_cs_chunk_data {

 struct drm_amdgpu_query_fw {
 	/** AMDGPU_INFO_FW_* */
-	uint32_t fw_type;
+	__u32 fw_type;
 	/**
 	 * Index of the IP if there are more IPs of
 	 * the same type.
 	 */
-	uint32_t ip_instance;
+	__u32 ip_instance;
 	/**
 	 * Index of the engine. Whether this is used depends
 	 * on the firmware type. (e.g. MEC, SDMA)
 	 */
-	uint32_t index;
-	uint32_t _pad;
+	__u32 index;
+	__u32 _pad;
 };

 /* Input structure for the INFO ioctl */
 struct drm_amdgpu_info {
 	/* Where the return value will be stored */
-	uint64_t return_pointer;
+	__u64 return_pointer;
 	/* The size of the return value. Just like "size" in "snprintf",
 	 * it limits how many bytes the kernel can write. */
-	uint32_t return_size;
+	__u32 return_size;
 	/* The query request id. */
-	uint32_t query;
+	__u32 query;

 	union {
 		struct {
-			uint32_t id;
-			uint32_t _pad;
+			__u32 id;
+			__u32 _pad;
 		} mode_crtc;

 		struct {
 			/** AMDGPU_HW_IP_* */
-			uint32_t type;
+			__u32 type;
 			/**
 			 * Index of the IP if there are more IPs of the same
 			 * type. Ignored by AMDGPU_INFO_HW_IP_COUNT.
 			 */
-			uint32_t ip_instance;
+			__u32 ip_instance;
 		} query_hw_ip;

 		struct {
-			uint32_t dword_offset;
+			__u32 dword_offset;
 			/** number of registers to read */
-			uint32_t count;
-			uint32_t instance;
+			__u32 count;
+			__u32 instance;
 			/** For future use, no flags defined so far */
-			uint32_t flags;
+			__u32 flags;
 		} read_mmr_reg;

 		struct drm_amdgpu_query_fw query_fw;
+
+		struct {
+			__u32 type;
+			__u32 offset;
+		} vbios_info;
 	};
 };

 struct drm_amdgpu_info_gds {
 	/** GDS GFX partition size */
-	uint32_t gds_gfx_partition_size;
+	__u32 gds_gfx_partition_size;
 	/** GDS compute partition size */
-	uint32_t compute_partition_size;
+	__u32 compute_partition_size;
 	/** total GDS memory size */
-	uint32_t gds_total_size;
+	__u32 gds_total_size;
 	/** GWS size per GFX partition */
-	uint32_t gws_per_gfx_partition;
+	__u32 gws_per_gfx_partition;
 	/** GSW size per compute partition */
-	uint32_t gws_per_compute_partition;
+	__u32 gws_per_compute_partition;
 	/** OA size per GFX partition */
-	uint32_t oa_per_gfx_partition;
+	__u32 oa_per_gfx_partition;
 	/** OA size per compute partition */
-	uint32_t oa_per_compute_partition;
-	uint32_t _pad;
+	__u32 oa_per_compute_partition;
+	__u32 _pad;
 };

 struct drm_amdgpu_info_vram_gtt {
-	uint64_t vram_size;
-	uint64_t vram_cpu_accessible_size;
-	uint64_t gtt_size;
+	__u64 vram_size;
+	__u64 vram_cpu_accessible_size;
+	__u64 gtt_size;
+};
+
+struct drm_amdgpu_heap_info {
+	/** max. physical memory */
+	__u64 total_heap_size;
+
+	/** Theoretical max. available memory in the given heap */
+	__u64 usable_heap_size;
+
+	/**
+	 * Number of bytes allocated in the heap. This includes all processes
+	 * and private allocations in the kernel. It changes when new buffers
+	 * are allocated, freed, and moved. It cannot be larger than
+	 * heap_size.
+	 */
+	__u64 heap_usage;
+
+	/**
+	 * Theoretical possible max. size of buffer which
+	 * could be allocated in the given heap
+	 */
+	__u64 max_allocation;
+};
+
+struct drm_amdgpu_memory_info {
+	struct drm_amdgpu_heap_info vram;
+	struct drm_amdgpu_heap_info cpu_accessible_vram;
+	struct drm_amdgpu_heap_info gtt;
 };

 struct drm_amdgpu_info_firmware {
-	uint32_t ver;
-	uint32_t feature;
+	__u32 ver;
+	__u32 feature;
 };

 #define AMDGPU_VRAM_TYPE_UNKNOWN 0
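
For reference (not part of this diff): a sketch of retrieving the new per-heap accounting with AMDGPU_INFO_MEMORY. The structure is the drm_amdgpu_memory_info added above; the ioctl and return-buffer convention are the existing INFO mechanism, as in the previous sketch.

/*
 * Memory-info usage sketch. Assumptions: drmIoctl() and
 * DRM_IOCTL_AMDGPU_INFO provided by libdrm, and an open amdgpu fd.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <xf86drm.h>
#include <amdgpu_drm.h>

static void print_heaps(int fd)
{
	struct drm_amdgpu_memory_info mem;
	struct drm_amdgpu_info request;

	memset(&mem, 0, sizeof(mem));
	memset(&request, 0, sizeof(request));
	request.return_pointer = (uint64_t)(uintptr_t)&mem;
	request.return_size = sizeof(mem);
	request.query = AMDGPU_INFO_MEMORY;   /* new query id 0x19 */

	if (drmIoctl(fd, DRM_IOCTL_AMDGPU_INFO, &request))
		return;

	printf("VRAM: %llu of %llu bytes in use, max single alloc %llu\n",
	       (unsigned long long)mem.vram.heap_usage,
	       (unsigned long long)mem.vram.total_heap_size,
	       (unsigned long long)mem.vram.max_allocation);
	printf("GTT:  %llu of %llu bytes in use\n",
	       (unsigned long long)mem.gtt.heap_usage,
	       (unsigned long long)mem.gtt.total_heap_size);
}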
@@ -584,67 +664,93 @@ struct drm_amdgpu_info_firmware {

 struct drm_amdgpu_info_device {
 	/** PCI Device ID */
-	uint32_t device_id;
+	__u32 device_id;
 	/** Internal chip revision: A0, A1, etc.) */
-	uint32_t chip_rev;
-	uint32_t external_rev;
+	__u32 chip_rev;
+	__u32 external_rev;
 	/** Revision id in PCI Config space */
-	uint32_t pci_rev;
-	uint32_t family;
-	uint32_t num_shader_engines;
-	uint32_t num_shader_arrays_per_engine;
+	__u32 pci_rev;
+	__u32 family;
+	__u32 num_shader_engines;
+	__u32 num_shader_arrays_per_engine;
 	/* in KHz */
-	uint32_t gpu_counter_freq;
-	uint64_t max_engine_clock;
-	uint64_t max_memory_clock;
+	__u32 gpu_counter_freq;
+	__u64 max_engine_clock;
+	__u64 max_memory_clock;
 	/* cu information */
-	uint32_t cu_active_number;
-	uint32_t cu_ao_mask;
-	uint32_t cu_bitmap[4][4];
+	__u32 cu_active_number;
+	__u32 cu_ao_mask;
+	__u32 cu_bitmap[4][4];
 	/** Render backend pipe mask. One render backend is CB+DB. */
-	uint32_t enabled_rb_pipes_mask;
-	uint32_t num_rb_pipes;
-	uint32_t num_hw_gfx_contexts;
-	uint32_t _pad;
-	uint64_t ids_flags;
+	__u32 enabled_rb_pipes_mask;
+	__u32 num_rb_pipes;
+	__u32 num_hw_gfx_contexts;
+	__u32 _pad;
+	__u64 ids_flags;
 	/** Starting virtual address for UMDs. */
-	uint64_t virtual_address_offset;
+	__u64 virtual_address_offset;
 	/** The maximum virtual address */
-	uint64_t virtual_address_max;
+	__u64 virtual_address_max;
 	/** Required alignment of virtual addresses. */
-	uint32_t virtual_address_alignment;
+	__u32 virtual_address_alignment;
 	/** Page table entry - fragment size */
-	uint32_t pte_fragment_size;
-	uint32_t gart_page_size;
+	__u32 pte_fragment_size;
+	__u32 gart_page_size;
 	/** constant engine ram size*/
-	uint32_t ce_ram_size;
+	__u32 ce_ram_size;
 	/** video memory type info*/
-	uint32_t vram_type;
+	__u32 vram_type;
 	/** video memory bit width*/
-	uint32_t vram_bit_width;
+	__u32 vram_bit_width;
 	/* vce harvesting instance */
-	uint32_t vce_harvest_config;
+	__u32 vce_harvest_config;
 };

 struct drm_amdgpu_info_hw_ip {
 	/** Version of h/w IP */
-	uint32_t hw_ip_version_major;
-	uint32_t hw_ip_version_minor;
+	__u32 hw_ip_version_major;
+	__u32 hw_ip_version_minor;
 	/** Capabilities */
-	uint64_t capabilities_flags;
+	__u64 capabilities_flags;
 	/** command buffer address start alignment*/
-	uint32_t ib_start_alignment;
+	__u32 ib_start_alignment;
 	/** command buffer size alignment*/
-	uint32_t ib_size_alignment;
+	__u32 ib_size_alignment;
 	/** Bitmask of available rings. Bit 0 means ring 0, etc. */
-	uint32_t available_rings;
-	uint32_t _pad;
+	__u32 available_rings;
+	__u32 _pad;
+};
+
+struct drm_amdgpu_info_num_handles {
+	/** Max handles as supported by firmware for UVD */
+	__u32 uvd_max_handles;
+	/** Handles currently in use for UVD */
+	__u32 uvd_used_handles;
+};
+
+#define AMDGPU_VCE_CLOCK_TABLE_ENTRIES 6
+
+struct drm_amdgpu_info_vce_clock_table_entry {
+	/** System clock */
+	__u32 sclk;
+	/** Memory clock */
+	__u32 mclk;
+	/** VCE clock */
+	__u32 eclk;
+	__u32 pad;
+};
+
+struct drm_amdgpu_info_vce_clock_table {
+	struct drm_amdgpu_info_vce_clock_table_entry entries[AMDGPU_VCE_CLOCK_TABLE_ENTRIES];
+	__u32 num_valid_entries;
+	__u32 pad;
 };

 /*
  * Supported GPU families
  */
 #define AMDGPU_FAMILY_UNKNOWN 0
+#define AMDGPU_FAMILY_SI 110 /* Hainan, Oland, Verde, Pitcairn, Tahiti */
 #define AMDGPU_FAMILY_CI 120 /* Bonaire, Hawaii */
 #define AMDGPU_FAMILY_KV 125 /* Kaveri, Kabini, Mullins */
 #define AMDGPU_FAMILY_VI 130 /* Iceland, Tonga */
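
For reference (not part of this diff): a sketch of reading the VCE clock table defined above through the same INFO path. Only the first num_valid_entries entries are assumed to carry data; the clock units are whatever the kernel reports.

/*
 * VCE clock-table usage sketch. Assumptions: drmIoctl() and
 * DRM_IOCTL_AMDGPU_INFO from libdrm, and an open amdgpu fd.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <xf86drm.h>
#include <amdgpu_drm.h>

static void dump_vce_clock_table(int fd)
{
	struct drm_amdgpu_info_vce_clock_table table;
	struct drm_amdgpu_info request;
	uint32_t i;

	memset(&table, 0, sizeof(table));
	memset(&request, 0, sizeof(request));
	request.return_pointer = (uint64_t)(uintptr_t)&table;
	request.return_size = sizeof(table);
	request.query = AMDGPU_INFO_VCE_CLOCK_TABLE;  /* new query id 0x1A */

	if (drmIoctl(fd, DRM_IOCTL_AMDGPU_INFO, &request))
		return;

	for (i = 0; i < table.num_valid_entries; i++)
		printf("VCE state %u: sclk %u, mclk %u, eclk %u\n", i,
		       table.entries[i].sclk, table.entries[i].mclk,
		       table.entries[i].eclk);
}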