diff options
Diffstat (limited to 'include/drm/i915_drm.h')
-rw-r--r-- | include/drm/i915_drm.h | 321 |
1 files changed, 307 insertions, 14 deletions
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h index 5ebe0462..16e452aa 100644 --- a/include/drm/i915_drm.h +++ b/include/drm/i915_drm.h | |||
@@ -86,6 +86,62 @@ enum i915_mocs_table_index { | |||
86 | I915_MOCS_CACHED, | 86 | I915_MOCS_CACHED, |
87 | }; | 87 | }; |
88 | 88 | ||
89 | /* | ||
90 | * Different engines serve different roles, and there may be more than one | ||
91 | * engine serving each role. enum drm_i915_gem_engine_class provides a | ||
92 | * classification of the role of the engine, which may be used when requesting | ||
93 | * operations to be performed on a certain subset of engines, or for providing | ||
94 | * information about that group. | ||
95 | */ | ||
96 | enum drm_i915_gem_engine_class { | ||
97 | I915_ENGINE_CLASS_RENDER = 0, | ||
98 | I915_ENGINE_CLASS_COPY = 1, | ||
99 | I915_ENGINE_CLASS_VIDEO = 2, | ||
100 | I915_ENGINE_CLASS_VIDEO_ENHANCE = 3, | ||
101 | |||
102 | I915_ENGINE_CLASS_INVALID = -1 | ||
103 | }; | ||
104 | |||
105 | /** | ||
106 | * DOC: perf_events exposed by i915 through /sys/bus/event_sources/drivers/i915 | ||
107 | * | ||
108 | */ | ||
109 | |||
110 | enum drm_i915_pmu_engine_sample { | ||
111 | I915_SAMPLE_BUSY = 0, | ||
112 | I915_SAMPLE_WAIT = 1, | ||
113 | I915_SAMPLE_SEMA = 2 | ||
114 | }; | ||
115 | |||
116 | #define I915_PMU_SAMPLE_BITS (4) | ||
117 | #define I915_PMU_SAMPLE_MASK (0xf) | ||
118 | #define I915_PMU_SAMPLE_INSTANCE_BITS (8) | ||
119 | #define I915_PMU_CLASS_SHIFT \ | ||
120 | (I915_PMU_SAMPLE_BITS + I915_PMU_SAMPLE_INSTANCE_BITS) | ||
121 | |||
122 | #define __I915_PMU_ENGINE(class, instance, sample) \ | ||
123 | ((class) << I915_PMU_CLASS_SHIFT | \ | ||
124 | (instance) << I915_PMU_SAMPLE_BITS | \ | ||
125 | (sample)) | ||
126 | |||
127 | #define I915_PMU_ENGINE_BUSY(class, instance) \ | ||
128 | __I915_PMU_ENGINE(class, instance, I915_SAMPLE_BUSY) | ||
129 | |||
130 | #define I915_PMU_ENGINE_WAIT(class, instance) \ | ||
131 | __I915_PMU_ENGINE(class, instance, I915_SAMPLE_WAIT) | ||
132 | |||
133 | #define I915_PMU_ENGINE_SEMA(class, instance) \ | ||
134 | __I915_PMU_ENGINE(class, instance, I915_SAMPLE_SEMA) | ||
135 | |||
136 | #define __I915_PMU_OTHER(x) (__I915_PMU_ENGINE(0xff, 0xff, 0xf) + 1 + (x)) | ||
137 | |||
138 | #define I915_PMU_ACTUAL_FREQUENCY __I915_PMU_OTHER(0) | ||
139 | #define I915_PMU_REQUESTED_FREQUENCY __I915_PMU_OTHER(1) | ||
140 | #define I915_PMU_INTERRUPTS __I915_PMU_OTHER(2) | ||
141 | #define I915_PMU_RC6_RESIDENCY __I915_PMU_OTHER(3) | ||
142 | |||
143 | #define I915_PMU_LAST I915_PMU_RC6_RESIDENCY | ||
144 | |||
89 | /* Each region is a minimum of 16k, and there are at most 255 of them. | 145 | /* Each region is a minimum of 16k, and there are at most 255 of them. |
90 | */ | 146 | */ |
91 | #define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use | 147 | #define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use |
@@ -260,6 +316,9 @@ typedef struct _drm_i915_sarea { | |||
260 | #define DRM_I915_GEM_CONTEXT_GETPARAM 0x34 | 316 | #define DRM_I915_GEM_CONTEXT_GETPARAM 0x34 |
261 | #define DRM_I915_GEM_CONTEXT_SETPARAM 0x35 | 317 | #define DRM_I915_GEM_CONTEXT_SETPARAM 0x35 |
262 | #define DRM_I915_PERF_OPEN 0x36 | 318 | #define DRM_I915_PERF_OPEN 0x36 |
319 | #define DRM_I915_PERF_ADD_CONFIG 0x37 | ||
320 | #define DRM_I915_PERF_REMOVE_CONFIG 0x38 | ||
321 | #define DRM_I915_QUERY 0x39 | ||
263 | 322 | ||
264 | #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) | 323 | #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) |
265 | #define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) | 324 | #define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) |
@@ -315,6 +374,9 @@ typedef struct _drm_i915_sarea { | |||
315 | #define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param) | 374 | #define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param) |
316 | #define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param) | 375 | #define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param) |
317 | #define DRM_IOCTL_I915_PERF_OPEN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param) | 376 | #define DRM_IOCTL_I915_PERF_OPEN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param) |
377 | #define DRM_IOCTL_I915_PERF_ADD_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config) | ||
378 | #define DRM_IOCTL_I915_PERF_REMOVE_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64) | ||
379 | #define DRM_IOCTL_I915_QUERY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_QUERY, struct drm_i915_query) | ||
318 | 380 | ||
319 | /* Allow drivers to submit batchbuffers directly to hardware, relying | 381 | /* Allow drivers to submit batchbuffers directly to hardware, relying |
320 | * on the security mechanisms provided by hardware. | 382 | * on the security mechanisms provided by hardware. |
@@ -393,10 +455,20 @@ typedef struct drm_i915_irq_wait { | |||
393 | #define I915_PARAM_MIN_EU_IN_POOL 39 | 455 | #define I915_PARAM_MIN_EU_IN_POOL 39 |
394 | #define I915_PARAM_MMAP_GTT_VERSION 40 | 456 | #define I915_PARAM_MMAP_GTT_VERSION 40 |
395 | 457 | ||
396 | /* Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution | 458 | /* |
459 | * Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution | ||
397 | * priorities and the driver will attempt to execute batches in priority order. | 460 | * priorities and the driver will attempt to execute batches in priority order. |
461 | * The param returns a capability bitmask, nonzero implies that the scheduler | ||
462 | * is enabled, with different features present according to the mask. | ||
463 | * | ||
464 | * The initial priority for each batch is supplied by the context and is | ||
465 | * controlled via I915_CONTEXT_PARAM_PRIORITY. | ||
398 | */ | 466 | */ |
399 | #define I915_PARAM_HAS_SCHEDULER 41 | 467 | #define I915_PARAM_HAS_SCHEDULER 41 |
468 | #define I915_SCHEDULER_CAP_ENABLED (1ul << 0) | ||
469 | #define I915_SCHEDULER_CAP_PRIORITY (1ul << 1) | ||
470 | #define I915_SCHEDULER_CAP_PREEMPTION (1ul << 2) | ||
471 | |||
400 | #define I915_PARAM_HUC_STATUS 42 | 472 | #define I915_PARAM_HUC_STATUS 42 |
401 | 473 | ||
402 | /* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of | 474 | /* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of |
@@ -412,6 +484,51 @@ typedef struct drm_i915_irq_wait { | |||
412 | */ | 484 | */ |
413 | #define I915_PARAM_HAS_EXEC_FENCE 44 | 485 | #define I915_PARAM_HAS_EXEC_FENCE 44 |
414 | 486 | ||
487 | /* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to capture | ||
488 | * user specified buffers for post-mortem debugging of GPU hangs. See | ||
489 | * EXEC_OBJECT_CAPTURE. | ||
490 | */ | ||
491 | #define I915_PARAM_HAS_EXEC_CAPTURE 45 | ||
492 | |||
493 | #define I915_PARAM_SLICE_MASK 46 | ||
494 | |||
495 | /* Assuming it's uniform for each slice, this queries the mask of subslices | ||
496 | * per-slice for this system. | ||
497 | */ | ||
498 | #define I915_PARAM_SUBSLICE_MASK 47 | ||
499 | |||
500 | /* | ||
501 | * Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying the batch buffer | ||
502 | * as the first execobject as opposed to the last. See I915_EXEC_BATCH_FIRST. | ||
503 | */ | ||
504 | #define I915_PARAM_HAS_EXEC_BATCH_FIRST 48 | ||
505 | |||
506 | /* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of | ||
507 | * drm_i915_gem_exec_fence structures. See I915_EXEC_FENCE_ARRAY. | ||
508 | */ | ||
509 | #define I915_PARAM_HAS_EXEC_FENCE_ARRAY 49 | ||
510 | |||
511 | /* | ||
512 | * Query whether every context (both per-file default and user created) is | ||
513 | * isolated (insofar as HW supports). If this parameter is not true, then | ||
514 | * freshly created contexts may inherit values from an existing context, | ||
515 | * rather than default HW values. If true, it also ensures (insofar as HW | ||
516 | * supports) that all state set by this context will not leak to any other | ||
517 | * context. | ||
518 | * | ||
519 | * As not every engine across every gen support contexts, the returned | ||
520 | * value reports the support of context isolation for individual engines by | ||
521 | * returning a bitmask of each engine class set to true if that class supports | ||
522 | * isolation. | ||
523 | */ | ||
524 | #define I915_PARAM_HAS_CONTEXT_ISOLATION 50 | ||
525 | |||
526 | /* Frequency of the command streamer timestamps given by the *_TIMESTAMP | ||
527 | * registers. This used to be fixed per platform but from CNL onwards, this | ||
528 | * might vary depending on the parts. | ||
529 | */ | ||
530 | #define I915_PARAM_CS_TIMESTAMP_FREQUENCY 51 | ||
531 | |||
415 | typedef struct drm_i915_getparam { | 532 | typedef struct drm_i915_getparam { |
416 | __s32 param; | 533 | __s32 param; |
417 | /* | 534 | /* |
@@ -666,6 +783,8 @@ struct drm_i915_gem_relocation_entry { | |||
666 | #define I915_GEM_DOMAIN_VERTEX 0x00000020 | 783 | #define I915_GEM_DOMAIN_VERTEX 0x00000020 |
667 | /** GTT domain - aperture and scanout */ | 784 | /** GTT domain - aperture and scanout */ |
668 | #define I915_GEM_DOMAIN_GTT 0x00000040 | 785 | #define I915_GEM_DOMAIN_GTT 0x00000040 |
786 | /** WC domain - uncached access */ | ||
787 | #define I915_GEM_DOMAIN_WC 0x00000080 | ||
669 | /** @} */ | 788 | /** @} */ |
670 | 789 | ||
671 | struct drm_i915_gem_exec_object { | 790 | struct drm_i915_gem_exec_object { |
@@ -773,8 +892,15 @@ struct drm_i915_gem_exec_object2 { | |||
773 | * I915_PARAM_HAS_EXEC_FENCE to order execbufs and execute them asynchronously. | 892 | * I915_PARAM_HAS_EXEC_FENCE to order execbufs and execute them asynchronously. |
774 | */ | 893 | */ |
775 | #define EXEC_OBJECT_ASYNC (1<<6) | 894 | #define EXEC_OBJECT_ASYNC (1<<6) |
895 | /* Request that the contents of this execobject be copied into the error | ||
896 | * state upon a GPU hang involving this batch for post-mortem debugging. | ||
897 | * These buffers are recorded in no particular order as "user" in | ||
898 | * /sys/class/drm/cardN/error. Query I915_PARAM_HAS_EXEC_CAPTURE to see | ||
899 | * if the kernel supports this flag. | ||
900 | */ | ||
901 | #define EXEC_OBJECT_CAPTURE (1<<7) | ||
776 | /* All remaining bits are MBZ and RESERVED FOR FUTURE USE */ | 902 | /* All remaining bits are MBZ and RESERVED FOR FUTURE USE */ |
777 | #define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_ASYNC<<1) | 903 | #define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_CAPTURE<<1) |
778 | __u64 flags; | 904 | __u64 flags; |
779 | 905 | ||
780 | union { | 906 | union { |
@@ -784,6 +910,18 @@ struct drm_i915_gem_exec_object2 { | |||
784 | __u64 rsvd2; | 910 | __u64 rsvd2; |
785 | }; | 911 | }; |
786 | 912 | ||
913 | struct drm_i915_gem_exec_fence { | ||
914 | /** | ||
915 | * User's handle for a drm_syncobj to wait on or signal. | ||
916 | */ | ||
917 | __u32 handle; | ||
918 | |||
919 | #define I915_EXEC_FENCE_WAIT (1<<0) | ||
920 | #define I915_EXEC_FENCE_SIGNAL (1<<1) | ||
921 | #define __I915_EXEC_FENCE_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SIGNAL << 1)) | ||
922 | __u32 flags; | ||
923 | }; | ||
924 | |||
787 | struct drm_i915_gem_execbuffer2 { | 925 | struct drm_i915_gem_execbuffer2 { |
788 | /** | 926 | /** |
789 | * List of gem_exec_object2 structs | 927 | * List of gem_exec_object2 structs |
@@ -798,7 +936,11 @@ struct drm_i915_gem_execbuffer2 { | |||
798 | __u32 DR1; | 936 | __u32 DR1; |
799 | __u32 DR4; | 937 | __u32 DR4; |
800 | __u32 num_cliprects; | 938 | __u32 num_cliprects; |
801 | /** This is a struct drm_clip_rect *cliprects */ | 939 | /** |
940 | * This is a struct drm_clip_rect *cliprects if I915_EXEC_FENCE_ARRAY | ||
941 | * is not set. If I915_EXEC_FENCE_ARRAY is set, then this is a | ||
942 | * struct drm_i915_gem_exec_fence *fences. | ||
943 | */ | ||
802 | __u64 cliprects_ptr; | 944 | __u64 cliprects_ptr; |
803 | #define I915_EXEC_RING_MASK (7<<0) | 945 | #define I915_EXEC_RING_MASK (7<<0) |
804 | #define I915_EXEC_DEFAULT (0<<0) | 946 | #define I915_EXEC_DEFAULT (0<<0) |
@@ -889,7 +1031,24 @@ struct drm_i915_gem_execbuffer2 { | |||
889 | */ | 1031 | */ |
890 | #define I915_EXEC_FENCE_OUT (1<<17) | 1032 | #define I915_EXEC_FENCE_OUT (1<<17) |
891 | 1033 | ||
892 | #define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_OUT<<1)) | 1034 | /* |
1035 | * Traditionally the execbuf ioctl has only considered the final element in | ||
1036 | * the execobject[] to be the executable batch. Often though, the client | ||
1037 | * will know the batch object prior to construction and being able to place | ||
1038 | * it into the execobject[] array first can simplify the relocation tracking. | ||
1039 | * Setting I915_EXEC_BATCH_FIRST tells execbuf to use element 0 of the | ||
1040 | * execobject[] as the batch instead (the default is to use the last | ||
1041 | * element). | ||
1042 | */ | ||
1043 | #define I915_EXEC_BATCH_FIRST (1<<18) | ||
1044 | |||
1045 | /* Setting I915_EXEC_FENCE_ARRAY implies that num_cliprects and cliprects_ptr | ||
1046 | * define an array of i915_gem_exec_fence structures which specify a set of | ||
1047 | * dma fences to wait upon or signal. | ||
1048 | */ | ||
1049 | #define I915_EXEC_FENCE_ARRAY (1<<19) | ||
1050 | |||
1051 | #define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_ARRAY<<1)) | ||
893 | 1052 | ||
894 | #define I915_EXEC_CONTEXT_ID_MASK (0xffffffff) | 1053 | #define I915_EXEC_CONTEXT_ID_MASK (0xffffffff) |
895 | #define i915_execbuffer2_set_context_id(eb2, context) \ | 1054 | #define i915_execbuffer2_set_context_id(eb2, context) \ |
@@ -1201,7 +1360,9 @@ struct drm_intel_overlay_attrs { | |||
1201 | * active on a given plane. | 1360 | * active on a given plane. |
1202 | */ | 1361 | */ |
1203 | 1362 | ||
1204 | #define I915_SET_COLORKEY_NONE (1<<0) /* disable color key matching */ | 1363 | #define I915_SET_COLORKEY_NONE (1<<0) /* Deprecated. Instead set |
1364 | * flags==0 to disable colorkeying. | ||
1365 | */ | ||
1205 | #define I915_SET_COLORKEY_DESTINATION (1<<1) | 1366 | #define I915_SET_COLORKEY_DESTINATION (1<<1) |
1206 | #define I915_SET_COLORKEY_SOURCE (1<<2) | 1367 | #define I915_SET_COLORKEY_SOURCE (1<<2) |
1207 | struct drm_intel_sprite_colorkey { | 1368 | struct drm_intel_sprite_colorkey { |
@@ -1239,14 +1400,16 @@ struct drm_i915_reg_read { | |||
1239 | * be specified | 1400 | * be specified |
1240 | */ | 1401 | */ |
1241 | __u64 offset; | 1402 | __u64 offset; |
1403 | #define I915_REG_READ_8B_WA (1ul << 0) | ||
1404 | |||
1242 | __u64 val; /* Return value */ | 1405 | __u64 val; /* Return value */ |
1243 | }; | 1406 | }; |
1244 | /* Known registers: | 1407 | /* Known registers: |
1245 | * | 1408 | * |
1246 | * Render engine timestamp - 0x2358 + 64bit - gen7+ | 1409 | * Render engine timestamp - 0x2358 + 64bit - gen7+ |
1247 | * - Note this register returns an invalid value if using the default | 1410 | * - Note this register returns an invalid value if using the default |
1248 | * single instruction 8byte read, in order to workaround that use | 1411 | * single instruction 8byte read, in order to workaround that pass |
1249 | * offset (0x2538 | 1) instead. | 1412 | * flag I915_REG_READ_8B_WA in offset field. |
1250 | * | 1413 | * |
1251 | */ | 1414 | */ |
1252 | 1415 | ||
@@ -1289,17 +1452,26 @@ struct drm_i915_gem_context_param { | |||
1289 | #define I915_CONTEXT_PARAM_GTT_SIZE 0x3 | 1452 | #define I915_CONTEXT_PARAM_GTT_SIZE 0x3 |
1290 | #define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE 0x4 | 1453 | #define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE 0x4 |
1291 | #define I915_CONTEXT_PARAM_BANNABLE 0x5 | 1454 | #define I915_CONTEXT_PARAM_BANNABLE 0x5 |
1455 | #define I915_CONTEXT_PARAM_PRIORITY 0x6 | ||
1456 | #define I915_CONTEXT_MAX_USER_PRIORITY 1023 /* inclusive */ | ||
1457 | #define I915_CONTEXT_DEFAULT_PRIORITY 0 | ||
1458 | #define I915_CONTEXT_MIN_USER_PRIORITY -1023 /* inclusive */ | ||
1292 | __u64 value; | 1459 | __u64 value; |
1293 | }; | 1460 | }; |
1294 | 1461 | ||
1295 | enum drm_i915_oa_format { | 1462 | enum drm_i915_oa_format { |
1296 | I915_OA_FORMAT_A13 = 1, | 1463 | I915_OA_FORMAT_A13 = 1, /* HSW only */ |
1297 | I915_OA_FORMAT_A29, | 1464 | I915_OA_FORMAT_A29, /* HSW only */ |
1298 | I915_OA_FORMAT_A13_B8_C8, | 1465 | I915_OA_FORMAT_A13_B8_C8, /* HSW only */ |
1299 | I915_OA_FORMAT_B4_C8, | 1466 | I915_OA_FORMAT_B4_C8, /* HSW only */ |
1300 | I915_OA_FORMAT_A45_B8_C8, | 1467 | I915_OA_FORMAT_A45_B8_C8, /* HSW only */ |
1301 | I915_OA_FORMAT_B4_C8_A16, | 1468 | I915_OA_FORMAT_B4_C8_A16, /* HSW only */ |
1302 | I915_OA_FORMAT_C4_B8, | 1469 | I915_OA_FORMAT_C4_B8, /* HSW+ */ |
1470 | |||
1471 | /* Gen8+ */ | ||
1472 | I915_OA_FORMAT_A12, | ||
1473 | I915_OA_FORMAT_A12_B8_C8, | ||
1474 | I915_OA_FORMAT_A32u40_A4u32_B8_C8, | ||
1303 | 1475 | ||
1304 | I915_OA_FORMAT_MAX /* non-ABI */ | 1476 | I915_OA_FORMAT_MAX /* non-ABI */ |
1305 | }; | 1477 | }; |
@@ -1424,6 +1596,127 @@ enum drm_i915_perf_record_type { | |||
1424 | DRM_I915_PERF_RECORD_MAX /* non-ABI */ | 1596 | DRM_I915_PERF_RECORD_MAX /* non-ABI */ |
1425 | }; | 1597 | }; |
1426 | 1598 | ||
1599 | /** | ||
1600 | * Structure to upload perf dynamic configuration into the kernel. | ||
1601 | */ | ||
1602 | struct drm_i915_perf_oa_config { | ||
1603 | /** String formatted like "%08x-%04x-%04x-%04x-%012x" */ | ||
1604 | char uuid[36]; | ||
1605 | |||
1606 | __u32 n_mux_regs; | ||
1607 | __u32 n_boolean_regs; | ||
1608 | __u32 n_flex_regs; | ||
1609 | |||
1610 | /* | ||
1611 | * These fields are pointers to tuples of u32 values (register address, | ||
1612 | * value). For example the expected length of the buffer pointed by | ||
1613 | * mux_regs_ptr is (2 * sizeof(u32) * n_mux_regs). | ||
1614 | */ | ||
1615 | __u64 mux_regs_ptr; | ||
1616 | __u64 boolean_regs_ptr; | ||
1617 | __u64 flex_regs_ptr; | ||
1618 | }; | ||
1619 | |||
1620 | struct drm_i915_query_item { | ||
1621 | __u64 query_id; | ||
1622 | #define DRM_I915_QUERY_TOPOLOGY_INFO 1 | ||
1623 | |||
1624 | /* | ||
1625 | * When set to zero by userspace, this is filled with the size of the | ||
1626 | * data to be written at the data_ptr pointer. The kernel sets this | ||
1627 | * value to a negative value to signal an error on a particular query | ||
1628 | * item. | ||
1629 | */ | ||
1630 | __s32 length; | ||
1631 | |||
1632 | /* | ||
1633 | * Unused for now. Must be cleared to zero. | ||
1634 | */ | ||
1635 | __u32 flags; | ||
1636 | |||
1637 | /* | ||
1638 | * Data will be written at the location pointed by data_ptr when the | ||
1639 | * value of length matches the length of the data to be written by the | ||
1640 | * kernel. | ||
1641 | */ | ||
1642 | __u64 data_ptr; | ||
1643 | }; | ||
1644 | |||
1645 | struct drm_i915_query { | ||
1646 | __u32 num_items; | ||
1647 | |||
1648 | /* | ||
1649 | * Unused for now. Must be cleared to zero. | ||
1650 | */ | ||
1651 | __u32 flags; | ||
1652 | |||
1653 | /* | ||
1654 | * This points to an array of num_items drm_i915_query_item structures. | ||
1655 | */ | ||
1656 | __u64 items_ptr; | ||
1657 | }; | ||
1658 | |||
1659 | /* | ||
1660 | * Data written by the kernel with query DRM_I915_QUERY_TOPOLOGY_INFO : | ||
1661 | * | ||
1662 | * data: contains the 3 pieces of information : | ||
1663 | * | ||
1664 | * - the slice mask with one bit per slice telling whether a slice is | ||
1665 | * available. The availability of slice X can be queried with the following | ||
1666 | * formula : | ||
1667 | * | ||
1668 | * (data[X / 8] >> (X % 8)) & 1 | ||
1669 | * | ||
1670 | * - the subslice mask for each slice with one bit per subslice telling | ||
1671 | * whether a subslice is available. The availability of subslice Y in slice | ||
1672 | * X can be queried with the following formula : | ||
1673 | * | ||
1674 | * (data[subslice_offset + | ||
1675 | * X * subslice_stride + | ||
1676 | * Y / 8] >> (Y % 8)) & 1 | ||
1677 | * | ||
1678 | * - the EU mask for each subslice in each slice with one bit per EU telling | ||
1679 | * whether an EU is available. The availability of EU Z in subslice Y in | ||
1680 | * slice X can be queried with the following formula : | ||
1681 | * | ||
1682 | * (data[eu_offset + | ||
1683 | * (X * max_subslices + Y) * eu_stride + | ||
1684 | * Z / 8] >> (Z % 8)) & 1 | ||
1685 | */ | ||
1686 | struct drm_i915_query_topology_info { | ||
1687 | /* | ||
1688 | * Unused for now. Must be cleared to zero. | ||
1689 | */ | ||
1690 | __u16 flags; | ||
1691 | |||
1692 | __u16 max_slices; | ||
1693 | __u16 max_subslices; | ||
1694 | __u16 max_eus_per_subslice; | ||
1695 | |||
1696 | /* | ||
1697 | * Offset in data[] at which the subslice masks are stored. | ||
1698 | */ | ||
1699 | __u16 subslice_offset; | ||
1700 | |||
1701 | /* | ||
1702 | * Stride at which each of the subslice masks for each slice are | ||
1703 | * stored. | ||
1704 | */ | ||
1705 | __u16 subslice_stride; | ||
1706 | |||
1707 | /* | ||
1708 | * Offset in data[] at which the EU masks are stored. | ||
1709 | */ | ||
1710 | __u16 eu_offset; | ||
1711 | |||
1712 | /* | ||
1713 | * Stride at which each of the EU masks for each subslice are stored. | ||
1714 | */ | ||
1715 | __u16 eu_stride; | ||
1716 | |||
1717 | __u8 data[]; | ||
1718 | }; | ||
1719 | |||
1427 | #if defined(__cplusplus) | 1720 | #if defined(__cplusplus) |
1428 | } | 1721 | } |
1429 | #endif | 1722 | #endif |