aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDaniel Stone2018-03-30 07:04:30 -0500
committerDaniel Stone2018-03-30 10:10:03 -0500
commit8e535dd2142433b24aebc6eddeaee04663489aa2 (patch)
tree15d3d3fcc26b00d09cbf0e0d5db91b62ff7f682f
parent2fa58c77fb9e563219f8ec647b9ddf52f3390ed2 (diff)
downloadexternal-libdrm-8e535dd2142433b24aebc6eddeaee04663489aa2.tar.gz
external-libdrm-8e535dd2142433b24aebc6eddeaee04663489aa2.tar.xz
external-libdrm-8e535dd2142433b24aebc6eddeaee04663489aa2.zip
headers: Sync with drm-next
Taken from the drm-next pull for 4.17-rc1 (694f54f680f7), and manually reconciled: core: - Dropped DRM_MODE_TYPE_ALL and DRM_MODE_FLAG_ALL; these are purely internal details of the bits accepted by the currently running kernel, and can not be generally relied on by userspace - Add HDCP flags - Note CTM entry representation is sign-magnitude format, not two's-complement amdgpu: - Add QUERY_STATE2 context op - Add VCN firmware version query etnaviv: - Add more GPU feature flags i915: - Add caps, params and ioctls for PMU / perf-stream - Add support for explicit fencing nouveau: - Add TILE_COMP layout vc4: - Add perfmon ioctls virtgpu: - Add capset-fix param vmware: - Add handle-close ioctl and explicit-fencing support Signed-off-by: Daniel Stone <daniels@collabora.com> Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
-rw-r--r--etnaviv/etnaviv_drm.h7
-rw-r--r--include/drm/amdgpu_drm.h11
-rw-r--r--include/drm/drm_mode.h25
-rw-r--r--include/drm/i915_drm.h321
-rw-r--r--include/drm/nouveau_drm.h1
-rw-r--r--include/drm/vc4_drm.h76
-rw-r--r--include/drm/virtgpu_drm.h1
-rw-r--r--include/drm/vmwgfx_drm.h35
8 files changed, 451 insertions, 26 deletions
diff --git a/etnaviv/etnaviv_drm.h b/etnaviv/etnaviv_drm.h
index 110cc73b..0d5c49dc 100644
--- a/etnaviv/etnaviv_drm.h
+++ b/etnaviv/etnaviv_drm.h
@@ -1,3 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
1/* 2/*
2 * Copyright (C) 2015 Etnaviv Project 3 * Copyright (C) 2015 Etnaviv Project
3 * 4 *
@@ -54,6 +55,12 @@ struct drm_etnaviv_timespec {
54#define ETNAVIV_PARAM_GPU_FEATURES_4 0x07 55#define ETNAVIV_PARAM_GPU_FEATURES_4 0x07
55#define ETNAVIV_PARAM_GPU_FEATURES_5 0x08 56#define ETNAVIV_PARAM_GPU_FEATURES_5 0x08
56#define ETNAVIV_PARAM_GPU_FEATURES_6 0x09 57#define ETNAVIV_PARAM_GPU_FEATURES_6 0x09
58#define ETNAVIV_PARAM_GPU_FEATURES_7 0x0a
59#define ETNAVIV_PARAM_GPU_FEATURES_8 0x0b
60#define ETNAVIV_PARAM_GPU_FEATURES_9 0x0c
61#define ETNAVIV_PARAM_GPU_FEATURES_10 0x0d
62#define ETNAVIV_PARAM_GPU_FEATURES_11 0x0e
63#define ETNAVIV_PARAM_GPU_FEATURES_12 0x0f
57 64
58#define ETNAVIV_PARAM_GPU_STREAM_COUNT 0x10 65#define ETNAVIV_PARAM_GPU_STREAM_COUNT 0x10
59#define ETNAVIV_PARAM_GPU_REGISTER_MAX 0x11 66#define ETNAVIV_PARAM_GPU_REGISTER_MAX 0x11
diff --git a/include/drm/amdgpu_drm.h b/include/drm/amdgpu_drm.h
index f784f248..c363b67f 100644
--- a/include/drm/amdgpu_drm.h
+++ b/include/drm/amdgpu_drm.h
@@ -160,6 +160,7 @@ union drm_amdgpu_bo_list {
160#define AMDGPU_CTX_OP_ALLOC_CTX 1 160#define AMDGPU_CTX_OP_ALLOC_CTX 1
161#define AMDGPU_CTX_OP_FREE_CTX 2 161#define AMDGPU_CTX_OP_FREE_CTX 2
162#define AMDGPU_CTX_OP_QUERY_STATE 3 162#define AMDGPU_CTX_OP_QUERY_STATE 3
163#define AMDGPU_CTX_OP_QUERY_STATE2 4
163 164
164/* GPU reset status */ 165/* GPU reset status */
165#define AMDGPU_CTX_NO_RESET 0 166#define AMDGPU_CTX_NO_RESET 0
@@ -170,6 +171,13 @@ union drm_amdgpu_bo_list {
170/* unknown cause */ 171/* unknown cause */
171#define AMDGPU_CTX_UNKNOWN_RESET 3 172#define AMDGPU_CTX_UNKNOWN_RESET 3
172 173
174/* indicate gpu reset occurred after ctx created */
175#define AMDGPU_CTX_QUERY2_FLAGS_RESET (1<<0)
176/* indicate vram lost occurred after ctx created */
177#define AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST (1<<1)
178/* indicate some job from this context once caused a gpu hang */
179#define AMDGPU_CTX_QUERY2_FLAGS_GUILTY (1<<2)
180
173/* Context priority level */ 181/* Context priority level */
174#define AMDGPU_CTX_PRIORITY_UNSET -2048 182#define AMDGPU_CTX_PRIORITY_UNSET -2048
175#define AMDGPU_CTX_PRIORITY_VERY_LOW -1023 183#define AMDGPU_CTX_PRIORITY_VERY_LOW -1023
@@ -610,6 +618,8 @@ struct drm_amdgpu_cs_chunk_data {
610 #define AMDGPU_INFO_FW_SOS 0x0c 618 #define AMDGPU_INFO_FW_SOS 0x0c
611 /* Subquery id: Query PSP ASD firmware version */ 619 /* Subquery id: Query PSP ASD firmware version */
612 #define AMDGPU_INFO_FW_ASD 0x0d 620 #define AMDGPU_INFO_FW_ASD 0x0d
621 /* Subquery id: Query VCN firmware version */
622 #define AMDGPU_INFO_FW_VCN 0x0e
613/* number of bytes moved for TTM migration */ 623/* number of bytes moved for TTM migration */
614#define AMDGPU_INFO_NUM_BYTES_MOVED 0x0f 624#define AMDGPU_INFO_NUM_BYTES_MOVED 0x0f
615/* the used VRAM size */ 625/* the used VRAM size */
@@ -798,6 +808,7 @@ struct drm_amdgpu_info_firmware {
798#define AMDGPU_VRAM_TYPE_GDDR5 5 808#define AMDGPU_VRAM_TYPE_GDDR5 5
799#define AMDGPU_VRAM_TYPE_HBM 6 809#define AMDGPU_VRAM_TYPE_HBM 6
800#define AMDGPU_VRAM_TYPE_DDR3 7 810#define AMDGPU_VRAM_TYPE_DDR3 7
811#define AMDGPU_VRAM_TYPE_DDR4 8
801 812
802struct drm_amdgpu_info_device { 813struct drm_amdgpu_info_device {
803 /** PCI Device ID */ 814 /** PCI Device ID */
diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h
index 5597a871..5f9fadbd 100644
--- a/include/drm/drm_mode.h
+++ b/include/drm/drm_mode.h
@@ -38,11 +38,11 @@ extern "C" {
38#define DRM_DISPLAY_MODE_LEN 32 38#define DRM_DISPLAY_MODE_LEN 32
39#define DRM_PROP_NAME_LEN 32 39#define DRM_PROP_NAME_LEN 32
40 40
41#define DRM_MODE_TYPE_BUILTIN (1<<0) 41#define DRM_MODE_TYPE_BUILTIN (1<<0) /* deprecated */
42#define DRM_MODE_TYPE_CLOCK_C ((1<<1) | DRM_MODE_TYPE_BUILTIN) 42#define DRM_MODE_TYPE_CLOCK_C ((1<<1) | DRM_MODE_TYPE_BUILTIN) /* deprecated */
43#define DRM_MODE_TYPE_CRTC_C ((1<<2) | DRM_MODE_TYPE_BUILTIN) 43#define DRM_MODE_TYPE_CRTC_C ((1<<2) | DRM_MODE_TYPE_BUILTIN) /* deprecated */
44#define DRM_MODE_TYPE_PREFERRED (1<<3) 44#define DRM_MODE_TYPE_PREFERRED (1<<3)
45#define DRM_MODE_TYPE_DEFAULT (1<<4) 45#define DRM_MODE_TYPE_DEFAULT (1<<4) /* deprecated */
46#define DRM_MODE_TYPE_USERDEF (1<<5) 46#define DRM_MODE_TYPE_USERDEF (1<<5)
47#define DRM_MODE_TYPE_DRIVER (1<<6) 47#define DRM_MODE_TYPE_DRIVER (1<<6)
48 48
@@ -66,8 +66,8 @@ extern "C" {
66#define DRM_MODE_FLAG_PCSYNC (1<<7) 66#define DRM_MODE_FLAG_PCSYNC (1<<7)
67#define DRM_MODE_FLAG_NCSYNC (1<<8) 67#define DRM_MODE_FLAG_NCSYNC (1<<8)
68#define DRM_MODE_FLAG_HSKEW (1<<9) /* hskew provided */ 68#define DRM_MODE_FLAG_HSKEW (1<<9) /* hskew provided */
69#define DRM_MODE_FLAG_BCAST (1<<10) 69#define DRM_MODE_FLAG_BCAST (1<<10) /* deprecated */
70#define DRM_MODE_FLAG_PIXMUX (1<<11) 70#define DRM_MODE_FLAG_PIXMUX (1<<11) /* deprecated */
71#define DRM_MODE_FLAG_DBLCLK (1<<12) 71#define DRM_MODE_FLAG_DBLCLK (1<<12)
72#define DRM_MODE_FLAG_CLKDIV2 (1<<13) 72#define DRM_MODE_FLAG_CLKDIV2 (1<<13)
73 /* 73 /*
@@ -173,6 +173,10 @@ extern "C" {
173 DRM_MODE_REFLECT_X | \ 173 DRM_MODE_REFLECT_X | \
174 DRM_MODE_REFLECT_Y) 174 DRM_MODE_REFLECT_Y)
175 175
176/* Content Protection Flags */
177#define DRM_MODE_CONTENT_PROTECTION_UNDESIRED 0
178#define DRM_MODE_CONTENT_PROTECTION_DESIRED 1
179#define DRM_MODE_CONTENT_PROTECTION_ENABLED 2
176 180
177struct drm_mode_modeinfo { 181struct drm_mode_modeinfo {
178 __u32 clock; 182 __u32 clock;
@@ -341,7 +345,7 @@ struct drm_mode_get_connector {
341 __u32 pad; 345 __u32 pad;
342}; 346};
343 347
344#define DRM_MODE_PROP_PENDING (1<<0) 348#define DRM_MODE_PROP_PENDING (1<<0) /* deprecated, do not use */
345#define DRM_MODE_PROP_RANGE (1<<1) 349#define DRM_MODE_PROP_RANGE (1<<1)
346#define DRM_MODE_PROP_IMMUTABLE (1<<2) 350#define DRM_MODE_PROP_IMMUTABLE (1<<2)
347#define DRM_MODE_PROP_ENUM (1<<3) /* enumerated type with text strings */ 351#define DRM_MODE_PROP_ENUM (1<<3) /* enumerated type with text strings */
@@ -576,8 +580,11 @@ struct drm_mode_crtc_lut {
576}; 580};
577 581
578struct drm_color_ctm { 582struct drm_color_ctm {
579 /* Conversion matrix in S31.32 format. */ 583 /*
580 __s64 matrix[9]; 584 * Conversion matrix in S31.32 sign-magnitude
585 * (not two's complement!) format.
586 */
587 __u64 matrix[9];
581}; 588};
582 589
583struct drm_color_lut { 590struct drm_color_lut {
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index 5ebe0462..16e452aa 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -86,6 +86,62 @@ enum i915_mocs_table_index {
86 I915_MOCS_CACHED, 86 I915_MOCS_CACHED,
87}; 87};
88 88
89/*
90 * Different engines serve different roles, and there may be more than one
91 * engine serving each role. enum drm_i915_gem_engine_class provides a
92 * classification of the role of the engine, which may be used when requesting
93 * operations to be performed on a certain subset of engines, or for providing
94 * information about that group.
95 */
96enum drm_i915_gem_engine_class {
97 I915_ENGINE_CLASS_RENDER = 0,
98 I915_ENGINE_CLASS_COPY = 1,
99 I915_ENGINE_CLASS_VIDEO = 2,
100 I915_ENGINE_CLASS_VIDEO_ENHANCE = 3,
101
102 I915_ENGINE_CLASS_INVALID = -1
103};
104
105/**
106 * DOC: perf_events exposed by i915 through /sys/bus/event_sources/drivers/i915
107 *
108 */
109
110enum drm_i915_pmu_engine_sample {
111 I915_SAMPLE_BUSY = 0,
112 I915_SAMPLE_WAIT = 1,
113 I915_SAMPLE_SEMA = 2
114};
115
116#define I915_PMU_SAMPLE_BITS (4)
117#define I915_PMU_SAMPLE_MASK (0xf)
118#define I915_PMU_SAMPLE_INSTANCE_BITS (8)
119#define I915_PMU_CLASS_SHIFT \
120 (I915_PMU_SAMPLE_BITS + I915_PMU_SAMPLE_INSTANCE_BITS)
121
122#define __I915_PMU_ENGINE(class, instance, sample) \
123 ((class) << I915_PMU_CLASS_SHIFT | \
124 (instance) << I915_PMU_SAMPLE_BITS | \
125 (sample))
126
127#define I915_PMU_ENGINE_BUSY(class, instance) \
128 __I915_PMU_ENGINE(class, instance, I915_SAMPLE_BUSY)
129
130#define I915_PMU_ENGINE_WAIT(class, instance) \
131 __I915_PMU_ENGINE(class, instance, I915_SAMPLE_WAIT)
132
133#define I915_PMU_ENGINE_SEMA(class, instance) \
134 __I915_PMU_ENGINE(class, instance, I915_SAMPLE_SEMA)
135
136#define __I915_PMU_OTHER(x) (__I915_PMU_ENGINE(0xff, 0xff, 0xf) + 1 + (x))
137
138#define I915_PMU_ACTUAL_FREQUENCY __I915_PMU_OTHER(0)
139#define I915_PMU_REQUESTED_FREQUENCY __I915_PMU_OTHER(1)
140#define I915_PMU_INTERRUPTS __I915_PMU_OTHER(2)
141#define I915_PMU_RC6_RESIDENCY __I915_PMU_OTHER(3)
142
143#define I915_PMU_LAST I915_PMU_RC6_RESIDENCY
144
89/* Each region is a minimum of 16k, and there are at most 255 of them. 145/* Each region is a minimum of 16k, and there are at most 255 of them.
90 */ 146 */
91#define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use 147#define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use
@@ -260,6 +316,9 @@ typedef struct _drm_i915_sarea {
260#define DRM_I915_GEM_CONTEXT_GETPARAM 0x34 316#define DRM_I915_GEM_CONTEXT_GETPARAM 0x34
261#define DRM_I915_GEM_CONTEXT_SETPARAM 0x35 317#define DRM_I915_GEM_CONTEXT_SETPARAM 0x35
262#define DRM_I915_PERF_OPEN 0x36 318#define DRM_I915_PERF_OPEN 0x36
319#define DRM_I915_PERF_ADD_CONFIG 0x37
320#define DRM_I915_PERF_REMOVE_CONFIG 0x38
321#define DRM_I915_QUERY 0x39
263 322
264#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) 323#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
265#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) 324#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -315,6 +374,9 @@ typedef struct _drm_i915_sarea {
315#define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param) 374#define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param)
316#define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param) 375#define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param)
317#define DRM_IOCTL_I915_PERF_OPEN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param) 376#define DRM_IOCTL_I915_PERF_OPEN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param)
377#define DRM_IOCTL_I915_PERF_ADD_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config)
378#define DRM_IOCTL_I915_PERF_REMOVE_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64)
379#define DRM_IOCTL_I915_QUERY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_QUERY, struct drm_i915_query)
318 380
319/* Allow drivers to submit batchbuffers directly to hardware, relying 381/* Allow drivers to submit batchbuffers directly to hardware, relying
320 * on the security mechanisms provided by hardware. 382 * on the security mechanisms provided by hardware.
@@ -393,10 +455,20 @@ typedef struct drm_i915_irq_wait {
393#define I915_PARAM_MIN_EU_IN_POOL 39 455#define I915_PARAM_MIN_EU_IN_POOL 39
394#define I915_PARAM_MMAP_GTT_VERSION 40 456#define I915_PARAM_MMAP_GTT_VERSION 40
395 457
396/* Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution 458/*
459 * Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution
397 * priorities and the driver will attempt to execute batches in priority order. 460 * priorities and the driver will attempt to execute batches in priority order.
461 * The param returns a capability bitmask, nonzero implies that the scheduler
462 * is enabled, with different features present according to the mask.
463 *
464 * The initial priority for each batch is supplied by the context and is
465 * controlled via I915_CONTEXT_PARAM_PRIORITY.
398 */ 466 */
399#define I915_PARAM_HAS_SCHEDULER 41 467#define I915_PARAM_HAS_SCHEDULER 41
468#define I915_SCHEDULER_CAP_ENABLED (1ul << 0)
469#define I915_SCHEDULER_CAP_PRIORITY (1ul << 1)
470#define I915_SCHEDULER_CAP_PREEMPTION (1ul << 2)
471
400#define I915_PARAM_HUC_STATUS 42 472#define I915_PARAM_HUC_STATUS 42
401 473
402/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of 474/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of
@@ -412,6 +484,51 @@ typedef struct drm_i915_irq_wait {
412 */ 484 */
413#define I915_PARAM_HAS_EXEC_FENCE 44 485#define I915_PARAM_HAS_EXEC_FENCE 44
414 486
487/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to capture
488 * user specified buffers for post-mortem debugging of GPU hangs. See
489 * EXEC_OBJECT_CAPTURE.
490 */
491#define I915_PARAM_HAS_EXEC_CAPTURE 45
492
493#define I915_PARAM_SLICE_MASK 46
494
495/* Assuming it's uniform for each slice, this queries the mask of subslices
496 * per-slice for this system.
497 */
498#define I915_PARAM_SUBSLICE_MASK 47
499
500/*
501 * Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying the batch buffer
502 * as the first execobject as opposed to the last. See I915_EXEC_BATCH_FIRST.
503 */
504#define I915_PARAM_HAS_EXEC_BATCH_FIRST 48
505
506/* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of
507 * drm_i915_gem_exec_fence structures. See I915_EXEC_FENCE_ARRAY.
508 */
509#define I915_PARAM_HAS_EXEC_FENCE_ARRAY 49
510
511/*
512 * Query whether every context (both per-file default and user created) is
513 * isolated (insofar as HW supports). If this parameter is not true, then
514 * freshly created contexts may inherit values from an existing context,
515 * rather than default HW values. If true, it also ensures (insofar as HW
516 * supports) that all state set by this context will not leak to any other
517 * context.
518 *
519 * As not every engine across every gen support contexts, the returned
520 * value reports the support of context isolation for individual engines by
521 * returning a bitmask of each engine class set to true if that class supports
522 * isolation.
523 */
524#define I915_PARAM_HAS_CONTEXT_ISOLATION 50
525
526/* Frequency of the command streamer timestamps given by the *_TIMESTAMP
527 * registers. This used to be fixed per platform but from CNL onwards, this
528 * might vary depending on the parts.
529 */
530#define I915_PARAM_CS_TIMESTAMP_FREQUENCY 51
531
415typedef struct drm_i915_getparam { 532typedef struct drm_i915_getparam {
416 __s32 param; 533 __s32 param;
417 /* 534 /*
@@ -666,6 +783,8 @@ struct drm_i915_gem_relocation_entry {
666#define I915_GEM_DOMAIN_VERTEX 0x00000020 783#define I915_GEM_DOMAIN_VERTEX 0x00000020
667/** GTT domain - aperture and scanout */ 784/** GTT domain - aperture and scanout */
668#define I915_GEM_DOMAIN_GTT 0x00000040 785#define I915_GEM_DOMAIN_GTT 0x00000040
786/** WC domain - uncached access */
787#define I915_GEM_DOMAIN_WC 0x00000080
669/** @} */ 788/** @} */
670 789
671struct drm_i915_gem_exec_object { 790struct drm_i915_gem_exec_object {
@@ -773,8 +892,15 @@ struct drm_i915_gem_exec_object2 {
773 * I915_PARAM_HAS_EXEC_FENCE to order execbufs and execute them asynchronously. 892 * I915_PARAM_HAS_EXEC_FENCE to order execbufs and execute them asynchronously.
774 */ 893 */
775#define EXEC_OBJECT_ASYNC (1<<6) 894#define EXEC_OBJECT_ASYNC (1<<6)
895/* Request that the contents of this execobject be copied into the error
896 * state upon a GPU hang involving this batch for post-mortem debugging.
897 * These buffers are recorded in no particular order as "user" in
898 * /sys/class/drm/cardN/error. Query I915_PARAM_HAS_EXEC_CAPTURE to see
899 * if the kernel supports this flag.
900 */
901#define EXEC_OBJECT_CAPTURE (1<<7)
776/* All remaining bits are MBZ and RESERVED FOR FUTURE USE */ 902/* All remaining bits are MBZ and RESERVED FOR FUTURE USE */
777#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_ASYNC<<1) 903#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_CAPTURE<<1)
778 __u64 flags; 904 __u64 flags;
779 905
780 union { 906 union {
@@ -784,6 +910,18 @@ struct drm_i915_gem_exec_object2 {
784 __u64 rsvd2; 910 __u64 rsvd2;
785}; 911};
786 912
913struct drm_i915_gem_exec_fence {
914 /**
915 * User's handle for a drm_syncobj to wait on or signal.
916 */
917 __u32 handle;
918
919#define I915_EXEC_FENCE_WAIT (1<<0)
920#define I915_EXEC_FENCE_SIGNAL (1<<1)
921#define __I915_EXEC_FENCE_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SIGNAL << 1))
922 __u32 flags;
923};
924
787struct drm_i915_gem_execbuffer2 { 925struct drm_i915_gem_execbuffer2 {
788 /** 926 /**
789 * List of gem_exec_object2 structs 927 * List of gem_exec_object2 structs
@@ -798,7 +936,11 @@ struct drm_i915_gem_execbuffer2 {
798 __u32 DR1; 936 __u32 DR1;
799 __u32 DR4; 937 __u32 DR4;
800 __u32 num_cliprects; 938 __u32 num_cliprects;
801 /** This is a struct drm_clip_rect *cliprects */ 939 /**
940 * This is a struct drm_clip_rect *cliprects if I915_EXEC_FENCE_ARRAY
941 * is not set. If I915_EXEC_FENCE_ARRAY is set, then this is a
942 * struct drm_i915_gem_exec_fence *fences.
943 */
802 __u64 cliprects_ptr; 944 __u64 cliprects_ptr;
803#define I915_EXEC_RING_MASK (7<<0) 945#define I915_EXEC_RING_MASK (7<<0)
804#define I915_EXEC_DEFAULT (0<<0) 946#define I915_EXEC_DEFAULT (0<<0)
@@ -889,7 +1031,24 @@ struct drm_i915_gem_execbuffer2 {
889 */ 1031 */
890#define I915_EXEC_FENCE_OUT (1<<17) 1032#define I915_EXEC_FENCE_OUT (1<<17)
891 1033
892#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_OUT<<1)) 1034/*
1035 * Traditionally the execbuf ioctl has only considered the final element in
1036 * the execobject[] to be the executable batch. Often though, the client
1037 * will know the batch object prior to construction and being able to place
1038 * it into the execobject[] array first can simplify the relocation tracking.
1039 * Setting I915_EXEC_BATCH_FIRST tells execbuf to use element 0 of the
1040 * execobject[] as the batch instead (the default is to use the last
1041 * element).
1042 */
1043#define I915_EXEC_BATCH_FIRST (1<<18)
1044
1045/* Setting I915_EXEC_FENCE_ARRAY implies that num_cliprects and cliprects_ptr
1046 * define an array of i915_gem_exec_fence structures which specify a set of
1047 * dma fences to wait upon or signal.
1048 */
1049#define I915_EXEC_FENCE_ARRAY (1<<19)
1050
1051#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_ARRAY<<1))
893 1052
894#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff) 1053#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
895#define i915_execbuffer2_set_context_id(eb2, context) \ 1054#define i915_execbuffer2_set_context_id(eb2, context) \
@@ -1201,7 +1360,9 @@ struct drm_intel_overlay_attrs {
1201 * active on a given plane. 1360 * active on a given plane.
1202 */ 1361 */
1203 1362
1204#define I915_SET_COLORKEY_NONE (1<<0) /* disable color key matching */ 1363#define I915_SET_COLORKEY_NONE (1<<0) /* Deprecated. Instead set
1364 * flags==0 to disable colorkeying.
1365 */
1205#define I915_SET_COLORKEY_DESTINATION (1<<1) 1366#define I915_SET_COLORKEY_DESTINATION (1<<1)
1206#define I915_SET_COLORKEY_SOURCE (1<<2) 1367#define I915_SET_COLORKEY_SOURCE (1<<2)
1207struct drm_intel_sprite_colorkey { 1368struct drm_intel_sprite_colorkey {
@@ -1239,14 +1400,16 @@ struct drm_i915_reg_read {
1239 * be specified 1400 * be specified
1240 */ 1401 */
1241 __u64 offset; 1402 __u64 offset;
1403#define I915_REG_READ_8B_WA (1ul << 0)
1404
1242 __u64 val; /* Return value */ 1405 __u64 val; /* Return value */
1243}; 1406};
1244/* Known registers: 1407/* Known registers:
1245 * 1408 *
1246 * Render engine timestamp - 0x2358 + 64bit - gen7+ 1409 * Render engine timestamp - 0x2358 + 64bit - gen7+
1247 * - Note this register returns an invalid value if using the default 1410 * - Note this register returns an invalid value if using the default
1248 * single instruction 8byte read, in order to workaround that use 1411 * single instruction 8byte read, in order to workaround that pass
1249 * offset (0x2538 | 1) instead. 1412 * flag I915_REG_READ_8B_WA in offset field.
1250 * 1413 *
1251 */ 1414 */
1252 1415
@@ -1289,17 +1452,26 @@ struct drm_i915_gem_context_param {
1289#define I915_CONTEXT_PARAM_GTT_SIZE 0x3 1452#define I915_CONTEXT_PARAM_GTT_SIZE 0x3
1290#define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE 0x4 1453#define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE 0x4
1291#define I915_CONTEXT_PARAM_BANNABLE 0x5 1454#define I915_CONTEXT_PARAM_BANNABLE 0x5
1455#define I915_CONTEXT_PARAM_PRIORITY 0x6
1456#define I915_CONTEXT_MAX_USER_PRIORITY 1023 /* inclusive */
1457#define I915_CONTEXT_DEFAULT_PRIORITY 0
1458#define I915_CONTEXT_MIN_USER_PRIORITY -1023 /* inclusive */
1292 __u64 value; 1459 __u64 value;
1293}; 1460};
1294 1461
1295enum drm_i915_oa_format { 1462enum drm_i915_oa_format {
1296 I915_OA_FORMAT_A13 = 1, 1463 I915_OA_FORMAT_A13 = 1, /* HSW only */
1297 I915_OA_FORMAT_A29, 1464 I915_OA_FORMAT_A29, /* HSW only */
1298 I915_OA_FORMAT_A13_B8_C8, 1465 I915_OA_FORMAT_A13_B8_C8, /* HSW only */
1299 I915_OA_FORMAT_B4_C8, 1466 I915_OA_FORMAT_B4_C8, /* HSW only */
1300 I915_OA_FORMAT_A45_B8_C8, 1467 I915_OA_FORMAT_A45_B8_C8, /* HSW only */
1301 I915_OA_FORMAT_B4_C8_A16, 1468 I915_OA_FORMAT_B4_C8_A16, /* HSW only */
1302 I915_OA_FORMAT_C4_B8, 1469 I915_OA_FORMAT_C4_B8, /* HSW+ */
1470
1471 /* Gen8+ */
1472 I915_OA_FORMAT_A12,
1473 I915_OA_FORMAT_A12_B8_C8,
1474 I915_OA_FORMAT_A32u40_A4u32_B8_C8,
1303 1475
1304 I915_OA_FORMAT_MAX /* non-ABI */ 1476 I915_OA_FORMAT_MAX /* non-ABI */
1305}; 1477};
@@ -1424,6 +1596,127 @@ enum drm_i915_perf_record_type {
1424 DRM_I915_PERF_RECORD_MAX /* non-ABI */ 1596 DRM_I915_PERF_RECORD_MAX /* non-ABI */
1425}; 1597};
1426 1598
1599/**
1600 * Structure to upload perf dynamic configuration into the kernel.
1601 */
1602struct drm_i915_perf_oa_config {
1603 /** String formatted like "%08x-%04x-%04x-%04x-%012x" */
1604 char uuid[36];
1605
1606 __u32 n_mux_regs;
1607 __u32 n_boolean_regs;
1608 __u32 n_flex_regs;
1609
1610 /*
1611 * These fields are pointers to tuples of u32 values (register address,
1612 * value). For example the expected length of the buffer pointed by
1613 * mux_regs_ptr is (2 * sizeof(u32) * n_mux_regs).
1614 */
1615 __u64 mux_regs_ptr;
1616 __u64 boolean_regs_ptr;
1617 __u64 flex_regs_ptr;
1618};
1619
1620struct drm_i915_query_item {
1621 __u64 query_id;
1622#define DRM_I915_QUERY_TOPOLOGY_INFO 1
1623
1624 /*
1625 * When set to zero by userspace, this is filled with the size of the
1626 * data to be written at the data_ptr pointer. The kernel sets this
1627 * value to a negative value to signal an error on a particular query
1628 * item.
1629 */
1630 __s32 length;
1631
1632 /*
1633 * Unused for now. Must be cleared to zero.
1634 */
1635 __u32 flags;
1636
1637 /*
1638 * Data will be written at the location pointed by data_ptr when the
1639 * value of length matches the length of the data to be written by the
1640 * kernel.
1641 */
1642 __u64 data_ptr;
1643};
1644
1645struct drm_i915_query {
1646 __u32 num_items;
1647
1648 /*
1649 * Unused for now. Must be cleared to zero.
1650 */
1651 __u32 flags;
1652
1653 /*
1654 * This points to an array of num_items drm_i915_query_item structures.
1655 */
1656 __u64 items_ptr;
1657};
1658
1659/*
1660 * Data written by the kernel with query DRM_I915_QUERY_TOPOLOGY_INFO :
1661 *
1662 * data: contains the 3 pieces of information :
1663 *
1664 * - the slice mask with one bit per slice telling whether a slice is
1665 * available. The availability of slice X can be queried with the following
1666 * formula :
1667 *
1668 * (data[X / 8] >> (X % 8)) & 1
1669 *
1670 * - the subslice mask for each slice with one bit per subslice telling
1671 * whether a subslice is available. The availability of subslice Y in slice
1672 * X can be queried with the following formula :
1673 *
1674 * (data[subslice_offset +
1675 * X * subslice_stride +
1676 * Y / 8] >> (Y % 8)) & 1
1677 *
1678 * - the EU mask for each subslice in each slice with one bit per EU telling
1679 * whether an EU is available. The availability of EU Z in subslice Y in
1680 * slice X can be queried with the following formula :
1681 *
1682 * (data[eu_offset +
1683 * (X * max_subslices + Y) * eu_stride +
1684 * Z / 8] >> (Z % 8)) & 1
1685 */
1686struct drm_i915_query_topology_info {
1687 /*
1688 * Unused for now. Must be cleared to zero.
1689 */
1690 __u16 flags;
1691
1692 __u16 max_slices;
1693 __u16 max_subslices;
1694 __u16 max_eus_per_subslice;
1695
1696 /*
1697 * Offset in data[] at which the subslice masks are stored.
1698 */
1699 __u16 subslice_offset;
1700
1701 /*
1702 * Stride at which each of the subslice masks for each slice are
1703 * stored.
1704 */
1705 __u16 subslice_stride;
1706
1707 /*
1708 * Offset in data[] at which the EU masks are stored.
1709 */
1710 __u16 eu_offset;
1711
1712 /*
1713 * Stride at which each of the EU masks for each subslice are stored.
1714 */
1715 __u16 eu_stride;
1716
1717 __u8 data[];
1718};
1719
1427#if defined(__cplusplus) 1720#if defined(__cplusplus)
1428} 1721}
1429#endif 1722#endif
diff --git a/include/drm/nouveau_drm.h b/include/drm/nouveau_drm.h
index cb077821..d42105c8 100644
--- a/include/drm/nouveau_drm.h
+++ b/include/drm/nouveau_drm.h
@@ -104,6 +104,7 @@ struct drm_nouveau_setparam {
104#define NOUVEAU_GEM_DOMAIN_MAPPABLE (1 << 3) 104#define NOUVEAU_GEM_DOMAIN_MAPPABLE (1 << 3)
105#define NOUVEAU_GEM_DOMAIN_COHERENT (1 << 4) 105#define NOUVEAU_GEM_DOMAIN_COHERENT (1 << 4)
106 106
107#define NOUVEAU_GEM_TILE_COMP 0x00030000 /* nv50-only */
107#define NOUVEAU_GEM_TILE_LAYOUT_MASK 0x0000ff00 108#define NOUVEAU_GEM_TILE_LAYOUT_MASK 0x0000ff00
108#define NOUVEAU_GEM_TILE_16BPP 0x00000001 109#define NOUVEAU_GEM_TILE_16BPP 0x00000001
109#define NOUVEAU_GEM_TILE_32BPP 0x00000002 110#define NOUVEAU_GEM_TILE_32BPP 0x00000002
diff --git a/include/drm/vc4_drm.h b/include/drm/vc4_drm.h
index 3415a4b7..4117117b 100644
--- a/include/drm/vc4_drm.h
+++ b/include/drm/vc4_drm.h
@@ -42,6 +42,9 @@ extern "C" {
42#define DRM_VC4_GET_TILING 0x09 42#define DRM_VC4_GET_TILING 0x09
43#define DRM_VC4_LABEL_BO 0x0a 43#define DRM_VC4_LABEL_BO 0x0a
44#define DRM_VC4_GEM_MADVISE 0x0b 44#define DRM_VC4_GEM_MADVISE 0x0b
45#define DRM_VC4_PERFMON_CREATE 0x0c
46#define DRM_VC4_PERFMON_DESTROY 0x0d
47#define DRM_VC4_PERFMON_GET_VALUES 0x0e
45 48
46#define DRM_IOCTL_VC4_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SUBMIT_CL, struct drm_vc4_submit_cl) 49#define DRM_IOCTL_VC4_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SUBMIT_CL, struct drm_vc4_submit_cl)
47#define DRM_IOCTL_VC4_WAIT_SEQNO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_SEQNO, struct drm_vc4_wait_seqno) 50#define DRM_IOCTL_VC4_WAIT_SEQNO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_SEQNO, struct drm_vc4_wait_seqno)
@@ -55,6 +58,9 @@ extern "C" {
55#define DRM_IOCTL_VC4_GET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_TILING, struct drm_vc4_get_tiling) 58#define DRM_IOCTL_VC4_GET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_TILING, struct drm_vc4_get_tiling)
56#define DRM_IOCTL_VC4_LABEL_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_LABEL_BO, struct drm_vc4_label_bo) 59#define DRM_IOCTL_VC4_LABEL_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_LABEL_BO, struct drm_vc4_label_bo)
57#define DRM_IOCTL_VC4_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GEM_MADVISE, struct drm_vc4_gem_madvise) 60#define DRM_IOCTL_VC4_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GEM_MADVISE, struct drm_vc4_gem_madvise)
61#define DRM_IOCTL_VC4_PERFMON_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_PERFMON_CREATE, struct drm_vc4_perfmon_create)
62#define DRM_IOCTL_VC4_PERFMON_DESTROY DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_PERFMON_DESTROY, struct drm_vc4_perfmon_destroy)
63#define DRM_IOCTL_VC4_PERFMON_GET_VALUES DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_PERFMON_GET_VALUES, struct drm_vc4_perfmon_get_values)
58 64
59struct drm_vc4_submit_rcl_surface { 65struct drm_vc4_submit_rcl_surface {
60 __u32 hindex; /* Handle index, or ~0 if not present. */ 66 __u32 hindex; /* Handle index, or ~0 if not present. */
@@ -173,6 +179,15 @@ struct drm_vc4_submit_cl {
173 * wait ioctl). 179 * wait ioctl).
174 */ 180 */
175 __u64 seqno; 181 __u64 seqno;
182
183 /* ID of the perfmon to attach to this job. 0 means no perfmon. */
184 __u32 perfmonid;
185
186 /* Unused field to align this struct on 64 bits. Must be set to 0.
187 * If one ever needs to add an u32 field to this struct, this field
188 * can be used.
189 */
190 __u32 pad2;
176}; 191};
177 192
178/** 193/**
@@ -308,6 +323,7 @@ struct drm_vc4_get_hang_state {
308#define DRM_VC4_PARAM_SUPPORTS_THREADED_FS 5 323#define DRM_VC4_PARAM_SUPPORTS_THREADED_FS 5
309#define DRM_VC4_PARAM_SUPPORTS_FIXED_RCL_ORDER 6 324#define DRM_VC4_PARAM_SUPPORTS_FIXED_RCL_ORDER 6
310#define DRM_VC4_PARAM_SUPPORTS_MADVISE 7 325#define DRM_VC4_PARAM_SUPPORTS_MADVISE 7
326#define DRM_VC4_PARAM_SUPPORTS_PERFMON 8
311 327
312struct drm_vc4_get_param { 328struct drm_vc4_get_param {
313 __u32 param; 329 __u32 param;
@@ -352,6 +368,66 @@ struct drm_vc4_gem_madvise {
352 __u32 pad; 368 __u32 pad;
353}; 369};
354 370
371enum {
372 VC4_PERFCNT_FEP_VALID_PRIMS_NO_RENDER,
373 VC4_PERFCNT_FEP_VALID_PRIMS_RENDER,
374 VC4_PERFCNT_FEP_CLIPPED_QUADS,
375 VC4_PERFCNT_FEP_VALID_QUADS,
376 VC4_PERFCNT_TLB_QUADS_NOT_PASSING_STENCIL,
377 VC4_PERFCNT_TLB_QUADS_NOT_PASSING_Z_AND_STENCIL,
378 VC4_PERFCNT_TLB_QUADS_PASSING_Z_AND_STENCIL,
379 VC4_PERFCNT_TLB_QUADS_ZERO_COVERAGE,
380 VC4_PERFCNT_TLB_QUADS_NON_ZERO_COVERAGE,
381 VC4_PERFCNT_TLB_QUADS_WRITTEN_TO_COLOR_BUF,
382 VC4_PERFCNT_PLB_PRIMS_OUTSIDE_VIEWPORT,
383 VC4_PERFCNT_PLB_PRIMS_NEED_CLIPPING,
384 VC4_PERFCNT_PSE_PRIMS_REVERSED,
385 VC4_PERFCNT_QPU_TOTAL_IDLE_CYCLES,
386 VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_VERTEX_COORD_SHADING,
387 VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_FRAGMENT_SHADING,
388 VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_EXEC_VALID_INST,
389 VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_WAITING_TMUS,
390 VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_WAITING_SCOREBOARD,
391 VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_WAITING_VARYINGS,
392 VC4_PERFCNT_QPU_TOTAL_INST_CACHE_HIT,
393 VC4_PERFCNT_QPU_TOTAL_INST_CACHE_MISS,
394 VC4_PERFCNT_QPU_TOTAL_UNIFORM_CACHE_HIT,
395 VC4_PERFCNT_QPU_TOTAL_UNIFORM_CACHE_MISS,
396 VC4_PERFCNT_TMU_TOTAL_TEXT_QUADS_PROCESSED,
397 VC4_PERFCNT_TMU_TOTAL_TEXT_CACHE_MISS,
398 VC4_PERFCNT_VPM_TOTAL_CLK_CYCLES_VDW_STALLED,
399 VC4_PERFCNT_VPM_TOTAL_CLK_CYCLES_VCD_STALLED,
400 VC4_PERFCNT_L2C_TOTAL_L2_CACHE_HIT,
401 VC4_PERFCNT_L2C_TOTAL_L2_CACHE_MISS,
402 VC4_PERFCNT_NUM_EVENTS,
403};
404
405#define DRM_VC4_MAX_PERF_COUNTERS 16
406
407struct drm_vc4_perfmon_create {
408 __u32 id;
409 __u32 ncounters;
410 __u8 events[DRM_VC4_MAX_PERF_COUNTERS];
411};
412
413struct drm_vc4_perfmon_destroy {
414 __u32 id;
415};
416
417/*
418 * Returns the values of the performance counters tracked by this
419 * perfmon (as an array of ncounters u64 values).
420 *
421 * No implicit synchronization is performed, so the user has to
422 * guarantee that any jobs using this perfmon have already been
423 * completed (probably by blocking on the seqno returned by the
424 * last exec that used the perfmon).
425 */
426struct drm_vc4_perfmon_get_values {
427 __u32 id;
428 __u64 values_ptr;
429};
430
355#if defined(__cplusplus) 431#if defined(__cplusplus)
356} 432}
357#endif 433#endif
diff --git a/include/drm/virtgpu_drm.h b/include/drm/virtgpu_drm.h
index 91a31ffe..9a781f06 100644
--- a/include/drm/virtgpu_drm.h
+++ b/include/drm/virtgpu_drm.h
@@ -63,6 +63,7 @@ struct drm_virtgpu_execbuffer {
63}; 63};
64 64
65#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */ 65#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
66#define VIRTGPU_PARAM_CAPSET_QUERY_FIX 2 /* do we have the capset fix */
66 67
67struct drm_virtgpu_getparam { 68struct drm_virtgpu_getparam {
68 __u64 param; 69 __u64 param;
diff --git a/include/drm/vmwgfx_drm.h b/include/drm/vmwgfx_drm.h
index d325a410..0bc784f5 100644
--- a/include/drm/vmwgfx_drm.h
+++ b/include/drm/vmwgfx_drm.h
@@ -41,6 +41,7 @@ extern "C" {
41#define DRM_VMW_GET_PARAM 0 41#define DRM_VMW_GET_PARAM 0
42#define DRM_VMW_ALLOC_DMABUF 1 42#define DRM_VMW_ALLOC_DMABUF 1
43#define DRM_VMW_UNREF_DMABUF 2 43#define DRM_VMW_UNREF_DMABUF 2
44#define DRM_VMW_HANDLE_CLOSE 2
44#define DRM_VMW_CURSOR_BYPASS 3 45#define DRM_VMW_CURSOR_BYPASS 3
45/* guarded by DRM_VMW_PARAM_NUM_STREAMS != 0*/ 46/* guarded by DRM_VMW_PARAM_NUM_STREAMS != 0*/
46#define DRM_VMW_CONTROL_STREAM 4 47#define DRM_VMW_CONTROL_STREAM 4
@@ -296,13 +297,17 @@ union drm_vmw_surface_reference_arg {
296 * @version: Allows expanding the execbuf ioctl parameters without breaking 297 * @version: Allows expanding the execbuf ioctl parameters without breaking
297 * backwards compatibility, since user-space will always tell the kernel 298 * backwards compatibility, since user-space will always tell the kernel
298 * which version it uses. 299 * which version it uses.
299 * @flags: Execbuf flags. None currently. 300 * @flags: Execbuf flags.
301 * @imported_fence_fd: FD for a fence imported from another device
300 * 302 *
301 * Argument to the DRM_VMW_EXECBUF Ioctl. 303 * Argument to the DRM_VMW_EXECBUF Ioctl.
302 */ 304 */
303 305
304#define DRM_VMW_EXECBUF_VERSION 2 306#define DRM_VMW_EXECBUF_VERSION 2
305 307
308#define DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD (1 << 0)
309#define DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD (1 << 1)
310
306struct drm_vmw_execbuf_arg { 311struct drm_vmw_execbuf_arg {
307 __u64 commands; 312 __u64 commands;
308 __u32 command_size; 313 __u32 command_size;
@@ -311,7 +316,7 @@ struct drm_vmw_execbuf_arg {
311 __u32 version; 316 __u32 version;
312 __u32 flags; 317 __u32 flags;
313 __u32 context_handle; 318 __u32 context_handle;
314 __u32 pad64; 319 __s32 imported_fence_fd;
315}; 320};
316 321
317/** 322/**
@@ -327,6 +332,7 @@ struct drm_vmw_execbuf_arg {
327 * @passed_seqno: The highest seqno number processed by the hardware 332 * @passed_seqno: The highest seqno number processed by the hardware
328 * so far. This can be used to mark user-space fence objects as signaled, and 333 * so far. This can be used to mark user-space fence objects as signaled, and
329 * to determine whether a fence seqno might be stale. 334 * to determine whether a fence seqno might be stale.
335 * @fd: FD associated with the fence, -1 if not exported
330 * @error: This member should've been set to -EFAULT on submission. 336 * @error: This member should've been set to -EFAULT on submission.
331 * The following actions should be take on completion: 337 * The following actions should be take on completion:
332 * error == -EFAULT: Fence communication failed. The host is synchronized. 338 * error == -EFAULT: Fence communication failed. The host is synchronized.
@@ -344,7 +350,7 @@ struct drm_vmw_fence_rep {
344 __u32 mask; 350 __u32 mask;
345 __u32 seqno; 351 __u32 seqno;
346 __u32 passed_seqno; 352 __u32 passed_seqno;
347 __u32 pad64; 353 __s32 fd;
348 __s32 error; 354 __s32 error;
349}; 355};
350 356
@@ -1092,6 +1098,29 @@ union drm_vmw_extended_context_arg {
1092 struct drm_vmw_context_arg rep; 1098 struct drm_vmw_context_arg rep;
1093}; 1099};
1094 1100
1101/*************************************************************************/
1102/*
1103 * DRM_VMW_HANDLE_CLOSE - Close a user-space handle and release its
1104 * underlying resource.
1105 *
1106 * Note that this ioctl is overlaid on the DRM_VMW_UNREF_DMABUF Ioctl.
1107 * The ioctl arguments therefore need to be identical in layout.
1108 *
1109 */
1110
1111/**
1112 * struct drm_vmw_handle_close_arg
1113 *
1114 * @handle: Handle to close.
1115 *
1116 * Argument to the DRM_VMW_HANDLE_CLOSE Ioctl.
1117 */
1118struct drm_vmw_handle_close_arg {
1119 __u32 handle;
1120 __u32 pad64;
1121};
1122
1123
1095#if defined(__cplusplus) 1124#if defined(__cplusplus)
1096} 1125}
1097#endif 1126#endif