aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
Diffstat (limited to 'include')
-rw-r--r--include/drm/README34
-rw-r--r--include/drm/amdgpu_drm.h592
-rw-r--r--include/drm/drm.h92
-rw-r--r--include/drm/drm_fourcc.h184
-rw-r--r--include/drm/drm_mode.h277
-rw-r--r--include/drm/drm_sarea.h8
-rw-r--r--include/drm/i915_drm.h321
-rw-r--r--include/drm/mga_drm.h12
-rw-r--r--include/drm/nouveau_drm.h94
-rw-r--r--include/drm/qxl_drm.h82
-rw-r--r--include/drm/r128_drm.h10
-rw-r--r--include/drm/radeon_drm.h128
-rw-r--r--include/drm/savage_drm.h20
-rw-r--r--include/drm/sis_drm.h10
-rw-r--r--include/drm/tegra_drm.h38
-rw-r--r--include/drm/vc4_drm.h133
-rw-r--r--include/drm/via_drm.h8
-rw-r--r--include/drm/virtgpu_drm.h1
-rw-r--r--include/drm/vmwgfx_drm.h44
19 files changed, 1685 insertions, 403 deletions
diff --git a/include/drm/README b/include/drm/README
index a50b02c0..b4658dd7 100644
--- a/include/drm/README
+++ b/include/drm/README
@@ -67,6 +67,8 @@ That said, it's up-to the individual developers to sync with newer version
67 67
68When and how to update these files 68When and how to update these files
69---------------------------------- 69----------------------------------
70Note: One should not do _any_ changes to the files apart from the steps below.
71
70In order to update the files do the following: 72In order to update the files do the following:
71 - Switch to a Linux kernel tree/branch which is not rebased. 73 - Switch to a Linux kernel tree/branch which is not rebased.
72For example: airlied/drm-next 74For example: airlied/drm-next
@@ -84,47 +86,21 @@ Outdated or Broken Headers
84This section contains a list of headers and the respective "issues" they might 86This section contains a list of headers and the respective "issues" they might
85have relative to their kernel equivalent. 87have relative to their kernel equivalent.
86 88
87Nearly all headers:
88 - Missing extern C notation.
89Status: Trivial.
90
91Most UMS headers: 89Most UMS headers:
92 - Not using fixed size integers - compat ioctls are broken. 90 - Not using fixed size integers - compat ioctls are broken.
93Status: ? 91Status: ?
94Promote to fixed size ints, which match the current (32bit) ones. 92Promote to fixed size ints, which match the current (32bit) ones.
95 93
96
97amdgpu_drm.h
98 - Using the stdint.h uint*_t over the respective __u* ones
99Status: Trivial.
100
101drm_mode.h
102 - Missing DPI encode/connector pair.
103Status: Trivial.
104
105i915_drm.h
106 - Missing PARAMS - HAS_POOLED_EU, MIN_EU_IN_POOL CONTEXT_PARAM_NO_ERROR_CAPTURE
107Status: Trivial.
108
109mga_drm.h
110 - Typo fix, use struct over typedef.
111Status: Trivial.
112
113nouveau_drm.h 94nouveau_drm.h
114 - Missing macros NOUVEAU_GETPARAM*, NOUVEAU_DRM_HEADER_PATCHLEVEL, structs, 95 - Missing macros NOUVEAU_GETPARAM*, NOUVEAU_DRM_HEADER_PATCHLEVEL, structs,
115enums, using stdint.h over the __u* types. 96enums
116Status: ? 97Status: Deliberate UABI choice; nouveau hides the exact kernel ABI behind libdrm
117
118qxl_drm.h
119 - Using the stdint.h uint*_t over the respective __u* ones
120Status: Trivial.
121 98
122r128_drm.h 99r128_drm.h
123 - Broken compat ioctls. 100 - Broken compat ioctls.
124 101
125radeon_drm.h 102radeon_drm.h
126 - Missing RADEON_TILING_R600_NO_SCANOUT, CIK_TILE_MODE_*, broken UMS ioctls, 103 - Missing RADEON_TILING_R600_NO_SCANOUT, CIK_TILE_MODE_*, broken UMS ioctls
127using stdint types.
128 - Both kernel and libdrm: missing padding - 104 - Both kernel and libdrm: missing padding -
129drm_radeon_gem_{create,{g,s}et_tiling,set_domain} others ? 105drm_radeon_gem_{create,{g,s}et_tiling,set_domain} others ?
130Status: ? 106Status: ?
diff --git a/include/drm/amdgpu_drm.h b/include/drm/amdgpu_drm.h
index d8f24976..c363b67f 100644
--- a/include/drm/amdgpu_drm.h
+++ b/include/drm/amdgpu_drm.h
@@ -50,6 +50,10 @@ extern "C" {
50#define DRM_AMDGPU_WAIT_CS 0x09 50#define DRM_AMDGPU_WAIT_CS 0x09
51#define DRM_AMDGPU_GEM_OP 0x10 51#define DRM_AMDGPU_GEM_OP 0x10
52#define DRM_AMDGPU_GEM_USERPTR 0x11 52#define DRM_AMDGPU_GEM_USERPTR 0x11
53#define DRM_AMDGPU_WAIT_FENCES 0x12
54#define DRM_AMDGPU_VM 0x13
55#define DRM_AMDGPU_FENCE_TO_HANDLE 0x14
56#define DRM_AMDGPU_SCHED 0x15
53 57
54#define DRM_IOCTL_AMDGPU_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create) 58#define DRM_IOCTL_AMDGPU_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create)
55#define DRM_IOCTL_AMDGPU_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap) 59#define DRM_IOCTL_AMDGPU_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap)
@@ -63,6 +67,10 @@ extern "C" {
63#define DRM_IOCTL_AMDGPU_WAIT_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_CS, union drm_amdgpu_wait_cs) 67#define DRM_IOCTL_AMDGPU_WAIT_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_CS, union drm_amdgpu_wait_cs)
64#define DRM_IOCTL_AMDGPU_GEM_OP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_OP, struct drm_amdgpu_gem_op) 68#define DRM_IOCTL_AMDGPU_GEM_OP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_OP, struct drm_amdgpu_gem_op)
65#define DRM_IOCTL_AMDGPU_GEM_USERPTR DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_USERPTR, struct drm_amdgpu_gem_userptr) 69#define DRM_IOCTL_AMDGPU_GEM_USERPTR DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_USERPTR, struct drm_amdgpu_gem_userptr)
70#define DRM_IOCTL_AMDGPU_WAIT_FENCES DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_FENCES, union drm_amdgpu_wait_fences)
71#define DRM_IOCTL_AMDGPU_VM DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_VM, union drm_amdgpu_vm)
72#define DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_FENCE_TO_HANDLE, union drm_amdgpu_fence_to_handle)
73#define DRM_IOCTL_AMDGPU_SCHED DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_SCHED, union drm_amdgpu_sched)
66 74
67#define AMDGPU_GEM_DOMAIN_CPU 0x1 75#define AMDGPU_GEM_DOMAIN_CPU 0x1
68#define AMDGPU_GEM_DOMAIN_GTT 0x2 76#define AMDGPU_GEM_DOMAIN_GTT 0x2
@@ -79,22 +87,30 @@ extern "C" {
79#define AMDGPU_GEM_CREATE_CPU_GTT_USWC (1 << 2) 87#define AMDGPU_GEM_CREATE_CPU_GTT_USWC (1 << 2)
80/* Flag that the memory should be in VRAM and cleared */ 88/* Flag that the memory should be in VRAM and cleared */
81#define AMDGPU_GEM_CREATE_VRAM_CLEARED (1 << 3) 89#define AMDGPU_GEM_CREATE_VRAM_CLEARED (1 << 3)
90/* Flag that create shadow bo(GTT) while allocating vram bo */
91#define AMDGPU_GEM_CREATE_SHADOW (1 << 4)
92/* Flag that allocating the BO should use linear VRAM */
93#define AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS (1 << 5)
94/* Flag that BO is always valid in this VM */
95#define AMDGPU_GEM_CREATE_VM_ALWAYS_VALID (1 << 6)
96/* Flag that BO sharing will be explicitly synchronized */
97#define AMDGPU_GEM_CREATE_EXPLICIT_SYNC (1 << 7)
82 98
83struct drm_amdgpu_gem_create_in { 99struct drm_amdgpu_gem_create_in {
84 /** the requested memory size */ 100 /** the requested memory size */
85 uint64_t bo_size; 101 __u64 bo_size;
86 /** physical start_addr alignment in bytes for some HW requirements */ 102 /** physical start_addr alignment in bytes for some HW requirements */
87 uint64_t alignment; 103 __u64 alignment;
88 /** the requested memory domains */ 104 /** the requested memory domains */
89 uint64_t domains; 105 __u64 domains;
90 /** allocation flags */ 106 /** allocation flags */
91 uint64_t domain_flags; 107 __u64 domain_flags;
92}; 108};
93 109
94struct drm_amdgpu_gem_create_out { 110struct drm_amdgpu_gem_create_out {
95 /** returned GEM object handle */ 111 /** returned GEM object handle */
96 uint32_t handle; 112 __u32 handle;
97 uint32_t _pad; 113 __u32 _pad;
98}; 114};
99 115
100union drm_amdgpu_gem_create { 116union drm_amdgpu_gem_create {
@@ -111,28 +127,28 @@ union drm_amdgpu_gem_create {
111 127
112struct drm_amdgpu_bo_list_in { 128struct drm_amdgpu_bo_list_in {
113 /** Type of operation */ 129 /** Type of operation */
114 uint32_t operation; 130 __u32 operation;
115 /** Handle of list or 0 if we want to create one */ 131 /** Handle of list or 0 if we want to create one */
116 uint32_t list_handle; 132 __u32 list_handle;
117 /** Number of BOs in list */ 133 /** Number of BOs in list */
118 uint32_t bo_number; 134 __u32 bo_number;
119 /** Size of each element describing BO */ 135 /** Size of each element describing BO */
120 uint32_t bo_info_size; 136 __u32 bo_info_size;
121 /** Pointer to array describing BOs */ 137 /** Pointer to array describing BOs */
122 uint64_t bo_info_ptr; 138 __u64 bo_info_ptr;
123}; 139};
124 140
125struct drm_amdgpu_bo_list_entry { 141struct drm_amdgpu_bo_list_entry {
126 /** Handle of BO */ 142 /** Handle of BO */
127 uint32_t bo_handle; 143 __u32 bo_handle;
128 /** New (if specified) BO priority to be used during migration */ 144 /** New (if specified) BO priority to be used during migration */
129 uint32_t bo_priority; 145 __u32 bo_priority;
130}; 146};
131 147
132struct drm_amdgpu_bo_list_out { 148struct drm_amdgpu_bo_list_out {
133 /** Handle of resource list */ 149 /** Handle of resource list */
134 uint32_t list_handle; 150 __u32 list_handle;
135 uint32_t _pad; 151 __u32 _pad;
136}; 152};
137 153
138union drm_amdgpu_bo_list { 154union drm_amdgpu_bo_list {
@@ -144,6 +160,7 @@ union drm_amdgpu_bo_list {
144#define AMDGPU_CTX_OP_ALLOC_CTX 1 160#define AMDGPU_CTX_OP_ALLOC_CTX 1
145#define AMDGPU_CTX_OP_FREE_CTX 2 161#define AMDGPU_CTX_OP_FREE_CTX 2
146#define AMDGPU_CTX_OP_QUERY_STATE 3 162#define AMDGPU_CTX_OP_QUERY_STATE 3
163#define AMDGPU_CTX_OP_QUERY_STATE2 4
147 164
148/* GPU reset status */ 165/* GPU reset status */
149#define AMDGPU_CTX_NO_RESET 0 166#define AMDGPU_CTX_NO_RESET 0
@@ -154,28 +171,44 @@ union drm_amdgpu_bo_list {
154/* unknown cause */ 171/* unknown cause */
155#define AMDGPU_CTX_UNKNOWN_RESET 3 172#define AMDGPU_CTX_UNKNOWN_RESET 3
156 173
174/* indicate gpu reset occured after ctx created */
175#define AMDGPU_CTX_QUERY2_FLAGS_RESET (1<<0)
176/* indicate vram lost occured after ctx created */
177#define AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST (1<<1)
178/* indicate some job from this context once cause gpu hang */
179#define AMDGPU_CTX_QUERY2_FLAGS_GUILTY (1<<2)
180
181/* Context priority level */
182#define AMDGPU_CTX_PRIORITY_UNSET -2048
183#define AMDGPU_CTX_PRIORITY_VERY_LOW -1023
184#define AMDGPU_CTX_PRIORITY_LOW -512
185#define AMDGPU_CTX_PRIORITY_NORMAL 0
186/* Selecting a priority above NORMAL requires CAP_SYS_NICE or DRM_MASTER */
187#define AMDGPU_CTX_PRIORITY_HIGH 512
188#define AMDGPU_CTX_PRIORITY_VERY_HIGH 1023
189
157struct drm_amdgpu_ctx_in { 190struct drm_amdgpu_ctx_in {
158 /** AMDGPU_CTX_OP_* */ 191 /** AMDGPU_CTX_OP_* */
159 uint32_t op; 192 __u32 op;
160 /** For future use, no flags defined so far */ 193 /** For future use, no flags defined so far */
161 uint32_t flags; 194 __u32 flags;
162 uint32_t ctx_id; 195 __u32 ctx_id;
163 uint32_t _pad; 196 __s32 priority;
164}; 197};
165 198
166union drm_amdgpu_ctx_out { 199union drm_amdgpu_ctx_out {
167 struct { 200 struct {
168 uint32_t ctx_id; 201 __u32 ctx_id;
169 uint32_t _pad; 202 __u32 _pad;
170 } alloc; 203 } alloc;
171 204
172 struct { 205 struct {
173 /** For future use, no flags defined so far */ 206 /** For future use, no flags defined so far */
174 uint64_t flags; 207 __u64 flags;
175 /** Number of resets caused by this context so far. */ 208 /** Number of resets caused by this context so far. */
176 uint32_t hangs; 209 __u32 hangs;
177 /** Reset status since the last call of the ioctl. */ 210 /** Reset status since the last call of the ioctl. */
178 uint32_t reset_status; 211 __u32 reset_status;
179 } state; 212 } state;
180}; 213};
181 214
@@ -184,6 +217,41 @@ union drm_amdgpu_ctx {
184 union drm_amdgpu_ctx_out out; 217 union drm_amdgpu_ctx_out out;
185}; 218};
186 219
220/* vm ioctl */
221#define AMDGPU_VM_OP_RESERVE_VMID 1
222#define AMDGPU_VM_OP_UNRESERVE_VMID 2
223
224struct drm_amdgpu_vm_in {
225 /** AMDGPU_VM_OP_* */
226 __u32 op;
227 __u32 flags;
228};
229
230struct drm_amdgpu_vm_out {
231 /** For future use, no flags defined so far */
232 __u64 flags;
233};
234
235union drm_amdgpu_vm {
236 struct drm_amdgpu_vm_in in;
237 struct drm_amdgpu_vm_out out;
238};
239
240/* sched ioctl */
241#define AMDGPU_SCHED_OP_PROCESS_PRIORITY_OVERRIDE 1
242
243struct drm_amdgpu_sched_in {
244 /* AMDGPU_SCHED_OP_* */
245 __u32 op;
246 __u32 fd;
247 __s32 priority;
248 __u32 flags;
249};
250
251union drm_amdgpu_sched {
252 struct drm_amdgpu_sched_in in;
253};
254
187/* 255/*
188 * This is not a reliable API and you should expect it to fail for any 256 * This is not a reliable API and you should expect it to fail for any
189 * number of reasons and have fallback path that do not use userptr to 257 * number of reasons and have fallback path that do not use userptr to
@@ -195,14 +263,15 @@ union drm_amdgpu_ctx {
195#define AMDGPU_GEM_USERPTR_REGISTER (1 << 3) 263#define AMDGPU_GEM_USERPTR_REGISTER (1 << 3)
196 264
197struct drm_amdgpu_gem_userptr { 265struct drm_amdgpu_gem_userptr {
198 uint64_t addr; 266 __u64 addr;
199 uint64_t size; 267 __u64 size;
200 /* AMDGPU_GEM_USERPTR_* */ 268 /* AMDGPU_GEM_USERPTR_* */
201 uint32_t flags; 269 __u32 flags;
202 /* Resulting GEM handle */ 270 /* Resulting GEM handle */
203 uint32_t handle; 271 __u32 handle;
204}; 272};
205 273
274/* SI-CI-VI: */
206/* same meaning as the GB_TILE_MODE and GL_MACRO_TILE_MODE fields */ 275/* same meaning as the GB_TILE_MODE and GL_MACRO_TILE_MODE fields */
207#define AMDGPU_TILING_ARRAY_MODE_SHIFT 0 276#define AMDGPU_TILING_ARRAY_MODE_SHIFT 0
208#define AMDGPU_TILING_ARRAY_MODE_MASK 0xf 277#define AMDGPU_TILING_ARRAY_MODE_MASK 0xf
@@ -221,10 +290,15 @@ struct drm_amdgpu_gem_userptr {
221#define AMDGPU_TILING_NUM_BANKS_SHIFT 21 290#define AMDGPU_TILING_NUM_BANKS_SHIFT 21
222#define AMDGPU_TILING_NUM_BANKS_MASK 0x3 291#define AMDGPU_TILING_NUM_BANKS_MASK 0x3
223 292
293/* GFX9 and later: */
294#define AMDGPU_TILING_SWIZZLE_MODE_SHIFT 0
295#define AMDGPU_TILING_SWIZZLE_MODE_MASK 0x1f
296
297/* Set/Get helpers for tiling flags. */
224#define AMDGPU_TILING_SET(field, value) \ 298#define AMDGPU_TILING_SET(field, value) \
225 (((value) & AMDGPU_TILING_##field##_MASK) << AMDGPU_TILING_##field##_SHIFT) 299 (((__u64)(value) & AMDGPU_TILING_##field##_MASK) << AMDGPU_TILING_##field##_SHIFT)
226#define AMDGPU_TILING_GET(value, field) \ 300#define AMDGPU_TILING_GET(value, field) \
227 (((value) >> AMDGPU_TILING_##field##_SHIFT) & AMDGPU_TILING_##field##_MASK) 301 (((__u64)(value) >> AMDGPU_TILING_##field##_SHIFT) & AMDGPU_TILING_##field##_MASK)
228 302
229#define AMDGPU_GEM_METADATA_OP_SET_METADATA 1 303#define AMDGPU_GEM_METADATA_OP_SET_METADATA 1
230#define AMDGPU_GEM_METADATA_OP_GET_METADATA 2 304#define AMDGPU_GEM_METADATA_OP_GET_METADATA 2
@@ -232,28 +306,28 @@ struct drm_amdgpu_gem_userptr {
232/** The same structure is shared for input/output */ 306/** The same structure is shared for input/output */
233struct drm_amdgpu_gem_metadata { 307struct drm_amdgpu_gem_metadata {
234 /** GEM Object handle */ 308 /** GEM Object handle */
235 uint32_t handle; 309 __u32 handle;
236 /** Do we want get or set metadata */ 310 /** Do we want get or set metadata */
237 uint32_t op; 311 __u32 op;
238 struct { 312 struct {
239 /** For future use, no flags defined so far */ 313 /** For future use, no flags defined so far */
240 uint64_t flags; 314 __u64 flags;
241 /** family specific tiling info */ 315 /** family specific tiling info */
242 uint64_t tiling_info; 316 __u64 tiling_info;
243 uint32_t data_size_bytes; 317 __u32 data_size_bytes;
244 uint32_t data[64]; 318 __u32 data[64];
245 } data; 319 } data;
246}; 320};
247 321
248struct drm_amdgpu_gem_mmap_in { 322struct drm_amdgpu_gem_mmap_in {
249 /** the GEM object handle */ 323 /** the GEM object handle */
250 uint32_t handle; 324 __u32 handle;
251 uint32_t _pad; 325 __u32 _pad;
252}; 326};
253 327
254struct drm_amdgpu_gem_mmap_out { 328struct drm_amdgpu_gem_mmap_out {
255 /** mmap offset from the vma offset manager */ 329 /** mmap offset from the vma offset manager */
256 uint64_t addr_ptr; 330 __u64 addr_ptr;
257}; 331};
258 332
259union drm_amdgpu_gem_mmap { 333union drm_amdgpu_gem_mmap {
@@ -263,18 +337,18 @@ union drm_amdgpu_gem_mmap {
263 337
264struct drm_amdgpu_gem_wait_idle_in { 338struct drm_amdgpu_gem_wait_idle_in {
265 /** GEM object handle */ 339 /** GEM object handle */
266 uint32_t handle; 340 __u32 handle;
267 /** For future use, no flags defined so far */ 341 /** For future use, no flags defined so far */
268 uint32_t flags; 342 __u32 flags;
269 /** Absolute timeout to wait */ 343 /** Absolute timeout to wait */
270 uint64_t timeout; 344 __u64 timeout;
271}; 345};
272 346
273struct drm_amdgpu_gem_wait_idle_out { 347struct drm_amdgpu_gem_wait_idle_out {
274 /** BO status: 0 - BO is idle, 1 - BO is busy */ 348 /** BO status: 0 - BO is idle, 1 - BO is busy */
275 uint32_t status; 349 __u32 status;
276 /** Returned current memory domain */ 350 /** Returned current memory domain */
277 uint32_t domain; 351 __u32 domain;
278}; 352};
279 353
280union drm_amdgpu_gem_wait_idle { 354union drm_amdgpu_gem_wait_idle {
@@ -283,19 +357,22 @@ union drm_amdgpu_gem_wait_idle {
283}; 357};
284 358
285struct drm_amdgpu_wait_cs_in { 359struct drm_amdgpu_wait_cs_in {
286 /** Command submission handle */ 360 /* Command submission handle
287 uint64_t handle; 361 * handle equals 0 means none to wait for
362 * handle equals ~0ull means wait for the latest sequence number
363 */
364 __u64 handle;
288 /** Absolute timeout to wait */ 365 /** Absolute timeout to wait */
289 uint64_t timeout; 366 __u64 timeout;
290 uint32_t ip_type; 367 __u32 ip_type;
291 uint32_t ip_instance; 368 __u32 ip_instance;
292 uint32_t ring; 369 __u32 ring;
293 uint32_t ctx_id; 370 __u32 ctx_id;
294}; 371};
295 372
296struct drm_amdgpu_wait_cs_out { 373struct drm_amdgpu_wait_cs_out {
297 /** CS status: 0 - CS completed, 1 - CS still busy */ 374 /** CS status: 0 - CS completed, 1 - CS still busy */
298 uint64_t status; 375 __u64 status;
299}; 376};
300 377
301union drm_amdgpu_wait_cs { 378union drm_amdgpu_wait_cs {
@@ -303,21 +380,49 @@ union drm_amdgpu_wait_cs {
303 struct drm_amdgpu_wait_cs_out out; 380 struct drm_amdgpu_wait_cs_out out;
304}; 381};
305 382
383struct drm_amdgpu_fence {
384 __u32 ctx_id;
385 __u32 ip_type;
386 __u32 ip_instance;
387 __u32 ring;
388 __u64 seq_no;
389};
390
391struct drm_amdgpu_wait_fences_in {
392 /** This points to uint64_t * which points to fences */
393 __u64 fences;
394 __u32 fence_count;
395 __u32 wait_all;
396 __u64 timeout_ns;
397};
398
399struct drm_amdgpu_wait_fences_out {
400 __u32 status;
401 __u32 first_signaled;
402};
403
404union drm_amdgpu_wait_fences {
405 struct drm_amdgpu_wait_fences_in in;
406 struct drm_amdgpu_wait_fences_out out;
407};
408
306#define AMDGPU_GEM_OP_GET_GEM_CREATE_INFO 0 409#define AMDGPU_GEM_OP_GET_GEM_CREATE_INFO 0
307#define AMDGPU_GEM_OP_SET_PLACEMENT 1 410#define AMDGPU_GEM_OP_SET_PLACEMENT 1
308 411
309/* Sets or returns a value associated with a buffer. */ 412/* Sets or returns a value associated with a buffer. */
310struct drm_amdgpu_gem_op { 413struct drm_amdgpu_gem_op {
311 /** GEM object handle */ 414 /** GEM object handle */
312 uint32_t handle; 415 __u32 handle;
313 /** AMDGPU_GEM_OP_* */ 416 /** AMDGPU_GEM_OP_* */
314 uint32_t op; 417 __u32 op;
315 /** Input or return value */ 418 /** Input or return value */
316 uint64_t value; 419 __u64 value;
317}; 420};
318 421
319#define AMDGPU_VA_OP_MAP 1 422#define AMDGPU_VA_OP_MAP 1
320#define AMDGPU_VA_OP_UNMAP 2 423#define AMDGPU_VA_OP_UNMAP 2
424#define AMDGPU_VA_OP_CLEAR 3
425#define AMDGPU_VA_OP_REPLACE 4
321 426
322/* Delay the page table update till the next CS */ 427/* Delay the page table update till the next CS */
323#define AMDGPU_VM_DELAY_UPDATE (1 << 0) 428#define AMDGPU_VM_DELAY_UPDATE (1 << 0)
@@ -329,21 +434,35 @@ struct drm_amdgpu_gem_op {
329#define AMDGPU_VM_PAGE_WRITEABLE (1 << 2) 434#define AMDGPU_VM_PAGE_WRITEABLE (1 << 2)
330/* executable mapping, new for VI */ 435/* executable mapping, new for VI */
331#define AMDGPU_VM_PAGE_EXECUTABLE (1 << 3) 436#define AMDGPU_VM_PAGE_EXECUTABLE (1 << 3)
437/* partially resident texture */
438#define AMDGPU_VM_PAGE_PRT (1 << 4)
439/* MTYPE flags use bit 5 to 8 */
440#define AMDGPU_VM_MTYPE_MASK (0xf << 5)
441/* Default MTYPE. Pre-AI must use this. Recommended for newer ASICs. */
442#define AMDGPU_VM_MTYPE_DEFAULT (0 << 5)
443/* Use NC MTYPE instead of default MTYPE */
444#define AMDGPU_VM_MTYPE_NC (1 << 5)
445/* Use WC MTYPE instead of default MTYPE */
446#define AMDGPU_VM_MTYPE_WC (2 << 5)
447/* Use CC MTYPE instead of default MTYPE */
448#define AMDGPU_VM_MTYPE_CC (3 << 5)
449/* Use UC MTYPE instead of default MTYPE */
450#define AMDGPU_VM_MTYPE_UC (4 << 5)
332 451
333struct drm_amdgpu_gem_va { 452struct drm_amdgpu_gem_va {
334 /** GEM object handle */ 453 /** GEM object handle */
335 uint32_t handle; 454 __u32 handle;
336 uint32_t _pad; 455 __u32 _pad;
337 /** AMDGPU_VA_OP_* */ 456 /** AMDGPU_VA_OP_* */
338 uint32_t operation; 457 __u32 operation;
339 /** AMDGPU_VM_PAGE_* */ 458 /** AMDGPU_VM_PAGE_* */
340 uint32_t flags; 459 __u32 flags;
341 /** va address to assign . Must be correctly aligned.*/ 460 /** va address to assign . Must be correctly aligned.*/
342 uint64_t va_address; 461 __u64 va_address;
343 /** Specify offset inside of BO to assign. Must be correctly aligned.*/ 462 /** Specify offset inside of BO to assign. Must be correctly aligned.*/
344 uint64_t offset_in_bo; 463 __u64 offset_in_bo;
345 /** Specify mapping size. Must be correctly aligned. */ 464 /** Specify mapping size. Must be correctly aligned. */
346 uint64_t map_size; 465 __u64 map_size;
347}; 466};
348 467
349#define AMDGPU_HW_IP_GFX 0 468#define AMDGPU_HW_IP_GFX 0
@@ -351,33 +470,38 @@ struct drm_amdgpu_gem_va {
351#define AMDGPU_HW_IP_DMA 2 470#define AMDGPU_HW_IP_DMA 2
352#define AMDGPU_HW_IP_UVD 3 471#define AMDGPU_HW_IP_UVD 3
353#define AMDGPU_HW_IP_VCE 4 472#define AMDGPU_HW_IP_VCE 4
354#define AMDGPU_HW_IP_NUM 5 473#define AMDGPU_HW_IP_UVD_ENC 5
474#define AMDGPU_HW_IP_VCN_DEC 6
475#define AMDGPU_HW_IP_VCN_ENC 7
476#define AMDGPU_HW_IP_NUM 8
355 477
356#define AMDGPU_HW_IP_INSTANCE_MAX_COUNT 1 478#define AMDGPU_HW_IP_INSTANCE_MAX_COUNT 1
357 479
358#define AMDGPU_CHUNK_ID_IB 0x01 480#define AMDGPU_CHUNK_ID_IB 0x01
359#define AMDGPU_CHUNK_ID_FENCE 0x02 481#define AMDGPU_CHUNK_ID_FENCE 0x02
360#define AMDGPU_CHUNK_ID_DEPENDENCIES 0x03 482#define AMDGPU_CHUNK_ID_DEPENDENCIES 0x03
483#define AMDGPU_CHUNK_ID_SYNCOBJ_IN 0x04
484#define AMDGPU_CHUNK_ID_SYNCOBJ_OUT 0x05
361 485
362struct drm_amdgpu_cs_chunk { 486struct drm_amdgpu_cs_chunk {
363 uint32_t chunk_id; 487 __u32 chunk_id;
364 uint32_t length_dw; 488 __u32 length_dw;
365 uint64_t chunk_data; 489 __u64 chunk_data;
366}; 490};
367 491
368struct drm_amdgpu_cs_in { 492struct drm_amdgpu_cs_in {
369 /** Rendering context id */ 493 /** Rendering context id */
370 uint32_t ctx_id; 494 __u32 ctx_id;
371 /** Handle of resource list associated with CS */ 495 /** Handle of resource list associated with CS */
372 uint32_t bo_list_handle; 496 __u32 bo_list_handle;
373 uint32_t num_chunks; 497 __u32 num_chunks;
374 uint32_t _pad; 498 __u32 _pad;
375 /** this points to uint64_t * which point to cs chunks */ 499 /** this points to __u64 * which point to cs chunks */
376 uint64_t chunks; 500 __u64 chunks;
377}; 501};
378 502
379struct drm_amdgpu_cs_out { 503struct drm_amdgpu_cs_out {
380 uint64_t handle; 504 __u64 handle;
381}; 505};
382 506
383union drm_amdgpu_cs { 507union drm_amdgpu_cs {
@@ -390,36 +514,58 @@ union drm_amdgpu_cs {
390/* This IB should be submitted to CE */ 514/* This IB should be submitted to CE */
391#define AMDGPU_IB_FLAG_CE (1<<0) 515#define AMDGPU_IB_FLAG_CE (1<<0)
392 516
393/* CE Preamble */ 517/* Preamble flag, which means the IB could be dropped if no context switch */
394#define AMDGPU_IB_FLAG_PREAMBLE (1<<1) 518#define AMDGPU_IB_FLAG_PREAMBLE (1<<1)
395 519
520/* Preempt flag, IB should set Pre_enb bit if PREEMPT flag detected */
521#define AMDGPU_IB_FLAG_PREEMPT (1<<2)
522
396struct drm_amdgpu_cs_chunk_ib { 523struct drm_amdgpu_cs_chunk_ib {
397 uint32_t _pad; 524 __u32 _pad;
398 /** AMDGPU_IB_FLAG_* */ 525 /** AMDGPU_IB_FLAG_* */
399 uint32_t flags; 526 __u32 flags;
400 /** Virtual address to begin IB execution */ 527 /** Virtual address to begin IB execution */
401 uint64_t va_start; 528 __u64 va_start;
402 /** Size of submission */ 529 /** Size of submission */
403 uint32_t ib_bytes; 530 __u32 ib_bytes;
404 /** HW IP to submit to */ 531 /** HW IP to submit to */
405 uint32_t ip_type; 532 __u32 ip_type;
406 /** HW IP index of the same type to submit to */ 533 /** HW IP index of the same type to submit to */
407 uint32_t ip_instance; 534 __u32 ip_instance;
408 /** Ring index to submit to */ 535 /** Ring index to submit to */
409 uint32_t ring; 536 __u32 ring;
410}; 537};
411 538
412struct drm_amdgpu_cs_chunk_dep { 539struct drm_amdgpu_cs_chunk_dep {
413 uint32_t ip_type; 540 __u32 ip_type;
414 uint32_t ip_instance; 541 __u32 ip_instance;
415 uint32_t ring; 542 __u32 ring;
416 uint32_t ctx_id; 543 __u32 ctx_id;
417 uint64_t handle; 544 __u64 handle;
418}; 545};
419 546
420struct drm_amdgpu_cs_chunk_fence { 547struct drm_amdgpu_cs_chunk_fence {
421 uint32_t handle; 548 __u32 handle;
422 uint32_t offset; 549 __u32 offset;
550};
551
552struct drm_amdgpu_cs_chunk_sem {
553 __u32 handle;
554};
555
556#define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ 0
557#define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD 1
558#define AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD 2
559
560union drm_amdgpu_fence_to_handle {
561 struct {
562 struct drm_amdgpu_fence fence;
563 __u32 what;
564 __u32 pad;
565 } in;
566 struct {
567 __u32 handle;
568 } out;
423}; 569};
424 570
425struct drm_amdgpu_cs_chunk_data { 571struct drm_amdgpu_cs_chunk_data {
@@ -434,6 +580,7 @@ struct drm_amdgpu_cs_chunk_data {
434 * 580 *
435 */ 581 */
436#define AMDGPU_IDS_FLAGS_FUSION 0x1 582#define AMDGPU_IDS_FLAGS_FUSION 0x1
583#define AMDGPU_IDS_FLAGS_PREEMPTION 0x2
437 584
438/* indicate if acceleration can be working */ 585/* indicate if acceleration can be working */
439#define AMDGPU_INFO_ACCEL_WORKING 0x00 586#define AMDGPU_INFO_ACCEL_WORKING 0x00
@@ -467,6 +614,12 @@ struct drm_amdgpu_cs_chunk_data {
467 #define AMDGPU_INFO_FW_SMC 0x0a 614 #define AMDGPU_INFO_FW_SMC 0x0a
468 /* Subquery id: Query SDMA firmware version */ 615 /* Subquery id: Query SDMA firmware version */
469 #define AMDGPU_INFO_FW_SDMA 0x0b 616 #define AMDGPU_INFO_FW_SDMA 0x0b
617 /* Subquery id: Query PSP SOS firmware version */
618 #define AMDGPU_INFO_FW_SOS 0x0c
619 /* Subquery id: Query PSP ASD firmware version */
620 #define AMDGPU_INFO_FW_ASD 0x0d
621 /* Subquery id: Query VCN firmware version */
622 #define AMDGPU_INFO_FW_VCN 0x0e
470/* number of bytes moved for TTM migration */ 623/* number of bytes moved for TTM migration */
471#define AMDGPU_INFO_NUM_BYTES_MOVED 0x0f 624#define AMDGPU_INFO_NUM_BYTES_MOVED 0x0f
472/* the used VRAM size */ 625/* the used VRAM size */
@@ -483,6 +636,43 @@ struct drm_amdgpu_cs_chunk_data {
483#define AMDGPU_INFO_DEV_INFO 0x16 636#define AMDGPU_INFO_DEV_INFO 0x16
484/* visible vram usage */ 637/* visible vram usage */
485#define AMDGPU_INFO_VIS_VRAM_USAGE 0x17 638#define AMDGPU_INFO_VIS_VRAM_USAGE 0x17
639/* number of TTM buffer evictions */
640#define AMDGPU_INFO_NUM_EVICTIONS 0x18
641/* Query memory about VRAM and GTT domains */
642#define AMDGPU_INFO_MEMORY 0x19
643/* Query vce clock table */
644#define AMDGPU_INFO_VCE_CLOCK_TABLE 0x1A
645/* Query vbios related information */
646#define AMDGPU_INFO_VBIOS 0x1B
647 /* Subquery id: Query vbios size */
648 #define AMDGPU_INFO_VBIOS_SIZE 0x1
649 /* Subquery id: Query vbios image */
650 #define AMDGPU_INFO_VBIOS_IMAGE 0x2
651/* Query UVD handles */
652#define AMDGPU_INFO_NUM_HANDLES 0x1C
653/* Query sensor related information */
654#define AMDGPU_INFO_SENSOR 0x1D
655 /* Subquery id: Query GPU shader clock */
656 #define AMDGPU_INFO_SENSOR_GFX_SCLK 0x1
657 /* Subquery id: Query GPU memory clock */
658 #define AMDGPU_INFO_SENSOR_GFX_MCLK 0x2
659 /* Subquery id: Query GPU temperature */
660 #define AMDGPU_INFO_SENSOR_GPU_TEMP 0x3
661 /* Subquery id: Query GPU load */
662 #define AMDGPU_INFO_SENSOR_GPU_LOAD 0x4
663 /* Subquery id: Query average GPU power */
664 #define AMDGPU_INFO_SENSOR_GPU_AVG_POWER 0x5
665 /* Subquery id: Query northbridge voltage */
666 #define AMDGPU_INFO_SENSOR_VDDNB 0x6
667 /* Subquery id: Query graphics voltage */
668 #define AMDGPU_INFO_SENSOR_VDDGFX 0x7
669 /* Subquery id: Query GPU stable pstate shader clock */
670 #define AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_SCLK 0x8
671 /* Subquery id: Query GPU stable pstate memory clock */
672 #define AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_MCLK 0x9
673/* Number of VRAM page faults on CPU access. */
674#define AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS 0x1E
675#define AMDGPU_INFO_VRAM_LOST_COUNTER 0x1F
486 676
487#define AMDGPU_INFO_MMR_SE_INDEX_SHIFT 0 677#define AMDGPU_INFO_MMR_SE_INDEX_SHIFT 0
488#define AMDGPU_INFO_MMR_SE_INDEX_MASK 0xff 678#define AMDGPU_INFO_MMR_SE_INDEX_MASK 0xff
@@ -491,86 +681,123 @@ struct drm_amdgpu_cs_chunk_data {
491 681
492struct drm_amdgpu_query_fw { 682struct drm_amdgpu_query_fw {
493 /** AMDGPU_INFO_FW_* */ 683 /** AMDGPU_INFO_FW_* */
494 uint32_t fw_type; 684 __u32 fw_type;
495 /** 685 /**
496 * Index of the IP if there are more IPs of 686 * Index of the IP if there are more IPs of
497 * the same type. 687 * the same type.
498 */ 688 */
499 uint32_t ip_instance; 689 __u32 ip_instance;
500 /** 690 /**
501 * Index of the engine. Whether this is used depends 691 * Index of the engine. Whether this is used depends
502 * on the firmware type. (e.g. MEC, SDMA) 692 * on the firmware type. (e.g. MEC, SDMA)
503 */ 693 */
504 uint32_t index; 694 __u32 index;
505 uint32_t _pad; 695 __u32 _pad;
506}; 696};
507 697
508/* Input structure for the INFO ioctl */ 698/* Input structure for the INFO ioctl */
509struct drm_amdgpu_info { 699struct drm_amdgpu_info {
510 /* Where the return value will be stored */ 700 /* Where the return value will be stored */
511 uint64_t return_pointer; 701 __u64 return_pointer;
512 /* The size of the return value. Just like "size" in "snprintf", 702 /* The size of the return value. Just like "size" in "snprintf",
513 * it limits how many bytes the kernel can write. */ 703 * it limits how many bytes the kernel can write. */
514 uint32_t return_size; 704 __u32 return_size;
515 /* The query request id. */ 705 /* The query request id. */
516 uint32_t query; 706 __u32 query;
517 707
518 union { 708 union {
519 struct { 709 struct {
520 uint32_t id; 710 __u32 id;
521 uint32_t _pad; 711 __u32 _pad;
522 } mode_crtc; 712 } mode_crtc;
523 713
524 struct { 714 struct {
525 /** AMDGPU_HW_IP_* */ 715 /** AMDGPU_HW_IP_* */
526 uint32_t type; 716 __u32 type;
527 /** 717 /**
528 * Index of the IP if there are more IPs of the same 718 * Index of the IP if there are more IPs of the same
529 * type. Ignored by AMDGPU_INFO_HW_IP_COUNT. 719 * type. Ignored by AMDGPU_INFO_HW_IP_COUNT.
530 */ 720 */
531 uint32_t ip_instance; 721 __u32 ip_instance;
532 } query_hw_ip; 722 } query_hw_ip;
533 723
534 struct { 724 struct {
535 uint32_t dword_offset; 725 __u32 dword_offset;
536 /** number of registers to read */ 726 /** number of registers to read */
537 uint32_t count; 727 __u32 count;
538 uint32_t instance; 728 __u32 instance;
539 /** For future use, no flags defined so far */ 729 /** For future use, no flags defined so far */
540 uint32_t flags; 730 __u32 flags;
541 } read_mmr_reg; 731 } read_mmr_reg;
542 732
543 struct drm_amdgpu_query_fw query_fw; 733 struct drm_amdgpu_query_fw query_fw;
734
735 struct {
736 __u32 type;
737 __u32 offset;
738 } vbios_info;
739
740 struct {
741 __u32 type;
742 } sensor_info;
544 }; 743 };
545}; 744};
546 745
547struct drm_amdgpu_info_gds { 746struct drm_amdgpu_info_gds {
548 /** GDS GFX partition size */ 747 /** GDS GFX partition size */
549 uint32_t gds_gfx_partition_size; 748 __u32 gds_gfx_partition_size;
550 /** GDS compute partition size */ 749 /** GDS compute partition size */
551 uint32_t compute_partition_size; 750 __u32 compute_partition_size;
552 /** total GDS memory size */ 751 /** total GDS memory size */
553 uint32_t gds_total_size; 752 __u32 gds_total_size;
554 /** GWS size per GFX partition */ 753 /** GWS size per GFX partition */
555 uint32_t gws_per_gfx_partition; 754 __u32 gws_per_gfx_partition;
556 /** GSW size per compute partition */ 755 /** GSW size per compute partition */
557 uint32_t gws_per_compute_partition; 756 __u32 gws_per_compute_partition;
558 /** OA size per GFX partition */ 757 /** OA size per GFX partition */
559 uint32_t oa_per_gfx_partition; 758 __u32 oa_per_gfx_partition;
560 /** OA size per compute partition */ 759 /** OA size per compute partition */
561 uint32_t oa_per_compute_partition; 760 __u32 oa_per_compute_partition;
562 uint32_t _pad; 761 __u32 _pad;
563}; 762};
564 763
565struct drm_amdgpu_info_vram_gtt { 764struct drm_amdgpu_info_vram_gtt {
566 uint64_t vram_size; 765 __u64 vram_size;
567 uint64_t vram_cpu_accessible_size; 766 __u64 vram_cpu_accessible_size;
568 uint64_t gtt_size; 767 __u64 gtt_size;
768};
769
770struct drm_amdgpu_heap_info {
771 /** max. physical memory */
772 __u64 total_heap_size;
773
774 /** Theoretical max. available memory in the given heap */
775 __u64 usable_heap_size;
776
777 /**
778 * Number of bytes allocated in the heap. This includes all processes
779 * and private allocations in the kernel. It changes when new buffers
780 * are allocated, freed, and moved. It cannot be larger than
781 * heap_size.
782 */
783 __u64 heap_usage;
784
785 /**
786 * Theoretical possible max. size of buffer which
787 * could be allocated in the given heap
788 */
789 __u64 max_allocation;
790};
791
792struct drm_amdgpu_memory_info {
793 struct drm_amdgpu_heap_info vram;
794 struct drm_amdgpu_heap_info cpu_accessible_vram;
795 struct drm_amdgpu_heap_info gtt;
569}; 796};
570 797
571struct drm_amdgpu_info_firmware { 798struct drm_amdgpu_info_firmware {
572 uint32_t ver; 799 __u32 ver;
573 uint32_t feature; 800 __u32 feature;
574}; 801};
575 802
576#define AMDGPU_VRAM_TYPE_UNKNOWN 0 803#define AMDGPU_VRAM_TYPE_UNKNOWN 0
@@ -581,74 +808,139 @@ struct drm_amdgpu_info_firmware {
581#define AMDGPU_VRAM_TYPE_GDDR5 5 808#define AMDGPU_VRAM_TYPE_GDDR5 5
582#define AMDGPU_VRAM_TYPE_HBM 6 809#define AMDGPU_VRAM_TYPE_HBM 6
583#define AMDGPU_VRAM_TYPE_DDR3 7 810#define AMDGPU_VRAM_TYPE_DDR3 7
811#define AMDGPU_VRAM_TYPE_DDR4 8
584 812
585struct drm_amdgpu_info_device { 813struct drm_amdgpu_info_device {
586 /** PCI Device ID */ 814 /** PCI Device ID */
587 uint32_t device_id; 815 __u32 device_id;
588 /** Internal chip revision: A0, A1, etc.) */ 816 /** Internal chip revision: A0, A1, etc.) */
589 uint32_t chip_rev; 817 __u32 chip_rev;
590 uint32_t external_rev; 818 __u32 external_rev;
591 /** Revision id in PCI Config space */ 819 /** Revision id in PCI Config space */
592 uint32_t pci_rev; 820 __u32 pci_rev;
593 uint32_t family; 821 __u32 family;
594 uint32_t num_shader_engines; 822 __u32 num_shader_engines;
595 uint32_t num_shader_arrays_per_engine; 823 __u32 num_shader_arrays_per_engine;
596 /* in KHz */ 824 /* in KHz */
597 uint32_t gpu_counter_freq; 825 __u32 gpu_counter_freq;
598 uint64_t max_engine_clock; 826 __u64 max_engine_clock;
599 uint64_t max_memory_clock; 827 __u64 max_memory_clock;
600 /* cu information */ 828 /* cu information */
601 uint32_t cu_active_number; 829 __u32 cu_active_number;
602 uint32_t cu_ao_mask; 830 /* NOTE: cu_ao_mask is INVALID, DON'T use it */
603 uint32_t cu_bitmap[4][4]; 831 __u32 cu_ao_mask;
832 __u32 cu_bitmap[4][4];
604 /** Render backend pipe mask. One render backend is CB+DB. */ 833 /** Render backend pipe mask. One render backend is CB+DB. */
605 uint32_t enabled_rb_pipes_mask; 834 __u32 enabled_rb_pipes_mask;
606 uint32_t num_rb_pipes; 835 __u32 num_rb_pipes;
607 uint32_t num_hw_gfx_contexts; 836 __u32 num_hw_gfx_contexts;
608 uint32_t _pad; 837 __u32 _pad;
609 uint64_t ids_flags; 838 __u64 ids_flags;
610 /** Starting virtual address for UMDs. */ 839 /** Starting virtual address for UMDs. */
611 uint64_t virtual_address_offset; 840 __u64 virtual_address_offset;
612 /** The maximum virtual address */ 841 /** The maximum virtual address */
613 uint64_t virtual_address_max; 842 __u64 virtual_address_max;
614 /** Required alignment of virtual addresses. */ 843 /** Required alignment of virtual addresses. */
615 uint32_t virtual_address_alignment; 844 __u32 virtual_address_alignment;
616 /** Page table entry - fragment size */ 845 /** Page table entry - fragment size */
617 uint32_t pte_fragment_size; 846 __u32 pte_fragment_size;
618 uint32_t gart_page_size; 847 __u32 gart_page_size;
619 /** constant engine ram size*/ 848 /** constant engine ram size*/
620 uint32_t ce_ram_size; 849 __u32 ce_ram_size;
621 /** video memory type info*/ 850 /** video memory type info*/
622 uint32_t vram_type; 851 __u32 vram_type;
623 /** video memory bit width*/ 852 /** video memory bit width*/
624 uint32_t vram_bit_width; 853 __u32 vram_bit_width;
625 /* vce harvesting instance */ 854 /* vce harvesting instance */
626 uint32_t vce_harvest_config; 855 __u32 vce_harvest_config;
856 /* gfx double offchip LDS buffers */
857 __u32 gc_double_offchip_lds_buf;
858 /* NGG Primitive Buffer */
859 __u64 prim_buf_gpu_addr;
860 /* NGG Position Buffer */
861 __u64 pos_buf_gpu_addr;
862 /* NGG Control Sideband */
863 __u64 cntl_sb_buf_gpu_addr;
864 /* NGG Parameter Cache */
865 __u64 param_buf_gpu_addr;
866 __u32 prim_buf_size;
867 __u32 pos_buf_size;
868 __u32 cntl_sb_buf_size;
869 __u32 param_buf_size;
870 /* wavefront size*/
871 __u32 wave_front_size;
872 /* shader visible vgprs*/
873 __u32 num_shader_visible_vgprs;
874 /* CU per shader array*/
875 __u32 num_cu_per_sh;
876 /* number of tcc blocks*/
877 __u32 num_tcc_blocks;
878 /* gs vgt table depth*/
879 __u32 gs_vgt_table_depth;
880 /* gs primitive buffer depth*/
881 __u32 gs_prim_buffer_depth;
882 /* max gs wavefront per vgt*/
883 __u32 max_gs_waves_per_vgt;
884 __u32 _pad1;
885 /* always on cu bitmap */
886 __u32 cu_ao_bitmap[4][4];
887 /** Starting high virtual address for UMDs. */
888 __u64 high_va_offset;
889 /** The maximum high virtual address */
890 __u64 high_va_max;
627}; 891};
628 892
629struct drm_amdgpu_info_hw_ip { 893struct drm_amdgpu_info_hw_ip {
630 /** Version of h/w IP */ 894 /** Version of h/w IP */
631 uint32_t hw_ip_version_major; 895 __u32 hw_ip_version_major;
632 uint32_t hw_ip_version_minor; 896 __u32 hw_ip_version_minor;
633 /** Capabilities */ 897 /** Capabilities */
634 uint64_t capabilities_flags; 898 __u64 capabilities_flags;
635 /** command buffer address start alignment*/ 899 /** command buffer address start alignment*/
636 uint32_t ib_start_alignment; 900 __u32 ib_start_alignment;
637 /** command buffer size alignment*/ 901 /** command buffer size alignment*/
638 uint32_t ib_size_alignment; 902 __u32 ib_size_alignment;
639 /** Bitmask of available rings. Bit 0 means ring 0, etc. */ 903 /** Bitmask of available rings. Bit 0 means ring 0, etc. */
640 uint32_t available_rings; 904 __u32 available_rings;
641 uint32_t _pad; 905 __u32 _pad;
906};
907
908struct drm_amdgpu_info_num_handles {
909 /** Max handles as supported by firmware for UVD */
910 __u32 uvd_max_handles;
911 /** Handles currently in use for UVD */
912 __u32 uvd_used_handles;
913};
914
915#define AMDGPU_VCE_CLOCK_TABLE_ENTRIES 6
916
917struct drm_amdgpu_info_vce_clock_table_entry {
918 /** System clock */
919 __u32 sclk;
920 /** Memory clock */
921 __u32 mclk;
922 /** VCE clock */
923 __u32 eclk;
924 __u32 pad;
925};
926
927struct drm_amdgpu_info_vce_clock_table {
928 struct drm_amdgpu_info_vce_clock_table_entry entries[AMDGPU_VCE_CLOCK_TABLE_ENTRIES];
929 __u32 num_valid_entries;
930 __u32 pad;
642}; 931};
643 932
644/* 933/*
645 * Supported GPU families 934 * Supported GPU families
646 */ 935 */
647#define AMDGPU_FAMILY_UNKNOWN 0 936#define AMDGPU_FAMILY_UNKNOWN 0
937#define AMDGPU_FAMILY_SI 110 /* Hainan, Oland, Verde, Pitcairn, Tahiti */
648#define AMDGPU_FAMILY_CI 120 /* Bonaire, Hawaii */ 938#define AMDGPU_FAMILY_CI 120 /* Bonaire, Hawaii */
649#define AMDGPU_FAMILY_KV 125 /* Kaveri, Kabini, Mullins */ 939#define AMDGPU_FAMILY_KV 125 /* Kaveri, Kabini, Mullins */
650#define AMDGPU_FAMILY_VI 130 /* Iceland, Tonga */ 940#define AMDGPU_FAMILY_VI 130 /* Iceland, Tonga */
651#define AMDGPU_FAMILY_CZ 135 /* Carrizo, Stoney */ 941#define AMDGPU_FAMILY_CZ 135 /* Carrizo, Stoney */
942#define AMDGPU_FAMILY_AI 141 /* Vega10 */
943#define AMDGPU_FAMILY_RV 142 /* Raven */
652 944
653#if defined(__cplusplus) 945#if defined(__cplusplus)
654} 946}
diff --git a/include/drm/drm.h b/include/drm/drm.h
index f6fd5c2c..f0bd91de 100644
--- a/include/drm/drm.h
+++ b/include/drm/drm.h
@@ -641,6 +641,8 @@ struct drm_gem_open {
641#define DRM_CAP_CURSOR_HEIGHT 0x9 641#define DRM_CAP_CURSOR_HEIGHT 0x9
642#define DRM_CAP_ADDFB2_MODIFIERS 0x10 642#define DRM_CAP_ADDFB2_MODIFIERS 0x10
643#define DRM_CAP_PAGE_FLIP_TARGET 0x11 643#define DRM_CAP_PAGE_FLIP_TARGET 0x11
644#define DRM_CAP_CRTC_IN_VBLANK_EVENT 0x12
645#define DRM_CAP_SYNCOBJ 0x13
644 646
645/** DRM_IOCTL_GET_CAP ioctl argument type */ 647/** DRM_IOCTL_GET_CAP ioctl argument type */
646struct drm_get_cap { 648struct drm_get_cap {
@@ -690,6 +692,67 @@ struct drm_prime_handle {
690 __s32 fd; 692 __s32 fd;
691}; 693};
692 694
695struct drm_syncobj_create {
696 __u32 handle;
697#define DRM_SYNCOBJ_CREATE_SIGNALED (1 << 0)
698 __u32 flags;
699};
700
701struct drm_syncobj_destroy {
702 __u32 handle;
703 __u32 pad;
704};
705
706#define DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE (1 << 0)
707#define DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE (1 << 0)
708struct drm_syncobj_handle {
709 __u32 handle;
710 __u32 flags;
711
712 __s32 fd;
713 __u32 pad;
714};
715
716#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL (1 << 0)
717#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (1 << 1)
718struct drm_syncobj_wait {
719 __u64 handles;
720 /* absolute timeout */
721 __s64 timeout_nsec;
722 __u32 count_handles;
723 __u32 flags;
724 __u32 first_signaled; /* only valid when not waiting all */
725 __u32 pad;
726};
727
728struct drm_syncobj_array {
729 __u64 handles;
730 __u32 count_handles;
731 __u32 pad;
732};
733
734/* Query current scanout sequence number */
735struct drm_crtc_get_sequence {
736 __u32 crtc_id; /* requested crtc_id */
737 __u32 active; /* return: crtc output is active */
738 __u64 sequence; /* return: most recent vblank sequence */
739 __s64 sequence_ns; /* return: most recent time of first pixel out */
740};
741
742/* Queue event to be delivered at specified sequence. Time stamp marks
743 * when the first pixel of the refresh cycle leaves the display engine
744 * for the display
745 */
746#define DRM_CRTC_SEQUENCE_RELATIVE 0x00000001 /* sequence is relative to current */
747#define DRM_CRTC_SEQUENCE_NEXT_ON_MISS 0x00000002 /* Use next sequence if we've missed */
748
749struct drm_crtc_queue_sequence {
750 __u32 crtc_id;
751 __u32 flags;
752 __u64 sequence; /* on input, target sequence. on output, actual sequence */
753 __u64 user_data; /* user data passed to event */
754};
755
693#if defined(__cplusplus) 756#if defined(__cplusplus)
694} 757}
695#endif 758#endif
@@ -772,6 +835,9 @@ extern "C" {
772 835
773#define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, union drm_wait_vblank) 836#define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, union drm_wait_vblank)
774 837
838#define DRM_IOCTL_CRTC_GET_SEQUENCE DRM_IOWR(0x3b, struct drm_crtc_get_sequence)
839#define DRM_IOCTL_CRTC_QUEUE_SEQUENCE DRM_IOWR(0x3c, struct drm_crtc_queue_sequence)
840
775#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw) 841#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw)
776 842
777#define DRM_IOCTL_MODE_GETRESOURCES DRM_IOWR(0xA0, struct drm_mode_card_res) 843#define DRM_IOCTL_MODE_GETRESOURCES DRM_IOWR(0xA0, struct drm_mode_card_res)
@@ -808,6 +874,19 @@ extern "C" {
808#define DRM_IOCTL_MODE_CREATEPROPBLOB DRM_IOWR(0xBD, struct drm_mode_create_blob) 874#define DRM_IOCTL_MODE_CREATEPROPBLOB DRM_IOWR(0xBD, struct drm_mode_create_blob)
809#define DRM_IOCTL_MODE_DESTROYPROPBLOB DRM_IOWR(0xBE, struct drm_mode_destroy_blob) 875#define DRM_IOCTL_MODE_DESTROYPROPBLOB DRM_IOWR(0xBE, struct drm_mode_destroy_blob)
810 876
877#define DRM_IOCTL_SYNCOBJ_CREATE DRM_IOWR(0xBF, struct drm_syncobj_create)
878#define DRM_IOCTL_SYNCOBJ_DESTROY DRM_IOWR(0xC0, struct drm_syncobj_destroy)
879#define DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD DRM_IOWR(0xC1, struct drm_syncobj_handle)
880#define DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE DRM_IOWR(0xC2, struct drm_syncobj_handle)
881#define DRM_IOCTL_SYNCOBJ_WAIT DRM_IOWR(0xC3, struct drm_syncobj_wait)
882#define DRM_IOCTL_SYNCOBJ_RESET DRM_IOWR(0xC4, struct drm_syncobj_array)
883#define DRM_IOCTL_SYNCOBJ_SIGNAL DRM_IOWR(0xC5, struct drm_syncobj_array)
884
885#define DRM_IOCTL_MODE_CREATE_LEASE DRM_IOWR(0xC6, struct drm_mode_create_lease)
886#define DRM_IOCTL_MODE_LIST_LESSEES DRM_IOWR(0xC7, struct drm_mode_list_lessees)
887#define DRM_IOCTL_MODE_GET_LEASE DRM_IOWR(0xC8, struct drm_mode_get_lease)
888#define DRM_IOCTL_MODE_REVOKE_LEASE DRM_IOWR(0xC9, struct drm_mode_revoke_lease)
889
811/** 890/**
812 * Device specific ioctls should only be in their respective headers 891 * Device specific ioctls should only be in their respective headers
813 * The device specific ioctl range is from 0x40 to 0x9f. 892 * The device specific ioctl range is from 0x40 to 0x9f.
@@ -838,6 +917,7 @@ struct drm_event {
838 917
839#define DRM_EVENT_VBLANK 0x01 918#define DRM_EVENT_VBLANK 0x01
840#define DRM_EVENT_FLIP_COMPLETE 0x02 919#define DRM_EVENT_FLIP_COMPLETE 0x02
920#define DRM_EVENT_CRTC_SEQUENCE 0x03
841 921
842struct drm_event_vblank { 922struct drm_event_vblank {
843 struct drm_event base; 923 struct drm_event base;
@@ -845,7 +925,17 @@ struct drm_event_vblank {
845 __u32 tv_sec; 925 __u32 tv_sec;
846 __u32 tv_usec; 926 __u32 tv_usec;
847 __u32 sequence; 927 __u32 sequence;
848 __u32 reserved; 928 __u32 crtc_id; /* 0 on older kernels that do not support this */
929};
930
931/* Event delivered at sequence. Time stamp marks when the first pixel
932 * of the refresh cycle leaves the display engine for the display
933 */
934struct drm_event_crtc_sequence {
935 struct drm_event base;
936 __u64 user_data;
937 __s64 time_ns;
938 __u64 sequence;
849}; 939};
850 940
851/* typedef area */ 941/* typedef area */
diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h
index 4d8da699..e04613d3 100644
--- a/include/drm/drm_fourcc.h
+++ b/include/drm/drm_fourcc.h
@@ -26,6 +26,10 @@
26 26
27#include "drm.h" 27#include "drm.h"
28 28
29#if defined(__cplusplus)
30extern "C" {
31#endif
32
29#define fourcc_code(a, b, c, d) ((__u32)(a) | ((__u32)(b) << 8) | \ 33#define fourcc_code(a, b, c, d) ((__u32)(a) | ((__u32)(b) << 8) | \
30 ((__u32)(c) << 16) | ((__u32)(d) << 24)) 34 ((__u32)(c) << 16) | ((__u32)(d) << 24))
31 35
@@ -37,10 +41,17 @@
37/* 8 bpp Red */ 41/* 8 bpp Red */
38#define DRM_FORMAT_R8 fourcc_code('R', '8', ' ', ' ') /* [7:0] R */ 42#define DRM_FORMAT_R8 fourcc_code('R', '8', ' ', ' ') /* [7:0] R */
39 43
44/* 16 bpp Red */
45#define DRM_FORMAT_R16 fourcc_code('R', '1', '6', ' ') /* [15:0] R little endian */
46
40/* 16 bpp RG */ 47/* 16 bpp RG */
41#define DRM_FORMAT_RG88 fourcc_code('R', 'G', '8', '8') /* [15:0] R:G 8:8 little endian */ 48#define DRM_FORMAT_RG88 fourcc_code('R', 'G', '8', '8') /* [15:0] R:G 8:8 little endian */
42#define DRM_FORMAT_GR88 fourcc_code('G', 'R', '8', '8') /* [15:0] G:R 8:8 little endian */ 49#define DRM_FORMAT_GR88 fourcc_code('G', 'R', '8', '8') /* [15:0] G:R 8:8 little endian */
43 50
51/* 32 bpp RG */
52#define DRM_FORMAT_RG1616 fourcc_code('R', 'G', '3', '2') /* [31:0] R:G 16:16 little endian */
53#define DRM_FORMAT_GR1616 fourcc_code('G', 'R', '3', '2') /* [31:0] G:R 16:16 little endian */
54
44/* 8 bpp RGB */ 55/* 8 bpp RGB */
45#define DRM_FORMAT_RGB332 fourcc_code('R', 'G', 'B', '8') /* [7:0] R:G:B 3:3:2 */ 56#define DRM_FORMAT_RGB332 fourcc_code('R', 'G', 'B', '8') /* [7:0] R:G:B 3:3:2 */
46#define DRM_FORMAT_BGR233 fourcc_code('B', 'G', 'R', '8') /* [7:0] B:G:R 2:3:3 */ 57#define DRM_FORMAT_BGR233 fourcc_code('B', 'G', 'R', '8') /* [7:0] B:G:R 2:3:3 */
@@ -103,6 +114,20 @@
103#define DRM_FORMAT_AYUV fourcc_code('A', 'Y', 'U', 'V') /* [31:0] A:Y:Cb:Cr 8:8:8:8 little endian */ 114#define DRM_FORMAT_AYUV fourcc_code('A', 'Y', 'U', 'V') /* [31:0] A:Y:Cb:Cr 8:8:8:8 little endian */
104 115
105/* 116/*
117 * 2 plane RGB + A
118 * index 0 = RGB plane, same format as the corresponding non _A8 format has
119 * index 1 = A plane, [7:0] A
120 */
121#define DRM_FORMAT_XRGB8888_A8 fourcc_code('X', 'R', 'A', '8')
122#define DRM_FORMAT_XBGR8888_A8 fourcc_code('X', 'B', 'A', '8')
123#define DRM_FORMAT_RGBX8888_A8 fourcc_code('R', 'X', 'A', '8')
124#define DRM_FORMAT_BGRX8888_A8 fourcc_code('B', 'X', 'A', '8')
125#define DRM_FORMAT_RGB888_A8 fourcc_code('R', '8', 'A', '8')
126#define DRM_FORMAT_BGR888_A8 fourcc_code('B', '8', 'A', '8')
127#define DRM_FORMAT_RGB565_A8 fourcc_code('R', '5', 'A', '8')
128#define DRM_FORMAT_BGR565_A8 fourcc_code('B', '5', 'A', '8')
129
130/*
106 * 2 plane YCbCr 131 * 2 plane YCbCr
107 * index 0 = Y plane, [7:0] Y 132 * index 0 = Y plane, [7:0] Y
108 * index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian 133 * index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian
@@ -150,15 +175,20 @@
150 175
151/* Vendor Ids: */ 176/* Vendor Ids: */
152#define DRM_FORMAT_MOD_NONE 0 177#define DRM_FORMAT_MOD_NONE 0
178#define DRM_FORMAT_MOD_VENDOR_NONE 0
153#define DRM_FORMAT_MOD_VENDOR_INTEL 0x01 179#define DRM_FORMAT_MOD_VENDOR_INTEL 0x01
154#define DRM_FORMAT_MOD_VENDOR_AMD 0x02 180#define DRM_FORMAT_MOD_VENDOR_AMD 0x02
155#define DRM_FORMAT_MOD_VENDOR_NV 0x03 181#define DRM_FORMAT_MOD_VENDOR_NVIDIA 0x03
156#define DRM_FORMAT_MOD_VENDOR_SAMSUNG 0x04 182#define DRM_FORMAT_MOD_VENDOR_SAMSUNG 0x04
157#define DRM_FORMAT_MOD_VENDOR_QCOM 0x05 183#define DRM_FORMAT_MOD_VENDOR_QCOM 0x05
184#define DRM_FORMAT_MOD_VENDOR_VIVANTE 0x06
185#define DRM_FORMAT_MOD_VENDOR_BROADCOM 0x07
158/* add more to the end as needed */ 186/* add more to the end as needed */
159 187
188#define DRM_FORMAT_RESERVED ((1ULL << 56) - 1)
189
160#define fourcc_mod_code(vendor, val) \ 190#define fourcc_mod_code(vendor, val) \
161 ((((__u64)DRM_FORMAT_MOD_VENDOR_## vendor) << 56) | (val & 0x00ffffffffffffffULL)) 191 ((((__u64)DRM_FORMAT_MOD_VENDOR_## vendor) << 56) | ((val) & 0x00ffffffffffffffULL))
162 192
163/* 193/*
164 * Format Modifier tokens: 194 * Format Modifier tokens:
@@ -168,6 +198,25 @@
168 * authoritative source for all of these. 198 * authoritative source for all of these.
169 */ 199 */
170 200
201/*
202 * Invalid Modifier
203 *
204 * This modifier can be used as a sentinel to terminate the format modifiers
205 * list, or to initialize a variable with an invalid modifier. It might also be
206 * used to report an error back to userspace for certain APIs.
207 */
208#define DRM_FORMAT_MOD_INVALID fourcc_mod_code(NONE, DRM_FORMAT_RESERVED)
209
210/*
211 * Linear Layout
212 *
213 * Just plain linear layout. Note that this is different from no specifying any
214 * modifier (e.g. not setting DRM_MODE_FB_MODIFIERS in the DRM_ADDFB2 ioctl),
215 * which tells the driver to also take driver-internal information into account
216 * and so might actually result in a tiled framebuffer.
217 */
218#define DRM_FORMAT_MOD_LINEAR fourcc_mod_code(NONE, 0)
219
171/* Intel framebuffer modifiers */ 220/* Intel framebuffer modifiers */
172 221
173/* 222/*
@@ -215,6 +264,26 @@
215#define I915_FORMAT_MOD_Yf_TILED fourcc_mod_code(INTEL, 3) 264#define I915_FORMAT_MOD_Yf_TILED fourcc_mod_code(INTEL, 3)
216 265
217/* 266/*
267 * Intel color control surface (CCS) for render compression
268 *
269 * The framebuffer format must be one of the 8:8:8:8 RGB formats.
270 * The main surface will be plane index 0 and must be Y/Yf-tiled,
271 * the CCS will be plane index 1.
272 *
273 * Each CCS tile matches a 1024x512 pixel area of the main surface.
274 * To match certain aspects of the 3D hardware the CCS is
275 * considered to be made up of normal 128Bx32 Y tiles, Thus
276 * the CCS pitch must be specified in multiples of 128 bytes.
277 *
278 * In reality the CCS tile appears to be a 64Bx64 Y tile, composed
279 * of QWORD (8 bytes) chunks instead of OWORD (16 bytes) chunks.
280 * But that fact is not relevant unless the memory is accessed
281 * directly.
282 */
283#define I915_FORMAT_MOD_Y_TILED_CCS fourcc_mod_code(INTEL, 4)
284#define I915_FORMAT_MOD_Yf_TILED_CCS fourcc_mod_code(INTEL, 5)
285
286/*
218 * Tiled, NV12MT, grouped in 64 (pixels) x 32 (lines) -sized macroblocks 287 * Tiled, NV12MT, grouped in 64 (pixels) x 32 (lines) -sized macroblocks
219 * 288 *
220 * Macroblocks are laid in a Z-shape, and each pixel data is following the 289 * Macroblocks are laid in a Z-shape, and each pixel data is following the
@@ -229,4 +298,115 @@
229 */ 298 */
230#define DRM_FORMAT_MOD_SAMSUNG_64_32_TILE fourcc_mod_code(SAMSUNG, 1) 299#define DRM_FORMAT_MOD_SAMSUNG_64_32_TILE fourcc_mod_code(SAMSUNG, 1)
231 300
301/* Vivante framebuffer modifiers */
302
303/*
304 * Vivante 4x4 tiling layout
305 *
306 * This is a simple tiled layout using tiles of 4x4 pixels in a row-major
307 * layout.
308 */
309#define DRM_FORMAT_MOD_VIVANTE_TILED fourcc_mod_code(VIVANTE, 1)
310
311/*
312 * Vivante 64x64 super-tiling layout
313 *
314 * This is a tiled layout using 64x64 pixel super-tiles, where each super-tile
315 * contains 8x4 groups of 2x4 tiles of 4x4 pixels (like above) each, all in row-
316 * major layout.
317 *
318 * For more information: see
319 * https://github.com/etnaviv/etna_viv/blob/master/doc/hardware.md#texture-tiling
320 */
321#define DRM_FORMAT_MOD_VIVANTE_SUPER_TILED fourcc_mod_code(VIVANTE, 2)
322
323/*
324 * Vivante 4x4 tiling layout for dual-pipe
325 *
326 * Same as the 4x4 tiling layout, except every second 4x4 pixel tile starts at a
327 * different base address. Offsets from the base addresses are therefore halved
328 * compared to the non-split tiled layout.
329 */
330#define DRM_FORMAT_MOD_VIVANTE_SPLIT_TILED fourcc_mod_code(VIVANTE, 3)
331
332/*
333 * Vivante 64x64 super-tiling layout for dual-pipe
334 *
335 * Same as the 64x64 super-tiling layout, except every second 4x4 pixel tile
336 * starts at a different base address. Offsets from the base addresses are
337 * therefore halved compared to the non-split super-tiled layout.
338 */
339#define DRM_FORMAT_MOD_VIVANTE_SPLIT_SUPER_TILED fourcc_mod_code(VIVANTE, 4)
340
341/* NVIDIA frame buffer modifiers */
342
343/*
344 * Tegra Tiled Layout, used by Tegra 2, 3 and 4.
345 *
346 * Pixels are arranged in simple tiles of 16 x 16 bytes.
347 */
348#define DRM_FORMAT_MOD_NVIDIA_TEGRA_TILED fourcc_mod_code(NVIDIA, 1)
349
350/*
351 * 16Bx2 Block Linear layout, used by desktop GPUs, and Tegra K1 and later
352 *
353 * Pixels are arranged in 64x8 Groups Of Bytes (GOBs). GOBs are then stacked
354 * vertically by a power of 2 (1 to 32 GOBs) to form a block.
355 *
356 * Within a GOB, data is ordered as 16B x 2 lines sectors laid in Z-shape.
357 *
358 * Parameter 'v' is the log2 encoding of the number of GOBs stacked vertically.
359 * Valid values are:
360 *
361 * 0 == ONE_GOB
362 * 1 == TWO_GOBS
363 * 2 == FOUR_GOBS
364 * 3 == EIGHT_GOBS
365 * 4 == SIXTEEN_GOBS
366 * 5 == THIRTYTWO_GOBS
367 *
368 * Chapter 20 "Pixel Memory Formats" of the Tegra X1 TRM describes this format
369 * in full detail.
370 */
371#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(v) \
372 fourcc_mod_code(NVIDIA, 0x10 | ((v) & 0xf))
373
374#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_ONE_GOB \
375 fourcc_mod_code(NVIDIA, 0x10)
376#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_TWO_GOB \
377 fourcc_mod_code(NVIDIA, 0x11)
378#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_FOUR_GOB \
379 fourcc_mod_code(NVIDIA, 0x12)
380#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_EIGHT_GOB \
381 fourcc_mod_code(NVIDIA, 0x13)
382#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_SIXTEEN_GOB \
383 fourcc_mod_code(NVIDIA, 0x14)
384#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_THIRTYTWO_GOB \
385 fourcc_mod_code(NVIDIA, 0x15)
386
387/*
388 * Broadcom VC4 "T" format
389 *
390 * This is the primary layout that the V3D GPU can texture from (it
391 * can't do linear). The T format has:
392 *
393 * - 64b utiles of pixels in a raster-order grid according to cpp. It's 4x4
394 * pixels at 32 bit depth.
395 *
396 * - 1k subtiles made of a 4x4 raster-order grid of 64b utiles (so usually
397 * 16x16 pixels).
398 *
399 * - 4k tiles made of a 2x2 grid of 1k subtiles (so usually 32x32 pixels). On
400 * even 4k tile rows, they're arranged as (BL, TL, TR, BR), and on odd rows
401 * they're (TR, BR, BL, TL), where bottom left is start of memory.
402 *
403 * - an image made of 4k tiles in rows either left-to-right (even rows of 4k
404 * tiles) or right-to-left (odd rows of 4k tiles).
405 */
406#define DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED fourcc_mod_code(BROADCOM, 1)
407
408#if defined(__cplusplus)
409}
410#endif
411
232#endif /* DRM_FOURCC_H */ 412#endif /* DRM_FOURCC_H */
diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h
index 6708e2b7..74368de3 100644
--- a/include/drm/drm_mode.h
+++ b/include/drm/drm_mode.h
@@ -38,16 +38,24 @@ extern "C" {
38#define DRM_DISPLAY_MODE_LEN 32 38#define DRM_DISPLAY_MODE_LEN 32
39#define DRM_PROP_NAME_LEN 32 39#define DRM_PROP_NAME_LEN 32
40 40
41#define DRM_MODE_TYPE_BUILTIN (1<<0) 41#define DRM_MODE_TYPE_BUILTIN (1<<0) /* deprecated */
42#define DRM_MODE_TYPE_CLOCK_C ((1<<1) | DRM_MODE_TYPE_BUILTIN) 42#define DRM_MODE_TYPE_CLOCK_C ((1<<1) | DRM_MODE_TYPE_BUILTIN) /* deprecated */
43#define DRM_MODE_TYPE_CRTC_C ((1<<2) | DRM_MODE_TYPE_BUILTIN) 43#define DRM_MODE_TYPE_CRTC_C ((1<<2) | DRM_MODE_TYPE_BUILTIN) /* deprecated */
44#define DRM_MODE_TYPE_PREFERRED (1<<3) 44#define DRM_MODE_TYPE_PREFERRED (1<<3)
45#define DRM_MODE_TYPE_DEFAULT (1<<4) 45#define DRM_MODE_TYPE_DEFAULT (1<<4) /* deprecated */
46#define DRM_MODE_TYPE_USERDEF (1<<5) 46#define DRM_MODE_TYPE_USERDEF (1<<5)
47#define DRM_MODE_TYPE_DRIVER (1<<6) 47#define DRM_MODE_TYPE_DRIVER (1<<6)
48 48
49/* Video mode flags */ 49/* Video mode flags */
50/* bit compatible with the xorg definitions. */ 50/* bit compatible with the xrandr RR_ definitions (bits 0-13)
51 *
52 * ABI warning: Existing userspace really expects
53 * the mode flags to match the xrandr definitions. Any
54 * changes that don't match the xrandr definitions will
55 * likely need a new client cap or some other mechanism
56 * to avoid breaking existing userspace. This includes
57 * allocating new flags in the previously unused bits!
58 */
51#define DRM_MODE_FLAG_PHSYNC (1<<0) 59#define DRM_MODE_FLAG_PHSYNC (1<<0)
52#define DRM_MODE_FLAG_NHSYNC (1<<1) 60#define DRM_MODE_FLAG_NHSYNC (1<<1)
53#define DRM_MODE_FLAG_PVSYNC (1<<2) 61#define DRM_MODE_FLAG_PVSYNC (1<<2)
@@ -58,8 +66,8 @@ extern "C" {
58#define DRM_MODE_FLAG_PCSYNC (1<<7) 66#define DRM_MODE_FLAG_PCSYNC (1<<7)
59#define DRM_MODE_FLAG_NCSYNC (1<<8) 67#define DRM_MODE_FLAG_NCSYNC (1<<8)
60#define DRM_MODE_FLAG_HSKEW (1<<9) /* hskew provided */ 68#define DRM_MODE_FLAG_HSKEW (1<<9) /* hskew provided */
61#define DRM_MODE_FLAG_BCAST (1<<10) 69#define DRM_MODE_FLAG_BCAST (1<<10) /* deprecated */
62#define DRM_MODE_FLAG_PIXMUX (1<<11) 70#define DRM_MODE_FLAG_PIXMUX (1<<11) /* deprecated */
63#define DRM_MODE_FLAG_DBLCLK (1<<12) 71#define DRM_MODE_FLAG_DBLCLK (1<<12)
64#define DRM_MODE_FLAG_CLKDIV2 (1<<13) 72#define DRM_MODE_FLAG_CLKDIV2 (1<<13)
65 /* 73 /*
@@ -67,7 +75,7 @@ extern "C" {
67 * (define not exposed to user space). 75 * (define not exposed to user space).
68 */ 76 */
69#define DRM_MODE_FLAG_3D_MASK (0x1f<<14) 77#define DRM_MODE_FLAG_3D_MASK (0x1f<<14)
70#define DRM_MODE_FLAG_3D_NONE (0<<14) 78#define DRM_MODE_FLAG_3D_NONE (0<<14)
71#define DRM_MODE_FLAG_3D_FRAME_PACKING (1<<14) 79#define DRM_MODE_FLAG_3D_FRAME_PACKING (1<<14)
72#define DRM_MODE_FLAG_3D_FIELD_ALTERNATIVE (2<<14) 80#define DRM_MODE_FLAG_3D_FIELD_ALTERNATIVE (2<<14)
73#define DRM_MODE_FLAG_3D_LINE_ALTERNATIVE (3<<14) 81#define DRM_MODE_FLAG_3D_LINE_ALTERNATIVE (3<<14)
@@ -77,6 +85,19 @@ extern "C" {
77#define DRM_MODE_FLAG_3D_TOP_AND_BOTTOM (7<<14) 85#define DRM_MODE_FLAG_3D_TOP_AND_BOTTOM (7<<14)
78#define DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF (8<<14) 86#define DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF (8<<14)
79 87
88/* Picture aspect ratio options */
89#define DRM_MODE_PICTURE_ASPECT_NONE 0
90#define DRM_MODE_PICTURE_ASPECT_4_3 1
91#define DRM_MODE_PICTURE_ASPECT_16_9 2
92
93/* Aspect ratio flag bitmask (4 bits 22:19) */
94#define DRM_MODE_FLAG_PIC_AR_MASK (0x0F<<19)
95#define DRM_MODE_FLAG_PIC_AR_NONE \
96 (DRM_MODE_PICTURE_ASPECT_NONE<<19)
97#define DRM_MODE_FLAG_PIC_AR_4_3 \
98 (DRM_MODE_PICTURE_ASPECT_4_3<<19)
99#define DRM_MODE_FLAG_PIC_AR_16_9 \
100 (DRM_MODE_PICTURE_ASPECT_16_9<<19)
80 101
81/* DPMS flags */ 102/* DPMS flags */
82/* bit compatible with the xorg definitions. */ 103/* bit compatible with the xorg definitions. */
@@ -92,11 +113,6 @@ extern "C" {
92#define DRM_MODE_SCALE_CENTER 2 /* Centered, no scaling */ 113#define DRM_MODE_SCALE_CENTER 2 /* Centered, no scaling */
93#define DRM_MODE_SCALE_ASPECT 3 /* Full screen, preserve aspect */ 114#define DRM_MODE_SCALE_ASPECT 3 /* Full screen, preserve aspect */
94 115
95/* Picture aspect ratio options */
96#define DRM_MODE_PICTURE_ASPECT_NONE 0
97#define DRM_MODE_PICTURE_ASPECT_4_3 1
98#define DRM_MODE_PICTURE_ASPECT_16_9 2
99
100/* Dithering mode options */ 116/* Dithering mode options */
101#define DRM_MODE_DITHERING_OFF 0 117#define DRM_MODE_DITHERING_OFF 0
102#define DRM_MODE_DITHERING_ON 1 118#define DRM_MODE_DITHERING_ON 1
@@ -107,13 +123,73 @@ extern "C" {
107#define DRM_MODE_DIRTY_ON 1 123#define DRM_MODE_DIRTY_ON 1
108#define DRM_MODE_DIRTY_ANNOTATE 2 124#define DRM_MODE_DIRTY_ANNOTATE 2
109 125
126/* Link Status options */
127#define DRM_MODE_LINK_STATUS_GOOD 0
128#define DRM_MODE_LINK_STATUS_BAD 1
129
130/*
131 * DRM_MODE_ROTATE_<degrees>
132 *
133 * Signals that a drm plane is been rotated <degrees> degrees in counter
134 * clockwise direction.
135 *
136 * This define is provided as a convenience, looking up the property id
137 * using the name->prop id lookup is the preferred method.
138 */
139#define DRM_MODE_ROTATE_0 (1<<0)
140#define DRM_MODE_ROTATE_90 (1<<1)
141#define DRM_MODE_ROTATE_180 (1<<2)
142#define DRM_MODE_ROTATE_270 (1<<3)
143
144/*
145 * DRM_MODE_ROTATE_MASK
146 *
147 * Bitmask used to look for drm plane rotations.
148 */
149#define DRM_MODE_ROTATE_MASK (\
150 DRM_MODE_ROTATE_0 | \
151 DRM_MODE_ROTATE_90 | \
152 DRM_MODE_ROTATE_180 | \
153 DRM_MODE_ROTATE_270)
154
155/*
156 * DRM_MODE_REFLECT_<axis>
157 *
158 * Signals that the contents of a drm plane is reflected in the <axis> axis,
159 * in the same way as mirroring.
160 *
161 * This define is provided as a convenience, looking up the property id
162 * using the name->prop id lookup is the preferred method.
163 */
164#define DRM_MODE_REFLECT_X (1<<4)
165#define DRM_MODE_REFLECT_Y (1<<5)
166
167/*
168 * DRM_MODE_REFLECT_MASK
169 *
170 * Bitmask used to look for drm plane reflections.
171 */
172#define DRM_MODE_REFLECT_MASK (\
173 DRM_MODE_REFLECT_X | \
174 DRM_MODE_REFLECT_Y)
175
176
177/*
178 * Legacy definitions for old code that doesn't use
179 * the above mask definitions. Don't use in future code.
180 */
110/* rotation property bits */ 181/* rotation property bits */
111#define DRM_ROTATE_0 0 182#define DRM_ROTATE_0 0
112#define DRM_ROTATE_90 1 183#define DRM_ROTATE_90 1
113#define DRM_ROTATE_180 2 184#define DRM_ROTATE_180 2
114#define DRM_ROTATE_270 3 185#define DRM_ROTATE_270 3
115#define DRM_REFLECT_X 4 186#define DRM_REFLECT_X 4
116#define DRM_REFLECT_Y 5 187#define DRM_REFLECT_Y 5
188
189/* Content Protection Flags */
190#define DRM_MODE_CONTENT_PROTECTION_UNDESIRED 0
191#define DRM_MODE_CONTENT_PROTECTION_DESIRED 1
192#define DRM_MODE_CONTENT_PROTECTION_ENABLED 2
117 193
118struct drm_mode_modeinfo { 194struct drm_mode_modeinfo {
119 __u32 clock; 195 __u32 clock;
@@ -228,14 +304,16 @@ struct drm_mode_get_encoder {
228 304
229/* This is for connectors with multiple signal types. */ 305/* This is for connectors with multiple signal types. */
230/* Try to match DRM_MODE_CONNECTOR_X as closely as possible. */ 306/* Try to match DRM_MODE_CONNECTOR_X as closely as possible. */
231#define DRM_MODE_SUBCONNECTOR_Automatic 0 307enum drm_mode_subconnector {
232#define DRM_MODE_SUBCONNECTOR_Unknown 0 308 DRM_MODE_SUBCONNECTOR_Automatic = 0,
233#define DRM_MODE_SUBCONNECTOR_DVID 3 309 DRM_MODE_SUBCONNECTOR_Unknown = 0,
234#define DRM_MODE_SUBCONNECTOR_DVIA 4 310 DRM_MODE_SUBCONNECTOR_DVID = 3,
235#define DRM_MODE_SUBCONNECTOR_Composite 5 311 DRM_MODE_SUBCONNECTOR_DVIA = 4,
236#define DRM_MODE_SUBCONNECTOR_SVIDEO 6 312 DRM_MODE_SUBCONNECTOR_Composite = 5,
237#define DRM_MODE_SUBCONNECTOR_Component 8 313 DRM_MODE_SUBCONNECTOR_SVIDEO = 6,
238#define DRM_MODE_SUBCONNECTOR_SCART 9 314 DRM_MODE_SUBCONNECTOR_Component = 8,
315 DRM_MODE_SUBCONNECTOR_SCART = 9,
316};
239 317
240#define DRM_MODE_CONNECTOR_Unknown 0 318#define DRM_MODE_CONNECTOR_Unknown 0
241#define DRM_MODE_CONNECTOR_VGA 1 319#define DRM_MODE_CONNECTOR_VGA 1
@@ -280,7 +358,7 @@ struct drm_mode_get_connector {
280 __u32 pad; 358 __u32 pad;
281}; 359};
282 360
283#define DRM_MODE_PROP_PENDING (1<<0) 361#define DRM_MODE_PROP_PENDING (1<<0) /* deprecated, do not use */
284#define DRM_MODE_PROP_RANGE (1<<1) 362#define DRM_MODE_PROP_RANGE (1<<1)
285#define DRM_MODE_PROP_IMMUTABLE (1<<2) 363#define DRM_MODE_PROP_IMMUTABLE (1<<2)
286#define DRM_MODE_PROP_ENUM (1<<3) /* enumerated type with text strings */ 364#define DRM_MODE_PROP_ENUM (1<<3) /* enumerated type with text strings */
@@ -400,17 +478,20 @@ struct drm_mode_fb_cmd2 {
400 * offsets[1]. Note that offsets[0] will generally 478 * offsets[1]. Note that offsets[0] will generally
401 * be 0 (but this is not required). 479 * be 0 (but this is not required).
402 * 480 *
403 * To accommodate tiled, compressed, etc formats, a per-plane 481 * To accommodate tiled, compressed, etc formats, a
404 * modifier can be specified. The default value of zero 482 * modifier can be specified. The default value of zero
405 * indicates "native" format as specified by the fourcc. 483 * indicates "native" format as specified by the fourcc.
406 * Vendor specific modifier token. This allows, for example, 484 * Vendor specific modifier token. Note that even though
407 * different tiling/swizzling pattern on different planes. 485 * it looks like we have a modifier per-plane, we in fact
408 * See discussion above of DRM_FORMAT_MOD_xxx. 486 * do not. The modifier for each plane must be identical.
487 * Thus all combinations of different data layouts for
488 * multi plane formats must be enumerated as separate
489 * modifiers.
409 */ 490 */
410 __u32 handles[4]; 491 __u32 handles[4];
411 __u32 pitches[4]; /* pitch for each plane */ 492 __u32 pitches[4]; /* pitch for each plane */
412 __u32 offsets[4]; /* offset of each plane */ 493 __u32 offsets[4]; /* offset of each plane */
413 __u64 modifier[4]; /* ie, tiling, compressed (per plane) */ 494 __u64 modifier[4]; /* ie, tiling, compress */
414}; 495};
415 496
416#define DRM_MODE_FB_DIRTY_ANNOTATE_COPY 0x01 497#define DRM_MODE_FB_DIRTY_ANNOTATE_COPY 0x01
@@ -512,8 +593,11 @@ struct drm_mode_crtc_lut {
512}; 593};
513 594
514struct drm_color_ctm { 595struct drm_color_ctm {
515 /* Conversion matrix in S31.32 format. */ 596 /*
516 __s64 matrix[9]; 597 * Conversion matrix in S31.32 sign-magnitude
598 * (not two's complement!) format.
599 */
600 __u64 matrix[9];
517}; 601};
518 602
519struct drm_color_lut { 603struct drm_color_lut {
@@ -637,13 +721,6 @@ struct drm_mode_destroy_dumb {
637 DRM_MODE_ATOMIC_NONBLOCK |\ 721 DRM_MODE_ATOMIC_NONBLOCK |\
638 DRM_MODE_ATOMIC_ALLOW_MODESET) 722 DRM_MODE_ATOMIC_ALLOW_MODESET)
639 723
640#define DRM_MODE_ATOMIC_FLAGS (\
641 DRM_MODE_PAGE_FLIP_EVENT |\
642 DRM_MODE_PAGE_FLIP_ASYNC |\
643 DRM_MODE_ATOMIC_TEST_ONLY |\
644 DRM_MODE_ATOMIC_NONBLOCK |\
645 DRM_MODE_ATOMIC_ALLOW_MODESET)
646
647struct drm_mode_atomic { 724struct drm_mode_atomic {
648 __u32 flags; 725 __u32 flags;
649 __u32 count_objs; 726 __u32 count_objs;
@@ -655,6 +732,56 @@ struct drm_mode_atomic {
655 __u64 user_data; 732 __u64 user_data;
656}; 733};
657 734
735struct drm_format_modifier_blob {
736#define FORMAT_BLOB_CURRENT 1
737 /* Version of this blob format */
738 __u32 version;
739
740 /* Flags */
741 __u32 flags;
742
743 /* Number of fourcc formats supported */
744 __u32 count_formats;
745
746 /* Where in this blob the formats exist (in bytes) */
747 __u32 formats_offset;
748
749 /* Number of drm_format_modifiers */
750 __u32 count_modifiers;
751
752 /* Where in this blob the modifiers exist (in bytes) */
753 __u32 modifiers_offset;
754
755 /* __u32 formats[] */
756 /* struct drm_format_modifier modifiers[] */
757};
758
759struct drm_format_modifier {
760 /* Bitmask of formats in get_plane format list this info applies to. The
761 * offset allows a sliding window of which 64 formats (bits).
762 *
763 * Some examples:
764 * In today's world with < 65 formats, and formats 0, and 2 are
765 * supported
766 * 0x0000000000000005
767 * ^-offset = 0, formats = 5
768 *
769 * If the number formats grew to 128, and formats 98-102 are
770 * supported with the modifier:
771 *
772 * 0x0000007c00000000 0000000000000000
773 * ^
774 * |__offset = 64, formats = 0x7c00000000
775 *
776 */
777 __u64 formats;
778 __u32 offset;
779 __u32 pad;
780
781 /* The modifier that applies to the >get_plane format list bitmask. */
782 __u64 modifier;
783};
784
658/** 785/**
659 * Create a new 'blob' data property, copying length bytes from data pointer, 786 * Create a new 'blob' data property, copying length bytes from data pointer,
660 * and returning new blob ID. 787 * and returning new blob ID.
@@ -675,6 +802,72 @@ struct drm_mode_destroy_blob {
675 __u32 blob_id; 802 __u32 blob_id;
676}; 803};
677 804
805/**
806 * Lease mode resources, creating another drm_master.
807 */
808struct drm_mode_create_lease {
809 /** Pointer to array of object ids (__u32) */
810 __u64 object_ids;
811 /** Number of object ids */
812 __u32 object_count;
813 /** flags for new FD (O_CLOEXEC, etc) */
814 __u32 flags;
815
816 /** Return: unique identifier for lessee. */
817 __u32 lessee_id;
818 /** Return: file descriptor to new drm_master file */
819 __u32 fd;
820};
821
822/**
823 * List lesses from a drm_master
824 */
825struct drm_mode_list_lessees {
826 /** Number of lessees.
827 * On input, provides length of the array.
828 * On output, provides total number. No
829 * more than the input number will be written
830 * back, so two calls can be used to get
831 * the size and then the data.
832 */
833 __u32 count_lessees;
834 __u32 pad;
835
836 /** Pointer to lessees.
837 * pointer to __u64 array of lessee ids
838 */
839 __u64 lessees_ptr;
840};
841
842/**
843 * Get leased objects
844 */
845struct drm_mode_get_lease {
846 /** Number of leased objects.
847 * On input, provides length of the array.
848 * On output, provides total number. No
849 * more than the input number will be written
850 * back, so two calls can be used to get
851 * the size and then the data.
852 */
853 __u32 count_objects;
854 __u32 pad;
855
856 /** Pointer to objects.
857 * pointer to __u32 array of object ids
858 */
859 __u64 objects_ptr;
860};
861
862/**
863 * Revoke lease
864 */
865struct drm_mode_revoke_lease {
866 /** Unique ID of lessee
867 */
868 __u32 lessee_id;
869};
870
678#if defined(__cplusplus) 871#if defined(__cplusplus)
679} 872}
680#endif 873#endif
diff --git a/include/drm/drm_sarea.h b/include/drm/drm_sarea.h
index 502934ed..93025be8 100644
--- a/include/drm/drm_sarea.h
+++ b/include/drm/drm_sarea.h
@@ -34,6 +34,10 @@
34 34
35#include "drm.h" 35#include "drm.h"
36 36
37#if defined(__cplusplus)
38extern "C" {
39#endif
40
37/* SAREA area needs to be at least a page */ 41/* SAREA area needs to be at least a page */
38#if defined(__alpha__) 42#if defined(__alpha__)
39#define SAREA_MAX 0x2000U 43#define SAREA_MAX 0x2000U
@@ -81,4 +85,8 @@ typedef struct drm_sarea_drawable drm_sarea_drawable_t;
81typedef struct drm_sarea_frame drm_sarea_frame_t; 85typedef struct drm_sarea_frame drm_sarea_frame_t;
82typedef struct drm_sarea drm_sarea_t; 86typedef struct drm_sarea drm_sarea_t;
83 87
88#if defined(__cplusplus)
89}
90#endif
91
84#endif /* _DRM_SAREA_H_ */ 92#endif /* _DRM_SAREA_H_ */
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index 5ebe0462..16e452aa 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -86,6 +86,62 @@ enum i915_mocs_table_index {
86 I915_MOCS_CACHED, 86 I915_MOCS_CACHED,
87}; 87};
88 88
89/*
90 * Different engines serve different roles, and there may be more than one
91 * engine serving each role. enum drm_i915_gem_engine_class provides a
92 * classification of the role of the engine, which may be used when requesting
93 * operations to be performed on a certain subset of engines, or for providing
94 * information about that group.
95 */
96enum drm_i915_gem_engine_class {
97 I915_ENGINE_CLASS_RENDER = 0,
98 I915_ENGINE_CLASS_COPY = 1,
99 I915_ENGINE_CLASS_VIDEO = 2,
100 I915_ENGINE_CLASS_VIDEO_ENHANCE = 3,
101
102 I915_ENGINE_CLASS_INVALID = -1
103};
104
105/**
106 * DOC: perf_events exposed by i915 through /sys/bus/event_sources/drivers/i915
107 *
108 */
109
110enum drm_i915_pmu_engine_sample {
111 I915_SAMPLE_BUSY = 0,
112 I915_SAMPLE_WAIT = 1,
113 I915_SAMPLE_SEMA = 2
114};
115
116#define I915_PMU_SAMPLE_BITS (4)
117#define I915_PMU_SAMPLE_MASK (0xf)
118#define I915_PMU_SAMPLE_INSTANCE_BITS (8)
119#define I915_PMU_CLASS_SHIFT \
120 (I915_PMU_SAMPLE_BITS + I915_PMU_SAMPLE_INSTANCE_BITS)
121
122#define __I915_PMU_ENGINE(class, instance, sample) \
123 ((class) << I915_PMU_CLASS_SHIFT | \
124 (instance) << I915_PMU_SAMPLE_BITS | \
125 (sample))
126
127#define I915_PMU_ENGINE_BUSY(class, instance) \
128 __I915_PMU_ENGINE(class, instance, I915_SAMPLE_BUSY)
129
130#define I915_PMU_ENGINE_WAIT(class, instance) \
131 __I915_PMU_ENGINE(class, instance, I915_SAMPLE_WAIT)
132
133#define I915_PMU_ENGINE_SEMA(class, instance) \
134 __I915_PMU_ENGINE(class, instance, I915_SAMPLE_SEMA)
135
136#define __I915_PMU_OTHER(x) (__I915_PMU_ENGINE(0xff, 0xff, 0xf) + 1 + (x))
137
138#define I915_PMU_ACTUAL_FREQUENCY __I915_PMU_OTHER(0)
139#define I915_PMU_REQUESTED_FREQUENCY __I915_PMU_OTHER(1)
140#define I915_PMU_INTERRUPTS __I915_PMU_OTHER(2)
141#define I915_PMU_RC6_RESIDENCY __I915_PMU_OTHER(3)
142
143#define I915_PMU_LAST I915_PMU_RC6_RESIDENCY
144
89/* Each region is a minimum of 16k, and there are at most 255 of them. 145/* Each region is a minimum of 16k, and there are at most 255 of them.
90 */ 146 */
91#define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use 147#define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use
@@ -260,6 +316,9 @@ typedef struct _drm_i915_sarea {
260#define DRM_I915_GEM_CONTEXT_GETPARAM 0x34 316#define DRM_I915_GEM_CONTEXT_GETPARAM 0x34
261#define DRM_I915_GEM_CONTEXT_SETPARAM 0x35 317#define DRM_I915_GEM_CONTEXT_SETPARAM 0x35
262#define DRM_I915_PERF_OPEN 0x36 318#define DRM_I915_PERF_OPEN 0x36
319#define DRM_I915_PERF_ADD_CONFIG 0x37
320#define DRM_I915_PERF_REMOVE_CONFIG 0x38
321#define DRM_I915_QUERY 0x39
263 322
264#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) 323#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
265#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) 324#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -315,6 +374,9 @@ typedef struct _drm_i915_sarea {
315#define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param) 374#define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param)
316#define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param) 375#define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param)
317#define DRM_IOCTL_I915_PERF_OPEN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param) 376#define DRM_IOCTL_I915_PERF_OPEN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param)
377#define DRM_IOCTL_I915_PERF_ADD_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config)
378#define DRM_IOCTL_I915_PERF_REMOVE_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64)
379#define DRM_IOCTL_I915_QUERY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_QUERY, struct drm_i915_query)
318 380
319/* Allow drivers to submit batchbuffers directly to hardware, relying 381/* Allow drivers to submit batchbuffers directly to hardware, relying
320 * on the security mechanisms provided by hardware. 382 * on the security mechanisms provided by hardware.
@@ -393,10 +455,20 @@ typedef struct drm_i915_irq_wait {
393#define I915_PARAM_MIN_EU_IN_POOL 39 455#define I915_PARAM_MIN_EU_IN_POOL 39
394#define I915_PARAM_MMAP_GTT_VERSION 40 456#define I915_PARAM_MMAP_GTT_VERSION 40
395 457
396/* Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution 458/*
459 * Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution
397 * priorities and the driver will attempt to execute batches in priority order. 460 * priorities and the driver will attempt to execute batches in priority order.
461 * The param returns a capability bitmask, nonzero implies that the scheduler
462 * is enabled, with different features present according to the mask.
463 *
464 * The initial priority for each batch is supplied by the context and is
465 * controlled via I915_CONTEXT_PARAM_PRIORITY.
398 */ 466 */
399#define I915_PARAM_HAS_SCHEDULER 41 467#define I915_PARAM_HAS_SCHEDULER 41
468#define I915_SCHEDULER_CAP_ENABLED (1ul << 0)
469#define I915_SCHEDULER_CAP_PRIORITY (1ul << 1)
470#define I915_SCHEDULER_CAP_PREEMPTION (1ul << 2)
471
400#define I915_PARAM_HUC_STATUS 42 472#define I915_PARAM_HUC_STATUS 42
401 473
402/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of 474/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of
@@ -412,6 +484,51 @@ typedef struct drm_i915_irq_wait {
412 */ 484 */
413#define I915_PARAM_HAS_EXEC_FENCE 44 485#define I915_PARAM_HAS_EXEC_FENCE 44
414 486
487/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to capture
488 * user specified bufffers for post-mortem debugging of GPU hangs. See
489 * EXEC_OBJECT_CAPTURE.
490 */
491#define I915_PARAM_HAS_EXEC_CAPTURE 45
492
493#define I915_PARAM_SLICE_MASK 46
494
495/* Assuming it's uniform for each slice, this queries the mask of subslices
496 * per-slice for this system.
497 */
498#define I915_PARAM_SUBSLICE_MASK 47
499
500/*
501 * Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying the batch buffer
502 * as the first execobject as opposed to the last. See I915_EXEC_BATCH_FIRST.
503 */
504#define I915_PARAM_HAS_EXEC_BATCH_FIRST 48
505
506/* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of
507 * drm_i915_gem_exec_fence structures. See I915_EXEC_FENCE_ARRAY.
508 */
509#define I915_PARAM_HAS_EXEC_FENCE_ARRAY 49
510
511/*
512 * Query whether every context (both per-file default and user created) is
513 * isolated (insofar as HW supports). If this parameter is not true, then
514 * freshly created contexts may inherit values from an existing context,
515 * rather than default HW values. If true, it also ensures (insofar as HW
516 * supports) that all state set by this context will not leak to any other
517 * context.
518 *
519 * As not every engine across every gen support contexts, the returned
520 * value reports the support of context isolation for individual engines by
521 * returning a bitmask of each engine class set to true if that class supports
522 * isolation.
523 */
524#define I915_PARAM_HAS_CONTEXT_ISOLATION 50
525
526/* Frequency of the command streamer timestamps given by the *_TIMESTAMP
527 * registers. This used to be fixed per platform but from CNL onwards, this
528 * might vary depending on the parts.
529 */
530#define I915_PARAM_CS_TIMESTAMP_FREQUENCY 51
531
415typedef struct drm_i915_getparam { 532typedef struct drm_i915_getparam {
416 __s32 param; 533 __s32 param;
417 /* 534 /*
@@ -666,6 +783,8 @@ struct drm_i915_gem_relocation_entry {
666#define I915_GEM_DOMAIN_VERTEX 0x00000020 783#define I915_GEM_DOMAIN_VERTEX 0x00000020
667/** GTT domain - aperture and scanout */ 784/** GTT domain - aperture and scanout */
668#define I915_GEM_DOMAIN_GTT 0x00000040 785#define I915_GEM_DOMAIN_GTT 0x00000040
786/** WC domain - uncached access */
787#define I915_GEM_DOMAIN_WC 0x00000080
669/** @} */ 788/** @} */
670 789
671struct drm_i915_gem_exec_object { 790struct drm_i915_gem_exec_object {
@@ -773,8 +892,15 @@ struct drm_i915_gem_exec_object2 {
773 * I915_PARAM_HAS_EXEC_FENCE to order execbufs and execute them asynchronously. 892 * I915_PARAM_HAS_EXEC_FENCE to order execbufs and execute them asynchronously.
774 */ 893 */
775#define EXEC_OBJECT_ASYNC (1<<6) 894#define EXEC_OBJECT_ASYNC (1<<6)
895/* Request that the contents of this execobject be copied into the error
896 * state upon a GPU hang involving this batch for post-mortem debugging.
897 * These buffers are recorded in no particular order as "user" in
898 * /sys/class/drm/cardN/error. Query I915_PARAM_HAS_EXEC_CAPTURE to see
899 * if the kernel supports this flag.
900 */
901#define EXEC_OBJECT_CAPTURE (1<<7)
776/* All remaining bits are MBZ and RESERVED FOR FUTURE USE */ 902/* All remaining bits are MBZ and RESERVED FOR FUTURE USE */
777#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_ASYNC<<1) 903#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_CAPTURE<<1)
778 __u64 flags; 904 __u64 flags;
779 905
780 union { 906 union {
@@ -784,6 +910,18 @@ struct drm_i915_gem_exec_object2 {
784 __u64 rsvd2; 910 __u64 rsvd2;
785}; 911};
786 912
913struct drm_i915_gem_exec_fence {
914 /**
915 * User's handle for a drm_syncobj to wait on or signal.
916 */
917 __u32 handle;
918
919#define I915_EXEC_FENCE_WAIT (1<<0)
920#define I915_EXEC_FENCE_SIGNAL (1<<1)
921#define __I915_EXEC_FENCE_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SIGNAL << 1))
922 __u32 flags;
923};
924
787struct drm_i915_gem_execbuffer2 { 925struct drm_i915_gem_execbuffer2 {
788 /** 926 /**
789 * List of gem_exec_object2 structs 927 * List of gem_exec_object2 structs
@@ -798,7 +936,11 @@ struct drm_i915_gem_execbuffer2 {
798 __u32 DR1; 936 __u32 DR1;
799 __u32 DR4; 937 __u32 DR4;
800 __u32 num_cliprects; 938 __u32 num_cliprects;
801 /** This is a struct drm_clip_rect *cliprects */ 939 /**
940 * This is a struct drm_clip_rect *cliprects if I915_EXEC_FENCE_ARRAY
941 * is not set. If I915_EXEC_FENCE_ARRAY is set, then this is a
942 * struct drm_i915_gem_exec_fence *fences.
943 */
802 __u64 cliprects_ptr; 944 __u64 cliprects_ptr;
803#define I915_EXEC_RING_MASK (7<<0) 945#define I915_EXEC_RING_MASK (7<<0)
804#define I915_EXEC_DEFAULT (0<<0) 946#define I915_EXEC_DEFAULT (0<<0)
@@ -889,7 +1031,24 @@ struct drm_i915_gem_execbuffer2 {
889 */ 1031 */
890#define I915_EXEC_FENCE_OUT (1<<17) 1032#define I915_EXEC_FENCE_OUT (1<<17)
891 1033
892#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_OUT<<1)) 1034/*
1035 * Traditionally the execbuf ioctl has only considered the final element in
1036 * the execobject[] to be the executable batch. Often though, the client
1037 * will known the batch object prior to construction and being able to place
1038 * it into the execobject[] array first can simplify the relocation tracking.
1039 * Setting I915_EXEC_BATCH_FIRST tells execbuf to use element 0 of the
1040 * execobject[] as the * batch instead (the default is to use the last
1041 * element).
1042 */
1043#define I915_EXEC_BATCH_FIRST (1<<18)
1044
1045/* Setting I915_FENCE_ARRAY implies that num_cliprects and cliprects_ptr
1046 * define an array of i915_gem_exec_fence structures which specify a set of
1047 * dma fences to wait upon or signal.
1048 */
1049#define I915_EXEC_FENCE_ARRAY (1<<19)
1050
1051#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_ARRAY<<1))
893 1052
894#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff) 1053#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
895#define i915_execbuffer2_set_context_id(eb2, context) \ 1054#define i915_execbuffer2_set_context_id(eb2, context) \
@@ -1201,7 +1360,9 @@ struct drm_intel_overlay_attrs {
1201 * active on a given plane. 1360 * active on a given plane.
1202 */ 1361 */
1203 1362
1204#define I915_SET_COLORKEY_NONE (1<<0) /* disable color key matching */ 1363#define I915_SET_COLORKEY_NONE (1<<0) /* Deprecated. Instead set
1364 * flags==0 to disable colorkeying.
1365 */
1205#define I915_SET_COLORKEY_DESTINATION (1<<1) 1366#define I915_SET_COLORKEY_DESTINATION (1<<1)
1206#define I915_SET_COLORKEY_SOURCE (1<<2) 1367#define I915_SET_COLORKEY_SOURCE (1<<2)
1207struct drm_intel_sprite_colorkey { 1368struct drm_intel_sprite_colorkey {
@@ -1239,14 +1400,16 @@ struct drm_i915_reg_read {
1239 * be specified 1400 * be specified
1240 */ 1401 */
1241 __u64 offset; 1402 __u64 offset;
1403#define I915_REG_READ_8B_WA (1ul << 0)
1404
1242 __u64 val; /* Return value */ 1405 __u64 val; /* Return value */
1243}; 1406};
1244/* Known registers: 1407/* Known registers:
1245 * 1408 *
1246 * Render engine timestamp - 0x2358 + 64bit - gen7+ 1409 * Render engine timestamp - 0x2358 + 64bit - gen7+
1247 * - Note this register returns an invalid value if using the default 1410 * - Note this register returns an invalid value if using the default
1248 * single instruction 8byte read, in order to workaround that use 1411 * single instruction 8byte read, in order to workaround that pass
1249 * offset (0x2538 | 1) instead. 1412 * flag I915_REG_READ_8B_WA in offset field.
1250 * 1413 *
1251 */ 1414 */
1252 1415
@@ -1289,17 +1452,26 @@ struct drm_i915_gem_context_param {
1289#define I915_CONTEXT_PARAM_GTT_SIZE 0x3 1452#define I915_CONTEXT_PARAM_GTT_SIZE 0x3
1290#define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE 0x4 1453#define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE 0x4
1291#define I915_CONTEXT_PARAM_BANNABLE 0x5 1454#define I915_CONTEXT_PARAM_BANNABLE 0x5
1455#define I915_CONTEXT_PARAM_PRIORITY 0x6
1456#define I915_CONTEXT_MAX_USER_PRIORITY 1023 /* inclusive */
1457#define I915_CONTEXT_DEFAULT_PRIORITY 0
1458#define I915_CONTEXT_MIN_USER_PRIORITY -1023 /* inclusive */
1292 __u64 value; 1459 __u64 value;
1293}; 1460};
1294 1461
1295enum drm_i915_oa_format { 1462enum drm_i915_oa_format {
1296 I915_OA_FORMAT_A13 = 1, 1463 I915_OA_FORMAT_A13 = 1, /* HSW only */
1297 I915_OA_FORMAT_A29, 1464 I915_OA_FORMAT_A29, /* HSW only */
1298 I915_OA_FORMAT_A13_B8_C8, 1465 I915_OA_FORMAT_A13_B8_C8, /* HSW only */
1299 I915_OA_FORMAT_B4_C8, 1466 I915_OA_FORMAT_B4_C8, /* HSW only */
1300 I915_OA_FORMAT_A45_B8_C8, 1467 I915_OA_FORMAT_A45_B8_C8, /* HSW only */
1301 I915_OA_FORMAT_B4_C8_A16, 1468 I915_OA_FORMAT_B4_C8_A16, /* HSW only */
1302 I915_OA_FORMAT_C4_B8, 1469 I915_OA_FORMAT_C4_B8, /* HSW+ */
1470
1471 /* Gen8+ */
1472 I915_OA_FORMAT_A12,
1473 I915_OA_FORMAT_A12_B8_C8,
1474 I915_OA_FORMAT_A32u40_A4u32_B8_C8,
1303 1475
1304 I915_OA_FORMAT_MAX /* non-ABI */ 1476 I915_OA_FORMAT_MAX /* non-ABI */
1305}; 1477};
@@ -1424,6 +1596,127 @@ enum drm_i915_perf_record_type {
1424 DRM_I915_PERF_RECORD_MAX /* non-ABI */ 1596 DRM_I915_PERF_RECORD_MAX /* non-ABI */
1425}; 1597};
1426 1598
1599/**
1600 * Structure to upload perf dynamic configuration into the kernel.
1601 */
1602struct drm_i915_perf_oa_config {
1603 /** String formatted like "%08x-%04x-%04x-%04x-%012x" */
1604 char uuid[36];
1605
1606 __u32 n_mux_regs;
1607 __u32 n_boolean_regs;
1608 __u32 n_flex_regs;
1609
1610 /*
1611 * These fields are pointers to tuples of u32 values (register address,
1612 * value). For example the expected length of the buffer pointed by
1613 * mux_regs_ptr is (2 * sizeof(u32) * n_mux_regs).
1614 */
1615 __u64 mux_regs_ptr;
1616 __u64 boolean_regs_ptr;
1617 __u64 flex_regs_ptr;
1618};
1619
1620struct drm_i915_query_item {
1621 __u64 query_id;
1622#define DRM_I915_QUERY_TOPOLOGY_INFO 1
1623
1624 /*
1625 * When set to zero by userspace, this is filled with the size of the
1626 * data to be written at the data_ptr pointer. The kernel sets this
1627 * value to a negative value to signal an error on a particular query
1628 * item.
1629 */
1630 __s32 length;
1631
1632 /*
1633 * Unused for now. Must be cleared to zero.
1634 */
1635 __u32 flags;
1636
1637 /*
1638 * Data will be written at the location pointed by data_ptr when the
1639 * value of length matches the length of the data to be written by the
1640 * kernel.
1641 */
1642 __u64 data_ptr;
1643};
1644
1645struct drm_i915_query {
1646 __u32 num_items;
1647
1648 /*
1649 * Unused for now. Must be cleared to zero.
1650 */
1651 __u32 flags;
1652
1653 /*
1654 * This points to an array of num_items drm_i915_query_item structures.
1655 */
1656 __u64 items_ptr;
1657};
1658
1659/*
1660 * Data written by the kernel with query DRM_I915_QUERY_TOPOLOGY_INFO :
1661 *
1662 * data: contains the 3 pieces of information :
1663 *
1664 * - the slice mask with one bit per slice telling whether a slice is
1665 * available. The availability of slice X can be queried with the following
1666 * formula :
1667 *
1668 * (data[X / 8] >> (X % 8)) & 1
1669 *
1670 * - the subslice mask for each slice with one bit per subslice telling
1671 * whether a subslice is available. The availability of subslice Y in slice
1672 * X can be queried with the following formula :
1673 *
1674 * (data[subslice_offset +
1675 * X * subslice_stride +
1676 * Y / 8] >> (Y % 8)) & 1
1677 *
1678 * - the EU mask for each subslice in each slice with one bit per EU telling
1679 * whether an EU is available. The availability of EU Z in subslice Y in
1680 * slice X can be queried with the following formula :
1681 *
1682 * (data[eu_offset +
1683 * (X * max_subslices + Y) * eu_stride +
1684 * Z / 8] >> (Z % 8)) & 1
1685 */
1686struct drm_i915_query_topology_info {
1687 /*
1688 * Unused for now. Must be cleared to zero.
1689 */
1690 __u16 flags;
1691
1692 __u16 max_slices;
1693 __u16 max_subslices;
1694 __u16 max_eus_per_subslice;
1695
1696 /*
1697 * Offset in data[] at which the subslice masks are stored.
1698 */
1699 __u16 subslice_offset;
1700
1701 /*
1702 * Stride at which each of the subslice masks for each slice are
1703 * stored.
1704 */
1705 __u16 subslice_stride;
1706
1707 /*
1708 * Offset in data[] at which the EU masks are stored.
1709 */
1710 __u16 eu_offset;
1711
1712 /*
1713 * Stride at which each of the EU masks for each subslice are stored.
1714 */
1715 __u16 eu_stride;
1716
1717 __u8 data[];
1718};
1719
1427#if defined(__cplusplus) 1720#if defined(__cplusplus)
1428} 1721}
1429#endif 1722#endif
diff --git a/include/drm/mga_drm.h b/include/drm/mga_drm.h
index b630e8fa..79300111 100644
--- a/include/drm/mga_drm.h
+++ b/include/drm/mga_drm.h
@@ -37,6 +37,10 @@
37 37
38#include "drm.h" 38#include "drm.h"
39 39
40#if defined(__cplusplus)
41extern "C" {
42#endif
43
40/* WARNING: If you change any of these defines, make sure to change the 44/* WARNING: If you change any of these defines, make sure to change the
41 * defines in the Xserver file (mga_sarea.h) 45 * defines in the Xserver file (mga_sarea.h)
42 */ 46 */
@@ -107,7 +111,7 @@
107 */ 111 */
108#define MGA_NR_SAREA_CLIPRECTS 8 112#define MGA_NR_SAREA_CLIPRECTS 8
109 113
110/* 2 heaps (1 for card, 1 for agp), each divided into upto 128 114/* 2 heaps (1 for card, 1 for agp), each divided into up to 128
111 * regions, subject to a minimum region size of (1<<16) == 64k. 115 * regions, subject to a minimum region size of (1<<16) == 64k.
112 * 116 *
113 * Clients may subdivide regions internally, but when sharing between 117 * Clients may subdivide regions internally, but when sharing between
@@ -248,7 +252,7 @@ typedef struct _drm_mga_sarea {
248#define DRM_MGA_DMA_BOOTSTRAP 0x0c 252#define DRM_MGA_DMA_BOOTSTRAP 0x0c
249 253
250#define DRM_IOCTL_MGA_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_INIT, drm_mga_init_t) 254#define DRM_IOCTL_MGA_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_INIT, drm_mga_init_t)
251#define DRM_IOCTL_MGA_FLUSH DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_FLUSH, drm_lock_t) 255#define DRM_IOCTL_MGA_FLUSH DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_FLUSH, struct drm_lock)
252#define DRM_IOCTL_MGA_RESET DRM_IO( DRM_COMMAND_BASE + DRM_MGA_RESET) 256#define DRM_IOCTL_MGA_RESET DRM_IO( DRM_COMMAND_BASE + DRM_MGA_RESET)
253#define DRM_IOCTL_MGA_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_MGA_SWAP) 257#define DRM_IOCTL_MGA_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_MGA_SWAP)
254#define DRM_IOCTL_MGA_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_CLEAR, drm_mga_clear_t) 258#define DRM_IOCTL_MGA_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_CLEAR, drm_mga_clear_t)
@@ -416,4 +420,8 @@ typedef struct drm_mga_getparam {
416 void *value; 420 void *value;
417} drm_mga_getparam_t; 421} drm_mga_getparam_t;
418 422
423#if defined(__cplusplus)
424}
425#endif
426
419#endif 427#endif
diff --git a/include/drm/nouveau_drm.h b/include/drm/nouveau_drm.h
index 1372f533..91d2f314 100644
--- a/include/drm/nouveau_drm.h
+++ b/include/drm/nouveau_drm.h
@@ -27,6 +27,12 @@
27 27
28#define NOUVEAU_DRM_HEADER_PATCHLEVEL 16 28#define NOUVEAU_DRM_HEADER_PATCHLEVEL 16
29 29
30#include "drm.h"
31
32#if defined(__cplusplus)
33extern "C" {
34#endif
35
30/* reserved object handles when using deprecated object APIs - these 36/* reserved object handles when using deprecated object APIs - these
31 * are here so that libdrm can allow interoperability with the new 37 * are here so that libdrm can allow interoperability with the new
32 * object APIs 38 * object APIs
@@ -106,6 +112,7 @@ struct drm_nouveau_setparam {
106#define NOUVEAU_GEM_DOMAIN_MAPPABLE (1 << 3) 112#define NOUVEAU_GEM_DOMAIN_MAPPABLE (1 << 3)
107#define NOUVEAU_GEM_DOMAIN_COHERENT (1 << 4) 113#define NOUVEAU_GEM_DOMAIN_COHERENT (1 << 4)
108 114
115#define NOUVEAU_GEM_TILE_COMP 0x00030000 /* nv50-only */
109#define NOUVEAU_GEM_TILE_LAYOUT_MASK 0x0000ff00 116#define NOUVEAU_GEM_TILE_LAYOUT_MASK 0x0000ff00
110#define NOUVEAU_GEM_TILE_16BPP 0x00000001 117#define NOUVEAU_GEM_TILE_16BPP 0x00000001
111#define NOUVEAU_GEM_TILE_32BPP 0x00000002 118#define NOUVEAU_GEM_TILE_32BPP 0x00000002
@@ -113,13 +120,13 @@ struct drm_nouveau_setparam {
113#define NOUVEAU_GEM_TILE_NONCONTIG 0x00000008 120#define NOUVEAU_GEM_TILE_NONCONTIG 0x00000008
114 121
115struct drm_nouveau_gem_info { 122struct drm_nouveau_gem_info {
116 uint32_t handle; 123 __u32 handle;
117 uint32_t domain; 124 __u32 domain;
118 uint64_t size; 125 __u64 size;
119 uint64_t offset; 126 __u64 offset;
120 uint64_t map_handle; 127 __u64 map_handle;
121 uint32_t tile_mode; 128 __u32 tile_mode;
122 uint32_t tile_flags; 129 __u32 tile_flags;
123}; 130};
124 131
125struct drm_nouveau_gem_set_tiling { 132struct drm_nouveau_gem_set_tiling {
@@ -130,23 +137,23 @@ struct drm_nouveau_gem_set_tiling {
130 137
131struct drm_nouveau_gem_new { 138struct drm_nouveau_gem_new {
132 struct drm_nouveau_gem_info info; 139 struct drm_nouveau_gem_info info;
133 uint32_t channel_hint; 140 __u32 channel_hint;
134 uint32_t align; 141 __u32 align;
135}; 142};
136 143
137#define NOUVEAU_GEM_MAX_BUFFERS 1024 144#define NOUVEAU_GEM_MAX_BUFFERS 1024
138struct drm_nouveau_gem_pushbuf_bo_presumed { 145struct drm_nouveau_gem_pushbuf_bo_presumed {
139 uint32_t valid; 146 __u32 valid;
140 uint32_t domain; 147 __u32 domain;
141 uint64_t offset; 148 __u64 offset;
142}; 149};
143 150
144struct drm_nouveau_gem_pushbuf_bo { 151struct drm_nouveau_gem_pushbuf_bo {
145 uint64_t user_priv; 152 __u64 user_priv;
146 uint32_t handle; 153 __u32 handle;
147 uint32_t read_domains; 154 __u32 read_domains;
148 uint32_t write_domains; 155 __u32 write_domains;
149 uint32_t valid_domains; 156 __u32 valid_domains;
150 struct drm_nouveau_gem_pushbuf_bo_presumed presumed; 157 struct drm_nouveau_gem_pushbuf_bo_presumed presumed;
151}; 158};
152 159
@@ -155,35 +162,35 @@ struct drm_nouveau_gem_pushbuf_bo {
155#define NOUVEAU_GEM_RELOC_OR (1 << 2) 162#define NOUVEAU_GEM_RELOC_OR (1 << 2)
156#define NOUVEAU_GEM_MAX_RELOCS 1024 163#define NOUVEAU_GEM_MAX_RELOCS 1024
157struct drm_nouveau_gem_pushbuf_reloc { 164struct drm_nouveau_gem_pushbuf_reloc {
158 uint32_t reloc_bo_index; 165 __u32 reloc_bo_index;
159 uint32_t reloc_bo_offset; 166 __u32 reloc_bo_offset;
160 uint32_t bo_index; 167 __u32 bo_index;
161 uint32_t flags; 168 __u32 flags;
162 uint32_t data; 169 __u32 data;
163 uint32_t vor; 170 __u32 vor;
164 uint32_t tor; 171 __u32 tor;
165}; 172};
166 173
167#define NOUVEAU_GEM_MAX_PUSH 512 174#define NOUVEAU_GEM_MAX_PUSH 512
168struct drm_nouveau_gem_pushbuf_push { 175struct drm_nouveau_gem_pushbuf_push {
169 uint32_t bo_index; 176 __u32 bo_index;
170 uint32_t pad; 177 __u32 pad;
171 uint64_t offset; 178 __u64 offset;
172 uint64_t length; 179 __u64 length;
173}; 180};
174 181
175struct drm_nouveau_gem_pushbuf { 182struct drm_nouveau_gem_pushbuf {
176 uint32_t channel; 183 __u32 channel;
177 uint32_t nr_buffers; 184 __u32 nr_buffers;
178 uint64_t buffers; 185 __u64 buffers;
179 uint32_t nr_relocs; 186 __u32 nr_relocs;
180 uint32_t nr_push; 187 __u32 nr_push;
181 uint64_t relocs; 188 __u64 relocs;
182 uint64_t push; 189 __u64 push;
183 uint32_t suffix0; 190 __u32 suffix0;
184 uint32_t suffix1; 191 __u32 suffix1;
185 uint64_t vram_available; 192 __u64 vram_available;
186 uint64_t gart_available; 193 __u64 gart_available;
187}; 194};
188 195
189#define NOUVEAU_GEM_PUSHBUF_2_FENCE_WAIT 0x00000001 196#define NOUVEAU_GEM_PUSHBUF_2_FENCE_WAIT 0x00000001
@@ -205,12 +212,12 @@ struct drm_nouveau_gem_pushbuf_2 {
205#define NOUVEAU_GEM_CPU_PREP_NOBLOCK 0x00000002 212#define NOUVEAU_GEM_CPU_PREP_NOBLOCK 0x00000002
206#define NOUVEAU_GEM_CPU_PREP_WRITE 0x00000004 213#define NOUVEAU_GEM_CPU_PREP_WRITE 0x00000004
207struct drm_nouveau_gem_cpu_prep { 214struct drm_nouveau_gem_cpu_prep {
208 uint32_t handle; 215 __u32 handle;
209 uint32_t flags; 216 __u32 flags;
210}; 217};
211 218
212struct drm_nouveau_gem_cpu_fini { 219struct drm_nouveau_gem_cpu_fini {
213 uint32_t handle; 220 __u32 handle;
214}; 221};
215 222
216#define NOUVEAU_GEM_AS_SPARSE 0x00000001 223#define NOUVEAU_GEM_AS_SPARSE 0x00000001
@@ -287,4 +294,7 @@ struct drm_nouveau_gem_unmap {
287#define DRM_NOUVEAU_GEM_MAP 0x56 294#define DRM_NOUVEAU_GEM_MAP 0x56
288#define DRM_NOUVEAU_GEM_UNMAP 0x57 295#define DRM_NOUVEAU_GEM_UNMAP 0x57
289 296
297#if defined(__cplusplus)
298}
299#endif
290#endif /* __NOUVEAU_DRM_H__ */ 300#endif /* __NOUVEAU_DRM_H__ */
diff --git a/include/drm/qxl_drm.h b/include/drm/qxl_drm.h
index 1e331a86..38a0dbdf 100644
--- a/include/drm/qxl_drm.h
+++ b/include/drm/qxl_drm.h
@@ -27,10 +27,14 @@
27#include <stddef.h> 27#include <stddef.h>
28#include "drm.h" 28#include "drm.h"
29 29
30#if defined(__cplusplus)
31extern "C" {
32#endif
33
30/* Please note that modifications to all structs defined here are 34/* Please note that modifications to all structs defined here are
31 * subject to backwards-compatibility constraints. 35 * subject to backwards-compatibility constraints.
32 * 36 *
33 * Do not use pointers, use uint64_t instead for 32 bit / 64 bit user/kernel 37 * Do not use pointers, use __u64 instead for 32 bit / 64 bit user/kernel
34 * compatibility Keep fields aligned to their size 38 * compatibility Keep fields aligned to their size
35 */ 39 */
36 40
@@ -48,14 +52,14 @@
48#define DRM_QXL_ALLOC_SURF 0x06 52#define DRM_QXL_ALLOC_SURF 0x06
49 53
50struct drm_qxl_alloc { 54struct drm_qxl_alloc {
51 uint32_t size; 55 __u32 size;
52 uint32_t handle; /* 0 is an invalid handle */ 56 __u32 handle; /* 0 is an invalid handle */
53}; 57};
54 58
55struct drm_qxl_map { 59struct drm_qxl_map {
56 uint64_t offset; /* use for mmap system call */ 60 __u64 offset; /* use for mmap system call */
57 uint32_t handle; 61 __u32 handle;
58 uint32_t pad; 62 __u32 pad;
59}; 63};
60 64
61/* 65/*
@@ -68,59 +72,59 @@ struct drm_qxl_map {
68#define QXL_RELOC_TYPE_SURF 2 72#define QXL_RELOC_TYPE_SURF 2
69 73
70struct drm_qxl_reloc { 74struct drm_qxl_reloc {
71 uint64_t src_offset; /* offset into src_handle or src buffer */ 75 __u64 src_offset; /* offset into src_handle or src buffer */
72 uint64_t dst_offset; /* offset in dest handle */ 76 __u64 dst_offset; /* offset in dest handle */
73 uint32_t src_handle; /* dest handle to compute address from */ 77 __u32 src_handle; /* dest handle to compute address from */
74 uint32_t dst_handle; /* 0 if to command buffer */ 78 __u32 dst_handle; /* 0 if to command buffer */
75 uint32_t reloc_type; 79 __u32 reloc_type;
76 uint32_t pad; 80 __u32 pad;
77}; 81};
78 82
79struct drm_qxl_command { 83struct drm_qxl_command {
80 uint64_t command; /* void* */ 84 __u64 command; /* void* */
81 uint64_t relocs; /* struct drm_qxl_reloc* */ 85 __u64 relocs; /* struct drm_qxl_reloc* */
82 uint32_t type; 86 __u32 type;
83 uint32_t command_size; 87 __u32 command_size;
84 uint32_t relocs_num; 88 __u32 relocs_num;
85 uint32_t pad; 89 __u32 pad;
86}; 90};
87 91
88/* XXX: call it drm_qxl_commands? */ 92/* XXX: call it drm_qxl_commands? */
89struct drm_qxl_execbuffer { 93struct drm_qxl_execbuffer {
90 uint32_t flags; /* for future use */ 94 __u32 flags; /* for future use */
91 uint32_t commands_num; 95 __u32 commands_num;
92 uint64_t commands; /* struct drm_qxl_command* */ 96 __u64 commands; /* struct drm_qxl_command* */
93}; 97};
94 98
95struct drm_qxl_update_area { 99struct drm_qxl_update_area {
96 uint32_t handle; 100 __u32 handle;
97 uint32_t top; 101 __u32 top;
98 uint32_t left; 102 __u32 left;
99 uint32_t bottom; 103 __u32 bottom;
100 uint32_t right; 104 __u32 right;
101 uint32_t pad; 105 __u32 pad;
102}; 106};
103 107
104#define QXL_PARAM_NUM_SURFACES 1 /* rom->n_surfaces */ 108#define QXL_PARAM_NUM_SURFACES 1 /* rom->n_surfaces */
105#define QXL_PARAM_MAX_RELOCS 2 109#define QXL_PARAM_MAX_RELOCS 2
106struct drm_qxl_getparam { 110struct drm_qxl_getparam {
107 uint64_t param; 111 __u64 param;
108 uint64_t value; 112 __u64 value;
109}; 113};
110 114
111/* these are one bit values */ 115/* these are one bit values */
112struct drm_qxl_clientcap { 116struct drm_qxl_clientcap {
113 uint32_t index; 117 __u32 index;
114 uint32_t pad; 118 __u32 pad;
115}; 119};
116 120
117struct drm_qxl_alloc_surf { 121struct drm_qxl_alloc_surf {
118 uint32_t format; 122 __u32 format;
119 uint32_t width; 123 __u32 width;
120 uint32_t height; 124 __u32 height;
121 int32_t stride; 125 __s32 stride;
122 uint32_t handle; 126 __u32 handle;
123 uint32_t pad; 127 __u32 pad;
124}; 128};
125 129
126#define DRM_IOCTL_QXL_ALLOC \ 130#define DRM_IOCTL_QXL_ALLOC \
@@ -149,4 +153,8 @@ struct drm_qxl_alloc_surf {
149 DRM_IOWR(DRM_COMMAND_BASE + DRM_QXL_ALLOC_SURF,\ 153 DRM_IOWR(DRM_COMMAND_BASE + DRM_QXL_ALLOC_SURF,\
150 struct drm_qxl_alloc_surf) 154 struct drm_qxl_alloc_surf)
151 155
156#if defined(__cplusplus)
157}
158#endif
159
152#endif 160#endif
diff --git a/include/drm/r128_drm.h b/include/drm/r128_drm.h
index ede78ff9..bf431a02 100644
--- a/include/drm/r128_drm.h
+++ b/include/drm/r128_drm.h
@@ -33,6 +33,12 @@
33#ifndef __R128_DRM_H__ 33#ifndef __R128_DRM_H__
34#define __R128_DRM_H__ 34#define __R128_DRM_H__
35 35
36#include "drm.h"
37
38#if defined(__cplusplus)
39extern "C" {
40#endif
41
36/* WARNING: If you change any of these defines, make sure to change the 42/* WARNING: If you change any of these defines, make sure to change the
37 * defines in the X server file (r128_sarea.h) 43 * defines in the X server file (r128_sarea.h)
38 */ 44 */
@@ -323,4 +329,8 @@ typedef struct drm_r128_getparam {
323 void *value; 329 void *value;
324} drm_r128_getparam_t; 330} drm_r128_getparam_t;
325 331
332#if defined(__cplusplus)
333}
334#endif
335
326#endif 336#endif
diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h
index f09cc04c..a1e385d6 100644
--- a/include/drm/radeon_drm.h
+++ b/include/drm/radeon_drm.h
@@ -797,9 +797,9 @@ typedef struct drm_radeon_surface_free {
797#define RADEON_GEM_DOMAIN_VRAM 0x4 797#define RADEON_GEM_DOMAIN_VRAM 0x4
798 798
799struct drm_radeon_gem_info { 799struct drm_radeon_gem_info {
800 uint64_t gart_size; 800 __u64 gart_size;
801 uint64_t vram_size; 801 __u64 vram_size;
802 uint64_t vram_visible; 802 __u64 vram_visible;
803}; 803};
804 804
805#define RADEON_GEM_NO_BACKING_STORE (1 << 0) 805#define RADEON_GEM_NO_BACKING_STORE (1 << 0)
@@ -811,11 +811,11 @@ struct drm_radeon_gem_info {
811#define RADEON_GEM_NO_CPU_ACCESS (1 << 4) 811#define RADEON_GEM_NO_CPU_ACCESS (1 << 4)
812 812
813struct drm_radeon_gem_create { 813struct drm_radeon_gem_create {
814 uint64_t size; 814 __u64 size;
815 uint64_t alignment; 815 __u64 alignment;
816 uint32_t handle; 816 __u32 handle;
817 uint32_t initial_domain; 817 __u32 initial_domain;
818 uint32_t flags; 818 __u32 flags;
819}; 819};
820 820
821/* 821/*
@@ -829,10 +829,10 @@ struct drm_radeon_gem_create {
829#define RADEON_GEM_USERPTR_REGISTER (1 << 3) 829#define RADEON_GEM_USERPTR_REGISTER (1 << 3)
830 830
831struct drm_radeon_gem_userptr { 831struct drm_radeon_gem_userptr {
832 uint64_t addr; 832 __u64 addr;
833 uint64_t size; 833 __u64 size;
834 uint32_t flags; 834 __u32 flags;
835 uint32_t handle; 835 __u32 handle;
836}; 836};
837 837
838#define RADEON_TILING_MACRO 0x1 838#define RADEON_TILING_MACRO 0x1
@@ -855,72 +855,72 @@ struct drm_radeon_gem_userptr {
855#define RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK 0xf 855#define RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK 0xf
856 856
857struct drm_radeon_gem_set_tiling { 857struct drm_radeon_gem_set_tiling {
858 uint32_t handle; 858 __u32 handle;
859 uint32_t tiling_flags; 859 __u32 tiling_flags;
860 uint32_t pitch; 860 __u32 pitch;
861}; 861};
862 862
863struct drm_radeon_gem_get_tiling { 863struct drm_radeon_gem_get_tiling {
864 uint32_t handle; 864 __u32 handle;
865 uint32_t tiling_flags; 865 __u32 tiling_flags;
866 uint32_t pitch; 866 __u32 pitch;
867}; 867};
868 868
869struct drm_radeon_gem_mmap { 869struct drm_radeon_gem_mmap {
870 uint32_t handle; 870 __u32 handle;
871 uint32_t pad; 871 __u32 pad;
872 uint64_t offset; 872 __u64 offset;
873 uint64_t size; 873 __u64 size;
874 uint64_t addr_ptr; 874 __u64 addr_ptr;
875}; 875};
876 876
877struct drm_radeon_gem_set_domain { 877struct drm_radeon_gem_set_domain {
878 uint32_t handle; 878 __u32 handle;
879 uint32_t read_domains; 879 __u32 read_domains;
880 uint32_t write_domain; 880 __u32 write_domain;
881}; 881};
882 882
883struct drm_radeon_gem_wait_idle { 883struct drm_radeon_gem_wait_idle {
884 uint32_t handle; 884 __u32 handle;
885 uint32_t pad; 885 __u32 pad;
886}; 886};
887 887
888struct drm_radeon_gem_busy { 888struct drm_radeon_gem_busy {
889 uint32_t handle; 889 __u32 handle;
890 uint32_t domain; 890 __u32 domain;
891}; 891};
892 892
893struct drm_radeon_gem_pread { 893struct drm_radeon_gem_pread {
894 /** Handle for the object being read. */ 894 /** Handle for the object being read. */
895 uint32_t handle; 895 __u32 handle;
896 uint32_t pad; 896 __u32 pad;
897 /** Offset into the object to read from */ 897 /** Offset into the object to read from */
898 uint64_t offset; 898 __u64 offset;
899 /** Length of data to read */ 899 /** Length of data to read */
900 uint64_t size; 900 __u64 size;
901 /** Pointer to write the data into. */ 901 /** Pointer to write the data into. */
902 /* void *, but pointers are not 32/64 compatible */ 902 /* void *, but pointers are not 32/64 compatible */
903 uint64_t data_ptr; 903 __u64 data_ptr;
904}; 904};
905 905
906struct drm_radeon_gem_pwrite { 906struct drm_radeon_gem_pwrite {
907 /** Handle for the object being written to. */ 907 /** Handle for the object being written to. */
908 uint32_t handle; 908 __u32 handle;
909 uint32_t pad; 909 __u32 pad;
910 /** Offset into the object to write to */ 910 /** Offset into the object to write to */
911 uint64_t offset; 911 __u64 offset;
912 /** Length of data to write */ 912 /** Length of data to write */
913 uint64_t size; 913 __u64 size;
914 /** Pointer to read the data from. */ 914 /** Pointer to read the data from. */
915 /* void *, but pointers are not 32/64 compatible */ 915 /* void *, but pointers are not 32/64 compatible */
916 uint64_t data_ptr; 916 __u64 data_ptr;
917}; 917};
918 918
919/* Sets or returns a value associated with a buffer. */ 919/* Sets or returns a value associated with a buffer. */
920struct drm_radeon_gem_op { 920struct drm_radeon_gem_op {
921 uint32_t handle; /* buffer */ 921 __u32 handle; /* buffer */
922 uint32_t op; /* RADEON_GEM_OP_* */ 922 __u32 op; /* RADEON_GEM_OP_* */
923 uint64_t value; /* input or return value */ 923 __u64 value; /* input or return value */
924}; 924};
925 925
926#define RADEON_GEM_OP_GET_INITIAL_DOMAIN 0 926#define RADEON_GEM_OP_GET_INITIAL_DOMAIN 0
@@ -940,11 +940,11 @@ struct drm_radeon_gem_op {
940#define RADEON_VM_PAGE_SNOOPED (1 << 4) 940#define RADEON_VM_PAGE_SNOOPED (1 << 4)
941 941
942struct drm_radeon_gem_va { 942struct drm_radeon_gem_va {
943 uint32_t handle; 943 __u32 handle;
944 uint32_t operation; 944 __u32 operation;
945 uint32_t vm_id; 945 __u32 vm_id;
946 uint32_t flags; 946 __u32 flags;
947 uint64_t offset; 947 __u64 offset;
948}; 948};
949 949
950#define RADEON_CHUNK_ID_RELOCS 0x01 950#define RADEON_CHUNK_ID_RELOCS 0x01
@@ -966,29 +966,29 @@ struct drm_radeon_gem_va {
966/* 0 = normal, + = higher priority, - = lower priority */ 966/* 0 = normal, + = higher priority, - = lower priority */
967 967
968struct drm_radeon_cs_chunk { 968struct drm_radeon_cs_chunk {
969 uint32_t chunk_id; 969 __u32 chunk_id;
970 uint32_t length_dw; 970 __u32 length_dw;
971 uint64_t chunk_data; 971 __u64 chunk_data;
972}; 972};
973 973
974/* drm_radeon_cs_reloc.flags */ 974/* drm_radeon_cs_reloc.flags */
975#define RADEON_RELOC_PRIO_MASK (0xf << 0) 975#define RADEON_RELOC_PRIO_MASK (0xf << 0)
976 976
977struct drm_radeon_cs_reloc { 977struct drm_radeon_cs_reloc {
978 uint32_t handle; 978 __u32 handle;
979 uint32_t read_domains; 979 __u32 read_domains;
980 uint32_t write_domain; 980 __u32 write_domain;
981 uint32_t flags; 981 __u32 flags;
982}; 982};
983 983
984struct drm_radeon_cs { 984struct drm_radeon_cs {
985 uint32_t num_chunks; 985 __u32 num_chunks;
986 uint32_t cs_id; 986 __u32 cs_id;
987 /* this points to uint64_t * which point to cs chunks */ 987 /* this points to __u64 * which point to cs chunks */
988 uint64_t chunks; 988 __u64 chunks;
989 /* updates to the limits after this CS ioctl */ 989 /* updates to the limits after this CS ioctl */
990 uint64_t gart_limit; 990 __u64 gart_limit;
991 uint64_t vram_limit; 991 __u64 vram_limit;
992}; 992};
993 993
994#define RADEON_INFO_DEVICE_ID 0x00 994#define RADEON_INFO_DEVICE_ID 0x00
@@ -1047,9 +1047,9 @@ struct drm_radeon_cs {
1047#define RADEON_INFO_GPU_RESET_COUNTER 0x26 1047#define RADEON_INFO_GPU_RESET_COUNTER 0x26
1048 1048
1049struct drm_radeon_info { 1049struct drm_radeon_info {
1050 uint32_t request; 1050 __u32 request;
1051 uint32_t pad; 1051 __u32 pad;
1052 uint64_t value; 1052 __u64 value;
1053}; 1053};
1054 1054
1055/* Those correspond to the tile index to use, this is to explicitly state 1055/* Those correspond to the tile index to use, this is to explicitly state
diff --git a/include/drm/savage_drm.h b/include/drm/savage_drm.h
index f7a75eff..1a91234e 100644
--- a/include/drm/savage_drm.h
+++ b/include/drm/savage_drm.h
@@ -26,10 +26,16 @@
26#ifndef __SAVAGE_DRM_H__ 26#ifndef __SAVAGE_DRM_H__
27#define __SAVAGE_DRM_H__ 27#define __SAVAGE_DRM_H__
28 28
29#include "drm.h"
30
31#if defined(__cplusplus)
32extern "C" {
33#endif
34
29#ifndef __SAVAGE_SAREA_DEFINES__ 35#ifndef __SAVAGE_SAREA_DEFINES__
30#define __SAVAGE_SAREA_DEFINES__ 36#define __SAVAGE_SAREA_DEFINES__
31 37
32/* 2 heaps (1 for card, 1 for agp), each divided into upto 128 38/* 2 heaps (1 for card, 1 for agp), each divided into up to 128
33 * regions, subject to a minimum region size of (1<<16) == 64k. 39 * regions, subject to a minimum region size of (1<<16) == 64k.
34 * 40 *
35 * Clients may subdivide regions internally, but when sharing between 41 * Clients may subdivide regions internally, but when sharing between
@@ -63,10 +69,10 @@ typedef struct _drm_savage_sarea {
63#define DRM_SAVAGE_BCI_EVENT_EMIT 0x02 69#define DRM_SAVAGE_BCI_EVENT_EMIT 0x02
64#define DRM_SAVAGE_BCI_EVENT_WAIT 0x03 70#define DRM_SAVAGE_BCI_EVENT_WAIT 0x03
65 71
66#define DRM_IOCTL_SAVAGE_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_INIT, drm_savage_init_t) 72#define DRM_IOCTL_SAVAGE_BCI_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_INIT, drm_savage_init_t)
67#define DRM_IOCTL_SAVAGE_CMDBUF DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_CMDBUF, drm_savage_cmdbuf_t) 73#define DRM_IOCTL_SAVAGE_BCI_CMDBUF DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_CMDBUF, drm_savage_cmdbuf_t)
68#define DRM_IOCTL_SAVAGE_EVENT_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_EMIT, drm_savage_event_emit_t) 74#define DRM_IOCTL_SAVAGE_BCI_EVENT_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_EMIT, drm_savage_event_emit_t)
69#define DRM_IOCTL_SAVAGE_EVENT_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_WAIT, drm_savage_event_wait_t) 75#define DRM_IOCTL_SAVAGE_BCI_EVENT_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_WAIT, drm_savage_event_wait_t)
70 76
71#define SAVAGE_DMA_PCI 1 77#define SAVAGE_DMA_PCI 1
72#define SAVAGE_DMA_AGP 3 78#define SAVAGE_DMA_AGP 3
@@ -207,4 +213,8 @@ union drm_savage_cmd_header {
207 } clear1; /* SAVAGE_CMD_CLEAR data */ 213 } clear1; /* SAVAGE_CMD_CLEAR data */
208}; 214};
209 215
216#if defined(__cplusplus)
217}
218#endif
219
210#endif 220#endif
diff --git a/include/drm/sis_drm.h b/include/drm/sis_drm.h
index 30f7b382..8e51bb9a 100644
--- a/include/drm/sis_drm.h
+++ b/include/drm/sis_drm.h
@@ -27,6 +27,12 @@
27#ifndef __SIS_DRM_H__ 27#ifndef __SIS_DRM_H__
28#define __SIS_DRM_H__ 28#define __SIS_DRM_H__
29 29
30#include "drm.h"
31
32#if defined(__cplusplus)
33extern "C" {
34#endif
35
30/* SiS specific ioctls */ 36/* SiS specific ioctls */
31#define NOT_USED_0_3 37#define NOT_USED_0_3
32#define DRM_SIS_FB_ALLOC 0x04 38#define DRM_SIS_FB_ALLOC 0x04
@@ -64,4 +70,8 @@ typedef struct {
64 unsigned int offset, size; 70 unsigned int offset, size;
65} drm_sis_fb_t; 71} drm_sis_fb_t;
66 72
73#if defined(__cplusplus)
74}
75#endif
76
67#endif /* __SIS_DRM_H__ */ 77#endif /* __SIS_DRM_H__ */
diff --git a/include/drm/tegra_drm.h b/include/drm/tegra_drm.h
index 1be09c4b..f01f7a11 100644
--- a/include/drm/tegra_drm.h
+++ b/include/drm/tegra_drm.h
@@ -1,23 +1,33 @@
1/* 1/*
2 * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved. 2 * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved.
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify it 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * under the terms and conditions of the GNU General Public License, 5 * copy of this software and associated documentation files (the "Software"),
6 * version 2, as published by the Free Software Foundation. 6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
7 * 10 *
8 * This program is distributed in the hope it will be useful, but WITHOUT 11 * The above copyright notice and this permission notice shall be included in
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * all copies or substantial portions of the Software.
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 * 13 *
13 * You should have received a copy of the GNU General Public License 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * along with this program. If not, see <http://www.gnu.org/licenses/>. 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
15 */ 21 */
16 22
17#ifndef _UAPI_TEGRA_DRM_H_ 23#ifndef _TEGRA_DRM_H_
18#define _UAPI_TEGRA_DRM_H_ 24#define _TEGRA_DRM_H_
19 25
20#include <drm/drm.h> 26#include "drm.h"
27
28#if defined(__cplusplus)
29extern "C" {
30#endif
21 31
22#define DRM_TEGRA_GEM_CREATE_TILED (1 << 0) 32#define DRM_TEGRA_GEM_CREATE_TILED (1 << 0)
23#define DRM_TEGRA_GEM_CREATE_BOTTOM_UP (1 << 1) 33#define DRM_TEGRA_GEM_CREATE_BOTTOM_UP (1 << 1)
@@ -229,4 +239,8 @@ struct drm_tegra_keepon {
229#define DRM_IOCTL_TEGRA_START_KEEPON DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_START_KEEPON, struct drm_tegra_keepon) 239#define DRM_IOCTL_TEGRA_START_KEEPON DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_START_KEEPON, struct drm_tegra_keepon)
230#define DRM_IOCTL_TEGRA_STOP_KEEPON DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_STOP_KEEPON, struct drm_tegra_keepon) 240#define DRM_IOCTL_TEGRA_STOP_KEEPON DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_STOP_KEEPON, struct drm_tegra_keepon)
231 241
242#if defined(__cplusplus)
243}
244#endif
245
232#endif 246#endif
diff --git a/include/drm/vc4_drm.h b/include/drm/vc4_drm.h
index 319881d8..4117117b 100644
--- a/include/drm/vc4_drm.h
+++ b/include/drm/vc4_drm.h
@@ -38,6 +38,13 @@ extern "C" {
38#define DRM_VC4_CREATE_SHADER_BO 0x05 38#define DRM_VC4_CREATE_SHADER_BO 0x05
39#define DRM_VC4_GET_HANG_STATE 0x06 39#define DRM_VC4_GET_HANG_STATE 0x06
40#define DRM_VC4_GET_PARAM 0x07 40#define DRM_VC4_GET_PARAM 0x07
41#define DRM_VC4_SET_TILING 0x08
42#define DRM_VC4_GET_TILING 0x09
43#define DRM_VC4_LABEL_BO 0x0a
44#define DRM_VC4_GEM_MADVISE 0x0b
45#define DRM_VC4_PERFMON_CREATE 0x0c
46#define DRM_VC4_PERFMON_DESTROY 0x0d
47#define DRM_VC4_PERFMON_GET_VALUES 0x0e
41 48
42#define DRM_IOCTL_VC4_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SUBMIT_CL, struct drm_vc4_submit_cl) 49#define DRM_IOCTL_VC4_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SUBMIT_CL, struct drm_vc4_submit_cl)
43#define DRM_IOCTL_VC4_WAIT_SEQNO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_SEQNO, struct drm_vc4_wait_seqno) 50#define DRM_IOCTL_VC4_WAIT_SEQNO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_SEQNO, struct drm_vc4_wait_seqno)
@@ -47,6 +54,13 @@ extern "C" {
47#define DRM_IOCTL_VC4_CREATE_SHADER_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_SHADER_BO, struct drm_vc4_create_shader_bo) 54#define DRM_IOCTL_VC4_CREATE_SHADER_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_SHADER_BO, struct drm_vc4_create_shader_bo)
48#define DRM_IOCTL_VC4_GET_HANG_STATE DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_HANG_STATE, struct drm_vc4_get_hang_state) 55#define DRM_IOCTL_VC4_GET_HANG_STATE DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_HANG_STATE, struct drm_vc4_get_hang_state)
49#define DRM_IOCTL_VC4_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_PARAM, struct drm_vc4_get_param) 56#define DRM_IOCTL_VC4_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_PARAM, struct drm_vc4_get_param)
57#define DRM_IOCTL_VC4_SET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SET_TILING, struct drm_vc4_set_tiling)
58#define DRM_IOCTL_VC4_GET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_TILING, struct drm_vc4_get_tiling)
59#define DRM_IOCTL_VC4_LABEL_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_LABEL_BO, struct drm_vc4_label_bo)
60#define DRM_IOCTL_VC4_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GEM_MADVISE, struct drm_vc4_gem_madvise)
61#define DRM_IOCTL_VC4_PERFMON_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_PERFMON_CREATE, struct drm_vc4_perfmon_create)
62#define DRM_IOCTL_VC4_PERFMON_DESTROY DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_PERFMON_DESTROY, struct drm_vc4_perfmon_destroy)
63#define DRM_IOCTL_VC4_PERFMON_GET_VALUES DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_PERFMON_GET_VALUES, struct drm_vc4_perfmon_get_values)
50 64
51struct drm_vc4_submit_rcl_surface { 65struct drm_vc4_submit_rcl_surface {
52 __u32 hindex; /* Handle index, or ~0 if not present. */ 66 __u32 hindex; /* Handle index, or ~0 if not present. */
@@ -149,12 +163,31 @@ struct drm_vc4_submit_cl {
149 __u32 pad:24; 163 __u32 pad:24;
150 164
151#define VC4_SUBMIT_CL_USE_CLEAR_COLOR (1 << 0) 165#define VC4_SUBMIT_CL_USE_CLEAR_COLOR (1 << 0)
166/* By default, the kernel gets to choose the order that the tiles are
167 * rendered in. If this is set, then the tiles will be rendered in a
168 * raster order, with the right-to-left vs left-to-right and
169 * top-to-bottom vs bottom-to-top dictated by
170 * VC4_SUBMIT_CL_RCL_ORDER_INCREASING_*. This allows overlapping
171 * blits to be implemented using the 3D engine.
172 */
173#define VC4_SUBMIT_CL_FIXED_RCL_ORDER (1 << 1)
174#define VC4_SUBMIT_CL_RCL_ORDER_INCREASING_X (1 << 2)
175#define VC4_SUBMIT_CL_RCL_ORDER_INCREASING_Y (1 << 3)
152 __u32 flags; 176 __u32 flags;
153 177
154 /* Returned value of the seqno of this render job (for the 178 /* Returned value of the seqno of this render job (for the
155 * wait ioctl). 179 * wait ioctl).
156 */ 180 */
157 __u64 seqno; 181 __u64 seqno;
182
183 /* ID of the perfmon to attach to this job. 0 means no perfmon. */
184 __u32 perfmonid;
185
186 /* Unused field to align this struct on 64 bits. Must be set to 0.
187 * If one ever needs to add an u32 field to this struct, this field
188 * can be used.
189 */
190 __u32 pad2;
158}; 191};
159 192
160/** 193/**
@@ -288,6 +321,9 @@ struct drm_vc4_get_hang_state {
288#define DRM_VC4_PARAM_SUPPORTS_BRANCHES 3 321#define DRM_VC4_PARAM_SUPPORTS_BRANCHES 3
289#define DRM_VC4_PARAM_SUPPORTS_ETC1 4 322#define DRM_VC4_PARAM_SUPPORTS_ETC1 4
290#define DRM_VC4_PARAM_SUPPORTS_THREADED_FS 5 323#define DRM_VC4_PARAM_SUPPORTS_THREADED_FS 5
324#define DRM_VC4_PARAM_SUPPORTS_FIXED_RCL_ORDER 6
325#define DRM_VC4_PARAM_SUPPORTS_MADVISE 7
326#define DRM_VC4_PARAM_SUPPORTS_PERFMON 8
291 327
292struct drm_vc4_get_param { 328struct drm_vc4_get_param {
293 __u32 param; 329 __u32 param;
@@ -295,6 +331,103 @@ struct drm_vc4_get_param {
295 __u64 value; 331 __u64 value;
296}; 332};
297 333
334struct drm_vc4_get_tiling {
335 __u32 handle;
336 __u32 flags;
337 __u64 modifier;
338};
339
340struct drm_vc4_set_tiling {
341 __u32 handle;
342 __u32 flags;
343 __u64 modifier;
344};
345
346/**
347 * struct drm_vc4_label_bo - Attach a name to a BO for debug purposes.
348 */
349struct drm_vc4_label_bo {
350 __u32 handle;
351 __u32 len;
352 __u64 name;
353};
354
355/*
356 * States prefixed with '__' are internal states and cannot be passed to the
357 * DRM_IOCTL_VC4_GEM_MADVISE ioctl.
358 */
359#define VC4_MADV_WILLNEED 0
360#define VC4_MADV_DONTNEED 1
361#define __VC4_MADV_PURGED 2
362#define __VC4_MADV_NOTSUPP 3
363
364struct drm_vc4_gem_madvise {
365 __u32 handle;
366 __u32 madv;
367 __u32 retained;
368 __u32 pad;
369};
370
371enum {
372 VC4_PERFCNT_FEP_VALID_PRIMS_NO_RENDER,
373 VC4_PERFCNT_FEP_VALID_PRIMS_RENDER,
374 VC4_PERFCNT_FEP_CLIPPED_QUADS,
375 VC4_PERFCNT_FEP_VALID_QUADS,
376 VC4_PERFCNT_TLB_QUADS_NOT_PASSING_STENCIL,
377 VC4_PERFCNT_TLB_QUADS_NOT_PASSING_Z_AND_STENCIL,
378 VC4_PERFCNT_TLB_QUADS_PASSING_Z_AND_STENCIL,
379 VC4_PERFCNT_TLB_QUADS_ZERO_COVERAGE,
380 VC4_PERFCNT_TLB_QUADS_NON_ZERO_COVERAGE,
381 VC4_PERFCNT_TLB_QUADS_WRITTEN_TO_COLOR_BUF,
382 VC4_PERFCNT_PLB_PRIMS_OUTSIDE_VIEWPORT,
383 VC4_PERFCNT_PLB_PRIMS_NEED_CLIPPING,
384 VC4_PERFCNT_PSE_PRIMS_REVERSED,
385 VC4_PERFCNT_QPU_TOTAL_IDLE_CYCLES,
386 VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_VERTEX_COORD_SHADING,
387 VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_FRAGMENT_SHADING,
388 VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_EXEC_VALID_INST,
389 VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_WAITING_TMUS,
390 VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_WAITING_SCOREBOARD,
391 VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_WAITING_VARYINGS,
392 VC4_PERFCNT_QPU_TOTAL_INST_CACHE_HIT,
393 VC4_PERFCNT_QPU_TOTAL_INST_CACHE_MISS,
394 VC4_PERFCNT_QPU_TOTAL_UNIFORM_CACHE_HIT,
395 VC4_PERFCNT_QPU_TOTAL_UNIFORM_CACHE_MISS,
396 VC4_PERFCNT_TMU_TOTAL_TEXT_QUADS_PROCESSED,
397 VC4_PERFCNT_TMU_TOTAL_TEXT_CACHE_MISS,
398 VC4_PERFCNT_VPM_TOTAL_CLK_CYCLES_VDW_STALLED,
399 VC4_PERFCNT_VPM_TOTAL_CLK_CYCLES_VCD_STALLED,
400 VC4_PERFCNT_L2C_TOTAL_L2_CACHE_HIT,
401 VC4_PERFCNT_L2C_TOTAL_L2_CACHE_MISS,
402 VC4_PERFCNT_NUM_EVENTS,
403};
404
405#define DRM_VC4_MAX_PERF_COUNTERS 16
406
407struct drm_vc4_perfmon_create {
408 __u32 id;
409 __u32 ncounters;
410 __u8 events[DRM_VC4_MAX_PERF_COUNTERS];
411};
412
413struct drm_vc4_perfmon_destroy {
414 __u32 id;
415};
416
417/*
418 * Returns the values of the performance counters tracked by this
419 * perfmon (as an array of ncounters u64 values).
420 *
421 * No implicit synchronization is performed, so the user has to
422 * guarantee that any jobs using this perfmon have already been
423 * completed (probably by blocking on the seqno returned by the
424 * last exec that used the perfmon).
425 */
426struct drm_vc4_perfmon_get_values {
427 __u32 id;
428 __u64 values_ptr;
429};
430
298#if defined(__cplusplus) 431#if defined(__cplusplus)
299} 432}
300#endif 433#endif
diff --git a/include/drm/via_drm.h b/include/drm/via_drm.h
index 182f8792..8b69e819 100644
--- a/include/drm/via_drm.h
+++ b/include/drm/via_drm.h
@@ -26,6 +26,10 @@
26 26
27#include "drm.h" 27#include "drm.h"
28 28
29#if defined(__cplusplus)
30extern "C" {
31#endif
32
29/* WARNING: These defines must be the same as what the Xserver uses. 33/* WARNING: These defines must be the same as what the Xserver uses.
30 * if you change them, you must change the defines in the Xserver. 34 * if you change them, you must change the defines in the Xserver.
31 */ 35 */
@@ -272,4 +276,8 @@ typedef struct drm_via_dmablit {
272 drm_via_blitsync_t sync; 276 drm_via_blitsync_t sync;
273} drm_via_dmablit_t; 277} drm_via_dmablit_t;
274 278
279#if defined(__cplusplus)
280}
281#endif
282
275#endif /* _VIA_DRM_H_ */ 283#endif /* _VIA_DRM_H_ */
diff --git a/include/drm/virtgpu_drm.h b/include/drm/virtgpu_drm.h
index 91a31ffe..9a781f06 100644
--- a/include/drm/virtgpu_drm.h
+++ b/include/drm/virtgpu_drm.h
@@ -63,6 +63,7 @@ struct drm_virtgpu_execbuffer {
63}; 63};
64 64
65#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */ 65#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
66#define VIRTGPU_PARAM_CAPSET_QUERY_FIX 2 /* do we have the capset fix */
66 67
67struct drm_virtgpu_getparam { 68struct drm_virtgpu_getparam {
68 __u64 param; 69 __u64 param;
diff --git a/include/drm/vmwgfx_drm.h b/include/drm/vmwgfx_drm.h
index 5b68b4d1..0bc784f5 100644
--- a/include/drm/vmwgfx_drm.h
+++ b/include/drm/vmwgfx_drm.h
@@ -30,6 +30,10 @@
30 30
31#include "drm.h" 31#include "drm.h"
32 32
33#if defined(__cplusplus)
34extern "C" {
35#endif
36
33#define DRM_VMW_MAX_SURFACE_FACES 6 37#define DRM_VMW_MAX_SURFACE_FACES 6
34#define DRM_VMW_MAX_MIP_LEVELS 24 38#define DRM_VMW_MAX_MIP_LEVELS 24
35 39
@@ -37,6 +41,7 @@
37#define DRM_VMW_GET_PARAM 0 41#define DRM_VMW_GET_PARAM 0
38#define DRM_VMW_ALLOC_DMABUF 1 42#define DRM_VMW_ALLOC_DMABUF 1
39#define DRM_VMW_UNREF_DMABUF 2 43#define DRM_VMW_UNREF_DMABUF 2
44#define DRM_VMW_HANDLE_CLOSE 2
40#define DRM_VMW_CURSOR_BYPASS 3 45#define DRM_VMW_CURSOR_BYPASS 3
41/* guarded by DRM_VMW_PARAM_NUM_STREAMS != 0*/ 46/* guarded by DRM_VMW_PARAM_NUM_STREAMS != 0*/
42#define DRM_VMW_CONTROL_STREAM 4 47#define DRM_VMW_CONTROL_STREAM 4
@@ -292,13 +297,17 @@ union drm_vmw_surface_reference_arg {
292 * @version: Allows expanding the execbuf ioctl parameters without breaking 297 * @version: Allows expanding the execbuf ioctl parameters without breaking
293 * backwards compatibility, since user-space will always tell the kernel 298 * backwards compatibility, since user-space will always tell the kernel
294 * which version it uses. 299 * which version it uses.
295 * @flags: Execbuf flags. None currently. 300 * @flags: Execbuf flags.
301 * @imported_fence_fd: FD for a fence imported from another device
296 * 302 *
297 * Argument to the DRM_VMW_EXECBUF Ioctl. 303 * Argument to the DRM_VMW_EXECBUF Ioctl.
298 */ 304 */
299 305
300#define DRM_VMW_EXECBUF_VERSION 2 306#define DRM_VMW_EXECBUF_VERSION 2
301 307
308#define DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD (1 << 0)
309#define DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD (1 << 1)
310
302struct drm_vmw_execbuf_arg { 311struct drm_vmw_execbuf_arg {
303 __u64 commands; 312 __u64 commands;
304 __u32 command_size; 313 __u32 command_size;
@@ -307,7 +316,7 @@ struct drm_vmw_execbuf_arg {
307 __u32 version; 316 __u32 version;
308 __u32 flags; 317 __u32 flags;
309 __u32 context_handle; 318 __u32 context_handle;
310 __u32 pad64; 319 __s32 imported_fence_fd;
311}; 320};
312 321
313/** 322/**
@@ -323,6 +332,7 @@ struct drm_vmw_execbuf_arg {
323 * @passed_seqno: The highest seqno number processed by the hardware 332 * @passed_seqno: The highest seqno number processed by the hardware
324 * so far. This can be used to mark user-space fence objects as signaled, and 333 * so far. This can be used to mark user-space fence objects as signaled, and
325 * to determine whether a fence seqno might be stale. 334 * to determine whether a fence seqno might be stale.
335 * @fd: FD associated with the fence, -1 if not exported
326 * @error: This member should've been set to -EFAULT on submission. 336 * @error: This member should've been set to -EFAULT on submission.
327 * The following actions should be take on completion: 337 * The following actions should be take on completion:
328 * error == -EFAULT: Fence communication failed. The host is synchronized. 338 * error == -EFAULT: Fence communication failed. The host is synchronized.
@@ -340,7 +350,7 @@ struct drm_vmw_fence_rep {
340 __u32 mask; 350 __u32 mask;
341 __u32 seqno; 351 __u32 seqno;
342 __u32 passed_seqno; 352 __u32 passed_seqno;
343 __u32 pad64; 353 __s32 fd;
344 __s32 error; 354 __s32 error;
345}; 355};
346 356
@@ -1087,4 +1097,32 @@ union drm_vmw_extended_context_arg {
1087 enum drm_vmw_extended_context req; 1097 enum drm_vmw_extended_context req;
1088 struct drm_vmw_context_arg rep; 1098 struct drm_vmw_context_arg rep;
1089}; 1099};
1100
1101/*************************************************************************/
1102/*
1103 * DRM_VMW_HANDLE_CLOSE - Close a user-space handle and release its
1104 * underlying resource.
1105 *
1106 * Note that this ioctl is overlaid on the DRM_VMW_UNREF_DMABUF Ioctl.
1107 * The ioctl arguments therefore need to be identical in layout.
1108 *
1109 */
1110
1111/**
1112 * struct drm_vmw_handle_close_arg
1113 *
1114 * @handle: Handle to close.
1115 *
1116 * Argument to the DRM_VMW_HANDLE_CLOSE Ioctl.
1117 */
1118struct drm_vmw_handle_close_arg {
1119 __u32 handle;
1120 __u32 pad64;
1121};
1122
1123
1124#if defined(__cplusplus)
1125}
1126#endif
1127
1090#endif 1128#endif