aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorRichard Yoo2017-05-09 00:32:30 -0500
committerRichard Yoo2017-05-09 01:08:15 -0500
commitd8ed3020112ded2e210e652f5a8f367d5f847157 (patch)
treea56c761e21902ba6b24df5c5b8168372ab995162 /include
parent0da99b8ab0f691ad7ec7f4c5c8a09c5df92486a1 (diff)
parentd4b8344363b4e0f0e831e5722b6df5cc0bb08df8 (diff)
downloadexternal-libgbm-d8ed3020112ded2e210e652f5a8f367d5f847157.tar.gz
external-libgbm-d8ed3020112ded2e210e652f5a8f367d5f847157.tar.xz
external-libgbm-d8ed3020112ded2e210e652f5a8f367d5f847157.zip
Merge tag libdrm-2.4.75 into aosp/master
Below is a brief summary of patches pulled in: 0da99b8a (m/master, aosp/master) Move libdrm.so to vendor partition d4b83443 (tag: libdrm-2.4.75) Bump version for 2.4.75 release dae413e4 (tag: libdrm-2.4.74) Bump version for release 317bdff1 (tag: libdrm-2.4.73) Bump version for release 8cf43127 (tag: libdrm-2.4.72) Bump version for release a44c9c31 (tag: libdrm-2.4.71) Bump version for release 20208455 (tag: android-o-preview-1, tag: android-n-mr2-preview-2, tag: android-n-mr2-preview-1, aosp/sdk-release, aosp/o-preview) add a flag control that private libdrm can be chosen Bug: 35871718 Test: aosp_arm-eng compiles Change-Id: I81985fd41d5c0d8a732705dc2a4bee8eb5d459bb
Diffstat (limited to 'include')
-rw-r--r--include/drm/README157
-rw-r--r--include/drm/amdgpu_drm.h44
-rw-r--r--include/drm/drm.h82
-rw-r--r--include/drm/drm_fourcc.h17
-rw-r--r--include/drm/drm_mode.h210
-rw-r--r--include/drm/drm_sarea.h2
-rw-r--r--include/drm/i915_drm.h315
-rw-r--r--include/drm/radeon_drm.h51
-rw-r--r--include/drm/vc4_drm.h302
-rw-r--r--include/drm/virtgpu_drm.h109
-rw-r--r--include/drm/vmwgfx_drm.h792
11 files changed, 1746 insertions, 335 deletions
diff --git a/include/drm/README b/include/drm/README
new file mode 100644
index 00000000..a50b02c0
--- /dev/null
+++ b/include/drm/README
@@ -0,0 +1,157 @@
1What are these headers ?
2------------------------
3This is the canonical source of drm headers that user space should use for
4communicating with the kernel DRM subsystem.
5
6They flow from the kernel, thus any changes must be merged there first.
7Do _not_ attempt to "fix" these by deviating from the kernel ones !
8
9
10Non-linux platforms - changes/patches
11-------------------------------------
12If your platform has local changes, please send them upstream for inclusion.
13Even if your patches don't get accepted in their current form, devs will
14give you feedback on how to address things properly.
15
16git send-email --subject-prefix="PATCH libdrm" your patches to dri-devel
17mailing list.
18
19Before doing so, please consider the following:
20 - Have the [libdrm vs kernel] headers on your platform deviated ?
21Consider unifying them first.
22
23 - Have you introduced additional ABI that's not available in Linux ?
24Propose it for [Linux kernel] upstream inclusion.
25If that doesn't work out (hopefully it never does), move it to another header
26and/or keep the change(s) local ?
27
28 - Are your changes DRI1/UMS specific ?
29There is virtually no interest/power in keeping those legacy interfaces. They
30are around due to the kernel "thou shalt not break existing user space" rule.
31
32Consider porting the driver to DRI2/KMS - all (almost?) sensible hardware is
33capable of supporting those.
34
35
36Which headers go where ?
37------------------------
38A snippet from the now-removed Makefile.am used to state:
39
40 XXX airlied says, nothing besides *_drm.h and drm*.h should be necessary.
41 however, r300 and via need their reg headers installed in order to build.
42 better solutions are welcome.
43
44Obviously the r300 and via headers are no longer around ;-)
45
46Reason behind is that the drm headers can be used as a basic communications
47channel with the respective kernel modules. If more advanced functionality is
48required one can pull the specific libdrm_$driver which is free to pull
49additional files from the kernel.
50
51For example: nouveau has nouveau/nvif/*.h while vc4 has vc4/*.h
52
53If your driver is still in prototyping/staging state, consider moving the
54$driver_drm.h into $driver and _not_ installing it. A header providing opaque
55definitions and access [via $driver_drmif.h or similar] would be a better fit.
56
57
58When and which headers to update
59--------------------------------
60Ideally all files will be synced (updated) with the latest released kernel on
61each libdrm release. Sadly that's not yet possible since quite a few headers
62differ significantly - see Outdated or Broken Headers section below.
63
64That said, it's up to the individual developers to sync with a newer version
65(from drm-next) as they see fit.
66
67
68When and how to update these files
69----------------------------------
70In order to update the files do the following:
71 - Switch to a Linux kernel tree/branch which is not rebased.
72For example: airlied/drm-next
73 - Install the headers via `make headers_install' to a separate location.
74 - Copy the drm header[s] + git add + git commit.
75 - Note: Your commit message must include:
76 a) Brief summary on the delta. If there's any change that looks like an
77API/ABI break one _must_ explicitly state why it's safe to do so.
78 b) "Generated using make headers_install."
79 c) "Generated from $tree/branch commit $sha"
80
81
82Outdated or Broken Headers
83--------------------------
84This section contains a list of headers and the respective "issues" they might
85have relative to their kernel equivalent.
86
87Nearly all headers:
88 - Missing extern C notation.
89Status: Trivial.
90
91Most UMS headers:
92 - Not using fixed size integers - compat ioctls are broken.
93Status: ?
94Promote to fixed size ints, which match the current (32bit) ones.
95
96
97amdgpu_drm.h
98 - Using the stdint.h uint*_t over the respective __u* ones
99Status: Trivial.
100
101drm_mode.h
102 - Missing DPI encode/connector pair.
103Status: Trivial.
104
105i915_drm.h
106 - Missing PARAMS - HAS_POOLED_EU, MIN_EU_IN_POOL CONTEXT_PARAM_NO_ERROR_CAPTURE
107Status: Trivial.
108
109mga_drm.h
110 - Typo fix, use struct over typedef.
111Status: Trivial.
112
113nouveau_drm.h
114 - Missing macros NOUVEAU_GETPARAM*, NOUVEAU_DRM_HEADER_PATCHLEVEL, structs,
115enums, using stdint.h over the __u* types.
116Status: ?
117
118qxl_drm.h
119 - Using the stdint.h uint*_t over the respective __u* ones
120Status: Trivial.
121
122r128_drm.h
123 - Broken compat ioctls.
124
125radeon_drm.h
126 - Missing RADEON_TILING_R600_NO_SCANOUT, CIK_TILE_MODE_*, broken UMS ioctls,
127using stdint types.
128 - Both kernel and libdrm: missing padding -
129drm_radeon_gem_{create,{g,s}et_tiling,set_domain} others ?
130Status: ?
131
132savage_drm.h
133 - Renamed ioctls - DRM_IOCTL_SAVAGE_{,BCI}_EVENT_EMIT, compat ioctls are broken.
134Status: ?
135
136sis_drm.h
137 - Broken ioctls + libdrm uses int vs kernel long
138Status: ?
139
140via_drm.h
141 - Broken ioctls - libdrm int vs kernel long
142Status: ?
143
144
145omap_drm.h (living in $TOP/omap)
146 - License mismatch, missing DRM_IOCTL_OMAP_GEM_NEW and related struct
147Status: ?
148
149msm_drm.h (located in $TOP/freedreno/msm/)
150 - License mismatch, missing MSM_PIPE_*, MSM_SUBMIT_*. Renamed
151drm_msm_gem_submit::flags, missing drm_msm_gem_submit::fence_fd.
152Status: ?
153
154exynos_drm.h (living in $TOP/exynos)
155 - License mismatch, now using fixed size ints (but not everywhere). Lots of
156new stuff.
157Status: ?
diff --git a/include/drm/amdgpu_drm.h b/include/drm/amdgpu_drm.h
index fbdd1185..d8f24976 100644
--- a/include/drm/amdgpu_drm.h
+++ b/include/drm/amdgpu_drm.h
@@ -34,6 +34,10 @@
34 34
35#include "drm.h" 35#include "drm.h"
36 36
37#if defined(__cplusplus)
38extern "C" {
39#endif
40
37#define DRM_AMDGPU_GEM_CREATE 0x00 41#define DRM_AMDGPU_GEM_CREATE 0x00
38#define DRM_AMDGPU_GEM_MMAP 0x01 42#define DRM_AMDGPU_GEM_MMAP 0x01
39#define DRM_AMDGPU_CTX 0x02 43#define DRM_AMDGPU_CTX 0x02
@@ -73,6 +77,8 @@
73#define AMDGPU_GEM_CREATE_NO_CPU_ACCESS (1 << 1) 77#define AMDGPU_GEM_CREATE_NO_CPU_ACCESS (1 << 1)
74/* Flag that USWC attributes should be used for GTT */ 78/* Flag that USWC attributes should be used for GTT */
75#define AMDGPU_GEM_CREATE_CPU_GTT_USWC (1 << 2) 79#define AMDGPU_GEM_CREATE_CPU_GTT_USWC (1 << 2)
80/* Flag that the memory should be in VRAM and cleared */
81#define AMDGPU_GEM_CREATE_VRAM_CLEARED (1 << 3)
76 82
77struct drm_amdgpu_gem_create_in { 83struct drm_amdgpu_gem_create_in {
78 /** the requested memory size */ 84 /** the requested memory size */
@@ -483,6 +489,22 @@ struct drm_amdgpu_cs_chunk_data {
483#define AMDGPU_INFO_MMR_SH_INDEX_SHIFT 8 489#define AMDGPU_INFO_MMR_SH_INDEX_SHIFT 8
484#define AMDGPU_INFO_MMR_SH_INDEX_MASK 0xff 490#define AMDGPU_INFO_MMR_SH_INDEX_MASK 0xff
485 491
492struct drm_amdgpu_query_fw {
493 /** AMDGPU_INFO_FW_* */
494 uint32_t fw_type;
495 /**
496 * Index of the IP if there are more IPs of
497 * the same type.
498 */
499 uint32_t ip_instance;
500 /**
501 * Index of the engine. Whether this is used depends
502 * on the firmware type. (e.g. MEC, SDMA)
503 */
504 uint32_t index;
505 uint32_t _pad;
506};
507
486/* Input structure for the INFO ioctl */ 508/* Input structure for the INFO ioctl */
487struct drm_amdgpu_info { 509struct drm_amdgpu_info {
488 /* Where the return value will be stored */ 510 /* Where the return value will be stored */
@@ -518,21 +540,7 @@ struct drm_amdgpu_info {
518 uint32_t flags; 540 uint32_t flags;
519 } read_mmr_reg; 541 } read_mmr_reg;
520 542
521 struct { 543 struct drm_amdgpu_query_fw query_fw;
522 /** AMDGPU_INFO_FW_* */
523 uint32_t fw_type;
524 /**
525 * Index of the IP if there are more IPs of
526 * the same type.
527 */
528 uint32_t ip_instance;
529 /**
530 * Index of the engine. Whether this is used depends
531 * on the firmware type. (e.g. MEC, SDMA)
532 */
533 uint32_t index;
534 uint32_t _pad;
535 } query_fw;
536 }; 544 };
537}; 545};
538 546
@@ -640,6 +648,10 @@ struct drm_amdgpu_info_hw_ip {
640#define AMDGPU_FAMILY_CI 120 /* Bonaire, Hawaii */ 648#define AMDGPU_FAMILY_CI 120 /* Bonaire, Hawaii */
641#define AMDGPU_FAMILY_KV 125 /* Kaveri, Kabini, Mullins */ 649#define AMDGPU_FAMILY_KV 125 /* Kaveri, Kabini, Mullins */
642#define AMDGPU_FAMILY_VI 130 /* Iceland, Tonga */ 650#define AMDGPU_FAMILY_VI 130 /* Iceland, Tonga */
643#define AMDGPU_FAMILY_CZ 135 /* Carrizo */ 651#define AMDGPU_FAMILY_CZ 135 /* Carrizo, Stoney */
652
653#if defined(__cplusplus)
654}
655#endif
644 656
645#endif 657#endif
diff --git a/include/drm/drm.h b/include/drm/drm.h
index d36331a8..f6fd5c2c 100644
--- a/include/drm/drm.h
+++ b/include/drm/drm.h
@@ -36,7 +36,7 @@
36#ifndef _DRM_H_ 36#ifndef _DRM_H_
37#define _DRM_H_ 37#define _DRM_H_
38 38
39#if defined(__linux__) 39#if defined(__linux__)
40 40
41#include <linux/types.h> 41#include <linux/types.h>
42#include <asm/ioctl.h> 42#include <asm/ioctl.h>
@@ -54,10 +54,15 @@ typedef int32_t __s32;
54typedef uint32_t __u32; 54typedef uint32_t __u32;
55typedef int64_t __s64; 55typedef int64_t __s64;
56typedef uint64_t __u64; 56typedef uint64_t __u64;
57typedef size_t __kernel_size_t;
57typedef unsigned long drm_handle_t; 58typedef unsigned long drm_handle_t;
58 59
59#endif 60#endif
60 61
62#if defined(__cplusplus)
63extern "C" {
64#endif
65
61#define DRM_NAME "drm" /**< Name in kernel, /dev, and /proc */ 66#define DRM_NAME "drm" /**< Name in kernel, /dev, and /proc */
62#define DRM_MIN_ORDER 5 /**< At least 2^5 bytes = 32 bytes */ 67#define DRM_MIN_ORDER 5 /**< At least 2^5 bytes = 32 bytes */
63#define DRM_MAX_ORDER 22 /**< Up to 2^22 bytes = 4MB */ 68#define DRM_MAX_ORDER 22 /**< Up to 2^22 bytes = 4MB */
@@ -129,11 +134,11 @@ struct drm_version {
129 int version_major; /**< Major version */ 134 int version_major; /**< Major version */
130 int version_minor; /**< Minor version */ 135 int version_minor; /**< Minor version */
131 int version_patchlevel; /**< Patch level */ 136 int version_patchlevel; /**< Patch level */
132 size_t name_len; /**< Length of name buffer */ 137 __kernel_size_t name_len; /**< Length of name buffer */
133 char *name; /**< Name of driver */ 138 char *name; /**< Name of driver */
134 size_t date_len; /**< Length of date buffer */ 139 __kernel_size_t date_len; /**< Length of date buffer */
135 char *date; /**< User-space buffer to hold date */ 140 char *date; /**< User-space buffer to hold date */
136 size_t desc_len; /**< Length of desc buffer */ 141 __kernel_size_t desc_len; /**< Length of desc buffer */
137 char *desc; /**< User-space buffer to hold desc */ 142 char *desc; /**< User-space buffer to hold desc */
138}; 143};
139 144
@@ -143,7 +148,7 @@ struct drm_version {
143 * \sa drmGetBusid() and drmSetBusId(). 148 * \sa drmGetBusid() and drmSetBusId().
144 */ 149 */
145struct drm_unique { 150struct drm_unique {
146 size_t unique_len; /**< Length of unique */ 151 __kernel_size_t unique_len; /**< Length of unique */
147 char *unique; /**< Unique name for driver instantiation */ 152 char *unique; /**< Unique name for driver instantiation */
148}; 153};
149 154
@@ -180,8 +185,7 @@ enum drm_map_type {
180 _DRM_SHM = 2, /**< shared, cached */ 185 _DRM_SHM = 2, /**< shared, cached */
181 _DRM_AGP = 3, /**< AGP/GART */ 186 _DRM_AGP = 3, /**< AGP/GART */
182 _DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */ 187 _DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */
183 _DRM_CONSISTENT = 5, /**< Consistent memory for PCI DMA */ 188 _DRM_CONSISTENT = 5 /**< Consistent memory for PCI DMA */
184 _DRM_GEM = 6 /**< GEM object */
185}; 189};
186 190
187/** 191/**
@@ -467,12 +471,15 @@ struct drm_irq_busid {
467enum drm_vblank_seq_type { 471enum drm_vblank_seq_type {
468 _DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */ 472 _DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */
469 _DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */ 473 _DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */
474 /* bits 1-6 are reserved for high crtcs */
475 _DRM_VBLANK_HIGH_CRTC_MASK = 0x0000003e,
470 _DRM_VBLANK_EVENT = 0x4000000, /**< Send event instead of blocking */ 476 _DRM_VBLANK_EVENT = 0x4000000, /**< Send event instead of blocking */
471 _DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */ 477 _DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */
472 _DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */ 478 _DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */
473 _DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */ 479 _DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */
474 _DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking, unsupported */ 480 _DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking, unsupported */
475}; 481};
482#define _DRM_VBLANK_HIGH_CRTC_SHIFT 1
476 483
477#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE) 484#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE)
478#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_EVENT | _DRM_VBLANK_SIGNAL | \ 485#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_EVENT | _DRM_VBLANK_SIGNAL | \
@@ -612,6 +619,29 @@ struct drm_gem_open {
612 __u64 size; 619 __u64 size;
613}; 620};
614 621
622#define DRM_CAP_DUMB_BUFFER 0x1
623#define DRM_CAP_VBLANK_HIGH_CRTC 0x2
624#define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3
625#define DRM_CAP_DUMB_PREFER_SHADOW 0x4
626#define DRM_CAP_PRIME 0x5
627#define DRM_PRIME_CAP_IMPORT 0x1
628#define DRM_PRIME_CAP_EXPORT 0x2
629#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6
630#define DRM_CAP_ASYNC_PAGE_FLIP 0x7
631/*
632 * The CURSOR_WIDTH and CURSOR_HEIGHT capabilities return a valid widthxheight
633 * combination for the hardware cursor. The intention is that a hardware
634 * agnostic userspace can query a cursor plane size to use.
635 *
636 * Note that the cross-driver contract is to merely return a valid size;
637 * drivers are free to attach another meaning on top, eg. i915 returns the
638 * maximum plane size.
639 */
640#define DRM_CAP_CURSOR_WIDTH 0x8
641#define DRM_CAP_CURSOR_HEIGHT 0x9
642#define DRM_CAP_ADDFB2_MODIFIERS 0x10
643#define DRM_CAP_PAGE_FLIP_TARGET 0x11
644
615/** DRM_IOCTL_GET_CAP ioctl argument type */ 645/** DRM_IOCTL_GET_CAP ioctl argument type */
616struct drm_get_cap { 646struct drm_get_cap {
617 __u64 capability; 647 __u64 capability;
@@ -642,19 +672,13 @@ struct drm_get_cap {
642 */ 672 */
643#define DRM_CLIENT_CAP_ATOMIC 3 673#define DRM_CLIENT_CAP_ATOMIC 3
644 674
645/**
646 * DRM_CLIENT_CAP_ATOMIC
647 *
648 * If set to 1, the DRM core will allow atomic modesetting requests.
649 */
650#define DRM_CLIENT_CAP_ATOMIC 3
651
652/** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */ 675/** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
653struct drm_set_client_cap { 676struct drm_set_client_cap {
654 __u64 capability; 677 __u64 capability;
655 __u64 value; 678 __u64 value;
656}; 679};
657 680
681#define DRM_RDWR O_RDWR
658#define DRM_CLOEXEC O_CLOEXEC 682#define DRM_CLOEXEC O_CLOEXEC
659struct drm_prime_handle { 683struct drm_prime_handle {
660 __u32 handle; 684 __u32 handle;
@@ -666,8 +690,16 @@ struct drm_prime_handle {
666 __s32 fd; 690 __s32 fd;
667}; 691};
668 692
693#if defined(__cplusplus)
694}
695#endif
696
669#include "drm_mode.h" 697#include "drm_mode.h"
670 698
699#if defined(__cplusplus)
700extern "C" {
701#endif
702
671#define DRM_IOCTL_BASE 'd' 703#define DRM_IOCTL_BASE 'd'
672#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr) 704#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr)
673#define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type) 705#define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type)
@@ -750,8 +782,8 @@ struct drm_prime_handle {
750#define DRM_IOCTL_MODE_SETGAMMA DRM_IOWR(0xA5, struct drm_mode_crtc_lut) 782#define DRM_IOCTL_MODE_SETGAMMA DRM_IOWR(0xA5, struct drm_mode_crtc_lut)
751#define DRM_IOCTL_MODE_GETENCODER DRM_IOWR(0xA6, struct drm_mode_get_encoder) 783#define DRM_IOCTL_MODE_GETENCODER DRM_IOWR(0xA6, struct drm_mode_get_encoder)
752#define DRM_IOCTL_MODE_GETCONNECTOR DRM_IOWR(0xA7, struct drm_mode_get_connector) 784#define DRM_IOCTL_MODE_GETCONNECTOR DRM_IOWR(0xA7, struct drm_mode_get_connector)
753#define DRM_IOCTL_MODE_ATTACHMODE DRM_IOWR(0xA8, struct drm_mode_mode_cmd) 785#define DRM_IOCTL_MODE_ATTACHMODE DRM_IOWR(0xA8, struct drm_mode_mode_cmd) /* deprecated (never worked) */
754#define DRM_IOCTL_MODE_DETACHMODE DRM_IOWR(0xA9, struct drm_mode_mode_cmd) 786#define DRM_IOCTL_MODE_DETACHMODE DRM_IOWR(0xA9, struct drm_mode_mode_cmd) /* deprecated (never worked) */
755 787
756#define DRM_IOCTL_MODE_GETPROPERTY DRM_IOWR(0xAA, struct drm_mode_get_property) 788#define DRM_IOCTL_MODE_GETPROPERTY DRM_IOWR(0xAA, struct drm_mode_get_property)
757#define DRM_IOCTL_MODE_SETPROPERTY DRM_IOWR(0xAB, struct drm_mode_connector_set_property) 789#define DRM_IOCTL_MODE_SETPROPERTY DRM_IOWR(0xAB, struct drm_mode_connector_set_property)
@@ -778,7 +810,7 @@ struct drm_prime_handle {
778 810
779/** 811/**
780 * Device specific ioctls should only be in their respective headers 812 * Device specific ioctls should only be in their respective headers
781 * The device specific ioctl range is from 0x40 to 0x99. 813 * The device specific ioctl range is from 0x40 to 0x9f.
782 * Generic IOCTLS restart at 0xA0. 814 * Generic IOCTLS restart at 0xA0.
783 * 815 *
784 * \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and 816 * \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and
@@ -816,18 +848,6 @@ struct drm_event_vblank {
816 __u32 reserved; 848 __u32 reserved;
817}; 849};
818 850
819#define DRM_CAP_DUMB_BUFFER 0x1
820#define DRM_CAP_VBLANK_HIGH_CRTC 0x2
821#define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3
822#define DRM_CAP_DUMB_PREFER_SHADOW 0x4
823#define DRM_CAP_PRIME 0x5
824#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6
825#define DRM_CAP_ASYNC_PAGE_FLIP 0x7
826#define DRM_CAP_ADDFB2_MODIFIERS 0x10
827
828#define DRM_PRIME_CAP_IMPORT 0x1
829#define DRM_PRIME_CAP_EXPORT 0x2
830
831/* typedef area */ 851/* typedef area */
832typedef struct drm_clip_rect drm_clip_rect_t; 852typedef struct drm_clip_rect drm_clip_rect_t;
833typedef struct drm_drawable_info drm_drawable_info_t; 853typedef struct drm_drawable_info drm_drawable_info_t;
@@ -871,4 +891,8 @@ typedef struct drm_agp_info drm_agp_info_t;
871typedef struct drm_scatter_gather drm_scatter_gather_t; 891typedef struct drm_scatter_gather drm_scatter_gather_t;
872typedef struct drm_set_version drm_set_version_t; 892typedef struct drm_set_version drm_set_version_t;
873 893
894#if defined(__cplusplus)
895}
896#endif
897
874#endif 898#endif
diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h
index e741b09a..4d8da699 100644
--- a/include/drm/drm_fourcc.h
+++ b/include/drm/drm_fourcc.h
@@ -24,16 +24,23 @@
24#ifndef DRM_FOURCC_H 24#ifndef DRM_FOURCC_H
25#define DRM_FOURCC_H 25#define DRM_FOURCC_H
26 26
27#include <inttypes.h> 27#include "drm.h"
28 28
29#define fourcc_code(a,b,c,d) ((uint32_t)(a) | ((uint32_t)(b) << 8) | \ 29#define fourcc_code(a, b, c, d) ((__u32)(a) | ((__u32)(b) << 8) | \
30 ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24)) 30 ((__u32)(c) << 16) | ((__u32)(d) << 24))
31 31
32#define DRM_FORMAT_BIG_ENDIAN (1<<31) /* format is big endian instead of little endian */ 32#define DRM_FORMAT_BIG_ENDIAN (1<<31) /* format is big endian instead of little endian */
33 33
34/* color index */ 34/* color index */
35#define DRM_FORMAT_C8 fourcc_code('C', '8', ' ', ' ') /* [7:0] C */ 35#define DRM_FORMAT_C8 fourcc_code('C', '8', ' ', ' ') /* [7:0] C */
36 36
37/* 8 bpp Red */
38#define DRM_FORMAT_R8 fourcc_code('R', '8', ' ', ' ') /* [7:0] R */
39
40/* 16 bpp RG */
41#define DRM_FORMAT_RG88 fourcc_code('R', 'G', '8', '8') /* [15:0] R:G 8:8 little endian */
42#define DRM_FORMAT_GR88 fourcc_code('G', 'R', '8', '8') /* [15:0] G:R 8:8 little endian */
43
37/* 8 bpp RGB */ 44/* 8 bpp RGB */
38#define DRM_FORMAT_RGB332 fourcc_code('R', 'G', 'B', '8') /* [7:0] R:G:B 3:3:2 */ 45#define DRM_FORMAT_RGB332 fourcc_code('R', 'G', 'B', '8') /* [7:0] R:G:B 3:3:2 */
39#define DRM_FORMAT_BGR233 fourcc_code('B', 'G', 'R', '8') /* [7:0] B:G:R 2:3:3 */ 46#define DRM_FORMAT_BGR233 fourcc_code('B', 'G', 'R', '8') /* [7:0] B:G:R 2:3:3 */
@@ -106,6 +113,8 @@
106#define DRM_FORMAT_NV21 fourcc_code('N', 'V', '2', '1') /* 2x2 subsampled Cb:Cr plane */ 113#define DRM_FORMAT_NV21 fourcc_code('N', 'V', '2', '1') /* 2x2 subsampled Cb:Cr plane */
107#define DRM_FORMAT_NV16 fourcc_code('N', 'V', '1', '6') /* 2x1 subsampled Cr:Cb plane */ 114#define DRM_FORMAT_NV16 fourcc_code('N', 'V', '1', '6') /* 2x1 subsampled Cr:Cb plane */
108#define DRM_FORMAT_NV61 fourcc_code('N', 'V', '6', '1') /* 2x1 subsampled Cb:Cr plane */ 115#define DRM_FORMAT_NV61 fourcc_code('N', 'V', '6', '1') /* 2x1 subsampled Cb:Cr plane */
116#define DRM_FORMAT_NV24 fourcc_code('N', 'V', '2', '4') /* non-subsampled Cr:Cb plane */
117#define DRM_FORMAT_NV42 fourcc_code('N', 'V', '4', '2') /* non-subsampled Cb:Cr plane */
109 118
110/* 119/*
111 * 3 plane YCbCr 120 * 3 plane YCbCr
@@ -216,7 +225,7 @@
216 * - multiple of 128 pixels for the width 225 * - multiple of 128 pixels for the width
217 * - multiple of 32 pixels for the height 226 * - multiple of 32 pixels for the height
218 * 227 *
219 * For more information: see http://linuxtv.org/downloads/v4l-dvb-apis/re32.html 228 * For more information: see https://linuxtv.org/downloads/v4l-dvb-apis/re32.html
220 */ 229 */
221#define DRM_FORMAT_MOD_SAMSUNG_64_32_TILE fourcc_mod_code(SAMSUNG, 1) 230#define DRM_FORMAT_MOD_SAMSUNG_64_32_TILE fourcc_mod_code(SAMSUNG, 1)
222 231
diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h
index 59e67b14..6708e2b7 100644
--- a/include/drm/drm_mode.h
+++ b/include/drm/drm_mode.h
@@ -27,6 +27,12 @@
27#ifndef _DRM_MODE_H 27#ifndef _DRM_MODE_H
28#define _DRM_MODE_H 28#define _DRM_MODE_H
29 29
30#include "drm.h"
31
32#if defined(__cplusplus)
33extern "C" {
34#endif
35
30#define DRM_DISPLAY_INFO_LEN 32 36#define DRM_DISPLAY_INFO_LEN 32
31#define DRM_CONNECTOR_NAME_LEN 32 37#define DRM_CONNECTOR_NAME_LEN 32
32#define DRM_DISPLAY_MODE_LEN 32 38#define DRM_DISPLAY_MODE_LEN 32
@@ -56,6 +62,10 @@
56#define DRM_MODE_FLAG_PIXMUX (1<<11) 62#define DRM_MODE_FLAG_PIXMUX (1<<11)
57#define DRM_MODE_FLAG_DBLCLK (1<<12) 63#define DRM_MODE_FLAG_DBLCLK (1<<12)
58#define DRM_MODE_FLAG_CLKDIV2 (1<<13) 64#define DRM_MODE_FLAG_CLKDIV2 (1<<13)
65 /*
66 * When adding a new stereo mode don't forget to adjust DRM_MODE_FLAGS_3D_MAX
67 * (define not exposed to user space).
68 */
59#define DRM_MODE_FLAG_3D_MASK (0x1f<<14) 69#define DRM_MODE_FLAG_3D_MASK (0x1f<<14)
60#define DRM_MODE_FLAG_3D_NONE (0<<14) 70#define DRM_MODE_FLAG_3D_NONE (0<<14)
61#define DRM_MODE_FLAG_3D_FRAME_PACKING (1<<14) 71#define DRM_MODE_FLAG_3D_FRAME_PACKING (1<<14)
@@ -82,6 +92,11 @@
82#define DRM_MODE_SCALE_CENTER 2 /* Centered, no scaling */ 92#define DRM_MODE_SCALE_CENTER 2 /* Centered, no scaling */
83#define DRM_MODE_SCALE_ASPECT 3 /* Full screen, preserve aspect */ 93#define DRM_MODE_SCALE_ASPECT 3 /* Full screen, preserve aspect */
84 94
95/* Picture aspect ratio options */
96#define DRM_MODE_PICTURE_ASPECT_NONE 0
97#define DRM_MODE_PICTURE_ASPECT_4_3 1
98#define DRM_MODE_PICTURE_ASPECT_16_9 2
99
85/* Dithering mode options */ 100/* Dithering mode options */
86#define DRM_MODE_DITHERING_OFF 0 101#define DRM_MODE_DITHERING_OFF 0
87#define DRM_MODE_DITHERING_ON 1 102#define DRM_MODE_DITHERING_ON 1
@@ -102,8 +117,16 @@
102 117
103struct drm_mode_modeinfo { 118struct drm_mode_modeinfo {
104 __u32 clock; 119 __u32 clock;
105 __u16 hdisplay, hsync_start, hsync_end, htotal, hskew; 120 __u16 hdisplay;
106 __u16 vdisplay, vsync_start, vsync_end, vtotal, vscan; 121 __u16 hsync_start;
122 __u16 hsync_end;
123 __u16 htotal;
124 __u16 hskew;
125 __u16 vdisplay;
126 __u16 vsync_start;
127 __u16 vsync_end;
128 __u16 vtotal;
129 __u16 vscan;
107 130
108 __u32 vrefresh; 131 __u32 vrefresh;
109 132
@@ -121,8 +144,10 @@ struct drm_mode_card_res {
121 __u32 count_crtcs; 144 __u32 count_crtcs;
122 __u32 count_connectors; 145 __u32 count_connectors;
123 __u32 count_encoders; 146 __u32 count_encoders;
124 __u32 min_width, max_width; 147 __u32 min_width;
125 __u32 min_height, max_height; 148 __u32 max_width;
149 __u32 min_height;
150 __u32 max_height;
126}; 151};
127 152
128struct drm_mode_crtc { 153struct drm_mode_crtc {
@@ -132,30 +157,35 @@ struct drm_mode_crtc {
132 __u32 crtc_id; /**< Id */ 157 __u32 crtc_id; /**< Id */
133 __u32 fb_id; /**< Id of framebuffer */ 158 __u32 fb_id; /**< Id of framebuffer */
134 159
135 __u32 x, y; /**< Position on the frameuffer */ 160 __u32 x; /**< x Position on the framebuffer */
161 __u32 y; /**< y Position on the framebuffer */
136 162
137 __u32 gamma_size; 163 __u32 gamma_size;
138 __u32 mode_valid; 164 __u32 mode_valid;
139 struct drm_mode_modeinfo mode; 165 struct drm_mode_modeinfo mode;
140}; 166};
141 167
142#define DRM_MODE_PRESENT_TOP_FIELD (1<<0) 168#define DRM_MODE_PRESENT_TOP_FIELD (1<<0)
143#define DRM_MODE_PRESENT_BOTTOM_FIELD (1<<1) 169#define DRM_MODE_PRESENT_BOTTOM_FIELD (1<<1)
144 170
145/* Planes blend with or override other bits on the CRTC */ 171/* Planes blend with or override other bits on the CRTC */
146struct drm_mode_set_plane { 172struct drm_mode_set_plane {
147 __u32 plane_id; 173 __u32 plane_id;
148 __u32 crtc_id; 174 __u32 crtc_id;
149 __u32 fb_id; /* fb object contains surface format type */ 175 __u32 fb_id; /* fb object contains surface format type */
150 __u32 flags; 176 __u32 flags; /* see above flags */
151 177
152 /* Signed dest location allows it to be partially off screen */ 178 /* Signed dest location allows it to be partially off screen */
153 __s32 crtc_x, crtc_y; 179 __s32 crtc_x;
154 __u32 crtc_w, crtc_h; 180 __s32 crtc_y;
181 __u32 crtc_w;
182 __u32 crtc_h;
155 183
156 /* Source values are 16.16 fixed point */ 184 /* Source values are 16.16 fixed point */
157 __u32 src_x, src_y; 185 __u32 src_x;
158 __u32 src_h, src_w; 186 __u32 src_y;
187 __u32 src_h;
188 __u32 src_w;
159}; 189};
160 190
161struct drm_mode_get_plane { 191struct drm_mode_get_plane {
@@ -184,6 +214,7 @@ struct drm_mode_get_plane_res {
184#define DRM_MODE_ENCODER_VIRTUAL 5 214#define DRM_MODE_ENCODER_VIRTUAL 5
185#define DRM_MODE_ENCODER_DSI 6 215#define DRM_MODE_ENCODER_DSI 6
186#define DRM_MODE_ENCODER_DPMST 7 216#define DRM_MODE_ENCODER_DPMST 7
217#define DRM_MODE_ENCODER_DPI 8
187 218
188struct drm_mode_get_encoder { 219struct drm_mode_get_encoder {
189 __u32 encoder_id; 220 __u32 encoder_id;
@@ -223,6 +254,7 @@ struct drm_mode_get_encoder {
223#define DRM_MODE_CONNECTOR_eDP 14 254#define DRM_MODE_CONNECTOR_eDP 14
224#define DRM_MODE_CONNECTOR_VIRTUAL 15 255#define DRM_MODE_CONNECTOR_VIRTUAL 15
225#define DRM_MODE_CONNECTOR_DSI 16 256#define DRM_MODE_CONNECTOR_DSI 16
257#define DRM_MODE_CONNECTOR_DPI 17
226 258
227struct drm_mode_get_connector { 259struct drm_mode_get_connector {
228 260
@@ -241,8 +273,11 @@ struct drm_mode_get_connector {
241 __u32 connector_type_id; 273 __u32 connector_type_id;
242 274
243 __u32 connection; 275 __u32 connection;
244 __u32 mm_width, mm_height; /**< HxW in millimeters */ 276 __u32 mm_width; /**< width in millimeters */
277 __u32 mm_height; /**< height in millimeters */
245 __u32 subpixel; 278 __u32 subpixel;
279
280 __u32 pad;
246}; 281};
247 282
248#define DRM_MODE_PROP_PENDING (1<<0) 283#define DRM_MODE_PROP_PENDING (1<<0)
@@ -288,6 +323,8 @@ struct drm_mode_get_property {
288 char name[DRM_PROP_NAME_LEN]; 323 char name[DRM_PROP_NAME_LEN];
289 324
290 __u32 count_values; 325 __u32 count_values;
326 /* This is only used to count enum values, not blobs. The _blobs is
327 * simply because of a historical reason, i.e. backwards compat. */
291 __u32 count_enum_blobs; 328 __u32 count_enum_blobs;
292}; 329};
293 330
@@ -305,6 +342,7 @@ struct drm_mode_connector_set_property {
305#define DRM_MODE_OBJECT_FB 0xfbfbfbfb 342#define DRM_MODE_OBJECT_FB 0xfbfbfbfb
306#define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb 343#define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb
307#define DRM_MODE_OBJECT_PLANE 0xeeeeeeee 344#define DRM_MODE_OBJECT_PLANE 0xeeeeeeee
345#define DRM_MODE_OBJECT_ANY 0
308 346
309struct drm_mode_obj_get_properties { 347struct drm_mode_obj_get_properties {
310 __u64 props_ptr; 348 __u64 props_ptr;
@@ -329,7 +367,8 @@ struct drm_mode_get_blob {
329 367
330struct drm_mode_fb_cmd { 368struct drm_mode_fb_cmd {
331 __u32 fb_id; 369 __u32 fb_id;
332 __u32 width, height; 370 __u32 width;
371 __u32 height;
333 __u32 pitch; 372 __u32 pitch;
334 __u32 bpp; 373 __u32 bpp;
335 __u32 depth; 374 __u32 depth;
@@ -342,9 +381,10 @@ struct drm_mode_fb_cmd {
342 381
343struct drm_mode_fb_cmd2 { 382struct drm_mode_fb_cmd2 {
344 __u32 fb_id; 383 __u32 fb_id;
345 __u32 width, height; 384 __u32 width;
385 __u32 height;
346 __u32 pixel_format; /* fourcc code from drm_fourcc.h */ 386 __u32 pixel_format; /* fourcc code from drm_fourcc.h */
347 __u32 flags; 387 __u32 flags; /* see above flags */
348 388
349 /* 389 /*
350 * In case of planar formats, this ioctl allows up to 4 390 * In case of planar formats, this ioctl allows up to 4
@@ -356,9 +396,9 @@ struct drm_mode_fb_cmd2 {
356 * followed by an interleaved U/V plane containing 396 * followed by an interleaved U/V plane containing
357 * 8 bit 2x2 subsampled colour difference samples. 397 * 8 bit 2x2 subsampled colour difference samples.
358 * 398 *
359 * So it would consist of Y as offset[0] and UV as 399 * So it would consist of Y as offsets[0] and UV as
360 * offset[1]. Note that offset[0] will generally 400 * offsets[1]. Note that offsets[0] will generally
361 * be 0. 401 * be 0 (but this is not required).
362 * 402 *
363 * To accommodate tiled, compressed, etc formats, a per-plane 403 * To accommodate tiled, compressed, etc formats, a per-plane
364 * modifier can be specified. The default value of zero 404 * modifier can be specified. The default value of zero
@@ -377,6 +417,8 @@ struct drm_mode_fb_cmd2 {
377#define DRM_MODE_FB_DIRTY_ANNOTATE_FILL 0x02 417#define DRM_MODE_FB_DIRTY_ANNOTATE_FILL 0x02
378#define DRM_MODE_FB_DIRTY_FLAGS 0x03 418#define DRM_MODE_FB_DIRTY_FLAGS 0x03
379 419
420#define DRM_MODE_FB_DIRTY_MAX_CLIPS 256
421
380/* 422/*
381 * Mark a region of a framebuffer as dirty. 423 * Mark a region of a framebuffer as dirty.
382 * 424 *
@@ -417,20 +459,21 @@ struct drm_mode_mode_cmd {
417 struct drm_mode_modeinfo mode; 459 struct drm_mode_modeinfo mode;
418}; 460};
419 461
420#define DRM_MODE_CURSOR_BO (1<<0) 462#define DRM_MODE_CURSOR_BO 0x01
421#define DRM_MODE_CURSOR_MOVE (1<<1) 463#define DRM_MODE_CURSOR_MOVE 0x02
464#define DRM_MODE_CURSOR_FLAGS 0x03
422 465
423/* 466/*
424 * depending on the value in flags diffrent members are used. 467 * depending on the value in flags different members are used.
425 * 468 *
426 * CURSOR_BO uses 469 * CURSOR_BO uses
427 * crtc 470 * crtc_id
428 * width 471 * width
429 * height 472 * height
430 * handle - if 0 turns the cursor of 473 * handle - if 0 turns the cursor off
431 * 474 *
432 * CURSOR_MOVE uses 475 * CURSOR_MOVE uses
433 * crtc 476 * crtc_id
434 * x 477 * x
435 * y 478 * y
436 */ 479 */
@@ -468,9 +511,30 @@ struct drm_mode_crtc_lut {
468 __u64 blue; 511 __u64 blue;
469}; 512};
470 513
514struct drm_color_ctm {
515 /* Conversion matrix in S31.32 format. */
516 __s64 matrix[9];
517};
518
519struct drm_color_lut {
520 /*
521 * Data is U0.16 fixed point format.
522 */
523 __u16 red;
524 __u16 green;
525 __u16 blue;
526 __u16 reserved;
527};
528
471#define DRM_MODE_PAGE_FLIP_EVENT 0x01 529#define DRM_MODE_PAGE_FLIP_EVENT 0x01
472#define DRM_MODE_PAGE_FLIP_ASYNC 0x02 530#define DRM_MODE_PAGE_FLIP_ASYNC 0x02
473#define DRM_MODE_PAGE_FLIP_FLAGS (DRM_MODE_PAGE_FLIP_EVENT|DRM_MODE_PAGE_FLIP_ASYNC) 531#define DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE 0x4
532#define DRM_MODE_PAGE_FLIP_TARGET_RELATIVE 0x8
533#define DRM_MODE_PAGE_FLIP_TARGET (DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE | \
534 DRM_MODE_PAGE_FLIP_TARGET_RELATIVE)
535#define DRM_MODE_PAGE_FLIP_FLAGS (DRM_MODE_PAGE_FLIP_EVENT | \
536 DRM_MODE_PAGE_FLIP_ASYNC | \
537 DRM_MODE_PAGE_FLIP_TARGET)
474 538
475/* 539/*
476 * Request a page flip on the specified crtc. 540 * Request a page flip on the specified crtc.
@@ -484,14 +548,16 @@ struct drm_mode_crtc_lut {
484 * flip is already pending as the ioctl is called, EBUSY will be 548 * flip is already pending as the ioctl is called, EBUSY will be
485 * returned. 549 * returned.
486 * 550 *
487 * The ioctl supports one flag, DRM_MODE_PAGE_FLIP_EVENT, which will 551 * Flag DRM_MODE_PAGE_FLIP_EVENT requests that drm sends back a vblank
488 * request that drm sends back a vblank event (see drm.h: struct 552 * event (see drm.h: struct drm_event_vblank) when the page flip is
489 * drm_event_vblank) when the page flip is done. The user_data field 553 * done. The user_data field passed in with this ioctl will be
490 * passed in with this ioctl will be returned as the user_data field 554 * returned as the user_data field in the vblank event struct.
491 * in the vblank event struct.
492 * 555 *
493 * The reserved field must be zero until we figure out something 556 * Flag DRM_MODE_PAGE_FLIP_ASYNC requests that the flip happen
494 * clever to use it for. 557 * 'as soon as possible', meaning that it not delay waiting for vblank.
558 * This may cause tearing on the screen.
559 *
560 * The reserved field must be zero.
495 */ 561 */
496 562
497struct drm_mode_crtc_page_flip { 563struct drm_mode_crtc_page_flip {
@@ -502,29 +568,57 @@ struct drm_mode_crtc_page_flip {
502 __u64 user_data; 568 __u64 user_data;
503}; 569};
504 570
571/*
572 * Request a page flip on the specified crtc.
573 *
574 * Same as struct drm_mode_crtc_page_flip, but supports new flags and
575 * re-purposes the reserved field:
576 *
577 * The sequence field must be zero unless either of the
578 * DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE/RELATIVE flags is specified. When
579 * the ABSOLUTE flag is specified, the sequence field denotes the absolute
580 * vblank sequence when the flip should take effect. When the RELATIVE
581 * flag is specified, the sequence field denotes the relative (to the
582 * current one when the ioctl is called) vblank sequence when the flip
583 * should take effect. NOTE: DRM_IOCTL_WAIT_VBLANK must still be used to
584 * make sure the vblank sequence before the target one has passed before
585 * calling this ioctl. The purpose of the
586 * DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE/RELATIVE flags is merely to clarify
587 * the target for when code dealing with a page flip runs during a
588 * vertical blank period.
589 */
590
591struct drm_mode_crtc_page_flip_target {
592 __u32 crtc_id;
593 __u32 fb_id;
594 __u32 flags;
595 __u32 sequence;
596 __u64 user_data;
597};
598
505/* create a dumb scanout buffer */ 599/* create a dumb scanout buffer */
506struct drm_mode_create_dumb { 600struct drm_mode_create_dumb {
507 __u32 height; 601 __u32 height;
508 __u32 width; 602 __u32 width;
509 __u32 bpp; 603 __u32 bpp;
510 __u32 flags; 604 __u32 flags;
511 /* handle, pitch, size will be returned */ 605 /* handle, pitch, size will be returned */
512 __u32 handle; 606 __u32 handle;
513 __u32 pitch; 607 __u32 pitch;
514 __u64 size; 608 __u64 size;
515}; 609};
516 610
517/* set up for mmap of a dumb scanout buffer */ 611/* set up for mmap of a dumb scanout buffer */
518struct drm_mode_map_dumb { 612struct drm_mode_map_dumb {
519 /** Handle for the object being mapped. */ 613 /** Handle for the object being mapped. */
520 __u32 handle; 614 __u32 handle;
521 __u32 pad; 615 __u32 pad;
522 /** 616 /**
523 * Fake offset to use for subsequent mmap call 617 * Fake offset to use for subsequent mmap call
524 * 618 *
525 * This is a fixed-size type for 32/64 compatibility. 619 * This is a fixed-size type for 32/64 compatibility.
526 */ 620 */
527 __u64 offset; 621 __u64 offset;
528}; 622};
529 623
530struct drm_mode_destroy_dumb { 624struct drm_mode_destroy_dumb {
@@ -532,9 +626,16 @@ struct drm_mode_destroy_dumb {
532}; 626};
533 627
534/* page-flip flags are valid, plus: */ 628/* page-flip flags are valid, plus: */
535#define DRM_MODE_ATOMIC_TEST_ONLY 0x0100 629#define DRM_MODE_ATOMIC_TEST_ONLY 0x0100
536#define DRM_MODE_ATOMIC_NONBLOCK 0x0200 630#define DRM_MODE_ATOMIC_NONBLOCK 0x0200
537#define DRM_MODE_ATOMIC_ALLOW_MODESET 0x0400 631#define DRM_MODE_ATOMIC_ALLOW_MODESET 0x0400
632
633#define DRM_MODE_ATOMIC_FLAGS (\
634 DRM_MODE_PAGE_FLIP_EVENT |\
635 DRM_MODE_PAGE_FLIP_ASYNC |\
636 DRM_MODE_ATOMIC_TEST_ONLY |\
637 DRM_MODE_ATOMIC_NONBLOCK |\
638 DRM_MODE_ATOMIC_ALLOW_MODESET)
538 639
539#define DRM_MODE_ATOMIC_FLAGS (\ 640#define DRM_MODE_ATOMIC_FLAGS (\
540 DRM_MODE_PAGE_FLIP_EVENT |\ 641 DRM_MODE_PAGE_FLIP_EVENT |\
@@ -574,5 +675,8 @@ struct drm_mode_destroy_blob {
574 __u32 blob_id; 675 __u32 blob_id;
575}; 676};
576 677
678#if defined(__cplusplus)
679}
680#endif
577 681
578#endif 682#endif
diff --git a/include/drm/drm_sarea.h b/include/drm/drm_sarea.h
index 7325558d..502934ed 100644
--- a/include/drm/drm_sarea.h
+++ b/include/drm/drm_sarea.h
@@ -37,6 +37,8 @@
37/* SAREA area needs to be at least a page */ 37/* SAREA area needs to be at least a page */
38#if defined(__alpha__) 38#if defined(__alpha__)
39#define SAREA_MAX 0x2000U 39#define SAREA_MAX 0x2000U
40#elif defined(__mips__)
41#define SAREA_MAX 0x4000U
40#elif defined(__ia64__) 42#elif defined(__ia64__)
41#define SAREA_MAX 0x10000U /* 64kB */ 43#define SAREA_MAX 0x10000U /* 64kB */
42#else 44#else
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index 0e51d421..5ebe0462 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -29,6 +29,10 @@
29 29
30#include "drm.h" 30#include "drm.h"
31 31
32#if defined(__cplusplus)
33extern "C" {
34#endif
35
32/* Please note that modifications to all structs defined here are 36/* Please note that modifications to all structs defined here are
33 * subject to backwards-compatibility constraints. 37 * subject to backwards-compatibility constraints.
34 */ 38 */
@@ -58,6 +62,30 @@
58#define I915_ERROR_UEVENT "ERROR" 62#define I915_ERROR_UEVENT "ERROR"
59#define I915_RESET_UEVENT "RESET" 63#define I915_RESET_UEVENT "RESET"
60 64
65/*
66 * MOCS indexes used for GPU surfaces, defining the cacheability of the
67 * surface data and the coherency for this data wrt. CPU vs. GPU accesses.
68 */
69enum i915_mocs_table_index {
70 /*
71 * Not cached anywhere, coherency between CPU and GPU accesses is
72 * guaranteed.
73 */
74 I915_MOCS_UNCACHED,
75 /*
76 * Cacheability and coherency controlled by the kernel automatically
77 * based on the DRM_I915_GEM_SET_CACHING IOCTL setting and the current
78 * usage of the surface (used for display scanout or not).
79 */
80 I915_MOCS_PTE,
81 /*
82 * Cached in all GPU caches available on the platform.
83 * Coherency between CPU and GPU accesses to the surface is not
84 * guaranteed without extra synchronization.
85 */
86 I915_MOCS_CACHED,
87};
88
61/* Each region is a minimum of 16k, and there are at most 255 of them. 89/* Each region is a minimum of 16k, and there are at most 255 of them.
62 */ 90 */
63#define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use 91#define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use
@@ -218,6 +246,7 @@ typedef struct _drm_i915_sarea {
218#define DRM_I915_OVERLAY_PUT_IMAGE 0x27 246#define DRM_I915_OVERLAY_PUT_IMAGE 0x27
219#define DRM_I915_OVERLAY_ATTRS 0x28 247#define DRM_I915_OVERLAY_ATTRS 0x28
220#define DRM_I915_GEM_EXECBUFFER2 0x29 248#define DRM_I915_GEM_EXECBUFFER2 0x29
249#define DRM_I915_GEM_EXECBUFFER2_WR DRM_I915_GEM_EXECBUFFER2
221#define DRM_I915_GET_SPRITE_COLORKEY 0x2a 250#define DRM_I915_GET_SPRITE_COLORKEY 0x2a
222#define DRM_I915_SET_SPRITE_COLORKEY 0x2b 251#define DRM_I915_SET_SPRITE_COLORKEY 0x2b
223#define DRM_I915_GEM_WAIT 0x2c 252#define DRM_I915_GEM_WAIT 0x2c
@@ -230,6 +259,7 @@ typedef struct _drm_i915_sarea {
230#define DRM_I915_GEM_USERPTR 0x33 259#define DRM_I915_GEM_USERPTR 0x33
231#define DRM_I915_GEM_CONTEXT_GETPARAM 0x34 260#define DRM_I915_GEM_CONTEXT_GETPARAM 0x34
232#define DRM_I915_GEM_CONTEXT_SETPARAM 0x35 261#define DRM_I915_GEM_CONTEXT_SETPARAM 0x35
262#define DRM_I915_PERF_OPEN 0x36
233 263
234#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) 264#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
235#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) 265#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -251,6 +281,7 @@ typedef struct _drm_i915_sarea {
251#define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init) 281#define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
252#define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer) 282#define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
253#define DRM_IOCTL_I915_GEM_EXECBUFFER2 DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2) 283#define DRM_IOCTL_I915_GEM_EXECBUFFER2 DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
284#define DRM_IOCTL_I915_GEM_EXECBUFFER2_WR DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2_WR, struct drm_i915_gem_execbuffer2)
254#define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin) 285#define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
255#define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin) 286#define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
256#define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy) 287#define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
@@ -283,6 +314,7 @@ typedef struct _drm_i915_sarea {
283#define DRM_IOCTL_I915_GEM_USERPTR DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr) 314#define DRM_IOCTL_I915_GEM_USERPTR DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr)
284#define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param) 315#define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param)
285#define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param) 316#define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param)
317#define DRM_IOCTL_I915_PERF_OPEN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param)
286 318
287/* Allow drivers to submit batchbuffers directly to hardware, relying 319/* Allow drivers to submit batchbuffers directly to hardware, relying
288 * on the security mechanisms provided by hardware. 320 * on the security mechanisms provided by hardware.
@@ -357,6 +389,28 @@ typedef struct drm_i915_irq_wait {
357#define I915_PARAM_HAS_GPU_RESET 35 389#define I915_PARAM_HAS_GPU_RESET 35
358#define I915_PARAM_HAS_RESOURCE_STREAMER 36 390#define I915_PARAM_HAS_RESOURCE_STREAMER 36
359#define I915_PARAM_HAS_EXEC_SOFTPIN 37 391#define I915_PARAM_HAS_EXEC_SOFTPIN 37
392#define I915_PARAM_HAS_POOLED_EU 38
393#define I915_PARAM_MIN_EU_IN_POOL 39
394#define I915_PARAM_MMAP_GTT_VERSION 40
395
396/* Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution
397 * priorities and the driver will attempt to execute batches in priority order.
398 */
399#define I915_PARAM_HAS_SCHEDULER 41
400#define I915_PARAM_HUC_STATUS 42
401
402/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of
403 * synchronisation with implicit fencing on individual objects.
404 * See EXEC_OBJECT_ASYNC.
405 */
406#define I915_PARAM_HAS_EXEC_ASYNC 43
407
408/* Query whether DRM_I915_GEM_EXECBUFFER2 supports explicit fence support -
409 * both being able to pass in a sync_file fd to wait upon before executing,
410 * and being able to return a new sync_file fd that is signaled when the
411 * current request is complete. See I915_EXEC_FENCE_IN and I915_EXEC_FENCE_OUT.
412 */
413#define I915_PARAM_HAS_EXEC_FENCE 44
360 414
361typedef struct drm_i915_getparam { 415typedef struct drm_i915_getparam {
362 __s32 param; 416 __s32 param;
@@ -692,15 +746,41 @@ struct drm_i915_gem_exec_object2 {
692 */ 746 */
693 __u64 offset; 747 __u64 offset;
694 748
695#define EXEC_OBJECT_NEEDS_FENCE (1<<0) 749#define EXEC_OBJECT_NEEDS_FENCE (1<<0)
696#define EXEC_OBJECT_NEEDS_GTT (1<<1) 750#define EXEC_OBJECT_NEEDS_GTT (1<<1)
697#define EXEC_OBJECT_WRITE (1<<2) 751#define EXEC_OBJECT_WRITE (1<<2)
698#define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3) 752#define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
699#define EXEC_OBJECT_PINNED (1<<4) 753#define EXEC_OBJECT_PINNED (1<<4)
700#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_PINNED<<1) 754#define EXEC_OBJECT_PAD_TO_SIZE (1<<5)
755/* The kernel implicitly tracks GPU activity on all GEM objects, and
756 * synchronises operations with outstanding rendering. This includes
757 * rendering on other devices if exported via dma-buf. However, sometimes
758 * this tracking is too coarse and the user knows better. For example,
759 * if the object is split into non-overlapping ranges shared between different
760 * clients or engines (i.e. suballocating objects), the implicit tracking
761 * by kernel assumes that each operation affects the whole object rather
762 * than an individual range, causing needless synchronisation between clients.
763 * The kernel will also forgo any CPU cache flushes prior to rendering from
764 * the object as the client is expected to be also handling such domain
765 * tracking.
766 *
767 * The kernel maintains the implicit tracking in order to manage resources
768 * used by the GPU - this flag only disables the synchronisation prior to
769 * rendering with this object in this execbuf.
770 *
771 * Opting out of implicit synhronisation requires the user to do its own
772 * explicit tracking to avoid rendering corruption. See, for example,
773 * I915_PARAM_HAS_EXEC_FENCE to order execbufs and execute them asynchronously.
774 */
775#define EXEC_OBJECT_ASYNC (1<<6)
776/* All remaining bits are MBZ and RESERVED FOR FUTURE USE */
777#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_ASYNC<<1)
701 __u64 flags; 778 __u64 flags;
702 779
703 __u64 rsvd1; 780 union {
781 __u64 rsvd1;
782 __u64 pad_to_size;
783 };
704 __u64 rsvd2; 784 __u64 rsvd2;
705}; 785};
706 786
@@ -772,17 +852,44 @@ struct drm_i915_gem_execbuffer2 {
772#define I915_EXEC_HANDLE_LUT (1<<12) 852#define I915_EXEC_HANDLE_LUT (1<<12)
773 853
774/** Used for switching BSD rings on the platforms with two BSD rings */ 854/** Used for switching BSD rings on the platforms with two BSD rings */
775#define I915_EXEC_BSD_MASK (3<<13) 855#define I915_EXEC_BSD_SHIFT (13)
776#define I915_EXEC_BSD_DEFAULT (0<<13) /* default ping-pong mode */ 856#define I915_EXEC_BSD_MASK (3 << I915_EXEC_BSD_SHIFT)
777#define I915_EXEC_BSD_RING1 (1<<13) 857/* default ping-pong mode */
778#define I915_EXEC_BSD_RING2 (2<<13) 858#define I915_EXEC_BSD_DEFAULT (0 << I915_EXEC_BSD_SHIFT)
859#define I915_EXEC_BSD_RING1 (1 << I915_EXEC_BSD_SHIFT)
860#define I915_EXEC_BSD_RING2 (2 << I915_EXEC_BSD_SHIFT)
779 861
780/** Tell the kernel that the batchbuffer is processed by 862/** Tell the kernel that the batchbuffer is processed by
781 * the resource streamer. 863 * the resource streamer.
782 */ 864 */
783#define I915_EXEC_RESOURCE_STREAMER (1<<15) 865#define I915_EXEC_RESOURCE_STREAMER (1<<15)
784 866
785#define __I915_EXEC_UNKNOWN_FLAGS -(I915_EXEC_RESOURCE_STREAMER<<1) 867/* Setting I915_EXEC_FENCE_IN implies that lower_32_bits(rsvd2) represent
868 * a sync_file fd to wait upon (in a nonblocking manner) prior to executing
869 * the batch.
870 *
871 * Returns -EINVAL if the sync_file fd cannot be found.
872 */
873#define I915_EXEC_FENCE_IN (1<<16)
874
875/* Setting I915_EXEC_FENCE_OUT causes the ioctl to return a sync_file fd
876 * in the upper_32_bits(rsvd2) upon success. Ownership of the fd is given
877 * to the caller, and it should be close() after use. (The fd is a regular
878 * file descriptor and will be cleaned up on process termination. It holds
879 * a reference to the request, but nothing else.)
880 *
881 * The sync_file fd can be combined with other sync_file and passed either
882 * to execbuf using I915_EXEC_FENCE_IN, to atomic KMS ioctls (so that a flip
883 * will only occur after this request completes), or to other devices.
884 *
885 * Using I915_EXEC_FENCE_OUT requires use of
886 * DRM_IOCTL_I915_GEM_EXECBUFFER2_WR ioctl so that the result is written
887 * back to userspace. Failure to do so will cause the out-fence to always
888 * be reported as zero, and the real fence fd to be leaked.
889 */
890#define I915_EXEC_FENCE_OUT (1<<17)
891
892#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_OUT<<1))
786 893
787#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff) 894#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
788#define i915_execbuffer2_set_context_id(eb2, context) \ 895#define i915_execbuffer2_set_context_id(eb2, context) \
@@ -812,10 +919,49 @@ struct drm_i915_gem_busy {
812 /** Handle of the buffer to check for busy */ 919 /** Handle of the buffer to check for busy */
813 __u32 handle; 920 __u32 handle;
814 921
815 /** Return busy status (1 if busy, 0 if idle). 922 /** Return busy status
816 * The high word is used to indicate on which rings the object 923 *
817 * currently resides: 924 * A return of 0 implies that the object is idle (after
818 * 16:31 - busy (r or r/w) rings (16 render, 17 bsd, 18 blt, etc) 925 * having flushed any pending activity), and a non-zero return that
926 * the object is still in-flight on the GPU. (The GPU has not yet
927 * signaled completion for all pending requests that reference the
928 * object.) An object is guaranteed to become idle eventually (so
929 * long as no new GPU commands are executed upon it). Due to the
930 * asynchronous nature of the hardware, an object reported
931 * as busy may become idle before the ioctl is completed.
932 *
933 * Furthermore, if the object is busy, which engine is busy is only
934 * provided as a guide. There are race conditions which prevent the
935 * report of which engines are busy from being always accurate.
936 * However, the converse is not true. If the object is idle, the
937 * result of the ioctl, that all engines are idle, is accurate.
938 *
939 * The returned dword is split into two fields to indicate both
940 * the engines on which the object is being read, and the
941 * engine on which it is currently being written (if any).
942 *
943 * The low word (bits 0:15) indicate if the object is being written
944 * to by any engine (there can only be one, as the GEM implicit
945 * synchronisation rules force writes to be serialised). Only the
946 * engine for the last write is reported.
947 *
948 * The high word (bits 16:31) are a bitmask of which engines are
949 * currently reading from the object. Multiple engines may be
950 * reading from the object simultaneously.
951 *
952 * The value of each engine is the same as specified in the
953 * EXECBUFFER2 ioctl, i.e. I915_EXEC_RENDER, I915_EXEC_BSD etc.
954 * Note I915_EXEC_DEFAULT is a symbolic value and is mapped to
955 * the I915_EXEC_RENDER engine for execution, and so it is never
956 * reported as active itself. Some hardware may have parallel
957 * execution engines, e.g. multiple media engines, which are
958 * mapped to the same identifier in the EXECBUFFER2 ioctl and
959 * so are not separately reported for busyness.
960 *
961 * Caveat emptor:
962 * Only the boolean result of this query is reliable; that is whether
963 * the object is idle or busy. The report of which engines are busy
964 * should be only used as a heuristic.
819 */ 965 */
820 __u32 busy; 966 __u32 busy;
821}; 967};
@@ -864,6 +1010,7 @@ struct drm_i915_gem_caching {
864#define I915_TILING_NONE 0 1010#define I915_TILING_NONE 0
865#define I915_TILING_X 1 1011#define I915_TILING_X 1
866#define I915_TILING_Y 2 1012#define I915_TILING_Y 2
1013#define I915_TILING_LAST I915_TILING_Y
867 1014
868#define I915_BIT_6_SWIZZLE_NONE 0 1015#define I915_BIT_6_SWIZZLE_NONE 0
869#define I915_BIT_6_SWIZZLE_9 1 1016#define I915_BIT_6_SWIZZLE_9 1
@@ -1140,7 +1287,145 @@ struct drm_i915_gem_context_param {
1140#define I915_CONTEXT_PARAM_BAN_PERIOD 0x1 1287#define I915_CONTEXT_PARAM_BAN_PERIOD 0x1
1141#define I915_CONTEXT_PARAM_NO_ZEROMAP 0x2 1288#define I915_CONTEXT_PARAM_NO_ZEROMAP 0x2
1142#define I915_CONTEXT_PARAM_GTT_SIZE 0x3 1289#define I915_CONTEXT_PARAM_GTT_SIZE 0x3
1290#define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE 0x4
1291#define I915_CONTEXT_PARAM_BANNABLE 0x5
1143 __u64 value; 1292 __u64 value;
1144}; 1293};
1145 1294
1295enum drm_i915_oa_format {
1296 I915_OA_FORMAT_A13 = 1,
1297 I915_OA_FORMAT_A29,
1298 I915_OA_FORMAT_A13_B8_C8,
1299 I915_OA_FORMAT_B4_C8,
1300 I915_OA_FORMAT_A45_B8_C8,
1301 I915_OA_FORMAT_B4_C8_A16,
1302 I915_OA_FORMAT_C4_B8,
1303
1304 I915_OA_FORMAT_MAX /* non-ABI */
1305};
1306
1307enum drm_i915_perf_property_id {
1308 /**
1309 * Open the stream for a specific context handle (as used with
1310 * execbuffer2). A stream opened for a specific context this way
1311 * won't typically require root privileges.
1312 */
1313 DRM_I915_PERF_PROP_CTX_HANDLE = 1,
1314
1315 /**
1316 * A value of 1 requests the inclusion of raw OA unit reports as
1317 * part of stream samples.
1318 */
1319 DRM_I915_PERF_PROP_SAMPLE_OA,
1320
1321 /**
1322 * The value specifies which set of OA unit metrics should be
1323 * be configured, defining the contents of any OA unit reports.
1324 */
1325 DRM_I915_PERF_PROP_OA_METRICS_SET,
1326
1327 /**
1328 * The value specifies the size and layout of OA unit reports.
1329 */
1330 DRM_I915_PERF_PROP_OA_FORMAT,
1331
1332 /**
1333 * Specifying this property implicitly requests periodic OA unit
1334 * sampling and (at least on Haswell) the sampling frequency is derived
1335 * from this exponent as follows:
1336 *
1337 * 80ns * 2^(period_exponent + 1)
1338 */
1339 DRM_I915_PERF_PROP_OA_EXPONENT,
1340
1341 DRM_I915_PERF_PROP_MAX /* non-ABI */
1342};
1343
1344struct drm_i915_perf_open_param {
1345 __u32 flags;
1346#define I915_PERF_FLAG_FD_CLOEXEC (1<<0)
1347#define I915_PERF_FLAG_FD_NONBLOCK (1<<1)
1348#define I915_PERF_FLAG_DISABLED (1<<2)
1349
1350 /** The number of u64 (id, value) pairs */
1351 __u32 num_properties;
1352
1353 /**
1354 * Pointer to array of u64 (id, value) pairs configuring the stream
1355 * to open.
1356 */
1357 __u64 properties_ptr;
1358};
1359
1360/**
1361 * Enable data capture for a stream that was either opened in a disabled state
1362 * via I915_PERF_FLAG_DISABLED or was later disabled via
1363 * I915_PERF_IOCTL_DISABLE.
1364 *
1365 * It is intended to be cheaper to disable and enable a stream than it may be
1366 * to close and re-open a stream with the same configuration.
1367 *
1368 * It's undefined whether any pending data for the stream will be lost.
1369 */
1370#define I915_PERF_IOCTL_ENABLE _IO('i', 0x0)
1371
1372/**
1373 * Disable data capture for a stream.
1374 *
1375 * It is an error to try and read a stream that is disabled.
1376 */
1377#define I915_PERF_IOCTL_DISABLE _IO('i', 0x1)
1378
1379/**
1380 * Common to all i915 perf records
1381 */
1382struct drm_i915_perf_record_header {
1383 __u32 type;
1384 __u16 pad;
1385 __u16 size;
1386};
1387
1388enum drm_i915_perf_record_type {
1389
1390 /**
1391 * Samples are the work horse record type whose contents are extensible
1392 * and defined when opening an i915 perf stream based on the given
1393 * properties.
1394 *
1395 * Boolean properties following the naming convention
1396 * DRM_I915_PERF_SAMPLE_xyz_PROP request the inclusion of 'xyz' data in
1397 * every sample.
1398 *
1399 * The order of these sample properties given by userspace has no
1400 * affect on the ordering of data within a sample. The order is
1401 * documented here.
1402 *
1403 * struct {
1404 * struct drm_i915_perf_record_header header;
1405 *
1406 * { u32 oa_report[]; } && DRM_I915_PERF_PROP_SAMPLE_OA
1407 * };
1408 */
1409 DRM_I915_PERF_RECORD_SAMPLE = 1,
1410
1411 /*
1412 * Indicates that one or more OA reports were not written by the
1413 * hardware. This can happen for example if an MI_REPORT_PERF_COUNT
1414 * command collides with periodic sampling - which would be more likely
1415 * at higher sampling frequencies.
1416 */
1417 DRM_I915_PERF_RECORD_OA_REPORT_LOST = 2,
1418
1419 /**
1420 * An error occurred that resulted in all pending OA reports being lost.
1421 */
1422 DRM_I915_PERF_RECORD_OA_BUFFER_LOST = 3,
1423
1424 DRM_I915_PERF_RECORD_MAX /* non-ABI */
1425};
1426
1427#if defined(__cplusplus)
1428}
1429#endif
1430
1146#endif /* _I915_DRM_H_ */ 1431#endif /* _I915_DRM_H_ */
diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h
index cd31794f..f09cc04c 100644
--- a/include/drm/radeon_drm.h
+++ b/include/drm/radeon_drm.h
@@ -35,6 +35,10 @@
35 35
36#include "drm.h" 36#include "drm.h"
37 37
38#if defined(__cplusplus)
39extern "C" {
40#endif
41
38/* WARNING: If you change any of these defines, make sure to change the 42/* WARNING: If you change any of these defines, make sure to change the
39 * defines in the X server file (radeon_sarea.h) 43 * defines in the X server file (radeon_sarea.h)
40 */ 44 */
@@ -511,6 +515,7 @@ typedef struct {
511#define DRM_RADEON_GEM_BUSY 0x2a 515#define DRM_RADEON_GEM_BUSY 0x2a
512#define DRM_RADEON_GEM_VA 0x2b 516#define DRM_RADEON_GEM_VA 0x2b
513#define DRM_RADEON_GEM_OP 0x2c 517#define DRM_RADEON_GEM_OP 0x2c
518#define DRM_RADEON_GEM_USERPTR 0x2d
514 519
515#define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t) 520#define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t)
516#define DRM_IOCTL_RADEON_CP_START DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_START) 521#define DRM_IOCTL_RADEON_CP_START DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_START)
@@ -554,6 +559,7 @@ typedef struct {
554#define DRM_IOCTL_RADEON_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_BUSY, struct drm_radeon_gem_busy) 559#define DRM_IOCTL_RADEON_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_BUSY, struct drm_radeon_gem_busy)
555#define DRM_IOCTL_RADEON_GEM_VA DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_VA, struct drm_radeon_gem_va) 560#define DRM_IOCTL_RADEON_GEM_VA DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_VA, struct drm_radeon_gem_va)
556#define DRM_IOCTL_RADEON_GEM_OP DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_OP, struct drm_radeon_gem_op) 561#define DRM_IOCTL_RADEON_GEM_OP DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_OP, struct drm_radeon_gem_op)
562#define DRM_IOCTL_RADEON_GEM_USERPTR DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_USERPTR, struct drm_radeon_gem_userptr)
557 563
558typedef struct drm_radeon_init { 564typedef struct drm_radeon_init {
559 enum { 565 enum {
@@ -796,7 +802,13 @@ struct drm_radeon_gem_info {
796 uint64_t vram_visible; 802 uint64_t vram_visible;
797}; 803};
798 804
799#define RADEON_GEM_NO_BACKING_STORE 1 805#define RADEON_GEM_NO_BACKING_STORE (1 << 0)
806#define RADEON_GEM_GTT_UC (1 << 1)
807#define RADEON_GEM_GTT_WC (1 << 2)
808/* BO is expected to be accessed by the CPU */
809#define RADEON_GEM_CPU_ACCESS (1 << 3)
810/* CPU access is not expected to work for this BO */
811#define RADEON_GEM_NO_CPU_ACCESS (1 << 4)
800 812
801struct drm_radeon_gem_create { 813struct drm_radeon_gem_create {
802 uint64_t size; 814 uint64_t size;
@@ -806,6 +818,23 @@ struct drm_radeon_gem_create {
806 uint32_t flags; 818 uint32_t flags;
807}; 819};
808 820
821/*
822 * This is not a reliable API and you should expect it to fail for any
823 * number of reasons and have fallback path that do not use userptr to
824 * perform any operation.
825 */
826#define RADEON_GEM_USERPTR_READONLY (1 << 0)
827#define RADEON_GEM_USERPTR_ANONONLY (1 << 1)
828#define RADEON_GEM_USERPTR_VALIDATE (1 << 2)
829#define RADEON_GEM_USERPTR_REGISTER (1 << 3)
830
831struct drm_radeon_gem_userptr {
832 uint64_t addr;
833 uint64_t size;
834 uint32_t flags;
835 uint32_t handle;
836};
837
809#define RADEON_TILING_MACRO 0x1 838#define RADEON_TILING_MACRO 0x1
810#define RADEON_TILING_MICRO 0x2 839#define RADEON_TILING_MICRO 0x2
811#define RADEON_TILING_SWAP_16BIT 0x4 840#define RADEON_TILING_SWAP_16BIT 0x4
@@ -943,6 +972,7 @@ struct drm_radeon_cs_chunk {
943}; 972};
944 973
945/* drm_radeon_cs_reloc.flags */ 974/* drm_radeon_cs_reloc.flags */
975#define RADEON_RELOC_PRIO_MASK (0xf << 0)
946 976
947struct drm_radeon_cs_reloc { 977struct drm_radeon_cs_reloc {
948 uint32_t handle; 978 uint32_t handle;
@@ -1008,7 +1038,13 @@ struct drm_radeon_cs {
1008#define RADEON_INFO_NUM_BYTES_MOVED 0x1d 1038#define RADEON_INFO_NUM_BYTES_MOVED 0x1d
1009#define RADEON_INFO_VRAM_USAGE 0x1e 1039#define RADEON_INFO_VRAM_USAGE 0x1e
1010#define RADEON_INFO_GTT_USAGE 0x1f 1040#define RADEON_INFO_GTT_USAGE 0x1f
1011 1041#define RADEON_INFO_ACTIVE_CU_COUNT 0x20
1042#define RADEON_INFO_CURRENT_GPU_TEMP 0x21
1043#define RADEON_INFO_CURRENT_GPU_SCLK 0x22
1044#define RADEON_INFO_CURRENT_GPU_MCLK 0x23
1045#define RADEON_INFO_READ_REG 0x24
1046#define RADEON_INFO_VA_UNMAP_WORKING 0x25
1047#define RADEON_INFO_GPU_RESET_COUNTER 0x26
1012 1048
1013struct drm_radeon_info { 1049struct drm_radeon_info {
1014 uint32_t request; 1050 uint32_t request;
@@ -1034,13 +1070,10 @@ struct drm_radeon_info {
1034#define SI_TILE_MODE_DEPTH_STENCIL_2D_4AA 3 1070#define SI_TILE_MODE_DEPTH_STENCIL_2D_4AA 3
1035#define SI_TILE_MODE_DEPTH_STENCIL_2D_8AA 2 1071#define SI_TILE_MODE_DEPTH_STENCIL_2D_8AA 2
1036 1072
1037#define CIK_TILE_MODE_COLOR_2D 14
1038#define CIK_TILE_MODE_COLOR_2D_SCANOUT 10
1039#define CIK_TILE_MODE_DEPTH_STENCIL_2D_TILESPLIT_64 0
1040#define CIK_TILE_MODE_DEPTH_STENCIL_2D_TILESPLIT_128 1
1041#define CIK_TILE_MODE_DEPTH_STENCIL_2D_TILESPLIT_256 2
1042#define CIK_TILE_MODE_DEPTH_STENCIL_2D_TILESPLIT_512 3
1043#define CIK_TILE_MODE_DEPTH_STENCIL_2D_TILESPLIT_ROW_SIZE 4
1044#define CIK_TILE_MODE_DEPTH_STENCIL_1D 5 1073#define CIK_TILE_MODE_DEPTH_STENCIL_1D 5
1045 1074
1075#if defined(__cplusplus)
1076}
1077#endif
1078
1046#endif 1079#endif
diff --git a/include/drm/vc4_drm.h b/include/drm/vc4_drm.h
new file mode 100644
index 00000000..319881d8
--- /dev/null
+++ b/include/drm/vc4_drm.h
@@ -0,0 +1,302 @@
1/*
2 * Copyright © 2014-2015 Broadcom
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24#ifndef _VC4_DRM_H_
25#define _VC4_DRM_H_
26
27#include "drm.h"
28
29#if defined(__cplusplus)
30extern "C" {
31#endif
32
33#define DRM_VC4_SUBMIT_CL 0x00
34#define DRM_VC4_WAIT_SEQNO 0x01
35#define DRM_VC4_WAIT_BO 0x02
36#define DRM_VC4_CREATE_BO 0x03
37#define DRM_VC4_MMAP_BO 0x04
38#define DRM_VC4_CREATE_SHADER_BO 0x05
39#define DRM_VC4_GET_HANG_STATE 0x06
40#define DRM_VC4_GET_PARAM 0x07
41
42#define DRM_IOCTL_VC4_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SUBMIT_CL, struct drm_vc4_submit_cl)
43#define DRM_IOCTL_VC4_WAIT_SEQNO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_SEQNO, struct drm_vc4_wait_seqno)
44#define DRM_IOCTL_VC4_WAIT_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_BO, struct drm_vc4_wait_bo)
45#define DRM_IOCTL_VC4_CREATE_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_BO, struct drm_vc4_create_bo)
46#define DRM_IOCTL_VC4_MMAP_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_MMAP_BO, struct drm_vc4_mmap_bo)
47#define DRM_IOCTL_VC4_CREATE_SHADER_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_SHADER_BO, struct drm_vc4_create_shader_bo)
48#define DRM_IOCTL_VC4_GET_HANG_STATE DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_HANG_STATE, struct drm_vc4_get_hang_state)
49#define DRM_IOCTL_VC4_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_PARAM, struct drm_vc4_get_param)
50
51struct drm_vc4_submit_rcl_surface {
52 __u32 hindex; /* Handle index, or ~0 if not present. */
53 __u32 offset; /* Offset to start of buffer. */
54 /*
55 * Bits for either render config (color_write) or load/store packet.
56 * Bits should all be 0 for MSAA load/stores.
57 */
58 __u16 bits;
59
60#define VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES (1 << 0)
61 __u16 flags;
62};
63
64/**
65 * struct drm_vc4_submit_cl - ioctl argument for submitting commands to the 3D
66 * engine.
67 *
68 * Drivers typically use GPU BOs to store batchbuffers / command lists and
69 * their associated state. However, because the VC4 lacks an MMU, we have to
70 * do validation of memory accesses by the GPU commands. If we were to store
71 * our commands in BOs, we'd need to do uncached readback from them to do the
72 * validation process, which is too expensive. Instead, userspace accumulates
73 * commands and associated state in plain memory, then the kernel copies the
74 * data to its own address space, and then validates and stores it in a GPU
75 * BO.
76 */
77struct drm_vc4_submit_cl {
78 /* Pointer to the binner command list.
79 *
80 * This is the first set of commands executed, which runs the
81 * coordinate shader to determine where primitives land on the screen,
82 * then writes out the state updates and draw calls necessary per tile
83 * to the tile allocation BO.
84 */
85 __u64 bin_cl;
86
87 /* Pointer to the shader records.
88 *
89 * Shader records are the structures read by the hardware that contain
90 * pointers to uniforms, shaders, and vertex attributes. The
91 * reference to the shader record has enough information to determine
92 * how many pointers are necessary (fixed number for shaders/uniforms,
93 * and an attribute count), so those BO indices into bo_handles are
94 * just stored as __u32s before each shader record passed in.
95 */
96 __u64 shader_rec;
97
98 /* Pointer to uniform data and texture handles for the textures
99 * referenced by the shader.
100 *
101 * For each shader state record, there is a set of uniform data in the
102 * order referenced by the record (FS, VS, then CS). Each set of
103 * uniform data has a __u32 index into bo_handles per texture
104 * sample operation, in the order the QPU_W_TMUn_S writes appear in
105 * the program. Following the texture BO handle indices is the actual
106 * uniform data.
107 *
108 * The individual uniform state blocks don't have sizes passed in,
109 * because the kernel has to determine the sizes anyway during shader
110 * code validation.
111 */
112 __u64 uniforms;
113 __u64 bo_handles;
114
115 /* Size in bytes of the binner command list. */
116 __u32 bin_cl_size;
117 /* Size in bytes of the set of shader records. */
118 __u32 shader_rec_size;
119 /* Number of shader records.
120 *
121 * This could just be computed from the contents of shader_records and
122 * the address bits of references to them from the bin CL, but it
123 * keeps the kernel from having to resize some allocations it makes.
124 */
125 __u32 shader_rec_count;
126 /* Size in bytes of the uniform state. */
127 __u32 uniforms_size;
128
129 /* Number of BO handles passed in (size is that times 4). */
130 __u32 bo_handle_count;
131
132 /* RCL setup: */
133 __u16 width;
134 __u16 height;
135 __u8 min_x_tile;
136 __u8 min_y_tile;
137 __u8 max_x_tile;
138 __u8 max_y_tile;
139 struct drm_vc4_submit_rcl_surface color_read;
140 struct drm_vc4_submit_rcl_surface color_write;
141 struct drm_vc4_submit_rcl_surface zs_read;
142 struct drm_vc4_submit_rcl_surface zs_write;
143 struct drm_vc4_submit_rcl_surface msaa_color_write;
144 struct drm_vc4_submit_rcl_surface msaa_zs_write;
145 __u32 clear_color[2];
146 __u32 clear_z;
147 __u8 clear_s;
148
149 __u32 pad:24;
150
151#define VC4_SUBMIT_CL_USE_CLEAR_COLOR (1 << 0)
152 __u32 flags;
153
154 /* Returned value of the seqno of this render job (for the
155 * wait ioctl).
156 */
157 __u64 seqno;
158};
159
160/**
161 * struct drm_vc4_wait_seqno - ioctl argument for waiting for
162 * DRM_VC4_SUBMIT_CL completion using its returned seqno.
163 *
164 * timeout_ns is the timeout in nanoseconds, where "0" means "don't
165 * block, just return the status."
166 */
167struct drm_vc4_wait_seqno {
168 __u64 seqno;
169 __u64 timeout_ns;
170};
171
172/**
173 * struct drm_vc4_wait_bo - ioctl argument for waiting for
174 * completion of the last DRM_VC4_SUBMIT_CL on a BO.
175 *
176 * This is useful for cases where multiple processes might be
177 * rendering to a BO and you want to wait for all rendering to be
178 * completed.
179 */
180struct drm_vc4_wait_bo {
181 __u32 handle;
182 __u32 pad;
183 __u64 timeout_ns;
184};
185
186/**
187 * struct drm_vc4_create_bo - ioctl argument for creating VC4 BOs.
188 *
189 * There are currently no values for the flags argument, but it may be
190 * used in a future extension.
191 */
192struct drm_vc4_create_bo {
193 __u32 size;
194 __u32 flags;
195 /** Returned GEM handle for the BO. */
196 __u32 handle;
197 __u32 pad;
198};
199
200/**
201 * struct drm_vc4_mmap_bo - ioctl argument for mapping VC4 BOs.
202 *
203 * This doesn't actually perform an mmap. Instead, it returns the
204 * offset you need to use in an mmap on the DRM device node. This
205 * means that tools like valgrind end up knowing about the mapped
206 * memory.
207 *
208 * There are currently no values for the flags argument, but it may be
209 * used in a future extension.
210 */
211struct drm_vc4_mmap_bo {
212 /** Handle for the object being mapped. */
213 __u32 handle;
214 __u32 flags;
215 /** offset into the drm node to use for subsequent mmap call. */
216 __u64 offset;
217};
218
219/**
220 * struct drm_vc4_create_shader_bo - ioctl argument for creating VC4
221 * shader BOs.
222 *
223 * Since allowing a shader to be overwritten while it's also being
224 * executed from would allow privlege escalation, shaders must be
225 * created using this ioctl, and they can't be mmapped later.
226 */
227struct drm_vc4_create_shader_bo {
228 /* Size of the data argument. */
229 __u32 size;
230 /* Flags, currently must be 0. */
231 __u32 flags;
232
233 /* Pointer to the data. */
234 __u64 data;
235
236 /** Returned GEM handle for the BO. */
237 __u32 handle;
238 /* Pad, must be 0. */
239 __u32 pad;
240};
241
242struct drm_vc4_get_hang_state_bo {
243 __u32 handle;
244 __u32 paddr;
245 __u32 size;
246 __u32 pad;
247};
248
249/**
250 * struct drm_vc4_hang_state - ioctl argument for collecting state
251 * from a GPU hang for analysis.
252*/
253struct drm_vc4_get_hang_state {
254 /** Pointer to array of struct drm_vc4_get_hang_state_bo. */
255 __u64 bo;
256 /**
257 * On input, the size of the bo array. Output is the number
258 * of bos to be returned.
259 */
260 __u32 bo_count;
261
262 __u32 start_bin, start_render;
263
264 __u32 ct0ca, ct0ea;
265 __u32 ct1ca, ct1ea;
266 __u32 ct0cs, ct1cs;
267 __u32 ct0ra0, ct1ra0;
268
269 __u32 bpca, bpcs;
270 __u32 bpoa, bpos;
271
272 __u32 vpmbase;
273
274 __u32 dbge;
275 __u32 fdbgo;
276 __u32 fdbgb;
277 __u32 fdbgr;
278 __u32 fdbgs;
279 __u32 errstat;
280
281 /* Pad that we may save more registers into in the future. */
282 __u32 pad[16];
283};
284
285#define DRM_VC4_PARAM_V3D_IDENT0 0
286#define DRM_VC4_PARAM_V3D_IDENT1 1
287#define DRM_VC4_PARAM_V3D_IDENT2 2
288#define DRM_VC4_PARAM_SUPPORTS_BRANCHES 3
289#define DRM_VC4_PARAM_SUPPORTS_ETC1 4
290#define DRM_VC4_PARAM_SUPPORTS_THREADED_FS 5
291
292struct drm_vc4_get_param {
293 __u32 param;
294 __u32 pad;
295 __u64 value;
296};
297
298#if defined(__cplusplus)
299}
300#endif
301
302#endif /* _VC4_DRM_H_ */
diff --git a/include/drm/virtgpu_drm.h b/include/drm/virtgpu_drm.h
index abf11c58..91a31ffe 100644
--- a/include/drm/virtgpu_drm.h
+++ b/include/drm/virtgpu_drm.h
@@ -24,13 +24,16 @@
24#ifndef VIRTGPU_DRM_H 24#ifndef VIRTGPU_DRM_H
25#define VIRTGPU_DRM_H 25#define VIRTGPU_DRM_H
26 26
27#include <stddef.h> 27#include "drm.h"
28#include "drm/drm.h" 28
29#if defined(__cplusplus)
30extern "C" {
31#endif
29 32
30/* Please note that modifications to all structs defined here are 33/* Please note that modifications to all structs defined here are
31 * subject to backwards-compatibility constraints. 34 * subject to backwards-compatibility constraints.
32 * 35 *
33 * Do not use pointers, use uint64_t instead for 32 bit / 64 bit user/kernel 36 * Do not use pointers, use __u64 instead for 32 bit / 64 bit user/kernel
34 * compatibility Keep fields aligned to their size 37 * compatibility Keep fields aligned to their size
35 */ 38 */
36 39
@@ -45,88 +48,88 @@
45#define DRM_VIRTGPU_GET_CAPS 0x09 48#define DRM_VIRTGPU_GET_CAPS 0x09
46 49
47struct drm_virtgpu_map { 50struct drm_virtgpu_map {
48 uint64_t offset; /* use for mmap system call */ 51 __u64 offset; /* use for mmap system call */
49 uint32_t handle; 52 __u32 handle;
50 uint32_t pad; 53 __u32 pad;
51}; 54};
52 55
53struct drm_virtgpu_execbuffer { 56struct drm_virtgpu_execbuffer {
54 uint32_t flags; /* for future use */ 57 __u32 flags; /* for future use */
55 uint32_t size; 58 __u32 size;
56 uint64_t command; /* void* */ 59 __u64 command; /* void* */
57 uint64_t bo_handles; 60 __u64 bo_handles;
58 uint32_t num_bo_handles; 61 __u32 num_bo_handles;
59 uint32_t pad; 62 __u32 pad;
60}; 63};
61 64
62#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */ 65#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
63 66
64struct drm_virtgpu_getparam { 67struct drm_virtgpu_getparam {
65 uint64_t param; 68 __u64 param;
66 uint64_t value; 69 __u64 value;
67}; 70};
68 71
69/* NO_BO flags? NO resource flag? */ 72/* NO_BO flags? NO resource flag? */
70/* resource flag for y_0_top */ 73/* resource flag for y_0_top */
71struct drm_virtgpu_resource_create { 74struct drm_virtgpu_resource_create {
72 uint32_t target; 75 __u32 target;
73 uint32_t format; 76 __u32 format;
74 uint32_t bind; 77 __u32 bind;
75 uint32_t width; 78 __u32 width;
76 uint32_t height; 79 __u32 height;
77 uint32_t depth; 80 __u32 depth;
78 uint32_t array_size; 81 __u32 array_size;
79 uint32_t last_level; 82 __u32 last_level;
80 uint32_t nr_samples; 83 __u32 nr_samples;
81 uint32_t flags; 84 __u32 flags;
82 uint32_t bo_handle; /* if this is set - recreate a new resource attached to this bo ? */ 85 __u32 bo_handle; /* if this is set - recreate a new resource attached to this bo ? */
83 uint32_t res_handle; /* returned by kernel */ 86 __u32 res_handle; /* returned by kernel */
84 uint32_t size; /* validate transfer in the host */ 87 __u32 size; /* validate transfer in the host */
85 uint32_t stride; /* validate transfer in the host */ 88 __u32 stride; /* validate transfer in the host */
86}; 89};
87 90
88struct drm_virtgpu_resource_info { 91struct drm_virtgpu_resource_info {
89 uint32_t bo_handle; 92 __u32 bo_handle;
90 uint32_t res_handle; 93 __u32 res_handle;
91 uint32_t size; 94 __u32 size;
92 uint32_t stride; 95 __u32 stride;
93}; 96};
94 97
95struct drm_virtgpu_3d_box { 98struct drm_virtgpu_3d_box {
96 uint32_t x; 99 __u32 x;
97 uint32_t y; 100 __u32 y;
98 uint32_t z; 101 __u32 z;
99 uint32_t w; 102 __u32 w;
100 uint32_t h; 103 __u32 h;
101 uint32_t d; 104 __u32 d;
102}; 105};
103 106
104struct drm_virtgpu_3d_transfer_to_host { 107struct drm_virtgpu_3d_transfer_to_host {
105 uint32_t bo_handle; 108 __u32 bo_handle;
106 struct drm_virtgpu_3d_box box; 109 struct drm_virtgpu_3d_box box;
107 uint32_t level; 110 __u32 level;
108 uint32_t offset; 111 __u32 offset;
109}; 112};
110 113
111struct drm_virtgpu_3d_transfer_from_host { 114struct drm_virtgpu_3d_transfer_from_host {
112 uint32_t bo_handle; 115 __u32 bo_handle;
113 struct drm_virtgpu_3d_box box; 116 struct drm_virtgpu_3d_box box;
114 uint32_t level; 117 __u32 level;
115 uint32_t offset; 118 __u32 offset;
116}; 119};
117 120
118#define VIRTGPU_WAIT_NOWAIT 1 /* like it */ 121#define VIRTGPU_WAIT_NOWAIT 1 /* like it */
119struct drm_virtgpu_3d_wait { 122struct drm_virtgpu_3d_wait {
120 uint32_t handle; /* 0 is an invalid handle */ 123 __u32 handle; /* 0 is an invalid handle */
121 uint32_t flags; 124 __u32 flags;
122}; 125};
123 126
124struct drm_virtgpu_get_caps { 127struct drm_virtgpu_get_caps {
125 uint32_t cap_set_id; 128 __u32 cap_set_id;
126 uint32_t cap_set_ver; 129 __u32 cap_set_ver;
127 uint64_t addr; 130 __u64 addr;
128 uint32_t size; 131 __u32 size;
129 uint32_t pad; 132 __u32 pad;
130}; 133};
131 134
132#define DRM_IOCTL_VIRTGPU_MAP \ 135#define DRM_IOCTL_VIRTGPU_MAP \
@@ -164,4 +167,8 @@ struct drm_virtgpu_get_caps {
164 DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_GET_CAPS, \ 167 DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_GET_CAPS, \
165 struct drm_virtgpu_get_caps) 168 struct drm_virtgpu_get_caps)
166 169
170#if defined(__cplusplus)
171}
172#endif
173
167#endif 174#endif
diff --git a/include/drm/vmwgfx_drm.h b/include/drm/vmwgfx_drm.h
index 4d084239..5b68b4d1 100644
--- a/include/drm/vmwgfx_drm.h
+++ b/include/drm/vmwgfx_drm.h
@@ -1,6 +1,6 @@
1/************************************************************************** 1/**************************************************************************
2 * 2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA 3 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved. 4 * All Rights Reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -28,10 +28,11 @@
28#ifndef __VMWGFX_DRM_H__ 28#ifndef __VMWGFX_DRM_H__
29#define __VMWGFX_DRM_H__ 29#define __VMWGFX_DRM_H__
30 30
31#include "drm.h"
32
31#define DRM_VMW_MAX_SURFACE_FACES 6 33#define DRM_VMW_MAX_SURFACE_FACES 6
32#define DRM_VMW_MAX_MIP_LEVELS 24 34#define DRM_VMW_MAX_MIP_LEVELS 24
33 35
34#define DRM_VMW_EXT_NAME_LEN 128
35 36
36#define DRM_VMW_GET_PARAM 0 37#define DRM_VMW_GET_PARAM 0
37#define DRM_VMW_ALLOC_DMABUF 1 38#define DRM_VMW_ALLOC_DMABUF 1
@@ -48,11 +49,20 @@
48#define DRM_VMW_UNREF_SURFACE 10 49#define DRM_VMW_UNREF_SURFACE 10
49#define DRM_VMW_REF_SURFACE 11 50#define DRM_VMW_REF_SURFACE 11
50#define DRM_VMW_EXECBUF 12 51#define DRM_VMW_EXECBUF 12
51#define DRM_VMW_FIFO_DEBUG 13 52#define DRM_VMW_GET_3D_CAP 13
52#define DRM_VMW_FENCE_WAIT 14 53#define DRM_VMW_FENCE_WAIT 14
53/* guarded by minor version >= 2 */ 54#define DRM_VMW_FENCE_SIGNALED 15
54#define DRM_VMW_UPDATE_LAYOUT 15 55#define DRM_VMW_FENCE_UNREF 16
55 56#define DRM_VMW_FENCE_EVENT 17
57#define DRM_VMW_PRESENT 18
58#define DRM_VMW_PRESENT_READBACK 19
59#define DRM_VMW_UPDATE_LAYOUT 20
60#define DRM_VMW_CREATE_SHADER 21
61#define DRM_VMW_UNREF_SHADER 22
62#define DRM_VMW_GB_SURFACE_CREATE 23
63#define DRM_VMW_GB_SURFACE_REF 24
64#define DRM_VMW_SYNCCPU 25
65#define DRM_VMW_CREATE_EXTENDED_CONTEXT 26
56 66
57/*************************************************************************/ 67/*************************************************************************/
58/** 68/**
@@ -69,66 +79,39 @@
69#define DRM_VMW_PARAM_NUM_STREAMS 0 79#define DRM_VMW_PARAM_NUM_STREAMS 0
70#define DRM_VMW_PARAM_NUM_FREE_STREAMS 1 80#define DRM_VMW_PARAM_NUM_FREE_STREAMS 1
71#define DRM_VMW_PARAM_3D 2 81#define DRM_VMW_PARAM_3D 2
72#define DRM_VMW_PARAM_FIFO_OFFSET 3 82#define DRM_VMW_PARAM_HW_CAPS 3
73#define DRM_VMW_PARAM_HW_CAPS 4 83#define DRM_VMW_PARAM_FIFO_CAPS 4
74#define DRM_VMW_PARAM_FIFO_CAPS 5 84#define DRM_VMW_PARAM_MAX_FB_SIZE 5
75 85#define DRM_VMW_PARAM_FIFO_HW_VERSION 6
76/** 86#define DRM_VMW_PARAM_MAX_SURF_MEMORY 7
77 * struct drm_vmw_getparam_arg 87#define DRM_VMW_PARAM_3D_CAPS_SIZE 8
78 * 88#define DRM_VMW_PARAM_MAX_MOB_MEMORY 9
79 * @value: Returned value. //Out 89#define DRM_VMW_PARAM_MAX_MOB_SIZE 10
80 * @param: Parameter to query. //In. 90#define DRM_VMW_PARAM_SCREEN_TARGET 11
81 * 91#define DRM_VMW_PARAM_DX 12
82 * Argument to the DRM_VMW_GET_PARAM Ioctl.
83 */
84
85struct drm_vmw_getparam_arg {
86 uint64_t value;
87 uint32_t param;
88 uint32_t pad64;
89};
90
91/*************************************************************************/
92/**
93 * DRM_VMW_EXTENSION - Query device extensions.
94 */
95 92
96/** 93/**
97 * struct drm_vmw_extension_rep 94 * enum drm_vmw_handle_type - handle type for ref ioctls
98 *
99 * @exists: The queried extension exists.
100 * @driver_ioctl_offset: Ioctl number of the first ioctl in the extension.
101 * @driver_sarea_offset: Offset to any space in the DRI SAREA
102 * used by the extension.
103 * @major: Major version number of the extension.
104 * @minor: Minor version number of the extension.
105 * @pl: Patch level version number of the extension.
106 * 95 *
107 * Output argument to the DRM_VMW_EXTENSION Ioctl.
108 */ 96 */
109 97enum drm_vmw_handle_type {
110struct drm_vmw_extension_rep { 98 DRM_VMW_HANDLE_LEGACY = 0,
111 int32_t exists; 99 DRM_VMW_HANDLE_PRIME = 1
112 uint32_t driver_ioctl_offset;
113 uint32_t driver_sarea_offset;
114 uint32_t major;
115 uint32_t minor;
116 uint32_t pl;
117 uint32_t pad64;
118}; 100};
119 101
120/** 102/**
121 * union drm_vmw_extension_arg 103 * struct drm_vmw_getparam_arg
122 * 104 *
123 * @extension - Ascii name of the extension to be queried. //In 105 * @value: Returned value. //Out
124 * @rep - Reply as defined above. //Out 106 * @param: Parameter to query. //In.
125 * 107 *
126 * Argument to the DRM_VMW_EXTENSION Ioctl. 108 * Argument to the DRM_VMW_GET_PARAM Ioctl.
127 */ 109 */
128 110
129union drm_vmw_extension_arg { 111struct drm_vmw_getparam_arg {
130 char extension[DRM_VMW_EXT_NAME_LEN]; 112 __u64 value;
131 struct drm_vmw_extension_rep rep; 113 __u32 param;
114 __u32 pad64;
132}; 115};
133 116
134/*************************************************************************/ 117/*************************************************************************/
@@ -149,8 +132,8 @@ union drm_vmw_extension_arg {
149 */ 132 */
150 133
151struct drm_vmw_context_arg { 134struct drm_vmw_context_arg {
152 int32_t cid; 135 __s32 cid;
153 uint32_t pad64; 136 __u32 pad64;
154}; 137};
155 138
156/*************************************************************************/ 139/*************************************************************************/
@@ -180,7 +163,7 @@ struct drm_vmw_context_arg {
180 * @mip_levels: Number of mip levels for each face. 163 * @mip_levels: Number of mip levels for each face.
181 * An unused face should have 0 encoded. 164 * An unused face should have 0 encoded.
182 * @size_addr: Address of a user-space array of sruct drm_vmw_size 165 * @size_addr: Address of a user-space array of sruct drm_vmw_size
183 * cast to an uint64_t for 32-64 bit compatibility. 166 * cast to an __u64 for 32-64 bit compatibility.
184 * The size of the array should equal the total number of mipmap levels. 167 * The size of the array should equal the total number of mipmap levels.
185 * @shareable: Boolean whether other clients (as identified by file descriptors) 168 * @shareable: Boolean whether other clients (as identified by file descriptors)
186 * may reference this surface. 169 * may reference this surface.
@@ -192,18 +175,19 @@ struct drm_vmw_context_arg {
192 */ 175 */
193 176
194struct drm_vmw_surface_create_req { 177struct drm_vmw_surface_create_req {
195 uint32_t flags; 178 __u32 flags;
196 uint32_t format; 179 __u32 format;
197 uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES]; 180 __u32 mip_levels[DRM_VMW_MAX_SURFACE_FACES];
198 uint64_t size_addr; 181 __u64 size_addr;
199 int32_t shareable; 182 __s32 shareable;
200 int32_t scanout; 183 __s32 scanout;
201}; 184};
202 185
203/** 186/**
204 * struct drm_wmv_surface_arg 187 * struct drm_wmv_surface_arg
205 * 188 *
206 * @sid: Surface id of created surface or surface to destroy or reference. 189 * @sid: Surface id of created surface or surface to destroy or reference.
190 * @handle_type: Handle type for DRM_VMW_REF_SURFACE Ioctl.
207 * 191 *
208 * Output data from the DRM_VMW_CREATE_SURFACE Ioctl. 192 * Output data from the DRM_VMW_CREATE_SURFACE Ioctl.
209 * Input argument to the DRM_VMW_UNREF_SURFACE Ioctl. 193 * Input argument to the DRM_VMW_UNREF_SURFACE Ioctl.
@@ -211,8 +195,8 @@ struct drm_vmw_surface_create_req {
211 */ 195 */
212 196
213struct drm_vmw_surface_arg { 197struct drm_vmw_surface_arg {
214 int32_t sid; 198 __s32 sid;
215 uint32_t pad64; 199 enum drm_vmw_handle_type handle_type;
216}; 200};
217 201
218/** 202/**
@@ -227,10 +211,10 @@ struct drm_vmw_surface_arg {
227 */ 211 */
228 212
229struct drm_vmw_size { 213struct drm_vmw_size {
230 uint32_t width; 214 __u32 width;
231 uint32_t height; 215 __u32 height;
232 uint32_t depth; 216 __u32 depth;
233 uint32_t pad64; 217 __u32 pad64;
234}; 218};
235 219
236/** 220/**
@@ -291,20 +275,20 @@ union drm_vmw_surface_reference_arg {
291 * DRM_VMW_EXECBUF 275 * DRM_VMW_EXECBUF
292 * 276 *
293 * Submit a command buffer for execution on the host, and return a 277 * Submit a command buffer for execution on the host, and return a
294 * fence sequence that when signaled, indicates that the command buffer has 278 * fence seqno that when signaled, indicates that the command buffer has
295 * executed. 279 * executed.
296 */ 280 */
297 281
298/** 282/**
299 * struct drm_vmw_execbuf_arg 283 * struct drm_vmw_execbuf_arg
300 * 284 *
301 * @commands: User-space address of a command buffer cast to an uint64_t. 285 * @commands: User-space address of a command buffer cast to an __u64.
302 * @command-size: Size in bytes of the command buffer. 286 * @command-size: Size in bytes of the command buffer.
303 * @throttle-us: Sleep until software is less than @throttle_us 287 * @throttle-us: Sleep until software is less than @throttle_us
304 * microseconds ahead of hardware. The driver may round this value 288 * microseconds ahead of hardware. The driver may round this value
305 * to the nearest kernel tick. 289 * to the nearest kernel tick.
306 * @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to an 290 * @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to an
307 * uint64_t. 291 * __u64.
308 * @version: Allows expanding the execbuf ioctl parameters without breaking 292 * @version: Allows expanding the execbuf ioctl parameters without breaking
309 * backwards compatibility, since user-space will always tell the kernel 293 * backwards compatibility, since user-space will always tell the kernel
310 * which version it uses. 294 * which version it uses.
@@ -313,21 +297,32 @@ union drm_vmw_surface_reference_arg {
313 * Argument to the DRM_VMW_EXECBUF Ioctl. 297 * Argument to the DRM_VMW_EXECBUF Ioctl.
314 */ 298 */
315 299
316#define DRM_VMW_EXECBUF_VERSION 0 300#define DRM_VMW_EXECBUF_VERSION 2
317 301
318struct drm_vmw_execbuf_arg { 302struct drm_vmw_execbuf_arg {
319 uint64_t commands; 303 __u64 commands;
320 uint32_t command_size; 304 __u32 command_size;
321 uint32_t throttle_us; 305 __u32 throttle_us;
322 uint64_t fence_rep; 306 __u64 fence_rep;
323 uint32_t version; 307 __u32 version;
324 uint32_t flags; 308 __u32 flags;
309 __u32 context_handle;
310 __u32 pad64;
325}; 311};
326 312
327/** 313/**
328 * struct drm_vmw_fence_rep 314 * struct drm_vmw_fence_rep
329 * 315 *
330 * @fence_seq: Fence sequence associated with a command submission. 316 * @handle: Fence object handle for fence associated with a command submission.
317 * @mask: Fence flags relevant for this fence object.
318 * @seqno: Fence sequence number in fifo. A fence object with a lower
319 * seqno will signal the EXEC flag before a fence object with a higher
320 * seqno. This can be used by user-space to avoid kernel calls to determine
321 * whether a fence has signaled the EXEC flag. Note that @seqno will
322 * wrap at 32-bit.
323 * @passed_seqno: The highest seqno number processed by the hardware
324 * so far. This can be used to mark user-space fence objects as signaled, and
325 * to determine whether a fence seqno might be stale.
331 * @error: This member should've been set to -EFAULT on submission. 326 * @error: This member should've been set to -EFAULT on submission.
332 * The following actions should be take on completion: 327 * The following actions should be take on completion:
333 * error == -EFAULT: Fence communication failed. The host is synchronized. 328 * error == -EFAULT: Fence communication failed. The host is synchronized.
@@ -341,9 +336,12 @@ struct drm_vmw_execbuf_arg {
341 */ 336 */
342 337
343struct drm_vmw_fence_rep { 338struct drm_vmw_fence_rep {
344 uint64_t fence_seq; 339 __u32 handle;
345 int32_t error; 340 __u32 mask;
346 uint32_t pad64; 341 __u32 seqno;
342 __u32 passed_seqno;
343 __u32 pad64;
344 __s32 error;
347}; 345};
348 346
349/*************************************************************************/ 347/*************************************************************************/
@@ -373,8 +371,8 @@ struct drm_vmw_fence_rep {
373 */ 371 */
374 372
375struct drm_vmw_alloc_dmabuf_req { 373struct drm_vmw_alloc_dmabuf_req {
376 uint32_t size; 374 __u32 size;
377 uint32_t pad64; 375 __u32 pad64;
378}; 376};
379 377
380/** 378/**
@@ -391,11 +389,11 @@ struct drm_vmw_alloc_dmabuf_req {
391 */ 389 */
392 390
393struct drm_vmw_dmabuf_rep { 391struct drm_vmw_dmabuf_rep {
394 uint64_t map_handle; 392 __u64 map_handle;
395 uint32_t handle; 393 __u32 handle;
396 uint32_t cur_gmr_id; 394 __u32 cur_gmr_id;
397 uint32_t cur_gmr_offset; 395 __u32 cur_gmr_offset;
398 uint32_t pad64; 396 __u32 pad64;
399}; 397};
400 398
401/** 399/**
@@ -428,41 +426,8 @@ union drm_vmw_alloc_dmabuf_arg {
428 */ 426 */
429 427
430struct drm_vmw_unref_dmabuf_arg { 428struct drm_vmw_unref_dmabuf_arg {
431 uint32_t handle; 429 __u32 handle;
432 uint32_t pad64; 430 __u32 pad64;
433};
434
435/*************************************************************************/
436/**
437 * DRM_VMW_FIFO_DEBUG - Get last FIFO submission.
438 *
439 * This IOCTL copies the last FIFO submission directly out of the FIFO buffer.
440 */
441
442/**
443 * struct drm_vmw_fifo_debug_arg
444 *
445 * @debug_buffer: User space address of a debug_buffer cast to an uint64_t //In
446 * @debug_buffer_size: Size in bytes of debug buffer //In
447 * @used_size: Number of bytes copied to the buffer // Out
448 * @did_not_fit: Boolean indicating that the fifo contents did not fit. //Out
449 *
450 * Argument to the DRM_VMW_FIFO_DEBUG Ioctl.
451 */
452
453struct drm_vmw_fifo_debug_arg {
454 uint64_t debug_buffer;
455 uint32_t debug_buffer_size;
456 uint32_t used_size;
457 int32_t did_not_fit;
458 uint32_t pad64;
459};
460
461struct drm_vmw_fence_wait_arg {
462 uint64_t sequence;
463 uint64_t kernel_cookie;
464 int32_t cookie_valid;
465 int32_t pad64;
466}; 431};
467 432
468/*************************************************************************/ 433/*************************************************************************/
@@ -485,10 +450,10 @@ struct drm_vmw_fence_wait_arg {
485 */ 450 */
486 451
487struct drm_vmw_rect { 452struct drm_vmw_rect {
488 int32_t x; 453 __s32 x;
489 int32_t y; 454 __s32 y;
490 uint32_t w; 455 __u32 w;
491 uint32_t h; 456 __u32 h;
492}; 457};
493 458
494/** 459/**
@@ -510,21 +475,21 @@ struct drm_vmw_rect {
510 */ 475 */
511 476
512struct drm_vmw_control_stream_arg { 477struct drm_vmw_control_stream_arg {
513 uint32_t stream_id; 478 __u32 stream_id;
514 uint32_t enabled; 479 __u32 enabled;
515 480
516 uint32_t flags; 481 __u32 flags;
517 uint32_t color_key; 482 __u32 color_key;
518 483
519 uint32_t handle; 484 __u32 handle;
520 uint32_t offset; 485 __u32 offset;
521 int32_t format; 486 __s32 format;
522 uint32_t size; 487 __u32 size;
523 uint32_t width; 488 __u32 width;
524 uint32_t height; 489 __u32 height;
525 uint32_t pitch[3]; 490 __u32 pitch[3];
526 491
527 uint32_t pad64; 492 __u32 pad64;
528 struct drm_vmw_rect src; 493 struct drm_vmw_rect src;
529 struct drm_vmw_rect dst; 494 struct drm_vmw_rect dst;
530}; 495};
@@ -552,12 +517,12 @@ struct drm_vmw_control_stream_arg {
552 */ 517 */
553 518
554struct drm_vmw_cursor_bypass_arg { 519struct drm_vmw_cursor_bypass_arg {
555 uint32_t flags; 520 __u32 flags;
556 uint32_t crtc_id; 521 __u32 crtc_id;
557 int32_t xpos; 522 __s32 xpos;
558 int32_t ypos; 523 __s32 ypos;
559 int32_t xhot; 524 __s32 xhot;
560 int32_t yhot; 525 __s32 yhot;
561}; 526};
562 527
563/*************************************************************************/ 528/*************************************************************************/
@@ -575,8 +540,8 @@ struct drm_vmw_cursor_bypass_arg {
575 */ 540 */
576 541
577struct drm_vmw_stream_arg { 542struct drm_vmw_stream_arg {
578 uint32_t stream_id; 543 __u32 stream_id;
579 uint32_t pad64; 544 __u32 pad64;
580}; 545};
581 546
582/*************************************************************************/ 547/*************************************************************************/
@@ -589,26 +554,537 @@ struct drm_vmw_stream_arg {
589 554
590/*************************************************************************/ 555/*************************************************************************/
591/** 556/**
557 * DRM_VMW_GET_3D_CAP
558 *
559 * Read 3D capabilities from the FIFO
560 *
561 */
562
563/**
564 * struct drm_vmw_get_3d_cap_arg
565 *
566 * @buffer: Pointer to a buffer for capability data, cast to an __u64
567 * @size: Max size to copy
568 *
569 * Input argument to the DRM_VMW_GET_3D_CAP_IOCTL
570 * ioctls.
571 */
572
573struct drm_vmw_get_3d_cap_arg {
574 __u64 buffer;
575 __u32 max_size;
576 __u32 pad64;
577};
578
579/*************************************************************************/
580/**
581 * DRM_VMW_FENCE_WAIT
582 *
583 * Waits for a fence object to signal. The wait is interruptible, so that
584 * signals may be delivered during the interrupt. The wait may timeout,
585 * in which case the calls returns -EBUSY. If the wait is restarted,
586 * that is restarting without resetting @cookie_valid to zero,
587 * the timeout is computed from the first call.
588 *
589 * The flags argument to the DRM_VMW_FENCE_WAIT ioctl indicates what to wait
590 * on:
591 * DRM_VMW_FENCE_FLAG_EXEC: All commands ahead of the fence in the command
592 * stream
593 * have executed.
594 * DRM_VMW_FENCE_FLAG_QUERY: All query results resulting from query finish
595 * commands
596 * in the buffer given to the EXECBUF ioctl returning the fence object handle
597 * are available to user-space.
598 *
599 * DRM_VMW_WAIT_OPTION_UNREF: If this wait option is given, and the
 600 * fence wait ioctl returns 0, the fence object has been unreferenced after
601 * the wait.
602 */
603
604#define DRM_VMW_FENCE_FLAG_EXEC (1 << 0)
605#define DRM_VMW_FENCE_FLAG_QUERY (1 << 1)
606
607#define DRM_VMW_WAIT_OPTION_UNREF (1 << 0)
608
609/**
610 * struct drm_vmw_fence_wait_arg
611 *
612 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
613 * @cookie_valid: Must be reset to 0 on first call. Left alone on restart.
614 * @kernel_cookie: Set to 0 on first call. Left alone on restart.
615 * @timeout_us: Wait timeout in microseconds. 0 for indefinite timeout.
616 * @lazy: Set to 1 if timing is not critical. Allow more than a kernel tick
617 * before returning.
618 * @flags: Fence flags to wait on.
619 * @wait_options: Options that control the behaviour of the wait ioctl.
620 *
621 * Input argument to the DRM_VMW_FENCE_WAIT ioctl.
622 */
623
624struct drm_vmw_fence_wait_arg {
625 __u32 handle;
626 __s32 cookie_valid;
627 __u64 kernel_cookie;
628 __u64 timeout_us;
629 __s32 lazy;
630 __s32 flags;
631 __s32 wait_options;
632 __s32 pad64;
633};
634
635/*************************************************************************/
636/**
637 * DRM_VMW_FENCE_SIGNALED
638 *
 639 * Checks if a fence object is signaled.
640 */
641
642/**
643 * struct drm_vmw_fence_signaled_arg
644 *
645 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
646 * @flags: Fence object flags input to DRM_VMW_FENCE_SIGNALED ioctl
647 * @signaled: Out: Flags signaled.
648 * @sequence: Out: Highest sequence passed so far. Can be used to signal the
649 * EXEC flag of user-space fence objects.
650 *
651 * Input/Output argument to the DRM_VMW_FENCE_SIGNALED and DRM_VMW_FENCE_UNREF
652 * ioctls.
653 */
654
655struct drm_vmw_fence_signaled_arg {
656 __u32 handle;
657 __u32 flags;
658 __s32 signaled;
659 __u32 passed_seqno;
660 __u32 signaled_flags;
661 __u32 pad64;
662};
663
664/*************************************************************************/
665/**
666 * DRM_VMW_FENCE_UNREF
667 *
668 * Unreferences a fence object, and causes it to be destroyed if there are no
669 * other references to it.
670 *
671 */
672
673/**
674 * struct drm_vmw_fence_arg
675 *
676 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
677 *
 678 * Input/Output argument to the DRM_VMW_FENCE_UNREF ioctl.
679 */
680
681struct drm_vmw_fence_arg {
682 __u32 handle;
683 __u32 pad64;
684};
685
686
687/*************************************************************************/
688/**
689 * DRM_VMW_FENCE_EVENT
690 *
691 * Queues an event on a fence to be delivered on the drm character device
692 * when the fence has signaled the DRM_VMW_FENCE_FLAG_EXEC flag.
693 * Optionally the approximate time when the fence signaled is
694 * given by the event.
695 */
696
697/*
698 * The event type
699 */
700#define DRM_VMW_EVENT_FENCE_SIGNALED 0x80000000
701
702struct drm_vmw_event_fence {
703 struct drm_event base;
704 __u64 user_data;
705 __u32 tv_sec;
706 __u32 tv_usec;
707};
708
709/*
710 * Flags that may be given to the command.
711 */
712/* Request fence signaled time on the event. */
713#define DRM_VMW_FE_FLAG_REQ_TIME (1 << 0)
714
715/**
716 * struct drm_vmw_fence_event_arg
717 *
718 * @fence_rep: Pointer to fence_rep structure cast to __u64 or 0 if
719 * the fence is not supposed to be referenced by user-space.
720 * @user_info: Info to be delivered with the event.
721 * @handle: Attach the event to this fence only.
722 * @flags: A set of flags as defined above.
723 */
724struct drm_vmw_fence_event_arg {
725 __u64 fence_rep;
726 __u64 user_data;
727 __u32 handle;
728 __u32 flags;
729};
730
731
732/*************************************************************************/
733/**
734 * DRM_VMW_PRESENT
735 *
736 * Executes an SVGA present on a given fb for a given surface. The surface
737 * is placed on the framebuffer. Cliprects are given relative to the given
738 * point (the point disignated by dest_{x|y}).
739 *
740 */
741
742/**
743 * struct drm_vmw_present_arg
744 * @fb_id: framebuffer id to present / read back from.
745 * @sid: Surface id to present from.
746 * @dest_x: X placement coordinate for surface.
747 * @dest_y: Y placement coordinate for surface.
748 * @clips_ptr: Pointer to an array of clip rects cast to an __u64.
749 * @num_clips: Number of cliprects given relative to the framebuffer origin,
750 * in the same coordinate space as the frame buffer.
751 * @pad64: Unused 64-bit padding.
752 *
753 * Input argument to the DRM_VMW_PRESENT ioctl.
754 */
755
756struct drm_vmw_present_arg {
757 __u32 fb_id;
758 __u32 sid;
759 __s32 dest_x;
760 __s32 dest_y;
761 __u64 clips_ptr;
762 __u32 num_clips;
763 __u32 pad64;
764};
765
766
767/*************************************************************************/
768/**
769 * DRM_VMW_PRESENT_READBACK
770 *
771 * Executes an SVGA present readback from a given fb to the dma buffer
772 * currently bound as the fb. If there is no dma buffer bound to the fb,
773 * an error will be returned.
774 *
775 */
776
777/**
778 * struct drm_vmw_present_arg
779 * @fb_id: fb_id to present / read back from.
780 * @num_clips: Number of cliprects.
781 * @clips_ptr: Pointer to an array of clip rects cast to an __u64.
782 * @fence_rep: Pointer to a struct drm_vmw_fence_rep, cast to an __u64.
783 * If this member is NULL, then the ioctl should not return a fence.
784 */
785
786struct drm_vmw_present_readback_arg {
787 __u32 fb_id;
788 __u32 num_clips;
789 __u64 clips_ptr;
790 __u64 fence_rep;
791};
792
793/*************************************************************************/
794/**
592 * DRM_VMW_UPDATE_LAYOUT - Update layout 795 * DRM_VMW_UPDATE_LAYOUT - Update layout
593 * 796 *
594 * Updates the prefered modes and connection status for connectors. The 797 * Updates the preferred modes and connection status for connectors. The
595 * command conisits of one drm_vmw_update_layout_arg pointing out a array 798 * command consists of one drm_vmw_update_layout_arg pointing to an array
596 * of num_outputs drm_vmw_rect's. 799 * of num_outputs drm_vmw_rect's.
597 */ 800 */
598 801
599/** 802/**
600 * struct drm_vmw_update_layout_arg 803 * struct drm_vmw_update_layout_arg
601 * 804 *
602 * @num_outputs: number of active 805 * @num_outputs: number of active connectors
603 * @rects: pointer to array of drm_vmw_rect 806 * @rects: pointer to array of drm_vmw_rect cast to an __u64
604 * 807 *
605 * Input argument to the DRM_VMW_UPDATE_LAYOUT Ioctl. 808 * Input argument to the DRM_VMW_UPDATE_LAYOUT Ioctl.
606 */ 809 */
607
608struct drm_vmw_update_layout_arg { 810struct drm_vmw_update_layout_arg {
609 uint32_t num_outputs; 811 __u32 num_outputs;
610 uint32_t pad64; 812 __u32 pad64;
611 uint64_t rects; 813 __u64 rects;
814};
815
816
817/*************************************************************************/
818/**
819 * DRM_VMW_CREATE_SHADER - Create shader
820 *
821 * Creates a shader and optionally binds it to a dma buffer containing
822 * the shader byte-code.
823 */
824
825/**
826 * enum drm_vmw_shader_type - Shader types
827 */
828enum drm_vmw_shader_type {
829 drm_vmw_shader_type_vs = 0,
830 drm_vmw_shader_type_ps,
612}; 831};
613 832
833
834/**
835 * struct drm_vmw_shader_create_arg
836 *
837 * @shader_type: Shader type of the shader to create.
838 * @size: Size of the byte-code in bytes.
839 * where the shader byte-code starts
840 * @buffer_handle: Buffer handle identifying the buffer containing the
841 * shader byte-code
842 * @shader_handle: On successful completion contains a handle that
843 * can be used to subsequently identify the shader.
844 * @offset: Offset in bytes into the buffer given by @buffer_handle,
845 *
846 * Input / Output argument to the DRM_VMW_CREATE_SHADER Ioctl.
847 */
848struct drm_vmw_shader_create_arg {
849 enum drm_vmw_shader_type shader_type;
850 __u32 size;
851 __u32 buffer_handle;
852 __u32 shader_handle;
853 __u64 offset;
854};
855
856/*************************************************************************/
857/**
858 * DRM_VMW_UNREF_SHADER - Unreferences a shader
859 *
860 * Destroys a user-space reference to a shader, optionally destroying
861 * it.
862 */
863
864/**
865 * struct drm_vmw_shader_arg
866 *
867 * @handle: Handle identifying the shader to destroy.
868 *
869 * Input argument to the DRM_VMW_UNREF_SHADER ioctl.
870 */
871struct drm_vmw_shader_arg {
872 __u32 handle;
873 __u32 pad64;
874};
875
876/*************************************************************************/
877/**
878 * DRM_VMW_GB_SURFACE_CREATE - Create a host guest-backed surface.
879 *
880 * Allocates a surface handle and queues a create surface command
881 * for the host on the first use of the surface. The surface ID can
882 * be used as the surface ID in commands referencing the surface.
883 */
884
885/**
886 * enum drm_vmw_surface_flags
887 *
888 * @drm_vmw_surface_flag_shareable: Whether the surface is shareable
889 * @drm_vmw_surface_flag_scanout: Whether the surface is a scanout
890 * surface.
891 * @drm_vmw_surface_flag_create_buffer: Create a backup buffer if none is
892 * given.
893 */
894enum drm_vmw_surface_flags {
895 drm_vmw_surface_flag_shareable = (1 << 0),
896 drm_vmw_surface_flag_scanout = (1 << 1),
897 drm_vmw_surface_flag_create_buffer = (1 << 2)
898};
899
900/**
901 * struct drm_vmw_gb_surface_create_req
902 *
903 * @svga3d_flags: SVGA3d surface flags for the device.
904 * @format: SVGA3d format.
905 * @mip_level: Number of mip levels for all faces.
906 * @drm_surface_flags Flags as described above.
907 * @multisample_count Future use. Set to 0.
908 * @autogen_filter Future use. Set to 0.
909 * @buffer_handle Buffer handle of backup buffer. SVGA3D_INVALID_ID
910 * if none.
911 * @base_size Size of the base mip level for all faces.
912 * @array_size Must be zero for non-DX hardware, and if non-zero
913 * svga3d_flags must have proper bind flags setup.
914 *
915 * Input argument to the DRM_VMW_GB_SURFACE_CREATE Ioctl.
916 * Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl.
917 */
918struct drm_vmw_gb_surface_create_req {
919 __u32 svga3d_flags;
920 __u32 format;
921 __u32 mip_levels;
922 enum drm_vmw_surface_flags drm_surface_flags;
923 __u32 multisample_count;
924 __u32 autogen_filter;
925 __u32 buffer_handle;
926 __u32 array_size;
927 struct drm_vmw_size base_size;
928};
929
930/**
931 * struct drm_vmw_gb_surface_create_rep
932 *
933 * @handle: Surface handle.
934 * @backup_size: Size of backup buffers for this surface.
935 * @buffer_handle: Handle of backup buffer. SVGA3D_INVALID_ID if none.
936 * @buffer_size: Actual size of the buffer identified by
937 * @buffer_handle
938 * @buffer_map_handle: Offset into device address space for the buffer
939 * identified by @buffer_handle.
940 *
941 * Part of output argument for the DRM_VMW_GB_SURFACE_REF ioctl.
942 * Output argument for the DRM_VMW_GB_SURFACE_CREATE ioctl.
943 */
944struct drm_vmw_gb_surface_create_rep {
945 __u32 handle;
946 __u32 backup_size;
947 __u32 buffer_handle;
948 __u32 buffer_size;
949 __u64 buffer_map_handle;
950};
951
952/**
953 * union drm_vmw_gb_surface_create_arg
954 *
955 * @req: Input argument as described above.
956 * @rep: Output argument as described above.
957 *
958 * Argument to the DRM_VMW_GB_SURFACE_CREATE ioctl.
959 */
960union drm_vmw_gb_surface_create_arg {
961 struct drm_vmw_gb_surface_create_rep rep;
962 struct drm_vmw_gb_surface_create_req req;
963};
964
965/*************************************************************************/
966/**
967 * DRM_VMW_GB_SURFACE_REF - Reference a host surface.
968 *
969 * Puts a reference on a host surface with a given handle, as previously
970 * returned by the DRM_VMW_GB_SURFACE_CREATE ioctl.
971 * A reference will make sure the surface isn't destroyed while we hold
972 * it and will allow the calling client to use the surface handle in
973 * the command stream.
974 *
975 * On successful return, the Ioctl returns the surface information given
976 * to and returned from the DRM_VMW_GB_SURFACE_CREATE ioctl.
977 */
978
979/**
980 * struct drm_vmw_gb_surface_reference_arg
981 *
982 * @creq: The data used as input when the surface was created, as described
983 * above at "struct drm_vmw_gb_surface_create_req"
984 * @crep: Additional data output when the surface was created, as described
985 * above at "struct drm_vmw_gb_surface_create_rep"
986 *
987 * Output Argument to the DRM_VMW_GB_SURFACE_REF ioctl.
988 */
989struct drm_vmw_gb_surface_ref_rep {
990 struct drm_vmw_gb_surface_create_req creq;
991 struct drm_vmw_gb_surface_create_rep crep;
992};
993
994/**
995 * union drm_vmw_gb_surface_reference_arg
996 *
997 * @req: Input data as described above at "struct drm_vmw_surface_arg"
998 * @rep: Output data as described above at "struct drm_vmw_gb_surface_ref_rep"
999 *
1000 * Argument to the DRM_VMW_GB_SURFACE_REF Ioctl.
1001 */
1002union drm_vmw_gb_surface_reference_arg {
1003 struct drm_vmw_gb_surface_ref_rep rep;
1004 struct drm_vmw_surface_arg req;
1005};
1006
1007
1008/*************************************************************************/
1009/**
1010 * DRM_VMW_SYNCCPU - Sync a DMA buffer / MOB for CPU access.
1011 *
1012 * Idles any previously submitted GPU operations on the buffer and
1013 * by default blocks command submissions that reference the buffer.
1014 * If the file descriptor used to grab a blocking CPU sync is closed, the
1015 * cpu sync is released.
1016 * The flags argument indicates how the grab / release operation should be
1017 * performed:
1018 */
1019
1020/**
1021 * enum drm_vmw_synccpu_flags - Synccpu flags:
1022 *
1023 * @drm_vmw_synccpu_read: Sync for read. If sync is done for read only, it's a
1024 * hint to the kernel to allow command submissions that references the buffer
1025 * for read-only.
1026 * @drm_vmw_synccpu_write: Sync for write. Block all command submissions
1027 * referencing this buffer.
1028 * @drm_vmw_synccpu_dontblock: Don't wait for GPU idle, but rather return
1029 * -EBUSY should the buffer be busy.
1030 * @drm_vmw_synccpu_allow_cs: Allow command submission that touches the buffer
1031 * while the buffer is synced for CPU. This is similar to the GEM bo idle
1032 * behavior.
1033 */
1034enum drm_vmw_synccpu_flags {
1035 drm_vmw_synccpu_read = (1 << 0),
1036 drm_vmw_synccpu_write = (1 << 1),
1037 drm_vmw_synccpu_dontblock = (1 << 2),
1038 drm_vmw_synccpu_allow_cs = (1 << 3)
1039};
1040
1041/**
1042 * enum drm_vmw_synccpu_op - Synccpu operations:
1043 *
1044 * @drm_vmw_synccpu_grab: Grab the buffer for CPU operations
1045 * @drm_vmw_synccpu_release: Release a previous grab.
1046 */
1047enum drm_vmw_synccpu_op {
1048 drm_vmw_synccpu_grab,
1049 drm_vmw_synccpu_release
1050};
1051
1052/**
1053 * struct drm_vmw_synccpu_arg
1054 *
1055 * @op: The synccpu operation as described above.
1056 * @handle: Handle identifying the buffer object.
1057 * @flags: Flags as described above.
1058 */
1059struct drm_vmw_synccpu_arg {
1060 enum drm_vmw_synccpu_op op;
1061 enum drm_vmw_synccpu_flags flags;
1062 __u32 handle;
1063 __u32 pad64;
1064};
1065
1066/*************************************************************************/
1067/**
1068 * DRM_VMW_CREATE_EXTENDED_CONTEXT - Create a host context.
1069 *
1070 * Allocates a device unique context id, and queues a create context command
1071 * for the host. Does not wait for host completion.
1072 */
1073enum drm_vmw_extended_context {
1074 drm_vmw_context_legacy,
1075 drm_vmw_context_dx
1076};
1077
1078/**
1079 * union drm_vmw_extended_context_arg
1080 *
1081 * @req: Context type.
1082 * @rep: Context identifier.
1083 *
1084 * Argument to the DRM_VMW_CREATE_EXTENDED_CONTEXT Ioctl.
1085 */
1086union drm_vmw_extended_context_arg {
1087 enum drm_vmw_extended_context req;
1088 struct drm_vmw_context_arg rep;
1089};
614#endif 1090#endif