summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorRichard Yoo2017-05-09 00:32:30 -0500
committerRichard Yoo2017-05-09 01:08:15 -0500
commitd8ed3020112ded2e210e652f5a8f367d5f847157 (patch)
treea56c761e21902ba6b24df5c5b8168372ab995162
parent0da99b8ab0f691ad7ec7f4c5c8a09c5df92486a1 (diff)
parentd4b8344363b4e0f0e831e5722b6df5cc0bb08df8 (diff)
downloadexternal-libdrm-d8ed3020112ded2e210e652f5a8f367d5f847157.tar.gz
external-libdrm-d8ed3020112ded2e210e652f5a8f367d5f847157.tar.xz
external-libdrm-d8ed3020112ded2e210e652f5a8f367d5f847157.zip
Merge tag libdrm-2.4.75 into aosp/masterandroid-n-iot-preview-4
Below is a brief summary of patches pulled in: 0da99b8a (m/master, aosp/master) Move libdrm.so to vendor partition d4b83443 (tag: libdrm-2.4.75) Bump version for 2.4.75 release dae413e4 (tag: libdrm-2.4.74) Bump version for release 317bdff1 (tag: libdrm-2.4.73) Bump version for release 8cf43127 (tag: libdrm-2.4.72) Bump version for release a44c9c31 (tag: libdrm-2.4.71) Bump version for release 20208455 (tag: android-o-preview-1, tag: android-n-mr2-preview-2, tag: android-n-mr2-preview-1, aosp/sdk-release, aosp/o-preview) add a flag control that private libdrm can be chosen Bug: 35871718 Test: aosp_arm-eng compiles Change-Id: I81985fd41d5c0d8a732705dc2a4bee8eb5d459bb
-rw-r--r--.editorconfig19
-rw-r--r--.gitignore7
-rw-r--r--Android.common.mk12
-rw-r--r--Android.mk12
-rw-r--r--Makefile.am16
-rw-r--r--Makefile.sources5
-rw-r--r--README8
-rw-r--r--RELEASING26
-rw-r--r--amdgpu/Android.mk8
-rw-r--r--amdgpu/Makefile.sources1
-rwxr-xr-xamdgpu/amdgpu-symbol-check5
-rw-r--r--amdgpu/amdgpu.h79
-rw-r--r--amdgpu/amdgpu_asic_id.h165
-rw-r--r--amdgpu/amdgpu_bo.c14
-rw-r--r--amdgpu/amdgpu_cs.c181
-rw-r--r--amdgpu/amdgpu_device.c15
-rw-r--r--amdgpu/amdgpu_gpu_info.c22
-rw-r--r--amdgpu/amdgpu_internal.h16
-rw-r--r--amdgpu/libdrm_amdgpu.pc.in1
-rwxr-xr-xautogen.sh6
-rw-r--r--configure.ac88
-rw-r--r--etnaviv/Android.mk14
-rw-r--r--etnaviv/Makefile.am26
-rw-r--r--etnaviv/Makefile.sources12
-rwxr-xr-xetnaviv/etnaviv-symbol-check48
-rw-r--r--etnaviv/etnaviv_bo.c347
-rw-r--r--etnaviv/etnaviv_bo_cache.c196
-rw-r--r--etnaviv/etnaviv_cmd_stream.c243
-rw-r--r--etnaviv/etnaviv_device.c119
-rw-r--r--etnaviv/etnaviv_drm.h233
-rw-r--r--etnaviv/etnaviv_drmif.h191
-rw-r--r--etnaviv/etnaviv_gpu.c175
-rw-r--r--etnaviv/etnaviv_pipe.c83
-rw-r--r--etnaviv/etnaviv_priv.h201
-rw-r--r--etnaviv/libdrm_etnaviv.pc.in11
-rw-r--r--exynos/exynos_drm.c2
-rw-r--r--freedreno/Android.mk6
-rw-r--r--freedreno/Makefile.am2
-rw-r--r--freedreno/Makefile.sources1
-rwxr-xr-xfreedreno/freedreno-symbol-check6
-rw-r--r--freedreno/freedreno_bo.c148
-rw-r--r--freedreno/freedreno_bo_cache.c222
-rw-r--r--freedreno/freedreno_device.c62
-rw-r--r--freedreno/freedreno_drmif.h17
-rw-r--r--freedreno/freedreno_pipe.c4
-rw-r--r--freedreno/freedreno_priv.h42
-rw-r--r--freedreno/freedreno_ringbuffer.c71
-rw-r--r--freedreno/freedreno_ringbuffer.h27
-rw-r--r--freedreno/kgsl/README (renamed from freedreno/README)10
-rw-r--r--freedreno/kgsl/kgsl_bo.c6
-rw-r--r--freedreno/kgsl/kgsl_drm.h2
-rw-r--r--freedreno/kgsl/kgsl_pipe.c9
-rw-r--r--freedreno/kgsl/kgsl_ringbuffer.c19
-rw-r--r--freedreno/kgsl/msm_kgsl.h4
-rw-r--r--freedreno/msm/msm_bo.c20
-rw-r--r--freedreno/msm/msm_device.c3
-rw-r--r--freedreno/msm/msm_drm.h131
-rw-r--r--freedreno/msm/msm_pipe.c47
-rw-r--r--freedreno/msm/msm_priv.h17
-rw-r--r--freedreno/msm/msm_ringbuffer.c452
-rw-r--r--include/drm/README157
-rw-r--r--include/drm/amdgpu_drm.h44
-rw-r--r--include/drm/drm.h82
-rw-r--r--include/drm/drm_fourcc.h17
-rw-r--r--include/drm/drm_mode.h210
-rw-r--r--include/drm/drm_sarea.h2
-rw-r--r--include/drm/i915_drm.h315
-rw-r--r--include/drm/radeon_drm.h51
-rw-r--r--include/drm/vc4_drm.h302
-rw-r--r--include/drm/virtgpu_drm.h109
-rw-r--r--include/drm/vmwgfx_drm.h792
-rw-r--r--intel/Android.mk7
-rw-r--r--intel/Makefile.am1
-rw-r--r--intel/Makefile.sources3
-rwxr-xr-xintel/intel-symbol-check10
-rw-r--r--intel/intel_bufmgr.h20
-rw-r--r--intel/intel_bufmgr_fake.c6
-rw-r--r--intel/intel_bufmgr_gem.c541
-rw-r--r--intel/intel_chipset.h92
-rw-r--r--intel/intel_decode.c4
-rw-r--r--intel/tests/gen5-3d.batch-ref.txt2
-rw-r--r--intel/tests/gen6-3d.batch-ref.txt2
-rw-r--r--intel/uthash.h1074
-rw-r--r--libkms/Android.mk6
-rw-r--r--libkms/Makefile.am6
-rw-r--r--libkms/exynos.c3
-rw-r--r--libkms/libkms.pc.in1
-rw-r--r--libkms/linux.c99
-rw-r--r--libsync.h148
-rw-r--r--man/drm-kms.xml2
-rw-r--r--nouveau/Android.mk6
-rw-r--r--nouveau/Makefile.am2
-rw-r--r--radeon/Android.mk6
-rw-r--r--radeon/Makefile.am3
-rw-r--r--radeon/libdrm_radeon.pc.in1
-rw-r--r--radeon/radeon_bo_gem.c2
-rw-r--r--radeon/radeon_cs_gem.c12
-rw-r--r--radeon/radeon_surface.c16
-rw-r--r--tests/Makefile.am50
-rw-r--r--tests/amdgpu/amdgpu_test.c259
-rw-r--r--tests/amdgpu/amdgpu_test.h3
-rw-r--r--tests/amdgpu/basic_tests.c468
-rw-r--r--tests/amdgpu/bo_tests.c13
-rw-r--r--tests/amdgpu/cs_tests.c59
-rw-r--r--tests/amdgpu/vce_ib.h2
-rw-r--r--tests/amdgpu/vce_tests.c28
-rw-r--r--tests/auth.c138
-rw-r--r--tests/dristat.c285
-rw-r--r--tests/drmdevice.c71
-rw-r--r--tests/drmtest.c135
-rw-r--r--tests/drmtest.h40
-rw-r--r--tests/etnaviv/Makefile.am41
-rw-r--r--tests/etnaviv/cmdstream.xml.h242
-rw-r--r--tests/etnaviv/etnaviv_2d_test.c240
-rw-r--r--tests/etnaviv/etnaviv_bo_cache_test.c121
-rw-r--r--tests/etnaviv/etnaviv_cmd_stream_test.c123
-rw-r--r--tests/etnaviv/state.xml.h375
-rw-r--r--tests/etnaviv/state_2d.xml.h1497
-rw-r--r--tests/etnaviv/write_bmp.c151
-rw-r--r--tests/etnaviv/write_bmp.h (renamed from tests/getstats.c)52
-rw-r--r--tests/exynos/Makefile.am4
-rw-r--r--tests/getclient.c61
-rw-r--r--tests/getversion.c49
-rw-r--r--tests/kms/kms-steal-crtc.c4
-rw-r--r--tests/kms/kms-universal-planes.c5
-rw-r--r--tests/kmstest/Makefile.am6
-rw-r--r--tests/kmstest/main.c45
-rw-r--r--tests/lock.c264
-rw-r--r--tests/modetest/Android.mk3
-rw-r--r--tests/modetest/Makefile.am6
-rw-r--r--tests/modetest/modetest.c42
-rw-r--r--tests/name_from_fd.c58
-rw-r--r--tests/nouveau/Makefile.am8
-rw-r--r--tests/openclose.c37
-rw-r--r--tests/proptest/Android.mk3
-rw-r--r--tests/proptest/proptest.c4
-rw-r--r--tests/setversion.c91
-rw-r--r--tests/updatedraw.c154
-rw-r--r--tests/util/Android.mk7
-rw-r--r--tests/util/kms.c6
-rw-r--r--tests/vbltest/vbltest.c7
-rw-r--r--util_double_list.h8
-rw-r--r--vc4/Makefile.am34
-rw-r--r--vc4/Makefile.sources3
-rw-r--r--vc4/libdrm_vc4.pc.in9
-rw-r--r--vc4/vc4_packet.h397
-rw-r--r--vc4/vc4_qpu_defines.h274
-rw-r--r--xf86drm.c2433
-rw-r--r--xf86drm.h50
-rw-r--r--xf86drmMode.c52
-rw-r--r--xf86drmMode.h29
151 files changed, 13205 insertions, 3432 deletions
diff --git a/.editorconfig b/.editorconfig
new file mode 100644
index 00000000..893b7be0
--- /dev/null
+++ b/.editorconfig
@@ -0,0 +1,19 @@
1# To use this config with your editor, follow the instructions at:
2# http://editorconfig.org
3
4root = true
5
6[*]
7charset = utf-8
8insert_final_newline = true
9
10[*.{c,h}]
11indent_style = space
12indent_size = 4
13
14[{Makefile.*,*.mk}]
15indent_style = tab
16
17[*.m4]
18indent_style = space
19indent_size = 2
diff --git a/.gitignore b/.gitignore
index c1e87c50..d51e619b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -56,6 +56,8 @@ libdrm_omap.pc
56libdrm_exynos.pc 56libdrm_exynos.pc
57libdrm_freedreno.pc 57libdrm_freedreno.pc
58libdrm_amdgpu.pc 58libdrm_amdgpu.pc
59libdrm_vc4.pc
60libdrm_etnaviv.pc
59libkms.pc 61libkms.pc
60libtool 62libtool
61ltmain.sh 63ltmain.sh
@@ -92,10 +94,15 @@ tests/modeprint/modeprint
92tests/modetest/modetest 94tests/modetest/modetest
93tests/name_from_fd 95tests/name_from_fd
94tests/proptest/proptest 96tests/proptest/proptest
97tests/kms/kms-steal-crtc
98tests/kms/kms-universal-planes
95tests/kmstest/kmstest 99tests/kmstest/kmstest
96tests/vbltest/vbltest 100tests/vbltest/vbltest
97tests/radeon/radeon_ttm 101tests/radeon/radeon_ttm
98tests/exynos/exynos_fimg2d_event 102tests/exynos/exynos_fimg2d_event
99tests/exynos/exynos_fimg2d_perf 103tests/exynos/exynos_fimg2d_perf
100tests/exynos/exynos_fimg2d_test 104tests/exynos/exynos_fimg2d_test
105tests/etnaviv/etnaviv_2d_test
106tests/etnaviv/etnaviv_cmd_stream_test
107tests/etnaviv/etnaviv_bo_cache_test
101man/*.3 108man/*.3
diff --git a/Android.common.mk b/Android.common.mk
new file mode 100644
index 00000000..f57b8d37
--- /dev/null
+++ b/Android.common.mk
@@ -0,0 +1,12 @@
1# XXX: Consider moving these to config.h analogous to autoconf.
2LOCAL_CFLAGS += \
3 -DHAVE_VISIBILITY=1 \
4 -DHAVE_LIBDRM_ATOMIC_PRIMITIVES=1
5
6LOCAL_CFLAGS += \
7 -Wno-unused-parameter \
8 -Wno-missing-field-initializers
9
10# Quiet down the build system and remove any .h files from the sources
11LOCAL_SRC_FILES := $(patsubst %.h, , $(LOCAL_SRC_FILES))
12LOCAL_EXPORT_C_INCLUDE_DIRS += $(LOCAL_PATH)
diff --git a/Android.mk b/Android.mk
index 42d254cc..102c9a39 100644
--- a/Android.mk
+++ b/Android.mk
@@ -21,21 +21,21 @@
21# IN THE SOFTWARE. 21# IN THE SOFTWARE.
22# 22#
23 23
24
24ifneq ($(TARGET_USE_PRIVATE_LIBDRM),true) 25ifneq ($(TARGET_USE_PRIVATE_LIBDRM),true)
26
27LIBDRM_COMMON_MK := $(call my-dir)/Android.common.mk
28
25LOCAL_PATH := $(call my-dir) 29LOCAL_PATH := $(call my-dir)
26 30
27# Import variables LIBDRM_{,H_,INCLUDE_H_,INCLUDE_VMWGFX_H_}FILES 31# Import variables LIBDRM_{,H_,INCLUDE_H_,INCLUDE_VMWGFX_H_}FILES
28include $(LOCAL_PATH)/Makefile.sources 32include $(LOCAL_PATH)/Makefile.sources
29 33
30common_CFLAGS := \ 34common_CFLAGS := \
31 -DHAVE_VISIBILITY=1 \
32 -DHAVE_LIBDRM_ATOMIC_PRIMITIVES=1 \
33 -Wno-enum-conversion \ 35 -Wno-enum-conversion \
34 -Wno-missing-field-initializers \
35 -Wno-pointer-arith \ 36 -Wno-pointer-arith \
36 -Wno-sign-compare \ 37 -Wno-sign-compare \
37 -Wno-tautological-compare \ 38 -Wno-tautological-compare
38 -Wno-unused-parameter
39 39
40# Static library for the device (recovery) 40# Static library for the device (recovery)
41include $(CLEAR_VARS) 41include $(CLEAR_VARS)
@@ -53,6 +53,7 @@ LOCAL_C_INCLUDES := \
53LOCAL_CFLAGS := \ 53LOCAL_CFLAGS := \
54 $(common_CFLAGS) 54 $(common_CFLAGS)
55 55
56include $(LIBDRM_COMMON_MK)
56include $(BUILD_STATIC_LIBRARY) 57include $(BUILD_STATIC_LIBRARY)
57 58
58# Dynamic library for the device 59# Dynamic library for the device
@@ -72,6 +73,7 @@ LOCAL_C_INCLUDES := \
72LOCAL_CFLAGS := \ 73LOCAL_CFLAGS := \
73 $(common_CFLAGS) 74 $(common_CFLAGS)
74 75
76include $(LIBDRM_COMMON_MK)
75include $(BUILD_SHARED_LIBRARY) 77include $(BUILD_SHARED_LIBRARY)
76 78
77include $(call all-makefiles-under,$(LOCAL_PATH)) 79include $(call all-makefiles-under,$(LOCAL_PATH))
diff --git a/Makefile.am b/Makefile.am
index 11ed1028..2bf644be 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -22,6 +22,7 @@ include Makefile.sources
22 22
23ACLOCAL_AMFLAGS = -I m4 ${ACLOCAL_FLAGS} 23ACLOCAL_AMFLAGS = -I m4 ${ACLOCAL_FLAGS}
24 24
25AM_MAKEFLAGS = -s
25AM_DISTCHECK_CONFIGURE_FLAGS = \ 26AM_DISTCHECK_CONFIGURE_FLAGS = \
26 --enable-udev \ 27 --enable-udev \
27 --enable-libkms \ 28 --enable-libkms \
@@ -29,12 +30,14 @@ AM_DISTCHECK_CONFIGURE_FLAGS = \
29 --enable-radeon \ 30 --enable-radeon \
30 --enable-amdgpu \ 31 --enable-amdgpu \
31 --enable-nouveau \ 32 --enable-nouveau \
33 --enable-vc4 \
32 --enable-vmwgfx \ 34 --enable-vmwgfx \
33 --enable-omap-experimental-api \ 35 --enable-omap-experimental-api \
34 --enable-exynos-experimental-api \ 36 --enable-exynos-experimental-api \
35 --enable-freedreno \ 37 --enable-freedreno \
36 --enable-freedreno-kgsl\ 38 --enable-freedreno-kgsl\
37 --enable-tegra-experimental-api \ 39 --enable-tegra-experimental-api \
40 --enable-etnaviv-experimental-api \
38 --enable-install-test-programs \ 41 --enable-install-test-programs \
39 --enable-cairo-tests \ 42 --enable-cairo-tests \
40 --enable-manpages \ 43 --enable-manpages \
@@ -79,6 +82,14 @@ if HAVE_TEGRA
79TEGRA_SUBDIR = tegra 82TEGRA_SUBDIR = tegra
80endif 83endif
81 84
85if HAVE_VC4
86VC4_SUBDIR = vc4
87endif
88
89if HAVE_ETNAVIV
90ETNAVIV_SUBDIR = etnaviv
91endif
92
82if BUILD_MANPAGES 93if BUILD_MANPAGES
83if HAVE_MANPAGES_STYLESHEET 94if HAVE_MANPAGES_STYLESHEET
84MAN_SUBDIR = man 95MAN_SUBDIR = man
@@ -100,6 +111,8 @@ SUBDIRS = \
100 $(EXYNOS_SUBDIR) \ 111 $(EXYNOS_SUBDIR) \
101 $(FREEDRENO_SUBDIR) \ 112 $(FREEDRENO_SUBDIR) \
102 $(TEGRA_SUBDIR) \ 113 $(TEGRA_SUBDIR) \
114 $(VC4_SUBDIR) \
115 $(ETNAVIV_SUBDIR) \
103 tests \ 116 tests \
104 $(MAN_SUBDIR) \ 117 $(MAN_SUBDIR) \
105 $(ROCKCHIP_SUBDIR) 118 $(ROCKCHIP_SUBDIR)
@@ -119,8 +132,6 @@ libdrm_la_SOURCES = $(LIBDRM_FILES)
119libdrmincludedir = ${includedir} 132libdrmincludedir = ${includedir}
120libdrminclude_HEADERS = $(LIBDRM_H_FILES) 133libdrminclude_HEADERS = $(LIBDRM_H_FILES)
121 134
122EXTRA_DIST = Android.mk
123
124klibdrmincludedir = ${includedir}/libdrm 135klibdrmincludedir = ${includedir}/libdrm
125klibdrminclude_HEADERS = $(LIBDRM_INCLUDE_H_FILES) 136klibdrminclude_HEADERS = $(LIBDRM_INCLUDE_H_FILES)
126 137
@@ -128,6 +139,7 @@ if HAVE_VMWGFX
128klibdrminclude_HEADERS += $(LIBDRM_INCLUDE_VMWGFX_H_FILES) 139klibdrminclude_HEADERS += $(LIBDRM_INCLUDE_VMWGFX_H_FILES)
129endif 140endif
130 141
142EXTRA_DIST = include/drm/README
131 143
132copy-headers : 144copy-headers :
133 cp -r $(kernel_source)/include/uapi/drm/*.h $(top_srcdir)/include/drm/ 145 cp -r $(kernel_source)/include/uapi/drm/*.h $(top_srcdir)/include/drm/
diff --git a/Makefile.sources b/Makefile.sources
index a77f48de..10aa1d0f 100644
--- a/Makefile.sources
+++ b/Makefile.sources
@@ -13,6 +13,7 @@ LIBDRM_FILES := \
13 util_math.h 13 util_math.h
14 14
15LIBDRM_H_FILES := \ 15LIBDRM_H_FILES := \
16 libsync.h \
16 xf86drm.h \ 17 xf86drm.h \
17 xf86drmMode.h 18 xf86drmMode.h
18 19
@@ -32,7 +33,9 @@ LIBDRM_INCLUDE_H_FILES := \
32 include/drm/savage_drm.h \ 33 include/drm/savage_drm.h \
33 include/drm/sis_drm.h \ 34 include/drm/sis_drm.h \
34 include/drm/tegra_drm.h \ 35 include/drm/tegra_drm.h \
35 include/drm/via_drm.h 36 include/drm/vc4_drm.h \
37 include/drm/via_drm.h \
38 include/drm/virtgpu_drm.h
36 39
37LIBDRM_INCLUDE_VMWGFX_H_FILES := \ 40LIBDRM_INCLUDE_VMWGFX_H_FILES := \
38 include/drm/vmwgfx_drm.h 41 include/drm/vmwgfx_drm.h
diff --git a/README b/README
index 603a1c10..26cab9d3 100644
--- a/README
+++ b/README
@@ -1,7 +1,7 @@
1libdrm - userspace library for drm 1libdrm - userspace library for drm
2 2
3This is libdrm, a userspace library for accessing the DRM, direct 3This is libdrm, a userspace library for accessing the DRM, direct
4rendering manager, on Linux, BSD and other operating systes that 4rendering manager, on Linux, BSD and other operating systems that
5support the ioctl interface. The library provides wrapper functions 5support the ioctl interface. The library provides wrapper functions
6for the ioctls to avoid exposing the kernel interface directly, and 6for the ioctls to avoid exposing the kernel interface directly, and
7for chipsets with drm memory manager, support for tracking relocations 7for chipsets with drm memory manager, support for tracking relocations
@@ -15,7 +15,7 @@ with an older kernel.
15Compiling 15Compiling
16--------- 16---------
17 17
18libdrm is a standard autotools packages and follows the normal 18libdrm is a standard autotools package and follows the normal
19configure, build and install steps. The first step is to configure 19configure, build and install steps. The first step is to configure
20the package, which is done by running the configure shell script: 20the package, which is done by running the configure shell script:
21 21
@@ -37,5 +37,5 @@ and once make finishes successfully, install the package using
37 37
38 make install 38 make install
39 39
40If you are install into a system location, you will need to be root to 40If you are installing into a system location, you will need to be root
41perform the install step. 41to perform the install step.
diff --git a/RELEASING b/RELEASING
index 62c5be9f..262ca08d 100644
--- a/RELEASING
+++ b/RELEASING
@@ -9,21 +9,14 @@ However, this is up to whoever is driving the feature in question.
9 9
10Follow these steps to release a new version of libdrm: 10Follow these steps to release a new version of libdrm:
11 11
12 1) Ensure that there are no local, uncommitted/unpushed 12 1) Bump the version number in configure.ac. We seem to have settled
13 modifications. You're probably in a good state if both "git diff
14 HEAD" and "git log master..origin/master" give no output.
15
16 2) Bump the version number in configure.ac. We seem to have settled
17 for 2.4.x as the versioning scheme for libdrm, so just bump the 13 for 2.4.x as the versioning scheme for libdrm, so just bump the
18 micro version. 14 micro version.
19 15
20 3) Run autoconf and then re-run ./configure so the build system 16 2) Run autoconf and then re-run ./configure so the build system
21 picks up the new version number. 17 picks up the new version number.
22 18
23 4) (optional step, release.sh will make distcheck for you, but it can be 19 3) Verify that the code passes "make distcheck". Running "make
24 heart warming to verify that make distcheck passes)
25
26 Verify that the code passes "make distcheck". Running "make
27 distcheck" should result in no warnings or errors and end with a 20 distcheck" should result in no warnings or errors and end with a
28 message of the form: 21 message of the form:
29 22
@@ -36,20 +29,13 @@ Follow these steps to release a new version of libdrm:
36 Make sure that the version number reported by distcheck and in 29 Make sure that the version number reported by distcheck and in
37 the tarball names matches the number you bumped to in configure.ac. 30 the tarball names matches the number you bumped to in configure.ac.
38 31
39 5) Commit the configure.ac change and make an annotated tag for that 32 4) Push the updated master branch with the bumped version number:
40 commit with the version number of the release as the name and a
41 message of "libdrm X.Y.Z". For example, for the 2.4.16 release
42 the command is:
43
44 git tag -a 2.4.16 -m "libdrm 2.4.16"
45
46 6) Push the commit and tag by saying
47 33
48 git push --tags origin master 34 git push origin master
49 35
50 assuming the remote for the upstream libdrm repo is called origin. 36 assuming the remote for the upstream libdrm repo is called origin.
51 37
52 7) Use the release.sh script from the xorg/util/modular repo to 38 5) Use the release.sh script from the xorg/util/modular repo to
53 upload the tarballs to the freedesktop.org download area and 39 upload the tarballs to the freedesktop.org download area and
54 create an announce email template. The script takes one argument: 40 create an announce email template. The script takes one argument:
55 the path to the libdrm checkout. So, if a checkout of modular is 41 the path to the libdrm checkout. So, if a checkout of modular is
diff --git a/amdgpu/Android.mk b/amdgpu/Android.mk
index e5777e53..bf0611ba 100644
--- a/amdgpu/Android.mk
+++ b/amdgpu/Android.mk
@@ -5,14 +5,10 @@ include $(CLEAR_VARS)
5include $(LOCAL_PATH)/Makefile.sources 5include $(LOCAL_PATH)/Makefile.sources
6 6
7LOCAL_MODULE := libdrm_amdgpu 7LOCAL_MODULE := libdrm_amdgpu
8LOCAL_MODULE_TAGS := optional
9 8
10LOCAL_SHARED_LIBRARIES := libdrm 9LOCAL_SHARED_LIBRARIES := libdrm
11 10
12LOCAL_SRC_FILES := $(filter-out %.h,$(LIBDRM_AMDGPU_FILES)) 11LOCAL_SRC_FILES := $(LIBDRM_AMDGPU_FILES)
13LOCAL_EXPORT_C_INCLUDE_DIRS := $(LOCAL_PATH)
14
15LOCAL_CFLAGS := \
16 -DHAVE_LIBDRM_ATOMIC_PRIMITIVES=1
17 12
13include $(LIBDRM_COMMON_MK)
18include $(BUILD_SHARED_LIBRARY) 14include $(BUILD_SHARED_LIBRARY)
diff --git a/amdgpu/Makefile.sources b/amdgpu/Makefile.sources
index 0c0b9a93..487b9e0a 100644
--- a/amdgpu/Makefile.sources
+++ b/amdgpu/Makefile.sources
@@ -1,4 +1,5 @@
1LIBDRM_AMDGPU_FILES := \ 1LIBDRM_AMDGPU_FILES := \
2 amdgpu_asic_id.h \
2 amdgpu_bo.c \ 3 amdgpu_bo.c \
3 amdgpu_cs.c \ 4 amdgpu_cs.c \
4 amdgpu_device.c \ 5 amdgpu_device.c \
diff --git a/amdgpu/amdgpu-symbol-check b/amdgpu/amdgpu-symbol-check
index 9a0b36cb..87f4fd2c 100755
--- a/amdgpu/amdgpu-symbol-check
+++ b/amdgpu/amdgpu-symbol-check
@@ -24,13 +24,18 @@ amdgpu_bo_set_metadata
24amdgpu_bo_va_op 24amdgpu_bo_va_op
25amdgpu_bo_wait_for_idle 25amdgpu_bo_wait_for_idle
26amdgpu_create_bo_from_user_mem 26amdgpu_create_bo_from_user_mem
27amdgpu_cs_create_semaphore
27amdgpu_cs_ctx_create 28amdgpu_cs_ctx_create
28amdgpu_cs_ctx_free 29amdgpu_cs_ctx_free
30amdgpu_cs_destroy_semaphore
29amdgpu_cs_query_fence_status 31amdgpu_cs_query_fence_status
30amdgpu_cs_query_reset_state 32amdgpu_cs_query_reset_state
33amdgpu_cs_signal_semaphore
31amdgpu_cs_submit 34amdgpu_cs_submit
35amdgpu_cs_wait_semaphore
32amdgpu_device_deinitialize 36amdgpu_device_deinitialize
33amdgpu_device_initialize 37amdgpu_device_initialize
38amdgpu_get_marketing_name
34amdgpu_query_buffer_size_alignment 39amdgpu_query_buffer_size_alignment
35amdgpu_query_crtc_from_id 40amdgpu_query_crtc_from_id
36amdgpu_query_firmware_version 41amdgpu_query_firmware_version
diff --git a/amdgpu/amdgpu.h b/amdgpu/amdgpu.h
index e44d802b..7b26a04c 100644
--- a/amdgpu/amdgpu.h
+++ b/amdgpu/amdgpu.h
@@ -124,6 +124,11 @@ typedef struct amdgpu_bo_list *amdgpu_bo_list_handle;
124 */ 124 */
125typedef struct amdgpu_va *amdgpu_va_handle; 125typedef struct amdgpu_va *amdgpu_va_handle;
126 126
127/**
128 * Define handle for semaphore
129 */
130typedef struct amdgpu_semaphore *amdgpu_semaphore_handle;
131
127/*--------------------------------------------------------------------------*/ 132/*--------------------------------------------------------------------------*/
128/* -------------------------- Structures ---------------------------------- */ 133/* -------------------------- Structures ---------------------------------- */
129/*--------------------------------------------------------------------------*/ 134/*--------------------------------------------------------------------------*/
@@ -680,7 +685,7 @@ int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
680int amdgpu_bo_free(amdgpu_bo_handle buf_handle); 685int amdgpu_bo_free(amdgpu_bo_handle buf_handle);
681 686
682/** 687/**
683 * Request CPU access to GPU accessable memory 688 * Request CPU access to GPU accessible memory
684 * 689 *
685 * \param buf_handle - \c [in] Buffer handle 690 * \param buf_handle - \c [in] Buffer handle
686 * \param cpu - \c [out] CPU address to be used for access 691 * \param cpu - \c [out] CPU address to be used for access
@@ -846,7 +851,7 @@ int amdgpu_cs_query_reset_state(amdgpu_context_handle context,
846 * order. 851 * order.
847 * 852 *
848 * The caller can specify the user fence buffer/location with the fence_info in the 853 * The caller can specify the user fence buffer/location with the fence_info in the
849 * cs_request.The sequence number is returned via the 'seq_no' paramter 854 * cs_request.The sequence number is returned via the 'seq_no' parameter
850 * in ibs_request structure. 855 * in ibs_request structure.
851 * 856 *
852 * 857 *
@@ -1180,4 +1185,74 @@ int amdgpu_bo_va_op(amdgpu_bo_handle bo,
1180 uint64_t flags, 1185 uint64_t flags,
1181 uint32_t ops); 1186 uint32_t ops);
1182 1187
1188/**
1189 * create semaphore
1190 *
1191 * \param sem - \c [out] semaphore handle
1192 *
1193 * \return 0 on success\n
1194 * <0 - Negative POSIX Error code
1195 *
1196*/
1197int amdgpu_cs_create_semaphore(amdgpu_semaphore_handle *sem);
1198
1199/**
1200 * signal semaphore
1201 *
1202 * \param context - \c [in] GPU Context
1203 * \param ip_type - \c [in] Hardware IP block type = AMDGPU_HW_IP_*
1204 * \param ip_instance - \c [in] Index of the IP block of the same type
1205 * \param ring - \c [in] Specify ring index of the IP
1206 * \param sem - \c [in] semaphore handle
1207 *
1208 * \return 0 on success\n
1209 * <0 - Negative POSIX Error code
1210 *
1211*/
1212int amdgpu_cs_signal_semaphore(amdgpu_context_handle ctx,
1213 uint32_t ip_type,
1214 uint32_t ip_instance,
1215 uint32_t ring,
1216 amdgpu_semaphore_handle sem);
1217
1218/**
1219 * wait semaphore
1220 *
1221 * \param context - \c [in] GPU Context
1222 * \param ip_type - \c [in] Hardware IP block type = AMDGPU_HW_IP_*
1223 * \param ip_instance - \c [in] Index of the IP block of the same type
1224 * \param ring - \c [in] Specify ring index of the IP
1225 * \param sem - \c [in] semaphore handle
1226 *
1227 * \return 0 on success\n
1228 * <0 - Negative POSIX Error code
1229 *
1230*/
1231int amdgpu_cs_wait_semaphore(amdgpu_context_handle ctx,
1232 uint32_t ip_type,
1233 uint32_t ip_instance,
1234 uint32_t ring,
1235 amdgpu_semaphore_handle sem);
1236
1237/**
1238 * destroy semaphore
1239 *
1240 * \param sem - \c [in] semaphore handle
1241 *
1242 * \return 0 on success\n
1243 * <0 - Negative POSIX Error code
1244 *
1245*/
1246int amdgpu_cs_destroy_semaphore(amdgpu_semaphore_handle sem);
1247
1248/**
1249 * Get the ASIC marketing name
1250 *
1251 * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
1252 *
1253 * \return the constant string of the marketing name
1254 * "NULL" means the ASIC is not found
1255*/
1256const char *amdgpu_get_marketing_name(amdgpu_device_handle dev);
1257
1183#endif /* #ifdef _AMDGPU_H_ */ 1258#endif /* #ifdef _AMDGPU_H_ */
diff --git a/amdgpu/amdgpu_asic_id.h b/amdgpu/amdgpu_asic_id.h
new file mode 100644
index 00000000..3e7d736b
--- /dev/null
+++ b/amdgpu/amdgpu_asic_id.h
@@ -0,0 +1,165 @@
1/*
2 * Copyright © 2016 Advanced Micro Devices, Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 */
24
25#ifndef __AMDGPU_ASIC_ID_H__
26#define __AMDGPU_ASIC_ID_H__
27
28static struct amdgpu_asic_id_table_t {
29 uint32_t did;
30 uint32_t rid;
31 const char *marketing_name;
32} const amdgpu_asic_id_table [] = {
33 {0x6600, 0x0, "AMD Radeon HD 8600/8700M"},
34 {0x6600, 0x81, "AMD Radeon R7 M370"},
35 {0x6601, 0x0, "AMD Radeon HD 8500M/8700M"},
36 {0x6604, 0x0, "AMD Radeon R7 M265 Series"},
37 {0x6604, 0x81, "AMD Radeon R7 M350"},
38 {0x6605, 0x0, "AMD Radeon R7 M260 Series"},
39 {0x6605, 0x81, "AMD Radeon R7 M340"},
40 {0x6606, 0x0, "AMD Radeon HD 8790M"},
41 {0x6607, 0x0, "AMD Radeon HD8530M"},
42 {0x6608, 0x0, "AMD FirePro W2100"},
43 {0x6610, 0x0, "AMD Radeon HD 8600 Series"},
44 {0x6610, 0x81, "AMD Radeon R7 350"},
45 {0x6610, 0x83, "AMD Radeon R5 340"},
46 {0x6611, 0x0, "AMD Radeon HD 8500 Series"},
47 {0x6613, 0x0, "AMD Radeon HD 8500 series"},
48 {0x6617, 0xC7, "AMD Radeon R7 240 Series"},
49 {0x6640, 0x0, "AMD Radeon HD 8950"},
50 {0x6640, 0x80, "AMD Radeon R9 M380"},
51 {0x6646, 0x0, "AMD Radeon R9 M280X"},
52 {0x6646, 0x80, "AMD Radeon R9 M470X"},
53 {0x6647, 0x0, "AMD Radeon R9 M270X"},
54 {0x6647, 0x80, "AMD Radeon R9 M380"},
55 {0x6649, 0x0, "AMD FirePro W5100"},
56 {0x6658, 0x0, "AMD Radeon R7 200 Series"},
57 {0x665C, 0x0, "AMD Radeon HD 7700 Series"},
58 {0x665D, 0x0, "AMD Radeon R7 200 Series"},
59 {0x665F, 0x81, "AMD Radeon R7 300 Series"},
60 {0x6660, 0x0, "AMD Radeon HD 8600M Series"},
61 {0x6660, 0x81, "AMD Radeon R5 M335"},
62 {0x6660, 0x83, "AMD Radeon R5 M330"},
63 {0x6663, 0x0, "AMD Radeon HD 8500M Series"},
64 {0x6663, 0x83, "AMD Radeon R5 M320"},
65 {0x6664, 0x0, "AMD Radeon R5 M200 Series"},
66 {0x6665, 0x0, "AMD Radeon R5 M200 Series"},
67 {0x6665, 0x83, "AMD Radeon R5 M320"},
68 {0x6667, 0x0, "AMD Radeon R5 M200 Series"},
69 {0x666F, 0x0, "AMD Radeon HD 8500M"},
70 {0x6780, 0x0, "ATI FirePro V (FireGL V) Graphics Adapter"},
71 {0x678A, 0x0, "ATI FirePro V (FireGL V) Graphics Adapter"},
72 {0x6798, 0x0, "AMD Radeon HD 7900 Series"},
73 {0x679A, 0x0, "AMD Radeon HD 7900 Series"},
74 {0x679B, 0x0, "AMD Radeon HD 7900 Series"},
75 {0x679E, 0x0, "AMD Radeon HD 7800 Series"},
76 {0x67A0, 0x0, "HAWAII XTGL (67A0)"},
77 {0x67A1, 0x0, "HAWAII GL40 (67A1)"},
78 {0x67B0, 0x0, "AMD Radeon R9 200 Series"},
79 {0x67B0, 0x80, "AMD Radeon R9 390 Series"},
80 {0x67B1, 0x0, "AMD Radeon R9 200 Series"},
81 {0x67B1, 0x80, "AMD Radeon R9 390 Series"},
82 {0x67B9, 0x0, "AMD Radeon R9 200 Series"},
83 {0x67DF, 0xC4, "AMD Radeon RX 480 Graphics"},
84 {0x67DF, 0xC5, "AMD Radeon RX 470 Graphics"},
85 {0x67DF, 0xC7, "AMD Radeon RX 480 Graphics"},
86 {0x67DF, 0xCF, "AMD Radeon RX 470 Graphics"},
87 {0x67C4, 0x00, "AMD Radeon Pro WX 7100 Graphics"},
88 {0x67C7, 0x00, "AMD Radeon Pro WX 5100 Graphics"},
89 {0x67C0, 0x00, "AMD Radeon Pro WX 7100 Graphics"},
90 {0x67E0, 0x00, "AMD Radeon Pro WX Series Graphics"},
91 {0x67E3, 0x00, "AMD Radeon Pro WX 4100 Graphics"},
92 {0x67E8, 0x00, "AMD Radeon Pro WX Series Graphics"},
93 {0x67E8, 0x01, "AMD Radeon Pro WX Series Graphics"},
94 {0x67E8, 0x80, "AMD Radeon E9260 Graphics"},
95 {0x67EB, 0x00, "AMD Radeon Pro WX Series Graphics"},
96 {0x67EF, 0xC0, "AMD Radeon RX Graphics"},
97 {0x67EF, 0xC1, "AMD Radeon RX 460 Graphics"},
98 {0x67EF, 0xC5, "AMD Radeon RX 460 Graphics"},
99 {0x67EF, 0xC7, "AMD Radeon RX Graphics"},
100 {0x67EF, 0xCF, "AMD Radeon RX 460 Graphics"},
101 {0x67EF, 0xEF, "AMD Radeon RX Graphics"},
102 {0x67FF, 0xC0, "AMD Radeon RX Graphics"},
103 {0x67FF, 0xC1, "AMD Radeon RX Graphics"},
104 {0x6800, 0x0, "AMD Radeon HD 7970M"},
105 {0x6801, 0x0, "AMD Radeon(TM) HD8970M"},
106 {0x6808, 0x0, "ATI FirePro V(FireGL V) Graphics Adapter"},
107 {0x6809, 0x0, "ATI FirePro V(FireGL V) Graphics Adapter"},
108 {0x6810, 0x0, "AMD Radeon(TM) HD 8800 Series"},
109 {0x6810, 0x81, "AMD Radeon R7 370 Series"},
110 {0x6811, 0x0, "AMD Radeon(TM) HD8800 Series"},
111 {0x6811, 0x81, "AMD Radeon R7 300 Series"},
112 {0x6818, 0x0, "AMD Radeon HD 7800 Series"},
113 {0x6819, 0x0, "AMD Radeon HD 7800 Series"},
114 {0x6820, 0x0, "AMD Radeon HD 8800M Series"},
115 {0x6820, 0x81, "AMD Radeon R9 M375"},
116 {0x6820, 0x83, "AMD Radeon R9 M375X"},
117 {0x6821, 0x0, "AMD Radeon HD 8800M Series"},
118 {0x6821, 0x87, "AMD Radeon R7 M380"},
119 {0x6821, 0x83, "AMD Radeon R9 M370X"},
120 {0x6822, 0x0, "AMD Radeon E8860"},
121 {0x6823, 0x0, "AMD Radeon HD 8800M Series"},
122 {0x6825, 0x0, "AMD Radeon HD 7800M Series"},
123 {0x6827, 0x0, "AMD Radeon HD 7800M Series"},
124 {0x6828, 0x0, "ATI FirePro V(FireGL V) Graphics Adapter"},
125 {0x682B, 0x0, "AMD Radeon HD 8800M Series"},
126 {0x682B, 0x87, "AMD Radeon R9 M360"},
127 {0x682C, 0x0, "AMD FirePro W4100"},
128 {0x682D, 0x0, "AMD Radeon HD 7700M Series"},
129 {0x682F, 0x0, "AMD Radeon HD 7700M Series"},
130 {0x6835, 0x0, "AMD Radeon R7 Series / HD 9000 Series"},
131 {0x6837, 0x0, "AMD Radeon HD7700 Series"},
132 {0x683D, 0x0, "AMD Radeon HD 7700 Series"},
133 {0x683F, 0x0, "AMD Radeon HD 7700 Series"},
134 {0x6900, 0x0, "AMD Radeon R7 M260"},
135 {0x6900, 0x81, "AMD Radeon R7 M360"},
136 {0x6900, 0x83, "AMD Radeon R7 M340"},
137 {0x6901, 0x0, "AMD Radeon R5 M255"},
138 {0x6907, 0x0, "AMD Radeon R5 M255"},
139 {0x6907, 0x87, "AMD Radeon R5 M315"},
140 {0x6920, 0x0, "AMD Radeon R9 M395X"},
141 {0x6920, 0x1, "AMD Radeon R9 M390X"},
142 {0x6921, 0x0, "AMD Radeon R9 M295X"},
143 {0x6929, 0x0, "AMD FirePro S7150"},
144 {0x692B, 0x0, "AMD FirePro W7100"},
145 {0x6938, 0x0, "AMD Radeon R9 200 Series"},
146 {0x6938, 0xF0, "AMD Radeon R9 200 Series"},
147 {0x6938, 0xF1, "AMD Radeon R9 380 Series"},
148 {0x6939, 0xF0, "AMD Radeon R9 200 Series"},
149 {0x6939, 0x0, "AMD Radeon R9 200 Series"},
150 {0x6939, 0xF1, "AMD Radeon R9 380 Series"},
151 {0x7300, 0xC8, "AMD Radeon R9 Fury Series"},
152 {0x7300, 0xCB, "AMD Radeon R9 Fury Series"},
153 {0x7300, 0xCA, "AMD Radeon R9 Fury Series"},
154 {0x9874, 0xC4, "AMD Radeon R7 Graphics"},
155 {0x9874, 0xC5, "AMD Radeon R6 Graphics"},
156 {0x9874, 0xC6, "AMD Radeon R6 Graphics"},
157 {0x9874, 0xC7, "AMD Radeon R5 Graphics"},
158 {0x9874, 0x81, "AMD Radeon R6 Graphics"},
159 {0x9874, 0x87, "AMD Radeon R5 Graphics"},
160 {0x9874, 0x85, "AMD Radeon R6 Graphics"},
161 {0x9874, 0x84, "AMD Radeon R7 Graphics"},
162
163 {0x0000, 0x0, "\0"},
164};
165#endif
diff --git a/amdgpu/amdgpu_bo.c b/amdgpu/amdgpu_bo.c
index 1a5a4011..d30fd1e7 100644
--- a/amdgpu/amdgpu_bo.c
+++ b/amdgpu/amdgpu_bo.c
@@ -537,18 +537,10 @@ int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
537 int r; 537 int r;
538 struct amdgpu_bo *bo; 538 struct amdgpu_bo *bo;
539 struct drm_amdgpu_gem_userptr args; 539 struct drm_amdgpu_gem_userptr args;
540 uintptr_t cpu0;
541 uint32_t ps, off;
542 540
543 memset(&args, 0, sizeof(args)); 541 args.addr = (uintptr_t)cpu;
544 ps = getpagesize(); 542 args.flags = AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_REGISTER |
545 543 AMDGPU_GEM_USERPTR_VALIDATE;
546 cpu0 = ROUND_DOWN((uintptr_t)cpu, ps);
547 off = (uintptr_t)cpu - cpu0;
548 size = ROUND_UP(size + off, ps);
549
550 args.addr = cpu0;
551 args.flags = AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_REGISTER;
552 args.size = size; 544 args.size = size;
553 r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_USERPTR, 545 r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_USERPTR,
554 &args, sizeof(args)); 546 &args, sizeof(args));
diff --git a/amdgpu/amdgpu_cs.c b/amdgpu/amdgpu_cs.c
index 6747158c..fb5b3a8c 100644
--- a/amdgpu/amdgpu_cs.c
+++ b/amdgpu/amdgpu_cs.c
@@ -40,6 +40,9 @@
40#include "amdgpu_drm.h" 40#include "amdgpu_drm.h"
41#include "amdgpu_internal.h" 41#include "amdgpu_internal.h"
42 42
43static int amdgpu_cs_unreference_sem(amdgpu_semaphore_handle sem);
44static int amdgpu_cs_reset_sem(amdgpu_semaphore_handle sem);
45
43/** 46/**
44 * Create command submission context 47 * Create command submission context
45 * 48 *
@@ -53,6 +56,7 @@ int amdgpu_cs_ctx_create(amdgpu_device_handle dev,
53{ 56{
54 struct amdgpu_context *gpu_context; 57 struct amdgpu_context *gpu_context;
55 union drm_amdgpu_ctx args; 58 union drm_amdgpu_ctx args;
59 int i, j, k;
56 int r; 60 int r;
57 61
58 if (NULL == dev) 62 if (NULL == dev)
@@ -66,6 +70,10 @@ int amdgpu_cs_ctx_create(amdgpu_device_handle dev,
66 70
67 gpu_context->dev = dev; 71 gpu_context->dev = dev;
68 72
73 r = pthread_mutex_init(&gpu_context->sequence_mutex, NULL);
74 if (r)
75 goto error;
76
69 /* Create the context */ 77 /* Create the context */
70 memset(&args, 0, sizeof(args)); 78 memset(&args, 0, sizeof(args));
71 args.in.op = AMDGPU_CTX_OP_ALLOC_CTX; 79 args.in.op = AMDGPU_CTX_OP_ALLOC_CTX;
@@ -74,11 +82,16 @@ int amdgpu_cs_ctx_create(amdgpu_device_handle dev,
74 goto error; 82 goto error;
75 83
76 gpu_context->id = args.out.alloc.ctx_id; 84 gpu_context->id = args.out.alloc.ctx_id;
85 for (i = 0; i < AMDGPU_HW_IP_NUM; i++)
86 for (j = 0; j < AMDGPU_HW_IP_INSTANCE_MAX_COUNT; j++)
87 for (k = 0; k < AMDGPU_CS_MAX_RINGS; k++)
88 list_inithead(&gpu_context->sem_list[i][j][k]);
77 *context = (amdgpu_context_handle)gpu_context; 89 *context = (amdgpu_context_handle)gpu_context;
78 90
79 return 0; 91 return 0;
80 92
81error: 93error:
94 pthread_mutex_destroy(&gpu_context->sequence_mutex);
82 free(gpu_context); 95 free(gpu_context);
83 return r; 96 return r;
84} 97}
@@ -94,18 +107,32 @@ error:
94int amdgpu_cs_ctx_free(amdgpu_context_handle context) 107int amdgpu_cs_ctx_free(amdgpu_context_handle context)
95{ 108{
96 union drm_amdgpu_ctx args; 109 union drm_amdgpu_ctx args;
110 int i, j, k;
97 int r; 111 int r;
98 112
99 if (NULL == context) 113 if (NULL == context)
100 return -EINVAL; 114 return -EINVAL;
101 115
116 pthread_mutex_destroy(&context->sequence_mutex);
117
102 /* now deal with kernel side */ 118 /* now deal with kernel side */
103 memset(&args, 0, sizeof(args)); 119 memset(&args, 0, sizeof(args));
104 args.in.op = AMDGPU_CTX_OP_FREE_CTX; 120 args.in.op = AMDGPU_CTX_OP_FREE_CTX;
105 args.in.ctx_id = context->id; 121 args.in.ctx_id = context->id;
106 r = drmCommandWriteRead(context->dev->fd, DRM_AMDGPU_CTX, 122 r = drmCommandWriteRead(context->dev->fd, DRM_AMDGPU_CTX,
107 &args, sizeof(args)); 123 &args, sizeof(args));
108 124 for (i = 0; i < AMDGPU_HW_IP_NUM; i++) {
125 for (j = 0; j < AMDGPU_HW_IP_INSTANCE_MAX_COUNT; j++) {
126 for (k = 0; k < AMDGPU_CS_MAX_RINGS; k++) {
127 amdgpu_semaphore_handle sem;
128 LIST_FOR_EACH_ENTRY(sem, &context->sem_list[i][j][k], list) {
129 list_del(&sem->list);
130 amdgpu_cs_reset_sem(sem);
131 amdgpu_cs_unreference_sem(sem);
132 }
133 }
134 }
135 }
109 free(context); 136 free(context);
110 137
111 return r; 138 return r;
@@ -150,7 +177,10 @@ static int amdgpu_cs_submit_one(amdgpu_context_handle context,
150 struct drm_amdgpu_cs_chunk *chunks; 177 struct drm_amdgpu_cs_chunk *chunks;
151 struct drm_amdgpu_cs_chunk_data *chunk_data; 178 struct drm_amdgpu_cs_chunk_data *chunk_data;
152 struct drm_amdgpu_cs_chunk_dep *dependencies = NULL; 179 struct drm_amdgpu_cs_chunk_dep *dependencies = NULL;
153 uint32_t i, size; 180 struct drm_amdgpu_cs_chunk_dep *sem_dependencies = NULL;
181 struct list_head *sem_list;
182 amdgpu_semaphore_handle sem, tmp;
183 uint32_t i, size, sem_count = 0;
154 bool user_fence; 184 bool user_fence;
155 int r = 0; 185 int r = 0;
156 186
@@ -160,9 +190,13 @@ static int amdgpu_cs_submit_one(amdgpu_context_handle context,
160 return -EINVAL; 190 return -EINVAL;
161 if (ibs_request->number_of_ibs > AMDGPU_CS_MAX_IBS_PER_SUBMIT) 191 if (ibs_request->number_of_ibs > AMDGPU_CS_MAX_IBS_PER_SUBMIT)
162 return -EINVAL; 192 return -EINVAL;
193 if (ibs_request->number_of_ibs == 0) {
194 ibs_request->seq_no = AMDGPU_NULL_SUBMIT_SEQ;
195 return 0;
196 }
163 user_fence = (ibs_request->fence_info.handle != NULL); 197 user_fence = (ibs_request->fence_info.handle != NULL);
164 198
165 size = ibs_request->number_of_ibs + (user_fence ? 2 : 1); 199 size = ibs_request->number_of_ibs + (user_fence ? 2 : 1) + 1;
166 200
167 chunk_array = alloca(sizeof(uint64_t) * size); 201 chunk_array = alloca(sizeof(uint64_t) * size);
168 chunks = alloca(sizeof(struct drm_amdgpu_cs_chunk) * size); 202 chunks = alloca(sizeof(struct drm_amdgpu_cs_chunk) * size);
@@ -196,6 +230,8 @@ static int amdgpu_cs_submit_one(amdgpu_context_handle context,
196 chunk_data[i].ib_data.flags = ib->flags; 230 chunk_data[i].ib_data.flags = ib->flags;
197 } 231 }
198 232
233 pthread_mutex_lock(&context->sequence_mutex);
234
199 if (user_fence) { 235 if (user_fence) {
200 i = cs.in.num_chunks++; 236 i = cs.in.num_chunks++;
201 237
@@ -240,15 +276,49 @@ static int amdgpu_cs_submit_one(amdgpu_context_handle context,
240 chunks[i].chunk_data = (uint64_t)(uintptr_t)dependencies; 276 chunks[i].chunk_data = (uint64_t)(uintptr_t)dependencies;
241 } 277 }
242 278
279 sem_list = &context->sem_list[ibs_request->ip_type][ibs_request->ip_instance][ibs_request->ring];
280 LIST_FOR_EACH_ENTRY(sem, sem_list, list)
281 sem_count++;
282 if (sem_count) {
283 sem_dependencies = malloc(sizeof(struct drm_amdgpu_cs_chunk_dep) * sem_count);
284 if (!sem_dependencies) {
285 r = -ENOMEM;
286 goto error_unlock;
287 }
288 sem_count = 0;
289 LIST_FOR_EACH_ENTRY_SAFE(sem, tmp, sem_list, list) {
290 struct amdgpu_cs_fence *info = &sem->signal_fence;
291 struct drm_amdgpu_cs_chunk_dep *dep = &sem_dependencies[sem_count++];
292 dep->ip_type = info->ip_type;
293 dep->ip_instance = info->ip_instance;
294 dep->ring = info->ring;
295 dep->ctx_id = info->context->id;
296 dep->handle = info->fence;
297
298 list_del(&sem->list);
299 amdgpu_cs_reset_sem(sem);
300 amdgpu_cs_unreference_sem(sem);
301 }
302 i = cs.in.num_chunks++;
303
304 /* dependencies chunk */
305 chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
306 chunks[i].chunk_id = AMDGPU_CHUNK_ID_DEPENDENCIES;
307 chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_dep) / 4 * sem_count;
308 chunks[i].chunk_data = (uint64_t)(uintptr_t)sem_dependencies;
309 }
310
243 r = drmCommandWriteRead(context->dev->fd, DRM_AMDGPU_CS, 311 r = drmCommandWriteRead(context->dev->fd, DRM_AMDGPU_CS,
244 &cs, sizeof(cs)); 312 &cs, sizeof(cs));
245 if (r) 313 if (r)
246 goto error_unlock; 314 goto error_unlock;
247 315
248 ibs_request->seq_no = cs.out.handle; 316 ibs_request->seq_no = cs.out.handle;
249 317 context->last_seq[ibs_request->ip_type][ibs_request->ip_instance][ibs_request->ring] = ibs_request->seq_no;
250error_unlock: 318error_unlock:
319 pthread_mutex_unlock(&context->sequence_mutex);
251 free(dependencies); 320 free(dependencies);
321 free(sem_dependencies);
252 return r; 322 return r;
253} 323}
254 324
@@ -356,6 +426,10 @@ int amdgpu_cs_query_fence_status(struct amdgpu_cs_fence *fence,
356 return -EINVAL; 426 return -EINVAL;
357 if (fence->ring >= AMDGPU_CS_MAX_RINGS) 427 if (fence->ring >= AMDGPU_CS_MAX_RINGS)
358 return -EINVAL; 428 return -EINVAL;
429 if (fence->fence == AMDGPU_NULL_SUBMIT_SEQ) {
430 *expired = true;
431 return 0;
432 }
359 433
360 *expired = false; 434 *expired = false;
361 435
@@ -369,3 +443,102 @@ int amdgpu_cs_query_fence_status(struct amdgpu_cs_fence *fence,
369 return r; 443 return r;
370} 444}
371 445
446int amdgpu_cs_create_semaphore(amdgpu_semaphore_handle *sem)
447{
448 struct amdgpu_semaphore *gpu_semaphore;
449
450 if (NULL == sem)
451 return -EINVAL;
452
453 gpu_semaphore = calloc(1, sizeof(struct amdgpu_semaphore));
454 if (NULL == gpu_semaphore)
455 return -ENOMEM;
456
457 atomic_set(&gpu_semaphore->refcount, 1);
458 *sem = gpu_semaphore;
459
460 return 0;
461}
462
463int amdgpu_cs_signal_semaphore(amdgpu_context_handle ctx,
464 uint32_t ip_type,
465 uint32_t ip_instance,
466 uint32_t ring,
467 amdgpu_semaphore_handle sem)
468{
469 if (NULL == ctx)
470 return -EINVAL;
471 if (ip_type >= AMDGPU_HW_IP_NUM)
472 return -EINVAL;
473 if (ring >= AMDGPU_CS_MAX_RINGS)
474 return -EINVAL;
475 if (NULL == sem)
476 return -EINVAL;
477 /* sem has been signaled */
478 if (sem->signal_fence.context)
479 return -EINVAL;
480 pthread_mutex_lock(&ctx->sequence_mutex);
481 sem->signal_fence.context = ctx;
482 sem->signal_fence.ip_type = ip_type;
483 sem->signal_fence.ip_instance = ip_instance;
484 sem->signal_fence.ring = ring;
485 sem->signal_fence.fence = ctx->last_seq[ip_type][ip_instance][ring];
486 update_references(NULL, &sem->refcount);
487 pthread_mutex_unlock(&ctx->sequence_mutex);
488 return 0;
489}
490
491int amdgpu_cs_wait_semaphore(amdgpu_context_handle ctx,
492 uint32_t ip_type,
493 uint32_t ip_instance,
494 uint32_t ring,
495 amdgpu_semaphore_handle sem)
496{
497 if (NULL == ctx)
498 return -EINVAL;
499 if (ip_type >= AMDGPU_HW_IP_NUM)
500 return -EINVAL;
501 if (ring >= AMDGPU_CS_MAX_RINGS)
502 return -EINVAL;
503 if (NULL == sem)
504 return -EINVAL;
505 /* must signal first */
506 if (NULL == sem->signal_fence.context)
507 return -EINVAL;
508
509 pthread_mutex_lock(&ctx->sequence_mutex);
510 list_add(&sem->list, &ctx->sem_list[ip_type][ip_instance][ring]);
511 pthread_mutex_unlock(&ctx->sequence_mutex);
512 return 0;
513}
514
515static int amdgpu_cs_reset_sem(amdgpu_semaphore_handle sem)
516{
517 if (NULL == sem)
518 return -EINVAL;
519 if (NULL == sem->signal_fence.context)
520 return -EINVAL;
521
522 sem->signal_fence.context = NULL;;
523 sem->signal_fence.ip_type = 0;
524 sem->signal_fence.ip_instance = 0;
525 sem->signal_fence.ring = 0;
526 sem->signal_fence.fence = 0;
527
528 return 0;
529}
530
531static int amdgpu_cs_unreference_sem(amdgpu_semaphore_handle sem)
532{
533 if (NULL == sem)
534 return -EINVAL;
535
536 if (update_references(&sem->refcount, NULL))
537 free(sem);
538 return 0;
539}
540
541int amdgpu_cs_destroy_semaphore(amdgpu_semaphore_handle sem)
542{
543 return amdgpu_cs_unreference_sem(sem);
544}
diff --git a/amdgpu/amdgpu_device.c b/amdgpu/amdgpu_device.c
index e5a923e6..f4ede031 100644
--- a/amdgpu/amdgpu_device.c
+++ b/amdgpu/amdgpu_device.c
@@ -44,6 +44,7 @@
44#include "amdgpu_internal.h" 44#include "amdgpu_internal.h"
45#include "util_hash_table.h" 45#include "util_hash_table.h"
46#include "util_math.h" 46#include "util_math.h"
47#include "amdgpu_asic_id.h"
47 48
48#define PTR_TO_UINT(x) ((unsigned)((intptr_t)(x))) 49#define PTR_TO_UINT(x) ((unsigned)((intptr_t)(x)))
49#define UINT_TO_PTR(x) ((void *)((intptr_t)(x))) 50#define UINT_TO_PTR(x) ((void *)((intptr_t)(x)))
@@ -303,3 +304,17 @@ int amdgpu_device_deinitialize(amdgpu_device_handle dev)
303 amdgpu_device_reference(&dev, NULL); 304 amdgpu_device_reference(&dev, NULL);
304 return 0; 305 return 0;
305} 306}
307
308const char *amdgpu_get_marketing_name(amdgpu_device_handle dev)
309{
310 const struct amdgpu_asic_id_table_t *t = amdgpu_asic_id_table;
311
312 while (t->did) {
313 if ((t->did == dev->info.asic_id) &&
314 (t->rid == dev->info.pci_rev_id))
315 return t->marketing_name;
316 t++;
317 }
318
319 return NULL;
320}
diff --git a/amdgpu/amdgpu_gpu_info.c b/amdgpu/amdgpu_gpu_info.c
index 0cc17f1f..66c7e0e1 100644
--- a/amdgpu/amdgpu_gpu_info.c
+++ b/amdgpu/amdgpu_gpu_info.c
@@ -119,7 +119,7 @@ int amdgpu_query_firmware_version(amdgpu_device_handle dev, unsigned fw_type,
119 uint32_t *version, uint32_t *feature) 119 uint32_t *version, uint32_t *feature)
120{ 120{
121 struct drm_amdgpu_info request; 121 struct drm_amdgpu_info request;
122 struct drm_amdgpu_info_firmware firmware; 122 struct drm_amdgpu_info_firmware firmware = {};
123 int r; 123 int r;
124 124
125 memset(&request, 0, sizeof(request)); 125 memset(&request, 0, sizeof(request));
@@ -187,10 +187,12 @@ drm_private int amdgpu_query_gpu_info_init(amdgpu_device_handle dev)
187 if (r) 187 if (r)
188 return r; 188 return r;
189 189
190 r = amdgpu_read_mm_registers(dev, 0xa0d5, 1, instance, 0, 190 if (dev->info.family_id >= AMDGPU_FAMILY_CI) {
191 r = amdgpu_read_mm_registers(dev, 0xa0d5, 1, instance, 0,
191 &dev->info.pa_sc_raster_cfg1[i]); 192 &dev->info.pa_sc_raster_cfg1[i]);
192 if (r) 193 if (r)
193 return r; 194 return r;
195 }
194 } 196 }
195 197
196 r = amdgpu_read_mm_registers(dev, 0x2644, 32, 0xffffffff, 0, 198 r = amdgpu_read_mm_registers(dev, 0x2644, 32, 0xffffffff, 0,
@@ -198,10 +200,12 @@ drm_private int amdgpu_query_gpu_info_init(amdgpu_device_handle dev)
198 if (r) 200 if (r)
199 return r; 201 return r;
200 202
201 r = amdgpu_read_mm_registers(dev, 0x2664, 16, 0xffffffff, 0, 203 if (dev->info.family_id >= AMDGPU_FAMILY_CI) {
202 dev->info.gb_macro_tile_mode); 204 r = amdgpu_read_mm_registers(dev, 0x2664, 16, 0xffffffff, 0,
203 if (r) 205 dev->info.gb_macro_tile_mode);
204 return r; 206 if (r)
207 return r;
208 }
205 209
206 r = amdgpu_read_mm_registers(dev, 0x263e, 1, 0xffffffff, 0, 210 r = amdgpu_read_mm_registers(dev, 0x263e, 1, 0xffffffff, 0,
207 &dev->info.gb_addr_cfg); 211 &dev->info.gb_addr_cfg);
@@ -226,6 +230,8 @@ drm_private int amdgpu_query_gpu_info_init(amdgpu_device_handle dev)
226int amdgpu_query_gpu_info(amdgpu_device_handle dev, 230int amdgpu_query_gpu_info(amdgpu_device_handle dev,
227 struct amdgpu_gpu_info *info) 231 struct amdgpu_gpu_info *info)
228{ 232{
233 if ((dev == NULL) || (info == NULL))
234 return -EINVAL;
229 /* Get ASIC info*/ 235 /* Get ASIC info*/
230 *info = dev->info; 236 *info = dev->info;
231 237
diff --git a/amdgpu/amdgpu_internal.h b/amdgpu/amdgpu_internal.h
index 7dd5c1c7..4f039b68 100644
--- a/amdgpu/amdgpu_internal.h
+++ b/amdgpu/amdgpu_internal.h
@@ -44,6 +44,7 @@
44#define ROUND_DOWN(x, y) ((x) & ~__round_mask(x, y)) 44#define ROUND_DOWN(x, y) ((x) & ~__round_mask(x, y))
45 45
46#define AMDGPU_INVALID_VA_ADDRESS 0xffffffffffffffff 46#define AMDGPU_INVALID_VA_ADDRESS 0xffffffffffffffff
47#define AMDGPU_NULL_SUBMIT_SEQ 0
47 48
48struct amdgpu_bo_va_hole { 49struct amdgpu_bo_va_hole {
49 struct list_head list; 50 struct list_head list;
@@ -111,8 +112,23 @@ struct amdgpu_bo_list {
111 112
112struct amdgpu_context { 113struct amdgpu_context {
113 struct amdgpu_device *dev; 114 struct amdgpu_device *dev;
115 /** Mutex for accessing fences and to maintain command submissions
116 in good sequence. */
117 pthread_mutex_t sequence_mutex;
114 /* context id*/ 118 /* context id*/
115 uint32_t id; 119 uint32_t id;
120 uint64_t last_seq[AMDGPU_HW_IP_NUM][AMDGPU_HW_IP_INSTANCE_MAX_COUNT][AMDGPU_CS_MAX_RINGS];
121 struct list_head sem_list[AMDGPU_HW_IP_NUM][AMDGPU_HW_IP_INSTANCE_MAX_COUNT][AMDGPU_CS_MAX_RINGS];
122};
123
124/**
125 * Structure describing sw semaphore based on scheduler
126 *
127 */
128struct amdgpu_semaphore {
129 atomic_t refcount;
130 struct list_head list;
131 struct amdgpu_cs_fence signal_fence;
116}; 132};
117 133
118/** 134/**
diff --git a/amdgpu/libdrm_amdgpu.pc.in b/amdgpu/libdrm_amdgpu.pc.in
index 417865e5..f1c552a6 100644
--- a/amdgpu/libdrm_amdgpu.pc.in
+++ b/amdgpu/libdrm_amdgpu.pc.in
@@ -8,3 +8,4 @@ Description: Userspace interface to kernel DRM services for amdgpu
8Version: @PACKAGE_VERSION@ 8Version: @PACKAGE_VERSION@
9Libs: -L${libdir} -ldrm_amdgpu 9Libs: -L${libdir} -ldrm_amdgpu
10Cflags: -I${includedir} -I${includedir}/libdrm 10Cflags: -I${includedir} -I${includedir}/libdrm
11Requires.private: libdrm
diff --git a/autogen.sh b/autogen.sh
index c8960971..d82ab180 100755
--- a/autogen.sh
+++ b/autogen.sh
@@ -9,6 +9,12 @@ cd "$srcdir"
9autoreconf --force --verbose --install || exit 1 9autoreconf --force --verbose --install || exit 1
10cd "$ORIGDIR" || exit $? 10cd "$ORIGDIR" || exit $?
11 11
12git config --local --get format.subjectPrefix ||
13 git config --local format.subjectPrefix "PATCH libdrm" 2>/dev/null
14
15git config --local --get sendemail.to ||
16 git config --local sendemail.to "dri-devel@lists.freedesktop.org" 2>/dev/null
17
12if test -z "$NOCONFIGURE"; then 18if test -z "$NOCONFIGURE"; then
13 "$srcdir"/configure "$@" 19 "$srcdir"/configure "$@"
14fi 20fi
diff --git a/configure.ac b/configure.ac
index f729fd87..1da9d86b 100644
--- a/configure.ac
+++ b/configure.ac
@@ -20,7 +20,7 @@
20 20
21AC_PREREQ([2.63]) 21AC_PREREQ([2.63])
22AC_INIT([libdrm], 22AC_INIT([libdrm],
23 [2.4.66], 23 [2.4.75],
24 [https://bugs.freedesktop.org/enter_bug.cgi?product=DRI], 24 [https://bugs.freedesktop.org/enter_bug.cgi?product=DRI],
25 [libdrm]) 25 [libdrm])
26 26
@@ -53,14 +53,15 @@ AC_USE_SYSTEM_EXTENSIONS
53AC_SYS_LARGEFILE 53AC_SYS_LARGEFILE
54AC_FUNC_ALLOCA 54AC_FUNC_ALLOCA
55 55
56AC_CHECK_HEADERS([sys/mkdev.h sys/sysctl.h]) 56AC_HEADER_MAJOR
57AC_CHECK_HEADERS([sys/sysctl.h sys/select.h])
57 58
58# Initialize libtool 59# Initialize libtool
59LT_PREREQ([2.2]) 60LT_PREREQ([2.2])
60LT_INIT([disable-static]) 61LT_INIT([disable-static])
61 62
62 63
63PKG_CHECK_MODULES(PTHREADSTUBS, pthread-stubs) 64
64AC_SUBST(PTHREADSTUBS_CFLAGS) 65AC_SUBST(PTHREADSTUBS_CFLAGS)
65AC_SUBST(PTHREADSTUBS_LIBS) 66AC_SUBST(PTHREADSTUBS_LIBS)
66 67
@@ -131,6 +132,16 @@ AC_ARG_ENABLE(rockchip-experimental-api,
131 [Enable support for rockchip's experimental API (default: disabled)]), 132 [Enable support for rockchip's experimental API (default: disabled)]),
132 [ROCKCHIP=$enableval], [ROCKCHIP=no]) 133 [ROCKCHIP=$enableval], [ROCKCHIP=no])
133 134
135AC_ARG_ENABLE(vc4,
136 AS_HELP_STRING([--disable-vc4],
137 [Enable support for vc4's API (default: auto, enabled on arm)]),
138 [VC4=$enableval], [VC4=auto])
139
140AC_ARG_ENABLE(etnaviv-experimental-api,
141 AS_HELP_STRING([--enable-etnaviv-experimental-api],
142 [Enable support for etnaviv's experimental API (default: disabled)]),
143 [ETNAVIV=$enableval], [ETNAVIV=no])
144
134AC_ARG_ENABLE(install-test-programs, 145AC_ARG_ENABLE(install-test-programs,
135 AS_HELP_STRING([--enable-install-test-programs], 146 AS_HELP_STRING([--enable-install-test-programs],
136 [Install test programs (default: no)]), 147 [Install test programs (default: no)]),
@@ -179,7 +190,8 @@ MAYBE_WARN="-Wall -Wextra \
179-Wstrict-aliasing=2 -Winit-self \ 190-Wstrict-aliasing=2 -Winit-self \
180-Wdeclaration-after-statement -Wold-style-definition \ 191-Wdeclaration-after-statement -Wold-style-definition \
181-Wno-unused-parameter \ 192-Wno-unused-parameter \
182-Wno-attributes -Wno-long-long -Winline -Wshadow" 193-Wno-attributes -Wno-long-long -Winline -Wshadow \
194-Wno-missing-field-initializers"
183 195
184# invalidate cached value if MAYBE_WARN has changed 196# invalidate cached value if MAYBE_WARN has changed
185if test "x$libdrm_cv_warn_maybe" != "x$MAYBE_WARN"; then 197if test "x$libdrm_cv_warn_maybe" != "x$MAYBE_WARN"; then
@@ -272,6 +284,9 @@ if test "x$drm_cv_atomic_primitives" = "xnone"; then
272 284
273 LIBDRM_ATOMICS_NOT_FOUND_MSG($TEGRA, tegra, NVIDIA Tegra, tegra-experimental-api) 285 LIBDRM_ATOMICS_NOT_FOUND_MSG($TEGRA, tegra, NVIDIA Tegra, tegra-experimental-api)
274 TEGRA=no 286 TEGRA=no
287
288 LIBDRM_ATOMICS_NOT_FOUND_MSG($ETNAVIV, etnaviv, Vivante, etnaviv-experimental-api)
289 ETNAVIV=no
275else 290else
276 if test "x$INTEL" = xauto; then 291 if test "x$INTEL" = xauto; then
277 case $host_cpu in 292 case $host_cpu in
@@ -294,6 +309,12 @@ else
294 *) FREEDRENO=no ;; 309 *) FREEDRENO=no ;;
295 esac 310 esac
296 fi 311 fi
312 if test "x$VC4" = xauto; then
313 case $host_cpu in
314 arm*|aarch64) VC4=yes ;;
315 *) VC4=no ;;
316 esac
317 fi
297fi 318fi
298 319
299if test "x$INTEL" != "xno"; then 320if test "x$INTEL" != "xno"; then
@@ -364,19 +385,23 @@ if test "x$RADEON" = xyes; then
364 AC_DEFINE(HAVE_RADEON, 1, [Have radeon support]) 385 AC_DEFINE(HAVE_RADEON, 1, [Have radeon support])
365fi 386fi
366 387
367# Detect cunit library 388if test "x$AMDGPU" != xno; then
368PKG_CHECK_MODULES([CUNIT], [cunit >= 2.1], [have_cunit=yes], [have_cunit=no]) 389 # Detect cunit library
369# If pkg-config does not find cunit, check it using AC_CHECK_LIB. We 390 PKG_CHECK_MODULES([CUNIT], [cunit >= 2.1], [have_cunit=yes], [have_cunit=no])
370# do this because Debian (Ubuntu) lacks pkg-config file for cunit. 391 # If pkg-config does not find cunit, check it using AC_CHECK_LIB. We
371# fixed in 2.1-2.dfsg-3: http://anonscm.debian.org/cgit/collab-maint/cunit.git/commit/?h=debian 392 # do this because Debian (Ubuntu) lacks pkg-config file for cunit.
372if test "x${have_cunit}" = "xno"; then 393 # fixed in 2.1-2.dfsg-3: http://anonscm.debian.org/cgit/collab-maint/cunit.git/commit/?h=debian
373 AC_CHECK_LIB([cunit], [CU_initialize_registry], [have_cunit=yes], [have_cunit=no]) 394 if test "x${have_cunit}" = "xno"; then
374 if test "x${have_cunit}" = "xyes"; then 395 AC_CHECK_LIB([cunit], [CU_initialize_registry], [have_cunit=yes], [have_cunit=no])
375 CUNIT_LIBS="-lcunit" 396 if test "x${have_cunit}" = "xyes"; then
376 CUNIT_CFLAGS="" 397 CUNIT_LIBS="-lcunit"
377 AC_SUBST([CUNIT_LIBS]) 398 CUNIT_CFLAGS=""
378 AC_SUBST([CUNIT_CFLAGS]) 399 AC_SUBST([CUNIT_LIBS])
400 AC_SUBST([CUNIT_CFLAGS])
401 fi
379 fi 402 fi
403else
404 have_cunit=no
380fi 405fi
381AM_CONDITIONAL(HAVE_CUNIT, [test "x$have_cunit" != "xno"]) 406AM_CONDITIONAL(HAVE_CUNIT, [test "x$have_cunit" != "xno"])
382 407
@@ -399,6 +424,15 @@ fi
399AM_CONDITIONAL(HAVE_ROCKCHIP, [test "x$ROCKCHIP" = xyes]) 424AM_CONDITIONAL(HAVE_ROCKCHIP, [test "x$ROCKCHIP" = xyes])
400if test "x$ROCKCHIP" = xyes; then 425if test "x$ROCKCHIP" = xyes; then
401 AC_DEFINE(HAVE_ROCKCHIP, 1, [Have ROCKCHIP support]) 426 AC_DEFINE(HAVE_ROCKCHIP, 1, [Have ROCKCHIP support])
427
428AM_CONDITIONAL(HAVE_VC4, [test "x$VC4" = xyes])
429if test "x$VC4" = xyes; then
430 AC_DEFINE(HAVE_VC4, 1, [Have VC4 support])
431fi
432
433AM_CONDITIONAL(HAVE_ETNAVIV, [test "x$ETNAVIV" = xyes])
434if test "x$ETNAVIV" = xyes; then
435 AC_DEFINE(HAVE_ETNAVIV, 1, [Have etnaviv support])
402fi 436fi
403 437
404AM_CONDITIONAL(HAVE_INSTALL_TESTS, [test "x$INSTALL_TESTS" = xyes]) 438AM_CONDITIONAL(HAVE_INSTALL_TESTS, [test "x$INSTALL_TESTS" = xyes])
@@ -410,7 +444,9 @@ AC_ARG_ENABLE([cairo-tests],
410 [AS_HELP_STRING([--enable-cairo-tests], 444 [AS_HELP_STRING([--enable-cairo-tests],
411 [Enable support for Cairo rendering in tests (default: auto)])], 445 [Enable support for Cairo rendering in tests (default: auto)])],
412 [CAIRO=$enableval], [CAIRO=auto]) 446 [CAIRO=$enableval], [CAIRO=auto])
413PKG_CHECK_MODULES(CAIRO, cairo, [HAVE_CAIRO=yes], [HAVE_CAIRO=no]) 447if test "x$CAIRO" != xno; then
448 PKG_CHECK_MODULES(CAIRO, cairo, [HAVE_CAIRO=yes], [HAVE_CAIRO=no])
449fi
414AC_MSG_CHECKING([whether to enable Cairo tests]) 450AC_MSG_CHECKING([whether to enable Cairo tests])
415if test "x$CAIRO" = xauto; then 451if test "x$CAIRO" = xauto; then
416 CAIRO="$HAVE_CAIRO" 452 CAIRO="$HAVE_CAIRO"
@@ -424,13 +460,6 @@ fi
424AC_MSG_RESULT([$CAIRO]) 460AC_MSG_RESULT([$CAIRO])
425AM_CONDITIONAL(HAVE_CAIRO, [test "x$CAIRO" = xyes]) 461AM_CONDITIONAL(HAVE_CAIRO, [test "x$CAIRO" = xyes])
426 462
427# For enumerating devices in test case
428PKG_CHECK_MODULES(LIBUDEV, libudev, [HAVE_LIBUDEV=yes], [HAVE_LIBUDEV=no])
429if test "x$HAVE_LIBUDEV" = xyes; then
430 AC_DEFINE(HAVE_LIBUDEV, 1, [Have libudev support])
431fi
432AM_CONDITIONAL(HAVE_LIBUDEV, [test "x$HAVE_LIBUDEV" = xyes])
433
434# xsltproc for docbook manpages 463# xsltproc for docbook manpages
435AC_ARG_ENABLE([manpages], 464AC_ARG_ENABLE([manpages],
436 AS_HELP_STRING([--enable-manpages], [enable manpages @<:@default=auto@:>@]), 465 AS_HELP_STRING([--enable-manpages], [enable manpages @<:@default=auto@:>@]),
@@ -455,7 +484,9 @@ AC_ARG_ENABLE(valgrind,
455 [AS_HELP_STRING([--enable-valgrind], 484 [AS_HELP_STRING([--enable-valgrind],
456 [Build libdrm with valgrind support (default: auto)])], 485 [Build libdrm with valgrind support (default: auto)])],
457 [VALGRIND=$enableval], [VALGRIND=auto]) 486 [VALGRIND=$enableval], [VALGRIND=auto])
458PKG_CHECK_MODULES(VALGRIND, [valgrind], [have_valgrind=yes], [have_valgrind=no]) 487if test "x$VALGRIND" != xno; then
488 PKG_CHECK_MODULES(VALGRIND, [valgrind], [have_valgrind=yes], [have_valgrind=no])
489fi
459AC_MSG_CHECKING([whether to enable Valgrind support]) 490AC_MSG_CHECKING([whether to enable Valgrind support])
460if test "x$VALGRIND" = xauto; then 491if test "x$VALGRIND" = xauto; then
461 VALGRIND="$have_valgrind" 492 VALGRIND="$have_valgrind"
@@ -508,6 +539,10 @@ AC_CONFIG_FILES([
508 tegra/libdrm_tegra.pc 539 tegra/libdrm_tegra.pc
509 rockchip/Makefile 540 rockchip/Makefile
510 rockchip/libdrm_rockchip.pc 541 rockchip/libdrm_rockchip.pc
542 vc4/Makefile
543 vc4/libdrm_vc4.pc
544 etnaviv/Makefile
545 etnaviv/libdrm_etnaviv.pc
511 tests/Makefile 546 tests/Makefile
512 tests/modeprint/Makefile 547 tests/modeprint/Makefile
513 tests/modetest/Makefile 548 tests/modetest/Makefile
@@ -521,6 +556,7 @@ AC_CONFIG_FILES([
521 tests/tegra/Makefile 556 tests/tegra/Makefile
522 tests/nouveau/Makefile 557 tests/nouveau/Makefile
523 tests/planetest/Makefile 558 tests/planetest/Makefile
559 tests/etnaviv/Makefile
524 tests/util/Makefile 560 tests/util/Makefile
525 man/Makefile 561 man/Makefile
526 libdrm.pc]) 562 libdrm.pc])
@@ -540,4 +576,6 @@ echo " EXYNOS API $EXYNOS"
540echo " Freedreno API $FREEDRENO (kgsl: $FREEDRENO_KGSL)" 576echo " Freedreno API $FREEDRENO (kgsl: $FREEDRENO_KGSL)"
541echo " Tegra API $TEGRA" 577echo " Tegra API $TEGRA"
542echo " Rockchip API $ROCKCHIP" 578echo " Rockchip API $ROCKCHIP"
579echo " VC4 API $VC4"
580echo " Etnaviv API $ETNAVIV"
543echo "" 581echo ""
diff --git a/etnaviv/Android.mk b/etnaviv/Android.mk
new file mode 100644
index 00000000..390f9a98
--- /dev/null
+++ b/etnaviv/Android.mk
@@ -0,0 +1,14 @@
1LOCAL_PATH := $(call my-dir)
2include $(CLEAR_VARS)
3
4# Import variables LIBDRM_ETNAVIV_FILES, LIBDRM_ETNAVIV_H_FILES
5include $(LOCAL_PATH)/Makefile.sources
6
7LOCAL_MODULE := libdrm_etnaviv
8
9LOCAL_SHARED_LIBRARIES := libdrm
10
11LOCAL_SRC_FILES := $(LIBDRM_ETNAVIV_FILES)
12
13include $(LIBDRM_COMMON_MK)
14include $(BUILD_SHARED_LIBRARY)
diff --git a/etnaviv/Makefile.am b/etnaviv/Makefile.am
new file mode 100644
index 00000000..be96ba86
--- /dev/null
+++ b/etnaviv/Makefile.am
@@ -0,0 +1,26 @@
1include Makefile.sources
2
3AM_CFLAGS = \
4 $(WARN_CFLAGS) \
5 -I$(top_srcdir) \
6 $(PTHREADSTUBS_CFLAGS) \
7 -I$(top_srcdir)/include/drm
8
9libdrm_etnaviv_ladir = $(libdir)
10libdrm_etnaviv_la_LTLIBRARIES = libdrm_etnaviv.la
11libdrm_etnaviv_la_LDFLAGS = -version-number 1:0:0 -no-undefined
12libdrm_etnaviv_la_LIBADD = \
13 ../libdrm.la \
14 @PTHREADSTUBS_LIBS@ \
15 @CLOCK_LIB@
16
17libdrm_etnaviv_la_SOURCES = $(LIBDRM_ETNAVIV_FILES)
18
19libdrm_etnavivincludedir = ${includedir}/libdrm
20libdrm_etnavivinclude_HEADERS = $(LIBDRM_ETNAVIV_H_FILES)
21
22pkgconfigdir = @pkgconfigdir@
23pkgconfig_DATA = libdrm_etnaviv.pc
24
25TESTS = etnaviv-symbol-check
26EXTRA_DIST = $(TESTS)
diff --git a/etnaviv/Makefile.sources b/etnaviv/Makefile.sources
new file mode 100644
index 00000000..52580567
--- /dev/null
+++ b/etnaviv/Makefile.sources
@@ -0,0 +1,12 @@
1LIBDRM_ETNAVIV_FILES := \
2 etnaviv_device.c \
3 etnaviv_gpu.c \
4 etnaviv_bo.c \
5 etnaviv_bo_cache.c \
6 etnaviv_pipe.c \
7 etnaviv_cmd_stream.c \
8 etnaviv_drm.h \
9 etnaviv_priv.h
10
11LIBDRM_ETNAVIV_H_FILES := \
12 etnaviv_drmif.h
diff --git a/etnaviv/etnaviv-symbol-check b/etnaviv/etnaviv-symbol-check
new file mode 100755
index 00000000..22afd168
--- /dev/null
+++ b/etnaviv/etnaviv-symbol-check
@@ -0,0 +1,48 @@
1#!/bin/bash
2
3# The following symbols (past the first five) are taken from the public headers.
4# A list of the latter should be available Makefile.sources/LIBDRM_ETNAVIV_H_FILES
5
6FUNCS=$(nm -D --format=bsd --defined-only ${1-.libs/libdrm_etnaviv.so} | awk '{print $3}'| while read func; do
7( grep -q "^$func$" || echo $func ) <<EOF
8__bss_start
9_edata
10_end
11_fini
12_init
13etna_device_new
14etna_device_new_dup
15etna_device_ref
16etna_device_del
17etna_device_fd
18etna_gpu_new
19etna_gpu_del
20etna_gpu_get_param
21etna_pipe_new
22etna_pipe_del
23etna_pipe_wait
24etna_pipe_wait_ns
25etna_bo_new
26etna_bo_from_handle
27etna_bo_from_name
28etna_bo_from_dmabuf
29etna_bo_ref
30etna_bo_del
31etna_bo_get_name
32etna_bo_handle
33etna_bo_dmabuf
34etna_bo_size
35etna_bo_map
36etna_bo_cpu_prep
37etna_bo_cpu_fini
38etna_cmd_stream_new
39etna_cmd_stream_del
40etna_cmd_stream_timestamp
41etna_cmd_stream_flush
42etna_cmd_stream_finish
43etna_cmd_stream_reloc
44EOF
45done)
46
47test ! -n "$FUNCS" || echo $FUNCS
48test ! -n "$FUNCS"
diff --git a/etnaviv/etnaviv_bo.c b/etnaviv/etnaviv_bo.c
new file mode 100644
index 00000000..4ad0434e
--- /dev/null
+++ b/etnaviv/etnaviv_bo.c
@@ -0,0 +1,347 @@
1/*
2 * Copyright (C) 2014 Etnaviv Project
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 * Authors:
24 * Christian Gmeiner <christian.gmeiner@gmail.com>
25 */
26
27#ifdef HAVE_CONFIG_H
28# include <config.h>
29#endif
30
31#include "etnaviv_priv.h"
32#include "etnaviv_drmif.h"
33
34drm_private pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
35drm_private void bo_del(struct etna_bo *bo);
36
37/* set buffer name, and add to table, call w/ table_lock held: */
38static void set_name(struct etna_bo *bo, uint32_t name)
39{
40 bo->name = name;
41 /* add ourself into the name table: */
42 drmHashInsert(bo->dev->name_table, name, bo);
43}
44
45/* Called under table_lock */
46drm_private void bo_del(struct etna_bo *bo)
47{
48 if (bo->map)
49 drm_munmap(bo->map, bo->size);
50
51 if (bo->name)
52 drmHashDelete(bo->dev->name_table, bo->name);
53
54 if (bo->handle) {
55 struct drm_gem_close req = {
56 .handle = bo->handle,
57 };
58
59 drmHashDelete(bo->dev->handle_table, bo->handle);
60 drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
61 }
62
63 free(bo);
64}
65
66/* lookup a buffer from it's handle, call w/ table_lock held: */
67static struct etna_bo *lookup_bo(void *tbl, uint32_t handle)
68{
69 struct etna_bo *bo = NULL;
70
71 if (!drmHashLookup(tbl, handle, (void **)&bo)) {
72 /* found, incr refcnt and return: */
73 bo = etna_bo_ref(bo);
74
75 /* don't break the bucket if this bo was found in one */
76 list_delinit(&bo->list);
77 }
78
79 return bo;
80}
81
82/* allocate a new buffer object, call w/ table_lock held */
83static struct etna_bo *bo_from_handle(struct etna_device *dev,
84 uint32_t size, uint32_t handle, uint32_t flags)
85{
86 struct etna_bo *bo = calloc(sizeof(*bo), 1);
87
88 if (!bo) {
89 struct drm_gem_close req = {
90 .handle = handle,
91 };
92
93 drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
94
95 return NULL;
96 }
97
98 bo->dev = etna_device_ref(dev);
99 bo->size = size;
100 bo->handle = handle;
101 bo->flags = flags;
102 atomic_set(&bo->refcnt, 1);
103 list_inithead(&bo->list);
104 /* add ourselves to the handle table: */
105 drmHashInsert(dev->handle_table, handle, bo);
106
107 return bo;
108}
109
110/* allocate a new (un-tiled) buffer object */
111struct etna_bo *etna_bo_new(struct etna_device *dev, uint32_t size,
112 uint32_t flags)
113{
114 struct etna_bo *bo;
115 int ret;
116 struct drm_etnaviv_gem_new req = {
117 .flags = flags,
118 };
119
120 bo = etna_bo_cache_alloc(&dev->bo_cache, &size, flags);
121 if (bo)
122 return bo;
123
124 req.size = size;
125 ret = drmCommandWriteRead(dev->fd, DRM_ETNAVIV_GEM_NEW,
126 &req, sizeof(req));
127 if (ret)
128 return NULL;
129
130 pthread_mutex_lock(&table_lock);
131 bo = bo_from_handle(dev, size, req.handle, flags);
132 bo->reuse = 1;
133 pthread_mutex_unlock(&table_lock);
134
135 return bo;
136}
137
138struct etna_bo *etna_bo_ref(struct etna_bo *bo)
139{
140 atomic_inc(&bo->refcnt);
141
142 return bo;
143}
144
145/* get buffer info */
146static int get_buffer_info(struct etna_bo *bo)
147{
148 int ret;
149 struct drm_etnaviv_gem_info req = {
150 .handle = bo->handle,
151 };
152
153 ret = drmCommandWriteRead(bo->dev->fd, DRM_ETNAVIV_GEM_INFO,
154 &req, sizeof(req));
155 if (ret) {
156 return ret;
157 }
158
159 /* really all we need for now is mmap offset */
160 bo->offset = req.offset;
161
162 return 0;
163}
164
165/* import a buffer object from DRI2 name */
166struct etna_bo *etna_bo_from_name(struct etna_device *dev, uint32_t name)
167{
168 struct etna_bo *bo;
169 struct drm_gem_open req = {
170 .name = name,
171 };
172
173 pthread_mutex_lock(&table_lock);
174
175 /* check name table first, to see if bo is already open: */
176 bo = lookup_bo(dev->name_table, req.handle);
177 if (bo)
178 goto out_unlock;
179
180 if (drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req)) {
181 ERROR_MSG("gem-open failed: %s", strerror(errno));
182 goto out_unlock;
183 }
184
185 bo = lookup_bo(dev->handle_table, req.handle);
186 if (bo)
187 goto out_unlock;
188
189 bo = bo_from_handle(dev, req.size, req.handle, 0);
190 if (bo)
191 set_name(bo, name);
192
193out_unlock:
194 pthread_mutex_unlock(&table_lock);
195
196 return bo;
197}
198
199/* import a buffer from dmabuf fd, does not take ownership of the
200 * fd so caller should close() the fd when it is otherwise done
201 * with it (even if it is still using the 'struct etna_bo *')
202 */
203struct etna_bo *etna_bo_from_dmabuf(struct etna_device *dev, int fd)
204{
205 struct etna_bo *bo;
206 int ret, size;
207 uint32_t handle;
208
209 pthread_mutex_lock(&table_lock);
210
211 ret = drmPrimeFDToHandle(dev->fd, fd, &handle);
212 if (ret) {
213 return NULL;
214 }
215
216 bo = lookup_bo(dev->handle_table, handle);
217 if (bo)
218 goto out_unlock;
219
220 /* lseek() to get bo size */
221 size = lseek(fd, 0, SEEK_END);
222 lseek(fd, 0, SEEK_CUR);
223
224 bo = bo_from_handle(dev, size, handle, 0);
225
226out_unlock:
227 pthread_mutex_unlock(&table_lock);
228
229 return bo;
230}
231
232/* destroy a buffer object */
233void etna_bo_del(struct etna_bo *bo)
234{
235 struct etna_device *dev = bo->dev;
236
237 if (!bo)
238 return;
239
240 if (!atomic_dec_and_test(&bo->refcnt))
241 return;
242
243 pthread_mutex_lock(&table_lock);
244
245 if (bo->reuse && (etna_bo_cache_free(&dev->bo_cache, bo) == 0))
246 goto out;
247
248 bo_del(bo);
249 etna_device_del_locked(dev);
250out:
251 pthread_mutex_unlock(&table_lock);
252}
253
254/* get the global flink/DRI2 buffer name */
255int etna_bo_get_name(struct etna_bo *bo, uint32_t *name)
256{
257 if (!bo->name) {
258 struct drm_gem_flink req = {
259 .handle = bo->handle,
260 };
261 int ret;
262
263 ret = drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_FLINK, &req);
264 if (ret) {
265 return ret;
266 }
267
268 pthread_mutex_lock(&table_lock);
269 set_name(bo, req.name);
270 pthread_mutex_unlock(&table_lock);
271 bo->reuse = 0;
272 }
273
274 *name = bo->name;
275
276 return 0;
277}
278
279uint32_t etna_bo_handle(struct etna_bo *bo)
280{
281 return bo->handle;
282}
283
284/* caller owns the dmabuf fd that is returned and is responsible
285 * to close() it when done
286 */
287int etna_bo_dmabuf(struct etna_bo *bo)
288{
289 int ret, prime_fd;
290
291 ret = drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
292 &prime_fd);
293 if (ret) {
294 ERROR_MSG("failed to get dmabuf fd: %d", ret);
295 return ret;
296 }
297
298 bo->reuse = 0;
299
300 return prime_fd;
301}
302
303uint32_t etna_bo_size(struct etna_bo *bo)
304{
305 return bo->size;
306}
307
308void *etna_bo_map(struct etna_bo *bo)
309{
310 if (!bo->map) {
311 if (!bo->offset) {
312 get_buffer_info(bo);
313 }
314
315 bo->map = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
316 MAP_SHARED, bo->dev->fd, bo->offset);
317 if (bo->map == MAP_FAILED) {
318 ERROR_MSG("mmap failed: %s", strerror(errno));
319 bo->map = NULL;
320 }
321 }
322
323 return bo->map;
324}
325
326int etna_bo_cpu_prep(struct etna_bo *bo, uint32_t op)
327{
328 struct drm_etnaviv_gem_cpu_prep req = {
329 .handle = bo->handle,
330 .op = op,
331 };
332
333 get_abs_timeout(&req.timeout, 5000000000);
334
335 return drmCommandWrite(bo->dev->fd, DRM_ETNAVIV_GEM_CPU_PREP,
336 &req, sizeof(req));
337}
338
339void etna_bo_cpu_fini(struct etna_bo *bo)
340{
341 struct drm_etnaviv_gem_cpu_fini req = {
342 .handle = bo->handle,
343 };
344
345 drmCommandWrite(bo->dev->fd, DRM_ETNAVIV_GEM_CPU_FINI,
346 &req, sizeof(req));
347}
diff --git a/etnaviv/etnaviv_bo_cache.c b/etnaviv/etnaviv_bo_cache.c
new file mode 100644
index 00000000..8924651f
--- /dev/null
+++ b/etnaviv/etnaviv_bo_cache.c
@@ -0,0 +1,196 @@
1/*
2 * Copyright (C) 2016 Etnaviv Project
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 * Authors:
24 * Christian Gmeiner <christian.gmeiner@gmail.com>
25 */
26
27#ifdef HAVE_CONFIG_H
28# include <config.h>
29#endif
30
31#include "etnaviv_priv.h"
32#include "etnaviv_drmif.h"
33
34drm_private void bo_del(struct etna_bo *bo);
35drm_private extern pthread_mutex_t table_lock;
36
37static void add_bucket(struct etna_bo_cache *cache, int size)
38{
39 unsigned i = cache->num_buckets;
40
41 assert(i < ARRAY_SIZE(cache->cache_bucket));
42
43 list_inithead(&cache->cache_bucket[i].list);
44 cache->cache_bucket[i].size = size;
45 cache->num_buckets++;
46}
47
48drm_private void etna_bo_cache_init(struct etna_bo_cache *cache)
49{
50 unsigned long size, cache_max_size = 64 * 1024 * 1024;
51
52 /* OK, so power of two buckets was too wasteful of memory.
53 * Give 3 other sizes between each power of two, to hopefully
54 * cover things accurately enough. (The alternative is
55 * probably to just go for exact matching of sizes, and assume
56 * that for things like composited window resize the tiled
57 * width/height alignment and rounding of sizes to pages will
58 * get us useful cache hit rates anyway)
59 */
60 add_bucket(cache, 4096);
61 add_bucket(cache, 4096 * 2);
62 add_bucket(cache, 4096 * 3);
63
64 /* Initialize the linked lists for BO reuse cache. */
65 for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
66 add_bucket(cache, size);
67 add_bucket(cache, size + size * 1 / 4);
68 add_bucket(cache, size + size * 2 / 4);
69 add_bucket(cache, size + size * 3 / 4);
70 }
71}
72
73/* Frees older cached buffers. Called under table_lock */
74drm_private void etna_bo_cache_cleanup(struct etna_bo_cache *cache, time_t time)
75{
76 unsigned i;
77
78 if (cache->time == time)
79 return;
80
81 for (i = 0; i < cache->num_buckets; i++) {
82 struct etna_bo_bucket *bucket = &cache->cache_bucket[i];
83 struct etna_bo *bo;
84
85 while (!LIST_IS_EMPTY(&bucket->list)) {
86 bo = LIST_ENTRY(struct etna_bo, bucket->list.next, list);
87
88 /* keep things in cache for at least 1 second: */
89 if (time && ((time - bo->free_time) <= 1))
90 break;
91
92 list_del(&bo->list);
93 bo_del(bo);
94 }
95 }
96
97 cache->time = time;
98}
99
100static struct etna_bo_bucket *get_bucket(struct etna_bo_cache *cache, uint32_t size)
101{
102 unsigned i;
103
104 /* hmm, this is what intel does, but I suppose we could calculate our
105 * way to the correct bucket size rather than looping..
106 */
107 for (i = 0; i < cache->num_buckets; i++) {
108 struct etna_bo_bucket *bucket = &cache->cache_bucket[i];
109 if (bucket->size >= size) {
110 return bucket;
111 }
112 }
113
114 return NULL;
115}
116
117static int is_idle(struct etna_bo *bo)
118{
119 return etna_bo_cpu_prep(bo,
120 DRM_ETNA_PREP_READ |
121 DRM_ETNA_PREP_WRITE |
122 DRM_ETNA_PREP_NOSYNC) == 0;
123}
124
125static struct etna_bo *find_in_bucket(struct etna_bo_bucket *bucket, uint32_t flags)
126{
127 struct etna_bo *bo = NULL;
128
129 pthread_mutex_lock(&table_lock);
130 while (!LIST_IS_EMPTY(&bucket->list)) {
131 bo = LIST_ENTRY(struct etna_bo, bucket->list.next, list);
132
133 if (bo->flags == flags && is_idle(bo)) {
134 list_del(&bo->list);
135 break;
136 }
137
138 bo = NULL;
139 break;
140 }
141 pthread_mutex_unlock(&table_lock);
142
143 return bo;
144}
145
146/* allocate a new (un-tiled) buffer object
147 *
148 * NOTE: size is potentially rounded up to bucket size
149 */
150drm_private struct etna_bo *etna_bo_cache_alloc(struct etna_bo_cache *cache, uint32_t *size,
151 uint32_t flags)
152{
153 struct etna_bo *bo;
154 struct etna_bo_bucket *bucket;
155
156 *size = ALIGN(*size, 4096);
157 bucket = get_bucket(cache, *size);
158
159 /* see if we can be green and recycle: */
160 if (bucket) {
161 *size = bucket->size;
162 bo = find_in_bucket(bucket, flags);
163 if (bo) {
164 atomic_set(&bo->refcnt, 1);
165 etna_device_ref(bo->dev);
166 return bo;
167 }
168 }
169
170 return NULL;
171}
172
173drm_private int etna_bo_cache_free(struct etna_bo_cache *cache, struct etna_bo *bo)
174{
175 struct etna_bo_bucket *bucket = get_bucket(cache, bo->size);
176
177 /* see if we can be green and recycle: */
178 if (bucket) {
179 struct timespec time;
180
181 clock_gettime(CLOCK_MONOTONIC, &time);
182
183 bo->free_time = time.tv_sec;
184 list_addtail(&bo->list, &bucket->list);
185 etna_bo_cache_cleanup(cache, time.tv_sec);
186
187 /* bo's in the bucket cache don't have a ref and
188 * don't hold a ref to the dev:
189 */
190 etna_device_del_locked(bo->dev);
191
192 return 0;
193 }
194
195 return -1;
196}
diff --git a/etnaviv/etnaviv_cmd_stream.c b/etnaviv/etnaviv_cmd_stream.c
new file mode 100644
index 00000000..9ce3f363
--- /dev/null
+++ b/etnaviv/etnaviv_cmd_stream.c
@@ -0,0 +1,243 @@
1/*
2 * Copyright (C) 2014-2015 Etnaviv Project
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 * Authors:
24 * Christian Gmeiner <christian.gmeiner@gmail.com>
25 */
26
27#ifdef HAVE_CONFIG_H
28# include <config.h>
29#endif
30
31#include <assert.h>
32
33#include "etnaviv_drmif.h"
34#include "etnaviv_priv.h"
35
36static pthread_mutex_t idx_lock = PTHREAD_MUTEX_INITIALIZER;
37
/* Grow array @ptr (element size @sz, capacity *@max) so it can hold at
 * least @nr + 1 entries: double the capacity, or jump to nr + 5 when
 * doubling would not be enough.  *@max is updated in place.
 * NOTE: on realloc failure the old block leaks and NULL propagates to
 * the caller — matching the original behavior. */
static void *grow(void *ptr, uint32_t nr, uint32_t *max, uint32_t sz)
{
	uint32_t need = nr + 1;

	if (need > *max) {
		uint32_t cap = *max * 2;

		if (cap < need)
			cap = nr + 5;

		*max = cap;
		ptr = realloc(ptr, cap * sz);
	}

	return ptr;
}
50
51#define APPEND(x, name) ({ \
52 (x)->name = grow((x)->name, (x)->nr_ ## name, &(x)->max_ ## name, sizeof((x)->name[0])); \
53 (x)->nr_ ## name ++; \
54})
55
/* Recover the private stream state from the public handle; valid
 * because the public struct is the first member of the private one. */
static inline struct etna_cmd_stream_priv *
etna_cmd_stream_priv(struct etna_cmd_stream *stream)
{
	return (struct etna_cmd_stream_priv *)stream;
}
61
62struct etna_cmd_stream *etna_cmd_stream_new(struct etna_pipe *pipe, uint32_t size,
63 void (*reset_notify)(struct etna_cmd_stream *stream, void *priv),
64 void *priv)
65{
66 struct etna_cmd_stream_priv *stream = NULL;
67
68 if (size == 0) {
69 ERROR_MSG("invalid size of 0");
70 goto fail;
71 }
72
73 stream = calloc(1, sizeof(*stream));
74 if (!stream) {
75 ERROR_MSG("allocation failed");
76 goto fail;
77 }
78
79 /* allocate even number of 32-bit words */
80 size = ALIGN(size, 2);
81
82 stream->base.buffer = malloc(size * sizeof(uint32_t));
83 if (!stream->base.buffer) {
84 ERROR_MSG("allocation failed");
85 goto fail;
86 }
87
88 stream->base.size = size;
89 stream->pipe = pipe;
90 stream->reset_notify = reset_notify;
91 stream->reset_notify_priv = priv;
92
93 return &stream->base;
94
95fail:
96 if (stream)
97 etna_cmd_stream_del(&stream->base);
98
99 return NULL;
100}
101
102void etna_cmd_stream_del(struct etna_cmd_stream *stream)
103{
104 struct etna_cmd_stream_priv *priv = etna_cmd_stream_priv(stream);
105
106 free(stream->buffer);
107 free(priv->submit.relocs);
108 free(priv);
109}
110
111static void reset_buffer(struct etna_cmd_stream *stream)
112{
113 struct etna_cmd_stream_priv *priv = etna_cmd_stream_priv(stream);
114
115 stream->offset = 0;
116 priv->submit.nr_bos = 0;
117 priv->submit.nr_relocs = 0;
118 priv->nr_bos = 0;
119
120 if (priv->reset_notify)
121 priv->reset_notify(stream, priv->reset_notify_priv);
122}
123
124uint32_t etna_cmd_stream_timestamp(struct etna_cmd_stream *stream)
125{
126 return etna_cmd_stream_priv(stream)->last_timestamp;
127}
128
129static uint32_t append_bo(struct etna_cmd_stream *stream, struct etna_bo *bo)
130{
131 struct etna_cmd_stream_priv *priv = etna_cmd_stream_priv(stream);
132 uint32_t idx;
133
134 idx = APPEND(&priv->submit, bos);
135 idx = APPEND(priv, bos);
136
137 priv->submit.bos[idx].flags = 0;
138 priv->submit.bos[idx].handle = bo->handle;
139
140 priv->bos[idx] = etna_bo_ref(bo);
141
142 return idx;
143}
144
145/* add (if needed) bo, return idx: */
146static uint32_t bo2idx(struct etna_cmd_stream *stream, struct etna_bo *bo,
147 uint32_t flags)
148{
149 struct etna_cmd_stream_priv *priv = etna_cmd_stream_priv(stream);
150 uint32_t idx;
151
152 pthread_mutex_lock(&idx_lock);
153
154 if (!bo->current_stream) {
155 idx = append_bo(stream, bo);
156 bo->current_stream = stream;
157 bo->idx = idx;
158 } else if (bo->current_stream == stream) {
159 idx = bo->idx;
160 } else {
161 /* slow-path: */
162 for (idx = 0; idx < priv->nr_bos; idx++)
163 if (priv->bos[idx] == bo)
164 break;
165 if (idx == priv->nr_bos) {
166 /* not found */
167 idx = append_bo(stream, bo);
168 }
169 }
170 pthread_mutex_unlock(&idx_lock);
171
172 if (flags & ETNA_RELOC_READ)
173 priv->submit.bos[idx].flags |= ETNA_SUBMIT_BO_READ;
174 if (flags & ETNA_RELOC_WRITE)
175 priv->submit.bos[idx].flags |= ETNA_SUBMIT_BO_WRITE;
176
177 return idx;
178}
179
180static void flush(struct etna_cmd_stream *stream)
181{
182 struct etna_cmd_stream_priv *priv = etna_cmd_stream_priv(stream);
183 int ret, id = priv->pipe->id;
184 struct etna_gpu *gpu = priv->pipe->gpu;
185
186 struct drm_etnaviv_gem_submit req = {
187 .pipe = gpu->core,
188 .exec_state = id,
189 .bos = VOID2U64(priv->submit.bos),
190 .nr_bos = priv->submit.nr_bos,
191 .relocs = VOID2U64(priv->submit.relocs),
192 .nr_relocs = priv->submit.nr_relocs,
193 .stream = VOID2U64(stream->buffer),
194 .stream_size = stream->offset * 4, /* in bytes */
195 };
196
197 ret = drmCommandWriteRead(gpu->dev->fd, DRM_ETNAVIV_GEM_SUBMIT,
198 &req, sizeof(req));
199
200 if (ret)
201 ERROR_MSG("submit failed: %d (%s)", ret, strerror(errno));
202 else
203 priv->last_timestamp = req.fence;
204
205 for (uint32_t i = 0; i < priv->nr_bos; i++) {
206 struct etna_bo *bo = priv->bos[i];
207
208 bo->current_stream = NULL;
209 etna_bo_del(bo);
210 }
211}
212
/* Submit the stream asynchronously and reset it for reuse. */
void etna_cmd_stream_flush(struct etna_cmd_stream *stream)
{
	flush(stream);
	reset_buffer(stream);
}
218
219void etna_cmd_stream_finish(struct etna_cmd_stream *stream)
220{
221 struct etna_cmd_stream_priv *priv = etna_cmd_stream_priv(stream);
222
223 flush(stream);
224 etna_pipe_wait(priv->pipe, priv->last_timestamp, 5000);
225 reset_buffer(stream);
226}
227
228void etna_cmd_stream_reloc(struct etna_cmd_stream *stream, const struct etna_reloc *r)
229{
230 struct etna_cmd_stream_priv *priv = etna_cmd_stream_priv(stream);
231 struct drm_etnaviv_gem_submit_reloc *reloc;
232 uint32_t idx = APPEND(&priv->submit, relocs);
233 uint32_t addr = 0;
234
235 reloc = &priv->submit.relocs[idx];
236
237 reloc->reloc_idx = bo2idx(stream, r->bo, r->flags);
238 reloc->reloc_offset = r->offset;
239 reloc->submit_offset = stream->offset * 4; /* in bytes */
240 reloc->flags = 0;
241
242 etna_cmd_stream_emit(stream, addr);
243}
diff --git a/etnaviv/etnaviv_device.c b/etnaviv/etnaviv_device.c
new file mode 100644
index 00000000..3ce92030
--- /dev/null
+++ b/etnaviv/etnaviv_device.c
@@ -0,0 +1,119 @@
1/*
2 * Copyright (C) 2014 Etnaviv Project
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 * Authors:
24 * Christian Gmeiner <christian.gmeiner@gmail.com>
25 */
26
27#ifdef HAVE_CONFIG_H
28#include "config.h"
29#endif
30
31#include <stdlib.h>
32#include <linux/stddef.h>
33#include <linux/types.h>
34#include <errno.h>
35#include <sys/mman.h>
36#include <fcntl.h>
37#include <unistd.h>
38#include <pthread.h>
39
40#include <xf86drm.h>
41#include <xf86atomic.h>
42
43#include "etnaviv_priv.h"
44#include "etnaviv_drmif.h"
45
46static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
47
48struct etna_device *etna_device_new(int fd)
49{
50 struct etna_device *dev = calloc(sizeof(*dev), 1);
51
52 if (!dev)
53 return NULL;
54
55 atomic_set(&dev->refcnt, 1);
56 dev->fd = fd;
57 dev->handle_table = drmHashCreate();
58 dev->name_table = drmHashCreate();
59 etna_bo_cache_init(&dev->bo_cache);
60
61 return dev;
62}
63
64/* like etna_device_new() but creates it's own private dup() of the fd
65 * which is close()d when the device is finalized. */
66struct etna_device *etna_device_new_dup(int fd)
67{
68 int dup_fd = dup(fd);
69 struct etna_device *dev = etna_device_new(dup_fd);
70
71 if (dev)
72 dev->closefd = 1;
73 else
74 close(dup_fd);
75
76 return dev;
77}
78
79struct etna_device *etna_device_ref(struct etna_device *dev)
80{
81 atomic_inc(&dev->refcnt);
82
83 return dev;
84}
85
86static void etna_device_del_impl(struct etna_device *dev)
87{
88 etna_bo_cache_cleanup(&dev->bo_cache, 0);
89 drmHashDestroy(dev->handle_table);
90 drmHashDestroy(dev->name_table);
91
92 if (dev->closefd)
93 close(dev->fd);
94
95 free(dev);
96}
97
98drm_private void etna_device_del_locked(struct etna_device *dev)
99{
100 if (!atomic_dec_and_test(&dev->refcnt))
101 return;
102
103 etna_device_del_impl(dev);
104}
105
106void etna_device_del(struct etna_device *dev)
107{
108 if (!atomic_dec_and_test(&dev->refcnt))
109 return;
110
111 pthread_mutex_lock(&table_lock);
112 etna_device_del_impl(dev);
113 pthread_mutex_unlock(&table_lock);
114}
115
116int etna_device_fd(struct etna_device *dev)
117{
118 return dev->fd;
119}
diff --git a/etnaviv/etnaviv_drm.h b/etnaviv/etnaviv_drm.h
new file mode 100644
index 00000000..2584c1cc
--- /dev/null
+++ b/etnaviv/etnaviv_drm.h
@@ -0,0 +1,233 @@
1/*
2 * Copyright (C) 2015 Etnaviv Project
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License version 2 as published by
6 * the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#ifndef __ETNAVIV_DRM_H__
18#define __ETNAVIV_DRM_H__
19
20#include "drm.h"
21
22#if defined(__cplusplus)
23extern "C" {
24#endif
25
26/* Please note that modifications to all structs defined here are
27 * subject to backwards-compatibility constraints:
28 * 1) Do not use pointers, use __u64 instead for 32 bit / 64 bit
29 * user/kernel compatibility
30 * 2) Keep fields aligned to their size
31 * 3) Because of how drm_ioctl() works, we can add new fields at
32 * the end of an ioctl if some care is taken: drm_ioctl() will
33 * zero out the new fields at the tail of the ioctl, so a zero
34 * value should have a backwards compatible meaning. And for
35 * output params, userspace won't see the newly added output
36 * fields.. so that has to be somehow ok.
37 */
38
39/* timeouts are specified in clock-monotonic absolute times (to simplify
40 * restarting interrupted ioctls). The following struct is logically the
41 * same as 'struct timespec' but 32/64b ABI safe.
42 */
43struct drm_etnaviv_timespec {
44 __s64 tv_sec; /* seconds */
45 __s64 tv_nsec; /* nanoseconds */
46};
47
48#define ETNAVIV_PARAM_GPU_MODEL 0x01
49#define ETNAVIV_PARAM_GPU_REVISION 0x02
50#define ETNAVIV_PARAM_GPU_FEATURES_0 0x03
51#define ETNAVIV_PARAM_GPU_FEATURES_1 0x04
52#define ETNAVIV_PARAM_GPU_FEATURES_2 0x05
53#define ETNAVIV_PARAM_GPU_FEATURES_3 0x06
54#define ETNAVIV_PARAM_GPU_FEATURES_4 0x07
55#define ETNAVIV_PARAM_GPU_FEATURES_5 0x08
56#define ETNAVIV_PARAM_GPU_FEATURES_6 0x09
57
58#define ETNAVIV_PARAM_GPU_STREAM_COUNT 0x10
59#define ETNAVIV_PARAM_GPU_REGISTER_MAX 0x11
60#define ETNAVIV_PARAM_GPU_THREAD_COUNT 0x12
61#define ETNAVIV_PARAM_GPU_VERTEX_CACHE_SIZE 0x13
62#define ETNAVIV_PARAM_GPU_SHADER_CORE_COUNT 0x14
63#define ETNAVIV_PARAM_GPU_PIXEL_PIPES 0x15
64#define ETNAVIV_PARAM_GPU_VERTEX_OUTPUT_BUFFER_SIZE 0x16
65#define ETNAVIV_PARAM_GPU_BUFFER_SIZE 0x17
66#define ETNAVIV_PARAM_GPU_INSTRUCTION_COUNT 0x18
67#define ETNAVIV_PARAM_GPU_NUM_CONSTANTS 0x19
68#define ETNAVIV_PARAM_GPU_NUM_VARYINGS 0x1a
69
70#define ETNA_MAX_PIPES 4
71
72struct drm_etnaviv_param {
73 __u32 pipe; /* in */
74 __u32 param; /* in, ETNAVIV_PARAM_x */
75 __u64 value; /* out (get_param) or in (set_param) */
76};
77
78/*
79 * GEM buffers:
80 */
81
82#define ETNA_BO_CACHE_MASK 0x000f0000
83/* cache modes */
84#define ETNA_BO_CACHED 0x00010000
85#define ETNA_BO_WC 0x00020000
86#define ETNA_BO_UNCACHED 0x00040000
87/* map flags */
88#define ETNA_BO_FORCE_MMU 0x00100000
89
90struct drm_etnaviv_gem_new {
91 __u64 size; /* in */
92 __u32 flags; /* in, mask of ETNA_BO_x */
93 __u32 handle; /* out */
94};
95
96struct drm_etnaviv_gem_info {
97 __u32 handle; /* in */
98 __u32 pad;
99 __u64 offset; /* out, offset to pass to mmap() */
100};
101
102#define ETNA_PREP_READ 0x01
103#define ETNA_PREP_WRITE 0x02
104#define ETNA_PREP_NOSYNC 0x04
105
106struct drm_etnaviv_gem_cpu_prep {
107 __u32 handle; /* in */
108 __u32 op; /* in, mask of ETNA_PREP_x */
109 struct drm_etnaviv_timespec timeout; /* in */
110};
111
112struct drm_etnaviv_gem_cpu_fini {
113 __u32 handle; /* in */
114 __u32 flags; /* in, placeholder for now, no defined values */
115};
116
117/*
118 * Cmdstream Submission:
119 */
120
121/* The value written into the cmdstream is logically:
122 * relocbuf->gpuaddr + reloc_offset
123 *
124 * NOTE that reloc's must be sorted by order of increasing submit_offset,
125 * otherwise EINVAL.
126 */
127struct drm_etnaviv_gem_submit_reloc {
128 __u32 submit_offset; /* in, offset from submit_bo */
129 __u32 reloc_idx; /* in, index of reloc_bo buffer */
130 __u64 reloc_offset; /* in, offset from start of reloc_bo */
131 __u32 flags; /* in, placeholder for now, no defined values */
132};
133
134/* Each buffer referenced elsewhere in the cmdstream submit (ie. the
135 * cmdstream buffer(s) themselves or reloc entries) has one (and only
136 * one) entry in the submit->bos[] table.
137 *
138 * As a optimization, the current buffer (gpu virtual address) can be
139 * passed back through the 'presumed' field. If on a subsequent reloc,
140 * userspace passes back a 'presumed' address that is still valid,
141 * then patching the cmdstream for this entry is skipped. This can
142 * avoid kernel needing to map/access the cmdstream bo in the common
143 * case.
144 */
145#define ETNA_SUBMIT_BO_READ 0x0001
146#define ETNA_SUBMIT_BO_WRITE 0x0002
147struct drm_etnaviv_gem_submit_bo {
148 __u32 flags; /* in, mask of ETNA_SUBMIT_BO_x */
149 __u32 handle; /* in, GEM handle */
150 __u64 presumed; /* in/out, presumed buffer address */
151};
152
153/* Each cmdstream submit consists of a table of buffers involved, and
154 * one or more cmdstream buffers. This allows for conditional execution
155 * (context-restore), and IB buffers needed for per tile/bin draw cmds.
156 */
157#define ETNA_PIPE_3D 0x00
158#define ETNA_PIPE_2D 0x01
159#define ETNA_PIPE_VG 0x02
160struct drm_etnaviv_gem_submit {
161 __u32 fence; /* out */
162 __u32 pipe; /* in */
163 __u32 exec_state; /* in, initial execution state (ETNA_PIPE_x) */
164 __u32 nr_bos; /* in, number of submit_bo's */
165 __u32 nr_relocs; /* in, number of submit_reloc's */
166 __u32 stream_size; /* in, cmdstream size */
167 __u64 bos; /* in, ptr to array of submit_bo's */
168 __u64 relocs; /* in, ptr to array of submit_reloc's */
169 __u64 stream; /* in, ptr to cmdstream */
170};
171
172/* The normal way to synchronize with the GPU is just to CPU_PREP on
173 * a buffer if you need to access it from the CPU (other cmdstream
174 * submission from same or other contexts, PAGE_FLIP ioctl, etc, all
175 * handle the required synchronization under the hood). This ioctl
176 * mainly just exists as a way to implement the gallium pipe_fence
177 * APIs without requiring a dummy bo to synchronize on.
178 */
179#define ETNA_WAIT_NONBLOCK 0x01
180struct drm_etnaviv_wait_fence {
181 __u32 pipe; /* in */
182 __u32 fence; /* in */
183 __u32 flags; /* in, mask of ETNA_WAIT_x */
184 __u32 pad;
185 struct drm_etnaviv_timespec timeout; /* in */
186};
187
188#define ETNA_USERPTR_READ 0x01
189#define ETNA_USERPTR_WRITE 0x02
190struct drm_etnaviv_gem_userptr {
191 __u64 user_ptr; /* in, page aligned user pointer */
192 __u64 user_size; /* in, page aligned user size */
193 __u32 flags; /* in, flags */
194 __u32 handle; /* out, non-zero handle */
195};
196
197struct drm_etnaviv_gem_wait {
198 __u32 pipe; /* in */
199 __u32 handle; /* in, bo to be waited for */
200 __u32 flags; /* in, mask of ETNA_WAIT_x */
201 __u32 pad;
202 struct drm_etnaviv_timespec timeout; /* in */
203};
204
205#define DRM_ETNAVIV_GET_PARAM 0x00
206/* placeholder:
207#define DRM_ETNAVIV_SET_PARAM 0x01
208 */
209#define DRM_ETNAVIV_GEM_NEW 0x02
210#define DRM_ETNAVIV_GEM_INFO 0x03
211#define DRM_ETNAVIV_GEM_CPU_PREP 0x04
212#define DRM_ETNAVIV_GEM_CPU_FINI 0x05
213#define DRM_ETNAVIV_GEM_SUBMIT 0x06
214#define DRM_ETNAVIV_WAIT_FENCE 0x07
215#define DRM_ETNAVIV_GEM_USERPTR 0x08
216#define DRM_ETNAVIV_GEM_WAIT 0x09
217#define DRM_ETNAVIV_NUM_IOCTLS 0x0a
218
219#define DRM_IOCTL_ETNAVIV_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GET_PARAM, struct drm_etnaviv_param)
220#define DRM_IOCTL_ETNAVIV_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_NEW, struct drm_etnaviv_gem_new)
221#define DRM_IOCTL_ETNAVIV_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_INFO, struct drm_etnaviv_gem_info)
222#define DRM_IOCTL_ETNAVIV_GEM_CPU_PREP DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_CPU_PREP, struct drm_etnaviv_gem_cpu_prep)
223#define DRM_IOCTL_ETNAVIV_GEM_CPU_FINI DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_CPU_FINI, struct drm_etnaviv_gem_cpu_fini)
224#define DRM_IOCTL_ETNAVIV_GEM_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_SUBMIT, struct drm_etnaviv_gem_submit)
225#define DRM_IOCTL_ETNAVIV_WAIT_FENCE DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_WAIT_FENCE, struct drm_etnaviv_wait_fence)
226#define DRM_IOCTL_ETNAVIV_GEM_USERPTR DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_USERPTR, struct drm_etnaviv_gem_userptr)
227#define DRM_IOCTL_ETNAVIV_GEM_WAIT DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_WAIT, struct drm_etnaviv_gem_wait)
228
229#if defined(__cplusplus)
230}
231#endif
232
233#endif /* __ETNAVIV_DRM_H__ */
diff --git a/etnaviv/etnaviv_drmif.h b/etnaviv/etnaviv_drmif.h
new file mode 100644
index 00000000..8119baad
--- /dev/null
+++ b/etnaviv/etnaviv_drmif.h
@@ -0,0 +1,191 @@
1/*
2 * Copyright (C) 2014-2015 Etnaviv Project
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 * Authors:
24 * Christian Gmeiner <christian.gmeiner@gmail.com>
25 */
26
27#ifndef ETNAVIV_DRMIF_H_
28#define ETNAVIV_DRMIF_H_
29
30#include <xf86drm.h>
31#include <stdint.h>
32
33struct etna_bo;
34struct etna_pipe;
35struct etna_gpu;
36struct etna_device;
37struct etna_cmd_stream;
38
39enum etna_pipe_id {
40 ETNA_PIPE_3D = 0,
41 ETNA_PIPE_2D = 1,
42 ETNA_PIPE_VG = 2,
43 ETNA_PIPE_MAX
44};
45
46enum etna_param_id {
47 ETNA_GPU_MODEL = 0x1,
48 ETNA_GPU_REVISION = 0x2,
49 ETNA_GPU_FEATURES_0 = 0x3,
50 ETNA_GPU_FEATURES_1 = 0x4,
51 ETNA_GPU_FEATURES_2 = 0x5,
52 ETNA_GPU_FEATURES_3 = 0x6,
53 ETNA_GPU_FEATURES_4 = 0x7,
54 ETNA_GPU_FEATURES_5 = 0x8,
55 ETNA_GPU_FEATURES_6 = 0x9,
56
57 ETNA_GPU_STREAM_COUNT = 0x10,
58 ETNA_GPU_REGISTER_MAX = 0x11,
59 ETNA_GPU_THREAD_COUNT = 0x12,
60 ETNA_GPU_VERTEX_CACHE_SIZE = 0x13,
61 ETNA_GPU_SHADER_CORE_COUNT = 0x14,
62 ETNA_GPU_PIXEL_PIPES = 0x15,
63 ETNA_GPU_VERTEX_OUTPUT_BUFFER_SIZE = 0x16,
64 ETNA_GPU_BUFFER_SIZE = 0x17,
65 ETNA_GPU_INSTRUCTION_COUNT = 0x18,
66 ETNA_GPU_NUM_CONSTANTS = 0x19,
67 ETNA_GPU_NUM_VARYINGS = 0x1a
68};
69
70/* bo flags: */
71#define DRM_ETNA_GEM_CACHE_CACHED 0x00010000
72#define DRM_ETNA_GEM_CACHE_WC 0x00020000
73#define DRM_ETNA_GEM_CACHE_UNCACHED 0x00040000
74#define DRM_ETNA_GEM_CACHE_MASK 0x000f0000
75/* map flags */
76#define DRM_ETNA_GEM_FORCE_MMU 0x00100000
77
78/* bo access flags: (keep aligned to ETNA_PREP_x) */
79#define DRM_ETNA_PREP_READ 0x01
80#define DRM_ETNA_PREP_WRITE 0x02
81#define DRM_ETNA_PREP_NOSYNC 0x04
82
83/* device functions:
84 */
85
86struct etna_device *etna_device_new(int fd);
87struct etna_device *etna_device_new_dup(int fd);
88struct etna_device *etna_device_ref(struct etna_device *dev);
89void etna_device_del(struct etna_device *dev);
90int etna_device_fd(struct etna_device *dev);
91
92/* gpu functions:
93 */
94
95struct etna_gpu *etna_gpu_new(struct etna_device *dev, unsigned int core);
96void etna_gpu_del(struct etna_gpu *gpu);
97int etna_gpu_get_param(struct etna_gpu *gpu, enum etna_param_id param,
98 uint64_t *value);
99
100
101/* pipe functions:
102 */
103
104struct etna_pipe *etna_pipe_new(struct etna_gpu *gpu, enum etna_pipe_id id);
105void etna_pipe_del(struct etna_pipe *pipe);
106int etna_pipe_wait(struct etna_pipe *pipe, uint32_t timestamp, uint32_t ms);
107int etna_pipe_wait_ns(struct etna_pipe *pipe, uint32_t timestamp, uint64_t ns);
108
109
110/* buffer-object functions:
111 */
112
113struct etna_bo *etna_bo_new(struct etna_device *dev,
114 uint32_t size, uint32_t flags);
115struct etna_bo *etna_bo_from_handle(struct etna_device *dev,
116 uint32_t handle, uint32_t size);
117struct etna_bo *etna_bo_from_name(struct etna_device *dev, uint32_t name);
118struct etna_bo *etna_bo_from_dmabuf(struct etna_device *dev, int fd);
119struct etna_bo *etna_bo_ref(struct etna_bo *bo);
120void etna_bo_del(struct etna_bo *bo);
121int etna_bo_get_name(struct etna_bo *bo, uint32_t *name);
122uint32_t etna_bo_handle(struct etna_bo *bo);
123int etna_bo_dmabuf(struct etna_bo *bo);
124uint32_t etna_bo_size(struct etna_bo *bo);
125void * etna_bo_map(struct etna_bo *bo);
126int etna_bo_cpu_prep(struct etna_bo *bo, uint32_t op);
127void etna_bo_cpu_fini(struct etna_bo *bo);
128
129
130/* cmd stream functions:
131 */
132
133struct etna_cmd_stream {
134 uint32_t *buffer;
135 uint32_t offset; /* in 32-bit words */
136 uint32_t size; /* in 32-bit words */
137};
138
139struct etna_cmd_stream *etna_cmd_stream_new(struct etna_pipe *pipe, uint32_t size,
140 void (*reset_notify)(struct etna_cmd_stream *stream, void *priv),
141 void *priv);
142void etna_cmd_stream_del(struct etna_cmd_stream *stream);
143uint32_t etna_cmd_stream_timestamp(struct etna_cmd_stream *stream);
144void etna_cmd_stream_flush(struct etna_cmd_stream *stream);
145void etna_cmd_stream_finish(struct etna_cmd_stream *stream);
146
147static inline uint32_t etna_cmd_stream_avail(struct etna_cmd_stream *stream)
148{
149 static const uint32_t END_CLEARANCE = 2; /* LINK op code */
150
151 return stream->size - stream->offset - END_CLEARANCE;
152}
153
154static inline void etna_cmd_stream_reserve(struct etna_cmd_stream *stream, size_t n)
155{
156 if (etna_cmd_stream_avail(stream) < n)
157 etna_cmd_stream_flush(stream);
158}
159
160static inline void etna_cmd_stream_emit(struct etna_cmd_stream *stream, uint32_t data)
161{
162 stream->buffer[stream->offset++] = data;
163}
164
165static inline uint32_t etna_cmd_stream_get(struct etna_cmd_stream *stream, uint32_t offset)
166{
167 return stream->buffer[offset];
168}
169
170static inline void etna_cmd_stream_set(struct etna_cmd_stream *stream, uint32_t offset,
171 uint32_t data)
172{
173 stream->buffer[offset] = data;
174}
175
176static inline uint32_t etna_cmd_stream_offset(struct etna_cmd_stream *stream)
177{
178 return stream->offset;
179}
180
181struct etna_reloc {
182 struct etna_bo *bo;
183#define ETNA_RELOC_READ 0x0001
184#define ETNA_RELOC_WRITE 0x0002
185 uint32_t flags;
186 uint32_t offset;
187};
188
189void etna_cmd_stream_reloc(struct etna_cmd_stream *stream, const struct etna_reloc *r);
190
191#endif /* ETNAVIV_DRMIF_H_ */
diff --git a/etnaviv/etnaviv_gpu.c b/etnaviv/etnaviv_gpu.c
new file mode 100644
index 00000000..35dec6cd
--- /dev/null
+++ b/etnaviv/etnaviv_gpu.c
@@ -0,0 +1,175 @@
1/*
2 * Copyright (C) 2015 Etnaviv Project
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 * Authors:
24 * Christian Gmeiner <christian.gmeiner@gmail.com>
25 */
26
27#ifdef HAVE_CONFIG_H
28# include <config.h>
29#endif
30
31#include "etnaviv_priv.h"
32#include "etnaviv_drmif.h"
33
34static uint64_t get_param(struct etna_device *dev, uint32_t core, uint32_t param)
35{
36 struct drm_etnaviv_param req = {
37 .pipe = core,
38 .param = param,
39 };
40 int ret;
41
42 ret = drmCommandWriteRead(dev->fd, DRM_ETNAVIV_GET_PARAM, &req, sizeof(req));
43 if (ret) {
44 ERROR_MSG("get-param (%x) failed! %d (%s)", param, ret, strerror(errno));
45 return 0;
46 }
47
48 return req.value;
49}
50
51struct etna_gpu *etna_gpu_new(struct etna_device *dev, unsigned int core)
52{
53 struct etna_gpu *gpu;
54
55 gpu = calloc(1, sizeof(*gpu));
56 if (!gpu) {
57 ERROR_MSG("allocation failed");
58 goto fail;
59 }
60
61 gpu->dev = dev;
62 gpu->core = core;
63
64 /* get specs from kernel space */
65 gpu->specs.model = get_param(dev, core, ETNAVIV_PARAM_GPU_MODEL);
66 gpu->specs.revision = get_param(dev, core, ETNAVIV_PARAM_GPU_REVISION);
67 gpu->specs.features[0] = get_param(dev, core, ETNAVIV_PARAM_GPU_FEATURES_0);
68 gpu->specs.features[1] = get_param(dev, core, ETNAVIV_PARAM_GPU_FEATURES_1);
69 gpu->specs.features[2] = get_param(dev, core, ETNAVIV_PARAM_GPU_FEATURES_2);
70 gpu->specs.features[3] = get_param(dev, core, ETNAVIV_PARAM_GPU_FEATURES_3);
71 gpu->specs.features[4] = get_param(dev, core, ETNAVIV_PARAM_GPU_FEATURES_4);
72 gpu->specs.features[5] = get_param(dev, core, ETNAVIV_PARAM_GPU_FEATURES_5);
73 gpu->specs.features[6] = get_param(dev, core, ETNAVIV_PARAM_GPU_FEATURES_6);
74 gpu->specs.stream_count = get_param(dev, core, ETNA_GPU_STREAM_COUNT);
75 gpu->specs.register_max = get_param(dev, core, ETNA_GPU_REGISTER_MAX);
76 gpu->specs.thread_count = get_param(dev, core, ETNA_GPU_THREAD_COUNT);
77 gpu->specs.vertex_cache_size = get_param(dev, core, ETNA_GPU_VERTEX_CACHE_SIZE);
78 gpu->specs.shader_core_count = get_param(dev, core, ETNA_GPU_SHADER_CORE_COUNT);
79 gpu->specs.pixel_pipes = get_param(dev, core, ETNA_GPU_PIXEL_PIPES);
80 gpu->specs.vertex_output_buffer_size = get_param(dev, core, ETNA_GPU_VERTEX_OUTPUT_BUFFER_SIZE);
81 gpu->specs.buffer_size = get_param(dev, core, ETNA_GPU_BUFFER_SIZE);
82 gpu->specs.instruction_count = get_param(dev, core, ETNA_GPU_INSTRUCTION_COUNT);
83 gpu->specs.num_constants = get_param(dev, core, ETNA_GPU_NUM_CONSTANTS);
84 gpu->specs.num_varyings = get_param(dev, core, ETNA_GPU_NUM_VARYINGS);
85
86 if (!gpu->specs.model)
87 goto fail;
88
89 INFO_MSG(" GPU model: 0x%x (rev %x)", gpu->specs.model, gpu->specs.revision);
90
91 return gpu;
92fail:
93 if (gpu)
94 etna_gpu_del(gpu);
95
96 return NULL;
97}
98
99void etna_gpu_del(struct etna_gpu *gpu)
100{
101 free(gpu);
102}
103
104int etna_gpu_get_param(struct etna_gpu *gpu, enum etna_param_id param,
105 uint64_t *value)
106{
107 switch(param) {
108 case ETNA_GPU_MODEL:
109 *value = gpu->specs.model;
110 return 0;
111 case ETNA_GPU_REVISION:
112 *value = gpu->specs.revision;
113 return 0;
114 case ETNA_GPU_FEATURES_0:
115 *value = gpu->specs.features[0];
116 return 0;
117 case ETNA_GPU_FEATURES_1:
118 *value = gpu->specs.features[1];
119 return 0;
120 case ETNA_GPU_FEATURES_2:
121 *value = gpu->specs.features[2];
122 return 0;
123 case ETNA_GPU_FEATURES_3:
124 *value = gpu->specs.features[3];
125 return 0;
126 case ETNA_GPU_FEATURES_4:
127 *value = gpu->specs.features[4];
128 return 0;
129 case ETNA_GPU_FEATURES_5:
130 *value = gpu->specs.features[5];
131 return 0;
132 case ETNA_GPU_FEATURES_6:
133 *value = gpu->specs.features[6];
134 return 0;
135 case ETNA_GPU_STREAM_COUNT:
136 *value = gpu->specs.stream_count;
137 return 0;
138 case ETNA_GPU_REGISTER_MAX:
139 *value = gpu->specs.register_max;
140 return 0;
141 case ETNA_GPU_THREAD_COUNT:
142 *value = gpu->specs.thread_count;
143 return 0;
144 case ETNA_GPU_VERTEX_CACHE_SIZE:
145 *value = gpu->specs.vertex_cache_size;
146 return 0;
147 case ETNA_GPU_SHADER_CORE_COUNT:
148 *value = gpu->specs.shader_core_count;
149 return 0;
150 case ETNA_GPU_PIXEL_PIPES:
151 *value = gpu->specs.pixel_pipes;
152 return 0;
153 case ETNA_GPU_VERTEX_OUTPUT_BUFFER_SIZE:
154 *value = gpu->specs.vertex_output_buffer_size;
155 return 0;
156 case ETNA_GPU_BUFFER_SIZE:
157 *value = gpu->specs.buffer_size;
158 return 0;
159 case ETNA_GPU_INSTRUCTION_COUNT:
160 *value = gpu->specs.instruction_count;
161 return 0;
162 case ETNA_GPU_NUM_CONSTANTS:
163 *value = gpu->specs.num_constants;
164 return 0;
165 case ETNA_GPU_NUM_VARYINGS:
166 *value = gpu->specs.num_varyings;
167 return 0;
168
169 default:
170 ERROR_MSG("invalid param id: %d", param);
171 return -1;
172 }
173
174 return 0;
175}
diff --git a/etnaviv/etnaviv_pipe.c b/etnaviv/etnaviv_pipe.c
new file mode 100644
index 00000000..94c5d377
--- /dev/null
+++ b/etnaviv/etnaviv_pipe.c
@@ -0,0 +1,83 @@
1/*
2 * Copyright (C) 2014-2015 Etnaviv Project
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 * Authors:
24 * Christian Gmeiner <christian.gmeiner@gmail.com>
25 */
26
27#ifdef HAVE_CONFIG_H
28# include <config.h>
29#endif
30
31#include "etnaviv_priv.h"
32
33int etna_pipe_wait(struct etna_pipe *pipe, uint32_t timestamp, uint32_t ms)
34{
35 return etna_pipe_wait_ns(pipe, timestamp, ms * 1000000);
36}
37
38int etna_pipe_wait_ns(struct etna_pipe *pipe, uint32_t timestamp, uint64_t ns)
39{
40 struct etna_device *dev = pipe->gpu->dev;
41 int ret;
42
43 struct drm_etnaviv_wait_fence req = {
44 .pipe = pipe->gpu->core,
45 .fence = timestamp,
46 };
47
48 if (ns == 0)
49 req.flags |= ETNA_WAIT_NONBLOCK;
50
51 get_abs_timeout(&req.timeout, ns);
52
53 ret = drmCommandWrite(dev->fd, DRM_ETNAVIV_WAIT_FENCE, &req, sizeof(req));
54 if (ret) {
55 ERROR_MSG("wait-fence failed! %d (%s)", ret, strerror(errno));
56 return ret;
57 }
58
59 return 0;
60}
61
62void etna_pipe_del(struct etna_pipe *pipe)
63{
64 free(pipe);
65}
66
67struct etna_pipe *etna_pipe_new(struct etna_gpu *gpu, enum etna_pipe_id id)
68{
69 struct etna_pipe *pipe;
70
71 pipe = calloc(1, sizeof(*pipe));
72 if (!pipe) {
73 ERROR_MSG("allocation failed");
74 goto fail;
75 }
76
77 pipe->id = id;
78 pipe->gpu = gpu;
79
80 return pipe;
81fail:
82 return NULL;
83}
diff --git a/etnaviv/etnaviv_priv.h b/etnaviv/etnaviv_priv.h
new file mode 100644
index 00000000..feaa5ad9
--- /dev/null
+++ b/etnaviv/etnaviv_priv.h
@@ -0,0 +1,201 @@
1/*
2 * Copyright (C) 2014-2015 Etnaviv Project
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 * Authors:
24 * Christian Gmeiner <christian.gmeiner@gmail.com>
25 */
26
27#ifndef ETNAVIV_PRIV_H_
28#define ETNAVIV_PRIV_H_
29
30#include <stdlib.h>
31#include <errno.h>
32#include <string.h>
33#include <unistd.h>
34#include <errno.h>
35#include <fcntl.h>
36#include <sys/ioctl.h>
37#include <pthread.h>
38#include <stdio.h>
39#include <assert.h>
40
41#include "libdrm_macros.h"
42#include "xf86drm.h"
43#include "xf86atomic.h"
44
45#include "util_double_list.h"
46
47#include "etnaviv_drmif.h"
48#include "etnaviv_drm.h"
49
50#define VIV_FEATURES_WORD_COUNT 7
51
52struct etna_specs {
53 uint32_t model;
54 uint32_t revision;
55 uint32_t features[VIV_FEATURES_WORD_COUNT];
56 uint32_t stream_count;
57 uint32_t register_max;
58 uint32_t thread_count;
59 uint32_t shader_core_count;
60 uint32_t vertex_cache_size;
61 uint32_t vertex_output_buffer_size;
62 uint32_t pixel_pipes;
63 uint32_t instruction_count;
64 uint32_t num_constants;
65 uint32_t num_varyings;
66 uint32_t buffer_size;
67};
68
69struct etna_bo_bucket {
70 uint32_t size;
71 struct list_head list;
72};
73
74struct etna_bo_cache {
75 struct etna_bo_bucket cache_bucket[14 * 4];
76 unsigned num_buckets;
77 time_t time;
78};
79
80struct etna_device {
81 int fd;
82 atomic_t refcnt;
83
84 /* tables to keep track of bo's, to avoid "evil-twin" etna_bo objects:
85 *
86 * handle_table: maps handle to etna_bo
87 * name_table: maps flink name to etna_bo
88 *
89 * We end up needing two tables, because DRM_IOCTL_GEM_OPEN always
90 * returns a new handle. So we need to figure out if the bo is already
91 * open in the process first, before calling gem-open.
92 */
93 void *handle_table, *name_table;
94
95 struct etna_bo_cache bo_cache;
96
97 int closefd; /* call close(fd) upon destruction */
98};
99
100drm_private void etna_bo_cache_init(struct etna_bo_cache *cache);
101drm_private void etna_bo_cache_cleanup(struct etna_bo_cache *cache, time_t time);
102drm_private struct etna_bo *etna_bo_cache_alloc(struct etna_bo_cache *cache,
103 uint32_t *size, uint32_t flags);
104drm_private int etna_bo_cache_free(struct etna_bo_cache *cache, struct etna_bo *bo);
105
106/* for where @table_lock is already held: */
107drm_private void etna_device_del_locked(struct etna_device *dev);
108
109/* a GEM buffer object allocated from the DRM device */
110struct etna_bo {
111 struct etna_device *dev;
112 void *map; /* userspace mmap'ing (if there is one) */
113 uint32_t size;
114 uint32_t handle;
115 uint32_t flags;
116 uint32_t name; /* flink global handle (DRI2 name) */
117 uint64_t offset; /* offset to mmap() */
118 atomic_t refcnt;
119
120 /* in the common case, a bo won't be referenced by more than a single
121 * command stream. So to avoid looping over all the bo's in the
122 * reloc table to find the idx of a bo that might already be in the
123 * table, we cache the idx in the bo. But in order to detect the
124 * slow-path where bo is ref'd in multiple streams, we also must track
125 * the current_stream for which the idx is valid. See bo2idx().
126 */
127 struct etna_cmd_stream *current_stream;
128 uint32_t idx;
129
130 int reuse;
131 struct list_head list; /* bucket-list entry */
132 time_t free_time; /* time when added to bucket-list */
133};
134
135struct etna_gpu {
136 struct etna_device *dev;
137 struct etna_specs specs;
138 uint32_t core;
139};
140
141struct etna_pipe {
142 enum etna_pipe_id id;
143 struct etna_gpu *gpu;
144};
145
146struct etna_cmd_stream_priv {
147 struct etna_cmd_stream base;
148 struct etna_pipe *pipe;
149
150 uint32_t last_timestamp;
151
152 /* submit ioctl related tables: */
153 struct {
154 /* bo's table: */
155 struct drm_etnaviv_gem_submit_bo *bos;
156 uint32_t nr_bos, max_bos;
157
158 /* reloc's table: */
159 struct drm_etnaviv_gem_submit_reloc *relocs;
160 uint32_t nr_relocs, max_relocs;
161 } submit;
162
163 /* should have matching entries in submit.bos: */
164 struct etna_bo **bos;
165 uint32_t nr_bos, max_bos;
166
167 /* notify callback if buffer reset happend */
168 void (*reset_notify)(struct etna_cmd_stream *stream, void *priv);
169 void *reset_notify_priv;
170};
171
172#define ALIGN(v,a) (((v) + (a) - 1) & ~((a) - 1))
173#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
174
175#define enable_debug 1 /* TODO make dynamic */
176
177#define INFO_MSG(fmt, ...) \
178 do { drmMsg("[I] "fmt " (%s:%d)\n", \
179 ##__VA_ARGS__, __FUNCTION__, __LINE__); } while (0)
180#define DEBUG_MSG(fmt, ...) \
181 do if (enable_debug) { drmMsg("[D] "fmt " (%s:%d)\n", \
182 ##__VA_ARGS__, __FUNCTION__, __LINE__); } while (0)
183#define WARN_MSG(fmt, ...) \
184 do { drmMsg("[W] "fmt " (%s:%d)\n", \
185 ##__VA_ARGS__, __FUNCTION__, __LINE__); } while (0)
186#define ERROR_MSG(fmt, ...) \
187 do { drmMsg("[E] " fmt " (%s:%d)\n", \
188 ##__VA_ARGS__, __FUNCTION__, __LINE__); } while (0)
189
190#define VOID2U64(x) ((uint64_t)(unsigned long)(x))
191
192static inline void get_abs_timeout(struct drm_etnaviv_timespec *tv, uint64_t ns)
193{
194 struct timespec t;
195 uint32_t s = ns / 1000000000;
196 clock_gettime(CLOCK_MONOTONIC, &t);
197 tv->tv_sec = t.tv_sec + s;
198 tv->tv_nsec = t.tv_nsec + ns - (s * 1000000000);
199}
200
201#endif /* ETNAVIV_PRIV_H_ */
diff --git a/etnaviv/libdrm_etnaviv.pc.in b/etnaviv/libdrm_etnaviv.pc.in
new file mode 100644
index 00000000..13fed01d
--- /dev/null
+++ b/etnaviv/libdrm_etnaviv.pc.in
@@ -0,0 +1,11 @@
1prefix=@prefix@
2exec_prefix=@exec_prefix@
3libdir=@libdir@
4includedir=@includedir@
5
6Name: libdrm_etnaviv
7Description: Userspace interface to etnaviv kernel DRM services
8Version: @PACKAGE_VERSION@
9Libs: -L${libdir} -ldrm_etnaviv
10Cflags: -I${includedir} -I${includedir}/libdrm
11Requires.private: libdrm
diff --git a/exynos/exynos_drm.c b/exynos/exynos_drm.c
index e689781d..b961e520 100644
--- a/exynos/exynos_drm.c
+++ b/exynos/exynos_drm.c
@@ -347,7 +347,7 @@ exynos_prime_fd_to_handle(struct exynos_device *dev, int fd, uint32_t *handle)
347 * 347 *
348 * @dev: a exynos device object. 348 * @dev: a exynos device object.
349 * @connect: indicate whether connectoin or disconnection request. 349 * @connect: indicate whether connectoin or disconnection request.
350 * @ext: indicate whether edid data includes extentions data or not. 350 * @ext: indicate whether edid data includes extensions data or not.
351 * @edid: a pointer to edid data from Wireless Display device. 351 * @edid: a pointer to edid data from Wireless Display device.
352 * 352 *
353 * this interface is used to request Virtual Display driver connection or 353 * this interface is used to request Virtual Display driver connection or
diff --git a/freedreno/Android.mk b/freedreno/Android.mk
index 162c804a..2b582aed 100644
--- a/freedreno/Android.mk
+++ b/freedreno/Android.mk
@@ -8,9 +8,7 @@ LOCAL_MODULE := libdrm_freedreno
8 8
9LOCAL_SHARED_LIBRARIES := libdrm 9LOCAL_SHARED_LIBRARIES := libdrm
10 10
11LOCAL_SRC_FILES := $(filter-out %.h,$(LIBDRM_FREEDRENO_FILES)) 11LOCAL_SRC_FILES := $(LIBDRM_FREEDRENO_FILES)
12
13LOCAL_CFLAGS := \
14 -DHAVE_LIBDRM_ATOMIC_PRIMITIVES=1
15 12
13include $(LIBDRM_COMMON_MK)
16include $(BUILD_SHARED_LIBRARY) 14include $(BUILD_SHARED_LIBRARY)
diff --git a/freedreno/Makefile.am b/freedreno/Makefile.am
index 9b7ec7df..0771d146 100644
--- a/freedreno/Makefile.am
+++ b/freedreno/Makefile.am
@@ -27,4 +27,4 @@ pkgconfigdir = @pkgconfigdir@
27pkgconfig_DATA = libdrm_freedreno.pc 27pkgconfig_DATA = libdrm_freedreno.pc
28 28
29TESTS = freedreno-symbol-check 29TESTS = freedreno-symbol-check
30EXTRA_DIST = Android.mk $(TESTS) 30EXTRA_DIST = $(TESTS)
diff --git a/freedreno/Makefile.sources b/freedreno/Makefile.sources
index 57a8bf1b..68a679bf 100644
--- a/freedreno/Makefile.sources
+++ b/freedreno/Makefile.sources
@@ -4,6 +4,7 @@ LIBDRM_FREEDRENO_FILES := \
4 freedreno_priv.h \ 4 freedreno_priv.h \
5 freedreno_ringbuffer.c \ 5 freedreno_ringbuffer.c \
6 freedreno_bo.c \ 6 freedreno_bo.c \
7 freedreno_bo_cache.c \
7 msm/msm_bo.c \ 8 msm/msm_bo.c \
8 msm/msm_device.c \ 9 msm/msm_device.c \
9 msm/msm_drm.h \ 10 msm/msm_drm.h \
diff --git a/freedreno/freedreno-symbol-check b/freedreno/freedreno-symbol-check
index f517b6e7..42f2c439 100755
--- a/freedreno/freedreno-symbol-check
+++ b/freedreno/freedreno-symbol-check
@@ -29,22 +29,28 @@ fd_device_fd
29fd_device_new 29fd_device_new
30fd_device_new_dup 30fd_device_new_dup
31fd_device_ref 31fd_device_ref
32fd_device_version
32fd_pipe_del 33fd_pipe_del
33fd_pipe_get_param 34fd_pipe_get_param
34fd_pipe_new 35fd_pipe_new
35fd_pipe_wait 36fd_pipe_wait
36fd_pipe_wait_timeout 37fd_pipe_wait_timeout
38fd_ringbuffer_cmd_count
37fd_ringbuffer_del 39fd_ringbuffer_del
38fd_ringbuffer_emit_reloc_ring 40fd_ringbuffer_emit_reloc_ring
41fd_ringbuffer_emit_reloc_ring_full
39fd_ringbuffer_flush 42fd_ringbuffer_flush
43fd_ringbuffer_grow
40fd_ringbuffer_new 44fd_ringbuffer_new
41fd_ringbuffer_reloc 45fd_ringbuffer_reloc
46fd_ringbuffer_reloc2
42fd_ringbuffer_reset 47fd_ringbuffer_reset
43fd_ringbuffer_set_parent 48fd_ringbuffer_set_parent
44fd_ringbuffer_timestamp 49fd_ringbuffer_timestamp
45fd_ringmarker_del 50fd_ringmarker_del
46fd_ringmarker_dwords 51fd_ringmarker_dwords
47fd_ringmarker_flush 52fd_ringmarker_flush
53fd_ringbuffer_flush2
48fd_ringmarker_mark 54fd_ringmarker_mark
49fd_ringmarker_new 55fd_ringmarker_new
50EOF 56EOF
diff --git a/freedreno/freedreno_bo.c b/freedreno/freedreno_bo.c
index a23c65d0..996d6b95 100644
--- a/freedreno/freedreno_bo.c
+++ b/freedreno/freedreno_bo.c
@@ -33,9 +33,8 @@
33#include "freedreno_drmif.h" 33#include "freedreno_drmif.h"
34#include "freedreno_priv.h" 34#include "freedreno_priv.h"
35 35
36static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER; 36drm_private pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
37 37drm_private void bo_del(struct fd_bo *bo);
38static void bo_del(struct fd_bo *bo);
39 38
40/* set buffer name, and add to table, call w/ table_lock held: */ 39/* set buffer name, and add to table, call w/ table_lock held: */
41static void set_name(struct fd_bo *bo, uint32_t name) 40static void set_name(struct fd_bo *bo, uint32_t name)
@@ -83,114 +82,16 @@ static struct fd_bo * bo_from_handle(struct fd_device *dev,
83 return bo; 82 return bo;
84} 83}
85 84
86/* Frees older cached buffers. Called under table_lock */
87drm_private void fd_cleanup_bo_cache(struct fd_device *dev, time_t time)
88{
89 int i;
90
91 if (dev->time == time)
92 return;
93
94 for (i = 0; i < dev->num_buckets; i++) {
95 struct fd_bo_bucket *bucket = &dev->cache_bucket[i];
96 struct fd_bo *bo;
97
98 while (!LIST_IS_EMPTY(&bucket->list)) {
99 bo = LIST_ENTRY(struct fd_bo, bucket->list.next, list);
100
101 /* keep things in cache for at least 1 second: */
102 if (time && ((time - bo->free_time) <= 1))
103 break;
104
105 list_del(&bo->list);
106 bo_del(bo);
107 }
108 }
109
110 dev->time = time;
111}
112
113static struct fd_bo_bucket * get_bucket(struct fd_device *dev, uint32_t size)
114{
115 int i;
116
117 /* hmm, this is what intel does, but I suppose we could calculate our
118 * way to the correct bucket size rather than looping..
119 */
120 for (i = 0; i < dev->num_buckets; i++) {
121 struct fd_bo_bucket *bucket = &dev->cache_bucket[i];
122 if (bucket->size >= size) {
123 return bucket;
124 }
125 }
126
127 return NULL;
128}
129
130static int is_idle(struct fd_bo *bo)
131{
132 return fd_bo_cpu_prep(bo, NULL,
133 DRM_FREEDRENO_PREP_READ |
134 DRM_FREEDRENO_PREP_WRITE |
135 DRM_FREEDRENO_PREP_NOSYNC) == 0;
136}
137
138static struct fd_bo *find_in_bucket(struct fd_device *dev,
139 struct fd_bo_bucket *bucket, uint32_t flags)
140{
141 struct fd_bo *bo = NULL;
142
143 /* TODO .. if we had an ALLOC_FOR_RENDER flag like intel, we could
144 * skip the busy check.. if it is only going to be a render target
145 * then we probably don't need to stall..
146 *
147 * NOTE that intel takes ALLOC_FOR_RENDER bo's from the list tail
148 * (MRU, since likely to be in GPU cache), rather than head (LRU)..
149 */
150 pthread_mutex_lock(&table_lock);
151 while (!LIST_IS_EMPTY(&bucket->list)) {
152 bo = LIST_ENTRY(struct fd_bo, bucket->list.next, list);
153 if (0 /* TODO: if madvise tells us bo is gone... */) {
154 list_del(&bo->list);
155 bo_del(bo);
156 bo = NULL;
157 continue;
158 }
159 /* TODO check for compatible flags? */
160 if (is_idle(bo)) {
161 list_del(&bo->list);
162 break;
163 }
164 bo = NULL;
165 break;
166 }
167 pthread_mutex_unlock(&table_lock);
168
169 return bo;
170}
171
172
173struct fd_bo * 85struct fd_bo *
174fd_bo_new(struct fd_device *dev, uint32_t size, uint32_t flags) 86fd_bo_new(struct fd_device *dev, uint32_t size, uint32_t flags)
175{ 87{
176 struct fd_bo *bo = NULL; 88 struct fd_bo *bo = NULL;
177 struct fd_bo_bucket *bucket;
178 uint32_t handle; 89 uint32_t handle;
179 int ret; 90 int ret;
180 91
181 size = ALIGN(size, 4096); 92 bo = fd_bo_cache_alloc(&dev->bo_cache, &size, flags);
182 bucket = get_bucket(dev, size); 93 if (bo)
183 94 return bo;
184 /* see if we can be green and recycle: */
185 if (bucket) {
186 size = bucket->size;
187 bo = find_in_bucket(dev, bucket, flags);
188 if (bo) {
189 atomic_set(&bo->refcnt, 1);
190 fd_device_ref(bo->dev);
191 return bo;
192 }
193 }
194 95
195 ret = dev->funcs->bo_new_handle(dev, size, flags, &handle); 96 ret = dev->funcs->bo_new_handle(dev, size, flags, &handle);
196 if (ret) 97 if (ret)
@@ -198,7 +99,7 @@ fd_bo_new(struct fd_device *dev, uint32_t size, uint32_t flags)
198 99
199 pthread_mutex_lock(&table_lock); 100 pthread_mutex_lock(&table_lock);
200 bo = bo_from_handle(dev, size, handle); 101 bo = bo_from_handle(dev, size, handle);
201 bo->bo_reuse = 1; 102 bo->bo_reuse = TRUE;
202 pthread_mutex_unlock(&table_lock); 103 pthread_mutex_unlock(&table_lock);
203 104
204 return bo; 105 return bo;
@@ -300,35 +201,17 @@ void fd_bo_del(struct fd_bo *bo)
300 201
301 pthread_mutex_lock(&table_lock); 202 pthread_mutex_lock(&table_lock);
302 203
303 if (bo->bo_reuse) { 204 if (bo->bo_reuse && (fd_bo_cache_free(&dev->bo_cache, bo) == 0))
304 struct fd_bo_bucket *bucket = get_bucket(dev, bo->size); 205 goto out;
305
306 /* see if we can be green and recycle: */
307 if (bucket) {
308 struct timespec time;
309
310 clock_gettime(CLOCK_MONOTONIC, &time);
311
312 bo->free_time = time.tv_sec;
313 list_addtail(&bo->list, &bucket->list);
314 fd_cleanup_bo_cache(dev, time.tv_sec);
315
316 /* bo's in the bucket cache don't have a ref and
317 * don't hold a ref to the dev:
318 */
319
320 goto out;
321 }
322 }
323 206
324 bo_del(bo); 207 bo_del(bo);
325out:
326 fd_device_del_locked(dev); 208 fd_device_del_locked(dev);
209out:
327 pthread_mutex_unlock(&table_lock); 210 pthread_mutex_unlock(&table_lock);
328} 211}
329 212
330/* Called under table_lock */ 213/* Called under table_lock */
331static void bo_del(struct fd_bo *bo) 214drm_private void bo_del(struct fd_bo *bo)
332{ 215{
333 if (bo->map) 216 if (bo->map)
334 drm_munmap(bo->map, bo->size); 217 drm_munmap(bo->map, bo->size);
@@ -366,7 +249,7 @@ int fd_bo_get_name(struct fd_bo *bo, uint32_t *name)
366 pthread_mutex_lock(&table_lock); 249 pthread_mutex_lock(&table_lock);
367 set_name(bo, req.name); 250 set_name(bo, req.name);
368 pthread_mutex_unlock(&table_lock); 251 pthread_mutex_unlock(&table_lock);
369 bo->bo_reuse = 0; 252 bo->bo_reuse = FALSE;
370 } 253 }
371 254
372 *name = bo->name; 255 *name = bo->name;
@@ -390,7 +273,7 @@ int fd_bo_dmabuf(struct fd_bo *bo)
390 return ret; 273 return ret;
391 } 274 }
392 275
393 bo->bo_reuse = 0; 276 bo->bo_reuse = FALSE;
394 277
395 return prime_fd; 278 return prime_fd;
396} 279}
@@ -431,3 +314,10 @@ void fd_bo_cpu_fini(struct fd_bo *bo)
431{ 314{
432 bo->funcs->cpu_fini(bo); 315 bo->funcs->cpu_fini(bo);
433} 316}
317
318#ifndef HAVE_FREEDRENO_KGSL
319struct fd_bo * fd_bo_from_fbdev(struct fd_pipe *pipe, int fbfd, uint32_t size)
320{
321 return NULL;
322}
323#endif
diff --git a/freedreno/freedreno_bo_cache.c b/freedreno/freedreno_bo_cache.c
new file mode 100644
index 00000000..7becb0d6
--- /dev/null
+++ b/freedreno/freedreno_bo_cache.c
@@ -0,0 +1,222 @@
1/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */
2
3/*
4 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23 * SOFTWARE.
24 *
25 * Authors:
26 * Rob Clark <robclark@freedesktop.org>
27 */
28
29#ifdef HAVE_CONFIG_H
30# include <config.h>
31#endif
32
33#include "freedreno_drmif.h"
34#include "freedreno_priv.h"
35
36
37drm_private void bo_del(struct fd_bo *bo);
38drm_private extern pthread_mutex_t table_lock;
39
40static void
41add_bucket(struct fd_bo_cache *cache, int size)
42{
43 unsigned int i = cache->num_buckets;
44
45 assert(i < ARRAY_SIZE(cache->cache_bucket));
46
47 list_inithead(&cache->cache_bucket[i].list);
48 cache->cache_bucket[i].size = size;
49 cache->num_buckets++;
50}
51
52/**
53 * @coarse: if true, only power-of-two bucket sizes, otherwise
54 * fill in for a bit smoother size curve..
55 */
56drm_private void
57fd_bo_cache_init(struct fd_bo_cache *cache, int course)
58{
59 unsigned long size, cache_max_size = 64 * 1024 * 1024;
60
61 /* OK, so power of two buckets was too wasteful of memory.
62 * Give 3 other sizes between each power of two, to hopefully
63 * cover things accurately enough. (The alternative is
64 * probably to just go for exact matching of sizes, and assume
65 * that for things like composited window resize the tiled
66 * width/height alignment and rounding of sizes to pages will
67 * get us useful cache hit rates anyway)
68 */
69 add_bucket(cache, 4096);
70 add_bucket(cache, 4096 * 2);
71 if (!course)
72 add_bucket(cache, 4096 * 3);
73
74 /* Initialize the linked lists for BO reuse cache. */
75 for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
76 add_bucket(cache, size);
77 if (!course) {
78 add_bucket(cache, size + size * 1 / 4);
79 add_bucket(cache, size + size * 2 / 4);
80 add_bucket(cache, size + size * 3 / 4);
81 }
82 }
83}
84
85/* Frees older cached buffers. Called under table_lock */
86drm_private void
87fd_bo_cache_cleanup(struct fd_bo_cache *cache, time_t time)
88{
89 int i;
90
91 if (cache->time == time)
92 return;
93
94 for (i = 0; i < cache->num_buckets; i++) {
95 struct fd_bo_bucket *bucket = &cache->cache_bucket[i];
96 struct fd_bo *bo;
97
98 while (!LIST_IS_EMPTY(&bucket->list)) {
99 bo = LIST_ENTRY(struct fd_bo, bucket->list.next, list);
100
101 /* keep things in cache for at least 1 second: */
102 if (time && ((time - bo->free_time) <= 1))
103 break;
104
105 list_del(&bo->list);
106 bo_del(bo);
107 }
108 }
109
110 cache->time = time;
111}
112
113static struct fd_bo_bucket * get_bucket(struct fd_bo_cache *cache, uint32_t size)
114{
115 int i;
116
117 /* hmm, this is what intel does, but I suppose we could calculate our
118 * way to the correct bucket size rather than looping..
119 */
120 for (i = 0; i < cache->num_buckets; i++) {
121 struct fd_bo_bucket *bucket = &cache->cache_bucket[i];
122 if (bucket->size >= size) {
123 return bucket;
124 }
125 }
126
127 return NULL;
128}
129
130static int is_idle(struct fd_bo *bo)
131{
132 return fd_bo_cpu_prep(bo, NULL,
133 DRM_FREEDRENO_PREP_READ |
134 DRM_FREEDRENO_PREP_WRITE |
135 DRM_FREEDRENO_PREP_NOSYNC) == 0;
136}
137
138static struct fd_bo *find_in_bucket(struct fd_bo_bucket *bucket, uint32_t flags)
139{
140 struct fd_bo *bo = NULL;
141
142 /* TODO .. if we had an ALLOC_FOR_RENDER flag like intel, we could
143 * skip the busy check.. if it is only going to be a render target
144 * then we probably don't need to stall..
145 *
146 * NOTE that intel takes ALLOC_FOR_RENDER bo's from the list tail
147 * (MRU, since likely to be in GPU cache), rather than head (LRU)..
148 */
149 pthread_mutex_lock(&table_lock);
150 if (!LIST_IS_EMPTY(&bucket->list)) {
151 bo = LIST_ENTRY(struct fd_bo, bucket->list.next, list);
152 /* TODO check for compatible flags? */
153 if (is_idle(bo)) {
154 list_del(&bo->list);
155 } else {
156 bo = NULL;
157 }
158 }
159 pthread_mutex_unlock(&table_lock);
160
161 return bo;
162}
163
164/* NOTE: size is potentially rounded up to bucket size: */
165drm_private struct fd_bo *
166fd_bo_cache_alloc(struct fd_bo_cache *cache, uint32_t *size, uint32_t flags)
167{
168 struct fd_bo *bo = NULL;
169 struct fd_bo_bucket *bucket;
170
171 *size = ALIGN(*size, 4096);
172 bucket = get_bucket(cache, *size);
173
174 /* see if we can be green and recycle: */
175retry:
176 if (bucket) {
177 *size = bucket->size;
178 bo = find_in_bucket(bucket, flags);
179 if (bo) {
180 if (bo->funcs->madvise(bo, TRUE) <= 0) {
181 /* we've lost the backing pages, delete and try again: */
182 pthread_mutex_lock(&table_lock);
183 bo_del(bo);
184 pthread_mutex_unlock(&table_lock);
185 goto retry;
186 }
187 atomic_set(&bo->refcnt, 1);
188 fd_device_ref(bo->dev);
189 return bo;
190 }
191 }
192
193 return NULL;
194}
195
196drm_private int
197fd_bo_cache_free(struct fd_bo_cache *cache, struct fd_bo *bo)
198{
199 struct fd_bo_bucket *bucket = get_bucket(cache, bo->size);
200
201 /* see if we can be green and recycle: */
202 if (bucket) {
203 struct timespec time;
204
205 bo->funcs->madvise(bo, FALSE);
206
207 clock_gettime(CLOCK_MONOTONIC, &time);
208
209 bo->free_time = time.tv_sec;
210 list_addtail(&bo->list, &bucket->list);
211 fd_bo_cache_cleanup(cache, time.tv_sec);
212
213 /* bo's in the bucket cache don't have a ref and
214 * don't hold a ref to the dev:
215 */
216 fd_device_del_locked(bo->dev);
217
218 return 0;
219 }
220
221 return -1;
222}
diff --git a/freedreno/freedreno_device.c b/freedreno/freedreno_device.c
index ddb95455..fcbf1402 100644
--- a/freedreno/freedreno_device.c
+++ b/freedreno/freedreno_device.c
@@ -42,44 +42,6 @@ static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
42struct fd_device * kgsl_device_new(int fd); 42struct fd_device * kgsl_device_new(int fd);
43struct fd_device * msm_device_new(int fd); 43struct fd_device * msm_device_new(int fd);
44 44
45static void
46add_bucket(struct fd_device *dev, int size)
47{
48 unsigned int i = dev->num_buckets;
49
50 assert(i < ARRAY_SIZE(dev->cache_bucket));
51
52 list_inithead(&dev->cache_bucket[i].list);
53 dev->cache_bucket[i].size = size;
54 dev->num_buckets++;
55}
56
57static void
58init_cache_buckets(struct fd_device *dev)
59{
60 unsigned long size, cache_max_size = 64 * 1024 * 1024;
61
62 /* OK, so power of two buckets was too wasteful of memory.
63 * Give 3 other sizes between each power of two, to hopefully
64 * cover things accurately enough. (The alternative is
65 * probably to just go for exact matching of sizes, and assume
66 * that for things like composited window resize the tiled
67 * width/height alignment and rounding of sizes to pages will
68 * get us useful cache hit rates anyway)
69 */
70 add_bucket(dev, 4096);
71 add_bucket(dev, 4096 * 2);
72 add_bucket(dev, 4096 * 3);
73
74 /* Initialize the linked lists for BO reuse cache. */
75 for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
76 add_bucket(dev, size);
77 add_bucket(dev, size + size * 1 / 4);
78 add_bucket(dev, size + size * 2 / 4);
79 add_bucket(dev, size + size * 3 / 4);
80 }
81}
82
83struct fd_device * fd_device_new(int fd) 45struct fd_device * fd_device_new(int fd)
84{ 46{
85 struct fd_device *dev; 47 struct fd_device *dev;
@@ -94,7 +56,15 @@ struct fd_device * fd_device_new(int fd)
94 56
95 if (!strcmp(version->name, "msm")) { 57 if (!strcmp(version->name, "msm")) {
96 DEBUG_MSG("msm DRM device"); 58 DEBUG_MSG("msm DRM device");
59 if (version->version_major != 1) {
60 ERROR_MSG("unsupported version: %u.%u.%u", version->version_major,
61 version->version_minor, version->version_patchlevel);
62 dev = NULL;
63 goto out;
64 }
65
97 dev = msm_device_new(fd); 66 dev = msm_device_new(fd);
67 dev->version = version->version_minor;
98#ifdef HAVE_FREEDRENO_KGSL 68#ifdef HAVE_FREEDRENO_KGSL
99 } else if (!strcmp(version->name, "kgsl")) { 69 } else if (!strcmp(version->name, "kgsl")) {
100 DEBUG_MSG("kgsl DRM device"); 70 DEBUG_MSG("kgsl DRM device");
@@ -104,6 +74,8 @@ struct fd_device * fd_device_new(int fd)
104 ERROR_MSG("unknown device: %s", version->name); 74 ERROR_MSG("unknown device: %s", version->name);
105 dev = NULL; 75 dev = NULL;
106 } 76 }
77
78out:
107 drmFreeVersion(version); 79 drmFreeVersion(version);
108 80
109 if (!dev) 81 if (!dev)
@@ -113,7 +85,7 @@ struct fd_device * fd_device_new(int fd)
113 dev->fd = fd; 85 dev->fd = fd;
114 dev->handle_table = drmHashCreate(); 86 dev->handle_table = drmHashCreate();
115 dev->name_table = drmHashCreate(); 87 dev->name_table = drmHashCreate();
116 init_cache_buckets(dev); 88 fd_bo_cache_init(&dev->bo_cache, FALSE);
117 89
118 return dev; 90 return dev;
119} 91}
@@ -123,9 +95,12 @@ struct fd_device * fd_device_new(int fd)
123 */ 95 */
124struct fd_device * fd_device_new_dup(int fd) 96struct fd_device * fd_device_new_dup(int fd)
125{ 97{
126 struct fd_device *dev = fd_device_new(dup(fd)); 98 int dup_fd = dup(fd);
99 struct fd_device *dev = fd_device_new(dup_fd);
127 if (dev) 100 if (dev)
128 dev->closefd = 1; 101 dev->closefd = 1;
102 else
103 close(dup_fd);
129 return dev; 104 return dev;
130} 105}
131 106
@@ -137,7 +112,7 @@ struct fd_device * fd_device_ref(struct fd_device *dev)
137 112
138static void fd_device_del_impl(struct fd_device *dev) 113static void fd_device_del_impl(struct fd_device *dev)
139{ 114{
140 fd_cleanup_bo_cache(dev, 0); 115 fd_bo_cache_cleanup(&dev->bo_cache, 0);
141 drmHashDestroy(dev->handle_table); 116 drmHashDestroy(dev->handle_table);
142 drmHashDestroy(dev->name_table); 117 drmHashDestroy(dev->name_table);
143 if (dev->closefd) 118 if (dev->closefd)
@@ -165,3 +140,8 @@ int fd_device_fd(struct fd_device *dev)
165{ 140{
166 return dev->fd; 141 return dev->fd;
167} 142}
143
144enum fd_version fd_device_version(struct fd_device *dev)
145{
146 return dev->version;
147}
diff --git a/freedreno/freedreno_drmif.h b/freedreno/freedreno_drmif.h
index 5547e943..7a8073ff 100644
--- a/freedreno/freedreno_drmif.h
+++ b/freedreno/freedreno_drmif.h
@@ -32,6 +32,15 @@
32#include <xf86drm.h> 32#include <xf86drm.h>
33#include <stdint.h> 33#include <stdint.h>
34 34
35#if defined(__GNUC__)
36# define drm_deprecated __attribute__((__deprecated__))
37#else
38# define drm_deprecated
39#endif
40
41/* an empty marker for things that will be deprecated in the future: */
42#define will_be_deprecated
43
35struct fd_bo; 44struct fd_bo;
36struct fd_pipe; 45struct fd_pipe;
37struct fd_device; 46struct fd_device;
@@ -50,6 +59,8 @@ enum fd_param_id {
50 FD_GMEM_SIZE, 59 FD_GMEM_SIZE,
51 FD_GPU_ID, 60 FD_GPU_ID,
52 FD_CHIP_ID, 61 FD_CHIP_ID,
62 FD_MAX_FREQ,
63 FD_TIMESTAMP,
53}; 64};
54 65
55/* bo flags: */ 66/* bo flags: */
@@ -78,6 +89,12 @@ struct fd_device * fd_device_ref(struct fd_device *dev);
78void fd_device_del(struct fd_device *dev); 89void fd_device_del(struct fd_device *dev);
79int fd_device_fd(struct fd_device *dev); 90int fd_device_fd(struct fd_device *dev);
80 91
92enum fd_version {
93 FD_VERSION_MADVISE = 1, /* kernel supports madvise */
94 FD_VERSION_UNLIMITED_CMDS = 1, /* submits w/ >4 cmd buffers (growable ringbuffer) */
95 FD_VERSION_FENCE_FD = 2, /* submit command supports in/out fences */
96};
97enum fd_version fd_device_version(struct fd_device *dev);
81 98
82/* pipe functions: 99/* pipe functions:
83 */ 100 */
diff --git a/freedreno/freedreno_pipe.c b/freedreno/freedreno_pipe.c
index 4a756d70..3f8c8342 100644
--- a/freedreno/freedreno_pipe.c
+++ b/freedreno/freedreno_pipe.c
@@ -37,6 +37,7 @@ struct fd_pipe *
37fd_pipe_new(struct fd_device *dev, enum fd_pipe_id id) 37fd_pipe_new(struct fd_device *dev, enum fd_pipe_id id)
38{ 38{
39 struct fd_pipe *pipe = NULL; 39 struct fd_pipe *pipe = NULL;
40 uint64_t val;
40 41
41 if (id > FD_PIPE_MAX) { 42 if (id > FD_PIPE_MAX) {
42 ERROR_MSG("invalid pipe id: %d", id); 43 ERROR_MSG("invalid pipe id: %d", id);
@@ -52,6 +53,9 @@ fd_pipe_new(struct fd_device *dev, enum fd_pipe_id id)
52 pipe->dev = dev; 53 pipe->dev = dev;
53 pipe->id = id; 54 pipe->id = id;
54 55
56 fd_pipe_get_param(pipe, FD_GPU_ID, &val);
57 pipe->gpu_id = val;
58
55 return pipe; 59 return pipe;
56fail: 60fail:
57 if (pipe) 61 if (pipe)
diff --git a/freedreno/freedreno_priv.h b/freedreno/freedreno_priv.h
index 53817b19..32170391 100644
--- a/freedreno/freedreno_priv.h
+++ b/freedreno/freedreno_priv.h
@@ -54,6 +54,13 @@
54#include "freedreno_ringbuffer.h" 54#include "freedreno_ringbuffer.h"
55#include "drm.h" 55#include "drm.h"
56 56
57#ifndef TRUE
58# define TRUE 1
59#endif
60#ifndef FALSE
61# define FALSE 0
62#endif
63
57struct fd_device_funcs { 64struct fd_device_funcs {
58 int (*bo_new_handle)(struct fd_device *dev, uint32_t size, 65 int (*bo_new_handle)(struct fd_device *dev, uint32_t size,
59 uint32_t flags, uint32_t *handle); 66 uint32_t flags, uint32_t *handle);
@@ -68,8 +75,15 @@ struct fd_bo_bucket {
68 struct list_head list; 75 struct list_head list;
69}; 76};
70 77
78struct fd_bo_cache {
79 struct fd_bo_bucket cache_bucket[14 * 4];
80 int num_buckets;
81 time_t time;
82};
83
71struct fd_device { 84struct fd_device {
72 int fd; 85 int fd;
86 enum fd_version version;
73 atomic_t refcnt; 87 atomic_t refcnt;
74 88
75 /* tables to keep track of bo's, to avoid "evil-twin" fd_bo objects: 89 /* tables to keep track of bo's, to avoid "evil-twin" fd_bo objects:
@@ -85,14 +99,16 @@ struct fd_device {
85 99
86 const struct fd_device_funcs *funcs; 100 const struct fd_device_funcs *funcs;
87 101
88 struct fd_bo_bucket cache_bucket[14 * 4]; 102 struct fd_bo_cache bo_cache;
89 int num_buckets;
90 time_t time;
91 103
92 int closefd; /* call close(fd) upon destruction */ 104 int closefd; /* call close(fd) upon destruction */
93}; 105};
94 106
95drm_private void fd_cleanup_bo_cache(struct fd_device *dev, time_t time); 107drm_private void fd_bo_cache_init(struct fd_bo_cache *cache, int coarse);
108drm_private void fd_bo_cache_cleanup(struct fd_bo_cache *cache, time_t time);
109drm_private struct fd_bo * fd_bo_cache_alloc(struct fd_bo_cache *cache,
110 uint32_t *size, uint32_t flags);
111drm_private int fd_bo_cache_free(struct fd_bo_cache *cache, struct fd_bo *bo);
96 112
97/* for where @table_lock is already held: */ 113/* for where @table_lock is already held: */
98drm_private void fd_device_del_locked(struct fd_device *dev); 114drm_private void fd_device_del_locked(struct fd_device *dev);
@@ -107,6 +123,7 @@ struct fd_pipe_funcs {
107struct fd_pipe { 123struct fd_pipe {
108 struct fd_device *dev; 124 struct fd_device *dev;
109 enum fd_pipe_id id; 125 enum fd_pipe_id id;
126 uint32_t gpu_id;
110 const struct fd_pipe_funcs *funcs; 127 const struct fd_pipe_funcs *funcs;
111}; 128};
112 129
@@ -117,12 +134,16 @@ struct fd_ringmarker {
117 134
118struct fd_ringbuffer_funcs { 135struct fd_ringbuffer_funcs {
119 void * (*hostptr)(struct fd_ringbuffer *ring); 136 void * (*hostptr)(struct fd_ringbuffer *ring);
120 int (*flush)(struct fd_ringbuffer *ring, uint32_t *last_start); 137 int (*flush)(struct fd_ringbuffer *ring, uint32_t *last_start,
138 int in_fence_fd, int *out_fence_fd);
139 void (*grow)(struct fd_ringbuffer *ring, uint32_t size);
121 void (*reset)(struct fd_ringbuffer *ring); 140 void (*reset)(struct fd_ringbuffer *ring);
122 void (*emit_reloc)(struct fd_ringbuffer *ring, 141 void (*emit_reloc)(struct fd_ringbuffer *ring,
123 const struct fd_reloc *reloc); 142 const struct fd_reloc *reloc);
124 void (*emit_reloc_ring)(struct fd_ringbuffer *ring, 143 uint32_t (*emit_reloc_ring)(struct fd_ringbuffer *ring,
125 struct fd_ringmarker *target, struct fd_ringmarker *end); 144 struct fd_ringbuffer *target, uint32_t cmd_idx,
145 uint32_t submit_offset, uint32_t size);
146 uint32_t (*cmd_count)(struct fd_ringbuffer *ring);
126 void (*destroy)(struct fd_ringbuffer *ring); 147 void (*destroy)(struct fd_ringbuffer *ring);
127}; 148};
128 149
@@ -130,6 +151,7 @@ struct fd_bo_funcs {
130 int (*offset)(struct fd_bo *bo, uint64_t *offset); 151 int (*offset)(struct fd_bo *bo, uint64_t *offset);
131 int (*cpu_prep)(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t op); 152 int (*cpu_prep)(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t op);
132 void (*cpu_fini)(struct fd_bo *bo); 153 void (*cpu_fini)(struct fd_bo *bo);
154 int (*madvise)(struct fd_bo *bo, int willneed);
133 void (*destroy)(struct fd_bo *bo); 155 void (*destroy)(struct fd_bo *bo);
134}; 156};
135 157
@@ -168,4 +190,10 @@ struct fd_bo {
168#define U642VOID(x) ((void *)(unsigned long)(x)) 190#define U642VOID(x) ((void *)(unsigned long)(x))
169#define VOID2U64(x) ((uint64_t)(unsigned long)(x)) 191#define VOID2U64(x) ((uint64_t)(unsigned long)(x))
170 192
193static inline uint32_t
194offset_bytes(void *end, void *start)
195{
196 return ((char *)end) - ((char *)start);
197}
198
171#endif /* FREEDRENO_PRIV_H_ */ 199#endif /* FREEDRENO_PRIV_H_ */
diff --git a/freedreno/freedreno_ringbuffer.c b/freedreno/freedreno_ringbuffer.c
index 984da241..7310f1fd 100644
--- a/freedreno/freedreno_ringbuffer.c
+++ b/freedreno/freedreno_ringbuffer.c
@@ -45,10 +45,9 @@ fd_ringbuffer_new(struct fd_pipe *pipe, uint32_t size)
45 if (!ring) 45 if (!ring)
46 return NULL; 46 return NULL;
47 47
48 ring->size = size;
49 ring->pipe = pipe; 48 ring->pipe = pipe;
50 ring->start = ring->funcs->hostptr(ring); 49 ring->start = ring->funcs->hostptr(ring);
51 ring->end = &(ring->start[size/4]); 50 ring->end = &(ring->start[ring->size/4]);
52 51
53 ring->cur = ring->last_start = ring->start; 52 ring->cur = ring->last_start = ring->start;
54 53
@@ -57,6 +56,7 @@ fd_ringbuffer_new(struct fd_pipe *pipe, uint32_t size)
57 56
58void fd_ringbuffer_del(struct fd_ringbuffer *ring) 57void fd_ringbuffer_del(struct fd_ringbuffer *ring)
59{ 58{
59 fd_ringbuffer_reset(ring);
60 ring->funcs->destroy(ring); 60 ring->funcs->destroy(ring);
61} 61}
62 62
@@ -80,10 +80,31 @@ void fd_ringbuffer_reset(struct fd_ringbuffer *ring)
80 ring->funcs->reset(ring); 80 ring->funcs->reset(ring);
81} 81}
82 82
83/* maybe get rid of this and use fd_ringmarker_flush() from DDX too? */
84int fd_ringbuffer_flush(struct fd_ringbuffer *ring) 83int fd_ringbuffer_flush(struct fd_ringbuffer *ring)
85{ 84{
86 return ring->funcs->flush(ring, ring->last_start); 85 return ring->funcs->flush(ring, ring->last_start, -1, NULL);
86}
87
88int fd_ringbuffer_flush2(struct fd_ringbuffer *ring, int in_fence_fd,
89 int *out_fence_fd)
90{
91 return ring->funcs->flush(ring, ring->last_start, in_fence_fd, out_fence_fd);
92}
93
94void fd_ringbuffer_grow(struct fd_ringbuffer *ring, uint32_t ndwords)
95{
96 assert(ring->funcs->grow); /* unsupported on kgsl */
97
98 /* there is an upper bound on IB size, which appears to be 0x100000 */
99 if (ring->size < 0x100000)
100 ring->size *= 2;
101
102 ring->funcs->grow(ring, ring->size);
103
104 ring->start = ring->funcs->hostptr(ring);
105 ring->end = &(ring->start[ring->size/4]);
106
107 ring->cur = ring->last_start = ring->start;
87} 108}
88 109
89uint32_t fd_ringbuffer_timestamp(struct fd_ringbuffer *ring) 110uint32_t fd_ringbuffer_timestamp(struct fd_ringbuffer *ring)
@@ -94,16 +115,44 @@ uint32_t fd_ringbuffer_timestamp(struct fd_ringbuffer *ring)
94void fd_ringbuffer_reloc(struct fd_ringbuffer *ring, 115void fd_ringbuffer_reloc(struct fd_ringbuffer *ring,
95 const struct fd_reloc *reloc) 116 const struct fd_reloc *reloc)
96{ 117{
118 assert(ring->pipe->gpu_id < 500);
119 ring->funcs->emit_reloc(ring, reloc);
120}
121
122void fd_ringbuffer_reloc2(struct fd_ringbuffer *ring,
123 const struct fd_reloc *reloc)
124{
97 ring->funcs->emit_reloc(ring, reloc); 125 ring->funcs->emit_reloc(ring, reloc);
98} 126}
99 127
100void 128void fd_ringbuffer_emit_reloc_ring(struct fd_ringbuffer *ring,
101fd_ringbuffer_emit_reloc_ring(struct fd_ringbuffer *ring, 129 struct fd_ringmarker *target, struct fd_ringmarker *end)
102 struct fd_ringmarker *target,
103 struct fd_ringmarker *end)
104{ 130{
131 uint32_t submit_offset, size;
132
133 /* This function is deprecated and not supported on 64b devices: */
134 assert(ring->pipe->gpu_id < 500);
105 assert(target->ring == end->ring); 135 assert(target->ring == end->ring);
106 ring->funcs->emit_reloc_ring(ring, target, end); 136
137 submit_offset = offset_bytes(target->cur, target->ring->start);
138 size = offset_bytes(end->cur, target->cur);
139
140 ring->funcs->emit_reloc_ring(ring, target->ring, 0, submit_offset, size);
141}
142
143uint32_t fd_ringbuffer_cmd_count(struct fd_ringbuffer *ring)
144{
145 if (!ring->funcs->cmd_count)
146 return 1;
147 return ring->funcs->cmd_count(ring);
148}
149
150uint32_t
151fd_ringbuffer_emit_reloc_ring_full(struct fd_ringbuffer *ring,
152 struct fd_ringbuffer *target, uint32_t cmd_idx)
153{
154 uint32_t size = offset_bytes(target->cur, target->start);
155 return ring->funcs->emit_reloc_ring(ring, target, cmd_idx, 0, size);
107} 156}
108 157
109struct fd_ringmarker * fd_ringmarker_new(struct fd_ringbuffer *ring) 158struct fd_ringmarker * fd_ringmarker_new(struct fd_ringbuffer *ring)
@@ -118,7 +167,7 @@ struct fd_ringmarker * fd_ringmarker_new(struct fd_ringbuffer *ring)
118 167
119 marker->ring = ring; 168 marker->ring = ring;
120 169
121 fd_ringmarker_mark(marker); 170 marker->cur = marker->ring->cur;
122 171
123 return marker; 172 return marker;
124} 173}
@@ -142,5 +191,5 @@ uint32_t fd_ringmarker_dwords(struct fd_ringmarker *start,
142int fd_ringmarker_flush(struct fd_ringmarker *marker) 191int fd_ringmarker_flush(struct fd_ringmarker *marker)
143{ 192{
144 struct fd_ringbuffer *ring = marker->ring; 193 struct fd_ringbuffer *ring = marker->ring;
145 return ring->funcs->flush(ring, marker->cur); 194 return ring->funcs->flush(ring, marker->cur, -1, NULL);
146} 195}
diff --git a/freedreno/freedreno_ringbuffer.h b/freedreno/freedreno_ringbuffer.h
index 578cdb24..c501fbad 100644
--- a/freedreno/freedreno_ringbuffer.h
+++ b/freedreno/freedreno_ringbuffer.h
@@ -56,6 +56,12 @@ void fd_ringbuffer_set_parent(struct fd_ringbuffer *ring,
56 struct fd_ringbuffer *parent); 56 struct fd_ringbuffer *parent);
57void fd_ringbuffer_reset(struct fd_ringbuffer *ring); 57void fd_ringbuffer_reset(struct fd_ringbuffer *ring);
58int fd_ringbuffer_flush(struct fd_ringbuffer *ring); 58int fd_ringbuffer_flush(struct fd_ringbuffer *ring);
59/* in_fence_fd: -1 for no in-fence, else fence fd
60 * out_fence_fd: NULL for no output-fence requested, else ptr to return out-fence
61 */
62int fd_ringbuffer_flush2(struct fd_ringbuffer *ring, int in_fence_fd,
63 int *out_fence_fd);
64void fd_ringbuffer_grow(struct fd_ringbuffer *ring, uint32_t ndwords);
59uint32_t fd_ringbuffer_timestamp(struct fd_ringbuffer *ring); 65uint32_t fd_ringbuffer_timestamp(struct fd_ringbuffer *ring);
60 66
61static inline void fd_ringbuffer_emit(struct fd_ringbuffer *ring, 67static inline void fd_ringbuffer_emit(struct fd_ringbuffer *ring,
@@ -72,17 +78,24 @@ struct fd_reloc {
72 uint32_t offset; 78 uint32_t offset;
73 uint32_t or; 79 uint32_t or;
74 int32_t shift; 80 int32_t shift;
81 uint32_t orhi; /* used for a5xx+ */
75}; 82};
76 83
77void fd_ringbuffer_reloc(struct fd_ringbuffer *ring, const struct fd_reloc *reloc); 84/* NOTE: relocs are 2 dwords on a5xx+ */
78void fd_ringbuffer_emit_reloc_ring(struct fd_ringbuffer *ring, 85
86void fd_ringbuffer_reloc2(struct fd_ringbuffer *ring, const struct fd_reloc *reloc);
87will_be_deprecated void fd_ringbuffer_reloc(struct fd_ringbuffer *ring, const struct fd_reloc *reloc);
88will_be_deprecated void fd_ringbuffer_emit_reloc_ring(struct fd_ringbuffer *ring,
79 struct fd_ringmarker *target, struct fd_ringmarker *end); 89 struct fd_ringmarker *target, struct fd_ringmarker *end);
90uint32_t fd_ringbuffer_cmd_count(struct fd_ringbuffer *ring);
91uint32_t fd_ringbuffer_emit_reloc_ring_full(struct fd_ringbuffer *ring,
92 struct fd_ringbuffer *target, uint32_t cmd_idx);
80 93
81struct fd_ringmarker * fd_ringmarker_new(struct fd_ringbuffer *ring); 94will_be_deprecated struct fd_ringmarker * fd_ringmarker_new(struct fd_ringbuffer *ring);
82void fd_ringmarker_del(struct fd_ringmarker *marker); 95will_be_deprecated void fd_ringmarker_del(struct fd_ringmarker *marker);
83void fd_ringmarker_mark(struct fd_ringmarker *marker); 96will_be_deprecated void fd_ringmarker_mark(struct fd_ringmarker *marker);
84uint32_t fd_ringmarker_dwords(struct fd_ringmarker *start, 97will_be_deprecated uint32_t fd_ringmarker_dwords(struct fd_ringmarker *start,
85 struct fd_ringmarker *end); 98 struct fd_ringmarker *end);
86int fd_ringmarker_flush(struct fd_ringmarker *marker); 99will_be_deprecated int fd_ringmarker_flush(struct fd_ringmarker *marker);
87 100
88#endif /* FREEDRENO_RINGBUFFER_H_ */ 101#endif /* FREEDRENO_RINGBUFFER_H_ */
diff --git a/freedreno/README b/freedreno/kgsl/README
index ae22e013..56874b42 100644
--- a/freedreno/README
+++ b/freedreno/kgsl/README
@@ -1,3 +1,13 @@
1This is a historical description of what is now the kgsl backend
2in libdrm freedreno (before the upstream drm/msm driver). Note
3that the kgsl backend requires the "kgsl-drm" shim driver, which
4usually is in disrepair (QCOM does not build it for android), and
5due to random differences between different downstream android
6kernel branches it may or may not work. So YMMV.
7
8Original README:
9----------------
10
1Note that current msm kernel driver is a bit strange. It provides a 11Note that current msm kernel driver is a bit strange. It provides a
2DRM interface for GEM, which is basically sufficient to have DRI2 12DRM interface for GEM, which is basically sufficient to have DRI2
3working. But it does not provide KMS. And interface to 2d and 3d 13working. But it does not provide KMS. And interface to 2d and 3d
diff --git a/freedreno/kgsl/kgsl_bo.c b/freedreno/kgsl/kgsl_bo.c
index 2b45b5e2..ab3485e3 100644
--- a/freedreno/kgsl/kgsl_bo.c
+++ b/freedreno/kgsl/kgsl_bo.c
@@ -116,6 +116,11 @@ static void kgsl_bo_cpu_fini(struct fd_bo *bo)
116{ 116{
117} 117}
118 118
119static int kgsl_bo_madvise(struct fd_bo *bo, int willneed)
120{
121 return willneed; /* not supported by kgsl */
122}
123
119static void kgsl_bo_destroy(struct fd_bo *bo) 124static void kgsl_bo_destroy(struct fd_bo *bo)
120{ 125{
121 struct kgsl_bo *kgsl_bo = to_kgsl_bo(bo); 126 struct kgsl_bo *kgsl_bo = to_kgsl_bo(bo);
@@ -127,6 +132,7 @@ static const struct fd_bo_funcs funcs = {
127 .offset = kgsl_bo_offset, 132 .offset = kgsl_bo_offset,
128 .cpu_prep = kgsl_bo_cpu_prep, 133 .cpu_prep = kgsl_bo_cpu_prep,
129 .cpu_fini = kgsl_bo_cpu_fini, 134 .cpu_fini = kgsl_bo_cpu_fini,
135 .madvise = kgsl_bo_madvise,
130 .destroy = kgsl_bo_destroy, 136 .destroy = kgsl_bo_destroy,
131}; 137};
132 138
diff --git a/freedreno/kgsl/kgsl_drm.h b/freedreno/kgsl/kgsl_drm.h
index f1c7f4e2..281978ea 100644
--- a/freedreno/kgsl/kgsl_drm.h
+++ b/freedreno/kgsl/kgsl_drm.h
@@ -81,7 +81,7 @@ struct drm_kgsl_gem_create_fd)
81/* Memory types - these define the source and caching policies 81/* Memory types - these define the source and caching policies
82 of the GEM memory chunk */ 82 of the GEM memory chunk */
83 83
84/* Legacy definitions left for compatability */ 84/* Legacy definitions left for compatibility */
85 85
86#define DRM_KGSL_GEM_TYPE_EBI 0 86#define DRM_KGSL_GEM_TYPE_EBI 0
87#define DRM_KGSL_GEM_TYPE_SMI 1 87#define DRM_KGSL_GEM_TYPE_SMI 1
diff --git a/freedreno/kgsl/kgsl_pipe.c b/freedreno/kgsl/kgsl_pipe.c
index 58b3b4d5..8a39eb49 100644
--- a/freedreno/kgsl/kgsl_pipe.c
+++ b/freedreno/kgsl/kgsl_pipe.c
@@ -50,6 +50,10 @@ static int kgsl_pipe_get_param(struct fd_pipe *pipe,
50 case FD_CHIP_ID: 50 case FD_CHIP_ID:
51 *value = kgsl_pipe->devinfo.chip_id; 51 *value = kgsl_pipe->devinfo.chip_id;
52 return 0; 52 return 0;
53 case FD_MAX_FREQ:
54 case FD_TIMESTAMP:
55 /* unsupported on kgsl */
56 return -1;
53 default: 57 default:
54 ERROR_MSG("invalid param id: %d", param); 58 ERROR_MSG("invalid param id: %d", param);
55 return -1; 59 return -1;
@@ -251,6 +255,11 @@ drm_private struct fd_pipe * kgsl_pipe_new(struct fd_device *dev,
251 GETPROP(fd, VERSION, kgsl_pipe->version); 255 GETPROP(fd, VERSION, kgsl_pipe->version);
252 GETPROP(fd, DEVICE_INFO, kgsl_pipe->devinfo); 256 GETPROP(fd, DEVICE_INFO, kgsl_pipe->devinfo);
253 257
258 if (kgsl_pipe->devinfo.gpu_id >= 500) {
259 ERROR_MSG("64b unsupported with kgsl");
260 goto fail;
261 }
262
254 INFO_MSG("Pipe Info:"); 263 INFO_MSG("Pipe Info:");
255 INFO_MSG(" Device: %s", paths[id]); 264 INFO_MSG(" Device: %s", paths[id]);
256 INFO_MSG(" Chip-id: %d.%d.%d.%d", 265 INFO_MSG(" Chip-id: %d.%d.%d.%d",
diff --git a/freedreno/kgsl/kgsl_ringbuffer.c b/freedreno/kgsl/kgsl_ringbuffer.c
index 6f68f2f3..e4696b1b 100644
--- a/freedreno/kgsl/kgsl_ringbuffer.c
+++ b/freedreno/kgsl/kgsl_ringbuffer.c
@@ -113,7 +113,8 @@ static void * kgsl_ringbuffer_hostptr(struct fd_ringbuffer *ring)
113 return kgsl_ring->bo->hostptr; 113 return kgsl_ring->bo->hostptr;
114} 114}
115 115
116static int kgsl_ringbuffer_flush(struct fd_ringbuffer *ring, uint32_t *last_start) 116static int kgsl_ringbuffer_flush(struct fd_ringbuffer *ring, uint32_t *last_start,
117 int in_fence_fd, int *out_fence_fd)
117{ 118{
118 struct kgsl_ringbuffer *kgsl_ring = to_kgsl_ringbuffer(ring); 119 struct kgsl_ringbuffer *kgsl_ring = to_kgsl_ringbuffer(ring);
119 struct kgsl_pipe *kgsl_pipe = to_kgsl_pipe(ring->pipe); 120 struct kgsl_pipe *kgsl_pipe = to_kgsl_pipe(ring->pipe);
@@ -131,6 +132,9 @@ static int kgsl_ringbuffer_flush(struct fd_ringbuffer *ring, uint32_t *last_star
131 }; 132 };
132 int ret; 133 int ret;
133 134
135 assert(in_fence_fd == -1);
136 assert(out_fence_fd == NULL);
137
134 kgsl_pipe_pre_submit(kgsl_pipe); 138 kgsl_pipe_pre_submit(kgsl_pipe);
135 139
136 /* z180_cmdstream_issueibcmds() is made of fail: */ 140 /* z180_cmdstream_issueibcmds() is made of fail: */
@@ -173,12 +177,14 @@ static void kgsl_ringbuffer_emit_reloc(struct fd_ringbuffer *ring,
173 kgsl_pipe_add_submit(to_kgsl_pipe(ring->pipe), kgsl_bo); 177 kgsl_pipe_add_submit(to_kgsl_pipe(ring->pipe), kgsl_bo);
174} 178}
175 179
176static void kgsl_ringbuffer_emit_reloc_ring(struct fd_ringbuffer *ring, 180static uint32_t kgsl_ringbuffer_emit_reloc_ring(struct fd_ringbuffer *ring,
177 struct fd_ringmarker *target, struct fd_ringmarker *end) 181 struct fd_ringbuffer *target, uint32_t cmd_idx,
182 uint32_t submit_offset, uint32_t size)
178{ 183{
179 struct kgsl_ringbuffer *target_ring = to_kgsl_ringbuffer(target->ring); 184 struct kgsl_ringbuffer *target_ring = to_kgsl_ringbuffer(target);
180 (*ring->cur++) = target_ring->bo->gpuaddr + 185 assert(cmd_idx == 0);
181 (uint8_t *)target->cur - (uint8_t *)target->ring->start; 186 (*ring->cur++) = target_ring->bo->gpuaddr + submit_offset;
187 return size;
182} 188}
183 189
184static void kgsl_ringbuffer_destroy(struct fd_ringbuffer *ring) 190static void kgsl_ringbuffer_destroy(struct fd_ringbuffer *ring)
@@ -213,6 +219,7 @@ drm_private struct fd_ringbuffer * kgsl_ringbuffer_new(struct fd_pipe *pipe,
213 219
214 ring = &kgsl_ring->base; 220 ring = &kgsl_ring->base;
215 ring->funcs = &funcs; 221 ring->funcs = &funcs;
222 ring->size = size;
216 223
217 kgsl_ring->bo = kgsl_rb_bo_new(to_kgsl_pipe(pipe), size); 224 kgsl_ring->bo = kgsl_rb_bo_new(to_kgsl_pipe(pipe), size);
218 if (!kgsl_ring->bo) { 225 if (!kgsl_ring->bo) {
diff --git a/freedreno/kgsl/msm_kgsl.h b/freedreno/kgsl/msm_kgsl.h
index e67190f0..5b36eeb4 100644
--- a/freedreno/kgsl/msm_kgsl.h
+++ b/freedreno/kgsl/msm_kgsl.h
@@ -31,7 +31,7 @@
31#define KGSL_FLAGS_SOFT_RESET 0x00000100 31#define KGSL_FLAGS_SOFT_RESET 0x00000100
32#define KGSL_FLAGS_PER_CONTEXT_TIMESTAMPS 0x00000200 32#define KGSL_FLAGS_PER_CONTEXT_TIMESTAMPS 0x00000200
33 33
34/* Clock flags to show which clocks should be controled by a given platform */ 34/* Clock flags to show which clocks should be controlled by a given platform */
35#define KGSL_CLK_SRC 0x00000001 35#define KGSL_CLK_SRC 0x00000001
36#define KGSL_CLK_CORE 0x00000002 36#define KGSL_CLK_CORE 0x00000002
37#define KGSL_CLK_IFACE 0x00000004 37#define KGSL_CLK_IFACE 0x00000004
@@ -295,7 +295,7 @@ struct kgsl_cmdstream_freememontimestamp {
295 295
296/* Previous versions of this header had incorrectly defined 296/* Previous versions of this header had incorrectly defined
297 IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP as a read-only ioctl instead 297 IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP as a read-only ioctl instead
298 of a write only ioctl. To ensure binary compatability, the following 298 of a write only ioctl. To ensure binary compatibility, the following
299 #define will be used to intercept the incorrect ioctl 299 #define will be used to intercept the incorrect ioctl
300*/ 300*/
301 301
diff --git a/freedreno/msm/msm_bo.c b/freedreno/msm/msm_bo.c
index cd05a6cd..72471df6 100644
--- a/freedreno/msm/msm_bo.c
+++ b/freedreno/msm/msm_bo.c
@@ -89,6 +89,25 @@ static void msm_bo_cpu_fini(struct fd_bo *bo)
89 drmCommandWrite(bo->dev->fd, DRM_MSM_GEM_CPU_FINI, &req, sizeof(req)); 89 drmCommandWrite(bo->dev->fd, DRM_MSM_GEM_CPU_FINI, &req, sizeof(req));
90} 90}
91 91
92static int msm_bo_madvise(struct fd_bo *bo, int willneed)
93{
94 struct drm_msm_gem_madvise req = {
95 .handle = bo->handle,
96 .madv = willneed ? MSM_MADV_WILLNEED : MSM_MADV_DONTNEED,
97 };
98 int ret;
99
100 /* older kernels do not support this: */
101 if (bo->dev->version < FD_VERSION_MADVISE)
102 return willneed;
103
104 ret = drmCommandWriteRead(bo->dev->fd, DRM_MSM_GEM_MADVISE, &req, sizeof(req));
105 if (ret)
106 return ret;
107
108 return req.retained;
109}
110
92static void msm_bo_destroy(struct fd_bo *bo) 111static void msm_bo_destroy(struct fd_bo *bo)
93{ 112{
94 struct msm_bo *msm_bo = to_msm_bo(bo); 113 struct msm_bo *msm_bo = to_msm_bo(bo);
@@ -100,6 +119,7 @@ static const struct fd_bo_funcs funcs = {
100 .offset = msm_bo_offset, 119 .offset = msm_bo_offset,
101 .cpu_prep = msm_bo_cpu_prep, 120 .cpu_prep = msm_bo_cpu_prep,
102 .cpu_fini = msm_bo_cpu_fini, 121 .cpu_fini = msm_bo_cpu_fini,
122 .madvise = msm_bo_madvise,
103 .destroy = msm_bo_destroy, 123 .destroy = msm_bo_destroy,
104}; 124};
105 125
diff --git a/freedreno/msm/msm_device.c b/freedreno/msm/msm_device.c
index 25c097c2..727baa44 100644
--- a/freedreno/msm/msm_device.c
+++ b/freedreno/msm/msm_device.c
@@ -39,6 +39,7 @@
39static void msm_device_destroy(struct fd_device *dev) 39static void msm_device_destroy(struct fd_device *dev)
40{ 40{
41 struct msm_device *msm_dev = to_msm_device(dev); 41 struct msm_device *msm_dev = to_msm_device(dev);
42 fd_bo_cache_cleanup(&msm_dev->ring_cache, 0);
42 free(msm_dev); 43 free(msm_dev);
43} 44}
44 45
@@ -61,5 +62,7 @@ drm_private struct fd_device * msm_device_new(int fd)
61 dev = &msm_dev->base; 62 dev = &msm_dev->base;
62 dev->funcs = &funcs; 63 dev->funcs = &funcs;
63 64
65 fd_bo_cache_init(&msm_dev->ring_cache, TRUE);
66
64 return dev; 67 return dev;
65} 68}
diff --git a/freedreno/msm/msm_drm.h b/freedreno/msm/msm_drm.h
index f7474c59..ed4c8d47 100644
--- a/freedreno/msm/msm_drm.h
+++ b/freedreno/msm/msm_drm.h
@@ -28,9 +28,13 @@
28#include <stddef.h> 28#include <stddef.h>
29#include "drm.h" 29#include "drm.h"
30 30
31#if defined(__cplusplus)
32extern "C" {
33#endif
34
31/* Please note that modifications to all structs defined here are 35/* Please note that modifications to all structs defined here are
32 * subject to backwards-compatibility constraints: 36 * subject to backwards-compatibility constraints:
33 * 1) Do not use pointers, use uint64_t instead for 32 bit / 64 bit 37 * 1) Do not use pointers, use __u64 instead for 32 bit / 64 bit
34 * user/kernel compatibility 38 * user/kernel compatibility
35 * 2) Keep fields aligned to their size 39 * 2) Keep fields aligned to their size
36 * 3) Because of how drm_ioctl() works, we can add new fields at 40 * 3) Because of how drm_ioctl() works, we can add new fields at
@@ -46,23 +50,34 @@
46#define MSM_PIPE_2D1 0x02 50#define MSM_PIPE_2D1 0x02
47#define MSM_PIPE_3D0 0x10 51#define MSM_PIPE_3D0 0x10
48 52
53/* The pipe-id just uses the lower bits, so can be OR'd with flags in
54 * the upper 16 bits (which could be extended further, if needed, maybe
55 * we extend/overload the pipe-id some day to deal with multiple rings,
56 * but even then I don't think we need the full lower 16 bits).
57 */
58#define MSM_PIPE_ID_MASK 0xffff
59#define MSM_PIPE_ID(x) ((x) & MSM_PIPE_ID_MASK)
60#define MSM_PIPE_FLAGS(x) ((x) & ~MSM_PIPE_ID_MASK)
61
49/* timeouts are specified in clock-monotonic absolute times (to simplify 62/* timeouts are specified in clock-monotonic absolute times (to simplify
50 * restarting interrupted ioctls). The following struct is logically the 63 * restarting interrupted ioctls). The following struct is logically the
51 * same as 'struct timespec' but 32/64b ABI safe. 64 * same as 'struct timespec' but 32/64b ABI safe.
52 */ 65 */
53struct drm_msm_timespec { 66struct drm_msm_timespec {
54 int64_t tv_sec; /* seconds */ 67 __s64 tv_sec; /* seconds */
55 int64_t tv_nsec; /* nanoseconds */ 68 __s64 tv_nsec; /* nanoseconds */
56}; 69};
57 70
58#define MSM_PARAM_GPU_ID 0x01 71#define MSM_PARAM_GPU_ID 0x01
59#define MSM_PARAM_GMEM_SIZE 0x02 72#define MSM_PARAM_GMEM_SIZE 0x02
60#define MSM_PARAM_CHIP_ID 0x03 73#define MSM_PARAM_CHIP_ID 0x03
74#define MSM_PARAM_MAX_FREQ 0x04
75#define MSM_PARAM_TIMESTAMP 0x05
61 76
62struct drm_msm_param { 77struct drm_msm_param {
63 uint32_t pipe; /* in, MSM_PIPE_x */ 78 __u32 pipe; /* in, MSM_PIPE_x */
64 uint32_t param; /* in, MSM_PARAM_x */ 79 __u32 param; /* in, MSM_PARAM_x */
65 uint64_t value; /* out (get_param) or in (set_param) */ 80 __u64 value; /* out (get_param) or in (set_param) */
66}; 81};
67 82
68/* 83/*
@@ -84,15 +99,15 @@ struct drm_msm_param {
84 MSM_BO_UNCACHED) 99 MSM_BO_UNCACHED)
85 100
86struct drm_msm_gem_new { 101struct drm_msm_gem_new {
87 uint64_t size; /* in */ 102 __u64 size; /* in */
88 uint32_t flags; /* in, mask of MSM_BO_x */ 103 __u32 flags; /* in, mask of MSM_BO_x */
89 uint32_t handle; /* out */ 104 __u32 handle; /* out */
90}; 105};
91 106
92struct drm_msm_gem_info { 107struct drm_msm_gem_info {
93 uint32_t handle; /* in */ 108 __u32 handle; /* in */
94 uint32_t pad; 109 __u32 pad;
95 uint64_t offset; /* out, offset to pass to mmap() */ 110 __u64 offset; /* out, offset to pass to mmap() */
96}; 111};
97 112
98#define MSM_PREP_READ 0x01 113#define MSM_PREP_READ 0x01
@@ -102,13 +117,13 @@ struct drm_msm_gem_info {
102#define MSM_PREP_FLAGS (MSM_PREP_READ | MSM_PREP_WRITE | MSM_PREP_NOSYNC) 117#define MSM_PREP_FLAGS (MSM_PREP_READ | MSM_PREP_WRITE | MSM_PREP_NOSYNC)
103 118
104struct drm_msm_gem_cpu_prep { 119struct drm_msm_gem_cpu_prep {
105 uint32_t handle; /* in */ 120 __u32 handle; /* in */
106 uint32_t op; /* in, mask of MSM_PREP_x */ 121 __u32 op; /* in, mask of MSM_PREP_x */
107 struct drm_msm_timespec timeout; /* in */ 122 struct drm_msm_timespec timeout; /* in */
108}; 123};
109 124
110struct drm_msm_gem_cpu_fini { 125struct drm_msm_gem_cpu_fini {
111 uint32_t handle; /* in */ 126 __u32 handle; /* in */
112}; 127};
113 128
114/* 129/*
@@ -127,11 +142,11 @@ struct drm_msm_gem_cpu_fini {
127 * otherwise EINVAL. 142 * otherwise EINVAL.
128 */ 143 */
129struct drm_msm_gem_submit_reloc { 144struct drm_msm_gem_submit_reloc {
130 uint32_t submit_offset; /* in, offset from submit_bo */ 145 __u32 submit_offset; /* in, offset from submit_bo */
131 uint32_t or; /* in, value OR'd with result */ 146 __u32 or; /* in, value OR'd with result */
132 int32_t shift; /* in, amount of left shift (can be negative) */ 147 __s32 shift; /* in, amount of left shift (can be negative) */
133 uint32_t reloc_idx; /* in, index of reloc_bo buffer */ 148 __u32 reloc_idx; /* in, index of reloc_bo buffer */
134 uint64_t reloc_offset; /* in, offset from start of reloc_bo */ 149 __u64 reloc_offset; /* in, offset from start of reloc_bo */
135}; 150};
136 151
137/* submit-types: 152/* submit-types:
@@ -146,13 +161,13 @@ struct drm_msm_gem_submit_reloc {
146#define MSM_SUBMIT_CMD_IB_TARGET_BUF 0x0002 161#define MSM_SUBMIT_CMD_IB_TARGET_BUF 0x0002
147#define MSM_SUBMIT_CMD_CTX_RESTORE_BUF 0x0003 162#define MSM_SUBMIT_CMD_CTX_RESTORE_BUF 0x0003
148struct drm_msm_gem_submit_cmd { 163struct drm_msm_gem_submit_cmd {
149 uint32_t type; /* in, one of MSM_SUBMIT_CMD_x */ 164 __u32 type; /* in, one of MSM_SUBMIT_CMD_x */
150 uint32_t submit_idx; /* in, index of submit_bo cmdstream buffer */ 165 __u32 submit_idx; /* in, index of submit_bo cmdstream buffer */
151 uint32_t submit_offset; /* in, offset into submit_bo */ 166 __u32 submit_offset; /* in, offset into submit_bo */
152 uint32_t size; /* in, cmdstream size */ 167 __u32 size; /* in, cmdstream size */
153 uint32_t pad; 168 __u32 pad;
154 uint32_t nr_relocs; /* in, number of submit_reloc's */ 169 __u32 nr_relocs; /* in, number of submit_reloc's */
155 uint64_t __user relocs; /* in, ptr to array of submit_reloc's */ 170 __u64 __user relocs; /* in, ptr to array of submit_reloc's */
156}; 171};
157 172
158/* Each buffer referenced elsewhere in the cmdstream submit (ie. the 173/* Each buffer referenced elsewhere in the cmdstream submit (ie. the
@@ -172,22 +187,33 @@ struct drm_msm_gem_submit_cmd {
172#define MSM_SUBMIT_BO_FLAGS (MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE) 187#define MSM_SUBMIT_BO_FLAGS (MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE)
173 188
174struct drm_msm_gem_submit_bo { 189struct drm_msm_gem_submit_bo {
175 uint32_t flags; /* in, mask of MSM_SUBMIT_BO_x */ 190 __u32 flags; /* in, mask of MSM_SUBMIT_BO_x */
176 uint32_t handle; /* in, GEM handle */ 191 __u32 handle; /* in, GEM handle */
177 uint64_t presumed; /* in/out, presumed buffer address */ 192 __u64 presumed; /* in/out, presumed buffer address */
178}; 193};
179 194
195/* Valid submit ioctl flags: */
196#define MSM_SUBMIT_NO_IMPLICIT 0x80000000 /* disable implicit sync */
197#define MSM_SUBMIT_FENCE_FD_IN 0x40000000 /* enable input fence_fd */
198#define MSM_SUBMIT_FENCE_FD_OUT 0x20000000 /* enable output fence_fd */
199#define MSM_SUBMIT_FLAGS ( \
200 MSM_SUBMIT_NO_IMPLICIT | \
201 MSM_SUBMIT_FENCE_FD_IN | \
202 MSM_SUBMIT_FENCE_FD_OUT | \
203 0)
204
180/* Each cmdstream submit consists of a table of buffers involved, and 205/* Each cmdstream submit consists of a table of buffers involved, and
181 * one or more cmdstream buffers. This allows for conditional execution 206 * one or more cmdstream buffers. This allows for conditional execution
182 * (context-restore), and IB buffers needed for per tile/bin draw cmds. 207 * (context-restore), and IB buffers needed for per tile/bin draw cmds.
183 */ 208 */
184struct drm_msm_gem_submit { 209struct drm_msm_gem_submit {
185 uint32_t pipe; /* in, MSM_PIPE_x */ 210 __u32 flags; /* MSM_PIPE_x | MSM_SUBMIT_x */
186 uint32_t fence; /* out */ 211 __u32 fence; /* out */
187 uint32_t nr_bos; /* in, number of submit_bo's */ 212 __u32 nr_bos; /* in, number of submit_bo's */
188 uint32_t nr_cmds; /* in, number of submit_cmd's */ 213 __u32 nr_cmds; /* in, number of submit_cmd's */
189 uint64_t __user bos; /* in, ptr to array of submit_bo's */ 214 __u64 __user bos; /* in, ptr to array of submit_bo's */
190 uint64_t __user cmds; /* in, ptr to array of submit_cmd's */ 215 __u64 __user cmds; /* in, ptr to array of submit_cmd's */
216 __s32 fence_fd; /* in/out fence fd (see MSM_SUBMIT_FENCE_FD_IN/OUT) */
191}; 217};
192 218
193/* The normal way to synchronize with the GPU is just to CPU_PREP on 219/* The normal way to synchronize with the GPU is just to CPU_PREP on
@@ -198,11 +224,32 @@ struct drm_msm_gem_submit {
198 * APIs without requiring a dummy bo to synchronize on. 224 * APIs without requiring a dummy bo to synchronize on.
199 */ 225 */
200struct drm_msm_wait_fence { 226struct drm_msm_wait_fence {
201 uint32_t fence; /* in */ 227 __u32 fence; /* in */
202 uint32_t pad; 228 __u32 pad;
203 struct drm_msm_timespec timeout; /* in */ 229 struct drm_msm_timespec timeout; /* in */
204}; 230};
205 231
232/* madvise provides a way to tell the kernel in case a buffers contents
233 * can be discarded under memory pressure, which is useful for userspace
234 * bo cache where we want to optimistically hold on to buffer allocate
235 * and potential mmap, but allow the pages to be discarded under memory
236 * pressure.
237 *
238 * Typical usage would involve madvise(DONTNEED) when buffer enters BO
239 * cache, and madvise(WILLNEED) if trying to recycle buffer from BO cache.
240 * In the WILLNEED case, 'retained' indicates to userspace whether the
241 * backing pages still exist.
242 */
243#define MSM_MADV_WILLNEED 0 /* backing pages are needed, status returned in 'retained' */
244#define MSM_MADV_DONTNEED 1 /* backing pages not needed */
245#define __MSM_MADV_PURGED 2 /* internal state */
246
247struct drm_msm_gem_madvise {
248 __u32 handle; /* in, GEM handle */
249 __u32 madv; /* in, MSM_MADV_x */
250 __u32 retained; /* out, whether backing store still exists */
251};
252
206#define DRM_MSM_GET_PARAM 0x00 253#define DRM_MSM_GET_PARAM 0x00
207/* placeholder: 254/* placeholder:
208#define DRM_MSM_SET_PARAM 0x01 255#define DRM_MSM_SET_PARAM 0x01
@@ -213,7 +260,8 @@ struct drm_msm_wait_fence {
213#define DRM_MSM_GEM_CPU_FINI 0x05 260#define DRM_MSM_GEM_CPU_FINI 0x05
214#define DRM_MSM_GEM_SUBMIT 0x06 261#define DRM_MSM_GEM_SUBMIT 0x06
215#define DRM_MSM_WAIT_FENCE 0x07 262#define DRM_MSM_WAIT_FENCE 0x07
216#define DRM_MSM_NUM_IOCTLS 0x08 263#define DRM_MSM_GEM_MADVISE 0x08
264#define DRM_MSM_NUM_IOCTLS 0x09
217 265
218#define DRM_IOCTL_MSM_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GET_PARAM, struct drm_msm_param) 266#define DRM_IOCTL_MSM_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GET_PARAM, struct drm_msm_param)
219#define DRM_IOCTL_MSM_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_NEW, struct drm_msm_gem_new) 267#define DRM_IOCTL_MSM_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_NEW, struct drm_msm_gem_new)
@@ -222,5 +270,10 @@ struct drm_msm_wait_fence {
222#define DRM_IOCTL_MSM_GEM_CPU_FINI DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_GEM_CPU_FINI, struct drm_msm_gem_cpu_fini) 270#define DRM_IOCTL_MSM_GEM_CPU_FINI DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_GEM_CPU_FINI, struct drm_msm_gem_cpu_fini)
223#define DRM_IOCTL_MSM_GEM_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_SUBMIT, struct drm_msm_gem_submit) 271#define DRM_IOCTL_MSM_GEM_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_SUBMIT, struct drm_msm_gem_submit)
224#define DRM_IOCTL_MSM_WAIT_FENCE DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_WAIT_FENCE, struct drm_msm_wait_fence) 272#define DRM_IOCTL_MSM_WAIT_FENCE DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_WAIT_FENCE, struct drm_msm_wait_fence)
273#define DRM_IOCTL_MSM_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_MADVISE, struct drm_msm_gem_madvise)
274
275#if defined(__cplusplus)
276}
277#endif
225 278
226#endif /* __MSM_DRM_H__ */ 279#endif /* __MSM_DRM_H__ */
diff --git a/freedreno/msm/msm_pipe.c b/freedreno/msm/msm_pipe.c
index aa0866b4..f872e245 100644
--- a/freedreno/msm/msm_pipe.c
+++ b/freedreno/msm/msm_pipe.c
@@ -32,6 +32,25 @@
32 32
33#include "msm_priv.h" 33#include "msm_priv.h"
34 34
35static int query_param(struct fd_pipe *pipe, uint32_t param,
36 uint64_t *value)
37{
38 struct msm_pipe *msm_pipe = to_msm_pipe(pipe);
39 struct drm_msm_param req = {
40 .pipe = msm_pipe->pipe,
41 .param = param,
42 };
43 int ret;
44
45 ret = drmCommandWriteRead(pipe->dev->fd, DRM_MSM_GET_PARAM,
46 &req, sizeof(req));
47 if (ret)
48 return ret;
49
50 *value = req.value;
51
52 return 0;
53}
35 54
36static int msm_pipe_get_param(struct fd_pipe *pipe, 55static int msm_pipe_get_param(struct fd_pipe *pipe,
37 enum fd_param_id param, uint64_t *value) 56 enum fd_param_id param, uint64_t *value)
@@ -48,6 +67,10 @@ static int msm_pipe_get_param(struct fd_pipe *pipe,
48 case FD_CHIP_ID: 67 case FD_CHIP_ID:
49 *value = msm_pipe->chip_id; 68 *value = msm_pipe->chip_id;
50 return 0; 69 return 0;
70 case FD_MAX_FREQ:
71 return query_param(pipe, MSM_PARAM_MAX_FREQ, value);
72 case FD_TIMESTAMP:
73 return query_param(pipe, MSM_PARAM_TIMESTAMP, value);
51 default: 74 default:
52 ERROR_MSG("invalid param id: %d", param); 75 ERROR_MSG("invalid param id: %d", param);
53 return -1; 76 return -1;
@@ -87,21 +110,15 @@ static const struct fd_pipe_funcs funcs = {
87 .destroy = msm_pipe_destroy, 110 .destroy = msm_pipe_destroy,
88}; 111};
89 112
90static uint64_t get_param(struct fd_device *dev, uint32_t pipe, uint32_t param) 113static uint64_t get_param(struct fd_pipe *pipe, uint32_t param)
91{ 114{
92 struct drm_msm_param req = { 115 uint64_t value;
93 .pipe = pipe, 116 int ret = query_param(pipe, param, &value);
94 .param = param,
95 };
96 int ret;
97
98 ret = drmCommandWriteRead(dev->fd, DRM_MSM_GET_PARAM, &req, sizeof(req));
99 if (ret) { 117 if (ret) {
100 ERROR_MSG("get-param failed! %d (%s)", ret, strerror(errno)); 118 ERROR_MSG("get-param failed! %d (%s)", ret, strerror(errno));
101 return 0; 119 return 0;
102 } 120 }
103 121 return value;
104 return req.value;
105} 122}
106 123
107drm_private struct fd_pipe * msm_pipe_new(struct fd_device *dev, 124drm_private struct fd_pipe * msm_pipe_new(struct fd_device *dev,
@@ -123,10 +140,14 @@ drm_private struct fd_pipe * msm_pipe_new(struct fd_device *dev,
123 pipe = &msm_pipe->base; 140 pipe = &msm_pipe->base;
124 pipe->funcs = &funcs; 141 pipe->funcs = &funcs;
125 142
143 /* initialize before get_param(): */
144 pipe->dev = dev;
126 msm_pipe->pipe = pipe_id[id]; 145 msm_pipe->pipe = pipe_id[id];
127 msm_pipe->gpu_id = get_param(dev, pipe_id[id], MSM_PARAM_GPU_ID); 146
128 msm_pipe->gmem = get_param(dev, pipe_id[id], MSM_PARAM_GMEM_SIZE); 147 /* these params should be supported since the first version of drm/msm: */
129 msm_pipe->chip_id = get_param(dev, pipe_id[id], MSM_PARAM_CHIP_ID); 148 msm_pipe->gpu_id = get_param(pipe, MSM_PARAM_GPU_ID);
149 msm_pipe->gmem = get_param(pipe, MSM_PARAM_GMEM_SIZE);
150 msm_pipe->chip_id = get_param(pipe, MSM_PARAM_CHIP_ID);
130 151
131 if (! msm_pipe->gpu_id) 152 if (! msm_pipe->gpu_id)
132 goto fail; 153 goto fail;
diff --git a/freedreno/msm/msm_priv.h b/freedreno/msm/msm_priv.h
index e499b3b8..6d670aab 100644
--- a/freedreno/msm/msm_priv.h
+++ b/freedreno/msm/msm_priv.h
@@ -39,6 +39,8 @@
39 39
40struct msm_device { 40struct msm_device {
41 struct fd_device base; 41 struct fd_device base;
42 struct fd_bo_cache ring_cache;
43 unsigned ring_cnt;
42}; 44};
43 45
44static inline struct msm_device * to_msm_device(struct fd_device *x) 46static inline struct msm_device * to_msm_device(struct fd_device *x)
@@ -71,18 +73,11 @@ struct msm_bo {
71 struct fd_bo base; 73 struct fd_bo base;
72 uint64_t offset; 74 uint64_t offset;
73 uint64_t presumed; 75 uint64_t presumed;
74 /* in the common case, a bo won't be referenced by more than a single 76 /* to avoid excess hashtable lookups, cache the ring this bo was
75 * (parent) ring[*]. So to avoid looping over all the bo's in the 77 * last emitted on (since that will probably also be the next ring
76 * reloc table to find the idx of a bo that might already be in the 78 * it is emitted on)
77 * table, we cache the idx in the bo. But in order to detect the
78 * slow-path where bo is ref'd in multiple rb's, we also must track
79 * the current_ring for which the idx is valid. See bo2idx().
80 *
81 * [*] in case multiple ringbuffers, ie. one toplevel and other rb(s)
82 * used for IB target(s), the toplevel rb is the parent which is
83 * tracking bo's for the submit
84 */ 79 */
85 struct fd_ringbuffer *current_ring; 80 unsigned current_ring_seqno;
86 uint32_t idx; 81 uint32_t idx;
87}; 82};
88 83
diff --git a/freedreno/msm/msm_ringbuffer.c b/freedreno/msm/msm_ringbuffer.c
index becf2458..17194f4c 100644
--- a/freedreno/msm/msm_ringbuffer.c
+++ b/freedreno/msm/msm_ringbuffer.c
@@ -36,11 +36,30 @@
36#include "freedreno_ringbuffer.h" 36#include "freedreno_ringbuffer.h"
37#include "msm_priv.h" 37#include "msm_priv.h"
38 38
39/* represents a single cmd buffer in the submit ioctl. Each cmd buffer has
40 * a backing bo, and a reloc table.
41 */
42struct msm_cmd {
43 struct list_head list;
44
45 struct fd_ringbuffer *ring;
46 struct fd_bo *ring_bo;
47
48 /* reloc's table: */
49 struct drm_msm_gem_submit_reloc *relocs;
50 uint32_t nr_relocs, max_relocs;
51
52 uint32_t size;
53};
54
39struct msm_ringbuffer { 55struct msm_ringbuffer {
40 struct fd_ringbuffer base; 56 struct fd_ringbuffer base;
41 struct fd_bo *ring_bo;
42 57
43 /* submit ioctl related tables: */ 58 /* submit ioctl related tables:
59 * Note that bos and cmds are tracked by the parent ringbuffer, since
60 * that is global to the submit ioctl call. The reloc's table is tracked
61 * per cmd-buffer.
62 */
44 struct { 63 struct {
45 /* bo's table: */ 64 /* bo's table: */
46 struct drm_msm_gem_submit_bo *bos; 65 struct drm_msm_gem_submit_bo *bos;
@@ -49,22 +68,111 @@ struct msm_ringbuffer {
49 /* cmd's table: */ 68 /* cmd's table: */
50 struct drm_msm_gem_submit_cmd *cmds; 69 struct drm_msm_gem_submit_cmd *cmds;
51 uint32_t nr_cmds, max_cmds; 70 uint32_t nr_cmds, max_cmds;
52
53 /* reloc's table: */
54 struct drm_msm_gem_submit_reloc *relocs;
55 uint32_t nr_relocs, max_relocs;
56 } submit; 71 } submit;
57 72
58 /* should have matching entries in submit.bos: */ 73 /* should have matching entries in submit.bos: */
74 /* Note, only in parent ringbuffer */
59 struct fd_bo **bos; 75 struct fd_bo **bos;
60 uint32_t nr_bos, max_bos; 76 uint32_t nr_bos, max_bos;
61 77
62 /* should have matching entries in submit.cmds: */ 78 /* should have matching entries in submit.cmds: */
63 struct fd_ringbuffer **rings; 79 struct msm_cmd **cmds;
64 uint32_t nr_rings, max_rings; 80 uint32_t nr_cmds, max_cmds;
81
82 /* List of physical cmdstream buffers (msm_cmd) assocated with this
83 * logical fd_ringbuffer.
84 *
85 * Note that this is different from msm_ringbuffer::cmds (which
86 * shadows msm_ringbuffer::submit::cmds for tracking submit ioctl
87 * related stuff, and *only* is tracked in the parent ringbuffer.
88 * And only has "completed" cmd buffers (ie. we already know the
89 * size) added via get_cmd().
90 */
91 struct list_head cmd_list;
92
93 int is_growable;
94 unsigned cmd_count;
95
96 unsigned seqno;
97
98 /* maps fd_bo to idx: */
99 void *bo_table;
65}; 100};
66 101
102static inline struct msm_ringbuffer * to_msm_ringbuffer(struct fd_ringbuffer *x)
103{
104 return (struct msm_ringbuffer *)x;
105}
106
107#define INIT_SIZE 0x1000
108
67static pthread_mutex_t idx_lock = PTHREAD_MUTEX_INITIALIZER; 109static pthread_mutex_t idx_lock = PTHREAD_MUTEX_INITIALIZER;
110drm_private extern pthread_mutex_t table_lock;
111
112static void ring_bo_del(struct fd_device *dev, struct fd_bo *bo)
113{
114 int ret;
115
116 pthread_mutex_lock(&table_lock);
117 ret = fd_bo_cache_free(&to_msm_device(dev)->ring_cache, bo);
118 pthread_mutex_unlock(&table_lock);
119
120 if (ret == 0)
121 return;
122
123 fd_bo_del(bo);
124}
125
126static struct fd_bo * ring_bo_new(struct fd_device *dev, uint32_t size)
127{
128 struct fd_bo *bo;
129
130 bo = fd_bo_cache_alloc(&to_msm_device(dev)->ring_cache, &size, 0);
131 if (bo)
132 return bo;
133
134 bo = fd_bo_new(dev, size, 0);
135 if (!bo)
136 return NULL;
137
138 /* keep ringbuffer bo's out of the normal bo cache: */
139 bo->bo_reuse = FALSE;
140
141 return bo;
142}
143
144static void ring_cmd_del(struct msm_cmd *cmd)
145{
146 if (cmd->ring_bo)
147 ring_bo_del(cmd->ring->pipe->dev, cmd->ring_bo);
148 list_del(&cmd->list);
149 to_msm_ringbuffer(cmd->ring)->cmd_count--;
150 free(cmd->relocs);
151 free(cmd);
152}
153
154static struct msm_cmd * ring_cmd_new(struct fd_ringbuffer *ring, uint32_t size)
155{
156 struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
157 struct msm_cmd *cmd = calloc(1, sizeof(*cmd));
158
159 if (!cmd)
160 return NULL;
161
162 cmd->ring = ring;
163 cmd->ring_bo = ring_bo_new(ring->pipe->dev, size);
164 if (!cmd->ring_bo)
165 goto fail;
166
167 list_addtail(&cmd->list, &msm_ring->cmd_list);
168 msm_ring->cmd_count++;
169
170 return cmd;
171
172fail:
173 ring_cmd_del(cmd);
174 return NULL;
175}
68 176
69static void *grow(void *ptr, uint32_t nr, uint32_t *max, uint32_t sz) 177static void *grow(void *ptr, uint32_t nr, uint32_t *max, uint32_t sz)
70{ 178{
@@ -83,9 +191,11 @@ static void *grow(void *ptr, uint32_t nr, uint32_t *max, uint32_t sz)
83 (x)->nr_ ## name ++; \ 191 (x)->nr_ ## name ++; \
84}) 192})
85 193
86static inline struct msm_ringbuffer * to_msm_ringbuffer(struct fd_ringbuffer *x) 194static struct msm_cmd *current_cmd(struct fd_ringbuffer *ring)
87{ 195{
88 return (struct msm_ringbuffer *)x; 196 struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
197 assert(!LIST_IS_EMPTY(&msm_ring->cmd_list));
198 return LIST_LAST_ENTRY(&msm_ring->cmd_list, struct msm_cmd, list);
89} 199}
90 200
91static uint32_t append_bo(struct fd_ringbuffer *ring, struct fd_bo *bo) 201static uint32_t append_bo(struct fd_ringbuffer *ring, struct fd_bo *bo)
@@ -112,21 +222,24 @@ static uint32_t bo2idx(struct fd_ringbuffer *ring, struct fd_bo *bo, uint32_t fl
112 struct msm_bo *msm_bo = to_msm_bo(bo); 222 struct msm_bo *msm_bo = to_msm_bo(bo);
113 uint32_t idx; 223 uint32_t idx;
114 pthread_mutex_lock(&idx_lock); 224 pthread_mutex_lock(&idx_lock);
115 if (!msm_bo->current_ring) { 225 if (msm_bo->current_ring_seqno == msm_ring->seqno) {
116 idx = append_bo(ring, bo);
117 msm_bo->current_ring = ring;
118 msm_bo->idx = idx;
119 } else if (msm_bo->current_ring == ring) {
120 idx = msm_bo->idx; 226 idx = msm_bo->idx;
121 } else { 227 } else {
122 /* slow-path: */ 228 void *val;
123 for (idx = 0; idx < msm_ring->nr_bos; idx++) 229
124 if (msm_ring->bos[idx] == bo) 230 if (!msm_ring->bo_table)
125 break; 231 msm_ring->bo_table = drmHashCreate();
126 if (idx == msm_ring->nr_bos) { 232
127 /* not found */ 233 if (!drmHashLookup(msm_ring->bo_table, bo->handle, &val)) {
234 /* found */
235 idx = (uint32_t)(uintptr_t)val;
236 } else {
128 idx = append_bo(ring, bo); 237 idx = append_bo(ring, bo);
238 val = (void *)(uintptr_t)idx;
239 drmHashInsert(msm_ring->bo_table, bo->handle, val);
129 } 240 }
241 msm_bo->current_ring_seqno = msm_ring->seqno;
242 msm_bo->idx = idx;
130 } 243 }
131 pthread_mutex_unlock(&idx_lock); 244 pthread_mutex_unlock(&idx_lock);
132 if (flags & FD_RELOC_READ) 245 if (flags & FD_RELOC_READ)
@@ -143,17 +256,14 @@ static int check_cmd_bo(struct fd_ringbuffer *ring,
143 return msm_ring->submit.bos[cmd->submit_idx].handle == bo->handle; 256 return msm_ring->submit.bos[cmd->submit_idx].handle == bo->handle;
144} 257}
145 258
146static uint32_t offset_bytes(void *end, void *start) 259/* Ensure that submit has corresponding entry in cmds table for the
147{ 260 * target cmdstream buffer:
148 return ((char *)end) - ((char *)start); 261 */
149} 262static void get_cmd(struct fd_ringbuffer *ring, struct msm_cmd *target_cmd,
150
151static struct drm_msm_gem_submit_cmd * get_cmd(struct fd_ringbuffer *ring,
152 struct fd_ringbuffer *target_ring, struct fd_bo *target_bo,
153 uint32_t submit_offset, uint32_t size, uint32_t type) 263 uint32_t submit_offset, uint32_t size, uint32_t type)
154{ 264{
155 struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring); 265 struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
156 struct drm_msm_gem_submit_cmd *cmd = NULL; 266 struct drm_msm_gem_submit_cmd *cmd;
157 uint32_t i; 267 uint32_t i;
158 268
159 /* figure out if we already have a cmd buf: */ 269 /* figure out if we already have a cmd buf: */
@@ -162,41 +272,37 @@ static struct drm_msm_gem_submit_cmd * get_cmd(struct fd_ringbuffer *ring,
162 if ((cmd->submit_offset == submit_offset) && 272 if ((cmd->submit_offset == submit_offset) &&
163 (cmd->size == size) && 273 (cmd->size == size) &&
164 (cmd->type == type) && 274 (cmd->type == type) &&
165 check_cmd_bo(ring, cmd, target_bo)) 275 check_cmd_bo(ring, cmd, target_cmd->ring_bo))
166 break; 276 return;
167 cmd = NULL;
168 } 277 }
169 278
170 /* create cmd buf if not: */ 279 /* create cmd buf if not: */
171 if (!cmd) { 280 i = APPEND(&msm_ring->submit, cmds);
172 uint32_t idx = APPEND(&msm_ring->submit, cmds); 281 APPEND(msm_ring, cmds);
173 APPEND(msm_ring, rings); 282 msm_ring->cmds[i] = target_cmd;
174 msm_ring->rings[idx] = target_ring; 283 cmd = &msm_ring->submit.cmds[i];
175 cmd = &msm_ring->submit.cmds[idx]; 284 cmd->type = type;
176 cmd->type = type; 285 cmd->submit_idx = bo2idx(ring, target_cmd->ring_bo, FD_RELOC_READ);
177 cmd->submit_idx = bo2idx(ring, target_bo, FD_RELOC_READ); 286 cmd->submit_offset = submit_offset;
178 cmd->submit_offset = submit_offset; 287 cmd->size = size;
179 cmd->size = size; 288 cmd->pad = 0;
180 cmd->pad = 0; 289
181 } 290 target_cmd->size = size;
182
183 return cmd;
184} 291}
185 292
186static void * msm_ringbuffer_hostptr(struct fd_ringbuffer *ring) 293static void * msm_ringbuffer_hostptr(struct fd_ringbuffer *ring)
187{ 294{
188 struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring); 295 return fd_bo_map(current_cmd(ring)->ring_bo);
189 return fd_bo_map(msm_ring->ring_bo);
190} 296}
191 297
192static uint32_t find_next_reloc_idx(struct msm_ringbuffer *msm_ring, 298static uint32_t find_next_reloc_idx(struct msm_cmd *msm_cmd,
193 uint32_t start, uint32_t offset) 299 uint32_t start, uint32_t offset)
194{ 300{
195 uint32_t i; 301 uint32_t i;
196 302
197 /* a binary search would be more clever.. */ 303 /* a binary search would be more clever.. */
198 for (i = start; i < msm_ring->submit.nr_relocs; i++) { 304 for (i = start; i < msm_cmd->nr_relocs; i++) {
199 struct drm_msm_gem_submit_reloc *reloc = &msm_ring->submit.relocs[i]; 305 struct drm_msm_gem_submit_reloc *reloc = &msm_cmd->relocs[i];
200 if (reloc->submit_offset >= offset) 306 if (reloc->submit_offset >= offset)
201 return i; 307 return i;
202 } 308 }
@@ -204,38 +310,111 @@ static uint32_t find_next_reloc_idx(struct msm_ringbuffer *msm_ring,
204 return i; 310 return i;
205} 311}
206 312
313static void delete_cmds(struct msm_ringbuffer *msm_ring)
314{
315 struct msm_cmd *cmd, *tmp;
316
317 LIST_FOR_EACH_ENTRY_SAFE(cmd, tmp, &msm_ring->cmd_list, list) {
318 ring_cmd_del(cmd);
319 }
320}
321
207static void flush_reset(struct fd_ringbuffer *ring) 322static void flush_reset(struct fd_ringbuffer *ring)
208{ 323{
209 struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring); 324 struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
210 unsigned i; 325 unsigned i;
211 326
327 for (i = 0; i < msm_ring->nr_bos; i++) {
328 struct msm_bo *msm_bo = to_msm_bo(msm_ring->bos[i]);
329 msm_bo->current_ring_seqno = 0;
330 fd_bo_del(&msm_bo->base);
331 }
332
212 /* for each of the cmd buffers, clear their reloc's: */ 333 /* for each of the cmd buffers, clear their reloc's: */
213 for (i = 0; i < msm_ring->submit.nr_cmds; i++) { 334 for (i = 0; i < msm_ring->submit.nr_cmds; i++) {
214 struct msm_ringbuffer *target_ring = to_msm_ringbuffer(msm_ring->rings[i]); 335 struct msm_cmd *target_cmd = msm_ring->cmds[i];
215 target_ring->submit.nr_relocs = 0; 336 target_cmd->nr_relocs = 0;
216 } 337 }
217 338
218 msm_ring->submit.nr_relocs = 0;
219 msm_ring->submit.nr_cmds = 0; 339 msm_ring->submit.nr_cmds = 0;
220 msm_ring->submit.nr_bos = 0; 340 msm_ring->submit.nr_bos = 0;
221 msm_ring->nr_rings = 0; 341 msm_ring->nr_cmds = 0;
222 msm_ring->nr_bos = 0; 342 msm_ring->nr_bos = 0;
343
344 if (msm_ring->bo_table) {
345 drmHashDestroy(msm_ring->bo_table);
346 msm_ring->bo_table = NULL;
347 }
348
349 if (msm_ring->is_growable) {
350 delete_cmds(msm_ring);
351 } else {
352 /* in old mode, just reset the # of relocs: */
353 current_cmd(ring)->nr_relocs = 0;
354 }
355}
356
357static void finalize_current_cmd(struct fd_ringbuffer *ring, uint32_t *last_start)
358{
359 uint32_t submit_offset, size, type;
360 struct fd_ringbuffer *parent;
361
362 if (ring->parent) {
363 parent = ring->parent;
364 type = MSM_SUBMIT_CMD_IB_TARGET_BUF;
365 } else {
366 parent = ring;
367 type = MSM_SUBMIT_CMD_BUF;
368 }
369
370 submit_offset = offset_bytes(last_start, ring->start);
371 size = offset_bytes(ring->cur, last_start);
372
373 get_cmd(parent, current_cmd(ring), submit_offset, size, type);
223} 374}
224 375
225static int msm_ringbuffer_flush(struct fd_ringbuffer *ring, uint32_t *last_start) 376static void dump_submit(struct msm_ringbuffer *msm_ring)
377{
378 uint32_t i, j;
379
380 for (i = 0; i < msm_ring->submit.nr_bos; i++) {
381 struct drm_msm_gem_submit_bo *bo = &msm_ring->submit.bos[i];
382 ERROR_MSG(" bos[%d]: handle=%u, flags=%x", i, bo->handle, bo->flags);
383 }
384 for (i = 0; i < msm_ring->submit.nr_cmds; i++) {
385 struct drm_msm_gem_submit_cmd *cmd = &msm_ring->submit.cmds[i];
386 struct drm_msm_gem_submit_reloc *relocs = U642VOID(cmd->relocs);
387 ERROR_MSG(" cmd[%d]: type=%u, submit_idx=%u, submit_offset=%u, size=%u",
388 i, cmd->type, cmd->submit_idx, cmd->submit_offset, cmd->size);
389 for (j = 0; j < cmd->nr_relocs; j++) {
390 struct drm_msm_gem_submit_reloc *r = &relocs[j];
391 ERROR_MSG(" reloc[%d]: submit_offset=%u, or=%08x, shift=%d, reloc_idx=%u"
392 ", reloc_offset=%"PRIu64, j, r->submit_offset, r->or, r->shift,
393 r->reloc_idx, r->reloc_offset);
394 }
395 }
396}
397
398static int msm_ringbuffer_flush(struct fd_ringbuffer *ring, uint32_t *last_start,
399 int in_fence_fd, int *out_fence_fd)
226{ 400{
227 struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring); 401 struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
228 struct fd_bo *ring_bo = msm_ring->ring_bo;
229 struct drm_msm_gem_submit req = { 402 struct drm_msm_gem_submit req = {
230 .pipe = to_msm_pipe(ring->pipe)->pipe, 403 .flags = to_msm_pipe(ring->pipe)->pipe,
231 }; 404 };
232 uint32_t i, j, submit_offset, size; 405 uint32_t i;
233 int ret; 406 int ret;
234 407
235 submit_offset = offset_bytes(last_start, ring->start); 408 if (in_fence_fd != -1) {
236 size = offset_bytes(ring->cur, last_start); 409 req.flags |= MSM_SUBMIT_FENCE_FD_IN | MSM_SUBMIT_NO_IMPLICIT;
410 req.fence_fd = in_fence_fd;
411 }
412
413 if (out_fence_fd) {
414 req.flags |= MSM_SUBMIT_FENCE_FD_OUT;
415 }
237 416
238 get_cmd(ring, ring, ring_bo, submit_offset, size, MSM_SUBMIT_CMD_BUF); 417 finalize_current_cmd(ring, last_start);
239 418
240 /* needs to be after get_cmd() as that could create bos/cmds table: */ 419 /* needs to be after get_cmd() as that could create bos/cmds table: */
241 req.bos = VOID2U64(msm_ring->submit.bos), 420 req.bos = VOID2U64(msm_ring->submit.bos),
@@ -246,10 +425,10 @@ static int msm_ringbuffer_flush(struct fd_ringbuffer *ring, uint32_t *last_start
246 /* for each of the cmd's fix up their reloc's: */ 425 /* for each of the cmd's fix up their reloc's: */
247 for (i = 0; i < msm_ring->submit.nr_cmds; i++) { 426 for (i = 0; i < msm_ring->submit.nr_cmds; i++) {
248 struct drm_msm_gem_submit_cmd *cmd = &msm_ring->submit.cmds[i]; 427 struct drm_msm_gem_submit_cmd *cmd = &msm_ring->submit.cmds[i];
249 struct msm_ringbuffer *target_ring = to_msm_ringbuffer(msm_ring->rings[i]); 428 struct msm_cmd *msm_cmd = msm_ring->cmds[i];
250 uint32_t a = find_next_reloc_idx(target_ring, 0, cmd->submit_offset); 429 uint32_t a = find_next_reloc_idx(msm_cmd, 0, cmd->submit_offset);
251 uint32_t b = find_next_reloc_idx(target_ring, a, cmd->submit_offset + cmd->size); 430 uint32_t b = find_next_reloc_idx(msm_cmd, a, cmd->submit_offset + cmd->size);
252 cmd->relocs = VOID2U64(&target_ring->submit.relocs[a]); 431 cmd->relocs = VOID2U64(&msm_cmd->relocs[a]);
253 cmd->nr_relocs = (b > a) ? b - a : 0; 432 cmd->nr_relocs = (b > a) ? b - a : 0;
254 } 433 }
255 434
@@ -259,36 +438,17 @@ static int msm_ringbuffer_flush(struct fd_ringbuffer *ring, uint32_t *last_start
259 &req, sizeof(req)); 438 &req, sizeof(req));
260 if (ret) { 439 if (ret) {
261 ERROR_MSG("submit failed: %d (%s)", ret, strerror(errno)); 440 ERROR_MSG("submit failed: %d (%s)", ret, strerror(errno));
262 ERROR_MSG(" pipe: %u", req.pipe); 441 dump_submit(msm_ring);
263 for (i = 0; i < msm_ring->submit.nr_bos; i++) { 442 } else if (!ret) {
264 struct drm_msm_gem_submit_bo *bo = &msm_ring->submit.bos[i];
265 ERROR_MSG(" bos[%d]: handle=%u, flags=%x", i, bo->handle, bo->flags);
266 }
267 for (i = 0; i < msm_ring->submit.nr_cmds; i++) {
268 struct drm_msm_gem_submit_cmd *cmd = &msm_ring->submit.cmds[i];
269 struct drm_msm_gem_submit_reloc *relocs = U642VOID(cmd->relocs);
270 ERROR_MSG(" cmd[%d]: type=%u, submit_idx=%u, submit_offset=%u, size=%u",
271 i, cmd->type, cmd->submit_idx, cmd->submit_offset, cmd->size);
272 for (j = 0; j < cmd->nr_relocs; j++) {
273 struct drm_msm_gem_submit_reloc *r = &relocs[j];
274 ERROR_MSG(" reloc[%d]: submit_offset=%u, or=%08x, shift=%d, reloc_idx=%u"
275 ", reloc_offset=%"PRIu64, j, r->submit_offset, r->or, r->shift,
276 r->reloc_idx, r->reloc_offset);
277 }
278 }
279 } else {
280 /* update timestamp on all rings associated with submit: */ 443 /* update timestamp on all rings associated with submit: */
281 for (i = 0; i < msm_ring->submit.nr_cmds; i++) { 444 for (i = 0; i < msm_ring->submit.nr_cmds; i++) {
282 struct fd_ringbuffer *target_ring = msm_ring->rings[i]; 445 struct msm_cmd *msm_cmd = msm_ring->cmds[i];
283 if (!ret) 446 msm_cmd->ring->last_timestamp = req.fence;
284 target_ring->last_timestamp = req.fence;
285 } 447 }
286 }
287 448
288 for (i = 0; i < msm_ring->nr_bos; i++) { 449 if (out_fence_fd) {
289 struct msm_bo *msm_bo = to_msm_bo(msm_ring->bos[i]); 450 *out_fence_fd = req.fence_fd;
290 msm_bo->current_ring = NULL; 451 }
291 fd_bo_del(&msm_bo->base);
292 } 452 }
293 453
294 flush_reset(ring); 454 flush_reset(ring);
@@ -296,6 +456,13 @@ static int msm_ringbuffer_flush(struct fd_ringbuffer *ring, uint32_t *last_start
296 return ret; 456 return ret;
297} 457}
298 458
459static void msm_ringbuffer_grow(struct fd_ringbuffer *ring, uint32_t size)
460{
461 assert(to_msm_ringbuffer(ring)->is_growable);
462 finalize_current_cmd(ring, ring->last_start);
463 ring_cmd_new(ring, size);
464}
465
299static void msm_ringbuffer_reset(struct fd_ringbuffer *ring) 466static void msm_ringbuffer_reset(struct fd_ringbuffer *ring)
300{ 467{
301 flush_reset(ring); 468 flush_reset(ring);
@@ -304,14 +471,14 @@ static void msm_ringbuffer_reset(struct fd_ringbuffer *ring)
304static void msm_ringbuffer_emit_reloc(struct fd_ringbuffer *ring, 471static void msm_ringbuffer_emit_reloc(struct fd_ringbuffer *ring,
305 const struct fd_reloc *r) 472 const struct fd_reloc *r)
306{ 473{
307 struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
308 struct fd_ringbuffer *parent = ring->parent ? ring->parent : ring; 474 struct fd_ringbuffer *parent = ring->parent ? ring->parent : ring;
309 struct msm_bo *msm_bo = to_msm_bo(r->bo); 475 struct msm_bo *msm_bo = to_msm_bo(r->bo);
310 struct drm_msm_gem_submit_reloc *reloc; 476 struct drm_msm_gem_submit_reloc *reloc;
311 uint32_t idx = APPEND(&msm_ring->submit, relocs); 477 struct msm_cmd *cmd = current_cmd(ring);
478 uint32_t idx = APPEND(cmd, relocs);
312 uint32_t addr; 479 uint32_t addr;
313 480
314 reloc = &msm_ring->submit.relocs[idx]; 481 reloc = &cmd->relocs[idx];
315 482
316 reloc->reloc_idx = bo2idx(parent, r->bo, r->flags); 483 reloc->reloc_idx = bo2idx(parent, r->bo, r->flags);
317 reloc->reloc_offset = r->offset; 484 reloc->reloc_offset = r->offset;
@@ -320,48 +487,96 @@ static void msm_ringbuffer_emit_reloc(struct fd_ringbuffer *ring,
320 reloc->submit_offset = offset_bytes(ring->cur, ring->start); 487 reloc->submit_offset = offset_bytes(ring->cur, ring->start);
321 488
322 addr = msm_bo->presumed; 489 addr = msm_bo->presumed;
323 if (r->shift < 0) 490 if (reloc->shift < 0)
324 addr >>= -r->shift; 491 addr >>= -reloc->shift;
325 else 492 else
326 addr <<= r->shift; 493 addr <<= reloc->shift;
327 (*ring->cur++) = addr | r->or; 494 (*ring->cur++) = addr | r->or;
495
496 if (ring->pipe->gpu_id >= 500) {
497 struct drm_msm_gem_submit_reloc *reloc_hi;
498
499 idx = APPEND(cmd, relocs);
500
501 reloc_hi = &cmd->relocs[idx];
502
503 reloc_hi->reloc_idx = reloc->reloc_idx;
504 reloc_hi->reloc_offset = r->offset;
505 reloc_hi->or = r->orhi;
506 reloc_hi->shift = r->shift - 32;
507 reloc_hi->submit_offset = offset_bytes(ring->cur, ring->start);
508
509 addr = msm_bo->presumed >> 32;
510 if (reloc_hi->shift < 0)
511 addr >>= -reloc_hi->shift;
512 else
513 addr <<= reloc_hi->shift;
514 (*ring->cur++) = addr | r->orhi;
515 }
328} 516}
329 517
330static void msm_ringbuffer_emit_reloc_ring(struct fd_ringbuffer *ring, 518static uint32_t msm_ringbuffer_emit_reloc_ring(struct fd_ringbuffer *ring,
331 struct fd_ringmarker *target, struct fd_ringmarker *end) 519 struct fd_ringbuffer *target, uint32_t cmd_idx,
520 uint32_t submit_offset, uint32_t size)
332{ 521{
333 struct fd_bo *target_bo = to_msm_ringbuffer(target->ring)->ring_bo; 522 struct msm_cmd *cmd = NULL;
334 struct drm_msm_gem_submit_cmd *cmd; 523 uint32_t idx = 0;
335 uint32_t submit_offset, size;
336 524
337 submit_offset = offset_bytes(target->cur, target->ring->start); 525 LIST_FOR_EACH_ENTRY(cmd, &to_msm_ringbuffer(target)->cmd_list, list) {
338 size = offset_bytes(end->cur, target->cur); 526 if (idx == cmd_idx)
527 break;
528 idx++;
529 }
530
531 assert(cmd && (idx == cmd_idx));
339 532
340 cmd = get_cmd(ring, target->ring, target_bo, submit_offset, size, 533 if (idx < (to_msm_ringbuffer(target)->cmd_count - 1)) {
341 MSM_SUBMIT_CMD_IB_TARGET_BUF); 534 /* All but the last cmd buffer is fully "baked" (ie. already has
342 assert(cmd); 535 * done get_cmd() to add it to the cmds table). But in this case,
536 * the size we get is invalid (since it is calculated from the
537 * last cmd buffer):
538 */
539 size = cmd->size;
540 } else {
541 get_cmd(ring, cmd, submit_offset, size, MSM_SUBMIT_CMD_IB_TARGET_BUF);
542 }
343 543
344 msm_ringbuffer_emit_reloc(ring, &(struct fd_reloc){ 544 msm_ringbuffer_emit_reloc(ring, &(struct fd_reloc){
345 .bo = target_bo, 545 .bo = cmd->ring_bo,
346 .flags = FD_RELOC_READ, 546 .flags = FD_RELOC_READ,
347 .offset = submit_offset, 547 .offset = submit_offset,
348 }); 548 });
549
550 return size;
551}
552
553static uint32_t msm_ringbuffer_cmd_count(struct fd_ringbuffer *ring)
554{
555 return to_msm_ringbuffer(ring)->cmd_count;
349} 556}
350 557
351static void msm_ringbuffer_destroy(struct fd_ringbuffer *ring) 558static void msm_ringbuffer_destroy(struct fd_ringbuffer *ring)
352{ 559{
353 struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring); 560 struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
354 if (msm_ring->ring_bo) 561
355 fd_bo_del(msm_ring->ring_bo); 562 flush_reset(ring);
563 delete_cmds(msm_ring);
564
565 free(msm_ring->submit.cmds);
566 free(msm_ring->submit.bos);
567 free(msm_ring->bos);
568 free(msm_ring->cmds);
356 free(msm_ring); 569 free(msm_ring);
357} 570}
358 571
359static const struct fd_ringbuffer_funcs funcs = { 572static const struct fd_ringbuffer_funcs funcs = {
360 .hostptr = msm_ringbuffer_hostptr, 573 .hostptr = msm_ringbuffer_hostptr,
361 .flush = msm_ringbuffer_flush, 574 .flush = msm_ringbuffer_flush,
575 .grow = msm_ringbuffer_grow,
362 .reset = msm_ringbuffer_reset, 576 .reset = msm_ringbuffer_reset,
363 .emit_reloc = msm_ringbuffer_emit_reloc, 577 .emit_reloc = msm_ringbuffer_emit_reloc,
364 .emit_reloc_ring = msm_ringbuffer_emit_reloc_ring, 578 .emit_reloc_ring = msm_ringbuffer_emit_reloc_ring,
579 .cmd_count = msm_ringbuffer_cmd_count,
365 .destroy = msm_ringbuffer_destroy, 580 .destroy = msm_ringbuffer_destroy,
366}; 581};
367 582
@@ -377,14 +592,21 @@ drm_private struct fd_ringbuffer * msm_ringbuffer_new(struct fd_pipe *pipe,
377 goto fail; 592 goto fail;
378 } 593 }
379 594
595 if (size == 0) {
596 assert(pipe->dev->version >= FD_VERSION_UNLIMITED_CMDS);
597 size = INIT_SIZE;
598 msm_ring->is_growable = TRUE;
599 }
600
601 list_inithead(&msm_ring->cmd_list);
602 msm_ring->seqno = ++to_msm_device(pipe->dev)->ring_cnt;
603
380 ring = &msm_ring->base; 604 ring = &msm_ring->base;
381 ring->funcs = &funcs; 605 ring->funcs = &funcs;
606 ring->size = size;
607 ring->pipe = pipe; /* needed in ring_cmd_new() */
382 608
383 msm_ring->ring_bo = fd_bo_new(pipe->dev, size, 0); 609 ring_cmd_new(ring, size);
384 if (!msm_ring->ring_bo) {
385 ERROR_MSG("ringbuffer allocation failed");
386 goto fail;
387 }
388 610
389 return ring; 611 return ring;
390fail: 612fail:
diff --git a/include/drm/README b/include/drm/README
new file mode 100644
index 00000000..a50b02c0
--- /dev/null
+++ b/include/drm/README
@@ -0,0 +1,157 @@
1What are these headers ?
2------------------------
3This is the canonical source of drm headers that user space should use for
4communicating with the kernel DRM subsystem.
5
6They flow from the kernel, thus any changes must be merged there first.
7Do _not_ attempt to "fix" these by deviating from the kernel ones !
8
9
10Non-linux platforms - changes/patches
11-------------------------------------
12If your platform has local changes, please send them upstream for inclusion.
13Even if your patches don't get accepted in their current form, devs will
14give you feedback on how to address things properly.
15
16git send-email --subject-prefix="PATCH libdrm" your patches to dri-devel
17mailing list.
18
19Before doing so, please consider the following:
20 - Have the [libdrm vs kernel] headers on your platform deviated ?
21Consider unifying them first.
22
23 - Have you introduced additional ABI that's not available in Linux ?
24Propose it for [Linux kernel] upstream inclusion.
25If that doesn't work out (hopefully it never does), move it to another header
26and/or keep the change(s) local ?
27
28 - Are your changes DRI1/UMS specific ?
29There is virtually no interest/power in keeping those legacy interfaces. They
30are around due to the kernel "thou shalt not break existing user space" rule.
31
32Consider porting the driver to DRI2/KMS - all (almost?) sensible hardware is
33capable of supporting those.
34
35
36Which headers go where ?
37------------------------
38A snipped from the, now removed, Makefile.am used to state:
39
40 XXX airlied says, nothing besides *_drm.h and drm*.h should be necessary.
41 however, r300 and via need their reg headers installed in order to build.
42 better solutions are welcome.
43
44Obviously the r300 and via headers are no longer around ;-)
45
46Reason behind is that the drm headers can be used as a basic communications
47channel with the respective kernel modules. If more advanced functionality is
48required one can pull the specific libdrm_$driver which is free to pull
49additional files from the kernel.
50
51For example: nouveau has nouveau/nvif/*.h while vc4 has vc4/*.h
52
53If your driver is still in prototyping/staging state, consider moving the
54$driver_drm.h into $driver and _not_ installing it. An header providing opaque
55definitions and access [via $driver_drmif.h or similar] would be better fit.
56
57
58When and which headers to update
59--------------------------------
60Ideally all files will be synced (updated) with the latest released kernel on
61each libdrm release. Sadly that's not yet possible since quite a few headers
62differ significantly - see Outdated or Broken Headers section below.
63
64That said, it's up-to the individual developers to sync with newer version
65(from drm-next) as they see fit.
66
67
68When and how to update these files
69----------------------------------
70In order to update the files do the following:
71 - Switch to a Linux kernel tree/branch which is not rebased.
72For example: airlied/drm-next
73 - Install the headers via `make headers_install' to a separate location.
74 - Copy the drm header[s] + git add + git commit.
75 - Note: Your commit message must include:
76 a) Brief summary on the delta. If there's any change that looks like an
77API/ABI break one _must_ explicitly state why it's safe to do so.
78 b) "Generated using make headers_install."
79 c) "Generated from $tree/branch commit $sha"
80
81
82Outdated or Broken Headers
83--------------------------
84This section contains a list of headers and the respective "issues" they might
85have relative to their kernel equivalent.
86
87Nearly all headers:
88 - Missing extern C notation.
89Status: Trivial.
90
91Most UMS headers:
92 - Not using fixed size integers - compat ioctls are broken.
93Status: ?
94Promote to fixed size ints, which match the current (32bit) ones.
95
96
97amdgpu_drm.h
98 - Using the stdint.h uint*_t over the respective __u* ones
99Status: Trivial.
100
101drm_mode.h
102 - Missing DPI encode/connector pair.
103Status: Trivial.
104
105i915_drm.h
106 - Missing PARAMS - HAS_POOLED_EU, MIN_EU_IN_POOL CONTEXT_PARAM_NO_ERROR_CAPTURE
107Status: Trivial.
108
109mga_drm.h
110 - Typo fix, use struct over typedef.
111Status: Trivial.
112
113nouveau_drm.h
114 - Missing macros NOUVEAU_GETPARAM*, NOUVEAU_DRM_HEADER_PATCHLEVEL, structs,
115enums, using stdint.h over the __u* types.
116Status: ?
117
118qxl_drm.h
119 - Using the stdint.h uint*_t over the respective __u* ones
120Status: Trivial.
121
122r128_drm.h
123 - Broken compat ioctls.
124
125radeon_drm.h
126 - Missing RADEON_TILING_R600_NO_SCANOUT, CIK_TILE_MODE_*, broken UMS ioctls,
127using stdint types.
128 - Both kernel and libdrm: missing padding -
129drm_radeon_gem_{create,{g,s}et_tiling,set_domain} others ?
130Status: ?
131
132savage_drm.h
133 - Renamed ioctls - DRM_IOCTL_SAVAGE_{,BCI}_EVENT_EMIT, compat ioctls are broken.
134Status: ?
135
136sis_drm.h
137 - Borken ioctls + libdrm uses int vs kernel long
138Status: ?
139
140via_drm.h
141 - Borken ioctls - libdrm int vs kernel long
142Status: ?
143
144
145omap_drm.h (living in $TOP/omap)
146 - License mismatch, missing DRM_IOCTL_OMAP_GEM_NEW and related struct
147Status: ?
148
149msm_drm.h (located in $TOP/freedreno/msm/)
150 - License mismatch, missing MSM_PIPE_*, MSM_SUBMIT_*. Renamed
151drm_msm_gem_submit::flags, missing drm_msm_gem_submit::fence_fd.
152Status: ?
153
154exynos_drm.h (living in $TOP/exynos)
155 - License mismatch, now using fixed size ints (but not everywhere). Lots of
156new stuff.
157Status: ?
diff --git a/include/drm/amdgpu_drm.h b/include/drm/amdgpu_drm.h
index fbdd1185..d8f24976 100644
--- a/include/drm/amdgpu_drm.h
+++ b/include/drm/amdgpu_drm.h
@@ -34,6 +34,10 @@
34 34
35#include "drm.h" 35#include "drm.h"
36 36
37#if defined(__cplusplus)
38extern "C" {
39#endif
40
37#define DRM_AMDGPU_GEM_CREATE 0x00 41#define DRM_AMDGPU_GEM_CREATE 0x00
38#define DRM_AMDGPU_GEM_MMAP 0x01 42#define DRM_AMDGPU_GEM_MMAP 0x01
39#define DRM_AMDGPU_CTX 0x02 43#define DRM_AMDGPU_CTX 0x02
@@ -73,6 +77,8 @@
73#define AMDGPU_GEM_CREATE_NO_CPU_ACCESS (1 << 1) 77#define AMDGPU_GEM_CREATE_NO_CPU_ACCESS (1 << 1)
74/* Flag that USWC attributes should be used for GTT */ 78/* Flag that USWC attributes should be used for GTT */
75#define AMDGPU_GEM_CREATE_CPU_GTT_USWC (1 << 2) 79#define AMDGPU_GEM_CREATE_CPU_GTT_USWC (1 << 2)
80/* Flag that the memory should be in VRAM and cleared */
81#define AMDGPU_GEM_CREATE_VRAM_CLEARED (1 << 3)
76 82
77struct drm_amdgpu_gem_create_in { 83struct drm_amdgpu_gem_create_in {
78 /** the requested memory size */ 84 /** the requested memory size */
@@ -483,6 +489,22 @@ struct drm_amdgpu_cs_chunk_data {
483#define AMDGPU_INFO_MMR_SH_INDEX_SHIFT 8 489#define AMDGPU_INFO_MMR_SH_INDEX_SHIFT 8
484#define AMDGPU_INFO_MMR_SH_INDEX_MASK 0xff 490#define AMDGPU_INFO_MMR_SH_INDEX_MASK 0xff
485 491
492struct drm_amdgpu_query_fw {
493 /** AMDGPU_INFO_FW_* */
494 uint32_t fw_type;
495 /**
496 * Index of the IP if there are more IPs of
497 * the same type.
498 */
499 uint32_t ip_instance;
500 /**
501 * Index of the engine. Whether this is used depends
502 * on the firmware type. (e.g. MEC, SDMA)
503 */
504 uint32_t index;
505 uint32_t _pad;
506};
507
486/* Input structure for the INFO ioctl */ 508/* Input structure for the INFO ioctl */
487struct drm_amdgpu_info { 509struct drm_amdgpu_info {
488 /* Where the return value will be stored */ 510 /* Where the return value will be stored */
@@ -518,21 +540,7 @@ struct drm_amdgpu_info {
518 uint32_t flags; 540 uint32_t flags;
519 } read_mmr_reg; 541 } read_mmr_reg;
520 542
521 struct { 543 struct drm_amdgpu_query_fw query_fw;
522 /** AMDGPU_INFO_FW_* */
523 uint32_t fw_type;
524 /**
525 * Index of the IP if there are more IPs of
526 * the same type.
527 */
528 uint32_t ip_instance;
529 /**
530 * Index of the engine. Whether this is used depends
531 * on the firmware type. (e.g. MEC, SDMA)
532 */
533 uint32_t index;
534 uint32_t _pad;
535 } query_fw;
536 }; 544 };
537}; 545};
538 546
@@ -640,6 +648,10 @@ struct drm_amdgpu_info_hw_ip {
640#define AMDGPU_FAMILY_CI 120 /* Bonaire, Hawaii */ 648#define AMDGPU_FAMILY_CI 120 /* Bonaire, Hawaii */
641#define AMDGPU_FAMILY_KV 125 /* Kaveri, Kabini, Mullins */ 649#define AMDGPU_FAMILY_KV 125 /* Kaveri, Kabini, Mullins */
642#define AMDGPU_FAMILY_VI 130 /* Iceland, Tonga */ 650#define AMDGPU_FAMILY_VI 130 /* Iceland, Tonga */
643#define AMDGPU_FAMILY_CZ 135 /* Carrizo */ 651#define AMDGPU_FAMILY_CZ 135 /* Carrizo, Stoney */
652
653#if defined(__cplusplus)
654}
655#endif
644 656
645#endif 657#endif
diff --git a/include/drm/drm.h b/include/drm/drm.h
index d36331a8..f6fd5c2c 100644
--- a/include/drm/drm.h
+++ b/include/drm/drm.h
@@ -36,7 +36,7 @@
36#ifndef _DRM_H_ 36#ifndef _DRM_H_
37#define _DRM_H_ 37#define _DRM_H_
38 38
39#if defined(__linux__) 39#if defined(__linux__)
40 40
41#include <linux/types.h> 41#include <linux/types.h>
42#include <asm/ioctl.h> 42#include <asm/ioctl.h>
@@ -54,10 +54,15 @@ typedef int32_t __s32;
54typedef uint32_t __u32; 54typedef uint32_t __u32;
55typedef int64_t __s64; 55typedef int64_t __s64;
56typedef uint64_t __u64; 56typedef uint64_t __u64;
57typedef size_t __kernel_size_t;
57typedef unsigned long drm_handle_t; 58typedef unsigned long drm_handle_t;
58 59
59#endif 60#endif
60 61
62#if defined(__cplusplus)
63extern "C" {
64#endif
65
61#define DRM_NAME "drm" /**< Name in kernel, /dev, and /proc */ 66#define DRM_NAME "drm" /**< Name in kernel, /dev, and /proc */
62#define DRM_MIN_ORDER 5 /**< At least 2^5 bytes = 32 bytes */ 67#define DRM_MIN_ORDER 5 /**< At least 2^5 bytes = 32 bytes */
63#define DRM_MAX_ORDER 22 /**< Up to 2^22 bytes = 4MB */ 68#define DRM_MAX_ORDER 22 /**< Up to 2^22 bytes = 4MB */
@@ -129,11 +134,11 @@ struct drm_version {
129 int version_major; /**< Major version */ 134 int version_major; /**< Major version */
130 int version_minor; /**< Minor version */ 135 int version_minor; /**< Minor version */
131 int version_patchlevel; /**< Patch level */ 136 int version_patchlevel; /**< Patch level */
132 size_t name_len; /**< Length of name buffer */ 137 __kernel_size_t name_len; /**< Length of name buffer */
133 char *name; /**< Name of driver */ 138 char *name; /**< Name of driver */
134 size_t date_len; /**< Length of date buffer */ 139 __kernel_size_t date_len; /**< Length of date buffer */
135 char *date; /**< User-space buffer to hold date */ 140 char *date; /**< User-space buffer to hold date */
136 size_t desc_len; /**< Length of desc buffer */ 141 __kernel_size_t desc_len; /**< Length of desc buffer */
137 char *desc; /**< User-space buffer to hold desc */ 142 char *desc; /**< User-space buffer to hold desc */
138}; 143};
139 144
@@ -143,7 +148,7 @@ struct drm_version {
143 * \sa drmGetBusid() and drmSetBusId(). 148 * \sa drmGetBusid() and drmSetBusId().
144 */ 149 */
145struct drm_unique { 150struct drm_unique {
146 size_t unique_len; /**< Length of unique */ 151 __kernel_size_t unique_len; /**< Length of unique */
147 char *unique; /**< Unique name for driver instantiation */ 152 char *unique; /**< Unique name for driver instantiation */
148}; 153};
149 154
@@ -180,8 +185,7 @@ enum drm_map_type {
180 _DRM_SHM = 2, /**< shared, cached */ 185 _DRM_SHM = 2, /**< shared, cached */
181 _DRM_AGP = 3, /**< AGP/GART */ 186 _DRM_AGP = 3, /**< AGP/GART */
182 _DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */ 187 _DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */
183 _DRM_CONSISTENT = 5, /**< Consistent memory for PCI DMA */ 188 _DRM_CONSISTENT = 5 /**< Consistent memory for PCI DMA */
184 _DRM_GEM = 6 /**< GEM object */
185}; 189};
186 190
187/** 191/**
@@ -467,12 +471,15 @@ struct drm_irq_busid {
467enum drm_vblank_seq_type { 471enum drm_vblank_seq_type {
468 _DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */ 472 _DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */
469 _DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */ 473 _DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */
474 /* bits 1-6 are reserved for high crtcs */
475 _DRM_VBLANK_HIGH_CRTC_MASK = 0x0000003e,
470 _DRM_VBLANK_EVENT = 0x4000000, /**< Send event instead of blocking */ 476 _DRM_VBLANK_EVENT = 0x4000000, /**< Send event instead of blocking */
471 _DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */ 477 _DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */
472 _DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */ 478 _DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */
473 _DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */ 479 _DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */
474 _DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking, unsupported */ 480 _DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking, unsupported */
475}; 481};
482#define _DRM_VBLANK_HIGH_CRTC_SHIFT 1
476 483
477#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE) 484#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE)
478#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_EVENT | _DRM_VBLANK_SIGNAL | \ 485#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_EVENT | _DRM_VBLANK_SIGNAL | \
@@ -612,6 +619,29 @@ struct drm_gem_open {
612 __u64 size; 619 __u64 size;
613}; 620};
614 621
622#define DRM_CAP_DUMB_BUFFER 0x1
623#define DRM_CAP_VBLANK_HIGH_CRTC 0x2
624#define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3
625#define DRM_CAP_DUMB_PREFER_SHADOW 0x4
626#define DRM_CAP_PRIME 0x5
627#define DRM_PRIME_CAP_IMPORT 0x1
628#define DRM_PRIME_CAP_EXPORT 0x2
629#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6
630#define DRM_CAP_ASYNC_PAGE_FLIP 0x7
631/*
632 * The CURSOR_WIDTH and CURSOR_HEIGHT capabilities return a valid widthxheight
633 * combination for the hardware cursor. The intention is that a hardware
634 * agnostic userspace can query a cursor plane size to use.
635 *
636 * Note that the cross-driver contract is to merely return a valid size;
637 * drivers are free to attach another meaning on top, eg. i915 returns the
638 * maximum plane size.
639 */
640#define DRM_CAP_CURSOR_WIDTH 0x8
641#define DRM_CAP_CURSOR_HEIGHT 0x9
642#define DRM_CAP_ADDFB2_MODIFIERS 0x10
643#define DRM_CAP_PAGE_FLIP_TARGET 0x11
644
615/** DRM_IOCTL_GET_CAP ioctl argument type */ 645/** DRM_IOCTL_GET_CAP ioctl argument type */
616struct drm_get_cap { 646struct drm_get_cap {
617 __u64 capability; 647 __u64 capability;
@@ -642,19 +672,13 @@ struct drm_get_cap {
642 */ 672 */
643#define DRM_CLIENT_CAP_ATOMIC 3 673#define DRM_CLIENT_CAP_ATOMIC 3
644 674
645/**
646 * DRM_CLIENT_CAP_ATOMIC
647 *
648 * If set to 1, the DRM core will allow atomic modesetting requests.
649 */
650#define DRM_CLIENT_CAP_ATOMIC 3
651
652/** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */ 675/** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
653struct drm_set_client_cap { 676struct drm_set_client_cap {
654 __u64 capability; 677 __u64 capability;
655 __u64 value; 678 __u64 value;
656}; 679};
657 680
681#define DRM_RDWR O_RDWR
658#define DRM_CLOEXEC O_CLOEXEC 682#define DRM_CLOEXEC O_CLOEXEC
659struct drm_prime_handle { 683struct drm_prime_handle {
660 __u32 handle; 684 __u32 handle;
@@ -666,8 +690,16 @@ struct drm_prime_handle {
666 __s32 fd; 690 __s32 fd;
667}; 691};
668 692
693#if defined(__cplusplus)
694}
695#endif
696
669#include "drm_mode.h" 697#include "drm_mode.h"
670 698
699#if defined(__cplusplus)
700extern "C" {
701#endif
702
671#define DRM_IOCTL_BASE 'd' 703#define DRM_IOCTL_BASE 'd'
672#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr) 704#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr)
673#define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type) 705#define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type)
@@ -750,8 +782,8 @@ struct drm_prime_handle {
750#define DRM_IOCTL_MODE_SETGAMMA DRM_IOWR(0xA5, struct drm_mode_crtc_lut) 782#define DRM_IOCTL_MODE_SETGAMMA DRM_IOWR(0xA5, struct drm_mode_crtc_lut)
751#define DRM_IOCTL_MODE_GETENCODER DRM_IOWR(0xA6, struct drm_mode_get_encoder) 783#define DRM_IOCTL_MODE_GETENCODER DRM_IOWR(0xA6, struct drm_mode_get_encoder)
752#define DRM_IOCTL_MODE_GETCONNECTOR DRM_IOWR(0xA7, struct drm_mode_get_connector) 784#define DRM_IOCTL_MODE_GETCONNECTOR DRM_IOWR(0xA7, struct drm_mode_get_connector)
753#define DRM_IOCTL_MODE_ATTACHMODE DRM_IOWR(0xA8, struct drm_mode_mode_cmd) 785#define DRM_IOCTL_MODE_ATTACHMODE DRM_IOWR(0xA8, struct drm_mode_mode_cmd) /* deprecated (never worked) */
754#define DRM_IOCTL_MODE_DETACHMODE DRM_IOWR(0xA9, struct drm_mode_mode_cmd) 786#define DRM_IOCTL_MODE_DETACHMODE DRM_IOWR(0xA9, struct drm_mode_mode_cmd) /* deprecated (never worked) */
755 787
756#define DRM_IOCTL_MODE_GETPROPERTY DRM_IOWR(0xAA, struct drm_mode_get_property) 788#define DRM_IOCTL_MODE_GETPROPERTY DRM_IOWR(0xAA, struct drm_mode_get_property)
757#define DRM_IOCTL_MODE_SETPROPERTY DRM_IOWR(0xAB, struct drm_mode_connector_set_property) 789#define DRM_IOCTL_MODE_SETPROPERTY DRM_IOWR(0xAB, struct drm_mode_connector_set_property)
@@ -778,7 +810,7 @@ struct drm_prime_handle {
778 810
779/** 811/**
780 * Device specific ioctls should only be in their respective headers 812 * Device specific ioctls should only be in their respective headers
781 * The device specific ioctl range is from 0x40 to 0x99. 813 * The device specific ioctl range is from 0x40 to 0x9f.
782 * Generic IOCTLS restart at 0xA0. 814 * Generic IOCTLS restart at 0xA0.
783 * 815 *
784 * \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and 816 * \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and
@@ -816,18 +848,6 @@ struct drm_event_vblank {
816 __u32 reserved; 848 __u32 reserved;
817}; 849};
818 850
819#define DRM_CAP_DUMB_BUFFER 0x1
820#define DRM_CAP_VBLANK_HIGH_CRTC 0x2
821#define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3
822#define DRM_CAP_DUMB_PREFER_SHADOW 0x4
823#define DRM_CAP_PRIME 0x5
824#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6
825#define DRM_CAP_ASYNC_PAGE_FLIP 0x7
826#define DRM_CAP_ADDFB2_MODIFIERS 0x10
827
828#define DRM_PRIME_CAP_IMPORT 0x1
829#define DRM_PRIME_CAP_EXPORT 0x2
830
831/* typedef area */ 851/* typedef area */
832typedef struct drm_clip_rect drm_clip_rect_t; 852typedef struct drm_clip_rect drm_clip_rect_t;
833typedef struct drm_drawable_info drm_drawable_info_t; 853typedef struct drm_drawable_info drm_drawable_info_t;
@@ -871,4 +891,8 @@ typedef struct drm_agp_info drm_agp_info_t;
871typedef struct drm_scatter_gather drm_scatter_gather_t; 891typedef struct drm_scatter_gather drm_scatter_gather_t;
872typedef struct drm_set_version drm_set_version_t; 892typedef struct drm_set_version drm_set_version_t;
873 893
894#if defined(__cplusplus)
895}
896#endif
897
874#endif 898#endif
diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h
index e741b09a..4d8da699 100644
--- a/include/drm/drm_fourcc.h
+++ b/include/drm/drm_fourcc.h
@@ -24,16 +24,23 @@
24#ifndef DRM_FOURCC_H 24#ifndef DRM_FOURCC_H
25#define DRM_FOURCC_H 25#define DRM_FOURCC_H
26 26
27#include <inttypes.h> 27#include "drm.h"
28 28
29#define fourcc_code(a,b,c,d) ((uint32_t)(a) | ((uint32_t)(b) << 8) | \ 29#define fourcc_code(a, b, c, d) ((__u32)(a) | ((__u32)(b) << 8) | \
30 ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24)) 30 ((__u32)(c) << 16) | ((__u32)(d) << 24))
31 31
32#define DRM_FORMAT_BIG_ENDIAN (1<<31) /* format is big endian instead of little endian */ 32#define DRM_FORMAT_BIG_ENDIAN (1<<31) /* format is big endian instead of little endian */
33 33
34/* color index */ 34/* color index */
35#define DRM_FORMAT_C8 fourcc_code('C', '8', ' ', ' ') /* [7:0] C */ 35#define DRM_FORMAT_C8 fourcc_code('C', '8', ' ', ' ') /* [7:0] C */
36 36
37/* 8 bpp Red */
38#define DRM_FORMAT_R8 fourcc_code('R', '8', ' ', ' ') /* [7:0] R */
39
40/* 16 bpp RG */
41#define DRM_FORMAT_RG88 fourcc_code('R', 'G', '8', '8') /* [15:0] R:G 8:8 little endian */
42#define DRM_FORMAT_GR88 fourcc_code('G', 'R', '8', '8') /* [15:0] G:R 8:8 little endian */
43
37/* 8 bpp RGB */ 44/* 8 bpp RGB */
38#define DRM_FORMAT_RGB332 fourcc_code('R', 'G', 'B', '8') /* [7:0] R:G:B 3:3:2 */ 45#define DRM_FORMAT_RGB332 fourcc_code('R', 'G', 'B', '8') /* [7:0] R:G:B 3:3:2 */
39#define DRM_FORMAT_BGR233 fourcc_code('B', 'G', 'R', '8') /* [7:0] B:G:R 2:3:3 */ 46#define DRM_FORMAT_BGR233 fourcc_code('B', 'G', 'R', '8') /* [7:0] B:G:R 2:3:3 */
@@ -106,6 +113,8 @@
106#define DRM_FORMAT_NV21 fourcc_code('N', 'V', '2', '1') /* 2x2 subsampled Cb:Cr plane */ 113#define DRM_FORMAT_NV21 fourcc_code('N', 'V', '2', '1') /* 2x2 subsampled Cb:Cr plane */
107#define DRM_FORMAT_NV16 fourcc_code('N', 'V', '1', '6') /* 2x1 subsampled Cr:Cb plane */ 114#define DRM_FORMAT_NV16 fourcc_code('N', 'V', '1', '6') /* 2x1 subsampled Cr:Cb plane */
108#define DRM_FORMAT_NV61 fourcc_code('N', 'V', '6', '1') /* 2x1 subsampled Cb:Cr plane */ 115#define DRM_FORMAT_NV61 fourcc_code('N', 'V', '6', '1') /* 2x1 subsampled Cb:Cr plane */
116#define DRM_FORMAT_NV24 fourcc_code('N', 'V', '2', '4') /* non-subsampled Cr:Cb plane */
117#define DRM_FORMAT_NV42 fourcc_code('N', 'V', '4', '2') /* non-subsampled Cb:Cr plane */
109 118
110/* 119/*
111 * 3 plane YCbCr 120 * 3 plane YCbCr
@@ -216,7 +225,7 @@
216 * - multiple of 128 pixels for the width 225 * - multiple of 128 pixels for the width
217 * - multiple of 32 pixels for the height 226 * - multiple of 32 pixels for the height
218 * 227 *
219 * For more information: see http://linuxtv.org/downloads/v4l-dvb-apis/re32.html 228 * For more information: see https://linuxtv.org/downloads/v4l-dvb-apis/re32.html
220 */ 229 */
221#define DRM_FORMAT_MOD_SAMSUNG_64_32_TILE fourcc_mod_code(SAMSUNG, 1) 230#define DRM_FORMAT_MOD_SAMSUNG_64_32_TILE fourcc_mod_code(SAMSUNG, 1)
222 231
diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h
index 59e67b14..6708e2b7 100644
--- a/include/drm/drm_mode.h
+++ b/include/drm/drm_mode.h
@@ -27,6 +27,12 @@
27#ifndef _DRM_MODE_H 27#ifndef _DRM_MODE_H
28#define _DRM_MODE_H 28#define _DRM_MODE_H
29 29
30#include "drm.h"
31
32#if defined(__cplusplus)
33extern "C" {
34#endif
35
30#define DRM_DISPLAY_INFO_LEN 32 36#define DRM_DISPLAY_INFO_LEN 32
31#define DRM_CONNECTOR_NAME_LEN 32 37#define DRM_CONNECTOR_NAME_LEN 32
32#define DRM_DISPLAY_MODE_LEN 32 38#define DRM_DISPLAY_MODE_LEN 32
@@ -56,6 +62,10 @@
56#define DRM_MODE_FLAG_PIXMUX (1<<11) 62#define DRM_MODE_FLAG_PIXMUX (1<<11)
57#define DRM_MODE_FLAG_DBLCLK (1<<12) 63#define DRM_MODE_FLAG_DBLCLK (1<<12)
58#define DRM_MODE_FLAG_CLKDIV2 (1<<13) 64#define DRM_MODE_FLAG_CLKDIV2 (1<<13)
65 /*
66 * When adding a new stereo mode don't forget to adjust DRM_MODE_FLAGS_3D_MAX
67 * (define not exposed to user space).
68 */
59#define DRM_MODE_FLAG_3D_MASK (0x1f<<14) 69#define DRM_MODE_FLAG_3D_MASK (0x1f<<14)
60#define DRM_MODE_FLAG_3D_NONE (0<<14) 70#define DRM_MODE_FLAG_3D_NONE (0<<14)
61#define DRM_MODE_FLAG_3D_FRAME_PACKING (1<<14) 71#define DRM_MODE_FLAG_3D_FRAME_PACKING (1<<14)
@@ -82,6 +92,11 @@
82#define DRM_MODE_SCALE_CENTER 2 /* Centered, no scaling */ 92#define DRM_MODE_SCALE_CENTER 2 /* Centered, no scaling */
83#define DRM_MODE_SCALE_ASPECT 3 /* Full screen, preserve aspect */ 93#define DRM_MODE_SCALE_ASPECT 3 /* Full screen, preserve aspect */
84 94
95/* Picture aspect ratio options */
96#define DRM_MODE_PICTURE_ASPECT_NONE 0
97#define DRM_MODE_PICTURE_ASPECT_4_3 1
98#define DRM_MODE_PICTURE_ASPECT_16_9 2
99
85/* Dithering mode options */ 100/* Dithering mode options */
86#define DRM_MODE_DITHERING_OFF 0 101#define DRM_MODE_DITHERING_OFF 0
87#define DRM_MODE_DITHERING_ON 1 102#define DRM_MODE_DITHERING_ON 1
@@ -102,8 +117,16 @@
102 117
103struct drm_mode_modeinfo { 118struct drm_mode_modeinfo {
104 __u32 clock; 119 __u32 clock;
105 __u16 hdisplay, hsync_start, hsync_end, htotal, hskew; 120 __u16 hdisplay;
106 __u16 vdisplay, vsync_start, vsync_end, vtotal, vscan; 121 __u16 hsync_start;
122 __u16 hsync_end;
123 __u16 htotal;
124 __u16 hskew;
125 __u16 vdisplay;
126 __u16 vsync_start;
127 __u16 vsync_end;
128 __u16 vtotal;
129 __u16 vscan;
107 130
108 __u32 vrefresh; 131 __u32 vrefresh;
109 132
@@ -121,8 +144,10 @@ struct drm_mode_card_res {
121 __u32 count_crtcs; 144 __u32 count_crtcs;
122 __u32 count_connectors; 145 __u32 count_connectors;
123 __u32 count_encoders; 146 __u32 count_encoders;
124 __u32 min_width, max_width; 147 __u32 min_width;
125 __u32 min_height, max_height; 148 __u32 max_width;
149 __u32 min_height;
150 __u32 max_height;
126}; 151};
127 152
128struct drm_mode_crtc { 153struct drm_mode_crtc {
@@ -132,30 +157,35 @@ struct drm_mode_crtc {
132 __u32 crtc_id; /**< Id */ 157 __u32 crtc_id; /**< Id */
133 __u32 fb_id; /**< Id of framebuffer */ 158 __u32 fb_id; /**< Id of framebuffer */
134 159
135 __u32 x, y; /**< Position on the frameuffer */ 160 __u32 x; /**< x Position on the framebuffer */
161 __u32 y; /**< y Position on the framebuffer */
136 162
137 __u32 gamma_size; 163 __u32 gamma_size;
138 __u32 mode_valid; 164 __u32 mode_valid;
139 struct drm_mode_modeinfo mode; 165 struct drm_mode_modeinfo mode;
140}; 166};
141 167
142#define DRM_MODE_PRESENT_TOP_FIELD (1<<0) 168#define DRM_MODE_PRESENT_TOP_FIELD (1<<0)
143#define DRM_MODE_PRESENT_BOTTOM_FIELD (1<<1) 169#define DRM_MODE_PRESENT_BOTTOM_FIELD (1<<1)
144 170
145/* Planes blend with or override other bits on the CRTC */ 171/* Planes blend with or override other bits on the CRTC */
146struct drm_mode_set_plane { 172struct drm_mode_set_plane {
147 __u32 plane_id; 173 __u32 plane_id;
148 __u32 crtc_id; 174 __u32 crtc_id;
149 __u32 fb_id; /* fb object contains surface format type */ 175 __u32 fb_id; /* fb object contains surface format type */
150 __u32 flags; 176 __u32 flags; /* see above flags */
151 177
152 /* Signed dest location allows it to be partially off screen */ 178 /* Signed dest location allows it to be partially off screen */
153 __s32 crtc_x, crtc_y; 179 __s32 crtc_x;
154 __u32 crtc_w, crtc_h; 180 __s32 crtc_y;
181 __u32 crtc_w;
182 __u32 crtc_h;
155 183
156 /* Source values are 16.16 fixed point */ 184 /* Source values are 16.16 fixed point */
157 __u32 src_x, src_y; 185 __u32 src_x;
158 __u32 src_h, src_w; 186 __u32 src_y;
187 __u32 src_h;
188 __u32 src_w;
159}; 189};
160 190
161struct drm_mode_get_plane { 191struct drm_mode_get_plane {
@@ -184,6 +214,7 @@ struct drm_mode_get_plane_res {
184#define DRM_MODE_ENCODER_VIRTUAL 5 214#define DRM_MODE_ENCODER_VIRTUAL 5
185#define DRM_MODE_ENCODER_DSI 6 215#define DRM_MODE_ENCODER_DSI 6
186#define DRM_MODE_ENCODER_DPMST 7 216#define DRM_MODE_ENCODER_DPMST 7
217#define DRM_MODE_ENCODER_DPI 8
187 218
188struct drm_mode_get_encoder { 219struct drm_mode_get_encoder {
189 __u32 encoder_id; 220 __u32 encoder_id;
@@ -223,6 +254,7 @@ struct drm_mode_get_encoder {
223#define DRM_MODE_CONNECTOR_eDP 14 254#define DRM_MODE_CONNECTOR_eDP 14
224#define DRM_MODE_CONNECTOR_VIRTUAL 15 255#define DRM_MODE_CONNECTOR_VIRTUAL 15
225#define DRM_MODE_CONNECTOR_DSI 16 256#define DRM_MODE_CONNECTOR_DSI 16
257#define DRM_MODE_CONNECTOR_DPI 17
226 258
227struct drm_mode_get_connector { 259struct drm_mode_get_connector {
228 260
@@ -241,8 +273,11 @@ struct drm_mode_get_connector {
241 __u32 connector_type_id; 273 __u32 connector_type_id;
242 274
243 __u32 connection; 275 __u32 connection;
244 __u32 mm_width, mm_height; /**< HxW in millimeters */ 276 __u32 mm_width; /**< width in millimeters */
277 __u32 mm_height; /**< height in millimeters */
245 __u32 subpixel; 278 __u32 subpixel;
279
280 __u32 pad;
246}; 281};
247 282
248#define DRM_MODE_PROP_PENDING (1<<0) 283#define DRM_MODE_PROP_PENDING (1<<0)
@@ -288,6 +323,8 @@ struct drm_mode_get_property {
288 char name[DRM_PROP_NAME_LEN]; 323 char name[DRM_PROP_NAME_LEN];
289 324
290 __u32 count_values; 325 __u32 count_values;
326 /* This is only used to count enum values, not blobs. The _blobs is
327 * simply because of a historical reason, i.e. backwards compat. */
291 __u32 count_enum_blobs; 328 __u32 count_enum_blobs;
292}; 329};
293 330
@@ -305,6 +342,7 @@ struct drm_mode_connector_set_property {
305#define DRM_MODE_OBJECT_FB 0xfbfbfbfb 342#define DRM_MODE_OBJECT_FB 0xfbfbfbfb
306#define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb 343#define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb
307#define DRM_MODE_OBJECT_PLANE 0xeeeeeeee 344#define DRM_MODE_OBJECT_PLANE 0xeeeeeeee
345#define DRM_MODE_OBJECT_ANY 0
308 346
309struct drm_mode_obj_get_properties { 347struct drm_mode_obj_get_properties {
310 __u64 props_ptr; 348 __u64 props_ptr;
@@ -329,7 +367,8 @@ struct drm_mode_get_blob {
329 367
330struct drm_mode_fb_cmd { 368struct drm_mode_fb_cmd {
331 __u32 fb_id; 369 __u32 fb_id;
332 __u32 width, height; 370 __u32 width;
371 __u32 height;
333 __u32 pitch; 372 __u32 pitch;
334 __u32 bpp; 373 __u32 bpp;
335 __u32 depth; 374 __u32 depth;
@@ -342,9 +381,10 @@ struct drm_mode_fb_cmd {
342 381
343struct drm_mode_fb_cmd2 { 382struct drm_mode_fb_cmd2 {
344 __u32 fb_id; 383 __u32 fb_id;
345 __u32 width, height; 384 __u32 width;
385 __u32 height;
346 __u32 pixel_format; /* fourcc code from drm_fourcc.h */ 386 __u32 pixel_format; /* fourcc code from drm_fourcc.h */
347 __u32 flags; 387 __u32 flags; /* see above flags */
348 388
349 /* 389 /*
350 * In case of planar formats, this ioctl allows up to 4 390 * In case of planar formats, this ioctl allows up to 4
@@ -356,9 +396,9 @@ struct drm_mode_fb_cmd2 {
356 * followed by an interleaved U/V plane containing 396 * followed by an interleaved U/V plane containing
357 * 8 bit 2x2 subsampled colour difference samples. 397 * 8 bit 2x2 subsampled colour difference samples.
358 * 398 *
359 * So it would consist of Y as offset[0] and UV as 399 * So it would consist of Y as offsets[0] and UV as
360 * offset[1]. Note that offset[0] will generally 400 * offsets[1]. Note that offsets[0] will generally
361 * be 0. 401 * be 0 (but this is not required).
362 * 402 *
363 * To accommodate tiled, compressed, etc formats, a per-plane 403 * To accommodate tiled, compressed, etc formats, a per-plane
364 * modifier can be specified. The default value of zero 404 * modifier can be specified. The default value of zero
@@ -377,6 +417,8 @@ struct drm_mode_fb_cmd2 {
377#define DRM_MODE_FB_DIRTY_ANNOTATE_FILL 0x02 417#define DRM_MODE_FB_DIRTY_ANNOTATE_FILL 0x02
378#define DRM_MODE_FB_DIRTY_FLAGS 0x03 418#define DRM_MODE_FB_DIRTY_FLAGS 0x03
379 419
420#define DRM_MODE_FB_DIRTY_MAX_CLIPS 256
421
380/* 422/*
381 * Mark a region of a framebuffer as dirty. 423 * Mark a region of a framebuffer as dirty.
382 * 424 *
@@ -417,20 +459,21 @@ struct drm_mode_mode_cmd {
417 struct drm_mode_modeinfo mode; 459 struct drm_mode_modeinfo mode;
418}; 460};
419 461
420#define DRM_MODE_CURSOR_BO (1<<0) 462#define DRM_MODE_CURSOR_BO 0x01
421#define DRM_MODE_CURSOR_MOVE (1<<1) 463#define DRM_MODE_CURSOR_MOVE 0x02
464#define DRM_MODE_CURSOR_FLAGS 0x03
422 465
423/* 466/*
424 * depending on the value in flags diffrent members are used. 467 * depending on the value in flags different members are used.
425 * 468 *
426 * CURSOR_BO uses 469 * CURSOR_BO uses
427 * crtc 470 * crtc_id
428 * width 471 * width
429 * height 472 * height
430 * handle - if 0 turns the cursor of 473 * handle - if 0 turns the cursor off
431 * 474 *
432 * CURSOR_MOVE uses 475 * CURSOR_MOVE uses
433 * crtc 476 * crtc_id
434 * x 477 * x
435 * y 478 * y
436 */ 479 */
@@ -468,9 +511,30 @@ struct drm_mode_crtc_lut {
468 __u64 blue; 511 __u64 blue;
469}; 512};
470 513
514struct drm_color_ctm {
515 /* Conversion matrix in S31.32 format. */
516 __s64 matrix[9];
517};
518
519struct drm_color_lut {
520 /*
521 * Data is U0.16 fixed point format.
522 */
523 __u16 red;
524 __u16 green;
525 __u16 blue;
526 __u16 reserved;
527};
528
471#define DRM_MODE_PAGE_FLIP_EVENT 0x01 529#define DRM_MODE_PAGE_FLIP_EVENT 0x01
472#define DRM_MODE_PAGE_FLIP_ASYNC 0x02 530#define DRM_MODE_PAGE_FLIP_ASYNC 0x02
473#define DRM_MODE_PAGE_FLIP_FLAGS (DRM_MODE_PAGE_FLIP_EVENT|DRM_MODE_PAGE_FLIP_ASYNC) 531#define DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE 0x4
532#define DRM_MODE_PAGE_FLIP_TARGET_RELATIVE 0x8
533#define DRM_MODE_PAGE_FLIP_TARGET (DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE | \
534 DRM_MODE_PAGE_FLIP_TARGET_RELATIVE)
535#define DRM_MODE_PAGE_FLIP_FLAGS (DRM_MODE_PAGE_FLIP_EVENT | \
536 DRM_MODE_PAGE_FLIP_ASYNC | \
537 DRM_MODE_PAGE_FLIP_TARGET)
474 538
475/* 539/*
476 * Request a page flip on the specified crtc. 540 * Request a page flip on the specified crtc.
@@ -484,14 +548,16 @@ struct drm_mode_crtc_lut {
484 * flip is already pending as the ioctl is called, EBUSY will be 548 * flip is already pending as the ioctl is called, EBUSY will be
485 * returned. 549 * returned.
486 * 550 *
487 * The ioctl supports one flag, DRM_MODE_PAGE_FLIP_EVENT, which will 551 * Flag DRM_MODE_PAGE_FLIP_EVENT requests that drm sends back a vblank
488 * request that drm sends back a vblank event (see drm.h: struct 552 * event (see drm.h: struct drm_event_vblank) when the page flip is
489 * drm_event_vblank) when the page flip is done. The user_data field 553 * done. The user_data field passed in with this ioctl will be
490 * passed in with this ioctl will be returned as the user_data field 554 * returned as the user_data field in the vblank event struct.
491 * in the vblank event struct.
492 * 555 *
493 * The reserved field must be zero until we figure out something 556 * Flag DRM_MODE_PAGE_FLIP_ASYNC requests that the flip happen
494 * clever to use it for. 557 * 'as soon as possible', meaning that it not delay waiting for vblank.
558 * This may cause tearing on the screen.
559 *
560 * The reserved field must be zero.
495 */ 561 */
496 562
497struct drm_mode_crtc_page_flip { 563struct drm_mode_crtc_page_flip {
@@ -502,29 +568,57 @@ struct drm_mode_crtc_page_flip {
502 __u64 user_data; 568 __u64 user_data;
503}; 569};
504 570
571/*
572 * Request a page flip on the specified crtc.
573 *
574 * Same as struct drm_mode_crtc_page_flip, but supports new flags and
575 * re-purposes the reserved field:
576 *
577 * The sequence field must be zero unless either of the
578 * DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE/RELATIVE flags is specified. When
579 * the ABSOLUTE flag is specified, the sequence field denotes the absolute
580 * vblank sequence when the flip should take effect. When the RELATIVE
581 * flag is specified, the sequence field denotes the relative (to the
582 * current one when the ioctl is called) vblank sequence when the flip
583 * should take effect. NOTE: DRM_IOCTL_WAIT_VBLANK must still be used to
584 * make sure the vblank sequence before the target one has passed before
585 * calling this ioctl. The purpose of the
586 * DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE/RELATIVE flags is merely to clarify
587 * the target for when code dealing with a page flip runs during a
588 * vertical blank period.
589 */
590
591struct drm_mode_crtc_page_flip_target {
592 __u32 crtc_id;
593 __u32 fb_id;
594 __u32 flags;
595 __u32 sequence;
596 __u64 user_data;
597};
598
505/* create a dumb scanout buffer */ 599/* create a dumb scanout buffer */
506struct drm_mode_create_dumb { 600struct drm_mode_create_dumb {
507 __u32 height; 601 __u32 height;
508 __u32 width; 602 __u32 width;
509 __u32 bpp; 603 __u32 bpp;
510 __u32 flags; 604 __u32 flags;
511 /* handle, pitch, size will be returned */ 605 /* handle, pitch, size will be returned */
512 __u32 handle; 606 __u32 handle;
513 __u32 pitch; 607 __u32 pitch;
514 __u64 size; 608 __u64 size;
515}; 609};
516 610
517/* set up for mmap of a dumb scanout buffer */ 611/* set up for mmap of a dumb scanout buffer */
518struct drm_mode_map_dumb { 612struct drm_mode_map_dumb {
519 /** Handle for the object being mapped. */ 613 /** Handle for the object being mapped. */
520 __u32 handle; 614 __u32 handle;
521 __u32 pad; 615 __u32 pad;
522 /** 616 /**
523 * Fake offset to use for subsequent mmap call 617 * Fake offset to use for subsequent mmap call
524 * 618 *
525 * This is a fixed-size type for 32/64 compatibility. 619 * This is a fixed-size type for 32/64 compatibility.
526 */ 620 */
527 __u64 offset; 621 __u64 offset;
528}; 622};
529 623
530struct drm_mode_destroy_dumb { 624struct drm_mode_destroy_dumb {
@@ -532,9 +626,16 @@ struct drm_mode_destroy_dumb {
532}; 626};
533 627
534/* page-flip flags are valid, plus: */ 628/* page-flip flags are valid, plus: */
535#define DRM_MODE_ATOMIC_TEST_ONLY 0x0100 629#define DRM_MODE_ATOMIC_TEST_ONLY 0x0100
536#define DRM_MODE_ATOMIC_NONBLOCK 0x0200 630#define DRM_MODE_ATOMIC_NONBLOCK 0x0200
537#define DRM_MODE_ATOMIC_ALLOW_MODESET 0x0400 631#define DRM_MODE_ATOMIC_ALLOW_MODESET 0x0400
632
633#define DRM_MODE_ATOMIC_FLAGS (\
634 DRM_MODE_PAGE_FLIP_EVENT |\
635 DRM_MODE_PAGE_FLIP_ASYNC |\
636 DRM_MODE_ATOMIC_TEST_ONLY |\
637 DRM_MODE_ATOMIC_NONBLOCK |\
638 DRM_MODE_ATOMIC_ALLOW_MODESET)
538 639
539#define DRM_MODE_ATOMIC_FLAGS (\ 640#define DRM_MODE_ATOMIC_FLAGS (\
540 DRM_MODE_PAGE_FLIP_EVENT |\ 641 DRM_MODE_PAGE_FLIP_EVENT |\
@@ -574,5 +675,8 @@ struct drm_mode_destroy_blob {
574 __u32 blob_id; 675 __u32 blob_id;
575}; 676};
576 677
678#if defined(__cplusplus)
679}
680#endif
577 681
578#endif 682#endif
diff --git a/include/drm/drm_sarea.h b/include/drm/drm_sarea.h
index 7325558d..502934ed 100644
--- a/include/drm/drm_sarea.h
+++ b/include/drm/drm_sarea.h
@@ -37,6 +37,8 @@
37/* SAREA area needs to be at least a page */ 37/* SAREA area needs to be at least a page */
38#if defined(__alpha__) 38#if defined(__alpha__)
39#define SAREA_MAX 0x2000U 39#define SAREA_MAX 0x2000U
40#elif defined(__mips__)
41#define SAREA_MAX 0x4000U
40#elif defined(__ia64__) 42#elif defined(__ia64__)
41#define SAREA_MAX 0x10000U /* 64kB */ 43#define SAREA_MAX 0x10000U /* 64kB */
42#else 44#else
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index 0e51d421..5ebe0462 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -29,6 +29,10 @@
29 29
30#include "drm.h" 30#include "drm.h"
31 31
32#if defined(__cplusplus)
33extern "C" {
34#endif
35
32/* Please note that modifications to all structs defined here are 36/* Please note that modifications to all structs defined here are
33 * subject to backwards-compatibility constraints. 37 * subject to backwards-compatibility constraints.
34 */ 38 */
@@ -58,6 +62,30 @@
58#define I915_ERROR_UEVENT "ERROR" 62#define I915_ERROR_UEVENT "ERROR"
59#define I915_RESET_UEVENT "RESET" 63#define I915_RESET_UEVENT "RESET"
60 64
65/*
66 * MOCS indexes used for GPU surfaces, defining the cacheability of the
67 * surface data and the coherency for this data wrt. CPU vs. GPU accesses.
68 */
69enum i915_mocs_table_index {
70 /*
71 * Not cached anywhere, coherency between CPU and GPU accesses is
72 * guaranteed.
73 */
74 I915_MOCS_UNCACHED,
75 /*
76 * Cacheability and coherency controlled by the kernel automatically
77 * based on the DRM_I915_GEM_SET_CACHING IOCTL setting and the current
78 * usage of the surface (used for display scanout or not).
79 */
80 I915_MOCS_PTE,
81 /*
82 * Cached in all GPU caches available on the platform.
83 * Coherency between CPU and GPU accesses to the surface is not
84 * guaranteed without extra synchronization.
85 */
86 I915_MOCS_CACHED,
87};
88
61/* Each region is a minimum of 16k, and there are at most 255 of them. 89/* Each region is a minimum of 16k, and there are at most 255 of them.
62 */ 90 */
63#define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use 91#define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use
@@ -218,6 +246,7 @@ typedef struct _drm_i915_sarea {
218#define DRM_I915_OVERLAY_PUT_IMAGE 0x27 246#define DRM_I915_OVERLAY_PUT_IMAGE 0x27
219#define DRM_I915_OVERLAY_ATTRS 0x28 247#define DRM_I915_OVERLAY_ATTRS 0x28
220#define DRM_I915_GEM_EXECBUFFER2 0x29 248#define DRM_I915_GEM_EXECBUFFER2 0x29
249#define DRM_I915_GEM_EXECBUFFER2_WR DRM_I915_GEM_EXECBUFFER2
221#define DRM_I915_GET_SPRITE_COLORKEY 0x2a 250#define DRM_I915_GET_SPRITE_COLORKEY 0x2a
222#define DRM_I915_SET_SPRITE_COLORKEY 0x2b 251#define DRM_I915_SET_SPRITE_COLORKEY 0x2b
223#define DRM_I915_GEM_WAIT 0x2c 252#define DRM_I915_GEM_WAIT 0x2c
@@ -230,6 +259,7 @@ typedef struct _drm_i915_sarea {
230#define DRM_I915_GEM_USERPTR 0x33 259#define DRM_I915_GEM_USERPTR 0x33
231#define DRM_I915_GEM_CONTEXT_GETPARAM 0x34 260#define DRM_I915_GEM_CONTEXT_GETPARAM 0x34
232#define DRM_I915_GEM_CONTEXT_SETPARAM 0x35 261#define DRM_I915_GEM_CONTEXT_SETPARAM 0x35
262#define DRM_I915_PERF_OPEN 0x36
233 263
234#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) 264#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
235#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) 265#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -251,6 +281,7 @@ typedef struct _drm_i915_sarea {
251#define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init) 281#define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
252#define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer) 282#define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
253#define DRM_IOCTL_I915_GEM_EXECBUFFER2 DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2) 283#define DRM_IOCTL_I915_GEM_EXECBUFFER2 DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
284#define DRM_IOCTL_I915_GEM_EXECBUFFER2_WR DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2_WR, struct drm_i915_gem_execbuffer2)
254#define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin) 285#define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
255#define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin) 286#define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
256#define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy) 287#define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
@@ -283,6 +314,7 @@ typedef struct _drm_i915_sarea {
283#define DRM_IOCTL_I915_GEM_USERPTR DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr) 314#define DRM_IOCTL_I915_GEM_USERPTR DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr)
284#define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param) 315#define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param)
285#define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param) 316#define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param)
317#define DRM_IOCTL_I915_PERF_OPEN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param)
286 318
287/* Allow drivers to submit batchbuffers directly to hardware, relying 319/* Allow drivers to submit batchbuffers directly to hardware, relying
288 * on the security mechanisms provided by hardware. 320 * on the security mechanisms provided by hardware.
@@ -357,6 +389,28 @@ typedef struct drm_i915_irq_wait {
357#define I915_PARAM_HAS_GPU_RESET 35 389#define I915_PARAM_HAS_GPU_RESET 35
358#define I915_PARAM_HAS_RESOURCE_STREAMER 36 390#define I915_PARAM_HAS_RESOURCE_STREAMER 36
359#define I915_PARAM_HAS_EXEC_SOFTPIN 37 391#define I915_PARAM_HAS_EXEC_SOFTPIN 37
392#define I915_PARAM_HAS_POOLED_EU 38
393#define I915_PARAM_MIN_EU_IN_POOL 39
394#define I915_PARAM_MMAP_GTT_VERSION 40
395
396/* Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution
397 * priorities and the driver will attempt to execute batches in priority order.
398 */
399#define I915_PARAM_HAS_SCHEDULER 41
400#define I915_PARAM_HUC_STATUS 42
401
402/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of
403 * synchronisation with implicit fencing on individual objects.
404 * See EXEC_OBJECT_ASYNC.
405 */
406#define I915_PARAM_HAS_EXEC_ASYNC 43
407
408/* Query whether DRM_I915_GEM_EXECBUFFER2 supports explicit fence support -
409 * both being able to pass in a sync_file fd to wait upon before executing,
410 * and being able to return a new sync_file fd that is signaled when the
411 * current request is complete. See I915_EXEC_FENCE_IN and I915_EXEC_FENCE_OUT.
412 */
413#define I915_PARAM_HAS_EXEC_FENCE 44
360 414
361typedef struct drm_i915_getparam { 415typedef struct drm_i915_getparam {
362 __s32 param; 416 __s32 param;
@@ -692,15 +746,41 @@ struct drm_i915_gem_exec_object2 {
692 */ 746 */
693 __u64 offset; 747 __u64 offset;
694 748
695#define EXEC_OBJECT_NEEDS_FENCE (1<<0) 749#define EXEC_OBJECT_NEEDS_FENCE (1<<0)
696#define EXEC_OBJECT_NEEDS_GTT (1<<1) 750#define EXEC_OBJECT_NEEDS_GTT (1<<1)
697#define EXEC_OBJECT_WRITE (1<<2) 751#define EXEC_OBJECT_WRITE (1<<2)
698#define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3) 752#define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
699#define EXEC_OBJECT_PINNED (1<<4) 753#define EXEC_OBJECT_PINNED (1<<4)
700#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_PINNED<<1) 754#define EXEC_OBJECT_PAD_TO_SIZE (1<<5)
755/* The kernel implicitly tracks GPU activity on all GEM objects, and
756 * synchronises operations with outstanding rendering. This includes
757 * rendering on other devices if exported via dma-buf. However, sometimes
758 * this tracking is too coarse and the user knows better. For example,
759 * if the object is split into non-overlapping ranges shared between different
760 * clients or engines (i.e. suballocating objects), the implicit tracking
761 * by kernel assumes that each operation affects the whole object rather
762 * than an individual range, causing needless synchronisation between clients.
763 * The kernel will also forgo any CPU cache flushes prior to rendering from
764 * the object as the client is expected to be also handling such domain
765 * tracking.
766 *
767 * The kernel maintains the implicit tracking in order to manage resources
768 * used by the GPU - this flag only disables the synchronisation prior to
769 * rendering with this object in this execbuf.
770 *
771 * Opting out of implicit synhronisation requires the user to do its own
772 * explicit tracking to avoid rendering corruption. See, for example,
773 * I915_PARAM_HAS_EXEC_FENCE to order execbufs and execute them asynchronously.
774 */
775#define EXEC_OBJECT_ASYNC (1<<6)
776/* All remaining bits are MBZ and RESERVED FOR FUTURE USE */
777#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_ASYNC<<1)
701 __u64 flags; 778 __u64 flags;
702 779
703 __u64 rsvd1; 780 union {
781 __u64 rsvd1;
782 __u64 pad_to_size;
783 };
704 __u64 rsvd2; 784 __u64 rsvd2;
705}; 785};
706 786
@@ -772,17 +852,44 @@ struct drm_i915_gem_execbuffer2 {
772#define I915_EXEC_HANDLE_LUT (1<<12) 852#define I915_EXEC_HANDLE_LUT (1<<12)
773 853
774/** Used for switching BSD rings on the platforms with two BSD rings */ 854/** Used for switching BSD rings on the platforms with two BSD rings */
775#define I915_EXEC_BSD_MASK (3<<13) 855#define I915_EXEC_BSD_SHIFT (13)
776#define I915_EXEC_BSD_DEFAULT (0<<13) /* default ping-pong mode */ 856#define I915_EXEC_BSD_MASK (3 << I915_EXEC_BSD_SHIFT)
777#define I915_EXEC_BSD_RING1 (1<<13) 857/* default ping-pong mode */
778#define I915_EXEC_BSD_RING2 (2<<13) 858#define I915_EXEC_BSD_DEFAULT (0 << I915_EXEC_BSD_SHIFT)
859#define I915_EXEC_BSD_RING1 (1 << I915_EXEC_BSD_SHIFT)
860#define I915_EXEC_BSD_RING2 (2 << I915_EXEC_BSD_SHIFT)
779 861
780/** Tell the kernel that the batchbuffer is processed by 862/** Tell the kernel that the batchbuffer is processed by
781 * the resource streamer. 863 * the resource streamer.
782 */ 864 */
783#define I915_EXEC_RESOURCE_STREAMER (1<<15) 865#define I915_EXEC_RESOURCE_STREAMER (1<<15)
784 866
785#define __I915_EXEC_UNKNOWN_FLAGS -(I915_EXEC_RESOURCE_STREAMER<<1) 867/* Setting I915_EXEC_FENCE_IN implies that lower_32_bits(rsvd2) represent
868 * a sync_file fd to wait upon (in a nonblocking manner) prior to executing
869 * the batch.
870 *
871 * Returns -EINVAL if the sync_file fd cannot be found.
872 */
873#define I915_EXEC_FENCE_IN (1<<16)
874
875/* Setting I915_EXEC_FENCE_OUT causes the ioctl to return a sync_file fd
876 * in the upper_32_bits(rsvd2) upon success. Ownership of the fd is given
877 * to the caller, and it should be close() after use. (The fd is a regular
878 * file descriptor and will be cleaned up on process termination. It holds
879 * a reference to the request, but nothing else.)
880 *
881 * The sync_file fd can be combined with other sync_file and passed either
882 * to execbuf using I915_EXEC_FENCE_IN, to atomic KMS ioctls (so that a flip
883 * will only occur after this request completes), or to other devices.
884 *
885 * Using I915_EXEC_FENCE_OUT requires use of
886 * DRM_IOCTL_I915_GEM_EXECBUFFER2_WR ioctl so that the result is written
887 * back to userspace. Failure to do so will cause the out-fence to always
888 * be reported as zero, and the real fence fd to be leaked.
889 */
890#define I915_EXEC_FENCE_OUT (1<<17)
891
892#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_OUT<<1))
786 893
787#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff) 894#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
788#define i915_execbuffer2_set_context_id(eb2, context) \ 895#define i915_execbuffer2_set_context_id(eb2, context) \
@@ -812,10 +919,49 @@ struct drm_i915_gem_busy {
812 /** Handle of the buffer to check for busy */ 919 /** Handle of the buffer to check for busy */
813 __u32 handle; 920 __u32 handle;
814 921
815 /** Return busy status (1 if busy, 0 if idle). 922 /** Return busy status
816 * The high word is used to indicate on which rings the object 923 *
817 * currently resides: 924 * A return of 0 implies that the object is idle (after
818 * 16:31 - busy (r or r/w) rings (16 render, 17 bsd, 18 blt, etc) 925 * having flushed any pending activity), and a non-zero return that
926 * the object is still in-flight on the GPU. (The GPU has not yet
927 * signaled completion for all pending requests that reference the
928 * object.) An object is guaranteed to become idle eventually (so
929 * long as no new GPU commands are executed upon it). Due to the
930 * asynchronous nature of the hardware, an object reported
931 * as busy may become idle before the ioctl is completed.
932 *
933 * Furthermore, if the object is busy, which engine is busy is only
934 * provided as a guide. There are race conditions which prevent the
935 * report of which engines are busy from being always accurate.
936 * However, the converse is not true. If the object is idle, the
937 * result of the ioctl, that all engines are idle, is accurate.
938 *
939 * The returned dword is split into two fields to indicate both
940 * the engines on which the object is being read, and the
941 * engine on which it is currently being written (if any).
942 *
943 * The low word (bits 0:15) indicate if the object is being written
944 * to by any engine (there can only be one, as the GEM implicit
945 * synchronisation rules force writes to be serialised). Only the
946 * engine for the last write is reported.
947 *
948 * The high word (bits 16:31) are a bitmask of which engines are
949 * currently reading from the object. Multiple engines may be
950 * reading from the object simultaneously.
951 *
952 * The value of each engine is the same as specified in the
953 * EXECBUFFER2 ioctl, i.e. I915_EXEC_RENDER, I915_EXEC_BSD etc.
954 * Note I915_EXEC_DEFAULT is a symbolic value and is mapped to
955 * the I915_EXEC_RENDER engine for execution, and so it is never
956 * reported as active itself. Some hardware may have parallel
957 * execution engines, e.g. multiple media engines, which are
958 * mapped to the same identifier in the EXECBUFFER2 ioctl and
959 * so are not separately reported for busyness.
960 *
961 * Caveat emptor:
962 * Only the boolean result of this query is reliable; that is whether
963 * the object is idle or busy. The report of which engines are busy
964 * should be only used as a heuristic.
819 */ 965 */
820 __u32 busy; 966 __u32 busy;
821}; 967};
@@ -864,6 +1010,7 @@ struct drm_i915_gem_caching {
864#define I915_TILING_NONE 0 1010#define I915_TILING_NONE 0
865#define I915_TILING_X 1 1011#define I915_TILING_X 1
866#define I915_TILING_Y 2 1012#define I915_TILING_Y 2
1013#define I915_TILING_LAST I915_TILING_Y
867 1014
868#define I915_BIT_6_SWIZZLE_NONE 0 1015#define I915_BIT_6_SWIZZLE_NONE 0
869#define I915_BIT_6_SWIZZLE_9 1 1016#define I915_BIT_6_SWIZZLE_9 1
@@ -1140,7 +1287,145 @@ struct drm_i915_gem_context_param {
1140#define I915_CONTEXT_PARAM_BAN_PERIOD 0x1 1287#define I915_CONTEXT_PARAM_BAN_PERIOD 0x1
1141#define I915_CONTEXT_PARAM_NO_ZEROMAP 0x2 1288#define I915_CONTEXT_PARAM_NO_ZEROMAP 0x2
1142#define I915_CONTEXT_PARAM_GTT_SIZE 0x3 1289#define I915_CONTEXT_PARAM_GTT_SIZE 0x3
1290#define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE 0x4
1291#define I915_CONTEXT_PARAM_BANNABLE 0x5
1143 __u64 value; 1292 __u64 value;
1144}; 1293};
1145 1294
1295enum drm_i915_oa_format {
1296 I915_OA_FORMAT_A13 = 1,
1297 I915_OA_FORMAT_A29,
1298 I915_OA_FORMAT_A13_B8_C8,
1299 I915_OA_FORMAT_B4_C8,
1300 I915_OA_FORMAT_A45_B8_C8,
1301 I915_OA_FORMAT_B4_C8_A16,
1302 I915_OA_FORMAT_C4_B8,
1303
1304 I915_OA_FORMAT_MAX /* non-ABI */
1305};
1306
1307enum drm_i915_perf_property_id {
1308 /**
1309 * Open the stream for a specific context handle (as used with
1310 * execbuffer2). A stream opened for a specific context this way
1311 * won't typically require root privileges.
1312 */
1313 DRM_I915_PERF_PROP_CTX_HANDLE = 1,
1314
1315 /**
1316 * A value of 1 requests the inclusion of raw OA unit reports as
1317 * part of stream samples.
1318 */
1319 DRM_I915_PERF_PROP_SAMPLE_OA,
1320
1321 /**
1322 * The value specifies which set of OA unit metrics should be
1323 * be configured, defining the contents of any OA unit reports.
1324 */
1325 DRM_I915_PERF_PROP_OA_METRICS_SET,
1326
1327 /**
1328 * The value specifies the size and layout of OA unit reports.
1329 */
1330 DRM_I915_PERF_PROP_OA_FORMAT,
1331
1332 /**
1333 * Specifying this property implicitly requests periodic OA unit
1334 * sampling and (at least on Haswell) the sampling frequency is derived
1335 * from this exponent as follows:
1336 *
1337 * 80ns * 2^(period_exponent + 1)
1338 */
1339 DRM_I915_PERF_PROP_OA_EXPONENT,
1340
1341 DRM_I915_PERF_PROP_MAX /* non-ABI */
1342};
1343
1344struct drm_i915_perf_open_param {
1345 __u32 flags;
1346#define I915_PERF_FLAG_FD_CLOEXEC (1<<0)
1347#define I915_PERF_FLAG_FD_NONBLOCK (1<<1)
1348#define I915_PERF_FLAG_DISABLED (1<<2)
1349
1350 /** The number of u64 (id, value) pairs */
1351 __u32 num_properties;
1352
1353 /**
1354 * Pointer to array of u64 (id, value) pairs configuring the stream
1355 * to open.
1356 */
1357 __u64 properties_ptr;
1358};
1359
1360/**
1361 * Enable data capture for a stream that was either opened in a disabled state
1362 * via I915_PERF_FLAG_DISABLED or was later disabled via
1363 * I915_PERF_IOCTL_DISABLE.
1364 *
1365 * It is intended to be cheaper to disable and enable a stream than it may be
1366 * to close and re-open a stream with the same configuration.
1367 *
1368 * It's undefined whether any pending data for the stream will be lost.
1369 */
1370#define I915_PERF_IOCTL_ENABLE _IO('i', 0x0)
1371
1372/**
1373 * Disable data capture for a stream.
1374 *
1375 * It is an error to try and read a stream that is disabled.
1376 */
1377#define I915_PERF_IOCTL_DISABLE _IO('i', 0x1)
1378
1379/**
1380 * Common to all i915 perf records
1381 */
1382struct drm_i915_perf_record_header {
1383 __u32 type;
1384 __u16 pad;
1385 __u16 size;
1386};
1387
1388enum drm_i915_perf_record_type {
1389
1390 /**
1391 * Samples are the work horse record type whose contents are extensible
1392 * and defined when opening an i915 perf stream based on the given
1393 * properties.
1394 *
1395 * Boolean properties following the naming convention
1396 * DRM_I915_PERF_SAMPLE_xyz_PROP request the inclusion of 'xyz' data in
1397 * every sample.
1398 *
1399 * The order of these sample properties given by userspace has no
1400 * affect on the ordering of data within a sample. The order is
1401 * documented here.
1402 *
1403 * struct {
1404 * struct drm_i915_perf_record_header header;
1405 *
1406 * { u32 oa_report[]; } && DRM_I915_PERF_PROP_SAMPLE_OA
1407 * };
1408 */
1409 DRM_I915_PERF_RECORD_SAMPLE = 1,
1410
1411 /*
1412 * Indicates that one or more OA reports were not written by the
1413 * hardware. This can happen for example if an MI_REPORT_PERF_COUNT
1414 * command collides with periodic sampling - which would be more likely
1415 * at higher sampling frequencies.
1416 */
1417 DRM_I915_PERF_RECORD_OA_REPORT_LOST = 2,
1418
1419 /**
1420 * An error occurred that resulted in all pending OA reports being lost.
1421 */
1422 DRM_I915_PERF_RECORD_OA_BUFFER_LOST = 3,
1423
1424 DRM_I915_PERF_RECORD_MAX /* non-ABI */
1425};
1426
1427#if defined(__cplusplus)
1428}
1429#endif
1430
1146#endif /* _I915_DRM_H_ */ 1431#endif /* _I915_DRM_H_ */
diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h
index cd31794f..f09cc04c 100644
--- a/include/drm/radeon_drm.h
+++ b/include/drm/radeon_drm.h
@@ -35,6 +35,10 @@
35 35
36#include "drm.h" 36#include "drm.h"
37 37
38#if defined(__cplusplus)
39extern "C" {
40#endif
41
38/* WARNING: If you change any of these defines, make sure to change the 42/* WARNING: If you change any of these defines, make sure to change the
39 * defines in the X server file (radeon_sarea.h) 43 * defines in the X server file (radeon_sarea.h)
40 */ 44 */
@@ -511,6 +515,7 @@ typedef struct {
511#define DRM_RADEON_GEM_BUSY 0x2a 515#define DRM_RADEON_GEM_BUSY 0x2a
512#define DRM_RADEON_GEM_VA 0x2b 516#define DRM_RADEON_GEM_VA 0x2b
513#define DRM_RADEON_GEM_OP 0x2c 517#define DRM_RADEON_GEM_OP 0x2c
518#define DRM_RADEON_GEM_USERPTR 0x2d
514 519
515#define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t) 520#define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t)
516#define DRM_IOCTL_RADEON_CP_START DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_START) 521#define DRM_IOCTL_RADEON_CP_START DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_START)
@@ -554,6 +559,7 @@ typedef struct {
554#define DRM_IOCTL_RADEON_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_BUSY, struct drm_radeon_gem_busy) 559#define DRM_IOCTL_RADEON_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_BUSY, struct drm_radeon_gem_busy)
555#define DRM_IOCTL_RADEON_GEM_VA DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_VA, struct drm_radeon_gem_va) 560#define DRM_IOCTL_RADEON_GEM_VA DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_VA, struct drm_radeon_gem_va)
556#define DRM_IOCTL_RADEON_GEM_OP DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_OP, struct drm_radeon_gem_op) 561#define DRM_IOCTL_RADEON_GEM_OP DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_OP, struct drm_radeon_gem_op)
562#define DRM_IOCTL_RADEON_GEM_USERPTR DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_USERPTR, struct drm_radeon_gem_userptr)
557 563
558typedef struct drm_radeon_init { 564typedef struct drm_radeon_init {
559 enum { 565 enum {
@@ -796,7 +802,13 @@ struct drm_radeon_gem_info {
796 uint64_t vram_visible; 802 uint64_t vram_visible;
797}; 803};
798 804
799#define RADEON_GEM_NO_BACKING_STORE 1 805#define RADEON_GEM_NO_BACKING_STORE (1 << 0)
806#define RADEON_GEM_GTT_UC (1 << 1)
807#define RADEON_GEM_GTT_WC (1 << 2)
808/* BO is expected to be accessed by the CPU */
809#define RADEON_GEM_CPU_ACCESS (1 << 3)
810/* CPU access is not expected to work for this BO */
811#define RADEON_GEM_NO_CPU_ACCESS (1 << 4)
800 812
801struct drm_radeon_gem_create { 813struct drm_radeon_gem_create {
802 uint64_t size; 814 uint64_t size;
@@ -806,6 +818,23 @@ struct drm_radeon_gem_create {
806 uint32_t flags; 818 uint32_t flags;
807}; 819};
808 820
821/*
822 * This is not a reliable API and you should expect it to fail for any
823 * number of reasons and have fallback path that do not use userptr to
824 * perform any operation.
825 */
826#define RADEON_GEM_USERPTR_READONLY (1 << 0)
827#define RADEON_GEM_USERPTR_ANONONLY (1 << 1)
828#define RADEON_GEM_USERPTR_VALIDATE (1 << 2)
829#define RADEON_GEM_USERPTR_REGISTER (1 << 3)
830
831struct drm_radeon_gem_userptr {
832 uint64_t addr;
833 uint64_t size;
834 uint32_t flags;
835 uint32_t handle;
836};
837
809#define RADEON_TILING_MACRO 0x1 838#define RADEON_TILING_MACRO 0x1
810#define RADEON_TILING_MICRO 0x2 839#define RADEON_TILING_MICRO 0x2
811#define RADEON_TILING_SWAP_16BIT 0x4 840#define RADEON_TILING_SWAP_16BIT 0x4
@@ -943,6 +972,7 @@ struct drm_radeon_cs_chunk {
943}; 972};
944 973
945/* drm_radeon_cs_reloc.flags */ 974/* drm_radeon_cs_reloc.flags */
975#define RADEON_RELOC_PRIO_MASK (0xf << 0)
946 976
947struct drm_radeon_cs_reloc { 977struct drm_radeon_cs_reloc {
948 uint32_t handle; 978 uint32_t handle;
@@ -1008,7 +1038,13 @@ struct drm_radeon_cs {
1008#define RADEON_INFO_NUM_BYTES_MOVED 0x1d 1038#define RADEON_INFO_NUM_BYTES_MOVED 0x1d
1009#define RADEON_INFO_VRAM_USAGE 0x1e 1039#define RADEON_INFO_VRAM_USAGE 0x1e
1010#define RADEON_INFO_GTT_USAGE 0x1f 1040#define RADEON_INFO_GTT_USAGE 0x1f
1011 1041#define RADEON_INFO_ACTIVE_CU_COUNT 0x20
1042#define RADEON_INFO_CURRENT_GPU_TEMP 0x21
1043#define RADEON_INFO_CURRENT_GPU_SCLK 0x22
1044#define RADEON_INFO_CURRENT_GPU_MCLK 0x23
1045#define RADEON_INFO_READ_REG 0x24
1046#define RADEON_INFO_VA_UNMAP_WORKING 0x25
1047#define RADEON_INFO_GPU_RESET_COUNTER 0x26
1012 1048
1013struct drm_radeon_info { 1049struct drm_radeon_info {
1014 uint32_t request; 1050 uint32_t request;
@@ -1034,13 +1070,10 @@ struct drm_radeon_info {
1034#define SI_TILE_MODE_DEPTH_STENCIL_2D_4AA 3 1070#define SI_TILE_MODE_DEPTH_STENCIL_2D_4AA 3
1035#define SI_TILE_MODE_DEPTH_STENCIL_2D_8AA 2 1071#define SI_TILE_MODE_DEPTH_STENCIL_2D_8AA 2
1036 1072
1037#define CIK_TILE_MODE_COLOR_2D 14
1038#define CIK_TILE_MODE_COLOR_2D_SCANOUT 10
1039#define CIK_TILE_MODE_DEPTH_STENCIL_2D_TILESPLIT_64 0
1040#define CIK_TILE_MODE_DEPTH_STENCIL_2D_TILESPLIT_128 1
1041#define CIK_TILE_MODE_DEPTH_STENCIL_2D_TILESPLIT_256 2
1042#define CIK_TILE_MODE_DEPTH_STENCIL_2D_TILESPLIT_512 3
1043#define CIK_TILE_MODE_DEPTH_STENCIL_2D_TILESPLIT_ROW_SIZE 4
1044#define CIK_TILE_MODE_DEPTH_STENCIL_1D 5 1073#define CIK_TILE_MODE_DEPTH_STENCIL_1D 5
1045 1074
1075#if defined(__cplusplus)
1076}
1077#endif
1078
1046#endif 1079#endif
diff --git a/include/drm/vc4_drm.h b/include/drm/vc4_drm.h
new file mode 100644
index 00000000..319881d8
--- /dev/null
+++ b/include/drm/vc4_drm.h
@@ -0,0 +1,302 @@
1/*
2 * Copyright © 2014-2015 Broadcom
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24#ifndef _VC4_DRM_H_
25#define _VC4_DRM_H_
26
27#include "drm.h"
28
29#if defined(__cplusplus)
30extern "C" {
31#endif
32
33#define DRM_VC4_SUBMIT_CL 0x00
34#define DRM_VC4_WAIT_SEQNO 0x01
35#define DRM_VC4_WAIT_BO 0x02
36#define DRM_VC4_CREATE_BO 0x03
37#define DRM_VC4_MMAP_BO 0x04
38#define DRM_VC4_CREATE_SHADER_BO 0x05
39#define DRM_VC4_GET_HANG_STATE 0x06
40#define DRM_VC4_GET_PARAM 0x07
41
42#define DRM_IOCTL_VC4_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SUBMIT_CL, struct drm_vc4_submit_cl)
43#define DRM_IOCTL_VC4_WAIT_SEQNO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_SEQNO, struct drm_vc4_wait_seqno)
44#define DRM_IOCTL_VC4_WAIT_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_BO, struct drm_vc4_wait_bo)
45#define DRM_IOCTL_VC4_CREATE_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_BO, struct drm_vc4_create_bo)
46#define DRM_IOCTL_VC4_MMAP_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_MMAP_BO, struct drm_vc4_mmap_bo)
47#define DRM_IOCTL_VC4_CREATE_SHADER_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_SHADER_BO, struct drm_vc4_create_shader_bo)
48#define DRM_IOCTL_VC4_GET_HANG_STATE DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_HANG_STATE, struct drm_vc4_get_hang_state)
49#define DRM_IOCTL_VC4_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_PARAM, struct drm_vc4_get_param)
50
51struct drm_vc4_submit_rcl_surface {
52 __u32 hindex; /* Handle index, or ~0 if not present. */
53 __u32 offset; /* Offset to start of buffer. */
54 /*
55 * Bits for either render config (color_write) or load/store packet.
56 * Bits should all be 0 for MSAA load/stores.
57 */
58 __u16 bits;
59
60#define VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES (1 << 0)
61 __u16 flags;
62};
63
64/**
65 * struct drm_vc4_submit_cl - ioctl argument for submitting commands to the 3D
66 * engine.
67 *
68 * Drivers typically use GPU BOs to store batchbuffers / command lists and
69 * their associated state. However, because the VC4 lacks an MMU, we have to
70 * do validation of memory accesses by the GPU commands. If we were to store
71 * our commands in BOs, we'd need to do uncached readback from them to do the
72 * validation process, which is too expensive. Instead, userspace accumulates
73 * commands and associated state in plain memory, then the kernel copies the
74 * data to its own address space, and then validates and stores it in a GPU
75 * BO.
76 */
77struct drm_vc4_submit_cl {
78 /* Pointer to the binner command list.
79 *
80 * This is the first set of commands executed, which runs the
81 * coordinate shader to determine where primitives land on the screen,
82 * then writes out the state updates and draw calls necessary per tile
83 * to the tile allocation BO.
84 */
85 __u64 bin_cl;
86
87 /* Pointer to the shader records.
88 *
89 * Shader records are the structures read by the hardware that contain
90 * pointers to uniforms, shaders, and vertex attributes. The
91 * reference to the shader record has enough information to determine
92 * how many pointers are necessary (fixed number for shaders/uniforms,
93 * and an attribute count), so those BO indices into bo_handles are
94 * just stored as __u32s before each shader record passed in.
95 */
96 __u64 shader_rec;
97
98 /* Pointer to uniform data and texture handles for the textures
99 * referenced by the shader.
100 *
101 * For each shader state record, there is a set of uniform data in the
102 * order referenced by the record (FS, VS, then CS). Each set of
103 * uniform data has a __u32 index into bo_handles per texture
104 * sample operation, in the order the QPU_W_TMUn_S writes appear in
105 * the program. Following the texture BO handle indices is the actual
106 * uniform data.
107 *
108 * The individual uniform state blocks don't have sizes passed in,
109 * because the kernel has to determine the sizes anyway during shader
110 * code validation.
111 */
112 __u64 uniforms;
113 __u64 bo_handles;
114
115 /* Size in bytes of the binner command list. */
116 __u32 bin_cl_size;
117 /* Size in bytes of the set of shader records. */
118 __u32 shader_rec_size;
119 /* Number of shader records.
120 *
121 * This could just be computed from the contents of shader_records and
122 * the address bits of references to them from the bin CL, but it
123 * keeps the kernel from having to resize some allocations it makes.
124 */
125 __u32 shader_rec_count;
126 /* Size in bytes of the uniform state. */
127 __u32 uniforms_size;
128
129 /* Number of BO handles passed in (size is that times 4). */
130 __u32 bo_handle_count;
131
132 /* RCL setup: */
133 __u16 width;
134 __u16 height;
135 __u8 min_x_tile;
136 __u8 min_y_tile;
137 __u8 max_x_tile;
138 __u8 max_y_tile;
139 struct drm_vc4_submit_rcl_surface color_read;
140 struct drm_vc4_submit_rcl_surface color_write;
141 struct drm_vc4_submit_rcl_surface zs_read;
142 struct drm_vc4_submit_rcl_surface zs_write;
143 struct drm_vc4_submit_rcl_surface msaa_color_write;
144 struct drm_vc4_submit_rcl_surface msaa_zs_write;
145 __u32 clear_color[2];
146 __u32 clear_z;
147 __u8 clear_s;
148
149 __u32 pad:24;
150
151#define VC4_SUBMIT_CL_USE_CLEAR_COLOR (1 << 0)
152 __u32 flags;
153
154 /* Returned value of the seqno of this render job (for the
155 * wait ioctl).
156 */
157 __u64 seqno;
158};
159
160/**
161 * struct drm_vc4_wait_seqno - ioctl argument for waiting for
162 * DRM_VC4_SUBMIT_CL completion using its returned seqno.
163 *
164 * timeout_ns is the timeout in nanoseconds, where "0" means "don't
165 * block, just return the status."
166 */
167struct drm_vc4_wait_seqno {
168 __u64 seqno;
169 __u64 timeout_ns;
170};
171
172/**
173 * struct drm_vc4_wait_bo - ioctl argument for waiting for
174 * completion of the last DRM_VC4_SUBMIT_CL on a BO.
175 *
176 * This is useful for cases where multiple processes might be
177 * rendering to a BO and you want to wait for all rendering to be
178 * completed.
179 */
180struct drm_vc4_wait_bo {
181 __u32 handle;
182 __u32 pad;
183 __u64 timeout_ns;
184};
185
186/**
187 * struct drm_vc4_create_bo - ioctl argument for creating VC4 BOs.
188 *
189 * There are currently no values for the flags argument, but it may be
190 * used in a future extension.
191 */
192struct drm_vc4_create_bo {
193 __u32 size;
194 __u32 flags;
195 /** Returned GEM handle for the BO. */
196 __u32 handle;
197 __u32 pad;
198};
199
200/**
201 * struct drm_vc4_mmap_bo - ioctl argument for mapping VC4 BOs.
202 *
203 * This doesn't actually perform an mmap. Instead, it returns the
204 * offset you need to use in an mmap on the DRM device node. This
205 * means that tools like valgrind end up knowing about the mapped
206 * memory.
207 *
208 * There are currently no values for the flags argument, but it may be
209 * used in a future extension.
210 */
211struct drm_vc4_mmap_bo {
212 /** Handle for the object being mapped. */
213 __u32 handle;
214 __u32 flags;
215 /** offset into the drm node to use for subsequent mmap call. */
216 __u64 offset;
217};
218
219/**
220 * struct drm_vc4_create_shader_bo - ioctl argument for creating VC4
221 * shader BOs.
222 *
223 * Since allowing a shader to be overwritten while it's also being
224 * executed from would allow privlege escalation, shaders must be
225 * created using this ioctl, and they can't be mmapped later.
226 */
227struct drm_vc4_create_shader_bo {
228 /* Size of the data argument. */
229 __u32 size;
230 /* Flags, currently must be 0. */
231 __u32 flags;
232
233 /* Pointer to the data. */
234 __u64 data;
235
236 /** Returned GEM handle for the BO. */
237 __u32 handle;
238 /* Pad, must be 0. */
239 __u32 pad;
240};
241
242struct drm_vc4_get_hang_state_bo {
243 __u32 handle;
244 __u32 paddr;
245 __u32 size;
246 __u32 pad;
247};
248
249/**
250 * struct drm_vc4_hang_state - ioctl argument for collecting state
251 * from a GPU hang for analysis.
252*/
253struct drm_vc4_get_hang_state {
254 /** Pointer to array of struct drm_vc4_get_hang_state_bo. */
255 __u64 bo;
256 /**
257 * On input, the size of the bo array. Output is the number
258 * of bos to be returned.
259 */
260 __u32 bo_count;
261
262 __u32 start_bin, start_render;
263
264 __u32 ct0ca, ct0ea;
265 __u32 ct1ca, ct1ea;
266 __u32 ct0cs, ct1cs;
267 __u32 ct0ra0, ct1ra0;
268
269 __u32 bpca, bpcs;
270 __u32 bpoa, bpos;
271
272 __u32 vpmbase;
273
274 __u32 dbge;
275 __u32 fdbgo;
276 __u32 fdbgb;
277 __u32 fdbgr;
278 __u32 fdbgs;
279 __u32 errstat;
280
281 /* Pad that we may save more registers into in the future. */
282 __u32 pad[16];
283};
284
285#define DRM_VC4_PARAM_V3D_IDENT0 0
286#define DRM_VC4_PARAM_V3D_IDENT1 1
287#define DRM_VC4_PARAM_V3D_IDENT2 2
288#define DRM_VC4_PARAM_SUPPORTS_BRANCHES 3
289#define DRM_VC4_PARAM_SUPPORTS_ETC1 4
290#define DRM_VC4_PARAM_SUPPORTS_THREADED_FS 5
291
292struct drm_vc4_get_param {
293 __u32 param;
294 __u32 pad;
295 __u64 value;
296};
297
298#if defined(__cplusplus)
299}
300#endif
301
302#endif /* _VC4_DRM_H_ */
diff --git a/include/drm/virtgpu_drm.h b/include/drm/virtgpu_drm.h
index abf11c58..91a31ffe 100644
--- a/include/drm/virtgpu_drm.h
+++ b/include/drm/virtgpu_drm.h
@@ -24,13 +24,16 @@
24#ifndef VIRTGPU_DRM_H 24#ifndef VIRTGPU_DRM_H
25#define VIRTGPU_DRM_H 25#define VIRTGPU_DRM_H
26 26
27#include <stddef.h> 27#include "drm.h"
28#include "drm/drm.h" 28
29#if defined(__cplusplus)
30extern "C" {
31#endif
29 32
30/* Please note that modifications to all structs defined here are 33/* Please note that modifications to all structs defined here are
31 * subject to backwards-compatibility constraints. 34 * subject to backwards-compatibility constraints.
32 * 35 *
33 * Do not use pointers, use uint64_t instead for 32 bit / 64 bit user/kernel 36 * Do not use pointers, use __u64 instead for 32 bit / 64 bit user/kernel
34 * compatibility Keep fields aligned to their size 37 * compatibility Keep fields aligned to their size
35 */ 38 */
36 39
@@ -45,88 +48,88 @@
45#define DRM_VIRTGPU_GET_CAPS 0x09 48#define DRM_VIRTGPU_GET_CAPS 0x09
46 49
47struct drm_virtgpu_map { 50struct drm_virtgpu_map {
48 uint64_t offset; /* use for mmap system call */ 51 __u64 offset; /* use for mmap system call */
49 uint32_t handle; 52 __u32 handle;
50 uint32_t pad; 53 __u32 pad;
51}; 54};
52 55
53struct drm_virtgpu_execbuffer { 56struct drm_virtgpu_execbuffer {
54 uint32_t flags; /* for future use */ 57 __u32 flags; /* for future use */
55 uint32_t size; 58 __u32 size;
56 uint64_t command; /* void* */ 59 __u64 command; /* void* */
57 uint64_t bo_handles; 60 __u64 bo_handles;
58 uint32_t num_bo_handles; 61 __u32 num_bo_handles;
59 uint32_t pad; 62 __u32 pad;
60}; 63};
61 64
62#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */ 65#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
63 66
64struct drm_virtgpu_getparam { 67struct drm_virtgpu_getparam {
65 uint64_t param; 68 __u64 param;
66 uint64_t value; 69 __u64 value;
67}; 70};
68 71
69/* NO_BO flags? NO resource flag? */ 72/* NO_BO flags? NO resource flag? */
70/* resource flag for y_0_top */ 73/* resource flag for y_0_top */
71struct drm_virtgpu_resource_create { 74struct drm_virtgpu_resource_create {
72 uint32_t target; 75 __u32 target;
73 uint32_t format; 76 __u32 format;
74 uint32_t bind; 77 __u32 bind;
75 uint32_t width; 78 __u32 width;
76 uint32_t height; 79 __u32 height;
77 uint32_t depth; 80 __u32 depth;
78 uint32_t array_size; 81 __u32 array_size;
79 uint32_t last_level; 82 __u32 last_level;
80 uint32_t nr_samples; 83 __u32 nr_samples;
81 uint32_t flags; 84 __u32 flags;
82 uint32_t bo_handle; /* if this is set - recreate a new resource attached to this bo ? */ 85 __u32 bo_handle; /* if this is set - recreate a new resource attached to this bo ? */
83 uint32_t res_handle; /* returned by kernel */ 86 __u32 res_handle; /* returned by kernel */
84 uint32_t size; /* validate transfer in the host */ 87 __u32 size; /* validate transfer in the host */
85 uint32_t stride; /* validate transfer in the host */ 88 __u32 stride; /* validate transfer in the host */
86}; 89};
87 90
88struct drm_virtgpu_resource_info { 91struct drm_virtgpu_resource_info {
89 uint32_t bo_handle; 92 __u32 bo_handle;
90 uint32_t res_handle; 93 __u32 res_handle;
91 uint32_t size; 94 __u32 size;
92 uint32_t stride; 95 __u32 stride;
93}; 96};
94 97
95struct drm_virtgpu_3d_box { 98struct drm_virtgpu_3d_box {
96 uint32_t x; 99 __u32 x;
97 uint32_t y; 100 __u32 y;
98 uint32_t z; 101 __u32 z;
99 uint32_t w; 102 __u32 w;
100 uint32_t h; 103 __u32 h;
101 uint32_t d; 104 __u32 d;
102}; 105};
103 106
104struct drm_virtgpu_3d_transfer_to_host { 107struct drm_virtgpu_3d_transfer_to_host {
105 uint32_t bo_handle; 108 __u32 bo_handle;
106 struct drm_virtgpu_3d_box box; 109 struct drm_virtgpu_3d_box box;
107 uint32_t level; 110 __u32 level;
108 uint32_t offset; 111 __u32 offset;
109}; 112};
110 113
111struct drm_virtgpu_3d_transfer_from_host { 114struct drm_virtgpu_3d_transfer_from_host {
112 uint32_t bo_handle; 115 __u32 bo_handle;
113 struct drm_virtgpu_3d_box box; 116 struct drm_virtgpu_3d_box box;
114 uint32_t level; 117 __u32 level;
115 uint32_t offset; 118 __u32 offset;
116}; 119};
117 120
118#define VIRTGPU_WAIT_NOWAIT 1 /* like it */ 121#define VIRTGPU_WAIT_NOWAIT 1 /* like it */
119struct drm_virtgpu_3d_wait { 122struct drm_virtgpu_3d_wait {
120 uint32_t handle; /* 0 is an invalid handle */ 123 __u32 handle; /* 0 is an invalid handle */
121 uint32_t flags; 124 __u32 flags;
122}; 125};
123 126
124struct drm_virtgpu_get_caps { 127struct drm_virtgpu_get_caps {
125 uint32_t cap_set_id; 128 __u32 cap_set_id;
126 uint32_t cap_set_ver; 129 __u32 cap_set_ver;
127 uint64_t addr; 130 __u64 addr;
128 uint32_t size; 131 __u32 size;
129 uint32_t pad; 132 __u32 pad;
130}; 133};
131 134
132#define DRM_IOCTL_VIRTGPU_MAP \ 135#define DRM_IOCTL_VIRTGPU_MAP \
@@ -164,4 +167,8 @@ struct drm_virtgpu_get_caps {
164 DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_GET_CAPS, \ 167 DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_GET_CAPS, \
165 struct drm_virtgpu_get_caps) 168 struct drm_virtgpu_get_caps)
166 169
170#if defined(__cplusplus)
171}
172#endif
173
167#endif 174#endif
diff --git a/include/drm/vmwgfx_drm.h b/include/drm/vmwgfx_drm.h
index 4d084239..5b68b4d1 100644
--- a/include/drm/vmwgfx_drm.h
+++ b/include/drm/vmwgfx_drm.h
@@ -1,6 +1,6 @@
1/************************************************************************** 1/**************************************************************************
2 * 2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA 3 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved. 4 * All Rights Reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -28,10 +28,11 @@
28#ifndef __VMWGFX_DRM_H__ 28#ifndef __VMWGFX_DRM_H__
29#define __VMWGFX_DRM_H__ 29#define __VMWGFX_DRM_H__
30 30
31#include "drm.h"
32
31#define DRM_VMW_MAX_SURFACE_FACES 6 33#define DRM_VMW_MAX_SURFACE_FACES 6
32#define DRM_VMW_MAX_MIP_LEVELS 24 34#define DRM_VMW_MAX_MIP_LEVELS 24
33 35
34#define DRM_VMW_EXT_NAME_LEN 128
35 36
36#define DRM_VMW_GET_PARAM 0 37#define DRM_VMW_GET_PARAM 0
37#define DRM_VMW_ALLOC_DMABUF 1 38#define DRM_VMW_ALLOC_DMABUF 1
@@ -48,11 +49,20 @@
48#define DRM_VMW_UNREF_SURFACE 10 49#define DRM_VMW_UNREF_SURFACE 10
49#define DRM_VMW_REF_SURFACE 11 50#define DRM_VMW_REF_SURFACE 11
50#define DRM_VMW_EXECBUF 12 51#define DRM_VMW_EXECBUF 12
51#define DRM_VMW_FIFO_DEBUG 13 52#define DRM_VMW_GET_3D_CAP 13
52#define DRM_VMW_FENCE_WAIT 14 53#define DRM_VMW_FENCE_WAIT 14
53/* guarded by minor version >= 2 */ 54#define DRM_VMW_FENCE_SIGNALED 15
54#define DRM_VMW_UPDATE_LAYOUT 15 55#define DRM_VMW_FENCE_UNREF 16
55 56#define DRM_VMW_FENCE_EVENT 17
57#define DRM_VMW_PRESENT 18
58#define DRM_VMW_PRESENT_READBACK 19
59#define DRM_VMW_UPDATE_LAYOUT 20
60#define DRM_VMW_CREATE_SHADER 21
61#define DRM_VMW_UNREF_SHADER 22
62#define DRM_VMW_GB_SURFACE_CREATE 23
63#define DRM_VMW_GB_SURFACE_REF 24
64#define DRM_VMW_SYNCCPU 25
65#define DRM_VMW_CREATE_EXTENDED_CONTEXT 26
56 66
57/*************************************************************************/ 67/*************************************************************************/
58/** 68/**
@@ -69,66 +79,39 @@
69#define DRM_VMW_PARAM_NUM_STREAMS 0 79#define DRM_VMW_PARAM_NUM_STREAMS 0
70#define DRM_VMW_PARAM_NUM_FREE_STREAMS 1 80#define DRM_VMW_PARAM_NUM_FREE_STREAMS 1
71#define DRM_VMW_PARAM_3D 2 81#define DRM_VMW_PARAM_3D 2
72#define DRM_VMW_PARAM_FIFO_OFFSET 3 82#define DRM_VMW_PARAM_HW_CAPS 3
73#define DRM_VMW_PARAM_HW_CAPS 4 83#define DRM_VMW_PARAM_FIFO_CAPS 4
74#define DRM_VMW_PARAM_FIFO_CAPS 5 84#define DRM_VMW_PARAM_MAX_FB_SIZE 5
75 85#define DRM_VMW_PARAM_FIFO_HW_VERSION 6
76/** 86#define DRM_VMW_PARAM_MAX_SURF_MEMORY 7
77 * struct drm_vmw_getparam_arg 87#define DRM_VMW_PARAM_3D_CAPS_SIZE 8
78 * 88#define DRM_VMW_PARAM_MAX_MOB_MEMORY 9
79 * @value: Returned value. //Out 89#define DRM_VMW_PARAM_MAX_MOB_SIZE 10
80 * @param: Parameter to query. //In. 90#define DRM_VMW_PARAM_SCREEN_TARGET 11
81 * 91#define DRM_VMW_PARAM_DX 12
82 * Argument to the DRM_VMW_GET_PARAM Ioctl.
83 */
84
85struct drm_vmw_getparam_arg {
86 uint64_t value;
87 uint32_t param;
88 uint32_t pad64;
89};
90
91/*************************************************************************/
92/**
93 * DRM_VMW_EXTENSION - Query device extensions.
94 */
95 92
96/** 93/**
97 * struct drm_vmw_extension_rep 94 * enum drm_vmw_handle_type - handle type for ref ioctls
98 *
99 * @exists: The queried extension exists.
100 * @driver_ioctl_offset: Ioctl number of the first ioctl in the extension.
101 * @driver_sarea_offset: Offset to any space in the DRI SAREA
102 * used by the extension.
103 * @major: Major version number of the extension.
104 * @minor: Minor version number of the extension.
105 * @pl: Patch level version number of the extension.
106 * 95 *
107 * Output argument to the DRM_VMW_EXTENSION Ioctl.
108 */ 96 */
109 97enum drm_vmw_handle_type {
110struct drm_vmw_extension_rep { 98 DRM_VMW_HANDLE_LEGACY = 0,
111 int32_t exists; 99 DRM_VMW_HANDLE_PRIME = 1
112 uint32_t driver_ioctl_offset;
113 uint32_t driver_sarea_offset;
114 uint32_t major;
115 uint32_t minor;
116 uint32_t pl;
117 uint32_t pad64;
118}; 100};
119 101
120/** 102/**
121 * union drm_vmw_extension_arg 103 * struct drm_vmw_getparam_arg
122 * 104 *
123 * @extension - Ascii name of the extension to be queried. //In 105 * @value: Returned value. //Out
124 * @rep - Reply as defined above. //Out 106 * @param: Parameter to query. //In.
125 * 107 *
126 * Argument to the DRM_VMW_EXTENSION Ioctl. 108 * Argument to the DRM_VMW_GET_PARAM Ioctl.
127 */ 109 */
128 110
129union drm_vmw_extension_arg { 111struct drm_vmw_getparam_arg {
130 char extension[DRM_VMW_EXT_NAME_LEN]; 112 __u64 value;
131 struct drm_vmw_extension_rep rep; 113 __u32 param;
114 __u32 pad64;
132}; 115};
133 116
134/*************************************************************************/ 117/*************************************************************************/
@@ -149,8 +132,8 @@ union drm_vmw_extension_arg {
149 */ 132 */
150 133
151struct drm_vmw_context_arg { 134struct drm_vmw_context_arg {
152 int32_t cid; 135 __s32 cid;
153 uint32_t pad64; 136 __u32 pad64;
154}; 137};
155 138
156/*************************************************************************/ 139/*************************************************************************/
@@ -180,7 +163,7 @@ struct drm_vmw_context_arg {
180 * @mip_levels: Number of mip levels for each face. 163 * @mip_levels: Number of mip levels for each face.
181 * An unused face should have 0 encoded. 164 * An unused face should have 0 encoded.
182 * @size_addr: Address of a user-space array of sruct drm_vmw_size 165 * @size_addr: Address of a user-space array of sruct drm_vmw_size
183 * cast to an uint64_t for 32-64 bit compatibility. 166 * cast to an __u64 for 32-64 bit compatibility.
184 * The size of the array should equal the total number of mipmap levels. 167 * The size of the array should equal the total number of mipmap levels.
185 * @shareable: Boolean whether other clients (as identified by file descriptors) 168 * @shareable: Boolean whether other clients (as identified by file descriptors)
186 * may reference this surface. 169 * may reference this surface.
@@ -192,18 +175,19 @@ struct drm_vmw_context_arg {
192 */ 175 */
193 176
194struct drm_vmw_surface_create_req { 177struct drm_vmw_surface_create_req {
195 uint32_t flags; 178 __u32 flags;
196 uint32_t format; 179 __u32 format;
197 uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES]; 180 __u32 mip_levels[DRM_VMW_MAX_SURFACE_FACES];
198 uint64_t size_addr; 181 __u64 size_addr;
199 int32_t shareable; 182 __s32 shareable;
200 int32_t scanout; 183 __s32 scanout;
201}; 184};
202 185
203/** 186/**
204 * struct drm_wmv_surface_arg 187 * struct drm_wmv_surface_arg
205 * 188 *
206 * @sid: Surface id of created surface or surface to destroy or reference. 189 * @sid: Surface id of created surface or surface to destroy or reference.
190 * @handle_type: Handle type for DRM_VMW_REF_SURFACE Ioctl.
207 * 191 *
208 * Output data from the DRM_VMW_CREATE_SURFACE Ioctl. 192 * Output data from the DRM_VMW_CREATE_SURFACE Ioctl.
209 * Input argument to the DRM_VMW_UNREF_SURFACE Ioctl. 193 * Input argument to the DRM_VMW_UNREF_SURFACE Ioctl.
@@ -211,8 +195,8 @@ struct drm_vmw_surface_create_req {
211 */ 195 */
212 196
213struct drm_vmw_surface_arg { 197struct drm_vmw_surface_arg {
214 int32_t sid; 198 __s32 sid;
215 uint32_t pad64; 199 enum drm_vmw_handle_type handle_type;
216}; 200};
217 201
218/** 202/**
@@ -227,10 +211,10 @@ struct drm_vmw_surface_arg {
227 */ 211 */
228 212
229struct drm_vmw_size { 213struct drm_vmw_size {
230 uint32_t width; 214 __u32 width;
231 uint32_t height; 215 __u32 height;
232 uint32_t depth; 216 __u32 depth;
233 uint32_t pad64; 217 __u32 pad64;
234}; 218};
235 219
236/** 220/**
@@ -291,20 +275,20 @@ union drm_vmw_surface_reference_arg {
291 * DRM_VMW_EXECBUF 275 * DRM_VMW_EXECBUF
292 * 276 *
293 * Submit a command buffer for execution on the host, and return a 277 * Submit a command buffer for execution on the host, and return a
294 * fence sequence that when signaled, indicates that the command buffer has 278 * fence seqno that when signaled, indicates that the command buffer has
295 * executed. 279 * executed.
296 */ 280 */
297 281
298/** 282/**
299 * struct drm_vmw_execbuf_arg 283 * struct drm_vmw_execbuf_arg
300 * 284 *
301 * @commands: User-space address of a command buffer cast to an uint64_t. 285 * @commands: User-space address of a command buffer cast to an __u64.
302 * @command-size: Size in bytes of the command buffer. 286 * @command-size: Size in bytes of the command buffer.
303 * @throttle-us: Sleep until software is less than @throttle_us 287 * @throttle-us: Sleep until software is less than @throttle_us
304 * microseconds ahead of hardware. The driver may round this value 288 * microseconds ahead of hardware. The driver may round this value
305 * to the nearest kernel tick. 289 * to the nearest kernel tick.
306 * @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to an 290 * @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to an
307 * uint64_t. 291 * __u64.
308 * @version: Allows expanding the execbuf ioctl parameters without breaking 292 * @version: Allows expanding the execbuf ioctl parameters without breaking
309 * backwards compatibility, since user-space will always tell the kernel 293 * backwards compatibility, since user-space will always tell the kernel
310 * which version it uses. 294 * which version it uses.
@@ -313,21 +297,32 @@ union drm_vmw_surface_reference_arg {
313 * Argument to the DRM_VMW_EXECBUF Ioctl. 297 * Argument to the DRM_VMW_EXECBUF Ioctl.
314 */ 298 */
315 299
316#define DRM_VMW_EXECBUF_VERSION 0 300#define DRM_VMW_EXECBUF_VERSION 2
317 301
318struct drm_vmw_execbuf_arg { 302struct drm_vmw_execbuf_arg {
319 uint64_t commands; 303 __u64 commands;
320 uint32_t command_size; 304 __u32 command_size;
321 uint32_t throttle_us; 305 __u32 throttle_us;
322 uint64_t fence_rep; 306 __u64 fence_rep;
323 uint32_t version; 307 __u32 version;
324 uint32_t flags; 308 __u32 flags;
309 __u32 context_handle;
310 __u32 pad64;
325}; 311};
326 312
327/** 313/**
328 * struct drm_vmw_fence_rep 314 * struct drm_vmw_fence_rep
329 * 315 *
330 * @fence_seq: Fence sequence associated with a command submission. 316 * @handle: Fence object handle for fence associated with a command submission.
317 * @mask: Fence flags relevant for this fence object.
318 * @seqno: Fence sequence number in fifo. A fence object with a lower
319 * seqno will signal the EXEC flag before a fence object with a higher
320 * seqno. This can be used by user-space to avoid kernel calls to determine
321 * whether a fence has signaled the EXEC flag. Note that @seqno will
322 * wrap at 32-bit.
323 * @passed_seqno: The highest seqno number processed by the hardware
324 * so far. This can be used to mark user-space fence objects as signaled, and
325 * to determine whether a fence seqno might be stale.
331 * @error: This member should've been set to -EFAULT on submission. 326 * @error: This member should've been set to -EFAULT on submission.
332 * The following actions should be take on completion: 327 * The following actions should be take on completion:
333 * error == -EFAULT: Fence communication failed. The host is synchronized. 328 * error == -EFAULT: Fence communication failed. The host is synchronized.
@@ -341,9 +336,12 @@ struct drm_vmw_execbuf_arg {
341 */ 336 */
342 337
343struct drm_vmw_fence_rep { 338struct drm_vmw_fence_rep {
344 uint64_t fence_seq; 339 __u32 handle;
345 int32_t error; 340 __u32 mask;
346 uint32_t pad64; 341 __u32 seqno;
342 __u32 passed_seqno;
343 __u32 pad64;
344 __s32 error;
347}; 345};
348 346
349/*************************************************************************/ 347/*************************************************************************/
@@ -373,8 +371,8 @@ struct drm_vmw_fence_rep {
373 */ 371 */
374 372
375struct drm_vmw_alloc_dmabuf_req { 373struct drm_vmw_alloc_dmabuf_req {
376 uint32_t size; 374 __u32 size;
377 uint32_t pad64; 375 __u32 pad64;
378}; 376};
379 377
380/** 378/**
@@ -391,11 +389,11 @@ struct drm_vmw_alloc_dmabuf_req {
391 */ 389 */
392 390
393struct drm_vmw_dmabuf_rep { 391struct drm_vmw_dmabuf_rep {
394 uint64_t map_handle; 392 __u64 map_handle;
395 uint32_t handle; 393 __u32 handle;
396 uint32_t cur_gmr_id; 394 __u32 cur_gmr_id;
397 uint32_t cur_gmr_offset; 395 __u32 cur_gmr_offset;
398 uint32_t pad64; 396 __u32 pad64;
399}; 397};
400 398
401/** 399/**
@@ -428,41 +426,8 @@ union drm_vmw_alloc_dmabuf_arg {
428 */ 426 */
429 427
430struct drm_vmw_unref_dmabuf_arg { 428struct drm_vmw_unref_dmabuf_arg {
431 uint32_t handle; 429 __u32 handle;
432 uint32_t pad64; 430 __u32 pad64;
433};
434
435/*************************************************************************/
436/**
437 * DRM_VMW_FIFO_DEBUG - Get last FIFO submission.
438 *
439 * This IOCTL copies the last FIFO submission directly out of the FIFO buffer.
440 */
441
442/**
443 * struct drm_vmw_fifo_debug_arg
444 *
445 * @debug_buffer: User space address of a debug_buffer cast to an uint64_t //In
446 * @debug_buffer_size: Size in bytes of debug buffer //In
447 * @used_size: Number of bytes copied to the buffer // Out
448 * @did_not_fit: Boolean indicating that the fifo contents did not fit. //Out
449 *
450 * Argument to the DRM_VMW_FIFO_DEBUG Ioctl.
451 */
452
453struct drm_vmw_fifo_debug_arg {
454 uint64_t debug_buffer;
455 uint32_t debug_buffer_size;
456 uint32_t used_size;
457 int32_t did_not_fit;
458 uint32_t pad64;
459};
460
461struct drm_vmw_fence_wait_arg {
462 uint64_t sequence;
463 uint64_t kernel_cookie;
464 int32_t cookie_valid;
465 int32_t pad64;
466}; 431};
467 432
468/*************************************************************************/ 433/*************************************************************************/
@@ -485,10 +450,10 @@ struct drm_vmw_fence_wait_arg {
485 */ 450 */
486 451
487struct drm_vmw_rect { 452struct drm_vmw_rect {
488 int32_t x; 453 __s32 x;
489 int32_t y; 454 __s32 y;
490 uint32_t w; 455 __u32 w;
491 uint32_t h; 456 __u32 h;
492}; 457};
493 458
494/** 459/**
@@ -510,21 +475,21 @@ struct drm_vmw_rect {
510 */ 475 */
511 476
512struct drm_vmw_control_stream_arg { 477struct drm_vmw_control_stream_arg {
513 uint32_t stream_id; 478 __u32 stream_id;
514 uint32_t enabled; 479 __u32 enabled;
515 480
516 uint32_t flags; 481 __u32 flags;
517 uint32_t color_key; 482 __u32 color_key;
518 483
519 uint32_t handle; 484 __u32 handle;
520 uint32_t offset; 485 __u32 offset;
521 int32_t format; 486 __s32 format;
522 uint32_t size; 487 __u32 size;
523 uint32_t width; 488 __u32 width;
524 uint32_t height; 489 __u32 height;
525 uint32_t pitch[3]; 490 __u32 pitch[3];
526 491
527 uint32_t pad64; 492 __u32 pad64;
528 struct drm_vmw_rect src; 493 struct drm_vmw_rect src;
529 struct drm_vmw_rect dst; 494 struct drm_vmw_rect dst;
530}; 495};
@@ -552,12 +517,12 @@ struct drm_vmw_control_stream_arg {
552 */ 517 */
553 518
554struct drm_vmw_cursor_bypass_arg { 519struct drm_vmw_cursor_bypass_arg {
555 uint32_t flags; 520 __u32 flags;
556 uint32_t crtc_id; 521 __u32 crtc_id;
557 int32_t xpos; 522 __s32 xpos;
558 int32_t ypos; 523 __s32 ypos;
559 int32_t xhot; 524 __s32 xhot;
560 int32_t yhot; 525 __s32 yhot;
561}; 526};
562 527
563/*************************************************************************/ 528/*************************************************************************/
@@ -575,8 +540,8 @@ struct drm_vmw_cursor_bypass_arg {
575 */ 540 */
576 541
577struct drm_vmw_stream_arg { 542struct drm_vmw_stream_arg {
578 uint32_t stream_id; 543 __u32 stream_id;
579 uint32_t pad64; 544 __u32 pad64;
580}; 545};
581 546
582/*************************************************************************/ 547/*************************************************************************/
@@ -589,26 +554,537 @@ struct drm_vmw_stream_arg {
589 554
590/*************************************************************************/ 555/*************************************************************************/
591/** 556/**
557 * DRM_VMW_GET_3D_CAP
558 *
559 * Read 3D capabilities from the FIFO
560 *
561 */
562
563/**
564 * struct drm_vmw_get_3d_cap_arg
565 *
566 * @buffer: Pointer to a buffer for capability data, cast to an __u64
567 * @size: Max size to copy
568 *
569 * Input argument to the DRM_VMW_GET_3D_CAP_IOCTL
570 * ioctls.
571 */
572
573struct drm_vmw_get_3d_cap_arg {
574 __u64 buffer;
575 __u32 max_size;
576 __u32 pad64;
577};
578
579/*************************************************************************/
580/**
581 * DRM_VMW_FENCE_WAIT
582 *
583 * Waits for a fence object to signal. The wait is interruptible, so that
584 * signals may be delivered during the interrupt. The wait may timeout,
585 * in which case the calls returns -EBUSY. If the wait is restarted,
586 * that is restarting without resetting @cookie_valid to zero,
587 * the timeout is computed from the first call.
588 *
589 * The flags argument to the DRM_VMW_FENCE_WAIT ioctl indicates what to wait
590 * on:
591 * DRM_VMW_FENCE_FLAG_EXEC: All commands ahead of the fence in the command
592 * stream
593 * have executed.
594 * DRM_VMW_FENCE_FLAG_QUERY: All query results resulting from query finish
595 * commands
596 * in the buffer given to the EXECBUF ioctl returning the fence object handle
597 * are available to user-space.
598 *
599 * DRM_VMW_WAIT_OPTION_UNREF: If this wait option is given, and the
600 * fenc wait ioctl returns 0, the fence object has been unreferenced after
601 * the wait.
602 */
603
604#define DRM_VMW_FENCE_FLAG_EXEC (1 << 0)
605#define DRM_VMW_FENCE_FLAG_QUERY (1 << 1)
606
607#define DRM_VMW_WAIT_OPTION_UNREF (1 << 0)
608
609/**
610 * struct drm_vmw_fence_wait_arg
611 *
612 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
613 * @cookie_valid: Must be reset to 0 on first call. Left alone on restart.
614 * @kernel_cookie: Set to 0 on first call. Left alone on restart.
615 * @timeout_us: Wait timeout in microseconds. 0 for indefinite timeout.
616 * @lazy: Set to 1 if timing is not critical. Allow more than a kernel tick
617 * before returning.
618 * @flags: Fence flags to wait on.
619 * @wait_options: Options that control the behaviour of the wait ioctl.
620 *
621 * Input argument to the DRM_VMW_FENCE_WAIT ioctl.
622 */
623
624struct drm_vmw_fence_wait_arg {
625 __u32 handle;
626 __s32 cookie_valid;
627 __u64 kernel_cookie;
628 __u64 timeout_us;
629 __s32 lazy;
630 __s32 flags;
631 __s32 wait_options;
632 __s32 pad64;
633};
634
635/*************************************************************************/
636/**
637 * DRM_VMW_FENCE_SIGNALED
638 *
639 * Checks if a fence object is signaled..
640 */
641
642/**
643 * struct drm_vmw_fence_signaled_arg
644 *
645 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
646 * @flags: Fence object flags input to DRM_VMW_FENCE_SIGNALED ioctl
647 * @signaled: Out: Flags signaled.
648 * @sequence: Out: Highest sequence passed so far. Can be used to signal the
649 * EXEC flag of user-space fence objects.
650 *
651 * Input/Output argument to the DRM_VMW_FENCE_SIGNALED and DRM_VMW_FENCE_UNREF
652 * ioctls.
653 */
654
655struct drm_vmw_fence_signaled_arg {
656 __u32 handle;
657 __u32 flags;
658 __s32 signaled;
659 __u32 passed_seqno;
660 __u32 signaled_flags;
661 __u32 pad64;
662};
663
664/*************************************************************************/
665/**
666 * DRM_VMW_FENCE_UNREF
667 *
668 * Unreferences a fence object, and causes it to be destroyed if there are no
669 * other references to it.
670 *
671 */
672
673/**
674 * struct drm_vmw_fence_arg
675 *
676 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
677 *
678 * Input/Output argument to the DRM_VMW_FENCE_UNREF ioctl..
679 */
680
681struct drm_vmw_fence_arg {
682 __u32 handle;
683 __u32 pad64;
684};
685
686
687/*************************************************************************/
688/**
689 * DRM_VMW_FENCE_EVENT
690 *
691 * Queues an event on a fence to be delivered on the drm character device
692 * when the fence has signaled the DRM_VMW_FENCE_FLAG_EXEC flag.
693 * Optionally the approximate time when the fence signaled is
694 * given by the event.
695 */
696
697/*
698 * The event type
699 */
700#define DRM_VMW_EVENT_FENCE_SIGNALED 0x80000000
701
702struct drm_vmw_event_fence {
703 struct drm_event base;
704 __u64 user_data;
705 __u32 tv_sec;
706 __u32 tv_usec;
707};
708
709/*
710 * Flags that may be given to the command.
711 */
712/* Request fence signaled time on the event. */
713#define DRM_VMW_FE_FLAG_REQ_TIME (1 << 0)
714
715/**
716 * struct drm_vmw_fence_event_arg
717 *
718 * @fence_rep: Pointer to fence_rep structure cast to __u64 or 0 if
719 * the fence is not supposed to be referenced by user-space.
720 * @user_info: Info to be delivered with the event.
721 * @handle: Attach the event to this fence only.
722 * @flags: A set of flags as defined above.
723 */
724struct drm_vmw_fence_event_arg {
725 __u64 fence_rep;
726 __u64 user_data;
727 __u32 handle;
728 __u32 flags;
729};
730
731
732/*************************************************************************/
733/**
734 * DRM_VMW_PRESENT
735 *
736 * Executes an SVGA present on a given fb for a given surface. The surface
737 * is placed on the framebuffer. Cliprects are given relative to the given
738 * point (the point disignated by dest_{x|y}).
739 *
740 */
741
742/**
743 * struct drm_vmw_present_arg
744 * @fb_id: framebuffer id to present / read back from.
745 * @sid: Surface id to present from.
746 * @dest_x: X placement coordinate for surface.
747 * @dest_y: Y placement coordinate for surface.
748 * @clips_ptr: Pointer to an array of clip rects cast to an __u64.
749 * @num_clips: Number of cliprects given relative to the framebuffer origin,
750 * in the same coordinate space as the frame buffer.
751 * @pad64: Unused 64-bit padding.
752 *
753 * Input argument to the DRM_VMW_PRESENT ioctl.
754 */
755
756struct drm_vmw_present_arg {
757 __u32 fb_id;
758 __u32 sid;
759 __s32 dest_x;
760 __s32 dest_y;
761 __u64 clips_ptr;
762 __u32 num_clips;
763 __u32 pad64;
764};
765
766
767/*************************************************************************/
768/**
769 * DRM_VMW_PRESENT_READBACK
770 *
771 * Executes an SVGA present readback from a given fb to the dma buffer
772 * currently bound as the fb. If there is no dma buffer bound to the fb,
773 * an error will be returned.
774 *
775 */
776
777/**
778 * struct drm_vmw_present_arg
779 * @fb_id: fb_id to present / read back from.
780 * @num_clips: Number of cliprects.
781 * @clips_ptr: Pointer to an array of clip rects cast to an __u64.
782 * @fence_rep: Pointer to a struct drm_vmw_fence_rep, cast to an __u64.
783 * If this member is NULL, then the ioctl should not return a fence.
784 */
785
786struct drm_vmw_present_readback_arg {
787 __u32 fb_id;
788 __u32 num_clips;
789 __u64 clips_ptr;
790 __u64 fence_rep;
791};
792
793/*************************************************************************/
794/**
592 * DRM_VMW_UPDATE_LAYOUT - Update layout 795 * DRM_VMW_UPDATE_LAYOUT - Update layout
593 * 796 *
594 * Updates the prefered modes and connection status for connectors. The 797 * Updates the preferred modes and connection status for connectors. The
595 * command conisits of one drm_vmw_update_layout_arg pointing out a array 798 * command consists of one drm_vmw_update_layout_arg pointing to an array
596 * of num_outputs drm_vmw_rect's. 799 * of num_outputs drm_vmw_rect's.
597 */ 800 */
598 801
599/** 802/**
600 * struct drm_vmw_update_layout_arg 803 * struct drm_vmw_update_layout_arg
601 * 804 *
602 * @num_outputs: number of active 805 * @num_outputs: number of active connectors
603 * @rects: pointer to array of drm_vmw_rect 806 * @rects: pointer to array of drm_vmw_rect cast to an __u64
604 * 807 *
605 * Input argument to the DRM_VMW_UPDATE_LAYOUT Ioctl. 808 * Input argument to the DRM_VMW_UPDATE_LAYOUT Ioctl.
606 */ 809 */
607
608struct drm_vmw_update_layout_arg { 810struct drm_vmw_update_layout_arg {
609 uint32_t num_outputs; 811 __u32 num_outputs;
610 uint32_t pad64; 812 __u32 pad64;
611 uint64_t rects; 813 __u64 rects;
814};
815
816
817/*************************************************************************/
818/**
819 * DRM_VMW_CREATE_SHADER - Create shader
820 *
821 * Creates a shader and optionally binds it to a dma buffer containing
822 * the shader byte-code.
823 */
824
825/**
826 * enum drm_vmw_shader_type - Shader types
827 */
828enum drm_vmw_shader_type {
829 drm_vmw_shader_type_vs = 0,
830 drm_vmw_shader_type_ps,
612}; 831};
613 832
833
834/**
835 * struct drm_vmw_shader_create_arg
836 *
837 * @shader_type: Shader type of the shader to create.
838 * @size: Size of the byte-code in bytes.
839 * where the shader byte-code starts
840 * @buffer_handle: Buffer handle identifying the buffer containing the
841 * shader byte-code
842 * @shader_handle: On successful completion contains a handle that
843 * can be used to subsequently identify the shader.
844 * @offset: Offset in bytes into the buffer given by @buffer_handle,
845 *
846 * Input / Output argument to the DRM_VMW_CREATE_SHADER Ioctl.
847 */
848struct drm_vmw_shader_create_arg {
849 enum drm_vmw_shader_type shader_type;
850 __u32 size;
851 __u32 buffer_handle;
852 __u32 shader_handle;
853 __u64 offset;
854};
855
856/*************************************************************************/
857/**
858 * DRM_VMW_UNREF_SHADER - Unreferences a shader
859 *
860 * Destroys a user-space reference to a shader, optionally destroying
861 * it.
862 */
863
864/**
865 * struct drm_vmw_shader_arg
866 *
867 * @handle: Handle identifying the shader to destroy.
868 *
869 * Input argument to the DRM_VMW_UNREF_SHADER ioctl.
870 */
871struct drm_vmw_shader_arg {
872 __u32 handle;
873 __u32 pad64;
874};
875
876/*************************************************************************/
877/**
878 * DRM_VMW_GB_SURFACE_CREATE - Create a host guest-backed surface.
879 *
880 * Allocates a surface handle and queues a create surface command
881 * for the host on the first use of the surface. The surface ID can
882 * be used as the surface ID in commands referencing the surface.
883 */
884
885/**
886 * enum drm_vmw_surface_flags
887 *
888 * @drm_vmw_surface_flag_shareable: Whether the surface is shareable
889 * @drm_vmw_surface_flag_scanout: Whether the surface is a scanout
890 * surface.
891 * @drm_vmw_surface_flag_create_buffer: Create a backup buffer if none is
892 * given.
893 */
894enum drm_vmw_surface_flags {
895 drm_vmw_surface_flag_shareable = (1 << 0),
896 drm_vmw_surface_flag_scanout = (1 << 1),
897 drm_vmw_surface_flag_create_buffer = (1 << 2)
898};
899
900/**
901 * struct drm_vmw_gb_surface_create_req
902 *
903 * @svga3d_flags: SVGA3d surface flags for the device.
904 * @format: SVGA3d format.
905 * @mip_level: Number of mip levels for all faces.
906 * @drm_surface_flags Flags as described above.
907 * @multisample_count Future use. Set to 0.
908 * @autogen_filter Future use. Set to 0.
909 * @buffer_handle Buffer handle of backup buffer. SVGA3D_INVALID_ID
910 * if none.
911 * @base_size Size of the base mip level for all faces.
912 * @array_size Must be zero for non-DX hardware, and if non-zero
913 * svga3d_flags must have proper bind flags setup.
914 *
915 * Input argument to the DRM_VMW_GB_SURFACE_CREATE Ioctl.
916 * Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl.
917 */
918struct drm_vmw_gb_surface_create_req {
919 __u32 svga3d_flags;
920 __u32 format;
921 __u32 mip_levels;
922 enum drm_vmw_surface_flags drm_surface_flags;
923 __u32 multisample_count;
924 __u32 autogen_filter;
925 __u32 buffer_handle;
926 __u32 array_size;
927 struct drm_vmw_size base_size;
928};
929
930/**
931 * struct drm_vmw_gb_surface_create_rep
932 *
933 * @handle: Surface handle.
934 * @backup_size: Size of backup buffers for this surface.
935 * @buffer_handle: Handle of backup buffer. SVGA3D_INVALID_ID if none.
936 * @buffer_size: Actual size of the buffer identified by
937 * @buffer_handle
938 * @buffer_map_handle: Offset into device address space for the buffer
939 * identified by @buffer_handle.
940 *
941 * Part of output argument for the DRM_VMW_GB_SURFACE_REF ioctl.
942 * Output argument for the DRM_VMW_GB_SURFACE_CREATE ioctl.
943 */
944struct drm_vmw_gb_surface_create_rep {
945 __u32 handle;
946 __u32 backup_size;
947 __u32 buffer_handle;
948 __u32 buffer_size;
949 __u64 buffer_map_handle;
950};
951
952/**
953 * union drm_vmw_gb_surface_create_arg
954 *
955 * @req: Input argument as described above.
956 * @rep: Output argument as described above.
957 *
958 * Argument to the DRM_VMW_GB_SURFACE_CREATE ioctl.
959 */
960union drm_vmw_gb_surface_create_arg {
961 struct drm_vmw_gb_surface_create_rep rep;
962 struct drm_vmw_gb_surface_create_req req;
963};
964
965/*************************************************************************/
966/**
967 * DRM_VMW_GB_SURFACE_REF - Reference a host surface.
968 *
969 * Puts a reference on a host surface with a given handle, as previously
970 * returned by the DRM_VMW_GB_SURFACE_CREATE ioctl.
971 * A reference will make sure the surface isn't destroyed while we hold
972 * it and will allow the calling client to use the surface handle in
973 * the command stream.
974 *
975 * On successful return, the Ioctl returns the surface information given
976 * to and returned from the DRM_VMW_GB_SURFACE_CREATE ioctl.
977 */
978
979/**
980 * struct drm_vmw_gb_surface_reference_arg
981 *
982 * @creq: The data used as input when the surface was created, as described
983 * above at "struct drm_vmw_gb_surface_create_req"
984 * @crep: Additional data output when the surface was created, as described
985 * above at "struct drm_vmw_gb_surface_create_rep"
986 *
987 * Output Argument to the DRM_VMW_GB_SURFACE_REF ioctl.
988 */
989struct drm_vmw_gb_surface_ref_rep {
990 struct drm_vmw_gb_surface_create_req creq;
991 struct drm_vmw_gb_surface_create_rep crep;
992};
993
994/**
995 * union drm_vmw_gb_surface_reference_arg
996 *
997 * @req: Input data as described above at "struct drm_vmw_surface_arg"
998 * @rep: Output data as described above at "struct drm_vmw_gb_surface_ref_rep"
999 *
1000 * Argument to the DRM_VMW_GB_SURFACE_REF Ioctl.
1001 */
1002union drm_vmw_gb_surface_reference_arg {
1003 struct drm_vmw_gb_surface_ref_rep rep;
1004 struct drm_vmw_surface_arg req;
1005};
1006
1007
1008/*************************************************************************/
1009/**
1010 * DRM_VMW_SYNCCPU - Sync a DMA buffer / MOB for CPU access.
1011 *
1012 * Idles any previously submitted GPU operations on the buffer and
1013 * by default blocks command submissions that reference the buffer.
1014 * If the file descriptor used to grab a blocking CPU sync is closed, the
1015 * cpu sync is released.
1016 * The flags argument indicates how the grab / release operation should be
1017 * performed:
1018 */
1019
1020/**
1021 * enum drm_vmw_synccpu_flags - Synccpu flags:
1022 *
1023 * @drm_vmw_synccpu_read: Sync for read. If sync is done for read only, it's a
1024 * hint to the kernel to allow command submissions that references the buffer
1025 * for read-only.
1026 * @drm_vmw_synccpu_write: Sync for write. Block all command submissions
1027 * referencing this buffer.
1028 * @drm_vmw_synccpu_dontblock: Dont wait for GPU idle, but rather return
1029 * -EBUSY should the buffer be busy.
1030 * @drm_vmw_synccpu_allow_cs: Allow command submission that touches the buffer
1031 * while the buffer is synced for CPU. This is similar to the GEM bo idle
1032 * behavior.
1033 */
1034enum drm_vmw_synccpu_flags {
1035 drm_vmw_synccpu_read = (1 << 0),
1036 drm_vmw_synccpu_write = (1 << 1),
1037 drm_vmw_synccpu_dontblock = (1 << 2),
1038 drm_vmw_synccpu_allow_cs = (1 << 3)
1039};
1040
1041/**
1042 * enum drm_vmw_synccpu_op - Synccpu operations:
1043 *
1044 * @drm_vmw_synccpu_grab: Grab the buffer for CPU operations
1045 * @drm_vmw_synccpu_release: Release a previous grab.
1046 */
1047enum drm_vmw_synccpu_op {
1048 drm_vmw_synccpu_grab,
1049 drm_vmw_synccpu_release
1050};
1051
1052/**
1053 * struct drm_vmw_synccpu_arg
1054 *
1055 * @op: The synccpu operation as described above.
1056 * @handle: Handle identifying the buffer object.
1057 * @flags: Flags as described above.
1058 */
1059struct drm_vmw_synccpu_arg {
1060 enum drm_vmw_synccpu_op op;
1061 enum drm_vmw_synccpu_flags flags;
1062 __u32 handle;
1063 __u32 pad64;
1064};
1065
1066/*************************************************************************/
1067/**
1068 * DRM_VMW_CREATE_EXTENDED_CONTEXT - Create a host context.
1069 *
1070 * Allocates a device unique context id, and queues a create context command
1071 * for the host. Does not wait for host completion.
1072 */
1073enum drm_vmw_extended_context {
1074 drm_vmw_context_legacy,
1075 drm_vmw_context_dx
1076};
1077
1078/**
1079 * union drm_vmw_extended_context_arg
1080 *
1081 * @req: Context type.
1082 * @rep: Context identifier.
1083 *
1084 * Argument to the DRM_VMW_CREATE_EXTENDED_CONTEXT Ioctl.
1085 */
1086union drm_vmw_extended_context_arg {
1087 enum drm_vmw_extended_context req;
1088 struct drm_vmw_context_arg rep;
1089};
614#endif 1090#endif
diff --git a/intel/Android.mk b/intel/Android.mk
index 9084dc3b..2a0dc4cb 100644
--- a/intel/Android.mk
+++ b/intel/Android.mk
@@ -29,11 +29,10 @@ include $(LOCAL_PATH)/Makefile.sources
29 29
30LOCAL_MODULE := libdrm_intel 30LOCAL_MODULE := libdrm_intel
31 31
32# Removed dependency to libpciaccess: not used on Android
32LOCAL_SHARED_LIBRARIES := libdrm 33LOCAL_SHARED_LIBRARIES := libdrm
33 34
34LOCAL_SRC_FILES := $(filter-out %.h,$(LIBDRM_INTEL_FILES)) 35LOCAL_SRC_FILES := $(LIBDRM_INTEL_FILES)
35
36LOCAL_CFLAGS := \
37 -DHAVE_LIBDRM_ATOMIC_PRIMITIVES=1
38 36
37include $(LIBDRM_COMMON_MK)
39include $(BUILD_SHARED_LIBRARY) 38include $(BUILD_SHARED_LIBRARY)
diff --git a/intel/Makefile.am b/intel/Makefile.am
index d0045684..c52e8c08 100644
--- a/intel/Makefile.am
+++ b/intel/Makefile.am
@@ -66,7 +66,6 @@ EXTRA_DIST = \
66 $(BATCHES:.batch=.batch-ref.txt) \ 66 $(BATCHES:.batch=.batch-ref.txt) \
67 $(BATCHES:.batch=.batch-ref.txt) \ 67 $(BATCHES:.batch=.batch-ref.txt) \
68 tests/test-batch.sh \ 68 tests/test-batch.sh \
69 Android.mk \
70 $(TESTS) 69 $(TESTS)
71 70
72test_decode_LDADD = libdrm_intel.la ../libdrm.la 71test_decode_LDADD = libdrm_intel.la ../libdrm.la
diff --git a/intel/Makefile.sources b/intel/Makefile.sources
index 7b2272c7..6947ab74 100644
--- a/intel/Makefile.sources
+++ b/intel/Makefile.sources
@@ -6,7 +6,8 @@ LIBDRM_INTEL_FILES := \
6 intel_decode.c \ 6 intel_decode.c \
7 intel_chipset.h \ 7 intel_chipset.h \
8 mm.c \ 8 mm.c \
9 mm.h 9 mm.h \
10 uthash.h
10 11
11LIBDRM_INTEL_H_FILES := \ 12LIBDRM_INTEL_H_FILES := \
12 intel_bufmgr.h \ 13 intel_bufmgr.h \
diff --git a/intel/intel-symbol-check b/intel/intel-symbol-check
index bde7634c..2aa2d819 100755
--- a/intel/intel-symbol-check
+++ b/intel/intel-symbol-check
@@ -50,6 +50,7 @@ drm_intel_bufmgr_fake_init
50drm_intel_bufmgr_fake_set_exec_callback 50drm_intel_bufmgr_fake_set_exec_callback
51drm_intel_bufmgr_fake_set_fence_callback 51drm_intel_bufmgr_fake_set_fence_callback
52drm_intel_bufmgr_fake_set_last_dispatch 52drm_intel_bufmgr_fake_set_last_dispatch
53drm_intel_bufmgr_gem_can_disable_implicit_sync
53drm_intel_bufmgr_gem_enable_fenced_relocs 54drm_intel_bufmgr_gem_enable_fenced_relocs
54drm_intel_bufmgr_gem_enable_reuse 55drm_intel_bufmgr_gem_enable_reuse
55drm_intel_bufmgr_gem_get_devid 56drm_intel_bufmgr_gem_get_devid
@@ -69,7 +70,13 @@ drm_intel_decode_set_output_file
69drm_intel_gem_bo_aub_dump_bmp 70drm_intel_gem_bo_aub_dump_bmp
70drm_intel_gem_bo_clear_relocs 71drm_intel_gem_bo_clear_relocs
71drm_intel_gem_bo_context_exec 72drm_intel_gem_bo_context_exec
73drm_intel_gem_bo_disable_implicit_sync
74drm_intel_gem_bo_enable_implicit_sync
75drm_intel_gem_bo_fence_exec
72drm_intel_gem_bo_get_reloc_count 76drm_intel_gem_bo_get_reloc_count
77drm_intel_gem_bo_map__cpu
78drm_intel_gem_bo_map__gtt
79drm_intel_gem_bo_map__wc
73drm_intel_gem_bo_map_gtt 80drm_intel_gem_bo_map_gtt
74drm_intel_gem_bo_map_unsynchronized 81drm_intel_gem_bo_map_unsynchronized
75drm_intel_gem_bo_start_gtt_access 82drm_intel_gem_bo_start_gtt_access
@@ -77,9 +84,12 @@ drm_intel_gem_bo_unmap_gtt
77drm_intel_gem_bo_wait 84drm_intel_gem_bo_wait
78drm_intel_gem_context_create 85drm_intel_gem_context_create
79drm_intel_gem_context_destroy 86drm_intel_gem_context_destroy
87drm_intel_gem_context_get_id
80drm_intel_get_aperture_sizes 88drm_intel_get_aperture_sizes
81drm_intel_get_eu_total 89drm_intel_get_eu_total
90drm_intel_get_min_eu_in_pool
82drm_intel_get_pipe_from_crtc_id 91drm_intel_get_pipe_from_crtc_id
92drm_intel_get_pooled_eu
83drm_intel_get_reset_stats 93drm_intel_get_reset_stats
84drm_intel_get_subslice_total 94drm_intel_get_subslice_total
85drm_intel_reg_read 95drm_intel_reg_read
diff --git a/intel/intel_bufmgr.h b/intel/intel_bufmgr.h
index a1abbcd2..693472a5 100644
--- a/intel/intel_bufmgr.h
+++ b/intel/intel_bufmgr.h
@@ -184,6 +184,15 @@ int drm_intel_gem_bo_map_unsynchronized(drm_intel_bo *bo);
184int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo); 184int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo);
185int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo); 185int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo);
186 186
187#define HAVE_DRM_INTEL_GEM_BO_DISABLE_IMPLICIT_SYNC 1
188int drm_intel_bufmgr_gem_can_disable_implicit_sync(drm_intel_bufmgr *bufmgr);
189void drm_intel_gem_bo_disable_implicit_sync(drm_intel_bo *bo);
190void drm_intel_gem_bo_enable_implicit_sync(drm_intel_bo *bo);
191
192void *drm_intel_gem_bo_map__cpu(drm_intel_bo *bo);
193void *drm_intel_gem_bo_map__gtt(drm_intel_bo *bo);
194void *drm_intel_gem_bo_map__wc(drm_intel_bo *bo);
195
187int drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo); 196int drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo);
188void drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start); 197void drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start);
189void drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable); 198void drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable);
@@ -208,9 +217,17 @@ int drm_intel_bufmgr_gem_get_devid(drm_intel_bufmgr *bufmgr);
208int drm_intel_gem_bo_wait(drm_intel_bo *bo, int64_t timeout_ns); 217int drm_intel_gem_bo_wait(drm_intel_bo *bo, int64_t timeout_ns);
209 218
210drm_intel_context *drm_intel_gem_context_create(drm_intel_bufmgr *bufmgr); 219drm_intel_context *drm_intel_gem_context_create(drm_intel_bufmgr *bufmgr);
220int drm_intel_gem_context_get_id(drm_intel_context *ctx,
221 uint32_t *ctx_id);
211void drm_intel_gem_context_destroy(drm_intel_context *ctx); 222void drm_intel_gem_context_destroy(drm_intel_context *ctx);
212int drm_intel_gem_bo_context_exec(drm_intel_bo *bo, drm_intel_context *ctx, 223int drm_intel_gem_bo_context_exec(drm_intel_bo *bo, drm_intel_context *ctx,
213 int used, unsigned int flags); 224 int used, unsigned int flags);
225int drm_intel_gem_bo_fence_exec(drm_intel_bo *bo,
226 drm_intel_context *ctx,
227 int used,
228 int in_fence,
229 int *out_fence,
230 unsigned int flags);
214 231
215int drm_intel_bo_gem_export_to_prime(drm_intel_bo *bo, int *prime_fd); 232int drm_intel_bo_gem_export_to_prime(drm_intel_bo *bo, int *prime_fd);
216drm_intel_bo *drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, 233drm_intel_bo *drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr,
@@ -273,6 +290,9 @@ int drm_intel_get_reset_stats(drm_intel_context *ctx,
273int drm_intel_get_subslice_total(int fd, unsigned int *subslice_total); 290int drm_intel_get_subslice_total(int fd, unsigned int *subslice_total);
274int drm_intel_get_eu_total(int fd, unsigned int *eu_total); 291int drm_intel_get_eu_total(int fd, unsigned int *eu_total);
275 292
293int drm_intel_get_pooled_eu(int fd);
294int drm_intel_get_min_eu_in_pool(int fd);
295
276/** @{ Compatibility defines to keep old code building despite the symbol rename 296/** @{ Compatibility defines to keep old code building despite the symbol rename
277 * from dri_* to drm_intel_* 297 * from dri_* to drm_intel_*
278 */ 298 */
diff --git a/intel/intel_bufmgr_fake.c b/intel/intel_bufmgr_fake.c
index 7f4c7b9f..641df6a1 100644
--- a/intel/intel_bufmgr_fake.c
+++ b/intel/intel_bufmgr_fake.c
@@ -312,7 +312,7 @@ _fence_wait_internal(drm_intel_bufmgr_fake *bufmgr_fake, int seq)
312 * 312 *
313 * Assume that in userland we treat sequence numbers as ints, which 313 * Assume that in userland we treat sequence numbers as ints, which
314 * makes some of the comparisons convenient, since the sequence 314 * makes some of the comparisons convenient, since the sequence
315 * numbers are all postive signed integers. 315 * numbers are all positive signed integers.
316 * 316 *
317 * From this we get several cases we need to handle. Here's a timeline. 317 * From this we get several cases we need to handle. Here's a timeline.
318 * 0x2 0x7 0x7ffffff8 0x7ffffffd 318 * 0x2 0x7 0x7ffffff8 0x7ffffffd
@@ -737,7 +737,7 @@ drm_intel_bufmgr_fake_wait_idle(drm_intel_bufmgr_fake *bufmgr_fake)
737/** 737/**
738 * Wait for rendering to a buffer to complete. 738 * Wait for rendering to a buffer to complete.
739 * 739 *
740 * It is assumed that the bathcbuffer which performed the rendering included 740 * It is assumed that the batchbuffer which performed the rendering included
741 * the necessary flushing. 741 * the necessary flushing.
742 */ 742 */
743static void 743static void
@@ -1200,7 +1200,7 @@ static int
1200 assert(!(bo_fake->flags & (BM_NO_BACKING_STORE | BM_PINNED))); 1200 assert(!(bo_fake->flags & (BM_NO_BACKING_STORE | BM_PINNED)));
1201 1201
1202 /* Actually, should be able to just wait for a fence on the 1202 /* Actually, should be able to just wait for a fence on the
1203 * mmory, hich we would be tracking when we free it. Waiting 1203 * memory, which we would be tracking when we free it. Waiting
1204 * for idle is a sufficiently large hammer for now. 1204 * for idle is a sufficiently large hammer for now.
1205 */ 1205 */
1206 drm_intel_bufmgr_fake_wait_idle(bufmgr_fake); 1206 drm_intel_bufmgr_fake_wait_idle(bufmgr_fake);
diff --git a/intel/intel_bufmgr_gem.c b/intel/intel_bufmgr_gem.c
index dc28200f..a6656003 100644
--- a/intel/intel_bufmgr_gem.c
+++ b/intel/intel_bufmgr_gem.c
@@ -64,6 +64,7 @@
64#include "string.h" 64#include "string.h"
65 65
66#include "i915_drm.h" 66#include "i915_drm.h"
67#include "uthash.h"
67 68
68#ifdef HAVE_VALGRIND 69#ifdef HAVE_VALGRIND
69#include <valgrind.h> 70#include <valgrind.h>
@@ -130,7 +131,9 @@ typedef struct _drm_intel_bufmgr_gem {
130 131
131 drmMMListHead managers; 132 drmMMListHead managers;
132 133
133 drmMMListHead named; 134 drm_intel_bo_gem *name_table;
135 drm_intel_bo_gem *handle_table;
136
134 drmMMListHead vma_cache; 137 drmMMListHead vma_cache;
135 int vma_count, vma_open, vma_max; 138 int vma_count, vma_open, vma_max;
136 139
@@ -146,6 +149,7 @@ typedef struct _drm_intel_bufmgr_gem {
146 unsigned int bo_reuse : 1; 149 unsigned int bo_reuse : 1;
147 unsigned int no_exec : 1; 150 unsigned int no_exec : 1;
148 unsigned int has_vebox : 1; 151 unsigned int has_vebox : 1;
152 unsigned int has_exec_async : 1;
149 bool fenced_relocs; 153 bool fenced_relocs;
150 154
151 struct { 155 struct {
@@ -175,7 +179,9 @@ struct _drm_intel_bo_gem {
175 * List contains both flink named and prime fd'd objects 179 * List contains both flink named and prime fd'd objects
176 */ 180 */
177 unsigned int global_name; 181 unsigned int global_name;
178 drmMMListHead name_list; 182
183 UT_hash_handle handle_hh;
184 UT_hash_handle name_hh;
179 185
180 /** 186 /**
181 * Index of the buffer within the validation list while preparing a 187 * Index of the buffer within the validation list while preparing a
@@ -190,6 +196,8 @@ struct _drm_intel_bo_gem {
190 uint32_t swizzle_mode; 196 uint32_t swizzle_mode;
191 unsigned long stride; 197 unsigned long stride;
192 198
199 unsigned long kflags;
200
193 time_t free_time; 201 time_t free_time;
194 202
195 /** Array passed to the DRM containing relocation information. */ 203 /** Array passed to the DRM containing relocation information. */
@@ -211,6 +219,8 @@ struct _drm_intel_bo_gem {
211 void *mem_virtual; 219 void *mem_virtual;
212 /** GTT virtual address for the buffer, saved across map/unmap cycles */ 220 /** GTT virtual address for the buffer, saved across map/unmap cycles */
213 void *gtt_virtual; 221 void *gtt_virtual;
222 /** WC CPU address for the buffer, saved across map/unmap cycles */
223 void *wc_virtual;
214 /** 224 /**
215 * Virtual address of the buffer allocated by user, used for userptr 225 * Virtual address of the buffer allocated by user, used for userptr
216 * objects only. 226 * objects only.
@@ -249,7 +259,7 @@ struct _drm_intel_bo_gem {
249 * Boolean of whether the GPU is definitely not accessing the buffer. 259 * Boolean of whether the GPU is definitely not accessing the buffer.
250 * 260 *
251 * This is only valid when reusable, since non-reusable 261 * This is only valid when reusable, since non-reusable
252 * buffers are those that have been shared wth other 262 * buffers are those that have been shared with other
253 * processes, so we don't know their state. 263 * processes, so we don't know their state.
254 */ 264 */
255 bool idle; 265 bool idle;
@@ -287,7 +297,7 @@ struct _drm_intel_bo_gem {
287 */ 297 */
288 int reloc_tree_fences; 298 int reloc_tree_fences;
289 299
290 /** Flags that we may need to do the SW_FINSIH ioctl on unmap. */ 300 /** Flags that we may need to do the SW_FINISH ioctl on unmap. */
291 bool mapped_cpu_write; 301 bool mapped_cpu_write;
292}; 302};
293 303
@@ -568,12 +578,11 @@ drm_intel_add_validate_buffer2(drm_intel_bo *bo, int need_fence)
568 bufmgr_gem->exec2_objects[index].relocation_count = bo_gem->reloc_count; 578 bufmgr_gem->exec2_objects[index].relocation_count = bo_gem->reloc_count;
569 bufmgr_gem->exec2_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs; 579 bufmgr_gem->exec2_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
570 bufmgr_gem->exec2_objects[index].alignment = bo->align; 580 bufmgr_gem->exec2_objects[index].alignment = bo->align;
571 bufmgr_gem->exec2_objects[index].offset = bo_gem->is_softpin ? 581 bufmgr_gem->exec2_objects[index].offset = bo->offset64;
572 bo->offset64 : 0; 582 bufmgr_gem->exec2_objects[index].flags = flags | bo_gem->kflags;
573 bufmgr_gem->exec_bos[index] = bo;
574 bufmgr_gem->exec2_objects[index].flags = flags;
575 bufmgr_gem->exec2_objects[index].rsvd1 = 0; 583 bufmgr_gem->exec2_objects[index].rsvd1 = 0;
576 bufmgr_gem->exec2_objects[index].rsvd2 = 0; 584 bufmgr_gem->exec2_objects[index].rsvd2 = 0;
585 bufmgr_gem->exec_bos[index] = bo;
577 bufmgr_gem->exec_count++; 586 bufmgr_gem->exec_count++;
578} 587}
579 588
@@ -797,14 +806,17 @@ retry:
797 } 806 }
798 } 807 }
799 } 808 }
800 pthread_mutex_unlock(&bufmgr_gem->lock);
801 809
802 if (!alloc_from_cache) { 810 if (!alloc_from_cache) {
803 struct drm_i915_gem_create create; 811 struct drm_i915_gem_create create;
804 812
805 bo_gem = calloc(1, sizeof(*bo_gem)); 813 bo_gem = calloc(1, sizeof(*bo_gem));
806 if (!bo_gem) 814 if (!bo_gem)
807 return NULL; 815 goto err;
816
817 /* drm_intel_gem_bo_free calls DRMLISTDEL() for an uninitialized
818 list (vma_list), so better set the list head here */
819 DRMINITLISTHEAD(&bo_gem->vma_list);
808 820
809 bo_gem->bo.size = bo_size; 821 bo_gem->bo.size = bo_size;
810 822
@@ -814,12 +826,13 @@ retry:
814 ret = drmIoctl(bufmgr_gem->fd, 826 ret = drmIoctl(bufmgr_gem->fd,
815 DRM_IOCTL_I915_GEM_CREATE, 827 DRM_IOCTL_I915_GEM_CREATE,
816 &create); 828 &create);
817 bo_gem->gem_handle = create.handle;
818 bo_gem->bo.handle = bo_gem->gem_handle;
819 if (ret != 0) { 829 if (ret != 0) {
820 free(bo_gem); 830 free(bo_gem);
821 return NULL; 831 goto err;
822 } 832 }
833
834 bo_gem->gem_handle = create.handle;
835 bo_gem->bo.handle = bo_gem->gem_handle;
823 bo_gem->bo.bufmgr = bufmgr; 836 bo_gem->bo.bufmgr = bufmgr;
824 bo_gem->bo.align = alignment; 837 bo_gem->bo.align = alignment;
825 838
@@ -827,16 +840,14 @@ retry:
827 bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; 840 bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
828 bo_gem->stride = 0; 841 bo_gem->stride = 0;
829 842
830 /* drm_intel_gem_bo_free calls DRMLISTDEL() for an uninitialized
831 list (vma_list), so better set the list head here */
832 DRMINITLISTHEAD(&bo_gem->name_list);
833 DRMINITLISTHEAD(&bo_gem->vma_list);
834 if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo, 843 if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
835 tiling_mode, 844 tiling_mode,
836 stride)) { 845 stride))
837 drm_intel_gem_bo_free(&bo_gem->bo); 846 goto err_free;
838 return NULL; 847
839 } 848 HASH_ADD(handle_hh, bufmgr_gem->handle_table,
849 gem_handle, sizeof(bo_gem->gem_handle),
850 bo_gem);
840 } 851 }
841 852
842 bo_gem->name = name; 853 bo_gem->name = name;
@@ -849,11 +860,18 @@ retry:
849 bo_gem->use_48b_address_range = false; 860 bo_gem->use_48b_address_range = false;
850 861
851 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, alignment); 862 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, alignment);
863 pthread_mutex_unlock(&bufmgr_gem->lock);
852 864
853 DBG("bo_create: buf %d (%s) %ldb\n", 865 DBG("bo_create: buf %d (%s) %ldb\n",
854 bo_gem->gem_handle, bo_gem->name, size); 866 bo_gem->gem_handle, bo_gem->name, size);
855 867
856 return &bo_gem->bo; 868 return &bo_gem->bo;
869
870err_free:
871 drm_intel_gem_bo_free(&bo_gem->bo);
872err:
873 pthread_mutex_unlock(&bufmgr_gem->lock);
874 return NULL;
857} 875}
858 876
859static drm_intel_bo * 877static drm_intel_bo *
@@ -954,6 +972,9 @@ drm_intel_gem_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
954 if (!bo_gem) 972 if (!bo_gem)
955 return NULL; 973 return NULL;
956 974
975 atomic_set(&bo_gem->refcount, 1);
976 DRMINITLISTHEAD(&bo_gem->vma_list);
977
957 bo_gem->bo.size = size; 978 bo_gem->bo.size = size;
958 979
959 memclear(userptr); 980 memclear(userptr);
@@ -972,6 +993,8 @@ drm_intel_gem_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
972 return NULL; 993 return NULL;
973 } 994 }
974 995
996 pthread_mutex_lock(&bufmgr_gem->lock);
997
975 bo_gem->gem_handle = userptr.handle; 998 bo_gem->gem_handle = userptr.handle;
976 bo_gem->bo.handle = bo_gem->gem_handle; 999 bo_gem->bo.handle = bo_gem->gem_handle;
977 bo_gem->bo.bufmgr = bufmgr; 1000 bo_gem->bo.bufmgr = bufmgr;
@@ -983,11 +1006,11 @@ drm_intel_gem_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
983 bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; 1006 bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
984 bo_gem->stride = 0; 1007 bo_gem->stride = 0;
985 1008
986 DRMINITLISTHEAD(&bo_gem->name_list); 1009 HASH_ADD(handle_hh, bufmgr_gem->handle_table,
987 DRMINITLISTHEAD(&bo_gem->vma_list); 1010 gem_handle, sizeof(bo_gem->gem_handle),
1011 bo_gem);
988 1012
989 bo_gem->name = name; 1013 bo_gem->name = name;
990 atomic_set(&bo_gem->refcount, 1);
991 bo_gem->validate_index = -1; 1014 bo_gem->validate_index = -1;
992 bo_gem->reloc_tree_fences = 0; 1015 bo_gem->reloc_tree_fences = 0;
993 bo_gem->used_as_reloc_target = false; 1016 bo_gem->used_as_reloc_target = false;
@@ -996,6 +1019,7 @@ drm_intel_gem_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
996 bo_gem->use_48b_address_range = false; 1019 bo_gem->use_48b_address_range = false;
997 1020
998 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0); 1021 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
1022 pthread_mutex_unlock(&bufmgr_gem->lock);
999 1023
1000 DBG("bo_create_userptr: " 1024 DBG("bo_create_userptr: "
1001 "ptr %p buf %d (%s) size %ldb, stride 0x%x, tile mode %d\n", 1025 "ptr %p buf %d (%s) size %ldb, stride 0x%x, tile mode %d\n",
@@ -1085,7 +1109,6 @@ drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
1085 int ret; 1109 int ret;
1086 struct drm_gem_open open_arg; 1110 struct drm_gem_open open_arg;
1087 struct drm_i915_gem_get_tiling get_tiling; 1111 struct drm_i915_gem_get_tiling get_tiling;
1088 drmMMListHead *list;
1089 1112
1090 /* At the moment most applications only have a few named bo. 1113 /* At the moment most applications only have a few named bo.
1091 * For instance, in a DRI client only the render buffers passed 1114 * For instance, in a DRI client only the render buffers passed
@@ -1094,15 +1117,11 @@ drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
1094 * provides a sufficiently fast match. 1117 * provides a sufficiently fast match.
1095 */ 1118 */
1096 pthread_mutex_lock(&bufmgr_gem->lock); 1119 pthread_mutex_lock(&bufmgr_gem->lock);
1097 for (list = bufmgr_gem->named.next; 1120 HASH_FIND(name_hh, bufmgr_gem->name_table,
1098 list != &bufmgr_gem->named; 1121 &handle, sizeof(handle), bo_gem);
1099 list = list->next) { 1122 if (bo_gem) {
1100 bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list); 1123 drm_intel_gem_bo_reference(&bo_gem->bo);
1101 if (bo_gem->global_name == handle) { 1124 goto out;
1102 drm_intel_gem_bo_reference(&bo_gem->bo);
1103 pthread_mutex_unlock(&bufmgr_gem->lock);
1104 return &bo_gem->bo;
1105 }
1106 } 1125 }
1107 1126
1108 memclear(open_arg); 1127 memclear(open_arg);
@@ -1113,29 +1132,26 @@ drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
1113 if (ret != 0) { 1132 if (ret != 0) {
1114 DBG("Couldn't reference %s handle 0x%08x: %s\n", 1133 DBG("Couldn't reference %s handle 0x%08x: %s\n",
1115 name, handle, strerror(errno)); 1134 name, handle, strerror(errno));
1116 pthread_mutex_unlock(&bufmgr_gem->lock); 1135 bo_gem = NULL;
1117 return NULL; 1136 goto out;
1118 } 1137 }
1119 /* Now see if someone has used a prime handle to get this 1138 /* Now see if someone has used a prime handle to get this
1120 * object from the kernel before by looking through the list 1139 * object from the kernel before by looking through the list
1121 * again for a matching gem_handle 1140 * again for a matching gem_handle
1122 */ 1141 */
1123 for (list = bufmgr_gem->named.next; 1142 HASH_FIND(handle_hh, bufmgr_gem->handle_table,
1124 list != &bufmgr_gem->named; 1143 &open_arg.handle, sizeof(open_arg.handle), bo_gem);
1125 list = list->next) { 1144 if (bo_gem) {
1126 bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list); 1145 drm_intel_gem_bo_reference(&bo_gem->bo);
1127 if (bo_gem->gem_handle == open_arg.handle) { 1146 goto out;
1128 drm_intel_gem_bo_reference(&bo_gem->bo);
1129 pthread_mutex_unlock(&bufmgr_gem->lock);
1130 return &bo_gem->bo;
1131 }
1132 } 1147 }
1133 1148
1134 bo_gem = calloc(1, sizeof(*bo_gem)); 1149 bo_gem = calloc(1, sizeof(*bo_gem));
1135 if (!bo_gem) { 1150 if (!bo_gem)
1136 pthread_mutex_unlock(&bufmgr_gem->lock); 1151 goto out;
1137 return NULL; 1152
1138 } 1153 atomic_set(&bo_gem->refcount, 1);
1154 DRMINITLISTHEAD(&bo_gem->vma_list);
1139 1155
1140 bo_gem->bo.size = open_arg.size; 1156 bo_gem->bo.size = open_arg.size;
1141 bo_gem->bo.offset = 0; 1157 bo_gem->bo.offset = 0;
@@ -1143,7 +1159,6 @@ drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
1143 bo_gem->bo.virtual = NULL; 1159 bo_gem->bo.virtual = NULL;
1144 bo_gem->bo.bufmgr = bufmgr; 1160 bo_gem->bo.bufmgr = bufmgr;
1145 bo_gem->name = name; 1161 bo_gem->name = name;
1146 atomic_set(&bo_gem->refcount, 1);
1147 bo_gem->validate_index = -1; 1162 bo_gem->validate_index = -1;
1148 bo_gem->gem_handle = open_arg.handle; 1163 bo_gem->gem_handle = open_arg.handle;
1149 bo_gem->bo.handle = open_arg.handle; 1164 bo_gem->bo.handle = open_arg.handle;
@@ -1151,27 +1166,33 @@ drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
1151 bo_gem->reusable = false; 1166 bo_gem->reusable = false;
1152 bo_gem->use_48b_address_range = false; 1167 bo_gem->use_48b_address_range = false;
1153 1168
1169 HASH_ADD(handle_hh, bufmgr_gem->handle_table,
1170 gem_handle, sizeof(bo_gem->gem_handle), bo_gem);
1171 HASH_ADD(name_hh, bufmgr_gem->name_table,
1172 global_name, sizeof(bo_gem->global_name), bo_gem);
1173
1154 memclear(get_tiling); 1174 memclear(get_tiling);
1155 get_tiling.handle = bo_gem->gem_handle; 1175 get_tiling.handle = bo_gem->gem_handle;
1156 ret = drmIoctl(bufmgr_gem->fd, 1176 ret = drmIoctl(bufmgr_gem->fd,
1157 DRM_IOCTL_I915_GEM_GET_TILING, 1177 DRM_IOCTL_I915_GEM_GET_TILING,
1158 &get_tiling); 1178 &get_tiling);
1159 if (ret != 0) { 1179 if (ret != 0)
1160 drm_intel_gem_bo_unreference(&bo_gem->bo); 1180 goto err_unref;
1161 pthread_mutex_unlock(&bufmgr_gem->lock); 1181
1162 return NULL;
1163 }
1164 bo_gem->tiling_mode = get_tiling.tiling_mode; 1182 bo_gem->tiling_mode = get_tiling.tiling_mode;
1165 bo_gem->swizzle_mode = get_tiling.swizzle_mode; 1183 bo_gem->swizzle_mode = get_tiling.swizzle_mode;
1166 /* XXX stride is unknown */ 1184 /* XXX stride is unknown */
1167 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0); 1185 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
1168
1169 DRMINITLISTHEAD(&bo_gem->vma_list);
1170 DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
1171 pthread_mutex_unlock(&bufmgr_gem->lock);
1172 DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name); 1186 DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);
1173 1187
1188out:
1189 pthread_mutex_unlock(&bufmgr_gem->lock);
1174 return &bo_gem->bo; 1190 return &bo_gem->bo;
1191
1192err_unref:
1193 drm_intel_gem_bo_free(&bo_gem->bo);
1194 pthread_mutex_unlock(&bufmgr_gem->lock);
1195 return NULL;
1175} 1196}
1176 1197
1177static void 1198static void
@@ -1188,11 +1209,20 @@ drm_intel_gem_bo_free(drm_intel_bo *bo)
1188 drm_munmap(bo_gem->mem_virtual, bo_gem->bo.size); 1209 drm_munmap(bo_gem->mem_virtual, bo_gem->bo.size);
1189 bufmgr_gem->vma_count--; 1210 bufmgr_gem->vma_count--;
1190 } 1211 }
1212 if (bo_gem->wc_virtual) {
1213 VG(VALGRIND_FREELIKE_BLOCK(bo_gem->wc_virtual, 0));
1214 drm_munmap(bo_gem->wc_virtual, bo_gem->bo.size);
1215 bufmgr_gem->vma_count--;
1216 }
1191 if (bo_gem->gtt_virtual) { 1217 if (bo_gem->gtt_virtual) {
1192 drm_munmap(bo_gem->gtt_virtual, bo_gem->bo.size); 1218 drm_munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
1193 bufmgr_gem->vma_count--; 1219 bufmgr_gem->vma_count--;
1194 } 1220 }
1195 1221
1222 if (bo_gem->global_name)
1223 HASH_DELETE(name_hh, bufmgr_gem->name_table, bo_gem);
1224 HASH_DELETE(handle_hh, bufmgr_gem->handle_table, bo_gem);
1225
1196 /* Close this object */ 1226 /* Close this object */
1197 memclear(close); 1227 memclear(close);
1198 close.handle = bo_gem->gem_handle; 1228 close.handle = bo_gem->gem_handle;
@@ -1213,6 +1243,9 @@ drm_intel_gem_bo_mark_mmaps_incoherent(drm_intel_bo *bo)
1213 if (bo_gem->mem_virtual) 1243 if (bo_gem->mem_virtual)
1214 VALGRIND_MAKE_MEM_NOACCESS(bo_gem->mem_virtual, bo->size); 1244 VALGRIND_MAKE_MEM_NOACCESS(bo_gem->mem_virtual, bo->size);
1215 1245
1246 if (bo_gem->wc_virtual)
1247 VALGRIND_MAKE_MEM_NOACCESS(bo_gem->wc_virtual, bo->size);
1248
1216 if (bo_gem->gtt_virtual) 1249 if (bo_gem->gtt_virtual)
1217 VALGRIND_MAKE_MEM_NOACCESS(bo_gem->gtt_virtual, bo->size); 1250 VALGRIND_MAKE_MEM_NOACCESS(bo_gem->gtt_virtual, bo->size);
1218#endif 1251#endif
@@ -1277,6 +1310,11 @@ static void drm_intel_gem_bo_purge_vma_cache(drm_intel_bufmgr_gem *bufmgr_gem)
1277 bo_gem->mem_virtual = NULL; 1310 bo_gem->mem_virtual = NULL;
1278 bufmgr_gem->vma_count--; 1311 bufmgr_gem->vma_count--;
1279 } 1312 }
1313 if (bo_gem->wc_virtual) {
1314 drm_munmap(bo_gem->wc_virtual, bo_gem->bo.size);
1315 bo_gem->wc_virtual = NULL;
1316 bufmgr_gem->vma_count--;
1317 }
1280 if (bo_gem->gtt_virtual) { 1318 if (bo_gem->gtt_virtual) {
1281 drm_munmap(bo_gem->gtt_virtual, bo_gem->bo.size); 1319 drm_munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
1282 bo_gem->gtt_virtual = NULL; 1320 bo_gem->gtt_virtual = NULL;
@@ -1292,6 +1330,8 @@ static void drm_intel_gem_bo_close_vma(drm_intel_bufmgr_gem *bufmgr_gem,
1292 DRMLISTADDTAIL(&bo_gem->vma_list, &bufmgr_gem->vma_cache); 1330 DRMLISTADDTAIL(&bo_gem->vma_list, &bufmgr_gem->vma_cache);
1293 if (bo_gem->mem_virtual) 1331 if (bo_gem->mem_virtual)
1294 bufmgr_gem->vma_count++; 1332 bufmgr_gem->vma_count++;
1333 if (bo_gem->wc_virtual)
1334 bufmgr_gem->vma_count++;
1295 if (bo_gem->gtt_virtual) 1335 if (bo_gem->gtt_virtual)
1296 bufmgr_gem->vma_count++; 1336 bufmgr_gem->vma_count++;
1297 drm_intel_gem_bo_purge_vma_cache(bufmgr_gem); 1337 drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
@@ -1304,6 +1344,8 @@ static void drm_intel_gem_bo_open_vma(drm_intel_bufmgr_gem *bufmgr_gem,
1304 DRMLISTDEL(&bo_gem->vma_list); 1344 DRMLISTDEL(&bo_gem->vma_list);
1305 if (bo_gem->mem_virtual) 1345 if (bo_gem->mem_virtual)
1306 bufmgr_gem->vma_count--; 1346 bufmgr_gem->vma_count--;
1347 if (bo_gem->wc_virtual)
1348 bufmgr_gem->vma_count--;
1307 if (bo_gem->gtt_virtual) 1349 if (bo_gem->gtt_virtual)
1308 bufmgr_gem->vma_count--; 1350 bufmgr_gem->vma_count--;
1309 drm_intel_gem_bo_purge_vma_cache(bufmgr_gem); 1351 drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
@@ -1328,6 +1370,7 @@ drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
1328 for (i = 0; i < bo_gem->softpin_target_count; i++) 1370 for (i = 0; i < bo_gem->softpin_target_count; i++)
1329 drm_intel_gem_bo_unreference_locked_timed(bo_gem->softpin_target[i], 1371 drm_intel_gem_bo_unreference_locked_timed(bo_gem->softpin_target[i],
1330 time); 1372 time);
1373 bo_gem->kflags = 0;
1331 bo_gem->reloc_count = 0; 1374 bo_gem->reloc_count = 0;
1332 bo_gem->used_as_reloc_target = false; 1375 bo_gem->used_as_reloc_target = false;
1333 bo_gem->softpin_target_count = 0; 1376 bo_gem->softpin_target_count = 0;
@@ -1358,8 +1401,6 @@ drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
1358 drm_intel_gem_bo_mark_mmaps_incoherent(bo); 1401 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1359 } 1402 }
1360 1403
1361 DRMLISTDEL(&bo_gem->name_list);
1362
1363 bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size); 1404 bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
1364 /* Put the buffer into our internal cache for reuse if we can. */ 1405 /* Put the buffer into our internal cache for reuse if we can. */
1365 if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL && 1406 if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
@@ -1370,6 +1411,8 @@ drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
1370 bo_gem->name = NULL; 1411 bo_gem->name = NULL;
1371 bo_gem->validate_index = -1; 1412 bo_gem->validate_index = -1;
1372 1413
1414 bo_gem->kflags = 0;
1415
1373 DRMLISTADDTAIL(&bo_gem->head, &bucket->head); 1416 DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
1374 } else { 1417 } else {
1375 drm_intel_gem_bo_free(bo); 1418 drm_intel_gem_bo_free(bo);
@@ -1681,7 +1724,7 @@ static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
1681 } 1724 }
1682 1725
1683 /* We need to unmap after every innovation as we cannot track 1726 /* We need to unmap after every innovation as we cannot track
1684 * an open vma for every bo as that will exhaasut the system 1727 * an open vma for every bo as that will exhaust the system
1685 * limits and cause later failures. 1728 * limits and cause later failures.
1686 */ 1729 */
1687 if (--bo_gem->map_count == 0) { 1730 if (--bo_gem->map_count == 0) {
@@ -2335,6 +2378,7 @@ drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
2335static int 2378static int
2336do_exec2(drm_intel_bo *bo, int used, drm_intel_context *ctx, 2379do_exec2(drm_intel_bo *bo, int used, drm_intel_context *ctx,
2337 drm_clip_rect_t *cliprects, int num_cliprects, int DR4, 2380 drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
2381 int in_fence, int *out_fence,
2338 unsigned int flags) 2382 unsigned int flags)
2339{ 2383{
2340 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr; 2384 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
@@ -2389,12 +2433,20 @@ do_exec2(drm_intel_bo *bo, int used, drm_intel_context *ctx,
2389 else 2433 else
2390 i915_execbuffer2_set_context_id(execbuf, ctx->ctx_id); 2434 i915_execbuffer2_set_context_id(execbuf, ctx->ctx_id);
2391 execbuf.rsvd2 = 0; 2435 execbuf.rsvd2 = 0;
2436 if (in_fence != -1) {
2437 execbuf.rsvd2 = in_fence;
2438 execbuf.flags |= I915_EXEC_FENCE_IN;
2439 }
2440 if (out_fence != NULL) {
2441 *out_fence = -1;
2442 execbuf.flags |= I915_EXEC_FENCE_OUT;
2443 }
2392 2444
2393 if (bufmgr_gem->no_exec) 2445 if (bufmgr_gem->no_exec)
2394 goto skip_execution; 2446 goto skip_execution;
2395 2447
2396 ret = drmIoctl(bufmgr_gem->fd, 2448 ret = drmIoctl(bufmgr_gem->fd,
2397 DRM_IOCTL_I915_GEM_EXECBUFFER2, 2449 DRM_IOCTL_I915_GEM_EXECBUFFER2_WR,
2398 &execbuf); 2450 &execbuf);
2399 if (ret != 0) { 2451 if (ret != 0) {
2400 ret = -errno; 2452 ret = -errno;
@@ -2410,6 +2462,9 @@ do_exec2(drm_intel_bo *bo, int used, drm_intel_context *ctx,
2410 } 2462 }
2411 drm_intel_update_buffer_offsets2(bufmgr_gem); 2463 drm_intel_update_buffer_offsets2(bufmgr_gem);
2412 2464
2465 if (ret == 0 && out_fence != NULL)
2466 *out_fence = execbuf.rsvd2 >> 32;
2467
2413skip_execution: 2468skip_execution:
2414 if (bufmgr_gem->bufmgr.debug) 2469 if (bufmgr_gem->bufmgr.debug)
2415 drm_intel_gem_dump_validation_list(bufmgr_gem); 2470 drm_intel_gem_dump_validation_list(bufmgr_gem);
@@ -2435,7 +2490,7 @@ drm_intel_gem_bo_exec2(drm_intel_bo *bo, int used,
2435 int DR4) 2490 int DR4)
2436{ 2491{
2437 return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4, 2492 return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4,
2438 I915_EXEC_RENDER); 2493 -1, NULL, I915_EXEC_RENDER);
2439} 2494}
2440 2495
2441static int 2496static int
@@ -2444,14 +2499,25 @@ drm_intel_gem_bo_mrb_exec2(drm_intel_bo *bo, int used,
2444 unsigned int flags) 2499 unsigned int flags)
2445{ 2500{
2446 return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4, 2501 return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4,
2447 flags); 2502 -1, NULL, flags);
2448} 2503}
2449 2504
2450int 2505int
2451drm_intel_gem_bo_context_exec(drm_intel_bo *bo, drm_intel_context *ctx, 2506drm_intel_gem_bo_context_exec(drm_intel_bo *bo, drm_intel_context *ctx,
2452 int used, unsigned int flags) 2507 int used, unsigned int flags)
2453{ 2508{
2454 return do_exec2(bo, used, ctx, NULL, 0, 0, flags); 2509 return do_exec2(bo, used, ctx, NULL, 0, 0, -1, NULL, flags);
2510}
2511
2512int
2513drm_intel_gem_bo_fence_exec(drm_intel_bo *bo,
2514 drm_intel_context *ctx,
2515 int used,
2516 int in_fence,
2517 int *out_fence,
2518 unsigned int flags)
2519{
2520 return do_exec2(bo, used, ctx, NULL, 0, 0, in_fence, out_fence, flags);
2455} 2521}
2456 2522
2457static int 2523static int
@@ -2591,7 +2657,6 @@ drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, int prime_fd, int s
2591 uint32_t handle; 2657 uint32_t handle;
2592 drm_intel_bo_gem *bo_gem; 2658 drm_intel_bo_gem *bo_gem;
2593 struct drm_i915_gem_get_tiling get_tiling; 2659 struct drm_i915_gem_get_tiling get_tiling;
2594 drmMMListHead *list;
2595 2660
2596 pthread_mutex_lock(&bufmgr_gem->lock); 2661 pthread_mutex_lock(&bufmgr_gem->lock);
2597 ret = drmPrimeFDToHandle(bufmgr_gem->fd, prime_fd, &handle); 2662 ret = drmPrimeFDToHandle(bufmgr_gem->fd, prime_fd, &handle);
@@ -2606,22 +2671,20 @@ drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, int prime_fd, int s
2606 * for named buffers, we must not create two bo's pointing at the same 2671 * for named buffers, we must not create two bo's pointing at the same
2607 * kernel object 2672 * kernel object
2608 */ 2673 */
2609 for (list = bufmgr_gem->named.next; 2674 HASH_FIND(handle_hh, bufmgr_gem->handle_table,
2610 list != &bufmgr_gem->named; 2675 &handle, sizeof(handle), bo_gem);
2611 list = list->next) { 2676 if (bo_gem) {
2612 bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list); 2677 drm_intel_gem_bo_reference(&bo_gem->bo);
2613 if (bo_gem->gem_handle == handle) { 2678 goto out;
2614 drm_intel_gem_bo_reference(&bo_gem->bo);
2615 pthread_mutex_unlock(&bufmgr_gem->lock);
2616 return &bo_gem->bo;
2617 }
2618 } 2679 }
2619 2680
2620 bo_gem = calloc(1, sizeof(*bo_gem)); 2681 bo_gem = calloc(1, sizeof(*bo_gem));
2621 if (!bo_gem) { 2682 if (!bo_gem)
2622 pthread_mutex_unlock(&bufmgr_gem->lock); 2683 goto out;
2623 return NULL; 2684
2624 } 2685 atomic_set(&bo_gem->refcount, 1);
2686 DRMINITLISTHEAD(&bo_gem->vma_list);
2687
2625 /* Determine size of bo. The fd-to-handle ioctl really should 2688 /* Determine size of bo. The fd-to-handle ioctl really should
2626 * return the size, but it doesn't. If we have kernel 3.12 or 2689 * return the size, but it doesn't. If we have kernel 3.12 or
2627 * later, we can lseek on the prime fd to get the size. Older 2690 * later, we can lseek on the prime fd to get the size. Older
@@ -2637,8 +2700,8 @@ drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, int prime_fd, int s
2637 bo_gem->bo.bufmgr = bufmgr; 2700 bo_gem->bo.bufmgr = bufmgr;
2638 2701
2639 bo_gem->gem_handle = handle; 2702 bo_gem->gem_handle = handle;
2640 2703 HASH_ADD(handle_hh, bufmgr_gem->handle_table,
2641 atomic_set(&bo_gem->refcount, 1); 2704 gem_handle, sizeof(bo_gem->gem_handle), bo_gem);
2642 2705
2643 bo_gem->name = "prime"; 2706 bo_gem->name = "prime";
2644 bo_gem->validate_index = -1; 2707 bo_gem->validate_index = -1;
@@ -2648,26 +2711,26 @@ drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, int prime_fd, int s
2648 bo_gem->reusable = false; 2711 bo_gem->reusable = false;
2649 bo_gem->use_48b_address_range = false; 2712 bo_gem->use_48b_address_range = false;
2650 2713
2651 DRMINITLISTHEAD(&bo_gem->vma_list);
2652 DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
2653 pthread_mutex_unlock(&bufmgr_gem->lock);
2654
2655 memclear(get_tiling); 2714 memclear(get_tiling);
2656 get_tiling.handle = bo_gem->gem_handle; 2715 get_tiling.handle = bo_gem->gem_handle;
2657 ret = drmIoctl(bufmgr_gem->fd, 2716 if (drmIoctl(bufmgr_gem->fd,
2658 DRM_IOCTL_I915_GEM_GET_TILING, 2717 DRM_IOCTL_I915_GEM_GET_TILING,
2659 &get_tiling); 2718 &get_tiling))
2660 if (ret != 0) { 2719 goto err;
2661 DBG("create_from_prime: failed to get tiling: %s\n", strerror(errno)); 2720
2662 drm_intel_gem_bo_unreference(&bo_gem->bo);
2663 return NULL;
2664 }
2665 bo_gem->tiling_mode = get_tiling.tiling_mode; 2721 bo_gem->tiling_mode = get_tiling.tiling_mode;
2666 bo_gem->swizzle_mode = get_tiling.swizzle_mode; 2722 bo_gem->swizzle_mode = get_tiling.swizzle_mode;
2667 /* XXX stride is unknown */ 2723 /* XXX stride is unknown */
2668 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0); 2724 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
2669 2725
2726out:
2727 pthread_mutex_unlock(&bufmgr_gem->lock);
2670 return &bo_gem->bo; 2728 return &bo_gem->bo;
2729
2730err:
2731 drm_intel_gem_bo_free(&bo_gem->bo);
2732 pthread_mutex_unlock(&bufmgr_gem->lock);
2733 return NULL;
2671} 2734}
2672 2735
2673int 2736int
@@ -2676,11 +2739,6 @@ drm_intel_bo_gem_export_to_prime(drm_intel_bo *bo, int *prime_fd)
2676 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; 2739 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2677 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 2740 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2678 2741
2679 pthread_mutex_lock(&bufmgr_gem->lock);
2680 if (DRMLISTEMPTY(&bo_gem->name_list))
2681 DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
2682 pthread_mutex_unlock(&bufmgr_gem->lock);
2683
2684 if (drmPrimeHandleToFD(bufmgr_gem->fd, bo_gem->gem_handle, 2742 if (drmPrimeHandleToFD(bufmgr_gem->fd, bo_gem->gem_handle,
2685 DRM_CLOEXEC, prime_fd) != 0) 2743 DRM_CLOEXEC, prime_fd) != 0)
2686 return -errno; 2744 return -errno;
@@ -2695,27 +2753,24 @@ drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t * name)
2695{ 2753{
2696 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; 2754 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2697 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 2755 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2698 int ret;
2699 2756
2700 if (!bo_gem->global_name) { 2757 if (!bo_gem->global_name) {
2701 struct drm_gem_flink flink; 2758 struct drm_gem_flink flink;
2702 2759
2703 memclear(flink); 2760 memclear(flink);
2704 flink.handle = bo_gem->gem_handle; 2761 flink.handle = bo_gem->gem_handle;
2762 if (drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink))
2763 return -errno;
2705 2764
2706 pthread_mutex_lock(&bufmgr_gem->lock); 2765 pthread_mutex_lock(&bufmgr_gem->lock);
2766 if (!bo_gem->global_name) {
2767 bo_gem->global_name = flink.name;
2768 bo_gem->reusable = false;
2707 2769
2708 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink); 2770 HASH_ADD(name_hh, bufmgr_gem->name_table,
2709 if (ret != 0) { 2771 global_name, sizeof(bo_gem->global_name),
2710 pthread_mutex_unlock(&bufmgr_gem->lock); 2772 bo_gem);
2711 return -errno;
2712 } 2773 }
2713
2714 bo_gem->global_name = flink.name;
2715 bo_gem->reusable = false;
2716
2717 if (DRMLISTEMPTY(&bo_gem->name_list))
2718 DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
2719 pthread_mutex_unlock(&bufmgr_gem->lock); 2774 pthread_mutex_unlock(&bufmgr_gem->lock);
2720 } 2775 }
2721 2776
@@ -2739,6 +2794,59 @@ drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
2739} 2794}
2740 2795
2741/** 2796/**
2797 * Disables implicit synchronisation before executing the bo
2798 *
2799 * This will cause rendering corruption unless you correctly manage explicit
2800 * fences for all rendering involving this buffer - including use by others.
2801 * Disabling the implicit serialisation is only required if that serialisation
2802 * is too coarse (for example, you have split the buffer into many
2803 * non-overlapping regions and are sharing the whole buffer between concurrent
2804 * independent command streams).
2805 *
2806 * Note the kernel must advertise support via I915_PARAM_HAS_EXEC_ASYNC,
2807 * which can be checked using drm_intel_bufmgr_can_disable_implicit_sync,
2808 * or subsequent execbufs involving the bo will generate EINVAL.
2809 */
2810void
2811drm_intel_gem_bo_disable_implicit_sync(drm_intel_bo *bo)
2812{
2813 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2814
2815 bo_gem->kflags |= EXEC_OBJECT_ASYNC;
2816}
2817
2818/**
2819 * Enables implicit synchronisation before executing the bo
2820 *
2821 * This is the default behaviour of the kernel, to wait upon prior writes
2822 * completing on the object before rendering with it, or to wait for prior
2823 * reads to complete before writing into the object.
2824 * drm_intel_gem_bo_disable_implicit_sync() can stop this behaviour, telling
2825 * the kernel never to insert a stall before using the object. Then this
2826 * function can be used to restore the implicit sync before subsequent
2827 * rendering.
2828 */
2829void
2830drm_intel_gem_bo_enable_implicit_sync(drm_intel_bo *bo)
2831{
2832 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2833
2834 bo_gem->kflags &= ~EXEC_OBJECT_ASYNC;
2835}
2836
2837/**
2838 * Query whether the kernel supports disabling of its implicit synchronisation
2839 * before execbuf. See drm_intel_gem_bo_disable_implicit_sync()
2840 */
2841int
2842drm_intel_bufmgr_gem_can_disable_implicit_sync(drm_intel_bufmgr *bufmgr)
2843{
2844 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
2845
2846 return bufmgr_gem->has_exec_async;
2847}
2848
2849/**
2742 * Enable use of fenced reloc type. 2850 * Enable use of fenced reloc type.
2743 * 2851 *
2744 * New code should enable this to avoid unnecessary fence register 2852 * New code should enable this to avoid unnecessary fence register
@@ -3034,6 +3142,34 @@ drm_intel_bufmgr_gem_set_vma_cache_size(drm_intel_bufmgr *bufmgr, int limit)
3034 drm_intel_gem_bo_purge_vma_cache(bufmgr_gem); 3142 drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
3035} 3143}
3036 3144
3145static int
3146parse_devid_override(const char *devid_override)
3147{
3148 static const struct {
3149 const char *name;
3150 int pci_id;
3151 } name_map[] = {
3152 { "brw", PCI_CHIP_I965_GM },
3153 { "g4x", PCI_CHIP_GM45_GM },
3154 { "ilk", PCI_CHIP_ILD_G },
3155 { "snb", PCI_CHIP_SANDYBRIDGE_M_GT2_PLUS },
3156 { "ivb", PCI_CHIP_IVYBRIDGE_S_GT2 },
3157 { "hsw", PCI_CHIP_HASWELL_CRW_E_GT3 },
3158 { "byt", PCI_CHIP_VALLEYVIEW_3 },
3159 { "bdw", 0x1620 | BDW_ULX },
3160 { "skl", PCI_CHIP_SKYLAKE_DT_GT2 },
3161 { "kbl", PCI_CHIP_KABYLAKE_DT_GT2 },
3162 };
3163 unsigned int i;
3164
3165 for (i = 0; i < ARRAY_SIZE(name_map); i++) {
3166 if (!strcmp(name_map[i].name, devid_override))
3167 return name_map[i].pci_id;
3168 }
3169
3170 return strtod(devid_override, NULL);
3171}
3172
3037/** 3173/**
3038 * Get the PCI ID for the device. This can be overridden by setting the 3174 * Get the PCI ID for the device. This can be overridden by setting the
3039 * INTEL_DEVID_OVERRIDE environment variable to the desired ID. 3175 * INTEL_DEVID_OVERRIDE environment variable to the desired ID.
@@ -3050,7 +3186,7 @@ get_pci_device_id(drm_intel_bufmgr_gem *bufmgr_gem)
3050 devid_override = getenv("INTEL_DEVID_OVERRIDE"); 3186 devid_override = getenv("INTEL_DEVID_OVERRIDE");
3051 if (devid_override) { 3187 if (devid_override) {
3052 bufmgr_gem->no_exec = true; 3188 bufmgr_gem->no_exec = true;
3053 return strtod(devid_override, NULL); 3189 return parse_devid_override(devid_override);
3054 } 3190 }
3055 } 3191 }
3056 3192
@@ -3130,6 +3266,17 @@ drm_intel_gem_context_create(drm_intel_bufmgr *bufmgr)
3130 return context; 3266 return context;
3131} 3267}
3132 3268
3269int
3270drm_intel_gem_context_get_id(drm_intel_context *ctx, uint32_t *ctx_id)
3271{
3272 if (ctx == NULL)
3273 return -EINVAL;
3274
3275 *ctx_id = ctx->ctx_id;
3276
3277 return 0;
3278}
3279
3133void 3280void
3134drm_intel_gem_context_destroy(drm_intel_context *ctx) 3281drm_intel_gem_context_destroy(drm_intel_context *ctx)
3135{ 3282{
@@ -3237,6 +3384,36 @@ drm_intel_get_eu_total(int fd, unsigned int *eu_total)
3237 return 0; 3384 return 0;
3238} 3385}
3239 3386
3387int
3388drm_intel_get_pooled_eu(int fd)
3389{
3390 drm_i915_getparam_t gp;
3391 int ret = -1;
3392
3393 memclear(gp);
3394 gp.param = I915_PARAM_HAS_POOLED_EU;
3395 gp.value = &ret;
3396 if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
3397 return -errno;
3398
3399 return ret;
3400}
3401
3402int
3403drm_intel_get_min_eu_in_pool(int fd)
3404{
3405 drm_i915_getparam_t gp;
3406 int ret = -1;
3407
3408 memclear(gp);
3409 gp.param = I915_PARAM_MIN_EU_IN_POOL;
3410 gp.value = &ret;
3411 if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
3412 return -errno;
3413
3414 return ret;
3415}
3416
3240/** 3417/**
3241 * Annotate the given bo for use in aub dumping. 3418 * Annotate the given bo for use in aub dumping.
3242 * 3419 *
@@ -3300,6 +3477,141 @@ drm_intel_bufmgr_gem_unref(drm_intel_bufmgr *bufmgr)
3300 } 3477 }
3301} 3478}
3302 3479
3480void *drm_intel_gem_bo_map__gtt(drm_intel_bo *bo)
3481{
3482 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
3483 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
3484
3485 if (bo_gem->gtt_virtual)
3486 return bo_gem->gtt_virtual;
3487
3488 if (bo_gem->is_userptr)
3489 return NULL;
3490
3491 pthread_mutex_lock(&bufmgr_gem->lock);
3492 if (bo_gem->gtt_virtual == NULL) {
3493 struct drm_i915_gem_mmap_gtt mmap_arg;
3494 void *ptr;
3495
3496 DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
3497 bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
3498
3499 if (bo_gem->map_count++ == 0)
3500 drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
3501
3502 memclear(mmap_arg);
3503 mmap_arg.handle = bo_gem->gem_handle;
3504
3505 /* Get the fake offset back... */
3506 ptr = MAP_FAILED;
3507 if (drmIoctl(bufmgr_gem->fd,
3508 DRM_IOCTL_I915_GEM_MMAP_GTT,
3509 &mmap_arg) == 0) {
3510 /* and mmap it */
3511 ptr = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
3512 MAP_SHARED, bufmgr_gem->fd,
3513 mmap_arg.offset);
3514 }
3515 if (ptr == MAP_FAILED) {
3516 if (--bo_gem->map_count == 0)
3517 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
3518 ptr = NULL;
3519 }
3520
3521 bo_gem->gtt_virtual = ptr;
3522 }
3523 pthread_mutex_unlock(&bufmgr_gem->lock);
3524
3525 return bo_gem->gtt_virtual;
3526}
3527
3528void *drm_intel_gem_bo_map__cpu(drm_intel_bo *bo)
3529{
3530 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
3531 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
3532
3533 if (bo_gem->mem_virtual)
3534 return bo_gem->mem_virtual;
3535
3536 if (bo_gem->is_userptr) {
3537 /* Return the same user ptr */
3538 return bo_gem->user_virtual;
3539 }
3540
3541 pthread_mutex_lock(&bufmgr_gem->lock);
3542 if (!bo_gem->mem_virtual) {
3543 struct drm_i915_gem_mmap mmap_arg;
3544
3545 if (bo_gem->map_count++ == 0)
3546 drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
3547
3548 DBG("bo_map: %d (%s), map_count=%d\n",
3549 bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
3550
3551 memclear(mmap_arg);
3552 mmap_arg.handle = bo_gem->gem_handle;
3553 mmap_arg.size = bo->size;
3554 if (drmIoctl(bufmgr_gem->fd,
3555 DRM_IOCTL_I915_GEM_MMAP,
3556 &mmap_arg)) {
3557 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
3558 __FILE__, __LINE__, bo_gem->gem_handle,
3559 bo_gem->name, strerror(errno));
3560 if (--bo_gem->map_count == 0)
3561 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
3562 } else {
3563 VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
3564 bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
3565 }
3566 }
3567 pthread_mutex_unlock(&bufmgr_gem->lock);
3568
3569 return bo_gem->mem_virtual;
3570}
3571
3572void *drm_intel_gem_bo_map__wc(drm_intel_bo *bo)
3573{
3574 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
3575 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
3576
3577 if (bo_gem->wc_virtual)
3578 return bo_gem->wc_virtual;
3579
3580 if (bo_gem->is_userptr)
3581 return NULL;
3582
3583 pthread_mutex_lock(&bufmgr_gem->lock);
3584 if (!bo_gem->wc_virtual) {
3585 struct drm_i915_gem_mmap mmap_arg;
3586
3587 if (bo_gem->map_count++ == 0)
3588 drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
3589
3590 DBG("bo_map: %d (%s), map_count=%d\n",
3591 bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
3592
3593 memclear(mmap_arg);
3594 mmap_arg.handle = bo_gem->gem_handle;
3595 mmap_arg.size = bo->size;
3596 mmap_arg.flags = I915_MMAP_WC;
3597 if (drmIoctl(bufmgr_gem->fd,
3598 DRM_IOCTL_I915_GEM_MMAP,
3599 &mmap_arg)) {
3600 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
3601 __FILE__, __LINE__, bo_gem->gem_handle,
3602 bo_gem->name, strerror(errno));
3603 if (--bo_gem->map_count == 0)
3604 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
3605 } else {
3606 VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
3607 bo_gem->wc_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
3608 }
3609 }
3610 pthread_mutex_unlock(&bufmgr_gem->lock);
3611
3612 return bo_gem->wc_virtual;
3613}
3614
3303/** 3615/**
3304 * Initializes the GEM buffer manager, which uses the kernel to allocate, map, 3616 * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
3305 * and manage map buffer objections. 3617 * and manage map buffer objections.
@@ -3379,7 +3691,7 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
3379 bufmgr_gem->gtt_size > 256*1024*1024) { 3691 bufmgr_gem->gtt_size > 256*1024*1024) {
3380 /* The unmappable part of gtt on gen 3 (i.e. above 256MB) can't 3692 /* The unmappable part of gtt on gen 3 (i.e. above 256MB) can't
3381 * be used for tiled blits. To simplify the accounting, just 3693 * be used for tiled blits. To simplify the accounting, just
3382 * substract the unmappable part (fixed to 256MB on all known 3694 * subtract the unmappable part (fixed to 256MB on all known
3383 * gen3 devices) if the kernel advertises it. */ 3695 * gen3 devices) if the kernel advertises it. */
3384 bufmgr_gem->gtt_size -= 256*1024*1024; 3696 bufmgr_gem->gtt_size -= 256*1024*1024;
3385 } 3697 }
@@ -3404,6 +3716,10 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
3404 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp); 3716 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3405 bufmgr_gem->has_relaxed_fencing = ret == 0; 3717 bufmgr_gem->has_relaxed_fencing = ret == 0;
3406 3718
3719 gp.param = I915_PARAM_HAS_EXEC_ASYNC;
3720 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3721 bufmgr_gem->has_exec_async = ret == 0;
3722
3407 bufmgr_gem->bufmgr.bo_alloc_userptr = check_bo_alloc_userptr; 3723 bufmgr_gem->bufmgr.bo_alloc_userptr = check_bo_alloc_userptr;
3408 3724
3409 gp.param = I915_PARAM_HAS_WAIT_TIMEOUT; 3725 gp.param = I915_PARAM_HAS_WAIT_TIMEOUT;
@@ -3507,7 +3823,6 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
3507 drm_intel_gem_get_pipe_from_crtc_id; 3823 drm_intel_gem_get_pipe_from_crtc_id;
3508 bufmgr_gem->bufmgr.bo_references = drm_intel_gem_bo_references; 3824 bufmgr_gem->bufmgr.bo_references = drm_intel_gem_bo_references;
3509 3825
3510 DRMINITLISTHEAD(&bufmgr_gem->named);
3511 init_cache_buckets(bufmgr_gem); 3826 init_cache_buckets(bufmgr_gem);
3512 3827
3513 DRMINITLISTHEAD(&bufmgr_gem->vma_cache); 3828 DRMINITLISTHEAD(&bufmgr_gem->vma_cache);
diff --git a/intel/intel_chipset.h b/intel/intel_chipset.h
index 26fbee4d..41fc0da0 100644
--- a/intel/intel_chipset.h
+++ b/intel/intel_chipset.h
@@ -168,6 +168,7 @@
168#define PCI_CHIP_SKYLAKE_DT_GT1 0x1902 168#define PCI_CHIP_SKYLAKE_DT_GT1 0x1902
169#define PCI_CHIP_SKYLAKE_ULT_GT1 0x1906 169#define PCI_CHIP_SKYLAKE_ULT_GT1 0x1906
170#define PCI_CHIP_SKYLAKE_SRV_GT1 0x190A /* Reserved */ 170#define PCI_CHIP_SKYLAKE_SRV_GT1 0x190A /* Reserved */
171#define PCI_CHIP_SKYLAKE_H_GT1 0x190B
171#define PCI_CHIP_SKYLAKE_ULX_GT1 0x190E /* Reserved */ 172#define PCI_CHIP_SKYLAKE_ULX_GT1 0x190E /* Reserved */
172#define PCI_CHIP_SKYLAKE_DT_GT2 0x1912 173#define PCI_CHIP_SKYLAKE_DT_GT2 0x1912
173#define PCI_CHIP_SKYLAKE_FUSED0_GT2 0x1913 /* Reserved */ 174#define PCI_CHIP_SKYLAKE_FUSED0_GT2 0x1913 /* Reserved */
@@ -179,17 +180,46 @@
179#define PCI_CHIP_SKYLAKE_WKS_GT2 0x191D 180#define PCI_CHIP_SKYLAKE_WKS_GT2 0x191D
180#define PCI_CHIP_SKYLAKE_ULX_GT2 0x191E 181#define PCI_CHIP_SKYLAKE_ULX_GT2 0x191E
181#define PCI_CHIP_SKYLAKE_MOBILE_GT2 0x1921 /* Reserved */ 182#define PCI_CHIP_SKYLAKE_MOBILE_GT2 0x1921 /* Reserved */
182#define PCI_CHIP_SKYLAKE_GT3 0x1926 183#define PCI_CHIP_SKYLAKE_ULT_GT3_0 0x1923
183#define PCI_CHIP_SKYLAKE_HALO_GT3 0x192B /* Reserved */ 184#define PCI_CHIP_SKYLAKE_ULT_GT3_1 0x1926
185#define PCI_CHIP_SKYLAKE_ULT_GT3_2 0x1927
184#define PCI_CHIP_SKYLAKE_SRV_GT4 0x192A 186#define PCI_CHIP_SKYLAKE_SRV_GT4 0x192A
187#define PCI_CHIP_SKYLAKE_HALO_GT3 0x192B /* Reserved */
188#define PCI_CHIP_SKYLAKE_SRV_GT3 0x192D
185#define PCI_CHIP_SKYLAKE_DT_GT4 0x1932 189#define PCI_CHIP_SKYLAKE_DT_GT4 0x1932
186#define PCI_CHIP_SKYLAKE_SRV_GT4X 0x193A 190#define PCI_CHIP_SKYLAKE_SRV_GT4X 0x193A
187#define PCI_CHIP_SKYLAKE_H_GT4 0x193B 191#define PCI_CHIP_SKYLAKE_H_GT4 0x193B
188#define PCI_CHIP_SKYLAKE_WKS_GT4 0x193D 192#define PCI_CHIP_SKYLAKE_WKS_GT4 0x193D
189 193
194#define PCI_CHIP_KABYLAKE_ULT_GT2 0x5916
195#define PCI_CHIP_KABYLAKE_ULT_GT1_5 0x5913
196#define PCI_CHIP_KABYLAKE_ULT_GT1 0x5906
197#define PCI_CHIP_KABYLAKE_ULT_GT3_0 0x5923
198#define PCI_CHIP_KABYLAKE_ULT_GT3_1 0x5926
199#define PCI_CHIP_KABYLAKE_ULT_GT3_2 0x5927
200#define PCI_CHIP_KABYLAKE_ULT_GT2F 0x5921
201#define PCI_CHIP_KABYLAKE_ULX_GT1_5 0x5915
202#define PCI_CHIP_KABYLAKE_ULX_GT1 0x590E
203#define PCI_CHIP_KABYLAKE_ULX_GT2 0x591E
204#define PCI_CHIP_KABYLAKE_DT_GT2 0x5912
205#define PCI_CHIP_KABYLAKE_DT_GT1_5 0x5917
206#define PCI_CHIP_KABYLAKE_DT_GT1 0x5902
207#define PCI_CHIP_KABYLAKE_HALO_GT2 0x591B
208#define PCI_CHIP_KABYLAKE_HALO_GT4 0x593B
209#define PCI_CHIP_KABYLAKE_HALO_GT1_0 0x5908
210#define PCI_CHIP_KABYLAKE_HALO_GT1_1 0x590B
211#define PCI_CHIP_KABYLAKE_SRV_GT2 0x591A
212#define PCI_CHIP_KABYLAKE_SRV_GT1 0x590A
213#define PCI_CHIP_KABYLAKE_WKS_GT2 0x591D
214
190#define PCI_CHIP_BROXTON_0 0x0A84 215#define PCI_CHIP_BROXTON_0 0x0A84
191#define PCI_CHIP_BROXTON_1 0x1A84 216#define PCI_CHIP_BROXTON_1 0x1A84
192#define PCI_CHIP_BROXTON_2 0x5A84 217#define PCI_CHIP_BROXTON_2 0x5A84
218#define PCI_CHIP_BROXTON_3 0x1A85
219#define PCI_CHIP_BROXTON_4 0x5A85
220
221#define PCI_CHIP_GLK 0x3184
222#define PCI_CHIP_GLK_2X6 0x3185
193 223
194#define IS_MOBILE(devid) ((devid) == PCI_CHIP_I855_GM || \ 224#define IS_MOBILE(devid) ((devid) == PCI_CHIP_I855_GM || \
195 (devid) == PCI_CHIP_I915_GM || \ 225 (devid) == PCI_CHIP_I915_GM || \
@@ -350,10 +380,11 @@
350#define IS_GEN8(devid) (IS_BROADWELL(devid) || \ 380#define IS_GEN8(devid) (IS_BROADWELL(devid) || \
351 IS_CHERRYVIEW(devid)) 381 IS_CHERRYVIEW(devid))
352 382
353#define IS_SKL_GT1(devid) ((devid) == PCI_CHIP_SKYLAKE_ULT_GT1 || \ 383#define IS_SKL_GT1(devid) ((devid) == PCI_CHIP_SKYLAKE_DT_GT1 || \
354 (devid) == PCI_CHIP_SKYLAKE_ULX_GT1 || \ 384 (devid) == PCI_CHIP_SKYLAKE_ULT_GT1 || \
355 (devid) == PCI_CHIP_SKYLAKE_DT_GT1 || \ 385 (devid) == PCI_CHIP_SKYLAKE_SRV_GT1 || \
356 (devid) == PCI_CHIP_SKYLAKE_SRV_GT1) 386 (devid) == PCI_CHIP_SKYLAKE_H_GT1 || \
387 (devid) == PCI_CHIP_SKYLAKE_ULX_GT1)
357 388
358#define IS_SKL_GT2(devid) ((devid) == PCI_CHIP_SKYLAKE_DT_GT2 || \ 389#define IS_SKL_GT2(devid) ((devid) == PCI_CHIP_SKYLAKE_DT_GT2 || \
359 (devid) == PCI_CHIP_SKYLAKE_FUSED0_GT2 || \ 390 (devid) == PCI_CHIP_SKYLAKE_FUSED0_GT2 || \
@@ -366,8 +397,11 @@
366 (devid) == PCI_CHIP_SKYLAKE_ULX_GT2 || \ 397 (devid) == PCI_CHIP_SKYLAKE_ULX_GT2 || \
367 (devid) == PCI_CHIP_SKYLAKE_MOBILE_GT2) 398 (devid) == PCI_CHIP_SKYLAKE_MOBILE_GT2)
368 399
369#define IS_SKL_GT3(devid) ((devid) == PCI_CHIP_SKYLAKE_GT3 || \ 400#define IS_SKL_GT3(devid) ((devid) == PCI_CHIP_SKYLAKE_ULT_GT3_0 || \
370 (devid) == PCI_CHIP_SKYLAKE_HALO_GT3) 401 (devid) == PCI_CHIP_SKYLAKE_ULT_GT3_1 || \
402 (devid) == PCI_CHIP_SKYLAKE_ULT_GT3_2 || \
403 (devid) == PCI_CHIP_SKYLAKE_HALO_GT3 || \
404 (devid) == PCI_CHIP_SKYLAKE_SRV_GT3)
371 405
372#define IS_SKL_GT4(devid) ((devid) == PCI_CHIP_SKYLAKE_SRV_GT4 || \ 406#define IS_SKL_GT4(devid) ((devid) == PCI_CHIP_SKYLAKE_SRV_GT4 || \
373 (devid) == PCI_CHIP_SKYLAKE_DT_GT4 || \ 407 (devid) == PCI_CHIP_SKYLAKE_DT_GT4 || \
@@ -375,6 +409,35 @@
375 (devid) == PCI_CHIP_SKYLAKE_H_GT4 || \ 409 (devid) == PCI_CHIP_SKYLAKE_H_GT4 || \
376 (devid) == PCI_CHIP_SKYLAKE_WKS_GT4) 410 (devid) == PCI_CHIP_SKYLAKE_WKS_GT4)
377 411
412#define IS_KBL_GT1(devid) ((devid) == PCI_CHIP_KABYLAKE_ULT_GT1_5 || \
413 (devid) == PCI_CHIP_KABYLAKE_ULX_GT1_5 || \
414 (devid) == PCI_CHIP_KABYLAKE_DT_GT1_5 || \
415 (devid) == PCI_CHIP_KABYLAKE_ULT_GT1 || \
416 (devid) == PCI_CHIP_KABYLAKE_ULX_GT1 || \
417 (devid) == PCI_CHIP_KABYLAKE_DT_GT1 || \
418 (devid) == PCI_CHIP_KABYLAKE_HALO_GT1_0 || \
419 (devid) == PCI_CHIP_KABYLAKE_HALO_GT1_1 || \
420 (devid) == PCI_CHIP_KABYLAKE_SRV_GT1)
421
422#define IS_KBL_GT2(devid) ((devid) == PCI_CHIP_KABYLAKE_ULT_GT2 || \
423 (devid) == PCI_CHIP_KABYLAKE_ULT_GT2F || \
424 (devid) == PCI_CHIP_KABYLAKE_ULX_GT2 || \
425 (devid) == PCI_CHIP_KABYLAKE_DT_GT2 || \
426 (devid) == PCI_CHIP_KABYLAKE_HALO_GT2 || \
427 (devid) == PCI_CHIP_KABYLAKE_SRV_GT2 || \
428 (devid) == PCI_CHIP_KABYLAKE_WKS_GT2)
429
430#define IS_KBL_GT3(devid) ((devid) == PCI_CHIP_KABYLAKE_ULT_GT3_0 || \
431 (devid) == PCI_CHIP_KABYLAKE_ULT_GT3_1 || \
432 (devid) == PCI_CHIP_KABYLAKE_ULT_GT3_2)
433
434#define IS_KBL_GT4(devid) ((devid) == PCI_CHIP_KABYLAKE_HALO_GT4)
435
436#define IS_KABYLAKE(devid) (IS_KBL_GT1(devid) || \
437 IS_KBL_GT2(devid) || \
438 IS_KBL_GT3(devid) || \
439 IS_KBL_GT4(devid))
440
378#define IS_SKYLAKE(devid) (IS_SKL_GT1(devid) || \ 441#define IS_SKYLAKE(devid) (IS_SKL_GT1(devid) || \
379 IS_SKL_GT2(devid) || \ 442 IS_SKL_GT2(devid) || \
380 IS_SKL_GT3(devid) || \ 443 IS_SKL_GT3(devid) || \
@@ -382,10 +445,17 @@
382 445
383#define IS_BROXTON(devid) ((devid) == PCI_CHIP_BROXTON_0 || \ 446#define IS_BROXTON(devid) ((devid) == PCI_CHIP_BROXTON_0 || \
384 (devid) == PCI_CHIP_BROXTON_1 || \ 447 (devid) == PCI_CHIP_BROXTON_1 || \
385 (devid) == PCI_CHIP_BROXTON_2) 448 (devid) == PCI_CHIP_BROXTON_2 || \
449 (devid) == PCI_CHIP_BROXTON_3 || \
450 (devid) == PCI_CHIP_BROXTON_4)
451
452#define IS_GEMINILAKE(devid) ((devid) == PCI_CHIP_GLK || \
453 (devid) == PCI_CHIP_GLK_2X6)
386 454
387#define IS_GEN9(devid) (IS_SKYLAKE(devid) || \ 455#define IS_GEN9(devid) (IS_SKYLAKE(devid) || \
388 IS_BROXTON(devid)) 456 IS_BROXTON(devid) || \
457 IS_KABYLAKE(devid) || \
458 IS_GEMINILAKE(devid))
389 459
390#define IS_9XX(dev) (IS_GEN3(dev) || \ 460#define IS_9XX(dev) (IS_GEN3(dev) || \
391 IS_GEN4(dev) || \ 461 IS_GEN4(dev) || \
diff --git a/intel/intel_decode.c b/intel/intel_decode.c
index e7aef742..803d2029 100644
--- a/intel/intel_decode.c
+++ b/intel/intel_decode.c
@@ -38,8 +38,6 @@
38#include "intel_chipset.h" 38#include "intel_chipset.h"
39#include "intel_bufmgr.h" 39#include "intel_bufmgr.h"
40 40
41/* The compiler throws ~90 warnings. Do not spam the build, until we fix them. */
42#pragma GCC diagnostic ignored "-Wmissing-field-initializers"
43 41
44/* Struct for tracking drm_intel_decode state. */ 42/* Struct for tracking drm_intel_decode state. */
45struct drm_intel_decode { 43struct drm_intel_decode {
@@ -3600,7 +3598,7 @@ decode_3d_965(struct drm_intel_decode *ctx)
3600 instr_out(ctx, 0, "3DSTATE_DEPTH_BUFFER\n"); 3598 instr_out(ctx, 0, "3DSTATE_DEPTH_BUFFER\n");
3601 if (IS_GEN5(devid) || IS_GEN6(devid)) 3599 if (IS_GEN5(devid) || IS_GEN6(devid))
3602 instr_out(ctx, 1, 3600 instr_out(ctx, 1,
3603 "%s, %s, pitch = %d bytes, %stiled, HiZ %d, Seperate Stencil %d\n", 3601 "%s, %s, pitch = %d bytes, %stiled, HiZ %d, Separate Stencil %d\n",
3604 get_965_surfacetype(data[1] >> 29), 3602 get_965_surfacetype(data[1] >> 29),
3605 get_965_depthformat((data[1] >> 18) & 0x7), 3603 get_965_depthformat((data[1] >> 18) & 0x7),
3606 (data[1] & 0x0001ffff) + 1, 3604 (data[1] & 0x0001ffff) + 1,
diff --git a/intel/tests/gen5-3d.batch-ref.txt b/intel/tests/gen5-3d.batch-ref.txt
index a0271ab7..51dd85f3 100644
--- a/intel/tests/gen5-3d.batch-ref.txt
+++ b/intel/tests/gen5-3d.batch-ref.txt
@@ -24,7 +24,7 @@
240x1230005c: 0x00000000: dword 3 240x1230005c: 0x00000000: dword 3
250x12300060: 0x00000000: dword 4 250x12300060: 0x00000000: dword 4
260x12300064: 0x79050004: 3DSTATE_DEPTH_BUFFER 260x12300064: 0x79050004: 3DSTATE_DEPTH_BUFFER
270x12300068: 0x2c0805ff: 2D, z24s8, pitch = 1536 bytes, tiled, HiZ 0, Seperate Stencil 0 270x12300068: 0x2c0805ff: 2D, z24s8, pitch = 1536 bytes, tiled, HiZ 0, Separate Stencil 0
280x1230006c: 0x00000000: depth offset 280x1230006c: 0x00000000: depth offset
290x12300070: 0x09584ac0: 300x300 290x12300070: 0x09584ac0: 300x300
300x12300074: 0x00000000: volume depth 300x12300074: 0x00000000: volume depth
diff --git a/intel/tests/gen6-3d.batch-ref.txt b/intel/tests/gen6-3d.batch-ref.txt
index 9035663d..04cbddc7 100644
--- a/intel/tests/gen6-3d.batch-ref.txt
+++ b/intel/tests/gen6-3d.batch-ref.txt
@@ -140,7 +140,7 @@
1400x1230022c: 0x00000000: 1400x1230022c: 0x00000000:
1410x12300230: 0x00000000: 1410x12300230: 0x00000000:
1420x12300234: 0x79050005: 3DSTATE_DEPTH_BUFFER 1420x12300234: 0x79050005: 3DSTATE_DEPTH_BUFFER
1430x12300238: 0x2c6c05ff: 2D, unknown, pitch = 1536 bytes, tiled, HiZ 1, Seperate Stencil 1 1430x12300238: 0x2c6c05ff: 2D, unknown, pitch = 1536 bytes, tiled, HiZ 1, Separate Stencil 1
1440x1230023c: 0x00000000: depth offset 1440x1230023c: 0x00000000: depth offset
1450x12300240: 0x09584ac0: 300x300 1450x12300240: 0x09584ac0: 300x300
1460x12300244: 0x00000000: volume depth 1460x12300244: 0x00000000: volume depth
diff --git a/intel/uthash.h b/intel/uthash.h
new file mode 100644
index 00000000..45d1f9fc
--- /dev/null
+++ b/intel/uthash.h
@@ -0,0 +1,1074 @@
1/*
2Copyright (c) 2003-2016, Troy D. Hanson http://troydhanson.github.com/uthash/
3All rights reserved.
4
5Redistribution and use in source and binary forms, with or without
6modification, are permitted provided that the following conditions are met:
7
8 * Redistributions of source code must retain the above copyright
9 notice, this list of conditions and the following disclaimer.
10
11THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
12IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
13TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
14PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
15OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
16EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
17PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
18PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
19LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
20NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
21SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
22*/
23
24#ifndef UTHASH_H
25#define UTHASH_H
26
27#define UTHASH_VERSION 2.0.1
28
29#include <string.h> /* memcmp,strlen */
30#include <stddef.h> /* ptrdiff_t */
31#include <stdlib.h> /* exit() */
32
33/* These macros use decltype or the earlier __typeof GNU extension.
34 As decltype is only available in newer compilers (VS2010 or gcc 4.3+
35 when compiling c++ source) this code uses whatever method is needed
36 or, for VS2008 where neither is available, uses casting workarounds. */
37#if defined(_MSC_VER) /* MS compiler */
38#if _MSC_VER >= 1600 && defined(__cplusplus) /* VS2010 or newer in C++ mode */
39#define DECLTYPE(x) (decltype(x))
40#else /* VS2008 or older (or VS2010 in C mode) */
41#define NO_DECLTYPE
42#define DECLTYPE(x)
43#endif
44#elif defined(__BORLANDC__) || defined(__LCC__) || defined(__WATCOMC__)
45#define NO_DECLTYPE
46#define DECLTYPE(x)
47#else /* GNU, Sun and other compilers */
48#define DECLTYPE(x) (__typeof(x))
49#endif
50
51#ifdef NO_DECLTYPE
52#define DECLTYPE_ASSIGN(dst,src) \
53do { \
54 char **_da_dst = (char**)(&(dst)); \
55 *_da_dst = (char*)(src); \
56} while (0)
57#else
58#define DECLTYPE_ASSIGN(dst,src) \
59do { \
60 (dst) = DECLTYPE(dst)(src); \
61} while (0)
62#endif
63
64/* a number of the hash function use uint32_t which isn't defined on Pre VS2010 */
65#if defined(_WIN32)
66#if defined(_MSC_VER) && _MSC_VER >= 1600
67#include <stdint.h>
68#elif defined(__WATCOMC__) || defined(__MINGW32__) || defined(__CYGWIN__)
69#include <stdint.h>
70#else
71typedef unsigned int uint32_t;
72typedef unsigned char uint8_t;
73#endif
74#elif defined(__GNUC__) && !defined(__VXWORKS__)
75#include <stdint.h>
76#else
77typedef unsigned int uint32_t;
78typedef unsigned char uint8_t;
79#endif
80
81#ifndef uthash_fatal
82#define uthash_fatal(msg) exit(-1) /* fatal error (out of memory,etc) */
83#endif
84#ifndef uthash_malloc
85#define uthash_malloc(sz) malloc(sz) /* malloc fcn */
86#endif
87#ifndef uthash_free
88#define uthash_free(ptr,sz) free(ptr) /* free fcn */
89#endif
90#ifndef uthash_strlen
91#define uthash_strlen(s) strlen(s)
92#endif
93#ifndef uthash_memcmp
94#define uthash_memcmp(a,b,n) memcmp(a,b,n)
95#endif
96
97#ifndef uthash_noexpand_fyi
98#define uthash_noexpand_fyi(tbl) /* can be defined to log noexpand */
99#endif
100#ifndef uthash_expand_fyi
101#define uthash_expand_fyi(tbl) /* can be defined to log expands */
102#endif
103
104/* initial number of buckets */
105#define HASH_INITIAL_NUM_BUCKETS 32U /* initial number of buckets */
106#define HASH_INITIAL_NUM_BUCKETS_LOG2 5U /* lg2 of initial number of buckets */
107#define HASH_BKT_CAPACITY_THRESH 10U /* expand when bucket count reaches */
108
109/* calculate the element whose hash handle address is hhp */
110#define ELMT_FROM_HH(tbl,hhp) ((void*)(((char*)(hhp)) - ((tbl)->hho)))
111/* calculate the hash handle from element address elp */
112#define HH_FROM_ELMT(tbl,elp) ((UT_hash_handle *)(((char*)(elp)) + ((tbl)->hho)))
113
114#define HASH_VALUE(keyptr,keylen,hashv) \
115do { \
116 HASH_FCN(keyptr, keylen, hashv); \
117} while (0)
118
119#define HASH_FIND_BYHASHVALUE(hh,head,keyptr,keylen,hashval,out) \
120do { \
121 (out) = NULL; \
122 if (head) { \
123 unsigned _hf_bkt; \
124 HASH_TO_BKT(hashval, (head)->hh.tbl->num_buckets, _hf_bkt); \
125 if (HASH_BLOOM_TEST((head)->hh.tbl, hashval) != 0) { \
126 HASH_FIND_IN_BKT((head)->hh.tbl, hh, (head)->hh.tbl->buckets[ _hf_bkt ], keyptr, keylen, hashval, out); \
127 } \
128 } \
129} while (0)
130
131#define HASH_FIND(hh,head,keyptr,keylen,out) \
132do { \
133 unsigned _hf_hashv; \
134 HASH_VALUE(keyptr, keylen, _hf_hashv); \
135 HASH_FIND_BYHASHVALUE(hh, head, keyptr, keylen, _hf_hashv, out); \
136} while (0)
137
138#ifdef HASH_BLOOM
139#define HASH_BLOOM_BITLEN (1UL << HASH_BLOOM)
140#define HASH_BLOOM_BYTELEN (HASH_BLOOM_BITLEN/8UL) + (((HASH_BLOOM_BITLEN%8UL)!=0UL) ? 1UL : 0UL)
141#define HASH_BLOOM_MAKE(tbl) \
142do { \
143 (tbl)->bloom_nbits = HASH_BLOOM; \
144 (tbl)->bloom_bv = (uint8_t*)uthash_malloc(HASH_BLOOM_BYTELEN); \
145 if (!((tbl)->bloom_bv)) { uthash_fatal( "out of memory"); } \
146 memset((tbl)->bloom_bv, 0, HASH_BLOOM_BYTELEN); \
147 (tbl)->bloom_sig = HASH_BLOOM_SIGNATURE; \
148} while (0)
149
150#define HASH_BLOOM_FREE(tbl) \
151do { \
152 uthash_free((tbl)->bloom_bv, HASH_BLOOM_BYTELEN); \
153} while (0)
154
155#define HASH_BLOOM_BITSET(bv,idx) (bv[(idx)/8U] |= (1U << ((idx)%8U)))
156#define HASH_BLOOM_BITTEST(bv,idx) (bv[(idx)/8U] & (1U << ((idx)%8U)))
157
158#define HASH_BLOOM_ADD(tbl,hashv) \
159 HASH_BLOOM_BITSET((tbl)->bloom_bv, (hashv & (uint32_t)((1ULL << (tbl)->bloom_nbits) - 1U)))
160
161#define HASH_BLOOM_TEST(tbl,hashv) \
162 HASH_BLOOM_BITTEST((tbl)->bloom_bv, (hashv & (uint32_t)((1ULL << (tbl)->bloom_nbits) - 1U)))
163
164#else
165#define HASH_BLOOM_MAKE(tbl)
166#define HASH_BLOOM_FREE(tbl)
167#define HASH_BLOOM_ADD(tbl,hashv)
168#define HASH_BLOOM_TEST(tbl,hashv) (1)
169#define HASH_BLOOM_BYTELEN 0U
170#endif
171
172#define HASH_MAKE_TABLE(hh,head) \
173do { \
174 (head)->hh.tbl = (UT_hash_table*)uthash_malloc( \
175 sizeof(UT_hash_table)); \
176 if (!((head)->hh.tbl)) { uthash_fatal( "out of memory"); } \
177 memset((head)->hh.tbl, 0, sizeof(UT_hash_table)); \
178 (head)->hh.tbl->tail = &((head)->hh); \
179 (head)->hh.tbl->num_buckets = HASH_INITIAL_NUM_BUCKETS; \
180 (head)->hh.tbl->log2_num_buckets = HASH_INITIAL_NUM_BUCKETS_LOG2; \
181 (head)->hh.tbl->hho = (char*)(&(head)->hh) - (char*)(head); \
182 (head)->hh.tbl->buckets = (UT_hash_bucket*)uthash_malloc( \
183 HASH_INITIAL_NUM_BUCKETS*sizeof(struct UT_hash_bucket)); \
184 if (! (head)->hh.tbl->buckets) { uthash_fatal( "out of memory"); } \
185 memset((head)->hh.tbl->buckets, 0, \
186 HASH_INITIAL_NUM_BUCKETS*sizeof(struct UT_hash_bucket)); \
187 HASH_BLOOM_MAKE((head)->hh.tbl); \
188 (head)->hh.tbl->signature = HASH_SIGNATURE; \
189} while (0)
190
191#define HASH_REPLACE_BYHASHVALUE_INORDER(hh,head,fieldname,keylen_in,hashval,add,replaced,cmpfcn) \
192do { \
193 (replaced) = NULL; \
194 HASH_FIND_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, replaced); \
195 if (replaced) { \
196 HASH_DELETE(hh, head, replaced); \
197 } \
198 HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh, head, &((add)->fieldname), keylen_in, hashval, add, cmpfcn); \
199} while (0)
200
201#define HASH_REPLACE_BYHASHVALUE(hh,head,fieldname,keylen_in,hashval,add,replaced) \
202do { \
203 (replaced) = NULL; \
204 HASH_FIND_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, replaced); \
205 if (replaced) { \
206 HASH_DELETE(hh, head, replaced); \
207 } \
208 HASH_ADD_KEYPTR_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, add); \
209} while (0)
210
211#define HASH_REPLACE(hh,head,fieldname,keylen_in,add,replaced) \
212do { \
213 unsigned _hr_hashv; \
214 HASH_VALUE(&((add)->fieldname), keylen_in, _hr_hashv); \
215 HASH_REPLACE_BYHASHVALUE(hh, head, fieldname, keylen_in, _hr_hashv, add, replaced); \
216} while (0)
217
218#define HASH_REPLACE_INORDER(hh,head,fieldname,keylen_in,add,replaced,cmpfcn) \
219do { \
220 unsigned _hr_hashv; \
221 HASH_VALUE(&((add)->fieldname), keylen_in, _hr_hashv); \
222 HASH_REPLACE_BYHASHVALUE_INORDER(hh, head, fieldname, keylen_in, _hr_hashv, add, replaced, cmpfcn); \
223} while (0)
224
225#define HASH_APPEND_LIST(hh, head, add) \
226do { \
227 (add)->hh.next = NULL; \
228 (add)->hh.prev = ELMT_FROM_HH((head)->hh.tbl, (head)->hh.tbl->tail); \
229 (head)->hh.tbl->tail->next = (add); \
230 (head)->hh.tbl->tail = &((add)->hh); \
231} while (0)
232
233#define HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh,head,keyptr,keylen_in,hashval,add,cmpfcn) \
234do { \
235 unsigned _ha_bkt; \
236 (add)->hh.hashv = (hashval); \
237 (add)->hh.key = (char*) (keyptr); \
238 (add)->hh.keylen = (unsigned) (keylen_in); \
239 if (!(head)) { \
240 (add)->hh.next = NULL; \
241 (add)->hh.prev = NULL; \
242 (head) = (add); \
243 HASH_MAKE_TABLE(hh, head); \
244 } else { \
245 struct UT_hash_handle *_hs_iter = &(head)->hh; \
246 (add)->hh.tbl = (head)->hh.tbl; \
247 do { \
248 if (cmpfcn(DECLTYPE(head) ELMT_FROM_HH((head)->hh.tbl, _hs_iter), add) > 0) \
249 break; \
250 } while ((_hs_iter = _hs_iter->next)); \
251 if (_hs_iter) { \
252 (add)->hh.next = _hs_iter; \
253 if (((add)->hh.prev = _hs_iter->prev)) { \
254 HH_FROM_ELMT((head)->hh.tbl, _hs_iter->prev)->next = (add); \
255 } else { \
256 (head) = (add); \
257 } \
258 _hs_iter->prev = (add); \
259 } else { \
260 HASH_APPEND_LIST(hh, head, add); \
261 } \
262 } \
263 (head)->hh.tbl->num_items++; \
264 HASH_TO_BKT(hashval, (head)->hh.tbl->num_buckets, _ha_bkt); \
265 HASH_ADD_TO_BKT((head)->hh.tbl->buckets[_ha_bkt], &(add)->hh); \
266 HASH_BLOOM_ADD((head)->hh.tbl, hashval); \
267 HASH_EMIT_KEY(hh, head, keyptr, keylen_in); \
268 HASH_FSCK(hh, head); \
269} while (0)
270
271#define HASH_ADD_KEYPTR_INORDER(hh,head,keyptr,keylen_in,add,cmpfcn) \
272do { \
273 unsigned _hs_hashv; \
274 HASH_VALUE(keyptr, keylen_in, _hs_hashv); \
275 HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh, head, keyptr, keylen_in, _hs_hashv, add, cmpfcn); \
276} while (0)
277
278#define HASH_ADD_BYHASHVALUE_INORDER(hh,head,fieldname,keylen_in,hashval,add,cmpfcn) \
279 HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh, head, &((add)->fieldname), keylen_in, hashval, add, cmpfcn)
280
281#define HASH_ADD_INORDER(hh,head,fieldname,keylen_in,add,cmpfcn) \
282 HASH_ADD_KEYPTR_INORDER(hh, head, &((add)->fieldname), keylen_in, add, cmpfcn)
283
284#define HASH_ADD_KEYPTR_BYHASHVALUE(hh,head,keyptr,keylen_in,hashval,add) \
285do { \
286 unsigned _ha_bkt; \
287 (add)->hh.hashv = (hashval); \
288 (add)->hh.key = (char*) (keyptr); \
289 (add)->hh.keylen = (unsigned) (keylen_in); \
290 if (!(head)) { \
291 (add)->hh.next = NULL; \
292 (add)->hh.prev = NULL; \
293 (head) = (add); \
294 HASH_MAKE_TABLE(hh, head); \
295 } else { \
296 (add)->hh.tbl = (head)->hh.tbl; \
297 HASH_APPEND_LIST(hh, head, add); \
298 } \
299 (head)->hh.tbl->num_items++; \
300 HASH_TO_BKT(hashval, (head)->hh.tbl->num_buckets, _ha_bkt); \
301 HASH_ADD_TO_BKT((head)->hh.tbl->buckets[_ha_bkt], &(add)->hh); \
302 HASH_BLOOM_ADD((head)->hh.tbl, hashval); \
303 HASH_EMIT_KEY(hh, head, keyptr, keylen_in); \
304 HASH_FSCK(hh, head); \
305} while (0)
306
307#define HASH_ADD_KEYPTR(hh,head,keyptr,keylen_in,add) \
308do { \
309 unsigned _ha_hashv; \
310 HASH_VALUE(keyptr, keylen_in, _ha_hashv); \
311 HASH_ADD_KEYPTR_BYHASHVALUE(hh, head, keyptr, keylen_in, _ha_hashv, add); \
312} while (0)
313
314#define HASH_ADD_BYHASHVALUE(hh,head,fieldname,keylen_in,hashval,add) \
315 HASH_ADD_KEYPTR_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, add)
316
317#define HASH_ADD(hh,head,fieldname,keylen_in,add) \
318 HASH_ADD_KEYPTR(hh, head, &((add)->fieldname), keylen_in, add)
319
320#define HASH_TO_BKT(hashv,num_bkts,bkt) \
321do { \
322 bkt = ((hashv) & ((num_bkts) - 1U)); \
323} while (0)
324
325/* delete "delptr" from the hash table.
326 * "the usual" patch-up process for the app-order doubly-linked-list.
327 * The use of _hd_hh_del below deserves special explanation.
328 * These used to be expressed using (delptr) but that led to a bug
329 * if someone used the same symbol for the head and deletee, like
330 * HASH_DELETE(hh,users,users);
331 * We want that to work, but by changing the head (users) below
332 * we were forfeiting our ability to further refer to the deletee (users)
333 * in the patch-up process. Solution: use scratch space to
334 * copy the deletee pointer, then the latter references are via that
335 * scratch pointer rather than through the repointed (users) symbol.
336 */
337#define HASH_DELETE(hh,head,delptr) \
338do { \
339 struct UT_hash_handle *_hd_hh_del; \
340 if ( ((delptr)->hh.prev == NULL) && ((delptr)->hh.next == NULL) ) { \
341 uthash_free((head)->hh.tbl->buckets, \
342 (head)->hh.tbl->num_buckets*sizeof(struct UT_hash_bucket) ); \
343 HASH_BLOOM_FREE((head)->hh.tbl); \
344 uthash_free((head)->hh.tbl, sizeof(UT_hash_table)); \
345 head = NULL; \
346 } else { \
347 unsigned _hd_bkt; \
348 _hd_hh_del = &((delptr)->hh); \
349 if ((delptr) == ELMT_FROM_HH((head)->hh.tbl,(head)->hh.tbl->tail)) { \
350 (head)->hh.tbl->tail = \
351 (UT_hash_handle*)((ptrdiff_t)((delptr)->hh.prev) + \
352 (head)->hh.tbl->hho); \
353 } \
354 if ((delptr)->hh.prev != NULL) { \
355 ((UT_hash_handle*)((ptrdiff_t)((delptr)->hh.prev) + \
356 (head)->hh.tbl->hho))->next = (delptr)->hh.next; \
357 } else { \
358 DECLTYPE_ASSIGN(head,(delptr)->hh.next); \
359 } \
360 if (_hd_hh_del->next != NULL) { \
361 ((UT_hash_handle*)((ptrdiff_t)_hd_hh_del->next + \
362 (head)->hh.tbl->hho))->prev = \
363 _hd_hh_del->prev; \
364 } \
365 HASH_TO_BKT( _hd_hh_del->hashv, (head)->hh.tbl->num_buckets, _hd_bkt); \
366 HASH_DEL_IN_BKT(hh,(head)->hh.tbl->buckets[_hd_bkt], _hd_hh_del); \
367 (head)->hh.tbl->num_items--; \
368 } \
369 HASH_FSCK(hh,head); \
370} while (0)
371
372
373/* convenience forms of HASH_FIND/HASH_ADD/HASH_DEL */
374#define HASH_FIND_STR(head,findstr,out) \
375 HASH_FIND(hh,head,findstr,(unsigned)uthash_strlen(findstr),out)
376#define HASH_ADD_STR(head,strfield,add) \
377 HASH_ADD(hh,head,strfield[0],(unsigned)uthash_strlen(add->strfield),add)
378#define HASH_REPLACE_STR(head,strfield,add,replaced) \
379 HASH_REPLACE(hh,head,strfield[0],(unsigned)uthash_strlen(add->strfield),add,replaced)
380#define HASH_FIND_INT(head,findint,out) \
381 HASH_FIND(hh,head,findint,sizeof(int),out)
382#define HASH_ADD_INT(head,intfield,add) \
383 HASH_ADD(hh,head,intfield,sizeof(int),add)
384#define HASH_REPLACE_INT(head,intfield,add,replaced) \
385 HASH_REPLACE(hh,head,intfield,sizeof(int),add,replaced)
386#define HASH_FIND_PTR(head,findptr,out) \
387 HASH_FIND(hh,head,findptr,sizeof(void *),out)
388#define HASH_ADD_PTR(head,ptrfield,add) \
389 HASH_ADD(hh,head,ptrfield,sizeof(void *),add)
390#define HASH_REPLACE_PTR(head,ptrfield,add,replaced) \
391 HASH_REPLACE(hh,head,ptrfield,sizeof(void *),add,replaced)
392#define HASH_DEL(head,delptr) \
393 HASH_DELETE(hh,head,delptr)
394
395/* HASH_FSCK checks hash integrity on every add/delete when HASH_DEBUG is defined.
396 * This is for uthash developer only; it compiles away if HASH_DEBUG isn't defined.
397 */
398#ifdef HASH_DEBUG
399#define HASH_OOPS(...) do { fprintf(stderr,__VA_ARGS__); exit(-1); } while (0)
400#define HASH_FSCK(hh,head) \
401do { \
402 struct UT_hash_handle *_thh; \
403 if (head) { \
404 unsigned _bkt_i; \
405 unsigned _count; \
406 char *_prev; \
407 _count = 0; \
408 for( _bkt_i = 0; _bkt_i < (head)->hh.tbl->num_buckets; _bkt_i++) { \
409 unsigned _bkt_count = 0; \
410 _thh = (head)->hh.tbl->buckets[_bkt_i].hh_head; \
411 _prev = NULL; \
412 while (_thh) { \
413 if (_prev != (char*)(_thh->hh_prev)) { \
414 HASH_OOPS("invalid hh_prev %p, actual %p\n", \
415 _thh->hh_prev, _prev ); \
416 } \
417 _bkt_count++; \
418 _prev = (char*)(_thh); \
419 _thh = _thh->hh_next; \
420 } \
421 _count += _bkt_count; \
422 if ((head)->hh.tbl->buckets[_bkt_i].count != _bkt_count) { \
423 HASH_OOPS("invalid bucket count %u, actual %u\n", \
424 (head)->hh.tbl->buckets[_bkt_i].count, _bkt_count); \
425 } \
426 } \
427 if (_count != (head)->hh.tbl->num_items) { \
428 HASH_OOPS("invalid hh item count %u, actual %u\n", \
429 (head)->hh.tbl->num_items, _count ); \
430 } \
431 /* traverse hh in app order; check next/prev integrity, count */ \
432 _count = 0; \
433 _prev = NULL; \
434 _thh = &(head)->hh; \
435 while (_thh) { \
436 _count++; \
437 if (_prev !=(char*)(_thh->prev)) { \
438 HASH_OOPS("invalid prev %p, actual %p\n", \
439 _thh->prev, _prev ); \
440 } \
441 _prev = (char*)ELMT_FROM_HH((head)->hh.tbl, _thh); \
442 _thh = ( _thh->next ? (UT_hash_handle*)((char*)(_thh->next) + \
443 (head)->hh.tbl->hho) : NULL ); \
444 } \
445 if (_count != (head)->hh.tbl->num_items) { \
446 HASH_OOPS("invalid app item count %u, actual %u\n", \
447 (head)->hh.tbl->num_items, _count ); \
448 } \
449 } \
450} while (0)
451#else
452#define HASH_FSCK(hh,head)
453#endif
454
455/* When compiled with -DHASH_EMIT_KEYS, length-prefixed keys are emitted to
456 * the descriptor to which this macro is defined for tuning the hash function.
457 * The app can #include <unistd.h> to get the prototype for write(2). */
458#ifdef HASH_EMIT_KEYS
459#define HASH_EMIT_KEY(hh,head,keyptr,fieldlen) \
460do { \
461 unsigned _klen = fieldlen; \
462 write(HASH_EMIT_KEYS, &_klen, sizeof(_klen)); \
463 write(HASH_EMIT_KEYS, keyptr, (unsigned long)fieldlen); \
464} while (0)
465#else
466#define HASH_EMIT_KEY(hh,head,keyptr,fieldlen)
467#endif
468
469/* default to Jenkin's hash unless overridden e.g. DHASH_FUNCTION=HASH_SAX */
470#ifdef HASH_FUNCTION
471#define HASH_FCN HASH_FUNCTION
472#else
473#define HASH_FCN HASH_JEN
474#endif
475
476/* The Bernstein hash function, used in Perl prior to v5.6. Note (x<<5+x)=x*33. */
477#define HASH_BER(key,keylen,hashv) \
478do { \
479 unsigned _hb_keylen=(unsigned)keylen; \
480 const unsigned char *_hb_key=(const unsigned char*)(key); \
481 (hashv) = 0; \
482 while (_hb_keylen-- != 0U) { \
483 (hashv) = (((hashv) << 5) + (hashv)) + *_hb_key++; \
484 } \
485} while (0)
486
487
488/* SAX/FNV/OAT/JEN hash functions are macro variants of those listed at
489 * http://eternallyconfuzzled.com/tuts/algorithms/jsw_tut_hashing.aspx */
490#define HASH_SAX(key,keylen,hashv) \
491do { \
492 unsigned _sx_i; \
493 const unsigned char *_hs_key=(const unsigned char*)(key); \
494 hashv = 0; \
495 for(_sx_i=0; _sx_i < keylen; _sx_i++) { \
496 hashv ^= (hashv << 5) + (hashv >> 2) + _hs_key[_sx_i]; \
497 } \
498} while (0)
499/* FNV-1a variation */
500#define HASH_FNV(key,keylen,hashv) \
501do { \
502 unsigned _fn_i; \
503 const unsigned char *_hf_key=(const unsigned char*)(key); \
504 hashv = 2166136261U; \
505 for(_fn_i=0; _fn_i < keylen; _fn_i++) { \
506 hashv = hashv ^ _hf_key[_fn_i]; \
507 hashv = hashv * 16777619U; \
508 } \
509} while (0)
510
511#define HASH_OAT(key,keylen,hashv) \
512do { \
513 unsigned _ho_i; \
514 const unsigned char *_ho_key=(const unsigned char*)(key); \
515 hashv = 0; \
516 for(_ho_i=0; _ho_i < keylen; _ho_i++) { \
517 hashv += _ho_key[_ho_i]; \
518 hashv += (hashv << 10); \
519 hashv ^= (hashv >> 6); \
520 } \
521 hashv += (hashv << 3); \
522 hashv ^= (hashv >> 11); \
523 hashv += (hashv << 15); \
524} while (0)
525
526#define HASH_JEN_MIX(a,b,c) \
527do { \
528 a -= b; a -= c; a ^= ( c >> 13 ); \
529 b -= c; b -= a; b ^= ( a << 8 ); \
530 c -= a; c -= b; c ^= ( b >> 13 ); \
531 a -= b; a -= c; a ^= ( c >> 12 ); \
532 b -= c; b -= a; b ^= ( a << 16 ); \
533 c -= a; c -= b; c ^= ( b >> 5 ); \
534 a -= b; a -= c; a ^= ( c >> 3 ); \
535 b -= c; b -= a; b ^= ( a << 10 ); \
536 c -= a; c -= b; c ^= ( b >> 15 ); \
537} while (0)
538
539#define HASH_JEN(key,keylen,hashv) \
540do { \
541 unsigned _hj_i,_hj_j,_hj_k; \
542 unsigned const char *_hj_key=(unsigned const char*)(key); \
543 hashv = 0xfeedbeefu; \
544 _hj_i = _hj_j = 0x9e3779b9u; \
545 _hj_k = (unsigned)(keylen); \
546 while (_hj_k >= 12U) { \
547 _hj_i += (_hj_key[0] + ( (unsigned)_hj_key[1] << 8 ) \
548 + ( (unsigned)_hj_key[2] << 16 ) \
549 + ( (unsigned)_hj_key[3] << 24 ) ); \
550 _hj_j += (_hj_key[4] + ( (unsigned)_hj_key[5] << 8 ) \
551 + ( (unsigned)_hj_key[6] << 16 ) \
552 + ( (unsigned)_hj_key[7] << 24 ) ); \
553 hashv += (_hj_key[8] + ( (unsigned)_hj_key[9] << 8 ) \
554 + ( (unsigned)_hj_key[10] << 16 ) \
555 + ( (unsigned)_hj_key[11] << 24 ) ); \
556 \
557 HASH_JEN_MIX(_hj_i, _hj_j, hashv); \
558 \
559 _hj_key += 12; \
560 _hj_k -= 12U; \
561 } \
562 hashv += (unsigned)(keylen); \
563 switch ( _hj_k ) { \
564 case 11: hashv += ( (unsigned)_hj_key[10] << 24 ); /* FALLTHROUGH */ \
565 case 10: hashv += ( (unsigned)_hj_key[9] << 16 ); /* FALLTHROUGH */ \
566 case 9: hashv += ( (unsigned)_hj_key[8] << 8 ); /* FALLTHROUGH */ \
567 case 8: _hj_j += ( (unsigned)_hj_key[7] << 24 ); /* FALLTHROUGH */ \
568 case 7: _hj_j += ( (unsigned)_hj_key[6] << 16 ); /* FALLTHROUGH */ \
569 case 6: _hj_j += ( (unsigned)_hj_key[5] << 8 ); /* FALLTHROUGH */ \
570 case 5: _hj_j += _hj_key[4]; /* FALLTHROUGH */ \
571 case 4: _hj_i += ( (unsigned)_hj_key[3] << 24 ); /* FALLTHROUGH */ \
572 case 3: _hj_i += ( (unsigned)_hj_key[2] << 16 ); /* FALLTHROUGH */ \
573 case 2: _hj_i += ( (unsigned)_hj_key[1] << 8 ); /* FALLTHROUGH */ \
574 case 1: _hj_i += _hj_key[0]; \
575 } \
576 HASH_JEN_MIX(_hj_i, _hj_j, hashv); \
577} while (0)
578
579/* The Paul Hsieh hash function */
580#undef get16bits
581#if (defined(__GNUC__) && defined(__i386__)) || defined(__WATCOMC__) \
582 || defined(_MSC_VER) || defined (__BORLANDC__) || defined (__TURBOC__)
583#define get16bits(d) (*((const uint16_t *) (d)))
584#endif
585
586#if !defined (get16bits)
587#define get16bits(d) ((((uint32_t)(((const uint8_t *)(d))[1])) << 8) \
588 +(uint32_t)(((const uint8_t *)(d))[0]) )
589#endif
590#define HASH_SFH(key,keylen,hashv) \
591do { \
592 unsigned const char *_sfh_key=(unsigned const char*)(key); \
593 uint32_t _sfh_tmp, _sfh_len = (uint32_t)keylen; \
594 \
595 unsigned _sfh_rem = _sfh_len & 3U; \
596 _sfh_len >>= 2; \
597 hashv = 0xcafebabeu; \
598 \
599 /* Main loop */ \
600 for (;_sfh_len > 0U; _sfh_len--) { \
601 hashv += get16bits (_sfh_key); \
602 _sfh_tmp = ((uint32_t)(get16bits (_sfh_key+2)) << 11) ^ hashv; \
603 hashv = (hashv << 16) ^ _sfh_tmp; \
604 _sfh_key += 2U*sizeof (uint16_t); \
605 hashv += hashv >> 11; \
606 } \
607 \
608 /* Handle end cases */ \
609 switch (_sfh_rem) { \
610 case 3: hashv += get16bits (_sfh_key); \
611 hashv ^= hashv << 16; \
612 hashv ^= (uint32_t)(_sfh_key[sizeof (uint16_t)]) << 18; \
613 hashv += hashv >> 11; \
614 break; \
615 case 2: hashv += get16bits (_sfh_key); \
616 hashv ^= hashv << 11; \
617 hashv += hashv >> 17; \
618 break; \
619 case 1: hashv += *_sfh_key; \
620 hashv ^= hashv << 10; \
621 hashv += hashv >> 1; \
622 } \
623 \
624 /* Force "avalanching" of final 127 bits */ \
625 hashv ^= hashv << 3; \
626 hashv += hashv >> 5; \
627 hashv ^= hashv << 4; \
628 hashv += hashv >> 17; \
629 hashv ^= hashv << 25; \
630 hashv += hashv >> 6; \
631} while (0)
632
633#ifdef HASH_USING_NO_STRICT_ALIASING
634/* The MurmurHash exploits some CPU's (x86,x86_64) tolerance for unaligned reads.
635 * For other types of CPU's (e.g. Sparc) an unaligned read causes a bus error.
636 * MurmurHash uses the faster approach only on CPU's where we know it's safe.
637 *
638 * Note the preprocessor built-in defines can be emitted using:
639 *
640 * gcc -m64 -dM -E - < /dev/null (on gcc)
641 * cc -## a.c (where a.c is a simple test file) (Sun Studio)
642 */
643#if (defined(__i386__) || defined(__x86_64__) || defined(_M_IX86))
644#define MUR_GETBLOCK(p,i) p[i]
645#else /* non intel */
646#define MUR_PLUS0_ALIGNED(p) (((unsigned long)p & 3UL) == 0UL)
647#define MUR_PLUS1_ALIGNED(p) (((unsigned long)p & 3UL) == 1UL)
648#define MUR_PLUS2_ALIGNED(p) (((unsigned long)p & 3UL) == 2UL)
649#define MUR_PLUS3_ALIGNED(p) (((unsigned long)p & 3UL) == 3UL)
650#define WP(p) ((uint32_t*)((unsigned long)(p) & ~3UL))
651#if (defined(__BIG_ENDIAN__) || defined(SPARC) || defined(__ppc__) || defined(__ppc64__))
652#define MUR_THREE_ONE(p) ((((*WP(p))&0x00ffffff) << 8) | (((*(WP(p)+1))&0xff000000) >> 24))
653#define MUR_TWO_TWO(p) ((((*WP(p))&0x0000ffff) <<16) | (((*(WP(p)+1))&0xffff0000) >> 16))
654#define MUR_ONE_THREE(p) ((((*WP(p))&0x000000ff) <<24) | (((*(WP(p)+1))&0xffffff00) >> 8))
655#else /* assume little endian non-intel */
656#define MUR_THREE_ONE(p) ((((*WP(p))&0xffffff00) >> 8) | (((*(WP(p)+1))&0x000000ff) << 24))
657#define MUR_TWO_TWO(p) ((((*WP(p))&0xffff0000) >>16) | (((*(WP(p)+1))&0x0000ffff) << 16))
658#define MUR_ONE_THREE(p) ((((*WP(p))&0xff000000) >>24) | (((*(WP(p)+1))&0x00ffffff) << 8))
659#endif
660#define MUR_GETBLOCK(p,i) (MUR_PLUS0_ALIGNED(p) ? ((p)[i]) : \
661 (MUR_PLUS1_ALIGNED(p) ? MUR_THREE_ONE(p) : \
662 (MUR_PLUS2_ALIGNED(p) ? MUR_TWO_TWO(p) : \
663 MUR_ONE_THREE(p))))
664#endif
665#define MUR_ROTL32(x,r) (((x) << (r)) | ((x) >> (32 - (r))))
666#define MUR_FMIX(_h) \
667do { \
668 _h ^= _h >> 16; \
669 _h *= 0x85ebca6bu; \
670 _h ^= _h >> 13; \
671 _h *= 0xc2b2ae35u; \
672 _h ^= _h >> 16; \
673} while (0)
674
675#define HASH_MUR(key,keylen,hashv) \
676do { \
677 const uint8_t *_mur_data = (const uint8_t*)(key); \
678 const int _mur_nblocks = (int)(keylen) / 4; \
679 uint32_t _mur_h1 = 0xf88D5353u; \
680 uint32_t _mur_c1 = 0xcc9e2d51u; \
681 uint32_t _mur_c2 = 0x1b873593u; \
682 uint32_t _mur_k1 = 0; \
683 const uint8_t *_mur_tail; \
684 const uint32_t *_mur_blocks = (const uint32_t*)(_mur_data+(_mur_nblocks*4)); \
685 int _mur_i; \
686 for(_mur_i = -_mur_nblocks; _mur_i!=0; _mur_i++) { \
687 _mur_k1 = MUR_GETBLOCK(_mur_blocks,_mur_i); \
688 _mur_k1 *= _mur_c1; \
689 _mur_k1 = MUR_ROTL32(_mur_k1,15); \
690 _mur_k1 *= _mur_c2; \
691 \
692 _mur_h1 ^= _mur_k1; \
693 _mur_h1 = MUR_ROTL32(_mur_h1,13); \
694 _mur_h1 = (_mur_h1*5U) + 0xe6546b64u; \
695 } \
696 _mur_tail = (const uint8_t*)(_mur_data + (_mur_nblocks*4)); \
697 _mur_k1=0; \
698 switch((keylen) & 3U) { \
699 case 3: _mur_k1 ^= (uint32_t)_mur_tail[2] << 16; /* FALLTHROUGH */ \
700 case 2: _mur_k1 ^= (uint32_t)_mur_tail[1] << 8; /* FALLTHROUGH */ \
701 case 1: _mur_k1 ^= (uint32_t)_mur_tail[0]; \
702 _mur_k1 *= _mur_c1; \
703 _mur_k1 = MUR_ROTL32(_mur_k1,15); \
704 _mur_k1 *= _mur_c2; \
705 _mur_h1 ^= _mur_k1; \
706 } \
707 _mur_h1 ^= (uint32_t)(keylen); \
708 MUR_FMIX(_mur_h1); \
709 hashv = _mur_h1; \
710} while (0)
711#endif /* HASH_USING_NO_STRICT_ALIASING */
712
713/* iterate over items in a known bucket to find desired item */
714#define HASH_FIND_IN_BKT(tbl,hh,head,keyptr,keylen_in,hashval,out) \
715do { \
716 if ((head).hh_head != NULL) { \
717 DECLTYPE_ASSIGN(out, ELMT_FROM_HH(tbl, (head).hh_head)); \
718 } else { \
719 (out) = NULL; \
720 } \
721 while ((out) != NULL) { \
722 if ((out)->hh.hashv == (hashval) && (out)->hh.keylen == (keylen_in)) { \
723 if (uthash_memcmp((out)->hh.key, keyptr, keylen_in) == 0) { \
724 break; \
725 } \
726 } \
727 if ((out)->hh.hh_next != NULL) { \
728 DECLTYPE_ASSIGN(out, ELMT_FROM_HH(tbl, (out)->hh.hh_next)); \
729 } else { \
730 (out) = NULL; \
731 } \
732 } \
733} while (0)
734
735/* add an item to a bucket */
736#define HASH_ADD_TO_BKT(head,addhh) \
737do { \
738 head.count++; \
739 (addhh)->hh_next = head.hh_head; \
740 (addhh)->hh_prev = NULL; \
741 if (head.hh_head != NULL) { (head).hh_head->hh_prev = (addhh); } \
742 (head).hh_head=addhh; \
743 if ((head.count >= ((head.expand_mult+1U) * HASH_BKT_CAPACITY_THRESH)) \
744 && ((addhh)->tbl->noexpand != 1U)) { \
745 HASH_EXPAND_BUCKETS((addhh)->tbl); \
746 } \
747} while (0)
748
749/* remove an item from a given bucket */
750#define HASH_DEL_IN_BKT(hh,head,hh_del) \
751 (head).count--; \
752 if ((head).hh_head == hh_del) { \
753 (head).hh_head = hh_del->hh_next; \
754 } \
755 if (hh_del->hh_prev) { \
756 hh_del->hh_prev->hh_next = hh_del->hh_next; \
757 } \
758 if (hh_del->hh_next) { \
759 hh_del->hh_next->hh_prev = hh_del->hh_prev; \
760 }
761
762/* Bucket expansion has the effect of doubling the number of buckets
763 * and redistributing the items into the new buckets. Ideally the
764 * items will distribute more or less evenly into the new buckets
765 * (the extent to which this is true is a measure of the quality of
766 * the hash function as it applies to the key domain).
767 *
768 * With the items distributed into more buckets, the chain length
769 * (item count) in each bucket is reduced. Thus by expanding buckets
770 * the hash keeps a bound on the chain length. This bounded chain
771 * length is the essence of how a hash provides constant time lookup.
772 *
773 * The calculation of tbl->ideal_chain_maxlen below deserves some
774 * explanation. First, keep in mind that we're calculating the ideal
775 * maximum chain length based on the *new* (doubled) bucket count.
776 * In fractions this is just n/b (n=number of items,b=new num buckets).
777 * Since the ideal chain length is an integer, we want to calculate
778 * ceil(n/b). We don't depend on floating point arithmetic in this
779 * hash, so to calculate ceil(n/b) with integers we could write
780 *
781 * ceil(n/b) = (n/b) + ((n%b)?1:0)
782 *
783 * and in fact a previous version of this hash did just that.
784 * But now we have improved things a bit by recognizing that b is
785 * always a power of two. We keep its base 2 log handy (call it lb),
786 * so now we can write this with a bit shift and logical AND:
787 *
788 * ceil(n/b) = (n>>lb) + ( (n & (b-1)) ? 1:0)
789 *
790 */
791#define HASH_EXPAND_BUCKETS(tbl) \
792do { \
793 unsigned _he_bkt; \
794 unsigned _he_bkt_i; \
795 struct UT_hash_handle *_he_thh, *_he_hh_nxt; \
796 UT_hash_bucket *_he_new_buckets, *_he_newbkt; \
797 _he_new_buckets = (UT_hash_bucket*)uthash_malloc( \
798 2UL * tbl->num_buckets * sizeof(struct UT_hash_bucket)); \
799 if (!_he_new_buckets) { uthash_fatal( "out of memory"); } \
800 memset(_he_new_buckets, 0, \
801 2UL * tbl->num_buckets * sizeof(struct UT_hash_bucket)); \
802 tbl->ideal_chain_maxlen = \
803 (tbl->num_items >> (tbl->log2_num_buckets+1U)) + \
804 (((tbl->num_items & ((tbl->num_buckets*2U)-1U)) != 0U) ? 1U : 0U); \
805 tbl->nonideal_items = 0; \
806 for(_he_bkt_i = 0; _he_bkt_i < tbl->num_buckets; _he_bkt_i++) \
807 { \
808 _he_thh = tbl->buckets[ _he_bkt_i ].hh_head; \
809 while (_he_thh != NULL) { \
810 _he_hh_nxt = _he_thh->hh_next; \
811 HASH_TO_BKT( _he_thh->hashv, tbl->num_buckets*2U, _he_bkt); \
812 _he_newbkt = &(_he_new_buckets[ _he_bkt ]); \
813 if (++(_he_newbkt->count) > tbl->ideal_chain_maxlen) { \
814 tbl->nonideal_items++; \
815 _he_newbkt->expand_mult = _he_newbkt->count / \
816 tbl->ideal_chain_maxlen; \
817 } \
818 _he_thh->hh_prev = NULL; \
819 _he_thh->hh_next = _he_newbkt->hh_head; \
820 if (_he_newbkt->hh_head != NULL) { _he_newbkt->hh_head->hh_prev = \
821 _he_thh; } \
822 _he_newbkt->hh_head = _he_thh; \
823 _he_thh = _he_hh_nxt; \
824 } \
825 } \
826 uthash_free( tbl->buckets, tbl->num_buckets*sizeof(struct UT_hash_bucket) ); \
827 tbl->num_buckets *= 2U; \
828 tbl->log2_num_buckets++; \
829 tbl->buckets = _he_new_buckets; \
830 tbl->ineff_expands = (tbl->nonideal_items > (tbl->num_items >> 1)) ? \
831 (tbl->ineff_expands+1U) : 0U; \
832 if (tbl->ineff_expands > 1U) { \
833 tbl->noexpand=1; \
834 uthash_noexpand_fyi(tbl); \
835 } \
836 uthash_expand_fyi(tbl); \
837} while (0)
838
839
/* Bottom-up mergesort over the hash's doubly-linked application-order
 * list, adapted from Simon Tatham's O(n log(n)) list mergesort.
 * HASH_SORT assumes the hash handle member is named `hh`; HASH_SRT
 * takes the handle name explicitly.  `cmpfcn` receives two user
 * structs and returns <0, 0 or >0 (strcmp-like). */
#define HASH_SORT(head,cmpfcn) HASH_SRT(hh,head,cmpfcn)
#define HASH_SRT(hh,head,cmpfcn) \
do { \
  unsigned _hs_i; \
  unsigned _hs_looping,_hs_nmerges,_hs_insize,_hs_psize,_hs_qsize; \
  struct UT_hash_handle *_hs_p, *_hs_q, *_hs_e, *_hs_list, *_hs_tail; \
  if (head != NULL) { \
    _hs_insize = 1; \
    _hs_looping = 1; \
    _hs_list = &((head)->hh); \
    while (_hs_looping != 0U) { \
      _hs_p = _hs_list; \
      _hs_list = NULL; \
      _hs_tail = NULL; \
      _hs_nmerges = 0; \
      while (_hs_p != NULL) { \
        _hs_nmerges++; \
        /* carve a run of up to _hs_insize elements starting at _hs_p */ \
        _hs_q = _hs_p; \
        _hs_psize = 0; \
        for (_hs_i = 0; _hs_i < _hs_insize; _hs_i++) { \
          _hs_psize++; \
          _hs_q = (UT_hash_handle*)((_hs_q->next != NULL) ? \
                  ((void*)((char*)(_hs_q->next) + (head)->hh.tbl->hho)) : NULL); \
          if (!(_hs_q)) { break; } \
        } \
        _hs_qsize = _hs_insize; \
        /* merge the p-run with the q-run, preserving stability */ \
        while ((_hs_psize > 0U) || ((_hs_qsize > 0U) && (_hs_q != NULL))) { \
          if (_hs_psize == 0U) { \
            _hs_e = _hs_q; \
            _hs_q = (UT_hash_handle*)((_hs_q->next != NULL) ? \
                    ((void*)((char*)(_hs_q->next) + (head)->hh.tbl->hho)) : NULL); \
            _hs_qsize--; \
          } else if ((_hs_qsize == 0U) || (_hs_q == NULL)) { \
            _hs_e = _hs_p; \
            if (_hs_p != NULL) { \
              _hs_p = (UT_hash_handle*)((_hs_p->next != NULL) ? \
                      ((void*)((char*)(_hs_p->next) + (head)->hh.tbl->hho)) : NULL); \
            } \
            _hs_psize--; \
          } else if ((cmpfcn(DECLTYPE(head)(ELMT_FROM_HH((head)->hh.tbl,_hs_p)), \
                             DECLTYPE(head)(ELMT_FROM_HH((head)->hh.tbl,_hs_q)))) <= 0) { \
            _hs_e = _hs_p; \
            if (_hs_p != NULL) { \
              _hs_p = (UT_hash_handle*)((_hs_p->next != NULL) ? \
                      ((void*)((char*)(_hs_p->next) + (head)->hh.tbl->hho)) : NULL); \
            } \
            _hs_psize--; \
          } else { \
            _hs_e = _hs_q; \
            _hs_q = (UT_hash_handle*)((_hs_q->next != NULL) ? \
                    ((void*)((char*)(_hs_q->next) + (head)->hh.tbl->hho)) : NULL); \
            _hs_qsize--; \
          } \
          /* append _hs_e to the merged output list */ \
          if (_hs_tail != NULL) { \
            _hs_tail->next = ((_hs_e != NULL) ? \
                              ELMT_FROM_HH((head)->hh.tbl,_hs_e) : NULL); \
          } else { \
            _hs_list = _hs_e; \
          } \
          if (_hs_e != NULL) { \
            _hs_e->prev = ((_hs_tail != NULL) ? \
                           ELMT_FROM_HH((head)->hh.tbl,_hs_tail) : NULL); \
          } \
          _hs_tail = _hs_e; \
        } \
        _hs_p = _hs_q; \
      } \
      if (_hs_tail != NULL) { \
        _hs_tail->next = NULL; \
      } \
      if (_hs_nmerges <= 1U) { \
        /* a single merge means the list is fully sorted */ \
        _hs_looping = 0; \
        (head)->hh.tbl->tail = _hs_tail; \
        DECLTYPE_ASSIGN(head, ELMT_FROM_HH((head)->hh.tbl, _hs_list)); \
      } \
      _hs_insize *= 2U; \
    } \
    HASH_FSCK(hh,head); \
  } \
} while (0)
930
/* HASH_SELECT links selected items from one hash into another, giving
 * the chosen items dual presence in both hashes.  No copy of an item
 * is made; each selected struct is added to the destination through a
 * secondary hash handle (hh_dst) that must be present in the struct. */
#define HASH_SELECT(hh_dst, dst, hh_src, src, cond) \
do { \
  unsigned _src_bkt, _dst_bkt; \
  void *_last_elt = NULL, *_elt; \
  UT_hash_handle *_src_hh, *_dst_hh, *_last_elt_hh = NULL; \
  ptrdiff_t _dst_hho = ((char*)(&(dst)->hh_dst) - (char*)(dst)); \
  if (src != NULL) { \
    for (_src_bkt = 0; _src_bkt < (src)->hh_src.tbl->num_buckets; _src_bkt++) { \
      for (_src_hh = (src)->hh_src.tbl->buckets[_src_bkt].hh_head; \
           _src_hh != NULL; \
           _src_hh = _src_hh->hh_next) { \
        _elt = ELMT_FROM_HH((src)->hh_src.tbl, _src_hh); \
        if (cond(_elt)) { \
          _dst_hh = (UT_hash_handle*)(((char*)_elt) + _dst_hho); \
          _dst_hh->key = _src_hh->key; \
          _dst_hh->keylen = _src_hh->keylen; \
          _dst_hh->hashv = _src_hh->hashv; \
          _dst_hh->prev = _last_elt; \
          _dst_hh->next = NULL; \
          if (_last_elt_hh != NULL) { _last_elt_hh->next = _elt; } \
          if (dst == NULL) { \
            /* first selected item: create the destination table */ \
            DECLTYPE_ASSIGN(dst, _elt); \
            HASH_MAKE_TABLE(hh_dst, dst); \
          } else { \
            _dst_hh->tbl = (dst)->hh_dst.tbl; \
          } \
          HASH_TO_BKT(_dst_hh->hashv, _dst_hh->tbl->num_buckets, _dst_bkt); \
          HASH_ADD_TO_BKT(_dst_hh->tbl->buckets[_dst_bkt], _dst_hh); \
          (dst)->hh_dst.tbl->num_items++; \
          _last_elt = _elt; \
          _last_elt_hh = _dst_hh; \
        } \
      } \
    } \
  } \
  HASH_FSCK(hh_dst, dst); \
} while (0)
973
/* Free all hash bookkeeping (buckets, bloom filter, table header) and
 * NULL out the head.  The user structures themselves are NOT freed —
 * the caller still owns them. */
#define HASH_CLEAR(hh,head) \
do { \
  if (head != NULL) { \
    uthash_free((head)->hh.tbl->buckets, \
                (head)->hh.tbl->num_buckets*sizeof(struct UT_hash_bucket)); \
    HASH_BLOOM_FREE((head)->hh.tbl); \
    uthash_free((head)->hh.tbl, sizeof(UT_hash_table)); \
    (head) = NULL; \
  } \
} while (0)
984
/* Total bytes of bookkeeping overhead for a hash: the per-item hash
 * handles, the bucket array, the table header, plus the bloom filter
 * bytes when enabled.  Evaluates to 0 for a NULL head. */
#define HASH_OVERHEAD(hh,head) \
  ((head != NULL) ? ( \
  (size_t)(((head)->hh.tbl->num_items * sizeof(UT_hash_handle)) + \
           ((head)->hh.tbl->num_buckets * sizeof(UT_hash_bucket)) + \
           sizeof(UT_hash_table) + \
           (HASH_BLOOM_BYTELEN))) : 0U)
991
/* Iterate over all items in application order.  `tmp` always holds the
 * lookahead element, so the current element `el` may safely be deleted
 * from the hash during iteration.  The NO_DECLTYPE variant smuggles the
 * lookahead through a char* alias for compilers without decltype. */
#ifdef NO_DECLTYPE
#define HASH_ITER(hh,head,el,tmp) \
for(((el)=(head)), ((*(char**)(&(tmp)))=(char*)((head!=NULL)?(head)->hh.next:NULL)); \
    (el) != NULL; ((el)=(tmp)), ((*(char**)(&(tmp)))=(char*)((tmp!=NULL)?(tmp)->hh.next:NULL)))
#else
#define HASH_ITER(hh,head,el,tmp) \
for(((el)=(head)), ((tmp)=DECLTYPE(el)((head!=NULL)?(head)->hh.next:NULL)); \
    (el) != NULL; ((el)=(tmp)), ((tmp)=DECLTYPE(el)((tmp!=NULL)?(tmp)->hh.next:NULL)))
#endif
1001
/* Number of items currently stored in the hash (0 for a NULL head).
 * HASH_COUNT assumes the handle is named `hh`; HASH_CNT takes the
 * handle name explicitly. */
#define HASH_COUNT(head) HASH_CNT(hh,head)
#define HASH_CNT(hh,head) ((head != NULL)?((head)->hh.tbl->num_items):0U)
1005
typedef struct UT_hash_bucket {
   struct UT_hash_handle *hh_head;  /* first hash handle in this bucket's chain */
   unsigned count;                  /* number of handles currently chained here */

   /* expand_mult is normally 0: the max chain length threshold is then
    * enforced at its default, HASH_BKT_CAPACITY_THRESH (exceeding it
    * triggers bucket expansion).  Setting expand_mult non-zero delays
    * expansion (as triggered by additions to this particular bucket)
    * until the chain reaches a *multiple* of HASH_BKT_CAPACITY_THRESH;
    * the multiplier is simply expand_mult+1.  The point is to reduce
    * bucket expansions — which are expensive — for buckets known to be
    * heavily used: better to let the chain grow to a longer but still
    * bounded length than to do an O(n) bucket expansion too often. */
   unsigned expand_mult;

} UT_hash_bucket;
1025
/* Random signatures embedded in every table/bloom filter; external
 * analysis tools can scan memory for these to locate uthash tables. */
#define HASH_SIGNATURE 0xa0111fe1u
#define HASH_BLOOM_SIGNATURE 0xb12220f2u
1029
1030typedef struct UT_hash_table {
1031 UT_hash_bucket *buckets;
1032 unsigned num_buckets, log2_num_buckets;
1033 unsigned num_items;
1034 struct UT_hash_handle *tail; /* tail hh in app order, for fast append */
1035 ptrdiff_t hho; /* hash handle offset (byte pos of hash handle in element */
1036
1037 /* in an ideal situation (all buckets used equally), no bucket would have
1038 * more than ceil(#items/#buckets) items. that's the ideal chain length. */
1039 unsigned ideal_chain_maxlen;
1040
1041 /* nonideal_items is the number of items in the hash whose chain position
1042 * exceeds the ideal chain maxlen. these items pay the penalty for an uneven
1043 * hash distribution; reaching them in a chain traversal takes >ideal steps */
1044 unsigned nonideal_items;
1045
1046 /* ineffective expands occur when a bucket doubling was performed, but
1047 * afterward, more than half the items in the hash had nonideal chain
1048 * positions. If this happens on two consecutive expansions we inhibit any
1049 * further expansion, as it's not helping; this happens when the hash
1050 * function isn't a good fit for the key domain. When expansion is inhibited
1051 * the hash will still work, albeit no longer in constant time. */
1052 unsigned ineff_expands, noexpand;
1053
1054 uint32_t signature; /* used only to find hash tables in external analysis */
1055#ifdef HASH_BLOOM
1056 uint32_t bloom_sig; /* used only to test bloom exists in external analysis */
1057 uint8_t *bloom_bv;
1058 uint8_t bloom_nbits;
1059#endif
1060
1061} UT_hash_table;
1062
typedef struct UT_hash_handle {
   struct UT_hash_table *tbl;      /* table this handle belongs to */
   void *prev;                     /* prev element in app order */
   void *next;                     /* next element in app order */
   struct UT_hash_handle *hh_prev; /* previous hh in bucket order */
   struct UT_hash_handle *hh_next; /* next hh in bucket order */
   void *key;                      /* ptr to enclosing struct's key */
   unsigned keylen;                /* enclosing struct's key len */
   unsigned hashv;                 /* result of hash-fcn(key) */
} UT_hash_handle;
1073
1074#endif /* UTHASH_H */
diff --git a/libkms/Android.mk b/libkms/Android.mk
index aafebb64..9f81d8e3 100644
--- a/libkms/Android.mk
+++ b/libkms/Android.mk
@@ -4,6 +4,7 @@ intel_drivers := i915 i965 i915g ilo
4radeon_drivers := r300g r600g radeonsi 4radeon_drivers := r300g r600g radeonsi
5rockchip_drivers := rockchip 5rockchip_drivers := rockchip
6nouveau_drivers := nouveau 6nouveau_drivers := nouveau
7virgl_drivers := virgl
7vmwgfx_drivers := vmwgfx 8vmwgfx_drivers := vmwgfx
8tegra_drivers := tegra 9tegra_drivers := tegra
9 10
@@ -12,6 +13,7 @@ valid_drivers := \
12 $(radeon_drivers) \ 13 $(radeon_drivers) \
13 $(rockchip_drivers) \ 14 $(rockchip_drivers) \
14 $(nouveau_drivers) \ 15 $(nouveau_drivers) \
16 $(virgl_drivers) \
15 $(vmwgfx_drivers) \ 17 $(vmwgfx_drivers) \
16 $(tegra_drivers) 18 $(tegra_drivers)
17 19
@@ -46,10 +48,8 @@ ifneq ($(filter $(radeon_drivers), $(DRM_GPU_DRIVERS)),)
46LOCAL_SRC_FILES += $(LIBKMS_RADEON_FILES) 48LOCAL_SRC_FILES += $(LIBKMS_RADEON_FILES)
47endif 49endif
48 50
49LOCAL_SRC_FILES := $(filter-out %.h,$(LOCAL_SRC_FILES))
50
51LOCAL_MODULE := libkms 51LOCAL_MODULE := libkms
52LOCAL_MODULE_TAGS := optional
53LOCAL_SHARED_LIBRARIES := libdrm 52LOCAL_SHARED_LIBRARIES := libdrm
54 53
54include $(LIBDRM_COMMON_MK)
55include $(BUILD_SHARED_LIBRARY) 55include $(BUILD_SHARED_LIBRARY)
diff --git a/libkms/Makefile.am b/libkms/Makefile.am
index 6c0ab7a2..461fc35b 100644
--- a/libkms/Makefile.am
+++ b/libkms/Makefile.am
@@ -10,10 +10,6 @@ libkms_ladir = $(libdir)
10libkms_la_LDFLAGS = -version-number 1:0:0 -no-undefined 10libkms_la_LDFLAGS = -version-number 1:0:0 -no-undefined
11libkms_la_LIBADD = ../libdrm.la 11libkms_la_LIBADD = ../libdrm.la
12 12
13#if HAVE_LIBUDEV
14#libkms_la_LIBADD += $(LIBUDEV_LIBS)
15#endif
16
17libkms_la_SOURCES = $(LIBKMS_FILES) 13libkms_la_SOURCES = $(LIBKMS_FILES)
18 14
19if HAVE_VMWGFX 15if HAVE_VMWGFX
@@ -44,4 +40,4 @@ pkgconfigdir = @pkgconfigdir@
44pkgconfig_DATA = libkms.pc 40pkgconfig_DATA = libkms.pc
45 41
46TESTS = kms-symbol-check 42TESTS = kms-symbol-check
47EXTRA_DIST = Android.mk $(TESTS) 43EXTRA_DIST = $(TESTS)
diff --git a/libkms/exynos.c b/libkms/exynos.c
index 5de2e5a9..0e97fb51 100644
--- a/libkms/exynos.c
+++ b/libkms/exynos.c
@@ -88,7 +88,8 @@ exynos_bo_create(struct kms_driver *kms,
88 pitch = (pitch + 512 - 1) & ~(512 - 1); 88 pitch = (pitch + 512 - 1) & ~(512 - 1);
89 size = pitch * ((height + 4 - 1) & ~(4 - 1)); 89 size = pitch * ((height + 4 - 1) & ~(4 - 1));
90 } else { 90 } else {
91 return -EINVAL; 91 ret = -EINVAL;
92 goto err_free;
92 } 93 }
93 94
94 memset(&arg, 0, sizeof(arg)); 95 memset(&arg, 0, sizeof(arg));
diff --git a/libkms/libkms.pc.in b/libkms/libkms.pc.in
index 511535ad..1421b3ea 100644
--- a/libkms/libkms.pc.in
+++ b/libkms/libkms.pc.in
@@ -8,3 +8,4 @@ Description: Library that abstract aways the different mm interface for kernel d
8Version: 1.0.0 8Version: 1.0.0
9Libs: -L${libdir} -lkms 9Libs: -L${libdir} -lkms
10Cflags: -I${includedir}/libkms 10Cflags: -I${includedir}/libkms
11Requires.private: libdrm
diff --git a/libkms/linux.c b/libkms/linux.c
index 6e0da830..0b50777e 100644
--- a/libkms/linux.c
+++ b/libkms/linux.c
@@ -41,9 +41,12 @@
41#include <unistd.h> 41#include <unistd.h>
42#include <sys/stat.h> 42#include <sys/stat.h>
43#include <sys/types.h> 43#include <sys/types.h>
44#ifdef HAVE_SYS_MKDEV_H 44#ifdef MAJOR_IN_MKDEV
45#include <sys/mkdev.h> 45#include <sys/mkdev.h>
46#endif 46#endif
47#ifdef MAJOR_IN_SYSMACROS
48#include <sys/sysmacros.h>
49#endif
47 50
48#include "libdrm_macros.h" 51#include "libdrm_macros.h"
49#include "internal.h" 52#include "internal.h"
@@ -138,105 +141,11 @@ linux_from_sysfs(int fd, struct kms_driver **out)
138 return ret; 141 return ret;
139} 142}
140 143
141#if 0
142#define LIBUDEV_I_KNOW_THE_API_IS_SUBJECT_TO_CHANGE
143#include <libudev.h>
144
145struct create_record
146{
147 unsigned vendor;
148 unsigned chip;
149 int (*func)(int fd, struct kms_driver **out);
150};
151
152static const struct create_record table[] = {
153 { 0x8086, 0x2a42, intel_create }, /* i965 */
154#ifdef HAVE_VMWGFX
155 { 0x15ad, 0x0405, vmwgfx_create }, /* VMware vGPU */
156#endif
157 { 0, 0, NULL },
158};
159
160static int
161linux_get_pciid_from_fd(int fd, unsigned *vendor_id, unsigned *chip_id)
162{
163 struct udev *udev;
164 struct udev_device *device;
165 struct udev_device *parent;
166 const char *pci_id;
167 struct stat buffer;
168 int ret;
169
170 ret = fstat(fd, &buffer);
171 if (ret)
172 return -EINVAL;
173
174 if (!S_ISCHR(buffer.st_mode))
175 return -EINVAL;
176
177 udev = udev_new();
178 if (!udev)
179 return -ENOMEM;
180
181 device = udev_device_new_from_devnum(udev, 'c', buffer.st_rdev);
182 if (!device)
183 goto err_free_udev;
184
185 parent = udev_device_get_parent(device);
186 if (!parent)
187 goto err_free_device;
188
189 pci_id = udev_device_get_property_value(parent, "PCI_ID");
190 if (!pci_id)
191 goto err_free_device;
192
193 if (sscanf(pci_id, "%x:%x", vendor_id, chip_id) != 2)
194 goto err_free_device;
195
196 udev_device_unref(device);
197 udev_unref(udev);
198
199 return 0;
200
201err_free_device:
202 udev_device_unref(device);
203err_free_udev:
204 udev_unref(udev);
205 return -EINVAL;
206}
207
208static int
209linux_from_udev(int fd, struct kms_driver **out)
210{
211 unsigned vendor_id, chip_id;
212 int ret, i;
213
214 ret = linux_get_pciid_from_fd(fd, &vendor_id, &chip_id);
215 if (ret)
216 return ret;
217
218 for (i = 0; table[i].func; i++)
219 if (table[i].vendor == vendor_id && table[i].chip == chip_id)
220 return table[i].func(fd, out);
221
222 return -ENOSYS;
223}
224#else
225static int
226linux_from_udev(int fd, struct kms_driver **out)
227{
228 return -ENOSYS;
229}
230#endif
231
232drm_private int 144drm_private int
233linux_create(int fd, struct kms_driver **out) 145linux_create(int fd, struct kms_driver **out)
234{ 146{
235 if (!dumb_create(fd, out)) 147 if (!dumb_create(fd, out))
236 return 0; 148 return 0;
237 149
238 if (!linux_from_udev(fd, out))
239 return 0;
240
241 return linux_from_sysfs(fd, out); 150 return linux_from_sysfs(fd, out);
242} 151}
diff --git a/libsync.h b/libsync.h
new file mode 100644
index 00000000..f1a2f96d
--- /dev/null
+++ b/libsync.h
@@ -0,0 +1,148 @@
1/*
2 * sync abstraction
3 * Copyright 2015-2016 Collabora Ltd.
4 *
5 * Based on the implementation from the Android Open Source Project,
6 *
7 * Copyright 2012 Google, Inc
8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a
10 * copy of this software and associated documentation files (the "Software"),
11 * to deal in the Software without restriction, including without limitation
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice shall be included in
17 * all copies or substantial portions of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 * OTHER DEALINGS IN THE SOFTWARE.
26 */
27
28#ifndef _LIBSYNC_H
29#define _LIBSYNC_H
30
31#include <assert.h>
32#include <errno.h>
33#include <stdint.h>
34#include <string.h>
35#include <sys/ioctl.h>
36#include <sys/poll.h>
37#include <unistd.h>
38
39#if defined(__cplusplus)
40extern "C" {
41#endif
42
#ifndef SYNC_IOC_MERGE
/* Duplicated from linux/sync_file.h to avoid a build-time dependency
 * on new (v4.7) kernel headers.  Once distros are mostly using
 * something newer than v4.7, drop this and #include <linux/sync_file.h>
 * instead. */
struct sync_merge_data {
	char name[32];   /* name for the merged fence */
	int32_t fd2;     /* in: second fence fd to merge */
	int32_t fence;   /* out: fd of the new merged fence */
	uint32_t flags;
	uint32_t pad;
};
#define SYNC_IOC_MAGIC '>'
#define SYNC_IOC_MERGE _IOWR(SYNC_IOC_MAGIC, 3, struct sync_merge_data)
#endif
59
60
/* Wait for a sync fence fd to signal, with poll() timeout semantics
 * (milliseconds; negative blocks indefinitely, 0 just polls).
 * Returns 0 once the fence is signalled, or -1 with errno set:
 * EINVAL if poll reports POLLERR/POLLNVAL, ETIME on timeout, or
 * whatever errno a non-retryable poll() failure left behind. */
static inline int sync_wait(int fd, int timeout)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	int rc;

	do {
		rc = poll(&pfd, 1, timeout);
		if (rc > 0) {
			if (pfd.revents & (POLLERR | POLLNVAL)) {
				errno = EINVAL;
				return -1;
			}
			return 0;
		}
		if (rc == 0) {
			errno = ETIME;
			return -1;
		}
	} while (rc == -1 && (errno == EINTR || errno == EAGAIN));

	return rc;
}
85
86static inline int sync_merge(const char *name, int fd1, int fd2)
87{
88 struct sync_merge_data data = {0};
89 int ret;
90
91 data.fd2 = fd2;
92 strncpy(data.name, name, sizeof(data.name));
93
94 do {
95 ret = ioctl(fd1, SYNC_IOC_MERGE, &data);
96 } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
97
98 if (ret < 0)
99 return ret;
100
101 return data.fence;
102}
103
104/* accumulate fd2 into fd1. If *fd1 is not a valid fd then dup fd2,
105 * otherwise sync_merge() and close the old *fd1. This can be used
106 * to implement the pattern:
107 *
108 * init()
109 * {
110 * batch.fence_fd = -1;
111 * }
112 *
113 * // does *NOT* take ownership of fd
114 * server_sync(int fd)
115 * {
116 * if (sync_accumulate("foo", &batch.fence_fd, fd)) {
117 * ... error ...
118 * }
119 * }
120 */
121static inline int sync_accumulate(const char *name, int *fd1, int fd2)
122{
123 int ret;
124
125 assert(fd2 >= 0);
126
127 if (*fd1 < 0) {
128 *fd1 = dup(fd2);
129 return 0;
130 }
131
132 ret = sync_merge(name, *fd1, fd2);
133 if (ret < 0) {
134 /* leave *fd1 as it is */
135 return ret;
136 }
137
138 close(*fd1);
139 *fd1 = ret;
140
141 return 0;
142}
143
144#if defined(__cplusplus)
145}
146#endif
147
148#endif
diff --git a/man/drm-kms.xml b/man/drm-kms.xml
index 5f041578..ae38dc8d 100644
--- a/man/drm-kms.xml
+++ b/man/drm-kms.xml
@@ -126,7 +126,7 @@
126 <listitem> 126 <listitem>
127 <para><emphasis>Framebuffers</emphasis> are abstract memory objects 127 <para><emphasis>Framebuffers</emphasis> are abstract memory objects
128 that provide a source of pixel data to scanout to a CRTC. 128 that provide a source of pixel data to scanout to a CRTC.
129 Applications explicitely request the creation of framebuffers 129 Applications explicitly request the creation of framebuffers
130 and can control their behavior. Framebuffers rely on the 130 and can control their behavior. Framebuffers rely on the
131 underneath memory manager for low-level memory operations. 131 underneath memory manager for low-level memory operations.
132 When creating a framebuffer, applications pass a memory handle 132 When creating a framebuffer, applications pass a memory handle
diff --git a/nouveau/Android.mk b/nouveau/Android.mk
index 19927973..b430af4f 100644
--- a/nouveau/Android.mk
+++ b/nouveau/Android.mk
@@ -8,9 +8,7 @@ LOCAL_MODULE := libdrm_nouveau
8 8
9LOCAL_SHARED_LIBRARIES := libdrm 9LOCAL_SHARED_LIBRARIES := libdrm
10 10
11LOCAL_SRC_FILES := $(filter-out %.h,$(LIBDRM_NOUVEAU_FILES)) 11LOCAL_SRC_FILES := $(LIBDRM_NOUVEAU_FILES)
12
13LOCAL_CFLAGS := \
14 -DHAVE_LIBDRM_ATOMIC_PRIMITIVES=1
15 12
13include $(LIBDRM_COMMON_MK)
16include $(BUILD_SHARED_LIBRARY) 14include $(BUILD_SHARED_LIBRARY)
diff --git a/nouveau/Makefile.am b/nouveau/Makefile.am
index 76cdecad..344a8445 100644
--- a/nouveau/Makefile.am
+++ b/nouveau/Makefile.am
@@ -30,4 +30,4 @@ pkgconfigdir = @pkgconfigdir@
30pkgconfig_DATA = libdrm_nouveau.pc 30pkgconfig_DATA = libdrm_nouveau.pc
31 31
32TESTS = nouveau-symbol-check 32TESTS = nouveau-symbol-check
33EXTRA_DIST = Android.mk $(TESTS) 33EXTRA_DIST = $(TESTS)
diff --git a/radeon/Android.mk b/radeon/Android.mk
index 890bf541..71040dab 100644
--- a/radeon/Android.mk
+++ b/radeon/Android.mk
@@ -8,9 +8,7 @@ LOCAL_MODULE := libdrm_radeon
8 8
9LOCAL_SHARED_LIBRARIES := libdrm 9LOCAL_SHARED_LIBRARIES := libdrm
10 10
11LOCAL_SRC_FILES := $(filter-out %.h,$(LIBDRM_RADEON_FILES)) 11LOCAL_SRC_FILES := $(LIBDRM_RADEON_FILES)
12
13LOCAL_CFLAGS := \
14 -DHAVE_LIBDRM_ATOMIC_PRIMITIVES=1
15 12
13include $(LIBDRM_COMMON_MK)
16include $(BUILD_SHARED_LIBRARY) 14include $(BUILD_SHARED_LIBRARY)
diff --git a/radeon/Makefile.am b/radeon/Makefile.am
index 25c03d3c..e2415314 100644
--- a/radeon/Makefile.am
+++ b/radeon/Makefile.am
@@ -43,4 +43,5 @@ libdrm_radeoninclude_HEADERS = $(LIBDRM_RADEON_H_FILES)
43pkgconfigdir = @pkgconfigdir@ 43pkgconfigdir = @pkgconfigdir@
44pkgconfig_DATA = libdrm_radeon.pc 44pkgconfig_DATA = libdrm_radeon.pc
45 45
46EXTRA_DIST = Android.mk $(LIBDRM_RADEON_BOF_FILES) $(TESTS) 46TESTS = radeon-symbol-check
47EXTRA_DIST = $(LIBDRM_RADEON_BOF_FILES) $(TESTS)
diff --git a/radeon/libdrm_radeon.pc.in b/radeon/libdrm_radeon.pc.in
index 68ef0ab1..432993a3 100644
--- a/radeon/libdrm_radeon.pc.in
+++ b/radeon/libdrm_radeon.pc.in
@@ -8,3 +8,4 @@ Description: Userspace interface to kernel DRM services for radeon
8Version: @PACKAGE_VERSION@ 8Version: @PACKAGE_VERSION@
9Libs: -L${libdir} -ldrm_radeon 9Libs: -L${libdir} -ldrm_radeon
10Cflags: -I${includedir} -I${includedir}/libdrm 10Cflags: -I${includedir} -I${includedir}/libdrm
11Requires.private: libdrm
diff --git a/radeon/radeon_bo_gem.c b/radeon/radeon_bo_gem.c
index c9fe19ff..fbd453d9 100644
--- a/radeon/radeon_bo_gem.c
+++ b/radeon/radeon_bo_gem.c
@@ -103,7 +103,7 @@ static struct radeon_bo *bo_open(struct radeon_bo_manager *bom,
103 args.size = size; 103 args.size = size;
104 args.alignment = alignment; 104 args.alignment = alignment;
105 args.initial_domain = bo->base.domains; 105 args.initial_domain = bo->base.domains;
106 args.flags = 0; 106 args.flags = flags;
107 args.handle = 0; 107 args.handle = 0;
108 r = drmCommandWriteRead(bom->fd, DRM_RADEON_GEM_CREATE, 108 r = drmCommandWriteRead(bom->fd, DRM_RADEON_GEM_CREATE,
109 &args, sizeof(args)); 109 &args, sizeof(args));
diff --git a/radeon/radeon_cs_gem.c b/radeon/radeon_cs_gem.c
index cdec64e0..f3dccb6c 100644
--- a/radeon/radeon_cs_gem.c
+++ b/radeon/radeon_cs_gem.c
@@ -189,7 +189,7 @@ static int cs_gem_write_reloc(struct radeon_cs_int *cs,
189 /* check domains */ 189 /* check domains */
190 if ((read_domain && write_domain) || (!read_domain && !write_domain)) { 190 if ((read_domain && write_domain) || (!read_domain && !write_domain)) {
191 /* in one CS a bo can only be in read or write domain but not 191 /* in one CS a bo can only be in read or write domain but not
192 * in read & write domain at the same sime 192 * in read & write domain at the same time
193 */ 193 */
194 return -EINVAL; 194 return -EINVAL;
195 } 195 }
@@ -242,7 +242,7 @@ static int cs_gem_write_reloc(struct radeon_cs_int *cs,
242 } 242 }
243 /* new relocation */ 243 /* new relocation */
244 if (csg->base.crelocs >= csg->nrelocs) { 244 if (csg->base.crelocs >= csg->nrelocs) {
245 /* allocate more memory (TODO: should use a slab allocatore maybe) */ 245 /* allocate more memory (TODO: should use a slab allocator maybe) */
246 uint32_t *tmp, size; 246 uint32_t *tmp, size;
247 size = ((csg->nrelocs + 1) * sizeof(struct radeon_bo*)); 247 size = ((csg->nrelocs + 1) * sizeof(struct radeon_bo*));
248 tmp = (uint32_t*)realloc(csg->relocs_bo, size); 248 tmp = (uint32_t*)realloc(csg->relocs_bo, size);
@@ -268,7 +268,7 @@ static int cs_gem_write_reloc(struct radeon_cs_int *cs,
268 reloc->flags = flags; 268 reloc->flags = flags;
269 csg->chunks[1].length_dw += RELOC_SIZE; 269 csg->chunks[1].length_dw += RELOC_SIZE;
270 radeon_bo_ref(bo); 270 radeon_bo_ref(bo);
271 /* bo might be referenced from another context so have to use atomic opertions */ 271 /* bo might be referenced from another context so have to use atomic operations */
272 atomic_add((atomic_t *)radeon_gem_get_reloc_in_cs(bo), cs->id); 272 atomic_add((atomic_t *)radeon_gem_get_reloc_in_cs(bo), cs->id);
273 cs->relocs_total_size += boi->size; 273 cs->relocs_total_size += boi->size;
274 radeon_cs_write_dword((struct radeon_cs *)cs, 0xc0001000); 274 radeon_cs_write_dword((struct radeon_cs *)cs, 0xc0001000);
@@ -323,7 +323,7 @@ static int cs_gem_end(struct radeon_cs_int *cs,
323 return -EPIPE; 323 return -EPIPE;
324 } 324 }
325 if (cs->section_ndw != cs->section_cdw) { 325 if (cs->section_ndw != cs->section_cdw) {
326 fprintf(stderr, "CS section size missmatch start at (%s,%s,%d) %d vs %d\n", 326 fprintf(stderr, "CS section size mismatch start at (%s,%s,%d) %d vs %d\n",
327 cs->section_file, cs->section_func, cs->section_line, cs->section_ndw, cs->section_cdw); 327 cs->section_file, cs->section_func, cs->section_line, cs->section_ndw, cs->section_cdw);
328 fprintf(stderr, "CS section end at (%s,%s,%d)\n", 328 fprintf(stderr, "CS section end at (%s,%s,%d)\n",
329 file, func, line); 329 file, func, line);
@@ -449,7 +449,7 @@ static int cs_gem_emit(struct radeon_cs_int *cs)
449 &csg->cs, sizeof(struct drm_radeon_cs)); 449 &csg->cs, sizeof(struct drm_radeon_cs));
450 for (i = 0; i < csg->base.crelocs; i++) { 450 for (i = 0; i < csg->base.crelocs; i++) {
451 csg->relocs_bo[i]->space_accounted = 0; 451 csg->relocs_bo[i]->space_accounted = 0;
452 /* bo might be referenced from another context so have to use atomic opertions */ 452 /* bo might be referenced from another context so have to use atomic operations */
453 atomic_dec((atomic_t *)radeon_gem_get_reloc_in_cs((struct radeon_bo*)csg->relocs_bo[i]), cs->id); 453 atomic_dec((atomic_t *)radeon_gem_get_reloc_in_cs((struct radeon_bo*)csg->relocs_bo[i]), cs->id);
454 radeon_bo_unref((struct radeon_bo *)csg->relocs_bo[i]); 454 radeon_bo_unref((struct radeon_bo *)csg->relocs_bo[i]);
455 csg->relocs_bo[i] = NULL; 455 csg->relocs_bo[i] = NULL;
@@ -481,7 +481,7 @@ static int cs_gem_erase(struct radeon_cs_int *cs)
481 if (csg->relocs_bo) { 481 if (csg->relocs_bo) {
482 for (i = 0; i < csg->base.crelocs; i++) { 482 for (i = 0; i < csg->base.crelocs; i++) {
483 if (csg->relocs_bo[i]) { 483 if (csg->relocs_bo[i]) {
484 /* bo might be referenced from another context so have to use atomic opertions */ 484 /* bo might be referenced from another context so have to use atomic operations */
485 atomic_dec((atomic_t *)radeon_gem_get_reloc_in_cs((struct radeon_bo*)csg->relocs_bo[i]), cs->id); 485 atomic_dec((atomic_t *)radeon_gem_get_reloc_in_cs((struct radeon_bo*)csg->relocs_bo[i]), cs->id);
486 radeon_bo_unref((struct radeon_bo *)csg->relocs_bo[i]); 486 radeon_bo_unref((struct radeon_bo *)csg->relocs_bo[i]);
487 csg->relocs_bo[i] = NULL; 487 csg->relocs_bo[i] = NULL;
diff --git a/radeon/radeon_surface.c b/radeon/radeon_surface.c
index 5ec97454..965be24c 100644
--- a/radeon/radeon_surface.c
+++ b/radeon/radeon_surface.c
@@ -42,6 +42,14 @@
42#include "radeon_drm.h" 42#include "radeon_drm.h"
43#include "radeon_surface.h" 43#include "radeon_surface.h"
44 44
45#define CIK_TILE_MODE_COLOR_2D 14
46#define CIK_TILE_MODE_COLOR_2D_SCANOUT 10
47#define CIK_TILE_MODE_DEPTH_STENCIL_2D_TILESPLIT_64 0
48#define CIK_TILE_MODE_DEPTH_STENCIL_2D_TILESPLIT_128 1
49#define CIK_TILE_MODE_DEPTH_STENCIL_2D_TILESPLIT_256 2
50#define CIK_TILE_MODE_DEPTH_STENCIL_2D_TILESPLIT_512 3
51#define CIK_TILE_MODE_DEPTH_STENCIL_2D_TILESPLIT_ROW_SIZE 4
52
45#define ALIGN(value, alignment) (((value) + alignment - 1) & ~(alignment - 1)) 53#define ALIGN(value, alignment) (((value) + alignment - 1) & ~(alignment - 1))
46#define MAX2(A, B) ((A) > (B) ? (A) : (B)) 54#define MAX2(A, B) ((A) > (B) ? (A) : (B))
47#define MIN2(A, B) ((A) < (B) ? (A) : (B)) 55#define MIN2(A, B) ((A) < (B) ? (A) : (B))
@@ -957,8 +965,10 @@ static int eg_surface_best(struct radeon_surface_manager *surf_man,
957 } 965 }
958 surf->stencil_tile_split = 64; 966 surf->stencil_tile_split = 64;
959 } else { 967 } else {
960 /* tile split must be >= 256 for colorbuffer surfaces */ 968 /* tile split must be >= 256 for colorbuffer surfaces,
961 surf->tile_split = MAX2(surf->nsamples * surf->bpe * 64, 256); 969 * SAMPLE_SPLIT = tile_split / (bpe * 64), the optimal value is 2
970 */
971 surf->tile_split = MAX2(2 * surf->bpe * 64, 256);
962 if (surf->tile_split > 4096) 972 if (surf->tile_split > 4096)
963 surf->tile_split = 4096; 973 surf->tile_split = 4096;
964 } 974 }
@@ -971,7 +981,7 @@ static int eg_surface_best(struct radeon_surface_manager *surf_man,
971 /* bankw or bankh greater than 1 increase alignment requirement, not 981 /* bankw or bankh greater than 1 increase alignment requirement, not
972 * sure if it's worth using smaller bankw & bankh to stick with 2D 982 * sure if it's worth using smaller bankw & bankh to stick with 2D
973 * tiling on small surface rather than falling back to 1D tiling. 983 * tiling on small surface rather than falling back to 1D tiling.
974 * Use recommanded value based on tile size for now. 984 * Use recommended value based on tile size for now.
975 * 985 *
976 * fmask buffer has different optimal value figure them out once we 986 * fmask buffer has different optimal value figure them out once we
977 * use it. 987 * use it.
diff --git a/tests/Makefile.am b/tests/Makefile.am
index d8925764..f2bb4d44 100644
--- a/tests/Makefile.am
+++ b/tests/Makefile.am
@@ -22,6 +22,14 @@ if HAVE_TEGRA
22SUBDIRS += tegra 22SUBDIRS += tegra
23endif 23endif
24 24
25if HAVE_ETNAVIV
26SUBDIRS += etnaviv
27endif
28
29if HAVE_NOUVEAU
30SUBDIRS += nouveau
31endif
32
25AM_CFLAGS = \ 33AM_CFLAGS = \
26 $(WARN_CFLAGS)\ 34 $(WARN_CFLAGS)\
27 -I $(top_srcdir)/include/drm \ 35 -I $(top_srcdir)/include/drm \
@@ -29,47 +37,11 @@ AM_CFLAGS = \
29 37
30LDADD = $(top_builddir)/libdrm.la 38LDADD = $(top_builddir)/libdrm.la
31 39
32check_PROGRAMS = \
33 dristat \
34 drmdevice \
35 drmstat
36
37dristat_LDADD = -lm
38
39if HAVE_NOUVEAU
40SUBDIRS += nouveau
41endif
42
43TESTS = \ 40TESTS = \
44 drmsl \ 41 drmsl \
45 hash \ 42 hash \
46 random 43 random
47 44
48if HAVE_LIBUDEV 45check_PROGRAMS = \
49 46 $(TESTS) \
50check_LTLIBRARIES = libdrmtest.la 47 drmdevice
51
52libdrmtest_la_SOURCES = \
53 drmtest.c \
54 drmtest.h
55
56LDADD += \
57 libdrmtest.la \
58 $(LIBUDEV_LIBS)
59
60
61XFAIL_TESTS = \
62 auth \
63 lock
64
65TESTS += \
66 openclose \
67 getversion \
68 getclient \
69 getstats \
70 setversion \
71 updatedraw \
72 name_from_fd
73endif
74
75check_PROGRAMS += $(TESTS)
diff --git a/tests/amdgpu/amdgpu_test.c b/tests/amdgpu/amdgpu_test.c
index 71f357c6..3fd6820a 100644
--- a/tests/amdgpu/amdgpu_test.c
+++ b/tests/amdgpu/amdgpu_test.c
@@ -56,6 +56,9 @@
56 */ 56 */
57int drm_amdgpu[MAX_CARDS_SUPPORTED]; 57int drm_amdgpu[MAX_CARDS_SUPPORTED];
58 58
59/** Open render node to test */
60int open_render_node = 0; /* By default run most tests on primary node */
61
59/** The table of all known test suites to run */ 62/** The table of all known test suites to run */
60static CU_SuiteInfo suites[] = { 63static CU_SuiteInfo suites[] = {
61 { 64 {
@@ -108,12 +111,181 @@ static void display_test_suites(void)
108 111
109 112
110/** Help string for command line parameters */ 113/** Help string for command line parameters */
111static const char usage[] = "Usage: %s [-hl] [<-s <suite id>> [-t <test id>]]\n" 114static const char usage[] =
112 "where:\n" 115 "Usage: %s [-hlpr] [<-s <suite id>> [-t <test id>]] "
113 " l - Display all suites and their tests\n" 116 "[-b <pci_bus_id> [-d <pci_device_id>]]\n"
114 " h - Display this help\n"; 117 "where:\n"
118 " l - Display all suites and their tests\n"
119 " r - Run the tests on render node\n"
120 " b - Specify device's PCI bus id to run tests\n"
121 " d - Specify device's PCI device id to run tests (optional)\n"
122 " p - Display information of AMDGPU devices in system\n"
123 " h - Display this help\n";
115/** Specified options strings for getopt */ 124/** Specified options strings for getopt */
116static const char options[] = "hls:t:"; 125static const char options[] = "hlrps:t:b:d:";
126
 127/* Open AMD devices.
 128 * Return the number of AMD devices opened.
129 */
130static int amdgpu_open_devices(int open_render_node)
131{
132 drmDevicePtr devices[MAX_CARDS_SUPPORTED];
133 int ret;
134 int i;
135 int drm_node;
136 int amd_index = 0;
137 int drm_count;
138 int fd;
139 drmVersionPtr version;
140
141 drm_count = drmGetDevices2(0, devices, MAX_CARDS_SUPPORTED);
142
143 if (drm_count < 0) {
144 fprintf(stderr,
145 "drmGetDevices2() returned an error %d\n",
146 drm_count);
147 return 0;
148 }
149
150 for (i = 0; i < drm_count; i++) {
 151		/* If this is not a PCI device, skip */
152 if (devices[i]->bustype != DRM_BUS_PCI)
153 continue;
154
 155		/* If this is not an AMD GPU vendor ID, skip */
156 if (devices[i]->deviceinfo.pci->vendor_id != 0x1002)
157 continue;
158
159 if (open_render_node)
160 drm_node = DRM_NODE_RENDER;
161 else
162 drm_node = DRM_NODE_PRIMARY;
163
164 fd = -1;
165 if (devices[i]->available_nodes & 1 << drm_node)
166 fd = open(
167 devices[i]->nodes[drm_node],
168 O_RDWR | O_CLOEXEC);
169
170 /* This node is not available. */
171 if (fd < 0) continue;
172
173 version = drmGetVersion(fd);
174 if (!version) {
175 fprintf(stderr,
176 "Warning: Cannot get version for %s."
177 "Error is %s\n",
178 devices[i]->nodes[drm_node],
179 strerror(errno));
180 close(fd);
181 continue;
182 }
183
184 if (strcmp(version->name, "amdgpu")) {
185 /* This is not AMDGPU driver, skip.*/
186 drmFreeVersion(version);
187 close(fd);
188 continue;
189 }
190
191 drmFreeVersion(version);
192
193 drm_amdgpu[amd_index] = fd;
194 amd_index++;
195 }
196
197 drmFreeDevices(devices, drm_count);
198 return amd_index;
199}
200
201/* Close AMD devices.
202 */
203static void amdgpu_close_devices()
204{
205 int i;
206 for (i = 0; i < MAX_CARDS_SUPPORTED; i++)
207 if (drm_amdgpu[i] >=0)
208 close(drm_amdgpu[i]);
209}
210
211/* Print AMD devices information */
212static void amdgpu_print_devices()
213{
214 int i;
215 drmDevicePtr device;
216
 217	/* Open the first AMD device to print driver information. */
218 if (drm_amdgpu[0] >=0) {
219 /* Display AMD driver version information.*/
220 drmVersionPtr retval = drmGetVersion(drm_amdgpu[0]);
221
222 if (retval == NULL) {
223 perror("Cannot get version for AMDGPU device");
224 return;
225 }
226
227 printf("Driver name: %s, Date: %s, Description: %s.\n",
228 retval->name, retval->date, retval->desc);
229 drmFreeVersion(retval);
230 }
231
232 /* Display information of AMD devices */
233 printf("Devices:\n");
234 for (i = 0; i < MAX_CARDS_SUPPORTED && drm_amdgpu[i] >=0; i++)
235 if (drmGetDevice2(drm_amdgpu[i],
236 DRM_DEVICE_GET_PCI_REVISION,
237 &device) == 0) {
238 if (device->bustype == DRM_BUS_PCI) {
239 printf("PCI ");
240 printf(" domain:%04x",
241 device->businfo.pci->domain);
242 printf(" bus:%02x",
243 device->businfo.pci->bus);
244 printf(" device:%02x",
245 device->businfo.pci->dev);
246 printf(" function:%01x",
247 device->businfo.pci->func);
248 printf(" vendor_id:%04x",
249 device->deviceinfo.pci->vendor_id);
250 printf(" device_id:%04x",
251 device->deviceinfo.pci->device_id);
252 printf(" subvendor_id:%04x",
253 device->deviceinfo.pci->subvendor_id);
254 printf(" subdevice_id:%04x",
255 device->deviceinfo.pci->subdevice_id);
256 printf(" revision_id:%02x",
257 device->deviceinfo.pci->revision_id);
258 printf("\n");
259 }
260 drmFreeDevice(&device);
261 }
262}
263
 264/* Find a matching AMD device on the PCI bus.
 265 * Return the index of the device or -1 if not found
266 */
267static int amdgpu_find_device(uint8_t bus, uint8_t dev)
268{
269 int i;
270 drmDevicePtr device;
271
272 for (i = 0; i < MAX_CARDS_SUPPORTED && drm_amdgpu[i] >=0; i++)
273 if (drmGetDevice2(drm_amdgpu[i],
274 DRM_DEVICE_GET_PCI_REVISION,
275 &device) == 0) {
276 if (device->bustype == DRM_BUS_PCI)
277 if (device->businfo.pci->bus == bus &&
278 device->businfo.pci->dev == dev) {
279
280 drmFreeDevice(&device);
281 return i;
282 }
283
284 drmFreeDevice(&device);
285 }
286
287 return -1;
288}
117 289
118/* The main() function for setting up and running the tests. 290/* The main() function for setting up and running the tests.
119 * Returns a CUE_SUCCESS on successful running, another 291 * Returns a CUE_SUCCESS on successful running, another
@@ -125,16 +297,12 @@ int main(int argc, char **argv)
125 int i = 0; 297 int i = 0;
126 int suite_id = -1; /* By default run everything */ 298 int suite_id = -1; /* By default run everything */
127 int test_id = -1; /* By default run all tests in the suite */ 299 int test_id = -1; /* By default run all tests in the suite */
 300	int pci_bus_id = -1;    /* By default PCI bus ID is not specified */	 301	int pci_device_id = 0; /* By default PCI device ID is zero */
302 int display_devices = 0;/* By default not to display devices' info */
128 CU_pSuite pSuite = NULL; 303 CU_pSuite pSuite = NULL;
129 CU_pTest pTest = NULL; 304 CU_pTest pTest = NULL;
130 305 int test_device_index;
131 int aval = drmAvailable();
132
133 if (aval == 0) {
134 fprintf(stderr, "DRM driver is not available\n");
135 exit(EXIT_FAILURE);
136 }
137
138 306
139 for (i = 0; i < MAX_CARDS_SUPPORTED; i++) 307 for (i = 0; i < MAX_CARDS_SUPPORTED; i++)
140 drm_amdgpu[i] = -1; 308 drm_amdgpu[i] = -1;
@@ -153,6 +321,18 @@ int main(int argc, char **argv)
153 case 't': 321 case 't':
154 test_id = atoi(optarg); 322 test_id = atoi(optarg);
155 break; 323 break;
324 case 'b':
325 pci_bus_id = atoi(optarg);
326 break;
327 case 'd':
328 pci_device_id = atoi(optarg);
329 break;
330 case 'p':
331 display_devices = 1;
332 break;
333 case 'r':
334 open_render_node = 1;
335 break;
156 case '?': 336 case '?':
157 case 'h': 337 case 'h':
158 fprintf(stderr, usage, argv[0]); 338 fprintf(stderr, usage, argv[0]);
@@ -163,35 +343,46 @@ int main(int argc, char **argv)
163 } 343 }
164 } 344 }
165 345
166 /* Try to open all possible radeon connections 346 if (amdgpu_open_devices(open_render_node) <= 0) {
167 * Right now: Open only the 0. 347 perror("Cannot open AMDGPU device");
168 */
169 printf("Try to open the card 0..\n");
170 drm_amdgpu[0] = open("/dev/dri/card0", O_RDWR | O_CLOEXEC);
171
172 if (drm_amdgpu[0] < 0) {
173 perror("Cannot open /dev/dri/card0\n");
174 exit(EXIT_FAILURE); 348 exit(EXIT_FAILURE);
175 } 349 }
176 350
177 /** Display version of DRM driver */ 351 if (drm_amdgpu[0] < 0) {
178 drmVersionPtr retval = drmGetVersion(drm_amdgpu[0]); 352 perror("Cannot open AMDGPU device");
179
180 if (retval == NULL) {
181 perror("Could not get information about DRM driver");
182 exit(EXIT_FAILURE); 353 exit(EXIT_FAILURE);
183 } 354 }
184 355
185 printf("DRM Driver: Name: [%s] : Date [%s] : Description [%s]\n", 356 if (display_devices) {
186 retval->name, retval->date, retval->desc); 357 amdgpu_print_devices();
358 amdgpu_close_devices();
359 exit(EXIT_SUCCESS);
360 }
187 361
188 drmFreeVersion(retval); 362 if (pci_bus_id > 0) {
363 /* A device was specified to run the test */
364 test_device_index = amdgpu_find_device((uint8_t)pci_bus_id,
365 (uint8_t)pci_device_id);
366
367 if (test_device_index >= 0) {
368 /* Most tests run on device of drm_amdgpu[0].
369 * Swap the chosen device to drm_amdgpu[0].
370 */
371 i = drm_amdgpu[0];
372 drm_amdgpu[0] = drm_amdgpu[test_device_index];
373 drm_amdgpu[test_device_index] = i;
374 } else {
375 fprintf(stderr,
376 "The specified GPU device does not exist.\n");
377 exit(EXIT_FAILURE);
378 }
379 }
189 380
190 /* Initialize test suites to run */ 381 /* Initialize test suites to run */
191 382
192 /* initialize the CUnit test registry */ 383 /* initialize the CUnit test registry */
193 if (CUE_SUCCESS != CU_initialize_registry()) { 384 if (CUE_SUCCESS != CU_initialize_registry()) {
194 close(drm_amdgpu[0]); 385 amdgpu_close_devices();
195 return CU_get_error(); 386 return CU_get_error();
196 } 387 }
197 388
@@ -200,7 +391,7 @@ int main(int argc, char **argv)
200 fprintf(stderr, "suite registration failed - %s\n", 391 fprintf(stderr, "suite registration failed - %s\n",
201 CU_get_error_msg()); 392 CU_get_error_msg());
202 CU_cleanup_registry(); 393 CU_cleanup_registry();
203 close(drm_amdgpu[0]); 394 amdgpu_close_devices();
204 exit(EXIT_FAILURE); 395 exit(EXIT_FAILURE);
205 } 396 }
206 397
@@ -222,7 +413,7 @@ int main(int argc, char **argv)
222 fprintf(stderr, "Invalid test id: %d\n", 413 fprintf(stderr, "Invalid test id: %d\n",
223 test_id); 414 test_id);
224 CU_cleanup_registry(); 415 CU_cleanup_registry();
225 close(drm_amdgpu[0]); 416 amdgpu_close_devices();
226 exit(EXIT_FAILURE); 417 exit(EXIT_FAILURE);
227 } 418 }
228 } else 419 } else
@@ -231,13 +422,13 @@ int main(int argc, char **argv)
231 fprintf(stderr, "Invalid suite id : %d\n", 422 fprintf(stderr, "Invalid suite id : %d\n",
232 suite_id); 423 suite_id);
233 CU_cleanup_registry(); 424 CU_cleanup_registry();
234 close(drm_amdgpu[0]); 425 amdgpu_close_devices();
235 exit(EXIT_FAILURE); 426 exit(EXIT_FAILURE);
236 } 427 }
237 } else 428 } else
238 CU_basic_run_tests(); 429 CU_basic_run_tests();
239 430
240 CU_cleanup_registry(); 431 CU_cleanup_registry();
241 close(drm_amdgpu[0]); 432 amdgpu_close_devices();
242 return CU_get_error(); 433 return CU_get_error();
243} 434}
diff --git a/tests/amdgpu/amdgpu_test.h b/tests/amdgpu/amdgpu_test.h
index fca92ad0..e30e2312 100644
--- a/tests/amdgpu/amdgpu_test.h
+++ b/tests/amdgpu/amdgpu_test.h
@@ -35,6 +35,9 @@
35/* Forward reference for array to keep "drm" handles */ 35/* Forward reference for array to keep "drm" handles */
36extern int drm_amdgpu[MAX_CARDS_SUPPORTED]; 36extern int drm_amdgpu[MAX_CARDS_SUPPORTED];
37 37
38/* Global variables */
39extern int open_render_node;
40
38/************************* Basic test suite ********************************/ 41/************************* Basic test suite ********************************/
39 42
40/* 43/*
diff --git a/tests/amdgpu/basic_tests.c b/tests/amdgpu/basic_tests.c
index e489e6e8..bfda21b1 100644
--- a/tests/amdgpu/basic_tests.c
+++ b/tests/amdgpu/basic_tests.c
@@ -47,6 +47,11 @@ static void amdgpu_command_submission_gfx(void);
47static void amdgpu_command_submission_compute(void); 47static void amdgpu_command_submission_compute(void);
48static void amdgpu_command_submission_sdma(void); 48static void amdgpu_command_submission_sdma(void);
49static void amdgpu_userptr_test(void); 49static void amdgpu_userptr_test(void);
50static void amdgpu_semaphore_test(void);
51
52static void amdgpu_command_submission_write_linear_helper(unsigned ip_type);
53static void amdgpu_command_submission_const_fill_helper(unsigned ip_type);
54static void amdgpu_command_submission_copy_linear_helper(unsigned ip_type);
50 55
51CU_TestInfo basic_tests[] = { 56CU_TestInfo basic_tests[] = {
52 { "Query Info Test", amdgpu_query_info_test }, 57 { "Query Info Test", amdgpu_query_info_test },
@@ -55,6 +60,7 @@ CU_TestInfo basic_tests[] = {
55 { "Command submission Test (GFX)", amdgpu_command_submission_gfx }, 60 { "Command submission Test (GFX)", amdgpu_command_submission_gfx },
56 { "Command submission Test (Compute)", amdgpu_command_submission_compute }, 61 { "Command submission Test (Compute)", amdgpu_command_submission_compute },
57 { "Command submission Test (SDMA)", amdgpu_command_submission_sdma }, 62 { "Command submission Test (SDMA)", amdgpu_command_submission_sdma },
63 { "SW semaphore Test", amdgpu_semaphore_test },
58 CU_TEST_INFO_NULL, 64 CU_TEST_INFO_NULL,
59}; 65};
60#define BUFFER_SIZE (8 * 1024) 66#define BUFFER_SIZE (8 * 1024)
@@ -77,6 +83,120 @@ CU_TestInfo basic_tests[] = {
77#define SDMA_OPCODE_COPY 1 83#define SDMA_OPCODE_COPY 1
78# define SDMA_COPY_SUB_OPCODE_LINEAR 0 84# define SDMA_COPY_SUB_OPCODE_LINEAR 0
79 85
86#define GFX_COMPUTE_NOP 0xffff1000
87#define SDMA_NOP 0x0
88
89/* PM4 */
90#define PACKET_TYPE0 0
91#define PACKET_TYPE1 1
92#define PACKET_TYPE2 2
93#define PACKET_TYPE3 3
94
95#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
96#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
97#define CP_PACKET0_GET_REG(h) ((h) & 0xFFFF)
98#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
99#define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \
100 ((reg) & 0xFFFF) | \
101 ((n) & 0x3FFF) << 16)
102#define CP_PACKET2 0x80000000
103#define PACKET2_PAD_SHIFT 0
104#define PACKET2_PAD_MASK (0x3fffffff << 0)
105
106#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
107
108#define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \
109 (((op) & 0xFF) << 8) | \
110 ((n) & 0x3FFF) << 16)
111
112/* Packet 3 types */
113#define PACKET3_NOP 0x10
114
115#define PACKET3_WRITE_DATA 0x37
116#define WRITE_DATA_DST_SEL(x) ((x) << 8)
117 /* 0 - register
118 * 1 - memory (sync - via GRBM)
119 * 2 - gl2
120 * 3 - gds
121 * 4 - reserved
122 * 5 - memory (async - direct)
123 */
124#define WR_ONE_ADDR (1 << 16)
125#define WR_CONFIRM (1 << 20)
126#define WRITE_DATA_CACHE_POLICY(x) ((x) << 25)
127 /* 0 - LRU
128 * 1 - Stream
129 */
130#define WRITE_DATA_ENGINE_SEL(x) ((x) << 30)
131 /* 0 - me
132 * 1 - pfp
133 * 2 - ce
134 */
135
136#define PACKET3_DMA_DATA 0x50
137/* 1. header
138 * 2. CONTROL
139 * 3. SRC_ADDR_LO or DATA [31:0]
140 * 4. SRC_ADDR_HI [31:0]
141 * 5. DST_ADDR_LO [31:0]
142 * 6. DST_ADDR_HI [7:0]
143 * 7. COMMAND [30:21] | BYTE_COUNT [20:0]
144 */
145/* CONTROL */
146# define PACKET3_DMA_DATA_ENGINE(x) ((x) << 0)
147 /* 0 - ME
148 * 1 - PFP
149 */
150# define PACKET3_DMA_DATA_SRC_CACHE_POLICY(x) ((x) << 13)
151 /* 0 - LRU
152 * 1 - Stream
153 * 2 - Bypass
154 */
155# define PACKET3_DMA_DATA_SRC_VOLATILE (1 << 15)
156# define PACKET3_DMA_DATA_DST_SEL(x) ((x) << 20)
157 /* 0 - DST_ADDR using DAS
158 * 1 - GDS
159 * 3 - DST_ADDR using L2
160 */
161# define PACKET3_DMA_DATA_DST_CACHE_POLICY(x) ((x) << 25)
162 /* 0 - LRU
163 * 1 - Stream
164 * 2 - Bypass
165 */
166# define PACKET3_DMA_DATA_DST_VOLATILE (1 << 27)
167# define PACKET3_DMA_DATA_SRC_SEL(x) ((x) << 29)
168 /* 0 - SRC_ADDR using SAS
169 * 1 - GDS
170 * 2 - DATA
171 * 3 - SRC_ADDR using L2
172 */
173# define PACKET3_DMA_DATA_CP_SYNC (1 << 31)
174/* COMMAND */
175# define PACKET3_DMA_DATA_DIS_WC (1 << 21)
176# define PACKET3_DMA_DATA_CMD_SRC_SWAP(x) ((x) << 22)
177 /* 0 - none
178 * 1 - 8 in 16
179 * 2 - 8 in 32
180 * 3 - 8 in 64
181 */
182# define PACKET3_DMA_DATA_CMD_DST_SWAP(x) ((x) << 24)
183 /* 0 - none
184 * 1 - 8 in 16
185 * 2 - 8 in 32
186 * 3 - 8 in 64
187 */
188# define PACKET3_DMA_DATA_CMD_SAS (1 << 26)
189 /* 0 - memory
190 * 1 - register
191 */
192# define PACKET3_DMA_DATA_CMD_DAS (1 << 27)
193 /* 0 - memory
194 * 1 - register
195 */
196# define PACKET3_DMA_DATA_CMD_SAIC (1 << 28)
197# define PACKET3_DMA_DATA_CMD_DAIC (1 << 29)
198# define PACKET3_DMA_DATA_CMD_RAW_WAIT (1 << 30)
199
80int suite_basic_tests_init(void) 200int suite_basic_tests_init(void)
81{ 201{
82 int r; 202 int r;
@@ -86,8 +206,13 @@ int suite_basic_tests_init(void)
86 206
87 if (r == 0) 207 if (r == 0)
88 return CUE_SUCCESS; 208 return CUE_SUCCESS;
89 else 209 else {
210 if ((r == -EACCES) && (errno == EACCES))
211 printf("\n\nError:%s. "
212 "Hint:Try to run this test program as root.",
213 strerror(errno));
90 return CUE_SINIT_FAILED; 214 return CUE_SINIT_FAILED;
215 }
91} 216}
92 217
93int suite_basic_tests_clean(void) 218int suite_basic_tests_clean(void)
@@ -226,6 +351,7 @@ static void amdgpu_command_submission_gfx_separate_ibs(void)
226 351
227 fence_status.context = context_handle; 352 fence_status.context = context_handle;
228 fence_status.ip_type = AMDGPU_HW_IP_GFX; 353 fence_status.ip_type = AMDGPU_HW_IP_GFX;
354 fence_status.ip_instance = 0;
229 fence_status.fence = ibs_request.seq_no; 355 fence_status.fence = ibs_request.seq_no;
230 356
231 r = amdgpu_cs_query_fence_status(&fence_status, 357 r = amdgpu_cs_query_fence_status(&fence_status,
@@ -307,6 +433,7 @@ static void amdgpu_command_submission_gfx_shared_ib(void)
307 433
308 fence_status.context = context_handle; 434 fence_status.context = context_handle;
309 fence_status.ip_type = AMDGPU_HW_IP_GFX; 435 fence_status.ip_type = AMDGPU_HW_IP_GFX;
436 fence_status.ip_instance = 0;
310 fence_status.fence = ibs_request.seq_no; 437 fence_status.fence = ibs_request.seq_no;
311 438
312 r = amdgpu_cs_query_fence_status(&fence_status, 439 r = amdgpu_cs_query_fence_status(&fence_status,
@@ -325,15 +452,166 @@ static void amdgpu_command_submission_gfx_shared_ib(void)
325 CU_ASSERT_EQUAL(r, 0); 452 CU_ASSERT_EQUAL(r, 0);
326} 453}
327 454
455static void amdgpu_command_submission_gfx_cp_write_data(void)
456{
457 amdgpu_command_submission_write_linear_helper(AMDGPU_HW_IP_GFX);
458}
459
460static void amdgpu_command_submission_gfx_cp_const_fill(void)
461{
462 amdgpu_command_submission_const_fill_helper(AMDGPU_HW_IP_GFX);
463}
464
465static void amdgpu_command_submission_gfx_cp_copy_data(void)
466{
467 amdgpu_command_submission_copy_linear_helper(AMDGPU_HW_IP_GFX);
468}
469
328static void amdgpu_command_submission_gfx(void) 470static void amdgpu_command_submission_gfx(void)
329{ 471{
472 /* write data using the CP */
473 amdgpu_command_submission_gfx_cp_write_data();
474 /* const fill using the CP */
475 amdgpu_command_submission_gfx_cp_const_fill();
476 /* copy data using the CP */
477 amdgpu_command_submission_gfx_cp_copy_data();
330 /* separate IB buffers for multi-IB submission */ 478 /* separate IB buffers for multi-IB submission */
331 amdgpu_command_submission_gfx_separate_ibs(); 479 amdgpu_command_submission_gfx_separate_ibs();
332 /* shared IB buffer for multi-IB submission */ 480 /* shared IB buffer for multi-IB submission */
333 amdgpu_command_submission_gfx_shared_ib(); 481 amdgpu_command_submission_gfx_shared_ib();
334} 482}
335 483
336static void amdgpu_command_submission_compute(void) 484static void amdgpu_semaphore_test(void)
485{
486 amdgpu_context_handle context_handle[2];
487 amdgpu_semaphore_handle sem;
488 amdgpu_bo_handle ib_result_handle[2];
489 void *ib_result_cpu[2];
490 uint64_t ib_result_mc_address[2];
491 struct amdgpu_cs_request ibs_request[2] = {0};
492 struct amdgpu_cs_ib_info ib_info[2] = {0};
493 struct amdgpu_cs_fence fence_status = {0};
494 uint32_t *ptr;
495 uint32_t expired;
496 amdgpu_bo_list_handle bo_list[2];
497 amdgpu_va_handle va_handle[2];
498 int r, i;
499
500 r = amdgpu_cs_create_semaphore(&sem);
501 CU_ASSERT_EQUAL(r, 0);
502 for (i = 0; i < 2; i++) {
503 r = amdgpu_cs_ctx_create(device_handle, &context_handle[i]);
504 CU_ASSERT_EQUAL(r, 0);
505
506 r = amdgpu_bo_alloc_and_map(device_handle, 4096, 4096,
507 AMDGPU_GEM_DOMAIN_GTT, 0,
508 &ib_result_handle[i], &ib_result_cpu[i],
509 &ib_result_mc_address[i], &va_handle[i]);
510 CU_ASSERT_EQUAL(r, 0);
511
512 r = amdgpu_get_bo_list(device_handle, ib_result_handle[i],
513 NULL, &bo_list[i]);
514 CU_ASSERT_EQUAL(r, 0);
515 }
516
517 /* 1. same context different engine */
518 ptr = ib_result_cpu[0];
519 ptr[0] = SDMA_NOP;
520 ib_info[0].ib_mc_address = ib_result_mc_address[0];
521 ib_info[0].size = 1;
522
523 ibs_request[0].ip_type = AMDGPU_HW_IP_DMA;
524 ibs_request[0].number_of_ibs = 1;
525 ibs_request[0].ibs = &ib_info[0];
526 ibs_request[0].resources = bo_list[0];
527 ibs_request[0].fence_info.handle = NULL;
528 r = amdgpu_cs_submit(context_handle[0], 0,&ibs_request[0], 1);
529 CU_ASSERT_EQUAL(r, 0);
530 r = amdgpu_cs_signal_semaphore(context_handle[0], AMDGPU_HW_IP_DMA, 0, 0, sem);
531 CU_ASSERT_EQUAL(r, 0);
532
533 r = amdgpu_cs_wait_semaphore(context_handle[0], AMDGPU_HW_IP_GFX, 0, 0, sem);
534 CU_ASSERT_EQUAL(r, 0);
535 ptr = ib_result_cpu[1];
536 ptr[0] = GFX_COMPUTE_NOP;
537 ib_info[1].ib_mc_address = ib_result_mc_address[1];
538 ib_info[1].size = 1;
539
540 ibs_request[1].ip_type = AMDGPU_HW_IP_GFX;
541 ibs_request[1].number_of_ibs = 1;
542 ibs_request[1].ibs = &ib_info[1];
543 ibs_request[1].resources = bo_list[1];
544 ibs_request[1].fence_info.handle = NULL;
545
546 r = amdgpu_cs_submit(context_handle[0], 0,&ibs_request[1], 1);
547 CU_ASSERT_EQUAL(r, 0);
548
549 fence_status.context = context_handle[0];
550 fence_status.ip_type = AMDGPU_HW_IP_GFX;
551 fence_status.ip_instance = 0;
552 fence_status.fence = ibs_request[1].seq_no;
553 r = amdgpu_cs_query_fence_status(&fence_status,
554 500000000, 0, &expired);
555 CU_ASSERT_EQUAL(r, 0);
556 CU_ASSERT_EQUAL(expired, true);
557
558 /* 2. same engine different context */
559 ptr = ib_result_cpu[0];
560 ptr[0] = GFX_COMPUTE_NOP;
561 ib_info[0].ib_mc_address = ib_result_mc_address[0];
562 ib_info[0].size = 1;
563
564 ibs_request[0].ip_type = AMDGPU_HW_IP_GFX;
565 ibs_request[0].number_of_ibs = 1;
566 ibs_request[0].ibs = &ib_info[0];
567 ibs_request[0].resources = bo_list[0];
568 ibs_request[0].fence_info.handle = NULL;
569 r = amdgpu_cs_submit(context_handle[0], 0,&ibs_request[0], 1);
570 CU_ASSERT_EQUAL(r, 0);
571 r = amdgpu_cs_signal_semaphore(context_handle[0], AMDGPU_HW_IP_GFX, 0, 0, sem);
572 CU_ASSERT_EQUAL(r, 0);
573
574 r = amdgpu_cs_wait_semaphore(context_handle[1], AMDGPU_HW_IP_GFX, 0, 0, sem);
575 CU_ASSERT_EQUAL(r, 0);
576 ptr = ib_result_cpu[1];
577 ptr[0] = GFX_COMPUTE_NOP;
578 ib_info[1].ib_mc_address = ib_result_mc_address[1];
579 ib_info[1].size = 1;
580
581 ibs_request[1].ip_type = AMDGPU_HW_IP_GFX;
582 ibs_request[1].number_of_ibs = 1;
583 ibs_request[1].ibs = &ib_info[1];
584 ibs_request[1].resources = bo_list[1];
585 ibs_request[1].fence_info.handle = NULL;
586 r = amdgpu_cs_submit(context_handle[1], 0,&ibs_request[1], 1);
587
588 CU_ASSERT_EQUAL(r, 0);
589
590 fence_status.context = context_handle[1];
591 fence_status.ip_type = AMDGPU_HW_IP_GFX;
592 fence_status.ip_instance = 0;
593 fence_status.fence = ibs_request[1].seq_no;
594 r = amdgpu_cs_query_fence_status(&fence_status,
595 500000000, 0, &expired);
596 CU_ASSERT_EQUAL(r, 0);
597 CU_ASSERT_EQUAL(expired, true);
598 for (i = 0; i < 2; i++) {
599 r = amdgpu_bo_unmap_and_free(ib_result_handle[i], va_handle[i],
600 ib_result_mc_address[i], 4096);
601 CU_ASSERT_EQUAL(r, 0);
602
603 r = amdgpu_bo_list_destroy(bo_list[i]);
604 CU_ASSERT_EQUAL(r, 0);
605
606 r = amdgpu_cs_ctx_free(context_handle[i]);
607 CU_ASSERT_EQUAL(r, 0);
608 }
609
610 r = amdgpu_cs_destroy_semaphore(sem);
611 CU_ASSERT_EQUAL(r, 0);
612}
613
614static void amdgpu_command_submission_compute_nop(void)
337{ 615{
338 amdgpu_context_handle context_handle; 616 amdgpu_context_handle context_handle;
339 amdgpu_bo_handle ib_result_handle; 617 amdgpu_bo_handle ib_result_handle;
@@ -384,6 +662,7 @@ static void amdgpu_command_submission_compute(void)
384 662
385 fence_status.context = context_handle; 663 fence_status.context = context_handle;
386 fence_status.ip_type = AMDGPU_HW_IP_COMPUTE; 664 fence_status.ip_type = AMDGPU_HW_IP_COMPUTE;
665 fence_status.ip_instance = 0;
387 fence_status.ring = instance; 666 fence_status.ring = instance;
388 fence_status.fence = ibs_request.seq_no; 667 fence_status.fence = ibs_request.seq_no;
389 668
@@ -404,16 +683,44 @@ static void amdgpu_command_submission_compute(void)
404 CU_ASSERT_EQUAL(r, 0); 683 CU_ASSERT_EQUAL(r, 0);
405} 684}
406 685
686static void amdgpu_command_submission_compute_cp_write_data(void)
687{
688 amdgpu_command_submission_write_linear_helper(AMDGPU_HW_IP_COMPUTE);
689}
690
691static void amdgpu_command_submission_compute_cp_const_fill(void)
692{
693 amdgpu_command_submission_const_fill_helper(AMDGPU_HW_IP_COMPUTE);
694}
695
696static void amdgpu_command_submission_compute_cp_copy_data(void)
697{
698 amdgpu_command_submission_copy_linear_helper(AMDGPU_HW_IP_COMPUTE);
699}
700
701static void amdgpu_command_submission_compute(void)
702{
703 /* write data using the CP */
704 amdgpu_command_submission_compute_cp_write_data();
705 /* const fill using the CP */
706 amdgpu_command_submission_compute_cp_const_fill();
707 /* copy data using the CP */
708 amdgpu_command_submission_compute_cp_copy_data();
709 /* nop test */
710 amdgpu_command_submission_compute_nop();
711}
712
407/* 713/*
408 * caller need create/release: 714 * caller need create/release:
409 * pm4_src, resources, ib_info, and ibs_request 715 * pm4_src, resources, ib_info, and ibs_request
410 * submit command stream described in ibs_request and wait for this IB accomplished 716 * submit command stream described in ibs_request and wait for this IB accomplished
411 */ 717 */
412static void amdgpu_sdma_test_exec_cs(amdgpu_context_handle context_handle, 718static void amdgpu_test_exec_cs_helper(amdgpu_context_handle context_handle,
413 int instance, int pm4_dw, uint32_t *pm4_src, 719 unsigned ip_type,
414 int res_cnt, amdgpu_bo_handle *resources, 720 int instance, int pm4_dw, uint32_t *pm4_src,
415 struct amdgpu_cs_ib_info *ib_info, 721 int res_cnt, amdgpu_bo_handle *resources,
416 struct amdgpu_cs_request *ibs_request) 722 struct amdgpu_cs_ib_info *ib_info,
723 struct amdgpu_cs_request *ibs_request)
417{ 724{
418 int r; 725 int r;
419 uint32_t expired; 726 uint32_t expired;
@@ -446,7 +753,7 @@ static void amdgpu_sdma_test_exec_cs(amdgpu_context_handle context_handle,
446 ib_info->ib_mc_address = ib_result_mc_address; 753 ib_info->ib_mc_address = ib_result_mc_address;
447 ib_info->size = pm4_dw; 754 ib_info->size = pm4_dw;
448 755
449 ibs_request->ip_type = AMDGPU_HW_IP_DMA; 756 ibs_request->ip_type = ip_type;
450 ibs_request->ring = instance; 757 ibs_request->ring = instance;
451 ibs_request->number_of_ibs = 1; 758 ibs_request->number_of_ibs = 1;
452 ibs_request->ibs = ib_info; 759 ibs_request->ibs = ib_info;
@@ -468,7 +775,8 @@ static void amdgpu_sdma_test_exec_cs(amdgpu_context_handle context_handle,
468 r = amdgpu_bo_list_destroy(ibs_request->resources); 775 r = amdgpu_bo_list_destroy(ibs_request->resources);
469 CU_ASSERT_EQUAL(r, 0); 776 CU_ASSERT_EQUAL(r, 0);
470 777
471 fence_status.ip_type = AMDGPU_HW_IP_DMA; 778 fence_status.ip_type = ip_type;
779 fence_status.ip_instance = 0;
472 fence_status.ring = ibs_request->ring; 780 fence_status.ring = ibs_request->ring;
473 fence_status.context = context_handle; 781 fence_status.context = context_handle;
474 fence_status.fence = ibs_request->seq_no; 782 fence_status.fence = ibs_request->seq_no;
@@ -485,7 +793,7 @@ static void amdgpu_sdma_test_exec_cs(amdgpu_context_handle context_handle,
485 CU_ASSERT_EQUAL(r, 0); 793 CU_ASSERT_EQUAL(r, 0);
486} 794}
487 795
488static void amdgpu_command_submission_sdma_write_linear(void) 796static void amdgpu_command_submission_write_linear_helper(unsigned ip_type)
489{ 797{
490 const int sdma_write_length = 128; 798 const int sdma_write_length = 128;
491 const int pm4_dw = 256; 799 const int pm4_dw = 256;
@@ -533,20 +841,31 @@ static void amdgpu_command_submission_sdma_write_linear(void)
533 841
534 resources[0] = bo; 842 resources[0] = bo;
535 843
536 /* fullfill PM4: test DMA write-linear */ 844 /* fulfill PM4: test DMA write-linear */
537 i = j = 0; 845 i = j = 0;
538 pm4[i++] = SDMA_PACKET(SDMA_OPCODE_WRITE, 846 if (ip_type == AMDGPU_HW_IP_DMA) {
539 SDMA_WRITE_SUB_OPCODE_LINEAR, 0); 847 pm4[i++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
540 pm4[i++] = 0xffffffff & bo_mc; 848 SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
541 pm4[i++] = (0xffffffff00000000 & bo_mc) >> 32; 849 pm4[i++] = 0xffffffff & bo_mc;
542 pm4[i++] = sdma_write_length; 850 pm4[i++] = (0xffffffff00000000 & bo_mc) >> 32;
543 while(j++ < sdma_write_length) 851 pm4[i++] = sdma_write_length;
544 pm4[i++] = 0xdeadbeaf; 852 while(j++ < sdma_write_length)
853 pm4[i++] = 0xdeadbeaf;
854 } else if ((ip_type == AMDGPU_HW_IP_GFX) ||
855 (ip_type == AMDGPU_HW_IP_COMPUTE)) {
856 pm4[i++] = PACKET3(PACKET3_WRITE_DATA, 2 + sdma_write_length);
857 pm4[i++] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
858 pm4[i++] = 0xfffffffc & bo_mc;
859 pm4[i++] = (0xffffffff00000000 & bo_mc) >> 32;
860 while(j++ < sdma_write_length)
861 pm4[i++] = 0xdeadbeaf;
862 }
545 863
546 amdgpu_sdma_test_exec_cs(context_handle, 0, 864 amdgpu_test_exec_cs_helper(context_handle,
547 i, pm4, 865 ip_type, 0,
548 1, resources, 866 i, pm4,
549 ib_info, ibs_request); 867 1, resources,
868 ib_info, ibs_request);
550 869
551 /* verify if SDMA test result meets with expected */ 870 /* verify if SDMA test result meets with expected */
552 i = 0; 871 i = 0;
@@ -570,7 +889,12 @@ static void amdgpu_command_submission_sdma_write_linear(void)
570 CU_ASSERT_EQUAL(r, 0); 889 CU_ASSERT_EQUAL(r, 0);
571} 890}
572 891
573static void amdgpu_command_submission_sdma_const_fill(void) 892static void amdgpu_command_submission_sdma_write_linear(void)
893{
894 amdgpu_command_submission_write_linear_helper(AMDGPU_HW_IP_DMA);
895}
896
897static void amdgpu_command_submission_const_fill_helper(unsigned ip_type)
574{ 898{
575 const int sdma_write_length = 1024 * 1024; 899 const int sdma_write_length = 1024 * 1024;
576 const int pm4_dw = 256; 900 const int pm4_dw = 256;
@@ -617,19 +941,34 @@ static void amdgpu_command_submission_sdma_const_fill(void)
617 941
618 resources[0] = bo; 942 resources[0] = bo;
619 943
620 /* fullfill PM4: test DMA const fill */ 944 /* fulfill PM4: test DMA const fill */
621 i = j = 0; 945 i = j = 0;
622 pm4[i++] = SDMA_PACKET(SDMA_OPCODE_CONSTANT_FILL, 0, 946 if (ip_type == AMDGPU_HW_IP_DMA) {
623 SDMA_CONSTANT_FILL_EXTRA_SIZE(2)); 947 pm4[i++] = SDMA_PACKET(SDMA_OPCODE_CONSTANT_FILL, 0,
624 pm4[i++] = 0xffffffff & bo_mc; 948 SDMA_CONSTANT_FILL_EXTRA_SIZE(2));
625 pm4[i++] = (0xffffffff00000000 & bo_mc) >> 32; 949 pm4[i++] = 0xffffffff & bo_mc;
626 pm4[i++] = 0xdeadbeaf; 950 pm4[i++] = (0xffffffff00000000 & bo_mc) >> 32;
627 pm4[i++] = sdma_write_length; 951 pm4[i++] = 0xdeadbeaf;
952 pm4[i++] = sdma_write_length;
953 } else if ((ip_type == AMDGPU_HW_IP_GFX) ||
954 (ip_type == AMDGPU_HW_IP_COMPUTE)) {
955 pm4[i++] = PACKET3(PACKET3_DMA_DATA, 5);
956 pm4[i++] = PACKET3_DMA_DATA_ENGINE(0) |
957 PACKET3_DMA_DATA_DST_SEL(0) |
958 PACKET3_DMA_DATA_SRC_SEL(2) |
959 PACKET3_DMA_DATA_CP_SYNC;
960 pm4[i++] = 0xdeadbeaf;
961 pm4[i++] = 0;
962 pm4[i++] = 0xfffffffc & bo_mc;
963 pm4[i++] = (0xffffffff00000000 & bo_mc) >> 32;
964 pm4[i++] = sdma_write_length;
965 }
628 966
629 amdgpu_sdma_test_exec_cs(context_handle, 0, 967 amdgpu_test_exec_cs_helper(context_handle,
630 i, pm4, 968 ip_type, 0,
631 1, resources, 969 i, pm4,
632 ib_info, ibs_request); 970 1, resources,
971 ib_info, ibs_request);
633 972
634 /* verify if SDMA test result meets with expected */ 973 /* verify if SDMA test result meets with expected */
635 i = 0; 974 i = 0;
@@ -653,7 +992,12 @@ static void amdgpu_command_submission_sdma_const_fill(void)
653 CU_ASSERT_EQUAL(r, 0); 992 CU_ASSERT_EQUAL(r, 0);
654} 993}
655 994
656static void amdgpu_command_submission_sdma_copy_linear(void) 995static void amdgpu_command_submission_sdma_const_fill(void)
996{
997 amdgpu_command_submission_const_fill_helper(AMDGPU_HW_IP_DMA);
998}
999
1000static void amdgpu_command_submission_copy_linear_helper(unsigned ip_type)
657{ 1001{
658 const int sdma_write_length = 1024; 1002 const int sdma_write_length = 1024;
659 const int pm4_dw = 256; 1003 const int pm4_dw = 256;
@@ -716,21 +1060,35 @@ static void amdgpu_command_submission_sdma_copy_linear(void)
716 resources[0] = bo1; 1060 resources[0] = bo1;
717 resources[1] = bo2; 1061 resources[1] = bo2;
718 1062
719 /* fullfill PM4: test DMA copy linear */ 1063 /* fulfill PM4: test DMA copy linear */
720 i = j = 0; 1064 i = j = 0;
721 pm4[i++] = SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0); 1065 if (ip_type == AMDGPU_HW_IP_DMA) {
722 pm4[i++] = sdma_write_length; 1066 pm4[i++] = SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0);
723 pm4[i++] = 0; 1067 pm4[i++] = sdma_write_length;
724 pm4[i++] = 0xffffffff & bo1_mc; 1068 pm4[i++] = 0;
725 pm4[i++] = (0xffffffff00000000 & bo1_mc) >> 32; 1069 pm4[i++] = 0xffffffff & bo1_mc;
726 pm4[i++] = 0xffffffff & bo2_mc; 1070 pm4[i++] = (0xffffffff00000000 & bo1_mc) >> 32;
727 pm4[i++] = (0xffffffff00000000 & bo2_mc) >> 32; 1071 pm4[i++] = 0xffffffff & bo2_mc;
728 1072 pm4[i++] = (0xffffffff00000000 & bo2_mc) >> 32;
1073 } else if ((ip_type == AMDGPU_HW_IP_GFX) ||
1074 (ip_type == AMDGPU_HW_IP_COMPUTE)) {
1075 pm4[i++] = PACKET3(PACKET3_DMA_DATA, 5);
1076 pm4[i++] = PACKET3_DMA_DATA_ENGINE(0) |
1077 PACKET3_DMA_DATA_DST_SEL(0) |
1078 PACKET3_DMA_DATA_SRC_SEL(0) |
1079 PACKET3_DMA_DATA_CP_SYNC;
1080 pm4[i++] = 0xfffffffc & bo1_mc;
1081 pm4[i++] = (0xffffffff00000000 & bo1_mc) >> 32;
1082 pm4[i++] = 0xfffffffc & bo2_mc;
1083 pm4[i++] = (0xffffffff00000000 & bo2_mc) >> 32;
1084 pm4[i++] = sdma_write_length;
1085 }
729 1086
730 amdgpu_sdma_test_exec_cs(context_handle, 0, 1087 amdgpu_test_exec_cs_helper(context_handle,
731 i, pm4, 1088 ip_type, 0,
732 2, resources, 1089 i, pm4,
733 ib_info, ibs_request); 1090 2, resources,
1091 ib_info, ibs_request);
734 1092
735 /* verify if SDMA test result meets with expected */ 1093 /* verify if SDMA test result meets with expected */
736 i = 0; 1094 i = 0;
@@ -758,6 +1116,11 @@ static void amdgpu_command_submission_sdma_copy_linear(void)
758 CU_ASSERT_EQUAL(r, 0); 1116 CU_ASSERT_EQUAL(r, 0);
759} 1117}
760 1118
1119static void amdgpu_command_submission_sdma_copy_linear(void)
1120{
1121 amdgpu_command_submission_copy_linear_helper(AMDGPU_HW_IP_DMA);
1122}
1123
761static void amdgpu_command_submission_sdma(void) 1124static void amdgpu_command_submission_sdma(void)
762{ 1125{
763 amdgpu_command_submission_sdma_write_linear(); 1126 amdgpu_command_submission_sdma_write_linear();
@@ -821,10 +1184,11 @@ static void amdgpu_userptr_test(void)
821 while (j++ < sdma_write_length) 1184 while (j++ < sdma_write_length)
822 pm4[i++] = 0xdeadbeaf; 1185 pm4[i++] = 0xdeadbeaf;
823 1186
824 amdgpu_sdma_test_exec_cs(context_handle, 0, 1187 amdgpu_test_exec_cs_helper(context_handle,
825 i, pm4, 1188 AMDGPU_HW_IP_DMA, 0,
826 1, &handle, 1189 i, pm4,
827 ib_info, ibs_request); 1190 1, &handle,
1191 ib_info, ibs_request);
828 i = 0; 1192 i = 0;
829 while (i < sdma_write_length) { 1193 while (i < sdma_write_length) {
830 CU_ASSERT_EQUAL(((int*)ptr)[i++], 0xdeadbeaf); 1194 CU_ASSERT_EQUAL(((int*)ptr)[i++], 0xdeadbeaf);
diff --git a/tests/amdgpu/bo_tests.c b/tests/amdgpu/bo_tests.c
index 993895d8..74b5e77b 100644
--- a/tests/amdgpu/bo_tests.c
+++ b/tests/amdgpu/bo_tests.c
@@ -65,8 +65,14 @@ int suite_bo_tests_init(void)
65 65
66 r = amdgpu_device_initialize(drm_amdgpu[0], &major_version, 66 r = amdgpu_device_initialize(drm_amdgpu[0], &major_version,
67 &minor_version, &device_handle); 67 &minor_version, &device_handle);
68 if (r) 68 if (r) {
69 if ((r == -EACCES) && (errno == EACCES))
70 printf("\n\nError:%s. "
71 "Hint:Try to run this test program as root.",
72 strerror(errno));
73
69 return CUE_SINIT_FAILED; 74 return CUE_SINIT_FAILED;
75 }
70 76
71 req.alloc_size = BUFFER_SIZE; 77 req.alloc_size = BUFFER_SIZE;
72 req.phys_alignment = BUFFER_ALIGN; 78 req.phys_alignment = BUFFER_ALIGN;
@@ -146,6 +152,11 @@ static void amdgpu_bo_export_import_do_type(enum amdgpu_bo_handle_type type)
146 152
147static void amdgpu_bo_export_import(void) 153static void amdgpu_bo_export_import(void)
148{ 154{
155 if (open_render_node) {
156 printf("(DRM render node is used. Skip export/Import test) ");
157 return;
158 }
159
149 amdgpu_bo_export_import_do_type(amdgpu_bo_handle_type_gem_flink_name); 160 amdgpu_bo_export_import_do_type(amdgpu_bo_handle_type_gem_flink_name);
150 amdgpu_bo_export_import_do_type(amdgpu_bo_handle_type_dma_buf_fd); 161 amdgpu_bo_export_import_do_type(amdgpu_bo_handle_type_dma_buf_fd);
151} 162}
diff --git a/tests/amdgpu/cs_tests.c b/tests/amdgpu/cs_tests.c
index dfbf5af9..82c55aa8 100644
--- a/tests/amdgpu/cs_tests.c
+++ b/tests/amdgpu/cs_tests.c
@@ -43,6 +43,8 @@ static amdgpu_device_handle device_handle;
43static uint32_t major_version; 43static uint32_t major_version;
44static uint32_t minor_version; 44static uint32_t minor_version;
45static uint32_t family_id; 45static uint32_t family_id;
46static uint32_t chip_rev;
47static uint32_t chip_id;
46 48
47static amdgpu_context_handle context_handle; 49static amdgpu_context_handle context_handle;
48static amdgpu_bo_handle ib_handle; 50static amdgpu_bo_handle ib_handle;
@@ -74,10 +76,19 @@ int suite_cs_tests_init(void)
74 76
75 r = amdgpu_device_initialize(drm_amdgpu[0], &major_version, 77 r = amdgpu_device_initialize(drm_amdgpu[0], &major_version,
76 &minor_version, &device_handle); 78 &minor_version, &device_handle);
77 if (r) 79 if (r) {
80 if ((r == -EACCES) && (errno == EACCES))
81 printf("\n\nError:%s. "
82 "Hint:Try to run this test program as root.",
83 strerror(errno));
84
78 return CUE_SINIT_FAILED; 85 return CUE_SINIT_FAILED;
86 }
79 87
80 family_id = device_handle->info.family_id; 88 family_id = device_handle->info.family_id;
89 /* VI asic POLARIS10/11 have specific external_rev_id */
90 chip_rev = device_handle->info.chip_rev;
91 chip_id = device_handle->info.chip_external_rev;
81 92
82 r = amdgpu_cs_ctx_create(device_handle, &context_handle); 93 r = amdgpu_cs_ctx_create(device_handle, &context_handle);
83 if (r) 94 if (r)
@@ -200,8 +211,17 @@ static void amdgpu_cs_uvd_create(void)
200 CU_ASSERT_EQUAL(r, 0); 211 CU_ASSERT_EQUAL(r, 0);
201 212
202 memcpy(msg, uvd_create_msg, sizeof(uvd_create_msg)); 213 memcpy(msg, uvd_create_msg, sizeof(uvd_create_msg));
203 if (family_id >= AMDGPU_FAMILY_VI) 214 if (family_id >= AMDGPU_FAMILY_VI) {
204 ((uint8_t*)msg)[0x10] = 7; 215 ((uint8_t*)msg)[0x10] = 7;
216 /* chip polaris 10/11 */
217 if (chip_id == chip_rev+0x50 || chip_id == chip_rev+0x5A) {
218 /* dpb size */
219 ((uint8_t*)msg)[0x28] = 0x00;
220 ((uint8_t*)msg)[0x29] = 0x94;
221 ((uint8_t*)msg)[0x2A] = 0x6B;
222 ((uint8_t*)msg)[0x2B] = 0x00;
223 }
224 }
205 225
206 r = amdgpu_bo_cpu_unmap(buf_handle); 226 r = amdgpu_bo_cpu_unmap(buf_handle);
207 CU_ASSERT_EQUAL(r, 0); 227 CU_ASSERT_EQUAL(r, 0);
@@ -230,8 +250,8 @@ static void amdgpu_cs_uvd_create(void)
230 250
231static void amdgpu_cs_uvd_decode(void) 251static void amdgpu_cs_uvd_decode(void)
232{ 252{
233 const unsigned dpb_size = 15923584, dt_size = 737280; 253 const unsigned dpb_size = 15923584, ctx_size = 5287680, dt_size = 737280;
234 uint64_t msg_addr, fb_addr, bs_addr, dpb_addr, dt_addr, it_addr; 254 uint64_t msg_addr, fb_addr, bs_addr, dpb_addr, ctx_addr, dt_addr, it_addr;
235 struct amdgpu_bo_alloc_request req = {0}; 255 struct amdgpu_bo_alloc_request req = {0};
236 amdgpu_bo_handle buf_handle; 256 amdgpu_bo_handle buf_handle;
237 amdgpu_va_handle va_handle; 257 amdgpu_va_handle va_handle;
@@ -266,9 +286,25 @@ static void amdgpu_cs_uvd_decode(void)
266 r = amdgpu_bo_cpu_map(buf_handle, (void **)&ptr); 286 r = amdgpu_bo_cpu_map(buf_handle, (void **)&ptr);
267 CU_ASSERT_EQUAL(r, 0); 287 CU_ASSERT_EQUAL(r, 0);
268 288
269 memcpy(ptr, uvd_decode_msg, sizeof(uvd_decode_msg)); 289 memcpy(ptr, uvd_decode_msg, sizeof(uvd_create_msg));
270 if (family_id >= AMDGPU_FAMILY_VI) 290 if (family_id >= AMDGPU_FAMILY_VI) {
271 ptr[0x10] = 7; 291 ptr[0x10] = 7;
292 ptr[0x98] = 0x00;
293 ptr[0x99] = 0x02;
294 /* chip polaris10/11 */
295 if (chip_id == chip_rev+0x50 || chip_id == chip_rev+0x5A) {
296 /*dpb size */
297 ptr[0x24] = 0x00;
298 ptr[0x25] = 0x94;
299 ptr[0x26] = 0x6B;
300 ptr[0x27] = 0x00;
301 /*ctx size */
302 ptr[0x2C] = 0x00;
303 ptr[0x2D] = 0xAF;
304 ptr[0x2E] = 0x50;
305 ptr[0x2F] = 0x00;
306 }
307 }
272 308
273 ptr += 4*1024; 309 ptr += 4*1024;
274 memset(ptr, 0, 4*1024); 310 memset(ptr, 0, 4*1024);
@@ -298,6 +334,12 @@ static void amdgpu_cs_uvd_decode(void)
298 } else 334 } else
299 bs_addr = fb_addr + 4*1024; 335 bs_addr = fb_addr + 4*1024;
300 dpb_addr = ALIGN(bs_addr + sizeof(uvd_bitstream), 4*1024); 336 dpb_addr = ALIGN(bs_addr + sizeof(uvd_bitstream), 4*1024);
337
338 if ((family_id >= AMDGPU_FAMILY_VI) &&
339 (chip_id == chip_rev+0x50 || chip_id == chip_rev+0x5A)) {
340 ctx_addr = ALIGN(dpb_addr + 0x006B9400, 4*1024);
341 }
342
301 dt_addr = ALIGN(dpb_addr + dpb_size, 4*1024); 343 dt_addr = ALIGN(dpb_addr + dpb_size, 4*1024);
302 344
303 i = 0; 345 i = 0;
@@ -306,8 +348,11 @@ static void amdgpu_cs_uvd_decode(void)
306 uvd_cmd(dt_addr, 0x2, &i); 348 uvd_cmd(dt_addr, 0x2, &i);
307 uvd_cmd(fb_addr, 0x3, &i); 349 uvd_cmd(fb_addr, 0x3, &i);
308 uvd_cmd(bs_addr, 0x100, &i); 350 uvd_cmd(bs_addr, 0x100, &i);
309 if (family_id >= AMDGPU_FAMILY_VI) 351 if (family_id >= AMDGPU_FAMILY_VI) {
310 uvd_cmd(it_addr, 0x204, &i); 352 uvd_cmd(it_addr, 0x204, &i);
353 if (chip_id == chip_rev+0x50 || chip_id == chip_rev+0x5A)
354 uvd_cmd(ctx_addr, 0x206, &i);
355}
311 ib_cpu[i++] = 0x3BC6; 356 ib_cpu[i++] = 0x3BC6;
312 ib_cpu[i++] = 0x1; 357 ib_cpu[i++] = 0x1;
313 for (; i % 16; ++i) 358 for (; i % 16; ++i)
diff --git a/tests/amdgpu/vce_ib.h b/tests/amdgpu/vce_ib.h
index bd0bf943..80ab1795 100644
--- a/tests/amdgpu/vce_ib.h
+++ b/tests/amdgpu/vce_ib.h
@@ -53,7 +53,7 @@ static const uint32_t vce_create[] = {
53 0x000000a0, 53 0x000000a0,
54 0x000000a0, 54 0x000000a0,
55 0x00000010, 55 0x00000010,
56 0x00000000, 56 0x00000201,
57}; 57};
58 58
59static const uint32_t vce_rate_ctrl[] = { 59static const uint32_t vce_rate_ctrl[] = {
diff --git a/tests/amdgpu/vce_tests.c b/tests/amdgpu/vce_tests.c
index 32fc001b..de63aa15 100644
--- a/tests/amdgpu/vce_tests.c
+++ b/tests/amdgpu/vce_tests.c
@@ -65,6 +65,7 @@ static amdgpu_device_handle device_handle;
65static uint32_t major_version; 65static uint32_t major_version;
66static uint32_t minor_version; 66static uint32_t minor_version;
67static uint32_t family_id; 67static uint32_t family_id;
68static uint32_t vce_harvest_config;
68 69
69static amdgpu_context_handle context_handle; 70static amdgpu_context_handle context_handle;
70static amdgpu_bo_handle ib_handle; 71static amdgpu_bo_handle ib_handle;
@@ -93,10 +94,17 @@ int suite_vce_tests_init(void)
93 94
94 r = amdgpu_device_initialize(drm_amdgpu[0], &major_version, 95 r = amdgpu_device_initialize(drm_amdgpu[0], &major_version,
95 &minor_version, &device_handle); 96 &minor_version, &device_handle);
96 if (r) 97 if (r) {
98 if ((r == -EACCES) && (errno == EACCES))
99 printf("\n\nError:%s. "
100 "Hint:Try to run this test program as root.",
101 strerror(errno));
102
97 return CUE_SINIT_FAILED; 103 return CUE_SINIT_FAILED;
104 }
98 105
99 family_id = device_handle->info.family_id; 106 family_id = device_handle->info.family_id;
107 vce_harvest_config = device_handle->info.vce_harvest_config;
100 108
101 r = amdgpu_cs_ctx_create(device_handle, &context_handle); 109 r = amdgpu_cs_ctx_create(device_handle, &context_handle);
102 if (r) 110 if (r)
@@ -440,14 +448,16 @@ static void amdgpu_cs_vce_encode(void)
440 check_result(&enc); 448 check_result(&enc);
441 449
442 /* two instances */ 450 /* two instances */
443 enc.two_instance = true; 451 if (vce_harvest_config == 0) {
444 vce_taskinfo[2] = 0x83; 452 enc.two_instance = true;
445 vce_taskinfo[4] = 1; 453 vce_taskinfo[2] = 0x83;
446 amdgpu_cs_vce_encode_idr(&enc); 454 vce_taskinfo[4] = 1;
447 vce_taskinfo[2] = 0xffffffff; 455 amdgpu_cs_vce_encode_idr(&enc);
448 vce_taskinfo[4] = 2; 456 vce_taskinfo[2] = 0xffffffff;
449 amdgpu_cs_vce_encode_p(&enc); 457 vce_taskinfo[4] = 2;
450 check_result(&enc); 458 amdgpu_cs_vce_encode_p(&enc);
459 check_result(&enc);
460 }
451 } else { 461 } else {
452 vce_taskinfo[3] = 3; 462 vce_taskinfo[3] = 3;
453 vce_encode[16] = 0; 463 vce_encode[16] = 0;
diff --git a/tests/auth.c b/tests/auth.c
deleted file mode 100644
index 9147b115..00000000
--- a/tests/auth.c
+++ /dev/null
@@ -1,138 +0,0 @@
1/*
2 * Copyright © 2007 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 *
26 */
27
28#include <limits.h>
29#include <sys/ioctl.h>
30#include "drmtest.h"
31
32enum auth_event {
33 SERVER_READY,
34 CLIENT_MAGIC,
35 CLIENT_DONE,
36};
37
38int commfd[2];
39
40static void wait_event(int pipe, enum auth_event expected_event)
41{
42 int ret;
43 enum auth_event event;
44 unsigned char in;
45
46 ret = read(commfd[pipe], &in, 1);
47 if (ret == -1)
48 err(1, "read error");
49 event = in;
50
51 if (event != expected_event)
52 errx(1, "unexpected event: %d\n", event);
53}
54
55static void
56send_event(int pipe, enum auth_event send_event)
57{
58 int ret;
59 unsigned char event;
60
61 event = send_event;
62 ret = write(commfd[pipe], &event, 1);
63 if (ret == -1)
64 err(1, "failed to send event %d", event);
65}
66
67static void client()
68{
69 struct drm_auth auth;
70 int drmfd, ret;
71
72 /* XXX: Should make sure we open the same DRM as the master */
73 wait_event(0, SERVER_READY);
74
75 drmfd = drm_open_any();
76
77 /* Get a client magic number and pass it to the master for auth. */
78 auth.magic = 0; /* Quiet valgrind */
79 ret = ioctl(drmfd, DRM_IOCTL_GET_MAGIC, &auth);
80 if (ret == -1)
81 err(1, "Couldn't get client magic");
82 send_event(0, CLIENT_MAGIC);
83 ret = write(commfd[0], &auth.magic, sizeof(auth.magic));
84 if (ret == -1)
85 err(1, "Couldn't write auth data");
86
87 /* Signal that the client is completely done. */
88 send_event(0, CLIENT_DONE);
89}
90
91static void server()
92{
93 int drmfd, ret;
94 struct drm_auth auth;
95
96 drmfd = drm_open_any_master();
97
98 auth.magic = 0xd0d0d0d0;
99 ret = ioctl(drmfd, DRM_IOCTL_AUTH_MAGIC, &auth);
100 if (ret != -1 || errno != EINVAL)
101 errx(1, "Authenticating bad magic succeeded\n");
102
103 send_event(1, SERVER_READY);
104
105 wait_event(1, CLIENT_MAGIC);
106 ret = read(commfd[1], &auth.magic, sizeof(auth.magic));
107 if (ret == -1)
108 err(1, "Failure to read client magic");
109
110 ret = ioctl(drmfd, DRM_IOCTL_AUTH_MAGIC, &auth);
111 if (ret == -1)
112 err(1, "Failure to authenticate client magic\n");
113
114 wait_event(1, CLIENT_DONE);
115}
116
117/**
118 * Checks DRM authentication mechanisms.
119 */
120int main(int argc, char **argv)
121{
122 int ret;
123
124 ret = pipe(commfd);
125 if (ret == -1)
126 err(1, "Couldn't create pipe");
127
128 ret = fork();
129 if (ret == -1)
130 err(1, "failure to fork client");
131 if (ret == 0)
132 client();
133 else
134 server();
135
136 return 0;
137}
138
diff --git a/tests/dristat.c b/tests/dristat.c
deleted file mode 100644
index cca4b03a..00000000
--- a/tests/dristat.c
+++ /dev/null
@@ -1,285 +0,0 @@
1/* dristat.c --
2 * Created: Mon Jan 15 05:05:07 2001 by faith@acm.org
3 *
4 * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the next
15 * paragraph) shall be included in all copies or substantial portions of the
16 * Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
22 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
23 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
24 * DEALINGS IN THE SOFTWARE.
25 *
26 * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
27 *
28 */
29
30#ifdef HAVE_CONFIG_H
31# include <config.h>
32#endif
33
34#include <stdio.h>
35#include <stdlib.h>
36#include <unistd.h>
37#include "xf86drm.h"
38#include "xf86drmRandom.c"
39#include "xf86drmHash.c"
40#include "xf86drm.c"
41
42#define DRM_VERSION 0x00000001
43#define DRM_MEMORY 0x00000002
44#define DRM_CLIENTS 0x00000004
45#define DRM_STATS 0x00000008
46#define DRM_BUSID 0x00000010
47
48static void getversion(int fd)
49{
50 drmVersionPtr version;
51
52 version = drmGetVersion(fd);
53 if (version) {
54 printf(" Version information:\n");
55 printf(" Name: %s\n", version->name ? version->name : "?");
56 printf(" Version: %d.%d.%d\n",
57 version->version_major,
58 version->version_minor,
59 version->version_patchlevel);
60 printf(" Date: %s\n", version->date ? version->date : "?");
61 printf(" Desc: %s\n", version->desc ? version->desc : "?");
62 drmFreeVersion(version);
63 } else {
64 printf(" No version information available\n");
65 }
66}
67
68static void getbusid(int fd)
69{
70 const char *busid = drmGetBusid(fd);
71
72 printf(" Busid: %s\n", *busid ? busid : "(not set)");
73 drmFreeBusid(busid);
74}
75
76
77static void getvm(int fd)
78{
79 int i;
80 const char *typename;
81 char flagname[33];
82 drm_handle_t offset;
83 drmSize size;
84 drmMapType type;
85 drmMapFlags flags;
86 drm_handle_t handle;
87 int mtrr;
88
89 printf(" VM map information:\n");
90 printf(" flags: (R)estricted (r)ead/(w)rite (l)ocked (k)ernel (W)rite-combine (L)ock:\n");
91 printf(" slot offset size type flags address mtrr\n");
92
93 for (i = 0;
94 !drmGetMap(fd, i, &offset, &size, &type, &flags, &handle, &mtrr);
95 i++) {
96
97 switch (type) {
98 case DRM_FRAME_BUFFER: typename = "FB"; break;
99 case DRM_REGISTERS: typename = "REG"; break;
100 case DRM_SHM: typename = "SHM"; break;
101 case DRM_AGP: typename = "AGP"; break;
102 case DRM_SCATTER_GATHER: typename = "SG"; break;
103 case DRM_CONSISTENT: typename = "CON"; break;
104 default: typename = "???"; break;
105 }
106
107 flagname[0] = (flags & DRM_RESTRICTED) ? 'R' : ' ';
108 flagname[1] = (flags & DRM_READ_ONLY) ? 'r' : 'w';
109 flagname[2] = (flags & DRM_LOCKED) ? 'l' : ' ';
110 flagname[3] = (flags & DRM_KERNEL) ? 'k' : ' ';
111 flagname[4] = (flags & DRM_WRITE_COMBINING) ? 'W' : ' ';
112 flagname[5] = (flags & DRM_CONTAINS_LOCK) ? 'L' : ' ';
113 flagname[6] = '\0';
114
115 printf(" %4d 0x%08lx 0x%08lx %3.3s %6.6s 0x%08lx ",
116 i, (unsigned long)offset, (unsigned long)size,
117 typename, flagname, (unsigned long)handle);
118 if (mtrr < 0) printf("none\n");
119 else printf("%4d\n", mtrr);
120 }
121}
122
123static void getclients(int fd)
124{
125 int i;
126 int auth;
127 int pid;
128 int uid;
129 unsigned long magic;
130 unsigned long iocs;
131 char buf[64];
132 char cmd[40];
133 int procfd;
134
135 printf(" DRI client information:\n");
136 printf(" a pid uid magic ioctls prog\n");
137
138 for (i = 0; !drmGetClient(fd, i, &auth, &pid, &uid, &magic, &iocs); i++) {
139 sprintf(buf, "/proc/%d/cmdline", pid);
140 memset(cmd, 0, sizeof(cmd));
141 if ((procfd = open(buf, O_RDONLY, 0)) >= 0) {
142 read(procfd, cmd, sizeof(cmd)-1);
143 close(procfd);
144 }
145 if (*cmd) {
146 char *pt;
147
148 for (pt = cmd; *pt; pt++) if (!isprint(*pt)) *pt = ' ';
149 printf(" %c %5d %5d %10lu %10lu %s\n",
150 auth ? 'y' : 'n', pid, uid, magic, iocs, cmd);
151 } else {
152 printf(" %c %5d %5d %10lu %10lu\n",
153 auth ? 'y' : 'n', pid, uid, magic, iocs);
154 }
155 }
156}
157
158static void printhuman(unsigned long value, const char *name, int mult)
159{
160 const char *p;
161 double f;
162 /* Print width 5 number in width 6 space */
163 if (value < 100000) {
164 printf(" %5lu", value);
165 return;
166 }
167
168 p = name;
169 f = (double)value / (double)mult;
170 if (f < 10.0) {
171 printf(" %4.2f%c", f, *p);
172 return;
173 }
174
175 p++;
176 f = (double)value / (double)mult;
177 if (f < 10.0) {
178 printf(" %4.2f%c", f, *p);
179 return;
180 }
181
182 p++;
183 f = (double)value / (double)mult;
184 if (f < 10.0) {
185 printf(" %4.2f%c", f, *p);
186 return;
187 }
188}
189
190static void getstats(int fd, int i)
191{
192 drmStatsT prev, curr;
193 unsigned j;
194 double rate;
195
196 printf(" System statistics:\n");
197
198 if (drmGetStats(fd, &prev)) return;
199 if (!i) {
200 for (j = 0; j < prev.count; j++) {
201 printf(" ");
202 printf(prev.data[j].long_format, prev.data[j].long_name);
203 if (prev.data[j].isvalue) printf(" 0x%08lx\n", prev.data[j].value);
204 else printf(" %10lu\n", prev.data[j].value);
205 }
206 return;
207 }
208
209 printf(" ");
210 for (j = 0; j < prev.count; j++)
211 if (!prev.data[j].verbose) {
212 printf(" ");
213 printf(prev.data[j].rate_format, prev.data[j].rate_name);
214 }
215 printf("\n");
216
217 for (;;) {
218 sleep(i);
219 if (drmGetStats(fd, &curr)) return;
220 printf(" ");
221 for (j = 0; j < curr.count; j++) {
222 if (curr.data[j].verbose) continue;
223 if (curr.data[j].isvalue) {
224 printf(" %08lx", curr.data[j].value);
225 } else {
226 rate = (curr.data[j].value - prev.data[j].value) / (double)i;
227 printhuman(rate, curr.data[j].mult_names, curr.data[j].mult);
228 }
229 }
230 printf("\n");
231 memcpy(&prev, &curr, sizeof(prev));
232 }
233
234}
235
236int main(int argc, char **argv)
237{
238 int c;
239 int mask = 0;
240 int minor = 0;
241 int interval = 0;
242 int fd;
243 char buf[64];
244 int i;
245
246 while ((c = getopt(argc, argv, "avmcsbM:i:")) != EOF)
247 switch (c) {
248 case 'a': mask = ~0; break;
249 case 'v': mask |= DRM_VERSION; break;
250 case 'm': mask |= DRM_MEMORY; break;
251 case 'c': mask |= DRM_CLIENTS; break;
252 case 's': mask |= DRM_STATS; break;
253 case 'b': mask |= DRM_BUSID; break;
254 case 'i': interval = strtol(optarg, NULL, 0); break;
255 case 'M': minor = strtol(optarg, NULL, 0); break;
256 default:
257 fprintf( stderr, "Usage: dristat [options]\n\n" );
258 fprintf( stderr, "Displays DRM information. Use with no arguments to display available cards.\n\n" );
259 fprintf( stderr, " -a Show all available information\n" );
260 fprintf( stderr, " -b Show DRM bus ID's\n" );
261 fprintf( stderr, " -c Display information about DRM clients\n" );
262 fprintf( stderr, " -i [interval] Continuously display statistics every [interval] seconds\n" );
263 fprintf( stderr, " -v Display DRM module and card version information\n" );
264 fprintf( stderr, " -m Display memory use information\n" );
265 fprintf( stderr, " -s Display DRM statistics\n" );
266 fprintf( stderr, " -M [minor] Select card by minor number\n" );
267 return 1;
268 }
269
270 for (i = 0; i < 16; i++) if (!minor || i == minor) {
271 sprintf(buf, DRM_DEV_NAME, DRM_DIR_NAME, i);
272 fd = drmOpenMinor(i, 1, DRM_NODE_PRIMARY);
273 if (fd >= 0) {
274 printf("%s\n", buf);
275 if (mask & DRM_BUSID) getbusid(fd);
276 if (mask & DRM_VERSION) getversion(fd);
277 if (mask & DRM_MEMORY) getvm(fd);
278 if (mask & DRM_CLIENTS) getclients(fd);
279 if (mask & DRM_STATS) getstats(fd, interval);
280 close(fd);
281 }
282 }
283
284 return 0;
285}
diff --git a/tests/drmdevice.c b/tests/drmdevice.c
index c3363274..9dd5098a 100644
--- a/tests/drmdevice.c
+++ b/tests/drmdevice.c
@@ -21,8 +21,11 @@
21 * 21 *
22 */ 22 */
23 23
24#include <errno.h>
24#include <stdio.h> 25#include <stdio.h>
25#include <stdlib.h> 26#include <stdlib.h>
27#include <stdbool.h>
28#include <string.h>
26#include <sys/stat.h> 29#include <sys/stat.h>
27#include <fcntl.h> 30#include <fcntl.h>
28#include <unistd.h> 31#include <unistd.h>
@@ -30,7 +33,7 @@
30 33
31 34
32static void 35static void
33print_device_info(drmDevicePtr device, int i) 36print_device_info(drmDevicePtr device, int i, bool print_revision)
34{ 37{
35 printf("device[%i]\n", i); 38 printf("device[%i]\n", i);
36 printf("\tavailable_nodes %04x\n", device->available_nodes); 39 printf("\tavailable_nodes %04x\n", device->available_nodes);
@@ -44,8 +47,8 @@ print_device_info(drmDevicePtr device, int i)
44 if (device->bustype == DRM_BUS_PCI) { 47 if (device->bustype == DRM_BUS_PCI) {
45 printf("\t\tpci\n"); 48 printf("\t\tpci\n");
46 printf("\t\t\tdomain\t%04x\n",device->businfo.pci->domain); 49 printf("\t\t\tdomain\t%04x\n",device->businfo.pci->domain);
47 printf("\t\t\tbu\t%02x\n", device->businfo.pci->bus); 50 printf("\t\t\tbus\t%02x\n", device->businfo.pci->bus);
48 printf("\t\t\tde\t%02x\n", device->businfo.pci->dev); 51 printf("\t\t\tdev\t%02x\n", device->businfo.pci->dev);
49 printf("\t\t\tfunc\t%1u\n", device->businfo.pci->func); 52 printf("\t\t\tfunc\t%1u\n", device->businfo.pci->func);
50 53
51 printf("\tdeviceinfo\n"); 54 printf("\tdeviceinfo\n");
@@ -54,7 +57,48 @@ print_device_info(drmDevicePtr device, int i)
54 printf("\t\t\tdevice_id\t%04x\n", device->deviceinfo.pci->device_id); 57 printf("\t\t\tdevice_id\t%04x\n", device->deviceinfo.pci->device_id);
55 printf("\t\t\tsubvendor_id\t%04x\n", device->deviceinfo.pci->subvendor_id); 58 printf("\t\t\tsubvendor_id\t%04x\n", device->deviceinfo.pci->subvendor_id);
56 printf("\t\t\tsubdevice_id\t%04x\n", device->deviceinfo.pci->subdevice_id); 59 printf("\t\t\tsubdevice_id\t%04x\n", device->deviceinfo.pci->subdevice_id);
57 printf("\t\t\trevision_id\t%02x\n", device->deviceinfo.pci->revision_id); 60 if (print_revision)
61 printf("\t\t\trevision_id\t%02x\n", device->deviceinfo.pci->revision_id);
62 else
63 printf("\t\t\trevision_id\tIGNORED\n");
64
65 } else if (device->bustype == DRM_BUS_USB) {
66 printf("\t\tusb\n");
67 printf("\t\t\tbus\t%03u\n", device->businfo.usb->bus);
68 printf("\t\t\tdev\t%03u\n", device->businfo.usb->dev);
69
70 printf("\tdeviceinfo\n");
71 printf("\t\tusb\n");
72 printf("\t\t\tvendor\t%04x\n", device->deviceinfo.usb->vendor);
73 printf("\t\t\tproduct\t%04x\n", device->deviceinfo.usb->product);
74 } else if (device->bustype == DRM_BUS_PLATFORM) {
75 char **compatible = device->deviceinfo.platform->compatible;
76
77 printf("\t\tplatform\n");
78 printf("\t\t\tfullname\t%s\n", device->businfo.platform->fullname);
79
80 printf("\tdeviceinfo\n");
81 printf("\t\tplatform\n");
82 printf("\t\t\tcompatible\n");
83
84 while (*compatible) {
85 printf("\t\t\t\t%s\n", *compatible);
86 compatible++;
87 }
88 } else if (device->bustype == DRM_BUS_HOST1X) {
89 char **compatible = device->deviceinfo.platform->compatible;
90
91 printf("\t\thost1x\n");
92 printf("\t\t\tfullname\t%s\n", device->businfo.host1x->fullname);
93
94 printf("\tdeviceinfo\n");
95 printf("\t\tplatform\n");
96 printf("\t\t\tcompatible\n");
97
98 while (*compatible) {
99 printf("\t\t\t\t%s\n", *compatible);
100 compatible++;
101 }
58 } else { 102 } else {
59 printf("Unknown/unhandled bustype\n"); 103 printf("Unknown/unhandled bustype\n");
60 } 104 }
@@ -68,10 +112,10 @@ main(void)
68 drmDevicePtr device; 112 drmDevicePtr device;
69 int fd, ret, max_devices; 113 int fd, ret, max_devices;
70 114
71 max_devices = drmGetDevices(NULL, 0); 115 max_devices = drmGetDevices2(0, NULL, 0);
72 116
73 if (max_devices <= 0) { 117 if (max_devices <= 0) {
74 printf("drmGetDevices() has returned %d\n", max_devices); 118 printf("drmGetDevices2() has returned %d\n", max_devices);
75 return -1; 119 return -1;
76 } 120 }
77 121
@@ -81,24 +125,27 @@ main(void)
81 return -1; 125 return -1;
82 } 126 }
83 127
84 ret = drmGetDevices(devices, max_devices); 128 ret = drmGetDevices2(0, devices, max_devices);
85 if (ret < 0) { 129 if (ret < 0) {
86 printf("drmGetDevices() returned an error %d\n", ret); 130 printf("drmGetDevices2() returned an error %d\n", ret);
87 free(devices); 131 free(devices);
88 return -1; 132 return -1;
89 } 133 }
90 134
91 for (int i = 0; i < ret; i++) { 135 for (int i = 0; i < ret; i++) {
92 print_device_info(devices[i], i); 136 print_device_info(devices[i], i, false);
93 137
94 for (int j = 0; j < DRM_NODE_MAX; j++) { 138 for (int j = 0; j < DRM_NODE_MAX; j++) {
95 if (devices[i]->available_nodes & 1 << j) { 139 if (devices[i]->available_nodes & 1 << j) {
140 printf("Opening device %d node %s\n", i, devices[i]->nodes[j]);
96 fd = open(devices[i]->nodes[j], O_RDONLY | O_CLOEXEC, 0); 141 fd = open(devices[i]->nodes[j], O_RDONLY | O_CLOEXEC, 0);
97 if (fd < 0) 142 if (fd < 0) {
143 printf("Failed - %s (%d)\n", strerror(errno), errno);
98 continue; 144 continue;
145 }
99 146
100 if (drmGetDevice(fd, &device) == 0) { 147 if (drmGetDevice2(fd, DRM_DEVICE_GET_PCI_REVISION, &device) == 0) {
101 print_device_info(device, -1); 148 print_device_info(device, i, true);
102 drmFreeDevice(&device); 149 drmFreeDevice(&device);
103 } 150 }
104 close(fd); 151 close(fd);
diff --git a/tests/drmtest.c b/tests/drmtest.c
deleted file mode 100644
index 022994a0..00000000
--- a/tests/drmtest.c
+++ /dev/null
@@ -1,135 +0,0 @@
1/*
2 * Copyright © 2007 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 *
26 */
27
28#include <string.h>
29#include <fcntl.h>
30#include <fnmatch.h>
31#include <sys/stat.h>
32#include <sys/ioctl.h>
33#include "drmtest.h"
34
35#define LIBUDEV_I_KNOW_THE_API_IS_SUBJECT_TO_CHANGE
36#include <libudev.h>
37
38static int is_master(int fd)
39{
40 drm_client_t client;
41 int ret;
42
43 /* Check that we're the only opener and authed. */
44 client.idx = 0;
45 ret = ioctl(fd, DRM_IOCTL_GET_CLIENT, &client);
46 assert (ret == 0);
47 if (!client.auth)
48 return 0;
49 client.idx = 1;
50 ret = ioctl(fd, DRM_IOCTL_GET_CLIENT, &client);
51 if (ret != -1 || errno != EINVAL)
52 return 0;
53
54 return 1;
55}
56
57/** Open the first DRM device matching the criteria */
58int drm_open_matching(const char *pci_glob, int flags)
59{
60 struct udev *udev;
61 struct udev_enumerate *e;
62 struct udev_device *device, *parent;
63 struct udev_list_entry *entry;
64 const char *pci_id, *path;
65 const char *usub, *dnode;
66 int fd;
67
68 udev = udev_new();
69 if (udev == NULL) {
70 fprintf(stderr, "failed to initialize udev context\n");
71 abort();
72 }
73
74 fd = -1;
75 e = udev_enumerate_new(udev);
76 udev_enumerate_add_match_subsystem(e, "drm");
77 udev_enumerate_scan_devices(e);
78 udev_list_entry_foreach(entry, udev_enumerate_get_list_entry(e)) {
79 path = udev_list_entry_get_name(entry);
80 device = udev_device_new_from_syspath(udev, path);
81 parent = udev_device_get_parent(device);
82 usub = udev_device_get_subsystem(parent);
83 /* Filter out KMS output devices. */
84 if (!usub || (strcmp(usub, "pci") != 0))
85 continue;
86 pci_id = udev_device_get_property_value(parent, "PCI_ID");
87 if (fnmatch(pci_glob, pci_id, 0) != 0)
88 continue;
89 dnode = udev_device_get_devnode(device);
90 if (strstr(dnode, "control"))
91 continue;
92 fd = open(dnode, O_RDWR);
93 if (fd < 0)
94 continue;
95 if ((flags & DRM_TEST_MASTER) && !is_master(fd)) {
96 close(fd);
97 fd = -1;
98 continue;
99 }
100
101 break;
102 }
103 udev_enumerate_unref(e);
104 udev_unref(udev);
105
106 return fd;
107}
108
109int drm_open_any(void)
110{
111 int fd = drm_open_matching("*:*", 0);
112
113 if (fd < 0) {
114 fprintf(stderr, "failed to open any drm device\n");
115 exit(0);
116 }
117
118 return fd;
119}
120
121/**
122 * Open the first DRM device we can find where we end up being the master.
123 */
124int drm_open_any_master(void)
125{
126 int fd = drm_open_matching("*:*", DRM_TEST_MASTER);
127
128 if (fd < 0) {
129 fprintf(stderr, "failed to open any drm device\n");
130 exit(0);
131 }
132
133 return fd;
134
135}
diff --git a/tests/drmtest.h b/tests/drmtest.h
deleted file mode 100644
index 55bb4464..00000000
--- a/tests/drmtest.h
+++ /dev/null
@@ -1,40 +0,0 @@
1/*
2 * Copyright © 2007 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 *
26 */
27
28#include <stdio.h>
29#include <stdlib.h>
30#include <unistd.h>
31#include <assert.h>
32#include <errno.h>
33
34#include "xf86drm.h"
35
36#define DRM_TEST_MASTER 0x01
37
38int drm_open_any(void);
39int drm_open_any_master(void);
40int drm_open_matching(const char *pci_glob, int flags);
diff --git a/tests/etnaviv/Makefile.am b/tests/etnaviv/Makefile.am
new file mode 100644
index 00000000..06318643
--- /dev/null
+++ b/tests/etnaviv/Makefile.am
@@ -0,0 +1,41 @@
1AM_CFLAGS = \
2 -I $(top_srcdir)/include/drm \
3 -I $(top_srcdir)/etnaviv \
4 -I $(top_srcdir)
5
6if HAVE_INSTALL_TESTS
7bin_PROGRAMS = \
8 etnaviv_2d_test \
9 etnaviv_cmd_stream_test \
10 etnaviv_bo_cache_test
11else
12noinst_PROGRAMS = \
13 etnaviv_2d_test \
14 etnaviv_cmd_stream_test \
15 etnaviv_bo_cache_test
16endif
17
18etnaviv_2d_test_LDADD = \
19 $(top_builddir)/libdrm.la \
20 $(top_builddir)/etnaviv/libdrm_etnaviv.la
21
22etnaviv_2d_test_SOURCES = \
23 cmdstream.xml.h \
24 etnaviv_2d_test.c \
25 state.xml.h \
26 state_2d.xml.h \
27 write_bmp.c \
28 write_bmp.h
29
30etnaviv_cmd_stream_test_LDADD = \
31 $(top_builddir)/etnaviv/libdrm_etnaviv.la
32
33etnaviv_cmd_stream_test_SOURCES = \
34 etnaviv_cmd_stream_test.c
35
36etnaviv_bo_cache_test_LDADD = \
37 $(top_builddir)/libdrm.la \
38 $(top_builddir)/etnaviv/libdrm_etnaviv.la
39
40etnaviv_bo_cache_test_SOURCES = \
41 etnaviv_bo_cache_test.c
diff --git a/tests/etnaviv/cmdstream.xml.h b/tests/etnaviv/cmdstream.xml.h
new file mode 100644
index 00000000..109285c5
--- /dev/null
+++ b/tests/etnaviv/cmdstream.xml.h
@@ -0,0 +1,242 @@
1#ifndef CMDSTREAM_XML
2#define CMDSTREAM_XML
3
4/* Autogenerated file, DO NOT EDIT manually!
5
6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng
8git clone git://0x04.net/rules-ng-ng
9
10The rules-ng-ng source files this header was generated from are:
11- cmdstream.xml ( 12621 bytes, from 2016-09-06 14:44:16)
12- copyright.xml ( 1597 bytes, from 2016-09-06 14:44:16)
13- common.xml ( 20583 bytes, from 2016-09-06 14:14:12)
14
15Copyright (C) 2012-2016 by the following authors:
16- Wladimir J. van der Laan <laanwj@gmail.com>
17- Christian Gmeiner <christian.gmeiner@gmail.com>
18- Lucas Stach <l.stach@pengutronix.de>
19- Russell King <rmk@arm.linux.org.uk>
20
21Permission is hereby granted, free of charge, to any person obtaining a
22copy of this software and associated documentation files (the "Software"),
23to deal in the Software without restriction, including without limitation
24the rights to use, copy, modify, merge, publish, distribute, sub license,
25and/or sell copies of the Software, and to permit persons to whom the
26Software is furnished to do so, subject to the following conditions:
27
28The above copyright notice and this permission notice (including the
29next paragraph) shall be included in all copies or substantial portions
30of the Software.
31
32THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
33IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
34FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
35THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
36LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
37FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
38DEALINGS IN THE SOFTWARE.
39*/
40
41
42#define FE_OPCODE_LOAD_STATE 0x00000001
43#define FE_OPCODE_END 0x00000002
44#define FE_OPCODE_NOP 0x00000003
45#define FE_OPCODE_DRAW_2D 0x00000004
46#define FE_OPCODE_DRAW_PRIMITIVES 0x00000005
47#define FE_OPCODE_DRAW_INDEXED_PRIMITIVES 0x00000006
48#define FE_OPCODE_WAIT 0x00000007
49#define FE_OPCODE_LINK 0x00000008
50#define FE_OPCODE_STALL 0x00000009
51#define FE_OPCODE_CALL 0x0000000a
52#define FE_OPCODE_RETURN 0x0000000b
53#define FE_OPCODE_CHIP_SELECT 0x0000000d
54#define PRIMITIVE_TYPE_POINTS 0x00000001
55#define PRIMITIVE_TYPE_LINES 0x00000002
56#define PRIMITIVE_TYPE_LINE_STRIP 0x00000003
57#define PRIMITIVE_TYPE_TRIANGLES 0x00000004
58#define PRIMITIVE_TYPE_TRIANGLE_STRIP 0x00000005
59#define PRIMITIVE_TYPE_TRIANGLE_FAN 0x00000006
60#define PRIMITIVE_TYPE_LINE_LOOP 0x00000007
61#define PRIMITIVE_TYPE_QUADS 0x00000008
62#define VIV_FE_LOAD_STATE 0x00000000
63
64#define VIV_FE_LOAD_STATE_HEADER 0x00000000
65#define VIV_FE_LOAD_STATE_HEADER_OP__MASK 0xf8000000
66#define VIV_FE_LOAD_STATE_HEADER_OP__SHIFT 27
67#define VIV_FE_LOAD_STATE_HEADER_OP_LOAD_STATE 0x08000000
68#define VIV_FE_LOAD_STATE_HEADER_FIXP 0x04000000
69#define VIV_FE_LOAD_STATE_HEADER_COUNT__MASK 0x03ff0000
70#define VIV_FE_LOAD_STATE_HEADER_COUNT__SHIFT 16
71#define VIV_FE_LOAD_STATE_HEADER_COUNT(x) (((x) << VIV_FE_LOAD_STATE_HEADER_COUNT__SHIFT) & VIV_FE_LOAD_STATE_HEADER_COUNT__MASK)
72#define VIV_FE_LOAD_STATE_HEADER_OFFSET__MASK 0x0000ffff
73#define VIV_FE_LOAD_STATE_HEADER_OFFSET__SHIFT 0
74#define VIV_FE_LOAD_STATE_HEADER_OFFSET(x) (((x) << VIV_FE_LOAD_STATE_HEADER_OFFSET__SHIFT) & VIV_FE_LOAD_STATE_HEADER_OFFSET__MASK)
75#define VIV_FE_LOAD_STATE_HEADER_OFFSET__SHR 2
76
77#define VIV_FE_END 0x00000000
78
79#define VIV_FE_END_HEADER 0x00000000
80#define VIV_FE_END_HEADER_EVENT_ID__MASK 0x0000001f
81#define VIV_FE_END_HEADER_EVENT_ID__SHIFT 0
82#define VIV_FE_END_HEADER_EVENT_ID(x) (((x) << VIV_FE_END_HEADER_EVENT_ID__SHIFT) & VIV_FE_END_HEADER_EVENT_ID__MASK)
83#define VIV_FE_END_HEADER_EVENT_ENABLE 0x00000100
84#define VIV_FE_END_HEADER_OP__MASK 0xf8000000
85#define VIV_FE_END_HEADER_OP__SHIFT 27
86#define VIV_FE_END_HEADER_OP_END 0x10000000
87
88#define VIV_FE_NOP 0x00000000
89
90#define VIV_FE_NOP_HEADER 0x00000000
91#define VIV_FE_NOP_HEADER_OP__MASK 0xf8000000
92#define VIV_FE_NOP_HEADER_OP__SHIFT 27
93#define VIV_FE_NOP_HEADER_OP_NOP 0x18000000
94
95#define VIV_FE_DRAW_2D 0x00000000
96
97#define VIV_FE_DRAW_2D_HEADER 0x00000000
98#define VIV_FE_DRAW_2D_HEADER_COUNT__MASK 0x0000ff00
99#define VIV_FE_DRAW_2D_HEADER_COUNT__SHIFT 8
100#define VIV_FE_DRAW_2D_HEADER_COUNT(x) (((x) << VIV_FE_DRAW_2D_HEADER_COUNT__SHIFT) & VIV_FE_DRAW_2D_HEADER_COUNT__MASK)
101#define VIV_FE_DRAW_2D_HEADER_DATA_COUNT__MASK 0x07ff0000
102#define VIV_FE_DRAW_2D_HEADER_DATA_COUNT__SHIFT 16
103#define VIV_FE_DRAW_2D_HEADER_DATA_COUNT(x) (((x) << VIV_FE_DRAW_2D_HEADER_DATA_COUNT__SHIFT) & VIV_FE_DRAW_2D_HEADER_DATA_COUNT__MASK)
104#define VIV_FE_DRAW_2D_HEADER_OP__MASK 0xf8000000
105#define VIV_FE_DRAW_2D_HEADER_OP__SHIFT 27
106#define VIV_FE_DRAW_2D_HEADER_OP_DRAW_2D 0x20000000
107
108#define VIV_FE_DRAW_2D_TOP_LEFT 0x00000008
109#define VIV_FE_DRAW_2D_TOP_LEFT_X__MASK 0x0000ffff
110#define VIV_FE_DRAW_2D_TOP_LEFT_X__SHIFT 0
111#define VIV_FE_DRAW_2D_TOP_LEFT_X(x) (((x) << VIV_FE_DRAW_2D_TOP_LEFT_X__SHIFT) & VIV_FE_DRAW_2D_TOP_LEFT_X__MASK)
112#define VIV_FE_DRAW_2D_TOP_LEFT_Y__MASK 0xffff0000
113#define VIV_FE_DRAW_2D_TOP_LEFT_Y__SHIFT 16
114#define VIV_FE_DRAW_2D_TOP_LEFT_Y(x) (((x) << VIV_FE_DRAW_2D_TOP_LEFT_Y__SHIFT) & VIV_FE_DRAW_2D_TOP_LEFT_Y__MASK)
115
116#define VIV_FE_DRAW_2D_BOTTOM_RIGHT 0x0000000c
117#define VIV_FE_DRAW_2D_BOTTOM_RIGHT_X__MASK 0x0000ffff
118#define VIV_FE_DRAW_2D_BOTTOM_RIGHT_X__SHIFT 0
119#define VIV_FE_DRAW_2D_BOTTOM_RIGHT_X(x) (((x) << VIV_FE_DRAW_2D_BOTTOM_RIGHT_X__SHIFT) & VIV_FE_DRAW_2D_BOTTOM_RIGHT_X__MASK)
120#define VIV_FE_DRAW_2D_BOTTOM_RIGHT_Y__MASK 0xffff0000
121#define VIV_FE_DRAW_2D_BOTTOM_RIGHT_Y__SHIFT 16
122#define VIV_FE_DRAW_2D_BOTTOM_RIGHT_Y(x) (((x) << VIV_FE_DRAW_2D_BOTTOM_RIGHT_Y__SHIFT) & VIV_FE_DRAW_2D_BOTTOM_RIGHT_Y__MASK)
123
124#define VIV_FE_DRAW_PRIMITIVES 0x00000000
125
126#define VIV_FE_DRAW_PRIMITIVES_HEADER 0x00000000
127#define VIV_FE_DRAW_PRIMITIVES_HEADER_OP__MASK 0xf8000000
128#define VIV_FE_DRAW_PRIMITIVES_HEADER_OP__SHIFT 27
129#define VIV_FE_DRAW_PRIMITIVES_HEADER_OP_DRAW_PRIMITIVES 0x28000000
130
131#define VIV_FE_DRAW_PRIMITIVES_COMMAND 0x00000004
132#define VIV_FE_DRAW_PRIMITIVES_COMMAND_TYPE__MASK 0x000000ff
133#define VIV_FE_DRAW_PRIMITIVES_COMMAND_TYPE__SHIFT 0
134#define VIV_FE_DRAW_PRIMITIVES_COMMAND_TYPE(x) (((x) << VIV_FE_DRAW_PRIMITIVES_COMMAND_TYPE__SHIFT) & VIV_FE_DRAW_PRIMITIVES_COMMAND_TYPE__MASK)
135
136#define VIV_FE_DRAW_PRIMITIVES_START 0x00000008
137
138#define VIV_FE_DRAW_PRIMITIVES_COUNT 0x0000000c
139
140#define VIV_FE_DRAW_INDEXED_PRIMITIVES 0x00000000
141
142#define VIV_FE_DRAW_INDEXED_PRIMITIVES_HEADER 0x00000000
143#define VIV_FE_DRAW_INDEXED_PRIMITIVES_HEADER_OP__MASK 0xf8000000
144#define VIV_FE_DRAW_INDEXED_PRIMITIVES_HEADER_OP__SHIFT 27
145#define VIV_FE_DRAW_INDEXED_PRIMITIVES_HEADER_OP_DRAW_INDEXED_PRIMITIVES 0x30000000
146
147#define VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND 0x00000004
148#define VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND_TYPE__MASK 0x000000ff
149#define VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND_TYPE__SHIFT 0
150#define VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND_TYPE(x) (((x) << VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND_TYPE__SHIFT) & VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND_TYPE__MASK)
151
152#define VIV_FE_DRAW_INDEXED_PRIMITIVES_START 0x00000008
153
154#define VIV_FE_DRAW_INDEXED_PRIMITIVES_COUNT 0x0000000c
155
156#define VIV_FE_DRAW_INDEXED_PRIMITIVES_OFFSET 0x00000010
157
158#define VIV_FE_WAIT 0x00000000
159
160#define VIV_FE_WAIT_HEADER 0x00000000
161#define VIV_FE_WAIT_HEADER_DELAY__MASK 0x0000ffff
162#define VIV_FE_WAIT_HEADER_DELAY__SHIFT 0
163#define VIV_FE_WAIT_HEADER_DELAY(x) (((x) << VIV_FE_WAIT_HEADER_DELAY__SHIFT) & VIV_FE_WAIT_HEADER_DELAY__MASK)
164#define VIV_FE_WAIT_HEADER_OP__MASK 0xf8000000
165#define VIV_FE_WAIT_HEADER_OP__SHIFT 27
166#define VIV_FE_WAIT_HEADER_OP_WAIT 0x38000000
167
168#define VIV_FE_LINK 0x00000000
169
170#define VIV_FE_LINK_HEADER 0x00000000
171#define VIV_FE_LINK_HEADER_PREFETCH__MASK 0x0000ffff
172#define VIV_FE_LINK_HEADER_PREFETCH__SHIFT 0
173#define VIV_FE_LINK_HEADER_PREFETCH(x) (((x) << VIV_FE_LINK_HEADER_PREFETCH__SHIFT) & VIV_FE_LINK_HEADER_PREFETCH__MASK)
174#define VIV_FE_LINK_HEADER_OP__MASK 0xf8000000
175#define VIV_FE_LINK_HEADER_OP__SHIFT 27
176#define VIV_FE_LINK_HEADER_OP_LINK 0x40000000
177
178#define VIV_FE_LINK_ADDRESS 0x00000004
179
180#define VIV_FE_STALL 0x00000000
181
182#define VIV_FE_STALL_HEADER 0x00000000
183#define VIV_FE_STALL_HEADER_OP__MASK 0xf8000000
184#define VIV_FE_STALL_HEADER_OP__SHIFT 27
185#define VIV_FE_STALL_HEADER_OP_STALL 0x48000000
186
187#define VIV_FE_STALL_TOKEN 0x00000004
188#define VIV_FE_STALL_TOKEN_FROM__MASK 0x0000001f
189#define VIV_FE_STALL_TOKEN_FROM__SHIFT 0
190#define VIV_FE_STALL_TOKEN_FROM(x) (((x) << VIV_FE_STALL_TOKEN_FROM__SHIFT) & VIV_FE_STALL_TOKEN_FROM__MASK)
191#define VIV_FE_STALL_TOKEN_TO__MASK 0x00001f00
192#define VIV_FE_STALL_TOKEN_TO__SHIFT 8
193#define VIV_FE_STALL_TOKEN_TO(x) (((x) << VIV_FE_STALL_TOKEN_TO__SHIFT) & VIV_FE_STALL_TOKEN_TO__MASK)
194
195#define VIV_FE_CALL 0x00000000
196
197#define VIV_FE_CALL_HEADER 0x00000000
198#define VIV_FE_CALL_HEADER_PREFETCH__MASK 0x0000ffff
199#define VIV_FE_CALL_HEADER_PREFETCH__SHIFT 0
200#define VIV_FE_CALL_HEADER_PREFETCH(x) (((x) << VIV_FE_CALL_HEADER_PREFETCH__SHIFT) & VIV_FE_CALL_HEADER_PREFETCH__MASK)
201#define VIV_FE_CALL_HEADER_OP__MASK 0xf8000000
202#define VIV_FE_CALL_HEADER_OP__SHIFT 27
203#define VIV_FE_CALL_HEADER_OP_CALL 0x50000000
204
205#define VIV_FE_CALL_ADDRESS 0x00000004
206
207#define VIV_FE_CALL_RETURN_PREFETCH 0x00000008
208
209#define VIV_FE_CALL_RETURN_ADDRESS 0x0000000c
210
211#define VIV_FE_RETURN 0x00000000
212
213#define VIV_FE_RETURN_HEADER 0x00000000
214#define VIV_FE_RETURN_HEADER_OP__MASK 0xf8000000
215#define VIV_FE_RETURN_HEADER_OP__SHIFT 27
216#define VIV_FE_RETURN_HEADER_OP_RETURN 0x58000000
217
218#define VIV_FE_CHIP_SELECT 0x00000000
219
220#define VIV_FE_CHIP_SELECT_HEADER 0x00000000
221#define VIV_FE_CHIP_SELECT_HEADER_OP__MASK 0xf8000000
222#define VIV_FE_CHIP_SELECT_HEADER_OP__SHIFT 27
223#define VIV_FE_CHIP_SELECT_HEADER_OP_CHIP_SELECT 0x68000000
224#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP15 0x00008000
225#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP14 0x00004000
226#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP13 0x00002000
227#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP12 0x00001000
228#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP11 0x00000800
229#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP10 0x00000400
230#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP9 0x00000200
231#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP8 0x00000100
232#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP7 0x00000080
233#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP6 0x00000040
234#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP5 0x00000020
235#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP4 0x00000010
236#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP3 0x00000008
237#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP2 0x00000004
238#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP1 0x00000002
239#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP0 0x00000001
240
241
242#endif /* CMDSTREAM_XML */
diff --git a/tests/etnaviv/etnaviv_2d_test.c b/tests/etnaviv/etnaviv_2d_test.c
new file mode 100644
index 00000000..10751c73
--- /dev/null
+++ b/tests/etnaviv/etnaviv_2d_test.c
@@ -0,0 +1,240 @@
1/*
2 * Copyright (C) 2014-2015 Etnaviv Project
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 * Authors:
24 * Christian Gmeiner <christian.gmeiner@gmail.com>
25 */
26
27#ifdef HAVE_CONFIG_H
28# include "config.h"
29#endif
30
31#include <fcntl.h>
32#include <stdio.h>
33#include <string.h>
34#include <unistd.h>
35
36#include "xf86drm.h"
37#include "etnaviv_drmif.h"
38#include "etnaviv_drm.h"
39
40#include "state.xml.h"
41#include "state_2d.xml.h"
42#include "cmdstream.xml.h"
43
44#include "write_bmp.h"
45
46static inline void etna_emit_load_state(struct etna_cmd_stream *stream,
47 const uint16_t offset, const uint16_t count)
48{
49 uint32_t v;
50
51 v = (VIV_FE_LOAD_STATE_HEADER_OP_LOAD_STATE | VIV_FE_LOAD_STATE_HEADER_OFFSET(offset) |
52 (VIV_FE_LOAD_STATE_HEADER_COUNT(count) & VIV_FE_LOAD_STATE_HEADER_COUNT__MASK));
53
54 etna_cmd_stream_emit(stream, v);
55}
56
57static inline void etna_set_state(struct etna_cmd_stream *stream, uint32_t address, uint32_t value)
58{
59 etna_cmd_stream_reserve(stream, 2);
60 etna_emit_load_state(stream, address >> 2, 1);
61 etna_cmd_stream_emit(stream, value);
62}
63
64static inline void etna_set_state_from_bo(struct etna_cmd_stream *stream,
65 uint32_t address, struct etna_bo *bo)
66{
67 etna_cmd_stream_reserve(stream, 2);
68 etna_emit_load_state(stream, address >> 2, 1);
69
70 etna_cmd_stream_reloc(stream, &(struct etna_reloc){
71 .bo = bo,
72 .flags = ETNA_RELOC_READ,
73 .offset = 0,
74 });
75}
76
77static void gen_cmd_stream(struct etna_cmd_stream *stream, struct etna_bo *bmp, const int width, const int height)
78{
79 int rec;
80 static int num_rects = 256;
81
82 etna_set_state(stream, VIVS_DE_SRC_STRIDE, 0);
83 etna_set_state(stream, VIVS_DE_SRC_ROTATION_CONFIG, 0);
84 etna_set_state(stream, VIVS_DE_SRC_CONFIG, 0);
85 etna_set_state(stream, VIVS_DE_SRC_ORIGIN, 0);
86 etna_set_state(stream, VIVS_DE_SRC_SIZE, 0);
87 etna_set_state(stream, VIVS_DE_SRC_COLOR_BG, 0);
88 etna_set_state(stream, VIVS_DE_SRC_COLOR_FG, 0);
89 etna_set_state(stream, VIVS_DE_STRETCH_FACTOR_LOW, 0);
90 etna_set_state(stream, VIVS_DE_STRETCH_FACTOR_HIGH, 0);
91 etna_set_state_from_bo(stream, VIVS_DE_DEST_ADDRESS, bmp);
92 etna_set_state(stream, VIVS_DE_DEST_STRIDE, width*4);
93 etna_set_state(stream, VIVS_DE_DEST_ROTATION_CONFIG, 0);
94 etna_set_state(stream, VIVS_DE_DEST_CONFIG,
95 VIVS_DE_DEST_CONFIG_FORMAT(DE_FORMAT_A8R8G8B8) |
96 VIVS_DE_DEST_CONFIG_COMMAND_CLEAR |
97 VIVS_DE_DEST_CONFIG_SWIZZLE(DE_SWIZZLE_ARGB) |
98 VIVS_DE_DEST_CONFIG_TILED_DISABLE |
99 VIVS_DE_DEST_CONFIG_MINOR_TILED_DISABLE
100 );
101 etna_set_state(stream, VIVS_DE_ROP,
102 VIVS_DE_ROP_ROP_FG(0xcc) | VIVS_DE_ROP_ROP_BG(0xcc) | VIVS_DE_ROP_TYPE_ROP4);
103 etna_set_state(stream, VIVS_DE_CLIP_TOP_LEFT,
104 VIVS_DE_CLIP_TOP_LEFT_X(0) |
105 VIVS_DE_CLIP_TOP_LEFT_Y(0)
106 );
107 etna_set_state(stream, VIVS_DE_CLIP_BOTTOM_RIGHT,
108 VIVS_DE_CLIP_BOTTOM_RIGHT_X(width) |
109 VIVS_DE_CLIP_BOTTOM_RIGHT_Y(height)
110 );
111 etna_set_state(stream, VIVS_DE_CONFIG, 0); /* TODO */
112 etna_set_state(stream, VIVS_DE_SRC_ORIGIN_FRACTION, 0);
113 etna_set_state(stream, VIVS_DE_ALPHA_CONTROL, 0);
114 etna_set_state(stream, VIVS_DE_ALPHA_MODES, 0);
115 etna_set_state(stream, VIVS_DE_DEST_ROTATION_HEIGHT, 0);
116 etna_set_state(stream, VIVS_DE_SRC_ROTATION_HEIGHT, 0);
117 etna_set_state(stream, VIVS_DE_ROT_ANGLE, 0);
118
119 /* Clear color PE20 */
120 etna_set_state(stream, VIVS_DE_CLEAR_PIXEL_VALUE32, 0xff40ff40);
121 /* Clear color PE10 */
122 etna_set_state(stream, VIVS_DE_CLEAR_BYTE_MASK, 0xff);
123 etna_set_state(stream, VIVS_DE_CLEAR_PIXEL_VALUE_LOW, 0xff40ff40);
124 etna_set_state(stream, VIVS_DE_CLEAR_PIXEL_VALUE_HIGH, 0xff40ff40);
125
126 etna_set_state(stream, VIVS_DE_DEST_COLOR_KEY, 0);
127 etna_set_state(stream, VIVS_DE_GLOBAL_SRC_COLOR, 0);
128 etna_set_state(stream, VIVS_DE_GLOBAL_DEST_COLOR, 0);
129 etna_set_state(stream, VIVS_DE_COLOR_MULTIPLY_MODES, 0);
130 etna_set_state(stream, VIVS_DE_PE_TRANSPARENCY, 0);
131 etna_set_state(stream, VIVS_DE_PE_CONTROL, 0);
132 etna_set_state(stream, VIVS_DE_PE_DITHER_LOW, 0xffffffff);
133 etna_set_state(stream, VIVS_DE_PE_DITHER_HIGH, 0xffffffff);
134
135 /* Queue DE command */
136 etna_cmd_stream_emit(stream,
137 VIV_FE_DRAW_2D_HEADER_OP_DRAW_2D | VIV_FE_DRAW_2D_HEADER_COUNT(num_rects) /* render one rectangle */
138 );
139 etna_cmd_stream_emit(stream, 0x0); /* rectangles start aligned */
140
141 for(rec=0; rec < num_rects; ++rec) {
142 int x = rec%16;
143 int y = rec/16;
144 etna_cmd_stream_emit(stream, VIV_FE_DRAW_2D_TOP_LEFT_X(x*8) | VIV_FE_DRAW_2D_TOP_LEFT_Y(y*8));
145 etna_cmd_stream_emit(stream, VIV_FE_DRAW_2D_BOTTOM_RIGHT_X(x*8+4) | VIV_FE_DRAW_2D_BOTTOM_RIGHT_Y(y*8+4));
146 }
147 etna_set_state(stream, 1, 0);
148 etna_set_state(stream, 1, 0);
149 etna_set_state(stream, 1, 0);
150
151 etna_set_state(stream, VIVS_GL_FLUSH_CACHE, VIVS_GL_FLUSH_CACHE_PE2D);
152}
153
154int main(int argc, char *argv[])
155{
156 const int width = 256;
157 const int height = 256;
158 const size_t bmp_size = width * height * 4;
159
160 struct etna_device *dev;
161 struct etna_gpu *gpu;
162 struct etna_pipe *pipe;
163 struct etna_bo *bmp;
164 struct etna_cmd_stream *stream;
165
166 drmVersionPtr version;
167 int fd, ret = 0;
168
169 fd = open(argv[1], O_RDWR);
170 if (fd < 0)
171 return 1;
172
173 version = drmGetVersion(fd);
174 if (version) {
175 printf("Version: %d.%d.%d\n", version->version_major,
176 version->version_minor, version->version_patchlevel);
177 printf(" Name: %s\n", version->name);
178 printf(" Date: %s\n", version->date);
179 printf(" Description: %s\n", version->desc);
180 drmFreeVersion(version);
181 }
182
183 dev = etna_device_new(fd);
184 if (!dev) {
185 ret = 2;
186 goto out;
187 }
188
189 /* TODO: we assume that core 0 is a 2D capable one */
190 gpu = etna_gpu_new(dev, 0);
191 if (!gpu) {
192 ret = 3;
193 goto out_device;
194 }
195
196 pipe = etna_pipe_new(gpu, ETNA_PIPE_2D);
197 if (!pipe) {
198 ret = 4;
199 goto out_gpu;
200 }
201
202 bmp = etna_bo_new(dev, bmp_size, ETNA_BO_UNCACHED);
203 if (!bmp) {
204 ret = 5;
205 goto out_pipe;
206 }
207 memset(etna_bo_map(bmp), 0, bmp_size);
208
209 stream = etna_cmd_stream_new(pipe, 0x300, NULL, NULL);
210 if (!stream) {
211 ret = 6;
212 goto out_bo;
213 }
214
215 /* generate command sequence */
216 gen_cmd_stream(stream, bmp, width, height);
217
218 etna_cmd_stream_finish(stream);
219
220 bmp_dump32(etna_bo_map(bmp), width, height, false, "/tmp/etna.bmp");
221
222 etna_cmd_stream_del(stream);
223
224out_bo:
225 etna_bo_del(bmp);
226
227out_pipe:
228 etna_pipe_del(pipe);
229
230out_gpu:
231 etna_gpu_del(gpu);
232
233out_device:
234 etna_device_del(dev);
235
236out:
237 close(fd);
238
239 return ret;
240}
diff --git a/tests/etnaviv/etnaviv_bo_cache_test.c b/tests/etnaviv/etnaviv_bo_cache_test.c
new file mode 100644
index 00000000..fb01f8d3
--- /dev/null
+++ b/tests/etnaviv/etnaviv_bo_cache_test.c
@@ -0,0 +1,121 @@
1/*
2 * Copyright (C) 2016 Etnaviv Project
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 * Authors:
24 * Christian Gmeiner <christian.gmeiner@gmail.com>
25 */
26
27#ifdef HAVE_CONFIG_H
28# include "config.h"
29#endif
30
31#undef NDEBUG
32#include <assert.h>
33
34#include <fcntl.h>
35#include <stdio.h>
36#include <string.h>
37#include <unistd.h>
38
39#include "xf86drm.h"
40#include "etnaviv_drmif.h"
41#include "etnaviv_drm.h"
42
43static void test_cache(struct etna_device *dev)
44{
45 struct etna_bo *bo, *tmp;
46
47 /* allocate and free some bo's with same size - we must
48 * get the same bo over and over. */
49 printf("testing bo cache ... ");
50
51 bo = tmp = etna_bo_new(dev, 0x100, ETNA_BO_UNCACHED);
52 assert(bo);
53 etna_bo_del(bo);
54
55 for (unsigned i = 0; i < 100; i++) {
56 tmp = etna_bo_new(dev, 0x100, ETNA_BO_UNCACHED);
57 etna_bo_del(tmp);
58 assert(tmp == bo);
59 }
60
61 printf("ok\n");
62}
63
64static void test_size_rounding(struct etna_device *dev)
65{
66 struct etna_bo *bo;
67
68 printf("testing size rounding ... ");
69
70 bo = etna_bo_new(dev, 15, ETNA_BO_UNCACHED);
71 assert(etna_bo_size(bo) == 4096);
72 etna_bo_del(bo);
73
74 bo = etna_bo_new(dev, 4096, ETNA_BO_UNCACHED);
75 assert(etna_bo_size(bo) == 4096);
76 etna_bo_del(bo);
77
78 bo = etna_bo_new(dev, 4100, ETNA_BO_UNCACHED);
79 assert(etna_bo_size(bo) == 8192);
80 etna_bo_del(bo);
81
82 printf("ok\n");
83}
84
85int main(int argc, char *argv[])
86{
87 struct etna_device *dev;
88
89 drmVersionPtr version;
90 int fd, ret = 0;
91
92 fd = open(argv[1], O_RDWR);
93 if (fd < 0)
94 return 1;
95
96 version = drmGetVersion(fd);
97 if (version) {
98 printf("Version: %d.%d.%d\n", version->version_major,
99 version->version_minor, version->version_patchlevel);
100 printf(" Name: %s\n", version->name);
101 printf(" Date: %s\n", version->date);
102 printf(" Description: %s\n", version->desc);
103 drmFreeVersion(version);
104 }
105
106 dev = etna_device_new(fd);
107 if (!dev) {
108 ret = 2;
109 goto out;
110 }
111
112 test_cache(dev);
113 test_size_rounding(dev);
114
115 etna_device_del(dev);
116
117out:
118 close(fd);
119
120 return ret;
121}
diff --git a/tests/etnaviv/etnaviv_cmd_stream_test.c b/tests/etnaviv/etnaviv_cmd_stream_test.c
new file mode 100644
index 00000000..b650aae2
--- /dev/null
+++ b/tests/etnaviv/etnaviv_cmd_stream_test.c
@@ -0,0 +1,123 @@
1/*
2 * Copyright (C) 2015 Etnaviv Project
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 * Authors:
24 * Christian Gmeiner <christian.gmeiner@gmail.com>
25 */
26
27#undef NDEBUG
28#include <assert.h>
29#include <string.h>
30#include <stdio.h>
31
32#include "etnaviv_drmif.h"
33
/* Verify etna_cmd_stream_avail(): a new stream of N 32-bit words must
 * report N-2 available (two words are reserved for the stream's
 * trailing LINK command), a zero size must fail creation, and odd
 * sizes are rounded down to an even word count. */
static void test_avail(void)
{
	struct etna_cmd_stream *stream;

	printf("testing etna_cmd_stream_avail ... ");

	/* invalid size */
	stream = etna_cmd_stream_new(NULL, 0, NULL, NULL);
	assert(stream == NULL);

	stream = etna_cmd_stream_new(NULL, 4, NULL, NULL);
	assert(stream);
	assert(etna_cmd_stream_avail(stream) == 2);
	etna_cmd_stream_del(stream);

	stream = etna_cmd_stream_new(NULL, 20, NULL, NULL);
	assert(stream);
	assert(etna_cmd_stream_avail(stream) == 18);
	etna_cmd_stream_del(stream);

	/* odd number of 32 bit words */
	stream = etna_cmd_stream_new(NULL, 1, NULL, NULL);
	assert(stream);
	assert(etna_cmd_stream_avail(stream) == 0);
	etna_cmd_stream_del(stream);

	stream = etna_cmd_stream_new(NULL, 23, NULL, NULL);
	assert(stream);
	assert(etna_cmd_stream_avail(stream) == 22);
	etna_cmd_stream_del(stream);

	printf("ok\n");
}
67
/* Verify etna_cmd_stream_emit(): each emitted 32-bit word must reduce
 * the stream's available word count by exactly one. */
static void test_emit(void)
{
	struct etna_cmd_stream *stream;

	printf("testing etna_cmd_stream_emit ... ");

	stream = etna_cmd_stream_new(NULL, 6, NULL, NULL);
	assert(stream);
	assert(etna_cmd_stream_avail(stream) == 4);

	etna_cmd_stream_emit(stream, 0x1);
	assert(etna_cmd_stream_avail(stream) == 3);

	etna_cmd_stream_emit(stream, 0x2);
	assert(etna_cmd_stream_avail(stream) == 2);

	etna_cmd_stream_emit(stream, 0x3);
	assert(etna_cmd_stream_avail(stream) == 1);

	etna_cmd_stream_del(stream);

	printf("ok\n");
}
91
/* Verify etna_cmd_stream_offset(): the write offset starts at zero and
 * advances by one per emitted 32-bit word. */
static void test_offset(void)
{
	struct etna_cmd_stream *stream;

	printf("testing etna_cmd_stream_offset ... ");

	stream = etna_cmd_stream_new(NULL, 6, NULL, NULL);
	/* Guard against a NULL stream before dereferencing it, matching
	 * the allocation checks in test_avail() and test_emit(). */
	assert(stream);
	assert(etna_cmd_stream_offset(stream) == 0);

	etna_cmd_stream_emit(stream, 0x1);
	assert(etna_cmd_stream_offset(stream) == 1);

	etna_cmd_stream_emit(stream, 0x2);
	assert(etna_cmd_stream_offset(stream) == 2);

	etna_cmd_stream_emit(stream, 0x3);
	etna_cmd_stream_emit(stream, 0x4);
	assert(etna_cmd_stream_offset(stream) == 4);

	etna_cmd_stream_del(stream);

	printf("ok\n");
}
115
/* Entry point: runs all command-stream unit tests. These tests operate
 * purely on the stream bookkeeping (no device needed), so the command
 * line is ignored. Returns 0 on success; failures abort via assert(). */
int main(int argc, char *argv[])
{
	/* Explicitly discard the unused parameters to keep the build
	 * clean under -Wall -Wextra. */
	(void)argc;
	(void)argv;

	test_avail();
	test_emit();
	test_offset();

	return 0;
}
diff --git a/tests/etnaviv/state.xml.h b/tests/etnaviv/state.xml.h
new file mode 100644
index 00000000..e1ecbf3a
--- /dev/null
+++ b/tests/etnaviv/state.xml.h
@@ -0,0 +1,375 @@
1#ifndef STATE_XML
2#define STATE_XML
3
4/* Autogenerated file, DO NOT EDIT manually!
5
6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng
8git clone git://0x04.net/rules-ng-ng
9
10The rules-ng-ng source files this header was generated from are:
11- state.xml ( 18940 bytes, from 2016-09-06 14:14:12)
12- common.xml ( 20583 bytes, from 2016-09-06 14:14:12)
13- state_hi.xml ( 25653 bytes, from 2016-09-06 14:45:17)
14- copyright.xml ( 1597 bytes, from 2016-09-06 14:44:16)
15- state_2d.xml ( 51552 bytes, from 2016-09-06 14:44:16)
16- state_3d.xml ( 54603 bytes, from 2016-09-06 14:44:16)
17- state_vg.xml ( 5975 bytes, from 2016-09-06 14:44:16)
18
19Copyright (C) 2012-2016 by the following authors:
20- Wladimir J. van der Laan <laanwj@gmail.com>
21- Christian Gmeiner <christian.gmeiner@gmail.com>
22- Lucas Stach <l.stach@pengutronix.de>
23- Russell King <rmk@arm.linux.org.uk>
24
25Permission is hereby granted, free of charge, to any person obtaining a
26copy of this software and associated documentation files (the "Software"),
27to deal in the Software without restriction, including without limitation
28the rights to use, copy, modify, merge, publish, distribute, sub license,
29and/or sell copies of the Software, and to permit persons to whom the
30Software is furnished to do so, subject to the following conditions:
31
32The above copyright notice and this permission notice (including the
33next paragraph) shall be included in all copies or substantial portions
34of the Software.
35
36THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
37IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
38FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
39THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
40LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
41FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
42DEALINGS IN THE SOFTWARE.
43*/
44
45
46#define VARYING_COMPONENT_USE_UNUSED 0x00000000
47#define VARYING_COMPONENT_USE_USED 0x00000001
48#define VARYING_COMPONENT_USE_POINTCOORD_X 0x00000002
49#define VARYING_COMPONENT_USE_POINTCOORD_Y 0x00000003
50#define FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE__MASK 0x000000ff
51#define FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE__SHIFT 0
52#define FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE(x) (((x) << FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE__SHIFT) & FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE__MASK)
53#define VIVS_FE 0x00000000
54
55#define VIVS_FE_VERTEX_ELEMENT_CONFIG(i0) (0x00000600 + 0x4*(i0))
56#define VIVS_FE_VERTEX_ELEMENT_CONFIG__ESIZE 0x00000004
57#define VIVS_FE_VERTEX_ELEMENT_CONFIG__LEN 0x00000010
58#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE__MASK 0x0000000f
59#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE__SHIFT 0
60#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_BYTE 0x00000000
61#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_UNSIGNED_BYTE 0x00000001
62#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_SHORT 0x00000002
63#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_UNSIGNED_SHORT 0x00000003
64#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_INT 0x00000004
65#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_UNSIGNED_INT 0x00000005
66#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_FLOAT 0x00000008
67#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_HALF_FLOAT 0x00000009
68#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_FIXED 0x0000000b
69#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_INT_10_10_10_2 0x0000000c
70#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_UNSIGNED_INT_10_10_10_2 0x0000000d
71#define VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN__MASK 0x00000030
72#define VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN__SHIFT 4
73#define VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN(x) (((x) << VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN__SHIFT) & VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN__MASK)
74#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NONCONSECUTIVE 0x00000080
75#define VIVS_FE_VERTEX_ELEMENT_CONFIG_STREAM__MASK 0x00000700
76#define VIVS_FE_VERTEX_ELEMENT_CONFIG_STREAM__SHIFT 8
77#define VIVS_FE_VERTEX_ELEMENT_CONFIG_STREAM(x) (((x) << VIVS_FE_VERTEX_ELEMENT_CONFIG_STREAM__SHIFT) & VIVS_FE_VERTEX_ELEMENT_CONFIG_STREAM__MASK)
78#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NUM__MASK 0x00003000
79#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NUM__SHIFT 12
80#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NUM(x) (((x) << VIVS_FE_VERTEX_ELEMENT_CONFIG_NUM__SHIFT) & VIVS_FE_VERTEX_ELEMENT_CONFIG_NUM__MASK)
81#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NORMALIZE__MASK 0x0000c000
82#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NORMALIZE__SHIFT 14
83#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NORMALIZE_OFF 0x00000000
84#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NORMALIZE_ON 0x00008000
85#define VIVS_FE_VERTEX_ELEMENT_CONFIG_START__MASK 0x00ff0000
86#define VIVS_FE_VERTEX_ELEMENT_CONFIG_START__SHIFT 16
87#define VIVS_FE_VERTEX_ELEMENT_CONFIG_START(x) (((x) << VIVS_FE_VERTEX_ELEMENT_CONFIG_START__SHIFT) & VIVS_FE_VERTEX_ELEMENT_CONFIG_START__MASK)
88#define VIVS_FE_VERTEX_ELEMENT_CONFIG_END__MASK 0xff000000
89#define VIVS_FE_VERTEX_ELEMENT_CONFIG_END__SHIFT 24
90#define VIVS_FE_VERTEX_ELEMENT_CONFIG_END(x) (((x) << VIVS_FE_VERTEX_ELEMENT_CONFIG_END__SHIFT) & VIVS_FE_VERTEX_ELEMENT_CONFIG_END__MASK)
91
92#define VIVS_FE_CMD_STREAM_BASE_ADDR 0x00000640
93
94#define VIVS_FE_INDEX_STREAM_BASE_ADDR 0x00000644
95
96#define VIVS_FE_INDEX_STREAM_CONTROL 0x00000648
97#define VIVS_FE_INDEX_STREAM_CONTROL_TYPE__MASK 0x00000003
98#define VIVS_FE_INDEX_STREAM_CONTROL_TYPE__SHIFT 0
99#define VIVS_FE_INDEX_STREAM_CONTROL_TYPE_UNSIGNED_CHAR 0x00000000
100#define VIVS_FE_INDEX_STREAM_CONTROL_TYPE_UNSIGNED_SHORT 0x00000001
101#define VIVS_FE_INDEX_STREAM_CONTROL_TYPE_UNSIGNED_INT 0x00000002
102
103#define VIVS_FE_VERTEX_STREAM_BASE_ADDR 0x0000064c
104
105#define VIVS_FE_VERTEX_STREAM_CONTROL 0x00000650
106
107#define VIVS_FE_COMMAND_ADDRESS 0x00000654
108
109#define VIVS_FE_COMMAND_CONTROL 0x00000658
110#define VIVS_FE_COMMAND_CONTROL_PREFETCH__MASK 0x0000ffff
111#define VIVS_FE_COMMAND_CONTROL_PREFETCH__SHIFT 0
112#define VIVS_FE_COMMAND_CONTROL_PREFETCH(x) (((x) << VIVS_FE_COMMAND_CONTROL_PREFETCH__SHIFT) & VIVS_FE_COMMAND_CONTROL_PREFETCH__MASK)
113#define VIVS_FE_COMMAND_CONTROL_ENABLE 0x00010000
114
115#define VIVS_FE_DMA_STATUS 0x0000065c
116
117#define VIVS_FE_DMA_DEBUG_STATE 0x00000660
118#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE__MASK 0x0000001f
119#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE__SHIFT 0
120#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_IDLE 0x00000000
121#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_DEC 0x00000001
122#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_ADR0 0x00000002
123#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_LOAD0 0x00000003
124#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_ADR1 0x00000004
125#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_LOAD1 0x00000005
126#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_3DADR 0x00000006
127#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_3DCMD 0x00000007
128#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_3DCNTL 0x00000008
129#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_3DIDXCNTL 0x00000009
130#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_INITREQDMA 0x0000000a
131#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_DRAWIDX 0x0000000b
132#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_DRAW 0x0000000c
133#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_2DRECT0 0x0000000d
134#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_2DRECT1 0x0000000e
135#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_2DDATA0 0x0000000f
136#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_2DDATA1 0x00000010
137#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_WAITFIFO 0x00000011
138#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_WAIT 0x00000012
139#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_LINK 0x00000013
140#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_END 0x00000014
141#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_STALL 0x00000015
142#define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE__MASK 0x00000300
143#define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE__SHIFT 8
144#define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE_IDLE 0x00000000
145#define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE_START 0x00000100
146#define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE_REQ 0x00000200
147#define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE_END 0x00000300
148#define VIVS_FE_DMA_DEBUG_STATE_CMD_FETCH_STATE__MASK 0x00000c00
149#define VIVS_FE_DMA_DEBUG_STATE_CMD_FETCH_STATE__SHIFT 10
150#define VIVS_FE_DMA_DEBUG_STATE_CMD_FETCH_STATE_IDLE 0x00000000
151#define VIVS_FE_DMA_DEBUG_STATE_CMD_FETCH_STATE_RAMVALID 0x00000400
152#define VIVS_FE_DMA_DEBUG_STATE_CMD_FETCH_STATE_VALID 0x00000800
153#define VIVS_FE_DMA_DEBUG_STATE_REQ_DMA_STATE__MASK 0x00003000
154#define VIVS_FE_DMA_DEBUG_STATE_REQ_DMA_STATE__SHIFT 12
155#define VIVS_FE_DMA_DEBUG_STATE_REQ_DMA_STATE_IDLE 0x00000000
156#define VIVS_FE_DMA_DEBUG_STATE_REQ_DMA_STATE_WAITIDX 0x00001000
157#define VIVS_FE_DMA_DEBUG_STATE_REQ_DMA_STATE_CAL 0x00002000
158#define VIVS_FE_DMA_DEBUG_STATE_CAL_STATE__MASK 0x0000c000
159#define VIVS_FE_DMA_DEBUG_STATE_CAL_STATE__SHIFT 14
160#define VIVS_FE_DMA_DEBUG_STATE_CAL_STATE_IDLE 0x00000000
161#define VIVS_FE_DMA_DEBUG_STATE_CAL_STATE_LDADR 0x00004000
162#define VIVS_FE_DMA_DEBUG_STATE_CAL_STATE_IDXCALC 0x00008000
163#define VIVS_FE_DMA_DEBUG_STATE_VE_REQ_STATE__MASK 0x00030000
164#define VIVS_FE_DMA_DEBUG_STATE_VE_REQ_STATE__SHIFT 16
165#define VIVS_FE_DMA_DEBUG_STATE_VE_REQ_STATE_IDLE 0x00000000
166#define VIVS_FE_DMA_DEBUG_STATE_VE_REQ_STATE_CKCACHE 0x00010000
167#define VIVS_FE_DMA_DEBUG_STATE_VE_REQ_STATE_MISS 0x00020000
168
169#define VIVS_FE_DMA_ADDRESS 0x00000664
170
171#define VIVS_FE_DMA_LOW 0x00000668
172
173#define VIVS_FE_DMA_HIGH 0x0000066c
174
175#define VIVS_FE_AUTO_FLUSH 0x00000670
176
177#define VIVS_FE_UNK00678 0x00000678
178
179#define VIVS_FE_UNK0067C 0x0000067c
180
181#define VIVS_FE_VERTEX_STREAMS(i0) (0x00000000 + 0x4*(i0))
182#define VIVS_FE_VERTEX_STREAMS__ESIZE 0x00000004
183#define VIVS_FE_VERTEX_STREAMS__LEN 0x00000008
184
185#define VIVS_FE_VERTEX_STREAMS_BASE_ADDR(i0) (0x00000680 + 0x4*(i0))
186
187#define VIVS_FE_VERTEX_STREAMS_CONTROL(i0) (0x000006a0 + 0x4*(i0))
188
189#define VIVS_FE_UNK00700(i0) (0x00000700 + 0x4*(i0))
190#define VIVS_FE_UNK00700__ESIZE 0x00000004
191#define VIVS_FE_UNK00700__LEN 0x00000010
192
193#define VIVS_FE_UNK00740(i0) (0x00000740 + 0x4*(i0))
194#define VIVS_FE_UNK00740__ESIZE 0x00000004
195#define VIVS_FE_UNK00740__LEN 0x00000010
196
197#define VIVS_FE_UNK00780(i0) (0x00000780 + 0x4*(i0))
198#define VIVS_FE_UNK00780__ESIZE 0x00000004
199#define VIVS_FE_UNK00780__LEN 0x00000010
200
201#define VIVS_GL 0x00000000
202
203#define VIVS_GL_PIPE_SELECT 0x00003800
204#define VIVS_GL_PIPE_SELECT_PIPE__MASK 0x00000001
205#define VIVS_GL_PIPE_SELECT_PIPE__SHIFT 0
206#define VIVS_GL_PIPE_SELECT_PIPE(x) (((x) << VIVS_GL_PIPE_SELECT_PIPE__SHIFT) & VIVS_GL_PIPE_SELECT_PIPE__MASK)
207
208#define VIVS_GL_EVENT 0x00003804
209#define VIVS_GL_EVENT_EVENT_ID__MASK 0x0000001f
210#define VIVS_GL_EVENT_EVENT_ID__SHIFT 0
211#define VIVS_GL_EVENT_EVENT_ID(x) (((x) << VIVS_GL_EVENT_EVENT_ID__SHIFT) & VIVS_GL_EVENT_EVENT_ID__MASK)
212#define VIVS_GL_EVENT_FROM_FE 0x00000020
213#define VIVS_GL_EVENT_FROM_PE 0x00000040
214#define VIVS_GL_EVENT_SOURCE__MASK 0x00001f00
215#define VIVS_GL_EVENT_SOURCE__SHIFT 8
216#define VIVS_GL_EVENT_SOURCE(x) (((x) << VIVS_GL_EVENT_SOURCE__SHIFT) & VIVS_GL_EVENT_SOURCE__MASK)
217
218#define VIVS_GL_SEMAPHORE_TOKEN 0x00003808
219#define VIVS_GL_SEMAPHORE_TOKEN_FROM__MASK 0x0000001f
220#define VIVS_GL_SEMAPHORE_TOKEN_FROM__SHIFT 0
221#define VIVS_GL_SEMAPHORE_TOKEN_FROM(x) (((x) << VIVS_GL_SEMAPHORE_TOKEN_FROM__SHIFT) & VIVS_GL_SEMAPHORE_TOKEN_FROM__MASK)
222#define VIVS_GL_SEMAPHORE_TOKEN_TO__MASK 0x00001f00
223#define VIVS_GL_SEMAPHORE_TOKEN_TO__SHIFT 8
224#define VIVS_GL_SEMAPHORE_TOKEN_TO(x) (((x) << VIVS_GL_SEMAPHORE_TOKEN_TO__SHIFT) & VIVS_GL_SEMAPHORE_TOKEN_TO__MASK)
225
226#define VIVS_GL_FLUSH_CACHE 0x0000380c
227#define VIVS_GL_FLUSH_CACHE_DEPTH 0x00000001
228#define VIVS_GL_FLUSH_CACHE_COLOR 0x00000002
229#define VIVS_GL_FLUSH_CACHE_TEXTURE 0x00000004
230#define VIVS_GL_FLUSH_CACHE_PE2D 0x00000008
231#define VIVS_GL_FLUSH_CACHE_TEXTUREVS 0x00000010
232#define VIVS_GL_FLUSH_CACHE_SHADER_L1 0x00000020
233#define VIVS_GL_FLUSH_CACHE_SHADER_L2 0x00000040
234
235#define VIVS_GL_FLUSH_MMU 0x00003810
236#define VIVS_GL_FLUSH_MMU_FLUSH_FEMMU 0x00000001
237#define VIVS_GL_FLUSH_MMU_FLUSH_UNK1 0x00000002
238#define VIVS_GL_FLUSH_MMU_FLUSH_UNK2 0x00000004
239#define VIVS_GL_FLUSH_MMU_FLUSH_PEMMU 0x00000008
240#define VIVS_GL_FLUSH_MMU_FLUSH_UNK4 0x00000010
241
242#define VIVS_GL_VERTEX_ELEMENT_CONFIG 0x00003814
243
244#define VIVS_GL_MULTI_SAMPLE_CONFIG 0x00003818
245#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES__MASK 0x00000003
246#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES__SHIFT 0
247#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES_NONE 0x00000000
248#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES_2X 0x00000001
249#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES_4X 0x00000002
250#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES_MASK 0x00000008
251#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES__MASK 0x000000f0
252#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES__SHIFT 4
253#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES(x) (((x) << VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES__SHIFT) & VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES__MASK)
254#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES_MASK 0x00000100
255#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12__MASK 0x00007000
256#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12__SHIFT 12
257#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12(x) (((x) << VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12__SHIFT) & VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12__MASK)
258#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12_MASK 0x00008000
259#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16__MASK 0x00030000
260#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16__SHIFT 16
261#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16(x) (((x) << VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16__SHIFT) & VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16__MASK)
262#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16_MASK 0x00080000
263
264#define VIVS_GL_VARYING_TOTAL_COMPONENTS 0x0000381c
265#define VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM__MASK 0x000000ff
266#define VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM__SHIFT 0
267#define VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM(x) (((x) << VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM__SHIFT) & VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM__MASK)
268
269#define VIVS_GL_VARYING_NUM_COMPONENTS 0x00003820
270#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR0__MASK 0x00000007
271#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR0__SHIFT 0
272#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR0(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR0__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR0__MASK)
273#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR1__MASK 0x00000070
274#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR1__SHIFT 4
275#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR1(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR1__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR1__MASK)
276#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR2__MASK 0x00000700
277#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR2__SHIFT 8
278#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR2(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR2__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR2__MASK)
279#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR3__MASK 0x00007000
280#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR3__SHIFT 12
281#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR3(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR3__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR3__MASK)
282#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR4__MASK 0x00070000
283#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR4__SHIFT 16
284#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR4(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR4__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR4__MASK)
285#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR5__MASK 0x00700000
286#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR5__SHIFT 20
287#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR5(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR5__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR5__MASK)
288#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR6__MASK 0x07000000
289#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR6__SHIFT 24
290#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR6(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR6__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR6__MASK)
291#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR7__MASK 0x70000000
292#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR7__SHIFT 28
293#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR7(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR7__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR7__MASK)
294
295#define VIVS_GL_VARYING_COMPONENT_USE(i0) (0x00003828 + 0x4*(i0))
296#define VIVS_GL_VARYING_COMPONENT_USE__ESIZE 0x00000004
297#define VIVS_GL_VARYING_COMPONENT_USE__LEN 0x00000002
298#define VIVS_GL_VARYING_COMPONENT_USE_COMP0__MASK 0x00000003
299#define VIVS_GL_VARYING_COMPONENT_USE_COMP0__SHIFT 0
300#define VIVS_GL_VARYING_COMPONENT_USE_COMP0(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP0__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP0__MASK)
301#define VIVS_GL_VARYING_COMPONENT_USE_COMP1__MASK 0x0000000c
302#define VIVS_GL_VARYING_COMPONENT_USE_COMP1__SHIFT 2
303#define VIVS_GL_VARYING_COMPONENT_USE_COMP1(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP1__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP1__MASK)
304#define VIVS_GL_VARYING_COMPONENT_USE_COMP2__MASK 0x00000030
305#define VIVS_GL_VARYING_COMPONENT_USE_COMP2__SHIFT 4
306#define VIVS_GL_VARYING_COMPONENT_USE_COMP2(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP2__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP2__MASK)
307#define VIVS_GL_VARYING_COMPONENT_USE_COMP3__MASK 0x000000c0
308#define VIVS_GL_VARYING_COMPONENT_USE_COMP3__SHIFT 6
309#define VIVS_GL_VARYING_COMPONENT_USE_COMP3(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP3__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP3__MASK)
310#define VIVS_GL_VARYING_COMPONENT_USE_COMP4__MASK 0x00000300
311#define VIVS_GL_VARYING_COMPONENT_USE_COMP4__SHIFT 8
312#define VIVS_GL_VARYING_COMPONENT_USE_COMP4(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP4__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP4__MASK)
313#define VIVS_GL_VARYING_COMPONENT_USE_COMP5__MASK 0x00000c00
314#define VIVS_GL_VARYING_COMPONENT_USE_COMP5__SHIFT 10
315#define VIVS_GL_VARYING_COMPONENT_USE_COMP5(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP5__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP5__MASK)
316#define VIVS_GL_VARYING_COMPONENT_USE_COMP6__MASK 0x00003000
317#define VIVS_GL_VARYING_COMPONENT_USE_COMP6__SHIFT 12
318#define VIVS_GL_VARYING_COMPONENT_USE_COMP6(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP6__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP6__MASK)
319#define VIVS_GL_VARYING_COMPONENT_USE_COMP7__MASK 0x0000c000
320#define VIVS_GL_VARYING_COMPONENT_USE_COMP7__SHIFT 14
321#define VIVS_GL_VARYING_COMPONENT_USE_COMP7(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP7__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP7__MASK)
322#define VIVS_GL_VARYING_COMPONENT_USE_COMP8__MASK 0x00030000
323#define VIVS_GL_VARYING_COMPONENT_USE_COMP8__SHIFT 16
324#define VIVS_GL_VARYING_COMPONENT_USE_COMP8(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP8__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP8__MASK)
325#define VIVS_GL_VARYING_COMPONENT_USE_COMP9__MASK 0x000c0000
326#define VIVS_GL_VARYING_COMPONENT_USE_COMP9__SHIFT 18
327#define VIVS_GL_VARYING_COMPONENT_USE_COMP9(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP9__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP9__MASK)
328#define VIVS_GL_VARYING_COMPONENT_USE_COMP10__MASK 0x00300000
329#define VIVS_GL_VARYING_COMPONENT_USE_COMP10__SHIFT 20
330#define VIVS_GL_VARYING_COMPONENT_USE_COMP10(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP10__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP10__MASK)
331#define VIVS_GL_VARYING_COMPONENT_USE_COMP11__MASK 0x00c00000
332#define VIVS_GL_VARYING_COMPONENT_USE_COMP11__SHIFT 22
333#define VIVS_GL_VARYING_COMPONENT_USE_COMP11(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP11__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP11__MASK)
334#define VIVS_GL_VARYING_COMPONENT_USE_COMP12__MASK 0x03000000
335#define VIVS_GL_VARYING_COMPONENT_USE_COMP12__SHIFT 24
336#define VIVS_GL_VARYING_COMPONENT_USE_COMP12(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP12__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP12__MASK)
337#define VIVS_GL_VARYING_COMPONENT_USE_COMP13__MASK 0x0c000000
338#define VIVS_GL_VARYING_COMPONENT_USE_COMP13__SHIFT 26
339#define VIVS_GL_VARYING_COMPONENT_USE_COMP13(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP13__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP13__MASK)
340#define VIVS_GL_VARYING_COMPONENT_USE_COMP14__MASK 0x30000000
341#define VIVS_GL_VARYING_COMPONENT_USE_COMP14__SHIFT 28
342#define VIVS_GL_VARYING_COMPONENT_USE_COMP14(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP14__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP14__MASK)
343#define VIVS_GL_VARYING_COMPONENT_USE_COMP15__MASK 0xc0000000
344#define VIVS_GL_VARYING_COMPONENT_USE_COMP15__SHIFT 30
345#define VIVS_GL_VARYING_COMPONENT_USE_COMP15(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP15__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP15__MASK)
346
347#define VIVS_GL_UNK03834 0x00003834
348
349#define VIVS_GL_UNK03838 0x00003838
350
351#define VIVS_GL_API_MODE 0x0000384c
352#define VIVS_GL_API_MODE_OPENGL 0x00000000
353#define VIVS_GL_API_MODE_OPENVG 0x00000001
354#define VIVS_GL_API_MODE_OPENCL 0x00000002
355
356#define VIVS_GL_CONTEXT_POINTER 0x00003850
357
358#define VIVS_GL_UNK03A00 0x00003a00
359
360#define VIVS_GL_STALL_TOKEN 0x00003c00
361#define VIVS_GL_STALL_TOKEN_FROM__MASK 0x0000001f
362#define VIVS_GL_STALL_TOKEN_FROM__SHIFT 0
363#define VIVS_GL_STALL_TOKEN_FROM(x) (((x) << VIVS_GL_STALL_TOKEN_FROM__SHIFT) & VIVS_GL_STALL_TOKEN_FROM__MASK)
364#define VIVS_GL_STALL_TOKEN_TO__MASK 0x00001f00
365#define VIVS_GL_STALL_TOKEN_TO__SHIFT 8
366#define VIVS_GL_STALL_TOKEN_TO(x) (((x) << VIVS_GL_STALL_TOKEN_TO__SHIFT) & VIVS_GL_STALL_TOKEN_TO__MASK)
367#define VIVS_GL_STALL_TOKEN_FLIP0 0x40000000
368#define VIVS_GL_STALL_TOKEN_FLIP1 0x80000000
369
370#define VIVS_DUMMY 0x00000000
371
372#define VIVS_DUMMY_DUMMY 0x0003fffc
373
374
375#endif /* STATE_XML */
diff --git a/tests/etnaviv/state_2d.xml.h b/tests/etnaviv/state_2d.xml.h
new file mode 100644
index 00000000..715eed44
--- /dev/null
+++ b/tests/etnaviv/state_2d.xml.h
@@ -0,0 +1,1497 @@
1#ifndef STATE_2D_XML
2#define STATE_2D_XML
3
4/* Autogenerated file, DO NOT EDIT manually!
5
6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://0x04.net/cgit/index.cgi/rules-ng-ng
8git clone git://0x04.net/rules-ng-ng
9
10The rules-ng-ng source files this header was generated from are:
11- state.xml ( 18940 bytes, from 2016-09-06 14:14:12)
12- common.xml ( 20583 bytes, from 2016-09-06 14:14:12)
13- state_hi.xml ( 25653 bytes, from 2016-09-06 14:45:17)
14- copyright.xml ( 1597 bytes, from 2016-09-06 14:44:16)
15- state_2d.xml ( 51552 bytes, from 2016-09-06 14:44:16)
16- state_3d.xml ( 54603 bytes, from 2016-09-06 14:44:16)
17- state_vg.xml ( 5975 bytes, from 2016-09-06 14:44:16)
18
19Copyright (C) 2012-2016 by the following authors:
20- Wladimir J. van der Laan <laanwj@gmail.com>
21- Christian Gmeiner <christian.gmeiner@gmail.com>
22- Lucas Stach <l.stach@pengutronix.de>
23- Russell King <rmk@arm.linux.org.uk>
24
25Permission is hereby granted, free of charge, to any person obtaining a
26copy of this software and associated documentation files (the "Software"),
27to deal in the Software without restriction, including without limitation
28the rights to use, copy, modify, merge, publish, distribute, sub license,
29and/or sell copies of the Software, and to permit persons to whom the
30Software is furnished to do so, subject to the following conditions:
31
32The above copyright notice and this permission notice (including the
33next paragraph) shall be included in all copies or substantial portions
34of the Software.
35
36THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
37IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
38FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
39THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
40LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
41FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
42DEALINGS IN THE SOFTWARE.
43*/
44
45
46#define DE_FORMAT_X4R4G4B4 0x00000000
47#define DE_FORMAT_A4R4G4B4 0x00000001
48#define DE_FORMAT_X1R5G5B5 0x00000002
49#define DE_FORMAT_A1R5G5B5 0x00000003
50#define DE_FORMAT_R5G6B5 0x00000004
51#define DE_FORMAT_X8R8G8B8 0x00000005
52#define DE_FORMAT_A8R8G8B8 0x00000006
53#define DE_FORMAT_YUY2 0x00000007
54#define DE_FORMAT_UYVY 0x00000008
55#define DE_FORMAT_INDEX8 0x00000009
56#define DE_FORMAT_MONOCHROME 0x0000000a
57#define DE_FORMAT_YV12 0x0000000f
58#define DE_FORMAT_A8 0x00000010
59#define DE_FORMAT_NV12 0x00000011
60#define DE_FORMAT_NV16 0x00000012
61#define DE_FORMAT_RG16 0x00000013
62#define DE_SWIZZLE_ARGB 0x00000000
63#define DE_SWIZZLE_RGBA 0x00000001
64#define DE_SWIZZLE_ABGR 0x00000002
65#define DE_SWIZZLE_BGRA 0x00000003
66#define DE_BLENDMODE_ZERO 0x00000000
67#define DE_BLENDMODE_ONE 0x00000001
68#define DE_BLENDMODE_NORMAL 0x00000002
69#define DE_BLENDMODE_INVERSED 0x00000003
70#define DE_BLENDMODE_COLOR 0x00000004
71#define DE_BLENDMODE_COLOR_INVERSED 0x00000005
72#define DE_BLENDMODE_SATURATED_ALPHA 0x00000006
73#define DE_BLENDMODE_SATURATED_DEST_ALPHA 0x00000007
74#define DE_COMPONENT_BLUE 0x00000000
75#define DE_COMPONENT_GREEN 0x00000001
76#define DE_COMPONENT_RED 0x00000002
77#define DE_COMPONENT_ALPHA 0x00000003
78#define DE_ROT_MODE_ROT0 0x00000000
79#define DE_ROT_MODE_FLIP_X 0x00000001
80#define DE_ROT_MODE_FLIP_Y 0x00000002
81#define DE_ROT_MODE_ROT90 0x00000004
82#define DE_ROT_MODE_ROT180 0x00000005
83#define DE_ROT_MODE_ROT270 0x00000006
84#define DE_MIRROR_MODE_NONE 0x00000000
85#define DE_MIRROR_MODE_MIRROR_X 0x00000001
86#define DE_MIRROR_MODE_MIRROR_Y 0x00000002
87#define DE_MIRROR_MODE_MIRROR_XY 0x00000003
88#define DE_COLOR_BLUE__MASK 0x000000ff
89#define DE_COLOR_BLUE__SHIFT 0
90#define DE_COLOR_BLUE(x) (((x) << DE_COLOR_BLUE__SHIFT) & DE_COLOR_BLUE__MASK)
91#define DE_COLOR_GREEN__MASK 0x0000ff00
92#define DE_COLOR_GREEN__SHIFT 8
93#define DE_COLOR_GREEN(x) (((x) << DE_COLOR_GREEN__SHIFT) & DE_COLOR_GREEN__MASK)
94#define DE_COLOR_RED__MASK 0x00ff0000
95#define DE_COLOR_RED__SHIFT 16
96#define DE_COLOR_RED(x) (((x) << DE_COLOR_RED__SHIFT) & DE_COLOR_RED__MASK)
97#define DE_COLOR_ALPHA__MASK 0xff000000
98#define DE_COLOR_ALPHA__SHIFT 24
99#define DE_COLOR_ALPHA(x) (((x) << DE_COLOR_ALPHA__SHIFT) & DE_COLOR_ALPHA__MASK)
100#define VIVS_DE 0x00000000
101
102#define VIVS_DE_SRC_ADDRESS 0x00001200
103
104#define VIVS_DE_SRC_STRIDE 0x00001204
105#define VIVS_DE_SRC_STRIDE_STRIDE__MASK 0x0003ffff
106#define VIVS_DE_SRC_STRIDE_STRIDE__SHIFT 0
107#define VIVS_DE_SRC_STRIDE_STRIDE(x) (((x) << VIVS_DE_SRC_STRIDE_STRIDE__SHIFT) & VIVS_DE_SRC_STRIDE_STRIDE__MASK)
108
109#define VIVS_DE_SRC_ROTATION_CONFIG 0x00001208
110#define VIVS_DE_SRC_ROTATION_CONFIG_WIDTH__MASK 0x0000ffff
111#define VIVS_DE_SRC_ROTATION_CONFIG_WIDTH__SHIFT 0
112#define VIVS_DE_SRC_ROTATION_CONFIG_WIDTH(x) (((x) << VIVS_DE_SRC_ROTATION_CONFIG_WIDTH__SHIFT) & VIVS_DE_SRC_ROTATION_CONFIG_WIDTH__MASK)
113#define VIVS_DE_SRC_ROTATION_CONFIG_ROTATION__MASK 0x00010000
114#define VIVS_DE_SRC_ROTATION_CONFIG_ROTATION__SHIFT 16
115#define VIVS_DE_SRC_ROTATION_CONFIG_ROTATION_DISABLE 0x00000000
116#define VIVS_DE_SRC_ROTATION_CONFIG_ROTATION_ENABLE 0x00010000
117
118#define VIVS_DE_SRC_CONFIG 0x0000120c
119#define VIVS_DE_SRC_CONFIG_PE10_SOURCE_FORMAT__MASK 0x0000000f
120#define VIVS_DE_SRC_CONFIG_PE10_SOURCE_FORMAT__SHIFT 0
121#define VIVS_DE_SRC_CONFIG_PE10_SOURCE_FORMAT(x) (((x) << VIVS_DE_SRC_CONFIG_PE10_SOURCE_FORMAT__SHIFT) & VIVS_DE_SRC_CONFIG_PE10_SOURCE_FORMAT__MASK)
122#define VIVS_DE_SRC_CONFIG_TRANSPARENCY__MASK 0x00000030
123#define VIVS_DE_SRC_CONFIG_TRANSPARENCY__SHIFT 4
124#define VIVS_DE_SRC_CONFIG_TRANSPARENCY(x) (((x) << VIVS_DE_SRC_CONFIG_TRANSPARENCY__SHIFT) & VIVS_DE_SRC_CONFIG_TRANSPARENCY__MASK)
125#define VIVS_DE_SRC_CONFIG_SRC_RELATIVE__MASK 0x00000040
126#define VIVS_DE_SRC_CONFIG_SRC_RELATIVE__SHIFT 6
127#define VIVS_DE_SRC_CONFIG_SRC_RELATIVE_ABSOLUTE 0x00000000
128#define VIVS_DE_SRC_CONFIG_SRC_RELATIVE_RELATIVE 0x00000040
129#define VIVS_DE_SRC_CONFIG_TILED__MASK 0x00000080
130#define VIVS_DE_SRC_CONFIG_TILED__SHIFT 7
131#define VIVS_DE_SRC_CONFIG_TILED_DISABLE 0x00000000
132#define VIVS_DE_SRC_CONFIG_TILED_ENABLE 0x00000080
133#define VIVS_DE_SRC_CONFIG_LOCATION__MASK 0x00000100
134#define VIVS_DE_SRC_CONFIG_LOCATION__SHIFT 8
135#define VIVS_DE_SRC_CONFIG_LOCATION_MEMORY 0x00000000
136#define VIVS_DE_SRC_CONFIG_LOCATION_STREAM 0x00000100
137#define VIVS_DE_SRC_CONFIG_PACK__MASK 0x00003000
138#define VIVS_DE_SRC_CONFIG_PACK__SHIFT 12
139#define VIVS_DE_SRC_CONFIG_PACK_PACKED8 0x00000000
140#define VIVS_DE_SRC_CONFIG_PACK_PACKED16 0x00001000
141#define VIVS_DE_SRC_CONFIG_PACK_PACKED32 0x00002000
142#define VIVS_DE_SRC_CONFIG_PACK_UNPACKED 0x00003000
143#define VIVS_DE_SRC_CONFIG_MONO_TRANSPARENCY__MASK 0x00008000
144#define VIVS_DE_SRC_CONFIG_MONO_TRANSPARENCY__SHIFT 15
145#define VIVS_DE_SRC_CONFIG_MONO_TRANSPARENCY_BACKGROUND 0x00000000
146#define VIVS_DE_SRC_CONFIG_MONO_TRANSPARENCY_FOREGROUND 0x00008000
147#define VIVS_DE_SRC_CONFIG_UNK16 0x00010000
148#define VIVS_DE_SRC_CONFIG_SWIZZLE__MASK 0x00300000
149#define VIVS_DE_SRC_CONFIG_SWIZZLE__SHIFT 20
150#define VIVS_DE_SRC_CONFIG_SWIZZLE(x) (((x) << VIVS_DE_SRC_CONFIG_SWIZZLE__SHIFT) & VIVS_DE_SRC_CONFIG_SWIZZLE__MASK)
151#define VIVS_DE_SRC_CONFIG_SOURCE_FORMAT__MASK 0x1f000000
152#define VIVS_DE_SRC_CONFIG_SOURCE_FORMAT__SHIFT 24
153#define VIVS_DE_SRC_CONFIG_SOURCE_FORMAT(x) (((x) << VIVS_DE_SRC_CONFIG_SOURCE_FORMAT__SHIFT) & VIVS_DE_SRC_CONFIG_SOURCE_FORMAT__MASK)
154#define VIVS_DE_SRC_CONFIG_DISABLE420_L2_CACHE 0x20000000
155#define VIVS_DE_SRC_CONFIG_ENDIAN_CONTROL__MASK 0xc0000000
156#define VIVS_DE_SRC_CONFIG_ENDIAN_CONTROL__SHIFT 30
157#define VIVS_DE_SRC_CONFIG_ENDIAN_CONTROL(x) (((x) << VIVS_DE_SRC_CONFIG_ENDIAN_CONTROL__SHIFT) & VIVS_DE_SRC_CONFIG_ENDIAN_CONTROL__MASK)
158
159#define VIVS_DE_SRC_ORIGIN 0x00001210
160#define VIVS_DE_SRC_ORIGIN_X__MASK 0x0000ffff
161#define VIVS_DE_SRC_ORIGIN_X__SHIFT 0
162#define VIVS_DE_SRC_ORIGIN_X(x) (((x) << VIVS_DE_SRC_ORIGIN_X__SHIFT) & VIVS_DE_SRC_ORIGIN_X__MASK)
163#define VIVS_DE_SRC_ORIGIN_Y__MASK 0xffff0000
164#define VIVS_DE_SRC_ORIGIN_Y__SHIFT 16
165#define VIVS_DE_SRC_ORIGIN_Y(x) (((x) << VIVS_DE_SRC_ORIGIN_Y__SHIFT) & VIVS_DE_SRC_ORIGIN_Y__MASK)
166
167#define VIVS_DE_SRC_SIZE 0x00001214
168#define VIVS_DE_SRC_SIZE_X__MASK 0x0000ffff
169#define VIVS_DE_SRC_SIZE_X__SHIFT 0
170#define VIVS_DE_SRC_SIZE_X(x) (((x) << VIVS_DE_SRC_SIZE_X__SHIFT) & VIVS_DE_SRC_SIZE_X__MASK)
171#define VIVS_DE_SRC_SIZE_Y__MASK 0xffff0000
172#define VIVS_DE_SRC_SIZE_Y__SHIFT 16
173#define VIVS_DE_SRC_SIZE_Y(x) (((x) << VIVS_DE_SRC_SIZE_Y__SHIFT) & VIVS_DE_SRC_SIZE_Y__MASK)
174
175#define VIVS_DE_SRC_COLOR_BG 0x00001218
176
177#define VIVS_DE_SRC_COLOR_FG 0x0000121c
178
179#define VIVS_DE_STRETCH_FACTOR_LOW 0x00001220
180#define VIVS_DE_STRETCH_FACTOR_LOW_X__MASK 0x7fffffff
181#define VIVS_DE_STRETCH_FACTOR_LOW_X__SHIFT 0
182#define VIVS_DE_STRETCH_FACTOR_LOW_X(x) (((x) << VIVS_DE_STRETCH_FACTOR_LOW_X__SHIFT) & VIVS_DE_STRETCH_FACTOR_LOW_X__MASK)
183
184#define VIVS_DE_STRETCH_FACTOR_HIGH 0x00001224
185#define VIVS_DE_STRETCH_FACTOR_HIGH_Y__MASK 0x7fffffff
186#define VIVS_DE_STRETCH_FACTOR_HIGH_Y__SHIFT 0
187#define VIVS_DE_STRETCH_FACTOR_HIGH_Y(x) (((x) << VIVS_DE_STRETCH_FACTOR_HIGH_Y__SHIFT) & VIVS_DE_STRETCH_FACTOR_HIGH_Y__MASK)
188
189#define VIVS_DE_DEST_ADDRESS 0x00001228
190
191#define VIVS_DE_DEST_STRIDE 0x0000122c
192#define VIVS_DE_DEST_STRIDE_STRIDE__MASK 0x0003ffff
193#define VIVS_DE_DEST_STRIDE_STRIDE__SHIFT 0
194#define VIVS_DE_DEST_STRIDE_STRIDE(x) (((x) << VIVS_DE_DEST_STRIDE_STRIDE__SHIFT) & VIVS_DE_DEST_STRIDE_STRIDE__MASK)
195
196#define VIVS_DE_DEST_ROTATION_CONFIG 0x00001230
197#define VIVS_DE_DEST_ROTATION_CONFIG_WIDTH__MASK 0x0000ffff
198#define VIVS_DE_DEST_ROTATION_CONFIG_WIDTH__SHIFT 0
199#define VIVS_DE_DEST_ROTATION_CONFIG_WIDTH(x) (((x) << VIVS_DE_DEST_ROTATION_CONFIG_WIDTH__SHIFT) & VIVS_DE_DEST_ROTATION_CONFIG_WIDTH__MASK)
200#define VIVS_DE_DEST_ROTATION_CONFIG_ROTATION__MASK 0x00010000
201#define VIVS_DE_DEST_ROTATION_CONFIG_ROTATION__SHIFT 16
202#define VIVS_DE_DEST_ROTATION_CONFIG_ROTATION_DISABLE 0x00000000
203#define VIVS_DE_DEST_ROTATION_CONFIG_ROTATION_ENABLE 0x00010000
204
205#define VIVS_DE_DEST_CONFIG 0x00001234
206#define VIVS_DE_DEST_CONFIG_FORMAT__MASK 0x0000001f
207#define VIVS_DE_DEST_CONFIG_FORMAT__SHIFT 0
208#define VIVS_DE_DEST_CONFIG_FORMAT(x) (((x) << VIVS_DE_DEST_CONFIG_FORMAT__SHIFT) & VIVS_DE_DEST_CONFIG_FORMAT__MASK)
209#define VIVS_DE_DEST_CONFIG_TILED__MASK 0x00000100
210#define VIVS_DE_DEST_CONFIG_TILED__SHIFT 8
211#define VIVS_DE_DEST_CONFIG_TILED_DISABLE 0x00000000
212#define VIVS_DE_DEST_CONFIG_TILED_ENABLE 0x00000100
213#define VIVS_DE_DEST_CONFIG_COMMAND__MASK 0x0000f000
214#define VIVS_DE_DEST_CONFIG_COMMAND__SHIFT 12
215#define VIVS_DE_DEST_CONFIG_COMMAND_CLEAR 0x00000000
216#define VIVS_DE_DEST_CONFIG_COMMAND_LINE 0x00001000
217#define VIVS_DE_DEST_CONFIG_COMMAND_BIT_BLT 0x00002000
218#define VIVS_DE_DEST_CONFIG_COMMAND_BIT_BLT_REVERSED 0x00003000
219#define VIVS_DE_DEST_CONFIG_COMMAND_STRETCH_BLT 0x00004000
220#define VIVS_DE_DEST_CONFIG_COMMAND_HOR_FILTER_BLT 0x00005000
221#define VIVS_DE_DEST_CONFIG_COMMAND_VER_FILTER_BLT 0x00006000
222#define VIVS_DE_DEST_CONFIG_COMMAND_ONE_PASS_FILTER_BLT 0x00007000
223#define VIVS_DE_DEST_CONFIG_COMMAND_MULTI_SOURCE_BLT 0x00008000
224#define VIVS_DE_DEST_CONFIG_SWIZZLE__MASK 0x00030000
225#define VIVS_DE_DEST_CONFIG_SWIZZLE__SHIFT 16
226#define VIVS_DE_DEST_CONFIG_SWIZZLE(x) (((x) << VIVS_DE_DEST_CONFIG_SWIZZLE__SHIFT) & VIVS_DE_DEST_CONFIG_SWIZZLE__MASK)
227#define VIVS_DE_DEST_CONFIG_ENDIAN_CONTROL__MASK 0x00300000
228#define VIVS_DE_DEST_CONFIG_ENDIAN_CONTROL__SHIFT 20
229#define VIVS_DE_DEST_CONFIG_ENDIAN_CONTROL(x) (((x) << VIVS_DE_DEST_CONFIG_ENDIAN_CONTROL__SHIFT) & VIVS_DE_DEST_CONFIG_ENDIAN_CONTROL__MASK)
230#define VIVS_DE_DEST_CONFIG_GDI_STRE__MASK 0x01000000
231#define VIVS_DE_DEST_CONFIG_GDI_STRE__SHIFT 24
232#define VIVS_DE_DEST_CONFIG_GDI_STRE_DISABLE 0x00000000
233#define VIVS_DE_DEST_CONFIG_GDI_STRE_ENABLE 0x01000000
234#define VIVS_DE_DEST_CONFIG_INTER_TILE_PER_FIX__MASK 0x02000000
235#define VIVS_DE_DEST_CONFIG_INTER_TILE_PER_FIX__SHIFT 25
236#define VIVS_DE_DEST_CONFIG_INTER_TILE_PER_FIX_DISABLED 0x02000000
237#define VIVS_DE_DEST_CONFIG_INTER_TILE_PER_FIX_ENABLED 0x00000000
238#define VIVS_DE_DEST_CONFIG_MINOR_TILED__MASK 0x04000000
239#define VIVS_DE_DEST_CONFIG_MINOR_TILED__SHIFT 26
240#define VIVS_DE_DEST_CONFIG_MINOR_TILED_DISABLE 0x00000000
241#define VIVS_DE_DEST_CONFIG_MINOR_TILED_ENABLE 0x04000000
242
243#define VIVS_DE_PATTERN_ADDRESS 0x00001238
244
245#define VIVS_DE_PATTERN_CONFIG 0x0000123c
246#define VIVS_DE_PATTERN_CONFIG_FORMAT__MASK 0x0000000f
247#define VIVS_DE_PATTERN_CONFIG_FORMAT__SHIFT 0
248#define VIVS_DE_PATTERN_CONFIG_FORMAT(x) (((x) << VIVS_DE_PATTERN_CONFIG_FORMAT__SHIFT) & VIVS_DE_PATTERN_CONFIG_FORMAT__MASK)
249#define VIVS_DE_PATTERN_CONFIG_TYPE__MASK 0x00000010
250#define VIVS_DE_PATTERN_CONFIG_TYPE__SHIFT 4
251#define VIVS_DE_PATTERN_CONFIG_TYPE_SOLID_COLOR 0x00000000
252#define VIVS_DE_PATTERN_CONFIG_TYPE_PATTERN 0x00000010
253#define VIVS_DE_PATTERN_CONFIG_COLOR_CONVERT__MASK 0x00000020
254#define VIVS_DE_PATTERN_CONFIG_COLOR_CONVERT__SHIFT 5
255#define VIVS_DE_PATTERN_CONFIG_COLOR_CONVERT_DISABLE 0x00000000
256#define VIVS_DE_PATTERN_CONFIG_COLOR_CONVERT_ENABLE 0x00000020
257#define VIVS_DE_PATTERN_CONFIG_INIT_TRIGGER__MASK 0x000000c0
258#define VIVS_DE_PATTERN_CONFIG_INIT_TRIGGER__SHIFT 6
259#define VIVS_DE_PATTERN_CONFIG_INIT_TRIGGER(x) (((x) << VIVS_DE_PATTERN_CONFIG_INIT_TRIGGER__SHIFT) & VIVS_DE_PATTERN_CONFIG_INIT_TRIGGER__MASK)
260#define VIVS_DE_PATTERN_CONFIG_ORIGIN_X__MASK 0x00070000
261#define VIVS_DE_PATTERN_CONFIG_ORIGIN_X__SHIFT 16
262#define VIVS_DE_PATTERN_CONFIG_ORIGIN_X(x) (((x) << VIVS_DE_PATTERN_CONFIG_ORIGIN_X__SHIFT) & VIVS_DE_PATTERN_CONFIG_ORIGIN_X__MASK)
263#define VIVS_DE_PATTERN_CONFIG_ORIGIN_Y__MASK 0x00700000
264#define VIVS_DE_PATTERN_CONFIG_ORIGIN_Y__SHIFT 20
265#define VIVS_DE_PATTERN_CONFIG_ORIGIN_Y(x) (((x) << VIVS_DE_PATTERN_CONFIG_ORIGIN_Y__SHIFT) & VIVS_DE_PATTERN_CONFIG_ORIGIN_Y__MASK)
266
267#define VIVS_DE_PATTERN_LOW 0x00001240
268
269#define VIVS_DE_PATTERN_HIGH 0x00001244
270
271#define VIVS_DE_PATTERN_MASK_LOW 0x00001248
272
273#define VIVS_DE_PATTERN_MASK_HIGH 0x0000124c
274
275#define VIVS_DE_PATTERN_BG_COLOR 0x00001250
276
277#define VIVS_DE_PATTERN_FG_COLOR 0x00001254
278
279#define VIVS_DE_ROP 0x0000125c
280#define VIVS_DE_ROP_ROP_FG__MASK 0x000000ff
281#define VIVS_DE_ROP_ROP_FG__SHIFT 0
282#define VIVS_DE_ROP_ROP_FG(x) (((x) << VIVS_DE_ROP_ROP_FG__SHIFT) & VIVS_DE_ROP_ROP_FG__MASK)
283#define VIVS_DE_ROP_ROP_BG__MASK 0x0000ff00
284#define VIVS_DE_ROP_ROP_BG__SHIFT 8
285#define VIVS_DE_ROP_ROP_BG(x) (((x) << VIVS_DE_ROP_ROP_BG__SHIFT) & VIVS_DE_ROP_ROP_BG__MASK)
286#define VIVS_DE_ROP_TYPE__MASK 0x00300000
287#define VIVS_DE_ROP_TYPE__SHIFT 20
288#define VIVS_DE_ROP_TYPE_ROP2_PATTERN 0x00000000
289#define VIVS_DE_ROP_TYPE_ROP2_SOURCE 0x00100000
290#define VIVS_DE_ROP_TYPE_ROP3 0x00200000
291#define VIVS_DE_ROP_TYPE_ROP4 0x00300000
292
293#define VIVS_DE_CLIP_TOP_LEFT 0x00001260
294#define VIVS_DE_CLIP_TOP_LEFT_X__MASK 0x00007fff
295#define VIVS_DE_CLIP_TOP_LEFT_X__SHIFT 0
296#define VIVS_DE_CLIP_TOP_LEFT_X(x) (((x) << VIVS_DE_CLIP_TOP_LEFT_X__SHIFT) & VIVS_DE_CLIP_TOP_LEFT_X__MASK)
297#define VIVS_DE_CLIP_TOP_LEFT_Y__MASK 0x7fff0000
298#define VIVS_DE_CLIP_TOP_LEFT_Y__SHIFT 16
299#define VIVS_DE_CLIP_TOP_LEFT_Y(x) (((x) << VIVS_DE_CLIP_TOP_LEFT_Y__SHIFT) & VIVS_DE_CLIP_TOP_LEFT_Y__MASK)
300
301#define VIVS_DE_CLIP_BOTTOM_RIGHT 0x00001264
302#define VIVS_DE_CLIP_BOTTOM_RIGHT_X__MASK 0x00007fff
303#define VIVS_DE_CLIP_BOTTOM_RIGHT_X__SHIFT 0
304#define VIVS_DE_CLIP_BOTTOM_RIGHT_X(x) (((x) << VIVS_DE_CLIP_BOTTOM_RIGHT_X__SHIFT) & VIVS_DE_CLIP_BOTTOM_RIGHT_X__MASK)
305#define VIVS_DE_CLIP_BOTTOM_RIGHT_Y__MASK 0x7fff0000
306#define VIVS_DE_CLIP_BOTTOM_RIGHT_Y__SHIFT 16
307#define VIVS_DE_CLIP_BOTTOM_RIGHT_Y(x) (((x) << VIVS_DE_CLIP_BOTTOM_RIGHT_Y__SHIFT) & VIVS_DE_CLIP_BOTTOM_RIGHT_Y__MASK)
308
309#define VIVS_DE_CLEAR_BYTE_MASK 0x00001268
310
311#define VIVS_DE_CONFIG 0x0000126c
312#define VIVS_DE_CONFIG_MIRROR_BLT_ENABLE__MASK 0x00000001
313#define VIVS_DE_CONFIG_MIRROR_BLT_ENABLE__SHIFT 0
314#define VIVS_DE_CONFIG_MIRROR_BLT_ENABLE_OFF 0x00000000
315#define VIVS_DE_CONFIG_MIRROR_BLT_ENABLE_ON 0x00000001
316#define VIVS_DE_CONFIG_MIRROR_BLT_MODE__MASK 0x00000030
317#define VIVS_DE_CONFIG_MIRROR_BLT_MODE__SHIFT 4
318#define VIVS_DE_CONFIG_MIRROR_BLT_MODE_NORMAL 0x00000000
319#define VIVS_DE_CONFIG_MIRROR_BLT_MODE_HMIRROR 0x00000010
320#define VIVS_DE_CONFIG_MIRROR_BLT_MODE_VMIRROR 0x00000020
321#define VIVS_DE_CONFIG_MIRROR_BLT_MODE_FULL_MIRROR 0x00000030
322#define VIVS_DE_CONFIG_SOURCE_SELECT__MASK 0x00070000
323#define VIVS_DE_CONFIG_SOURCE_SELECT__SHIFT 16
324#define VIVS_DE_CONFIG_SOURCE_SELECT(x) (((x) << VIVS_DE_CONFIG_SOURCE_SELECT__SHIFT) & VIVS_DE_CONFIG_SOURCE_SELECT__MASK)
325#define VIVS_DE_CONFIG_DESTINATION_SELECT__MASK 0x00300000
326#define VIVS_DE_CONFIG_DESTINATION_SELECT__SHIFT 20
327#define VIVS_DE_CONFIG_DESTINATION_SELECT(x) (((x) << VIVS_DE_CONFIG_DESTINATION_SELECT__SHIFT) & VIVS_DE_CONFIG_DESTINATION_SELECT__MASK)
328
329#define VIVS_DE_CLEAR_PIXEL_VALUE_LOW 0x00001270
330
331#define VIVS_DE_CLEAR_PIXEL_VALUE_HIGH 0x00001274
332
333#define VIVS_DE_SRC_ORIGIN_FRACTION 0x00001278
334#define VIVS_DE_SRC_ORIGIN_FRACTION_X__MASK 0x0000ffff
335#define VIVS_DE_SRC_ORIGIN_FRACTION_X__SHIFT 0
336#define VIVS_DE_SRC_ORIGIN_FRACTION_X(x) (((x) << VIVS_DE_SRC_ORIGIN_FRACTION_X__SHIFT) & VIVS_DE_SRC_ORIGIN_FRACTION_X__MASK)
337#define VIVS_DE_SRC_ORIGIN_FRACTION_Y__MASK 0xffff0000
338#define VIVS_DE_SRC_ORIGIN_FRACTION_Y__SHIFT 16
339#define VIVS_DE_SRC_ORIGIN_FRACTION_Y(x) (((x) << VIVS_DE_SRC_ORIGIN_FRACTION_Y__SHIFT) & VIVS_DE_SRC_ORIGIN_FRACTION_Y__MASK)
340
341#define VIVS_DE_ALPHA_CONTROL 0x0000127c
342#define VIVS_DE_ALPHA_CONTROL_ENABLE__MASK 0x00000001
343#define VIVS_DE_ALPHA_CONTROL_ENABLE__SHIFT 0
344#define VIVS_DE_ALPHA_CONTROL_ENABLE_OFF 0x00000000
345#define VIVS_DE_ALPHA_CONTROL_ENABLE_ON 0x00000001
346#define VIVS_DE_ALPHA_CONTROL_PE10_GLOBAL_SRC_ALPHA__MASK 0x00ff0000
347#define VIVS_DE_ALPHA_CONTROL_PE10_GLOBAL_SRC_ALPHA__SHIFT 16
348#define VIVS_DE_ALPHA_CONTROL_PE10_GLOBAL_SRC_ALPHA(x) (((x) << VIVS_DE_ALPHA_CONTROL_PE10_GLOBAL_SRC_ALPHA__SHIFT) & VIVS_DE_ALPHA_CONTROL_PE10_GLOBAL_SRC_ALPHA__MASK)
349#define VIVS_DE_ALPHA_CONTROL_PE10_GLOBAL_DST_ALPHA__MASK 0xff000000
350#define VIVS_DE_ALPHA_CONTROL_PE10_GLOBAL_DST_ALPHA__SHIFT 24
351#define VIVS_DE_ALPHA_CONTROL_PE10_GLOBAL_DST_ALPHA(x) (((x) << VIVS_DE_ALPHA_CONTROL_PE10_GLOBAL_DST_ALPHA__SHIFT) & VIVS_DE_ALPHA_CONTROL_PE10_GLOBAL_DST_ALPHA__MASK)
352
353#define VIVS_DE_ALPHA_MODES 0x00001280
354#define VIVS_DE_ALPHA_MODES_SRC_ALPHA_MODE__MASK 0x00000001
355#define VIVS_DE_ALPHA_MODES_SRC_ALPHA_MODE__SHIFT 0
356#define VIVS_DE_ALPHA_MODES_SRC_ALPHA_MODE_NORMAL 0x00000000
357#define VIVS_DE_ALPHA_MODES_SRC_ALPHA_MODE_INVERSED 0x00000001
358#define VIVS_DE_ALPHA_MODES_DST_ALPHA_MODE__MASK 0x00000010
359#define VIVS_DE_ALPHA_MODES_DST_ALPHA_MODE__SHIFT 4
360#define VIVS_DE_ALPHA_MODES_DST_ALPHA_MODE_NORMAL 0x00000000
361#define VIVS_DE_ALPHA_MODES_DST_ALPHA_MODE_INVERSED 0x00000010
362#define VIVS_DE_ALPHA_MODES_GLOBAL_SRC_ALPHA_MODE__MASK 0x00000300
363#define VIVS_DE_ALPHA_MODES_GLOBAL_SRC_ALPHA_MODE__SHIFT 8
364#define VIVS_DE_ALPHA_MODES_GLOBAL_SRC_ALPHA_MODE_NORMAL 0x00000000
365#define VIVS_DE_ALPHA_MODES_GLOBAL_SRC_ALPHA_MODE_GLOBAL 0x00000100
366#define VIVS_DE_ALPHA_MODES_GLOBAL_SRC_ALPHA_MODE_SCALED 0x00000200
367#define VIVS_DE_ALPHA_MODES_GLOBAL_DST_ALPHA_MODE__MASK 0x00003000
368#define VIVS_DE_ALPHA_MODES_GLOBAL_DST_ALPHA_MODE__SHIFT 12
369#define VIVS_DE_ALPHA_MODES_GLOBAL_DST_ALPHA_MODE_NORMAL 0x00000000
370#define VIVS_DE_ALPHA_MODES_GLOBAL_DST_ALPHA_MODE_GLOBAL 0x00001000
371#define VIVS_DE_ALPHA_MODES_GLOBAL_DST_ALPHA_MODE_SCALED 0x00002000
372#define VIVS_DE_ALPHA_MODES_PE10_SRC_COLOR_MULTIPLY__MASK 0x00010000
373#define VIVS_DE_ALPHA_MODES_PE10_SRC_COLOR_MULTIPLY__SHIFT 16
374#define VIVS_DE_ALPHA_MODES_PE10_SRC_COLOR_MULTIPLY_DISABLE 0x00000000
375#define VIVS_DE_ALPHA_MODES_PE10_SRC_COLOR_MULTIPLY_ENABLE 0x00010000
376#define VIVS_DE_ALPHA_MODES_PE10_DST_COLOR_MULTIPLY__MASK 0x00100000
377#define VIVS_DE_ALPHA_MODES_PE10_DST_COLOR_MULTIPLY__SHIFT 20
378#define VIVS_DE_ALPHA_MODES_PE10_DST_COLOR_MULTIPLY_DISABLE 0x00000000
379#define VIVS_DE_ALPHA_MODES_PE10_DST_COLOR_MULTIPLY_ENABLE 0x00100000
380#define VIVS_DE_ALPHA_MODES_SRC_BLENDING_MODE__MASK 0x07000000
381#define VIVS_DE_ALPHA_MODES_SRC_BLENDING_MODE__SHIFT 24
382#define VIVS_DE_ALPHA_MODES_SRC_BLENDING_MODE(x) (((x) << VIVS_DE_ALPHA_MODES_SRC_BLENDING_MODE__SHIFT) & VIVS_DE_ALPHA_MODES_SRC_BLENDING_MODE__MASK)
383#define VIVS_DE_ALPHA_MODES_SRC_ALPHA_FACTOR__MASK 0x08000000
384#define VIVS_DE_ALPHA_MODES_SRC_ALPHA_FACTOR__SHIFT 27
385#define VIVS_DE_ALPHA_MODES_SRC_ALPHA_FACTOR_DISABLE 0x00000000
386#define VIVS_DE_ALPHA_MODES_SRC_ALPHA_FACTOR_ENABLE 0x08000000
387#define VIVS_DE_ALPHA_MODES_DST_BLENDING_MODE__MASK 0x70000000
388#define VIVS_DE_ALPHA_MODES_DST_BLENDING_MODE__SHIFT 28
389#define VIVS_DE_ALPHA_MODES_DST_BLENDING_MODE(x) (((x) << VIVS_DE_ALPHA_MODES_DST_BLENDING_MODE__SHIFT) & VIVS_DE_ALPHA_MODES_DST_BLENDING_MODE__MASK)
390#define VIVS_DE_ALPHA_MODES_DST_ALPHA_FACTOR__MASK 0x80000000
391#define VIVS_DE_ALPHA_MODES_DST_ALPHA_FACTOR__SHIFT 31
392#define VIVS_DE_ALPHA_MODES_DST_ALPHA_FACTOR_DISABLE 0x00000000
393#define VIVS_DE_ALPHA_MODES_DST_ALPHA_FACTOR_ENABLE 0x80000000
394
395#define VIVS_DE_UPLANE_ADDRESS 0x00001284
396
397#define VIVS_DE_UPLANE_STRIDE 0x00001288
398#define VIVS_DE_UPLANE_STRIDE_STRIDE__MASK 0x0003ffff
399#define VIVS_DE_UPLANE_STRIDE_STRIDE__SHIFT 0
400#define VIVS_DE_UPLANE_STRIDE_STRIDE(x) (((x) << VIVS_DE_UPLANE_STRIDE_STRIDE__SHIFT) & VIVS_DE_UPLANE_STRIDE_STRIDE__MASK)
401
402#define VIVS_DE_VPLANE_ADDRESS 0x0000128c
403
404#define VIVS_DE_VPLANE_STRIDE 0x00001290
405#define VIVS_DE_VPLANE_STRIDE_STRIDE__MASK 0x0003ffff
406#define VIVS_DE_VPLANE_STRIDE_STRIDE__SHIFT 0
407#define VIVS_DE_VPLANE_STRIDE_STRIDE(x) (((x) << VIVS_DE_VPLANE_STRIDE_STRIDE__SHIFT) & VIVS_DE_VPLANE_STRIDE_STRIDE__MASK)
408
409#define VIVS_DE_VR_CONFIG 0x00001294
410#define VIVS_DE_VR_CONFIG_START__MASK 0x00000003
411#define VIVS_DE_VR_CONFIG_START__SHIFT 0
412#define VIVS_DE_VR_CONFIG_START_HORIZONTAL_BLIT 0x00000000
413#define VIVS_DE_VR_CONFIG_START_VERTICAL_BLIT 0x00000001
414#define VIVS_DE_VR_CONFIG_START_ONE_PASS_BLIT 0x00000002
415#define VIVS_DE_VR_CONFIG_START_MASK 0x00000008
416
417#define VIVS_DE_VR_SOURCE_IMAGE_LOW 0x00001298
418#define VIVS_DE_VR_SOURCE_IMAGE_LOW_LEFT__MASK 0x0000ffff
419#define VIVS_DE_VR_SOURCE_IMAGE_LOW_LEFT__SHIFT 0
420#define VIVS_DE_VR_SOURCE_IMAGE_LOW_LEFT(x) (((x) << VIVS_DE_VR_SOURCE_IMAGE_LOW_LEFT__SHIFT) & VIVS_DE_VR_SOURCE_IMAGE_LOW_LEFT__MASK)
421#define VIVS_DE_VR_SOURCE_IMAGE_LOW_TOP__MASK 0xffff0000
422#define VIVS_DE_VR_SOURCE_IMAGE_LOW_TOP__SHIFT 16
423#define VIVS_DE_VR_SOURCE_IMAGE_LOW_TOP(x) (((x) << VIVS_DE_VR_SOURCE_IMAGE_LOW_TOP__SHIFT) & VIVS_DE_VR_SOURCE_IMAGE_LOW_TOP__MASK)
424
425#define VIVS_DE_VR_SOURCE_IMAGE_HIGH 0x0000129c
426#define VIVS_DE_VR_SOURCE_IMAGE_HIGH_RIGHT__MASK 0x0000ffff
427#define VIVS_DE_VR_SOURCE_IMAGE_HIGH_RIGHT__SHIFT 0
428#define VIVS_DE_VR_SOURCE_IMAGE_HIGH_RIGHT(x) (((x) << VIVS_DE_VR_SOURCE_IMAGE_HIGH_RIGHT__SHIFT) & VIVS_DE_VR_SOURCE_IMAGE_HIGH_RIGHT__MASK)
429#define VIVS_DE_VR_SOURCE_IMAGE_HIGH_BOTTOM__MASK 0xffff0000
430#define VIVS_DE_VR_SOURCE_IMAGE_HIGH_BOTTOM__SHIFT 16
431#define VIVS_DE_VR_SOURCE_IMAGE_HIGH_BOTTOM(x) (((x) << VIVS_DE_VR_SOURCE_IMAGE_HIGH_BOTTOM__SHIFT) & VIVS_DE_VR_SOURCE_IMAGE_HIGH_BOTTOM__MASK)
432
433#define VIVS_DE_VR_SOURCE_ORIGIN_LOW 0x000012a0
434#define VIVS_DE_VR_SOURCE_ORIGIN_LOW_X__MASK 0xffffffff
435#define VIVS_DE_VR_SOURCE_ORIGIN_LOW_X__SHIFT 0
436#define VIVS_DE_VR_SOURCE_ORIGIN_LOW_X(x) (((x) << VIVS_DE_VR_SOURCE_ORIGIN_LOW_X__SHIFT) & VIVS_DE_VR_SOURCE_ORIGIN_LOW_X__MASK)
437
438#define VIVS_DE_VR_SOURCE_ORIGIN_HIGH 0x000012a4
439#define VIVS_DE_VR_SOURCE_ORIGIN_HIGH_Y__MASK 0xffffffff
440#define VIVS_DE_VR_SOURCE_ORIGIN_HIGH_Y__SHIFT 0
441#define VIVS_DE_VR_SOURCE_ORIGIN_HIGH_Y(x) (((x) << VIVS_DE_VR_SOURCE_ORIGIN_HIGH_Y__SHIFT) & VIVS_DE_VR_SOURCE_ORIGIN_HIGH_Y__MASK)
442
443#define VIVS_DE_VR_TARGET_WINDOW_LOW 0x000012a8
444#define VIVS_DE_VR_TARGET_WINDOW_LOW_LEFT__MASK 0x0000ffff
445#define VIVS_DE_VR_TARGET_WINDOW_LOW_LEFT__SHIFT 0
446#define VIVS_DE_VR_TARGET_WINDOW_LOW_LEFT(x) (((x) << VIVS_DE_VR_TARGET_WINDOW_LOW_LEFT__SHIFT) & VIVS_DE_VR_TARGET_WINDOW_LOW_LEFT__MASK)
447#define VIVS_DE_VR_TARGET_WINDOW_LOW_TOP__MASK 0xffff0000
448#define VIVS_DE_VR_TARGET_WINDOW_LOW_TOP__SHIFT 16
449#define VIVS_DE_VR_TARGET_WINDOW_LOW_TOP(x) (((x) << VIVS_DE_VR_TARGET_WINDOW_LOW_TOP__SHIFT) & VIVS_DE_VR_TARGET_WINDOW_LOW_TOP__MASK)
450
451#define VIVS_DE_VR_TARGET_WINDOW_HIGH 0x000012ac
452#define VIVS_DE_VR_TARGET_WINDOW_HIGH_RIGHT__MASK 0x0000ffff
453#define VIVS_DE_VR_TARGET_WINDOW_HIGH_RIGHT__SHIFT 0
454#define VIVS_DE_VR_TARGET_WINDOW_HIGH_RIGHT(x) (((x) << VIVS_DE_VR_TARGET_WINDOW_HIGH_RIGHT__SHIFT) & VIVS_DE_VR_TARGET_WINDOW_HIGH_RIGHT__MASK)
455#define VIVS_DE_VR_TARGET_WINDOW_HIGH_BOTTOM__MASK 0xffff0000
456#define VIVS_DE_VR_TARGET_WINDOW_HIGH_BOTTOM__SHIFT 16
457#define VIVS_DE_VR_TARGET_WINDOW_HIGH_BOTTOM(x) (((x) << VIVS_DE_VR_TARGET_WINDOW_HIGH_BOTTOM__SHIFT) & VIVS_DE_VR_TARGET_WINDOW_HIGH_BOTTOM__MASK)
458
459#define VIVS_DE_PE_CONFIG 0x000012b0
460#define VIVS_DE_PE_CONFIG_DESTINATION_FETCH__MASK 0x00000003
461#define VIVS_DE_PE_CONFIG_DESTINATION_FETCH__SHIFT 0
462#define VIVS_DE_PE_CONFIG_DESTINATION_FETCH_DISABLE 0x00000000
463#define VIVS_DE_PE_CONFIG_DESTINATION_FETCH_DEFAULT 0x00000001
464#define VIVS_DE_PE_CONFIG_DESTINATION_FETCH_ALWAYS 0x00000002
465#define VIVS_DE_PE_CONFIG_DESTINATION_FETCH_MASK 0x00000008
466
467#define VIVS_DE_DEST_ROTATION_HEIGHT 0x000012b4
468#define VIVS_DE_DEST_ROTATION_HEIGHT_HEIGHT__MASK 0x0000ffff
469#define VIVS_DE_DEST_ROTATION_HEIGHT_HEIGHT__SHIFT 0
470#define VIVS_DE_DEST_ROTATION_HEIGHT_HEIGHT(x) (((x) << VIVS_DE_DEST_ROTATION_HEIGHT_HEIGHT__SHIFT) & VIVS_DE_DEST_ROTATION_HEIGHT_HEIGHT__MASK)
471
472#define VIVS_DE_SRC_ROTATION_HEIGHT 0x000012b8
473#define VIVS_DE_SRC_ROTATION_HEIGHT_HEIGHT__MASK 0x0000ffff
474#define VIVS_DE_SRC_ROTATION_HEIGHT_HEIGHT__SHIFT 0
475#define VIVS_DE_SRC_ROTATION_HEIGHT_HEIGHT(x) (((x) << VIVS_DE_SRC_ROTATION_HEIGHT_HEIGHT__SHIFT) & VIVS_DE_SRC_ROTATION_HEIGHT_HEIGHT__MASK)
476
477#define VIVS_DE_ROT_ANGLE 0x000012bc
478#define VIVS_DE_ROT_ANGLE_SRC__MASK 0x00000007
479#define VIVS_DE_ROT_ANGLE_SRC__SHIFT 0
480#define VIVS_DE_ROT_ANGLE_SRC(x) (((x) << VIVS_DE_ROT_ANGLE_SRC__SHIFT) & VIVS_DE_ROT_ANGLE_SRC__MASK)
481#define VIVS_DE_ROT_ANGLE_DST__MASK 0x00000038
482#define VIVS_DE_ROT_ANGLE_DST__SHIFT 3
483#define VIVS_DE_ROT_ANGLE_DST(x) (((x) << VIVS_DE_ROT_ANGLE_DST__SHIFT) & VIVS_DE_ROT_ANGLE_DST__MASK)
484#define VIVS_DE_ROT_ANGLE_SRC_MASK 0x00000100
485#define VIVS_DE_ROT_ANGLE_DST_MASK 0x00000200
486#define VIVS_DE_ROT_ANGLE_SRC_MIRROR__MASK 0x00003000
487#define VIVS_DE_ROT_ANGLE_SRC_MIRROR__SHIFT 12
488#define VIVS_DE_ROT_ANGLE_SRC_MIRROR(x) (((x) << VIVS_DE_ROT_ANGLE_SRC_MIRROR__SHIFT) & VIVS_DE_ROT_ANGLE_SRC_MIRROR__MASK)
489#define VIVS_DE_ROT_ANGLE_SRC_MIRROR_MASK 0x00008000
490#define VIVS_DE_ROT_ANGLE_DST_MIRROR__MASK 0x00030000
491#define VIVS_DE_ROT_ANGLE_DST_MIRROR__SHIFT 16
492#define VIVS_DE_ROT_ANGLE_DST_MIRROR(x) (((x) << VIVS_DE_ROT_ANGLE_DST_MIRROR__SHIFT) & VIVS_DE_ROT_ANGLE_DST_MIRROR__MASK)
493#define VIVS_DE_ROT_ANGLE_DST_MIRROR_MASK 0x00080000
494
495#define VIVS_DE_CLEAR_PIXEL_VALUE32 0x000012c0
496
497#define VIVS_DE_DEST_COLOR_KEY 0x000012c4
498
499#define VIVS_DE_GLOBAL_SRC_COLOR 0x000012c8
500
501#define VIVS_DE_GLOBAL_DEST_COLOR 0x000012cc
502
503#define VIVS_DE_COLOR_MULTIPLY_MODES 0x000012d0
504#define VIVS_DE_COLOR_MULTIPLY_MODES_SRC_PREMULTIPLY__MASK 0x00000001
505#define VIVS_DE_COLOR_MULTIPLY_MODES_SRC_PREMULTIPLY__SHIFT 0
506#define VIVS_DE_COLOR_MULTIPLY_MODES_SRC_PREMULTIPLY_DISABLE 0x00000000
507#define VIVS_DE_COLOR_MULTIPLY_MODES_SRC_PREMULTIPLY_ENABLE 0x00000001
508#define VIVS_DE_COLOR_MULTIPLY_MODES_DST_PREMULTIPLY__MASK 0x00000010
509#define VIVS_DE_COLOR_MULTIPLY_MODES_DST_PREMULTIPLY__SHIFT 4
510#define VIVS_DE_COLOR_MULTIPLY_MODES_DST_PREMULTIPLY_DISABLE 0x00000000
511#define VIVS_DE_COLOR_MULTIPLY_MODES_DST_PREMULTIPLY_ENABLE 0x00000010
512#define VIVS_DE_COLOR_MULTIPLY_MODES_SRC_GLOBAL_PREMULTIPLY__MASK 0x00000300
513#define VIVS_DE_COLOR_MULTIPLY_MODES_SRC_GLOBAL_PREMULTIPLY__SHIFT 8
514#define VIVS_DE_COLOR_MULTIPLY_MODES_SRC_GLOBAL_PREMULTIPLY_DISABLE 0x00000000
515#define VIVS_DE_COLOR_MULTIPLY_MODES_SRC_GLOBAL_PREMULTIPLY_ALPHA 0x00000100
516#define VIVS_DE_COLOR_MULTIPLY_MODES_SRC_GLOBAL_PREMULTIPLY_COLOR 0x00000200
517#define VIVS_DE_COLOR_MULTIPLY_MODES_DST_DEMULTIPLY__MASK 0x00100000
518#define VIVS_DE_COLOR_MULTIPLY_MODES_DST_DEMULTIPLY__SHIFT 20
519#define VIVS_DE_COLOR_MULTIPLY_MODES_DST_DEMULTIPLY_DISABLE 0x00000000
520#define VIVS_DE_COLOR_MULTIPLY_MODES_DST_DEMULTIPLY_ENABLE 0x00100000
521
522#define VIVS_DE_PE_TRANSPARENCY 0x000012d4
523#define VIVS_DE_PE_TRANSPARENCY_SOURCE__MASK 0x00000003
524#define VIVS_DE_PE_TRANSPARENCY_SOURCE__SHIFT 0
525#define VIVS_DE_PE_TRANSPARENCY_SOURCE_OPAQUE 0x00000000
526#define VIVS_DE_PE_TRANSPARENCY_SOURCE_MASK 0x00000001
527#define VIVS_DE_PE_TRANSPARENCY_SOURCE_KEY 0x00000002
528#define VIVS_DE_PE_TRANSPARENCY_PATTERN__MASK 0x00000030
529#define VIVS_DE_PE_TRANSPARENCY_PATTERN__SHIFT 4
530#define VIVS_DE_PE_TRANSPARENCY_PATTERN_OPAQUE 0x00000000
531#define VIVS_DE_PE_TRANSPARENCY_PATTERN_MASK 0x00000010
532#define VIVS_DE_PE_TRANSPARENCY_PATTERN_KEY 0x00000020
533#define VIVS_DE_PE_TRANSPARENCY_DESTINATION__MASK 0x00000300
534#define VIVS_DE_PE_TRANSPARENCY_DESTINATION__SHIFT 8
535#define VIVS_DE_PE_TRANSPARENCY_DESTINATION_OPAQUE 0x00000000
536#define VIVS_DE_PE_TRANSPARENCY_DESTINATION_MASK 0x00000100
537#define VIVS_DE_PE_TRANSPARENCY_DESTINATION_KEY 0x00000200
538#define VIVS_DE_PE_TRANSPARENCY_TRANSPARENCY_MASK 0x00001000
539#define VIVS_DE_PE_TRANSPARENCY_USE_SRC_OVERRIDE__MASK 0x00030000
540#define VIVS_DE_PE_TRANSPARENCY_USE_SRC_OVERRIDE__SHIFT 16
541#define VIVS_DE_PE_TRANSPARENCY_USE_SRC_OVERRIDE_DEFAULT 0x00000000
542#define VIVS_DE_PE_TRANSPARENCY_USE_SRC_OVERRIDE_USE_ENABLE 0x00010000
543#define VIVS_DE_PE_TRANSPARENCY_USE_SRC_OVERRIDE_USE_DISABLE 0x00020000
544#define VIVS_DE_PE_TRANSPARENCY_USE_PAT_OVERRIDE__MASK 0x00300000
545#define VIVS_DE_PE_TRANSPARENCY_USE_PAT_OVERRIDE__SHIFT 20
546#define VIVS_DE_PE_TRANSPARENCY_USE_PAT_OVERRIDE_DEFAULT 0x00000000
547#define VIVS_DE_PE_TRANSPARENCY_USE_PAT_OVERRIDE_USE_ENABLE 0x00100000
548#define VIVS_DE_PE_TRANSPARENCY_USE_PAT_OVERRIDE_USE_DISABLE 0x00200000
549#define VIVS_DE_PE_TRANSPARENCY_USE_DST_OVERRIDE__MASK 0x03000000
550#define VIVS_DE_PE_TRANSPARENCY_USE_DST_OVERRIDE__SHIFT 24
551#define VIVS_DE_PE_TRANSPARENCY_USE_DST_OVERRIDE_DEFAULT 0x00000000
552#define VIVS_DE_PE_TRANSPARENCY_USE_DST_OVERRIDE_USE_ENABLE 0x01000000
553#define VIVS_DE_PE_TRANSPARENCY_USE_DST_OVERRIDE_USE_DISABLE 0x02000000
554#define VIVS_DE_PE_TRANSPARENCY_RESOURCE_OVERRIDE_MASK 0x10000000
555#define VIVS_DE_PE_TRANSPARENCY_DFB_COLOR_KEY__MASK 0x20000000
556#define VIVS_DE_PE_TRANSPARENCY_DFB_COLOR_KEY__SHIFT 29
557#define VIVS_DE_PE_TRANSPARENCY_DFB_COLOR_KEY_DISABLE 0x00000000
558#define VIVS_DE_PE_TRANSPARENCY_DFB_COLOR_KEY_ENABLE 0x20000000
559#define VIVS_DE_PE_TRANSPARENCY_DFB_COLOR_KEY_MASK 0x80000000
560
561#define VIVS_DE_PE_CONTROL 0x000012d8
562#define VIVS_DE_PE_CONTROL_YUV__MASK 0x00000001
563#define VIVS_DE_PE_CONTROL_YUV__SHIFT 0
564#define VIVS_DE_PE_CONTROL_YUV_601 0x00000000
565#define VIVS_DE_PE_CONTROL_YUV_709 0x00000001
566#define VIVS_DE_PE_CONTROL_YUV_MASK 0x00000008
567#define VIVS_DE_PE_CONTROL_UV_SWIZZLE__MASK 0x00000010
568#define VIVS_DE_PE_CONTROL_UV_SWIZZLE__SHIFT 4
569#define VIVS_DE_PE_CONTROL_UV_SWIZZLE_UV 0x00000000
570#define VIVS_DE_PE_CONTROL_UV_SWIZZLE_VU 0x00000010
571#define VIVS_DE_PE_CONTROL_UV_SWIZZLE_MASK 0x00000080
572#define VIVS_DE_PE_CONTROL_YUVRGB__MASK 0x00000100
573#define VIVS_DE_PE_CONTROL_YUVRGB__SHIFT 8
574#define VIVS_DE_PE_CONTROL_YUVRGB_DISABLE 0x00000000
575#define VIVS_DE_PE_CONTROL_YUVRGB_ENABLE 0x00000100
576#define VIVS_DE_PE_CONTROL_YUVRGB_MASK 0x00000800
577
578#define VIVS_DE_SRC_COLOR_KEY_HIGH 0x000012dc
579
580#define VIVS_DE_DEST_COLOR_KEY_HIGH 0x000012e0
581
582#define VIVS_DE_VR_CONFIG_EX 0x000012e4
583#define VIVS_DE_VR_CONFIG_EX_VERTICAL_LINE_WIDTH__MASK 0x00000003
584#define VIVS_DE_VR_CONFIG_EX_VERTICAL_LINE_WIDTH__SHIFT 0
585#define VIVS_DE_VR_CONFIG_EX_VERTICAL_LINE_WIDTH_AUTO 0x00000000
586#define VIVS_DE_VR_CONFIG_EX_VERTICAL_LINE_WIDTH_PIXELS16 0x00000001
587#define VIVS_DE_VR_CONFIG_EX_VERTICAL_LINE_WIDTH_PIXELS32 0x00000002
588#define VIVS_DE_VR_CONFIG_EX_VERTICAL_LINE_WIDTH_MASK 0x00000008
589#define VIVS_DE_VR_CONFIG_EX_FILTER_TAP__MASK 0x000000f0
590#define VIVS_DE_VR_CONFIG_EX_FILTER_TAP__SHIFT 4
591#define VIVS_DE_VR_CONFIG_EX_FILTER_TAP(x) (((x) << VIVS_DE_VR_CONFIG_EX_FILTER_TAP__SHIFT) & VIVS_DE_VR_CONFIG_EX_FILTER_TAP__MASK)
592#define VIVS_DE_VR_CONFIG_EX_FILTER_TAP_MASK 0x00000100
593
594#define VIVS_DE_PE_DITHER_LOW 0x000012e8
595#define VIVS_DE_PE_DITHER_LOW_PIXEL_X0_Y0__MASK 0x0000000f
596#define VIVS_DE_PE_DITHER_LOW_PIXEL_X0_Y0__SHIFT 0
597#define VIVS_DE_PE_DITHER_LOW_PIXEL_X0_Y0(x) (((x) << VIVS_DE_PE_DITHER_LOW_PIXEL_X0_Y0__SHIFT) & VIVS_DE_PE_DITHER_LOW_PIXEL_X0_Y0__MASK)
598#define VIVS_DE_PE_DITHER_LOW_PIXEL_X1_Y0__MASK 0x000000f0
599#define VIVS_DE_PE_DITHER_LOW_PIXEL_X1_Y0__SHIFT 4
600#define VIVS_DE_PE_DITHER_LOW_PIXEL_X1_Y0(x) (((x) << VIVS_DE_PE_DITHER_LOW_PIXEL_X1_Y0__SHIFT) & VIVS_DE_PE_DITHER_LOW_PIXEL_X1_Y0__MASK)
601#define VIVS_DE_PE_DITHER_LOW_PIXEL_X2_Y0__MASK 0x00000f00
602#define VIVS_DE_PE_DITHER_LOW_PIXEL_X2_Y0__SHIFT 8
603#define VIVS_DE_PE_DITHER_LOW_PIXEL_X2_Y0(x) (((x) << VIVS_DE_PE_DITHER_LOW_PIXEL_X2_Y0__SHIFT) & VIVS_DE_PE_DITHER_LOW_PIXEL_X2_Y0__MASK)
604#define VIVS_DE_PE_DITHER_LOW_PIXEL_X3_Y0__MASK 0x0000f000
605#define VIVS_DE_PE_DITHER_LOW_PIXEL_X3_Y0__SHIFT 12
606#define VIVS_DE_PE_DITHER_LOW_PIXEL_X3_Y0(x) (((x) << VIVS_DE_PE_DITHER_LOW_PIXEL_X3_Y0__SHIFT) & VIVS_DE_PE_DITHER_LOW_PIXEL_X3_Y0__MASK)
607#define VIVS_DE_PE_DITHER_LOW_PIXEL_X0_Y1__MASK 0x000f0000
608#define VIVS_DE_PE_DITHER_LOW_PIXEL_X0_Y1__SHIFT 16
609#define VIVS_DE_PE_DITHER_LOW_PIXEL_X0_Y1(x) (((x) << VIVS_DE_PE_DITHER_LOW_PIXEL_X0_Y1__SHIFT) & VIVS_DE_PE_DITHER_LOW_PIXEL_X0_Y1__MASK)
610#define VIVS_DE_PE_DITHER_LOW_PIXEL_X1_Y1__MASK 0x00f00000
611#define VIVS_DE_PE_DITHER_LOW_PIXEL_X1_Y1__SHIFT 20
612#define VIVS_DE_PE_DITHER_LOW_PIXEL_X1_Y1(x) (((x) << VIVS_DE_PE_DITHER_LOW_PIXEL_X1_Y1__SHIFT) & VIVS_DE_PE_DITHER_LOW_PIXEL_X1_Y1__MASK)
613#define VIVS_DE_PE_DITHER_LOW_PIXEL_X2_Y1__MASK 0x0f000000
614#define VIVS_DE_PE_DITHER_LOW_PIXEL_X2_Y1__SHIFT 24
615#define VIVS_DE_PE_DITHER_LOW_PIXEL_X2_Y1(x) (((x) << VIVS_DE_PE_DITHER_LOW_PIXEL_X2_Y1__SHIFT) & VIVS_DE_PE_DITHER_LOW_PIXEL_X2_Y1__MASK)
616#define VIVS_DE_PE_DITHER_LOW_PIXEL_X3_Y1__MASK 0xf0000000
617#define VIVS_DE_PE_DITHER_LOW_PIXEL_X3_Y1__SHIFT 28
618#define VIVS_DE_PE_DITHER_LOW_PIXEL_X3_Y1(x) (((x) << VIVS_DE_PE_DITHER_LOW_PIXEL_X3_Y1__SHIFT) & VIVS_DE_PE_DITHER_LOW_PIXEL_X3_Y1__MASK)
619
620#define VIVS_DE_PE_DITHER_HIGH 0x000012ec
621#define VIVS_DE_PE_DITHER_HIGH_PIXEL_X0_Y2__MASK 0x0000000f
622#define VIVS_DE_PE_DITHER_HIGH_PIXEL_X0_Y2__SHIFT 0
623#define VIVS_DE_PE_DITHER_HIGH_PIXEL_X0_Y2(x) (((x) << VIVS_DE_PE_DITHER_HIGH_PIXEL_X0_Y2__SHIFT) & VIVS_DE_PE_DITHER_HIGH_PIXEL_X0_Y2__MASK)
624#define VIVS_DE_PE_DITHER_HIGH_PIXEL_X1_Y2__MASK 0x000000f0
625#define VIVS_DE_PE_DITHER_HIGH_PIXEL_X1_Y2__SHIFT 4
626#define VIVS_DE_PE_DITHER_HIGH_PIXEL_X1_Y2(x) (((x) << VIVS_DE_PE_DITHER_HIGH_PIXEL_X1_Y2__SHIFT) & VIVS_DE_PE_DITHER_HIGH_PIXEL_X1_Y2__MASK)
627#define VIVS_DE_PE_DITHER_HIGH_PIXEL_X2_Y2__MASK 0x00000f00
628#define VIVS_DE_PE_DITHER_HIGH_PIXEL_X2_Y2__SHIFT 8
629#define VIVS_DE_PE_DITHER_HIGH_PIXEL_X2_Y2(x) (((x) << VIVS_DE_PE_DITHER_HIGH_PIXEL_X2_Y2__SHIFT) & VIVS_DE_PE_DITHER_HIGH_PIXEL_X2_Y2__MASK)
630#define VIVS_DE_PE_DITHER_HIGH_PIXEL_X3_Y2__MASK 0x0000f000
631#define VIVS_DE_PE_DITHER_HIGH_PIXEL_X3_Y2__SHIFT 12
632#define VIVS_DE_PE_DITHER_HIGH_PIXEL_X3_Y2(x) (((x) << VIVS_DE_PE_DITHER_HIGH_PIXEL_X3_Y2__SHIFT) & VIVS_DE_PE_DITHER_HIGH_PIXEL_X3_Y2__MASK)
633#define VIVS_DE_PE_DITHER_HIGH_PIXEL_X0_Y3__MASK 0x000f0000
634#define VIVS_DE_PE_DITHER_HIGH_PIXEL_X0_Y3__SHIFT 16
635#define VIVS_DE_PE_DITHER_HIGH_PIXEL_X0_Y3(x) (((x) << VIVS_DE_PE_DITHER_HIGH_PIXEL_X0_Y3__SHIFT) & VIVS_DE_PE_DITHER_HIGH_PIXEL_X0_Y3__MASK)
636#define VIVS_DE_PE_DITHER_HIGH_PIXEL_X1_Y3__MASK 0x00f00000
637#define VIVS_DE_PE_DITHER_HIGH_PIXEL_X1_Y3__SHIFT 20
638#define VIVS_DE_PE_DITHER_HIGH_PIXEL_X1_Y3(x) (((x) << VIVS_DE_PE_DITHER_HIGH_PIXEL_X1_Y3__SHIFT) & VIVS_DE_PE_DITHER_HIGH_PIXEL_X1_Y3__MASK)
639#define VIVS_DE_PE_DITHER_HIGH_PIXEL_X2_Y3__MASK 0x0f000000
640#define VIVS_DE_PE_DITHER_HIGH_PIXEL_X2_Y3__SHIFT 24
641#define VIVS_DE_PE_DITHER_HIGH_PIXEL_X2_Y3(x) (((x) << VIVS_DE_PE_DITHER_HIGH_PIXEL_X2_Y3__SHIFT) & VIVS_DE_PE_DITHER_HIGH_PIXEL_X2_Y3__MASK)
642#define VIVS_DE_PE_DITHER_HIGH_PIXEL_X3_Y3__MASK 0xf0000000
643#define VIVS_DE_PE_DITHER_HIGH_PIXEL_X3_Y3__SHIFT 28
644#define VIVS_DE_PE_DITHER_HIGH_PIXEL_X3_Y3(x) (((x) << VIVS_DE_PE_DITHER_HIGH_PIXEL_X3_Y3__SHIFT) & VIVS_DE_PE_DITHER_HIGH_PIXEL_X3_Y3__MASK)
645
646#define VIVS_DE_BW_CONFIG 0x000012f0
647#define VIVS_DE_BW_CONFIG_BLOCK_CONFIG__MASK 0x00000001
648#define VIVS_DE_BW_CONFIG_BLOCK_CONFIG__SHIFT 0
649#define VIVS_DE_BW_CONFIG_BLOCK_CONFIG_AUTO 0x00000000
650#define VIVS_DE_BW_CONFIG_BLOCK_CONFIG_CUSTOMIZE 0x00000001
651#define VIVS_DE_BW_CONFIG_BLOCK_CONFIG_MASK 0x00000008
652#define VIVS_DE_BW_CONFIG_BLOCK_WALK_DIRECTION__MASK 0x00000010
653#define VIVS_DE_BW_CONFIG_BLOCK_WALK_DIRECTION__SHIFT 4
654#define VIVS_DE_BW_CONFIG_BLOCK_WALK_DIRECTION_RIGHT_BOTTOM 0x00000000
655#define VIVS_DE_BW_CONFIG_BLOCK_WALK_DIRECTION_BOTTOM_RIGHT 0x00000010
656#define VIVS_DE_BW_CONFIG_BLOCK_WALK_DIRECTION_MASK 0x00000080
657#define VIVS_DE_BW_CONFIG_TILE_WALK_DIRECTION__MASK 0x00000100
658#define VIVS_DE_BW_CONFIG_TILE_WALK_DIRECTION__SHIFT 8
659#define VIVS_DE_BW_CONFIG_TILE_WALK_DIRECTION_RIGHT_BOTTOM 0x00000000
660#define VIVS_DE_BW_CONFIG_TILE_WALK_DIRECTION_BOTTOM_RIGHT 0x00000100
661#define VIVS_DE_BW_CONFIG_TILE_WALK_DIRECTION_MASK 0x00000800
662#define VIVS_DE_BW_CONFIG_PIXEL_WALK_DIRECTION__MASK 0x00001000
663#define VIVS_DE_BW_CONFIG_PIXEL_WALK_DIRECTION__SHIFT 12
664#define VIVS_DE_BW_CONFIG_PIXEL_WALK_DIRECTION_RIGHT_BOTTOM 0x00000000
665#define VIVS_DE_BW_CONFIG_PIXEL_WALK_DIRECTION_BOTTOM_RIGHT 0x00001000
666#define VIVS_DE_BW_CONFIG_PIXEL_WALK_DIRECTION_MASK 0x00008000
667
668#define VIVS_DE_BW_BLOCK_SIZE 0x000012f4
669#define VIVS_DE_BW_BLOCK_SIZE_WIDTH__MASK 0x0000ffff
670#define VIVS_DE_BW_BLOCK_SIZE_WIDTH__SHIFT 0
671#define VIVS_DE_BW_BLOCK_SIZE_WIDTH(x) (((x) << VIVS_DE_BW_BLOCK_SIZE_WIDTH__SHIFT) & VIVS_DE_BW_BLOCK_SIZE_WIDTH__MASK)
672#define VIVS_DE_BW_BLOCK_SIZE_HEIGHT__MASK 0xffff0000
673#define VIVS_DE_BW_BLOCK_SIZE_HEIGHT__SHIFT 16
674#define VIVS_DE_BW_BLOCK_SIZE_HEIGHT(x) (((x) << VIVS_DE_BW_BLOCK_SIZE_HEIGHT__SHIFT) & VIVS_DE_BW_BLOCK_SIZE_HEIGHT__MASK)
675
676#define VIVS_DE_BW_TILE_SIZE 0x000012f8
677#define VIVS_DE_BW_TILE_SIZE_WIDTH__MASK 0x0000ffff
678#define VIVS_DE_BW_TILE_SIZE_WIDTH__SHIFT 0
679#define VIVS_DE_BW_TILE_SIZE_WIDTH(x) (((x) << VIVS_DE_BW_TILE_SIZE_WIDTH__SHIFT) & VIVS_DE_BW_TILE_SIZE_WIDTH__MASK)
680#define VIVS_DE_BW_TILE_SIZE_HEIGHT__MASK 0xffff0000
681#define VIVS_DE_BW_TILE_SIZE_HEIGHT__SHIFT 16
682#define VIVS_DE_BW_TILE_SIZE_HEIGHT(x) (((x) << VIVS_DE_BW_TILE_SIZE_HEIGHT__SHIFT) & VIVS_DE_BW_TILE_SIZE_HEIGHT__MASK)
683
684#define VIVS_DE_BW_BLOCK_MASK 0x000012fc
685#define VIVS_DE_BW_BLOCK_MASK_HORIZONTAL__MASK 0x0000ffff
686#define VIVS_DE_BW_BLOCK_MASK_HORIZONTAL__SHIFT 0
687#define VIVS_DE_BW_BLOCK_MASK_HORIZONTAL(x) (((x) << VIVS_DE_BW_BLOCK_MASK_HORIZONTAL__SHIFT) & VIVS_DE_BW_BLOCK_MASK_HORIZONTAL__MASK)
688#define VIVS_DE_BW_BLOCK_MASK_VERTICAL__MASK 0xffff0000
689#define VIVS_DE_BW_BLOCK_MASK_VERTICAL__SHIFT 16
690#define VIVS_DE_BW_BLOCK_MASK_VERTICAL(x) (((x) << VIVS_DE_BW_BLOCK_MASK_VERTICAL__SHIFT) & VIVS_DE_BW_BLOCK_MASK_VERTICAL__MASK)
691
692#define VIVS_DE_SRC_EX_CONFIG 0x00001300
693#define VIVS_DE_SRC_EX_CONFIG_MULTI_TILED__MASK 0x00000001
694#define VIVS_DE_SRC_EX_CONFIG_MULTI_TILED__SHIFT 0
695#define VIVS_DE_SRC_EX_CONFIG_MULTI_TILED_DISABLE 0x00000000
696#define VIVS_DE_SRC_EX_CONFIG_MULTI_TILED_ENABLE 0x00000001
697#define VIVS_DE_SRC_EX_CONFIG_SUPER_TILED__MASK 0x00000008
698#define VIVS_DE_SRC_EX_CONFIG_SUPER_TILED__SHIFT 3
699#define VIVS_DE_SRC_EX_CONFIG_SUPER_TILED_DISABLE 0x00000000
700#define VIVS_DE_SRC_EX_CONFIG_SUPER_TILED_ENABLE 0x00000008
701#define VIVS_DE_SRC_EX_CONFIG_MINOR_TILED__MASK 0x00000100
702#define VIVS_DE_SRC_EX_CONFIG_MINOR_TILED__SHIFT 8
703#define VIVS_DE_SRC_EX_CONFIG_MINOR_TILED_DISABLE 0x00000000
704#define VIVS_DE_SRC_EX_CONFIG_MINOR_TILED_ENABLE 0x00000100
705
706#define VIVS_DE_SRC_EX_ADDRESS 0x00001304
707
708#define VIVS_DE_DE_MULTI_SOURCE 0x00001308
709#define VIVS_DE_DE_MULTI_SOURCE_MAX_SOURCE__MASK 0x00000007
710#define VIVS_DE_DE_MULTI_SOURCE_MAX_SOURCE__SHIFT 0
711#define VIVS_DE_DE_MULTI_SOURCE_MAX_SOURCE(x) (((x) << VIVS_DE_DE_MULTI_SOURCE_MAX_SOURCE__SHIFT) & VIVS_DE_DE_MULTI_SOURCE_MAX_SOURCE__MASK)
712#define VIVS_DE_DE_MULTI_SOURCE_HORIZONTAL_BLOCK__MASK 0x00000700
713#define VIVS_DE_DE_MULTI_SOURCE_HORIZONTAL_BLOCK__SHIFT 8
714#define VIVS_DE_DE_MULTI_SOURCE_HORIZONTAL_BLOCK_PIXEL16 0x00000000
715#define VIVS_DE_DE_MULTI_SOURCE_HORIZONTAL_BLOCK_PIXEL32 0x00000100
716#define VIVS_DE_DE_MULTI_SOURCE_HORIZONTAL_BLOCK_PIXEL64 0x00000200
717#define VIVS_DE_DE_MULTI_SOURCE_HORIZONTAL_BLOCK_PIXEL128 0x00000300
718#define VIVS_DE_DE_MULTI_SOURCE_HORIZONTAL_BLOCK_PIXEL256 0x00000400
719#define VIVS_DE_DE_MULTI_SOURCE_HORIZONTAL_BLOCK_PIXEL512 0x00000500
720#define VIVS_DE_DE_MULTI_SOURCE_VERTICAL_BLOCK__MASK 0x00070000
721#define VIVS_DE_DE_MULTI_SOURCE_VERTICAL_BLOCK__SHIFT 16
722#define VIVS_DE_DE_MULTI_SOURCE_VERTICAL_BLOCK_LINE1 0x00000000
723#define VIVS_DE_DE_MULTI_SOURCE_VERTICAL_BLOCK_LINE2 0x00010000
724#define VIVS_DE_DE_MULTI_SOURCE_VERTICAL_BLOCK_LINE4 0x00020000
725#define VIVS_DE_DE_MULTI_SOURCE_VERTICAL_BLOCK_LINE8 0x00030000
726#define VIVS_DE_DE_MULTI_SOURCE_VERTICAL_BLOCK_LINE16 0x00040000
727#define VIVS_DE_DE_MULTI_SOURCE_VERTICAL_BLOCK_LINE32 0x00050000
728#define VIVS_DE_DE_MULTI_SOURCE_VERTICAL_BLOCK_LINE64 0x00060000
729#define VIVS_DE_DE_MULTI_SOURCE_VERTICAL_BLOCK_LINE128 0x00070000
730
731#define VIVS_DE_DEYUV_CONVERSION 0x0000130c
732#define VIVS_DE_DEYUV_CONVERSION_ENABLE__MASK 0x00000003
733#define VIVS_DE_DEYUV_CONVERSION_ENABLE__SHIFT 0
734#define VIVS_DE_DEYUV_CONVERSION_ENABLE_OFF 0x00000000
735#define VIVS_DE_DEYUV_CONVERSION_ENABLE_PLANE1 0x00000001
736#define VIVS_DE_DEYUV_CONVERSION_ENABLE_PLANE2 0x00000002
737#define VIVS_DE_DEYUV_CONVERSION_ENABLE_PLANE3 0x00000003
738#define VIVS_DE_DEYUV_CONVERSION_PLANE1_COUNT__MASK 0x0000000c
739#define VIVS_DE_DEYUV_CONVERSION_PLANE1_COUNT__SHIFT 2
740#define VIVS_DE_DEYUV_CONVERSION_PLANE1_COUNT(x) (((x) << VIVS_DE_DEYUV_CONVERSION_PLANE1_COUNT__SHIFT) & VIVS_DE_DEYUV_CONVERSION_PLANE1_COUNT__MASK)
741#define VIVS_DE_DEYUV_CONVERSION_PLANE2_COUNT__MASK 0x00000030
742#define VIVS_DE_DEYUV_CONVERSION_PLANE2_COUNT__SHIFT 4
743#define VIVS_DE_DEYUV_CONVERSION_PLANE2_COUNT(x) (((x) << VIVS_DE_DEYUV_CONVERSION_PLANE2_COUNT__SHIFT) & VIVS_DE_DEYUV_CONVERSION_PLANE2_COUNT__MASK)
744#define VIVS_DE_DEYUV_CONVERSION_PLANE3_COUNT__MASK 0x000000c0
745#define VIVS_DE_DEYUV_CONVERSION_PLANE3_COUNT__SHIFT 6
746#define VIVS_DE_DEYUV_CONVERSION_PLANE3_COUNT(x) (((x) << VIVS_DE_DEYUV_CONVERSION_PLANE3_COUNT__SHIFT) & VIVS_DE_DEYUV_CONVERSION_PLANE3_COUNT__MASK)
747#define VIVS_DE_DEYUV_CONVERSION_PLANE1_SWIZZLE_B__MASK 0x00000300
748#define VIVS_DE_DEYUV_CONVERSION_PLANE1_SWIZZLE_B__SHIFT 8
749#define VIVS_DE_DEYUV_CONVERSION_PLANE1_SWIZZLE_B(x) (((x) << VIVS_DE_DEYUV_CONVERSION_PLANE1_SWIZZLE_B__SHIFT) & VIVS_DE_DEYUV_CONVERSION_PLANE1_SWIZZLE_B__MASK)
750#define VIVS_DE_DEYUV_CONVERSION_PLANE1_SWIZZLE_G__MASK 0x00000c00
751#define VIVS_DE_DEYUV_CONVERSION_PLANE1_SWIZZLE_G__SHIFT 10
752#define VIVS_DE_DEYUV_CONVERSION_PLANE1_SWIZZLE_G(x) (((x) << VIVS_DE_DEYUV_CONVERSION_PLANE1_SWIZZLE_G__SHIFT) & VIVS_DE_DEYUV_CONVERSION_PLANE1_SWIZZLE_G__MASK)
753#define VIVS_DE_DEYUV_CONVERSION_PLANE1_SWIZZLE_R__MASK 0x00003000
754#define VIVS_DE_DEYUV_CONVERSION_PLANE1_SWIZZLE_R__SHIFT 12
755#define VIVS_DE_DEYUV_CONVERSION_PLANE1_SWIZZLE_R(x) (((x) << VIVS_DE_DEYUV_CONVERSION_PLANE1_SWIZZLE_R__SHIFT) & VIVS_DE_DEYUV_CONVERSION_PLANE1_SWIZZLE_R__MASK)
756#define VIVS_DE_DEYUV_CONVERSION_PLANE1_SWIZZLE_A__MASK 0x0000c000
757#define VIVS_DE_DEYUV_CONVERSION_PLANE1_SWIZZLE_A__SHIFT 14
758#define VIVS_DE_DEYUV_CONVERSION_PLANE1_SWIZZLE_A(x) (((x) << VIVS_DE_DEYUV_CONVERSION_PLANE1_SWIZZLE_A__SHIFT) & VIVS_DE_DEYUV_CONVERSION_PLANE1_SWIZZLE_A__MASK)
759#define VIVS_DE_DEYUV_CONVERSION_PLANE2_SWIZZLE_B__MASK 0x00030000
760#define VIVS_DE_DEYUV_CONVERSION_PLANE2_SWIZZLE_B__SHIFT 16
761#define VIVS_DE_DEYUV_CONVERSION_PLANE2_SWIZZLE_B(x) (((x) << VIVS_DE_DEYUV_CONVERSION_PLANE2_SWIZZLE_B__SHIFT) & VIVS_DE_DEYUV_CONVERSION_PLANE2_SWIZZLE_B__MASK)
762#define VIVS_DE_DEYUV_CONVERSION_PLANE2_SWIZZLE_G__MASK 0x000c0000
763#define VIVS_DE_DEYUV_CONVERSION_PLANE2_SWIZZLE_G__SHIFT 18
764#define VIVS_DE_DEYUV_CONVERSION_PLANE2_SWIZZLE_G(x) (((x) << VIVS_DE_DEYUV_CONVERSION_PLANE2_SWIZZLE_G__SHIFT) & VIVS_DE_DEYUV_CONVERSION_PLANE2_SWIZZLE_G__MASK)
765#define VIVS_DE_DEYUV_CONVERSION_PLANE2_SWIZZLE_R__MASK 0x00300000
766#define VIVS_DE_DEYUV_CONVERSION_PLANE2_SWIZZLE_R__SHIFT 20
767#define VIVS_DE_DEYUV_CONVERSION_PLANE2_SWIZZLE_R(x) (((x) << VIVS_DE_DEYUV_CONVERSION_PLANE2_SWIZZLE_R__SHIFT) & VIVS_DE_DEYUV_CONVERSION_PLANE2_SWIZZLE_R__MASK)
768#define VIVS_DE_DEYUV_CONVERSION_PLANE2_SWIZZLE_A__MASK 0x00c00000
769#define VIVS_DE_DEYUV_CONVERSION_PLANE2_SWIZZLE_A__SHIFT 22
770#define VIVS_DE_DEYUV_CONVERSION_PLANE2_SWIZZLE_A(x) (((x) << VIVS_DE_DEYUV_CONVERSION_PLANE2_SWIZZLE_A__SHIFT) & VIVS_DE_DEYUV_CONVERSION_PLANE2_SWIZZLE_A__MASK)
771#define VIVS_DE_DEYUV_CONVERSION_PLANE3_SWIZZLE_B__MASK 0x03000000
772#define VIVS_DE_DEYUV_CONVERSION_PLANE3_SWIZZLE_B__SHIFT 24
773#define VIVS_DE_DEYUV_CONVERSION_PLANE3_SWIZZLE_B(x) (((x) << VIVS_DE_DEYUV_CONVERSION_PLANE3_SWIZZLE_B__SHIFT) & VIVS_DE_DEYUV_CONVERSION_PLANE3_SWIZZLE_B__MASK)
774#define VIVS_DE_DEYUV_CONVERSION_PLANE3_SWIZZLE_G__MASK 0x0c000000
775#define VIVS_DE_DEYUV_CONVERSION_PLANE3_SWIZZLE_G__SHIFT 26
776#define VIVS_DE_DEYUV_CONVERSION_PLANE3_SWIZZLE_G(x) (((x) << VIVS_DE_DEYUV_CONVERSION_PLANE3_SWIZZLE_G__SHIFT) & VIVS_DE_DEYUV_CONVERSION_PLANE3_SWIZZLE_G__MASK)
777#define VIVS_DE_DEYUV_CONVERSION_PLANE3_SWIZZLE_R__MASK 0x30000000
778#define VIVS_DE_DEYUV_CONVERSION_PLANE3_SWIZZLE_R__SHIFT 28
779#define VIVS_DE_DEYUV_CONVERSION_PLANE3_SWIZZLE_R(x) (((x) << VIVS_DE_DEYUV_CONVERSION_PLANE3_SWIZZLE_R__SHIFT) & VIVS_DE_DEYUV_CONVERSION_PLANE3_SWIZZLE_R__MASK)
780#define VIVS_DE_DEYUV_CONVERSION_PLANE3_SWIZZLE_A__MASK 0xc0000000
781#define VIVS_DE_DEYUV_CONVERSION_PLANE3_SWIZZLE_A__SHIFT 30
782#define VIVS_DE_DEYUV_CONVERSION_PLANE3_SWIZZLE_A(x) (((x) << VIVS_DE_DEYUV_CONVERSION_PLANE3_SWIZZLE_A__SHIFT) & VIVS_DE_DEYUV_CONVERSION_PLANE3_SWIZZLE_A__MASK)
783
784#define VIVS_DE_DE_PLANE2_ADDRESS 0x00001310
785
786#define VIVS_DE_DE_PLANE2_STRIDE 0x00001314
787#define VIVS_DE_DE_PLANE2_STRIDE_STRIDE__MASK 0x0003ffff
788#define VIVS_DE_DE_PLANE2_STRIDE_STRIDE__SHIFT 0
789#define VIVS_DE_DE_PLANE2_STRIDE_STRIDE(x) (((x) << VIVS_DE_DE_PLANE2_STRIDE_STRIDE__SHIFT) & VIVS_DE_DE_PLANE2_STRIDE_STRIDE__MASK)
790
791#define VIVS_DE_DE_PLANE3_ADDRESS 0x00001318
792
793#define VIVS_DE_DE_PLANE3_STRIDE 0x0000131c
794#define VIVS_DE_DE_PLANE3_STRIDE_STRIDE__MASK 0x0003ffff
795#define VIVS_DE_DE_PLANE3_STRIDE_STRIDE__SHIFT 0
796#define VIVS_DE_DE_PLANE3_STRIDE_STRIDE(x) (((x) << VIVS_DE_DE_PLANE3_STRIDE_STRIDE__SHIFT) & VIVS_DE_DE_PLANE3_STRIDE_STRIDE__MASK)
797
798#define VIVS_DE_DE_STALL_DE 0x00001320
799#define VIVS_DE_DE_STALL_DE_ENABLE__MASK 0x00000001
800#define VIVS_DE_DE_STALL_DE_ENABLE__SHIFT 0
801#define VIVS_DE_DE_STALL_DE_ENABLE_DISABLE 0x00000000
802#define VIVS_DE_DE_STALL_DE_ENABLE_ENABLE 0x00000001
803
804#define VIVS_DE_FILTER_KERNEL(i0) (0x00001800 + 0x4*(i0))
805#define VIVS_DE_FILTER_KERNEL__ESIZE 0x00000004
806#define VIVS_DE_FILTER_KERNEL__LEN 0x00000080
807#define VIVS_DE_FILTER_KERNEL_COEFFICIENT0__MASK 0x0000ffff
808#define VIVS_DE_FILTER_KERNEL_COEFFICIENT0__SHIFT 0
809#define VIVS_DE_FILTER_KERNEL_COEFFICIENT0(x) (((x) << VIVS_DE_FILTER_KERNEL_COEFFICIENT0__SHIFT) & VIVS_DE_FILTER_KERNEL_COEFFICIENT0__MASK)
810#define VIVS_DE_FILTER_KERNEL_COEFFICIENT1__MASK 0xffff0000
811#define VIVS_DE_FILTER_KERNEL_COEFFICIENT1__SHIFT 16
812#define VIVS_DE_FILTER_KERNEL_COEFFICIENT1(x) (((x) << VIVS_DE_FILTER_KERNEL_COEFFICIENT1__SHIFT) & VIVS_DE_FILTER_KERNEL_COEFFICIENT1__MASK)
813
814#define VIVS_DE_INDEX_COLOR_TABLE(i0) (0x00001c00 + 0x4*(i0))
815#define VIVS_DE_INDEX_COLOR_TABLE__ESIZE 0x00000004
816#define VIVS_DE_INDEX_COLOR_TABLE__LEN 0x00000100
817
818#define VIVS_DE_HORI_FILTER_KERNEL(i0) (0x00002800 + 0x4*(i0))
819#define VIVS_DE_HORI_FILTER_KERNEL__ESIZE 0x00000004
820#define VIVS_DE_HORI_FILTER_KERNEL__LEN 0x00000080
821#define VIVS_DE_HORI_FILTER_KERNEL_COEFFICIENT0__MASK 0x0000ffff
822#define VIVS_DE_HORI_FILTER_KERNEL_COEFFICIENT0__SHIFT 0
823#define VIVS_DE_HORI_FILTER_KERNEL_COEFFICIENT0(x) (((x) << VIVS_DE_HORI_FILTER_KERNEL_COEFFICIENT0__SHIFT) & VIVS_DE_HORI_FILTER_KERNEL_COEFFICIENT0__MASK)
824#define VIVS_DE_HORI_FILTER_KERNEL_COEFFICIENT1__MASK 0xffff0000
825#define VIVS_DE_HORI_FILTER_KERNEL_COEFFICIENT1__SHIFT 16
826#define VIVS_DE_HORI_FILTER_KERNEL_COEFFICIENT1(x) (((x) << VIVS_DE_HORI_FILTER_KERNEL_COEFFICIENT1__SHIFT) & VIVS_DE_HORI_FILTER_KERNEL_COEFFICIENT1__MASK)
827
828#define VIVS_DE_VERTI_FILTER_KERNEL(i0) (0x00002a00 + 0x4*(i0))
829#define VIVS_DE_VERTI_FILTER_KERNEL__ESIZE 0x00000004
830#define VIVS_DE_VERTI_FILTER_KERNEL__LEN 0x00000080
831#define VIVS_DE_VERTI_FILTER_KERNEL_COEFFICIENT0__MASK 0x0000ffff
832#define VIVS_DE_VERTI_FILTER_KERNEL_COEFFICIENT0__SHIFT 0
833#define VIVS_DE_VERTI_FILTER_KERNEL_COEFFICIENT0(x) (((x) << VIVS_DE_VERTI_FILTER_KERNEL_COEFFICIENT0__SHIFT) & VIVS_DE_VERTI_FILTER_KERNEL_COEFFICIENT0__MASK)
834#define VIVS_DE_VERTI_FILTER_KERNEL_COEFFICIENT1__MASK 0xffff0000
835#define VIVS_DE_VERTI_FILTER_KERNEL_COEFFICIENT1__SHIFT 16
836#define VIVS_DE_VERTI_FILTER_KERNEL_COEFFICIENT1(x) (((x) << VIVS_DE_VERTI_FILTER_KERNEL_COEFFICIENT1__SHIFT) & VIVS_DE_VERTI_FILTER_KERNEL_COEFFICIENT1__MASK)
837
838#define VIVS_DE_INDEX_COLOR_TABLE32(i0) (0x00003400 + 0x4*(i0))
839#define VIVS_DE_INDEX_COLOR_TABLE32__ESIZE 0x00000004
840#define VIVS_DE_INDEX_COLOR_TABLE32__LEN 0x00000100
841
842#define VIVS_DE_BLOCK4 0x00000000
843
844#define VIVS_DE_BLOCK4_SRC_ADDRESS(i0) (0x00012800 + 0x4*(i0))
845#define VIVS_DE_BLOCK4_SRC_ADDRESS__ESIZE 0x00000004
846#define VIVS_DE_BLOCK4_SRC_ADDRESS__LEN 0x00000004
847
848#define VIVS_DE_BLOCK4_SRC_STRIDE(i0) (0x00012810 + 0x4*(i0))
849#define VIVS_DE_BLOCK4_SRC_STRIDE__ESIZE 0x00000004
850#define VIVS_DE_BLOCK4_SRC_STRIDE__LEN 0x00000004
851#define VIVS_DE_BLOCK4_SRC_STRIDE_STRIDE__MASK 0x0003ffff
852#define VIVS_DE_BLOCK4_SRC_STRIDE_STRIDE__SHIFT 0
853#define VIVS_DE_BLOCK4_SRC_STRIDE_STRIDE(x) (((x) << VIVS_DE_BLOCK4_SRC_STRIDE_STRIDE__SHIFT) & VIVS_DE_BLOCK4_SRC_STRIDE_STRIDE__MASK)
854
855#define VIVS_DE_BLOCK4_SRC_ROTATION_CONFIG(i0) (0x00012820 + 0x4*(i0))
856#define VIVS_DE_BLOCK4_SRC_ROTATION_CONFIG__ESIZE 0x00000004
857#define VIVS_DE_BLOCK4_SRC_ROTATION_CONFIG__LEN 0x00000004
858#define VIVS_DE_BLOCK4_SRC_ROTATION_CONFIG_WIDTH__MASK 0x0000ffff
859#define VIVS_DE_BLOCK4_SRC_ROTATION_CONFIG_WIDTH__SHIFT 0
860#define VIVS_DE_BLOCK4_SRC_ROTATION_CONFIG_WIDTH(x) (((x) << VIVS_DE_BLOCK4_SRC_ROTATION_CONFIG_WIDTH__SHIFT) & VIVS_DE_BLOCK4_SRC_ROTATION_CONFIG_WIDTH__MASK)
861#define VIVS_DE_BLOCK4_SRC_ROTATION_CONFIG_ROTATION__MASK 0x00010000
862#define VIVS_DE_BLOCK4_SRC_ROTATION_CONFIG_ROTATION__SHIFT 16
863#define VIVS_DE_BLOCK4_SRC_ROTATION_CONFIG_ROTATION_DISABLE 0x00000000
864#define VIVS_DE_BLOCK4_SRC_ROTATION_CONFIG_ROTATION_ENABLE 0x00010000
865
866#define VIVS_DE_BLOCK4_SRC_CONFIG(i0) (0x00012830 + 0x4*(i0))
867#define VIVS_DE_BLOCK4_SRC_CONFIG__ESIZE 0x00000004
868#define VIVS_DE_BLOCK4_SRC_CONFIG__LEN 0x00000004
869#define VIVS_DE_BLOCK4_SRC_CONFIG_PE10_SOURCE_FORMAT__MASK 0x0000000f
870#define VIVS_DE_BLOCK4_SRC_CONFIG_PE10_SOURCE_FORMAT__SHIFT 0
871#define VIVS_DE_BLOCK4_SRC_CONFIG_PE10_SOURCE_FORMAT(x) (((x) << VIVS_DE_BLOCK4_SRC_CONFIG_PE10_SOURCE_FORMAT__SHIFT) & VIVS_DE_BLOCK4_SRC_CONFIG_PE10_SOURCE_FORMAT__MASK)
872#define VIVS_DE_BLOCK4_SRC_CONFIG_TRANSPARENCY__MASK 0x00000030
873#define VIVS_DE_BLOCK4_SRC_CONFIG_TRANSPARENCY__SHIFT 4
874#define VIVS_DE_BLOCK4_SRC_CONFIG_TRANSPARENCY(x) (((x) << VIVS_DE_BLOCK4_SRC_CONFIG_TRANSPARENCY__SHIFT) & VIVS_DE_BLOCK4_SRC_CONFIG_TRANSPARENCY__MASK)
875#define VIVS_DE_BLOCK4_SRC_CONFIG_SRC_RELATIVE__MASK 0x00000040
876#define VIVS_DE_BLOCK4_SRC_CONFIG_SRC_RELATIVE__SHIFT 6
877#define VIVS_DE_BLOCK4_SRC_CONFIG_SRC_RELATIVE_ABSOLUTE 0x00000000
878#define VIVS_DE_BLOCK4_SRC_CONFIG_SRC_RELATIVE_RELATIVE 0x00000040
879#define VIVS_DE_BLOCK4_SRC_CONFIG_TILED__MASK 0x00000080
880#define VIVS_DE_BLOCK4_SRC_CONFIG_TILED__SHIFT 7
881#define VIVS_DE_BLOCK4_SRC_CONFIG_TILED_DISABLE 0x00000000
882#define VIVS_DE_BLOCK4_SRC_CONFIG_TILED_ENABLE 0x00000080
883#define VIVS_DE_BLOCK4_SRC_CONFIG_LOCATION__MASK 0x00000100
884#define VIVS_DE_BLOCK4_SRC_CONFIG_LOCATION__SHIFT 8
885#define VIVS_DE_BLOCK4_SRC_CONFIG_LOCATION_MEMORY 0x00000000
886#define VIVS_DE_BLOCK4_SRC_CONFIG_LOCATION_STREAM 0x00000100
887#define VIVS_DE_BLOCK4_SRC_CONFIG_PACK__MASK 0x00003000
888#define VIVS_DE_BLOCK4_SRC_CONFIG_PACK__SHIFT 12
889#define VIVS_DE_BLOCK4_SRC_CONFIG_PACK_PACKED8 0x00000000
890#define VIVS_DE_BLOCK4_SRC_CONFIG_PACK_PACKED16 0x00001000
891#define VIVS_DE_BLOCK4_SRC_CONFIG_PACK_PACKED32 0x00002000
892#define VIVS_DE_BLOCK4_SRC_CONFIG_PACK_UNPACKED 0x00003000
893#define VIVS_DE_BLOCK4_SRC_CONFIG_MONO_TRANSPARENCY__MASK 0x00008000
894#define VIVS_DE_BLOCK4_SRC_CONFIG_MONO_TRANSPARENCY__SHIFT 15
895#define VIVS_DE_BLOCK4_SRC_CONFIG_MONO_TRANSPARENCY_BACKGROUND 0x00000000
896#define VIVS_DE_BLOCK4_SRC_CONFIG_MONO_TRANSPARENCY_FOREGROUND 0x00008000
897#define VIVS_DE_BLOCK4_SRC_CONFIG_UNK16 0x00010000
898#define VIVS_DE_BLOCK4_SRC_CONFIG_SWIZZLE__MASK 0x00300000
899#define VIVS_DE_BLOCK4_SRC_CONFIG_SWIZZLE__SHIFT 20
900#define VIVS_DE_BLOCK4_SRC_CONFIG_SWIZZLE(x) (((x) << VIVS_DE_BLOCK4_SRC_CONFIG_SWIZZLE__SHIFT) & VIVS_DE_BLOCK4_SRC_CONFIG_SWIZZLE__MASK)
901#define VIVS_DE_BLOCK4_SRC_CONFIG_SOURCE_FORMAT__MASK 0x1f000000
902#define VIVS_DE_BLOCK4_SRC_CONFIG_SOURCE_FORMAT__SHIFT 24
903#define VIVS_DE_BLOCK4_SRC_CONFIG_SOURCE_FORMAT(x) (((x) << VIVS_DE_BLOCK4_SRC_CONFIG_SOURCE_FORMAT__SHIFT) & VIVS_DE_BLOCK4_SRC_CONFIG_SOURCE_FORMAT__MASK)
904#define VIVS_DE_BLOCK4_SRC_CONFIG_DISABLE420_L2_CACHE 0x20000000
905#define VIVS_DE_BLOCK4_SRC_CONFIG_ENDIAN_CONTROL__MASK 0xc0000000
906#define VIVS_DE_BLOCK4_SRC_CONFIG_ENDIAN_CONTROL__SHIFT 30
907#define VIVS_DE_BLOCK4_SRC_CONFIG_ENDIAN_CONTROL(x) (((x) << VIVS_DE_BLOCK4_SRC_CONFIG_ENDIAN_CONTROL__SHIFT) & VIVS_DE_BLOCK4_SRC_CONFIG_ENDIAN_CONTROL__MASK)
908
909#define VIVS_DE_BLOCK4_SRC_ORIGIN(i0) (0x00012840 + 0x4*(i0))
910#define VIVS_DE_BLOCK4_SRC_ORIGIN__ESIZE 0x00000004
911#define VIVS_DE_BLOCK4_SRC_ORIGIN__LEN 0x00000004
912#define VIVS_DE_BLOCK4_SRC_ORIGIN_X__MASK 0x0000ffff
913#define VIVS_DE_BLOCK4_SRC_ORIGIN_X__SHIFT 0
914#define VIVS_DE_BLOCK4_SRC_ORIGIN_X(x) (((x) << VIVS_DE_BLOCK4_SRC_ORIGIN_X__SHIFT) & VIVS_DE_BLOCK4_SRC_ORIGIN_X__MASK)
915#define VIVS_DE_BLOCK4_SRC_ORIGIN_Y__MASK 0xffff0000
916#define VIVS_DE_BLOCK4_SRC_ORIGIN_Y__SHIFT 16
917#define VIVS_DE_BLOCK4_SRC_ORIGIN_Y(x) (((x) << VIVS_DE_BLOCK4_SRC_ORIGIN_Y__SHIFT) & VIVS_DE_BLOCK4_SRC_ORIGIN_Y__MASK)
918
919#define VIVS_DE_BLOCK4_SRC_SIZE(i0) (0x00012850 + 0x4*(i0))
920#define VIVS_DE_BLOCK4_SRC_SIZE__ESIZE 0x00000004
921#define VIVS_DE_BLOCK4_SRC_SIZE__LEN 0x00000004
922#define VIVS_DE_BLOCK4_SRC_SIZE_X__MASK 0x0000ffff
923#define VIVS_DE_BLOCK4_SRC_SIZE_X__SHIFT 0
924#define VIVS_DE_BLOCK4_SRC_SIZE_X(x) (((x) << VIVS_DE_BLOCK4_SRC_SIZE_X__SHIFT) & VIVS_DE_BLOCK4_SRC_SIZE_X__MASK)
925#define VIVS_DE_BLOCK4_SRC_SIZE_Y__MASK 0xffff0000
926#define VIVS_DE_BLOCK4_SRC_SIZE_Y__SHIFT 16
927#define VIVS_DE_BLOCK4_SRC_SIZE_Y(x) (((x) << VIVS_DE_BLOCK4_SRC_SIZE_Y__SHIFT) & VIVS_DE_BLOCK4_SRC_SIZE_Y__MASK)
928
929#define VIVS_DE_BLOCK4_SRC_COLOR_BG(i0) (0x00012860 + 0x4*(i0))
930#define VIVS_DE_BLOCK4_SRC_COLOR_BG__ESIZE 0x00000004
931#define VIVS_DE_BLOCK4_SRC_COLOR_BG__LEN 0x00000004
932
933#define VIVS_DE_BLOCK4_ROP(i0) (0x00012870 + 0x4*(i0))
934#define VIVS_DE_BLOCK4_ROP__ESIZE 0x00000004
935#define VIVS_DE_BLOCK4_ROP__LEN 0x00000004
936#define VIVS_DE_BLOCK4_ROP_ROP_FG__MASK 0x000000ff
937#define VIVS_DE_BLOCK4_ROP_ROP_FG__SHIFT 0
938#define VIVS_DE_BLOCK4_ROP_ROP_FG(x) (((x) << VIVS_DE_BLOCK4_ROP_ROP_FG__SHIFT) & VIVS_DE_BLOCK4_ROP_ROP_FG__MASK)
939#define VIVS_DE_BLOCK4_ROP_ROP_BG__MASK 0x0000ff00
940#define VIVS_DE_BLOCK4_ROP_ROP_BG__SHIFT 8
941#define VIVS_DE_BLOCK4_ROP_ROP_BG(x) (((x) << VIVS_DE_BLOCK4_ROP_ROP_BG__SHIFT) & VIVS_DE_BLOCK4_ROP_ROP_BG__MASK)
942#define VIVS_DE_BLOCK4_ROP_TYPE__MASK 0x00300000
943#define VIVS_DE_BLOCK4_ROP_TYPE__SHIFT 20
944#define VIVS_DE_BLOCK4_ROP_TYPE_ROP2_PATTERN 0x00000000
945#define VIVS_DE_BLOCK4_ROP_TYPE_ROP2_SOURCE 0x00100000
946#define VIVS_DE_BLOCK4_ROP_TYPE_ROP3 0x00200000
947#define VIVS_DE_BLOCK4_ROP_TYPE_ROP4 0x00300000
948
949#define VIVS_DE_BLOCK4_ALPHA_CONTROL(i0) (0x00012880 + 0x4*(i0))
950#define VIVS_DE_BLOCK4_ALPHA_CONTROL__ESIZE 0x00000004
951#define VIVS_DE_BLOCK4_ALPHA_CONTROL__LEN 0x00000004
952#define VIVS_DE_BLOCK4_ALPHA_CONTROL_ENABLE__MASK 0x00000001
953#define VIVS_DE_BLOCK4_ALPHA_CONTROL_ENABLE__SHIFT 0
954#define VIVS_DE_BLOCK4_ALPHA_CONTROL_ENABLE_OFF 0x00000000
955#define VIVS_DE_BLOCK4_ALPHA_CONTROL_ENABLE_ON 0x00000001
956#define VIVS_DE_BLOCK4_ALPHA_CONTROL_PE10_GLOBAL_SRC_ALPHA__MASK 0x00ff0000
957#define VIVS_DE_BLOCK4_ALPHA_CONTROL_PE10_GLOBAL_SRC_ALPHA__SHIFT 16
958#define VIVS_DE_BLOCK4_ALPHA_CONTROL_PE10_GLOBAL_SRC_ALPHA(x) (((x) << VIVS_DE_BLOCK4_ALPHA_CONTROL_PE10_GLOBAL_SRC_ALPHA__SHIFT) & VIVS_DE_BLOCK4_ALPHA_CONTROL_PE10_GLOBAL_SRC_ALPHA__MASK)
959#define VIVS_DE_BLOCK4_ALPHA_CONTROL_PE10_GLOBAL_DST_ALPHA__MASK 0xff000000
960#define VIVS_DE_BLOCK4_ALPHA_CONTROL_PE10_GLOBAL_DST_ALPHA__SHIFT 24
961#define VIVS_DE_BLOCK4_ALPHA_CONTROL_PE10_GLOBAL_DST_ALPHA(x) (((x) << VIVS_DE_BLOCK4_ALPHA_CONTROL_PE10_GLOBAL_DST_ALPHA__SHIFT) & VIVS_DE_BLOCK4_ALPHA_CONTROL_PE10_GLOBAL_DST_ALPHA__MASK)
962
963#define VIVS_DE_BLOCK4_ALPHA_MODES(i0) (0x00012890 + 0x4*(i0))
964#define VIVS_DE_BLOCK4_ALPHA_MODES__ESIZE 0x00000004
965#define VIVS_DE_BLOCK4_ALPHA_MODES__LEN 0x00000004
966#define VIVS_DE_BLOCK4_ALPHA_MODES_SRC_ALPHA_MODE__MASK 0x00000001
967#define VIVS_DE_BLOCK4_ALPHA_MODES_SRC_ALPHA_MODE__SHIFT 0
968#define VIVS_DE_BLOCK4_ALPHA_MODES_SRC_ALPHA_MODE_NORMAL 0x00000000
969#define VIVS_DE_BLOCK4_ALPHA_MODES_SRC_ALPHA_MODE_INVERSED 0x00000001
970#define VIVS_DE_BLOCK4_ALPHA_MODES_DST_ALPHA_MODE__MASK 0x00000010
971#define VIVS_DE_BLOCK4_ALPHA_MODES_DST_ALPHA_MODE__SHIFT 4
972#define VIVS_DE_BLOCK4_ALPHA_MODES_DST_ALPHA_MODE_NORMAL 0x00000000
973#define VIVS_DE_BLOCK4_ALPHA_MODES_DST_ALPHA_MODE_INVERSED 0x00000010
974#define VIVS_DE_BLOCK4_ALPHA_MODES_GLOBAL_SRC_ALPHA_MODE__MASK 0x00000300
975#define VIVS_DE_BLOCK4_ALPHA_MODES_GLOBAL_SRC_ALPHA_MODE__SHIFT 8
976#define VIVS_DE_BLOCK4_ALPHA_MODES_GLOBAL_SRC_ALPHA_MODE_NORMAL 0x00000000
977#define VIVS_DE_BLOCK4_ALPHA_MODES_GLOBAL_SRC_ALPHA_MODE_GLOBAL 0x00000100
978#define VIVS_DE_BLOCK4_ALPHA_MODES_GLOBAL_SRC_ALPHA_MODE_SCALED 0x00000200
979#define VIVS_DE_BLOCK4_ALPHA_MODES_GLOBAL_DST_ALPHA_MODE__MASK 0x00003000
980#define VIVS_DE_BLOCK4_ALPHA_MODES_GLOBAL_DST_ALPHA_MODE__SHIFT 12
981#define VIVS_DE_BLOCK4_ALPHA_MODES_GLOBAL_DST_ALPHA_MODE_NORMAL 0x00000000
982#define VIVS_DE_BLOCK4_ALPHA_MODES_GLOBAL_DST_ALPHA_MODE_GLOBAL 0x00001000
983#define VIVS_DE_BLOCK4_ALPHA_MODES_GLOBAL_DST_ALPHA_MODE_SCALED 0x00002000
984#define VIVS_DE_BLOCK4_ALPHA_MODES_PE10_SRC_COLOR_MULTIPLY__MASK 0x00010000
985#define VIVS_DE_BLOCK4_ALPHA_MODES_PE10_SRC_COLOR_MULTIPLY__SHIFT 16
986#define VIVS_DE_BLOCK4_ALPHA_MODES_PE10_SRC_COLOR_MULTIPLY_DISABLE 0x00000000
987#define VIVS_DE_BLOCK4_ALPHA_MODES_PE10_SRC_COLOR_MULTIPLY_ENABLE 0x00010000
988#define VIVS_DE_BLOCK4_ALPHA_MODES_PE10_DST_COLOR_MULTIPLY__MASK 0x00100000
989#define VIVS_DE_BLOCK4_ALPHA_MODES_PE10_DST_COLOR_MULTIPLY__SHIFT 20
990#define VIVS_DE_BLOCK4_ALPHA_MODES_PE10_DST_COLOR_MULTIPLY_DISABLE 0x00000000
991#define VIVS_DE_BLOCK4_ALPHA_MODES_PE10_DST_COLOR_MULTIPLY_ENABLE 0x00100000
992#define VIVS_DE_BLOCK4_ALPHA_MODES_SRC_BLENDING_MODE__MASK 0x07000000
993#define VIVS_DE_BLOCK4_ALPHA_MODES_SRC_BLENDING_MODE__SHIFT 24
994#define VIVS_DE_BLOCK4_ALPHA_MODES_SRC_BLENDING_MODE(x) (((x) << VIVS_DE_BLOCK4_ALPHA_MODES_SRC_BLENDING_MODE__SHIFT) & VIVS_DE_BLOCK4_ALPHA_MODES_SRC_BLENDING_MODE__MASK)
995#define VIVS_DE_BLOCK4_ALPHA_MODES_SRC_ALPHA_FACTOR__MASK 0x08000000
996#define VIVS_DE_BLOCK4_ALPHA_MODES_SRC_ALPHA_FACTOR__SHIFT 27
997#define VIVS_DE_BLOCK4_ALPHA_MODES_SRC_ALPHA_FACTOR_DISABLE 0x00000000
998#define VIVS_DE_BLOCK4_ALPHA_MODES_SRC_ALPHA_FACTOR_ENABLE 0x08000000
999#define VIVS_DE_BLOCK4_ALPHA_MODES_DST_BLENDING_MODE__MASK 0x70000000
1000#define VIVS_DE_BLOCK4_ALPHA_MODES_DST_BLENDING_MODE__SHIFT 28
1001#define VIVS_DE_BLOCK4_ALPHA_MODES_DST_BLENDING_MODE(x) (((x) << VIVS_DE_BLOCK4_ALPHA_MODES_DST_BLENDING_MODE__SHIFT) & VIVS_DE_BLOCK4_ALPHA_MODES_DST_BLENDING_MODE__MASK)
1002#define VIVS_DE_BLOCK4_ALPHA_MODES_DST_ALPHA_FACTOR__MASK 0x80000000
1003#define VIVS_DE_BLOCK4_ALPHA_MODES_DST_ALPHA_FACTOR__SHIFT 31
1004#define VIVS_DE_BLOCK4_ALPHA_MODES_DST_ALPHA_FACTOR_DISABLE 0x00000000
1005#define VIVS_DE_BLOCK4_ALPHA_MODES_DST_ALPHA_FACTOR_ENABLE 0x80000000
1006
1007#define VIVS_DE_BLOCK4_ADDRESS_U(i0) (0x000128a0 + 0x4*(i0))
1008#define VIVS_DE_BLOCK4_ADDRESS_U__ESIZE 0x00000004
1009#define VIVS_DE_BLOCK4_ADDRESS_U__LEN 0x00000004
1010
1011#define VIVS_DE_BLOCK4_STRIDE_U(i0) (0x000128b0 + 0x4*(i0))
1012#define VIVS_DE_BLOCK4_STRIDE_U__ESIZE 0x00000004
1013#define VIVS_DE_BLOCK4_STRIDE_U__LEN 0x00000004
1014#define VIVS_DE_BLOCK4_STRIDE_U_STRIDE__MASK 0x0003ffff
1015#define VIVS_DE_BLOCK4_STRIDE_U_STRIDE__SHIFT 0
1016#define VIVS_DE_BLOCK4_STRIDE_U_STRIDE(x) (((x) << VIVS_DE_BLOCK4_STRIDE_U_STRIDE__SHIFT) & VIVS_DE_BLOCK4_STRIDE_U_STRIDE__MASK)
1017
1018#define VIVS_DE_BLOCK4_ADDRESS_V(i0) (0x000128c0 + 0x4*(i0))
1019#define VIVS_DE_BLOCK4_ADDRESS_V__ESIZE 0x00000004
1020#define VIVS_DE_BLOCK4_ADDRESS_V__LEN 0x00000004
1021
1022#define VIVS_DE_BLOCK4_STRIDE_V(i0) (0x000128d0 + 0x4*(i0))
1023#define VIVS_DE_BLOCK4_STRIDE_V__ESIZE 0x00000004
1024#define VIVS_DE_BLOCK4_STRIDE_V__LEN 0x00000004
1025#define VIVS_DE_BLOCK4_STRIDE_V_STRIDE__MASK 0x0003ffff
1026#define VIVS_DE_BLOCK4_STRIDE_V_STRIDE__SHIFT 0
1027#define VIVS_DE_BLOCK4_STRIDE_V_STRIDE(x) (((x) << VIVS_DE_BLOCK4_STRIDE_V_STRIDE__SHIFT) & VIVS_DE_BLOCK4_STRIDE_V_STRIDE__MASK)
1028
1029#define VIVS_DE_BLOCK4_SRC_ROTATION_HEIGHT(i0) (0x000128e0 + 0x4*(i0))
1030#define VIVS_DE_BLOCK4_SRC_ROTATION_HEIGHT__ESIZE 0x00000004
1031#define VIVS_DE_BLOCK4_SRC_ROTATION_HEIGHT__LEN 0x00000004
1032#define VIVS_DE_BLOCK4_SRC_ROTATION_HEIGHT_HEIGHT__MASK 0x0000ffff
1033#define VIVS_DE_BLOCK4_SRC_ROTATION_HEIGHT_HEIGHT__SHIFT 0
1034#define VIVS_DE_BLOCK4_SRC_ROTATION_HEIGHT_HEIGHT(x) (((x) << VIVS_DE_BLOCK4_SRC_ROTATION_HEIGHT_HEIGHT__SHIFT) & VIVS_DE_BLOCK4_SRC_ROTATION_HEIGHT_HEIGHT__MASK)
1035
1036#define VIVS_DE_BLOCK4_ROT_ANGLE(i0) (0x000128f0 + 0x4*(i0))
1037#define VIVS_DE_BLOCK4_ROT_ANGLE__ESIZE 0x00000004
1038#define VIVS_DE_BLOCK4_ROT_ANGLE__LEN 0x00000004
1039#define VIVS_DE_BLOCK4_ROT_ANGLE_SRC__MASK 0x00000007
1040#define VIVS_DE_BLOCK4_ROT_ANGLE_SRC__SHIFT 0
1041#define VIVS_DE_BLOCK4_ROT_ANGLE_SRC(x) (((x) << VIVS_DE_BLOCK4_ROT_ANGLE_SRC__SHIFT) & VIVS_DE_BLOCK4_ROT_ANGLE_SRC__MASK)
1042#define VIVS_DE_BLOCK4_ROT_ANGLE_DST__MASK 0x00000038
1043#define VIVS_DE_BLOCK4_ROT_ANGLE_DST__SHIFT 3
1044#define VIVS_DE_BLOCK4_ROT_ANGLE_DST(x) (((x) << VIVS_DE_BLOCK4_ROT_ANGLE_DST__SHIFT) & VIVS_DE_BLOCK4_ROT_ANGLE_DST__MASK)
1045#define VIVS_DE_BLOCK4_ROT_ANGLE_SRC_MASK 0x00000100
1046#define VIVS_DE_BLOCK4_ROT_ANGLE_DST_MASK 0x00000200
1047#define VIVS_DE_BLOCK4_ROT_ANGLE_SRC_MIRROR__MASK 0x00003000
1048#define VIVS_DE_BLOCK4_ROT_ANGLE_SRC_MIRROR__SHIFT 12
1049#define VIVS_DE_BLOCK4_ROT_ANGLE_SRC_MIRROR(x) (((x) << VIVS_DE_BLOCK4_ROT_ANGLE_SRC_MIRROR__SHIFT) & VIVS_DE_BLOCK4_ROT_ANGLE_SRC_MIRROR__MASK)
1050#define VIVS_DE_BLOCK4_ROT_ANGLE_SRC_MIRROR_MASK 0x00008000
1051#define VIVS_DE_BLOCK4_ROT_ANGLE_DST_MIRROR__MASK 0x00030000
1052#define VIVS_DE_BLOCK4_ROT_ANGLE_DST_MIRROR__SHIFT 16
1053#define VIVS_DE_BLOCK4_ROT_ANGLE_DST_MIRROR(x) (((x) << VIVS_DE_BLOCK4_ROT_ANGLE_DST_MIRROR__SHIFT) & VIVS_DE_BLOCK4_ROT_ANGLE_DST_MIRROR__MASK)
1054#define VIVS_DE_BLOCK4_ROT_ANGLE_DST_MIRROR_MASK 0x00080000
1055
1056#define VIVS_DE_BLOCK4_GLOBAL_SRC_COLOR(i0) (0x00012900 + 0x4*(i0))
1057#define VIVS_DE_BLOCK4_GLOBAL_SRC_COLOR__ESIZE 0x00000004
1058#define VIVS_DE_BLOCK4_GLOBAL_SRC_COLOR__LEN 0x00000004
1059
1060#define VIVS_DE_BLOCK4_GLOBAL_DEST_COLOR(i0) (0x00012910 + 0x4*(i0))
1061#define VIVS_DE_BLOCK4_GLOBAL_DEST_COLOR__ESIZE 0x00000004
1062#define VIVS_DE_BLOCK4_GLOBAL_DEST_COLOR__LEN 0x00000004
1063
1064#define VIVS_DE_BLOCK4_COLOR_MULTIPLY_MODES(i0) (0x00012920 + 0x4*(i0))
1065#define VIVS_DE_BLOCK4_COLOR_MULTIPLY_MODES__ESIZE 0x00000004
1066#define VIVS_DE_BLOCK4_COLOR_MULTIPLY_MODES__LEN 0x00000004
1067#define VIVS_DE_BLOCK4_COLOR_MULTIPLY_MODES_SRC_PREMULTIPLY__MASK 0x00000001
1068#define VIVS_DE_BLOCK4_COLOR_MULTIPLY_MODES_SRC_PREMULTIPLY__SHIFT 0
1069#define VIVS_DE_BLOCK4_COLOR_MULTIPLY_MODES_SRC_PREMULTIPLY_DISABLE 0x00000000
1070#define VIVS_DE_BLOCK4_COLOR_MULTIPLY_MODES_SRC_PREMULTIPLY_ENABLE 0x00000001
1071#define VIVS_DE_BLOCK4_COLOR_MULTIPLY_MODES_DST_PREMULTIPLY__MASK 0x00000010
1072#define VIVS_DE_BLOCK4_COLOR_MULTIPLY_MODES_DST_PREMULTIPLY__SHIFT 4
1073#define VIVS_DE_BLOCK4_COLOR_MULTIPLY_MODES_DST_PREMULTIPLY_DISABLE 0x00000000
1074#define VIVS_DE_BLOCK4_COLOR_MULTIPLY_MODES_DST_PREMULTIPLY_ENABLE 0x00000010
1075#define VIVS_DE_BLOCK4_COLOR_MULTIPLY_MODES_SRC_GLOBAL_PREMULTIPLY__MASK 0x00000300
1076#define VIVS_DE_BLOCK4_COLOR_MULTIPLY_MODES_SRC_GLOBAL_PREMULTIPLY__SHIFT 8
1077#define VIVS_DE_BLOCK4_COLOR_MULTIPLY_MODES_SRC_GLOBAL_PREMULTIPLY_DISABLE 0x00000000
1078#define VIVS_DE_BLOCK4_COLOR_MULTIPLY_MODES_SRC_GLOBAL_PREMULTIPLY_ALPHA 0x00000100
1079#define VIVS_DE_BLOCK4_COLOR_MULTIPLY_MODES_SRC_GLOBAL_PREMULTIPLY_COLOR 0x00000200
1080#define VIVS_DE_BLOCK4_COLOR_MULTIPLY_MODES_DST_DEMULTIPLY__MASK 0x00100000
1081#define VIVS_DE_BLOCK4_COLOR_MULTIPLY_MODES_DST_DEMULTIPLY__SHIFT 20
1082#define VIVS_DE_BLOCK4_COLOR_MULTIPLY_MODES_DST_DEMULTIPLY_DISABLE 0x00000000
1083#define VIVS_DE_BLOCK4_COLOR_MULTIPLY_MODES_DST_DEMULTIPLY_ENABLE 0x00100000
1084
1085#define VIVS_DE_BLOCK4_TRANSPARENCY(i0) (0x00012930 + 0x4*(i0))
1086#define VIVS_DE_BLOCK4_TRANSPARENCY__ESIZE 0x00000004
1087#define VIVS_DE_BLOCK4_TRANSPARENCY__LEN 0x00000004
1088#define VIVS_DE_BLOCK4_TRANSPARENCY_SOURCE__MASK 0x00000003
1089#define VIVS_DE_BLOCK4_TRANSPARENCY_SOURCE__SHIFT 0
1090#define VIVS_DE_BLOCK4_TRANSPARENCY_SOURCE_OPAQUE 0x00000000
1091#define VIVS_DE_BLOCK4_TRANSPARENCY_SOURCE_MASK 0x00000001
1092#define VIVS_DE_BLOCK4_TRANSPARENCY_SOURCE_KEY 0x00000002
1093#define VIVS_DE_BLOCK4_TRANSPARENCY_PATTERN__MASK 0x00000030
1094#define VIVS_DE_BLOCK4_TRANSPARENCY_PATTERN__SHIFT 4
1095#define VIVS_DE_BLOCK4_TRANSPARENCY_PATTERN_OPAQUE 0x00000000
1096#define VIVS_DE_BLOCK4_TRANSPARENCY_PATTERN_MASK 0x00000010
1097#define VIVS_DE_BLOCK4_TRANSPARENCY_PATTERN_KEY 0x00000020
1098#define VIVS_DE_BLOCK4_TRANSPARENCY_DESTINATION__MASK 0x00000300
1099#define VIVS_DE_BLOCK4_TRANSPARENCY_DESTINATION__SHIFT 8
1100#define VIVS_DE_BLOCK4_TRANSPARENCY_DESTINATION_OPAQUE 0x00000000
1101#define VIVS_DE_BLOCK4_TRANSPARENCY_DESTINATION_MASK 0x00000100
1102#define VIVS_DE_BLOCK4_TRANSPARENCY_DESTINATION_KEY 0x00000200
1103#define VIVS_DE_BLOCK4_TRANSPARENCY_TRANSPARENCY_MASK 0x00001000
1104#define VIVS_DE_BLOCK4_TRANSPARENCY_USE_SRC_OVERRIDE__MASK 0x00030000
1105#define VIVS_DE_BLOCK4_TRANSPARENCY_USE_SRC_OVERRIDE__SHIFT 16
1106#define VIVS_DE_BLOCK4_TRANSPARENCY_USE_SRC_OVERRIDE_DEFAULT 0x00000000
1107#define VIVS_DE_BLOCK4_TRANSPARENCY_USE_SRC_OVERRIDE_USE_ENABLE 0x00010000
1108#define VIVS_DE_BLOCK4_TRANSPARENCY_USE_SRC_OVERRIDE_USE_DISABLE 0x00020000
1109#define VIVS_DE_BLOCK4_TRANSPARENCY_USE_PAT_OVERRIDE__MASK 0x00300000
1110#define VIVS_DE_BLOCK4_TRANSPARENCY_USE_PAT_OVERRIDE__SHIFT 20
1111#define VIVS_DE_BLOCK4_TRANSPARENCY_USE_PAT_OVERRIDE_DEFAULT 0x00000000
1112#define VIVS_DE_BLOCK4_TRANSPARENCY_USE_PAT_OVERRIDE_USE_ENABLE 0x00100000
1113#define VIVS_DE_BLOCK4_TRANSPARENCY_USE_PAT_OVERRIDE_USE_DISABLE 0x00200000
1114#define VIVS_DE_BLOCK4_TRANSPARENCY_USE_DST_OVERRIDE__MASK 0x03000000
1115#define VIVS_DE_BLOCK4_TRANSPARENCY_USE_DST_OVERRIDE__SHIFT 24
1116#define VIVS_DE_BLOCK4_TRANSPARENCY_USE_DST_OVERRIDE_DEFAULT 0x00000000
1117#define VIVS_DE_BLOCK4_TRANSPARENCY_USE_DST_OVERRIDE_USE_ENABLE 0x01000000
1118#define VIVS_DE_BLOCK4_TRANSPARENCY_USE_DST_OVERRIDE_USE_DISABLE 0x02000000
1119#define VIVS_DE_BLOCK4_TRANSPARENCY_RESOURCE_OVERRIDE_MASK 0x10000000
1120#define VIVS_DE_BLOCK4_TRANSPARENCY_DFB_COLOR_KEY__MASK 0x20000000
1121#define VIVS_DE_BLOCK4_TRANSPARENCY_DFB_COLOR_KEY__SHIFT 29
1122#define VIVS_DE_BLOCK4_TRANSPARENCY_DFB_COLOR_KEY_DISABLE 0x00000000
1123#define VIVS_DE_BLOCK4_TRANSPARENCY_DFB_COLOR_KEY_ENABLE 0x20000000
1124#define VIVS_DE_BLOCK4_TRANSPARENCY_DFB_COLOR_KEY_MASK 0x80000000
1125
1126#define VIVS_DE_BLOCK4_CONTROL(i0) (0x00012940 + 0x4*(i0))
1127#define VIVS_DE_BLOCK4_CONTROL__ESIZE 0x00000004
1128#define VIVS_DE_BLOCK4_CONTROL__LEN 0x00000004
1129#define VIVS_DE_BLOCK4_CONTROL_YUV__MASK 0x00000001
1130#define VIVS_DE_BLOCK4_CONTROL_YUV__SHIFT 0
1131#define VIVS_DE_BLOCK4_CONTROL_YUV_601 0x00000000
1132#define VIVS_DE_BLOCK4_CONTROL_YUV_709 0x00000001
1133#define VIVS_DE_BLOCK4_CONTROL_YUV_MASK 0x00000008
1134#define VIVS_DE_BLOCK4_CONTROL_UV_SWIZZLE__MASK 0x00000010
1135#define VIVS_DE_BLOCK4_CONTROL_UV_SWIZZLE__SHIFT 4
1136#define VIVS_DE_BLOCK4_CONTROL_UV_SWIZZLE_UV 0x00000000
1137#define VIVS_DE_BLOCK4_CONTROL_UV_SWIZZLE_VU 0x00000010
1138#define VIVS_DE_BLOCK4_CONTROL_UV_SWIZZLE_MASK 0x00000080
1139#define VIVS_DE_BLOCK4_CONTROL_YUVRGB__MASK 0x00000100
1140#define VIVS_DE_BLOCK4_CONTROL_YUVRGB__SHIFT 8
1141#define VIVS_DE_BLOCK4_CONTROL_YUVRGB_DISABLE 0x00000000
1142#define VIVS_DE_BLOCK4_CONTROL_YUVRGB_ENABLE 0x00000100
1143#define VIVS_DE_BLOCK4_CONTROL_YUVRGB_MASK 0x00000800
1144
1145#define VIVS_DE_BLOCK4_SRC_COLOR_KEY_HIGH(i0) (0x00012950 + 0x4*(i0))
1146#define VIVS_DE_BLOCK4_SRC_COLOR_KEY_HIGH__ESIZE 0x00000004
1147#define VIVS_DE_BLOCK4_SRC_COLOR_KEY_HIGH__LEN 0x00000004
1148
1149#define VIVS_DE_BLOCK4_SRC_EX_CONFIG(i0) (0x00012960 + 0x4*(i0))
1150#define VIVS_DE_BLOCK4_SRC_EX_CONFIG__ESIZE 0x00000004
1151#define VIVS_DE_BLOCK4_SRC_EX_CONFIG__LEN 0x00000004
1152#define VIVS_DE_BLOCK4_SRC_EX_CONFIG_MULTI_TILED__MASK 0x00000001
1153#define VIVS_DE_BLOCK4_SRC_EX_CONFIG_MULTI_TILED__SHIFT 0
1154#define VIVS_DE_BLOCK4_SRC_EX_CONFIG_MULTI_TILED_DISABLE 0x00000000
1155#define VIVS_DE_BLOCK4_SRC_EX_CONFIG_MULTI_TILED_ENABLE 0x00000001
1156#define VIVS_DE_BLOCK4_SRC_EX_CONFIG_SUPER_TILED__MASK 0x00000008
1157#define VIVS_DE_BLOCK4_SRC_EX_CONFIG_SUPER_TILED__SHIFT 3
1158#define VIVS_DE_BLOCK4_SRC_EX_CONFIG_SUPER_TILED_DISABLE 0x00000000
1159#define VIVS_DE_BLOCK4_SRC_EX_CONFIG_SUPER_TILED_ENABLE 0x00000008
1160#define VIVS_DE_BLOCK4_SRC_EX_CONFIG_MINOR_TILED__MASK 0x00000100
1161#define VIVS_DE_BLOCK4_SRC_EX_CONFIG_MINOR_TILED__SHIFT 8
1162#define VIVS_DE_BLOCK4_SRC_EX_CONFIG_MINOR_TILED_DISABLE 0x00000000
1163#define VIVS_DE_BLOCK4_SRC_EX_CONFIG_MINOR_TILED_ENABLE 0x00000100
1164
1165#define VIVS_DE_BLOCK4_SRC_EX_ADDRESS(i0) (0x00012970 + 0x4*(i0))
1166#define VIVS_DE_BLOCK4_SRC_EX_ADDRESS__ESIZE 0x00000004
1167#define VIVS_DE_BLOCK4_SRC_EX_ADDRESS__LEN 0x00000004
1168
/* Base marker for the 8-source DE state block (register arrays below hold
 * one entry per blit source; i0 selects the source). */
#define VIVS_DE_BLOCK8 0x00000000

/* Source surface base address, one 32-bit register per source.
 * i0 = source index 0..7 (__LEN is 8), registers 4 bytes apart (__ESIZE). */
#define VIVS_DE_BLOCK8_SRC_ADDRESS(i0) (0x00012a00 + 0x4*(i0))
#define VIVS_DE_BLOCK8_SRC_ADDRESS__ESIZE 0x00000004
#define VIVS_DE_BLOCK8_SRC_ADDRESS__LEN 0x00000008

/* Source surface stride in bytes, one register per source (i0 = 0..7).
 * Only the low 18 bits are significant (STRIDE__MASK). */
#define VIVS_DE_BLOCK8_SRC_STRIDE(i0) (0x00012a20 + 0x4*(i0))
#define VIVS_DE_BLOCK8_SRC_STRIDE__ESIZE 0x00000004
#define VIVS_DE_BLOCK8_SRC_STRIDE__LEN 0x00000008
#define VIVS_DE_BLOCK8_SRC_STRIDE_STRIDE__MASK 0x0003ffff
#define VIVS_DE_BLOCK8_SRC_STRIDE_STRIDE__SHIFT 0
#define VIVS_DE_BLOCK8_SRC_STRIDE_STRIDE(x) (((x) << VIVS_DE_BLOCK8_SRC_STRIDE_STRIDE__SHIFT) & VIVS_DE_BLOCK8_SRC_STRIDE_STRIDE__MASK)
1181
1182#define VIVS_DE_BLOCK8_SRC_ROTATION_CONFIG(i0) (0x00012a40 + 0x4*(i0))
1183#define VIVS_DE_BLOCK8_SRC_ROTATION_CONFIG__ESIZE 0x00000004
1184#define VIVS_DE_BLOCK8_SRC_ROTATION_CONFIG__LEN 0x00000008
1185#define VIVS_DE_BLOCK8_SRC_ROTATION_CONFIG_WIDTH__MASK 0x0000ffff
1186#define VIVS_DE_BLOCK8_SRC_ROTATION_CONFIG_WIDTH__SHIFT 0
1187#define VIVS_DE_BLOCK8_SRC_ROTATION_CONFIG_WIDTH(x) (((x) << VIVS_DE_BLOCK8_SRC_ROTATION_CONFIG_WIDTH__SHIFT) & VIVS_DE_BLOCK8_SRC_ROTATION_CONFIG_WIDTH__MASK)
1188#define VIVS_DE_BLOCK8_SRC_ROTATION_CONFIG_ROTATION__MASK 0x00010000
1189#define VIVS_DE_BLOCK8_SRC_ROTATION_CONFIG_ROTATION__SHIFT 16
1190#define VIVS_DE_BLOCK8_SRC_ROTATION_CONFIG_ROTATION_DISABLE 0x00000000
1191#define VIVS_DE_BLOCK8_SRC_ROTATION_CONFIG_ROTATION_ENABLE 0x00010000
1192
1193#define VIVS_DE_BLOCK8_SRC_CONFIG(i0) (0x00012a60 + 0x4*(i0))
1194#define VIVS_DE_BLOCK8_SRC_CONFIG__ESIZE 0x00000004
1195#define VIVS_DE_BLOCK8_SRC_CONFIG__LEN 0x00000008
1196#define VIVS_DE_BLOCK8_SRC_CONFIG_PE10_SOURCE_FORMAT__MASK 0x0000000f
1197#define VIVS_DE_BLOCK8_SRC_CONFIG_PE10_SOURCE_FORMAT__SHIFT 0
1198#define VIVS_DE_BLOCK8_SRC_CONFIG_PE10_SOURCE_FORMAT(x) (((x) << VIVS_DE_BLOCK8_SRC_CONFIG_PE10_SOURCE_FORMAT__SHIFT) & VIVS_DE_BLOCK8_SRC_CONFIG_PE10_SOURCE_FORMAT__MASK)
1199#define VIVS_DE_BLOCK8_SRC_CONFIG_TRANSPARENCY__MASK 0x00000030
1200#define VIVS_DE_BLOCK8_SRC_CONFIG_TRANSPARENCY__SHIFT 4
1201#define VIVS_DE_BLOCK8_SRC_CONFIG_TRANSPARENCY(x) (((x) << VIVS_DE_BLOCK8_SRC_CONFIG_TRANSPARENCY__SHIFT) & VIVS_DE_BLOCK8_SRC_CONFIG_TRANSPARENCY__MASK)
1202#define VIVS_DE_BLOCK8_SRC_CONFIG_SRC_RELATIVE__MASK 0x00000040
1203#define VIVS_DE_BLOCK8_SRC_CONFIG_SRC_RELATIVE__SHIFT 6
1204#define VIVS_DE_BLOCK8_SRC_CONFIG_SRC_RELATIVE_ABSOLUTE 0x00000000
1205#define VIVS_DE_BLOCK8_SRC_CONFIG_SRC_RELATIVE_RELATIVE 0x00000040
1206#define VIVS_DE_BLOCK8_SRC_CONFIG_TILED__MASK 0x00000080
1207#define VIVS_DE_BLOCK8_SRC_CONFIG_TILED__SHIFT 7
1208#define VIVS_DE_BLOCK8_SRC_CONFIG_TILED_DISABLE 0x00000000
1209#define VIVS_DE_BLOCK8_SRC_CONFIG_TILED_ENABLE 0x00000080
1210#define VIVS_DE_BLOCK8_SRC_CONFIG_LOCATION__MASK 0x00000100
1211#define VIVS_DE_BLOCK8_SRC_CONFIG_LOCATION__SHIFT 8
1212#define VIVS_DE_BLOCK8_SRC_CONFIG_LOCATION_MEMORY 0x00000000
1213#define VIVS_DE_BLOCK8_SRC_CONFIG_LOCATION_STREAM 0x00000100
1214#define VIVS_DE_BLOCK8_SRC_CONFIG_PACK__MASK 0x00003000
1215#define VIVS_DE_BLOCK8_SRC_CONFIG_PACK__SHIFT 12
1216#define VIVS_DE_BLOCK8_SRC_CONFIG_PACK_PACKED8 0x00000000
1217#define VIVS_DE_BLOCK8_SRC_CONFIG_PACK_PACKED16 0x00001000
1218#define VIVS_DE_BLOCK8_SRC_CONFIG_PACK_PACKED32 0x00002000
1219#define VIVS_DE_BLOCK8_SRC_CONFIG_PACK_UNPACKED 0x00003000
1220#define VIVS_DE_BLOCK8_SRC_CONFIG_MONO_TRANSPARENCY__MASK 0x00008000
1221#define VIVS_DE_BLOCK8_SRC_CONFIG_MONO_TRANSPARENCY__SHIFT 15
1222#define VIVS_DE_BLOCK8_SRC_CONFIG_MONO_TRANSPARENCY_BACKGROUND 0x00000000
1223#define VIVS_DE_BLOCK8_SRC_CONFIG_MONO_TRANSPARENCY_FOREGROUND 0x00008000
1224#define VIVS_DE_BLOCK8_SRC_CONFIG_UNK16 0x00010000
1225#define VIVS_DE_BLOCK8_SRC_CONFIG_SWIZZLE__MASK 0x00300000
1226#define VIVS_DE_BLOCK8_SRC_CONFIG_SWIZZLE__SHIFT 20
1227#define VIVS_DE_BLOCK8_SRC_CONFIG_SWIZZLE(x) (((x) << VIVS_DE_BLOCK8_SRC_CONFIG_SWIZZLE__SHIFT) & VIVS_DE_BLOCK8_SRC_CONFIG_SWIZZLE__MASK)
1228#define VIVS_DE_BLOCK8_SRC_CONFIG_SOURCE_FORMAT__MASK 0x1f000000
1229#define VIVS_DE_BLOCK8_SRC_CONFIG_SOURCE_FORMAT__SHIFT 24
1230#define VIVS_DE_BLOCK8_SRC_CONFIG_SOURCE_FORMAT(x) (((x) << VIVS_DE_BLOCK8_SRC_CONFIG_SOURCE_FORMAT__SHIFT) & VIVS_DE_BLOCK8_SRC_CONFIG_SOURCE_FORMAT__MASK)
1231#define VIVS_DE_BLOCK8_SRC_CONFIG_DISABLE420_L2_CACHE 0x20000000
1232#define VIVS_DE_BLOCK8_SRC_CONFIG_ENDIAN_CONTROL__MASK 0xc0000000
1233#define VIVS_DE_BLOCK8_SRC_CONFIG_ENDIAN_CONTROL__SHIFT 30
1234#define VIVS_DE_BLOCK8_SRC_CONFIG_ENDIAN_CONTROL(x) (((x) << VIVS_DE_BLOCK8_SRC_CONFIG_ENDIAN_CONTROL__SHIFT) & VIVS_DE_BLOCK8_SRC_CONFIG_ENDIAN_CONTROL__MASK)
1235
1236#define VIVS_DE_BLOCK8_SRC_ORIGIN(i0) (0x00012a80 + 0x4*(i0))
1237#define VIVS_DE_BLOCK8_SRC_ORIGIN__ESIZE 0x00000004
1238#define VIVS_DE_BLOCK8_SRC_ORIGIN__LEN 0x00000008
1239#define VIVS_DE_BLOCK8_SRC_ORIGIN_X__MASK 0x0000ffff
1240#define VIVS_DE_BLOCK8_SRC_ORIGIN_X__SHIFT 0
1241#define VIVS_DE_BLOCK8_SRC_ORIGIN_X(x) (((x) << VIVS_DE_BLOCK8_SRC_ORIGIN_X__SHIFT) & VIVS_DE_BLOCK8_SRC_ORIGIN_X__MASK)
1242#define VIVS_DE_BLOCK8_SRC_ORIGIN_Y__MASK 0xffff0000
1243#define VIVS_DE_BLOCK8_SRC_ORIGIN_Y__SHIFT 16
1244#define VIVS_DE_BLOCK8_SRC_ORIGIN_Y(x) (((x) << VIVS_DE_BLOCK8_SRC_ORIGIN_Y__SHIFT) & VIVS_DE_BLOCK8_SRC_ORIGIN_Y__MASK)
1245
1246#define VIVS_DE_BLOCK8_SRC_SIZE(i0) (0x00012aa0 + 0x4*(i0))
1247#define VIVS_DE_BLOCK8_SRC_SIZE__ESIZE 0x00000004
1248#define VIVS_DE_BLOCK8_SRC_SIZE__LEN 0x00000008
1249#define VIVS_DE_BLOCK8_SRC_SIZE_X__MASK 0x0000ffff
1250#define VIVS_DE_BLOCK8_SRC_SIZE_X__SHIFT 0
1251#define VIVS_DE_BLOCK8_SRC_SIZE_X(x) (((x) << VIVS_DE_BLOCK8_SRC_SIZE_X__SHIFT) & VIVS_DE_BLOCK8_SRC_SIZE_X__MASK)
1252#define VIVS_DE_BLOCK8_SRC_SIZE_Y__MASK 0xffff0000
1253#define VIVS_DE_BLOCK8_SRC_SIZE_Y__SHIFT 16
1254#define VIVS_DE_BLOCK8_SRC_SIZE_Y(x) (((x) << VIVS_DE_BLOCK8_SRC_SIZE_Y__SHIFT) & VIVS_DE_BLOCK8_SRC_SIZE_Y__MASK)
1255
1256#define VIVS_DE_BLOCK8_SRC_COLOR_BG(i0) (0x00012ac0 + 0x4*(i0))
1257#define VIVS_DE_BLOCK8_SRC_COLOR_BG__ESIZE 0x00000004
1258#define VIVS_DE_BLOCK8_SRC_COLOR_BG__LEN 0x00000008
1259
1260#define VIVS_DE_BLOCK8_ROP(i0) (0x00012ae0 + 0x4*(i0))
1261#define VIVS_DE_BLOCK8_ROP__ESIZE 0x00000004
1262#define VIVS_DE_BLOCK8_ROP__LEN 0x00000008
1263#define VIVS_DE_BLOCK8_ROP_ROP_FG__MASK 0x000000ff
1264#define VIVS_DE_BLOCK8_ROP_ROP_FG__SHIFT 0
1265#define VIVS_DE_BLOCK8_ROP_ROP_FG(x) (((x) << VIVS_DE_BLOCK8_ROP_ROP_FG__SHIFT) & VIVS_DE_BLOCK8_ROP_ROP_FG__MASK)
1266#define VIVS_DE_BLOCK8_ROP_ROP_BG__MASK 0x0000ff00
1267#define VIVS_DE_BLOCK8_ROP_ROP_BG__SHIFT 8
1268#define VIVS_DE_BLOCK8_ROP_ROP_BG(x) (((x) << VIVS_DE_BLOCK8_ROP_ROP_BG__SHIFT) & VIVS_DE_BLOCK8_ROP_ROP_BG__MASK)
1269#define VIVS_DE_BLOCK8_ROP_TYPE__MASK 0x00300000
1270#define VIVS_DE_BLOCK8_ROP_TYPE__SHIFT 20
1271#define VIVS_DE_BLOCK8_ROP_TYPE_ROP2_PATTERN 0x00000000
1272#define VIVS_DE_BLOCK8_ROP_TYPE_ROP2_SOURCE 0x00100000
1273#define VIVS_DE_BLOCK8_ROP_TYPE_ROP3 0x00200000
1274#define VIVS_DE_BLOCK8_ROP_TYPE_ROP4 0x00300000
1275
1276#define VIVS_DE_BLOCK8_ALPHA_CONTROL(i0) (0x00012b00 + 0x4*(i0))
1277#define VIVS_DE_BLOCK8_ALPHA_CONTROL__ESIZE 0x00000004
1278#define VIVS_DE_BLOCK8_ALPHA_CONTROL__LEN 0x00000008
1279#define VIVS_DE_BLOCK8_ALPHA_CONTROL_ENABLE__MASK 0x00000001
1280#define VIVS_DE_BLOCK8_ALPHA_CONTROL_ENABLE__SHIFT 0
1281#define VIVS_DE_BLOCK8_ALPHA_CONTROL_ENABLE_OFF 0x00000000
1282#define VIVS_DE_BLOCK8_ALPHA_CONTROL_ENABLE_ON 0x00000001
1283#define VIVS_DE_BLOCK8_ALPHA_CONTROL_PE10_GLOBAL_SRC_ALPHA__MASK 0x00ff0000
1284#define VIVS_DE_BLOCK8_ALPHA_CONTROL_PE10_GLOBAL_SRC_ALPHA__SHIFT 16
1285#define VIVS_DE_BLOCK8_ALPHA_CONTROL_PE10_GLOBAL_SRC_ALPHA(x) (((x) << VIVS_DE_BLOCK8_ALPHA_CONTROL_PE10_GLOBAL_SRC_ALPHA__SHIFT) & VIVS_DE_BLOCK8_ALPHA_CONTROL_PE10_GLOBAL_SRC_ALPHA__MASK)
1286#define VIVS_DE_BLOCK8_ALPHA_CONTROL_PE10_GLOBAL_DST_ALPHA__MASK 0xff000000
1287#define VIVS_DE_BLOCK8_ALPHA_CONTROL_PE10_GLOBAL_DST_ALPHA__SHIFT 24
1288#define VIVS_DE_BLOCK8_ALPHA_CONTROL_PE10_GLOBAL_DST_ALPHA(x) (((x) << VIVS_DE_BLOCK8_ALPHA_CONTROL_PE10_GLOBAL_DST_ALPHA__SHIFT) & VIVS_DE_BLOCK8_ALPHA_CONTROL_PE10_GLOBAL_DST_ALPHA__MASK)
1289
1290#define VIVS_DE_BLOCK8_ALPHA_MODES(i0) (0x00012b20 + 0x4*(i0))
1291#define VIVS_DE_BLOCK8_ALPHA_MODES__ESIZE 0x00000004
1292#define VIVS_DE_BLOCK8_ALPHA_MODES__LEN 0x00000008
1293#define VIVS_DE_BLOCK8_ALPHA_MODES_SRC_ALPHA_MODE__MASK 0x00000001
1294#define VIVS_DE_BLOCK8_ALPHA_MODES_SRC_ALPHA_MODE__SHIFT 0
1295#define VIVS_DE_BLOCK8_ALPHA_MODES_SRC_ALPHA_MODE_NORMAL 0x00000000
1296#define VIVS_DE_BLOCK8_ALPHA_MODES_SRC_ALPHA_MODE_INVERSED 0x00000001
1297#define VIVS_DE_BLOCK8_ALPHA_MODES_DST_ALPHA_MODE__MASK 0x00000010
1298#define VIVS_DE_BLOCK8_ALPHA_MODES_DST_ALPHA_MODE__SHIFT 4
1299#define VIVS_DE_BLOCK8_ALPHA_MODES_DST_ALPHA_MODE_NORMAL 0x00000000
1300#define VIVS_DE_BLOCK8_ALPHA_MODES_DST_ALPHA_MODE_INVERSED 0x00000010
1301#define VIVS_DE_BLOCK8_ALPHA_MODES_GLOBAL_SRC_ALPHA_MODE__MASK 0x00000300
1302#define VIVS_DE_BLOCK8_ALPHA_MODES_GLOBAL_SRC_ALPHA_MODE__SHIFT 8
1303#define VIVS_DE_BLOCK8_ALPHA_MODES_GLOBAL_SRC_ALPHA_MODE_NORMAL 0x00000000
1304#define VIVS_DE_BLOCK8_ALPHA_MODES_GLOBAL_SRC_ALPHA_MODE_GLOBAL 0x00000100
1305#define VIVS_DE_BLOCK8_ALPHA_MODES_GLOBAL_SRC_ALPHA_MODE_SCALED 0x00000200
1306#define VIVS_DE_BLOCK8_ALPHA_MODES_GLOBAL_DST_ALPHA_MODE__MASK 0x00003000
1307#define VIVS_DE_BLOCK8_ALPHA_MODES_GLOBAL_DST_ALPHA_MODE__SHIFT 12
1308#define VIVS_DE_BLOCK8_ALPHA_MODES_GLOBAL_DST_ALPHA_MODE_NORMAL 0x00000000
1309#define VIVS_DE_BLOCK8_ALPHA_MODES_GLOBAL_DST_ALPHA_MODE_GLOBAL 0x00001000
1310#define VIVS_DE_BLOCK8_ALPHA_MODES_GLOBAL_DST_ALPHA_MODE_SCALED 0x00002000
1311#define VIVS_DE_BLOCK8_ALPHA_MODES_PE10_SRC_COLOR_MULTIPLY__MASK 0x00010000
1312#define VIVS_DE_BLOCK8_ALPHA_MODES_PE10_SRC_COLOR_MULTIPLY__SHIFT 16
1313#define VIVS_DE_BLOCK8_ALPHA_MODES_PE10_SRC_COLOR_MULTIPLY_DISABLE 0x00000000
1314#define VIVS_DE_BLOCK8_ALPHA_MODES_PE10_SRC_COLOR_MULTIPLY_ENABLE 0x00010000
1315#define VIVS_DE_BLOCK8_ALPHA_MODES_PE10_DST_COLOR_MULTIPLY__MASK 0x00100000
1316#define VIVS_DE_BLOCK8_ALPHA_MODES_PE10_DST_COLOR_MULTIPLY__SHIFT 20
1317#define VIVS_DE_BLOCK8_ALPHA_MODES_PE10_DST_COLOR_MULTIPLY_DISABLE 0x00000000
1318#define VIVS_DE_BLOCK8_ALPHA_MODES_PE10_DST_COLOR_MULTIPLY_ENABLE 0x00100000
1319#define VIVS_DE_BLOCK8_ALPHA_MODES_SRC_BLENDING_MODE__MASK 0x07000000
1320#define VIVS_DE_BLOCK8_ALPHA_MODES_SRC_BLENDING_MODE__SHIFT 24
1321#define VIVS_DE_BLOCK8_ALPHA_MODES_SRC_BLENDING_MODE(x) (((x) << VIVS_DE_BLOCK8_ALPHA_MODES_SRC_BLENDING_MODE__SHIFT) & VIVS_DE_BLOCK8_ALPHA_MODES_SRC_BLENDING_MODE__MASK)
1322#define VIVS_DE_BLOCK8_ALPHA_MODES_SRC_ALPHA_FACTOR__MASK 0x08000000
1323#define VIVS_DE_BLOCK8_ALPHA_MODES_SRC_ALPHA_FACTOR__SHIFT 27
1324#define VIVS_DE_BLOCK8_ALPHA_MODES_SRC_ALPHA_FACTOR_DISABLE 0x00000000
1325#define VIVS_DE_BLOCK8_ALPHA_MODES_SRC_ALPHA_FACTOR_ENABLE 0x08000000
1326#define VIVS_DE_BLOCK8_ALPHA_MODES_DST_BLENDING_MODE__MASK 0x70000000
1327#define VIVS_DE_BLOCK8_ALPHA_MODES_DST_BLENDING_MODE__SHIFT 28
1328#define VIVS_DE_BLOCK8_ALPHA_MODES_DST_BLENDING_MODE(x) (((x) << VIVS_DE_BLOCK8_ALPHA_MODES_DST_BLENDING_MODE__SHIFT) & VIVS_DE_BLOCK8_ALPHA_MODES_DST_BLENDING_MODE__MASK)
1329#define VIVS_DE_BLOCK8_ALPHA_MODES_DST_ALPHA_FACTOR__MASK 0x80000000
1330#define VIVS_DE_BLOCK8_ALPHA_MODES_DST_ALPHA_FACTOR__SHIFT 31
1331#define VIVS_DE_BLOCK8_ALPHA_MODES_DST_ALPHA_FACTOR_DISABLE 0x00000000
1332#define VIVS_DE_BLOCK8_ALPHA_MODES_DST_ALPHA_FACTOR_ENABLE 0x80000000
1333
1334#define VIVS_DE_BLOCK8_ADDRESS_U(i0) (0x00012b40 + 0x4*(i0))
1335#define VIVS_DE_BLOCK8_ADDRESS_U__ESIZE 0x00000004
1336#define VIVS_DE_BLOCK8_ADDRESS_U__LEN 0x00000008
1337
1338#define VIVS_DE_BLOCK8_STRIDE_U(i0) (0x00012b60 + 0x4*(i0))
1339#define VIVS_DE_BLOCK8_STRIDE_U__ESIZE 0x00000004
1340#define VIVS_DE_BLOCK8_STRIDE_U__LEN 0x00000008
1341#define VIVS_DE_BLOCK8_STRIDE_U_STRIDE__MASK 0x0003ffff
1342#define VIVS_DE_BLOCK8_STRIDE_U_STRIDE__SHIFT 0
1343#define VIVS_DE_BLOCK8_STRIDE_U_STRIDE(x) (((x) << VIVS_DE_BLOCK8_STRIDE_U_STRIDE__SHIFT) & VIVS_DE_BLOCK8_STRIDE_U_STRIDE__MASK)
1344
1345#define VIVS_DE_BLOCK8_ADDRESS_V(i0) (0x00012b80 + 0x4*(i0))
1346#define VIVS_DE_BLOCK8_ADDRESS_V__ESIZE 0x00000004
1347#define VIVS_DE_BLOCK8_ADDRESS_V__LEN 0x00000008
1348
1349#define VIVS_DE_BLOCK8_STRIDE_V(i0) (0x00012ba0 + 0x4*(i0))
1350#define VIVS_DE_BLOCK8_STRIDE_V__ESIZE 0x00000004
1351#define VIVS_DE_BLOCK8_STRIDE_V__LEN 0x00000008
1352#define VIVS_DE_BLOCK8_STRIDE_V_STRIDE__MASK 0x0003ffff
1353#define VIVS_DE_BLOCK8_STRIDE_V_STRIDE__SHIFT 0
1354#define VIVS_DE_BLOCK8_STRIDE_V_STRIDE(x) (((x) << VIVS_DE_BLOCK8_STRIDE_V_STRIDE__SHIFT) & VIVS_DE_BLOCK8_STRIDE_V_STRIDE__MASK)
1355
1356#define VIVS_DE_BLOCK8_SRC_ROTATION_HEIGHT(i0) (0x00012bc0 + 0x4*(i0))
1357#define VIVS_DE_BLOCK8_SRC_ROTATION_HEIGHT__ESIZE 0x00000004
1358#define VIVS_DE_BLOCK8_SRC_ROTATION_HEIGHT__LEN 0x00000008
1359#define VIVS_DE_BLOCK8_SRC_ROTATION_HEIGHT_HEIGHT__MASK 0x0000ffff
1360#define VIVS_DE_BLOCK8_SRC_ROTATION_HEIGHT_HEIGHT__SHIFT 0
1361#define VIVS_DE_BLOCK8_SRC_ROTATION_HEIGHT_HEIGHT(x) (((x) << VIVS_DE_BLOCK8_SRC_ROTATION_HEIGHT_HEIGHT__SHIFT) & VIVS_DE_BLOCK8_SRC_ROTATION_HEIGHT_HEIGHT__MASK)
1362
1363#define VIVS_DE_BLOCK8_ROT_ANGLE(i0) (0x00012be0 + 0x4*(i0))
1364#define VIVS_DE_BLOCK8_ROT_ANGLE__ESIZE 0x00000004
1365#define VIVS_DE_BLOCK8_ROT_ANGLE__LEN 0x00000008
1366#define VIVS_DE_BLOCK8_ROT_ANGLE_SRC__MASK 0x00000007
1367#define VIVS_DE_BLOCK8_ROT_ANGLE_SRC__SHIFT 0
1368#define VIVS_DE_BLOCK8_ROT_ANGLE_SRC(x) (((x) << VIVS_DE_BLOCK8_ROT_ANGLE_SRC__SHIFT) & VIVS_DE_BLOCK8_ROT_ANGLE_SRC__MASK)
1369#define VIVS_DE_BLOCK8_ROT_ANGLE_DST__MASK 0x00000038
1370#define VIVS_DE_BLOCK8_ROT_ANGLE_DST__SHIFT 3
1371#define VIVS_DE_BLOCK8_ROT_ANGLE_DST(x) (((x) << VIVS_DE_BLOCK8_ROT_ANGLE_DST__SHIFT) & VIVS_DE_BLOCK8_ROT_ANGLE_DST__MASK)
1372#define VIVS_DE_BLOCK8_ROT_ANGLE_SRC_MASK 0x00000100
1373#define VIVS_DE_BLOCK8_ROT_ANGLE_DST_MASK 0x00000200
1374#define VIVS_DE_BLOCK8_ROT_ANGLE_SRC_MIRROR__MASK 0x00003000
1375#define VIVS_DE_BLOCK8_ROT_ANGLE_SRC_MIRROR__SHIFT 12
1376#define VIVS_DE_BLOCK8_ROT_ANGLE_SRC_MIRROR(x) (((x) << VIVS_DE_BLOCK8_ROT_ANGLE_SRC_MIRROR__SHIFT) & VIVS_DE_BLOCK8_ROT_ANGLE_SRC_MIRROR__MASK)
1377#define VIVS_DE_BLOCK8_ROT_ANGLE_SRC_MIRROR_MASK 0x00008000
1378#define VIVS_DE_BLOCK8_ROT_ANGLE_DST_MIRROR__MASK 0x00030000
1379#define VIVS_DE_BLOCK8_ROT_ANGLE_DST_MIRROR__SHIFT 16
1380#define VIVS_DE_BLOCK8_ROT_ANGLE_DST_MIRROR(x) (((x) << VIVS_DE_BLOCK8_ROT_ANGLE_DST_MIRROR__SHIFT) & VIVS_DE_BLOCK8_ROT_ANGLE_DST_MIRROR__MASK)
1381#define VIVS_DE_BLOCK8_ROT_ANGLE_DST_MIRROR_MASK 0x00080000
1382
1383#define VIVS_DE_BLOCK8_GLOBAL_SRC_COLOR(i0) (0x00012c00 + 0x4*(i0))
1384#define VIVS_DE_BLOCK8_GLOBAL_SRC_COLOR__ESIZE 0x00000004
1385#define VIVS_DE_BLOCK8_GLOBAL_SRC_COLOR__LEN 0x00000008
1386
1387#define VIVS_DE_BLOCK8_GLOBAL_DEST_COLOR(i0) (0x00012c20 + 0x4*(i0))
1388#define VIVS_DE_BLOCK8_GLOBAL_DEST_COLOR__ESIZE 0x00000004
1389#define VIVS_DE_BLOCK8_GLOBAL_DEST_COLOR__LEN 0x00000008
1390
1391#define VIVS_DE_BLOCK8_COLOR_MULTIPLY_MODES(i0) (0x00012c40 + 0x4*(i0))
1392#define VIVS_DE_BLOCK8_COLOR_MULTIPLY_MODES__ESIZE 0x00000004
1393#define VIVS_DE_BLOCK8_COLOR_MULTIPLY_MODES__LEN 0x00000008
1394#define VIVS_DE_BLOCK8_COLOR_MULTIPLY_MODES_SRC_PREMULTIPLY__MASK 0x00000001
1395#define VIVS_DE_BLOCK8_COLOR_MULTIPLY_MODES_SRC_PREMULTIPLY__SHIFT 0
1396#define VIVS_DE_BLOCK8_COLOR_MULTIPLY_MODES_SRC_PREMULTIPLY_DISABLE 0x00000000
1397#define VIVS_DE_BLOCK8_COLOR_MULTIPLY_MODES_SRC_PREMULTIPLY_ENABLE 0x00000001
1398#define VIVS_DE_BLOCK8_COLOR_MULTIPLY_MODES_DST_PREMULTIPLY__MASK 0x00000010
1399#define VIVS_DE_BLOCK8_COLOR_MULTIPLY_MODES_DST_PREMULTIPLY__SHIFT 4
1400#define VIVS_DE_BLOCK8_COLOR_MULTIPLY_MODES_DST_PREMULTIPLY_DISABLE 0x00000000
1401#define VIVS_DE_BLOCK8_COLOR_MULTIPLY_MODES_DST_PREMULTIPLY_ENABLE 0x00000010
1402#define VIVS_DE_BLOCK8_COLOR_MULTIPLY_MODES_SRC_GLOBAL_PREMULTIPLY__MASK 0x00000300
1403#define VIVS_DE_BLOCK8_COLOR_MULTIPLY_MODES_SRC_GLOBAL_PREMULTIPLY__SHIFT 8
1404#define VIVS_DE_BLOCK8_COLOR_MULTIPLY_MODES_SRC_GLOBAL_PREMULTIPLY_DISABLE 0x00000000
1405#define VIVS_DE_BLOCK8_COLOR_MULTIPLY_MODES_SRC_GLOBAL_PREMULTIPLY_ALPHA 0x00000100
1406#define VIVS_DE_BLOCK8_COLOR_MULTIPLY_MODES_SRC_GLOBAL_PREMULTIPLY_COLOR 0x00000200
1407#define VIVS_DE_BLOCK8_COLOR_MULTIPLY_MODES_DST_DEMULTIPLY__MASK 0x00100000
1408#define VIVS_DE_BLOCK8_COLOR_MULTIPLY_MODES_DST_DEMULTIPLY__SHIFT 20
1409#define VIVS_DE_BLOCK8_COLOR_MULTIPLY_MODES_DST_DEMULTIPLY_DISABLE 0x00000000
1410#define VIVS_DE_BLOCK8_COLOR_MULTIPLY_MODES_DST_DEMULTIPLY_ENABLE 0x00100000
1411
1412#define VIVS_DE_BLOCK8_TRANSPARENCY(i0) (0x00012c60 + 0x4*(i0))
1413#define VIVS_DE_BLOCK8_TRANSPARENCY__ESIZE 0x00000004
1414#define VIVS_DE_BLOCK8_TRANSPARENCY__LEN 0x00000008
1415#define VIVS_DE_BLOCK8_TRANSPARENCY_SOURCE__MASK 0x00000003
1416#define VIVS_DE_BLOCK8_TRANSPARENCY_SOURCE__SHIFT 0
1417#define VIVS_DE_BLOCK8_TRANSPARENCY_SOURCE_OPAQUE 0x00000000
1418#define VIVS_DE_BLOCK8_TRANSPARENCY_SOURCE_MASK 0x00000001
1419#define VIVS_DE_BLOCK8_TRANSPARENCY_SOURCE_KEY 0x00000002
1420#define VIVS_DE_BLOCK8_TRANSPARENCY_PATTERN__MASK 0x00000030
1421#define VIVS_DE_BLOCK8_TRANSPARENCY_PATTERN__SHIFT 4
1422#define VIVS_DE_BLOCK8_TRANSPARENCY_PATTERN_OPAQUE 0x00000000
1423#define VIVS_DE_BLOCK8_TRANSPARENCY_PATTERN_MASK 0x00000010
1424#define VIVS_DE_BLOCK8_TRANSPARENCY_PATTERN_KEY 0x00000020
1425#define VIVS_DE_BLOCK8_TRANSPARENCY_DESTINATION__MASK 0x00000300
1426#define VIVS_DE_BLOCK8_TRANSPARENCY_DESTINATION__SHIFT 8
1427#define VIVS_DE_BLOCK8_TRANSPARENCY_DESTINATION_OPAQUE 0x00000000
1428#define VIVS_DE_BLOCK8_TRANSPARENCY_DESTINATION_MASK 0x00000100
1429#define VIVS_DE_BLOCK8_TRANSPARENCY_DESTINATION_KEY 0x00000200
1430#define VIVS_DE_BLOCK8_TRANSPARENCY_TRANSPARENCY_MASK 0x00001000
1431#define VIVS_DE_BLOCK8_TRANSPARENCY_USE_SRC_OVERRIDE__MASK 0x00030000
1432#define VIVS_DE_BLOCK8_TRANSPARENCY_USE_SRC_OVERRIDE__SHIFT 16
1433#define VIVS_DE_BLOCK8_TRANSPARENCY_USE_SRC_OVERRIDE_DEFAULT 0x00000000
1434#define VIVS_DE_BLOCK8_TRANSPARENCY_USE_SRC_OVERRIDE_USE_ENABLE 0x00010000
1435#define VIVS_DE_BLOCK8_TRANSPARENCY_USE_SRC_OVERRIDE_USE_DISABLE 0x00020000
1436#define VIVS_DE_BLOCK8_TRANSPARENCY_USE_PAT_OVERRIDE__MASK 0x00300000
1437#define VIVS_DE_BLOCK8_TRANSPARENCY_USE_PAT_OVERRIDE__SHIFT 20
1438#define VIVS_DE_BLOCK8_TRANSPARENCY_USE_PAT_OVERRIDE_DEFAULT 0x00000000
1439#define VIVS_DE_BLOCK8_TRANSPARENCY_USE_PAT_OVERRIDE_USE_ENABLE 0x00100000
1440#define VIVS_DE_BLOCK8_TRANSPARENCY_USE_PAT_OVERRIDE_USE_DISABLE 0x00200000
1441#define VIVS_DE_BLOCK8_TRANSPARENCY_USE_DST_OVERRIDE__MASK 0x03000000
1442#define VIVS_DE_BLOCK8_TRANSPARENCY_USE_DST_OVERRIDE__SHIFT 24
1443#define VIVS_DE_BLOCK8_TRANSPARENCY_USE_DST_OVERRIDE_DEFAULT 0x00000000
1444#define VIVS_DE_BLOCK8_TRANSPARENCY_USE_DST_OVERRIDE_USE_ENABLE 0x01000000
1445#define VIVS_DE_BLOCK8_TRANSPARENCY_USE_DST_OVERRIDE_USE_DISABLE 0x02000000
1446#define VIVS_DE_BLOCK8_TRANSPARENCY_RESOURCE_OVERRIDE_MASK 0x10000000
1447#define VIVS_DE_BLOCK8_TRANSPARENCY_DFB_COLOR_KEY__MASK 0x20000000
1448#define VIVS_DE_BLOCK8_TRANSPARENCY_DFB_COLOR_KEY__SHIFT 29
1449#define VIVS_DE_BLOCK8_TRANSPARENCY_DFB_COLOR_KEY_DISABLE 0x00000000
1450#define VIVS_DE_BLOCK8_TRANSPARENCY_DFB_COLOR_KEY_ENABLE 0x20000000
1451#define VIVS_DE_BLOCK8_TRANSPARENCY_DFB_COLOR_KEY_MASK 0x80000000
1452
1453#define VIVS_DE_BLOCK8_CONTROL(i0) (0x00012c80 + 0x4*(i0))
1454#define VIVS_DE_BLOCK8_CONTROL__ESIZE 0x00000004
1455#define VIVS_DE_BLOCK8_CONTROL__LEN 0x00000008
1456#define VIVS_DE_BLOCK8_CONTROL_YUV__MASK 0x00000001
1457#define VIVS_DE_BLOCK8_CONTROL_YUV__SHIFT 0
1458#define VIVS_DE_BLOCK8_CONTROL_YUV_601 0x00000000
1459#define VIVS_DE_BLOCK8_CONTROL_YUV_709 0x00000001
1460#define VIVS_DE_BLOCK8_CONTROL_YUV_MASK 0x00000008
1461#define VIVS_DE_BLOCK8_CONTROL_UV_SWIZZLE__MASK 0x00000010
1462#define VIVS_DE_BLOCK8_CONTROL_UV_SWIZZLE__SHIFT 4
1463#define VIVS_DE_BLOCK8_CONTROL_UV_SWIZZLE_UV 0x00000000
1464#define VIVS_DE_BLOCK8_CONTROL_UV_SWIZZLE_VU 0x00000010
1465#define VIVS_DE_BLOCK8_CONTROL_UV_SWIZZLE_MASK 0x00000080
1466#define VIVS_DE_BLOCK8_CONTROL_YUVRGB__MASK 0x00000100
1467#define VIVS_DE_BLOCK8_CONTROL_YUVRGB__SHIFT 8
1468#define VIVS_DE_BLOCK8_CONTROL_YUVRGB_DISABLE 0x00000000
1469#define VIVS_DE_BLOCK8_CONTROL_YUVRGB_ENABLE 0x00000100
1470#define VIVS_DE_BLOCK8_CONTROL_YUVRGB_MASK 0x00000800
1471
1472#define VIVS_DE_BLOCK8_SRC_COLOR_KEY_HIGH(i0) (0x00012ca0 + 0x4*(i0))
1473#define VIVS_DE_BLOCK8_SRC_COLOR_KEY_HIGH__ESIZE 0x00000004
1474#define VIVS_DE_BLOCK8_SRC_COLOR_KEY_HIGH__LEN 0x00000008
1475
1476#define VIVS_DE_BLOCK8_SRC_EX_CONFIG(i0) (0x00012cc0 + 0x4*(i0))
1477#define VIVS_DE_BLOCK8_SRC_EX_CONFIG__ESIZE 0x00000004
1478#define VIVS_DE_BLOCK8_SRC_EX_CONFIG__LEN 0x00000008
1479#define VIVS_DE_BLOCK8_SRC_EX_CONFIG_MULTI_TILED__MASK 0x00000001
1480#define VIVS_DE_BLOCK8_SRC_EX_CONFIG_MULTI_TILED__SHIFT 0
1481#define VIVS_DE_BLOCK8_SRC_EX_CONFIG_MULTI_TILED_DISABLE 0x00000000
1482#define VIVS_DE_BLOCK8_SRC_EX_CONFIG_MULTI_TILED_ENABLE 0x00000001
1483#define VIVS_DE_BLOCK8_SRC_EX_CONFIG_SUPER_TILED__MASK 0x00000008
1484#define VIVS_DE_BLOCK8_SRC_EX_CONFIG_SUPER_TILED__SHIFT 3
1485#define VIVS_DE_BLOCK8_SRC_EX_CONFIG_SUPER_TILED_DISABLE 0x00000000
1486#define VIVS_DE_BLOCK8_SRC_EX_CONFIG_SUPER_TILED_ENABLE 0x00000008
1487#define VIVS_DE_BLOCK8_SRC_EX_CONFIG_MINOR_TILED__MASK 0x00000100
1488#define VIVS_DE_BLOCK8_SRC_EX_CONFIG_MINOR_TILED__SHIFT 8
1489#define VIVS_DE_BLOCK8_SRC_EX_CONFIG_MINOR_TILED_DISABLE 0x00000000
1490#define VIVS_DE_BLOCK8_SRC_EX_CONFIG_MINOR_TILED_ENABLE 0x00000100
1491
1492#define VIVS_DE_BLOCK8_SRC_EX_ADDRESS(i0) (0x00012ce0 + 0x4*(i0))
1493#define VIVS_DE_BLOCK8_SRC_EX_ADDRESS__ESIZE 0x00000004
1494#define VIVS_DE_BLOCK8_SRC_EX_ADDRESS__LEN 0x00000008
1495
1496
1497#endif /* STATE_2D_XML */
diff --git a/tests/etnaviv/write_bmp.c b/tests/etnaviv/write_bmp.c
new file mode 100644
index 00000000..7ae0646c
--- /dev/null
+++ b/tests/etnaviv/write_bmp.c
@@ -0,0 +1,151 @@
1/*
2 * Copyright 2011 Luc Verhaegen <libv@codethink.co.uk>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sub license,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the
12 * next paragraph) shall be included in all copies or substantial portions
13 * of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 */
24/*
25 * Quick 'n Dirty bitmap dumper.
26 */
27#include <stdio.h>
28#include <unistd.h>
29#include <sys/types.h>
30#include <sys/stat.h>
31#include <fcntl.h>
32#include <string.h>
33#include <errno.h>
34
35#include "write_bmp.h"
36
37#define FILENAME_SIZE 1024
38
39struct bmp_header {
40 unsigned short magic;
41 unsigned int size;
42 unsigned int unused;
43 unsigned int start;
44} __attribute__((__packed__));
45
46struct dib_header {
47 unsigned int size;
48 unsigned int width;
49 unsigned int height;
50 unsigned short planes;
51 unsigned short bpp;
52 unsigned int compression;
53 unsigned int data_size;
54 unsigned int h_res;
55 unsigned int v_res;
56 unsigned int colours;
57 unsigned int important_colours;
58 unsigned int red_mask;
59 unsigned int green_mask;
60 unsigned int blue_mask;
61 unsigned int alpha_mask;
62 unsigned int colour_space;
63 unsigned int unused[12];
64} __attribute__((__packed__));
65
66static int
67bmp_header_write(int fd, int width, int height, int bgra, int noflip, int alpha)
68{
69 struct bmp_header bmp_header = {
70 .magic = 0x4d42,
71 .size = (width * height * 4) +
72 sizeof(struct bmp_header) + sizeof(struct dib_header),
73 .start = sizeof(struct bmp_header) + sizeof(struct dib_header),
74 };
75 struct dib_header dib_header = {
76 .size = sizeof(struct dib_header),
77 .width = width,
78 .height = noflip ? -height : height,
79 .planes = 1,
80 .bpp = 32,
81 .compression = 3,
82 .data_size = 4 * width * height,
83 .h_res = 0xB13,
84 .v_res = 0xB13,
85 .colours = 0,
86 .important_colours = 0,
87 .red_mask = 0x000000FF,
88 .green_mask = 0x0000FF00,
89 .blue_mask = 0x00FF0000,
90 .alpha_mask = alpha ? 0xFF000000 : 0x00000000,
91 .colour_space = 0x57696E20,
92 };
93
94 if (bgra) {
95 dib_header.red_mask = 0x00FF0000;
96 dib_header.blue_mask = 0x000000FF;
97 }
98
99 write(fd, &bmp_header, sizeof(struct bmp_header));
100 write(fd, &dib_header, sizeof(struct dib_header));
101
102 return 0;
103}
104
105void
106bmp_dump32(char *buffer, unsigned width, unsigned height, bool bgra, const char *filename)
107{
108 int fd;
109
110 fd = open(filename, O_WRONLY| O_TRUNC | O_CREAT, 0666);
111 if (fd == -1) {
112 printf("Failed to open %s: %s\n", filename, strerror(errno));
113 return;
114 }
115
116 bmp_header_write(fd, width, height, bgra, false, true);
117
118 write(fd, buffer, width * height * 4);
119}
120
121void
122bmp_dump32_noflip(char *buffer, unsigned width, unsigned height, bool bgra, const char *filename)
123{
124 int fd;
125
126 fd = open(filename, O_WRONLY| O_TRUNC | O_CREAT, 0666);
127 if (fd == -1) {
128 printf("Failed to open %s: %s\n", filename, strerror(errno));
129 return;
130 }
131
132 bmp_header_write(fd, width, height, bgra, true, true);
133
134 write(fd, buffer, width * height * 4);
135}
136
137void
138bmp_dump32_ex(char *buffer, unsigned width, unsigned height, bool flip, bool bgra, bool alpha, const char *filename)
139{
140 int fd;
141
142 fd = open(filename, O_WRONLY| O_TRUNC | O_CREAT, 0666);
143 if (fd == -1) {
144 printf("Failed to open %s: %s\n", filename, strerror(errno));
145 return;
146 }
147
148 bmp_header_write(fd, width, height, bgra, flip, alpha);
149
150 write(fd, buffer, width * height * 4);
151}
diff --git a/tests/getstats.c b/tests/etnaviv/write_bmp.h
index 8a7d2999..667fa87c 100644
--- a/tests/getstats.c
+++ b/tests/etnaviv/write_bmp.h
@@ -1,50 +1,34 @@
1/* 1/*
2 * Copyright © 2007 Intel Corporation 2 * Copyright 2011 Luc Verhaegen <libv@codethink.co.uk>
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation 6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 7 * the rights to use, copy, modify, merge, publish, distribute, sub license,
8 * and/or sell copies of the Software, and to permit persons to whom the 8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions: 9 * Software is furnished to do so, subject to the following conditions:
10 * 10 *
11 * The above copyright notice and this permission notice (including the next 11 * The above copyright notice and this permission notice (including the
12 * paragraph) shall be included in all copies or substantial portions of the 12 * next paragraph) shall be included in all copies or substantial portions
13 * Software. 13 * of the Software.
14 * 14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * IN THE SOFTWARE. 21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * 22 *
26 */ 23 */
24#ifndef BMP_DUMP_H
25#define BMP_DUMP_H 1
26#include <stdbool.h>
27/* write 32-bit image (y axis upwards) */
28void bmp_dump32(char *buffer, unsigned width, unsigned height, bool bgra, const char *filename);
29/* write 32-bit image (y axis downwards) */
30void bmp_dump32_noflip(char *buffer, unsigned width, unsigned height, bool bgra, const char *filename);
31/* write 32-bit image */
32void bmp_dump32_ex(char *buffer, unsigned width, unsigned height, bool flip, bool bgra, bool alpha, const char *filename);
27 33
28#include <limits.h> 34#endif /* BMP_DUMP_H */
29#include <sys/ioctl.h>
30#include "drmtest.h"
31
32/**
33 * Checks DRM_IOCTL_GET_STATS.
34 *
35 * I don't care too much about the actual contents, just that the kernel
36 * doesn't crash.
37 */
38int main(int argc, char **argv)
39{
40 int fd, ret;
41 drm_stats_t stats;
42
43 fd = drm_open_any();
44
45 ret = ioctl(fd, DRM_IOCTL_GET_STATS, &stats);
46 assert(ret == 0);
47
48 close(fd);
49 return 0;
50}
diff --git a/tests/exynos/Makefile.am b/tests/exynos/Makefile.am
index 357d6b8c..b6361727 100644
--- a/tests/exynos/Makefile.am
+++ b/tests/exynos/Makefile.am
@@ -1,4 +1,5 @@
1AM_CFLAGS = \ 1AM_CFLAGS = \
2 -pthread \
2 $(WARN_CFLAGS)\ 3 $(WARN_CFLAGS)\
3 -I $(top_srcdir)/include/drm \ 4 -I $(top_srcdir)/include/drm \
4 -I $(top_srcdir)/libkms/ \ 5 -I $(top_srcdir)/libkms/ \
@@ -34,8 +35,7 @@ exynos_fimg2d_perf_LDADD = \
34 35
35exynos_fimg2d_event_LDADD = \ 36exynos_fimg2d_event_LDADD = \
36 $(top_builddir)/libdrm.la \ 37 $(top_builddir)/libdrm.la \
37 $(top_builddir)/exynos/libdrm_exynos.la \ 38 $(top_builddir)/exynos/libdrm_exynos.la
38 -lpthread
39 39
40exynos_fimg2d_test_LDADD = \ 40exynos_fimg2d_test_LDADD = \
41 $(top_builddir)/libdrm.la \ 41 $(top_builddir)/libdrm.la \
diff --git a/tests/getclient.c b/tests/getclient.c
deleted file mode 100644
index 481ce119..00000000
--- a/tests/getclient.c
+++ /dev/null
@@ -1,61 +0,0 @@
1/*
2 * Copyright © 2007 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 *
26 */
27
28#include <limits.h>
29#include <sys/ioctl.h>
30#include "drmtest.h"
31
32/**
33 * Checks DRM_IOCTL_GET_CLIENT.
34 */
35int main(int argc, char **argv)
36{
37 int fd, ret;
38 drm_client_t client;
39
40 fd = drm_open_any();
41
42 /* Look for client index 0. This should exist whether we're operating
43 * on an otherwise unused drm device, or the X Server is running on
44 * the device.
45 */
46 client.idx = 0;
47 ret = ioctl(fd, DRM_IOCTL_GET_CLIENT, &client);
48 assert(ret == 0);
49
50 /* Look for some absurd client index and make sure it's invalid.
51 * The DRM drivers currently always return data, so the user has
52 * no real way to detect when the list has terminated. That's bad,
53 * and this test is XFAIL as a result.
54 */
55 client.idx = 0x7fffffff;
56 ret = ioctl(fd, DRM_IOCTL_GET_CLIENT, &client);
57 assert(ret == -1 && errno == EINVAL);
58
59 close(fd);
60 return 0;
61}
diff --git a/tests/getversion.c b/tests/getversion.c
deleted file mode 100644
index bcec4699..00000000
--- a/tests/getversion.c
+++ /dev/null
@@ -1,49 +0,0 @@
1/*
2 * Copyright © 2007 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 *
26 */
27
28#include <string.h>
29#include "drmtest.h"
30
31/**
32 * Checks DRM_IOCTL_GET_VERSION and libdrm's drmGetVersion() interface to it.
33 */
34int main(int argc, char **argv)
35{
36 int fd;
37 drmVersionPtr v;
38
39 fd = drm_open_any();
40 v = drmGetVersion(fd);
41 assert(strlen(v->name) != 0);
42 assert(strlen(v->date) != 0);
43 assert(strlen(v->desc) != 0);
44 if (strcmp(v->name, "i915") == 0)
45 assert(v->version_major >= 1);
46 drmFreeVersion(v);
47 close(fd);
48 return 0;
49}
diff --git a/tests/kms/kms-steal-crtc.c b/tests/kms/kms-steal-crtc.c
index 2f7f327e..4b830d27 100644
--- a/tests/kms/kms-steal-crtc.c
+++ b/tests/kms/kms-steal-crtc.c
@@ -29,8 +29,12 @@
29#include <fcntl.h> 29#include <fcntl.h>
30#include <signal.h> 30#include <signal.h>
31#include <stdio.h> 31#include <stdio.h>
32#include <stdint.h>
32#include <string.h> 33#include <string.h>
33#include <unistd.h> 34#include <unistd.h>
35#ifdef HAVE_SYS_SELECT_H
36#include <sys/select.h>
37#endif
34 38
35#include <drm_fourcc.h> 39#include <drm_fourcc.h>
36 40
diff --git a/tests/kms/kms-universal-planes.c b/tests/kms/kms-universal-planes.c
index 9151231f..89057bb5 100644
--- a/tests/kms/kms-universal-planes.c
+++ b/tests/kms/kms-universal-planes.c
@@ -32,6 +32,9 @@
32#include <stdio.h> 32#include <stdio.h>
33#include <string.h> 33#include <string.h>
34#include <unistd.h> 34#include <unistd.h>
35#ifdef HAVE_SYS_SELECT_H
36#include <sys/select.h>
37#endif
35 38
36#include <drm_fourcc.h> 39#include <drm_fourcc.h>
37#include "xf86drm.h" 40#include "xf86drm.h"
@@ -209,9 +212,9 @@ int main(int argc, char *argv[])
209 printf("Planes: %u\n", device->num_planes); 212 printf("Planes: %u\n", device->num_planes);
210 213
211 for (i = 0; i < device->num_planes; i++) { 214 for (i = 0; i < device->num_planes; i++) {
212 struct kms_plane *plane = device->planes[i];
213 const char *type = NULL; 215 const char *type = NULL;
214 216
217 plane = device->planes[i];
215 switch (plane->type) { 218 switch (plane->type) {
216 case DRM_PLANE_TYPE_OVERLAY: 219 case DRM_PLANE_TYPE_OVERLAY:
217 type = "overlay"; 220 type = "overlay";
diff --git a/tests/kmstest/Makefile.am b/tests/kmstest/Makefile.am
index fd21e612..ced541b7 100644
--- a/tests/kmstest/Makefile.am
+++ b/tests/kmstest/Makefile.am
@@ -2,6 +2,7 @@ AM_CFLAGS = \
2 $(WARN_CFLAGS)\ 2 $(WARN_CFLAGS)\
3 -I$(top_srcdir)/include/drm \ 3 -I$(top_srcdir)/include/drm \
4 -I$(top_srcdir)/libkms/ \ 4 -I$(top_srcdir)/libkms/ \
5 -I$(top_srcdir)/tests/ \
5 -I$(top_srcdir) 6 -I$(top_srcdir)
6 7
7if HAVE_INSTALL_TESTS 8if HAVE_INSTALL_TESTS
@@ -16,8 +17,9 @@ kmstest_SOURCES = \
16 main.c 17 main.c
17 18
18kmstest_LDADD = \ 19kmstest_LDADD = \
19 $(top_builddir)/libdrm.la \ 20 $(top_builddir)/tests/util/libutil.la \
20 $(top_builddir)/libkms/libkms.la 21 $(top_builddir)/libkms/libkms.la \
22 $(top_builddir)/libdrm.la
21 23
22run: kmstest 24run: kmstest
23 ./kmstest 25 ./kmstest
diff --git a/tests/kmstest/main.c b/tests/kmstest/main.c
index 120bc0fa..a0e4ebbd 100644
--- a/tests/kmstest/main.c
+++ b/tests/kmstest/main.c
@@ -25,12 +25,14 @@
25 * 25 *
26 **************************************************************************/ 26 **************************************************************************/
27 27
28 28#include <getopt.h>
29#include <stdio.h> 29#include <stdio.h>
30#include <string.h> 30#include <string.h>
31#include "xf86drm.h" 31#include "xf86drm.h"
32#include "libkms.h" 32#include "libkms.h"
33 33
34#include "util/kms.h"
35
34#define CHECK_RET_RETURN(ret, str) \ 36#define CHECK_RET_RETURN(ret, str) \
35 if (ret < 0) { \ 37 if (ret < 0) { \
36 printf("%s: %s (%s)\n", __func__, str, strerror(-ret)); \ 38 printf("%s: %s (%s)\n", __func__, str, strerror(-ret)); \
@@ -56,26 +58,37 @@ static int test_bo(struct kms_driver *kms)
56 return 0; 58 return 0;
57} 59}
58 60
59static const char *drivers[] = { 61static void usage(const char *program)
60 "i915", 62{
61 "radeon", 63 fprintf(stderr, "Usage: %s [options]\n", program);
62 "nouveau", 64 fprintf(stderr, "\n");
63 "vmwgfx", 65 fprintf(stderr, " -D DEVICE open the given device\n");
64 "exynos", 66 fprintf(stderr, " -M MODULE open the given module\n");
65 "amdgpu", 67}
66 "imx-drm",
67 "rockchip",
68 "atmel-hlcdc",
69 NULL
70};
71 68
72int main(int argc, char** argv) 69int main(int argc, char** argv)
73{ 70{
71 static const char optstr[] = "D:M:";
74 struct kms_driver *kms; 72 struct kms_driver *kms;
75 int ret, fd, i; 73 int c, fd, ret;
74 char *device = NULL;
75 char *module = NULL;
76
77 while ((c = getopt(argc, argv, optstr)) != -1) {
78 switch (c) {
79 case 'D':
80 device = optarg;
81 break;
82 case 'M':
83 module = optarg;
84 break;
85 default:
86 usage(argv[0]);
87 return 0;
88 }
89 }
76 90
77 for (i = 0, fd = -1; fd < 0 && drivers[i]; i++) 91 fd = util_open(device, module);
78 fd = drmOpen(drivers[i], NULL);
79 CHECK_RET_RETURN(fd, "Could not open device"); 92 CHECK_RET_RETURN(fd, "Could not open device");
80 93
81 ret = kms_create(fd, &kms); 94 ret = kms_create(fd, &kms);
diff --git a/tests/lock.c b/tests/lock.c
deleted file mode 100644
index 365681b5..00000000
--- a/tests/lock.c
+++ /dev/null
@@ -1,264 +0,0 @@
1/*
2 * Copyright © 2007 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 *
26 */
27
28/** @file lock.c
29 * Tests various potential failures of the DRM locking mechanisms
30 */
31
32#include <limits.h>
33#include <sys/ioctl.h>
34#include "drmtest.h"
35
36enum auth_event {
37 SERVER_READY,
38 CLIENT_MAGIC,
39 SERVER_LOCKED,
40 CLIENT_LOCKED,
41};
42
43int commfd[2];
44unsigned int lock1 = 0x00001111;
45unsigned int lock2 = 0x00002222;
46
47/* return time in milliseconds */
48static unsigned int
49get_millis()
50{
51 struct timeval tv;
52
53 gettimeofday(&tv, NULL);
54 return tv.tv_sec * 1000 + tv.tv_usec / 1000;
55}
56
57static void
58wait_event(int pipe, enum auth_event expected_event)
59{
60 int ret;
61 enum auth_event event;
62 unsigned char in;
63
64 ret = read(commfd[pipe], &in, 1);
65 if (ret == -1)
66 err(1, "read error");
67 event = in;
68
69 if (event != expected_event)
70 errx(1, "unexpected event: %d\n", event);
71}
72
73static void
74send_event(int pipe, enum auth_event send_event)
75{
76 int ret;
77 unsigned char event;
78
79 event = send_event;
80 ret = write(commfd[pipe], &event, 1);
81 if (ret == -1)
82 err(1, "failed to send event %d", event);
83}
84
85static void
86client_auth(int drmfd)
87{
88 struct drm_auth auth;
89 int ret;
90
91 /* Get a client magic number and pass it to the master for auth. */
92 ret = ioctl(drmfd, DRM_IOCTL_GET_MAGIC, &auth);
93 if (ret == -1)
94 err(1, "Couldn't get client magic");
95 send_event(0, CLIENT_MAGIC);
96 ret = write(commfd[0], &auth.magic, sizeof(auth.magic));
97 if (ret == -1)
98 err(1, "Couldn't write auth data");
99}
100
101static void
102server_auth(int drmfd)
103{
104 struct drm_auth auth;
105 int ret;
106
107 send_event(1, SERVER_READY);
108 wait_event(1, CLIENT_MAGIC);
109 ret = read(commfd[1], &auth.magic, sizeof(auth.magic));
110 if (ret == -1)
111 err(1, "Failure to read client magic");
112
113 ret = ioctl(drmfd, DRM_IOCTL_AUTH_MAGIC, &auth);
114 if (ret == -1)
115 err(1, "Failure to authenticate client magic\n");
116}
117
118/** Tests that locking is successful in normal conditions */
119static void
120test_lock_unlock(int drmfd)
121{
122 int ret;
123
124 ret = drmGetLock(drmfd, lock1, 0);
125 if (ret != 0)
126 err(1, "Locking failed");
127 ret = drmUnlock(drmfd, lock1);
128 if (ret != 0)
129 err(1, "Unlocking failed");
130}
131
132/** Tests that unlocking the lock while it's not held works correctly */
133static void
134test_unlock_unlocked(int drmfd)
135{
136 int ret;
137
138 ret = drmUnlock(drmfd, lock1);
139 if (ret == 0)
140 err(1, "Unlocking unlocked lock succeeded");
141}
142
143/** Tests that unlocking a lock held by another context fails appropriately */
144static void
145test_unlock_unowned(int drmfd)
146{
147 int ret;
148
149 ret = drmGetLock(drmfd, lock1, 0);
150 assert(ret == 0);
151 ret = drmUnlock(drmfd, lock2);
152 if (ret == 0)
153 errx(1, "Unlocking other context's lock succeeded");
154 ret = drmUnlock(drmfd, lock1);
155 assert(ret == 0);
156}
157
158/**
159 * Tests that an open/close by the same process doesn't result in the lock
160 * being dropped.
161 */
162static void test_open_close_locked(drmfd)
163{
164 int ret, tempfd;
165
166 ret = drmGetLock(drmfd, lock1, 0);
167 assert(ret == 0);
168 /* XXX: Need to make sure that this is the same device as drmfd */
169 tempfd = drm_open_any();
170 close(tempfd);
171 ret = drmUnlock(drmfd, lock1);
172 if (ret != 0)
173 errx(1, "lock lost during open/close by same pid");
174}
175
176static void client()
177{
178 int drmfd, ret;
179 unsigned int time;
180
181 wait_event(0, SERVER_READY);
182
183 /* XXX: Should make sure we open the same DRM as the master */
184 drmfd = drm_open_any();
185
186 client_auth(drmfd);
187
188 /* Wait for the server to grab the lock, then grab it ourselves (to
189 * contest it). Hopefully we hit it within the window of when the
190 * server locks.
191 */
192 wait_event(0, SERVER_LOCKED);
193 ret = drmGetLock(drmfd, lock2, 0);
194 time = get_millis();
195 if (ret != 0)
196 err(1, "Failed to get lock on client\n");
197 drmUnlock(drmfd, lock2);
198
199 /* Tell the server that our locking completed, and when it did */
200 send_event(0, CLIENT_LOCKED);
201 ret = write(commfd[0], &time, sizeof(time));
202
203 close(drmfd);
204 exit(0);
205}
206
207static void server()
208{
209 int drmfd, tempfd, ret;
210 unsigned int client_time, unlock_time;
211
212 drmfd = drm_open_any_master();
213
214 test_lock_unlock(drmfd);
215 test_unlock_unlocked(drmfd);
216 test_unlock_unowned(drmfd);
217 test_open_close_locked(drmfd);
218
219 /* Perform the authentication sequence with the client. */
220 server_auth(drmfd);
221
222 /* Now, test that the client attempting to lock while the server
223 * holds the lock works correctly.
224 */
225 ret = drmGetLock(drmfd, lock1, 0);
226 assert(ret == 0);
227 send_event(1, SERVER_LOCKED);
228 /* Wait a while for the client to do its thing */
229 sleep(1);
230 ret = drmUnlock(drmfd, lock1);
231 assert(ret == 0);
232 unlock_time = get_millis();
233
234 wait_event(1, CLIENT_LOCKED);
235 ret = read(commfd[1], &client_time, sizeof(client_time));
236 if (ret == -1)
237 err(1, "Failure to read client magic");
238
239 if (client_time < unlock_time)
240 errx(1, "Client took lock before server released it");
241
242 close(drmfd);
243}
244
245int main(int argc, char **argv)
246{
247 int ret;
248
249
250 ret = pipe(commfd);
251 if (ret == -1)
252 err(1, "Couldn't create pipe");
253
254 ret = fork();
255 if (ret == -1)
256 err(1, "failure to fork client");
257 if (ret == 0)
258 client();
259 else
260 server();
261
262 return 0;
263}
264
diff --git a/tests/modetest/Android.mk b/tests/modetest/Android.mk
index ccdae6c8..ab40b806 100644
--- a/tests/modetest/Android.mk
+++ b/tests/modetest/Android.mk
@@ -10,4 +10,7 @@ LOCAL_MODULE := modetest
10LOCAL_SHARED_LIBRARIES := libdrm 10LOCAL_SHARED_LIBRARIES := libdrm
11LOCAL_STATIC_LIBRARIES := libdrm_util 11LOCAL_STATIC_LIBRARIES := libdrm_util
12 12
13LOCAL_C_INCLUDES := $(LOCAL_PATH)/..
14
15include $(LIBDRM_COMMON_MK)
13include $(BUILD_EXECUTABLE) 16include $(BUILD_EXECUTABLE)
diff --git a/tests/modetest/Makefile.am b/tests/modetest/Makefile.am
index 25ce372f..4b296c83 100644
--- a/tests/modetest/Makefile.am
+++ b/tests/modetest/Makefile.am
@@ -3,6 +3,7 @@ include Makefile.sources
3AM_CFLAGS = $(filter-out -Wpointer-arith, $(WARN_CFLAGS)) 3AM_CFLAGS = $(filter-out -Wpointer-arith, $(WARN_CFLAGS))
4 4
5AM_CFLAGS += \ 5AM_CFLAGS += \
6 -pthread \
6 -I$(top_srcdir)/include/drm \ 7 -I$(top_srcdir)/include/drm \
7 -I$(top_srcdir)/tests \ 8 -I$(top_srcdir)/tests \
8 -I$(top_srcdir) 9 -I$(top_srcdir)
@@ -20,7 +21,4 @@ modetest_SOURCES = $(MODETEST_FILES)
20modetest_LDADD = \ 21modetest_LDADD = \
21 $(top_builddir)/libdrm.la \ 22 $(top_builddir)/libdrm.la \
22 $(top_builddir)/tests/util/libutil.la \ 23 $(top_builddir)/tests/util/libutil.la \
23 $(CAIRO_LIBS) \ 24 $(CAIRO_LIBS)
24 -lpthread
25
26EXTRA_DIST = Android.mk
diff --git a/tests/modetest/modetest.c b/tests/modetest/modetest.c
index 2c5b2830..cd911193 100644
--- a/tests/modetest/modetest.c
+++ b/tests/modetest/modetest.c
@@ -53,8 +53,11 @@
53#include <string.h> 53#include <string.h>
54#include <strings.h> 54#include <strings.h>
55#include <errno.h> 55#include <errno.h>
56#include <sys/poll.h> 56#include <poll.h>
57#include <sys/time.h> 57#include <sys/time.h>
58#ifdef HAVE_SYS_SELECT_H
59#include <sys/select.h>
60#endif
58 61
59#include "xf86drm.h" 62#include "xf86drm.h"
60#include "xf86drmMode.h" 63#include "xf86drmMode.h"
@@ -195,7 +198,7 @@ static void dump_encoders(struct device *dev)
195 198
196static void dump_mode(drmModeModeInfo *mode) 199static void dump_mode(drmModeModeInfo *mode)
197{ 200{
198 printf(" %s %d %d %d %d %d %d %d %d %d", 201 printf(" %s %d %d %d %d %d %d %d %d %d %d",
199 mode->name, 202 mode->name,
200 mode->vrefresh, 203 mode->vrefresh,
201 mode->hdisplay, 204 mode->hdisplay,
@@ -205,7 +208,8 @@ static void dump_mode(drmModeModeInfo *mode)
205 mode->vdisplay, 208 mode->vdisplay,
206 mode->vsync_start, 209 mode->vsync_start,
207 mode->vsync_end, 210 mode->vsync_end,
208 mode->vtotal); 211 mode->vtotal,
212 mode->clock);
209 213
210 printf(" flags: "); 214 printf(" flags: ");
211 mode_flag_str(mode->flags); 215 mode_flag_str(mode->flags);
@@ -311,6 +315,8 @@ static void dump_prop(struct device *dev, drmModePropertyPtr prop,
311 printf("\t\tvalue:"); 315 printf("\t\tvalue:");
312 if (drm_property_type_is(prop, DRM_MODE_PROP_BLOB)) 316 if (drm_property_type_is(prop, DRM_MODE_PROP_BLOB))
313 dump_blob(dev, value); 317 dump_blob(dev, value);
318 else if (drm_property_type_is(prop, DRM_MODE_PROP_SIGNED_RANGE))
319 printf(" %"PRId64"\n", value);
314 else 320 else
315 printf(" %"PRIu64"\n", value); 321 printf(" %"PRIu64"\n", value);
316} 322}
@@ -699,6 +705,7 @@ struct pipe_arg {
699}; 705};
700 706
701struct plane_arg { 707struct plane_arg {
708 uint32_t plane_id; /* the id of plane to use */
702 uint32_t crtc_id; /* the id of CRTC to bind to */ 709 uint32_t crtc_id; /* the id of CRTC to bind to */
703 bool has_position; 710 bool has_position;
704 int32_t x, y; 711 int32_t x, y;
@@ -953,7 +960,7 @@ static int set_plane(struct device *dev, struct plane_arg *p)
953{ 960{
954 drmModePlane *ovr; 961 drmModePlane *ovr;
955 uint32_t handles[4] = {0}, pitches[4] = {0}, offsets[4] = {0}; 962 uint32_t handles[4] = {0}, pitches[4] = {0}, offsets[4] = {0};
956 uint32_t plane_id = 0; 963 uint32_t plane_id;
957 struct bo *plane_bo; 964 struct bo *plane_bo;
958 uint32_t plane_flags = 0; 965 uint32_t plane_flags = 0;
959 int crtc_x, crtc_y, crtc_w, crtc_h; 966 int crtc_x, crtc_y, crtc_w, crtc_h;
@@ -977,16 +984,26 @@ static int set_plane(struct device *dev, struct plane_arg *p)
977 return -1; 984 return -1;
978 } 985 }
979 986
980 for (i = 0; i < dev->resources->plane_res->count_planes && !plane_id; i++) { 987 plane_id = p->plane_id;
988
989 for (i = 0; i < dev->resources->plane_res->count_planes; i++) {
981 ovr = dev->resources->planes[i].plane; 990 ovr = dev->resources->planes[i].plane;
982 if (!ovr || !format_support(ovr, p->fourcc)) 991 if (!ovr)
983 continue; 992 continue;
984 993
985 if ((ovr->possible_crtcs & (1 << pipe)) && !ovr->crtc_id) 994 if (plane_id && plane_id != ovr->plane_id)
995 continue;
996
997 if (!format_support(ovr, p->fourcc))
998 continue;
999
1000 if ((ovr->possible_crtcs & (1 << pipe)) && !ovr->crtc_id) {
986 plane_id = ovr->plane_id; 1001 plane_id = ovr->plane_id;
1002 break;
1003 }
987 } 1004 }
988 1005
989 if (!plane_id) { 1006 if (i == dev->resources->plane_res->count_planes) {
990 fprintf(stderr, "no unused plane available for CRTC %u\n", 1007 fprintf(stderr, "no unused plane available for CRTC %u\n",
991 crtc->crtc->crtc_id); 1008 crtc->crtc->crtc_id);
992 return -1; 1009 return -1;
@@ -1354,6 +1371,11 @@ static int parse_plane(struct plane_arg *plane, const char *p)
1354{ 1371{
1355 char *end; 1372 char *end;
1356 1373
1374 plane->plane_id = strtoul(p, &end, 10);
1375 if (*end != '@')
1376 return -EINVAL;
1377
1378 p = end + 1;
1357 plane->crtc_id = strtoul(p, &end, 10); 1379 plane->crtc_id = strtoul(p, &end, 10);
1358 if (*end != ':') 1380 if (*end != ':')
1359 return -EINVAL; 1381 return -EINVAL;
@@ -1425,7 +1447,7 @@ static void usage(char *name)
1425 fprintf(stderr, "\t-p\tlist CRTCs and planes (pipes)\n"); 1447 fprintf(stderr, "\t-p\tlist CRTCs and planes (pipes)\n");
1426 1448
1427 fprintf(stderr, "\n Test options:\n\n"); 1449 fprintf(stderr, "\n Test options:\n\n");
1428 fprintf(stderr, "\t-P <crtc_id>:<w>x<h>[+<x>+<y>][*<scale>][@<format>]\tset a plane\n"); 1450 fprintf(stderr, "\t-P <plane_id>@<crtc_id>:<w>x<h>[+<x>+<y>][*<scale>][@<format>]\tset a plane\n");
1429 fprintf(stderr, "\t-s <connector_id>[,<connector_id>][@<crtc_id>]:<mode>[-<vrefresh>][@<format>]\tset a mode\n"); 1451 fprintf(stderr, "\t-s <connector_id>[,<connector_id>][@<crtc_id>]:<mode>[-<vrefresh>][@<format>]\tset a mode\n");
1430 fprintf(stderr, "\t-C\ttest hw cursor\n"); 1452 fprintf(stderr, "\t-C\ttest hw cursor\n");
1431 fprintf(stderr, "\t-v\ttest vsynced page flipping\n"); 1453 fprintf(stderr, "\t-v\ttest vsynced page flipping\n");
@@ -1604,7 +1626,7 @@ int main(int argc, char **argv)
1604 if (!args) 1626 if (!args)
1605 encoders = connectors = crtcs = planes = framebuffers = 1; 1627 encoders = connectors = crtcs = planes = framebuffers = 1;
1606 1628
1607 dev.fd = util_open(module, device); 1629 dev.fd = util_open(device, module);
1608 if (dev.fd < 0) 1630 if (dev.fd < 0)
1609 return -1; 1631 return -1;
1610 1632
diff --git a/tests/name_from_fd.c b/tests/name_from_fd.c
deleted file mode 100644
index 52646812..00000000
--- a/tests/name_from_fd.c
+++ /dev/null
@@ -1,58 +0,0 @@
1/*
2 * Copyright © 2009 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Kristian Høgsberg <krh@bitplanet.net>
25 *
26 */
27
28#include <unistd.h>
29#include <fcntl.h>
30#include <limits.h>
31#include <string.h>
32#include "drmtest.h"
33
34/**
35 * Checks drmGetDeviceNameFromFd
36 *
37 * This tests that we can get the actual version out, and that setting invalid
38 * major/minor numbers fails appropriately. It does not check the actual
39 * behavior differenses resulting from an increased DI version.
40 */
41int main(int argc, char **argv)
42{
43 int fd;
44 const char *name = "/dev/dri/card0";
45 char *v;
46
47 fd = open("/dev/dri/card0", O_RDWR);
48 if (fd < 0)
49 return 0;
50
51 v = drmGetDeviceNameFromFd(fd);
52 close(fd);
53
54 assert(strcmp(name, v) == 0);
55 drmFree(v);
56
57 return 0;
58}
diff --git a/tests/nouveau/Makefile.am b/tests/nouveau/Makefile.am
index c4f6e299..3c799a81 100644
--- a/tests/nouveau/Makefile.am
+++ b/tests/nouveau/Makefile.am
@@ -1,14 +1,14 @@
1AM_CPPFLAGS = \ 1AM_CFLAGS = \
2 -pthread \
3 $(WARN_CFLAGS) \
2 -I$(top_srcdir)/include/drm \ 4 -I$(top_srcdir)/include/drm \
3 -I$(top_srcdir)/nouveau \ 5 -I$(top_srcdir)/nouveau \
4 -I$(top_srcdir) 6 -I$(top_srcdir)
5 7
6AM_CFLAGS = $(WARN_CFLAGS)
7
8LDADD = \ 8LDADD = \
9 ../../nouveau/libdrm_nouveau.la \ 9 ../../nouveau/libdrm_nouveau.la \
10 ../../libdrm.la \ 10 ../../libdrm.la \
11 -ldl -lpthread 11 -ldl
12 12
13TESTS = threaded 13TESTS = threaded
14 14
diff --git a/tests/openclose.c b/tests/openclose.c
deleted file mode 100644
index 946a4459..00000000
--- a/tests/openclose.c
+++ /dev/null
@@ -1,37 +0,0 @@
1/*
2 * Copyright © 2007 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 *
26 */
27
28#include "drmtest.h"
29
30int main(int argc, char **argv)
31{
32 int fd;
33
34 fd = drm_open_any();
35 close(fd);
36 return 0;
37}
diff --git a/tests/proptest/Android.mk b/tests/proptest/Android.mk
index d0ab5c92..588fbed2 100644
--- a/tests/proptest/Android.mk
+++ b/tests/proptest/Android.mk
@@ -10,4 +10,7 @@ LOCAL_MODULE := proptest
10LOCAL_SHARED_LIBRARIES := libdrm 10LOCAL_SHARED_LIBRARIES := libdrm
11LOCAL_STATIC_LIBRARIES := libdrm_util 11LOCAL_STATIC_LIBRARIES := libdrm_util
12 12
13LOCAL_C_INCLUDES := $(LOCAL_PATH)/..
14
15include $(LIBDRM_COMMON_MK)
13include $(BUILD_EXECUTABLE) 16include $(BUILD_EXECUTABLE)
diff --git a/tests/proptest/proptest.c b/tests/proptest/proptest.c
index 24c63456..5abbf029 100644
--- a/tests/proptest/proptest.c
+++ b/tests/proptest/proptest.c
@@ -151,6 +151,8 @@ dump_prop(uint32_t prop_id, uint64_t value)
151 printf("\t\tvalue:"); 151 printf("\t\tvalue:");
152 if (drm_property_type_is(prop, DRM_MODE_PROP_BLOB)) 152 if (drm_property_type_is(prop, DRM_MODE_PROP_BLOB))
153 dump_blob(value); 153 dump_blob(value);
154 else if (drm_property_type_is(prop, DRM_MODE_PROP_SIGNED_RANGE))
155 printf(" %"PRId64"\n", value);
154 else 156 else
155 printf(" %"PRIu64"\n", value); 157 printf(" %"PRIu64"\n", value);
156 158
@@ -295,7 +297,7 @@ int main(int argc, char *argv[])
295 297
296 args = argc - optind; 298 args = argc - optind;
297 299
298 fd = util_open(module, device); 300 fd = util_open(device, module);
299 if (fd < 0) 301 if (fd < 0)
300 return 1; 302 return 1;
301 303
diff --git a/tests/setversion.c b/tests/setversion.c
deleted file mode 100644
index 2f7b529a..00000000
--- a/tests/setversion.c
+++ /dev/null
@@ -1,91 +0,0 @@
1/*
2 * Copyright © 2007 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 *
26 */
27
28#include <limits.h>
29#include <string.h>
30#include <sys/ioctl.h>
31#include "drmtest.h"
32
33/**
34 * Checks DRM_IOCTL_SET_VERSION.
35 *
36 * This tests that we can get the actual version out, and that setting invalid
37 * major/minor numbers fails appropriately. It does not check the actual
38 * behavior differenses resulting from an increased DI version.
39 */
40int main(int argc, char **argv)
41{
42 int fd, ret;
43 drm_set_version_t sv, version;
44
45 if (getuid() != 0) {
46 fprintf(stderr, "setversion test requires root, skipping\n");
47 return 0;
48 }
49
50 fd = drm_open_any_master();
51
52 /* First, check that we can get the DD/DI versions. */
53 memset(&version, 0, sizeof(version));
54 version.drm_di_major = -1;
55 version.drm_di_minor = -1;
56 version.drm_dd_major = -1;
57 version.drm_dd_minor = -1;
58 ret = ioctl(fd, DRM_IOCTL_SET_VERSION, &version);
59 assert(ret == 0);
60 assert(version.drm_di_major != -1);
61 assert(version.drm_di_minor != -1);
62 assert(version.drm_dd_major != -1);
63 assert(version.drm_dd_minor != -1);
64
65 /* Check that an invalid DI major fails */
66 sv = version;
67 sv.drm_di_major++;
68 ret = ioctl(fd, DRM_IOCTL_SET_VERSION, &sv);
69 assert(ret == -1 && errno == EINVAL);
70
71 /* Check that an invalid DI minor fails */
72 sv = version;
73 sv.drm_di_major++;
74 ret = ioctl(fd, DRM_IOCTL_SET_VERSION, &sv);
75 assert(ret == -1 && errno == EINVAL);
76
77 /* Check that an invalid DD major fails */
78 sv = version;
79 sv.drm_dd_major++;
80 ret = ioctl(fd, DRM_IOCTL_SET_VERSION, &sv);
81 assert(ret == -1 && errno == EINVAL);
82
83 /* Check that an invalid DD minor fails */
84 sv = version;
85 sv.drm_dd_minor++;
86 ret = ioctl(fd, DRM_IOCTL_SET_VERSION, &sv);
87 assert(ret == -1 && errno == EINVAL);
88
89 close(fd);
90 return 0;
91}
diff --git a/tests/updatedraw.c b/tests/updatedraw.c
deleted file mode 100644
index d01fa96d..00000000
--- a/tests/updatedraw.c
+++ /dev/null
@@ -1,154 +0,0 @@
1/*
2 * Copyright © 2007 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 *
26 */
27
28#include <sys/ioctl.h>
29#include "drmtest.h"
30
31static void
32set_draw_cliprects_empty(int fd, int drawable)
33{
34 int ret;
35 struct drm_update_draw update;
36
37 update.handle = drawable;
38 update.type = DRM_DRAWABLE_CLIPRECTS;
39 update.num = 0;
40 update.data = 0;
41
42 ret = ioctl(fd, DRM_IOCTL_UPDATE_DRAW, &update);
43 assert(ret == 0);
44}
45
46static void
47set_draw_cliprects_empty_fail(int fd, int drawable)
48{
49 int ret;
50 struct drm_update_draw update;
51
52 update.handle = drawable;
53 update.type = DRM_DRAWABLE_CLIPRECTS;
54 update.num = 0;
55 update.data = 0;
56
57 ret = ioctl(fd, DRM_IOCTL_UPDATE_DRAW, &update);
58 assert(ret == -1 && errno == EINVAL);
59}
60
61static void
62set_draw_cliprects_2(int fd, int drawable)
63{
64 int ret;
65 struct drm_update_draw update;
66 drm_clip_rect_t rects[2];
67
68 rects[0].x1 = 0;
69 rects[0].y1 = 0;
70 rects[0].x2 = 10;
71 rects[0].y2 = 10;
72
73 rects[1].x1 = 10;
74 rects[1].y1 = 10;
75 rects[1].x2 = 20;
76 rects[1].y2 = 20;
77
78 update.handle = drawable;
79 update.type = DRM_DRAWABLE_CLIPRECTS;
80 update.num = 2;
81 update.data = (unsigned long long)(uintptr_t)&rects;
82
83 ret = ioctl(fd, DRM_IOCTL_UPDATE_DRAW, &update);
84 assert(ret == 0);
85}
86
87static int add_drawable(int fd)
88{
89 drm_draw_t drawarg;
90 int ret;
91
92 /* Create a drawable.
93 * IOCTL_ADD_DRAW is RDWR, though it should really just be RD
94 */
95 drawarg.handle = 0;
96 ret = ioctl(fd, DRM_IOCTL_ADD_DRAW, &drawarg);
97 assert(ret == 0);
98 return drawarg.handle;
99}
100
101static int rm_drawable(int fd, int drawable, int fail)
102{
103 drm_draw_t drawarg;
104 int ret;
105
106 /* Create a drawable.
107 * IOCTL_ADD_DRAW is RDWR, though it should really just be RD
108 */
109 drawarg.handle = drawable;
110 ret = ioctl(fd, DRM_IOCTL_RM_DRAW, &drawarg);
111 if (!fail)
112 assert(ret == 0);
113 else
114 assert(ret == -1 && errno == EINVAL);
115
116 return drawarg.handle;
117}
118
119/**
120 * Tests drawable management: adding, removing, and updating the cliprects of
121 * drawables.
122 */
123int main(int argc, char **argv)
124{
125 int fd, d1, d2;
126
127 if (getuid() != 0) {
128 fprintf(stderr, "updatedraw test requires root, skipping\n");
129 return 0;
130 }
131
132 fd = drm_open_any_master();
133
134 d1 = add_drawable(fd);
135 d2 = add_drawable(fd);
136 /* Do a series of cliprect updates */
137 set_draw_cliprects_empty(fd, d1);
138 set_draw_cliprects_empty(fd, d2);
139 set_draw_cliprects_2(fd, d1);
140 set_draw_cliprects_empty(fd, d1);
141
142 /* Remove our drawables */
143 rm_drawable(fd, d1, 0);
144 rm_drawable(fd, d2, 0);
145
146 /* Check that removing an unknown drawable returns error */
147 rm_drawable(fd, 0x7fffffff, 1);
148
149 /* Attempt to set cliprects on a nonexistent drawable */
150 set_draw_cliprects_empty_fail(fd, d1);
151
152 close(fd);
153 return 0;
154}
diff --git a/tests/util/Android.mk b/tests/util/Android.mk
index a78341fa..7656c4c2 100644
--- a/tests/util/Android.mk
+++ b/tests/util/Android.mk
@@ -27,13 +27,10 @@ include $(CLEAR_VARS)
27include $(LOCAL_PATH)/Makefile.sources 27include $(LOCAL_PATH)/Makefile.sources
28 28
29LOCAL_MODULE := libdrm_util 29LOCAL_MODULE := libdrm_util
30LOCAL_MODULE_TAGS := optional
31 30
32LOCAL_SHARED_LIBRARIES := libdrm 31LOCAL_SHARED_LIBRARIES := libdrm
33 32
34LOCAL_SRC_FILES := $(filter-out %.h,$(UTIL_FILES)) 33LOCAL_SRC_FILES := $(UTIL_FILES)
35
36# avoid name clashes by requiring users to include util/*.h
37LOCAL_EXPORT_C_INCLUDE_DIRS := $(dir $(LOCAL_PATH))
38 34
35include $(LIBDRM_COMMON_MK)
39include $(BUILD_STATIC_LIBRARY) 36include $(BUILD_STATIC_LIBRARY)
diff --git a/tests/util/kms.c b/tests/util/kms.c
index 57b0191b..959b6881 100644
--- a/tests/util/kms.c
+++ b/tests/util/kms.c
@@ -127,6 +127,7 @@ const char *util_lookup_connector_type_name(unsigned int type)
127 127
128static const char * const modules[] = { 128static const char * const modules[] = {
129 "i915", 129 "i915",
130 "amdgpu",
130 "radeon", 131 "radeon",
131 "nouveau", 132 "nouveau",
132 "vmwgfx", 133 "vmwgfx",
@@ -139,6 +140,11 @@ static const char * const modules[] = {
139 "imx-drm", 140 "imx-drm",
140 "rockchip", 141 "rockchip",
141 "atmel-hlcdc", 142 "atmel-hlcdc",
143 "fsl-dcu-drm",
144 "vc4",
145 "virtio_gpu",
146 "mediatek",
147 "meson",
142}; 148};
143 149
144int util_open(const char *device, const char *module) 150int util_open(const char *device, const char *module)
diff --git a/tests/vbltest/vbltest.c b/tests/vbltest/vbltest.c
index 18333212..3f6b803a 100644
--- a/tests/vbltest/vbltest.c
+++ b/tests/vbltest/vbltest.c
@@ -35,8 +35,11 @@
35#include <unistd.h> 35#include <unistd.h>
36#include <string.h> 36#include <string.h>
37#include <errno.h> 37#include <errno.h>
38#include <sys/poll.h> 38#include <poll.h>
39#include <sys/time.h> 39#include <sys/time.h>
40#ifdef HAVE_SYS_SELECT_H
41#include <sys/select.h>
42#endif
40 43
41#include "xf86drm.h" 44#include "xf86drm.h"
42#include "xf86drmMode.h" 45#include "xf86drmMode.h"
@@ -120,7 +123,7 @@ int main(int argc, char **argv)
120 } 123 }
121 } 124 }
122 125
123 fd = util_open(module, device); 126 fd = util_open(device, module);
124 if (fd < 0) 127 if (fd < 0)
125 return 1; 128 return 1;
126 129
diff --git a/util_double_list.h b/util_double_list.h
index 27e0761b..7e48b26c 100644
--- a/util_double_list.h
+++ b/util_double_list.h
@@ -98,13 +98,19 @@ static inline void list_delinit(struct list_head *item)
98#define LIST_ENTRY(__type, __item, __field) \ 98#define LIST_ENTRY(__type, __item, __field) \
99 ((__type *)(((char *)(__item)) - offsetof(__type, __field))) 99 ((__type *)(((char *)(__item)) - offsetof(__type, __field)))
100 100
101#define LIST_FIRST_ENTRY(__ptr, __type, __field) \
102 LIST_ENTRY(__type, (__ptr)->next, __field)
103
104#define LIST_LAST_ENTRY(__ptr, __type, __field) \
105 LIST_ENTRY(__type, (__ptr)->prev, __field)
106
101#define LIST_IS_EMPTY(__list) \ 107#define LIST_IS_EMPTY(__list) \
102 ((__list)->next == (__list)) 108 ((__list)->next == (__list))
103 109
104#ifndef container_of 110#ifndef container_of
105#define container_of(ptr, sample, member) \ 111#define container_of(ptr, sample, member) \
106 (void *)((char *)(ptr) \ 112 (void *)((char *)(ptr) \
107 - ((char *)&(sample)->member - (char *)(sample))) 113 - ((char *)&((typeof(sample))0)->member))
108#endif 114#endif
109 115
110#define LIST_FOR_EACH_ENTRY(pos, head, member) \ 116#define LIST_FOR_EACH_ENTRY(pos, head, member) \
diff --git a/vc4/Makefile.am b/vc4/Makefile.am
new file mode 100644
index 00000000..7e486b4d
--- /dev/null
+++ b/vc4/Makefile.am
@@ -0,0 +1,34 @@
1# Copyright © 2016 Broadcom
2#
3# Permission is hereby granted, free of charge, to any person obtaining a
4# copy of this software and associated documentation files (the "Software"),
5# to deal in the Software without restriction, including without limitation
6# the rights to use, copy, modify, merge, publish, distribute, sublicense,
7# and/or sell copies of the Software, and to permit persons to whom the
8# Software is furnished to do so, subject to the following conditions:
9#
10# The above copyright notice and this permission notice (including the next
11# paragraph) shall be included in all copies or substantial portions of the
12# Software.
13#
14# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
20# IN THE SOFTWARE.
21
22include Makefile.sources
23
24AM_CFLAGS = \
25 $(WARN_CFLAGS) \
26 -I$(top_srcdir) \
27 $(PTHREADSTUBS_CFLAGS) \
28 $(VALGRIND_CFLAGS) \
29 -I$(top_srcdir)/include/drm
30
31libdrm_vc4includedir = ${includedir}/libdrm
32libdrm_vc4include_HEADERS = $(LIBDRM_VC4_H_FILES)
33
34pkgconfig_DATA = libdrm_vc4.pc
diff --git a/vc4/Makefile.sources b/vc4/Makefile.sources
new file mode 100644
index 00000000..8bf97ff1
--- /dev/null
+++ b/vc4/Makefile.sources
@@ -0,0 +1,3 @@
1LIBDRM_VC4_H_FILES := \
2 vc4_packet.h \
3 vc4_qpu_defines.h
diff --git a/vc4/libdrm_vc4.pc.in b/vc4/libdrm_vc4.pc.in
new file mode 100644
index 00000000..a92678ed
--- /dev/null
+++ b/vc4/libdrm_vc4.pc.in
@@ -0,0 +1,9 @@
1prefix=@prefix@
2exec_prefix=@exec_prefix@
3libdir=@libdir@
4includedir=@includedir@
5
6Name: libdrm_vc4
7Description: Userspace interface to vc4 kernel DRM services
8Version: @PACKAGE_VERSION@
9Requires.private: libdrm
diff --git a/vc4/vc4_packet.h b/vc4/vc4_packet.h
new file mode 100644
index 00000000..e18e0bdf
--- /dev/null
+++ b/vc4/vc4_packet.h
@@ -0,0 +1,397 @@
1/*
2 * Copyright © 2014 Broadcom
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24#ifndef VC4_PACKET_H
25#define VC4_PACKET_H
26
27enum vc4_packet {
28 VC4_PACKET_HALT = 0,
29 VC4_PACKET_NOP = 1,
30
31 VC4_PACKET_FLUSH = 4,
32 VC4_PACKET_FLUSH_ALL = 5,
33 VC4_PACKET_START_TILE_BINNING = 6,
34 VC4_PACKET_INCREMENT_SEMAPHORE = 7,
35 VC4_PACKET_WAIT_ON_SEMAPHORE = 8,
36
37 VC4_PACKET_BRANCH = 16,
38 VC4_PACKET_BRANCH_TO_SUB_LIST = 17,
39 VC4_PACKET_RETURN_FROM_SUB_LIST = 18,
40
41 VC4_PACKET_STORE_MS_TILE_BUFFER = 24,
42 VC4_PACKET_STORE_MS_TILE_BUFFER_AND_EOF = 25,
43 VC4_PACKET_STORE_FULL_RES_TILE_BUFFER = 26,
44 VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER = 27,
45 VC4_PACKET_STORE_TILE_BUFFER_GENERAL = 28,
46 VC4_PACKET_LOAD_TILE_BUFFER_GENERAL = 29,
47
48 VC4_PACKET_GL_INDEXED_PRIMITIVE = 32,
49 VC4_PACKET_GL_ARRAY_PRIMITIVE = 33,
50
51 VC4_PACKET_COMPRESSED_PRIMITIVE = 48,
52 VC4_PACKET_CLIPPED_COMPRESSED_PRIMITIVE = 49,
53
54 VC4_PACKET_PRIMITIVE_LIST_FORMAT = 56,
55
56 VC4_PACKET_GL_SHADER_STATE = 64,
57 VC4_PACKET_NV_SHADER_STATE = 65,
58 VC4_PACKET_VG_SHADER_STATE = 66,
59
60 VC4_PACKET_CONFIGURATION_BITS = 96,
61 VC4_PACKET_FLAT_SHADE_FLAGS = 97,
62 VC4_PACKET_POINT_SIZE = 98,
63 VC4_PACKET_LINE_WIDTH = 99,
64 VC4_PACKET_RHT_X_BOUNDARY = 100,
65 VC4_PACKET_DEPTH_OFFSET = 101,
66 VC4_PACKET_CLIP_WINDOW = 102,
67 VC4_PACKET_VIEWPORT_OFFSET = 103,
68 VC4_PACKET_Z_CLIPPING = 104,
69 VC4_PACKET_CLIPPER_XY_SCALING = 105,
70 VC4_PACKET_CLIPPER_Z_SCALING = 106,
71
72 VC4_PACKET_TILE_BINNING_MODE_CONFIG = 112,
73 VC4_PACKET_TILE_RENDERING_MODE_CONFIG = 113,
74 VC4_PACKET_CLEAR_COLORS = 114,
75 VC4_PACKET_TILE_COORDINATES = 115,
76
77 /* Not an actual hardware packet -- this is what we use to put
78 * references to GEM bos in the command stream, since we need the u32
79 * int the actual address packet in order to store the offset from the
80 * start of the BO.
81 */
82 VC4_PACKET_GEM_HANDLES = 254,
83} __attribute__ ((__packed__));
84
85#define VC4_PACKET_HALT_SIZE 1
86#define VC4_PACKET_NOP_SIZE 1
87#define VC4_PACKET_FLUSH_SIZE 1
88#define VC4_PACKET_FLUSH_ALL_SIZE 1
89#define VC4_PACKET_START_TILE_BINNING_SIZE 1
90#define VC4_PACKET_INCREMENT_SEMAPHORE_SIZE 1
91#define VC4_PACKET_WAIT_ON_SEMAPHORE_SIZE 1
92#define VC4_PACKET_BRANCH_SIZE 5
93#define VC4_PACKET_BRANCH_TO_SUB_LIST_SIZE 5
94#define VC4_PACKET_RETURN_FROM_SUB_LIST_SIZE 1
95#define VC4_PACKET_STORE_MS_TILE_BUFFER_SIZE 1
96#define VC4_PACKET_STORE_MS_TILE_BUFFER_AND_EOF_SIZE 1
97#define VC4_PACKET_STORE_FULL_RES_TILE_BUFFER_SIZE 5
98#define VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER_SIZE 5
99#define VC4_PACKET_STORE_TILE_BUFFER_GENERAL_SIZE 7
100#define VC4_PACKET_LOAD_TILE_BUFFER_GENERAL_SIZE 7
101#define VC4_PACKET_GL_INDEXED_PRIMITIVE_SIZE 14
102#define VC4_PACKET_GL_ARRAY_PRIMITIVE_SIZE 10
103#define VC4_PACKET_COMPRESSED_PRIMITIVE_SIZE 1
104#define VC4_PACKET_CLIPPED_COMPRESSED_PRIMITIVE_SIZE 1
105#define VC4_PACKET_PRIMITIVE_LIST_FORMAT_SIZE 2
106#define VC4_PACKET_GL_SHADER_STATE_SIZE 5
107#define VC4_PACKET_NV_SHADER_STATE_SIZE 5
108#define VC4_PACKET_VG_SHADER_STATE_SIZE 5
109#define VC4_PACKET_CONFIGURATION_BITS_SIZE 4
110#define VC4_PACKET_FLAT_SHADE_FLAGS_SIZE 5
111#define VC4_PACKET_POINT_SIZE_SIZE 5
112#define VC4_PACKET_LINE_WIDTH_SIZE 5
113#define VC4_PACKET_RHT_X_BOUNDARY_SIZE 3
114#define VC4_PACKET_DEPTH_OFFSET_SIZE 5
115#define VC4_PACKET_CLIP_WINDOW_SIZE 9
116#define VC4_PACKET_VIEWPORT_OFFSET_SIZE 5
117#define VC4_PACKET_Z_CLIPPING_SIZE 9
118#define VC4_PACKET_CLIPPER_XY_SCALING_SIZE 9
119#define VC4_PACKET_CLIPPER_Z_SCALING_SIZE 9
120#define VC4_PACKET_TILE_BINNING_MODE_CONFIG_SIZE 16
121#define VC4_PACKET_TILE_RENDERING_MODE_CONFIG_SIZE 11
122#define VC4_PACKET_CLEAR_COLORS_SIZE 14
123#define VC4_PACKET_TILE_COORDINATES_SIZE 3
124#define VC4_PACKET_GEM_HANDLES_SIZE 9
125
126#define VC4_MASK(high, low) (((1 << ((high) - (low) + 1)) - 1) << (low))
127/* Using the GNU statement expression extension */
128#define VC4_SET_FIELD(value, field) \
129 ({ \
130 uint32_t fieldval = (value) << field ## _SHIFT; \
131 assert((fieldval & ~ field ## _MASK) == 0); \
132 fieldval & field ## _MASK; \
133 })
134
135#define VC4_GET_FIELD(word, field) (((word) & field ## _MASK) >> field ## _SHIFT)
136
137/** @{
138 * Bits used by packets like VC4_PACKET_STORE_TILE_BUFFER_GENERAL and
139 * VC4_PACKET_TILE_RENDERING_MODE_CONFIG.
140*/
141#define VC4_TILING_FORMAT_LINEAR 0
142#define VC4_TILING_FORMAT_T 1
143#define VC4_TILING_FORMAT_LT 2
144/** @} */
145
146/** @{
147 *
148 * low bits of VC4_PACKET_STORE_FULL_RES_TILE_BUFFER and
149 * VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER.
150 */
151#define VC4_LOADSTORE_FULL_RES_EOF (1 << 3)
152#define VC4_LOADSTORE_FULL_RES_DISABLE_CLEAR_ALL (1 << 2)
153#define VC4_LOADSTORE_FULL_RES_DISABLE_ZS (1 << 1)
154#define VC4_LOADSTORE_FULL_RES_DISABLE_COLOR (1 << 0)
155
156/** @{
157 *
158 * byte 2 of VC4_PACKET_STORE_TILE_BUFFER_GENERAL and
159 * VC4_PACKET_LOAD_TILE_BUFFER_GENERAL (low bits of the address)
160 */
161
162#define VC4_LOADSTORE_TILE_BUFFER_EOF (1 << 3)
163#define VC4_LOADSTORE_TILE_BUFFER_DISABLE_FULL_VG_MASK (1 << 2)
164#define VC4_LOADSTORE_TILE_BUFFER_DISABLE_FULL_ZS (1 << 1)
165#define VC4_LOADSTORE_TILE_BUFFER_DISABLE_FULL_COLOR (1 << 0)
166
167/** @} */
168
169/** @{
170 *
171 * byte 0-1 of VC4_PACKET_STORE_TILE_BUFFER_GENERAL and
172 * VC4_PACKET_LOAD_TILE_BUFFER_GENERAL
173 */
174#define VC4_STORE_TILE_BUFFER_DISABLE_VG_MASK_CLEAR (1 << 15)
175#define VC4_STORE_TILE_BUFFER_DISABLE_ZS_CLEAR (1 << 14)
176#define VC4_STORE_TILE_BUFFER_DISABLE_COLOR_CLEAR (1 << 13)
177#define VC4_STORE_TILE_BUFFER_DISABLE_SWAP (1 << 12)
178
179#define VC4_LOADSTORE_TILE_BUFFER_FORMAT_MASK VC4_MASK(9, 8)
180#define VC4_LOADSTORE_TILE_BUFFER_FORMAT_SHIFT 8
181#define VC4_LOADSTORE_TILE_BUFFER_RGBA8888 0
182#define VC4_LOADSTORE_TILE_BUFFER_BGR565_DITHER 1
183#define VC4_LOADSTORE_TILE_BUFFER_BGR565 2
184/** @} */
185
186/** @{
187 *
188 * byte 0 of VC4_PACKET_STORE_TILE_BUFFER_GENERAL and
189 * VC4_PACKET_LOAD_TILE_BUFFER_GENERAL
190 */
191#define VC4_STORE_TILE_BUFFER_MODE_MASK VC4_MASK(7, 6)
192#define VC4_STORE_TILE_BUFFER_MODE_SHIFT 6
193#define VC4_STORE_TILE_BUFFER_MODE_SAMPLE0 (0 << 6)
194#define VC4_STORE_TILE_BUFFER_MODE_DECIMATE_X4 (1 << 6)
195#define VC4_STORE_TILE_BUFFER_MODE_DECIMATE_X16 (2 << 6)
196
197/** The values of the field are VC4_TILING_FORMAT_* */
198#define VC4_LOADSTORE_TILE_BUFFER_TILING_MASK VC4_MASK(5, 4)
199#define VC4_LOADSTORE_TILE_BUFFER_TILING_SHIFT 4
200
201#define VC4_LOADSTORE_TILE_BUFFER_BUFFER_MASK VC4_MASK(2, 0)
202#define VC4_LOADSTORE_TILE_BUFFER_BUFFER_SHIFT 0
203#define VC4_LOADSTORE_TILE_BUFFER_NONE 0
204#define VC4_LOADSTORE_TILE_BUFFER_COLOR 1
205#define VC4_LOADSTORE_TILE_BUFFER_ZS 2
206#define VC4_LOADSTORE_TILE_BUFFER_Z 3
207#define VC4_LOADSTORE_TILE_BUFFER_VG_MASK 4
208#define VC4_LOADSTORE_TILE_BUFFER_FULL 5
209/** @} */
210
211#define VC4_INDEX_BUFFER_U8 (0 << 4)
212#define VC4_INDEX_BUFFER_U16 (1 << 4)
213
214/* This flag is only present in NV shader state. */
215#define VC4_SHADER_FLAG_SHADED_CLIP_COORDS (1 << 3)
216#define VC4_SHADER_FLAG_ENABLE_CLIPPING (1 << 2)
217#define VC4_SHADER_FLAG_VS_POINT_SIZE (1 << 1)
218#define VC4_SHADER_FLAG_FS_SINGLE_THREAD (1 << 0)
219
220/** @{ byte 2 of config bits. */
221#define VC4_CONFIG_BITS_EARLY_Z_UPDATE (1 << 1)
222#define VC4_CONFIG_BITS_EARLY_Z (1 << 0)
223/** @} */
224
225/** @{ byte 1 of config bits. */
226#define VC4_CONFIG_BITS_Z_UPDATE (1 << 7)
227/** same values in this 3-bit field as PIPE_FUNC_* */
228#define VC4_CONFIG_BITS_DEPTH_FUNC_SHIFT 4
229#define VC4_CONFIG_BITS_COVERAGE_READ_LEAVE (1 << 3)
230
231#define VC4_CONFIG_BITS_COVERAGE_UPDATE_NONZERO (0 << 1)
232#define VC4_CONFIG_BITS_COVERAGE_UPDATE_ODD (1 << 1)
233#define VC4_CONFIG_BITS_COVERAGE_UPDATE_OR (2 << 1)
234#define VC4_CONFIG_BITS_COVERAGE_UPDATE_ZERO (3 << 1)
235
236#define VC4_CONFIG_BITS_COVERAGE_PIPE_SELECT (1 << 0)
237/** @} */
238
239/** @{ byte 0 of config bits. */
240#define VC4_CONFIG_BITS_RASTERIZER_OVERSAMPLE_NONE (0 << 6)
241#define VC4_CONFIG_BITS_RASTERIZER_OVERSAMPLE_4X (1 << 6)
242#define VC4_CONFIG_BITS_RASTERIZER_OVERSAMPLE_16X (2 << 6)
243#define VC4_CONFIG_BITS_RASTERIZER_OVERSAMPLE_MASK (3 << 6)
244
245#define VC4_CONFIG_BITS_AA_POINTS_AND_LINES (1 << 4)
246#define VC4_CONFIG_BITS_ENABLE_DEPTH_OFFSET (1 << 3)
247#define VC4_CONFIG_BITS_CW_PRIMITIVES (1 << 2)
248#define VC4_CONFIG_BITS_ENABLE_PRIM_BACK (1 << 1)
249#define VC4_CONFIG_BITS_ENABLE_PRIM_FRONT (1 << 0)
250/** @} */
251
252/** @{ bits in the last u8 of VC4_PACKET_TILE_BINNING_MODE_CONFIG */
253#define VC4_BIN_CONFIG_DB_NON_MS (1 << 7)
254
255#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_MASK VC4_MASK(6, 5)
256#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_SHIFT 5
257#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_32 0
258#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_64 1
259#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_128 2
260#define VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_256 3
261
262#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_MASK VC4_MASK(4, 3)
263#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_SHIFT 3
264#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_32 0
265#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_64 1
266#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_128 2
267#define VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_256 3
268
269#define VC4_BIN_CONFIG_AUTO_INIT_TSDA (1 << 2)
270#define VC4_BIN_CONFIG_TILE_BUFFER_64BIT (1 << 1)
271#define VC4_BIN_CONFIG_MS_MODE_4X (1 << 0)
272/** @} */
273
274/** @{ bits in the last u16 of VC4_PACKET_TILE_RENDERING_MODE_CONFIG */
275#define VC4_RENDER_CONFIG_DB_NON_MS (1 << 12)
276#define VC4_RENDER_CONFIG_EARLY_Z_COVERAGE_DISABLE (1 << 11)
277#define VC4_RENDER_CONFIG_EARLY_Z_DIRECTION_G (1 << 10)
278#define VC4_RENDER_CONFIG_COVERAGE_MODE (1 << 9)
279#define VC4_RENDER_CONFIG_ENABLE_VG_MASK (1 << 8)
280
281/** The values of the field are VC4_TILING_FORMAT_* */
282#define VC4_RENDER_CONFIG_MEMORY_FORMAT_MASK VC4_MASK(7, 6)
283#define VC4_RENDER_CONFIG_MEMORY_FORMAT_SHIFT 6
284
285#define VC4_RENDER_CONFIG_DECIMATE_MODE_1X (0 << 4)
286#define VC4_RENDER_CONFIG_DECIMATE_MODE_4X (1 << 4)
287#define VC4_RENDER_CONFIG_DECIMATE_MODE_16X (2 << 4)
288#define VC4_RENDER_CONFIG_DECIMATE_MODE_MASK (3 << 4)
289
290#define VC4_RENDER_CONFIG_FORMAT_MASK VC4_MASK(3, 2)
291#define VC4_RENDER_CONFIG_FORMAT_SHIFT 2
292#define VC4_RENDER_CONFIG_FORMAT_BGR565_DITHERED 0
293#define VC4_RENDER_CONFIG_FORMAT_RGBA8888 1
294#define VC4_RENDER_CONFIG_FORMAT_BGR565 2
295
296#define VC4_RENDER_CONFIG_TILE_BUFFER_64BIT (1 << 1)
297#define VC4_RENDER_CONFIG_MS_MODE_4X (1 << 0)
298
299#define VC4_PRIMITIVE_LIST_FORMAT_16_INDEX (1 << 4)
300#define VC4_PRIMITIVE_LIST_FORMAT_32_XY (3 << 4)
301#define VC4_PRIMITIVE_LIST_FORMAT_TYPE_POINTS (0 << 0)
302#define VC4_PRIMITIVE_LIST_FORMAT_TYPE_LINES (1 << 0)
303#define VC4_PRIMITIVE_LIST_FORMAT_TYPE_TRIANGLES (2 << 0)
304#define VC4_PRIMITIVE_LIST_FORMAT_TYPE_RHT (3 << 0)
305
306enum vc4_texture_data_type {
307 VC4_TEXTURE_TYPE_RGBA8888 = 0,
308 VC4_TEXTURE_TYPE_RGBX8888 = 1,
309 VC4_TEXTURE_TYPE_RGBA4444 = 2,
310 VC4_TEXTURE_TYPE_RGBA5551 = 3,
311 VC4_TEXTURE_TYPE_RGB565 = 4,
312 VC4_TEXTURE_TYPE_LUMINANCE = 5,
313 VC4_TEXTURE_TYPE_ALPHA = 6,
314 VC4_TEXTURE_TYPE_LUMALPHA = 7,
315 VC4_TEXTURE_TYPE_ETC1 = 8,
316 VC4_TEXTURE_TYPE_S16F = 9,
317 VC4_TEXTURE_TYPE_S8 = 10,
318 VC4_TEXTURE_TYPE_S16 = 11,
319 VC4_TEXTURE_TYPE_BW1 = 12,
320 VC4_TEXTURE_TYPE_A4 = 13,
321 VC4_TEXTURE_TYPE_A1 = 14,
322 VC4_TEXTURE_TYPE_RGBA64 = 15,
323 VC4_TEXTURE_TYPE_RGBA32R = 16,
324 VC4_TEXTURE_TYPE_YUV422R = 17,
325};
326
327#define VC4_TEX_P0_OFFSET_MASK VC4_MASK(31, 12)
328#define VC4_TEX_P0_OFFSET_SHIFT 12
329#define VC4_TEX_P0_CSWIZ_MASK VC4_MASK(11, 10)
330#define VC4_TEX_P0_CSWIZ_SHIFT 10
331#define VC4_TEX_P0_CMMODE_MASK VC4_MASK(9, 9)
332#define VC4_TEX_P0_CMMODE_SHIFT 9
333#define VC4_TEX_P0_FLIPY_MASK VC4_MASK(8, 8)
334#define VC4_TEX_P0_FLIPY_SHIFT 8
335#define VC4_TEX_P0_TYPE_MASK VC4_MASK(7, 4)
336#define VC4_TEX_P0_TYPE_SHIFT 4
337#define VC4_TEX_P0_MIPLVLS_MASK VC4_MASK(3, 0)
338#define VC4_TEX_P0_MIPLVLS_SHIFT 0
339
340#define VC4_TEX_P1_TYPE4_MASK VC4_MASK(31, 31)
341#define VC4_TEX_P1_TYPE4_SHIFT 31
342#define VC4_TEX_P1_HEIGHT_MASK VC4_MASK(30, 20)
343#define VC4_TEX_P1_HEIGHT_SHIFT 20
344#define VC4_TEX_P1_ETCFLIP_MASK VC4_MASK(19, 19)
345#define VC4_TEX_P1_ETCFLIP_SHIFT 19
346#define VC4_TEX_P1_WIDTH_MASK VC4_MASK(18, 8)
347#define VC4_TEX_P1_WIDTH_SHIFT 8
348
349#define VC4_TEX_P1_MAGFILT_MASK VC4_MASK(7, 7)
350#define VC4_TEX_P1_MAGFILT_SHIFT 7
351# define VC4_TEX_P1_MAGFILT_LINEAR 0
352# define VC4_TEX_P1_MAGFILT_NEAREST 1
353
354#define VC4_TEX_P1_MINFILT_MASK VC4_MASK(6, 4)
355#define VC4_TEX_P1_MINFILT_SHIFT 4
356# define VC4_TEX_P1_MINFILT_LINEAR 0
357# define VC4_TEX_P1_MINFILT_NEAREST 1
358# define VC4_TEX_P1_MINFILT_NEAR_MIP_NEAR 2
359# define VC4_TEX_P1_MINFILT_NEAR_MIP_LIN 3
360# define VC4_TEX_P1_MINFILT_LIN_MIP_NEAR 4
361# define VC4_TEX_P1_MINFILT_LIN_MIP_LIN 5
362
363#define VC4_TEX_P1_WRAP_T_MASK VC4_MASK(3, 2)
364#define VC4_TEX_P1_WRAP_T_SHIFT 2
365#define VC4_TEX_P1_WRAP_S_MASK VC4_MASK(1, 0)
366#define VC4_TEX_P1_WRAP_S_SHIFT 0
367# define VC4_TEX_P1_WRAP_REPEAT 0
368# define VC4_TEX_P1_WRAP_CLAMP 1
369# define VC4_TEX_P1_WRAP_MIRROR 2
370# define VC4_TEX_P1_WRAP_BORDER 3
371
372#define VC4_TEX_P2_PTYPE_MASK VC4_MASK(31, 30)
373#define VC4_TEX_P2_PTYPE_SHIFT 30
374# define VC4_TEX_P2_PTYPE_IGNORED 0
375# define VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE 1
376# define VC4_TEX_P2_PTYPE_CHILD_IMAGE_DIMENSIONS 2
377# define VC4_TEX_P2_PTYPE_CHILD_IMAGE_OFFSETS 3
378
379/* VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE bits */
380#define VC4_TEX_P2_CMST_MASK VC4_MASK(29, 12)
381#define VC4_TEX_P2_CMST_SHIFT 12
382#define VC4_TEX_P2_BSLOD_MASK VC4_MASK(0, 0)
383#define VC4_TEX_P2_BSLOD_SHIFT 0
384
385/* VC4_TEX_P2_PTYPE_CHILD_IMAGE_DIMENSIONS */
386#define VC4_TEX_P2_CHEIGHT_MASK VC4_MASK(22, 12)
387#define VC4_TEX_P2_CHEIGHT_SHIFT 12
388#define VC4_TEX_P2_CWIDTH_MASK VC4_MASK(10, 0)
389#define VC4_TEX_P2_CWIDTH_SHIFT 0
390
391/* VC4_TEX_P2_PTYPE_CHILD_IMAGE_OFFSETS */
392#define VC4_TEX_P2_CYOFF_MASK VC4_MASK(22, 12)
393#define VC4_TEX_P2_CYOFF_SHIFT 12
394#define VC4_TEX_P2_CXOFF_MASK VC4_MASK(10, 0)
395#define VC4_TEX_P2_CXOFF_SHIFT 0
396
397#endif /* VC4_PACKET_H */
diff --git a/vc4/vc4_qpu_defines.h b/vc4/vc4_qpu_defines.h
new file mode 100644
index 00000000..26fcf505
--- /dev/null
+++ b/vc4/vc4_qpu_defines.h
@@ -0,0 +1,274 @@
1/*
2 * Copyright © 2014 Broadcom
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24#ifndef VC4_QPU_DEFINES_H
25#define VC4_QPU_DEFINES_H
26
/* Opcodes for the QPU add ALU.  Encodings not listed (9-11, 25-29) are
 * reserved; the explicit `= 12` / `= 30` initializers skip over them. */
enum qpu_op_add {
        QPU_A_NOP,
        QPU_A_FADD,      /* float add */
        QPU_A_FSUB,      /* float subtract */
        QPU_A_FMIN,
        QPU_A_FMAX,
        QPU_A_FMINABS,   /* min of absolute values */
        QPU_A_FMAXABS,   /* max of absolute values */
        QPU_A_FTOI,      /* float -> int conversion */
        QPU_A_ITOF,      /* int -> float conversion */
        QPU_A_ADD = 12,  /* integer ops start here */
        QPU_A_SUB,
        QPU_A_SHR,       /* logical shift right */
        QPU_A_ASR,       /* arithmetic shift right */
        QPU_A_ROR,       /* rotate right */
        QPU_A_SHL,
        QPU_A_MIN,
        QPU_A_MAX,
        QPU_A_AND,
        QPU_A_OR,
        QPU_A_XOR,
        QPU_A_NOT,
        QPU_A_CLZ,       /* count leading zeroes */
        QPU_A_V8ADDS = 30, /* per-byte saturating add */
        QPU_A_V8SUBS = 31, /* per-byte saturating subtract */
};
53
/* Opcodes for the QPU mul ALU (3-bit field, all eight encodings used). */
enum qpu_op_mul {
        QPU_M_NOP,
        QPU_M_FMUL,    /* float multiply */
        QPU_M_MUL24,   /* 24-bit integer multiply */
        QPU_M_V8MULD,  /* per-byte ops below */
        QPU_M_V8MIN,
        QPU_M_V8MAX,
        QPU_M_V8ADDS,
        QPU_M_V8SUBS,
};
64
/* Read-address encodings for the raddr_a/raddr_b instruction fields.
 * Values 32 and up select special sources instead of the plain regfile;
 * gaps between the explicit initializers are reserved encodings. */
enum qpu_raddr {
        QPU_R_FRAG_PAYLOAD_ZW = 15, /* W for A file, Z for B file */
        /* 0-31 are the plain regfile a or b fields */
        QPU_R_UNIF = 32,            /* uniform stream read */
        QPU_R_VARY = 35,            /* varying interpolation read */
        QPU_R_ELEM_QPU = 38,
        QPU_R_NOP,                  /* = 39 */
        QPU_R_XY_PIXEL_COORD = 41,
        QPU_R_MS_REV_FLAGS = 42,
        QPU_R_VPM = 48,             /* VPM read FIFO */
        QPU_R_VPM_LD_BUSY,
        QPU_R_VPM_LD_WAIT,
        QPU_R_MUTEX_ACQUIRE,
};
79
/* Write-address encodings for the add/mul ALU result writeback.  Values
 * 32 and up select special destinations (accumulators, TLB, VPM, SFU,
 * TMUs) instead of the plain regfile. */
enum qpu_waddr {
        /* 0-31 are the plain regfile a or b fields */
        QPU_W_ACC0 = 32, /* aka r0 */
        QPU_W_ACC1,
        QPU_W_ACC2,
        QPU_W_ACC3,
        QPU_W_TMU_NOSWAP,
        QPU_W_ACC5,
        QPU_W_HOST_INT,            /* host interrupt */
        QPU_W_NOP,
        QPU_W_UNIFORMS_ADDRESS,
        QPU_W_QUAD_XY, /* X for regfile a, Y for regfile b */
        /* note: QPU_W_MS_FLAGS and QPU_W_REV_FLAG share encoding 42 */
        QPU_W_MS_FLAGS = 42,
        QPU_W_REV_FLAG = 42,
        QPU_W_TLB_STENCIL_SETUP = 43,
        QPU_W_TLB_Z,
        QPU_W_TLB_COLOR_MS,
        QPU_W_TLB_COLOR_ALL,
        QPU_W_TLB_ALPHA_MASK,
        QPU_W_VPM,                 /* VPM write FIFO */
        QPU_W_VPMVCD_SETUP, /* LD for regfile a, ST for regfile b */
        QPU_W_VPM_ADDR, /* LD for regfile a, ST for regfile b */
        QPU_W_MUTEX_RELEASE,
        /* special function unit inputs; results land in r4 */
        QPU_W_SFU_RECIP,
        QPU_W_SFU_RECIPSQRT,
        QPU_W_SFU_EXP,
        QPU_W_SFU_LOG,
        /* texture unit coordinate/LOD writes */
        QPU_W_TMU0_S,
        QPU_W_TMU0_T,
        QPU_W_TMU0_R,
        QPU_W_TMU0_B,
        QPU_W_TMU1_S,
        QPU_W_TMU1_T,
        QPU_W_TMU1_R,
        QPU_W_TMU1_B,
};
116
/* Values of the 4-bit instruction signaling field (all 16 encodings
 * used; QPU_SIG_NONE is the default "no signal" encoding, not 0). */
enum qpu_sig_bits {
        QPU_SIG_SW_BREAKPOINT,
        QPU_SIG_NONE,
        QPU_SIG_THREAD_SWITCH,
        QPU_SIG_PROG_END,
        QPU_SIG_WAIT_FOR_SCOREBOARD,
        QPU_SIG_SCOREBOARD_UNLOCK,
        QPU_SIG_LAST_THREAD_SWITCH,
        QPU_SIG_COVERAGE_LOAD,
        QPU_SIG_COLOR_LOAD,
        QPU_SIG_COLOR_LOAD_END,
        QPU_SIG_LOAD_TMU0,          /* TMU0 result -> r4 */
        QPU_SIG_LOAD_TMU1,          /* TMU1 result -> r4 */
        QPU_SIG_ALPHA_MASK_LOAD,
        QPU_SIG_SMALL_IMM,          /* raddr_b holds a small immediate */
        QPU_SIG_LOAD_IMM,           /* instruction is a load-immediate */
        QPU_SIG_BRANCH
};
135
/* ALU input mux selections: accumulators r0-r5 or a regfile read. */
enum qpu_mux {
        /* hardware mux values */
        QPU_MUX_R0,
        QPU_MUX_R1,
        QPU_MUX_R2,
        QPU_MUX_R3,
        QPU_MUX_R4,
        QPU_MUX_R5,
        QPU_MUX_A,   /* regfile A read (raddr_a) */
        QPU_MUX_B,   /* regfile B read (raddr_b) */

        /**
         * Non-hardware mux value, stores a small immediate field to be
         * programmed into raddr_b in the qpu_reg.index.
         */
        QPU_MUX_SMALL_IMM,
};
153
/* Per-ALU condition codes.  Suffixes follow the VC4 flag naming:
 * Z = zero, N = negative, C = carry; S = set, C(lear) = clear. */
enum qpu_cond {
        QPU_COND_NEVER,
        QPU_COND_ALWAYS,
        QPU_COND_ZS,   /* zero set */
        QPU_COND_ZC,   /* zero clear */
        QPU_COND_NS,   /* negative set */
        QPU_COND_NC,   /* negative clear */
        QPU_COND_CS,   /* carry set */
        QPU_COND_CC,   /* carry clear */
};
164
/* Pack modes for the mul ALU / r4 (used when the QPU_PM bit is set).
 * Encodings 1-2 are reserved, hence the explicit `= 3`. */
enum qpu_pack_mul {
        QPU_PACK_MUL_NOP,
        QPU_PACK_MUL_8888 = 3, /* replicated to each 8 bits of the 32-bit dst. */
        QPU_PACK_MUL_8A,
        QPU_PACK_MUL_8B,
        QPU_PACK_MUL_8C,
        QPU_PACK_MUL_8D,
};
173
/* Pack modes for regfile A writes (used when the QPU_PM bit is clear). */
enum qpu_pack_a {
        QPU_PACK_A_NOP,
        /* convert to 16 bit float if float input, or to int16. */
        QPU_PACK_A_16A,
        QPU_PACK_A_16B,
        /* replicated to each 8 bits of the 32-bit dst. */
        QPU_PACK_A_8888,
        /* Convert to 8-bit unsigned int. */
        QPU_PACK_A_8A,
        QPU_PACK_A_8B,
        QPU_PACK_A_8C,
        QPU_PACK_A_8D,

        /* Saturating variants of the previous instructions. */
        QPU_PACK_A_32_SAT, /* int-only */
        QPU_PACK_A_16A_SAT, /* int or float */
        QPU_PACK_A_16B_SAT,
        QPU_PACK_A_8888_SAT,
        QPU_PACK_A_8A_SAT,
        QPU_PACK_A_8B_SAT,
        QPU_PACK_A_8C_SAT,
        QPU_PACK_A_8D_SAT,
};
197
/* Unpack modes for the 3-bit unpack field, selecting a 16-bit half or an
 * 8-bit byte of the regfile A / r4 input (8D_REP replicates byte D). */
enum qpu_unpack {
        QPU_UNPACK_NOP,
        QPU_UNPACK_16A,
        QPU_UNPACK_16B,
        QPU_UNPACK_8D_REP,
        QPU_UNPACK_8A,
        QPU_UNPACK_8B,
        QPU_UNPACK_8C,
        QPU_UNPACK_8D,
};
208
209#define QPU_MASK(high, low) ((((uint64_t)1<<((high)-(low)+1))-1)<<(low))
210/* Using the GNU statement expression extension */
211#define QPU_SET_FIELD(value, field) \
212 ({ \
213 uint64_t fieldval = (uint64_t)(value) << field ## _SHIFT; \
214 assert((fieldval & ~ field ## _MASK) == 0); \
215 fieldval & field ## _MASK; \
216 })
217
218#define QPU_GET_FIELD(word, field) ((uint32_t)(((word) & field ## _MASK) >> field ## _SHIFT))
219
220#define QPU_UPDATE_FIELD(inst, value, field) \
221 (((inst) & ~(field ## _MASK)) | QPU_SET_FIELD(value, field))
222
223#define QPU_SIG_SHIFT 60
224#define QPU_SIG_MASK QPU_MASK(63, 60)
225
226#define QPU_UNPACK_SHIFT 57
227#define QPU_UNPACK_MASK QPU_MASK(59, 57)
228
229/**
230 * If set, the pack field means PACK_MUL or R4 packing, instead of normal
231 * regfile a packing.
232 */
233#define QPU_PM ((uint64_t)1 << 56)
234
235#define QPU_PACK_SHIFT 52
236#define QPU_PACK_MASK QPU_MASK(55, 52)
237
238#define QPU_COND_ADD_SHIFT 49
239#define QPU_COND_ADD_MASK QPU_MASK(51, 49)
240#define QPU_COND_MUL_SHIFT 46
241#define QPU_COND_MUL_MASK QPU_MASK(48, 46)
242
243#define QPU_SF ((uint64_t)1 << 45)
244
245#define QPU_WADDR_ADD_SHIFT 38
246#define QPU_WADDR_ADD_MASK QPU_MASK(43, 38)
247#define QPU_WADDR_MUL_SHIFT 32
248#define QPU_WADDR_MUL_MASK QPU_MASK(37, 32)
249
250#define QPU_OP_MUL_SHIFT 29
251#define QPU_OP_MUL_MASK QPU_MASK(31, 29)
252
253#define QPU_RADDR_A_SHIFT 18
254#define QPU_RADDR_A_MASK QPU_MASK(23, 18)
255#define QPU_RADDR_B_SHIFT 12
256#define QPU_RADDR_B_MASK QPU_MASK(17, 12)
257#define QPU_SMALL_IMM_SHIFT 12
258#define QPU_SMALL_IMM_MASK QPU_MASK(17, 12)
259
260#define QPU_ADD_A_SHIFT 9
261#define QPU_ADD_A_MASK QPU_MASK(11, 9)
262#define QPU_ADD_B_SHIFT 6
263#define QPU_ADD_B_MASK QPU_MASK(8, 6)
264#define QPU_MUL_A_SHIFT 3
265#define QPU_MUL_A_MASK QPU_MASK(5, 3)
266#define QPU_MUL_B_SHIFT 0
267#define QPU_MUL_B_MASK QPU_MASK(2, 0)
268
269#define QPU_WS ((uint64_t)1 << 44)
270
271#define QPU_OP_ADD_SHIFT 24
272#define QPU_OP_ADD_MASK QPU_MASK(28, 24)
273
274#endif /* VC4_QPU_DEFINES_H */
diff --git a/xf86drm.c b/xf86drm.c
index 7e28b4f7..88f86ed5 100644
--- a/xf86drm.c
+++ b/xf86drm.c
@@ -1,5 +1,5 @@
1/** 1/**
2 * \file xf86drm.c 2 * \file xf86drm.c
3 * User-level interface to DRM device 3 * User-level interface to DRM device
4 * 4 *
5 * \author Rickard E. (Rik) Faith <faith@valinux.com> 5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
@@ -54,8 +54,11 @@
54#include <sys/ioctl.h> 54#include <sys/ioctl.h>
55#include <sys/time.h> 55#include <sys/time.h>
56#include <stdarg.h> 56#include <stdarg.h>
57#ifdef HAVE_SYS_MKDEV_H 57#ifdef MAJOR_IN_MKDEV
58# include <sys/mkdev.h> /* defines major(), minor(), and makedev() on Solaris */ 58#include <sys/mkdev.h>
59#endif
60#ifdef MAJOR_IN_SYSMACROS
61#include <sys/sysmacros.h>
59#endif 62#endif
60#include <math.h> 63#include <math.h>
61 64
@@ -70,13 +73,13 @@
70#include "util_math.h" 73#include "util_math.h"
71 74
72#ifdef __OpenBSD__ 75#ifdef __OpenBSD__
73#define DRM_PRIMARY_MINOR_NAME "drm" 76#define DRM_PRIMARY_MINOR_NAME "drm"
74#define DRM_CONTROL_MINOR_NAME "drmC" 77#define DRM_CONTROL_MINOR_NAME "drmC"
75#define DRM_RENDER_MINOR_NAME "drmR" 78#define DRM_RENDER_MINOR_NAME "drmR"
76#else 79#else
77#define DRM_PRIMARY_MINOR_NAME "card" 80#define DRM_PRIMARY_MINOR_NAME "card"
78#define DRM_CONTROL_MINOR_NAME "controlD" 81#define DRM_CONTROL_MINOR_NAME "controlD"
79#define DRM_RENDER_MINOR_NAME "renderD" 82#define DRM_RENDER_MINOR_NAME "renderD"
80#endif 83#endif
81 84
82#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__) 85#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
@@ -96,7 +99,23 @@
96#endif /* __OpenBSD__ */ 99#endif /* __OpenBSD__ */
97 100
98#ifndef DRM_MAJOR 101#ifndef DRM_MAJOR
99#define DRM_MAJOR 226 /* Linux */ 102#define DRM_MAJOR 226 /* Linux */
103#endif
104
105#ifdef __OpenBSD__
106struct drm_pciinfo {
107 uint16_t domain;
108 uint8_t bus;
109 uint8_t dev;
110 uint8_t func;
111 uint16_t vendor_id;
112 uint16_t device_id;
113 uint16_t subvendor_id;
114 uint16_t subdevice_id;
115 uint8_t revision_id;
116};
117
118#define DRM_IOCTL_GET_PCIINFO DRM_IOR(0x15, struct drm_pciinfo)
100#endif 119#endif
101 120
102#define DRM_MSG_VERBOSITY 3 121#define DRM_MSG_VERBOSITY 3
@@ -128,18 +147,18 @@ drmDebugPrint(const char *format, va_list ap)
128void 147void
129drmMsg(const char *format, ...) 148drmMsg(const char *format, ...)
130{ 149{
131 va_list ap; 150 va_list ap;
132 const char *env; 151 const char *env;
133 if (((env = getenv("LIBGL_DEBUG")) && strstr(env, "verbose")) || 152 if (((env = getenv("LIBGL_DEBUG")) && strstr(env, "verbose")) ||
134 (drm_server_info && drm_server_info->debug_print)) 153 (drm_server_info && drm_server_info->debug_print))
135 { 154 {
136 va_start(ap, format); 155 va_start(ap, format);
137 if (drm_server_info) { 156 if (drm_server_info) {
138 drm_server_info->debug_print(format,ap); 157 drm_server_info->debug_print(format,ap);
139 } else { 158 } else {
140 drmDebugPrint(format, ap); 159 drmDebugPrint(format, ap);
141 } 160 }
142 va_end(ap); 161 va_end(ap);
143 } 162 }
144} 163}
145 164
@@ -166,10 +185,10 @@ void drmFree(void *pt)
166int 185int
167drmIoctl(int fd, unsigned long request, void *arg) 186drmIoctl(int fd, unsigned long request, void *arg)
168{ 187{
169 int ret; 188 int ret;
170 189
171 do { 190 do {
172 ret = ioctl(fd, request, arg); 191 ret = ioctl(fd, request, arg);
173 } while (ret == -1 && (errno == EINTR || errno == EAGAIN)); 192 } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
174 return ret; 193 return ret;
175} 194}
@@ -190,16 +209,16 @@ drmHashEntry *drmGetEntry(int fd)
190 drmHashEntry *entry; 209 drmHashEntry *entry;
191 210
192 if (!drmHashTable) 211 if (!drmHashTable)
193 drmHashTable = drmHashCreate(); 212 drmHashTable = drmHashCreate();
194 213
195 if (drmHashLookup(drmHashTable, key, &value)) { 214 if (drmHashLookup(drmHashTable, key, &value)) {
196 entry = drmMalloc(sizeof(*entry)); 215 entry = drmMalloc(sizeof(*entry));
197 entry->fd = fd; 216 entry->fd = fd;
198 entry->f = NULL; 217 entry->f = NULL;
199 entry->tagTable = drmHashCreate(); 218 entry->tagTable = drmHashCreate();
200 drmHashInsert(drmHashTable, key, entry); 219 drmHashInsert(drmHashTable, key, entry);
201 } else { 220 } else {
202 entry = value; 221 entry = value;
203 } 222 }
204 return entry; 223 return entry;
205} 224}
@@ -221,41 +240,41 @@ static int drmMatchBusID(const char *id1, const char *id2, int pci_domain_ok)
221{ 240{
222 /* First, check if the IDs are exactly the same */ 241 /* First, check if the IDs are exactly the same */
223 if (strcasecmp(id1, id2) == 0) 242 if (strcasecmp(id1, id2) == 0)
224 return 1; 243 return 1;
225 244
226 /* Try to match old/new-style PCI bus IDs. */ 245 /* Try to match old/new-style PCI bus IDs. */
227 if (strncasecmp(id1, "pci", 3) == 0) { 246 if (strncasecmp(id1, "pci", 3) == 0) {
228 unsigned int o1, b1, d1, f1; 247 unsigned int o1, b1, d1, f1;
229 unsigned int o2, b2, d2, f2; 248 unsigned int o2, b2, d2, f2;
230 int ret; 249 int ret;
231 250
232 ret = sscanf(id1, "pci:%04x:%02x:%02x.%u", &o1, &b1, &d1, &f1); 251 ret = sscanf(id1, "pci:%04x:%02x:%02x.%u", &o1, &b1, &d1, &f1);
233 if (ret != 4) { 252 if (ret != 4) {
234 o1 = 0; 253 o1 = 0;
235 ret = sscanf(id1, "PCI:%u:%u:%u", &b1, &d1, &f1); 254 ret = sscanf(id1, "PCI:%u:%u:%u", &b1, &d1, &f1);
236 if (ret != 3) 255 if (ret != 3)
237 return 0; 256 return 0;
238 } 257 }
239 258
240 ret = sscanf(id2, "pci:%04x:%02x:%02x.%u", &o2, &b2, &d2, &f2); 259 ret = sscanf(id2, "pci:%04x:%02x:%02x.%u", &o2, &b2, &d2, &f2);
241 if (ret != 4) { 260 if (ret != 4) {
242 o2 = 0; 261 o2 = 0;
243 ret = sscanf(id2, "PCI:%u:%u:%u", &b2, &d2, &f2); 262 ret = sscanf(id2, "PCI:%u:%u:%u", &b2, &d2, &f2);
244 if (ret != 3) 263 if (ret != 3)
245 return 0; 264 return 0;
246 } 265 }
247 266
248 /* If domains aren't properly supported by the kernel interface, 267 /* If domains aren't properly supported by the kernel interface,
249 * just ignore them, which sucks less than picking a totally random 268 * just ignore them, which sucks less than picking a totally random
250 * card with "open by name" 269 * card with "open by name"
251 */ 270 */
252 if (!pci_domain_ok) 271 if (!pci_domain_ok)
253 o1 = o2 = 0; 272 o1 = o2 = 0;
254 273
255 if ((o1 != o2) || (b1 != b2) || (d1 != d2) || (f1 != f2)) 274 if ((o1 != o2) || (b1 != b2) || (d1 != d2) || (f1 != f2))
256 return 0; 275 return 0;
257 else 276 else
258 return 1; 277 return 1;
259 } 278 }
260 return 0; 279 return 0;
261} 280}
@@ -277,18 +296,18 @@ static int drmMatchBusID(const char *id1, const char *id2, int pci_domain_ok)
277#if !defined(UDEV) 296#if !defined(UDEV)
278static int chown_check_return(const char *path, uid_t owner, gid_t group) 297static int chown_check_return(const char *path, uid_t owner, gid_t group)
279{ 298{
280 int rv; 299 int rv;
281 300
282 do { 301 do {
283 rv = chown(path, owner, group); 302 rv = chown(path, owner, group);
284 } while (rv != 0 && errno == EINTR); 303 } while (rv != 0 && errno == EINTR);
285 304
286 if (rv == 0) 305 if (rv == 0)
287 return 0; 306 return 0;
288 307
289 drmMsg("Failed to change owner or group for file %s! %d: %s\n", 308 drmMsg("Failed to change owner or group for file %s! %d: %s\n",
290 path, errno, strerror(errno)); 309 path, errno, strerror(errno));
291 return -1; 310 return -1;
292} 311}
293#endif 312#endif
294 313
@@ -297,7 +316,7 @@ static int chown_check_return(const char *path, uid_t owner, gid_t group)
297 * 316 *
298 * \param dev major and minor numbers of the device. 317 * \param dev major and minor numbers of the device.
299 * \param minor minor number of the device. 318 * \param minor minor number of the device.
300 * 319 *
301 * \return a file descriptor on success, or a negative value on error. 320 * \return a file descriptor on success, or a negative value on error.
302 * 321 *
303 * \internal 322 * \internal
@@ -321,99 +340,99 @@ static int drmOpenDevice(dev_t dev, int minor, int type)
321 340
322 switch (type) { 341 switch (type) {
323 case DRM_NODE_PRIMARY: 342 case DRM_NODE_PRIMARY:
324 dev_name = DRM_DEV_NAME; 343 dev_name = DRM_DEV_NAME;
325 break; 344 break;
326 case DRM_NODE_CONTROL: 345 case DRM_NODE_CONTROL:
327 dev_name = DRM_CONTROL_DEV_NAME; 346 dev_name = DRM_CONTROL_DEV_NAME;
328 break; 347 break;
329 case DRM_NODE_RENDER: 348 case DRM_NODE_RENDER:
330 dev_name = DRM_RENDER_DEV_NAME; 349 dev_name = DRM_RENDER_DEV_NAME;
331 break; 350 break;
332 default: 351 default:
333 return -EINVAL; 352 return -EINVAL;
334 }; 353 };
335 354
336 sprintf(buf, dev_name, DRM_DIR_NAME, minor); 355 sprintf(buf, dev_name, DRM_DIR_NAME, minor);
337 drmMsg("drmOpenDevice: node name is %s\n", buf); 356 drmMsg("drmOpenDevice: node name is %s\n", buf);
338 357
339 if (drm_server_info && drm_server_info->get_perms) { 358 if (drm_server_info && drm_server_info->get_perms) {
340 drm_server_info->get_perms(&serv_group, &serv_mode); 359 drm_server_info->get_perms(&serv_group, &serv_mode);
341 devmode = serv_mode ? serv_mode : DRM_DEV_MODE; 360 devmode = serv_mode ? serv_mode : DRM_DEV_MODE;
342 devmode &= ~(S_IXUSR|S_IXGRP|S_IXOTH); 361 devmode &= ~(S_IXUSR|S_IXGRP|S_IXOTH);
343 } 362 }
344 363
345#if !defined(UDEV) 364#if !defined(UDEV)
346 if (stat(DRM_DIR_NAME, &st)) { 365 if (stat(DRM_DIR_NAME, &st)) {
347 if (!isroot) 366 if (!isroot)
348 return DRM_ERR_NOT_ROOT; 367 return DRM_ERR_NOT_ROOT;
349 mkdir(DRM_DIR_NAME, DRM_DEV_DIRMODE); 368 mkdir(DRM_DIR_NAME, DRM_DEV_DIRMODE);
350 chown_check_return(DRM_DIR_NAME, 0, 0); /* root:root */ 369 chown_check_return(DRM_DIR_NAME, 0, 0); /* root:root */
351 chmod(DRM_DIR_NAME, DRM_DEV_DIRMODE); 370 chmod(DRM_DIR_NAME, DRM_DEV_DIRMODE);
352 } 371 }
353 372
354 /* Check if the device node exists and create it if necessary. */ 373 /* Check if the device node exists and create it if necessary. */
355 if (stat(buf, &st)) { 374 if (stat(buf, &st)) {
356 if (!isroot) 375 if (!isroot)
357 return DRM_ERR_NOT_ROOT; 376 return DRM_ERR_NOT_ROOT;
358 remove(buf); 377 remove(buf);
359 mknod(buf, S_IFCHR | devmode, dev); 378 mknod(buf, S_IFCHR | devmode, dev);
360 } 379 }
361 380
362 if (drm_server_info && drm_server_info->get_perms) { 381 if (drm_server_info && drm_server_info->get_perms) {
363 group = ((int)serv_group >= 0) ? serv_group : DRM_DEV_GID; 382 group = ((int)serv_group >= 0) ? serv_group : DRM_DEV_GID;
364 chown_check_return(buf, user, group); 383 chown_check_return(buf, user, group);
365 chmod(buf, devmode); 384 chmod(buf, devmode);
366 } 385 }
367#else 386#else
368 /* if we modprobed then wait for udev */ 387 /* if we modprobed then wait for udev */
369 { 388 {
370 int udev_count = 0; 389 int udev_count = 0;
371wait_for_udev: 390wait_for_udev:
372 if (stat(DRM_DIR_NAME, &st)) { 391 if (stat(DRM_DIR_NAME, &st)) {
373 usleep(20); 392 usleep(20);
374 udev_count++; 393 udev_count++;
375 394
376 if (udev_count == 50) 395 if (udev_count == 50)
377 return -1; 396 return -1;
378 goto wait_for_udev; 397 goto wait_for_udev;
379 } 398 }
380 399
381 if (stat(buf, &st)) { 400 if (stat(buf, &st)) {
382 usleep(20); 401 usleep(20);
383 udev_count++; 402 udev_count++;
384 403
385 if (udev_count == 50) 404 if (udev_count == 50)
386 return -1; 405 return -1;
387 goto wait_for_udev; 406 goto wait_for_udev;
388 } 407 }
389 } 408 }
390#endif 409#endif
391 410
392 fd = open(buf, O_RDWR, 0); 411 fd = open(buf, O_RDWR, 0);
393 drmMsg("drmOpenDevice: open result is %d, (%s)\n", 412 drmMsg("drmOpenDevice: open result is %d, (%s)\n",
394 fd, fd < 0 ? strerror(errno) : "OK"); 413 fd, fd < 0 ? strerror(errno) : "OK");
395 if (fd >= 0) 414 if (fd >= 0)
396 return fd; 415 return fd;
397 416
398#if !defined(UDEV) 417#if !defined(UDEV)
399 /* Check if the device node is not what we expect it to be, and recreate it 418 /* Check if the device node is not what we expect it to be, and recreate it
400 * and try again if so. 419 * and try again if so.
401 */ 420 */
402 if (st.st_rdev != dev) { 421 if (st.st_rdev != dev) {
403 if (!isroot) 422 if (!isroot)
404 return DRM_ERR_NOT_ROOT; 423 return DRM_ERR_NOT_ROOT;
405 remove(buf); 424 remove(buf);
406 mknod(buf, S_IFCHR | devmode, dev); 425 mknod(buf, S_IFCHR | devmode, dev);
407 if (drm_server_info && drm_server_info->get_perms) { 426 if (drm_server_info && drm_server_info->get_perms) {
408 chown_check_return(buf, user, group); 427 chown_check_return(buf, user, group);
409 chmod(buf, devmode); 428 chmod(buf, devmode);
410 } 429 }
411 } 430 }
412 fd = open(buf, O_RDWR, 0); 431 fd = open(buf, O_RDWR, 0);
413 drmMsg("drmOpenDevice: open result is %d, (%s)\n", 432 drmMsg("drmOpenDevice: open result is %d, (%s)\n",
414 fd, fd < 0 ? strerror(errno) : "OK"); 433 fd, fd < 0 ? strerror(errno) : "OK");
415 if (fd >= 0) 434 if (fd >= 0)
416 return fd; 435 return fd;
417 436
418 drmMsg("drmOpenDevice: Open failed\n"); 437 drmMsg("drmOpenDevice: Open failed\n");
419 remove(buf); 438 remove(buf);
@@ -429,7 +448,7 @@ wait_for_udev:
429 * \param create allow to create the device if set. 448 * \param create allow to create the device if set.
430 * 449 *
431 * \return a file descriptor on success, or a negative value on error. 450 * \return a file descriptor on success, or a negative value on error.
432 * 451 *
433 * \internal 452 * \internal
434 * Calls drmOpenDevice() if \p create is set, otherwise assembles the device 453 * Calls drmOpenDevice() if \p create is set, otherwise assembles the device
435 * name from \p minor and opens it. 454 * name from \p minor and opens it.
@@ -439,37 +458,37 @@ static int drmOpenMinor(int minor, int create, int type)
439 int fd; 458 int fd;
440 char buf[64]; 459 char buf[64];
441 const char *dev_name; 460 const char *dev_name;
442 461
443 if (create) 462 if (create)
444 return drmOpenDevice(makedev(DRM_MAJOR, minor), minor, type); 463 return drmOpenDevice(makedev(DRM_MAJOR, minor), minor, type);
445 464
446 switch (type) { 465 switch (type) {
447 case DRM_NODE_PRIMARY: 466 case DRM_NODE_PRIMARY:
448 dev_name = DRM_DEV_NAME; 467 dev_name = DRM_DEV_NAME;
449 break; 468 break;
450 case DRM_NODE_CONTROL: 469 case DRM_NODE_CONTROL:
451 dev_name = DRM_CONTROL_DEV_NAME; 470 dev_name = DRM_CONTROL_DEV_NAME;
452 break; 471 break;
453 case DRM_NODE_RENDER: 472 case DRM_NODE_RENDER:
454 dev_name = DRM_RENDER_DEV_NAME; 473 dev_name = DRM_RENDER_DEV_NAME;
455 break; 474 break;
456 default: 475 default:
457 return -EINVAL; 476 return -EINVAL;
458 }; 477 };
459 478
460 sprintf(buf, dev_name, DRM_DIR_NAME, minor); 479 sprintf(buf, dev_name, DRM_DIR_NAME, minor);
461 if ((fd = open(buf, O_RDWR, 0)) >= 0) 480 if ((fd = open(buf, O_RDWR, 0)) >= 0)
462 return fd; 481 return fd;
463 return -errno; 482 return -errno;
464} 483}
465 484
466 485
467/** 486/**
468 * Determine whether the DRM kernel driver has been loaded. 487 * Determine whether the DRM kernel driver has been loaded.
469 * 488 *
470 * \return 1 if the DRM driver is loaded, 0 otherwise. 489 * \return 1 if the DRM driver is loaded, 0 otherwise.
471 * 490 *
472 * \internal 491 * \internal
473 * Determine the presence of the kernel driver by attempting to open the 0 492 * Determine the presence of the kernel driver by attempting to open the 0
474 * minor and get version information. For backward compatibility with older 493 * minor and get version information. For backward compatibility with older
475 * Linux implementations, /proc/dri is also checked. 494 * Linux implementations, /proc/dri is also checked.
@@ -482,16 +501,16 @@ int drmAvailable(void)
482 501
483 if ((fd = drmOpenMinor(0, 1, DRM_NODE_PRIMARY)) < 0) { 502 if ((fd = drmOpenMinor(0, 1, DRM_NODE_PRIMARY)) < 0) {
484#ifdef __linux__ 503#ifdef __linux__
485 /* Try proc for backward Linux compatibility */ 504 /* Try proc for backward Linux compatibility */
486 if (!access("/proc/dri/0", R_OK)) 505 if (!access("/proc/dri/0", R_OK))
487 return 1; 506 return 1;
488#endif 507#endif
489 return 0; 508 return 0;
490 } 509 }
491 510
492 if ((version = drmGetVersion(fd))) { 511 if ((version = drmGetVersion(fd))) {
493 retval = 1; 512 retval = 1;
494 drmFreeVersion(version); 513 drmFreeVersion(version);
495 } 514 }
496 close(fd); 515 close(fd);
497 516
@@ -570,37 +589,37 @@ static int drmOpenByBusid(const char *busid, int type)
570 589
571 drmMsg("drmOpenByBusid: Searching for BusID %s\n", busid); 590 drmMsg("drmOpenByBusid: Searching for BusID %s\n", busid);
572 for (i = base; i < base + DRM_MAX_MINOR; i++) { 591 for (i = base; i < base + DRM_MAX_MINOR; i++) {
573 fd = drmOpenMinor(i, 1, type); 592 fd = drmOpenMinor(i, 1, type);
574 drmMsg("drmOpenByBusid: drmOpenMinor returns %d\n", fd); 593 drmMsg("drmOpenByBusid: drmOpenMinor returns %d\n", fd);
575 if (fd >= 0) { 594 if (fd >= 0) {
576 /* We need to try for 1.4 first for proper PCI domain support 595 /* We need to try for 1.4 first for proper PCI domain support
577 * and if that fails, we know the kernel is busted 596 * and if that fails, we know the kernel is busted
578 */ 597 */
579 sv.drm_di_major = 1; 598 sv.drm_di_major = 1;
580 sv.drm_di_minor = 4; 599 sv.drm_di_minor = 4;
581 sv.drm_dd_major = -1; /* Don't care */ 600 sv.drm_dd_major = -1; /* Don't care */
582 sv.drm_dd_minor = -1; /* Don't care */ 601 sv.drm_dd_minor = -1; /* Don't care */
583 if (drmSetInterfaceVersion(fd, &sv)) { 602 if (drmSetInterfaceVersion(fd, &sv)) {
584#ifndef __alpha__ 603#ifndef __alpha__
585 pci_domain_ok = 0; 604 pci_domain_ok = 0;
586#endif 605#endif
587 sv.drm_di_major = 1; 606 sv.drm_di_major = 1;
588 sv.drm_di_minor = 1; 607 sv.drm_di_minor = 1;
589 sv.drm_dd_major = -1; /* Don't care */ 608 sv.drm_dd_major = -1; /* Don't care */
590 sv.drm_dd_minor = -1; /* Don't care */ 609 sv.drm_dd_minor = -1; /* Don't care */
591 drmMsg("drmOpenByBusid: Interface 1.4 failed, trying 1.1\n"); 610 drmMsg("drmOpenByBusid: Interface 1.4 failed, trying 1.1\n");
592 drmSetInterfaceVersion(fd, &sv); 611 drmSetInterfaceVersion(fd, &sv);
593 } 612 }
594 buf = drmGetBusid(fd); 613 buf = drmGetBusid(fd);
595 drmMsg("drmOpenByBusid: drmGetBusid reports %s\n", buf); 614 drmMsg("drmOpenByBusid: drmGetBusid reports %s\n", buf);
596 if (buf && drmMatchBusID(buf, busid, pci_domain_ok)) { 615 if (buf && drmMatchBusID(buf, busid, pci_domain_ok)) {
597 drmFreeBusid(buf); 616 drmFreeBusid(buf);
598 return fd; 617 return fd;
599 } 618 }
600 if (buf) 619 if (buf)
601 drmFreeBusid(buf); 620 drmFreeBusid(buf);
602 close(fd); 621 close(fd);
603 } 622 }
604 } 623 }
605 return -1; 624 return -1;
606} 625}
@@ -611,14 +630,14 @@ static int drmOpenByBusid(const char *busid, int type)
611 * 630 *
612 * \param name driver name. 631 * \param name driver name.
613 * \param type the device node type. 632 * \param type the device node type.
614 * 633 *
615 * \return a file descriptor on success, or a negative value on error. 634 * \return a file descriptor on success, or a negative value on error.
616 * 635 *
617 * \internal 636 * \internal
618 * This function opens the first minor number that matches the driver name and 637 * This function opens the first minor number that matches the driver name and
619 * isn't already in use. If it's in use it then it will already have a bus ID 638 * isn't already in use. If it's in use it then it will already have a bus ID
620 * assigned. 639 * assigned.
621 * 640 *
622 * \sa drmOpenMinor(), drmGetVersion() and drmGetBusid(). 641 * \sa drmOpenMinor(), drmGetVersion() and drmGetBusid().
623 */ 642 */
624static int drmOpenByName(const char *name, int type) 643static int drmOpenByName(const char *name, int type)
@@ -637,56 +656,56 @@ static int drmOpenByName(const char *name, int type)
637 * already in use. If it's in use it will have a busid assigned already. 656 * already in use. If it's in use it will have a busid assigned already.
638 */ 657 */
639 for (i = base; i < base + DRM_MAX_MINOR; i++) { 658 for (i = base; i < base + DRM_MAX_MINOR; i++) {
640 if ((fd = drmOpenMinor(i, 1, type)) >= 0) { 659 if ((fd = drmOpenMinor(i, 1, type)) >= 0) {
641 if ((version = drmGetVersion(fd))) { 660 if ((version = drmGetVersion(fd))) {
642 if (!strcmp(version->name, name)) { 661 if (!strcmp(version->name, name)) {
643 drmFreeVersion(version); 662 drmFreeVersion(version);
644 id = drmGetBusid(fd); 663 id = drmGetBusid(fd);
645 drmMsg("drmGetBusid returned '%s'\n", id ? id : "NULL"); 664 drmMsg("drmGetBusid returned '%s'\n", id ? id : "NULL");
646 if (!id || !*id) { 665 if (!id || !*id) {
647 if (id) 666 if (id)
648 drmFreeBusid(id); 667 drmFreeBusid(id);
649 return fd; 668 return fd;
650 } else { 669 } else {
651 drmFreeBusid(id); 670 drmFreeBusid(id);
652 } 671 }
653 } else { 672 } else {
654 drmFreeVersion(version); 673 drmFreeVersion(version);
655 } 674 }
656 } 675 }
657 close(fd); 676 close(fd);
658 } 677 }
659 } 678 }
660 679
661#ifdef __linux__ 680#ifdef __linux__
662 /* Backward-compatibility /proc support */ 681 /* Backward-compatibility /proc support */
663 for (i = 0; i < 8; i++) { 682 for (i = 0; i < 8; i++) {
664 char proc_name[64], buf[512]; 683 char proc_name[64], buf[512];
665 char *driver, *pt, *devstring; 684 char *driver, *pt, *devstring;
666 int retcode; 685 int retcode;
667 686
668 sprintf(proc_name, "/proc/dri/%d/name", i); 687 sprintf(proc_name, "/proc/dri/%d/name", i);
669 if ((fd = open(proc_name, 0, 0)) >= 0) { 688 if ((fd = open(proc_name, 0, 0)) >= 0) {
670 retcode = read(fd, buf, sizeof(buf)-1); 689 retcode = read(fd, buf, sizeof(buf)-1);
671 close(fd); 690 close(fd);
672 if (retcode) { 691 if (retcode) {
673 buf[retcode-1] = '\0'; 692 buf[retcode-1] = '\0';
674 for (driver = pt = buf; *pt && *pt != ' '; ++pt) 693 for (driver = pt = buf; *pt && *pt != ' '; ++pt)
675 ; 694 ;
676 if (*pt) { /* Device is next */ 695 if (*pt) { /* Device is next */
677 *pt = '\0'; 696 *pt = '\0';
678 if (!strcmp(driver, name)) { /* Match */ 697 if (!strcmp(driver, name)) { /* Match */
679 for (devstring = ++pt; *pt && *pt != ' '; ++pt) 698 for (devstring = ++pt; *pt && *pt != ' '; ++pt)
680 ; 699 ;
681 if (*pt) { /* Found busid */ 700 if (*pt) { /* Found busid */
682 return drmOpenByBusid(++pt, type); 701 return drmOpenByBusid(++pt, type);
683 } else { /* No busid */ 702 } else { /* No busid */
684 return drmOpenDevice(strtol(devstring, NULL, 0),i, type); 703 return drmOpenDevice(strtol(devstring, NULL, 0),i, type);
685 } 704 }
686 } 705 }
687 } 706 }
688 } 707 }
689 } 708 }
690 } 709 }
691#endif 710#endif
692 711
@@ -702,9 +721,9 @@ static int drmOpenByName(const char *name, int type)
702 * 721 *
703 * \param name driver name. Not referenced if bus ID is supplied. 722 * \param name driver name. Not referenced if bus ID is supplied.
704 * \param busid bus ID. Zero if not known. 723 * \param busid bus ID. Zero if not known.
705 * 724 *
706 * \return a file descriptor on success, or a negative value on error. 725 * \return a file descriptor on success, or a negative value on error.
707 * 726 *
708 * \internal 727 * \internal
709 * It calls drmOpenByBusid() if \p busid is specified or drmOpenByName() 728 * It calls drmOpenByBusid() if \p busid is specified or drmOpenByName()
710 * otherwise. 729 * otherwise.
@@ -734,21 +753,21 @@ int drmOpenWithType(const char *name, const char *busid, int type)
734{ 753{
735 if (!drmAvailable() && name != NULL && drm_server_info && 754 if (!drmAvailable() && name != NULL && drm_server_info &&
736 drm_server_info->load_module) { 755 drm_server_info->load_module) {
737 /* try to load the kernel module */ 756 /* try to load the kernel module */
738 if (!drm_server_info->load_module(name)) { 757 if (!drm_server_info->load_module(name)) {
739 drmMsg("[drm] failed to load kernel module \"%s\"\n", name); 758 drmMsg("[drm] failed to load kernel module \"%s\"\n", name);
740 return -1; 759 return -1;
741 } 760 }
742 } 761 }
743 762
744 if (busid) { 763 if (busid) {
745 int fd = drmOpenByBusid(busid, type); 764 int fd = drmOpenByBusid(busid, type);
746 if (fd >= 0) 765 if (fd >= 0)
747 return fd; 766 return fd;
748 } 767 }
749 768
750 if (name) 769 if (name)
751 return drmOpenByName(name, type); 770 return drmOpenByName(name, type);
752 771
753 return -1; 772 return -1;
754} 773}
@@ -775,7 +794,7 @@ int drmOpenRender(int minor)
775void drmFreeVersion(drmVersionPtr v) 794void drmFreeVersion(drmVersionPtr v)
776{ 795{
777 if (!v) 796 if (!v)
778 return; 797 return;
779 drmFree(v->name); 798 drmFree(v->name);
780 drmFree(v->date); 799 drmFree(v->date);
781 drmFree(v->desc); 800 drmFree(v->desc);
@@ -795,7 +814,7 @@ void drmFreeVersion(drmVersionPtr v)
795static void drmFreeKernelVersion(drm_version_t *v) 814static void drmFreeKernelVersion(drm_version_t *v)
796{ 815{
797 if (!v) 816 if (!v)
798 return; 817 return;
799 drmFree(v->name); 818 drmFree(v->name);
800 drmFree(v->date); 819 drmFree(v->date);
801 drmFree(v->desc); 820 drmFree(v->desc);
@@ -805,10 +824,10 @@ static void drmFreeKernelVersion(drm_version_t *v)
805 824
806/** 825/**
807 * Copy version information. 826 * Copy version information.
808 * 827 *
809 * \param d destination pointer. 828 * \param d destination pointer.
810 * \param s source pointer. 829 * \param s source pointer.
811 * 830 *
812 * \internal 831 * \internal
813 * Used by drmGetVersion() to translate the information returned by the ioctl 832 * Used by drmGetVersion() to translate the information returned by the ioctl
814 * interface in a private structure into the public structure counterpart. 833 * interface in a private structure into the public structure counterpart.
@@ -831,12 +850,12 @@ static void drmCopyVersion(drmVersionPtr d, const drm_version_t *s)
831 * Query the driver version information. 850 * Query the driver version information.
832 * 851 *
833 * \param fd file descriptor. 852 * \param fd file descriptor.
834 * 853 *
835 * \return pointer to a drmVersion structure which should be freed with 854 * \return pointer to a drmVersion structure which should be freed with
836 * drmFreeVersion(). 855 * drmFreeVersion().
837 * 856 *
838 * \note Similar information is available via /proc/dri. 857 * \note Similar information is available via /proc/dri.
839 * 858 *
840 * \internal 859 * \internal
841 * It gets the version information via successive DRM_IOCTL_VERSION ioctls, 860 * It gets the version information via successive DRM_IOCTL_VERSION ioctls,
842 * first with zeros to get the string lengths, and then the actually strings. 861 * first with zeros to get the string lengths, and then the actually strings.
@@ -850,21 +869,21 @@ drmVersionPtr drmGetVersion(int fd)
850 memclear(*version); 869 memclear(*version);
851 870
852 if (drmIoctl(fd, DRM_IOCTL_VERSION, version)) { 871 if (drmIoctl(fd, DRM_IOCTL_VERSION, version)) {
853 drmFreeKernelVersion(version); 872 drmFreeKernelVersion(version);
854 return NULL; 873 return NULL;
855 } 874 }
856 875
857 if (version->name_len) 876 if (version->name_len)
858 version->name = drmMalloc(version->name_len + 1); 877 version->name = drmMalloc(version->name_len + 1);
859 if (version->date_len) 878 if (version->date_len)
860 version->date = drmMalloc(version->date_len + 1); 879 version->date = drmMalloc(version->date_len + 1);
861 if (version->desc_len) 880 if (version->desc_len)
862 version->desc = drmMalloc(version->desc_len + 1); 881 version->desc = drmMalloc(version->desc_len + 1);
863 882
864 if (drmIoctl(fd, DRM_IOCTL_VERSION, version)) { 883 if (drmIoctl(fd, DRM_IOCTL_VERSION, version)) {
865 drmMsg("DRM_IOCTL_VERSION: %s\n", strerror(errno)); 884 drmMsg("DRM_IOCTL_VERSION: %s\n", strerror(errno));
866 drmFreeKernelVersion(version); 885 drmFreeKernelVersion(version);
867 return NULL; 886 return NULL;
868 } 887 }
869 888
870 /* The results might not be null-terminated strings, so terminate them. */ 889 /* The results might not be null-terminated strings, so terminate them. */
@@ -881,13 +900,13 @@ drmVersionPtr drmGetVersion(int fd)
881 900
882/** 901/**
883 * Get version information for the DRM user space library. 902 * Get version information for the DRM user space library.
884 * 903 *
885 * This version number is driver independent. 904 * This version number is driver independent.
886 * 905 *
887 * \param fd file descriptor. 906 * \param fd file descriptor.
888 * 907 *
889 * \return version information. 908 * \return version information.
890 * 909 *
891 * \internal 910 * \internal
892 * This function allocates and fills a drm_version structure with a hard coded 911 * This function allocates and fills a drm_version structure with a hard coded
893 * version number. 912 * version number.
@@ -915,29 +934,29 @@ drmVersionPtr drmGetLibVersion(int fd)
915 934
916int drmGetCap(int fd, uint64_t capability, uint64_t *value) 935int drmGetCap(int fd, uint64_t capability, uint64_t *value)
917{ 936{
918 struct drm_get_cap cap; 937 struct drm_get_cap cap;
919 int ret; 938 int ret;
920 939
921 memclear(cap); 940 memclear(cap);
922 cap.capability = capability; 941 cap.capability = capability;
923 942
924 ret = drmIoctl(fd, DRM_IOCTL_GET_CAP, &cap); 943 ret = drmIoctl(fd, DRM_IOCTL_GET_CAP, &cap);
925 if (ret) 944 if (ret)
926 return ret; 945 return ret;
927 946
928 *value = cap.value; 947 *value = cap.value;
929 return 0; 948 return 0;
930} 949}
931 950
932int drmSetClientCap(int fd, uint64_t capability, uint64_t value) 951int drmSetClientCap(int fd, uint64_t capability, uint64_t value)
933{ 952{
934 struct drm_set_client_cap cap; 953 struct drm_set_client_cap cap;
935 954
936 memclear(cap); 955 memclear(cap);
937 cap.capability = capability; 956 cap.capability = capability;
938 cap.value = value; 957 cap.value = value;
939 958
940 return drmIoctl(fd, DRM_IOCTL_SET_CLIENT_CAP, &cap); 959 return drmIoctl(fd, DRM_IOCTL_SET_CLIENT_CAP, &cap);
941} 960}
942 961
943/** 962/**
@@ -973,10 +992,10 @@ char *drmGetBusid(int fd)
973 memclear(u); 992 memclear(u);
974 993
975 if (drmIoctl(fd, DRM_IOCTL_GET_UNIQUE, &u)) 994 if (drmIoctl(fd, DRM_IOCTL_GET_UNIQUE, &u))
976 return NULL; 995 return NULL;
977 u.unique = drmMalloc(u.unique_len + 1); 996 u.unique = drmMalloc(u.unique_len + 1);
978 if (drmIoctl(fd, DRM_IOCTL_GET_UNIQUE, &u)) 997 if (drmIoctl(fd, DRM_IOCTL_GET_UNIQUE, &u))
979 return NULL; 998 return NULL;
980 u.unique[u.unique_len] = '\0'; 999 u.unique[u.unique_len] = '\0';
981 1000
982 return u.unique; 1001 return u.unique;
@@ -1004,7 +1023,7 @@ int drmSetBusid(int fd, const char *busid)
1004 u.unique_len = strlen(busid); 1023 u.unique_len = strlen(busid);
1005 1024
1006 if (drmIoctl(fd, DRM_IOCTL_SET_UNIQUE, &u)) { 1025 if (drmIoctl(fd, DRM_IOCTL_SET_UNIQUE, &u)) {
1007 return -errno; 1026 return -errno;
1008 } 1027 }
1009 return 0; 1028 return 0;
1010} 1029}
@@ -1017,7 +1036,7 @@ int drmGetMagic(int fd, drm_magic_t * magic)
1017 1036
1018 *magic = 0; 1037 *magic = 0;
1019 if (drmIoctl(fd, DRM_IOCTL_GET_MAGIC, &auth)) 1038 if (drmIoctl(fd, DRM_IOCTL_GET_MAGIC, &auth))
1020 return -errno; 1039 return -errno;
1021 *magic = auth.magic; 1040 *magic = auth.magic;
1022 return 0; 1041 return 0;
1023} 1042}
@@ -1029,7 +1048,7 @@ int drmAuthMagic(int fd, drm_magic_t magic)
1029 memclear(auth); 1048 memclear(auth);
1030 auth.magic = magic; 1049 auth.magic = magic;
1031 if (drmIoctl(fd, DRM_IOCTL_AUTH_MAGIC, &auth)) 1050 if (drmIoctl(fd, DRM_IOCTL_AUTH_MAGIC, &auth))
1032 return -errno; 1051 return -errno;
1033 return 0; 1052 return 0;
1034} 1053}
1035 1054
@@ -1045,7 +1064,7 @@ int drmAuthMagic(int fd, drm_magic_t magic)
1045 * \param flags combination of several flags to modify the function actions. 1064 * \param flags combination of several flags to modify the function actions.
1046 * \param handle will be set to a value that may be used as the offset 1065 * \param handle will be set to a value that may be used as the offset
1047 * parameter for mmap(). 1066 * parameter for mmap().
1048 * 1067 *
1049 * \return zero on success or a negative value on error. 1068 * \return zero on success or a negative value on error.
1050 * 1069 *
1051 * \par Mapping the frame buffer 1070 * \par Mapping the frame buffer
@@ -1056,7 +1075,7 @@ int drmAuthMagic(int fd, drm_magic_t magic)
1056 * 1075 *
1057 * \par 1076 * \par
1058 * The area mapped will be uncached. If MTRR support is available in the 1077 * The area mapped will be uncached. If MTRR support is available in the
1059 * kernel, the frame buffer area will be set to write combining. 1078 * kernel, the frame buffer area will be set to write combining.
1060 * 1079 *
1061 * \par Mapping the MMIO register area 1080 * \par Mapping the MMIO register area
1062 * For the MMIO register area, 1081 * For the MMIO register area,
@@ -1064,19 +1083,19 @@ int drmAuthMagic(int fd, drm_magic_t magic)
1064 * - \p size will be the size of the register area bytes, and 1083 * - \p size will be the size of the register area bytes, and
1065 * - \p type will be DRM_REGISTERS. 1084 * - \p type will be DRM_REGISTERS.
1066 * \par 1085 * \par
1067 * The area mapped will be uncached. 1086 * The area mapped will be uncached.
1068 * 1087 *
1069 * \par Mapping the SAREA 1088 * \par Mapping the SAREA
1070 * For the SAREA, 1089 * For the SAREA,
1071 * - \p offset will be ignored and should be set to zero, 1090 * - \p offset will be ignored and should be set to zero,
1072 * - \p size will be the desired size of the SAREA in bytes, 1091 * - \p size will be the desired size of the SAREA in bytes,
1073 * - \p type will be DRM_SHM. 1092 * - \p type will be DRM_SHM.
1074 * 1093 *
1075 * \par 1094 * \par
1076 * A shared memory area of the requested size will be created and locked in 1095 * A shared memory area of the requested size will be created and locked in
1077 * kernel memory. This area may be mapped into client-space by using the handle 1096 * kernel memory. This area may be mapped into client-space by using the handle
1078 * returned. 1097 * returned.
1079 * 1098 *
1080 * \note May only be called by root. 1099 * \note May only be called by root.
1081 * 1100 *
1082 * \internal 1101 * \internal
@@ -1084,7 +1103,7 @@ int drmAuthMagic(int fd, drm_magic_t magic)
1084 * the arguments in a drm_map structure. 1103 * the arguments in a drm_map structure.
1085 */ 1104 */
1086int drmAddMap(int fd, drm_handle_t offset, drmSize size, drmMapType type, 1105int drmAddMap(int fd, drm_handle_t offset, drmSize size, drmMapType type,
1087 drmMapFlags flags, drm_handle_t *handle) 1106 drmMapFlags flags, drm_handle_t *handle)
1088{ 1107{
1089 drm_map_t map; 1108 drm_map_t map;
1090 1109
@@ -1094,9 +1113,9 @@ int drmAddMap(int fd, drm_handle_t offset, drmSize size, drmMapType type,
1094 map.type = type; 1113 map.type = type;
1095 map.flags = flags; 1114 map.flags = flags;
1096 if (drmIoctl(fd, DRM_IOCTL_ADD_MAP, &map)) 1115 if (drmIoctl(fd, DRM_IOCTL_ADD_MAP, &map))
1097 return -errno; 1116 return -errno;
1098 if (handle) 1117 if (handle)
1099 *handle = (drm_handle_t)(uintptr_t)map.handle; 1118 *handle = (drm_handle_t)(uintptr_t)map.handle;
1100 return 0; 1119 return 0;
1101} 1120}
1102 1121
@@ -1108,18 +1127,18 @@ int drmRmMap(int fd, drm_handle_t handle)
1108 map.handle = (void *)(uintptr_t)handle; 1127 map.handle = (void *)(uintptr_t)handle;
1109 1128
1110 if(drmIoctl(fd, DRM_IOCTL_RM_MAP, &map)) 1129 if(drmIoctl(fd, DRM_IOCTL_RM_MAP, &map))
1111 return -errno; 1130 return -errno;
1112 return 0; 1131 return 0;
1113} 1132}
1114 1133
1115/** 1134/**
1116 * Make buffers available for DMA transfers. 1135 * Make buffers available for DMA transfers.
1117 * 1136 *
1118 * \param fd file descriptor. 1137 * \param fd file descriptor.
1119 * \param count number of buffers. 1138 * \param count number of buffers.
1120 * \param size size of each buffer. 1139 * \param size size of each buffer.
1121 * \param flags buffer allocation flags. 1140 * \param flags buffer allocation flags.
1122 * \param agp_offset offset in the AGP aperture 1141 * \param agp_offset offset in the AGP aperture
1123 * 1142 *
1124 * \return number of buffers allocated, negative on error. 1143 * \return number of buffers allocated, negative on error.
1125 * 1144 *
@@ -1129,7 +1148,7 @@ int drmRmMap(int fd, drm_handle_t handle)
1129 * \sa drm_buf_desc. 1148 * \sa drm_buf_desc.
1130 */ 1149 */
1131int drmAddBufs(int fd, int count, int size, drmBufDescFlags flags, 1150int drmAddBufs(int fd, int count, int size, drmBufDescFlags flags,
1132 int agp_offset) 1151 int agp_offset)
1133{ 1152{
1134 drm_buf_desc_t request; 1153 drm_buf_desc_t request;
1135 1154
@@ -1140,7 +1159,7 @@ int drmAddBufs(int fd, int count, int size, drmBufDescFlags flags,
1140 request.agp_start = agp_offset; 1159 request.agp_start = agp_offset;
1141 1160
1142 if (drmIoctl(fd, DRM_IOCTL_ADD_BUFS, &request)) 1161 if (drmIoctl(fd, DRM_IOCTL_ADD_BUFS, &request))
1143 return -errno; 1162 return -errno;
1144 return request.count; 1163 return request.count;
1145} 1164}
1146 1165
@@ -1152,28 +1171,28 @@ int drmMarkBufs(int fd, double low, double high)
1152 memclear(info); 1171 memclear(info);
1153 1172
1154 if (drmIoctl(fd, DRM_IOCTL_INFO_BUFS, &info)) 1173 if (drmIoctl(fd, DRM_IOCTL_INFO_BUFS, &info))
1155 return -EINVAL; 1174 return -EINVAL;
1156 1175
1157 if (!info.count) 1176 if (!info.count)
1158 return -EINVAL; 1177 return -EINVAL;
1159 1178
1160 if (!(info.list = drmMalloc(info.count * sizeof(*info.list)))) 1179 if (!(info.list = drmMalloc(info.count * sizeof(*info.list))))
1161 return -ENOMEM; 1180 return -ENOMEM;
1162 1181
1163 if (drmIoctl(fd, DRM_IOCTL_INFO_BUFS, &info)) { 1182 if (drmIoctl(fd, DRM_IOCTL_INFO_BUFS, &info)) {
1164 int retval = -errno; 1183 int retval = -errno;
1165 drmFree(info.list); 1184 drmFree(info.list);
1166 return retval; 1185 return retval;
1167 } 1186 }
1168 1187
1169 for (i = 0; i < info.count; i++) { 1188 for (i = 0; i < info.count; i++) {
1170 info.list[i].low_mark = low * info.list[i].count; 1189 info.list[i].low_mark = low * info.list[i].count;
1171 info.list[i].high_mark = high * info.list[i].count; 1190 info.list[i].high_mark = high * info.list[i].count;
1172 if (drmIoctl(fd, DRM_IOCTL_MARK_BUFS, &info.list[i])) { 1191 if (drmIoctl(fd, DRM_IOCTL_MARK_BUFS, &info.list[i])) {
1173 int retval = -errno; 1192 int retval = -errno;
1174 drmFree(info.list); 1193 drmFree(info.list);
1175 return retval; 1194 return retval;
1176 } 1195 }
1177 } 1196 }
1178 drmFree(info.list); 1197 drmFree(info.list);
1179 1198
@@ -1188,9 +1207,9 @@ int drmMarkBufs(int fd, double low, double high)
1188 * \param list list of buffers to be freed. 1207 * \param list list of buffers to be freed.
1189 * 1208 *
1190 * \return zero on success, or a negative value on failure. 1209 * \return zero on success, or a negative value on failure.
1191 * 1210 *
1192 * \note This function is primarily used for debugging. 1211 * \note This function is primarily used for debugging.
1193 * 1212 *
1194 * \internal 1213 * \internal
1195 * This function is a wrapper around the DRM_IOCTL_FREE_BUFS ioctl, passing 1214 * This function is a wrapper around the DRM_IOCTL_FREE_BUFS ioctl, passing
1196 * the arguments in a drm_buf_free structure. 1215 * the arguments in a drm_buf_free structure.
@@ -1203,7 +1222,7 @@ int drmFreeBufs(int fd, int count, int *list)
1203 request.count = count; 1222 request.count = count;
1204 request.list = list; 1223 request.list = list;
1205 if (drmIoctl(fd, DRM_IOCTL_FREE_BUFS, &request)) 1224 if (drmIoctl(fd, DRM_IOCTL_FREE_BUFS, &request))
1206 return -errno; 1225 return -errno;
1207 return 0; 1226 return 0;
1208} 1227}
1209 1228
@@ -1243,7 +1262,7 @@ int drmClose(int fd)
1243 * begins. 1262 * begins.
1244 * 1263 *
1245 * \return zero on success, or a negative value on failure. 1264 * \return zero on success, or a negative value on failure.
1246 * 1265 *
1247 * \internal 1266 * \internal
1248 * This function is a wrapper for mmap(). 1267 * This function is a wrapper for mmap().
1249 */ 1268 */
@@ -1252,16 +1271,16 @@ int drmMap(int fd, drm_handle_t handle, drmSize size, drmAddressPtr address)
1252 static unsigned long pagesize_mask = 0; 1271 static unsigned long pagesize_mask = 0;
1253 1272
1254 if (fd < 0) 1273 if (fd < 0)
1255 return -EINVAL; 1274 return -EINVAL;
1256 1275
1257 if (!pagesize_mask) 1276 if (!pagesize_mask)
1258 pagesize_mask = getpagesize() - 1; 1277 pagesize_mask = getpagesize() - 1;
1259 1278
1260 size = (size + pagesize_mask) & ~pagesize_mask; 1279 size = (size + pagesize_mask) & ~pagesize_mask;
1261 1280
1262 *address = drm_mmap(0, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, handle); 1281 *address = drm_mmap(0, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, handle);
1263 if (*address == MAP_FAILED) 1282 if (*address == MAP_FAILED)
1264 return -errno; 1283 return -errno;
1265 return 0; 1284 return 0;
1266} 1285}
1267 1286
@@ -1271,7 +1290,7 @@ int drmMap(int fd, drm_handle_t handle, drmSize size, drmAddressPtr address)
1271 * 1290 *
1272 * \param address address as given by drmMap(). 1291 * \param address address as given by drmMap().
1273 * \param size size in bytes. Must match the size used by drmMap(). 1292 * \param size size in bytes. Must match the size used by drmMap().
1274 * 1293 *
1275 * \return zero on success, or a negative value on failure. 1294 * \return zero on success, or a negative value on failure.
1276 * 1295 *
1277 * \internal 1296 * \internal
@@ -1291,28 +1310,28 @@ drmBufInfoPtr drmGetBufInfo(int fd)
1291 memclear(info); 1310 memclear(info);
1292 1311
1293 if (drmIoctl(fd, DRM_IOCTL_INFO_BUFS, &info)) 1312 if (drmIoctl(fd, DRM_IOCTL_INFO_BUFS, &info))
1294 return NULL; 1313 return NULL;
1295 1314
1296 if (info.count) { 1315 if (info.count) {
1297 if (!(info.list = drmMalloc(info.count * sizeof(*info.list)))) 1316 if (!(info.list = drmMalloc(info.count * sizeof(*info.list))))
1298 return NULL; 1317 return NULL;
1299 1318
1300 if (drmIoctl(fd, DRM_IOCTL_INFO_BUFS, &info)) { 1319 if (drmIoctl(fd, DRM_IOCTL_INFO_BUFS, &info)) {
1301 drmFree(info.list); 1320 drmFree(info.list);
1302 return NULL; 1321 return NULL;
1303 } 1322 }
1304 1323
1305 retval = drmMalloc(sizeof(*retval)); 1324 retval = drmMalloc(sizeof(*retval));
1306 retval->count = info.count; 1325 retval->count = info.count;
1307 retval->list = drmMalloc(info.count * sizeof(*retval->list)); 1326 retval->list = drmMalloc(info.count * sizeof(*retval->list));
1308 for (i = 0; i < info.count; i++) { 1327 for (i = 0; i < info.count; i++) {
1309 retval->list[i].count = info.list[i].count; 1328 retval->list[i].count = info.list[i].count;
1310 retval->list[i].size = info.list[i].size; 1329 retval->list[i].size = info.list[i].size;
1311 retval->list[i].low_mark = info.list[i].low_mark; 1330 retval->list[i].low_mark = info.list[i].low_mark;
1312 retval->list[i].high_mark = info.list[i].high_mark; 1331 retval->list[i].high_mark = info.list[i].high_mark;
1313 } 1332 }
1314 drmFree(info.list); 1333 drmFree(info.list);
1315 return retval; 1334 return retval;
1316 } 1335 }
1317 return NULL; 1336 return NULL;
1318} 1337}
@@ -1326,12 +1345,12 @@ drmBufInfoPtr drmGetBufInfo(int fd)
1326 * 1345 *
1327 * \note The client may not use these buffers until obtaining buffer indices 1346 * \note The client may not use these buffers until obtaining buffer indices
1328 * with drmDMA(). 1347 * with drmDMA().
1329 * 1348 *
1330 * \internal 1349 * \internal
1331 * This function calls the DRM_IOCTL_MAP_BUFS ioctl and copies the returned 1350 * This function calls the DRM_IOCTL_MAP_BUFS ioctl and copies the returned
1332 * information about the buffers in a drm_buf_map structure into the 1351 * information about the buffers in a drm_buf_map structure into the
1333 * client-visible data structures. 1352 * client-visible data structures.
1334 */ 1353 */
1335drmBufMapPtr drmMapBufs(int fd) 1354drmBufMapPtr drmMapBufs(int fd)
1336{ 1355{
1337 drm_buf_map_t bufs; 1356 drm_buf_map_t bufs;
@@ -1340,32 +1359,31 @@ drmBufMapPtr drmMapBufs(int fd)
1340 1359
1341 memclear(bufs); 1360 memclear(bufs);
1342 if (drmIoctl(fd, DRM_IOCTL_MAP_BUFS, &bufs)) 1361 if (drmIoctl(fd, DRM_IOCTL_MAP_BUFS, &bufs))
1343 return NULL; 1362 return NULL;
1344 1363
1345 if (!bufs.count) 1364 if (!bufs.count)
1346 return NULL; 1365 return NULL;
1347 1366
1348 if (!(bufs.list = drmMalloc(bufs.count * sizeof(*bufs.list)))) 1367 if (!(bufs.list = drmMalloc(bufs.count * sizeof(*bufs.list))))
1349 return NULL; 1368 return NULL;
1350 1369
1351 if (drmIoctl(fd, DRM_IOCTL_MAP_BUFS, &bufs)) { 1370 if (drmIoctl(fd, DRM_IOCTL_MAP_BUFS, &bufs)) {
1352 drmFree(bufs.list); 1371 drmFree(bufs.list);
1353 return NULL; 1372 return NULL;
1354 } 1373 }
1355 1374
1356 retval = drmMalloc(sizeof(*retval)); 1375 retval = drmMalloc(sizeof(*retval));
1357 retval->count = bufs.count; 1376 retval->count = bufs.count;
1358 retval->list = drmMalloc(bufs.count * sizeof(*retval->list)); 1377 retval->list = drmMalloc(bufs.count * sizeof(*retval->list));
1359 for (i = 0; i < bufs.count; i++) { 1378 for (i = 0; i < bufs.count; i++) {
1360 retval->list[i].idx = bufs.list[i].idx; 1379 retval->list[i].idx = bufs.list[i].idx;
1361 retval->list[i].total = bufs.list[i].total; 1380 retval->list[i].total = bufs.list[i].total;
1362 retval->list[i].used = 0; 1381 retval->list[i].used = 0;
1363 retval->list[i].address = bufs.list[i].address; 1382 retval->list[i].address = bufs.list[i].address;
1364 } 1383 }
1365 1384
1366 drmFree(bufs.list); 1385 drmFree(bufs.list);
1367 1386 return retval;
1368 return retval;
1369} 1387}
1370 1388
1371 1389
@@ -1383,24 +1401,23 @@ int drmUnmapBufs(drmBufMapPtr bufs)
1383 int i; 1401 int i;
1384 1402
1385 for (i = 0; i < bufs->count; i++) { 1403 for (i = 0; i < bufs->count; i++) {
1386 drm_munmap(bufs->list[i].address, bufs->list[i].total); 1404 drm_munmap(bufs->list[i].address, bufs->list[i].total);
1387 } 1405 }
1388 1406
1389 drmFree(bufs->list); 1407 drmFree(bufs->list);
1390 drmFree(bufs); 1408 drmFree(bufs);
1391
1392 return 0; 1409 return 0;
1393} 1410}
1394 1411
1395 1412
1396#define DRM_DMA_RETRY 16 1413#define DRM_DMA_RETRY 16
1397 1414
1398/** 1415/**
1399 * Reserve DMA buffers. 1416 * Reserve DMA buffers.
1400 * 1417 *
1401 * \param fd file descriptor. 1418 * \param fd file descriptor.
1402 * \param request 1419 * \param request
1403 * 1420 *
1404 * \return zero on success, or a negative value on failure. 1421 * \return zero on success, or a negative value on failure.
1405 * 1422 *
1406 * \internal 1423 * \internal
@@ -1424,14 +1441,14 @@ int drmDMA(int fd, drmDMAReqPtr request)
1424 dma.granted_count = 0; 1441 dma.granted_count = 0;
1425 1442
1426 do { 1443 do {
1427 ret = ioctl( fd, DRM_IOCTL_DMA, &dma ); 1444 ret = ioctl( fd, DRM_IOCTL_DMA, &dma );
1428 } while ( ret && errno == EAGAIN && i++ < DRM_DMA_RETRY ); 1445 } while ( ret && errno == EAGAIN && i++ < DRM_DMA_RETRY );
1429 1446
1430 if ( ret == 0 ) { 1447 if ( ret == 0 ) {
1431 request->granted_count = dma.granted_count; 1448 request->granted_count = dma.granted_count;
1432 return 0; 1449 return 0;
1433 } else { 1450 } else {
1434 return -errno; 1451 return -errno;
1435 } 1452 }
1436} 1453}
1437 1454
@@ -1443,9 +1460,9 @@ int drmDMA(int fd, drmDMAReqPtr request)
1443 * \param context context. 1460 * \param context context.
1444 * \param flags flags that determine the sate of the hardware when the function 1461 * \param flags flags that determine the sate of the hardware when the function
1445 * returns. 1462 * returns.
1446 * 1463 *
1447 * \return always zero. 1464 * \return always zero.
1448 * 1465 *
1449 * \internal 1466 * \internal
1450 * This function translates the arguments into a drm_lock structure and issue 1467 * This function translates the arguments into a drm_lock structure and issue
1451 * the DRM_IOCTL_LOCK ioctl until the lock is successfully acquired. 1468 * the DRM_IOCTL_LOCK ioctl until the lock is successfully acquired.
@@ -1465,7 +1482,7 @@ int drmGetLock(int fd, drm_context_t context, drmLockFlags flags)
1465 if (flags & DRM_HALT_CUR_QUEUES) lock.flags |= _DRM_HALT_CUR_QUEUES; 1482 if (flags & DRM_HALT_CUR_QUEUES) lock.flags |= _DRM_HALT_CUR_QUEUES;
1466 1483
1467 while (drmIoctl(fd, DRM_IOCTL_LOCK, &lock)) 1484 while (drmIoctl(fd, DRM_IOCTL_LOCK, &lock))
1468 ; 1485 ;
1469 return 0; 1486 return 0;
1470} 1487}
1471 1488
@@ -1474,9 +1491,9 @@ int drmGetLock(int fd, drm_context_t context, drmLockFlags flags)
1474 * 1491 *
1475 * \param fd file descriptor. 1492 * \param fd file descriptor.
1476 * \param context context. 1493 * \param context context.
1477 * 1494 *
1478 * \return zero on success, or a negative value on failure. 1495 * \return zero on success, or a negative value on failure.
1479 * 1496 *
1480 * \internal 1497 * \internal
1481 * This function is a wrapper around the DRM_IOCTL_UNLOCK ioctl, passing the 1498 * This function is a wrapper around the DRM_IOCTL_UNLOCK ioctl, passing the
1482 * argument in a drm_lock structure. 1499 * argument in a drm_lock structure.
@@ -1499,24 +1516,24 @@ drm_context_t *drmGetReservedContextList(int fd, int *count)
1499 1516
1500 memclear(res); 1517 memclear(res);
1501 if (drmIoctl(fd, DRM_IOCTL_RES_CTX, &res)) 1518 if (drmIoctl(fd, DRM_IOCTL_RES_CTX, &res))
1502 return NULL; 1519 return NULL;
1503 1520
1504 if (!res.count) 1521 if (!res.count)
1505 return NULL; 1522 return NULL;
1506 1523
1507 if (!(list = drmMalloc(res.count * sizeof(*list)))) 1524 if (!(list = drmMalloc(res.count * sizeof(*list))))
1508 return NULL; 1525 return NULL;
1509 if (!(retval = drmMalloc(res.count * sizeof(*retval)))) { 1526 if (!(retval = drmMalloc(res.count * sizeof(*retval)))) {
1510 drmFree(list); 1527 drmFree(list);
1511 return NULL; 1528 return NULL;
1512 } 1529 }
1513 1530
1514 res.contexts = list; 1531 res.contexts = list;
1515 if (drmIoctl(fd, DRM_IOCTL_RES_CTX, &res)) 1532 if (drmIoctl(fd, DRM_IOCTL_RES_CTX, &res))
1516 return NULL; 1533 return NULL;
1517 1534
1518 for (i = 0; i < res.count; i++) 1535 for (i = 0; i < res.count; i++)
1519 retval[i] = list[i].handle; 1536 retval[i] = list[i].handle;
1520 drmFree(list); 1537 drmFree(list);
1521 1538
1522 *count = res.count; 1539 *count = res.count;
@@ -1537,11 +1554,11 @@ void drmFreeReservedContextList(drm_context_t *pt)
1537 * \param fd file descriptor. 1554 * \param fd file descriptor.
1538 * \param handle is set on success. To be used by the client when requesting DMA 1555 * \param handle is set on success. To be used by the client when requesting DMA
1539 * dispatch with drmDMA(). 1556 * dispatch with drmDMA().
1540 * 1557 *
1541 * \return zero on success, or a negative value on failure. 1558 * \return zero on success, or a negative value on failure.
1542 * 1559 *
1543 * \note May only be called by root. 1560 * \note May only be called by root.
1544 * 1561 *
1545 * \internal 1562 * \internal
1546 * This function is a wrapper around the DRM_IOCTL_ADD_CTX ioctl, passing the 1563 * This function is a wrapper around the DRM_IOCTL_ADD_CTX ioctl, passing the
1547 * argument in a drm_ctx structure. 1564 * argument in a drm_ctx structure.
@@ -1552,7 +1569,7 @@ int drmCreateContext(int fd, drm_context_t *handle)
1552 1569
1553 memclear(ctx); 1570 memclear(ctx);
1554 if (drmIoctl(fd, DRM_IOCTL_ADD_CTX, &ctx)) 1571 if (drmIoctl(fd, DRM_IOCTL_ADD_CTX, &ctx))
1555 return -errno; 1572 return -errno;
1556 *handle = ctx.handle; 1573 *handle = ctx.handle;
1557 return 0; 1574 return 0;
1558} 1575}
@@ -1564,7 +1581,7 @@ int drmSwitchToContext(int fd, drm_context_t context)
1564 memclear(ctx); 1581 memclear(ctx);
1565 ctx.handle = context; 1582 ctx.handle = context;
1566 if (drmIoctl(fd, DRM_IOCTL_SWITCH_CTX, &ctx)) 1583 if (drmIoctl(fd, DRM_IOCTL_SWITCH_CTX, &ctx))
1567 return -errno; 1584 return -errno;
1568 return 0; 1585 return 0;
1569} 1586}
1570 1587
@@ -1581,11 +1598,11 @@ int drmSetContextFlags(int fd, drm_context_t context, drm_context_tFlags flags)
1581 memclear(ctx); 1598 memclear(ctx);
1582 ctx.handle = context; 1599 ctx.handle = context;
1583 if (flags & DRM_CONTEXT_PRESERVED) 1600 if (flags & DRM_CONTEXT_PRESERVED)
1584 ctx.flags |= _DRM_CONTEXT_PRESERVED; 1601 ctx.flags |= _DRM_CONTEXT_PRESERVED;
1585 if (flags & DRM_CONTEXT_2DONLY) 1602 if (flags & DRM_CONTEXT_2DONLY)
1586 ctx.flags |= _DRM_CONTEXT_2DONLY; 1603 ctx.flags |= _DRM_CONTEXT_2DONLY;
1587 if (drmIoctl(fd, DRM_IOCTL_MOD_CTX, &ctx)) 1604 if (drmIoctl(fd, DRM_IOCTL_MOD_CTX, &ctx))
1588 return -errno; 1605 return -errno;
1589 return 0; 1606 return 0;
1590} 1607}
1591 1608
@@ -1597,12 +1614,12 @@ int drmGetContextFlags(int fd, drm_context_t context,
1597 memclear(ctx); 1614 memclear(ctx);
1598 ctx.handle = context; 1615 ctx.handle = context;
1599 if (drmIoctl(fd, DRM_IOCTL_GET_CTX, &ctx)) 1616 if (drmIoctl(fd, DRM_IOCTL_GET_CTX, &ctx))
1600 return -errno; 1617 return -errno;
1601 *flags = 0; 1618 *flags = 0;
1602 if (ctx.flags & _DRM_CONTEXT_PRESERVED) 1619 if (ctx.flags & _DRM_CONTEXT_PRESERVED)
1603 *flags |= DRM_CONTEXT_PRESERVED; 1620 *flags |= DRM_CONTEXT_PRESERVED;
1604 if (ctx.flags & _DRM_CONTEXT_2DONLY) 1621 if (ctx.flags & _DRM_CONTEXT_2DONLY)
1605 *flags |= DRM_CONTEXT_2DONLY; 1622 *flags |= DRM_CONTEXT_2DONLY;
1606 return 0; 1623 return 0;
1607} 1624}
1608 1625
@@ -1611,14 +1628,14 @@ int drmGetContextFlags(int fd, drm_context_t context,
1611 * 1628 *
1612 * Free any kernel-level resources allocated with drmCreateContext() associated 1629 * Free any kernel-level resources allocated with drmCreateContext() associated
1613 * with the context. 1630 * with the context.
1614 * 1631 *
1615 * \param fd file descriptor. 1632 * \param fd file descriptor.
1616 * \param handle handle given by drmCreateContext(). 1633 * \param handle handle given by drmCreateContext().
1617 * 1634 *
1618 * \return zero on success, or a negative value on failure. 1635 * \return zero on success, or a negative value on failure.
1619 * 1636 *
1620 * \note May only be called by root. 1637 * \note May only be called by root.
1621 * 1638 *
1622 * \internal 1639 * \internal
1623 * This function is a wrapper around the DRM_IOCTL_RM_CTX ioctl, passing the 1640 * This function is a wrapper around the DRM_IOCTL_RM_CTX ioctl, passing the
1624 * argument in a drm_ctx structure. 1641 * argument in a drm_ctx structure.
@@ -1630,7 +1647,7 @@ int drmDestroyContext(int fd, drm_context_t handle)
1630 memclear(ctx); 1647 memclear(ctx);
1631 ctx.handle = handle; 1648 ctx.handle = handle;
1632 if (drmIoctl(fd, DRM_IOCTL_RM_CTX, &ctx)) 1649 if (drmIoctl(fd, DRM_IOCTL_RM_CTX, &ctx))
1633 return -errno; 1650 return -errno;
1634 return 0; 1651 return 0;
1635} 1652}
1636 1653
@@ -1640,7 +1657,7 @@ int drmCreateDrawable(int fd, drm_drawable_t *handle)
1640 1657
1641 memclear(draw); 1658 memclear(draw);
1642 if (drmIoctl(fd, DRM_IOCTL_ADD_DRAW, &draw)) 1659 if (drmIoctl(fd, DRM_IOCTL_ADD_DRAW, &draw))
1643 return -errno; 1660 return -errno;
1644 *handle = draw.handle; 1661 *handle = draw.handle;
1645 return 0; 1662 return 0;
1646} 1663}
@@ -1652,13 +1669,13 @@ int drmDestroyDrawable(int fd, drm_drawable_t handle)
1652 memclear(draw); 1669 memclear(draw);
1653 draw.handle = handle; 1670 draw.handle = handle;
1654 if (drmIoctl(fd, DRM_IOCTL_RM_DRAW, &draw)) 1671 if (drmIoctl(fd, DRM_IOCTL_RM_DRAW, &draw))
1655 return -errno; 1672 return -errno;
1656 return 0; 1673 return 0;
1657} 1674}
1658 1675
1659int drmUpdateDrawableInfo(int fd, drm_drawable_t handle, 1676int drmUpdateDrawableInfo(int fd, drm_drawable_t handle,
1660 drm_drawable_info_type_t type, unsigned int num, 1677 drm_drawable_info_type_t type, unsigned int num,
1661 void *data) 1678 void *data)
1662{ 1679{
1663 drm_update_draw_t update; 1680 drm_update_draw_t update;
1664 1681
@@ -1669,7 +1686,7 @@ int drmUpdateDrawableInfo(int fd, drm_drawable_t handle,
1669 update.data = (unsigned long long)(unsigned long)data; 1686 update.data = (unsigned long long)(unsigned long)data;
1670 1687
1671 if (drmIoctl(fd, DRM_IOCTL_UPDATE_DRAW, &update)) 1688 if (drmIoctl(fd, DRM_IOCTL_UPDATE_DRAW, &update))
1672 return -errno; 1689 return -errno;
1673 1690
1674 return 0; 1691 return 0;
1675} 1692}
@@ -1680,16 +1697,16 @@ int drmUpdateDrawableInfo(int fd, drm_drawable_t handle,
1680 * Must be called before any of the other AGP related calls. 1697 * Must be called before any of the other AGP related calls.
1681 * 1698 *
1682 * \param fd file descriptor. 1699 * \param fd file descriptor.
1683 * 1700 *
1684 * \return zero on success, or a negative value on failure. 1701 * \return zero on success, or a negative value on failure.
1685 * 1702 *
1686 * \internal 1703 * \internal
1687 * This function is a wrapper around the DRM_IOCTL_AGP_ACQUIRE ioctl. 1704 * This function is a wrapper around the DRM_IOCTL_AGP_ACQUIRE ioctl.
1688 */ 1705 */
1689int drmAgpAcquire(int fd) 1706int drmAgpAcquire(int fd)
1690{ 1707{
1691 if (drmIoctl(fd, DRM_IOCTL_AGP_ACQUIRE, NULL)) 1708 if (drmIoctl(fd, DRM_IOCTL_AGP_ACQUIRE, NULL))
1692 return -errno; 1709 return -errno;
1693 return 0; 1710 return 0;
1694} 1711}
1695 1712
@@ -1698,16 +1715,16 @@ int drmAgpAcquire(int fd)
1698 * Release the AGP device. 1715 * Release the AGP device.
1699 * 1716 *
1700 * \param fd file descriptor. 1717 * \param fd file descriptor.
1701 * 1718 *
1702 * \return zero on success, or a negative value on failure. 1719 * \return zero on success, or a negative value on failure.
1703 * 1720 *
1704 * \internal 1721 * \internal
1705 * This function is a wrapper around the DRM_IOCTL_AGP_RELEASE ioctl. 1722 * This function is a wrapper around the DRM_IOCTL_AGP_RELEASE ioctl.
1706 */ 1723 */
1707int drmAgpRelease(int fd) 1724int drmAgpRelease(int fd)
1708{ 1725{
1709 if (drmIoctl(fd, DRM_IOCTL_AGP_RELEASE, NULL)) 1726 if (drmIoctl(fd, DRM_IOCTL_AGP_RELEASE, NULL))
1710 return -errno; 1727 return -errno;
1711 return 0; 1728 return 0;
1712} 1729}
1713 1730
@@ -1717,9 +1734,9 @@ int drmAgpRelease(int fd)
1717 * 1734 *
1718 * \param fd file descriptor. 1735 * \param fd file descriptor.
1719 * \param mode AGP mode. 1736 * \param mode AGP mode.
1720 * 1737 *
1721 * \return zero on success, or a negative value on failure. 1738 * \return zero on success, or a negative value on failure.
1722 * 1739 *
1723 * \internal 1740 * \internal
1724 * This function is a wrapper around the DRM_IOCTL_AGP_ENABLE ioctl, passing the 1741 * This function is a wrapper around the DRM_IOCTL_AGP_ENABLE ioctl, passing the
1725 * argument in a drm_agp_mode structure. 1742 * argument in a drm_agp_mode structure.
@@ -1731,7 +1748,7 @@ int drmAgpEnable(int fd, unsigned long mode)
1731 memclear(m); 1748 memclear(m);
1732 m.mode = mode; 1749 m.mode = mode;
1733 if (drmIoctl(fd, DRM_IOCTL_AGP_ENABLE, &m)) 1750 if (drmIoctl(fd, DRM_IOCTL_AGP_ENABLE, &m))
1734 return -errno; 1751 return -errno;
1735 return 0; 1752 return 0;
1736} 1753}
1737 1754
@@ -1745,15 +1762,15 @@ int drmAgpEnable(int fd, unsigned long mode)
1745 * \param address if not zero, will be set to the physical address of the 1762 * \param address if not zero, will be set to the physical address of the
1746 * allocated memory. 1763 * allocated memory.
1747 * \param handle on success will be set to a handle of the allocated memory. 1764 * \param handle on success will be set to a handle of the allocated memory.
1748 * 1765 *
1749 * \return zero on success, or a negative value on failure. 1766 * \return zero on success, or a negative value on failure.
1750 * 1767 *
1751 * \internal 1768 * \internal
1752 * This function is a wrapper around the DRM_IOCTL_AGP_ALLOC ioctl, passing the 1769 * This function is a wrapper around the DRM_IOCTL_AGP_ALLOC ioctl, passing the
1753 * arguments in a drm_agp_buffer structure. 1770 * arguments in a drm_agp_buffer structure.
1754 */ 1771 */
1755int drmAgpAlloc(int fd, unsigned long size, unsigned long type, 1772int drmAgpAlloc(int fd, unsigned long size, unsigned long type,
1756 unsigned long *address, drm_handle_t *handle) 1773 unsigned long *address, drm_handle_t *handle)
1757{ 1774{
1758 drm_agp_buffer_t b; 1775 drm_agp_buffer_t b;
1759 1776
@@ -1762,9 +1779,9 @@ int drmAgpAlloc(int fd, unsigned long size, unsigned long type,
1762 b.size = size; 1779 b.size = size;
1763 b.type = type; 1780 b.type = type;
1764 if (drmIoctl(fd, DRM_IOCTL_AGP_ALLOC, &b)) 1781 if (drmIoctl(fd, DRM_IOCTL_AGP_ALLOC, &b))
1765 return -errno; 1782 return -errno;
1766 if (address != 0UL) 1783 if (address != 0UL)
1767 *address = b.physical; 1784 *address = b.physical;
1768 *handle = b.handle; 1785 *handle = b.handle;
1769 return 0; 1786 return 0;
1770} 1787}
@@ -1775,9 +1792,9 @@ int drmAgpAlloc(int fd, unsigned long size, unsigned long type,
1775 * 1792 *
1776 * \param fd file descriptor. 1793 * \param fd file descriptor.
1777 * \param handle handle to the allocated memory, as given by drmAgpAllocate(). 1794 * \param handle handle to the allocated memory, as given by drmAgpAllocate().
1778 * 1795 *
1779 * \return zero on success, or a negative value on failure. 1796 * \return zero on success, or a negative value on failure.
1780 * 1797 *
1781 * \internal 1798 * \internal
1782 * This function is a wrapper around the DRM_IOCTL_AGP_FREE ioctl, passing the 1799 * This function is a wrapper around the DRM_IOCTL_AGP_FREE ioctl, passing the
1783 * argument in a drm_agp_buffer structure. 1800 * argument in a drm_agp_buffer structure.
@@ -1789,7 +1806,7 @@ int drmAgpFree(int fd, drm_handle_t handle)
1789 memclear(b); 1806 memclear(b);
1790 b.handle = handle; 1807 b.handle = handle;
1791 if (drmIoctl(fd, DRM_IOCTL_AGP_FREE, &b)) 1808 if (drmIoctl(fd, DRM_IOCTL_AGP_FREE, &b))
1792 return -errno; 1809 return -errno;
1793 return 0; 1810 return 0;
1794} 1811}
1795 1812
@@ -1800,9 +1817,9 @@ int drmAgpFree(int fd, drm_handle_t handle)
1800 * \param fd file descriptor. 1817 * \param fd file descriptor.
1801 * \param handle handle to the allocated memory, as given by drmAgpAllocate(). 1818 * \param handle handle to the allocated memory, as given by drmAgpAllocate().
1802 * \param offset offset in bytes. It will round to page boundary. 1819 * \param offset offset in bytes. It will round to page boundary.
1803 * 1820 *
1804 * \return zero on success, or a negative value on failure. 1821 * \return zero on success, or a negative value on failure.
1805 * 1822 *
1806 * \internal 1823 * \internal
1807 * This function is a wrapper around the DRM_IOCTL_AGP_BIND ioctl, passing the 1824 * This function is a wrapper around the DRM_IOCTL_AGP_BIND ioctl, passing the
1808 * argument in a drm_agp_binding structure. 1825 * argument in a drm_agp_binding structure.
@@ -1815,7 +1832,7 @@ int drmAgpBind(int fd, drm_handle_t handle, unsigned long offset)
1815 b.handle = handle; 1832 b.handle = handle;
1816 b.offset = offset; 1833 b.offset = offset;
1817 if (drmIoctl(fd, DRM_IOCTL_AGP_BIND, &b)) 1834 if (drmIoctl(fd, DRM_IOCTL_AGP_BIND, &b))
1818 return -errno; 1835 return -errno;
1819 return 0; 1836 return 0;
1820} 1837}
1821 1838
@@ -1825,9 +1842,9 @@ int drmAgpBind(int fd, drm_handle_t handle, unsigned long offset)
1825 * 1842 *
1826 * \param fd file descriptor. 1843 * \param fd file descriptor.
1827 * \param handle handle to the allocated memory, as given by drmAgpAllocate(). 1844 * \param handle handle to the allocated memory, as given by drmAgpAllocate().
1828 * 1845 *
1829 * \return zero on success, or a negative value on failure. 1846 * \return zero on success, or a negative value on failure.
1830 * 1847 *
1831 * \internal 1848 * \internal
1832 * This function is a wrapper around the DRM_IOCTL_AGP_UNBIND ioctl, passing 1849 * This function is a wrapper around the DRM_IOCTL_AGP_UNBIND ioctl, passing
1833 * the argument in a drm_agp_binding structure. 1850 * the argument in a drm_agp_binding structure.
@@ -1839,7 +1856,7 @@ int drmAgpUnbind(int fd, drm_handle_t handle)
1839 memclear(b); 1856 memclear(b);
1840 b.handle = handle; 1857 b.handle = handle;
1841 if (drmIoctl(fd, DRM_IOCTL_AGP_UNBIND, &b)) 1858 if (drmIoctl(fd, DRM_IOCTL_AGP_UNBIND, &b))
1842 return -errno; 1859 return -errno;
1843 return 0; 1860 return 0;
1844} 1861}
1845 1862
@@ -1848,9 +1865,9 @@ int drmAgpUnbind(int fd, drm_handle_t handle)
1848 * Get AGP driver major version number. 1865 * Get AGP driver major version number.
1849 * 1866 *
1850 * \param fd file descriptor. 1867 * \param fd file descriptor.
1851 * 1868 *
1852 * \return major version number on success, or a negative value on failure.. 1869 * \return major version number on success, or a negative value on failure..
1853 * 1870 *
1854 * \internal 1871 * \internal
1855 * This function is a wrapper around the DRM_IOCTL_AGP_INFO ioctl, getting the 1872 * This function is a wrapper around the DRM_IOCTL_AGP_INFO ioctl, getting the
1856 * necessary information in a drm_agp_info structure. 1873 * necessary information in a drm_agp_info structure.
@@ -1862,7 +1879,7 @@ int drmAgpVersionMajor(int fd)
1862 memclear(i); 1879 memclear(i);
1863 1880
1864 if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i)) 1881 if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
1865 return -errno; 1882 return -errno;
1866 return i.agp_version_major; 1883 return i.agp_version_major;
1867} 1884}
1868 1885
@@ -1871,9 +1888,9 @@ int drmAgpVersionMajor(int fd)
1871 * Get AGP driver minor version number. 1888 * Get AGP driver minor version number.
1872 * 1889 *
1873 * \param fd file descriptor. 1890 * \param fd file descriptor.
1874 * 1891 *
1875 * \return minor version number on success, or a negative value on failure. 1892 * \return minor version number on success, or a negative value on failure.
1876 * 1893 *
1877 * \internal 1894 * \internal
1878 * This function is a wrapper around the DRM_IOCTL_AGP_INFO ioctl, getting the 1895 * This function is a wrapper around the DRM_IOCTL_AGP_INFO ioctl, getting the
1879 * necessary information in a drm_agp_info structure. 1896 * necessary information in a drm_agp_info structure.
@@ -1885,7 +1902,7 @@ int drmAgpVersionMinor(int fd)
1885 memclear(i); 1902 memclear(i);
1886 1903
1887 if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i)) 1904 if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
1888 return -errno; 1905 return -errno;
1889 return i.agp_version_minor; 1906 return i.agp_version_minor;
1890} 1907}
1891 1908
@@ -1894,9 +1911,9 @@ int drmAgpVersionMinor(int fd)
1894 * Get AGP mode. 1911 * Get AGP mode.
1895 * 1912 *
1896 * \param fd file descriptor. 1913 * \param fd file descriptor.
1897 * 1914 *
1898 * \return mode on success, or zero on failure. 1915 * \return mode on success, or zero on failure.
1899 * 1916 *
1900 * \internal 1917 * \internal
1901 * This function is a wrapper around the DRM_IOCTL_AGP_INFO ioctl, getting the 1918 * This function is a wrapper around the DRM_IOCTL_AGP_INFO ioctl, getting the
1902 * necessary information in a drm_agp_info structure. 1919 * necessary information in a drm_agp_info structure.
@@ -1908,7 +1925,7 @@ unsigned long drmAgpGetMode(int fd)
1908 memclear(i); 1925 memclear(i);
1909 1926
1910 if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i)) 1927 if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
1911 return 0; 1928 return 0;
1912 return i.mode; 1929 return i.mode;
1913} 1930}
1914 1931
@@ -1917,9 +1934,9 @@ unsigned long drmAgpGetMode(int fd)
1917 * Get AGP aperture base. 1934 * Get AGP aperture base.
1918 * 1935 *
1919 * \param fd file descriptor. 1936 * \param fd file descriptor.
1920 * 1937 *
1921 * \return aperture base on success, zero on failure. 1938 * \return aperture base on success, zero on failure.
1922 * 1939 *
1923 * \internal 1940 * \internal
1924 * This function is a wrapper around the DRM_IOCTL_AGP_INFO ioctl, getting the 1941 * This function is a wrapper around the DRM_IOCTL_AGP_INFO ioctl, getting the
1925 * necessary information in a drm_agp_info structure. 1942 * necessary information in a drm_agp_info structure.
@@ -1931,7 +1948,7 @@ unsigned long drmAgpBase(int fd)
1931 memclear(i); 1948 memclear(i);
1932 1949
1933 if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i)) 1950 if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
1934 return 0; 1951 return 0;
1935 return i.aperture_base; 1952 return i.aperture_base;
1936} 1953}
1937 1954
@@ -1940,9 +1957,9 @@ unsigned long drmAgpBase(int fd)
1940 * Get AGP aperture size. 1957 * Get AGP aperture size.
1941 * 1958 *
1942 * \param fd file descriptor. 1959 * \param fd file descriptor.
1943 * 1960 *
1944 * \return aperture size on success, zero on failure. 1961 * \return aperture size on success, zero on failure.
1945 * 1962 *
1946 * \internal 1963 * \internal
1947 * This function is a wrapper around the DRM_IOCTL_AGP_INFO ioctl, getting the 1964 * This function is a wrapper around the DRM_IOCTL_AGP_INFO ioctl, getting the
1948 * necessary information in a drm_agp_info structure. 1965 * necessary information in a drm_agp_info structure.
@@ -1954,7 +1971,7 @@ unsigned long drmAgpSize(int fd)
1954 memclear(i); 1971 memclear(i);
1955 1972
1956 if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i)) 1973 if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
1957 return 0; 1974 return 0;
1958 return i.aperture_size; 1975 return i.aperture_size;
1959} 1976}
1960 1977
@@ -1963,9 +1980,9 @@ unsigned long drmAgpSize(int fd)
1963 * Get used AGP memory. 1980 * Get used AGP memory.
1964 * 1981 *
1965 * \param fd file descriptor. 1982 * \param fd file descriptor.
1966 * 1983 *
1967 * \return memory used on success, or zero on failure. 1984 * \return memory used on success, or zero on failure.
1968 * 1985 *
1969 * \internal 1986 * \internal
1970 * This function is a wrapper around the DRM_IOCTL_AGP_INFO ioctl, getting the 1987 * This function is a wrapper around the DRM_IOCTL_AGP_INFO ioctl, getting the
1971 * necessary information in a drm_agp_info structure. 1988 * necessary information in a drm_agp_info structure.
@@ -1977,7 +1994,7 @@ unsigned long drmAgpMemoryUsed(int fd)
1977 memclear(i); 1994 memclear(i);
1978 1995
1979 if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i)) 1996 if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
1980 return 0; 1997 return 0;
1981 return i.memory_used; 1998 return i.memory_used;
1982} 1999}
1983 2000
@@ -1986,9 +2003,9 @@ unsigned long drmAgpMemoryUsed(int fd)
1986 * Get available AGP memory. 2003 * Get available AGP memory.
1987 * 2004 *
1988 * \param fd file descriptor. 2005 * \param fd file descriptor.
1989 * 2006 *
1990 * \return memory available on success, or zero on failure. 2007 * \return memory available on success, or zero on failure.
1991 * 2008 *
1992 * \internal 2009 * \internal
1993 * This function is a wrapper around the DRM_IOCTL_AGP_INFO ioctl, getting the 2010 * This function is a wrapper around the DRM_IOCTL_AGP_INFO ioctl, getting the
1994 * necessary information in a drm_agp_info structure. 2011 * necessary information in a drm_agp_info structure.
@@ -2000,7 +2017,7 @@ unsigned long drmAgpMemoryAvail(int fd)
2000 memclear(i); 2017 memclear(i);
2001 2018
2002 if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i)) 2019 if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
2003 return 0; 2020 return 0;
2004 return i.memory_allowed; 2021 return i.memory_allowed;
2005} 2022}
2006 2023
@@ -2009,9 +2026,9 @@ unsigned long drmAgpMemoryAvail(int fd)
2009 * Get hardware vendor ID. 2026 * Get hardware vendor ID.
2010 * 2027 *
2011 * \param fd file descriptor. 2028 * \param fd file descriptor.
2012 * 2029 *
2013 * \return vendor ID on success, or zero on failure. 2030 * \return vendor ID on success, or zero on failure.
2014 * 2031 *
2015 * \internal 2032 * \internal
2016 * This function is a wrapper around the DRM_IOCTL_AGP_INFO ioctl, getting the 2033 * This function is a wrapper around the DRM_IOCTL_AGP_INFO ioctl, getting the
2017 * necessary information in a drm_agp_info structure. 2034 * necessary information in a drm_agp_info structure.
@@ -2023,7 +2040,7 @@ unsigned int drmAgpVendorId(int fd)
2023 memclear(i); 2040 memclear(i);
2024 2041
2025 if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i)) 2042 if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
2026 return 0; 2043 return 0;
2027 return i.id_vendor; 2044 return i.id_vendor;
2028} 2045}
2029 2046
@@ -2032,9 +2049,9 @@ unsigned int drmAgpVendorId(int fd)
2032 * Get hardware device ID. 2049 * Get hardware device ID.
2033 * 2050 *
2034 * \param fd file descriptor. 2051 * \param fd file descriptor.
2035 * 2052 *
2036 * \return zero on success, or zero on failure. 2053 * \return zero on success, or zero on failure.
2037 * 2054 *
2038 * \internal 2055 * \internal
2039 * This function is a wrapper around the DRM_IOCTL_AGP_INFO ioctl, getting the 2056 * This function is a wrapper around the DRM_IOCTL_AGP_INFO ioctl, getting the
2040 * necessary information in a drm_agp_info structure. 2057 * necessary information in a drm_agp_info structure.
@@ -2046,7 +2063,7 @@ unsigned int drmAgpDeviceId(int fd)
2046 memclear(i); 2063 memclear(i);
2047 2064
2048 if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i)) 2065 if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
2049 return 0; 2066 return 0;
2050 return i.id_device; 2067 return i.id_device;
2051} 2068}
2052 2069
@@ -2059,7 +2076,7 @@ int drmScatterGatherAlloc(int fd, unsigned long size, drm_handle_t *handle)
2059 *handle = 0; 2076 *handle = 0;
2060 sg.size = size; 2077 sg.size = size;
2061 if (drmIoctl(fd, DRM_IOCTL_SG_ALLOC, &sg)) 2078 if (drmIoctl(fd, DRM_IOCTL_SG_ALLOC, &sg))
2062 return -errno; 2079 return -errno;
2063 *handle = sg.handle; 2080 *handle = sg.handle;
2064 return 0; 2081 return 0;
2065} 2082}
@@ -2071,7 +2088,7 @@ int drmScatterGatherFree(int fd, drm_handle_t handle)
2071 memclear(sg); 2088 memclear(sg);
2072 sg.handle = handle; 2089 sg.handle = handle;
2073 if (drmIoctl(fd, DRM_IOCTL_SG_FREE, &sg)) 2090 if (drmIoctl(fd, DRM_IOCTL_SG_FREE, &sg))
2074 return -errno; 2091 return -errno;
2075 return 0; 2092 return 0;
2076} 2093}
2077 2094
@@ -2080,9 +2097,9 @@ int drmScatterGatherFree(int fd, drm_handle_t handle)
2080 * 2097 *
2081 * \param fd file descriptor. 2098 * \param fd file descriptor.
2082 * \param vbl pointer to a drmVBlank structure. 2099 * \param vbl pointer to a drmVBlank structure.
2083 * 2100 *
2084 * \return zero on success, or a negative value on failure. 2101 * \return zero on success, or a negative value on failure.
2085 * 2102 *
2086 * \internal 2103 * \internal
2087 * This function is a wrapper around the DRM_IOCTL_WAIT_VBLANK ioctl. 2104 * This function is a wrapper around the DRM_IOCTL_WAIT_VBLANK ioctl.
2088 */ 2105 */
@@ -2093,8 +2110,8 @@ int drmWaitVBlank(int fd, drmVBlankPtr vbl)
2093 2110
2094 ret = clock_gettime(CLOCK_MONOTONIC, &timeout); 2111 ret = clock_gettime(CLOCK_MONOTONIC, &timeout);
2095 if (ret < 0) { 2112 if (ret < 0) {
2096 fprintf(stderr, "clock_gettime failed: %s\n", strerror(errno)); 2113 fprintf(stderr, "clock_gettime failed: %s\n", strerror(errno));
2097 goto out; 2114 goto out;
2098 } 2115 }
2099 timeout.tv_sec++; 2116 timeout.tv_sec++;
2100 2117
@@ -2102,15 +2119,15 @@ int drmWaitVBlank(int fd, drmVBlankPtr vbl)
2102 ret = ioctl(fd, DRM_IOCTL_WAIT_VBLANK, vbl); 2119 ret = ioctl(fd, DRM_IOCTL_WAIT_VBLANK, vbl);
2103 vbl->request.type &= ~DRM_VBLANK_RELATIVE; 2120 vbl->request.type &= ~DRM_VBLANK_RELATIVE;
2104 if (ret && errno == EINTR) { 2121 if (ret && errno == EINTR) {
2105 clock_gettime(CLOCK_MONOTONIC, &cur); 2122 clock_gettime(CLOCK_MONOTONIC, &cur);
2106 /* Timeout after 1s */ 2123 /* Timeout after 1s */
2107 if (cur.tv_sec > timeout.tv_sec + 1 || 2124 if (cur.tv_sec > timeout.tv_sec + 1 ||
2108 (cur.tv_sec == timeout.tv_sec && cur.tv_nsec >= 2125 (cur.tv_sec == timeout.tv_sec && cur.tv_nsec >=
2109 timeout.tv_nsec)) { 2126 timeout.tv_nsec)) {
2110 errno = EBUSY; 2127 errno = EBUSY;
2111 ret = -1; 2128 ret = -1;
2112 break; 2129 break;
2113 } 2130 }
2114 } 2131 }
2115 } while (ret && errno == EINTR); 2132 } while (ret && errno == EINTR);
2116 2133
@@ -2122,22 +2139,22 @@ int drmError(int err, const char *label)
2122{ 2139{
2123 switch (err) { 2140 switch (err) {
2124 case DRM_ERR_NO_DEVICE: 2141 case DRM_ERR_NO_DEVICE:
2125 fprintf(stderr, "%s: no device\n", label); 2142 fprintf(stderr, "%s: no device\n", label);
2126 break; 2143 break;
2127 case DRM_ERR_NO_ACCESS: 2144 case DRM_ERR_NO_ACCESS:
2128 fprintf(stderr, "%s: no access\n", label); 2145 fprintf(stderr, "%s: no access\n", label);
2129 break; 2146 break;
2130 case DRM_ERR_NOT_ROOT: 2147 case DRM_ERR_NOT_ROOT:
2131 fprintf(stderr, "%s: not root\n", label); 2148 fprintf(stderr, "%s: not root\n", label);
2132 break; 2149 break;
2133 case DRM_ERR_INVALID: 2150 case DRM_ERR_INVALID:
2134 fprintf(stderr, "%s: invalid args\n", label); 2151 fprintf(stderr, "%s: invalid args\n", label);
2135 break; 2152 break;
2136 default: 2153 default:
2137 if (err < 0) 2154 if (err < 0)
2138 err = -err; 2155 err = -err;
2139 fprintf( stderr, "%s: error %d (%s)\n", label, err, strerror(err) ); 2156 fprintf( stderr, "%s: error %d (%s)\n", label, err, strerror(err) );
2140 break; 2157 break;
2141 } 2158 }
2142 2159
2143 return 1; 2160 return 1;
@@ -2148,9 +2165,9 @@ int drmError(int err, const char *label)
2148 * 2165 *
2149 * \param fd file descriptor. 2166 * \param fd file descriptor.
2150 * \param irq IRQ number. 2167 * \param irq IRQ number.
2151 * 2168 *
2152 * \return zero on success, or a negative value on failure. 2169 * \return zero on success, or a negative value on failure.
2153 * 2170 *
2154 * \internal 2171 * \internal
2155 * This function is a wrapper around the DRM_IOCTL_CONTROL ioctl, passing the 2172 * This function is a wrapper around the DRM_IOCTL_CONTROL ioctl, passing the
2156 * argument in a drm_control structure. 2173 * argument in a drm_control structure.
@@ -2163,7 +2180,7 @@ int drmCtlInstHandler(int fd, int irq)
2163 ctl.func = DRM_INST_HANDLER; 2180 ctl.func = DRM_INST_HANDLER;
2164 ctl.irq = irq; 2181 ctl.irq = irq;
2165 if (drmIoctl(fd, DRM_IOCTL_CONTROL, &ctl)) 2182 if (drmIoctl(fd, DRM_IOCTL_CONTROL, &ctl))
2166 return -errno; 2183 return -errno;
2167 return 0; 2184 return 0;
2168} 2185}
2169 2186
@@ -2172,9 +2189,9 @@ int drmCtlInstHandler(int fd, int irq)
2172 * Uninstall IRQ handler. 2189 * Uninstall IRQ handler.
2173 * 2190 *
2174 * \param fd file descriptor. 2191 * \param fd file descriptor.
2175 * 2192 *
2176 * \return zero on success, or a negative value on failure. 2193 * \return zero on success, or a negative value on failure.
2177 * 2194 *
2178 * \internal 2195 * \internal
2179 * This function is a wrapper around the DRM_IOCTL_CONTROL ioctl, passing the 2196 * This function is a wrapper around the DRM_IOCTL_CONTROL ioctl, passing the
2180 * argument in a drm_control structure. 2197 * argument in a drm_control structure.
@@ -2187,7 +2204,7 @@ int drmCtlUninstHandler(int fd)
2187 ctl.func = DRM_UNINST_HANDLER; 2204 ctl.func = DRM_UNINST_HANDLER;
2188 ctl.irq = 0; 2205 ctl.irq = 0;
2189 if (drmIoctl(fd, DRM_IOCTL_CONTROL, &ctl)) 2206 if (drmIoctl(fd, DRM_IOCTL_CONTROL, &ctl))
2190 return -errno; 2207 return -errno;
2191 return 0; 2208 return 0;
2192} 2209}
2193 2210
@@ -2204,7 +2221,7 @@ int drmFinish(int fd, int context, drmLockFlags flags)
2204 if (flags & DRM_HALT_ALL_QUEUES) lock.flags |= _DRM_HALT_ALL_QUEUES; 2221 if (flags & DRM_HALT_ALL_QUEUES) lock.flags |= _DRM_HALT_ALL_QUEUES;
2205 if (flags & DRM_HALT_CUR_QUEUES) lock.flags |= _DRM_HALT_CUR_QUEUES; 2222 if (flags & DRM_HALT_CUR_QUEUES) lock.flags |= _DRM_HALT_CUR_QUEUES;
2206 if (drmIoctl(fd, DRM_IOCTL_FINISH, &lock)) 2223 if (drmIoctl(fd, DRM_IOCTL_FINISH, &lock))
2207 return -errno; 2224 return -errno;
2208 return 0; 2225 return 0;
2209} 2226}
2210 2227
@@ -2215,9 +2232,9 @@ int drmFinish(int fd, int context, drmLockFlags flags)
2215 * \param busnum bus number. 2232 * \param busnum bus number.
2216 * \param devnum device number. 2233 * \param devnum device number.
2217 * \param funcnum function number. 2234 * \param funcnum function number.
2218 * 2235 *
2219 * \return IRQ number on success, or a negative value on failure. 2236 * \return IRQ number on success, or a negative value on failure.
2220 * 2237 *
2221 * \internal 2238 * \internal
2222 * This function is a wrapper around the DRM_IOCTL_IRQ_BUSID ioctl, passing the 2239 * This function is a wrapper around the DRM_IOCTL_IRQ_BUSID ioctl, passing the
2223 * arguments in a drm_irq_busid structure. 2240 * arguments in a drm_irq_busid structure.
@@ -2231,7 +2248,7 @@ int drmGetInterruptFromBusID(int fd, int busnum, int devnum, int funcnum)
2231 p.devnum = devnum; 2248 p.devnum = devnum;
2232 p.funcnum = funcnum; 2249 p.funcnum = funcnum;
2233 if (drmIoctl(fd, DRM_IOCTL_IRQ_BUSID, &p)) 2250 if (drmIoctl(fd, DRM_IOCTL_IRQ_BUSID, &p))
2234 return -errno; 2251 return -errno;
2235 return p.irq; 2252 return p.irq;
2236} 2253}
2237 2254
@@ -2240,8 +2257,8 @@ int drmAddContextTag(int fd, drm_context_t context, void *tag)
2240 drmHashEntry *entry = drmGetEntry(fd); 2257 drmHashEntry *entry = drmGetEntry(fd);
2241 2258
2242 if (drmHashInsert(entry->tagTable, context, tag)) { 2259 if (drmHashInsert(entry->tagTable, context, tag)) {
2243 drmHashDelete(entry->tagTable, context); 2260 drmHashDelete(entry->tagTable, context);
2244 drmHashInsert(entry->tagTable, context, tag); 2261 drmHashInsert(entry->tagTable, context, tag);
2245 } 2262 }
2246 return 0; 2263 return 0;
2247} 2264}
@@ -2259,7 +2276,7 @@ void *drmGetContextTag(int fd, drm_context_t context)
2259 void *value; 2276 void *value;
2260 2277
2261 if (drmHashLookup(entry->tagTable, context, &value)) 2278 if (drmHashLookup(entry->tagTable, context, &value))
2262 return NULL; 2279 return NULL;
2263 2280
2264 return value; 2281 return value;
2265} 2282}
@@ -2274,7 +2291,7 @@ int drmAddContextPrivateMapping(int fd, drm_context_t ctx_id,
2274 map.handle = (void *)(uintptr_t)handle; 2291 map.handle = (void *)(uintptr_t)handle;
2275 2292
2276 if (drmIoctl(fd, DRM_IOCTL_SET_SAREA_CTX, &map)) 2293 if (drmIoctl(fd, DRM_IOCTL_SET_SAREA_CTX, &map))
2277 return -errno; 2294 return -errno;
2278 return 0; 2295 return 0;
2279} 2296}
2280 2297
@@ -2287,23 +2304,23 @@ int drmGetContextPrivateMapping(int fd, drm_context_t ctx_id,
2287 map.ctx_id = ctx_id; 2304 map.ctx_id = ctx_id;
2288 2305
2289 if (drmIoctl(fd, DRM_IOCTL_GET_SAREA_CTX, &map)) 2306 if (drmIoctl(fd, DRM_IOCTL_GET_SAREA_CTX, &map))
2290 return -errno; 2307 return -errno;
2291 if (handle) 2308 if (handle)
2292 *handle = (drm_handle_t)(uintptr_t)map.handle; 2309 *handle = (drm_handle_t)(uintptr_t)map.handle;
2293 2310
2294 return 0; 2311 return 0;
2295} 2312}
2296 2313
2297int drmGetMap(int fd, int idx, drm_handle_t *offset, drmSize *size, 2314int drmGetMap(int fd, int idx, drm_handle_t *offset, drmSize *size,
2298 drmMapType *type, drmMapFlags *flags, drm_handle_t *handle, 2315 drmMapType *type, drmMapFlags *flags, drm_handle_t *handle,
2299 int *mtrr) 2316 int *mtrr)
2300{ 2317{
2301 drm_map_t map; 2318 drm_map_t map;
2302 2319
2303 memclear(map); 2320 memclear(map);
2304 map.offset = idx; 2321 map.offset = idx;
2305 if (drmIoctl(fd, DRM_IOCTL_GET_MAP, &map)) 2322 if (drmIoctl(fd, DRM_IOCTL_GET_MAP, &map))
2306 return -errno; 2323 return -errno;
2307 *offset = map.offset; 2324 *offset = map.offset;
2308 *size = map.size; 2325 *size = map.size;
2309 *type = map.type; 2326 *type = map.type;
@@ -2314,14 +2331,14 @@ int drmGetMap(int fd, int idx, drm_handle_t *offset, drmSize *size,
2314} 2331}
2315 2332
2316int drmGetClient(int fd, int idx, int *auth, int *pid, int *uid, 2333int drmGetClient(int fd, int idx, int *auth, int *pid, int *uid,
2317 unsigned long *magic, unsigned long *iocs) 2334 unsigned long *magic, unsigned long *iocs)
2318{ 2335{
2319 drm_client_t client; 2336 drm_client_t client;
2320 2337
2321 memclear(client); 2338 memclear(client);
2322 client.idx = idx; 2339 client.idx = idx;
2323 if (drmIoctl(fd, DRM_IOCTL_GET_CLIENT, &client)) 2340 if (drmIoctl(fd, DRM_IOCTL_GET_CLIENT, &client))
2324 return -errno; 2341 return -errno;
2325 *auth = client.auth; 2342 *auth = client.auth;
2326 *pid = client.pid; 2343 *pid = client.pid;
2327 *uid = client.uid; 2344 *uid = client.uid;
@@ -2337,12 +2354,12 @@ int drmGetStats(int fd, drmStatsT *stats)
2337 2354
2338 memclear(s); 2355 memclear(s);
2339 if (drmIoctl(fd, DRM_IOCTL_GET_STATS, &s)) 2356 if (drmIoctl(fd, DRM_IOCTL_GET_STATS, &s))
2340 return -errno; 2357 return -errno;
2341 2358
2342 stats->count = 0; 2359 stats->count = 0;
2343 memset(stats, 0, sizeof(*stats)); 2360 memset(stats, 0, sizeof(*stats));
2344 if (s.count > sizeof(stats->data)/sizeof(stats->data[0])) 2361 if (s.count > sizeof(stats->data)/sizeof(stats->data[0]))
2345 return -1; 2362 return -1;
2346 2363
2347#define SET_VALUE \ 2364#define SET_VALUE \
2348 stats->data[i].long_format = "%-20.20s"; \ 2365 stats->data[i].long_format = "%-20.20s"; \
@@ -2369,87 +2386,87 @@ int drmGetStats(int fd, drmStatsT *stats)
2369 2386
2370 stats->count = s.count; 2387 stats->count = s.count;
2371 for (i = 0; i < s.count; i++) { 2388 for (i = 0; i < s.count; i++) {
2372 stats->data[i].value = s.data[i].value; 2389 stats->data[i].value = s.data[i].value;
2373 switch (s.data[i].type) { 2390 switch (s.data[i].type) {
2374 case _DRM_STAT_LOCK: 2391 case _DRM_STAT_LOCK:
2375 stats->data[i].long_name = "Lock"; 2392 stats->data[i].long_name = "Lock";
2376 stats->data[i].rate_name = "Lock"; 2393 stats->data[i].rate_name = "Lock";
2377 SET_VALUE; 2394 SET_VALUE;
2378 break; 2395 break;
2379 case _DRM_STAT_OPENS: 2396 case _DRM_STAT_OPENS:
2380 stats->data[i].long_name = "Opens"; 2397 stats->data[i].long_name = "Opens";
2381 stats->data[i].rate_name = "O"; 2398 stats->data[i].rate_name = "O";
2382 SET_COUNT; 2399 SET_COUNT;
2383 stats->data[i].verbose = 1; 2400 stats->data[i].verbose = 1;
2384 break; 2401 break;
2385 case _DRM_STAT_CLOSES: 2402 case _DRM_STAT_CLOSES:
2386 stats->data[i].long_name = "Closes"; 2403 stats->data[i].long_name = "Closes";
2387 stats->data[i].rate_name = "Lock"; 2404 stats->data[i].rate_name = "Lock";
2388 SET_COUNT; 2405 SET_COUNT;
2389 stats->data[i].verbose = 1; 2406 stats->data[i].verbose = 1;
2390 break; 2407 break;
2391 case _DRM_STAT_IOCTLS: 2408 case _DRM_STAT_IOCTLS:
2392 stats->data[i].long_name = "Ioctls"; 2409 stats->data[i].long_name = "Ioctls";
2393 stats->data[i].rate_name = "Ioc/s"; 2410 stats->data[i].rate_name = "Ioc/s";
2394 SET_COUNT; 2411 SET_COUNT;
2395 break; 2412 break;
2396 case _DRM_STAT_LOCKS: 2413 case _DRM_STAT_LOCKS:
2397 stats->data[i].long_name = "Locks"; 2414 stats->data[i].long_name = "Locks";
2398 stats->data[i].rate_name = "Lck/s"; 2415 stats->data[i].rate_name = "Lck/s";
2399 SET_COUNT; 2416 SET_COUNT;
2400 break; 2417 break;
2401 case _DRM_STAT_UNLOCKS: 2418 case _DRM_STAT_UNLOCKS:
2402 stats->data[i].long_name = "Unlocks"; 2419 stats->data[i].long_name = "Unlocks";
2403 stats->data[i].rate_name = "Unl/s"; 2420 stats->data[i].rate_name = "Unl/s";
2404 SET_COUNT; 2421 SET_COUNT;
2405 break; 2422 break;
2406 case _DRM_STAT_IRQ: 2423 case _DRM_STAT_IRQ:
2407 stats->data[i].long_name = "IRQs"; 2424 stats->data[i].long_name = "IRQs";
2408 stats->data[i].rate_name = "IRQ/s"; 2425 stats->data[i].rate_name = "IRQ/s";
2409 SET_COUNT; 2426 SET_COUNT;
2410 break; 2427 break;
2411 case _DRM_STAT_PRIMARY: 2428 case _DRM_STAT_PRIMARY:
2412 stats->data[i].long_name = "Primary Bytes"; 2429 stats->data[i].long_name = "Primary Bytes";
2413 stats->data[i].rate_name = "PB/s"; 2430 stats->data[i].rate_name = "PB/s";
2414 SET_BYTE; 2431 SET_BYTE;
2415 break; 2432 break;
2416 case _DRM_STAT_SECONDARY: 2433 case _DRM_STAT_SECONDARY:
2417 stats->data[i].long_name = "Secondary Bytes"; 2434 stats->data[i].long_name = "Secondary Bytes";
2418 stats->data[i].rate_name = "SB/s"; 2435 stats->data[i].rate_name = "SB/s";
2419 SET_BYTE; 2436 SET_BYTE;
2420 break; 2437 break;
2421 case _DRM_STAT_DMA: 2438 case _DRM_STAT_DMA:
2422 stats->data[i].long_name = "DMA"; 2439 stats->data[i].long_name = "DMA";
2423 stats->data[i].rate_name = "DMA/s"; 2440 stats->data[i].rate_name = "DMA/s";
2424 SET_COUNT; 2441 SET_COUNT;
2425 break; 2442 break;
2426 case _DRM_STAT_SPECIAL: 2443 case _DRM_STAT_SPECIAL:
2427 stats->data[i].long_name = "Special DMA"; 2444 stats->data[i].long_name = "Special DMA";
2428 stats->data[i].rate_name = "dma/s"; 2445 stats->data[i].rate_name = "dma/s";
2429 SET_COUNT; 2446 SET_COUNT;
2430 break; 2447 break;
2431 case _DRM_STAT_MISSED: 2448 case _DRM_STAT_MISSED:
2432 stats->data[i].long_name = "Miss"; 2449 stats->data[i].long_name = "Miss";
2433 stats->data[i].rate_name = "Ms/s"; 2450 stats->data[i].rate_name = "Ms/s";
2434 SET_COUNT; 2451 SET_COUNT;
2435 break; 2452 break;
2436 case _DRM_STAT_VALUE: 2453 case _DRM_STAT_VALUE:
2437 stats->data[i].long_name = "Value"; 2454 stats->data[i].long_name = "Value";
2438 stats->data[i].rate_name = "Value"; 2455 stats->data[i].rate_name = "Value";
2439 SET_VALUE; 2456 SET_VALUE;
2440 break; 2457 break;
2441 case _DRM_STAT_BYTE: 2458 case _DRM_STAT_BYTE:
2442 stats->data[i].long_name = "Bytes"; 2459 stats->data[i].long_name = "Bytes";
2443 stats->data[i].rate_name = "B/s"; 2460 stats->data[i].rate_name = "B/s";
2444 SET_BYTE; 2461 SET_BYTE;
2445 break; 2462 break;
2446 case _DRM_STAT_COUNT: 2463 case _DRM_STAT_COUNT:
2447 default: 2464 default:
2448 stats->data[i].long_name = "Count"; 2465 stats->data[i].long_name = "Count";
2449 stats->data[i].rate_name = "Cnt/s"; 2466 stats->data[i].rate_name = "Cnt/s";
2450 SET_COUNT; 2467 SET_COUNT;
2451 break; 2468 break;
2452 } 2469 }
2453 } 2470 }
2454 return 0; 2471 return 0;
2455} 2472}
@@ -2458,14 +2475,14 @@ int drmGetStats(int fd, drmStatsT *stats)
2458 * Issue a set-version ioctl. 2475 * Issue a set-version ioctl.
2459 * 2476 *
2460 * \param fd file descriptor. 2477 * \param fd file descriptor.
2461 * \param drmCommandIndex command index 2478 * \param drmCommandIndex command index
2462 * \param data source pointer of the data to be read and written. 2479 * \param data source pointer of the data to be read and written.
2463 * \param size size of the data to be read and written. 2480 * \param size size of the data to be read and written.
2464 * 2481 *
2465 * \return zero on success, or a negative value on failure. 2482 * \return zero on success, or a negative value on failure.
2466 * 2483 *
2467 * \internal 2484 * \internal
2468 * It issues a read-write ioctl given by 2485 * It issues a read-write ioctl given by
2469 * \code DRM_COMMAND_BASE + drmCommandIndex \endcode. 2486 * \code DRM_COMMAND_BASE + drmCommandIndex \endcode.
2470 */ 2487 */
2471int drmSetInterfaceVersion(int fd, drmSetVersion *version) 2488int drmSetInterfaceVersion(int fd, drmSetVersion *version)
@@ -2480,7 +2497,7 @@ int drmSetInterfaceVersion(int fd, drmSetVersion *version)
2480 sv.drm_dd_minor = version->drm_dd_minor; 2497 sv.drm_dd_minor = version->drm_dd_minor;
2481 2498
2482 if (drmIoctl(fd, DRM_IOCTL_SET_VERSION, &sv)) { 2499 if (drmIoctl(fd, DRM_IOCTL_SET_VERSION, &sv)) {
2483 retcode = -errno; 2500 retcode = -errno;
2484 } 2501 }
2485 2502
2486 version->drm_di_major = sv.drm_di_major; 2503 version->drm_di_major = sv.drm_di_major;
@@ -2495,12 +2512,12 @@ int drmSetInterfaceVersion(int fd, drmSetVersion *version)
2495 * Send a device-specific command. 2512 * Send a device-specific command.
2496 * 2513 *
2497 * \param fd file descriptor. 2514 * \param fd file descriptor.
2498 * \param drmCommandIndex command index 2515 * \param drmCommandIndex command index
2499 * 2516 *
2500 * \return zero on success, or a negative value on failure. 2517 * \return zero on success, or a negative value on failure.
2501 * 2518 *
2502 * \internal 2519 * \internal
2503 * It issues a ioctl given by 2520 * It issues a ioctl given by
2504 * \code DRM_COMMAND_BASE + drmCommandIndex \endcode. 2521 * \code DRM_COMMAND_BASE + drmCommandIndex \endcode.
2505 */ 2522 */
2506int drmCommandNone(int fd, unsigned long drmCommandIndex) 2523int drmCommandNone(int fd, unsigned long drmCommandIndex)
@@ -2510,7 +2527,7 @@ int drmCommandNone(int fd, unsigned long drmCommandIndex)
2510 request = DRM_IO( DRM_COMMAND_BASE + drmCommandIndex); 2527 request = DRM_IO( DRM_COMMAND_BASE + drmCommandIndex);
2511 2528
2512 if (drmIoctl(fd, request, NULL)) { 2529 if (drmIoctl(fd, request, NULL)) {
2513 return -errno; 2530 return -errno;
2514 } 2531 }
2515 return 0; 2532 return 0;
2516} 2533}
@@ -2520,14 +2537,14 @@ int drmCommandNone(int fd, unsigned long drmCommandIndex)
2520 * Send a device-specific read command. 2537 * Send a device-specific read command.
2521 * 2538 *
2522 * \param fd file descriptor. 2539 * \param fd file descriptor.
2523 * \param drmCommandIndex command index 2540 * \param drmCommandIndex command index
2524 * \param data destination pointer of the data to be read. 2541 * \param data destination pointer of the data to be read.
2525 * \param size size of the data to be read. 2542 * \param size size of the data to be read.
2526 * 2543 *
2527 * \return zero on success, or a negative value on failure. 2544 * \return zero on success, or a negative value on failure.
2528 * 2545 *
2529 * \internal 2546 * \internal
2530 * It issues a read ioctl given by 2547 * It issues a read ioctl given by
2531 * \code DRM_COMMAND_BASE + drmCommandIndex \endcode. 2548 * \code DRM_COMMAND_BASE + drmCommandIndex \endcode.
2532 */ 2549 */
2533int drmCommandRead(int fd, unsigned long drmCommandIndex, void *data, 2550int drmCommandRead(int fd, unsigned long drmCommandIndex, void *data,
@@ -2535,11 +2552,11 @@ int drmCommandRead(int fd, unsigned long drmCommandIndex, void *data,
2535{ 2552{
2536 unsigned long request; 2553 unsigned long request;
2537 2554
2538 request = DRM_IOC( DRM_IOC_READ, DRM_IOCTL_BASE, 2555 request = DRM_IOC( DRM_IOC_READ, DRM_IOCTL_BASE,
2539 DRM_COMMAND_BASE + drmCommandIndex, size); 2556 DRM_COMMAND_BASE + drmCommandIndex, size);
2540 2557
2541 if (drmIoctl(fd, request, data)) { 2558 if (drmIoctl(fd, request, data)) {
2542 return -errno; 2559 return -errno;
2543 } 2560 }
2544 return 0; 2561 return 0;
2545} 2562}
@@ -2549,14 +2566,14 @@ int drmCommandRead(int fd, unsigned long drmCommandIndex, void *data,
2549 * Send a device-specific write command. 2566 * Send a device-specific write command.
2550 * 2567 *
2551 * \param fd file descriptor. 2568 * \param fd file descriptor.
2552 * \param drmCommandIndex command index 2569 * \param drmCommandIndex command index
2553 * \param data source pointer of the data to be written. 2570 * \param data source pointer of the data to be written.
2554 * \param size size of the data to be written. 2571 * \param size size of the data to be written.
2555 * 2572 *
2556 * \return zero on success, or a negative value on failure. 2573 * \return zero on success, or a negative value on failure.
2557 * 2574 *
2558 * \internal 2575 * \internal
2559 * It issues a write ioctl given by 2576 * It issues a write ioctl given by
2560 * \code DRM_COMMAND_BASE + drmCommandIndex \endcode. 2577 * \code DRM_COMMAND_BASE + drmCommandIndex \endcode.
2561 */ 2578 */
2562int drmCommandWrite(int fd, unsigned long drmCommandIndex, void *data, 2579int drmCommandWrite(int fd, unsigned long drmCommandIndex, void *data,
@@ -2564,11 +2581,11 @@ int drmCommandWrite(int fd, unsigned long drmCommandIndex, void *data,
2564{ 2581{
2565 unsigned long request; 2582 unsigned long request;
2566 2583
2567 request = DRM_IOC( DRM_IOC_WRITE, DRM_IOCTL_BASE, 2584 request = DRM_IOC( DRM_IOC_WRITE, DRM_IOCTL_BASE,
2568 DRM_COMMAND_BASE + drmCommandIndex, size); 2585 DRM_COMMAND_BASE + drmCommandIndex, size);
2569 2586
2570 if (drmIoctl(fd, request, data)) { 2587 if (drmIoctl(fd, request, data)) {
2571 return -errno; 2588 return -errno;
2572 } 2589 }
2573 return 0; 2590 return 0;
2574} 2591}
@@ -2578,14 +2595,14 @@ int drmCommandWrite(int fd, unsigned long drmCommandIndex, void *data,
2578 * Send a device-specific read-write command. 2595 * Send a device-specific read-write command.
2579 * 2596 *
2580 * \param fd file descriptor. 2597 * \param fd file descriptor.
2581 * \param drmCommandIndex command index 2598 * \param drmCommandIndex command index
2582 * \param data source pointer of the data to be read and written. 2599 * \param data source pointer of the data to be read and written.
2583 * \param size size of the data to be read and written. 2600 * \param size size of the data to be read and written.
2584 * 2601 *
2585 * \return zero on success, or a negative value on failure. 2602 * \return zero on success, or a negative value on failure.
2586 * 2603 *
2587 * \internal 2604 * \internal
2588 * It issues a read-write ioctl given by 2605 * It issues a read-write ioctl given by
2589 * \code DRM_COMMAND_BASE + drmCommandIndex \endcode. 2606 * \code DRM_COMMAND_BASE + drmCommandIndex \endcode.
2590 */ 2607 */
2591int drmCommandWriteRead(int fd, unsigned long drmCommandIndex, void *data, 2608int drmCommandWriteRead(int fd, unsigned long drmCommandIndex, void *data,
@@ -2593,11 +2610,11 @@ int drmCommandWriteRead(int fd, unsigned long drmCommandIndex, void *data,
2593{ 2610{
2594 unsigned long request; 2611 unsigned long request;
2595 2612
2596 request = DRM_IOC( DRM_IOC_READ|DRM_IOC_WRITE, DRM_IOCTL_BASE, 2613 request = DRM_IOC( DRM_IOC_READ|DRM_IOC_WRITE, DRM_IOCTL_BASE,
2597 DRM_COMMAND_BASE + drmCommandIndex, size); 2614 DRM_COMMAND_BASE + drmCommandIndex, size);
2598 2615
2599 if (drmIoctl(fd, request, data)) 2616 if (drmIoctl(fd, request, data))
2600 return -errno; 2617 return -errno;
2601 return 0; 2618 return 0;
2602} 2619}
2603 2620
@@ -2611,9 +2628,9 @@ static struct {
2611 2628
2612static int nr_fds = 0; 2629static int nr_fds = 0;
2613 2630
2614int drmOpenOnce(void *unused, 2631int drmOpenOnce(void *unused,
2615 const char *BusID, 2632 const char *BusID,
2616 int *newlyopened) 2633 int *newlyopened)
2617{ 2634{
2618 return drmOpenOnceWithType(BusID, newlyopened, DRM_NODE_PRIMARY); 2635 return drmOpenOnceWithType(BusID, newlyopened, DRM_NODE_PRIMARY);
2619} 2636}
@@ -2622,19 +2639,19 @@ int drmOpenOnceWithType(const char *BusID, int *newlyopened, int type)
2622{ 2639{
2623 int i; 2640 int i;
2624 int fd; 2641 int fd;
2625 2642
2626 for (i = 0; i < nr_fds; i++) 2643 for (i = 0; i < nr_fds; i++)
2627 if ((strcmp(BusID, connection[i].BusID) == 0) && 2644 if ((strcmp(BusID, connection[i].BusID) == 0) &&
2628 (connection[i].type == type)) { 2645 (connection[i].type == type)) {
2629 connection[i].refcount++; 2646 connection[i].refcount++;
2630 *newlyopened = 0; 2647 *newlyopened = 0;
2631 return connection[i].fd; 2648 return connection[i].fd;
2632 } 2649 }
2633 2650
2634 fd = drmOpenWithType(NULL, BusID, type); 2651 fd = drmOpenWithType(NULL, BusID, type);
2635 if (fd < 0 || nr_fds == DRM_MAX_FDS) 2652 if (fd < 0 || nr_fds == DRM_MAX_FDS)
2636 return fd; 2653 return fd;
2637 2654
2638 connection[nr_fds].BusID = strdup(BusID); 2655 connection[nr_fds].BusID = strdup(BusID);
2639 connection[nr_fds].fd = fd; 2656 connection[nr_fds].fd = fd;
2640 connection[nr_fds].refcount = 1; 2657 connection[nr_fds].refcount = 1;
@@ -2642,9 +2659,9 @@ int drmOpenOnceWithType(const char *BusID, int *newlyopened, int type)
2642 *newlyopened = 1; 2659 *newlyopened = 1;
2643 2660
2644 if (0) 2661 if (0)
2645 fprintf(stderr, "saved connection %d for %s %d\n", 2662 fprintf(stderr, "saved connection %d for %s %d\n",
2646 nr_fds, connection[nr_fds].BusID, 2663 nr_fds, connection[nr_fds].BusID,
2647 strcmp(BusID, connection[nr_fds].BusID)); 2664 strcmp(BusID, connection[nr_fds].BusID));
2648 2665
2649 nr_fds++; 2666 nr_fds++;
2650 2667
@@ -2656,181 +2673,262 @@ void drmCloseOnce(int fd)
2656 int i; 2673 int i;
2657 2674
2658 for (i = 0; i < nr_fds; i++) { 2675 for (i = 0; i < nr_fds; i++) {
2659 if (fd == connection[i].fd) { 2676 if (fd == connection[i].fd) {
2660 if (--connection[i].refcount == 0) { 2677 if (--connection[i].refcount == 0) {
2661 drmClose(connection[i].fd); 2678 drmClose(connection[i].fd);
2662 free(connection[i].BusID); 2679 free(connection[i].BusID);
2663 2680
2664 if (i < --nr_fds) 2681 if (i < --nr_fds)
2665 connection[i] = connection[nr_fds]; 2682 connection[i] = connection[nr_fds];
2666 2683
2667 return; 2684 return;
2668 } 2685 }
2669 } 2686 }
2670 } 2687 }
2671} 2688}
2672 2689
2673int drmSetMaster(int fd) 2690int drmSetMaster(int fd)
2674{ 2691{
2675 return drmIoctl(fd, DRM_IOCTL_SET_MASTER, NULL); 2692 return drmIoctl(fd, DRM_IOCTL_SET_MASTER, NULL);
2676} 2693}
2677 2694
2678int drmDropMaster(int fd) 2695int drmDropMaster(int fd)
2679{ 2696{
2680 return drmIoctl(fd, DRM_IOCTL_DROP_MASTER, NULL); 2697 return drmIoctl(fd, DRM_IOCTL_DROP_MASTER, NULL);
2681} 2698}
2682 2699
2683char *drmGetDeviceNameFromFd(int fd) 2700char *drmGetDeviceNameFromFd(int fd)
2684{ 2701{
2685 char name[128]; 2702 char name[128];
2686 struct stat sbuf; 2703 struct stat sbuf;
2687 dev_t d; 2704 dev_t d;
2688 int i; 2705 int i;
2689 2706
2690 /* The whole drmOpen thing is a fiasco and we need to find a way 2707 /* The whole drmOpen thing is a fiasco and we need to find a way
2691 * back to just using open(2). For now, however, lets just make 2708 * back to just using open(2). For now, however, lets just make
2692 * things worse with even more ad hoc directory walking code to 2709 * things worse with even more ad hoc directory walking code to
2693 * discover the device file name. */ 2710 * discover the device file name. */
2694 2711
2695 fstat(fd, &sbuf); 2712 fstat(fd, &sbuf);
2696 d = sbuf.st_rdev; 2713 d = sbuf.st_rdev;
2697 2714
2698 for (i = 0; i < DRM_MAX_MINOR; i++) { 2715 for (i = 0; i < DRM_MAX_MINOR; i++) {
2699 snprintf(name, sizeof name, DRM_DEV_NAME, DRM_DIR_NAME, i); 2716 snprintf(name, sizeof name, DRM_DEV_NAME, DRM_DIR_NAME, i);
2700 if (stat(name, &sbuf) == 0 && sbuf.st_rdev == d) 2717 if (stat(name, &sbuf) == 0 && sbuf.st_rdev == d)
2701 break; 2718 break;
2702 } 2719 }
2703 if (i == DRM_MAX_MINOR) 2720 if (i == DRM_MAX_MINOR)
2704 return NULL; 2721 return NULL;
2705 2722
2706 return strdup(name); 2723 return strdup(name);
2707} 2724}
2708 2725
2709int drmGetNodeTypeFromFd(int fd) 2726int drmGetNodeTypeFromFd(int fd)
2710{ 2727{
2711 struct stat sbuf; 2728 struct stat sbuf;
2712 int maj, min, type; 2729 int maj, min, type;
2713 2730
2714 if (fstat(fd, &sbuf)) 2731 if (fstat(fd, &sbuf))
2715 return -1; 2732 return -1;
2716 2733
2717 maj = major(sbuf.st_rdev); 2734 maj = major(sbuf.st_rdev);
2718 min = minor(sbuf.st_rdev); 2735 min = minor(sbuf.st_rdev);
2719 2736
2720 if (maj != DRM_MAJOR || !S_ISCHR(sbuf.st_mode)) { 2737 if (maj != DRM_MAJOR || !S_ISCHR(sbuf.st_mode)) {
2721 errno = EINVAL; 2738 errno = EINVAL;
2722 return -1; 2739 return -1;
2723 } 2740 }
2724 2741
2725 type = drmGetMinorType(min); 2742 type = drmGetMinorType(min);
2726 if (type == -1) 2743 if (type == -1)
2727 errno = ENODEV; 2744 errno = ENODEV;
2728 return type; 2745 return type;
2729} 2746}
2730 2747
2731int drmPrimeHandleToFD(int fd, uint32_t handle, uint32_t flags, int *prime_fd) 2748int drmPrimeHandleToFD(int fd, uint32_t handle, uint32_t flags, int *prime_fd)
2732{ 2749{
2733 struct drm_prime_handle args; 2750 struct drm_prime_handle args;
2734 int ret; 2751 int ret;
2735 2752
2736 memclear(args); 2753 memclear(args);
2737 args.fd = -1; 2754 args.fd = -1;
2738 args.handle = handle; 2755 args.handle = handle;
2739 args.flags = flags; 2756 args.flags = flags;
2740 ret = drmIoctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args); 2757 ret = drmIoctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
2741 if (ret) 2758 if (ret)
2742 return ret; 2759 return ret;
2743 2760
2744 *prime_fd = args.fd; 2761 *prime_fd = args.fd;
2745 return 0; 2762 return 0;
2746} 2763}
2747 2764
2748int drmPrimeFDToHandle(int fd, int prime_fd, uint32_t *handle) 2765int drmPrimeFDToHandle(int fd, int prime_fd, uint32_t *handle)
2749{ 2766{
2750 struct drm_prime_handle args; 2767 struct drm_prime_handle args;
2751 int ret; 2768 int ret;
2752 2769
2753 memclear(args); 2770 memclear(args);
2754 args.fd = prime_fd; 2771 args.fd = prime_fd;
2755 ret = drmIoctl(fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args); 2772 ret = drmIoctl(fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args);
2756 if (ret) 2773 if (ret)
2757 return ret; 2774 return ret;
2758 2775
2759 *handle = args.handle; 2776 *handle = args.handle;
2760 return 0; 2777 return 0;
2761} 2778}
2762 2779
2763static char *drmGetMinorNameForFD(int fd, int type) 2780static char *drmGetMinorNameForFD(int fd, int type)
2764{ 2781{
2765#ifdef __linux__ 2782#ifdef __linux__
2766 DIR *sysdir; 2783 DIR *sysdir;
2767 struct dirent *pent, *ent; 2784 struct dirent *pent, *ent;
2768 struct stat sbuf; 2785 struct stat sbuf;
2769 const char *name = drmGetMinorName(type); 2786 const char *name = drmGetMinorName(type);
2770 int len; 2787 int len;
2771 char dev_name[64], buf[64]; 2788 char dev_name[64], buf[64];
2772 long name_max; 2789 long name_max;
2773 int maj, min; 2790 int maj, min;
2774 2791
2775 if (!name) 2792 if (!name)
2776 return NULL; 2793 return NULL;
2777 2794
2778 len = strlen(name); 2795 len = strlen(name);
2779 2796
2780 if (fstat(fd, &sbuf)) 2797 if (fstat(fd, &sbuf))
2781 return NULL; 2798 return NULL;
2782 2799
2783 maj = major(sbuf.st_rdev); 2800 maj = major(sbuf.st_rdev);
2784 min = minor(sbuf.st_rdev); 2801 min = minor(sbuf.st_rdev);
2785 2802
2786 if (maj != DRM_MAJOR || !S_ISCHR(sbuf.st_mode)) 2803 if (maj != DRM_MAJOR || !S_ISCHR(sbuf.st_mode))
2787 return NULL; 2804 return NULL;
2788 2805
2789 snprintf(buf, sizeof(buf), "/sys/dev/char/%d:%d/device/drm", maj, min); 2806 snprintf(buf, sizeof(buf), "/sys/dev/char/%d:%d/device/drm", maj, min);
2790 2807
2791 sysdir = opendir(buf); 2808 sysdir = opendir(buf);
2792 if (!sysdir) 2809 if (!sysdir)
2793 return NULL; 2810 return NULL;
2794 2811
2795 name_max = fpathconf(dirfd(sysdir), _PC_NAME_MAX); 2812 name_max = fpathconf(dirfd(sysdir), _PC_NAME_MAX);
2796 if (name_max == -1) 2813 if (name_max == -1)
2797 goto out_close_dir; 2814 goto out_close_dir;
2798 2815
2799 pent = malloc(offsetof(struct dirent, d_name) + name_max + 1); 2816 pent = malloc(offsetof(struct dirent, d_name) + name_max + 1);
2800 if (pent == NULL) 2817 if (pent == NULL)
2801 goto out_close_dir; 2818 goto out_close_dir;
2802 2819
2803 while (readdir_r(sysdir, pent, &ent) == 0 && ent != NULL) { 2820 while (readdir_r(sysdir, pent, &ent) == 0 && ent != NULL) {
2804 if (strncmp(ent->d_name, name, len) == 0) { 2821 if (strncmp(ent->d_name, name, len) == 0) {
2805 snprintf(dev_name, sizeof(dev_name), DRM_DIR_NAME "/%s", 2822 snprintf(dev_name, sizeof(dev_name), DRM_DIR_NAME "/%s",
2806 ent->d_name); 2823 ent->d_name);
2807 2824
2808 free(pent); 2825 free(pent);
2809 closedir(sysdir); 2826 closedir(sysdir);
2810 2827
2811 return strdup(dev_name); 2828 return strdup(dev_name);
2812 } 2829 }
2813 } 2830 }
2814 2831
2815 free(pent); 2832 free(pent);
2816 2833
2817out_close_dir: 2834out_close_dir:
2818 closedir(sysdir); 2835 closedir(sysdir);
2819#else 2836#else
2820#warning "Missing implementation of drmGetMinorNameForFD" 2837 struct stat sbuf;
2838 char buf[PATH_MAX + 1];
2839 const char *dev_name;
2840 unsigned int maj, min;
2841 int n, base;
2842
2843 if (fstat(fd, &sbuf))
2844 return NULL;
2845
2846 maj = major(sbuf.st_rdev);
2847 min = minor(sbuf.st_rdev);
2848
2849 if (maj != DRM_MAJOR || !S_ISCHR(sbuf.st_mode))
2850 return NULL;
2851
2852 switch (type) {
2853 case DRM_NODE_PRIMARY:
2854 dev_name = DRM_DEV_NAME;
2855 break;
2856 case DRM_NODE_CONTROL:
2857 dev_name = DRM_CONTROL_DEV_NAME;
2858 break;
2859 case DRM_NODE_RENDER:
2860 dev_name = DRM_RENDER_DEV_NAME;
2861 break;
2862 default:
2863 return NULL;
2864 };
2865
2866 base = drmGetMinorBase(type);
2867 if (base < 0)
2868 return NULL;
2869
2870 n = snprintf(buf, sizeof(buf), dev_name, DRM_DIR_NAME, min - base);
2871 if (n == -1 || n >= sizeof(buf))
2872 return NULL;
2873
2874 return strdup(buf);
2821#endif 2875#endif
2822 return NULL; 2876 return NULL;
2823} 2877}
2824 2878
2825char *drmGetPrimaryDeviceNameFromFd(int fd) 2879char *drmGetPrimaryDeviceNameFromFd(int fd)
2826{ 2880{
2827 return drmGetMinorNameForFD(fd, DRM_NODE_PRIMARY); 2881 return drmGetMinorNameForFD(fd, DRM_NODE_PRIMARY);
2828} 2882}
2829 2883
2830char *drmGetRenderDeviceNameFromFd(int fd) 2884char *drmGetRenderDeviceNameFromFd(int fd)
2831{ 2885{
2832 return drmGetMinorNameForFD(fd, DRM_NODE_RENDER); 2886 return drmGetMinorNameForFD(fd, DRM_NODE_RENDER);
2887}
2888
2889#ifdef __linux__
2890static char * DRM_PRINTFLIKE(2, 3)
2891sysfs_uevent_get(const char *path, const char *fmt, ...)
2892{
2893 char filename[PATH_MAX + 1], *key, *line = NULL, *value = NULL;
2894 size_t size = 0, len;
2895 ssize_t num;
2896 va_list ap;
2897 FILE *fp;
2898
2899 va_start(ap, fmt);
2900 num = vasprintf(&key, fmt, ap);
2901 va_end(ap);
2902 len = num;
2903
2904 snprintf(filename, sizeof(filename), "%s/uevent", path);
2905
2906 fp = fopen(filename, "r");
2907 if (!fp) {
2908 free(key);
2909 return NULL;
2910 }
2911
2912 while ((num = getline(&line, &size, fp)) >= 0) {
2913 if ((strncmp(line, key, len) == 0) && (line[len] == '=')) {
2914 char *start = line + len + 1, *end = line + num - 1;
2915
2916 if (*end != '\n')
2917 end++;
2918
2919 value = strndup(start, end - start);
2920 break;
2921 }
2922 }
2923
2924 free(line);
2925 fclose(fp);
2926
2927 free(key);
2928
2929 return value;
2833} 2930}
2931#endif
2834 2932
2835static int drmParseSubsystemType(int maj, int min) 2933static int drmParseSubsystemType(int maj, int min)
2836{ 2934{
@@ -2852,7 +2950,18 @@ static int drmParseSubsystemType(int maj, int min)
2852 if (strncmp(name, "/pci", 4) == 0) 2950 if (strncmp(name, "/pci", 4) == 0)
2853 return DRM_BUS_PCI; 2951 return DRM_BUS_PCI;
2854 2952
2953 if (strncmp(name, "/usb", 4) == 0)
2954 return DRM_BUS_USB;
2955
2956 if (strncmp(name, "/platform", 9) == 0)
2957 return DRM_BUS_PLATFORM;
2958
2959 if (strncmp(name, "/host1x", 7) == 0)
2960 return DRM_BUS_HOST1X;
2961
2855 return -EINVAL; 2962 return -EINVAL;
2963#elif defined(__OpenBSD__)
2964 return DRM_BUS_PCI;
2856#else 2965#else
2857#warning "Missing implementation of drmParseSubsystemType" 2966#warning "Missing implementation of drmParseSubsystemType"
2858 return -EINVAL; 2967 return -EINVAL;
@@ -2862,31 +2971,21 @@ static int drmParseSubsystemType(int maj, int min)
2862static int drmParsePciBusInfo(int maj, int min, drmPciBusInfoPtr info) 2971static int drmParsePciBusInfo(int maj, int min, drmPciBusInfoPtr info)
2863{ 2972{
2864#ifdef __linux__ 2973#ifdef __linux__
2865 char path[PATH_MAX + 1]; 2974 unsigned int domain, bus, dev, func;
2866 char data[128]; 2975 char path[PATH_MAX + 1], *value;
2867 char *str; 2976 int num;
2868 int domain, bus, dev, func;
2869 int fd, ret;
2870 2977
2871 snprintf(path, PATH_MAX, "/sys/dev/char/%d:%d/device/uevent", maj, min); 2978 snprintf(path, sizeof(path), "/sys/dev/char/%d:%d/device", maj, min);
2872 fd = open(path, O_RDONLY);
2873 if (fd < 0)
2874 return -errno;
2875 2979
2876 ret = read(fd, data, sizeof(data)); 2980 value = sysfs_uevent_get(path, "PCI_SLOT_NAME");
2877 close(fd); 2981 if (!value)
2878 if (ret < 0) 2982 return -ENOENT;
2879 return -errno;
2880 2983
2881#define TAG "PCI_SLOT_NAME=" 2984 num = sscanf(value, "%04x:%02x:%02x.%1u", &domain, &bus, &dev, &func);
2882 str = strstr(data, TAG); 2985 free(value);
2883 if (str == NULL)
2884 return -EINVAL;
2885 2986
2886 if (sscanf(str, TAG "%04x:%02x:%02x.%1u", 2987 if (num != 4)
2887 &domain, &bus, &dev, &func) != 4)
2888 return -EINVAL; 2988 return -EINVAL;
2889#undef TAG
2890 2989
2891 info->domain = domain; 2990 info->domain = domain;
2892 info->bus = bus; 2991 info->bus = bus;
@@ -2894,6 +2993,30 @@ static int drmParsePciBusInfo(int maj, int min, drmPciBusInfoPtr info)
2894 info->func = func; 2993 info->func = func;
2895 2994
2896 return 0; 2995 return 0;
2996#elif defined(__OpenBSD__)
2997 struct drm_pciinfo pinfo;
2998 int fd, type;
2999
3000 type = drmGetMinorType(min);
3001 if (type == -1)
3002 return -ENODEV;
3003
3004 fd = drmOpenMinor(min, 0, type);
3005 if (fd < 0)
3006 return -errno;
3007
3008 if (drmIoctl(fd, DRM_IOCTL_GET_PCIINFO, &pinfo)) {
3009 close(fd);
3010 return -errno;
3011 }
3012 close(fd);
3013
3014 info->domain = pinfo.domain;
3015 info->bus = pinfo.bus;
3016 info->dev = pinfo.dev;
3017 info->func = pinfo.func;
3018
3019 return 0;
2897#else 3020#else
2898#warning "Missing implementation of drmParsePciBusInfo" 3021#warning "Missing implementation of drmParsePciBusInfo"
2899 return -EINVAL; 3022 return -EINVAL;
@@ -2911,6 +3034,16 @@ static int drmCompareBusInfo(drmDevicePtr a, drmDevicePtr b)
2911 switch (a->bustype) { 3034 switch (a->bustype) {
2912 case DRM_BUS_PCI: 3035 case DRM_BUS_PCI:
2913 return memcmp(a->businfo.pci, b->businfo.pci, sizeof(drmPciBusInfo)); 3036 return memcmp(a->businfo.pci, b->businfo.pci, sizeof(drmPciBusInfo));
3037
3038 case DRM_BUS_USB:
3039 return memcmp(a->businfo.usb, b->businfo.usb, sizeof(drmUsbBusInfo));
3040
3041 case DRM_BUS_PLATFORM:
3042 return memcmp(a->businfo.platform, b->businfo.platform, sizeof(drmPlatformBusInfo));
3043
3044 case DRM_BUS_HOST1X:
3045 return memcmp(a->businfo.host1x, b->businfo.host1x, sizeof(drmHost1xBusInfo));
3046
2914 default: 3047 default:
2915 break; 3048 break;
2916 } 3049 }
@@ -2941,18 +3074,58 @@ static int drmGetMaxNodeName(void)
2941 MAX3(sizeof(DRM_PRIMARY_MINOR_NAME), 3074 MAX3(sizeof(DRM_PRIMARY_MINOR_NAME),
2942 sizeof(DRM_CONTROL_MINOR_NAME), 3075 sizeof(DRM_CONTROL_MINOR_NAME),
2943 sizeof(DRM_RENDER_MINOR_NAME)) + 3076 sizeof(DRM_RENDER_MINOR_NAME)) +
2944 3 /* lenght of the node number */; 3077 3 /* length of the node number */;
2945} 3078}
2946 3079
2947static int drmParsePciDeviceInfo(const char *d_name,
2948 drmPciDeviceInfoPtr device)
2949{
2950#ifdef __linux__ 3080#ifdef __linux__
3081static int parse_separate_sysfs_files(int maj, int min,
3082 drmPciDeviceInfoPtr device,
3083 bool ignore_revision)
3084{
3085#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
3086 static const char *attrs[] = {
3087 "revision", /* Older kernels are missing the file, so check for it first */
3088 "vendor",
3089 "device",
3090 "subsystem_vendor",
3091 "subsystem_device",
3092 };
3093 char path[PATH_MAX + 1];
3094 unsigned int data[ARRAY_SIZE(attrs)];
3095 FILE *fp;
3096 int ret;
3097
3098 for (unsigned i = ignore_revision ? 1 : 0; i < ARRAY_SIZE(attrs); i++) {
3099 snprintf(path, PATH_MAX, "/sys/dev/char/%d:%d/device/%s", maj, min,
3100 attrs[i]);
3101 fp = fopen(path, "r");
3102 if (!fp)
3103 return -errno;
3104
3105 ret = fscanf(fp, "%x", &data[i]);
3106 fclose(fp);
3107 if (ret != 1)
3108 return -errno;
3109
3110 }
3111
3112 device->revision_id = ignore_revision ? 0xff : data[0] & 0xff;
3113 device->vendor_id = data[1] & 0xffff;
3114 device->device_id = data[2] & 0xffff;
3115 device->subvendor_id = data[3] & 0xffff;
3116 device->subdevice_id = data[4] & 0xffff;
3117
3118 return 0;
3119}
3120
3121static int parse_config_sysfs_file(int maj, int min,
3122 drmPciDeviceInfoPtr device)
3123{
2951 char path[PATH_MAX + 1]; 3124 char path[PATH_MAX + 1];
2952 unsigned char config[64]; 3125 unsigned char config[64];
2953 int fd, ret; 3126 int fd, ret;
2954 3127
2955 snprintf(path, PATH_MAX, "/sys/class/drm/%s/device/config", d_name); 3128 snprintf(path, PATH_MAX, "/sys/dev/char/%d:%d/device/config", maj, min);
2956 fd = open(path, O_RDONLY); 3129 fd = open(path, O_RDONLY);
2957 if (fd < 0) 3130 if (fd < 0)
2958 return -errno; 3131 return -errno;
@@ -2969,17 +3142,101 @@ static int drmParsePciDeviceInfo(const char *d_name,
2969 device->subdevice_id = config[46] | (config[47] << 8); 3142 device->subdevice_id = config[46] | (config[47] << 8);
2970 3143
2971 return 0; 3144 return 0;
3145}
3146#endif
3147
3148static int drmParsePciDeviceInfo(int maj, int min,
3149 drmPciDeviceInfoPtr device,
3150 uint32_t flags)
3151{
3152#ifdef __linux__
3153 if (!(flags & DRM_DEVICE_GET_PCI_REVISION))
3154 return parse_separate_sysfs_files(maj, min, device, true);
3155
3156 if (parse_separate_sysfs_files(maj, min, device, false))
3157 return parse_config_sysfs_file(maj, min, device);
3158
3159 return 0;
3160#elif defined(__OpenBSD__)
3161 struct drm_pciinfo pinfo;
3162 int fd, type;
3163
3164 type = drmGetMinorType(min);
3165 if (type == -1)
3166 return -ENODEV;
3167
3168 fd = drmOpenMinor(min, 0, type);
3169 if (fd < 0)
3170 return -errno;
3171
3172 if (drmIoctl(fd, DRM_IOCTL_GET_PCIINFO, &pinfo)) {
3173 close(fd);
3174 return -errno;
3175 }
3176 close(fd);
3177
3178 device->vendor_id = pinfo.vendor_id;
3179 device->device_id = pinfo.device_id;
3180 device->revision_id = pinfo.revision_id;
3181 device->subvendor_id = pinfo.subvendor_id;
3182 device->subdevice_id = pinfo.subdevice_id;
3183
3184 return 0;
2972#else 3185#else
2973#warning "Missing implementation of drmParsePciDeviceInfo" 3186#warning "Missing implementation of drmParsePciDeviceInfo"
2974 return -EINVAL; 3187 return -EINVAL;
2975#endif 3188#endif
2976} 3189}
2977 3190
3191static void drmFreePlatformDevice(drmDevicePtr device)
3192{
3193 if (device->deviceinfo.platform) {
3194 if (device->deviceinfo.platform->compatible) {
3195 char **compatible = device->deviceinfo.platform->compatible;
3196
3197 while (*compatible) {
3198 free(*compatible);
3199 compatible++;
3200 }
3201
3202 free(device->deviceinfo.platform->compatible);
3203 }
3204 }
3205}
3206
3207static void drmFreeHost1xDevice(drmDevicePtr device)
3208{
3209 if (device->deviceinfo.host1x) {
3210 if (device->deviceinfo.host1x->compatible) {
3211 char **compatible = device->deviceinfo.host1x->compatible;
3212
3213 while (*compatible) {
3214 free(*compatible);
3215 compatible++;
3216 }
3217
3218 free(device->deviceinfo.host1x->compatible);
3219 }
3220 }
3221}
3222
2978void drmFreeDevice(drmDevicePtr *device) 3223void drmFreeDevice(drmDevicePtr *device)
2979{ 3224{
2980 if (device == NULL) 3225 if (device == NULL)
2981 return; 3226 return;
2982 3227
3228 if (*device) {
3229 switch ((*device)->bustype) {
3230 case DRM_BUS_PLATFORM:
3231 drmFreePlatformDevice(*device);
3232 break;
3233
3234 case DRM_BUS_HOST1X:
3235 drmFreeHost1xDevice(*device);
3236 break;
3237 }
3238 }
3239
2983 free(*device); 3240 free(*device);
2984 *device = NULL; 3241 *device = NULL;
2985} 3242}
@@ -2991,63 +3248,415 @@ void drmFreeDevices(drmDevicePtr devices[], int count)
2991 if (devices == NULL) 3248 if (devices == NULL)
2992 return; 3249 return;
2993 3250
2994 for (i = 0; i < count && devices[i] != NULL; i++) 3251 for (i = 0; i < count; i++)
2995 drmFreeDevice(&devices[i]); 3252 if (devices[i])
3253 drmFreeDevice(&devices[i]);
2996} 3254}
2997 3255
2998static int drmProcessPciDevice(drmDevicePtr *device, const char *d_name, 3256static drmDevicePtr drmDeviceAlloc(unsigned int type, const char *node,
2999 const char *node, int node_type, 3257 size_t bus_size, size_t device_size,
3000 int maj, int min, bool fetch_deviceinfo) 3258 char **ptrp)
3001{ 3259{
3002 const int max_node_str = drmGetMaxNodeName(); 3260 size_t max_node_length, extra, size;
3003 int ret, i; 3261 drmDevicePtr device;
3004 char *addr; 3262 unsigned int i;
3263 char *ptr;
3005 3264
3006 *device = calloc(1, sizeof(drmDevice) + 3265 max_node_length = ALIGN(drmGetMaxNodeName(), sizeof(void *));
3007 (DRM_NODE_MAX * (sizeof(void *) + max_node_str)) + 3266 extra = DRM_NODE_MAX * (sizeof(void *) + max_node_length);
3008 sizeof(drmPciBusInfo) + 3267
3009 sizeof(drmPciDeviceInfo)); 3268 size = sizeof(*device) + extra + bus_size + device_size;
3010 if (!*device) 3269
3011 return -ENOMEM; 3270 device = calloc(1, size);
3271 if (!device)
3272 return NULL;
3273
3274 device->available_nodes = 1 << type;
3012 3275
3013 addr = (char*)*device; 3276 ptr = (char *)device + sizeof(*device);
3014 3277 device->nodes = (char **)ptr;
3015 (*device)->bustype = DRM_BUS_PCI;
3016 (*device)->available_nodes = 1 << node_type;
3017 3278
3018 addr += sizeof(drmDevice); 3279 ptr += DRM_NODE_MAX * sizeof(void *);
3019 (*device)->nodes = (char**)addr;
3020 3280
3021 addr += DRM_NODE_MAX * sizeof(void *);
3022 for (i = 0; i < DRM_NODE_MAX; i++) { 3281 for (i = 0; i < DRM_NODE_MAX; i++) {
3023 (*device)->nodes[i] = addr; 3282 device->nodes[i] = ptr;
3024 addr += max_node_str; 3283 ptr += max_node_length;
3025 } 3284 }
3026 memcpy((*device)->nodes[node_type], node, max_node_str);
3027 3285
3028 (*device)->businfo.pci = (drmPciBusInfoPtr)addr; 3286 memcpy(device->nodes[type], node, max_node_length);
3287
3288 *ptrp = ptr;
3289
3290 return device;
3291}
3292
3293static int drmProcessPciDevice(drmDevicePtr *device,
3294 const char *node, int node_type,
3295 int maj, int min, bool fetch_deviceinfo,
3296 uint32_t flags)
3297{
3298 drmDevicePtr dev;
3299 char *addr;
3300 int ret;
3301
3302 dev = drmDeviceAlloc(node_type, node, sizeof(drmPciBusInfo),
3303 sizeof(drmPciDeviceInfo), &addr);
3304 if (!dev)
3305 return -ENOMEM;
3306
3307 dev->bustype = DRM_BUS_PCI;
3308
3309 dev->businfo.pci = (drmPciBusInfoPtr)addr;
3029 3310
3030 ret = drmParsePciBusInfo(maj, min, (*device)->businfo.pci); 3311 ret = drmParsePciBusInfo(maj, min, dev->businfo.pci);
3031 if (ret) 3312 if (ret)
3032 goto free_device; 3313 goto free_device;
3033 3314
3034 // Fetch the device info if the user has requested it 3315 // Fetch the device info if the user has requested it
3035 if (fetch_deviceinfo) { 3316 if (fetch_deviceinfo) {
3036 addr += sizeof(drmPciBusInfo); 3317 addr += sizeof(drmPciBusInfo);
3037 (*device)->deviceinfo.pci = (drmPciDeviceInfoPtr)addr; 3318 dev->deviceinfo.pci = (drmPciDeviceInfoPtr)addr;
3038 3319
3039 ret = drmParsePciDeviceInfo(d_name, (*device)->deviceinfo.pci); 3320 ret = drmParsePciDeviceInfo(maj, min, dev->deviceinfo.pci, flags);
3040 if (ret) 3321 if (ret)
3041 goto free_device; 3322 goto free_device;
3042 } 3323 }
3324
3325 *device = dev;
3326
3043 return 0; 3327 return 0;
3044 3328
3045free_device: 3329free_device:
3046 free(*device); 3330 free(dev);
3047 *device = NULL; 3331 return ret;
3332}
3333
3334static int drmParseUsbBusInfo(int maj, int min, drmUsbBusInfoPtr info)
3335{
3336#ifdef __linux__
3337 char path[PATH_MAX + 1], *value;
3338 unsigned int bus, dev;
3339 int ret;
3340
3341 snprintf(path, sizeof(path), "/sys/dev/char/%d:%d/device", maj, min);
3342
3343 value = sysfs_uevent_get(path, "BUSNUM");
3344 if (!value)
3345 return -ENOENT;
3346
3347 ret = sscanf(value, "%03u", &bus);
3348 free(value);
3349
3350 if (ret <= 0)
3351 return -errno;
3352
3353 value = sysfs_uevent_get(path, "DEVNUM");
3354 if (!value)
3355 return -ENOENT;
3356
3357 ret = sscanf(value, "%03u", &dev);
3358 free(value);
3359
3360 if (ret <= 0)
3361 return -errno;
3362
3363 info->bus = bus;
3364 info->dev = dev;
3365
3366 return 0;
3367#else
3368#warning "Missing implementation of drmParseUsbBusInfo"
3369 return -EINVAL;
3370#endif
3371}
3372
3373static int drmParseUsbDeviceInfo(int maj, int min, drmUsbDeviceInfoPtr info)
3374{
3375#ifdef __linux__
3376 char path[PATH_MAX + 1], *value;
3377 unsigned int vendor, product;
3378 int ret;
3379
3380 snprintf(path, sizeof(path), "/sys/dev/char/%d:%d/device", maj, min);
3381
3382 value = sysfs_uevent_get(path, "PRODUCT");
3383 if (!value)
3384 return -ENOENT;
3385
3386 ret = sscanf(value, "%x/%x", &vendor, &product);
3387 free(value);
3388
3389 if (ret <= 0)
3390 return -errno;
3391
3392 info->vendor = vendor;
3393 info->product = product;
3394
3395 return 0;
3396#else
3397#warning "Missing implementation of drmParseUsbDeviceInfo"
3398 return -EINVAL;
3399#endif
3400}
3401
3402static int drmProcessUsbDevice(drmDevicePtr *device, const char *node,
3403 int node_type, int maj, int min,
3404 bool fetch_deviceinfo, uint32_t flags)
3405{
3406 drmDevicePtr dev;
3407 char *ptr;
3408 int ret;
3409
3410 dev = drmDeviceAlloc(node_type, node, sizeof(drmUsbBusInfo),
3411 sizeof(drmUsbDeviceInfo), &ptr);
3412 if (!dev)
3413 return -ENOMEM;
3414
3415 dev->bustype = DRM_BUS_USB;
3416
3417 dev->businfo.usb = (drmUsbBusInfoPtr)ptr;
3418
3419 ret = drmParseUsbBusInfo(maj, min, dev->businfo.usb);
3420 if (ret < 0)
3421 goto free_device;
3422
3423 if (fetch_deviceinfo) {
3424 ptr += sizeof(drmUsbBusInfo);
3425 dev->deviceinfo.usb = (drmUsbDeviceInfoPtr)ptr;
3426
3427 ret = drmParseUsbDeviceInfo(maj, min, dev->deviceinfo.usb);
3428 if (ret < 0)
3429 goto free_device;
3430 }
3431
3432 *device = dev;
3433
3434 return 0;
3435
3436free_device:
3437 free(dev);
3438 return ret;
3439}
3440
3441static int drmParsePlatformBusInfo(int maj, int min, drmPlatformBusInfoPtr info)
3442{
3443#ifdef __linux__
3444 char path[PATH_MAX + 1], *name;
3445
3446 snprintf(path, sizeof(path), "/sys/dev/char/%d:%d/device", maj, min);
3447
3448 name = sysfs_uevent_get(path, "OF_FULLNAME");
3449 if (!name)
3450 return -ENOENT;
3451
3452 strncpy(info->fullname, name, DRM_PLATFORM_DEVICE_NAME_LEN);
3453 info->fullname[DRM_PLATFORM_DEVICE_NAME_LEN - 1] = '\0';
3454 free(name);
3455
3456 return 0;
3457#else
3458#warning "Missing implementation of drmParsePlatformBusInfo"
3459 return -EINVAL;
3460#endif
3461}
3462
3463static int drmParsePlatformDeviceInfo(int maj, int min,
3464 drmPlatformDeviceInfoPtr info)
3465{
3466#ifdef __linux__
3467 char path[PATH_MAX + 1], *value;
3468 unsigned int count, i;
3469 int err;
3470
3471 snprintf(path, sizeof(path), "/sys/dev/char/%d:%d/device", maj, min);
3472
3473 value = sysfs_uevent_get(path, "OF_COMPATIBLE_N");
3474 if (!value)
3475 return -ENOENT;
3476
3477 sscanf(value, "%u", &count);
3478 free(value);
3479
3480 info->compatible = calloc(count + 1, sizeof(*info->compatible));
3481 if (!info->compatible)
3482 return -ENOMEM;
3483
3484 for (i = 0; i < count; i++) {
3485 value = sysfs_uevent_get(path, "OF_COMPATIBLE_%u", i);
3486 if (!value) {
3487 err = -ENOENT;
3488 goto free;
3489 }
3490
3491 info->compatible[i] = value;
3492 }
3493
3494 return 0;
3495
3496free:
3497 while (i--)
3498 free(info->compatible[i]);
3499
3500 free(info->compatible);
3501 return err;
3502#else
3503#warning "Missing implementation of drmParsePlatformDeviceInfo"
3504 return -EINVAL;
3505#endif
3506}
3507
3508static int drmProcessPlatformDevice(drmDevicePtr *device,
3509 const char *node, int node_type,
3510 int maj, int min, bool fetch_deviceinfo,
3511 uint32_t flags)
3512{
3513 drmDevicePtr dev;
3514 char *ptr;
3515 int ret;
3516
3517 dev = drmDeviceAlloc(node_type, node, sizeof(drmPlatformBusInfo),
3518 sizeof(drmPlatformDeviceInfo), &ptr);
3519 if (!dev)
3520 return -ENOMEM;
3521
3522 dev->bustype = DRM_BUS_PLATFORM;
3523
3524 dev->businfo.platform = (drmPlatformBusInfoPtr)ptr;
3525
3526 ret = drmParsePlatformBusInfo(maj, min, dev->businfo.platform);
3527 if (ret < 0)
3528 goto free_device;
3529
3530 if (fetch_deviceinfo) {
3531 ptr += sizeof(drmPlatformBusInfo);
3532 dev->deviceinfo.platform = (drmPlatformDeviceInfoPtr)ptr;
3533
3534 ret = drmParsePlatformDeviceInfo(maj, min, dev->deviceinfo.platform);
3535 if (ret < 0)
3536 goto free_device;
3537 }
3538
3539 *device = dev;
3540
3541 return 0;
3542
3543free_device:
3544 free(dev);
3545 return ret;
3546}
3547
3548static int drmParseHost1xBusInfo(int maj, int min, drmHost1xBusInfoPtr info)
3549{
3550#ifdef __linux__
3551 char path[PATH_MAX + 1], *name;
3552
3553 snprintf(path, sizeof(path), "/sys/dev/char/%d:%d/device", maj, min);
3554
3555 name = sysfs_uevent_get(path, "OF_FULLNAME");
3556 if (!name)
3557 return -ENOENT;
3558
3559 strncpy(info->fullname, name, DRM_HOST1X_DEVICE_NAME_LEN);
3560 info->fullname[DRM_HOST1X_DEVICE_NAME_LEN - 1] = '\0';
3561 free(name);
3562
3563 return 0;
3564#else
3565#warning "Missing implementation of drmParseHost1xBusInfo"
3566 return -EINVAL;
3567#endif
3568}
3569
3570static int drmParseHost1xDeviceInfo(int maj, int min,
3571 drmHost1xDeviceInfoPtr info)
3572{
3573#ifdef __linux__
3574 char path[PATH_MAX + 1], *value;
3575 unsigned int count, i;
3576 int err;
3577
3578 snprintf(path, sizeof(path), "/sys/dev/char/%d:%d/device", maj, min);
3579
3580 value = sysfs_uevent_get(path, "OF_COMPATIBLE_N");
3581 if (!value)
3582 return -ENOENT;
3583
3584 sscanf(value, "%u", &count);
3585 free(value);
3586
3587 info->compatible = calloc(count + 1, sizeof(*info->compatible));
3588 if (!info->compatible)
3589 return -ENOMEM;
3590
3591 for (i = 0; i < count; i++) {
3592 value = sysfs_uevent_get(path, "OF_COMPATIBLE_%u", i);
3593 if (!value) {
3594 err = -ENOENT;
3595 goto free;
3596 }
3597
3598 info->compatible[i] = value;
3599 }
3600
3601 return 0;
3602
3603free:
3604 while (i--)
3605 free(info->compatible[i]);
3606
3607 free(info->compatible);
3608 return err;
3609#else
3610#warning "Missing implementation of drmParseHost1xDeviceInfo"
3611 return -EINVAL;
3612#endif
3613}
3614
3615static int drmProcessHost1xDevice(drmDevicePtr *device,
3616 const char *node, int node_type,
3617 int maj, int min, bool fetch_deviceinfo,
3618 uint32_t flags)
3619{
3620 drmDevicePtr dev;
3621 char *ptr;
3622 int ret;
3623
3624 dev = drmDeviceAlloc(node_type, node, sizeof(drmHost1xBusInfo),
3625 sizeof(drmHost1xDeviceInfo), &ptr);
3626 if (!dev)
3627 return -ENOMEM;
3628
3629 dev->bustype = DRM_BUS_HOST1X;
3630
3631 dev->businfo.host1x = (drmHost1xBusInfoPtr)ptr;
3632
3633 ret = drmParseHost1xBusInfo(maj, min, dev->businfo.host1x);
3634 if (ret < 0)
3635 goto free_device;
3636
3637 if (fetch_deviceinfo) {
3638 ptr += sizeof(drmHost1xBusInfo);
3639 dev->deviceinfo.host1x = (drmHost1xDeviceInfoPtr)ptr;
3640
3641 ret = drmParseHost1xDeviceInfo(maj, min, dev->deviceinfo.host1x);
3642 if (ret < 0)
3643 goto free_device;
3644 }
3645
3646 *device = dev;
3647
3648 return 0;
3649
3650free_device:
3651 free(dev);
3048 return ret; 3652 return ret;
3049} 3653}
3050 3654
3655/* Consider devices located on the same bus as duplicate and fold the respective
3656 * entries into a single one.
3657 *
3658 * Note: this leaves "gaps" in the array, while preserving the length.
3659 */
3051static void drmFoldDuplicatedDevices(drmDevicePtr local_devices[], int count) 3660static void drmFoldDuplicatedDevices(drmDevicePtr local_devices[], int count)
3052{ 3661{
3053 int node_type, i, j; 3662 int node_type, i, j;
@@ -3065,17 +3674,93 @@ static void drmFoldDuplicatedDevices(drmDevicePtr local_devices[], int count)
3065 } 3674 }
3066} 3675}
3067 3676
3677/* Check that the given flags are valid returning 0 on success */
3678static int
3679drm_device_validate_flags(uint32_t flags)
3680{
3681 return (flags & ~DRM_DEVICE_GET_PCI_REVISION);
3682}
3683
3068/** 3684/**
3069 * Get information about the opened drm device 3685 * Get information about the opened drm device
3070 * 3686 *
3071 * \param fd file descriptor of the drm device 3687 * \param fd file descriptor of the drm device
3688 * \param flags feature/behaviour bitmask
3072 * \param device the address of a drmDevicePtr where the information 3689 * \param device the address of a drmDevicePtr where the information
3073 * will be allocated in stored 3690 * will be allocated in stored
3074 * 3691 *
3075 * \return zero on success, negative error code otherwise. 3692 * \return zero on success, negative error code otherwise.
3693 *
3694 * \note Unlike drmGetDevice it does not retrieve the pci device revision field
3695 * unless the DRM_DEVICE_GET_PCI_REVISION \p flag is set.
3076 */ 3696 */
3077int drmGetDevice(int fd, drmDevicePtr *device) 3697int drmGetDevice2(int fd, uint32_t flags, drmDevicePtr *device)
3078{ 3698{
3699#ifdef __OpenBSD__
3700 /*
3701 * DRI device nodes on OpenBSD are not in their own directory, they reside
3702 * in /dev along with a large number of statically generated /dev nodes.
3703 * Avoid stat'ing all of /dev needlessly by implementing this custom path.
3704 */
3705 drmDevicePtr d;
3706 struct stat sbuf;
3707 char node[PATH_MAX + 1];
3708 const char *dev_name;
3709 int node_type, subsystem_type;
3710 int maj, min, n, ret, base;
3711
3712 if (fd == -1 || device == NULL)
3713 return -EINVAL;
3714
3715 if (fstat(fd, &sbuf))
3716 return -errno;
3717
3718 maj = major(sbuf.st_rdev);
3719 min = minor(sbuf.st_rdev);
3720
3721 if (maj != DRM_MAJOR || !S_ISCHR(sbuf.st_mode))
3722 return -EINVAL;
3723
3724 node_type = drmGetMinorType(min);
3725 if (node_type == -1)
3726 return -ENODEV;
3727
3728 switch (node_type) {
3729 case DRM_NODE_PRIMARY:
3730 dev_name = DRM_DEV_NAME;
3731 break;
3732 case DRM_NODE_CONTROL:
3733 dev_name = DRM_CONTROL_DEV_NAME;
3734 break;
3735 case DRM_NODE_RENDER:
3736 dev_name = DRM_RENDER_DEV_NAME;
3737 break;
3738 default:
3739 return -EINVAL;
3740 };
3741
3742 base = drmGetMinorBase(node_type);
3743 if (base < 0)
3744 return -EINVAL;
3745
3746 n = snprintf(node, PATH_MAX, dev_name, DRM_DIR_NAME, min - base);
3747 if (n == -1 || n >= PATH_MAX)
3748 return -errno;
3749 if (stat(node, &sbuf))
3750 return -EINVAL;
3751
3752 subsystem_type = drmParseSubsystemType(maj, min);
3753 if (subsystem_type != DRM_BUS_PCI)
3754 return -ENODEV;
3755
3756 ret = drmProcessPciDevice(&d, node, node_type, maj, min, true, flags);
3757 if (ret)
3758 return ret;
3759
3760 *device = d;
3761
3762 return 0;
3763#else
3079 drmDevicePtr *local_devices; 3764 drmDevicePtr *local_devices;
3080 drmDevicePtr d; 3765 drmDevicePtr d;
3081 DIR *sysdir; 3766 DIR *sysdir;
@@ -3086,6 +3771,10 @@ int drmGetDevice(int fd, drmDevicePtr *device)
3086 int maj, min; 3771 int maj, min;
3087 int ret, i, node_count; 3772 int ret, i, node_count;
3088 int max_count = 16; 3773 int max_count = 16;
3774 dev_t find_rdev;
3775
3776 if (drm_device_validate_flags(flags))
3777 return -EINVAL;
3089 3778
3090 if (fd == -1 || device == NULL) 3779 if (fd == -1 || device == NULL)
3091 return -EINVAL; 3780 return -EINVAL;
@@ -3093,6 +3782,7 @@ int drmGetDevice(int fd, drmDevicePtr *device)
3093 if (fstat(fd, &sbuf)) 3782 if (fstat(fd, &sbuf))
3094 return -errno; 3783 return -errno;
3095 3784
3785 find_rdev = sbuf.st_rdev;
3096 maj = major(sbuf.st_rdev); 3786 maj = major(sbuf.st_rdev);
3097 min = minor(sbuf.st_rdev); 3787 min = minor(sbuf.st_rdev);
3098 3788
@@ -3132,14 +3822,34 @@ int drmGetDevice(int fd, drmDevicePtr *device)
3132 3822
3133 switch (subsystem_type) { 3823 switch (subsystem_type) {
3134 case DRM_BUS_PCI: 3824 case DRM_BUS_PCI:
3135 ret = drmProcessPciDevice(&d, dent->d_name, node, node_type, 3825 ret = drmProcessPciDevice(&d, node, node_type, maj, min, true, flags);
3136 maj, min, true);
3137 if (ret) 3826 if (ret)
3138 goto free_devices; 3827 continue;
3828
3829 break;
3830
3831 case DRM_BUS_USB:
3832 ret = drmProcessUsbDevice(&d, node, node_type, maj, min, true, flags);
3833 if (ret)
3834 continue;
3835
3836 break;
3837
3838 case DRM_BUS_PLATFORM:
3839 ret = drmProcessPlatformDevice(&d, node, node_type, maj, min, true, flags);
3840 if (ret)
3841 continue;
3139 3842
3140 break; 3843 break;
3844
3845 case DRM_BUS_HOST1X:
3846 ret = drmProcessHost1xDevice(&d, node, node_type, maj, min, true, flags);
3847 if (ret)
3848 continue;
3849
3850 break;
3851
3141 default: 3852 default:
3142 fprintf(stderr, "The subsystem type is not supported yet\n");
3143 continue; 3853 continue;
3144 } 3854 }
3145 3855
@@ -3153,20 +3863,26 @@ int drmGetDevice(int fd, drmDevicePtr *device)
3153 local_devices = temp; 3863 local_devices = temp;
3154 } 3864 }
3155 3865
3156 local_devices[i] = d; 3866 /* store target at local_devices[0] for ease to use below */
3867 if (find_rdev == sbuf.st_rdev && i) {
3868 local_devices[i] = local_devices[0];
3869 local_devices[0] = d;
3870 }
3871 else
3872 local_devices[i] = d;
3157 i++; 3873 i++;
3158 } 3874 }
3159 node_count = i; 3875 node_count = i;
3160 3876
3161 /* Fold nodes into a single device if they share the same bus info */
3162 drmFoldDuplicatedDevices(local_devices, node_count); 3877 drmFoldDuplicatedDevices(local_devices, node_count);
3163 3878
3164 *device = local_devices[0]; 3879 *device = local_devices[0];
3165 for (i = 1; i < node_count && local_devices[i]; i++) 3880 drmFreeDevices(&local_devices[1], node_count - 1);
3166 drmFreeDevice(&local_devices[i]);
3167 3881
3168 closedir(sysdir); 3882 closedir(sysdir);
3169 free(local_devices); 3883 free(local_devices);
3884 if (*device == NULL)
3885 return -ENODEV;
3170 return 0; 3886 return 0;
3171 3887
3172free_devices: 3888free_devices:
@@ -3176,11 +3892,27 @@ free_devices:
3176free_locals: 3892free_locals:
3177 free(local_devices); 3893 free(local_devices);
3178 return ret; 3894 return ret;
3895#endif
3896}
3897
3898/**
3899 * Get information about the opened drm device
3900 *
3901 * \param fd file descriptor of the drm device
3902 * \param device the address of a drmDevicePtr where the information
3903 * will be allocated in stored
3904 *
3905 * \return zero on success, negative error code otherwise.
3906 */
3907int drmGetDevice(int fd, drmDevicePtr *device)
3908{
3909 return drmGetDevice2(fd, DRM_DEVICE_GET_PCI_REVISION, device);
3179} 3910}
3180 3911
3181/** 3912/**
3182 * Get drm devices on the system 3913 * Get drm devices on the system
3183 * 3914 *
3915 * \param flags feature/behaviour bitmask
3184 * \param devices the array of devices with drmDevicePtr elements 3916 * \param devices the array of devices with drmDevicePtr elements
3185 * can be NULL to get the device number first 3917 * can be NULL to get the device number first
3186 * \param max_devices the maximum number of devices for the array 3918 * \param max_devices the maximum number of devices for the array
@@ -3189,8 +3921,11 @@ free_locals:
3189 * if devices is NULL - total number of devices available on the system, 3921 * if devices is NULL - total number of devices available on the system,
3190 * alternatively the number of devices stored in devices[], which is 3922 * alternatively the number of devices stored in devices[], which is
3191 * capped by the max_devices. 3923 * capped by the max_devices.
3924 *
3925 * \note Unlike drmGetDevices it does not retrieve the pci device revision field
3926 * unless the DRM_DEVICE_GET_PCI_REVISION \p flag is set.
3192 */ 3927 */
3193int drmGetDevices(drmDevicePtr devices[], int max_devices) 3928int drmGetDevices2(uint32_t flags, drmDevicePtr devices[], int max_devices)
3194{ 3929{
3195 drmDevicePtr *local_devices; 3930 drmDevicePtr *local_devices;
3196 drmDevicePtr device; 3931 drmDevicePtr device;
@@ -3203,6 +3938,9 @@ int drmGetDevices(drmDevicePtr devices[], int max_devices)
3203 int ret, i, node_count, device_count; 3938 int ret, i, node_count, device_count;
3204 int max_count = 16; 3939 int max_count = 16;
3205 3940
3941 if (drm_device_validate_flags(flags))
3942 return -EINVAL;
3943
3206 local_devices = calloc(max_count, sizeof(drmDevicePtr)); 3944 local_devices = calloc(max_count, sizeof(drmDevicePtr));
3207 if (local_devices == NULL) 3945 if (local_devices == NULL)
3208 return -ENOMEM; 3946 return -ENOMEM;
@@ -3236,14 +3974,38 @@ int drmGetDevices(drmDevicePtr devices[], int max_devices)
3236 3974
3237 switch (subsystem_type) { 3975 switch (subsystem_type) {
3238 case DRM_BUS_PCI: 3976 case DRM_BUS_PCI:
3239 ret = drmProcessPciDevice(&device, dent->d_name, node, node_type, 3977 ret = drmProcessPciDevice(&device, node, node_type,
3240 maj, min, devices != NULL); 3978 maj, min, devices != NULL, flags);
3979 if (ret)
3980 continue;
3981
3982 break;
3983
3984 case DRM_BUS_USB:
3985 ret = drmProcessUsbDevice(&device, node, node_type, maj, min,
3986 devices != NULL, flags);
3987 if (ret)
3988 goto free_devices;
3989
3990 break;
3991
3992 case DRM_BUS_PLATFORM:
3993 ret = drmProcessPlatformDevice(&device, node, node_type, maj, min,
3994 devices != NULL, flags);
3995 if (ret)
3996 goto free_devices;
3997
3998 break;
3999
4000 case DRM_BUS_HOST1X:
4001 ret = drmProcessHost1xDevice(&device, node, node_type, maj, min,
4002 devices != NULL, flags);
3241 if (ret) 4003 if (ret)
3242 goto free_devices; 4004 goto free_devices;
3243 4005
3244 break; 4006 break;
4007
3245 default: 4008 default:
3246 fprintf(stderr, "The subsystem type is not supported yet\n");
3247 continue; 4009 continue;
3248 } 4010 }
3249 4011
@@ -3262,11 +4024,13 @@ int drmGetDevices(drmDevicePtr devices[], int max_devices)
3262 } 4024 }
3263 node_count = i; 4025 node_count = i;
3264 4026
3265 /* Fold nodes into a single device if they share the same bus info */
3266 drmFoldDuplicatedDevices(local_devices, node_count); 4027 drmFoldDuplicatedDevices(local_devices, node_count);
3267 4028
3268 device_count = 0; 4029 device_count = 0;
3269 for (i = 0; i < node_count && local_devices[i]; i++) { 4030 for (i = 0; i < node_count; i++) {
4031 if (!local_devices[i])
4032 continue;
4033
3270 if ((devices != NULL) && (device_count < max_devices)) 4034 if ((devices != NULL) && (device_count < max_devices))
3271 devices[device_count] = local_devices[i]; 4035 devices[device_count] = local_devices[i];
3272 else 4036 else
@@ -3287,3 +4051,92 @@ free_locals:
3287 free(local_devices); 4051 free(local_devices);
3288 return ret; 4052 return ret;
3289} 4053}
4054
4055/**
4056 * Get drm devices on the system
4057 *
4058 * \param devices the array of devices with drmDevicePtr elements
4059 * can be NULL to get the device number first
4060 * \param max_devices the maximum number of devices for the array
4061 *
4062 * \return on error - negative error code,
4063 * if devices is NULL - total number of devices available on the system,
4064 * alternatively the number of devices stored in devices[], which is
4065 * capped by the max_devices.
4066 */
4067int drmGetDevices(drmDevicePtr devices[], int max_devices)
4068{
4069 return drmGetDevices2(DRM_DEVICE_GET_PCI_REVISION, devices, max_devices);
4070}
4071
4072char *drmGetDeviceNameFromFd2(int fd)
4073{
4074#ifdef __linux__
4075 struct stat sbuf;
4076 char path[PATH_MAX + 1], *value;
4077 unsigned int maj, min;
4078
4079 if (fstat(fd, &sbuf))
4080 return NULL;
4081
4082 maj = major(sbuf.st_rdev);
4083 min = minor(sbuf.st_rdev);
4084
4085 if (maj != DRM_MAJOR || !S_ISCHR(sbuf.st_mode))
4086 return NULL;
4087
4088 snprintf(path, sizeof(path), "/sys/dev/char/%d:%d", maj, min);
4089
4090 value = sysfs_uevent_get(path, "DEVNAME");
4091 if (!value)
4092 return NULL;
4093
4094 snprintf(path, sizeof(path), "/dev/%s", value);
4095 free(value);
4096
4097 return strdup(path);
4098#else
4099 struct stat sbuf;
4100 char node[PATH_MAX + 1];
4101 const char *dev_name;
4102 int node_type;
4103 int maj, min, n, base;
4104
4105 if (fstat(fd, &sbuf))
4106 return NULL;
4107
4108 maj = major(sbuf.st_rdev);
4109 min = minor(sbuf.st_rdev);
4110
4111 if (maj != DRM_MAJOR || !S_ISCHR(sbuf.st_mode))
4112 return NULL;
4113
4114 node_type = drmGetMinorType(min);
4115 if (node_type == -1)
4116 return NULL;
4117
4118 switch (node_type) {
4119 case DRM_NODE_PRIMARY:
4120 dev_name = DRM_DEV_NAME;
4121 break;
4122 case DRM_NODE_CONTROL:
4123 dev_name = DRM_CONTROL_DEV_NAME;
4124 break;
4125 case DRM_NODE_RENDER:
4126 dev_name = DRM_RENDER_DEV_NAME;
4127 break;
4128 default:
4129 return NULL;
4130 };
4131
4132 base = drmGetMinorBase(node_type);
4133 if (base < 0)
4134 return NULL;
4135
4136 n = snprintf(node, PATH_MAX, dev_name, DRM_DIR_NAME, min - base);
4137 if (n == -1 || n >= PATH_MAX)
4138 return NULL;
4139
4140 return strdup(node);
4141#endif
4142}
diff --git a/xf86drm.h b/xf86drm.h
index 481d882a..0d927018 100644
--- a/xf86drm.h
+++ b/xf86drm.h
@@ -753,6 +753,11 @@ typedef struct _drmEventContext {
753extern int drmHandleEvent(int fd, drmEventContextPtr evctx); 753extern int drmHandleEvent(int fd, drmEventContextPtr evctx);
754 754
755extern char *drmGetDeviceNameFromFd(int fd); 755extern char *drmGetDeviceNameFromFd(int fd);
756
757/* Improved version of drmGetDeviceNameFromFd which attributes for any type of
758 * device/node - card, control or renderD.
759 */
760extern char *drmGetDeviceNameFromFd2(int fd);
756extern int drmGetNodeTypeFromFd(int fd); 761extern int drmGetNodeTypeFromFd(int fd);
757 762
758extern int drmPrimeHandleToFD(int fd, uint32_t handle, uint32_t flags, int *prime_fd); 763extern int drmPrimeHandleToFD(int fd, uint32_t handle, uint32_t flags, int *prime_fd);
@@ -761,7 +766,10 @@ extern int drmPrimeFDToHandle(int fd, int prime_fd, uint32_t *handle);
761extern char *drmGetPrimaryDeviceNameFromFd(int fd); 766extern char *drmGetPrimaryDeviceNameFromFd(int fd);
762extern char *drmGetRenderDeviceNameFromFd(int fd); 767extern char *drmGetRenderDeviceNameFromFd(int fd);
763 768
764#define DRM_BUS_PCI 0 769#define DRM_BUS_PCI 0
770#define DRM_BUS_USB 1
771#define DRM_BUS_PLATFORM 2
772#define DRM_BUS_HOST1X 3
765 773
766typedef struct _drmPciBusInfo { 774typedef struct _drmPciBusInfo {
767 uint16_t domain; 775 uint16_t domain;
@@ -778,15 +786,51 @@ typedef struct _drmPciDeviceInfo {
778 uint8_t revision_id; 786 uint8_t revision_id;
779} drmPciDeviceInfo, *drmPciDeviceInfoPtr; 787} drmPciDeviceInfo, *drmPciDeviceInfoPtr;
780 788
789typedef struct _drmUsbBusInfo {
790 uint8_t bus;
791 uint8_t dev;
792} drmUsbBusInfo, *drmUsbBusInfoPtr;
793
794typedef struct _drmUsbDeviceInfo {
795 uint16_t vendor;
796 uint16_t product;
797} drmUsbDeviceInfo, *drmUsbDeviceInfoPtr;
798
799#define DRM_PLATFORM_DEVICE_NAME_LEN 512
800
801typedef struct _drmPlatformBusInfo {
802 char fullname[DRM_PLATFORM_DEVICE_NAME_LEN];
803} drmPlatformBusInfo, *drmPlatformBusInfoPtr;
804
805typedef struct _drmPlatformDeviceInfo {
806 char **compatible; /* NULL terminated list of compatible strings */
807} drmPlatformDeviceInfo, *drmPlatformDeviceInfoPtr;
808
809#define DRM_HOST1X_DEVICE_NAME_LEN 512
810
811typedef struct _drmHost1xBusInfo {
812 char fullname[DRM_HOST1X_DEVICE_NAME_LEN];
813} drmHost1xBusInfo, *drmHost1xBusInfoPtr;
814
815typedef struct _drmHost1xDeviceInfo {
816 char **compatible; /* NULL terminated list of compatible strings */
817} drmHost1xDeviceInfo, *drmHost1xDeviceInfoPtr;
818
781typedef struct _drmDevice { 819typedef struct _drmDevice {
782 char **nodes; /* DRM_NODE_MAX sized array */ 820 char **nodes; /* DRM_NODE_MAX sized array */
783 int available_nodes; /* DRM_NODE_* bitmask */ 821 int available_nodes; /* DRM_NODE_* bitmask */
784 int bustype; 822 int bustype;
785 union { 823 union {
786 drmPciBusInfoPtr pci; 824 drmPciBusInfoPtr pci;
825 drmUsbBusInfoPtr usb;
826 drmPlatformBusInfoPtr platform;
827 drmHost1xBusInfoPtr host1x;
787 } businfo; 828 } businfo;
788 union { 829 union {
789 drmPciDeviceInfoPtr pci; 830 drmPciDeviceInfoPtr pci;
831 drmUsbDeviceInfoPtr usb;
832 drmPlatformDeviceInfoPtr platform;
833 drmHost1xDeviceInfoPtr host1x;
790 } deviceinfo; 834 } deviceinfo;
791} drmDevice, *drmDevicePtr; 835} drmDevice, *drmDevicePtr;
792 836
@@ -796,6 +840,10 @@ extern void drmFreeDevice(drmDevicePtr *device);
796extern int drmGetDevices(drmDevicePtr devices[], int max_devices); 840extern int drmGetDevices(drmDevicePtr devices[], int max_devices);
797extern void drmFreeDevices(drmDevicePtr devices[], int count); 841extern void drmFreeDevices(drmDevicePtr devices[], int count);
798 842
843#define DRM_DEVICE_GET_PCI_REVISION (1 << 0)
844extern int drmGetDevice2(int fd, uint32_t flags, drmDevicePtr *device);
845extern int drmGetDevices2(uint32_t flags, drmDevicePtr devices[], int max_devices);
846
799#if defined(__cplusplus) 847#if defined(__cplusplus)
800} 848}
801#endif 849#endif
diff --git a/xf86drmMode.c b/xf86drmMode.c
index b341f382..e1c99742 100644
--- a/xf86drmMode.c
+++ b/xf86drmMode.c
@@ -34,7 +34,7 @@
34 */ 34 */
35 35
36/* 36/*
37 * TODO the types we are after are defined in diffrent headers on diffrent 37 * TODO the types we are after are defined in different headers on different
38 * platforms find which headers to include to get uint32_t 38 * platforms find which headers to include to get uint32_t
39 */ 39 */
40 40
@@ -270,10 +270,10 @@ int drmModeAddFB(int fd, uint32_t width, uint32_t height, uint8_t depth,
270 return 0; 270 return 0;
271} 271}
272 272
273int drmModeAddFB2(int fd, uint32_t width, uint32_t height, 273int drmModeAddFB2WithModifiers(int fd, uint32_t width, uint32_t height,
274 uint32_t pixel_format, uint32_t bo_handles[4], 274 uint32_t pixel_format, uint32_t bo_handles[4],
275 uint32_t pitches[4], uint32_t offsets[4], 275 uint32_t pitches[4], uint32_t offsets[4],
276 uint32_t *buf_id, uint32_t flags) 276 uint64_t modifier[4], uint32_t *buf_id, uint32_t flags)
277{ 277{
278 struct drm_mode_fb_cmd2 f; 278 struct drm_mode_fb_cmd2 f;
279 int ret; 279 int ret;
@@ -286,6 +286,8 @@ int drmModeAddFB2(int fd, uint32_t width, uint32_t height,
286 memcpy(f.handles, bo_handles, 4 * sizeof(bo_handles[0])); 286 memcpy(f.handles, bo_handles, 4 * sizeof(bo_handles[0]));
287 memcpy(f.pitches, pitches, 4 * sizeof(pitches[0])); 287 memcpy(f.pitches, pitches, 4 * sizeof(pitches[0]));
288 memcpy(f.offsets, offsets, 4 * sizeof(offsets[0])); 288 memcpy(f.offsets, offsets, 4 * sizeof(offsets[0]));
289 if (modifier)
290 memcpy(f.modifier, modifier, 4 * sizeof(modifier[0]));
289 291
290 if ((ret = DRM_IOCTL(fd, DRM_IOCTL_MODE_ADDFB2, &f))) 292 if ((ret = DRM_IOCTL(fd, DRM_IOCTL_MODE_ADDFB2, &f)))
291 return ret; 293 return ret;
@@ -294,6 +296,17 @@ int drmModeAddFB2(int fd, uint32_t width, uint32_t height,
294 return 0; 296 return 0;
295} 297}
296 298
299int drmModeAddFB2(int fd, uint32_t width, uint32_t height,
300 uint32_t pixel_format, uint32_t bo_handles[4],
301 uint32_t pitches[4], uint32_t offsets[4],
302 uint32_t *buf_id, uint32_t flags)
303{
304 return drmModeAddFB2WithModifiers(fd, width, height,
305 pixel_format, bo_handles,
306 pitches, offsets, NULL,
307 buf_id, flags);
308}
309
297int drmModeRmFB(int fd, uint32_t bufferId) 310int drmModeRmFB(int fd, uint32_t bufferId)
298{ 311{
299 return DRM_IOCTL(fd, DRM_IOCTL_MODE_RMFB, &bufferId); 312 return DRM_IOCTL(fd, DRM_IOCTL_MODE_RMFB, &bufferId);
@@ -475,12 +488,13 @@ _drmModeGetConnector(int fd, uint32_t connector_id, int probe)
475{ 488{
476 struct drm_mode_get_connector conn, counts; 489 struct drm_mode_get_connector conn, counts;
477 drmModeConnectorPtr r = NULL; 490 drmModeConnectorPtr r = NULL;
491 struct drm_mode_modeinfo stack_mode;
478 492
479 memclear(conn); 493 memclear(conn);
480 conn.connector_id = connector_id; 494 conn.connector_id = connector_id;
481 if (!probe) { 495 if (!probe) {
482 conn.count_modes = 1; 496 conn.count_modes = 1;
483 conn.modes_ptr = VOID2U64(drmMalloc(sizeof(struct drm_mode_modeinfo))); 497 conn.modes_ptr = VOID2U64(&stack_mode);
484 } 498 }
485 499
486 if (drmIoctl(fd, DRM_IOCTL_MODE_GETCONNECTOR, &conn)) 500 if (drmIoctl(fd, DRM_IOCTL_MODE_GETCONNECTOR, &conn))
@@ -504,7 +518,7 @@ retry:
504 goto err_allocs; 518 goto err_allocs;
505 } else { 519 } else {
506 conn.count_modes = 1; 520 conn.count_modes = 1;
507 conn.modes_ptr = VOID2U64(drmMalloc(sizeof(struct drm_mode_modeinfo))); 521 conn.modes_ptr = VOID2U64(&stack_mode);
508 } 522 }
509 523
510 if (conn.count_encoders) { 524 if (conn.count_encoders) {
@@ -525,7 +539,8 @@ retry:
525 counts.count_encoders < conn.count_encoders) { 539 counts.count_encoders < conn.count_encoders) {
526 drmFree(U642VOID(conn.props_ptr)); 540 drmFree(U642VOID(conn.props_ptr));
527 drmFree(U642VOID(conn.prop_values_ptr)); 541 drmFree(U642VOID(conn.prop_values_ptr));
528 drmFree(U642VOID(conn.modes_ptr)); 542 if (U642VOID(conn.modes_ptr) != &stack_mode)
543 drmFree(U642VOID(conn.modes_ptr));
529 drmFree(U642VOID(conn.encoders_ptr)); 544 drmFree(U642VOID(conn.encoders_ptr));
530 545
531 goto retry; 546 goto retry;
@@ -567,7 +582,8 @@ retry:
567err_allocs: 582err_allocs:
568 drmFree(U642VOID(conn.prop_values_ptr)); 583 drmFree(U642VOID(conn.prop_values_ptr));
569 drmFree(U642VOID(conn.props_ptr)); 584 drmFree(U642VOID(conn.props_ptr));
570 drmFree(U642VOID(conn.modes_ptr)); 585 if (U642VOID(conn.modes_ptr) != &stack_mode)
586 drmFree(U642VOID(conn.modes_ptr));
571 drmFree(U642VOID(conn.encoders_ptr)); 587 drmFree(U642VOID(conn.encoders_ptr));
572 588
573 return r; 589 return r;
@@ -885,7 +901,7 @@ int drmHandleEvent(int fd, drmEventContextPtr evctx)
885 901
886 i = 0; 902 i = 0;
887 while (i < len) { 903 while (i < len) {
888 e = (struct drm_event *) &buffer[i]; 904 e = (struct drm_event *)(buffer + i);
889 switch (e->type) { 905 switch (e->type) {
890 case DRM_EVENT_VBLANK: 906 case DRM_EVENT_VBLANK:
891 if (evctx->version < 1 || 907 if (evctx->version < 1 ||
@@ -932,6 +948,22 @@ int drmModePageFlip(int fd, uint32_t crtc_id, uint32_t fb_id,
932 return DRM_IOCTL(fd, DRM_IOCTL_MODE_PAGE_FLIP, &flip); 948 return DRM_IOCTL(fd, DRM_IOCTL_MODE_PAGE_FLIP, &flip);
933} 949}
934 950
951int drmModePageFlipTarget(int fd, uint32_t crtc_id, uint32_t fb_id,
952 uint32_t flags, void *user_data,
953 uint32_t target_vblank)
954{
955 struct drm_mode_crtc_page_flip_target flip_target;
956
957 memclear(flip_target);
958 flip_target.fb_id = fb_id;
959 flip_target.crtc_id = crtc_id;
960 flip_target.user_data = VOID2U64(user_data);
961 flip_target.flags = flags;
962 flip_target.sequence = target_vblank;
963
964 return DRM_IOCTL(fd, DRM_IOCTL_MODE_PAGE_FLIP, &flip_target);
965}
966
935int drmModeSetPlane(int fd, uint32_t plane_id, uint32_t crtc_id, 967int drmModeSetPlane(int fd, uint32_t plane_id, uint32_t crtc_id,
936 uint32_t fb_id, uint32_t flags, 968 uint32_t fb_id, uint32_t flags,
937 int32_t crtc_x, int32_t crtc_y, 969 int32_t crtc_x, int32_t crtc_y,
diff --git a/xf86drmMode.h b/xf86drmMode.h
index 53bb423c..9d73be95 100644
--- a/xf86drmMode.h
+++ b/xf86drmMode.h
@@ -123,13 +123,15 @@ extern "C" {
123#define DRM_MODE_DITHERING_OFF 0 123#define DRM_MODE_DITHERING_OFF 0
124#define DRM_MODE_DITHERING_ON 1 124#define DRM_MODE_DITHERING_ON 1
125 125
126#define DRM_MODE_ENCODER_NONE 0 126#define DRM_MODE_ENCODER_NONE 0
127#define DRM_MODE_ENCODER_DAC 1 127#define DRM_MODE_ENCODER_DAC 1
128#define DRM_MODE_ENCODER_TMDS 2 128#define DRM_MODE_ENCODER_TMDS 2
129#define DRM_MODE_ENCODER_LVDS 3 129#define DRM_MODE_ENCODER_LVDS 3
130#define DRM_MODE_ENCODER_TVDAC 4 130#define DRM_MODE_ENCODER_TVDAC 4
131#define DRM_MODE_ENCODER_VIRTUAL 5 131#define DRM_MODE_ENCODER_VIRTUAL 5
132#define DRM_MODE_ENCODER_DSI 6 132#define DRM_MODE_ENCODER_DSI 6
133#define DRM_MODE_ENCODER_DPMST 7
134#define DRM_MODE_ENCODER_DPI 8
133 135
134#define DRM_MODE_SUBCONNECTOR_Automatic 0 136#define DRM_MODE_SUBCONNECTOR_Automatic 0
135#define DRM_MODE_SUBCONNECTOR_Unknown 0 137#define DRM_MODE_SUBCONNECTOR_Unknown 0
@@ -153,10 +155,11 @@ extern "C" {
153#define DRM_MODE_CONNECTOR_DisplayPort 10 155#define DRM_MODE_CONNECTOR_DisplayPort 10
154#define DRM_MODE_CONNECTOR_HDMIA 11 156#define DRM_MODE_CONNECTOR_HDMIA 11
155#define DRM_MODE_CONNECTOR_HDMIB 12 157#define DRM_MODE_CONNECTOR_HDMIB 12
156#define DRM_MODE_CONNECTOR_TV 13 158#define DRM_MODE_CONNECTOR_TV 13
157#define DRM_MODE_CONNECTOR_eDP 14 159#define DRM_MODE_CONNECTOR_eDP 14
158#define DRM_MODE_CONNECTOR_VIRTUAL 15 160#define DRM_MODE_CONNECTOR_VIRTUAL 15
159#define DRM_MODE_CONNECTOR_DSI 16 161#define DRM_MODE_CONNECTOR_DSI 16
162#define DRM_MODE_CONNECTOR_DPI 17
160 163
161#define DRM_MODE_PROP_PENDING (1<<0) 164#define DRM_MODE_PROP_PENDING (1<<0)
162#define DRM_MODE_PROP_RANGE (1<<1) 165#define DRM_MODE_PROP_RANGE (1<<1)
@@ -369,6 +372,13 @@ extern int drmModeAddFB2(int fd, uint32_t width, uint32_t height,
369 uint32_t pixel_format, uint32_t bo_handles[4], 372 uint32_t pixel_format, uint32_t bo_handles[4],
370 uint32_t pitches[4], uint32_t offsets[4], 373 uint32_t pitches[4], uint32_t offsets[4],
371 uint32_t *buf_id, uint32_t flags); 374 uint32_t *buf_id, uint32_t flags);
375
376/* ...with format modifiers */
377int drmModeAddFB2WithModifiers(int fd, uint32_t width, uint32_t height,
378 uint32_t pixel_format, uint32_t bo_handles[4],
379 uint32_t pitches[4], uint32_t offsets[4],
380 uint64_t modifier[4], uint32_t *buf_id, uint32_t flags);
381
372/** 382/**
373 * Destroies the given framebuffer. 383 * Destroies the given framebuffer.
374 */ 384 */
@@ -466,6 +476,9 @@ extern int drmModeCrtcGetGamma(int fd, uint32_t crtc_id, uint32_t size,
466 uint16_t *red, uint16_t *green, uint16_t *blue); 476 uint16_t *red, uint16_t *green, uint16_t *blue);
467extern int drmModePageFlip(int fd, uint32_t crtc_id, uint32_t fb_id, 477extern int drmModePageFlip(int fd, uint32_t crtc_id, uint32_t fb_id,
468 uint32_t flags, void *user_data); 478 uint32_t flags, void *user_data);
479extern int drmModePageFlipTarget(int fd, uint32_t crtc_id, uint32_t fb_id,
480 uint32_t flags, void *user_data,
481 uint32_t target_vblank);
469 482
470extern drmModePlaneResPtr drmModeGetPlaneResources(int fd); 483extern drmModePlaneResPtr drmModeGetPlaneResources(int fd);
471extern drmModePlanePtr drmModeGetPlane(int fd, uint32_t plane_id); 484extern drmModePlanePtr drmModeGetPlane(int fd, uint32_t plane_id);