-rw-r--r--  .editorconfig | 4
-rw-r--r--  Android.bp | 2
-rw-r--r--  Makefile.am | 33
-rw-r--r--  Makefile.sources | 3
-rw-r--r--  README | 24
-rw-r--r--  RELEASING | 6
-rw-r--r--  amdgpu/.editorconfig | 13
-rw-r--r--  amdgpu/Makefile.am | 4
-rw-r--r--  amdgpu/Makefile.sources | 3
-rwxr-xr-x  amdgpu/amdgpu-symbol-check | 23
-rw-r--r--  amdgpu/amdgpu.h | 324
-rw-r--r--  amdgpu/amdgpu_asic_id.c | 161
-rw-r--r--  amdgpu/amdgpu_asic_id.h | 165
-rw-r--r--  amdgpu/amdgpu_bo.c | 97
-rw-r--r--  amdgpu/amdgpu_cs.c | 298
-rw-r--r--  amdgpu/amdgpu_device.c | 92
-rw-r--r--  amdgpu/amdgpu_gpu_info.c | 94
-rw-r--r--  amdgpu/amdgpu_internal.h | 48
-rw-r--r--  amdgpu/amdgpu_vamgr.c | 180
-rw-r--r--  amdgpu/amdgpu_vm.c | 49
-rw-r--r--  amdgpu/meson.build | 66
-rw-r--r--  amdgpu/util_hash.c | 4
-rw-r--r--  amdgpu/util_hash.h | 4
-rw-r--r--  amdgpu/util_hash_table.c | 4
-rw-r--r--  amdgpu/util_hash_table.h | 4
-rw-r--r--  android/gralloc_handle.h | 111
-rwxr-xr-x  autogen.sh | 10
-rw-r--r--  configure.ac | 86
-rw-r--r--  data/Android.mk | 10
-rw-r--r--  data/Makefile.am | 25
-rw-r--r--  data/amdgpu.ids | 187
-rw-r--r--  data/meson.build | 27
-rw-r--r--  etnaviv/Makefile.sources | 1
-rwxr-xr-x  etnaviv/etnaviv-symbol-check | 6
-rw-r--r--  etnaviv/etnaviv_bo.c | 11
-rw-r--r--  etnaviv/etnaviv_bo_cache.c | 30
-rw-r--r--  etnaviv/etnaviv_cmd_stream.c | 49
-rw-r--r--  etnaviv/etnaviv_device.c | 4
-rw-r--r--  etnaviv/etnaviv_drm.h | 58
-rw-r--r--  etnaviv/etnaviv_drmif.h | 25
-rw-r--r--  etnaviv/etnaviv_gpu.c | 76
-rw-r--r--  etnaviv/etnaviv_perfmon.c | 185
-rw-r--r--  etnaviv/etnaviv_pipe.c | 4
-rw-r--r--  etnaviv/etnaviv_priv.h | 47
-rw-r--r--  etnaviv/meson.build | 59
-rwxr-xr-x  exynos/exynos-symbol-check | 2
-rw-r--r--  exynos/exynos_drm.c | 6
-rw-r--r--  exynos/exynos_drmif.h | 8
-rw-r--r--  exynos/exynos_fimg2d.c | 39
-rw-r--r--  exynos/exynos_fimg2d.h | 21
-rw-r--r--  exynos/fimg2d_reg.h | 21
-rw-r--r--  exynos/meson.build | 54
-rw-r--r--  freedreno/Makefile.am | 1
-rwxr-xr-x  freedreno/freedreno-symbol-check | 5
-rw-r--r--  freedreno/freedreno_bo.c | 29
-rw-r--r--  freedreno/freedreno_bo_cache.c | 8
-rw-r--r--  freedreno/freedreno_device.c | 11
-rw-r--r--  freedreno/freedreno_drmif.h | 6
-rw-r--r--  freedreno/freedreno_pipe.c | 33
-rw-r--r--  freedreno/freedreno_priv.h | 66
-rw-r--r--  freedreno/freedreno_ringbuffer.c | 4
-rw-r--r--  freedreno/kgsl/kgsl_bo.c | 4
-rw-r--r--  freedreno/kgsl/kgsl_device.c | 6
-rw-r--r--  freedreno/kgsl/kgsl_pipe.c | 7
-rw-r--r--  freedreno/kgsl/kgsl_priv.h | 2
-rw-r--r--  freedreno/kgsl/kgsl_ringbuffer.c | 6
-rw-r--r--  freedreno/meson.build | 77
-rw-r--r--  freedreno/msm/msm_bo.c | 17
-rw-r--r--  freedreno/msm/msm_device.c | 6
-rw-r--r--  freedreno/msm/msm_drm.h | 40
-rw-r--r--  freedreno/msm/msm_pipe.c | 51
-rw-r--r--  freedreno/msm/msm_priv.h | 3
-rw-r--r--  freedreno/msm/msm_ringbuffer.c | 20
-rw-r--r--  include/drm/README | 34
-rw-r--r--  include/drm/amdgpu_drm.h | 592
-rw-r--r--  include/drm/drm.h | 92
-rw-r--r--  include/drm/drm_fourcc.h | 184
-rw-r--r--  include/drm/drm_mode.h | 266
-rw-r--r--  include/drm/drm_sarea.h | 8
-rw-r--r--  include/drm/i915_drm.h | 321
-rw-r--r--  include/drm/mga_drm.h | 12
-rw-r--r--  include/drm/nouveau_drm.h | 94
-rw-r--r--  include/drm/qxl_drm.h | 82
-rw-r--r--  include/drm/r128_drm.h | 10
-rw-r--r--  include/drm/radeon_drm.h | 128
-rw-r--r--  include/drm/savage_drm.h | 20
-rw-r--r--  include/drm/sis_drm.h | 10
-rw-r--r--  include/drm/tegra_drm.h | 38
-rw-r--r--  include/drm/vc4_drm.h | 133
-rw-r--r--  include/drm/via_drm.h | 8
-rw-r--r--  include/drm/virtgpu_drm.h | 1
-rw-r--r--  include/drm/vmwgfx_drm.h | 44
-rwxr-xr-x  intel/intel-symbol-check | 2
-rw-r--r--  intel/intel_bufmgr.c | 4
-rw-r--r--  intel/intel_bufmgr_fake.c | 4
-rw-r--r--  intel/intel_bufmgr_gem.c | 69
-rw-r--r--  intel/intel_chipset.h | 89
-rw-r--r--  intel/intel_decode.c | 11
-rw-r--r--  intel/meson.build | 106
-rw-r--r--  intel/mm.c | 4
-rw-r--r--  intel/mm.h | 4
-rw-r--r--  intel/test_decode.c | 8
-rw-r--r--  libdrm_macros.h | 2
-rw-r--r--  libkms/api.c | 4
-rw-r--r--  libkms/dumb.c | 4
-rw-r--r--  libkms/exynos.c | 26
-rw-r--r--  libkms/intel.c | 4
-rw-r--r--  libkms/internal.h | 4
-rwxr-xr-x  libkms/kms-symbol-check | 2
-rw-r--r--  libkms/linux.c | 14
-rw-r--r--  libkms/meson.build | 75
-rw-r--r--  libkms/nouveau.c | 4
-rw-r--r--  libkms/radeon.c | 4
-rw-r--r--  libkms/vmwgfx.c | 4
-rw-r--r--  man/drm-kms.xml | 4
-rw-r--r--  man/drm-memory.xml | 4
-rw-r--r--  man/drm.xml | 6
-rw-r--r--  man/drmAvailable.xml | 4
-rw-r--r--  man/drmHandleEvent.xml | 4
-rw-r--r--  man/drmModeGetResources.xml | 4
-rw-r--r--  man/meson.build | 67
-rw-r--r--  meson.build | 382
-rw-r--r--  meson_options.txt | 143
-rw-r--r--  nouveau/abi16.c | 4
-rw-r--r--  nouveau/bufctx.c | 4
-rw-r--r--  nouveau/meson.build | 59
-rwxr-xr-x  nouveau/nouveau-symbol-check | 2
-rw-r--r--  nouveau/nouveau.c | 4
-rw-r--r--  nouveau/pushbuf.c | 4
-rw-r--r--  omap/Android.mk | 13
-rw-r--r--  omap/meson.build | 54
-rwxr-xr-x  omap/omap-symbol-check | 2
-rw-r--r--  omap/omap_drm.c | 4
-rw-r--r--  radeon/meson.build | 64
-rwxr-xr-x  radeon/radeon-symbol-check | 2
-rw-r--r--  radeon/radeon_bo.c | 3
-rw-r--r--  radeon/radeon_bo_gem.c | 3
-rw-r--r--  radeon/radeon_cs.c | 3
-rw-r--r--  radeon/radeon_cs_gem.c | 3
-rw-r--r--  radeon/radeon_cs_space.c | 3
-rw-r--r--  radeon/radeon_surface.c | 4
-rw-r--r--  tegra/meson.build | 53
-rwxr-xr-x  tegra/tegra-symbol-check | 9
-rw-r--r--  tegra/tegra.c | 4
l---------  tests/amdgpu/.editorconfig | 1
-rw-r--r--  tests/amdgpu/Makefile.am | 12
-rw-r--r--  tests/amdgpu/amdgpu_test.c | 228
-rw-r--r--  tests/amdgpu/amdgpu_test.h | 144
-rw-r--r--  tests/amdgpu/basic_tests.c | 1122
-rw-r--r--  tests/amdgpu/bo_tests.c | 79
-rw-r--r--  tests/amdgpu/cs_tests.c | 74
-rw-r--r--  tests/amdgpu/deadlock_tests.c | 255
-rw-r--r--  tests/amdgpu/decode_messages.h (renamed from tests/amdgpu/uvd_messages.h) | 43
-rw-r--r--  tests/amdgpu/frame.h | 2
-rw-r--r--  tests/amdgpu/meson.build | 34
-rw-r--r--  tests/amdgpu/uvd_enc_tests.c | 491
-rw-r--r--  tests/amdgpu/uve_ib.h | 527
-rw-r--r--  tests/amdgpu/vce_tests.c | 79
-rw-r--r--  tests/amdgpu/vcn_tests.c | 398
-rw-r--r--  tests/amdgpu/vm_tests.c | 169
-rw-r--r--  tests/drmsl.c | 28
-rw-r--r--  tests/drmstat.c | 419
-rw-r--r--  tests/etnaviv/Makefile.am | 1
-rw-r--r--  tests/etnaviv/etnaviv_2d_test.c | 4
-rw-r--r--  tests/etnaviv/etnaviv_bo_cache_test.c | 4
-rw-r--r--  tests/etnaviv/meson.build | 45
-rw-r--r--  tests/etnaviv/write_bmp.c | 4
-rw-r--r--  tests/exynos/exynos_fimg2d_event.c | 27
-rw-r--r--  tests/exynos/exynos_fimg2d_perf.c | 34
-rw-r--r--  tests/exynos/exynos_fimg2d_test.c | 68
-rw-r--r--  tests/exynos/meson.build | 54
-rw-r--r--  tests/kms/kms-steal-crtc.c | 4
-rw-r--r--  tests/kms/kms-universal-planes.c | 4
-rw-r--r--  tests/kms/libkms-test-crtc.c | 4
-rw-r--r--  tests/kms/libkms-test-device.c | 13
-rw-r--r--  tests/kms/libkms-test-framebuffer.c | 4
-rw-r--r--  tests/kms/libkms-test-plane.c | 4
-rw-r--r--  tests/kms/libkms-test-screen.c | 4
-rw-r--r--  tests/kms/meson.build | 49
-rw-r--r--  tests/kmstest/meson.build | 30
-rw-r--r--  tests/meson.build | 86
-rw-r--r--  tests/modeprint/meson.build | 29
-rw-r--r--  tests/modeprint/modeprint.c | 2
-rw-r--r--  tests/modetest/buffers.c | 4
-rw-r--r--  tests/modetest/cursor.c | 4
-rw-r--r--  tests/modetest/meson.build | 29
-rw-r--r--  tests/modetest/modetest.c | 110
-rw-r--r--  tests/nouveau/meson.build | 30
-rw-r--r--  tests/nouveau/threaded.c | 4
-rw-r--r--  tests/proptest/meson.build | 28
-rw-r--r--  tests/radeon/meson.build | 27
-rw-r--r--  tests/tegra/meson.build | 27
-rw-r--r--  tests/tegra/openclose.c | 4
-rw-r--r--  tests/util/format.c | 4
-rw-r--r--  tests/util/kms.c | 7
-rw-r--r--  tests/util/meson.build | 28
-rw-r--r--  tests/util/pattern.c | 19
-rw-r--r--  tests/vbltest/meson.build | 28
-rw-r--r--  tests/vbltest/vbltest.c | 4
-rw-r--r--  vc4/meson.build | 28
-rw-r--r--  xf86atomic.h | 6
-rw-r--r--  xf86drm.c | 243
-rw-r--r--  xf86drm.h | 34
-rw-r--r--  xf86drmHash.c | 6
-rw-r--r--  xf86drmMode.c | 411
-rw-r--r--  xf86drmMode.h | 52
206 files changed, 9813 insertions, 2616 deletions
diff --git a/.editorconfig b/.editorconfig
index 893b7be0..29b4f393 100644
--- a/.editorconfig
+++ b/.editorconfig
@@ -17,3 +17,7 @@ indent_style = tab
17[*.m4] 17[*.m4]
18indent_style = space 18indent_style = space
19indent_size = 2 19indent_size = 2
20
21[{meson.build,meson_options.txt}]
22indent_style = space
23indent_size = 2
diff --git a/Android.bp b/Android.bp
index 429c22cc..9121068a 100644
--- a/Android.bp
+++ b/Android.bp
@@ -54,7 +54,7 @@ cc_library {
54 "libdrm_sources", 54 "libdrm_sources",
55 ], 55 ],
56 56
57 export_include_dirs: ["include/drm"], 57 export_include_dirs: ["include/drm", "android"],
58 58
59 cflags: [ 59 cflags: [
60 "-Wno-enum-conversion", 60 "-Wno-enum-conversion",
diff --git a/Makefile.am b/Makefile.am
index 2bf644be..6de56770 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -113,6 +113,7 @@ SUBDIRS = \
113 $(TEGRA_SUBDIR) \ 113 $(TEGRA_SUBDIR) \
114 $(VC4_SUBDIR) \ 114 $(VC4_SUBDIR) \
115 $(ETNAVIV_SUBDIR) \ 115 $(ETNAVIV_SUBDIR) \
116 data \
116 tests \ 117 tests \
117 $(MAN_SUBDIR) \ 118 $(MAN_SUBDIR) \
118 $(ROCKCHIP_SUBDIR) 119 $(ROCKCHIP_SUBDIR)
@@ -139,7 +140,37 @@ if HAVE_VMWGFX
139klibdrminclude_HEADERS += $(LIBDRM_INCLUDE_VMWGFX_H_FILES) 140klibdrminclude_HEADERS += $(LIBDRM_INCLUDE_VMWGFX_H_FILES)
140endif 141endif
141 142
142EXTRA_DIST = include/drm/README 143EXTRA_DIST = \
144 include/drm/README \
145 amdgpu/meson.build \
146 data/meson.build \
147 etnaviv/meson.build \
148 exynos/meson.build \
149 freedreno/meson.build \
150 intel/meson.build \
151 libkms/meson.build \
152 man/meson.build \
153 nouveau/meson.build \
154 omap/meson.build \
155 radeon/meson.build \
156 tegra/meson.build \
157 tests/amdgpu/meson.build \
158 tests/etnaviv/meson.build \
159 tests/exynos/meson.build \
160 tests/kms/meson.build \
161 tests/kmstest/meson.build \
162 tests/meson.build \
163 tests/modeprint/meson.build \
164 tests/modetest/meson.build \
165 tests/nouveau/meson.build \
166 tests/proptest/meson.build \
167 tests/radeon/meson.build \
168 tests/tegra/meson.build \
169 tests/util/meson.build \
170 tests/vbltest/meson.build \
171 vc4/meson.build \
172 meson.build \
173 meson_options.txt
143 174
144copy-headers : 175copy-headers :
145 cp -r $(kernel_source)/include/uapi/drm/*.h $(top_srcdir)/include/drm/ 176 cp -r $(kernel_source)/include/uapi/drm/*.h $(top_srcdir)/include/drm/
diff --git a/Makefile.sources b/Makefile.sources
index 10aa1d0f..1f8372bc 100644
--- a/Makefile.sources
+++ b/Makefile.sources
@@ -37,5 +37,8 @@ LIBDRM_INCLUDE_H_FILES := \
37 include/drm/via_drm.h \ 37 include/drm/via_drm.h \
38 include/drm/virtgpu_drm.h 38 include/drm/virtgpu_drm.h
39 39
40LIBDRM_INCLUDE_ANDROID_H_FILES := \
41 android/gralloc_handle.h
42
40LIBDRM_INCLUDE_VMWGFX_H_FILES := \ 43LIBDRM_INCLUDE_VMWGFX_H_FILES := \
41 include/drm/vmwgfx_drm.h 44 include/drm/vmwgfx_drm.h
diff --git a/README b/README
index 26cab9d3..f3df9ac1 100644
--- a/README
+++ b/README
@@ -15,9 +15,27 @@ with an older kernel.
 Compiling
 ---------
 
-libdrm is a standard autotools package and follows the normal
-configure, build and install steps. The first step is to configure
-the package, which is done by running the configure shell script:
+libdrm has two build systems, a legacy autotools build system, and a newer
+meson build system. The meson build system is much faster, and offers a
+slightly different interface, but otherwise provides an equivalent feature set.
+
+To use it:
+
+  meson builddir/
+
+By default this will install into /usr/local, you can change your prefix
+with --prefix=/usr (or `meson configure builddir/ -Dprefix=/usr` after
+the initial meson setup).
+
+Then use ninja to build and install:
+
+  ninja -C builddir/ install
+
+If you are installing into a system location you will need to run install
+separately, and as root.
+
+
+Alternatively you can invoke autotools configure:
 
   ./configure
 
diff --git a/RELEASING b/RELEASING
index 262ca08d..7e03e3b9 100644
--- a/RELEASING
+++ b/RELEASING
@@ -9,9 +9,9 @@ However, this is up to whoever is driving the feature in question.
 
 Follow these steps to release a new version of libdrm:
 
-  1) Bump the version number in configure.ac. We seem to have settled
-     for 2.4.x as the versioning scheme for libdrm, so just bump the
-     micro version.
+  1) Bump the version number in configure.ac and meson.build. We seem
+     to have settled for 2.4.x as the versioning scheme for libdrm, so
+     just bump the micro version.
 
   2) Run autoconf and then re-run ./configure so the build system
      picks up the new version number.
diff --git a/amdgpu/.editorconfig b/amdgpu/.editorconfig
new file mode 100644
index 00000000..426273fd
--- /dev/null
+++ b/amdgpu/.editorconfig
@@ -0,0 +1,13 @@
1# To use this config with your editor, follow the instructions at:
2# http://editorconfig.org
3
4[*]
5charset = utf-8
6indent_style = tab
7indent_size = 8
8tab_width = 8
9insert_final_newline = true
10
11[meson.build]
12indent_style = space
13indent_size = 2
diff --git a/amdgpu/Makefile.am b/amdgpu/Makefile.am
index cf7bc1ba..a1b0d05c 100644
--- a/amdgpu/Makefile.am
+++ b/amdgpu/Makefile.am
@@ -30,12 +30,16 @@ AM_CFLAGS = \
30 $(PTHREADSTUBS_CFLAGS) \ 30 $(PTHREADSTUBS_CFLAGS) \
31 -I$(top_srcdir)/include/drm 31 -I$(top_srcdir)/include/drm
32 32
33libdrmdatadir = @libdrmdatadir@
34AM_CPPFLAGS = -DAMDGPU_ASIC_ID_TABLE=\"${libdrmdatadir}/amdgpu.ids\"
35
33libdrm_amdgpu_la_LTLIBRARIES = libdrm_amdgpu.la 36libdrm_amdgpu_la_LTLIBRARIES = libdrm_amdgpu.la
34libdrm_amdgpu_ladir = $(libdir) 37libdrm_amdgpu_ladir = $(libdir)
35libdrm_amdgpu_la_LDFLAGS = -version-number 1:0:0 -no-undefined 38libdrm_amdgpu_la_LDFLAGS = -version-number 1:0:0 -no-undefined
36libdrm_amdgpu_la_LIBADD = ../libdrm.la @PTHREADSTUBS_LIBS@ 39libdrm_amdgpu_la_LIBADD = ../libdrm.la @PTHREADSTUBS_LIBS@
37 40
38libdrm_amdgpu_la_SOURCES = $(LIBDRM_AMDGPU_FILES) 41libdrm_amdgpu_la_SOURCES = $(LIBDRM_AMDGPU_FILES)
42amdgpu_asic_id.lo: $(top_srcdir)/data/amdgpu.ids
39 43
40libdrm_amdgpuincludedir = ${includedir}/libdrm 44libdrm_amdgpuincludedir = ${includedir}/libdrm
41libdrm_amdgpuinclude_HEADERS = $(LIBDRM_AMDGPU_H_FILES) 45libdrm_amdgpuinclude_HEADERS = $(LIBDRM_AMDGPU_H_FILES)
diff --git a/amdgpu/Makefile.sources b/amdgpu/Makefile.sources
index 487b9e0a..498b64cc 100644
--- a/amdgpu/Makefile.sources
+++ b/amdgpu/Makefile.sources
@@ -1,11 +1,12 @@
1LIBDRM_AMDGPU_FILES := \ 1LIBDRM_AMDGPU_FILES := \
2 amdgpu_asic_id.h \ 2 amdgpu_asic_id.c \
3 amdgpu_bo.c \ 3 amdgpu_bo.c \
4 amdgpu_cs.c \ 4 amdgpu_cs.c \
5 amdgpu_device.c \ 5 amdgpu_device.c \
6 amdgpu_gpu_info.c \ 6 amdgpu_gpu_info.c \
7 amdgpu_internal.h \ 7 amdgpu_internal.h \
8 amdgpu_vamgr.c \ 8 amdgpu_vamgr.c \
9 amdgpu_vm.c \
9 util_hash.c \ 10 util_hash.c \
10 util_hash.h \ 11 util_hash.h \
11 util_hash_table.c \ 12 util_hash_table.c \
diff --git a/amdgpu/amdgpu-symbol-check b/amdgpu/amdgpu-symbol-check
index 87f4fd2c..90b7a1d6 100755
--- a/amdgpu/amdgpu-symbol-check
+++ b/amdgpu/amdgpu-symbol-check
@@ -3,7 +3,7 @@
3# The following symbols (past the first five) are taken from the public headers. 3# The following symbols (past the first five) are taken from the public headers.
4# A list of the latter should be available Makefile.am/libdrm_amdgpuinclude_HEADERS 4# A list of the latter should be available Makefile.am/libdrm_amdgpuinclude_HEADERS
5 5
6FUNCS=$(nm -D --format=bsd --defined-only ${1-.libs/libdrm_amdgpu.so} | awk '{print $3}' | while read func; do 6FUNCS=$($NM -D --format=bsd --defined-only ${1-.libs/libdrm_amdgpu.so} | awk '{print $3}' | while read func; do
7( grep -q "^$func$" || echo $func ) <<EOF 7( grep -q "^$func$" || echo $func ) <<EOF
8__bss_start 8__bss_start
9_edata 9_edata
@@ -22,16 +22,34 @@ amdgpu_bo_list_update
22amdgpu_bo_query_info 22amdgpu_bo_query_info
23amdgpu_bo_set_metadata 23amdgpu_bo_set_metadata
24amdgpu_bo_va_op 24amdgpu_bo_va_op
25amdgpu_bo_va_op_raw
25amdgpu_bo_wait_for_idle 26amdgpu_bo_wait_for_idle
26amdgpu_create_bo_from_user_mem 27amdgpu_create_bo_from_user_mem
28amdgpu_cs_chunk_fence_info_to_data
29amdgpu_cs_chunk_fence_to_dep
27amdgpu_cs_create_semaphore 30amdgpu_cs_create_semaphore
31amdgpu_cs_create_syncobj
32amdgpu_cs_create_syncobj2
28amdgpu_cs_ctx_create 33amdgpu_cs_ctx_create
34amdgpu_cs_ctx_create2
29amdgpu_cs_ctx_free 35amdgpu_cs_ctx_free
30amdgpu_cs_destroy_semaphore 36amdgpu_cs_destroy_semaphore
37amdgpu_cs_destroy_syncobj
38amdgpu_cs_export_syncobj
39amdgpu_cs_fence_to_handle
40amdgpu_cs_import_syncobj
31amdgpu_cs_query_fence_status 41amdgpu_cs_query_fence_status
32amdgpu_cs_query_reset_state 42amdgpu_cs_query_reset_state
43amdgpu_query_sw_info
33amdgpu_cs_signal_semaphore 44amdgpu_cs_signal_semaphore
34amdgpu_cs_submit 45amdgpu_cs_submit
46amdgpu_cs_submit_raw
47amdgpu_cs_syncobj_export_sync_file
48amdgpu_cs_syncobj_import_sync_file
49amdgpu_cs_syncobj_reset
50amdgpu_cs_syncobj_signal
51amdgpu_cs_syncobj_wait
52amdgpu_cs_wait_fences
35amdgpu_cs_wait_semaphore 53amdgpu_cs_wait_semaphore
36amdgpu_device_deinitialize 54amdgpu_device_deinitialize
37amdgpu_device_initialize 55amdgpu_device_initialize
@@ -45,10 +63,13 @@ amdgpu_query_heap_info
45amdgpu_query_hw_ip_count 63amdgpu_query_hw_ip_count
46amdgpu_query_hw_ip_info 64amdgpu_query_hw_ip_info
47amdgpu_query_info 65amdgpu_query_info
66amdgpu_query_sensor_info
48amdgpu_read_mm_registers 67amdgpu_read_mm_registers
49amdgpu_va_range_alloc 68amdgpu_va_range_alloc
50amdgpu_va_range_free 69amdgpu_va_range_free
51amdgpu_va_range_query 70amdgpu_va_range_query
71amdgpu_vm_reserve_vmid
72amdgpu_vm_unreserve_vmid
52EOF 73EOF
53done) 74done)
54 75
diff --git a/amdgpu/amdgpu.h b/amdgpu/amdgpu.h
index 7b26a04c..36f91058 100644
--- a/amdgpu/amdgpu.h
+++ b/amdgpu/amdgpu.h
@@ -37,6 +37,10 @@
37#include <stdint.h> 37#include <stdint.h>
38#include <stdbool.h> 38#include <stdbool.h>
39 39
40#ifdef __cplusplus
41extern "C" {
42#endif
43
40struct drm_amdgpu_info_hw_ip; 44struct drm_amdgpu_info_hw_ip;
41 45
42/*--------------------------------------------------------------------------*/ 46/*--------------------------------------------------------------------------*/
@@ -90,6 +94,10 @@ enum amdgpu_gpu_va_range
90 amdgpu_gpu_va_range_general = 0 94 amdgpu_gpu_va_range_general = 0
91}; 95};
92 96
97enum amdgpu_sw_info {
98 amdgpu_sw_info_address32_hi = 0,
99};
100
93/*--------------------------------------------------------------------------*/ 101/*--------------------------------------------------------------------------*/
94/* -------------------------- Datatypes ----------------------------------- */ 102/* -------------------------- Datatypes ----------------------------------- */
95/*--------------------------------------------------------------------------*/ 103/*--------------------------------------------------------------------------*/
@@ -794,8 +802,9 @@ int amdgpu_bo_list_update(amdgpu_bo_list_handle handle,
794 * context will always be executed in order (first come, first serve). 802 * context will always be executed in order (first come, first serve).
795 * 803 *
796 * 804 *
797 * \param dev - \c [in] Device handle. See #amdgpu_device_initialize() 805 * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
798 * \param context - \c [out] GPU Context handle 806 * \param priority - \c [in] Context creation flags. See AMDGPU_CTX_PRIORITY_*
807 * \param context - \c [out] GPU Context handle
799 * 808 *
800 * \return 0 on success\n 809 * \return 0 on success\n
801 * <0 - Negative POSIX Error code 810 * <0 - Negative POSIX Error code
@@ -803,6 +812,18 @@ int amdgpu_bo_list_update(amdgpu_bo_list_handle handle,
803 * \sa amdgpu_cs_ctx_free() 812 * \sa amdgpu_cs_ctx_free()
804 * 813 *
805*/ 814*/
815int amdgpu_cs_ctx_create2(amdgpu_device_handle dev,
816 uint32_t priority,
817 amdgpu_context_handle *context);
818/**
819 * Create GPU execution Context
820 *
821 * Refer to amdgpu_cs_ctx_create2 for full documentation. This call
822 * is missing the priority parameter.
823 *
824 * \sa amdgpu_cs_ctx_create2()
825 *
826*/
806int amdgpu_cs_ctx_create(amdgpu_device_handle dev, 827int amdgpu_cs_ctx_create(amdgpu_device_handle dev,
807 amdgpu_context_handle *context); 828 amdgpu_context_handle *context);
808 829
@@ -907,6 +928,29 @@ int amdgpu_cs_query_fence_status(struct amdgpu_cs_fence *fence,
907 uint64_t flags, 928 uint64_t flags,
908 uint32_t *expired); 929 uint32_t *expired);
909 930
931/**
932 * Wait for multiple fences
933 *
934 * \param fences - \c [in] The fence array to wait
935 * \param fence_count - \c [in] The fence count
936 * \param wait_all - \c [in] If true, wait all fences to be signaled,
937 * otherwise, wait at least one fence
938 * \param timeout_ns - \c [in] The timeout to wait, in nanoseconds
939 * \param status - \c [out] '1' for signaled, '0' for timeout
940 * \param first - \c [out] the index of the first signaled fence from @fences
941 *
942 * \return 0 on success
943 * <0 - Negative POSIX Error code
944 *
945 * \note Currently it supports only one amdgpu_device. All fences come from
946 * the same amdgpu_device with the same fd.
947*/
948int amdgpu_cs_wait_fences(struct amdgpu_cs_fence *fences,
949 uint32_t fence_count,
950 bool wait_all,
951 uint64_t timeout_ns,
952 uint32_t *status, uint32_t *first);
953
910/* 954/*
911 * Query / Info API 955 * Query / Info API
912 * 956 *
@@ -1046,6 +1090,23 @@ int amdgpu_query_info(amdgpu_device_handle dev, unsigned info_id,
1046 unsigned size, void *value); 1090 unsigned size, void *value);
1047 1091
1048/** 1092/**
1093 * Query hardware or driver information.
1094 *
1095 * The return size is query-specific and depends on the "info_id" parameter.
1096 * No more than "size" bytes is returned.
1097 *
1098 * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
1099 * \param info - \c [in] amdgpu_sw_info_*
1100 * \param value - \c [out] Pointer to the return value.
1101 *
1102 * \return 0 on success\n
1103 * <0 - Negative POSIX error code
1104 *
1105*/
1106int amdgpu_query_sw_info(amdgpu_device_handle dev, enum amdgpu_sw_info info,
1107 void *value);
1108
1109/**
1049 * Query information about GDS 1110 * Query information about GDS
1050 * 1111 *
1051 * \param dev - \c [in] Device handle. See #amdgpu_device_initialize() 1112 * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
@@ -1059,6 +1120,24 @@ int amdgpu_query_gds_info(amdgpu_device_handle dev,
1059 struct amdgpu_gds_resource_info *gds_info); 1120 struct amdgpu_gds_resource_info *gds_info);
1060 1121
1061/** 1122/**
1123 * Query information about sensor.
1124 *
1125 * The return size is query-specific and depends on the "sensor_type"
1126 * parameter. No more than "size" bytes is returned.
1127 *
1128 * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
1129 * \param sensor_type - \c [in] AMDGPU_INFO_SENSOR_*
1130 * \param size - \c [in] Size of the returned value.
1131 * \param value - \c [out] Pointer to the return value.
1132 *
1133 * \return 0 on success\n
1134 * <0 - Negative POSIX Error code
1135 *
1136*/
1137int amdgpu_query_sensor_info(amdgpu_device_handle dev, unsigned sensor_type,
1138 unsigned size, void *value);
1139
1140/**
1062 * Read a set of consecutive memory-mapped registers. 1141 * Read a set of consecutive memory-mapped registers.
1063 * Not all registers are allowed to be read by userspace. 1142 * Not all registers are allowed to be read by userspace.
1064 * 1143 *
@@ -1083,6 +1162,7 @@ int amdgpu_read_mm_registers(amdgpu_device_handle dev, unsigned dword_offset,
1083 * Flag to request VA address range in the 32bit address space 1162 * Flag to request VA address range in the 32bit address space
1084*/ 1163*/
1085#define AMDGPU_VA_RANGE_32_BIT 0x1 1164#define AMDGPU_VA_RANGE_32_BIT 0x1
1165#define AMDGPU_VA_RANGE_HIGH 0x2
1086 1166
1087/** 1167/**
1088 * Allocate virtual address range 1168 * Allocate virtual address range
@@ -1186,6 +1266,34 @@ int amdgpu_bo_va_op(amdgpu_bo_handle bo,
1186 uint32_t ops); 1266 uint32_t ops);
1187 1267
1188/** 1268/**
1269 * VA mapping/unmapping for a buffer object or PRT region.
1270 *
1271 * This is not a simple drop-in extension for amdgpu_bo_va_op; instead, all
1272 * parameters are treated "raw", i.e. size is not automatically aligned, and
1273 * all flags must be specified explicitly.
1274 *
1275 * \param dev - \c [in] device handle
1276 * \param bo - \c [in] BO handle (may be NULL)
1277 * \param offset - \c [in] Start offset to map
1278 * \param size - \c [in] Size to map
1279 * \param addr - \c [in] Start virtual address.
1280 * \param flags - \c [in] Supported flags for mapping/unmapping
1281 * \param ops - \c [in] AMDGPU_VA_OP_MAP or AMDGPU_VA_OP_UNMAP
1282 *
1283 * \return 0 on success\n
1284 * <0 - Negative POSIX Error code
1285 *
1286*/
1287
1288int amdgpu_bo_va_op_raw(amdgpu_device_handle dev,
1289 amdgpu_bo_handle bo,
1290 uint64_t offset,
1291 uint64_t size,
1292 uint64_t addr,
1293 uint64_t flags,
1294 uint32_t ops);
1295
1296/**
1189 * create semaphore 1297 * create semaphore
1190 * 1298 *
1191 * \param sem - \c [out] semaphore handle 1299 * \param sem - \c [out] semaphore handle
@@ -1255,4 +1363,216 @@ int amdgpu_cs_destroy_semaphore(amdgpu_semaphore_handle sem);
1255*/ 1363*/
1256const char *amdgpu_get_marketing_name(amdgpu_device_handle dev); 1364const char *amdgpu_get_marketing_name(amdgpu_device_handle dev);
1257 1365
1366/**
1367 * Create kernel sync object
1368 *
1369 * \param dev - \c [in] device handle
1370 * \param flags - \c [in] flags that affect creation
1371 * \param syncobj - \c [out] sync object handle
1372 *
1373 * \return 0 on success\n
1374 * <0 - Negative POSIX Error code
1375 *
1376*/
1377int amdgpu_cs_create_syncobj2(amdgpu_device_handle dev,
1378 uint32_t flags,
1379 uint32_t *syncobj);
1380
1381/**
1382 * Create kernel sync object
1383 *
1384 * \param dev - \c [in] device handle
1385 * \param syncobj - \c [out] sync object handle
1386 *
1387 * \return 0 on success\n
1388 * <0 - Negative POSIX Error code
1389 *
1390*/
1391int amdgpu_cs_create_syncobj(amdgpu_device_handle dev,
1392 uint32_t *syncobj);
1393/**
1394 * Destroy kernel sync object
1395 *
1396 * \param dev - \c [in] device handle
1397 * \param syncobj - \c [in] sync object handle
1398 *
1399 * \return 0 on success\n
1400 * <0 - Negative POSIX Error code
1401 *
1402*/
1403int amdgpu_cs_destroy_syncobj(amdgpu_device_handle dev,
1404 uint32_t syncobj);
1405
1406/**
1407 * Reset kernel sync objects to unsignalled state.
1408 *
1409 * \param dev - \c [in] device handle
1410 * \param syncobjs - \c [in] array of sync object handles
1411 * \param syncobj_count - \c [in] number of handles in syncobjs
1412 *
1413 * \return 0 on success\n
1414 * <0 - Negative POSIX Error code
1415 *
1416*/
1417int amdgpu_cs_syncobj_reset(amdgpu_device_handle dev,
1418 const uint32_t *syncobjs, uint32_t syncobj_count);
1419
1420/**
1421 * Signal kernel sync objects.
1422 *
1423 * \param dev - \c [in] device handle
1424 * \param syncobjs - \c [in] array of sync object handles
1425 * \param syncobj_count - \c [in] number of handles in syncobjs
1426 *
1427 * \return 0 on success\n
1428 * <0 - Negative POSIX Error code
1429 *
1430*/
1431int amdgpu_cs_syncobj_signal(amdgpu_device_handle dev,
1432 const uint32_t *syncobjs, uint32_t syncobj_count);
1433
1434/**
1435 * Wait for one or all sync objects to signal.
1436 *
1437 * \param dev - \c [in] self-explanatory
1438 * \param handles - \c [in] array of sync object handles
1439 * \param num_handles - \c [in] self-explanatory
1440 * \param timeout_nsec - \c [in] self-explanatory
1441 * \param flags - \c [in] a bitmask of DRM_SYNCOBJ_WAIT_FLAGS_*
1442 * \param first_signaled - \c [in] self-explanatory
1443 *
1444 * \return 0 on success\n
1445 * -ETIME - Timeout
1446 * <0 - Negative POSIX Error code
1447 *
1448 */
1449int amdgpu_cs_syncobj_wait(amdgpu_device_handle dev,
1450 uint32_t *handles, unsigned num_handles,
1451 int64_t timeout_nsec, unsigned flags,
1452 uint32_t *first_signaled);
1453
1454/**
1455 * Export kernel sync object to shareable fd.
1456 *
1457 * \param dev - \c [in] device handle
1458 * \param syncobj - \c [in] sync object handle
1459 * \param shared_fd - \c [out] shared file descriptor.
1460 *
1461 * \return 0 on success\n
1462 * <0 - Negative POSIX Error code
1463 *
1464*/
1465int amdgpu_cs_export_syncobj(amdgpu_device_handle dev,
1466 uint32_t syncobj,
1467 int *shared_fd);
1468/**
1469 * Import kernel sync object from shareable fd.
1470 *
1471 * \param dev - \c [in] device handle
1472 * \param shared_fd - \c [in] shared file descriptor.
1473 * \param syncobj - \c [out] sync object handle
1474 *
1475 * \return 0 on success\n
1476 * <0 - Negative POSIX Error code
1477 *
1478*/
1479int amdgpu_cs_import_syncobj(amdgpu_device_handle dev,
1480 int shared_fd,
1481 uint32_t *syncobj);
1482
1483/**
1484 * Export kernel sync object to a sync_file.
1485 *
1486 * \param dev - \c [in] device handle
1487 * \param syncobj - \c [in] sync object handle
1488 * \param sync_file_fd - \c [out] sync_file file descriptor.
1489 *
1490 * \return 0 on success\n
1491 * <0 - Negative POSIX Error code
1492 *
1493 */
1494int amdgpu_cs_syncobj_export_sync_file(amdgpu_device_handle dev,
1495 uint32_t syncobj,
1496 int *sync_file_fd);
1497
1498/**
1499 * Import kernel sync object from a sync_file.
1500 *
1501 * \param dev - \c [in] device handle
1502 * \param syncobj - \c [in] sync object handle
1503 * \param sync_file_fd - \c [in] sync_file file descriptor.
1504 *
1505 * \return 0 on success\n
1506 * <0 - Negative POSIX Error code
1507 *
1508 */
1509int amdgpu_cs_syncobj_import_sync_file(amdgpu_device_handle dev,
1510 uint32_t syncobj,
1511 int sync_file_fd);
1512
1513/**
1514 * Export an amdgpu fence as a handle (syncobj or fd).
1515 *
1516 * \param what AMDGPU_FENCE_TO_HANDLE_GET_{SYNCOBJ, FD}
1517 * \param out_handle returned handle
1518 *
1519 * \return 0 on success\n
1520 * <0 - Negative POSIX Error code
1521 */
1522int amdgpu_cs_fence_to_handle(amdgpu_device_handle dev,
1523 struct amdgpu_cs_fence *fence,
1524 uint32_t what,
1525 uint32_t *out_handle);
1526
1527/**
1528 * Submit raw command submission to kernel
1529 *
1530 * \param dev - \c [in] device handle
1531 * \param context - \c [in] context handle for context id
1532 * \param bo_list_handle - \c [in] request bo list handle (0 for none)
1533 * \param num_chunks - \c [in] number of CS chunks to submit
1534 * \param chunks - \c [in] array of CS chunks
1535 * \param seq_no - \c [out] output sequence number for submission.
1536 *
1537 * \return 0 on success\n
1538 * <0 - Negative POSIX Error code
1539 *
1540 */
1541struct drm_amdgpu_cs_chunk;
1542struct drm_amdgpu_cs_chunk_dep;
1543struct drm_amdgpu_cs_chunk_data;
1544
1545int amdgpu_cs_submit_raw(amdgpu_device_handle dev,
1546 amdgpu_context_handle context,
1547 amdgpu_bo_list_handle bo_list_handle,
1548 int num_chunks,
1549 struct drm_amdgpu_cs_chunk *chunks,
1550 uint64_t *seq_no);
1551
1552void amdgpu_cs_chunk_fence_to_dep(struct amdgpu_cs_fence *fence,
1553 struct drm_amdgpu_cs_chunk_dep *dep);
1554void amdgpu_cs_chunk_fence_info_to_data(struct amdgpu_cs_fence_info *fence_info,
1555 struct drm_amdgpu_cs_chunk_data *data);
1556
1557/**
1558 * Reserve VMID
1559 * \param context - \c [in] GPU Context
1560 * \param flags - \c [in] TBD
1561 *
1562 * \return 0 on success otherwise POSIX Error code
1563*/
1564int amdgpu_vm_reserve_vmid(amdgpu_device_handle dev, uint32_t flags);
1565
1566/**
1567 * Free reserved VMID
1568 * \param context - \c [in] GPU Context
1569 * \param flags - \c [in] TBD
1570 *
1571 * \return 0 on success otherwise POSIX Error code
1572*/
1573int amdgpu_vm_unreserve_vmid(amdgpu_device_handle dev, uint32_t flags);
1574
1575#ifdef __cplusplus
1576}
1577#endif
1258#endif /* #ifdef _AMDGPU_H_ */ 1578#endif /* #ifdef _AMDGPU_H_ */
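
The header changes above add, among other things, amdgpu_cs_ctx_create2() and amdgpu_cs_wait_fences(). A minimal usage sketch of the new multi-fence wait follows. It is illustrative only and not part of the patch: the helper name and the 1-second timeout are made up, both fences are assumed to come from earlier amdgpu_cs_submit() calls on the same device, and error handling is trimmed.

  /* Illustrative sketch -- not part of this patch. */
  #include <errno.h>
  #include "amdgpu.h"   /* libdrm_amdgpu public header shown above */

  /* Wait until at least one of two previously submitted fences signals. */
  static int wait_for_either_fence(struct amdgpu_cs_fence *f0,
                                   struct amdgpu_cs_fence *f1)
  {
          struct amdgpu_cs_fence fences[2] = { *f0, *f1 };
          uint32_t status = 0, first = 0;
          int r;

          /* wait_all = false: return as soon as either fence signals. */
          r = amdgpu_cs_wait_fences(fences, 2, false,
                                    1000000000ull /* 1 s in ns */,
                                    &status, &first);
          if (r)
                  return r;                       /* negative POSIX error */
          return status ? (int)first : -ETIME;    /* signaled index or timeout */
  }

Note that, per the new documentation, all fences passed to amdgpu_cs_wait_fences() must come from the same amdgpu_device.
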
diff --git a/amdgpu/amdgpu_asic_id.c b/amdgpu/amdgpu_asic_id.c
new file mode 100644
index 00000000..a5007ffc
--- /dev/null
+++ b/amdgpu/amdgpu_asic_id.c
@@ -0,0 +1,161 @@
1/*
2 * Copyright © 2017 Advanced Micro Devices, Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 */
24
25#include <ctype.h>
26#include <stdio.h>
27#include <stdlib.h>
28#include <stdint.h>
29#include <string.h>
30#include <unistd.h>
31#include <errno.h>
32
33#include "xf86drm.h"
34#include "amdgpu_drm.h"
35#include "amdgpu_internal.h"
36
37static int parse_one_line(struct amdgpu_device *dev, const char *line)
38{
39 char *buf, *saveptr;
40 char *s_did;
41 uint32_t did;
42 char *s_rid;
43 uint32_t rid;
44 char *s_name;
45 char *endptr;
46 int r = -EINVAL;
47
48 /* ignore empty line and commented line */
49 if (strlen(line) == 0 || line[0] == '#')
50 return -EAGAIN;
51
52 buf = strdup(line);
53 if (!buf)
54 return -ENOMEM;
55
56 /* device id */
57 s_did = strtok_r(buf, ",", &saveptr);
58 if (!s_did)
59 goto out;
60
61 did = strtol(s_did, &endptr, 16);
62 if (*endptr)
63 goto out;
64
65 if (did != dev->info.asic_id) {
66 r = -EAGAIN;
67 goto out;
68 }
69
70 /* revision id */
71 s_rid = strtok_r(NULL, ",", &saveptr);
72 if (!s_rid)
73 goto out;
74
75 rid = strtol(s_rid, &endptr, 16);
76 if (*endptr)
77 goto out;
78
79 if (rid != dev->info.pci_rev_id) {
80 r = -EAGAIN;
81 goto out;
82 }
83
84 /* marketing name */
85 s_name = strtok_r(NULL, ",", &saveptr);
86 if (!s_name)
87 goto out;
88
89 /* trim leading whitespaces or tabs */
90 while (isblank(*s_name))
91 s_name++;
92 if (strlen(s_name) == 0)
93 goto out;
94
95 dev->marketing_name = strdup(s_name);
96 if (dev->marketing_name)
97 r = 0;
98 else
99 r = -ENOMEM;
100
101out:
102 free(buf);
103
104 return r;
105}
106
107void amdgpu_parse_asic_ids(struct amdgpu_device *dev)
108{
109 FILE *fp;
110 char *line = NULL;
111 size_t len = 0;
112 ssize_t n;
113 int line_num = 1;
114 int r = 0;
115
116 fp = fopen(AMDGPU_ASIC_ID_TABLE, "r");
117 if (!fp) {
118 fprintf(stderr, "%s: %s\n", AMDGPU_ASIC_ID_TABLE,
119 strerror(errno));
120 return;
121 }
122
123 /* 1st valid line is file version */
124 while ((n = getline(&line, &len, fp)) != -1) {
125 /* trim trailing newline */
126 if (line[n - 1] == '\n')
127 line[n - 1] = '\0';
128
129 /* ignore empty line and commented line */
130 if (strlen(line) == 0 || line[0] == '#') {
131 line_num++;
132 continue;
133 }
134
135 drmMsg("%s version: %s\n", AMDGPU_ASIC_ID_TABLE, line);
136 break;
137 }
138
139 while ((n = getline(&line, &len, fp)) != -1) {
140 /* trim trailing newline */
141 if (line[n - 1] == '\n')
142 line[n - 1] = '\0';
143
144 r = parse_one_line(dev, line);
145 if (r != -EAGAIN)
146 break;
147
148 line_num++;
149 }
150
151 if (r == -EINVAL) {
152 fprintf(stderr, "Invalid format: %s: line %d: %s\n",
153 AMDGPU_ASIC_ID_TABLE, line_num, line);
154 } else if (r && r != -EAGAIN) {
155 fprintf(stderr, "%s: Cannot parse ASIC IDs: %s\n",
156 __func__, strerror(-r));
157 }
158
159 free(line);
160 fclose(fp);
161}
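
For reference, parse_one_line() above expects each entry of data/amdgpu.ids to be a comma-separated "device_id, revision_id, marketing name" triple with hexadecimal IDs, preceded by a version line and optional '#' comment lines. The excerpt below is illustrative only, not the actual contents of the data/amdgpu.ids file added by this patch: the version string is made up, and the two entries are taken from the table being removed from amdgpu_asic_id.h further down.

  # data/amdgpu.ids -- illustrative excerpt only
  # device_id, revision_id, marketing_name   (IDs are hex)
  1.0.0
  67DF,  C7,  AMD Radeon RX 480 Graphics
  67EF,  C1,  AMD Radeon RX 460 Graphics
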
diff --git a/amdgpu/amdgpu_asic_id.h b/amdgpu/amdgpu_asic_id.h
deleted file mode 100644
index 3e7d736b..00000000
--- a/amdgpu/amdgpu_asic_id.h
+++ /dev/null
@@ -1,165 +0,0 @@
1/*
2 * Copyright © 2016 Advanced Micro Devices, Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 */
24
25#ifndef __AMDGPU_ASIC_ID_H__
26#define __AMDGPU_ASIC_ID_H__
27
28static struct amdgpu_asic_id_table_t {
29 uint32_t did;
30 uint32_t rid;
31 const char *marketing_name;
32} const amdgpu_asic_id_table [] = {
33 {0x6600, 0x0, "AMD Radeon HD 8600/8700M"},
34 {0x6600, 0x81, "AMD Radeon R7 M370"},
35 {0x6601, 0x0, "AMD Radeon HD 8500M/8700M"},
36 {0x6604, 0x0, "AMD Radeon R7 M265 Series"},
37 {0x6604, 0x81, "AMD Radeon R7 M350"},
38 {0x6605, 0x0, "AMD Radeon R7 M260 Series"},
39 {0x6605, 0x81, "AMD Radeon R7 M340"},
40 {0x6606, 0x0, "AMD Radeon HD 8790M"},
41 {0x6607, 0x0, "AMD Radeon HD8530M"},
42 {0x6608, 0x0, "AMD FirePro W2100"},
43 {0x6610, 0x0, "AMD Radeon HD 8600 Series"},
44 {0x6610, 0x81, "AMD Radeon R7 350"},
45 {0x6610, 0x83, "AMD Radeon R5 340"},
46 {0x6611, 0x0, "AMD Radeon HD 8500 Series"},
47 {0x6613, 0x0, "AMD Radeon HD 8500 series"},
48 {0x6617, 0xC7, "AMD Radeon R7 240 Series"},
49 {0x6640, 0x0, "AMD Radeon HD 8950"},
50 {0x6640, 0x80, "AMD Radeon R9 M380"},
51 {0x6646, 0x0, "AMD Radeon R9 M280X"},
52 {0x6646, 0x80, "AMD Radeon R9 M470X"},
53 {0x6647, 0x0, "AMD Radeon R9 M270X"},
54 {0x6647, 0x80, "AMD Radeon R9 M380"},
55 {0x6649, 0x0, "AMD FirePro W5100"},
56 {0x6658, 0x0, "AMD Radeon R7 200 Series"},
57 {0x665C, 0x0, "AMD Radeon HD 7700 Series"},
58 {0x665D, 0x0, "AMD Radeon R7 200 Series"},
59 {0x665F, 0x81, "AMD Radeon R7 300 Series"},
60 {0x6660, 0x0, "AMD Radeon HD 8600M Series"},
61 {0x6660, 0x81, "AMD Radeon R5 M335"},
62 {0x6660, 0x83, "AMD Radeon R5 M330"},
63 {0x6663, 0x0, "AMD Radeon HD 8500M Series"},
64 {0x6663, 0x83, "AMD Radeon R5 M320"},
65 {0x6664, 0x0, "AMD Radeon R5 M200 Series"},
66 {0x6665, 0x0, "AMD Radeon R5 M200 Series"},
67 {0x6665, 0x83, "AMD Radeon R5 M320"},
68 {0x6667, 0x0, "AMD Radeon R5 M200 Series"},
69 {0x666F, 0x0, "AMD Radeon HD 8500M"},
70 {0x6780, 0x0, "ATI FirePro V (FireGL V) Graphics Adapter"},
71 {0x678A, 0x0, "ATI FirePro V (FireGL V) Graphics Adapter"},
72 {0x6798, 0x0, "AMD Radeon HD 7900 Series"},
73 {0x679A, 0x0, "AMD Radeon HD 7900 Series"},
74 {0x679B, 0x0, "AMD Radeon HD 7900 Series"},
75 {0x679E, 0x0, "AMD Radeon HD 7800 Series"},
76 {0x67A0, 0x0, "HAWAII XTGL (67A0)"},
77 {0x67A1, 0x0, "HAWAII GL40 (67A1)"},
78 {0x67B0, 0x0, "AMD Radeon R9 200 Series"},
79 {0x67B0, 0x80, "AMD Radeon R9 390 Series"},
80 {0x67B1, 0x0, "AMD Radeon R9 200 Series"},
81 {0x67B1, 0x80, "AMD Radeon R9 390 Series"},
82 {0x67B9, 0x0, "AMD Radeon R9 200 Series"},
83 {0x67DF, 0xC4, "AMD Radeon RX 480 Graphics"},
84 {0x67DF, 0xC5, "AMD Radeon RX 470 Graphics"},
85 {0x67DF, 0xC7, "AMD Radeon RX 480 Graphics"},
86 {0x67DF, 0xCF, "AMD Radeon RX 470 Graphics"},
87 {0x67C4, 0x00, "AMD Radeon Pro WX 7100 Graphics"},
88 {0x67C7, 0x00, "AMD Radeon Pro WX 5100 Graphics"},
89 {0x67C0, 0x00, "AMD Radeon Pro WX 7100 Graphics"},
90 {0x67E0, 0x00, "AMD Radeon Pro WX Series Graphics"},
91 {0x67E3, 0x00, "AMD Radeon Pro WX 4100 Graphics"},
92 {0x67E8, 0x00, "AMD Radeon Pro WX Series Graphics"},
93 {0x67E8, 0x01, "AMD Radeon Pro WX Series Graphics"},
94 {0x67E8, 0x80, "AMD Radeon E9260 Graphics"},
95 {0x67EB, 0x00, "AMD Radeon Pro WX Series Graphics"},
96 {0x67EF, 0xC0, "AMD Radeon RX Graphics"},
97 {0x67EF, 0xC1, "AMD Radeon RX 460 Graphics"},
98 {0x67EF, 0xC5, "AMD Radeon RX 460 Graphics"},
99 {0x67EF, 0xC7, "AMD Radeon RX Graphics"},
100 {0x67EF, 0xCF, "AMD Radeon RX 460 Graphics"},
101 {0x67EF, 0xEF, "AMD Radeon RX Graphics"},
102 {0x67FF, 0xC0, "AMD Radeon RX Graphics"},
103 {0x67FF, 0xC1, "AMD Radeon RX Graphics"},
104 {0x6800, 0x0, "AMD Radeon HD 7970M"},
105 {0x6801, 0x0, "AMD Radeon(TM) HD8970M"},
106 {0x6808, 0x0, "ATI FirePro V(FireGL V) Graphics Adapter"},
107 {0x6809, 0x0, "ATI FirePro V(FireGL V) Graphics Adapter"},
108 {0x6810, 0x0, "AMD Radeon(TM) HD 8800 Series"},
109 {0x6810, 0x81, "AMD Radeon R7 370 Series"},
110 {0x6811, 0x0, "AMD Radeon(TM) HD8800 Series"},
111 {0x6811, 0x81, "AMD Radeon R7 300 Series"},
112 {0x6818, 0x0, "AMD Radeon HD 7800 Series"},
113 {0x6819, 0x0, "AMD Radeon HD 7800 Series"},
114 {0x6820, 0x0, "AMD Radeon HD 8800M Series"},
115 {0x6820, 0x81, "AMD Radeon R9 M375"},
116 {0x6820, 0x83, "AMD Radeon R9 M375X"},
117 {0x6821, 0x0, "AMD Radeon HD 8800M Series"},
118 {0x6821, 0x87, "AMD Radeon R7 M380"},
119 {0x6821, 0x83, "AMD Radeon R9 M370X"},
120 {0x6822, 0x0, "AMD Radeon E8860"},
121 {0x6823, 0x0, "AMD Radeon HD 8800M Series"},
122 {0x6825, 0x0, "AMD Radeon HD 7800M Series"},
123 {0x6827, 0x0, "AMD Radeon HD 7800M Series"},
124 {0x6828, 0x0, "ATI FirePro V(FireGL V) Graphics Adapter"},
125 {0x682B, 0x0, "AMD Radeon HD 8800M Series"},
126 {0x682B, 0x87, "AMD Radeon R9 M360"},
127 {0x682C, 0x0, "AMD FirePro W4100"},
128 {0x682D, 0x0, "AMD Radeon HD 7700M Series"},
129 {0x682F, 0x0, "AMD Radeon HD 7700M Series"},
130 {0x6835, 0x0, "AMD Radeon R7 Series / HD 9000 Series"},
131 {0x6837, 0x0, "AMD Radeon HD7700 Series"},
132 {0x683D, 0x0, "AMD Radeon HD 7700 Series"},
133 {0x683F, 0x0, "AMD Radeon HD 7700 Series"},
134 {0x6900, 0x0, "AMD Radeon R7 M260"},
135 {0x6900, 0x81, "AMD Radeon R7 M360"},
136 {0x6900, 0x83, "AMD Radeon R7 M340"},
137 {0x6901, 0x0, "AMD Radeon R5 M255"},
138 {0x6907, 0x0, "AMD Radeon R5 M255"},
139 {0x6907, 0x87, "AMD Radeon R5 M315"},
140 {0x6920, 0x0, "AMD Radeon R9 M395X"},
141 {0x6920, 0x1, "AMD Radeon R9 M390X"},
142 {0x6921, 0x0, "AMD Radeon R9 M295X"},
143 {0x6929, 0x0, "AMD FirePro S7150"},
144 {0x692B, 0x0, "AMD FirePro W7100"},
145 {0x6938, 0x0, "AMD Radeon R9 200 Series"},
146 {0x6938, 0xF0, "AMD Radeon R9 200 Series"},
147 {0x6938, 0xF1, "AMD Radeon R9 380 Series"},
148 {0x6939, 0xF0, "AMD Radeon R9 200 Series"},
149 {0x6939, 0x0, "AMD Radeon R9 200 Series"},
150 {0x6939, 0xF1, "AMD Radeon R9 380 Series"},
151 {0x7300, 0xC8, "AMD Radeon R9 Fury Series"},
152 {0x7300, 0xCB, "AMD Radeon R9 Fury Series"},
153 {0x7300, 0xCA, "AMD Radeon R9 Fury Series"},
154 {0x9874, 0xC4, "AMD Radeon R7 Graphics"},
155 {0x9874, 0xC5, "AMD Radeon R6 Graphics"},
156 {0x9874, 0xC6, "AMD Radeon R6 Graphics"},
157 {0x9874, 0xC7, "AMD Radeon R5 Graphics"},
158 {0x9874, 0x81, "AMD Radeon R6 Graphics"},
159 {0x9874, 0x87, "AMD Radeon R5 Graphics"},
160 {0x9874, 0x85, "AMD Radeon R6 Graphics"},
161 {0x9874, 0x84, "AMD Radeon R7 Graphics"},
162
163 {0x0000, 0x0, "\0"},
164};
165#endif
diff --git a/amdgpu/amdgpu_bo.c b/amdgpu/amdgpu_bo.c
index d30fd1e7..9e37b149 100644
--- a/amdgpu/amdgpu_bo.c
+++ b/amdgpu/amdgpu_bo.c
@@ -22,10 +22,6 @@
22 * 22 *
23 */ 23 */
24 24
25#ifdef HAVE_CONFIG_H
26#include "config.h"
27#endif
28
29#include <stdlib.h> 25#include <stdlib.h>
30#include <stdio.h> 26#include <stdio.h>
31#include <stdint.h> 27#include <stdint.h>
@@ -53,29 +49,6 @@ static void amdgpu_close_kms_handle(amdgpu_device_handle dev,
53 drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &args); 49 drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &args);
54} 50}
55 51
56drm_private void amdgpu_bo_free_internal(amdgpu_bo_handle bo)
57{
58 /* Remove the buffer from the hash tables. */
59 pthread_mutex_lock(&bo->dev->bo_table_mutex);
60 util_hash_table_remove(bo->dev->bo_handles,
61 (void*)(uintptr_t)bo->handle);
62 if (bo->flink_name) {
63 util_hash_table_remove(bo->dev->bo_flink_names,
64 (void*)(uintptr_t)bo->flink_name);
65 }
66 pthread_mutex_unlock(&bo->dev->bo_table_mutex);
67
68 /* Release CPU access. */
69 if (bo->cpu_map_count > 0) {
70 bo->cpu_map_count = 1;
71 amdgpu_bo_cpu_unmap(bo);
72 }
73
74 amdgpu_close_kms_handle(bo->dev, bo->handle);
75 pthread_mutex_destroy(&bo->cpu_access_mutex);
76 free(bo);
77}
78
79int amdgpu_bo_alloc(amdgpu_device_handle dev, 52int amdgpu_bo_alloc(amdgpu_device_handle dev,
80 struct amdgpu_bo_alloc_request *alloc_buffer, 53 struct amdgpu_bo_alloc_request *alloc_buffer,
81 amdgpu_bo_handle *buf_handle) 54 amdgpu_bo_handle *buf_handle)
@@ -273,8 +246,9 @@ int amdgpu_bo_export(amdgpu_bo_handle bo,
273 246
274 case amdgpu_bo_handle_type_dma_buf_fd: 247 case amdgpu_bo_handle_type_dma_buf_fd:
275 amdgpu_add_handle_to_table(bo); 248 amdgpu_add_handle_to_table(bo);
276 return drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC, 249 return drmPrimeHandleToFD(bo->dev->fd, bo->handle,
277 (int*)shared_handle); 250 DRM_CLOEXEC | DRM_RDWR,
251 (int*)shared_handle);
278 } 252 }
279 return -EINVAL; 253 return -EINVAL;
280} 254}
@@ -302,6 +276,7 @@ int amdgpu_bo_import(amdgpu_device_handle dev,
302 /* Get a KMS handle. */ 276 /* Get a KMS handle. */
303 r = drmPrimeFDToHandle(dev->fd, shared_handle, &handle); 277 r = drmPrimeFDToHandle(dev->fd, shared_handle, &handle);
304 if (r) { 278 if (r) {
279 pthread_mutex_unlock(&dev->bo_table_mutex);
305 return r; 280 return r;
306 } 281 }
307 282
@@ -341,10 +316,9 @@ int amdgpu_bo_import(amdgpu_device_handle dev,
341 } 316 }
342 317
343 if (bo) { 318 if (bo) {
344 pthread_mutex_unlock(&dev->bo_table_mutex);
345
346 /* The buffer already exists, just bump the refcount. */ 319 /* The buffer already exists, just bump the refcount. */
347 atomic_inc(&bo->refcount); 320 atomic_inc(&bo->refcount);
321 pthread_mutex_unlock(&dev->bo_table_mutex);
348 322
349 output->buf_handle = bo; 323 output->buf_handle = bo;
350 output->alloc_size = bo->alloc_size; 324 output->alloc_size = bo->alloc_size;
@@ -419,8 +393,35 @@ int amdgpu_bo_import(amdgpu_device_handle dev,
419 393
420int amdgpu_bo_free(amdgpu_bo_handle buf_handle) 394int amdgpu_bo_free(amdgpu_bo_handle buf_handle)
421{ 395{
422 /* Just drop the reference. */ 396 struct amdgpu_device *dev;
423 amdgpu_bo_reference(&buf_handle, NULL); 397 struct amdgpu_bo *bo = buf_handle;
398
399 assert(bo != NULL);
400 dev = bo->dev;
401 pthread_mutex_lock(&dev->bo_table_mutex);
402
403 if (update_references(&bo->refcount, NULL)) {
404 /* Remove the buffer from the hash tables. */
405 util_hash_table_remove(dev->bo_handles,
406 (void*)(uintptr_t)bo->handle);
407
408 if (bo->flink_name) {
409 util_hash_table_remove(dev->bo_flink_names,
410 (void*)(uintptr_t)bo->flink_name);
411 }
412
413 /* Release CPU access. */
414 if (bo->cpu_map_count > 0) {
415 bo->cpu_map_count = 1;
416 amdgpu_bo_cpu_unmap(bo);
417 }
418
419 amdgpu_close_kms_handle(dev, bo->handle);
420 pthread_mutex_destroy(&bo->cpu_access_mutex);
421 free(bo);
422 }
423
424 pthread_mutex_unlock(&dev->bo_table_mutex);
424 return 0; 425 return 0;
425} 426}
426 427
@@ -652,7 +653,7 @@ int amdgpu_bo_list_update(amdgpu_bo_list_handle handle,
652 return -EINVAL; 653 return -EINVAL;
653 654
654 list = malloc(number_of_resources * sizeof(struct drm_amdgpu_bo_list_entry)); 655 list = malloc(number_of_resources * sizeof(struct drm_amdgpu_bo_list_entry));
655 if (list == NULL) 656 if (!list)
656 return -ENOMEM; 657 return -ENOMEM;
657 658
658 args.in.operation = AMDGPU_BO_LIST_OP_UPDATE; 659 args.in.operation = AMDGPU_BO_LIST_OP_UPDATE;
@@ -683,21 +684,37 @@ int amdgpu_bo_va_op(amdgpu_bo_handle bo,
683 uint32_t ops) 684 uint32_t ops)
684{ 685{
685 amdgpu_device_handle dev = bo->dev; 686 amdgpu_device_handle dev = bo->dev;
687
688 size = ALIGN(size, getpagesize());
689
690 return amdgpu_bo_va_op_raw(dev, bo, offset, size, addr,
691 AMDGPU_VM_PAGE_READABLE |
692 AMDGPU_VM_PAGE_WRITEABLE |
693 AMDGPU_VM_PAGE_EXECUTABLE, ops);
694}
695
696int amdgpu_bo_va_op_raw(amdgpu_device_handle dev,
697 amdgpu_bo_handle bo,
698 uint64_t offset,
699 uint64_t size,
700 uint64_t addr,
701 uint64_t flags,
702 uint32_t ops)
703{
686 struct drm_amdgpu_gem_va va; 704 struct drm_amdgpu_gem_va va;
687 int r; 705 int r;
688 706
689 if (ops != AMDGPU_VA_OP_MAP && ops != AMDGPU_VA_OP_UNMAP) 707 if (ops != AMDGPU_VA_OP_MAP && ops != AMDGPU_VA_OP_UNMAP &&
708 ops != AMDGPU_VA_OP_REPLACE && ops != AMDGPU_VA_OP_CLEAR)
690 return -EINVAL; 709 return -EINVAL;
691 710
692 memset(&va, 0, sizeof(va)); 711 memset(&va, 0, sizeof(va));
693 va.handle = bo->handle; 712 va.handle = bo ? bo->handle : 0;
694 va.operation = ops; 713 va.operation = ops;
695 va.flags = AMDGPU_VM_PAGE_READABLE | 714 va.flags = flags;
696 AMDGPU_VM_PAGE_WRITEABLE |
697 AMDGPU_VM_PAGE_EXECUTABLE;
698 va.va_address = addr; 715 va.va_address = addr;
699 va.offset_in_bo = offset; 716 va.offset_in_bo = offset;
700 va.map_size = ALIGN(size, getpagesize()); 717 va.map_size = size;
701 718
702 r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va)); 719 r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));
703 720
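
With the change above, amdgpu_bo_va_op() becomes a thin wrapper around the new amdgpu_bo_va_op_raw(), which neither aligns the size nor fills in any VM page flags on the caller's behalf. A sketch of mapping a buffer read-only at a chosen GPU virtual address follows; it is illustrative only, the helper name is made up, and error handling is omitted.

  /* Illustrative sketch -- not part of this patch. */
  #include <unistd.h>        /* getpagesize() */
  #include "amdgpu.h"
  #include "amdgpu_drm.h"    /* AMDGPU_VM_PAGE_*, AMDGPU_VA_OP_* */

  static int map_bo_readonly(amdgpu_device_handle dev, amdgpu_bo_handle bo,
                             uint64_t size, uint64_t va)
  {
          uint64_t page = getpagesize();

          /* The raw variant maps exactly what it is given, so the caller
           * aligns the size and names every flag explicitly. */
          return amdgpu_bo_va_op_raw(dev, bo, 0 /* offset in BO */,
                                     (size + page - 1) & ~(page - 1), va,
                                     AMDGPU_VM_PAGE_READABLE,
                                     AMDGPU_VA_OP_MAP);
  }
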
diff --git a/amdgpu/amdgpu_cs.c b/amdgpu/amdgpu_cs.c
index fb5b3a8c..3c9be6c2 100644
--- a/amdgpu/amdgpu_cs.c
+++ b/amdgpu/amdgpu_cs.c
@@ -21,10 +21,6 @@
21 * 21 *
22 */ 22 */
23 23
24#ifdef HAVE_CONFIG_H
25#include "config.h"
26#endif
27
28#include <stdlib.h> 24#include <stdlib.h>
29#include <stdio.h> 25#include <stdio.h>
30#include <string.h> 26#include <string.h>
@@ -46,26 +42,25 @@ static int amdgpu_cs_reset_sem(amdgpu_semaphore_handle sem);
46/** 42/**
47 * Create command submission context 43 * Create command submission context
48 * 44 *
49 * \param dev - \c [in] amdgpu device handle 45 * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
50 * \param context - \c [out] amdgpu context handle 46 * \param priority - \c [in] Context creation flags. See AMDGPU_CTX_PRIORITY_*
47 * \param context - \c [out] GPU Context handle
51 * 48 *
52 * \return 0 on success otherwise POSIX Error code 49 * \return 0 on success otherwise POSIX Error code
53*/ 50*/
54int amdgpu_cs_ctx_create(amdgpu_device_handle dev, 51int amdgpu_cs_ctx_create2(amdgpu_device_handle dev, uint32_t priority,
55 amdgpu_context_handle *context) 52 amdgpu_context_handle *context)
56{ 53{
57 struct amdgpu_context *gpu_context; 54 struct amdgpu_context *gpu_context;
58 union drm_amdgpu_ctx args; 55 union drm_amdgpu_ctx args;
59 int i, j, k; 56 int i, j, k;
60 int r; 57 int r;
61 58
62 if (NULL == dev) 59 if (!dev || !context)
63 return -EINVAL;
64 if (NULL == context)
65 return -EINVAL; 60 return -EINVAL;
66 61
67 gpu_context = calloc(1, sizeof(struct amdgpu_context)); 62 gpu_context = calloc(1, sizeof(struct amdgpu_context));
68 if (NULL == gpu_context) 63 if (!gpu_context)
69 return -ENOMEM; 64 return -ENOMEM;
70 65
71 gpu_context->dev = dev; 66 gpu_context->dev = dev;
@@ -77,6 +72,8 @@ int amdgpu_cs_ctx_create(amdgpu_device_handle dev,
77 /* Create the context */ 72 /* Create the context */
78 memset(&args, 0, sizeof(args)); 73 memset(&args, 0, sizeof(args));
79 args.in.op = AMDGPU_CTX_OP_ALLOC_CTX; 74 args.in.op = AMDGPU_CTX_OP_ALLOC_CTX;
75 args.in.priority = priority;
76
80 r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_CTX, &args, sizeof(args)); 77 r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_CTX, &args, sizeof(args));
81 if (r) 78 if (r)
82 goto error; 79 goto error;
@@ -96,6 +93,12 @@ error:
96 return r; 93 return r;
97} 94}
98 95
96int amdgpu_cs_ctx_create(amdgpu_device_handle dev,
97 amdgpu_context_handle *context)
98{
99 return amdgpu_cs_ctx_create2(dev, AMDGPU_CTX_PRIORITY_NORMAL, context);
100}
101
99/** 102/**
100 * Release command submission context 103 * Release command submission context
101 * 104 *
@@ -110,7 +113,7 @@ int amdgpu_cs_ctx_free(amdgpu_context_handle context)
110 int i, j, k; 113 int i, j, k;
111 int r; 114 int r;
112 115
113 if (NULL == context) 116 if (!context)
114 return -EINVAL; 117 return -EINVAL;
115 118
116 pthread_mutex_destroy(&context->sequence_mutex); 119 pthread_mutex_destroy(&context->sequence_mutex);
@@ -188,8 +191,6 @@ static int amdgpu_cs_submit_one(amdgpu_context_handle context,
188 return -EINVAL; 191 return -EINVAL;
189 if (ibs_request->ring >= AMDGPU_CS_MAX_RINGS) 192 if (ibs_request->ring >= AMDGPU_CS_MAX_RINGS)
190 return -EINVAL; 193 return -EINVAL;
191 if (ibs_request->number_of_ibs > AMDGPU_CS_MAX_IBS_PER_SUBMIT)
192 return -EINVAL;
193 if (ibs_request->number_of_ibs == 0) { 194 if (ibs_request->number_of_ibs == 0) {
194 ibs_request->seq_no = AMDGPU_NULL_SUBMIT_SEQ; 195 ibs_request->seq_no = AMDGPU_NULL_SUBMIT_SEQ;
195 return 0; 196 return 0;
@@ -330,9 +331,7 @@ int amdgpu_cs_submit(amdgpu_context_handle context,
330 uint32_t i; 331 uint32_t i;
331 int r; 332 int r;
332 333
333 if (NULL == context) 334 if (!context || !ibs_request)
334 return -EINVAL;
335 if (NULL == ibs_request)
336 return -EINVAL; 335 return -EINVAL;
337 336
338 r = 0; 337 r = 0;
@@ -416,11 +415,7 @@ int amdgpu_cs_query_fence_status(struct amdgpu_cs_fence *fence,
416 bool busy = true; 415 bool busy = true;
417 int r; 416 int r;
418 417
419 if (NULL == fence) 418 if (!fence || !expired || !fence->context)
420 return -EINVAL;
421 if (NULL == expired)
422 return -EINVAL;
423 if (NULL == fence->context)
424 return -EINVAL; 419 return -EINVAL;
425 if (fence->ip_type >= AMDGPU_HW_IP_NUM) 420 if (fence->ip_type >= AMDGPU_HW_IP_NUM)
426 return -EINVAL; 421 return -EINVAL;
@@ -443,15 +438,83 @@ int amdgpu_cs_query_fence_status(struct amdgpu_cs_fence *fence,
443 return r; 438 return r;
444} 439}
445 440
441static int amdgpu_ioctl_wait_fences(struct amdgpu_cs_fence *fences,
442 uint32_t fence_count,
443 bool wait_all,
444 uint64_t timeout_ns,
445 uint32_t *status,
446 uint32_t *first)
447{
448 struct drm_amdgpu_fence *drm_fences;
449 amdgpu_device_handle dev = fences[0].context->dev;
450 union drm_amdgpu_wait_fences args;
451 int r;
452 uint32_t i;
453
454 drm_fences = alloca(sizeof(struct drm_amdgpu_fence) * fence_count);
455 for (i = 0; i < fence_count; i++) {
456 drm_fences[i].ctx_id = fences[i].context->id;
457 drm_fences[i].ip_type = fences[i].ip_type;
458 drm_fences[i].ip_instance = fences[i].ip_instance;
459 drm_fences[i].ring = fences[i].ring;
460 drm_fences[i].seq_no = fences[i].fence;
461 }
462
463 memset(&args, 0, sizeof(args));
464 args.in.fences = (uint64_t)(uintptr_t)drm_fences;
465 args.in.fence_count = fence_count;
466 args.in.wait_all = wait_all;
467 args.in.timeout_ns = amdgpu_cs_calculate_timeout(timeout_ns);
468
469 r = drmIoctl(dev->fd, DRM_IOCTL_AMDGPU_WAIT_FENCES, &args);
470 if (r)
471 return -errno;
472
473 *status = args.out.status;
474
475 if (first)
476 *first = args.out.first_signaled;
477
478 return 0;
479}
480
481int amdgpu_cs_wait_fences(struct amdgpu_cs_fence *fences,
482 uint32_t fence_count,
483 bool wait_all,
484 uint64_t timeout_ns,
485 uint32_t *status,
486 uint32_t *first)
487{
488 uint32_t i;
489
490 /* Sanity check */
491 if (!fences || !status || !fence_count)
492 return -EINVAL;
493
494 for (i = 0; i < fence_count; i++) {
495 if (NULL == fences[i].context)
496 return -EINVAL;
497 if (fences[i].ip_type >= AMDGPU_HW_IP_NUM)
498 return -EINVAL;
499 if (fences[i].ring >= AMDGPU_CS_MAX_RINGS)
500 return -EINVAL;
501 }
502
503 *status = 0;
504
505 return amdgpu_ioctl_wait_fences(fences, fence_count, wait_all,
506 timeout_ns, status, first);
507}
508
446int amdgpu_cs_create_semaphore(amdgpu_semaphore_handle *sem) 509int amdgpu_cs_create_semaphore(amdgpu_semaphore_handle *sem)
447{ 510{
448 struct amdgpu_semaphore *gpu_semaphore; 511 struct amdgpu_semaphore *gpu_semaphore;
449 512
450 if (NULL == sem) 513 if (!sem)
451 return -EINVAL; 514 return -EINVAL;
452 515
453 gpu_semaphore = calloc(1, sizeof(struct amdgpu_semaphore)); 516 gpu_semaphore = calloc(1, sizeof(struct amdgpu_semaphore));
454 if (NULL == gpu_semaphore) 517 if (!gpu_semaphore)
455 return -ENOMEM; 518 return -ENOMEM;
456 519
457 atomic_set(&gpu_semaphore->refcount, 1); 520 atomic_set(&gpu_semaphore->refcount, 1);
@@ -466,14 +529,12 @@ int amdgpu_cs_signal_semaphore(amdgpu_context_handle ctx,
466 uint32_t ring, 529 uint32_t ring,
467 amdgpu_semaphore_handle sem) 530 amdgpu_semaphore_handle sem)
468{ 531{
469 if (NULL == ctx) 532 if (!ctx || !sem)
470 return -EINVAL; 533 return -EINVAL;
471 if (ip_type >= AMDGPU_HW_IP_NUM) 534 if (ip_type >= AMDGPU_HW_IP_NUM)
472 return -EINVAL; 535 return -EINVAL;
473 if (ring >= AMDGPU_CS_MAX_RINGS) 536 if (ring >= AMDGPU_CS_MAX_RINGS)
474 return -EINVAL; 537 return -EINVAL;
475 if (NULL == sem)
476 return -EINVAL;
477 /* sem has been signaled */ 538 /* sem has been signaled */
478 if (sem->signal_fence.context) 539 if (sem->signal_fence.context)
479 return -EINVAL; 540 return -EINVAL;
@@ -494,16 +555,14 @@ int amdgpu_cs_wait_semaphore(amdgpu_context_handle ctx,
494 uint32_t ring, 555 uint32_t ring,
495 amdgpu_semaphore_handle sem) 556 amdgpu_semaphore_handle sem)
496{ 557{
497 if (NULL == ctx) 558 if (!ctx || !sem)
498 return -EINVAL; 559 return -EINVAL;
499 if (ip_type >= AMDGPU_HW_IP_NUM) 560 if (ip_type >= AMDGPU_HW_IP_NUM)
500 return -EINVAL; 561 return -EINVAL;
501 if (ring >= AMDGPU_CS_MAX_RINGS) 562 if (ring >= AMDGPU_CS_MAX_RINGS)
502 return -EINVAL; 563 return -EINVAL;
503 if (NULL == sem)
504 return -EINVAL;
505 /* must signal first */ 564 /* must signal first */
506 if (NULL == sem->signal_fence.context) 565 if (!sem->signal_fence.context)
507 return -EINVAL; 566 return -EINVAL;
508 567
509 pthread_mutex_lock(&ctx->sequence_mutex); 568 pthread_mutex_lock(&ctx->sequence_mutex);
@@ -514,12 +573,10 @@ int amdgpu_cs_wait_semaphore(amdgpu_context_handle ctx,
514 573
515static int amdgpu_cs_reset_sem(amdgpu_semaphore_handle sem) 574static int amdgpu_cs_reset_sem(amdgpu_semaphore_handle sem)
516{ 575{
517 if (NULL == sem) 576 if (!sem || !sem->signal_fence.context)
518 return -EINVAL;
519 if (NULL == sem->signal_fence.context)
520 return -EINVAL; 577 return -EINVAL;
521 578
522 sem->signal_fence.context = NULL;; 579 sem->signal_fence.context = NULL;
523 sem->signal_fence.ip_type = 0; 580 sem->signal_fence.ip_type = 0;
524 sem->signal_fence.ip_instance = 0; 581 sem->signal_fence.ip_instance = 0;
525 sem->signal_fence.ring = 0; 582 sem->signal_fence.ring = 0;
@@ -530,7 +587,7 @@ static int amdgpu_cs_reset_sem(amdgpu_semaphore_handle sem)
530 587
531static int amdgpu_cs_unreference_sem(amdgpu_semaphore_handle sem) 588static int amdgpu_cs_unreference_sem(amdgpu_semaphore_handle sem)
532{ 589{
533 if (NULL == sem) 590 if (!sem)
534 return -EINVAL; 591 return -EINVAL;
535 592
536 if (update_references(&sem->refcount, NULL)) 593 if (update_references(&sem->refcount, NULL))
@@ -542,3 +599,170 @@ int amdgpu_cs_destroy_semaphore(amdgpu_semaphore_handle sem)
542{ 599{
543 return amdgpu_cs_unreference_sem(sem); 600 return amdgpu_cs_unreference_sem(sem);
544} 601}
602
603int amdgpu_cs_create_syncobj2(amdgpu_device_handle dev,
604 uint32_t flags,
605 uint32_t *handle)
606{
607 if (NULL == dev)
608 return -EINVAL;
609
610 return drmSyncobjCreate(dev->fd, flags, handle);
611}
612
613int amdgpu_cs_create_syncobj(amdgpu_device_handle dev,
614 uint32_t *handle)
615{
616 if (NULL == dev)
617 return -EINVAL;
618
619 return drmSyncobjCreate(dev->fd, 0, handle);
620}
621
622int amdgpu_cs_destroy_syncobj(amdgpu_device_handle dev,
623 uint32_t handle)
624{
625 if (NULL == dev)
626 return -EINVAL;
627
628 return drmSyncobjDestroy(dev->fd, handle);
629}
630
631int amdgpu_cs_syncobj_reset(amdgpu_device_handle dev,
632 const uint32_t *syncobjs, uint32_t syncobj_count)
633{
634 if (NULL == dev)
635 return -EINVAL;
636
637 return drmSyncobjReset(dev->fd, syncobjs, syncobj_count);
638}
639
640int amdgpu_cs_syncobj_signal(amdgpu_device_handle dev,
641 const uint32_t *syncobjs, uint32_t syncobj_count)
642{
643 if (NULL == dev)
644 return -EINVAL;
645
646 return drmSyncobjSignal(dev->fd, syncobjs, syncobj_count);
647}
648
649int amdgpu_cs_syncobj_wait(amdgpu_device_handle dev,
650 uint32_t *handles, unsigned num_handles,
651 int64_t timeout_nsec, unsigned flags,
652 uint32_t *first_signaled)
653{
654 if (NULL == dev)
655 return -EINVAL;
656
657 return drmSyncobjWait(dev->fd, handles, num_handles, timeout_nsec,
658 flags, first_signaled);
659}
660
661int amdgpu_cs_export_syncobj(amdgpu_device_handle dev,
662 uint32_t handle,
663 int *shared_fd)
664{
665 if (NULL == dev)
666 return -EINVAL;
667
668 return drmSyncobjHandleToFD(dev->fd, handle, shared_fd);
669}
670
671int amdgpu_cs_import_syncobj(amdgpu_device_handle dev,
672 int shared_fd,
673 uint32_t *handle)
674{
675 if (NULL == dev)
676 return -EINVAL;
677
678 return drmSyncobjFDToHandle(dev->fd, shared_fd, handle);
679}
680
681int amdgpu_cs_syncobj_export_sync_file(amdgpu_device_handle dev,
682 uint32_t syncobj,
683 int *sync_file_fd)
684{
685 if (NULL == dev)
686 return -EINVAL;
687
688 return drmSyncobjExportSyncFile(dev->fd, syncobj, sync_file_fd);
689}
690
691int amdgpu_cs_syncobj_import_sync_file(amdgpu_device_handle dev,
692 uint32_t syncobj,
693 int sync_file_fd)
694{
695 if (NULL == dev)
696 return -EINVAL;
697
698 return drmSyncobjImportSyncFile(dev->fd, syncobj, sync_file_fd);
699}
700
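The syncobj entry points above are thin wrappers around the drmSyncobj*() helpers, so they compose directly with the generic DRM syncobj flags. A rough sketch of the create/export/import/wait round trip (the timeout and flag choice are illustrative, not mandated by the patch):

#include <unistd.h>
#include <xf86drm.h>
#include <amdgpu.h>

static void syncobj_round_trip(amdgpu_device_handle dev, int64_t timeout_ns)
{
        uint32_t syncobj = 0, imported = 0;
        int fd = -1;

        if (amdgpu_cs_create_syncobj2(dev, 0, &syncobj))
                return;

        /* Export as an fd (e.g. to pass over a UNIX socket), then import
         * it back as a second handle and wait on that. */
        if (!amdgpu_cs_export_syncobj(dev, syncobj, &fd) &&
            !amdgpu_cs_import_syncobj(dev, fd, &imported)) {
                amdgpu_cs_syncobj_wait(dev, &imported, 1, timeout_ns,
                                       DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
                                       NULL);
                amdgpu_cs_destroy_syncobj(dev, imported);
        }
        if (fd >= 0)
                close(fd);
        amdgpu_cs_destroy_syncobj(dev, syncobj);
}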
701int amdgpu_cs_submit_raw(amdgpu_device_handle dev,
702 amdgpu_context_handle context,
703 amdgpu_bo_list_handle bo_list_handle,
704 int num_chunks,
705 struct drm_amdgpu_cs_chunk *chunks,
706 uint64_t *seq_no)
707{
708 union drm_amdgpu_cs cs = {0};
709 uint64_t *chunk_array;
710 int i, r;
711 if (num_chunks == 0)
712 return -EINVAL;
713
714 chunk_array = alloca(sizeof(uint64_t) * num_chunks);
715 for (i = 0; i < num_chunks; i++)
716 chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
717 cs.in.chunks = (uint64_t)(uintptr_t)chunk_array;
718 cs.in.ctx_id = context->id;
719 cs.in.bo_list_handle = bo_list_handle ? bo_list_handle->handle : 0;
720 cs.in.num_chunks = num_chunks;
721 r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_CS,
722 &cs, sizeof(cs));
723 if (r)
724 return r;
725
726 if (seq_no)
727 *seq_no = cs.out.handle;
728 return 0;
729}
730
731void amdgpu_cs_chunk_fence_info_to_data(struct amdgpu_cs_fence_info *fence_info,
732 struct drm_amdgpu_cs_chunk_data *data)
733{
734 data->fence_data.handle = fence_info->handle->handle;
735 data->fence_data.offset = fence_info->offset * sizeof(uint64_t);
736}
737
738void amdgpu_cs_chunk_fence_to_dep(struct amdgpu_cs_fence *fence,
739 struct drm_amdgpu_cs_chunk_dep *dep)
740{
741 dep->ip_type = fence->ip_type;
742 dep->ip_instance = fence->ip_instance;
743 dep->ring = fence->ring;
744 dep->ctx_id = fence->context->id;
745 dep->handle = fence->fence;
746}
747
748int amdgpu_cs_fence_to_handle(amdgpu_device_handle dev,
749 struct amdgpu_cs_fence *fence,
750 uint32_t what,
751 uint32_t *out_handle)
752{
753 union drm_amdgpu_fence_to_handle fth = {0};
754 int r;
755
756 fth.in.fence.ctx_id = fence->context->id;
757 fth.in.fence.ip_type = fence->ip_type;
758 fth.in.fence.ip_instance = fence->ip_instance;
759 fth.in.fence.ring = fence->ring;
760 fth.in.fence.seq_no = fence->fence;
761 fth.in.what = what;
762
763 r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_FENCE_TO_HANDLE,
764 &fth, sizeof(fth));
765 if (r == 0)
766 *out_handle = fth.out.handle;
767 return r;
768}
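amdgpu_cs_submit_raw() leaves chunk construction to the caller; the two helpers above only translate libdrm fence structures into the kernel's chunk payloads. A rough sketch of a single-IB submission with one dependency (ib_va, ib_size, the bo_list and the dependency fence are assumptions standing in for real state, and error handling is omitted):

#include <stdint.h>
#include <amdgpu.h>
#include <amdgpu_drm.h>

static int submit_one_ib(amdgpu_device_handle dev, amdgpu_context_handle ctx,
                         amdgpu_bo_list_handle bo_list, uint64_t ib_va,
                         uint32_t ib_size, struct amdgpu_cs_fence *dep,
                         uint64_t *seq)
{
        struct drm_amdgpu_cs_chunk_ib ib = {
                .va_start = ib_va,
                .ib_bytes = ib_size,
                .ip_type  = AMDGPU_HW_IP_GFX,
        };
        struct drm_amdgpu_cs_chunk_dep dep_chunk;
        struct drm_amdgpu_cs_chunk chunks[2];

        /* Translate the libdrm fence into the kernel dependency payload. */
        amdgpu_cs_chunk_fence_to_dep(dep, &dep_chunk);

        chunks[0].chunk_id = AMDGPU_CHUNK_ID_IB;
        chunks[0].length_dw = sizeof(ib) / 4;
        chunks[0].chunk_data = (uintptr_t)&ib;

        chunks[1].chunk_id = AMDGPU_CHUNK_ID_DEPENDENCIES;
        chunks[1].length_dw = sizeof(dep_chunk) / 4;
        chunks[1].chunk_data = (uintptr_t)&dep_chunk;

        return amdgpu_cs_submit_raw(dev, ctx, bo_list, 2, chunks, seq);
}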
diff --git a/amdgpu/amdgpu_device.c b/amdgpu/amdgpu_device.c
index f4ede031..d81efcf8 100644
--- a/amdgpu/amdgpu_device.c
+++ b/amdgpu/amdgpu_device.c
@@ -28,10 +28,6 @@
28 * 28 *
29 */ 29 */
30 30
31#ifdef HAVE_CONFIG_H
32#include "config.h"
33#endif
34
35#include <sys/stat.h> 31#include <sys/stat.h>
36#include <errno.h> 32#include <errno.h>
37#include <string.h> 33#include <string.h>
@@ -44,7 +40,6 @@
44#include "amdgpu_internal.h" 40#include "amdgpu_internal.h"
45#include "util_hash_table.h" 41#include "util_hash_table.h"
46#include "util_math.h" 42#include "util_math.h"
47#include "amdgpu_asic_id.h"
48 43
49#define PTR_TO_UINT(x) ((unsigned)((intptr_t)(x))) 44#define PTR_TO_UINT(x) ((unsigned)((intptr_t)(x)))
50#define UINT_TO_PTR(x) ((void *)((intptr_t)(x))) 45#define UINT_TO_PTR(x) ((void *)((intptr_t)(x)))
@@ -131,10 +126,8 @@ static int amdgpu_get_auth(int fd, int *auth)
131 126
132static void amdgpu_device_free_internal(amdgpu_device_handle dev) 127static void amdgpu_device_free_internal(amdgpu_device_handle dev)
133{ 128{
134 amdgpu_vamgr_deinit(dev->vamgr); 129 amdgpu_vamgr_deinit(&dev->vamgr_32);
135 free(dev->vamgr); 130 amdgpu_vamgr_deinit(&dev->vamgr);
136 amdgpu_vamgr_deinit(dev->vamgr_32);
137 free(dev->vamgr_32);
138 util_hash_table_destroy(dev->bo_flink_names); 131 util_hash_table_destroy(dev->bo_flink_names);
139 util_hash_table_destroy(dev->bo_handles); 132 util_hash_table_destroy(dev->bo_handles);
140 pthread_mutex_destroy(&dev->bo_table_mutex); 133 pthread_mutex_destroy(&dev->bo_table_mutex);
@@ -142,6 +135,7 @@ static void amdgpu_device_free_internal(amdgpu_device_handle dev)
142 close(dev->fd); 135 close(dev->fd);
143 if ((dev->flink_fd >= 0) && (dev->fd != dev->flink_fd)) 136 if ((dev->flink_fd >= 0) && (dev->fd != dev->flink_fd))
144 close(dev->flink_fd); 137 close(dev->flink_fd);
138 free(dev->marketing_name);
145 free(dev); 139 free(dev);
146} 140}
147 141
@@ -187,6 +181,8 @@ int amdgpu_device_initialize(int fd,
187 fd_tab = util_hash_table_create(fd_hash, fd_compare); 181 fd_tab = util_hash_table_create(fd_hash, fd_compare);
188 r = amdgpu_get_auth(fd, &flag_auth); 182 r = amdgpu_get_auth(fd, &flag_auth);
189 if (r) { 183 if (r) {
184 fprintf(stderr, "%s: amdgpu_get_auth (1) failed (%i)\n",
185 __func__, r);
190 pthread_mutex_unlock(&fd_mutex); 186 pthread_mutex_unlock(&fd_mutex);
191 return r; 187 return r;
192 } 188 }
@@ -194,6 +190,8 @@ int amdgpu_device_initialize(int fd,
194 if (dev) { 190 if (dev) {
195 r = amdgpu_get_auth(dev->fd, &flag_authexist); 191 r = amdgpu_get_auth(dev->fd, &flag_authexist);
196 if (r) { 192 if (r) {
193 fprintf(stderr, "%s: amdgpu_get_auth (2) failed (%i)\n",
194 __func__, r);
197 pthread_mutex_unlock(&fd_mutex); 195 pthread_mutex_unlock(&fd_mutex);
198 return r; 196 return r;
199 } 197 }
@@ -209,6 +207,7 @@ int amdgpu_device_initialize(int fd,
209 207
210 dev = calloc(1, sizeof(struct amdgpu_device)); 208 dev = calloc(1, sizeof(struct amdgpu_device));
211 if (!dev) { 209 if (!dev) {
210 fprintf(stderr, "%s: calloc failed\n", __func__);
212 pthread_mutex_unlock(&fd_mutex); 211 pthread_mutex_unlock(&fd_mutex);
213 return -ENOMEM; 212 return -ENOMEM;
214 } 213 }
@@ -244,38 +243,47 @@ int amdgpu_device_initialize(int fd,
244 243
245 /* Check if acceleration is working. */ 244 /* Check if acceleration is working. */
246 r = amdgpu_query_info(dev, AMDGPU_INFO_ACCEL_WORKING, 4, &accel_working); 245 r = amdgpu_query_info(dev, AMDGPU_INFO_ACCEL_WORKING, 4, &accel_working);
247 if (r) 246 if (r) {
247 fprintf(stderr, "%s: amdgpu_query_info(ACCEL_WORKING) failed (%i)\n",
248 __func__, r);
248 goto cleanup; 249 goto cleanup;
250 }
249 if (!accel_working) { 251 if (!accel_working) {
252 fprintf(stderr, "%s: AMDGPU_INFO_ACCEL_WORKING = 0\n", __func__);
250 r = -EBADF; 253 r = -EBADF;
251 goto cleanup; 254 goto cleanup;
252 } 255 }
253 256
254 r = amdgpu_query_gpu_info_init(dev); 257 r = amdgpu_query_gpu_info_init(dev);
255 if (r) 258 if (r) {
259 fprintf(stderr, "%s: amdgpu_query_gpu_info_init failed\n", __func__);
256 goto cleanup; 260 goto cleanup;
261 }
257 262
258 dev->vamgr = calloc(1, sizeof(struct amdgpu_bo_va_mgr)); 263 start = dev->dev_info.virtual_address_offset;
259 if (dev->vamgr == NULL) 264 max = MIN2(dev->dev_info.virtual_address_max, 0x100000000ULL);
260 goto cleanup; 265 amdgpu_vamgr_init(&dev->vamgr_32, start, max,
266 dev->dev_info.virtual_address_alignment);
261 267
262 amdgpu_vamgr_init(dev->vamgr, dev->dev_info.virtual_address_offset, 268 start = max;
263 dev->dev_info.virtual_address_max, 269 max = MAX2(dev->dev_info.virtual_address_max, 0x100000000ULL);
270 amdgpu_vamgr_init(&dev->vamgr, start, max,
264 dev->dev_info.virtual_address_alignment); 271 dev->dev_info.virtual_address_alignment);
265 272
266 max = MIN2(dev->dev_info.virtual_address_max, 0xffffffff); 273 start = dev->dev_info.high_va_offset;
267 start = amdgpu_vamgr_find_va(dev->vamgr, 274 max = MIN2(dev->dev_info.high_va_max, (start & ~0xffffffffULL) +
268 max - dev->dev_info.virtual_address_offset, 275 0x100000000ULL);
269 dev->dev_info.virtual_address_alignment, 0); 276 amdgpu_vamgr_init(&dev->vamgr_high_32, start, max,
270 if (start > 0xffffffff)
271 goto free_va; /* shouldn't get here */
272
273 dev->vamgr_32 = calloc(1, sizeof(struct amdgpu_bo_va_mgr));
274 if (dev->vamgr_32 == NULL)
275 goto free_va;
276 amdgpu_vamgr_init(dev->vamgr_32, start, max,
277 dev->dev_info.virtual_address_alignment); 277 dev->dev_info.virtual_address_alignment);
278 278
279 start = max;
280 max = MAX2(dev->dev_info.high_va_max, (start & ~0xffffffffULL) +
281 0x100000000ULL);
282 amdgpu_vamgr_init(&dev->vamgr_high, start, max,
283 dev->dev_info.virtual_address_alignment);
284
285 amdgpu_parse_asic_ids(dev);
286
279 *major_version = dev->major_version; 287 *major_version = dev->major_version;
280 *minor_version = dev->minor_version; 288 *minor_version = dev->minor_version;
281 *device_handle = dev; 289 *device_handle = dev;
@@ -284,13 +292,6 @@ int amdgpu_device_initialize(int fd,
284 292
285 return 0; 293 return 0;
286 294
287free_va:
288 r = -ENOMEM;
289 amdgpu_vamgr_free_va(dev->vamgr, start,
290 max - dev->dev_info.virtual_address_offset);
291 amdgpu_vamgr_deinit(dev->vamgr);
292 free(dev->vamgr);
293
294cleanup: 295cleanup:
295 if (dev->fd >= 0) 296 if (dev->fd >= 0)
296 close(dev->fd); 297 close(dev->fd);
@@ -307,14 +308,21 @@ int amdgpu_device_deinitialize(amdgpu_device_handle dev)
307 308
308const char *amdgpu_get_marketing_name(amdgpu_device_handle dev) 309const char *amdgpu_get_marketing_name(amdgpu_device_handle dev)
309{ 310{
310 const struct amdgpu_asic_id_table_t *t = amdgpu_asic_id_table; 311 return dev->marketing_name;
312}
311 313
312 while (t->did) { 314int amdgpu_query_sw_info(amdgpu_device_handle dev, enum amdgpu_sw_info info,
313 if ((t->did == dev->info.asic_id) && 315 void *value)
314 (t->rid == dev->info.pci_rev_id)) 316{
315 return t->marketing_name; 317 uint32_t *val32 = (uint32_t*)value;
316 t++; 318
319 switch (info) {
320 case amdgpu_sw_info_address32_hi:
321 if (dev->vamgr_high_32.va_max)
322 *val32 = (dev->vamgr_high_32.va_max - 1) >> 32;
323 else
324 *val32 = (dev->vamgr_32.va_max - 1) >> 32;
325 return 0;
317 } 326 }
318 327 return -EINVAL;
319 return NULL;
320} 328}
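With the table-driven lookup, amdgpu_get_marketing_name() simply returns the string that amdgpu_parse_asic_ids() stored at init time, and amdgpu_query_sw_info() exposes where the 32-bit VA window ended up. A small sketch of both (error handling trimmed):

#include <stdio.h>
#include <amdgpu.h>

static void print_device_info(amdgpu_device_handle dev)
{
        const char *name = amdgpu_get_marketing_name(dev);  /* may be NULL */
        uint32_t addr32_hi = 0;

        if (!amdgpu_query_sw_info(dev, amdgpu_sw_info_address32_hi, &addr32_hi))
                printf("%s: address32_hi = 0x%08x\n",
                       name ? name : "unknown GPU", addr32_hi);
}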
diff --git a/amdgpu/amdgpu_gpu_info.c b/amdgpu/amdgpu_gpu_info.c
index 66c7e0e1..b68e1c4f 100644
--- a/amdgpu/amdgpu_gpu_info.c
+++ b/amdgpu/amdgpu_gpu_info.c
@@ -22,10 +22,6 @@
22 * 22 *
23 */ 23 */
24 24
25#ifdef HAVE_CONFIG_H
26#include "config.h"
27#endif
28
29#include <errno.h> 25#include <errno.h>
30#include <string.h> 26#include <string.h>
31 27
@@ -169,53 +165,57 @@ drm_private int amdgpu_query_gpu_info_init(amdgpu_device_handle dev)
169 dev->info.vce_harvest_config = dev->dev_info.vce_harvest_config; 165 dev->info.vce_harvest_config = dev->dev_info.vce_harvest_config;
170 dev->info.pci_rev_id = dev->dev_info.pci_rev; 166 dev->info.pci_rev_id = dev->dev_info.pci_rev;
171 167
172 for (i = 0; i < (int)dev->info.num_shader_engines; i++) { 168 if (dev->info.family_id < AMDGPU_FAMILY_AI) {
173 unsigned instance = (i << AMDGPU_INFO_MMR_SE_INDEX_SHIFT) | 169 for (i = 0; i < (int)dev->info.num_shader_engines; i++) {
174 (AMDGPU_INFO_MMR_SH_INDEX_MASK << 170 unsigned instance = (i << AMDGPU_INFO_MMR_SE_INDEX_SHIFT) |
175 AMDGPU_INFO_MMR_SH_INDEX_SHIFT); 171 (AMDGPU_INFO_MMR_SH_INDEX_MASK <<
172 AMDGPU_INFO_MMR_SH_INDEX_SHIFT);
176 173
177 r = amdgpu_read_mm_registers(dev, 0x263d, 1, instance, 0, 174 r = amdgpu_read_mm_registers(dev, 0x263d, 1, instance, 0,
178 &dev->info.backend_disable[i]); 175 &dev->info.backend_disable[i]);
179 if (r) 176 if (r)
180 return r; 177 return r;
181 /* extract bitfield CC_RB_BACKEND_DISABLE.BACKEND_DISABLE */ 178 /* extract bitfield CC_RB_BACKEND_DISABLE.BACKEND_DISABLE */
182 dev->info.backend_disable[i] = 179 dev->info.backend_disable[i] =
183 (dev->info.backend_disable[i] >> 16) & 0xff; 180 (dev->info.backend_disable[i] >> 16) & 0xff;
184
185 r = amdgpu_read_mm_registers(dev, 0xa0d4, 1, instance, 0,
186 &dev->info.pa_sc_raster_cfg[i]);
187 if (r)
188 return r;
189 181
190 if (dev->info.family_id >= AMDGPU_FAMILY_CI) { 182 r = amdgpu_read_mm_registers(dev, 0xa0d4, 1, instance, 0,
191 r = amdgpu_read_mm_registers(dev, 0xa0d5, 1, instance, 0, 183 &dev->info.pa_sc_raster_cfg[i]);
192 &dev->info.pa_sc_raster_cfg1[i]);
193 if (r) 184 if (r)
194 return r; 185 return r;
186
187 if (dev->info.family_id >= AMDGPU_FAMILY_CI) {
188 r = amdgpu_read_mm_registers(dev, 0xa0d5, 1, instance, 0,
189 &dev->info.pa_sc_raster_cfg1[i]);
190 if (r)
191 return r;
192 }
195 } 193 }
196 } 194 }
197 195
198 r = amdgpu_read_mm_registers(dev, 0x2644, 32, 0xffffffff, 0, 196 r = amdgpu_read_mm_registers(dev, 0x263e, 1, 0xffffffff, 0,
199 dev->info.gb_tile_mode); 197 &dev->info.gb_addr_cfg);
200 if (r) 198 if (r)
201 return r; 199 return r;
202 200
203 if (dev->info.family_id >= AMDGPU_FAMILY_CI) { 201 if (dev->info.family_id < AMDGPU_FAMILY_AI) {
204 r = amdgpu_read_mm_registers(dev, 0x2664, 16, 0xffffffff, 0, 202 r = amdgpu_read_mm_registers(dev, 0x2644, 32, 0xffffffff, 0,
205 dev->info.gb_macro_tile_mode); 203 dev->info.gb_tile_mode);
206 if (r) 204 if (r)
207 return r; 205 return r;
208 }
209 206
210 r = amdgpu_read_mm_registers(dev, 0x263e, 1, 0xffffffff, 0, 207 if (dev->info.family_id >= AMDGPU_FAMILY_CI) {
211 &dev->info.gb_addr_cfg); 208 r = amdgpu_read_mm_registers(dev, 0x2664, 16, 0xffffffff, 0,
212 if (r) 209 dev->info.gb_macro_tile_mode);
213 return r; 210 if (r)
211 return r;
212 }
214 213
215 r = amdgpu_read_mm_registers(dev, 0x9d8, 1, 0xffffffff, 0, 214 r = amdgpu_read_mm_registers(dev, 0x9d8, 1, 0xffffffff, 0,
216 &dev->info.mc_arb_ramcfg); 215 &dev->info.mc_arb_ramcfg);
217 if (r) 216 if (r)
218 return r; 217 return r;
218 }
219 219
220 dev->info.cu_active_number = dev->dev_info.cu_active_number; 220 dev->info.cu_active_number = dev->dev_info.cu_active_number;
221 dev->info.cu_ao_mask = dev->dev_info.cu_ao_mask; 221 dev->info.cu_ao_mask = dev->dev_info.cu_ao_mask;
@@ -230,8 +230,9 @@ drm_private int amdgpu_query_gpu_info_init(amdgpu_device_handle dev)
230int amdgpu_query_gpu_info(amdgpu_device_handle dev, 230int amdgpu_query_gpu_info(amdgpu_device_handle dev,
231 struct amdgpu_gpu_info *info) 231 struct amdgpu_gpu_info *info)
232{ 232{
233 if ((dev == NULL) || (info == NULL)) 233 if (!dev || !info)
234 return -EINVAL; 234 return -EINVAL;
235
235 /* Get ASIC info*/ 236 /* Get ASIC info*/
236 *info = dev->info; 237 *info = dev->info;
237 238
@@ -296,7 +297,7 @@ int amdgpu_query_gds_info(amdgpu_device_handle dev,
296 struct drm_amdgpu_info_gds gds_config = {}; 297 struct drm_amdgpu_info_gds gds_config = {};
297 int r; 298 int r;
298 299
299 if (gds_info == NULL) 300 if (!gds_info)
300 return -EINVAL; 301 return -EINVAL;
301 302
302 r = amdgpu_query_info(dev, AMDGPU_INFO_GDS_CONFIG, 303 r = amdgpu_query_info(dev, AMDGPU_INFO_GDS_CONFIG,
@@ -314,3 +315,18 @@ int amdgpu_query_gds_info(amdgpu_device_handle dev,
314 315
315 return 0; 316 return 0;
316} 317}
318
319int amdgpu_query_sensor_info(amdgpu_device_handle dev, unsigned sensor_type,
320 unsigned size, void *value)
321{
322 struct drm_amdgpu_info request;
323
324 memset(&request, 0, sizeof(request));
325 request.return_pointer = (uintptr_t)value;
326 request.return_size = size;
327 request.query = AMDGPU_INFO_SENSOR;
328 request.sensor_info.type = sensor_type;
329
330 return drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &request,
331 sizeof(struct drm_amdgpu_info));
332}
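The new sensor query is a direct pass-through of AMDGPU_INFO_SENSOR, so the units are whatever the kernel reports (MHz for clocks, millidegrees Celsius for temperature on current kernels). A minimal sketch, assuming the AMDGPU_INFO_SENSOR_* types from amdgpu_drm.h:

#include <stdio.h>
#include <amdgpu.h>
#include <amdgpu_drm.h>

static void print_sensors(amdgpu_device_handle dev)
{
        uint32_t sclk = 0, temp = 0;

        if (!amdgpu_query_sensor_info(dev, AMDGPU_INFO_SENSOR_GFX_SCLK,
                                      sizeof(sclk), &sclk))
                printf("GFX clock: %u MHz\n", sclk);

        if (!amdgpu_query_sensor_info(dev, AMDGPU_INFO_SENSOR_GPU_TEMP,
                                      sizeof(temp), &temp))
                printf("GPU temperature: %u.%03u C\n", temp / 1000, temp % 1000);
}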
diff --git a/amdgpu/amdgpu_internal.h b/amdgpu/amdgpu_internal.h
index 4f039b68..99b8ce0b 100644
--- a/amdgpu/amdgpu_internal.h
+++ b/amdgpu/amdgpu_internal.h
@@ -25,10 +25,6 @@
25#ifndef _AMDGPU_INTERNAL_H_ 25#ifndef _AMDGPU_INTERNAL_H_
26#define _AMDGPU_INTERNAL_H_ 26#define _AMDGPU_INTERNAL_H_
27 27
28#ifdef HAVE_CONFIG_H
29#include "config.h"
30#endif
31
32#include <assert.h> 28#include <assert.h>
33#include <pthread.h> 29#include <pthread.h>
34 30
@@ -53,8 +49,6 @@ struct amdgpu_bo_va_hole {
53}; 49};
54 50
55struct amdgpu_bo_va_mgr { 51struct amdgpu_bo_va_mgr {
56 /* the start virtual address */
57 uint64_t va_offset;
58 uint64_t va_max; 52 uint64_t va_max;
59 struct list_head va_holes; 53 struct list_head va_holes;
60 pthread_mutex_t bo_va_mutex; 54 pthread_mutex_t bo_va_mutex;
@@ -76,6 +70,7 @@ struct amdgpu_device {
76 unsigned major_version; 70 unsigned major_version;
77 unsigned minor_version; 71 unsigned minor_version;
78 72
73 char *marketing_name;
79 /** List of buffer handles. Protected by bo_table_mutex. */ 74 /** List of buffer handles. Protected by bo_table_mutex. */
80 struct util_hash_table *bo_handles; 75 struct util_hash_table *bo_handles;
81 /** List of buffer GEM flink names. Protected by bo_table_mutex. */ 76 /** List of buffer GEM flink names. Protected by bo_table_mutex. */
@@ -84,10 +79,14 @@ struct amdgpu_device {
84 pthread_mutex_t bo_table_mutex; 79 pthread_mutex_t bo_table_mutex;
85 struct drm_amdgpu_info_device dev_info; 80 struct drm_amdgpu_info_device dev_info;
86 struct amdgpu_gpu_info info; 81 struct amdgpu_gpu_info info;
87 /** The global VA manager for the whole virtual address space */ 82 /** The VA manager for the lower virtual address space */
88 struct amdgpu_bo_va_mgr *vamgr; 83 struct amdgpu_bo_va_mgr vamgr;
89 /** The VA manager for the 32bit address space */ 84 /** The VA manager for the 32bit address space */
90 struct amdgpu_bo_va_mgr *vamgr_32; 85 struct amdgpu_bo_va_mgr vamgr_32;
86 /** The VA manager for the high virtual address space */
87 struct amdgpu_bo_va_mgr vamgr_high;
88 /** The VA manager for the 32bit high address space */
89 struct amdgpu_bo_va_mgr vamgr_high_32;
91}; 90};
92 91
93struct amdgpu_bo { 92struct amdgpu_bo {
@@ -135,19 +134,12 @@ struct amdgpu_semaphore {
135 * Functions. 134 * Functions.
136 */ 135 */
137 136
138drm_private void amdgpu_bo_free_internal(amdgpu_bo_handle bo);
139
140drm_private void amdgpu_vamgr_init(struct amdgpu_bo_va_mgr *mgr, uint64_t start, 137drm_private void amdgpu_vamgr_init(struct amdgpu_bo_va_mgr *mgr, uint64_t start,
141 uint64_t max, uint64_t alignment); 138 uint64_t max, uint64_t alignment);
142 139
143drm_private void amdgpu_vamgr_deinit(struct amdgpu_bo_va_mgr *mgr); 140drm_private void amdgpu_vamgr_deinit(struct amdgpu_bo_va_mgr *mgr);
144 141
145drm_private uint64_t 142drm_private void amdgpu_parse_asic_ids(struct amdgpu_device *dev);
146amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, uint64_t size,
147 uint64_t alignment, uint64_t base_required);
148
149drm_private void
150amdgpu_vamgr_free_va(struct amdgpu_bo_va_mgr *mgr, uint64_t va, uint64_t size);
151 143
152drm_private int amdgpu_query_gpu_info_init(amdgpu_device_handle dev); 144drm_private int amdgpu_query_gpu_info_init(amdgpu_device_handle dev);
153 145
@@ -179,26 +171,4 @@ static inline bool update_references(atomic_t *dst, atomic_t *src)
179 return false; 171 return false;
180} 172}
181 173
182/**
183 * Assignment between two amdgpu_bo pointers with reference counting.
184 *
185 * Usage:
186 * struct amdgpu_bo *dst = ... , *src = ...;
187 *
188 * dst = src;
189 * // No reference counting. Only use this when you need to move
190 * // a reference from one pointer to another.
191 *
192 * amdgpu_bo_reference(&dst, src);
193 * // Reference counters are updated. dst is decremented and src is
194 * // incremented. dst is freed if its reference counter is 0.
195 */
196static inline void amdgpu_bo_reference(struct amdgpu_bo **dst,
197 struct amdgpu_bo *src)
198{
199 if (update_references(&(*dst)->refcount, &src->refcount))
200 amdgpu_bo_free_internal(*dst);
201 *dst = src;
202}
203
204#endif 174#endif
diff --git a/amdgpu/amdgpu_vamgr.c b/amdgpu/amdgpu_vamgr.c
index 8a707cbc..1de9f952 100644
--- a/amdgpu/amdgpu_vamgr.c
+++ b/amdgpu/amdgpu_vamgr.c
@@ -21,10 +21,6 @@
21 * 21 *
22 */ 22 */
23 23
24#ifdef HAVE_CONFIG_H
25#include "config.h"
26#endif
27
28#include <stdlib.h> 24#include <stdlib.h>
29#include <string.h> 25#include <string.h>
30#include <errno.h> 26#include <errno.h>
@@ -34,25 +30,33 @@
34#include "util_math.h" 30#include "util_math.h"
35 31
36int amdgpu_va_range_query(amdgpu_device_handle dev, 32int amdgpu_va_range_query(amdgpu_device_handle dev,
37 enum amdgpu_gpu_va_range type, uint64_t *start, uint64_t *end) 33 enum amdgpu_gpu_va_range type,
34 uint64_t *start, uint64_t *end)
38{ 35{
39 if (type == amdgpu_gpu_va_range_general) { 36 if (type != amdgpu_gpu_va_range_general)
40 *start = dev->dev_info.virtual_address_offset; 37 return -EINVAL;
41 *end = dev->dev_info.virtual_address_max; 38
42 return 0; 39 *start = dev->dev_info.virtual_address_offset;
43 } 40 *end = dev->dev_info.virtual_address_max;
44 return -EINVAL; 41 return 0;
45} 42}
46 43
47drm_private void amdgpu_vamgr_init(struct amdgpu_bo_va_mgr *mgr, uint64_t start, 44drm_private void amdgpu_vamgr_init(struct amdgpu_bo_va_mgr *mgr, uint64_t start,
48 uint64_t max, uint64_t alignment) 45 uint64_t max, uint64_t alignment)
49{ 46{
50 mgr->va_offset = start; 47 struct amdgpu_bo_va_hole *n;
48
51 mgr->va_max = max; 49 mgr->va_max = max;
52 mgr->va_alignment = alignment; 50 mgr->va_alignment = alignment;
53 51
54 list_inithead(&mgr->va_holes); 52 list_inithead(&mgr->va_holes);
55 pthread_mutex_init(&mgr->bo_va_mutex, NULL); 53 pthread_mutex_init(&mgr->bo_va_mutex, NULL);
54 pthread_mutex_lock(&mgr->bo_va_mutex);
55 n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
56 n->size = mgr->va_max - start;
57 n->offset = start;
58 list_add(&n->list, &mgr->va_holes);
59 pthread_mutex_unlock(&mgr->bo_va_mutex);
56} 60}
57 61
58drm_private void amdgpu_vamgr_deinit(struct amdgpu_bo_va_mgr *mgr) 62drm_private void amdgpu_vamgr_deinit(struct amdgpu_bo_va_mgr *mgr)
@@ -65,13 +69,14 @@ drm_private void amdgpu_vamgr_deinit(struct amdgpu_bo_va_mgr *mgr)
65 pthread_mutex_destroy(&mgr->bo_va_mutex); 69 pthread_mutex_destroy(&mgr->bo_va_mutex);
66} 70}
67 71
68drm_private uint64_t 72static drm_private uint64_t
69amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, uint64_t size, 73amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, uint64_t size,
70 uint64_t alignment, uint64_t base_required) 74 uint64_t alignment, uint64_t base_required)
71{ 75{
72 struct amdgpu_bo_va_hole *hole, *n; 76 struct amdgpu_bo_va_hole *hole, *n;
73 uint64_t offset = 0, waste = 0; 77 uint64_t offset = 0, waste = 0;
74 78
79
75 alignment = MAX2(alignment, mgr->va_alignment); 80 alignment = MAX2(alignment, mgr->va_alignment);
76 size = ALIGN(size, mgr->va_alignment); 81 size = ALIGN(size, mgr->va_alignment);
77 82
@@ -79,12 +84,10 @@ amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, uint64_t size,
79 return AMDGPU_INVALID_VA_ADDRESS; 84 return AMDGPU_INVALID_VA_ADDRESS;
80 85
81 pthread_mutex_lock(&mgr->bo_va_mutex); 86 pthread_mutex_lock(&mgr->bo_va_mutex);
82 /* TODO: using more appropriate way to track the holes */ 87 LIST_FOR_EACH_ENTRY_SAFE_REV(hole, n, &mgr->va_holes, list) {
83 /* first look for a hole */
84 LIST_FOR_EACH_ENTRY_SAFE(hole, n, &mgr->va_holes, list) {
85 if (base_required) { 88 if (base_required) {
86 if(hole->offset > base_required || 89 if (hole->offset > base_required ||
87 (hole->offset + hole->size) < (base_required + size)) 90 (hole->offset + hole->size) < (base_required + size))
88 continue; 91 continue;
89 waste = base_required - hole->offset; 92 waste = base_required - hole->offset;
90 offset = base_required; 93 offset = base_required;
@@ -123,41 +126,14 @@ amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, uint64_t size,
123 } 126 }
124 } 127 }
125 128
126 if (base_required) {
127 if (base_required < mgr->va_offset) {
128 pthread_mutex_unlock(&mgr->bo_va_mutex);
129 return AMDGPU_INVALID_VA_ADDRESS;
130 }
131 offset = mgr->va_offset;
132 waste = base_required - mgr->va_offset;
133 } else {
134 offset = mgr->va_offset;
135 waste = offset % alignment;
136 waste = waste ? alignment - waste : 0;
137 }
138
139 if (offset + waste + size > mgr->va_max) {
140 pthread_mutex_unlock(&mgr->bo_va_mutex);
141 return AMDGPU_INVALID_VA_ADDRESS;
142 }
143
144 if (waste) {
145 n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
146 n->size = waste;
147 n->offset = offset;
148 list_add(&n->list, &mgr->va_holes);
149 }
150
151 offset += waste;
152 mgr->va_offset += size + waste;
153 pthread_mutex_unlock(&mgr->bo_va_mutex); 129 pthread_mutex_unlock(&mgr->bo_va_mutex);
154 return offset; 130 return AMDGPU_INVALID_VA_ADDRESS;
155} 131}
156 132
157drm_private void 133static drm_private void
158amdgpu_vamgr_free_va(struct amdgpu_bo_va_mgr *mgr, uint64_t va, uint64_t size) 134amdgpu_vamgr_free_va(struct amdgpu_bo_va_mgr *mgr, uint64_t va, uint64_t size)
159{ 135{
160 struct amdgpu_bo_va_hole *hole; 136 struct amdgpu_bo_va_hole *hole, *next;
161 137
162 if (va == AMDGPU_INVALID_VA_ADDRESS) 138 if (va == AMDGPU_INVALID_VA_ADDRESS)
163 return; 139 return;
@@ -165,61 +141,47 @@ amdgpu_vamgr_free_va(struct amdgpu_bo_va_mgr *mgr, uint64_t va, uint64_t size)
165 size = ALIGN(size, mgr->va_alignment); 141 size = ALIGN(size, mgr->va_alignment);
166 142
167 pthread_mutex_lock(&mgr->bo_va_mutex); 143 pthread_mutex_lock(&mgr->bo_va_mutex);
168 if ((va + size) == mgr->va_offset) { 144 hole = container_of(&mgr->va_holes, hole, list);
169 mgr->va_offset = va; 145 LIST_FOR_EACH_ENTRY(next, &mgr->va_holes, list) {
170 /* Delete uppermost hole if it reaches the new top */ 146 if (next->offset < va)
171 if (!LIST_IS_EMPTY(&mgr->va_holes)) { 147 break;
172 hole = container_of(mgr->va_holes.next, hole, list); 148 hole = next;
173 if ((hole->offset + hole->size) == va) { 149 }
174 mgr->va_offset = hole->offset; 150
151 if (&hole->list != &mgr->va_holes) {
152 /* Grow upper hole if it's adjacent */
153 if (hole->offset == (va + size)) {
154 hole->offset = va;
155 hole->size += size;
156 /* Merge lower hole if it's adjacent */
157 if (next != hole &&
158 &next->list != &mgr->va_holes &&
159 (next->offset + next->size) == va) {
160 next->size += hole->size;
175 list_del(&hole->list); 161 list_del(&hole->list);
176 free(hole); 162 free(hole);
177 } 163 }
178 }
179 } else {
180 struct amdgpu_bo_va_hole *next;
181
182 hole = container_of(&mgr->va_holes, hole, list);
183 LIST_FOR_EACH_ENTRY(next, &mgr->va_holes, list) {
184 if (next->offset < va)
185 break;
186 hole = next;
187 }
188
189 if (&hole->list != &mgr->va_holes) {
190 /* Grow upper hole if it's adjacent */
191 if (hole->offset == (va + size)) {
192 hole->offset = va;
193 hole->size += size;
194 /* Merge lower hole if it's adjacent */
195 if (next != hole
196 && &next->list != &mgr->va_holes
197 && (next->offset + next->size) == va) {
198 next->size += hole->size;
199 list_del(&hole->list);
200 free(hole);
201 }
202 goto out;
203 }
204 }
205
206 /* Grow lower hole if it's adjacent */
207 if (next != hole && &next->list != &mgr->va_holes &&
208 (next->offset + next->size) == va) {
209 next->size += size;
210 goto out; 164 goto out;
211 } 165 }
166 }
212 167
213 /* FIXME on allocation failure we just lose virtual address space 168 /* Grow lower hole if it's adjacent */
214 * maybe print a warning 169 if (next != hole && &next->list != &mgr->va_holes &&
215 */ 170 (next->offset + next->size) == va) {
216 next = calloc(1, sizeof(struct amdgpu_bo_va_hole)); 171 next->size += size;
217 if (next) { 172 goto out;
218 next->size = size; 173 }
219 next->offset = va; 174
220 list_add(&next->list, &hole->list); 175 /* FIXME on allocation failure we just lose virtual address space
221 } 176 * maybe print a warning
177 */
178 next = calloc(1, sizeof(struct amdgpu_bo_va_hole));
179 if (next) {
180 next->size = size;
181 next->offset = va;
182 list_add(&next->list, &hole->list);
222 } 183 }
184
223out: 185out:
224 pthread_mutex_unlock(&mgr->bo_va_mutex); 186 pthread_mutex_unlock(&mgr->bo_va_mutex);
225} 187}
@@ -235,10 +197,21 @@ int amdgpu_va_range_alloc(amdgpu_device_handle dev,
235{ 197{
236 struct amdgpu_bo_va_mgr *vamgr; 198 struct amdgpu_bo_va_mgr *vamgr;
237 199
238 if (flags & AMDGPU_VA_RANGE_32_BIT) 200 /* Clear the flag when the high VA manager is not initialized */
239 vamgr = dev->vamgr_32; 201 if (flags & AMDGPU_VA_RANGE_HIGH && !dev->vamgr_high_32.va_max)
240 else 202 flags &= ~AMDGPU_VA_RANGE_HIGH;
241 vamgr = dev->vamgr; 203
204 if (flags & AMDGPU_VA_RANGE_HIGH) {
205 if (flags & AMDGPU_VA_RANGE_32_BIT)
206 vamgr = &dev->vamgr_high_32;
207 else
208 vamgr = &dev->vamgr_high;
209 } else {
210 if (flags & AMDGPU_VA_RANGE_32_BIT)
211 vamgr = &dev->vamgr_32;
212 else
213 vamgr = &dev->vamgr;
214 }
242 215
243 va_base_alignment = MAX2(va_base_alignment, vamgr->va_alignment); 216 va_base_alignment = MAX2(va_base_alignment, vamgr->va_alignment);
244 size = ALIGN(size, vamgr->va_alignment); 217 size = ALIGN(size, vamgr->va_alignment);
@@ -249,7 +222,10 @@ int amdgpu_va_range_alloc(amdgpu_device_handle dev,
249 if (!(flags & AMDGPU_VA_RANGE_32_BIT) && 222 if (!(flags & AMDGPU_VA_RANGE_32_BIT) &&
250 (*va_base_allocated == AMDGPU_INVALID_VA_ADDRESS)) { 223 (*va_base_allocated == AMDGPU_INVALID_VA_ADDRESS)) {
251 /* fallback to 32bit address */ 224 /* fallback to 32bit address */
252 vamgr = dev->vamgr_32; 225 if (flags & AMDGPU_VA_RANGE_HIGH)
226 vamgr = &dev->vamgr_high_32;
227 else
228 vamgr = &dev->vamgr_32;
253 *va_base_allocated = amdgpu_vamgr_find_va(vamgr, size, 229 *va_base_allocated = amdgpu_vamgr_find_va(vamgr, size,
254 va_base_alignment, va_base_required); 230 va_base_alignment, va_base_required);
255 } 231 }
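From the caller's side nothing changes except the new AMDGPU_VA_RANGE_HIGH flag; when the kernel does not report a high VA range, amdgpu_va_range_alloc() silently clears the flag and falls back to the low managers. A sketch of requesting space from the high range (size and alignment values are illustrative):

#include <amdgpu.h>

static int alloc_high_va(amdgpu_device_handle dev, uint64_t *va,
                         amdgpu_va_handle *handle)
{
        return amdgpu_va_range_alloc(dev, amdgpu_gpu_va_range_general,
                                     1024 * 1024,   /* size */
                                     0,             /* default alignment */
                                     0,             /* no fixed base */
                                     va, handle,
                                     AMDGPU_VA_RANGE_HIGH);
}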
diff --git a/amdgpu/amdgpu_vm.c b/amdgpu/amdgpu_vm.c
new file mode 100644
index 00000000..da9d07f8
--- /dev/null
+++ b/amdgpu/amdgpu_vm.c
@@ -0,0 +1,49 @@
1/*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "amdgpu.h"
25#include "amdgpu_drm.h"
26#include "xf86drm.h"
27#include "amdgpu_internal.h"
28
29int amdgpu_vm_reserve_vmid(amdgpu_device_handle dev, uint32_t flags)
30{
31 union drm_amdgpu_vm vm;
32
33 vm.in.op = AMDGPU_VM_OP_RESERVE_VMID;
34 vm.in.flags = flags;
35
36 return drmCommandWriteRead(dev->fd, DRM_AMDGPU_VM,
37 &vm, sizeof(vm));
38}
39
40int amdgpu_vm_unreserve_vmid(amdgpu_device_handle dev, uint32_t flags)
41{
42 union drm_amdgpu_vm vm;
43
44 vm.in.op = AMDGPU_VM_OP_UNRESERVE_VMID;
45 vm.in.flags = flags;
46
47 return drmCommandWriteRead(dev->fd, DRM_AMDGPU_VM,
48 &vm, sizeof(vm));
49}
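Both entry points forward the flags word untouched to DRM_AMDGPU_VM; 0 is the usual value today. A sketch of bracketing work with a reserved VMID (hypothetical helper, not part of the patch):

#include <amdgpu.h>

static int with_reserved_vmid(amdgpu_device_handle dev)
{
        int r = amdgpu_vm_reserve_vmid(dev, 0);
        if (r)
                return r;

        /* ... submissions that rely on the dedicated VMID go here ... */

        return amdgpu_vm_unreserve_vmid(dev, 0);
}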
diff --git a/amdgpu/meson.build b/amdgpu/meson.build
new file mode 100644
index 00000000..f39d7bf6
--- /dev/null
+++ b/amdgpu/meson.build
@@ -0,0 +1,66 @@
1# Copyright © 2017-2018 Intel Corporation
2
3# Permission is hereby granted, free of charge, to any person obtaining a copy
4# of this software and associated documentation files (the "Software"), to deal
5# in the Software without restriction, including without limitation the rights
6# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7# copies of the Software, and to permit persons to whom the Software is
8# furnished to do so, subject to the following conditions:
9
10# The above copyright notice and this permission notice shall be included in
11# all copies or substantial portions of the Software.
12
13# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19# SOFTWARE.
20
21
22datadir_amdgpu = join_paths(get_option('prefix'), get_option('datadir'), 'libdrm')
23
24libdrm_amdgpu = shared_library(
25 'drm_amdgpu',
26 [
27 files(
28 'amdgpu_asic_id.c', 'amdgpu_bo.c', 'amdgpu_cs.c', 'amdgpu_device.c',
29 'amdgpu_gpu_info.c', 'amdgpu_vamgr.c', 'amdgpu_vm.c', 'util_hash.c',
30 'util_hash_table.c',
31 ),
32 config_file,
33 ],
34 c_args : [
35 warn_c_args,
36 '-DAMDGPU_ASIC_ID_TABLE="@0@"'.format(join_paths(datadir_amdgpu, 'amdgpu.ids')),
37 ],
38 include_directories : [inc_root, inc_drm],
39 link_with : libdrm,
40 dependencies : [dep_pthread_stubs, dep_atomic_ops],
41 version : '1.0.0',
42 install : true,
43)
44
45install_headers('amdgpu.h', subdir : 'libdrm')
46
47pkg.generate(
48 name : 'libdrm_amdgpu',
49 libraries : libdrm_amdgpu,
50 subdirs : ['.', 'libdrm'],
51 version : meson.project_version(),
52 requires_private : 'libdrm',
53 description : 'Userspace interface to kernel DRM services for amdgpu',
54)
55
56ext_libdrm_amdgpu = declare_dependency(
57 link_with : [libdrm, libdrm_amdgpu],
58 include_directories : [inc_drm, include_directories('.')],
59)
60
61test(
62 'amdgpu-symbol-check',
63 prog_bash,
64 env : env_test,
65 args : [files('amdgpu-symbol-check'), libdrm_amdgpu]
66)
diff --git a/amdgpu/util_hash.c b/amdgpu/util_hash.c
index 87cb671b..7e590419 100644
--- a/amdgpu/util_hash.c
+++ b/amdgpu/util_hash.c
@@ -30,10 +30,6 @@
30 * Zack Rusin <zackr@vmware.com> 30 * Zack Rusin <zackr@vmware.com>
31 */ 31 */
32 32
33#ifdef HAVE_CONFIG_H
34#include "config.h"
35#endif
36
37#include "util_hash.h" 33#include "util_hash.h"
38 34
39#include <stdlib.h> 35#include <stdlib.h>
diff --git a/amdgpu/util_hash.h b/amdgpu/util_hash.h
index 01a4779b..6eed1569 100644
--- a/amdgpu/util_hash.h
+++ b/amdgpu/util_hash.h
@@ -44,10 +44,6 @@
44#ifndef UTIL_HASH_H 44#ifndef UTIL_HASH_H
45#define UTIL_HASH_H 45#define UTIL_HASH_H
46 46
47#ifdef HAVE_CONFIG_H
48#include "config.h"
49#endif
50
51#include <stdbool.h> 47#include <stdbool.h>
52 48
53#include "libdrm_macros.h" 49#include "libdrm_macros.h"
diff --git a/amdgpu/util_hash_table.c b/amdgpu/util_hash_table.c
index fa7f6eab..89a8bf9b 100644
--- a/amdgpu/util_hash_table.c
+++ b/amdgpu/util_hash_table.c
@@ -38,10 +38,6 @@
38 */ 38 */
39 39
40 40
41#ifdef HAVE_CONFIG_H
42#include "config.h"
43#endif
44
45#include "util_hash_table.h" 41#include "util_hash_table.h"
46#include "util_hash.h" 42#include "util_hash.h"
47 43
diff --git a/amdgpu/util_hash_table.h b/amdgpu/util_hash_table.h
index e0001289..5e295a81 100644
--- a/amdgpu/util_hash_table.h
+++ b/amdgpu/util_hash_table.h
@@ -34,10 +34,6 @@
34#ifndef U_HASH_TABLE_H_ 34#ifndef U_HASH_TABLE_H_
35#define U_HASH_TABLE_H_ 35#define U_HASH_TABLE_H_
36 36
37#ifdef HAVE_CONFIG_H
38#include "config.h"
39#endif
40
41#include "libdrm_macros.h" 37#include "libdrm_macros.h"
42 38
43/** 39/**
diff --git a/android/gralloc_handle.h b/android/gralloc_handle.h
new file mode 100644
index 00000000..bcf753da
--- /dev/null
+++ b/android/gralloc_handle.h
@@ -0,0 +1,111 @@
1/*
2 * Copyright (C) 2010-2011 Chia-I Wu <olvaffe@gmail.com>
3 * Copyright (C) 2010-2011 LunarG Inc.
4 * Copyright (C) 2016 Linaro, Ltd., Rob Herring <robh@kernel.org>
5 * Copyright (C) 2018 Collabora, Robert Foss <robert.foss@collabora.com>
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice shall be included
15 * in all copies or substantial portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23 * DEALINGS IN THE SOFTWARE.
24 */
25
26#ifndef __ANDROID_GRALLOC_HANDLE_H__
27#define __ANDROID_GRALLOC_HANDLE_H__
28
29#include <cutils/native_handle.h>
30#include <stdint.h>
31
32/* support users of drm_gralloc/gbm_gralloc */
33#define gralloc_gbm_handle_t gralloc_handle_t
34#define gralloc_drm_handle_t gralloc_handle_t
35
36struct gralloc_handle_t {
37 native_handle_t base;
38
39 /* dma-buf file descriptor
40 * Must be located first since, native_handle_t is allocated
41 * using native_handle_create(), which allocates space for
42 * sizeof(native_handle_t) + sizeof(int) * (numFds + numInts)
43 * numFds = GRALLOC_HANDLE_NUM_FDS
44 * numInts = GRALLOC_HANDLE_NUM_INTS
45 * Where numFds represents the number of FDs and
46 * numInts represents the space needed for the
47 * remainder of this struct.
48 * And the FDs are expected to be found first following
49 * native_handle_t.
50 */
51 int prime_fd;
52
53 /* api variables */
54 uint32_t magic; /* differentiate between allocator impls */
55 uint32_t version; /* api version */
56
57 uint32_t width; /* width of buffer in pixels */
58 uint32_t height; /* height of buffer in pixels */
59 uint32_t format; /* pixel format (Android) */
60 uint32_t usage; /* android libhardware usage flags */
61
62 uint32_t stride; /* the stride in bytes */
63 uint64_t modifier; /* buffer modifiers */
64
65 int data_owner; /* owner of data (for validation) */
66 union {
67 void *data; /* pointer to struct gralloc_gbm_bo_t */
68 uint64_t reserved;
69 } __attribute__((aligned(8)));
70};
71
72#define GRALLOC_HANDLE_VERSION 3
73#define GRALLOC_HANDLE_MAGIC 0x60585350
74#define GRALLOC_HANDLE_NUM_FDS 1
75#define GRALLOC_HANDLE_NUM_INTS ( \
76 ((sizeof(struct gralloc_handle_t) - sizeof(native_handle_t))/sizeof(int)) \
77 - GRALLOC_HANDLE_NUM_FDS)
78
79static inline struct gralloc_handle_t *gralloc_handle(buffer_handle_t handle)
80{
81 return (struct gralloc_handle_t *)handle;
82}
83
84/**
85 * Create a buffer handle.
86 */
87static inline native_handle_t *gralloc_handle_create(int32_t width,
88 int32_t height,
89 int32_t hal_format,
90 int32_t usage)
91{
92 struct gralloc_handle_t *handle;
93 native_handle_t *nhandle = native_handle_create(GRALLOC_HANDLE_NUM_FDS,
94 GRALLOC_HANDLE_NUM_INTS);
95
96 if (!nhandle)
97 return NULL;
98
99 handle = gralloc_handle(nhandle);
100 handle->magic = GRALLOC_HANDLE_MAGIC;
101 handle->version = GRALLOC_HANDLE_VERSION;
102 handle->width = width;
103 handle->height = height;
104 handle->format = hal_format;
105 handle->usage = usage;
106 handle->prime_fd = -1;
107
108 return nhandle;
109}
110
111#endif
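gralloc_handle_create() only fills in the metadata fields; the allocator is expected to plug in the dma-buf fd, stride and modifier afterwards. A sketch of that pattern (the include path and the placeholder parameters are assumptions):

#include <cutils/native_handle.h>
#include "android/gralloc_handle.h"

static buffer_handle_t wrap_buffer(int32_t width, int32_t height,
                                   int32_t hal_format, int32_t usage,
                                   int prime_fd, uint32_t stride,
                                   uint64_t modifier)
{
        native_handle_t *nh = gralloc_handle_create(width, height,
                                                    hal_format, usage);
        struct gralloc_handle_t *gh;

        if (!nh)
                return NULL;

        gh = gralloc_handle(nh);
        gh->prime_fd = prime_fd;   /* dma-buf fd from the allocator */
        gh->stride = stride;
        gh->modifier = modifier;

        return nh;
}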
diff --git a/autogen.sh b/autogen.sh
index d82ab180..13d6991e 100755
--- a/autogen.sh
+++ b/autogen.sh
@@ -6,15 +6,15 @@ test -z "$srcdir" && srcdir=.
6ORIGDIR=`pwd` 6ORIGDIR=`pwd`
7cd "$srcdir" 7cd "$srcdir"
8 8
9autoreconf --force --verbose --install || exit 1 9git config --local --get format.subjectPrefix >/dev/null ||
10cd "$ORIGDIR" || exit $?
11
12git config --local --get format.subjectPrefix ||
13 git config --local format.subjectPrefix "PATCH libdrm" 2>/dev/null 10 git config --local format.subjectPrefix "PATCH libdrm" 2>/dev/null
14 11
15git config --local --get sendemail.to || 12git config --local --get sendemail.to >/dev/null ||
16 git config --local sendemail.to "dri-devel@lists.freedesktop.org" 2>/dev/null 13 git config --local sendemail.to "dri-devel@lists.freedesktop.org" 2>/dev/null
17 14
15autoreconf --force --verbose --install || exit 1
16cd "$ORIGDIR" || exit $?
17
18if test -z "$NOCONFIGURE"; then 18if test -z "$NOCONFIGURE"; then
19 "$srcdir"/configure "$@" 19 "$srcdir"/configure "$@"
20fi 20fi
diff --git a/configure.ac b/configure.ac
index 1da9d86b..98a350c0 100644
--- a/configure.ac
+++ b/configure.ac
@@ -20,7 +20,7 @@
20 20
21AC_PREREQ([2.63]) 21AC_PREREQ([2.63])
22AC_INIT([libdrm], 22AC_INIT([libdrm],
23 [2.4.75], 23 [2.4.91],
24 [https://bugs.freedesktop.org/enter_bug.cgi?product=DRI], 24 [https://bugs.freedesktop.org/enter_bug.cgi?product=DRI],
25 [libdrm]) 25 [libdrm])
26 26
@@ -28,6 +28,7 @@ AC_CONFIG_HEADERS([config.h])
28AC_CONFIG_SRCDIR([Makefile.am]) 28AC_CONFIG_SRCDIR([Makefile.am])
29AC_CONFIG_MACRO_DIR([m4]) 29AC_CONFIG_MACRO_DIR([m4])
30AC_CONFIG_AUX_DIR([build-aux]) 30AC_CONFIG_AUX_DIR([build-aux])
31PKG_PROG_PKG_CONFIG
31 32
32# Require xorg-macros minimum of 1.12 for XORG_WITH_XSLTPROC 33# Require xorg-macros minimum of 1.12 for XORG_WITH_XSLTPROC
33m4_ifndef([XORG_MACROS_VERSION], 34m4_ifndef([XORG_MACROS_VERSION],
@@ -44,6 +45,7 @@ m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])])
44# Check for programs 45# Check for programs
45AC_PROG_CC 46AC_PROG_CC
46AC_PROG_CC_C99 47AC_PROG_CC_C99
48AC_PROG_NM
47 49
48if test "x$ac_cv_prog_cc_c99" = xno; then 50if test "x$ac_cv_prog_cc_c99" = xno; then
49 AC_MSG_ERROR([Building libdrm requires C99 enabled compiler]) 51 AC_MSG_ERROR([Building libdrm requires C99 enabled compiler])
@@ -53,20 +55,39 @@ AC_USE_SYSTEM_EXTENSIONS
53AC_SYS_LARGEFILE 55AC_SYS_LARGEFILE
54AC_FUNC_ALLOCA 56AC_FUNC_ALLOCA
55 57
58save_CFLAGS="$CFLAGS"
59export CFLAGS="$CFLAGS -Werror"
56AC_HEADER_MAJOR 60AC_HEADER_MAJOR
61CFLAGS="$save_CFLAGS"
62
57AC_CHECK_HEADERS([sys/sysctl.h sys/select.h]) 63AC_CHECK_HEADERS([sys/sysctl.h sys/select.h])
58 64
59# Initialize libtool 65# Initialize libtool
60LT_PREREQ([2.2]) 66LT_PREREQ([2.2])
61LT_INIT([disable-static]) 67LT_INIT([disable-static])
62 68
63 69dnl pthread-stubs is mandatory on some BSD platforms, due to the nature of the
64 70dnl project. Even then there's a notable issue as described in the project README
65AC_SUBST(PTHREADSTUBS_CFLAGS) 71case "$host_os" in
66AC_SUBST(PTHREADSTUBS_LIBS) 72linux* | cygwin* | darwin* | solaris* | *-gnu* | gnu* | openbsd*)
73 pthread_stubs_possible="no"
74 ;;
75* )
76 pthread_stubs_possible="yes"
77 ;;
78esac
79
80if test "x$pthread_stubs_possible" = xyes; then
81 PKG_CHECK_MODULES(PTHREADSTUBS, pthread-stubs >= 0.4)
82 AC_SUBST(PTHREADSTUBS_CFLAGS)
83 AC_SUBST(PTHREADSTUBS_LIBS)
84fi
67 85
68pkgconfigdir=${libdir}/pkgconfig 86pkgconfigdir=${libdir}/pkgconfig
69AC_SUBST(pkgconfigdir) 87AC_SUBST(pkgconfigdir)
88libdrmdatadir=${datadir}/libdrm
89AC_SUBST(libdrmdatadir)
90
70AC_ARG_ENABLE([udev], 91AC_ARG_ENABLE([udev],
71 [AS_HELP_STRING([--enable-udev], 92 [AS_HELP_STRING([--enable-udev],
72 [Enable support for using udev instead of mknod (default: disabled)])], 93 [Enable support for using udev instead of mknod (default: disabled)])],
@@ -173,7 +194,9 @@ AC_CHECK_FUNCS([clock_gettime], [CLOCK_LIB=],
173 [AC_MSG_ERROR([Couldn't find clock_gettime])])]) 194 [AC_MSG_ERROR([Couldn't find clock_gettime])])])
174AC_SUBST([CLOCK_LIB]) 195AC_SUBST([CLOCK_LIB])
175 196
176AC_CHECK_FUNCS([open_memstream], [HAVE_OPEN_MEMSTREAM=yes]) 197AC_CHECK_FUNCS([open_memstream],
198 [AC_DEFINE([HAVE_OPEN_MEMSTREAM], 1, [Have open_memstream()])],
199 [AC_DEFINE([HAVE_OPEN_MEMSTREAM], 0)])
177 200
178dnl Use lots of warning flags with with gcc and compatible compilers 201dnl Use lots of warning flags with with gcc and compatible compilers
179 202
@@ -182,7 +205,7 @@ dnl skipped and all flags rechecked. So there's no need to do anything
182dnl else. If for any reason you need to force a recheck, just change 205dnl else. If for any reason you need to force a recheck, just change
183dnl MAYBE_WARN in an ignorable way (like adding whitespace) 206dnl MAYBE_WARN in an ignorable way (like adding whitespace)
184 207
185MAYBE_WARN="-Wall -Wextra \ 208MAYBE_WARN="-Wall -Wextra -Werror=undef \
186-Wsign-compare -Werror-implicit-function-declaration \ 209-Wsign-compare -Werror-implicit-function-declaration \
187-Wpointer-arith -Wwrite-strings -Wstrict-prototypes \ 210-Wpointer-arith -Wwrite-strings -Wstrict-prototypes \
188-Wmissing-prototypes -Wmissing-declarations -Wnested-externs \ 211-Wmissing-prototypes -Wmissing-declarations -Wnested-externs \
@@ -244,9 +267,13 @@ AC_CACHE_CHECK([for native atomic primitives], drm_cv_atomic_primitives, [
244if test "x$drm_cv_atomic_primitives" = xIntel; then 267if test "x$drm_cv_atomic_primitives" = xIntel; then
245 AC_DEFINE(HAVE_LIBDRM_ATOMIC_PRIMITIVES, 1, 268 AC_DEFINE(HAVE_LIBDRM_ATOMIC_PRIMITIVES, 1,
246 [Enable if your compiler supports the Intel __sync_* atomic primitives]) 269 [Enable if your compiler supports the Intel __sync_* atomic primitives])
270else
271 AC_DEFINE(HAVE_LIBDRM_ATOMIC_PRIMITIVES, 0)
247fi 272fi
248if test "x$drm_cv_atomic_primitives" = "xlibatomic-ops"; then 273if test "x$drm_cv_atomic_primitives" = "xlibatomic-ops"; then
249 AC_DEFINE(HAVE_LIB_ATOMIC_OPS, 1, [Enable if you have libatomic-ops-dev installed]) 274 AC_DEFINE(HAVE_LIB_ATOMIC_OPS, 1, [Enable if you have libatomic-ops-dev installed])
275else
276 AC_DEFINE(HAVE_LIB_ATOMIC_OPS, 0)
250fi 277fi
251 278
252dnl Print out the approapriate message considering the value set be the 279dnl Print out the approapriate message considering the value set be the
@@ -325,6 +352,8 @@ AC_SUBST(PCIACCESS_LIBS)
325 352
326if test "x$UDEV" = xyes; then 353if test "x$UDEV" = xyes; then
327 AC_DEFINE(UDEV, 1, [Have UDEV support]) 354 AC_DEFINE(UDEV, 1, [Have UDEV support])
355else
356 AC_DEFINE(UDEV, 0)
328fi 357fi
329 358
330AC_CANONICAL_HOST 359AC_CANONICAL_HOST
@@ -343,32 +372,34 @@ AM_CONDITIONAL(HAVE_LIBKMS, [test "x$LIBKMS" = xyes])
343AM_CONDITIONAL(HAVE_INTEL, [test "x$INTEL" = xyes]) 372AM_CONDITIONAL(HAVE_INTEL, [test "x$INTEL" = xyes])
344if test "x$INTEL" = xyes; then 373if test "x$INTEL" = xyes; then
345 AC_DEFINE(HAVE_INTEL, 1, [Have intel support]) 374 AC_DEFINE(HAVE_INTEL, 1, [Have intel support])
375else
376 AC_DEFINE(HAVE_INTEL, 0)
346fi 377fi
347 378
348AM_CONDITIONAL(HAVE_VMWGFX, [test "x$VMWGFX" = xyes]) 379AM_CONDITIONAL(HAVE_VMWGFX, [test "x$VMWGFX" = xyes])
349if test "x$VMWGFX" = xyes; then 380if test "x$VMWGFX" = xyes; then
350 AC_DEFINE(HAVE_VMWGFX, 1, [Have vmwgfx kernel headers]) 381 AC_DEFINE(HAVE_VMWGFX, 1, [Have vmwgfx kernel headers])
382else
383 AC_DEFINE(HAVE_VMWGFX, 0)
351fi 384fi
352 385
353AM_CONDITIONAL(HAVE_NOUVEAU, [test "x$NOUVEAU" = xyes]) 386AM_CONDITIONAL(HAVE_NOUVEAU, [test "x$NOUVEAU" = xyes])
354if test "x$NOUVEAU" = xyes; then 387if test "x$NOUVEAU" = xyes; then
355 AC_DEFINE(HAVE_NOUVEAU, 1, [Have nouveau (nvidia) support]) 388 AC_DEFINE(HAVE_NOUVEAU, 1, [Have nouveau (nvidia) support])
389else
390 AC_DEFINE(HAVE_NOUVEAU, 0)
356fi 391fi
357 392
358AM_CONDITIONAL(HAVE_OMAP, [test "x$OMAP" = xyes]) 393AM_CONDITIONAL(HAVE_OMAP, [test "x$OMAP" = xyes])
359if test "x$OMAP" = xyes; then
360 AC_DEFINE(HAVE_OMAP, 1, [Have OMAP support])
361fi
362 394
363AM_CONDITIONAL(HAVE_EXYNOS, [test "x$EXYNOS" = xyes]) 395AM_CONDITIONAL(HAVE_EXYNOS, [test "x$EXYNOS" = xyes])
364if test "x$EXYNOS" = xyes; then 396if test "x$EXYNOS" = xyes; then
365 AC_DEFINE(HAVE_EXYNOS, 1, [Have EXYNOS support]) 397 AC_DEFINE(HAVE_EXYNOS, 1, [Have EXYNOS support])
398else
399 AC_DEFINE(HAVE_EXYNOS, 0)
366fi 400fi
367 401
368AM_CONDITIONAL(HAVE_FREEDRENO, [test "x$FREEDRENO" = xyes]) 402AM_CONDITIONAL(HAVE_FREEDRENO, [test "x$FREEDRENO" = xyes])
369if test "x$FREEDRENO" = xyes; then
370 AC_DEFINE(HAVE_FREEDRENO, 1, [Have freedreno support])
371fi
372 403
373if test "x$FREEDRENO_KGSL" = xyes; then 404if test "x$FREEDRENO_KGSL" = xyes; then
374 if test "x$FREEDRENO" != xyes; then 405 if test "x$FREEDRENO" != xyes; then
@@ -378,11 +409,15 @@ fi
378AM_CONDITIONAL(HAVE_FREEDRENO_KGSL, [test "x$FREEDRENO_KGSL" = xyes]) 409AM_CONDITIONAL(HAVE_FREEDRENO_KGSL, [test "x$FREEDRENO_KGSL" = xyes])
379if test "x$FREEDRENO_KGSL" = xyes; then 410if test "x$FREEDRENO_KGSL" = xyes; then
380 AC_DEFINE(HAVE_FREEDRENO_KGSL, 1, [Have freedreno support for KGSL kernel interface]) 411 AC_DEFINE(HAVE_FREEDRENO_KGSL, 1, [Have freedreno support for KGSL kernel interface])
412else
413 AC_DEFINE(HAVE_FREEDRENO_KGSL, 0)
381fi 414fi
382 415
383AM_CONDITIONAL(HAVE_RADEON, [test "x$RADEON" = xyes]) 416AM_CONDITIONAL(HAVE_RADEON, [test "x$RADEON" = xyes])
384if test "x$RADEON" = xyes; then 417if test "x$RADEON" = xyes; then
385 AC_DEFINE(HAVE_RADEON, 1, [Have radeon support]) 418 AC_DEFINE(HAVE_RADEON, 1, [Have radeon support])
419else
420 AC_DEFINE(HAVE_RADEON, 0)
386fi 421fi
387 422
388if test "x$AMDGPU" != xno; then 423if test "x$AMDGPU" != xno; then
@@ -409,36 +444,30 @@ AM_CONDITIONAL(HAVE_AMDGPU, [test "x$AMDGPU" = xyes])
409if test "x$AMDGPU" = xyes; then 444if test "x$AMDGPU" = xyes; then
410 AC_DEFINE(HAVE_AMDGPU, 1, [Have amdgpu support]) 445 AC_DEFINE(HAVE_AMDGPU, 1, [Have amdgpu support])
411 446
412 AC_DEFINE(HAVE_CUNIT, [test "x$have_cunit" != "xno"], [Enable CUNIT Have amdgpu support])
413
414 if test "x$have_cunit" = "xno"; then 447 if test "x$have_cunit" = "xno"; then
415 AC_MSG_WARN([Could not find cunit library. Disabling amdgpu tests]) 448 AC_MSG_WARN([Could not find cunit library. Disabling amdgpu tests])
416 fi 449 fi
450else
451 AC_DEFINE(HAVE_AMDGPU, 0)
417fi 452fi
418 453
419AM_CONDITIONAL(HAVE_TEGRA, [test "x$TEGRA" = xyes]) 454AM_CONDITIONAL(HAVE_TEGRA, [test "x$TEGRA" = xyes])
420if test "x$TEGRA" = xyes; then
421 AC_DEFINE(HAVE_TEGRA, 1, [Have Tegra support])
422fi
423 455
424AM_CONDITIONAL(HAVE_ROCKCHIP, [test "x$ROCKCHIP" = xyes]) 456AM_CONDITIONAL(HAVE_ROCKCHIP, [test "x$ROCKCHIP" = xyes])
425if test "x$ROCKCHIP" = xyes; then 457if test "x$ROCKCHIP" = xyes; then
426 AC_DEFINE(HAVE_ROCKCHIP, 1, [Have ROCKCHIP support]) 458 AC_DEFINE(HAVE_ROCKCHIP, 1, [Have ROCKCHIP support])
459fi
427 460
428AM_CONDITIONAL(HAVE_VC4, [test "x$VC4" = xyes]) 461AM_CONDITIONAL(HAVE_VC4, [test "x$VC4" = xyes])
429if test "x$VC4" = xyes; then 462if test "x$VC4" = xyes; then
430 AC_DEFINE(HAVE_VC4, 1, [Have VC4 support]) 463 AC_DEFINE(HAVE_VC4, 1, [Have VC4 support])
464else
465 AC_DEFINE(HAVE_VC4, 0)
431fi 466fi
432 467
433AM_CONDITIONAL(HAVE_ETNAVIV, [test "x$ETNAVIV" = xyes]) 468AM_CONDITIONAL(HAVE_ETNAVIV, [test "x$ETNAVIV" = xyes])
434if test "x$ETNAVIV" = xyes; then
435 AC_DEFINE(HAVE_ETNAVIV, 1, [Have etnaviv support])
436fi
437 469
438AM_CONDITIONAL(HAVE_INSTALL_TESTS, [test "x$INSTALL_TESTS" = xyes]) 470AM_CONDITIONAL(HAVE_INSTALL_TESTS, [test "x$INSTALL_TESTS" = xyes])
439if test "x$INSTALL_TESTS" = xyes; then
440 AC_DEFINE(HAVE_INSTALL_TESTS, 1, [Install test programs])
441fi
442 471
443AC_ARG_ENABLE([cairo-tests], 472AC_ARG_ENABLE([cairo-tests],
444 [AS_HELP_STRING([--enable-cairo-tests], 473 [AS_HELP_STRING([--enable-cairo-tests],
@@ -456,6 +485,8 @@ if test "x$CAIRO" = xyes; then
456 AC_MSG_ERROR([Cairo support required but not present]) 485 AC_MSG_ERROR([Cairo support required but not present])
457 fi 486 fi
458 AC_DEFINE(HAVE_CAIRO, 1, [Have Cairo support]) 487 AC_DEFINE(HAVE_CAIRO, 1, [Have Cairo support])
488else
489 AC_DEFINE(HAVE_CAIRO, 0)
459fi 490fi
460AC_MSG_RESULT([$CAIRO]) 491AC_MSG_RESULT([$CAIRO])
461AM_CONDITIONAL(HAVE_CAIRO, [test "x$CAIRO" = xyes]) 492AM_CONDITIONAL(HAVE_CAIRO, [test "x$CAIRO" = xyes])
@@ -497,6 +528,8 @@ if test "x$VALGRIND" = "xyes"; then
497 AC_MSG_ERROR([Valgrind support required but not present]) 528 AC_MSG_ERROR([Valgrind support required but not present])
498 fi 529 fi
499 AC_DEFINE([HAVE_VALGRIND], 1, [Use valgrind intrinsics to suppress false warnings]) 530 AC_DEFINE([HAVE_VALGRIND], 1, [Use valgrind intrinsics to suppress false warnings])
531else
532 AC_DEFINE([HAVE_VALGRIND], 0)
500fi 533fi
501 534
502AC_MSG_RESULT([$VALGRIND]) 535AC_MSG_RESULT([$VALGRIND])
@@ -514,11 +547,16 @@ AC_LINK_IFELSE([AC_LANG_PROGRAM([
514 547
515if test "x$HAVE_ATTRIBUTE_VISIBILITY" = xyes; then 548if test "x$HAVE_ATTRIBUTE_VISIBILITY" = xyes; then
516 AC_DEFINE(HAVE_VISIBILITY, 1, [Compiler supports __attribute__(("hidden"))]) 549 AC_DEFINE(HAVE_VISIBILITY, 1, [Compiler supports __attribute__(("hidden"))])
550else
551 AC_DEFINE(HAVE_VISIBILITY, 0)
517fi 552fi
518 553
554CFLAGS="$CFLAGS -include config.h"
555
519AC_SUBST(WARN_CFLAGS) 556AC_SUBST(WARN_CFLAGS)
520AC_CONFIG_FILES([ 557AC_CONFIG_FILES([
521 Makefile 558 Makefile
559 data/Makefile
522 libkms/Makefile 560 libkms/Makefile
523 libkms/libkms.pc 561 libkms/libkms.pc
524 intel/Makefile 562 intel/Makefile
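Defining the HAVE_* macros to an explicit 0 or 1 (instead of leaving them undefined), together with the global -include config.h, lets code test them with plain #if rather than #ifdef. A hypothetical consumer of the open_memstream() check, for illustration only:

#include <stdio.h>

static FILE *open_scratch_stream(char **buf, size_t *size)
{
#if HAVE_OPEN_MEMSTREAM
        return open_memstream(buf, size);
#else
        (void)buf;
        (void)size;
        return tmpfile();   /* fallback when open_memstream() is missing */
#endif
}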
diff --git a/data/Android.mk b/data/Android.mk
new file mode 100644
index 00000000..62013f0c
--- /dev/null
+++ b/data/Android.mk
@@ -0,0 +1,10 @@
1LOCAL_PATH := $(call my-dir)
2
3include $(CLEAR_VARS)
4LOCAL_MODULE := amdgpu.ids
5LOCAL_MODULE_TAGS := optional
6LOCAL_MODULE_CLASS := ETC
7LOCAL_PROPRIETARY_MODULE := true
8LOCAL_MODULE_RELATIVE_PATH := hwdata
9LOCAL_SRC_FILES := $(LOCAL_MODULE)
10include $(BUILD_PREBUILT)
diff --git a/data/Makefile.am b/data/Makefile.am
new file mode 100644
index 00000000..897a7f35
--- /dev/null
+++ b/data/Makefile.am
@@ -0,0 +1,25 @@
1# Copyright © 2017 Advanced Micro Devices, Inc.
2# All Rights Reserved.
3#
4# Permission is hereby granted, free of charge, to any person obtaining a
5# copy of this software and associated documentation files (the "Software"),
6# to deal in the Software without restriction, including without limitation
7# on the rights to use, copy, modify, merge, publish, distribute, sub
8# license, and/or sell copies of the Software, and to permit persons to whom
9# the Software is furnished to do so, subject to the following conditions:
10#
11# The above copyright notice and this permission notice (including the next
12# paragraph) shall be included in all copies or substantial portions of the
13# Software.
14#
15# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18# ADAM JACKSON BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
19# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
20# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
21
22libdrmdatadir = @libdrmdatadir@
23if HAVE_AMDGPU
24dist_libdrmdata_DATA = amdgpu.ids
25endif
diff --git a/data/amdgpu.ids b/data/amdgpu.ids
new file mode 100644
index 00000000..1828e410
--- /dev/null
+++ b/data/amdgpu.ids
@@ -0,0 +1,187 @@
1# List of AMDGPU IDs
2#
3# Syntax:
4# device_id, revision_id, product_name <-- single tab after comma
5
61.0.0
76600, 0, AMD Radeon HD 8600/8700M
86600, 81, AMD Radeon (TM) R7 M370
96601, 0, AMD Radeon (TM) HD 8500M/8700M
106604, 0, AMD Radeon R7 M265 Series
116604, 81, AMD Radeon (TM) R7 M350
126605, 0, AMD Radeon R7 M260 Series
136605, 81, AMD Radeon (TM) R7 M340
146606, 0, AMD Radeon HD 8790M
156607, 0, AMD Radeon (TM) HD8530M
166608, 0, AMD FirePro W2100
176610, 0, AMD Radeon HD 8600 Series
186610, 81, AMD Radeon (TM) R7 350
196610, 83, AMD Radeon (TM) R5 340
206611, 0, AMD Radeon HD 8500 Series
216613, 0, AMD Radeon HD 8500 series
226617, C7, AMD Radeon R7 240 Series
236640, 0, AMD Radeon HD 8950
246640, 80, AMD Radeon (TM) R9 M380
256646, 0, AMD Radeon R9 M280X
266646, 80, AMD Radeon (TM) R9 M470X
276647, 0, AMD Radeon R9 M270X
286647, 80, AMD Radeon (TM) R9 M380
296649, 0, AMD FirePro W5100
306658, 0, AMD Radeon R7 200 Series
31665C, 0, AMD Radeon HD 7700 Series
32665D, 0, AMD Radeon R7 200 Series
33665F, 81, AMD Radeon (TM) R7 300 Series
346660, 0, AMD Radeon HD 8600M Series
356660, 81, AMD Radeon (TM) R5 M335
366660, 83, AMD Radeon (TM) R5 M330
376663, 0, AMD Radeon HD 8500M Series
386663, 83, AMD Radeon (TM) R5 M320
396664, 0, AMD Radeon R5 M200 Series
406665, 0, AMD Radeon R5 M200 Series
416665, 83, AMD Radeon (TM) R5 M320
426667, 0, AMD Radeon R5 M200 Series
43666F, 0, AMD Radeon HD 8500M
446780, 0, ATI FirePro V (FireGL V) Graphics Adapter
45678A, 0, ATI FirePro V (FireGL V) Graphics Adapter
466798, 0, AMD Radeon HD 7900 Series
47679A, 0, AMD Radeon HD 7900 Series
48679B, 0, AMD Radeon HD 7900 Series
49679E, 0, AMD Radeon HD 7800 Series
5067A0, 0, AMD Radeon FirePro W9100
5167A1, 0, AMD Radeon FirePro W8100
5267B0, 0, AMD Radeon R9 200 Series
5367B0, 80, AMD Radeon (TM) R9 390 Series
5467B1, 0, AMD Radeon R9 200 Series
5567B1, 80, AMD Radeon (TM) R9 390 Series
5667B9, 0, AMD Radeon R9 200 Series
5767DF, C1, Radeon RX 580 Series
5867DF, C2, Radeon RX 570 Series
5967DF, C3, Radeon RX 580 Series
6067DF, C4, AMD Radeon (TM) RX 480 Graphics
6167DF, C5, AMD Radeon (TM) RX 470 Graphics
6267DF, C6, Radeon RX 570 Series
6367DF, C7, AMD Radeon (TM) RX 480 Graphics
6467DF, CF, AMD Radeon (TM) RX 470 Graphics
6567DF, D7, Radeon(TM) RX 470 Graphics
6667DF, E3, Radeon RX Series
6767DF, E7, Radeon RX 580 Series
6867DF, EF, Radeon RX 570 Series
6967C2, 01, AMD Radeon (TM) Pro V7350x2
7067C2, 02, AMD Radeon (TM) Pro V7300X
7167C4, 00, AMD Radeon (TM) Pro WX 7100 Graphics
7267C7, 00, AMD Radeon (TM) Pro WX 5100 Graphics
7367C0, 00, AMD Radeon (TM) Pro WX 7100 Graphics
7467D0, 01, AMD Radeon (TM) Pro V7350x2
7567D0, 02, AMD Radeon (TM) Pro V7300X
7667E0, 00, AMD Radeon (TM) Pro WX Series
7767E3, 00, AMD Radeon (TM) Pro WX 4100
7867E8, 00, AMD Radeon (TM) Pro WX Series
7967E8, 01, AMD Radeon (TM) Pro WX Series
8067E8, 80, AMD Radeon (TM) E9260 Graphics
8167EB, 00, AMD Radeon (TM) Pro V5300X
8267EF, C0, AMD Radeon (TM) RX Graphics
8367EF, C1, AMD Radeon (TM) RX 460 Graphics
8467EF, C3, Radeon RX Series
8567EF, C5, AMD Radeon (TM) RX 460 Graphics
8667EF, C7, AMD Radeon (TM) RX Graphics
8767EF, CF, AMD Radeon (TM) RX 460 Graphics
8867EF, E0, Radeon RX 560 Series
8967EF, E1, Radeon RX Series
9067EF, E3, Radeon RX Series
9167EF, E5, Radeon RX 560 Series
9267EF, EF, AMD Radeon (TM) RX Graphics
9367EF, FF, Radeon(TM) RX 460 Graphics
9467FF, C0, AMD Radeon (TM) RX Graphics
9567FF, C1, AMD Radeon (TM) RX Graphics
9667FF, CF, Radeon RX 560 Series
9767FF, EF, Radeon RX 560 Series
9867FF, FF, Radeon RX 550 Series
996800, 0, AMD Radeon HD 7970M
1006801, 0, AMD Radeon(TM) HD8970M
1016808, 0, ATI FirePro V(FireGL V) Graphics Adapter
1026809, 0, ATI FirePro V(FireGL V) Graphics Adapter
1036810, 0, AMD Radeon(TM) HD 8800 Series
1046810, 81, AMD Radeon (TM) R7 370 Series
1056811, 0, AMD Radeon(TM) HD8800 Series
1066811, 81, AMD Radeon (TM) R7 300 Series
1076818, 0, AMD Radeon HD 7800 Series
1086819, 0, AMD Radeon HD 7800 Series
1096820, 0, AMD Radeon HD 8800M Series
1106820, 81, AMD Radeon (TM) R9 M375
1116820, 83, AMD Radeon (TM) R9 M375X
1126821, 0, AMD Radeon HD 8800M Series
1136821, 87, AMD Radeon (TM) R7 M380
1146821, 83, AMD Radeon R9 (TM) M370X
1156822, 0, AMD Radeon E8860
1166823, 0, AMD Radeon HD 8800M Series
1176825, 0, AMD Radeon HD 7800M Series
1186827, 0, AMD Radeon HD 7800M Series
1196828, 0, ATI FirePro V(FireGL V) Graphics Adapter
120682B, 0, AMD Radeon HD 8800M Series
121682B, 87, AMD Radeon (TM) R9 M360
122682C, 0, AMD FirePro W4100
123682D, 0, AMD Radeon HD 7700M Series
124682F, 0, AMD Radeon HD 7700M Series
1256835, 0, AMD Radeon R7 Series / HD 9000 Series
1266837, 0, AMD Radeon HD7700 Series
127683D, 0, AMD Radeon HD 7700 Series
128683F, 0, AMD Radeon HD 7700 Series
1296860, 00, Radeon Instinct MI25
1306860, 01, Radeon Pro V320
1316860, 02, Radeon Instinct MI25
1326860, 03, Radeon Pro V340
1336860, 04, Radeon Instinct MI25x2
1346861, 00, Radeon(TM) Pro WX9100
1356862, 00, Radeon Pro SSG
1366863, 00, Radeon Vega Frontier Edition
1376864, 03, Radeon Pro V340
1386864, 04, Instinct MI25x2
1396868, 00, Radeon(TM) Pro WX8100
140686C, 00, GLXT (Radeon Instinct MI25) MxGPU VFID
141686C, 01, GLXT (Radeon Pro V320) MxGPU
142686C, 02, GLXT (Radeon Instinct MI25) MxGPU
143686C, 03, GLXT (Radeon Pro V340) MxGPU
144686C, 04, GLXT (Radeon Instinct MI25x2) MxGPU
145687F, C0, Radeon RX Vega
146687F, C1, Radeon RX Vega
147687F, C3, Radeon RX Vega
1486900, 0, AMD Radeon R7 M260
1496900, 81, AMD Radeon (TM) R7 M360
1506900, 83, AMD Radeon (TM) R7 M340
1516901, 0, AMD Radeon R5 M255
1526907, 0, AMD Radeon R5 M255
1536907, 87, AMD Radeon (TM) R5 M315
1546920, 0, AMD RADEON R9 M395X
1556920, 1, AMD RADEON R9 M390X
1566921, 0, AMD Radeon R9 M295X
1576929, 0, AMD FirePro S7150
158692B, 0, AMD FirePro W7100
1596938, 0, AMD Radeon R9 200 Series
1606938, F0, AMD Radeon R9 200 Series
1616938, F1, AMD Radeon (TM) R9 380 Series
1626939, F0, AMD Radeon R9 200 Series
1636939, 0, AMD Radeon R9 200 Series
1646939, F1, AMD Radeon (TM) R9 380 Series
1656980, 00, Radeon Pro WX3100
1666985, 00, AMD Radeon Pro WX3100
1676987, 80, AMD Embedded Radeon E9171
1686995, 00, AMD Radeon Pro WX2100
1696997, 00, Radeon Pro WX2100
170699F, 81, AMD Embedded Radeon E9170 Series
171699F, C0, Radeon 500 Series
172699F, C3, Radeon 500 Series
173699F, C7, Radeon RX 550 Series
1747300, C1, AMD FirePro (TM) S9300 x2
1757300, C8, AMD Radeon (TM) R9 Fury Series
1767300, C9, Radeon (TM) Pro Duo
1777300, CB, AMD Radeon (TM) R9 Fury Series
1787300, CA, AMD Radeon (TM) R9 Fury Series
1799874, C4, AMD Radeon R7 Graphics
1809874, C5, AMD Radeon R6 Graphics
1819874, C6, AMD Radeon R6 Graphics
1829874, C7, AMD Radeon R5 Graphics
1839874, C8, AMD Radeon R7 Graphics
1849874, 81, AMD Radeon R6 Graphics
1859874, 87, AMD Radeon R5 Graphics
1869874, 85, AMD Radeon R6 Graphics
1879874, 84, AMD Radeon R7 Graphics
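Every data row above follows the layout given in the header comment: a hexadecimal device ID, a hexadecimal revision ID, and the marketing name, with a single tab after each comma. A hedged sketch of how a consumer could match one line against a probed device; the helper name and the exact whitespace handling are illustrative, not part of this file:

#include <stdio.h>
#include <string.h>

/* Return the product name if this amdgpu.ids line matches the probed
 * device/revision ID, or NULL for comments, the version line and
 * non-matching rows. The line is modified in place. */
static const char *match_line(char *line, unsigned int did, unsigned int rid)
{
	unsigned int line_did, line_rid;
	char *name;

	if (line[0] == '#')
		return NULL;                      /* comment */

	if (sscanf(line, "%x, %x,", &line_did, &line_rid) != 2)
		return NULL;                      /* e.g. the "1.0.0" version line */

	if (line_did != did || line_rid != rid)
		return NULL;

	name = strrchr(line, ',') + 1;            /* name follows the last comma */
	name += strspn(name, " \t");              /* skip the single tab */
	name[strcspn(name, "\r\n")] = '\0';       /* trim the trailing newline */
	return name;
}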
diff --git a/data/meson.build b/data/meson.build
new file mode 100644
index 00000000..9c26b66e
--- /dev/null
+++ b/data/meson.build
@@ -0,0 +1,27 @@
1# Copyright © 2017-2018 Intel Corporation
2
3# Permission is hereby granted, free of charge, to any person obtaining a copy
4# of this software and associated documentation files (the "Software"), to deal
5# in the Software without restriction, including without limitation the rights
6# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7# copies of the Software, and to permit persons to whom the Software is
8# furnished to do so, subject to the following conditions:
9
10# The above copyright notice and this permission notice shall be included in
11# all copies or substantial portions of the Software.
12
13# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19# SOFTWARE.
20
21if with_amdgpu
22 install_data(
23 'amdgpu.ids',
24 install_mode : 'rw-r--r--',
25 install_dir : datadir_amdgpu,
26 )
27endif
diff --git a/etnaviv/Makefile.sources b/etnaviv/Makefile.sources
index 52580567..0eb73783 100644
--- a/etnaviv/Makefile.sources
+++ b/etnaviv/Makefile.sources
@@ -3,6 +3,7 @@ LIBDRM_ETNAVIV_FILES := \
3 etnaviv_gpu.c \ 3 etnaviv_gpu.c \
4 etnaviv_bo.c \ 4 etnaviv_bo.c \
5 etnaviv_bo_cache.c \ 5 etnaviv_bo_cache.c \
6 etnaviv_perfmon.c \
6 etnaviv_pipe.c \ 7 etnaviv_pipe.c \
7 etnaviv_cmd_stream.c \ 8 etnaviv_cmd_stream.c \
8 etnaviv_drm.h \ 9 etnaviv_drm.h \
diff --git a/etnaviv/etnaviv-symbol-check b/etnaviv/etnaviv-symbol-check
index 22afd168..bc509615 100755
--- a/etnaviv/etnaviv-symbol-check
+++ b/etnaviv/etnaviv-symbol-check
@@ -39,8 +39,14 @@ etna_cmd_stream_new
39etna_cmd_stream_del 39etna_cmd_stream_del
40etna_cmd_stream_timestamp 40etna_cmd_stream_timestamp
41etna_cmd_stream_flush 41etna_cmd_stream_flush
42etna_cmd_stream_flush2
42etna_cmd_stream_finish 43etna_cmd_stream_finish
44etna_cmd_stream_perf
43etna_cmd_stream_reloc 45etna_cmd_stream_reloc
46etna_perfmon_create
47etna_perfmon_del
48etna_perfmon_get_dom_by_name
49etna_perfmon_get_sig_by_name
44EOF 50EOF
45done) 51done)
46 52
diff --git a/etnaviv/etnaviv_bo.c b/etnaviv/etnaviv_bo.c
index 4ad0434e..32f7b348 100644
--- a/etnaviv/etnaviv_bo.c
+++ b/etnaviv/etnaviv_bo.c
@@ -24,10 +24,6 @@
24 * Christian Gmeiner <christian.gmeiner@gmail.com> 24 * Christian Gmeiner <christian.gmeiner@gmail.com>
25 */ 25 */
26 26
27#ifdef HAVE_CONFIG_H
28# include <config.h>
29#endif
30
31#include "etnaviv_priv.h" 27#include "etnaviv_priv.h"
32#include "etnaviv_drmif.h" 28#include "etnaviv_drmif.h"
33 29
@@ -173,7 +169,7 @@ struct etna_bo *etna_bo_from_name(struct etna_device *dev, uint32_t name)
173 pthread_mutex_lock(&table_lock); 169 pthread_mutex_lock(&table_lock);
174 170
175 /* check name table first, to see if bo is already open: */ 171 /* check name table first, to see if bo is already open: */
176 bo = lookup_bo(dev->name_table, req.handle); 172 bo = lookup_bo(dev->name_table, name);
177 if (bo) 173 if (bo)
178 goto out_unlock; 174 goto out_unlock;
179 175
@@ -206,10 +202,15 @@ struct etna_bo *etna_bo_from_dmabuf(struct etna_device *dev, int fd)
206 int ret, size; 202 int ret, size;
207 uint32_t handle; 203 uint32_t handle;
208 204
205 /* take the lock before calling drmPrimeFDToHandle to avoid
206 * racing against etna_bo_del, which might invalidate the
207 * returned handle.
208 */
209 pthread_mutex_lock(&table_lock); 209 pthread_mutex_lock(&table_lock);
210 210
211 ret = drmPrimeFDToHandle(dev->fd, fd, &handle); 211 ret = drmPrimeFDToHandle(dev->fd, fd, &handle);
212 if (ret) { 212 if (ret) {
213 pthread_mutex_unlock(&table_lock);
213 return NULL; 214 return NULL;
214 } 215 }
215 216
diff --git a/etnaviv/etnaviv_bo_cache.c b/etnaviv/etnaviv_bo_cache.c
index 8924651f..c81de262 100644
--- a/etnaviv/etnaviv_bo_cache.c
+++ b/etnaviv/etnaviv_bo_cache.c
@@ -24,10 +24,6 @@
24 * Christian Gmeiner <christian.gmeiner@gmail.com> 24 * Christian Gmeiner <christian.gmeiner@gmail.com>
25 */ 25 */
26 26
27#ifdef HAVE_CONFIG_H
28# include <config.h>
29#endif
30
31#include "etnaviv_priv.h" 27#include "etnaviv_priv.h"
32#include "etnaviv_drmif.h" 28#include "etnaviv_drmif.h"
33 29
@@ -124,20 +120,32 @@ static int is_idle(struct etna_bo *bo)
124 120
125static struct etna_bo *find_in_bucket(struct etna_bo_bucket *bucket, uint32_t flags) 121static struct etna_bo *find_in_bucket(struct etna_bo_bucket *bucket, uint32_t flags)
126{ 122{
127 struct etna_bo *bo = NULL; 123 struct etna_bo *bo = NULL, *tmp;
128 124
129 pthread_mutex_lock(&table_lock); 125 pthread_mutex_lock(&table_lock);
130 while (!LIST_IS_EMPTY(&bucket->list)) {
131 bo = LIST_ENTRY(struct etna_bo, bucket->list.next, list);
132 126
133 if (bo->flags == flags && is_idle(bo)) { 127 if (LIST_IS_EMPTY(&bucket->list))
134 list_del(&bo->list); 128 goto out_unlock;
135 break; 129
130 LIST_FOR_EACH_ENTRY_SAFE(bo, tmp, &bucket->list, list) {
131 /* skip BOs with different flags */
132 if (bo->flags != flags)
133 continue;
134
135 /* check if the first BO with matching flags is idle */
136 if (is_idle(bo)) {
137 list_delinit(&bo->list);
138 goto out_unlock;
136 } 139 }
137 140
138 bo = NULL; 141 /* If the oldest BO is still busy, don't try younger ones */
139 break; 142 break;
140 } 143 }
144
145 /* There was no matching buffer found */
146 bo = NULL;
147
148out_unlock:
141 pthread_mutex_unlock(&table_lock); 149 pthread_mutex_unlock(&table_lock);
142 150
143 return bo; 151 return bo;
diff --git a/etnaviv/etnaviv_cmd_stream.c b/etnaviv/etnaviv_cmd_stream.c
index 9ce3f363..13730168 100644
--- a/etnaviv/etnaviv_cmd_stream.c
+++ b/etnaviv/etnaviv_cmd_stream.c
@@ -24,10 +24,6 @@
24 * Christian Gmeiner <christian.gmeiner@gmail.com> 24 * Christian Gmeiner <christian.gmeiner@gmail.com>
25 */ 25 */
26 26
27#ifdef HAVE_CONFIG_H
28# include <config.h>
29#endif
30
31#include <assert.h> 27#include <assert.h>
32 28
33#include "etnaviv_drmif.h" 29#include "etnaviv_drmif.h"
@@ -105,6 +101,7 @@ void etna_cmd_stream_del(struct etna_cmd_stream *stream)
105 101
106 free(stream->buffer); 102 free(stream->buffer);
107 free(priv->submit.relocs); 103 free(priv->submit.relocs);
104 free(priv->submit.pmrs);
108 free(priv); 105 free(priv);
109} 106}
110 107
@@ -115,6 +112,7 @@ static void reset_buffer(struct etna_cmd_stream *stream)
115 stream->offset = 0; 112 stream->offset = 0;
116 priv->submit.nr_bos = 0; 113 priv->submit.nr_bos = 0;
117 priv->submit.nr_relocs = 0; 114 priv->submit.nr_relocs = 0;
115 priv->submit.nr_pmrs = 0;
118 priv->nr_bos = 0; 116 priv->nr_bos = 0;
119 117
120 if (priv->reset_notify) 118 if (priv->reset_notify)
@@ -177,7 +175,8 @@ static uint32_t bo2idx(struct etna_cmd_stream *stream, struct etna_bo *bo,
177 return idx; 175 return idx;
178} 176}
179 177
180static void flush(struct etna_cmd_stream *stream) 178static void flush(struct etna_cmd_stream *stream, int in_fence_fd,
179 int *out_fence_fd)
181{ 180{
182 struct etna_cmd_stream_priv *priv = etna_cmd_stream_priv(stream); 181 struct etna_cmd_stream_priv *priv = etna_cmd_stream_priv(stream);
183 int ret, id = priv->pipe->id; 182 int ret, id = priv->pipe->id;
@@ -190,10 +189,20 @@ static void flush(struct etna_cmd_stream *stream)
190 .nr_bos = priv->submit.nr_bos, 189 .nr_bos = priv->submit.nr_bos,
191 .relocs = VOID2U64(priv->submit.relocs), 190 .relocs = VOID2U64(priv->submit.relocs),
192 .nr_relocs = priv->submit.nr_relocs, 191 .nr_relocs = priv->submit.nr_relocs,
192 .pmrs = VOID2U64(priv->submit.pmrs),
193 .nr_pmrs = priv->submit.nr_pmrs,
193 .stream = VOID2U64(stream->buffer), 194 .stream = VOID2U64(stream->buffer),
194 .stream_size = stream->offset * 4, /* in bytes */ 195 .stream_size = stream->offset * 4, /* in bytes */
195 }; 196 };
196 197
198 if (in_fence_fd != -1) {
199 req.flags |= ETNA_SUBMIT_FENCE_FD_IN | ETNA_SUBMIT_NO_IMPLICIT;
200 req.fence_fd = in_fence_fd;
201 }
202
203 if (out_fence_fd)
204 req.flags |= ETNA_SUBMIT_FENCE_FD_OUT;
205
197 ret = drmCommandWriteRead(gpu->dev->fd, DRM_ETNAVIV_GEM_SUBMIT, 206 ret = drmCommandWriteRead(gpu->dev->fd, DRM_ETNAVIV_GEM_SUBMIT,
198 &req, sizeof(req)); 207 &req, sizeof(req));
199 208
@@ -208,11 +217,21 @@ static void flush(struct etna_cmd_stream *stream)
208 bo->current_stream = NULL; 217 bo->current_stream = NULL;
209 etna_bo_del(bo); 218 etna_bo_del(bo);
210 } 219 }
220
221 if (out_fence_fd)
222 *out_fence_fd = req.fence_fd;
211} 223}
212 224
213void etna_cmd_stream_flush(struct etna_cmd_stream *stream) 225void etna_cmd_stream_flush(struct etna_cmd_stream *stream)
214{ 226{
215 flush(stream); 227 flush(stream, -1, NULL);
228 reset_buffer(stream);
229}
230
231void etna_cmd_stream_flush2(struct etna_cmd_stream *stream, int in_fence_fd,
232 int *out_fence_fd)
233{
234 flush(stream, in_fence_fd, out_fence_fd);
216 reset_buffer(stream); 235 reset_buffer(stream);
217} 236}
218 237
@@ -220,7 +239,7 @@ void etna_cmd_stream_finish(struct etna_cmd_stream *stream)
220{ 239{
221 struct etna_cmd_stream_priv *priv = etna_cmd_stream_priv(stream); 240 struct etna_cmd_stream_priv *priv = etna_cmd_stream_priv(stream);
222 241
223 flush(stream); 242 flush(stream, -1, NULL);
224 etna_pipe_wait(priv->pipe, priv->last_timestamp, 5000); 243 etna_pipe_wait(priv->pipe, priv->last_timestamp, 5000);
225 reset_buffer(stream); 244 reset_buffer(stream);
226} 245}
@@ -241,3 +260,19 @@ void etna_cmd_stream_reloc(struct etna_cmd_stream *stream, const struct etna_rel
241 260
242 etna_cmd_stream_emit(stream, addr); 261 etna_cmd_stream_emit(stream, addr);
243} 262}
263
264void etna_cmd_stream_perf(struct etna_cmd_stream *stream, const struct etna_perf *p)
265{
266 struct etna_cmd_stream_priv *priv = etna_cmd_stream_priv(stream);
267 struct drm_etnaviv_gem_submit_pmr *pmr;
268 uint32_t idx = APPEND(&priv->submit, pmrs);
269
270 pmr = &priv->submit.pmrs[idx];
271
272 pmr->flags = p->flags;
273 pmr->sequence = p->sequence;
274 pmr->read_offset = p->offset;
275 pmr->read_idx = bo2idx(stream, p->bo, ETNA_SUBMIT_BO_READ | ETNA_SUBMIT_BO_WRITE);
276 pmr->domain = p->signal->domain->id;
277 pmr->signal = p->signal->signal;
278}
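etna_cmd_stream_flush2() is the entry point for the explicit-fencing support added above: a valid in-fence fd makes the kernel wait for it (and sets ETNA_SUBMIT_NO_IMPLICIT), and a non-NULL out-fence pointer requests a fence fd that signals when the submission retires. A minimal usage sketch under those assumptions; error handling is trimmed and the wrapper name is illustrative:

#include <unistd.h>
#include "etnaviv_drmif.h"

/* Submit 'stream', making the GPU wait for 'in_fence' first (-1 for none),
 * and return a fence fd that signals once this submission has completed. */
static int submit_with_fences(struct etna_cmd_stream *stream, int in_fence)
{
	int out_fence = -1;

	etna_cmd_stream_flush2(stream, in_fence, &out_fence);

	/* The submit ioctl does not take ownership of the in-fence fd,
	 * so the caller is free to close it once the flush has returned. */
	if (in_fence >= 0)
		close(in_fence);

	return out_fence;
}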
diff --git a/etnaviv/etnaviv_device.c b/etnaviv/etnaviv_device.c
index 3ce92030..d83e8d3e 100644
--- a/etnaviv/etnaviv_device.c
+++ b/etnaviv/etnaviv_device.c
@@ -24,10 +24,6 @@
24 * Christian Gmeiner <christian.gmeiner@gmail.com> 24 * Christian Gmeiner <christian.gmeiner@gmail.com>
25 */ 25 */
26 26
27#ifdef HAVE_CONFIG_H
28#include "config.h"
29#endif
30
31#include <stdlib.h> 27#include <stdlib.h>
32#include <linux/stddef.h> 28#include <linux/stddef.h>
33#include <linux/types.h> 29#include <linux/types.h>
diff --git a/etnaviv/etnaviv_drm.h b/etnaviv/etnaviv_drm.h
index 2584c1cc..0d5c49dc 100644
--- a/etnaviv/etnaviv_drm.h
+++ b/etnaviv/etnaviv_drm.h
@@ -1,3 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
1/* 2/*
2 * Copyright (C) 2015 Etnaviv Project 3 * Copyright (C) 2015 Etnaviv Project
3 * 4 *
@@ -54,6 +55,12 @@ struct drm_etnaviv_timespec {
54#define ETNAVIV_PARAM_GPU_FEATURES_4 0x07 55#define ETNAVIV_PARAM_GPU_FEATURES_4 0x07
55#define ETNAVIV_PARAM_GPU_FEATURES_5 0x08 56#define ETNAVIV_PARAM_GPU_FEATURES_5 0x08
56#define ETNAVIV_PARAM_GPU_FEATURES_6 0x09 57#define ETNAVIV_PARAM_GPU_FEATURES_6 0x09
58#define ETNAVIV_PARAM_GPU_FEATURES_7 0x0a
59#define ETNAVIV_PARAM_GPU_FEATURES_8 0x0b
60#define ETNAVIV_PARAM_GPU_FEATURES_9 0x0c
61#define ETNAVIV_PARAM_GPU_FEATURES_10 0x0d
62#define ETNAVIV_PARAM_GPU_FEATURES_11 0x0e
63#define ETNAVIV_PARAM_GPU_FEATURES_12 0x0f
57 64
58#define ETNAVIV_PARAM_GPU_STREAM_COUNT 0x10 65#define ETNAVIV_PARAM_GPU_STREAM_COUNT 0x10
59#define ETNAVIV_PARAM_GPU_REGISTER_MAX 0x11 66#define ETNAVIV_PARAM_GPU_REGISTER_MAX 0x11
@@ -150,10 +157,29 @@ struct drm_etnaviv_gem_submit_bo {
150 __u64 presumed; /* in/out, presumed buffer address */ 157 __u64 presumed; /* in/out, presumed buffer address */
151}; 158};
152 159
160/* performance monitor request (pmr) */
161#define ETNA_PM_PROCESS_PRE 0x0001
162#define ETNA_PM_PROCESS_POST 0x0002
163struct drm_etnaviv_gem_submit_pmr {
164 __u32 flags; /* in, when to process request (ETNA_PM_PROCESS_x) */
165 __u8 domain; /* in, pm domain */
166 __u8 pad;
167 __u16 signal; /* in, pm signal */
168 __u32 sequence; /* in, sequence number */
169 __u32 read_offset; /* in, offset from read_bo */
170 __u32 read_idx; /* in, index of read_bo buffer */
171};
172
153/* Each cmdstream submit consists of a table of buffers involved, and 173/* Each cmdstream submit consists of a table of buffers involved, and
154 * one or more cmdstream buffers. This allows for conditional execution 174 * one or more cmdstream buffers. This allows for conditional execution
155 * (context-restore), and IB buffers needed for per tile/bin draw cmds. 175 * (context-restore), and IB buffers needed for per tile/bin draw cmds.
156 */ 176 */
177#define ETNA_SUBMIT_NO_IMPLICIT 0x0001
178#define ETNA_SUBMIT_FENCE_FD_IN 0x0002
179#define ETNA_SUBMIT_FENCE_FD_OUT 0x0004
180#define ETNA_SUBMIT_FLAGS (ETNA_SUBMIT_NO_IMPLICIT | \
181 ETNA_SUBMIT_FENCE_FD_IN | \
182 ETNA_SUBMIT_FENCE_FD_OUT)
157#define ETNA_PIPE_3D 0x00 183#define ETNA_PIPE_3D 0x00
158#define ETNA_PIPE_2D 0x01 184#define ETNA_PIPE_2D 0x01
159#define ETNA_PIPE_VG 0x02 185#define ETNA_PIPE_VG 0x02
@@ -167,6 +193,11 @@ struct drm_etnaviv_gem_submit {
167 __u64 bos; /* in, ptr to array of submit_bo's */ 193 __u64 bos; /* in, ptr to array of submit_bo's */
168 __u64 relocs; /* in, ptr to array of submit_reloc's */ 194 __u64 relocs; /* in, ptr to array of submit_reloc's */
169 __u64 stream; /* in, ptr to cmdstream */ 195 __u64 stream; /* in, ptr to cmdstream */
196 __u32 flags; /* in, mask of ETNA_SUBMIT_x */
197 __s32 fence_fd; /* in/out, fence fd (see ETNA_SUBMIT_FENCE_FD_x) */
198 __u64 pmrs; /* in, ptr to array of submit_pmr's */
199 __u32 nr_pmrs; /* in, number of submit_pmr's */
200 __u32 pad;
170}; 201};
171 202
172/* The normal way to synchronize with the GPU is just to CPU_PREP on 203/* The normal way to synchronize with the GPU is just to CPU_PREP on
@@ -202,6 +233,27 @@ struct drm_etnaviv_gem_wait {
202 struct drm_etnaviv_timespec timeout; /* in */ 233 struct drm_etnaviv_timespec timeout; /* in */
203}; 234};
204 235
236/*
237 * Performance Monitor (PM):
238 */
239
240struct drm_etnaviv_pm_domain {
241 __u32 pipe; /* in */
242 __u8 iter; /* in/out, select pm domain at index iter */
243 __u8 id; /* out, id of domain */
244 __u16 nr_signals; /* out, how many signals does this domain provide */
245 char name[64]; /* out, name of domain */
246};
247
248struct drm_etnaviv_pm_signal {
249 __u32 pipe; /* in */
250 __u8 domain; /* in, pm domain index */
251 __u8 pad;
252 __u16 iter; /* in/out, select pm source at index iter */
253 __u16 id; /* out, id of signal */
254	char name[64];	/* out, name of signal */
255};
256
205#define DRM_ETNAVIV_GET_PARAM 0x00 257#define DRM_ETNAVIV_GET_PARAM 0x00
206/* placeholder: 258/* placeholder:
207#define DRM_ETNAVIV_SET_PARAM 0x01 259#define DRM_ETNAVIV_SET_PARAM 0x01
@@ -214,7 +266,9 @@ struct drm_etnaviv_gem_wait {
214#define DRM_ETNAVIV_WAIT_FENCE 0x07 266#define DRM_ETNAVIV_WAIT_FENCE 0x07
215#define DRM_ETNAVIV_GEM_USERPTR 0x08 267#define DRM_ETNAVIV_GEM_USERPTR 0x08
216#define DRM_ETNAVIV_GEM_WAIT 0x09 268#define DRM_ETNAVIV_GEM_WAIT 0x09
217#define DRM_ETNAVIV_NUM_IOCTLS 0x0a 269#define DRM_ETNAVIV_PM_QUERY_DOM 0x0a
270#define DRM_ETNAVIV_PM_QUERY_SIG 0x0b
271#define DRM_ETNAVIV_NUM_IOCTLS 0x0c
218 272
219#define DRM_IOCTL_ETNAVIV_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GET_PARAM, struct drm_etnaviv_param) 273#define DRM_IOCTL_ETNAVIV_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GET_PARAM, struct drm_etnaviv_param)
220#define DRM_IOCTL_ETNAVIV_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_NEW, struct drm_etnaviv_gem_new) 274#define DRM_IOCTL_ETNAVIV_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_NEW, struct drm_etnaviv_gem_new)
@@ -225,6 +279,8 @@ struct drm_etnaviv_gem_wait {
225#define DRM_IOCTL_ETNAVIV_WAIT_FENCE DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_WAIT_FENCE, struct drm_etnaviv_wait_fence) 279#define DRM_IOCTL_ETNAVIV_WAIT_FENCE DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_WAIT_FENCE, struct drm_etnaviv_wait_fence)
226#define DRM_IOCTL_ETNAVIV_GEM_USERPTR DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_USERPTR, struct drm_etnaviv_gem_userptr) 280#define DRM_IOCTL_ETNAVIV_GEM_USERPTR DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_USERPTR, struct drm_etnaviv_gem_userptr)
227#define DRM_IOCTL_ETNAVIV_GEM_WAIT DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_WAIT, struct drm_etnaviv_gem_wait) 281#define DRM_IOCTL_ETNAVIV_GEM_WAIT DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_WAIT, struct drm_etnaviv_gem_wait)
282#define DRM_IOCTL_ETNAVIV_PM_QUERY_DOM DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_PM_QUERY_DOM, struct drm_etnaviv_pm_domain)
283#define DRM_IOCTL_ETNAVIV_PM_QUERY_SIG DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_PM_QUERY_SIG, struct drm_etnaviv_pm_signal)
228 284
229#if defined(__cplusplus) 285#if defined(__cplusplus)
230} 286}
diff --git a/etnaviv/etnaviv_drmif.h b/etnaviv/etnaviv_drmif.h
index 8119baad..5a6bef8d 100644
--- a/etnaviv/etnaviv_drmif.h
+++ b/etnaviv/etnaviv_drmif.h
@@ -35,6 +35,9 @@ struct etna_pipe;
35struct etna_gpu; 35struct etna_gpu;
36struct etna_device; 36struct etna_device;
37struct etna_cmd_stream; 37struct etna_cmd_stream;
38struct etna_perfmon;
39struct etna_perfmon_domain;
40struct etna_perfmon_signal;
38 41
39enum etna_pipe_id { 42enum etna_pipe_id {
40 ETNA_PIPE_3D = 0, 43 ETNA_PIPE_3D = 0,
@@ -142,6 +145,8 @@ struct etna_cmd_stream *etna_cmd_stream_new(struct etna_pipe *pipe, uint32_t siz
142void etna_cmd_stream_del(struct etna_cmd_stream *stream); 145void etna_cmd_stream_del(struct etna_cmd_stream *stream);
143uint32_t etna_cmd_stream_timestamp(struct etna_cmd_stream *stream); 146uint32_t etna_cmd_stream_timestamp(struct etna_cmd_stream *stream);
144void etna_cmd_stream_flush(struct etna_cmd_stream *stream); 147void etna_cmd_stream_flush(struct etna_cmd_stream *stream);
148void etna_cmd_stream_flush2(struct etna_cmd_stream *stream, int in_fence_fd,
149 int *out_fence_fd);
145void etna_cmd_stream_finish(struct etna_cmd_stream *stream); 150void etna_cmd_stream_finish(struct etna_cmd_stream *stream);
146 151
147static inline uint32_t etna_cmd_stream_avail(struct etna_cmd_stream *stream) 152static inline uint32_t etna_cmd_stream_avail(struct etna_cmd_stream *stream)
@@ -188,4 +193,24 @@ struct etna_reloc {
188 193
189void etna_cmd_stream_reloc(struct etna_cmd_stream *stream, const struct etna_reloc *r); 194void etna_cmd_stream_reloc(struct etna_cmd_stream *stream, const struct etna_reloc *r);
190 195
196/* performance monitoring functions:
197 */
198
199struct etna_perfmon *etna_perfmon_create(struct etna_pipe *pipe);
200void etna_perfmon_del(struct etna_perfmon *perfmon);
201struct etna_perfmon_domain *etna_perfmon_get_dom_by_name(struct etna_perfmon *pm, const char *name);
202struct etna_perfmon_signal *etna_perfmon_get_sig_by_name(struct etna_perfmon_domain *dom, const char *name);
203
204struct etna_perf {
205#define ETNA_PM_PROCESS_PRE 0x0001
206#define ETNA_PM_PROCESS_POST 0x0002
207 uint32_t flags;
208 uint32_t sequence;
209 struct etna_perfmon_signal *signal;
210 struct etna_bo *bo;
211 uint32_t offset;
212};
213
214void etna_cmd_stream_perf(struct etna_cmd_stream *stream, const struct etna_perf *p);
215
191#endif /* ETNAVIV_DRMIF_H_ */ 216#endif /* ETNAVIV_DRMIF_H_ */
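Taken together, the perfmon additions let a client resolve a counter by name and sample it around the commands in a stream. A hedged usage sketch; the "HI" domain and "TOTAL_CYCLES" signal names, the byte offsets into the readback BO and the helper name are assumptions for illustration, not guarantees of the API:

#include "etnaviv_drmif.h"

/* Queue a PRE and a POST sample of one counter for the commands already
 * emitted into 'stream'; both samples land in 'readback_bo'. */
static void sample_counter(struct etna_pipe *pipe, struct etna_cmd_stream *stream,
			   struct etna_bo *readback_bo)
{
	struct etna_perfmon *pm = etna_perfmon_create(pipe);
	struct etna_perfmon_domain *dom;
	struct etna_perfmon_signal *sig;
	struct etna_perf perf = { 0 };

	if (!pm)
		return;

	dom = etna_perfmon_get_dom_by_name(pm, "HI");
	sig = etna_perfmon_get_sig_by_name(dom, "TOTAL_CYCLES");
	if (!sig)
		goto out;

	perf.signal = sig;
	perf.bo = readback_bo;
	perf.sequence = 1;

	perf.flags = ETNA_PM_PROCESS_PRE;    /* sampled before the stream runs */
	perf.offset = 0;
	etna_cmd_stream_perf(stream, &perf);

	perf.flags = ETNA_PM_PROCESS_POST;   /* sampled after the stream retires */
	perf.offset = 4;
	etna_cmd_stream_perf(stream, &perf);

	/* Submit and wait; the two counter values can then be read back
	 * from readback_bo at the offsets requested above. */
	etna_cmd_stream_finish(stream);

out:
	etna_perfmon_del(pm);
}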
diff --git a/etnaviv/etnaviv_gpu.c b/etnaviv/etnaviv_gpu.c
index 35dec6cd..f7efa028 100644
--- a/etnaviv/etnaviv_gpu.c
+++ b/etnaviv/etnaviv_gpu.c
@@ -24,10 +24,6 @@
24 * Christian Gmeiner <christian.gmeiner@gmail.com> 24 * Christian Gmeiner <christian.gmeiner@gmail.com>
25 */ 25 */
26 26
27#ifdef HAVE_CONFIG_H
28# include <config.h>
29#endif
30
31#include "etnaviv_priv.h" 27#include "etnaviv_priv.h"
32#include "etnaviv_drmif.h" 28#include "etnaviv_drmif.h"
33 29
@@ -61,32 +57,13 @@ struct etna_gpu *etna_gpu_new(struct etna_device *dev, unsigned int core)
61 gpu->dev = dev; 57 gpu->dev = dev;
62 gpu->core = core; 58 gpu->core = core;
63 59
64 /* get specs from kernel space */ 60 gpu->model = get_param(dev, core, ETNAVIV_PARAM_GPU_MODEL);
65 gpu->specs.model = get_param(dev, core, ETNAVIV_PARAM_GPU_MODEL); 61 gpu->revision = get_param(dev, core, ETNAVIV_PARAM_GPU_REVISION);
66 gpu->specs.revision = get_param(dev, core, ETNAVIV_PARAM_GPU_REVISION); 62
67 gpu->specs.features[0] = get_param(dev, core, ETNAVIV_PARAM_GPU_FEATURES_0); 63 if (!gpu->model)
68 gpu->specs.features[1] = get_param(dev, core, ETNAVIV_PARAM_GPU_FEATURES_1);
69 gpu->specs.features[2] = get_param(dev, core, ETNAVIV_PARAM_GPU_FEATURES_2);
70 gpu->specs.features[3] = get_param(dev, core, ETNAVIV_PARAM_GPU_FEATURES_3);
71 gpu->specs.features[4] = get_param(dev, core, ETNAVIV_PARAM_GPU_FEATURES_4);
72 gpu->specs.features[5] = get_param(dev, core, ETNAVIV_PARAM_GPU_FEATURES_5);
73 gpu->specs.features[6] = get_param(dev, core, ETNAVIV_PARAM_GPU_FEATURES_6);
74 gpu->specs.stream_count = get_param(dev, core, ETNA_GPU_STREAM_COUNT);
75 gpu->specs.register_max = get_param(dev, core, ETNA_GPU_REGISTER_MAX);
76 gpu->specs.thread_count = get_param(dev, core, ETNA_GPU_THREAD_COUNT);
77 gpu->specs.vertex_cache_size = get_param(dev, core, ETNA_GPU_VERTEX_CACHE_SIZE);
78 gpu->specs.shader_core_count = get_param(dev, core, ETNA_GPU_SHADER_CORE_COUNT);
79 gpu->specs.pixel_pipes = get_param(dev, core, ETNA_GPU_PIXEL_PIPES);
80 gpu->specs.vertex_output_buffer_size = get_param(dev, core, ETNA_GPU_VERTEX_OUTPUT_BUFFER_SIZE);
81 gpu->specs.buffer_size = get_param(dev, core, ETNA_GPU_BUFFER_SIZE);
82 gpu->specs.instruction_count = get_param(dev, core, ETNA_GPU_INSTRUCTION_COUNT);
83 gpu->specs.num_constants = get_param(dev, core, ETNA_GPU_NUM_CONSTANTS);
84 gpu->specs.num_varyings = get_param(dev, core, ETNA_GPU_NUM_VARYINGS);
85
86 if (!gpu->specs.model)
87 goto fail; 64 goto fail;
88 65
89 INFO_MSG(" GPU model: 0x%x (rev %x)", gpu->specs.model, gpu->specs.revision); 66 INFO_MSG(" GPU model: 0x%x (rev %x)", gpu->model, gpu->revision);
90 67
91 return gpu; 68 return gpu;
92fail: 69fail:
@@ -104,66 +81,69 @@ void etna_gpu_del(struct etna_gpu *gpu)
104int etna_gpu_get_param(struct etna_gpu *gpu, enum etna_param_id param, 81int etna_gpu_get_param(struct etna_gpu *gpu, enum etna_param_id param,
105 uint64_t *value) 82 uint64_t *value)
106{ 83{
84 struct etna_device *dev = gpu->dev;
85 unsigned int core = gpu->core;
86
107 switch(param) { 87 switch(param) {
108 case ETNA_GPU_MODEL: 88 case ETNA_GPU_MODEL:
109 *value = gpu->specs.model; 89 *value = gpu->model;
110 return 0; 90 return 0;
111 case ETNA_GPU_REVISION: 91 case ETNA_GPU_REVISION:
112 *value = gpu->specs.revision; 92 *value = gpu->revision;
113 return 0; 93 return 0;
114 case ETNA_GPU_FEATURES_0: 94 case ETNA_GPU_FEATURES_0:
115 *value = gpu->specs.features[0]; 95 *value = get_param(dev, core, ETNAVIV_PARAM_GPU_FEATURES_0);
116 return 0; 96 return 0;
117 case ETNA_GPU_FEATURES_1: 97 case ETNA_GPU_FEATURES_1:
118 *value = gpu->specs.features[1]; 98 *value = get_param(dev, core, ETNAVIV_PARAM_GPU_FEATURES_1);
119 return 0; 99 return 0;
120 case ETNA_GPU_FEATURES_2: 100 case ETNA_GPU_FEATURES_2:
121 *value = gpu->specs.features[2]; 101 *value = get_param(dev, core, ETNAVIV_PARAM_GPU_FEATURES_2);
122 return 0; 102 return 0;
123 case ETNA_GPU_FEATURES_3: 103 case ETNA_GPU_FEATURES_3:
124 *value = gpu->specs.features[3]; 104 *value = get_param(dev, core, ETNAVIV_PARAM_GPU_FEATURES_3);
125 return 0; 105 return 0;
126 case ETNA_GPU_FEATURES_4: 106 case ETNA_GPU_FEATURES_4:
127 *value = gpu->specs.features[4]; 107 *value = get_param(dev, core, ETNAVIV_PARAM_GPU_FEATURES_4);
128 return 0; 108 return 0;
129 case ETNA_GPU_FEATURES_5: 109 case ETNA_GPU_FEATURES_5:
130 *value = gpu->specs.features[5]; 110 *value = get_param(dev, core, ETNAVIV_PARAM_GPU_FEATURES_5);
131 return 0; 111 return 0;
132 case ETNA_GPU_FEATURES_6: 112 case ETNA_GPU_FEATURES_6:
133 *value = gpu->specs.features[6]; 113 *value = get_param(dev, core, ETNAVIV_PARAM_GPU_FEATURES_6);
134 return 0; 114 return 0;
135 case ETNA_GPU_STREAM_COUNT: 115 case ETNA_GPU_STREAM_COUNT:
136 *value = gpu->specs.stream_count; 116 *value = get_param(dev, core, ETNA_GPU_STREAM_COUNT);
137 return 0; 117 return 0;
138 case ETNA_GPU_REGISTER_MAX: 118 case ETNA_GPU_REGISTER_MAX:
139 *value = gpu->specs.register_max; 119 *value = get_param(dev, core, ETNA_GPU_REGISTER_MAX);
140 return 0; 120 return 0;
141 case ETNA_GPU_THREAD_COUNT: 121 case ETNA_GPU_THREAD_COUNT:
142 *value = gpu->specs.thread_count; 122 *value = get_param(dev, core, ETNA_GPU_THREAD_COUNT);
143 return 0; 123 return 0;
144 case ETNA_GPU_VERTEX_CACHE_SIZE: 124 case ETNA_GPU_VERTEX_CACHE_SIZE:
145 *value = gpu->specs.vertex_cache_size; 125 *value = get_param(dev, core, ETNA_GPU_VERTEX_CACHE_SIZE);
146 return 0; 126 return 0;
147 case ETNA_GPU_SHADER_CORE_COUNT: 127 case ETNA_GPU_SHADER_CORE_COUNT:
148 *value = gpu->specs.shader_core_count; 128 *value = get_param(dev, core, ETNA_GPU_SHADER_CORE_COUNT);
149 return 0; 129 return 0;
150 case ETNA_GPU_PIXEL_PIPES: 130 case ETNA_GPU_PIXEL_PIPES:
151 *value = gpu->specs.pixel_pipes; 131 *value = get_param(dev, core, ETNA_GPU_PIXEL_PIPES);
152 return 0; 132 return 0;
153 case ETNA_GPU_VERTEX_OUTPUT_BUFFER_SIZE: 133 case ETNA_GPU_VERTEX_OUTPUT_BUFFER_SIZE:
154 *value = gpu->specs.vertex_output_buffer_size; 134 *value = get_param(dev, core, ETNA_GPU_VERTEX_OUTPUT_BUFFER_SIZE);
155 return 0; 135 return 0;
156 case ETNA_GPU_BUFFER_SIZE: 136 case ETNA_GPU_BUFFER_SIZE:
157 *value = gpu->specs.buffer_size; 137 *value = get_param(dev, core, ETNA_GPU_BUFFER_SIZE);
158 return 0; 138 return 0;
159 case ETNA_GPU_INSTRUCTION_COUNT: 139 case ETNA_GPU_INSTRUCTION_COUNT:
160 *value = gpu->specs.instruction_count; 140 *value = get_param(dev, core, ETNA_GPU_INSTRUCTION_COUNT);
161 return 0; 141 return 0;
162 case ETNA_GPU_NUM_CONSTANTS: 142 case ETNA_GPU_NUM_CONSTANTS:
163 *value = gpu->specs.num_constants; 143 *value = get_param(dev, core, ETNA_GPU_NUM_CONSTANTS);
164 return 0; 144 return 0;
165 case ETNA_GPU_NUM_VARYINGS: 145 case ETNA_GPU_NUM_VARYINGS:
166 *value = gpu->specs.num_varyings; 146 *value = get_param(dev, core, ETNA_GPU_NUM_VARYINGS);
167 return 0; 147 return 0;
168 148
169 default: 149 default:
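After this change only the model and revision are cached when the GPU object is created; every other property is fetched from the kernel when it is asked for, so existing callers of etna_gpu_get_param() keep working unchanged. A small sketch of that unchanged calling convention (error handling trimmed):

#include <stdio.h>
#include <inttypes.h>
#include "etnaviv_drmif.h"

/* Print a couple of GPU properties; most parameters now translate directly
 * to an ETNAVIV_GET_PARAM ioctl, while model and revision stay cached on
 * the gpu object. */
static void dump_gpu_info(struct etna_device *dev)
{
	struct etna_gpu *gpu = etna_gpu_new(dev, 0);   /* core 0 */
	uint64_t model = 0, streams = 0;

	if (!gpu)
		return;

	etna_gpu_get_param(gpu, ETNA_GPU_MODEL, &model);
	etna_gpu_get_param(gpu, ETNA_GPU_STREAM_COUNT, &streams);
	printf("model 0x%" PRIx64 ", %" PRIu64 " streams\n", model, streams);

	etna_gpu_del(gpu);
}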
diff --git a/etnaviv/etnaviv_perfmon.c b/etnaviv/etnaviv_perfmon.c
new file mode 100644
index 00000000..5f408a7b
--- /dev/null
+++ b/etnaviv/etnaviv_perfmon.c
@@ -0,0 +1,185 @@
1/*
2 * Copyright (C) 2017 Etnaviv Project
3 * Copyright (C) 2017 Zodiac Inflight Innovations
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 * Authors:
25 * Christian Gmeiner <christian.gmeiner@gmail.com>
26 */
27
28#include "etnaviv_priv.h"
29
30static int etna_perfmon_query_signals(struct etna_perfmon *pm, struct etna_perfmon_domain *dom)
31{
32 struct etna_device *dev = pm->pipe->gpu->dev;
33 struct drm_etnaviv_pm_signal req = {
34 .pipe = pm->pipe->id,
35 .domain = dom->id
36 };
37
38 do {
39 struct etna_perfmon_signal *sig;
40 int ret;
41
42 ret = drmCommandWriteRead(dev->fd, DRM_ETNAVIV_PM_QUERY_SIG, &req, sizeof(req));
43 if (ret)
44 break;
45
46 sig = calloc(1, sizeof(*sig));
47 if (!sig)
48 return -ENOMEM;
49
50 INFO_MSG("perfmon signal:");
51 INFO_MSG("id = %d", req.id);
52 INFO_MSG("name = %s", req.name);
53
54 sig->domain = dom;
55 sig->signal = req.id;
56 strncpy(sig->name, req.name, sizeof(sig->name));
57 list_addtail(&sig->head, &dom->signals);
58 } while (req.iter != 0xffff);
59
60 return 0;
61}
62
63static int etna_perfmon_query_domains(struct etna_perfmon *pm)
64{
65 struct etna_device *dev = pm->pipe->gpu->dev;
66 struct drm_etnaviv_pm_domain req = {
67 .pipe = pm->pipe->id
68 };
69
70 do {
71 struct etna_perfmon_domain *dom;
72 int ret;
73
74 ret = drmCommandWriteRead(dev->fd, DRM_ETNAVIV_PM_QUERY_DOM, &req, sizeof(req));
75 if (ret)
76 break;
77
78 dom = calloc(1, sizeof(*dom));
79 if (!dom)
80 return -ENOMEM;
81
82 list_inithead(&dom->signals);
83 dom->id = req.id;
84 strncpy(dom->name, req.name, sizeof(dom->name));
85 list_addtail(&dom->head, &pm->domains);
86
87 INFO_MSG("perfmon domain:");
88 INFO_MSG("id = %d", req.id);
89 INFO_MSG("name = %s", req.name);
90 INFO_MSG("nr_signals = %d", req.nr_signals);
91
92 /* Query all available signals for this domain. */
93 if (req.nr_signals > 0) {
94 ret = etna_perfmon_query_signals(pm, dom);
95 if (ret)
96 return ret;
97 }
98 } while (req.iter != 0xff);
99
100 return 0;
101}
102
103static void etna_perfmon_free_signals(struct etna_perfmon_domain *dom)
104{
105 struct etna_perfmon_signal *sig, *next;
106
107 LIST_FOR_EACH_ENTRY_SAFE(sig, next, &dom->signals, head) {
108 list_del(&sig->head);
109 free(sig);
110 }
111}
112
113static void etna_perfmon_free_domains(struct etna_perfmon *pm)
114{
115 struct etna_perfmon_domain *dom, *next;
116
117 LIST_FOR_EACH_ENTRY_SAFE(dom, next, &pm->domains, head) {
118 etna_perfmon_free_signals(dom);
119 list_del(&dom->head);
120 free(dom);
121 }
122}
123
124struct etna_perfmon *etna_perfmon_create(struct etna_pipe *pipe)
125{
126 struct etna_perfmon *pm;
127 int ret;
128
129 pm = calloc(1, sizeof(*pm));
130 if (!pm) {
131 ERROR_MSG("allocation failed");
132 return NULL;
133 }
134
135 list_inithead(&pm->domains);
136 pm->pipe = pipe;
137
138 /* query all available domains and sources for this device */
139 ret = etna_perfmon_query_domains(pm);
140 if (ret)
141 goto fail;
142
143 return pm;
144
145fail:
146 etna_perfmon_del(pm);
147 return NULL;
148}
149
150void etna_perfmon_del(struct etna_perfmon *pm)
151{
152 if (!pm)
153 return;
154
155 etna_perfmon_free_domains(pm);
156 free(pm);
157}
158
159struct etna_perfmon_domain *etna_perfmon_get_dom_by_name(struct etna_perfmon *pm, const char *name)
160{
161 struct etna_perfmon_domain *dom;
162
163 if (pm) {
164 LIST_FOR_EACH_ENTRY(dom, &pm->domains, head) {
165 if (!strcmp(dom->name, name))
166 return dom;
167 }
168 }
169
170 return NULL;
171}
172
173struct etna_perfmon_signal *etna_perfmon_get_sig_by_name(struct etna_perfmon_domain *dom, const char *name)
174{
175 struct etna_perfmon_signal *signal;
176
177 if (dom) {
178 LIST_FOR_EACH_ENTRY(signal, &dom->signals, head) {
179 if (!strcmp(signal->name, name))
180 return signal;
181 }
182 }
183
184 return NULL;
185}
diff --git a/etnaviv/etnaviv_pipe.c b/etnaviv/etnaviv_pipe.c
index 94c5d377..53954aa3 100644
--- a/etnaviv/etnaviv_pipe.c
+++ b/etnaviv/etnaviv_pipe.c
@@ -24,10 +24,6 @@
24 * Christian Gmeiner <christian.gmeiner@gmail.com> 24 * Christian Gmeiner <christian.gmeiner@gmail.com>
25 */ 25 */
26 26
27#ifdef HAVE_CONFIG_H
28# include <config.h>
29#endif
30
31#include "etnaviv_priv.h" 27#include "etnaviv_priv.h"
32 28
33int etna_pipe_wait(struct etna_pipe *pipe, uint32_t timestamp, uint32_t ms) 29int etna_pipe_wait(struct etna_pipe *pipe, uint32_t timestamp, uint32_t ms)
diff --git a/etnaviv/etnaviv_priv.h b/etnaviv/etnaviv_priv.h
index feaa5ad9..e45d364c 100644
--- a/etnaviv/etnaviv_priv.h
+++ b/etnaviv/etnaviv_priv.h
@@ -47,25 +47,6 @@
47#include "etnaviv_drmif.h" 47#include "etnaviv_drmif.h"
48#include "etnaviv_drm.h" 48#include "etnaviv_drm.h"
49 49
50#define VIV_FEATURES_WORD_COUNT 7
51
52struct etna_specs {
53 uint32_t model;
54 uint32_t revision;
55 uint32_t features[VIV_FEATURES_WORD_COUNT];
56 uint32_t stream_count;
57 uint32_t register_max;
58 uint32_t thread_count;
59 uint32_t shader_core_count;
60 uint32_t vertex_cache_size;
61 uint32_t vertex_output_buffer_size;
62 uint32_t pixel_pipes;
63 uint32_t instruction_count;
64 uint32_t num_constants;
65 uint32_t num_varyings;
66 uint32_t buffer_size;
67};
68
69struct etna_bo_bucket { 50struct etna_bo_bucket {
70 uint32_t size; 51 uint32_t size;
71 struct list_head list; 52 struct list_head list;
@@ -134,8 +115,9 @@ struct etna_bo {
134 115
135struct etna_gpu { 116struct etna_gpu {
136 struct etna_device *dev; 117 struct etna_device *dev;
137 struct etna_specs specs;
138 uint32_t core; 118 uint32_t core;
119 uint32_t model;
120 uint32_t revision;
139}; 121};
140 122
141struct etna_pipe { 123struct etna_pipe {
@@ -158,6 +140,10 @@ struct etna_cmd_stream_priv {
158 /* reloc's table: */ 140 /* reloc's table: */
159 struct drm_etnaviv_gem_submit_reloc *relocs; 141 struct drm_etnaviv_gem_submit_reloc *relocs;
160 uint32_t nr_relocs, max_relocs; 142 uint32_t nr_relocs, max_relocs;
143
144 /* perf's table: */
145 struct drm_etnaviv_gem_submit_pmr *pmrs;
146 uint32_t nr_pmrs, max_pmrs;
161 } submit; 147 } submit;
162 148
163 /* should have matching entries in submit.bos: */ 149 /* should have matching entries in submit.bos: */
@@ -169,6 +155,27 @@ struct etna_cmd_stream_priv {
169 void *reset_notify_priv; 155 void *reset_notify_priv;
170}; 156};
171 157
158struct etna_perfmon {
159 struct list_head domains;
160 struct etna_pipe *pipe;
161};
162
163struct etna_perfmon_domain
164{
165 struct list_head head;
166 struct list_head signals;
167 uint8_t id;
168 char name[64];
169};
170
171struct etna_perfmon_signal
172{
173 struct list_head head;
174 struct etna_perfmon_domain *domain;
175 uint8_t signal;
176 char name[64];
177};
178
172#define ALIGN(v,a) (((v) + (a) - 1) & ~((a) - 1)) 179#define ALIGN(v,a) (((v) + (a) - 1) & ~((a) - 1))
173#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) 180#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
174 181
diff --git a/etnaviv/meson.build b/etnaviv/meson.build
new file mode 100644
index 00000000..ca2aa544
--- /dev/null
+++ b/etnaviv/meson.build
@@ -0,0 +1,59 @@
1# Copyright © 2017-2018 Intel Corporation
2
3# Permission is hereby granted, free of charge, to any person obtaining a copy
4# of this software and associated documentation files (the "Software"), to deal
5# in the Software without restriction, including without limitation the rights
6# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7# copies of the Software, and to permit persons to whom the Software is
8# furnished to do so, subject to the following conditions:
9
10# The above copyright notice and this permission notice shall be included in
11# all copies or substantial portions of the Software.
12
13# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19# SOFTWARE.
20
21
22libdrm_etnaviv = shared_library(
23 'drm_etnaviv',
24 [
25 files(
26 'etnaviv_device.c', 'etnaviv_gpu.c', 'etnaviv_bo.c', 'etnaviv_bo_cache.c',
27 'etnaviv_perfmon.c', 'etnaviv_pipe.c', 'etnaviv_cmd_stream.c',
28 ),
29 config_file
30 ],
31 include_directories : [inc_root, inc_drm],
32 link_with : libdrm,
33 c_args : warn_c_args,
34 dependencies : [dep_pthread_stubs, dep_rt, dep_atomic_ops],
35 version : '1.0.0',
36 install : true,
37)
38
39install_headers('etnaviv_drmif.h', subdir : 'libdrm')
40
41pkg.generate(
42 name : 'libdrm_etnaviv',
43 libraries : libdrm_etnaviv,
44 subdirs : ['.', 'libdrm'],
45 version : meson.project_version(),
46 requires_private : 'libdrm',
47 description : 'Userspace interface to etnaviv kernel DRM services',
48)
49
50ext_libdrm_etnaviv = declare_dependency(
51 link_with : [libdrm, libdrm_etnaviv],
52 include_directories : [inc_drm, include_directories('.')],
53)
54
55test(
56 'etnaviv-symbol-check',
57 prog_bash,
58 args : [files('etnaviv-symbol-check'), libdrm_etnaviv]
59)
diff --git a/exynos/exynos-symbol-check b/exynos/exynos-symbol-check
index 9692caa6..e9f1b04d 100755
--- a/exynos/exynos-symbol-check
+++ b/exynos/exynos-symbol-check
@@ -3,7 +3,7 @@
3# The following symbols (past the first five) are taken from the public headers. 3# The following symbols (past the first five) are taken from the public headers.
4# A list of the latter should be available Makefile.am/libdrm_exynos*_HEADERS 4# A list of the latter should be available Makefile.am/libdrm_exynos*_HEADERS
5 5
6FUNCS=$(nm -D --format=bsd --defined-only ${1-.libs/libdrm_exynos.so} | awk '{print $3}'| while read func; do 6FUNCS=$($NM -D --format=bsd --defined-only ${1-.libs/libdrm_exynos.so} | awk '{print $3}'| while read func; do
7( grep -q "^$func$" || echo $func ) <<EOF 7( grep -q "^$func$" || echo $func ) <<EOF
8__bss_start 8__bss_start
9_edata 9_edata
diff --git a/exynos/exynos_drm.c b/exynos/exynos_drm.c
index b961e520..e1afef65 100644
--- a/exynos/exynos_drm.c
+++ b/exynos/exynos_drm.c
@@ -24,10 +24,6 @@
24 * Inki Dae <inki.dae@samsung.com> 24 * Inki Dae <inki.dae@samsung.com>
25 */ 25 */
26 26
27#ifdef HAVE_CONFIG_H
28#include "config.h"
29#endif
30
31#include <stdlib.h> 27#include <stdlib.h>
32#include <stdio.h> 28#include <stdio.h>
33#include <string.h> 29#include <string.h>
@@ -417,7 +413,7 @@ exynos_handle_event(struct exynos_device *dev, struct exynos_event_context *ctx)
417 413
418 i = 0; 414 i = 0;
419 while (i < len) { 415 while (i < len) {
420 e = (struct drm_event *) &buffer[i]; 416 e = (struct drm_event *)(buffer + i);
421 switch (e->type) { 417 switch (e->type) {
422 case DRM_EVENT_VBLANK: 418 case DRM_EVENT_VBLANK:
423 if (evctx->version < 1 || 419 if (evctx->version < 1 ||
diff --git a/exynos/exynos_drmif.h b/exynos/exynos_drmif.h
index 626e3998..154439bb 100644
--- a/exynos/exynos_drmif.h
+++ b/exynos/exynos_drmif.h
@@ -31,6 +31,10 @@
31#include <stdint.h> 31#include <stdint.h>
32#include "exynos_drm.h" 32#include "exynos_drm.h"
33 33
34#if defined(__cplusplus)
35extern "C" {
36#endif
37
34struct exynos_device { 38struct exynos_device {
35 int fd; 39 int fd;
36}; 40};
@@ -109,4 +113,8 @@ int exynos_handle_event(struct exynos_device *dev,
109 struct exynos_event_context *ctx); 113 struct exynos_event_context *ctx);
110 114
111 115
116#if defined(__cplusplus)
117}
118#endif
119
112#endif /* EXYNOS_DRMIF_H_ */ 120#endif /* EXYNOS_DRMIF_H_ */
diff --git a/exynos/exynos_fimg2d.c b/exynos/exynos_fimg2d.c
index 7f1d105a..bca884b9 100644
--- a/exynos/exynos_fimg2d.c
+++ b/exynos/exynos_fimg2d.c
@@ -3,17 +3,26 @@
3 * Authors: 3 * Authors:
4 * Inki Dae <inki.dae@samsung.com> 4 * Inki Dae <inki.dae@samsung.com>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * under the terms of the GNU General Public License as published by the 7 * copy of this software and associated documentation files (the "Software"),
8 * Free Software Foundation; either version 2 of the License, or (at your 8 * to deal in the Software without restriction, including without limitation
9 * option) any later version. 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
10 * 12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23 * OTHER DEALINGS IN THE SOFTWARE.
11 */ 24 */
12 25
13#ifdef HAVE_CONFIG_H
14#include "config.h"
15#endif
16
17#include <stdlib.h> 26#include <stdlib.h>
18#include <stdio.h> 27#include <stdio.h>
19#include <string.h> 28#include <string.h>
@@ -293,20 +302,6 @@ static void g2d_set_direction(struct g2d_context *ctx,
293} 302}
294 303
295/* 304/*
296 * g2d_reset - reset fimg2d hardware.
297 *
298 * @ctx: a pointer to g2d_context structure.
299 *
300 */
301static void g2d_reset(struct g2d_context *ctx)
302{
303 ctx->cmd_nr = 0;
304 ctx->cmd_buf_nr = 0;
305
306 g2d_add_cmd(ctx, SOFT_RESET_REG, 0x01);
307}
308
309/*
310 * g2d_flush - submit all commands and values in user side command buffer 305 * g2d_flush - submit all commands and values in user side command buffer
311 * to command queue aware of fimg2d dma. 306 * to command queue aware of fimg2d dma.
312 * 307 *
diff --git a/exynos/exynos_fimg2d.h b/exynos/exynos_fimg2d.h
index a825c683..a4dfbe73 100644
--- a/exynos/exynos_fimg2d.h
+++ b/exynos/exynos_fimg2d.h
@@ -3,11 +3,24 @@
3 * Authors: 3 * Authors:
4 * Inki Dae <inki.dae@samsung.com> 4 * Inki Dae <inki.dae@samsung.com>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * under the terms of the GNU General Public License as published by the 7 * copy of this software and associated documentation files (the "Software"),
8 * Free Software Foundation; either version 2 of the License, or (at your 8 * to deal in the Software without restriction, including without limitation
9 * option) any later version. 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
10 * 12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23 * OTHER DEALINGS IN THE SOFTWARE.
11 */ 24 */
12 25
13#ifndef _FIMG2D_H_ 26#ifndef _FIMG2D_H_
diff --git a/exynos/fimg2d_reg.h b/exynos/fimg2d_reg.h
index 07dd6349..d42296d4 100644
--- a/exynos/fimg2d_reg.h
+++ b/exynos/fimg2d_reg.h
@@ -3,11 +3,24 @@
3 * Authors: 3 * Authors:
4 * Inki Dae <inki.dae@samsung.com> 4 * Inki Dae <inki.dae@samsung.com>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * under the terms of the GNU General Public License as published by the 7 * copy of this software and associated documentation files (the "Software"),
8 * Free Software Foundation; either version 2 of the License, or (at your 8 * to deal in the Software without restriction, including without limitation
9 * option) any later version. 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
10 * 12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23 * OTHER DEALINGS IN THE SOFTWARE.
11 */ 24 */
12 25
13#ifndef _FIMG2D_REG_H_ 26#ifndef _FIMG2D_REG_H_
diff --git a/exynos/meson.build b/exynos/meson.build
new file mode 100644
index 00000000..30d36405
--- /dev/null
+++ b/exynos/meson.build
@@ -0,0 +1,54 @@
1# Copyright © 2017-2018 Intel Corporation
2
3# Permission is hereby granted, free of charge, to any person obtaining a copy
4# of this software and associated documentation files (the "Software"), to deal
5# in the Software without restriction, including without limitation the rights
6# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7# copies of the Software, and to permit persons to whom the Software is
8# furnished to do so, subject to the following conditions:
9
10# The above copyright notice and this permission notice shall be included in
11# all copies or substantial portions of the Software.
12
13# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19# SOFTWARE.
20
21libdrm_exynos = shared_library(
22 'drm_exynos',
23 [files('exynos_drm.c', 'exynos_fimg2d.c'), config_file],
24 c_args : warn_c_args,
25 include_directories : [inc_root, inc_drm],
26 link_with : libdrm,
27 dependencies : [dep_pthread_stubs],
28 version : '1.0.0',
29 install : true,
30)
31
32install_headers('exynos_drmif.h', subdir : 'libdrm')
33install_headers('exynos_drm.h', 'exynos_fimg2d.h', subdir : 'exynos')
34
35ext_libdrm_exynos = declare_dependency(
36 link_with : [libdrm, libdrm_exynos],
37 include_directories : [inc_drm, include_directories('.')],
38)
39
40pkg.generate(
41 name : 'libdrm_exynos',
42 libraries : libdrm_exynos,
43 subdirs : ['.', 'libdrm', 'exynos'],
44 version : '0.7',
45 requires_private : 'libdrm',
46 description : 'Userspace interface to exynos kernel DRM services',
47)
48
49test(
50 'exynos-symbol-check',
51 prog_bash,
52 env : env_test,
53 args : [files('exynos-symbol-check'), libdrm_exynos]
54)
diff --git a/freedreno/Makefile.am b/freedreno/Makefile.am
index 0771d146..cbb0d031 100644
--- a/freedreno/Makefile.am
+++ b/freedreno/Makefile.am
@@ -5,6 +5,7 @@ AM_CFLAGS = \
5 $(WARN_CFLAGS) \ 5 $(WARN_CFLAGS) \
6 -I$(top_srcdir) \ 6 -I$(top_srcdir) \
7 $(PTHREADSTUBS_CFLAGS) \ 7 $(PTHREADSTUBS_CFLAGS) \
8 $(VALGRIND_CFLAGS) \
8 -I$(top_srcdir)/include/drm 9 -I$(top_srcdir)/include/drm
9 10
10libdrm_freedreno_la_LTLIBRARIES = libdrm_freedreno.la 11libdrm_freedreno_la_LTLIBRARIES = libdrm_freedreno.la
diff --git a/freedreno/freedreno-symbol-check b/freedreno/freedreno-symbol-check
index 42f2c439..3b119528 100755
--- a/freedreno/freedreno-symbol-check
+++ b/freedreno/freedreno-symbol-check
@@ -3,7 +3,7 @@
3# The following symbols (past the first five) are taken from the public headers. 3# The following symbols (past the first five) are taken from the public headers.
4# A list of the latter should be available Makefile.sources/LIBDRM_FREEDRENO_H_FILES 4# A list of the latter should be available Makefile.sources/LIBDRM_FREEDRENO_H_FILES
5 5
6FUNCS=$(nm -D --format=bsd --defined-only ${1-.libs/libdrm_freedreno.so} | awk '{print $3}'| while read func; do 6FUNCS=$($NM -D --format=bsd --defined-only ${1-.libs/libdrm_freedreno.so} | awk '{print $3}'| while read func; do
7( grep -q "^$func$" || echo $func ) <<EOF 7( grep -q "^$func$" || echo $func ) <<EOF
8__bss_start 8__bss_start
9_edata 9_edata
@@ -18,10 +18,12 @@ fd_bo_from_dmabuf
18fd_bo_from_fbdev 18fd_bo_from_fbdev
19fd_bo_from_handle 19fd_bo_from_handle
20fd_bo_from_name 20fd_bo_from_name
21fd_bo_get_iova
21fd_bo_get_name 22fd_bo_get_name
22fd_bo_handle 23fd_bo_handle
23fd_bo_map 24fd_bo_map
24fd_bo_new 25fd_bo_new
26fd_bo_put_iova
25fd_bo_ref 27fd_bo_ref
26fd_bo_size 28fd_bo_size
27fd_device_del 29fd_device_del
@@ -33,6 +35,7 @@ fd_device_version
33fd_pipe_del 35fd_pipe_del
34fd_pipe_get_param 36fd_pipe_get_param
35fd_pipe_new 37fd_pipe_new
38fd_pipe_new2
36fd_pipe_wait 39fd_pipe_wait
37fd_pipe_wait_timeout 40fd_pipe_wait_timeout
38fd_ringbuffer_cmd_count 41fd_ringbuffer_cmd_count
diff --git a/freedreno/freedreno_bo.c b/freedreno/freedreno_bo.c
index 996d6b95..34c285fb 100644
--- a/freedreno/freedreno_bo.c
+++ b/freedreno/freedreno_bo.c
@@ -26,10 +26,6 @@
26 * Rob Clark <robclark@freedesktop.org> 26 * Rob Clark <robclark@freedesktop.org>
27 */ 27 */
28 28
29#ifdef HAVE_CONFIG_H
30# include <config.h>
31#endif
32
33#include "freedreno_drmif.h" 29#include "freedreno_drmif.h"
34#include "freedreno_priv.h" 30#include "freedreno_priv.h"
35 31
@@ -102,6 +98,8 @@ fd_bo_new(struct fd_device *dev, uint32_t size, uint32_t flags)
102 bo->bo_reuse = TRUE; 98 bo->bo_reuse = TRUE;
103 pthread_mutex_unlock(&table_lock); 99 pthread_mutex_unlock(&table_lock);
104 100
101 VG_BO_ALLOC(bo);
102
105 return bo; 103 return bo;
106} 104}
107 105
@@ -118,6 +116,8 @@ fd_bo_from_handle(struct fd_device *dev, uint32_t handle, uint32_t size)
118 116
119 bo = bo_from_handle(dev, size, handle); 117 bo = bo_from_handle(dev, size, handle);
120 118
119 VG_BO_ALLOC(bo);
120
121out_unlock: 121out_unlock:
122 pthread_mutex_unlock(&table_lock); 122 pthread_mutex_unlock(&table_lock);
123 123
@@ -134,6 +134,7 @@ fd_bo_from_dmabuf(struct fd_device *dev, int fd)
134 pthread_mutex_lock(&table_lock); 134 pthread_mutex_lock(&table_lock);
135 ret = drmPrimeFDToHandle(dev->fd, fd, &handle); 135 ret = drmPrimeFDToHandle(dev->fd, fd, &handle);
136 if (ret) { 136 if (ret) {
137 pthread_mutex_unlock(&table_lock);
137 return NULL; 138 return NULL;
138 } 139 }
139 140
@@ -147,6 +148,8 @@ fd_bo_from_dmabuf(struct fd_device *dev, int fd)
147 148
148 bo = bo_from_handle(dev, size, handle); 149 bo = bo_from_handle(dev, size, handle);
149 150
151 VG_BO_ALLOC(bo);
152
150out_unlock: 153out_unlock:
151 pthread_mutex_unlock(&table_lock); 154 pthread_mutex_unlock(&table_lock);
152 155
@@ -177,8 +180,10 @@ struct fd_bo * fd_bo_from_name(struct fd_device *dev, uint32_t name)
177 goto out_unlock; 180 goto out_unlock;
178 181
179 bo = bo_from_handle(dev, req.size, req.handle); 182 bo = bo_from_handle(dev, req.size, req.handle);
180 if (bo) 183 if (bo) {
181 set_name(bo, name); 184 set_name(bo, name);
185 VG_BO_ALLOC(bo);
186 }
182 187
183out_unlock: 188out_unlock:
184 pthread_mutex_unlock(&table_lock); 189 pthread_mutex_unlock(&table_lock);
@@ -186,6 +191,16 @@ out_unlock:
186 return bo; 191 return bo;
187} 192}
188 193
194uint64_t fd_bo_get_iova(struct fd_bo *bo)
195{
196 return bo->funcs->iova(bo);
197}
198
199void fd_bo_put_iova(struct fd_bo *bo)
200{
201 /* currently a no-op */
202}
203
189struct fd_bo * fd_bo_ref(struct fd_bo *bo) 204struct fd_bo * fd_bo_ref(struct fd_bo *bo)
190{ 205{
191 atomic_inc(&bo->refcnt); 206 atomic_inc(&bo->refcnt);
@@ -213,6 +228,8 @@ out:
213/* Called under table_lock */ 228/* Called under table_lock */
214drm_private void bo_del(struct fd_bo *bo) 229drm_private void bo_del(struct fd_bo *bo)
215{ 230{
231 VG_BO_FREE(bo);
232
216 if (bo->map) 233 if (bo->map)
217 drm_munmap(bo->map, bo->size); 234 drm_munmap(bo->map, bo->size);
218 235
@@ -315,7 +332,7 @@ void fd_bo_cpu_fini(struct fd_bo *bo)
315 bo->funcs->cpu_fini(bo); 332 bo->funcs->cpu_fini(bo);
316} 333}
317 334
318#ifndef HAVE_FREEDRENO_KGSL 335#if !HAVE_FREEDRENO_KGSL
319struct fd_bo * fd_bo_from_fbdev(struct fd_pipe *pipe, int fbfd, uint32_t size) 336struct fd_bo * fd_bo_from_fbdev(struct fd_pipe *pipe, int fbfd, uint32_t size)
320{ 337{
321 return NULL; 338 return NULL;
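
The new fd_bo_get_iova()/fd_bo_put_iova() entry points simply dispatch through the per-backend funcs table (see the freedreno_priv.h and msm_bo.c hunks further down). A minimal caller-side sketch, assuming the kernel advertises FD_VERSION_BO_IOVA as defined later in this patch:

#include "freedreno_drmif.h"

/* Sketch only: fetch the GPU virtual address of a BO when the kernel
 * supports it, otherwise leave it to the caller to fall back to relocs. */
static uint64_t bo_gpu_addr(struct fd_device *dev, struct fd_bo *bo)
{
	uint64_t iova = 0;

	if (fd_device_version(dev) >= FD_VERSION_BO_IOVA) {
		iova = fd_bo_get_iova(bo);
		/* ... emit iova directly into the command stream ... */
		fd_bo_put_iova(bo);	/* currently a no-op, kept for symmetry */
	}
	return iova;
}
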
diff --git a/freedreno/freedreno_bo_cache.c b/freedreno/freedreno_bo_cache.c
index 7becb0d6..3b737159 100644
--- a/freedreno/freedreno_bo_cache.c
+++ b/freedreno/freedreno_bo_cache.c
@@ -26,14 +26,9 @@
26 * Rob Clark <robclark@freedesktop.org> 26 * Rob Clark <robclark@freedesktop.org>
27 */ 27 */
28 28
29#ifdef HAVE_CONFIG_H
30# include <config.h>
31#endif
32
33#include "freedreno_drmif.h" 29#include "freedreno_drmif.h"
34#include "freedreno_priv.h" 30#include "freedreno_priv.h"
35 31
36
37drm_private void bo_del(struct fd_bo *bo); 32drm_private void bo_del(struct fd_bo *bo);
38drm_private extern pthread_mutex_t table_lock; 33drm_private extern pthread_mutex_t table_lock;
39 34
@@ -102,6 +97,7 @@ fd_bo_cache_cleanup(struct fd_bo_cache *cache, time_t time)
102 if (time && ((time - bo->free_time) <= 1)) 97 if (time && ((time - bo->free_time) <= 1))
103 break; 98 break;
104 99
100 VG_BO_OBTAIN(bo);
105 list_del(&bo->list); 101 list_del(&bo->list);
106 bo_del(bo); 102 bo_del(bo);
107 } 103 }
@@ -177,6 +173,7 @@ retry:
177 *size = bucket->size; 173 *size = bucket->size;
178 bo = find_in_bucket(bucket, flags); 174 bo = find_in_bucket(bucket, flags);
179 if (bo) { 175 if (bo) {
176 VG_BO_OBTAIN(bo);
180 if (bo->funcs->madvise(bo, TRUE) <= 0) { 177 if (bo->funcs->madvise(bo, TRUE) <= 0) {
181 /* we've lost the backing pages, delete and try again: */ 178 /* we've lost the backing pages, delete and try again: */
182 pthread_mutex_lock(&table_lock); 179 pthread_mutex_lock(&table_lock);
@@ -207,6 +204,7 @@ fd_bo_cache_free(struct fd_bo_cache *cache, struct fd_bo *bo)
207 clock_gettime(CLOCK_MONOTONIC, &time); 204 clock_gettime(CLOCK_MONOTONIC, &time);
208 205
209 bo->free_time = time.tv_sec; 206 bo->free_time = time.tv_sec;
207 VG_BO_RELEASE(bo);
210 list_addtail(&bo->list, &bucket->list); 208 list_addtail(&bo->list, &bucket->list);
211 fd_bo_cache_cleanup(cache, time.tv_sec); 209 fd_bo_cache_cleanup(cache, time.tv_sec);
212 210
diff --git a/freedreno/freedreno_device.c b/freedreno/freedreno_device.c
index fcbf1402..0b42561a 100644
--- a/freedreno/freedreno_device.c
+++ b/freedreno/freedreno_device.c
@@ -26,10 +26,6 @@
26 * Rob Clark <robclark@freedesktop.org> 26 * Rob Clark <robclark@freedesktop.org>
27 */ 27 */
28 28
29#ifdef HAVE_CONFIG_H
30# include <config.h>
31#endif
32
33#include <sys/types.h> 29#include <sys/types.h>
34#include <sys/stat.h> 30#include <sys/stat.h>
35#include <unistd.h> 31#include <unistd.h>
@@ -65,7 +61,7 @@ struct fd_device * fd_device_new(int fd)
65 61
66 dev = msm_device_new(fd); 62 dev = msm_device_new(fd);
67 dev->version = version->version_minor; 63 dev->version = version->version_minor;
68#ifdef HAVE_FREEDRENO_KGSL 64#if HAVE_FREEDRENO_KGSL
69 } else if (!strcmp(version->name, "kgsl")) { 65 } else if (!strcmp(version->name, "kgsl")) {
70 DEBUG_MSG("kgsl DRM device"); 66 DEBUG_MSG("kgsl DRM device");
71 dev = kgsl_device_new(fd); 67 dev = kgsl_device_new(fd);
@@ -112,12 +108,13 @@ struct fd_device * fd_device_ref(struct fd_device *dev)
112 108
113static void fd_device_del_impl(struct fd_device *dev) 109static void fd_device_del_impl(struct fd_device *dev)
114{ 110{
111 int close_fd = dev->closefd ? dev->fd : -1;
115 fd_bo_cache_cleanup(&dev->bo_cache, 0); 112 fd_bo_cache_cleanup(&dev->bo_cache, 0);
116 drmHashDestroy(dev->handle_table); 113 drmHashDestroy(dev->handle_table);
117 drmHashDestroy(dev->name_table); 114 drmHashDestroy(dev->name_table);
118 if (dev->closefd)
119 close(dev->fd);
120 dev->funcs->destroy(dev); 115 dev->funcs->destroy(dev);
116 if (close_fd >= 0)
117 close(close_fd);
121} 118}
122 119
123drm_private void fd_device_del_locked(struct fd_device *dev) 120drm_private void fd_device_del_locked(struct fd_device *dev)
diff --git a/freedreno/freedreno_drmif.h b/freedreno/freedreno_drmif.h
index 7a8073ff..2711518b 100644
--- a/freedreno/freedreno_drmif.h
+++ b/freedreno/freedreno_drmif.h
@@ -61,6 +61,7 @@ enum fd_param_id {
61 FD_CHIP_ID, 61 FD_CHIP_ID,
62 FD_MAX_FREQ, 62 FD_MAX_FREQ,
63 FD_TIMESTAMP, 63 FD_TIMESTAMP,
64 FD_NR_RINGS, /* # of rings == # of distinct priority levels */
64}; 65};
65 66
66/* bo flags: */ 67/* bo flags: */
@@ -93,6 +94,8 @@ enum fd_version {
93 FD_VERSION_MADVISE = 1, /* kernel supports madvise */ 94 FD_VERSION_MADVISE = 1, /* kernel supports madvise */
94 FD_VERSION_UNLIMITED_CMDS = 1, /* submits w/ >4 cmd buffers (growable ringbuffer) */ 95 FD_VERSION_UNLIMITED_CMDS = 1, /* submits w/ >4 cmd buffers (growable ringbuffer) */
95 FD_VERSION_FENCE_FD = 2, /* submit command supports in/out fences */ 96 FD_VERSION_FENCE_FD = 2, /* submit command supports in/out fences */
97 FD_VERSION_SUBMIT_QUEUES = 3, /* submit queues and multiple priority levels */
98 FD_VERSION_BO_IOVA = 3, /* supports fd_bo_get/put_iova() */
96}; 99};
97enum fd_version fd_device_version(struct fd_device *dev); 100enum fd_version fd_device_version(struct fd_device *dev);
98 101
@@ -100,6 +103,7 @@ enum fd_version fd_device_version(struct fd_device *dev);
100 */ 103 */
101 104
102struct fd_pipe * fd_pipe_new(struct fd_device *dev, enum fd_pipe_id id); 105struct fd_pipe * fd_pipe_new(struct fd_device *dev, enum fd_pipe_id id);
106struct fd_pipe * fd_pipe_new2(struct fd_device *dev, enum fd_pipe_id id, uint32_t prio);
103void fd_pipe_del(struct fd_pipe *pipe); 107void fd_pipe_del(struct fd_pipe *pipe);
104int fd_pipe_get_param(struct fd_pipe *pipe, enum fd_param_id param, 108int fd_pipe_get_param(struct fd_pipe *pipe, enum fd_param_id param,
105 uint64_t *value); 109 uint64_t *value);
@@ -120,6 +124,8 @@ struct fd_bo *fd_bo_from_handle(struct fd_device *dev,
120 uint32_t handle, uint32_t size); 124 uint32_t handle, uint32_t size);
121struct fd_bo * fd_bo_from_name(struct fd_device *dev, uint32_t name); 125struct fd_bo * fd_bo_from_name(struct fd_device *dev, uint32_t name);
122struct fd_bo * fd_bo_from_dmabuf(struct fd_device *dev, int fd); 126struct fd_bo * fd_bo_from_dmabuf(struct fd_device *dev, int fd);
127uint64_t fd_bo_get_iova(struct fd_bo *bo);
128void fd_bo_put_iova(struct fd_bo *bo);
123struct fd_bo * fd_bo_ref(struct fd_bo *bo); 129struct fd_bo * fd_bo_ref(struct fd_bo *bo);
124void fd_bo_del(struct fd_bo *bo); 130void fd_bo_del(struct fd_bo *bo);
125int fd_bo_get_name(struct fd_bo *bo, uint32_t *name); 131int fd_bo_get_name(struct fd_bo *bo, uint32_t *name);
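
The new FD_NR_RINGS param and FD_VERSION_* values are how callers discover how many priority levels the kernel exposes. A short sketch of the query, assuming a pipe has already been created:

#include "freedreno_drmif.h"

/* Sketch only: ask how many rings (priority levels) are available; kernels
 * or backends without support fail the query, so assume a single ring. */
static uint32_t pipe_nr_rings(struct fd_pipe *pipe)
{
	uint64_t nr_rings;

	if (fd_pipe_get_param(pipe, FD_NR_RINGS, &nr_rings))
		return 1;
	return (uint32_t)nr_rings;
}
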
diff --git a/freedreno/freedreno_pipe.c b/freedreno/freedreno_pipe.c
index 3f8c8342..77b160e7 100644
--- a/freedreno/freedreno_pipe.c
+++ b/freedreno/freedreno_pipe.c
@@ -26,28 +26,33 @@
26 * Rob Clark <robclark@freedesktop.org> 26 * Rob Clark <robclark@freedesktop.org>
27 */ 27 */
28 28
29#ifdef HAVE_CONFIG_H
30# include <config.h>
31#endif
32
33#include "freedreno_drmif.h" 29#include "freedreno_drmif.h"
34#include "freedreno_priv.h" 30#include "freedreno_priv.h"
35 31
32/**
33 * priority of zero is highest priority, and higher numeric values are
34 * lower priorities
35 */
36struct fd_pipe * 36struct fd_pipe *
37fd_pipe_new(struct fd_device *dev, enum fd_pipe_id id) 37fd_pipe_new2(struct fd_device *dev, enum fd_pipe_id id, uint32_t prio)
38{ 38{
39 struct fd_pipe *pipe = NULL; 39 struct fd_pipe *pipe;
40 uint64_t val; 40 uint64_t val;
41 41
42 if (id > FD_PIPE_MAX) { 42 if (id > FD_PIPE_MAX) {
43 ERROR_MSG("invalid pipe id: %d", id); 43 ERROR_MSG("invalid pipe id: %d", id);
44 goto fail; 44 return NULL;
45 }
46
47 if ((prio != 1) && (fd_device_version(dev) < FD_VERSION_SUBMIT_QUEUES)) {
48 ERROR_MSG("invalid priority!");
49 return NULL;
45 } 50 }
46 51
47 pipe = dev->funcs->pipe_new(dev, id); 52 pipe = dev->funcs->pipe_new(dev, id, prio);
48 if (!pipe) { 53 if (!pipe) {
49 ERROR_MSG("allocation failed"); 54 ERROR_MSG("allocation failed");
50 goto fail; 55 return NULL;
51 } 56 }
52 57
53 pipe->dev = dev; 58 pipe->dev = dev;
@@ -57,10 +62,12 @@ fd_pipe_new(struct fd_device *dev, enum fd_pipe_id id)
57 pipe->gpu_id = val; 62 pipe->gpu_id = val;
58 63
59 return pipe; 64 return pipe;
60fail: 65}
61 if (pipe) 66
62 fd_pipe_del(pipe); 67struct fd_pipe *
63 return NULL; 68fd_pipe_new(struct fd_device *dev, enum fd_pipe_id id)
69{
70 return fd_pipe_new2(dev, id, 1);
64} 71}
65 72
66void fd_pipe_del(struct fd_pipe *pipe) 73void fd_pipe_del(struct fd_pipe *pipe)
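
Priority zero is the highest level, and the implicit default used by fd_pipe_new() is 1, so any other value requires FD_VERSION_SUBMIT_QUEUES. A hedged sketch of how an application might pick a pipe (the fallback policy shown is the application's choice, not something this patch dictates):

#include "freedreno_drmif.h"

/* Sketch only: open the highest-priority pipe when submit queues are
 * available, otherwise use the classic default pipe. */
static struct fd_pipe *open_3d_pipe(struct fd_device *dev)
{
	if (fd_device_version(dev) >= FD_VERSION_SUBMIT_QUEUES)
		return fd_pipe_new2(dev, FD_PIPE_3D, 0);	/* 0 == highest priority */
	return fd_pipe_new(dev, FD_PIPE_3D);		/* equivalent to prio 1 */
}
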
diff --git a/freedreno/freedreno_priv.h b/freedreno/freedreno_priv.h
index 32170391..6c9e509f 100644
--- a/freedreno/freedreno_priv.h
+++ b/freedreno/freedreno_priv.h
@@ -29,10 +29,6 @@
29#ifndef FREEDRENO_PRIV_H_ 29#ifndef FREEDRENO_PRIV_H_
30#define FREEDRENO_PRIV_H_ 30#define FREEDRENO_PRIV_H_
31 31
32#ifdef HAVE_CONFIG_H
33#include "config.h"
34#endif
35
36#include <stdlib.h> 32#include <stdlib.h>
37#include <errno.h> 33#include <errno.h>
38#include <string.h> 34#include <string.h>
@@ -49,6 +45,7 @@
49#include "xf86atomic.h" 45#include "xf86atomic.h"
50 46
51#include "util_double_list.h" 47#include "util_double_list.h"
48#include "util_math.h"
52 49
53#include "freedreno_drmif.h" 50#include "freedreno_drmif.h"
54#include "freedreno_ringbuffer.h" 51#include "freedreno_ringbuffer.h"
@@ -66,7 +63,8 @@ struct fd_device_funcs {
66 uint32_t flags, uint32_t *handle); 63 uint32_t flags, uint32_t *handle);
67 struct fd_bo * (*bo_from_handle)(struct fd_device *dev, 64 struct fd_bo * (*bo_from_handle)(struct fd_device *dev,
68 uint32_t size, uint32_t handle); 65 uint32_t size, uint32_t handle);
69 struct fd_pipe * (*pipe_new)(struct fd_device *dev, enum fd_pipe_id id); 66 struct fd_pipe * (*pipe_new)(struct fd_device *dev, enum fd_pipe_id id,
67 unsigned prio);
70 void (*destroy)(struct fd_device *dev); 68 void (*destroy)(struct fd_device *dev);
71}; 69};
72 70
@@ -102,6 +100,9 @@ struct fd_device {
102 struct fd_bo_cache bo_cache; 100 struct fd_bo_cache bo_cache;
103 101
104 int closefd; /* call close(fd) upon destruction */ 102 int closefd; /* call close(fd) upon destruction */
103
104 /* just for valgrind: */
105 int bo_size;
105}; 106};
106 107
107drm_private void fd_bo_cache_init(struct fd_bo_cache *cache, int coarse); 108drm_private void fd_bo_cache_init(struct fd_bo_cache *cache, int coarse);
@@ -152,6 +153,7 @@ struct fd_bo_funcs {
152 int (*cpu_prep)(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t op); 153 int (*cpu_prep)(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t op);
153 void (*cpu_fini)(struct fd_bo *bo); 154 void (*cpu_fini)(struct fd_bo *bo);
154 int (*madvise)(struct fd_bo *bo, int willneed); 155 int (*madvise)(struct fd_bo *bo, int willneed);
156 uint64_t (*iova)(struct fd_bo *bo);
155 void (*destroy)(struct fd_bo *bo); 157 void (*destroy)(struct fd_bo *bo);
156}; 158};
157 159
@@ -169,7 +171,6 @@ struct fd_bo {
169 time_t free_time; /* time when added to bucket-list */ 171 time_t free_time; /* time when added to bucket-list */
170}; 172};
171 173
172#define ALIGN(v,a) (((v) + (a) - 1) & ~((a) - 1))
173#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) 174#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
174 175
175#define enable_debug 0 /* TODO make dynamic */ 176#define enable_debug 0 /* TODO make dynamic */
@@ -196,4 +197,57 @@ offset_bytes(void *end, void *start)
196 return ((char *)end) - ((char *)start); 197 return ((char *)end) - ((char *)start);
197} 198}
198 199
200#if HAVE_VALGRIND
201# include <memcheck.h>
202
203/*
204 * For tracking the backing memory (if valgrind enabled, we force a mmap
205 * for the purposes of tracking)
206 */
207static inline void VG_BO_ALLOC(struct fd_bo *bo)
208{
209 if (bo && RUNNING_ON_VALGRIND) {
210 VALGRIND_MALLOCLIKE_BLOCK(fd_bo_map(bo), bo->size, 0, 1);
211 }
212}
213
214static inline void VG_BO_FREE(struct fd_bo *bo)
215{
216 VALGRIND_FREELIKE_BLOCK(bo->map, 0);
217}
218
219/*
220 * For tracking bo structs that are in the buffer-cache, so that valgrind
221 * doesn't attribute ownership to the first one to allocate the recycled
222 * bo.
223 *
224 * Note that the list_head in fd_bo is used to track the buffers in cache
225 * so disable error reporting on the range while they are in cache so
226 * valgrind doesn't squawk about list traversal.
227 *
228 */
229static inline void VG_BO_RELEASE(struct fd_bo *bo)
230{
231 if (RUNNING_ON_VALGRIND) {
232 VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE(bo, bo->dev->bo_size);
233 VALGRIND_MAKE_MEM_NOACCESS(bo, bo->dev->bo_size);
234 VALGRIND_FREELIKE_BLOCK(bo->map, 0);
235 }
236}
237static inline void VG_BO_OBTAIN(struct fd_bo *bo)
238{
239 if (RUNNING_ON_VALGRIND) {
240 VALGRIND_MAKE_MEM_DEFINED(bo, bo->dev->bo_size);
241 VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(bo, bo->dev->bo_size);
242 VALGRIND_MALLOCLIKE_BLOCK(bo->map, bo->size, 0, 1);
243 }
244}
245#else
246static inline void VG_BO_ALLOC(struct fd_bo *bo) {}
247static inline void VG_BO_FREE(struct fd_bo *bo) {}
248static inline void VG_BO_RELEASE(struct fd_bo *bo) {}
249static inline void VG_BO_OBTAIN(struct fd_bo *bo) {}
250#endif
251
252
199#endif /* FREEDRENO_PRIV_H_ */ 253#endif /* FREEDRENO_PRIV_H_ */
diff --git a/freedreno/freedreno_ringbuffer.c b/freedreno/freedreno_ringbuffer.c
index 7310f1fd..3834b51b 100644
--- a/freedreno/freedreno_ringbuffer.c
+++ b/freedreno/freedreno_ringbuffer.c
@@ -26,10 +26,6 @@
26 * Rob Clark <robclark@freedesktop.org> 26 * Rob Clark <robclark@freedesktop.org>
27 */ 27 */
28 28
29#ifdef HAVE_CONFIG_H
30# include <config.h>
31#endif
32
33#include <assert.h> 29#include <assert.h>
34 30
35#include "freedreno_drmif.h" 31#include "freedreno_drmif.h"
diff --git a/freedreno/kgsl/kgsl_bo.c b/freedreno/kgsl/kgsl_bo.c
index ab3485e3..c6d2d499 100644
--- a/freedreno/kgsl/kgsl_bo.c
+++ b/freedreno/kgsl/kgsl_bo.c
@@ -26,10 +26,6 @@
26 * Rob Clark <robclark@freedesktop.org> 26 * Rob Clark <robclark@freedesktop.org>
27 */ 27 */
28 28
29#ifdef HAVE_CONFIG_H
30# include <config.h>
31#endif
32
33#include "kgsl_priv.h" 29#include "kgsl_priv.h"
34 30
35#include <linux/fb.h> 31#include <linux/fb.h>
diff --git a/freedreno/kgsl/kgsl_device.c b/freedreno/kgsl/kgsl_device.c
index 175e8378..914f3412 100644
--- a/freedreno/kgsl/kgsl_device.c
+++ b/freedreno/kgsl/kgsl_device.c
@@ -26,10 +26,6 @@
26 * Rob Clark <robclark@freedesktop.org> 26 * Rob Clark <robclark@freedesktop.org>
27 */ 27 */
28 28
29#ifdef HAVE_CONFIG_H
30# include <config.h>
31#endif
32
33#include <sys/types.h> 29#include <sys/types.h>
34#include <sys/stat.h> 30#include <sys/stat.h>
35#include <unistd.h> 31#include <unistd.h>
@@ -61,5 +57,7 @@ drm_private struct fd_device * kgsl_device_new(int fd)
61 dev = &kgsl_dev->base; 57 dev = &kgsl_dev->base;
62 dev->funcs = &funcs; 58 dev->funcs = &funcs;
63 59
60 dev->bo_size = sizeof(struct kgsl_bo);
61
64 return dev; 62 return dev;
65} 63}
diff --git a/freedreno/kgsl/kgsl_pipe.c b/freedreno/kgsl/kgsl_pipe.c
index 8a39eb49..0a8b6586 100644
--- a/freedreno/kgsl/kgsl_pipe.c
+++ b/freedreno/kgsl/kgsl_pipe.c
@@ -26,10 +26,6 @@
26 * Rob Clark <robclark@freedesktop.org> 26 * Rob Clark <robclark@freedesktop.org>
27 */ 27 */
28 28
29#ifdef HAVE_CONFIG_H
30# include <config.h>
31#endif
32
33#include "kgsl_priv.h" 29#include "kgsl_priv.h"
34 30
35 31
@@ -52,6 +48,7 @@ static int kgsl_pipe_get_param(struct fd_pipe *pipe,
52 return 0; 48 return 0;
53 case FD_MAX_FREQ: 49 case FD_MAX_FREQ:
54 case FD_TIMESTAMP: 50 case FD_TIMESTAMP:
51 case FD_NR_RINGS:
55 /* unsupported on kgsl */ 52 /* unsupported on kgsl */
56 return -1; 53 return -1;
57 default: 54 default:
@@ -210,7 +207,7 @@ static int getprop(int fd, enum kgsl_property_type type,
210 207
211 208
212drm_private struct fd_pipe * kgsl_pipe_new(struct fd_device *dev, 209drm_private struct fd_pipe * kgsl_pipe_new(struct fd_device *dev,
213 enum fd_pipe_id id) 210 enum fd_pipe_id id, uint32_t prio)
214{ 211{
215 static const char *paths[] = { 212 static const char *paths[] = {
216 [FD_PIPE_3D] = "/dev/kgsl-3d0", 213 [FD_PIPE_3D] = "/dev/kgsl-3d0",
diff --git a/freedreno/kgsl/kgsl_priv.h b/freedreno/kgsl/kgsl_priv.h
index 6ab64965..41b13920 100644
--- a/freedreno/kgsl/kgsl_priv.h
+++ b/freedreno/kgsl/kgsl_priv.h
@@ -103,7 +103,7 @@ drm_private void kgsl_pipe_post_submit(struct kgsl_pipe *pipe,
103drm_private void kgsl_pipe_process_pending(struct kgsl_pipe *pipe, 103drm_private void kgsl_pipe_process_pending(struct kgsl_pipe *pipe,
104 uint32_t timestamp); 104 uint32_t timestamp);
105drm_private struct fd_pipe * kgsl_pipe_new(struct fd_device *dev, 105drm_private struct fd_pipe * kgsl_pipe_new(struct fd_device *dev,
106 enum fd_pipe_id id); 106 enum fd_pipe_id id, uint32_t prio);
107 107
108drm_private struct fd_ringbuffer * kgsl_ringbuffer_new(struct fd_pipe *pipe, 108drm_private struct fd_ringbuffer * kgsl_ringbuffer_new(struct fd_pipe *pipe,
109 uint32_t size); 109 uint32_t size);
diff --git a/freedreno/kgsl/kgsl_ringbuffer.c b/freedreno/kgsl/kgsl_ringbuffer.c
index e4696b1b..a756deda 100644
--- a/freedreno/kgsl/kgsl_ringbuffer.c
+++ b/freedreno/kgsl/kgsl_ringbuffer.c
@@ -26,10 +26,6 @@
26 * Rob Clark <robclark@freedesktop.org> 26 * Rob Clark <robclark@freedesktop.org>
27 */ 27 */
28 28
29#ifdef HAVE_CONFIG_H
30# include <config.h>
31#endif
32
33#include <assert.h> 29#include <assert.h>
34 30
35#include "freedreno_ringbuffer.h" 31#include "freedreno_ringbuffer.h"
@@ -146,7 +142,7 @@ static int kgsl_ringbuffer_flush(struct fd_ringbuffer *ring, uint32_t *last_star
146 ibdesc.gpuaddr = kgsl_ring->bo->gpuaddr; 142 ibdesc.gpuaddr = kgsl_ring->bo->gpuaddr;
147 ibdesc.hostptr = kgsl_ring->bo->hostptr; 143 ibdesc.hostptr = kgsl_ring->bo->hostptr;
148 ibdesc.sizedwords = 0x145; 144 ibdesc.sizedwords = 0x145;
149 req.timestamp = (uint32_t)kgsl_ring->bo->hostptr; 145 req.timestamp = (uintptr_t)kgsl_ring->bo->hostptr;
150 } 146 }
151 147
152 do { 148 do {
diff --git a/freedreno/meson.build b/freedreno/meson.build
new file mode 100644
index 00000000..015b7fb1
--- /dev/null
+++ b/freedreno/meson.build
@@ -0,0 +1,77 @@
1# Copyright © 2017-2018 Intel Corporation
2
3# Permission is hereby granted, free of charge, to any person obtaining a copy
4# of this software and associated documentation files (the "Software"), to deal
5# in the Software without restriction, including without limitation the rights
6# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7# copies of the Software, and to permit persons to whom the Software is
8# furnished to do so, subject to the following conditions:
9
10# The above copyright notice and this permission notice shall be included in
11# all copies or substantial portions of the Software.
12
13# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19# SOFTWARE.
20
21files_freedreno = files(
22 'freedreno_device.c',
23 'freedreno_pipe.c',
24 'freedreno_ringbuffer.c',
25 'freedreno_bo.c',
26 'freedreno_bo_cache.c',
27 'msm/msm_bo.c',
28 'msm/msm_device.c',
29 'msm/msm_pipe.c',
30 'msm/msm_ringbuffer.c',
31)
32
33if with_freedreno_kgsl
34 files_freedreno += files(
35 'kgsl/kgsl_bo.c',
36 'kgsl/kgsl_device.c',
37 'kgsl/kgsl_pipe.c',
38 'kgsl/kgsl_ringbuffer.c',
39 )
40endif
41
42libdrm_freedreno = shared_library(
43 'drm_freedreno',
44 [files_freedreno, config_file],
45 c_args : warn_c_args,
46 include_directories : [inc_root, inc_drm],
47 dependencies : [dep_valgrind, dep_pthread_stubs, dep_rt, dep_atomic_ops],
48 link_with : libdrm,
49 version : '1.0.0',
50 install : true,
51)
52
53ext_libdrm_freedreno = declare_dependency(
54 link_with : [libdrm, libdrm_freedreno],
55 include_directories : [inc_drm, include_directories('.')],
56)
57
58install_headers(
59 'freedreno_drmif.h', 'freedreno_ringbuffer.h',
60 subdir : 'freedreno'
61)
62
63pkg.generate(
64 name : 'libdrm_freedreno',
65 libraries : libdrm_freedreno,
66 subdirs : ['.', 'libdrm', 'freedreno'],
67 version : meson.project_version(),
68 requires_private : 'libdrm',
69 description : 'Userspace interface to freedreno kernel DRM services',
70)
71
72test(
73 'freedreno-symbol-check',
74 prog_bash,
75 env : env_test,
76 args : [files('freedreno-symbol-check'), libdrm_freedreno]
77)
diff --git a/freedreno/msm/msm_bo.c b/freedreno/msm/msm_bo.c
index 72471df6..8b3d0bcb 100644
--- a/freedreno/msm/msm_bo.c
+++ b/freedreno/msm/msm_bo.c
@@ -26,10 +26,6 @@
26 * Rob Clark <robclark@freedesktop.org> 26 * Rob Clark <robclark@freedesktop.org>
27 */ 27 */
28 28
29#ifdef HAVE_CONFIG_H
30# include <config.h>
31#endif
32
33#include "msm_priv.h" 29#include "msm_priv.h"
34 30
35static int bo_allocate(struct msm_bo *msm_bo) 31static int bo_allocate(struct msm_bo *msm_bo)
@@ -108,6 +104,18 @@ static int msm_bo_madvise(struct fd_bo *bo, int willneed)
108 return req.retained; 104 return req.retained;
109} 105}
110 106
107static uint64_t msm_bo_iova(struct fd_bo *bo)
108{
109 struct drm_msm_gem_info req = {
110 .handle = bo->handle,
111 .flags = MSM_INFO_IOVA,
112 };
113
114 drmCommandWriteRead(bo->dev->fd, DRM_MSM_GEM_INFO, &req, sizeof(req));
115
116 return req.offset;
117}
118
111static void msm_bo_destroy(struct fd_bo *bo) 119static void msm_bo_destroy(struct fd_bo *bo)
112{ 120{
113 struct msm_bo *msm_bo = to_msm_bo(bo); 121 struct msm_bo *msm_bo = to_msm_bo(bo);
@@ -120,6 +128,7 @@ static const struct fd_bo_funcs funcs = {
120 .cpu_prep = msm_bo_cpu_prep, 128 .cpu_prep = msm_bo_cpu_prep,
121 .cpu_fini = msm_bo_cpu_fini, 129 .cpu_fini = msm_bo_cpu_fini,
122 .madvise = msm_bo_madvise, 130 .madvise = msm_bo_madvise,
131 .iova = msm_bo_iova,
123 .destroy = msm_bo_destroy, 132 .destroy = msm_bo_destroy,
124}; 133};
125 134
diff --git a/freedreno/msm/msm_device.c b/freedreno/msm/msm_device.c
index 727baa44..7bb57677 100644
--- a/freedreno/msm/msm_device.c
+++ b/freedreno/msm/msm_device.c
@@ -26,10 +26,6 @@
26 * Rob Clark <robclark@freedesktop.org> 26 * Rob Clark <robclark@freedesktop.org>
27 */ 27 */
28 28
29#ifdef HAVE_CONFIG_H
30# include <config.h>
31#endif
32
33#include <sys/types.h> 29#include <sys/types.h>
34#include <sys/stat.h> 30#include <sys/stat.h>
35#include <unistd.h> 31#include <unistd.h>
@@ -64,5 +60,7 @@ drm_private struct fd_device * msm_device_new(int fd)
64 60
65 fd_bo_cache_init(&msm_dev->ring_cache, TRUE); 61 fd_bo_cache_init(&msm_dev->ring_cache, TRUE);
66 62
63 dev->bo_size = sizeof(struct msm_bo);
64
67 return dev; 65 return dev;
68} 66}
diff --git a/freedreno/msm/msm_drm.h b/freedreno/msm/msm_drm.h
index ed4c8d47..dac49e59 100644
--- a/freedreno/msm/msm_drm.h
+++ b/freedreno/msm/msm_drm.h
@@ -73,6 +73,8 @@ struct drm_msm_timespec {
73#define MSM_PARAM_CHIP_ID 0x03 73#define MSM_PARAM_CHIP_ID 0x03
74#define MSM_PARAM_MAX_FREQ 0x04 74#define MSM_PARAM_MAX_FREQ 0x04
75#define MSM_PARAM_TIMESTAMP 0x05 75#define MSM_PARAM_TIMESTAMP 0x05
76#define MSM_PARAM_GMEM_BASE 0x06
77#define MSM_PARAM_NR_RINGS 0x07
76 78
77struct drm_msm_param { 79struct drm_msm_param {
78 __u32 pipe; /* in, MSM_PIPE_x */ 80 __u32 pipe; /* in, MSM_PIPE_x */
@@ -104,10 +106,14 @@ struct drm_msm_gem_new {
104 __u32 handle; /* out */ 106 __u32 handle; /* out */
105}; 107};
106 108
109#define MSM_INFO_IOVA 0x01
110
111#define MSM_INFO_FLAGS (MSM_INFO_IOVA)
112
107struct drm_msm_gem_info { 113struct drm_msm_gem_info {
108 __u32 handle; /* in */ 114 __u32 handle; /* in */
109 __u32 pad; 115 __u32 flags; /* in - combination of MSM_INFO_* flags */
110 __u64 offset; /* out, offset to pass to mmap() */ 116 __u64 offset; /* out, mmap() offset or iova */
111}; 117};
112 118
113#define MSM_PREP_READ 0x01 119#define MSM_PREP_READ 0x01
@@ -167,7 +173,7 @@ struct drm_msm_gem_submit_cmd {
167 __u32 size; /* in, cmdstream size */ 173 __u32 size; /* in, cmdstream size */
168 __u32 pad; 174 __u32 pad;
169 __u32 nr_relocs; /* in, number of submit_reloc's */ 175 __u32 nr_relocs; /* in, number of submit_reloc's */
170 __u64 __user relocs; /* in, ptr to array of submit_reloc's */ 176 __u64 relocs; /* in, ptr to array of submit_reloc's */
171}; 177};
172 178
173/* Each buffer referenced elsewhere in the cmdstream submit (ie. the 179/* Each buffer referenced elsewhere in the cmdstream submit (ie. the
@@ -211,9 +217,10 @@ struct drm_msm_gem_submit {
211 __u32 fence; /* out */ 217 __u32 fence; /* out */
212 __u32 nr_bos; /* in, number of submit_bo's */ 218 __u32 nr_bos; /* in, number of submit_bo's */
213 __u32 nr_cmds; /* in, number of submit_cmd's */ 219 __u32 nr_cmds; /* in, number of submit_cmd's */
214 __u64 __user bos; /* in, ptr to array of submit_bo's */ 220 __u64 bos; /* in, ptr to array of submit_bo's */
215 __u64 __user cmds; /* in, ptr to array of submit_cmd's */ 221 __u64 cmds; /* in, ptr to array of submit_cmd's */
216 __s32 fence_fd; /* in/out fence fd (see MSM_SUBMIT_FENCE_FD_IN/OUT) */ 222 __s32 fence_fd; /* in/out fence fd (see MSM_SUBMIT_FENCE_FD_IN/OUT) */
223 __u32 queueid; /* in, submitqueue id */
217}; 224};
218 225
219/* The normal way to synchronize with the GPU is just to CPU_PREP on 226/* The normal way to synchronize with the GPU is just to CPU_PREP on
@@ -227,6 +234,7 @@ struct drm_msm_wait_fence {
227 __u32 fence; /* in */ 234 __u32 fence; /* in */
228 __u32 pad; 235 __u32 pad;
229 struct drm_msm_timespec timeout; /* in */ 236 struct drm_msm_timespec timeout; /* in */
237 __u32 queueid; /* in, submitqueue id */
230}; 238};
231 239
232/* madvise provides a way to tell the kernel in case a buffers contents 240/* madvise provides a way to tell the kernel in case a buffers contents
@@ -250,6 +258,20 @@ struct drm_msm_gem_madvise {
250 __u32 retained; /* out, whether backing store still exists */ 258 __u32 retained; /* out, whether backing store still exists */
251}; 259};
252 260
261/*
262 * Draw queues allow the user to set specific submission parameters. Command
263 * submissions specify a specific submitqueue to use. ID 0 is reserved for
264 * backwards compatibility as a "default" submitqueue
265 */
266
267#define MSM_SUBMITQUEUE_FLAGS (0)
268
269struct drm_msm_submitqueue {
270 __u32 flags; /* in, MSM_SUBMITQUEUE_x */
271 __u32 prio; /* in, Priority level */
272 __u32 id; /* out, identifier */
273};
274
253#define DRM_MSM_GET_PARAM 0x00 275#define DRM_MSM_GET_PARAM 0x00
254/* placeholder: 276/* placeholder:
255#define DRM_MSM_SET_PARAM 0x01 277#define DRM_MSM_SET_PARAM 0x01
@@ -261,7 +283,11 @@ struct drm_msm_gem_madvise {
261#define DRM_MSM_GEM_SUBMIT 0x06 283#define DRM_MSM_GEM_SUBMIT 0x06
262#define DRM_MSM_WAIT_FENCE 0x07 284#define DRM_MSM_WAIT_FENCE 0x07
263#define DRM_MSM_GEM_MADVISE 0x08 285#define DRM_MSM_GEM_MADVISE 0x08
264#define DRM_MSM_NUM_IOCTLS 0x09 286/* placeholder:
287#define DRM_MSM_GEM_SVM_NEW 0x09
288 */
289#define DRM_MSM_SUBMITQUEUE_NEW 0x0A
290#define DRM_MSM_SUBMITQUEUE_CLOSE 0x0B
265 291
266#define DRM_IOCTL_MSM_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GET_PARAM, struct drm_msm_param) 292#define DRM_IOCTL_MSM_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GET_PARAM, struct drm_msm_param)
267#define DRM_IOCTL_MSM_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_NEW, struct drm_msm_gem_new) 293#define DRM_IOCTL_MSM_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_NEW, struct drm_msm_gem_new)
@@ -271,6 +297,8 @@ struct drm_msm_gem_madvise {
271#define DRM_IOCTL_MSM_GEM_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_SUBMIT, struct drm_msm_gem_submit) 297#define DRM_IOCTL_MSM_GEM_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_SUBMIT, struct drm_msm_gem_submit)
272#define DRM_IOCTL_MSM_WAIT_FENCE DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_WAIT_FENCE, struct drm_msm_wait_fence) 298#define DRM_IOCTL_MSM_WAIT_FENCE DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_WAIT_FENCE, struct drm_msm_wait_fence)
273#define DRM_IOCTL_MSM_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_MADVISE, struct drm_msm_gem_madvise) 299#define DRM_IOCTL_MSM_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_MADVISE, struct drm_msm_gem_madvise)
300#define DRM_IOCTL_MSM_SUBMITQUEUE_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_NEW, struct drm_msm_submitqueue)
301#define DRM_IOCTL_MSM_SUBMITQUEUE_CLOSE DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_CLOSE, __u32)
274 302
275#if defined(__cplusplus) 303#if defined(__cplusplus)
276} 304}
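
Taken together, the submitqueue additions let userspace create a queue at a chosen priority and then tag every submit and fence wait with its id, which is exactly what the msm_pipe.c and msm_ringbuffer.c changes below do. A rough sketch of the raw uapi flow, with most error handling elided:

#include <xf86drm.h>
#include "msm_drm.h"

/* Sketch only: create a submitqueue, use its id, then close it.  Kernels
 * without the ioctl fail the create, in which case queueid 0 (the default
 * queue) keeps working for backwards compatibility. */
static void submitqueue_example(int drm_fd)
{
	struct drm_msm_submitqueue create = { .flags = 0, .prio = 0 };

	if (drmCommandWriteRead(drm_fd, DRM_MSM_SUBMITQUEUE_NEW,
			&create, sizeof(create)))
		return;

	/* ... set .queueid = create.id in struct drm_msm_gem_submit and
	 * struct drm_msm_wait_fence before issuing those ioctls ... */

	drmCommandWrite(drm_fd, DRM_MSM_SUBMITQUEUE_CLOSE,
			&create.id, sizeof(create.id));
}
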
diff --git a/freedreno/msm/msm_pipe.c b/freedreno/msm/msm_pipe.c
index f872e245..f28778ef 100644
--- a/freedreno/msm/msm_pipe.c
+++ b/freedreno/msm/msm_pipe.c
@@ -26,10 +26,6 @@
26 * Rob Clark <robclark@freedesktop.org> 26 * Rob Clark <robclark@freedesktop.org>
27 */ 27 */
28 28
29#ifdef HAVE_CONFIG_H
30# include <config.h>
31#endif
32
33#include "msm_priv.h" 29#include "msm_priv.h"
34 30
35static int query_param(struct fd_pipe *pipe, uint32_t param, 31static int query_param(struct fd_pipe *pipe, uint32_t param,
@@ -71,6 +67,8 @@ static int msm_pipe_get_param(struct fd_pipe *pipe,
71 return query_param(pipe, MSM_PARAM_MAX_FREQ, value); 67 return query_param(pipe, MSM_PARAM_MAX_FREQ, value);
72 case FD_TIMESTAMP: 68 case FD_TIMESTAMP:
73 return query_param(pipe, MSM_PARAM_TIMESTAMP, value); 69 return query_param(pipe, MSM_PARAM_TIMESTAMP, value);
70 case FD_NR_RINGS:
71 return query_param(pipe, MSM_PARAM_NR_RINGS, value);
74 default: 72 default:
75 ERROR_MSG("invalid param id: %d", param); 73 ERROR_MSG("invalid param id: %d", param);
76 return -1; 74 return -1;
@@ -83,6 +81,7 @@ static int msm_pipe_wait(struct fd_pipe *pipe, uint32_t timestamp,
83 struct fd_device *dev = pipe->dev; 81 struct fd_device *dev = pipe->dev;
84 struct drm_msm_wait_fence req = { 82 struct drm_msm_wait_fence req = {
85 .fence = timestamp, 83 .fence = timestamp,
84 .queueid = to_msm_pipe(pipe)->queue_id,
86 }; 85 };
87 int ret; 86 int ret;
88 87
@@ -97,9 +96,48 @@ static int msm_pipe_wait(struct fd_pipe *pipe, uint32_t timestamp,
97 return 0; 96 return 0;
98} 97}
99 98
99static int open_submitqueue(struct fd_pipe *pipe, uint32_t prio)
100{
101 struct drm_msm_submitqueue req = {
102 .flags = 0,
103 .prio = prio,
104 };
105 uint64_t nr_rings = 1;
106 int ret;
107
108 if (fd_device_version(pipe->dev) < FD_VERSION_SUBMIT_QUEUES) {
109 to_msm_pipe(pipe)->queue_id = 0;
110 return 0;
111 }
112
113 msm_pipe_get_param(pipe, FD_NR_RINGS, &nr_rings);
114
115 req.prio = MIN2(req.prio, MAX2(nr_rings, 1) - 1);
116
117 ret = drmCommandWriteRead(pipe->dev->fd, DRM_MSM_SUBMITQUEUE_NEW,
118 &req, sizeof(req));
119 if (ret) {
120 ERROR_MSG("could not create submitqueue! %d (%s)", ret, strerror(errno));
121 return ret;
122 }
123
124 to_msm_pipe(pipe)->queue_id = req.id;
125 return 0;
126}
127
128static void close_submitqueue(struct fd_pipe *pipe, uint32_t queue_id)
129{
130 if (fd_device_version(pipe->dev) < FD_VERSION_SUBMIT_QUEUES)
131 return;
132
133 drmCommandWrite(pipe->dev->fd, DRM_MSM_SUBMITQUEUE_CLOSE,
134 &queue_id, sizeof(queue_id));
135}
136
100static void msm_pipe_destroy(struct fd_pipe *pipe) 137static void msm_pipe_destroy(struct fd_pipe *pipe)
101{ 138{
102 struct msm_pipe *msm_pipe = to_msm_pipe(pipe); 139 struct msm_pipe *msm_pipe = to_msm_pipe(pipe);
140 close_submitqueue(pipe, msm_pipe->queue_id);
103 free(msm_pipe); 141 free(msm_pipe);
104} 142}
105 143
@@ -122,7 +160,7 @@ static uint64_t get_param(struct fd_pipe *pipe, uint32_t param)
122} 160}
123 161
124drm_private struct fd_pipe * msm_pipe_new(struct fd_device *dev, 162drm_private struct fd_pipe * msm_pipe_new(struct fd_device *dev,
125 enum fd_pipe_id id) 163 enum fd_pipe_id id, uint32_t prio)
126{ 164{
127 static const uint32_t pipe_id[] = { 165 static const uint32_t pipe_id[] = {
128 [FD_PIPE_3D] = MSM_PIPE_3D0, 166 [FD_PIPE_3D] = MSM_PIPE_3D0,
@@ -157,6 +195,9 @@ drm_private struct fd_pipe * msm_pipe_new(struct fd_device *dev,
157 INFO_MSG(" Chip-id: 0x%08x", msm_pipe->chip_id); 195 INFO_MSG(" Chip-id: 0x%08x", msm_pipe->chip_id);
158 INFO_MSG(" GMEM size: 0x%08x", msm_pipe->gmem); 196 INFO_MSG(" GMEM size: 0x%08x", msm_pipe->gmem);
159 197
198 if (open_submitqueue(pipe, prio))
199 goto fail;
200
160 return pipe; 201 return pipe;
161fail: 202fail:
162 if (pipe) 203 if (pipe)
diff --git a/freedreno/msm/msm_priv.h b/freedreno/msm/msm_priv.h
index 6d670aab..88ac3aa4 100644
--- a/freedreno/msm/msm_priv.h
+++ b/freedreno/msm/msm_priv.h
@@ -56,6 +56,7 @@ struct msm_pipe {
56 uint32_t gpu_id; 56 uint32_t gpu_id;
57 uint32_t gmem; 57 uint32_t gmem;
58 uint32_t chip_id; 58 uint32_t chip_id;
59 uint32_t queue_id;
59}; 60};
60 61
61static inline struct msm_pipe * to_msm_pipe(struct fd_pipe *x) 62static inline struct msm_pipe * to_msm_pipe(struct fd_pipe *x)
@@ -64,7 +65,7 @@ static inline struct msm_pipe * to_msm_pipe(struct fd_pipe *x)
64} 65}
65 66
66drm_private struct fd_pipe * msm_pipe_new(struct fd_device *dev, 67drm_private struct fd_pipe * msm_pipe_new(struct fd_device *dev,
67 enum fd_pipe_id id); 68 enum fd_pipe_id id, uint32_t prio);
68 69
69drm_private struct fd_ringbuffer * msm_ringbuffer_new(struct fd_pipe *pipe, 70drm_private struct fd_ringbuffer * msm_ringbuffer_new(struct fd_pipe *pipe,
70 uint32_t size); 71 uint32_t size);
diff --git a/freedreno/msm/msm_ringbuffer.c b/freedreno/msm/msm_ringbuffer.c
index 17194f4c..a87e1b9a 100644
--- a/freedreno/msm/msm_ringbuffer.c
+++ b/freedreno/msm/msm_ringbuffer.c
@@ -26,10 +26,6 @@
26 * Rob Clark <robclark@freedesktop.org> 26 * Rob Clark <robclark@freedesktop.org>
27 */ 27 */
28 28
29#ifdef HAVE_CONFIG_H
30# include <config.h>
31#endif
32
33#include <assert.h> 29#include <assert.h>
34#include <inttypes.h> 30#include <inttypes.h>
35 31
@@ -401,6 +397,7 @@ static int msm_ringbuffer_flush(struct fd_ringbuffer *ring, uint32_t *last_start
401 struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring); 397 struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
402 struct drm_msm_gem_submit req = { 398 struct drm_msm_gem_submit req = {
403 .flags = to_msm_pipe(ring->pipe)->pipe, 399 .flags = to_msm_pipe(ring->pipe)->pipe,
400 .queueid = to_msm_pipe(ring->pipe)->queue_id,
404 }; 401 };
405 uint32_t i; 402 uint32_t i;
406 int ret; 403 int ret;
@@ -496,11 +493,16 @@ static void msm_ringbuffer_emit_reloc(struct fd_ringbuffer *ring,
496 if (ring->pipe->gpu_id >= 500) { 493 if (ring->pipe->gpu_id >= 500) {
497 struct drm_msm_gem_submit_reloc *reloc_hi; 494 struct drm_msm_gem_submit_reloc *reloc_hi;
498 495
496 /* NOTE: grab reloc_idx *before* APPEND() since that could
497 * realloc() meaning that 'reloc' ptr is no longer valid:
498 */
499 uint32_t reloc_idx = reloc->reloc_idx;
500
499 idx = APPEND(cmd, relocs); 501 idx = APPEND(cmd, relocs);
500 502
501 reloc_hi = &cmd->relocs[idx]; 503 reloc_hi = &cmd->relocs[idx];
502 504
503 reloc_hi->reloc_idx = reloc->reloc_idx; 505 reloc_hi->reloc_idx = reloc_idx;
504 reloc_hi->reloc_offset = r->offset; 506 reloc_hi->reloc_offset = r->offset;
505 reloc_hi->or = r->orhi; 507 reloc_hi->or = r->orhi;
506 reloc_hi->shift = r->shift - 32; 508 reloc_hi->shift = r->shift - 32;
@@ -584,12 +586,12 @@ drm_private struct fd_ringbuffer * msm_ringbuffer_new(struct fd_pipe *pipe,
584 uint32_t size) 586 uint32_t size)
585{ 587{
586 struct msm_ringbuffer *msm_ring; 588 struct msm_ringbuffer *msm_ring;
587 struct fd_ringbuffer *ring = NULL; 589 struct fd_ringbuffer *ring;
588 590
589 msm_ring = calloc(1, sizeof(*msm_ring)); 591 msm_ring = calloc(1, sizeof(*msm_ring));
590 if (!msm_ring) { 592 if (!msm_ring) {
591 ERROR_MSG("allocation failed"); 593 ERROR_MSG("allocation failed");
592 goto fail; 594 return NULL;
593 } 595 }
594 596
595 if (size == 0) { 597 if (size == 0) {
@@ -609,8 +611,4 @@ drm_private struct fd_ringbuffer * msm_ringbuffer_new(struct fd_pipe *pipe,
609 ring_cmd_new(ring, size); 611 ring_cmd_new(ring, size);
610 612
611 return ring; 613 return ring;
612fail:
613 if (ring)
614 fd_ringbuffer_del(ring);
615 return NULL;
616} 614}
diff --git a/include/drm/README b/include/drm/README
index a50b02c0..b4658dd7 100644
--- a/include/drm/README
+++ b/include/drm/README
@@ -67,6 +67,8 @@ That said, it's up-to the individual developers to sync with newer version
67 67
68When and how to update these files 68When and how to update these files
69---------------------------------- 69----------------------------------
70Note: One should not make _any_ changes to the files apart from the steps below.
71
70In order to update the files do the following: 72In order to update the files do the following:
71 - Switch to a Linux kernel tree/branch which is not rebased. 73 - Switch to a Linux kernel tree/branch which is not rebased.
72For example: airlied/drm-next 74For example: airlied/drm-next
@@ -84,47 +86,21 @@ Outdated or Broken Headers
84This section contains a list of headers and the respective "issues" they might 86This section contains a list of headers and the respective "issues" they might
85have relative to their kernel equivalent. 87have relative to their kernel equivalent.
86 88
87Nearly all headers:
88 - Missing extern C notation.
89Status: Trivial.
90
91Most UMS headers: 89Most UMS headers:
92 - Not using fixed size integers - compat ioctls are broken. 90 - Not using fixed size integers - compat ioctls are broken.
93Status: ? 91Status: ?
94Promote to fixed size ints, which match the current (32bit) ones. 92Promote to fixed size ints, which match the current (32bit) ones.
95 93
96
97amdgpu_drm.h
98 - Using the stdint.h uint*_t over the respective __u* ones
99Status: Trivial.
100
101drm_mode.h
102 - Missing DPI encode/connector pair.
103Status: Trivial.
104
105i915_drm.h
106 - Missing PARAMS - HAS_POOLED_EU, MIN_EU_IN_POOL CONTEXT_PARAM_NO_ERROR_CAPTURE
107Status: Trivial.
108
109mga_drm.h
110 - Typo fix, use struct over typedef.
111Status: Trivial.
112
113nouveau_drm.h 94nouveau_drm.h
114 - Missing macros NOUVEAU_GETPARAM*, NOUVEAU_DRM_HEADER_PATCHLEVEL, structs, 95 - Missing macros NOUVEAU_GETPARAM*, NOUVEAU_DRM_HEADER_PATCHLEVEL, structs,
115enums, using stdint.h over the __u* types. 96enums
116Status: ? 97Status: Deliberate UABI choice; nouveau hides the exact kernel ABI behind libdrm
117
118qxl_drm.h
119 - Using the stdint.h uint*_t over the respective __u* ones
120Status: Trivial.
121 98
122r128_drm.h 99r128_drm.h
123 - Broken compat ioctls. 100 - Broken compat ioctls.
124 101
125radeon_drm.h 102radeon_drm.h
126 - Missing RADEON_TILING_R600_NO_SCANOUT, CIK_TILE_MODE_*, broken UMS ioctls, 103 - Missing RADEON_TILING_R600_NO_SCANOUT, CIK_TILE_MODE_*, broken UMS ioctls
127using stdint types.
128 - Both kernel and libdrm: missing padding - 104 - Both kernel and libdrm: missing padding -
129drm_radeon_gem_{create,{g,s}et_tiling,set_domain} others ? 105drm_radeon_gem_{create,{g,s}et_tiling,set_domain} others ?
130Status: ? 106Status: ?
diff --git a/include/drm/amdgpu_drm.h b/include/drm/amdgpu_drm.h
index d8f24976..c363b67f 100644
--- a/include/drm/amdgpu_drm.h
+++ b/include/drm/amdgpu_drm.h
@@ -50,6 +50,10 @@ extern "C" {
50#define DRM_AMDGPU_WAIT_CS 0x09 50#define DRM_AMDGPU_WAIT_CS 0x09
51#define DRM_AMDGPU_GEM_OP 0x10 51#define DRM_AMDGPU_GEM_OP 0x10
52#define DRM_AMDGPU_GEM_USERPTR 0x11 52#define DRM_AMDGPU_GEM_USERPTR 0x11
53#define DRM_AMDGPU_WAIT_FENCES 0x12
54#define DRM_AMDGPU_VM 0x13
55#define DRM_AMDGPU_FENCE_TO_HANDLE 0x14
56#define DRM_AMDGPU_SCHED 0x15
53 57
54#define DRM_IOCTL_AMDGPU_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create) 58#define DRM_IOCTL_AMDGPU_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create)
55#define DRM_IOCTL_AMDGPU_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap) 59#define DRM_IOCTL_AMDGPU_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap)
@@ -63,6 +67,10 @@ extern "C" {
63#define DRM_IOCTL_AMDGPU_WAIT_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_CS, union drm_amdgpu_wait_cs) 67#define DRM_IOCTL_AMDGPU_WAIT_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_CS, union drm_amdgpu_wait_cs)
64#define DRM_IOCTL_AMDGPU_GEM_OP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_OP, struct drm_amdgpu_gem_op) 68#define DRM_IOCTL_AMDGPU_GEM_OP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_OP, struct drm_amdgpu_gem_op)
65#define DRM_IOCTL_AMDGPU_GEM_USERPTR DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_USERPTR, struct drm_amdgpu_gem_userptr) 69#define DRM_IOCTL_AMDGPU_GEM_USERPTR DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_USERPTR, struct drm_amdgpu_gem_userptr)
70#define DRM_IOCTL_AMDGPU_WAIT_FENCES DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_FENCES, union drm_amdgpu_wait_fences)
71#define DRM_IOCTL_AMDGPU_VM DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_VM, union drm_amdgpu_vm)
72#define DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_FENCE_TO_HANDLE, union drm_amdgpu_fence_to_handle)
73#define DRM_IOCTL_AMDGPU_SCHED DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_SCHED, union drm_amdgpu_sched)
66 74
67#define AMDGPU_GEM_DOMAIN_CPU 0x1 75#define AMDGPU_GEM_DOMAIN_CPU 0x1
68#define AMDGPU_GEM_DOMAIN_GTT 0x2 76#define AMDGPU_GEM_DOMAIN_GTT 0x2
@@ -79,22 +87,30 @@ extern "C" {
79#define AMDGPU_GEM_CREATE_CPU_GTT_USWC (1 << 2) 87#define AMDGPU_GEM_CREATE_CPU_GTT_USWC (1 << 2)
80/* Flag that the memory should be in VRAM and cleared */ 88/* Flag that the memory should be in VRAM and cleared */
81#define AMDGPU_GEM_CREATE_VRAM_CLEARED (1 << 3) 89#define AMDGPU_GEM_CREATE_VRAM_CLEARED (1 << 3)
90/* Flag that create shadow bo(GTT) while allocating vram bo */
91#define AMDGPU_GEM_CREATE_SHADOW (1 << 4)
92/* Flag that allocating the BO should use linear VRAM */
93#define AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS (1 << 5)
94/* Flag that BO is always valid in this VM */
95#define AMDGPU_GEM_CREATE_VM_ALWAYS_VALID (1 << 6)
96/* Flag that BO sharing will be explicitly synchronized */
97#define AMDGPU_GEM_CREATE_EXPLICIT_SYNC (1 << 7)
82 98
83struct drm_amdgpu_gem_create_in { 99struct drm_amdgpu_gem_create_in {
84 /** the requested memory size */ 100 /** the requested memory size */
85 uint64_t bo_size; 101 __u64 bo_size;
86 /** physical start_addr alignment in bytes for some HW requirements */ 102 /** physical start_addr alignment in bytes for some HW requirements */
87 uint64_t alignment; 103 __u64 alignment;
88 /** the requested memory domains */ 104 /** the requested memory domains */
89 uint64_t domains; 105 __u64 domains;
90 /** allocation flags */ 106 /** allocation flags */
91 uint64_t domain_flags; 107 __u64 domain_flags;
92}; 108};
93 109
94struct drm_amdgpu_gem_create_out { 110struct drm_amdgpu_gem_create_out {
95 /** returned GEM object handle */ 111 /** returned GEM object handle */
96 uint32_t handle; 112 __u32 handle;
97 uint32_t _pad; 113 __u32 _pad;
98}; 114};
99 115
100union drm_amdgpu_gem_create { 116union drm_amdgpu_gem_create {
@@ -111,28 +127,28 @@ union drm_amdgpu_gem_create {
111 127
112struct drm_amdgpu_bo_list_in { 128struct drm_amdgpu_bo_list_in {
113 /** Type of operation */ 129 /** Type of operation */
114 uint32_t operation; 130 __u32 operation;
115 /** Handle of list or 0 if we want to create one */ 131 /** Handle of list or 0 if we want to create one */
116 uint32_t list_handle; 132 __u32 list_handle;
117 /** Number of BOs in list */ 133 /** Number of BOs in list */
118 uint32_t bo_number; 134 __u32 bo_number;
119 /** Size of each element describing BO */ 135 /** Size of each element describing BO */
120 uint32_t bo_info_size; 136 __u32 bo_info_size;
121 /** Pointer to array describing BOs */ 137 /** Pointer to array describing BOs */
122 uint64_t bo_info_ptr; 138 __u64 bo_info_ptr;
123}; 139};
124 140
125struct drm_amdgpu_bo_list_entry { 141struct drm_amdgpu_bo_list_entry {
126 /** Handle of BO */ 142 /** Handle of BO */
127 uint32_t bo_handle; 143 __u32 bo_handle;
128 /** New (if specified) BO priority to be used during migration */ 144 /** New (if specified) BO priority to be used during migration */
129 uint32_t bo_priority; 145 __u32 bo_priority;
130}; 146};
131 147
132struct drm_amdgpu_bo_list_out { 148struct drm_amdgpu_bo_list_out {
133 /** Handle of resource list */ 149 /** Handle of resource list */
134 uint32_t list_handle; 150 __u32 list_handle;
135 uint32_t _pad; 151 __u32 _pad;
136}; 152};
137 153
138union drm_amdgpu_bo_list { 154union drm_amdgpu_bo_list {
@@ -144,6 +160,7 @@ union drm_amdgpu_bo_list {
144#define AMDGPU_CTX_OP_ALLOC_CTX 1 160#define AMDGPU_CTX_OP_ALLOC_CTX 1
145#define AMDGPU_CTX_OP_FREE_CTX 2 161#define AMDGPU_CTX_OP_FREE_CTX 2
146#define AMDGPU_CTX_OP_QUERY_STATE 3 162#define AMDGPU_CTX_OP_QUERY_STATE 3
163#define AMDGPU_CTX_OP_QUERY_STATE2 4
147 164
148/* GPU reset status */ 165/* GPU reset status */
149#define AMDGPU_CTX_NO_RESET 0 166#define AMDGPU_CTX_NO_RESET 0
@@ -154,28 +171,44 @@ union drm_amdgpu_bo_list {
154/* unknown cause */ 171/* unknown cause */
155#define AMDGPU_CTX_UNKNOWN_RESET 3 172#define AMDGPU_CTX_UNKNOWN_RESET 3
156 173
174/* indicate gpu reset occurred after ctx created */
175#define AMDGPU_CTX_QUERY2_FLAGS_RESET (1<<0)
176/* indicate vram lost occurred after ctx created */
177#define AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST (1<<1)
178/* indicate some job from this context once caused a gpu hang */
179#define AMDGPU_CTX_QUERY2_FLAGS_GUILTY (1<<2)
180
181/* Context priority level */
182#define AMDGPU_CTX_PRIORITY_UNSET -2048
183#define AMDGPU_CTX_PRIORITY_VERY_LOW -1023
184#define AMDGPU_CTX_PRIORITY_LOW -512
185#define AMDGPU_CTX_PRIORITY_NORMAL 0
186/* Selecting a priority above NORMAL requires CAP_SYS_NICE or DRM_MASTER */
187#define AMDGPU_CTX_PRIORITY_HIGH 512
188#define AMDGPU_CTX_PRIORITY_VERY_HIGH 1023
189
157struct drm_amdgpu_ctx_in { 190struct drm_amdgpu_ctx_in {
158 /** AMDGPU_CTX_OP_* */ 191 /** AMDGPU_CTX_OP_* */
159 uint32_t op; 192 __u32 op;
160 /** For future use, no flags defined so far */ 193 /** For future use, no flags defined so far */
161 uint32_t flags; 194 __u32 flags;
162 uint32_t ctx_id; 195 __u32 ctx_id;
163 uint32_t _pad; 196 __s32 priority;
164}; 197};
165 198
166union drm_amdgpu_ctx_out { 199union drm_amdgpu_ctx_out {
167 struct { 200 struct {
168 uint32_t ctx_id; 201 __u32 ctx_id;
169 uint32_t _pad; 202 __u32 _pad;
170 } alloc; 203 } alloc;
171 204
172 struct { 205 struct {
173 /** For future use, no flags defined so far */ 206 /** For future use, no flags defined so far */
174 uint64_t flags; 207 __u64 flags;
175 /** Number of resets caused by this context so far. */ 208 /** Number of resets caused by this context so far. */
176 uint32_t hangs; 209 __u32 hangs;
177 /** Reset status since the last call of the ioctl. */ 210 /** Reset status since the last call of the ioctl. */
178 uint32_t reset_status; 211 __u32 reset_status;
179 } state; 212 } state;
180}; 213};
181 214
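
The new priority field is consumed through the existing context-alloc operation. A rough caller-side sketch; the DRM_AMDGPU_CTX command index lives in a part of this header not shown in the hunk above, so treat that name as assumed context rather than something this change introduces:

#include <xf86drm.h>
#include "amdgpu_drm.h"

/* Sketch only: allocate a context at HIGH priority (anything above NORMAL
 * needs CAP_SYS_NICE or DRM_MASTER, per the comment above). */
static int alloc_high_prio_ctx(int drm_fd, uint32_t *ctx_id)
{
	union drm_amdgpu_ctx args = {0};
	int ret;

	args.in.op = AMDGPU_CTX_OP_ALLOC_CTX;
	args.in.priority = AMDGPU_CTX_PRIORITY_HIGH;

	ret = drmCommandWriteRead(drm_fd, DRM_AMDGPU_CTX, &args, sizeof(args));
	if (ret == 0)
		*ctx_id = args.out.alloc.ctx_id;
	return ret;
}
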
@@ -184,6 +217,41 @@ union drm_amdgpu_ctx {
184 union drm_amdgpu_ctx_out out; 217 union drm_amdgpu_ctx_out out;
185}; 218};
186 219
220/* vm ioctl */
221#define AMDGPU_VM_OP_RESERVE_VMID 1
222#define AMDGPU_VM_OP_UNRESERVE_VMID 2
223
224struct drm_amdgpu_vm_in {
225 /** AMDGPU_VM_OP_* */
226 __u32 op;
227 __u32 flags;
228};
229
230struct drm_amdgpu_vm_out {
231 /** For future use, no flags defined so far */
232 __u64 flags;
233};
234
235union drm_amdgpu_vm {
236 struct drm_amdgpu_vm_in in;
237 struct drm_amdgpu_vm_out out;
238};
239
240/* sched ioctl */
241#define AMDGPU_SCHED_OP_PROCESS_PRIORITY_OVERRIDE 1
242
243struct drm_amdgpu_sched_in {
244 /* AMDGPU_SCHED_OP_* */
245 __u32 op;
246 __u32 fd;
247 __s32 priority;
248 __u32 flags;
249};
250
251union drm_amdgpu_sched {
252 struct drm_amdgpu_sched_in in;
253};
254
187/* 255/*
188 * This is not a reliable API and you should expect it to fail for any 256 * This is not a reliable API and you should expect it to fail for any
189 * number of reasons and have fallback path that do not use userptr to 257 * number of reasons and have fallback path that do not use userptr to
@@ -195,14 +263,15 @@ union drm_amdgpu_ctx {
195#define AMDGPU_GEM_USERPTR_REGISTER (1 << 3) 263#define AMDGPU_GEM_USERPTR_REGISTER (1 << 3)
196 264
197struct drm_amdgpu_gem_userptr { 265struct drm_amdgpu_gem_userptr {
198 uint64_t addr; 266 __u64 addr;
199 uint64_t size; 267 __u64 size;
200 /* AMDGPU_GEM_USERPTR_* */ 268 /* AMDGPU_GEM_USERPTR_* */
201 uint32_t flags; 269 __u32 flags;
202 /* Resulting GEM handle */ 270 /* Resulting GEM handle */
203 uint32_t handle; 271 __u32 handle;
204}; 272};
205 273
274/* SI-CI-VI: */
206/* same meaning as the GB_TILE_MODE and GL_MACRO_TILE_MODE fields */ 275/* same meaning as the GB_TILE_MODE and GL_MACRO_TILE_MODE fields */
207#define AMDGPU_TILING_ARRAY_MODE_SHIFT 0 276#define AMDGPU_TILING_ARRAY_MODE_SHIFT 0
208#define AMDGPU_TILING_ARRAY_MODE_MASK 0xf 277#define AMDGPU_TILING_ARRAY_MODE_MASK 0xf
@@ -221,10 +290,15 @@ struct drm_amdgpu_gem_userptr {
221#define AMDGPU_TILING_NUM_BANKS_SHIFT 21 290#define AMDGPU_TILING_NUM_BANKS_SHIFT 21
222#define AMDGPU_TILING_NUM_BANKS_MASK 0x3 291#define AMDGPU_TILING_NUM_BANKS_MASK 0x3
223 292
293/* GFX9 and later: */
294#define AMDGPU_TILING_SWIZZLE_MODE_SHIFT 0
295#define AMDGPU_TILING_SWIZZLE_MODE_MASK 0x1f
296
297/* Set/Get helpers for tiling flags. */
224#define AMDGPU_TILING_SET(field, value) \ 298#define AMDGPU_TILING_SET(field, value) \
225 (((value) & AMDGPU_TILING_##field##_MASK) << AMDGPU_TILING_##field##_SHIFT) 299 (((__u64)(value) & AMDGPU_TILING_##field##_MASK) << AMDGPU_TILING_##field##_SHIFT)
226#define AMDGPU_TILING_GET(value, field) \ 300#define AMDGPU_TILING_GET(value, field) \
227 (((value) >> AMDGPU_TILING_##field##_SHIFT) & AMDGPU_TILING_##field##_MASK) 301 (((__u64)(value) >> AMDGPU_TILING_##field##_SHIFT) & AMDGPU_TILING_##field##_MASK)
228 302
229#define AMDGPU_GEM_METADATA_OP_SET_METADATA 1 303#define AMDGPU_GEM_METADATA_OP_SET_METADATA 1
230#define AMDGPU_GEM_METADATA_OP_GET_METADATA 2 304#define AMDGPU_GEM_METADATA_OP_GET_METADATA 2
@@ -232,28 +306,28 @@ struct drm_amdgpu_gem_userptr {
232/** The same structure is shared for input/output */ 306/** The same structure is shared for input/output */
233struct drm_amdgpu_gem_metadata { 307struct drm_amdgpu_gem_metadata {
234 /** GEM Object handle */ 308 /** GEM Object handle */
235 uint32_t handle; 309 __u32 handle;
236 /** Do we want get or set metadata */ 310 /** Do we want get or set metadata */
237 uint32_t op; 311 __u32 op;
238 struct { 312 struct {
239 /** For future use, no flags defined so far */ 313 /** For future use, no flags defined so far */
240 uint64_t flags; 314 __u64 flags;
241 /** family specific tiling info */ 315 /** family specific tiling info */
242 uint64_t tiling_info; 316 __u64 tiling_info;
243 uint32_t data_size_bytes; 317 __u32 data_size_bytes;
244 uint32_t data[64]; 318 __u32 data[64];
245 } data; 319 } data;
246}; 320};
247 321
248struct drm_amdgpu_gem_mmap_in { 322struct drm_amdgpu_gem_mmap_in {
249 /** the GEM object handle */ 323 /** the GEM object handle */
250 uint32_t handle; 324 __u32 handle;
251 uint32_t _pad; 325 __u32 _pad;
252}; 326};
253 327
254struct drm_amdgpu_gem_mmap_out { 328struct drm_amdgpu_gem_mmap_out {
255 /** mmap offset from the vma offset manager */ 329 /** mmap offset from the vma offset manager */
256 uint64_t addr_ptr; 330 __u64 addr_ptr;
257}; 331};
258 332
259union drm_amdgpu_gem_mmap { 333union drm_amdgpu_gem_mmap {
@@ -263,18 +337,18 @@ union drm_amdgpu_gem_mmap {
263 337
264struct drm_amdgpu_gem_wait_idle_in { 338struct drm_amdgpu_gem_wait_idle_in {
265 /** GEM object handle */ 339 /** GEM object handle */
266 uint32_t handle; 340 __u32 handle;
267 /** For future use, no flags defined so far */ 341 /** For future use, no flags defined so far */
268 uint32_t flags; 342 __u32 flags;
269 /** Absolute timeout to wait */ 343 /** Absolute timeout to wait */
270 uint64_t timeout; 344 __u64 timeout;
271}; 345};
272 346
273struct drm_amdgpu_gem_wait_idle_out { 347struct drm_amdgpu_gem_wait_idle_out {
274 /** BO status: 0 - BO is idle, 1 - BO is busy */ 348 /** BO status: 0 - BO is idle, 1 - BO is busy */
275 uint32_t status; 349 __u32 status;
276 /** Returned current memory domain */ 350 /** Returned current memory domain */
277 uint32_t domain; 351 __u32 domain;
278}; 352};
279 353
280union drm_amdgpu_gem_wait_idle { 354union drm_amdgpu_gem_wait_idle {
@@ -283,19 +357,22 @@ union drm_amdgpu_gem_wait_idle {
283}; 357};
284 358
285struct drm_amdgpu_wait_cs_in { 359struct drm_amdgpu_wait_cs_in {
286 /** Command submission handle */ 360 /* Command submission handle
287 uint64_t handle; 361 * handle equals 0 means none to wait for
362 * handle equals ~0ull means wait for the latest sequence number
363 */
364 __u64 handle;
288 /** Absolute timeout to wait */ 365 /** Absolute timeout to wait */
289 uint64_t timeout; 366 __u64 timeout;
290 uint32_t ip_type; 367 __u32 ip_type;
291 uint32_t ip_instance; 368 __u32 ip_instance;
292 uint32_t ring; 369 __u32 ring;
293 uint32_t ctx_id; 370 __u32 ctx_id;
294}; 371};
295 372
296struct drm_amdgpu_wait_cs_out { 373struct drm_amdgpu_wait_cs_out {
297 /** CS status: 0 - CS completed, 1 - CS still busy */ 374 /** CS status: 0 - CS completed, 1 - CS still busy */
298 uint64_t status; 375 __u64 status;
299}; 376};
300 377
301union drm_amdgpu_wait_cs { 378union drm_amdgpu_wait_cs {
@@ -303,21 +380,49 @@ union drm_amdgpu_wait_cs {
303 struct drm_amdgpu_wait_cs_out out; 380 struct drm_amdgpu_wait_cs_out out;
304}; 381};
305 382
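A hedged sketch of driving this union directly: wait for a submission identified by its sequence number on the GFX ring. It assumes an already-open render-node fd plus the ctx_id/sequence from an earlier CS ioctl; DRM_IOCTL_AMDGPU_WAIT_CS is the number this union is registered under elsewhere in the header, and libdrm_amdgpu's amdgpu_cs_query_fence_status() is the usual wrapper.

/* Sketch: block (up to an absolute timeout) until a previous submission
 * on the GFX ring has retired. */
#include <amdgpu_drm.h>
#include <xf86drm.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>

static int wait_for_cs(int fd, uint32_t ctx_id, uint64_t seq, uint64_t timeout)
{
	union drm_amdgpu_wait_cs args;
	int r;

	memset(&args, 0, sizeof(args));
	args.in.handle = seq;        /* ~0ull would mean "latest submission" */
	args.in.timeout = timeout;   /* absolute timeout, per the comment above */
	args.in.ip_type = AMDGPU_HW_IP_GFX;
	args.in.ctx_id = ctx_id;

	r = drmIoctl(fd, DRM_IOCTL_AMDGPU_WAIT_CS, &args);
	if (r)
		return r;
	return args.out.status ? -EBUSY : 0;   /* 1 == CS still busy */
}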
383struct drm_amdgpu_fence {
384 __u32 ctx_id;
385 __u32 ip_type;
386 __u32 ip_instance;
387 __u32 ring;
388 __u64 seq_no;
389};
390
391struct drm_amdgpu_wait_fences_in {
392 /** This points to uint64_t * which points to fences */
393 __u64 fences;
394 __u32 fence_count;
395 __u32 wait_all;
396 __u64 timeout_ns;
397};
398
399struct drm_amdgpu_wait_fences_out {
400 __u32 status;
401 __u32 first_signaled;
402};
403
404union drm_amdgpu_wait_fences {
405 struct drm_amdgpu_wait_fences_in in;
406 struct drm_amdgpu_wait_fences_out out;
407};
408
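For orientation, a sketch of waiting on several fences at once through the structures above. The wrapper in libdrm_amdgpu is amdgpu_cs_wait_fences(); the ioctl name is assumed from its registration in this header, and the status/first_signaled interpretation is left to the caller.

/* Sketch: wait for all (or any) of 'count' fences with a timeout in ns. */
#include <amdgpu_drm.h>
#include <xf86drm.h>
#include <stdint.h>
#include <string.h>

static int wait_fences(int fd, struct drm_amdgpu_fence *fences, uint32_t count,
		       int wait_all, uint64_t timeout_ns,
		       uint32_t *status, uint32_t *first)
{
	union drm_amdgpu_wait_fences args;
	int r;

	memset(&args, 0, sizeof(args));
	args.in.fences = (uintptr_t)fences;  /* array of struct drm_amdgpu_fence */
	args.in.fence_count = count;
	args.in.wait_all = wait_all;         /* 0 = wait for any, 1 = wait for all */
	args.in.timeout_ns = timeout_ns;

	r = drmIoctl(fd, DRM_IOCTL_AMDGPU_WAIT_FENCES, &args);
	if (r)
		return r;
	*status = args.out.status;                 /* signaled/timeout indication */
	if (first)
		*first = args.out.first_signaled;  /* only meaningful for "any" */
	return 0;
}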
306#define AMDGPU_GEM_OP_GET_GEM_CREATE_INFO 0 409#define AMDGPU_GEM_OP_GET_GEM_CREATE_INFO 0
307#define AMDGPU_GEM_OP_SET_PLACEMENT 1 410#define AMDGPU_GEM_OP_SET_PLACEMENT 1
308 411
309/* Sets or returns a value associated with a buffer. */ 412/* Sets or returns a value associated with a buffer. */
310struct drm_amdgpu_gem_op { 413struct drm_amdgpu_gem_op {
311 /** GEM object handle */ 414 /** GEM object handle */
312 uint32_t handle; 415 __u32 handle;
313 /** AMDGPU_GEM_OP_* */ 416 /** AMDGPU_GEM_OP_* */
314 uint32_t op; 417 __u32 op;
315 /** Input or return value */ 418 /** Input or return value */
316 uint64_t value; 419 __u64 value;
317}; 420};
318 421
319#define AMDGPU_VA_OP_MAP 1 422#define AMDGPU_VA_OP_MAP 1
320#define AMDGPU_VA_OP_UNMAP 2 423#define AMDGPU_VA_OP_UNMAP 2
424#define AMDGPU_VA_OP_CLEAR 3
425#define AMDGPU_VA_OP_REPLACE 4
321 426
322/* Delay the page table update till the next CS */ 427/* Delay the page table update till the next CS */
323#define AMDGPU_VM_DELAY_UPDATE (1 << 0) 428#define AMDGPU_VM_DELAY_UPDATE (1 << 0)
@@ -329,21 +434,35 @@ struct drm_amdgpu_gem_op {
329#define AMDGPU_VM_PAGE_WRITEABLE (1 << 2) 434#define AMDGPU_VM_PAGE_WRITEABLE (1 << 2)
330/* executable mapping, new for VI */ 435/* executable mapping, new for VI */
331#define AMDGPU_VM_PAGE_EXECUTABLE (1 << 3) 436#define AMDGPU_VM_PAGE_EXECUTABLE (1 << 3)
437/* partially resident texture */
438#define AMDGPU_VM_PAGE_PRT (1 << 4)
439/* MTYPE flags use bit 5 to 8 */
440#define AMDGPU_VM_MTYPE_MASK (0xf << 5)
441/* Default MTYPE. Pre-AI must use this. Recommended for newer ASICs. */
442#define AMDGPU_VM_MTYPE_DEFAULT (0 << 5)
443/* Use NC MTYPE instead of default MTYPE */
444#define AMDGPU_VM_MTYPE_NC (1 << 5)
445/* Use WC MTYPE instead of default MTYPE */
446#define AMDGPU_VM_MTYPE_WC (2 << 5)
447/* Use CC MTYPE instead of default MTYPE */
448#define AMDGPU_VM_MTYPE_CC (3 << 5)
449/* Use UC MTYPE instead of default MTYPE */
450#define AMDGPU_VM_MTYPE_UC (4 << 5)
332 451
333struct drm_amdgpu_gem_va { 452struct drm_amdgpu_gem_va {
334 /** GEM object handle */ 453 /** GEM object handle */
335 uint32_t handle; 454 __u32 handle;
336 uint32_t _pad; 455 __u32 _pad;
337 /** AMDGPU_VA_OP_* */ 456 /** AMDGPU_VA_OP_* */
338 uint32_t operation; 457 __u32 operation;
339 /** AMDGPU_VM_PAGE_* */ 458 /** AMDGPU_VM_PAGE_* */
340 uint32_t flags; 459 __u32 flags;
341 /** va address to assign. Must be correctly aligned. */ 460 /** va address to assign. Must be correctly aligned. */
342 uint64_t va_address; 461 __u64 va_address;
343 /** Specify offset inside of BO to assign. Must be correctly aligned.*/ 462 /** Specify offset inside of BO to assign. Must be correctly aligned.*/
344 uint64_t offset_in_bo; 463 __u64 offset_in_bo;
345 /** Specify mapping size. Must be correctly aligned. */ 464 /** Specify mapping size. Must be correctly aligned. */
346 uint64_t map_size; 465 __u64 map_size;
347}; 466};
348 467
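A minimal sketch of a read/write GPU mapping using this structure, assuming the caller already reserved a suitably aligned VA range and holds a GEM handle; libdrm_amdgpu's amdgpu_bo_va_op() wraps the same ioctl.

/* Sketch: map 'size' bytes of a GEM BO at gpu_va with read/write access. */
#include <amdgpu_drm.h>
#include <xf86drm.h>
#include <stdint.h>
#include <string.h>

static int map_bo(int fd, uint32_t handle, uint64_t gpu_va, uint64_t size)
{
	struct drm_amdgpu_gem_va va;

	memset(&va, 0, sizeof(va));
	va.handle = handle;
	va.operation = AMDGPU_VA_OP_MAP;
	va.flags = AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE;
	va.va_address = gpu_va;      /* caller guarantees alignment */
	va.offset_in_bo = 0;
	va.map_size = size;

	return drmIoctl(fd, DRM_IOCTL_AMDGPU_GEM_VA, &va);
}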
349#define AMDGPU_HW_IP_GFX 0 468#define AMDGPU_HW_IP_GFX 0
@@ -351,33 +470,38 @@ struct drm_amdgpu_gem_va {
351#define AMDGPU_HW_IP_DMA 2 470#define AMDGPU_HW_IP_DMA 2
352#define AMDGPU_HW_IP_UVD 3 471#define AMDGPU_HW_IP_UVD 3
353#define AMDGPU_HW_IP_VCE 4 472#define AMDGPU_HW_IP_VCE 4
354#define AMDGPU_HW_IP_NUM 5 473#define AMDGPU_HW_IP_UVD_ENC 5
474#define AMDGPU_HW_IP_VCN_DEC 6
475#define AMDGPU_HW_IP_VCN_ENC 7
476#define AMDGPU_HW_IP_NUM 8
355 477
356#define AMDGPU_HW_IP_INSTANCE_MAX_COUNT 1 478#define AMDGPU_HW_IP_INSTANCE_MAX_COUNT 1
357 479
358#define AMDGPU_CHUNK_ID_IB 0x01 480#define AMDGPU_CHUNK_ID_IB 0x01
359#define AMDGPU_CHUNK_ID_FENCE 0x02 481#define AMDGPU_CHUNK_ID_FENCE 0x02
360#define AMDGPU_CHUNK_ID_DEPENDENCIES 0x03 482#define AMDGPU_CHUNK_ID_DEPENDENCIES 0x03
483#define AMDGPU_CHUNK_ID_SYNCOBJ_IN 0x04
484#define AMDGPU_CHUNK_ID_SYNCOBJ_OUT 0x05
361 485
362struct drm_amdgpu_cs_chunk { 486struct drm_amdgpu_cs_chunk {
363 uint32_t chunk_id; 487 __u32 chunk_id;
364 uint32_t length_dw; 488 __u32 length_dw;
365 uint64_t chunk_data; 489 __u64 chunk_data;
366}; 490};
367 491
368struct drm_amdgpu_cs_in { 492struct drm_amdgpu_cs_in {
369 /** Rendering context id */ 493 /** Rendering context id */
370 uint32_t ctx_id; 494 __u32 ctx_id;
371 /** Handle of resource list associated with CS */ 495 /** Handle of resource list associated with CS */
372 uint32_t bo_list_handle; 496 __u32 bo_list_handle;
373 uint32_t num_chunks; 497 __u32 num_chunks;
374 uint32_t _pad; 498 __u32 _pad;
375 /** this points to uint64_t * which points to cs chunks */ 499 /** this points to __u64 * which points to cs chunks */
376 uint64_t chunks; 500 __u64 chunks;
377}; 501};
378 502
379struct drm_amdgpu_cs_out { 503struct drm_amdgpu_cs_out {
380 uint64_t handle; 504 __u64 handle;
381}; 505};
382 506
383union drm_amdgpu_cs { 507union drm_amdgpu_cs {
@@ -390,36 +514,58 @@ union drm_amdgpu_cs {
390/* This IB should be submitted to CE */ 514/* This IB should be submitted to CE */
391#define AMDGPU_IB_FLAG_CE (1<<0) 515#define AMDGPU_IB_FLAG_CE (1<<0)
392 516
393/* CE Preamble */ 517/* Preamble flag, which means the IB could be dropped if no context switch */
394#define AMDGPU_IB_FLAG_PREAMBLE (1<<1) 518#define AMDGPU_IB_FLAG_PREAMBLE (1<<1)
395 519
520/* Preempt flag, IB should set Pre_enb bit if PREEMPT flag detected */
521#define AMDGPU_IB_FLAG_PREEMPT (1<<2)
522
396struct drm_amdgpu_cs_chunk_ib { 523struct drm_amdgpu_cs_chunk_ib {
397 uint32_t _pad; 524 __u32 _pad;
398 /** AMDGPU_IB_FLAG_* */ 525 /** AMDGPU_IB_FLAG_* */
399 uint32_t flags; 526 __u32 flags;
400 /** Virtual address to begin IB execution */ 527 /** Virtual address to begin IB execution */
401 uint64_t va_start; 528 __u64 va_start;
402 /** Size of submission */ 529 /** Size of submission */
403 uint32_t ib_bytes; 530 __u32 ib_bytes;
404 /** HW IP to submit to */ 531 /** HW IP to submit to */
405 uint32_t ip_type; 532 __u32 ip_type;
406 /** HW IP index of the same type to submit to */ 533 /** HW IP index of the same type to submit to */
407 uint32_t ip_instance; 534 __u32 ip_instance;
408 /** Ring index to submit to */ 535 /** Ring index to submit to */
409 uint32_t ring; 536 __u32 ring;
410}; 537};
411 538
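To show how an IB chunk feeds the CS ioctl, here is a rough sketch submitting a single GFX IB with no fence or dependency chunks. Error handling is minimal, and real code normally goes through amdgpu_cs_submit() in libdrm_amdgpu.

/* Sketch: submit one IB on the GFX ring of context ctx_id, using bo_list. */
#include <amdgpu_drm.h>
#include <xf86drm.h>
#include <stdint.h>
#include <string.h>

static int submit_one_ib(int fd, uint32_t ctx_id, uint32_t bo_list,
			 uint64_t ib_va, uint32_t ib_bytes, uint64_t *seq)
{
	struct drm_amdgpu_cs_chunk_ib ib;
	struct drm_amdgpu_cs_chunk chunk;
	uint64_t chunk_ptr;
	union drm_amdgpu_cs cs;
	int r;

	memset(&ib, 0, sizeof(ib));
	ib.va_start = ib_va;            /* GPU VA of the command buffer */
	ib.ib_bytes = ib_bytes;
	ib.ip_type = AMDGPU_HW_IP_GFX;

	chunk.chunk_id = AMDGPU_CHUNK_ID_IB;
	chunk.length_dw = sizeof(ib) / 4;
	chunk.chunk_data = (uintptr_t)&ib;

	/* 'chunks' is an array of pointers to chunks, passed as __u64 values */
	chunk_ptr = (uintptr_t)&chunk;

	memset(&cs, 0, sizeof(cs));
	cs.in.ctx_id = ctx_id;
	cs.in.bo_list_handle = bo_list;
	cs.in.num_chunks = 1;
	cs.in.chunks = (uintptr_t)&chunk_ptr;

	r = drmIoctl(fd, DRM_IOCTL_AMDGPU_CS, &cs);
	if (!r)
		*seq = cs.out.handle;   /* sequence number later used by WAIT_CS */
	return r;
}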
412struct drm_amdgpu_cs_chunk_dep { 539struct drm_amdgpu_cs_chunk_dep {
413 uint32_t ip_type; 540 __u32 ip_type;
414 uint32_t ip_instance; 541 __u32 ip_instance;
415 uint32_t ring; 542 __u32 ring;
416 uint32_t ctx_id; 543 __u32 ctx_id;
417 uint64_t handle; 544 __u64 handle;
418}; 545};
419 546
420struct drm_amdgpu_cs_chunk_fence { 547struct drm_amdgpu_cs_chunk_fence {
421 uint32_t handle; 548 __u32 handle;
422 uint32_t offset; 549 __u32 offset;
550};
551
552struct drm_amdgpu_cs_chunk_sem {
553 __u32 handle;
554};
555
556#define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ 0
557#define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD 1
558#define AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD 2
559
560union drm_amdgpu_fence_to_handle {
561 struct {
562 struct drm_amdgpu_fence fence;
563 __u32 what;
564 __u32 pad;
565 } in;
566 struct {
567 __u32 handle;
568 } out;
423}; 569};
424 570
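A sketch of exporting a submission's fence as a sync_file fd via the union above; the ioctl name is assumed from its registration elsewhere in this header, and libdrm_amdgpu exposes the same path as amdgpu_cs_fence_to_handle().

/* Sketch: turn an amdgpu fence into a sync_file fd usable by other APIs. */
#include <amdgpu_drm.h>
#include <xf86drm.h>
#include <string.h>

static int fence_to_sync_file(int fd, struct drm_amdgpu_fence *fence, int *sync_fd)
{
	union drm_amdgpu_fence_to_handle fth;
	int r;

	memset(&fth, 0, sizeof(fth));
	fth.in.fence = *fence;
	fth.in.what = AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD;

	r = drmIoctl(fd, DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE, &fth);
	if (!r)
		*sync_fd = (int)fth.out.handle;  /* an fd for the SYNC_FILE_FD case */
	return r;
}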
425struct drm_amdgpu_cs_chunk_data { 571struct drm_amdgpu_cs_chunk_data {
@@ -434,6 +580,7 @@ struct drm_amdgpu_cs_chunk_data {
434 * 580 *
435 */ 581 */
436#define AMDGPU_IDS_FLAGS_FUSION 0x1 582#define AMDGPU_IDS_FLAGS_FUSION 0x1
583#define AMDGPU_IDS_FLAGS_PREEMPTION 0x2
437 584
438/* indicate if acceleration can be working */ 585/* indicate if acceleration can be working */
439#define AMDGPU_INFO_ACCEL_WORKING 0x00 586#define AMDGPU_INFO_ACCEL_WORKING 0x00
@@ -467,6 +614,12 @@ struct drm_amdgpu_cs_chunk_data {
467 #define AMDGPU_INFO_FW_SMC 0x0a 614 #define AMDGPU_INFO_FW_SMC 0x0a
468 /* Subquery id: Query SDMA firmware version */ 615 /* Subquery id: Query SDMA firmware version */
469 #define AMDGPU_INFO_FW_SDMA 0x0b 616 #define AMDGPU_INFO_FW_SDMA 0x0b
617 /* Subquery id: Query PSP SOS firmware version */
618 #define AMDGPU_INFO_FW_SOS 0x0c
619 /* Subquery id: Query PSP ASD firmware version */
620 #define AMDGPU_INFO_FW_ASD 0x0d
621 /* Subquery id: Query VCN firmware version */
622 #define AMDGPU_INFO_FW_VCN 0x0e
470/* number of bytes moved for TTM migration */ 623/* number of bytes moved for TTM migration */
471#define AMDGPU_INFO_NUM_BYTES_MOVED 0x0f 624#define AMDGPU_INFO_NUM_BYTES_MOVED 0x0f
472/* the used VRAM size */ 625/* the used VRAM size */
@@ -483,6 +636,43 @@ struct drm_amdgpu_cs_chunk_data {
483#define AMDGPU_INFO_DEV_INFO 0x16 636#define AMDGPU_INFO_DEV_INFO 0x16
484/* visible vram usage */ 637/* visible vram usage */
485#define AMDGPU_INFO_VIS_VRAM_USAGE 0x17 638#define AMDGPU_INFO_VIS_VRAM_USAGE 0x17
639/* number of TTM buffer evictions */
640#define AMDGPU_INFO_NUM_EVICTIONS 0x18
641/* Query memory about VRAM and GTT domains */
642#define AMDGPU_INFO_MEMORY 0x19
643/* Query vce clock table */
644#define AMDGPU_INFO_VCE_CLOCK_TABLE 0x1A
645/* Query vbios related information */
646#define AMDGPU_INFO_VBIOS 0x1B
647 /* Subquery id: Query vbios size */
648 #define AMDGPU_INFO_VBIOS_SIZE 0x1
649 /* Subquery id: Query vbios image */
650 #define AMDGPU_INFO_VBIOS_IMAGE 0x2
651/* Query UVD handles */
652#define AMDGPU_INFO_NUM_HANDLES 0x1C
653/* Query sensor related information */
654#define AMDGPU_INFO_SENSOR 0x1D
655 /* Subquery id: Query GPU shader clock */
656 #define AMDGPU_INFO_SENSOR_GFX_SCLK 0x1
657 /* Subquery id: Query GPU memory clock */
658 #define AMDGPU_INFO_SENSOR_GFX_MCLK 0x2
659 /* Subquery id: Query GPU temperature */
660 #define AMDGPU_INFO_SENSOR_GPU_TEMP 0x3
661 /* Subquery id: Query GPU load */
662 #define AMDGPU_INFO_SENSOR_GPU_LOAD 0x4
663 /* Subquery id: Query average GPU power */
664 #define AMDGPU_INFO_SENSOR_GPU_AVG_POWER 0x5
665 /* Subquery id: Query northbridge voltage */
666 #define AMDGPU_INFO_SENSOR_VDDNB 0x6
667 /* Subquery id: Query graphics voltage */
668 #define AMDGPU_INFO_SENSOR_VDDGFX 0x7
669 /* Subquery id: Query GPU stable pstate shader clock */
670 #define AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_SCLK 0x8
671 /* Subquery id: Query GPU stable pstate memory clock */
672 #define AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_MCLK 0x9
673/* Number of VRAM page faults on CPU access. */
674#define AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS 0x1E
675#define AMDGPU_INFO_VRAM_LOST_COUNTER 0x1F
486 676
487#define AMDGPU_INFO_MMR_SE_INDEX_SHIFT 0 677#define AMDGPU_INFO_MMR_SE_INDEX_SHIFT 0
488#define AMDGPU_INFO_MMR_SE_INDEX_MASK 0xff 678#define AMDGPU_INFO_MMR_SE_INDEX_MASK 0xff
@@ -491,86 +681,123 @@ struct drm_amdgpu_cs_chunk_data {
491 681
492struct drm_amdgpu_query_fw { 682struct drm_amdgpu_query_fw {
493 /** AMDGPU_INFO_FW_* */ 683 /** AMDGPU_INFO_FW_* */
494 uint32_t fw_type; 684 __u32 fw_type;
495 /** 685 /**
496 * Index of the IP if there are more IPs of 686 * Index of the IP if there are more IPs of
497 * the same type. 687 * the same type.
498 */ 688 */
499 uint32_t ip_instance; 689 __u32 ip_instance;
500 /** 690 /**
501 * Index of the engine. Whether this is used depends 691 * Index of the engine. Whether this is used depends
502 * on the firmware type. (e.g. MEC, SDMA) 692 * on the firmware type. (e.g. MEC, SDMA)
503 */ 693 */
504 uint32_t index; 694 __u32 index;
505 uint32_t _pad; 695 __u32 _pad;
506}; 696};
507 697
508/* Input structure for the INFO ioctl */ 698/* Input structure for the INFO ioctl */
509struct drm_amdgpu_info { 699struct drm_amdgpu_info {
510 /* Where the return value will be stored */ 700 /* Where the return value will be stored */
511 uint64_t return_pointer; 701 __u64 return_pointer;
512 /* The size of the return value. Just like "size" in "snprintf", 702 /* The size of the return value. Just like "size" in "snprintf",
513 * it limits how many bytes the kernel can write. */ 703 * it limits how many bytes the kernel can write. */
514 uint32_t return_size; 704 __u32 return_size;
515 /* The query request id. */ 705 /* The query request id. */
516 uint32_t query; 706 __u32 query;
517 707
518 union { 708 union {
519 struct { 709 struct {
520 uint32_t id; 710 __u32 id;
521 uint32_t _pad; 711 __u32 _pad;
522 } mode_crtc; 712 } mode_crtc;
523 713
524 struct { 714 struct {
525 /** AMDGPU_HW_IP_* */ 715 /** AMDGPU_HW_IP_* */
526 uint32_t type; 716 __u32 type;
527 /** 717 /**
528 * Index of the IP if there are more IPs of the same 718 * Index of the IP if there are more IPs of the same
529 * type. Ignored by AMDGPU_INFO_HW_IP_COUNT. 719 * type. Ignored by AMDGPU_INFO_HW_IP_COUNT.
530 */ 720 */
531 uint32_t ip_instance; 721 __u32 ip_instance;
532 } query_hw_ip; 722 } query_hw_ip;
533 723
534 struct { 724 struct {
535 uint32_t dword_offset; 725 __u32 dword_offset;
536 /** number of registers to read */ 726 /** number of registers to read */
537 uint32_t count; 727 __u32 count;
538 uint32_t instance; 728 __u32 instance;
539 /** For future use, no flags defined so far */ 729 /** For future use, no flags defined so far */
540 uint32_t flags; 730 __u32 flags;
541 } read_mmr_reg; 731 } read_mmr_reg;
542 732
543 struct drm_amdgpu_query_fw query_fw; 733 struct drm_amdgpu_query_fw query_fw;
734
735 struct {
736 __u32 type;
737 __u32 offset;
738 } vbios_info;
739
740 struct {
741 __u32 type;
742 } sensor_info;
544 }; 743 };
545}; 744};
546 745
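To make the query flow concrete, a hedged sketch reading one sensor value through this structure with libdrm's drmCommandWrite() helper; DRM_AMDGPU_INFO is the command index defined at the top of this header, and the returned value's units are whatever the kernel reports for AMDGPU_INFO_SENSOR_GPU_TEMP.

/* Sketch: read the current GPU temperature sensor value. */
#include <amdgpu_drm.h>
#include <xf86drm.h>
#include <stdint.h>
#include <string.h>

static int query_gpu_temp(int fd, uint32_t *value)
{
	struct drm_amdgpu_info request;

	memset(&request, 0, sizeof(request));
	request.return_pointer = (uintptr_t)value;
	request.return_size = sizeof(*value);   /* caps what the kernel may write */
	request.query = AMDGPU_INFO_SENSOR;
	request.sensor_info.type = AMDGPU_INFO_SENSOR_GPU_TEMP;

	return drmCommandWrite(fd, DRM_AMDGPU_INFO, &request, sizeof(request));
}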
547struct drm_amdgpu_info_gds { 746struct drm_amdgpu_info_gds {
548 /** GDS GFX partition size */ 747 /** GDS GFX partition size */
549 uint32_t gds_gfx_partition_size; 748 __u32 gds_gfx_partition_size;
550 /** GDS compute partition size */ 749 /** GDS compute partition size */
551 uint32_t compute_partition_size; 750 __u32 compute_partition_size;
552 /** total GDS memory size */ 751 /** total GDS memory size */
553 uint32_t gds_total_size; 752 __u32 gds_total_size;
554 /** GWS size per GFX partition */ 753 /** GWS size per GFX partition */
555 uint32_t gws_per_gfx_partition; 754 __u32 gws_per_gfx_partition;
556 /** GWS size per compute partition */ 755 /** GWS size per compute partition */
557 uint32_t gws_per_compute_partition; 756 __u32 gws_per_compute_partition;
558 /** OA size per GFX partition */ 757 /** OA size per GFX partition */
559 uint32_t oa_per_gfx_partition; 758 __u32 oa_per_gfx_partition;
560 /** OA size per compute partition */ 759 /** OA size per compute partition */
561 uint32_t oa_per_compute_partition; 760 __u32 oa_per_compute_partition;
562 uint32_t _pad; 761 __u32 _pad;
563}; 762};
564 763
565struct drm_amdgpu_info_vram_gtt { 764struct drm_amdgpu_info_vram_gtt {
566 uint64_t vram_size; 765 __u64 vram_size;
567 uint64_t vram_cpu_accessible_size; 766 __u64 vram_cpu_accessible_size;
568 uint64_t gtt_size; 767 __u64 gtt_size;
768};
769
770struct drm_amdgpu_heap_info {
771 /** max. physical memory */
772 __u64 total_heap_size;
773
774 /** Theoretical max. available memory in the given heap */
775 __u64 usable_heap_size;
776
777 /**
778 * Number of bytes allocated in the heap. This includes all processes
779 * and private allocations in the kernel. It changes when new buffers
780 * are allocated, freed, and moved. It cannot be larger than
781 * heap_size.
782 */
783 __u64 heap_usage;
784
785 /**
786 * Theoretical possible max. size of buffer which
787 * could be allocated in the given heap
788 */
789 __u64 max_allocation;
790};
791
792struct drm_amdgpu_memory_info {
793 struct drm_amdgpu_heap_info vram;
794 struct drm_amdgpu_heap_info cpu_accessible_vram;
795 struct drm_amdgpu_heap_info gtt;
569}; 796};
570 797
571struct drm_amdgpu_info_firmware { 798struct drm_amdgpu_info_firmware {
572 uint32_t ver; 799 __u32 ver;
573 uint32_t feature; 800 __u32 feature;
574}; 801};
575 802
576#define AMDGPU_VRAM_TYPE_UNKNOWN 0 803#define AMDGPU_VRAM_TYPE_UNKNOWN 0
@@ -581,74 +808,139 @@ struct drm_amdgpu_info_firmware {
581#define AMDGPU_VRAM_TYPE_GDDR5 5 808#define AMDGPU_VRAM_TYPE_GDDR5 5
582#define AMDGPU_VRAM_TYPE_HBM 6 809#define AMDGPU_VRAM_TYPE_HBM 6
583#define AMDGPU_VRAM_TYPE_DDR3 7 810#define AMDGPU_VRAM_TYPE_DDR3 7
811#define AMDGPU_VRAM_TYPE_DDR4 8
584 812
585struct drm_amdgpu_info_device { 813struct drm_amdgpu_info_device {
586 /** PCI Device ID */ 814 /** PCI Device ID */
587 uint32_t device_id; 815 __u32 device_id;
588 /** Internal chip revision: A0, A1, etc. */ 816 /** Internal chip revision: A0, A1, etc. */
589 uint32_t chip_rev; 817 __u32 chip_rev;
590 uint32_t external_rev; 818 __u32 external_rev;
591 /** Revision id in PCI Config space */ 819 /** Revision id in PCI Config space */
592 uint32_t pci_rev; 820 __u32 pci_rev;
593 uint32_t family; 821 __u32 family;
594 uint32_t num_shader_engines; 822 __u32 num_shader_engines;
595 uint32_t num_shader_arrays_per_engine; 823 __u32 num_shader_arrays_per_engine;
596 /* in KHz */ 824 /* in KHz */
597 uint32_t gpu_counter_freq; 825 __u32 gpu_counter_freq;
598 uint64_t max_engine_clock; 826 __u64 max_engine_clock;
599 uint64_t max_memory_clock; 827 __u64 max_memory_clock;
600 /* cu information */ 828 /* cu information */
601 uint32_t cu_active_number; 829 __u32 cu_active_number;
602 uint32_t cu_ao_mask; 830 /* NOTE: cu_ao_mask is INVALID, DON'T use it */
603 uint32_t cu_bitmap[4][4]; 831 __u32 cu_ao_mask;
832 __u32 cu_bitmap[4][4];
604 /** Render backend pipe mask. One render backend is CB+DB. */ 833 /** Render backend pipe mask. One render backend is CB+DB. */
605 uint32_t enabled_rb_pipes_mask; 834 __u32 enabled_rb_pipes_mask;
606 uint32_t num_rb_pipes; 835 __u32 num_rb_pipes;
607 uint32_t num_hw_gfx_contexts; 836 __u32 num_hw_gfx_contexts;
608 uint32_t _pad; 837 __u32 _pad;
609 uint64_t ids_flags; 838 __u64 ids_flags;
610 /** Starting virtual address for UMDs. */ 839 /** Starting virtual address for UMDs. */
611 uint64_t virtual_address_offset; 840 __u64 virtual_address_offset;
612 /** The maximum virtual address */ 841 /** The maximum virtual address */
613 uint64_t virtual_address_max; 842 __u64 virtual_address_max;
614 /** Required alignment of virtual addresses. */ 843 /** Required alignment of virtual addresses. */
615 uint32_t virtual_address_alignment; 844 __u32 virtual_address_alignment;
616 /** Page table entry - fragment size */ 845 /** Page table entry - fragment size */
617 uint32_t pte_fragment_size; 846 __u32 pte_fragment_size;
618 uint32_t gart_page_size; 847 __u32 gart_page_size;
619 /** constant engine ram size*/ 848 /** constant engine ram size*/
620 uint32_t ce_ram_size; 849 __u32 ce_ram_size;
621 /** video memory type info*/ 850 /** video memory type info*/
622 uint32_t vram_type; 851 __u32 vram_type;
623 /** video memory bit width*/ 852 /** video memory bit width*/
624 uint32_t vram_bit_width; 853 __u32 vram_bit_width;
625 /* vce harvesting instance */ 854 /* vce harvesting instance */
626 uint32_t vce_harvest_config; 855 __u32 vce_harvest_config;
856 /* gfx double offchip LDS buffers */
857 __u32 gc_double_offchip_lds_buf;
858 /* NGG Primitive Buffer */
859 __u64 prim_buf_gpu_addr;
860 /* NGG Position Buffer */
861 __u64 pos_buf_gpu_addr;
862 /* NGG Control Sideband */
863 __u64 cntl_sb_buf_gpu_addr;
864 /* NGG Parameter Cache */
865 __u64 param_buf_gpu_addr;
866 __u32 prim_buf_size;
867 __u32 pos_buf_size;
868 __u32 cntl_sb_buf_size;
869 __u32 param_buf_size;
870 /* wavefront size*/
871 __u32 wave_front_size;
872 /* shader visible vgprs*/
873 __u32 num_shader_visible_vgprs;
874 /* CU per shader array*/
875 __u32 num_cu_per_sh;
876 /* number of tcc blocks*/
877 __u32 num_tcc_blocks;
878 /* gs vgt table depth*/
879 __u32 gs_vgt_table_depth;
880 /* gs primitive buffer depth*/
881 __u32 gs_prim_buffer_depth;
882 /* max gs wavefront per vgt*/
883 __u32 max_gs_waves_per_vgt;
884 __u32 _pad1;
885 /* always on cu bitmap */
886 __u32 cu_ao_bitmap[4][4];
887 /** Starting high virtual address for UMDs. */
888 __u64 high_va_offset;
889 /** The maximum high virtual address */
890 __u64 high_va_max;
627}; 891};
628 892
629struct drm_amdgpu_info_hw_ip { 893struct drm_amdgpu_info_hw_ip {
630 /** Version of h/w IP */ 894 /** Version of h/w IP */
631 uint32_t hw_ip_version_major; 895 __u32 hw_ip_version_major;
632 uint32_t hw_ip_version_minor; 896 __u32 hw_ip_version_minor;
633 /** Capabilities */ 897 /** Capabilities */
634 uint64_t capabilities_flags; 898 __u64 capabilities_flags;
635 /** command buffer address start alignment*/ 899 /** command buffer address start alignment*/
636 uint32_t ib_start_alignment; 900 __u32 ib_start_alignment;
637 /** command buffer size alignment*/ 901 /** command buffer size alignment*/
638 uint32_t ib_size_alignment; 902 __u32 ib_size_alignment;
639 /** Bitmask of available rings. Bit 0 means ring 0, etc. */ 903 /** Bitmask of available rings. Bit 0 means ring 0, etc. */
640 uint32_t available_rings; 904 __u32 available_rings;
641 uint32_t _pad; 905 __u32 _pad;
906};
907
908struct drm_amdgpu_info_num_handles {
909 /** Max handles as supported by firmware for UVD */
910 __u32 uvd_max_handles;
911 /** Handles currently in use for UVD */
912 __u32 uvd_used_handles;
913};
914
915#define AMDGPU_VCE_CLOCK_TABLE_ENTRIES 6
916
917struct drm_amdgpu_info_vce_clock_table_entry {
918 /** System clock */
919 __u32 sclk;
920 /** Memory clock */
921 __u32 mclk;
922 /** VCE clock */
923 __u32 eclk;
924 __u32 pad;
925};
926
927struct drm_amdgpu_info_vce_clock_table {
928 struct drm_amdgpu_info_vce_clock_table_entry entries[AMDGPU_VCE_CLOCK_TABLE_ENTRIES];
929 __u32 num_valid_entries;
930 __u32 pad;
642}; 931};
643 932
644/* 933/*
645 * Supported GPU families 934 * Supported GPU families
646 */ 935 */
647#define AMDGPU_FAMILY_UNKNOWN 0 936#define AMDGPU_FAMILY_UNKNOWN 0
937#define AMDGPU_FAMILY_SI 110 /* Hainan, Oland, Verde, Pitcairn, Tahiti */
648#define AMDGPU_FAMILY_CI 120 /* Bonaire, Hawaii */ 938#define AMDGPU_FAMILY_CI 120 /* Bonaire, Hawaii */
649#define AMDGPU_FAMILY_KV 125 /* Kaveri, Kabini, Mullins */ 939#define AMDGPU_FAMILY_KV 125 /* Kaveri, Kabini, Mullins */
650#define AMDGPU_FAMILY_VI 130 /* Iceland, Tonga */ 940#define AMDGPU_FAMILY_VI 130 /* Iceland, Tonga */
651#define AMDGPU_FAMILY_CZ 135 /* Carrizo, Stoney */ 941#define AMDGPU_FAMILY_CZ 135 /* Carrizo, Stoney */
942#define AMDGPU_FAMILY_AI 141 /* Vega10 */
943#define AMDGPU_FAMILY_RV 142 /* Raven */
652 944
653#if defined(__cplusplus) 945#if defined(__cplusplus)
654} 946}
diff --git a/include/drm/drm.h b/include/drm/drm.h
index f6fd5c2c..f0bd91de 100644
--- a/include/drm/drm.h
+++ b/include/drm/drm.h
@@ -641,6 +641,8 @@ struct drm_gem_open {
641#define DRM_CAP_CURSOR_HEIGHT 0x9 641#define DRM_CAP_CURSOR_HEIGHT 0x9
642#define DRM_CAP_ADDFB2_MODIFIERS 0x10 642#define DRM_CAP_ADDFB2_MODIFIERS 0x10
643#define DRM_CAP_PAGE_FLIP_TARGET 0x11 643#define DRM_CAP_PAGE_FLIP_TARGET 0x11
644#define DRM_CAP_CRTC_IN_VBLANK_EVENT 0x12
645#define DRM_CAP_SYNCOBJ 0x13
644 646
645/** DRM_IOCTL_GET_CAP ioctl argument type */ 647/** DRM_IOCTL_GET_CAP ioctl argument type */
646struct drm_get_cap { 648struct drm_get_cap {
@@ -690,6 +692,67 @@ struct drm_prime_handle {
690 __s32 fd; 692 __s32 fd;
691}; 693};
692 694
695struct drm_syncobj_create {
696 __u32 handle;
697#define DRM_SYNCOBJ_CREATE_SIGNALED (1 << 0)
698 __u32 flags;
699};
700
701struct drm_syncobj_destroy {
702 __u32 handle;
703 __u32 pad;
704};
705
706#define DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE (1 << 0)
707#define DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE (1 << 0)
708struct drm_syncobj_handle {
709 __u32 handle;
710 __u32 flags;
711
712 __s32 fd;
713 __u32 pad;
714};
715
716#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL (1 << 0)
717#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (1 << 1)
718struct drm_syncobj_wait {
719 __u64 handles;
720 /* absolute timeout */
721 __s64 timeout_nsec;
722 __u32 count_handles;
723 __u32 flags;
724 __u32 first_signaled; /* only valid when not waiting all */
725 __u32 pad;
726};
727
728struct drm_syncobj_array {
729 __u64 handles;
730 __u32 count_handles;
731 __u32 pad;
732};
733
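A brief sketch tying the new syncobj structures together: create a syncobj, export it as an fd, then drop the local handle. The ioctl numbers appear further down in this header, and libdrm also offers drmSyncobjCreate()/drmSyncobjHandleToFD() wrappers.

/* Sketch: create a syncobj, hand it out as an fd, and release the handle. */
#include <drm.h>
#include <xf86drm.h>
#include <string.h>

static int export_new_syncobj(int fd, int *out_fd)
{
	struct drm_syncobj_create create;
	struct drm_syncobj_handle handle;
	struct drm_syncobj_destroy destroy;
	int r;

	memset(&create, 0, sizeof(create));
	r = drmIoctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &create);
	if (r)
		return r;

	memset(&handle, 0, sizeof(handle));
	handle.handle = create.handle;
	/* flags = 0 exports an opaque syncobj fd;
	 * DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE would give a sync_file. */
	r = drmIoctl(fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &handle);
	if (!r)
		*out_fd = handle.fd;

	memset(&destroy, 0, sizeof(destroy));
	destroy.handle = create.handle;   /* the exported fd keeps the object alive */
	drmIoctl(fd, DRM_IOCTL_SYNCOBJ_DESTROY, &destroy);
	return r;
}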
734/* Query current scanout sequence number */
735struct drm_crtc_get_sequence {
736 __u32 crtc_id; /* requested crtc_id */
737 __u32 active; /* return: crtc output is active */
738 __u64 sequence; /* return: most recent vblank sequence */
739 __s64 sequence_ns; /* return: most recent time of first pixel out */
740};
741
742/* Queue event to be delivered at specified sequence. Time stamp marks
743 * when the first pixel of the refresh cycle leaves the display engine
744 * for the display
745 */
746#define DRM_CRTC_SEQUENCE_RELATIVE 0x00000001 /* sequence is relative to current */
747#define DRM_CRTC_SEQUENCE_NEXT_ON_MISS 0x00000002 /* Use next sequence if we've missed */
748
749struct drm_crtc_queue_sequence {
750 __u32 crtc_id;
751 __u32 flags;
752 __u64 sequence; /* on input, target sequence. on output, actual sequence */
753 __u64 user_data; /* user data passed to event */
754};
755
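A possible usage sketch for the two structures above: read the current scanout sequence for a CRTC, then queue a DRM_EVENT_CRTC_SEQUENCE event one refresh later. The matching DRM_IOCTL_CRTC_* numbers are added to the ioctl table below.

/* Sketch: queue a CRTC sequence event for the next vblank. */
#include <drm.h>
#include <xf86drm.h>
#include <stdint.h>
#include <string.h>

static int queue_next_vblank_event(int fd, uint32_t crtc_id, uint64_t user_data)
{
	struct drm_crtc_get_sequence get;
	struct drm_crtc_queue_sequence queue;
	int r;

	memset(&get, 0, sizeof(get));
	get.crtc_id = crtc_id;
	r = drmIoctl(fd, DRM_IOCTL_CRTC_GET_SEQUENCE, &get);
	if (r)
		return r;

	memset(&queue, 0, sizeof(queue));
	queue.crtc_id = crtc_id;
	queue.flags = 0;                    /* absolute target sequence */
	queue.sequence = get.sequence + 1;  /* one refresh from now */
	queue.user_data = user_data;        /* echoed back in the event */
	return drmIoctl(fd, DRM_IOCTL_CRTC_QUEUE_SEQUENCE, &queue);
}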
693#if defined(__cplusplus) 756#if defined(__cplusplus)
694} 757}
695#endif 758#endif
@@ -772,6 +835,9 @@ extern "C" {
772 835
773#define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, union drm_wait_vblank) 836#define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, union drm_wait_vblank)
774 837
838#define DRM_IOCTL_CRTC_GET_SEQUENCE DRM_IOWR(0x3b, struct drm_crtc_get_sequence)
839#define DRM_IOCTL_CRTC_QUEUE_SEQUENCE DRM_IOWR(0x3c, struct drm_crtc_queue_sequence)
840
775#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw) 841#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw)
776 842
777#define DRM_IOCTL_MODE_GETRESOURCES DRM_IOWR(0xA0, struct drm_mode_card_res) 843#define DRM_IOCTL_MODE_GETRESOURCES DRM_IOWR(0xA0, struct drm_mode_card_res)
@@ -808,6 +874,19 @@ extern "C" {
808#define DRM_IOCTL_MODE_CREATEPROPBLOB DRM_IOWR(0xBD, struct drm_mode_create_blob) 874#define DRM_IOCTL_MODE_CREATEPROPBLOB DRM_IOWR(0xBD, struct drm_mode_create_blob)
809#define DRM_IOCTL_MODE_DESTROYPROPBLOB DRM_IOWR(0xBE, struct drm_mode_destroy_blob) 875#define DRM_IOCTL_MODE_DESTROYPROPBLOB DRM_IOWR(0xBE, struct drm_mode_destroy_blob)
810 876
877#define DRM_IOCTL_SYNCOBJ_CREATE DRM_IOWR(0xBF, struct drm_syncobj_create)
878#define DRM_IOCTL_SYNCOBJ_DESTROY DRM_IOWR(0xC0, struct drm_syncobj_destroy)
879#define DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD DRM_IOWR(0xC1, struct drm_syncobj_handle)
880#define DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE DRM_IOWR(0xC2, struct drm_syncobj_handle)
881#define DRM_IOCTL_SYNCOBJ_WAIT DRM_IOWR(0xC3, struct drm_syncobj_wait)
882#define DRM_IOCTL_SYNCOBJ_RESET DRM_IOWR(0xC4, struct drm_syncobj_array)
883#define DRM_IOCTL_SYNCOBJ_SIGNAL DRM_IOWR(0xC5, struct drm_syncobj_array)
884
885#define DRM_IOCTL_MODE_CREATE_LEASE DRM_IOWR(0xC6, struct drm_mode_create_lease)
886#define DRM_IOCTL_MODE_LIST_LESSEES DRM_IOWR(0xC7, struct drm_mode_list_lessees)
887#define DRM_IOCTL_MODE_GET_LEASE DRM_IOWR(0xC8, struct drm_mode_get_lease)
888#define DRM_IOCTL_MODE_REVOKE_LEASE DRM_IOWR(0xC9, struct drm_mode_revoke_lease)
889
811/** 890/**
812 * Device specific ioctls should only be in their respective headers 891 * Device specific ioctls should only be in their respective headers
813 * The device specific ioctl range is from 0x40 to 0x9f. 892 * The device specific ioctl range is from 0x40 to 0x9f.
@@ -838,6 +917,7 @@ struct drm_event {
838 917
839#define DRM_EVENT_VBLANK 0x01 918#define DRM_EVENT_VBLANK 0x01
840#define DRM_EVENT_FLIP_COMPLETE 0x02 919#define DRM_EVENT_FLIP_COMPLETE 0x02
920#define DRM_EVENT_CRTC_SEQUENCE 0x03
841 921
842struct drm_event_vblank { 922struct drm_event_vblank {
843 struct drm_event base; 923 struct drm_event base;
@@ -845,7 +925,17 @@ struct drm_event_vblank {
845 __u32 tv_sec; 925 __u32 tv_sec;
846 __u32 tv_usec; 926 __u32 tv_usec;
847 __u32 sequence; 927 __u32 sequence;
848 __u32 reserved; 928 __u32 crtc_id; /* 0 on older kernels that do not support this */
929};
930
931/* Event delivered at sequence. Time stamp marks when the first pixel
932 * of the refresh cycle leaves the display engine for the display
933 */
934struct drm_event_crtc_sequence {
935 struct drm_event base;
936 __u64 user_data;
937 __s64 time_ns;
938 __u64 sequence;
849}; 939};
850 940
851/* typedef area */ 941/* typedef area */
diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h
index 4d8da699..e04613d3 100644
--- a/include/drm/drm_fourcc.h
+++ b/include/drm/drm_fourcc.h
@@ -26,6 +26,10 @@
26 26
27#include "drm.h" 27#include "drm.h"
28 28
29#if defined(__cplusplus)
30extern "C" {
31#endif
32
29#define fourcc_code(a, b, c, d) ((__u32)(a) | ((__u32)(b) << 8) | \ 33#define fourcc_code(a, b, c, d) ((__u32)(a) | ((__u32)(b) << 8) | \
30 ((__u32)(c) << 16) | ((__u32)(d) << 24)) 34 ((__u32)(c) << 16) | ((__u32)(d) << 24))
31 35
@@ -37,10 +41,17 @@
37/* 8 bpp Red */ 41/* 8 bpp Red */
38#define DRM_FORMAT_R8 fourcc_code('R', '8', ' ', ' ') /* [7:0] R */ 42#define DRM_FORMAT_R8 fourcc_code('R', '8', ' ', ' ') /* [7:0] R */
39 43
44/* 16 bpp Red */
45#define DRM_FORMAT_R16 fourcc_code('R', '1', '6', ' ') /* [15:0] R little endian */
46
40/* 16 bpp RG */ 47/* 16 bpp RG */
41#define DRM_FORMAT_RG88 fourcc_code('R', 'G', '8', '8') /* [15:0] R:G 8:8 little endian */ 48#define DRM_FORMAT_RG88 fourcc_code('R', 'G', '8', '8') /* [15:0] R:G 8:8 little endian */
42#define DRM_FORMAT_GR88 fourcc_code('G', 'R', '8', '8') /* [15:0] G:R 8:8 little endian */ 49#define DRM_FORMAT_GR88 fourcc_code('G', 'R', '8', '8') /* [15:0] G:R 8:8 little endian */
43 50
51/* 32 bpp RG */
52#define DRM_FORMAT_RG1616 fourcc_code('R', 'G', '3', '2') /* [31:0] R:G 16:16 little endian */
53#define DRM_FORMAT_GR1616 fourcc_code('G', 'R', '3', '2') /* [31:0] G:R 16:16 little endian */
54
44/* 8 bpp RGB */ 55/* 8 bpp RGB */
45#define DRM_FORMAT_RGB332 fourcc_code('R', 'G', 'B', '8') /* [7:0] R:G:B 3:3:2 */ 56#define DRM_FORMAT_RGB332 fourcc_code('R', 'G', 'B', '8') /* [7:0] R:G:B 3:3:2 */
46#define DRM_FORMAT_BGR233 fourcc_code('B', 'G', 'R', '8') /* [7:0] B:G:R 2:3:3 */ 57#define DRM_FORMAT_BGR233 fourcc_code('B', 'G', 'R', '8') /* [7:0] B:G:R 2:3:3 */
@@ -103,6 +114,20 @@
103#define DRM_FORMAT_AYUV fourcc_code('A', 'Y', 'U', 'V') /* [31:0] A:Y:Cb:Cr 8:8:8:8 little endian */ 114#define DRM_FORMAT_AYUV fourcc_code('A', 'Y', 'U', 'V') /* [31:0] A:Y:Cb:Cr 8:8:8:8 little endian */
104 115
105/* 116/*
117 * 2 plane RGB + A
118 * index 0 = RGB plane, same format as the corresponding non _A8 format has
119 * index 1 = A plane, [7:0] A
120 */
121#define DRM_FORMAT_XRGB8888_A8 fourcc_code('X', 'R', 'A', '8')
122#define DRM_FORMAT_XBGR8888_A8 fourcc_code('X', 'B', 'A', '8')
123#define DRM_FORMAT_RGBX8888_A8 fourcc_code('R', 'X', 'A', '8')
124#define DRM_FORMAT_BGRX8888_A8 fourcc_code('B', 'X', 'A', '8')
125#define DRM_FORMAT_RGB888_A8 fourcc_code('R', '8', 'A', '8')
126#define DRM_FORMAT_BGR888_A8 fourcc_code('B', '8', 'A', '8')
127#define DRM_FORMAT_RGB565_A8 fourcc_code('R', '5', 'A', '8')
128#define DRM_FORMAT_BGR565_A8 fourcc_code('B', '5', 'A', '8')
129
130/*
106 * 2 plane YCbCr 131 * 2 plane YCbCr
107 * index 0 = Y plane, [7:0] Y 132 * index 0 = Y plane, [7:0] Y
108 * index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian 133 * index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian
@@ -150,15 +175,20 @@
150 175
151/* Vendor Ids: */ 176/* Vendor Ids: */
152#define DRM_FORMAT_MOD_NONE 0 177#define DRM_FORMAT_MOD_NONE 0
178#define DRM_FORMAT_MOD_VENDOR_NONE 0
153#define DRM_FORMAT_MOD_VENDOR_INTEL 0x01 179#define DRM_FORMAT_MOD_VENDOR_INTEL 0x01
154#define DRM_FORMAT_MOD_VENDOR_AMD 0x02 180#define DRM_FORMAT_MOD_VENDOR_AMD 0x02
155#define DRM_FORMAT_MOD_VENDOR_NV 0x03 181#define DRM_FORMAT_MOD_VENDOR_NVIDIA 0x03
156#define DRM_FORMAT_MOD_VENDOR_SAMSUNG 0x04 182#define DRM_FORMAT_MOD_VENDOR_SAMSUNG 0x04
157#define DRM_FORMAT_MOD_VENDOR_QCOM 0x05 183#define DRM_FORMAT_MOD_VENDOR_QCOM 0x05
184#define DRM_FORMAT_MOD_VENDOR_VIVANTE 0x06
185#define DRM_FORMAT_MOD_VENDOR_BROADCOM 0x07
158/* add more to the end as needed */ 186/* add more to the end as needed */
159 187
188#define DRM_FORMAT_RESERVED ((1ULL << 56) - 1)
189
160#define fourcc_mod_code(vendor, val) \ 190#define fourcc_mod_code(vendor, val) \
161 ((((__u64)DRM_FORMAT_MOD_VENDOR_## vendor) << 56) | (val & 0x00ffffffffffffffULL)) 191 ((((__u64)DRM_FORMAT_MOD_VENDOR_## vendor) << 56) | ((val) & 0x00ffffffffffffffULL))
162 192
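To make the bit layout concrete, a small self-contained sketch composing a fourcc and a vendor modifier with the two macros above; the include path assumes libdrm's installed headers.

/* Sketch: build a fourcc and a vendor modifier by hand and print them. */
#include <drm_fourcc.h>
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	/* 'X','R','2','4' packs little-endian into bits 0..31 */
	uint32_t fmt = fourcc_code('X', 'R', '2', '4');  /* DRM_FORMAT_XRGB8888 */
	/* vendor id goes into bits 56..63, the value into bits 0..55 */
	uint64_t mod = fourcc_mod_code(SAMSUNG, 1);      /* SAMSUNG_64_32_TILE */

	printf("format 0x%08" PRIx32 ", modifier 0x%016" PRIx64 "\n", fmt, mod);
	return 0;
}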
163/* 193/*
164 * Format Modifier tokens: 194 * Format Modifier tokens:
@@ -168,6 +198,25 @@
168 * authoritative source for all of these. 198 * authoritative source for all of these.
169 */ 199 */
170 200
201/*
202 * Invalid Modifier
203 *
204 * This modifier can be used as a sentinel to terminate the format modifiers
205 * list, or to initialize a variable with an invalid modifier. It might also be
206 * used to report an error back to userspace for certain APIs.
207 */
208#define DRM_FORMAT_MOD_INVALID fourcc_mod_code(NONE, DRM_FORMAT_RESERVED)
209
210/*
211 * Linear Layout
212 *
213 * Just plain linear layout. Note that this is different from not specifying any
214 * modifier (e.g. not setting DRM_MODE_FB_MODIFIERS in the DRM_ADDFB2 ioctl),
215 * which tells the driver to also take driver-internal information into account
216 * and so might actually result in a tiled framebuffer.
217 */
218#define DRM_FORMAT_MOD_LINEAR fourcc_mod_code(NONE, 0)
219
171/* Intel framebuffer modifiers */ 220/* Intel framebuffer modifiers */
172 221
173/* 222/*
@@ -215,6 +264,26 @@
215#define I915_FORMAT_MOD_Yf_TILED fourcc_mod_code(INTEL, 3) 264#define I915_FORMAT_MOD_Yf_TILED fourcc_mod_code(INTEL, 3)
216 265
217/* 266/*
267 * Intel color control surface (CCS) for render compression
268 *
269 * The framebuffer format must be one of the 8:8:8:8 RGB formats.
270 * The main surface will be plane index 0 and must be Y/Yf-tiled,
271 * the CCS will be plane index 1.
272 *
273 * Each CCS tile matches a 1024x512 pixel area of the main surface.
274 * To match certain aspects of the 3D hardware the CCS is
275 * considered to be made up of normal 128Bx32 Y tiles. Thus
276 * the CCS pitch must be specified in multiples of 128 bytes.
277 *
278 * In reality the CCS tile appears to be a 64Bx64 Y tile, composed
279 * of QWORD (8 bytes) chunks instead of OWORD (16 bytes) chunks.
280 * But that fact is not relevant unless the memory is accessed
281 * directly.
282 */
283#define I915_FORMAT_MOD_Y_TILED_CCS fourcc_mod_code(INTEL, 4)
284#define I915_FORMAT_MOD_Yf_TILED_CCS fourcc_mod_code(INTEL, 5)
285
286/*
218 * Tiled, NV12MT, grouped in 64 (pixels) x 32 (lines) -sized macroblocks 287 * Tiled, NV12MT, grouped in 64 (pixels) x 32 (lines) -sized macroblocks
219 * 288 *
220 * Macroblocks are laid in a Z-shape, and each pixel data is following the 289 * Macroblocks are laid in a Z-shape, and each pixel data is following the
@@ -229,4 +298,115 @@
229 */ 298 */
230#define DRM_FORMAT_MOD_SAMSUNG_64_32_TILE fourcc_mod_code(SAMSUNG, 1) 299#define DRM_FORMAT_MOD_SAMSUNG_64_32_TILE fourcc_mod_code(SAMSUNG, 1)
231 300
301/* Vivante framebuffer modifiers */
302
303/*
304 * Vivante 4x4 tiling layout
305 *
306 * This is a simple tiled layout using tiles of 4x4 pixels in a row-major
307 * layout.
308 */
309#define DRM_FORMAT_MOD_VIVANTE_TILED fourcc_mod_code(VIVANTE, 1)
310
311/*
312 * Vivante 64x64 super-tiling layout
313 *
314 * This is a tiled layout using 64x64 pixel super-tiles, where each super-tile
315 * contains 8x4 groups of 2x4 tiles of 4x4 pixels (like above) each, all in row-
316 * major layout.
317 *
318 * For more information: see
319 * https://github.com/etnaviv/etna_viv/blob/master/doc/hardware.md#texture-tiling
320 */
321#define DRM_FORMAT_MOD_VIVANTE_SUPER_TILED fourcc_mod_code(VIVANTE, 2)
322
323/*
324 * Vivante 4x4 tiling layout for dual-pipe
325 *
326 * Same as the 4x4 tiling layout, except every second 4x4 pixel tile starts at a
327 * different base address. Offsets from the base addresses are therefore halved
328 * compared to the non-split tiled layout.
329 */
330#define DRM_FORMAT_MOD_VIVANTE_SPLIT_TILED fourcc_mod_code(VIVANTE, 3)
331
332/*
333 * Vivante 64x64 super-tiling layout for dual-pipe
334 *
335 * Same as the 64x64 super-tiling layout, except every second 4x4 pixel tile
336 * starts at a different base address. Offsets from the base addresses are
337 * therefore halved compared to the non-split super-tiled layout.
338 */
339#define DRM_FORMAT_MOD_VIVANTE_SPLIT_SUPER_TILED fourcc_mod_code(VIVANTE, 4)
340
341/* NVIDIA frame buffer modifiers */
342
343/*
344 * Tegra Tiled Layout, used by Tegra 2, 3 and 4.
345 *
346 * Pixels are arranged in simple tiles of 16 x 16 bytes.
347 */
348#define DRM_FORMAT_MOD_NVIDIA_TEGRA_TILED fourcc_mod_code(NVIDIA, 1)
349
350/*
351 * 16Bx2 Block Linear layout, used by desktop GPUs, and Tegra K1 and later
352 *
353 * Pixels are arranged in 64x8 Groups Of Bytes (GOBs). GOBs are then stacked
354 * vertically by a power of 2 (1 to 32 GOBs) to form a block.
355 *
356 * Within a GOB, data is ordered as 16B x 2 lines sectors laid in Z-shape.
357 *
358 * Parameter 'v' is the log2 encoding of the number of GOBs stacked vertically.
359 * Valid values are:
360 *
361 * 0 == ONE_GOB
362 * 1 == TWO_GOBS
363 * 2 == FOUR_GOBS
364 * 3 == EIGHT_GOBS
365 * 4 == SIXTEEN_GOBS
366 * 5 == THIRTYTWO_GOBS
367 *
368 * Chapter 20 "Pixel Memory Formats" of the Tegra X1 TRM describes this format
369 * in full detail.
370 */
371#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(v) \
372 fourcc_mod_code(NVIDIA, 0x10 | ((v) & 0xf))
373
374#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_ONE_GOB \
375 fourcc_mod_code(NVIDIA, 0x10)
376#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_TWO_GOB \
377 fourcc_mod_code(NVIDIA, 0x11)
378#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_FOUR_GOB \
379 fourcc_mod_code(NVIDIA, 0x12)
380#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_EIGHT_GOB \
381 fourcc_mod_code(NVIDIA, 0x13)
382#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_SIXTEEN_GOB \
383 fourcc_mod_code(NVIDIA, 0x14)
384#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_THIRTYTWO_GOB \
385 fourcc_mod_code(NVIDIA, 0x15)
386
387/*
388 * Broadcom VC4 "T" format
389 *
390 * This is the primary layout that the V3D GPU can texture from (it
391 * can't do linear). The T format has:
392 *
393 * - 64b utiles of pixels in a raster-order grid according to cpp. It's 4x4
394 * pixels at 32 bit depth.
395 *
396 * - 1k subtiles made of a 4x4 raster-order grid of 64b utiles (so usually
397 * 16x16 pixels).
398 *
399 * - 4k tiles made of a 2x2 grid of 1k subtiles (so usually 32x32 pixels). On
400 * even 4k tile rows, they're arranged as (BL, TL, TR, BR), and on odd rows
401 * they're (TR, BR, BL, TL), where bottom left is start of memory.
402 *
403 * - an image made of 4k tiles in rows either left-to-right (even rows of 4k
404 * tiles) or right-to-left (odd rows of 4k tiles).
405 */
406#define DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED fourcc_mod_code(BROADCOM, 1)
407
408#if defined(__cplusplus)
409}
410#endif
411
232#endif /* DRM_FOURCC_H */ 412#endif /* DRM_FOURCC_H */
diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h
index 6708e2b7..5f9fadbd 100644
--- a/include/drm/drm_mode.h
+++ b/include/drm/drm_mode.h
@@ -38,16 +38,24 @@ extern "C" {
38#define DRM_DISPLAY_MODE_LEN 32 38#define DRM_DISPLAY_MODE_LEN 32
39#define DRM_PROP_NAME_LEN 32 39#define DRM_PROP_NAME_LEN 32
40 40
41#define DRM_MODE_TYPE_BUILTIN (1<<0) 41#define DRM_MODE_TYPE_BUILTIN (1<<0) /* deprecated */
42#define DRM_MODE_TYPE_CLOCK_C ((1<<1) | DRM_MODE_TYPE_BUILTIN) 42#define DRM_MODE_TYPE_CLOCK_C ((1<<1) | DRM_MODE_TYPE_BUILTIN) /* deprecated */
43#define DRM_MODE_TYPE_CRTC_C ((1<<2) | DRM_MODE_TYPE_BUILTIN) 43#define DRM_MODE_TYPE_CRTC_C ((1<<2) | DRM_MODE_TYPE_BUILTIN) /* deprecated */
44#define DRM_MODE_TYPE_PREFERRED (1<<3) 44#define DRM_MODE_TYPE_PREFERRED (1<<3)
45#define DRM_MODE_TYPE_DEFAULT (1<<4) 45#define DRM_MODE_TYPE_DEFAULT (1<<4) /* deprecated */
46#define DRM_MODE_TYPE_USERDEF (1<<5) 46#define DRM_MODE_TYPE_USERDEF (1<<5)
47#define DRM_MODE_TYPE_DRIVER (1<<6) 47#define DRM_MODE_TYPE_DRIVER (1<<6)
48 48
49/* Video mode flags */ 49/* Video mode flags */
50/* bit compatible with the xorg definitions. */ 50/* bit compatible with the xrandr RR_ definitions (bits 0-13)
51 *
52 * ABI warning: Existing userspace really expects
53 * the mode flags to match the xrandr definitions. Any
54 * changes that don't match the xrandr definitions will
55 * likely need a new client cap or some other mechanism
56 * to avoid breaking existing userspace. This includes
57 * allocating new flags in the previously unused bits!
58 */
51#define DRM_MODE_FLAG_PHSYNC (1<<0) 59#define DRM_MODE_FLAG_PHSYNC (1<<0)
52#define DRM_MODE_FLAG_NHSYNC (1<<1) 60#define DRM_MODE_FLAG_NHSYNC (1<<1)
53#define DRM_MODE_FLAG_PVSYNC (1<<2) 61#define DRM_MODE_FLAG_PVSYNC (1<<2)
@@ -58,8 +66,8 @@ extern "C" {
58#define DRM_MODE_FLAG_PCSYNC (1<<7) 66#define DRM_MODE_FLAG_PCSYNC (1<<7)
59#define DRM_MODE_FLAG_NCSYNC (1<<8) 67#define DRM_MODE_FLAG_NCSYNC (1<<8)
60#define DRM_MODE_FLAG_HSKEW (1<<9) /* hskew provided */ 68#define DRM_MODE_FLAG_HSKEW (1<<9) /* hskew provided */
61#define DRM_MODE_FLAG_BCAST (1<<10) 69#define DRM_MODE_FLAG_BCAST (1<<10) /* deprecated */
62#define DRM_MODE_FLAG_PIXMUX (1<<11) 70#define DRM_MODE_FLAG_PIXMUX (1<<11) /* deprecated */
63#define DRM_MODE_FLAG_DBLCLK (1<<12) 71#define DRM_MODE_FLAG_DBLCLK (1<<12)
64#define DRM_MODE_FLAG_CLKDIV2 (1<<13) 72#define DRM_MODE_FLAG_CLKDIV2 (1<<13)
65 /* 73 /*
@@ -67,7 +75,7 @@ extern "C" {
67 * (define not exposed to user space). 75 * (define not exposed to user space).
68 */ 76 */
69#define DRM_MODE_FLAG_3D_MASK (0x1f<<14) 77#define DRM_MODE_FLAG_3D_MASK (0x1f<<14)
70#define DRM_MODE_FLAG_3D_NONE (0<<14) 78#define DRM_MODE_FLAG_3D_NONE (0<<14)
71#define DRM_MODE_FLAG_3D_FRAME_PACKING (1<<14) 79#define DRM_MODE_FLAG_3D_FRAME_PACKING (1<<14)
72#define DRM_MODE_FLAG_3D_FIELD_ALTERNATIVE (2<<14) 80#define DRM_MODE_FLAG_3D_FIELD_ALTERNATIVE (2<<14)
73#define DRM_MODE_FLAG_3D_LINE_ALTERNATIVE (3<<14) 81#define DRM_MODE_FLAG_3D_LINE_ALTERNATIVE (3<<14)
@@ -77,6 +85,19 @@ extern "C" {
77#define DRM_MODE_FLAG_3D_TOP_AND_BOTTOM (7<<14) 85#define DRM_MODE_FLAG_3D_TOP_AND_BOTTOM (7<<14)
78#define DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF (8<<14) 86#define DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF (8<<14)
79 87
88/* Picture aspect ratio options */
89#define DRM_MODE_PICTURE_ASPECT_NONE 0
90#define DRM_MODE_PICTURE_ASPECT_4_3 1
91#define DRM_MODE_PICTURE_ASPECT_16_9 2
92
93/* Aspect ratio flag bitmask (4 bits 22:19) */
94#define DRM_MODE_FLAG_PIC_AR_MASK (0x0F<<19)
95#define DRM_MODE_FLAG_PIC_AR_NONE \
96 (DRM_MODE_PICTURE_ASPECT_NONE<<19)
97#define DRM_MODE_FLAG_PIC_AR_4_3 \
98 (DRM_MODE_PICTURE_ASPECT_4_3<<19)
99#define DRM_MODE_FLAG_PIC_AR_16_9 \
100 (DRM_MODE_PICTURE_ASPECT_16_9<<19)
80 101
81/* DPMS flags */ 102/* DPMS flags */
82/* bit compatible with the xorg definitions. */ 103/* bit compatible with the xorg definitions. */
@@ -92,11 +113,6 @@ extern "C" {
92#define DRM_MODE_SCALE_CENTER 2 /* Centered, no scaling */ 113#define DRM_MODE_SCALE_CENTER 2 /* Centered, no scaling */
93#define DRM_MODE_SCALE_ASPECT 3 /* Full screen, preserve aspect */ 114#define DRM_MODE_SCALE_ASPECT 3 /* Full screen, preserve aspect */
94 115
95/* Picture aspect ratio options */
96#define DRM_MODE_PICTURE_ASPECT_NONE 0
97#define DRM_MODE_PICTURE_ASPECT_4_3 1
98#define DRM_MODE_PICTURE_ASPECT_16_9 2
99
100/* Dithering mode options */ 116/* Dithering mode options */
101#define DRM_MODE_DITHERING_OFF 0 117#define DRM_MODE_DITHERING_OFF 0
102#define DRM_MODE_DITHERING_ON 1 118#define DRM_MODE_DITHERING_ON 1
@@ -107,13 +123,60 @@ extern "C" {
107#define DRM_MODE_DIRTY_ON 1 123#define DRM_MODE_DIRTY_ON 1
108#define DRM_MODE_DIRTY_ANNOTATE 2 124#define DRM_MODE_DIRTY_ANNOTATE 2
109 125
110/* rotation property bits */ 126/* Link Status options */
111#define DRM_ROTATE_0 0 127#define DRM_MODE_LINK_STATUS_GOOD 0
112#define DRM_ROTATE_90 1 128#define DRM_MODE_LINK_STATUS_BAD 1
113#define DRM_ROTATE_180 2 129
114#define DRM_ROTATE_270 3 130/*
115#define DRM_REFLECT_X 4 131 * DRM_MODE_ROTATE_<degrees>
116#define DRM_REFLECT_Y 5 132 *
133 * Signals that a drm plane has been rotated <degrees> degrees in counter
134 * clockwise direction.
135 *
136 * This define is provided as a convenience, looking up the property id
137 * using the name->prop id lookup is the preferred method.
138 */
139#define DRM_MODE_ROTATE_0 (1<<0)
140#define DRM_MODE_ROTATE_90 (1<<1)
141#define DRM_MODE_ROTATE_180 (1<<2)
142#define DRM_MODE_ROTATE_270 (1<<3)
143
144/*
145 * DRM_MODE_ROTATE_MASK
146 *
147 * Bitmask used to look for drm plane rotations.
148 */
149#define DRM_MODE_ROTATE_MASK (\
150 DRM_MODE_ROTATE_0 | \
151 DRM_MODE_ROTATE_90 | \
152 DRM_MODE_ROTATE_180 | \
153 DRM_MODE_ROTATE_270)
154
155/*
156 * DRM_MODE_REFLECT_<axis>
157 *
158 * Signals that the contents of a drm plane are reflected in the <axis> axis,
159 * in the same way as mirroring.
160 *
161 * This define is provided as a convenience, looking up the property id
162 * using the name->prop id lookup is the preferred method.
163 */
164#define DRM_MODE_REFLECT_X (1<<4)
165#define DRM_MODE_REFLECT_Y (1<<5)
166
167/*
168 * DRM_MODE_REFLECT_MASK
169 *
170 * Bitmask used to look for drm plane reflections.
171 */
172#define DRM_MODE_REFLECT_MASK (\
173 DRM_MODE_REFLECT_X | \
174 DRM_MODE_REFLECT_Y)
175
176/* Content Protection Flags */
177#define DRM_MODE_CONTENT_PROTECTION_UNDESIRED 0
178#define DRM_MODE_CONTENT_PROTECTION_DESIRED 1
179#define DRM_MODE_CONTENT_PROTECTION_ENABLED 2
117 180
118struct drm_mode_modeinfo { 181struct drm_mode_modeinfo {
119 __u32 clock; 182 __u32 clock;
@@ -228,14 +291,16 @@ struct drm_mode_get_encoder {
228 291
229/* This is for connectors with multiple signal types. */ 292/* This is for connectors with multiple signal types. */
230/* Try to match DRM_MODE_CONNECTOR_X as closely as possible. */ 293/* Try to match DRM_MODE_CONNECTOR_X as closely as possible. */
231#define DRM_MODE_SUBCONNECTOR_Automatic 0 294enum drm_mode_subconnector {
232#define DRM_MODE_SUBCONNECTOR_Unknown 0 295 DRM_MODE_SUBCONNECTOR_Automatic = 0,
233#define DRM_MODE_SUBCONNECTOR_DVID 3 296 DRM_MODE_SUBCONNECTOR_Unknown = 0,
234#define DRM_MODE_SUBCONNECTOR_DVIA 4 297 DRM_MODE_SUBCONNECTOR_DVID = 3,
235#define DRM_MODE_SUBCONNECTOR_Composite 5 298 DRM_MODE_SUBCONNECTOR_DVIA = 4,
236#define DRM_MODE_SUBCONNECTOR_SVIDEO 6 299 DRM_MODE_SUBCONNECTOR_Composite = 5,
237#define DRM_MODE_SUBCONNECTOR_Component 8 300 DRM_MODE_SUBCONNECTOR_SVIDEO = 6,
238#define DRM_MODE_SUBCONNECTOR_SCART 9 301 DRM_MODE_SUBCONNECTOR_Component = 8,
302 DRM_MODE_SUBCONNECTOR_SCART = 9,
303};
239 304
240#define DRM_MODE_CONNECTOR_Unknown 0 305#define DRM_MODE_CONNECTOR_Unknown 0
241#define DRM_MODE_CONNECTOR_VGA 1 306#define DRM_MODE_CONNECTOR_VGA 1
@@ -280,7 +345,7 @@ struct drm_mode_get_connector {
280 __u32 pad; 345 __u32 pad;
281}; 346};
282 347
283#define DRM_MODE_PROP_PENDING (1<<0) 348#define DRM_MODE_PROP_PENDING (1<<0) /* deprecated, do not use */
284#define DRM_MODE_PROP_RANGE (1<<1) 349#define DRM_MODE_PROP_RANGE (1<<1)
285#define DRM_MODE_PROP_IMMUTABLE (1<<2) 350#define DRM_MODE_PROP_IMMUTABLE (1<<2)
286#define DRM_MODE_PROP_ENUM (1<<3) /* enumerated type with text strings */ 351#define DRM_MODE_PROP_ENUM (1<<3) /* enumerated type with text strings */
@@ -400,17 +465,20 @@ struct drm_mode_fb_cmd2 {
400 * offsets[1]. Note that offsets[0] will generally 465 * offsets[1]. Note that offsets[0] will generally
401 * be 0 (but this is not required). 466 * be 0 (but this is not required).
402 * 467 *
403 * To accommodate tiled, compressed, etc formats, a per-plane 468 * To accommodate tiled, compressed, etc formats, a
404 * modifier can be specified. The default value of zero 469 * modifier can be specified. The default value of zero
405 * indicates "native" format as specified by the fourcc. 470 * indicates "native" format as specified by the fourcc.
406 * Vendor specific modifier token. This allows, for example, 471 * Vendor specific modifier token. Note that even though
407 * different tiling/swizzling pattern on different planes. 472 * it looks like we have a modifier per-plane, we in fact
408 * See discussion above of DRM_FORMAT_MOD_xxx. 473 * do not. The modifier for each plane must be identical.
474 * Thus all combinations of different data layouts for
475 * multi plane formats must be enumerated as separate
476 * modifiers.
409 */ 477 */
410 __u32 handles[4]; 478 __u32 handles[4];
411 __u32 pitches[4]; /* pitch for each plane */ 479 __u32 pitches[4]; /* pitch for each plane */
412 __u32 offsets[4]; /* offset of each plane */ 480 __u32 offsets[4]; /* offset of each plane */
413 __u64 modifier[4]; /* ie, tiling, compressed (per plane) */ 481 __u64 modifier[4]; /* ie, tiling, compress */
414}; 482};
415 483
416#define DRM_MODE_FB_DIRTY_ANNOTATE_COPY 0x01 484#define DRM_MODE_FB_DIRTY_ANNOTATE_COPY 0x01
@@ -512,8 +580,11 @@ struct drm_mode_crtc_lut {
512}; 580};
513 581
514struct drm_color_ctm { 582struct drm_color_ctm {
515 /* Conversion matrix in S31.32 format. */ 583 /*
516 __s64 matrix[9]; 584 * Conversion matrix in S31.32 sign-magnitude
585 * (not two's complement!) format.
586 */
587 __u64 matrix[9];
517}; 588};
518 589
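Because the matrix is sign-magnitude rather than two's complement, converting a floating-point coefficient needs an explicit sign step; a possible helper (illustrative only, link with -lm):

/* Sketch: encode a CTM coefficient as S31.32 sign-magnitude. */
#include <math.h>
#include <stdint.h>

static uint64_t ctm_coeff(double v)
{
	uint64_t mag = (uint64_t)llround(fabs(v) * 4294967296.0);  /* |v| * 2^32 */

	/* bit 63 carries the sign; the low 63 bits carry the magnitude */
	return (v < 0 ? (1ULL << 63) : 0) | (mag & ~(1ULL << 63));
}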
519struct drm_color_lut { 590struct drm_color_lut {
@@ -637,13 +708,6 @@ struct drm_mode_destroy_dumb {
637 DRM_MODE_ATOMIC_NONBLOCK |\ 708 DRM_MODE_ATOMIC_NONBLOCK |\
638 DRM_MODE_ATOMIC_ALLOW_MODESET) 709 DRM_MODE_ATOMIC_ALLOW_MODESET)
639 710
640#define DRM_MODE_ATOMIC_FLAGS (\
641 DRM_MODE_PAGE_FLIP_EVENT |\
642 DRM_MODE_PAGE_FLIP_ASYNC |\
643 DRM_MODE_ATOMIC_TEST_ONLY |\
644 DRM_MODE_ATOMIC_NONBLOCK |\
645 DRM_MODE_ATOMIC_ALLOW_MODESET)
646
647struct drm_mode_atomic { 711struct drm_mode_atomic {
648 __u32 flags; 712 __u32 flags;
649 __u32 count_objs; 713 __u32 count_objs;
@@ -655,6 +719,56 @@ struct drm_mode_atomic {
655 __u64 user_data; 719 __u64 user_data;
656}; 720};
657 721
722struct drm_format_modifier_blob {
723#define FORMAT_BLOB_CURRENT 1
724 /* Version of this blob format */
725 __u32 version;
726
727 /* Flags */
728 __u32 flags;
729
730 /* Number of fourcc formats supported */
731 __u32 count_formats;
732
733 /* Where in this blob the formats exist (in bytes) */
734 __u32 formats_offset;
735
736 /* Number of drm_format_modifiers */
737 __u32 count_modifiers;
738
739 /* Where in this blob the modifiers exist (in bytes) */
740 __u32 modifiers_offset;
741
742 /* __u32 formats[] */
743 /* struct drm_format_modifier modifiers[] */
744};
745
746struct drm_format_modifier {
747 /* Bitmask of formats in get_plane format list this info applies to. The
748 * offset allows a sliding window of which 64 formats (bits).
749 *
750 * Some examples:
751 * In today's world with < 65 formats, and formats 0, and 2 are
752 * supported
753 * 0x0000000000000005
754 * ^-offset = 0, formats = 5
755 *
756 * If the number formats grew to 128, and formats 98-102 are
757 * supported with the modifier:
758 *
759 * 0x0000007c00000000 0000000000000000
760 * ^
761 * |__offset = 64, formats = 0x7c00000000
762 *
763 */
764 __u64 formats;
765 __u32 offset;
766 __u32 pad;
767
768 /* The modifier that applies to the >get_plane format list bitmask. */
769 __u64 modifier;
770};
771
658/** 772/**
659 * Create a new 'blob' data property, copying length bytes from data pointer, 773 * Create a new 'blob' data property, copying length bytes from data pointer,
660 * and returning new blob ID. 774 * and returning new blob ID.
@@ -675,6 +789,72 @@ struct drm_mode_destroy_blob {
675 __u32 blob_id; 789 __u32 blob_id;
676}; 790};
677 791
792/**
793 * Lease mode resources, creating another drm_master.
794 */
795struct drm_mode_create_lease {
796 /** Pointer to array of object ids (__u32) */
797 __u64 object_ids;
798 /** Number of object ids */
799 __u32 object_count;
800 /** flags for new FD (O_CLOEXEC, etc) */
801 __u32 flags;
802
803 /** Return: unique identifier for lessee. */
804 __u32 lessee_id;
805 /** Return: file descriptor to new drm_master file */
806 __u32 fd;
807};
808
809/**
810 * List lesses from a drm_master
811 */
812struct drm_mode_list_lessees {
813 /** Number of lessees.
814 * On input, provides length of the array.
815 * On output, provides total number. No
816 * more than the input number will be written
817 * back, so two calls can be used to get
818 * the size and then the data.
819 */
820 __u32 count_lessees;
821 __u32 pad;
822
823 /** Pointer to lessees.
824 * pointer to __u64 array of lessee ids
825 */
826 __u64 lessees_ptr;
827};
828
829/**
830 * Get leased objects
831 */
832struct drm_mode_get_lease {
833 /** Number of leased objects.
834 * On input, provides length of the array.
835 * On output, provides total number. No
836 * more than the input number will be written
837 * back, so two calls can be used to get
838 * the size and then the data.
839 */
840 __u32 count_objects;
841 __u32 pad;
842
843 /** Pointer to objects.
844 * pointer to __u32 array of object ids
845 */
846 __u64 objects_ptr;
847};
848
849/**
850 * Revoke lease
851 */
852struct drm_mode_revoke_lease {
853 /** Unique ID of lessee
854 */
855 __u32 lessee_id;
856};
857
678#if defined(__cplusplus) 858#if defined(__cplusplus)
679} 859}
680#endif 860#endif
diff --git a/include/drm/drm_sarea.h b/include/drm/drm_sarea.h
index 502934ed..93025be8 100644
--- a/include/drm/drm_sarea.h
+++ b/include/drm/drm_sarea.h
@@ -34,6 +34,10 @@
34 34
35#include "drm.h" 35#include "drm.h"
36 36
37#if defined(__cplusplus)
38extern "C" {
39#endif
40
37/* SAREA area needs to be at least a page */ 41/* SAREA area needs to be at least a page */
38#if defined(__alpha__) 42#if defined(__alpha__)
39#define SAREA_MAX 0x2000U 43#define SAREA_MAX 0x2000U
@@ -81,4 +85,8 @@ typedef struct drm_sarea_drawable drm_sarea_drawable_t;
81typedef struct drm_sarea_frame drm_sarea_frame_t; 85typedef struct drm_sarea_frame drm_sarea_frame_t;
82typedef struct drm_sarea drm_sarea_t; 86typedef struct drm_sarea drm_sarea_t;
83 87
88#if defined(__cplusplus)
89}
90#endif
91
84#endif /* _DRM_SAREA_H_ */ 92#endif /* _DRM_SAREA_H_ */
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index 5ebe0462..16e452aa 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -86,6 +86,62 @@ enum i915_mocs_table_index {
86 I915_MOCS_CACHED, 86 I915_MOCS_CACHED,
87}; 87};
88 88
89/*
90 * Different engines serve different roles, and there may be more than one
91 * engine serving each role. enum drm_i915_gem_engine_class provides a
92 * classification of the role of the engine, which may be used when requesting
93 * operations to be performed on a certain subset of engines, or for providing
94 * information about that group.
95 */
96enum drm_i915_gem_engine_class {
97 I915_ENGINE_CLASS_RENDER = 0,
98 I915_ENGINE_CLASS_COPY = 1,
99 I915_ENGINE_CLASS_VIDEO = 2,
100 I915_ENGINE_CLASS_VIDEO_ENHANCE = 3,
101
102 I915_ENGINE_CLASS_INVALID = -1
103};
104
105/**
106 * DOC: perf_events exposed by i915 through /sys/bus/event_sources/drivers/i915
107 *
108 */
109
110enum drm_i915_pmu_engine_sample {
111 I915_SAMPLE_BUSY = 0,
112 I915_SAMPLE_WAIT = 1,
113 I915_SAMPLE_SEMA = 2
114};
115
116#define I915_PMU_SAMPLE_BITS (4)
117#define I915_PMU_SAMPLE_MASK (0xf)
118#define I915_PMU_SAMPLE_INSTANCE_BITS (8)
119#define I915_PMU_CLASS_SHIFT \
120 (I915_PMU_SAMPLE_BITS + I915_PMU_SAMPLE_INSTANCE_BITS)
121
122#define __I915_PMU_ENGINE(class, instance, sample) \
123 ((class) << I915_PMU_CLASS_SHIFT | \
124 (instance) << I915_PMU_SAMPLE_BITS | \
125 (sample))
126
127#define I915_PMU_ENGINE_BUSY(class, instance) \
128 __I915_PMU_ENGINE(class, instance, I915_SAMPLE_BUSY)
129
130#define I915_PMU_ENGINE_WAIT(class, instance) \
131 __I915_PMU_ENGINE(class, instance, I915_SAMPLE_WAIT)
132
133#define I915_PMU_ENGINE_SEMA(class, instance) \
134 __I915_PMU_ENGINE(class, instance, I915_SAMPLE_SEMA)
135
136#define __I915_PMU_OTHER(x) (__I915_PMU_ENGINE(0xff, 0xff, 0xf) + 1 + (x))
137
138#define I915_PMU_ACTUAL_FREQUENCY __I915_PMU_OTHER(0)
139#define I915_PMU_REQUESTED_FREQUENCY __I915_PMU_OTHER(1)
140#define I915_PMU_INTERRUPTS __I915_PMU_OTHER(2)
141#define I915_PMU_RC6_RESIDENCY __I915_PMU_OTHER(3)
142
143#define I915_PMU_LAST I915_PMU_RC6_RESIDENCY
144
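For orientation, a sketch of how these macros are meant to be used with perf_event_open(); the PMU type number has to be read from the sysfs node mentioned in the DOC comment above, shown here as an assumed helper.

/* Sketch: open a system-wide counter for render-engine busyness. */
#include <i915_drm.h>
#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

extern int i915_pmu_type(void);   /* assumed helper: returns the PMU's sysfs 'type' */

static int open_render_busy_counter(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = i915_pmu_type();
	attr.config = I915_PMU_ENGINE_BUSY(I915_ENGINE_CLASS_RENDER, 0);

	/* i915 PMU events are system-wide: pid = -1, a single CPU */
	return syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
}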
89/* Each region is a minimum of 16k, and there are at most 255 of them. 145/* Each region is a minimum of 16k, and there are at most 255 of them.
90 */ 146 */
91#define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use 147#define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use
@@ -260,6 +316,9 @@ typedef struct _drm_i915_sarea {
260#define DRM_I915_GEM_CONTEXT_GETPARAM 0x34 316#define DRM_I915_GEM_CONTEXT_GETPARAM 0x34
261#define DRM_I915_GEM_CONTEXT_SETPARAM 0x35 317#define DRM_I915_GEM_CONTEXT_SETPARAM 0x35
262#define DRM_I915_PERF_OPEN 0x36 318#define DRM_I915_PERF_OPEN 0x36
319#define DRM_I915_PERF_ADD_CONFIG 0x37
320#define DRM_I915_PERF_REMOVE_CONFIG 0x38
321#define DRM_I915_QUERY 0x39
263 322
264#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) 323#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
265#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) 324#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -315,6 +374,9 @@ typedef struct _drm_i915_sarea {
315#define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param) 374#define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param)
316#define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param) 375#define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param)
317#define DRM_IOCTL_I915_PERF_OPEN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param) 376#define DRM_IOCTL_I915_PERF_OPEN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param)
377#define DRM_IOCTL_I915_PERF_ADD_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config)
378#define DRM_IOCTL_I915_PERF_REMOVE_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64)
379#define DRM_IOCTL_I915_QUERY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_QUERY, struct drm_i915_query)
318 380
319/* Allow drivers to submit batchbuffers directly to hardware, relying 381/* Allow drivers to submit batchbuffers directly to hardware, relying
320 * on the security mechanisms provided by hardware. 382 * on the security mechanisms provided by hardware.
@@ -393,10 +455,20 @@ typedef struct drm_i915_irq_wait {
393#define I915_PARAM_MIN_EU_IN_POOL 39 455#define I915_PARAM_MIN_EU_IN_POOL 39
394#define I915_PARAM_MMAP_GTT_VERSION 40 456#define I915_PARAM_MMAP_GTT_VERSION 40
395 457
396/* Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution 458/*
459 * Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution
397 * priorities and the driver will attempt to execute batches in priority order. 460 * priorities and the driver will attempt to execute batches in priority order.
461 * The param returns a capability bitmask; a nonzero value implies that the scheduler
462 * is enabled, with different features present according to the mask.
463 *
464 * The initial priority for each batch is supplied by the context and is
465 * controlled via I915_CONTEXT_PARAM_PRIORITY.
398 */ 466 */
399#define I915_PARAM_HAS_SCHEDULER 41 467#define I915_PARAM_HAS_SCHEDULER 41
468#define I915_SCHEDULER_CAP_ENABLED (1ul << 0)
469#define I915_SCHEDULER_CAP_PRIORITY (1ul << 1)
470#define I915_SCHEDULER_CAP_PREEMPTION (1ul << 2)
471
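Editor's note: a minimal sketch of probing the new capability bits through DRM_IOCTL_I915_GETPARAM; fd is assumed to be an already-open i915 device and error handling is trimmed.

#include <xf86drm.h>
#include "i915_drm.h"

static unsigned int i915_scheduler_caps(int fd)
{
        int caps = 0;
        struct drm_i915_getparam gp = {
                .param = I915_PARAM_HAS_SCHEDULER,
                .value = &caps,
        };

        if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) || caps <= 0)
                return 0;       /* scheduler not present/enabled */

        /* caps & I915_SCHEDULER_CAP_PRIORITY: batches run in priority order.
         * caps & I915_SCHEDULER_CAP_PREEMPTION: higher priority may preempt. */
        return (unsigned int)caps;
}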
400#define I915_PARAM_HUC_STATUS 42 472#define I915_PARAM_HUC_STATUS 42
401 473
402/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of 474/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of
@@ -412,6 +484,51 @@ typedef struct drm_i915_irq_wait {
412 */ 484 */
413#define I915_PARAM_HAS_EXEC_FENCE 44 485#define I915_PARAM_HAS_EXEC_FENCE 44
414 486
487/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to capture
488 * user-specified buffers for post-mortem debugging of GPU hangs. See
489 * EXEC_OBJECT_CAPTURE.
490 */
491#define I915_PARAM_HAS_EXEC_CAPTURE 45
492
493#define I915_PARAM_SLICE_MASK 46
494
495/* Assuming it's uniform for each slice, this queries the mask of subslices
496 * per-slice for this system.
497 */
498#define I915_PARAM_SUBSLICE_MASK 47
499
500/*
501 * Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying the batch buffer
502 * as the first execobject as opposed to the last. See I915_EXEC_BATCH_FIRST.
503 */
504#define I915_PARAM_HAS_EXEC_BATCH_FIRST 48
505
506/* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of
507 * drm_i915_gem_exec_fence structures. See I915_EXEC_FENCE_ARRAY.
508 */
509#define I915_PARAM_HAS_EXEC_FENCE_ARRAY 49
510
511/*
512 * Query whether every context (both per-file default and user created) is
513 * isolated (insofar as HW supports). If this parameter is not true, then
514 * freshly created contexts may inherit values from an existing context,
515 * rather than default HW values. If true, it also ensures (insofar as HW
516 * supports) that all state set by this context will not leak to any other
517 * context.
518 *
519 * As not every engine across every gen supports contexts, the returned
520 * value reports the support of context isolation for individual engines by
521 * returning a bitmask of each engine class set to true if that class supports
522 * isolation.
523 */
524#define I915_PARAM_HAS_CONTEXT_ISOLATION 50
525
526/* Frequency of the command streamer timestamps given by the *_TIMESTAMP
527 * registers. This used to be fixed per platform but from CNL onwards, this
528 * might vary depending on the parts.
529 */
530#define I915_PARAM_CS_TIMESTAMP_FREQUENCY 51
531
415typedef struct drm_i915_getparam { 532typedef struct drm_i915_getparam {
416 __s32 param; 533 __s32 param;
417 /* 534 /*
@@ -666,6 +783,8 @@ struct drm_i915_gem_relocation_entry {
666#define I915_GEM_DOMAIN_VERTEX 0x00000020 783#define I915_GEM_DOMAIN_VERTEX 0x00000020
667/** GTT domain - aperture and scanout */ 784/** GTT domain - aperture and scanout */
668#define I915_GEM_DOMAIN_GTT 0x00000040 785#define I915_GEM_DOMAIN_GTT 0x00000040
786/** WC domain - uncached access */
787#define I915_GEM_DOMAIN_WC 0x00000080
669/** @} */ 788/** @} */
670 789
671struct drm_i915_gem_exec_object { 790struct drm_i915_gem_exec_object {
@@ -773,8 +892,15 @@ struct drm_i915_gem_exec_object2 {
773 * I915_PARAM_HAS_EXEC_FENCE to order execbufs and execute them asynchronously. 892 * I915_PARAM_HAS_EXEC_FENCE to order execbufs and execute them asynchronously.
774 */ 893 */
775#define EXEC_OBJECT_ASYNC (1<<6) 894#define EXEC_OBJECT_ASYNC (1<<6)
895/* Request that the contents of this execobject be copied into the error
896 * state upon a GPU hang involving this batch for post-mortem debugging.
897 * These buffers are recorded in no particular order as "user" in
898 * /sys/class/drm/cardN/error. Query I915_PARAM_HAS_EXEC_CAPTURE to see
899 * if the kernel supports this flag.
900 */
901#define EXEC_OBJECT_CAPTURE (1<<7)
776/* All remaining bits are MBZ and RESERVED FOR FUTURE USE */ 902/* All remaining bits are MBZ and RESERVED FOR FUTURE USE */
777#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_ASYNC<<1) 903#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_CAPTURE<<1)
778 __u64 flags; 904 __u64 flags;
779 905
780 union { 906 union {
@@ -784,6 +910,18 @@ struct drm_i915_gem_exec_object2 {
784 __u64 rsvd2; 910 __u64 rsvd2;
785}; 911};
786 912
913struct drm_i915_gem_exec_fence {
914 /**
915 * User's handle for a drm_syncobj to wait on or signal.
916 */
917 __u32 handle;
918
919#define I915_EXEC_FENCE_WAIT (1<<0)
920#define I915_EXEC_FENCE_SIGNAL (1<<1)
921#define __I915_EXEC_FENCE_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SIGNAL << 1))
922 __u32 flags;
923};
924
787struct drm_i915_gem_execbuffer2 { 925struct drm_i915_gem_execbuffer2 {
788 /** 926 /**
789 * List of gem_exec_object2 structs 927 * List of gem_exec_object2 structs
@@ -798,7 +936,11 @@ struct drm_i915_gem_execbuffer2 {
798 __u32 DR1; 936 __u32 DR1;
799 __u32 DR4; 937 __u32 DR4;
800 __u32 num_cliprects; 938 __u32 num_cliprects;
801 /** This is a struct drm_clip_rect *cliprects */ 939 /**
940 * This is a struct drm_clip_rect *cliprects if I915_EXEC_FENCE_ARRAY
941 * is not set. If I915_EXEC_FENCE_ARRAY is set, then this is a
942 * struct drm_i915_gem_exec_fence *fences.
943 */
802 __u64 cliprects_ptr; 944 __u64 cliprects_ptr;
803#define I915_EXEC_RING_MASK (7<<0) 945#define I915_EXEC_RING_MASK (7<<0)
804#define I915_EXEC_DEFAULT (0<<0) 946#define I915_EXEC_DEFAULT (0<<0)
@@ -889,7 +1031,24 @@ struct drm_i915_gem_execbuffer2 {
889 */ 1031 */
890#define I915_EXEC_FENCE_OUT (1<<17) 1032#define I915_EXEC_FENCE_OUT (1<<17)
891 1033
892#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_OUT<<1)) 1034/*
1035 * Traditionally the execbuf ioctl has only considered the final element in
1036 * the execobject[] to be the executable batch. Often though, the client
1037 * will know the batch object prior to construction, and being able to place
1038 * it into the execobject[] array first can simplify the relocation tracking.
1039 * Setting I915_EXEC_BATCH_FIRST tells execbuf to use element 0 of the
1040 * execobject[] as the batch instead (the default is to use the last
1041 * element).
1042 */
1043#define I915_EXEC_BATCH_FIRST (1<<18)
1044
1045/* Setting I915_EXEC_FENCE_ARRAY implies that num_cliprects and cliprects_ptr
1046 * define an array of i915_gem_exec_fence structures which specify a set of
1047 * dma fences to wait upon or signal.
1048 */
1049#define I915_EXEC_FENCE_ARRAY (1<<19)
1050
1051#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_ARRAY<<1))
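Editor's note: a minimal sketch of attaching syncobj waits/signals to a submission once I915_PARAM_HAS_EXEC_FENCE_ARRAY reports support; fd, the execobject array and the two syncobj handles are assumed to exist already, and the helper name is illustrative.

#include <stdint.h>
#include <xf86drm.h>
#include "i915_drm.h"

static int submit_with_fences(int fd, struct drm_i915_gem_exec_object2 *obj,
                              uint32_t nobj, uint32_t wait_handle,
                              uint32_t signal_handle)
{
        struct drm_i915_gem_exec_fence fences[] = {
                { .handle = wait_handle,   .flags = I915_EXEC_FENCE_WAIT },
                { .handle = signal_handle, .flags = I915_EXEC_FENCE_SIGNAL },
        };
        struct drm_i915_gem_execbuffer2 execbuf = {
                .buffers_ptr   = (uintptr_t)obj,
                .buffer_count  = nobj,
                /* With I915_EXEC_FENCE_ARRAY set, cliprects_ptr/num_cliprects
                 * carry the fence array rather than clip rectangles. */
                .cliprects_ptr = (uintptr_t)fences,
                .num_cliprects = 2,
                .flags         = I915_EXEC_RENDER | I915_EXEC_FENCE_ARRAY,
        };

        return drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
}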
893 1052
894#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff) 1053#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
895#define i915_execbuffer2_set_context_id(eb2, context) \ 1054#define i915_execbuffer2_set_context_id(eb2, context) \
@@ -1201,7 +1360,9 @@ struct drm_intel_overlay_attrs {
1201 * active on a given plane. 1360 * active on a given plane.
1202 */ 1361 */
1203 1362
1204#define I915_SET_COLORKEY_NONE (1<<0) /* disable color key matching */ 1363#define I915_SET_COLORKEY_NONE (1<<0) /* Deprecated. Instead set
1364 * flags==0 to disable colorkeying.
1365 */
1205#define I915_SET_COLORKEY_DESTINATION (1<<1) 1366#define I915_SET_COLORKEY_DESTINATION (1<<1)
1206#define I915_SET_COLORKEY_SOURCE (1<<2) 1367#define I915_SET_COLORKEY_SOURCE (1<<2)
1207struct drm_intel_sprite_colorkey { 1368struct drm_intel_sprite_colorkey {
@@ -1239,14 +1400,16 @@ struct drm_i915_reg_read {
1239 * be specified 1400 * be specified
1240 */ 1401 */
1241 __u64 offset; 1402 __u64 offset;
1403#define I915_REG_READ_8B_WA (1ul << 0)
1404
1242 __u64 val; /* Return value */ 1405 __u64 val; /* Return value */
1243}; 1406};
1244/* Known registers: 1407/* Known registers:
1245 * 1408 *
1246 * Render engine timestamp - 0x2358 + 64bit - gen7+ 1409 * Render engine timestamp - 0x2358 + 64bit - gen7+
1247 * - Note this register returns an invalid value if using the default 1410 * - Note this register returns an invalid value if using the default
1248 * single instruction 8byte read, in order to workaround that use 1411 * single instruction 8-byte read; to work around that, pass the
1249 * offset (0x2538 | 1) instead. 1412 * I915_REG_READ_8B_WA flag in the offset field.
1250 * 1413 *
1251 */ 1414 */
1252 1415
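Editor's note: a minimal sketch of reading the render engine timestamp with the new workaround flag; fd is an open i915 device and gen7+ is assumed.

#include <stdint.h>
#include <xf86drm.h>
#include "i915_drm.h"

static int read_cs_timestamp(int fd, uint64_t *ts)
{
        struct drm_i915_reg_read reg = {
                /* RCS RING_TIMESTAMP, requesting the 8-byte read workaround
                 * via the flag bit carried in the offset field. */
                .offset = 0x2358 | I915_REG_READ_8B_WA,
        };
        int ret = drmIoctl(fd, DRM_IOCTL_I915_REG_READ, &reg);

        if (ret == 0)
                *ts = reg.val;
        return ret;
}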
@@ -1289,17 +1452,26 @@ struct drm_i915_gem_context_param {
1289#define I915_CONTEXT_PARAM_GTT_SIZE 0x3 1452#define I915_CONTEXT_PARAM_GTT_SIZE 0x3
1290#define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE 0x4 1453#define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE 0x4
1291#define I915_CONTEXT_PARAM_BANNABLE 0x5 1454#define I915_CONTEXT_PARAM_BANNABLE 0x5
1455#define I915_CONTEXT_PARAM_PRIORITY 0x6
1456#define I915_CONTEXT_MAX_USER_PRIORITY 1023 /* inclusive */
1457#define I915_CONTEXT_DEFAULT_PRIORITY 0
1458#define I915_CONTEXT_MIN_USER_PRIORITY -1023 /* inclusive */
1292 __u64 value; 1459 __u64 value;
1293}; 1460};
1294 1461
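Editor's note: a minimal sketch of adjusting a context's priority with the new param; ctx_id is assumed to come from an earlier GEM_CONTEXT_CREATE, I915_SCHEDULER_CAP_PRIORITY must be advertised, and positive values typically require elevated privileges.

#include <stdint.h>
#include <xf86drm.h>
#include "i915_drm.h"

static int set_context_priority(int fd, uint32_t ctx_id, int priority)
{
        struct drm_i915_gem_context_param p = {
                .ctx_id = ctx_id,
                .param  = I915_CONTEXT_PARAM_PRIORITY,
                /* Must lie in [I915_CONTEXT_MIN_USER_PRIORITY,
                 * I915_CONTEXT_MAX_USER_PRIORITY]; 0 is the default. */
                .value  = (uint64_t)(int64_t)priority,
        };

        return drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
}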
1295enum drm_i915_oa_format { 1462enum drm_i915_oa_format {
1296 I915_OA_FORMAT_A13 = 1, 1463 I915_OA_FORMAT_A13 = 1, /* HSW only */
1297 I915_OA_FORMAT_A29, 1464 I915_OA_FORMAT_A29, /* HSW only */
1298 I915_OA_FORMAT_A13_B8_C8, 1465 I915_OA_FORMAT_A13_B8_C8, /* HSW only */
1299 I915_OA_FORMAT_B4_C8, 1466 I915_OA_FORMAT_B4_C8, /* HSW only */
1300 I915_OA_FORMAT_A45_B8_C8, 1467 I915_OA_FORMAT_A45_B8_C8, /* HSW only */
1301 I915_OA_FORMAT_B4_C8_A16, 1468 I915_OA_FORMAT_B4_C8_A16, /* HSW only */
1302 I915_OA_FORMAT_C4_B8, 1469 I915_OA_FORMAT_C4_B8, /* HSW+ */
1470
1471 /* Gen8+ */
1472 I915_OA_FORMAT_A12,
1473 I915_OA_FORMAT_A12_B8_C8,
1474 I915_OA_FORMAT_A32u40_A4u32_B8_C8,
1303 1475
1304 I915_OA_FORMAT_MAX /* non-ABI */ 1476 I915_OA_FORMAT_MAX /* non-ABI */
1305}; 1477};
@@ -1424,6 +1596,127 @@ enum drm_i915_perf_record_type {
1424 DRM_I915_PERF_RECORD_MAX /* non-ABI */ 1596 DRM_I915_PERF_RECORD_MAX /* non-ABI */
1425}; 1597};
1426 1598
1599/**
1600 * Structure to upload perf dynamic configuration into the kernel.
1601 */
1602struct drm_i915_perf_oa_config {
1603 /** String formatted like "%08x-%04x-%04x-%04x-%012x" */
1604 char uuid[36];
1605
1606 __u32 n_mux_regs;
1607 __u32 n_boolean_regs;
1608 __u32 n_flex_regs;
1609
1610 /*
1611 * These fields are pointers to tuples of u32 values (register address,
1612 * value). For example the expected length of the buffer pointed by
1613 * mux_regs_ptr is (2 * sizeof(u32) * n_mux_regs).
1614 */
1615 __u64 mux_regs_ptr;
1616 __u64 boolean_regs_ptr;
1617 __u64 flex_regs_ptr;
1618};
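Editor's note: a minimal sketch of how a tool might upload a dynamic OA configuration. The UUID and register addresses below are placeholders, not a real metric set; on success the ioctl returns the id of the new config, which can later be passed to DRM_IOCTL_I915_PERF_REMOVE_CONFIG.

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include "i915_drm.h"

static int upload_oa_config(int fd, uint64_t *id_out)
{
        /* Register programming is passed as (address, value) u32 pairs. */
        uint32_t mux_regs[]  = { 0x9888, 0x00000000 };  /* placeholder */
        uint32_t bool_regs[] = { 0x2744, 0x00800000 };  /* placeholder */
        struct drm_i915_perf_oa_config cfg = {
                .n_mux_regs       = 1,
                .n_boolean_regs   = 1,
                .n_flex_regs      = 0,
                .mux_regs_ptr     = (uintptr_t)mux_regs,
                .boolean_regs_ptr = (uintptr_t)bool_regs,
        };
        int ret;

        /* uuid[] holds exactly 36 formatted characters, no NUL terminator. */
        memcpy(cfg.uuid, "01234567-0123-0123-0123-0123456789ab", sizeof(cfg.uuid));

        ret = drmIoctl(fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &cfg);
        if (ret < 0)
                return ret;
        *id_out = ret;  /* id of the newly registered config */
        return 0;
}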
1619
1620struct drm_i915_query_item {
1621 __u64 query_id;
1622#define DRM_I915_QUERY_TOPOLOGY_INFO 1
1623
1624 /*
1625 * When set to zero by userspace, this is filled with the size of the
1626 * data to be written at the data_ptr pointer. The kernel sets this
1627 * value to a negative value to signal an error on a particular query
1628 * item.
1629 */
1630 __s32 length;
1631
1632 /*
1633 * Unused for now. Must be cleared to zero.
1634 */
1635 __u32 flags;
1636
1637 /*
1638 * Data will be written at the location pointed by data_ptr when the
1639 * value of length matches the length of the data to be written by the
1640 * kernel.
1641 */
1642 __u64 data_ptr;
1643};
1644
1645struct drm_i915_query {
1646 __u32 num_items;
1647
1648 /*
1649 * Unused for now. Must be cleared to zero.
1650 */
1651 __u32 flags;
1652
1653 /*
1654 * This points to an array of num_items drm_i915_query_item structures.
1655 */
1656 __u64 items_ptr;
1657};
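Editor's note: the length field gives the usual two-pass pattern: ask for the size first, then allocate and ask again. A minimal sketch follows; the helper name and the trimmed error handling are illustrative.

#include <stdint.h>
#include <stdlib.h>
#include <xf86drm.h>
#include "i915_drm.h"

static void *i915_query_alloc(int fd, uint64_t query_id, int32_t *len_out)
{
        struct drm_i915_query_item item = { .query_id = query_id };
        struct drm_i915_query q = {
                .num_items = 1,
                .items_ptr = (uintptr_t)&item,
        };
        void *data;

        /* Pass 1: length == 0 asks the kernel how much space is needed. */
        if (drmIoctl(fd, DRM_IOCTL_I915_QUERY, &q) || item.length <= 0)
                return NULL;

        data = calloc(1, item.length);
        if (!data)
                return NULL;
        item.data_ptr = (uintptr_t)data;

        /* Pass 2: length now matches, so the kernel fills data_ptr. */
        if (drmIoctl(fd, DRM_IOCTL_I915_QUERY, &q) || item.length <= 0) {
                free(data);
                return NULL;
        }
        *len_out = item.length;
        return data;
}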
1658
1659/*
1660 * Data written by the kernel with query DRM_I915_QUERY_TOPOLOGY_INFO :
1661 *
1662 * data: contains the 3 pieces of information :
1663 *
1664 * - the slice mask with one bit per slice telling whether a slice is
1665 * available. The availability of slice X can be queried with the following
1666 * formula :
1667 *
1668 * (data[X / 8] >> (X % 8)) & 1
1669 *
1670 * - the subslice mask for each slice with one bit per subslice telling
1671 * whether a subslice is available. The availability of subslice Y in slice
1672 * X can be queried with the following formula :
1673 *
1674 * (data[subslice_offset +
1675 * X * subslice_stride +
1676 * Y / 8] >> (Y % 8)) & 1
1677 *
1678 * - the EU mask for each subslice in each slice with one bit per EU telling
1679 * whether an EU is available. The availability of EU Z in subslice Y in
1680 * slice X can be queried with the following formula :
1681 *
1682 * (data[eu_offset +
1683 * (X * max_subslices + Y) * eu_stride +
1684 * Z / 8] >> (Z % 8)) & 1
1685 */
1686struct drm_i915_query_topology_info {
1687 /*
1688 * Unused for now. Must be cleared to zero.
1689 */
1690 __u16 flags;
1691
1692 __u16 max_slices;
1693 __u16 max_subslices;
1694 __u16 max_eus_per_subslice;
1695
1696 /*
1697 * Offset in data[] at which the subslice masks are stored.
1698 */
1699 __u16 subslice_offset;
1700
1701 /*
1702 * Stride at which each of the subslice masks for each slice are
1703 * stored.
1704 */
1705 __u16 subslice_stride;
1706
1707 /*
1708 * Offset in data[] at which the EU masks are stored.
1709 */
1710 __u16 eu_offset;
1711
1712 /*
1713 * Stride at which each of the EU masks for each subslice are stored.
1714 */
1715 __u16 eu_stride;
1716
1717 __u8 data[];
1718};
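Editor's note: a minimal sketch of evaluating the three formulas quoted above against a buffer returned for DRM_I915_QUERY_TOPOLOGY_INFO (for instance obtained with the query helper sketched earlier); the helper names are illustrative.

#include <stdbool.h>
#include "i915_drm.h"

static bool slice_avail(const struct drm_i915_query_topology_info *t, int s)
{
        return (t->data[s / 8] >> (s % 8)) & 1;
}

static bool subslice_avail(const struct drm_i915_query_topology_info *t,
                           int s, int ss)
{
        return (t->data[t->subslice_offset + s * t->subslice_stride + ss / 8]
                >> (ss % 8)) & 1;
}

static bool eu_avail(const struct drm_i915_query_topology_info *t,
                     int s, int ss, int eu)
{
        return (t->data[t->eu_offset +
                        (s * t->max_subslices + ss) * t->eu_stride + eu / 8]
                >> (eu % 8)) & 1;
}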
1719
1427#if defined(__cplusplus) 1720#if defined(__cplusplus)
1428} 1721}
1429#endif 1722#endif
diff --git a/include/drm/mga_drm.h b/include/drm/mga_drm.h
index b630e8fa..79300111 100644
--- a/include/drm/mga_drm.h
+++ b/include/drm/mga_drm.h
@@ -37,6 +37,10 @@
37 37
38#include "drm.h" 38#include "drm.h"
39 39
40#if defined(__cplusplus)
41extern "C" {
42#endif
43
40/* WARNING: If you change any of these defines, make sure to change the 44/* WARNING: If you change any of these defines, make sure to change the
41 * defines in the Xserver file (mga_sarea.h) 45 * defines in the Xserver file (mga_sarea.h)
42 */ 46 */
@@ -107,7 +111,7 @@
107 */ 111 */
108#define MGA_NR_SAREA_CLIPRECTS 8 112#define MGA_NR_SAREA_CLIPRECTS 8
109 113
110/* 2 heaps (1 for card, 1 for agp), each divided into upto 128 114/* 2 heaps (1 for card, 1 for agp), each divided into up to 128
111 * regions, subject to a minimum region size of (1<<16) == 64k. 115 * regions, subject to a minimum region size of (1<<16) == 64k.
112 * 116 *
113 * Clients may subdivide regions internally, but when sharing between 117 * Clients may subdivide regions internally, but when sharing between
@@ -248,7 +252,7 @@ typedef struct _drm_mga_sarea {
248#define DRM_MGA_DMA_BOOTSTRAP 0x0c 252#define DRM_MGA_DMA_BOOTSTRAP 0x0c
249 253
250#define DRM_IOCTL_MGA_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_INIT, drm_mga_init_t) 254#define DRM_IOCTL_MGA_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_INIT, drm_mga_init_t)
251#define DRM_IOCTL_MGA_FLUSH DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_FLUSH, drm_lock_t) 255#define DRM_IOCTL_MGA_FLUSH DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_FLUSH, struct drm_lock)
252#define DRM_IOCTL_MGA_RESET DRM_IO( DRM_COMMAND_BASE + DRM_MGA_RESET) 256#define DRM_IOCTL_MGA_RESET DRM_IO( DRM_COMMAND_BASE + DRM_MGA_RESET)
253#define DRM_IOCTL_MGA_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_MGA_SWAP) 257#define DRM_IOCTL_MGA_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_MGA_SWAP)
254#define DRM_IOCTL_MGA_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_CLEAR, drm_mga_clear_t) 258#define DRM_IOCTL_MGA_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_CLEAR, drm_mga_clear_t)
@@ -416,4 +420,8 @@ typedef struct drm_mga_getparam {
416 void *value; 420 void *value;
417} drm_mga_getparam_t; 421} drm_mga_getparam_t;
418 422
423#if defined(__cplusplus)
424}
425#endif
426
419#endif 427#endif
diff --git a/include/drm/nouveau_drm.h b/include/drm/nouveau_drm.h
index 1372f533..91d2f314 100644
--- a/include/drm/nouveau_drm.h
+++ b/include/drm/nouveau_drm.h
@@ -27,6 +27,12 @@
27 27
28#define NOUVEAU_DRM_HEADER_PATCHLEVEL 16 28#define NOUVEAU_DRM_HEADER_PATCHLEVEL 16
29 29
30#include "drm.h"
31
32#if defined(__cplusplus)
33extern "C" {
34#endif
35
30/* reserved object handles when using deprecated object APIs - these 36/* reserved object handles when using deprecated object APIs - these
31 * are here so that libdrm can allow interoperability with the new 37 * are here so that libdrm can allow interoperability with the new
32 * object APIs 38 * object APIs
@@ -106,6 +112,7 @@ struct drm_nouveau_setparam {
106#define NOUVEAU_GEM_DOMAIN_MAPPABLE (1 << 3) 112#define NOUVEAU_GEM_DOMAIN_MAPPABLE (1 << 3)
107#define NOUVEAU_GEM_DOMAIN_COHERENT (1 << 4) 113#define NOUVEAU_GEM_DOMAIN_COHERENT (1 << 4)
108 114
115#define NOUVEAU_GEM_TILE_COMP 0x00030000 /* nv50-only */
109#define NOUVEAU_GEM_TILE_LAYOUT_MASK 0x0000ff00 116#define NOUVEAU_GEM_TILE_LAYOUT_MASK 0x0000ff00
110#define NOUVEAU_GEM_TILE_16BPP 0x00000001 117#define NOUVEAU_GEM_TILE_16BPP 0x00000001
111#define NOUVEAU_GEM_TILE_32BPP 0x00000002 118#define NOUVEAU_GEM_TILE_32BPP 0x00000002
@@ -113,13 +120,13 @@ struct drm_nouveau_setparam {
113#define NOUVEAU_GEM_TILE_NONCONTIG 0x00000008 120#define NOUVEAU_GEM_TILE_NONCONTIG 0x00000008
114 121
115struct drm_nouveau_gem_info { 122struct drm_nouveau_gem_info {
116 uint32_t handle; 123 __u32 handle;
117 uint32_t domain; 124 __u32 domain;
118 uint64_t size; 125 __u64 size;
119 uint64_t offset; 126 __u64 offset;
120 uint64_t map_handle; 127 __u64 map_handle;
121 uint32_t tile_mode; 128 __u32 tile_mode;
122 uint32_t tile_flags; 129 __u32 tile_flags;
123}; 130};
124 131
125struct drm_nouveau_gem_set_tiling { 132struct drm_nouveau_gem_set_tiling {
@@ -130,23 +137,23 @@ struct drm_nouveau_gem_set_tiling {
130 137
131struct drm_nouveau_gem_new { 138struct drm_nouveau_gem_new {
132 struct drm_nouveau_gem_info info; 139 struct drm_nouveau_gem_info info;
133 uint32_t channel_hint; 140 __u32 channel_hint;
134 uint32_t align; 141 __u32 align;
135}; 142};
136 143
137#define NOUVEAU_GEM_MAX_BUFFERS 1024 144#define NOUVEAU_GEM_MAX_BUFFERS 1024
138struct drm_nouveau_gem_pushbuf_bo_presumed { 145struct drm_nouveau_gem_pushbuf_bo_presumed {
139 uint32_t valid; 146 __u32 valid;
140 uint32_t domain; 147 __u32 domain;
141 uint64_t offset; 148 __u64 offset;
142}; 149};
143 150
144struct drm_nouveau_gem_pushbuf_bo { 151struct drm_nouveau_gem_pushbuf_bo {
145 uint64_t user_priv; 152 __u64 user_priv;
146 uint32_t handle; 153 __u32 handle;
147 uint32_t read_domains; 154 __u32 read_domains;
148 uint32_t write_domains; 155 __u32 write_domains;
149 uint32_t valid_domains; 156 __u32 valid_domains;
150 struct drm_nouveau_gem_pushbuf_bo_presumed presumed; 157 struct drm_nouveau_gem_pushbuf_bo_presumed presumed;
151}; 158};
152 159
@@ -155,35 +162,35 @@ struct drm_nouveau_gem_pushbuf_bo {
155#define NOUVEAU_GEM_RELOC_OR (1 << 2) 162#define NOUVEAU_GEM_RELOC_OR (1 << 2)
156#define NOUVEAU_GEM_MAX_RELOCS 1024 163#define NOUVEAU_GEM_MAX_RELOCS 1024
157struct drm_nouveau_gem_pushbuf_reloc { 164struct drm_nouveau_gem_pushbuf_reloc {
158 uint32_t reloc_bo_index; 165 __u32 reloc_bo_index;
159 uint32_t reloc_bo_offset; 166 __u32 reloc_bo_offset;
160 uint32_t bo_index; 167 __u32 bo_index;
161 uint32_t flags; 168 __u32 flags;
162 uint32_t data; 169 __u32 data;
163 uint32_t vor; 170 __u32 vor;
164 uint32_t tor; 171 __u32 tor;
165}; 172};
166 173
167#define NOUVEAU_GEM_MAX_PUSH 512 174#define NOUVEAU_GEM_MAX_PUSH 512
168struct drm_nouveau_gem_pushbuf_push { 175struct drm_nouveau_gem_pushbuf_push {
169 uint32_t bo_index; 176 __u32 bo_index;
170 uint32_t pad; 177 __u32 pad;
171 uint64_t offset; 178 __u64 offset;
172 uint64_t length; 179 __u64 length;
173}; 180};
174 181
175struct drm_nouveau_gem_pushbuf { 182struct drm_nouveau_gem_pushbuf {
176 uint32_t channel; 183 __u32 channel;
177 uint32_t nr_buffers; 184 __u32 nr_buffers;
178 uint64_t buffers; 185 __u64 buffers;
179 uint32_t nr_relocs; 186 __u32 nr_relocs;
180 uint32_t nr_push; 187 __u32 nr_push;
181 uint64_t relocs; 188 __u64 relocs;
182 uint64_t push; 189 __u64 push;
183 uint32_t suffix0; 190 __u32 suffix0;
184 uint32_t suffix1; 191 __u32 suffix1;
185 uint64_t vram_available; 192 __u64 vram_available;
186 uint64_t gart_available; 193 __u64 gart_available;
187}; 194};
188 195
189#define NOUVEAU_GEM_PUSHBUF_2_FENCE_WAIT 0x00000001 196#define NOUVEAU_GEM_PUSHBUF_2_FENCE_WAIT 0x00000001
@@ -205,12 +212,12 @@ struct drm_nouveau_gem_pushbuf_2 {
205#define NOUVEAU_GEM_CPU_PREP_NOBLOCK 0x00000002 212#define NOUVEAU_GEM_CPU_PREP_NOBLOCK 0x00000002
206#define NOUVEAU_GEM_CPU_PREP_WRITE 0x00000004 213#define NOUVEAU_GEM_CPU_PREP_WRITE 0x00000004
207struct drm_nouveau_gem_cpu_prep { 214struct drm_nouveau_gem_cpu_prep {
208 uint32_t handle; 215 __u32 handle;
209 uint32_t flags; 216 __u32 flags;
210}; 217};
211 218
212struct drm_nouveau_gem_cpu_fini { 219struct drm_nouveau_gem_cpu_fini {
213 uint32_t handle; 220 __u32 handle;
214}; 221};
215 222
216#define NOUVEAU_GEM_AS_SPARSE 0x00000001 223#define NOUVEAU_GEM_AS_SPARSE 0x00000001
@@ -287,4 +294,7 @@ struct drm_nouveau_gem_unmap {
287#define DRM_NOUVEAU_GEM_MAP 0x56 294#define DRM_NOUVEAU_GEM_MAP 0x56
288#define DRM_NOUVEAU_GEM_UNMAP 0x57 295#define DRM_NOUVEAU_GEM_UNMAP 0x57
289 296
297#if defined(__cplusplus)
298}
299#endif
290#endif /* __NOUVEAU_DRM_H__ */ 300#endif /* __NOUVEAU_DRM_H__ */
diff --git a/include/drm/qxl_drm.h b/include/drm/qxl_drm.h
index 1e331a86..38a0dbdf 100644
--- a/include/drm/qxl_drm.h
+++ b/include/drm/qxl_drm.h
@@ -27,10 +27,14 @@
27#include <stddef.h> 27#include <stddef.h>
28#include "drm.h" 28#include "drm.h"
29 29
30#if defined(__cplusplus)
31extern "C" {
32#endif
33
30/* Please note that modifications to all structs defined here are 34/* Please note that modifications to all structs defined here are
31 * subject to backwards-compatibility constraints. 35 * subject to backwards-compatibility constraints.
32 * 36 *
33 * Do not use pointers, use uint64_t instead for 32 bit / 64 bit user/kernel 37 * Do not use pointers, use __u64 instead for 32 bit / 64 bit user/kernel
34 * compatibility Keep fields aligned to their size 38 * compatibility Keep fields aligned to their size
35 */ 39 */
36 40
@@ -48,14 +52,14 @@
48#define DRM_QXL_ALLOC_SURF 0x06 52#define DRM_QXL_ALLOC_SURF 0x06
49 53
50struct drm_qxl_alloc { 54struct drm_qxl_alloc {
51 uint32_t size; 55 __u32 size;
52 uint32_t handle; /* 0 is an invalid handle */ 56 __u32 handle; /* 0 is an invalid handle */
53}; 57};
54 58
55struct drm_qxl_map { 59struct drm_qxl_map {
56 uint64_t offset; /* use for mmap system call */ 60 __u64 offset; /* use for mmap system call */
57 uint32_t handle; 61 __u32 handle;
58 uint32_t pad; 62 __u32 pad;
59}; 63};
60 64
61/* 65/*
@@ -68,59 +72,59 @@ struct drm_qxl_map {
68#define QXL_RELOC_TYPE_SURF 2 72#define QXL_RELOC_TYPE_SURF 2
69 73
70struct drm_qxl_reloc { 74struct drm_qxl_reloc {
71 uint64_t src_offset; /* offset into src_handle or src buffer */ 75 __u64 src_offset; /* offset into src_handle or src buffer */
72 uint64_t dst_offset; /* offset in dest handle */ 76 __u64 dst_offset; /* offset in dest handle */
73 uint32_t src_handle; /* dest handle to compute address from */ 77 __u32 src_handle; /* dest handle to compute address from */
74 uint32_t dst_handle; /* 0 if to command buffer */ 78 __u32 dst_handle; /* 0 if to command buffer */
75 uint32_t reloc_type; 79 __u32 reloc_type;
76 uint32_t pad; 80 __u32 pad;
77}; 81};
78 82
79struct drm_qxl_command { 83struct drm_qxl_command {
80 uint64_t command; /* void* */ 84 __u64 command; /* void* */
81 uint64_t relocs; /* struct drm_qxl_reloc* */ 85 __u64 relocs; /* struct drm_qxl_reloc* */
82 uint32_t type; 86 __u32 type;
83 uint32_t command_size; 87 __u32 command_size;
84 uint32_t relocs_num; 88 __u32 relocs_num;
85 uint32_t pad; 89 __u32 pad;
86}; 90};
87 91
88/* XXX: call it drm_qxl_commands? */ 92/* XXX: call it drm_qxl_commands? */
89struct drm_qxl_execbuffer { 93struct drm_qxl_execbuffer {
90 uint32_t flags; /* for future use */ 94 __u32 flags; /* for future use */
91 uint32_t commands_num; 95 __u32 commands_num;
92 uint64_t commands; /* struct drm_qxl_command* */ 96 __u64 commands; /* struct drm_qxl_command* */
93}; 97};
94 98
95struct drm_qxl_update_area { 99struct drm_qxl_update_area {
96 uint32_t handle; 100 __u32 handle;
97 uint32_t top; 101 __u32 top;
98 uint32_t left; 102 __u32 left;
99 uint32_t bottom; 103 __u32 bottom;
100 uint32_t right; 104 __u32 right;
101 uint32_t pad; 105 __u32 pad;
102}; 106};
103 107
104#define QXL_PARAM_NUM_SURFACES 1 /* rom->n_surfaces */ 108#define QXL_PARAM_NUM_SURFACES 1 /* rom->n_surfaces */
105#define QXL_PARAM_MAX_RELOCS 2 109#define QXL_PARAM_MAX_RELOCS 2
106struct drm_qxl_getparam { 110struct drm_qxl_getparam {
107 uint64_t param; 111 __u64 param;
108 uint64_t value; 112 __u64 value;
109}; 113};
110 114
111/* these are one bit values */ 115/* these are one bit values */
112struct drm_qxl_clientcap { 116struct drm_qxl_clientcap {
113 uint32_t index; 117 __u32 index;
114 uint32_t pad; 118 __u32 pad;
115}; 119};
116 120
117struct drm_qxl_alloc_surf { 121struct drm_qxl_alloc_surf {
118 uint32_t format; 122 __u32 format;
119 uint32_t width; 123 __u32 width;
120 uint32_t height; 124 __u32 height;
121 int32_t stride; 125 __s32 stride;
122 uint32_t handle; 126 __u32 handle;
123 uint32_t pad; 127 __u32 pad;
124}; 128};
125 129
126#define DRM_IOCTL_QXL_ALLOC \ 130#define DRM_IOCTL_QXL_ALLOC \
@@ -149,4 +153,8 @@ struct drm_qxl_alloc_surf {
149 DRM_IOWR(DRM_COMMAND_BASE + DRM_QXL_ALLOC_SURF,\ 153 DRM_IOWR(DRM_COMMAND_BASE + DRM_QXL_ALLOC_SURF,\
150 struct drm_qxl_alloc_surf) 154 struct drm_qxl_alloc_surf)
151 155
156#if defined(__cplusplus)
157}
158#endif
159
152#endif 160#endif
diff --git a/include/drm/r128_drm.h b/include/drm/r128_drm.h
index ede78ff9..bf431a02 100644
--- a/include/drm/r128_drm.h
+++ b/include/drm/r128_drm.h
@@ -33,6 +33,12 @@
33#ifndef __R128_DRM_H__ 33#ifndef __R128_DRM_H__
34#define __R128_DRM_H__ 34#define __R128_DRM_H__
35 35
36#include "drm.h"
37
38#if defined(__cplusplus)
39extern "C" {
40#endif
41
36/* WARNING: If you change any of these defines, make sure to change the 42/* WARNING: If you change any of these defines, make sure to change the
37 * defines in the X server file (r128_sarea.h) 43 * defines in the X server file (r128_sarea.h)
38 */ 44 */
@@ -323,4 +329,8 @@ typedef struct drm_r128_getparam {
323 void *value; 329 void *value;
324} drm_r128_getparam_t; 330} drm_r128_getparam_t;
325 331
332#if defined(__cplusplus)
333}
334#endif
335
326#endif 336#endif
diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h
index f09cc04c..a1e385d6 100644
--- a/include/drm/radeon_drm.h
+++ b/include/drm/radeon_drm.h
@@ -797,9 +797,9 @@ typedef struct drm_radeon_surface_free {
797#define RADEON_GEM_DOMAIN_VRAM 0x4 797#define RADEON_GEM_DOMAIN_VRAM 0x4
798 798
799struct drm_radeon_gem_info { 799struct drm_radeon_gem_info {
800 uint64_t gart_size; 800 __u64 gart_size;
801 uint64_t vram_size; 801 __u64 vram_size;
802 uint64_t vram_visible; 802 __u64 vram_visible;
803}; 803};
804 804
805#define RADEON_GEM_NO_BACKING_STORE (1 << 0) 805#define RADEON_GEM_NO_BACKING_STORE (1 << 0)
@@ -811,11 +811,11 @@ struct drm_radeon_gem_info {
811#define RADEON_GEM_NO_CPU_ACCESS (1 << 4) 811#define RADEON_GEM_NO_CPU_ACCESS (1 << 4)
812 812
813struct drm_radeon_gem_create { 813struct drm_radeon_gem_create {
814 uint64_t size; 814 __u64 size;
815 uint64_t alignment; 815 __u64 alignment;
816 uint32_t handle; 816 __u32 handle;
817 uint32_t initial_domain; 817 __u32 initial_domain;
818 uint32_t flags; 818 __u32 flags;
819}; 819};
820 820
821/* 821/*
@@ -829,10 +829,10 @@ struct drm_radeon_gem_create {
829#define RADEON_GEM_USERPTR_REGISTER (1 << 3) 829#define RADEON_GEM_USERPTR_REGISTER (1 << 3)
830 830
831struct drm_radeon_gem_userptr { 831struct drm_radeon_gem_userptr {
832 uint64_t addr; 832 __u64 addr;
833 uint64_t size; 833 __u64 size;
834 uint32_t flags; 834 __u32 flags;
835 uint32_t handle; 835 __u32 handle;
836}; 836};
837 837
838#define RADEON_TILING_MACRO 0x1 838#define RADEON_TILING_MACRO 0x1
@@ -855,72 +855,72 @@ struct drm_radeon_gem_userptr {
855#define RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK 0xf 855#define RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK 0xf
856 856
857struct drm_radeon_gem_set_tiling { 857struct drm_radeon_gem_set_tiling {
858 uint32_t handle; 858 __u32 handle;
859 uint32_t tiling_flags; 859 __u32 tiling_flags;
860 uint32_t pitch; 860 __u32 pitch;
861}; 861};
862 862
863struct drm_radeon_gem_get_tiling { 863struct drm_radeon_gem_get_tiling {
864 uint32_t handle; 864 __u32 handle;
865 uint32_t tiling_flags; 865 __u32 tiling_flags;
866 uint32_t pitch; 866 __u32 pitch;
867}; 867};
868 868
869struct drm_radeon_gem_mmap { 869struct drm_radeon_gem_mmap {
870 uint32_t handle; 870 __u32 handle;
871 uint32_t pad; 871 __u32 pad;
872 uint64_t offset; 872 __u64 offset;
873 uint64_t size; 873 __u64 size;
874 uint64_t addr_ptr; 874 __u64 addr_ptr;
875}; 875};
876 876
877struct drm_radeon_gem_set_domain { 877struct drm_radeon_gem_set_domain {
878 uint32_t handle; 878 __u32 handle;
879 uint32_t read_domains; 879 __u32 read_domains;
880 uint32_t write_domain; 880 __u32 write_domain;
881}; 881};
882 882
883struct drm_radeon_gem_wait_idle { 883struct drm_radeon_gem_wait_idle {
884 uint32_t handle; 884 __u32 handle;
885 uint32_t pad; 885 __u32 pad;
886}; 886};
887 887
888struct drm_radeon_gem_busy { 888struct drm_radeon_gem_busy {
889 uint32_t handle; 889 __u32 handle;
890 uint32_t domain; 890 __u32 domain;
891}; 891};
892 892
893struct drm_radeon_gem_pread { 893struct drm_radeon_gem_pread {
894 /** Handle for the object being read. */ 894 /** Handle for the object being read. */
895 uint32_t handle; 895 __u32 handle;
896 uint32_t pad; 896 __u32 pad;
897 /** Offset into the object to read from */ 897 /** Offset into the object to read from */
898 uint64_t offset; 898 __u64 offset;
899 /** Length of data to read */ 899 /** Length of data to read */
900 uint64_t size; 900 __u64 size;
901 /** Pointer to write the data into. */ 901 /** Pointer to write the data into. */
902 /* void *, but pointers are not 32/64 compatible */ 902 /* void *, but pointers are not 32/64 compatible */
903 uint64_t data_ptr; 903 __u64 data_ptr;
904}; 904};
905 905
906struct drm_radeon_gem_pwrite { 906struct drm_radeon_gem_pwrite {
907 /** Handle for the object being written to. */ 907 /** Handle for the object being written to. */
908 uint32_t handle; 908 __u32 handle;
909 uint32_t pad; 909 __u32 pad;
910 /** Offset into the object to write to */ 910 /** Offset into the object to write to */
911 uint64_t offset; 911 __u64 offset;
912 /** Length of data to write */ 912 /** Length of data to write */
913 uint64_t size; 913 __u64 size;
914 /** Pointer to read the data from. */ 914 /** Pointer to read the data from. */
915 /* void *, but pointers are not 32/64 compatible */ 915 /* void *, but pointers are not 32/64 compatible */
916 uint64_t data_ptr; 916 __u64 data_ptr;
917}; 917};
918 918
919/* Sets or returns a value associated with a buffer. */ 919/* Sets or returns a value associated with a buffer. */
920struct drm_radeon_gem_op { 920struct drm_radeon_gem_op {
921 uint32_t handle; /* buffer */ 921 __u32 handle; /* buffer */
922 uint32_t op; /* RADEON_GEM_OP_* */ 922 __u32 op; /* RADEON_GEM_OP_* */
923 uint64_t value; /* input or return value */ 923 __u64 value; /* input or return value */
924}; 924};
925 925
926#define RADEON_GEM_OP_GET_INITIAL_DOMAIN 0 926#define RADEON_GEM_OP_GET_INITIAL_DOMAIN 0
@@ -940,11 +940,11 @@ struct drm_radeon_gem_op {
940#define RADEON_VM_PAGE_SNOOPED (1 << 4) 940#define RADEON_VM_PAGE_SNOOPED (1 << 4)
941 941
942struct drm_radeon_gem_va { 942struct drm_radeon_gem_va {
943 uint32_t handle; 943 __u32 handle;
944 uint32_t operation; 944 __u32 operation;
945 uint32_t vm_id; 945 __u32 vm_id;
946 uint32_t flags; 946 __u32 flags;
947 uint64_t offset; 947 __u64 offset;
948}; 948};
949 949
950#define RADEON_CHUNK_ID_RELOCS 0x01 950#define RADEON_CHUNK_ID_RELOCS 0x01
@@ -966,29 +966,29 @@ struct drm_radeon_gem_va {
966/* 0 = normal, + = higher priority, - = lower priority */ 966/* 0 = normal, + = higher priority, - = lower priority */
967 967
968struct drm_radeon_cs_chunk { 968struct drm_radeon_cs_chunk {
969 uint32_t chunk_id; 969 __u32 chunk_id;
970 uint32_t length_dw; 970 __u32 length_dw;
971 uint64_t chunk_data; 971 __u64 chunk_data;
972}; 972};
973 973
974/* drm_radeon_cs_reloc.flags */ 974/* drm_radeon_cs_reloc.flags */
975#define RADEON_RELOC_PRIO_MASK (0xf << 0) 975#define RADEON_RELOC_PRIO_MASK (0xf << 0)
976 976
977struct drm_radeon_cs_reloc { 977struct drm_radeon_cs_reloc {
978 uint32_t handle; 978 __u32 handle;
979 uint32_t read_domains; 979 __u32 read_domains;
980 uint32_t write_domain; 980 __u32 write_domain;
981 uint32_t flags; 981 __u32 flags;
982}; 982};
983 983
984struct drm_radeon_cs { 984struct drm_radeon_cs {
985 uint32_t num_chunks; 985 __u32 num_chunks;
986 uint32_t cs_id; 986 __u32 cs_id;
987 /* this points to uint64_t * which point to cs chunks */ 987 /* this points to __u64 * which point to cs chunks */
988 uint64_t chunks; 988 __u64 chunks;
989 /* updates to the limits after this CS ioctl */ 989 /* updates to the limits after this CS ioctl */
990 uint64_t gart_limit; 990 __u64 gart_limit;
991 uint64_t vram_limit; 991 __u64 vram_limit;
992}; 992};
993 993
994#define RADEON_INFO_DEVICE_ID 0x00 994#define RADEON_INFO_DEVICE_ID 0x00
@@ -1047,9 +1047,9 @@ struct drm_radeon_cs {
1047#define RADEON_INFO_GPU_RESET_COUNTER 0x26 1047#define RADEON_INFO_GPU_RESET_COUNTER 0x26
1048 1048
1049struct drm_radeon_info { 1049struct drm_radeon_info {
1050 uint32_t request; 1050 __u32 request;
1051 uint32_t pad; 1051 __u32 pad;
1052 uint64_t value; 1052 __u64 value;
1053}; 1053};
1054 1054
1055/* Those correspond to the tile index to use, this is to explicitly state 1055/* Those correspond to the tile index to use, this is to explicitly state
diff --git a/include/drm/savage_drm.h b/include/drm/savage_drm.h
index f7a75eff..1a91234e 100644
--- a/include/drm/savage_drm.h
+++ b/include/drm/savage_drm.h
@@ -26,10 +26,16 @@
26#ifndef __SAVAGE_DRM_H__ 26#ifndef __SAVAGE_DRM_H__
27#define __SAVAGE_DRM_H__ 27#define __SAVAGE_DRM_H__
28 28
29#include "drm.h"
30
31#if defined(__cplusplus)
32extern "C" {
33#endif
34
29#ifndef __SAVAGE_SAREA_DEFINES__ 35#ifndef __SAVAGE_SAREA_DEFINES__
30#define __SAVAGE_SAREA_DEFINES__ 36#define __SAVAGE_SAREA_DEFINES__
31 37
32/* 2 heaps (1 for card, 1 for agp), each divided into upto 128 38/* 2 heaps (1 for card, 1 for agp), each divided into up to 128
33 * regions, subject to a minimum region size of (1<<16) == 64k. 39 * regions, subject to a minimum region size of (1<<16) == 64k.
34 * 40 *
35 * Clients may subdivide regions internally, but when sharing between 41 * Clients may subdivide regions internally, but when sharing between
@@ -63,10 +69,10 @@ typedef struct _drm_savage_sarea {
63#define DRM_SAVAGE_BCI_EVENT_EMIT 0x02 69#define DRM_SAVAGE_BCI_EVENT_EMIT 0x02
64#define DRM_SAVAGE_BCI_EVENT_WAIT 0x03 70#define DRM_SAVAGE_BCI_EVENT_WAIT 0x03
65 71
66#define DRM_IOCTL_SAVAGE_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_INIT, drm_savage_init_t) 72#define DRM_IOCTL_SAVAGE_BCI_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_INIT, drm_savage_init_t)
67#define DRM_IOCTL_SAVAGE_CMDBUF DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_CMDBUF, drm_savage_cmdbuf_t) 73#define DRM_IOCTL_SAVAGE_BCI_CMDBUF DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_CMDBUF, drm_savage_cmdbuf_t)
68#define DRM_IOCTL_SAVAGE_EVENT_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_EMIT, drm_savage_event_emit_t) 74#define DRM_IOCTL_SAVAGE_BCI_EVENT_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_EMIT, drm_savage_event_emit_t)
69#define DRM_IOCTL_SAVAGE_EVENT_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_WAIT, drm_savage_event_wait_t) 75#define DRM_IOCTL_SAVAGE_BCI_EVENT_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_WAIT, drm_savage_event_wait_t)
70 76
71#define SAVAGE_DMA_PCI 1 77#define SAVAGE_DMA_PCI 1
72#define SAVAGE_DMA_AGP 3 78#define SAVAGE_DMA_AGP 3
@@ -207,4 +213,8 @@ union drm_savage_cmd_header {
207 } clear1; /* SAVAGE_CMD_CLEAR data */ 213 } clear1; /* SAVAGE_CMD_CLEAR data */
208}; 214};
209 215
216#if defined(__cplusplus)
217}
218#endif
219
210#endif 220#endif
diff --git a/include/drm/sis_drm.h b/include/drm/sis_drm.h
index 30f7b382..8e51bb9a 100644
--- a/include/drm/sis_drm.h
+++ b/include/drm/sis_drm.h
@@ -27,6 +27,12 @@
27#ifndef __SIS_DRM_H__ 27#ifndef __SIS_DRM_H__
28#define __SIS_DRM_H__ 28#define __SIS_DRM_H__
29 29
30#include "drm.h"
31
32#if defined(__cplusplus)
33extern "C" {
34#endif
35
30/* SiS specific ioctls */ 36/* SiS specific ioctls */
31#define NOT_USED_0_3 37#define NOT_USED_0_3
32#define DRM_SIS_FB_ALLOC 0x04 38#define DRM_SIS_FB_ALLOC 0x04
@@ -64,4 +70,8 @@ typedef struct {
64 unsigned int offset, size; 70 unsigned int offset, size;
65} drm_sis_fb_t; 71} drm_sis_fb_t;
66 72
73#if defined(__cplusplus)
74}
75#endif
76
67#endif /* __SIS_DRM_H__ */ 77#endif /* __SIS_DRM_H__ */
diff --git a/include/drm/tegra_drm.h b/include/drm/tegra_drm.h
index 1be09c4b..f01f7a11 100644
--- a/include/drm/tegra_drm.h
+++ b/include/drm/tegra_drm.h
@@ -1,23 +1,33 @@
1/* 1/*
2 * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved. 2 * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved.
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify it 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * under the terms and conditions of the GNU General Public License, 5 * copy of this software and associated documentation files (the "Software"),
6 * version 2, as published by the Free Software Foundation. 6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
7 * 10 *
8 * This program is distributed in the hope it will be useful, but WITHOUT 11 * The above copyright notice and this permission notice shall be included in
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * all copies or substantial portions of the Software.
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 * 13 *
13 * You should have received a copy of the GNU General Public License 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * along with this program. If not, see <http://www.gnu.org/licenses/>. 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
15 */ 21 */
16 22
17#ifndef _UAPI_TEGRA_DRM_H_ 23#ifndef _TEGRA_DRM_H_
18#define _UAPI_TEGRA_DRM_H_ 24#define _TEGRA_DRM_H_
19 25
20#include <drm/drm.h> 26#include "drm.h"
27
28#if defined(__cplusplus)
29extern "C" {
30#endif
21 31
22#define DRM_TEGRA_GEM_CREATE_TILED (1 << 0) 32#define DRM_TEGRA_GEM_CREATE_TILED (1 << 0)
23#define DRM_TEGRA_GEM_CREATE_BOTTOM_UP (1 << 1) 33#define DRM_TEGRA_GEM_CREATE_BOTTOM_UP (1 << 1)
@@ -229,4 +239,8 @@ struct drm_tegra_keepon {
229#define DRM_IOCTL_TEGRA_START_KEEPON DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_START_KEEPON, struct drm_tegra_keepon) 239#define DRM_IOCTL_TEGRA_START_KEEPON DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_START_KEEPON, struct drm_tegra_keepon)
230#define DRM_IOCTL_TEGRA_STOP_KEEPON DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_STOP_KEEPON, struct drm_tegra_keepon) 240#define DRM_IOCTL_TEGRA_STOP_KEEPON DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_STOP_KEEPON, struct drm_tegra_keepon)
231 241
242#if defined(__cplusplus)
243}
244#endif
245
232#endif 246#endif
diff --git a/include/drm/vc4_drm.h b/include/drm/vc4_drm.h
index 319881d8..4117117b 100644
--- a/include/drm/vc4_drm.h
+++ b/include/drm/vc4_drm.h
@@ -38,6 +38,13 @@ extern "C" {
38#define DRM_VC4_CREATE_SHADER_BO 0x05 38#define DRM_VC4_CREATE_SHADER_BO 0x05
39#define DRM_VC4_GET_HANG_STATE 0x06 39#define DRM_VC4_GET_HANG_STATE 0x06
40#define DRM_VC4_GET_PARAM 0x07 40#define DRM_VC4_GET_PARAM 0x07
41#define DRM_VC4_SET_TILING 0x08
42#define DRM_VC4_GET_TILING 0x09
43#define DRM_VC4_LABEL_BO 0x0a
44#define DRM_VC4_GEM_MADVISE 0x0b
45#define DRM_VC4_PERFMON_CREATE 0x0c
46#define DRM_VC4_PERFMON_DESTROY 0x0d
47#define DRM_VC4_PERFMON_GET_VALUES 0x0e
41 48
42#define DRM_IOCTL_VC4_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SUBMIT_CL, struct drm_vc4_submit_cl) 49#define DRM_IOCTL_VC4_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SUBMIT_CL, struct drm_vc4_submit_cl)
43#define DRM_IOCTL_VC4_WAIT_SEQNO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_SEQNO, struct drm_vc4_wait_seqno) 50#define DRM_IOCTL_VC4_WAIT_SEQNO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_SEQNO, struct drm_vc4_wait_seqno)
@@ -47,6 +54,13 @@ extern "C" {
47#define DRM_IOCTL_VC4_CREATE_SHADER_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_SHADER_BO, struct drm_vc4_create_shader_bo) 54#define DRM_IOCTL_VC4_CREATE_SHADER_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_SHADER_BO, struct drm_vc4_create_shader_bo)
48#define DRM_IOCTL_VC4_GET_HANG_STATE DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_HANG_STATE, struct drm_vc4_get_hang_state) 55#define DRM_IOCTL_VC4_GET_HANG_STATE DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_HANG_STATE, struct drm_vc4_get_hang_state)
49#define DRM_IOCTL_VC4_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_PARAM, struct drm_vc4_get_param) 56#define DRM_IOCTL_VC4_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_PARAM, struct drm_vc4_get_param)
57#define DRM_IOCTL_VC4_SET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SET_TILING, struct drm_vc4_set_tiling)
58#define DRM_IOCTL_VC4_GET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_TILING, struct drm_vc4_get_tiling)
59#define DRM_IOCTL_VC4_LABEL_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_LABEL_BO, struct drm_vc4_label_bo)
60#define DRM_IOCTL_VC4_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GEM_MADVISE, struct drm_vc4_gem_madvise)
61#define DRM_IOCTL_VC4_PERFMON_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_PERFMON_CREATE, struct drm_vc4_perfmon_create)
62#define DRM_IOCTL_VC4_PERFMON_DESTROY DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_PERFMON_DESTROY, struct drm_vc4_perfmon_destroy)
63#define DRM_IOCTL_VC4_PERFMON_GET_VALUES DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_PERFMON_GET_VALUES, struct drm_vc4_perfmon_get_values)
50 64
51struct drm_vc4_submit_rcl_surface { 65struct drm_vc4_submit_rcl_surface {
52 __u32 hindex; /* Handle index, or ~0 if not present. */ 66 __u32 hindex; /* Handle index, or ~0 if not present. */
@@ -149,12 +163,31 @@ struct drm_vc4_submit_cl {
149 __u32 pad:24; 163 __u32 pad:24;
150 164
151#define VC4_SUBMIT_CL_USE_CLEAR_COLOR (1 << 0) 165#define VC4_SUBMIT_CL_USE_CLEAR_COLOR (1 << 0)
166/* By default, the kernel gets to choose the order that the tiles are
167 * rendered in. If this is set, then the tiles will be rendered in a
168 * raster order, with the right-to-left vs left-to-right and
169 * top-to-bottom vs bottom-to-top dictated by
170 * VC4_SUBMIT_CL_RCL_ORDER_INCREASING_*. This allows overlapping
171 * blits to be implemented using the 3D engine.
172 */
173#define VC4_SUBMIT_CL_FIXED_RCL_ORDER (1 << 1)
174#define VC4_SUBMIT_CL_RCL_ORDER_INCREASING_X (1 << 2)
175#define VC4_SUBMIT_CL_RCL_ORDER_INCREASING_Y (1 << 3)
152 __u32 flags; 176 __u32 flags;
153 177
154 /* Returned value of the seqno of this render job (for the 178 /* Returned value of the seqno of this render job (for the
155 * wait ioctl). 179 * wait ioctl).
156 */ 180 */
157 __u64 seqno; 181 __u64 seqno;
182
183 /* ID of the perfmon to attach to this job. 0 means no perfmon. */
184 __u32 perfmonid;
185
186 /* Unused field to align this struct on 64 bits. Must be set to 0.
187 * If one ever needs to add an u32 field to this struct, this field
188 * can be used.
189 */
190 __u32 pad2;
158}; 191};
159 192
160/** 193/**
@@ -288,6 +321,9 @@ struct drm_vc4_get_hang_state {
288#define DRM_VC4_PARAM_SUPPORTS_BRANCHES 3 321#define DRM_VC4_PARAM_SUPPORTS_BRANCHES 3
289#define DRM_VC4_PARAM_SUPPORTS_ETC1 4 322#define DRM_VC4_PARAM_SUPPORTS_ETC1 4
290#define DRM_VC4_PARAM_SUPPORTS_THREADED_FS 5 323#define DRM_VC4_PARAM_SUPPORTS_THREADED_FS 5
324#define DRM_VC4_PARAM_SUPPORTS_FIXED_RCL_ORDER 6
325#define DRM_VC4_PARAM_SUPPORTS_MADVISE 7
326#define DRM_VC4_PARAM_SUPPORTS_PERFMON 8
291 327
292struct drm_vc4_get_param { 328struct drm_vc4_get_param {
293 __u32 param; 329 __u32 param;
@@ -295,6 +331,103 @@ struct drm_vc4_get_param {
295 __u64 value; 331 __u64 value;
296}; 332};
297 333
334struct drm_vc4_get_tiling {
335 __u32 handle;
336 __u32 flags;
337 __u64 modifier;
338};
339
340struct drm_vc4_set_tiling {
341 __u32 handle;
342 __u32 flags;
343 __u64 modifier;
344};
345
346/**
347 * struct drm_vc4_label_bo - Attach a name to a BO for debug purposes.
348 */
349struct drm_vc4_label_bo {
350 __u32 handle;
351 __u32 len;
352 __u64 name;
353};
354
355/*
356 * States prefixed with '__' are internal states and cannot be passed to the
357 * DRM_IOCTL_VC4_GEM_MADVISE ioctl.
358 */
359#define VC4_MADV_WILLNEED 0
360#define VC4_MADV_DONTNEED 1
361#define __VC4_MADV_PURGED 2
362#define __VC4_MADV_NOTSUPP 3
363
364struct drm_vc4_gem_madvise {
365 __u32 handle;
366 __u32 madv;
367 __u32 retained;
368 __u32 pad;
369};
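Editor's note: a minimal sketch of how a BO cache might use the new ioctl: mark an idle BO purgeable, then check retained when taking it back. fd and handle are assumed to exist, and the helper names are illustrative.

#include <stdbool.h>
#include <stdint.h>
#include <xf86drm.h>
#include "vc4_drm.h"

/* Allow the kernel to purge this BO's backing pages under memory pressure. */
static int vc4_bo_mark_purgeable(int fd, uint32_t handle)
{
        struct drm_vc4_gem_madvise arg = {
                .handle = handle,
                .madv   = VC4_MADV_DONTNEED,
        };
        return drmIoctl(fd, DRM_IOCTL_VC4_GEM_MADVISE, &arg);
}

/* Returns true if the contents survived, false if they were purged. */
static bool vc4_bo_make_unpurgeable(int fd, uint32_t handle)
{
        struct drm_vc4_gem_madvise arg = {
                .handle = handle,
                .madv   = VC4_MADV_WILLNEED,
        };
        return drmIoctl(fd, DRM_IOCTL_VC4_GEM_MADVISE, &arg) == 0 &&
               arg.retained;
}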
370
371enum {
372 VC4_PERFCNT_FEP_VALID_PRIMS_NO_RENDER,
373 VC4_PERFCNT_FEP_VALID_PRIMS_RENDER,
374 VC4_PERFCNT_FEP_CLIPPED_QUADS,
375 VC4_PERFCNT_FEP_VALID_QUADS,
376 VC4_PERFCNT_TLB_QUADS_NOT_PASSING_STENCIL,
377 VC4_PERFCNT_TLB_QUADS_NOT_PASSING_Z_AND_STENCIL,
378 VC4_PERFCNT_TLB_QUADS_PASSING_Z_AND_STENCIL,
379 VC4_PERFCNT_TLB_QUADS_ZERO_COVERAGE,
380 VC4_PERFCNT_TLB_QUADS_NON_ZERO_COVERAGE,
381 VC4_PERFCNT_TLB_QUADS_WRITTEN_TO_COLOR_BUF,
382 VC4_PERFCNT_PLB_PRIMS_OUTSIDE_VIEWPORT,
383 VC4_PERFCNT_PLB_PRIMS_NEED_CLIPPING,
384 VC4_PERFCNT_PSE_PRIMS_REVERSED,
385 VC4_PERFCNT_QPU_TOTAL_IDLE_CYCLES,
386 VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_VERTEX_COORD_SHADING,
387 VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_FRAGMENT_SHADING,
388 VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_EXEC_VALID_INST,
389 VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_WAITING_TMUS,
390 VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_WAITING_SCOREBOARD,
391 VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_WAITING_VARYINGS,
392 VC4_PERFCNT_QPU_TOTAL_INST_CACHE_HIT,
393 VC4_PERFCNT_QPU_TOTAL_INST_CACHE_MISS,
394 VC4_PERFCNT_QPU_TOTAL_UNIFORM_CACHE_HIT,
395 VC4_PERFCNT_QPU_TOTAL_UNIFORM_CACHE_MISS,
396 VC4_PERFCNT_TMU_TOTAL_TEXT_QUADS_PROCESSED,
397 VC4_PERFCNT_TMU_TOTAL_TEXT_CACHE_MISS,
398 VC4_PERFCNT_VPM_TOTAL_CLK_CYCLES_VDW_STALLED,
399 VC4_PERFCNT_VPM_TOTAL_CLK_CYCLES_VCD_STALLED,
400 VC4_PERFCNT_L2C_TOTAL_L2_CACHE_HIT,
401 VC4_PERFCNT_L2C_TOTAL_L2_CACHE_MISS,
402 VC4_PERFCNT_NUM_EVENTS,
403};
404
405#define DRM_VC4_MAX_PERF_COUNTERS 16
406
407struct drm_vc4_perfmon_create {
408 __u32 id;
409 __u32 ncounters;
410 __u8 events[DRM_VC4_MAX_PERF_COUNTERS];
411};
412
413struct drm_vc4_perfmon_destroy {
414 __u32 id;
415};
416
417/*
418 * Returns the values of the performance counters tracked by this
419 * perfmon (as an array of ncounters u64 values).
420 *
421 * No implicit synchronization is performed, so the user has to
422 * guarantee that any jobs using this perfmon have already been
423 * completed (probably by blocking on the seqno returned by the
424 * last exec that used the perfmon).
425 */
426struct drm_vc4_perfmon_get_values {
427 __u32 id;
428 __u64 values_ptr;
429};
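Editor's note: a minimal sketch of the whole perfmon flow: create a monitor, point a submit at it through perfmonid, wait for the job, then fetch the values and destroy it. fd and a filled-in submit are assumed; error handling is trimmed and the helper name is illustrative.

#include <stdint.h>
#include <xf86drm.h>
#include "vc4_drm.h"

static int vc4_profile_submit(int fd, struct drm_vc4_submit_cl *submit,
                              uint64_t values[2])
{
        struct drm_vc4_perfmon_create create = {
                .ncounters = 2,
                .events = { VC4_PERFCNT_FEP_VALID_QUADS,
                            VC4_PERFCNT_TLB_QUADS_WRITTEN_TO_COLOR_BUF },
        };
        struct drm_vc4_perfmon_get_values get;
        struct drm_vc4_perfmon_destroy destroy;

        if (drmIoctl(fd, DRM_IOCTL_VC4_PERFMON_CREATE, &create))
                return -1;

        submit->perfmonid = create.id;
        if (drmIoctl(fd, DRM_IOCTL_VC4_SUBMIT_CL, submit))
                return -1;
        /* ... block on submit->seqno (DRM_IOCTL_VC4_WAIT_SEQNO) here ... */

        get = (struct drm_vc4_perfmon_get_values) {
                .id         = create.id,
                .values_ptr = (uintptr_t)values,
        };
        if (drmIoctl(fd, DRM_IOCTL_VC4_PERFMON_GET_VALUES, &get))
                return -1;

        destroy.id = create.id;
        return drmIoctl(fd, DRM_IOCTL_VC4_PERFMON_DESTROY, &destroy);
}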
430
298#if defined(__cplusplus) 431#if defined(__cplusplus)
299} 432}
300#endif 433#endif
diff --git a/include/drm/via_drm.h b/include/drm/via_drm.h
index 182f8792..8b69e819 100644
--- a/include/drm/via_drm.h
+++ b/include/drm/via_drm.h
@@ -26,6 +26,10 @@
26 26
27#include "drm.h" 27#include "drm.h"
28 28
29#if defined(__cplusplus)
30extern "C" {
31#endif
32
29/* WARNING: These defines must be the same as what the Xserver uses. 33/* WARNING: These defines must be the same as what the Xserver uses.
30 * if you change them, you must change the defines in the Xserver. 34 * if you change them, you must change the defines in the Xserver.
31 */ 35 */
@@ -272,4 +276,8 @@ typedef struct drm_via_dmablit {
272 drm_via_blitsync_t sync; 276 drm_via_blitsync_t sync;
273} drm_via_dmablit_t; 277} drm_via_dmablit_t;
274 278
279#if defined(__cplusplus)
280}
281#endif
282
275#endif /* _VIA_DRM_H_ */ 283#endif /* _VIA_DRM_H_ */
diff --git a/include/drm/virtgpu_drm.h b/include/drm/virtgpu_drm.h
index 91a31ffe..9a781f06 100644
--- a/include/drm/virtgpu_drm.h
+++ b/include/drm/virtgpu_drm.h
@@ -63,6 +63,7 @@ struct drm_virtgpu_execbuffer {
63}; 63};
64 64
65#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */ 65#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
66#define VIRTGPU_PARAM_CAPSET_QUERY_FIX 2 /* do we have the capset fix */
66 67
67struct drm_virtgpu_getparam { 68struct drm_virtgpu_getparam {
68 __u64 param; 69 __u64 param;
diff --git a/include/drm/vmwgfx_drm.h b/include/drm/vmwgfx_drm.h
index 5b68b4d1..0bc784f5 100644
--- a/include/drm/vmwgfx_drm.h
+++ b/include/drm/vmwgfx_drm.h
@@ -30,6 +30,10 @@
30 30
31#include "drm.h" 31#include "drm.h"
32 32
33#if defined(__cplusplus)
34extern "C" {
35#endif
36
33#define DRM_VMW_MAX_SURFACE_FACES 6 37#define DRM_VMW_MAX_SURFACE_FACES 6
34#define DRM_VMW_MAX_MIP_LEVELS 24 38#define DRM_VMW_MAX_MIP_LEVELS 24
35 39
@@ -37,6 +41,7 @@
37#define DRM_VMW_GET_PARAM 0 41#define DRM_VMW_GET_PARAM 0
38#define DRM_VMW_ALLOC_DMABUF 1 42#define DRM_VMW_ALLOC_DMABUF 1
39#define DRM_VMW_UNREF_DMABUF 2 43#define DRM_VMW_UNREF_DMABUF 2
44#define DRM_VMW_HANDLE_CLOSE 2
40#define DRM_VMW_CURSOR_BYPASS 3 45#define DRM_VMW_CURSOR_BYPASS 3
41/* guarded by DRM_VMW_PARAM_NUM_STREAMS != 0*/ 46/* guarded by DRM_VMW_PARAM_NUM_STREAMS != 0*/
42#define DRM_VMW_CONTROL_STREAM 4 47#define DRM_VMW_CONTROL_STREAM 4
@@ -292,13 +297,17 @@ union drm_vmw_surface_reference_arg {
292 * @version: Allows expanding the execbuf ioctl parameters without breaking 297 * @version: Allows expanding the execbuf ioctl parameters without breaking
293 * backwards compatibility, since user-space will always tell the kernel 298 * backwards compatibility, since user-space will always tell the kernel
294 * which version it uses. 299 * which version it uses.
295 * @flags: Execbuf flags. None currently. 300 * @flags: Execbuf flags.
301 * @imported_fence_fd: FD for a fence imported from another device
296 * 302 *
297 * Argument to the DRM_VMW_EXECBUF Ioctl. 303 * Argument to the DRM_VMW_EXECBUF Ioctl.
298 */ 304 */
299 305
300#define DRM_VMW_EXECBUF_VERSION 2 306#define DRM_VMW_EXECBUF_VERSION 2
301 307
308#define DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD (1 << 0)
309#define DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD (1 << 1)
310
302struct drm_vmw_execbuf_arg { 311struct drm_vmw_execbuf_arg {
303 __u64 commands; 312 __u64 commands;
304 __u32 command_size; 313 __u32 command_size;
@@ -307,7 +316,7 @@ struct drm_vmw_execbuf_arg {
307 __u32 version; 316 __u32 version;
308 __u32 flags; 317 __u32 flags;
309 __u32 context_handle; 318 __u32 context_handle;
310 __u32 pad64; 319 __s32 imported_fence_fd;
311}; 320};
312 321
313/** 322/**
@@ -323,6 +332,7 @@ struct drm_vmw_execbuf_arg {
323 * @passed_seqno: The highest seqno number processed by the hardware 332 * @passed_seqno: The highest seqno number processed by the hardware
324 * so far. This can be used to mark user-space fence objects as signaled, and 333 * so far. This can be used to mark user-space fence objects as signaled, and
325 * to determine whether a fence seqno might be stale. 334 * to determine whether a fence seqno might be stale.
335 * @fd: FD associated with the fence, -1 if not exported
326 * @error: This member should've been set to -EFAULT on submission. 336 * @error: This member should've been set to -EFAULT on submission.
327 * The following actions should be take on completion: 337 * The following actions should be take on completion:
328 * error == -EFAULT: Fence communication failed. The host is synchronized. 338 * error == -EFAULT: Fence communication failed. The host is synchronized.
@@ -340,7 +350,7 @@ struct drm_vmw_fence_rep {
340 __u32 mask; 350 __u32 mask;
341 __u32 seqno; 351 __u32 seqno;
342 __u32 passed_seqno; 352 __u32 passed_seqno;
343 __u32 pad64; 353 __s32 fd;
344 __s32 error; 354 __s32 error;
345}; 355};
346 356
@@ -1087,4 +1097,32 @@ union drm_vmw_extended_context_arg {
1087 enum drm_vmw_extended_context req; 1097 enum drm_vmw_extended_context req;
1088 struct drm_vmw_context_arg rep; 1098 struct drm_vmw_context_arg rep;
1089}; 1099};
1100
1101/*************************************************************************/
1102/*
1103 * DRM_VMW_HANDLE_CLOSE - Close a user-space handle and release its
1104 * underlying resource.
1105 *
1106 * Note that this ioctl is overlaid on the DRM_VMW_UNREF_DMABUF Ioctl.
1107 * The ioctl arguments therefore need to be identical in layout.
1108 *
1109 */
1110
1111/**
1112 * struct drm_vmw_handle_close_arg
1113 *
1114 * @handle: Handle to close.
1115 *
1116 * Argument to the DRM_VMW_HANDLE_CLOSE Ioctl.
1117 */
1118struct drm_vmw_handle_close_arg {
1119 __u32 handle;
1120 __u32 pad64;
1121};
1122
1123
1124#if defined(__cplusplus)
1125}
1126#endif
1127
1090#endif 1128#endif
diff --git a/intel/intel-symbol-check b/intel/intel-symbol-check
index 2aa2d819..4d30a4b1 100755
--- a/intel/intel-symbol-check
+++ b/intel/intel-symbol-check
@@ -3,7 +3,7 @@
3# The following symbols (past the first five) are taken from the public headers. 3# The following symbols (past the first five) are taken from the public headers.
4# A list of the latter should be available Makefile.sources/LIBDRM_INTEL_H_FILES 4# A list of the latter should be available Makefile.sources/LIBDRM_INTEL_H_FILES
5 5
6FUNCS=$(nm -D --format=bsd --defined-only ${1-.libs/libdrm_intel.so} | awk '{print $3}' | while read func; do 6FUNCS=$($NM -D --format=bsd --defined-only ${1-.libs/libdrm_intel.so} | awk '{print $3}' | while read func; do
7( grep -q "^$func$" || echo $func ) <<EOF 7( grep -q "^$func$" || echo $func ) <<EOF
8__bss_start 8__bss_start
9_edata 9_edata
diff --git a/intel/intel_bufmgr.c b/intel/intel_bufmgr.c
index 5bad93fa..192de093 100644
--- a/intel/intel_bufmgr.c
+++ b/intel/intel_bufmgr.c
@@ -25,10 +25,6 @@
25 * 25 *
26 */ 26 */
27 27
28#ifdef HAVE_CONFIG_H
29#include "config.h"
30#endif
31
32#include <string.h> 28#include <string.h>
33#include <stdlib.h> 29#include <stdlib.h>
34#include <stdint.h> 30#include <stdint.h>
diff --git a/intel/intel_bufmgr_fake.c b/intel/intel_bufmgr_fake.c
index 641df6a1..3b24b81b 100644
--- a/intel/intel_bufmgr_fake.c
+++ b/intel/intel_bufmgr_fake.c
@@ -34,10 +34,6 @@
34 * the bugs in the old texture manager. 34 * the bugs in the old texture manager.
35 */ 35 */
36 36
37#ifdef HAVE_CONFIG_H
38#include "config.h"
39#endif
40
41#include <stdlib.h> 37#include <stdlib.h>
42#include <string.h> 38#include <string.h>
43#include <assert.h> 39#include <assert.h>
diff --git a/intel/intel_bufmgr_gem.c b/intel/intel_bufmgr_gem.c
index a6656003..5c47a46f 100644
--- a/intel/intel_bufmgr_gem.c
+++ b/intel/intel_bufmgr_gem.c
@@ -34,10 +34,6 @@
34 * Dave Airlie <airlied@linux.ie> 34 * Dave Airlie <airlied@linux.ie>
35 */ 35 */
36 36
37#ifdef HAVE_CONFIG_H
38#include "config.h"
39#endif
40
41#include <xf86drm.h> 37#include <xf86drm.h>
42#include <xf86atomic.h> 38#include <xf86atomic.h>
43#include <fcntl.h> 39#include <fcntl.h>
@@ -66,7 +62,7 @@
66#include "i915_drm.h" 62#include "i915_drm.h"
67#include "uthash.h" 63#include "uthash.h"
68 64
69#ifdef HAVE_VALGRIND 65#if HAVE_VALGRIND
70#include <valgrind.h> 66#include <valgrind.h>
71#include <memcheck.h> 67#include <memcheck.h>
72#define VG(x) x 68#define VG(x) x
@@ -270,20 +266,6 @@ struct _drm_intel_bo_gem {
270 bool is_userptr; 266 bool is_userptr;
271 267
272 /** 268 /**
273 * Boolean of whether this buffer can be placed in the full 48-bit
274 * address range on gen8+.
275 *
276 * By default, buffers will be keep in a 32-bit range, unless this
277 * flag is explicitly set.
278 */
279 bool use_48b_address_range;
280
281 /**
282 * Whether this buffer is softpinned at offset specified by the user
283 */
284 bool is_softpin;
285
286 /**
287 * Size in bytes of this buffer and its relocation descendents. 269 * Size in bytes of this buffer and its relocation descendents.
288 * 270 *
289 * Used to avoid costly tree walking in 271 * Used to avoid costly tree walking in
@@ -438,7 +420,7 @@ drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
438 420
439 if (bo_gem->relocs == NULL && bo_gem->softpin_target == NULL) { 421 if (bo_gem->relocs == NULL && bo_gem->softpin_target == NULL) {
440 DBG("%2d: %d %s(%s)\n", i, bo_gem->gem_handle, 422 DBG("%2d: %d %s(%s)\n", i, bo_gem->gem_handle,
441 bo_gem->is_softpin ? "*" : "", 423 bo_gem->kflags & EXEC_OBJECT_PINNED ? "*" : "",
442 bo_gem->name); 424 bo_gem->name);
443 continue; 425 continue;
444 } 426 }
@@ -452,7 +434,7 @@ drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
452 "%d (%s)@0x%08x %08x + 0x%08x\n", 434 "%d (%s)@0x%08x %08x + 0x%08x\n",
453 i, 435 i,
454 bo_gem->gem_handle, 436 bo_gem->gem_handle,
455 bo_gem->is_softpin ? "*" : "", 437 bo_gem->kflags & EXEC_OBJECT_PINNED ? "*" : "",
456 bo_gem->name, 438 bo_gem->name,
457 upper_32_bits(bo_gem->relocs[j].offset), 439 upper_32_bits(bo_gem->relocs[j].offset),
458 lower_32_bits(bo_gem->relocs[j].offset), 440 lower_32_bits(bo_gem->relocs[j].offset),
@@ -471,7 +453,7 @@ drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
471 "%d *(%s)@0x%08x %08x\n", 453 "%d *(%s)@0x%08x %08x\n",
472 i, 454 i,
473 bo_gem->gem_handle, 455 bo_gem->gem_handle,
474 bo_gem->is_softpin ? "*" : "", 456 bo_gem->kflags & EXEC_OBJECT_PINNED ? "*" : "",
475 bo_gem->name, 457 bo_gem->name,
476 target_gem->gem_handle, 458 target_gem->gem_handle,
477 target_gem->name, 459 target_gem->name,
@@ -541,14 +523,11 @@ drm_intel_add_validate_buffer2(drm_intel_bo *bo, int need_fence)
541 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr; 523 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
542 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo; 524 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
543 int index; 525 int index;
544 int flags = 0; 526 unsigned long flags;
545 527
528 flags = 0;
546 if (need_fence) 529 if (need_fence)
547 flags |= EXEC_OBJECT_NEEDS_FENCE; 530 flags |= EXEC_OBJECT_NEEDS_FENCE;
548 if (bo_gem->use_48b_address_range)
549 flags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
550 if (bo_gem->is_softpin)
551 flags |= EXEC_OBJECT_PINNED;
552 531
553 if (bo_gem->validate_index != -1) { 532 if (bo_gem->validate_index != -1) {
554 bufmgr_gem->exec2_objects[bo_gem->validate_index].flags |= flags; 533 bufmgr_gem->exec2_objects[bo_gem->validate_index].flags |= flags;
@@ -579,7 +558,7 @@ drm_intel_add_validate_buffer2(drm_intel_bo *bo, int need_fence)
579 bufmgr_gem->exec2_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs; 558 bufmgr_gem->exec2_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
580 bufmgr_gem->exec2_objects[index].alignment = bo->align; 559 bufmgr_gem->exec2_objects[index].alignment = bo->align;
581 bufmgr_gem->exec2_objects[index].offset = bo->offset64; 560 bufmgr_gem->exec2_objects[index].offset = bo->offset64;
582 bufmgr_gem->exec2_objects[index].flags = flags | bo_gem->kflags; 561 bufmgr_gem->exec2_objects[index].flags = bo_gem->kflags | flags;
583 bufmgr_gem->exec2_objects[index].rsvd1 = 0; 562 bufmgr_gem->exec2_objects[index].rsvd1 = 0;
584 bufmgr_gem->exec2_objects[index].rsvd2 = 0; 563 bufmgr_gem->exec2_objects[index].rsvd2 = 0;
585 bufmgr_gem->exec_bos[index] = bo; 564 bufmgr_gem->exec_bos[index] = bo;
@@ -676,7 +655,6 @@ drm_intel_gem_bo_busy(drm_intel_bo *bo)
676 } else { 655 } else {
677 return false; 656 return false;
678 } 657 }
679 return (ret == 0 && busy.busy);
680} 658}
681 659
682static int 660static int
@@ -832,6 +810,10 @@ retry:
832 } 810 }
833 811
834 bo_gem->gem_handle = create.handle; 812 bo_gem->gem_handle = create.handle;
813 HASH_ADD(handle_hh, bufmgr_gem->handle_table,
814 gem_handle, sizeof(bo_gem->gem_handle),
815 bo_gem);
816
835 bo_gem->bo.handle = bo_gem->gem_handle; 817 bo_gem->bo.handle = bo_gem->gem_handle;
836 bo_gem->bo.bufmgr = bufmgr; 818 bo_gem->bo.bufmgr = bufmgr;
837 bo_gem->bo.align = alignment; 819 bo_gem->bo.align = alignment;
@@ -844,10 +826,6 @@ retry:
844 tiling_mode, 826 tiling_mode,
845 stride)) 827 stride))
846 goto err_free; 828 goto err_free;
847
848 HASH_ADD(handle_hh, bufmgr_gem->handle_table,
849 gem_handle, sizeof(bo_gem->gem_handle),
850 bo_gem);
851 } 829 }
852 830
853 bo_gem->name = name; 831 bo_gem->name = name;
@@ -857,7 +835,6 @@ retry:
857 bo_gem->used_as_reloc_target = false; 835 bo_gem->used_as_reloc_target = false;
858 bo_gem->has_error = false; 836 bo_gem->has_error = false;
859 bo_gem->reusable = true; 837 bo_gem->reusable = true;
860 bo_gem->use_48b_address_range = false;
861 838
862 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, alignment); 839 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, alignment);
863 pthread_mutex_unlock(&bufmgr_gem->lock); 840 pthread_mutex_unlock(&bufmgr_gem->lock);
@@ -1016,7 +993,6 @@ drm_intel_gem_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
1016 bo_gem->used_as_reloc_target = false; 993 bo_gem->used_as_reloc_target = false;
1017 bo_gem->has_error = false; 994 bo_gem->has_error = false;
1018 bo_gem->reusable = false; 995 bo_gem->reusable = false;
1019 bo_gem->use_48b_address_range = false;
1020 996
1021 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0); 997 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
1022 pthread_mutex_unlock(&bufmgr_gem->lock); 998 pthread_mutex_unlock(&bufmgr_gem->lock);
@@ -1164,7 +1140,6 @@ drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
1164 bo_gem->bo.handle = open_arg.handle; 1140 bo_gem->bo.handle = open_arg.handle;
1165 bo_gem->global_name = handle; 1141 bo_gem->global_name = handle;
1166 bo_gem->reusable = false; 1142 bo_gem->reusable = false;
1167 bo_gem->use_48b_address_range = false;
1168 1143
1169 HASH_ADD(handle_hh, bufmgr_gem->handle_table, 1144 HASH_ADD(handle_hh, bufmgr_gem->handle_table,
1170 gem_handle, sizeof(bo_gem->gem_handle), bo_gem); 1145 gem_handle, sizeof(bo_gem->gem_handle), bo_gem);
@@ -1411,8 +1386,6 @@ drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
1411 bo_gem->name = NULL; 1386 bo_gem->name = NULL;
1412 bo_gem->validate_index = -1; 1387 bo_gem->validate_index = -1;
1413 1388
1414 bo_gem->kflags = 0;
1415
1416 DRMLISTADDTAIL(&bo_gem->head, &bucket->head); 1389 DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
1417 } else { 1390 } else {
1418 drm_intel_gem_bo_free(bo); 1391 drm_intel_gem_bo_free(bo);
@@ -1652,7 +1625,7 @@ int
1652drm_intel_gem_bo_map_unsynchronized(drm_intel_bo *bo) 1625drm_intel_gem_bo_map_unsynchronized(drm_intel_bo *bo)
1653{ 1626{
1654 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; 1627 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1655#ifdef HAVE_VALGRIND 1628#if HAVE_VALGRIND
1656 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 1629 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1657#endif 1630#endif
1658 int ret; 1631 int ret;
@@ -2054,7 +2027,11 @@ static void
2054drm_intel_gem_bo_use_48b_address_range(drm_intel_bo *bo, uint32_t enable) 2027drm_intel_gem_bo_use_48b_address_range(drm_intel_bo *bo, uint32_t enable)
2055{ 2028{
2056 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 2029 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2057 bo_gem->use_48b_address_range = enable; 2030
2031 if (enable)
2032 bo_gem->kflags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
2033 else
2034 bo_gem->kflags &= ~EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
2058} 2035}
2059 2036
2060static int 2037static int
@@ -2071,7 +2048,7 @@ drm_intel_gem_bo_add_softpin_target(drm_intel_bo *bo, drm_intel_bo *target_bo)
2071 return -ENOMEM; 2048 return -ENOMEM;
2072 } 2049 }
2073 2050
2074 if (!target_bo_gem->is_softpin) 2051 if (!(target_bo_gem->kflags & EXEC_OBJECT_PINNED))
2075 return -EINVAL; 2052 return -EINVAL;
2076 if (target_bo_gem == bo_gem) 2053 if (target_bo_gem == bo_gem)
2077 return -EINVAL; 2054 return -EINVAL;
@@ -2103,7 +2080,7 @@ drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
2103 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr; 2080 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
2104 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *)target_bo; 2081 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *)target_bo;
2105 2082
2106 if (target_bo_gem->is_softpin) 2083 if (target_bo_gem->kflags & EXEC_OBJECT_PINNED)
2107 return drm_intel_gem_bo_add_softpin_target(bo, target_bo); 2084 return drm_intel_gem_bo_add_softpin_target(bo, target_bo);
2108 else 2085 else
2109 return do_bo_emit_reloc(bo, offset, target_bo, target_offset, 2086 return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
@@ -2287,7 +2264,7 @@ drm_intel_update_buffer_offsets2 (drm_intel_bufmgr_gem *bufmgr_gem)
2287 /* If we're seeing softpinned object here it means that the kernel 2264 /* If we're seeing softpinned object here it means that the kernel
2288 * has relocated our object... Indicating a programming error 2265 * has relocated our object... Indicating a programming error
2289 */ 2266 */
2290 assert(!bo_gem->is_softpin); 2267 assert(!(bo_gem->kflags & EXEC_OBJECT_PINNED));
2291 DBG("BO %d (%s) migrated: 0x%08x %08x -> 0x%08x %08x\n", 2268 DBG("BO %d (%s) migrated: 0x%08x %08x -> 0x%08x %08x\n",
2292 bo_gem->gem_handle, bo_gem->name, 2269 bo_gem->gem_handle, bo_gem->name,
2293 upper_32_bits(bo->offset64), 2270 upper_32_bits(bo->offset64),
@@ -2643,9 +2620,10 @@ drm_intel_gem_bo_set_softpin_offset(drm_intel_bo *bo, uint64_t offset)
2643{ 2620{
2644 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 2621 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2645 2622
2646 bo_gem->is_softpin = true;
2647 bo->offset64 = offset; 2623 bo->offset64 = offset;
2648 bo->offset = offset; 2624 bo->offset = offset;
2625 bo_gem->kflags |= EXEC_OBJECT_PINNED;
2626
2649 return 0; 2627 return 0;
2650} 2628}
2651 2629
@@ -2709,7 +2687,6 @@ drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, int prime_fd, int s
2709 bo_gem->used_as_reloc_target = false; 2687 bo_gem->used_as_reloc_target = false;
2710 bo_gem->has_error = false; 2688 bo_gem->has_error = false;
2711 bo_gem->reusable = false; 2689 bo_gem->reusable = false;
2712 bo_gem->use_48b_address_range = false;
2713 2690
2714 memclear(get_tiling); 2691 memclear(get_tiling);
2715 get_tiling.handle = bo_gem->gem_handle; 2692 get_tiling.handle = bo_gem->gem_handle;
@@ -3681,6 +3658,8 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
3681 bufmgr_gem->gen = 8; 3658 bufmgr_gem->gen = 8;
3682 else if (IS_GEN9(bufmgr_gem->pci_device)) 3659 else if (IS_GEN9(bufmgr_gem->pci_device))
3683 bufmgr_gem->gen = 9; 3660 bufmgr_gem->gen = 9;
3661 else if (IS_GEN10(bufmgr_gem->pci_device))
3662 bufmgr_gem->gen = 10;
3684 else { 3663 else {
3685 free(bufmgr_gem); 3664 free(bufmgr_gem);
3686 bufmgr_gem = NULL; 3665 bufmgr_gem = NULL;
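The intel_bufmgr_gem.c hunks above replace the per-BO booleans is_softpin and use_48b_address_range with kernel-format bits kept in bo_gem->kflags (EXEC_OBJECT_PINNED, EXEC_OBJECT_SUPPORTS_48B_ADDRESS), so drm_intel_add_validate_buffer2() simply ORs kflags into exec2_objects[].flags instead of translating booleans. A simplified sketch of that pattern follows; the struct and helper names are hypothetical, only the EXEC_OBJECT_* flags come from i915_drm.h.

#include <stdbool.h>
#include <stdint.h>
#include "i915_drm.h"

/* Hypothetical, reduced BO: execbuffer flags are stored in kernel form. */
struct fake_bo {
	uint64_t kflags;
};

static void fake_bo_set_softpin(struct fake_bo *bo, bool pin)
{
	if (pin)
		bo->kflags |= EXEC_OBJECT_PINNED;
	else
		bo->kflags &= ~(uint64_t)EXEC_OBJECT_PINNED;
}

static void fake_bo_use_48b_range(struct fake_bo *bo, bool enable)
{
	if (enable)
		bo->kflags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
	else
		bo->kflags &= ~(uint64_t)EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
}

/* What the validate path now does: no translation, just OR in the extras. */
static uint64_t fake_bo_exec_flags(const struct fake_bo *bo, bool need_fence)
{
	return bo->kflags | (need_fence ? EXEC_OBJECT_NEEDS_FENCE : 0);
}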
diff --git a/intel/intel_chipset.h b/intel/intel_chipset.h
index 41fc0da0..01d250e8 100644
--- a/intel/intel_chipset.h
+++ b/intel/intel_chipset.h
@@ -202,7 +202,7 @@
202#define PCI_CHIP_KABYLAKE_ULX_GT1 0x590E 202#define PCI_CHIP_KABYLAKE_ULX_GT1 0x590E
203#define PCI_CHIP_KABYLAKE_ULX_GT2 0x591E 203#define PCI_CHIP_KABYLAKE_ULX_GT2 0x591E
204#define PCI_CHIP_KABYLAKE_DT_GT2 0x5912 204#define PCI_CHIP_KABYLAKE_DT_GT2 0x5912
205#define PCI_CHIP_KABYLAKE_DT_GT1_5 0x5917 205#define PCI_CHIP_KABYLAKE_M_GT2 0x5917
206#define PCI_CHIP_KABYLAKE_DT_GT1 0x5902 206#define PCI_CHIP_KABYLAKE_DT_GT1 0x5902
207#define PCI_CHIP_KABYLAKE_HALO_GT2 0x591B 207#define PCI_CHIP_KABYLAKE_HALO_GT2 0x591B
208#define PCI_CHIP_KABYLAKE_HALO_GT4 0x593B 208#define PCI_CHIP_KABYLAKE_HALO_GT4 0x593B
@@ -221,6 +221,41 @@
221#define PCI_CHIP_GLK 0x3184 221#define PCI_CHIP_GLK 0x3184
222#define PCI_CHIP_GLK_2X6 0x3185 222#define PCI_CHIP_GLK_2X6 0x3185
223 223
224#define PCI_CHIP_COFFEELAKE_S_GT1_1 0x3E90
225#define PCI_CHIP_COFFEELAKE_S_GT1_2 0x3E93
226#define PCI_CHIP_COFFEELAKE_S_GT1_3 0x3E99
227#define PCI_CHIP_COFFEELAKE_S_GT2_1 0x3E91
228#define PCI_CHIP_COFFEELAKE_S_GT2_2 0x3E92
229#define PCI_CHIP_COFFEELAKE_S_GT2_3 0x3E96
230#define PCI_CHIP_COFFEELAKE_S_GT2_4 0x3E9A
231#define PCI_CHIP_COFFEELAKE_H_GT2_1 0x3E9B
232#define PCI_CHIP_COFFEELAKE_H_GT2_2 0x3E94
233#define PCI_CHIP_COFFEELAKE_U_GT1_1 0x3EA1
234#define PCI_CHIP_COFFEELAKE_U_GT1_2 0x3EA4
235#define PCI_CHIP_COFFEELAKE_U_GT2_1 0x3EA0
236#define PCI_CHIP_COFFEELAKE_U_GT2_2 0x3EA3
237#define PCI_CHIP_COFFEELAKE_U_GT2_3 0x3EA9
238#define PCI_CHIP_COFFEELAKE_U_GT3_1 0x3EA2
239#define PCI_CHIP_COFFEELAKE_U_GT3_2 0x3EA5
240#define PCI_CHIP_COFFEELAKE_U_GT3_3 0x3EA6
241#define PCI_CHIP_COFFEELAKE_U_GT3_4 0x3EA7
242#define PCI_CHIP_COFFEELAKE_U_GT3_5 0x3EA8
243
244#define PCI_CHIP_CANNONLAKE_0 0x5A51
245#define PCI_CHIP_CANNONLAKE_1 0x5A59
246#define PCI_CHIP_CANNONLAKE_2 0x5A41
247#define PCI_CHIP_CANNONLAKE_3 0x5A49
248#define PCI_CHIP_CANNONLAKE_4 0x5A52
249#define PCI_CHIP_CANNONLAKE_5 0x5A5A
250#define PCI_CHIP_CANNONLAKE_6 0x5A42
251#define PCI_CHIP_CANNONLAKE_7 0x5A4A
252#define PCI_CHIP_CANNONLAKE_8 0x5A50
253#define PCI_CHIP_CANNONLAKE_9 0x5A40
254#define PCI_CHIP_CANNONLAKE_10 0x5A54
255#define PCI_CHIP_CANNONLAKE_11 0x5A5C
256#define PCI_CHIP_CANNONLAKE_12 0x5A44
257#define PCI_CHIP_CANNONLAKE_13 0x5A4C
258
224#define IS_MOBILE(devid) ((devid) == PCI_CHIP_I855_GM || \ 259#define IS_MOBILE(devid) ((devid) == PCI_CHIP_I855_GM || \
225 (devid) == PCI_CHIP_I915_GM || \ 260 (devid) == PCI_CHIP_I915_GM || \
226 (devid) == PCI_CHIP_I945_GM || \ 261 (devid) == PCI_CHIP_I945_GM || \
@@ -411,7 +446,6 @@
411 446
412#define IS_KBL_GT1(devid) ((devid) == PCI_CHIP_KABYLAKE_ULT_GT1_5 || \ 447#define IS_KBL_GT1(devid) ((devid) == PCI_CHIP_KABYLAKE_ULT_GT1_5 || \
413 (devid) == PCI_CHIP_KABYLAKE_ULX_GT1_5 || \ 448 (devid) == PCI_CHIP_KABYLAKE_ULX_GT1_5 || \
414 (devid) == PCI_CHIP_KABYLAKE_DT_GT1_5 || \
415 (devid) == PCI_CHIP_KABYLAKE_ULT_GT1 || \ 449 (devid) == PCI_CHIP_KABYLAKE_ULT_GT1 || \
416 (devid) == PCI_CHIP_KABYLAKE_ULX_GT1 || \ 450 (devid) == PCI_CHIP_KABYLAKE_ULX_GT1 || \
417 (devid) == PCI_CHIP_KABYLAKE_DT_GT1 || \ 451 (devid) == PCI_CHIP_KABYLAKE_DT_GT1 || \
@@ -423,6 +457,7 @@
423 (devid) == PCI_CHIP_KABYLAKE_ULT_GT2F || \ 457 (devid) == PCI_CHIP_KABYLAKE_ULT_GT2F || \
424 (devid) == PCI_CHIP_KABYLAKE_ULX_GT2 || \ 458 (devid) == PCI_CHIP_KABYLAKE_ULX_GT2 || \
425 (devid) == PCI_CHIP_KABYLAKE_DT_GT2 || \ 459 (devid) == PCI_CHIP_KABYLAKE_DT_GT2 || \
460 (devid) == PCI_CHIP_KABYLAKE_M_GT2 || \
426 (devid) == PCI_CHIP_KABYLAKE_HALO_GT2 || \ 461 (devid) == PCI_CHIP_KABYLAKE_HALO_GT2 || \
427 (devid) == PCI_CHIP_KABYLAKE_SRV_GT2 || \ 462 (devid) == PCI_CHIP_KABYLAKE_SRV_GT2 || \
428 (devid) == PCI_CHIP_KABYLAKE_WKS_GT2) 463 (devid) == PCI_CHIP_KABYLAKE_WKS_GT2)
@@ -452,10 +487,54 @@
452#define IS_GEMINILAKE(devid) ((devid) == PCI_CHIP_GLK || \ 487#define IS_GEMINILAKE(devid) ((devid) == PCI_CHIP_GLK || \
453 (devid) == PCI_CHIP_GLK_2X6) 488 (devid) == PCI_CHIP_GLK_2X6)
454 489
490#define IS_CFL_S(devid) ((devid) == PCI_CHIP_COFFEELAKE_S_GT1_1 || \
491 (devid) == PCI_CHIP_COFFEELAKE_S_GT1_2 || \
492 (devid) == PCI_CHIP_COFFEELAKE_S_GT1_3 || \
493 (devid) == PCI_CHIP_COFFEELAKE_S_GT2_1 || \
494 (devid) == PCI_CHIP_COFFEELAKE_S_GT2_2 || \
495 (devid) == PCI_CHIP_COFFEELAKE_S_GT2_3 || \
496 (devid) == PCI_CHIP_COFFEELAKE_S_GT2_4)
497
498#define IS_CFL_H(devid) ((devid) == PCI_CHIP_COFFEELAKE_H_GT2_1 || \
499 (devid) == PCI_CHIP_COFFEELAKE_H_GT2_2)
500
501#define IS_CFL_U(devid) ((devid) == PCI_CHIP_COFFEELAKE_U_GT1_1 || \
502 (devid) == PCI_CHIP_COFFEELAKE_U_GT1_2 || \
503 (devid) == PCI_CHIP_COFFEELAKE_U_GT2_1 || \
504 (devid) == PCI_CHIP_COFFEELAKE_U_GT2_2 || \
505 (devid) == PCI_CHIP_COFFEELAKE_U_GT2_3 || \
506 (devid) == PCI_CHIP_COFFEELAKE_U_GT3_1 || \
507 (devid) == PCI_CHIP_COFFEELAKE_U_GT3_2 || \
508 (devid) == PCI_CHIP_COFFEELAKE_U_GT3_3 || \
509 (devid) == PCI_CHIP_COFFEELAKE_U_GT3_4 || \
510 (devid) == PCI_CHIP_COFFEELAKE_U_GT3_5)
511
512#define IS_COFFEELAKE(devid) (IS_CFL_S(devid) || \
513 IS_CFL_H(devid) || \
514 IS_CFL_U(devid))
515
455#define IS_GEN9(devid) (IS_SKYLAKE(devid) || \ 516#define IS_GEN9(devid) (IS_SKYLAKE(devid) || \
456 IS_BROXTON(devid) || \ 517 IS_BROXTON(devid) || \
457 IS_KABYLAKE(devid) || \ 518 IS_KABYLAKE(devid) || \
458 IS_GEMINILAKE(devid)) 519 IS_GEMINILAKE(devid) || \
520 IS_COFFEELAKE(devid))
521
522#define IS_CANNONLAKE(devid) ((devid) == PCI_CHIP_CANNONLAKE_0 || \
523 (devid) == PCI_CHIP_CANNONLAKE_1 || \
524 (devid) == PCI_CHIP_CANNONLAKE_2 || \
525 (devid) == PCI_CHIP_CANNONLAKE_3 || \
526 (devid) == PCI_CHIP_CANNONLAKE_4 || \
527 (devid) == PCI_CHIP_CANNONLAKE_5 || \
528 (devid) == PCI_CHIP_CANNONLAKE_6 || \
529 (devid) == PCI_CHIP_CANNONLAKE_7 || \
530 (devid) == PCI_CHIP_CANNONLAKE_8 || \
531 (devid) == PCI_CHIP_CANNONLAKE_9 || \
532 (devid) == PCI_CHIP_CANNONLAKE_10 || \
533 (devid) == PCI_CHIP_CANNONLAKE_11 || \
534 (devid) == PCI_CHIP_CANNONLAKE_12 || \
535 (devid) == PCI_CHIP_CANNONLAKE_13)
536
537#define IS_GEN10(devid) (IS_CANNONLAKE(devid))
459 538
460#define IS_9XX(dev) (IS_GEN3(dev) || \ 539#define IS_9XX(dev) (IS_GEN3(dev) || \
461 IS_GEN4(dev) || \ 540 IS_GEN4(dev) || \
@@ -463,7 +542,7 @@
463 IS_GEN6(dev) || \ 542 IS_GEN6(dev) || \
464 IS_GEN7(dev) || \ 543 IS_GEN7(dev) || \
465 IS_GEN8(dev) || \ 544 IS_GEN8(dev) || \
466 IS_GEN9(dev)) 545 IS_GEN9(dev) || \
467 546 IS_GEN10(dev))
468 547
469#endif /* _INTEL_CHIPSET_H */ 548#endif /* _INTEL_CHIPSET_H */
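intel_chipset.h gains Coffee Lake PCI IDs (folded into IS_GEN9() next to Kaby Lake and Gemini Lake) and Cannon Lake IDs behind new IS_CANNONLAKE()/IS_GEN10() macros, which also extend IS_9XX(). A small, hedged sketch of how a caller maps a device id to a generation with these macros, mirroring the ladders in drm_intel_bufmgr_gem_init() and drm_intel_decode_context_alloc() elsewhere in this diff:

#include <stdio.h>
#include "intel_chipset.h"

static int gen_from_devid(unsigned int devid)
{
	if (IS_GEN10(devid))    /* Cannon Lake */
		return 10;
	if (IS_GEN9(devid))     /* Skylake, Broxton, Kaby/Gemini/Coffee Lake */
		return 9;
	return 0;               /* older generations elided in this sketch */
}

int main(void)
{
	printf("0x3E92 -> gen %d\n", gen_from_devid(0x3E92)); /* CFL-S GT2 */
	printf("0x5A52 -> gen %d\n", gen_from_devid(0x5A52)); /* CNL */
	return 0;
}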
diff --git a/intel/intel_decode.c b/intel/intel_decode.c
index 803d2029..bc7b04b8 100644
--- a/intel/intel_decode.c
+++ b/intel/intel_decode.c
@@ -21,10 +21,6 @@
21 * IN THE SOFTWARE. 21 * IN THE SOFTWARE.
22 */ 22 */
23 23
24#ifdef HAVE_CONFIG_H
25#include "config.h"
26#endif
27
28#include <assert.h> 24#include <assert.h>
29#include <stdint.h> 25#include <stdint.h>
30#include <stdlib.h> 26#include <stdlib.h>
@@ -3827,7 +3823,9 @@ drm_intel_decode_context_alloc(uint32_t devid)
3827 ctx->devid = devid; 3823 ctx->devid = devid;
3828 ctx->out = stdout; 3824 ctx->out = stdout;
3829 3825
3830 if (IS_GEN9(devid)) 3826 if (IS_GEN10(devid))
3827 ctx->gen = 10;
3828 else if (IS_GEN9(devid))
3831 ctx->gen = 9; 3829 ctx->gen = 9;
3832 else if (IS_GEN8(devid)) 3830 else if (IS_GEN8(devid))
3833 ctx->gen = 8; 3831 ctx->gen = 8;
@@ -3899,7 +3897,7 @@ drm_intel_decode(struct drm_intel_decode *ctx)
3899 int ret; 3897 int ret;
3900 unsigned int index = 0; 3898 unsigned int index = 0;
3901 uint32_t devid; 3899 uint32_t devid;
3902 int size = ctx->base_count * 4; 3900 int size;
3903 void *temp; 3901 void *temp;
3904 3902
3905 if (!ctx) 3903 if (!ctx)
@@ -3909,6 +3907,7 @@ drm_intel_decode(struct drm_intel_decode *ctx)
3909 * the batchbuffer. This lets us avoid a bunch of length 3907 * the batchbuffer. This lets us avoid a bunch of length
3910 * checking in statically sized packets. 3908 * checking in statically sized packets.
3911 */ 3909 */
3910 size = ctx->base_count * 4;
3912 temp = malloc(size + 4096); 3911 temp = malloc(size + 4096);
3913 memcpy(temp, ctx->base_data, size); 3912 memcpy(temp, ctx->base_data, size);
3914 memset((char *)temp + size, 0xd0, 4096); 3913 memset((char *)temp + size, 0xd0, 4096);
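Besides teaching drm_intel_decode_context_alloc() about Gen10, the intel_decode.c hunk moves the size computation in drm_intel_decode() below the NULL check, so ctx->base_count is no longer read before ctx has been validated. A minimal, hypothetical illustration of that ordering:

#include <stddef.h>

struct decode_ctx {
	int base_count;     /* reduced stand-in for the real context */
};

static int batch_size_bytes(const struct decode_ctx *ctx)
{
	int size;

	if (!ctx)
		return -1;              /* validate first ... */

	size = ctx->base_count * 4;     /* ... dereference second */
	return size;
}

int main(void)
{
	return batch_size_bytes(NULL) == -1 ? 0 : 1;
}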
diff --git a/intel/meson.build b/intel/meson.build
new file mode 100644
index 00000000..53c7fce4
--- /dev/null
+++ b/intel/meson.build
@@ -0,0 +1,106 @@
1# Copyright © 2017-2018 Intel Corporation
2
3# Permission is hereby granted, free of charge, to any person obtaining a copy
4# of this software and associated documentation files (the "Software"), to deal
5# in the Software without restriction, including without limitation the rights
6# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7# copies of the Software, and to permit persons to whom the Software is
8# furnished to do so, subject to the following conditions:
9
10# The above copyright notice and this permission notice shall be included in
11# all copies or substantial portions of the Software.
12
13# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19# SOFTWARE.
20
21libdrm_intel = shared_library(
22 'drm_intel',
23 [
24 files(
25 'intel_bufmgr.c', 'intel_bufmgr_fake.c', 'intel_bufmgr_gem.c',
26 'intel_decode.c', 'mm.c',
27 ),
28 config_file,
29 ],
30 include_directories : [inc_root, inc_drm],
31 link_with : libdrm,
32 dependencies : [dep_pciaccess, dep_pthread_stubs, dep_rt, dep_valgrind, dep_atomic_ops],
33 c_args : warn_c_args,
34 version : '1.0.0',
35 install : true,
36)
37
38ext_libdrm_intel = declare_dependency(
39 link_with : [libdrm, libdrm_intel],
40 include_directories : [inc_drm, include_directories('.')],
41)
42
43install_headers(
44 'intel_bufmgr.h', 'intel_aub.h', 'intel_debug.h',
45 subdir : 'libdrm',
46)
47
48pkg.generate(
49 name : 'libdrm_intel',
50 libraries : libdrm_intel,
51 subdirs : ['.', 'libdrm'],
52 version : meson.project_version(),
53 requires : 'libdrm',
54 description : 'Userspace interface to intel kernel DRM services',
55)
56
57test_decode = executable(
58 'test_decode',
59 files('test_decode.c'),
60 include_directories : [inc_root, inc_drm],
61 link_with : [libdrm, libdrm_intel],
62 c_args : warn_c_args,
63)
64
65test(
66 'gen4-3d.batch',
67 prog_bash,
68 args : files('tests/gen4-3d.batch.sh'),
69 workdir : meson.current_build_dir(),
70)
71test(
72 'gen45-3d.batch',
73 prog_bash,
74 args : files('tests/gm45-3d.batch.sh'),
75 workdir : meson.current_build_dir(),
76)
77test(
78 'gen5-3d.batch',
79 prog_bash,
80 args : files('tests/gen5-3d.batch.sh'),
81 workdir : meson.current_build_dir(),
82)
83test(
84 'gen6-3d.batch',
85 prog_bash,
86 args : files('tests/gen6-3d.batch.sh'),
87 workdir : meson.current_build_dir(),
88)
89test(
90 'gen7-3d.batch',
91 prog_bash,
92 args : files('tests/gen7-3d.batch.sh'),
93 workdir : meson.current_build_dir(),
94)
95test(
96 'gen7-2d-copy.batch',
97 prog_bash,
98 args : files('tests/gen7-2d-copy.batch.sh'),
99 workdir : meson.current_build_dir(),
100)
101test(
102 'intel-symbol-check',
103 prog_bash,
104 env : env_test,
105 args : [files('intel-symbol-check'), libdrm_intel]
106)
diff --git a/intel/mm.c b/intel/mm.c
index 954e9dcb..79d8719d 100644
--- a/intel/mm.c
+++ b/intel/mm.c
@@ -22,10 +22,6 @@
22 * 22 *
23 */ 23 */
24 24
25#ifdef HAVE_CONFIG_H
26#include "config.h"
27#endif
28
29#include <stdlib.h> 25#include <stdlib.h>
30#include <assert.h> 26#include <assert.h>
31 27
diff --git a/intel/mm.h b/intel/mm.h
index 8d83743f..1b0f84fc 100644
--- a/intel/mm.h
+++ b/intel/mm.h
@@ -29,10 +29,6 @@
29#ifndef MM_H 29#ifndef MM_H
30#define MM_H 30#define MM_H
31 31
32#ifdef HAVE_CONFIG_H
33#include "config.h"
34#endif
35
36#include "libdrm_macros.h" 32#include "libdrm_macros.h"
37 33
38struct mem_block { 34struct mem_block {
diff --git a/intel/test_decode.c b/intel/test_decode.c
index b4eddcd1..b9f5b927 100644
--- a/intel/test_decode.c
+++ b/intel/test_decode.c
@@ -21,10 +21,6 @@
21 * IN THE SOFTWARE. 21 * IN THE SOFTWARE.
22 */ 22 */
23 23
24#ifdef HAVE_CONFIG_H
25#include "config.h"
26#endif
27
28#include <string.h> 24#include <string.h>
29#include <stdlib.h> 25#include <stdlib.h>
30#include <stdio.h> 26#include <stdio.h>
@@ -91,7 +87,7 @@ compare_batch(struct drm_intel_decode *ctx, const char *batch_filename)
91{ 87{
92 FILE *out = NULL; 88 FILE *out = NULL;
93 void *ptr, *ref_ptr, *batch_ptr; 89 void *ptr, *ref_ptr, *batch_ptr;
94#ifdef HAVE_OPEN_MEMSTREAM 90#if HAVE_OPEN_MEMSTREAM
95 size_t size; 91 size_t size;
96#endif 92#endif
97 size_t ref_size, batch_size; 93 size_t ref_size, batch_size;
@@ -109,7 +105,7 @@ compare_batch(struct drm_intel_decode *ctx, const char *batch_filename)
109 * figure out how to output to a file in a safe and sane way 105 * figure out how to output to a file in a safe and sane way
110 * inside of an automake project's test infrastructure. 106 * inside of an automake project's test infrastructure.
111 */ 107 */
112#ifdef HAVE_OPEN_MEMSTREAM 108#if HAVE_OPEN_MEMSTREAM
113 out = open_memstream((char **)&ptr, &size); 109 out = open_memstream((char **)&ptr, &size);
114#else 110#else
115 fprintf(stderr, "platform lacks open_memstream, skipping.\n"); 111 fprintf(stderr, "platform lacks open_memstream, skipping.\n");
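test_decode.c keeps its fallback for platforms without open_memstream() but now tests the always-defined HAVE_OPEN_MEMSTREAM with #if. For reference, a standalone sketch of the POSIX.1-2008 open_memstream() capture pattern the test relies on (the buffer and size only become valid after fflush()/fclose()):

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	char *buf = NULL;
	size_t size = 0;
	FILE *out = open_memstream(&buf, &size);

	if (!out)
		return 1;
	fprintf(out, "decoded batch output goes here\n");
	fclose(out);                    /* flushes and finalizes buf/size */

	printf("captured %zu bytes\n", size);
	free(buf);
	return 0;
}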
diff --git a/libdrm_macros.h b/libdrm_macros.h
index b88fdcef..3134ae96 100644
--- a/libdrm_macros.h
+++ b/libdrm_macros.h
@@ -23,7 +23,7 @@
23#ifndef LIBDRM_LIBDRM_H 23#ifndef LIBDRM_LIBDRM_H
24#define LIBDRM_LIBDRM_H 24#define LIBDRM_LIBDRM_H
25 25
26#if defined(HAVE_VISIBILITY) 26#if HAVE_VISIBILITY
27# define drm_private __attribute__((visibility("hidden"))) 27# define drm_private __attribute__((visibility("hidden")))
28#else 28#else
29# define drm_private 29# define drm_private
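libdrm_macros.h, and the HAVE_VALGRIND/HAVE_OPEN_MEMSTREAM/HAVE_* hunks elsewhere in this diff, switch from #ifdef or #if defined() to plain #if: the new meson build always defines these macros to 0 or 1 through config.set10(), so a defined-vs-undefined test no longer matches the convention. A hypothetical macro illustrates the idiom:

/* HAVE_SOME_FEATURE is made up for illustration; config.set10(..., false)
 * would emit exactly this 0 definition rather than leaving it undefined. */
#define HAVE_SOME_FEATURE 0

#if HAVE_SOME_FEATURE
#  define feature_hook() run_feature()
#else
#  define feature_hook() do { } while (0)      /* compiled-out no-op */
#endif

int main(void)
{
	feature_hook();         /* expands to the no-op in this configuration */
	return 0;
}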
diff --git a/libkms/api.c b/libkms/api.c
index 354d8a2e..22dd32d7 100644
--- a/libkms/api.c
+++ b/libkms/api.c
@@ -26,10 +26,6 @@
26 **************************************************************************/ 26 **************************************************************************/
27 27
28 28
29#ifdef HAVE_CONFIG_H
30#include "config.h"
31#endif
32
33#include <errno.h> 29#include <errno.h>
34#include <stdlib.h> 30#include <stdlib.h>
35#include <string.h> 31#include <string.h>
diff --git a/libkms/dumb.c b/libkms/dumb.c
index b95a072c..17efc10a 100644
--- a/libkms/dumb.c
+++ b/libkms/dumb.c
@@ -26,10 +26,6 @@
26 **************************************************************************/ 26 **************************************************************************/
27 27
28 28
29#ifdef HAVE_CONFIG_H
30#include "config.h"
31#endif
32
33#include <errno.h> 29#include <errno.h>
34#include <stdio.h> 30#include <stdio.h>
35#include <stdlib.h> 31#include <stdlib.h>
diff --git a/libkms/exynos.c b/libkms/exynos.c
index 0e97fb51..ef64a668 100644
--- a/libkms/exynos.c
+++ b/libkms/exynos.c
@@ -5,16 +5,26 @@
5 * SooChan Lim <sc1.lim@samsung.com> 5 * SooChan Lim <sc1.lim@samsung.com>
6 * Sangjin LEE <lsj119@samsung.com> 6 * Sangjin LEE <lsj119@samsung.com>
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify it 8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * under the terms of the GNU General Public License as published by the 9 * copy of this software and associated documentation files (the "Software"),
10 * Free Software Foundation; either version 2 of the License, or (at your 10 * to deal in the Software without restriction, including without limitation
11 * option) any later version. 11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
24 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
25 * SOFTWARE.
12 */ 26 */
13 27
14#ifdef HAVE_CONFIG_H
15#include "config.h"
16#endif
17
18#include <errno.h> 28#include <errno.h>
19#include <stdio.h> 29#include <stdio.h>
20#include <stdlib.h> 30#include <stdlib.h>
diff --git a/libkms/intel.c b/libkms/intel.c
index 3d8ca055..859e7a0f 100644
--- a/libkms/intel.c
+++ b/libkms/intel.c
@@ -26,10 +26,6 @@
26 **************************************************************************/ 26 **************************************************************************/
27 27
28 28
29#ifdef HAVE_CONFIG_H
30#include "config.h"
31#endif
32
33#include <errno.h> 29#include <errno.h>
34#include <stdio.h> 30#include <stdio.h>
35#include <stdlib.h> 31#include <stdlib.h>
diff --git a/libkms/internal.h b/libkms/internal.h
index 905f5b17..8b386db6 100644
--- a/libkms/internal.h
+++ b/libkms/internal.h
@@ -29,10 +29,6 @@
29#ifndef INTERNAL_H_ 29#ifndef INTERNAL_H_
30#define INTERNAL_H_ 30#define INTERNAL_H_
31 31
32#ifdef HAVE_CONFIG_H
33#include "config.h"
34#endif
35
36#include "libdrm_macros.h" 32#include "libdrm_macros.h"
37#include "libkms.h" 33#include "libkms.h"
38 34
diff --git a/libkms/kms-symbol-check b/libkms/kms-symbol-check
index 658b2692..a5c2120d 100755
--- a/libkms/kms-symbol-check
+++ b/libkms/kms-symbol-check
@@ -3,7 +3,7 @@
3# The following symbols (past the first five) are taken from the public headers. 3# The following symbols (past the first five) are taken from the public headers.
4# A list of the latter should be available in Makefile.sources/LIBKMS_H_FILES 4# A list of the latter should be available in Makefile.sources/LIBKMS_H_FILES
5 5
6FUNCS=$(nm -D --format=bsd --defined-only ${1-.libs/libkms.so} | awk '{print $3}'| while read func; do 6FUNCS=$($NM -D --format=bsd --defined-only ${1-.libs/libkms.so} | awk '{print $3}'| while read func; do
7( grep -q "^$func$" || echo $func ) <<EOF 7( grep -q "^$func$" || echo $func ) <<EOF
8__bss_start 8__bss_start
9_edata 9_edata
diff --git a/libkms/linux.c b/libkms/linux.c
index 0b50777e..56205054 100644
--- a/libkms/linux.c
+++ b/libkms/linux.c
@@ -29,10 +29,6 @@
29 * going from fd to pci id via fstat and udev. 29 * going from fd to pci id via fstat and udev.
30 */ 30 */
31 31
32
33#ifdef HAVE_CONFIG_H
34#include "config.h"
35#endif
36#include <errno.h> 32#include <errno.h>
37#include <stdio.h> 33#include <stdio.h>
38#include <stdlib.h> 34#include <stdlib.h>
@@ -110,27 +106,27 @@ linux_from_sysfs(int fd, struct kms_driver **out)
110 if (ret) 106 if (ret)
111 return ret; 107 return ret;
112 108
113#ifdef HAVE_INTEL 109#if HAVE_INTEL
114 if (!strcmp(name, "intel")) 110 if (!strcmp(name, "intel"))
115 ret = intel_create(fd, out); 111 ret = intel_create(fd, out);
116 else 112 else
117#endif 113#endif
118#ifdef HAVE_VMWGFX 114#if HAVE_VMWGFX
119 if (!strcmp(name, "vmwgfx")) 115 if (!strcmp(name, "vmwgfx"))
120 ret = vmwgfx_create(fd, out); 116 ret = vmwgfx_create(fd, out);
121 else 117 else
122#endif 118#endif
123#ifdef HAVE_NOUVEAU 119#if HAVE_NOUVEAU
124 if (!strcmp(name, "nouveau")) 120 if (!strcmp(name, "nouveau"))
125 ret = nouveau_create(fd, out); 121 ret = nouveau_create(fd, out);
126 else 122 else
127#endif 123#endif
128#ifdef HAVE_RADEON 124#if HAVE_RADEON
129 if (!strcmp(name, "radeon")) 125 if (!strcmp(name, "radeon"))
130 ret = radeon_create(fd, out); 126 ret = radeon_create(fd, out);
131 else 127 else
132#endif 128#endif
133#ifdef HAVE_EXYNOS 129#if HAVE_EXYNOS
134 if (!strcmp(name, "exynos")) 130 if (!strcmp(name, "exynos"))
135 ret = exynos_create(fd, out); 131 ret = exynos_create(fd, out);
136 else 132 else
diff --git a/libkms/meson.build b/libkms/meson.build
new file mode 100644
index 00000000..86d1a4ee
--- /dev/null
+++ b/libkms/meson.build
@@ -0,0 +1,75 @@
1# Copyright © 2017-2018 Intel Corporation
2
3# Permission is hereby granted, free of charge, to any person obtaining a copy
4# of this software and associated documentation files (the "Software"), to deal
5# in the Software without restriction, including without limitation the rights
6# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7# copies of the Software, and to permit persons to whom the Software is
8# furnished to do so, subject to the following conditions:
9
10# The above copyright notice and this permission notice shall be included in
11# all copies or substantial portions of the Software.
12
13# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19# SOFTWARE.
20
21libkms_include = [inc_root, inc_drm]
22files_libkms = files(
23 'linux.c',
24 'dumb.c',
25 'api.c',
26)
27if with_vmwgfx
28 files_libkms += files('vmwgfx.c')
29endif
30if with_intel
31 files_libkms += files('intel.c')
32endif
33if with_nouveau
34 files_libkms += files('nouveau.c')
35endif
36if with_radeon
37 files_libkms += files('radeon.c')
38endif
39if with_exynos
40 files_libkms += files('exynos.c')
41 libkms_include += include_directories('../exynos')
42endif
43
44libkms = shared_library(
45 'kms',
46 [files_libkms, config_file],
47 c_args : warn_c_args,
48 include_directories : libkms_include,
49 link_with : libdrm,
50 version : '1.0.0',
51 install : true,
52)
53
54ext_libkms = declare_dependency(
55 link_with : [libdrm, libkms],
56 include_directories : [libkms_include],
57)
58
59install_headers('libkms.h', subdir : 'libkms')
60
61pkg.generate(
62 name : 'libkms',
63 libraries : libkms,
64 subdirs : ['libkms'],
65 version : '1.0.0',
66 requires_private : 'libdrm',
67 description : 'Library that abstracts away the different mm interfaces for kernel drivers',
68)
69
70test(
71 'kms-symbol-check',
72 prog_bash,
73 env : env_test,
74 args : [files('kms-symbol-check'), libkms]
75)
diff --git a/libkms/nouveau.c b/libkms/nouveau.c
index d10e0fdb..7fe23db3 100644
--- a/libkms/nouveau.c
+++ b/libkms/nouveau.c
@@ -26,10 +26,6 @@
26 **************************************************************************/ 26 **************************************************************************/
27 27
28 28
29#ifdef HAVE_CONFIG_H
30#include "config.h"
31#endif
32
33#include <errno.h> 29#include <errno.h>
34#include <stdio.h> 30#include <stdio.h>
35#include <stdlib.h> 31#include <stdlib.h>
diff --git a/libkms/radeon.c b/libkms/radeon.c
index aaeeaf31..2cb2b11f 100644
--- a/libkms/radeon.c
+++ b/libkms/radeon.c
@@ -26,10 +26,6 @@
26 **************************************************************************/ 26 **************************************************************************/
27 27
28 28
29#ifdef HAVE_CONFIG_H
30#include "config.h"
31#endif
32
33#include <errno.h> 29#include <errno.h>
34#include <stdio.h> 30#include <stdio.h>
35#include <stdlib.h> 31#include <stdlib.h>
diff --git a/libkms/vmwgfx.c b/libkms/vmwgfx.c
index 6a24fd4d..f0e40be7 100644
--- a/libkms/vmwgfx.c
+++ b/libkms/vmwgfx.c
@@ -26,10 +26,6 @@
26 **************************************************************************/ 26 **************************************************************************/
27 27
28 28
29#ifdef HAVE_CONFIG_H
30#include "config.h"
31#endif
32
33#include <errno.h> 29#include <errno.h>
34#include <stdlib.h> 30#include <stdlib.h>
35#include <string.h> 31#include <string.h>
diff --git a/man/drm-kms.xml b/man/drm-kms.xml
index ae38dc8d..eb04c263 100644
--- a/man/drm-kms.xml
+++ b/man/drm-kms.xml
@@ -309,8 +309,8 @@ static int modeset_find_crtc(int fd, drmModeRes *res, drmModeConnector *conn)
309 <refsect1> 309 <refsect1>
310 <title>Reporting Bugs</title> 310 <title>Reporting Bugs</title>
311 <para>Bugs in this manual should be reported to 311 <para>Bugs in this manual should be reported to
312 http://bugs.freedesktop.org under the "Mesa" product, with "Other" or 312 https://bugs.freedesktop.org/enter_bug.cgi?product=DRI&amp;component=libdrm
313 "libdrm" as the component.</para> 313 under the "DRI" product, component "libdrm"</para>
314 </refsect1> 314 </refsect1>
315 315
316 <refsect1> 316 <refsect1>
diff --git a/man/drm-memory.xml b/man/drm-memory.xml
index 6b4f0759..3aa7cf25 100644
--- a/man/drm-memory.xml
+++ b/man/drm-memory.xml
@@ -410,8 +410,8 @@ memset(map, 0, creq.size);
410 <refsect1> 410 <refsect1>
411 <title>Reporting Bugs</title> 411 <title>Reporting Bugs</title>
412 <para>Bugs in this manual should be reported to 412 <para>Bugs in this manual should be reported to
413 http://bugs.freedesktop.org under the "Mesa" product, with "Other" or 413 https://bugs.freedesktop.org/enter_bug.cgi?product=DRI&amp;component=libdrm
414 "libdrm" as the component.</para> 414 under the "DRI" product, component "libdrm"</para>
415 </refsect1> 415 </refsect1>
416 416
417 <refsect1> 417 <refsect1>
diff --git a/man/drm.xml b/man/drm.xml
index 5a49fe13..1f559669 100644
--- a/man/drm.xml
+++ b/man/drm.xml
@@ -50,7 +50,7 @@
50 50
51 <para>In earlier days, the kernel framework was solely used to provide raw 51 <para>In earlier days, the kernel framework was solely used to provide raw
52 hardware access to privileged user-space processes which implement 52 hardware access to privileged user-space processes which implement
53 all the hardware abstraction layers. But more and more tasks where 53 all the hardware abstraction layers. But more and more tasks were
54 moved into the kernel. All these interfaces are based on 54 moved into the kernel. All these interfaces are based on
55 <citerefentry><refentrytitle>ioctl</refentrytitle><manvolnum>2</manvolnum></citerefentry> 55 <citerefentry><refentrytitle>ioctl</refentrytitle><manvolnum>2</manvolnum></citerefentry>
56 commands on the DRM character device. The <emphasis>libdrm</emphasis> 56 commands on the DRM character device. The <emphasis>libdrm</emphasis>
@@ -119,8 +119,8 @@
119 <refsect1> 119 <refsect1>
120 <title>Reporting Bugs</title> 120 <title>Reporting Bugs</title>
121 <para>Bugs in this manual should be reported to 121 <para>Bugs in this manual should be reported to
122 http://bugs.freedesktop.org under the "Mesa" product, with "Other" or 122 https://bugs.freedesktop.org/enter_bug.cgi?product=DRI&amp;component=libdrm
123 "libdrm" as the component.</para> 123 under the "DRI" product, component "libdrm"</para>
124 </refsect1> 124 </refsect1>
125 125
126 <refsect1> 126 <refsect1>
diff --git a/man/drmAvailable.xml b/man/drmAvailable.xml
index 55bef94a..1e5d7873 100644
--- a/man/drmAvailable.xml
+++ b/man/drmAvailable.xml
@@ -61,8 +61,8 @@
61 <refsect1> 61 <refsect1>
62 <title>Reporting Bugs</title> 62 <title>Reporting Bugs</title>
63 <para>Bugs in this function should be reported to 63 <para>Bugs in this function should be reported to
64 http://bugs.freedesktop.org under the "Mesa" product, with "Other" or 64 https://bugs.freedesktop.org/enter_bug.cgi?product=DRI&amp;component=libdrm
65 "libdrm" as the component.</para> 65 under the "DRI" product, component "libdrm"</para>
66 </refsect1> 66 </refsect1>
67 67
68 <refsect1> 68 <refsect1>
diff --git a/man/drmHandleEvent.xml b/man/drmHandleEvent.xml
index b1006e51..83304428 100644
--- a/man/drmHandleEvent.xml
+++ b/man/drmHandleEvent.xml
@@ -86,8 +86,8 @@ typedef struct _drmEventContext {
86 <refsect1> 86 <refsect1>
87 <title>Reporting Bugs</title> 87 <title>Reporting Bugs</title>
88 <para>Bugs in this function should be reported to 88 <para>Bugs in this function should be reported to
89 http://bugs.freedesktop.org under the "Mesa" product, with "Other" or 89 https://bugs.freedesktop.org/enter_bug.cgi?product=DRI&amp;component=libdrm
90 "libdrm" as the component.</para> 90 under the "DRI" product, component "libdrm"</para>
91 </refsect1> 91 </refsect1>
92 92
93 <refsect1> 93 <refsect1>
diff --git a/man/drmModeGetResources.xml b/man/drmModeGetResources.xml
index 2f5e8c2c..0ab6a68b 100644
--- a/man/drmModeGetResources.xml
+++ b/man/drmModeGetResources.xml
@@ -116,8 +116,8 @@ typedef struct _drmModeRes {
116 <refsect1> 116 <refsect1>
117 <title>Reporting Bugs</title> 117 <title>Reporting Bugs</title>
118 <para>Bugs in this function should be reported to 118 <para>Bugs in this function should be reported to
119 http://bugs.freedesktop.org under the "Mesa" product, with "Other" or 119 https://bugs.freedesktop.org/enter_bug.cgi?product=DRI&amp;component=libdrm
120 "libdrm" as the component.</para> 120 under the "DRI" product, component "libdrm"</para>
121 </refsect1> 121 </refsect1>
122 122
123 <refsect1> 123 <refsect1>
diff --git a/man/meson.build b/man/meson.build
new file mode 100644
index 00000000..45eaeda0
--- /dev/null
+++ b/man/meson.build
@@ -0,0 +1,67 @@
1# Copyright © 2017-2018 Intel Corporation
2
3# Permission is hereby granted, free of charge, to any person obtaining a copy
4# of this software and associated documentation files (the "Software"), to deal
5# in the Software without restriction, including without limitation the rights
6# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7# copies of the Software, and to permit persons to whom the Software is
8# furnished to do so, subject to the following conditions:
9
10# The above copyright notice and this permission notice shall be included in
11# all copies or substantial portions of the Software.
12
13# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19# SOFTWARE.
20
21xsltproc_args = [
22 '--stringparam', 'man.authors.section.enabled', '0',
23 '--stringparam', 'man.copyright.section.enabled', '0',
24 '--stringparam', 'funcsynopsis.style', 'ansi',
25 '--stringparam', 'man.output.quietly', '1',
26 '--nonet', manpage_style,
27]
28
29xmls = [
30 ['drm', '7'], ['drm-kms', '7'], ['drm-memory', '7'], ['drmAvailable', '3'],
31 ['drmHandleEvent', '3'], ['drmModeGetResources', '3']
32]
33foreach x : xmls
34 m = x[0]
35 s = x[1]
36 custom_target(
37 m,
38 input : files('@0@.xml'.format(m)),
39 output : '@0@.@1@'.format(m, s),
40 command : [prog_xslt, '-o', '@OUTPUT@', xsltproc_args, '@INPUT0@'],
41 install : true,
42 install_dir : join_paths(get_option('mandir'), 'man@0@'.format(s)),
43 build_by_default : true,
44 )
45endforeach
46
47foreach x : ['drm-mm', 'drm-gem', 'drm-ttm']
48 gen = custom_target(
49 'gen-@0@'.format(x),
50 input : 'drm-memory.xml',
51 output : '@0@.xml'.format(x),
52 command : [
53 prog_sed, '-e', 's@^\.so \([a-z_]\+\)\.\([0-9]\)$$@\.so man\2\/\1\.\2@',
54 '@INPUT@',
55 ],
56 capture : true,
57 )
58 custom_target(
59 '@0@.7'.format(x),
60 input : gen,
61 output : '@0@.7'.format(x, '7'),
62 command : [prog_xslt, '-o', '@OUTPUT@', xsltproc_args, '@INPUT@'],
63 install : true,
64 install_dir : join_paths(get_option('mandir'), 'man7'),
65 build_by_default : true,
66 )
67endforeach
diff --git a/meson.build b/meson.build
new file mode 100644
index 00000000..961ee59c
--- /dev/null
+++ b/meson.build
@@ -0,0 +1,382 @@
1# Copyright © 2017-2018 Intel Corporation
2
3# Permission is hereby granted, free of charge, to any person obtaining a copy
4# of this software and associated documentation files (the "Software"), to deal
5# in the Software without restriction, including without limitation the rights
6# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7# copies of the Software, and to permit persons to whom the Software is
8# furnished to do so, subject to the following conditions:
9
10# The above copyright notice and this permission notice shall be included in
11# all copies or substantial portions of the Software.
12
13# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19# SOFTWARE.
20
21project(
22 'libdrm',
23 ['c'],
24 version : '2.4.91',
25 license : 'MIT',
26 meson_version : '>= 0.43',
27 default_options : ['buildtype=debugoptimized', 'c_std=gnu99'],
28)
29
30pkg = import('pkgconfig')
31
32config = configuration_data()
33
34config.set10('UDEV', get_option('udev'))
35with_freedreno_kgsl = get_option('freedreno-kgsl')
36with_install_tests = get_option('install-test-programs')
37
38if ['freebsd', 'dragonfly', 'netbsd'].contains(host_machine.system())
39 dep_pthread_stubs = dependency('pthread-stubs', version : '>= 0.4')
40else
41 dep_pthread_stubs = []
42endif
43dep_threads = dependency('threads')
44
45cc = meson.get_compiler('c')
46
47# Check for atomics
48intel_atomics = false
49lib_atomics = false
50
51dep_atomic_ops = dependency('atomic_ops', required : false)
52if cc.compiles('''
53 int atomic_add(int *i) { return __sync_add_and_fetch (i, 1); }
54 int atomic_cmpxchg(int *i, int j, int k) { return __sync_val_compare_and_swap (i, j, k); }
55 ''',
56 name : 'Intel Atomics')
57 intel_atomics = true
58 with_atomics = true
59 dep_atomic_ops = []
60elif dep_atomic_ops.found()
61 lib_atomics = true
62 with_atomics = true
63elif cc.has_function('atomic_cas_uint')
64 with_atomics = true
65else
66 with_atomics = false
67endif
68
69config.set10('HAVE_LIBDRM_ATOMIC_PRIMITIVES', intel_atomics)
70config.set10('HAVE_LIB_ATOMIC_OPS', lib_atomics)
71
72with_intel = false
73_intel = get_option('intel')
74if _intel != 'false'
75 if _intel == 'true' and not with_atomics
76 error('libdrm_intel requires atomics.')
77 else
78 with_intel = _intel == 'true' or host_machine.cpu_family().startswith('x86')
79 endif
80endif
81
82with_radeon = false
83_radeon = get_option('radeon')
84if _radeon != 'false'
85 if _radeon == 'true' and not with_atomics
86 error('libdrm_radeon requires atomics.')
87 endif
88 with_radeon = true
89endif
90
91with_amdgpu = false
92_amdgpu = get_option('amdgpu')
93if _amdgpu != 'false'
94 if _amdgpu == 'true' and not with_atomics
95 error('libdrm_amdgpu requires atomics.')
96 endif
97 with_amdgpu = true
98endif
99
100with_nouveau = false
101_nouveau = get_option('nouveau')
102if _nouveau != 'false'
103 if _nouveau == 'true' and not with_atomics
104 error('libdrm_nouveau requires atomics.')
105 endif
106 with_nouveau = true
107endif
108
109with_vmwgfx = false
110_vmwgfx = get_option('vmwgfx')
111if _vmwgfx != 'false'
112 with_vmwgfx = true
113endif
114
115with_omap = false
116_omap = get_option('omap')
117if _omap == 'true'
118 if not with_atomics
119 error('libdrm_omap requires atomics.')
120 endif
121 with_omap = true
122endif
123
124with_freedreno = false
125_freedreno = get_option('freedreno')
126if _freedreno != 'false'
127 if _freedreno == 'true' and not with_atomics
128 error('libdrm_freedreno requires atomics.')
129 else
130 with_freedreno = _freedreno == 'true' or ['arm', 'aarch64'].contains(host_machine.cpu_family())
131 endif
132endif
133
134with_tegra = false
135_tegra = get_option('tegra')
136if _tegra == 'true'
137 if not with_atomics
138 error('libdrm_tegra requires atomics.')
139 endif
140 with_tegra = true
141endif
142
143with_etnaviv = false
144_etnaviv = get_option('etnaviv')
145if _etnaviv == 'true'
146 if not with_atomics
147 error('libdrm_etnaviv requires atomics.')
148 endif
149 with_etnaviv = true
150endif
151
152with_exynos = get_option('exynos') == 'true'
153
154with_vc4 = false
155_vc4 = get_option('vc4')
156if _vc4 != 'false'
157 with_vc4 = _vc4 == 'true' or ['arm', 'aarch64'].contains(host_machine.cpu_family())
158endif
159
160# XXX: Apparently only freebsd and dragonfly bsd actually need this (and
161# gnu/kfreebsd), not openbsd and netbsd
162with_libkms = false
163_libkms = get_option('libkms')
164if _libkms != 'false'
165 with_libkms = _libkms == 'true' or ['linux', 'freebsd', 'dragonfly'].contains(host_machine.system())
166endif
167
168# Among others FreeBSD does not have a separate dl library.
169if not cc.has_function('dlsym')
170 dep_dl = cc.find_library('dl', required : with_nouveau)
171else
172 dep_dl = []
173endif
174# clock_gettime might require -rt, or it might not. find out
175if not cc.has_function('clock_gettime', prefix : '#define _GNU_SOURCE\n#include <time.h>')
176 # XXX: untested
177 dep_rt = cc.find_library('rt')
178else
179 dep_rt = []
180endif
181dep_m = cc.find_library('m', required : false)
182foreach header : ['sys/sysctl.h', 'sys/select.h', 'alloca.h']
183 config.set('HAVE_' + header.underscorify().to_upper(),
184 cc.compiles('#include <@0@>'.format(header), name : '@0@ works'.format(header)))
185endforeach
186if cc.has_header_symbol('sys/sysmacros.h', 'major')
187 config.set10('MAJOR_IN_SYSMACROS', true)
188elif cc.has_header_symbol('sys/mkdev.h', 'major')
189 config.set10('MAJOR_IN_MKDEV', true)
190endif
191config.set10('HAVE_OPEN_MEMSTREAM', cc.has_function('open_memstream'))
192
193warn_c_args = []
194foreach a : ['-Wall', '-Wextra', '-Wsign-compare', '-Werror=undef',
195 '-Werror-implicit-function-declaration', '-Wpointer-arith',
196 '-Wwrite-strings', '-Wstrict-prototypes', '-Wmissing-prototypes',
197 '-Wmissing-declarations', '-Wnested-externs', '-Wpacked',
198 '-Wswitch-enum', '-Wmissing-format-attribute',
199 '-Wstrict-aliasing=2', '-Winit-self', '-Winline', '-Wshadow',
200 '-Wdeclaration-after-statement', '-Wold-style-definition']
201 if cc.has_argument(a)
202 warn_c_args += a
203 endif
204endforeach
205# GCC will never error for -Wno-*, so check for -W* then add -Wno-* to the list
206# of options
207foreach a : ['unused-parameter', 'attributes', 'long-long',
208 'missing-field-initializers']
209 if cc.has_argument('-W@0@'.format(a))
210 warn_c_args += '-Wno-@0@'.format(a)
211 endif
212endforeach
213
214
215dep_pciaccess = dependency('pciaccess', version : '>= 0.10', required : with_intel)
216dep_cunit = dependency('cunit', version : '>= 2.1', required : false)
217_cairo_tests = get_option('cairo-tests')
218if _cairo_tests != 'false'
219 dep_cairo = dependency('cairo', required : _cairo_tests == 'true')
220 with_cairo_tests = dep_cairo.found()
221else
222 dep_cairo = []
223 with_cairo_tests = false
224endif
225_valgrind = get_option('valgrind')
226if _valgrind != 'false'
227 dep_valgrind = dependency('valgrind', required : _valgrind == 'true')
228 with_valgrind = dep_valgrind.found()
229else
230 dep_valgrind = []
231 with_valgrind = false
232endif
233
234with_man_pages = get_option('man-pages')
235prog_xslt = find_program('xsltproc', required : with_man_pages == 'true')
236prog_sed = find_program('sed', required : with_man_pages == 'true')
237manpage_style = 'http://docbook.sourceforge.net/release/xsl/current/manpages/docbook.xsl'
238if prog_xslt.found()
239 if run_command(prog_xslt, '--nonet', manpage_style).returncode() != 0
240 if with_man_pages == 'true'
241 error('Manpage style sheet cannot be found')
242 endif
243 with_man_pages = 'false'
244 endif
245endif
246with_man_pages = with_man_pages != 'false' and prog_xslt.found() and prog_sed.found()
247
248# Used for tests
249prog_bash = find_program('bash')
250
251config.set10('HAVE_VISIBILITY',
252 cc.compiles('''int foo_hidden(void) __attribute__((visibility(("hidden"))));''',
253 name : 'compiler supports __attribute__(("hidden"))'))
254
255foreach t : [
256 [with_exynos, 'EXYNOS'],
257 [with_freedreno_kgsl, 'FREEDRENO_KGSL'],
258 [with_intel, 'INTEL'],
259 [with_nouveau, 'NOUVEAU'],
260 [with_radeon, 'RADEON'],
261 [with_vc4, 'VC4'],
262 [with_vmwgfx, 'VMWGFX'],
263 [with_cairo_tests, 'CAIRO'],
264 [with_valgrind, 'VALGRIND'],
265 ]
266 config.set10('HAVE_@0@'.format(t[1]), t[0])
267endforeach
268if with_freedreno_kgsl and not with_freedreno
269 error('cannot enable freedreno-kgsl without freedreno support')
270endif
271config.set10('_GNU_SOURCE', true)
272config_file = configure_file(
273 configuration : config,
274 output : 'config.h',
275)
276add_project_arguments('-include', 'config.h', language : 'c')
277
278inc_root = include_directories('.')
279inc_drm = include_directories('include/drm')
280
281libdrm = shared_library(
282 'drm',
283 [files(
284 'xf86drm.c', 'xf86drmHash.c', 'xf86drmRandom.c', 'xf86drmSL.c',
285 'xf86drmMode.c'
286 ),
287 config_file,
288 ],
289 c_args : warn_c_args,
290 dependencies : [dep_valgrind, dep_rt, dep_m],
291 include_directories : inc_drm,
292 version : '2.4.0',
293 install : true,
294)
295
296ext_libdrm = declare_dependency(
297 link_with : libdrm,
298 include_directories : [inc_root, inc_drm],
299)
300
301install_headers('libsync.h', 'xf86drm.h', 'xf86drmMode.h')
302install_headers(
303 'include/drm/drm.h', 'include/drm/drm_fourcc.h', 'include/drm/drm_mode.h',
304 'include/drm/drm_sarea.h', 'include/drm/i915_drm.h',
305 'include/drm/mach64_drm.h', 'include/drm/mga_drm.h',
306 'include/drm/nouveau_drm.h', 'include/drm/qxl_drm.h',
307 'include/drm/r128_drm.h', 'include/drm/radeon_drm.h',
308 'include/drm/amdgpu_drm.h', 'include/drm/savage_drm.h',
309 'include/drm/sis_drm.h', 'include/drm/tegra_drm.h', 'include/drm/vc4_drm.h',
310 'include/drm/via_drm.h', 'include/drm/virtgpu_drm.h',
311 subdir : 'libdrm',
312)
313if with_vmwgfx
314 install_headers('include/drm/vmwgfx_drm.h', subdir : 'libdrm')
315endif
316
317pkg.generate(
318 name : 'libdrm',
319 libraries : libdrm,
320 subdirs : ['.', 'libdrm'],
321 version : meson.project_version(),
322 description : 'Userspace interface to kernel DRM services',
323)
324
325env_test = environment()
326env_test.set('NM', find_program('nm').path())
327
328if with_libkms
329 subdir('libkms')
330endif
331if with_intel
332 subdir('intel')
333endif
334if with_nouveau
335 subdir('nouveau')
336endif
337if with_radeon
338 subdir('radeon')
339endif
340if with_amdgpu
341 subdir('amdgpu')
342endif
343if with_omap
344 subdir('omap')
345endif
346if with_exynos
347 subdir('exynos')
348endif
349if with_freedreno
350 subdir('freedreno')
351endif
352if with_tegra
353 subdir('tegra')
354endif
355if with_vc4
356 subdir('vc4')
357endif
358if with_etnaviv
359 subdir('etnaviv')
360endif
361if with_man_pages
362 subdir('man')
363endif
364subdir('data')
365subdir('tests')
366
367message('')
368message('@0@ will be compiled with:'.format(meson.project_name()))
369message('')
370message(' libkms @0@'.format(with_libkms))
371message(' Intel API @0@'.format(with_intel))
372message(' vmwgfx API @0@'.format(with_vmwgfx))
373message(' Radeon API @0@'.format(with_radeon))
374message(' AMDGPU API @0@'.format(with_amdgpu))
375message(' Nouveau API @0@'.format(with_nouveau))
376message(' OMAP API @0@'.format(with_omap))
377message(' EXYNOS API @0@'.format(with_exynos))
378message(' Freedreno API @0@ (kgsl: @1@)'.format(with_freedreno, with_freedreno_kgsl))
379message(' Tegra API @0@'.format(with_tegra))
380message(' VC4 API @0@'.format(with_vc4))
381message(' Etnaviv API @0@'.format(with_etnaviv))
382message('')
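The pkg.generate() call above emits a libdrm.pc file, and install_headers() puts xf86drm.h plus the drm/*.h headers under the libdrm include directory, so an out-of-tree program can build against the result using nothing but pkg-config. A minimal consumer sketch (not part of this patch; the device path and build command are assumptions, while drmGetVersion()/drmFreeVersion() are long-standing libdrm API):

/* Sketch: open a DRM node and print the kernel driver version.
 * Assumed build command:
 *   cc drm_version.c $(pkg-config --cflags --libs libdrm) -o drm_version */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <xf86drm.h>

int main(void)
{
        int fd = open("/dev/dri/card0", O_RDWR);        /* example node only */
        drmVersionPtr ver;

        if (fd < 0) {
                perror("open");
                return 1;
        }

        ver = drmGetVersion(fd);
        if (ver) {
                printf("driver: %s %d.%d.%d\n", ver->name, ver->version_major,
                       ver->version_minor, ver->version_patchlevel);
                drmFreeVersion(ver);
        }

        close(fd);
        return 0;
}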
diff --git a/meson_options.txt b/meson_options.txt
new file mode 100644
index 00000000..8af33f1c
--- /dev/null
+++ b/meson_options.txt
@@ -0,0 +1,143 @@
1# Copyright © 2017 Intel Corporation
2
3# Permission is hereby granted, free of charge, to any person obtaining a copy
4# of this software and associated documentation files (the "Software"), to deal
5# in the Software without restriction, including without limitation the rights
6# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7# copies of the Software, and to permit persons to whom the Software is
8# furnished to do so, subject to the following conditions:
9
10# The above copyright notice and this permission notice shall be included in
11# all copies or substantial portions of the Software.
12
13# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19# SOFTWARE.
20
21option(
22 'libkms',
23 type : 'combo',
24 value : 'auto',
25 choices : ['true', 'false', 'auto'],
26 description : 'Build libkms mm abstraction library.',
27)
28option(
29 'intel',
30 type : 'combo',
31 value : 'auto',
32 choices : ['true', 'false', 'auto'],
33 description : '''Enable support for Intel's KMS API.''',
34)
35option(
36 'radeon',
37 type : 'combo',
38 value : 'auto',
39 choices : ['true', 'false', 'auto'],
40 description : '''Enable support for radeon's KMS API.''',
41)
42option(
43 'amdgpu',
44 type : 'combo',
45 value : 'auto',
46 choices : ['true', 'false', 'auto'],
47 description : '''Enable support for amdgpu's KMS API.''',
48)
49option(
50 'nouveau',
51 type : 'combo',
52 value : 'auto',
53 choices : ['true', 'false', 'auto'],
54 description : '''Enable support for nouveau's KMS API.''',
55)
56option(
57 'vmwgfx',
58 type : 'combo',
59 value : 'true',
60 choices : ['true', 'false', 'auto'],
61 description : '''Enable support for vmwgfx's KMS API.''',
62)
63option(
64 'omap',
65 type : 'combo',
66 value : 'false',
67 choices : ['true', 'false', 'auto'],
68 description : '''Enable support for OMAP's experimental KMS API.''',
69)
70option(
71 'exynos',
72 type : 'combo',
73 value : 'false',
74 choices : ['true', 'false', 'auto'],
75 description : '''Enable support for EXYNOS's experimental KMS API.''',
76)
77option(
78 'freedreno',
79 type : 'combo',
80 value : 'auto',
81 choices : ['true', 'false', 'auto'],
82 description : '''Enable support for freedreno's KMS API.''',
83)
84option(
85 'tegra',
86 type : 'combo',
87 value : 'false',
88 choices : ['true', 'false', 'auto'],
89 description : '''Enable support for Tegra's experimental KMS API.''',
90)
91option(
92 'vc4',
93 type : 'combo',
94 value : 'auto',
95 choices : ['true', 'false', 'auto'],
96 description : '''Enable support for vc4's KMS API.''',
97)
98option(
99 'etnaviv',
100 type : 'combo',
101 value : 'false',
102 choices : ['true', 'false', 'auto'],
103 description : '''Enable support for etnaviv's experimental KMS API.''',
104)
105option(
106 'cairo-tests',
107 type : 'combo',
108 value : 'auto',
109 choices : ['true', 'false', 'auto'],
110 description : 'Enable support for Cairo rendering in tests.',
111)
112option(
113 'man-pages',
114 type : 'combo',
115 value : 'auto',
116 choices : ['true', 'false', 'auto'],
117 description : 'Enable manpage generation and installation.',
118)
119option(
120 'valgrind',
121 type : 'combo',
122 value : 'auto',
123 choices : ['true', 'false', 'auto'],
124 description : 'Build libdrm with valgrind support.',
125)
126option(
127 'freedreno-kgsl',
128 type : 'boolean',
129 value : false,
130 description : 'Enable support for freedreno to use the downstream Android kernel API.',
131)
132option(
133 'install-test-programs',
134 type : 'boolean',
135 value : false,
136 description : 'Install test programs.',
137)
138option(
139 'udev',
140 type : 'boolean',
141 value : false,
142 description : 'Enable support for using udev instead of mknod.',
143)
diff --git a/nouveau/abi16.c b/nouveau/abi16.c
index ee38c0cb..ba2501ea 100644
--- a/nouveau/abi16.c
+++ b/nouveau/abi16.c
@@ -22,10 +22,6 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#ifdef HAVE_CONFIG_H
26# include <config.h>
27#endif
28
29#include <stdlib.h> 25#include <stdlib.h>
30#include <stdint.h> 26#include <stdint.h>
31#include <stddef.h> 27#include <stddef.h>
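These HAVE_CONFIG_H guards (here and in the files that follow) become unnecessary because the root meson.build now passes '-include config.h' for every C source, so the generated macros are always visible and, for the config.set10() entries, always defined to 0 or 1. A hedged illustration of what that allows (standalone example, not code from the patch; LOCAL_SYM is a made-up name standing in for the kind of visibility macro libdrm keeps in its own headers):

/* Illustration: with config.h force-included by the build system, a source
 * file can test HAVE_VISIBILITY (set by config.set10() in meson.build)
 * directly with #if, no #ifdef HAVE_CONFIG_H wrapper needed.  When this
 * snippet is compiled standalone, the undefined macro simply evaluates to 0. */
#include <stdio.h>

#if HAVE_VISIBILITY
#define LOCAL_SYM __attribute__((visibility("hidden")))
#else
#define LOCAL_SYM
#endif

LOCAL_SYM int internal_helper(void)
{
        return 42;
}

int main(void)
{
        printf("%d\n", internal_helper());
        return 0;
}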
diff --git a/nouveau/bufctx.c b/nouveau/bufctx.c
index 4f76e5df..67b7570e 100644
--- a/nouveau/bufctx.c
+++ b/nouveau/bufctx.c
@@ -22,10 +22,6 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#ifdef HAVE_CONFIG_H
26#include <config.h>
27#endif
28
29#include <stdio.h> 25#include <stdio.h>
30#include <stdlib.h> 26#include <stdlib.h>
31#include <stdint.h> 27#include <stdint.h>
diff --git a/nouveau/meson.build b/nouveau/meson.build
new file mode 100644
index 00000000..51c9a712
--- /dev/null
+++ b/nouveau/meson.build
@@ -0,0 +1,59 @@
1# Copyright © 2017-2018 Intel Corporation
2
3# Permission is hereby granted, free of charge, to any person obtaining a copy
4# of this software and associated documentation files (the "Software"), to deal
5# in the Software without restriction, including without limitation the rights
6# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7# copies of the Software, and to permit persons to whom the Software is
8# furnished to do so, subject to the following conditions:
9
10# The above copyright notice and this permission notice shall be included in
11# all copies or substantial portions of the Software.
12
13# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19# SOFTWARE.
20
21
22libdrm_nouveau = shared_library(
23 'drm_nouveau',
24 [files( 'nouveau.c', 'pushbuf.c', 'bufctx.c', 'abi16.c'), config_file],
25 c_args : warn_c_args,
26 include_directories : [inc_root, inc_drm],
27 link_with : libdrm,
28 dependencies : [dep_threads, dep_atomic_ops],
29 version : '2.0.0',
30 install : true,
31)
32
33ext_libdrm_nouveau = declare_dependency(
34 link_with : [libdrm, libdrm_nouveau],
35 include_directories : [inc_drm, include_directories('.')],
36)
37
38install_headers('nouveau.h', subdir : 'libdrm/nouveau')
39install_headers(
40 'nvif/class.h', 'nvif/cl0080.h', 'nvif/cl9097.h', 'nvif/if0002.h',
41 'nvif/if0003.h', 'nvif/ioctl.h', 'nvif/unpack.h',
42 subdir : 'libdrm/nouveau/nvif'
43)
44
45pkg.generate(
46 name : 'libdrm_nouveau',
47 libraries : libdrm_nouveau,
48 subdirs : ['.', 'libdrm', 'libdrm/nouveau'],
49 version : meson.project_version(),
50 requires_private : 'libdrm',
51 description : 'Userspace interface to nouveau kernel DRM services',
52)
53
54test(
55 'nouveau-symbol-check',
56 prog_bash,
57 env : env_test,
58 args : [files('nouveau-symbol-check'), libdrm_nouveau]
59)
diff --git a/nouveau/nouveau-symbol-check b/nouveau/nouveau-symbol-check
index b265cea4..b3a24101 100755
--- a/nouveau/nouveau-symbol-check
+++ b/nouveau/nouveau-symbol-check
@@ -3,7 +3,7 @@
3# The following symbols (past the first five) are taken from the public headers. 3# The following symbols (past the first five) are taken from the public headers.
4# A list of the latter should be available Makefile.sources/LIBDRM_NOUVEAU_H_FILES 4# A list of the latter should be available Makefile.sources/LIBDRM_NOUVEAU_H_FILES
5 5
6FUNCS=$(nm -D --format=bsd --defined-only ${1-.libs/libdrm_nouveau.so} | awk '{print $3}'| while read func; do 6FUNCS=$($NM -D --format=bsd --defined-only ${1-.libs/libdrm_nouveau.so} | awk '{print $3}'| while read func; do
7( grep -q "^$func$" || echo $func ) <<EOF 7( grep -q "^$func$" || echo $func ) <<EOF
8__bss_start 8__bss_start
9_edata 9_edata
diff --git a/nouveau/nouveau.c b/nouveau/nouveau.c
index e113a8fe..55593517 100644
--- a/nouveau/nouveau.c
+++ b/nouveau/nouveau.c
@@ -22,10 +22,6 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#ifdef HAVE_CONFIG_H
26#include <config.h>
27#endif
28
29#include <stdio.h> 25#include <stdio.h>
30#include <stdlib.h> 26#include <stdlib.h>
31#include <stdint.h> 27#include <stdint.h>
diff --git a/nouveau/pushbuf.c b/nouveau/pushbuf.c
index 035e3019..445c966e 100644
--- a/nouveau/pushbuf.c
+++ b/nouveau/pushbuf.c
@@ -22,10 +22,6 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#ifdef HAVE_CONFIG_H
26#include <config.h>
27#endif
28
29#include <stdio.h> 25#include <stdio.h>
30#include <stdlib.h> 26#include <stdlib.h>
31#include <stdint.h> 27#include <stdint.h>
diff --git a/omap/Android.mk b/omap/Android.mk
new file mode 100644
index 00000000..b25cca13
--- /dev/null
+++ b/omap/Android.mk
@@ -0,0 +1,13 @@
1LOCAL_PATH := $(call my-dir)
2include $(CLEAR_VARS)
3
4LOCAL_MODULE := libdrm_omap
5LOCAL_VENDOR_MODULE := true
6
7LOCAL_SRC_FILES := omap_drm.c
8
9LOCAL_SHARED_LIBRARIES := libdrm
10
11include $(LIBDRM_COMMON_MK)
12
13include $(BUILD_SHARED_LIBRARY)
diff --git a/omap/meson.build b/omap/meson.build
new file mode 100644
index 00000000..e57b8f5d
--- /dev/null
+++ b/omap/meson.build
@@ -0,0 +1,54 @@
1# Copyright © 2017 Intel Corporation
2
3# Permission is hereby granted, free of charge, to any person obtaining a copy
4# of this software and associated documentation files (the "Software"), to deal
5# in the Software without restriction, including without limitation the rights
6# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7# copies of the Software, and to permit persons to whom the Software is
8# furnished to do so, subject to the following conditions:
9
10# The above copyright notice and this permission notice shall be included in
11# all copies or substantial portions of the Software.
12
13# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19# SOFTWARE.
20
21libdrm_omap = shared_library(
22 'drm_omap',
23 [files('omap_drm.c'), config_file],
24 include_directories : [inc_root, inc_drm],
25 c_args : warn_c_args,
26 link_with : libdrm,
27 dependencies : [dep_pthread_stubs, dep_atomic_ops],
28 version : '1.0.0',
29 install : true,
30)
31
32ext_libdrm_omap = declare_dependency(
33 link_with : [libdrm, libdrm_omap],
34 include_directories : [inc_drm, include_directories('.')],
35)
36
37install_headers('omap_drmif.h', subdir : 'libdrm')
38install_headers('omap_drm.h', subdir : 'omap')
39
40pkg.generate(
41 name : 'libdrm_omap',
42 libraries : libdrm_omap,
43 subdirs : ['.', 'libdrm', 'omap'],
44 version : '0.6',
45 requires_private : 'libdrm',
46 description : 'Userspace interface to omap kernel DRM services',
47)
48
49test(
50 'omap-symbol-check',
51 prog_bash,
52 env : env_test,
53 args : [files('omap-symbol-check'), libdrm_omap]
54)
diff --git a/omap/omap-symbol-check b/omap/omap-symbol-check
index 759c84bd..0fb4a0f2 100755
--- a/omap/omap-symbol-check
+++ b/omap/omap-symbol-check
@@ -3,7 +3,7 @@
3# The following symbols (past the first five) are taken from the public headers. 3# The following symbols (past the first five) are taken from the public headers.
4# A list of the latter should be available Makefile.am/libdrm_omap*HEADERS 4# A list of the latter should be available Makefile.am/libdrm_omap*HEADERS
5 5
6FUNCS=$(nm -D --format=bsd --defined-only ${1-.libs/libdrm_omap.so} | awk '{print $3}'| while read func; do 6FUNCS=$($NM -D --format=bsd --defined-only ${1-.libs/libdrm_omap.so} | awk '{print $3}'| while read func; do
7( grep -q "^$func$" || echo $func ) <<EOF 7( grep -q "^$func$" || echo $func ) <<EOF
8__bss_start 8__bss_start
9_edata 9_edata
diff --git a/omap/omap_drm.c b/omap/omap_drm.c
index 08ba64eb..417d522c 100644
--- a/omap/omap_drm.c
+++ b/omap/omap_drm.c
@@ -26,10 +26,6 @@
26 * Rob Clark <rob@ti.com> 26 * Rob Clark <rob@ti.com>
27 */ 27 */
28 28
29#ifdef HAVE_CONFIG_H
30#include "config.h"
31#endif
32
33#include <stdlib.h> 29#include <stdlib.h>
34#include <linux/stddef.h> 30#include <linux/stddef.h>
35#include <linux/types.h> 31#include <linux/types.h>
diff --git a/radeon/meson.build b/radeon/meson.build
new file mode 100644
index 00000000..b08c7442
--- /dev/null
+++ b/radeon/meson.build
@@ -0,0 +1,64 @@
1# Copyright © 2017-2018 Intel Corporation
2
3# Permission is hereby granted, free of charge, to any person obtaining a copy
4# of this software and associated documentation files (the "Software"), to deal
5# in the Software without restriction, including without limitation the rights
6# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7# copies of the Software, and to permit persons to whom the Software is
8# furnished to do so, subject to the following conditions:
9
10# The above copyright notice and this permission notice shall be included in
11# all copies or substantial portions of the Software.
12
13# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19# SOFTWARE.
20
21
22libdrm_radeon = shared_library(
23 'drm_radeon',
24 [
25 files(
26 'radeon_bo_gem.c', 'radeon_cs_gem.c', 'radeon_cs_space.c', 'radeon_bo.c',
27 'radeon_cs.c', 'radeon_surface.c',
28 ),
29 config_file,
30 ],
31 c_args : warn_c_args,
32 include_directories : [inc_root, inc_drm],
33 link_with : libdrm,
34 dependencies : [dep_pthread_stubs, dep_atomic_ops],
35 version : '1.0.1',
36 install : true,
37)
38
39ext_libdrm_radeon = declare_dependency(
40 link_with : [libdrm, libdrm_radeon],
41 include_directories : [inc_drm, include_directories('.')],
42)
43
44install_headers(
45 'radeon_bo.h', 'radeon_cs.h', 'radeon_surface.h', 'radeon_bo_gem.h',
46 'radeon_cs_gem.h', 'radeon_bo_int.h', 'radeon_cs_int.h', 'r600_pci_ids.h',
47 subdir : 'libdrm'
48)
49
50pkg.generate(
51 name : 'libdrm_radeon',
52 libraries : libdrm_radeon,
53 subdirs : ['.', 'libdrm'],
54 version : meson.project_version(),
55 requires_private : 'libdrm',
56 description : 'Userspace interface to kernel DRM services for radeon',
57)
58
59test(
60 'radeon-symbol-check',
61 prog_bash,
62 env : env_test,
63 args : [files('radeon-symbol-check'), libdrm_radeon]
64)
diff --git a/radeon/radeon-symbol-check b/radeon/radeon-symbol-check
index 0bf2ffcb..7d79d901 100755
--- a/radeon/radeon-symbol-check
+++ b/radeon/radeon-symbol-check
@@ -3,7 +3,7 @@
3# The following symbols (past the first five) are taken from the public headers. 3# The following symbols (past the first five) are taken from the public headers.
4# A list of the latter should be available Makefile.sources/LIBDRM_RADEON_H_FILES 4# A list of the latter should be available Makefile.sources/LIBDRM_RADEON_H_FILES
5 5
6FUNCS=$(nm -D --format=bsd --defined-only ${1-.libs/libdrm_radeon.so} | awk '{print $3}'| while read func; do 6FUNCS=$($NM -D --format=bsd --defined-only ${1-.libs/libdrm_radeon.so} | awk '{print $3}'| while read func; do
7( grep -q "^$func$" || echo $func ) <<EOF 7( grep -q "^$func$" || echo $func ) <<EOF
8__bss_start 8__bss_start
9_edata 9_edata
diff --git a/radeon/radeon_bo.c b/radeon/radeon_bo.c
index 447f9280..821807bc 100644
--- a/radeon/radeon_bo.c
+++ b/radeon/radeon_bo.c
@@ -29,9 +29,6 @@
29 * Dave Airlie 29 * Dave Airlie
30 * Jérôme Glisse <glisse@freedesktop.org> 30 * Jérôme Glisse <glisse@freedesktop.org>
31 */ 31 */
32#ifdef HAVE_CONFIG_H
33#include <config.h>
34#endif
35#include <libdrm_macros.h> 32#include <libdrm_macros.h>
36#include <radeon_bo.h> 33#include <radeon_bo.h>
37#include <radeon_bo_int.h> 34#include <radeon_bo_int.h>
diff --git a/radeon/radeon_bo_gem.c b/radeon/radeon_bo_gem.c
index fbd453d9..774b26e4 100644
--- a/radeon/radeon_bo_gem.c
+++ b/radeon/radeon_bo_gem.c
@@ -29,9 +29,6 @@
29 * Dave Airlie 29 * Dave Airlie
30 * Jérôme Glisse <glisse@freedesktop.org> 30 * Jérôme Glisse <glisse@freedesktop.org>
31 */ 31 */
32#ifdef HAVE_CONFIG_H
33#include <config.h>
34#endif
35#include <stdio.h> 32#include <stdio.h>
36#include <stdint.h> 33#include <stdint.h>
37#include <stdlib.h> 34#include <stdlib.h>
diff --git a/radeon/radeon_cs.c b/radeon/radeon_cs.c
index dffb869f..eb7859e5 100644
--- a/radeon/radeon_cs.c
+++ b/radeon/radeon_cs.c
@@ -1,6 +1,3 @@
1#ifdef HAVE_CONFIG_H
2#include <config.h>
3#endif
4#include "libdrm_macros.h" 1#include "libdrm_macros.h"
5#include <stdio.h> 2#include <stdio.h>
6#include "radeon_cs.h" 3#include "radeon_cs.h"
diff --git a/radeon/radeon_cs_gem.c b/radeon/radeon_cs_gem.c
index f3dccb6c..4d5fc13a 100644
--- a/radeon/radeon_cs_gem.c
+++ b/radeon/radeon_cs_gem.c
@@ -29,9 +29,6 @@
29 * Nicolai Haehnle <prefect_@gmx.net> 29 * Nicolai Haehnle <prefect_@gmx.net>
30 * Jérôme Glisse <glisse@freedesktop.org> 30 * Jérôme Glisse <glisse@freedesktop.org>
31 */ 31 */
32#ifdef HAVE_CONFIG_H
33#include "config.h"
34#endif
35#include <assert.h> 32#include <assert.h>
36#include <errno.h> 33#include <errno.h>
37#include <stdlib.h> 34#include <stdlib.h>
diff --git a/radeon/radeon_cs_space.c b/radeon/radeon_cs_space.c
index 69287be5..8531c345 100644
--- a/radeon/radeon_cs_space.c
+++ b/radeon/radeon_cs_space.c
@@ -25,9 +25,6 @@
25 */ 25 */
26/* 26/*
27 */ 27 */
28#ifdef HAVE_CONFIG_H
29#include <config.h>
30#endif
31#include <assert.h> 28#include <assert.h>
32#include <errno.h> 29#include <errno.h>
33#include <stdlib.h> 30#include <stdlib.h>
diff --git a/radeon/radeon_surface.c b/radeon/radeon_surface.c
index 965be24c..3cafcfcb 100644
--- a/radeon/radeon_surface.c
+++ b/radeon/radeon_surface.c
@@ -26,9 +26,6 @@
26 * Authors: 26 * Authors:
27 * Jérôme Glisse <jglisse@redhat.com> 27 * Jérôme Glisse <jglisse@redhat.com>
28 */ 28 */
29#ifdef HAVE_CONFIG_H
30#include <config.h>
31#endif
32#include <stdbool.h> 29#include <stdbool.h>
33#include <assert.h> 30#include <assert.h>
34#include <errno.h> 31#include <errno.h>
@@ -2503,6 +2500,7 @@ static int radeon_surface_sanity(struct radeon_surface_manager *surf_man,
2503 if (surf->npix_y > 1) { 2500 if (surf->npix_y > 1) {
2504 return -EINVAL; 2501 return -EINVAL;
2505 } 2502 }
2503 /* fallthrough */
2506 case RADEON_SURF_TYPE_2D: 2504 case RADEON_SURF_TYPE_2D:
2507 if (surf->npix_z > 1) { 2505 if (surf->npix_z > 1) {
2508 return -EINVAL; 2506 return -EINVAL;
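The /* fallthrough */ comment added above records that the 1D case is meant to continue into RADEON_SURF_TYPE_2D; GCC's -Wimplicit-fallthrough, for instance, recognizes such comments and stays quiet about the intentional fall-through (newer compilers also accept an explicit attribute). A minimal, hedged illustration of the idiom, unrelated to the radeon code itself:

/* Fallthrough-comment idiom: without the comment (or an attribute such as
 * __attribute__((fallthrough))), -Wimplicit-fallthrough would warn that
 * case 1 runs into case 2. */
#include <stdio.h>

static const char *describe(int n)
{
        switch (n) {
        case 1:
                printf("saw exactly one\n");
                /* fallthrough */
        case 2:
                return "one or two";
        default:
                return "something else";
        }
}

int main(void)
{
        printf("%s\n", describe(1));
        return 0;
}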
diff --git a/tegra/meson.build b/tegra/meson.build
new file mode 100644
index 00000000..1f5c74b3
--- /dev/null
+++ b/tegra/meson.build
@@ -0,0 +1,53 @@
1# Copyright © 2017-2018 Intel Corporation
2
3# Permission is hereby granted, free of charge, to any person obtaining a copy
4# of this software and associated documentation files (the "Software"), to deal
5# in the Software without restriction, including without limitation the rights
6# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7# copies of the Software, and to permit persons to whom the Software is
8# furnished to do so, subject to the following conditions:
9
10# The above copyright notice and this permission notice shall be included in
11# all copies or substantial portions of the Software.
12
13# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19# SOFTWARE.
20
21libdrm_tegra = shared_library(
22 'drm_tegra',
23 [files('tegra.c'), config_file],
24 include_directories : [inc_root, inc_drm],
25 link_with : libdrm,
26 dependencies : [dep_pthread_stubs, dep_atomic_ops],
27 c_args : warn_c_args,
28 version : '0.0.0',
29 install : true,
30)
31
32ext_libdrm_tegra = declare_dependency(
33 link_with : [libdrm, libdrm_tegra],
34 include_directories : [inc_drm, include_directories('.')],
35)
36
37install_headers('tegra.h', subdir : 'libdrm')
38
39pkg.generate(
40 name : 'libdrm_tegra',
41 libraries : libdrm_tegra,
42 subdirs : ['.', 'libdrm'],
43 version : meson.project_version(),
44 requires_private : 'libdrm',
45 description : 'Userspace interface to Tegra kernel DRM services',
46)
47
48test(
49 'tegra-symbol-check',
50 prog_bash,
51 env : env_test,
52 args : [files('tegra-symbol-check'), libdrm_tegra]
53)
diff --git a/tegra/tegra-symbol-check b/tegra/tegra-symbol-check
index 40208311..509b678c 100755
--- a/tegra/tegra-symbol-check
+++ b/tegra/tegra-symbol-check
@@ -1,11 +1,14 @@
1#!/bin/bash 1#!/bin/bash
2 2
3# The following symbols (past the first five) are taken from the public headers. 3# The following symbols (past the first nine) are taken from tegra.h.
4# A list of the latter should be available Makefile.sources/LIBDRM_FREEDRENO_H_FILES
5 4
6FUNCS=$(nm -D --format=bsd --defined-only ${1-.libs/libdrm_tegra.so} | awk '{print $3}'| while read func; do 5FUNCS=$($NM -D --format=bsd --defined-only ${1-.libs/libdrm_tegra.so} | awk '{print $3}'| while read func; do
7( grep -q "^$func$" || echo $func ) <<EOF 6( grep -q "^$func$" || echo $func ) <<EOF
7__bss_end__
8__bss_start__
8__bss_start 9__bss_start
10__end__
11_bss_end__
9_edata 12_edata
10_end 13_end
11_fini 14_fini
diff --git a/tegra/tegra.c b/tegra/tegra.c
index 66f19e96..0c869877 100644
--- a/tegra/tegra.c
+++ b/tegra/tegra.c
@@ -22,10 +22,6 @@
22 * OTHER DEALINGS IN THE SOFTWARE. 22 * OTHER DEALINGS IN THE SOFTWARE.
23 */ 23 */
24 24
25#ifdef HAVE_CONFIG_H
26# include "config.h"
27#endif
28
29#include <errno.h> 25#include <errno.h>
30#include <fcntl.h> 26#include <fcntl.h>
31#include <string.h> 27#include <string.h>
diff --git a/tests/amdgpu/.editorconfig b/tests/amdgpu/.editorconfig
new file mode 120000
index 00000000..70734e42
--- /dev/null
+++ b/tests/amdgpu/.editorconfig
@@ -0,0 +1 @@
../../amdgpu/.editorconfig \ No newline at end of file
diff --git a/tests/amdgpu/Makefile.am b/tests/amdgpu/Makefile.am
index c1c3a32e..e79c1bd3 100644
--- a/tests/amdgpu/Makefile.am
+++ b/tests/amdgpu/Makefile.am
@@ -1,7 +1,8 @@
1AM_CFLAGS = \ 1AM_CFLAGS = \
2 -I $(top_srcdir)/include/drm \ 2 -I $(top_srcdir)/include/drm \
3 -I $(top_srcdir)/amdgpu \ 3 -I $(top_srcdir)/amdgpu \
4 -I $(top_srcdir) 4 -I $(top_srcdir) \
5 -pthread
5 6
6LDADD = $(top_builddir)/libdrm.la \ 7LDADD = $(top_builddir)/libdrm.la \
7 $(top_builddir)/amdgpu/libdrm_amdgpu.la \ 8 $(top_builddir)/amdgpu/libdrm_amdgpu.la \
@@ -23,7 +24,12 @@ amdgpu_test_SOURCES = \
23 basic_tests.c \ 24 basic_tests.c \
24 bo_tests.c \ 25 bo_tests.c \
25 cs_tests.c \ 26 cs_tests.c \
26 uvd_messages.h \ 27 decode_messages.h \
27 vce_tests.c \ 28 vce_tests.c \
28 vce_ib.h \ 29 vce_ib.h \
29 frame.h 30 frame.h \
31 uvd_enc_tests.c \
32 vcn_tests.c \
33 uve_ib.h \
34 deadlock_tests.c \
35 vm_tests.c
diff --git a/tests/amdgpu/amdgpu_test.c b/tests/amdgpu/amdgpu_test.c
index 3fd6820a..96fcd687 100644
--- a/tests/amdgpu/amdgpu_test.c
+++ b/tests/amdgpu/amdgpu_test.c
@@ -21,10 +21,6 @@
21 * 21 *
22*/ 22*/
23 23
24#ifdef HAVE_CONFIG_H
25#include "config.h"
26#endif
27
28#include <string.h> 24#include <string.h>
29#include <stdio.h> 25#include <stdio.h>
30#include <stdlib.h> 26#include <stdlib.h>
@@ -49,6 +45,17 @@
49#include "CUnit/Basic.h" 45#include "CUnit/Basic.h"
50 46
51#include "amdgpu_test.h" 47#include "amdgpu_test.h"
48#include "amdgpu_internal.h"
49
50/* Test suite names */
51#define BASIC_TESTS_STR "Basic Tests"
52#define BO_TESTS_STR "BO Tests"
53#define CS_TESTS_STR "CS Tests"
54#define VCE_TESTS_STR "VCE Tests"
55#define VCN_TESTS_STR "VCN Tests"
56#define UVD_ENC_TESTS_STR "UVD ENC Tests"
57#define DEADLOCK_TESTS_STR "Deadlock Tests"
58#define VM_TESTS_STR "VM Tests"
52 59
53/** 60/**
54 * Open handles for amdgpu devices 61 * Open handles for amdgpu devices
@@ -62,49 +69,150 @@ int open_render_node = 0; /* By default run most tests on primary node */
62/** The table of all known test suites to run */ 69/** The table of all known test suites to run */
63static CU_SuiteInfo suites[] = { 70static CU_SuiteInfo suites[] = {
64 { 71 {
65 .pName = "Basic Tests", 72 .pName = BASIC_TESTS_STR,
66 .pInitFunc = suite_basic_tests_init, 73 .pInitFunc = suite_basic_tests_init,
67 .pCleanupFunc = suite_basic_tests_clean, 74 .pCleanupFunc = suite_basic_tests_clean,
68 .pTests = basic_tests, 75 .pTests = basic_tests,
69 }, 76 },
70 { 77 {
71 .pName = "BO Tests", 78 .pName = BO_TESTS_STR,
72 .pInitFunc = suite_bo_tests_init, 79 .pInitFunc = suite_bo_tests_init,
73 .pCleanupFunc = suite_bo_tests_clean, 80 .pCleanupFunc = suite_bo_tests_clean,
74 .pTests = bo_tests, 81 .pTests = bo_tests,
75 }, 82 },
76 { 83 {
77 .pName = "CS Tests", 84 .pName = CS_TESTS_STR,
78 .pInitFunc = suite_cs_tests_init, 85 .pInitFunc = suite_cs_tests_init,
79 .pCleanupFunc = suite_cs_tests_clean, 86 .pCleanupFunc = suite_cs_tests_clean,
80 .pTests = cs_tests, 87 .pTests = cs_tests,
81 }, 88 },
82 { 89 {
83 .pName = "VCE Tests", 90 .pName = VCE_TESTS_STR,
84 .pInitFunc = suite_vce_tests_init, 91 .pInitFunc = suite_vce_tests_init,
85 .pCleanupFunc = suite_vce_tests_clean, 92 .pCleanupFunc = suite_vce_tests_clean,
86 .pTests = vce_tests, 93 .pTests = vce_tests,
87 }, 94 },
95 {
96 .pName = VCN_TESTS_STR,
97 .pInitFunc = suite_vcn_tests_init,
98 .pCleanupFunc = suite_vcn_tests_clean,
99 .pTests = vcn_tests,
100 },
101 {
102 .pName = UVD_ENC_TESTS_STR,
103 .pInitFunc = suite_uvd_enc_tests_init,
104 .pCleanupFunc = suite_uvd_enc_tests_clean,
105 .pTests = uvd_enc_tests,
106 },
107 {
108 .pName = DEADLOCK_TESTS_STR,
109 .pInitFunc = suite_deadlock_tests_init,
110 .pCleanupFunc = suite_deadlock_tests_clean,
111 .pTests = deadlock_tests,
112 },
113 {
114 .pName = VM_TESTS_STR,
115 .pInitFunc = suite_vm_tests_init,
116 .pCleanupFunc = suite_vm_tests_clean,
117 .pTests = vm_tests,
118 },
119
88 CU_SUITE_INFO_NULL, 120 CU_SUITE_INFO_NULL,
89}; 121};
90 122
123typedef CU_BOOL (*active__stat_func)(void);
91 124
92/** Display information about all suites and their tests */ 125typedef struct Suites_Active_Status {
126 char* pName;
127 active__stat_func pActive;
128}Suites_Active_Status;
129
130static CU_BOOL always_active()
131{
132 return CU_TRUE;
133}
134
135static Suites_Active_Status suites_active_stat[] = {
136 {
137 .pName = BASIC_TESTS_STR,
138 .pActive = always_active,
139 },
140 {
141 .pName = BO_TESTS_STR,
142 .pActive = always_active,
143 },
144 {
145 .pName = CS_TESTS_STR,
146 .pActive = suite_cs_tests_enable,
147 },
148 {
149 .pName = VCE_TESTS_STR,
150 .pActive = suite_vce_tests_enable,
151 },
152 {
153 .pName = VCN_TESTS_STR,
154 .pActive = suite_vcn_tests_enable,
155 },
156 {
157 .pName = UVD_ENC_TESTS_STR,
158 .pActive = suite_uvd_enc_tests_enable,
159 },
160 {
161 .pName = DEADLOCK_TESTS_STR,
162 .pActive = suite_deadlock_tests_enable,
163 },
164 {
165 .pName = VM_TESTS_STR,
166 .pActive = suite_vm_tests_enable,
167 },
168};
169
170
171/*
172 * Display information about all suites and their tests
173 *
174 * NOTE: Must be run after registry is initialized and suites registered.
175 */
93static void display_test_suites(void) 176static void display_test_suites(void)
94{ 177{
95 int iSuite; 178 int iSuite;
96 int iTest; 179 int iTest;
180 CU_pSuite pSuite = NULL;
181 CU_pTest pTest = NULL;
97 182
98 printf("Suites\n"); 183 printf("Suites\n");
99 184
100 for (iSuite = 0; suites[iSuite].pName != NULL; iSuite++) { 185 for (iSuite = 0; suites[iSuite].pName != NULL; iSuite++) {
101 printf("Suite id = %d: Name '%s'\n", 186
102 iSuite + 1, suites[iSuite].pName); 187 pSuite = CU_get_suite_by_index((unsigned int) iSuite + 1,
188 CU_get_registry());
189
190 if (!pSuite) {
191 fprintf(stderr, "Invalid suite id : %d\n", iSuite + 1);
192 continue;
193 }
194
195 printf("Suite id = %d: Name '%s status: %s'\n",
196 iSuite + 1, suites[iSuite].pName,
197 pSuite->fActive ? "ENABLED" : "DISABLED");
198
199
103 200
104 for (iTest = 0; suites[iSuite].pTests[iTest].pName != NULL; 201 for (iTest = 0; suites[iSuite].pTests[iTest].pName != NULL;
105 iTest++) { 202 iTest++) {
106 printf(" Test id %d: Name: '%s'\n", iTest + 1, 203
107 suites[iSuite].pTests[iTest].pName); 204 pTest = CU_get_test_by_index((unsigned int) iTest + 1,
205 pSuite);
206
207 if (!pTest) {
208 fprintf(stderr, "Invalid test id : %d\n", iTest + 1);
209 continue;
210 }
211
212 printf("Test id %d: Name: '%s status: %s'\n", iTest + 1,
213 suites[iSuite].pTests[iTest].pName,
214 pSuite->fActive && pTest->fActive ?
215 "ENABLED" : "DISABLED");
108 } 216 }
109 } 217 }
110} 218}
@@ -112,7 +220,7 @@ static void display_test_suites(void)
112 220
113/** Help string for command line parameters */ 221/** Help string for command line parameters */
114static const char usage[] = 222static const char usage[] =
115 "Usage: %s [-hlpr] [<-s <suite id>> [-t <test id>]] " 223 "Usage: %s [-hlpr] [<-s <suite id>> [-t <test id>] [-f]] "
116 "[-b <pci_bus_id> [-d <pci_device_id>]]\n" 224 "[-b <pci_bus_id> [-d <pci_device_id>]]\n"
117 "where:\n" 225 "where:\n"
118 " l - Display all suites and their tests\n" 226 " l - Display all suites and their tests\n"
@@ -120,9 +228,10 @@ static const char usage[] =
120 " b - Specify device's PCI bus id to run tests\n" 228 " b - Specify device's PCI bus id to run tests\n"
121 " d - Specify device's PCI device id to run tests (optional)\n" 229 " d - Specify device's PCI device id to run tests (optional)\n"
122 " p - Display information of AMDGPU devices in system\n" 230 " p - Display information of AMDGPU devices in system\n"
231 " f - Force executing inactive suite or test\n"
123 " h - Display this help\n"; 232 " h - Display this help\n";
124/** Specified options strings for getopt */ 233/** Specified options strings for getopt */
125static const char options[] = "hlrps:t:b:d:"; 234static const char options[] = "hlrps:t:b:d:f";
126 235
127/* Open AMD devices. 236/* Open AMD devices.
128 * Return the number of AMD devices opened. 237
@@ -130,7 +239,6 @@ static const char options[] = "hlrps:t:b:d:";
130static int amdgpu_open_devices(int open_render_node) 239static int amdgpu_open_devices(int open_render_node)
131{ 240{
132 drmDevicePtr devices[MAX_CARDS_SUPPORTED]; 241 drmDevicePtr devices[MAX_CARDS_SUPPORTED];
133 int ret;
134 int i; 242 int i;
135 int drm_node; 243 int drm_node;
136 int amd_index = 0; 244 int amd_index = 0;
@@ -264,29 +372,71 @@ static void amdgpu_print_devices()
264/* Find a match AMD device in PCI bus 372/* Find a match AMD device in PCI bus
265 * Return the index of the device or -1 if not found 373 * Return the index of the device or -1 if not found
266 */ 374 */
267static int amdgpu_find_device(uint8_t bus, uint8_t dev) 375static int amdgpu_find_device(uint8_t bus, uint16_t dev)
268{ 376{
269 int i; 377 int i;
270 drmDevicePtr device; 378 drmDevicePtr device;
271 379
272 for (i = 0; i < MAX_CARDS_SUPPORTED && drm_amdgpu[i] >=0; i++) 380 for (i = 0; i < MAX_CARDS_SUPPORTED && drm_amdgpu[i] >= 0; i++) {
273 if (drmGetDevice2(drm_amdgpu[i], 381 if (drmGetDevice2(drm_amdgpu[i],
274 DRM_DEVICE_GET_PCI_REVISION, 382 DRM_DEVICE_GET_PCI_REVISION,
275 &device) == 0) { 383 &device) == 0) {
276 if (device->bustype == DRM_BUS_PCI) 384 if (device->bustype == DRM_BUS_PCI)
277 if (device->businfo.pci->bus == bus && 385 if ((bus == 0xFF || device->businfo.pci->bus == bus) &&
278 device->businfo.pci->dev == dev) { 386 device->deviceinfo.pci->device_id == dev) {
279
280 drmFreeDevice(&device); 387 drmFreeDevice(&device);
281 return i; 388 return i;
282 } 389 }
283 390
284 drmFreeDevice(&device); 391 drmFreeDevice(&device);
285 } 392 }
393 }
286 394
287 return -1; 395 return -1;
288} 396}
289 397
398static void amdgpu_disable_suites()
399{
400 amdgpu_device_handle device_handle;
401 uint32_t major_version, minor_version, family_id;
402 int i;
403 int size = sizeof(suites_active_stat) / sizeof(suites_active_stat[0]);
404
405 if (amdgpu_device_initialize(drm_amdgpu[0], &major_version,
406 &minor_version, &device_handle))
407 return;
408
409 family_id = device_handle->info.family_id;
410
411 if (amdgpu_device_deinitialize(device_handle))
412 return;
413
414 /* Set active status for suites based on their policies */
415 for (i = 0; i < size; ++i)
416 if (amdgpu_set_suite_active(suites_active_stat[i].pName,
417 suites_active_stat[i].pActive()))
418 fprintf(stderr, "suite deactivation failed - %s\n", CU_get_error_msg());
419
420 /* Explicitly disable specific tests due to known bugs or preferences */
421 /*
422 * BUG: Compute ring stalls and never recovers when the address is
423 * written after the command has already been submitted
424 */
425 if (amdgpu_set_test_active(DEADLOCK_TESTS_STR, "compute ring block test", CU_FALSE))
426 fprintf(stderr, "test deactivation failed - %s\n", CU_get_error_msg());
427
428 if (amdgpu_set_test_active(BO_TESTS_STR, "Metadata", CU_FALSE))
429 fprintf(stderr, "test deactivation failed - %s\n", CU_get_error_msg());
430
431 if (amdgpu_set_test_active(BASIC_TESTS_STR, "bo eviction Test", CU_FALSE))
432 fprintf(stderr, "test deactivation failed - %s\n", CU_get_error_msg());
433
434 /* This test only runs on GFX8 and GFX9 (families VI through RV) */
435 if (family_id < AMDGPU_FAMILY_VI || family_id > AMDGPU_FAMILY_RV)
436 if (amdgpu_set_test_active(BASIC_TESTS_STR, "Sync dependency Test", CU_FALSE))
437 fprintf(stderr, "test deactivation failed - %s\n", CU_get_error_msg());
438}
439
290/* The main() function for setting up and running the tests. 440/* The main() function for setting up and running the tests.
291 * Returns a CUE_SUCCESS on successful running, another 441 * Returns a CUE_SUCCESS on successful running, another
292 * CUnit error code on failure. 442 * CUnit error code on failure.
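The reworked amdgpu_find_device() above now matches primarily on the PCI device id (parsed as hex via the -d option) and treats a bus value of 0xFF as a wildcard, instead of requiring an exact bus/dev pair. Stripped down to its core, the check it performs looks roughly like this (hedged sketch; the function name and return convention are invented for illustration):

/* Sketch: does this DRM fd belong to a PCI device with the given device id?
 * A bus of 0xFF means "any bus", mirroring the test change above. */
#include <stdint.h>
#include <xf86drm.h>

int fd_matches_pci_device(int fd, uint8_t bus, uint16_t device_id)
{
        drmDevicePtr device;
        int match = 0;

        if (drmGetDevice2(fd, DRM_DEVICE_GET_PCI_REVISION, &device))
                return 0;

        if (device->bustype == DRM_BUS_PCI &&
            (bus == 0xFF || device->businfo.pci->bus == bus) &&
            device->deviceinfo.pci->device_id == device_id)
                match = 1;

        drmFreeDevice(&device);
        return match;
}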
@@ -303,6 +453,8 @@ int main(int argc, char **argv)
303 CU_pSuite pSuite = NULL; 453 CU_pSuite pSuite = NULL;
304 CU_pTest pTest = NULL; 454 CU_pTest pTest = NULL;
305 int test_device_index; 455 int test_device_index;
456 int display_list = 0;
457 int force_run = 0;
306 458
307 for (i = 0; i < MAX_CARDS_SUPPORTED; i++) 459 for (i = 0; i < MAX_CARDS_SUPPORTED; i++)
308 drm_amdgpu[i] = -1; 460 drm_amdgpu[i] = -1;
@@ -313,8 +465,8 @@ int main(int argc, char **argv)
313 while ((c = getopt(argc, argv, options)) != -1) { 465 while ((c = getopt(argc, argv, options)) != -1) {
314 switch (c) { 466 switch (c) {
315 case 'l': 467 case 'l':
316 display_test_suites(); 468 display_list = 1;
317 exit(EXIT_SUCCESS); 469 break;
318 case 's': 470 case 's':
319 suite_id = atoi(optarg); 471 suite_id = atoi(optarg);
320 break; 472 break;
@@ -325,7 +477,7 @@ int main(int argc, char **argv)
325 pci_bus_id = atoi(optarg); 477 pci_bus_id = atoi(optarg);
326 break; 478 break;
327 case 'd': 479 case 'd':
328 pci_device_id = atoi(optarg); 480 sscanf(optarg, "%x", &pci_device_id);
329 break; 481 break;
330 case 'p': 482 case 'p':
331 display_devices = 1; 483 display_devices = 1;
@@ -333,6 +485,9 @@ int main(int argc, char **argv)
333 case 'r': 485 case 'r':
334 open_render_node = 1; 486 open_render_node = 1;
335 break; 487 break;
488 case 'f':
489 force_run = 1;
490 break;
336 case '?': 491 case '?':
337 case 'h': 492 case 'h':
338 fprintf(stderr, usage, argv[0]); 493 fprintf(stderr, usage, argv[0]);
@@ -359,10 +514,10 @@ int main(int argc, char **argv)
359 exit(EXIT_SUCCESS); 514 exit(EXIT_SUCCESS);
360 } 515 }
361 516
362 if (pci_bus_id > 0) { 517 if (pci_bus_id > 0 || pci_device_id) {
363 /* A device was specified to run the test */ 518 /* A device was specified to run the test */
364 test_device_index = amdgpu_find_device((uint8_t)pci_bus_id, 519 test_device_index = amdgpu_find_device(pci_bus_id,
365 (uint8_t)pci_device_id); 520 pci_device_id);
366 521
367 if (test_device_index >= 0) { 522 if (test_device_index >= 0) {
368 /* Most tests run on device of drm_amdgpu[0]. 523 /* Most tests run on device of drm_amdgpu[0].
@@ -398,17 +553,33 @@ int main(int argc, char **argv)
398 /* Run tests using the CUnit Basic interface */ 553 /* Run tests using the CUnit Basic interface */
399 CU_basic_set_mode(CU_BRM_VERBOSE); 554 CU_basic_set_mode(CU_BRM_VERBOSE);
400 555
556 /* Disable suites and individual tests based on misc. conditions */
557 amdgpu_disable_suites();
558
559 if (display_list) {
560 display_test_suites();
561 goto end;
562 }
563
401 if (suite_id != -1) { /* If user specify particular suite? */ 564 if (suite_id != -1) { /* If user specify particular suite? */
402 pSuite = CU_get_suite_by_index((unsigned int) suite_id, 565 pSuite = CU_get_suite_by_index((unsigned int) suite_id,
403 CU_get_registry()); 566 CU_get_registry());
404 567
405 if (pSuite) { 568 if (pSuite) {
569
570 if (force_run)
571 CU_set_suite_active(pSuite, CU_TRUE);
572
406 if (test_id != -1) { /* If user specify test id */ 573 if (test_id != -1) { /* If user specify test id */
407 pTest = CU_get_test_by_index( 574 pTest = CU_get_test_by_index(
408 (unsigned int) test_id, 575 (unsigned int) test_id,
409 pSuite); 576 pSuite);
410 if (pTest) 577 if (pTest) {
578 if (force_run)
579 CU_set_test_active(pTest, CU_TRUE);
580
411 CU_basic_run_test(pSuite, pTest); 581 CU_basic_run_test(pSuite, pTest);
582 }
412 else { 583 else {
413 fprintf(stderr, "Invalid test id: %d\n", 584 fprintf(stderr, "Invalid test id: %d\n",
414 test_id); 585 test_id);
@@ -428,6 +599,7 @@ int main(int argc, char **argv)
428 } else 599 } else
429 CU_basic_run_tests(); 600 CU_basic_run_tests();
430 601
602end:
431 CU_cleanup_registry(); 603 CU_cleanup_registry();
432 amdgpu_close_devices(); 604 amdgpu_close_devices();
433 return CU_get_error(); 605 return CU_get_error();
diff --git a/tests/amdgpu/amdgpu_test.h b/tests/amdgpu/amdgpu_test.h
index e30e2312..62875736 100644
--- a/tests/amdgpu/amdgpu_test.h
+++ b/tests/amdgpu/amdgpu_test.h
@@ -85,6 +85,11 @@ int suite_cs_tests_init();
85int suite_cs_tests_clean(); 85int suite_cs_tests_clean();
86 86
87/** 87/**
88 * Decide if the suite is enabled by default or not.
89 */
90CU_BOOL suite_cs_tests_enable(void);
91
92/**
88 * Tests in cs test suite 93 * Tests in cs test suite
89 */ 94 */
90extern CU_TestInfo cs_tests[]; 95extern CU_TestInfo cs_tests[];
@@ -100,11 +105,96 @@ int suite_vce_tests_init();
100int suite_vce_tests_clean(); 105int suite_vce_tests_clean();
101 106
102/** 107/**
108 * Decide if the suite is enabled by default or not.
109 */
110CU_BOOL suite_vce_tests_enable(void);
111
112/**
103 * Tests in vce test suite 113 * Tests in vce test suite
104 */ 114 */
105extern CU_TestInfo vce_tests[]; 115extern CU_TestInfo vce_tests[];
106 116
107/** 117/**
118 * Initialize vcn test suite
119 */
120int suite_vcn_tests_init();
121
122/**
123 * Deinitialize vcn test suite
124 */
125int suite_vcn_tests_clean();
126
127/**
128 * Decide if the suite is enabled by default or not.
129 */
130CU_BOOL suite_vcn_tests_enable(void);
131
132/**
133 * Tests in vcn test suite
134 */
135extern CU_TestInfo vcn_tests[];
136
137/**
138 * Initialize uvd enc test suite
139 */
140int suite_uvd_enc_tests_init();
141
142/**
143 * Deinitialize uvd enc test suite
144 */
145int suite_uvd_enc_tests_clean();
146
147/**
148 * Decide if the suite is enabled by default or not.
149 */
150CU_BOOL suite_uvd_enc_tests_enable(void);
151
152/**
153 * Tests in uvd enc test suite
154 */
155extern CU_TestInfo uvd_enc_tests[];
156
157/**
158 * Initialize deadlock test suite
159 */
160int suite_deadlock_tests_init();
161
162/**
163 * Deinitialize deadlock test suite
164 */
165int suite_deadlock_tests_clean();
166
167/**
168 * Decide if the suite is enabled by default or not.
169 */
170CU_BOOL suite_deadlock_tests_enable(void);
171
172/**
173 * Tests in deadlock test suite
174 */
175extern CU_TestInfo deadlock_tests[];
176
177/**
178 * Initialize vm test suite
179 */
180int suite_vm_tests_init();
181
182/**
183 * Deinitialize vm test suite
184 */
185int suite_vm_tests_clean();
186
187/**
188 * Decide if the suite is enabled by default or not.
189 */
190CU_BOOL suite_vm_tests_enable(void);
191
192/**
193 * Tests in vm test suite
194 */
195extern CU_TestInfo vm_tests[];
196
197/**
108 * Helper functions 198 * Helper functions
109 */ 199 */
110static inline amdgpu_bo_handle gpu_mem_alloc( 200static inline amdgpu_bo_handle gpu_mem_alloc(
@@ -162,6 +252,29 @@ static inline int gpu_mem_free(amdgpu_bo_handle bo,
162} 252}
163 253
164static inline int 254static inline int
255amdgpu_bo_alloc_wrap(amdgpu_device_handle dev, unsigned size,
256 unsigned alignment, unsigned heap, uint64_t flags,
257 amdgpu_bo_handle *bo)
258{
259 struct amdgpu_bo_alloc_request request = {};
260 amdgpu_bo_handle buf_handle;
261 int r;
262
263 request.alloc_size = size;
264 request.phys_alignment = alignment;
265 request.preferred_heap = heap;
266 request.flags = flags;
267
268 r = amdgpu_bo_alloc(dev, &request, &buf_handle);
269 if (r)
270 return r;
271
272 *bo = buf_handle;
273
274 return 0;
275}
276
277static inline int
165amdgpu_bo_alloc_and_map(amdgpu_device_handle dev, unsigned size, 278amdgpu_bo_alloc_and_map(amdgpu_device_handle dev, unsigned size,
166 unsigned alignment, unsigned heap, uint64_t flags, 279 unsigned alignment, unsigned heap, uint64_t flags,
167 amdgpu_bo_handle *bo, void **cpu, uint64_t *mc_address, 280 amdgpu_bo_handle *bo, void **cpu, uint64_t *mc_address,
@@ -236,4 +349,35 @@ amdgpu_get_bo_list(amdgpu_device_handle dev, amdgpu_bo_handle bo1,
236 return amdgpu_bo_list_create(dev, bo2 ? 2 : 1, resources, NULL, list); 349 return amdgpu_bo_list_create(dev, bo2 ? 2 : 1, resources, NULL, list);
237} 350}
238 351
352
353static inline CU_ErrorCode amdgpu_set_suite_active(const char *suite_name,
354 CU_BOOL active)
355{
356 CU_ErrorCode r = CU_set_suite_active(CU_get_suite(suite_name), active);
357
358 if (r != CUE_SUCCESS)
359 fprintf(stderr, "Failed to obtain suite %s\n", suite_name);
360
361 return r;
362}
363
364static inline CU_ErrorCode amdgpu_set_test_active(const char *suite_name,
365 const char *test_name, CU_BOOL active)
366{
367 CU_ErrorCode r;
368 CU_pSuite pSuite = CU_get_suite(suite_name);
369
370 if (!pSuite) {
371 fprintf(stderr, "Failed to obtain suite %s\n",
372 suite_name);
373 return CUE_NOSUITE;
374 }
375
376 r = CU_set_test_active(CU_get_test(pSuite, test_name), active);
377 if (r != CUE_SUCCESS)
378 fprintf(stderr, "Failed to obtain test %s\n", test_name);
379
380 return r;
381}
382
239#endif /* #ifdef _AMDGPU_TEST_H_ */ 383#endif /* #ifdef _AMDGPU_TEST_H_ */
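The amdgpu_set_suite_active()/amdgpu_set_test_active() helpers added at the end of the header are thin wrappers around CUnit's CU_set_suite_active()/CU_set_test_active() that only add an error message. For reference, the bare CUnit pattern they build on looks roughly like this (standalone sketch with a made-up suite name, not code from the patch):

/* Sketch of the CUnit activation pattern used by the amdgpu tests: register
 * a suite, deactivate it by name, then run; CU_basic_run_tests() then skips
 * the inactive suite (how that is reported depends on CUnit's inactive-suite
 * settings).  The '-f' option in amdgpu_test.c re-activates a selected
 * suite/test the same way, just with CU_TRUE. */
#include <stdio.h>
#include "CUnit/Basic.h"

static void trivial_test(void)
{
        CU_ASSERT_TRUE(1);
}

static CU_TestInfo example_tests[] = {
        { "trivial test", trivial_test },
        CU_TEST_INFO_NULL,
};

static CU_SuiteInfo example_suites[] = {
        { .pName = "Example Suite", .pTests = example_tests },
        CU_SUITE_INFO_NULL,
};

int main(void)
{
        if (CU_initialize_registry() != CUE_SUCCESS)
                return CU_get_error();

        if (CU_register_suites(example_suites) != CUE_SUCCESS)
                goto out;

        if (CU_set_suite_active(CU_get_suite("Example Suite"), CU_FALSE) != CUE_SUCCESS)
                fprintf(stderr, "failed to deactivate suite\n");

        CU_basic_set_mode(CU_BRM_VERBOSE);
        CU_basic_run_tests();
out:
        CU_cleanup_registry();
        return CU_get_error();
}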
diff --git a/tests/amdgpu/basic_tests.c b/tests/amdgpu/basic_tests.c
index bfda21b1..1adbddd9 100644
--- a/tests/amdgpu/basic_tests.c
+++ b/tests/amdgpu/basic_tests.c
@@ -21,16 +21,13 @@
21 * 21 *
22*/ 22*/
23 23
24#ifdef HAVE_CONFIG_H
25#include "config.h"
26#endif
27
28#include <stdio.h> 24#include <stdio.h>
29#include <stdlib.h> 25#include <stdlib.h>
30#include <unistd.h> 26#include <unistd.h>
31#ifdef HAVE_ALLOCA_H 27#ifdef HAVE_ALLOCA_H
32# include <alloca.h> 28# include <alloca.h>
33#endif 29#endif
30#include <sys/wait.h>
34 31
35#include "CUnit/Basic.h" 32#include "CUnit/Basic.h"
36 33
@@ -40,27 +37,38 @@
40static amdgpu_device_handle device_handle; 37static amdgpu_device_handle device_handle;
41static uint32_t major_version; 38static uint32_t major_version;
42static uint32_t minor_version; 39static uint32_t minor_version;
40static uint32_t family_id;
43 41
44static void amdgpu_query_info_test(void); 42static void amdgpu_query_info_test(void);
45static void amdgpu_memory_alloc(void);
46static void amdgpu_command_submission_gfx(void); 43static void amdgpu_command_submission_gfx(void);
47static void amdgpu_command_submission_compute(void); 44static void amdgpu_command_submission_compute(void);
45static void amdgpu_command_submission_multi_fence(void);
48static void amdgpu_command_submission_sdma(void); 46static void amdgpu_command_submission_sdma(void);
49static void amdgpu_userptr_test(void); 47static void amdgpu_userptr_test(void);
50static void amdgpu_semaphore_test(void); 48static void amdgpu_semaphore_test(void);
49static void amdgpu_sync_dependency_test(void);
50static void amdgpu_bo_eviction_test(void);
51 51
52static void amdgpu_command_submission_write_linear_helper(unsigned ip_type); 52static void amdgpu_command_submission_write_linear_helper(unsigned ip_type);
53static void amdgpu_command_submission_const_fill_helper(unsigned ip_type); 53static void amdgpu_command_submission_const_fill_helper(unsigned ip_type);
54static void amdgpu_command_submission_copy_linear_helper(unsigned ip_type); 54static void amdgpu_command_submission_copy_linear_helper(unsigned ip_type);
55 55static void amdgpu_test_exec_cs_helper(amdgpu_context_handle context_handle,
56 unsigned ip_type,
57 int instance, int pm4_dw, uint32_t *pm4_src,
58 int res_cnt, amdgpu_bo_handle *resources,
59 struct amdgpu_cs_ib_info *ib_info,
60 struct amdgpu_cs_request *ibs_request);
61
56CU_TestInfo basic_tests[] = { 62CU_TestInfo basic_tests[] = {
57 { "Query Info Test", amdgpu_query_info_test }, 63 { "Query Info Test", amdgpu_query_info_test },
58 { "Memory alloc Test", amdgpu_memory_alloc },
59 { "Userptr Test", amdgpu_userptr_test }, 64 { "Userptr Test", amdgpu_userptr_test },
65 { "bo eviction Test", amdgpu_bo_eviction_test },
60 { "Command submission Test (GFX)", amdgpu_command_submission_gfx }, 66 { "Command submission Test (GFX)", amdgpu_command_submission_gfx },
61 { "Command submission Test (Compute)", amdgpu_command_submission_compute }, 67 { "Command submission Test (Compute)", amdgpu_command_submission_compute },
68 { "Command submission Test (Multi-Fence)", amdgpu_command_submission_multi_fence },
62 { "Command submission Test (SDMA)", amdgpu_command_submission_sdma }, 69 { "Command submission Test (SDMA)", amdgpu_command_submission_sdma },
63 { "SW semaphore Test", amdgpu_semaphore_test }, 70 { "SW semaphore Test", amdgpu_semaphore_test },
71 { "Sync dependency Test", amdgpu_sync_dependency_test },
64 CU_TEST_INFO_NULL, 72 CU_TEST_INFO_NULL,
65}; 73};
66#define BUFFER_SIZE (8 * 1024) 74#define BUFFER_SIZE (8 * 1024)
@@ -197,22 +205,110 @@ CU_TestInfo basic_tests[] = {
197# define PACKET3_DMA_DATA_CMD_DAIC (1 << 29) 205# define PACKET3_DMA_DATA_CMD_DAIC (1 << 29)
198# define PACKET3_DMA_DATA_CMD_RAW_WAIT (1 << 30) 206# define PACKET3_DMA_DATA_CMD_RAW_WAIT (1 << 30)
199 207
208#define SDMA_PACKET_SI(op, b, t, s, cnt) ((((op) & 0xF) << 28) | \
209 (((b) & 0x1) << 26) | \
210 (((t) & 0x1) << 23) | \
211 (((s) & 0x1) << 22) | \
212 (((cnt) & 0xFFFFF) << 0))
213#define SDMA_OPCODE_COPY_SI 3
214#define SDMA_OPCODE_CONSTANT_FILL_SI 13
215#define SDMA_NOP_SI 0xf
216#define GFX_COMPUTE_NOP_SI 0x80000000
217#define PACKET3_DMA_DATA_SI 0x41
218# define PACKET3_DMA_DATA_SI_ENGINE(x) ((x) << 27)
219 /* 0 - ME
220 * 1 - PFP
221 */
222# define PACKET3_DMA_DATA_SI_DST_SEL(x) ((x) << 20)
223 /* 0 - DST_ADDR using DAS
224 * 1 - GDS
225 * 3 - DST_ADDR using L2
226 */
227# define PACKET3_DMA_DATA_SI_SRC_SEL(x) ((x) << 29)
228 /* 0 - SRC_ADDR using SAS
229 * 1 - GDS
230 * 2 - DATA
231 * 3 - SRC_ADDR using L2
232 */
233# define PACKET3_DMA_DATA_SI_CP_SYNC (1 << 31)
234
235
236#define PKT3_CONTEXT_CONTROL 0x28
237#define CONTEXT_CONTROL_LOAD_ENABLE(x) (((unsigned)(x) & 0x1) << 31)
238#define CONTEXT_CONTROL_LOAD_CE_RAM(x) (((unsigned)(x) & 0x1) << 28)
239#define CONTEXT_CONTROL_SHADOW_ENABLE(x) (((unsigned)(x) & 0x1) << 31)
240
241#define PKT3_CLEAR_STATE 0x12
242
243#define PKT3_SET_SH_REG 0x76
244#define PACKET3_SET_SH_REG_START 0x00002c00
245
246#define PACKET3_DISPATCH_DIRECT 0x15
247
248
249/* gfx 8 */
250#define mmCOMPUTE_PGM_LO 0x2e0c
251#define mmCOMPUTE_PGM_RSRC1 0x2e12
252#define mmCOMPUTE_TMPRING_SIZE 0x2e18
253#define mmCOMPUTE_USER_DATA_0 0x2e40
254#define mmCOMPUTE_USER_DATA_1 0x2e41
255#define mmCOMPUTE_RESOURCE_LIMITS 0x2e15
256#define mmCOMPUTE_NUM_THREAD_X 0x2e07
257
258
259
260#define SWAP_32(num) (((num & 0xff000000) >> 24) | \
261 ((num & 0x0000ff00) << 8) | \
262 ((num & 0x00ff0000) >> 8) | \
263 ((num & 0x000000ff) << 24))
264
265
266/* Shader code
267 * void main()
268{
269
270 float x = some_input;
271 for (unsigned i = 0; i < 1000000; i++)
272 x = sin(x);
273
274 u[0] = 42u;
275}
276*/
277
278static uint32_t shader_bin[] = {
279 SWAP_32(0x800082be), SWAP_32(0x02ff08bf), SWAP_32(0x7f969800), SWAP_32(0x040085bf),
280 SWAP_32(0x02810281), SWAP_32(0x02ff08bf), SWAP_32(0x7f969800), SWAP_32(0xfcff84bf),
281 SWAP_32(0xff0083be), SWAP_32(0x00f00000), SWAP_32(0xc10082be), SWAP_32(0xaa02007e),
282 SWAP_32(0x000070e0), SWAP_32(0x00000080), SWAP_32(0x000081bf)
283};
284
285#define CODE_OFFSET 512
286#define DATA_OFFSET 1024
287
288
200int suite_basic_tests_init(void) 289int suite_basic_tests_init(void)
201{ 290{
291 struct amdgpu_gpu_info gpu_info = {0};
202 int r; 292 int r;
203 293
204 r = amdgpu_device_initialize(drm_amdgpu[0], &major_version, 294 r = amdgpu_device_initialize(drm_amdgpu[0], &major_version,
205 &minor_version, &device_handle); 295 &minor_version, &device_handle);
206 296
207 if (r == 0) 297 if (r) {
208 return CUE_SUCCESS;
209 else {
210 if ((r == -EACCES) && (errno == EACCES)) 298 if ((r == -EACCES) && (errno == EACCES))
211 printf("\n\nError:%s. " 299 printf("\n\nError:%s. "
212 "Hint:Try to run this test program as root.", 300 "Hint:Try to run this test program as root.",
213 strerror(errno)); 301 strerror(errno));
214 return CUE_SINIT_FAILED; 302 return CUE_SINIT_FAILED;
215 } 303 }
304
305 r = amdgpu_query_gpu_info(device_handle, &gpu_info);
306 if (r)
307 return CUE_SINIT_FAILED;
308
309 family_id = gpu_info.family_id;
310
311 return CUE_SUCCESS;
216} 312}
217 313
218int suite_basic_tests_clean(void) 314int suite_basic_tests_clean(void)
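The SDMA_PACKET_SI() macro added above packs an SI SDMA header into one dword: the opcode in bits 31:28, three single-bit flags (parameters b, t and s) in bits 26, 23 and 22, and a 20-bit count in bits 19:0. A small, hedged sketch of how the SI "copy linear" packet used later in the eviction test is assembled from it (the addresses and length are example values only):

/* Sketch: build the 5-dword SI SDMA copy packet the way the eviction test
 * does -- header, low dst, low src, high dst, high src.  The macro
 * definitions are copied from the test code above. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define SDMA_PACKET_SI(op, b, t, s, cnt)   ((((op) & 0xF) << 28) |    \
                                            (((b) & 0x1) << 26) |     \
                                            (((t) & 0x1) << 23) |     \
                                            (((s) & 0x1) << 22) |     \
                                            (((cnt) & 0xFFFFF) << 0))
#define SDMA_OPCODE_COPY_SI 3

int main(void)
{
        uint64_t src_mc = 0x100000000ull;   /* example GPU VA of the source */
        uint64_t dst_mc = 0x100100000ull;   /* example GPU VA of the destination */
        uint32_t length = 1024;             /* bytes to copy */
        uint32_t pm4[5];
        int i = 0;

        pm4[i++] = SDMA_PACKET_SI(SDMA_OPCODE_COPY_SI, 0, 0, 0, length);
        pm4[i++] = (uint32_t)dst_mc;
        pm4[i++] = (uint32_t)src_mc;
        pm4[i++] = (uint32_t)(dst_mc >> 32);
        pm4[i++] = (uint32_t)(src_mc >> 32);

        for (i = 0; i < 5; i++)
                printf("dw%d: 0x%08" PRIx32 "\n", i, pm4[i]);
        return 0;
}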
@@ -239,53 +335,6 @@ static void amdgpu_query_info_test(void)
239 CU_ASSERT_EQUAL(r, 0); 335 CU_ASSERT_EQUAL(r, 0);
240} 336}
241 337
242static void amdgpu_memory_alloc(void)
243{
244 amdgpu_bo_handle bo;
245 amdgpu_va_handle va_handle;
246 uint64_t bo_mc;
247 int r;
248
249 /* Test visible VRAM */
250 bo = gpu_mem_alloc(device_handle,
251 4096, 4096,
252 AMDGPU_GEM_DOMAIN_VRAM,
253 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
254 &bo_mc, &va_handle);
255
256 r = gpu_mem_free(bo, va_handle, bo_mc, 4096);
257 CU_ASSERT_EQUAL(r, 0);
258
259 /* Test invisible VRAM */
260 bo = gpu_mem_alloc(device_handle,
261 4096, 4096,
262 AMDGPU_GEM_DOMAIN_VRAM,
263 AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
264 &bo_mc, &va_handle);
265
266 r = gpu_mem_free(bo, va_handle, bo_mc, 4096);
267 CU_ASSERT_EQUAL(r, 0);
268
269 /* Test GART Cacheable */
270 bo = gpu_mem_alloc(device_handle,
271 4096, 4096,
272 AMDGPU_GEM_DOMAIN_GTT,
273 0, &bo_mc, &va_handle);
274
275 r = gpu_mem_free(bo, va_handle, bo_mc, 4096);
276 CU_ASSERT_EQUAL(r, 0);
277
278 /* Test GART USWC */
279 bo = gpu_mem_alloc(device_handle,
280 4096, 4096,
281 AMDGPU_GEM_DOMAIN_GTT,
282 AMDGPU_GEM_CREATE_CPU_GTT_USWC,
283 &bo_mc, &va_handle);
284
285 r = gpu_mem_free(bo, va_handle, bo_mc, 4096);
286 CU_ASSERT_EQUAL(r, 0);
287}
288
289static void amdgpu_command_submission_gfx_separate_ibs(void) 338static void amdgpu_command_submission_gfx_separate_ibs(void)
290{ 339{
291 amdgpu_context_handle context_handle; 340 amdgpu_context_handle context_handle;
@@ -299,7 +348,7 @@ static void amdgpu_command_submission_gfx_separate_ibs(void)
299 uint32_t expired; 348 uint32_t expired;
300 amdgpu_bo_list_handle bo_list; 349 amdgpu_bo_list_handle bo_list;
301 amdgpu_va_handle va_handle, va_handle_ce; 350 amdgpu_va_handle va_handle, va_handle_ce;
302 int r; 351 int r, i = 0;
303 352
304 r = amdgpu_cs_ctx_create(device_handle, &context_handle); 353 r = amdgpu_cs_ctx_create(device_handle, &context_handle);
305 CU_ASSERT_EQUAL(r, 0); 354 CU_ASSERT_EQUAL(r, 0);
@@ -324,12 +373,14 @@ static void amdgpu_command_submission_gfx_separate_ibs(void)
324 373
325 /* IT_SET_CE_DE_COUNTERS */ 374 /* IT_SET_CE_DE_COUNTERS */
326 ptr = ib_result_ce_cpu; 375 ptr = ib_result_ce_cpu;
327 ptr[0] = 0xc0008900; 376 if (family_id != AMDGPU_FAMILY_SI) {
328 ptr[1] = 0; 377 ptr[i++] = 0xc0008900;
329 ptr[2] = 0xc0008400; 378 ptr[i++] = 0;
330 ptr[3] = 1; 379 }
380 ptr[i++] = 0xc0008400;
381 ptr[i++] = 1;
331 ib_info[0].ib_mc_address = ib_result_ce_mc_address; 382 ib_info[0].ib_mc_address = ib_result_ce_mc_address;
332 ib_info[0].size = 4; 383 ib_info[0].size = i;
333 ib_info[0].flags = AMDGPU_IB_FLAG_CE; 384 ib_info[0].flags = AMDGPU_IB_FLAG_CE;
334 385
335 /* IT_WAIT_ON_CE_COUNTER */ 386 /* IT_WAIT_ON_CE_COUNTER */
@@ -388,7 +439,7 @@ static void amdgpu_command_submission_gfx_shared_ib(void)
388 uint32_t expired; 439 uint32_t expired;
389 amdgpu_bo_list_handle bo_list; 440 amdgpu_bo_list_handle bo_list;
390 amdgpu_va_handle va_handle; 441 amdgpu_va_handle va_handle;
391 int r; 442 int r, i = 0;
392 443
393 r = amdgpu_cs_ctx_create(device_handle, &context_handle); 444 r = amdgpu_cs_ctx_create(device_handle, &context_handle);
394 CU_ASSERT_EQUAL(r, 0); 445 CU_ASSERT_EQUAL(r, 0);
@@ -407,12 +458,14 @@ static void amdgpu_command_submission_gfx_shared_ib(void)
407 458
408 /* IT_SET_CE_DE_COUNTERS */ 459 /* IT_SET_CE_DE_COUNTERS */
409 ptr = ib_result_cpu; 460 ptr = ib_result_cpu;
410 ptr[0] = 0xc0008900; 461 if (family_id != AMDGPU_FAMILY_SI) {
411 ptr[1] = 0; 462 ptr[i++] = 0xc0008900;
412 ptr[2] = 0xc0008400; 463 ptr[i++] = 0;
413 ptr[3] = 1; 464 }
465 ptr[i++] = 0xc0008400;
466 ptr[i++] = 1;
414 ib_info[0].ib_mc_address = ib_result_mc_address; 467 ib_info[0].ib_mc_address = ib_result_mc_address;
415 ib_info[0].size = 4; 468 ib_info[0].size = i;
416 ib_info[0].flags = AMDGPU_IB_FLAG_CE; 469 ib_info[0].flags = AMDGPU_IB_FLAG_CE;
417 470
418 ptr = (uint32_t *)ib_result_cpu + 4; 471 ptr = (uint32_t *)ib_result_cpu + 4;
@@ -467,6 +520,156 @@ static void amdgpu_command_submission_gfx_cp_copy_data(void)
467 amdgpu_command_submission_copy_linear_helper(AMDGPU_HW_IP_GFX); 520 amdgpu_command_submission_copy_linear_helper(AMDGPU_HW_IP_GFX);
468} 521}
469 522
523static void amdgpu_bo_eviction_test(void)
524{
525 const int sdma_write_length = 1024;
526 const int pm4_dw = 256;
527 amdgpu_context_handle context_handle;
528 amdgpu_bo_handle bo1, bo2, vram_max[2], gtt_max[2];
529 amdgpu_bo_handle *resources;
530 uint32_t *pm4;
531 struct amdgpu_cs_ib_info *ib_info;
532 struct amdgpu_cs_request *ibs_request;
533 uint64_t bo1_mc, bo2_mc;
534 volatile unsigned char *bo1_cpu, *bo2_cpu;
535 int i, j, r, loop1, loop2;
536 uint64_t gtt_flags[2] = {0, AMDGPU_GEM_CREATE_CPU_GTT_USWC};
537 amdgpu_va_handle bo1_va_handle, bo2_va_handle;
538 struct amdgpu_heap_info vram_info, gtt_info;
539
540 pm4 = calloc(pm4_dw, sizeof(*pm4));
541 CU_ASSERT_NOT_EQUAL(pm4, NULL);
542
543 ib_info = calloc(1, sizeof(*ib_info));
544 CU_ASSERT_NOT_EQUAL(ib_info, NULL);
545
546 ibs_request = calloc(1, sizeof(*ibs_request));
547 CU_ASSERT_NOT_EQUAL(ibs_request, NULL);
548
549 r = amdgpu_cs_ctx_create(device_handle, &context_handle);
550 CU_ASSERT_EQUAL(r, 0);
551
552 /* prepare resource */
553 resources = calloc(4, sizeof(amdgpu_bo_handle));
554 CU_ASSERT_NOT_EQUAL(resources, NULL);
555
556 r = amdgpu_query_heap_info(device_handle, AMDGPU_GEM_DOMAIN_VRAM,
557 0, &vram_info);
558 CU_ASSERT_EQUAL(r, 0);
559
560 r = amdgpu_bo_alloc_wrap(device_handle, vram_info.max_allocation, 4096,
561 AMDGPU_GEM_DOMAIN_VRAM, 0, &vram_max[0]);
562 CU_ASSERT_EQUAL(r, 0);
563 r = amdgpu_bo_alloc_wrap(device_handle, vram_info.max_allocation, 4096,
564 AMDGPU_GEM_DOMAIN_VRAM, 0, &vram_max[1]);
565 CU_ASSERT_EQUAL(r, 0);
566
567 r = amdgpu_query_heap_info(device_handle, AMDGPU_GEM_DOMAIN_GTT,
568 0, &gtt_info);
569 CU_ASSERT_EQUAL(r, 0);
570
571 r = amdgpu_bo_alloc_wrap(device_handle, gtt_info.max_allocation, 4096,
572 AMDGPU_GEM_DOMAIN_GTT, 0, &gtt_max[0]);
573 CU_ASSERT_EQUAL(r, 0);
574 r = amdgpu_bo_alloc_wrap(device_handle, gtt_info.max_allocation, 4096,
575 AMDGPU_GEM_DOMAIN_GTT, 0, &gtt_max[1]);
576 CU_ASSERT_EQUAL(r, 0);
577
578
579
580 loop1 = loop2 = 0;
581 /* run 4 loops to test all mapping combinations */
582 while(loop1 < 2) {
583 while(loop2 < 2) {
584 /* allocate UC bo1 for sDMA use */
585 r = amdgpu_bo_alloc_and_map(device_handle,
586 sdma_write_length, 4096,
587 AMDGPU_GEM_DOMAIN_GTT,
588 gtt_flags[loop1], &bo1,
589 (void**)&bo1_cpu, &bo1_mc,
590 &bo1_va_handle);
591 CU_ASSERT_EQUAL(r, 0);
592
593 /* set bo1 */
594 memset((void*)bo1_cpu, 0xaa, sdma_write_length);
595
596 /* allocate UC bo2 for sDMA use */
597 r = amdgpu_bo_alloc_and_map(device_handle,
598 sdma_write_length, 4096,
599 AMDGPU_GEM_DOMAIN_GTT,
600 gtt_flags[loop2], &bo2,
601 (void**)&bo2_cpu, &bo2_mc,
602 &bo2_va_handle);
603 CU_ASSERT_EQUAL(r, 0);
604
605 /* clear bo2 */
606 memset((void*)bo2_cpu, 0, sdma_write_length);
607
608 resources[0] = bo1;
609 resources[1] = bo2;
610 resources[2] = vram_max[loop2];
611 resources[3] = gtt_max[loop2];
612
613 /* fulfill PM4: test DMA copy linear */
614 i = j = 0;
615 if (family_id == AMDGPU_FAMILY_SI) {
616 pm4[i++] = SDMA_PACKET_SI(SDMA_OPCODE_COPY_SI, 0, 0, 0,
617 sdma_write_length);
618 pm4[i++] = 0xffffffff & bo2_mc;
619 pm4[i++] = 0xffffffff & bo1_mc;
620 pm4[i++] = (0xffffffff00000000 & bo2_mc) >> 32;
621 pm4[i++] = (0xffffffff00000000 & bo1_mc) >> 32;
622 } else {
623 pm4[i++] = SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0);
624 if (family_id >= AMDGPU_FAMILY_AI)
625 pm4[i++] = sdma_write_length - 1;
626 else
627 pm4[i++] = sdma_write_length;
628 pm4[i++] = 0;
629 pm4[i++] = 0xffffffff & bo1_mc;
630 pm4[i++] = (0xffffffff00000000 & bo1_mc) >> 32;
631 pm4[i++] = 0xffffffff & bo2_mc;
632 pm4[i++] = (0xffffffff00000000 & bo2_mc) >> 32;
633 }
634
635 amdgpu_test_exec_cs_helper(context_handle,
636 AMDGPU_HW_IP_DMA, 0,
637 i, pm4,
638 4, resources,
639 ib_info, ibs_request);
640
641 /* verify the SDMA copy result matches the expected pattern */
642 i = 0;
643 while(i < sdma_write_length) {
644 CU_ASSERT_EQUAL(bo2_cpu[i++], 0xaa);
645 }
646 r = amdgpu_bo_unmap_and_free(bo1, bo1_va_handle, bo1_mc,
647 sdma_write_length);
648 CU_ASSERT_EQUAL(r, 0);
649 r = amdgpu_bo_unmap_and_free(bo2, bo2_va_handle, bo2_mc,
650 sdma_write_length);
651 CU_ASSERT_EQUAL(r, 0);
652 loop2++;
653 }
654 loop2 = 0;
655 loop1++;
656 }
657 amdgpu_bo_free(vram_max[0]);
658 amdgpu_bo_free(vram_max[1]);
659 amdgpu_bo_free(gtt_max[0]);
660 amdgpu_bo_free(gtt_max[1]);
661 /* clean resources */
662 free(resources);
663 free(ibs_request);
664 free(ib_info);
665 free(pm4);
666
667 /* end of test */
668 r = amdgpu_cs_ctx_free(context_handle);
669 CU_ASSERT_EQUAL(r, 0);
670}
671
672
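
The eviction test added above hinges on one idea: after amdgpu_query_heap_info() reports max_allocation for a heap, keeping two BOs of that size referenced by the submission forces the kernel to evict the smaller test buffers between submissions. A minimal sketch of that setup, reusing the suite's amdgpu_bo_alloc_wrap() helper as seen in the hunk (outside the suite a plain amdgpu_bo_alloc() with the same size and domain would play the same role):

        /* Pin two max_allocation-sized BOs in the given heap so that any
         * later allocation there has to trigger eviction. */
        static int pin_heap(amdgpu_device_handle dev, uint32_t domain,
                            amdgpu_bo_handle out[2])
        {
                struct amdgpu_heap_info info;
                int i, r;

                r = amdgpu_query_heap_info(dev, domain, 0, &info);
                if (r)
                        return r;
                for (i = 0; i < 2; i++) {
                        r = amdgpu_bo_alloc_wrap(dev, info.max_allocation, 4096,
                                                 domain, 0, &out[i]);
                        if (r)
                                return r;
                }
                return 0;
        }
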
470static void amdgpu_command_submission_gfx(void) 673static void amdgpu_command_submission_gfx(void)
471{ 674{
472 /* write data using the CP */ 675 /* write data using the CP */
@@ -493,10 +696,19 @@ static void amdgpu_semaphore_test(void)
493 struct amdgpu_cs_fence fence_status = {0}; 696 struct amdgpu_cs_fence fence_status = {0};
494 uint32_t *ptr; 697 uint32_t *ptr;
495 uint32_t expired; 698 uint32_t expired;
699 uint32_t sdma_nop, gfx_nop;
496 amdgpu_bo_list_handle bo_list[2]; 700 amdgpu_bo_list_handle bo_list[2];
497 amdgpu_va_handle va_handle[2]; 701 amdgpu_va_handle va_handle[2];
498 int r, i; 702 int r, i;
499 703
704 if (family_id == AMDGPU_FAMILY_SI) {
705 sdma_nop = SDMA_PACKET_SI(SDMA_NOP_SI, 0, 0, 0, 0);
706 gfx_nop = GFX_COMPUTE_NOP_SI;
707 } else {
708 sdma_nop = SDMA_PKT_HEADER_OP(SDMA_NOP);
709 gfx_nop = GFX_COMPUTE_NOP;
710 }
711
500 r = amdgpu_cs_create_semaphore(&sem); 712 r = amdgpu_cs_create_semaphore(&sem);
501 CU_ASSERT_EQUAL(r, 0); 713 CU_ASSERT_EQUAL(r, 0);
502 for (i = 0; i < 2; i++) { 714 for (i = 0; i < 2; i++) {
@@ -516,7 +728,7 @@ static void amdgpu_semaphore_test(void)
516 728
517 /* 1. same context different engine */ 729 /* 1. same context different engine */
518 ptr = ib_result_cpu[0]; 730 ptr = ib_result_cpu[0];
519 ptr[0] = SDMA_NOP; 731 ptr[0] = sdma_nop;
520 ib_info[0].ib_mc_address = ib_result_mc_address[0]; 732 ib_info[0].ib_mc_address = ib_result_mc_address[0];
521 ib_info[0].size = 1; 733 ib_info[0].size = 1;
522 734
@@ -533,7 +745,7 @@ static void amdgpu_semaphore_test(void)
533 r = amdgpu_cs_wait_semaphore(context_handle[0], AMDGPU_HW_IP_GFX, 0, 0, sem); 745 r = amdgpu_cs_wait_semaphore(context_handle[0], AMDGPU_HW_IP_GFX, 0, 0, sem);
534 CU_ASSERT_EQUAL(r, 0); 746 CU_ASSERT_EQUAL(r, 0);
535 ptr = ib_result_cpu[1]; 747 ptr = ib_result_cpu[1];
536 ptr[0] = GFX_COMPUTE_NOP; 748 ptr[0] = gfx_nop;
537 ib_info[1].ib_mc_address = ib_result_mc_address[1]; 749 ib_info[1].ib_mc_address = ib_result_mc_address[1];
538 ib_info[1].size = 1; 750 ib_info[1].size = 1;
539 751
@@ -557,7 +769,7 @@ static void amdgpu_semaphore_test(void)
557 769
558 /* 2. same engine different context */ 770 /* 2. same engine different context */
559 ptr = ib_result_cpu[0]; 771 ptr = ib_result_cpu[0];
560 ptr[0] = GFX_COMPUTE_NOP; 772 ptr[0] = gfx_nop;
561 ib_info[0].ib_mc_address = ib_result_mc_address[0]; 773 ib_info[0].ib_mc_address = ib_result_mc_address[0];
562 ib_info[0].size = 1; 774 ib_info[0].size = 1;
563 775
@@ -574,7 +786,7 @@ static void amdgpu_semaphore_test(void)
574 r = amdgpu_cs_wait_semaphore(context_handle[1], AMDGPU_HW_IP_GFX, 0, 0, sem); 786 r = amdgpu_cs_wait_semaphore(context_handle[1], AMDGPU_HW_IP_GFX, 0, 0, sem);
575 CU_ASSERT_EQUAL(r, 0); 787 CU_ASSERT_EQUAL(r, 0);
576 ptr = ib_result_cpu[1]; 788 ptr = ib_result_cpu[1];
577 ptr[0] = GFX_COMPUTE_NOP; 789 ptr[0] = gfx_nop;
578 ib_info[1].ib_mc_address = ib_result_mc_address[1]; 790 ib_info[1].ib_mc_address = ib_result_mc_address[1];
579 ib_info[1].size = 1; 791 ib_info[1].size = 1;
580 792
@@ -595,6 +807,7 @@ static void amdgpu_semaphore_test(void)
595 500000000, 0, &expired); 807 500000000, 0, &expired);
596 CU_ASSERT_EQUAL(r, 0); 808 CU_ASSERT_EQUAL(r, 0);
597 CU_ASSERT_EQUAL(expired, true); 809 CU_ASSERT_EQUAL(expired, true);
810
598 for (i = 0; i < 2; i++) { 811 for (i = 0; i < 2; i++) {
599 r = amdgpu_bo_unmap_and_free(ib_result_handle[i], va_handle[i], 812 r = amdgpu_bo_unmap_and_free(ib_result_handle[i], va_handle[i],
600 ib_result_mc_address[i], 4096); 813 ib_result_mc_address[i], 4096);
@@ -622,14 +835,18 @@ static void amdgpu_command_submission_compute_nop(void)
622 struct amdgpu_cs_fence fence_status; 835 struct amdgpu_cs_fence fence_status;
623 uint32_t *ptr; 836 uint32_t *ptr;
624 uint32_t expired; 837 uint32_t expired;
625 int i, r, instance; 838 int r, instance;
626 amdgpu_bo_list_handle bo_list; 839 amdgpu_bo_list_handle bo_list;
627 amdgpu_va_handle va_handle; 840 amdgpu_va_handle va_handle;
841 struct drm_amdgpu_info_hw_ip info;
842
843 r = amdgpu_query_hw_ip_info(device_handle, AMDGPU_HW_IP_COMPUTE, 0, &info);
844 CU_ASSERT_EQUAL(r, 0);
628 845
629 r = amdgpu_cs_ctx_create(device_handle, &context_handle); 846 r = amdgpu_cs_ctx_create(device_handle, &context_handle);
630 CU_ASSERT_EQUAL(r, 0); 847 CU_ASSERT_EQUAL(r, 0);
631 848
632 for (instance = 0; instance < 8; instance++) { 849 for (instance = 0; (1 << instance) & info.available_rings; instance++) {
633 r = amdgpu_bo_alloc_and_map(device_handle, 4096, 4096, 850 r = amdgpu_bo_alloc_and_map(device_handle, 4096, 4096,
634 AMDGPU_GEM_DOMAIN_GTT, 0, 851 AMDGPU_GEM_DOMAIN_GTT, 0,
635 &ib_result_handle, &ib_result_cpu, 852 &ib_result_handle, &ib_result_cpu,
@@ -641,8 +858,8 @@ static void amdgpu_command_submission_compute_nop(void)
641 CU_ASSERT_EQUAL(r, 0); 858 CU_ASSERT_EQUAL(r, 0);
642 859
643 ptr = ib_result_cpu; 860 ptr = ib_result_cpu;
644 for (i = 0; i < 16; ++i) 861 memset(ptr, 0, 16);
645 ptr[i] = 0xffff1000; 862 ptr[0]=PACKET3(PACKET3_NOP, 14);
646 863
647 memset(&ib_info, 0, sizeof(struct amdgpu_cs_ib_info)); 864 memset(&ib_info, 0, sizeof(struct amdgpu_cs_ib_info));
648 ib_info.ib_mc_address = ib_result_mc_address; 865 ib_info.ib_mc_address = ib_result_mc_address;
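
For reference, the single type-3 NOP that now heads the compute IB can be reproduced with the same PACKET3() macro that the new deadlock_tests.c defines further down; PACKET3_NOP is assumed here to be the usual PM4 NOP opcode 0x10. With a count of 14 the header covers the remaining 15 dwords, so the whole 16-dword IB is one packet:

        #include <assert.h>
        #include <stdint.h>

        #define PACKET_TYPE3    3
        #define PACKET3(op, n)  ((PACKET_TYPE3 << 30) |         \
                                 (((op) & 0xFF) << 8) |         \
                                 ((n) & 0x3FFF) << 16)
        #define PACKET3_NOP     0x10    /* assumed PM4 NOP opcode */

        int main(void)
        {
                uint32_t ib[16] = { PACKET3(PACKET3_NOP, 14) };

                /* header is 0xc00e1000: type 3, opcode 0x10, count 14 */
                assert(ib[0] == 0xc00e1000);
                return 0;
        }
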
@@ -805,9 +1022,10 @@ static void amdgpu_command_submission_write_linear_helper(unsigned ip_type)
805 struct amdgpu_cs_request *ibs_request; 1022 struct amdgpu_cs_request *ibs_request;
806 uint64_t bo_mc; 1023 uint64_t bo_mc;
807 volatile uint32_t *bo_cpu; 1024 volatile uint32_t *bo_cpu;
808 int i, j, r, loop; 1025 int i, j, r, loop, ring_id;
809 uint64_t gtt_flags[2] = {0, AMDGPU_GEM_CREATE_CPU_GTT_USWC}; 1026 uint64_t gtt_flags[2] = {0, AMDGPU_GEM_CREATE_CPU_GTT_USWC};
810 amdgpu_va_handle va_handle; 1027 amdgpu_va_handle va_handle;
1028 struct drm_amdgpu_info_hw_ip hw_ip_info;
811 1029
812 pm4 = calloc(pm4_dw, sizeof(*pm4)); 1030 pm4 = calloc(pm4_dw, sizeof(*pm4));
813 CU_ASSERT_NOT_EQUAL(pm4, NULL); 1031 CU_ASSERT_NOT_EQUAL(pm4, NULL);
@@ -818,6 +1036,9 @@ static void amdgpu_command_submission_write_linear_helper(unsigned ip_type)
818 ibs_request = calloc(1, sizeof(*ibs_request)); 1036 ibs_request = calloc(1, sizeof(*ibs_request));
819 CU_ASSERT_NOT_EQUAL(ibs_request, NULL); 1037 CU_ASSERT_NOT_EQUAL(ibs_request, NULL);
820 1038
1039 r = amdgpu_query_hw_ip_info(device_handle, ip_type, 0, &hw_ip_info);
1040 CU_ASSERT_EQUAL(r, 0);
1041
821 r = amdgpu_cs_ctx_create(device_handle, &context_handle); 1042 r = amdgpu_cs_ctx_create(device_handle, &context_handle);
822 CU_ASSERT_EQUAL(r, 0); 1043 CU_ASSERT_EQUAL(r, 0);
823 1044
@@ -825,58 +1046,66 @@ static void amdgpu_command_submission_write_linear_helper(unsigned ip_type)
825 resources = calloc(1, sizeof(amdgpu_bo_handle)); 1046 resources = calloc(1, sizeof(amdgpu_bo_handle));
826 CU_ASSERT_NOT_EQUAL(resources, NULL); 1047 CU_ASSERT_NOT_EQUAL(resources, NULL);
827 1048
828 loop = 0; 1049 for (ring_id = 0; (1 << ring_id) & hw_ip_info.available_rings; ring_id++) {
829 while(loop < 2) { 1050 loop = 0;
830 /* allocate UC bo for sDMA use */ 1051 while(loop < 2) {
831 r = amdgpu_bo_alloc_and_map(device_handle, 1052 /* allocate UC bo for sDMA use */
832 sdma_write_length * sizeof(uint32_t), 1053 r = amdgpu_bo_alloc_and_map(device_handle,
833 4096, AMDGPU_GEM_DOMAIN_GTT, 1054 sdma_write_length * sizeof(uint32_t),
834 gtt_flags[loop], &bo, (void**)&bo_cpu, 1055 4096, AMDGPU_GEM_DOMAIN_GTT,
835 &bo_mc, &va_handle); 1056 gtt_flags[loop], &bo, (void**)&bo_cpu,
836 CU_ASSERT_EQUAL(r, 0); 1057 &bo_mc, &va_handle);
1058 CU_ASSERT_EQUAL(r, 0);
837 1059
838 /* clear bo */ 1060 /* clear bo */
839 memset((void*)bo_cpu, 0, sdma_write_length * sizeof(uint32_t)); 1061 memset((void*)bo_cpu, 0, sdma_write_length * sizeof(uint32_t));
840
841
842 resources[0] = bo;
843
844 /* fulfill PM4: test DMA write-linear */
845 i = j = 0;
846 if (ip_type == AMDGPU_HW_IP_DMA) {
847 pm4[i++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
848 SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
849 pm4[i++] = 0xffffffff & bo_mc;
850 pm4[i++] = (0xffffffff00000000 & bo_mc) >> 32;
851 pm4[i++] = sdma_write_length;
852 while(j++ < sdma_write_length)
853 pm4[i++] = 0xdeadbeaf;
854 } else if ((ip_type == AMDGPU_HW_IP_GFX) ||
855 (ip_type == AMDGPU_HW_IP_COMPUTE)) {
856 pm4[i++] = PACKET3(PACKET3_WRITE_DATA, 2 + sdma_write_length);
857 pm4[i++] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
858 pm4[i++] = 0xfffffffc & bo_mc;
859 pm4[i++] = (0xffffffff00000000 & bo_mc) >> 32;
860 while(j++ < sdma_write_length)
861 pm4[i++] = 0xdeadbeaf;
862 }
863 1062
864 amdgpu_test_exec_cs_helper(context_handle, 1063 resources[0] = bo;
865 ip_type, 0,
866 i, pm4,
867 1, resources,
868 ib_info, ibs_request);
869 1064
870 /* verify if SDMA test result meets with expected */ 1065 /* fulfill PM4: test DMA write-linear */
871 i = 0; 1066 i = j = 0;
872 while(i < sdma_write_length) { 1067 if (ip_type == AMDGPU_HW_IP_DMA) {
873 CU_ASSERT_EQUAL(bo_cpu[i++], 0xdeadbeaf); 1068 if (family_id == AMDGPU_FAMILY_SI)
874 } 1069 pm4[i++] = SDMA_PACKET_SI(SDMA_OPCODE_WRITE, 0, 0, 0,
1070 sdma_write_length);
1071 else
1072 pm4[i++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
1073 SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
1074 pm4[i++] = 0xffffffff & bo_mc;
1075 pm4[i++] = (0xffffffff00000000 & bo_mc) >> 32;
1076 if (family_id >= AMDGPU_FAMILY_AI)
1077 pm4[i++] = sdma_write_length - 1;
1078 else if (family_id != AMDGPU_FAMILY_SI)
1079 pm4[i++] = sdma_write_length;
1080 while(j++ < sdma_write_length)
1081 pm4[i++] = 0xdeadbeaf;
1082 } else if ((ip_type == AMDGPU_HW_IP_GFX) ||
1083 (ip_type == AMDGPU_HW_IP_COMPUTE)) {
1084 pm4[i++] = PACKET3(PACKET3_WRITE_DATA, 2 + sdma_write_length);
1085 pm4[i++] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
1086 pm4[i++] = 0xfffffffc & bo_mc;
1087 pm4[i++] = (0xffffffff00000000 & bo_mc) >> 32;
1088 while(j++ < sdma_write_length)
1089 pm4[i++] = 0xdeadbeaf;
1090 }
875 1091
876 r = amdgpu_bo_unmap_and_free(bo, va_handle, bo_mc, 1092 amdgpu_test_exec_cs_helper(context_handle,
877 sdma_write_length * sizeof(uint32_t)); 1093 ip_type, ring_id,
878 CU_ASSERT_EQUAL(r, 0); 1094 i, pm4,
879 loop++; 1095 1, resources,
1096 ib_info, ibs_request);
1097
1098 /* verify the write-linear result matches the expected pattern */
1099 i = 0;
1100 while(i < sdma_write_length) {
1101 CU_ASSERT_EQUAL(bo_cpu[i++], 0xdeadbeaf);
1102 }
1103
1104 r = amdgpu_bo_unmap_and_free(bo, va_handle, bo_mc,
1105 sdma_write_length * sizeof(uint32_t));
1106 CU_ASSERT_EQUAL(r, 0);
1107 loop++;
1108 }
880 } 1109 }
881 /* clean resources */ 1110 /* clean resources */
882 free(resources); 1111 free(resources);
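
The new outer loop is the pattern worth noting: drm_amdgpu_info_hw_ip.available_rings is a bitmask with one bit per usable ring of the queried IP, so the helper now exercises every ring instead of only ring 0. A stand-alone sketch of the same iteration, assuming the usual amdgpu.h/amdgpu_drm.h includes and with run_on_ring() as a hypothetical stand-in for the body of the test loop:

        static void for_each_ring(amdgpu_device_handle dev, unsigned ip_type)
        {
                struct drm_amdgpu_info_hw_ip info;
                unsigned ring;

                if (amdgpu_query_hw_ip_info(dev, ip_type, 0, &info))
                        return;

                /* bit N set in available_rings means ring N accepts submissions */
                for (ring = 0; (1u << ring) & info.available_rings; ring++)
                        run_on_ring(dev, ip_type, ring);   /* hypothetical helper */
        }
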
@@ -906,9 +1135,10 @@ static void amdgpu_command_submission_const_fill_helper(unsigned ip_type)
906 struct amdgpu_cs_request *ibs_request; 1135 struct amdgpu_cs_request *ibs_request;
907 uint64_t bo_mc; 1136 uint64_t bo_mc;
908 volatile uint32_t *bo_cpu; 1137 volatile uint32_t *bo_cpu;
909 int i, j, r, loop; 1138 int i, j, r, loop, ring_id;
910 uint64_t gtt_flags[2] = {0, AMDGPU_GEM_CREATE_CPU_GTT_USWC}; 1139 uint64_t gtt_flags[2] = {0, AMDGPU_GEM_CREATE_CPU_GTT_USWC};
911 amdgpu_va_handle va_handle; 1140 amdgpu_va_handle va_handle;
1141 struct drm_amdgpu_info_hw_ip hw_ip_info;
912 1142
913 pm4 = calloc(pm4_dw, sizeof(*pm4)); 1143 pm4 = calloc(pm4_dw, sizeof(*pm4));
914 CU_ASSERT_NOT_EQUAL(pm4, NULL); 1144 CU_ASSERT_NOT_EQUAL(pm4, NULL);
@@ -919,6 +1149,9 @@ static void amdgpu_command_submission_const_fill_helper(unsigned ip_type)
919 ibs_request = calloc(1, sizeof(*ibs_request)); 1149 ibs_request = calloc(1, sizeof(*ibs_request));
920 CU_ASSERT_NOT_EQUAL(ibs_request, NULL); 1150 CU_ASSERT_NOT_EQUAL(ibs_request, NULL);
921 1151
1152 r = amdgpu_query_hw_ip_info(device_handle, ip_type, 0, &hw_ip_info);
1153 CU_ASSERT_EQUAL(r, 0);
1154
922 r = amdgpu_cs_ctx_create(device_handle, &context_handle); 1155 r = amdgpu_cs_ctx_create(device_handle, &context_handle);
923 CU_ASSERT_EQUAL(r, 0); 1156 CU_ASSERT_EQUAL(r, 0);
924 1157
@@ -926,60 +1159,86 @@ static void amdgpu_command_submission_const_fill_helper(unsigned ip_type)
926 resources = calloc(1, sizeof(amdgpu_bo_handle)); 1159 resources = calloc(1, sizeof(amdgpu_bo_handle));
927 CU_ASSERT_NOT_EQUAL(resources, NULL); 1160 CU_ASSERT_NOT_EQUAL(resources, NULL);
928 1161
929 loop = 0; 1162 for (ring_id = 0; (1 << ring_id) & hw_ip_info.available_rings; ring_id++) {
930 while(loop < 2) { 1163 loop = 0;
931 /* allocate UC bo for sDMA use */ 1164 while(loop < 2) {
932 r = amdgpu_bo_alloc_and_map(device_handle, 1165 /* allocate UC bo for sDMA use */
933 sdma_write_length, 4096, 1166 r = amdgpu_bo_alloc_and_map(device_handle,
934 AMDGPU_GEM_DOMAIN_GTT, 1167 sdma_write_length, 4096,
935 gtt_flags[loop], &bo, (void**)&bo_cpu, 1168 AMDGPU_GEM_DOMAIN_GTT,
936 &bo_mc, &va_handle); 1169 gtt_flags[loop], &bo, (void**)&bo_cpu,
937 CU_ASSERT_EQUAL(r, 0); 1170 &bo_mc, &va_handle);
1171 CU_ASSERT_EQUAL(r, 0);
938 1172
939 /* clear bo */ 1173 /* clear bo */
940 memset((void*)bo_cpu, 0, sdma_write_length); 1174 memset((void*)bo_cpu, 0, sdma_write_length);
941
942 resources[0] = bo;
943
944 /* fulfill PM4: test DMA const fill */
945 i = j = 0;
946 if (ip_type == AMDGPU_HW_IP_DMA) {
947 pm4[i++] = SDMA_PACKET(SDMA_OPCODE_CONSTANT_FILL, 0,
948 SDMA_CONSTANT_FILL_EXTRA_SIZE(2));
949 pm4[i++] = 0xffffffff & bo_mc;
950 pm4[i++] = (0xffffffff00000000 & bo_mc) >> 32;
951 pm4[i++] = 0xdeadbeaf;
952 pm4[i++] = sdma_write_length;
953 } else if ((ip_type == AMDGPU_HW_IP_GFX) ||
954 (ip_type == AMDGPU_HW_IP_COMPUTE)) {
955 pm4[i++] = PACKET3(PACKET3_DMA_DATA, 5);
956 pm4[i++] = PACKET3_DMA_DATA_ENGINE(0) |
957 PACKET3_DMA_DATA_DST_SEL(0) |
958 PACKET3_DMA_DATA_SRC_SEL(2) |
959 PACKET3_DMA_DATA_CP_SYNC;
960 pm4[i++] = 0xdeadbeaf;
961 pm4[i++] = 0;
962 pm4[i++] = 0xfffffffc & bo_mc;
963 pm4[i++] = (0xffffffff00000000 & bo_mc) >> 32;
964 pm4[i++] = sdma_write_length;
965 }
966 1175
967 amdgpu_test_exec_cs_helper(context_handle, 1176 resources[0] = bo;
968 ip_type, 0,
969 i, pm4,
970 1, resources,
971 ib_info, ibs_request);
972 1177
973 /* verify if SDMA test result meets with expected */ 1178 /* fulfill PM4: test DMA const fill */
974 i = 0; 1179 i = j = 0;
975 while(i < (sdma_write_length / 4)) { 1180 if (ip_type == AMDGPU_HW_IP_DMA) {
976 CU_ASSERT_EQUAL(bo_cpu[i++], 0xdeadbeaf); 1181 if (family_id == AMDGPU_FAMILY_SI) {
977 } 1182 pm4[i++] = SDMA_PACKET_SI(SDMA_OPCODE_CONSTANT_FILL_SI,
1183 0, 0, 0,
1184 sdma_write_length / 4);
1185 pm4[i++] = 0xfffffffc & bo_mc;
1186 pm4[i++] = 0xdeadbeaf;
1187 pm4[i++] = (0xffffffff00000000 & bo_mc) >> 16;
1188 } else {
1189 pm4[i++] = SDMA_PACKET(SDMA_OPCODE_CONSTANT_FILL, 0,
1190 SDMA_CONSTANT_FILL_EXTRA_SIZE(2));
1191 pm4[i++] = 0xffffffff & bo_mc;
1192 pm4[i++] = (0xffffffff00000000 & bo_mc) >> 32;
1193 pm4[i++] = 0xdeadbeaf;
1194 if (family_id >= AMDGPU_FAMILY_AI)
1195 pm4[i++] = sdma_write_length - 1;
1196 else
1197 pm4[i++] = sdma_write_length;
1198 }
1199 } else if ((ip_type == AMDGPU_HW_IP_GFX) ||
1200 (ip_type == AMDGPU_HW_IP_COMPUTE)) {
1201 if (family_id == AMDGPU_FAMILY_SI) {
1202 pm4[i++] = PACKET3(PACKET3_DMA_DATA_SI, 4);
1203 pm4[i++] = 0xdeadbeaf;
1204 pm4[i++] = PACKET3_DMA_DATA_SI_ENGINE(0) |
1205 PACKET3_DMA_DATA_SI_DST_SEL(0) |
1206 PACKET3_DMA_DATA_SI_SRC_SEL(2) |
1207 PACKET3_DMA_DATA_SI_CP_SYNC;
1208 pm4[i++] = 0xffffffff & bo_mc;
1209 pm4[i++] = (0xffffffff00000000 & bo_mc) >> 32;
1210 pm4[i++] = sdma_write_length;
1211 } else {
1212 pm4[i++] = PACKET3(PACKET3_DMA_DATA, 5);
1213 pm4[i++] = PACKET3_DMA_DATA_ENGINE(0) |
1214 PACKET3_DMA_DATA_DST_SEL(0) |
1215 PACKET3_DMA_DATA_SRC_SEL(2) |
1216 PACKET3_DMA_DATA_CP_SYNC;
1217 pm4[i++] = 0xdeadbeaf;
1218 pm4[i++] = 0;
1219 pm4[i++] = 0xfffffffc & bo_mc;
1220 pm4[i++] = (0xffffffff00000000 & bo_mc) >> 32;
1221 pm4[i++] = sdma_write_length;
1222 }
1223 }
978 1224
979 r = amdgpu_bo_unmap_and_free(bo, va_handle, bo_mc, 1225 amdgpu_test_exec_cs_helper(context_handle,
980 sdma_write_length); 1226 ip_type, ring_id,
981 CU_ASSERT_EQUAL(r, 0); 1227 i, pm4,
982 loop++; 1228 1, resources,
1229 ib_info, ibs_request);
1230
1231 /* verify the constant-fill result matches the expected pattern */
1232 i = 0;
1233 while(i < (sdma_write_length / 4)) {
1234 CU_ASSERT_EQUAL(bo_cpu[i++], 0xdeadbeaf);
1235 }
1236
1237 r = amdgpu_bo_unmap_and_free(bo, va_handle, bo_mc,
1238 sdma_write_length);
1239 CU_ASSERT_EQUAL(r, 0);
1240 loop++;
1241 }
983 } 1242 }
984 /* clean resources */ 1243 /* clean resources */
985 free(resources); 1244 free(resources);
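
The family checks sprinkled through these write/fill/copy packets encode two hardware quirks: on SI the byte count travels in the SDMA packet header itself (the last argument of SDMA_PACKET_SI() as used above), while from AMDGPU_FAMILY_AI (gfx9) onwards the separate count dword holds "bytes - 1" rather than "bytes". A small helper makes that intent explicit; this is only a sketch of the logic already spelled out inline in the hunks:

        /* Count dword for the non-SI SDMA write/fill/copy packets. */
        static uint32_t sdma_count_dw(uint32_t family_id, uint32_t nbytes)
        {
                /* gfx9 and later SDMA expects the length minus one */
                if (family_id >= AMDGPU_FAMILY_AI)
                        return nbytes - 1;
                return nbytes;
        }
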
@@ -1009,9 +1268,10 @@ static void amdgpu_command_submission_copy_linear_helper(unsigned ip_type)
1009 struct amdgpu_cs_request *ibs_request; 1268 struct amdgpu_cs_request *ibs_request;
1010 uint64_t bo1_mc, bo2_mc; 1269 uint64_t bo1_mc, bo2_mc;
1011 volatile unsigned char *bo1_cpu, *bo2_cpu; 1270 volatile unsigned char *bo1_cpu, *bo2_cpu;
1012 int i, j, r, loop1, loop2; 1271 int i, j, r, loop1, loop2, ring_id;
1013 uint64_t gtt_flags[2] = {0, AMDGPU_GEM_CREATE_CPU_GTT_USWC}; 1272 uint64_t gtt_flags[2] = {0, AMDGPU_GEM_CREATE_CPU_GTT_USWC};
1014 amdgpu_va_handle bo1_va_handle, bo2_va_handle; 1273 amdgpu_va_handle bo1_va_handle, bo2_va_handle;
1274 struct drm_amdgpu_info_hw_ip hw_ip_info;
1015 1275
1016 pm4 = calloc(pm4_dw, sizeof(*pm4)); 1276 pm4 = calloc(pm4_dw, sizeof(*pm4));
1017 CU_ASSERT_NOT_EQUAL(pm4, NULL); 1277 CU_ASSERT_NOT_EQUAL(pm4, NULL);
@@ -1022,6 +1282,9 @@ static void amdgpu_command_submission_copy_linear_helper(unsigned ip_type)
1022 ibs_request = calloc(1, sizeof(*ibs_request)); 1282 ibs_request = calloc(1, sizeof(*ibs_request));
1023 CU_ASSERT_NOT_EQUAL(ibs_request, NULL); 1283 CU_ASSERT_NOT_EQUAL(ibs_request, NULL);
1024 1284
1285 r = amdgpu_query_hw_ip_info(device_handle, ip_type, 0, &hw_ip_info);
1286 CU_ASSERT_EQUAL(r, 0);
1287
1025 r = amdgpu_cs_ctx_create(device_handle, &context_handle); 1288 r = amdgpu_cs_ctx_create(device_handle, &context_handle);
1026 CU_ASSERT_EQUAL(r, 0); 1289 CU_ASSERT_EQUAL(r, 0);
1027 1290
@@ -1029,81 +1292,111 @@ static void amdgpu_command_submission_copy_linear_helper(unsigned ip_type)
1029 resources = calloc(2, sizeof(amdgpu_bo_handle)); 1292 resources = calloc(2, sizeof(amdgpu_bo_handle));
1030 CU_ASSERT_NOT_EQUAL(resources, NULL); 1293 CU_ASSERT_NOT_EQUAL(resources, NULL);
1031 1294
1032 loop1 = loop2 = 0; 1295 for (ring_id = 0; (1 << ring_id) & hw_ip_info.available_rings; ring_id++) {
1033 /* run 9 circle to test all mapping combination */ 1296 loop1 = loop2 = 0;
1034 while(loop1 < 2) { 1297 /* run 4 loops to test all mapping combinations */
1035 while(loop2 < 2) { 1298 while(loop1 < 2) {
1036 /* allocate UC bo1for sDMA use */ 1299 while(loop2 < 2) {
1037 r = amdgpu_bo_alloc_and_map(device_handle, 1300 /* allocate UC bo1 for sDMA use */
1038 sdma_write_length, 4096, 1301 r = amdgpu_bo_alloc_and_map(device_handle,
1039 AMDGPU_GEM_DOMAIN_GTT, 1302 sdma_write_length, 4096,
1040 gtt_flags[loop1], &bo1, 1303 AMDGPU_GEM_DOMAIN_GTT,
1041 (void**)&bo1_cpu, &bo1_mc, 1304 gtt_flags[loop1], &bo1,
1042 &bo1_va_handle); 1305 (void**)&bo1_cpu, &bo1_mc,
1043 CU_ASSERT_EQUAL(r, 0); 1306 &bo1_va_handle);
1044 1307 CU_ASSERT_EQUAL(r, 0);
1045 /* set bo1 */ 1308
1046 memset((void*)bo1_cpu, 0xaa, sdma_write_length); 1309 /* set bo1 */
1047 1310 memset((void*)bo1_cpu, 0xaa, sdma_write_length);
1048 /* allocate UC bo2 for sDMA use */ 1311
1049 r = amdgpu_bo_alloc_and_map(device_handle, 1312 /* allocate UC bo2 for sDMA use */
1050 sdma_write_length, 4096, 1313 r = amdgpu_bo_alloc_and_map(device_handle,
1051 AMDGPU_GEM_DOMAIN_GTT, 1314 sdma_write_length, 4096,
1052 gtt_flags[loop2], &bo2, 1315 AMDGPU_GEM_DOMAIN_GTT,
1053 (void**)&bo2_cpu, &bo2_mc, 1316 gtt_flags[loop2], &bo2,
1054 &bo2_va_handle); 1317 (void**)&bo2_cpu, &bo2_mc,
1055 CU_ASSERT_EQUAL(r, 0); 1318 &bo2_va_handle);
1056 1319 CU_ASSERT_EQUAL(r, 0);
1057 /* clear bo2 */ 1320
1058 memset((void*)bo2_cpu, 0, sdma_write_length); 1321 /* clear bo2 */
1059 1322 memset((void*)bo2_cpu, 0, sdma_write_length);
1060 resources[0] = bo1; 1323
1061 resources[1] = bo2; 1324 resources[0] = bo1;
1062 1325 resources[1] = bo2;
1063 /* fulfill PM4: test DMA copy linear */ 1326
1064 i = j = 0; 1327 /* fulfill PM4: test DMA copy linear */
1065 if (ip_type == AMDGPU_HW_IP_DMA) { 1328 i = j = 0;
1066 pm4[i++] = SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0); 1329 if (ip_type == AMDGPU_HW_IP_DMA) {
1067 pm4[i++] = sdma_write_length; 1330 if (family_id == AMDGPU_FAMILY_SI) {
1068 pm4[i++] = 0; 1331 pm4[i++] = SDMA_PACKET_SI(SDMA_OPCODE_COPY_SI,
1069 pm4[i++] = 0xffffffff & bo1_mc; 1332 0, 0, 0,
1070 pm4[i++] = (0xffffffff00000000 & bo1_mc) >> 32; 1333 sdma_write_length);
1071 pm4[i++] = 0xffffffff & bo2_mc; 1334 pm4[i++] = 0xffffffff & bo2_mc;
1072 pm4[i++] = (0xffffffff00000000 & bo2_mc) >> 32; 1335 pm4[i++] = 0xffffffff & bo1_mc;
1073 } else if ((ip_type == AMDGPU_HW_IP_GFX) || 1336 pm4[i++] = (0xffffffff00000000 & bo2_mc) >> 32;
1074 (ip_type == AMDGPU_HW_IP_COMPUTE)) { 1337 pm4[i++] = (0xffffffff00000000 & bo1_mc) >> 32;
1075 pm4[i++] = PACKET3(PACKET3_DMA_DATA, 5); 1338 } else {
1076 pm4[i++] = PACKET3_DMA_DATA_ENGINE(0) | 1339 pm4[i++] = SDMA_PACKET(SDMA_OPCODE_COPY,
1077 PACKET3_DMA_DATA_DST_SEL(0) | 1340 SDMA_COPY_SUB_OPCODE_LINEAR,
1078 PACKET3_DMA_DATA_SRC_SEL(0) | 1341 0);
1079 PACKET3_DMA_DATA_CP_SYNC; 1342 if (family_id >= AMDGPU_FAMILY_AI)
1080 pm4[i++] = 0xfffffffc & bo1_mc; 1343 pm4[i++] = sdma_write_length - 1;
1081 pm4[i++] = (0xffffffff00000000 & bo1_mc) >> 32; 1344 else
1082 pm4[i++] = 0xfffffffc & bo2_mc; 1345 pm4[i++] = sdma_write_length;
1083 pm4[i++] = (0xffffffff00000000 & bo2_mc) >> 32; 1346 pm4[i++] = 0;
1084 pm4[i++] = sdma_write_length; 1347 pm4[i++] = 0xffffffff & bo1_mc;
1085 } 1348 pm4[i++] = (0xffffffff00000000 & bo1_mc) >> 32;
1086 1349 pm4[i++] = 0xffffffff & bo2_mc;
1087 amdgpu_test_exec_cs_helper(context_handle, 1350 pm4[i++] = (0xffffffff00000000 & bo2_mc) >> 32;
1088 ip_type, 0, 1351 }
1089 i, pm4, 1352 } else if ((ip_type == AMDGPU_HW_IP_GFX) ||
1090 2, resources, 1353 (ip_type == AMDGPU_HW_IP_COMPUTE)) {
1091 ib_info, ibs_request); 1354 if (family_id == AMDGPU_FAMILY_SI) {
1092 1355 pm4[i++] = PACKET3(PACKET3_DMA_DATA_SI, 4);
1093 /* verify if SDMA test result meets with expected */ 1356 pm4[i++] = 0xfffffffc & bo1_mc;
1094 i = 0; 1357 pm4[i++] = PACKET3_DMA_DATA_SI_ENGINE(0) |
1095 while(i < sdma_write_length) { 1358 PACKET3_DMA_DATA_SI_DST_SEL(0) |
1096 CU_ASSERT_EQUAL(bo2_cpu[i++], 0xaa); 1359 PACKET3_DMA_DATA_SI_SRC_SEL(0) |
1360 PACKET3_DMA_DATA_SI_CP_SYNC |
1361 (0xffff00000000 & bo1_mc) >> 32;
1362 pm4[i++] = 0xfffffffc & bo2_mc;
1363 pm4[i++] = (0xffffffff00000000 & bo2_mc) >> 32;
1364 pm4[i++] = sdma_write_length;
1365 } else {
1366 pm4[i++] = PACKET3(PACKET3_DMA_DATA, 5);
1367 pm4[i++] = PACKET3_DMA_DATA_ENGINE(0) |
1368 PACKET3_DMA_DATA_DST_SEL(0) |
1369 PACKET3_DMA_DATA_SRC_SEL(0) |
1370 PACKET3_DMA_DATA_CP_SYNC;
1371 pm4[i++] = 0xfffffffc & bo1_mc;
1372 pm4[i++] = (0xffffffff00000000 & bo1_mc) >> 32;
1373 pm4[i++] = 0xfffffffc & bo2_mc;
1374 pm4[i++] = (0xffffffff00000000 & bo2_mc) >> 32;
1375 pm4[i++] = sdma_write_length;
1376 }
1377 }
1378
1379 amdgpu_test_exec_cs_helper(context_handle,
1380 ip_type, ring_id,
1381 i, pm4,
1382 2, resources,
1383 ib_info, ibs_request);
1384
1385 /* verify the copy-linear result matches the expected pattern */
1386 i = 0;
1387 while(i < sdma_write_length) {
1388 CU_ASSERT_EQUAL(bo2_cpu[i++], 0xaa);
1389 }
1390 r = amdgpu_bo_unmap_and_free(bo1, bo1_va_handle, bo1_mc,
1391 sdma_write_length);
1392 CU_ASSERT_EQUAL(r, 0);
1393 r = amdgpu_bo_unmap_and_free(bo2, bo2_va_handle, bo2_mc,
1394 sdma_write_length);
1395 CU_ASSERT_EQUAL(r, 0);
1396 loop2++;
1097 } 1397 }
1098 r = amdgpu_bo_unmap_and_free(bo1, bo1_va_handle, bo1_mc, 1398 loop1++;
1099 sdma_write_length);
1100 CU_ASSERT_EQUAL(r, 0);
1101 r = amdgpu_bo_unmap_and_free(bo2, bo2_va_handle, bo2_mc,
1102 sdma_write_length);
1103 CU_ASSERT_EQUAL(r, 0);
1104 loop2++;
1105 } 1399 }
1106 loop1++;
1107 } 1400 }
1108 /* clean resources */ 1401 /* clean resources */
1109 free(resources); 1402 free(resources);
@@ -1128,6 +1421,106 @@ static void amdgpu_command_submission_sdma(void)
1128 amdgpu_command_submission_sdma_copy_linear(); 1421 amdgpu_command_submission_sdma_copy_linear();
1129} 1422}
1130 1423
1424static void amdgpu_command_submission_multi_fence_wait_all(bool wait_all)
1425{
1426 amdgpu_context_handle context_handle;
1427 amdgpu_bo_handle ib_result_handle, ib_result_ce_handle;
1428 void *ib_result_cpu, *ib_result_ce_cpu;
1429 uint64_t ib_result_mc_address, ib_result_ce_mc_address;
1430 struct amdgpu_cs_request ibs_request[2] = {0};
1431 struct amdgpu_cs_ib_info ib_info[2];
1432 struct amdgpu_cs_fence fence_status[2] = {0};
1433 uint32_t *ptr;
1434 uint32_t expired;
1435 amdgpu_bo_list_handle bo_list;
1436 amdgpu_va_handle va_handle, va_handle_ce;
1437 int r;
1438 int i = 0, ib_cs_num = 2;
1439
1440 r = amdgpu_cs_ctx_create(device_handle, &context_handle);
1441 CU_ASSERT_EQUAL(r, 0);
1442
1443 r = amdgpu_bo_alloc_and_map(device_handle, 4096, 4096,
1444 AMDGPU_GEM_DOMAIN_GTT, 0,
1445 &ib_result_handle, &ib_result_cpu,
1446 &ib_result_mc_address, &va_handle);
1447 CU_ASSERT_EQUAL(r, 0);
1448
1449 r = amdgpu_bo_alloc_and_map(device_handle, 4096, 4096,
1450 AMDGPU_GEM_DOMAIN_GTT, 0,
1451 &ib_result_ce_handle, &ib_result_ce_cpu,
1452 &ib_result_ce_mc_address, &va_handle_ce);
1453 CU_ASSERT_EQUAL(r, 0);
1454
1455 r = amdgpu_get_bo_list(device_handle, ib_result_handle,
1456 ib_result_ce_handle, &bo_list);
1457 CU_ASSERT_EQUAL(r, 0);
1458
1459 memset(ib_info, 0, 2 * sizeof(struct amdgpu_cs_ib_info));
1460
1461 /* IT_SET_CE_DE_COUNTERS */
1462 ptr = ib_result_ce_cpu;
1463 if (family_id != AMDGPU_FAMILY_SI) {
1464 ptr[i++] = 0xc0008900;
1465 ptr[i++] = 0;
1466 }
1467 ptr[i++] = 0xc0008400;
1468 ptr[i++] = 1;
1469 ib_info[0].ib_mc_address = ib_result_ce_mc_address;
1470 ib_info[0].size = i;
1471 ib_info[0].flags = AMDGPU_IB_FLAG_CE;
1472
1473 /* IT_WAIT_ON_CE_COUNTER */
1474 ptr = ib_result_cpu;
1475 ptr[0] = 0xc0008600;
1476 ptr[1] = 0x00000001;
1477 ib_info[1].ib_mc_address = ib_result_mc_address;
1478 ib_info[1].size = 2;
1479
1480 for (i = 0; i < ib_cs_num; i++) {
1481 ibs_request[i].ip_type = AMDGPU_HW_IP_GFX;
1482 ibs_request[i].number_of_ibs = 2;
1483 ibs_request[i].ibs = ib_info;
1484 ibs_request[i].resources = bo_list;
1485 ibs_request[i].fence_info.handle = NULL;
1486 }
1487
1488 r = amdgpu_cs_submit(context_handle, 0,ibs_request, ib_cs_num);
1489
1490 CU_ASSERT_EQUAL(r, 0);
1491
1492 for (i = 0; i < ib_cs_num; i++) {
1493 fence_status[i].context = context_handle;
1494 fence_status[i].ip_type = AMDGPU_HW_IP_GFX;
1495 fence_status[i].fence = ibs_request[i].seq_no;
1496 }
1497
1498 r = amdgpu_cs_wait_fences(fence_status, ib_cs_num, wait_all,
1499 AMDGPU_TIMEOUT_INFINITE,
1500 &expired, NULL);
1501 CU_ASSERT_EQUAL(r, 0);
1502
1503 r = amdgpu_bo_unmap_and_free(ib_result_handle, va_handle,
1504 ib_result_mc_address, 4096);
1505 CU_ASSERT_EQUAL(r, 0);
1506
1507 r = amdgpu_bo_unmap_and_free(ib_result_ce_handle, va_handle_ce,
1508 ib_result_ce_mc_address, 4096);
1509 CU_ASSERT_EQUAL(r, 0);
1510
1511 r = amdgpu_bo_list_destroy(bo_list);
1512 CU_ASSERT_EQUAL(r, 0);
1513
1514 r = amdgpu_cs_ctx_free(context_handle);
1515 CU_ASSERT_EQUAL(r, 0);
1516}
1517
1518static void amdgpu_command_submission_multi_fence(void)
1519{
1520 amdgpu_command_submission_multi_fence_wait_all(true);
1521 amdgpu_command_submission_multi_fence_wait_all(false);
1522}
1523
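
The only knob the wrapper toggles is wait_all: amdgpu_cs_wait_fences() either returns once every fence in the array has signalled (wait_all = true) or as soon as any one of them has (wait_all = false), in which case the optional last argument reports which index signalled first. A short usage sketch reusing the test's fence_status array and r, with the same arguments the test passes:

        uint32_t signalled = 0, first = 0;

        /* block until both submissions retire */
        r = amdgpu_cs_wait_fences(fence_status, 2, true,
                                  AMDGPU_TIMEOUT_INFINITE, &signalled, NULL);

        /* return as soon as either submission retires; 'first' says which one */
        r = amdgpu_cs_wait_fences(fence_status, 2, false,
                                  AMDGPU_TIMEOUT_INFINITE, &signalled, &first);
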
1131static void amdgpu_userptr_test(void) 1524static void amdgpu_userptr_test(void)
1132{ 1525{
1133 int i, r, j; 1526 int i, r, j;
@@ -1175,15 +1568,28 @@ static void amdgpu_userptr_test(void)
1175 handle = buf_handle; 1568 handle = buf_handle;
1176 1569
1177 j = i = 0; 1570 j = i = 0;
1178 pm4[i++] = SDMA_PACKET(SDMA_OPCODE_WRITE, 1571
1179 SDMA_WRITE_SUB_OPCODE_LINEAR, 0); 1572 if (family_id == AMDGPU_FAMILY_SI)
1573 pm4[i++] = SDMA_PACKET_SI(SDMA_OPCODE_WRITE, 0, 0, 0,
1574 sdma_write_length);
1575 else
1576 pm4[i++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
1577 SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
1180 pm4[i++] = 0xffffffff & bo_mc; 1578 pm4[i++] = 0xffffffff & bo_mc;
1181 pm4[i++] = (0xffffffff00000000 & bo_mc) >> 32; 1579 pm4[i++] = (0xffffffff00000000 & bo_mc) >> 32;
1182 pm4[i++] = sdma_write_length; 1580 if (family_id >= AMDGPU_FAMILY_AI)
1581 pm4[i++] = sdma_write_length - 1;
1582 else if (family_id != AMDGPU_FAMILY_SI)
1583 pm4[i++] = sdma_write_length;
1183 1584
1184 while (j++ < sdma_write_length) 1585 while (j++ < sdma_write_length)
1185 pm4[i++] = 0xdeadbeaf; 1586 pm4[i++] = 0xdeadbeaf;
1186 1587
1588 if (!fork()) { /* child scribbles on the IB and exits, exercising COW while the userptr BO is live */
1589 pm4[0] = 0x0;
1590 exit(0);
1591 }
1592
1187 amdgpu_test_exec_cs_helper(context_handle, 1593 amdgpu_test_exec_cs_helper(context_handle,
1188 AMDGPU_HW_IP_DMA, 0, 1594 AMDGPU_HW_IP_DMA, 0,
1189 i, pm4, 1595 i, pm4,
@@ -1207,4 +1613,212 @@ static void amdgpu_userptr_test(void)
1207 1613
1208 r = amdgpu_cs_ctx_free(context_handle); 1614 r = amdgpu_cs_ctx_free(context_handle);
1209 CU_ASSERT_EQUAL(r, 0); 1615 CU_ASSERT_EQUAL(r, 0);
1616
1617 wait(NULL);
1618}
1619
1620static void amdgpu_sync_dependency_test(void)
1621{
1622 amdgpu_context_handle context_handle[2];
1623 amdgpu_bo_handle ib_result_handle;
1624 void *ib_result_cpu;
1625 uint64_t ib_result_mc_address;
1626 struct amdgpu_cs_request ibs_request;
1627 struct amdgpu_cs_ib_info ib_info;
1628 struct amdgpu_cs_fence fence_status;
1629 uint32_t expired;
1630 int i, j, r;
1631 amdgpu_bo_list_handle bo_list;
1632 amdgpu_va_handle va_handle;
1633 static uint32_t *ptr;
1634 uint64_t seq_no;
1635
1636 r = amdgpu_cs_ctx_create(device_handle, &context_handle[0]);
1637 CU_ASSERT_EQUAL(r, 0);
1638 r = amdgpu_cs_ctx_create(device_handle, &context_handle[1]);
1639 CU_ASSERT_EQUAL(r, 0);
1640
1641 r = amdgpu_bo_alloc_and_map(device_handle, 8192, 4096,
1642 AMDGPU_GEM_DOMAIN_GTT, 0,
1643 &ib_result_handle, &ib_result_cpu,
1644 &ib_result_mc_address, &va_handle);
1645 CU_ASSERT_EQUAL(r, 0);
1646
1647 r = amdgpu_get_bo_list(device_handle, ib_result_handle, NULL,
1648 &bo_list);
1649 CU_ASSERT_EQUAL(r, 0);
1650
1651 ptr = ib_result_cpu;
1652 i = 0;
1653
1654 memcpy(ptr + CODE_OFFSET , shader_bin, sizeof(shader_bin));
1655
1656 /* Dispatch minimal init config and verify it's executed */
1657 ptr[i++] = PACKET3(PKT3_CONTEXT_CONTROL, 1);
1658 ptr[i++] = 0x80000000;
1659 ptr[i++] = 0x80000000;
1660
1661 ptr[i++] = PACKET3(PKT3_CLEAR_STATE, 0);
1662 ptr[i++] = 0x80000000;
1663
1664
1665 /* Program compute regs */
1666 ptr[i++] = PACKET3(PKT3_SET_SH_REG, 2);
1667 ptr[i++] = mmCOMPUTE_PGM_LO - PACKET3_SET_SH_REG_START;
1668 ptr[i++] = (ib_result_mc_address + CODE_OFFSET * 4) >> 8;
1669 ptr[i++] = (ib_result_mc_address + CODE_OFFSET * 4) >> 40;
1670
1671
1672 ptr[i++] = PACKET3(PKT3_SET_SH_REG, 2);
1673 ptr[i++] = mmCOMPUTE_PGM_RSRC1 - PACKET3_SET_SH_REG_START;
1674 /*
1675 * 002c0040 COMPUTE_PGM_RSRC1 <- VGPRS = 0
1676 SGPRS = 1
1677 PRIORITY = 0
1678 FLOAT_MODE = 192 (0xc0)
1679 PRIV = 0
1680 DX10_CLAMP = 1
1681 DEBUG_MODE = 0
1682 IEEE_MODE = 0
1683 BULKY = 0
1684 CDBG_USER = 0
1685 *
1686 */
1687 ptr[i++] = 0x002c0040;
1688
1689
1690 /*
1691 * 00000010 COMPUTE_PGM_RSRC2 <- SCRATCH_EN = 0
1692 USER_SGPR = 8
1693 TRAP_PRESENT = 0
1694 TGID_X_EN = 0
1695 TGID_Y_EN = 0
1696 TGID_Z_EN = 0
1697 TG_SIZE_EN = 0
1698 TIDIG_COMP_CNT = 0
1699 EXCP_EN_MSB = 0
1700 LDS_SIZE = 0
1701 EXCP_EN = 0
1702 *
1703 */
1704 ptr[i++] = 0x00000010;
1705
1706
1707/*
1708 * 00000100 COMPUTE_TMPRING_SIZE <- WAVES = 256 (0x100)
1709 WAVESIZE = 0
1710 *
1711 */
1712 ptr[i++] = PACKET3(PKT3_SET_SH_REG, 1);
1713 ptr[i++] = mmCOMPUTE_TMPRING_SIZE - PACKET3_SET_SH_REG_START;
1714 ptr[i++] = 0x00000100;
1715
1716 ptr[i++] = PACKET3(PKT3_SET_SH_REG, 2);
1717 ptr[i++] = mmCOMPUTE_USER_DATA_0 - PACKET3_SET_SH_REG_START;
1718 ptr[i++] = 0xffffffff & (ib_result_mc_address + DATA_OFFSET * 4);
1719 ptr[i++] = (0xffffffff00000000 & (ib_result_mc_address + DATA_OFFSET * 4)) >> 32;
1720
1721 ptr[i++] = PACKET3(PKT3_SET_SH_REG, 1);
1722 ptr[i++] = mmCOMPUTE_RESOURCE_LIMITS - PACKET3_SET_SH_REG_START;
1723 ptr[i++] = 0;
1724
1725 ptr[i++] = PACKET3(PKT3_SET_SH_REG, 3);
1726 ptr[i++] = mmCOMPUTE_NUM_THREAD_X - PACKET3_SET_SH_REG_START;
1727 ptr[i++] = 1;
1728 ptr[i++] = 1;
1729 ptr[i++] = 1;
1730
1731
1732 /* Dispatch */
1733 ptr[i++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
1734 ptr[i++] = 1;
1735 ptr[i++] = 1;
1736 ptr[i++] = 1;
1737 ptr[i++] = 0x00000045; /* DISPATCH DIRECT field */
1738
1739
1740 while (i & 7)
1741 ptr[i++] = 0xffff1000; /* type3 nop packet */
1742
1743 memset(&ib_info, 0, sizeof(struct amdgpu_cs_ib_info));
1744 ib_info.ib_mc_address = ib_result_mc_address;
1745 ib_info.size = i;
1746
1747 memset(&ibs_request, 0, sizeof(struct amdgpu_cs_request));
1748 ibs_request.ip_type = AMDGPU_HW_IP_GFX;
1749 ibs_request.ring = 0;
1750 ibs_request.number_of_ibs = 1;
1751 ibs_request.ibs = &ib_info;
1752 ibs_request.resources = bo_list;
1753 ibs_request.fence_info.handle = NULL;
1754
1755 r = amdgpu_cs_submit(context_handle[1], 0,&ibs_request, 1);
1756 CU_ASSERT_EQUAL(r, 0);
1757 seq_no = ibs_request.seq_no;
1758
1759
1760
1761 /* Prepare second command with dependency on the first */
1762 j = i;
1763 ptr[i++] = PACKET3(PACKET3_WRITE_DATA, 3);
1764 ptr[i++] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
1765 ptr[i++] = 0xfffffffc & (ib_result_mc_address + DATA_OFFSET * 4);
1766 ptr[i++] = (0xffffffff00000000 & (ib_result_mc_address + DATA_OFFSET * 4)) >> 32;
1767 ptr[i++] = 99;
1768
1769 while (i & 7)
1770 ptr[i++] = 0xffff1000; /* type3 nop packet */
1771
1772 memset(&ib_info, 0, sizeof(struct amdgpu_cs_ib_info));
1773 ib_info.ib_mc_address = ib_result_mc_address + j * 4;
1774 ib_info.size = i - j;
1775
1776 memset(&ibs_request, 0, sizeof(struct amdgpu_cs_request));
1777 ibs_request.ip_type = AMDGPU_HW_IP_GFX;
1778 ibs_request.ring = 0;
1779 ibs_request.number_of_ibs = 1;
1780 ibs_request.ibs = &ib_info;
1781 ibs_request.resources = bo_list;
1782 ibs_request.fence_info.handle = NULL;
1783
1784 ibs_request.number_of_dependencies = 1;
1785
1786 ibs_request.dependencies = calloc(1, sizeof(*ibs_request.dependencies));
1787 ibs_request.dependencies[0].context = context_handle[1];
1788 ibs_request.dependencies[0].ip_instance = 0;
1789 ibs_request.dependencies[0].ring = 0;
1790 ibs_request.dependencies[0].fence = seq_no;
1791
1792
1793 r = amdgpu_cs_submit(context_handle[0], 0,&ibs_request, 1);
1794 CU_ASSERT_EQUAL(r, 0);
1795
1796
1797 memset(&fence_status, 0, sizeof(struct amdgpu_cs_fence));
1798 fence_status.context = context_handle[0];
1799 fence_status.ip_type = AMDGPU_HW_IP_GFX;
1800 fence_status.ip_instance = 0;
1801 fence_status.ring = 0;
1802 fence_status.fence = ibs_request.seq_no;
1803
1804 r = amdgpu_cs_query_fence_status(&fence_status,
1805 AMDGPU_TIMEOUT_INFINITE,0, &expired);
1806 CU_ASSERT_EQUAL(r, 0);
1807
1808 /* Expect the second command to wait for shader to complete */
1809 CU_ASSERT_EQUAL(ptr[DATA_OFFSET], 99);
1810
1811 r = amdgpu_bo_list_destroy(bo_list);
1812 CU_ASSERT_EQUAL(r, 0);
1813
1814 r = amdgpu_bo_unmap_and_free(ib_result_handle, va_handle,
1815 ib_result_mc_address, 4096);
1816 CU_ASSERT_EQUAL(r, 0);
1817
1818 r = amdgpu_cs_ctx_free(context_handle[0]);
1819 CU_ASSERT_EQUAL(r, 0);
1820 r = amdgpu_cs_ctx_free(context_handle[1]);
1821 CU_ASSERT_EQUAL(r, 0);
1822
1823 free(ibs_request.dependencies);
1210} 1824}
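
The mechanism the new sync-dependency test exercises is the dependencies array of struct amdgpu_cs_request: each entry is an amdgpu_cs_fence naming a context, IP, ring and sequence number, and the kernel will not schedule the submission until all of them have signalled. Reduced to its core, with seq_no being the sequence number returned by the earlier submit as in the test above:

        struct amdgpu_cs_fence dep = {
                .context     = context_handle[1],   /* context that produced the work */
                .ip_type     = AMDGPU_HW_IP_GFX,
                .ip_instance = 0,
                .ring        = 0,
                .fence       = seq_no,
        };

        ibs_request.dependencies = &dep;
        ibs_request.number_of_dependencies = 1;

        /* this submission is held back until 'dep' has signalled */
        r = amdgpu_cs_submit(context_handle[0], 0, &ibs_request, 1);
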
diff --git a/tests/amdgpu/bo_tests.c b/tests/amdgpu/bo_tests.c
index 74b5e77b..9d4da4af 100644
--- a/tests/amdgpu/bo_tests.c
+++ b/tests/amdgpu/bo_tests.c
@@ -21,10 +21,6 @@
21 * 21 *
22*/ 22*/
23 23
24#ifdef HAVE_CONFIG_H
25#include "config.h"
26#endif
27
28#include <stdio.h> 24#include <stdio.h>
29 25
30#include "CUnit/Basic.h" 26#include "CUnit/Basic.h"
@@ -46,13 +42,15 @@ static amdgpu_va_handle va_handle;
46static void amdgpu_bo_export_import(void); 42static void amdgpu_bo_export_import(void);
47static void amdgpu_bo_metadata(void); 43static void amdgpu_bo_metadata(void);
48static void amdgpu_bo_map_unmap(void); 44static void amdgpu_bo_map_unmap(void);
45static void amdgpu_memory_alloc(void);
46static void amdgpu_mem_fail_alloc(void);
49 47
50CU_TestInfo bo_tests[] = { 48CU_TestInfo bo_tests[] = {
51 { "Export/Import", amdgpu_bo_export_import }, 49 { "Export/Import", amdgpu_bo_export_import },
52#if 0
53 { "Metadata", amdgpu_bo_metadata }, 50 { "Metadata", amdgpu_bo_metadata },
54#endif
55 { "CPU map/unmap", amdgpu_bo_map_unmap }, 51 { "CPU map/unmap", amdgpu_bo_map_unmap },
52 { "Memory alloc Test", amdgpu_memory_alloc },
53 { "Memory fail alloc Test", amdgpu_mem_fail_alloc },
56 CU_TEST_INFO_NULL, 54 CU_TEST_INFO_NULL,
57}; 55};
58 56
@@ -195,3 +193,72 @@ static void amdgpu_bo_map_unmap(void)
195 r = amdgpu_bo_cpu_unmap(buffer_handle); 193 r = amdgpu_bo_cpu_unmap(buffer_handle);
196 CU_ASSERT_EQUAL(r, 0); 194 CU_ASSERT_EQUAL(r, 0);
197} 195}
196
197static void amdgpu_memory_alloc(void)
198{
199 amdgpu_bo_handle bo;
200 amdgpu_va_handle va_handle;
201 uint64_t bo_mc;
202 int r;
203
204 /* Test visible VRAM */
205 bo = gpu_mem_alloc(device_handle,
206 4096, 4096,
207 AMDGPU_GEM_DOMAIN_VRAM,
208 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
209 &bo_mc, &va_handle);
210
211 r = gpu_mem_free(bo, va_handle, bo_mc, 4096);
212 CU_ASSERT_EQUAL(r, 0);
213
214 /* Test invisible VRAM */
215 bo = gpu_mem_alloc(device_handle,
216 4096, 4096,
217 AMDGPU_GEM_DOMAIN_VRAM,
218 AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
219 &bo_mc, &va_handle);
220
221 r = gpu_mem_free(bo, va_handle, bo_mc, 4096);
222 CU_ASSERT_EQUAL(r, 0);
223
224 /* Test GART Cacheable */
225 bo = gpu_mem_alloc(device_handle,
226 4096, 4096,
227 AMDGPU_GEM_DOMAIN_GTT,
228 0, &bo_mc, &va_handle);
229
230 r = gpu_mem_free(bo, va_handle, bo_mc, 4096);
231 CU_ASSERT_EQUAL(r, 0);
232
233 /* Test GART USWC */
234 bo = gpu_mem_alloc(device_handle,
235 4096, 4096,
236 AMDGPU_GEM_DOMAIN_GTT,
237 AMDGPU_GEM_CREATE_CPU_GTT_USWC,
238 &bo_mc, &va_handle);
239
240 r = gpu_mem_free(bo, va_handle, bo_mc, 4096);
241 CU_ASSERT_EQUAL(r, 0);
242}
243
244static void amdgpu_mem_fail_alloc(void)
245{
246 amdgpu_bo_handle bo;
247 int r;
248 struct amdgpu_bo_alloc_request req = {0};
249 amdgpu_bo_handle buf_handle;
250
251 /* Test impossible mem allocation, 1TB */
252 req.alloc_size = 0xE8D4A51000;
253 req.phys_alignment = 4096;
254 req.preferred_heap = AMDGPU_GEM_DOMAIN_VRAM;
255 req.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
256
257 r = amdgpu_bo_alloc(device_handle, &req, &buf_handle);
258 CU_ASSERT_EQUAL(r, -ENOMEM);
259
260 if (!r) {
261 r = amdgpu_bo_free(buf_handle);
262 CU_ASSERT_EQUAL(r, 0);
263 }
264}
diff --git a/tests/amdgpu/cs_tests.c b/tests/amdgpu/cs_tests.c
index 82c55aa8..7ad0f0dc 100644
--- a/tests/amdgpu/cs_tests.c
+++ b/tests/amdgpu/cs_tests.c
@@ -21,10 +21,6 @@
21 * 21 *
22*/ 22*/
23 23
24#ifdef HAVE_CONFIG_H
25#include "config.h"
26#endif
27
28#include <stdio.h> 24#include <stdio.h>
29 25
30#include "CUnit/Basic.h" 26#include "CUnit/Basic.h"
@@ -32,7 +28,7 @@
32#include "util_math.h" 28#include "util_math.h"
33 29
34#include "amdgpu_test.h" 30#include "amdgpu_test.h"
35#include "uvd_messages.h" 31#include "decode_messages.h"
36#include "amdgpu_drm.h" 32#include "amdgpu_drm.h"
37#include "amdgpu_internal.h" 33#include "amdgpu_internal.h"
38 34
@@ -66,6 +62,26 @@ CU_TestInfo cs_tests[] = {
66 CU_TEST_INFO_NULL, 62 CU_TEST_INFO_NULL,
67}; 63};
68 64
65CU_BOOL suite_cs_tests_enable(void)
66{
67 if (amdgpu_device_initialize(drm_amdgpu[0], &major_version,
68 &minor_version, &device_handle))
69 return CU_FALSE;
70
71 family_id = device_handle->info.family_id;
72
73 if (amdgpu_device_deinitialize(device_handle))
74 return CU_FALSE;
75
76
77 if (family_id >= AMDGPU_FAMILY_RV || family_id == AMDGPU_FAMILY_SI) {
78 printf("\n\nThe ASIC does NOT support UVD, suite disabled\n");
79 return CU_FALSE;
80 }
81
82 return CU_TRUE;
83}
84
69int suite_cs_tests_init(void) 85int suite_cs_tests_init(void)
70{ 86{
71 amdgpu_bo_handle ib_result_handle; 87 amdgpu_bo_handle ib_result_handle;
@@ -175,11 +191,11 @@ static int submit(unsigned ndw, unsigned ip)
175 191
176static void uvd_cmd(uint64_t addr, unsigned cmd, int *idx) 192static void uvd_cmd(uint64_t addr, unsigned cmd, int *idx)
177{ 193{
178 ib_cpu[(*idx)++] = 0x3BC4; 194 ib_cpu[(*idx)++] = (family_id < AMDGPU_FAMILY_AI) ? 0x3BC4 : 0x81C4;
179 ib_cpu[(*idx)++] = addr; 195 ib_cpu[(*idx)++] = addr;
180 ib_cpu[(*idx)++] = 0x3BC5; 196 ib_cpu[(*idx)++] = (family_id < AMDGPU_FAMILY_AI) ? 0x3BC5 : 0x81C5;
181 ib_cpu[(*idx)++] = addr >> 32; 197 ib_cpu[(*idx)++] = addr >> 32;
182 ib_cpu[(*idx)++] = 0x3BC3; 198 ib_cpu[(*idx)++] = (family_id < AMDGPU_FAMILY_AI) ? 0x3BC3 : 0x81C3;
183 ib_cpu[(*idx)++] = cmd << 1; 199 ib_cpu[(*idx)++] = cmd << 1;
184} 200}
185 201
@@ -211,10 +227,13 @@ static void amdgpu_cs_uvd_create(void)
211 CU_ASSERT_EQUAL(r, 0); 227 CU_ASSERT_EQUAL(r, 0);
212 228
213 memcpy(msg, uvd_create_msg, sizeof(uvd_create_msg)); 229 memcpy(msg, uvd_create_msg, sizeof(uvd_create_msg));
230
214 if (family_id >= AMDGPU_FAMILY_VI) { 231 if (family_id >= AMDGPU_FAMILY_VI) {
215 ((uint8_t*)msg)[0x10] = 7; 232 ((uint8_t*)msg)[0x10] = 7;
216 /* chip polaris 10/11 */ 233 /* chip Polaris 10/11 or newer */
217 if (chip_id == chip_rev+0x50 || chip_id == chip_rev+0x5A) { 234 if ((family_id == AMDGPU_FAMILY_AI) ||
235 (chip_id == chip_rev+0x50 || chip_id == chip_rev+0x5A ||
236 chip_id == chip_rev+0x64)) {
218 /* dpb size */ 237 /* dpb size */
219 ((uint8_t*)msg)[0x28] = 0x00; 238 ((uint8_t*)msg)[0x28] = 0x00;
220 ((uint8_t*)msg)[0x29] = 0x94; 239 ((uint8_t*)msg)[0x29] = 0x94;
@@ -250,7 +269,7 @@ static void amdgpu_cs_uvd_create(void)
250 269
251static void amdgpu_cs_uvd_decode(void) 270static void amdgpu_cs_uvd_decode(void)
252{ 271{
253 const unsigned dpb_size = 15923584, ctx_size = 5287680, dt_size = 737280; 272 const unsigned dpb_size = 15923584, dt_size = 737280;
254 uint64_t msg_addr, fb_addr, bs_addr, dpb_addr, ctx_addr, dt_addr, it_addr; 273 uint64_t msg_addr, fb_addr, bs_addr, dpb_addr, ctx_addr, dt_addr, it_addr;
255 struct amdgpu_bo_alloc_request req = {0}; 274 struct amdgpu_bo_alloc_request req = {0};
256 amdgpu_bo_handle buf_handle; 275 amdgpu_bo_handle buf_handle;
@@ -286,14 +305,18 @@ static void amdgpu_cs_uvd_decode(void)
286 r = amdgpu_bo_cpu_map(buf_handle, (void **)&ptr); 305 r = amdgpu_bo_cpu_map(buf_handle, (void **)&ptr);
287 CU_ASSERT_EQUAL(r, 0); 306 CU_ASSERT_EQUAL(r, 0);
288 307
289 memcpy(ptr, uvd_decode_msg, sizeof(uvd_create_msg)); 308 memcpy(ptr, uvd_decode_msg, sizeof(uvd_decode_msg));
309 memcpy(ptr + sizeof(uvd_decode_msg), avc_decode_msg, sizeof(avc_decode_msg));
310
290 if (family_id >= AMDGPU_FAMILY_VI) { 311 if (family_id >= AMDGPU_FAMILY_VI) {
291 ptr[0x10] = 7; 312 ptr[0x10] = 7;
292 ptr[0x98] = 0x00; 313 ptr[0x98] = 0x00;
293 ptr[0x99] = 0x02; 314 ptr[0x99] = 0x02;
294 /* chip polaris10/11 */ 315 /* chip Polaris 10/11 or newer */
295 if (chip_id == chip_rev+0x50 || chip_id == chip_rev+0x5A) { 316 if ((family_id == AMDGPU_FAMILY_AI) ||
296 /*dpb size */ 317 (chip_id == chip_rev+0x50 || chip_id == chip_rev+0x5A ||
318 chip_id == chip_rev+0x64)) {
319 /* dpb size */
297 ptr[0x24] = 0x00; 320 ptr[0x24] = 0x00;
298 ptr[0x25] = 0x94; 321 ptr[0x25] = 0x94;
299 ptr[0x26] = 0x6B; 322 ptr[0x26] = 0x6B;
@@ -335,9 +358,12 @@ static void amdgpu_cs_uvd_decode(void)
335 bs_addr = fb_addr + 4*1024; 358 bs_addr = fb_addr + 4*1024;
336 dpb_addr = ALIGN(bs_addr + sizeof(uvd_bitstream), 4*1024); 359 dpb_addr = ALIGN(bs_addr + sizeof(uvd_bitstream), 4*1024);
337 360
338 if ((family_id >= AMDGPU_FAMILY_VI) && 361 if (family_id >= AMDGPU_FAMILY_VI) {
339 (chip_id == chip_rev+0x50 || chip_id == chip_rev+0x5A)) { 362 if ((family_id == AMDGPU_FAMILY_AI) ||
340 ctx_addr = ALIGN(dpb_addr + 0x006B9400, 4*1024); 363 (chip_id == chip_rev+0x50 || chip_id == chip_rev+0x5A ||
364 chip_id == chip_rev+0x64)) {
365 ctx_addr = ALIGN(dpb_addr + 0x006B9400, 4*1024);
366 }
341 } 367 }
342 368
343 dt_addr = ALIGN(dpb_addr + dpb_size, 4*1024); 369 dt_addr = ALIGN(dpb_addr + dpb_size, 4*1024);
@@ -348,12 +374,16 @@ static void amdgpu_cs_uvd_decode(void)
348 uvd_cmd(dt_addr, 0x2, &i); 374 uvd_cmd(dt_addr, 0x2, &i);
349 uvd_cmd(fb_addr, 0x3, &i); 375 uvd_cmd(fb_addr, 0x3, &i);
350 uvd_cmd(bs_addr, 0x100, &i); 376 uvd_cmd(bs_addr, 0x100, &i);
377
351 if (family_id >= AMDGPU_FAMILY_VI) { 378 if (family_id >= AMDGPU_FAMILY_VI) {
352 uvd_cmd(it_addr, 0x204, &i); 379 uvd_cmd(it_addr, 0x204, &i);
353 if (chip_id == chip_rev+0x50 || chip_id == chip_rev+0x5A) 380 if ((family_id == AMDGPU_FAMILY_AI) ||
381 (chip_id == chip_rev+0x50 || chip_id == chip_rev+0x5A ||
382 chip_id == chip_rev+0x64))
354 uvd_cmd(ctx_addr, 0x206, &i); 383 uvd_cmd(ctx_addr, 0x206, &i);
355} 384 }
356 ib_cpu[i++] = 0x3BC6; 385
386 ib_cpu[i++] = (family_id < AMDGPU_FAMILY_AI) ? 0x3BC6 : 0x81C6;
357 ib_cpu[i++] = 0x1; 387 ib_cpu[i++] = 0x1;
358 for (; i % 16; ++i) 388 for (; i % 16; ++i)
359 ib_cpu[i] = 0x80000000; 389 ib_cpu[i] = 0x80000000;
@@ -364,7 +394,7 @@ static void amdgpu_cs_uvd_decode(void)
364 /* TODO: use a real CRC32 */ 394 /* TODO: use a real CRC32 */
365 for (i = 0, sum = 0; i < dt_size; ++i) 395 for (i = 0, sum = 0; i < dt_size; ++i)
366 sum += ptr[i]; 396 sum += ptr[i];
367 CU_ASSERT_EQUAL(sum, 0x20345d8); 397 CU_ASSERT_EQUAL(sum, SUM_DECODE);
368 398
369 r = amdgpu_bo_cpu_unmap(buf_handle); 399 r = amdgpu_bo_cpu_unmap(buf_handle);
370 CU_ASSERT_EQUAL(r, 0); 400 CU_ASSERT_EQUAL(r, 0);
diff --git a/tests/amdgpu/deadlock_tests.c b/tests/amdgpu/deadlock_tests.c
new file mode 100644
index 00000000..1eb5761a
--- /dev/null
+++ b/tests/amdgpu/deadlock_tests.c
@@ -0,0 +1,255 @@
1/*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22*/
23
24#include <stdio.h>
25#include <stdlib.h>
26#include <unistd.h>
27#ifdef HAVE_ALLOCA_H
28# include <alloca.h>
29#endif
30
31#include "CUnit/Basic.h"
32
33#include "amdgpu_test.h"
34#include "amdgpu_drm.h"
35#include "amdgpu_internal.h"
36
37#include <pthread.h>
38
39
40/*
41 * This defines the delay in ms after which the memory location used for
42 * comparison against the reference value is written to, unblocking the
43 * command processor
44 */
45#define WRITE_MEM_ADDRESS_DELAY_MS 100
46
47#define PACKET_TYPE3 3
48
49#define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \
50 (((op) & 0xFF) << 8) | \
51 ((n) & 0x3FFF) << 16)
52
53#define PACKET3_WAIT_REG_MEM 0x3C
54#define WAIT_REG_MEM_FUNCTION(x) ((x) << 0)
55 /* 0 - always
56 * 1 - <
57 * 2 - <=
58 * 3 - ==
59 * 4 - !=
60 * 5 - >=
61 * 6 - >
62 */
63#define WAIT_REG_MEM_MEM_SPACE(x) ((x) << 4)
64 /* 0 - reg
65 * 1 - mem
66 */
67#define WAIT_REG_MEM_OPERATION(x) ((x) << 6)
68 /* 0 - wait_reg_mem
69 * 1 - wr_wait_wr_reg
70 */
71#define WAIT_REG_MEM_ENGINE(x) ((x) << 8)
72 /* 0 - me
73 * 1 - pfp
74 */
75
76static amdgpu_device_handle device_handle;
77static uint32_t major_version;
78static uint32_t minor_version;
79
80static pthread_t stress_thread;
81static uint32_t *ptr;
82
83static void amdgpu_deadlock_helper(unsigned ip_type);
84static void amdgpu_deadlock_gfx(void);
85static void amdgpu_deadlock_compute(void);
86
87CU_BOOL suite_deadlock_tests_enable(void)
88{
89 CU_BOOL enable = CU_TRUE;
90
91 if (amdgpu_device_initialize(drm_amdgpu[0], &major_version,
92 &minor_version, &device_handle))
93 return CU_FALSE;
94
95 if (device_handle->info.family_id == AMDGPU_FAMILY_AI ||
96 device_handle->info.family_id == AMDGPU_FAMILY_SI) {
97 printf("\n\nCurrently hangs the CP on this ASIC, deadlock suite disabled\n");
98 enable = CU_FALSE;
99 }
100
101 if (amdgpu_device_deinitialize(device_handle))
102 return CU_FALSE;
103
104 return enable;
105}
106
107int suite_deadlock_tests_init(void)
108{
109 int r;
110
111 r = amdgpu_device_initialize(drm_amdgpu[0], &major_version,
112 &minor_version, &device_handle);
113
114 if (r) {
115 if ((r == -EACCES) && (errno == EACCES))
116 printf("\n\nError:%s. "
117 "Hint:Try to run this test program as root.",
118 strerror(errno));
119 return CUE_SINIT_FAILED;
120 }
121
122 return CUE_SUCCESS;
123}
124
125int suite_deadlock_tests_clean(void)
126{
127 int r = amdgpu_device_deinitialize(device_handle);
128
129 if (r == 0)
130 return CUE_SUCCESS;
131 else
132 return CUE_SCLEAN_FAILED;
133}
134
135
136CU_TestInfo deadlock_tests[] = {
137 { "gfx ring block test", amdgpu_deadlock_gfx },
138 { "compute ring block test", amdgpu_deadlock_compute },
139 CU_TEST_INFO_NULL,
140};
141
142static void *write_mem_address(void *data)
143{
144 int i;
145
146 /* useconds_t range is [0, 1,000,000] so use loop for waits > 1s */
147 for (i = 0; i < WRITE_MEM_ADDRESS_DELAY_MS; i++)
148 usleep(1000);
149
150 ptr[256] = 0x1;
151
152 return 0;
153}
154
155static void amdgpu_deadlock_gfx(void)
156{
157 amdgpu_deadlock_helper(AMDGPU_HW_IP_GFX);
158}
159
160static void amdgpu_deadlock_compute(void)
161{
162 amdgpu_deadlock_helper(AMDGPU_HW_IP_COMPUTE);
163}
164
165static void amdgpu_deadlock_helper(unsigned ip_type)
166{
167 amdgpu_context_handle context_handle;
168 amdgpu_bo_handle ib_result_handle;
169 void *ib_result_cpu;
170 uint64_t ib_result_mc_address;
171 struct amdgpu_cs_request ibs_request;
172 struct amdgpu_cs_ib_info ib_info;
173 struct amdgpu_cs_fence fence_status;
174 uint32_t expired;
175 int i, r;
176 amdgpu_bo_list_handle bo_list;
177 amdgpu_va_handle va_handle;
178
179 r = pthread_create(&stress_thread, NULL, write_mem_address, NULL);
180 CU_ASSERT_EQUAL(r, 0);
181
182 r = amdgpu_cs_ctx_create(device_handle, &context_handle);
183 CU_ASSERT_EQUAL(r, 0);
184
185 r = amdgpu_bo_alloc_and_map(device_handle, 4096, 4096,
186 AMDGPU_GEM_DOMAIN_GTT, 0,
187 &ib_result_handle, &ib_result_cpu,
188 &ib_result_mc_address, &va_handle);
189 CU_ASSERT_EQUAL(r, 0);
190
191 r = amdgpu_get_bo_list(device_handle, ib_result_handle, NULL,
192 &bo_list);
193 CU_ASSERT_EQUAL(r, 0);
194
195 ptr = ib_result_cpu;
196
197 ptr[0] = PACKET3(PACKET3_WAIT_REG_MEM, 5);
198 ptr[1] = (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
199 WAIT_REG_MEM_FUNCTION(4) | /* != */
200 WAIT_REG_MEM_ENGINE(0)); /* me */
201 ptr[2] = (ib_result_mc_address + 256*4) & 0xfffffffc;
202 ptr[3] = ((ib_result_mc_address + 256*4) >> 32) & 0xffffffff;
203 ptr[4] = 0x00000000; /* reference value */
204 ptr[5] = 0xffffffff; /* and mask */
205 ptr[6] = 0x00000004; /* poll interval */
206
207 for (i = 7; i < 16; ++i)
208 ptr[i] = 0xffff1000;
209
210
211 ptr[256] = 0x0; /* the memory we wait on to change */
212
213
214
215 memset(&ib_info, 0, sizeof(struct amdgpu_cs_ib_info));
216 ib_info.ib_mc_address = ib_result_mc_address;
217 ib_info.size = 16;
218
219 memset(&ibs_request, 0, sizeof(struct amdgpu_cs_request));
220 ibs_request.ip_type = ip_type;
221 ibs_request.ring = 0;
222 ibs_request.number_of_ibs = 1;
223 ibs_request.ibs = &ib_info;
224 ibs_request.resources = bo_list;
225 ibs_request.fence_info.handle = NULL;
226
227 for (i = 0; i < 200; i++) {
228 r = amdgpu_cs_submit(context_handle, 0,&ibs_request, 1);
229 CU_ASSERT_EQUAL((r == 0 || r == -ECANCELED), 1);
230
231 }
232
233 memset(&fence_status, 0, sizeof(struct amdgpu_cs_fence));
234 fence_status.context = context_handle;
235 fence_status.ip_type = ip_type;
236 fence_status.ip_instance = 0;
237 fence_status.ring = 0;
238 fence_status.fence = ibs_request.seq_no;
239
240 r = amdgpu_cs_query_fence_status(&fence_status,
241 AMDGPU_TIMEOUT_INFINITE,0, &expired);
242 CU_ASSERT_EQUAL((r == 0 || r == -ECANCELED), 1);
243
244 pthread_join(stress_thread, NULL);
245
246 r = amdgpu_bo_list_destroy(bo_list);
247 CU_ASSERT_EQUAL(r, 0);
248
249 r = amdgpu_bo_unmap_and_free(ib_result_handle, va_handle,
250 ib_result_mc_address, 4096);
251 CU_ASSERT_EQUAL(r, 0);
252
253 r = amdgpu_cs_ctx_free(context_handle);
254 CU_ASSERT_EQUAL(r, 0);
255}
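
The deadlock helper stalls the ring on purpose with a single WAIT_REG_MEM packet: memory space 1 (poll a memory location), function 4 (not equal) and engine 0 (ME), so the CP spins as long as the polled dword still equals the reference value 0, until the helper thread writes it after roughly 100 ms. The control dword it builds is just the OR of those fields:

        /* ptr[1] above: poll memory, wait until it differs from the reference */
        uint32_t ctrl = WAIT_REG_MEM_MEM_SPACE(1) |   /* 1 = memory */
                        WAIT_REG_MEM_FUNCTION(4) |    /* 4 = not equal */
                        WAIT_REG_MEM_ENGINE(0);       /* 0 = ME */

        /* (1 << 4) | (4 << 0) | (0 << 8) == 0x14 */
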
diff --git a/tests/amdgpu/uvd_messages.h b/tests/amdgpu/decode_messages.h
index 00235cbb..bd6fe4b6 100644
--- a/tests/amdgpu/uvd_messages.h
+++ b/tests/amdgpu/decode_messages.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2014 Advanced Micro Devices, Inc. 2 * Copyright 2017 Advanced Micro Devices, Inc.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -21,8 +21,10 @@
21 * 21 *
22 */ 22 */
23 23
24#ifndef _UVD_MESSAGES_H_ 24#ifndef _DECODE_MESSAGES_H_
25#define _UVD_MESSAGES_H_ 25#define _DECODE_MESSAGES_H_
26
27#define SUM_DECODE 0x20345d8
26 28
27static const uint8_t uvd_create_msg[] = { 29static const uint8_t uvd_create_msg[] = {
28 0xe4,0x0d,0x00,0x00,0x00,0x00,0x00,0x00,0x03,0x00,0x44,0x40,0x00,0x00,0x00,0x00, 30 0xe4,0x0d,0x00,0x00,0x00,0x00,0x00,0x00,0x03,0x00,0x44,0x40,0x00,0x00,0x00,0x00,
@@ -356,6 +358,9 @@ static const uint8_t uvd_decode_msg[] = {
356 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 358 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
357 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 359 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
358 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 360 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
361};
362
363static const uint8_t avc_decode_msg[] = {
359 0x02,0x00,0x00,0x00,0x1e,0x00,0x00,0x00,0x05,0x00,0x00,0x00,0x88,0x00,0x00,0x00, 364 0x02,0x00,0x00,0x00,0x1e,0x00,0x00,0x00,0x05,0x00,0x00,0x00,0x88,0x00,0x00,0x00,
360 0x01,0x00,0x00,0x01,0x00,0x03,0x02,0x00,0x02,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 365 0x01,0x00,0x00,0x01,0x00,0x03,0x02,0x00,0x02,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
361 0x00,0x00,0x00,0x00,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10, 366 0x00,0x00,0x00,0x00,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,
@@ -810,4 +815,34 @@ static const uint8_t uvd_it_scaling_table[] = {
810 0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10, 815 0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,
811}; 816};
812 817
813#endif /* _UVD_MESSAGES_H_ */ 818static const uint8_t vcn_dec_create_msg[] = {
819 0x28,0x00,0x00,0x00,0x38,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
820 0x03,0x00,0x44,0x40,0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x28,0x00,0x00,0x00,
821 0x10,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
822 0x60,0x03,0x00,0x00,0xe0,0x01,0x00,0x00,
823};
824
825static const uint8_t vcn_dec_decode_msg[] = {
826 0x28,0x00,0x00,0x00,0x90,0x06,0x00,0x00,0x02,0x00,0x00,0x00,0x01,0x00,0x00,0x00,
827 0x03,0x00,0x44,0x40,0x01,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x38,0x00,0x00,0x00,
828 0xb4,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x06,0x00,0x00,0x00,0xec,0x00,0x00,0x00,
829 0x5c,0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x00,0x00,0x00,0x01,0x00,0x00,0x00,
830 0x60,0x03,0x00,0x00,0xe0,0x01,0x00,0x00,0x80,0x05,0x00,0x00,0x00,0x94,0x6b,0x00,
831 0x96,0x4e,0x0b,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xaf,0x50,0x00,
832 0x00,0x00,0x02,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
833 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
834 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
835 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x04,0x00,0x00,
836 0x00,0x02,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
837 0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
838 0x00,0x00,0x00,0x00,0x00,0xc0,0x03,0x00,0x00,0x80,0x07,0x00,0x00,0x60,0x09,0x00,
839 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
840 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
841};
842
843static const uint8_t vcn_dec_destroy_msg[] = {
844 0x28,0x00,0x00,0x00,0x18,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x02,0x00,0x00,0x00,
845 0x03,0x00,0x44,0x40,0x00,0x00,0x00,0x00,
846};
847
848#endif /* _DECODE_MESSAGES_H_ */
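
The SUM_DECODE constant introduced above is the expected byte checksum of the decoded target buffer; the decode tests compare the sum of every byte written back by the firmware against it. A minimal sketch of that check follows; byte_sum() is an illustrative helper (the names dec and dt_size mirror the VCN test, but the helper itself is not part of this patch):

#include <stdint.h>

/* Sum every byte of the decode target; the caller compares the result
 * against SUM_DECODE.  Illustrative helper only. */
static uint64_t byte_sum(const uint8_t *dec, unsigned dt_size)
{
	uint64_t sum = 0;
	unsigned i;

	for (i = 0; i < dt_size; ++i)
		sum += dec[i];

	return sum;
}
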
diff --git a/tests/amdgpu/frame.h b/tests/amdgpu/frame.h
index 4c946c27..335401c1 100644
--- a/tests/amdgpu/frame.h
+++ b/tests/amdgpu/frame.h
@@ -24,7 +24,7 @@
24#ifndef _frame_h_ 24#ifndef _frame_h_
25#define _frame_h_ 25#define _frame_h_
26 26
27const uint8_t frame[] = { 27static const uint8_t frame[] = {
28 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 28 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb,
29 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xd2, 0xd2, 0xd2, 0xd2, 0xd2, 0xd2, 0xd2, 0xd2, 0xd2, 0xd2, 29 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xd2, 0xd2, 0xd2, 0xd2, 0xd2, 0xd2, 0xd2, 0xd2, 0xd2, 0xd2,
30 0xd2, 0xd2, 0xd2, 0xd2, 0xd2, 0xd2, 0xd2, 0xd2, 0xd2, 0xd2, 0xd2, 0xd2, 0xd2, 0xaa, 0xaa, 0xaa, 30 0xd2, 0xd2, 0xd2, 0xd2, 0xd2, 0xd2, 0xd2, 0xd2, 0xd2, 0xd2, 0xd2, 0xd2, 0xd2, 0xaa, 0xaa, 0xaa,
diff --git a/tests/amdgpu/meson.build b/tests/amdgpu/meson.build
new file mode 100644
index 00000000..4c1237c6
--- /dev/null
+++ b/tests/amdgpu/meson.build
@@ -0,0 +1,34 @@
1# Copyright © 2017-2018 Intel Corporation
2
3# Permission is hereby granted, free of charge, to any person obtaining a copy
4# of this software and associated documentation files (the "Software"), to deal
5# in the Software without restriction, including without limitation the rights
6# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7# copies of the Software, and to permit persons to whom the Software is
8# furnished to do so, subject to the following conditions:
9
10# The above copyright notice and this permission notice shall be included in
11# all copies or substantial portions of the Software.
12
13# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19# SOFTWARE.
20
21if dep_cunit.found()
22 amdgpu_test = executable(
23 'amdgpu_test',
24 files(
25 'amdgpu_test.c', 'basic_tests.c', 'bo_tests.c', 'cs_tests.c',
26 'vce_tests.c', 'uvd_enc_tests.c', 'vcn_tests.c', 'deadlock_tests.c',
27 'vm_tests.c',
28 ),
29 dependencies : [dep_cunit, dep_threads],
30 include_directories : [inc_root, inc_drm, include_directories('../../amdgpu')],
31 link_with : [libdrm, libdrm_amdgpu],
32 install : with_install_tests,
33 )
34endif
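
Each test source listed here follows the same CUnit wiring: an enable hook that probes the ASIC and skips the suite when the IP block is missing, init/clean hooks that open the device and map an IB, and a CU_TestInfo table terminated by CU_TEST_INFO_NULL. A minimal sketch of that skeleton, with the example_* names standing in for a real suite (the registration into amdgpu_test.c is assumed and not shown):

#include "CUnit/Basic.h"

static void example_test(void)
{
	/* real tests build an IB, submit it and wait on the fence */
	CU_ASSERT_EQUAL(0, 0);
}

/* Return CU_FALSE to skip the whole suite on unsupported ASICs. */
CU_BOOL suite_example_tests_enable(void)
{
	return CU_TRUE;
}

/* Open the device, create a context and map the IB here. */
int suite_example_tests_init(void)
{
	return CUE_SUCCESS;
}

/* Tear everything down in reverse order. */
int suite_example_tests_clean(void)
{
	return CUE_SUCCESS;
}

CU_TestInfo example_tests[] = {
	{ "example", example_test },
	CU_TEST_INFO_NULL,
};
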
diff --git a/tests/amdgpu/uvd_enc_tests.c b/tests/amdgpu/uvd_enc_tests.c
new file mode 100644
index 00000000..b4251bcf
--- /dev/null
+++ b/tests/amdgpu/uvd_enc_tests.c
@@ -0,0 +1,491 @@
1/*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22*/
23
24#include <stdio.h>
25#include <inttypes.h>
26
27#include "CUnit/Basic.h"
28
29#include "util_math.h"
30
31#include "amdgpu_test.h"
32#include "amdgpu_drm.h"
33#include "amdgpu_internal.h"
34#include "frame.h"
35#include "uve_ib.h"
36
37#define IB_SIZE 4096
38#define MAX_RESOURCES 16
39
40struct amdgpu_uvd_enc_bo {
41 amdgpu_bo_handle handle;
42 amdgpu_va_handle va_handle;
43 uint64_t addr;
44 uint64_t size;
45 uint8_t *ptr;
46};
47
48struct amdgpu_uvd_enc {
49 unsigned width;
50 unsigned height;
51 struct amdgpu_uvd_enc_bo session;
52 struct amdgpu_uvd_enc_bo vbuf;
53 struct amdgpu_uvd_enc_bo bs;
54 struct amdgpu_uvd_enc_bo fb;
55 struct amdgpu_uvd_enc_bo cpb;
56};
57
58static amdgpu_device_handle device_handle;
59static uint32_t major_version;
60static uint32_t minor_version;
61static uint32_t family_id;
62
63static amdgpu_context_handle context_handle;
64static amdgpu_bo_handle ib_handle;
65static amdgpu_va_handle ib_va_handle;
66static uint64_t ib_mc_address;
67static uint32_t *ib_cpu;
68
69static struct amdgpu_uvd_enc enc;
70static amdgpu_bo_handle resources[MAX_RESOURCES];
71static unsigned num_resources;
72
73static void amdgpu_cs_uvd_enc_create(void);
74static void amdgpu_cs_uvd_enc_session_init(void);
75static void amdgpu_cs_uvd_enc_encode(void);
76static void amdgpu_cs_uvd_enc_destroy(void);
77
78
79CU_TestInfo uvd_enc_tests[] = {
80 { "UVD ENC create", amdgpu_cs_uvd_enc_create },
81 { "UVD ENC session init", amdgpu_cs_uvd_enc_session_init },
82 { "UVD ENC encode", amdgpu_cs_uvd_enc_encode },
83 { "UVD ENC destroy", amdgpu_cs_uvd_enc_destroy },
84 CU_TEST_INFO_NULL,
85};
86
87CU_BOOL suite_uvd_enc_tests_enable(void)
88{
89 int r;
90 struct drm_amdgpu_info_hw_ip info;
91
92 if (amdgpu_device_initialize(drm_amdgpu[0], &major_version,
93 &minor_version, &device_handle))
94 return CU_FALSE;
95
96 r = amdgpu_query_hw_ip_info(device_handle, AMDGPU_HW_IP_UVD_ENC, 0, &info);
97
98 if (amdgpu_device_deinitialize(device_handle))
99 return CU_FALSE;
100
101 if (!info.available_rings)
 102		printf("\n\nThe ASIC does NOT support UVD ENC, suite disabled.\n");
103
104 return (r == 0 && (info.available_rings ? CU_TRUE : CU_FALSE));
105}
106
107
108int suite_uvd_enc_tests_init(void)
109{
110 int r;
111
112 r = amdgpu_device_initialize(drm_amdgpu[0], &major_version,
113 &minor_version, &device_handle);
114 if (r)
115 return CUE_SINIT_FAILED;
116
117 family_id = device_handle->info.family_id;
118
119 r = amdgpu_cs_ctx_create(device_handle, &context_handle);
120 if (r)
121 return CUE_SINIT_FAILED;
122
123 r = amdgpu_bo_alloc_and_map(device_handle, IB_SIZE, 4096,
124 AMDGPU_GEM_DOMAIN_GTT, 0,
125 &ib_handle, (void**)&ib_cpu,
126 &ib_mc_address, &ib_va_handle);
127 if (r)
128 return CUE_SINIT_FAILED;
129
130 return CUE_SUCCESS;
131}
132
133int suite_uvd_enc_tests_clean(void)
134{
135 int r;
136
137 r = amdgpu_bo_unmap_and_free(ib_handle, ib_va_handle,
138 ib_mc_address, IB_SIZE);
139 if (r)
140 return CUE_SCLEAN_FAILED;
141
142 r = amdgpu_cs_ctx_free(context_handle);
143 if (r)
144 return CUE_SCLEAN_FAILED;
145
146 r = amdgpu_device_deinitialize(device_handle);
147 if (r)
148 return CUE_SCLEAN_FAILED;
149
150 return CUE_SUCCESS;
151}
152
153static int submit(unsigned ndw, unsigned ip)
154{
155 struct amdgpu_cs_request ibs_request = {0};
156 struct amdgpu_cs_ib_info ib_info = {0};
157 struct amdgpu_cs_fence fence_status = {0};
158 uint32_t expired;
159 int r;
160
161 ib_info.ib_mc_address = ib_mc_address;
162 ib_info.size = ndw;
163
164 ibs_request.ip_type = ip;
165
166 r = amdgpu_bo_list_create(device_handle, num_resources, resources,
167 NULL, &ibs_request.resources);
168 if (r)
169 return r;
170
171 ibs_request.number_of_ibs = 1;
172 ibs_request.ibs = &ib_info;
173 ibs_request.fence_info.handle = NULL;
174
175 r = amdgpu_cs_submit(context_handle, 0, &ibs_request, 1);
176 if (r)
177 return r;
178
179 r = amdgpu_bo_list_destroy(ibs_request.resources);
180 if (r)
181 return r;
182
183 fence_status.context = context_handle;
184 fence_status.ip_type = ip;
185 fence_status.fence = ibs_request.seq_no;
186
187 r = amdgpu_cs_query_fence_status(&fence_status,
188 AMDGPU_TIMEOUT_INFINITE,
189 0, &expired);
190 if (r)
191 return r;
192
193 return 0;
194}
195
196static void alloc_resource(struct amdgpu_uvd_enc_bo *uvd_enc_bo,
197 unsigned size, unsigned domain)
198{
199 struct amdgpu_bo_alloc_request req = {0};
200 amdgpu_bo_handle buf_handle;
201 amdgpu_va_handle va_handle;
202 uint64_t va = 0;
203 int r;
204
205 req.alloc_size = ALIGN(size, 4096);
206 req.preferred_heap = domain;
207 r = amdgpu_bo_alloc(device_handle, &req, &buf_handle);
208 CU_ASSERT_EQUAL(r, 0);
209 r = amdgpu_va_range_alloc(device_handle,
210 amdgpu_gpu_va_range_general,
211 req.alloc_size, 1, 0, &va,
212 &va_handle, 0);
213 CU_ASSERT_EQUAL(r, 0);
214 r = amdgpu_bo_va_op(buf_handle, 0, req.alloc_size, va, 0,
215 AMDGPU_VA_OP_MAP);
216 CU_ASSERT_EQUAL(r, 0);
217 uvd_enc_bo->addr = va;
218 uvd_enc_bo->handle = buf_handle;
219 uvd_enc_bo->size = req.alloc_size;
220 uvd_enc_bo->va_handle = va_handle;
221 r = amdgpu_bo_cpu_map(uvd_enc_bo->handle, (void **)&uvd_enc_bo->ptr);
222 CU_ASSERT_EQUAL(r, 0);
223 memset(uvd_enc_bo->ptr, 0, size);
224 r = amdgpu_bo_cpu_unmap(uvd_enc_bo->handle);
225 CU_ASSERT_EQUAL(r, 0);
226}
227
228static void free_resource(struct amdgpu_uvd_enc_bo *uvd_enc_bo)
229{
230 int r;
231
232 r = amdgpu_bo_va_op(uvd_enc_bo->handle, 0, uvd_enc_bo->size,
233 uvd_enc_bo->addr, 0, AMDGPU_VA_OP_UNMAP);
234 CU_ASSERT_EQUAL(r, 0);
235
236 r = amdgpu_va_range_free(uvd_enc_bo->va_handle);
237 CU_ASSERT_EQUAL(r, 0);
238
239 r = amdgpu_bo_free(uvd_enc_bo->handle);
240 CU_ASSERT_EQUAL(r, 0);
241 memset(uvd_enc_bo, 0, sizeof(*uvd_enc_bo));
242}
243
244static void amdgpu_cs_uvd_enc_create(void)
245{
246 enc.width = 160;
247 enc.height = 128;
248
249 num_resources = 0;
250 alloc_resource(&enc.session, 128 * 1024, AMDGPU_GEM_DOMAIN_GTT);
251 resources[num_resources++] = enc.session.handle;
252 resources[num_resources++] = ib_handle;
253}
254
255static void check_result(struct amdgpu_uvd_enc *enc)
256{
257 uint64_t sum;
258 uint32_t s = 175602;
259 uint32_t *ptr, size;
260 int j, r;
261
262 r = amdgpu_bo_cpu_map(enc->fb.handle, (void **)&enc->fb.ptr);
263 CU_ASSERT_EQUAL(r, 0);
264 ptr = (uint32_t *)enc->fb.ptr;
265 size = ptr[6];
266 r = amdgpu_bo_cpu_unmap(enc->fb.handle);
267 CU_ASSERT_EQUAL(r, 0);
268 r = amdgpu_bo_cpu_map(enc->bs.handle, (void **)&enc->bs.ptr);
269 CU_ASSERT_EQUAL(r, 0);
270 for (j = 0, sum = 0; j < size; ++j)
271 sum += enc->bs.ptr[j];
272 CU_ASSERT_EQUAL(sum, s);
273 r = amdgpu_bo_cpu_unmap(enc->bs.handle);
274 CU_ASSERT_EQUAL(r, 0);
275
276}
277
278static void amdgpu_cs_uvd_enc_session_init(void)
279{
280 int len, r;
281
282 len = 0;
283 memcpy((ib_cpu + len), uve_session_info, sizeof(uve_session_info));
284 len += sizeof(uve_session_info) / 4;
285 ib_cpu[len++] = enc.session.addr >> 32;
286 ib_cpu[len++] = enc.session.addr;
287
288 memcpy((ib_cpu + len), uve_task_info, sizeof(uve_task_info));
289 len += sizeof(uve_task_info) / 4;
290 ib_cpu[len++] = 0x000000d8;
291 ib_cpu[len++] = 0x00000000;
292 ib_cpu[len++] = 0x00000000;
293
294 memcpy((ib_cpu + len), uve_op_init, sizeof(uve_op_init));
295 len += sizeof(uve_op_init) / 4;
296
297 memcpy((ib_cpu + len), uve_session_init, sizeof(uve_session_init));
298 len += sizeof(uve_session_init) / 4;
299
300 memcpy((ib_cpu + len), uve_layer_ctrl, sizeof(uve_layer_ctrl));
301 len += sizeof(uve_layer_ctrl) / 4;
302
303 memcpy((ib_cpu + len), uve_slice_ctrl, sizeof(uve_slice_ctrl));
304 len += sizeof(uve_slice_ctrl) / 4;
305
306 memcpy((ib_cpu + len), uve_spec_misc, sizeof(uve_spec_misc));
307 len += sizeof(uve_spec_misc) / 4;
308
309 memcpy((ib_cpu + len), uve_rc_session_init, sizeof(uve_rc_session_init));
310 len += sizeof(uve_rc_session_init) / 4;
311
312 memcpy((ib_cpu + len), uve_deblocking_filter, sizeof(uve_deblocking_filter));
313 len += sizeof(uve_deblocking_filter) / 4;
314
315 memcpy((ib_cpu + len), uve_quality_params, sizeof(uve_quality_params));
316 len += sizeof(uve_quality_params) / 4;
317
318 memcpy((ib_cpu + len), uve_op_init_rc, sizeof(uve_op_init_rc));
319 len += sizeof(uve_op_init_rc) / 4;
320
321 memcpy((ib_cpu + len), uve_op_init_rc_vbv_level, sizeof(uve_op_init_rc_vbv_level));
322 len += sizeof(uve_op_init_rc_vbv_level) / 4;
323
324 r = submit(len, AMDGPU_HW_IP_UVD_ENC);
325 CU_ASSERT_EQUAL(r, 0);
326}
327
328static void amdgpu_cs_uvd_enc_encode(void)
329{
330 int len, r, i;
331 uint64_t luma_offset, chroma_offset;
332 uint32_t vbuf_size, bs_size = 0x003f4800, cpb_size;
333 unsigned align = (family_id >= AMDGPU_FAMILY_AI) ? 256 : 16;
334 vbuf_size = ALIGN(enc.width, align) * ALIGN(enc.height, 16) * 1.5;
335 cpb_size = vbuf_size * 10;
336
337
338 num_resources = 0;
339 alloc_resource(&enc.fb, 4096, AMDGPU_GEM_DOMAIN_VRAM);
340 resources[num_resources++] = enc.fb.handle;
341 alloc_resource(&enc.bs, bs_size, AMDGPU_GEM_DOMAIN_VRAM);
342 resources[num_resources++] = enc.bs.handle;
343 alloc_resource(&enc.vbuf, vbuf_size, AMDGPU_GEM_DOMAIN_VRAM);
344 resources[num_resources++] = enc.vbuf.handle;
345 alloc_resource(&enc.cpb, cpb_size, AMDGPU_GEM_DOMAIN_VRAM);
346 resources[num_resources++] = enc.cpb.handle;
347 resources[num_resources++] = ib_handle;
348
349 r = amdgpu_bo_cpu_map(enc.vbuf.handle, (void **)&enc.vbuf.ptr);
350 CU_ASSERT_EQUAL(r, 0);
351
352 memset(enc.vbuf.ptr, 0, vbuf_size);
353 for (i = 0; i < enc.height; ++i) {
354 memcpy(enc.vbuf.ptr, (frame + i * enc.width), enc.width);
355 enc.vbuf.ptr += ALIGN(enc.width, align);
356 }
357 for (i = 0; i < enc.height / 2; ++i) {
358 memcpy(enc.vbuf.ptr, ((frame + enc.height * enc.width) + i * enc.width), enc.width);
359 enc.vbuf.ptr += ALIGN(enc.width, align);
360 }
361
362 r = amdgpu_bo_cpu_unmap(enc.vbuf.handle);
363 CU_ASSERT_EQUAL(r, 0);
364
365 len = 0;
366 memcpy((ib_cpu + len), uve_session_info, sizeof(uve_session_info));
367 len += sizeof(uve_session_info) / 4;
368 ib_cpu[len++] = enc.session.addr >> 32;
369 ib_cpu[len++] = enc.session.addr;
370
371 memcpy((ib_cpu + len), uve_task_info, sizeof(uve_task_info));
372 len += sizeof(uve_task_info) / 4;
373 ib_cpu[len++] = 0x000005e0;
374 ib_cpu[len++] = 0x00000001;
375 ib_cpu[len++] = 0x00000001;
376
377 memcpy((ib_cpu + len), uve_nalu_buffer_1, sizeof(uve_nalu_buffer_1));
378 len += sizeof(uve_nalu_buffer_1) / 4;
379
380 memcpy((ib_cpu + len), uve_nalu_buffer_2, sizeof(uve_nalu_buffer_2));
381 len += sizeof(uve_nalu_buffer_2) / 4;
382
383 memcpy((ib_cpu + len), uve_nalu_buffer_3, sizeof(uve_nalu_buffer_3));
384 len += sizeof(uve_nalu_buffer_3) / 4;
385
386 memcpy((ib_cpu + len), uve_nalu_buffer_4, sizeof(uve_nalu_buffer_4));
387 len += sizeof(uve_nalu_buffer_4) / 4;
388
389 memcpy((ib_cpu + len), uve_slice_header, sizeof(uve_slice_header));
390 len += sizeof(uve_slice_header) / 4;
391
392 ib_cpu[len++] = 0x00000254;
393 ib_cpu[len++] = 0x00000010;
394 ib_cpu[len++] = enc.cpb.addr >> 32;
395 ib_cpu[len++] = enc.cpb.addr;
396 memcpy((ib_cpu + len), uve_ctx_buffer, sizeof(uve_ctx_buffer));
397 len += sizeof(uve_ctx_buffer) / 4;
398
399 memcpy((ib_cpu + len), uve_bitstream_buffer, sizeof(uve_bitstream_buffer));
400 len += sizeof(uve_bitstream_buffer) / 4;
401 ib_cpu[len++] = 0x00000000;
402 ib_cpu[len++] = enc.bs.addr >> 32;
403 ib_cpu[len++] = enc.bs.addr;
404 ib_cpu[len++] = 0x003f4800;
405 ib_cpu[len++] = 0x00000000;
406
407 memcpy((ib_cpu + len), uve_feedback_buffer, sizeof(uve_feedback_buffer));
408 len += sizeof(uve_feedback_buffer) / 4;
409 ib_cpu[len++] = enc.fb.addr >> 32;
410 ib_cpu[len++] = enc.fb.addr;
411 ib_cpu[len++] = 0x00000010;
412 ib_cpu[len++] = 0x00000028;
413
414 memcpy((ib_cpu + len), uve_feedback_buffer_additional, sizeof(uve_feedback_buffer_additional));
415 len += sizeof(uve_feedback_buffer_additional) / 4;
416
417 memcpy((ib_cpu + len), uve_intra_refresh, sizeof(uve_intra_refresh));
418 len += sizeof(uve_intra_refresh) / 4;
419
420 memcpy((ib_cpu + len), uve_layer_select, sizeof(uve_layer_select));
421 len += sizeof(uve_layer_select) / 4;
422
423 memcpy((ib_cpu + len), uve_rc_layer_init, sizeof(uve_rc_layer_init));
424 len += sizeof(uve_rc_layer_init) / 4;
425
426 memcpy((ib_cpu + len), uve_layer_select, sizeof(uve_layer_select));
427 len += sizeof(uve_layer_select) / 4;
428
429 memcpy((ib_cpu + len), uve_rc_per_pic, sizeof(uve_rc_per_pic));
430 len += sizeof(uve_rc_per_pic) / 4;
431
432 unsigned luma_size = ALIGN(enc.width, align) * ALIGN(enc.height, 16);
433 luma_offset = enc.vbuf.addr;
434 chroma_offset = luma_offset + luma_size;
435 ib_cpu[len++] = 0x00000054;
436 ib_cpu[len++] = 0x0000000c;
437 ib_cpu[len++] = 0x00000002;
438 ib_cpu[len++] = 0x003f4800;
439 ib_cpu[len++] = luma_offset >> 32;
440 ib_cpu[len++] = luma_offset;
441 ib_cpu[len++] = chroma_offset >> 32;
442 ib_cpu[len++] = chroma_offset;
443 memcpy((ib_cpu + len), uve_encode_param, sizeof(uve_encode_param));
444 ib_cpu[len] = ALIGN(enc.width, align);
445 ib_cpu[len + 1] = ALIGN(enc.width, align);
446 len += sizeof(uve_encode_param) / 4;
447
448 memcpy((ib_cpu + len), uve_op_speed_enc_mode, sizeof(uve_op_speed_enc_mode));
449 len += sizeof(uve_op_speed_enc_mode) / 4;
450
451 memcpy((ib_cpu + len), uve_op_encode, sizeof(uve_op_encode));
452 len += sizeof(uve_op_encode) / 4;
453
454 r = submit(len, AMDGPU_HW_IP_UVD_ENC);
455 CU_ASSERT_EQUAL(r, 0);
456
457 check_result(&enc);
458
459 free_resource(&enc.fb);
460 free_resource(&enc.bs);
461 free_resource(&enc.vbuf);
462 free_resource(&enc.cpb);
463}
464
465static void amdgpu_cs_uvd_enc_destroy(void)
466{
467 int len, r;
468
469 num_resources = 0;
470 resources[num_resources++] = ib_handle;
471
472 len = 0;
473 memcpy((ib_cpu + len), uve_session_info, sizeof(uve_session_info));
474 len += sizeof(uve_session_info) / 4;
475 ib_cpu[len++] = enc.session.addr >> 32;
476 ib_cpu[len++] = enc.session.addr;
477
478 memcpy((ib_cpu + len), uve_task_info, sizeof(uve_task_info));
479 len += sizeof(uve_task_info) / 4;
480 ib_cpu[len++] = 0xffffffff;
481 ib_cpu[len++] = 0x00000002;
482 ib_cpu[len++] = 0x00000000;
483
484 memcpy((ib_cpu + len), uve_op_close, sizeof(uve_op_close));
485 len += sizeof(uve_op_close) / 4;
486
487 r = submit(len, AMDGPU_HW_IP_UVD_ENC);
488 CU_ASSERT_EQUAL(r, 0);
489
490 free_resource(&enc.session);
491}
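
The encode tests above build their IBs by copying command templates from uve_ib.h into the CPU-mapped IB and then patching address and size dwords in place before calling submit(). A sketch of that pattern; emit_template() is an illustrative helper and does not exist in the test source:

#include <stdint.h>
#include <string.h>

/* Copy a command template into the IB at the current dword position
 * and return the new position. */
static unsigned emit_template(uint32_t *ib, unsigned len,
			      const uint32_t *tmpl, unsigned tmpl_bytes)
{
	memcpy(ib + len, tmpl, tmpl_bytes);
	return len + tmpl_bytes / 4;
}

/* usage, mirroring amdgpu_cs_uvd_enc_session_init():
 *	len = emit_template(ib_cpu, len, uve_session_info,
 *			    sizeof(uve_session_info));
 *	ib_cpu[len++] = enc.session.addr >> 32;
 *	ib_cpu[len++] = enc.session.addr;
 */
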
diff --git a/tests/amdgpu/uve_ib.h b/tests/amdgpu/uve_ib.h
new file mode 100644
index 00000000..cb72be22
--- /dev/null
+++ b/tests/amdgpu/uve_ib.h
@@ -0,0 +1,527 @@
1/*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22*/
23
24#ifndef _uve_ib_h_
25#define _uve_ib_h_
26
27static const uint32_t uve_session_info[] = {
28 0x00000018,
29 0x00000001,
30 0x00000000,
31 0x00010000,
32};
33
34static const uint32_t uve_task_info[] = {
35 0x00000014,
36 0x00000002,
37};
38
39static const uint32_t uve_session_init[] = {
40 0x00000020,
41 0x00000003,
42 0x000000c0,
43 0x00000080,
44 0x00000020,
45 0x00000000,
46 0x00000000,
47 0x00000000,
48};
49
50static const uint32_t uve_layer_ctrl[] = {
51 0x00000010,
52 0x00000004,
53 0x00000001,
54 0x00000001,
55};
56
57static const uint32_t uve_layer_select[] = {
58 0x0000000c,
59 0x00000005,
60 0x00000000,
61};
62
63static const uint32_t uve_slice_ctrl[] = {
64 0x00000014,
65 0x00000006,
66 0x00000000,
67 0x00000006,
68 0x00000006,
69};
70
71static const uint32_t uve_spec_misc[] = {
72 0x00000024,
73 0x00000007,
74 0x00000000,
75 0x00000000,
76 0x00000000,
77 0x00000000,
78 0x00000000,
79 0x00000001,
80 0x00000001,
81};
82
83static const uint32_t uve_rc_session_init[] = {
84 0x00000010,
85 0x00000008,
86 0x00000000,
87 0x00000040,
88};
89
90static const uint32_t uve_rc_layer_init[] = {
91 0x00000028,
92 0x00000009,
93 0x001e8480,
94 0x001e8480,
95 0x0000001e,
96 0x00000001,
97 0x0001046a,
98 0x0001046a,
99 0x0001046a,
100 0xaaaaaaaa,
101};
102
103static const uint32_t uve_deblocking_filter[] = {
104 0x00000020,
105 0x0000000e,
106 0x00000000,
107 0x00000000,
108 0x00000000,
109 0x00000000,
110 0x00000000,
111 0x00000000,
112};
113
114static const uint32_t uve_quality_params[] = {
115 0x00000014,
116 0x0000000d,
117 0x00000000,
118 0x00000000,
119 0x00000000,
120};
121
122static const uint32_t uve_feedback_buffer[] = {
123 0x0000001c,
124 0x00000012,
125 0x00000000,
126};
127
128static const uint32_t uve_feedback_buffer_additional[] = {
129 0x00000108,
130 0x00000014,
131 0x00000001,
132 0x00000010,
133 0x00000000,
134 0x00000000,
135 0x00000000,
136 0x00000000,
137 0x00000000,
138 0x00000000,
139 0x00000000,
140 0x00000000,
141 0x00000000,
142 0x00000000,
143 0x00000000,
144 0x00000000,
145 0x00000000,
146 0x00000000,
147 0x00000000,
148 0x00000000,
149 0x00000000,
150 0x00000000,
151 0x00000000,
152 0x00000000,
153 0x00000000,
154 0x00000000,
155 0x00000000,
156 0x00000000,
157 0x00000000,
158 0x00000000,
159 0x00000000,
160 0x00000000,
161 0x00000000,
162 0x00000000,
163 0x00000000,
164 0x00000000,
165 0x00000000,
166 0x00000000,
167 0x00000000,
168 0x00000000,
169 0x00000000,
170 0x00000000,
171 0x00000000,
172 0x00000000,
173 0x00000000,
174 0x00000000,
175 0x00000000,
176 0x00000000,
177 0x00000000,
178 0x00000000,
179 0x00000000,
180 0x00000000,
181 0x00000000,
182 0x00000000,
183 0x00000000,
184 0x00000000,
185 0x00000000,
186 0x00000000,
187 0x00000000,
188 0x00000000,
189 0x00000000,
190 0x00000000,
191 0x00000000,
192 0x00000000,
193 0x00000000,
194 0x00000000,
195};
196
197static const uint32_t uve_nalu_buffer_1[] = {
198 0x00000018,
199 0x00000013,
200 0x00000001,
201 0x00000007,
202 0x00000001,
203 0x46011000,
204};
205
206static const uint32_t uve_nalu_buffer_2[] = {
207 0x0000002c,
208 0x00000013,
209 0x00000002,
210 0x0000001b,
211 0x00000001,
212 0x40010c01,
213 0xffff0160,
214 0x00000300,
215 0xb0000003,
216 0x00000300,
217 0x962c0900,
218};
219
220static const uint32_t uve_nalu_buffer_3[] = {
221 0x00000034,
222 0x00000013,
223 0x00000003,
224 0x00000023,
225 0x00000001,
226 0x42010101,
227 0x60000003,
228 0x00b00000,
229 0x03000003,
230 0x0096a018,
231 0x2020708f,
232 0xcb924295,
233 0x12e08000,
234};
235
236static const uint32_t uve_nalu_buffer_4[] = {
237 0x0000001c,
238 0x00000013,
239 0x00000004,
240 0x0000000b,
241 0x00000001,
242 0x4401e0f1,
243 0x80992000,
244};
245
246static const uint32_t uve_slice_header[] = {
247 0x000000c8,
248 0x0000000b,
249 0x28010000,
250 0x40000000,
251 0x60000000,
252 0x00000000,
253 0x00000000,
254 0x00000000,
255 0x00000000,
256 0x00000000,
257 0x00000000,
258 0x00000000,
259 0x00000000,
260 0x00000000,
261 0x00000000,
262 0x00000000,
263 0x00000000,
264 0x00000000,
265 0x00000002,
266 0x00000010,
267 0x00000003,
268 0x00000000,
269 0x00000002,
270 0x00000002,
271 0x00000004,
272 0x00000000,
273 0x00000001,
274 0x00000000,
275 0x00000002,
276 0x00000003,
277 0x00000005,
278 0x00000000,
279 0x00000002,
280 0x00000000,
281 0x00000000,
282 0x00000000,
283 0x00000000,
284 0x00000000,
285 0x00000000,
286 0x00000000,
287 0x00000000,
288 0x00000000,
289 0x00000000,
290 0x00000000,
291 0x00000000,
292 0x00000000,
293 0x00000000,
294 0x00000000,
295 0x00000000,
296 0x00000000,
297};
298
299static const uint32_t uve_encode_param[] = {
300 0x00000000,
301 0x00000000,
302 0x00000000,
303 0x00000000,
304 0xffffffff,
305 0x00000001,
306 0x00000000,
307 0x00000000,
308 0x00000000,
309 0x00000000,
310 0x00000000,
311 0x00000000,
312 0x00000000,
313};
314
315static const uint32_t uve_intra_refresh[] = {
316 0x00000014,
317 0x0000000f,
318 0x00000000,
319 0x00000000,
320 0x00000001,
321};
322
323static const uint32_t uve_ctx_buffer[] = {
324 0x00000000,
325 0x00000000,
326 0x000000a0,
327 0x000000a0,
328 0x00000000,
329 0x00000000,
330 0x00000000,
331 0x00000000,
332 0x00000000,
333 0x00000000,
334 0x00000000,
335 0x00000000,
336 0x00000000,
337 0x00000000,
338 0x00000000,
339 0x00000000,
340 0x00000000,
341 0x00000000,
342 0x00000000,
343 0x00000000,
344 0x00000000,
345 0x00000000,
346 0x00000000,
347 0x00000000,
348 0x00000000,
349 0x00000000,
350 0x00000000,
351 0x00000000,
352 0x00000000,
353 0x00000000,
354 0x00000000,
355 0x00000000,
356 0x00000000,
357 0x00000000,
358 0x00000000,
359 0x00000000,
360 0x00000000,
361 0x00000000,
362 0x00000000,
363 0x00000000,
364 0x00000000,
365 0x00000000,
366 0x00000000,
367 0x00000000,
368 0x00000000,
369 0x00000000,
370 0x00000000,
371 0x00000000,
372 0x00000000,
373 0x00000000,
374 0x00000000,
375 0x00000000,
376 0x00000000,
377 0x00000000,
378 0x00000000,
379 0x00000000,
380 0x00000000,
381 0x00000000,
382 0x00000000,
383 0x00000000,
384 0x00000000,
385 0x00000000,
386 0x00000000,
387 0x00000000,
388 0x00000000,
389 0x00000000,
390 0x00000000,
391 0x00000000,
392 0x00000000,
393 0x00000000,
394 0x00000000,
395 0x00000000,
396 0x00000000,
397 0x00000000,
398 0x00000000,
399 0x00000000,
400 0x00000000,
401 0x00000000,
402 0x00000000,
403 0x00000000,
404 0x00000000,
405 0x00000000,
406 0x00000000,
407 0x00000000,
408 0x00000000,
409 0x00000000,
410 0x00000000,
411 0x00000000,
412 0x00000000,
413 0x00000000,
414 0x00000000,
415 0x00000000,
416 0x00000000,
417 0x00000000,
418 0x00000000,
419 0x00000000,
420 0x00000000,
421 0x00000000,
422 0x00000000,
423 0x00000000,
424 0x00000000,
425 0x00000000,
426 0x00000000,
427 0x00000000,
428 0x00000000,
429 0x00000000,
430 0x00000000,
431 0x00000000,
432 0x00000000,
433 0x00000000,
434 0x00000000,
435 0x00000000,
436 0x00000000,
437 0x00000000,
438 0x00000000,
439 0x00000000,
440 0x00000000,
441 0x00000000,
442 0x00000000,
443 0x00000000,
444 0x00000000,
445 0x00000000,
446 0x00000000,
447 0x00000000,
448 0x00000000,
449 0x00000000,
450 0x00000000,
451 0x00000000,
452 0x00000000,
453 0x00000000,
454 0x00000000,
455 0x00000000,
456 0x00000000,
457 0x00000000,
458 0x00000000,
459 0x00000000,
460 0x00000000,
461 0x00000000,
462 0x00000000,
463 0x00000000,
464 0x00000000,
465 0x00000000,
466 0x00000000,
467 0x00000000,
468 0x00000000,
469};
470
471static const uint32_t uve_bitstream_buffer[] = {
472 0x0000001c,
473 0x00000011,
474};
475
476static const uint32_t uve_rc_per_pic[] = {
477 0x00000024,
478 0x0000000a,
479 0x0000001a,
480 0x00000000,
481 0x00000033,
482 0x00000000,
483 0x00000000,
484 0x00000000,
485 0x00000001,
486};
487
488static const uint32_t uve_op_init[] = {
489 0x00000008,
490 0x08000001,
491};
492
493static const uint32_t uve_op_close[] = {
494 0x00000008,
495 0x08000002,
496};
497
498static const uint32_t uve_op_encode[] = {
499 0x00000008,
500 0x08000003,
501};
502
503static const uint32_t uve_op_init_rc[] = {
504 0x00000008,
505 0x08000004,
506};
507
508static const uint32_t uve_op_init_rc_vbv_level[] = {
509 0x00000008,
510 0x08000005,
511};
512
513static const uint32_t uve_op_speed_enc_mode[] = {
514 0x00000008,
515 0x08000006,
516};
517
518static const uint32_t uve_op_balance_enc_mode[] = {
519 0x00000008,
520 0x08000007,
521};
522
523static const uint32_t uve_op_quality_enc_mode[] = {
524 0x00000008,
525 0x08000008,
526};
 527#endif /* _uve_ib_h_ */
diff --git a/tests/amdgpu/vce_tests.c b/tests/amdgpu/vce_tests.c
index de63aa15..25c0b1fb 100644
--- a/tests/amdgpu/vce_tests.c
+++ b/tests/amdgpu/vce_tests.c
@@ -21,10 +21,6 @@
21 * 21 *
22*/ 22*/
23 23
24#ifdef HAVE_CONFIG_H
25#include "config.h"
26#endif
27
28#include <stdio.h> 24#include <stdio.h>
29#include <inttypes.h> 25#include <inttypes.h>
30 26
@@ -88,6 +84,27 @@ CU_TestInfo vce_tests[] = {
88 CU_TEST_INFO_NULL, 84 CU_TEST_INFO_NULL,
89}; 85};
90 86
87
88CU_BOOL suite_vce_tests_enable(void)
89{
90 if (amdgpu_device_initialize(drm_amdgpu[0], &major_version,
91 &minor_version, &device_handle))
92 return CU_FALSE;
93
94 family_id = device_handle->info.family_id;
95
96 if (amdgpu_device_deinitialize(device_handle))
97 return CU_FALSE;
98
99
100 if (family_id >= AMDGPU_FAMILY_RV || family_id == AMDGPU_FAMILY_SI) {
 101		printf("\n\nThe ASIC does NOT support VCE, suite disabled\n");
102 return CU_FALSE;
103 }
104
105 return CU_TRUE;
106}
107
91int suite_vce_tests_init(void) 108int suite_vce_tests_init(void)
92{ 109{
93 int r; 110 int r;
@@ -234,6 +251,7 @@ static void free_resource(struct amdgpu_vce_bo *vce_bo)
234 251
235static void amdgpu_cs_vce_create(void) 252static void amdgpu_cs_vce_create(void)
236{ 253{
254 unsigned align = (family_id >= AMDGPU_FAMILY_AI) ? 256 : 16;
237 int len, r; 255 int len, r;
238 256
239 enc.width = vce_create[6]; 257 enc.width = vce_create[6];
@@ -250,6 +268,8 @@ static void amdgpu_cs_vce_create(void)
250 memcpy((ib_cpu + len), vce_taskinfo, sizeof(vce_taskinfo)); 268 memcpy((ib_cpu + len), vce_taskinfo, sizeof(vce_taskinfo));
251 len += sizeof(vce_taskinfo) / 4; 269 len += sizeof(vce_taskinfo) / 4;
252 memcpy((ib_cpu + len), vce_create, sizeof(vce_create)); 270 memcpy((ib_cpu + len), vce_create, sizeof(vce_create));
271 ib_cpu[len + 8] = ALIGN(enc.width, align);
272 ib_cpu[len + 9] = ALIGN(enc.width, align);
253 len += sizeof(vce_create) / 4; 273 len += sizeof(vce_create) / 4;
254 memcpy((ib_cpu + len), vce_feedback, sizeof(vce_feedback)); 274 memcpy((ib_cpu + len), vce_feedback, sizeof(vce_feedback));
255 ib_cpu[len + 2] = enc.fb[0].addr >> 32; 275 ib_cpu[len + 2] = enc.fb[0].addr >> 32;
@@ -291,10 +311,12 @@ static void amdgpu_cs_vce_encode_idr(struct amdgpu_vce_encode *enc)
291{ 311{
292 312
293 uint64_t luma_offset, chroma_offset; 313 uint64_t luma_offset, chroma_offset;
294 int len = 0, r; 314 unsigned align = (family_id >= AMDGPU_FAMILY_AI) ? 256 : 16;
315 unsigned luma_size = ALIGN(enc->width, align) * ALIGN(enc->height, 16);
316 int len = 0, i, r;
295 317
296 luma_offset = enc->vbuf.addr; 318 luma_offset = enc->vbuf.addr;
297 chroma_offset = luma_offset + enc->width * enc->height; 319 chroma_offset = luma_offset + luma_size;
298 320
299 memcpy((ib_cpu + len), vce_session, sizeof(vce_session)); 321 memcpy((ib_cpu + len), vce_session, sizeof(vce_session));
300 len += sizeof(vce_session) / 4; 322 len += sizeof(vce_session) / 4;
@@ -309,6 +331,10 @@ static void amdgpu_cs_vce_encode_idr(struct amdgpu_vce_encode *enc)
309 ib_cpu[len + 3] = enc->cpb.addr; 331 ib_cpu[len + 3] = enc->cpb.addr;
310 len += sizeof(vce_context_buffer) / 4; 332 len += sizeof(vce_context_buffer) / 4;
311 memcpy((ib_cpu + len), vce_aux_buffer, sizeof(vce_aux_buffer)); 333 memcpy((ib_cpu + len), vce_aux_buffer, sizeof(vce_aux_buffer));
334 for (i = 0; i < 8; ++i)
335 ib_cpu[len + 2 + i] = luma_size * 1.5 * (i + 2);
336 for (i = 0; i < 8; ++i)
337 ib_cpu[len + 10 + i] = luma_size * 1.5;
312 len += sizeof(vce_aux_buffer) / 4; 338 len += sizeof(vce_aux_buffer) / 4;
313 memcpy((ib_cpu + len), vce_feedback, sizeof(vce_feedback)); 339 memcpy((ib_cpu + len), vce_feedback, sizeof(vce_feedback));
314 ib_cpu[len + 2] = enc->fb[0].addr >> 32; 340 ib_cpu[len + 2] = enc->fb[0].addr >> 32;
@@ -319,8 +345,10 @@ static void amdgpu_cs_vce_encode_idr(struct amdgpu_vce_encode *enc)
319 ib_cpu[len + 10] = luma_offset; 345 ib_cpu[len + 10] = luma_offset;
320 ib_cpu[len + 11] = chroma_offset >> 32; 346 ib_cpu[len + 11] = chroma_offset >> 32;
321 ib_cpu[len + 12] = chroma_offset; 347 ib_cpu[len + 12] = chroma_offset;
322 ib_cpu[len + 73] = 0x7800; 348 ib_cpu[len + 14] = ALIGN(enc->width, align);
323 ib_cpu[len + 74] = 0x7800 + 0x5000; 349 ib_cpu[len + 15] = ALIGN(enc->width, align);
350 ib_cpu[len + 73] = luma_size * 1.5;
351 ib_cpu[len + 74] = luma_size * 2.5;
324 len += sizeof(vce_encode) / 4; 352 len += sizeof(vce_encode) / 4;
325 enc->ib_len = len; 353 enc->ib_len = len;
326 if (!enc->two_instance) { 354 if (!enc->two_instance) {
@@ -332,11 +360,13 @@ static void amdgpu_cs_vce_encode_idr(struct amdgpu_vce_encode *enc)
332static void amdgpu_cs_vce_encode_p(struct amdgpu_vce_encode *enc) 360static void amdgpu_cs_vce_encode_p(struct amdgpu_vce_encode *enc)
333{ 361{
334 uint64_t luma_offset, chroma_offset; 362 uint64_t luma_offset, chroma_offset;
335 int len, r; 363 int len, i, r;
364 unsigned align = (family_id >= AMDGPU_FAMILY_AI) ? 256 : 16;
365 unsigned luma_size = ALIGN(enc->width, align) * ALIGN(enc->height, 16);
336 366
337 len = (enc->two_instance) ? enc->ib_len : 0; 367 len = (enc->two_instance) ? enc->ib_len : 0;
338 luma_offset = enc->vbuf.addr; 368 luma_offset = enc->vbuf.addr;
339 chroma_offset = luma_offset + enc->width * enc->height; 369 chroma_offset = luma_offset + luma_size;
340 370
341 if (!enc->two_instance) { 371 if (!enc->two_instance) {
342 memcpy((ib_cpu + len), vce_session, sizeof(vce_session)); 372 memcpy((ib_cpu + len), vce_session, sizeof(vce_session));
@@ -353,6 +383,10 @@ static void amdgpu_cs_vce_encode_p(struct amdgpu_vce_encode *enc)
353 ib_cpu[len + 3] = enc->cpb.addr; 383 ib_cpu[len + 3] = enc->cpb.addr;
354 len += sizeof(vce_context_buffer) / 4; 384 len += sizeof(vce_context_buffer) / 4;
355 memcpy((ib_cpu + len), vce_aux_buffer, sizeof(vce_aux_buffer)); 385 memcpy((ib_cpu + len), vce_aux_buffer, sizeof(vce_aux_buffer));
386 for (i = 0; i < 8; ++i)
387 ib_cpu[len + 2 + i] = luma_size * 1.5 * (i + 2);
388 for (i = 0; i < 8; ++i)
389 ib_cpu[len + 10 + i] = luma_size * 1.5;
356 len += sizeof(vce_aux_buffer) / 4; 390 len += sizeof(vce_aux_buffer) / 4;
357 memcpy((ib_cpu + len), vce_feedback, sizeof(vce_feedback)); 391 memcpy((ib_cpu + len), vce_feedback, sizeof(vce_feedback));
358 ib_cpu[len + 2] = enc->fb[1].addr >> 32; 392 ib_cpu[len + 2] = enc->fb[1].addr >> 32;
@@ -364,15 +398,17 @@ static void amdgpu_cs_vce_encode_p(struct amdgpu_vce_encode *enc)
364 ib_cpu[len + 10] = luma_offset; 398 ib_cpu[len + 10] = luma_offset;
365 ib_cpu[len + 11] = chroma_offset >> 32; 399 ib_cpu[len + 11] = chroma_offset >> 32;
366 ib_cpu[len + 12] = chroma_offset; 400 ib_cpu[len + 12] = chroma_offset;
401 ib_cpu[len + 14] = ALIGN(enc->width, align);
402 ib_cpu[len + 15] = ALIGN(enc->width, align);
367 ib_cpu[len + 18] = 0; 403 ib_cpu[len + 18] = 0;
368 ib_cpu[len + 19] = 0; 404 ib_cpu[len + 19] = 0;
369 ib_cpu[len + 56] = 3; 405 ib_cpu[len + 56] = 3;
370 ib_cpu[len + 57] = 0; 406 ib_cpu[len + 57] = 0;
371 ib_cpu[len + 58] = 0; 407 ib_cpu[len + 58] = 0;
372 ib_cpu[len + 59] = 0x7800; 408 ib_cpu[len + 59] = luma_size * 1.5;
373 ib_cpu[len + 60] = 0x7800 + 0x5000; 409 ib_cpu[len + 60] = luma_size * 2.5;
374 ib_cpu[len + 73] = 0; 410 ib_cpu[len + 73] = 0;
375 ib_cpu[len + 74] = 0x5000; 411 ib_cpu[len + 74] = luma_size;
376 ib_cpu[len + 81] = 1; 412 ib_cpu[len + 81] = 1;
377 ib_cpu[len + 82] = 1; 413 ib_cpu[len + 82] = 1;
378 len += sizeof(vce_encode) / 4; 414 len += sizeof(vce_encode) / 4;
@@ -408,9 +444,10 @@ static void check_result(struct amdgpu_vce_encode *enc)
408static void amdgpu_cs_vce_encode(void) 444static void amdgpu_cs_vce_encode(void)
409{ 445{
410 uint32_t vbuf_size, bs_size = 0x154000, cpb_size; 446 uint32_t vbuf_size, bs_size = 0x154000, cpb_size;
411 int r; 447 unsigned align = (family_id >= AMDGPU_FAMILY_AI) ? 256 : 16;
448 int i, r;
412 449
413 vbuf_size = enc.width * enc.height * 1.5; 450 vbuf_size = ALIGN(enc.width, align) * ALIGN(enc.height, 16) * 1.5;
414 cpb_size = vbuf_size * 10; 451 cpb_size = vbuf_size * 10;
415 num_resources = 0; 452 num_resources = 0;
416 alloc_resource(&enc.fb[0], 4096, AMDGPU_GEM_DOMAIN_GTT); 453 alloc_resource(&enc.fb[0], 4096, AMDGPU_GEM_DOMAIN_GTT);
@@ -429,7 +466,17 @@ static void amdgpu_cs_vce_encode(void)
429 466
430 r = amdgpu_bo_cpu_map(enc.vbuf.handle, (void **)&enc.vbuf.ptr); 467 r = amdgpu_bo_cpu_map(enc.vbuf.handle, (void **)&enc.vbuf.ptr);
431 CU_ASSERT_EQUAL(r, 0); 468 CU_ASSERT_EQUAL(r, 0);
432 memcpy(enc.vbuf.ptr, frame, sizeof(frame)); 469
470 memset(enc.vbuf.ptr, 0, vbuf_size);
471 for (i = 0; i < enc.height; ++i) {
472 memcpy(enc.vbuf.ptr, (frame + i * enc.width), enc.width);
473 enc.vbuf.ptr += ALIGN(enc.width, align);
474 }
475 for (i = 0; i < enc.height / 2; ++i) {
476 memcpy(enc.vbuf.ptr, ((frame + enc.height * enc.width) + i * enc.width), enc.width);
477 enc.vbuf.ptr += ALIGN(enc.width, align);
478 }
479
433 r = amdgpu_bo_cpu_unmap(enc.vbuf.handle); 480 r = amdgpu_bo_cpu_unmap(enc.vbuf.handle);
434 CU_ASSERT_EQUAL(r, 0); 481 CU_ASSERT_EQUAL(r, 0);
435 482
diff --git a/tests/amdgpu/vcn_tests.c b/tests/amdgpu/vcn_tests.c
new file mode 100644
index 00000000..d9f05af8
--- /dev/null
+++ b/tests/amdgpu/vcn_tests.c
@@ -0,0 +1,398 @@
1/*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22*/
23
24#include <stdio.h>
25#include <inttypes.h>
26
27#include "CUnit/Basic.h"
28
29#include "util_math.h"
30
31#include "amdgpu_test.h"
32#include "amdgpu_drm.h"
33#include "amdgpu_internal.h"
34#include "decode_messages.h"
35
36#define IB_SIZE 4096
37#define MAX_RESOURCES 16
38
39struct amdgpu_vcn_bo {
40 amdgpu_bo_handle handle;
41 amdgpu_va_handle va_handle;
42 uint64_t addr;
43 uint64_t size;
44 uint8_t *ptr;
45};
46
47static amdgpu_device_handle device_handle;
48static uint32_t major_version;
49static uint32_t minor_version;
50static uint32_t family_id;
51
52static amdgpu_context_handle context_handle;
53static amdgpu_bo_handle ib_handle;
54static amdgpu_va_handle ib_va_handle;
55static uint64_t ib_mc_address;
56static uint32_t *ib_cpu;
57
58static amdgpu_bo_handle resources[MAX_RESOURCES];
59static unsigned num_resources;
60
61static void amdgpu_cs_vcn_dec_create(void);
62static void amdgpu_cs_vcn_dec_decode(void);
63static void amdgpu_cs_vcn_dec_destroy(void);
64
65static void amdgpu_cs_vcn_enc_create(void);
66static void amdgpu_cs_vcn_enc_encode(void);
67static void amdgpu_cs_vcn_enc_destroy(void);
68
69CU_TestInfo vcn_tests[] = {
70
71 { "VCN DEC create", amdgpu_cs_vcn_dec_create },
72 { "VCN DEC decode", amdgpu_cs_vcn_dec_decode },
73 { "VCN DEC destroy", amdgpu_cs_vcn_dec_destroy },
74
75 { "VCN ENC create", amdgpu_cs_vcn_enc_create },
76 { "VCN ENC decode", amdgpu_cs_vcn_enc_encode },
77 { "VCN ENC destroy", amdgpu_cs_vcn_enc_destroy },
78 CU_TEST_INFO_NULL,
79};
80
81CU_BOOL suite_vcn_tests_enable(void)
82{
83
84 if (amdgpu_device_initialize(drm_amdgpu[0], &major_version,
85 &minor_version, &device_handle))
86 return CU_FALSE;
87
88 family_id = device_handle->info.family_id;
89
90 if (amdgpu_device_deinitialize(device_handle))
91 return CU_FALSE;
92
93
94 if (family_id < AMDGPU_FAMILY_RV) {
 95		printf("\n\nThe ASIC does NOT support VCN, suite disabled\n");
96 return CU_FALSE;
97 }
98
99 return CU_TRUE;
100}
101
102int suite_vcn_tests_init(void)
103{
104 int r;
105
106 r = amdgpu_device_initialize(drm_amdgpu[0], &major_version,
107 &minor_version, &device_handle);
108 if (r)
109 return CUE_SINIT_FAILED;
110
111 family_id = device_handle->info.family_id;
112
113 r = amdgpu_cs_ctx_create(device_handle, &context_handle);
114 if (r)
115 return CUE_SINIT_FAILED;
116
117 r = amdgpu_bo_alloc_and_map(device_handle, IB_SIZE, 4096,
118 AMDGPU_GEM_DOMAIN_GTT, 0,
119 &ib_handle, (void**)&ib_cpu,
120 &ib_mc_address, &ib_va_handle);
121 if (r)
122 return CUE_SINIT_FAILED;
123
124 return CUE_SUCCESS;
125}
126
127int suite_vcn_tests_clean(void)
128{
129 int r;
130
131 r = amdgpu_bo_unmap_and_free(ib_handle, ib_va_handle,
132 ib_mc_address, IB_SIZE);
133 if (r)
134 return CUE_SCLEAN_FAILED;
135
136 r = amdgpu_cs_ctx_free(context_handle);
137 if (r)
138 return CUE_SCLEAN_FAILED;
139
140 r = amdgpu_device_deinitialize(device_handle);
141 if (r)
142 return CUE_SCLEAN_FAILED;
143
144 return CUE_SUCCESS;
145}
146
147static int submit(unsigned ndw, unsigned ip)
148{
149 struct amdgpu_cs_request ibs_request = {0};
150 struct amdgpu_cs_ib_info ib_info = {0};
151 struct amdgpu_cs_fence fence_status = {0};
152 uint32_t expired;
153 int r;
154
155 ib_info.ib_mc_address = ib_mc_address;
156 ib_info.size = ndw;
157
158 ibs_request.ip_type = ip;
159
160 r = amdgpu_bo_list_create(device_handle, num_resources, resources,
161 NULL, &ibs_request.resources);
162 if (r)
163 return r;
164
165 ibs_request.number_of_ibs = 1;
166 ibs_request.ibs = &ib_info;
167 ibs_request.fence_info.handle = NULL;
168
169 r = amdgpu_cs_submit(context_handle, 0, &ibs_request, 1);
170 if (r)
171 return r;
172
173 r = amdgpu_bo_list_destroy(ibs_request.resources);
174 if (r)
175 return r;
176
177 fence_status.context = context_handle;
178 fence_status.ip_type = ip;
179 fence_status.fence = ibs_request.seq_no;
180
181 r = amdgpu_cs_query_fence_status(&fence_status,
182 AMDGPU_TIMEOUT_INFINITE,
183 0, &expired);
184 if (r)
185 return r;
186
187 return 0;
188}
189
190static void alloc_resource(struct amdgpu_vcn_bo *vcn_bo,
191 unsigned size, unsigned domain)
192{
193 struct amdgpu_bo_alloc_request req = {0};
194 amdgpu_bo_handle buf_handle;
195 amdgpu_va_handle va_handle;
196 uint64_t va = 0;
197 int r;
198
199 req.alloc_size = ALIGN(size, 4096);
200 req.preferred_heap = domain;
201 r = amdgpu_bo_alloc(device_handle, &req, &buf_handle);
202 CU_ASSERT_EQUAL(r, 0);
203 r = amdgpu_va_range_alloc(device_handle,
204 amdgpu_gpu_va_range_general,
205 req.alloc_size, 1, 0, &va,
206 &va_handle, 0);
207 CU_ASSERT_EQUAL(r, 0);
208 r = amdgpu_bo_va_op(buf_handle, 0, req.alloc_size, va, 0,
209 AMDGPU_VA_OP_MAP);
210 CU_ASSERT_EQUAL(r, 0);
211 vcn_bo->addr = va;
212 vcn_bo->handle = buf_handle;
213 vcn_bo->size = req.alloc_size;
214 vcn_bo->va_handle = va_handle;
215 r = amdgpu_bo_cpu_map(vcn_bo->handle, (void **)&vcn_bo->ptr);
216 CU_ASSERT_EQUAL(r, 0);
217 memset(vcn_bo->ptr, 0, size);
218 r = amdgpu_bo_cpu_unmap(vcn_bo->handle);
219 CU_ASSERT_EQUAL(r, 0);
220}
221
222static void free_resource(struct amdgpu_vcn_bo *vcn_bo)
223{
224 int r;
225
226 r = amdgpu_bo_va_op(vcn_bo->handle, 0, vcn_bo->size,
227 vcn_bo->addr, 0, AMDGPU_VA_OP_UNMAP);
228 CU_ASSERT_EQUAL(r, 0);
229
230 r = amdgpu_va_range_free(vcn_bo->va_handle);
231 CU_ASSERT_EQUAL(r, 0);
232
233 r = amdgpu_bo_free(vcn_bo->handle);
234 CU_ASSERT_EQUAL(r, 0);
235 memset(vcn_bo, 0, sizeof(*vcn_bo));
236}
237
238static void vcn_dec_cmd(uint64_t addr, unsigned cmd, int *idx)
239{
240 ib_cpu[(*idx)++] = 0x81C4;
241 ib_cpu[(*idx)++] = addr;
242 ib_cpu[(*idx)++] = 0x81C5;
243 ib_cpu[(*idx)++] = addr >> 32;
244 ib_cpu[(*idx)++] = 0x81C3;
245 ib_cpu[(*idx)++] = cmd << 1;
246}
247
248static void amdgpu_cs_vcn_dec_create(void)
249{
250 struct amdgpu_vcn_bo msg_buf;
251 int len, r;
252
253 num_resources = 0;
254 alloc_resource(&msg_buf, 4096, AMDGPU_GEM_DOMAIN_GTT);
255 resources[num_resources++] = msg_buf.handle;
256 resources[num_resources++] = ib_handle;
257
258 r = amdgpu_bo_cpu_map(msg_buf.handle, (void **)&msg_buf.ptr);
259 CU_ASSERT_EQUAL(r, 0);
260
261 memset(msg_buf.ptr, 0, 4096);
262 memcpy(msg_buf.ptr, vcn_dec_create_msg, sizeof(vcn_dec_create_msg));
263
264 len = 0;
265 ib_cpu[len++] = 0x81C4;
266 ib_cpu[len++] = msg_buf.addr;
267 ib_cpu[len++] = 0x81C5;
268 ib_cpu[len++] = msg_buf.addr >> 32;
269 ib_cpu[len++] = 0x81C3;
270 ib_cpu[len++] = 0;
271 for (; len % 16; ++len)
272 ib_cpu[len] = 0x81ff;
273
274 r = submit(len, AMDGPU_HW_IP_VCN_DEC);
275 CU_ASSERT_EQUAL(r, 0);
276
277 free_resource(&msg_buf);
278}
279
280static void amdgpu_cs_vcn_dec_decode(void)
281{
282 const unsigned dpb_size = 15923584, dt_size = 737280;
283 uint64_t msg_addr, fb_addr, bs_addr, dpb_addr, ctx_addr, dt_addr, it_addr, sum;
284 struct amdgpu_vcn_bo dec_buf;
285 int size, len, i, r;
286 uint8_t *dec;
287
288 size = 4*1024; /* msg */
289 size += 4*1024; /* fb */
 290	size += 4096; /* it_scaling_table */
291 size += ALIGN(sizeof(uvd_bitstream), 4*1024);
292 size += ALIGN(dpb_size, 4*1024);
293 size += ALIGN(dt_size, 4*1024);
294
295 num_resources = 0;
296 alloc_resource(&dec_buf, size, AMDGPU_GEM_DOMAIN_GTT);
297 resources[num_resources++] = dec_buf.handle;
298 resources[num_resources++] = ib_handle;
299
300 r = amdgpu_bo_cpu_map(dec_buf.handle, (void **)&dec_buf.ptr);
301 dec = dec_buf.ptr;
302
303 CU_ASSERT_EQUAL(r, 0);
304 memset(dec_buf.ptr, 0, size);
305 memcpy(dec_buf.ptr, vcn_dec_decode_msg, sizeof(vcn_dec_decode_msg));
306 memcpy(dec_buf.ptr + sizeof(vcn_dec_decode_msg),
307 avc_decode_msg, sizeof(avc_decode_msg));
308
309 dec += 4*1024;
310 dec += 4*1024;
311 memcpy(dec, uvd_it_scaling_table, sizeof(uvd_it_scaling_table));
312
313 dec += 4*1024;
314 memcpy(dec, uvd_bitstream, sizeof(uvd_bitstream));
315
316 dec += ALIGN(sizeof(uvd_bitstream), 4*1024);
317
318 dec += ALIGN(dpb_size, 4*1024);
319
320 msg_addr = dec_buf.addr;
321 fb_addr = msg_addr + 4*1024;
322 it_addr = fb_addr + 4*1024;
323 bs_addr = it_addr + 4*1024;
324 dpb_addr = ALIGN(bs_addr + sizeof(uvd_bitstream), 4*1024);
325 ctx_addr = ALIGN(dpb_addr + 0x006B9400, 4*1024);
326 dt_addr = ALIGN(dpb_addr + dpb_size, 4*1024);
327
328 len = 0;
329 vcn_dec_cmd(msg_addr, 0x0, &len);
330 vcn_dec_cmd(dpb_addr, 0x1, &len);
331 vcn_dec_cmd(dt_addr, 0x2, &len);
332 vcn_dec_cmd(fb_addr, 0x3, &len);
333 vcn_dec_cmd(bs_addr, 0x100, &len);
334 vcn_dec_cmd(it_addr, 0x204, &len);
335 vcn_dec_cmd(ctx_addr, 0x206, &len);
336
337 ib_cpu[len++] = 0x81C6;
338 ib_cpu[len++] = 0x1;
339 for (; len % 16; ++len)
340 ib_cpu[len] = 0x80000000;
341
342 r = submit(len, AMDGPU_HW_IP_VCN_DEC);
343 CU_ASSERT_EQUAL(r, 0);
344
345 for (i = 0, sum = 0; i < dt_size; ++i)
346 sum += dec[i];
347
348 CU_ASSERT_EQUAL(sum, SUM_DECODE);
349
350 free_resource(&dec_buf);
351}
352
353static void amdgpu_cs_vcn_dec_destroy(void)
354{
355 struct amdgpu_vcn_bo msg_buf;
356 int len, r;
357
358 num_resources = 0;
359 alloc_resource(&msg_buf, 1024, AMDGPU_GEM_DOMAIN_GTT);
360 resources[num_resources++] = msg_buf.handle;
361 resources[num_resources++] = ib_handle;
362
363 r = amdgpu_bo_cpu_map(msg_buf.handle, (void **)&msg_buf.ptr);
364 CU_ASSERT_EQUAL(r, 0);
365
366 memset(msg_buf.ptr, 0, 1024);
367 memcpy(msg_buf.ptr, vcn_dec_destroy_msg, sizeof(vcn_dec_destroy_msg));
368
369 len = 0;
370 ib_cpu[len++] = 0x81C4;
371 ib_cpu[len++] = msg_buf.addr;
372 ib_cpu[len++] = 0x81C5;
373 ib_cpu[len++] = msg_buf.addr >> 32;
374 ib_cpu[len++] = 0x81C3;
375 ib_cpu[len++] = 0;
376 for (; len % 16; ++len)
377 ib_cpu[len] = 0x80000000;
378
379 r = submit(len, AMDGPU_HW_IP_VCN_DEC);
380 CU_ASSERT_EQUAL(r, 0);
381
382 free_resource(&msg_buf);
383}
384
385static void amdgpu_cs_vcn_enc_create(void)
386{
387 /* TODO */
388}
389
390static void amdgpu_cs_vcn_enc_encode(void)
391{
392 /* TODO */
393}
394
395static void amdgpu_cs_vcn_enc_destroy(void)
396{
397 /* TODO */
398}
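
The VCN decode path above drives the firmware purely through register writes: each buffer is handed over as a low/high address pair on 0x81C4/0x81C5 followed by a command on 0x81C3, and the IB is padded to a 16-dword boundary. A reduced sketch of that sequence (only four of the seven buffers programmed by the test are shown, and build_decode_ib() is illustrative, not part of the test):

#include <stdint.h>

static int build_decode_ib(uint32_t *ib, uint64_t msg_addr,
			   uint64_t dpb_addr, uint64_t dt_addr,
			   uint64_t fb_addr)
{
	const struct { uint64_t addr; uint32_t cmd; } bufs[] = {
		{ msg_addr, 0x0 },	/* decode message */
		{ dpb_addr, 0x1 },	/* decoded picture buffer */
		{ dt_addr,  0x2 },	/* decode target */
		{ fb_addr,  0x3 },	/* feedback buffer */
	};
	int len = 0, i;

	for (i = 0; i < 4; ++i) {
		ib[len++] = 0x81C4;				/* address low */
		ib[len++] = (uint32_t)bufs[i].addr;
		ib[len++] = 0x81C5;				/* address high */
		ib[len++] = (uint32_t)(bufs[i].addr >> 32);
		ib[len++] = 0x81C3;				/* command */
		ib[len++] = bufs[i].cmd << 1;
	}

	ib[len++] = 0x81C6;		/* kick off the decode */
	ib[len++] = 0x1;
	for (; len % 16; ++len)		/* pad to a 16-dword boundary */
		ib[len] = 0x80000000;

	return len;	/* dword count, e.g. for submit(len, AMDGPU_HW_IP_VCN_DEC) */
}
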
diff --git a/tests/amdgpu/vm_tests.c b/tests/amdgpu/vm_tests.c
new file mode 100644
index 00000000..7b6dc5d6
--- /dev/null
+++ b/tests/amdgpu/vm_tests.c
@@ -0,0 +1,169 @@
1/*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22*/
23
24#include "CUnit/Basic.h"
25
26#include "amdgpu_test.h"
27#include "amdgpu_drm.h"
28#include "amdgpu_internal.h"
29
30static amdgpu_device_handle device_handle;
31static uint32_t major_version;
32static uint32_t minor_version;
33
34
35static void amdgpu_vmid_reserve_test(void);
36
37CU_BOOL suite_vm_tests_enable(void)
38{
39 CU_BOOL enable = CU_TRUE;
40
41 if (amdgpu_device_initialize(drm_amdgpu[0], &major_version,
42 &minor_version, &device_handle))
43 return CU_FALSE;
44
45 if (device_handle->info.family_id == AMDGPU_FAMILY_SI) {
46 printf("\n\nCurrently hangs the CP on this ASIC, VM suite disabled\n");
47 enable = CU_FALSE;
48 }
49
50 if (amdgpu_device_deinitialize(device_handle))
51 return CU_FALSE;
52
53 return enable;
54}
55
56int suite_vm_tests_init(void)
57{
58 int r;
59
60 r = amdgpu_device_initialize(drm_amdgpu[0], &major_version,
61 &minor_version, &device_handle);
62
63 if (r) {
64 if ((r == -EACCES) && (errno == EACCES))
 65		printf("\n\nError: %s. "
 66		       "Hint: Try to run this test program as root.",
67 strerror(errno));
68 return CUE_SINIT_FAILED;
69 }
70
71 return CUE_SUCCESS;
72}
73
74int suite_vm_tests_clean(void)
75{
76 int r = amdgpu_device_deinitialize(device_handle);
77
78 if (r == 0)
79 return CUE_SUCCESS;
80 else
81 return CUE_SCLEAN_FAILED;
82}
83
84
85CU_TestInfo vm_tests[] = {
86 { "resere vmid test", amdgpu_vmid_reserve_test },
87 CU_TEST_INFO_NULL,
88};
89
90static void amdgpu_vmid_reserve_test(void)
91{
92 amdgpu_context_handle context_handle;
93 amdgpu_bo_handle ib_result_handle;
94 void *ib_result_cpu;
95 uint64_t ib_result_mc_address;
96 struct amdgpu_cs_request ibs_request;
97 struct amdgpu_cs_ib_info ib_info;
98 struct amdgpu_cs_fence fence_status;
99 uint32_t expired, flags;
100 int i, r;
101 amdgpu_bo_list_handle bo_list;
102 amdgpu_va_handle va_handle;
103 static uint32_t *ptr;
104
105 r = amdgpu_cs_ctx_create(device_handle, &context_handle);
106 CU_ASSERT_EQUAL(r, 0);
107
108 flags = 0;
109 r = amdgpu_vm_reserve_vmid(device_handle, flags);
110 CU_ASSERT_EQUAL(r, 0);
111
112
113 r = amdgpu_bo_alloc_and_map(device_handle, 4096, 4096,
114 AMDGPU_GEM_DOMAIN_GTT, 0,
115 &ib_result_handle, &ib_result_cpu,
116 &ib_result_mc_address, &va_handle);
117 CU_ASSERT_EQUAL(r, 0);
118
119 r = amdgpu_get_bo_list(device_handle, ib_result_handle, NULL,
120 &bo_list);
121 CU_ASSERT_EQUAL(r, 0);
122
123 ptr = ib_result_cpu;
124
125 for (i = 0; i < 16; ++i)
126 ptr[i] = 0xffff1000;
127
128 memset(&ib_info, 0, sizeof(struct amdgpu_cs_ib_info));
129 ib_info.ib_mc_address = ib_result_mc_address;
130 ib_info.size = 16;
131
132 memset(&ibs_request, 0, sizeof(struct amdgpu_cs_request));
133 ibs_request.ip_type = AMDGPU_HW_IP_GFX;
134 ibs_request.ring = 0;
135 ibs_request.number_of_ibs = 1;
136 ibs_request.ibs = &ib_info;
137 ibs_request.resources = bo_list;
138 ibs_request.fence_info.handle = NULL;
139
 140	r = amdgpu_cs_submit(context_handle, 0, &ibs_request, 1);
141 CU_ASSERT_EQUAL(r, 0);
142
143
144 memset(&fence_status, 0, sizeof(struct amdgpu_cs_fence));
145 fence_status.context = context_handle;
146 fence_status.ip_type = AMDGPU_HW_IP_GFX;
147 fence_status.ip_instance = 0;
148 fence_status.ring = 0;
149 fence_status.fence = ibs_request.seq_no;
150
151 r = amdgpu_cs_query_fence_status(&fence_status,
 152			AMDGPU_TIMEOUT_INFINITE, 0, &expired);
153 CU_ASSERT_EQUAL(r, 0);
154
155 r = amdgpu_bo_list_destroy(bo_list);
156 CU_ASSERT_EQUAL(r, 0);
157
158 r = amdgpu_bo_unmap_and_free(ib_result_handle, va_handle,
159 ib_result_mc_address, 4096);
160 CU_ASSERT_EQUAL(r, 0);
161
162 flags = 0;
163 r = amdgpu_vm_unreserve_vmid(device_handle, flags);
164 CU_ASSERT_EQUAL(r, 0);
165
166
167 r = amdgpu_cs_ctx_free(context_handle);
168 CU_ASSERT_EQUAL(r, 0);
169}
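
The new VM test brackets an ordinary GFX submission between amdgpu_vm_reserve_vmid() and amdgpu_vm_unreserve_vmid(). A minimal sketch of that bracket, assuming the prototypes added to amdgpu/amdgpu.h earlier in this series; the IB submission and fence wait in the middle are elided:

#include "amdgpu.h"

static int run_with_reserved_vmid(amdgpu_device_handle dev)
{
	int r;

	r = amdgpu_vm_reserve_vmid(dev, 0);	/* flags are currently 0 */
	if (r)
		return r;

	/* ... build an IB, submit it and wait for its fence here ... */

	return amdgpu_vm_unreserve_vmid(dev, 0);
}
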
diff --git a/tests/drmsl.c b/tests/drmsl.c
index d0ac0efa..d1b59a86 100644
--- a/tests/drmsl.c
+++ b/tests/drmsl.c
@@ -106,7 +106,9 @@ static double do_time(int size, int iter)
106 return usec; 106 return usec;
107} 107}
108 108
109static void print_neighbors(void *list, unsigned long key) 109static void print_neighbors(void *list, unsigned long key,
110 unsigned long expected_prev,
111 unsigned long expected_next)
110{ 112{
111 unsigned long prev_key = 0; 113 unsigned long prev_key = 0;
112 unsigned long next_key = 0; 114 unsigned long next_key = 0;
@@ -119,6 +121,16 @@ static void print_neighbors(void *list, unsigned long key)
119 &next_key, &next_value); 121 &next_key, &next_value);
120 printf("Neighbors of %5lu: %d %5lu %5lu\n", 122 printf("Neighbors of %5lu: %d %5lu %5lu\n",
121 key, retval, prev_key, next_key); 123 key, retval, prev_key, next_key);
124 if (prev_key != expected_prev) {
125 fprintf(stderr, "Unexpected neighbor: %5lu. Expected: %5lu\n",
126 prev_key, expected_prev);
127 exit(1);
128 }
129 if (next_key != expected_next) {
130 fprintf(stderr, "Unexpected neighbor: %5lu. Expected: %5lu\n",
131 next_key, expected_next);
132 exit(1);
133 }
122} 134}
123 135
124int main(void) 136int main(void)
@@ -138,13 +150,13 @@ int main(void)
138 print(list); 150 print(list);
139 printf("\n==============================\n\n"); 151 printf("\n==============================\n\n");
140 152
141 print_neighbors(list, 0); 153 print_neighbors(list, 0, 0, 50);
142 print_neighbors(list, 50); 154 print_neighbors(list, 50, 0, 50);
143 print_neighbors(list, 51); 155 print_neighbors(list, 51, 50, 123);
144 print_neighbors(list, 123); 156 print_neighbors(list, 123, 50, 123);
145 print_neighbors(list, 200); 157 print_neighbors(list, 200, 123, 213);
146 print_neighbors(list, 213); 158 print_neighbors(list, 213, 123, 213);
147 print_neighbors(list, 256); 159 print_neighbors(list, 256, 213, 256);
148 printf("\n==============================\n\n"); 160 printf("\n==============================\n\n");
149 161
150 drmSLDelete(list, 50); 162 drmSLDelete(list, 50);
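For context, the skip-list helpers exercised here are the small drmSL* API from xf86drm.h; a minimal sketch of the lookup pattern that the strengthened print_neighbors() check now asserts (the keys are illustrative):

	#include "xf86drm.h"

	void *list = drmSLCreate();
	unsigned long prev_key = 0, next_key = 0;
	void *prev_val, *next_val;

	/* Populate the list; the stored values are irrelevant for this check. */
	drmSLInsert(list, 50, NULL);
	drmSLInsert(list, 123, NULL);

	/* Looking up a key between two entries reports both neighbours. */
	drmSLLookupNeighbors(list, 51, &prev_key, &prev_val,
			     &next_key, &next_val);
	/* The test now requires prev_key == 50 and next_key == 123,
	 * and aborts with exit(1) on a mismatch. */

	drmSLDestroy(list);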
diff --git a/tests/drmstat.c b/tests/drmstat.c
deleted file mode 100644
index 023aa069..00000000
--- a/tests/drmstat.c
+++ /dev/null
@@ -1,419 +0,0 @@
1/* drmstat.c -- DRM device status and testing program
2 * Created: Tue Jan 5 08:19:24 1999 by faith@precisioninsight.com
3 *
4 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
26 *
27 * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
28 *
29 */
30
31#ifdef HAVE_CONFIG_H
32#include "config.h"
33#endif
34
35#include <stdio.h>
36#include <stdlib.h>
37#include <unistd.h>
38#include <sys/types.h>
39#include <sys/time.h>
40#include <sys/mman.h>
41#include <getopt.h>
42#include <strings.h>
43#include <errno.h>
44#include <signal.h>
45#include <fcntl.h>
46#ifdef HAVE_ALLOCA_H
47# include <alloca.h>
48#endif
49#include "xf86drm.h"
50
51int sigio_fd;
52
53static double usec(struct timeval *end, struct timeval *start)
54{
55 double e = end->tv_sec * 1000000 + end->tv_usec;
56 double s = start->tv_sec * 1000000 + start->tv_usec;
57
58 return e - s;
59}
60
61static void getversion(int fd)
62{
63 drmVersionPtr version;
64
65 version = drmGetVersion(fd);
66 if (version) {
67 printf( "Name: %s\n", version->name ? version->name : "?" );
68 printf( " Version: %d.%d.%d\n",
69 version->version_major,
70 version->version_minor,
71 version->version_patchlevel );
72 printf( " Date: %s\n", version->date ? version->date : "?" );
73 printf( " Desc: %s\n", version->desc ? version->desc : "?" );
74 drmFreeVersion(version);
75 } else {
76 printf( "No driver available\n" );
77 }
78}
79
80static void process_sigio(char *device)
81{
82 int fd;
83
84 if ((fd = open(device, 0)) < 0) {
85 drmError(-errno, __func__);
86 exit(1);
87 }
88
89 sigio_fd = fd;
90 for (;;) sleep(60);
91}
92
93int main(int argc, char **argv)
94{
95 int c;
96 int r = 0;
97 int fd = -1;
98 drm_handle_t handle;
99 void *address;
100 char *pt;
101 unsigned long count;
102 unsigned long offset;
103 unsigned long size;
104 drm_context_t context;
105 int loops;
106 char buf[1024];
107 int i;
108 drmBufInfoPtr info;
109 drmBufMapPtr bufs;
110 drmLockPtr lock;
111 int secs;
112
113 while ((c = getopt(argc, argv,
114 "lc:vo:O:f:s:w:W:b:r:R:P:L:C:XS:B:F:")) != EOF)
115 switch (c) {
116 case 'F':
117 count = strtoul(optarg, NULL, 0);
118 if (!fork()) {
119 dup(fd);
120 sleep(count);
121 }
122 close(fd);
123 break;
124 case 'v': getversion(fd); break;
125 case 'X':
126 if ((r = drmCreateContext(fd, &context))) {
127 drmError(r, argv[0]);
128 return 1;
129 }
130 printf( "Got %d\n", context);
131 break;
132 case 'S':
133 process_sigio(optarg);
134 break;
135 case 'C':
136 if ((r = drmSwitchToContext(fd, strtoul(optarg, NULL, 0)))) {
137 drmError(r, argv[0]);
138 return 1;
139 }
140 break;
141 case 'c':
142 if ((r = drmSetBusid(fd,optarg))) {
143 drmError(r, argv[0]);
144 return 1;
145 }
146 break;
147 case 'o':
148 if ((fd = drmOpen(optarg, NULL)) < 0) {
149 drmError(fd, argv[0]);
150 return 1;
151 }
152 break;
153 case 'O':
154 if ((fd = drmOpen(NULL, optarg)) < 0) {
155 drmError(fd, argv[0]);
156 return 1;
157 }
158 break;
159 case 'B': /* Test buffer allocation */
160 count = strtoul(optarg, &pt, 0);
161 size = strtoul(pt+1, &pt, 0);
162 secs = strtoul(pt+1, NULL, 0);
163 {
164 drmDMAReq dma;
165 int *indices, *sizes;
166
167 indices = alloca(sizeof(*indices) * count);
168 sizes = alloca(sizeof(*sizes) * count);
169 dma.context = context;
170 dma.send_count = 0;
171 dma.request_count = count;
172 dma.request_size = size;
173 dma.request_list = indices;
174 dma.request_sizes = sizes;
175 dma.flags = DRM_DMA_WAIT;
176 if ((r = drmDMA(fd, &dma))) {
177 drmError(r, argv[0]);
178 return 1;
179 }
180 for (i = 0; i < dma.granted_count; i++) {
181 printf("%5d: index = %d, size = %d\n",
182 i, dma.request_list[i], dma.request_sizes[i]);
183 }
184 sleep(secs);
185 drmFreeBufs(fd, dma.granted_count, indices);
186 }
187 break;
188 case 'b':
189 count = strtoul(optarg, &pt, 0);
190 size = strtoul(pt+1, NULL, 0);
191 if ((r = drmAddBufs(fd, count, size, 0, 65536)) < 0) {
192 drmError(r, argv[0]);
193 return 1;
194 }
195 if (!(info = drmGetBufInfo(fd))) {
196 drmError(0, argv[0]);
197 return 1;
198 }
199 for (i = 0; i < info->count; i++) {
200 printf("%5d buffers of size %6d (low = %d, high = %d)\n",
201 info->list[i].count,
202 info->list[i].size,
203 info->list[i].low_mark,
204 info->list[i].high_mark);
205 }
206 if ((r = drmMarkBufs(fd, 0.50, 0.80))) {
207 drmError(r, argv[0]);
208 return 1;
209 }
210 if (!(info = drmGetBufInfo(fd))) {
211 drmError(0, argv[0]);
212 return 1;
213 }
214 for (i = 0; i < info->count; i++) {
215 printf("%5d buffers of size %6d (low = %d, high = %d)\n",
216 info->list[i].count,
217 info->list[i].size,
218 info->list[i].low_mark,
219 info->list[i].high_mark);
220 }
221 printf("===== /proc/dri/0/mem =====\n");
222 sprintf(buf, "cat /proc/dri/0/mem");
223 system(buf);
224#if 1
225 if (!(bufs = drmMapBufs(fd))) {
226 drmError(0, argv[0]);
227 return 1;
228 }
229 printf("===============================\n");
230 printf( "%d bufs\n", bufs->count);
231 for (i = 0; i < bufs->count; i++) {
232 printf( " %4d: %8d bytes at %p\n",
233 i,
234 bufs->list[i].total,
235 bufs->list[i].address);
236 }
237 printf("===== /proc/dri/0/vma =====\n");
238 sprintf(buf, "cat /proc/dri/0/vma");
239 system(buf);
240#endif
241 break;
242 case 'f':
243 offset = strtoul(optarg, &pt, 0);
244 size = strtoul(pt+1, NULL, 0);
245 handle = 0;
246 if ((r = drmAddMap(fd, offset, size,
247 DRM_FRAME_BUFFER, 0, &handle))) {
248 drmError(r, argv[0]);
249 return 1;
250 }
251 printf("0x%08lx:0x%04lx added\n", offset, size);
252 printf("===== /proc/dri/0/mem =====\n");
253 sprintf(buf, "cat /proc/dri/0/mem");
254 system(buf);
255 break;
256 case 'r':
257 case 'R':
258 offset = strtoul(optarg, &pt, 0);
259 size = strtoul(pt+1, NULL, 0);
260 handle = 0;
261 if ((r = drmAddMap(fd, offset, size,
262 DRM_REGISTERS,
263 c == 'R' ? DRM_READ_ONLY : 0,
264 &handle))) {
265 drmError(r, argv[0]);
266 return 1;
267 }
268 printf("0x%08lx:0x%04lx added\n", offset, size);
269 printf("===== /proc/dri/0/mem =====\n");
270 sprintf(buf, "cat /proc/dri/0/mem");
271 system(buf);
272 break;
273 case 's':
274 size = strtoul(optarg, &pt, 0);
275 handle = 0;
276 if ((r = drmAddMap(fd, 0, size,
277 DRM_SHM, DRM_CONTAINS_LOCK,
278 &handle))) {
279 drmError(r, argv[0]);
280 return 1;
281 }
282 printf("0x%04lx byte shm added at 0x%08lx\n", size, handle);
283 sprintf(buf, "cat /proc/dri/0/vm");
284 system(buf);
285 break;
286 case 'P':
287 offset = strtoul(optarg, &pt, 0);
288 size = strtoul(pt+1, NULL, 0);
289 address = NULL;
290 if ((r = drmMap(fd, offset, size, &address))) {
291 drmError(r, argv[0]);
292 return 1;
293 }
294 printf("0x%08lx:0x%04lx mapped at %p for pid %d\n",
295 offset, size, address, getpid());
296 printf("===== /proc/dri/0/vma =====\n");
297 sprintf(buf, "cat /proc/dri/0/vma");
298 system(buf);
299 mprotect((void *)offset, size, PROT_READ);
300 printf("===== /proc/dri/0/vma =====\n");
301 sprintf(buf, "cat /proc/dri/0/vma");
302 system(buf);
303 break;
304 case 'w':
305 case 'W':
306 offset = strtoul(optarg, &pt, 0);
307 size = strtoul(pt+1, NULL, 0);
308 address = NULL;
309 if ((r = drmMap(fd, offset, size, &address))) {
310 drmError(r, argv[0]);
311 return 1;
312 }
313 printf("0x%08lx:0x%04lx mapped at %p for pid %d\n",
314 offset, size, address, getpid());
315 printf("===== /proc/%d/maps =====\n", getpid());
316 sprintf(buf, "cat /proc/%d/maps", getpid());
317 system(buf);
318 printf("===== /proc/dri/0/mem =====\n");
319 sprintf(buf, "cat /proc/dri/0/mem");
320 system(buf);
321 printf("===== /proc/dri/0/vma =====\n");
322 sprintf(buf, "cat /proc/dri/0/vma");
323 system(buf);
324 printf("===== READING =====\n");
325 for (i = 0; i < 0x10; i++)
326 printf("%02x ", (unsigned int)((unsigned char *)address)[i]);
327 printf("\n");
328 if (c == 'w') {
329 printf("===== WRITING =====\n");
330 for (i = 0; i < size; i+=2) {
331 ((char *)address)[i] = i & 0xff;
332 ((char *)address)[i+1] = i & 0xff;
333 }
334 }
335 printf("===== READING =====\n");
336 for (i = 0; i < 0x10; i++)
337 printf("%02x ", (unsigned int)((unsigned char *)address)[i]);
338 printf("\n");
339 printf("===== /proc/dri/0/vma =====\n");
340 sprintf(buf, "cat /proc/dri/0/vma");
341 system(buf);
342 break;
343 case 'L':
344 context = strtoul(optarg, &pt, 0);
345 offset = strtoul(pt+1, &pt, 0);
346 size = strtoul(pt+1, &pt, 0);
347 loops = strtoul(pt+1, NULL, 0);
348 address = NULL;
349 if ((r = drmMap(fd, offset, size, &address))) {
350 drmError(r, argv[0]);
351 return 1;
352 }
353 lock = address;
354#if 1
355 {
356 int counter = 0;
357 struct timeval loop_start, loop_end;
358 struct timeval lock_start, lock_end;
359 double wt;
360#define HISTOSIZE 9
361 int histo[HISTOSIZE];
362 int output = 0;
363 int fast = 0;
364
365 if (loops < 0) {
366 loops = -loops;
367 ++output;
368 }
369
370 for (i = 0; i < HISTOSIZE; i++) histo[i] = 0;
371
372 gettimeofday(&loop_start, NULL);
373 for (i = 0; i < loops; i++) {
374 gettimeofday(&lock_start, NULL);
375 DRM_LIGHT_LOCK_COUNT(fd,lock,context,fast);
376 gettimeofday(&lock_end, NULL);
377 DRM_UNLOCK(fd,lock,context);
378 ++counter;
379 wt = usec(&lock_end, &lock_start);
380 if (wt <= 2.5) ++histo[8];
381 if (wt < 5.0) ++histo[0];
382 else if (wt < 50.0) ++histo[1];
383 else if (wt < 500.0) ++histo[2];
384 else if (wt < 5000.0) ++histo[3];
385 else if (wt < 50000.0) ++histo[4];
386 else if (wt < 500000.0) ++histo[5];
387 else if (wt < 5000000.0) ++histo[6];
388 else ++histo[7];
389 if (output) printf( "%.2f uSec, %d fast\n", wt, fast);
390 }
391 gettimeofday(&loop_end, NULL);
392 printf( "Average wait time = %.2f usec, %d fast\n",
393 usec(&loop_end, &loop_start) / counter, fast);
394 printf( "%9d <= 2.5 uS\n", histo[8]);
395 printf( "%9d < 5 uS\n", histo[0]);
396 printf( "%9d < 50 uS\n", histo[1]);
397 printf( "%9d < 500 uS\n", histo[2]);
398 printf( "%9d < 5000 uS\n", histo[3]);
399 printf( "%9d < 50000 uS\n", histo[4]);
400 printf( "%9d < 500000 uS\n", histo[5]);
401 printf( "%9d < 5000000 uS\n", histo[6]);
402 printf( "%9d >= 5000000 uS\n", histo[7]);
403 }
404#else
405 printf( "before lock: 0x%08x\n", lock->lock);
406 printf( "lock: 0x%08x\n", lock->lock);
407 sleep(5);
408 printf( "unlock: 0x%08x\n", lock->lock);
409#endif
410 break;
411 default:
412 fprintf( stderr, "Usage: drmstat [options]\n" );
413 return 1;
414 }
415
416 return r;
417}
418
419int xf86ConfigDRI[10];
diff --git a/tests/etnaviv/Makefile.am b/tests/etnaviv/Makefile.am
index 06318643..226baee2 100644
--- a/tests/etnaviv/Makefile.am
+++ b/tests/etnaviv/Makefile.am
@@ -28,6 +28,7 @@ etnaviv_2d_test_SOURCES = \
28 write_bmp.h 28 write_bmp.h
29 29
30etnaviv_cmd_stream_test_LDADD = \ 30etnaviv_cmd_stream_test_LDADD = \
31 $(top_builddir)/libdrm.la \
31 $(top_builddir)/etnaviv/libdrm_etnaviv.la 32 $(top_builddir)/etnaviv/libdrm_etnaviv.la
32 33
33etnaviv_cmd_stream_test_SOURCES = \ 34etnaviv_cmd_stream_test_SOURCES = \
diff --git a/tests/etnaviv/etnaviv_2d_test.c b/tests/etnaviv/etnaviv_2d_test.c
index 10751c73..8dd77b66 100644
--- a/tests/etnaviv/etnaviv_2d_test.c
+++ b/tests/etnaviv/etnaviv_2d_test.c
@@ -24,10 +24,6 @@
24 * Christian Gmeiner <christian.gmeiner@gmail.com> 24 * Christian Gmeiner <christian.gmeiner@gmail.com>
25 */ 25 */
26 26
27#ifdef HAVE_CONFIG_H
28# include "config.h"
29#endif
30
31#include <fcntl.h> 27#include <fcntl.h>
32#include <stdio.h> 28#include <stdio.h>
33#include <string.h> 29#include <string.h>
diff --git a/tests/etnaviv/etnaviv_bo_cache_test.c b/tests/etnaviv/etnaviv_bo_cache_test.c
index fb01f8d3..7fb06293 100644
--- a/tests/etnaviv/etnaviv_bo_cache_test.c
+++ b/tests/etnaviv/etnaviv_bo_cache_test.c
@@ -24,10 +24,6 @@
24 * Christian Gmeiner <christian.gmeiner@gmail.com> 24 * Christian Gmeiner <christian.gmeiner@gmail.com>
25 */ 25 */
26 26
27#ifdef HAVE_CONFIG_H
28# include "config.h"
29#endif
30
31#undef NDEBUG 27#undef NDEBUG
32#include <assert.h> 28#include <assert.h>
33 29
diff --git a/tests/etnaviv/meson.build b/tests/etnaviv/meson.build
new file mode 100644
index 00000000..8b4a3cfb
--- /dev/null
+++ b/tests/etnaviv/meson.build
@@ -0,0 +1,45 @@
1# Copyright © 2017-2018 Intel Corporation
2
3# Permission is hereby granted, free of charge, to any person obtaining a copy
4# of this software and associated documentation files (the "Software"), to deal
5# in the Software without restriction, including without limitation the rights
6# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7# copies of the Software, and to permit persons to whom the Software is
8# furnished to do so, subject to the following conditions:
9
10# The above copyright notice and this permission notice shall be included in
11# all copies or substantial portions of the Software.
12
13# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19# SOFTWARE.
20
21inc_etnaviv_tests = [inc_root, inc_drm, include_directories('../../etnaviv')]
22
23etnaviv_2d_test = executable(
24 'etnaviv_2d_test',
25 files('etnaviv_2d_test.c', 'write_bmp.c'),
26 include_directories : inc_etnaviv_tests,
27 link_with : [libdrm, libdrm_etnaviv],
28 install : with_install_tests,
29)
30
31etnaviv_cmd_stream_test = executable(
32 'etnaviv_cmd_stream_test',
33 files('etnaviv_cmd_stream_test.c'),
34 include_directories : inc_etnaviv_tests,
35 link_with : [libdrm, libdrm_etnaviv],
36 install : with_install_tests,
37)
38
39etnaviv_bo_cache_test = executable(
40 'etnaviv_bo_cache_test',
41 files('etnaviv_bo_cache_test.c'),
42 include_directories : inc_etnaviv_tests,
43 link_with : [libdrm, libdrm_etnaviv],
44 install : with_install_tests,
45)
diff --git a/tests/etnaviv/write_bmp.c b/tests/etnaviv/write_bmp.c
index 7ae0646c..f7b6bc69 100644
--- a/tests/etnaviv/write_bmp.c
+++ b/tests/etnaviv/write_bmp.c
@@ -63,7 +63,7 @@ struct dib_header {
63 unsigned int unused[12]; 63 unsigned int unused[12];
64} __attribute__((__packed__)); 64} __attribute__((__packed__));
65 65
66static int 66static void
67bmp_header_write(int fd, int width, int height, int bgra, int noflip, int alpha) 67bmp_header_write(int fd, int width, int height, int bgra, int noflip, int alpha)
68{ 68{
69 struct bmp_header bmp_header = { 69 struct bmp_header bmp_header = {
@@ -98,8 +98,6 @@ bmp_header_write(int fd, int width, int height, int bgra, int noflip, int alpha)
98 98
99 write(fd, &bmp_header, sizeof(struct bmp_header)); 99 write(fd, &bmp_header, sizeof(struct bmp_header));
100 write(fd, &dib_header, sizeof(struct dib_header)); 100 write(fd, &dib_header, sizeof(struct dib_header));
101
102 return 0;
103} 101}
104 102
105void 103void
diff --git a/tests/exynos/exynos_fimg2d_event.c b/tests/exynos/exynos_fimg2d_event.c
index 9ed5a307..353e087b 100644
--- a/tests/exynos/exynos_fimg2d_event.c
+++ b/tests/exynos/exynos_fimg2d_event.c
@@ -1,17 +1,24 @@
1/* 1/*
2 * Copyright (C) 2015 - Tobias Jakobi 2 * Copyright (C) 2015 - Tobias Jakobi
3 * 3 *
4 * This is free software: you can redistribute it and/or modify 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * it under the terms of the GNU General Public License as published 5 * copy of this software and associated documentation files (the "Software"),
6 * by the Free Software Foundation, either version 2 of the License, 6 * to deal in the Software without restriction, including without limitation
7 * or (at your option) any later version. 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
8 * 10 *
9 * It is distributed in the hope that it will be useful, but 11 * The above copyright notice and this permission notice (including the next
10 * WITHOUT ANY WARRANTY; without even the implied warranty of 12 * paragraph) shall be included in all copies or substantial portions of the
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * Software.
12 * GNU General Public License for more details. 14 *
13 * You should have received a copy of the GNU General Public License 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * along with it. If not, see <http://www.gnu.org/licenses/>. 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
15 */ 22 */
16 23
17#include <unistd.h> 24#include <unistd.h>
diff --git a/tests/exynos/exynos_fimg2d_perf.c b/tests/exynos/exynos_fimg2d_perf.c
index 1699bba7..97691a71 100644
--- a/tests/exynos/exynos_fimg2d_perf.c
+++ b/tests/exynos/exynos_fimg2d_perf.c
@@ -1,17 +1,24 @@
1/* 1/*
2 * Copyright (C) 2015 - Tobias Jakobi 2 * Copyright (C) 2015 - Tobias Jakobi
3 * 3 *
4 * This is free software: you can redistribute it and/or modify 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * it under the terms of the GNU General Public License as published 5 * copy of this software and associated documentation files (the "Software"),
6 * by the Free Software Foundation, either version 2 of the License, 6 * to deal in the Software without restriction, including without limitation
7 * or (at your option) any later version. 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
8 * 10 *
9 * It is distributed in the hope that it will be useful, but 11 * The above copyright notice and this permission notice (including the next
10 * WITHOUT ANY WARRANTY; without even the implied warranty of 12 * paragraph) shall be included in all copies or substantial portions of the
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * Software.
12 * GNU General Public License for more details. 14 *
13 * You should have received a copy of the GNU General Public License 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * along with it. If not, see <http://www.gnu.org/licenses/>. 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
15 */ 22 */
16 23
17#include <stdlib.h> 24#include <stdlib.h>
@@ -267,13 +274,6 @@ int main(int argc, char **argv)
267 goto out; 274 goto out;
268 } 275 }
269 276
270 if (bufw == 0 || bufh == 0) {
271 fprintf(stderr, "error: buffer width/height should be non-zero.\n");
272 ret = -1;
273
274 goto out;
275 }
276
277 fd = drmOpen("exynos", NULL); 277 fd = drmOpen("exynos", NULL);
278 if (fd < 0) { 278 if (fd < 0) {
279 fprintf(stderr, "error: failed to open drm\n"); 279 fprintf(stderr, "error: failed to open drm\n");
diff --git a/tests/exynos/exynos_fimg2d_test.c b/tests/exynos/exynos_fimg2d_test.c
index 797fb6eb..99bb9233 100644
--- a/tests/exynos/exynos_fimg2d_test.c
+++ b/tests/exynos/exynos_fimg2d_test.c
@@ -3,17 +3,26 @@
3 * Authors: 3 * Authors:
4 * Inki Dae <inki.dae@samsung.com> 4 * Inki Dae <inki.dae@samsung.com>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * under the terms of the GNU General Public License as published by the 7 * copy of this software and associated documentation files (the "Software"),
8 * Free Software Foundation; either version 2 of the License, or (at your 8 * to deal in the Software without restriction, including without limitation
9 * option) any later version. 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
10 * 12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23 * OTHER DEALINGS IN THE SOFTWARE.
11 */ 24 */
12 25
13#ifdef HAVE_CONFIG_H
14#include "config.h"
15#endif
16
17#include <stdlib.h> 26#include <stdlib.h>
18#include <stdio.h> 27#include <stdio.h>
19#include <string.h> 28#include <string.h>
@@ -59,7 +68,6 @@ static void connector_find_mode(int fd, struct connector *c,
59 if (!connector) { 68 if (!connector) {
60 fprintf(stderr, "could not get connector %i: %s\n", 69 fprintf(stderr, "could not get connector %i: %s\n",
61 resources->connectors[i], strerror(errno)); 70 resources->connectors[i], strerror(errno));
62 drmModeFreeConnector(connector);
63 continue; 71 continue;
64 } 72 }
65 73
@@ -98,7 +106,6 @@ static void connector_find_mode(int fd, struct connector *c,
98 if (!c->encoder) { 106 if (!c->encoder) {
99 fprintf(stderr, "could not get encoder %i: %s\n", 107 fprintf(stderr, "could not get encoder %i: %s\n",
100 resources->encoders[i], strerror(errno)); 108 resources->encoders[i], strerror(errno));
101 drmModeFreeEncoder(c->encoder);
102 continue; 109 continue;
103 } 110 }
104 111
@@ -264,7 +271,8 @@ static int g2d_copy_test(struct exynos_device *dev, struct exynos_bo *src,
264 userptr = (unsigned long)malloc(size); 271 userptr = (unsigned long)malloc(size);
265 if (!userptr) { 272 if (!userptr) {
266 fprintf(stderr, "failed to allocate userptr.\n"); 273 fprintf(stderr, "failed to allocate userptr.\n");
267 return -EFAULT; 274 ret = -EFAULT;
275 goto fail;
268 } 276 }
269 277
270 src_img.user_ptr[0].userptr = userptr; 278 src_img.user_ptr[0].userptr = userptr;
@@ -469,7 +477,8 @@ static int g2d_copy_with_scale_test(struct exynos_device *dev,
469 userptr = (unsigned long)malloc(size); 477 userptr = (unsigned long)malloc(size);
470 if (!userptr) { 478 if (!userptr) {
471 fprintf(stderr, "failed to allocate userptr.\n"); 479 fprintf(stderr, "failed to allocate userptr.\n");
472 return -EFAULT; 480 ret = -EFAULT;
481 goto fail;
473 } 482 }
474 483
475 src_img.user_ptr[0].userptr = userptr; 484 src_img.user_ptr[0].userptr = userptr;
@@ -520,9 +529,10 @@ err_free_userptr:
520fail: 529fail:
521 g2d_fini(ctx); 530 g2d_fini(ctx);
522 531
523 return 0; 532 return ret;
524} 533}
525 534
535#ifdef EXYNOS_G2D_USERPTR_TEST
526static int g2d_blend_test(struct exynos_device *dev, 536static int g2d_blend_test(struct exynos_device *dev,
527 struct exynos_bo *src, 537 struct exynos_bo *src,
528 struct exynos_bo *dst, 538 struct exynos_bo *dst,
@@ -557,7 +567,8 @@ static int g2d_blend_test(struct exynos_device *dev,
557 userptr = (unsigned long)malloc(size); 567 userptr = (unsigned long)malloc(size);
558 if (!userptr) { 568 if (!userptr) {
559 fprintf(stderr, "failed to allocate userptr.\n"); 569 fprintf(stderr, "failed to allocate userptr.\n");
560 return -EFAULT; 570 ret = -EFAULT;
571 goto fail;
561 } 572 }
562 573
563 src_img.user_ptr[0].userptr = userptr; 574 src_img.user_ptr[0].userptr = userptr;
@@ -619,8 +630,9 @@ err_free_userptr:
619fail: 630fail:
620 g2d_fini(ctx); 631 g2d_fini(ctx);
621 632
622 return 0; 633 return ret;
623} 634}
635#endif
624 636
625static int g2d_checkerboard_test(struct exynos_device *dev, 637static int g2d_checkerboard_test(struct exynos_device *dev,
626 struct exynos_bo *src, 638 struct exynos_bo *src,
@@ -645,8 +657,8 @@ static int g2d_checkerboard_test(struct exynos_device *dev,
645 dst_y = 0; 657 dst_y = 0;
646 658
647 checkerboard = create_checkerboard_pattern(screen_width / 32, screen_height / 32, 32); 659 checkerboard = create_checkerboard_pattern(screen_width / 32, screen_height / 32, 32);
648 if (checkerboard == NULL) { 660 if (!checkerboard) {
649 ret = -1; 661 ret = -EFAULT;
650 goto fail; 662 goto fail;
651 } 663 }
652 664
@@ -755,8 +767,8 @@ int main(int argc, char **argv)
755 767
756 dev = exynos_device_create(fd); 768 dev = exynos_device_create(fd);
757 if (!dev) { 769 if (!dev) {
758 drmClose(dev->fd); 770 ret = -EFAULT;
759 return -EFAULT; 771 goto err_drm_close;
760 } 772 }
761 773
762 resources = drmModeGetResources(dev->fd); 774 resources = drmModeGetResources(dev->fd);
@@ -764,7 +776,7 @@ int main(int argc, char **argv)
764 fprintf(stderr, "drmModeGetResources failed: %s\n", 776 fprintf(stderr, "drmModeGetResources failed: %s\n",
765 strerror(errno)); 777 strerror(errno));
766 ret = -EFAULT; 778 ret = -EFAULT;
767 goto err_drm_close; 779 goto err_dev_destroy;
768 } 780 }
769 781
770 connector_find_mode(dev->fd, &con, resources); 782 connector_find_mode(dev->fd, &con, resources);
@@ -773,7 +785,7 @@ int main(int argc, char **argv)
773 if (!con.mode) { 785 if (!con.mode) {
774 fprintf(stderr, "failed to find usable connector\n"); 786 fprintf(stderr, "failed to find usable connector\n");
775 ret = -EFAULT; 787 ret = -EFAULT;
776 goto err_drm_close; 788 goto err_dev_destroy;
777 } 789 }
778 790
779 screen_width = con.mode->hdisplay; 791 screen_width = con.mode->hdisplay;
@@ -782,7 +794,7 @@ int main(int argc, char **argv)
782 if (screen_width == 0 || screen_height == 0) { 794 if (screen_width == 0 || screen_height == 0) {
783 fprintf(stderr, "failed to find sane resolution on connector\n"); 795 fprintf(stderr, "failed to find sane resolution on connector\n");
784 ret = -EFAULT; 796 ret = -EFAULT;
785 goto err_drm_close; 797 goto err_dev_destroy;
786 } 798 }
787 799
788 printf("screen width = %d, screen height = %d\n", screen_width, 800 printf("screen width = %d, screen height = %d\n", screen_width,
@@ -791,7 +803,7 @@ int main(int argc, char **argv)
791 bo = exynos_create_buffer(dev, screen_width * screen_height * 4, 0); 803 bo = exynos_create_buffer(dev, screen_width * screen_height * 4, 0);
792 if (!bo) { 804 if (!bo) {
793 ret = -EFAULT; 805 ret = -EFAULT;
794 goto err_drm_close; 806 goto err_dev_destroy;
795 } 807 }
796 808
797 handles[0] = bo->handle; 809 handles[0] = bo->handle;
@@ -864,7 +876,7 @@ int main(int argc, char **argv)
864 * 876 *
865 * Disable the test for now, until the kernel code has been sanitized. 877 * Disable the test for now, until the kernel code has been sanitized.
866 */ 878 */
867#if 0 879#ifdef EXYNOS_G2D_USERPTR_TEST
868 ret = g2d_blend_test(dev, src, bo, G2D_IMGBUF_USERPTR); 880 ret = g2d_blend_test(dev, src, bo, G2D_IMGBUF_USERPTR);
869 if (ret < 0) 881 if (ret < 0)
870 fprintf(stderr, "failed to test blend operation.\n"); 882 fprintf(stderr, "failed to test blend operation.\n");
@@ -882,9 +894,11 @@ err_rm_fb:
882err_destroy_buffer: 894err_destroy_buffer:
883 exynos_destroy_buffer(bo); 895 exynos_destroy_buffer(bo);
884 896
885err_drm_close: 897err_dev_destroy:
886 drmClose(dev->fd);
887 exynos_device_destroy(dev); 898 exynos_device_destroy(dev);
888 899
889 return 0; 900err_drm_close:
901 drmClose(fd);
902
903 return ret;
890} 904}
diff --git a/tests/exynos/meson.build b/tests/exynos/meson.build
new file mode 100644
index 00000000..940c3ce4
--- /dev/null
+++ b/tests/exynos/meson.build
@@ -0,0 +1,54 @@
1# Copyright © 2017 Intel Corporation
2
3# Permission is hereby granted, free of charge, to any person obtaining a copy
4# of this software and associated documentation files (the "Software"), to deal
5# in the Software without restriction, including without limitation the rights
6# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7# copies of the Software, and to permit persons to whom the Software is
8# furnished to do so, subject to the following conditions:
9
10# The above copyright notice and this permission notice shall be included in
11# all copies or substantial portions of the Software.
12
13# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19# SOFTWARE.
20
21inc_exynos = include_directories('../../exynos')
22
23if with_libkms
24 exynos_fimg2d_test = executable(
25 'exynos_fimg2d_test',
26 files('exynos_fimg2d_test.c'),
27 c_args : warn_c_args,
28 include_directories : [inc_root, inc_drm, inc_exynos,
29 include_directories('../../libkms')],
30 link_with : [libdrm, libkms, libdrm_exynos],
31 dependencies : dep_threads,
32 install : with_install_tests,
33 )
34endif
35
36exynos_fimg2d_perf = executable(
37 'exynos_fimg2d_perf',
38 files('exynos_fimg2d_perf.c'),
39 c_args : warn_c_args,
40 include_directories : [inc_root, inc_drm, inc_exynos],
41 link_with : [libdrm, libdrm_exynos],
42 dependencies : dep_threads,
43 install : with_install_tests,
44)
45
46exynos_fimg2d_event = executable(
47 'exynos_fimg2d_event',
48 files('exynos_fimg2d_event.c'),
49 c_args : warn_c_args,
50 include_directories : [inc_root, inc_drm, inc_exynos],
51 link_with : [libdrm, libdrm_exynos],
52 dependencies : dep_threads,
53 install : with_install_tests,
54)
diff --git a/tests/kms/kms-steal-crtc.c b/tests/kms/kms-steal-crtc.c
index 4b830d27..cd40758d 100644
--- a/tests/kms/kms-steal-crtc.c
+++ b/tests/kms/kms-steal-crtc.c
@@ -21,10 +21,6 @@
21 * IN THE SOFTWARE. 21 * IN THE SOFTWARE.
22 */ 22 */
23 23
24#ifdef HAVE_CONFIG_H
25#include "config.h"
26#endif
27
28#include <errno.h> 24#include <errno.h>
29#include <fcntl.h> 25#include <fcntl.h>
30#include <signal.h> 26#include <signal.h>
diff --git a/tests/kms/kms-universal-planes.c b/tests/kms/kms-universal-planes.c
index 89057bb5..2163c987 100644
--- a/tests/kms/kms-universal-planes.c
+++ b/tests/kms/kms-universal-planes.c
@@ -21,10 +21,6 @@
21 * IN THE SOFTWARE. 21 * IN THE SOFTWARE.
22 */ 22 */
23 23
24#ifdef HAVE_CONFIG_H
25#include "config.h"
26#endif
27
28#include <fcntl.h> 24#include <fcntl.h>
29#include <getopt.h> 25#include <getopt.h>
30#include <stdbool.h> 26#include <stdbool.h>
diff --git a/tests/kms/libkms-test-crtc.c b/tests/kms/libkms-test-crtc.c
index 3adb4903..2c28face 100644
--- a/tests/kms/libkms-test-crtc.c
+++ b/tests/kms/libkms-test-crtc.c
@@ -21,10 +21,6 @@
21 * IN THE SOFTWARE. 21 * IN THE SOFTWARE.
22 */ 22 */
23 23
24#ifdef HAVE_CONFIG_H
25#include "config.h"
26#endif
27
28#include "libkms-test.h" 24#include "libkms-test.h"
29 25
30struct kms_crtc *kms_crtc_create(struct kms_device *device, uint32_t id) 26struct kms_crtc *kms_crtc_create(struct kms_device *device, uint32_t id)
diff --git a/tests/kms/libkms-test-device.c b/tests/kms/libkms-test-device.c
index 53c7349b..d3bb11ce 100644
--- a/tests/kms/libkms-test-device.c
+++ b/tests/kms/libkms-test-device.c
@@ -21,10 +21,6 @@
21 * IN THE SOFTWARE. 21 * IN THE SOFTWARE.
22 */ 22 */
23 23
24#ifdef HAVE_CONFIG_H
25#include "config.h"
26#endif
27
28#include <stdio.h> 24#include <stdio.h>
29#include <string.h> 25#include <string.h>
30#include <unistd.h> 26#include <unistd.h>
@@ -67,7 +63,7 @@ static void kms_device_probe_screens(struct kms_device *device)
67 63
68 device->screens = calloc(res->count_connectors, sizeof(screen)); 64 device->screens = calloc(res->count_connectors, sizeof(screen));
69 if (!device->screens) 65 if (!device->screens)
70 return; 66 goto err_free_resources;
71 67
72 for (i = 0; i < res->count_connectors; i++) { 68 for (i = 0; i < res->count_connectors; i++) {
73 unsigned int *count; 69 unsigned int *count;
@@ -97,6 +93,7 @@ static void kms_device_probe_screens(struct kms_device *device)
97 device->num_screens++; 93 device->num_screens++;
98 } 94 }
99 95
96err_free_resources:
100 drmModeFreeResources(res); 97 drmModeFreeResources(res);
101} 98}
102 99
@@ -112,7 +109,7 @@ static void kms_device_probe_crtcs(struct kms_device *device)
112 109
113 device->crtcs = calloc(res->count_crtcs, sizeof(crtc)); 110 device->crtcs = calloc(res->count_crtcs, sizeof(crtc));
114 if (!device->crtcs) 111 if (!device->crtcs)
115 return; 112 goto err_free_resources;
116 113
117 for (i = 0; i < res->count_crtcs; i++) { 114 for (i = 0; i < res->count_crtcs; i++) {
118 crtc = kms_crtc_create(device, res->crtcs[i]); 115 crtc = kms_crtc_create(device, res->crtcs[i]);
@@ -123,6 +120,7 @@ static void kms_device_probe_crtcs(struct kms_device *device)
123 device->num_crtcs++; 120 device->num_crtcs++;
124 } 121 }
125 122
123err_free_resources:
126 drmModeFreeResources(res); 124 drmModeFreeResources(res);
127} 125}
128 126
@@ -138,7 +136,7 @@ static void kms_device_probe_planes(struct kms_device *device)
138 136
139 device->planes = calloc(res->count_planes, sizeof(plane)); 137 device->planes = calloc(res->count_planes, sizeof(plane));
140 if (!device->planes) 138 if (!device->planes)
141 return; 139 goto err_free_resources;
142 140
143 for (i = 0; i < res->count_planes; i++) { 141 for (i = 0; i < res->count_planes; i++) {
144 plane = kms_plane_create(device, res->planes[i]); 142 plane = kms_plane_create(device, res->planes[i]);
@@ -149,6 +147,7 @@ static void kms_device_probe_planes(struct kms_device *device)
149 device->num_planes++; 147 device->num_planes++;
150 } 148 }
151 149
150err_free_resources:
152 drmModeFreePlaneResources(res); 151 drmModeFreePlaneResources(res);
153} 152}
154 153
diff --git a/tests/kms/libkms-test-framebuffer.c b/tests/kms/libkms-test-framebuffer.c
index c9e5ad3c..9bb2d95b 100644
--- a/tests/kms/libkms-test-framebuffer.c
+++ b/tests/kms/libkms-test-framebuffer.c
@@ -21,10 +21,6 @@
21 * IN THE SOFTWARE. 21 * IN THE SOFTWARE.
22 */ 22 */
23 23
24#ifdef HAVE_CONFIG_H
25#include "config.h"
26#endif
27
28#include <errno.h> 24#include <errno.h>
29#include <string.h> 25#include <string.h>
30 26
diff --git a/tests/kms/libkms-test-plane.c b/tests/kms/libkms-test-plane.c
index 8eb78af1..6c40a3c9 100644
--- a/tests/kms/libkms-test-plane.c
+++ b/tests/kms/libkms-test-plane.c
@@ -21,10 +21,6 @@
21 * IN THE SOFTWARE. 21 * IN THE SOFTWARE.
22 */ 22 */
23 23
24#ifdef HAVE_CONFIG_H
25#include "config.h"
26#endif
27
28#include <errno.h> 24#include <errno.h>
29#include <string.h> 25#include <string.h>
30 26
diff --git a/tests/kms/libkms-test-screen.c b/tests/kms/libkms-test-screen.c
index 33690222..bbe972a0 100644
--- a/tests/kms/libkms-test-screen.c
+++ b/tests/kms/libkms-test-screen.c
@@ -21,10 +21,6 @@
21 * IN THE SOFTWARE. 21 * IN THE SOFTWARE.
22 */ 22 */
23 23
24#ifdef HAVE_CONFIG_H
25#include "config.h"
26#endif
27
28#include <errno.h> 24#include <errno.h>
29#include <string.h> 25#include <string.h>
30 26
diff --git a/tests/kms/meson.build b/tests/kms/meson.build
new file mode 100644
index 00000000..1f7f724d
--- /dev/null
+++ b/tests/kms/meson.build
@@ -0,0 +1,49 @@
1# Copyright © 2017-2018 Intel Corporation
2
3# Permission is hereby granted, free of charge, to any person obtaining a copy
4# of this software and associated documentation files (the "Software"), to deal
5# in the Software without restriction, including without limitation the rights
6# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7# copies of the Software, and to permit persons to whom the Software is
8# furnished to do so, subject to the following conditions:
9
10# The above copyright notice and this permission notice shall be included in
11# all copies or substantial portions of the Software.
12
13# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19# SOFTWARE.
20
21
22libkms_test = static_library(
23 'kms-test',
24 files(
25 'libkms-test-crtc.c', 'libkms-test-device.c', 'libkms-test-framebuffer.c',
26 'libkms-test-plane.c', 'libkms-test-screen.c',
27 ),
28 include_directories : [inc_root, inc_tests, inc_drm],
29 link_with : libdrm,
30 c_args : warn_c_args,
31)
32
33kms_steal_crtc = executable(
34 'kms-steal-crtc',
35 files('kms-steal-crtc.c'),
36 dependencies : dep_cairo,
37 include_directories : [inc_root, inc_tests, inc_drm],
38 link_with : [libkms_test, libutil],
39 install : with_install_tests,
40)
41
42kms_universal_planes = executable(
43 'kms-universal-planes',
44 files('kms-universal-planes.c'),
45 dependencies : dep_cairo,
46 include_directories : [inc_root, inc_tests, inc_drm],
47 link_with : [libkms_test],
48 install : with_install_tests,
49)
diff --git a/tests/kmstest/meson.build b/tests/kmstest/meson.build
new file mode 100644
index 00000000..a47d4951
--- /dev/null
+++ b/tests/kmstest/meson.build
@@ -0,0 +1,30 @@
1# Copyright © 2017 Intel Corporation
2
3# Permission is hereby granted, free of charge, to any person obtaining a copy
4# of this software and associated documentation files (the "Software"), to deal
5# in the Software without restriction, including without limitation the rights
6# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7# copies of the Software, and to permit persons to whom the Software is
8# furnished to do so, subject to the following conditions:
9
10# The above copyright notice and this permission notice shall be included in
11# all copies or substantial portions of the Software.
12
13# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19# SOFTWARE.
20
21kmstest = executable(
22 'kmstest',
23 files('main.c'),
24 c_args : warn_c_args,
25 include_directories : [
26 inc_root, inc_tests, include_directories('../../libkms'), inc_drm,
27 ],
28 link_with : [libutil, libkms, libdrm],
29 install : with_install_tests,
30)
diff --git a/tests/meson.build b/tests/meson.build
new file mode 100644
index 00000000..fdf950b7
--- /dev/null
+++ b/tests/meson.build
@@ -0,0 +1,86 @@
1# Copyright © 2017-2018 Intel Corporation
2
3# Permission is hereby granted, free of charge, to any person obtaining a copy
4# of this software and associated documentation files (the "Software"), to deal
5# in the Software without restriction, including without limitation the rights
6# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7# copies of the Software, and to permit persons to whom the Software is
8# furnished to do so, subject to the following conditions:
9
10# The above copyright notice and this permission notice shall be included in
11# all copies or substantial portions of the Software.
12
13# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19# SOFTWARE.
20
21inc_tests = include_directories('.')
22
23subdir('util')
24subdir('kms')
25subdir('modeprint')
26subdir('proptest')
27subdir('modetest')
28subdir('vbltest')
29if with_libkms
30 subdir('kmstest')
31endif
32if with_radeon
33 subdir('radeon')
34endif
35if with_amdgpu
36 subdir('amdgpu')
37endif
38if with_exynos
39 subdir('exynos')
40endif
41if with_tegra
42 subdir('tegra')
43endif
44if with_etnaviv
45 subdir('etnaviv')
46endif
47if with_nouveau
48 subdir('nouveau')
49endif
50
51drmsl = executable(
52 'drmsl',
53 files('drmsl.c'),
54 include_directories : [inc_root, inc_drm],
55 link_with : libdrm,
56 c_args : warn_c_args,
57)
58
59hash = executable(
60 'hash',
61 files('hash.c'),
62 include_directories : [inc_root, inc_drm],
63 link_with : libdrm,
64 c_args : warn_c_args,
65)
66
67random = executable(
68 'random',
69 files('random.c'),
70 include_directories : [inc_root, inc_drm],
71 link_with : libdrm,
72 c_args : warn_c_args,
73)
74
75drmdevice = executable(
76 'drmdevice',
77 files('drmdevice.c'),
78 include_directories : [inc_root, inc_drm],
79 link_with : libdrm,
80 c_args : warn_c_args,
81)
82
83test('random', random, timeout : 240)
84test('hash', hash)
85test('drmsl', drmsl)
86test('drmdevice', drmdevice)
diff --git a/tests/modeprint/meson.build b/tests/modeprint/meson.build
new file mode 100644
index 00000000..5f0eb24b
--- /dev/null
+++ b/tests/modeprint/meson.build
@@ -0,0 +1,29 @@
1# Copyright © 2017 Intel Corporation
2
3# Permission is hereby granted, free of charge, to any person obtaining a copy
4# of this software and associated documentation files (the "Software"), to deal
5# in the Software without restriction, including without limitation the rights
6# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7# copies of the Software, and to permit persons to whom the Software is
8# furnished to do so, subject to the following conditions:
9
10# The above copyright notice and this permission notice shall be included in
11# all copies or substantial portions of the Software.
12
13# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19# SOFTWARE.
20
21modeprint = executable(
22 'modeprint',
23 files('modeprint.c'),
24 c_args : warn_c_args,
25 include_directories : [inc_root, inc_tests, inc_drm],
26 link_with : libdrm,
27 dependencies : dep_threads,
28 install : with_install_tests,
29)
diff --git a/tests/modeprint/modeprint.c b/tests/modeprint/modeprint.c
index 0d854103..c81dd91d 100644
--- a/tests/modeprint/modeprint.c
+++ b/tests/modeprint/modeprint.c
@@ -244,7 +244,7 @@ static int printFrameBuffer(int fd, drmModeResPtr res, drmModeFBPtr fb)
244 printf("\thandle : %i\n", fb->handle); 244 printf("\thandle : %i\n", fb->handle);
245 printf("\twidth : %i\n", fb->width); 245 printf("\twidth : %i\n", fb->width);
246 printf("\theight : %i\n", fb->height); 246 printf("\theight : %i\n", fb->height);
247 printf("\tpitch : %i\n", fb->pitch);; 247 printf("\tpitch : %i\n", fb->pitch);
248 printf("\tbpp : %i\n", fb->bpp); 248 printf("\tbpp : %i\n", fb->bpp);
249 printf("\tdepth : %i\n", fb->depth); 249 printf("\tdepth : %i\n", fb->depth);
250 printf("\tbuffer_id : %i\n", fb->handle); 250 printf("\tbuffer_id : %i\n", fb->handle);
diff --git a/tests/modetest/buffers.c b/tests/modetest/buffers.c
index 4fd310b9..9b635c0c 100644
--- a/tests/modetest/buffers.c
+++ b/tests/modetest/buffers.c
@@ -24,10 +24,6 @@
24 * IN THE SOFTWARE. 24 * IN THE SOFTWARE.
25 */ 25 */
26 26
27#ifdef HAVE_CONFIG_H
28#include "config.h"
29#endif
30
31#include <assert.h> 27#include <assert.h>
32#include <errno.h> 28#include <errno.h>
33#include <stdio.h> 29#include <stdio.h>
diff --git a/tests/modetest/cursor.c b/tests/modetest/cursor.c
index 6de82a4a..829bced1 100644
--- a/tests/modetest/cursor.c
+++ b/tests/modetest/cursor.c
@@ -22,10 +22,6 @@
22 * IN THE SOFTWARE. 22 * IN THE SOFTWARE.
23 */ 23 */
24 24
25#ifdef HAVE_CONFIG_H
26#include "config.h"
27#endif
28
29#include <assert.h> 25#include <assert.h>
30#include <errno.h> 26#include <errno.h>
31#include <stdio.h> 27#include <stdio.h>
diff --git a/tests/modetest/meson.build b/tests/modetest/meson.build
new file mode 100644
index 00000000..2a081845
--- /dev/null
+++ b/tests/modetest/meson.build
@@ -0,0 +1,29 @@
1# Copyright © 2018 Intel Corporation
2
3# Permission is hereby granted, free of charge, to any person obtaining a copy
4# of this software and associated documentation files (the "Software"), to deal
5# in the Software without restriction, including without limitation the rights
6# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7# copies of the Software, and to permit persons to whom the Software is
8# furnished to do so, subject to the following conditions:
9
10# The above copyright notice and this permission notice shall be included in
11# all copies or substantial portions of the Software.
12
13# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19# SOFTWARE.
20
21modetest = executable(
22 'modetest',
23 files('buffers.c', 'cursor.c', 'modetest.c'),
24 c_args : [warn_c_args, '-Wno-pointer-arith'],
25 include_directories : [inc_root, inc_tests, inc_drm],
26 dependencies : [dep_threads, dep_cairo],
27 link_with : [libdrm, libutil],
28 install : with_install_tests,
29)
diff --git a/tests/modetest/modetest.c b/tests/modetest/modetest.c
index cd911193..62957d84 100644
--- a/tests/modetest/modetest.c
+++ b/tests/modetest/modetest.c
@@ -38,10 +38,6 @@
38 * the mode has been programmed, along with possible test patterns. 38 * the mode has been programmed, along with possible test patterns.
39 */ 39 */
40 40
41#ifdef HAVE_CONFIG_H
42#include "config.h"
43#endif
44
45#include <assert.h> 41#include <assert.h>
46#include <ctype.h> 42#include <ctype.h>
47#include <stdbool.h> 43#include <stdbool.h>
@@ -174,6 +170,15 @@ static const char *mode_flag_names[] = {
174 170
175static bit_name_fn(mode_flag) 171static bit_name_fn(mode_flag)
176 172
173static void dump_fourcc(uint32_t fourcc)
174{
175 printf(" %c%c%c%c",
176 fourcc,
177 fourcc >> 8,
178 fourcc >> 16,
179 fourcc >> 24);
180}
181
177static void dump_encoders(struct device *dev) 182static void dump_encoders(struct device *dev)
178{ 183{
179 drmModeEncoder *encoder; 184 drmModeEncoder *encoder;
@@ -242,6 +247,89 @@ static void dump_blob(struct device *dev, uint32_t blob_id)
242 drmModeFreePropertyBlob(blob); 247 drmModeFreePropertyBlob(blob);
243} 248}
244 249
250static const char *modifier_to_string(uint64_t modifier)
251{
252 switch (modifier) {
253 case DRM_FORMAT_MOD_INVALID:
254 return "INVALID";
255 case DRM_FORMAT_MOD_LINEAR:
256 return "LINEAR";
257 case I915_FORMAT_MOD_X_TILED:
258 return "X_TILED";
259 case I915_FORMAT_MOD_Y_TILED:
260 return "Y_TILED";
261 case I915_FORMAT_MOD_Yf_TILED:
262 return "Yf_TILED";
263 case I915_FORMAT_MOD_Y_TILED_CCS:
264 return "Y_TILED_CCS";
265 case I915_FORMAT_MOD_Yf_TILED_CCS:
266 return "Yf_TILED_CCS";
267 case DRM_FORMAT_MOD_SAMSUNG_64_32_TILE:
268 return "SAMSUNG_64_32_TILE";
269 case DRM_FORMAT_MOD_VIVANTE_TILED:
270 return "VIVANTE_TILED";
271 case DRM_FORMAT_MOD_VIVANTE_SUPER_TILED:
272 return "VIVANTE_SUPER_TILED";
273 case DRM_FORMAT_MOD_VIVANTE_SPLIT_TILED:
274 return "VIVANTE_SPLIT_TILED";
275 case DRM_FORMAT_MOD_VIVANTE_SPLIT_SUPER_TILED:
276 return "VIVANTE_SPLIT_SUPER_TILED";
277 case DRM_FORMAT_MOD_NVIDIA_TEGRA_TILED:
278 return "NVIDIA_TEGRA_TILED";
279 case DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0):
280 return "NVIDIA_16BX2_BLOCK(0)";
281 case DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1):
282 return "NVIDIA_16BX2_BLOCK(1)";
283 case DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2):
284 return "NVIDIA_16BX2_BLOCK(2)";
285 case DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3):
286 return "NVIDIA_16BX2_BLOCK(3)";
287 case DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4):
288 return "NVIDIA_16BX2_BLOCK(4)";
289 case DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5):
290 return "NVIDIA_16BX2_BLOCK(5)";
291 case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED:
292 return "MOD_BROADCOM_VC4_T_TILED";
293 default:
294 return "(UNKNOWN MODIFIER)";
295 }
296}
297
298static void dump_in_formats(struct device *dev, uint32_t blob_id)
299{
300 uint32_t i, j;
301 drmModePropertyBlobPtr blob;
302 struct drm_format_modifier_blob *header;
303 uint32_t *formats;
304 struct drm_format_modifier *modifiers;
305
306 printf("\t\tin_formats blob decoded:\n");
307 blob = drmModeGetPropertyBlob(dev->fd, blob_id);
308 if (!blob) {
309 printf("\n");
310 return;
311 }
312
313 header = blob->data;
314 formats = (uint32_t *) ((char *) header + header->formats_offset);
315 modifiers = (struct drm_format_modifier *)
316 ((char *) header + header->modifiers_offset);
317
318 for (i = 0; i < header->count_formats; i++) {
319 printf("\t\t\t");
320 dump_fourcc(formats[i]);
321 printf(": ");
322 for (j = 0; j < header->count_modifiers; j++) {
323 uint64_t mask = 1ULL << i;
324 if (modifiers[j].formats & mask)
325 printf(" %s", modifier_to_string(modifiers[j].modifier));
326 }
327 printf("\n");
328 }
329
330 drmModeFreePropertyBlob(blob);
331}
332
245static void dump_prop(struct device *dev, drmModePropertyPtr prop, 333static void dump_prop(struct device *dev, drmModePropertyPtr prop,
246 uint32_t prop_id, uint64_t value) 334 uint32_t prop_id, uint64_t value)
247{ 335{
@@ -319,6 +407,9 @@ static void dump_prop(struct device *dev, drmModePropertyPtr prop,
319 printf(" %"PRId64"\n", value); 407 printf(" %"PRId64"\n", value);
320 else 408 else
321 printf(" %"PRIu64"\n", value); 409 printf(" %"PRIu64"\n", value);
410
411 if (strcmp(prop->name, "IN_FORMATS") == 0)
412 dump_in_formats(dev, value);
322} 413}
323 414
324static void dump_connectors(struct device *dev) 415static void dump_connectors(struct device *dev)
@@ -443,7 +534,7 @@ static void dump_planes(struct device *dev)
443 534
444 printf(" formats:"); 535 printf(" formats:");
445 for (j = 0; j < ovr->count_formats; j++) 536 for (j = 0; j < ovr->count_formats; j++)
446 printf(" %4.4s", (char *)&ovr->formats[j]); 537 dump_fourcc(ovr->formats[j]);
447 printf("\n"); 538 printf("\n");
448 539
449 if (plane->props) { 540 if (plane->props) {
@@ -524,7 +615,6 @@ static struct resources *get_resources(struct device *dev)
524 return NULL; 615 return NULL;
525 616
526 drmSetClientCap(dev->fd, DRM_CLIENT_CAP_UNIVERSAL_PLANES, 1); 617 drmSetClientCap(dev->fd, DRM_CLIENT_CAP_UNIVERSAL_PLANES, 1);
527 drmSetClientCap(dev->fd, DRM_CLIENT_CAP_ATOMIC, 1);
528 618
529 res->res = drmModeGetResources(dev->fd); 619 res->res = drmModeGetResources(dev->fd);
530 if (!res->res) { 620 if (!res->res) {
@@ -562,10 +652,13 @@ static struct resources *get_resources(struct device *dev)
562 for (i = 0; i < res->res->count_connectors; i++) { 652 for (i = 0; i < res->res->count_connectors; i++) {
563 struct connector *connector = &res->connectors[i]; 653 struct connector *connector = &res->connectors[i];
564 drmModeConnector *conn = connector->connector; 654 drmModeConnector *conn = connector->connector;
655 int num;
565 656
566 asprintf(&connector->name, "%s-%u", 657 num = asprintf(&connector->name, "%s-%u",
567 util_lookup_connector_type_name(conn->connector_type), 658 util_lookup_connector_type_name(conn->connector_type),
568 conn->connector_type_id); 659 conn->connector_type_id);
660 if (num < 0)
661 goto error;
569 } 662 }
570 663
571#define get_properties(_res, __res, type, Type) \ 664#define get_properties(_res, __res, type, Type) \
@@ -997,7 +1090,8 @@ static int set_plane(struct device *dev, struct plane_arg *p)
997 if (!format_support(ovr, p->fourcc)) 1090 if (!format_support(ovr, p->fourcc))
998 continue; 1091 continue;
999 1092
1000 if ((ovr->possible_crtcs & (1 << pipe)) && !ovr->crtc_id) { 1093 if ((ovr->possible_crtcs & (1 << pipe)) &&
1094 (ovr->crtc_id == 0 || ovr->crtc_id == p->crtc_id)) {
1001 plane_id = ovr->plane_id; 1095 plane_id = ovr->plane_id;
1002 break; 1096 break;
1003 } 1097 }
diff --git a/tests/nouveau/meson.build b/tests/nouveau/meson.build
new file mode 100644
index 00000000..f5d73c1e
--- /dev/null
+++ b/tests/nouveau/meson.build
@@ -0,0 +1,30 @@
1# Copyright © 2017 Intel Corporation
2
3# Permission is hereby granted, free of charge, to any person obtaining a copy
4# of this software and associated documentation files (the "Software"), to deal
5# in the Software without restriction, including without limitation the rights
6# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7# copies of the Software, and to permit persons to whom the Software is
8# furnished to do so, subject to the following conditions:
9
10# The above copyright notice and this permission notice shall be included in
11# all copies or substantial portions of the Software.
12
13# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19# SOFTWARE.
20
21threaded = executable(
22 'threaded',
23 files('threaded.c'),
24 dependencies : [dep_dl, dep_threads],
25 include_directories : [inc_root, inc_drm, include_directories('../../nouveau')],
26 link_with : [libdrm, libdrm_nouveau],
27 c_args : warn_c_args,
28)
29
30test('threaded', threaded)
diff --git a/tests/nouveau/threaded.c b/tests/nouveau/threaded.c
index 281af460..3669bcd3 100644
--- a/tests/nouveau/threaded.c
+++ b/tests/nouveau/threaded.c
@@ -20,10 +20,6 @@
20 * OTHER DEALINGS IN THE SOFTWARE. 20 * OTHER DEALINGS IN THE SOFTWARE.
21 */ 21 */
22 22
23#ifdef HAVE_CONFIG_H
24# include "config.h"
25#endif
26
27#include <sys/ioctl.h> 23#include <sys/ioctl.h>
28#include <dlfcn.h> 24#include <dlfcn.h>
29#include <fcntl.h> 25#include <fcntl.h>
diff --git a/tests/proptest/meson.build b/tests/proptest/meson.build
new file mode 100644
index 00000000..22d7473e
--- /dev/null
+++ b/tests/proptest/meson.build
@@ -0,0 +1,28 @@
1# Copyright © 2017 Intel Corporation
2
3# Permission is hereby granted, free of charge, to any person obtaining a copy
4# of this software and associated documentation files (the "Software"), to deal
5# in the Software without restriction, including without limitation the rights
6# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7# copies of the Software, and to permit persons to whom the Software is
8# furnished to do so, subject to the following conditions:
9
10# The above copyright notice and this permission notice shall be included in
11# all copies or substantial portions of the Software.
12
13# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19# SOFTWARE.
20
21proptest = executable(
22 'proptest',
23 files('proptest.c'),
24 c_args : warn_c_args,
25 include_directories : [inc_root, inc_tests, inc_drm],
26 link_with : [libdrm, libutil],
27 install : with_install_tests,
28)
diff --git a/tests/radeon/meson.build b/tests/radeon/meson.build
new file mode 100644
index 00000000..9e4f916e
--- /dev/null
+++ b/tests/radeon/meson.build
@@ -0,0 +1,27 @@
1# Copyright © 2017 Intel Corporation
2
3# Permission is hereby granted, free of charge, to any person obtaining a copy
4# of this software and associated documentation files (the "Software"), to deal
5# in the Software without restriction, including without limitation the rights
6# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7# copies of the Software, and to permit persons to whom the Software is
8# furnished to do so, subject to the following conditions:
9
10# The above copyright notice and this permission notice shall be included in
11# all copies or substantial portions of the Software.
12
13# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19# SOFTWARE.
20
21radeon_ttm = executable(
22 'radeon_ttm',
23 files('rbo.c', 'radeon_ttm.c'),
24 include_directories : [inc_root, inc_drm],
25 link_with : libdrm,
26 c_args : warn_c_args,
27)
diff --git a/tests/tegra/meson.build b/tests/tegra/meson.build
new file mode 100644
index 00000000..9c74ac4a
--- /dev/null
+++ b/tests/tegra/meson.build
@@ -0,0 +1,27 @@
1# Copyright © 2017 Intel Corporation
2
3# Permission is hereby granted, free of charge, to any person obtaining a copy
4# of this software and associated documentation files (the "Software"), to deal
5# in the Software without restriction, including without limitation the rights
6# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7# copies of the Software, and to permit persons to whom the Software is
8# furnished to do so, subject to the following conditions:
9
10# The above copyright notice and this permission notice shall be included in
11# all copies or substantial portions of the Software.
12
13# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19# SOFTWARE.
20
21openclose = executable(
22 'openclose',
23 files('openclose.c'),
24 include_directories : [inc_root, inc_drm, include_directories('../../tegra')],
25 c_args : warn_c_args,
26 link_with : [libdrm, libdrm_tegra],
27)
diff --git a/tests/tegra/openclose.c b/tests/tegra/openclose.c
index 881d8aa4..f80f52d4 100644
--- a/tests/tegra/openclose.c
+++ b/tests/tegra/openclose.c
@@ -20,10 +20,6 @@
20 * OTHER DEALINGS IN THE SOFTWARE. 20 * OTHER DEALINGS IN THE SOFTWARE.
21 */ 21 */
22 22
23#ifdef HAVE_CONFIG_H
24# include "config.h"
25#endif
26
27#include <fcntl.h> 23#include <fcntl.h>
28#include <stdio.h> 24#include <stdio.h>
29#include <unistd.h> 25#include <unistd.h>
diff --git a/tests/util/format.c b/tests/util/format.c
index 043cfe7f..15ac5e1e 100644
--- a/tests/util/format.c
+++ b/tests/util/format.c
@@ -23,10 +23,6 @@
23 * IN THE SOFTWARE. 23 * IN THE SOFTWARE.
24 */ 24 */
25 25
26#ifdef HAVE_CONFIG_H
27#include "config.h"
28#endif
29
30#include <stdint.h> 26#include <stdint.h>
31#include <stdlib.h> 27#include <stdlib.h>
32#include <string.h> 28#include <string.h>
diff --git a/tests/util/kms.c b/tests/util/kms.c
index 959b6881..8b3e7878 100644
--- a/tests/util/kms.c
+++ b/tests/util/kms.c
@@ -37,10 +37,6 @@
37 * the mode has been programmed, along with possible test patterns. 37 * the mode has been programmed, along with possible test patterns.
38 */ 38 */
39 39
40#ifdef HAVE_CONFIG_H
41#include "config.h"
42#endif
43
44#include <errno.h> 40#include <errno.h>
45#include <stdint.h> 41#include <stdint.h>
46#include <stdio.h> 42#include <stdio.h>
@@ -79,6 +75,7 @@ static const struct type_name encoder_type_names[] = {
79 { DRM_MODE_ENCODER_VIRTUAL, "Virtual" }, 75 { DRM_MODE_ENCODER_VIRTUAL, "Virtual" },
80 { DRM_MODE_ENCODER_DSI, "DSI" }, 76 { DRM_MODE_ENCODER_DSI, "DSI" },
81 { DRM_MODE_ENCODER_DPMST, "DPMST" }, 77 { DRM_MODE_ENCODER_DPMST, "DPMST" },
78 { DRM_MODE_ENCODER_DPI, "DPI" },
82}; 79};
83 80
84const char *util_lookup_encoder_type_name(unsigned int type) 81const char *util_lookup_encoder_type_name(unsigned int type)
@@ -117,6 +114,7 @@ static const struct type_name connector_type_names[] = {
117 { DRM_MODE_CONNECTOR_eDP, "eDP" }, 114 { DRM_MODE_CONNECTOR_eDP, "eDP" },
118 { DRM_MODE_CONNECTOR_VIRTUAL, "Virtual" }, 115 { DRM_MODE_CONNECTOR_VIRTUAL, "Virtual" },
119 { DRM_MODE_CONNECTOR_DSI, "DSI" }, 116 { DRM_MODE_CONNECTOR_DSI, "DSI" },
117 { DRM_MODE_CONNECTOR_DPI, "DPI" },
120}; 118};
121 119
122const char *util_lookup_connector_type_name(unsigned int type) 120const char *util_lookup_connector_type_name(unsigned int type)
@@ -145,6 +143,7 @@ static const char * const modules[] = {
145 "virtio_gpu", 143 "virtio_gpu",
146 "mediatek", 144 "mediatek",
147 "meson", 145 "meson",
146 "pl111",
148}; 147};
149 148
150int util_open(const char *device, const char *module) 149int util_open(const char *device, const char *module)
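
The DPI encoder/connector entries and the pl111 module above extend simple lookup tables consumed elsewhere in the tests. A hedged sketch of how the connector name ends up printed (the header path is assumed, and the function mirrors the asprintf pattern used by modetest earlier in this series):

#define _GNU_SOURCE             /* for asprintf() */
#include <stdio.h>
#include <stdlib.h>
#include <xf86drmMode.h>
#include "util/kms.h"           /* tests/util header; path assumed */

/* Print a connector as "<type>-<id>", e.g. "DPI-1" with the new entry. */
static void print_connector_name(const drmModeConnector *conn)
{
    char *name;

    if (asprintf(&name, "%s-%u",
                 util_lookup_connector_type_name(conn->connector_type),
                 conn->connector_type_id) < 0)
        return;                 /* mirror the patch: bail out on failure */

    printf("connector: %s\n", name);
    free(name);
}
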
diff --git a/tests/util/meson.build b/tests/util/meson.build
new file mode 100644
index 00000000..7fa1a4b7
--- /dev/null
+++ b/tests/util/meson.build
@@ -0,0 +1,28 @@
1# Copyright © 2017-2018 Intel Corporation
2
3# Permission is hereby granted, free of charge, to any person obtaining a copy
4# of this software and associated documentation files (the "Software"), to deal
5# in the Software without restriction, including without limitation the rights
6# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7# copies of the Software, and to permit persons to whom the Software is
8# furnished to do so, subject to the following conditions:
9
10# The above copyright notice and this permission notice shall be included in
11# all copies or substantial portions of the Software.
12
13# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19# SOFTWARE.
20
21
22libutil = static_library(
23 'util',
24 [files('format.c', 'kms.c', 'pattern.c'), config_file],
25 include_directories : [inc_root, inc_drm],
26 link_with : libdrm,
27 dependencies : dep_cairo
28)
diff --git a/tests/util/pattern.c b/tests/util/pattern.c
index 00b08a8c..9fa0a417 100644
--- a/tests/util/pattern.c
+++ b/tests/util/pattern.c
@@ -23,10 +23,6 @@
23 * IN THE SOFTWARE. 23 * IN THE SOFTWARE.
24 */ 24 */
25 25
26#ifdef HAVE_CONFIG_H
27#include "config.h"
28#endif
29
30#include <stdint.h> 26#include <stdint.h>
31#include <stdio.h> 27#include <stdio.h>
32#include <stdlib.h> 28#include <stdlib.h>
@@ -34,7 +30,7 @@
34 30
35#include <drm_fourcc.h> 31#include <drm_fourcc.h>
36 32
37#ifdef HAVE_CAIRO 33#if HAVE_CAIRO
38#include <cairo.h> 34#include <cairo.h>
39#include <math.h> 35#include <math.h>
40#endif 36#endif
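
The switch from #ifdef HAVE_CAIRO to #if HAVE_CAIRO matches the meson build, which always defines such macros to 0 or 1 rather than leaving them undefined. A small illustration of why the distinction matters (values are made up):

#define HAVE_CAIRO 0        /* meson-style: always defined, either 0 or 1 */

#if HAVE_CAIRO              /* correctly skipped: the value is 0          */
/* cairo-only code */
#endif

#ifdef HAVE_CAIRO           /* wrongly taken: the macro exists even       */
/* cairo-only code */       /* though its value says "disabled"           */
#endif
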
@@ -546,10 +542,9 @@ static void fill_smpte(const struct util_format_info *info, void *planes[3],
546static void make_pwetty(void *data, unsigned int width, unsigned int height, 542static void make_pwetty(void *data, unsigned int width, unsigned int height,
547 unsigned int stride, uint32_t format) 543 unsigned int stride, uint32_t format)
548{ 544{
549#ifdef HAVE_CAIRO 545#if HAVE_CAIRO
550 cairo_surface_t *surface; 546 cairo_surface_t *surface;
551 cairo_t *cr; 547 cairo_t *cr;
552 int x, y;
553 cairo_format_t cairo_format; 548 cairo_format_t cairo_format;
554 549
555 /* we can ignore the order of R,G,B channels */ 550 /* we can ignore the order of R,G,B channels */
@@ -576,8 +571,8 @@ static void make_pwetty(void *data, unsigned int width, unsigned int height,
576 cairo_surface_destroy(surface); 571 cairo_surface_destroy(surface);
577 572
578 cairo_set_line_cap(cr, CAIRO_LINE_CAP_SQUARE); 573 cairo_set_line_cap(cr, CAIRO_LINE_CAP_SQUARE);
579 for (x = 0; x < width; x += 250) 574 for (unsigned x = 0; x < width; x += 250)
580 for (y = 0; y < height; y += 250) { 575 for (unsigned y = 0; y < height; y += 250) {
581 char buf[64]; 576 char buf[64];
582 577
583 cairo_move_to(cr, x, y - 20); 578 cairo_move_to(cr, x, y - 20);
@@ -824,8 +819,8 @@ static void fill_tiles(const struct util_format_info *info, void *planes[3],
824 } 819 }
825} 820}
826 821
827static void fill_plain(const struct util_format_info *info, void *planes[3], 822static void fill_plain(void *planes[3],
828 unsigned int width, unsigned int height, 823 unsigned int height,
829 unsigned int stride) 824 unsigned int stride)
830{ 825{
831 memset(planes[0], 0x77, stride * height); 826 memset(planes[0], 0x77, stride * height);
@@ -861,7 +856,7 @@ void util_fill_pattern(uint32_t format, enum util_fill_pattern pattern,
861 return fill_smpte(info, planes, width, height, stride); 856 return fill_smpte(info, planes, width, height, stride);
862 857
863 case UTIL_PATTERN_PLAIN: 858 case UTIL_PATTERN_PLAIN:
864 return fill_plain(info, planes, width, height, stride); 859 return fill_plain(planes, height, stride);
865 860
866 default: 861 default:
867 printf("Error: unsupported test pattern %u.\n", pattern); 862 printf("Error: unsupported test pattern %u.\n", pattern);
diff --git a/tests/vbltest/meson.build b/tests/vbltest/meson.build
new file mode 100644
index 00000000..ae52ab88
--- /dev/null
+++ b/tests/vbltest/meson.build
@@ -0,0 +1,28 @@
1# Copyright © 2017-2018 Intel Corporation
2
3# Permission is hereby granted, free of charge, to any person obtaining a copy
4# of this software and associated documentation files (the "Software"), to deal
5# in the Software without restriction, including without limitation the rights
6# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7# copies of the Software, and to permit persons to whom the Software is
8# furnished to do so, subject to the following conditions:
9
10# The above copyright notice and this permission notice shall be included in
11# all copies or substantial portions of the Software.
12
13# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19# SOFTWARE.
20
21vbltest = executable(
22 'vbltest',
23 files('vbltest.c'),
24 c_args : warn_c_args,
25 include_directories : [inc_root, inc_tests, inc_drm],
26 link_with : [libdrm, libutil],
27 install : with_install_tests,
28)
diff --git a/tests/vbltest/vbltest.c b/tests/vbltest/vbltest.c
index 3f6b803a..48708d20 100644
--- a/tests/vbltest/vbltest.c
+++ b/tests/vbltest/vbltest.c
@@ -24,10 +24,6 @@
24 * IN THE SOFTWARE. 24 * IN THE SOFTWARE.
25 */ 25 */
26 26
27#ifdef HAVE_CONFIG_H
28#include "config.h"
29#endif
30
31#include <assert.h> 27#include <assert.h>
32#include <stdio.h> 28#include <stdio.h>
33#include <stdlib.h> 29#include <stdlib.h>
diff --git a/vc4/meson.build b/vc4/meson.build
new file mode 100644
index 00000000..0136987b
--- /dev/null
+++ b/vc4/meson.build
@@ -0,0 +1,28 @@
1# Copyright © 2017 Intel Corporation
2
3# Permission is hereby granted, free of charge, to any person obtaining a copy
4# of this software and associated documentation files (the "Software"), to deal
5# in the Software without restriction, including without limitation the rights
6# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7# copies of the Software, and to permit persons to whom the Software is
8# furnished to do so, subject to the following conditions:
9
10# The above copyright notice and this permission notice shall be included in
11# all copies or substantial portions of the Software.
12
13# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19# SOFTWARE.
20
21install_headers('vc4_packet.h', 'vc4_qpu_defines.h', subdir : 'libdrm')
22
23pkg.generate(
24 name : 'libdrm_vc4',
25 version : meson.project_version(),
26 requires_private : 'libdrm',
27 description : 'Userspace interface to vc4 kernel DRM services',
28)
diff --git a/xf86atomic.h b/xf86atomic.h
index 922b37da..2d733bd5 100644
--- a/xf86atomic.h
+++ b/xf86atomic.h
@@ -34,10 +34,6 @@
34#ifndef LIBDRM_ATOMICS_H 34#ifndef LIBDRM_ATOMICS_H
35#define LIBDRM_ATOMICS_H 35#define LIBDRM_ATOMICS_H
36 36
37#ifdef HAVE_CONFIG_H
38#include "config.h"
39#endif
40
41#if HAVE_LIBDRM_ATOMIC_PRIMITIVES 37#if HAVE_LIBDRM_ATOMIC_PRIMITIVES
42 38
43#define HAS_ATOMIC_OPS 1 39#define HAS_ATOMIC_OPS 1
@@ -101,7 +97,7 @@ typedef struct { LIBDRM_ATOMIC_TYPE atomic; } atomic_t;
101 97
102#endif 98#endif
103 99
104#if ! HAS_ATOMIC_OPS 100#if !defined(HAS_ATOMIC_OPS)
105#error libdrm requires atomic operations, please define them for your CPU/compiler. 101#error libdrm requires atomic operations, please define them for your CPU/compiler.
106#endif 102#endif
107 103
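
Guarding with !defined(HAS_ATOMIC_OPS) keeps the error check working now that config.h is gone: the macro is only ever defined (to 1) when some backend is selected. For context, a hedged sketch of how these private atomics are typically used for reference counting elsewhere in libdrm; the object type is invented, and atomic_set/atomic_inc/atomic_dec_and_test are the helpers this header has historically provided:

#include <stdlib.h>
#include "xf86atomic.h"

/* Invented example object; only the refcount handling is the point. */
struct demo_bo {
    atomic_t refcnt;            /* set to 1 at creation with atomic_set() */
};

static void demo_bo_ref(struct demo_bo *bo)
{
    atomic_inc(&bo->refcnt);
}

static void demo_bo_unref(struct demo_bo *bo)
{
    if (atomic_dec_and_test(&bo->refcnt))   /* true when the count hits zero */
        free(bo);
}
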
diff --git a/xf86drm.c b/xf86drm.c
index 82fb0e22..390e1eb1 100644
--- a/xf86drm.c
+++ b/xf86drm.c
@@ -31,9 +31,6 @@
31 * DEALINGS IN THE SOFTWARE. 31 * DEALINGS IN THE SOFTWARE.
32 */ 32 */
33 33
34#ifdef HAVE_CONFIG_H
35# include <config.h>
36#endif
37#include <stdio.h> 34#include <stdio.h>
38#include <stdlib.h> 35#include <stdlib.h>
39#include <stdbool.h> 36#include <stdbool.h>
@@ -293,7 +290,7 @@ static int drmMatchBusID(const char *id1, const char *id2, int pci_domain_ok)
293 * If any other failure happened then it will output error mesage using 290 * If any other failure happened then it will output error mesage using
294 * drmMsg() call. 291 * drmMsg() call.
295 */ 292 */
296#if !defined(UDEV) 293#if !UDEV
297static int chown_check_return(const char *path, uid_t owner, gid_t group) 294static int chown_check_return(const char *path, uid_t owner, gid_t group)
298{ 295{
299 int rv; 296 int rv;
@@ -332,7 +329,7 @@ static int drmOpenDevice(dev_t dev, int minor, int type)
332 int fd; 329 int fd;
333 mode_t devmode = DRM_DEV_MODE, serv_mode; 330 mode_t devmode = DRM_DEV_MODE, serv_mode;
334 gid_t serv_group; 331 gid_t serv_group;
335#if !defined(UDEV) 332#if !UDEV
336 int isroot = !geteuid(); 333 int isroot = !geteuid();
337 uid_t user = DRM_DEV_UID; 334 uid_t user = DRM_DEV_UID;
338 gid_t group = DRM_DEV_GID; 335 gid_t group = DRM_DEV_GID;
@@ -361,7 +358,7 @@ static int drmOpenDevice(dev_t dev, int minor, int type)
361 devmode &= ~(S_IXUSR|S_IXGRP|S_IXOTH); 358 devmode &= ~(S_IXUSR|S_IXGRP|S_IXOTH);
362 } 359 }
363 360
364#if !defined(UDEV) 361#if !UDEV
365 if (stat(DRM_DIR_NAME, &st)) { 362 if (stat(DRM_DIR_NAME, &st)) {
366 if (!isroot) 363 if (!isroot)
367 return DRM_ERR_NOT_ROOT; 364 return DRM_ERR_NOT_ROOT;
@@ -414,7 +411,7 @@ wait_for_udev:
414 if (fd >= 0) 411 if (fd >= 0)
415 return fd; 412 return fd;
416 413
417#if !defined(UDEV) 414#if !UDEV
418 /* Check if the device node is not what we expect it to be, and recreate it 415 /* Check if the device node is not what we expect it to be, and recreate it
419 * and try again if so. 416 * and try again if so.
420 */ 417 */
@@ -866,8 +863,6 @@ drmVersionPtr drmGetVersion(int fd)
866 drmVersionPtr retval; 863 drmVersionPtr retval;
867 drm_version_t *version = drmMalloc(sizeof(*version)); 864 drm_version_t *version = drmMalloc(sizeof(*version));
868 865
869 memclear(*version);
870
871 if (drmIoctl(fd, DRM_IOCTL_VERSION, version)) { 866 if (drmIoctl(fd, DRM_IOCTL_VERSION, version)) {
872 drmFreeKernelVersion(version); 867 drmFreeKernelVersion(version);
873 return NULL; 868 return NULL;
@@ -994,8 +989,10 @@ char *drmGetBusid(int fd)
994 if (drmIoctl(fd, DRM_IOCTL_GET_UNIQUE, &u)) 989 if (drmIoctl(fd, DRM_IOCTL_GET_UNIQUE, &u))
995 return NULL; 990 return NULL;
996 u.unique = drmMalloc(u.unique_len + 1); 991 u.unique = drmMalloc(u.unique_len + 1);
997 if (drmIoctl(fd, DRM_IOCTL_GET_UNIQUE, &u)) 992 if (drmIoctl(fd, DRM_IOCTL_GET_UNIQUE, &u)) {
993 drmFree(u.unique);
998 return NULL; 994 return NULL;
995 }
999 u.unique[u.unique_len] = '\0'; 996 u.unique[u.unique_len] = '\0';
1000 997
1001 return u.unique; 998 return u.unique;
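
The added drmFree() plugs a leak in the usual query-size / allocate / fetch idiom: the first DRM_IOCTL_GET_UNIQUE call only reports the length, the second fills the buffer and may still fail. A condensed reconstruction of drmGetBusid() for context (memclear() is libdrm's internal zeroing macro; drmMalloc()/drmFree() are its allocation helpers):

#include <xf86drm.h>

/* Sketch mirroring drmGetBusid(): query the length, allocate, fetch. */
static char *get_busid_sketch(int fd)
{
    drm_unique_t u;

    memclear(u);
    if (drmIoctl(fd, DRM_IOCTL_GET_UNIQUE, &u))     /* pass 1: length only */
        return NULL;

    u.unique = drmMalloc(u.unique_len + 1);
    if (drmIoctl(fd, DRM_IOCTL_GET_UNIQUE, &u)) {   /* pass 2: fetch data */
        drmFree(u.unique);                          /* the fix: free pass-1 buffer */
        return NULL;
    }
    u.unique[u.unique_len] = '\0';
    return u.unique;
}
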
@@ -1523,14 +1520,12 @@ drm_context_t *drmGetReservedContextList(int fd, int *count)
1523 1520
1524 if (!(list = drmMalloc(res.count * sizeof(*list)))) 1521 if (!(list = drmMalloc(res.count * sizeof(*list))))
1525 return NULL; 1522 return NULL;
1526 if (!(retval = drmMalloc(res.count * sizeof(*retval)))) { 1523 if (!(retval = drmMalloc(res.count * sizeof(*retval))))
1527 drmFree(list); 1524 goto err_free_list;
1528 return NULL;
1529 }
1530 1525
1531 res.contexts = list; 1526 res.contexts = list;
1532 if (drmIoctl(fd, DRM_IOCTL_RES_CTX, &res)) 1527 if (drmIoctl(fd, DRM_IOCTL_RES_CTX, &res))
1533 return NULL; 1528 goto err_free_context;
1534 1529
1535 for (i = 0; i < res.count; i++) 1530 for (i = 0; i < res.count; i++)
1536 retval[i] = list[i].handle; 1531 retval[i] = list[i].handle;
@@ -1538,6 +1533,12 @@ drm_context_t *drmGetReservedContextList(int fd, int *count)
1538 1533
1539 *count = res.count; 1534 *count = res.count;
1540 return retval; 1535 return retval;
1536
1537err_free_list:
1538 drmFree(list);
1539err_free_context:
1540 drmFree(retval);
1541 return NULL;
1541} 1542}
1542 1543
1543void drmFreeReservedContextList(drm_context_t *pt) 1544void drmFreeReservedContextList(drm_context_t *pt)
@@ -1691,6 +1692,43 @@ int drmUpdateDrawableInfo(int fd, drm_drawable_t handle,
1691 return 0; 1692 return 0;
1692} 1693}
1693 1694
1695int drmCrtcGetSequence(int fd, uint32_t crtcId, uint64_t *sequence, uint64_t *ns)
1696{
1697 struct drm_crtc_get_sequence get_seq;
1698 int ret;
1699
1700 memclear(get_seq);
1701 get_seq.crtc_id = crtcId;
1702 ret = drmIoctl(fd, DRM_IOCTL_CRTC_GET_SEQUENCE, &get_seq);
1703 if (ret)
1704 return ret;
1705
1706 if (sequence)
1707 *sequence = get_seq.sequence;
1708 if (ns)
1709 *ns = get_seq.sequence_ns;
1710 return 0;
1711}
1712
1713int drmCrtcQueueSequence(int fd, uint32_t crtcId, uint32_t flags, uint64_t sequence,
1714 uint64_t *sequence_queued, uint64_t user_data)
1715{
1716 struct drm_crtc_queue_sequence queue_seq;
1717 int ret;
1718
1719 memclear(queue_seq);
1720 queue_seq.crtc_id = crtcId;
1721 queue_seq.flags = flags;
1722 queue_seq.sequence = sequence;
1723 queue_seq.user_data = user_data;
1724
1725 ret = drmIoctl(fd, DRM_IOCTL_CRTC_QUEUE_SEQUENCE, &queue_seq);
1726 if (ret == 0 && sequence_queued)
1727 *sequence_queued = queue_seq.sequence;
1728
1729 return ret;
1730}
1731
1694/** 1732/**
1695 * Acquire the AGP device. 1733 * Acquire the AGP device.
1696 * 1734 *
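
drmCrtcGetSequence() and drmCrtcQueueSequence() wrap the new per-CRTC sequence ioctls. A hedged usage sketch, assuming fd is an open DRM device and crtc_id a valid CRTC object; DRM_CRTC_SEQUENCE_RELATIVE is the drm.h flag requesting a target relative to the current count, and the queued event is delivered through drmHandleEvent()'s sequence_handler (see the header changes below):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <xf86drm.h>

/* Read the current sequence, then queue an event one frame in the future. */
static int queue_next_vblank(int fd, uint32_t crtc_id, uint64_t user_data)
{
    uint64_t seq, ns, queued;

    if (drmCrtcGetSequence(fd, crtc_id, &seq, &ns))
        return -1;
    printf("CRTC %u at sequence %" PRIu64 " (%" PRIu64 " ns)\n",
           crtc_id, seq, ns);

    /* Relative request: fire at current sequence + 1. */
    return drmCrtcQueueSequence(fd, crtc_id, DRM_CRTC_SEQUENCE_RELATIVE,
                                1, &queued, user_data);
}
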
@@ -2781,12 +2819,11 @@ static char *drmGetMinorNameForFD(int fd, int type)
2781{ 2819{
2782#ifdef __linux__ 2820#ifdef __linux__
2783 DIR *sysdir; 2821 DIR *sysdir;
2784 struct dirent *pent, *ent; 2822 struct dirent *ent;
2785 struct stat sbuf; 2823 struct stat sbuf;
2786 const char *name = drmGetMinorName(type); 2824 const char *name = drmGetMinorName(type);
2787 int len; 2825 int len;
2788 char dev_name[64], buf[64]; 2826 char dev_name[64], buf[64];
2789 long name_max;
2790 int maj, min; 2827 int maj, min;
2791 2828
2792 if (!name) 2829 if (!name)
@@ -2809,30 +2846,16 @@ static char *drmGetMinorNameForFD(int fd, int type)
2809 if (!sysdir) 2846 if (!sysdir)
2810 return NULL; 2847 return NULL;
2811 2848
2812 name_max = fpathconf(dirfd(sysdir), _PC_NAME_MAX); 2849 while ((ent = readdir(sysdir))) {
2813 if (name_max == -1)
2814 goto out_close_dir;
2815
2816 pent = malloc(offsetof(struct dirent, d_name) + name_max + 1);
2817 if (pent == NULL)
2818 goto out_close_dir;
2819
2820 while (readdir_r(sysdir, pent, &ent) == 0 && ent != NULL) {
2821 if (strncmp(ent->d_name, name, len) == 0) { 2850 if (strncmp(ent->d_name, name, len) == 0) {
2822 snprintf(dev_name, sizeof(dev_name), DRM_DIR_NAME "/%s", 2851 snprintf(dev_name, sizeof(dev_name), DRM_DIR_NAME "/%s",
2823 ent->d_name); 2852 ent->d_name);
2824 2853
2825 free(pent);
2826 closedir(sysdir); 2854 closedir(sysdir);
2827
2828 return strdup(dev_name); 2855 return strdup(dev_name);
2829 } 2856 }
2830 } 2857 }
2831 2858 return NULL;
2832 free(pent);
2833
2834out_close_dir:
2835 closedir(sysdir);
2836#else 2859#else
2837 struct stat sbuf; 2860 struct stat sbuf;
2838 char buf[PATH_MAX + 1]; 2861 char buf[PATH_MAX + 1];
@@ -2873,7 +2896,6 @@ out_close_dir:
2873 2896
2874 return strdup(buf); 2897 return strdup(buf);
2875#endif 2898#endif
2876 return NULL;
2877} 2899}
2878 2900
2879char *drmGetPrimaryDeviceNameFromFd(int fd) 2901char *drmGetPrimaryDeviceNameFromFd(int fd)
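
readdir_r() is deprecated in current glibc, and plain readdir() is safe here because each caller walks its own DIR stream, so the fpathconf(_PC_NAME_MAX) sizing and the manual dirent buffer can go. The resulting loop shape, reduced to a generic sketch (directory and prefix are parameters rather than the sysfs path used by the patch):

#include <dirent.h>
#include <string.h>

/* Return a copy of the first entry in `dirpath` whose name starts with
 * `prefix`, or NULL when nothing matches. */
static char *find_entry(const char *dirpath, const char *prefix)
{
    DIR *dir = opendir(dirpath);
    struct dirent *ent;
    char *found = NULL;

    if (!dir)
        return NULL;

    while ((ent = readdir(dir))) {
        if (strncmp(ent->d_name, prefix, strlen(prefix)) == 0) {
            found = strdup(ent->d_name);
            break;
        }
    }
    closedir(dir);
    return found;
}
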
@@ -3023,32 +3045,32 @@ static int drmParsePciBusInfo(int maj, int min, drmPciBusInfoPtr info)
3023#endif 3045#endif
3024} 3046}
3025 3047
3026static int drmCompareBusInfo(drmDevicePtr a, drmDevicePtr b) 3048int drmDevicesEqual(drmDevicePtr a, drmDevicePtr b)
3027{ 3049{
3028 if (a == NULL || b == NULL) 3050 if (a == NULL || b == NULL)
3029 return -1; 3051 return 0;
3030 3052
3031 if (a->bustype != b->bustype) 3053 if (a->bustype != b->bustype)
3032 return -1; 3054 return 0;
3033 3055
3034 switch (a->bustype) { 3056 switch (a->bustype) {
3035 case DRM_BUS_PCI: 3057 case DRM_BUS_PCI:
3036 return memcmp(a->businfo.pci, b->businfo.pci, sizeof(drmPciBusInfo)); 3058 return memcmp(a->businfo.pci, b->businfo.pci, sizeof(drmPciBusInfo)) == 0;
3037 3059
3038 case DRM_BUS_USB: 3060 case DRM_BUS_USB:
3039 return memcmp(a->businfo.usb, b->businfo.usb, sizeof(drmUsbBusInfo)); 3061 return memcmp(a->businfo.usb, b->businfo.usb, sizeof(drmUsbBusInfo)) == 0;
3040 3062
3041 case DRM_BUS_PLATFORM: 3063 case DRM_BUS_PLATFORM:
3042 return memcmp(a->businfo.platform, b->businfo.platform, sizeof(drmPlatformBusInfo)); 3064 return memcmp(a->businfo.platform, b->businfo.platform, sizeof(drmPlatformBusInfo)) == 0;
3043 3065
3044 case DRM_BUS_HOST1X: 3066 case DRM_BUS_HOST1X:
3045 return memcmp(a->businfo.host1x, b->businfo.host1x, sizeof(drmHost1xBusInfo)); 3067 return memcmp(a->businfo.host1x, b->businfo.host1x, sizeof(drmHost1xBusInfo)) == 0;
3046 3068
3047 default: 3069 default:
3048 break; 3070 break;
3049 } 3071 }
3050 3072
3051 return -1; 3073 return 0;
3052} 3074}
3053 3075
3054static int drmGetNodeType(const char *name) 3076static int drmGetNodeType(const char *name)
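
drmCompareBusInfo() becomes the public drmDevicesEqual(), returning nonzero when two drmDevicePtr describe the same physical device. A hedged sketch of the intended use, assuming two already-open DRM file descriptors; drmGetDevice2() and drmFreeDevice() are the existing companions in xf86drm.h:

#include <stdbool.h>
#include <xf86drm.h>

/* Do two open DRM fds point at the same physical device? */
static bool same_gpu(int fd_a, int fd_b)
{
    drmDevicePtr dev_a, dev_b;
    bool equal;

    if (drmGetDevice2(fd_a, 0, &dev_a))
        return false;
    if (drmGetDevice2(fd_b, 0, &dev_b)) {
        drmFreeDevice(&dev_a);
        return false;
    }

    equal = drmDevicesEqual(dev_a, dev_b);

    drmFreeDevice(&dev_a);
    drmFreeDevice(&dev_b);
    return equal;
}
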
@@ -3663,7 +3685,7 @@ static void drmFoldDuplicatedDevices(drmDevicePtr local_devices[], int count)
3663 3685
3664 for (i = 0; i < count; i++) { 3686 for (i = 0; i < count; i++) {
3665 for (j = i + 1; j < count; j++) { 3687 for (j = i + 1; j < count; j++) {
3666 if (drmCompareBusInfo(local_devices[i], local_devices[j]) == 0) { 3688 if (drmDevicesEqual(local_devices[i], local_devices[j])) {
3667 local_devices[i]->available_nodes |= local_devices[j]->available_nodes; 3689 local_devices[i]->available_nodes |= local_devices[j]->available_nodes;
3668 node_type = log2(local_devices[j]->available_nodes); 3690 node_type = log2(local_devices[j]->available_nodes);
3669 memcpy(local_devices[i]->nodes[node_type], 3691 memcpy(local_devices[i]->nodes[node_type],
@@ -3985,7 +4007,7 @@ int drmGetDevices2(uint32_t flags, drmDevicePtr devices[], int max_devices)
3985 ret = drmProcessUsbDevice(&device, node, node_type, maj, min, 4007 ret = drmProcessUsbDevice(&device, node, node_type, maj, min,
3986 devices != NULL, flags); 4008 devices != NULL, flags);
3987 if (ret) 4009 if (ret)
3988 goto free_devices; 4010 continue;
3989 4011
3990 break; 4012 break;
3991 4013
@@ -3993,7 +4015,7 @@ int drmGetDevices2(uint32_t flags, drmDevicePtr devices[], int max_devices)
3993 ret = drmProcessPlatformDevice(&device, node, node_type, maj, min, 4015 ret = drmProcessPlatformDevice(&device, node, node_type, maj, min,
3994 devices != NULL, flags); 4016 devices != NULL, flags);
3995 if (ret) 4017 if (ret)
3996 goto free_devices; 4018 continue;
3997 4019
3998 break; 4020 break;
3999 4021
@@ -4001,7 +4023,7 @@ int drmGetDevices2(uint32_t flags, drmDevicePtr devices[], int max_devices)
4001 ret = drmProcessHost1xDevice(&device, node, node_type, maj, min, 4023 ret = drmProcessHost1xDevice(&device, node, node_type, maj, min,
4002 devices != NULL, flags); 4024 devices != NULL, flags);
4003 if (ret) 4025 if (ret)
4004 goto free_devices; 4026 continue;
4005 4027
4006 break; 4028 break;
4007 4029
@@ -4140,3 +4162,132 @@ char *drmGetDeviceNameFromFd2(int fd)
4140 return strdup(node); 4162 return strdup(node);
4141#endif 4163#endif
4142} 4164}
4165
4166int drmSyncobjCreate(int fd, uint32_t flags, uint32_t *handle)
4167{
4168 struct drm_syncobj_create args;
4169 int ret;
4170
4171 memclear(args);
4172 args.flags = flags;
4173 args.handle = 0;
4174 ret = drmIoctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &args);
4175 if (ret)
4176 return ret;
4177 *handle = args.handle;
4178 return 0;
4179}
4180
4181int drmSyncobjDestroy(int fd, uint32_t handle)
4182{
4183 struct drm_syncobj_destroy args;
4184
4185 memclear(args);
4186 args.handle = handle;
4187 return drmIoctl(fd, DRM_IOCTL_SYNCOBJ_DESTROY, &args);
4188}
4189
4190int drmSyncobjHandleToFD(int fd, uint32_t handle, int *obj_fd)
4191{
4192 struct drm_syncobj_handle args;
4193 int ret;
4194
4195 memclear(args);
4196 args.fd = -1;
4197 args.handle = handle;
4198 ret = drmIoctl(fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
4199 if (ret)
4200 return ret;
4201 *obj_fd = args.fd;
4202 return 0;
4203}
4204
4205int drmSyncobjFDToHandle(int fd, int obj_fd, uint32_t *handle)
4206{
4207 struct drm_syncobj_handle args;
4208 int ret;
4209
4210 memclear(args);
4211 args.fd = obj_fd;
4212 args.handle = 0;
4213 ret = drmIoctl(fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &args);
4214 if (ret)
4215 return ret;
4216 *handle = args.handle;
4217 return 0;
4218}
4219
4220int drmSyncobjImportSyncFile(int fd, uint32_t handle, int sync_file_fd)
4221{
4222 struct drm_syncobj_handle args;
4223
4224 memclear(args);
4225 args.fd = sync_file_fd;
4226 args.handle = handle;
4227 args.flags = DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE;
4228 return drmIoctl(fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &args);
4229}
4230
4231int drmSyncobjExportSyncFile(int fd, uint32_t handle, int *sync_file_fd)
4232{
4233 struct drm_syncobj_handle args;
4234 int ret;
4235
4236 memclear(args);
4237 args.fd = -1;
4238 args.handle = handle;
4239 args.flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE;
4240 ret = drmIoctl(fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
4241 if (ret)
4242 return ret;
4243 *sync_file_fd = args.fd;
4244 return 0;
4245}
4246
4247int drmSyncobjWait(int fd, uint32_t *handles, unsigned num_handles,
4248 int64_t timeout_nsec, unsigned flags,
4249 uint32_t *first_signaled)
4250{
4251 struct drm_syncobj_wait args;
4252 int ret;
4253
4254 memclear(args);
4255 args.handles = (uintptr_t)handles;
4256 args.timeout_nsec = timeout_nsec;
4257 args.count_handles = num_handles;
4258 args.flags = flags;
4259
4260 ret = drmIoctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &args);
4261 if (ret < 0)
4262 return -errno;
4263
4264 if (first_signaled)
4265 *first_signaled = args.first_signaled;
4266 return ret;
4267}
4268
4269int drmSyncobjReset(int fd, const uint32_t *handles, uint32_t handle_count)
4270{
4271 struct drm_syncobj_array args;
4272 int ret;
4273
4274 memclear(args);
4275 args.handles = (uintptr_t)handles;
4276 args.count_handles = handle_count;
4277
4278 ret = drmIoctl(fd, DRM_IOCTL_SYNCOBJ_RESET, &args);
4279 return ret;
4280}
4281
4282int drmSyncobjSignal(int fd, const uint32_t *handles, uint32_t handle_count)
4283{
4284 struct drm_syncobj_array args;
4285 int ret;
4286
4287 memclear(args);
4288 args.handles = (uintptr_t)handles;
4289 args.count_handles = handle_count;
4290
4291 ret = drmIoctl(fd, DRM_IOCTL_SYNCOBJ_SIGNAL, &args);
4292 return ret;
4293}
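
The drmSyncobj* wrappers map one-to-one onto the syncobj ioctls. A hedged end-to-end sketch: create an object, export it as a file descriptor another process could import, wait with a timeout, then clean up. Wait flags are left at 0, error handling is compressed, and the absolute-deadline interpretation of timeout_nsec follows how the kernel ioctl is documented:

#include <stdint.h>
#include <time.h>
#include <unistd.h>
#include <xf86drm.h>

static int syncobj_demo(int fd)
{
    uint32_t handle, first;
    struct timespec ts;
    int64_t deadline;
    int obj_fd, ret = -1;

    if (drmSyncobjCreate(fd, 0, &handle))
        return -1;
    if (drmSyncobjHandleToFD(fd, handle, &obj_fd))
        goto out_destroy;

    /* timeout_nsec is an absolute CLOCK_MONOTONIC deadline; wait for at
     * most one second from now. */
    clock_gettime(CLOCK_MONOTONIC, &ts);
    deadline = (int64_t)ts.tv_sec * 1000000000LL + ts.tv_nsec + 1000000000LL;

    ret = drmSyncobjWait(fd, &handle, 1, deadline, 0, &first);

    close(obj_fd);
out_destroy:
    drmSyncobjDestroy(fd, handle);
    return ret;
}
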
diff --git a/xf86drm.h b/xf86drm.h
index 0d927018..7773d71a 100644
--- a/xf86drm.h
+++ b/xf86drm.h
@@ -636,6 +636,12 @@ extern int drmCtlUninstHandler(int fd);
636extern int drmSetClientCap(int fd, uint64_t capability, 636extern int drmSetClientCap(int fd, uint64_t capability,
637 uint64_t value); 637 uint64_t value);
638 638
639extern int drmCrtcGetSequence(int fd, uint32_t crtcId,
640 uint64_t *sequence, uint64_t *ns);
641extern int drmCrtcQueueSequence(int fd, uint32_t crtcId,
642 uint32_t flags, uint64_t sequence,
643 uint64_t *sequence_queued,
644 uint64_t user_data);
639/* General user-level programmer's API: authenticated client and/or X */ 645/* General user-level programmer's API: authenticated client and/or X */
640extern int drmMap(int fd, 646extern int drmMap(int fd,
641 drm_handle_t handle, 647 drm_handle_t handle,
@@ -728,7 +734,7 @@ extern void drmMsg(const char *format, ...) DRM_PRINTFLIKE(1, 2);
728extern int drmSetMaster(int fd); 734extern int drmSetMaster(int fd);
729extern int drmDropMaster(int fd); 735extern int drmDropMaster(int fd);
730 736
731#define DRM_EVENT_CONTEXT_VERSION 2 737#define DRM_EVENT_CONTEXT_VERSION 4
732 738
733typedef struct _drmEventContext { 739typedef struct _drmEventContext {
734 740
@@ -748,6 +754,17 @@ typedef struct _drmEventContext {
748 unsigned int tv_usec, 754 unsigned int tv_usec,
749 void *user_data); 755 void *user_data);
750 756
757 void (*page_flip_handler2)(int fd,
758 unsigned int sequence,
759 unsigned int tv_sec,
760 unsigned int tv_usec,
761 unsigned int crtc_id,
762 void *user_data);
763
764 void (*sequence_handler)(int fd,
765 uint64_t sequence,
766 uint64_t ns,
767 uint64_t user_data);
751} drmEventContext, *drmEventContextPtr; 768} drmEventContext, *drmEventContextPtr;
752 769
753extern int drmHandleEvent(int fd, drmEventContextPtr evctx); 770extern int drmHandleEvent(int fd, drmEventContextPtr evctx);
@@ -844,6 +861,21 @@ extern void drmFreeDevices(drmDevicePtr devices[], int count);
844extern int drmGetDevice2(int fd, uint32_t flags, drmDevicePtr *device); 861extern int drmGetDevice2(int fd, uint32_t flags, drmDevicePtr *device);
845extern int drmGetDevices2(uint32_t flags, drmDevicePtr devices[], int max_devices); 862extern int drmGetDevices2(uint32_t flags, drmDevicePtr devices[], int max_devices);
846 863
864extern int drmDevicesEqual(drmDevicePtr a, drmDevicePtr b);
865
866extern int drmSyncobjCreate(int fd, uint32_t flags, uint32_t *handle);
867extern int drmSyncobjDestroy(int fd, uint32_t handle);
868extern int drmSyncobjHandleToFD(int fd, uint32_t handle, int *obj_fd);
869extern int drmSyncobjFDToHandle(int fd, int obj_fd, uint32_t *handle);
870
871extern int drmSyncobjImportSyncFile(int fd, uint32_t handle, int sync_file_fd);
872extern int drmSyncobjExportSyncFile(int fd, uint32_t handle, int *sync_file_fd);
873extern int drmSyncobjWait(int fd, uint32_t *handles, unsigned num_handles,
874 int64_t timeout_nsec, unsigned flags,
875 uint32_t *first_signaled);
876extern int drmSyncobjReset(int fd, const uint32_t *handles, uint32_t handle_count);
877extern int drmSyncobjSignal(int fd, const uint32_t *handles, uint32_t handle_count);
878
847#if defined(__cplusplus) 879#if defined(__cplusplus)
848} 880}
849#endif 881#endif
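
DRM_EVENT_CONTEXT_VERSION moves to 4 to cover the two new callbacks, and drmHandleEvent() checks the caller's version before invoking either of them, so existing users are unaffected. A hedged sketch of a listener that opts into both page_flip_handler2 and sequence_handler (the callback bodies are placeholders):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <xf86drm.h>

static void flip2(int fd, unsigned int seq, unsigned int sec,
                  unsigned int usec, unsigned int crtc_id, void *data)
{
    printf("flip done on CRTC %u at %u.%06u\n", crtc_id, sec, usec);
}

static void sequence(int fd, uint64_t seq, uint64_t ns, uint64_t data)
{
    printf("CRTC sequence %" PRIu64 " at %" PRIu64 " ns\n", seq, ns);
}

static int drain_events(int fd)
{
    drmEventContext ctx;

    memset(&ctx, 0, sizeof(ctx));           /* unset handlers stay NULL */
    ctx.version = DRM_EVENT_CONTEXT_VERSION;
    ctx.page_flip_handler2 = flip2;
    ctx.sequence_handler = sequence;

    return drmHandleEvent(fd, &ctx);        /* call when poll() says readable */
}
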
diff --git a/xf86drmHash.c b/xf86drmHash.c
index f287e61f..b2fa414e 100644
--- a/xf86drmHash.c
+++ b/xf86drmHash.c
@@ -98,9 +98,6 @@ static unsigned long HashHash(unsigned long key)
98 } 98 }
99 99
100 hash %= HASH_SIZE; 100 hash %= HASH_SIZE;
101#if DEBUG
102 printf( "Hash(%lu) = %lu\n", key, hash);
103#endif
104 return hash; 101 return hash;
105} 102}
106 103
@@ -201,9 +198,6 @@ int drmHashInsert(void *t, unsigned long key, void *value)
201 bucket->value = value; 198 bucket->value = value;
202 bucket->next = table->buckets[hash]; 199 bucket->next = table->buckets[hash];
203 table->buckets[hash] = bucket; 200 table->buckets[hash] = bucket;
204#if DEBUG
205 printf("Inserted %lu at %lu/%p\n", key, hash, bucket);
206#endif
207 return 0; /* Added to table */ 201 return 0; /* Added to table */
208} 202}
209 203
diff --git a/xf86drmMode.c b/xf86drmMode.c
index e1c99742..9a15b5e7 100644
--- a/xf86drmMode.c
+++ b/xf86drmMode.c
@@ -38,10 +38,6 @@
38 * platforms find which headers to include to get uint32_t 38 * platforms find which headers to include to get uint32_t
39 */ 39 */
40 40
41#ifdef HAVE_CONFIG_H
42#include "config.h"
43#endif
44
45#include <limits.h> 41#include <limits.h>
46#include <stdint.h> 42#include <stdint.h>
47#include <stdlib.h> 43#include <stdlib.h>
@@ -271,9 +267,9 @@ int drmModeAddFB(int fd, uint32_t width, uint32_t height, uint8_t depth,
271} 267}
272 268
273int drmModeAddFB2WithModifiers(int fd, uint32_t width, uint32_t height, 269int drmModeAddFB2WithModifiers(int fd, uint32_t width, uint32_t height,
274 uint32_t pixel_format, uint32_t bo_handles[4], 270 uint32_t pixel_format, const uint32_t bo_handles[4],
275 uint32_t pitches[4], uint32_t offsets[4], 271 const uint32_t pitches[4], const uint32_t offsets[4],
276 uint64_t modifier[4], uint32_t *buf_id, uint32_t flags) 272 const uint64_t modifier[4], uint32_t *buf_id, uint32_t flags)
277{ 273{
278 struct drm_mode_fb_cmd2 f; 274 struct drm_mode_fb_cmd2 f;
279 int ret; 275 int ret;
@@ -297,8 +293,8 @@ int drmModeAddFB2WithModifiers(int fd, uint32_t width, uint32_t height,
297} 293}
298 294
299int drmModeAddFB2(int fd, uint32_t width, uint32_t height, 295int drmModeAddFB2(int fd, uint32_t width, uint32_t height,
300 uint32_t pixel_format, uint32_t bo_handles[4], 296 uint32_t pixel_format, const uint32_t bo_handles[4],
301 uint32_t pitches[4], uint32_t offsets[4], 297 const uint32_t pitches[4], const uint32_t offsets[4],
302 uint32_t *buf_id, uint32_t flags) 298 uint32_t *buf_id, uint32_t flags)
303{ 299{
304 return drmModeAddFB2WithModifiers(fd, width, height, 300 return drmModeAddFB2WithModifiers(fd, width, height,
@@ -831,8 +827,7 @@ int drmCheckModesettingSupported(const char *busid)
831 } 827 }
832#elif defined(__DragonFly__) 828#elif defined(__DragonFly__)
833 return 0; 829 return 0;
834#endif 830#elif defined(__OpenBSD__)
835#ifdef __OpenBSD__
836 int fd; 831 int fd;
837 struct drm_mode_card_res res; 832 struct drm_mode_card_res res;
838 drmModeResPtr r = 0; 833 drmModeResPtr r = 0;
@@ -889,6 +884,8 @@ int drmHandleEvent(int fd, drmEventContextPtr evctx)
889 int len, i; 884 int len, i;
890 struct drm_event *e; 885 struct drm_event *e;
891 struct drm_event_vblank *vblank; 886 struct drm_event_vblank *vblank;
887 struct drm_event_crtc_sequence *seq;
888 void *user_data;
892 889
893 /* The DRM read semantics guarantees that we always get only 890 /* The DRM read semantics guarantees that we always get only
894 * complete events. */ 891 * complete events. */
@@ -915,15 +912,30 @@ int drmHandleEvent(int fd, drmEventContextPtr evctx)
915 U642VOID (vblank->user_data)); 912 U642VOID (vblank->user_data));
916 break; 913 break;
917 case DRM_EVENT_FLIP_COMPLETE: 914 case DRM_EVENT_FLIP_COMPLETE:
918 if (evctx->version < 2 ||
919 evctx->page_flip_handler == NULL)
920 break;
921 vblank = (struct drm_event_vblank *) e; 915 vblank = (struct drm_event_vblank *) e;
922 evctx->page_flip_handler(fd, 916 user_data = U642VOID (vblank->user_data);
923 vblank->sequence, 917
924 vblank->tv_sec, 918 if (evctx->version >= 3 && evctx->page_flip_handler2)
925 vblank->tv_usec, 919 evctx->page_flip_handler2(fd,
926 U642VOID (vblank->user_data)); 920 vblank->sequence,
921 vblank->tv_sec,
922 vblank->tv_usec,
923 vblank->crtc_id,
924 user_data);
925 else if (evctx->version >= 2 && evctx->page_flip_handler)
926 evctx->page_flip_handler(fd,
927 vblank->sequence,
928 vblank->tv_sec,
929 vblank->tv_usec,
930 user_data);
931 break;
932 case DRM_EVENT_CRTC_SEQUENCE:
933 seq = (struct drm_event_crtc_sequence *) e;
934 if (evctx->version >= 4 && evctx->sequence_handler)
935 evctx->sequence_handler(fd,
936 seq->sequence,
937 seq->time_ns,
938 seq->user_data);
927 break; 939 break;
928 default: 940 default:
929 break; 941 break;
@@ -1188,275 +1200,6 @@ int drmModeObjectSetProperty(int fd, uint32_t object_id, uint32_t object_type,
1188 return DRM_IOCTL(fd, DRM_IOCTL_MODE_OBJ_SETPROPERTY, &prop); 1200 return DRM_IOCTL(fd, DRM_IOCTL_MODE_OBJ_SETPROPERTY, &prop);
1189} 1201}
1190 1202
1191typedef struct _drmModePropertySetItem drmModePropertySetItem, *drmModePropertySetItemPtr;
1192
1193struct _drmModePropertySetItem {
1194 uint32_t object_id;
1195 uint32_t property_id;
1196 bool is_blob;
1197 uint64_t value;
1198 void *blob;
1199 drmModePropertySetItemPtr next;
1200};
1201
1202struct _drmModePropertySet {
1203 unsigned int count_objs;
1204 unsigned int count_props;
1205 unsigned int count_blobs;
1206 drmModePropertySetItem list;
1207};
1208
1209drmModePropertySetPtr drmModePropertySetAlloc(void)
1210{
1211 drmModePropertySetPtr set;
1212
1213 set = drmMalloc(sizeof *set);
1214 if (!set)
1215 return NULL;
1216
1217 set->list.next = NULL;
1218 set->count_props = 0;
1219 set->count_objs = 0;
1220
1221 return set;
1222}
1223
1224int drmModePropertySetAdd(drmModePropertySetPtr set,
1225 uint32_t object_id,
1226 uint32_t property_id,
1227 uint64_t value)
1228{
1229 drmModePropertySetItemPtr prev = &set->list;
1230 bool new_obj = false;
1231
1232 /* keep it sorted by object_id and property_id */
1233 while (prev->next) {
1234 if (prev->next->object_id > object_id)
1235 break;
1236
1237 if (prev->next->object_id == object_id &&
1238 prev->next->property_id >= property_id)
1239 break;
1240
1241 prev = prev->next;
1242 }
1243
1244 if ((prev == &set->list || prev->object_id != object_id) &&
1245 (!prev->next || prev->next->object_id != object_id))
1246 new_obj = true;
1247
1248 /* replace or add? */
1249 if (prev->next &&
1250 prev->next->object_id == object_id &&
1251 prev->next->property_id == property_id) {
1252 drmModePropertySetItemPtr item = prev->next;
1253
1254 if (item->is_blob)
1255 return -EINVAL;
1256
1257 item->value = value;
1258 } else {
1259 drmModePropertySetItemPtr item;
1260
1261 item = drmMalloc(sizeof *item);
1262 if (!item)
1263 return -1;
1264
1265 item->object_id = object_id;
1266 item->property_id = property_id;
1267 item->value = value;
1268 item->is_blob = false;
1269 item->blob = NULL;
1270
1271 item->next = prev->next;
1272 prev->next = item;
1273
1274 set->count_props++;
1275 }
1276
1277 if (new_obj)
1278 set->count_objs++;
1279
1280 return 0;
1281}
1282
1283int drmModePropertySetAddBlob(drmModePropertySetPtr set,
1284 uint32_t object_id,
1285 uint32_t property_id,
1286 uint64_t length,
1287 void *data)
1288{
1289 drmModePropertySetItemPtr prev = &set->list;
1290 bool new_obj = false;
1291
1292 /* keep it sorted by object_id and property_id */
1293 while (prev->next) {
1294 if (prev->next->object_id > object_id)
1295 break;
1296
1297 if (prev->next->object_id == object_id &&
1298 prev->next->property_id >= property_id)
1299 break;
1300
1301 prev = prev->next;
1302 }
1303
1304 if ((prev == &set->list || prev->object_id != object_id) &&
1305 (!prev->next || prev->next->object_id != object_id))
1306 new_obj = true;
1307
1308 /* replace or add? */
1309 if (prev->next &&
1310 prev->next->object_id == object_id &&
1311 prev->next->property_id == property_id) {
1312 drmModePropertySetItemPtr item = prev->next;
1313
1314 if (!item->is_blob)
1315 return -EINVAL;
1316
1317 item->value = length;
1318 item->blob = data;
1319 } else {
1320 drmModePropertySetItemPtr item;
1321
1322 item = drmMalloc(sizeof *item);
1323 if (!item)
1324 return -1;
1325
1326 item->object_id = object_id;
1327 item->property_id = property_id;
1328 item->is_blob = true;
1329 item->value = length;
1330 item->blob = data;
1331
1332 item->next = prev->next;
1333 prev->next = item;
1334
1335 set->count_props++;
1336 set->count_blobs++;
1337 }
1338
1339 if (new_obj)
1340 set->count_objs++;
1341
1342 return 0;
1343}
1344
1345void drmModePropertySetFree(drmModePropertySetPtr set)
1346{
1347 drmModePropertySetItemPtr item;
1348
1349 if (!set)
1350 return;
1351
1352 item = set->list.next;
1353
1354 while (item) {
1355 drmModePropertySetItemPtr next = item->next;
1356
1357 drmFree(item);
1358
1359 item = next;
1360 }
1361
1362 drmFree(set);
1363}
1364
1365int drmModePropertySetCommit(int fd, uint32_t flags, void *user_data,
1366 drmModePropertySetPtr set)
1367{
1368 drmModePropertySetItemPtr item;
1369 uint32_t *objs_ptr = NULL;
1370 uint32_t *count_props_ptr = NULL;
1371 uint32_t *props_ptr = NULL;
1372 uint64_t *prop_values_ptr = NULL;
1373 uint64_t *blob_values_ptr = NULL;
1374 struct drm_mode_atomic atomic = { 0 };
1375 unsigned int obj_idx = 0;
1376 unsigned int prop_idx = 0;
1377 unsigned int blob_idx = 0;
1378 int ret = -1;
1379
1380 if (!set)
1381 return -1;
1382
1383 objs_ptr = drmMalloc(set->count_objs * sizeof objs_ptr[0]);
1384 if (!objs_ptr) {
1385 errno = ENOMEM;
1386 goto out;
1387 }
1388
1389 count_props_ptr = drmMalloc(set->count_objs * sizeof count_props_ptr[0]);
1390 if (!count_props_ptr) {
1391 errno = ENOMEM;
1392 goto out;
1393 }
1394
1395 props_ptr = drmMalloc(set->count_props * sizeof props_ptr[0]);
1396 if (!props_ptr) {
1397 errno = ENOMEM;
1398 goto out;
1399 }
1400
1401 prop_values_ptr = drmMalloc(set->count_props * sizeof prop_values_ptr[0]);
1402 if (!prop_values_ptr) {
1403 errno = ENOMEM;
1404 goto out;
1405 }
1406
1407 blob_values_ptr = drmMalloc(set->count_blobs * sizeof blob_values_ptr[0]);
1408 if (!blob_values_ptr) {
1409 errno = ENOMEM;
1410 goto out;
1411 }
1412
1413 item = set->list.next;
1414
1415 while (item) {
1416 int count_props = 0;
1417 drmModePropertySetItemPtr next = item;
1418
1419 objs_ptr[obj_idx] = item->object_id;
1420
1421 while (next && next->object_id == item->object_id) {
1422 props_ptr[prop_idx] = next->property_id;
1423 prop_values_ptr[prop_idx] = next->value;
1424 prop_idx++;
1425
1426 if (next->is_blob)
1427 blob_values_ptr[blob_idx++] = VOID2U64(next->blob);
1428
1429 count_props++;
1430
1431 next = next->next;
1432 }
1433
1434 count_props_ptr[obj_idx++] = count_props;
1435
1436 item = next;
1437 }
1438
1439 atomic.count_objs = set->count_objs;
1440 atomic.flags = flags;
1441 atomic.objs_ptr = VOID2U64(objs_ptr);
1442 atomic.count_props_ptr = VOID2U64(count_props_ptr);
1443 atomic.props_ptr = VOID2U64(props_ptr);
1444 atomic.prop_values_ptr = VOID2U64(prop_values_ptr);
1445// TODO:
1446// atomic.blob_values_ptr = VOID2U64(blob_values_ptr);
1447 atomic.user_data = VOID2U64(user_data);
1448
1449 ret = DRM_IOCTL(fd, DRM_IOCTL_MODE_ATOMIC, &atomic);
1450
1451out:
1452 drmFree(objs_ptr);
1453 drmFree(count_props_ptr);
1454 drmFree(props_ptr);
1455 drmFree(prop_values_ptr);
1456
1457 return ret;
1458}
1459
1460typedef struct _drmModeAtomicReqItem drmModeAtomicReqItem, *drmModeAtomicReqItemPtr; 1203typedef struct _drmModeAtomicReqItem drmModeAtomicReqItem, *drmModeAtomicReqItemPtr;
1461 1204
1462struct _drmModeAtomicReqItem { 1205struct _drmModeAtomicReqItem {
@@ -1565,6 +1308,9 @@ int drmModeAtomicAddProperty(drmModeAtomicReqPtr req,
1565 if (!req) 1308 if (!req)
1566 return -EINVAL; 1309 return -EINVAL;
1567 1310
1311 if (object_id == 0 || property_id == 0)
1312 return -EINVAL;
1313
1568 if (req->cursor >= req->size_items) { 1314 if (req->cursor >= req->size_items) {
1569 drmModeAtomicReqItemPtr new; 1315 drmModeAtomicReqItemPtr new;
1570 1316
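
The drmModePropertySet* code removed above is superseded by the drmModeAtomicReq interface, which with this change also rejects zero object or property ids up front. A hedged sketch of the replacement flow; drmModeAtomicCommit() and drmModeAtomicFree() are the existing companions in xf86drmMode.h, and the ids are placeholders:

#include <errno.h>
#include <stdint.h>
#include <xf86drmMode.h>

/* Set one property on one object through the atomic interface. */
static int set_prop_atomic(int fd, uint32_t obj_id, uint32_t prop_id,
                           uint64_t value)
{
    drmModeAtomicReqPtr req = drmModeAtomicAlloc();
    int ret;

    if (!req)
        return -ENOMEM;

    ret = drmModeAtomicAddProperty(req, obj_id, prop_id, value);
    if (ret < 0)
        goto out;

    /* DRM_MODE_ATOMIC_TEST_ONLY can be passed to validate without applying. */
    ret = drmModeAtomicCommit(fd, req, 0, NULL);
out:
    drmModeAtomicFree(req);
    return ret;
}
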
@@ -1746,3 +1492,92 @@ drmModeDestroyPropertyBlob(int fd, uint32_t id)
1746 destroy.blob_id = id; 1492 destroy.blob_id = id;
1747 return DRM_IOCTL(fd, DRM_IOCTL_MODE_DESTROYPROPBLOB, &destroy); 1493 return DRM_IOCTL(fd, DRM_IOCTL_MODE_DESTROYPROPBLOB, &destroy);
1748} 1494}
1495
1496int
1497drmModeCreateLease(int fd, const uint32_t *objects, int num_objects, int flags, uint32_t *lessee_id)
1498{
1499 struct drm_mode_create_lease create;
1500 int ret;
1501
1502 memclear(create);
1503 create.object_ids = (uintptr_t) objects;
1504 create.object_count = num_objects;
1505 create.flags = flags;
1506
1507 ret = DRM_IOCTL(fd, DRM_IOCTL_MODE_CREATE_LEASE, &create);
1508 if (ret == 0) {
1509 *lessee_id = create.lessee_id;
1510 return create.fd;
1511 }
1512 return -errno;
1513}
1514
1515drmModeLesseeListPtr
1516drmModeListLessees(int fd)
1517{
1518 struct drm_mode_list_lessees list;
1519 uint32_t count;
1520 drmModeLesseeListPtr ret;
1521
1522 memclear(list);
1523
1524 if (DRM_IOCTL(fd, DRM_IOCTL_MODE_LIST_LESSEES, &list))
1525 return NULL;
1526
1527 count = list.count_lessees;
1528 ret = drmMalloc(sizeof (drmModeLesseeListRes) + count * sizeof (ret->lessees[0]));
1529 if (!ret)
1530 return NULL;
1531
1532 list.lessees_ptr = VOID2U64(&ret->lessees[0]);
1533 if (DRM_IOCTL(fd, DRM_IOCTL_MODE_LIST_LESSEES, &list)) {
1534 drmFree(ret);
1535 return NULL;
1536 }
1537
1538 ret->count = count;
1539 return ret;
1540}
1541
1542drmModeObjectListPtr
1543drmModeGetLease(int fd)
1544{
1545 struct drm_mode_get_lease get;
1546 uint32_t count;
1547 drmModeObjectListPtr ret;
1548
1549 memclear(get);
1550
1551 if (DRM_IOCTL(fd, DRM_IOCTL_MODE_GET_LEASE, &get))
1552 return NULL;
1553
1554 count = get.count_objects;
1555 ret = drmMalloc(sizeof (drmModeObjectListRes) + count * sizeof (ret->objects[0]));
1556 if (!ret)
1557 return NULL;
1558
1559 get.objects_ptr = VOID2U64(&ret->objects[0]);
1560 if (DRM_IOCTL(fd, DRM_IOCTL_MODE_GET_LEASE, &get)) {
1561 drmFree(ret);
1562 return NULL;
1563 }
1564
1565 ret->count = count;
1566 return ret;
1567}
1568
1569int
1570drmModeRevokeLease(int fd, uint32_t lessee_id)
1571{
1572 struct drm_mode_revoke_lease revoke;
1573 int ret;
1574
1575 memclear(revoke);
1576
1577 revoke.lessee_id = lessee_id;
1578
1579 ret = DRM_IOCTL(fd, DRM_IOCTL_MODE_REVOKE_LEASE, &revoke);
1580 if (ret == 0)
1581 return 0;
1582 return -errno;
1583}
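
drmModeCreateLease() hands back a new DRM master fd limited to the listed objects, drmModeListLessees() and drmModeGetLease() enumerate, and drmModeRevokeLease() takes a lease back. A hedged lessor-side sketch, assuming fd is the current master and the CRTC/connector/plane ids have already been looked up; O_CLOEXEC is one of the flags the create ioctl accepts:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* Lease a CRTC/connector/plane triple to a client, list lessees, revoke. */
static int lease_demo(int fd, uint32_t crtc_id, uint32_t conn_id,
                      uint32_t plane_id)
{
    uint32_t objects[] = { crtc_id, conn_id, plane_id };
    drmModeLesseeListPtr lessees;
    uint32_t lessee_id;
    int lease_fd;

    lease_fd = drmModeCreateLease(fd, objects, 3, O_CLOEXEC, &lessee_id);
    if (lease_fd < 0)
        return lease_fd;                    /* -errno on failure */

    lessees = drmModeListLessees(fd);
    if (lessees) {
        for (uint32_t i = 0; i < lessees->count; i++)
            printf("lessee %u\n", lessees->lessees[i]);
        drmFree(lessees);
    }

    /* ...hand lease_fd to the client; later the lessor can pull it back: */
    return drmModeRevokeLease(fd, lessee_id);
}
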
diff --git a/xf86drmMode.h b/xf86drmMode.h
index 9d73be95..3cd27aee 100644
--- a/xf86drmMode.h
+++ b/xf86drmMode.h
@@ -369,15 +369,16 @@ extern int drmModeAddFB(int fd, uint32_t width, uint32_t height, uint8_t depth,
369 uint32_t *buf_id); 369 uint32_t *buf_id);
370/* ...with a specific pixel format */ 370/* ...with a specific pixel format */
371extern int drmModeAddFB2(int fd, uint32_t width, uint32_t height, 371extern int drmModeAddFB2(int fd, uint32_t width, uint32_t height,
372 uint32_t pixel_format, uint32_t bo_handles[4], 372 uint32_t pixel_format, const uint32_t bo_handles[4],
373 uint32_t pitches[4], uint32_t offsets[4], 373 const uint32_t pitches[4], const uint32_t offsets[4],
374 uint32_t *buf_id, uint32_t flags); 374 uint32_t *buf_id, uint32_t flags);
375 375
376/* ...with format modifiers */ 376/* ...with format modifiers */
377int drmModeAddFB2WithModifiers(int fd, uint32_t width, uint32_t height, 377int drmModeAddFB2WithModifiers(int fd, uint32_t width, uint32_t height,
378 uint32_t pixel_format, uint32_t bo_handles[4], 378 uint32_t pixel_format, const uint32_t bo_handles[4],
379 uint32_t pitches[4], uint32_t offsets[4], 379 const uint32_t pitches[4], const uint32_t offsets[4],
380 uint64_t modifier[4], uint32_t *buf_id, uint32_t flags); 380 const uint64_t modifier[4], uint32_t *buf_id,
381 uint32_t flags);
381 382
382/** 383/**
383 * Destroies the given framebuffer. 384 * Destroies the given framebuffer.
@@ -498,25 +499,6 @@ extern int drmModeObjectSetProperty(int fd, uint32_t object_id,
498 uint64_t value); 499 uint64_t value);
499 500
500 501
501typedef struct _drmModePropertySet drmModePropertySet, *drmModePropertySetPtr;
502
503extern drmModePropertySetPtr drmModePropertySetAlloc(void);
504
505extern int drmModePropertySetAdd(drmModePropertySetPtr set,
506 uint32_t object_id,
507 uint32_t property_id,
508 uint64_t value);
509extern int drmModePropertySetAddBlob(drmModePropertySetPtr set,
510 uint32_t object_id,
511 uint32_t property_id,
512 uint64_t length,
513 void *blob);
514
515extern int drmModePropertySetCommit(int fd, uint32_t flags,
516 void *user_data, drmModePropertySetPtr set);
517
518extern void drmModePropertySetFree(drmModePropertySetPtr set);
519
520typedef struct _drmModeAtomicReq drmModeAtomicReq, *drmModeAtomicReqPtr; 502typedef struct _drmModeAtomicReq drmModeAtomicReq, *drmModeAtomicReqPtr;
521 503
522extern drmModeAtomicReqPtr drmModeAtomicAlloc(void); 504extern drmModeAtomicReqPtr drmModeAtomicAlloc(void);
@@ -539,6 +521,28 @@ extern int drmModeCreatePropertyBlob(int fd, const void *data, size_t size,
539 uint32_t *id); 521 uint32_t *id);
540extern int drmModeDestroyPropertyBlob(int fd, uint32_t id); 522extern int drmModeDestroyPropertyBlob(int fd, uint32_t id);
541 523
524/*
525 * DRM mode lease APIs. These create and manage new drm_masters with
526 * access to a subset of the available DRM resources
527 */
528
529extern int drmModeCreateLease(int fd, const uint32_t *objects, int num_objects, int flags, uint32_t *lessee_id);
530
531typedef struct drmModeLesseeList {
532 uint32_t count;
533 uint32_t lessees[0];
534} drmModeLesseeListRes, *drmModeLesseeListPtr;
535
536extern drmModeLesseeListPtr drmModeListLessees(int fd);
537
538typedef struct drmModeObjectList {
539 uint32_t count;
540 uint32_t objects[0];
541} drmModeObjectListRes, *drmModeObjectListPtr;
542
543extern drmModeObjectListPtr drmModeGetLease(int fd);
544
545extern int drmModeRevokeLease(int fd, uint32_t lessee_id);
542 546
543#if defined(__cplusplus) 547#if defined(__cplusplus)
544} 548}