author     Dmitry Shmidt  2018-04-27 19:29:55 -0500
committer  Dmitry Shmidt  2018-04-27 19:29:55 -0500
commit     16adcbf47e362a071c00141d5bd050d66dd8bb3d (patch)
tree       a1e58bc6dec605a114d57c6d655e9614f1b6669b
parent     2f9aea0661550a43c3d2ac33a5bc286870edd34e (diff)
parent     915c5fa5407ef5ace18b1bc69a4686c012273e8e (diff)
download   external-libgbm-16adcbf47e362a071c00141d5bd050d66dd8bb3d.tar.gz
           external-libgbm-16adcbf47e362a071c00141d5bd050d66dd8bb3d.tar.xz
           external-libgbm-16adcbf47e362a071c00141d5bd050d66dd8bb3d.zip
Merge remote-tracking branch 'aosp/upstream-aosp-merge-fdo-master' into libdrm
Change-Id: Ifff1b65a81b838a6ab77c9f33eb5c3c514c250db
-rw-r--r--.editorconfig4
-rw-r--r--Android.bp2
-rw-r--r--Makefile.am33
-rw-r--r--Makefile.sources3
-rw-r--r--README24
-rw-r--r--RELEASING6
-rw-r--r--amdgpu/.editorconfig13
-rw-r--r--amdgpu/Makefile.am4
-rw-r--r--amdgpu/Makefile.sources3
-rwxr-xr-xamdgpu/amdgpu-symbol-check23
-rw-r--r--amdgpu/amdgpu.h324
-rw-r--r--amdgpu/amdgpu_asic_id.c161
-rw-r--r--amdgpu/amdgpu_asic_id.h165
-rw-r--r--amdgpu/amdgpu_bo.c97
-rw-r--r--amdgpu/amdgpu_cs.c298
-rw-r--r--amdgpu/amdgpu_device.c92
-rw-r--r--amdgpu/amdgpu_gpu_info.c94
-rw-r--r--amdgpu/amdgpu_internal.h48
-rw-r--r--amdgpu/amdgpu_vamgr.c180
-rw-r--r--amdgpu/amdgpu_vm.c49
-rw-r--r--amdgpu/meson.build66
-rw-r--r--amdgpu/util_hash.c4
-rw-r--r--amdgpu/util_hash.h4
-rw-r--r--amdgpu/util_hash_table.c4
-rw-r--r--amdgpu/util_hash_table.h4
-rw-r--r--android/gralloc_handle.h111
-rwxr-xr-xautogen.sh10
-rw-r--r--configure.ac86
-rw-r--r--data/Android.mk10
-rw-r--r--data/Makefile.am25
-rw-r--r--data/amdgpu.ids187
-rw-r--r--data/meson.build27
-rw-r--r--etnaviv/Makefile.sources1
-rwxr-xr-xetnaviv/etnaviv-symbol-check6
-rw-r--r--etnaviv/etnaviv_bo.c11
-rw-r--r--etnaviv/etnaviv_bo_cache.c30
-rw-r--r--etnaviv/etnaviv_cmd_stream.c49
-rw-r--r--etnaviv/etnaviv_device.c4
-rw-r--r--etnaviv/etnaviv_drm.h58
-rw-r--r--etnaviv/etnaviv_drmif.h25
-rw-r--r--etnaviv/etnaviv_gpu.c76
-rw-r--r--etnaviv/etnaviv_perfmon.c185
-rw-r--r--etnaviv/etnaviv_pipe.c4
-rw-r--r--etnaviv/etnaviv_priv.h47
-rw-r--r--etnaviv/meson.build59
-rwxr-xr-xexynos/exynos-symbol-check2
-rw-r--r--exynos/exynos_drm.c6
-rw-r--r--exynos/exynos_drmif.h8
-rw-r--r--exynos/exynos_fimg2d.c39
-rw-r--r--exynos/exynos_fimg2d.h21
-rw-r--r--exynos/fimg2d_reg.h21
-rw-r--r--exynos/meson.build54
-rw-r--r--freedreno/Makefile.am1
-rwxr-xr-xfreedreno/freedreno-symbol-check5
-rw-r--r--freedreno/freedreno_bo.c29
-rw-r--r--freedreno/freedreno_bo_cache.c8
-rw-r--r--freedreno/freedreno_device.c11
-rw-r--r--freedreno/freedreno_drmif.h6
-rw-r--r--freedreno/freedreno_pipe.c33
-rw-r--r--freedreno/freedreno_priv.h66
-rw-r--r--freedreno/freedreno_ringbuffer.c4
-rw-r--r--freedreno/kgsl/kgsl_bo.c4
-rw-r--r--freedreno/kgsl/kgsl_device.c6
-rw-r--r--freedreno/kgsl/kgsl_pipe.c7
-rw-r--r--freedreno/kgsl/kgsl_priv.h2
-rw-r--r--freedreno/kgsl/kgsl_ringbuffer.c6
-rw-r--r--freedreno/meson.build77
-rw-r--r--freedreno/msm/msm_bo.c17
-rw-r--r--freedreno/msm/msm_device.c6
-rw-r--r--freedreno/msm/msm_drm.h40
-rw-r--r--freedreno/msm/msm_pipe.c51
-rw-r--r--freedreno/msm/msm_priv.h3
-rw-r--r--freedreno/msm/msm_ringbuffer.c20
-rw-r--r--include/drm/README34
-rw-r--r--include/drm/amdgpu_drm.h592
-rw-r--r--include/drm/drm.h92
-rw-r--r--include/drm/drm_fourcc.h184
-rw-r--r--include/drm/drm_mode.h266
-rw-r--r--include/drm/drm_sarea.h8
-rw-r--r--include/drm/i915_drm.h321
-rw-r--r--include/drm/mga_drm.h12
-rw-r--r--include/drm/nouveau_drm.h94
-rw-r--r--include/drm/qxl_drm.h82
-rw-r--r--include/drm/r128_drm.h10
-rw-r--r--include/drm/radeon_drm.h128
-rw-r--r--include/drm/savage_drm.h20
-rw-r--r--include/drm/sis_drm.h10
-rw-r--r--include/drm/tegra_drm.h38
-rw-r--r--include/drm/vc4_drm.h133
-rw-r--r--include/drm/via_drm.h8
-rw-r--r--include/drm/virtgpu_drm.h1
-rw-r--r--include/drm/vmwgfx_drm.h44
-rwxr-xr-xintel/intel-symbol-check2
-rw-r--r--intel/intel_bufmgr.c4
-rw-r--r--intel/intel_bufmgr_fake.c4
-rw-r--r--intel/intel_bufmgr_gem.c69
-rw-r--r--intel/intel_chipset.h89
-rw-r--r--intel/intel_decode.c11
-rw-r--r--intel/meson.build106
-rw-r--r--intel/mm.c4
-rw-r--r--intel/mm.h4
-rw-r--r--intel/test_decode.c8
-rw-r--r--libdrm_macros.h2
-rw-r--r--libkms/api.c4
-rw-r--r--libkms/dumb.c4
-rw-r--r--libkms/exynos.c26
-rw-r--r--libkms/intel.c4
-rw-r--r--libkms/internal.h4
-rwxr-xr-xlibkms/kms-symbol-check2
-rw-r--r--libkms/linux.c14
-rw-r--r--libkms/meson.build75
-rw-r--r--libkms/nouveau.c4
-rw-r--r--libkms/radeon.c4
-rw-r--r--libkms/vmwgfx.c4
-rw-r--r--man/drm-kms.xml4
-rw-r--r--man/drm-memory.xml4
-rw-r--r--man/drm.xml6
-rw-r--r--man/drmAvailable.xml4
-rw-r--r--man/drmHandleEvent.xml4
-rw-r--r--man/drmModeGetResources.xml4
-rw-r--r--man/meson.build67
-rw-r--r--meson.build382
-rw-r--r--meson_options.txt143
-rw-r--r--nouveau/abi16.c4
-rw-r--r--nouveau/bufctx.c4
-rw-r--r--nouveau/meson.build59
-rwxr-xr-xnouveau/nouveau-symbol-check2
-rw-r--r--nouveau/nouveau.c4
-rw-r--r--nouveau/pushbuf.c4
-rw-r--r--omap/Android.mk13
-rw-r--r--omap/meson.build54
-rwxr-xr-xomap/omap-symbol-check2
-rw-r--r--omap/omap_drm.c4
-rw-r--r--radeon/meson.build64
-rwxr-xr-xradeon/radeon-symbol-check2
-rw-r--r--radeon/radeon_bo.c3
-rw-r--r--radeon/radeon_bo_gem.c3
-rw-r--r--radeon/radeon_cs.c3
-rw-r--r--radeon/radeon_cs_gem.c3
-rw-r--r--radeon/radeon_cs_space.c3
-rw-r--r--radeon/radeon_surface.c4
-rw-r--r--tegra/meson.build53
-rwxr-xr-xtegra/tegra-symbol-check9
-rw-r--r--tegra/tegra.c4
l---------tests/amdgpu/.editorconfig1
-rw-r--r--tests/amdgpu/Makefile.am12
-rw-r--r--tests/amdgpu/amdgpu_test.c228
-rw-r--r--tests/amdgpu/amdgpu_test.h144
-rw-r--r--tests/amdgpu/basic_tests.c1122
-rw-r--r--tests/amdgpu/bo_tests.c79
-rw-r--r--tests/amdgpu/cs_tests.c74
-rw-r--r--tests/amdgpu/deadlock_tests.c255
-rw-r--r--tests/amdgpu/decode_messages.h (renamed from tests/amdgpu/uvd_messages.h)43
-rw-r--r--tests/amdgpu/frame.h2
-rw-r--r--tests/amdgpu/meson.build34
-rw-r--r--tests/amdgpu/uvd_enc_tests.c491
-rw-r--r--tests/amdgpu/uve_ib.h527
-rw-r--r--tests/amdgpu/vce_tests.c79
-rw-r--r--tests/amdgpu/vcn_tests.c398
-rw-r--r--tests/amdgpu/vm_tests.c169
-rw-r--r--tests/drmsl.c28
-rw-r--r--tests/drmstat.c419
-rw-r--r--tests/etnaviv/Makefile.am1
-rw-r--r--tests/etnaviv/etnaviv_2d_test.c4
-rw-r--r--tests/etnaviv/etnaviv_bo_cache_test.c4
-rw-r--r--tests/etnaviv/meson.build45
-rw-r--r--tests/etnaviv/write_bmp.c4
-rw-r--r--tests/exynos/exynos_fimg2d_event.c27
-rw-r--r--tests/exynos/exynos_fimg2d_perf.c34
-rw-r--r--tests/exynos/exynos_fimg2d_test.c68
-rw-r--r--tests/exynos/meson.build54
-rw-r--r--tests/kms/kms-steal-crtc.c4
-rw-r--r--tests/kms/kms-universal-planes.c4
-rw-r--r--tests/kms/libkms-test-crtc.c4
-rw-r--r--tests/kms/libkms-test-device.c13
-rw-r--r--tests/kms/libkms-test-framebuffer.c4
-rw-r--r--tests/kms/libkms-test-plane.c4
-rw-r--r--tests/kms/libkms-test-screen.c4
-rw-r--r--tests/kms/meson.build49
-rw-r--r--tests/kmstest/meson.build30
-rw-r--r--tests/meson.build86
-rw-r--r--tests/modeprint/meson.build29
-rw-r--r--tests/modeprint/modeprint.c2
-rw-r--r--tests/modetest/buffers.c4
-rw-r--r--tests/modetest/cursor.c4
-rw-r--r--tests/modetest/meson.build29
-rw-r--r--tests/modetest/modetest.c110
-rw-r--r--tests/nouveau/meson.build30
-rw-r--r--tests/nouveau/threaded.c4
-rw-r--r--tests/proptest/meson.build28
-rw-r--r--tests/radeon/meson.build27
-rw-r--r--tests/tegra/meson.build27
-rw-r--r--tests/tegra/openclose.c4
-rw-r--r--tests/util/format.c4
-rw-r--r--tests/util/kms.c7
-rw-r--r--tests/util/meson.build28
-rw-r--r--tests/util/pattern.c19
-rw-r--r--tests/vbltest/meson.build28
-rw-r--r--tests/vbltest/vbltest.c4
-rw-r--r--vc4/meson.build28
-rw-r--r--xf86atomic.h6
-rw-r--r--xf86drm.c243
-rw-r--r--xf86drm.h34
-rw-r--r--xf86drmHash.c6
-rw-r--r--xf86drmMode.c411
-rw-r--r--xf86drmMode.h52
206 files changed, 9813 insertions, 2616 deletions
diff --git a/.editorconfig b/.editorconfig
index 893b7be0..29b4f393 100644
--- a/.editorconfig
+++ b/.editorconfig
@@ -17,3 +17,7 @@ indent_style = tab
17[*.m4] 17[*.m4]
18indent_style = space 18indent_style = space
19indent_size = 2 19indent_size = 2
20
21[{meson.build,meson_options.txt}]
22indent_style = space
23indent_size = 2
diff --git a/Android.bp b/Android.bp
index 429c22cc..9121068a 100644
--- a/Android.bp
+++ b/Android.bp
@@ -54,7 +54,7 @@ cc_library {
54 "libdrm_sources", 54 "libdrm_sources",
55 ], 55 ],
56 56
57 export_include_dirs: ["include/drm"], 57 export_include_dirs: ["include/drm", "android"],
58 58
59 cflags: [ 59 cflags: [
60 "-Wno-enum-conversion", 60 "-Wno-enum-conversion",
diff --git a/Makefile.am b/Makefile.am
index 2bf644be..6de56770 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -113,6 +113,7 @@ SUBDIRS = \
113 $(TEGRA_SUBDIR) \ 113 $(TEGRA_SUBDIR) \
114 $(VC4_SUBDIR) \ 114 $(VC4_SUBDIR) \
115 $(ETNAVIV_SUBDIR) \ 115 $(ETNAVIV_SUBDIR) \
116 data \
116 tests \ 117 tests \
117 $(MAN_SUBDIR) \ 118 $(MAN_SUBDIR) \
118 $(ROCKCHIP_SUBDIR) 119 $(ROCKCHIP_SUBDIR)
@@ -139,7 +140,37 @@ if HAVE_VMWGFX
139klibdrminclude_HEADERS += $(LIBDRM_INCLUDE_VMWGFX_H_FILES) 140klibdrminclude_HEADERS += $(LIBDRM_INCLUDE_VMWGFX_H_FILES)
140endif 141endif
141 142
142EXTRA_DIST = include/drm/README 143EXTRA_DIST = \
144 include/drm/README \
145 amdgpu/meson.build \
146 data/meson.build \
147 etnaviv/meson.build \
148 exynos/meson.build \
149 freedreno/meson.build \
150 intel/meson.build \
151 libkms/meson.build \
152 man/meson.build \
153 nouveau/meson.build \
154 omap/meson.build \
155 radeon/meson.build \
156 tegra/meson.build \
157 tests/amdgpu/meson.build \
158 tests/etnaviv/meson.build \
159 tests/exynos/meson.build \
160 tests/kms/meson.build \
161 tests/kmstest/meson.build \
162 tests/meson.build \
163 tests/modeprint/meson.build \
164 tests/modetest/meson.build \
165 tests/nouveau/meson.build \
166 tests/proptest/meson.build \
167 tests/radeon/meson.build \
168 tests/tegra/meson.build \
169 tests/util/meson.build \
170 tests/vbltest/meson.build \
171 vc4/meson.build \
172 meson.build \
173 meson_options.txt
143 174
144copy-headers : 175copy-headers :
145 cp -r $(kernel_source)/include/uapi/drm/*.h $(top_srcdir)/include/drm/ 176 cp -r $(kernel_source)/include/uapi/drm/*.h $(top_srcdir)/include/drm/
diff --git a/Makefile.sources b/Makefile.sources
index 10aa1d0f..1f8372bc 100644
--- a/Makefile.sources
+++ b/Makefile.sources
@@ -37,5 +37,8 @@ LIBDRM_INCLUDE_H_FILES := \
37 include/drm/via_drm.h \ 37 include/drm/via_drm.h \
38 include/drm/virtgpu_drm.h 38 include/drm/virtgpu_drm.h
39 39
40LIBDRM_INCLUDE_ANDROID_H_FILES := \
41 android/gralloc_handle.h
42
40LIBDRM_INCLUDE_VMWGFX_H_FILES := \ 43LIBDRM_INCLUDE_VMWGFX_H_FILES := \
41 include/drm/vmwgfx_drm.h 44 include/drm/vmwgfx_drm.h
diff --git a/README b/README
index 26cab9d3..f3df9ac1 100644
--- a/README
+++ b/README
@@ -15,9 +15,27 @@ with an older kernel.
15Compiling 15Compiling
16--------- 16---------
17 17
18libdrm is a standard autotools package and follows the normal 18libdrm has two build systems, a legacy autotools build system, and a newer
19configure, build and install steps. The first step is to configure 19meson build system. The meson build system is much faster, and offers a
20the package, which is done by running the configure shell script: 20slightly different interface, but otherwise provides an equivalent feature set.
21
22To use it:
23
24 meson builddir/
25
26By default this will install into /usr/local, you can change your prefix
27with --prefix=/usr (or `meson configure builddir/ -Dprefix=/usr` after
28the initial meson setup).
29
30Then use ninja to build and install:
31
32 ninja -C builddir/ install
33
34If you are installing into a system location you will need to run install
35separately, and as root.
36
37
38Alternatively you can invoke autotools configure:
21 39
22 ./configure 40 ./configure
23 41
diff --git a/RELEASING b/RELEASING
index 262ca08d..7e03e3b9 100644
--- a/RELEASING
+++ b/RELEASING
@@ -9,9 +9,9 @@ However, this is up to whoever is driving the feature in question.
9 9
10Follow these steps to release a new version of libdrm: 10Follow these steps to release a new version of libdrm:
11 11
12 1) Bump the version number in configure.ac. We seem to have settled 12 1) Bump the version number in configure.ac and meson.build. We seem
13 for 2.4.x as the versioning scheme for libdrm, so just bump the 13 to have settled for 2.4.x as the versioning scheme for libdrm, so
14 micro version. 14 just bump the micro version.
15 15
16 2) Run autoconf and then re-run ./configure so the build system 16 2) Run autoconf and then re-run ./configure so the build system
17 picks up the new version number. 17 picks up the new version number.
diff --git a/amdgpu/.editorconfig b/amdgpu/.editorconfig
new file mode 100644
index 00000000..426273fd
--- /dev/null
+++ b/amdgpu/.editorconfig
@@ -0,0 +1,13 @@
1# To use this config with your editor, follow the instructions at:
2# http://editorconfig.org
3
4[*]
5charset = utf-8
6indent_style = tab
7indent_size = 8
8tab_width = 8
9insert_final_newline = true
10
11[meson.build]
12indent_style = space
13indent_size = 2
diff --git a/amdgpu/Makefile.am b/amdgpu/Makefile.am
index cf7bc1ba..a1b0d05c 100644
--- a/amdgpu/Makefile.am
+++ b/amdgpu/Makefile.am
@@ -30,12 +30,16 @@ AM_CFLAGS = \
30 $(PTHREADSTUBS_CFLAGS) \ 30 $(PTHREADSTUBS_CFLAGS) \
31 -I$(top_srcdir)/include/drm 31 -I$(top_srcdir)/include/drm
32 32
33libdrmdatadir = @libdrmdatadir@
34AM_CPPFLAGS = -DAMDGPU_ASIC_ID_TABLE=\"${libdrmdatadir}/amdgpu.ids\"
35
33libdrm_amdgpu_la_LTLIBRARIES = libdrm_amdgpu.la 36libdrm_amdgpu_la_LTLIBRARIES = libdrm_amdgpu.la
34libdrm_amdgpu_ladir = $(libdir) 37libdrm_amdgpu_ladir = $(libdir)
35libdrm_amdgpu_la_LDFLAGS = -version-number 1:0:0 -no-undefined 38libdrm_amdgpu_la_LDFLAGS = -version-number 1:0:0 -no-undefined
36libdrm_amdgpu_la_LIBADD = ../libdrm.la @PTHREADSTUBS_LIBS@ 39libdrm_amdgpu_la_LIBADD = ../libdrm.la @PTHREADSTUBS_LIBS@
37 40
38libdrm_amdgpu_la_SOURCES = $(LIBDRM_AMDGPU_FILES) 41libdrm_amdgpu_la_SOURCES = $(LIBDRM_AMDGPU_FILES)
42amdgpu_asic_id.lo: $(top_srcdir)/data/amdgpu.ids
39 43
40libdrm_amdgpuincludedir = ${includedir}/libdrm 44libdrm_amdgpuincludedir = ${includedir}/libdrm
41libdrm_amdgpuinclude_HEADERS = $(LIBDRM_AMDGPU_H_FILES) 45libdrm_amdgpuinclude_HEADERS = $(LIBDRM_AMDGPU_H_FILES)
diff --git a/amdgpu/Makefile.sources b/amdgpu/Makefile.sources
index 487b9e0a..498b64cc 100644
--- a/amdgpu/Makefile.sources
+++ b/amdgpu/Makefile.sources
@@ -1,11 +1,12 @@
1LIBDRM_AMDGPU_FILES := \ 1LIBDRM_AMDGPU_FILES := \
2 amdgpu_asic_id.h \ 2 amdgpu_asic_id.c \
3 amdgpu_bo.c \ 3 amdgpu_bo.c \
4 amdgpu_cs.c \ 4 amdgpu_cs.c \
5 amdgpu_device.c \ 5 amdgpu_device.c \
6 amdgpu_gpu_info.c \ 6 amdgpu_gpu_info.c \
7 amdgpu_internal.h \ 7 amdgpu_internal.h \
8 amdgpu_vamgr.c \ 8 amdgpu_vamgr.c \
9 amdgpu_vm.c \
9 util_hash.c \ 10 util_hash.c \
10 util_hash.h \ 11 util_hash.h \
11 util_hash_table.c \ 12 util_hash_table.c \
diff --git a/amdgpu/amdgpu-symbol-check b/amdgpu/amdgpu-symbol-check
index 87f4fd2c..90b7a1d6 100755
--- a/amdgpu/amdgpu-symbol-check
+++ b/amdgpu/amdgpu-symbol-check
@@ -3,7 +3,7 @@
3# The following symbols (past the first five) are taken from the public headers. 3# The following symbols (past the first five) are taken from the public headers.
4# A list of the latter should be available Makefile.am/libdrm_amdgpuinclude_HEADERS 4# A list of the latter should be available Makefile.am/libdrm_amdgpuinclude_HEADERS
5 5
6FUNCS=$(nm -D --format=bsd --defined-only ${1-.libs/libdrm_amdgpu.so} | awk '{print $3}' | while read func; do 6FUNCS=$($NM -D --format=bsd --defined-only ${1-.libs/libdrm_amdgpu.so} | awk '{print $3}' | while read func; do
7( grep -q "^$func$" || echo $func ) <<EOF 7( grep -q "^$func$" || echo $func ) <<EOF
8__bss_start 8__bss_start
9_edata 9_edata
@@ -22,16 +22,34 @@ amdgpu_bo_list_update
22amdgpu_bo_query_info 22amdgpu_bo_query_info
23amdgpu_bo_set_metadata 23amdgpu_bo_set_metadata
24amdgpu_bo_va_op 24amdgpu_bo_va_op
25amdgpu_bo_va_op_raw
25amdgpu_bo_wait_for_idle 26amdgpu_bo_wait_for_idle
26amdgpu_create_bo_from_user_mem 27amdgpu_create_bo_from_user_mem
28amdgpu_cs_chunk_fence_info_to_data
29amdgpu_cs_chunk_fence_to_dep
27amdgpu_cs_create_semaphore 30amdgpu_cs_create_semaphore
31amdgpu_cs_create_syncobj
32amdgpu_cs_create_syncobj2
28amdgpu_cs_ctx_create 33amdgpu_cs_ctx_create
34amdgpu_cs_ctx_create2
29amdgpu_cs_ctx_free 35amdgpu_cs_ctx_free
30amdgpu_cs_destroy_semaphore 36amdgpu_cs_destroy_semaphore
37amdgpu_cs_destroy_syncobj
38amdgpu_cs_export_syncobj
39amdgpu_cs_fence_to_handle
40amdgpu_cs_import_syncobj
31amdgpu_cs_query_fence_status 41amdgpu_cs_query_fence_status
32amdgpu_cs_query_reset_state 42amdgpu_cs_query_reset_state
43amdgpu_query_sw_info
33amdgpu_cs_signal_semaphore 44amdgpu_cs_signal_semaphore
34amdgpu_cs_submit 45amdgpu_cs_submit
46amdgpu_cs_submit_raw
47amdgpu_cs_syncobj_export_sync_file
48amdgpu_cs_syncobj_import_sync_file
49amdgpu_cs_syncobj_reset
50amdgpu_cs_syncobj_signal
51amdgpu_cs_syncobj_wait
52amdgpu_cs_wait_fences
35amdgpu_cs_wait_semaphore 53amdgpu_cs_wait_semaphore
36amdgpu_device_deinitialize 54amdgpu_device_deinitialize
37amdgpu_device_initialize 55amdgpu_device_initialize
@@ -45,10 +63,13 @@ amdgpu_query_heap_info
45amdgpu_query_hw_ip_count 63amdgpu_query_hw_ip_count
46amdgpu_query_hw_ip_info 64amdgpu_query_hw_ip_info
47amdgpu_query_info 65amdgpu_query_info
66amdgpu_query_sensor_info
48amdgpu_read_mm_registers 67amdgpu_read_mm_registers
49amdgpu_va_range_alloc 68amdgpu_va_range_alloc
50amdgpu_va_range_free 69amdgpu_va_range_free
51amdgpu_va_range_query 70amdgpu_va_range_query
71amdgpu_vm_reserve_vmid
72amdgpu_vm_unreserve_vmid
52EOF 73EOF
53done) 74done)
54 75
diff --git a/amdgpu/amdgpu.h b/amdgpu/amdgpu.h
index 7b26a04c..36f91058 100644
--- a/amdgpu/amdgpu.h
+++ b/amdgpu/amdgpu.h
@@ -37,6 +37,10 @@
37#include <stdint.h> 37#include <stdint.h>
38#include <stdbool.h> 38#include <stdbool.h>
39 39
40#ifdef __cplusplus
41extern "C" {
42#endif
43
40struct drm_amdgpu_info_hw_ip; 44struct drm_amdgpu_info_hw_ip;
41 45
42/*--------------------------------------------------------------------------*/ 46/*--------------------------------------------------------------------------*/
@@ -90,6 +94,10 @@ enum amdgpu_gpu_va_range
90 amdgpu_gpu_va_range_general = 0 94 amdgpu_gpu_va_range_general = 0
91}; 95};
92 96
97enum amdgpu_sw_info {
98 amdgpu_sw_info_address32_hi = 0,
99};
100
93/*--------------------------------------------------------------------------*/ 101/*--------------------------------------------------------------------------*/
94/* -------------------------- Datatypes ----------------------------------- */ 102/* -------------------------- Datatypes ----------------------------------- */
95/*--------------------------------------------------------------------------*/ 103/*--------------------------------------------------------------------------*/
@@ -794,8 +802,9 @@ int amdgpu_bo_list_update(amdgpu_bo_list_handle handle,
794 * context will always be executed in order (first come, first serve). 802 * context will always be executed in order (first come, first serve).
795 * 803 *
796 * 804 *
797 * \param dev - \c [in] Device handle. See #amdgpu_device_initialize() 805 * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
798 * \param context - \c [out] GPU Context handle 806 * \param priority - \c [in] Context creation flags. See AMDGPU_CTX_PRIORITY_*
807 * \param context - \c [out] GPU Context handle
799 * 808 *
800 * \return 0 on success\n 809 * \return 0 on success\n
801 * <0 - Negative POSIX Error code 810 * <0 - Negative POSIX Error code
@@ -803,6 +812,18 @@ int amdgpu_bo_list_update(amdgpu_bo_list_handle handle,
803 * \sa amdgpu_cs_ctx_free() 812 * \sa amdgpu_cs_ctx_free()
804 * 813 *
805*/ 814*/
815int amdgpu_cs_ctx_create2(amdgpu_device_handle dev,
816 uint32_t priority,
817 amdgpu_context_handle *context);
818/**
819 * Create GPU execution Context
820 *
821 * Refer to amdgpu_cs_ctx_create2 for full documentation. This call
822 * is missing the priority parameter.
823 *
824 * \sa amdgpu_cs_ctx_create2()
825 *
826*/
806int amdgpu_cs_ctx_create(amdgpu_device_handle dev, 827int amdgpu_cs_ctx_create(amdgpu_device_handle dev,
807 amdgpu_context_handle *context); 828 amdgpu_context_handle *context);
808 829
@@ -907,6 +928,29 @@ int amdgpu_cs_query_fence_status(struct amdgpu_cs_fence *fence,
907 uint64_t flags, 928 uint64_t flags,
908 uint32_t *expired); 929 uint32_t *expired);
909 930
931/**
932 * Wait for multiple fences
933 *
934 * \param fences - \c [in] The fence array to wait
935 * \param fence_count - \c [in] The fence count
936 * \param wait_all - \c [in] If true, wait all fences to be signaled,
937 * otherwise, wait at least one fence
938 * \param timeout_ns - \c [in] The timeout to wait, in nanoseconds
939 * \param status - \c [out] '1' for signaled, '0' for timeout
940 * \param first - \c [out] the index of the first signaled fence from @fences
941 *
942 * \return 0 on success
943 * <0 - Negative POSIX Error code
944 *
945 * \note Currently it supports only one amdgpu_device. All fences come from
946 * the same amdgpu_device with the same fd.
947*/
948int amdgpu_cs_wait_fences(struct amdgpu_cs_fence *fences,
949 uint32_t fence_count,
950 bool wait_all,
951 uint64_t timeout_ns,
952 uint32_t *status, uint32_t *first);
953
910/* 954/*
911 * Query / Info API 955 * Query / Info API
912 * 956 *
@@ -1046,6 +1090,23 @@ int amdgpu_query_info(amdgpu_device_handle dev, unsigned info_id,
1046 unsigned size, void *value); 1090 unsigned size, void *value);
1047 1091
1048/** 1092/**
1093 * Query hardware or driver information.
1094 *
1095 * The return size is query-specific and depends on the "info_id" parameter.
1096 * No more than "size" bytes is returned.
1097 *
1098 * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
1099 * \param info - \c [in] amdgpu_sw_info_*
1100 * \param value - \c [out] Pointer to the return value.
1101 *
1102 * \return 0 on success\n
1103 * <0 - Negative POSIX error code
1104 *
1105*/
1106int amdgpu_query_sw_info(amdgpu_device_handle dev, enum amdgpu_sw_info info,
1107 void *value);
1108
1109/**
1049 * Query information about GDS 1110 * Query information about GDS
1050 * 1111 *
1051 * \param dev - \c [in] Device handle. See #amdgpu_device_initialize() 1112 * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
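For context, a minimal sketch (not part of the patch) of how the new amdgpu_query_sw_info() might be called once a device handle exists; treating the returned value as a uint32_t for amdgpu_sw_info_address32_hi is an assumption based on typical callers.

    #include <stdint.h>
    #include <stdio.h>
    #include <amdgpu.h>   /* libdrm_amdgpu public header */

    static void print_address32_hi(amdgpu_device_handle dev)
    {
            uint32_t hi = 0;

            /* 0 on success, negative POSIX error code otherwise */
            if (amdgpu_query_sw_info(dev, amdgpu_sw_info_address32_hi, &hi) == 0)
                    printf("address32_hi = 0x%08x\n", hi);
    }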
@@ -1059,6 +1120,24 @@ int amdgpu_query_gds_info(amdgpu_device_handle dev,
1059 struct amdgpu_gds_resource_info *gds_info); 1120 struct amdgpu_gds_resource_info *gds_info);
1060 1121
1061/** 1122/**
1123 * Query information about sensor.
1124 *
1125 * The return size is query-specific and depends on the "sensor_type"
1126 * parameter. No more than "size" bytes is returned.
1127 *
1128 * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
1129 * \param sensor_type - \c [in] AMDGPU_INFO_SENSOR_*
1130 * \param size - \c [in] Size of the returned value.
1131 * \param value - \c [out] Pointer to the return value.
1132 *
1133 * \return 0 on success\n
1134 * <0 - Negative POSIX Error code
1135 *
1136*/
1137int amdgpu_query_sensor_info(amdgpu_device_handle dev, unsigned sensor_type,
1138 unsigned size, void *value);
1139
1140/**
1062 * Read a set of consecutive memory-mapped registers. 1141 * Read a set of consecutive memory-mapped registers.
1063 * Not all registers are allowed to be read by userspace. 1142 * Not all registers are allowed to be read by userspace.
1064 * 1143 *
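Similarly, a hedged sketch of the new sensor query; AMDGPU_INFO_SENSOR_GFX_SCLK is assumed to be one of the AMDGPU_INFO_SENSOR_* values provided by the updated amdgpu_drm.h.

    #include <stdint.h>
    #include <stdio.h>
    #include <amdgpu.h>
    #include <amdgpu_drm.h>

    static void print_gfx_clock(amdgpu_device_handle dev)
    {
            uint32_t sclk_mhz = 0;

            if (amdgpu_query_sensor_info(dev, AMDGPU_INFO_SENSOR_GFX_SCLK,
                                         sizeof(sclk_mhz), &sclk_mhz) == 0)
                    printf("current GFX clock: %u MHz\n", sclk_mhz);
    }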
@@ -1083,6 +1162,7 @@ int amdgpu_read_mm_registers(amdgpu_device_handle dev, unsigned dword_offset,
1083 * Flag to request VA address range in the 32bit address space 1162 * Flag to request VA address range in the 32bit address space
1084*/ 1163*/
1085#define AMDGPU_VA_RANGE_32_BIT 0x1 1164#define AMDGPU_VA_RANGE_32_BIT 0x1
1165#define AMDGPU_VA_RANGE_HIGH 0x2
1086 1166
1087/** 1167/**
1088 * Allocate virtual address range 1168 * Allocate virtual address range
@@ -1186,6 +1266,34 @@ int amdgpu_bo_va_op(amdgpu_bo_handle bo,
1186 uint32_t ops); 1266 uint32_t ops);
1187 1267
1188/** 1268/**
1269 * VA mapping/unmapping for a buffer object or PRT region.
1270 *
1271 * This is not a simple drop-in extension for amdgpu_bo_va_op; instead, all
1272 * parameters are treated "raw", i.e. size is not automatically aligned, and
1273 * all flags must be specified explicitly.
1274 *
1275 * \param dev - \c [in] device handle
1276 * \param bo - \c [in] BO handle (may be NULL)
1277 * \param offset - \c [in] Start offset to map
1278 * \param size - \c [in] Size to map
1279 * \param addr - \c [in] Start virtual address.
1280 * \param flags - \c [in] Supported flags for mapping/unmapping
1281 * \param ops - \c [in] AMDGPU_VA_OP_MAP or AMDGPU_VA_OP_UNMAP
1282 *
1283 * \return 0 on success\n
1284 * <0 - Negative POSIX Error code
1285 *
1286*/
1287
1288int amdgpu_bo_va_op_raw(amdgpu_device_handle dev,
1289 amdgpu_bo_handle bo,
1290 uint64_t offset,
1291 uint64_t size,
1292 uint64_t addr,
1293 uint64_t flags,
1294 uint32_t ops);
1295
1296/**
1189 * create semaphore 1297 * create semaphore
1190 * 1298 *
1191 * \param sem - \c [out] semaphore handle 1299 * \param sem - \c [out] semaphore handle
@@ -1255,4 +1363,216 @@ int amdgpu_cs_destroy_semaphore(amdgpu_semaphore_handle sem);
1255*/ 1363*/
1256const char *amdgpu_get_marketing_name(amdgpu_device_handle dev); 1364const char *amdgpu_get_marketing_name(amdgpu_device_handle dev);
1257 1365
1366/**
1367 * Create kernel sync object
1368 *
1369 * \param dev - \c [in] device handle
1370 * \param flags - \c [in] flags that affect creation
1371 * \param syncobj - \c [out] sync object handle
1372 *
1373 * \return 0 on success\n
1374 * <0 - Negative POSIX Error code
1375 *
1376*/
1377int amdgpu_cs_create_syncobj2(amdgpu_device_handle dev,
1378 uint32_t flags,
1379 uint32_t *syncobj);
1380
1381/**
1382 * Create kernel sync object
1383 *
1384 * \param dev - \c [in] device handle
1385 * \param syncobj - \c [out] sync object handle
1386 *
1387 * \return 0 on success\n
1388 * <0 - Negative POSIX Error code
1389 *
1390*/
1391int amdgpu_cs_create_syncobj(amdgpu_device_handle dev,
1392 uint32_t *syncobj);
1393/**
1394 * Destroy kernel sync object
1395 *
1396 * \param dev - \c [in] device handle
1397 * \param syncobj - \c [in] sync object handle
1398 *
1399 * \return 0 on success\n
1400 * <0 - Negative POSIX Error code
1401 *
1402*/
1403int amdgpu_cs_destroy_syncobj(amdgpu_device_handle dev,
1404 uint32_t syncobj);
1405
1406/**
1407 * Reset kernel sync objects to unsignalled state.
1408 *
1409 * \param dev - \c [in] device handle
1410 * \param syncobjs - \c [in] array of sync object handles
1411 * \param syncobj_count - \c [in] number of handles in syncobjs
1412 *
1413 * \return 0 on success\n
1414 * <0 - Negative POSIX Error code
1415 *
1416*/
1417int amdgpu_cs_syncobj_reset(amdgpu_device_handle dev,
1418 const uint32_t *syncobjs, uint32_t syncobj_count);
1419
1420/**
1421 * Signal kernel sync objects.
1422 *
1423 * \param dev - \c [in] device handle
1424 * \param syncobjs - \c [in] array of sync object handles
1425 * \param syncobj_count - \c [in] number of handles in syncobjs
1426 *
1427 * \return 0 on success\n
1428 * <0 - Negative POSIX Error code
1429 *
1430*/
1431int amdgpu_cs_syncobj_signal(amdgpu_device_handle dev,
1432 const uint32_t *syncobjs, uint32_t syncobj_count);
1433
1434/**
1435 * Wait for one or all sync objects to signal.
1436 *
1437 * \param dev - \c [in] self-explanatory
1438 * \param handles - \c [in] array of sync object handles
1439 * \param num_handles - \c [in] self-explanatory
1440 * \param timeout_nsec - \c [in] self-explanatory
1441 * \param flags - \c [in] a bitmask of DRM_SYNCOBJ_WAIT_FLAGS_*
1442 * \param first_signaled - \c [in] self-explanatory
1443 *
1444 * \return 0 on success\n
1445 * -ETIME - Timeout
1446 * <0 - Negative POSIX Error code
1447 *
1448 */
1449int amdgpu_cs_syncobj_wait(amdgpu_device_handle dev,
1450 uint32_t *handles, unsigned num_handles,
1451 int64_t timeout_nsec, unsigned flags,
1452 uint32_t *first_signaled);
1453
1454/**
1455 * Export kernel sync object to shareable fd.
1456 *
1457 * \param dev - \c [in] device handle
1458 * \param syncobj - \c [in] sync object handle
1459 * \param shared_fd - \c [out] shared file descriptor.
1460 *
1461 * \return 0 on success\n
1462 * <0 - Negative POSIX Error code
1463 *
1464*/
1465int amdgpu_cs_export_syncobj(amdgpu_device_handle dev,
1466 uint32_t syncobj,
1467 int *shared_fd);
1468/**
1469 * Import kernel sync object from shareable fd.
1470 *
1471 * \param dev - \c [in] device handle
1472 * \param shared_fd - \c [in] shared file descriptor.
1473 * \param syncobj - \c [out] sync object handle
1474 *
1475 * \return 0 on success\n
1476 * <0 - Negative POSIX Error code
1477 *
1478*/
1479int amdgpu_cs_import_syncobj(amdgpu_device_handle dev,
1480 int shared_fd,
1481 uint32_t *syncobj);
1482
1483/**
1484 * Export kernel sync object to a sync_file.
1485 *
1486 * \param dev - \c [in] device handle
1487 * \param syncobj - \c [in] sync object handle
1488 * \param sync_file_fd - \c [out] sync_file file descriptor.
1489 *
1490 * \return 0 on success\n
1491 * <0 - Negative POSIX Error code
1492 *
1493 */
1494int amdgpu_cs_syncobj_export_sync_file(amdgpu_device_handle dev,
1495 uint32_t syncobj,
1496 int *sync_file_fd);
1497
1498/**
1499 * Import kernel sync object from a sync_file.
1500 *
1501 * \param dev - \c [in] device handle
1502 * \param syncobj - \c [in] sync object handle
1503 * \param sync_file_fd - \c [in] sync_file file descriptor.
1504 *
1505 * \return 0 on success\n
1506 * <0 - Negative POSIX Error code
1507 *
1508 */
1509int amdgpu_cs_syncobj_import_sync_file(amdgpu_device_handle dev,
1510 uint32_t syncobj,
1511 int sync_file_fd);
1512
1513/**
1514 * Export an amdgpu fence as a handle (syncobj or fd).
1515 *
1516 * \param what AMDGPU_FENCE_TO_HANDLE_GET_{SYNCOBJ, FD}
1517 * \param out_handle returned handle
1518 *
1519 * \return 0 on success\n
1520 * <0 - Negative POSIX Error code
1521 */
1522int amdgpu_cs_fence_to_handle(amdgpu_device_handle dev,
1523 struct amdgpu_cs_fence *fence,
1524 uint32_t what,
1525 uint32_t *out_handle);
1526
1527/**
1528 * Submit raw command submission to kernel
1529 *
1530 * \param dev - \c [in] device handle
1531 * \param context - \c [in] context handle for context id
1532 * \param bo_list_handle - \c [in] request bo list handle (0 for none)
1533 * \param num_chunks - \c [in] number of CS chunks to submit
1534 * \param chunks - \c [in] array of CS chunks
1535 * \param seq_no - \c [out] output sequence number for submission.
1536 *
1537 * \return 0 on success\n
1538 * <0 - Negative POSIX Error code
1539 *
1540 */
1541struct drm_amdgpu_cs_chunk;
1542struct drm_amdgpu_cs_chunk_dep;
1543struct drm_amdgpu_cs_chunk_data;
1544
1545int amdgpu_cs_submit_raw(amdgpu_device_handle dev,
1546 amdgpu_context_handle context,
1547 amdgpu_bo_list_handle bo_list_handle,
1548 int num_chunks,
1549 struct drm_amdgpu_cs_chunk *chunks,
1550 uint64_t *seq_no);
1551
1552void amdgpu_cs_chunk_fence_to_dep(struct amdgpu_cs_fence *fence,
1553 struct drm_amdgpu_cs_chunk_dep *dep);
1554void amdgpu_cs_chunk_fence_info_to_data(struct amdgpu_cs_fence_info *fence_info,
1555 struct drm_amdgpu_cs_chunk_data *data);
1556
1557/**
1558 * Reserve VMID
1559 * \param context - \c [in] GPU Context
1560 * \param flags - \c [in] TBD
1561 *
1562 * \return 0 on success otherwise POSIX Error code
1563*/
1564int amdgpu_vm_reserve_vmid(amdgpu_device_handle dev, uint32_t flags);
1565
1566/**
1567 * Free reserved VMID
1568 * \param context - \c [in] GPU Context
1569 * \param flags - \c [in] TBD
1570 *
1571 * \return 0 on success otherwise POSIX Error code
1572*/
1573int amdgpu_vm_unreserve_vmid(amdgpu_device_handle dev, uint32_t flags);
1574
1575#ifdef __cplusplus
1576}
1577#endif
1258#endif /* #ifdef _AMDGPU_H_ */ 1578#endif /* #ifdef _AMDGPU_H_ */
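A short usage sketch for the sync-object entry points declared above (not part of the patch): create a syncobj, export it as a shareable fd for another process, and clean up.

    #include <unistd.h>
    #include <stdint.h>
    #include <amdgpu.h>

    static int share_syncobj(amdgpu_device_handle dev)
    {
            uint32_t syncobj;
            int shared_fd, r;

            r = amdgpu_cs_create_syncobj2(dev, 0 /* flags */, &syncobj);
            if (r)
                    return r;

            /* Export as an fd; the receiving process turns it back into a
             * sync object with amdgpu_cs_import_syncobj() on its own device. */
            r = amdgpu_cs_export_syncobj(dev, syncobj, &shared_fd);
            if (r) {
                    amdgpu_cs_destroy_syncobj(dev, syncobj);
                    return r;
            }

            /* ... hand shared_fd to the other process (e.g. over a socket) ... */

            close(shared_fd);
            return amdgpu_cs_destroy_syncobj(dev, syncobj);
    }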
diff --git a/amdgpu/amdgpu_asic_id.c b/amdgpu/amdgpu_asic_id.c
new file mode 100644
index 00000000..a5007ffc
--- /dev/null
+++ b/amdgpu/amdgpu_asic_id.c
@@ -0,0 +1,161 @@
1/*
2 * Copyright © 2017 Advanced Micro Devices, Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 */
24
25#include <ctype.h>
26#include <stdio.h>
27#include <stdlib.h>
28#include <stdint.h>
29#include <string.h>
30#include <unistd.h>
31#include <errno.h>
32
33#include "xf86drm.h"
34#include "amdgpu_drm.h"
35#include "amdgpu_internal.h"
36
37static int parse_one_line(struct amdgpu_device *dev, const char *line)
38{
39 char *buf, *saveptr;
40 char *s_did;
41 uint32_t did;
42 char *s_rid;
43 uint32_t rid;
44 char *s_name;
45 char *endptr;
46 int r = -EINVAL;
47
48 /* ignore empty line and commented line */
49 if (strlen(line) == 0 || line[0] == '#')
50 return -EAGAIN;
51
52 buf = strdup(line);
53 if (!buf)
54 return -ENOMEM;
55
56 /* device id */
57 s_did = strtok_r(buf, ",", &saveptr);
58 if (!s_did)
59 goto out;
60
61 did = strtol(s_did, &endptr, 16);
62 if (*endptr)
63 goto out;
64
65 if (did != dev->info.asic_id) {
66 r = -EAGAIN;
67 goto out;
68 }
69
70 /* revision id */
71 s_rid = strtok_r(NULL, ",", &saveptr);
72 if (!s_rid)
73 goto out;
74
75 rid = strtol(s_rid, &endptr, 16);
76 if (*endptr)
77 goto out;
78
79 if (rid != dev->info.pci_rev_id) {
80 r = -EAGAIN;
81 goto out;
82 }
83
84 /* marketing name */
85 s_name = strtok_r(NULL, ",", &saveptr);
86 if (!s_name)
87 goto out;
88
89 /* trim leading whitespaces or tabs */
90 while (isblank(*s_name))
91 s_name++;
92 if (strlen(s_name) == 0)
93 goto out;
94
95 dev->marketing_name = strdup(s_name);
96 if (dev->marketing_name)
97 r = 0;
98 else
99 r = -ENOMEM;
100
101out:
102 free(buf);
103
104 return r;
105}
106
107void amdgpu_parse_asic_ids(struct amdgpu_device *dev)
108{
109 FILE *fp;
110 char *line = NULL;
111 size_t len = 0;
112 ssize_t n;
113 int line_num = 1;
114 int r = 0;
115
116 fp = fopen(AMDGPU_ASIC_ID_TABLE, "r");
117 if (!fp) {
118 fprintf(stderr, "%s: %s\n", AMDGPU_ASIC_ID_TABLE,
119 strerror(errno));
120 return;
121 }
122
123 /* 1st valid line is file version */
124 while ((n = getline(&line, &len, fp)) != -1) {
125 /* trim trailing newline */
126 if (line[n - 1] == '\n')
127 line[n - 1] = '\0';
128
129 /* ignore empty line and commented line */
130 if (strlen(line) == 0 || line[0] == '#') {
131 line_num++;
132 continue;
133 }
134
135 drmMsg("%s version: %s\n", AMDGPU_ASIC_ID_TABLE, line);
136 break;
137 }
138
139 while ((n = getline(&line, &len, fp)) != -1) {
140 /* trim trailing newline */
141 if (line[n - 1] == '\n')
142 line[n - 1] = '\0';
143
144 r = parse_one_line(dev, line);
145 if (r != -EAGAIN)
146 break;
147
148 line_num++;
149 }
150
151 if (r == -EINVAL) {
152 fprintf(stderr, "Invalid format: %s: line %d: %s\n",
153 AMDGPU_ASIC_ID_TABLE, line_num, line);
154 } else if (r && r != -EAGAIN) {
155 fprintf(stderr, "%s: Cannot parse ASIC IDs: %s\n",
156 __func__, strerror(-r));
157 }
158
159 free(line);
160 fclose(fp);
161}
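For context, a sketch of the consumer side: amdgpu_get_marketing_name() returns whatever the parser above extracted from amdgpu.ids, so a caller might use it roughly like this (fd is assumed to be an already opened DRM render node).

    #include <stdio.h>
    #include <stdint.h>
    #include <amdgpu.h>

    static void print_gpu_name(int fd)
    {
            amdgpu_device_handle dev;
            uint32_t major, minor;
            const char *name;

            if (amdgpu_device_initialize(fd, &major, &minor, &dev))
                    return;

            name = amdgpu_get_marketing_name(dev);  /* NULL if no table match */
            printf("GPU: %s\n", name ? name : "unknown");

            amdgpu_device_deinitialize(dev);
    }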
diff --git a/amdgpu/amdgpu_asic_id.h b/amdgpu/amdgpu_asic_id.h
deleted file mode 100644
index 3e7d736b..00000000
--- a/amdgpu/amdgpu_asic_id.h
+++ /dev/null
@@ -1,165 +0,0 @@
1/*
2 * Copyright © 2016 Advanced Micro Devices, Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 */
24
25#ifndef __AMDGPU_ASIC_ID_H__
26#define __AMDGPU_ASIC_ID_H__
27
28static struct amdgpu_asic_id_table_t {
29 uint32_t did;
30 uint32_t rid;
31 const char *marketing_name;
32} const amdgpu_asic_id_table [] = {
33 {0x6600, 0x0, "AMD Radeon HD 8600/8700M"},
34 {0x6600, 0x81, "AMD Radeon R7 M370"},
35 {0x6601, 0x0, "AMD Radeon HD 8500M/8700M"},
36 {0x6604, 0x0, "AMD Radeon R7 M265 Series"},
37 {0x6604, 0x81, "AMD Radeon R7 M350"},
38 {0x6605, 0x0, "AMD Radeon R7 M260 Series"},
39 {0x6605, 0x81, "AMD Radeon R7 M340"},
40 {0x6606, 0x0, "AMD Radeon HD 8790M"},
41 {0x6607, 0x0, "AMD Radeon HD8530M"},
42 {0x6608, 0x0, "AMD FirePro W2100"},
43 {0x6610, 0x0, "AMD Radeon HD 8600 Series"},
44 {0x6610, 0x81, "AMD Radeon R7 350"},
45 {0x6610, 0x83, "AMD Radeon R5 340"},
46 {0x6611, 0x0, "AMD Radeon HD 8500 Series"},
47 {0x6613, 0x0, "AMD Radeon HD 8500 series"},
48 {0x6617, 0xC7, "AMD Radeon R7 240 Series"},
49 {0x6640, 0x0, "AMD Radeon HD 8950"},
50 {0x6640, 0x80, "AMD Radeon R9 M380"},
51 {0x6646, 0x0, "AMD Radeon R9 M280X"},
52 {0x6646, 0x80, "AMD Radeon R9 M470X"},
53 {0x6647, 0x0, "AMD Radeon R9 M270X"},
54 {0x6647, 0x80, "AMD Radeon R9 M380"},
55 {0x6649, 0x0, "AMD FirePro W5100"},
56 {0x6658, 0x0, "AMD Radeon R7 200 Series"},
57 {0x665C, 0x0, "AMD Radeon HD 7700 Series"},
58 {0x665D, 0x0, "AMD Radeon R7 200 Series"},
59 {0x665F, 0x81, "AMD Radeon R7 300 Series"},
60 {0x6660, 0x0, "AMD Radeon HD 8600M Series"},
61 {0x6660, 0x81, "AMD Radeon R5 M335"},
62 {0x6660, 0x83, "AMD Radeon R5 M330"},
63 {0x6663, 0x0, "AMD Radeon HD 8500M Series"},
64 {0x6663, 0x83, "AMD Radeon R5 M320"},
65 {0x6664, 0x0, "AMD Radeon R5 M200 Series"},
66 {0x6665, 0x0, "AMD Radeon R5 M200 Series"},
67 {0x6665, 0x83, "AMD Radeon R5 M320"},
68 {0x6667, 0x0, "AMD Radeon R5 M200 Series"},
69 {0x666F, 0x0, "AMD Radeon HD 8500M"},
70 {0x6780, 0x0, "ATI FirePro V (FireGL V) Graphics Adapter"},
71 {0x678A, 0x0, "ATI FirePro V (FireGL V) Graphics Adapter"},
72 {0x6798, 0x0, "AMD Radeon HD 7900 Series"},
73 {0x679A, 0x0, "AMD Radeon HD 7900 Series"},
74 {0x679B, 0x0, "AMD Radeon HD 7900 Series"},
75 {0x679E, 0x0, "AMD Radeon HD 7800 Series"},
76 {0x67A0, 0x0, "HAWAII XTGL (67A0)"},
77 {0x67A1, 0x0, "HAWAII GL40 (67A1)"},
78 {0x67B0, 0x0, "AMD Radeon R9 200 Series"},
79 {0x67B0, 0x80, "AMD Radeon R9 390 Series"},
80 {0x67B1, 0x0, "AMD Radeon R9 200 Series"},
81 {0x67B1, 0x80, "AMD Radeon R9 390 Series"},
82 {0x67B9, 0x0, "AMD Radeon R9 200 Series"},
83 {0x67DF, 0xC4, "AMD Radeon RX 480 Graphics"},
84 {0x67DF, 0xC5, "AMD Radeon RX 470 Graphics"},
85 {0x67DF, 0xC7, "AMD Radeon RX 480 Graphics"},
86 {0x67DF, 0xCF, "AMD Radeon RX 470 Graphics"},
87 {0x67C4, 0x00, "AMD Radeon Pro WX 7100 Graphics"},
88 {0x67C7, 0x00, "AMD Radeon Pro WX 5100 Graphics"},
89 {0x67C0, 0x00, "AMD Radeon Pro WX 7100 Graphics"},
90 {0x67E0, 0x00, "AMD Radeon Pro WX Series Graphics"},
91 {0x67E3, 0x00, "AMD Radeon Pro WX 4100 Graphics"},
92 {0x67E8, 0x00, "AMD Radeon Pro WX Series Graphics"},
93 {0x67E8, 0x01, "AMD Radeon Pro WX Series Graphics"},
94 {0x67E8, 0x80, "AMD Radeon E9260 Graphics"},
95 {0x67EB, 0x00, "AMD Radeon Pro WX Series Graphics"},
96 {0x67EF, 0xC0, "AMD Radeon RX Graphics"},
97 {0x67EF, 0xC1, "AMD Radeon RX 460 Graphics"},
98 {0x67EF, 0xC5, "AMD Radeon RX 460 Graphics"},
99 {0x67EF, 0xC7, "AMD Radeon RX Graphics"},
100 {0x67EF, 0xCF, "AMD Radeon RX 460 Graphics"},
101 {0x67EF, 0xEF, "AMD Radeon RX Graphics"},
102 {0x67FF, 0xC0, "AMD Radeon RX Graphics"},
103 {0x67FF, 0xC1, "AMD Radeon RX Graphics"},
104 {0x6800, 0x0, "AMD Radeon HD 7970M"},
105 {0x6801, 0x0, "AMD Radeon(TM) HD8970M"},
106 {0x6808, 0x0, "ATI FirePro V(FireGL V) Graphics Adapter"},
107 {0x6809, 0x0, "ATI FirePro V(FireGL V) Graphics Adapter"},
108 {0x6810, 0x0, "AMD Radeon(TM) HD 8800 Series"},
109 {0x6810, 0x81, "AMD Radeon R7 370 Series"},
110 {0x6811, 0x0, "AMD Radeon(TM) HD8800 Series"},
111 {0x6811, 0x81, "AMD Radeon R7 300 Series"},
112 {0x6818, 0x0, "AMD Radeon HD 7800 Series"},
113 {0x6819, 0x0, "AMD Radeon HD 7800 Series"},
114 {0x6820, 0x0, "AMD Radeon HD 8800M Series"},
115 {0x6820, 0x81, "AMD Radeon R9 M375"},
116 {0x6820, 0x83, "AMD Radeon R9 M375X"},
117 {0x6821, 0x0, "AMD Radeon HD 8800M Series"},
118 {0x6821, 0x87, "AMD Radeon R7 M380"},
119 {0x6821, 0x83, "AMD Radeon R9 M370X"},
120 {0x6822, 0x0, "AMD Radeon E8860"},
121 {0x6823, 0x0, "AMD Radeon HD 8800M Series"},
122 {0x6825, 0x0, "AMD Radeon HD 7800M Series"},
123 {0x6827, 0x0, "AMD Radeon HD 7800M Series"},
124 {0x6828, 0x0, "ATI FirePro V(FireGL V) Graphics Adapter"},
125 {0x682B, 0x0, "AMD Radeon HD 8800M Series"},
126 {0x682B, 0x87, "AMD Radeon R9 M360"},
127 {0x682C, 0x0, "AMD FirePro W4100"},
128 {0x682D, 0x0, "AMD Radeon HD 7700M Series"},
129 {0x682F, 0x0, "AMD Radeon HD 7700M Series"},
130 {0x6835, 0x0, "AMD Radeon R7 Series / HD 9000 Series"},
131 {0x6837, 0x0, "AMD Radeon HD7700 Series"},
132 {0x683D, 0x0, "AMD Radeon HD 7700 Series"},
133 {0x683F, 0x0, "AMD Radeon HD 7700 Series"},
134 {0x6900, 0x0, "AMD Radeon R7 M260"},
135 {0x6900, 0x81, "AMD Radeon R7 M360"},
136 {0x6900, 0x83, "AMD Radeon R7 M340"},
137 {0x6901, 0x0, "AMD Radeon R5 M255"},
138 {0x6907, 0x0, "AMD Radeon R5 M255"},
139 {0x6907, 0x87, "AMD Radeon R5 M315"},
140 {0x6920, 0x0, "AMD Radeon R9 M395X"},
141 {0x6920, 0x1, "AMD Radeon R9 M390X"},
142 {0x6921, 0x0, "AMD Radeon R9 M295X"},
143 {0x6929, 0x0, "AMD FirePro S7150"},
144 {0x692B, 0x0, "AMD FirePro W7100"},
145 {0x6938, 0x0, "AMD Radeon R9 200 Series"},
146 {0x6938, 0xF0, "AMD Radeon R9 200 Series"},
147 {0x6938, 0xF1, "AMD Radeon R9 380 Series"},
148 {0x6939, 0xF0, "AMD Radeon R9 200 Series"},
149 {0x6939, 0x0, "AMD Radeon R9 200 Series"},
150 {0x6939, 0xF1, "AMD Radeon R9 380 Series"},
151 {0x7300, 0xC8, "AMD Radeon R9 Fury Series"},
152 {0x7300, 0xCB, "AMD Radeon R9 Fury Series"},
153 {0x7300, 0xCA, "AMD Radeon R9 Fury Series"},
154 {0x9874, 0xC4, "AMD Radeon R7 Graphics"},
155 {0x9874, 0xC5, "AMD Radeon R6 Graphics"},
156 {0x9874, 0xC6, "AMD Radeon R6 Graphics"},
157 {0x9874, 0xC7, "AMD Radeon R5 Graphics"},
158 {0x9874, 0x81, "AMD Radeon R6 Graphics"},
159 {0x9874, 0x87, "AMD Radeon R5 Graphics"},
160 {0x9874, 0x85, "AMD Radeon R6 Graphics"},
161 {0x9874, 0x84, "AMD Radeon R7 Graphics"},
162
163 {0x0000, 0x0, "\0"},
164};
165#endif
diff --git a/amdgpu/amdgpu_bo.c b/amdgpu/amdgpu_bo.c
index d30fd1e7..9e37b149 100644
--- a/amdgpu/amdgpu_bo.c
+++ b/amdgpu/amdgpu_bo.c
@@ -22,10 +22,6 @@
22 * 22 *
23 */ 23 */
24 24
25#ifdef HAVE_CONFIG_H
26#include "config.h"
27#endif
28
29#include <stdlib.h> 25#include <stdlib.h>
30#include <stdio.h> 26#include <stdio.h>
31#include <stdint.h> 27#include <stdint.h>
@@ -53,29 +49,6 @@ static void amdgpu_close_kms_handle(amdgpu_device_handle dev,
53 drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &args); 49 drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &args);
54} 50}
55 51
56drm_private void amdgpu_bo_free_internal(amdgpu_bo_handle bo)
57{
58 /* Remove the buffer from the hash tables. */
59 pthread_mutex_lock(&bo->dev->bo_table_mutex);
60 util_hash_table_remove(bo->dev->bo_handles,
61 (void*)(uintptr_t)bo->handle);
62 if (bo->flink_name) {
63 util_hash_table_remove(bo->dev->bo_flink_names,
64 (void*)(uintptr_t)bo->flink_name);
65 }
66 pthread_mutex_unlock(&bo->dev->bo_table_mutex);
67
68 /* Release CPU access. */
69 if (bo->cpu_map_count > 0) {
70 bo->cpu_map_count = 1;
71 amdgpu_bo_cpu_unmap(bo);
72 }
73
74 amdgpu_close_kms_handle(bo->dev, bo->handle);
75 pthread_mutex_destroy(&bo->cpu_access_mutex);
76 free(bo);
77}
78
79int amdgpu_bo_alloc(amdgpu_device_handle dev, 52int amdgpu_bo_alloc(amdgpu_device_handle dev,
80 struct amdgpu_bo_alloc_request *alloc_buffer, 53 struct amdgpu_bo_alloc_request *alloc_buffer,
81 amdgpu_bo_handle *buf_handle) 54 amdgpu_bo_handle *buf_handle)
@@ -273,8 +246,9 @@ int amdgpu_bo_export(amdgpu_bo_handle bo,
273 246
274 case amdgpu_bo_handle_type_dma_buf_fd: 247 case amdgpu_bo_handle_type_dma_buf_fd:
275 amdgpu_add_handle_to_table(bo); 248 amdgpu_add_handle_to_table(bo);
276 return drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC, 249 return drmPrimeHandleToFD(bo->dev->fd, bo->handle,
277 (int*)shared_handle); 250 DRM_CLOEXEC | DRM_RDWR,
251 (int*)shared_handle);
278 } 252 }
279 return -EINVAL; 253 return -EINVAL;
280} 254}
@@ -302,6 +276,7 @@ int amdgpu_bo_import(amdgpu_device_handle dev,
302 /* Get a KMS handle. */ 276 /* Get a KMS handle. */
303 r = drmPrimeFDToHandle(dev->fd, shared_handle, &handle); 277 r = drmPrimeFDToHandle(dev->fd, shared_handle, &handle);
304 if (r) { 278 if (r) {
279 pthread_mutex_unlock(&dev->bo_table_mutex);
305 return r; 280 return r;
306 } 281 }
307 282
@@ -341,10 +316,9 @@ int amdgpu_bo_import(amdgpu_device_handle dev,
341 } 316 }
342 317
343 if (bo) { 318 if (bo) {
344 pthread_mutex_unlock(&dev->bo_table_mutex);
345
346 /* The buffer already exists, just bump the refcount. */ 319 /* The buffer already exists, just bump the refcount. */
347 atomic_inc(&bo->refcount); 320 atomic_inc(&bo->refcount);
321 pthread_mutex_unlock(&dev->bo_table_mutex);
348 322
349 output->buf_handle = bo; 323 output->buf_handle = bo;
350 output->alloc_size = bo->alloc_size; 324 output->alloc_size = bo->alloc_size;
@@ -419,8 +393,35 @@ int amdgpu_bo_import(amdgpu_device_handle dev,
419 393
420int amdgpu_bo_free(amdgpu_bo_handle buf_handle) 394int amdgpu_bo_free(amdgpu_bo_handle buf_handle)
421{ 395{
422 /* Just drop the reference. */ 396 struct amdgpu_device *dev;
423 amdgpu_bo_reference(&buf_handle, NULL); 397 struct amdgpu_bo *bo = buf_handle;
398
399 assert(bo != NULL);
400 dev = bo->dev;
401 pthread_mutex_lock(&dev->bo_table_mutex);
402
403 if (update_references(&bo->refcount, NULL)) {
404 /* Remove the buffer from the hash tables. */
405 util_hash_table_remove(dev->bo_handles,
406 (void*)(uintptr_t)bo->handle);
407
408 if (bo->flink_name) {
409 util_hash_table_remove(dev->bo_flink_names,
410 (void*)(uintptr_t)bo->flink_name);
411 }
412
413 /* Release CPU access. */
414 if (bo->cpu_map_count > 0) {
415 bo->cpu_map_count = 1;
416 amdgpu_bo_cpu_unmap(bo);
417 }
418
419 amdgpu_close_kms_handle(dev, bo->handle);
420 pthread_mutex_destroy(&bo->cpu_access_mutex);
421 free(bo);
422 }
423
424 pthread_mutex_unlock(&dev->bo_table_mutex);
424 return 0; 425 return 0;
425} 426}
426 427
@@ -652,7 +653,7 @@ int amdgpu_bo_list_update(amdgpu_bo_list_handle handle,
652 return -EINVAL; 653 return -EINVAL;
653 654
654 list = malloc(number_of_resources * sizeof(struct drm_amdgpu_bo_list_entry)); 655 list = malloc(number_of_resources * sizeof(struct drm_amdgpu_bo_list_entry));
655 if (list == NULL) 656 if (!list)
656 return -ENOMEM; 657 return -ENOMEM;
657 658
658 args.in.operation = AMDGPU_BO_LIST_OP_UPDATE; 659 args.in.operation = AMDGPU_BO_LIST_OP_UPDATE;
@@ -683,21 +684,37 @@ int amdgpu_bo_va_op(amdgpu_bo_handle bo,
683 uint32_t ops) 684 uint32_t ops)
684{ 685{
685 amdgpu_device_handle dev = bo->dev; 686 amdgpu_device_handle dev = bo->dev;
687
688 size = ALIGN(size, getpagesize());
689
690 return amdgpu_bo_va_op_raw(dev, bo, offset, size, addr,
691 AMDGPU_VM_PAGE_READABLE |
692 AMDGPU_VM_PAGE_WRITEABLE |
693 AMDGPU_VM_PAGE_EXECUTABLE, ops);
694}
695
696int amdgpu_bo_va_op_raw(amdgpu_device_handle dev,
697 amdgpu_bo_handle bo,
698 uint64_t offset,
699 uint64_t size,
700 uint64_t addr,
701 uint64_t flags,
702 uint32_t ops)
703{
686 struct drm_amdgpu_gem_va va; 704 struct drm_amdgpu_gem_va va;
687 int r; 705 int r;
688 706
689 if (ops != AMDGPU_VA_OP_MAP && ops != AMDGPU_VA_OP_UNMAP) 707 if (ops != AMDGPU_VA_OP_MAP && ops != AMDGPU_VA_OP_UNMAP &&
708 ops != AMDGPU_VA_OP_REPLACE && ops != AMDGPU_VA_OP_CLEAR)
690 return -EINVAL; 709 return -EINVAL;
691 710
692 memset(&va, 0, sizeof(va)); 711 memset(&va, 0, sizeof(va));
693 va.handle = bo->handle; 712 va.handle = bo ? bo->handle : 0;
694 va.operation = ops; 713 va.operation = ops;
695 va.flags = AMDGPU_VM_PAGE_READABLE | 714 va.flags = flags;
696 AMDGPU_VM_PAGE_WRITEABLE |
697 AMDGPU_VM_PAGE_EXECUTABLE;
698 va.va_address = addr; 715 va.va_address = addr;
699 va.offset_in_bo = offset; 716 va.offset_in_bo = offset;
700 va.map_size = ALIGN(size, getpagesize()); 717 va.map_size = size;
701 718
702 r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va)); 719 r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));
703 720
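A hedged sketch of the new raw VA path: unlike amdgpu_bo_va_op(), the caller is responsible for page-aligning the size and spelling out the AMDGPU_VM_PAGE_* flags, which makes a read-only mapping possible.

    #include <unistd.h>
    #include <stdint.h>
    #include <amdgpu.h>
    #include <amdgpu_drm.h>

    static int map_bo_read_only(amdgpu_device_handle dev, amdgpu_bo_handle bo,
                                uint64_t size, uint64_t gpu_va)
    {
            uint64_t page = (uint64_t)getpagesize();
            uint64_t aligned = (size + page - 1) & ~(page - 1);

            /* Only READABLE: something amdgpu_bo_va_op() cannot express. */
            return amdgpu_bo_va_op_raw(dev, bo, 0 /* offset */, aligned, gpu_va,
                                       AMDGPU_VM_PAGE_READABLE,
                                       AMDGPU_VA_OP_MAP);
    }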
diff --git a/amdgpu/amdgpu_cs.c b/amdgpu/amdgpu_cs.c
index fb5b3a8c..3c9be6c2 100644
--- a/amdgpu/amdgpu_cs.c
+++ b/amdgpu/amdgpu_cs.c
@@ -21,10 +21,6 @@
21 * 21 *
22 */ 22 */
23 23
24#ifdef HAVE_CONFIG_H
25#include "config.h"
26#endif
27
28#include <stdlib.h> 24#include <stdlib.h>
29#include <stdio.h> 25#include <stdio.h>
30#include <string.h> 26#include <string.h>
@@ -46,26 +42,25 @@ static int amdgpu_cs_reset_sem(amdgpu_semaphore_handle sem);
46/** 42/**
47 * Create command submission context 43 * Create command submission context
48 * 44 *
49 * \param dev - \c [in] amdgpu device handle 45 * \param dev - \c [in] Device handle. See #amdgpu_device_initialize()
50 * \param context - \c [out] amdgpu context handle 46 * \param priority - \c [in] Context creation flags. See AMDGPU_CTX_PRIORITY_*
47 * \param context - \c [out] GPU Context handle
51 * 48 *
52 * \return 0 on success otherwise POSIX Error code 49 * \return 0 on success otherwise POSIX Error code
53*/ 50*/
54int amdgpu_cs_ctx_create(amdgpu_device_handle dev, 51int amdgpu_cs_ctx_create2(amdgpu_device_handle dev, uint32_t priority,
55 amdgpu_context_handle *context) 52 amdgpu_context_handle *context)
56{ 53{
57 struct amdgpu_context *gpu_context; 54 struct amdgpu_context *gpu_context;
58 union drm_amdgpu_ctx args; 55 union drm_amdgpu_ctx args;
59 int i, j, k; 56 int i, j, k;
60 int r; 57 int r;
61 58
62 if (NULL == dev) 59 if (!dev || !context)
63 return -EINVAL;
64 if (NULL == context)
65 return -EINVAL; 60 return -EINVAL;
66 61
67 gpu_context = calloc(1, sizeof(struct amdgpu_context)); 62 gpu_context = calloc(1, sizeof(struct amdgpu_context));
68 if (NULL == gpu_context) 63 if (!gpu_context)
69 return -ENOMEM; 64 return -ENOMEM;
70 65
71 gpu_context->dev = dev; 66 gpu_context->dev = dev;
@@ -77,6 +72,8 @@ int amdgpu_cs_ctx_create(amdgpu_device_handle dev,
77 /* Create the context */ 72 /* Create the context */
78 memset(&args, 0, sizeof(args)); 73 memset(&args, 0, sizeof(args));
79 args.in.op = AMDGPU_CTX_OP_ALLOC_CTX; 74 args.in.op = AMDGPU_CTX_OP_ALLOC_CTX;
75 args.in.priority = priority;
76
80 r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_CTX, &args, sizeof(args)); 77 r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_CTX, &args, sizeof(args));
81 if (r) 78 if (r)
82 goto error; 79 goto error;
@@ -96,6 +93,12 @@ error:
96 return r; 93 return r;
97} 94}
98 95
96int amdgpu_cs_ctx_create(amdgpu_device_handle dev,
97 amdgpu_context_handle *context)
98{
99 return amdgpu_cs_ctx_create2(dev, AMDGPU_CTX_PRIORITY_NORMAL, context);
100}
101
99/** 102/**
100 * Release command submission context 103 * Release command submission context
101 * 104 *
@@ -110,7 +113,7 @@ int amdgpu_cs_ctx_free(amdgpu_context_handle context)
110 int i, j, k; 113 int i, j, k;
111 int r; 114 int r;
112 115
113 if (NULL == context) 116 if (!context)
114 return -EINVAL; 117 return -EINVAL;
115 118
116 pthread_mutex_destroy(&context->sequence_mutex); 119 pthread_mutex_destroy(&context->sequence_mutex);
@@ -188,8 +191,6 @@ static int amdgpu_cs_submit_one(amdgpu_context_handle context,
188 return -EINVAL; 191 return -EINVAL;
189 if (ibs_request->ring >= AMDGPU_CS_MAX_RINGS) 192 if (ibs_request->ring >= AMDGPU_CS_MAX_RINGS)
190 return -EINVAL; 193 return -EINVAL;
191 if (ibs_request->number_of_ibs > AMDGPU_CS_MAX_IBS_PER_SUBMIT)
192 return -EINVAL;
193 if (ibs_request->number_of_ibs == 0) { 194 if (ibs_request->number_of_ibs == 0) {
194 ibs_request->seq_no = AMDGPU_NULL_SUBMIT_SEQ; 195 ibs_request->seq_no = AMDGPU_NULL_SUBMIT_SEQ;
195 return 0; 196 return 0;
@@ -330,9 +331,7 @@ int amdgpu_cs_submit(amdgpu_context_handle context,
330 uint32_t i; 331 uint32_t i;
331 int r; 332 int r;
332 333
333 if (NULL == context) 334 if (!context || !ibs_request)
334 return -EINVAL;
335 if (NULL == ibs_request)
336 return -EINVAL; 335 return -EINVAL;
337 336
338 r = 0; 337 r = 0;
@@ -416,11 +415,7 @@ int amdgpu_cs_query_fence_status(struct amdgpu_cs_fence *fence,
416 bool busy = true; 415 bool busy = true;
417 int r; 416 int r;
418 417
419 if (NULL == fence) 418 if (!fence || !expired || !fence->context)
420 return -EINVAL;
421 if (NULL == expired)
422 return -EINVAL;
423 if (NULL == fence->context)
424 return -EINVAL; 419 return -EINVAL;
425 if (fence->ip_type >= AMDGPU_HW_IP_NUM) 420 if (fence->ip_type >= AMDGPU_HW_IP_NUM)
426 return -EINVAL; 421 return -EINVAL;
@@ -443,15 +438,83 @@ int amdgpu_cs_query_fence_status(struct amdgpu_cs_fence *fence,
443 return r; 438 return r;
444} 439}
445 440
441static int amdgpu_ioctl_wait_fences(struct amdgpu_cs_fence *fences,
442 uint32_t fence_count,
443 bool wait_all,
444 uint64_t timeout_ns,
445 uint32_t *status,
446 uint32_t *first)
447{
448 struct drm_amdgpu_fence *drm_fences;
449 amdgpu_device_handle dev = fences[0].context->dev;
450 union drm_amdgpu_wait_fences args;
451 int r;
452 uint32_t i;
453
454 drm_fences = alloca(sizeof(struct drm_amdgpu_fence) * fence_count);
455 for (i = 0; i < fence_count; i++) {
456 drm_fences[i].ctx_id = fences[i].context->id;
457 drm_fences[i].ip_type = fences[i].ip_type;
458 drm_fences[i].ip_instance = fences[i].ip_instance;
459 drm_fences[i].ring = fences[i].ring;
460 drm_fences[i].seq_no = fences[i].fence;
461 }
462
463 memset(&args, 0, sizeof(args));
464 args.in.fences = (uint64_t)(uintptr_t)drm_fences;
465 args.in.fence_count = fence_count;
466 args.in.wait_all = wait_all;
467 args.in.timeout_ns = amdgpu_cs_calculate_timeout(timeout_ns);
468
469 r = drmIoctl(dev->fd, DRM_IOCTL_AMDGPU_WAIT_FENCES, &args);
470 if (r)
471 return -errno;
472
473 *status = args.out.status;
474
475 if (first)
476 *first = args.out.first_signaled;
477
478 return 0;
479}
480
481int amdgpu_cs_wait_fences(struct amdgpu_cs_fence *fences,
482 uint32_t fence_count,
483 bool wait_all,
484 uint64_t timeout_ns,
485 uint32_t *status,
486 uint32_t *first)
487{
488 uint32_t i;
489
490 /* Sanity check */
491 if (!fences || !status || !fence_count)
492 return -EINVAL;
493
494 for (i = 0; i < fence_count; i++) {
495 if (NULL == fences[i].context)
496 return -EINVAL;
497 if (fences[i].ip_type >= AMDGPU_HW_IP_NUM)
498 return -EINVAL;
499 if (fences[i].ring >= AMDGPU_CS_MAX_RINGS)
500 return -EINVAL;
501 }
502
503 *status = 0;
504
505 return amdgpu_ioctl_wait_fences(fences, fence_count, wait_all,
506 timeout_ns, status, first);
507}
508
446int amdgpu_cs_create_semaphore(amdgpu_semaphore_handle *sem) 509int amdgpu_cs_create_semaphore(amdgpu_semaphore_handle *sem)
447{ 510{
448 struct amdgpu_semaphore *gpu_semaphore; 511 struct amdgpu_semaphore *gpu_semaphore;
449 512
450 if (NULL == sem) 513 if (!sem)
451 return -EINVAL; 514 return -EINVAL;
452 515
453 gpu_semaphore = calloc(1, sizeof(struct amdgpu_semaphore)); 516 gpu_semaphore = calloc(1, sizeof(struct amdgpu_semaphore));
454 if (NULL == gpu_semaphore) 517 if (!gpu_semaphore)
455 return -ENOMEM; 518 return -ENOMEM;
456 519
457 atomic_set(&gpu_semaphore->refcount, 1); 520 atomic_set(&gpu_semaphore->refcount, 1);
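A brief sketch of how the new multi-fence wait might be used, with fences[] assumed to have been filled in from earlier amdgpu_cs_submit() calls on the same device.

    #include <errno.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <amdgpu.h>

    static int wait_for_any(struct amdgpu_cs_fence *fences, uint32_t count,
                            uint32_t *first_signaled)
    {
            uint32_t expired = 0;
            int r;

            /* wait_all = false: return as soon as any one fence signals */
            r = amdgpu_cs_wait_fences(fences, count, false,
                                      1000000000ull /* 1 s in ns */,
                                      &expired, first_signaled);
            if (r)
                    return r;               /* negative POSIX error code */
            return expired ? 0 : -ETIME;    /* 0: fences[*first_signaled] done */
    }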
@@ -466,14 +529,12 @@ int amdgpu_cs_signal_semaphore(amdgpu_context_handle ctx,
466 uint32_t ring, 529 uint32_t ring,
467 amdgpu_semaphore_handle sem) 530 amdgpu_semaphore_handle sem)
468{ 531{
469 if (NULL == ctx) 532 if (!ctx || !sem)
470 return -EINVAL; 533 return -EINVAL;
471 if (ip_type >= AMDGPU_HW_IP_NUM) 534 if (ip_type >= AMDGPU_HW_IP_NUM)
472 return -EINVAL; 535 return -EINVAL;
473 if (ring >= AMDGPU_CS_MAX_RINGS) 536 if (ring >= AMDGPU_CS_MAX_RINGS)
474 return -EINVAL; 537 return -EINVAL;
475 if (NULL == sem)
476 return -EINVAL;
477 /* sem has been signaled */ 538 /* sem has been signaled */
478 if (sem->signal_fence.context) 539 if (sem->signal_fence.context)
479 return -EINVAL; 540 return -EINVAL;
@@ -494,16 +555,14 @@ int amdgpu_cs_wait_semaphore(amdgpu_context_handle ctx,
494 uint32_t ring, 555 uint32_t ring,
495 amdgpu_semaphore_handle sem) 556 amdgpu_semaphore_handle sem)
496{ 557{
497 if (NULL == ctx) 558 if (!ctx || !sem)
498 return -EINVAL; 559 return -EINVAL;
499 if (ip_type >= AMDGPU_HW_IP_NUM) 560 if (ip_type >= AMDGPU_HW_IP_NUM)
500 return -EINVAL; 561 return -EINVAL;
501 if (ring >= AMDGPU_CS_MAX_RINGS) 562 if (ring >= AMDGPU_CS_MAX_RINGS)
502 return -EINVAL; 563 return -EINVAL;
503 if (NULL == sem)
504 return -EINVAL;
505 /* must signal first */ 564 /* must signal first */
506 if (NULL == sem->signal_fence.context) 565 if (!sem->signal_fence.context)
507 return -EINVAL; 566 return -EINVAL;
508 567
509 pthread_mutex_lock(&ctx->sequence_mutex); 568 pthread_mutex_lock(&ctx->sequence_mutex);
@@ -514,12 +573,10 @@ int amdgpu_cs_wait_semaphore(amdgpu_context_handle ctx,
514 573
515static int amdgpu_cs_reset_sem(amdgpu_semaphore_handle sem) 574static int amdgpu_cs_reset_sem(amdgpu_semaphore_handle sem)
516{ 575{
517 if (NULL == sem) 576 if (!sem || !sem->signal_fence.context)
518 return -EINVAL;
519 if (NULL == sem->signal_fence.context)
520 return -EINVAL; 577 return -EINVAL;
521 578
522 sem->signal_fence.context = NULL;; 579 sem->signal_fence.context = NULL;
523 sem->signal_fence.ip_type = 0; 580 sem->signal_fence.ip_type = 0;
524 sem->signal_fence.ip_instance = 0; 581 sem->signal_fence.ip_instance = 0;
525 sem->signal_fence.ring = 0; 582 sem->signal_fence.ring = 0;
@@ -530,7 +587,7 @@ static int amdgpu_cs_reset_sem(amdgpu_semaphore_handle sem)
530 587
531static int amdgpu_cs_unreference_sem(amdgpu_semaphore_handle sem) 588static int amdgpu_cs_unreference_sem(amdgpu_semaphore_handle sem)
532{ 589{
533 if (NULL == sem) 590 if (!sem)
534 return -EINVAL; 591 return -EINVAL;
535 592
536 if (update_references(&sem->refcount, NULL)) 593 if (update_references(&sem->refcount, NULL))
@@ -542,3 +599,170 @@ int amdgpu_cs_destroy_semaphore(amdgpu_semaphore_handle sem)
542{ 599{
543 return amdgpu_cs_unreference_sem(sem); 600 return amdgpu_cs_unreference_sem(sem);
544} 601}
602
603int amdgpu_cs_create_syncobj2(amdgpu_device_handle dev,
604 uint32_t flags,
605 uint32_t *handle)
606{
607 if (NULL == dev)
608 return -EINVAL;
609
610 return drmSyncobjCreate(dev->fd, flags, handle);
611}
612
613int amdgpu_cs_create_syncobj(amdgpu_device_handle dev,
614 uint32_t *handle)
615{
616 if (NULL == dev)
617 return -EINVAL;
618
619 return drmSyncobjCreate(dev->fd, 0, handle);
620}
621
622int amdgpu_cs_destroy_syncobj(amdgpu_device_handle dev,
623 uint32_t handle)
624{
625 if (NULL == dev)
626 return -EINVAL;
627
628 return drmSyncobjDestroy(dev->fd, handle);
629}
630
631int amdgpu_cs_syncobj_reset(amdgpu_device_handle dev,
632 const uint32_t *syncobjs, uint32_t syncobj_count)
633{
634 if (NULL == dev)
635 return -EINVAL;
636
637 return drmSyncobjReset(dev->fd, syncobjs, syncobj_count);
638}
639
640int amdgpu_cs_syncobj_signal(amdgpu_device_handle dev,
641 const uint32_t *syncobjs, uint32_t syncobj_count)
642{
643 if (NULL == dev)
644 return -EINVAL;
645
646 return drmSyncobjSignal(dev->fd, syncobjs, syncobj_count);
647}
648
649int amdgpu_cs_syncobj_wait(amdgpu_device_handle dev,
650 uint32_t *handles, unsigned num_handles,
651 int64_t timeout_nsec, unsigned flags,
652 uint32_t *first_signaled)
653{
654 if (NULL == dev)
655 return -EINVAL;
656
657 return drmSyncobjWait(dev->fd, handles, num_handles, timeout_nsec,
658 flags, first_signaled);
659}
660
661int amdgpu_cs_export_syncobj(amdgpu_device_handle dev,
662 uint32_t handle,
663 int *shared_fd)
664{
665 if (NULL == dev)
666 return -EINVAL;
667
668 return drmSyncobjHandleToFD(dev->fd, handle, shared_fd);
669}
670
671int amdgpu_cs_import_syncobj(amdgpu_device_handle dev,
672 int shared_fd,
673 uint32_t *handle)
674{
675 if (NULL == dev)
676 return -EINVAL;
677
678 return drmSyncobjFDToHandle(dev->fd, shared_fd, handle);
679}
680
681int amdgpu_cs_syncobj_export_sync_file(amdgpu_device_handle dev,
682 uint32_t syncobj,
683 int *sync_file_fd)
684{
685 if (NULL == dev)
686 return -EINVAL;
687
688 return drmSyncobjExportSyncFile(dev->fd, syncobj, sync_file_fd);
689}
690
691int amdgpu_cs_syncobj_import_sync_file(amdgpu_device_handle dev,
692 uint32_t syncobj,
693 int sync_file_fd)
694{
695 if (NULL == dev)
696 return -EINVAL;
697
698 return drmSyncobjImportSyncFile(dev->fd, syncobj, sync_file_fd);
699}
700
701int amdgpu_cs_submit_raw(amdgpu_device_handle dev,
702 amdgpu_context_handle context,
703 amdgpu_bo_list_handle bo_list_handle,
704 int num_chunks,
705 struct drm_amdgpu_cs_chunk *chunks,
706 uint64_t *seq_no)
707{
708 union drm_amdgpu_cs cs = {0};
709 uint64_t *chunk_array;
710 int i, r;
711 if (num_chunks == 0)
712 return -EINVAL;
713
714 chunk_array = alloca(sizeof(uint64_t) * num_chunks);
715 for (i = 0; i < num_chunks; i++)
716 chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
717 cs.in.chunks = (uint64_t)(uintptr_t)chunk_array;
718 cs.in.ctx_id = context->id;
719 cs.in.bo_list_handle = bo_list_handle ? bo_list_handle->handle : 0;
720 cs.in.num_chunks = num_chunks;
721 r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_CS,
722 &cs, sizeof(cs));
723 if (r)
724 return r;
725
726 if (seq_no)
727 *seq_no = cs.out.handle;
728 return 0;
729}
730
731void amdgpu_cs_chunk_fence_info_to_data(struct amdgpu_cs_fence_info *fence_info,
732 struct drm_amdgpu_cs_chunk_data *data)
733{
734 data->fence_data.handle = fence_info->handle->handle;
735 data->fence_data.offset = fence_info->offset * sizeof(uint64_t);
736}
737
738void amdgpu_cs_chunk_fence_to_dep(struct amdgpu_cs_fence *fence,
739 struct drm_amdgpu_cs_chunk_dep *dep)
740{
741 dep->ip_type = fence->ip_type;
742 dep->ip_instance = fence->ip_instance;
743 dep->ring = fence->ring;
744 dep->ctx_id = fence->context->id;
745 dep->handle = fence->fence;
746}
747
748int amdgpu_cs_fence_to_handle(amdgpu_device_handle dev,
749 struct amdgpu_cs_fence *fence,
750 uint32_t what,
751 uint32_t *out_handle)
752{
753 union drm_amdgpu_fence_to_handle fth = {0};
754 int r;
755
756 fth.in.fence.ctx_id = fence->context->id;
757 fth.in.fence.ip_type = fence->ip_type;
758 fth.in.fence.ip_instance = fence->ip_instance;
759 fth.in.fence.ring = fence->ring;
760 fth.in.fence.seq_no = fence->fence;
761 fth.in.what = what;
762
763 r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_FENCE_TO_HANDLE,
764 &fth, sizeof(fth));
765 if (r == 0)
766 *out_handle = fth.out.handle;
767 return r;
768}
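The amdgpu_cs.c hunks above collapse the NULL checks into single conditions, add a multi-fence wait built on DRM_IOCTL_AMDGPU_WAIT_FENCES (amdgpu_cs_wait_fences plus the internal amdgpu_ioctl_wait_fences helper), thin wrappers around the drmSyncobj* entry points, and the raw-submission helpers (amdgpu_cs_submit_raw, the chunk conversion helpers and amdgpu_cs_fence_to_handle). As a minimal sketch of the new wait path — the fences[] array, the 1 s timeout, the helper name and the interpretation of the status output are illustrative assumptions, not part of this change — a caller could wait for the first of several submissions like this:

/* Minimal sketch: wait for the first of several previously submitted
 * command streams to signal, using the new amdgpu_cs_wait_fences().
 * The fences[] array is assumed to have been filled in by earlier
 * amdgpu_cs_submit() calls. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include "amdgpu.h"

static int wait_for_first_fence(struct amdgpu_cs_fence *fences,
				uint32_t fence_count)
{
	uint32_t status = 0;	/* assumed non-zero when the wait condition was met */
	uint32_t first = 0;	/* index of the first signaled fence */
	int r;

	/* wait_all = false: return as soon as any one fence signals */
	r = amdgpu_cs_wait_fences(fences, fence_count, false,
				  1000000000ull /* 1 s */, &status, &first);
	if (r)
		return r;	/* -EINVAL on bad input, -errno from the ioctl */

	if (!status) {
		fprintf(stderr, "timed out waiting for submissions\n");
		return -1;
	}

	printf("fence %u signaled first\n", first);
	return 0;
}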
diff --git a/amdgpu/amdgpu_device.c b/amdgpu/amdgpu_device.c
index f4ede031..d81efcf8 100644
--- a/amdgpu/amdgpu_device.c
+++ b/amdgpu/amdgpu_device.c
@@ -28,10 +28,6 @@
28 * 28 *
29 */ 29 */
30 30
31#ifdef HAVE_CONFIG_H
32#include "config.h"
33#endif
34
35#include <sys/stat.h> 31#include <sys/stat.h>
36#include <errno.h> 32#include <errno.h>
37#include <string.h> 33#include <string.h>
@@ -44,7 +40,6 @@
44#include "amdgpu_internal.h" 40#include "amdgpu_internal.h"
45#include "util_hash_table.h" 41#include "util_hash_table.h"
46#include "util_math.h" 42#include "util_math.h"
47#include "amdgpu_asic_id.h"
48 43
49#define PTR_TO_UINT(x) ((unsigned)((intptr_t)(x))) 44#define PTR_TO_UINT(x) ((unsigned)((intptr_t)(x)))
50#define UINT_TO_PTR(x) ((void *)((intptr_t)(x))) 45#define UINT_TO_PTR(x) ((void *)((intptr_t)(x)))
@@ -131,10 +126,8 @@ static int amdgpu_get_auth(int fd, int *auth)
131 126
132static void amdgpu_device_free_internal(amdgpu_device_handle dev) 127static void amdgpu_device_free_internal(amdgpu_device_handle dev)
133{ 128{
134 amdgpu_vamgr_deinit(dev->vamgr); 129 amdgpu_vamgr_deinit(&dev->vamgr_32);
135 free(dev->vamgr); 130 amdgpu_vamgr_deinit(&dev->vamgr);
136 amdgpu_vamgr_deinit(dev->vamgr_32);
137 free(dev->vamgr_32);
138 util_hash_table_destroy(dev->bo_flink_names); 131 util_hash_table_destroy(dev->bo_flink_names);
139 util_hash_table_destroy(dev->bo_handles); 132 util_hash_table_destroy(dev->bo_handles);
140 pthread_mutex_destroy(&dev->bo_table_mutex); 133 pthread_mutex_destroy(&dev->bo_table_mutex);
@@ -142,6 +135,7 @@ static void amdgpu_device_free_internal(amdgpu_device_handle dev)
142 close(dev->fd); 135 close(dev->fd);
143 if ((dev->flink_fd >= 0) && (dev->fd != dev->flink_fd)) 136 if ((dev->flink_fd >= 0) && (dev->fd != dev->flink_fd))
144 close(dev->flink_fd); 137 close(dev->flink_fd);
138 free(dev->marketing_name);
145 free(dev); 139 free(dev);
146} 140}
147 141
@@ -187,6 +181,8 @@ int amdgpu_device_initialize(int fd,
187 fd_tab = util_hash_table_create(fd_hash, fd_compare); 181 fd_tab = util_hash_table_create(fd_hash, fd_compare);
188 r = amdgpu_get_auth(fd, &flag_auth); 182 r = amdgpu_get_auth(fd, &flag_auth);
189 if (r) { 183 if (r) {
184 fprintf(stderr, "%s: amdgpu_get_auth (1) failed (%i)\n",
185 __func__, r);
190 pthread_mutex_unlock(&fd_mutex); 186 pthread_mutex_unlock(&fd_mutex);
191 return r; 187 return r;
192 } 188 }
@@ -194,6 +190,8 @@ int amdgpu_device_initialize(int fd,
194 if (dev) { 190 if (dev) {
195 r = amdgpu_get_auth(dev->fd, &flag_authexist); 191 r = amdgpu_get_auth(dev->fd, &flag_authexist);
196 if (r) { 192 if (r) {
193 fprintf(stderr, "%s: amdgpu_get_auth (2) failed (%i)\n",
194 __func__, r);
197 pthread_mutex_unlock(&fd_mutex); 195 pthread_mutex_unlock(&fd_mutex);
198 return r; 196 return r;
199 } 197 }
@@ -209,6 +207,7 @@ int amdgpu_device_initialize(int fd,
209 207
210 dev = calloc(1, sizeof(struct amdgpu_device)); 208 dev = calloc(1, sizeof(struct amdgpu_device));
211 if (!dev) { 209 if (!dev) {
210 fprintf(stderr, "%s: calloc failed\n", __func__);
212 pthread_mutex_unlock(&fd_mutex); 211 pthread_mutex_unlock(&fd_mutex);
213 return -ENOMEM; 212 return -ENOMEM;
214 } 213 }
@@ -244,38 +243,47 @@ int amdgpu_device_initialize(int fd,
244 243
245 /* Check if acceleration is working. */ 244 /* Check if acceleration is working. */
246 r = amdgpu_query_info(dev, AMDGPU_INFO_ACCEL_WORKING, 4, &accel_working); 245 r = amdgpu_query_info(dev, AMDGPU_INFO_ACCEL_WORKING, 4, &accel_working);
247 if (r) 246 if (r) {
247 fprintf(stderr, "%s: amdgpu_query_info(ACCEL_WORKING) failed (%i)\n",
248 __func__, r);
248 goto cleanup; 249 goto cleanup;
250 }
249 if (!accel_working) { 251 if (!accel_working) {
252 fprintf(stderr, "%s: AMDGPU_INFO_ACCEL_WORKING = 0\n", __func__);
250 r = -EBADF; 253 r = -EBADF;
251 goto cleanup; 254 goto cleanup;
252 } 255 }
253 256
254 r = amdgpu_query_gpu_info_init(dev); 257 r = amdgpu_query_gpu_info_init(dev);
255 if (r) 258 if (r) {
259 fprintf(stderr, "%s: amdgpu_query_gpu_info_init failed\n", __func__);
256 goto cleanup; 260 goto cleanup;
261 }
257 262
258 dev->vamgr = calloc(1, sizeof(struct amdgpu_bo_va_mgr)); 263 start = dev->dev_info.virtual_address_offset;
259 if (dev->vamgr == NULL) 264 max = MIN2(dev->dev_info.virtual_address_max, 0x100000000ULL);
260 goto cleanup; 265 amdgpu_vamgr_init(&dev->vamgr_32, start, max,
266 dev->dev_info.virtual_address_alignment);
261 267
262 amdgpu_vamgr_init(dev->vamgr, dev->dev_info.virtual_address_offset, 268 start = max;
263 dev->dev_info.virtual_address_max, 269 max = MAX2(dev->dev_info.virtual_address_max, 0x100000000ULL);
270 amdgpu_vamgr_init(&dev->vamgr, start, max,
264 dev->dev_info.virtual_address_alignment); 271 dev->dev_info.virtual_address_alignment);
265 272
266 max = MIN2(dev->dev_info.virtual_address_max, 0xffffffff); 273 start = dev->dev_info.high_va_offset;
267 start = amdgpu_vamgr_find_va(dev->vamgr, 274 max = MIN2(dev->dev_info.high_va_max, (start & ~0xffffffffULL) +
268 max - dev->dev_info.virtual_address_offset, 275 0x100000000ULL);
269 dev->dev_info.virtual_address_alignment, 0); 276 amdgpu_vamgr_init(&dev->vamgr_high_32, start, max,
270 if (start > 0xffffffff)
271 goto free_va; /* shouldn't get here */
272
273 dev->vamgr_32 = calloc(1, sizeof(struct amdgpu_bo_va_mgr));
274 if (dev->vamgr_32 == NULL)
275 goto free_va;
276 amdgpu_vamgr_init(dev->vamgr_32, start, max,
277 dev->dev_info.virtual_address_alignment); 277 dev->dev_info.virtual_address_alignment);
278 278
279 start = max;
280 max = MAX2(dev->dev_info.high_va_max, (start & ~0xffffffffULL) +
281 0x100000000ULL);
282 amdgpu_vamgr_init(&dev->vamgr_high, start, max,
283 dev->dev_info.virtual_address_alignment);
284
285 amdgpu_parse_asic_ids(dev);
286
279 *major_version = dev->major_version; 287 *major_version = dev->major_version;
280 *minor_version = dev->minor_version; 288 *minor_version = dev->minor_version;
281 *device_handle = dev; 289 *device_handle = dev;
@@ -284,13 +292,6 @@ int amdgpu_device_initialize(int fd,
284 292
285 return 0; 293 return 0;
286 294
287free_va:
288 r = -ENOMEM;
289 amdgpu_vamgr_free_va(dev->vamgr, start,
290 max - dev->dev_info.virtual_address_offset);
291 amdgpu_vamgr_deinit(dev->vamgr);
292 free(dev->vamgr);
293
294cleanup: 295cleanup:
295 if (dev->fd >= 0) 296 if (dev->fd >= 0)
296 close(dev->fd); 297 close(dev->fd);
@@ -307,14 +308,21 @@ int amdgpu_device_deinitialize(amdgpu_device_handle dev)
307 308
308const char *amdgpu_get_marketing_name(amdgpu_device_handle dev) 309const char *amdgpu_get_marketing_name(amdgpu_device_handle dev)
309{ 310{
310 const struct amdgpu_asic_id_table_t *t = amdgpu_asic_id_table; 311 return dev->marketing_name;
312}
311 313
312 while (t->did) { 314int amdgpu_query_sw_info(amdgpu_device_handle dev, enum amdgpu_sw_info info,
313 if ((t->did == dev->info.asic_id) && 315 void *value)
314 (t->rid == dev->info.pci_rev_id)) 316{
315 return t->marketing_name; 317 uint32_t *val32 = (uint32_t*)value;
316 t++; 318
319 switch (info) {
320 case amdgpu_sw_info_address32_hi:
321 if (dev->vamgr_high_32.va_max)
322 *val32 = (dev->vamgr_high_32.va_max - 1) >> 32;
323 else
324 *val32 = (dev->vamgr_32.va_max - 1) >> 32;
325 return 0;
317 } 326 }
318 327 return -EINVAL;
319 return NULL;
320} 328}
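Besides the VA-manager rework (the managers are now embedded in struct amdgpu_device and split into low, high and 32-bit ranges) and the switch to the amdgpu.ids parser for marketing names, amdgpu_device.c gains amdgpu_query_sw_info(). A minimal sketch of querying the new amdgpu_sw_info_address32_hi value — the printing and the helper name are illustrative only:

/* Minimal sketch: ask the library which 4 GiB slice of the GPU address
 * space holds the 32-bit VA range, via the new amdgpu_query_sw_info().
 * 'dev' is assumed to come from amdgpu_device_initialize(). */
#include <stdint.h>
#include <stdio.h>
#include "amdgpu.h"

static void print_address32_hi(amdgpu_device_handle dev)
{
	uint32_t hi32 = 0;

	if (amdgpu_query_sw_info(dev, amdgpu_sw_info_address32_hi, &hi32) == 0)
		printf("address32_hi = 0x%08x\n", hi32);
	else
		fprintf(stderr, "amdgpu_sw_info_address32_hi query failed\n");
}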
diff --git a/amdgpu/amdgpu_gpu_info.c b/amdgpu/amdgpu_gpu_info.c
index 66c7e0e1..b68e1c4f 100644
--- a/amdgpu/amdgpu_gpu_info.c
+++ b/amdgpu/amdgpu_gpu_info.c
@@ -22,10 +22,6 @@
22 * 22 *
23 */ 23 */
24 24
25#ifdef HAVE_CONFIG_H
26#include "config.h"
27#endif
28
29#include <errno.h> 25#include <errno.h>
30#include <string.h> 26#include <string.h>
31 27
@@ -169,53 +165,57 @@ drm_private int amdgpu_query_gpu_info_init(amdgpu_device_handle dev)
169 dev->info.vce_harvest_config = dev->dev_info.vce_harvest_config; 165 dev->info.vce_harvest_config = dev->dev_info.vce_harvest_config;
170 dev->info.pci_rev_id = dev->dev_info.pci_rev; 166 dev->info.pci_rev_id = dev->dev_info.pci_rev;
171 167
172 for (i = 0; i < (int)dev->info.num_shader_engines; i++) { 168 if (dev->info.family_id < AMDGPU_FAMILY_AI) {
173 unsigned instance = (i << AMDGPU_INFO_MMR_SE_INDEX_SHIFT) | 169 for (i = 0; i < (int)dev->info.num_shader_engines; i++) {
174 (AMDGPU_INFO_MMR_SH_INDEX_MASK << 170 unsigned instance = (i << AMDGPU_INFO_MMR_SE_INDEX_SHIFT) |
175 AMDGPU_INFO_MMR_SH_INDEX_SHIFT); 171 (AMDGPU_INFO_MMR_SH_INDEX_MASK <<
172 AMDGPU_INFO_MMR_SH_INDEX_SHIFT);
176 173
177 r = amdgpu_read_mm_registers(dev, 0x263d, 1, instance, 0, 174 r = amdgpu_read_mm_registers(dev, 0x263d, 1, instance, 0,
178 &dev->info.backend_disable[i]); 175 &dev->info.backend_disable[i]);
179 if (r) 176 if (r)
180 return r; 177 return r;
181 /* extract bitfield CC_RB_BACKEND_DISABLE.BACKEND_DISABLE */ 178 /* extract bitfield CC_RB_BACKEND_DISABLE.BACKEND_DISABLE */
182 dev->info.backend_disable[i] = 179 dev->info.backend_disable[i] =
183 (dev->info.backend_disable[i] >> 16) & 0xff; 180 (dev->info.backend_disable[i] >> 16) & 0xff;
184
185 r = amdgpu_read_mm_registers(dev, 0xa0d4, 1, instance, 0,
186 &dev->info.pa_sc_raster_cfg[i]);
187 if (r)
188 return r;
189 181
190 if (dev->info.family_id >= AMDGPU_FAMILY_CI) { 182 r = amdgpu_read_mm_registers(dev, 0xa0d4, 1, instance, 0,
191 r = amdgpu_read_mm_registers(dev, 0xa0d5, 1, instance, 0, 183 &dev->info.pa_sc_raster_cfg[i]);
192 &dev->info.pa_sc_raster_cfg1[i]);
193 if (r) 184 if (r)
194 return r; 185 return r;
186
187 if (dev->info.family_id >= AMDGPU_FAMILY_CI) {
188 r = amdgpu_read_mm_registers(dev, 0xa0d5, 1, instance, 0,
189 &dev->info.pa_sc_raster_cfg1[i]);
190 if (r)
191 return r;
192 }
195 } 193 }
196 } 194 }
197 195
198 r = amdgpu_read_mm_registers(dev, 0x2644, 32, 0xffffffff, 0, 196 r = amdgpu_read_mm_registers(dev, 0x263e, 1, 0xffffffff, 0,
199 dev->info.gb_tile_mode); 197 &dev->info.gb_addr_cfg);
200 if (r) 198 if (r)
201 return r; 199 return r;
202 200
203 if (dev->info.family_id >= AMDGPU_FAMILY_CI) { 201 if (dev->info.family_id < AMDGPU_FAMILY_AI) {
204 r = amdgpu_read_mm_registers(dev, 0x2664, 16, 0xffffffff, 0, 202 r = amdgpu_read_mm_registers(dev, 0x2644, 32, 0xffffffff, 0,
205 dev->info.gb_macro_tile_mode); 203 dev->info.gb_tile_mode);
206 if (r) 204 if (r)
207 return r; 205 return r;
208 }
209 206
210 r = amdgpu_read_mm_registers(dev, 0x263e, 1, 0xffffffff, 0, 207 if (dev->info.family_id >= AMDGPU_FAMILY_CI) {
211 &dev->info.gb_addr_cfg); 208 r = amdgpu_read_mm_registers(dev, 0x2664, 16, 0xffffffff, 0,
212 if (r) 209 dev->info.gb_macro_tile_mode);
213 return r; 210 if (r)
211 return r;
212 }
214 213
215 r = amdgpu_read_mm_registers(dev, 0x9d8, 1, 0xffffffff, 0, 214 r = amdgpu_read_mm_registers(dev, 0x9d8, 1, 0xffffffff, 0,
216 &dev->info.mc_arb_ramcfg); 215 &dev->info.mc_arb_ramcfg);
217 if (r) 216 if (r)
218 return r; 217 return r;
218 }
219 219
220 dev->info.cu_active_number = dev->dev_info.cu_active_number; 220 dev->info.cu_active_number = dev->dev_info.cu_active_number;
221 dev->info.cu_ao_mask = dev->dev_info.cu_ao_mask; 221 dev->info.cu_ao_mask = dev->dev_info.cu_ao_mask;
@@ -230,8 +230,9 @@ drm_private int amdgpu_query_gpu_info_init(amdgpu_device_handle dev)
230int amdgpu_query_gpu_info(amdgpu_device_handle dev, 230int amdgpu_query_gpu_info(amdgpu_device_handle dev,
231 struct amdgpu_gpu_info *info) 231 struct amdgpu_gpu_info *info)
232{ 232{
233 if ((dev == NULL) || (info == NULL)) 233 if (!dev || !info)
234 return -EINVAL; 234 return -EINVAL;
235
235 /* Get ASIC info*/ 236 /* Get ASIC info*/
236 *info = dev->info; 237 *info = dev->info;
237 238
@@ -296,7 +297,7 @@ int amdgpu_query_gds_info(amdgpu_device_handle dev,
296 struct drm_amdgpu_info_gds gds_config = {}; 297 struct drm_amdgpu_info_gds gds_config = {};
297 int r; 298 int r;
298 299
299 if (gds_info == NULL) 300 if (!gds_info)
300 return -EINVAL; 301 return -EINVAL;
301 302
302 r = amdgpu_query_info(dev, AMDGPU_INFO_GDS_CONFIG, 303 r = amdgpu_query_info(dev, AMDGPU_INFO_GDS_CONFIG,
@@ -314,3 +315,18 @@ int amdgpu_query_gds_info(amdgpu_device_handle dev,
314 315
315 return 0; 316 return 0;
316} 317}
318
319int amdgpu_query_sensor_info(amdgpu_device_handle dev, unsigned sensor_type,
320 unsigned size, void *value)
321{
322 struct drm_amdgpu_info request;
323
324 memset(&request, 0, sizeof(request));
325 request.return_pointer = (uintptr_t)value;
326 request.return_size = size;
327 request.query = AMDGPU_INFO_SENSOR;
328 request.sensor_info.type = sensor_type;
329
330 return drmCommandWrite(dev->fd, DRM_AMDGPU_INFO, &request,
331 sizeof(struct drm_amdgpu_info));
332}
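The amdgpu_gpu_info.c changes skip the pre-AI register reads on AMDGPU_FAMILY_AI and newer parts and add amdgpu_query_sensor_info(), a thin wrapper around the AMDGPU_INFO_SENSOR query. A minimal sketch of reading one sensor — AMDGPU_INFO_SENSOR_GFX_SCLK is assumed to be provided by amdgpu_drm.h, and the MHz unit is an assumption about the kernel interface rather than something this diff defines:

/* Minimal sketch: read the current shader clock through the new
 * amdgpu_query_sensor_info() wrapper. */
#include <stdint.h>
#include <stdio.h>
#include "amdgpu.h"
#include "amdgpu_drm.h"

static void print_sclk(amdgpu_device_handle dev)
{
	uint32_t sclk = 0;
	int r;

	r = amdgpu_query_sensor_info(dev, AMDGPU_INFO_SENSOR_GFX_SCLK,
				     sizeof(sclk), &sclk);
	if (r)
		fprintf(stderr, "sensor query failed: %d\n", r);
	else
		printf("current GFX clock: %u MHz\n", sclk);	/* unit assumed */
}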
diff --git a/amdgpu/amdgpu_internal.h b/amdgpu/amdgpu_internal.h
index 4f039b68..99b8ce0b 100644
--- a/amdgpu/amdgpu_internal.h
+++ b/amdgpu/amdgpu_internal.h
@@ -25,10 +25,6 @@
25#ifndef _AMDGPU_INTERNAL_H_ 25#ifndef _AMDGPU_INTERNAL_H_
26#define _AMDGPU_INTERNAL_H_ 26#define _AMDGPU_INTERNAL_H_
27 27
28#ifdef HAVE_CONFIG_H
29#include "config.h"
30#endif
31
32#include <assert.h> 28#include <assert.h>
33#include <pthread.h> 29#include <pthread.h>
34 30
@@ -53,8 +49,6 @@ struct amdgpu_bo_va_hole {
53}; 49};
54 50
55struct amdgpu_bo_va_mgr { 51struct amdgpu_bo_va_mgr {
56 /* the start virtual address */
57 uint64_t va_offset;
58 uint64_t va_max; 52 uint64_t va_max;
59 struct list_head va_holes; 53 struct list_head va_holes;
60 pthread_mutex_t bo_va_mutex; 54 pthread_mutex_t bo_va_mutex;
@@ -76,6 +70,7 @@ struct amdgpu_device {
76 unsigned major_version; 70 unsigned major_version;
77 unsigned minor_version; 71 unsigned minor_version;
78 72
73 char *marketing_name;
79 /** List of buffer handles. Protected by bo_table_mutex. */ 74 /** List of buffer handles. Protected by bo_table_mutex. */
80 struct util_hash_table *bo_handles; 75 struct util_hash_table *bo_handles;
81 /** List of buffer GEM flink names. Protected by bo_table_mutex. */ 76 /** List of buffer GEM flink names. Protected by bo_table_mutex. */
@@ -84,10 +79,14 @@ struct amdgpu_device {
84 pthread_mutex_t bo_table_mutex; 79 pthread_mutex_t bo_table_mutex;
85 struct drm_amdgpu_info_device dev_info; 80 struct drm_amdgpu_info_device dev_info;
86 struct amdgpu_gpu_info info; 81 struct amdgpu_gpu_info info;
87 /** The global VA manager for the whole virtual address space */ 82 /** The VA manager for the lower virtual address space */
88 struct amdgpu_bo_va_mgr *vamgr; 83 struct amdgpu_bo_va_mgr vamgr;
89 /** The VA manager for the 32bit address space */ 84 /** The VA manager for the 32bit address space */
90 struct amdgpu_bo_va_mgr *vamgr_32; 85 struct amdgpu_bo_va_mgr vamgr_32;
86 /** The VA manager for the high virtual address space */
87 struct amdgpu_bo_va_mgr vamgr_high;
88 /** The VA manager for the 32bit high address space */
89 struct amdgpu_bo_va_mgr vamgr_high_32;
91}; 90};
92 91
93struct amdgpu_bo { 92struct amdgpu_bo {
@@ -135,19 +134,12 @@ struct amdgpu_semaphore {
135 * Functions. 134 * Functions.
136 */ 135 */
137 136
138drm_private void amdgpu_bo_free_internal(amdgpu_bo_handle bo);
139
140drm_private void amdgpu_vamgr_init(struct amdgpu_bo_va_mgr *mgr, uint64_t start, 137drm_private void amdgpu_vamgr_init(struct amdgpu_bo_va_mgr *mgr, uint64_t start,
141 uint64_t max, uint64_t alignment); 138 uint64_t max, uint64_t alignment);
142 139
143drm_private void amdgpu_vamgr_deinit(struct amdgpu_bo_va_mgr *mgr); 140drm_private void amdgpu_vamgr_deinit(struct amdgpu_bo_va_mgr *mgr);
144 141
145drm_private uint64_t 142drm_private void amdgpu_parse_asic_ids(struct amdgpu_device *dev);
146amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, uint64_t size,
147 uint64_t alignment, uint64_t base_required);
148
149drm_private void
150amdgpu_vamgr_free_va(struct amdgpu_bo_va_mgr *mgr, uint64_t va, uint64_t size);
151 143
152drm_private int amdgpu_query_gpu_info_init(amdgpu_device_handle dev); 144drm_private int amdgpu_query_gpu_info_init(amdgpu_device_handle dev);
153 145
@@ -179,26 +171,4 @@ static inline bool update_references(atomic_t *dst, atomic_t *src)
179 return false; 171 return false;
180} 172}
181 173
182/**
183 * Assignment between two amdgpu_bo pointers with reference counting.
184 *
185 * Usage:
186 * struct amdgpu_bo *dst = ... , *src = ...;
187 *
188 * dst = src;
189 * // No reference counting. Only use this when you need to move
190 * // a reference from one pointer to another.
191 *
192 * amdgpu_bo_reference(&dst, src);
193 * // Reference counters are updated. dst is decremented and src is
194 * // incremented. dst is freed if its reference counter is 0.
195 */
196static inline void amdgpu_bo_reference(struct amdgpu_bo **dst,
197 struct amdgpu_bo *src)
198{
199 if (update_references(&(*dst)->refcount, &src->refcount))
200 amdgpu_bo_free_internal(*dst);
201 *dst = src;
202}
203
204#endif 174#endif
diff --git a/amdgpu/amdgpu_vamgr.c b/amdgpu/amdgpu_vamgr.c
index 8a707cbc..1de9f952 100644
--- a/amdgpu/amdgpu_vamgr.c
+++ b/amdgpu/amdgpu_vamgr.c
@@ -21,10 +21,6 @@
21 * 21 *
22 */ 22 */
23 23
24#ifdef HAVE_CONFIG_H
25#include "config.h"
26#endif
27
28#include <stdlib.h> 24#include <stdlib.h>
29#include <string.h> 25#include <string.h>
30#include <errno.h> 26#include <errno.h>
@@ -34,25 +30,33 @@
34#include "util_math.h" 30#include "util_math.h"
35 31
36int amdgpu_va_range_query(amdgpu_device_handle dev, 32int amdgpu_va_range_query(amdgpu_device_handle dev,
37 enum amdgpu_gpu_va_range type, uint64_t *start, uint64_t *end) 33 enum amdgpu_gpu_va_range type,
34 uint64_t *start, uint64_t *end)
38{ 35{
39 if (type == amdgpu_gpu_va_range_general) { 36 if (type != amdgpu_gpu_va_range_general)
40 *start = dev->dev_info.virtual_address_offset; 37 return -EINVAL;
41 *end = dev->dev_info.virtual_address_max; 38
42 return 0; 39 *start = dev->dev_info.virtual_address_offset;
43 } 40 *end = dev->dev_info.virtual_address_max;
44 return -EINVAL; 41 return 0;
45} 42}
46 43
47drm_private void amdgpu_vamgr_init(struct amdgpu_bo_va_mgr *mgr, uint64_t start, 44drm_private void amdgpu_vamgr_init(struct amdgpu_bo_va_mgr *mgr, uint64_t start,
48 uint64_t max, uint64_t alignment) 45 uint64_t max, uint64_t alignment)
49{ 46{
50 mgr->va_offset = start; 47 struct amdgpu_bo_va_hole *n;
48
51 mgr->va_max = max; 49 mgr->va_max = max;
52 mgr->va_alignment = alignment; 50 mgr->va_alignment = alignment;
53 51
54 list_inithead(&mgr->va_holes); 52 list_inithead(&mgr->va_holes);
55 pthread_mutex_init(&mgr->bo_va_mutex, NULL); 53 pthread_mutex_init(&mgr->bo_va_mutex, NULL);
54 pthread_mutex_lock(&mgr->bo_va_mutex);
55 n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
56 n->size = mgr->va_max - start;
57 n->offset = start;
58 list_add(&n->list, &mgr->va_holes);
59 pthread_mutex_unlock(&mgr->bo_va_mutex);
56} 60}
57 61
58drm_private void amdgpu_vamgr_deinit(struct amdgpu_bo_va_mgr *mgr) 62drm_private void amdgpu_vamgr_deinit(struct amdgpu_bo_va_mgr *mgr)
@@ -65,13 +69,14 @@ drm_private void amdgpu_vamgr_deinit(struct amdgpu_bo_va_mgr *mgr)
65 pthread_mutex_destroy(&mgr->bo_va_mutex); 69 pthread_mutex_destroy(&mgr->bo_va_mutex);
66} 70}
67 71
68drm_private uint64_t 72static drm_private uint64_t
69amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, uint64_t size, 73amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, uint64_t size,
70 uint64_t alignment, uint64_t base_required) 74 uint64_t alignment, uint64_t base_required)
71{ 75{
72 struct amdgpu_bo_va_hole *hole, *n; 76 struct amdgpu_bo_va_hole *hole, *n;
73 uint64_t offset = 0, waste = 0; 77 uint64_t offset = 0, waste = 0;
74 78
79
75 alignment = MAX2(alignment, mgr->va_alignment); 80 alignment = MAX2(alignment, mgr->va_alignment);
76 size = ALIGN(size, mgr->va_alignment); 81 size = ALIGN(size, mgr->va_alignment);
77 82
@@ -79,12 +84,10 @@ amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, uint64_t size,
79 return AMDGPU_INVALID_VA_ADDRESS; 84 return AMDGPU_INVALID_VA_ADDRESS;
80 85
81 pthread_mutex_lock(&mgr->bo_va_mutex); 86 pthread_mutex_lock(&mgr->bo_va_mutex);
82 /* TODO: using more appropriate way to track the holes */ 87 LIST_FOR_EACH_ENTRY_SAFE_REV(hole, n, &mgr->va_holes, list) {
83 /* first look for a hole */
84 LIST_FOR_EACH_ENTRY_SAFE(hole, n, &mgr->va_holes, list) {
85 if (base_required) { 88 if (base_required) {
86 if(hole->offset > base_required || 89 if (hole->offset > base_required ||
87 (hole->offset + hole->size) < (base_required + size)) 90 (hole->offset + hole->size) < (base_required + size))
88 continue; 91 continue;
89 waste = base_required - hole->offset; 92 waste = base_required - hole->offset;
90 offset = base_required; 93 offset = base_required;
@@ -123,41 +126,14 @@ amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, uint64_t size,
123 } 126 }
124 } 127 }
125 128
126 if (base_required) {
127 if (base_required < mgr->va_offset) {
128 pthread_mutex_unlock(&mgr->bo_va_mutex);
129 return AMDGPU_INVALID_VA_ADDRESS;
130 }
131 offset = mgr->va_offset;
132 waste = base_required - mgr->va_offset;
133 } else {
134 offset = mgr->va_offset;
135 waste = offset % alignment;
136 waste = waste ? alignment - waste : 0;
137 }
138
139 if (offset + waste + size > mgr->va_max) {
140 pthread_mutex_unlock(&mgr->bo_va_mutex);
141 return AMDGPU_INVALID_VA_ADDRESS;
142 }
143
144 if (waste) {
145 n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
146 n->size = waste;
147 n->offset = offset;
148 list_add(&n->list, &mgr->va_holes);
149 }
150
151 offset += waste;
152 mgr->va_offset += size + waste;
153 pthread_mutex_unlock(&mgr->bo_va_mutex); 129 pthread_mutex_unlock(&mgr->bo_va_mutex);
154 return offset; 130 return AMDGPU_INVALID_VA_ADDRESS;
155} 131}
156 132
157drm_private void 133static drm_private void
158amdgpu_vamgr_free_va(struct amdgpu_bo_va_mgr *mgr, uint64_t va, uint64_t size) 134amdgpu_vamgr_free_va(struct amdgpu_bo_va_mgr *mgr, uint64_t va, uint64_t size)
159{ 135{
160 struct amdgpu_bo_va_hole *hole; 136 struct amdgpu_bo_va_hole *hole, *next;
161 137
162 if (va == AMDGPU_INVALID_VA_ADDRESS) 138 if (va == AMDGPU_INVALID_VA_ADDRESS)
163 return; 139 return;
@@ -165,61 +141,47 @@ amdgpu_vamgr_free_va(struct amdgpu_bo_va_mgr *mgr, uint64_t va, uint64_t size)
165 size = ALIGN(size, mgr->va_alignment); 141 size = ALIGN(size, mgr->va_alignment);
166 142
167 pthread_mutex_lock(&mgr->bo_va_mutex); 143 pthread_mutex_lock(&mgr->bo_va_mutex);
168 if ((va + size) == mgr->va_offset) { 144 hole = container_of(&mgr->va_holes, hole, list);
169 mgr->va_offset = va; 145 LIST_FOR_EACH_ENTRY(next, &mgr->va_holes, list) {
170 /* Delete uppermost hole if it reaches the new top */ 146 if (next->offset < va)
171 if (!LIST_IS_EMPTY(&mgr->va_holes)) { 147 break;
172 hole = container_of(mgr->va_holes.next, hole, list); 148 hole = next;
173 if ((hole->offset + hole->size) == va) { 149 }
174 mgr->va_offset = hole->offset; 150
151 if (&hole->list != &mgr->va_holes) {
152 /* Grow upper hole if it's adjacent */
153 if (hole->offset == (va + size)) {
154 hole->offset = va;
155 hole->size += size;
156 /* Merge lower hole if it's adjacent */
157 if (next != hole &&
158 &next->list != &mgr->va_holes &&
159 (next->offset + next->size) == va) {
160 next->size += hole->size;
175 list_del(&hole->list); 161 list_del(&hole->list);
176 free(hole); 162 free(hole);
177 } 163 }
178 }
179 } else {
180 struct amdgpu_bo_va_hole *next;
181
182 hole = container_of(&mgr->va_holes, hole, list);
183 LIST_FOR_EACH_ENTRY(next, &mgr->va_holes, list) {
184 if (next->offset < va)
185 break;
186 hole = next;
187 }
188
189 if (&hole->list != &mgr->va_holes) {
190 /* Grow upper hole if it's adjacent */
191 if (hole->offset == (va + size)) {
192 hole->offset = va;
193 hole->size += size;
194 /* Merge lower hole if it's adjacent */
195 if (next != hole
196 && &next->list != &mgr->va_holes
197 && (next->offset + next->size) == va) {
198 next->size += hole->size;
199 list_del(&hole->list);
200 free(hole);
201 }
202 goto out;
203 }
204 }
205
206 /* Grow lower hole if it's adjacent */
207 if (next != hole && &next->list != &mgr->va_holes &&
208 (next->offset + next->size) == va) {
209 next->size += size;
210 goto out; 164 goto out;
211 } 165 }
166 }
212 167
213 /* FIXME on allocation failure we just lose virtual address space 168 /* Grow lower hole if it's adjacent */
214 * maybe print a warning 169 if (next != hole && &next->list != &mgr->va_holes &&
215 */ 170 (next->offset + next->size) == va) {
216 next = calloc(1, sizeof(struct amdgpu_bo_va_hole)); 171 next->size += size;
217 if (next) { 172 goto out;
218 next->size = size; 173 }
219 next->offset = va; 174
220 list_add(&next->list, &hole->list); 175 /* FIXME on allocation failure we just lose virtual address space
221 } 176 * maybe print a warning
177 */
178 next = calloc(1, sizeof(struct amdgpu_bo_va_hole));
179 if (next) {
180 next->size = size;
181 next->offset = va;
182 list_add(&next->list, &hole->list);
222 } 183 }
184
223out: 185out:
224 pthread_mutex_unlock(&mgr->bo_va_mutex); 186 pthread_mutex_unlock(&mgr->bo_va_mutex);
225} 187}
@@ -235,10 +197,21 @@ int amdgpu_va_range_alloc(amdgpu_device_handle dev,
235{ 197{
236 struct amdgpu_bo_va_mgr *vamgr; 198 struct amdgpu_bo_va_mgr *vamgr;
237 199
238 if (flags & AMDGPU_VA_RANGE_32_BIT) 200 /* Clear the flag when the high VA manager is not initialized */
239 vamgr = dev->vamgr_32; 201 if (flags & AMDGPU_VA_RANGE_HIGH && !dev->vamgr_high_32.va_max)
240 else 202 flags &= ~AMDGPU_VA_RANGE_HIGH;
241 vamgr = dev->vamgr; 203
204 if (flags & AMDGPU_VA_RANGE_HIGH) {
205 if (flags & AMDGPU_VA_RANGE_32_BIT)
206 vamgr = &dev->vamgr_high_32;
207 else
208 vamgr = &dev->vamgr_high;
209 } else {
210 if (flags & AMDGPU_VA_RANGE_32_BIT)
211 vamgr = &dev->vamgr_32;
212 else
213 vamgr = &dev->vamgr;
214 }
242 215
243 va_base_alignment = MAX2(va_base_alignment, vamgr->va_alignment); 216 va_base_alignment = MAX2(va_base_alignment, vamgr->va_alignment);
244 size = ALIGN(size, vamgr->va_alignment); 217 size = ALIGN(size, vamgr->va_alignment);
@@ -249,7 +222,10 @@ int amdgpu_va_range_alloc(amdgpu_device_handle dev,
249 if (!(flags & AMDGPU_VA_RANGE_32_BIT) && 222 if (!(flags & AMDGPU_VA_RANGE_32_BIT) &&
250 (*va_base_allocated == AMDGPU_INVALID_VA_ADDRESS)) { 223 (*va_base_allocated == AMDGPU_INVALID_VA_ADDRESS)) {
251 /* fallback to 32bit address */ 224 /* fallback to 32bit address */
252 vamgr = dev->vamgr_32; 225 if (flags & AMDGPU_VA_RANGE_HIGH)
226 vamgr = &dev->vamgr_high_32;
227 else
228 vamgr = &dev->vamgr_32;
253 *va_base_allocated = amdgpu_vamgr_find_va(vamgr, size, 229 *va_base_allocated = amdgpu_vamgr_find_va(vamgr, size,
254 va_base_alignment, va_base_required); 230 va_base_alignment, va_base_required);
255 } 231 }
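The vamgr rewrite drops va_offset entirely and tracks each range purely as a hole list seeded in amdgpu_vamgr_init(), with amdgpu_va_range_alloc() now picking one of the four managers via AMDGPU_VA_RANGE_HIGH and AMDGPU_VA_RANGE_32_BIT. A minimal sketch of requesting a range from the high 32-bit window — the amdgpu_va_range_alloc() prototype is the pre-existing public one from amdgpu.h (not shown in this diff), and the helper name and 1 MiB size are illustrative:

/* Minimal sketch: carve a 1 MiB range out of the new high 32-bit VA
 * window. When the kernel does not report a high VA range, the library
 * clears AMDGPU_VA_RANGE_HIGH itself and falls back to the low managers,
 * so the same call keeps working on older kernels. */
#include <stdint.h>
#include "amdgpu.h"

static int alloc_high_32bit_va(amdgpu_device_handle dev,
			       uint64_t *va, amdgpu_va_handle *handle)
{
	return amdgpu_va_range_alloc(dev, amdgpu_gpu_va_range_general,
				     1 << 20,	/* size */
				     0,		/* use the manager's alignment */
				     0,		/* no specific base required */
				     va, handle,
				     AMDGPU_VA_RANGE_HIGH |
				     AMDGPU_VA_RANGE_32_BIT);
}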
diff --git a/amdgpu/amdgpu_vm.c b/amdgpu/amdgpu_vm.c
new file mode 100644
index 00000000..da9d07f8
--- /dev/null
+++ b/amdgpu/amdgpu_vm.c
@@ -0,0 +1,49 @@
1/*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "amdgpu.h"
25#include "amdgpu_drm.h"
26#include "xf86drm.h"
27#include "amdgpu_internal.h"
28
29int amdgpu_vm_reserve_vmid(amdgpu_device_handle dev, uint32_t flags)
30{
31 union drm_amdgpu_vm vm;
32
33 vm.in.op = AMDGPU_VM_OP_RESERVE_VMID;
34 vm.in.flags = flags;
35
36 return drmCommandWriteRead(dev->fd, DRM_AMDGPU_VM,
37 &vm, sizeof(vm));
38}
39
40int amdgpu_vm_unreserve_vmid(amdgpu_device_handle dev, uint32_t flags)
41{
42 union drm_amdgpu_vm vm;
43
44 vm.in.op = AMDGPU_VM_OP_UNRESERVE_VMID;
45 vm.in.flags = flags;
46
47 return drmCommandWriteRead(dev->fd, DRM_AMDGPU_VM,
48 &vm, sizeof(vm));
49}
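The new amdgpu_vm.c exposes VMID reservation through the DRM_AMDGPU_VM ioctl. A minimal sketch — the flags value of 0 and the surrounding helper are illustrative; the diff forwards flags to the kernel unchanged and does not define what work benefits from a reserved VMID:

/* Minimal sketch: reserve a dedicated VMID for this process while some
 * profiling/debugging work runs, then release it again. */
#include <stdio.h>
#include "amdgpu.h"

static void with_reserved_vmid(amdgpu_device_handle dev)
{
	if (amdgpu_vm_reserve_vmid(dev, 0)) {
		fprintf(stderr, "VMID reservation not available\n");
		return;
	}

	/* ... submit the work that needs a stable VMID here ... */

	amdgpu_vm_unreserve_vmid(dev, 0);
}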
diff --git a/amdgpu/meson.build b/amdgpu/meson.build
new file mode 100644
index 00000000..f39d7bf6
--- /dev/null
+++ b/amdgpu/meson.build
@@ -0,0 +1,66 @@
1# Copyright © 2017-2018 Intel Corporation
2
3# Permission is hereby granted, free of charge, to any person obtaining a copy
4# of this software and associated documentation files (the "Software"), to deal
5# in the Software without restriction, including without limitation the rights
6# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7# copies of the Software, and to permit persons to whom the Software is
8# furnished to do so, subject to the following conditions:
9
10# The above copyright notice and this permission notice shall be included in
11# all copies or substantial portions of the Software.
12
13# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19# SOFTWARE.
20
21
22datadir_amdgpu = join_paths(get_option('prefix'), get_option('datadir'), 'libdrm')
23
24libdrm_amdgpu = shared_library(
25 'drm_amdgpu',
26 [
27 files(
28 'amdgpu_asic_id.c', 'amdgpu_bo.c', 'amdgpu_cs.c', 'amdgpu_device.c',
29 'amdgpu_gpu_info.c', 'amdgpu_vamgr.c', 'amdgpu_vm.c', 'util_hash.c',
30 'util_hash_table.c',
31 ),
32 config_file,
33 ],
34 c_args : [
35 warn_c_args,
36 '-DAMDGPU_ASIC_ID_TABLE="@0@"'.format(join_paths(datadir_amdgpu, 'amdgpu.ids')),
37 ],
38 include_directories : [inc_root, inc_drm],
39 link_with : libdrm,
40 dependencies : [dep_pthread_stubs, dep_atomic_ops],
41 version : '1.0.0',
42 install : true,
43)
44
45install_headers('amdgpu.h', subdir : 'libdrm')
46
47pkg.generate(
48 name : 'libdrm_amdgpu',
49 libraries : libdrm_amdgpu,
50 subdirs : ['.', 'libdrm'],
51 version : meson.project_version(),
52 requires_private : 'libdrm',
53 description : 'Userspace interface to kernel DRM services for amdgpu',
54)
55
56ext_libdrm_amdgpu = declare_dependency(
57 link_with : [libdrm, libdrm_amdgpu],
58 include_directories : [inc_drm, include_directories('.')],
59)
60
61test(
62 'amdgpu-symbol-check',
63 prog_bash,
64 env : env_test,
65 args : [files('amdgpu-symbol-check'), libdrm_amdgpu]
66)
diff --git a/amdgpu/util_hash.c b/amdgpu/util_hash.c
index 87cb671b..7e590419 100644
--- a/amdgpu/util_hash.c
+++ b/amdgpu/util_hash.c
@@ -30,10 +30,6 @@
30 * Zack Rusin <zackr@vmware.com> 30 * Zack Rusin <zackr@vmware.com>
31 */ 31 */
32 32
33#ifdef HAVE_CONFIG_H
34#include "config.h"
35#endif
36
37#include "util_hash.h" 33#include "util_hash.h"
38 34
39#include <stdlib.h> 35#include <stdlib.h>
diff --git a/amdgpu/util_hash.h b/amdgpu/util_hash.h
index 01a4779b..6eed1569 100644
--- a/amdgpu/util_hash.h
+++ b/amdgpu/util_hash.h
@@ -44,10 +44,6 @@
44#ifndef UTIL_HASH_H 44#ifndef UTIL_HASH_H
45#define UTIL_HASH_H 45#define UTIL_HASH_H
46 46
47#ifdef HAVE_CONFIG_H
48#include "config.h"
49#endif
50
51#include <stdbool.h> 47#include <stdbool.h>
52 48
53#include "libdrm_macros.h" 49#include "libdrm_macros.h"
diff --git a/amdgpu/util_hash_table.c b/amdgpu/util_hash_table.c
index fa7f6eab..89a8bf9b 100644
--- a/amdgpu/util_hash_table.c
+++ b/amdgpu/util_hash_table.c
@@ -38,10 +38,6 @@
38 */ 38 */
39 39
40 40
41#ifdef HAVE_CONFIG_H
42#include "config.h"
43#endif
44
45#include "util_hash_table.h" 41#include "util_hash_table.h"
46#include "util_hash.h" 42#include "util_hash.h"
47 43
diff --git a/amdgpu/util_hash_table.h b/amdgpu/util_hash_table.h
index e0001289..5e295a81 100644
--- a/amdgpu/util_hash_table.h
+++ b/amdgpu/util_hash_table.h
@@ -34,10 +34,6 @@
34#ifndef U_HASH_TABLE_H_ 34#ifndef U_HASH_TABLE_H_
35#define U_HASH_TABLE_H_ 35#define U_HASH_TABLE_H_
36 36
37#ifdef HAVE_CONFIG_H
38#include "config.h"
39#endif
40
41#include "libdrm_macros.h" 37#include "libdrm_macros.h"
42 38
43/** 39/**
diff --git a/android/gralloc_handle.h b/android/gralloc_handle.h
new file mode 100644
index 00000000..bcf753da
--- /dev/null
+++ b/android/gralloc_handle.h
@@ -0,0 +1,111 @@
1/*
2 * Copyright (C) 2010-2011 Chia-I Wu <olvaffe@gmail.com>
3 * Copyright (C) 2010-2011 LunarG Inc.
4 * Copyright (C) 2016 Linaro, Ltd., Rob Herring <robh@kernel.org>
5 * Copyright (C) 2018 Collabora, Robert Foss <robert.foss@collabora.com>
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice shall be included
15 * in all copies or substantial portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23 * DEALINGS IN THE SOFTWARE.
24 */
25
26#ifndef __ANDROID_GRALLOC_HANDLE_H__
27#define __ANDROID_GRALLOC_HANDLE_H__
28
29#include <cutils/native_handle.h>
30#include <stdint.h>
31
32/* support users of drm_gralloc/gbm_gralloc */
33#define gralloc_gbm_handle_t gralloc_handle_t
34#define gralloc_drm_handle_t gralloc_handle_t
35
36struct gralloc_handle_t {
37 native_handle_t base;
38
39 /* dma-buf file descriptor
40 * Must be located first since, native_handle_t is allocated
41 * using native_handle_create(), which allocates space for
42 * sizeof(native_handle_t) + sizeof(int) * (numFds + numInts)
43 * numFds = GRALLOC_HANDLE_NUM_FDS
44 * numInts = GRALLOC_HANDLE_NUM_INTS
45 * Where numFds represents the number of FDs and
46 * numInts represents the space needed for the
47 * remainder of this struct.
48 * And the FDs are expected to be found first following
49 * native_handle_t.
50 */
51 int prime_fd;
52
53 /* api variables */
54 uint32_t magic; /* differentiate between allocator impls */
55 uint32_t version; /* api version */
56
57 uint32_t width; /* width of buffer in pixels */
58 uint32_t height; /* height of buffer in pixels */
59 uint32_t format; /* pixel format (Android) */
60 uint32_t usage; /* android libhardware usage flags */
61
62 uint32_t stride; /* the stride in bytes */
63 uint64_t modifier; /* buffer modifiers */
64
65 int data_owner; /* owner of data (for validation) */
66 union {
67 void *data; /* pointer to struct gralloc_gbm_bo_t */
68 uint64_t reserved;
69 } __attribute__((aligned(8)));
70};
71
72#define GRALLOC_HANDLE_VERSION 3
73#define GRALLOC_HANDLE_MAGIC 0x60585350
74#define GRALLOC_HANDLE_NUM_FDS 1
75#define GRALLOC_HANDLE_NUM_INTS ( \
76 ((sizeof(struct gralloc_handle_t) - sizeof(native_handle_t))/sizeof(int)) \
77 - GRALLOC_HANDLE_NUM_FDS)
78
79static inline struct gralloc_handle_t *gralloc_handle(buffer_handle_t handle)
80{
81 return (struct gralloc_handle_t *)handle;
82}
83
84/**
85 * Create a buffer handle.
86 */
87static inline native_handle_t *gralloc_handle_create(int32_t width,
88 int32_t height,
89 int32_t hal_format,
90 int32_t usage)
91{
92 struct gralloc_handle_t *handle;
93 native_handle_t *nhandle = native_handle_create(GRALLOC_HANDLE_NUM_FDS,
94 GRALLOC_HANDLE_NUM_INTS);
95
96 if (!nhandle)
97 return NULL;
98
99 handle = gralloc_handle(nhandle);
100 handle->magic = GRALLOC_HANDLE_MAGIC;
101 handle->version = GRALLOC_HANDLE_VERSION;
102 handle->width = width;
103 handle->height = height;
104 handle->format = hal_format;
105 handle->usage = usage;
106 handle->prime_fd = -1;
107
108 return nhandle;
109}
110
111#endif
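The new gralloc_handle.h gives drm_gralloc/gbm_gralloc users a shared handle layout plus the gralloc_handle_create() helper. A minimal sketch of how an Android gralloc module might use it — HAL_PIXEL_FORMAT_RGBA_8888 and GRALLOC_USAGE_HW_RENDER are Android HAL constants rather than something this header defines, and the prime fd and stride are assumed to come from a separate allocation path:

/* Minimal sketch (hypothetical gralloc module code): build a handle for a
 * 1920x1080 RGBA buffer and attach a dma-buf fd obtained elsewhere. */
#include <hardware/gralloc.h>
#include "gralloc_handle.h"

static native_handle_t *make_handle(int prime_fd, uint32_t stride_bytes)
{
	native_handle_t *nh;
	struct gralloc_handle_t *gh;

	nh = gralloc_handle_create(1920, 1080, HAL_PIXEL_FORMAT_RGBA_8888,
				   GRALLOC_USAGE_HW_RENDER);
	if (!nh)
		return NULL;

	gh = gralloc_handle(nh);
	gh->prime_fd = prime_fd;	/* dma-buf fd backing the buffer */
	gh->stride = stride_bytes;	/* stride in bytes, per the header */
	gh->modifier = 0;		/* linear layout */

	return nh;
}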
diff --git a/autogen.sh b/autogen.sh
index d82ab180..13d6991e 100755
--- a/autogen.sh
+++ b/autogen.sh
@@ -6,15 +6,15 @@ test -z "$srcdir" && srcdir=.
6ORIGDIR=`pwd` 6ORIGDIR=`pwd`
7cd "$srcdir" 7cd "$srcdir"
8 8
9autoreconf --force --verbose --install || exit 1 9git config --local --get format.subjectPrefix >/dev/null ||
10cd "$ORIGDIR" || exit $?
11
12git config --local --get format.subjectPrefix ||
13 git config --local format.subjectPrefix "PATCH libdrm" 2>/dev/null 10 git config --local format.subjectPrefix "PATCH libdrm" 2>/dev/null
14 11
15git config --local --get sendemail.to || 12git config --local --get sendemail.to >/dev/null ||
16 git config --local sendemail.to "dri-devel@lists.freedesktop.org" 2>/dev/null 13 git config --local sendemail.to "dri-devel@lists.freedesktop.org" 2>/dev/null
17 14
15autoreconf --force --verbose --install || exit 1
16cd "$ORIGDIR" || exit $?
17
18if test -z "$NOCONFIGURE"; then 18if test -z "$NOCONFIGURE"; then
19 "$srcdir"/configure "$@" 19 "$srcdir"/configure "$@"
20fi 20fi
diff --git a/configure.ac b/configure.ac
index 1da9d86b..98a350c0 100644
--- a/configure.ac
+++ b/configure.ac
@@ -20,7 +20,7 @@
20 20
21AC_PREREQ([2.63]) 21AC_PREREQ([2.63])
22AC_INIT([libdrm], 22AC_INIT([libdrm],
23 [2.4.75], 23 [2.4.91],
24 [https://bugs.freedesktop.org/enter_bug.cgi?product=DRI], 24 [https://bugs.freedesktop.org/enter_bug.cgi?product=DRI],
25 [libdrm]) 25 [libdrm])
26 26
@@ -28,6 +28,7 @@ AC_CONFIG_HEADERS([config.h])
28AC_CONFIG_SRCDIR([Makefile.am]) 28AC_CONFIG_SRCDIR([Makefile.am])
29AC_CONFIG_MACRO_DIR([m4]) 29AC_CONFIG_MACRO_DIR([m4])
30AC_CONFIG_AUX_DIR([build-aux]) 30AC_CONFIG_AUX_DIR([build-aux])
31PKG_PROG_PKG_CONFIG
31 32
32# Require xorg-macros minimum of 1.12 for XORG_WITH_XSLTPROC 33# Require xorg-macros minimum of 1.12 for XORG_WITH_XSLTPROC
33m4_ifndef([XORG_MACROS_VERSION], 34m4_ifndef([XORG_MACROS_VERSION],
@@ -44,6 +45,7 @@ m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])])
44# Check for programs 45# Check for programs
45AC_PROG_CC 46AC_PROG_CC
46AC_PROG_CC_C99 47AC_PROG_CC_C99
48AC_PROG_NM
47 49
48if test "x$ac_cv_prog_cc_c99" = xno; then 50if test "x$ac_cv_prog_cc_c99" = xno; then
49 AC_MSG_ERROR([Building libdrm requires C99 enabled compiler]) 51 AC_MSG_ERROR([Building libdrm requires C99 enabled compiler])
@@ -53,20 +55,39 @@ AC_USE_SYSTEM_EXTENSIONS
53AC_SYS_LARGEFILE 55AC_SYS_LARGEFILE
54AC_FUNC_ALLOCA 56AC_FUNC_ALLOCA
55 57
58save_CFLAGS="$CFLAGS"
59export CFLAGS="$CFLAGS -Werror"
56AC_HEADER_MAJOR 60AC_HEADER_MAJOR
61CFLAGS="$save_CFLAGS"
62
57AC_CHECK_HEADERS([sys/sysctl.h sys/select.h]) 63AC_CHECK_HEADERS([sys/sysctl.h sys/select.h])
58 64
59# Initialize libtool 65# Initialize libtool
60LT_PREREQ([2.2]) 66LT_PREREQ([2.2])
61LT_INIT([disable-static]) 67LT_INIT([disable-static])
62 68
63 69dnl pthread-stubs is mandatory on some BSD platforms, due to the nature of the
64 70dnl project. Even then there's a notable issue as described in the project README
65AC_SUBST(PTHREADSTUBS_CFLAGS) 71case "$host_os" in
66AC_SUBST(PTHREADSTUBS_LIBS) 72linux* | cygwin* | darwin* | solaris* | *-gnu* | gnu* | openbsd*)
73 pthread_stubs_possible="no"
74 ;;
75* )
76 pthread_stubs_possible="yes"
77 ;;
78esac
79
80if test "x$pthread_stubs_possible" = xyes; then
81 PKG_CHECK_MODULES(PTHREADSTUBS, pthread-stubs >= 0.4)
82 AC_SUBST(PTHREADSTUBS_CFLAGS)
83 AC_SUBST(PTHREADSTUBS_LIBS)
84fi
67 85
68pkgconfigdir=${libdir}/pkgconfig 86pkgconfigdir=${libdir}/pkgconfig
69AC_SUBST(pkgconfigdir) 87AC_SUBST(pkgconfigdir)
88libdrmdatadir=${datadir}/libdrm
89AC_SUBST(libdrmdatadir)
90
70AC_ARG_ENABLE([udev], 91AC_ARG_ENABLE([udev],
71 [AS_HELP_STRING([--enable-udev], 92 [AS_HELP_STRING([--enable-udev],
72 [Enable support for using udev instead of mknod (default: disabled)])], 93 [Enable support for using udev instead of mknod (default: disabled)])],
@@ -173,7 +194,9 @@ AC_CHECK_FUNCS([clock_gettime], [CLOCK_LIB=],
173 [AC_MSG_ERROR([Couldn't find clock_gettime])])]) 194 [AC_MSG_ERROR([Couldn't find clock_gettime])])])
174AC_SUBST([CLOCK_LIB]) 195AC_SUBST([CLOCK_LIB])
175 196
176AC_CHECK_FUNCS([open_memstream], [HAVE_OPEN_MEMSTREAM=yes]) 197AC_CHECK_FUNCS([open_memstream],
198 [AC_DEFINE([HAVE_OPEN_MEMSTREAM], 1, [Have open_memstream()])],
199 [AC_DEFINE([HAVE_OPEN_MEMSTREAM], 0)])
177 200
178dnl Use lots of warning flags with gcc and compatible compilers 201
179 202
@@ -182,7 +205,7 @@ dnl skipped and all flags rechecked. So there's no need to do anything
182dnl else. If for any reason you need to force a recheck, just change 205dnl else. If for any reason you need to force a recheck, just change
183dnl MAYBE_WARN in an ignorable way (like adding whitespace) 206dnl MAYBE_WARN in an ignorable way (like adding whitespace)
184 207
185MAYBE_WARN="-Wall -Wextra \ 208MAYBE_WARN="-Wall -Wextra -Werror=undef \
186-Wsign-compare -Werror-implicit-function-declaration \ 209-Wsign-compare -Werror-implicit-function-declaration \
187-Wpointer-arith -Wwrite-strings -Wstrict-prototypes \ 210-Wpointer-arith -Wwrite-strings -Wstrict-prototypes \
188-Wmissing-prototypes -Wmissing-declarations -Wnested-externs \ 211-Wmissing-prototypes -Wmissing-declarations -Wnested-externs \
@@ -244,9 +267,13 @@ AC_CACHE_CHECK([for native atomic primitives], drm_cv_atomic_primitives, [
244if test "x$drm_cv_atomic_primitives" = xIntel; then 267if test "x$drm_cv_atomic_primitives" = xIntel; then
245 AC_DEFINE(HAVE_LIBDRM_ATOMIC_PRIMITIVES, 1, 268 AC_DEFINE(HAVE_LIBDRM_ATOMIC_PRIMITIVES, 1,
246 [Enable if your compiler supports the Intel __sync_* atomic primitives]) 269 [Enable if your compiler supports the Intel __sync_* atomic primitives])
270else
271 AC_DEFINE(HAVE_LIBDRM_ATOMIC_PRIMITIVES, 0)
247fi 272fi
248if test "x$drm_cv_atomic_primitives" = "xlibatomic-ops"; then 273if test "x$drm_cv_atomic_primitives" = "xlibatomic-ops"; then
249 AC_DEFINE(HAVE_LIB_ATOMIC_OPS, 1, [Enable if you have libatomic-ops-dev installed]) 274 AC_DEFINE(HAVE_LIB_ATOMIC_OPS, 1, [Enable if you have libatomic-ops-dev installed])
275else
276 AC_DEFINE(HAVE_LIB_ATOMIC_OPS, 0)
250fi 277fi
251 278
252dnl Print out the appropriate message considering the value set by the 279
@@ -325,6 +352,8 @@ AC_SUBST(PCIACCESS_LIBS)
325 352
326if test "x$UDEV" = xyes; then 353if test "x$UDEV" = xyes; then
327 AC_DEFINE(UDEV, 1, [Have UDEV support]) 354 AC_DEFINE(UDEV, 1, [Have UDEV support])
355else
356 AC_DEFINE(UDEV, 0)
328fi 357fi
329 358
330AC_CANONICAL_HOST 359AC_CANONICAL_HOST
@@ -343,32 +372,34 @@ AM_CONDITIONAL(HAVE_LIBKMS, [test "x$LIBKMS" = xyes])
343AM_CONDITIONAL(HAVE_INTEL, [test "x$INTEL" = xyes]) 372AM_CONDITIONAL(HAVE_INTEL, [test "x$INTEL" = xyes])
344if test "x$INTEL" = xyes; then 373if test "x$INTEL" = xyes; then
345 AC_DEFINE(HAVE_INTEL, 1, [Have intel support]) 374 AC_DEFINE(HAVE_INTEL, 1, [Have intel support])
375else
376 AC_DEFINE(HAVE_INTEL, 0)
346fi 377fi
347 378
348AM_CONDITIONAL(HAVE_VMWGFX, [test "x$VMWGFX" = xyes]) 379AM_CONDITIONAL(HAVE_VMWGFX, [test "x$VMWGFX" = xyes])
349if test "x$VMWGFX" = xyes; then 380if test "x$VMWGFX" = xyes; then
350 AC_DEFINE(HAVE_VMWGFX, 1, [Have vmwgfx kernel headers]) 381 AC_DEFINE(HAVE_VMWGFX, 1, [Have vmwgfx kernel headers])
382else
383 AC_DEFINE(HAVE_VMWGFX, 0)
351fi 384fi
352 385
353AM_CONDITIONAL(HAVE_NOUVEAU, [test "x$NOUVEAU" = xyes]) 386AM_CONDITIONAL(HAVE_NOUVEAU, [test "x$NOUVEAU" = xyes])
354if test "x$NOUVEAU" = xyes; then 387if test "x$NOUVEAU" = xyes; then
355 AC_DEFINE(HAVE_NOUVEAU, 1, [Have nouveau (nvidia) support]) 388 AC_DEFINE(HAVE_NOUVEAU, 1, [Have nouveau (nvidia) support])
389else
390 AC_DEFINE(HAVE_NOUVEAU, 0)
356fi 391fi
357 392
358AM_CONDITIONAL(HAVE_OMAP, [test "x$OMAP" = xyes]) 393AM_CONDITIONAL(HAVE_OMAP, [test "x$OMAP" = xyes])
359if test "x$OMAP" = xyes; then
360 AC_DEFINE(HAVE_OMAP, 1, [Have OMAP support])
361fi
362 394
363AM_CONDITIONAL(HAVE_EXYNOS, [test "x$EXYNOS" = xyes]) 395AM_CONDITIONAL(HAVE_EXYNOS, [test "x$EXYNOS" = xyes])
364if test "x$EXYNOS" = xyes; then 396if test "x$EXYNOS" = xyes; then
365 AC_DEFINE(HAVE_EXYNOS, 1, [Have EXYNOS support]) 397 AC_DEFINE(HAVE_EXYNOS, 1, [Have EXYNOS support])
398else
399 AC_DEFINE(HAVE_EXYNOS, 0)
366fi 400fi
367 401
368AM_CONDITIONAL(HAVE_FREEDRENO, [test "x$FREEDRENO" = xyes]) 402AM_CONDITIONAL(HAVE_FREEDRENO, [test "x$FREEDRENO" = xyes])
369if test "x$FREEDRENO" = xyes; then
370 AC_DEFINE(HAVE_FREEDRENO, 1, [Have freedreno support])
371fi
372 403
373if test "x$FREEDRENO_KGSL" = xyes; then 404if test "x$FREEDRENO_KGSL" = xyes; then
374 if test "x$FREEDRENO" != xyes; then 405 if test "x$FREEDRENO" != xyes; then
@@ -378,11 +409,15 @@ fi
378AM_CONDITIONAL(HAVE_FREEDRENO_KGSL, [test "x$FREEDRENO_KGSL" = xyes]) 409AM_CONDITIONAL(HAVE_FREEDRENO_KGSL, [test "x$FREEDRENO_KGSL" = xyes])
379if test "x$FREEDRENO_KGSL" = xyes; then 410if test "x$FREEDRENO_KGSL" = xyes; then
380 AC_DEFINE(HAVE_FREEDRENO_KGSL, 1, [Have freedreno support for KGSL kernel interface]) 411 AC_DEFINE(HAVE_FREEDRENO_KGSL, 1, [Have freedreno support for KGSL kernel interface])
412else
413 AC_DEFINE(HAVE_FREEDRENO_KGSL, 0)
381fi 414fi
382 415
383AM_CONDITIONAL(HAVE_RADEON, [test "x$RADEON" = xyes]) 416AM_CONDITIONAL(HAVE_RADEON, [test "x$RADEON" = xyes])
384if test "x$RADEON" = xyes; then 417if test "x$RADEON" = xyes; then
385 AC_DEFINE(HAVE_RADEON, 1, [Have radeon support]) 418 AC_DEFINE(HAVE_RADEON, 1, [Have radeon support])
419else
420 AC_DEFINE(HAVE_RADEON, 0)
386fi 421fi
387 422
388if test "x$AMDGPU" != xno; then 423if test "x$AMDGPU" != xno; then
@@ -409,36 +444,30 @@ AM_CONDITIONAL(HAVE_AMDGPU, [test "x$AMDGPU" = xyes])
409if test "x$AMDGPU" = xyes; then 444if test "x$AMDGPU" = xyes; then
410 AC_DEFINE(HAVE_AMDGPU, 1, [Have amdgpu support]) 445 AC_DEFINE(HAVE_AMDGPU, 1, [Have amdgpu support])
411 446
412 AC_DEFINE(HAVE_CUNIT, [test "x$have_cunit" != "xno"], [Enable CUNIT Have amdgpu support])
413
414 if test "x$have_cunit" = "xno"; then 447 if test "x$have_cunit" = "xno"; then
415 AC_MSG_WARN([Could not find cunit library. Disabling amdgpu tests]) 448 AC_MSG_WARN([Could not find cunit library. Disabling amdgpu tests])
416 fi 449 fi
450else
451 AC_DEFINE(HAVE_AMDGPU, 0)
417fi 452fi
418 453
419AM_CONDITIONAL(HAVE_TEGRA, [test "x$TEGRA" = xyes]) 454AM_CONDITIONAL(HAVE_TEGRA, [test "x$TEGRA" = xyes])
420if test "x$TEGRA" = xyes; then
421 AC_DEFINE(HAVE_TEGRA, 1, [Have Tegra support])
422fi
423 455
424AM_CONDITIONAL(HAVE_ROCKCHIP, [test "x$ROCKCHIP" = xyes]) 456AM_CONDITIONAL(HAVE_ROCKCHIP, [test "x$ROCKCHIP" = xyes])
425if test "x$ROCKCHIP" = xyes; then 457if test "x$ROCKCHIP" = xyes; then
426 AC_DEFINE(HAVE_ROCKCHIP, 1, [Have ROCKCHIP support]) 458 AC_DEFINE(HAVE_ROCKCHIP, 1, [Have ROCKCHIP support])
459fi
427 460
428AM_CONDITIONAL(HAVE_VC4, [test "x$VC4" = xyes]) 461AM_CONDITIONAL(HAVE_VC4, [test "x$VC4" = xyes])
429if test "x$VC4" = xyes; then 462if test "x$VC4" = xyes; then
430 AC_DEFINE(HAVE_VC4, 1, [Have VC4 support]) 463 AC_DEFINE(HAVE_VC4, 1, [Have VC4 support])
464else
465 AC_DEFINE(HAVE_VC4, 0)
431fi 466fi
432 467
433AM_CONDITIONAL(HAVE_ETNAVIV, [test "x$ETNAVIV" = xyes]) 468AM_CONDITIONAL(HAVE_ETNAVIV, [test "x$ETNAVIV" = xyes])
434if test "x$ETNAVIV" = xyes; then
435 AC_DEFINE(HAVE_ETNAVIV, 1, [Have etnaviv support])
436fi
437 469
438AM_CONDITIONAL(HAVE_INSTALL_TESTS, [test "x$INSTALL_TESTS" = xyes]) 470AM_CONDITIONAL(HAVE_INSTALL_TESTS, [test "x$INSTALL_TESTS" = xyes])
439if test "x$INSTALL_TESTS" = xyes; then
440 AC_DEFINE(HAVE_INSTALL_TESTS, 1, [Install test programs])
441fi
442 471
443AC_ARG_ENABLE([cairo-tests], 472AC_ARG_ENABLE([cairo-tests],
444 [AS_HELP_STRING([--enable-cairo-tests], 473 [AS_HELP_STRING([--enable-cairo-tests],
@@ -456,6 +485,8 @@ if test "x$CAIRO" = xyes; then
456 AC_MSG_ERROR([Cairo support required but not present]) 485 AC_MSG_ERROR([Cairo support required but not present])
457 fi 486 fi
458 AC_DEFINE(HAVE_CAIRO, 1, [Have Cairo support]) 487 AC_DEFINE(HAVE_CAIRO, 1, [Have Cairo support])
488else
489 AC_DEFINE(HAVE_CAIRO, 0)
459fi 490fi
460AC_MSG_RESULT([$CAIRO]) 491AC_MSG_RESULT([$CAIRO])
461AM_CONDITIONAL(HAVE_CAIRO, [test "x$CAIRO" = xyes]) 492AM_CONDITIONAL(HAVE_CAIRO, [test "x$CAIRO" = xyes])
@@ -497,6 +528,8 @@ if test "x$VALGRIND" = "xyes"; then
497 AC_MSG_ERROR([Valgrind support required but not present]) 528 AC_MSG_ERROR([Valgrind support required but not present])
498 fi 529 fi
499 AC_DEFINE([HAVE_VALGRIND], 1, [Use valgrind intrinsics to suppress false warnings]) 530 AC_DEFINE([HAVE_VALGRIND], 1, [Use valgrind intrinsics to suppress false warnings])
531else
532 AC_DEFINE([HAVE_VALGRIND], 0)
500fi 533fi
501 534
502AC_MSG_RESULT([$VALGRIND]) 535AC_MSG_RESULT([$VALGRIND])
@@ -514,11 +547,16 @@ AC_LINK_IFELSE([AC_LANG_PROGRAM([
514 547
515if test "x$HAVE_ATTRIBUTE_VISIBILITY" = xyes; then 548if test "x$HAVE_ATTRIBUTE_VISIBILITY" = xyes; then
516 AC_DEFINE(HAVE_VISIBILITY, 1, [Compiler supports __attribute__(("hidden"))]) 549 AC_DEFINE(HAVE_VISIBILITY, 1, [Compiler supports __attribute__(("hidden"))])
550else
551 AC_DEFINE(HAVE_VISIBILITY, 0)
517fi 552fi
518 553
554CFLAGS="$CFLAGS -include config.h"
555
519AC_SUBST(WARN_CFLAGS) 556AC_SUBST(WARN_CFLAGS)
520AC_CONFIG_FILES([ 557AC_CONFIG_FILES([
521 Makefile 558 Makefile
559 data/Makefile
522 libkms/Makefile 560 libkms/Makefile
523 libkms/libkms.pc 561 libkms/libkms.pc
524 intel/Makefile 562 intel/Makefile
diff --git a/data/Android.mk b/data/Android.mk
new file mode 100644
index 00000000..62013f0c
--- /dev/null
+++ b/data/Android.mk
@@ -0,0 +1,10 @@
LOCAL_PATH := $(call my-dir)

include $(CLEAR_VARS)
LOCAL_MODULE := amdgpu.ids
LOCAL_MODULE_TAGS := optional
LOCAL_MODULE_CLASS := ETC
LOCAL_PROPRIETARY_MODULE := true
LOCAL_MODULE_RELATIVE_PATH := hwdata
LOCAL_SRC_FILES := $(LOCAL_MODULE)
include $(BUILD_PREBUILT)
diff --git a/data/Makefile.am b/data/Makefile.am
new file mode 100644
index 00000000..897a7f35
--- /dev/null
+++ b/data/Makefile.am
@@ -0,0 +1,25 @@
# Copyright © 2017 Advanced Micro Devices, Inc.
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# on the rights to use, copy, modify, merge, publish, distribute, sub
# license, and/or sell copies of the Software, and to permit persons to whom
# the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
# ADAM JACKSON BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

libdrmdatadir = @libdrmdatadir@
if HAVE_AMDGPU
dist_libdrmdata_DATA = amdgpu.ids
endif
diff --git a/data/amdgpu.ids b/data/amdgpu.ids
new file mode 100644
index 00000000..1828e410
--- /dev/null
+++ b/data/amdgpu.ids
@@ -0,0 +1,187 @@
# List of AMDGPU IDs
#
# Syntax:
# device_id, revision_id, product_name <-- single tab after comma

1.0.0
6600, 0, AMD Radeon HD 8600/8700M
6600, 81, AMD Radeon (TM) R7 M370
6601, 0, AMD Radeon (TM) HD 8500M/8700M
6604, 0, AMD Radeon R7 M265 Series
6604, 81, AMD Radeon (TM) R7 M350
6605, 0, AMD Radeon R7 M260 Series
6605, 81, AMD Radeon (TM) R7 M340
6606, 0, AMD Radeon HD 8790M
6607, 0, AMD Radeon (TM) HD8530M
6608, 0, AMD FirePro W2100
6610, 0, AMD Radeon HD 8600 Series
6610, 81, AMD Radeon (TM) R7 350
6610, 83, AMD Radeon (TM) R5 340
6611, 0, AMD Radeon HD 8500 Series
6613, 0, AMD Radeon HD 8500 series
6617, C7, AMD Radeon R7 240 Series
6640, 0, AMD Radeon HD 8950
6640, 80, AMD Radeon (TM) R9 M380
6646, 0, AMD Radeon R9 M280X
6646, 80, AMD Radeon (TM) R9 M470X
6647, 0, AMD Radeon R9 M270X
6647, 80, AMD Radeon (TM) R9 M380
6649, 0, AMD FirePro W5100
6658, 0, AMD Radeon R7 200 Series
665C, 0, AMD Radeon HD 7700 Series
665D, 0, AMD Radeon R7 200 Series
665F, 81, AMD Radeon (TM) R7 300 Series
6660, 0, AMD Radeon HD 8600M Series
6660, 81, AMD Radeon (TM) R5 M335
6660, 83, AMD Radeon (TM) R5 M330
6663, 0, AMD Radeon HD 8500M Series
6663, 83, AMD Radeon (TM) R5 M320
6664, 0, AMD Radeon R5 M200 Series
6665, 0, AMD Radeon R5 M200 Series
6665, 83, AMD Radeon (TM) R5 M320
6667, 0, AMD Radeon R5 M200 Series
666F, 0, AMD Radeon HD 8500M
6780, 0, ATI FirePro V (FireGL V) Graphics Adapter
678A, 0, ATI FirePro V (FireGL V) Graphics Adapter
6798, 0, AMD Radeon HD 7900 Series
679A, 0, AMD Radeon HD 7900 Series
679B, 0, AMD Radeon HD 7900 Series
679E, 0, AMD Radeon HD 7800 Series
67A0, 0, AMD Radeon FirePro W9100
67A1, 0, AMD Radeon FirePro W8100
67B0, 0, AMD Radeon R9 200 Series
67B0, 80, AMD Radeon (TM) R9 390 Series
67B1, 0, AMD Radeon R9 200 Series
67B1, 80, AMD Radeon (TM) R9 390 Series
67B9, 0, AMD Radeon R9 200 Series
67DF, C1, Radeon RX 580 Series
67DF, C2, Radeon RX 570 Series
67DF, C3, Radeon RX 580 Series
67DF, C4, AMD Radeon (TM) RX 480 Graphics
67DF, C5, AMD Radeon (TM) RX 470 Graphics
67DF, C6, Radeon RX 570 Series
67DF, C7, AMD Radeon (TM) RX 480 Graphics
67DF, CF, AMD Radeon (TM) RX 470 Graphics
67DF, D7, Radeon(TM) RX 470 Graphics
67DF, E3, Radeon RX Series
67DF, E7, Radeon RX 580 Series
67DF, EF, Radeon RX 570 Series
67C2, 01, AMD Radeon (TM) Pro V7350x2
67C2, 02, AMD Radeon (TM) Pro V7300X
67C4, 00, AMD Radeon (TM) Pro WX 7100 Graphics
67C7, 00, AMD Radeon (TM) Pro WX 5100 Graphics
67C0, 00, AMD Radeon (TM) Pro WX 7100 Graphics
67D0, 01, AMD Radeon (TM) Pro V7350x2
67D0, 02, AMD Radeon (TM) Pro V7300X
67E0, 00, AMD Radeon (TM) Pro WX Series
67E3, 00, AMD Radeon (TM) Pro WX 4100
67E8, 00, AMD Radeon (TM) Pro WX Series
67E8, 01, AMD Radeon (TM) Pro WX Series
67E8, 80, AMD Radeon (TM) E9260 Graphics
67EB, 00, AMD Radeon (TM) Pro V5300X
67EF, C0, AMD Radeon (TM) RX Graphics
67EF, C1, AMD Radeon (TM) RX 460 Graphics
67EF, C3, Radeon RX Series
67EF, C5, AMD Radeon (TM) RX 460 Graphics
67EF, C7, AMD Radeon (TM) RX Graphics
67EF, CF, AMD Radeon (TM) RX 460 Graphics
67EF, E0, Radeon RX 560 Series
67EF, E1, Radeon RX Series
67EF, E3, Radeon RX Series
67EF, E5, Radeon RX 560 Series
67EF, EF, AMD Radeon (TM) RX Graphics
67EF, FF, Radeon(TM) RX 460 Graphics
67FF, C0, AMD Radeon (TM) RX Graphics
67FF, C1, AMD Radeon (TM) RX Graphics
67FF, CF, Radeon RX 560 Series
67FF, EF, Radeon RX 560 Series
67FF, FF, Radeon RX 550 Series
6800, 0, AMD Radeon HD 7970M
6801, 0, AMD Radeon(TM) HD8970M
6808, 0, ATI FirePro V(FireGL V) Graphics Adapter
6809, 0, ATI FirePro V(FireGL V) Graphics Adapter
6810, 0, AMD Radeon(TM) HD 8800 Series
6810, 81, AMD Radeon (TM) R7 370 Series
6811, 0, AMD Radeon(TM) HD8800 Series
6811, 81, AMD Radeon (TM) R7 300 Series
6818, 0, AMD Radeon HD 7800 Series
6819, 0, AMD Radeon HD 7800 Series
6820, 0, AMD Radeon HD 8800M Series
6820, 81, AMD Radeon (TM) R9 M375
6820, 83, AMD Radeon (TM) R9 M375X
6821, 0, AMD Radeon HD 8800M Series
6821, 87, AMD Radeon (TM) R7 M380
6821, 83, AMD Radeon R9 (TM) M370X
6822, 0, AMD Radeon E8860
6823, 0, AMD Radeon HD 8800M Series
6825, 0, AMD Radeon HD 7800M Series
6827, 0, AMD Radeon HD 7800M Series
6828, 0, ATI FirePro V(FireGL V) Graphics Adapter
682B, 0, AMD Radeon HD 8800M Series
682B, 87, AMD Radeon (TM) R9 M360
682C, 0, AMD FirePro W4100
682D, 0, AMD Radeon HD 7700M Series
682F, 0, AMD Radeon HD 7700M Series
6835, 0, AMD Radeon R7 Series / HD 9000 Series
6837, 0, AMD Radeon HD7700 Series
683D, 0, AMD Radeon HD 7700 Series
683F, 0, AMD Radeon HD 7700 Series
6860, 00, Radeon Instinct MI25
6860, 01, Radeon Pro V320
6860, 02, Radeon Instinct MI25
6860, 03, Radeon Pro V340
6860, 04, Radeon Instinct MI25x2
6861, 00, Radeon(TM) Pro WX9100
6862, 00, Radeon Pro SSG
6863, 00, Radeon Vega Frontier Edition
6864, 03, Radeon Pro V340
6864, 04, Instinct MI25x2
6868, 00, Radeon(TM) Pro WX8100
686C, 00, GLXT (Radeon Instinct MI25) MxGPU VFID
686C, 01, GLXT (Radeon Pro V320) MxGPU
686C, 02, GLXT (Radeon Instinct MI25) MxGPU
686C, 03, GLXT (Radeon Pro V340) MxGPU
686C, 04, GLXT (Radeon Instinct MI25x2) MxGPU
687F, C0, Radeon RX Vega
687F, C1, Radeon RX Vega
687F, C3, Radeon RX Vega
6900, 0, AMD Radeon R7 M260
6900, 81, AMD Radeon (TM) R7 M360
6900, 83, AMD Radeon (TM) R7 M340
6901, 0, AMD Radeon R5 M255
6907, 0, AMD Radeon R5 M255
6907, 87, AMD Radeon (TM) R5 M315
6920, 0, AMD RADEON R9 M395X
6920, 1, AMD RADEON R9 M390X
6921, 0, AMD Radeon R9 M295X
6929, 0, AMD FirePro S7150
692B, 0, AMD FirePro W7100
6938, 0, AMD Radeon R9 200 Series
6938, F0, AMD Radeon R9 200 Series
6938, F1, AMD Radeon (TM) R9 380 Series
6939, F0, AMD Radeon R9 200 Series
6939, 0, AMD Radeon R9 200 Series
6939, F1, AMD Radeon (TM) R9 380 Series
6980, 00, Radeon Pro WX3100
6985, 00, AMD Radeon Pro WX3100
6987, 80, AMD Embedded Radeon E9171
6995, 00, AMD Radeon Pro WX2100
6997, 00, Radeon Pro WX2100
699F, 81, AMD Embedded Radeon E9170 Series
699F, C0, Radeon 500 Series
699F, C3, Radeon 500 Series
699F, C7, Radeon RX 550 Series
7300, C1, AMD FirePro (TM) S9300 x2
7300, C8, AMD Radeon (TM) R9 Fury Series
7300, C9, Radeon (TM) Pro Duo
7300, CB, AMD Radeon (TM) R9 Fury Series
7300, CA, AMD Radeon (TM) R9 Fury Series
9874, C4, AMD Radeon R7 Graphics
9874, C5, AMD Radeon R6 Graphics
9874, C6, AMD Radeon R6 Graphics
9874, C7, AMD Radeon R5 Graphics
9874, C8, AMD Radeon R7 Graphics
9874, 81, AMD Radeon R6 Graphics
9874, 87, AMD Radeon R5 Graphics
9874, 85, AMD Radeon R6 Graphics
9874, 84, AMD Radeon R7 Graphics
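
The table above maps a PCI device ID and revision ID pair to a marketing name, with '#' comments and a leading table-version line. A hedged, standalone sketch of how such a file could be consumed follows; it is not libdrm's own loader, and the install path is an assumption based on the hwdata/libdrmdatadir locations used in the build files here.

/* Minimal standalone lookup for a file in the amdgpu.ids layout. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *lookup_marketing_name(const char *path, unsigned did, unsigned rid)
{
	char line[256];
	char *result = NULL;
	FILE *f = fopen(path, "r");

	if (!f)
		return NULL;

	while (fgets(line, sizeof(line), f)) {
		unsigned file_did, file_rid;
		char name[200];

		if (line[0] == '#' || line[0] == '\n')
			continue;

		/* "6600, 81, AMD Radeon (TM) R7 M370" -> 0x6600 / 0x81 / name;
		 * the version line ("1.0.0") fails the match and is skipped. */
		if (sscanf(line, "%x, %x, %199[^\n]", &file_did, &file_rid, name) != 3)
			continue;

		if (file_did == did && file_rid == rid) {
			result = strdup(name);
			break;
		}
	}

	fclose(f);
	return result;
}

int main(void)
{
	/* The install path is an assumption; adjust for your prefix. */
	char *name = lookup_marketing_name("/usr/share/libdrm/amdgpu.ids", 0x67DF, 0xC7);

	printf("%s\n", name ? name : "unknown GPU");
	free(name);
	return 0;
}
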
diff --git a/data/meson.build b/data/meson.build
new file mode 100644
index 00000000..9c26b66e
--- /dev/null
+++ b/data/meson.build
@@ -0,0 +1,27 @@
# Copyright © 2017-2018 Intel Corporation

# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:

# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.

# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

if with_amdgpu
  install_data(
    'amdgpu.ids',
    install_mode : 'rw-r--r--',
    install_dir : datadir_amdgpu,
  )
endif
diff --git a/etnaviv/Makefile.sources b/etnaviv/Makefile.sources
index 52580567..0eb73783 100644
--- a/etnaviv/Makefile.sources
+++ b/etnaviv/Makefile.sources
@@ -3,6 +3,7 @@ LIBDRM_ETNAVIV_FILES := \
 	etnaviv_gpu.c \
 	etnaviv_bo.c \
 	etnaviv_bo_cache.c \
+	etnaviv_perfmon.c \
 	etnaviv_pipe.c \
 	etnaviv_cmd_stream.c \
 	etnaviv_drm.h \
diff --git a/etnaviv/etnaviv-symbol-check b/etnaviv/etnaviv-symbol-check
index 22afd168..bc509615 100755
--- a/etnaviv/etnaviv-symbol-check
+++ b/etnaviv/etnaviv-symbol-check
@@ -39,8 +39,14 @@ etna_cmd_stream_new
 etna_cmd_stream_del
 etna_cmd_stream_timestamp
 etna_cmd_stream_flush
+etna_cmd_stream_flush2
 etna_cmd_stream_finish
+etna_cmd_stream_perf
 etna_cmd_stream_reloc
+etna_perfmon_create
+etna_perfmon_del
+etna_perfmon_get_dom_by_name
+etna_perfmon_get_sig_by_name
 EOF
 done)
 
diff --git a/etnaviv/etnaviv_bo.c b/etnaviv/etnaviv_bo.c
index 4ad0434e..32f7b348 100644
--- a/etnaviv/etnaviv_bo.c
+++ b/etnaviv/etnaviv_bo.c
@@ -24,10 +24,6 @@
  * Christian Gmeiner <christian.gmeiner@gmail.com>
  */
 
-#ifdef HAVE_CONFIG_H
-# include <config.h>
-#endif
-
 #include "etnaviv_priv.h"
 #include "etnaviv_drmif.h"
 
@@ -173,7 +169,7 @@ struct etna_bo *etna_bo_from_name(struct etna_device *dev, uint32_t name)
 	pthread_mutex_lock(&table_lock);
 
 	/* check name table first, to see if bo is already open: */
-	bo = lookup_bo(dev->name_table, req.handle);
+	bo = lookup_bo(dev->name_table, name);
 	if (bo)
 		goto out_unlock;
 
@@ -206,10 +202,15 @@ struct etna_bo *etna_bo_from_dmabuf(struct etna_device *dev, int fd)
 	int ret, size;
 	uint32_t handle;
 
+	/* take the lock before calling drmPrimeFDToHandle to avoid
+	 * racing against etna_bo_del, which might invalidate the
+	 * returned handle.
+	 */
 	pthread_mutex_lock(&table_lock);
 
 	ret = drmPrimeFDToHandle(dev->fd, fd, &handle);
 	if (ret) {
+		pthread_mutex_unlock(&table_lock);
 		return NULL;
 	}
 
diff --git a/etnaviv/etnaviv_bo_cache.c b/etnaviv/etnaviv_bo_cache.c
index 8924651f..c81de262 100644
--- a/etnaviv/etnaviv_bo_cache.c
+++ b/etnaviv/etnaviv_bo_cache.c
@@ -24,10 +24,6 @@
  * Christian Gmeiner <christian.gmeiner@gmail.com>
  */
 
-#ifdef HAVE_CONFIG_H
-# include <config.h>
-#endif
-
 #include "etnaviv_priv.h"
 #include "etnaviv_drmif.h"
 
@@ -124,20 +120,32 @@ static int is_idle(struct etna_bo *bo)
 
 static struct etna_bo *find_in_bucket(struct etna_bo_bucket *bucket, uint32_t flags)
 {
-	struct etna_bo *bo = NULL;
+	struct etna_bo *bo = NULL, *tmp;
 
 	pthread_mutex_lock(&table_lock);
-	while (!LIST_IS_EMPTY(&bucket->list)) {
-		bo = LIST_ENTRY(struct etna_bo, bucket->list.next, list);
 
-		if (bo->flags == flags && is_idle(bo)) {
-			list_del(&bo->list);
-			break;
+	if (LIST_IS_EMPTY(&bucket->list))
+		goto out_unlock;
+
+	LIST_FOR_EACH_ENTRY_SAFE(bo, tmp, &bucket->list, list) {
+		/* skip BOs with different flags */
+		if (bo->flags != flags)
+			continue;
+
+		/* check if the first BO with matching flags is idle */
+		if (is_idle(bo)) {
+			list_delinit(&bo->list);
+			goto out_unlock;
 		}
 
-		bo = NULL;
+		/* If the oldest BO is still busy, don't try younger ones */
 		break;
 	}
+
+	/* There was no matching buffer found */
+	bo = NULL;
+
+out_unlock:
 	pthread_mutex_unlock(&table_lock);
 
 	return bo;
diff --git a/etnaviv/etnaviv_cmd_stream.c b/etnaviv/etnaviv_cmd_stream.c
index 9ce3f363..13730168 100644
--- a/etnaviv/etnaviv_cmd_stream.c
+++ b/etnaviv/etnaviv_cmd_stream.c
@@ -24,10 +24,6 @@
  * Christian Gmeiner <christian.gmeiner@gmail.com>
  */
 
-#ifdef HAVE_CONFIG_H
-# include <config.h>
-#endif
-
 #include <assert.h>
 
 #include "etnaviv_drmif.h"
@@ -105,6 +101,7 @@ void etna_cmd_stream_del(struct etna_cmd_stream *stream)
 
 	free(stream->buffer);
 	free(priv->submit.relocs);
+	free(priv->submit.pmrs);
 	free(priv);
 }
 
@@ -115,6 +112,7 @@ static void reset_buffer(struct etna_cmd_stream *stream)
 	stream->offset = 0;
 	priv->submit.nr_bos = 0;
 	priv->submit.nr_relocs = 0;
+	priv->submit.nr_pmrs = 0;
 	priv->nr_bos = 0;
 
 	if (priv->reset_notify)
@@ -177,7 +175,8 @@ static uint32_t bo2idx(struct etna_cmd_stream *stream, struct etna_bo *bo,
 	return idx;
 }
 
-static void flush(struct etna_cmd_stream *stream)
+static void flush(struct etna_cmd_stream *stream, int in_fence_fd,
+		int *out_fence_fd)
 {
 	struct etna_cmd_stream_priv *priv = etna_cmd_stream_priv(stream);
 	int ret, id = priv->pipe->id;
@@ -190,10 +189,20 @@ static void flush(struct etna_cmd_stream *stream)
 		.nr_bos = priv->submit.nr_bos,
 		.relocs = VOID2U64(priv->submit.relocs),
 		.nr_relocs = priv->submit.nr_relocs,
+		.pmrs = VOID2U64(priv->submit.pmrs),
+		.nr_pmrs = priv->submit.nr_pmrs,
 		.stream = VOID2U64(stream->buffer),
 		.stream_size = stream->offset * 4, /* in bytes */
 	};
 
+	if (in_fence_fd != -1) {
+		req.flags |= ETNA_SUBMIT_FENCE_FD_IN | ETNA_SUBMIT_NO_IMPLICIT;
+		req.fence_fd = in_fence_fd;
+	}
+
+	if (out_fence_fd)
+		req.flags |= ETNA_SUBMIT_FENCE_FD_OUT;
+
 	ret = drmCommandWriteRead(gpu->dev->fd, DRM_ETNAVIV_GEM_SUBMIT,
 			&req, sizeof(req));
 
@@ -208,11 +217,21 @@ static void flush(struct etna_cmd_stream *stream)
 		bo->current_stream = NULL;
 		etna_bo_del(bo);
 	}
+
+	if (out_fence_fd)
+		*out_fence_fd = req.fence_fd;
 }
 
 void etna_cmd_stream_flush(struct etna_cmd_stream *stream)
 {
-	flush(stream);
+	flush(stream, -1, NULL);
+	reset_buffer(stream);
+}
+
+void etna_cmd_stream_flush2(struct etna_cmd_stream *stream, int in_fence_fd,
+		int *out_fence_fd)
+{
+	flush(stream, in_fence_fd, out_fence_fd);
 	reset_buffer(stream);
 }
 
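
The hunks above add explicit sync_file support to the submit path: etna_cmd_stream_flush2() forwards an optional in-fence FD (which also disables implicit fencing via ETNA_SUBMIT_NO_IMPLICIT) and can return an out-fence FD from the kernel. A minimal, hedged usage sketch, assuming a command stream that has already been created and filled through the existing etnaviv_drmif.h API:

#include <stdio.h>

#include "etnaviv_drmif.h"

/* Submit whatever has been emitted into `stream`, waiting for `in_fence`
 * (-1 for none, which keeps implicit fencing) and returning an out-fence
 * FD the caller can hand to another driver or poll on. */
static int submit_with_fences(struct etna_cmd_stream *stream, int in_fence)
{
	int out_fence = -1;

	/* ... etna_cmd_stream_emit()/etna_cmd_stream_reloc() calls go here ... */

	etna_cmd_stream_flush2(stream, in_fence, &out_fence);
	printf("submitted, out fence fd = %d\n", out_fence);

	return out_fence;	/* caller close()s it when done */
}
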
@@ -220,7 +239,7 @@ void etna_cmd_stream_finish(struct etna_cmd_stream *stream)
 {
 	struct etna_cmd_stream_priv *priv = etna_cmd_stream_priv(stream);
 
-	flush(stream);
+	flush(stream, -1, NULL);
 	etna_pipe_wait(priv->pipe, priv->last_timestamp, 5000);
 	reset_buffer(stream);
 }
@@ -241,3 +260,19 @@ void etna_cmd_stream_reloc(struct etna_cmd_stream *stream, const struct etna_rel
 
 	etna_cmd_stream_emit(stream, addr);
 }
+
+void etna_cmd_stream_perf(struct etna_cmd_stream *stream, const struct etna_perf *p)
+{
+	struct etna_cmd_stream_priv *priv = etna_cmd_stream_priv(stream);
+	struct drm_etnaviv_gem_submit_pmr *pmr;
+	uint32_t idx = APPEND(&priv->submit, pmrs);
+
+	pmr = &priv->submit.pmrs[idx];
+
+	pmr->flags = p->flags;
+	pmr->sequence = p->sequence;
+	pmr->read_offset = p->offset;
+	pmr->read_idx = bo2idx(stream, p->bo, ETNA_SUBMIT_BO_READ | ETNA_SUBMIT_BO_WRITE);
+	pmr->domain = p->signal->domain->id;
+	pmr->signal = p->signal->signal;
+}
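
etna_cmd_stream_perf() queues a performance-monitor request that the kernel samples around the submitted stream, writing the counter value into a buffer object. The sketch below shows one plausible way to drive it; it assumes the perfmon helpers listed in etnaviv-symbol-check above (etna_perfmon_create, etna_perfmon_get_dom_by_name, etna_perfmon_get_sig_by_name) and the ETNA_PM_PROCESS_PRE flag keep their upstream etnaviv_drmif.h signatures, and "HI"/"TOTAL_CYCLES" are placeholder domain/signal names rather than guaranteed ones.

#include <stdio.h>

#include "etnaviv_drmif.h"

/* Sample one perf-counter signal alongside the next submit on `stream`. */
static void sample_counter(struct etna_device *dev, struct etna_pipe *pipe,
			   struct etna_cmd_stream *stream)
{
	struct etna_perfmon *pm = etna_perfmon_create(pipe);
	struct etna_perfmon_domain *dom;
	struct etna_perfmon_signal *sig;
	struct etna_bo *bo = NULL;
	struct etna_perf perf;

	if (!pm)
		return;

	dom = etna_perfmon_get_dom_by_name(pm, "HI");
	sig = dom ? etna_perfmon_get_sig_by_name(dom, "TOTAL_CYCLES") : NULL;
	bo = etna_bo_new(dev, 4096, ETNA_BO_UNCACHED);
	if (!sig || !bo)
		goto out;

	perf.flags = ETNA_PM_PROCESS_PRE;	/* sample before the stream runs */
	perf.sequence = 1;
	perf.bo = bo;
	perf.offset = 0;
	perf.signal = sig;

	etna_cmd_stream_perf(stream, &perf);	/* rides along with the next submit */

	/* ... emit commands here, then wait for completion ... */
	etna_cmd_stream_finish(stream);
	printf("counter = %u\n", *(uint32_t *)etna_bo_map(bo));

out:
	if (bo)
		etna_bo_del(bo);
	etna_perfmon_del(pm);
}
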
diff --git a/etnaviv/etnaviv_device.c b/etnaviv/etnaviv_device.c
index 3ce92030..d83e8d3e 100644
--- a/etnaviv/etnaviv_device.c
+++ b/etnaviv/etnaviv_device.c
@@ -24,10 +24,6 @@
  * Christian Gmeiner <christian.gmeiner@gmail.com>
  */
 
-#ifdef HAVE_CONFIG_H
-#include "config.h"
-#endif
-
 #include <stdlib.h>
 #include <linux/stddef.h>
 #include <linux/types.h>
diff --git a/etnaviv/etnaviv_drm.h b/etnaviv/etnaviv_drm.h
index 2584c1cc..0d5c49dc 100644
--- a/etnaviv/etnaviv_drm.h
+++ b/etnaviv/etnaviv_drm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
 /*
  * Copyright (C) 2015 Etnaviv Project
  *
@@ -54,6 +55,12 @@ struct drm_etnaviv_timespec {
 #define ETNAVIV_PARAM_GPU_FEATURES_4 0x07