From 493daaec1e59310ccbd342fdd918d069e495b769 Mon Sep 17 00:00:00 2001 From: "Andrew F. Davis" Date: Tue, 26 Jun 2018 16:24:59 -0500 Subject: [PATCH] sgx_km: Build SGX KM from source Signed-off-by: Andrew F. Davis --- sgx_km/Android.mk | 35 + sgx_km/eurasia_km/GPL-COPYING | 344 ++ sgx_km/eurasia_km/INSTALL | 72 + sgx_km/eurasia_km/MIT-COPYING | 41 + sgx_km/eurasia_km/README | 49 + .../eurasiacon/build/linux2/_objects.mk | 121 + .../eurasiacon/build/linux2/bits.mk | 116 + .../eurasiacon/build/linux2/buildvars.mk | 267 + .../eurasiacon/build/linux2/commands.mk | 312 + .../build/linux2/common/android/arch.mk | 107 + .../linux2/common/android/arch_common.mk | 158 + .../linux2/common/android/extra_config.mk | 115 + .../build/linux2/common/android/features.mk | 254 + .../linux2/common/android/install.sh.tpl | 315 ++ .../build/linux2/common/android/paths.mk | 60 + .../linux2/common/android/platform_version.mk | 192 + .../eurasiacon/build/linux2/common/dridrm.mk | 63 + .../eurasiacon/build/linux2/common/ion.mk | 65 + .../eurasiacon/build/linux2/common/omap4.mk | 44 + .../eurasiacon/build/linux2/common/opencl.mk | 40 + .../build/linux2/config/compiler.mk | 240 + .../build/linux2/config/compilers/arm-eabi.mk | 2 + .../config/compilers/arm-linux-androideabi.mk | 2 + .../compilers/mips64el-linux-android.mk | 29 + .../eurasiacon/build/linux2/config/core.mk | 790 +++ .../eurasiacon/build/linux2/defs.mk | 198 + .../build/linux2/kbuild/Makefile.template | 91 + .../eurasiacon/build/linux2/kbuild/kbuild.mk | 87 + .../eurasiacon/build/linux2/kernel_module.mk | 93 + .../eurasiacon/build/linux2/kernel_version.mk | 100 + .../build/linux2/moduledefs/host_x86_64.mk | 57 + .../build/linux2/moduledefs/target_armv7-a.mk | 130 + .../linux2/moduledefs/target_mips32r6el.mk | 134 + .../build/linux2/moduledefs/target_neutral.mk | 45 + .../build/linux2/moduledefs_common.mk | 164 + .../eurasiacon/build/linux2/modules.mk | 48 + .../build/linux2/omap_android/Makefile | 263 + 
.../eurasiacon/build/linux2/prepare_tree.mk | 56 + .../eurasiacon/build/linux2/pvrversion.mk | 53 + .../eurasiacon/build/linux2/scripts.mk | 364 ++ .../eurasiacon/build/linux2/shared_library.mk | 210 + .../eurasiacon/build/linux2/this_makefile.mk | 68 + .../eurasiacon/build/linux2/tools/cc-check.sh | 119 + .../eurasiacon/build/linux2/toplevel.mk | 312 + sgx_km/eurasia_km/include4/dbgdrvif.h | 382 ++ sgx_km/eurasia_km/include4/img_defs.h | 227 + sgx_km/eurasia_km/include4/img_types.h | 231 + sgx_km/eurasia_km/include4/kernel_types.h | 136 + sgx_km/eurasia_km/include4/pdumpdefs.h | 127 + sgx_km/eurasia_km/include4/pvr_debug.h | 283 + sgx_km/eurasia_km/include4/pvr_sync_user.h | 137 + sgx_km/eurasia_km/include4/pvrmodule.h | 48 + sgx_km/eurasia_km/include4/pvrversion.h | 68 + sgx_km/eurasia_km/include4/services.h | 1641 ++++++ sgx_km/eurasia_km/include4/servicesext.h | 980 ++++ sgx_km/eurasia_km/include4/sgx_options.h | 245 + sgx_km/eurasia_km/include4/sgxapi_km.h | 554 ++ sgx_km/eurasia_km/include4/sgxscript.h | 108 + .../include/env/linux/pvr_drm_shared.h | 76 + .../services4/include/ion_sys_private.h | 50 + .../services4/include/kernelbuffer.h | 97 + .../services4/include/kerneldisplay.h | 239 + sgx_km/eurasia_km/services4/include/pdump.h | 51 + .../eurasia_km/services4/include/pvr_bridge.h | 1696 ++++++ .../services4/include/pvr_bridge_km.h | 422 ++ sgx_km/eurasia_km/services4/include/pvrmmap.h | 73 + .../services4/include/pvrsrv_errors.h | 311 + .../services4/include/servicesint.h | 616 ++ .../eurasia_km/services4/include/sgx_bridge.h | 577 ++ .../services4/include/sgx_mkif_km.h | 476 ++ .../include/sgx_ukernel_status_codes.h | 998 ++++ sgx_km/eurasia_km/services4/include/sgxinfo.h | 341 ++ .../srvkm/bridged/bridged_pvr_bridge.c | 5024 +++++++++++++++++ .../srvkm/bridged/bridged_pvr_bridge.h | 257 + .../services4/srvkm/bridged/bridged_support.c | 113 + .../services4/srvkm/bridged/bridged_support.h | 68 + .../srvkm/bridged/sgx/bridged_sgx_bridge.c | 3058 
++++++++++ .../srvkm/bridged/sgx/bridged_sgx_bridge.h | 61 + .../services4/srvkm/common/buffer_manager.c | 3485 ++++++++++++ .../services4/srvkm/common/deviceclass.c | 2945 ++++++++++ .../services4/srvkm/common/deviceid.h | 51 + .../services4/srvkm/common/devicemem.c | 3491 ++++++++++++ .../services4/srvkm/common/handle.c | 2548 +++++++++ .../eurasia_km/services4/srvkm/common/hash.c | 739 +++ .../eurasia_km/services4/srvkm/common/lists.c | 159 + .../eurasia_km/services4/srvkm/common/mem.c | 175 + .../services4/srvkm/common/mem_debug.c | 276 + .../services4/srvkm/common/metrics.c | 209 + .../services4/srvkm/common/osfunc_common.c | 48 + .../services4/srvkm/common/pdump_common.c | 3117 ++++++++++ .../services4/srvkm/common/perproc.c | 398 ++ .../eurasia_km/services4/srvkm/common/power.c | 996 ++++ .../services4/srvkm/common/pvrsrv.c | 1909 +++++++ .../eurasia_km/services4/srvkm/common/queue.c | 1764 ++++++ sgx_km/eurasia_km/services4/srvkm/common/ra.c | 2217 ++++++++ .../services4/srvkm/common/refcount.c | 760 +++ .../services4/srvkm/common/resman.c | 990 ++++ .../services4/srvkm/common/ttrace.c | 601 ++ .../services4/srvkm/devices/sgx/mmu.c | 4734 ++++++++++++++++ .../services4/srvkm/devices/sgx/mmu.h | 501 ++ .../services4/srvkm/devices/sgx/pb.c | 493 ++ .../srvkm/devices/sgx/sgx_bridge_km.h | 254 + .../services4/srvkm/devices/sgx/sgxconfig.h | 645 +++ .../services4/srvkm/devices/sgx/sgxinfokm.h | 631 +++ .../services4/srvkm/devices/sgx/sgxinit.c | 3642 ++++++++++++ .../services4/srvkm/devices/sgx/sgxkick.c | 811 +++ .../services4/srvkm/devices/sgx/sgxpower.c | 666 +++ .../services4/srvkm/devices/sgx/sgxreset.c | 824 +++ .../services4/srvkm/devices/sgx/sgxtransfer.c | 1007 ++++ .../services4/srvkm/devices/sgx/sgxutils.c | 1976 +++++++ .../services4/srvkm/devices/sgx/sgxutils.h | 195 + .../services4/srvkm/env/linux/Kbuild.mk | 194 + .../services4/srvkm/env/linux/Linux.mk | 45 + .../env/linux/dma_fence_sync_native_server.c | 94 + 
.../services4/srvkm/env/linux/dmabuf.c | 328 ++ .../services4/srvkm/env/linux/dmabuf.h | 98 + .../services4/srvkm/env/linux/env_data.h | 93 + .../services4/srvkm/env/linux/env_perproc.h | 79 + .../services4/srvkm/env/linux/event.c | 413 ++ .../services4/srvkm/env/linux/event.h | 48 + .../services4/srvkm/env/linux/ion.c | 580 ++ .../services4/srvkm/env/linux/ion.h | 73 + .../services4/srvkm/env/linux/linkage.h | 77 + .../services4/srvkm/env/linux/lma_heap_ion.h | 45 + .../services4/srvkm/env/linux/lock.h | 56 + .../eurasia_km/services4/srvkm/env/linux/mm.c | 2770 +++++++++ .../eurasia_km/services4/srvkm/env/linux/mm.h | 675 +++ .../services4/srvkm/env/linux/mmap.c | 1659 ++++++ .../services4/srvkm/env/linux/mmap.h | 232 + .../services4/srvkm/env/linux/module.c | 1288 +++++ .../services4/srvkm/env/linux/mutex.c | 163 + .../services4/srvkm/env/linux/mutex.h | 100 + .../services4/srvkm/env/linux/mutils.c | 179 + .../services4/srvkm/env/linux/mutils.h | 128 + .../services4/srvkm/env/linux/osfunc.c | 4850 ++++++++++++++++ .../services4/srvkm/env/linux/osperproc.c | 155 + .../services4/srvkm/env/linux/pdump.c | 855 +++ .../services4/srvkm/env/linux/private_data.h | 91 + .../services4/srvkm/env/linux/proc.c | 1059 ++++ .../services4/srvkm/env/linux/proc.h | 95 + .../services4/srvkm/env/linux/pvr_bridge_k.c | 761 +++ .../srvkm/env/linux/pvr_counting_timeline.c | 158 + .../srvkm/env/linux/pvr_counting_timeline.h | 25 + .../services4/srvkm/env/linux/pvr_debug.c | 522 ++ .../services4/srvkm/env/linux/pvr_drm.c | 808 +++ .../services4/srvkm/env/linux/pvr_drm.h | 185 + .../services4/srvkm/env/linux/pvr_fence.c | 1681 ++++++ .../services4/srvkm/env/linux/pvr_fence.h | 252 + .../srvkm/env/linux/pvr_linux_fence.c | 1533 +++++ .../srvkm/env/linux/pvr_linux_fence.h | 81 + .../services4/srvkm/env/linux/pvr_sw_fence.c | 141 + .../services4/srvkm/env/linux/pvr_sw_fence.h | 17 + .../services4/srvkm/env/linux/pvr_sync.c | 1492 +++++ .../services4/srvkm/env/linux/pvr_sync.h | 201 + 
.../srvkm/env/linux/pvr_sync_common.c | 379 ++ .../srvkm/env/linux/pvr_sync_common.h | 90 + .../srvkm/env/linux/pvr_sync_dma_fence.c | 812 +++ .../services4/srvkm/env/linux/pvr_uaccess.h | 89 + .../srvkm/env/linux/pvrsrv_sync_server.h | 29 + .../services4/srvkm/env/linux/systrace.c | 376 ++ .../services4/srvkm/env/linux/systrace.h | 70 + .../services4/srvkm/hwdefs/mnemedefs.h | 117 + .../services4/srvkm/hwdefs/ocpdefs.h | 308 + .../services4/srvkm/hwdefs/sgx520defs.h | 555 ++ .../services4/srvkm/hwdefs/sgx530defs.h | 542 ++ .../services4/srvkm/hwdefs/sgx531defs.h | 601 ++ .../services4/srvkm/hwdefs/sgx535defs.h | 739 +++ .../services4/srvkm/hwdefs/sgx540defs.h | 605 ++ .../srvkm/hwdefs/sgx543_v1.164defs.h | 1396 +++++ .../services4/srvkm/hwdefs/sgx543defs.h | 1487 +++++ .../services4/srvkm/hwdefs/sgx544defs.h | 1487 +++++ .../services4/srvkm/hwdefs/sgx545defs.h | 1290 +++++ .../services4/srvkm/hwdefs/sgxdefs.h | 116 + .../services4/srvkm/hwdefs/sgxerrata.h | 484 ++ .../services4/srvkm/hwdefs/sgxfeaturedefs.h | 290 + .../services4/srvkm/hwdefs/sgxmmu.h | 99 + .../services4/srvkm/hwdefs/sgxmpdefs.h | 387 ++ .../services4/srvkm/include/buffer_manager.h | 640 +++ .../services4/srvkm/include/device.h | 409 ++ .../services4/srvkm/include/devicemem.h | 52 + .../services4/srvkm/include/dmabuf_sync.h | 69 + .../services4/srvkm/include/handle.h | 547 ++ .../eurasia_km/services4/srvkm/include/hash.h | 277 + .../services4/srvkm/include/ion_sync.h | 73 + .../services4/srvkm/include/lists.h | 353 ++ .../services4/srvkm/include/metrics.h | 146 + .../services4/srvkm/include/osfunc.h | 802 +++ .../services4/srvkm/include/osperproc.h | 94 + .../services4/srvkm/include/pdump_int.h | 100 + .../services4/srvkm/include/pdump_km.h | 446 ++ .../services4/srvkm/include/pdump_osfunc.h | 385 ++ .../services4/srvkm/include/perfkm.h | 53 + .../services4/srvkm/include/perproc.h | 141 + .../services4/srvkm/include/power.h | 140 + .../services4/srvkm/include/queue.h | 154 + 
.../eurasia_km/services4/srvkm/include/ra.h | 290 + .../services4/srvkm/include/refcount.h | 293 + .../services4/srvkm/include/resman.h | 153 + .../srvkm/include/services_headers.h | 68 + .../services4/srvkm/include/srvkm.h | 273 + .../services4/srvkm/include/ttrace.h | 200 + .../services4/srvkm/include/ttrace_common.h | 151 + .../services4/srvkm/include/ttrace_tokens.h | 135 + .../services4/system/include/syscommon.h | 393 ++ .../services4/system/omap/oemfuncs.h | 80 + .../services4/system/omap/sgxfreq.c | 846 +++ .../services4/system/omap/sgxfreq.h | 122 + .../system/omap/sgxfreq_activeidle.c | 206 + .../services4/system/omap/sgxfreq_cool.c | 216 + .../services4/system/omap/sgxfreq_on3demand.c | 295 + .../services4/system/omap/sgxfreq_onoff.c | 205 + .../services4/system/omap/sgxfreq_userspace.c | 149 + .../services4/system/omap/sysconfig.c | 1267 +++++ .../services4/system/omap/sysconfig.h | 113 + .../services4/system/omap/sysinfo.h | 70 + .../services4/system/omap/syslocal.h | 262 + .../services4/system/omap/sysutils.c | 63 + .../services4/system/omap/sysutils_linux.c | 721 +++ .../tools/intern/debug/client/linuxsrv.h | 64 + .../tools/intern/debug/dbgdriv/Kbuild.mk | 51 + .../tools/intern/debug/dbgdriv/Linux.mk | 45 + .../intern/debug/dbgdriv/common/dbgdriv.c | 2936 ++++++++++ .../intern/debug/dbgdriv/common/dbgdriv.h | 155 + .../debug/dbgdriv/common/dbgdriv_handle.c | 141 + .../debug/dbgdriv/common/dbgdriv_ioctl.h | 57 + .../intern/debug/dbgdriv/common/hostfunc.h | 82 + .../intern/debug/dbgdriv/common/hotkey.c | 199 + .../intern/debug/dbgdriv/common/hotkey.h | 82 + .../tools/intern/debug/dbgdriv/common/ioctl.c | 827 +++ .../intern/debug/dbgdriv/linux/hostfunc.c | 395 ++ .../tools/intern/debug/dbgdriv/linux/main.c | 355 ++ sgx_km/lib/modules/pvrsrvkm.ko | Bin 417820 -> 0 bytes 232 files changed, 123021 insertions(+) create mode 100644 sgx_km/Android.mk create mode 100644 sgx_km/eurasia_km/GPL-COPYING create mode 100644 sgx_km/eurasia_km/INSTALL create mode 
100644 sgx_km/eurasia_km/MIT-COPYING create mode 100644 sgx_km/eurasia_km/README create mode 100644 sgx_km/eurasia_km/eurasiacon/build/linux2/_objects.mk create mode 100644 sgx_km/eurasia_km/eurasiacon/build/linux2/bits.mk create mode 100644 sgx_km/eurasia_km/eurasiacon/build/linux2/buildvars.mk create mode 100644 sgx_km/eurasia_km/eurasiacon/build/linux2/commands.mk create mode 100644 sgx_km/eurasia_km/eurasiacon/build/linux2/common/android/arch.mk create mode 100644 sgx_km/eurasia_km/eurasiacon/build/linux2/common/android/arch_common.mk create mode 100644 sgx_km/eurasia_km/eurasiacon/build/linux2/common/android/extra_config.mk create mode 100644 sgx_km/eurasia_km/eurasiacon/build/linux2/common/android/features.mk create mode 100644 sgx_km/eurasia_km/eurasiacon/build/linux2/common/android/install.sh.tpl create mode 100644 sgx_km/eurasia_km/eurasiacon/build/linux2/common/android/paths.mk create mode 100644 sgx_km/eurasia_km/eurasiacon/build/linux2/common/android/platform_version.mk create mode 100644 sgx_km/eurasia_km/eurasiacon/build/linux2/common/dridrm.mk create mode 100644 sgx_km/eurasia_km/eurasiacon/build/linux2/common/ion.mk create mode 100644 sgx_km/eurasia_km/eurasiacon/build/linux2/common/omap4.mk create mode 100644 sgx_km/eurasia_km/eurasiacon/build/linux2/common/opencl.mk create mode 100644 sgx_km/eurasia_km/eurasiacon/build/linux2/config/compiler.mk create mode 100644 sgx_km/eurasia_km/eurasiacon/build/linux2/config/compilers/arm-eabi.mk create mode 100644 sgx_km/eurasia_km/eurasiacon/build/linux2/config/compilers/arm-linux-androideabi.mk create mode 100644 sgx_km/eurasia_km/eurasiacon/build/linux2/config/compilers/mips64el-linux-android.mk create mode 100644 sgx_km/eurasia_km/eurasiacon/build/linux2/config/core.mk create mode 100644 sgx_km/eurasia_km/eurasiacon/build/linux2/defs.mk create mode 100644 sgx_km/eurasia_km/eurasiacon/build/linux2/kbuild/Makefile.template create mode 100644 sgx_km/eurasia_km/eurasiacon/build/linux2/kbuild/kbuild.mk create 
mode 100644 sgx_km/eurasia_km/eurasiacon/build/linux2/kernel_module.mk create mode 100644 sgx_km/eurasia_km/eurasiacon/build/linux2/kernel_version.mk create mode 100644 sgx_km/eurasia_km/eurasiacon/build/linux2/moduledefs/host_x86_64.mk create mode 100644 sgx_km/eurasia_km/eurasiacon/build/linux2/moduledefs/target_armv7-a.mk create mode 100644 sgx_km/eurasia_km/eurasiacon/build/linux2/moduledefs/target_mips32r6el.mk create mode 100644 sgx_km/eurasia_km/eurasiacon/build/linux2/moduledefs/target_neutral.mk create mode 100644 sgx_km/eurasia_km/eurasiacon/build/linux2/moduledefs_common.mk create mode 100644 sgx_km/eurasia_km/eurasiacon/build/linux2/modules.mk create mode 100644 sgx_km/eurasia_km/eurasiacon/build/linux2/omap_android/Makefile create mode 100644 sgx_km/eurasia_km/eurasiacon/build/linux2/prepare_tree.mk create mode 100644 sgx_km/eurasia_km/eurasiacon/build/linux2/pvrversion.mk create mode 100644 sgx_km/eurasia_km/eurasiacon/build/linux2/scripts.mk create mode 100644 sgx_km/eurasia_km/eurasiacon/build/linux2/shared_library.mk create mode 100644 sgx_km/eurasia_km/eurasiacon/build/linux2/this_makefile.mk create mode 100755 sgx_km/eurasia_km/eurasiacon/build/linux2/tools/cc-check.sh create mode 100644 sgx_km/eurasia_km/eurasiacon/build/linux2/toplevel.mk create mode 100644 sgx_km/eurasia_km/include4/dbgdrvif.h create mode 100644 sgx_km/eurasia_km/include4/img_defs.h create mode 100644 sgx_km/eurasia_km/include4/img_types.h create mode 100644 sgx_km/eurasia_km/include4/kernel_types.h create mode 100644 sgx_km/eurasia_km/include4/pdumpdefs.h create mode 100644 sgx_km/eurasia_km/include4/pvr_debug.h create mode 100644 sgx_km/eurasia_km/include4/pvr_sync_user.h create mode 100644 sgx_km/eurasia_km/include4/pvrmodule.h create mode 100644 sgx_km/eurasia_km/include4/pvrversion.h create mode 100644 sgx_km/eurasia_km/include4/services.h create mode 100644 sgx_km/eurasia_km/include4/servicesext.h create mode 100644 sgx_km/eurasia_km/include4/sgx_options.h create mode 
100644 sgx_km/eurasia_km/include4/sgxapi_km.h create mode 100644 sgx_km/eurasia_km/include4/sgxscript.h create mode 100644 sgx_km/eurasia_km/services4/include/env/linux/pvr_drm_shared.h create mode 100644 sgx_km/eurasia_km/services4/include/ion_sys_private.h create mode 100644 sgx_km/eurasia_km/services4/include/kernelbuffer.h create mode 100644 sgx_km/eurasia_km/services4/include/kerneldisplay.h create mode 100644 sgx_km/eurasia_km/services4/include/pdump.h create mode 100644 sgx_km/eurasia_km/services4/include/pvr_bridge.h create mode 100644 sgx_km/eurasia_km/services4/include/pvr_bridge_km.h create mode 100644 sgx_km/eurasia_km/services4/include/pvrmmap.h create mode 100644 sgx_km/eurasia_km/services4/include/pvrsrv_errors.h create mode 100644 sgx_km/eurasia_km/services4/include/servicesint.h create mode 100644 sgx_km/eurasia_km/services4/include/sgx_bridge.h create mode 100644 sgx_km/eurasia_km/services4/include/sgx_mkif_km.h create mode 100644 sgx_km/eurasia_km/services4/include/sgx_ukernel_status_codes.h create mode 100644 sgx_km/eurasia_km/services4/include/sgxinfo.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/bridged/bridged_pvr_bridge.c create mode 100644 sgx_km/eurasia_km/services4/srvkm/bridged/bridged_pvr_bridge.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/bridged/bridged_support.c create mode 100644 sgx_km/eurasia_km/services4/srvkm/bridged/bridged_support.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/bridged/sgx/bridged_sgx_bridge.c create mode 100644 sgx_km/eurasia_km/services4/srvkm/bridged/sgx/bridged_sgx_bridge.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/common/buffer_manager.c create mode 100644 sgx_km/eurasia_km/services4/srvkm/common/deviceclass.c create mode 100644 sgx_km/eurasia_km/services4/srvkm/common/deviceid.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/common/devicemem.c create mode 100644 sgx_km/eurasia_km/services4/srvkm/common/handle.c create mode 100644 
sgx_km/eurasia_km/services4/srvkm/common/hash.c create mode 100644 sgx_km/eurasia_km/services4/srvkm/common/lists.c create mode 100644 sgx_km/eurasia_km/services4/srvkm/common/mem.c create mode 100644 sgx_km/eurasia_km/services4/srvkm/common/mem_debug.c create mode 100644 sgx_km/eurasia_km/services4/srvkm/common/metrics.c create mode 100644 sgx_km/eurasia_km/services4/srvkm/common/osfunc_common.c create mode 100644 sgx_km/eurasia_km/services4/srvkm/common/pdump_common.c create mode 100644 sgx_km/eurasia_km/services4/srvkm/common/perproc.c create mode 100644 sgx_km/eurasia_km/services4/srvkm/common/power.c create mode 100644 sgx_km/eurasia_km/services4/srvkm/common/pvrsrv.c create mode 100644 sgx_km/eurasia_km/services4/srvkm/common/queue.c create mode 100644 sgx_km/eurasia_km/services4/srvkm/common/ra.c create mode 100644 sgx_km/eurasia_km/services4/srvkm/common/refcount.c create mode 100644 sgx_km/eurasia_km/services4/srvkm/common/resman.c create mode 100644 sgx_km/eurasia_km/services4/srvkm/common/ttrace.c create mode 100644 sgx_km/eurasia_km/services4/srvkm/devices/sgx/mmu.c create mode 100644 sgx_km/eurasia_km/services4/srvkm/devices/sgx/mmu.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/devices/sgx/pb.c create mode 100644 sgx_km/eurasia_km/services4/srvkm/devices/sgx/sgx_bridge_km.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/devices/sgx/sgxconfig.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/devices/sgx/sgxinfokm.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/devices/sgx/sgxinit.c create mode 100644 sgx_km/eurasia_km/services4/srvkm/devices/sgx/sgxkick.c create mode 100644 sgx_km/eurasia_km/services4/srvkm/devices/sgx/sgxpower.c create mode 100644 sgx_km/eurasia_km/services4/srvkm/devices/sgx/sgxreset.c create mode 100644 sgx_km/eurasia_km/services4/srvkm/devices/sgx/sgxtransfer.c create mode 100644 sgx_km/eurasia_km/services4/srvkm/devices/sgx/sgxutils.c create mode 100644 
sgx_km/eurasia_km/services4/srvkm/devices/sgx/sgxutils.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/env/linux/Kbuild.mk create mode 100644 sgx_km/eurasia_km/services4/srvkm/env/linux/Linux.mk create mode 100644 sgx_km/eurasia_km/services4/srvkm/env/linux/dma_fence_sync_native_server.c create mode 100644 sgx_km/eurasia_km/services4/srvkm/env/linux/dmabuf.c create mode 100644 sgx_km/eurasia_km/services4/srvkm/env/linux/dmabuf.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/env/linux/env_data.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/env/linux/env_perproc.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/env/linux/event.c create mode 100644 sgx_km/eurasia_km/services4/srvkm/env/linux/event.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/env/linux/ion.c create mode 100644 sgx_km/eurasia_km/services4/srvkm/env/linux/ion.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/env/linux/linkage.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/env/linux/lma_heap_ion.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/env/linux/lock.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/env/linux/mm.c create mode 100644 sgx_km/eurasia_km/services4/srvkm/env/linux/mm.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/env/linux/mmap.c create mode 100644 sgx_km/eurasia_km/services4/srvkm/env/linux/mmap.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/env/linux/module.c create mode 100644 sgx_km/eurasia_km/services4/srvkm/env/linux/mutex.c create mode 100644 sgx_km/eurasia_km/services4/srvkm/env/linux/mutex.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/env/linux/mutils.c create mode 100644 sgx_km/eurasia_km/services4/srvkm/env/linux/mutils.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/env/linux/osfunc.c create mode 100644 sgx_km/eurasia_km/services4/srvkm/env/linux/osperproc.c create mode 100644 sgx_km/eurasia_km/services4/srvkm/env/linux/pdump.c create mode 100644 
sgx_km/eurasia_km/services4/srvkm/env/linux/private_data.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/env/linux/proc.c create mode 100644 sgx_km/eurasia_km/services4/srvkm/env/linux/proc.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_bridge_k.c create mode 100644 sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_counting_timeline.c create mode 100644 sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_counting_timeline.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_debug.c create mode 100644 sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_drm.c create mode 100644 sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_drm.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_fence.c create mode 100644 sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_fence.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_linux_fence.c create mode 100644 sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_linux_fence.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_sw_fence.c create mode 100644 sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_sw_fence.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_sync.c create mode 100644 sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_sync.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_sync_common.c create mode 100644 sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_sync_common.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_sync_dma_fence.c create mode 100644 sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_uaccess.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/env/linux/pvrsrv_sync_server.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/env/linux/systrace.c create mode 100644 sgx_km/eurasia_km/services4/srvkm/env/linux/systrace.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/hwdefs/mnemedefs.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/hwdefs/ocpdefs.h create mode 
100644 sgx_km/eurasia_km/services4/srvkm/hwdefs/sgx520defs.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/hwdefs/sgx530defs.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/hwdefs/sgx531defs.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/hwdefs/sgx535defs.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/hwdefs/sgx540defs.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/hwdefs/sgx543_v1.164defs.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/hwdefs/sgx543defs.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/hwdefs/sgx544defs.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/hwdefs/sgx545defs.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/hwdefs/sgxdefs.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/hwdefs/sgxerrata.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/hwdefs/sgxfeaturedefs.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/hwdefs/sgxmmu.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/hwdefs/sgxmpdefs.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/include/buffer_manager.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/include/device.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/include/devicemem.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/include/dmabuf_sync.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/include/handle.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/include/hash.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/include/ion_sync.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/include/lists.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/include/metrics.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/include/osfunc.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/include/osperproc.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/include/pdump_int.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/include/pdump_km.h create mode 100644 
sgx_km/eurasia_km/services4/srvkm/include/pdump_osfunc.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/include/perfkm.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/include/perproc.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/include/power.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/include/queue.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/include/ra.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/include/refcount.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/include/resman.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/include/services_headers.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/include/srvkm.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/include/ttrace.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/include/ttrace_common.h create mode 100644 sgx_km/eurasia_km/services4/srvkm/include/ttrace_tokens.h create mode 100644 sgx_km/eurasia_km/services4/system/include/syscommon.h create mode 100644 sgx_km/eurasia_km/services4/system/omap/oemfuncs.h create mode 100644 sgx_km/eurasia_km/services4/system/omap/sgxfreq.c create mode 100644 sgx_km/eurasia_km/services4/system/omap/sgxfreq.h create mode 100644 sgx_km/eurasia_km/services4/system/omap/sgxfreq_activeidle.c create mode 100644 sgx_km/eurasia_km/services4/system/omap/sgxfreq_cool.c create mode 100644 sgx_km/eurasia_km/services4/system/omap/sgxfreq_on3demand.c create mode 100644 sgx_km/eurasia_km/services4/system/omap/sgxfreq_onoff.c create mode 100644 sgx_km/eurasia_km/services4/system/omap/sgxfreq_userspace.c create mode 100644 sgx_km/eurasia_km/services4/system/omap/sysconfig.c create mode 100644 sgx_km/eurasia_km/services4/system/omap/sysconfig.h create mode 100644 sgx_km/eurasia_km/services4/system/omap/sysinfo.h create mode 100644 sgx_km/eurasia_km/services4/system/omap/syslocal.h create mode 100644 sgx_km/eurasia_km/services4/system/omap/sysutils.c create mode 100644 
sgx_km/eurasia_km/services4/system/omap/sysutils_linux.c create mode 100644 sgx_km/eurasia_km/tools/intern/debug/client/linuxsrv.h create mode 100644 sgx_km/eurasia_km/tools/intern/debug/dbgdriv/Kbuild.mk create mode 100644 sgx_km/eurasia_km/tools/intern/debug/dbgdriv/Linux.mk create mode 100644 sgx_km/eurasia_km/tools/intern/debug/dbgdriv/common/dbgdriv.c create mode 100644 sgx_km/eurasia_km/tools/intern/debug/dbgdriv/common/dbgdriv.h create mode 100644 sgx_km/eurasia_km/tools/intern/debug/dbgdriv/common/dbgdriv_handle.c create mode 100644 sgx_km/eurasia_km/tools/intern/debug/dbgdriv/common/dbgdriv_ioctl.h create mode 100644 sgx_km/eurasia_km/tools/intern/debug/dbgdriv/common/hostfunc.h create mode 100644 sgx_km/eurasia_km/tools/intern/debug/dbgdriv/common/hotkey.c create mode 100644 sgx_km/eurasia_km/tools/intern/debug/dbgdriv/common/hotkey.h create mode 100644 sgx_km/eurasia_km/tools/intern/debug/dbgdriv/common/ioctl.c create mode 100644 sgx_km/eurasia_km/tools/intern/debug/dbgdriv/linux/hostfunc.c create mode 100644 sgx_km/eurasia_km/tools/intern/debug/dbgdriv/linux/main.c delete mode 100644 sgx_km/lib/modules/pvrsrvkm.ko diff --git a/sgx_km/Android.mk b/sgx_km/Android.mk new file mode 100644 index 0000000..fb27474 --- /dev/null +++ b/sgx_km/Android.mk @@ -0,0 +1,35 @@ +# +# Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +LOCAL_PATH := $(call my-dir) +include $(CLEAR_VARS) + +local-build := $(LOCAL_PATH)/eurasia_km/eurasiacon/build/linux2/omap_android +out-intermediates := $(call intermediates-dir-for, ETC, pvrsrvkm) + +PVRSRVKM := $(LOCAL_PATH)/lib/modules/pvrsrvkm.ko + +$(PVRSRVKM): $(local-build)/Makefile $(KERNELDIR)/.version + @echo "Building $@" + @$(MAKE) -C $(local-build) \ + CROSS_COMPILE=arm-linux-androideabi- \ + KERNEL_CROSS_COMPILE=arm-linux-androideabi- \ + KERNELDIR=$(KERNELDIR) \ + ARCH=arm \ + ANDROID_ROOT=$(ANDROID_BUILD_TOP) \ + PLATFORM_RELEASE="8.1" \ + OUT=$(abspath $(out-intermediates)) + @$(ACP) -fp $(abspath $(out-intermediates))/target_armv7-a/pvrsrvkm.ko $@ diff --git a/sgx_km/eurasia_km/GPL-COPYING b/sgx_km/eurasia_km/GPL-COPYING new file mode 100644 index 0000000..83d1261 --- /dev/null +++ b/sgx_km/eurasia_km/GPL-COPYING @@ -0,0 +1,344 @@ +------------------------------------------------------------------------- + + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc. + 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Library General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. 
+ + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. 
You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. 
+ +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. 
However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. 
+You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. 
If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + Appendix: How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) 19yy + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) 19yy name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Library General +Public License instead of this License. 
+ +------------------------------------------------------------------------- + diff --git a/sgx_km/eurasia_km/INSTALL b/sgx_km/eurasia_km/INSTALL new file mode 100644 index 0000000..aefa6c3 --- /dev/null +++ b/sgx_km/eurasia_km/INSTALL @@ -0,0 +1,72 @@ + +SGX Embedded Systems DDK for the Linux kernel. +Copyright (C) Imagination Technologies Ltd. All rights reserved. +====================================================================== + +This file covers how to build and install the Imagination Technologies +SGX DDK for the Linux kernel. + + +Build System Environment Variables +------------------------------------------- + +The SGX DDK Build scripts depend on a number of environment variables +being setup before compilation or installation of DDK software can +commence: + +$DISCIMAGE +The DDK Build scripts install files to the location specified by the +DISCIMAGE environment variable, when the make install target is used. +This should point to the target filesystem. +$ export DISCIMAGE=/path/to/filesystem + +$KERNELDIR +When building the SGX DDK kernel module, the build needs access +to the headers of the Linux kernel +$ export KERNELDIR=/path/to/kernel + +$PATH +If a cross compiler is being used make sure the PATH environment variable +includes the path to the toolchain +$ export PATH=$PATH:/path/to/toolchain + +$CROSS_COMPILE +Since the SGX DDK Build scripts are geared toward a cross-compilation +workflow, the CROSS_COMPILE environment variable needs to be set +$ export CROSS_COMPILE=toolchain-prefix- + + +Build and Install Instructions +------------------------------------------- + +The SGX DDK configures different target builds within directories under +eurasiacon/build/linux/. + +The supported build targets are: + + all Makes everything + clean Removes all intermediate files created by a build. + clobber Removes all binaries for all builds as well. + install Runs the install script generated by the build. 
+ +The following variables may be set on the command line to influence a build. + + BUILD The type of build being performed. + Alternatives are release, timing or debug. + CFLAGS Build dependent optimisations and debug information flags. + SILENT Determines whether text of commands is produced during build. + +To build for a target, change to the appropriate target directory, e.g.: +$ cd eurasiacon/build/linux2/platform/kbuild + +Issue the make command: +$ make BUILD=debug all + +The DDK software must be installed by the root user. Become the root user: +$ su + +Install the DDK software: +# make install + +Become an ordinary user again: +$ exit diff --git a/sgx_km/eurasia_km/MIT-COPYING b/sgx_km/eurasia_km/MIT-COPYING new file mode 100644 index 0000000..0cbd14e --- /dev/null +++ b/sgx_km/eurasia_km/MIT-COPYING @@ -0,0 +1,41 @@ + +This software is Copyright (C) Imagination Technologies Ltd. + +You may use, distribute and copy this software under the terms of the MIT +license displayed below. + +----------------------------------------------------------------------------- + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, this Software may be used under the terms of the GNU General +Public License Version 2 ("GPL") in which case the provisions of GPL are +applicable instead of those above. 
+ +If you wish to allow use of your version of this Software only under the terms +of GPL, and not to allow others to use your version of this file under the +terms of the MIT license, indicate your decision by deleting from each file +the provisions above and replace them with the notice and other provisions +required by GPL as set out in the file called "GPL-COPYING" included in this +distribution. If you do not delete the provisions above, a recipient may use +your version of this file under the terms of either the MIT license or GPL. + +----------------------------------------------------------------------------- + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +----------------------------------------------------------------------------- diff --git a/sgx_km/eurasia_km/README b/sgx_km/eurasia_km/README new file mode 100644 index 0000000..2eae109 --- /dev/null +++ b/sgx_km/eurasia_km/README @@ -0,0 +1,49 @@ + +SGX Embedded Systems DDK for Linux kernel. +Copyright (C) Imagination Technologies Ltd. All rights reserved. +====================================================================== + + +About +------------------------------------------- + +This is the Imagination Technologies SGX DDK for the Linux kernel. + + +License +------------------------------------------- + +You may use, distribute and copy this software under the terms of the MIT +license. Details of this license can be found in the file "MIT-COPYING". 
+ +Alternatively, you may use, distribute and copy this software under the terms +of the GNU General Public License version 2. The full GNU General Public +License version 2 can be found in the file "GPL-COPYING". + + +Build and Install Instructions +------------------------------------------- + +For details see the "INSTALL" file. + +To build for a target, change to the appropriate target directory, e.g.: +$ cd eurasiacon/build/linux2/platform/kbuild + +Issue the make command: +$ make BUILD=debug all + +The DDK software must be installed by the root user. Become the root user: +$ su + +Install the DDK software: +# make install + +Become an ordinary user again: +$ exit + + +Contact information: +------------------------------------------- + +Imagination Technologies Ltd. +Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK diff --git a/sgx_km/eurasia_km/eurasiacon/build/linux2/_objects.mk b/sgx_km/eurasia_km/eurasiacon/build/linux2/_objects.mk new file mode 100644 index 0000000..82da9d6 --- /dev/null +++ b/sgx_km/eurasia_km/eurasiacon/build/linux2/_objects.mk @@ -0,0 +1,121 @@ +########################################################################### ### +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. 
+# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +#@Description Common processing for all modules that compile code. 
+### ########################################################################### + +# Filter for source types +MODULE_C_SOURCES := $(filter %.c,$(MODULE_SOURCES)) +MODULE_CXX_SOURCES := $(filter %.cpp,$(MODULE_SOURCES)) + +MODULE_UNRECOGNISED_SOURCES := $(filter-out %.c %.cpp, $(MODULE_SOURCES)) + +ifneq ($(strip $(MODULE_UNRECOGNISED_SOURCES)),) +$(error In makefile $(THIS_MAKEFILE): Module $(THIS_MODULE) specified source files with unrecognised suffixes: $(MODULE_UNRECOGNISED_SOURCES)) +endif + +# Objects built from MODULE_SOURCES +# Objects built from MODULE_C_SOURCES and MODULE_CXX_SOURCES +MODULE_C_OBJECTS := $(addprefix $(MODULE_INTERMEDIATES_DIR)/,$(foreach _cobj,$(MODULE_C_SOURCES:.c=.o),$(notdir $(_cobj)))) +MODULE_CXX_OBJECTS := $(addprefix $(MODULE_INTERMEDIATES_DIR)/,$(foreach _cxxobj,$(MODULE_CXX_SOURCES:.cpp=.o),$(notdir $(_cxxobj)))) + +# MODULE_GENERATED_DEPENDENCIES are generated as a side effect of running the +# rules below, but if we wanted to generate .d files for things that GCC +# couldn't handle, we could add a rule with $(MODULE_GENERATED_DEPENDENCIES) +# as a target +MODULE_GENERATED_DEPENDENCIES := $(MODULE_C_OBJECTS:.o=.d) $(MODULE_CXX_OBJECTS:.o=.d) +-include $(MODULE_GENERATED_DEPENDENCIES) + +MODULE_DEPENDS := $(addprefix $(MODULE_OUT)/,$($(THIS_MODULE)_depends)) +MODULE_DEPENDS += $(addprefix $(GENERATED_CODE_OUT)/,$($(THIS_MODULE)_genheaders)) + +# Add any MODULE_OUT relative include flags here +MODULE_INCLUDE_FLAGS += $(addprefix -I $(MODULE_OUT)/, $($(THIS_MODULE)_includes_relative)) + +define rule-for-objects-o-from-one-c +$(1): MODULE_CC := $$(MODULE_CC) +$(1): MODULE_CFLAGS := $$(MODULE_CFLAGS) +$(1): MODULE_HOST_CFLAGS := $$(MODULE_HOST_CFLAGS) +$(1): MODULE_INCLUDE_FLAGS := $$(MODULE_INCLUDE_FLAGS) +$(1): MODULE_ALLOWED_CFLAGS := $$(MODULE_ALLOWED_CFLAGS) +$(1): THIS_MODULE := $$(THIS_MODULE) +ifneq ($(PKG_CONFIG_ENV_VAR),) +$(1): export PKG_CONFIG_TOP_BUILD_DIR := $(abspath $(MODULE_OUT)) +$(1): export $(PKG_CONFIG_ENV_VAR) 
:= $(abspath $(MODULE_OUT)/pkgconfig) +endif +$(1): export PKG_CONFIG_SYSROOT_DIR := $(PKG_CONFIG_SYSROOT_DIR) +$(1): $$(MODULE_DEPENDS) $$(THIS_MAKEFILE) +$(1): | $$(MODULE_INTERMEDIATES_DIR) +$(1): $(2) + @: $(if $(MODULE_CHECK_CFLAGS), + $(if $(filter-out $(MODULE_ALLOWED_CFLAGS),$($(THIS_MODULE)_cflags)),\ + $(error $(THIS_MODULE): LTO-incompatible cflag(s) used: \ + $(filter-out $(MODULE_ALLOWED_CFLAGS),$($(THIS_MODULE)_cflags))))) + $$(check-src) +ifeq ($(MODULE_HOST_BUILD),true) + $$(host-o-from-one-c) +else + $$(target-o-from-one-c) +endif +endef + +# This rule is used to compile C++ source files +define rule-for-objects-o-from-one-cxx +$(1): MODULE_CXX := $$(MODULE_CXX) +$(1): MODULE_CXXFLAGS := $$(MODULE_CXXFLAGS) +$(1): MODULE_HOST_CXXFLAGS := $$(MODULE_HOST_CXXFLAGS) +$(1): MODULE_INCLUDE_FLAGS := $$(MODULE_INCLUDE_FLAGS) +$(1): MODULE_ALLOWED_CFLAGS := $$(MODULE_ALLOWED_CFLAGS) +$(1): THIS_MODULE := $$(THIS_MODULE) +ifneq ($(PKG_CONFIG_ENV_VAR),) +$(1): export PKG_CONFIG_TOP_BUILD_DIR := $(abspath $(MODULE_OUT)) +$(1): export $(PKG_CONFIG_ENV_VAR) := $(abspath $(MODULE_OUT)/pkgconfig) +endif +$(1): $$(MODULE_DEPENDS) $$(THIS_MAKEFILE) +$(1): | $$(MODULE_INTERMEDIATES_DIR) +$(1): $(2) + @: $(if $(MODULE_CHECK_CFLAGS), + $(if $(filter-out $(MODULE_ALLOWED_CFLAGS),$($(THIS_MODULE)_cxxflags)),\ + $(error $(THIS_MODULE): LTO-incompatible cxxflag(s) used: \ + $(filter-out $(MODULE_ALLOWED_CFLAGS),$($(THIS_MODULE)_cxxflags))))) +ifeq ($(MODULE_HOST_BUILD),true) + $$(host-o-from-one-cxx) +else + $$(target-o-from-one-cxx) +endif +endef diff --git a/sgx_km/eurasia_km/eurasiacon/build/linux2/bits.mk b/sgx_km/eurasia_km/eurasiacon/build/linux2/bits.mk new file mode 100644 index 0000000..b6bcbbc --- /dev/null +++ b/sgx_km/eurasia_km/eurasiacon/build/linux2/bits.mk @@ -0,0 +1,116 @@ +########################################################################### ### +#@Title Useful special targets which don't build anything +#@Copyright Copyright (c) Imagination 
Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". 
+# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +### ########################################################################### + +ifneq ($(filter dumpvar-%,$(MAKECMDGOALS)),) +dumpvar-%: ; +$(foreach _var_to_dump,$(patsubst dumpvar-%,%,$(filter dumpvar-%,$(MAKECMDGOALS))),$(info $(if $(filter undefined,$(origin $(_var_to_dump))),# $$($(_var_to_dump)) is not set,$(_var_to_dump) := $($(_var_to_dump))))) +endif + +ifneq ($(filter whereis-%,$(MAKECMDGOALS)),) +whereis-%: ; +$(foreach _module_to_find,$(patsubst whereis-%,%,$(filter whereis-%,$(MAKECMDGOALS))),$(info $(if $(INTERNAL_MAKEFILE_FOR_MODULE_$(_module_to_find)),$(INTERNAL_MAKEFILE_FOR_MODULE_$(_module_to_find)),# No module $(_module_to_find)))) +endif + +ifneq ($(filter whatis-%,$(MAKECMDGOALS)),) +whatis-$(HOST_OUT)/%: ; +whatis-$(TARGET_PRIMARY_OUT)/%: ; +whatis-$(TARGET_NEUTRAL_OUT)/%: ; +$(foreach _file_to_find,$(patsubst whatis-%,%,$(filter whatis-%,$(MAKECMDGOALS))),$(info $(strip $(foreach _m,$(ALL_MODULES),$(if $(filter $(_file_to_find),$(INTERNAL_TARGETS_FOR_$(_m))),$(_file_to_find) is in $(_m) which is defined in $(INTERNAL_MAKEFILE_FOR_MODULE_$(_m)),))))) +endif + +.PHONY: ls-modules +ls-modules: + @: $(foreach _m,$(ALL_MODULES),$(info $($(_m)_type) $(_m) $(patsubst $(TOP)/%,%,$(INTERNAL_MAKEFILE_FOR_MODULE_$(_m))))) + +ifeq ($(strip $(MAKECMDGOALS)),visualise) +FORMAT ?= xlib +GRAPHVIZ ?= neato +visualise: $(OUT)/MAKE_RULES.dot + $(GRAPHVIZ) -T$(FORMAT) -o $(OUT)/MAKE_RULES.$(FORMAT) $< 
+$(OUT)/MAKE_RULES.dot: $(OUT)/MAKE_RULES + perl $(MAKE_TOP)/tools/depgraph.pl -t $(TOP) -g $(firstword $(GRAPHVIZ)) $(OUT)/MAKE_RULES >$(OUT)/MAKE_RULES.dot +$(OUT)/MAKE_RULES: $(ALL_MAKEFILES) + -$(MAKE) -C $(TOP) -f $(MAKE_TOP)/toplevel.mk TOP=$(TOP) OUT=$(OUT) ls-modules -qp >$(OUT)/MAKE_RULES 2>&1 +else +visualise: + @: $(error visualise specified along with other goals. This is not supported) +endif + +.PHONY: help +help: + @echo 'Build targets' + @echo ' make, make build Build all components of the build' + @echo ' make components Build only the user-mode components' + @echo ' make kbuild Build only the kernel-mode components' + @echo ' make docs Build the build'\''s supporting documentation' + @echo ' make MODULE Build the module MODULE and all of its dependencies' + @echo ' make eurasiacon/binary2_.../target/libsomething.so' + @echo ' Build a particular file (including intermediates)' + @echo 'Variables' + @echo ' make V=1 ... Print the commands that are executed' + @echo ' make W=1 ... Enable extra compiler warnings' + @echo ' make D=opt ... Set build system debug option (D=help for a list)' + @echo ' make OUT=dir ... Place output+intermediates in specified directory' + @echo ' EXCLUDED_APIS=... List of APIs to remove from the build' + @echo ' make SOMEOPTION=1 ... 
Set configuration options (see config/core.mk)' + @echo ' Defaults are set by $(PVR_BUILD_DIR)/Makefile' + @echo 'Clean targets' + @echo ' make clean Remove only intermediates for the current build' + @echo ' make clobber As "make clean", but remove output files too' + @echo ' make clean-MODULE Clean (or clobber) only files for MODULE' + @echo '' + @echo 'Special targets' + @echo ' make whereis-MODULE Show the path to the Linux.mk defining MODULE' + @echo ' make whatis-FILE Show which module builds an output FILE' + @echo ' make ls-modules List all modules defined by makefiles' + +ifneq ($(filter help,$(D)),) +empty := +space := $(empty) $(empty) +$(info Debug options) +$(info $(space)D=modules dump module info) +$(info $(space)D=config dump all config options + type and origin) +$(info $(space)D=freeze-config prevent config changes) +$(info $(space)D=config-changes dump diffs when config changes) +$(info $(space)D=nobuild stop before running the main build) +$(info Options can be combined: make D=freeze-config,config-changes) +$(error D=help given) +endif diff --git a/sgx_km/eurasia_km/eurasiacon/build/linux2/buildvars.mk b/sgx_km/eurasia_km/eurasiacon/build/linux2/buildvars.mk new file mode 100644 index 0000000..4d4afa6 --- /dev/null +++ b/sgx_km/eurasia_km/eurasiacon/build/linux2/buildvars.mk @@ -0,0 +1,267 @@ +########################################################################### ### +#@Title Define global variables +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@Description This file is read once at the start of the build, after reading +# in config.mk. It should define the non-MODULE_* variables used +# in commands, like ALL_CFLAGS +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ########################################################################### + +# NOTE: You must *not* use the cc-option et al macros in COMMON_FLAGS, +# COMMON_CFLAGS or COMMON_USER_FLAGS. These flags are shared between +# host and target, which might use compilers with different capabilities. + +# ANOTHER NOTE: All flags here must be architecture-independent (i.e. no +# -march or toolchain include paths) + +# These flags are used for kernel, User C and User C++ +# +COMMON_FLAGS := -W -Wall + +# Some GCC warnings are C only, so we must mask them from C++ +# +COMMON_CFLAGS := $(COMMON_FLAGS) \ + -Wdeclaration-after-statement -Wno-format-zero-length \ + -Wmissing-prototypes -Wstrict-prototypes + +# User C and User C++ optimization control. Does not affect kernel. +# +ifeq ($(BUILD),debug) +COMMON_USER_FLAGS := -O0 +else +OPTIM ?= -O2 +ifeq ($(USE_LTO),1) +COMMON_USER_FLAGS := $(OPTIM) -flto +else +COMMON_USER_FLAGS := $(OPTIM) +endif +endif + +# FIXME: We should probably audit the driver for aliasing +# +COMMON_USER_FLAGS += -fno-strict-aliasing + +# We always enable debugging. Either the release binaries are stripped +# and the symbols put in the symbolpackage, or we're building debug. +# +COMMON_USER_FLAGS += -g + +# User C and User C++ warning flags +# +COMMON_USER_FLAGS += \ + -Wpointer-arith -Wunused-parameter \ + -Wmissing-format-attribute + +# Additional warnings, and optional warnings. +# +TESTED_TARGET_USER_FLAGS := \ + $(call cc-option,-Wno-missing-field-initializers) \ + $(call cc-option,-fdiagnostics-show-option) \ + $(call cc-option,-Wno-self-assign) \ + $(call cc-option,-Wno-parentheses-equality) +TESTED_HOST_USER_FLAGS := \ + $(call host-cc-option,-Wno-missing-field-initializers) \ + $(call host-cc-option,-fdiagnostics-show-option) \ + $(call host-cc-option,-Wno-self-assign) \ + $(call host-cc-option,-Wno-parentheses-equality) + +# These flags are clang-specific. 
+# -Wno-unused-command-line-argument works around a buggy interaction +# with ccache, see https://bugzilla.samba.org/show_bug.cgi?id=8118 +# -fcolor-diagnostics force-enables colored error messages which +# get disabled when ccache is piped through ccache. +# +TESTED_TARGET_USER_FLAGS += \ + $(call cc-option,-Qunused-arguments) \ + $(call cc-option,-fcolor-diagnostics) +TESTED_HOST_USER_FLAGS += \ + $(call host-cc-option,-Qunused-arguments) \ + $(call host-cc-option,-fcolor-diagnostics) + +ifeq ($(W),1) +TESTED_TARGET_USER_FLAGS += \ + $(call cc-option,-Wbad-function-cast) \ + $(call cc-option,-Wcast-qual) \ + $(call cc-option,-Wcast-align) \ + $(call cc-option,-Wconversion) \ + $(call cc-option,-Wdisabled-optimization) \ + $(call cc-option,-Wlogical-op) \ + $(call cc-option,-Wmissing-declarations) \ + $(call cc-option,-Wmissing-include-dirs) \ + $(call cc-option,-Wnested-externs) \ + $(call cc-option,-Wold-style-definition) \ + $(call cc-option,-Woverlength-strings) \ + $(call cc-option,-Wpacked) \ + $(call cc-option,-Wpacked-bitfield-compat) \ + $(call cc-option,-Wpadded) \ + $(call cc-option,-Wredundant-decls) \ + $(call cc-option,-Wshadow) \ + $(call cc-option,-Wswitch-default) \ + $(call cc-option,-Wvla) \ + $(call cc-option,-Wwrite-strings) +TESTED_HOST_USER_FLAGS += \ + $(call host-cc-option,-Wbad-function-cast) \ + $(call host-cc-option,-Wcast-qual) \ + $(call host-cc-option,-Wcast-align) \ + $(call host-cc-option,-Wconversion) \ + $(call host-cc-option,-Wdisabled-optimization) \ + $(call host-cc-option,-Wlogical-op) \ + $(call host-cc-option,-Wmissing-declarations) \ + $(call host-cc-option,-Wmissing-include-dirs) \ + $(call host-cc-option,-Wnested-externs) \ + $(call host-cc-option,-Wold-style-definition) \ + $(call host-cc-option,-Woverlength-strings) \ + $(call host-cc-option,-Wpacked) \ + $(call host-cc-option,-Wpacked-bitfield-compat) \ + $(call host-cc-option,-Wpadded) \ + $(call host-cc-option,-Wredundant-decls) \ + $(call host-cc-option,-Wshadow) \ 
+ $(call host-cc-option,-Wswitch-default) \ + $(call host-cc-option,-Wvla) \ + $(call host-cc-option,-Wwrite-strings) +endif + +TESTED_TARGET_USER_FLAGS += \ + $(call cc-optional-warning,-Wunused-but-set-variable) +TESTED_HOST_USER_FLAGS += \ + $(call host-cc-optional-warning,-Wunused-but-set-variable) + +KBUILD_FLAGS := \ + -Wno-unused-parameter -Wno-sign-compare + +# androideabi toolchain adds `pic` by default, disable it +# for kernel module build +KBUILD_FLAGS += -fno-pic +KBUILD_FLAGS += -fstack-protector-strong + +TESTED_KBUILD_FLAGS := \ + $(call kernel-cc-option,-Wmissing-include-dirs) \ + $(call kernel-cc-option,-Wno-type-limits) \ + $(call kernel-cc-option,-Wno-pointer-arith) \ + $(call kernel-cc-option,-Wno-aggregate-return) \ + $(call kernel-cc-option,-Wno-unused-but-set-variable) \ + $(call kernel-cc-option,-Wno-ignored-qualifiers) \ + $(call kernel-cc-option,-Wno-old-style-declaration) \ + $(call kernel-cc-optional-warning,-Wbad-function-cast) \ + $(call kernel-cc-optional-warning,-Wcast-qual) \ + $(call kernel-cc-optional-warning,-Wcast-align) \ + $(call kernel-cc-optional-warning,-Wconversion) \ + $(call kernel-cc-optional-warning,-Wdisabled-optimization) \ + $(call kernel-cc-optional-warning,-Wlogical-op) \ + $(call kernel-cc-optional-warning,-Wmissing-declarations) \ + $(call kernel-cc-optional-warning,-Wmissing-include-dirs) \ + $(call kernel-cc-optional-warning,-Wnested-externs) \ + $(call kernel-cc-optional-warning,-Wno-missing-field-initializers) \ + $(call kernel-cc-optional-warning,-Wold-style-definition) \ + $(call kernel-cc-optional-warning,-Woverlength-strings) \ + $(call kernel-cc-optional-warning,-Wpacked) \ + $(call kernel-cc-optional-warning,-Wpacked-bitfield-compat) \ + $(call kernel-cc-optional-warning,-Wpadded) \ + $(call kernel-cc-optional-warning,-Wredundant-decls) \ + $(call kernel-cc-optional-warning,-Wshadow) \ + $(call kernel-cc-optional-warning,-Wswitch-default) \ + $(call kernel-cc-optional-warning,-Wvla) \ + $(call 
kernel-cc-optional-warning,-Wwrite-strings) + +# User C only +# +ALL_CFLAGS := \ + $(COMMON_USER_FLAGS) $(COMMON_CFLAGS) $(TESTED_TARGET_USER_FLAGS) \ + $(SYS_CFLAGS) +ALL_HOST_CFLAGS := \ + $(COMMON_USER_FLAGS) $(COMMON_CFLAGS) $(TESTED_HOST_USER_FLAGS) + +# User C++ only +# +ALL_CXXFLAGS := \ + -fno-rtti -fno-exceptions \ + $(COMMON_USER_FLAGS) $(COMMON_FLAGS) $(TESTED_TARGET_USER_FLAGS) \ + $(SYS_CXXFLAGS) +ALL_HOST_CXXFLAGS := \ + -fno-rtti -fno-exceptions \ + $(COMMON_USER_FLAGS) $(COMMON_FLAGS) $(TESTED_HOST_USER_FLAGS) + +# Workaround for some target clangs that don't support -O0 w/ PIC. +# +ifeq ($(cc-is-clang),true) +ALL_CFLAGS := $(patsubst -O0,-O1,$(ALL_CFLAGS)) +ALL_CXXFLAGS := $(patsubst -O0,-O1,$(ALL_CXXFLAGS)) +endif + +# Kernel C only +# +ALL_KBUILD_CFLAGS := $(COMMON_CFLAGS) $(KBUILD_FLAGS) $(TESTED_KBUILD_FLAGS) + +# User C and C++ +# +# NOTE: ALL_HOST_LDFLAGS should probably be using -rpath-link too, and if we +# ever need to support building host shared libraries, it's required. +# +# We can't use it right now because we want to support non-GNU-compatible +# linkers like the Darwin 'ld' which doesn't support -rpath-link. +# +# For the same reason (Darwin 'ld') don't bother checking for text +# relocations in host binaries. +# +ALL_HOST_LDFLAGS := +ALL_LDFLAGS := -Wl,--warn-shared-textrel + +ALL_LDFLAGS += $(SYS_LDFLAGS) + +# Optional security hardening features. 
+ifneq ($(FORTIFY),) +ALL_CFLAGS += -fstack-protector -Wa,--noexecstack -D_FORTIFY_SOURCE=2 +ALL_CXXFLAGS += -fstack-protector -Wa,--noexecstack -D_FORTIFY_SOURCE=2 +ALL_LDFLAGS += -Wl,-z,noexecstack -Wl,-z,relro -Wl,-z,now +endif + +# This variable contains a list of all modules built by kbuild +ALL_KBUILD_MODULES := + +# This variable contains a list of all modules which contain C++ source files +ALL_CXX_MODULES := + +# Toolchain triple for cross environment +CROSS_TRIPLE := $(patsubst %-,%,$(notdir $(CROSS_COMPILE))) + +ifneq ($(TOOLCHAIN),) +$(warning **********************************************) +$(warning The TOOLCHAIN option has been removed, but) +$(warning you have it set (via $(origin TOOLCHAIN))) +$(warning **********************************************) +endif diff --git a/sgx_km/eurasia_km/eurasiacon/build/linux2/commands.mk b/sgx_km/eurasia_km/eurasiacon/build/linux2/commands.mk new file mode 100644 index 0000000..d4876c9 --- /dev/null +++ b/sgx_km/eurasia_km/eurasiacon/build/linux2/commands.mk @@ -0,0 +1,312 @@ +########################################################################### ### +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. 
+# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +### ########################################################################### + +# from-one-* recipes make a thing from one source file, so they use $<. Others +# use $(MODULE_something) instead of $^ + +# We expect that MODULE_*FLAGS contains all the flags we need, including the +# flags for all modules (like $(ALL_CFLAGS) and $(ALL_HOST_CFLAGS)), and +# excluding flags for include search dirs or for linking libraries. 
The +# exceptions are ALL_EXE_LDFLAGS and ALL_LIB_LDFLAGS, since they depend on the +# type of thing being linked, so they appear in the commands below + +define host-o-from-one-c +$(if $(V),,@echo " HOST_CC " $(call relative-to-top,$<)) +$(MODULE_CC) -MD -c $(MODULE_HOST_CFLAGS) $(MODULE_INCLUDE_FLAGS) \ + -include $(CONFIG_H) $< -o $@ +endef + +define target-o-from-one-c +$(if $(V),,@echo " CC " $(call relative-to-top,$<)) +$(MODULE_CC) -MD -c $(MODULE_CFLAGS) $(SYS_INCLUDES) $(MODULE_INCLUDE_FLAGS) \ + -include $(CONFIG_H) $< -o $@ +endef + +define host-o-from-one-cxx +$(if $(V),,@echo " HOST_CXX" $(call relative-to-top,$<)) +$(MODULE_CXX) -MD -c $(MODULE_HOST_CXXFLAGS) $(MODULE_INCLUDE_FLAGS) \ + -include $(CONFIG_H) $< -o $@ +endef + +define target-o-from-one-cxx +$(if $(V),,@echo " CXX " $(call relative-to-top,$<)) +$(MODULE_CXX) -MD -c $(MODULE_CXXFLAGS) $(SYS_INCLUDES) $(MODULE_INCLUDE_FLAGS) \ + -include $(CONFIG_H) $< -o $@ +endef + +define host-executable-from-o +$(if $(V),,@echo " HOST_LD " $(call relative-to-top,$@)) +$(MODULE_CC) $(MODULE_HOST_LDFLAGS) \ + -o $@ $(sort $(MODULE_ALL_OBJECTS)) $(MODULE_LIBRARY_DIR_FLAGS) \ + $(MODULE_LIBRARY_FLAGS) +endef + +define host-executable-cxx-from-o +$(if $(V),,@echo " HOST_LD " $(call relative-to-top,$@)) +$(MODULE_CXX) $(MODULE_HOST_LDFLAGS) \ + -o $@ $(sort $(MODULE_ALL_OBJECTS)) $(MODULE_LIBRARY_DIR_FLAGS) \ + $(MODULE_LIBRARY_FLAGS) +endef + +define target-executable-from-o +$(if $(V),,@echo " LD " $(call relative-to-top,$@)) +$(MODULE_CC) \ + $(MODULE_EXE_LDFLAGS) $(MODULE_LDFLAGS) -o $@ \ + $(MODULE_EXE_CRTBEGIN) $(sort $(MODULE_ALL_OBJECTS)) $(MODULE_EXE_CRTEND) \ + $(MODULE_LIBRARY_DIR_FLAGS) $(MODULE_LIBRARY_FLAGS) $(MODULE_LIBGCC) +endef + +define target-executable-cxx-from-o +$(if $(V),,@echo " LD " $(call relative-to-top,$@)) +$(MODULE_CXX) \ + $(MODULE_EXE_LDFLAGS_CXX) $(MODULE_LDFLAGS) -o $@ \ + $(MODULE_EXE_CRTBEGIN) $(sort $(MODULE_ALL_OBJECTS)) $(MODULE_EXE_CRTEND) \ + 
$(MODULE_LIBRARY_DIR_FLAGS) $(MODULE_LIBRARY_FLAGS) $(MODULE_LIBGCC) +endef + +define target-shared-library-from-o +$(if $(V),,@echo " LD " $(call relative-to-top,$@)) +$(MODULE_CC) -shared -Wl,-Bsymbolic \ + $(MODULE_LIB_LDFLAGS) $(MODULE_LDFLAGS) -o $@ \ + $(MODULE_LIB_CRTBEGIN) $(sort $(MODULE_ALL_OBJECTS)) $(MODULE_LIB_CRTEND) \ + $(MODULE_LIBRARY_DIR_FLAGS) $(MODULE_LIBRARY_FLAGS) $(MODULE_LIBGCC) +endef + +# If there were any C++ source files in a shared library, we use this recipe, +# which runs the C++ compiler to link the final library +define target-shared-library-cxx-from-o +$(if $(V),,@echo " LD " $(call relative-to-top,$@)) +$(MODULE_CXX) -shared -Wl,-Bsymbolic \ + $(MODULE_LIB_LDFLAGS_CXX) $(MODULE_LDFLAGS) -o $@ \ + $(MODULE_LIB_CRTBEGIN) $(sort $(MODULE_ALL_OBJECTS)) $(MODULE_LIB_CRTEND) \ + $(MODULE_LIBRARY_DIR_FLAGS) $(MODULE_LIBRARY_FLAGS) $(MODULE_LIBGCC) +endef + +define host-shared-library-from-o +$(if $(V),,@echo " HOST_LD " $(call relative-to-top,$@)) +$(MODULE_CC) -shared -Wl,-Bsymbolic \ + $(MODULE_HOST_LDFLAGS) -o $@ \ + $(sort $(MODULE_ALL_OBJECTS)) \ + $(MODULE_LIBRARY_DIR_FLAGS) $(MODULE_LIBRARY_FLAGS) +endef + +# If there were any C++ source files in a shared library, we use this recipe, +# which runs the C++ compiler to link the final library +define host-shared-library-cxx-from-o +$(if $(V),,@echo " HOST_LD " $(call relative-to-top,$@)) +$(MODULE_CXX) -shared -Wl,-Bsymbolic \ + $(MODULE_HOST_LDFLAGS) -o $@ \ + $(sort $(MODULE_ALL_OBJECTS)) \ + $(MODULE_LIBRARY_DIR_FLAGS) $(MODULE_LIBRARY_FLAGS) +endef + +define target-copy-debug-information +$(OBJCOPY) --only-keep-debug $@ $(basename $@).dbg +endef + +define host-strip-debug-information +$(HOST_STRIP) --strip-unneeded $@ +endef + +define target-strip-debug-information +$(STRIP) --strip-unneeded $@ +endef + +define target-add-debuglink +$(if $(V),,@echo " DBGLINK " $(call relative-to-top,$(basename $@).dbg)) +$(OBJCOPY) --add-gnu-debuglink=$(basename $@).dbg $@ +endef + +define 
host-static-library-from-o +$(if $(V),,@echo " HOST_AR " $(call relative-to-top,$@)) +$(HOST_AR) cru $@ $(sort $(MODULE_ALL_OBJECTS)) +endef + +define target-static-library-from-o +$(if $(V),,@echo " AR " $(call relative-to-top,$@)) +$(AR) cru $@ $(sort $(MODULE_ALL_OBJECTS)) +endef + +define tab-c-from-y +$(if $(V),,@echo " BISON " $(call relative-to-top,$<)) +$(BISON) $(MODULE_BISON_FLAGS) -o $@ -d $< +endef + +define l-c-from-l +$(if $(V),,@echo " FLEX " $(call relative-to-top,$<)) +$(FLEX) $(MODULE_FLEX_FLAGS) -o$@ $< +endef + +define clean-dirs +$(if $(V),,@echo " RM " $(call relative-to-top,$(MODULE_DIRS_TO_REMOVE))) +$(RM) -rf $(MODULE_DIRS_TO_REMOVE) +endef + +define make-directory +$(MKDIR) -p $@ +endef + +define check-exports-from-file +endef + +define check-exports +$(call check-exports-from-file,$(if $1,$1,$(notdir $@).txt)) +endef + +# Programs used in recipes + +BISON ?= bison +CC ?= gcc +CC_SECONDARY ?= $(CC) +CROSS_COMPILE_SECONDARY ?= $(CROSS_COMPILE) +CXX ?= g++ +CXX_SECONDARY ?= $(CXX) +HOST_CC ?= gcc +HOST_CXX ?= g++ +JAR ?= jar +JAVA ?= java +JAVAC ?= javac +ZIP ?= zip +PKG_CONFIG ?= pkg-config + +ifeq ($(USE_CCACHE),1) +CCACHE ?= ccache +endif + +# Define CHMOD and CC_CHECK first so we can use cc-is-clang +# +override CHMOD := $(if $(V),,@)chmod +override CC_CHECK := $(if $(V),,@)$(MAKE_TOP)/tools/cc-check.sh + +# If clang is detected, the compiler name is invariant but CROSS_COMPILE +# is reflected in the use of -target. For GCC this is always encoded into +# the binary. If CROSS_COMPILE is not set we can skip this. +# +# If we're doing a build with multiple target architectures, we might need +# two separate compilers to build binaries for each architecture. In this +# case, CROSS_COMPILE and CROSS_COMPILE_SECONDARY are the cross compiler +# prefix for the two compilers - $(CC) and $(CC_SECONDARY). +# +# Set the secondary compiler first before we overwrite $(CC). 
+# + +ifneq ($(CROSS_COMPILE_SECONDARY),) + ifeq ($(cc-is-clang),true) + override CC_SECONDARY := \ + $(CC_SECONDARY) \ + -target $(patsubst %-,%,$(CROSS_COMPILE_SECONDARY)) \ + -B$(dir $(shell which $(CROSS_COMPILE_SECONDARY)gcc)) + override CXX_SECONDARY := \ + $(CXX_SECONDARY) \ + -target $(patsubst %-,%,$(CROSS_COMPILE_SECONDARY)) \ + -B$(dir $(shell which $(CROSS_COMPILE_SECONDARY)gcc)) + else + ifeq ($(origin CC_SECONDARY),file) + override CC_SECONDARY := $(CROSS_COMPILE_SECONDARY)$(CC_SECONDARY) + endif + ifeq ($(origin CXX_SECONDARY),file) + override CXX_SECONDARY := $(CROSS_COMPILE_SECONDARY)$(CXX_SECONDARY) + endif + endif +endif + +# Apply compiler wrappers and V=1 handling +override CC_SECONDARY := $(if $(V),,@)$(CCACHE)$(DISTCC) $(CC_SECONDARY) +override CXX_SECONDARY := $(if $(V),,@)$(CCACHE)$(DISTCC) $(CXX_SECONDARY) + +ifneq ($(CROSS_COMPILE),) + ifeq ($(cc-is-clang),true) + override CC := \ + $(CC) \ + -target $(patsubst %-,%,$(CROSS_COMPILE)) \ + -B$(dir $(shell which $(CROSS_COMPILE)gcc)) + override CXX := \ + $(CXX) \ + -target $(patsubst %-,%,$(CROSS_COMPILE)) \ + -B$(dir $(shell which $(CROSS_COMPILE)gcc)) + else + ifeq ($(origin CC),file) + override CC := $(CROSS_COMPILE)$(CC) + endif + ifeq ($(origin CXX),file) + override CXX := $(CROSS_COMPILE)$(CXX) + endif + endif +else + $(if $(CROSS_COMPILE_SECONDARY),$(error CROSS_COMPILE_SECONDARY is set but CROSS_COMPILE is empty)) +endif + +# Apply compiler wrappers and V=1 handling +override CC := $(if $(V),,@)$(CCACHE)$(DISTCC) $(CC) +override CXX := $(if $(V),,@)$(CCACHE)$(DISTCC) $(CXX) + +override AR := $(if $(V),,@)$(CROSS_COMPILE)ar +override BISON := $(if $(V),,@)$(BISON) +override BZIP2 := $(if $(V),,@)bzip2 -9 +override CAT := $(if $(V),,@)cat +override CP := $(if $(V),,@)cp +override ECHO := $(if $(V),,@)echo +override FLEX := $(if $(V),,@)flex +override GAWK := $(if $(V),,@)gawk +override GREP := $(if $(V),,@)grep +override HOST_AR := $(if $(V),,@)ar +override HOST_CC := $(if 
$(V),,@)$(CCACHE) $(HOST_CC) +override HOST_CXX := $(if $(V),,@)$(CCACHE) $(HOST_CXX) +override HOST_STRIP := $(if $(V),,@)strip +override INSTALL := $(if $(V),,@)install +override JAR := $(if $(V),,@)$(JAR) +override JAVA := $(if $(V),,@)$(JAVA) +override JAVAC := $(if $(V),,@)$(JAVAC) +override LN := $(if $(V),,@)ln -f +override M4 := $(if $(V),,@)m4 +override MKDIR := $(if $(V),,@)mkdir +override MV := $(if $(V),,@)mv +override OBJCOPY := $(if $(V),,@)$(CROSS_COMPILE)objcopy +override PDSASM := $(if $(V),,@)$(HOST_OUT)/pdsasm +override RANLIB := $(if $(V),,@)$(CROSS_COMPILE)ranlib +override RM := $(if $(V),,@)rm -f +override SED := $(if $(V),,@)sed +override STRIP := $(if $(V),,@)$(CROSS_COMPILE)strip +override TAR := $(if $(V),,@)tar +override TOUCH := $(if $(V),,@)touch +override USEASM := $(if $(V),,@)$(HOST_OUT)/useasm +override USELINK := $(if $(V),,@)$(HOST_OUT)/uselink +override VHD2INC := $(if $(V),,@)$(HOST_OUT)/vhd2inc +override ZIP := $(if $(V),,@)$(ZIP) diff --git a/sgx_km/eurasia_km/eurasiacon/build/linux2/common/android/arch.mk b/sgx_km/eurasia_km/eurasiacon/build/linux2/common/android/arch.mk new file mode 100644 index 0000000..7b41576 --- /dev/null +++ b/sgx_km/eurasia_km/eurasiacon/build/linux2/common/android/arch.mk @@ -0,0 +1,107 @@ +########################################################################### ### +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ########################################################################### + +include ../common/android/platform_version.mk + +# Now we have included the platform_version.mk file, we know we have a +# correctly configured OUT_DIR and can probe it to figure out our +# architecture. + +$(eval $(subst #,$(newline),$(shell cat $(BUILD_PROP) | \ + grep '^ro.product.cpu.abilist=\|^ro.product.cpu.abilist32=' | \ + sed -e 's,ro.product.cpu.abilist=,JNI_CPU_ABI=,' \ + -e 's,ro.product.cpu.abilist32=,JNI_CPU_ABI_2ND=,' | \ + tr ',' ' ' | tr '\n' '#'))) + +# If ARCH is set, use that to remap to an "Android" ARCH.. +ANDROID_ARCH := $(filter arm arm64 x86 x86_64,$(ARCH)) + +# x86 is special and has another legacy ARCH name which is remapped +ifeq ($(ARCH),i386) +TARGET_ARCH := x86 +endif + +ifeq ($(ANDROID_ARCH),) +# ..otherwise, try to use the ABI list to figure it out. +# We check 64-bit variants before 32, as a 64-build may be backwards compatible, +# so the abilist contain both 64- and 32-bit variants +ifneq ($(filter arm64-v8a,$(JNI_CPU_ABI)),) +TARGET_ARCH=arm64 +else ifneq ($(filter armeabi-v7a armeabi,$(JNI_CPU_ABI)),) +TARGET_ARCH=arm +else ifneq ($(filter mips64,$(JNI_CPU_ABI)),) +TARGET_ARCH=mips64 +else ifneq ($(filter mips,$(JNI_CPU_ABI)),) +TARGET_ARCH=mips +else ifneq ($(filter x86_64,$(JNI_CPU_ABI)),) +TARGET_ARCH=x86_64 +else ifneq ($(filter x86,$(JNI_CPU_ABI)),) +TARGET_ARCH=x86 +else +$(error ARCH not set and JNI_CPU_ABI=$(JNI_CPU_ABI) was not remappable) +endif +else +TARGET_ARCH := $(ANDROID_ARCH) +endif + +JNI_CPU_ABI := $(word 1,$(JNI_CPU_ABI)) +JNI_CPU_ABI_2ND := $(word 1,$(JNI_CPU_ABI_2ND)) + +include ../common/android/arch_common.mk + +ifneq ($(filter arm arm64 mips mips64,$(TARGET_ARCH)),) +LDM_PLATFORM ?= 1 +endif + +ifneq ($(filter x86 x86_64,$(TARGET_ARCH)),) +LDM_PCI ?= 1 +endif + +ifneq ($(filter x86 x86_64,$(TARGET_ARCH)),) +KERNEL_CROSS_COMPILE ?= undef +endif + +ifneq ($(filter arm64 mips64 x86_64,$(TARGET_ARCH)),) +ifeq ($(MULTIARCH),) 
+$(warning *** 64-bit architecture detected. Enabling MULTIARCH=1.) +$(warning *** If you want a 64-bit only build, use MULTIARCH=64only.) +export MULTIARCH := 1 +endif +endif diff --git a/sgx_km/eurasia_km/eurasiacon/build/linux2/common/android/arch_common.mk b/sgx_km/eurasia_km/eurasiacon/build/linux2/common/android/arch_common.mk new file mode 100644 index 0000000..2cd272e --- /dev/null +++ b/sgx_km/eurasia_km/eurasiacon/build/linux2/common/android/arch_common.mk @@ -0,0 +1,158 @@ +########################################################################### ### +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. 
If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +### ########################################################################### + +SYS_CFLAGS := \ + -fno-short-enums \ + -funwind-tables \ + -D__linux__ + +SYS_INCLUDES := + +ifneq ($(TARGET_PLATFORM),) + + # Support for building with the Android NDK >= r15b. + # The NDK provides only the most basic includes and libraries. + + SYS_INCLUDES += \ + -isystem $(NDK_PLATFORMS_ROOT)/$(TARGET_PLATFORM)/arch-$(TARGET_ARCH)/usr/include \ + -isystem $(NDK_SYSROOT)/usr/include/drm \ + -isystem $(NDK_SYSROOT)/usr/include + +else # !TARGET_PLATFORM + + # These libraries are not coming from the NDK now, so we need to include them + # from the ANDROID_ROOT source tree. 
+ + SYS_INCLUDES += \ + -isystem $(ANDROID_ROOT)/bionic/libc/include \ + -isystem $(ANDROID_ROOT)/bionic/libc/kernel/android/uapi \ + -isystem $(ANDROID_ROOT)/bionic/libc/kernel/uapi \ + -isystem $(ANDROID_ROOT)/bionic/libm/include \ + -isystem $(ANDROID_ROOT)/external/libdrm/include/drm \ + -isystem $(ANDROID_ROOT)/external/zlib/src \ + -isystem $(ANDROID_ROOT)/frameworks/native/include + + ifeq ($(is_future_version),1) + SYS_INCLUDES += \ + -isystem $(ANDROID_ROOT)/libnativehelper/include_jni + else ifeq ($(is_aosp_master),1) + SYS_INCLUDES += \ + -isystem $(ANDROID_ROOT)/libnativehelper/include_jni + else + SYS_INCLUDES += \ + -isystem $(ANDROID_ROOT)/libnativehelper/include/nativehelper + endif + +endif # !TARGET_PLATFORM + + # These components aren't in the NDK. They *are* in the VNDK. If this is an + # NDK or non-NDK build, but not a VNDK build, include the needed bits from + # the ANDROID_ROOT source tree. We put libsync first because the NDK copy + # of the sync headers have been stripped in an unsupported way. 
+ + SYS_INCLUDES := \ + -isystem $(ANDROID_ROOT)/system/core/libsync/include \ + $(SYS_INCLUDES) \ + -isystem $(ANDROID_ROOT)/external/libdrm \ + -isystem $(ANDROID_ROOT)/external/libpng \ + -isystem $(ANDROID_ROOT)/external/libunwind/include \ + -isystem $(ANDROID_ROOT)/hardware/libhardware/include \ + -isystem $(ANDROID_ROOT)/system/media/camera/include + + # boringssl replaced openssl from Marshmallow + ifeq ($(is_at_least_marshmallow),1) + SYS_INCLUDES += \ + -isystem $(ANDROID_ROOT)/external/boringssl/src/include + else + SYS_INCLUDES += \ + -isystem $(ANDROID_ROOT)/external/openssl/include + endif + + # libjpeg-turbo replaced libjpeg from Nougat + ifeq ($(is_at_least_nougat),1) + SYS_INCLUDES += \ + -isystem $(ANDROID_ROOT)/external/libjpeg-turbo + else + SYS_INCLUDES += \ + -isystem $(ANDROID_ROOT)/external/jpeg + endif + + # Handle upstream includes refactoring + ifeq ($(is_at_least_oreo),1) + SYS_INCLUDES += \ + -isystem $(ANDROID_ROOT)/frameworks/native/libs/nativewindow/include \ + -isystem $(ANDROID_ROOT)/system/core/libbacktrace/include \ + -isystem $(ANDROID_ROOT)/system/core/libsystem/include \ + -isystem $(ANDROID_ROOT)/system/core/libutils/include + ifeq ($(NDK_ROOT),) + SYS_INCLUDES += \ + -isystem $(ANDROID_ROOT)/frameworks/native/libs/arect/include \ + -isystem $(ANDROID_ROOT)/system/core/liblog/include + endif + else + SYS_INCLUDES += \ + -isystem $(ANDROID_ROOT)/frameworks/base/include \ + -isystem $(ANDROID_ROOT)/system/core/include + endif + +# This is comparing PVR_BUILD_DIR to see if it is omap and adding +# includes required for its HWC. +ifeq ($(notdir $(abspath .)),omap_android) +SYS_INCLUDES += \ + -isystem $(ANDROID_ROOT)/hardware/ti/omap4xxx/kernel-headers \ + -isystem $(ANDROID_ROOT)/hardware/ti/omap4xxx/ion +endif + +# Always include the NDK compatibility directory, because it allows us to +# compile in inline versions of simple functions to eliminate dependencies, +# and we can also constrain the available APIs.
Do this last, so we can +# make sure it is always first on the include list. + +SYS_INCLUDES := -isystem eurasiacon/android/ndk $(SYS_INCLUDES) + +# Android enables build-id sections to allow mapping binaries to debug +# information for symbol resolution +SYS_LDFLAGS += -Wl,--build-id=md5 + +SYS_EXE_LDFLAGS_CXX := -lstdc++ + +SYS_LIB_LDFLAGS_CXX := $(SYS_EXE_LDFLAGS_CXX) + +OPTIM := -O2 diff --git a/sgx_km/eurasia_km/eurasiacon/build/linux2/common/android/extra_config.mk b/sgx_km/eurasia_km/eurasiacon/build/linux2/common/android/extra_config.mk new file mode 100644 index 0000000..5cb8d84 --- /dev/null +++ b/sgx_km/eurasia_km/eurasiacon/build/linux2/common/android/extra_config.mk @@ -0,0 +1,115 @@ +########################################################################### ### +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. 
+# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ########################################################################### + + + +$(eval $(call BothConfigC,ANDROID,)) + + + +$(eval $(call TunableBothConfigC,SUPPORT_PVRSRV_ANDROID_SYSTRACE,)) + +$(eval $(call TunableBothConfigMake,SUPPORT_ANDROID_PLATFORM,)) +$(eval $(call TunableBothConfigMake,SUPPORT_PVRSRV_ANDROID_SYSTRACE,)) + +$(eval $(call TunableBothConfigMake,PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC,)) +$(eval $(call TunableBothConfigC,PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC,)) + +$(eval $(call TunableBothConfigMake,PVR_ANDROID_NATIVE_WINDOW_HAS_FENCE,)) +$(eval $(call TunableBothConfigC,PVR_ANDROID_NATIVE_WINDOW_HAS_FENCE,)) + +ifeq ($(NO_HARDWARE),1) + override PVR_ANDROID_COMPOSERHAL := null +endif + +ifneq ($(PVR_ANDROID_COMPOSERHAL),drm) + ifneq ($(SUPPORT_PVRSRV_DEVICE_CLASS),1) + override PVR_ANDROID_COMPOSERHAL := null + endif +endif + + +# Most development systems will have at least one copy of java, but some may +# have more. If the build system detected that a specific 'forced' version +# of java should be used, and the user didn't override JAVAC, try to set it +# up here. We'll use JAVA_HOME if it's set, falling back to PATH if it is +# not. +ifeq ($(JAVAC),) + # If JAVA_HOME is unset, implement some assumed paths taken from Android's + # build/envsetup.sh script (these are intentionally Ubuntu centric). 
+ ifeq ($(JAVA_HOME),) + ifeq ($(LEGACY_USE_JAVA7),1) + JAVA_HOME ?= /usr/lib/jvm/java-7-openjdk-amd64 + else + JAVA_HOME ?= /usr/lib/jvm/java-8-openjdk-amd64 + endif + ifeq ($(wildcard $(JAVA_HOME)),) + JAVA_HOME := + endif + endif + + ifeq ($(JAVA_HOME),) + JAVA ?= java + JAVAC ?= javac + else + JAVA := $(JAVA_HOME)/bin/java + JAVAC := $(JAVA_HOME)/bin/javac + ifeq ($(wildcard $(JAVAC)),) + $(error JAVA_HOME does not point to a valid java installation) + endif + endif + + # Test the configured JDK for validity + ifeq ($(LEGACY_USE_JAVA6),1) + ifeq ($(shell $(JAVA) -version 2>&1 | grep -qe 'Java(TM).*1\.6\.0' && echo 1 || echo 0),0) + $(error '$(JAVA) -version' was not for Oracle JDK 6) + endif + else ifeq ($(LEGACY_USE_JAVA7),1) + ifeq ($(shell $(JAVA) -version 2>&1 | grep -qe 'OpenJDK.*7u' && echo 1 || echo 0),0) + $(error '$(JAVA) -version' was not for OpenJDK 7) + endif + else + ifeq ($(shell $(JAVA) -version 2>&1 | grep -qe 'OpenJDK.*1\.8\.' && echo 1 || echo 0),0) + $(error '$(JAVA) -version' was not for OpenJDK 8) + endif + endif +endif + +include ../common/ion.mk diff --git a/sgx_km/eurasia_km/eurasiacon/build/linux2/common/android/features.mk b/sgx_km/eurasia_km/eurasiacon/build/linux2/common/android/features.mk new file mode 100644 index 0000000..bc92a6f --- /dev/null +++ b/sgx_km/eurasia_km/eurasiacon/build/linux2/common/android/features.mk @@ -0,0 +1,254 @@ +########################################################################### ### +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ########################################################################### + +# Basic support option tuning for Android +# +SUPPORT_ANDROID_PLATFORM := 1 +SUPPORT_OPENGLES1_V1_ONLY := 1 +DONT_USE_SONAMES := 1 + +# Meminfo IDs are required for buffer stamps +# +SUPPORT_MEMINFO_IDS := 1 + +# Enable services ion support by default +# +SUPPORT_ION ?= 0 +SUPPORT_DMABUF := 1 + +# Need multi-process support in PDUMP +# +SUPPORT_PDUMP_MULTI_PROCESS := 1 + +# Always print debugging after 5 seconds of no activity +# +CLIENT_DRIVER_DEFAULT_WAIT_RETRIES := 50 + +# Android WSEGL is always the same +# +OPK_DEFAULT := libpvrANDROID_WSEGL.so + +# srvkm is always built, but bufferclass_example is only built +# before EGL_image_external was generally available. +# +KERNEL_COMPONENTS := srvkm + +# Use the new PVR_DPF implementation to allow lower message levels +# to be stripped from production drivers +# +PVRSRV_NEW_PVR_DPF := 1 + +# Production Android builds don't want PVRSRVGetDCSystemBuffer +# +SUPPORT_PVRSRV_GET_DC_SYSTEM_BUFFER := 0 + +# Prefer to limit the 3D parameters heap to <16MB and move the +# extra 48MB to the general heap. This only affects cores with +# 28bit MMUs (520, 530, 531, 540). +# +SUPPORT_LARGE_GENERAL_HEAP := 1 + +# Enable a page pool for uncached memory allocations. This improves +# the performance of such allocations because the pages are temporarily +# not returned to Linux and therefore do not have to be re-invalidated +# (fewer cache invalidates are needed). +# +# Default the cache size to a maximum of 5400 pages (~21MB). If using +# newer Linux kernels (>=3.0) the cache may be reclaimed and become +# smaller than this maximum during runtime. 
+# +PVR_LINUX_MEM_AREA_POOL_MAX_PAGES ?= 5400 + +############################################################################## +# Enable source fortification by default +# +FORTIFY ?= 1 + +############################################################################## +# Unless overridden by the user, assume the RenderScript Compute API level +# matches that of the SDK API_LEVEL. +# +RSC_API_LEVEL ?= $(API_LEVEL) +ifneq ($(findstring $(RSC_API_LEVEL),21 22),) +RSC_API_LEVEL := 20 +endif + +############################################################################## +# Framebuffer target extension is used to find configs compatible with +# the framebuffer (added in JB MR1). +# +EGL_EXTENSION_ANDROID_FRAMEBUFFER_TARGET := 1 + +############################################################################## +# Handle various platform includes for unittests +# +UNITTEST_INCLUDES := \ + eurasiacon/android \ + $(ANDROID_ROOT)/frameworks/base/native/include \ + $(ANDROID_ROOT)/frameworks/native/include \ + $(ANDROID_ROOT)/frameworks/native/opengl/include \ + $(ANDROID_ROOT)/libnativehelper/include/nativehelper + +UNITTEST_INCLUDES += eurasiacon/unittests/include + +############################################################################## +# Future versions moved proprietary libraries to a vendor directory +# +ifeq ($(wildcard $(TARGET_ROOT)/product/$(TARGET_DEVICE)/vendor),) +PVRSRV_MODULE_BASEDIR := /system/lib/modules/ +BIN_DESTDIR := /system/vendor/bin +APP_DESTDIR := /data/app +else +PVR_ANDROID_FORCE_APP_NATIVE_UNPACKED := 1 +PVRSRV_MODULE_BASEDIR := /vendor/lib/modules/ +BIN_DESTDIR := /vendor/bin +APP_DESTDIR := /vendor/app +endif + +############################################################################## +# Android doesn't use these install script variables. They're still in place +# because the Linux install scripts use them. 
+# +SHLIB_DESTDIR := not-used +EGL_DESTDIR := not-used + +# Must give our EGL/GLES libraries a globally unique name +# +EGL_BASENAME_SUFFIX := _POWERVR_SGX$(SGXCORE)_$(SGX_CORE_REV) + +SYS_CXXFLAGS := -fuse-cxa-atexit $(SYS_CFLAGS) +SYS_INCLUDES += -isystem $(LIBCXX_INCLUDE_PATH) + +############################################################################## +# Support the OES_EGL_image_external extensions in the client drivers. +# +GLES1_EXTENSION_EGL_IMAGE_EXTERNAL := 1 +GLES2_EXTENSION_EGL_IMAGE_EXTERNAL := 1 + +############################################################################## +# ICS requires that at least one driver EGLConfig advertises the +# EGL_RECORDABLE_ANDROID attribute. The platform requires that surfaces +# rendered with this config can be consumed by an OMX video encoder. +# +EGL_EXTENSION_ANDROID_RECORDABLE := 1 + +############################################################################## +# ICS added the EGL_ANDROID_blob_cache extension. Enable support for this +# extension in EGL/GLESv2. +# +EGL_EXTENSION_ANDROID_BLOB_CACHE := 1 + +############################################################################## +# JB MR1 introduces cross-process syncs associated with a fd. +# This requires a new enough kernel version to have the base/sync driver. +# +EGL_EXTENSION_ANDROID_NATIVE_FENCE_SYNC ?= 1 + +############################################################################## +# Kernel 4.9 introduces new sync framework for cross-process sync +# +ifneq ($(strip $(KERNELDIR)),) +include ../kernel_version.mk +ifeq ($(call kernel-version-at-least,4,9,27),true) +PVR_ANDROID_NATIVE_WINDOW_HAS_FENCE ?= 1 +else +PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC ?= 1 +endif +endif + +############################################################################## +# Versions of Android prior to Nougat required Java 7 (OpenJDK). 
+# +ifeq ($(is_at_least_nougat),0) +LEGACY_USE_JAVA7 ?= 1 +endif + +############################################################################## +# Lollipop supports 64-bit. Configure BCC to emit both 32-bit and 64-bit LLVM +# bitcode in the renderscript driver. +# +ifeq ($(is_at_least_lollipop),1) +PVR_ANDROID_BCC_MULTIARCH_SUPPORT := 1 +endif + +############################################################################## +# Versions of Android prior to Nougat required .apk files to be processed with +# zipalign. Using this tool on Nougat or greater will corrupt the .apk file, +# as alignment is already done by signapk.jar, so we must disable it. +# +ifeq ($(is_at_least_nougat),0) +LEGACY_USE_ZIPALIGN ?= 1 +endif + +############################################################################## +# Marshmallow needs --soname turned on +# +ifeq ($(is_at_least_marshmallow),1) +PVR_ANDROID_NEEDS_SONAME ?= 1 +endif + +############################################################################## +# Marshmallow replaces RAW_SENSOR with RAW10, RAW12 and RAW16 +# +ifeq ($(is_at_least_marshmallow),1) +PVR_ANDROID_HAS_HAL_PIXEL_FORMAT_RAWxx := 1 +endif + +############################################################################## +# Marshmallow onwards DDK stopped render script acceleration using GPU. +# This flag stops device allocation. +# +ifeq ($(is_at_least_marshmallow),1) +PVR_ANDROID_HAS_GRALLOC_USAGE_RENDERSCRIPT := 1 +endif + +# On Android O, the <sync/sync.h> file was moved to <android/sync.h> for +# DDK use. A symlink was left for legacy reasons, but it conflicts with +# the NDK. Tell the driver to avoid using the symlink compatibility.
+# +ifeq ($(is_at_least_oreo),1) +override PVR_ANDROID_HAS_ANDROID_SYNC_H := 1 +endif + +# Placeholder for future version handling +# +ifeq ($(is_future_version),1) +-include ../common/android/future_version.mk +endif diff --git a/sgx_km/eurasia_km/eurasiacon/build/linux2/common/android/install.sh.tpl b/sgx_km/eurasia_km/eurasiacon/build/linux2/common/android/install.sh.tpl new file mode 100644 index 0000000..bc9fa24 --- /dev/null +++ b/sgx_km/eurasia_km/eurasiacon/build/linux2/common/android/install.sh.tpl @@ -0,0 +1,315 @@ +#!/bin/bash +############################################################################ ### +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. 
+# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +#### ########################################################################### +# Help on how to invoke +# +function usage { + echo "usage: $0 [options...]" + echo "" + echo "Options: -v Verbose mode." + echo " -n Dry-run mode." + echo " -u Uninstall-only mode." + echo " --root Use as the root of the install file system." + echo " (Overrides the DISCIMAGE environment variable.)" + exit 1 +} + +WD=`pwd` +SCRIPT_ROOT=`dirname $0` +cd $SCRIPT_ROOT + +PVRVERSION=[PVRVERSION] +PVRBUILD=[PVRBUILD] +PRIMARY_ARCH="[PRIMARY_ARCH]" +ARCHITECTURES="[ARCHITECTURES]" + +APP_DESTDIR=[APP_DESTDIR] +BIN_DESTDIR=[BIN_DESTDIR] +DATA_DESTDIR=[BIN_DESTDIR] + +# Exit with an error messages. +# $1=blurb +# +function bail { + if [ ! 
-z "$1" ]; then + echo "$1" >&2 + fi + + echo "" >&2 + echo "Installation failed" >&2 + exit 1 +} + +# Copy all the required files into their appropriate places on the local machine. +function install_locally { + # Define functions required for local installs + + # basic installation function + # $1=fromfile, $2=destfilename, $3=blurb, $4=chmod-flags, $5=chown-flags + # + function install_file { + if [ -z "$DDK_INSTALL_LOG" ]; then + bail "INTERNAL ERROR: Invoking install without setting logfile name" + fi + DESTFILE=${DISCIMAGE}$2 + DESTDIR=`dirname $DESTFILE` + + if [ ! -e $1 ]; then + [ -n "$VERBOSE" ] && echo "skipping file $1 -> $2" + return + fi + + # Destination directory - make sure it's there and writable + # + if [ -d "${DESTDIR}" ]; then + if [ ! -w "${DESTDIR}" ]; then + bail "${DESTDIR} is not writable." + fi + else + $DOIT mkdir -p ${DESTDIR} || bail "Couldn't mkdir -p ${DESTDIR}" + [ -n "$VERBOSE" ] && echo "Created directory `dirname $2`" + fi + + # Delete the original so that permissions don't persist. + # + $DOIT rm -f $DESTFILE + + $DOIT cp -f $1 $DESTFILE || bail "Couldn't copy $1 to $DESTFILE" + $DOIT chmod $4 ${DESTFILE} + + echo "$3 `basename $1` -> $2" + $DOIT echo "file $2" >> $DDK_INSTALL_LOG + } + + # If we install to an empty $DISCIMAGE, then we need to create some + # dummy directories, even if they contain no files, otherwise 'adb + # sync' may fail. (It allows '/vendor' to not exist currently.) + [ ! -d ${DISCIMAGE}/data ] && mkdir ${DISCIMAGE}/data + [ ! -d ${DISCIMAGE}/system ] && mkdir ${DISCIMAGE}/system + + for arch in $ARCHITECTURES; do + if [ ! -d $arch ]; then + echo "Missing architecture $arch" + if [ "$arch" = "$PRIMARY_ARCH" ]; then + echo "Primary architecture is missing, aborting!" 
+ exit 1 + else + continue + fi + fi + + BASE_DESTDIR=`dirname ${BIN_DESTDIR}` + case $arch in + target*64* ) + SHLIB_DESTDIR=${BASE_DESTDIR}/lib64 + ;; + *) + SHLIB_DESTDIR=${BASE_DESTDIR}/lib + esac + EGL_DESTDIR=${SHLIB_DESTDIR}/egl + + pushd $arch > /dev/null + # Install UM components + if [ -f install_um.sh ]; then + DDK_INSTALL_LOG=$UMLOG + echo "Installing User components for architecture $arch" + $DOIT echo "version $PVRVERSION" > $DDK_INSTALL_LOG + source install_um.sh + echo + fi + popd > /dev/null + done + + pushd $PRIMARY_ARCH > /dev/null + # Install KM components + if [ -f install_km.sh ]; then + DDK_INSTALL_LOG=$KMLOG + echo "Installing Kernel components for architecture $PRIMARY_ARCH" + $DOIT echo "version $PVRVERSION" > $DDK_INSTALL_LOG + source install_km.sh + echo + fi + popd > /dev/null + + # Create an OLDLOG so old versions of the driver can uninstall. + $DOIT echo "version $PVRVERSION" > $OLDLOG + if [ -f $KMLOG ]; then + tail -n +2 $KMLOG >> $OLDLOG + fi + if [ -f $UMLOG ]; then + tail -n +2 $UMLOG >> $OLDLOG + fi + + # Make sure new logs are newer than $OLDLOG + touch -m -d "last sunday" $OLDLOG +} + +# Read the appropriate install log and delete anything therein. +function uninstall_locally { + # Function to uninstall something. + function do_uninstall { + LOG=$1 + + if [ ! -f $LOG ]; then + echo "Nothing to un-install." + return; + fi + + BAD=0 + VERSION="" + while read type data; do + case $type in + version) + echo "Uninstalling existing version $data" + VERSION="$data" + ;; + link|file) + if [ -z "$VERSION" ]; then + BAD=1; + echo "No version record at head of $LOG" + elif ! $DOIT rm -f ${DISCIMAGE}${data}; then + BAD=1; + else + [ -n "$VERBOSE" ] && echo "Deleted $type $data" + fi + ;; + tree) + ;; + esac + done < $1; + + if [ $BAD = 0 ]; then + echo "Uninstallation completed." + $DOIT rm -f $LOG + else + echo "Uninstallation failed!!!" 
+ fi + } + + if [ -z "$OLDLOG" -o -z "$KMLOG" -o -z "$UMLOG" ]; then + bail "INTERNAL ERROR: Invoking uninstall without setting logfile name" + fi + + # Uninstall anything installed using the old-style install scripts. + LEGACY_LOG=0 + if [ -f $OLDLOG ]; then + if [ -f $KMLOG -a $KMLOG -nt $OLDLOG ]; then + # Last install was new scheme. + rm $OLDLOG + elif [ -f $UMLOG -a $UMLOG -nt $OLDLOG ]; then + # Last install was new scheme. + rm $OLDLOG + else + echo "Uninstalling all components from legacy log." + do_uninstall $OLDLOG + LEGACY_LOG=1 + echo + fi + fi + + if [ $LEGACY_LOG = 0 ]; then + # Uninstall KM components if we are doing a KM install. + if [ -f install_km.sh -a -f $KMLOG ]; then + echo "Uninstalling Kernel components" + do_uninstall $KMLOG + echo + fi + # Uninstall UM components if we are doing a UM install. + if [ -f install_um.sh -a -f $UMLOG ]; then + echo "Uninstalling User components" + do_uninstall $UMLOG + echo + fi + fi +} + +# Work out if there are any special instructions. +# +while [ "$1" ]; do + case "$1" in + -v|--verbose) + VERBOSE=v + ;; + -r|--root) + DISCIMAGE=$2 + shift; + ;; + -u|--uninstall) + UNINSTALL_ONLY=y + ;; + -n) + DOIT=echo + ;; + -h | --help | *) + usage + ;; + esac + shift +done + +if [ ! -z "$DISCIMAGE" ]; then + + if [ ! -d "$DISCIMAGE" ]; then + bail "$0: $DISCIMAGE does not exist." + fi + + echo + if [ $DISCIMAGE == "/" ]; then + echo "Installing PowerVR '$PVRVERSION ($PVRBUILD)' locally" + else + echo "Installing PowerVR '$PVRVERSION ($PVRBUILD)' on $DISCIMAGE" + fi + echo + echo "File system installation root is $DISCIMAGE" + echo + + OLDLOG=$DISCIMAGE/powervr_ddk_install.log + KMLOG=$DISCIMAGE/powervr_ddk_install_km.log + UMLOG=$DISCIMAGE/powervr_ddk_install_um.log + + uninstall_locally + + if [ "$UNINSTALL_ONLY" != "y" ]; then + install_locally + fi + +else + bail "DISCIMAGE must be set for installation to be possible." 
+fi diff --git a/sgx_km/eurasia_km/eurasiacon/build/linux2/common/android/paths.mk b/sgx_km/eurasia_km/eurasiacon/build/linux2/common/android/paths.mk new file mode 100644 index 0000000..fa91e86 --- /dev/null +++ b/sgx_km/eurasia_km/eurasiacon/build/linux2/common/android/paths.mk @@ -0,0 +1,60 @@ +########################################################################### ### +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". 
+# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +### ########################################################################### + +TARGET_BUILD_TYPE ?= release + +HOST_OS ?= linux +HOST_ARCH ?= x86 + +ifeq ($(wildcard $(OUT_DIR)),) +OUT_DIR = $(ANDROID_ROOT)/out +endif + +ifeq ($(TARGET_BUILD_TYPE),debug) +TARGET_ROOT := $(OUT_DIR)/debug/target +else +TARGET_ROOT := $(OUT_DIR)/target +endif + +ifeq ($(NDK_ROOT),) +LIBCXX_INCLUDE_PATH := $(ANDROID_ROOT)/external/libcxx/include +else +LIBCXX_INCLUDE_PATH := $(NDK_ROOT)/sources/cxx-stl/llvm-libc++/include +endif diff --git a/sgx_km/eurasia_km/eurasiacon/build/linux2/common/android/platform_version.mk b/sgx_km/eurasia_km/eurasiacon/build/linux2/common/android/platform_version.mk new file mode 100644 index 0000000..0a91f6b --- /dev/null +++ b/sgx_km/eurasia_km/eurasiacon/build/linux2/common/android/platform_version.mk @@ -0,0 +1,192 @@ +########################################################################### ### +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ########################################################################### + +# If there's no build.prop file in the expected location, bail out. Tell the +# user which file we were trying to read in case TARGET_DEVICE was not set. +# +BUILD_PROP := $(TARGET_ROOT)/product/$(TARGET_DEVICE)/vendor/build.prop +ifeq ($(wildcard $(BUILD_PROP)),) +$(warning *** Could not determine Android version. Did you set ANDROID_ROOT,\ +OUT_DIR and TARGET_DEVICE in your environment correctly?) +$(error Error reading $(BUILD_PROP)) +endif + +# Extract version.release and version.codename from the build.prop file. +# If either of the values aren't in the build.prop, the Make variables won't +# be defined, and fallback handling will take place. +# +define newline + + +endef +$(eval $(subst #,$(newline),$(shell cat $(BUILD_PROP) | \ + grep '^ro.build.version.release=\|^ro.build.id=' | \ + sed -e 's,ro.build.version.release=,PLATFORM_RELEASE=,' \ + -e 's,ro.build.id=,PLATFORM_BUILDID=,' | tr '\n' '#'))) + +define release-starts-with +$(shell echo $(PLATFORM_RELEASE) | grep -q ^$(1); \ + [ "$$?" = "0" ] && echo 1 || echo 0) +endef + +# ro.build.version.release contains the version number for release builds, or +# the version codename otherwise. In this case we need to assume that the +# version of Android we're building against has the features that are in the +# final release of that version, so we set PLATFORM_RELEASE to the +# corresponding release number. +# +# NOTE: It's the _string_ ordering that matters here, not the version number +# ordering. You need to make sure that strings that are sub-strings of other +# checked strings appear _later_ in this list. +# +# e.g. 'LollipopMR1' starts with 'Lollipop', but it is not Lollipop. +# +# NOTE: The version codenames for Android stopped after KitKat, don't read +# too much into the below names. They are mostly placeholders/reminders. 
+# +ifeq ($(call release-starts-with,LollipopMR1),1) +override PLATFORM_RELEASE := 5.1 +else ifeq ($(call release-starts-with,Marshmallow),1) +override PLATFORM_RELEASE := 6.0 +else ifeq ($(call release-starts-with,NougatMR),1) +override PLATFORM_RELEASE := 7.1 +else ifeq ($(call release-starts-with,Nougat),1) +override PLATFORM_RELEASE := 7.0 +else ifeq ($(call release-starts-with,Oreo),1) +override PLATFORM_RELEASE := 8.0 +else ifeq ($(PLATFORM_BUILDID),OC) +override PLATFORM_RELEASE := 8.0.80 +else ifeq ($(shell echo $(PLATFORM_RELEASE) | grep -qE "[A-Za-z]+"; echo $$?),0) +override PLATFORM_RELEASE := 8.2 +endif + +# Workaround for master. Sometimes there is an AOSP version ahead of +# the current master version number, but master still has more features. +# +ifeq ($(PLATFORM_RELEASE),8.0.80) +override PLATFORM_RELEASE := 8.0 +is_aosp_master := 1 +endif + +PLATFORM_RELEASE_MAJ := $(shell echo $(PLATFORM_RELEASE) | cut -f1 -d'.') +PLATFORM_RELEASE_MIN := $(shell echo $(PLATFORM_RELEASE) | cut -f2 -d'.') +PLATFORM_RELEASE_PATCH := $(shell echo $(PLATFORM_RELEASE) | cut -f3 -d'.') + +# Not all versions have a patchlevel; fix that up here +# +ifeq ($(PLATFORM_RELEASE_PATCH),) +PLATFORM_RELEASE_PATCH := 0 +endif + +# Macros to help categorize support for features and API_LEVEL for tests. 
+#
+is_at_least_lollipop_mr1 := \
+ $(shell ( test $(PLATFORM_RELEASE_MAJ) -gt 5 || \
+ ( test $(PLATFORM_RELEASE_MAJ) -eq 5 && \
+ test $(PLATFORM_RELEASE_MIN) -gt 0 ) ) && echo 1 || echo 0)
+is_at_least_marshmallow := \
+ $(shell ( test $(PLATFORM_RELEASE_MAJ) -ge 6 ) && echo 1 || echo 0)
+is_at_least_nougat := \
+ $(shell ( test $(PLATFORM_RELEASE_MAJ) -ge 7 ) && echo 1 || echo 0)
+is_at_least_nougat_mr1 := \
+ $(shell ( test $(PLATFORM_RELEASE_MAJ) -gt 7 || \
+ ( test $(PLATFORM_RELEASE_MAJ) -eq 7 && \
+ test $(PLATFORM_RELEASE_MIN) -gt 0 ) ) && echo 1 || echo 0)
+is_at_least_oreo := \
+ $(shell ( test $(PLATFORM_RELEASE_MAJ) -ge 8 ) && echo 1 || echo 0)
+
+# Assume "future versions" are >8.0, but we don't really know
+is_future_version := \
+ $(shell ( test $(PLATFORM_RELEASE_MAJ) -gt 8 || \
+ ( test $(PLATFORM_RELEASE_MAJ) -eq 8 && \
+ test $(PLATFORM_RELEASE_MIN) -gt 0 ) ) && echo 1 || echo 0)
+
+# Picking an exact match of API_LEVEL for the platform we're building
+# against can avoid compatibility theming and affords better integration.
+#
+# This is also a good place to select the right jack toolchain.
+# +ifeq ($(is_future_version),1) +JACK_VERSION ?= 4.32.CANDIDATE +API_LEVEL := 26 +else ifeq ($(is_at_least_oreo),1) +ifeq ($(is_aosp_master),1) +override JACK_VERSION := +else +JACK_VERSION ?= 4.31.CANDIDATE +endif +API_LEVEL := 26 +else ifeq ($(is_at_least_nougat_mr1),1) +JACK_VERSION ?= 3.36.CANDIDATE +API_LEVEL := 25 +else ifeq ($(is_at_least_nougat),1) +JACK_VERSION ?= 3.36.CANDIDATE +API_LEVEL := 24 +else ifeq ($(is_at_least_marshmallow),1) +JACK_VERSION ?= 2.21.RELEASE +API_LEVEL := 23 +else ifeq ($(is_at_least_lollipop_mr1),1) +# This early version had no version-file.version.code; fake it +JACK_VERSION ?= 1.0.RELEASE +API_LEVEL := 22 +else +$(error Must build against Android >= 5.1) +endif + +# If the NDK is enabled, check it has API_LEVEL support for us +ifneq ($(NDK_ROOT),) + NDK_PLATFORMS_ROOT ?= $(NDK_ROOT)/platforms + ifeq ($(strip $(wildcard $(NDK_PLATFORMS_ROOT)/android-*)),) + $(error NDK_PLATFORMS_ROOT does not point to a valid location) + endif + override TARGET_PLATFORM := android-$(API_LEVEL) + ifeq ($(strip $(wildcard $(NDK_PLATFORMS_ROOT)/$(TARGET_PLATFORM))),) + $(error NDK support for $(TARGET_PLATFORM) is missing) + endif +endif + +# Each DDK is tested against only a single version of the platform. +# Warn if a different platform version is used. +# +ifeq ($(is_future_version),1) +$(info WARNING: Android version is newer than this DDK supports) +else ifneq ($(is_at_least_marshmallow),1) +$(info WARNING: Android version is older than this DDK supports) +endif diff --git a/sgx_km/eurasia_km/eurasiacon/build/linux2/common/dridrm.mk b/sgx_km/eurasia_km/eurasiacon/build/linux2/common/dridrm.mk new file mode 100644 index 0000000..dfe3018 --- /dev/null +++ b/sgx_km/eurasia_km/eurasiacon/build/linux2/common/dridrm.mk @@ -0,0 +1,63 @@ +########################################################################### ### +#@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". 
+# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +### ########################################################################### + +$(eval $(call TunableBothConfigC,SUPPORT_DRI_DRM,)) +$(eval $(call TunableBothConfigC,SUPPORT_DRI_DRM_EXT,)) +$(eval $(call TunableKernelConfigC,SUPPORT_DRI_DRM_PLUGIN,)) + + +$(eval $(call TunableBothConfigMake,SUPPORT_DRI_DRM,)) + +ifeq ($(SUPPORT_DRI_DRM),1) +ifeq ($(SUPPORT_DRI_DRM_NO_LIBDRM),1) +endif +$(eval $(call TunableKernelConfigC,PVR_SECURE_DRM_AUTH_EXPORT,)) +$(eval $(call TunableKernelConfigC,SUPPORT_DRM_MODESET,)) +endif + +$(eval $(call TunableKernelConfigC,PVR_DISPLAY_CONTROLLER_DRM_IOCTL,)) + +$(eval $(call TunableBothConfigC,PVR_DRI_DRM_NOT_PCI)) +$(eval $(call TunableBothConfigMake,PVR_DRI_DRM_NOT_PCI)) + +$(eval $(call TunableKernelConfigC,PVR_DRI_DRM_PLATFORM_DEV,)) + + + diff --git a/sgx_km/eurasia_km/eurasiacon/build/linux2/common/ion.mk b/sgx_km/eurasia_km/eurasiacon/build/linux2/common/ion.mk new file mode 100644 index 0000000..f3f3c46 --- /dev/null +++ b/sgx_km/eurasia_km/eurasiacon/build/linux2/common/ion.mk @@ -0,0 +1,65 @@ +########################################################################### ### +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ########################################################################### + +ifneq ($(KERNELDIR),) +ifneq ($(SUPPORT_ION),) + +# Support kernels built out-of-tree with O=/other/path +# In those cases, KERNELDIR will be O, not the source tree. +ifneq ($(wildcard $(KERNELDIR)/source),) +KSRCDIR := $(KERNELDIR)/source +else +KSRCDIR := $(KERNELDIR) +endif + +ifneq ($(wildcard $(KSRCDIR)/drivers/staging/android/ion/ion.h),) +# The kernel has a more recent version of ion, located in drivers/staging. +# Change the default header paths and the behaviour wrt sg_dma_len. +SUPPORT_ION_HEADER := \"../drivers/staging/android/ion/ion.h\" +SUPPORT_ION_PRIV_HEADER := \"../drivers/staging/android/ion/ion_priv.h\" +SUPPORT_ION_USE_SG_LENGTH := 1 +endif + +$(eval $(call TunableKernelConfigC,SUPPORT_ION_HEADER,\"linux/ion.h\")) +$(eval $(call TunableKernelConfigC,SUPPORT_ION_PRIV_HEADER,\"../drivers/gpu/ion/ion_priv.h\")) +$(eval $(call TunableKernelConfigC,SUPPORT_ION_USE_SG_LENGTH,)) + +endif +endif diff --git a/sgx_km/eurasia_km/eurasiacon/build/linux2/common/omap4.mk b/sgx_km/eurasia_km/eurasiacon/build/linux2/common/omap4.mk new file mode 100644 index 0000000..9a2cdd5 --- /dev/null +++ b/sgx_km/eurasia_km/eurasiacon/build/linux2/common/omap4.mk @@ -0,0 +1,44 @@ +########################################################################### ### +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ########################################################################### + +$(eval $(call TunableKernelConfigC,PVR_NO_OMAP_TIMER,)) +$(eval $(call TunableKernelConfigC,PVR_OMAPLFB_DONT_USE_FB_PAN_DISPLAY,)) +$(eval $(call TunableKernelConfigC,PVR_OMAPLFB_DRM_FB,)) +$(eval $(call TunableKernelConfigC,VS_PRODUCT_VERSION,)) diff --git a/sgx_km/eurasia_km/eurasiacon/build/linux2/common/opencl.mk b/sgx_km/eurasia_km/eurasiacon/build/linux2/common/opencl.mk new file mode 100644 index 0000000..b5f84d4 --- /dev/null +++ b/sgx_km/eurasia_km/eurasiacon/build/linux2/common/opencl.mk @@ -0,0 +1,40 @@ +########################################################################### ### +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. 
+# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +### ########################################################################### + diff --git a/sgx_km/eurasia_km/eurasiacon/build/linux2/config/compiler.mk b/sgx_km/eurasia_km/eurasiacon/build/linux2/config/compiler.mk new file mode 100644 index 0000000..82c9d44 --- /dev/null +++ b/sgx_km/eurasia_km/eurasiacon/build/linux2/config/compiler.mk @@ -0,0 +1,240 @@ +########################################################################### ### +#@File +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ########################################################################### + +# Check for valid values of $(MULTIARCH). +ifeq ($(strip $(MULTIARCH)),0) +$(error MULTIARCH must be empty to disable multiarch) +endif + +define calculate-compiler-preferred-target + ifeq ($(2),qcc) + $(1)_compiler_preferred_target := qcc + else + $(1)_compiler_preferred_target := $$(subst --,-,$$(shell $(2) -dumpmachine)) + ifeq ($$($(1)_compiler_preferred_target),) + $$(warning No output from '$(2) -dumpmachine') + $$(warning Check that the compiler is in your PATH and CROSS_COMPILE is) + $$(warning set correctly.) + $$(error Unable to run compiler '$(2)') + endif + ifneq ($$(filter x86_64-%,$$($(1)_compiler_preferred_target)),) + $(1)_compiler_preferred_target := x86_64-linux-gnu + endif + ifneq ($$(filter i386-% i486-% i586-% i686-%,$$($(1)_compiler_preferred_target)),) + $(1)_compiler_preferred_target := i386-linux-gnu + endif + ifneq ($$(filter arm-linux-android,$$($(1)_compiler_preferred_target)),) + $(1)_compiler_preferred_target := arm-linux-androideabi + endif + endif +endef + +define cross-compiler-name + ifeq ($$(origin CC),file) + $(1) := $(2)$(3) + else + $(1) := $(3) + ifeq ($$(_CLANG),true) + ifneq ($(strip $(2)),) + $(1) := $(3) -target $$(patsubst %-,%,$(2)) -Qunused-arguments -fcolor-diagnostics + else + $(1) := $(3) -Qunused-arguments -fcolor-diagnostics + endif + endif + endif +endef + +# Work out the host compiler architecture +$(eval $(call calculate-compiler-preferred-target,host,$(HOST_CC))) + +ifeq ($(host_compiler_preferred_target),x86_64-linux-gnu) + ifeq ($(ARCH),i386) + HOST_PRIMARY_ARCH := host_i386 + HOST_FORCE_32BIT := -m32 + else + HOST_PRIMARY_ARCH := host_x86_64 + HOST_32BIT_ARCH := host_i386 + HOST_FORCE_32BIT := -m32 + endif +else +ifeq ($(host_compiler_preferred_target),i386-linux-gnu) + HOST_PRIMARY_ARCH := host_i386 + HOST_32BIT_ARCH := host_i386 +else +ifeq ($(host_compiler_preferred_target),arm-linux-gnueabihf) + HOST_PRIMARY_ARCH := 
host_armhf + HOST_32BIT_ARCH := host_armhf +else +ifeq ($(host_compiler_preferred_target),aarch64-linux-gnu) + HOST_PRIMARY_ARCH := host_aarch64 + HOST_32BIT_ARCH := host_armhf +else + $(error Unknown host compiler target architecture $(host_compiler_preferred_target)) +endif +endif +endif +endif + +# Workaround our lack of support for non-Linux HOST_CCs +ifneq ($(HOST_CC_IS_LINUX),1) + $(warning $$(HOST_CC) is non-Linux. Trying to work around.) + override HOST_CC := $(HOST_CC) -D__linux__ + $(eval $(call BothConfigMake,HOST_CC,$(HOST_CC))) +endif + +$(eval $(call BothConfigMake,HOST_PRIMARY_ARCH,$(HOST_PRIMARY_ARCH))) +$(eval $(call BothConfigMake,HOST_32BIT_ARCH,$(HOST_32BIT_ARCH))) +$(eval $(call BothConfigMake,HOST_FORCE_32BIT,$(HOST_FORCE_32BIT))) + +TARGET_ALL_ARCH := +TARGET_PRIMARY_ARCH := +TARGET_SECONDARY_ARCH := + +# Work out the target compiler cross triple, and include the corresponding +# compilers/*.mk file, which sets TARGET_PRIMARY_ARCH and +# TARGET_SECONDARY_ARCH for that compiler. +# +compilers := ../config/compilers +define include-compiler-file + ifeq ($(strip $(1)),) + $$(error empty arg passed to include-compiler-file) + endif + ifeq ($$(wildcard $$(compilers)/$(1).mk),) + $$(warning ******************************************************) + $$(warning Compiler target '$(1)' not recognised) + $$(warning (missing $$(compilers)/$(1).mk file)) + $$(warning ******************************************************) + $$(error Compiler '$(1)' not recognised) + endif + include $$(compilers)/$(1).mk +endef + +# Check the kernel cross compiler to work out which architecture it targets. +# We can then tell if CROSS_COMPILE targets a different architecture. +ifneq ($(origin KERNEL_CROSS_COMPILE),undefined) + # First, calculate the value of KERNEL_CROSS_COMPILE as it would be seen by + # the main build, so we can check it here in the config stage. 
+ $(call one-word-only,KERNEL_CROSS_COMPILE) + _kernel_cross_compile := $(if $(filter undef,$(KERNEL_CROSS_COMPILE)),,$(KERNEL_CROSS_COMPILE)) + # We can take shortcuts with KERNEL_CROSS_COMPILE, as we don't want to + # respect CC and we don't support clang in that part currently. + _kernel_cross_compile := $(_kernel_cross_compile)gcc + # Then check the compiler. + $(eval $(call calculate-compiler-preferred-target,target,$(_kernel_cross_compile))) + $(eval $(call include-compiler-file,$(target_compiler_preferred_target))) + _kernel_primary_arch := $(TARGET_PRIMARY_ARCH) +else + # We can take shortcuts with KERNEL_CROSS_COMPILE, as we don't want to + # respect CC and we don't support clang in that part currently. + _kernel_cross_compile := $(CROSS_COMPILE)gcc + # KERNEL_CROSS_COMPILE will be the same as CROSS_COMPILE, so we don't need + # to do the compatibility check. + _kernel_primary_arch := +endif + +$(eval $(call cross-compiler-name,_cc,$(CROSS_COMPILE),$(CC))) +$(eval $(call cross-compiler-name,_cc_secondary,$(if $(CROSS_COMPILE_SECONDARY),$(CROSS_COMPILE_SECONDARY),$(CROSS_COMPILE)),$(CC_SECONDARY))) +$(eval $(call calculate-compiler-preferred-target,target,$(_cc))) +$(eval $(call include-compiler-file,$(target_compiler_preferred_target))) + +ifneq ($(SUPPORT_ANDROID_PLATFORM),1) +ifeq ($(MULTIARCH),1) + ifneq ($(MAKECMDGOALS),kbuild) + ifneq ($(COMPONENTS),) + $(eval $(call calculate-compiler-preferred-target,target_secondary,$(_cc_secondary))) + ifneq ($(target_compiler_preferred_target),$(target_secondary_compiler_preferred_target)) + $(eval $(call include-compiler-file,$(target_secondary_compiler_preferred_target))) + + ifeq ($(TARGET_SECONDARY_ARCH),) + $(error $(CROSS_COMPILE_SECONDARY) not supported for MULTIARCH builds) + endif + endif + endif + endif +endif +endif + +define remap-arch +$(if $(INTERNAL_ARCH_REMAP_$(1)),$(INTERNAL_ARCH_REMAP_$(1)),$(1)) +endef + +# Remap 'essentially compatible' architectures so the KM vs UM check +# isn't too strict. 
These mixtures are widely supported. +INTERNAL_ARCH_REMAP_target_armhf := target_armv7-a +INTERNAL_ARCH_REMAP_target_armel := target_armv7-a +INTERNAL_ARCH_REMAP_target_mips32r2el := target_mips32el +INTERNAL_ARCH_REMAP_target_mips32r6el := target_mips32el + +# Sanity check: if KERNEL_CROSS_COMPILE was set, it has to target the same +# architecture as CROSS_COMPILE. +ifneq ($(_kernel_primary_arch),) + ifneq ($(call remap-arch,$(TARGET_PRIMARY_ARCH)),$(call remap-arch,$(_kernel_primary_arch))) + $(warning ********************************************************) + $(warning Error: Kernel and user-mode cross compilers build for) + $(warning different targets) + $(warning $(space)$(space)CROSS_COMPILE=$(CROSS_COMPILE)) + $(warning $(space)$(space)$(space)builds for $(TARGET_PRIMARY_ARCH)) + $(warning $(space)$(space)KERNEL_CROSS_COMPILE=$(KERNEL_CROSS_COMPILE)) + $(warning $(space)$(space)$(space)builds for $(_kernel_primary_arch)) + $(warning ********************************************************) + $(error Mismatching kernel and user-mode cross compilers) + endif +endif + +ifneq ($(MULTIARCH),32only) +TARGET_ALL_ARCH += $(TARGET_PRIMARY_ARCH) +endif +ifneq ($(MULTIARCH),64only) +TARGET_ALL_ARCH += $(TARGET_SECONDARY_ARCH) +endif + +$(eval $(call BothConfigMake,TARGET_PRIMARY_ARCH,$(TARGET_PRIMARY_ARCH))) +$(eval $(call BothConfigMake,TARGET_SECONDARY_ARCH,$(TARGET_SECONDARY_ARCH))) +$(eval $(call BothConfigMake,TARGET_ALL_ARCH,$(TARGET_ALL_ARCH))) +$(eval $(call BothConfigMake,TARGET_FORCE_32BIT,$(TARGET_FORCE_32BIT))) + +$(info ******* Multiarch build: $(if $(MULTIARCH),yes,no)) +$(info ******* Primary arch: $(if $(TARGET_PRIMARY_ARCH),$(TARGET_PRIMARY_ARCH),none)) +$(info ******* Secondary arch: $(if $(TARGET_SECONDARY_ARCH),$(TARGET_SECONDARY_ARCH),none)) + +# Find the paths to libgcc for the primary and secondary architectures. 
+LIBGCC := $(shell $(_cc) -print-libgcc-file-name) +LIBGCC_SECONDARY := $(shell $(_cc_secondary) $(TARGET_FORCE_32BIT) -print-libgcc-file-name) diff --git a/sgx_km/eurasia_km/eurasiacon/build/linux2/config/compilers/arm-eabi.mk b/sgx_km/eurasia_km/eurasiacon/build/linux2/config/compilers/arm-eabi.mk new file mode 100644 index 0000000..d671ba9 --- /dev/null +++ b/sgx_km/eurasia_km/eurasiacon/build/linux2/config/compilers/arm-eabi.mk @@ -0,0 +1,2 @@ +# 32-bit ARM EABI compiler +TARGET_PRIMARY_ARCH := target_armv7-a diff --git a/sgx_km/eurasia_km/eurasiacon/build/linux2/config/compilers/arm-linux-androideabi.mk b/sgx_km/eurasia_km/eurasiacon/build/linux2/config/compilers/arm-linux-androideabi.mk new file mode 100644 index 0000000..8aaeaad --- /dev/null +++ b/sgx_km/eurasia_km/eurasiacon/build/linux2/config/compilers/arm-linux-androideabi.mk @@ -0,0 +1,2 @@ +# 32-bit Android ARM compiler +include $(compilers)/arm-eabi.mk diff --git a/sgx_km/eurasia_km/eurasiacon/build/linux2/config/compilers/mips64el-linux-android.mk b/sgx_km/eurasia_km/eurasiacon/build/linux2/config/compilers/mips64el-linux-android.mk new file mode 100644 index 0000000..8777b2a --- /dev/null +++ b/sgx_km/eurasia_km/eurasiacon/build/linux2/config/compilers/mips64el-linux-android.mk @@ -0,0 +1,29 @@ +# 64-bit MIPS R6 little-endian compiler +IS_KERNEL_32 := 0 +ifneq ($(KERNELDIR),) + IS_KERNEL_32 = ($(shell grep -q "CONFIG_MIPS=y" $(KERNELDIR)/.config && echo 1 || echo 0)) + ifneq ($(ARCH),mips) + ifeq ($(IS_KERNEL_32),1) + $(warning ******************************************************) + $(warning Your kernel appears to be configured for 32-bit MIPS,) + $(warning but CROSS_COMPILE (or KERNEL_CROSS_COMPILE) points) + $(warning to a 64-bit compiler.) + $(warning If you want a 32-bit build, either set CROSS_COMPILE) + $(warning to point to a 32-bit compiler, or build with ARCH=mips) + $(warning to force 32-bit mode with your existing compiler.) 
+ $(warning ******************************************************)
+ $(error Invalid CROSS_COMPILE / kernel architecture combination)
+ endif # IS_KERNEL_32
+ endif # ARCH=mips
+endif # KERNELDIR
+
+# If ARCH=mips is set, force a build for 32-bit only, even though we're
+# using a 64-bit compiler.
+ifeq ($(ARCH),mips)
+ TARGET_PRIMARY_ARCH := target_mips32r6el
+ ifeq ($(IS_KERNEL_32),0)
+ USE_64BIT_COMPAT := 1
+ endif
+else
+ $(error MIPS64 build is not supported)
+endif diff --git a/sgx_km/eurasia_km/eurasiacon/build/linux2/config/core.mk b/sgx_km/eurasia_km/eurasiacon/build/linux2/config/core.mk new file mode 100644 index 0000000..dc8c9e7 --- /dev/null +++ b/sgx_km/eurasia_km/eurasiacon/build/linux2/config/core.mk @@ -0,0 +1,790 @@ +########################################################################### ### +#@Title Root build configuration. +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. 
+# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +### ########################################################################### + +# Configuration wrapper for new build system. This file deals with +# configuration of the build. Add to this file anything that deals +# with switching driver options on/off and altering the defines or +# objects the build uses. +# +# At the end of this file is an exhaustive list of all variables +# that are passed between the platform/config stage and the generic +# build. PLEASE refrain from adding more variables than necessary +# to this stage -- almost all options can go through config.h. +# + +################################# MACROS #################################### + +# Write out a kernel GNU make option. 
+# +define KernelConfigMake +$$(shell echo "override $(1) := $(2)" >>$(CONFIG_KERNEL_MK).new) +$(if $(filter config,$(D)),$(info KernelConfigMake $(1) := $(2) # $(if $($(1)),$(origin $(1)),default))) +endef + +# Write out a GNU make option for both user & kernel +# +define BothConfigMake +$$(eval $$(call KernelConfigMake,$(1),$(2))) +endef + +# Conditionally write out a kernel GNU make option +# +define TunableKernelConfigMake +ifneq ($$($(1)),) +ifneq ($$($(1)),0) +$$(eval $$(call KernelConfigMake,$(1),$$($(1)))) +endif +else +ifneq ($(2),) +$$(eval $$(call KernelConfigMake,$(1),$(2))) +endif +endif +endef + +# Conditionally write out a GNU make option for both user & kernel +# +define TunableBothConfigMake +$$(eval $$(call TunableKernelConfigMake,$(1),$(2))) +endef + +# Write out a kernel-only option +# +define KernelConfigC +$$(shell echo "#define $(1) $(2)" >>$(CONFIG_KERNEL_H).new) +$(if $(filter config,$(D)),$(info KernelConfigC #define $(1) $(2) /* $(if $($(1)),$(origin $(1)),default) */),) +endef + +# Write out an option for both user & kernel +# +define BothConfigC +$$(eval $$(call KernelConfigC,$(1),$(2))) +endef + +# Conditionally write out a kernel-only option +# +define TunableKernelConfigC +ifneq ($$($(1)),) +ifneq ($$($(1)),0) +ifeq ($$($(1)),1) +$$(eval $$(call KernelConfigC,$(1),)) +else +$$(eval $$(call KernelConfigC,$(1),$$($(1)))) +endif +endif +else +ifneq ($(2),) +ifeq ($(2),1) +$$(eval $$(call KernelConfigC,$(1),)) +else +$$(eval $$(call KernelConfigC,$(1),$(2))) +endif +endif +endif +endef + +# Conditionally write out an option for both user & kernel +# +define TunableBothConfigC +$$(eval $$(call TunableKernelConfigC,$(1),$(2))) +endef + +############################### END MACROS ################################## + +# Check we have a new enough version of GNU make. 
+# +need := 3.81 +ifeq ($(filter $(need),$(firstword $(sort $(MAKE_VERSION) $(need)))),) +$(error A version of GNU make >= $(need) is required - this is version $(MAKE_VERSION)) +endif + +# Try to guess EURASIAROOT if it wasn't set. Check this location. +# +_GUESSED_EURASIAROOT := $(abspath ../../../..) +ifneq ($(strip $(EURASIAROOT)),) +# We don't want to warn about EURASIAROOT if it's empty: this might mean that +# it's not set at all anywhere, but it could also mean that it's set like +# "export EURASIAROOT=" or "make EURASIAROOT= sometarget". If it is set but +# empty, we'll act as if it's unset and not warn. +ifneq ($(strip $(EURASIAROOT)),$(_GUESSED_EURASIAROOT)) +nothing := +space := $(nothing) $(nothing) +$(warning EURASIAROOT is set (via: $(origin EURASIAROOT)), but its value does not) +$(warning match the root of this source tree, so it is being ignored) +$(warning EURASIAROOT is set to: $(EURASIAROOT)) +$(warning $(space)The detected root is: $(_GUESSED_EURASIAROOT)) +$(warning To suppress this message, unset EURASIAROOT or set it empty) +endif +# else, EURASIAROOT matched the actual root of the source tree: don't warn +endif +override EURASIAROOT := $(_GUESSED_EURASIAROOT) +TOP := $(EURASIAROOT) + +ifneq ($(words $(TOP)),1) +$(warning This source tree is located in a path which contains whitespace,) +$(warning which is not supported.) +$(warning $(space)The root is: $(TOP)) +$(error Whitespace found in $$(TOP)) +endif + +$(call directory-must-exist,$(TOP)) + +include ../defs.mk + +# Infer PVR_BUILD_DIR from the directory configuration is launched from. +# Check anyway that such a directory exists. +# +PVR_BUILD_DIR := $(notdir $(abspath .)) +$(call directory-must-exist,$(TOP)/eurasiacon/build/linux2/$(PVR_BUILD_DIR)) + +# Output directory for configuration, object code, +# final programs/libraries, and install/rc scripts. 
+# +BUILD ?= release +ifneq ($(WINDOW_SYSTEM),) +OUT ?= $(TOP)/eurasiacon/binary_$(PVR_BUILD_DIR)_$(WINDOW_SYSTEM)_$(BUILD) +else +OUT ?= $(TOP)/eurasiacon/binary2_$(PVR_BUILD_DIR)_$(BUILD) +endif +override OUT := $(if $(filter /%,$(OUT)),$(OUT),$(TOP)/$(OUT)) + +CONFIG_MK := $(OUT)/config.mk +CONFIG_H := $(OUT)/config.h +CONFIG_KERNEL_MK := $(OUT)/config_kernel.mk +CONFIG_KERNEL_H := $(OUT)/config_kernel.h + +# Convert commas to spaces in $(D). This is so you can say "make +# D=config-changes,freeze-config" and have $(filter config-changes,$(D)) +# still work. +comma := , +empty := +space := $(empty) $(empty) +override D := $(subst $(comma),$(space),$(D)) + +# Create the OUT directory and delete any previous intermediary files +# +$(shell mkdir -p $(OUT)) +$(shell \ + for file in $(CONFIG_MK).new $(CONFIG_H).new \ + $(CONFIG_KERNEL_MK).new $(CONFIG_KERNEL_H).new; do \ + rm -f $$file; \ + done) + +# Some targets don't need information about any modules. If we only specify +# these targets on the make command line, set INTERNAL_CLOBBER_ONLY to +# indicate that toplevel.mk shouldn't read any makefiles +CLOBBER_ONLY_TARGETS := clean clobber help install +INTERNAL_CLOBBER_ONLY := +ifneq ($(strip $(MAKECMDGOALS)),) +INTERNAL_CLOBBER_ONLY := \ +$(if \ + $(strip $(foreach _cmdgoal,$(MAKECMDGOALS),\ + $(if $(filter $(_cmdgoal),$(CLOBBER_ONLY_TARGETS)),,x))),,true) +endif + +# For a clobber-only build, we shouldn't regenerate any config files, or +# require things like SGXCORE to be set +ifneq ($(INTERNAL_CLOBBER_ONLY),true) + +# GNU Make has builtin values for CC/CXX which we don't want to trust. This +# is because $(CROSS_COMPILE)$(CC) doesn't always expand to a cross compiler +# toolchain binary name (e.g. most toolchains have 'gcc' but not 'cc'). 
+ +ifeq ($(origin CC),default) + _CC := $(CROSS_COMPILE)gcc + CC := gcc +else + _CLANG := $(shell $(TOP)/eurasiacon/build/linux2/tools/cc-check.sh --clang --cc $(CC)) + _CC := $(CC) + ifeq ($(_CLANG),true) + ifneq ($(strip $(CROSS_COMPILE)),) + _CC := $(CC) -target $(patsubst %-,%,$(CROSS_COMPILE)) -Qunused-arguments -fcolor-diagnostics + else + _CC := $(CC) -Qunused-arguments -fcolor-diagnostics + endif + endif +endif + +ifeq ($(origin CXX),default) + _CXX := $(CROSS_COMPILE)g++ + CXX := g++ +else + _CLANGXX := $(shell $(TOP)/eurasiacon/build/linux2/tools/cc-check.sh --clang --cc $(CXX)) + _CXX := $(CXX) + ifeq ($(_CLANGXX),true) + ifneq ($(strip $(CROSS_COMPILE)),) + _CXX := $(CXX) -target $(patsubst %-,%,$(CROSS_COMPILE)) -Qunused-arguments -fcolor-diagnostics + else + _CXX := $(CXX) -Qunused-arguments -fcolor-diagnostics + endif + endif +endif + +CC_SECONDARY ?= $(CC) +HOST_CC ?= gcc + +# Work out if we are targeting ARM before we start tweaking _CC. +TARGETING_AARCH64 := $(shell \ + $(_CC) -dM -E - /dev/null 2>&1 && echo 1) + +TARGETING_MIPS := $(shell \ + $(_CC) -dM -E - /dev/null 2>&1 && echo 1) + +HOST_CC_IS_LINUX := $(shell \ + $(HOST_CC) -dM -E - /dev/null 2>&1 && echo 1) + +-include ../config/user-defs.mk + +# FIXME: Backwards compatibility remaps. +# +ifeq ($(SUPPORT_SLC),1) +SGX_FEATURE_SYSTEM_CACHE := 1 +endif +ifeq ($(BYPASS_SLC),1) +SGX_BYPASS_SYSTEM_CACHE := 1 +endif +ifeq ($(BYPASS_DCU),1) +SGX_BYPASS_DCU := 1 +endif +ifneq ($(SGXCOREREV),) +SGX_CORE_REV := $(SGXCOREREV) +endif + +# Core handling +# +ifeq ($(SGXCORE),) +$(error Must specify SGXCORE) +endif +ifeq ($(SGX_CORE_REV),) +override USE_SGX_CORE_REV_HEAD := 1 +else ifeq ($(SGX_CORE_REV),000) +override USE_SGX_CORE_REV_HEAD := 1 +override SGX_CORE_REV := +else +override USE_SGX_CORE_REV_HEAD := 0 +endif + +# Enforced dependencies. Move this to an include. 
+# +ifeq ($(SUPPORT_LINUX_USING_WORKQUEUES),1) +override PVR_LINUX_USING_WORKQUEUES := 1 +override PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE := 1 +override PVR_LINUX_TIMERS_USING_WORKQUEUES := 1 +ifneq ($(SUPPORT_ANDROID_FPGA),1) +override SYS_CUSTOM_POWERLOCK_WRAP := 1 +endif +else ifeq ($(SUPPORT_LINUX_USING_SHARED_WORKQUEUES),1) +override PVR_LINUX_USING_WORKQUEUES := 1 +override PVR_LINUX_MISR_USING_WORKQUEUE := 1 +override PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE := 1 +override SYS_CUSTOM_POWERLOCK_WRAP := 1 +endif + +ifneq ($(PDUMP),1) +override SUPPORT_PDUMP_MULTI_PROCESS := 0 +endif + +ifeq ($(SUPPORT_HYBRID_PB),1) +override SUPPORT_SHARED_PB := 1 +override SUPPORT_PERCONTEXT_PB := 1 +else ifeq ($(SUPPORT_PERCONTEXT_PB),1) +override SUPPORT_SHARED_PB := 0 +endif + +ifeq ($(NO_HARDWARE),1) +override SYS_USING_INTERRUPTS := 0 +override SUPPORT_HW_RECOVERY := 0 +override SUPPORT_ACTIVE_POWER_MANAGEMENT := 0 +endif + +ifeq ($(SGX_FEATURE_36BIT_MMU),1) +override IMG_ADDRSPACE_PHYSADDR_BITS := 64 +else +override IMG_ADDRSPACE_PHYSADDR_BITS := 32 +endif + +ifeq ($(SGXCORE),535) +ifeq ($(PVRSRV_USSE_EDM_STATUS_DEBUG),1) +SUPPORT_SGX_HWPERF ?= not-overridden +ifeq ($(SUPPORT_SGX_HWPERF),not-overridden) +$(warning Setting SUPPORT_SGX_HWPERF=0 because PVRSRV_USSE_EDM_STATUS_DEBUG=1) +SUPPORT_SGX_HWPERF := 0 +endif +endif +PVR2D_ALT_2DHW ?= 0 +endif + +# Multi-core handling must be done separately to other options +# Also do some sanity checks +# +ifeq ($(SGX_FEATURE_MP),1) +ifeq ($(SGX_FEATURE_MP_CORE_COUNT),) +ifeq ($(SGX_FEATURE_MP_CORE_COUNT_TA),) +$(error Must specify SGX_FEATURE_MP_CORE_COUNT or both SGX_FEATURE_MP_CORE_COUNT_TA and SGX_FEATURE_MP_CORE_COUNT_3D with SGX_FEATURE_MP) +else +$(eval $(call BothConfigC,SGX_FEATURE_MP_CORE_COUNT_TA,$(SGX_FEATURE_MP_CORE_COUNT_TA))) +endif +ifeq ($(SGX_FEATURE_MP_CORE_COUNT_3D),) +$(error Must specify SGX_FEATURE_MP_CORE_COUNT or both SGX_FEATURE_MP_CORE_COUNT_TA and SGX_FEATURE_MP_CORE_COUNT_3D with SGX_FEATURE_MP) +else 
+$(eval $(call BothConfigC,SGX_FEATURE_MP_CORE_COUNT_3D,$(SGX_FEATURE_MP_CORE_COUNT_3D))) +endif +else +$(eval $(call BothConfigC,SGX_FEATURE_MP_CORE_COUNT,$(SGX_FEATURE_MP_CORE_COUNT))) +endif +endif + +# Rather than requiring the user to have to define two variables (one quoted, +# one not), make PVRSRV_MODNAME a non-tunable and give it an overridable +# default here. +# +PVRSRV_MODNAME ?= pvrsrvkm + +# Normally builds don't touch this, but we use it to influence the components +# list. Make sure it is defined early enough to make this possible. +# +SUPPORT_PVRSRV_DEVICE_CLASS ?= 1 + +# Default place for binaries and shared libraries +BIN_DESTDIR ?= /usr/local/bin +SHARE_DESTDIR ?= /usr/local/share +SHLIB_DESTDIR ?= /usr/lib + +# The user didn't set CROSS_COMPILE. There's probably nothing wrong +# with that, but we'll let them know anyway. +# +ifeq ($(CROSS_COMPILE),) +$(warning CROSS_COMPILE is not set. Target components will be built with the host compiler) +endif + +# The user is trying to set one of the old SUPPORT_ options on the +# command line or in the environment. This isn't supported any more +# and will often break the build. The user is generally only trying +# to remove a component from the list of targets to build, so we'll +# point them at the new way of doing this. +define sanity-check-support-option-origin +ifeq ($$(filter undefined file,$$(origin $(1))),) +$$(warning *** Setting $(1) via $$(origin $(1)) is deprecated) +$$(error If you are trying to disable a component, use e.g. 
EXCLUDED_APIS="opengles1 opengl") +endif +endef +$(foreach _o,SYS_CFLAGS SYS_CXXFLAGS SYS_INCLUDES SYS_COMMON_LDFLAGS SYS_EXE_LDFLAGS SYS_LIB_LDFLAGS SYS_EXE_LDFLAGS_CXX SYS_LIB_LDFLAGS_CXX SUPPORT_EWS SUPPORT_NULLWS SUPPORT_OPENGLES1 SUPPORT_OPENGLES2 SUPPORT_OPENCL SUPPORT_OPENGL SUPPORT_SURFACELESS SUPPORT_UNITTESTS SUPPORT_XORG SUPPORT_WAYLAND,$(eval $(call sanity-check-support-option-origin,$(_o)))) + +# Check for words in EXCLUDED_APIS that aren't understood by the +# common/apis/*.mk files. This should be kept in sync with all the tests on +# EXCLUDED_APIS in those files +_excludable_apis := opencl opengl opengles1 opengles2 unittests scripts composerhal camerahal memtrackhal sensorhal +_excluded_apis := $(subst $(comma),$(space),$(EXCLUDED_APIS)) +_unrecognised := $(strip $(filter-out $(_excludable_apis),$(_excluded_apis))) +ifneq ($(_unrecognised),) +$(warning *** Unrecognised entries in EXCLUDED_APIS: $(_unrecognised)) +$(warning *** EXCLUDED_APIS was set via: $(origin EXCLUDED_APIS)) +$(error Excludable APIs are: $(_excludable_apis)) +endif + +override EXCLUDED_APIS := $(filter $(_excludable_apis), $(_excluded_apis)) + +# Build's selected list of components +# +-include components.mk + +# Set up the host and target compiler. +include ../config/compiler.mk + +# PDUMP needs extra components +# +ifeq ($(PDUMP),1) +ifneq ($(COMPONENTS),) +COMPONENTS += pdump +endif +ifeq ($(SUPPORT_DRI_DRM),1) +EXTRA_PVRSRVKM_COMPONENTS += dbgdrv +else +KERNEL_COMPONENTS += dbgdrv +endif +endif + +ifneq ($(WINDOW_SYSTEM),) +endif +ifeq ($(MESA_EGL),1) + SUPPORT_OPENGLES1_V1_ONLY := 1 + GLES1_EXTENSION_EGL_IMAGE_EXTERNAL := 1 + GLES2_EXTENSION_EGL_IMAGE_EXTERNAL := 1 +else +endif + +ifneq ($(SUPPORT_BUILD_LWS),) + ifneq ($(SYSROOT),) + $(info WARNING: You have specified a SYSROOT (or are using a buildroot compiler) and enabled SUPPORT_BUILD_LWS.) + $(info We will ignore the sysroot and will build all required LWS components.) 
+ $(info Unset SUPPORT_BUILD_LWS if this is not what you want.) + endif + override SYSROOT:= +endif + + +$(if $(filter config,$(D)),$(info Build configuration:)) + +################################# CONFIG #################################### + +# If KERNELDIR is set, write it out to the config.mk, with +# KERNEL_COMPONENTS and KERNEL_ID +# +ifneq ($(strip $(KERNELDIR)),) +include ../kernel_version.mk +PVRSRV_MODULE_BASEDIR ?= /lib/modules/$(KERNEL_ID)/extra +$(eval $(call BothConfigMake,KERNELDIR,$(KERNELDIR))) +# Needed only by install script +$(eval $(call BothConfigMake,KERNEL_ID,$(KERNEL_ID))) +$(eval $(call KernelConfigMake,PVRSRV_MODULE_BASEDIR,$(PVRSRV_MODULE_BASEDIR))) +$(eval $(call KernelConfigMake,KERNEL_COMPONENTS,$(KERNEL_COMPONENTS))) +$(eval $(call TunableKernelConfigMake,EXTRA_PVRSRVKM_COMPONENTS,)) +$(eval $(call TunableKernelConfigMake,EXTRA_KBUILD_SOURCE,)) + +# If KERNEL_CROSS_COMPILE is set to "undef", this is magically +# equivalent to being unset. If it is unset, we use CROSS_COMPILE +# (which might also be unset). If it is set, use it directly. +ifneq ($(KERNEL_CROSS_COMPILE),undef) +KERNEL_CROSS_COMPILE ?= $(CROSS_COMPILE) +$(eval $(call TunableBothConfigMake,KERNEL_CROSS_COMPILE,)) +endif + +# Alternatively, allow the CC used for kbuild to be overridden +# exactly, bypassing any KERNEL_CROSS_COMPILE configuration. +$(eval $(call TunableBothConfigMake,KERNEL_CC,)) + +# Check the KERNELDIR has a kernel built. +VMLINUX := +#$(strip $(wildcard $(KERNELDIR)/vmlinux)) + +ifneq ($(wildcard $(VMLINUX)),) + ifneq ($(shell file $(KERNELDIR)/vmlinux | grep 64-bit >/dev/null && echo 1),$(shell $(_CC) -dM -E - /dev/null && echo 1)) + $(error Attempting to build 64-bit DDK against 32-bit kernel, or 32-bit DDK against 64-bit kernel. This is not allowed.) 
+ endif + LINUXCFG := $(strip $(wildcard $(KERNELDIR)/.config)) + VMLINUX_IS_64BIT := $(shell file $(VMLINUX) | grep 64-bit >/dev/null || echo false) + VMLINUX_HAS_PAE36 := $(shell cat $(LINUXCFG) | grep CONFIG_X86_PAE=y >/dev/null || echo false) + VMLINUX_HAS_PAE40 := $(shell cat $(LINUXCFG) | grep CONFIG_ARM_LPAE=y >/dev/null || echo false) + VMLINUX_HAS_DMA32 := $(shell cat $(LINUXCFG) | grep CONFIG_ZONE_DMA32=y >/dev/null || echo false) + ifneq ($(VMLINUX_IS_64BIT),false) + $(warning $$(KERNELDIR)/vmlinux: Note: vmlinux is 64-bit, which is supported but currently experimental.) + endif +else + $(warning $$(KERNELDIR)/vmlinux does not exist. Kbuild may fail.) +endif + +endif # KERNELDIR + +ifneq ($(VMLINUX_HAS_PAE40),false) +ifeq ($(VMLINUX_HAS_DMA32),false) +$(warning SGX MMUs are currently supported up to only 36 bits max. Your Kernel is built with 40-bit PAE but does not have CONFIG_ZONE_DMA32.) +$(warning This means you must ensure the runtime system has <= 4GB of RAM, or there will be BIG problems...) +endif +endif + +ifneq ($(SGX_FEATURE_36BIT_MMU),1) +ifneq ($(VMLINUX_IS_64BIT),false) +# Kernel is 64-bit +ifeq ($(VMLINUX_HAS_DMA32),false) +$(warning SGX is configured with 32-bit MMU. Your Kernel is 64-bit but does not have CONFIG_ZONE_DMA32.) +$(warning This means you must ensure the runtime system has <= 4GB of RAM, or there will be BIG problems...) +endif +else + # Kernel is 32-bit +ifneq ($(VMLINUX_HAS_PAE36),false) +ifeq ($(VMLINUX_HAS_DMA32),false) +$(warning SGX is configured with 32-bit MMU. Your Kernel is 32-bit PAE, but does not have CONFIG_ZONE_DMA32. ) +$(warning This means you must ensure the runtime system has <= 4GB of RAM, or there will be BIG problems...) 
+endif +endif +endif +endif + + +# Ideally configured by platform Makefiles, as necessary +# +SHADER_DESTDIR := $(SHARE_DESTDIR)/pvr/shaders/ + +# Invariant options for Linux +# +$(eval $(call BothConfigC,LINUX,)) + +$(eval $(call BothConfigC,PVR_BUILD_DIR,"\"$(PVR_BUILD_DIR)\"")) +$(eval $(call BothConfigC,PVR_BUILD_TYPE,"\"$(BUILD)\"")) +$(eval $(call BothConfigC,PVRSRV_MODNAME,"\"$(PVRSRV_MODNAME)\"")) +$(eval $(call BothConfigMake,PVRSRV_MODNAME,$(PVRSRV_MODNAME))) +$(eval $(call BothConfigMake,PVR_BUILD_DIR,$(PVR_BUILD_DIR))) +$(eval $(call BothConfigMake,PVR_BUILD_TYPE,$(BUILD))) + +$(eval $(call TunableBothConfigC,USE_64BIT_COMPAT,)) +$(eval $(call TunableBothConfigC,SGXCORE,)) +$(eval $(call BothConfigC,SGX$(SGXCORE),)) +$(eval $(call BothConfigC,SUPPORT_SGX$(SGXCORE),)) + +$(eval $(call TunableBothConfigC,SUPPORT_SGX,1)) +$(eval $(call TunableBothConfigC,SGX_CORE_REV,)) +$(eval $(call TunableBothConfigC,USE_SGX_CORE_REV_HEAD,)) + +$(eval $(call BothConfigC,TRANSFER_QUEUE,)) +$(eval $(call BothConfigC,PVR_SECURE_HANDLES,)) + + +# Support syncing LISR & MISR. 
This is required for OS's where +# on SPM platforms the LISR and MISR can run at the same time and +# thus during powerdown we need to drain all pending LISRs before +# proceeding to do the actual powerdown +$(eval $(call KernelConfigC,SUPPORT_LISR_MISR_SYNC)) + +ifneq ($(DISPLAY_CONTROLLER),) +$(eval $(call BothConfigC,DISPLAY_CONTROLLER,$(DISPLAY_CONTROLLER))) +$(eval $(call BothConfigMake,DISPLAY_CONTROLLER,$(DISPLAY_CONTROLLER))) +endif + +ifneq ($(DRM_DISPLAY_CONTROLLER),) +$(eval $(call KernelConfigMake,DRM_DISPLAY_CONTROLLER,$(DRM_DISPLAY_CONTROLLER))) +endif + +ifneq ($(BUFFERCLASS_MODULE),) +$(eval $(call BothConfigMake,BUFFERCLASS_MODULE,$(BUFFERCLASS_MODULE))) +endif + +ifneq ($(strip $(KERNELDIR)),) +PVR_LINUX_MEM_AREA_POOL_MAX_PAGES ?= 0 +ifneq ($(PVR_LINUX_MEM_AREA_POOL_MAX_PAGES),0) +PVR_LINUX_MEM_AREA_USE_VMAP ?= 1 +include ../kernel_version.mk +ifeq ($(call kernel-version-at-least,3,0),true) +PVR_LINUX_MEM_AREA_POOL_ALLOW_SHRINK ?= 1 +endif +endif +$(eval $(call KernelConfigC,PVR_LINUX_MEM_AREA_POOL_MAX_PAGES,$(PVR_LINUX_MEM_AREA_POOL_MAX_PAGES))) +$(eval $(call TunableKernelConfigC,PVR_LINUX_MEM_AREA_USE_VMAP,)) +$(eval $(call TunableKernelConfigC,PVR_LINUX_MEM_AREA_POOL_ALLOW_SHRINK,)) +endif + + +$(eval $(call BothConfigMake,PVR_SYSTEM,$(PVR_SYSTEM))) + + +# Build-type dependent options +# +$(eval $(call BothConfigMake,BUILD,$(BUILD))) + +ifeq ($(BUILD),debug) +$(eval $(call BothConfigC,DEBUG,)) +$(eval $(call KernelConfigC,DEBUG_LINUX_MEMORY_ALLOCATIONS,)) +$(eval $(call KernelConfigC,DEBUG_LINUX_MEM_AREAS,)) +$(eval $(call KernelConfigC,DEBUG_LINUX_MMAP_AREAS,)) +$(eval $(call KernelConfigC,DEBUG_BRIDGE_KM,)) +else ifeq ($(BUILD),release) +$(eval $(call BothConfigC,RELEASE,)) +$(eval $(call TunableBothConfigMake,DEBUGLINK,1)) +$(eval $(call TunableBothConfigC,PVR_DBGPRIV_LEVEL,)) +else ifeq ($(BUILD),timing) +$(eval $(call BothConfigC,TIMING,)) +$(eval $(call TunableBothConfigMake,DEBUGLINK,1)) +else +$(error BUILD= must be either debug, 
release or timing) +endif + +# User-configurable options +# +$(eval $(call TunableBothConfigC,SUPPORT_PERCONTEXT_PB,1)) +$(eval $(call TunableBothConfigC,SUPPORT_SHARED_PB,)) +$(eval $(call TunableBothConfigC,SUPPORT_HYBRID_PB,)) +$(eval $(call TunableBothConfigC,SUPPORT_HW_RECOVERY,1)) +$(eval $(call TunableBothConfigC,SUPPORT_ACTIVE_POWER_MANAGEMENT,1)) +$(eval $(call TunableBothConfigC,SUPPORT_SGX_HWPERF,1)) +$(eval $(call TunableBothConfigC,SUPPORT_SGX_LOW_LATENCY_SCHEDULING,1)) +$(eval $(call TunableBothConfigC,SUPPORT_SGX_CONTEXT_PRIORITY_PER_THREAD,)) + +$(eval $(call TunableBothConfigC,SUPPORT_MEMINFO_IDS,)) +$(eval $(call TunableBothConfigC,SUPPORT_SGX_NEW_STATUS_VALS,1)) +$(eval $(call TunableBothConfigC,SUPPORT_PDUMP_MULTI_PROCESS,)) +$(eval $(call TunableBothConfigC,SUPPORT_DBGDRV_EVENT_OBJECTS,1)) +$(eval $(call TunableBothConfigC,SGX_FEATURE_SYSTEM_CACHE,)) +$(eval $(call TunableBothConfigC,SGX_BYPASS_SYSTEM_CACHE,)) +$(eval $(call TunableBothConfigC,SGX_BYPASS_DCU,)) +$(eval $(call TunableBothConfigC,SGX_FAST_DPM_INIT,)) +$(eval $(call TunableBothConfigC,SGX_FEATURE_MP,)) +$(eval $(call TunableBothConfigC,SGX_FEATURE_MP_PLUS,)) +$(eval $(call TunableBothConfigC,FPGA,)) +$(eval $(call TunableBothConfigC,PDUMP,)) +$(eval $(call TunableBothConfigC,MEM_TRACK_INFO_DEBUG,)) +$(eval $(call TunableBothConfigC,PVRSRV_DEVMEM_TIME_STATS,)) +$(eval $(call TunableBothConfigC,NO_HARDWARE,)) +$(eval $(call TunableBothConfigC,PDUMP_DEBUG_OUTFILES,)) +$(eval $(call TunableBothConfigC,PVRSRV_USSE_EDM_STATUS_DEBUG,)) +$(eval $(call TunableBothConfigC,PVRSRV_RESET_ON_HWTIMEOUT,)) +$(eval $(call TunableBothConfigC,SYS_USING_INTERRUPTS,1)) +$(eval $(call TunableBothConfigC,SUPPORT_EXTERNAL_SYSTEM_CACHE,)) +$(eval $(call TunableBothConfigC,PVRSRV_NEW_PVR_DPF,)) +$(eval $(call TunableBothConfigC,PVRSRV_NEED_PVR_DPF,)) +$(eval $(call TunableBothConfigC,PVRSRV_NEED_PVR_ASSERT,)) +$(eval $(call TunableBothConfigC,PVRSRV_NEED_PVR_TRACE,)) +$(eval $(call 
TunableBothConfigC,SUPPORT_SECURE_33657_FIX,)) +$(eval $(call TunableBothConfigC,SUPPORT_ION,)) +$(eval $(call TunableBothConfigC,SUPPORT_DMABUF,)) +$(eval $(call TunableBothConfigC,SUPPORT_HWRECOVERY_TRACE_LIMIT,)) +$(eval $(call TunableBothConfigC,SUPPORT_PVRSRV_DEVICE_CLASS,)) +$(eval $(call TunableBothConfigC,SUPPORT_PVRSRV_GET_DC_SYSTEM_BUFFER,1)) +$(eval $(call TunableBothConfigC,SUPPORT_NV12_FROM_2_HWADDRS,)) +$(eval $(call TunableBothConfigC,SGX_FEATURE_36BIT_MMU,)) +$(eval $(call TunableBothConfigC,IMG_ADDRSPACE_PHYSADDR_BITS,)) +$(eval $(call TunableBothConfigC,PVRSRV_EXTRA_PB_DEBUG,)) +$(eval $(call TunableBothConfigC,PVRSRV_DEBUG_CCB_MAX,)) + +$(eval $(call TunableKernelConfigC,SUPPORT_LINUX_X86_WRITECOMBINE,1)) +$(eval $(call TunableKernelConfigC,SUPPORT_LINUX_X86_PAT,1)) +$(eval $(call TunableKernelConfigC,SGX_DYNAMIC_TIMING_INFO,)) +$(eval $(call TunableKernelConfigC,SYS_SGX_ACTIVE_POWER_LATENCY_MS,)) +$(eval $(call TunableKernelConfigC,SYS_CUSTOM_POWERLOCK_WRAP,)) +$(eval $(call TunableKernelConfigC,PVR_LINUX_USING_WORKQUEUES,)) +$(eval $(call TunableKernelConfigC,PVR_LINUX_MISR_USING_WORKQUEUE,)) +$(eval $(call TunableKernelConfigC,PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE,)) +$(eval $(call TunableKernelConfigC,PVR_LINUX_TIMERS_USING_WORKQUEUES,)) +$(eval $(call TunableKernelConfigC,PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE,)) +$(eval $(call TunableKernelConfigC,LDM_PLATFORM,)) +$(eval $(call TunableKernelConfigC,PVR_LDM_DEVICE_TREE,)) +$(eval $(call TunableKernelConfigC,PVR_LDM_PLATFORM_PRE_REGISTERED,)) +$(eval $(call TunableKernelConfigC,PVR_LDM_PLATFORM_PRE_REGISTERED_DEV,)) +$(eval $(call TunableKernelConfigC,PVR_LDM_DRIVER_REGISTRATION_NAME,"\"$(PVRSRV_MODNAME)\"")) +$(eval $(call TunableKernelConfigC,LDM_PCI,)) +$(eval $(call TunableKernelConfigC,PVRSRV_DUMP_MK_TRACE,)) +$(eval $(call TunableKernelConfigC,PVRSRV_DUMP_KERNEL_CCB,)) +$(eval $(call TunableKernelConfigC,PVRSRV_REFCOUNT_DEBUG,)) +$(eval $(call 
TunableKernelConfigC,PVRSRV_MMU_MAKE_READWRITE_ON_DEMAND,)) +$(eval $(call TunableKernelConfigC,HYBRID_SHARED_PB_SIZE,)) +$(eval $(call TunableKernelConfigC,SUPPORT_LARGE_GENERAL_HEAP,)) +$(eval $(call TunableKernelConfigC,SUPPORT_OLD_ION_API,)) +$(eval $(call TunableKernelConfigC,TTRACE,)) +$(eval $(call TunableKernelConfigC,TTRACE_LARGE_BUFFER,)) +$(eval $(call TunableKernelConfigC,SUPPORT_PDUMP_SYNC_DEBUG,)) +$(eval $(call TunableKernelConfigC,SUPPORT_PER_SYNC_DEBUG,)) +$(eval $(call TunableKernelConfigC,SUPPORT_FORCE_SYNC_DUMP,)) + +ifneq ($(filter opengl,$(COMPONENTS)),) +SUPPORT_OPENGL = 1 +endif +ifneq ($(filter opengles1,$(COMPONENTS)),) +endif +ifneq ($(filter opengles2,$(COMPONENTS)),) +endif +ifneq ($(filter opencl,$(COMPONENTS)),) +endif + + +$(eval $(call TunableBothConfigMake,OPTIM,)) +$(eval $(call TunableBothConfigMake,SUPPORT_ION,)) +$(eval $(call TunableBothConfigMake,SUPPORT_DMABUF,)) +$(eval $(call TunableBothConfigMake,SUPPORT_PVRSRV_DEVICE_CLASS,)) + + +$(eval $(call TunableKernelConfigMake,TTRACE,)) + + +$(if $(USE_CCACHE),$(if $(USE_DISTCC),$(error\ +Enabling both USE_CCACHE and USE_DISTCC at the same time is not supported))) + +SUPPORT_SGX_LOW_LATENCY_SCHEDULING ?= 1 +SUPPORT_SGX_CONTEXT_PRIORITY_PER_THREAD ?= 0 + +ifeq ($(SUPPORT_SGX_CONTEXT_PRIORITY_PER_THREAD),1) +ifeq ($(SUPPORT_OPENGL),) +ifeq ($(SUPPORT_SGX_LOW_LATENCY_SCHEDULING),1) +$(eval $(call BothConfigC,SGX_FEATURE_CONTEXT_PRIORITY_PER_THREAD,)) +else +$(info SGX_CONTEXT_PRIORITY_PER_THREAD requires Low latency scheduling to be enabled) +endif +else +$(info SGX_CONTEXT_PRIORITY_PER_THREAD requires OpenGL support to be disabled) +endif +endif + +endif # INTERNAL_CLOBBER_ONLY + +export INTERNAL_CLOBBER_ONLY +export TOP +export OUT + +MAKE_ETC := -Rr --no-print-directory -C $(TOP) TOP=$(TOP) OUT=$(OUT) \ + -f eurasiacon/build/linux2/toplevel.mk + +# This must match the default value of MAKECMDGOALS below, and the default +# goal in toplevel.mk +.DEFAULT_GOAL := build + +ifeq 
($(MAKECMDGOALS),) +MAKECMDGOALS := build +else +# We can't pass autogen to toplevel.mk +MAKECMDGOALS := $(filter-out autogen,$(MAKECMDGOALS)) +endif + +.PHONY: autogen +autogen: +ifeq ($(INTERNAL_CLOBBER_ONLY),) + @$(MAKE) -s --no-print-directory -C $(EURASIAROOT) \ + -f eurasiacon/build/linux2/prepare_tree.mk \ + LDM_PCI=$(LDM_PCI) LDM_PLATFORM=$(LDM_PLATFORM) +else + @: +endif + +# This deletes built-in suffix rules. Otherwise the submake isn't run when +# saying e.g. "make thingy.a" +.SUFFIXES: + +# Because we have a match-anything rule below, we'll run the main build when +# we're actually trying to remake various makefiles after they're read in. +# These rules try to prevent that +%.mk: ; +Makefile%: ; +Makefile: ; + +.PHONY: build kbuild install +build kbuild install: autogen + @$(if $(MAKECMDGOALS),$(MAKE) $(MAKE_ETC) $(MAKECMDGOALS) $(eval MAKECMDGOALS :=),:) + +%: autogen + @$(if $(MAKECMDGOALS),$(MAKE) $(MAKE_ETC) $(MAKECMDGOALS) $(eval MAKECMDGOALS :=),:) diff --git a/sgx_km/eurasia_km/eurasiacon/build/linux2/defs.mk b/sgx_km/eurasia_km/eurasiacon/build/linux2/defs.mk new file mode 100644 index 0000000..29fc365 --- /dev/null +++ b/sgx_km/eurasia_km/eurasiacon/build/linux2/defs.mk @@ -0,0 +1,198 @@ +########################################################################### ### +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ########################################################################### + +define must-be-defined +$(if $(filter undefined,$(origin $(1))),$(error In makefile $(THIS_MAKEFILE): $$($(1)) must be defined),) +endef + +define must-be-nonempty +$(if $(strip $($(1))),,$(error In makefile $(THIS_MAKEFILE): $$($(1)) must contain a value)) +endef + +define directory-must-exist +$(if $(wildcard $(abspath $(1)/)),,$(error Directory $(1) must exist)) +endef + +define one-word-only +$(if $(filter-out $(firstword $($(1))),$($(1))),$(error In makefile $(THIS_MAKEFILE): $$($(1)) must contain only one word),) +endef + +define module-library +$(patsubst lib%.so,%,$(if $($(1)_target),$($(1)_target),$(1).so)) +endef + +# This is done to allow module type makefiles to use $(THIS_MAKEFILE) +define register-module +INTERNAL_MAKEFILE_FOR_MODULE_$(1) := $(THIS_MAKEFILE) +endef + +define process-module-arch +MODULE_ARCH := $$(strip $(2)) +include $$(MAKE_TOP)/moduledefs_common.mk +include $$(MAKE_TOP)/moduledefs/$$(MODULE_ARCH).mk +include $$(MAKE_TOP)/$$(strip $$($$(THIS_MODULE)_type)).mk +.SECONDARY: $$(MODULE_INTERMEDIATES_DIR) +$$(MODULE_INTERMEDIATES_DIR): + $$(make-directory) +MODULE_CLEAN_TARGETS += $$(MODULE_INTERMEDIATES_DIR) +INTERNAL_TARGETS_FOR_$(1) += $$(MODULE_TARGETS) +INTERNAL_CLEAN_TARGETS_FOR_$(1) += $$(MODULE_CLEAN_TARGETS) +INTERNAL_CLOBBER_TARGETS_FOR_$(1) += $$(MODULE_CLEAN_TARGETS) $$(MODULE_CLOBBER_TARGETS) $$(MODULE_TARGETS) +endef + +target_neutral_types := \ + apk \ + bison_parser \ + copy_files \ + custom \ + flex_lexer \ + dex \ + gen_dispatch \ + image_header \ + inline_shaders \ + java_archive \ + module_group \ + pds_header \ + use_header + +doc_types := doc + +define calculate-arch-list +# Work out the target platforms for this module +MODULE_ARCH_LIST := $(2) +ifeq ($$(MODULE_ARCH_LIST),) +ifneq ($$(filter $(1),$(doc_types)),) +MODULE_ARCH_LIST := doc +else +ifneq ($$(filter $(1),$(target_neutral_types)),) +MODULE_ARCH_LIST := target_neutral +else 
+ifneq ($$(filter $(1),kernel_module),) +MODULE_ARCH_LIST := $(TARGET_PRIMARY_ARCH) +else +MODULE_ARCH_LIST := $(TARGET_ALL_ARCH) +endif +endif +endif +endif +endef + +define process-module +THIS_MODULE := $(1) +THIS_MAKEFILE := $(INTERNAL_MAKEFILE_FOR_MODULE_$(1)) +INTERNAL_TARGETS_FOR_$(1) := +INTERNAL_CLEAN_TARGETS_FOR_$(1) := +INTERNAL_CLOBBER_TARGETS_FOR_$(1) := +include $$(MAKE_TOP)/this_makefile.mk +$$(call must-be-nonempty,THIS_MAKEFILE) +$$(call must-be-nonempty,$(1)_type) +$$(eval $$(call calculate-arch-list,$$($(1)_type),$$($(1)_arch))) +INTERNAL_ARCH_LIST_FOR_$(1) := $$(MODULE_ARCH_LIST) +$$(foreach _m,$$(MODULE_ARCH_LIST),$$(eval $$(call process-module-arch,$(1),$$(_m)))) +endef + +# This can be used by module_type.mk files to indicate that they can't be +# built as host_module_type +define target-build-only +$(if $(filter true,$(MODULE_HOST_BUILD)),$(error In makefile $(THIS_MAKEFILE): Module $(THIS_MODULE) attempted to build a host $(1), which is not supported)) +endef + +define relative-to-top +$(patsubst $(TOP)/%,%,$(1)) +endef + +define cc-check +$(shell \ + CC_CHECK=$(patsubst @%,%,$(CC_CHECK)) && \ + $(patsubst @%,%,$(CHMOD)) +x $$CC_CHECK && \ + $$CC_CHECK --cc "$(1)" --out "$(2)" $(3)) +endef + +define cc-is-clang +$(call cc-check,$(patsubst @%,%,$(CC)),$(OUT),--clang) +endef + +define cc-option +$(call cc-check,$(patsubst @%,%,$(CC)),$(OUT),$(1)) +endef + +define cxx-option +$(call cc-check,$(patsubst @%,%,$(CXX)),$(OUT),$(1)) +endef + +define host-cc-option +$(call cc-check,$(patsubst @%,%,$(HOST_CC)),$(OUT),$(1)) +endef + +define host-cxx-option +$(call cc-check,$(patsubst @%,%,$(HOST_CXX)),$(OUT),$(1)) +endef + +define kernel-cc-option +$(call cc-check,$(KERNEL_CROSS_COMPILE)gcc,$(OUT),$(1)) +endef + +# Turn a particular warning on, or explicitly turn it off, depending on +# the value of W. The "-W" or "-Wno-" part of the warning need not be +# specified. 
+define cc-optional-warning +$(call cc-option,-W$(if $(W),,no-)$(patsubst -W%,%,$(patsubst -Wno-%,%,$(1)))) +endef + +define host-cc-optional-warning +$(call host-cc-option,-W$(if $(W),,no-)$(patsubst -W%,%,$(patsubst -Wno-%,%,$(1)))) +endef + +define kernel-cc-optional-warning +$(call kernel-cc-option,-W$(if $(W),,no-)$(patsubst -W%,%,$(patsubst -Wno-%,%,$(1)))) +endef + +define module-info-line +$(if $(filter modules,$(D)),$(info [$(THIS_MODULE)] <$(MODULE_ARCH)> $(1)),) +endef + +# $(call if-exists,A,B) => A if A is a file which exists, otherwise B +define if-exists +$(if $(wildcard $(1)),$(1),$(2)) +endef + +define unsupported-module-var +$(if $(strip $($(THIS_MODULE)_$(1))),$(error In makefile $(THIS_MAKEFILE): Setting '$(THIS_MODULE)_$(1)' has no effect, because $(THIS_MODULE) has type $($(THIS_MODULE)_type))) +endef diff --git a/sgx_km/eurasia_km/eurasiacon/build/linux2/kbuild/Makefile.template b/sgx_km/eurasia_km/eurasiacon/build/linux2/kbuild/Makefile.template new file mode 100644 index 0000000..822c3df --- /dev/null +++ b/sgx_km/eurasia_km/eurasiacon/build/linux2/kbuild/Makefile.template @@ -0,0 +1,91 @@ +########################################################################### ### +#@Title Root kernel makefile +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ########################################################################### + +# This top-level kbuild makefile builds all the Linux kernel modules in the +# DDK. To run kbuild, this makefile is copied to $(TARGET_PRIMARY_OUT)/kbuild/Makefile +# and make is invoked in $(TARGET_PRIMARY_OUT)/kbuild. + +# This makefile doesn't define any kbuild special variables apart from +# ccflags-y and obj-m. The variables for objects are picked up by including +# the kbuild makefile fragments named in $(INTERNAL_KBUILD_MAKEFILES). The +# list of objects that these fragments make is collected in +# $(INTERNAL_KBUILD_OBJECTS) and $(INTERNAL_EXTRA_KBUILD_OBJECTS). These +# variables are set according to the build's $(KERNEL_COMPONENTS) and +# $(EXTRA_PVRSRVKM_COMPONENTS). To add a new kernel module to the build, edit +# these variables in the per-build Makefile. + +include $(OUT)/config_kernel.mk + +.SECONDARY: + +$(OUT)/$(TARGET_PRIMARY_ARCH)/kbuild/external/%.c: $(EXTRA_KBUILD_SOURCE)/%.c + @if [ ! -e $(dir $@) ]; then mkdir -p $(dir $@); fi + @if [ ! -h $@ ]; then ln -sf $< $@; fi + +$(OUT)/$(TARGET_PRIMARY_ARCH)/kbuild/%.c: $(TOP)/%.c + @if [ ! -e $(dir $@) ]; then mkdir -p $(dir $@); fi + @if [ ! 
-h $@ ]; then ln -sf $< $@; fi + +ccflags-y += -D__linux__ -include $(OUT)/config_kernel.h \ + -I$(OUT)/include \ + -I$(TOP)/include4 \ + -I$(TOP)/services4/include \ + -I$(TOP)/services4/system/$(PVR_SYSTEM) \ + -I$(TOP)/services4/system/include \ + -I$(TOP)/services4/srvkm/bridged \ + -I$(TOP)/services4/srvkm/bridged/sgx \ + -I$(TOP)/services4/srvkm/common \ + -I$(TOP)/services4/srvkm/devices/sgx \ + -I$(TOP)/services4/srvkm/env/linux \ + -I$(TOP)/services4/srvkm/include + +ifeq ($(PVR_LOCAL_HWDEFS),) +ccflags-y += -I$(TOP)/services4/srvkm/hwdefs +else +ccflags-y += -I$(TOP)/hwdefs +endif + +include $(INTERNAL_KBUILD_MAKEFILES) + +$(if $(pvrsrvkm-y),,$(error pvrsrvkm-y was empty, which could mean that srvkm is missing from $$(KERNEL_COMPONENTS))) +pvrsrvkm-y += $(foreach _m,$(INTERNAL_EXTRA_KBUILD_OBJECTS:.o=),$($(_m)-y)) + +obj-m += $(INTERNAL_KBUILD_OBJECTS) diff --git a/sgx_km/eurasia_km/eurasiacon/build/linux2/kbuild/kbuild.mk b/sgx_km/eurasia_km/eurasiacon/build/linux2/kbuild/kbuild.mk new file mode 100644 index 0000000..e289a25 --- /dev/null +++ b/sgx_km/eurasia_km/eurasiacon/build/linux2/kbuild/kbuild.mk @@ -0,0 +1,87 @@ +########################################################################### ### +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ########################################################################### + +$(if $(strip $(KERNELDIR)),,$(error KERNELDIR must be set)) +$(call directory-must-exist,$(KERNELDIR)) + +$(TARGET_PRIMARY_OUT)/kbuild/Makefile: $(MAKE_TOP)/kbuild/Makefile.template + @[ ! -e $(dir $@) ] && mkdir -p $(dir $@) || true + $(CP) -f $< $@ + +# We need to make INTERNAL_KBUILD_MAKEFILES absolute because the files will be +# read while chdir'd into $(KERNELDIR) +INTERNAL_KBUILD_MAKEFILES := $(abspath $(foreach _m,$(KERNEL_COMPONENTS) $(EXTRA_PVRSRVKM_COMPONENTS),$(if $(INTERNAL_KBUILD_MAKEFILE_FOR_$(_m)),$(INTERNAL_KBUILD_MAKEFILE_FOR_$(_m)),$(error Unknown kbuild module "$(_m)")))) +INTERNAL_KBUILD_OBJECTS := $(foreach _m,$(KERNEL_COMPONENTS),$(if $(INTERNAL_KBUILD_OBJECTS_FOR_$(_m)),$(INTERNAL_KBUILD_OBJECTS_FOR_$(_m)),$(error BUG: Unknown kbuild module "$(_m)" should have been caught earlier))) +INTERNAL_EXTRA_KBUILD_OBJECTS := $(foreach _m,$(EXTRA_PVRSRVKM_COMPONENTS),$(if $(INTERNAL_KBUILD_OBJECTS_FOR_$(_m)),$(INTERNAL_KBUILD_OBJECTS_FOR_$(_m)),$(error BUG: Unknown kbuild module "$(_m)" should have been caught earlier))) +.PHONY: kbuild kbuild_clean + +kbuild: $(TARGET_PRIMARY_OUT)/kbuild/Makefile + $(if $(V),,@)$(MAKE) -Rr --no-print-directory -C $(KERNELDIR) \ + M=$(abspath $(TARGET_PRIMARY_OUT)/kbuild) \ + INTERNAL_KBUILD_MAKEFILES="$(INTERNAL_KBUILD_MAKEFILES)" \ + INTERNAL_KBUILD_OBJECTS="$(INTERNAL_KBUILD_OBJECTS)" \ + INTERNAL_EXTRA_KBUILD_OBJECTS="$(INTERNAL_EXTRA_KBUILD_OBJECTS)" \ + EXTRA_KBUILD_SOURCE="$(EXTRA_KBUILD_SOURCE)" \ + TARGET_PRIMARY_ARCH=$(TARGET_PRIMARY_ARCH) \ + CROSS_COMPILE="$(CCACHE) $(KERNEL_CROSS_COMPILE)" \ + EXTRA_CFLAGS="$(ALL_KBUILD_CFLAGS)" \ + CC=$(if $(KERNEL_CC),$(KERNEL_CC),$(KERNEL_CROSS_COMPILE)gcc) \ + V=$(V) W=$(W) \ + TOP=$(TOP) + @for kernel_module in $(addprefix $(TARGET_PRIMARY_OUT)/kbuild/,$(INTERNAL_KBUILD_OBJECTS:.o=.ko)); do \ + cp $$kernel_module $(TARGET_PRIMARY_OUT); \ + done + +kbuild_clean: 
$(TARGET_PRIMARY_OUT)/kbuild/Makefile + $(if $(V),,@)$(MAKE) -Rr --no-print-directory -C $(KERNELDIR) \ + M=$(abspath $(TARGET_PRIMARY_OUT)/kbuild) \ + INTERNAL_KBUILD_MAKEFILES="$(INTERNAL_KBUILD_MAKEFILES)" \ + INTERNAL_KBUILD_OBJECTS="$(INTERNAL_KBUILD_OBJECTS)" \ + INTERNAL_EXTRA_KBUILD_OBJECTS="$(INTERNAL_EXTRA_KBUILD_OBJECTS)" \ + EXTRA_KBUILD_SOURCE="$(EXTRA_KBUILD_SOURCE)" \ + TARGET_PRIMARY_ARCH=$(TARGET_PRIMARY_ARCH) \ + CROSS_COMPILE="$(CCACHE) $(KERNEL_CROSS_COMPILE)" \ + EXTRA_CFLAGS="$(ALL_KBUILD_CFLAGS)" \ + CC=$(if $(KERNEL_CC),$(KERNEL_CC),$(KERNEL_CROSS_COMPILE)gcc) \ + V=$(V) W=$(W) \ + TOP=$(TOP) clean + +kbuild_install: install +kbuild: install_script_km diff --git a/sgx_km/eurasia_km/eurasiacon/build/linux2/kernel_module.mk b/sgx_km/eurasia_km/eurasiacon/build/linux2/kernel_module.mk new file mode 100644 index 0000000..095fb02 --- /dev/null +++ b/sgx_km/eurasia_km/eurasiacon/build/linux2/kernel_module.mk @@ -0,0 +1,93 @@ +########################################################################### ### +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. 
+# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +### ########################################################################### + +# Rules for making kernel modules with kbuild. 
This makefile doesn't define +# any rules that build the modules, it only copies the kbuild Makefile into +# the right place and then invokes kbuild to do the actual build + +$(call target-build-only,kernel module) + +MODULE_KBUILD_DIR := $(MODULE_OUT)/kbuild + +# $(THIS_MODULE)_makefile names the kbuild makefile fragment used to build +# this module's objects +$(call must-be-nonempty,$(THIS_MODULE)_makefile) +MODULE_KBUILD_MAKEFILE := $($(THIS_MODULE)_makefile) +$(if $(wildcard $(abspath $(MODULE_KBUILD_MAKEFILE))),,$(error In makefile $(THIS_MAKEFILE): Module $(THIS_MODULE) requires kbuild makefile $(MODULE_KBUILD_MAKEFILE), which is missing)) + +# $(THIS_MODULE)_target specifies the name of the kernel module +$(call must-be-nonempty,$(THIS_MODULE)_target) +MODULE_TARGETS := $($(THIS_MODULE)_target) +MODULE_KBUILD_OBJECTS := $($(THIS_MODULE)_target:.ko=.o) + +$(call module-info-line,kernel module: $(MODULE_TARGETS)) + +# Unusually, we define $(THIS_MODULE)_install_path if the user didn't, as we +# can't use MODULE_INSTALL_PATH in the scripts.mk logic. +ifeq ($($(THIS_MODULE)_install_path),) +$(THIS_MODULE)_install_path := \ + $${MOD_DESTDIR}/$(patsubst $(MODULE_OUT)/%,%,$(MODULE_TARGETS)) +endif + +MODULE_INSTALL_PATH := $($(THIS_MODULE)_install_path) + +# Here we could maybe include $(MODULE_KBUILD_MAKEFILE) and look at +# $(MODULE_KBUILD_OBJECTS)-y to see which source files might be built + +.PHONY: $(THIS_MODULE) +$(THIS_MODULE): MODULE_KBUILD_MAKEFILE := $(MODULE_KBUILD_MAKEFILE) +$(THIS_MODULE): MODULE_KBUILD_OBJECTS := $(MODULE_KBUILD_OBJECTS) +$(THIS_MODULE): + @echo "kbuild module '$@'" + @echo " MODULE_KBUILD_MAKEFILE := $(MODULE_KBUILD_MAKEFILE)" + @echo " MODULE_KBUILD_OBJECTS := $(MODULE_KBUILD_OBJECTS)" + @echo ' Being built:' $(if $(filter $@,$(KERNEL_COMPONENTS)),"yes (separate module)",$(if $(filter $@,$(EXTRA_PVRSRVKM_COMPONENTS)),"yes (into pvrsrvkm)","no")) + @echo "Module $@ is a kbuild module. 
Run 'make kbuild' to make it" + @false + +$(MODULE_INTERMEDIATES_DIR)/.install: MODULE_TYPE := $($(THIS_MODULE)_type) +$(MODULE_INTERMEDIATES_DIR)/.install: MODULE_INSTALL_PATH := $(MODULE_INSTALL_PATH) +$(MODULE_INTERMEDIATES_DIR)/.install: MODULE_TARGETS := $(patsubst $(MODULE_OUT)/%,%,$(MODULE_TARGETS)) +$(MODULE_INTERMEDIATES_DIR)/.install: $(THIS_MAKEFILE) | $(MODULE_INTERMEDIATES_DIR) + @echo 'install_file $(MODULE_TARGETS) $(MODULE_INSTALL_PATH) "$(MODULE_TYPE)" 0644 0:0' >$@ + +ALL_KBUILD_MODULES += $(THIS_MODULE) +INTERNAL_KBUILD_MAKEFILE_FOR_$(THIS_MODULE) := $(MODULE_KBUILD_MAKEFILE) +INTERNAL_KBUILD_OBJECTS_FOR_$(THIS_MODULE) := $(MODULE_KBUILD_OBJECTS) diff --git a/sgx_km/eurasia_km/eurasiacon/build/linux2/kernel_version.mk b/sgx_km/eurasia_km/eurasiacon/build/linux2/kernel_version.mk new file mode 100644 index 0000000..35ecceb --- /dev/null +++ b/sgx_km/eurasia_km/eurasiacon/build/linux2/kernel_version.mk @@ -0,0 +1,100 @@ +########################################################################### ### +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. 
+# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ########################################################################### + +$(if $(KERNELDIR),,$(error KERNELDIR must be set to obtain a version)) + +override KERNEL_VERSION := \ + $(shell grep "^VERSION = " $(KERNELDIR)/Makefile | cut -f3 -d' ') +override KERNEL_PATCHLEVEL := \ + $(shell grep "^PATCHLEVEL = " $(KERNELDIR)/Makefile | cut -f3 -d' ') +override KERNEL_SUBLEVEL := \ + $(shell grep "^SUBLEVEL = " $(KERNELDIR)/Makefile | cut -f3 -d' ') +override KERNEL_EXTRAVERSION := \ + $(shell grep "^EXTRAVERSION = " $(KERNELDIR)/Makefile | cut -f3 -d' ') + +# Break the kernel version up into a space separated list +kernel_version_as_list := $(KERNEL_VERSION) \ + $(KERNEL_PATCHLEVEL) \ + $(KERNEL_SUBLEVEL) \ + $(patsubst .%,%,$(KERNEL_EXTRAVERSION)) + +# The base ID doesn't have to be accurate; we only use it for +# feature checks which will not care about extraversion bits +# +override KERNEL_BASE_ID := \ + $(KERNEL_VERSION).$(KERNEL_PATCHLEVEL).$(KERNEL_SUBLEVEL) + +# Try to get the kernel ID from the kernel.release file. +# +KERNEL_ID ?= \ + $(shell cat $(KERNELDIR)/include/config/kernel.release 2>/dev/null) + +# If the kernel ID isn't set yet, try to set it from the UTS_RELEASE +# macro. +# +ifeq ($(strip $(KERNEL_ID)),) +KERNEL_ID := \ + $(shell grep -h '\#define UTS_RELEASE' \ + $(KERNELDIR)/include/linux/* | cut -f3 -d' ' | sed s/\"//g) +endif + +ifeq ($(strip $(KERNEL_ID)),) +KERNEL_ID := \ + $(KERNEL_VERSION).$(KERNEL_PATCHLEVEL).$(KERNEL_SUBLEVEL)$(KERNEL_EXTRAVERSION) +endif + +# Return 1 if the kernel version is at least the value passed to the +# function, else return nothing. 
+# Examples +# $(call kernel-version-at-least,2,6,35) +# $(call kernel-version-at-least,2,6,35,7) +# +define kernel-version-at-least +$(shell set -- $(kernel_version_as_list) 0 0 0 0; \ + Y=true; \ + for D in $1 $2 $3 $4; \ + do \ + [ $$1 ] || break; \ + [ $$1 -eq $$D ] && { shift; continue; };\ + [ $$1 -lt $$D ] && Y=; \ + break; \ + done; \ + echo $$Y) +endef diff --git a/sgx_km/eurasia_km/eurasiacon/build/linux2/moduledefs/host_x86_64.mk b/sgx_km/eurasia_km/eurasiacon/build/linux2/moduledefs/host_x86_64.mk new file mode 100644 index 0000000..1130045 --- /dev/null +++ b/sgx_km/eurasia_km/eurasiacon/build/linux2/moduledefs/host_x86_64.mk @@ -0,0 +1,57 @@ +########################################################################### ### +#@File +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. 
+# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ########################################################################### + +MODULE_HOST_BUILD := true + +MODULE_CC := $(HOST_CC) +MODULE_CXX := $(HOST_CXX) + +MODULE_HOST_CFLAGS := $(ALL_HOST_CFLAGS) $($(THIS_MODULE)_cflags) +MODULE_HOST_CXXFLAGS := $(ALL_HOST_CXXFLAGS) $($(THIS_MODULE)_cxxflags) +MODULE_HOST_LDFLAGS := $(ALL_HOST_LDFLAGS) -L$(MODULE_OUT) $($(THIS_MODULE)_ldflags) + +ifneq ($(BUILD),debug) + ifeq ($(USE_LTO),1) + MODULE_HOST_LDFLAGS := \ + $(sort $(filter-out -W% -D%,$(ALL_HOST_CFLAGS) $(ALL_HOST_CXXFLAGS))) \ + $(MODULE_HOST_LDFLAGS) + endif +endif diff --git a/sgx_km/eurasia_km/eurasiacon/build/linux2/moduledefs/target_armv7-a.mk b/sgx_km/eurasia_km/eurasiacon/build/linux2/moduledefs/target_armv7-a.mk new file mode 100644 index 0000000..bf0979e --- /dev/null +++ b/sgx_km/eurasia_km/eurasiacon/build/linux2/moduledefs/target_armv7-a.mk @@ -0,0 +1,130 @@ +########################################################################### ### +#@File +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. 
+# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +### ########################################################################### + +MODULE_HOST_BUILD := + +MODULE_CC := $(CC_SECONDARY) -march=armv7-a -mfloat-abi=softfp +MODULE_CXX := $(CXX_SECONDARY) -march=armv7-a -mfloat-abi=softfp + +ifneq ($(BUILD),debug) +MODULE_CC := $(MODULE_CC) -mthumb +MODULE_CXX := $(MODULE_CXX) -mthumb +endif + +MODULE_CFLAGS := $(ALL_CFLAGS) $($(THIS_MODULE)_cflags) +MODULE_CXXFLAGS := $(ALL_CXXFLAGS) $($(THIS_MODULE)_cxxflags) +MODULE_LDFLAGS := $($(THIS_MODULE)_ldflags) -L$(MODULE_OUT) -Xlinker -rpath-link=$(MODULE_OUT) $(ALL_LDFLAGS) + +# Since this is a target module, add system-specific include flags. 
+MODULE_INCLUDE_FLAGS := \ + $(SYS_INCLUDES_RESIDUAL) \ + $(addprefix -isystem ,$(filter-out $(patsubst -I%,%,$(filter -I%,$(MODULE_INCLUDE_FLAGS))),$(SYS_INCLUDES_ISYSTEM))) \ + $(MODULE_INCLUDE_FLAGS) + +ifneq ($(SUPPORT_ANDROID_PLATFORM),) + +_obj := $(TARGET_ROOT)/product/$(TARGET_DEVICE)/obj$(if $(MULTIARCH),_arm,) + +# Linker flags used to find system libraries. +MODULE_SYSTEM_LIBRARY_DIR_FLAGS += \ + -L$(_obj)/lib \ + -Xlinker -rpath-link=$(_obj)/lib \ + -L$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/lib \ + -Xlinker -rpath-link=$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/lib +ifneq ($(wildcard $(TARGET_ROOT)/product/$(TARGET_DEVICE)/vendor),) +MODULE_SYSTEM_LIBRARY_DIR_FLAGS += \ + -L$(TARGET_ROOT)/product/$(TARGET_DEVICE)/vendor/lib \ + -Xlinker -rpath-link=$(TARGET_ROOT)/product/$(TARGET_DEVICE)/vendor/lib +else +MODULE_SYSTEM_LIBRARY_DIR_FLAGS += \ + -L$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/vendor/lib \ + -Xlinker -rpath-link=$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/vendor/lib +endif + +ifeq ($(NDK_ROOT),) + +MODULE_INCLUDE_FLAGS := \ + -isystem $(ANDROID_ROOT)/bionic/libc/arch-arm/include \ + -isystem $(ANDROID_ROOT)/bionic/libc/kernel/uapi/asm-arm \ + -isystem $(ANDROID_ROOT)/bionic/libm/include/arm \ + $(MODULE_INCLUDE_FLAGS) + +else # NDK_ROOT + +_obj := $(NDK_ROOT)/platforms/$(TARGET_PLATFORM)/arch-arm/usr + +MODULE_SYSTEM_LIBRARY_DIR_FLAGS := \ + -L$(NDK_ROOT)/sources/cxx-stl/llvm-libc++/libs/armeabi-v7a \ + $(MODULE_SYSTEM_LIBRARY_DIR_FLAGS) + +endif # NDK_ROOT + +MODULE_LDFLAGS += $(MODULE_SYSTEM_LIBRARY_DIR_FLAGS) + +MODULE_EXE_LDFLAGS := \ + -Bdynamic -nostdlib -Wl,-dynamic-linker,/system/bin/linker -lc + +MODULE_LIB_LDFLAGS := $(MODULE_EXE_LDFLAGS) + +# Keeping it same as C for now +MODULE_LIB_LDFLAGS_CXX := $(MODULE_EXE_LDFLAGS) + +MODULE_EXE_CRTBEGIN := $(_obj)/lib/crtbegin_dynamic.o +MODULE_EXE_CRTEND := $(_obj)/lib/crtend_android.o + +MODULE_LIB_CRTBEGIN := $(_obj)/lib/crtbegin_so.o +MODULE_LIB_CRTEND := 
$(_obj)/lib/crtend_so.o + +MODULE_LIBGCC := -Wl,--version-script,$(MAKE_TOP)/common/libgcc.lds $(LIBGCC_SECONDARY) + +MODULE_ARCH_TAG := $(_obj) + +endif # SUPPORT_ANDROID_PLATFORM + +ifneq ($(BUILD),debug) +ifeq ($(USE_LTO),1) +MODULE_LDFLAGS := \ + $(sort $(filter-out -W% -D%,$(ALL_CFLAGS) $(ALL_CXXFLAGS))) \ + $(MODULE_LDFLAGS) +endif +endif + +MODULE_ARCH_BITNESS := 32 diff --git a/sgx_km/eurasia_km/eurasiacon/build/linux2/moduledefs/target_mips32r6el.mk b/sgx_km/eurasia_km/eurasiacon/build/linux2/moduledefs/target_mips32r6el.mk new file mode 100644 index 0000000..0891966 --- /dev/null +++ b/sgx_km/eurasia_km/eurasiacon/build/linux2/moduledefs/target_mips32r6el.mk @@ -0,0 +1,134 @@ +########################################################################### ### +#@File +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. 
+# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +### ########################################################################### + +MIPS_ABI_FLAGS := -EL -march=mips32r2 -mabi=32 -mfp32 -modd-spreg -mno-fused-madd -Wa,-mmxu + +MODULE_CC := $(CC) $(MIPS_ABI_FLAGS) +MODULE_CXX := $(CXX) $(MIPS_ABI_FLAGS) + +MODULE_CFLAGS := $(ALL_CFLAGS) $($(THIS_MODULE)_cflags) $(MIPS_ABI_FLAGS) +MODULE_CXXFLAGS := $(ALL_CXXFLAGS) $($(THIS_MODULE)_cxxflags) $(MIPS_ABI_FLAGS) +MODULE_LDFLAGS := $($(THIS_MODULE)_ldflags) -L$(MODULE_OUT) -Xlinker -rpath-link=$(MODULE_OUT) $(ALL_LDFLAGS) $(MIPS_ABI_FLAGS) + +# Since this is a target module, add system-specific include flags. 
+MODULE_INCLUDE_FLAGS := \ + $(SYS_INCLUDES_RESIDUAL) \ + $(addprefix -isystem ,$(filter-out $(patsubst -I%,%,$(filter -I%,$(MODULE_INCLUDE_FLAGS))),$(SYS_INCLUDES_ISYSTEM))) \ + $(MODULE_INCLUDE_FLAGS) + +ifneq ($(SUPPORT_ANDROID_PLATFORM),) + +_obj := $(TARGET_ROOT)/product/$(TARGET_DEVICE)/obj$(if $(MULTIARCH),_mips,) + +# Linker flags used to find system libraries. +MODULE_SYSTEM_LIBRARY_DIR_FLAGS += \ + -L$(_obj)/lib \ + -Xlinker -rpath-link=$(_obj)/lib \ + -L$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/lib \ + -Xlinker -rpath-link=$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/lib +ifneq ($(wildcard $(TARGET_ROOT)/product/$(TARGET_DEVICE)/vendor),) +MODULE_SYSTEM_LIBRARY_DIR_FLAGS += \ + -L$(TARGET_ROOT)/product/$(TARGET_DEVICE)/vendor/lib \ + -Xlinker -rpath-link=$(TARGET_ROOT)/product/$(TARGET_DEVICE)/vendor/lib +else +MODULE_SYSTEM_LIBRARY_DIR_FLAGS += \ + -L$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/vendor/lib \ + -Xlinker -rpath-link=$(TARGET_ROOT)/product/$(TARGET_DEVICE)/system/vendor/lib +endif + +ifeq ($(NDK_ROOT),) + +MODULE_INCLUDE_FLAGS := \ + -isystem $(ANDROID_ROOT)/bionic/libc/arch-mips/include \ + -isystem $(ANDROID_ROOT)/bionic/libc/kernel/uapi/asm-mips \ + -isystem $(ANDROID_ROOT)/bionic/libm/include/mips \ + $(MODULE_INCLUDE_FLAGS) + +else # NDK_ROOT + +_obj := $(NDK_ROOT)/platforms/$(TARGET_PLATFORM)/arch-mips/usr + +MODULE_SYSTEM_LIBRARY_DIR_FLAGS := \ + -L$(NDK_ROOT)/sources/cxx-stl/llvm-libc++/libs/mips \ + $(MODULE_SYSTEM_LIBRARY_DIR_FLAGS) + +endif # NDK_ROOT + +MODULE_LDFLAGS += $(MODULE_SYSTEM_LIBRARY_DIR_FLAGS) + +MODULE_EXE_LDFLAGS := \ + -Bdynamic -nostdlib -Wl,-dynamic-linker,/system/bin/linker -lc \ + -Wl,-melf32ltsmip + +MODULE_LIB_LDFLAGS := $(MODULE_EXE_LDFLAGS) +# Keeping it same as C for now +MODULE_LIB_LDFLAGS_CXX := $(MODULE_EXE_LDFLAGS) + +MODULE_EXE_CRTBEGIN := $(_obj)/lib/crtbegin_dynamic.o +MODULE_EXE_CRTEND := $(_obj)/lib/crtend_android.o + +MODULE_LIB_CRTBEGIN := $(_obj)/lib/crtbegin_so.o +MODULE_LIB_CRTEND 
:= $(_obj)/lib/crtend_so.o + +MODULE_LIBGCC := $(shell $(patsubst @%,%,$(MODULE_CC)) -print-libgcc-file-name) +MODULE_LIBGCC := -Wl,--version-script,$(MAKE_TOP)/common/libgcc.lds $(MODULE_LIBGCC) + +MODULE_ARCH_TAG := $(_obj) + +else + +# On Linux, we currently don't need to specify any flags to find the system +# libraries. +MODULE_SYSTEM_LIBRARY_DIR_FLAGS := + +MODULE_ARCH_TAG := mipsel + +endif # SUPPORT_ANDROID_PLATFORM + +ifneq ($(BUILD),debug) +ifeq ($(USE_LTO),1) +MODULE_LDFLAGS := \ + $(sort $(filter-out -W% -D%,$(ALL_CFLAGS) $(ALL_CXXFLAGS))) \ + $(MODULE_LDFLAGS) +endif +endif + +MODULE_ARCH_BITNESS := 32 diff --git a/sgx_km/eurasia_km/eurasiacon/build/linux2/moduledefs/target_neutral.mk b/sgx_km/eurasia_km/eurasiacon/build/linux2/moduledefs/target_neutral.mk new file mode 100644 index 0000000..b51b614 --- /dev/null +++ b/sgx_km/eurasia_km/eurasiacon/build/linux2/moduledefs/target_neutral.mk @@ -0,0 +1,45 @@ +########################################################################### ### +#@File +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. 
+# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ########################################################################### + +MODULE_HOST_BUILD := + +MODULE_BISON_FLAGS := $(ALL_BISON_FLAGS) $($(THIS_MODULE)_bisonflags) +MODULE_FLEX_FLAGS := $(ALL_FLEX_FLAGS) $($(THIS_MODULE)_flexflags) diff --git a/sgx_km/eurasia_km/eurasiacon/build/linux2/moduledefs_common.mk b/sgx_km/eurasia_km/eurasiacon/build/linux2/moduledefs_common.mk new file mode 100644 index 0000000..55007cf --- /dev/null +++ b/sgx_km/eurasia_km/eurasiacon/build/linux2/moduledefs_common.mk @@ -0,0 +1,164 @@ +########################################################################### ### +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. 
If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +### ########################################################################### + +MODULE_OUT := $(RELATIVE_OUT)/$(MODULE_ARCH) +MODULE_INTERMEDIATES_DIR := $(MODULE_OUT)/intermediates/$(THIS_MODULE) + +MODULE_TARGETS := +MODULE_CLEAN_TARGETS := +MODULE_CLOBBER_TARGETS := + +MODULE_HOST_BUILD := +MODULE_CLEAN_TARGETS := +MODULE_CLOBBER_TARGETS := + +MODULE_CFLAGS := +MODULE_CXXFLAGS := +MODULE_LDFLAGS := +MODULE_BISON_FLAGS := +MODULE_FLEX_FLAGS := +MODULE_FLEXXX_FLAGS := + +MODULE_HOST_CFLAGS := +MODULE_HOST_CXXFLAGS := +MODULE_HOST_LDFLAGS := + +MODULE_ARCH_TAG := $(patsubst i%86,i686,$(subst host_,,$(subst target_,,$(MODULE_ARCH)))) +MODULE_ARCH_BITNESS := + +# Only allow cflags that do not affect code generation. This is to ensure +# proper binary compatibility when LTO (Link-Time Optimization) is enabled. +# We make exceptions for the below flags which will all fail linkage in +# non-LTO mode if incorrectly specified. +# +# NOTE: Only used by static_library and objects right now. Other module +# types should not be affected by complex code generation flags w/ LTO. +# Set MODULE_CHECK_CFLAGS in the module makefile to enable this check. 
+MODULE_CHECK_CFLAGS := +MODULE_ALLOWED_CFLAGS := -W% -D% -std=% -fPIC -fPIE -pie -m32 + +# -L flags for library search dirs: these are relative to $(TOP), unless +# they're absolute paths +MODULE_LIBRARY_DIR_FLAGS := $(foreach _path,$($(THIS_MODULE)_libpaths),$(if $(filter /%,$(_path)),-L$(call relative-to-top,$(_path)),-L$(_path))) +# -L options to find system libraries (may be arch-specific) +MODULE_SYSTEM_LIBRARY_DIR_FLAGS := +# -I flags for header search dirs (same rules as for -L) +MODULE_INCLUDE_FLAGS := $(foreach _path,$($(THIS_MODULE)_includes),$(if $(filter /%,$(_path)),-I$(call relative-to-top,$(_path)),-I$(_path))) + +# These define the rules for finding source files. +# +# - If a name begins with a slash, we strip $(TOP) off the front if it +# begins with $(TOP). This is so that we don't get really long error +# messages from the compiler if the source tree is in a deeply nested +# directory, but we still do get absolute paths if you say "make +# OUT=/tmp/somewhere" +# +# - Otherwise, if a name contains a slash and begins with $(OUT), we leave +# it as it is. 
This is so you can say "module_src := +# $(TARGET_INTERMEDIATES)/something/generated.c" +# +# - Otherwise, we assume it's a path referring to somewhere under the +# directory containing Linux.mk, and add $(THIS_DIR) to it +_SOURCES_WITHOUT_SLASH := \ + $(strip $(foreach _s,$($(THIS_MODULE)_src),$(if $(findstring /,$(_s)),,$(_s)))) +_SOURCES_WITH_SLASH := \ + $(strip $(foreach _s,$($(THIS_MODULE)_src),$(if $(findstring /,$(_s)),$(_s),))) +MODULE_SOURCES := $(addprefix $(THIS_DIR)/,$(_SOURCES_WITHOUT_SLASH)) +MODULE_SOURCES += $(call relative-to-top,$(filter /%,$(_SOURCES_WITH_SLASH))) + +_RELATIVE_SOURCES_WITH_SLASH := \ + $(filter-out /%,$(_SOURCES_WITH_SLASH)) +_OUTDIR_RELATIVE_SOURCES_WITH_SLASH := \ + $(filter $(RELATIVE_OUT)/%,$(_RELATIVE_SOURCES_WITH_SLASH)) +_THISDIR_RELATIVE_SOURCES_WITH_SLASH := \ + $(filter-out $(RELATIVE_OUT)/%,$(_RELATIVE_SOURCES_WITH_SLASH)) +MODULE_SOURCES += $(_OUTDIR_RELATIVE_SOURCES_WITH_SLASH) +MODULE_SOURCES += $(addprefix $(THIS_DIR)/,$(_THISDIR_RELATIVE_SOURCES_WITH_SLASH)) + +# Add generated sources +MODULE_SOURCES += $(addprefix $(MODULE_OUT)/,$($(THIS_MODULE)_src_relative)) + +# MODULE_LIBRARY_FLAGS contains the flags to link each library. The rules +# are: +# +# module_staticlibs := mylib +# module_libs := mylib +# Use -lmylib +# +# module_extlibs := mylib +# Use $(libmylib_ldflags) if that variable is defined (empty counts as +# defined). 
Otherwise use -lmylib +# +# module_libs := :mylib +# Use -l:mylib.so + +MODULE_LIBRARY_FLAGS := \ + $(addprefix -l, $($(THIS_MODULE)_staticlibs)) \ + $(addprefix -l,$($(THIS_MODULE)_libs)) \ + $(foreach _lib,$($(THIS_MODULE)_extlibs),$(if $(filter undefined,$(origin lib$(_lib)_ldflags)),-l$(_lib),$(lib$(_lib)_ldflags))) + +ifeq ($(MODULE_HOST_BUILD),) + ifneq ($(SYSROOT),) + ifneq ($(SYSROOT),/) + ifeq (${MODULE_ARCH_TAG},armhf) + MULTIARCH_DIR := arm-linux-gnueabihf + else ifeq (${MODULE_ARCH_TAG},i686) + MULTIARCH_DIR := i386-linux-gnu + else + MULTIARCH_DIR := ${MODULE_ARCH_TAG}-linux-gnu + endif + + # Restrict pkg-config to looking only in the SYSROOT + PKG_CONFIG_LIBDIR := ${SYSROOT}/usr/local/lib/pkgconfig:${SYSROOT}/usr/lib/${MULTIARCH_DIR}/pkgconfig:${SYSROOT}/usr/lib/pkgconfig:${SYSROOT}/usr/share/pkgconfig + + # SYSROOT doesn't always do the right thing. So explicitly add necessary paths to the link path + MODULE_LDFLAGS += -Xlinker -rpath-link=${SYSROOT}/usr/lib/${MULTIARCH_DIR} -Xlinker -rpath-link=${SYSROOT}/lib/${MULTIARCH_DIR} -Xlinker -rpath-link=${SYSROOT}/usr/lib/ + endif + endif +endif + +# pkg-config integration; +# FIXME: We don't support arbitrary CFLAGS yet (just includes) +ifneq ($(PKG_CONFIG),) +$(foreach _package,$($(THIS_MODULE)_packages),\ + $(eval MODULE_INCLUDE_FLAGS += `$(PKG_CONFIG) --cflags-only-I $(_package)`)\ + $(eval MODULE_LIBRARY_FLAGS += `$(PKG_CONFIG) --libs-only-l $(_package)`)\ + $(eval MODULE_LIBRARY_DIR_FLAGS += `$(PKG_CONFIG) --libs-only-L $(_package)`)) +endif diff --git a/sgx_km/eurasia_km/eurasiacon/build/linux2/modules.mk b/sgx_km/eurasia_km/eurasiacon/build/linux2/modules.mk new file mode 100644 index 0000000..971f21a --- /dev/null +++ b/sgx_km/eurasia_km/eurasiacon/build/linux2/modules.mk @@ -0,0 +1,48 @@ +########################################################################### ### +#@Title Module processing +#@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". 
+# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +### ########################################################################### + +# Bits for processing $(modules) after reading in each Linux.mk + +#$(info ---- $(modules) ----) + +$(foreach _m,$(modules),$(if $(filter $(_m),$(ALL_MODULES)),$(error In makefile $(THIS_MAKEFILE): Duplicate module $(_m) (first seen in $(INTERNAL_MAKEFILE_FOR_MODULE_$(_m))) listed in $$(modules)),$(eval $(call register-module,$(_m))))) + +ALL_MODULES += $(modules) diff --git a/sgx_km/eurasia_km/eurasiacon/build/linux2/omap_android/Makefile b/sgx_km/eurasia_km/eurasiacon/build/linux2/omap_android/Makefile new file mode 100644 index 0000000..295ec95 --- /dev/null +++ b/sgx_km/eurasia_km/eurasiacon/build/linux2/omap_android/Makefile @@ -0,0 +1,263 @@ +########################################################################### ### +#@Title Root makefile for omap4430 Android. Builds everything else. +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ########################################################################### + +# If a TARGET_PRODUCT is specified but not a TARGET_DEVICE, try to +# derive the TARGET_DEVICE from TARGET_PRODUCT. +# +ifeq ($(TARGET_DEVICE),) +override TARGET_DEVICE := \ + $(patsubst mini_%,%,$(patsubst %_full,%,$(TARGET_PRODUCT))) +endif + +ifeq ($(TARGET_DEVICE),) +override TARGET_DEVICE := jacinto6evm +endif + +# Customize this build as per the TARGET_DEVICE setting +# +ifneq ($(filter blaze blaze_tablet panda tuna maguro toro mysid yakju,$(TARGET_DEVICE)),) +SGXCORE := 540 +SGX_CORE_REV := 120 +HAL_VARIANT := omap4 +endif +ifneq ($(filter blaze.4470 blaze_tablet.4470,$(TARGET_DEVICE)),) +SGXCORE := 544 +SGX_CORE_REV := 112 +HAL_VARIANT := omap4 +endif +ifneq ($(filter omap5sevm panda5,$(TARGET_DEVICE)),) +SGXCORE := 544 +SGX_CORE_REV := 105 +SGX_FEATURE_MP := 1 +SGX_FEATURE_SYSTEM_CACHE := 1 +SGX_FEATURE_MP_CORE_COUNT := 2 +HAL_VARIANT := omap5 + +# OMAP Product Version +VS_PRODUCT_VERSION := 5 + +# FIXME: Re-enable this ASAP +SUPPORT_ACTIVE_POWER_MANAGEMENT := 0 +endif +ifneq ($(filter jacinto6evm am57xevm,$(TARGET_DEVICE)),) +SGXCORE := 544 +SGX_CORE_REV := 116 +SGX_FEATURE_MP := 1 +SGX_FEATURE_SYSTEM_CACHE := 1 +SGX_FEATURE_MP_CORE_COUNT := 2 +HAL_VARIANT := jacinto6 + +# OMAP Product Version +# FIXME: Get rid of this +VS_PRODUCT_VERSION := 5 + +# FIXME: Re-enable this ASAP +SUPPORT_ACTIVE_POWER_MANAGEMENT := 0 + +SUPPORT_PVRSRV_DEVICE_CLASS := 0 + +endif + +ifneq ($(filter am57xevm,$(TARGET_DEVICE)),) +HAL_VARIANT := am57xevm +endif + +# Handle any TARGET_DEVICE remapping. Not all DDK TARGET_DEVICEs +# require new Android TARGET_DEVICE builds. 
+# +ifeq ($(TARGET_DEVICE),blaze.4470) +override TARGET_DEVICE := blaze +endif +ifeq ($(TARGET_DEVICE),blaze_tablet.4470) +override TARGET_DEVICE := blaze_tablet +endif + +SGX_DYNAMIC_TIMING_INFO := 1 + +SUPPORT_LINUX_USING_WORKQUEUES := 1 + +SUPPORT_PVRSRV_ANDROID_SYSTRACE := 1 + +SUPPORT_DMABUF := 1 + +# Enable PVR_DPF prints for fatal/error +PVRSRV_NEED_PVR_DPF := 1 +PVRSRV_NEW_PVR_DPF := 1 + +# TODO: Set SUPPORT_PVRSRV_DEVICE_CLASS to 0 +#DISPLAY_CONTROLLER should not be used for DRM based display controller +#ifneq ($(SUPPORT_PVRSRV_DEVICE_CLASS),0) +#DISPLAY_CONTROLLER := omaplfb +#endif +SUPPORT_PVRSRV_DEVICE_CLASS := 0 + +PVR_SYSTEM := omap + +KERNEL_CROSS_COMPILE ?= arm-eabi- + +# We have more memory on OMAP platforms, so we can spare to make the +# pool larger, and have higher resolutions which benefit from it. +# +PVR_LINUX_MEM_AREA_POOL_MAX_PAGES ?= 10800 + +SUPPORT_TI_VERSION_STRING := 1 + +include ../common/android/paths.mk +include ../common/android/arch.mk +include ../common/android/features.mk + +ifneq ($(strip $(KERNELDIR)),) + include ../kernel_version.mk + ifeq ($(call kernel-version-at-least,2,6,35),true) + PVR_NO_OMAP_TIMER := 1 + endif + ifeq ($(call kernel-version-at-least,2,6,39),true) + ifeq ($(LDM_PLATFORM),1) + PVR_LDM_PLATFORM_PRE_REGISTERED := 1 + PVR_LDM_PLATFORM_PRE_REGISTERED_DEV := "\"pvrsrvkm\"" + endif + endif + ifeq ($(call kernel-version-at-least,3,0),true) + SUPPORT_DC_CMDCOMPLETE_WHEN_NO_LONGER_DISPLAYED := 1 + SYS_OMAP_HAS_DVFS_FRAMEWORK := 0 + endif + ifeq ($(call kernel-version-at-least,3,8,13),true) + PVR_LDM_DEVICE_TREE := 1 + endif +else + $(warning "KERNELDIR is not set, so can't feature check DVFS or dsscomp.") + $(warning "Assuming we want DVFS and dsscomp support.") + SUPPORT_DC_CMDCOMPLETE_WHEN_NO_LONGER_DISPLAYED := 1 + SYS_OMAP_HAS_DVFS_FRAMEWORK := 0 +endif + +ifneq ($(LDM_PLATFORM),1) +SUPPORT_LINUX_USING_WORKQUEUES := 0 +SUPPORT_LINUX_USING_SHARED_WORKQUEUES := 1 +SUPPORT_ACTIVE_POWER_MANAGEMENT := 0 +ifneq 
($(SUPPORT_PVRSRV_DEVICE_CLASS),0) +DISPLAY_CONTROLLER := pvrlfb +OMAP_NON_FLIP_DISPLAY := 1 +endif +else # LDM_PLATFORM != 1 +ifneq ($(SUPPORT_PVRSRV_DEVICE_CLASS),0) +DISPLAY_CONTROLLER := omaplfb +endif +endif # LDM_PLATFORM != 1 + +ifeq ($(SUPPORT_DRI_DRM),1) +ifeq ($(PVR_LDM_PLATFORM_PRE_REGISTERED),1) +PVR_DRI_DRM_PLATFORM_DEV := 1 +PVR_DRI_DRM_STATIC_BUS_ID := 1 +PVR_DRI_DRM_DEV_BUS_ID := "\"platform:pvrsrvkm\"" +else +PVR_DRI_DRM_NOT_PCI := 1 +KERNEL_COMPONENTS += linux_drm +endif +ifneq ($(SUPPORT_PVRSRV_DEVICE_CLASS),0) +EXTRA_PVRSRVKM_COMPONENTS += $(DISPLAY_CONTROLLER) +endif +EXTRA_KBUILD_SOURCE := $(KERNELDIR) +# FIXME: Only required for comparison with X's KM +PVR_SECURE_DRM_AUTH_EXPORT := 1 +ifneq ($(OMAP_NON_FLIP_DISPLAY),1) +PVR_DISPLAY_CONTROLLER_DRM_IOCTL := 1 +endif +else # SUPPORT_DRI_DRM == 1 +ifneq ($(SUPPORT_PVRSRV_DEVICE_CLASS),0) +KERNEL_COMPONENTS += $(DISPLAY_CONTROLLER) +endif +endif # SUPPORT_DRI_DRM == 1 + +SUPPORT_ANDROID_OMAP_NV12 := 1 + +PVR_ANDROID_USE_WINDOW_TRANSFORM_HINT := 1 + +PVR_ANDROID_PLATFORM_HAS_LINUX_FBDEV := 1 + +# Handle Google's OMAP-based products +# +ifneq ($(filter tuna maguro toro mysid yakju,$(TARGET_DEVICE)),) +# These default on in tuna_defconfig +PVRSRV_USSE_EDM_STATUS_DEBUG ?= 1 +PVRSRV_DUMP_MK_TRACE ?= 1 +# Go back to the old compiler for tuna kernel modules +KERNEL_CROSS_COMPILE := arm-eabi- +endif + +ifeq ($(NO_HARDWARE),1) +ifeq ($(SUPPORT_DC_CMDCOMPLETE_WHEN_NO_LONGER_DISPLAYED),1) +$(info WARNING: SUPPORT_DC_CMDCOMPLETE_WHEN_NO_LONGER_DISPLAYED=1 is incompatible with NO_HARDWARE=1) +$(info WARNING: Setting SUPPORT_DC_CMDCOMPLETE_WHEN_NO_LONGER_DISPLAYED=0 and switching to dc_nohw) +override SUPPORT_DC_CMDCOMPLETE_WHEN_NO_LONGER_DISPLAYED := 0 +ifneq ($(SUPPORT_PVRSRV_DEVICE_CLASS),0) +override DISPLAY_CONTROLLER := dc_nohw +KERNEL_COMPONENTS += dc_nohw +endif +endif # SUPPORT_DC_CMDCOMPLETE_WHEN_NO_LONGER_DISPLAYED == 1 +endif # NO_HARDWARE == 1 + +# we are not using PVR composerhal 
+#PVR_ANDROID_COMPOSERHAL ?= omap + +include ../config/core.mk +include ../common/android/extra_config.mk +include ../common/dridrm.mk +include ../common/opencl.mk +include ../common/omap4.mk + +# Not all OMAP kernels have a compatible DVFS framework +# +$(eval $(call TunableKernelConfigC,SYS_OMAP_HAS_DVFS_FRAMEWORK,)) + +# If set, services allows two flips to enter the processing queue, +# and does not add read dependencies to the set of buffers last +# flipped to. This is necessary for DSS composition on OMAP. +# +$(eval $(call TunableKernelConfigC,SUPPORT_DC_CMDCOMPLETE_WHEN_NO_LONGER_DISPLAYED,)) + +# On OMAP a TILER-specific NV12 format is supported. +# +$(eval $(call TunableUserConfigMake,SUPPORT_ANDROID_OMAP_NV12,)) + +$(eval $(call TunableUserConfigC,PVR_MUTEXES_COND_USING_PTHREAD_CONDVARS,1)) + +$(eval $(call TunableKernelConfigC,SUPPORT_TI_VERSION_STRING,)) diff --git a/sgx_km/eurasia_km/eurasiacon/build/linux2/prepare_tree.mk b/sgx_km/eurasia_km/eurasiacon/build/linux2/prepare_tree.mk new file mode 100644 index 0000000..75dcca6 --- /dev/null +++ b/sgx_km/eurasia_km/eurasiacon/build/linux2/prepare_tree.mk @@ -0,0 +1,56 @@ +########################################################################### ### +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ########################################################################### + +.PHONY: prepare_tree + +prepare_tree: + +INTERNAL_INCLUDED_PREPARE_HEADERS := +-include eurasiacon/build/linux2/prepare_headers.mk +ifneq ($(INTERNAL_INCLUDED_PREPARE_HEADERS),true) +missing_headers := $(strip $(shell test ! -e include4/pvrversion.h && echo true)) +ifdef missing_headers +$(info ) +$(info ** include4/pvrversion.h is missing, and cannot be rebuilt.) +$(info ** Cannot continue.) +$(info ) +$(error Missing headers) +endif +endif diff --git a/sgx_km/eurasia_km/eurasiacon/build/linux2/pvrversion.mk b/sgx_km/eurasia_km/eurasiacon/build/linux2/pvrversion.mk new file mode 100644 index 0000000..c56bba7 --- /dev/null +++ b/sgx_km/eurasia_km/eurasiacon/build/linux2/pvrversion.mk @@ -0,0 +1,53 @@ +########################################################################### ### +#@Title Extract info from pvrversion.h +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. 
+# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ########################################################################### + +# Version information +PVRVERSION_H := $(TOP)/include4/pvrversion.h + +# scripts.mk uses these to set the install script's version suffix +PVRVERSION_MAJ := $(shell perl -ne '/\sPVRVERSION_MAJ\s+(\w+)/ and print $$1' $(PVRVERSION_H)) +PVRVERSION_MIN := $(shell perl -ne '/\sPVRVERSION_MIN\s+(\w+)/ and print $$1' $(PVRVERSION_H)) +PVRVERSION_FAMILY := $(shell perl -ne '/\sPVRVERSION_FAMILY\s+"(\S+)"/ and print $$1' $(PVRVERSION_H)) +PVRVERSION_BRANCHNAME := $(shell perl -ne '/\sPVRVERSION_BRANCHNAME\s+"(\S+)"/ and print $$1' $(PVRVERSION_H)) +PVRVERSION_BUILD := $(shell perl -ne '/\sPVRVERSION_BUILD\s+(\w+)/ and print $$1' $(PVRVERSION_H)) + +PVRVERSION_NUM := $(PVRVERSION_MAJ).$(PVRVERSION_MIN).$(PVRVERSION_BUILD) +PVRVERSION := "$(PVRVERSION_FAMILY)_$(PVRVERSION_BRANCHNAME)\@$(PVRVERSION_BUILD)" diff --git a/sgx_km/eurasia_km/eurasiacon/build/linux2/scripts.mk b/sgx_km/eurasia_km/eurasiacon/build/linux2/scripts.mk new file mode 100644 index 0000000..9293bd5 --- /dev/null +++ b/sgx_km/eurasia_km/eurasiacon/build/linux2/scripts.mk @@ -0,0 +1,364 @@ +########################################################################### ### +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ########################################################################### + +ifeq ($(SUPPORT_ANDROID_PLATFORM),) + +define if-component +ifneq ($$(filter $(1),$$(COMPONENTS)),) +M4DEFS += $(2) +endif +endef + +define if-kernel-component +ifneq ($$(filter $(1),$$(KERNEL_COMPONENTS)),) +M4DEFS_K += $(2) +endif +endef + +# common.m4 lives here +# +M4FLAGS := -I$(MAKE_TOP)/scripts + +# The driver version is required to rename libraries +# +include $(MAKE_TOP)/pvrversion.mk + +# These defs are required for both KM and UM install script. +M4DEFS_K := \ + -DKM_SUFFIX=ko \ + -DPVRVERSION="$(PVRVERSION)" \ + -DPVR_BUILD_DIR=$(PVR_BUILD_DIR) \ + -DPVRSRV_MODNAME=$(PVRSRV_MODNAME) + +ifneq ($(BUFFERCLASS_MODULE),) +$(eval $(call if-kernel-component,$(BUFFERCLASS_MODULE),\ + -DBUFFER_CLASS_DEVICE=$(BUFFERCLASS_MODULE))) +endif + +ifneq ($(DISPLAY_CONTROLLER),) +$(eval $(call if-kernel-component,$(DISPLAY_CONTROLLER),\ + -DDISPLAY_CONTROLLER=$(DISPLAY_CONTROLLER))) +endif + +ifeq ($(PDUMP),1) +M4DEFS_K += -DPDUMP=1 +endif + +ifeq ($(SUPPORT_DRM),1) +M4DEFS_K += -DSUPPORT_DRM=1 +ifeq ($(SUPPORT_DRM_DC_MODULE),1) +M4DEFS_K += -DSUPPORT_DRM_DC_MODULE=1 +endif +endif + +# These defs are either obsolete features, or not derived from +# user variables +# +M4DEFS := \ + -DLIB_SUFFIX=so \ + -DSUPPORT_SRVINIT=1 \ + -DSOLIB_VERSION=$(PVRVERSION_MAJ).$(PVRVERSION_MIN).$(PVRVERSION_BUILD) \ + -DSUPPORT_UNITTESTS=1 + +ifeq ($(MESA_EGL),1) + M4DEFS += -DSUPPORT_MESA=1 +else + ifneq ($(filter xorg,$(COMPONENTS)),) + ifneq ($(filter opengl,$(COMPONENTS)),) + M4DEFS += -DSUPPORT_MESA=1 + endif + endif +endif + +ifeq ($(SUPPORT_BUILD_LWS),1) + M4DEFS += -DLWS_INSTALL_TREE=1 +endif + +ifneq ($(DRM_DISPLAY_CONTROLLER),) + M4DEFS_K += \ + -DHAVE_DRM_DISPLAY_MODULE=1 \ + -DDISPLAY_KERNEL_MODULE=$(DRM_DISPLAY_CONTROLLER) \ + -DDISPLAY_CONTROLLER=$(DRM_DISPLAY_CONTROLLER) +else + M4DEFS_K += -DDISPLAY_KERNEL_MODULE=$(DISPLAY_CONTROLLER) +endif + +ifeq ($(PVR_LWS_NOBC),1) + M4DEFS += 
-DNO_BUFFER_CLASS_MODULE=1 +endif + +# Map COMPONENTS on to SUPPORT_ defs +# +$(eval $(call if-component,opengles1,\ + -DSUPPORT_OPENGLES1=1 -DOGLES1_MODULE=$(opengles1_target) \ + -DSUPPORT_OPENGLES1_V1_ONLY=$(if $(SUPPORT_OPENGLES1_V1_ONLY),1,0))) +$(eval $(call if-component,opengles2,\ + -DSUPPORT_OPENGLES2=1 -DOGLES2_MODULE=$(opengles2_target))) +$(eval $(call if-component,egl,\ + -DSUPPORT_LIBEGL=1 -DEGL_MODULE=$(egl_target))) +$(eval $(call if-component,pvr2d,\ + -DSUPPORT_LIBPVR2D=1)) +$(eval $(call if-component,glslcompiler,\ + -DSUPPORT_SOURCE_SHADER=1)) +$(eval $(call if-component,opencl,\ + -DSUPPORT_OPENCL=1)) +$(eval $(call if-component,opengl,\ + -DSUPPORT_OPENGL=1)) +$(eval $(call if-component,null_pvr2d_flip,\ + -DSUPPORT_NULL_PVR2D_FLIP=1)) +$(eval $(call if-component,null_pvr2d_blit,\ + -DSUPPORT_NULL_PVR2D_BLIT=1)) +$(eval $(call if-component,null_pvr2d_front,\ + -DSUPPORT_NULL_PVR2D_FRONT=1)) +$(eval $(call if-component,null_pvr2d_linuxfb,\ + -DSUPPORT_NULL_PVR2D_LINUXFB=1)) +$(eval $(call if-component,null_drm_ws,\ + -DSUPPORT_NULL_DRM_WS=1)) +$(eval $(call if-component,ews_ws,\ + -DSUPPORT_EWS=1)) +$(eval $(call if-component,imgtcl,\ + -DSUPPORT_IMGTCL=1)) +$(eval $(call if-component,ews_wm,\ + -DSUPPORT_LUA=1)) +$(eval $(call if-component,xmultiegltest,\ + -DSUPPORT_XUNITTESTS=1)) +$(eval $(call if-component,pvr_conf,\ + -DSUPPORT_XORG_CONF=1)) +$(eval $(call if-component,graphicshal,\ + -DSUPPORT_GRAPHICS_HAL=1)) +$(eval $(call if-component,xorg,\ + -DSUPPORT_XORG=1 \ + -DXORG_DIR=$(LWS_PREFIX) \ + -DXORG_EXPLICIT_PVR_SERVICES_LOAD=$(XORG_EXPLICIT_PVR_SERVICES_LOAD))) +$(eval $(call if-component,surfaceless,\ + -DSUPPORT_SURFACELESS=1)) +$(eval $(call if-component,wl,\ + -DSUPPORT_WAYLAND=1)) + +# These defs are common to all driver builds, and inherited from config.mk +# +M4DEFS += \ + -DPROFILE_COMMON=1 \ + -DFFGEN_UNIFLEX=1 \ + -DSUPPORT_SGX_HWPERF=$(SUPPORT_SGX_HWPERF) \ + +# These are common to some builds, and inherited from config.mk 
+#
+ifeq ($(SUPPORT_DRI_DRM),1)
+M4DEFS_K += -DSUPPORT_DRI_DRM=1 -DSUPPORT_DRI_DRM_NOT_PCI=$(PVR_DRI_DRM_NOT_PCI)
+ifeq ($(PVR_DRI_DRM_NOT_PCI),1)
+M4DEFS_K += -DDRM_MODNAME=drm
+endif
+endif
+
+ifeq ($(PVR_REMVIEW),1)
+M4DEFS += -DPVR_REMVIEW=1
+endif
+
+# Build UM script using old scheme using M4
+define create-install-um-script-m4
+$(RELATIVE_OUT)/$(1)/install_um.sh: $(PVRVERSION_H) $(CONFIG_MK)\
+ $(MAKE_TOP)/scripts/common.m4 \
+ $(MAKE_TOP)/$(PVR_BUILD_DIR)/install_um.sh.m4 \
+ | $(RELATIVE_OUT)/$(1)
+	$$(if $(V),,@echo " GEN " $$(call relative-to-top,$$@))
+	$(M4) $(M4FLAGS) $(M4DEFS) $(M4DEFS_K) \
+	 $(MAKE_TOP)/scripts/common.m4 \
+	 $(MAKE_TOP)/$(PVR_BUILD_DIR)/install_um.sh.m4 > $$@
+install_script: $(RELATIVE_OUT)/$(1)/install_um.sh
+endef
+
+$(foreach _t,$(TARGET_ALL_ARCH),$(eval $(call create-install-um-script-m4,$(_t))))
+
+$(TARGET_PRIMARY_OUT)/rc.pvr: \
+ $(PVRVERSION_H) $(CONFIG_MK) $(CONFIG_KERNEL_MK) \
+ $(MAKE_TOP)/scripts/rc.pvr.m4 $(MAKE_TOP)/$(PVR_BUILD_DIR)/rc.pvr.m4 \
+ | $(TARGET_PRIMARY_OUT)
+	$(if $(V),,@echo " GEN " $(call relative-to-top,$@))
+	$(M4) $(M4FLAGS) $(M4DEFS) $(M4DEFS_K) $(MAKE_TOP)/scripts/rc.pvr.m4 \
+		$(MAKE_TOP)/$(PVR_BUILD_DIR)/rc.pvr.m4 > $@
+	$(CHMOD) +x $@
+
+init_script: $(TARGET_PRIMARY_OUT)/rc.pvr
+
+endif # ifeq ($(SUPPORT_ANDROID_PLATFORM),)
+
+# This code mimics the way Make processes our implicit/explicit goal list.
+# It tries to build up a list of components that were actually built, from
+# whence an install script is generated.
+# +ifneq ($(MAKECMDGOALS),) +BUILT_UM := $(MAKECMDGOALS) +ifneq ($(filter build components,$(MAKECMDGOALS)),) +BUILT_UM += $(COMPONENTS) +endif +BUILT_UM := $(sort $(filter $(ALL_MODULES) xorg wl surfaceless,$(BUILT_UM))) +else +BUILT_UM := $(sort $(COMPONENTS)) +endif + +ifneq ($(MAKECMDGOALS),) +BUILT_KM := $(MAKECMDGOALS) +ifneq ($(filter build kbuild,$(MAKECMDGOALS)),) +BUILT_KM += $(KERNEL_COMPONENTS) +endif +BUILT_KM := $(sort $(filter $(ALL_MODULES),$(BUILT_KM))) +else +BUILT_KM := $(sort $(KERNEL_COMPONENTS)) +endif + +INSTALL_UM_MODULES := \ + $(strip $(foreach _m,$(BUILT_UM),\ + $(if $(filter doc,$($(_m)_type)),,\ + $(if $(filter host_%,$($(_m)_arch)),,\ + $(if $($(_m)_install_path),$(_m),\ + $(warning WARNING: UM $(_m)_install_path not defined)))))) + +# Build up a list of installable shared libraries. The shared_library module +# type is specially guaranteed to define $(_m)_target, even if the Linux.mk +# itself didn't. The list is formatted with : pairs e.g. +# "moduleA:libmoduleA.so moduleB:libcustom.so" for later processing. +ALL_SHARED_INSTALLABLE := \ + $(sort $(foreach _a,$(ALL_MODULES),\ + $(if $(filter shared_library,$($(_a)_type)),$(_a):$($(_a)_target),))) + +# Handle implicit install dependencies. Executables and shared libraries may +# be linked against other shared libraries. Avoid requiring the user to +# specify the program's binary dependencies explicitly with $(m)_install_extra +INSTALL_UM_MODULES := \ + $(sort $(INSTALL_UM_MODULES) \ + $(foreach _a,$(ALL_SHARED_INSTALLABLE),\ + $(foreach _m,$(INSTALL_UM_MODULES),\ + $(foreach _l,$($(_m)_libs),\ + $(if $(filter lib$(_l).so,$(word 2,$(subst :, ,$(_a)))),\ + $(word 1,$(subst :, ,$(_a)))))))) + +# Add explicit dependencies that must be installed +INSTALL_UM_MODULES := \ + $(sort $(INSTALL_UM_MODULES) \ + $(foreach _m,$(INSTALL_UM_MODULES),\ + $($(_m)_install_dependencies))) + +define calculate-um-fragments +# Work out which modules are required for this arch. 
+INSTALL_UM_MODULES_$(1) := \ + $$(strip $$(foreach _m,$(INSTALL_UM_MODULES),\ + $$(if $$(filter $(1),$$(INTERNAL_ARCH_LIST_FOR_$$(_m))),$$(_m)))) + +INSTALL_UM_FRAGMENTS_$(1) := $$(foreach _m,$$(INSTALL_UM_MODULES_$(1)),$(RELATIVE_OUT)/$(1)/intermediates/$$(_m)/.install) + +.PHONY: install_um_$(1)_debug +install_um_$(1)_debug: $$(INSTALL_UM_FRAGMENTS_$(1)) + $(CAT) $$^ +endef + +$(foreach _t,$(TARGET_ALL_ARCH) target_neutral,$(eval $(call calculate-um-fragments,$(_t)))) + +INSTALL_KM_FRAGMENTS := \ + $(strip $(foreach _m,$(BUILT_KM),\ + $(if $(filter-out kernel_module,$($(_m)_type)),,\ + $(if $($(_m)_install_path),\ + $(TARGET_PRIMARY_OUT)/intermediates/$(_m)/.install,\ + $(warning WARNING: $(_m)_install_path not defined))))) + +.PHONY: install_um_debug +install_um_debug: $(INSTALL_UM_FRAGMENTS) + $(CAT) $^ + +.PHONY: install_km_debug +install_km_debug: $(INSTALL_KM_FRAGMENTS) + $(CAT) $^ + +ifneq ($(INSTALL_KM_FRAGMENTS),) +$(TARGET_PRIMARY_OUT)/install_km.sh: $(INSTALL_KM_FRAGMENTS) $(CONFIG_KERNEL_MK) | $(TARGET_PRIMARY_OUT) + $(if $(V),,@echo " GEN " $(call relative-to-top,$@)) + $(ECHO) KERNELVERSION=$(KERNEL_ID) > $@ + $(ECHO) MOD_DESTDIR=$(patsubst %/,%,$(PVRSRV_MODULE_BASEDIR)) >> $@ +ifeq ($(SUPPORT_ANDROID_PLATFORM),) + $(ECHO) check_module_directory /lib/modules/$(KERNEL_ID) >> $@ +endif + $(CAT) $(INSTALL_KM_FRAGMENTS) >> $@ +install_script_km: $(TARGET_PRIMARY_OUT)/install_km.sh +endif + +# Build UM script using new scheme which does not use M4 for anything +# (Only works for Android and target_neutral right now.) 
+define create-install-um-script +ifneq ($$(INSTALL_UM_FRAGMENTS_$(1)),) +$(RELATIVE_OUT)/$(1)/install_um.sh: $$(INSTALL_UM_FRAGMENTS_$(1)) | $(RELATIVE_OUT)/$(1) + $(if $(V),,@echo " GEN " $$(call relative-to-top,$$@)) + $(CAT) $$(INSTALL_UM_FRAGMENTS_$(1)) > $$@ +install_script: $(RELATIVE_OUT)/$(1)/install_um.sh +endif +endef +$(eval $(call create-install-um-script,target_neutral)) + +ifneq ($(SUPPORT_ANDROID_PLATFORM),) +$(foreach _t,$(TARGET_ALL_ARCH),$(eval $(call create-install-um-script,$(_t)))) +endif + +# Build the top-level install script that drives the install. +ifneq ($(SUPPORT_ANDROID_PLATFORM),) +install_sh_template := $(MAKE_TOP)/common/android/install.sh.tpl +else +install_sh_template := $(MAKE_TOP)/scripts/install.sh.tpl +endif + +$(RELATIVE_OUT)/install.sh: $(PVRVERSION_H) | $(RELATIVE_OUT) +# In customer packages only one of config.mk or config_kernel.mk will exist. +# We can depend on either one, as long as we rebuild the install script when +# the config options it uses change. 
+$(RELATIVE_OUT)/install.sh: $(call if-exists,$(CONFIG_MK),$(CONFIG_KERNEL_MK)) +$(RELATIVE_OUT)/install.sh: $(install_sh_template) + $(if $(V),,@echo " GEN " $(call relative-to-top,$@)) + $(ECHO) 's/\[PVRVERSION\]/$(subst /,\/,$(PVRVERSION))/g;' > $(RELATIVE_OUT)/install.sh.sed + $(ECHO) 's/\[PVRBUILD\]/$(BUILD)/g;' >> $(RELATIVE_OUT)/install.sh.sed + $(ECHO) 's/\[PRIMARY_ARCH\]/$(TARGET_PRIMARY_ARCH)/g;' >> $(RELATIVE_OUT)/install.sh.sed + $(ECHO) 's/\[ARCHITECTURES\]/$(TARGET_ALL_ARCH) target_neutral/g;' >> $(RELATIVE_OUT)/install.sh.sed + $(ECHO) 's/\[LWS_PREFIX\]/$(subst /,\/,$(LWS_PREFIX))/g;' >> $(RELATIVE_OUT)/install.sh.sed + $(ECHO) 's/\[APP_DESTDIR\]/$(subst /,\/,$(APP_DESTDIR))/g;' >> $(RELATIVE_OUT)/install.sh.sed + $(ECHO) 's/\[BIN_DESTDIR\]/$(subst /,\/,$(BIN_DESTDIR))/g;' >> $(RELATIVE_OUT)/install.sh.sed + $(ECHO) 's/\[SHADER_DESTDIR\]/$(subst /,\/,$(SHADER_DESTDIR))/g;' >> $(RELATIVE_OUT)/install.sh.sed + $(ECHO) 's/\[SHLIB_DESTDIR\]/$(subst /,\/,$(SHLIB_DESTDIR))/g;' >> $(RELATIVE_OUT)/install.sh.sed + @sed -f $(RELATIVE_OUT)/install.sh.sed $< > $@ + $(CHMOD) +x $@ + $(RM) $(RELATIVE_OUT)/install.sh.sed +install_script: $(RELATIVE_OUT)/install.sh +install_script_km: $(RELATIVE_OUT)/install.sh diff --git a/sgx_km/eurasia_km/eurasiacon/build/linux2/shared_library.mk b/sgx_km/eurasia_km/eurasiacon/build/linux2/shared_library.mk new file mode 100644 index 0000000..43e708d --- /dev/null +++ b/sgx_km/eurasia_km/eurasiacon/build/linux2/shared_library.mk @@ -0,0 +1,210 @@ +########################################################################### ### +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ########################################################################### + +ifeq ($($(THIS_MODULE)_target),) +$(THIS_MODULE)_target := $(THIS_MODULE).so +endif + +MODULE_TARGETS := $(addprefix $(MODULE_OUT)/,$(if $($(THIS_MODULE)_target),$($(THIS_MODULE)_target),$(THIS_MODULE).so)) +$(call module-info-line,shared library: $(MODULE_TARGETS)) + +include eurasiacon/build/linux2/_objects.mk + +# Objects built by other modules +MODULE_EXTERNAL_OBJECTS := $($(THIS_MODULE)_obj) + +# Determine whether the C++ compiler is needed for linking. +MODULE_NEEDS_CXX_LINKER := $(if $($(THIS_MODULE)_force_cxx_linker),true,) +ifeq ($($(THIS_MODULE)_force_cxx_linker),) +# If this module contains any C++ source files, MODULE_NEEDS_CXX_LINKER is +# set, because we have to use the C++ compiler to link it +MODULE_NEEDS_CXX_LINKER := $(if $(strip $(MODULE_CXX_SOURCES)),true,) +endif +ifeq ($(MODULE_NEEDS_CXX_LINKER),true) +ALL_CXX_MODULES += $(THIS_MODULE) +endif + +MODULE_ALL_OBJECTS := \ + $(MODULE_C_OBJECTS) $(MODULE_CXX_OBJECTS) \ + $(MODULE_EXTERNAL_OBJECTS) + +# Libraries that can be made, which this module links with +MODULE_BUILT_LIBRARIES := $(patsubst %,$(MODULE_OUT)/lib%.so,$($(THIS_MODULE)_libs)) +MODULE_BUILT_STATIC_LIBRARIES := $(patsubst %,$(MODULE_OUT)/lib%.a,$($(THIS_MODULE)_staticlibs)) + +# Disallow undefined symbols, except for X.org video drivers +MODULE_LDFLAGS := \ + $(if $($(THIS_MODULE)_allow_undefined),,-Wl,--no-undefined) $(MODULE_LDFLAGS) +MODULE_HOST_LDFLAGS := \ + $(if $($(THIS_MODULE)_allow_undefined),,-Wl,--no-undefined) $(MODULE_HOST_LDFLAGS) + +# Android may need to have DT_SONAME turned on +ifeq ($(PVR_ANDROID_NEEDS_SONAME),1) + MODULE_LDFLAGS += -Wl,--soname=$($(THIS_MODULE)_target) +endif + +MODULE_SONAME := +ifneq ($(DONT_USE_SONAMES),1) + ifneq ($($(THIS_MODULE)_soname),) + MODULE_SONAME := $($(THIS_MODULE)_soname) + MODULE_LDFLAGS += -Wl,--soname=$(MODULE_SONAME) + endif +endif + +ifeq ($(SUPPORT_BUILD_LWS),1) + ifneq 
($(MODULE_ARCH),$(TARGET_PRIMARY_ARCH)) + lws_libdir := $(LWS_PREFIX)/lib32 + else + lws_libdir := $(LWS_PREFIX)/lib + endif + MODULE_LIBRARY_DIR_FLAGS += -L $(MODULE_OUT)$(lws_libdir) + MODULE_LIBRARY_DIR_FLAGS += -Wl,-rpath-link=$(MODULE_OUT)$(lws_libdir) + ifeq ($(filter $(lws_libdir),/usr/lib /usr/lib32),) + MODULE_LIBRARY_DIR_FLAGS += -Wl,-rpath=$(lws_libdir) + endif +endif + +ifneq ($($(THIS_MODULE)_libpaths_relative),) +MODULE_LIBRARY_DIR_FLAGS += $(addprefix -L $(MODULE_OUT)/,$($(THIS_MODULE)_libpaths_relative)) +endif + +ifneq ($($(THIS_MODULE)_whole_extlibs),) +MODULE_LDFLAGS += -Wl,--whole-archive $(addprefix -l,$($(THIS_MODULE)_whole_extlibs)) -Wl,--no-whole-archive +endif + +ifneq ($(MODULE_HOST_BUILD),true) +# Unusually, we define $(THIS_MODULE)_install_path if the user didn't, as we +# can't use MODULE_INSTALL_PATH in the scripts.mk logic. +ifeq ($($(THIS_MODULE)_install_path),) +$(THIS_MODULE)_install_path := \ + $${SHLIB_DESTDIR}/$(patsubst $(MODULE_OUT)/%,%,$(MODULE_TARGETS)) +endif +MODULE_INSTALL_PATH := $($(THIS_MODULE)_install_path) +endif + +MODULE_EXPORTS=$($(THIS_MODULE)_exports) + +.PHONY: $(THIS_MODULE) +$(THIS_MODULE): $(MODULE_TARGETS) + +# This is the rule used to link the final shared library +.DELETE_ON_ERROR: $(MODULE_TARGETS) +$(MODULE_TARGETS): MODULE_CC := $(MODULE_CC) +$(MODULE_TARGETS): MODULE_CXX := $(MODULE_CXX) +$(MODULE_TARGETS): MODULE_LDFLAGS := $(MODULE_LDFLAGS) +$(MODULE_TARGETS): MODULE_HOST_LDFLAGS := $(MODULE_HOST_LDFLAGS) +$(MODULE_TARGETS): MODULE_CFLAGS := -fPIC $(MODULE_CFLAGS) +$(MODULE_TARGETS): MODULE_HOST_CFLAGS := -fPIC $(MODULE_HOST_CFLAGS) +$(MODULE_TARGETS): CHECKEXPORTS_DIR := $(MODULE_INTERMEDIATES_DIR) +$(MODULE_TARGETS): MODULE_LIBRARY_DIR_FLAGS := $(MODULE_LIBRARY_DIR_FLAGS) +$(MODULE_TARGETS): MODULE_LIBRARY_FLAGS := $(MODULE_LIBRARY_FLAGS) +$(MODULE_TARGETS): MODULE_ALL_OBJECTS := $(MODULE_ALL_OBJECTS) +$(MODULE_TARGETS): MODULE_SONAME := $(MODULE_SONAME) +$(MODULE_TARGETS): MODULE_LIB_LDFLAGS := 
$(MODULE_LIB_LDFLAGS) +$(MODULE_TARGETS): MODULE_LIB_CRTBEGIN := $(MODULE_LIB_CRTBEGIN) +$(MODULE_TARGETS): MODULE_LIB_CRTEND := $(MODULE_LIB_CRTEND) +$(MODULE_TARGETS): MODULE_LIBGCC := $(MODULE_LIBGCC) +$(MODULE_TARGETS): MODULE_EXPORTS := $(MODULE_EXPORTS) +ifeq ($(MODULE_HOST_BUILD),) + ifneq ($(PKG_CONFIG_ENV_VAR),) + $(MODULE_TARGETS): export PKG_CONFIG_TOP_BUILD_DIR := $(abspath $(MODULE_OUT)) + $(MODULE_TARGETS): export $(PKG_CONFIG_ENV_VAR) := $(abspath $(MODULE_OUT)/pkgconfig) + else ifneq ($(PKG_CONFIG_LIBDIR),) + $(MODULE_TARGETS): export PKG_CONFIG_LIBDIR := $(PKG_CONFIG_LIBDIR) + $(MODULE_TARGETS): export PKG_CONFIG_PATH := $(PKG_CONFIG_PATH) + $(MODULE_TARGETS): export PKG_CONFIG_ALLOW_SYSTEM_LIBS := 1 + $(MODULE_TARGETS): export PKG_CONFIG_SYSROOT_DIR := $(PKG_CONFIG_SYSROOT_DIR) + endif +endif +$(MODULE_TARGETS): export PKG_CONFIG_SYSROOT_DIR := $(PKG_CONFIG_SYSROOT_DIR) +$(MODULE_TARGETS): $(MODULE_BUILT_LIBRARIES) $(MODULE_BUILT_STATIC_LIBRARIES) +$(MODULE_TARGETS): $(MODULE_ALL_OBJECTS) $(THIS_MAKEFILE) + +ifeq ($(MODULE_HOST_BUILD),true) +ifeq ($(MODULE_NEEDS_CXX_LINKER),true) + $(host-shared-library-cxx-from-o) +else + $(host-shared-library-from-o) +endif +ifeq ($(DEBUGLINK),1) + $(host-copy-debug-information) + $(host-strip-debug-information) + $(host-add-debuglink) +endif +else # MODULE_HOST_BUILD +ifeq ($(MODULE_NEEDS_CXX_LINKER),true) + $(target-shared-library-cxx-from-o) +else + $(target-shared-library-from-o) +endif +ifneq ($(MODULE_SONAME),) + $(LN) $@ $@.1 +endif +ifeq ($(DEBUGLINK),1) + $(target-copy-debug-information) + $(target-strip-debug-information) + $(target-add-debuglink) +endif + $(call check-exports,$(MODULE_EXPORTS)) +endif # MODULE_HOST_BUILD + +ifneq ($(MODULE_HOST_BUILD),true) +$(MODULE_INTERMEDIATES_DIR)/.install: MODULE_TYPE := $($(THIS_MODULE)_type) +$(MODULE_INTERMEDIATES_DIR)/.install: MODULE_INSTALL_PATH := $(MODULE_INSTALL_PATH) +$(MODULE_INTERMEDIATES_DIR)/.install: MODULE_TARGETS := $(patsubst 
$(MODULE_OUT)/%,%,$(MODULE_TARGETS)) +$(MODULE_INTERMEDIATES_DIR)/.install: MODULE_SONAME := $(MODULE_SONAME) +$(MODULE_INTERMEDIATES_DIR)/.install: $(THIS_MAKEFILE) | $(MODULE_INTERMEDIATES_DIR) +$(MODULE_INTERMEDIATES_DIR)/.install: $(PVRVERSION_H) +ifeq ($(MODULE_SONAME),) + @echo 'install_file $(MODULE_TARGETS) $(MODULE_INSTALL_PATH) "$(MODULE_TYPE)" 0644 0:0' >$@ +else + @echo 'install_file $(MODULE_TARGETS) $(MODULE_INSTALL_PATH).$(PVRVERSION_NUM) "$(MODULE_TYPE)" 0644 0:0' >$@ + @echo 'link_library $(MODULE_INSTALL_PATH).$(PVRVERSION_NUM)' >>$@ +endif +endif + + +MODULE_CFLAGS += -fPIC -pie +MODULE_CXXFLAGS += -fPIC -pie +MODULE_HOST_CFLAGS += -fPIC -pie +MODULE_HOST_CXXFLAGS += -fPIC -pie + +$(foreach _src_file,$(MODULE_C_SOURCES),$(eval $(call rule-for-objects-o-from-one-c,$(MODULE_INTERMEDIATES_DIR)/$(notdir $(_src_file:.c=.o)),$(_src_file)))) +$(foreach _src_file,$(MODULE_CXX_SOURCES),$(eval $(call rule-for-objects-o-from-one-cxx,$(MODULE_INTERMEDIATES_DIR)/$(notdir $(_src_file:.cpp=.o)),$(_src_file)))) diff --git a/sgx_km/eurasia_km/eurasiacon/build/linux2/this_makefile.mk b/sgx_km/eurasia_km/eurasiacon/build/linux2/this_makefile.mk new file mode 100644 index 0000000..c312001 --- /dev/null +++ b/sgx_km/eurasia_km/eurasiacon/build/linux2/this_makefile.mk @@ -0,0 +1,68 @@ +########################################################################### ### +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ########################################################################### + +# Find out the path of the Linux.mk makefile currently being processed, and +# set paths used by the build rules + +# This magic is used so we can use this_makefile.mk twice: first when reading +# in each Linux.mk, and then again when generating rules. There we set +# $(THIS_MAKEFILE), and $(REMAINING_MAKEFILES) should be empty +ifneq ($(strip $(REMAINING_MAKEFILES)),) + +# Absolute path to the Linux.mk being processed +THIS_MAKEFILE := $(firstword $(REMAINING_MAKEFILES)) + +# The list of makefiles left to process +REMAINING_MAKEFILES := $(wordlist 2,$(words $(REMAINING_MAKEFILES)),$(REMAINING_MAKEFILES)) + +else + +# When generating rules, we should have read in every Linux.mk +$(if $(INTERNAL_INCLUDED_ALL_MAKEFILES),,$(error No makefiles left in $$(REMAINING_MAKEFILES), but $$(INTERNAL_INCLUDED_ALL_MAKEFILES) is not set)) + +endif + +# Path to the directory containing Linux.mk +THIS_DIR := $(patsubst %/,%,$(dir $(THIS_MAKEFILE))) +ifeq ($(strip $(THIS_DIR)),) +$(error Empty $$(THIS_DIR) for makefile "$(THIS_MAKEFILE)") +endif + +modules := diff --git a/sgx_km/eurasia_km/eurasiacon/build/linux2/tools/cc-check.sh b/sgx_km/eurasia_km/eurasiacon/build/linux2/tools/cc-check.sh new file mode 100755 index 0000000..9566c73 --- /dev/null +++ b/sgx_km/eurasia_km/eurasiacon/build/linux2/tools/cc-check.sh @@ -0,0 +1,119 @@ +#!/bin/sh +########################################################################### ### +#@Title Test the nature of the C compiler. +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ###########################################################################
+
+LANG=C
+export LANG
+
+usage() {
+	echo "usage: $0 [--64] [--clang] --cc CC [--out OUT] [cflag]"
+	exit 1
+}
+
+check_clang() {
+	$CC -dM -E - </dev/null | grep __clang__ >/dev/null 2>&1
+	if [ "$?" = "0" ]; then
+		# Clang must be passed a program with a main() that returns 0.
+		# It will produce an error if main() is improperly specified.
+		IS_CLANG=1
+		TEST_PROGRAM="int main(void){return 0;}"
+	else
+		# If we're not clang, assume we're GCC. GCC needs to be passed
+		# a program with a faulty return in main() so that another
+		# warning (unrelated to the flag being tested) is emitted.
+		# This will cause GCC to warn about the unsupported warning flag.
+		IS_CLANG=0
+		TEST_PROGRAM="int main(void){return;}"
+	fi
+}
+
+do_cc() {
+	echo "$TEST_PROGRAM" | $CC -W -Wall $3 -xc -c - -o $1 >$2 2>&1
+}
+
+while [ 1 ]; do
+	if [ "$1" = "--64" ]; then
+		[ -z $CLANG ] && BIT_CHECK=1
+	elif [ "$1" = "--clang" ]; then
+		[ -z $BIT_CHECK ] && CLANG=1
+	elif [ "$1" = "--cc" ]; then
+		[ "x$2" = "x" ] && usage
+		CC="$2" && shift
+	elif [ "$1" = "--out" ]; then
+		[ "x$2" = "x" ] && usage
+		OUT="$2" && shift
+	elif [ "${1#--}" != "$1" ]; then
+		usage
+	else
+		break
+	fi
+	shift
+done
+
+[ "x$CC" = "x" ] && usage
+[ "x$CLANG" = "x" -a "x$OUT" = "x" ] && usage
+ccof=$OUT/cc-sanity-check
+log=${ccof}.log
+
+check_clang
+
+if [ "x$BIT_CHECK" = "x1" ]; then
+	do_cc $ccof $log ""
+	file $ccof | grep 64-bit >/dev/null 2>&1
+	[ "$?" = "0" ] && echo true || echo false
+elif [ "x$CLANG" = "x1" ]; then
+	[ "x$IS_CLANG" = "x1" ] && echo true || echo false
+else
+	[ "x$1" = "x" ] && usage
+	do_cc $ccof $log $1
+	if [ "$?" = "0" ]; then
+		# compile passed, but was the warning unrecognized?
+		if [ "x$IS_CLANG" = "x1" ]; then
+			grep "^warning: unknown warning option '$1'" $log >/dev/null 2>&1
+		else
+			grep "^cc1: warning: unrecognized command line option \"$1\"" $log >/dev/null 2>&1
+		fi
+		[ "$?"
= "1" ] && echo $1 + fi +fi + +rm -f $ccof $log +exit 0 diff --git a/sgx_km/eurasia_km/eurasiacon/build/linux2/toplevel.mk b/sgx_km/eurasia_km/eurasiacon/build/linux2/toplevel.mk new file mode 100644 index 0000000..ac84bb8 --- /dev/null +++ b/sgx_km/eurasia_km/eurasiacon/build/linux2/toplevel.mk @@ -0,0 +1,312 @@ +########################################################################### ### +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. 
+# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +### ########################################################################### + +# Define the default goal. This masks a previous definition of the default +# goal in Makefile.config, which must match this one +.PHONY: build +build: components kbuild + +ifeq ($(OUT),) +$(error "Must specify output directory with OUT=") +endif + +ifeq ($(TOP),) +$(error "Must specify root of source tree with TOP=") +endif +$(call directory-must-exist,$(TOP)) + +# Output directory for configuration, object code, +# final programs/libraries, and install/rc scripts. +# + +# RELATIVE_OUT is relative only if it's under $(TOP) +RELATIVE_OUT := $(patsubst $(TOP)/%,%,$(OUT)) +CONFIG_MK := $(RELATIVE_OUT)/config.mk +CONFIG_H := $(RELATIVE_OUT)/config.h +CONFIG_KERNEL_MK := $(RELATIVE_OUT)/config_kernel.mk +CONFIG_KERNEL_H := $(RELATIVE_OUT)/config_kernel.h +MAKE_TOP := eurasiacon/build/linux2 +THIS_MAKEFILE := (top-level makefiles) + +# Convert commas to spaces in $(D). This is so you can say "make +# D=config-changes,freeze-config" and have $(filter config-changes,$(D)) +# still work. 
+comma := , +empty := +space := $(empty) $(empty) +override D := $(subst $(comma),$(space),$(D)) + +include $(MAKE_TOP)/defs.mk + +ifneq ($(INTERNAL_CLOBBER_ONLY),true) +# Create the out directory +# +$(shell mkdir -p $(OUT)) + +# If these generated files differ from any pre-existing ones, +# replace them, causing affected parts of the driver to rebuild. +# +_want_config_diff := $(filter config-changes,$(D)) +_freeze_config := $(strip $(filter freeze-config,$(D))) +_updated_config_files := $(shell \ + $(if $(_want_config_diff),rm -f $(OUT)/config.diff;,) \ + for file in $(CONFIG_MK) $(CONFIG_H) \ + $(CONFIG_KERNEL_MK) $(CONFIG_KERNEL_H); do \ + diff -U 0 $$file $$file.new \ + >>$(if $(_want_config_diff),$(OUT)/config.diff,/dev/null) 2>/dev/null \ + && rm -f $$file.new \ + || echo $$file; \ + done) + +ifneq ($(_want_config_diff),) +# We send the diff to stderr so it isn't captured by $(shell) +$(shell [ -s $(OUT)/config.diff ] && echo >&2 "Configuration changed in $(RELATIVE_OUT):" && cat >&2 $(OUT)/config.diff) +endif + +ifneq ($(_freeze_config),) +$(if $(_updated_config_files),$(error Configuration change in $(RELATIVE_OUT) prevented by D=freeze-config),) +endif + +# Update the config, if changed +$(foreach _f,$(_updated_config_files), \ + $(shell mv -f $(_f).new $(_f) >/dev/null 2>/dev/null)) + +endif # INTERNAL_CLOBBER_ONLY + +MAKEFLAGS := -Rr --no-print-directory + +ifneq ($(INTERNAL_CLOBBER_ONLY),true) + +# This is so you can say "find $(TOP) -name Linux.mk > /tmp/something; export +# ALL_MAKEFILES=/tmp/something; make" and avoid having to run find. This is +# handy if your source tree is mounted over NFS or something +override ALL_MAKEFILES := $(call relative-to-top,$(if $(strip $(ALL_MAKEFILES)),$(shell cat $(ALL_MAKEFILES)),$(shell find $(TOP) -type f -name Linux.mk -print -o -type d -name '.*' -prune))) +ifeq ($(strip $(ALL_MAKEFILES)),) +$(info ** Unable to find any Linux.mk files under $$(TOP). 
This could mean that) +$(info ** there are no makefiles, or that ALL_MAKEFILES is set in the environment) +$(info ** and points to a nonexistent or empty file.) +$(error No makefiles) +endif + +else # clobber-only +ALL_MAKEFILES := +endif + +unexport ALL_MAKEFILES + +REMAINING_MAKEFILES := $(ALL_MAKEFILES) +ALL_MODULES := +INTERNAL_INCLUDED_ALL_MAKEFILES := + +# Please do not change the format of the following lines +-include $(CONFIG_KERNEL_MK) + +# If we haven't set host/target archs, set some sensible defaults now. +# This allows things like prune.sh to work +ifeq ($(HOST_PRIMARY_ARCH),) +ifneq ($(FORCE_ARCH),) +HOST_PRIMARY_ARCH := host_i386 +HOST_32BIT_ARCH := host_i386 +endif +endif + +# Output directory for configuration, object code, +# final programs/libraries, and install/rc scripts. +HOST_OUT := $(RELATIVE_OUT)/$(HOST_PRIMARY_ARCH) +HOST_32BIT_OUT := $(RELATIVE_OUT)/$(HOST_32BIT_ARCH) +TARGET_OUT := $(RELATIVE_OUT)/$(TARGET_PRIMARY_ARCH) +TARGET_PRIMARY_OUT := $(RELATIVE_OUT)/$(TARGET_PRIMARY_ARCH) +TARGET_NEUTRAL_OUT := $(RELATIVE_OUT)/target_neutral +GENERATED_CODE_OUT := $(TARGET_NEUTRAL_OUT)/intermediates +DOCS_OUT := $(RELATIVE_OUT)/doc + +# Make directories that won't otherwise be made. +# (This is for the install scripts and other things that aren't made by +# normal module rules.) +TARGET_OUT_DIRECTORIES := $(addprefix $(RELATIVE_OUT)/,$(TARGET_ALL_ARCH)) $(TARGET_NEUTRAL_OUT) $(DOCS_OUT) +.SECONDARY: $(TARGET_OUT_DIRECTORIES) +$(TARGET_OUT_DIRECTORIES): + $(make-directory) + +ifneq ($(INTERNAL_CLOBBER_ONLY),true) +# These files may not exist in GPL km source packages +-include $(MAKE_TOP)/llvm.mk +endif + +include $(MAKE_TOP)/commands.mk + +# We don't need to include this if we're just doing a clean or a clobber +# +ifneq ($(INTERNAL_CLOBBER_ONLY),true) +include $(MAKE_TOP)/buildvars.mk +endif + +include $(MAKE_TOP)/pvrversion.mk + +ifeq ($(INTERNAL_CLOBBER_ONLY)$(SUPPORT_ANDROID_PLATFORM),) + # doing a Linux build. 
We need to worry about sysroots. + + ifneq ($(SUPPORT_BUILD_LWS),) + -include $(MAKE_TOP)/xorgconf.mk + + else ifneq ($(SYSROOT),) + LWS_PREFIX ?= /usr + + ALL_CFLAGS += --sysroot=${SYSROOT} + ALL_CXXFLAGS += --sysroot=${SYSROOT} + ALL_LDFLAGS += --sysroot=${SYSROOT} + + PKG_CONFIG_SYSROOT_DIR := ${SYSROOT} + + ifneq ($(SYSROOT),/) + # Override PKG_CONFIG_PATH to prevent additional host paths from being + # searched + PKG_CONFIG_PATH := + endif + endif +endif + +HOST_INTERMEDIATES := $(HOST_OUT)/intermediates +TARGET_INTERMEDIATES := $(TARGET_OUT)/intermediates + +# Include each Linux.mk, then include modules.mk to save some information +# about each module +include $(foreach _Linux.mk,$(ALL_MAKEFILES),$(MAKE_TOP)/this_makefile.mk $(_Linux.mk) $(MAKE_TOP)/modules.mk) + +ifeq ($(strip $(REMAINING_MAKEFILES)),) +INTERNAL_INCLUDED_ALL_MAKEFILES := true +else +$(error Impossible: $(words $(REMAINING_MAKEFILES)) makefiles were mysteriously ignored when reading $$(ALL_MAKEFILES)) +endif + +# Compute the isystem paths passed in via SYS_INCLUDES. We'll use this in +# the module target_xxx makefiles to filter duplicate -isystem and -I flags, +# to ensure the module can always override the include precedence. (Also +# calculate any 'residual' non-include flags, as we need to put them back.) +SYS_INCLUDES_ISYSTEM := \ + $(subst -isystem,,$(filter -isystem%,$(subst -isystem ,-isystem,$(SYS_INCLUDES)))) +SYS_INCLUDES_RESIDUAL := \ + $(strip $(filter-out -isystem%,$(subst -isystem ,-isystem,$(SYS_INCLUDES)))) + +# At this point, all Linux.mks have been included. 
Now generate rules to build +# each module: for each module in $(ALL_MODULES), set per-makefile variables +$(foreach _m,$(ALL_MODULES),$(eval $(call process-module,$(_m)))) + +.PHONY: kbuild install +kbuild install: + +ifneq ($(INTERNAL_CLOBBER_ONLY),true) +-include $(MAKE_TOP)/scripts.mk +-include $(MAKE_TOP)/kbuild/kbuild.mk +endif +# We won't depend on 'build' here so that people can build subsets of +# components and still have the install script attempt to install the +# subset. +install: + @if [ ! -d "$(DISCIMAGE)" -a -z "$(INSTALL_TARGET)" ]; then \ + echo; \ + echo "** DISCIMAGE was not set or does not point to a valid directory."; \ + echo "** Either use INSTALL_TARGET or set DISCIMAGE."; \ + echo "** Cannot continue with install."; \ + echo; \ + exit 1; \ + fi + @if [ ! -f $(RELATIVE_OUT)/install.sh ]; then \ + echo; \ + echo "** install.sh not found in $(TARGET_OUT)."; \ + echo "** Cannot continue with install."; \ + echo; \ + exit 1; \ + fi + @cd $(RELATIVE_OUT) && ./install.sh + +.PHONY: uninstall +uninstall: install_script +uninstall: + @if [ ! -d "$(DISCIMAGE)" -a -z "$(INSTALL_TARGET)" ]; then \ + echo; \ + echo "** DISCIMAGE was not set or does not point to a valid directory."; \ + echo "** Either use INSTALL_TARGET or set DISCIMAGE."; \ + echo "** Cannot continue with uninstall."; \ + echo; \ + exit 1; \ + fi + @if [ ! -f $(RELATIVE_OUT)/install.sh ]; then \ + echo; \ + echo "** install.sh not found in $(TARGET_OUT)."; \ + echo "** Cannot continue with uninstall."; \ + echo; \ + exit 1; \ + fi + @cd $(RELATIVE_OUT) && ./install.sh -u + +# You can say 'make all_modules' to attempt to make everything, or 'make +# components' to only make the things which are listed (in the per-build +# makefiles) as components of the build. 
+.PHONY: all_modules all_docs components +all_modules: $(ALL_MODULES) +all_docs: ; +components: $(COMPONENTS) +docs: $(DOCS) + +# Cleaning +.PHONY: clean clobber +clean: MODULE_DIRS_TO_REMOVE := $(HOST_OUT) $(HOST_32BIT_OUT) \ + $(TARGET_OUT_DIRECTORIES) +clean: + $(clean-dirs) +clobber: MODULE_DIRS_TO_REMOVE := $(OUT) +clobber: + $(clean-dirs) + +# Saying 'make clean-MODULE' removes the intermediates for MODULE. +# clobber-MODULE deletes the output files as well +clean-%: + $(if $(V),,@echo " RM " $(call relative-to-top,$(INTERNAL_CLEAN_TARGETS_FOR_$*))) + $(RM) -rf $(INTERNAL_CLEAN_TARGETS_FOR_$*) +clobber-%: + $(if $(V),,@echo " RM " $(call relative-to-top,$(INTERNAL_CLOBBER_TARGETS_FOR_$*))) + $(RM) -rf $(INTERNAL_CLOBBER_TARGETS_FOR_$*) + +include $(MAKE_TOP)/bits.mk + +# D=nobuild stops the build before any recipes are run. This line should +# come at the end of this makefile. +$(if $(filter nobuild,$(D)),$(error D=nobuild given),) diff --git a/sgx_km/eurasia_km/include4/dbgdrvif.h b/sgx_km/eurasia_km/include4/dbgdrvif.h new file mode 100644 index 0000000..b5edb5a --- /dev/null +++ b/sgx_km/eurasia_km/include4/dbgdrvif.h @@ -0,0 +1,382 @@ +/*************************************************************************/ /*! +@Title Debug driver +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Debug Driver Interface +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef _DBGDRVIF_ +#define _DBGDRVIF_ + + +#if defined(__linux__) + +#define FILE_DEVICE_UNKNOWN 0 +#define METHOD_BUFFERED 0 +#define FILE_ANY_ACCESS 0 + +#define CTL_CODE( DeviceType, Function, Method, Access ) (Function) +#define MAKEIOCTLINDEX(i) ((i) & 0xFFF) + +#else + +#include "ioctldef.h" + +#endif + +/***************************************************************************** + Stream mode stuff. +*****************************************************************************/ +#define DEBUG_CAPMODE_FRAMED 0x00000001UL +#define DEBUG_CAPMODE_CONTINUOUS 0x00000002UL +#define DEBUG_CAPMODE_HOTKEY 0x00000004UL + +#define DEBUG_OUTMODE_STANDARDDBG 0x00000001UL +#define DEBUG_OUTMODE_MONO 0x00000002UL +#define DEBUG_OUTMODE_STREAMENABLE 0x00000004UL +#define DEBUG_OUTMODE_ASYNC 0x00000008UL +#define DEBUG_OUTMODE_SGXVGA 0x00000010UL + +#define DEBUG_FLAGS_USE_NONPAGED_MEM 0x00000001UL +#define DEBUG_FLAGS_NO_BUF_EXPANDSION 0x00000002UL +#define DEBUG_FLAGS_ENABLESAMPLE 0x00000004UL +#define DEBUG_FLAGS_READONLY 0x00000008UL +#define DEBUG_FLAGS_WRITEONLY 0x00000010UL + +#define DEBUG_FLAGS_TEXTSTREAM 0x80000000UL + +/***************************************************************************** + Debug level control. Only bothered with the first 12 levels, I suspect you + get the idea... 
+*****************************************************************************/ +#define DEBUG_LEVEL_0 0x00000001UL +#define DEBUG_LEVEL_1 0x00000003UL +#define DEBUG_LEVEL_2 0x00000007UL +#define DEBUG_LEVEL_3 0x0000000FUL +#define DEBUG_LEVEL_4 0x0000001FUL +#define DEBUG_LEVEL_5 0x0000003FUL +#define DEBUG_LEVEL_6 0x0000007FUL +#define DEBUG_LEVEL_7 0x000000FFUL +#define DEBUG_LEVEL_8 0x000001FFUL +#define DEBUG_LEVEL_9 0x000003FFUL +#define DEBUG_LEVEL_10 0x000007FFUL +#define DEBUG_LEVEL_11 0x00000FFFUL + +#define DEBUG_LEVEL_SEL0 0x00000001UL +#define DEBUG_LEVEL_SEL1 0x00000002UL +#define DEBUG_LEVEL_SEL2 0x00000004UL +#define DEBUG_LEVEL_SEL3 0x00000008UL +#define DEBUG_LEVEL_SEL4 0x00000010UL +#define DEBUG_LEVEL_SEL5 0x00000020UL +#define DEBUG_LEVEL_SEL6 0x00000040UL +#define DEBUG_LEVEL_SEL7 0x00000080UL +#define DEBUG_LEVEL_SEL8 0x00000100UL +#define DEBUG_LEVEL_SEL9 0x00000200UL +#define DEBUG_LEVEL_SEL10 0x00000400UL +#define DEBUG_LEVEL_SEL11 0x00000800UL + +/***************************************************************************** + IOCTL values. 
+*****************************************************************************/ +#define DEBUG_SERVICE_IOCTL_BASE 0x800UL +#define DEBUG_SERVICE_CREATESTREAM CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x01, METHOD_BUFFERED, FILE_ANY_ACCESS) +#define DEBUG_SERVICE_DESTROYSTREAM CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x02, METHOD_BUFFERED, FILE_ANY_ACCESS) +#define DEBUG_SERVICE_GETSTREAM CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x03, METHOD_BUFFERED, FILE_ANY_ACCESS) +#define DEBUG_SERVICE_WRITESTRING CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x04, METHOD_BUFFERED, FILE_ANY_ACCESS) +#define DEBUG_SERVICE_READSTRING CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x05, METHOD_BUFFERED, FILE_ANY_ACCESS) +#define DEBUG_SERVICE_WRITE CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x06, METHOD_BUFFERED, FILE_ANY_ACCESS) +#define DEBUG_SERVICE_READ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x07, METHOD_BUFFERED, FILE_ANY_ACCESS) +#define DEBUG_SERVICE_SETDEBUGMODE CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x08, METHOD_BUFFERED, FILE_ANY_ACCESS) +#define DEBUG_SERVICE_SETDEBUGOUTMODE CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x09, METHOD_BUFFERED, FILE_ANY_ACCESS) +#define DEBUG_SERVICE_SETDEBUGLEVEL CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0A, METHOD_BUFFERED, FILE_ANY_ACCESS) +#define DEBUG_SERVICE_SETFRAME CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0B, METHOD_BUFFERED, FILE_ANY_ACCESS) +#define DEBUG_SERVICE_GETFRAME CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0C, METHOD_BUFFERED, FILE_ANY_ACCESS) +#define DEBUG_SERVICE_OVERRIDEMODE CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0D, METHOD_BUFFERED, FILE_ANY_ACCESS) +#define DEBUG_SERVICE_DEFAULTMODE CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0E, METHOD_BUFFERED, FILE_ANY_ACCESS) +#define DEBUG_SERVICE_GETSERVICETABLE 
CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0F, METHOD_BUFFERED, FILE_ANY_ACCESS) +#define DEBUG_SERVICE_WRITE2 CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x10, METHOD_BUFFERED, FILE_ANY_ACCESS) +#define DEBUG_SERVICE_WRITESTRINGCM CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x11, METHOD_BUFFERED, FILE_ANY_ACCESS) +#define DEBUG_SERVICE_WRITECM CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x12, METHOD_BUFFERED, FILE_ANY_ACCESS) +#define DEBUG_SERVICE_SETMARKER CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x13, METHOD_BUFFERED, FILE_ANY_ACCESS) +#define DEBUG_SERVICE_GETMARKER CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x14, METHOD_BUFFERED, FILE_ANY_ACCESS) +#define DEBUG_SERVICE_ISCAPTUREFRAME CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x15, METHOD_BUFFERED, FILE_ANY_ACCESS) +#define DEBUG_SERVICE_WRITELF CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x16, METHOD_BUFFERED, FILE_ANY_ACCESS) +#define DEBUG_SERVICE_READLF CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x17, METHOD_BUFFERED, FILE_ANY_ACCESS) +#define DEBUG_SERVICE_WAITFOREVENT CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x18, METHOD_BUFFERED, FILE_ANY_ACCESS) +#define DEBUG_SERVICE_SETCONNNOTIFY CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x19, METHOD_BUFFERED, FILE_ANY_ACCESS) + + +typedef enum _DBG_EVENT_ +{ + DBG_EVENT_STREAM_DATA = 1 +} DBG_EVENT; + + +/***************************************************************************** + In/Out Structures +*****************************************************************************/ +typedef struct _DBG_IN_CREATESTREAM_ +{ + union + { + IMG_CHAR *pszName; + IMG_UINT64 ui64Name; + } u; + IMG_UINT32 ui32Pages; + IMG_UINT32 ui32CapMode; + IMG_UINT32 ui32OutMode; +}DBG_IN_CREATESTREAM, *PDBG_IN_CREATESTREAM; + +typedef struct _DBG_IN_FINDSTREAM_ +{ + union + { + IMG_CHAR *pszName; + IMG_UINT64 ui64Name; + }u; + 
IMG_BOOL bResetStream; +}DBG_IN_FINDSTREAM, *PDBG_IN_FINDSTREAM; + +typedef struct _DBG_IN_WRITESTRING_ +{ + union + { + IMG_CHAR *pszString; + IMG_UINT64 ui64String; + } u; + IMG_SID hStream; + IMG_UINT32 ui32Level; +}DBG_IN_WRITESTRING, *PDBG_IN_WRITESTRING; + +typedef struct _DBG_IN_READSTRING_ +{ + union + { + IMG_CHAR *pszString; + IMG_UINT64 ui64String; + } u; + IMG_SID hStream; + IMG_UINT32 ui32StringLen; +} DBG_IN_READSTRING, *PDBG_IN_READSTRING; + +typedef struct _DBG_IN_SETDEBUGMODE_ +{ + IMG_SID hStream; + IMG_UINT32 ui32Mode; + IMG_UINT32 ui32Start; + IMG_UINT32 ui32End; + IMG_UINT32 ui32SampleRate; +} DBG_IN_SETDEBUGMODE, *PDBG_IN_SETDEBUGMODE; + +typedef struct _DBG_IN_SETDEBUGOUTMODE_ +{ + IMG_SID hStream; + IMG_UINT32 ui32Mode; +} DBG_IN_SETDEBUGOUTMODE, *PDBG_IN_SETDEBUGOUTMODE; + +typedef struct _DBG_IN_SETDEBUGLEVEL_ +{ + IMG_SID hStream; + IMG_UINT32 ui32Level; +} DBG_IN_SETDEBUGLEVEL, *PDBG_IN_SETDEBUGLEVEL; + +typedef struct _DBG_IN_SETFRAME_ +{ + IMG_SID hStream; + IMG_UINT32 ui32Frame; +} DBG_IN_SETFRAME, *PDBG_IN_SETFRAME; + +typedef struct _DBG_IN_WRITE_ +{ + union + { + IMG_UINT8 *pui8InBuffer; + IMG_UINT64 ui64InBuffer; + } u; + IMG_SID hStream; + IMG_UINT32 ui32Level; + IMG_UINT32 ui32TransferSize; +} DBG_IN_WRITE, *PDBG_IN_WRITE; + +typedef struct _DBG_IN_READ_ +{ + union + { + IMG_UINT8 *pui8OutBuffer; + IMG_UINT64 ui64OutBuffer; + } u; + IMG_SID hStream; + IMG_BOOL bReadInitBuffer; + IMG_UINT32 ui32OutBufferSize; +} DBG_IN_READ, *PDBG_IN_READ; + +typedef struct _DBG_IN_OVERRIDEMODE_ +{ + IMG_SID hStream; + IMG_UINT32 ui32Mode; +} DBG_IN_OVERRIDEMODE, *PDBG_IN_OVERRIDEMODE; + +typedef struct _DBG_IN_ISCAPTUREFRAME_ +{ + IMG_SID hStream; + IMG_BOOL bCheckPreviousFrame; +} DBG_IN_ISCAPTUREFRAME, *PDBG_IN_ISCAPTUREFRAME; + +typedef struct _DBG_IN_SETMARKER_ +{ + IMG_SID hStream; + IMG_UINT32 ui32Marker; +} DBG_IN_SETMARKER, *PDBG_IN_SETMARKER; + +typedef struct _DBG_IN_WRITE_LF_ +{ + union + { + IMG_UINT8 *pui8InBuffer; + IMG_UINT64 
ui64InBuffer; + } u; + IMG_UINT32 ui32Flags; + IMG_SID hStream; + IMG_UINT32 ui32Level; + IMG_UINT32 ui32BufferSize; +} DBG_IN_WRITE_LF, *PDBG_IN_WRITE_LF; + +/* + Flags for above struct +*/ +#define WRITELF_FLAGS_RESETBUF 0x00000001UL + +/* + Common control structure (don't duplicate control in main stream + and init phase stream). +*/ +typedef struct _DBG_STREAM_CONTROL_ +{ + IMG_BOOL bInitPhaseComplete; /*!< init phase has finished */ + IMG_UINT32 ui32Flags; /*!< flags (see DEBUG_FLAGS above) */ + + IMG_UINT32 ui32CapMode; /*!< capturing mode framed/hot key */ + IMG_UINT32 ui32OutMode; /*!< output mode, e.g. files */ + IMG_UINT32 ui32DebugLevel; + IMG_UINT32 ui32DefaultMode; + IMG_UINT32 ui32Start; /*!< first capture frame */ + IMG_UINT32 ui32End; /*!< last frame */ + IMG_UINT32 ui32Current; /*!< current frame */ + IMG_UINT32 ui32SampleRate; /*!< capture frequency */ + IMG_UINT32 ui32Reserved; +} DBG_STREAM_CONTROL, *PDBG_STREAM_CONTROL; +/* + Per-buffer control structure. +*/ +#define MAX_STREAM_NAME_LENGTH 30 +typedef struct _DBG_STREAM_ +{ + struct _DBG_STREAM_ *psNext; + struct _DBG_STREAM_ *psInitStream; + DBG_STREAM_CONTROL *psCtrl; + IMG_BOOL bCircularAllowed; + IMG_PVOID pvBase; + IMG_UINT32 ui32Size; + IMG_UINT32 ui32RPtr; + IMG_UINT32 ui32WPtr; + IMG_UINT32 ui32DataWritten; + IMG_UINT32 ui32Marker; /*!< marker for file splitting */ + IMG_UINT32 ui32InitPhaseWOff; /*!< snapshot offset for init phase end for follow-on pdump */ + IMG_CHAR szName[MAX_STREAM_NAME_LENGTH]; /* Give this a size, some compilers don't like [] */ +} DBG_STREAM,*PDBG_STREAM; + +/* + * Allows dbgdrv to notify services when events happen, e.g. pdump.exe starts. + * (better than resetting psDevInfo->psKernelCCBInfo->ui32CCBDumpWOff = 0 + * in SGXGetClientInfoKM.) 
+ */ +typedef struct _DBGKM_CONNECT_NOTIFIER_ +{ + IMG_VOID (IMG_CALLCONV *pfnConnectNotifier) (IMG_VOID); +} DBGKM_CONNECT_NOTIFIER, *PDBGKM_CONNECT_NOTIFIER; + +/***************************************************************************** + Kernel mode service table +*****************************************************************************/ +typedef struct _DBGKM_SERVICE_TABLE_ +{ + IMG_UINT32 ui32Size; + IMG_VOID * (IMG_CALLCONV *pfnCreateStream) (IMG_CHAR * pszName,IMG_UINT32 ui32CapMode,IMG_UINT32 ui32OutMode,IMG_UINT32 ui32Flags,IMG_UINT32 ui32Pages); + IMG_VOID (IMG_CALLCONV *pfnDestroyStream) (PDBG_STREAM psStream); + IMG_VOID * (IMG_CALLCONV *pfnFindStream) (IMG_CHAR * pszName, IMG_BOOL bResetInitBuffer); + IMG_UINT32 (IMG_CALLCONV *pfnWriteString) (PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Level); + IMG_UINT32 (IMG_CALLCONV *pfnReadString) (PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Limit); + IMG_UINT32 (IMG_CALLCONV *pfnWriteBIN) (PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level); + IMG_UINT32 (IMG_CALLCONV *pfnReadBIN) (PDBG_STREAM psStream,IMG_BOOL bReadInitBuffer, IMG_UINT32 ui32OutBufferSize,IMG_UINT8 *pui8OutBuf); + IMG_VOID (IMG_CALLCONV *pfnSetCaptureMode) (PDBG_STREAM psStream,IMG_UINT32 ui32CapMode,IMG_UINT32 ui32Start,IMG_UINT32 ui32Stop,IMG_UINT32 ui32SampleRate); + IMG_VOID (IMG_CALLCONV *pfnSetOutputMode) (PDBG_STREAM psStream,IMG_UINT32 ui32OutMode); + IMG_VOID (IMG_CALLCONV *pfnSetDebugLevel) (PDBG_STREAM psStream,IMG_UINT32 ui32DebugLevel); + IMG_VOID (IMG_CALLCONV *pfnSetFrame) (PDBG_STREAM psStream,IMG_UINT32 ui32Frame); + IMG_UINT32 (IMG_CALLCONV *pfnGetFrame) (PDBG_STREAM psStream); + IMG_VOID (IMG_CALLCONV *pfnOverrideMode) (PDBG_STREAM psStream,IMG_UINT32 ui32Mode); + IMG_VOID (IMG_CALLCONV *pfnDefaultMode) (PDBG_STREAM psStream); + IMG_UINT32 (IMG_CALLCONV *pfnDBGDrivWrite2) (PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 
ui32Level); + IMG_UINT32 (IMG_CALLCONV *pfnWriteStringCM) (PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Level); + IMG_UINT32 (IMG_CALLCONV *pfnWriteBINCM) (PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level); + IMG_VOID (IMG_CALLCONV *pfnSetMarker) (PDBG_STREAM psStream,IMG_UINT32 ui32Marker); + IMG_UINT32 (IMG_CALLCONV *pfnGetMarker) (PDBG_STREAM psStream); + IMG_VOID (IMG_CALLCONV *pfnStartInitPhase) (PDBG_STREAM psStream); + IMG_VOID (IMG_CALLCONV *pfnStopInitPhase) (PDBG_STREAM psStream); + IMG_BOOL (IMG_CALLCONV *pfnIsCaptureFrame) (PDBG_STREAM psStream, IMG_BOOL bCheckPreviousFrame); + IMG_UINT32 (IMG_CALLCONV *pfnWriteLF) (PDBG_STREAM psStream, IMG_UINT8 *pui8InBuf, IMG_UINT32 ui32InBuffSize, IMG_UINT32 ui32Level, IMG_UINT32 ui32Flags); + IMG_UINT32 (IMG_CALLCONV *pfnReadLF) (PDBG_STREAM psStream, IMG_UINT32 ui32OutBuffSize, IMG_UINT8 *pui8OutBuf); + IMG_UINT32 (IMG_CALLCONV *pfnGetStreamOffset) (PDBG_STREAM psStream); + IMG_VOID (IMG_CALLCONV *pfnSetStreamOffset) (PDBG_STREAM psStream, IMG_UINT32 ui32StreamOffset); + IMG_BOOL (IMG_CALLCONV *pfnIsLastCaptureFrame) (PDBG_STREAM psStream); + IMG_VOID (IMG_CALLCONV *pfnWaitForEvent) (DBG_EVENT eEvent); + IMG_VOID (IMG_CALLCONV *pfnSetConnectNotifier) (DBGKM_CONNECT_NOTIFIER fn_notifier); + IMG_UINT32 (IMG_CALLCONV *pfnWritePersist) (PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level); +} DBGKM_SERVICE_TABLE, *PDBGKM_SERVICE_TABLE; + +#if defined(__linux__) +/***************************************************************************** + Function to export service table from debug driver to the PDUMP component. 
+*****************************************************************************/ +IMG_VOID DBGDrvGetServiceTable(DBGKM_SERVICE_TABLE **fn_table); +#endif + + +#endif +/***************************************************************************** + End of file (DBGDRVIF.H) +*****************************************************************************/ diff --git a/sgx_km/eurasia_km/include4/img_defs.h b/sgx_km/eurasia_km/include4/img_defs.h new file mode 100644 index 0000000..9350cf6 --- /dev/null +++ b/sgx_km/eurasia_km/include4/img_defs.h @@ -0,0 +1,227 @@ +/*************************************************************************/ /*! +@Title Common header containing type definitions for portability +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Contains variable and structure definitions. Any platform + specific types should be defined in this file. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#if !defined (__IMG_DEFS_H__) +#define __IMG_DEFS_H__ + +#include "img_types.h" + +typedef enum img_tag_TriStateSwitch +{ + IMG_ON = 0x00, + IMG_OFF, + IMG_IGNORE + +} img_TriStateSwitch, * img_pTriStateSwitch; + +#define IMG_SUCCESS 0 + +#define IMG_NO_REG 1 + +#if defined (NO_INLINE_FUNCS) + #define INLINE + #define FORCE_INLINE +#else +#if defined (__cplusplus) + #define INLINE inline + #define FORCE_INLINE inline +#else +#if !defined(INLINE) + #define INLINE __inline +#endif + #define FORCE_INLINE static __inline +#endif +#endif + + +/* Use this in any file, or use attributes under GCC - see below */ +#ifndef PVR_UNREFERENCED_PARAMETER +#define PVR_UNREFERENCED_PARAMETER(param) (param) = (param) +#endif + +/* The best way to supress unused parameter warnings using GCC is to use a + * variable attribute. Place the unref__ between the type and name of an + * unused parameter in a function parameter list, eg `int unref__ var'. This + * should only be used in GCC build environments, for example, in files that + * compile only on Linux. 
Other files should use UNREFERENCED_PARAMETER */ +#ifdef __GNUC__ +#define unref__ __attribute__ ((unused)) +#else +#define unref__ +#endif + +/* + Wide character definitions +*/ +#ifndef _TCHAR_DEFINED +#if defined(UNICODE) +typedef unsigned short TCHAR, *PTCHAR, *PTSTR; +#else /* #if defined(UNICODE) */ +typedef char TCHAR, *PTCHAR, *PTSTR; +#endif /* #if defined(UNICODE) */ +#define _TCHAR_DEFINED +#endif /* #ifndef _TCHAR_DEFINED */ + + + #if defined(__linux__) || defined(__QNXNTO__) || defined(__METAG) + + #define IMG_CALLCONV + #define IMG_INTERNAL __attribute__((visibility("hidden"))) + #define IMG_EXPORT __attribute__((visibility("default"))) + #define IMG_IMPORT + #define IMG_RESTRICT __restrict__ + #if defined(USE_64BIT_COMPAT) + #define IMG_COMPAT __attribute__ ((packed)) + #else + #define IMG_COMPAT + #endif + + #else + #error("define an OS") + #endif + +// Use default definition if not overridden +#ifndef IMG_ABORT + #define IMG_ABORT() abort() +#endif + +#ifndef IMG_MALLOC + #define IMG_MALLOC(A) malloc (A) +#endif + +#ifndef IMG_FREE + #define IMG_FREE(A) free (A) +#endif + +#define IMG_CONST const + +#if defined(__GNUC__) +#define IMG_FORMAT_PRINTF(x,y) __attribute__((format(printf,x,y))) +#else +#define IMG_FORMAT_PRINTF(x,y) +#endif + +/* + * Cleanup request defines + */ +#define CLEANUP_WITH_POLL IMG_FALSE +#define FORCE_CLEANUP IMG_TRUE + +#if defined (_WIN64) +#define IMG_UNDEF (~0ULL) +#else +#define IMG_UNDEF (~0UL) +#endif + +/* + Do the right thing when using printf to output cpu addresses, + depending on architecture. + */ +#if defined (_WIN64) + #define UINTPTR_FMT "%016llX" +#else + #if defined (__x86_64__) + #define UINTPTR_FMT "%016lX" + #else + #define UINTPTR_FMT "%08lX" + #endif +#endif + +/* + Similarly for DEV_ and SYS_ PHYSADDRs, but this is dependent on 32/36-bit MMU + capability, in addition to host architecture. 
+ */ +#if IMG_ADDRSPACE_PHYSADDR_BITS == 32 + #if defined(IMG_UINT32_IS_ULONG) + #define CPUPADDR_FMT "%08lX" + #define DEVPADDR_FMT "%08lX" + #define SYSPADDR_FMT "%08lX" + #else + #define CPUPADDR_FMT "%08X" + #define DEVPADDR_FMT "%08X" + #define SYSPADDR_FMT "%08X" + #endif +#else + #if defined(__x86_64__) + #define CPUPADDR_FMT "%016lX" + #define DEVPADDR_FMT "%016lX" + #define SYSPADDR_FMT "%016lX" + #else + + #define CPUPADDR_FMT "%016llX" + #define DEVPADDR_FMT "%016llX" + #define SYSPADDR_FMT "%016llX" + #endif +#endif + +/* + Define a printf format macro for the length property of the format-specifier + for size_t, that allows avoidance of C99 dependency on compilers that don't + support this, while still ensuring that whatever the size of size_t (eg 32, + 64 bit Linux builds, or Win32/64 builds), a size_t (or IMG_SIZE_T) can be + passed to printf-type functions without a cast. +*/ +#if defined LINUX + /* Use C99 format specifier where possible */ + #define SIZE_T_FMT_LEN "z" +#elif defined _WIN64 + #define SIZE_T_FMT_LEN "I" +#else + #define SIZE_T_FMT_LEN "l" /* May need to be updated as required, for other OSs */ +#endif + + +#if defined (__x86_64__) + #define IMG_UINT64_FMT "l" +#else + #define IMG_UINT64_FMT "ll" /* May need to be updated as required, for other OSs */ +#endif + +/* + Some versions of MSVC don't have snprintf, vsnprintf in their CRTs. + Remap to the deprecated unix compatibility versions. +*/ + +#endif /* #if !defined (__IMG_DEFS_H__) */ +/***************************************************************************** + End of file (IMG_DEFS.H) +*****************************************************************************/ diff --git a/sgx_km/eurasia_km/include4/img_types.h b/sgx_km/eurasia_km/include4/img_types.h new file mode 100644 index 0000000..6d2761c --- /dev/null +++ b/sgx_km/eurasia_km/include4/img_types.h @@ -0,0 +1,231 @@ +/*************************************************************************/ /*! 
+@Title Global types for use by IMG APIs +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Defines type aliases for use by IMG APIs. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef __IMG_TYPES_H__
+#define __IMG_TYPES_H__
+
+#if defined(LINUX) && defined(__KERNEL__)
+ #include "kernel_types.h"
+#elif defined(LINUX) || defined(__QNXNTO__) || defined(__METAG)
+ #include <stddef.h> /* NULL */
+ #include <stdint.h> /* intX_t/uintX_t, format specifiers */
+ #include <limits.h> /* INT_MIN, etc */
+#else
+ #error C99 support not set up for this build
+#endif
+
+/* define all address space bit depths: */
+/* CPU virtual address space defaults to 32bits */
+#if !defined(IMG_ADDRSPACE_CPUVADDR_BITS)
+#define IMG_ADDRSPACE_CPUVADDR_BITS 32
+#endif
+
+/* Physical address space defaults to 32bits */
+#if !defined(IMG_ADDRSPACE_PHYSADDR_BITS)
+#define IMG_ADDRSPACE_PHYSADDR_BITS 32
+#endif
+
+typedef unsigned int IMG_UINT, *IMG_PUINT;
+typedef signed int IMG_INT, *IMG_PINT;
+
+typedef uint8_t IMG_UINT8, *IMG_PUINT8;
+typedef uint8_t IMG_BYTE, *IMG_PBYTE;
+typedef int8_t IMG_INT8, *IMG_PINT8;
+typedef char IMG_CHAR, *IMG_PCHAR;
+
+typedef uint16_t IMG_UINT16, *IMG_PUINT16;
+typedef int16_t IMG_INT16, *IMG_PINT16;
+typedef uint32_t IMG_UINT32, *IMG_PUINT32;
+typedef int32_t IMG_INT32, *IMG_PINT32;
+
+#define IMG_UINT32_MAX UINT32_MAX
+
+typedef uint64_t IMG_UINT64, *IMG_PUINT64;
+typedef int64_t IMG_INT64, *IMG_PINT64;
+
+#if !(defined(LINUX) && defined (__KERNEL__))
+/* Linux kernel mode does not use floating point */
+typedef float IMG_FLOAT, *IMG_PFLOAT;
+typedef double
IMG_DOUBLE, *IMG_PDOUBLE; +#endif + +typedef enum tag_img_bool +{ + IMG_FALSE = 0, + IMG_TRUE = 1, + IMG_FORCE_ALIGN = 0x7FFFFFFF +} IMG_BOOL, *IMG_PBOOL; + +typedef void IMG_VOID, *IMG_PVOID; + +typedef IMG_INT32 IMG_RESULT; + +#if defined(_WIN64) + typedef unsigned __int64 IMG_UINTPTR_T; + typedef signed __int64 IMG_INTPTR_T; + typedef signed __int64 IMG_PTRDIFF_T; + typedef IMG_UINT64 IMG_SIZE_T; +#else +#if defined (__x86_64__) || !defined(USE_64BIT_COMPAT) || defined (__KERNEL__) + typedef size_t IMG_SIZE_T; +#else + typedef IMG_UINT64 IMG_SIZE_T; +#endif + typedef uintptr_t IMG_UINTPTR_T; +#endif + +#if defined(LINUX) && defined (__KERNEL__) + typedef IMG_PVOID IMG_HANDLE; +#elif defined(LINUX) + #if defined(USE_64BIT_COMPAT) + typedef IMG_UINT64 IMG_HANDLE; + #else + typedef IMG_UINT32 IMG_HANDLE; + #endif +#else +typedef void *IMG_HANDLE; +#endif + +typedef void** IMG_HVOID, * IMG_PHVOID; + +#define IMG_NULL 0 + +/* services/stream ID */ +typedef IMG_UINTPTR_T IMG_SID; + +typedef IMG_UINTPTR_T IMG_EVENTSID; + +/* + * Address types. + * All types used to refer to a block of memory are wrapped in structures + * to enforce some degree of type safety, i.e. a IMG_DEV_VIRTADDR cannot + * be assigned to a variable of type IMG_DEV_PHYADDR because they are not the + * same thing. + * + * There is an assumption that the system contains at most one non-cpu mmu, + * and a memory block is only mapped by the MMU once. + * + * Different devices could have offset views of the physical address space. 
+ * + */ + + +/* + * + * +------------+ +------------+ +------------+ +------------+ + * | CPU | | DEV | | DEV | | DEV | + * +------------+ +------------+ +------------+ +------------+ + * | | | | + * | PVOID |IMG_DEV_VIRTADDR |IMG_DEV_VIRTADDR | + * | \-------------------/ | + * | | | + * +------------+ +------------+ | + * | MMU | | MMU | | + * +------------+ +------------+ | + * | | | + * | | | + * | | | + * +--------+ +---------+ +--------+ + * | Offset | | (Offset)| | Offset | + * +--------+ +---------+ +--------+ + * | | IMG_DEV_PHYADDR | + * | | | + * | | IMG_DEV_PHYADDR | + * +---------------------------------------------------------------------+ + * | System Address bus | + * +---------------------------------------------------------------------+ + * + */ + +typedef IMG_PVOID IMG_CPU_VIRTADDR; + +/* device virtual address */ +typedef struct _IMG_DEV_VIRTADDR +{ + /* device virtual addresses are 32bit for now */ + IMG_UINT32 uiAddr; +#define IMG_CAST_TO_DEVVADDR_UINT(var) (IMG_UINT32)(var) + +} IMG_DEV_VIRTADDR; + +typedef IMG_UINT32 IMG_DEVMEM_SIZE_T; + +/* cpu physical address */ +typedef struct _IMG_CPU_PHYADDR +{ + /* variable sized type (32,64) */ +#if IMG_ADDRSPACE_PHYSADDR_BITS == 32 + /* variable sized type (32,64) */ + IMG_UINT32 uiAddr; +#else + IMG_UINT64 uiAddr; +#endif +} IMG_CPU_PHYADDR; + +/* device physical address */ +typedef struct _IMG_DEV_PHYADDR +{ +#if IMG_ADDRSPACE_PHYSADDR_BITS == 32 + /* variable sized type (32,64) */ + IMG_UINT32 uiAddr; +#else + IMG_UINT64 uiAddr; +#endif +} IMG_DEV_PHYADDR; + +/* system physical address */ +typedef struct _IMG_SYS_PHYADDR +{ + /* variable sized type (32,64) */ +#if IMG_ADDRSPACE_PHYSADDR_BITS == 32 + /* variable sized type (32,64) */ + IMG_UINT32 uiAddr; +#else + IMG_UINT64 uiAddr; +#endif +} IMG_SYS_PHYADDR; + +#include "img_defs.h" + +#endif /* __IMG_TYPES_H__ */ +/****************************************************************************** + End of file (img_types.h) 
+******************************************************************************/ diff --git a/sgx_km/eurasia_km/include4/kernel_types.h b/sgx_km/eurasia_km/include4/kernel_types.h new file mode 100644 index 0000000..b53da19 --- /dev/null +++ b/sgx_km/eurasia_km/include4/kernel_types.h @@ -0,0 +1,136 @@ +/*************************************************************************/ /*! +@Title C99-compatible types and definitions for Linux kernel code +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. 
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/kernel.h>
+
+/* Limits of specified-width integer types */
+
+/* S8_MIN, etc were added in kernel version 3.14. The other versions are for
+ * earlier kernels. They can be removed once older kernels don't need to be
+ * supported.
+ */
+#ifdef S8_MIN
+ #define INT8_MIN S8_MIN
+#else
+ #define INT8_MIN (-128)
+#endif
+
+#ifdef S8_MAX
+ #define INT8_MAX S8_MAX
+#else
+ #define INT8_MAX 127
+#endif
+
+#ifdef U8_MAX
+ #define UINT8_MAX U8_MAX
+#else
+ #define UINT8_MAX 0xFF
+#endif
+
+#ifdef S16_MIN
+ #define INT16_MIN S16_MIN
+#else
+ #define INT16_MIN (-32768)
+#endif
+
+#ifdef S16_MAX
+ #define INT16_MAX S16_MAX
+#else
+ #define INT16_MAX 32767
+#endif
+
+#ifdef U16_MAX
+ #define UINT16_MAX U16_MAX
+#else
+ #define UINT16_MAX 0xFFFF
+#endif
+
+#ifdef S32_MIN
+ #define INT32_MIN S32_MIN
+#else
+ #define INT32_MIN (-2147483647 - 1)
+#endif
+
+#ifdef S32_MAX
+ #define INT32_MAX S32_MAX
+#else
+ #define INT32_MAX 2147483647
+#endif
+
+#ifdef U32_MAX
+ #define UINT32_MAX U32_MAX
+#else
+ #define UINT32_MAX 0xFFFFFFFF
+#endif
+
+#ifdef S64_MIN
+ #define INT64_MIN S64_MIN
+#else
+ #define INT64_MIN (-9223372036854775807LL)
+#endif
+
+#ifdef S64_MAX
+ #define INT64_MAX S64_MAX
+#else
+ #define INT64_MAX 9223372036854775807LL
+#endif
+
+#ifdef U64_MAX
+ #define UINT64_MAX U64_MAX
+#else + #define UINT64_MAX 0xFFFFFFFFFFFFFFFFULL +#endif + +/* Macros for integer constants */ +#define INT8_C S8_C +#define UINT8_C U8_C +#define INT16_C S16_C +#define UINT16_C U16_C +#define INT32_C S32_C +#define UINT32_C U32_C +#define INT64_C S64_C +#define UINT64_C U64_C + +/* Format conversion of integer types */ +/* Only define PRIX64 for the moment, as this is the only format macro that + * img_types.h needs. + */ +#define PRIX64 "llX" diff --git a/sgx_km/eurasia_km/include4/pdumpdefs.h b/sgx_km/eurasia_km/include4/pdumpdefs.h new file mode 100644 index 0000000..2ba36c3 --- /dev/null +++ b/sgx_km/eurasia_km/include4/pdumpdefs.h @@ -0,0 +1,127 @@ +/*************************************************************************/ /*! +@Title PDUMP definitions header +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description PDUMP definitions header +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#if !defined (__PDUMPDEFS_H__) +#define __PDUMPDEFS_H__ + +typedef enum _PDUMP_PIXEL_FORMAT_ +{ + PVRSRV_PDUMP_PIXEL_FORMAT_UNSUPPORTED = 0, + PVRSRV_PDUMP_PIXEL_FORMAT_RGB8 = 1, + PVRSRV_PDUMP_PIXEL_FORMAT_RGB332 = 2, + PVRSRV_PDUMP_PIXEL_FORMAT_KRGB555 = 3, + PVRSRV_PDUMP_PIXEL_FORMAT_RGB565 = 4, + PVRSRV_PDUMP_PIXEL_FORMAT_ARGB4444 = 5, + PVRSRV_PDUMP_PIXEL_FORMAT_ARGB1555 = 6, + PVRSRV_PDUMP_PIXEL_FORMAT_RGB888 = 7, + PVRSRV_PDUMP_PIXEL_FORMAT_ARGB8888 = 8, + PVRSRV_PDUMP_PIXEL_FORMAT_YUV8 = 9, + PVRSRV_PDUMP_PIXEL_FORMAT_AYUV4444 = 10, + PVRSRV_PDUMP_PIXEL_FORMAT_VY0UY1_8888 = 11, + PVRSRV_PDUMP_PIXEL_FORMAT_UY0VY1_8888 = 12, + PVRSRV_PDUMP_PIXEL_FORMAT_Y0UY1V_8888 = 13, + PVRSRV_PDUMP_PIXEL_FORMAT_Y0VY1U_8888 = 14, + PVRSRV_PDUMP_PIXEL_FORMAT_YUV888 = 15, + PVRSRV_PDUMP_PIXEL_FORMAT_UYVY10101010 = 16, + PVRSRV_PDUMP_PIXEL_FORMAT_VYAUYA8888 = 17, + PVRSRV_PDUMP_PIXEL_FORMAT_AYUV8888 = 18, + PVRSRV_PDUMP_PIXEL_FORMAT_AYUV2101010 = 19, + PVRSRV_PDUMP_PIXEL_FORMAT_YUV101010 = 20, + PVRSRV_PDUMP_PIXEL_FORMAT_PL12Y8 = 21, + PVRSRV_PDUMP_PIXEL_FORMAT_YUV_IMC2 = 22, + PVRSRV_PDUMP_PIXEL_FORMAT_YUV_YV12 = 23, + PVRSRV_PDUMP_PIXEL_FORMAT_YUV_PL8 = 24, + PVRSRV_PDUMP_PIXEL_FORMAT_YUV_PL12 = 25, + PVRSRV_PDUMP_PIXEL_FORMAT_422PL12YUV8 = 26, + PVRSRV_PDUMP_PIXEL_FORMAT_420PL12YUV8 = 27, + PVRSRV_PDUMP_PIXEL_FORMAT_PL12Y10 = 28, + PVRSRV_PDUMP_PIXEL_FORMAT_422PL12YUV10 = 29, + PVRSRV_PDUMP_PIXEL_FORMAT_420PL12YUV10 = 30, + PVRSRV_PDUMP_PIXEL_FORMAT_ABGR8888 = 31, + PVRSRV_PDUMP_PIXEL_FORMAT_BGRA8888 = 32, + PVRSRV_PDUMP_PIXEL_FORMAT_ARGB8332 = 33, + PVRSRV_PDUMP_PIXEL_FORMAT_RGB555 = 34, + PVRSRV_PDUMP_PIXEL_FORMAT_F16 = 35, + PVRSRV_PDUMP_PIXEL_FORMAT_F32 = 36, + PVRSRV_PDUMP_PIXEL_FORMAT_L16 = 37, + PVRSRV_PDUMP_PIXEL_FORMAT_L32 = 38, + PVRSRV_PDUMP_PIXEL_FORMAT_RGBA8888 = 39, + PVRSRV_PDUMP_PIXEL_FORMAT_ABGR4444 = 40, + PVRSRV_PDUMP_PIXEL_FORMAT_RGBA4444 = 41, + 
PVRSRV_PDUMP_PIXEL_FORMAT_BGRA4444 = 42, + PVRSRV_PDUMP_PIXEL_FORMAT_ABGR1555 = 43, + PVRSRV_PDUMP_PIXEL_FORMAT_RGBA5551 = 44, + PVRSRV_PDUMP_PIXEL_FORMAT_BGRA5551 = 45, + PVRSRV_PDUMP_PIXEL_FORMAT_BGR565 = 46, + PVRSRV_PDUMP_PIXEL_FORMAT_A8 = 47, + + PVRSRV_PDUMP_PIXEL_FORMAT_FORCE_I32 = 0x7fffffff + +} PDUMP_PIXEL_FORMAT; + +typedef enum _PDUMP_MEM_FORMAT_ +{ + PVRSRV_PDUMP_MEM_FORMAT_STRIDE = 0, + PVRSRV_PDUMP_MEM_FORMAT_RESERVED = 1, + PVRSRV_PDUMP_MEM_FORMAT_TILED = 8, + PVRSRV_PDUMP_MEM_FORMAT_TWIDDLED = 9, + PVRSRV_PDUMP_MEM_FORMAT_HYBRID = 10, + + PVRSRV_PDUMP_MEM_FORMAT_FORCE_I32 = 0x7fffffff +} PDUMP_MEM_FORMAT; + +typedef enum _PDUMP_POLL_OPERATOR +{ + PDUMP_POLL_OPERATOR_EQUAL = 0, + PDUMP_POLL_OPERATOR_LESS = 1, + PDUMP_POLL_OPERATOR_LESSEQUAL = 2, + PDUMP_POLL_OPERATOR_GREATER = 3, + PDUMP_POLL_OPERATOR_GREATEREQUAL = 4, + PDUMP_POLL_OPERATOR_NOTEQUAL = 5, + PDUMP_POLL_OPERATOR_FORCE_I32 = 0x7fffffff +} PDUMP_POLL_OPERATOR; + + +#endif /* __PDUMPDEFS_H__ */ + +/***************************************************************************** + End of file (pdumpdefs.h) +*****************************************************************************/ diff --git a/sgx_km/eurasia_km/include4/pvr_debug.h b/sgx_km/eurasia_km/include4/pvr_debug.h new file mode 100644 index 0000000..be78c77 --- /dev/null +++ b/sgx_km/eurasia_km/include4/pvr_debug.h @@ -0,0 +1,283 @@ +/*************************************************************************/ /*! +@Title PVR Debug Declarations +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Provides debug functionality +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#ifndef __PVR_DEBUG_H__ +#define __PVR_DEBUG_H__ + + +#include "img_types.h" + + +#if defined (__cplusplus) +extern "C" { +#endif + +#define PVR_MAX_DEBUG_MESSAGE_LEN (512) + +/* These are privately used by pvr_debug, use the PVR_DBG_ defines instead */ +#define DBGPRIV_FATAL 0x001UL +#define DBGPRIV_ERROR 0x002UL +#define DBGPRIV_BUFFERED 0x004UL +#define DBGPRIV_WARNING 0x008UL +#define DBGPRIV_MESSAGE 0x010UL +#define DBGPRIV_VERBOSE 0x020UL +#define DBGPRIV_CALLTRACE 0x040UL +#define DBGPRIV_ALLOC 0x080UL +#define DBGPRIV_DBGDRV_MESSAGE 0x100UL + +#define DBGPRIV_DBGLEVEL_COUNT 9 + +#if !defined(PVRSRV_NEED_PVR_ASSERT) && defined(DEBUG) +#define PVRSRV_NEED_PVR_ASSERT +#endif + +#if defined(PVRSRV_NEED_PVR_ASSERT) && !defined(PVRSRV_NEED_PVR_DPF) +#define PVRSRV_NEED_PVR_DPF +#endif + +#if !defined(PVRSRV_NEED_PVR_TRACE) && (defined(DEBUG) || defined(TIMING)) +#define PVRSRV_NEED_PVR_TRACE +#endif + +/* PVR_ASSERT() and PVR_DBG_BREAK handling */ + +#if defined(PVRSRV_NEED_PVR_ASSERT) + +#if defined(LINUX) && defined(__KERNEL__) +/* In Linux kernel mode, use BUG() directly. This produces the correct + filename and line number in the panic message. 
*/ +#define PVR_ASSERT(EXPR) do \ + { \ + if (!(EXPR)) \ + { \ + PVRSRVDebugPrintf(DBGPRIV_FATAL, __FILE__, __LINE__, \ + "Debug assertion failed!"); \ + BUG(); \ + } \ + } while (0) + +#else /* defined(LINUX) && defined(__KERNEL__) */ + +IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVDebugAssertFail(const IMG_CHAR *pszFile, + IMG_UINT32 ui32Line); + +#if defined(LINUX) + #define PVR_ASSERT(EXPR) do \ + { \ + if (!(EXPR)) \ + PVRSRVDebugAssertFail(__FILE__, __LINE__); \ + } while (0) +#else + #if defined (__QNXNTO__) + #define PVR_ASSERT(EXPR) if (!(EXPR)) PVRSRVDebugAssertFail(__FILE__, __LINE__); + #else + #define PVR_ASSERT(EXPR) if (!(EXPR)) PVRSRVDebugAssertFail(__FILE__, __LINE__) + #endif +#endif + +#endif /* defined(LINUX) && defined(__KERNEL__) */ + + + #if defined(LINUX) && defined(__KERNEL__) + #define PVR_DBG_BREAK BUG() + #else + #define PVR_DBG_BREAK PVRSRVDebugAssertFail(__FILE__, __LINE__) + #endif + +#else /* defined(PVRSRV_NEED_PVR_ASSERT) */ + + #define PVR_ASSERT(EXPR) + #define PVR_DBG_BREAK + +#endif /* defined(PVRSRV_NEED_PVR_ASSERT) */ + + +/* PVR_DPF() handling */ + +#if defined(PVRSRV_NEED_PVR_DPF) + +#if defined(PVRSRV_NEW_PVR_DPF) + + /* New logging mechanism */ + #define PVR_DBG_FATAL DBGPRIV_FATAL + #define PVR_DBG_ERROR DBGPRIV_ERROR + #define PVR_DBG_BUFFERED DBGPRIV_BUFFERED + #define PVR_DBG_WARNING DBGPRIV_WARNING + #define PVR_DBG_MESSAGE DBGPRIV_MESSAGE + #define PVR_DBG_VERBOSE DBGPRIV_VERBOSE + #define PVR_DBG_CALLTRACE DBGPRIV_CALLTRACE + #define PVR_DBG_ALLOC DBGPRIV_ALLOC + #define PVR_DBGDRIV_MESSAGE DBGPRIV_DBGDRV_MESSAGE + + /* These levels are always on with PVRSRV_NEED_PVR_DPF */ + #define __PVR_DPF_0x001UL(x...) PVRSRVDebugPrintf(DBGPRIV_FATAL, x) + #define __PVR_DPF_0x002UL(x...) PVRSRVDebugPrintf(DBGPRIV_ERROR, x) + #define __PVR_DPF_0x004UL(x...) PVRSRVDebugPrintf(DBGPRIV_BUFFERED, x) + + /* Some are compiled out completely in release builds */ +#if defined(DEBUG) + #define __PVR_DPF_0x008UL(x...) 
PVRSRVDebugPrintf(DBGPRIV_WARNING, x) + #define __PVR_DPF_0x010UL(x...) PVRSRVDebugPrintf(DBGPRIV_MESSAGE, x) + #define __PVR_DPF_0x020UL(x...) PVRSRVDebugPrintf(DBGPRIV_VERBOSE, x) + #define __PVR_DPF_0x040UL(x...) PVRSRVDebugPrintf(DBGPRIV_CALLTRACE, x) + #define __PVR_DPF_0x080UL(x...) PVRSRVDebugPrintf(DBGPRIV_ALLOC, x) + #define __PVR_DPF_0x100UL(x...) PVRSRVDebugPrintf(DBGPRIV_DBGDRV_MESSAGE, x) + +#elif defined(PVR_DBGPRIV_LEVEL) + +#if (PVR_DBGPRIV_LEVEL >= DBGPRIV_WARNING) + #define __PVR_DPF_0x008UL(x...) PVRSRVDebugPrintf(DBGPRIV_WARNING, x) +#else + #define __PVR_DPF_0x008UL(x...) +#endif + +#if (PVR_DBGPRIV_LEVEL >= DBGPRIV_MESSAGE) + #define __PVR_DPF_0x010UL(x...) PVRSRVDebugPrintf(DBGPRIV_MESSAGE, x) +#else + #define __PVR_DPF_0x010UL(x...) +#endif + +#if (PVR_DBGPRIV_LEVEL >= DBGPRIV_VERBOSE) + #define __PVR_DPF_0x020UL(x...) PVRSRVDebugPrintf(DBGPRIV_VERBOSE, x) +#else + #define __PVR_DPF_0x020UL(x...) +#endif + +#if (PVR_DBGPRIV_LEVEL >= DBGPRIV_CALLTRACE) + #define __PVR_DPF_0x040UL(x...) PVRSRVDebugPrintf(DBGPRIV_CALLTRACE, x) +#else + #define __PVR_DPF_0x040UL(x...) +#endif + +#if (PVR_DBGPRIV_LEVEL >= DBGPRIV_ALLOC) + #define __PVR_DPF_0x080UL(x...) PVRSRVDebugPrintf(DBGPRIV_ALLOC, x) +#else + #define __PVR_DPF_0x080UL(x...) +#endif + +#if (PVR_DBGPRIV_LEVEL >= DBGPRIV_DBGDRV_MESSAGE) + #define __PVR_DPF_0x100UL(x...) PVRSRVDebugPrintf(DBGPRIV_DBGDRV_MESSAGE, x) +#else + #define __PVR_DPF_0x100UL(x...) +#endif + +#else + #define __PVR_DPF_0x008UL(x...) + #define __PVR_DPF_0x010UL(x...) + #define __PVR_DPF_0x020UL(x...) + #define __PVR_DPF_0x040UL(x...) + #define __PVR_DPF_0x080UL(x...) + #define __PVR_DPF_0x100UL(x...) +#endif + + /* Translate the different log levels to separate macros + * so they can each be compiled out. + */ +#if defined(DEBUG) + #define __PVR_DPF(lvl, x...) __PVR_DPF_ ## lvl (__FILE__, __LINE__, x) +#else + #define __PVR_DPF(lvl, x...) 
__PVR_DPF_ ## lvl ("", 0, x) +#endif + + /* Get rid of the double bracketing */ + #define PVR_DPF(x) __PVR_DPF x + +#else /* defined(PVRSRV_NEW_PVR_DPF) */ + + /* Old logging mechanism */ + #define PVR_DBG_FATAL DBGPRIV_FATAL,__FILE__, __LINE__ + #define PVR_DBG_ERROR DBGPRIV_ERROR,__FILE__, __LINE__ + #define PVR_DBG_WARNING DBGPRIV_WARNING,__FILE__, __LINE__ + #define PVR_DBG_MESSAGE DBGPRIV_MESSAGE,__FILE__, __LINE__ + #define PVR_DBG_VERBOSE DBGPRIV_VERBOSE,__FILE__, __LINE__ + #define PVR_DBG_CALLTRACE DBGPRIV_CALLTRACE,__FILE__, __LINE__ + #define PVR_DBG_ALLOC DBGPRIV_ALLOC,__FILE__, __LINE__ + #define PVR_DBG_BUFFERED DBGPRIV_BUFFERED,__FILE__, __LINE__ + #define PVR_DBGDRIV_MESSAGE DBGPRIV_DBGDRV_MESSAGE, "", 0 + + #define PVR_DPF(X) PVRSRVDebugPrintf X + +#endif /* defined(PVRSRV_NEW_PVR_DPF) */ + +IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVDebugPrintf(IMG_UINT32 ui32DebugLevel, + const IMG_CHAR *pszFileName, + IMG_UINT32 ui32Line, + const IMG_CHAR *pszFormat, + ...) IMG_FORMAT_PRINTF(4, 5); + +#if defined(PVR_DBGPRIV_LEVEL) && defined(SUPPORT_ANDROID_PLATFORM) +IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVDebugPrintfSetLevel(IMG_UINT32 ui32DebugLevel); +#endif + +IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVDebugPrintfDumpCCB(void); + +#else /* defined(PVRSRV_NEED_PVR_DPF) */ + + #define PVR_DPF(X) + +#endif /* defined(PVRSRV_NEED_PVR_DPF) */ + +/* PVR_TRACE() handling */ + +#if defined(PVRSRV_NEED_PVR_TRACE) + + #define PVR_TRACE(X) PVRSRVTrace X + +IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVTrace(const IMG_CHAR* pszFormat, ... 
) + IMG_FORMAT_PRINTF(1, 2); + +#else /* defined(PVRSRV_NEED_PVR_TRACE) */ + + #define PVR_TRACE(X) + +#endif /* defined(PVRSRV_NEED_PVR_TRACE) */ + + +#if defined (__cplusplus) +} +#endif + +#endif /* __PVR_DEBUG_H__ */ + +/****************************************************************************** + End of file (pvr_debug.h) +******************************************************************************/ + diff --git a/sgx_km/eurasia_km/include4/pvr_sync_user.h b/sgx_km/eurasia_km/include4/pvr_sync_user.h new file mode 100644 index 0000000..a44293f --- /dev/null +++ b/sgx_km/eurasia_km/include4/pvr_sync_user.h @@ -0,0 +1,137 @@ +/*************************************************************************/ /*! +@File pvr_sync_user.h +@Title Userspace definitions to use the kernel sync driver +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Version numbers and strings for PVR Consumer services + components. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _PVR_SYNC_USER_H_
+#define _PVR_SYNC_USER_H_
+
+#include <linux/ioctl.h>
+
+#ifdef __KERNEL__
+#include "sgxapi_km.h"
+#else
+#include "sgxapi.h"
+#endif
+
+#include "servicesext.h" // PVRSRV_SYNC_DATA
+#include "img_types.h"
+
+/* This matches the sw_sync create ioctl data */
+struct PVR_SYNC_CREATE_IOCTL_DATA
+{
+ /* Input: Name of this sync pt. Passed to base sync driver. */
+ char name[32];
+
+ /* Input: An fd from a previous call to ALLOC ioctl. Cannot be <0. */
+ __s32 allocdSyncInfo;
+
+ /* Output: An fd returned from the CREATE ioctl.
*/ + __s32 fence; +}; + +struct PVR_SYNC_ALLOC_IOCTL_DATA +{ + /* Output: An fd returned from the ALLOC ioctl */ + __s32 fence; + + /* Output: IMG_TRUE if the timeline looked idle at alloc time */ + __u32 bTimelineIdle; +}; + +#define PVR_SYNC_DEBUG_MAX_POINTS 3 + +typedef struct +{ + /* Output: A globally unique stamp/ID for the sync */ + IMG_UINT64 ui64Stamp; + + /* Output: The WOP snapshot for the sync */ + IMG_UINT32 ui32WriteOpsPendingSnapshot; +} +PVR_SYNC_DEBUG; + +struct PVR_SYNC_DEBUG_IOCTL_DATA +{ + /* Input: Fence to acquire debug for */ + int iFenceFD; + + /* Output: Number of points merged into this fence */ + IMG_UINT32 ui32NumPoints; + + struct + { + /* Output: Metadata for sync point */ + PVR_SYNC_DEBUG sMetaData; + + /* Output: 'Live' sync information. */ + PVRSRV_SYNC_DATA sSyncData; + } + sSync[PVR_SYNC_DEBUG_MAX_POINTS]; +}; + +struct PVR_SYNC_RENAME_IOCTL_DATA +{ + /* Input: Name of timeline. */ + IMG_CHAR szName[32]; +}; + +#define PVR_SYNC_IOC_MAGIC 'W' + +#define PVR_SYNC_IOC_CREATE_FENCE \ + _IOWR(PVR_SYNC_IOC_MAGIC, 0, struct PVR_SYNC_CREATE_IOCTL_DATA) + +#define PVR_SYNC_IOC_DEBUG_FENCE \ + _IOWR(PVR_SYNC_IOC_MAGIC, 1, struct PVR_SYNC_DEBUG_IOCTL_DATA) + +#define PVR_SYNC_IOC_ALLOC_FENCE \ + _IOWR(PVR_SYNC_IOC_MAGIC, 2, struct PVR_SYNC_ALLOC_IOCTL_DATA) + +#define PVR_SYNC_IOC_RENAME \ + _IOWR(PVR_SYNC_IOC_MAGIC, 3, struct PVR_SYNC_RENAME_IOCTL_DATA) + +#define PVR_SYNC_IOC_FORCE_SW_ONLY \ + _IO(PVR_SYNC_IOC_MAGIC, 4) + +#define PVRSYNC_MODNAME "pvr_sync" + +#endif /* _PVR_SYNC_USER_H_ */ diff --git a/sgx_km/eurasia_km/include4/pvrmodule.h b/sgx_km/eurasia_km/include4/pvrmodule.h new file mode 100644 index 0000000..267c7b6 --- /dev/null +++ b/sgx_km/eurasia_km/include4/pvrmodule.h @@ -0,0 +1,48 @@ +/*************************************************************************/ /*! +@Title Module Author and License. +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef _PVRMODULE_H_ +#define _PVRMODULE_H_ + +MODULE_AUTHOR("Imagination Technologies Ltd. <gpl-support@imgtec.com>"); +MODULE_LICENSE("Dual MIT/GPL"); + +#endif /* _PVRMODULE_H_ */ diff --git a/sgx_km/eurasia_km/include4/pvrversion.h b/sgx_km/eurasia_km/include4/pvrversion.h new file mode 100644 index 0000000..b2457fe --- /dev/null +++ b/sgx_km/eurasia_km/include4/pvrversion.h @@ -0,0 +1,68 @@ +/*************************************************************************/ /*! +@File +@Title Version numbers and strings. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Version numbers and strings for PVR Consumer services + components. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef _PVRVERSION_H_ +#define _PVRVERSION_H_ + +#define PVR_STR(X) #X +#define PVR_STR2(X) PVR_STR(X) + +#define PVRVERSION_MAJ 1 +#define PVRVERSION_MIN 17 + +#define PVRVERSION_FAMILY "sgxddk" +#define PVRVERSION_BRANCHNAME "1.17" +#define PVRVERSION_BUILD 4948957 +#define PVRVERSION_BSCONTROL "SGX_DDK_Android" + +#define PVRVERSION_STRING "SGX_DDK_Android sgxddk 1.17@" PVR_STR2(PVRVERSION_BUILD) +#define PVRVERSION_STRING_SHORT "1.17@" PVR_STR2(PVRVERSION_BUILD) "" + +#define COPYRIGHT_TXT "Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved." + +#define PVRVERSION_BUILD_HI 494 +#define PVRVERSION_BUILD_LO 8957 +#define PVRVERSION_STRING_NUMERIC PVR_STR2(PVRVERSION_MAJ) "." PVR_STR2(PVRVERSION_MIN) "." PVR_STR2(PVRVERSION_BUILD_HI) "." PVR_STR2(PVRVERSION_BUILD_LO) + +#endif /* _PVRVERSION_H_ */ diff --git a/sgx_km/eurasia_km/include4/services.h b/sgx_km/eurasia_km/include4/services.h new file mode 100644 index 0000000..7ee9318 --- /dev/null +++ b/sgx_km/eurasia_km/include4/services.h @@ -0,0 +1,1641 @@ +/*************************************************************************/ /*! +@Title Services API Header +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Exported services API details +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef __SERVICES_H__ +#define __SERVICES_H__ + +#if defined (__cplusplus) +extern "C" { +#endif + +#include "img_defs.h" +#include "servicesext.h" +#include "pdumpdefs.h" + + +/* The comment below is the front page for code-generated doxygen documentation */ +/*! + ****************************************************************************** + @mainpage + This document details the APIs and implementation of the Consumer Services. + It is intended to be used in conjunction with the Consumer Services + Software Architectural Specification and the Consumer Services Software + Functional Specification. 
+ *****************************************************************************/ + +/****************************************************************************** + * #defines + *****************************************************************************/ + +/* 4k page size definition */ +#define PVRSRV_4K_PAGE_SIZE 4096UL + +#define PVRSRV_MAX_CMD_SIZE 1024/*!< max size in bytes of a command */ + +#define PVRSRV_MAX_DEVICES 16 /*!< Largest supported number of devices on the system */ + +#define EVENTOBJNAME_MAXLENGTH (64) + +/* + Flags associated with memory allocation + (bits 0-11) +*/ +#define PVRSRV_MEM_READ (1U<<0) +#define PVRSRV_MEM_WRITE (1U<<1) +#define PVRSRV_MEM_CACHE_CONSISTENT (1U<<2) +#define PVRSRV_MEM_NO_SYNCOBJ (1U<<3) +#define PVRSRV_MEM_INTERLEAVED (1U<<4) +#define PVRSRV_MEM_DUMMY (1U<<5) +#define PVRSRV_MEM_EDM_PROTECT (1U<<6) +#define PVRSRV_MEM_ZERO (1U<<7) +#define PVRSRV_MEM_USER_SUPPLIED_DEVVADDR (1U<<8) +#define PVRSRV_MEM_RAM_BACKED_ALLOCATION (1U<<9) +#define PVRSRV_MEM_NO_RESMAN (1U<<10) +#define PVRSRV_MEM_EXPORTED (1U<<11) + +/* + Heap Attribute flags + (bits 12-23) +*/ +#define PVRSRV_HAP_CACHED (1U<<12) +#define PVRSRV_HAP_UNCACHED (1U<<13) +#define PVRSRV_HAP_WRITECOMBINE (1U<<14) +#define PVRSRV_HAP_CACHETYPE_MASK (PVRSRV_HAP_CACHED|PVRSRV_HAP_UNCACHED|PVRSRV_HAP_WRITECOMBINE) +#define PVRSRV_HAP_KERNEL_ONLY (1U<<15) +#define PVRSRV_HAP_SINGLE_PROCESS (1U<<16) +#define PVRSRV_HAP_MULTI_PROCESS (1U<<17) +#define PVRSRV_HAP_FROM_EXISTING_PROCESS (1U<<18) +#define PVRSRV_HAP_NO_CPU_VIRTUAL (1U<<19) +#define PVRSRV_HAP_MAPTYPE_MASK (PVRSRV_HAP_KERNEL_ONLY \ + |PVRSRV_HAP_SINGLE_PROCESS \ + |PVRSRV_HAP_MULTI_PROCESS \ + |PVRSRV_HAP_FROM_EXISTING_PROCESS \ + |PVRSRV_HAP_NO_CPU_VIRTUAL) + +/* + Allows user allocations to override heap attributes + (Bits shared with heap flags) +*/ +#define PVRSRV_MEM_CACHED PVRSRV_HAP_CACHED +#define PVRSRV_MEM_UNCACHED PVRSRV_HAP_UNCACHED +#define PVRSRV_MEM_WRITECOMBINE PVRSRV_HAP_WRITECOMBINE + +/* 
+ Backing store flags (defined internally) + (bits 24-26) +*/ +#define PVRSRV_MEM_BACKINGSTORE_FIELD_SHIFT (24) + +/* + Per allocation/mapping flags + (bits 27-30) + */ +#define PVRSRV_MAP_NOUSERVIRTUAL (1UL<<27) +#define PVRSRV_MEM_XPROC (1U<<28) +/* Bit 29 is unused */ +#define PVRSRV_MEM_ALLOCATENONCACHEDMEM (1UL<<30) + +/* + Internal allocation/mapping flags + (bit 31) +*/ +#define PVRSRV_MEM_SPARSE (1U<<31) + + +/* + * How much context we lose on a (power) mode change + */ +#define PVRSRV_NO_CONTEXT_LOSS 0 /*!< Do not lose state on power down */ +#define PVRSRV_SEVERE_LOSS_OF_CONTEXT 1 /*!< lose state on power down */ +#define PVRSRV_PRE_STATE_CHANGE_MASK 0x80 /*!< power state change mask */ + + +/* + * Device cookie defines + */ +#define PVRSRV_DEFAULT_DEV_COOKIE (1) /*!< default device cookie */ + + +/* + * Misc Info. present flags + */ +#define PVRSRV_MISC_INFO_TIMER_PRESENT (1U<<0) +#define PVRSRV_MISC_INFO_CLOCKGATE_PRESENT (1U<<1) +#define PVRSRV_MISC_INFO_MEMSTATS_PRESENT (1U<<2) +#define PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT (1U<<3) +#define PVRSRV_MISC_INFO_DDKVERSION_PRESENT (1U<<4) +#define PVRSRV_MISC_INFO_CPUCACHEOP_PRESENT (1U<<5) +#define PVRSRV_MISC_INFO_FREEMEM_PRESENT (1U<<6) +#define PVRSRV_MISC_INFO_GET_REF_COUNT_PRESENT (1U<<7) +#define PVRSRV_MISC_INFO_GET_PAGE_SIZE_PRESENT (1U<<8) +#define PVRSRV_MISC_INFO_FORCE_SWAP_TO_SYSTEM_PRESENT (1U<<9) + +#define PVRSRV_MISC_INFO_RESET_PRESENT (1U<<31) + +/* PDUMP defines */ +#define PVRSRV_PDUMP_MAX_FILENAME_SIZE 20 +#define PVRSRV_PDUMP_MAX_COMMENT_SIZE 200 + + +/* + Flags for PVRSRVChangeDeviceMemoryAttributes call. +*/ +#define PVRSRV_CHANGEDEVMEM_ATTRIBS_CACHECOHERENT 0x00000001 + +/* + Flags for PVRSRVMapExtMemory and PVRSRVUnmapExtMemory + ALTERNATEVA - Used when mapping multiple virtual addresses to the same physical address. Set this flag on extra maps. 
+ PHYSCONTIG - Physical pages are contiguous (unused) +*/ +#define PVRSRV_MAPEXTMEMORY_FLAGS_ALTERNATEVA 0x00000001 +#define PVRSRV_MAPEXTMEMORY_FLAGS_PHYSCONTIG 0x00000002 + +/* + Flags for PVRSRVModifySyncOps + WO_INC - Used to increment "WriteOpsPending/complete of sync info" + RO_INC - Used to increment "ReadOpsPending/complete of sync info" + RO2_INC - Used to increment "ReadOps2Pending/complete of sync info" +*/ +#define PVRSRV_MODIFYSYNCOPS_FLAGS_WO_INC (1U<<0) +#define PVRSRV_MODIFYSYNCOPS_FLAGS_RO_INC (1U<<1) +#define PVRSRV_MODIFYSYNCOPS_FLAGS_RO2_INC (1U<<2) + +/* + Flags for Services connection. + Allows to define per-client policy for Services +*/ +#define SRV_FLAGS_PERSIST 0x1 +#define SRV_FLAGS_PDUMP_ACTIVE 0x2 + +/* + Pdump flags which are accessible to Services clients +*/ +#define PVRSRV_PDUMP_FLAGS_CONTINUOUS 0x1 + + +/****************************************************************************** + * Enums + *****************************************************************************/ + +/*! + ****************************************************************************** + * List of known device types. 
+ *****************************************************************************/ +typedef enum _PVRSRV_DEVICE_TYPE_ +{ + PVRSRV_DEVICE_TYPE_UNKNOWN = 0 , + PVRSRV_DEVICE_TYPE_MBX1 = 1 , + PVRSRV_DEVICE_TYPE_MBX1_LITE = 2 , + + PVRSRV_DEVICE_TYPE_M24VA = 3, + PVRSRV_DEVICE_TYPE_MVDA2 = 4, + PVRSRV_DEVICE_TYPE_MVED1 = 5, + PVRSRV_DEVICE_TYPE_MSVDX = 6, + + PVRSRV_DEVICE_TYPE_SGX = 7, + + PVRSRV_DEVICE_TYPE_VGX = 8, + + /* 3rd party devices take ext type */ + PVRSRV_DEVICE_TYPE_EXT = 9, + + PVRSRV_DEVICE_TYPE_LAST = 9, + + PVRSRV_DEVICE_TYPE_FORCE_I32 = 0x7fffffff + +} PVRSRV_DEVICE_TYPE; + +#define HEAP_ID( _dev_ , _dev_heap_idx_ ) ( ((_dev_)<<24) | ((_dev_heap_idx_)&((1<<24)-1)) ) +#define HEAP_IDX( _heap_id_ ) ( (_heap_id_)&((1<<24) - 1 ) ) +#define HEAP_DEV( _heap_id_ ) ( (_heap_id_)>>24 ) + +/* common undefined heap ID define */ +#define PVRSRV_UNDEFINED_HEAP_ID (~0LU) + +/*! + ****************************************************************************** + * User Module type + *****************************************************************************/ +typedef enum +{ + IMG_EGL = 0x00000001, + IMG_OPENGLES1 = 0x00000002, + IMG_OPENGLES2 = 0x00000003, + IMG_D3DM = 0x00000004, + IMG_SRV_UM = 0x00000005, + IMG_OPENVG = 0x00000006, + IMG_SRVCLIENT = 0x00000007, + IMG_VISTAKMD = 0x00000008, + IMG_VISTA3DNODE = 0x00000009, + IMG_VISTAMVIDEONODE = 0x0000000A, + IMG_VISTAVPBNODE = 0x0000000B, + IMG_OPENGL = 0x0000000C, + IMG_D3D = 0x0000000D, +#if defined(SUPPORT_GRAPHICS_HAL) || defined(SUPPORT_COMPOSER_HAL) + IMG_ANDROID_HAL = 0x0000000E, +#endif +#if defined(SUPPORT_OPENCL) + IMG_OPENCL = 0x0000000F, +#endif + + IMG_MODULE_UNDEF = 0xFFFFFFFF +} IMG_MODULE_ID; + + +#define APPHINT_MAX_STRING_SIZE 256 + +/*! 
+ ****************************************************************************** + * IMG data types + *****************************************************************************/ +typedef enum +{ + IMG_STRING_TYPE = 1, + IMG_FLOAT_TYPE , + IMG_UINT_TYPE , + IMG_INT_TYPE , + IMG_FLAG_TYPE +}IMG_DATA_TYPE; + + +/****************************************************************************** + * Structure definitions. + *****************************************************************************/ + +/*! + * Forward declaration + */ +typedef struct _PVRSRV_DEV_DATA_ *PPVRSRV_DEV_DATA; + +/*! + ****************************************************************************** + * Device identifier structure + *****************************************************************************/ +typedef struct _PVRSRV_DEVICE_IDENTIFIER_ +{ + PVRSRV_DEVICE_TYPE eDeviceType; /*!< Identifies the type of the device */ + PVRSRV_DEVICE_CLASS eDeviceClass; /*!< Identifies more general class of device - display/3d/mpeg etc */ + IMG_UINT32 ui32DeviceIndex; /*!< Index of the device within the system */ +#if defined(USE_64BIT_COMPAT) + IMG_UINT32 ui32Padding; +#endif +#if defined(PDUMP) + IMG_CHAR *pszPDumpDevName; /*!< Pdump memory bank name */ + IMG_CHAR *pszPDumpRegName; /*!< Pdump register bank name */ +#endif + +} PVRSRV_DEVICE_IDENTIFIER; + + +/****************************************************************************** + * Client dev info + ****************************************************************************** + */ +typedef struct _PVRSRV_CLIENT_DEV_DATA_ +{ + IMG_UINT32 ui32NumDevices; /*!< Number of services-managed devices connected */ + PVRSRV_DEVICE_IDENTIFIER asDevID[PVRSRV_MAX_DEVICES]; /*!< Device identifiers */ + PVRSRV_ERROR (*apfnDevConnect[PVRSRV_MAX_DEVICES])(PPVRSRV_DEV_DATA); /*< device-specific connection callback */ + PVRSRV_ERROR (*apfnDumpTrace[PVRSRV_MAX_DEVICES])(PPVRSRV_DEV_DATA); /*!< device-specific debug trace callback */ + +} 
PVRSRV_CLIENT_DEV_DATA; + + +/*! + ****************************************************************************** + * Kernel Services connection structure + *****************************************************************************/ +typedef struct _PVRSRV_CONNECTION_ +{ + IMG_HANDLE hServices; /*!< UM IOCTL handle */ + IMG_UINT32 ui32ProcessID; /*!< Process ID for resource locking */ + PVRSRV_CLIENT_DEV_DATA sClientDevData; /*!< Client device data */ + IMG_UINT32 ui32SrvFlags; /*!< Per-client Services flags */ +}PVRSRV_CONNECTION; + + +/*! + ****************************************************************************** + * This structure allows the user mode glue code to have an OS independent + * set of prototypes. + *****************************************************************************/ +typedef struct _PVRSRV_DEV_DATA_ +{ + IMG_HANDLE hConnection; /*!< Services connection info */ + IMG_HANDLE hDevCookie; /*!< Dev cookie */ + +} PVRSRV_DEV_DATA; + +/*! + ****************************************************************************** + * address:value update structure + *****************************************************************************/ +typedef struct _PVRSRV_MEMUPDATE_ +{ + IMG_UINT32 ui32UpdateAddr; /*!< Address */ + IMG_UINT32 ui32UpdateVal; /*!< value */ +} PVRSRV_MEMUPDATE; + +/*! + ****************************************************************************** + * address:value register structure + *****************************************************************************/ +typedef struct _PVRSRV_HWREG_ +{ + IMG_UINT32 ui32RegAddr; /*!< Address */ + IMG_UINT32 ui32RegVal; /*!< value */ +} PVRSRV_HWREG; + +/*! 
+ ****************************************************************************** + * Implementation details for memory handling + *****************************************************************************/ +typedef struct _PVRSRV_MEMBLK_ +{ + IMG_DEV_VIRTADDR sDevVirtAddr; /*!< Address of the memory in the IMG MMUs address space */ + IMG_HANDLE hOSMemHandle; /*!< Stores the underlying memory allocation handle */ + IMG_HANDLE hOSWrapMem; /*!< FIXME: better way to solve this problem */ + IMG_HANDLE hBuffer; /*!< Stores the BM_HANDLE for the underlying memory management */ + IMG_HANDLE hResItem; /*!< handle to resource item for allocate */ + IMG_SYS_PHYADDR *psIntSysPAddr; + +} PVRSRV_MEMBLK; + +/*! + ****************************************************************************** + * Memory Management (externel interface) + *****************************************************************************/ +#if defined (PVRSRV_DEVMEM_TIME_STATS) +typedef struct _DEVMEM_UNMAPPING_TIME_STATS_ +{ + IMG_UINT32 ui32TimeToCPUUnmap; + IMG_UINT32 ui32TimeToDevUnmap; +} DEVMEM_UNMAPPING_TIME_STATS; + +typedef struct _PVRSRV_DEVMEM_TIMING_STATS_ +{ + /* This struct holds time taken to map/unmap device memory into CPU/GPU in microsec granularity */ + struct + { + IMG_UINT32 ui32TimeToCPUMap; + IMG_UINT32 ui32TimeToDevMap; + } sDevMemMapTimes; + + DEVMEM_UNMAPPING_TIME_STATS *psDevMemUnmapTimes; /* User supplied space for "unmap" timings */ +} PVRSRV_DEVMEM_TIMING_STATS; +#endif + +typedef struct _PVRSRV_KERNEL_MEM_INFO_ *PPVRSRV_KERNEL_MEM_INFO; + +typedef struct _PVRSRV_CLIENT_MEM_INFO_ +{ + /* CPU Virtual Address */ + IMG_PVOID pvLinAddr; + + /* CPU Virtual Address (for kernel mode) */ + IMG_PVOID pvLinAddrKM; + + /* Device Virtual Address */ + IMG_DEV_VIRTADDR sDevVAddr; + + /* allocation flags */ + IMG_UINT32 ui32Flags; + + /* client allocation flags */ + IMG_UINT32 ui32ClientFlags; + + /* allocation size in bytes */ + IMG_SIZE_T uAllocSize; + + + /* ptr to associated client 
sync info - NULL if no sync */ + struct _PVRSRV_CLIENT_SYNC_INFO_ *psClientSyncInfo; + + /* handle to client mapping data (OS specific) */ + IMG_HANDLE hMappingInfo; + + /* handle to kernel mem info */ + IMG_HANDLE hKernelMemInfo; + + /* resman handle for UM mapping clean-up */ + IMG_HANDLE hResItem; + +#if defined(SUPPORT_MEMINFO_IDS) + #if !defined(USE_CODE) + /* Globally unique "stamp" for allocation (not re-used until wrap) */ + IMG_UINT64 ui64Stamp; + #else /* !defined(USE_CODE) */ + IMG_UINT32 dummy1; + IMG_UINT32 dummy2; + #endif /* !defined(USE_CODE) */ +#endif /* defined(SUPPORT_MEMINFO_IDS) */ +#if defined(SUPPORT_ION) + IMG_SIZE_T uiIonBufferSize; +#endif /* defined(SUPPORT_ION) */ +#if defined(SUPPORT_DMABUF) + IMG_SIZE_T uiDmaBufSize; +#endif /* defined(SUPPORT_ION) */ + +#if defined (PVRSRV_DEVMEM_TIME_STATS) + PVRSRV_DEVMEM_TIMING_STATS sDevMemTimingStats; +#endif + + /* + ptr to next mem info + D3D uses psNext for mid-scene texture reload. + */ + struct _PVRSRV_CLIENT_MEM_INFO_ *psNext; + +} PVRSRV_CLIENT_MEM_INFO, *PPVRSRV_CLIENT_MEM_INFO; + + +/*! 
+ ****************************************************************************** + * Memory Heap Information + *****************************************************************************/ +#define PVRSRV_MAX_CLIENT_HEAPS (32) +typedef struct _PVRSRV_HEAP_INFO_ +{ + IMG_UINT32 ui32HeapID; +#if defined(USE_64BIT_COMPAT) + IMG_UINT32 ui32Padding; +#endif + IMG_HANDLE hDevMemHeap; + IMG_DEV_VIRTADDR sDevVAddrBase; + IMG_UINT32 ui32HeapByteSize; + IMG_UINT32 ui32Attribs; + IMG_UINT32 ui32XTileStride; +}PVRSRV_HEAP_INFO; + + + + +/* + Event Object information structure +*/ +typedef struct _PVRSRV_EVENTOBJECT_ +{ + /* globally unique name of the event object */ + IMG_CHAR szName[EVENTOBJNAME_MAXLENGTH]; + /* kernel specific handle for the event object */ + IMG_HANDLE hOSEventKM; + +} PVRSRV_EVENTOBJECT; + +/* + Cache operation type +*/ +typedef enum +{ + PVRSRV_MISC_INFO_CPUCACHEOP_NONE = 0, + PVRSRV_MISC_INFO_CPUCACHEOP_CLEAN, + PVRSRV_MISC_INFO_CPUCACHEOP_FLUSH, + PVRSRV_MISC_INFO_CPUCACHEOP_MAX = 0x7fffffff +} PVRSRV_MISC_INFO_CPUCACHEOP_TYPE; + +/*! + ****************************************************************************** + * Structure to retrieve misc. 
information from services + *****************************************************************************/ +typedef struct _PVRSRV_MISC_INFO_ +{ + IMG_UINT32 ui32StateRequest; /*!< requested State Flags */ + IMG_UINT32 ui32StatePresent; /*!< Present/Valid State Flags */ + + /*!< SOC Timer register */ + IMG_VOID *pvSOCTimerRegisterKM; + IMG_VOID *pvSOCTimerRegisterUM; + IMG_HANDLE hSOCTimerRegisterOSMemHandle; + IMG_HANDLE hSOCTimerRegisterMappingInfo; + + /*!< SOC Clock Gating registers */ + IMG_VOID *pvSOCClockGateRegs; + IMG_UINT32 ui32SOCClockGateRegsSize; + + /* Memory Stats/DDK version string depending on ui32StateRequest flags */ + IMG_CHAR *pszMemoryStr; + IMG_UINT32 ui32MemoryStrLen; + + /* global event object */ + PVRSRV_EVENTOBJECT sGlobalEventObject;//FIXME: should be private to services + IMG_HANDLE hOSGlobalEvent; + + /* Note: add misc. items as required */ + IMG_UINT32 aui32DDKVersion[4]; + + /*!< CPU cache flush controls: */ + struct + { + /*!< Defer the CPU cache op to the next HW op to be submitted (else flush now) */ + IMG_BOOL bDeferOp; + + /*!< Type of cache operation to perform */ + PVRSRV_MISC_INFO_CPUCACHEOP_TYPE eCacheOpType; + + /* This union is a bit unsightly. We need it because we'll use the psMemInfo + * directly in the srvclient PVRSRVGetMiscInfo code, and then convert it + * to a kernel meminfo if required. Try to not waste space. + */ + union + { + /*!< Input client meminfo (UM side) */ + PVRSRV_CLIENT_MEM_INFO *psClientMemInfo; + + /*!< Output kernel meminfo (Bridge+KM side) */ + IMG_HANDLE hKernelMemInfo; + } u; + + /*!< Offset in MemInfo to start cache op */ + IMG_VOID *pvBaseVAddr; + + /*!< Length of range to perform cache op */ + IMG_UINT32 ui32Length; + } sCacheOpCtl; + + /*!< Meminfo refcount controls: */ + struct + { + /* This union is a bit unsightly. We need it because we'll use the psMemInfo + * directly in the srvclient PVRSRVGetMiscInfo code, and then convert it + * to a kernel meminfo if required. 
Try to not waste space. + */ + union + { + /*!< Input client meminfo (UM side) */ + PVRSRV_CLIENT_MEM_INFO *psClientMemInfo; + + /*!< Output kernel meminfo (Bridge+KM side) */ + IMG_HANDLE hKernelMemInfo; + } u; + + /*!< Resulting refcount */ + IMG_UINT32 ui32RefCount; + } sGetRefCountCtl; + + IMG_UINT32 ui32PageSize; +} PVRSRV_MISC_INFO; + +/*! + ****************************************************************************** + * Synchronisation token + *****************************************************************************/ +typedef struct _PVRSRV_SYNC_TOKEN_ +{ + /* This token is supposed to be passed around as an opaque object + - caller should not rely on the internal fields staying the same. + The fields are hidden in sPrivate in order to reinforce this. */ + struct + { + IMG_HANDLE hKernelSyncInfo; + IMG_UINT32 ui32ReadOpsPendingSnapshot; + IMG_UINT32 ui32WriteOpsPendingSnapshot; + IMG_UINT32 ui32ReadOps2PendingSnapshot; + } sPrivate; +} PVRSRV_SYNC_TOKEN; + + +/****************************************************************************** + * PVR Client Event handling in Services + *****************************************************************************/ +typedef enum _PVRSRV_CLIENT_EVENT_ +{ + PVRSRV_CLIENT_EVENT_HWTIMEOUT = 0 +} PVRSRV_CLIENT_EVENT; + +typedef IMG_VOID (*PFN_QUEUE_COMMAND_COMPLETE)(IMG_HANDLE hCallbackData); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVClientEvent(IMG_CONST PVRSRV_CLIENT_EVENT eEvent, + PVRSRV_DEV_DATA *psDevData, + IMG_PVOID pvData); + +/****************************************************************************** + * PVR Services API prototypes. 
+ *****************************************************************************/ +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVConnect(PVRSRV_CONNECTION **ppsConnection, IMG_UINT32 ui32SrvFlags); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVDisconnect(IMG_HANDLE hConnection); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVEnumerateDevices(IMG_CONST PVRSRV_CONNECTION *psConnection, + IMG_UINT32 *puiNumDevices, + PVRSRV_DEVICE_IDENTIFIER *puiDevIDs); +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVAcquireDeviceData(IMG_CONST PVRSRV_CONNECTION *psConnection, + IMG_UINT32 uiDevIndex, + PVRSRV_DEV_DATA *psDevData, + PVRSRV_DEVICE_TYPE eDeviceType); +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVGetMiscInfo (IMG_HANDLE hConnection, PVRSRV_MISC_INFO *psMiscInfo); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVReleaseMiscInfo (IMG_HANDLE hConnection, PVRSRV_MISC_INFO *psMiscInfo); + +IMG_IMPORT +PVRSRV_ERROR PVRSRVPollForValue ( const PVRSRV_CONNECTION *psConnection, + IMG_HANDLE hOSEvent, + volatile IMG_UINT32 *pui32LinMemAddr, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask, + IMG_UINT32 ui32Waitus, + IMG_UINT32 ui32Tries); + +/* memory APIs */ +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateDeviceMemContext(IMG_CONST PVRSRV_DEV_DATA *psDevData, + IMG_HANDLE *phDevMemContext, + IMG_UINT32 *pui32SharedHeapCount, + PVRSRV_HEAP_INFO *psHeapInfo); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyDeviceMemContext(IMG_CONST PVRSRV_DEV_DATA *psDevData, + IMG_HANDLE hDevMemContext + ); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDeviceMemHeapInfo(IMG_CONST PVRSRV_DEV_DATA *psDevData, + IMG_HANDLE hDevMemContext, + IMG_UINT32 *pui32SharedHeapCount, + PVRSRV_HEAP_INFO *psHeapInfo); + +#if defined(PVRSRV_LOG_MEMORY_ALLOCS) + #define PVRSRVAllocDeviceMem_log(psDevData, hDevMemHeap, ui32Attribs, ui32Size, ui32Alignment, ppsMemInfo, logStr) \ + (PVR_TRACE(("PVRSRVAllocDeviceMem(" #psDevData "," #hDevMemHeap "," #ui32Attribs "," #ui32Size "," #ui32Alignment "," #ppsMemInfo ")" 
\ + ": " logStr " (size = 0x%lx)", (unsigned long)ui32Size)), \ + PVRSRVAllocDeviceMem(psDevData, hDevMemHeap, ui32Attribs, ui32Size, ui32Alignment, ppsMemInfo)) +#else + #define PVRSRVAllocDeviceMem_log(psDevData, hDevMemHeap, ui32Attribs, ui32Size, ui32Alignment, ppsMemInfo, logStr) \ + PVRSRVAllocDeviceMem(psDevData, hDevMemHeap, ui32Attribs, ui32Size, ui32Alignment, ppsMemInfo) +#endif + + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVAllocDeviceMem2(IMG_CONST PVRSRV_DEV_DATA *psDevData, + IMG_HANDLE hDevMemHeap, + IMG_UINT32 ui32Attribs, + IMG_SIZE_T ui32Size, + IMG_SIZE_T ui32Alignment, + IMG_PVOID pvPrivData, + IMG_UINT32 ui32PrivDataLength, + PVRSRV_CLIENT_MEM_INFO **ppsMemInfo); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVAllocDeviceMem(IMG_CONST PVRSRV_DEV_DATA *psDevData, + IMG_HANDLE hDevMemHeap, + IMG_UINT32 ui32Attribs, + IMG_SIZE_T ui32Size, + IMG_SIZE_T ui32Alignment, + PVRSRV_CLIENT_MEM_INFO **ppsMemInfo); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVFreeDeviceMem(IMG_CONST PVRSRV_DEV_DATA *psDevData, + PVRSRV_CLIENT_MEM_INFO *psMemInfo); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVExportDeviceMem(IMG_CONST PVRSRV_DEV_DATA *psDevData, + PVRSRV_CLIENT_MEM_INFO *psMemInfo, + IMG_HANDLE *phMemInfo + ); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVReserveDeviceVirtualMem(IMG_CONST PVRSRV_DEV_DATA *psDevData, + IMG_HANDLE hDevMemHeap, + IMG_DEV_VIRTADDR *psDevVAddr, + IMG_SIZE_T ui32Size, + IMG_SIZE_T ui32Alignment, + PVRSRV_CLIENT_MEM_INFO **ppsMemInfo); +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVFreeDeviceVirtualMem(IMG_CONST PVRSRV_DEV_DATA *psDevData, + PVRSRV_CLIENT_MEM_INFO *psMemInfo); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVMapDeviceMemory (IMG_CONST PVRSRV_DEV_DATA *psDevData, + IMG_HANDLE hKernelMemInfo, + IMG_HANDLE hDstDevMemHeap, + PVRSRV_CLIENT_MEM_INFO **ppsDstMemInfo); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapDeviceMemory (IMG_CONST PVRSRV_DEV_DATA *psDevData, + PVRSRV_CLIENT_MEM_INFO *psMemInfo); + 
+IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVMapExtMemory (IMG_CONST PVRSRV_DEV_DATA *psDevData, + PVRSRV_CLIENT_MEM_INFO *psMemInfo, + IMG_SYS_PHYADDR *psSysPAddr, + IMG_UINT32 ui32Flags); +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapExtMemory (IMG_CONST PVRSRV_DEV_DATA *psDevData, + PVRSRV_CLIENT_MEM_INFO *psMemInfo, + IMG_UINT32 ui32Flags); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVWrapExtMemory(IMG_CONST PVRSRV_DEV_DATA *psDevData, + IMG_HANDLE hDevMemContext, + IMG_SIZE_T ui32ByteSize, + IMG_SIZE_T ui32PageOffset, + IMG_BOOL bPhysContig, + IMG_SYS_PHYADDR *psSysPAddr, + IMG_VOID *pvLinAddr, + IMG_UINT32 ui32Flags, + PVRSRV_CLIENT_MEM_INFO **ppsMemInfo); +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVUnwrapExtMemory (IMG_CONST PVRSRV_DEV_DATA *psDevData, + PVRSRV_CLIENT_MEM_INFO *psMemInfo); + +PVRSRV_ERROR PVRSRVChangeDeviceMemoryAttributes(IMG_CONST PVRSRV_DEV_DATA *psDevData, + PVRSRV_CLIENT_MEM_INFO *psClientMemInfo, + IMG_UINT32 ui32Attribs); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVMapPhysToUserSpace(IMG_CONST PVRSRV_DEV_DATA *psDevData, + IMG_SYS_PHYADDR sSysPhysAddr, + IMG_UINT32 uiSizeInBytes, + IMG_PVOID *ppvUserAddr, + IMG_UINT32 *puiActualSize, + IMG_PVOID *ppvProcess); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapPhysToUserSpace(IMG_CONST PVRSRV_DEV_DATA *psDevData, + IMG_PVOID pvUserAddr, + IMG_PVOID pvProcess); + +#if defined(LINUX) +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVExportDeviceMem2(IMG_CONST PVRSRV_DEV_DATA *psDevData, + PVRSRV_CLIENT_MEM_INFO *psMemInfo, + IMG_INT *iFd); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVMapDeviceMemory2(IMG_CONST PVRSRV_DEV_DATA *psDevData, + IMG_INT iFd, + IMG_HANDLE hDstDevMemHeap, + PVRSRV_CLIENT_MEM_INFO **ppsDstMemInfo); +#endif /* defined(LINUX) */ + +#if defined(SUPPORT_TEXTURE_ALLOC_NOMAP) +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVMapDeviceMem(IMG_CONST PVRSRV_DEV_DATA *psDevData, PVRSRV_CLIENT_MEM_INFO *psMemInfo, void **ppvLinAddr); + +IMG_IMPORT +void IMG_CALLCONV 
PVRSRVUnmapDeviceMem(IMG_CONST PVRSRV_DEV_DATA *psDevData, PVRSRV_CLIENT_MEM_INFO *psMemInfo, void **ppvLinAddr); +#endif /* defined(SUPPORT_TEXTURE_ALLOC_NOMAP) */ + +#if defined(SUPPORT_ION) +PVRSRV_ERROR PVRSRVMapIonHandle(const PVRSRV_DEV_DATA *psDevData, + IMG_HANDLE hDevMemHeap, + IMG_UINT32 ui32NumFDs, + IMG_INT *paiBufferFDs, + IMG_UINT32 ui32ChunkCount, + IMG_SIZE_T *pauiOffset, + IMG_SIZE_T *pauiSize, + IMG_UINT32 ui32Attribs, + PVRSRV_CLIENT_MEM_INFO **ppsMemInfo); + +PVRSRV_ERROR PVRSRVUnmapIonHandle(const PVRSRV_DEV_DATA *psDevData, + PVRSRV_CLIENT_MEM_INFO *psMemInfo); +#endif /* defined (SUPPORT_ION) */ + +#if defined(SUPPORT_DMABUF) +IMG_IMPORT +PVRSRV_ERROR PVRSRVMapDmaBuf(const PVRSRV_DEV_DATA *psDevData, + const IMG_HANDLE hDevMemHeap, + const IMG_UINT32 ui32Attribs, + const IMG_INT iDmaBufFD, + const IMG_SIZE_T uiDmaBufOffset, + const IMG_SIZE_T uiDmaBufSize, + PVRSRV_CLIENT_MEM_INFO **ppsMemInfo, + IMG_SIZE_T *puiMemInfoOffset); + +IMG_IMPORT +PVRSRV_ERROR PVRSRVMapFullDmaBuf(const PVRSRV_DEV_DATA *psDevData, + const IMG_HANDLE hDevMemHeap, + const IMG_UINT32 ui32Attribs, + const IMG_INT iDmaBufFD, + PVRSRV_CLIENT_MEM_INFO **ppsMemInfo); + +IMG_IMPORT +PVRSRV_ERROR PVRSRVUnmapDmaBuf(const PVRSRV_DEV_DATA *psDevData, + PVRSRV_CLIENT_MEM_INFO *psMemInfo); +#endif /* SUPPORT_DMABUF */ + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVAllocDeviceMemSparse(const PVRSRV_DEV_DATA *psDevData, + IMG_HANDLE hDevMemHeap, + IMG_UINT32 ui32Attribs, + IMG_SIZE_T uAlignment, + IMG_UINT32 ui32ChunkSize, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 ui32NumPhysChunks, + IMG_BOOL *pabMapChunk, + PVRSRV_CLIENT_MEM_INFO **ppsMemInfo); + +/****************************************************************************** + * PVR Allocation Synchronisation Functionality... 
+ *****************************************************************************/
+
+/* Selects whether the read or the write operation counters of a sync
+ * object are inspected by the query/wait helpers declared below.
+ * NB: the enumerator values alias IMG_TRUE/IMG_FALSE. */
+typedef enum _PVRSRV_SYNCVAL_MODE_
+{
+	PVRSRV_SYNCVAL_READ = IMG_TRUE,
+	PVRSRV_SYNCVAL_WRITE = IMG_FALSE
+
+} PVRSRV_SYNCVAL_MODE, *PPVRSRV_SYNCVAL_MODE;
+
+/* Snapshot of a sync operation counter value. */
+typedef IMG_UINT32 PVRSRV_SYNCVAL;
+
+/* Completion query/wait helpers for per-allocation sync counters.
+ * NOTE(review): implementations live elsewhere; only the contracts
+ * implied by the names are visible in this header. */
+IMG_IMPORT PVRSRV_ERROR PVRSRVWaitForOpsComplete(PPVRSRV_CLIENT_MEM_INFO psMemInfo,
+	PVRSRV_SYNCVAL_MODE eMode, PVRSRV_SYNCVAL OpRequired);
+
+IMG_IMPORT PVRSRV_ERROR PVRSRVWaitForAllOpsComplete(PPVRSRV_CLIENT_MEM_INFO psMemInfo,
+	PVRSRV_SYNCVAL_MODE eMode);
+
+IMG_IMPORT IMG_BOOL PVRSRVTestOpsComplete(PPVRSRV_CLIENT_MEM_INFO psMemInfo,
+	PVRSRV_SYNCVAL_MODE eMode, PVRSRV_SYNCVAL OpRequired);
+
+IMG_IMPORT IMG_BOOL PVRSRVTestAllOpsComplete(PPVRSRV_CLIENT_MEM_INFO psMemInfo,
+	PVRSRV_SYNCVAL_MODE eMode);
+
+IMG_IMPORT IMG_BOOL PVRSRVTestOpsNotComplete(PPVRSRV_CLIENT_MEM_INFO psMemInfo,
+	PVRSRV_SYNCVAL_MODE eMode, PVRSRV_SYNCVAL OpRequired);
+
+IMG_IMPORT IMG_BOOL PVRSRVTestAllOpsNotComplete(PPVRSRV_CLIENT_MEM_INFO psMemInfo,
+	PVRSRV_SYNCVAL_MODE eMode);
+
+IMG_IMPORT PVRSRV_SYNCVAL PVRSRVGetPendingOpSyncVal(PPVRSRV_CLIENT_MEM_INFO psMemInfo,
+	PVRSRV_SYNCVAL_MODE eMode);
+
+#if defined(SUPPORT_PVRSRV_DEVICE_CLASS)
+
+/* Map/unmap a display- or buffer-class buffer into a device memory context. */
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVMapDeviceClassMemory (IMG_CONST PVRSRV_DEV_DATA *psDevData,
+	IMG_HANDLE hDevMemContext,
+	IMG_HANDLE hDeviceClassBuffer,
+	PVRSRV_CLIENT_MEM_INFO **ppsMemInfo);
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapDeviceClassMemory (IMG_CONST PVRSRV_DEV_DATA *psDevData,
+	PVRSRV_CLIENT_MEM_INFO *psMemInfo);
+
+/******************************************************************************
+ * Common Device Class Enumeration
+ *****************************************************************************/
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVEnumerateDeviceClass(IMG_HANDLE hConnection,
+	PVRSRV_DEVICE_CLASS DeviceClass,
+	IMG_UINT32 *pui32DevCount,
+	IMG_UINT32 *pui32DevID);
+
+/****************************************************************************** + * Display Device Class API definition + *****************************************************************************/ +IMG_IMPORT +IMG_HANDLE IMG_CALLCONV PVRSRVOpenDCDevice(IMG_CONST PVRSRV_DEV_DATA *psDevData, + IMG_UINT32 ui32DeviceID); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVCloseDCDevice(IMG_CONST PVRSRV_CONNECTION *psConnection, IMG_HANDLE hDevice); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVEnumDCFormats (IMG_HANDLE hDevice, + IMG_UINT32 *pui32Count, + DISPLAY_FORMAT *psFormat); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVEnumDCDims (IMG_HANDLE hDevice, + IMG_UINT32 *pui32Count, + DISPLAY_FORMAT *psFormat, + DISPLAY_DIMS *psDims); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDCSystemBuffer(IMG_HANDLE hDevice, + IMG_HANDLE *phBuffer + ); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDCInfo(IMG_HANDLE hDevice, + DISPLAY_INFO* psDisplayInfo); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateDCSwapChain (IMG_HANDLE hDevice, + IMG_UINT32 ui32Flags, + DISPLAY_SURF_ATTRIBUTES *psDstSurfAttrib, + DISPLAY_SURF_ATTRIBUTES *psSrcSurfAttrib, + IMG_UINT32 ui32BufferCount, + IMG_UINT32 ui32OEMFlags, + IMG_UINT32 *pui32SwapChainID, + IMG_HANDLE *phSwapChain + ); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyDCSwapChain (IMG_HANDLE hDevice, + IMG_HANDLE hSwapChain + ); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVSetDCDstRect (IMG_HANDLE hDevice, + IMG_HANDLE hSwapChain, + IMG_RECT *psDstRect); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVSetDCSrcRect (IMG_HANDLE hDevice, + IMG_HANDLE hSwapChain, + IMG_RECT *psSrcRect); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVSetDCDstColourKey (IMG_HANDLE hDevice, + IMG_HANDLE hSwapChain, + IMG_UINT32 ui32CKColour); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVSetDCSrcColourKey (IMG_HANDLE hDevice, + IMG_HANDLE hSwapChain, + IMG_UINT32 ui32CKColour); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV 
PVRSRVGetDCBuffers(IMG_HANDLE hDevice, + IMG_HANDLE hSwapChain, + IMG_HANDLE *phBuffer + ); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDCBuffers2(IMG_HANDLE hDevice, + IMG_HANDLE hSwapChain, + IMG_HANDLE *phBuffer, + IMG_SYS_PHYADDR *psPhyAddr); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVSwapToDCBuffer (IMG_HANDLE hDevice, + IMG_HANDLE hBuffer, + IMG_UINT32 ui32ClipRectCount, + IMG_RECT *psClipRect, + IMG_UINT32 ui32SwapInterval, + IMG_HANDLE hPrivateTag + ); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVSwapToDCBuffer2 (IMG_HANDLE hDevice, + IMG_HANDLE hBuffer, + IMG_UINT32 ui32SwapInterval, + PVRSRV_CLIENT_MEM_INFO **ppsMemInfos, + PVRSRV_CLIENT_SYNC_INFO **ppsSyncInfos, + IMG_UINT32 ui32NumMemSyncInfos, + IMG_PVOID pvPrivData, + IMG_UINT32 ui32PrivDataLength, + IMG_HANDLE *phFence); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVSwapToDCSystem (IMG_HANDLE hDevice, + IMG_HANDLE hSwapChain + ); + +/****************************************************************************** + * Buffer Device Class API definition + *****************************************************************************/ +IMG_IMPORT +IMG_HANDLE IMG_CALLCONV PVRSRVOpenBCDevice(IMG_CONST PVRSRV_DEV_DATA *psDevData, + IMG_UINT32 ui32DeviceID); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVCloseBCDevice(IMG_HANDLE hConnection, + IMG_HANDLE hDevice); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVGetBCBufferInfo(IMG_HANDLE hDevice, + BUFFER_INFO *psBuffer); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVGetBCBuffer(IMG_HANDLE hDevice, + IMG_UINT32 ui32BufferIndex, + IMG_HANDLE *phBuffer + ); + +#endif /* #if defined(SUPPORT_PVRSRV_DEVICE_CLASS) */ + +/****************************************************************************** + * PDUMP Function prototypes... 
+ *****************************************************************************/ +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpInit(IMG_HANDLE hConnection); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpStartInitPhase(IMG_HANDLE hConnection); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpStopInitPhase(IMG_HANDLE hConnection); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpMemPol(IMG_HANDLE hConnection, + PVRSRV_CLIENT_MEM_INFO *psMemInfo, + IMG_UINT32 ui32Offset, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask, + PDUMP_POLL_OPERATOR eOperator, + IMG_UINT32 ui32Flags); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpSyncPol(IMG_HANDLE hConnection, + PVRSRV_CLIENT_SYNC_INFO *psClientSyncInfo, + IMG_BOOL bIsRead, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpSyncPol2(IMG_HANDLE hConnection, + PVRSRV_CLIENT_SYNC_INFO *psClientSyncInfo, + IMG_BOOL bIsRead); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpMem(IMG_HANDLE hConnection, + IMG_PVOID pvAltLinAddr, + PVRSRV_CLIENT_MEM_INFO *psMemInfo, + IMG_UINT32 ui32Offset, + IMG_UINT32 ui32Bytes, + IMG_UINT32 ui32Flags); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpSync(IMG_HANDLE hConnection, + IMG_PVOID pvAltLinAddr, + PVRSRV_CLIENT_SYNC_INFO *psClientSyncInfo, + IMG_UINT32 ui32Offset, + IMG_UINT32 ui32Bytes); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpReg(IMG_CONST PVRSRV_DEV_DATA *psDevData, + IMG_CHAR *pszRegRegion, + IMG_UINT32 ui32RegAddr, + IMG_UINT32 ui32RegValue, + IMG_UINT32 ui32Flags); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpRegPolWithFlags(const PVRSRV_DEV_DATA *psDevData, + IMG_CHAR *pszRegRegion, + IMG_UINT32 ui32RegAddr, + IMG_UINT32 ui32RegValue, + IMG_UINT32 ui32Mask, + IMG_UINT32 ui32Flags); +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpRegPol(const PVRSRV_DEV_DATA *psDevData, + IMG_CHAR *pszRegRegion, + IMG_UINT32 ui32RegAddr, + IMG_UINT32 ui32RegValue, + IMG_UINT32 ui32Mask); + +IMG_IMPORT 
+PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpPDReg(IMG_HANDLE hConnection, + IMG_UINT32 ui32RegAddr, + IMG_UINT32 ui32RegValue); +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpPDDevPAddr(IMG_HANDLE hConnection, + PVRSRV_CLIENT_MEM_INFO *psMemInfo, + IMG_UINT32 ui32Offset, + IMG_DEV_PHYADDR sPDDevPAddr); + +#if !defined(USE_CODE) +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpMemPages(IMG_CONST PVRSRV_DEV_DATA *psDevData, + IMG_HANDLE hKernelMemInfo, + IMG_DEV_PHYADDR *pPages, + IMG_UINT32 ui32NumPages, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_UINT32 ui32Start, + IMG_UINT32 ui32Length, + IMG_UINT32 ui32Flags); +#endif + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpSetFrame(IMG_HANDLE hConnection, + IMG_UINT32 ui32Frame); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpComment(IMG_HANDLE hConnection, + IMG_CONST IMG_CHAR *pszComment, + IMG_BOOL bContinuous); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpCommentf(IMG_HANDLE hConnection, + IMG_BOOL bContinuous, + IMG_CONST IMG_CHAR *pszFormat, ...) +#if !defined(USE_CODE) + IMG_FORMAT_PRINTF(3, 4) +#endif +; + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpCommentWithFlagsf(IMG_HANDLE hConnection, + IMG_UINT32 ui32Flags, + IMG_CONST IMG_CHAR *pszFormat, ...) 
+#if !defined(USE_CODE) + IMG_FORMAT_PRINTF(3, 4) +#endif +; + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpDriverInfo(IMG_HANDLE hConnection, + IMG_CHAR *pszString, + IMG_BOOL bContinuous); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpIsCapturing(IMG_HANDLE hConnection, + IMG_BOOL *pbIsCapturing); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpBitmap(IMG_CONST PVRSRV_DEV_DATA *psDevData, + IMG_CHAR *pszFileName, + IMG_UINT32 ui32FileOffset, + IMG_UINT32 ui32Width, + IMG_UINT32 ui32Height, + IMG_UINT32 ui32StrideInBytes, + IMG_DEV_VIRTADDR sDevBaseAddr, + IMG_HANDLE hDevMemContext, + IMG_UINT32 ui32Size, + PDUMP_PIXEL_FORMAT ePixelFormat, + PDUMP_MEM_FORMAT eMemFormat, + IMG_UINT32 ui32PDumpFlags); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpRegRead(IMG_CONST PVRSRV_DEV_DATA *psDevData, + IMG_CONST IMG_CHAR *pszRegRegion, + IMG_CONST IMG_CHAR *pszFileName, + IMG_UINT32 ui32FileOffset, + IMG_UINT32 ui32Address, + IMG_UINT32 ui32Size, + IMG_UINT32 ui32PDumpFlags); + + +IMG_IMPORT +IMG_BOOL IMG_CALLCONV PVRSRVPDumpIsCapturingTest(IMG_HANDLE hConnection); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpCycleCountRegRead(IMG_CONST PVRSRV_DEV_DATA *psDevData, + IMG_UINT32 ui32RegOffset, + IMG_BOOL bLastFrame); + +IMG_IMPORT IMG_HANDLE PVRSRVLoadLibrary(const IMG_CHAR *pszLibraryName); +IMG_IMPORT PVRSRV_ERROR PVRSRVUnloadLibrary(IMG_HANDLE hExtDrv); +IMG_IMPORT PVRSRV_ERROR PVRSRVGetLibFuncAddr(IMG_HANDLE hExtDrv, const IMG_CHAR *pszFunctionName, IMG_VOID **ppvFuncAddr); + +IMG_IMPORT IMG_UINT32 PVRSRVClockus (void); +IMG_IMPORT IMG_VOID PVRSRVWaitus (IMG_UINT32 ui32Timeus); +IMG_IMPORT IMG_VOID PVRSRVReleaseThreadQuanta (void); +IMG_IMPORT IMG_UINT32 IMG_CALLCONV PVRSRVGetCurrentProcessID(void); +IMG_IMPORT IMG_CHAR * IMG_CALLCONV PVRSRVSetLocale(const IMG_CHAR *pszLocale); + + + + + +IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVCreateAppHintState(IMG_MODULE_ID eModuleID, + const IMG_CHAR *pszAppName, + IMG_VOID **ppvState); +IMG_IMPORT IMG_VOID 
IMG_CALLCONV PVRSRVFreeAppHintState(IMG_MODULE_ID eModuleID, + IMG_VOID *pvHintState); + +IMG_IMPORT IMG_BOOL IMG_CALLCONV PVRSRVGetAppHint(IMG_VOID *pvHintState, + const IMG_CHAR *pszHintName, + IMG_DATA_TYPE eDataType, + const IMG_VOID *pvDefault, + IMG_VOID *pvReturn); + +/****************************************************************************** + * Memory API(s) + *****************************************************************************/ + +/* Exported APIs */ +IMG_IMPORT IMG_PVOID IMG_CALLCONV PVRSRVAllocUserModeMem (IMG_SIZE_T uiSize); +IMG_IMPORT IMG_PVOID IMG_CALLCONV PVRSRVCallocUserModeMem (IMG_SIZE_T uiSize); +IMG_IMPORT IMG_PVOID IMG_CALLCONV PVRSRVReallocUserModeMem (IMG_PVOID pvBase, IMG_SIZE_T uiNewSize); +IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVFreeUserModeMem (IMG_PVOID pvMem); +IMG_IMPORT IMG_VOID PVRSRVMemCopy(IMG_VOID *pvDst, const IMG_VOID *pvSrc, IMG_SIZE_T uiSize); +IMG_IMPORT IMG_VOID PVRSRVMemSet(IMG_VOID *pvDest, IMG_UINT8 ui8Value, IMG_SIZE_T uiSize); + +struct _PVRSRV_MUTEX_OPAQUE_STRUCT_; +typedef struct _PVRSRV_MUTEX_OPAQUE_STRUCT_ *PVRSRV_MUTEX_HANDLE; + + +#if defined(PVR_DEBUG_MUTEXES) + +IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateMutex(PVRSRV_MUTEX_HANDLE *phMutex, + IMG_CHAR pszMutexName[], + IMG_CHAR pszFilename[], + IMG_INT iLine); +IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyMutex(PVRSRV_MUTEX_HANDLE hMutex, + IMG_CHAR pszMutexName[], + IMG_CHAR pszFilename[], + IMG_INT iLine); +IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVLockMutex(PVRSRV_MUTEX_HANDLE hMutex, + IMG_CHAR pszMutexName[], + IMG_CHAR pszFilename[], + IMG_INT iLine); +IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVUnlockMutex(PVRSRV_MUTEX_HANDLE hMutex, + IMG_CHAR pszMutexName[], + IMG_CHAR pszFilename[], + IMG_INT iLine); + +#define PVRSRVCreateMutex(phMutex) PVRSRVCreateMutex(phMutex, #phMutex, __FILE__, __LINE__) +#define PVRSRVDestroyMutex(hMutex) PVRSRVDestroyMutex(hMutex, #hMutex, __FILE__, __LINE__) +#define PVRSRVLockMutex(hMutex) 
PVRSRVLockMutex(hMutex, #hMutex, __FILE__, __LINE__) +#define PVRSRVUnlockMutex(hMutex) PVRSRVUnlockMutex(hMutex, #hMutex, __FILE__, __LINE__) + +#else /* defined(PVR_DEBUG_MUTEXES) */ + +IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateMutex(PVRSRV_MUTEX_HANDLE *phMutex); +IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyMutex(PVRSRV_MUTEX_HANDLE hMutex); +IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVLockMutex(PVRSRV_MUTEX_HANDLE hMutex); +IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVUnlockMutex(PVRSRV_MUTEX_HANDLE hMutex); + +#endif /* defined(PVR_DEBUG_MUTEXES) */ + + +struct _PVRSRV_RECMUTEX_OPAQUE_STRUCT_; +typedef struct _PVRSRV_RECMUTEX_OPAQUE_STRUCT_ *PVRSRV_RECMUTEX_HANDLE; + + +#if defined(PVR_DEBUG_MUTEXES) + +IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateRecursiveMutex(PVRSRV_RECMUTEX_HANDLE *phMutex, + IMG_CHAR pszMutexName[], + IMG_CHAR pszFilename[], + IMG_INT iLine); +IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyRecursiveMutex(PVRSRV_RECMUTEX_HANDLE hMutex, + IMG_CHAR pszMutexName[], + IMG_CHAR pszFilename[], + IMG_INT iLine); +IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVLockRecursiveMutex(PVRSRV_RECMUTEX_HANDLE hMutex, + IMG_CHAR pszMutexName[], + IMG_CHAR pszFilename[], + IMG_INT iLine); +IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVUnlockRecursiveMutex(PVRSRV_RECMUTEX_HANDLE hMutex, + IMG_CHAR pszMutexName[], + IMG_CHAR pszFilename[], + IMG_INT iLine); + +#define PVRSRVCreateRecursiveMutex(phMutex) PVRSRVCreateRecursiveMutex(phMutex, #phMutex, __FILE__, __LINE__) +#define PVRSRVDestroyRecursiveMutex(hMutex) PVRSRVDestroyRecursiveMutex(hMutex, #hMutex, __FILE__, __LINE__) +#define PVRSRVLockRecursiveMutex(hMutex) PVRSRVLockRecursiveMutex(hMutex, #hMutex, __FILE__, __LINE__) +#define PVRSRVUnlockRecursiveMutex(hMutex) PVRSRVUnlockRecursiveMutex(hMutex, #hMutex, __FILE__, __LINE__) + +#else /* defined(PVR_DEBUG_MUTEXES) */ + +IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateRecursiveMutex(PVRSRV_RECMUTEX_HANDLE *phMutex); +IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV 
PVRSRVDestroyRecursiveMutex(PVRSRV_RECMUTEX_HANDLE hMutex);
+IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVLockRecursiveMutex(PVRSRV_RECMUTEX_HANDLE hMutex);
+IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVUnlockRecursiveMutex(PVRSRV_RECMUTEX_HANDLE hMutex);
+
+#endif /* defined(PVR_DEBUG_MUTEXES) */
+
+/* Non-recursive coarse-grained mutex shared between all threads in a process */
+IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVLockProcessGlobalMutex(void);
+IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVUnlockProcessGlobalMutex(void);
+
+
+/* Opaque semaphore handle; the struct is never defined on the client side. */
+struct _PVRSRV_SEMAPHORE_OPAQUE_STRUCT_;
+typedef struct _PVRSRV_SEMAPHORE_OPAQUE_STRUCT_ *PVRSRV_SEMAPHORE_HANDLE;
+
+
+	/* Sentinel timeout value meaning "wait forever". */
+	#define IMG_SEMAPHORE_WAIT_INFINITE ((IMG_UINT64)0xFFFFFFFFFFFFFFFFull)
+
+
+#if !defined(USE_CODE)
+
+/* The four functions below are inline stubs of the semaphore API:
+ * creation hands back a null handle, waiting always fails with
+ * PVRSRV_ERROR_INVALID_PARAMS, and destroy/post are no-ops.
+ * NOTE(review): presumably a real implementation exists on platforms
+ * that need one -- only these stubs are visible in this header. */
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVCreateSemaphore)
+#endif
+static INLINE PVRSRV_ERROR PVRSRVCreateSemaphore(PVRSRV_SEMAPHORE_HANDLE *phSemaphore, IMG_INT iInitialCount)
+{
+	/* The initial count is deliberately ignored by the stub. */
+	PVR_UNREFERENCED_PARAMETER(iInitialCount);
+	*phSemaphore = 0;
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVDestroySemaphore)
+#endif
+static INLINE PVRSRV_ERROR PVRSRVDestroySemaphore(PVRSRV_SEMAPHORE_HANDLE hSemaphore)
+{
+	/* Nothing to release for the stub handle. */
+	PVR_UNREFERENCED_PARAMETER(hSemaphore);
+	return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVWaitSemaphore)
+#endif
+static INLINE PVRSRV_ERROR PVRSRVWaitSemaphore(PVRSRV_SEMAPHORE_HANDLE hSemaphore, IMG_UINT64 ui64TimeoutMicroSeconds)
+{
+	PVR_UNREFERENCED_PARAMETER(hSemaphore);
+	PVR_UNREFERENCED_PARAMETER(ui64TimeoutMicroSeconds);
+	/* Always fails: callers must not rely on semaphores in this build. */
+	return PVRSRV_ERROR_INVALID_PARAMS;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVPostSemaphore)
+#endif
+static INLINE IMG_VOID PVRSRVPostSemaphore(PVRSRV_SEMAPHORE_HANDLE hSemaphore, IMG_INT iPostCount)
+{
+	/* No-op in the stub implementation. */
+	PVR_UNREFERENCED_PARAMETER(hSemaphore);
+	PVR_UNREFERENCED_PARAMETER(iPostCount);
+}
+
+#endif /* !defined(USE_CODE) */
+
+
+/* Non-exported APIs */
+#if defined(DEBUG) && (defined(__linux__) || defined(__QNXNTO__) )
+IMG_IMPORT IMG_PVOID
IMG_CALLCONV PVRSRVAllocUserModeMemTracking(IMG_SIZE_T ui32Size, IMG_CHAR *pszFileName, IMG_UINT32 ui32LineNumber); + +IMG_IMPORT IMG_PVOID IMG_CALLCONV PVRSRVCallocUserModeMemTracking(IMG_SIZE_T ui32Size, IMG_CHAR *pszFileName, IMG_UINT32 ui32LineNumber); + +IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVFreeUserModeMemTracking(IMG_VOID *pvMem); + +IMG_IMPORT IMG_PVOID IMG_CALLCONV PVRSRVReallocUserModeMemTracking(IMG_VOID *pvMem, IMG_SIZE_T ui32NewSize, + IMG_CHAR *pszFileName, IMG_UINT32 ui32LineNumber); +#endif + +/****************************************************************************** + * PVR Event Object API(s) + *****************************************************************************/ + +IMG_IMPORT PVRSRV_ERROR PVRSRVEventObjectWait(const PVRSRV_CONNECTION *psConnection, + IMG_HANDLE hOSEvent + ); + +/*! + ****************************************************************************** + + @Function PVRSRVCreateSyncInfoModObj + + @Description Creates an empty Modification object to be later used by PVRSRVModifyPendingSyncOps + + ******************************************************************************/ +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateSyncInfoModObj(const PVRSRV_CONNECTION *psConnection, + IMG_HANDLE *phKernelSyncInfoModObj + ); + +/*! + ****************************************************************************** + + @Function PVRSRVDestroySyncInfoModObj + + @Description Destroys a Modification object. Must be empty. + + ******************************************************************************/ +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroySyncInfoModObj(const PVRSRV_CONNECTION *psConnection, + IMG_HANDLE hKernelSyncInfoModObj + ); + + + +/*! + ****************************************************************************** + + @Function PVRSRVModifyPendingSyncOps + + @Description Returns PRE-INCREMENTED sync op values. Performs thread safe increment + of sync ops values as specified by ui32ModifyFlags. 
+ + PVRSRV_ERROR_RETRY is returned if the supplied modification object + is not empty. This is on the assumption that a different thread + will imminently call PVRSRVModifyCompleteSyncOps. This thread should + sleep before retrying. It should be regarded as an error if no such + other thread exists. + + Note that this API has implied locking semantics, as follows: + + PVRSRVModifyPendingSyncOps() + - announces an operation on the buffer is "pending", and + conceptually takes a ticket to represent your place in the queue. + - NB: ** exclusive access to the resource is _NOT_ granted at this time ** + PVRSRVSyncOpsFlushToModObj() + - ensures you have exclusive access to the resource (conceptually, a LOCK) + - the previously "pending" operation can now be regarded as "in progress" + PVRSRVModifyCompleteSyncOps() + - declares that the previously "in progress" operation is now complete. (UNLOCK) + + + ******************************************************************************/ +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVModifyPendingSyncOps(const PVRSRV_CONNECTION *psConnection, + IMG_HANDLE hKernelSyncInfoModObj, + PVRSRV_CLIENT_SYNC_INFO *psSyncInfo, + IMG_UINT32 ui32ModifyFlags, + IMG_UINT32 *pui32ReadOpsPending, + IMG_UINT32 *pui32WriteOpsPending); + +/*! + ****************************************************************************** + + @Function PVRSRVModifyCompleteSyncOps + + @Description Performs thread safe increment of sync ops values as specified + by the ui32ModifyFlags that were given to PVRSRVModifyPendingSyncOps. + The supplied Modification Object will become empty. + + Note that this API has implied locking semantics, as + described above in PVRSRVModifyPendingSyncOps + + ******************************************************************************/ +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVModifyCompleteSyncOps(const PVRSRV_CONNECTION *psConnection, + IMG_HANDLE hKernelSyncInfoModObj + ); + +/*! 
+ ****************************************************************************** + + @Function PVRSRVSyncOpsTakeToken + + @Description Takes a "deli-counter" style token for future use with + PVRSRVSyncOpsFlushToToken(). In practice this means + recording a snapshot of the current "pending" values. A + future PVRSRVSyncOpsFlushToToken() will ensure that all + operations that were pending at the time of this + PVRSRVSyncOpsTakeToken() call will be flushed. + Operations may be subsequently queued after this call + and would not be flushed. The caller is required to + provide storage for the token. The token is disposable + - i.e. the caller can simply let the token go out of + scope without telling us... in particular, there is no + obligation to call PVRSRVSyncOpsFlushToToken(). + Multiple tokens may be taken. There is no implied + locking with this API. + + ******************************************************************************/ +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVSyncOpsTakeToken(const PVRSRV_CONNECTION *psConnection, + const PVRSRV_CLIENT_SYNC_INFO *psSyncInfo, + PVRSRV_SYNC_TOKEN *psSyncToken); +/*! + ****************************************************************************** + + @Function PVRSRVSyncOpsFlushToToken + + @Description Tests whether the dependencies for a pending sync op modification + have been satisfied. If this function returns PVRSRV_OK, then the + "complete" counts have caught up with the snapshot of the "pending" + values taken when PVRSRVSyncOpsTakeToken() was called. + In the event that the dependencies are not (yet) met, + this call will auto-retry if bWait is specified, otherwise, it will + return PVRSRV_ERROR_RETRY. 
(Not really an "error") + + (auto-retry behaviour not implemented) + + ******************************************************************************/ +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVSyncOpsFlushToToken(const PVRSRV_CONNECTION *psConnection, + const PVRSRV_CLIENT_SYNC_INFO *psSyncInfo, + const PVRSRV_SYNC_TOKEN *psSyncToken, + IMG_BOOL bWait); +/*! + ****************************************************************************** + + @Function PVRSRVSyncOpsFlushToModObj + + @Description Tests whether the dependencies for a pending sync op modification + have been satisfied. If this function returns PVRSRV_OK, then the + "complete" counts have caught up with the snapshot of the "pending" + values taken when PVRSRVModifyPendingSyncOps() was called. + PVRSRVModifyCompleteSyncOps() can then be called without risk of + stalling. In the event that the dependencies are not (yet) met, + this call will auto-retry if bWait is specified, otherwise, it will + return PVRSRV_ERROR_RETRY. (Not really an "error") + + Note that this API has implied locking semantics, as + described above in PVRSRVModifyPendingSyncOps + + ******************************************************************************/ +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVSyncOpsFlushToModObj(const PVRSRV_CONNECTION *psConnection, + IMG_HANDLE hKernelSyncInfoModObj, + IMG_BOOL bWait); + +/*! + ****************************************************************************** + + @Function PVRSRVSyncOpsFlushToDelta + + @Description Compares the number of outstanding operations (pending count minus + complete count) with the limit specified. If no more than ui32Delta + operations are outstanding, this function returns PVRSRV_OK. + In the event that there are too many outstanding operations, + this call will auto-retry if bWait is specified, otherwise, it will + return PVRSRV_ERROR_RETRY. 
(Not really an "error") + + ******************************************************************************/ +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVSyncOpsFlushToDelta(const PVRSRV_CONNECTION *psConnection, + PVRSRV_CLIENT_SYNC_INFO *psClientSyncInfo, + IMG_UINT32 ui32Delta, + IMG_BOOL bWait); + +/*! + ****************************************************************************** + + @Function PVRSRVAllocSyncInfo + + @Description Creates a Sync Object. Unlike the sync objects created + automatically with "PVRSRVAllocDeviceMem", the sync objects + returned by this function do _not_ have a UM mapping to the + sync data and they do _not_ have the device virtual address + of the "opscomplete" fields. These data are to be deprecated. + + ******************************************************************************/ + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVAllocSyncInfo(IMG_CONST PVRSRV_DEV_DATA *psDevData, + PVRSRV_CLIENT_SYNC_INFO **ppsSyncInfo); + +/*! + ****************************************************************************** + + @Function PVRSRVFreeSyncInfo + + @Description Destroys a Sync Object created via + PVRSRVAllocSyncInfo. + + ******************************************************************************/ + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVFreeSyncInfo(IMG_CONST PVRSRV_DEV_DATA *psDevData, + PVRSRV_CLIENT_SYNC_INFO *psSyncInfo); + +/*! + ****************************************************************************** + + @Function PVRSRVGetErrorString + + @Description Returns a text string relating to the PVRSRV_ERROR enum. + + ******************************************************************************/ +IMG_IMPORT +const IMG_CHAR *PVRSRVGetErrorString(PVRSRV_ERROR eError); + + +/*! + ****************************************************************************** + + @Function PVRSRVCacheInvalidate + + @Description Invalidate the CPU cache for a specified memory + area. 
Note that PVRSRVGetMiscInfo provides similar cpu + cache flush/invalidate functionality for some platforms. + + ******************************************************************************/ + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVCacheInvalidate(const PVRSRV_CONNECTION *psConnection, + IMG_PVOID pvLinearAddress, + IMG_UINT32 ui32Size); + +/****************************************************************************** + Time wrapping macro +******************************************************************************/ +#define TIME_NOT_PASSED_UINT32(a,b,c) (((a) - (b)) < (c)) + +#if defined (__cplusplus) +} +#endif +#endif /* __SERVICES_H__ */ + +/****************************************************************************** + End of file (services.h) +******************************************************************************/ diff --git a/sgx_km/eurasia_km/include4/servicesext.h b/sgx_km/eurasia_km/include4/servicesext.h new file mode 100644 index 0000000..bde9caf --- /dev/null +++ b/sgx_km/eurasia_km/include4/servicesext.h @@ -0,0 +1,980 @@ +/*************************************************************************/ /*! +@Title Services definitions required by external drivers +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Provides services data structures, defines and prototypes + required by external drivers. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined (__SERVICESEXT_H__) +#define __SERVICESEXT_H__ + +/* + * Lock buffer read/write flags + */ +#define PVRSRV_LOCKFLG_READONLY (1) /*!< The locking process will only read the locked surface */ + +/*! + ***************************************************************************** + * Error values + * + * NOTE: If you change this, make sure you update the error texts in + * services4/include/pvrsrv_errors.h to match. + * + *****************************************************************************/ +typedef enum _PVRSRV_ERROR_ +{ + PVRSRV_OK = 0, + PVRSRV_ERROR_OUT_OF_MEMORY, + PVRSRV_ERROR_TOO_FEW_BUFFERS, + PVRSRV_ERROR_INVALID_PARAMS, + PVRSRV_ERROR_INIT_FAILURE, + PVRSRV_ERROR_CANT_REGISTER_CALLBACK, + PVRSRV_ERROR_INVALID_DEVICE, + PVRSRV_ERROR_NOT_OWNER, + PVRSRV_ERROR_BAD_MAPPING, + PVRSRV_ERROR_TIMEOUT, + PVRSRV_ERROR_FLIP_CHAIN_EXISTS, + PVRSRV_ERROR_INVALID_SWAPINTERVAL, + PVRSRV_ERROR_SCENE_INVALID, + PVRSRV_ERROR_STREAM_ERROR, + PVRSRV_ERROR_FAILED_DEPENDENCIES, + PVRSRV_ERROR_CMD_NOT_PROCESSED, + PVRSRV_ERROR_CMD_TOO_BIG, + PVRSRV_ERROR_DEVICE_REGISTER_FAILED, + PVRSRV_ERROR_TOOMANYBUFFERS, + PVRSRV_ERROR_NOT_SUPPORTED, + PVRSRV_ERROR_PROCESSING_BLOCKED, + + PVRSRV_ERROR_CANNOT_FLUSH_QUEUE, + PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE, + PVRSRV_ERROR_CANNOT_GET_RENDERDETAILS, + PVRSRV_ERROR_RETRY, + + PVRSRV_ERROR_DDK_VERSION_MISMATCH, + PVRSRV_ERROR_BUILD_MISMATCH, + PVRSRV_ERROR_CORE_REVISION_MISMATCH, + + PVRSRV_ERROR_UPLOAD_TOO_BIG, + + PVRSRV_ERROR_INVALID_FLAGS, + PVRSRV_ERROR_FAILED_TO_REGISTER_PROCESS, + + PVRSRV_ERROR_UNABLE_TO_LOAD_LIBRARY, + PVRSRV_ERROR_UNABLE_GET_FUNC_ADDR, + PVRSRV_ERROR_UNLOAD_LIBRARY_FAILED, + + PVRSRV_ERROR_BRIDGE_CALL_FAILED, + PVRSRV_ERROR_IOCTL_CALL_FAILED, + + PVRSRV_ERROR_MMU_CONTEXT_NOT_FOUND, + PVRSRV_ERROR_BUFFER_DEVICE_NOT_FOUND, + PVRSRV_ERROR_BUFFER_DEVICE_ALREADY_PRESENT, + + PVRSRV_ERROR_PCI_DEVICE_NOT_FOUND, + 
PVRSRV_ERROR_PCI_CALL_FAILED, + PVRSRV_ERROR_PCI_REGION_TOO_SMALL, + PVRSRV_ERROR_PCI_REGION_UNAVAILABLE, + PVRSRV_ERROR_BAD_REGION_SIZE_MISMATCH, + + PVRSRV_ERROR_REGISTER_BASE_NOT_SET, + + PVRSRV_ERROR_BM_BAD_SHAREMEM_HANDLE, + + PVRSRV_ERROR_FAILED_TO_ALLOC_USER_MEM, + PVRSRV_ERROR_FAILED_TO_ALLOC_VP_MEMORY, + PVRSRV_ERROR_FAILED_TO_MAP_SHARED_PBDESC, + PVRSRV_ERROR_FAILED_TO_GET_PHYS_ADDR, + + PVRSRV_ERROR_FAILED_TO_ALLOC_VIRT_MEMORY, + PVRSRV_ERROR_FAILED_TO_COPY_VIRT_MEMORY, + + PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES, + PVRSRV_ERROR_FAILED_TO_FREE_PAGES, + PVRSRV_ERROR_FAILED_TO_COPY_PAGES, + PVRSRV_ERROR_UNABLE_TO_LOCK_PAGES, + PVRSRV_ERROR_UNABLE_TO_UNLOCK_PAGES, + PVRSRV_ERROR_STILL_MAPPED, + PVRSRV_ERROR_MAPPING_NOT_FOUND, + PVRSRV_ERROR_PHYS_ADDRESS_EXCEEDS_32BIT, + PVRSRV_ERROR_FAILED_TO_MAP_PAGE_TABLE, + + PVRSRV_ERROR_INVALID_SEGMENT_BLOCK, + PVRSRV_ERROR_INVALID_SGXDEVDATA, + PVRSRV_ERROR_INVALID_DEVINFO, + PVRSRV_ERROR_INVALID_MEMINFO, + PVRSRV_ERROR_INVALID_MISCINFO, + PVRSRV_ERROR_UNKNOWN_IOCTL, + PVRSRV_ERROR_INVALID_CONTEXT, + PVRSRV_ERROR_UNABLE_TO_DESTROY_CONTEXT, + PVRSRV_ERROR_INVALID_HEAP, + PVRSRV_ERROR_INVALID_KERNELINFO, + PVRSRV_ERROR_UNKNOWN_POWER_STATE, + PVRSRV_ERROR_INVALID_HANDLE_TYPE, + PVRSRV_ERROR_INVALID_WRAP_TYPE, + PVRSRV_ERROR_INVALID_PHYS_ADDR, + PVRSRV_ERROR_INVALID_CPU_ADDR, + PVRSRV_ERROR_INVALID_HEAPINFO, + PVRSRV_ERROR_INVALID_PERPROC, + PVRSRV_ERROR_FAILED_TO_RETRIEVE_HEAPINFO, + PVRSRV_ERROR_INVALID_MAP_REQUEST, + PVRSRV_ERROR_INVALID_UNMAP_REQUEST, + PVRSRV_ERROR_UNABLE_TO_FIND_MAPPING_HEAP, + PVRSRV_ERROR_MAPPING_STILL_IN_USE, + + PVRSRV_ERROR_EXCEEDED_HW_LIMITS, + PVRSRV_ERROR_NO_STAGING_BUFFER_ALLOCATED, + + PVRSRV_ERROR_UNABLE_TO_CREATE_PERPROC_AREA, + PVRSRV_ERROR_UNABLE_TO_CREATE_EVENT, + PVRSRV_ERROR_UNABLE_TO_ENABLE_EVENT, + PVRSRV_ERROR_UNABLE_TO_REGISTER_EVENT, + PVRSRV_ERROR_UNABLE_TO_DESTROY_EVENT, + PVRSRV_ERROR_UNABLE_TO_CREATE_THREAD, + PVRSRV_ERROR_UNABLE_TO_CLOSE_THREAD, + 
PVRSRV_ERROR_THREAD_READ_ERROR, + PVRSRV_ERROR_UNABLE_TO_REGISTER_ISR_HANDLER, + PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR, + PVRSRV_ERROR_UNABLE_TO_UNINSTALL_ISR, + PVRSRV_ERROR_ISR_ALREADY_INSTALLED, + PVRSRV_ERROR_ISR_NOT_INSTALLED, + PVRSRV_ERROR_UNABLE_TO_INITIALISE_INTERRUPT, + PVRSRV_ERROR_UNABLE_TO_RETRIEVE_INFO, + PVRSRV_ERROR_UNABLE_TO_DO_BACKWARDS_BLIT, + PVRSRV_ERROR_UNABLE_TO_CLOSE_SERVICES, + PVRSRV_ERROR_UNABLE_TO_REGISTER_CONTEXT, + PVRSRV_ERROR_UNABLE_TO_REGISTER_RESOURCE, + PVRSRV_ERROR_UNABLE_TO_CLOSE_HANDLE, + + PVRSRV_ERROR_INVALID_CCB_COMMAND, + + PVRSRV_ERROR_UNABLE_TO_LOCK_RESOURCE, + PVRSRV_ERROR_INVALID_LOCK_ID, + PVRSRV_ERROR_RESOURCE_NOT_LOCKED, + + PVRSRV_ERROR_FLIP_FAILED, + PVRSRV_ERROR_UNBLANK_DISPLAY_FAILED, + + PVRSRV_ERROR_TIMEOUT_POLLING_FOR_VALUE, + + PVRSRV_ERROR_CREATE_RENDER_CONTEXT_FAILED, + PVRSRV_ERROR_UNKNOWN_PRIMARY_FRAG, + PVRSRV_ERROR_UNEXPECTED_SECONDARY_FRAG, + PVRSRV_ERROR_UNEXPECTED_PRIMARY_FRAG, + + PVRSRV_ERROR_UNABLE_TO_INSERT_FENCE_ID, + + PVRSRV_ERROR_BLIT_SETUP_FAILED, + + PVRSRV_ERROR_PDUMP_NOT_AVAILABLE, + PVRSRV_ERROR_PDUMP_BUFFER_FULL, + PVRSRV_ERROR_PDUMP_BUF_OVERFLOW, + PVRSRV_ERROR_PDUMP_NOT_ACTIVE, + PVRSRV_ERROR_INCOMPLETE_LINE_OVERLAPS_PAGES, + + PVRSRV_ERROR_MUTEX_DESTROY_FAILED, + PVRSRV_ERROR_MUTEX_INTERRUPTIBLE_ERROR, + + PVRSRV_ERROR_INSUFFICIENT_SCRIPT_SPACE, + PVRSRV_ERROR_INSUFFICIENT_SPACE_FOR_COMMAND, + + PVRSRV_ERROR_PROCESS_NOT_INITIALISED, + PVRSRV_ERROR_PROCESS_NOT_FOUND, + PVRSRV_ERROR_SRV_CONNECT_FAILED, + PVRSRV_ERROR_SRV_DISCONNECT_FAILED, + PVRSRV_ERROR_DEINT_PHASE_FAILED, + PVRSRV_ERROR_INIT2_PHASE_FAILED, + + PVRSRV_ERROR_UNABLE_TO_FIND_RESOURCE, + + PVRSRV_ERROR_NO_DC_DEVICES_FOUND, + PVRSRV_ERROR_UNABLE_TO_OPEN_DC_DEVICE, + PVRSRV_ERROR_UNABLE_TO_REMOVE_DEVICE, + PVRSRV_ERROR_NO_DEVICEDATA_FOUND, + PVRSRV_ERROR_NO_DEVICENODE_FOUND, + PVRSRV_ERROR_NO_CLIENTNODE_FOUND, + PVRSRV_ERROR_FAILED_TO_PROCESS_QUEUE, + + PVRSRV_ERROR_UNABLE_TO_INIT_TASK, + PVRSRV_ERROR_UNABLE_TO_SCHEDULE_TASK, 
+ PVRSRV_ERROR_UNABLE_TO_KILL_TASK, + + PVRSRV_ERROR_UNABLE_TO_ENABLE_TIMER, + PVRSRV_ERROR_UNABLE_TO_DISABLE_TIMER, + PVRSRV_ERROR_UNABLE_TO_REMOVE_TIMER, + + PVRSRV_ERROR_UNKNOWN_PIXEL_FORMAT, + PVRSRV_ERROR_UNKNOWN_SCRIPT_OPERATION, + + PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE, + PVRSRV_ERROR_HANDLE_NOT_ALLOCATED, + PVRSRV_ERROR_HANDLE_TYPE_MISMATCH, + PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE, + PVRSRV_ERROR_HANDLE_NOT_SHAREABLE, + PVRSRV_ERROR_HANDLE_NOT_FOUND, + PVRSRV_ERROR_INVALID_SUBHANDLE, + PVRSRV_ERROR_HANDLE_BATCH_IN_USE, + PVRSRV_ERROR_HANDLE_BATCH_COMMIT_FAILURE, + + PVRSRV_ERROR_UNABLE_TO_CREATE_HASH_TABLE, + PVRSRV_ERROR_INSERT_HASH_TABLE_DATA_FAILED, + + PVRSRV_ERROR_UNSUPPORTED_BACKING_STORE, + PVRSRV_ERROR_UNABLE_TO_DESTROY_BM_HEAP, + + PVRSRV_ERROR_UNKNOWN_INIT_SERVER_STATE, + + PVRSRV_ERROR_NO_FREE_DEVICEIDS_AVALIABLE, + PVRSRV_ERROR_INVALID_DEVICEID, + PVRSRV_ERROR_DEVICEID_NOT_FOUND, + + PVRSRV_ERROR_MEMORY_TEST_FAILED, + PVRSRV_ERROR_CPUPADDR_TEST_FAILED, + PVRSRV_ERROR_COPY_TEST_FAILED, + + PVRSRV_ERROR_SEMAPHORE_NOT_INITIALISED, + + PVRSRV_ERROR_UNABLE_TO_RELEASE_CLOCK, + PVRSRV_ERROR_CLOCK_REQUEST_FAILED, + PVRSRV_ERROR_DISABLE_CLOCK_FAILURE, + PVRSRV_ERROR_UNABLE_TO_SET_CLOCK_RATE, + PVRSRV_ERROR_UNABLE_TO_ROUND_CLOCK_RATE, + PVRSRV_ERROR_UNABLE_TO_ENABLE_CLOCK, + PVRSRV_ERROR_UNABLE_TO_GET_CLOCK, + PVRSRV_ERROR_UNABLE_TO_GET_PARENT_CLOCK, + PVRSRV_ERROR_UNABLE_TO_GET_SYSTEM_CLOCK, + + PVRSRV_ERROR_UNKNOWN_SGL_ERROR, + + PVRSRV_ERROR_SYSTEM_POWER_CHANGE_FAILURE, + PVRSRV_ERROR_DEVICE_POWER_CHANGE_FAILURE, + + PVRSRV_ERROR_BAD_SYNC_STATE, + + PVRSRV_ERROR_CACHEOP_FAILED, + + PVRSRV_ERROR_CACHE_INVALIDATE_FAILED, + + PVRSRV_ERROR_TASK_FAILED, + + PVRSRV_ERROR_NOT_READY, + + PVRSRV_ERROR_FORCE_I32 = 0x7fffffff + +} PVRSRV_ERROR; + + +/*! + ***************************************************************************** + * List of known device classes. 
+ *****************************************************************************/ +typedef enum _PVRSRV_DEVICE_CLASS_ +{ + PVRSRV_DEVICE_CLASS_3D = 0 , + PVRSRV_DEVICE_CLASS_DISPLAY = 1 , + PVRSRV_DEVICE_CLASS_BUFFER = 2 , + PVRSRV_DEVICE_CLASS_VIDEO = 3 , + + PVRSRV_DEVICE_CLASS_FORCE_I32 = 0x7fffffff + +} PVRSRV_DEVICE_CLASS; + + +/*! + ***************************************************************************** + * States for power management + *****************************************************************************/ +typedef enum _PVRSRV_SYS_POWER_STATE_ +{ + PVRSRV_SYS_POWER_STATE_Unspecified = -1, /*!< Unspecified : Uninitialised */ + PVRSRV_SYS_POWER_STATE_D0 = 0, /*!< On */ + PVRSRV_SYS_POWER_STATE_D1 = 1, /*!< User Idle */ + PVRSRV_SYS_POWER_STATE_D2 = 2, /*!< System Idle / sleep */ + PVRSRV_SYS_POWER_STATE_D3 = 3, /*!< Suspend / Hibernate */ + PVRSRV_SYS_POWER_STATE_D4 = 4, /*!< shutdown */ + + PVRSRV_SYS_POWER_STATE_FORCE_I32 = 0x7fffffff + +} PVRSRV_SYS_POWER_STATE, *PPVRSRV_SYS_POWER_STATE; + + +typedef enum _PVRSRV_DEV_POWER_STATE_ +{ + PVRSRV_DEV_POWER_STATE_DEFAULT = -1, /*!< Default state for the device */ + PVRSRV_DEV_POWER_STATE_ON = 0, /*!< Running */ + PVRSRV_DEV_POWER_STATE_IDLE = 1, /*!< Powered but operation paused */ + PVRSRV_DEV_POWER_STATE_OFF = 2, /*!< Unpowered */ + + PVRSRV_DEV_POWER_STATE_FORCE_I32 = 0x7fffffff + +} PVRSRV_DEV_POWER_STATE, *PPVRSRV_DEV_POWER_STATE; /* PRQA S 3205 */ + + +/* Power transition handler prototypes */ +typedef PVRSRV_ERROR (*PFN_PRE_POWER) (IMG_HANDLE hDevHandle, + PVRSRV_DEV_POWER_STATE eNewPowerState, + PVRSRV_DEV_POWER_STATE eCurrentPowerState); +typedef PVRSRV_ERROR (*PFN_POST_POWER) (IMG_HANDLE hDevHandle, + PVRSRV_DEV_POWER_STATE eNewPowerState, + PVRSRV_DEV_POWER_STATE eCurrentPowerState); + +/* Clock speed handler prototypes */ +typedef PVRSRV_ERROR (*PFN_PRE_CLOCKSPEED_CHANGE) (IMG_HANDLE hDevHandle, + IMG_BOOL bIdleDevice, + PVRSRV_DEV_POWER_STATE eCurrentPowerState); +typedef PVRSRV_ERROR 
(*PFN_POST_CLOCKSPEED_CHANGE) (IMG_HANDLE hDevHandle, + IMG_BOOL bIdleDevice, + PVRSRV_DEV_POWER_STATE eCurrentPowerState); + + +/***************************************************************************** + * Enumeration of all possible pixel types. Where applicable, Ordering of name + * is in reverse order of memory bytes (i.e. as a word in little endian). + * e.g. A8R8G8B8 is in memory as 4 bytes in order: BB GG RR AA + * + * NOTE: When modifying this structure please update the client driver format + * tables located in %WORKROOT%/eurasia/codegen/pixfmts using the tool + * located in %WORKROOT%/eurasia/tools/intern/TextureFormatParser. + * + *****************************************************************************/ +typedef enum _PVRSRV_PIXEL_FORMAT_ { + /* Basic types */ + PVRSRV_PIXEL_FORMAT_UNKNOWN = 0, + PVRSRV_PIXEL_FORMAT_RGB565 = 1, + PVRSRV_PIXEL_FORMAT_RGB555 = 2, + PVRSRV_PIXEL_FORMAT_RGB888 = 3, /*!< 24bit */ + PVRSRV_PIXEL_FORMAT_BGR888 = 4, /*!< 24bit */ + PVRSRV_PIXEL_FORMAT_GREY_SCALE = 8, + PVRSRV_PIXEL_FORMAT_PAL12 = 13, + PVRSRV_PIXEL_FORMAT_PAL8 = 14, + PVRSRV_PIXEL_FORMAT_PAL4 = 15, + PVRSRV_PIXEL_FORMAT_PAL2 = 16, + PVRSRV_PIXEL_FORMAT_PAL1 = 17, + PVRSRV_PIXEL_FORMAT_ARGB1555 = 18, + PVRSRV_PIXEL_FORMAT_ARGB4444 = 19, + PVRSRV_PIXEL_FORMAT_ARGB8888 = 20, + PVRSRV_PIXEL_FORMAT_ABGR8888 = 21, + PVRSRV_PIXEL_FORMAT_YV12 = 22, + PVRSRV_PIXEL_FORMAT_I420 = 23, + PVRSRV_PIXEL_FORMAT_IMC2 = 25, + PVRSRV_PIXEL_FORMAT_XRGB8888 = 26, + PVRSRV_PIXEL_FORMAT_XBGR8888 = 27, + PVRSRV_PIXEL_FORMAT_BGRA8888 = 28, + PVRSRV_PIXEL_FORMAT_XRGB4444 = 29, + PVRSRV_PIXEL_FORMAT_ARGB8332 = 30, + PVRSRV_PIXEL_FORMAT_A2RGB10 = 31, /*!< 32bpp, 10 bits for R, G, B, 2 bits for A */ + PVRSRV_PIXEL_FORMAT_A2BGR10 = 32, /*!< 32bpp, 10 bits for B, G, R, 2 bits for A */ + PVRSRV_PIXEL_FORMAT_P8 = 33, + PVRSRV_PIXEL_FORMAT_L8 = 34, + PVRSRV_PIXEL_FORMAT_A8L8 = 35, + PVRSRV_PIXEL_FORMAT_A4L4 = 36, + PVRSRV_PIXEL_FORMAT_L16 = 37, + PVRSRV_PIXEL_FORMAT_L6V5U5 = 38, + 
PVRSRV_PIXEL_FORMAT_V8U8 = 39, + PVRSRV_PIXEL_FORMAT_V16U16 = 40, + PVRSRV_PIXEL_FORMAT_QWVU8888 = 41, + PVRSRV_PIXEL_FORMAT_XLVU8888 = 42, + PVRSRV_PIXEL_FORMAT_QWVU16 = 43, + PVRSRV_PIXEL_FORMAT_D16 = 44, + PVRSRV_PIXEL_FORMAT_D24S8 = 45, + PVRSRV_PIXEL_FORMAT_D24X8 = 46, + + /* Added to ensure TQ build */ + PVRSRV_PIXEL_FORMAT_ABGR16 = 47, + PVRSRV_PIXEL_FORMAT_ABGR16F = 48, + PVRSRV_PIXEL_FORMAT_ABGR32 = 49, + PVRSRV_PIXEL_FORMAT_ABGR32F = 50, + PVRSRV_PIXEL_FORMAT_B10GR11 = 51, + PVRSRV_PIXEL_FORMAT_GR88 = 52, + PVRSRV_PIXEL_FORMAT_BGR32 = 53, + PVRSRV_PIXEL_FORMAT_GR32 = 54, + PVRSRV_PIXEL_FORMAT_E5BGR9 = 55, + + /* reserved types */ + PVRSRV_PIXEL_FORMAT_RESERVED1 = 56, + PVRSRV_PIXEL_FORMAT_RESERVED2 = 57, + PVRSRV_PIXEL_FORMAT_RESERVED3 = 58, + PVRSRV_PIXEL_FORMAT_RESERVED4 = 59, + PVRSRV_PIXEL_FORMAT_RESERVED5 = 60, + + /* RGB space packed formats */ + PVRSRV_PIXEL_FORMAT_R8G8_B8G8 = 61, + PVRSRV_PIXEL_FORMAT_G8R8_G8B8 = 62, + + /* YUV space planar formats */ + PVRSRV_PIXEL_FORMAT_NV11 = 63, + PVRSRV_PIXEL_FORMAT_NV12 = 64, + + /* YUV space packed formats */ + PVRSRV_PIXEL_FORMAT_YUY2 = 65, + PVRSRV_PIXEL_FORMAT_YUV420 = 66, + PVRSRV_PIXEL_FORMAT_YUV444 = 67, + PVRSRV_PIXEL_FORMAT_VUY444 = 68, + PVRSRV_PIXEL_FORMAT_YUYV = 69, + PVRSRV_PIXEL_FORMAT_YVYU = 70, + PVRSRV_PIXEL_FORMAT_UYVY = 71, + PVRSRV_PIXEL_FORMAT_VYUY = 72, + + PVRSRV_PIXEL_FORMAT_FOURCC_ORG_UYVY = 73, /*!< See http://www.fourcc.org/yuv.php#UYVY */ + PVRSRV_PIXEL_FORMAT_FOURCC_ORG_YUYV = 74, /*!< See http://www.fourcc.org/yuv.php#YUYV */ + PVRSRV_PIXEL_FORMAT_FOURCC_ORG_YVYU = 75, /*!< See http://www.fourcc.org/yuv.php#YVYU */ + PVRSRV_PIXEL_FORMAT_FOURCC_ORG_VYUY = 76, /*!< No fourcc.org link */ + PVRSRV_PIXEL_FORMAT_FOURCC_ORG_AYUV = 77, /*!< See http://www.fourcc.org/yuv.php#AYUV */ + + /* 4 component, 32 bits per component types */ + PVRSRV_PIXEL_FORMAT_A32B32G32R32 = 78, /*!< type unspecified */ + PVRSRV_PIXEL_FORMAT_A32B32G32R32F = 79, /*!< float type */ + 
PVRSRV_PIXEL_FORMAT_A32B32G32R32_UINT = 80, /*!< uint type */ + PVRSRV_PIXEL_FORMAT_A32B32G32R32_SINT = 81, /*!< sint type */ + + /* 3 component, 32 bits per component types */ + PVRSRV_PIXEL_FORMAT_B32G32R32 = 82, /*!< type unspecified */ + PVRSRV_PIXEL_FORMAT_B32G32R32F = 83, /*!< float data */ + PVRSRV_PIXEL_FORMAT_B32G32R32_UINT = 84, /*!< uint data */ + PVRSRV_PIXEL_FORMAT_B32G32R32_SINT = 85, /*!< signed int data */ + + /* 2 component, 32 bits per component types */ + PVRSRV_PIXEL_FORMAT_G32R32 = 86, /*!< type unspecified */ + PVRSRV_PIXEL_FORMAT_G32R32F = 87, /*!< float */ + PVRSRV_PIXEL_FORMAT_G32R32_UINT = 88, /*!< uint */ + PVRSRV_PIXEL_FORMAT_G32R32_SINT = 89, /*!< signed int */ + + /* 1 component, 32 bits per component types */ + PVRSRV_PIXEL_FORMAT_D32F = 90, /*!< float depth */ + PVRSRV_PIXEL_FORMAT_R32 = 91, /*!< type unspecified */ + PVRSRV_PIXEL_FORMAT_R32F = 92, /*!< float type */ + PVRSRV_PIXEL_FORMAT_R32_UINT = 93, /*!< unsigned int type */ + PVRSRV_PIXEL_FORMAT_R32_SINT = 94, /*!< signed int type */ + + /* 4 component, 16 bits per component types */ + PVRSRV_PIXEL_FORMAT_A16B16G16R16 = 95, /*!< type unspecified */ + PVRSRV_PIXEL_FORMAT_A16B16G16R16F = 96, /*!< type float */ + PVRSRV_PIXEL_FORMAT_A16B16G16R16_SINT = 97, /*!< signed ints */ + PVRSRV_PIXEL_FORMAT_A16B16G16R16_SNORM = 98, /*!< signed normalised int */ + PVRSRV_PIXEL_FORMAT_A16B16G16R16_UINT = 99, /*!< unsigned ints */ + PVRSRV_PIXEL_FORMAT_A16B16G16R16_UNORM = 100, /*!< normalised unsigned int */ + + /* 2 component, 16 bits per component types */ + PVRSRV_PIXEL_FORMAT_G16R16 = 101, /*!< unspecified type */ + PVRSRV_PIXEL_FORMAT_G16R16F = 102, /*!< float type */ + PVRSRV_PIXEL_FORMAT_G16R16_UINT = 103, /*!< unsigned int type */ + PVRSRV_PIXEL_FORMAT_G16R16_UNORM = 104, /*!< unsigned normalised */ + PVRSRV_PIXEL_FORMAT_G16R16_SINT = 105, /*!< signed int */ + PVRSRV_PIXEL_FORMAT_G16R16_SNORM = 106, /*!< signed normalised */ + + /* 1 component, 16 bits per component types */ + 
PVRSRV_PIXEL_FORMAT_R16 = 107, /*!< type unspecified */ + PVRSRV_PIXEL_FORMAT_R16F = 108, /*!< float type */ + PVRSRV_PIXEL_FORMAT_R16_UINT = 109, /*!< unsigned int type */ + PVRSRV_PIXEL_FORMAT_R16_UNORM = 110, /*!< unsigned normalised int type */ + PVRSRV_PIXEL_FORMAT_R16_SINT = 111, /*!< signed int type */ + PVRSRV_PIXEL_FORMAT_R16_SNORM = 112, /*!< signed normalised int type */ + + /* 4 component, 8 bits per component types */ + PVRSRV_PIXEL_FORMAT_X8R8G8B8 = 113, /*!< type unspecified */ + PVRSRV_PIXEL_FORMAT_X8R8G8B8_UNORM = 114, /*!< normalised unsigned int */ + PVRSRV_PIXEL_FORMAT_X8R8G8B8_UNORM_SRGB = 115, /*!< normalised uint with sRGB */ + + PVRSRV_PIXEL_FORMAT_A8R8G8B8 = 116, /*!< type unspecified */ + PVRSRV_PIXEL_FORMAT_A8R8G8B8_UNORM = 117, /*!< normalised unsigned int */ + PVRSRV_PIXEL_FORMAT_A8R8G8B8_UNORM_SRGB = 118, /*!< normalised uint with sRGB */ + + PVRSRV_PIXEL_FORMAT_A8B8G8R8 = 119, /*!< type unspecified */ + PVRSRV_PIXEL_FORMAT_A8B8G8R8_UINT = 120, /*!< unsigned int */ + PVRSRV_PIXEL_FORMAT_A8B8G8R8_UNORM = 121, /*!< normalised unsigned int */ + PVRSRV_PIXEL_FORMAT_A8B8G8R8_UNORM_SRGB = 122, /*!< normalised unsigned int */ + PVRSRV_PIXEL_FORMAT_A8B8G8R8_SINT = 123, /*!< signed int */ + PVRSRV_PIXEL_FORMAT_A8B8G8R8_SNORM = 124, /*!< normalised signed int */ + + /* 2 component, 8 bits per component types */ + PVRSRV_PIXEL_FORMAT_G8R8 = 125, /*!< type unspecified */ + PVRSRV_PIXEL_FORMAT_G8R8_UINT = 126, /*!< unsigned int type */ + PVRSRV_PIXEL_FORMAT_G8R8_UNORM = 127, /*!< unsigned int normalised */ + PVRSRV_PIXEL_FORMAT_G8R8_SINT = 128, /*!< signed int type */ + PVRSRV_PIXEL_FORMAT_G8R8_SNORM = 129, /*!< signed int normalised */ + + /* 1 component, 8 bits per component types */ + PVRSRV_PIXEL_FORMAT_A8 = 130, /*!< type unspecified, alpha channel */ + PVRSRV_PIXEL_FORMAT_R8 = 131, /*!< type unspecified */ + PVRSRV_PIXEL_FORMAT_R8_UINT = 132, /*!< unsigned int */ + PVRSRV_PIXEL_FORMAT_R8_UNORM = 133, /*!< unsigned normalised int */ + 
PVRSRV_PIXEL_FORMAT_R8_SINT = 134, /*!< signed int */ + PVRSRV_PIXEL_FORMAT_R8_SNORM = 135, /*!< signed normalised int */ + + /* A2RGB10 types */ + PVRSRV_PIXEL_FORMAT_A2B10G10R10 = 136, /*!< Type unspecified */ + PVRSRV_PIXEL_FORMAT_A2B10G10R10_UNORM = 137, /*!< normalised unsigned int */ + PVRSRV_PIXEL_FORMAT_A2B10G10R10_UINT = 138, /*!< unsigned int */ + + /* F11F11F10 types */ + PVRSRV_PIXEL_FORMAT_B10G11R11 = 139, /*!< type unspecified */ + PVRSRV_PIXEL_FORMAT_B10G11R11F = 140, /*!< float type */ + + /* esoteric types */ + PVRSRV_PIXEL_FORMAT_X24G8R32 = 141, /*!< 64 bit, type unspecified (Usually typed to D32S8 style) */ + PVRSRV_PIXEL_FORMAT_G8R24 = 142, /*!< 32 bit, type unspecified (Usually typed to D24S8 style) */ + PVRSRV_PIXEL_FORMAT_X8R24 = 143, + PVRSRV_PIXEL_FORMAT_E5B9G9R9 = 144, /*!< 32 bit, shared exponent (RGBE). */ + PVRSRV_PIXEL_FORMAT_R1 = 145, /*!< 1 bit monochrome */ + + PVRSRV_PIXEL_FORMAT_RESERVED6 = 146, + PVRSRV_PIXEL_FORMAT_RESERVED7 = 147, + PVRSRV_PIXEL_FORMAT_RESERVED8 = 148, + PVRSRV_PIXEL_FORMAT_RESERVED9 = 149, + PVRSRV_PIXEL_FORMAT_RESERVED10 = 150, + PVRSRV_PIXEL_FORMAT_RESERVED11 = 151, + PVRSRV_PIXEL_FORMAT_RESERVED12 = 152, + PVRSRV_PIXEL_FORMAT_RESERVED13 = 153, + PVRSRV_PIXEL_FORMAT_RESERVED14 = 154, + PVRSRV_PIXEL_FORMAT_RESERVED15 = 155, + PVRSRV_PIXEL_FORMAT_RESERVED16 = 156, + PVRSRV_PIXEL_FORMAT_RESERVED17 = 157, + PVRSRV_PIXEL_FORMAT_RESERVED18 = 158, + PVRSRV_PIXEL_FORMAT_RESERVED19 = 159, + PVRSRV_PIXEL_FORMAT_RESERVED20 = 160, + + /* DXLegacy vertex types */ + PVRSRV_PIXEL_FORMAT_UBYTE4 = 161, /*!< 4 channels, 1 byte per channel, normalised */ + PVRSRV_PIXEL_FORMAT_SHORT4 = 162, /*!< 4 signed channels, 16 bits each, unnormalised */ + PVRSRV_PIXEL_FORMAT_SHORT4N = 163, /*!< 4 signed channels, 16 bits each, normalised */ + PVRSRV_PIXEL_FORMAT_USHORT4N = 164, /*!< 4 unsigned channels, 16 bits each, normalised */ + PVRSRV_PIXEL_FORMAT_SHORT2N = 165, /*!< 2 signed channels, 16 bits each, normalised */ + 
PVRSRV_PIXEL_FORMAT_SHORT2 = 166, /*!< 2 signed channels, 16 bits each, unnormalised */ + PVRSRV_PIXEL_FORMAT_USHORT2N = 167, /*!< 2 unsigned channels, 16 bits each, normalised */ + PVRSRV_PIXEL_FORMAT_UDEC3 = 168, /*!< 3 10-bit channels, unnormalised, unsigned*/ + PVRSRV_PIXEL_FORMAT_DEC3N = 169, /*!< 3 10-bit channels, signed normalised */ + PVRSRV_PIXEL_FORMAT_F16_2 = 170, /*!< 2 F16 channels */ + PVRSRV_PIXEL_FORMAT_F16_4 = 171, /*!< 4 F16 channels */ + + /* misc float types */ + PVRSRV_PIXEL_FORMAT_L_F16 = 172, + PVRSRV_PIXEL_FORMAT_L_F16_REP = 173, + PVRSRV_PIXEL_FORMAT_L_F16_A_F16 = 174, + PVRSRV_PIXEL_FORMAT_A_F16 = 175, + PVRSRV_PIXEL_FORMAT_B16G16R16F = 176, + + PVRSRV_PIXEL_FORMAT_L_F32 = 177, + PVRSRV_PIXEL_FORMAT_A_F32 = 178, + PVRSRV_PIXEL_FORMAT_L_F32_A_F32 = 179, + + /* powervr types */ + PVRSRV_PIXEL_FORMAT_PVRTC2 = 180, + PVRSRV_PIXEL_FORMAT_PVRTC4 = 181, + PVRSRV_PIXEL_FORMAT_PVRTCII2 = 182, + PVRSRV_PIXEL_FORMAT_PVRTCII4 = 183, + PVRSRV_PIXEL_FORMAT_PVRTCIII = 184, + PVRSRV_PIXEL_FORMAT_PVRO8 = 185, + PVRSRV_PIXEL_FORMAT_PVRO88 = 186, + PVRSRV_PIXEL_FORMAT_PT1 = 187, + PVRSRV_PIXEL_FORMAT_PT2 = 188, + PVRSRV_PIXEL_FORMAT_PT4 = 189, + PVRSRV_PIXEL_FORMAT_PT8 = 190, + PVRSRV_PIXEL_FORMAT_PTW = 191, + PVRSRV_PIXEL_FORMAT_PTB = 192, + PVRSRV_PIXEL_FORMAT_MONO8 = 193, + PVRSRV_PIXEL_FORMAT_MONO16 = 194, + + /* additional YUV types */ + PVRSRV_PIXEL_FORMAT_C0_YUYV = 195, + PVRSRV_PIXEL_FORMAT_C0_UYVY = 196, + PVRSRV_PIXEL_FORMAT_C0_YVYU = 197, + PVRSRV_PIXEL_FORMAT_C0_VYUY = 198, + PVRSRV_PIXEL_FORMAT_C1_YUYV = 199, + PVRSRV_PIXEL_FORMAT_C1_UYVY = 200, + PVRSRV_PIXEL_FORMAT_C1_YVYU = 201, + PVRSRV_PIXEL_FORMAT_C1_VYUY = 202, + + /* planar YUV types */ + PVRSRV_PIXEL_FORMAT_C0_YUV420_2P_UV = 203, + PVRSRV_PIXEL_FORMAT_C0_YUV420_2P_VU = 204, + PVRSRV_PIXEL_FORMAT_C0_YUV420_3P = 205, + PVRSRV_PIXEL_FORMAT_C1_YUV420_2P_UV = 206, + PVRSRV_PIXEL_FORMAT_C1_YUV420_2P_VU = 207, + PVRSRV_PIXEL_FORMAT_C1_YUV420_3P = 208, + + PVRSRV_PIXEL_FORMAT_A2B10G10R10F = 
209, + PVRSRV_PIXEL_FORMAT_B8G8R8_SINT = 210, + PVRSRV_PIXEL_FORMAT_PVRF32SIGNMASK = 211, + + PVRSRV_PIXEL_FORMAT_ABGR4444 = 212, + PVRSRV_PIXEL_FORMAT_ABGR1555 = 213, + PVRSRV_PIXEL_FORMAT_BGR565 = 214, + + /* 4k aligned planar YUV */ + PVRSRV_PIXEL_FORMAT_C0_4KYUV420_2P_UV = 215, + PVRSRV_PIXEL_FORMAT_C0_4KYUV420_2P_VU = 216, + PVRSRV_PIXEL_FORMAT_C1_4KYUV420_2P_UV = 217, + PVRSRV_PIXEL_FORMAT_C1_4KYUV420_2P_VU = 218, + PVRSRV_PIXEL_FORMAT_P208 = 219, + PVRSRV_PIXEL_FORMAT_A8P8 = 220, + + PVRSRV_PIXEL_FORMAT_A4 = 221, + PVRSRV_PIXEL_FORMAT_AYUV8888 = 222, + PVRSRV_PIXEL_FORMAT_RAW256 = 223, + PVRSRV_PIXEL_FORMAT_RAW512 = 224, + PVRSRV_PIXEL_FORMAT_RAW1024 = 225, + + /* Same as NV12 but with interleaved VU rather than interleaved UV */ + PVRSRV_PIXEL_FORMAT_NV21 = 226, + + /* Semi-planar version of YUYV */ + PVRSRV_PIXEL_FORMAT_NV16 = 227, + + PVRSRV_PIXEL_FORMAT_FORCE_I32 = 0x7fffffff + +} PVRSRV_PIXEL_FORMAT; + +/*! + ***************************************************************************** + * Enumeration of possible alpha types. + *****************************************************************************/ +typedef enum _PVRSRV_ALPHA_FORMAT_ { + PVRSRV_ALPHA_FORMAT_UNKNOWN = 0x00000000, + PVRSRV_ALPHA_FORMAT_PRE = 0x00000001, + PVRSRV_ALPHA_FORMAT_NONPRE = 0x00000002, + PVRSRV_ALPHA_FORMAT_MASK = 0x0000000F +} PVRSRV_ALPHA_FORMAT; + +/*! + ***************************************************************************** + * Enumeration of possible alpha types. + *****************************************************************************/ +typedef enum _PVRSRV_COLOURSPACE_FORMAT_ { + PVRSRV_COLOURSPACE_FORMAT_UNKNOWN = 0x00000000, + PVRSRV_COLOURSPACE_FORMAT_LINEAR = 0x00010000, + PVRSRV_COLOURSPACE_FORMAT_NONLINEAR = 0x00020000, + PVRSRV_COLOURSPACE_FORMAT_MASK = 0x000F0000 +} PVRSRV_COLOURSPACE_FORMAT; + + +/* + * Drawable orientation (in degrees clockwise). + * Opposite sense from WSEGL. 
+ */ +typedef enum _PVRSRV_ROTATION_ { + PVRSRV_ROTATE_0 = 0, + PVRSRV_ROTATE_90 = 1, + PVRSRV_ROTATE_180 = 2, + PVRSRV_ROTATE_270 = 3, + PVRSRV_FLIP_Y + +} PVRSRV_ROTATION; + +/*! + * Flags for DisplayClassCreateSwapChain. + */ +#define PVRSRV_CREATE_SWAPCHAIN_SHARED (1<<0) +#define PVRSRV_CREATE_SWAPCHAIN_QUERY (1<<1) +#define PVRSRV_CREATE_SWAPCHAIN_OEMOVERLAY (1<<2) + +/*! + ***************************************************************************** + * Structure providing implementation details for serialisation and + * synchronisation of operations. This is the fundamental unit on which operations + * are synced, and would typically be included in any data structures that require + * serialised accesses etc. e.g. MEM_INFO structures + * + *****************************************************************************/ +/* + Sync Data to be shared/mapped between user/kernel +*/ +typedef struct _PVRSRV_SYNC_DATA_ +{ + /* CPU accessible WriteOp Info */ + IMG_UINT32 ui32WriteOpsPending; + volatile IMG_UINT32 ui32WriteOpsComplete; + + /* CPU accessible ReadOp Info */ + IMG_UINT32 ui32ReadOpsPending; + volatile IMG_UINT32 ui32ReadOpsComplete; + + /* CPU accessible ReadOp2 Info */ + IMG_UINT32 ui32ReadOps2Pending; + volatile IMG_UINT32 ui32ReadOps2Complete; + + /* pdump specific value */ + IMG_UINT32 ui32LastOpDumpVal; + IMG_UINT32 ui32LastReadOpDumpVal; + + /* Last write operation on this sync */ + IMG_UINT64 ui64LastWrite; + +} PVRSRV_SYNC_DATA; + +/* + Client Sync Info structure +*/ +typedef struct _PVRSRV_CLIENT_SYNC_INFO_ +{ + /* mapping of the kernel sync data */ + PVRSRV_SYNC_DATA *psSyncData; + + /* Device accessible WriteOp Info */ + IMG_DEV_VIRTADDR sWriteOpsCompleteDevVAddr; + + /* Device accessible ReadOp Info */ + IMG_DEV_VIRTADDR sReadOpsCompleteDevVAddr; + + /* Device accessible ReadOp2 Info */ + IMG_DEV_VIRTADDR sReadOps2CompleteDevVAddr; + + /* handle to client mapping data (OS specific) */ + IMG_HANDLE hMappingInfo; + + /* handle to kernel sync 
info */ + IMG_HANDLE hKernelSyncInfo; + +} PVRSRV_CLIENT_SYNC_INFO, *PPVRSRV_CLIENT_SYNC_INFO; + +/*! + ***************************************************************************** + * Resource locking structure + *****************************************************************************/ +typedef struct PVRSRV_RESOURCE_TAG +{ + volatile IMG_UINT32 ui32Lock; + IMG_UINT32 ui32ID; +#if !defined(PVR_LINUX_USING_WORKQUEUES) && defined(__linux__) + IMG_VOID *pOSSyncPrimitive; +#endif /* !defined(PVR_LINUX_USING_WORKQUEUES) && defined(__linux__) */ +}PVRSRV_RESOURCE; +typedef PVRSRV_RESOURCE PVRSRV_RES_HANDLE; + + +/* command complete callback pfn prototype */ +typedef IMG_VOID (*PFN_CMD_COMPLETE) (IMG_HANDLE); +typedef IMG_VOID (**PPFN_CMD_COMPLETE) (IMG_HANDLE); + +/* private command handler prototype */ +typedef IMG_BOOL (*PFN_CMD_PROC) (IMG_HANDLE, IMG_UINT32, IMG_VOID*); +typedef IMG_BOOL (**PPFN_CMD_PROC) (IMG_HANDLE, IMG_UINT32, IMG_VOID*); + + +/* + rectangle structure required by Lock API +*/ +typedef struct _IMG_RECT_ +{ + IMG_INT32 x0; + IMG_INT32 y0; + IMG_INT32 x1; + IMG_INT32 y1; +}IMG_RECT; + +typedef struct _IMG_RECT_16_ +{ + IMG_INT16 x0; + IMG_INT16 y0; + IMG_INT16 x1; + IMG_INT16 y1; +}IMG_RECT_16; + + +/* common pfn between BC/DC */ +typedef PVRSRV_ERROR (*PFN_GET_BUFFER_ADDR)(IMG_HANDLE, + IMG_HANDLE, + IMG_SYS_PHYADDR**, + IMG_UINT32*, + IMG_VOID**, + IMG_HANDLE*, + IMG_BOOL*, + IMG_UINT32*); + + +/* + Display dimension structure definition +*/ +typedef struct DISPLAY_DIMS_TAG +{ + IMG_UINT32 ui32ByteStride; + IMG_UINT32 ui32Width; + IMG_UINT32 ui32Height; +} DISPLAY_DIMS; + + +/* + Display format structure definition +*/ +typedef struct DISPLAY_FORMAT_TAG +{ + /* pixel format type */ + PVRSRV_PIXEL_FORMAT pixelformat; +} DISPLAY_FORMAT; + +/* + Display Surface Attributes structure definition +*/ +typedef struct DISPLAY_SURF_ATTRIBUTES_TAG +{ + /* pixel format type */ + PVRSRV_PIXEL_FORMAT pixelformat; + /* dimensions information structure array 
*/ + DISPLAY_DIMS sDims; +} DISPLAY_SURF_ATTRIBUTES; + + +/* + Display Mode information structure definition +*/ +typedef struct DISPLAY_MODE_INFO_TAG +{ + /* pixel format type */ + PVRSRV_PIXEL_FORMAT pixelformat; + /* dimensions information structure array */ + DISPLAY_DIMS sDims; + /* refresh rate of the display */ + IMG_UINT32 ui32RefreshHZ; + /* OEM specific flags */ + IMG_UINT32 ui32OEMFlags; +} DISPLAY_MODE_INFO; + +#define MAX_DISPLAY_NAME_SIZE (64) /* arbitrary choice! */ + +/* + Display info structure definition +*/ +typedef struct DISPLAY_INFO_TAG +{ + /* max swapchains supported */ + IMG_UINT32 ui32MaxSwapChains; + /* max buffers in a swapchain */ + IMG_UINT32 ui32MaxSwapChainBuffers; + /* min swap interval supported */ + IMG_UINT32 ui32MinSwapInterval; + /* max swap interval supported */ + IMG_UINT32 ui32MaxSwapInterval; + /* physical dimensions of the display required for DPI calc. */ + IMG_UINT32 ui32PhysicalWidthmm; + IMG_UINT32 ui32PhysicalHeightmm; + /* display name */ + IMG_CHAR szDisplayName[MAX_DISPLAY_NAME_SIZE]; +#if defined(SUPPORT_HW_CURSOR) + /* cursor dimensions */ + IMG_UINT16 ui32CursorWidth; + IMG_UINT16 ui32CursorHeight; +#endif +} DISPLAY_INFO; + +typedef struct ACCESS_INFO_TAG +{ + IMG_UINT32 ui32Size; + IMG_UINT32 ui32FBPhysBaseAddress; + IMG_UINT32 ui32FBMemAvailable; /* size of usable FB memory */ + IMG_UINT32 ui32SysPhysBaseAddress; + IMG_UINT32 ui32SysSize; + IMG_UINT32 ui32DevIRQ; +}ACCESS_INFO; + + + +#if defined(PDUMP_SUSPEND_IS_PER_THREAD) +/** Present only on WinMobile 6.5 */ + +typedef struct { + IMG_UINT32 threadId; + IMG_INT suspendCount; +} PVRSRV_THREAD_SUSPEND_COUNT; + +#define PVRSRV_PDUMP_SUSPEND_Q_NAME "PVRSRVPDumpSuspendMsgQ" +#define PVRSRV_PDUMP_SUSPEND_Q_LENGTH 8 + +#endif /* defined(PDUMP_SUSPEND_IS_PER_THREAD) */ + + +/*! 
+ ***************************************************************************** + * This structure is used for OS independent registry (profile) access + *****************************************************************************/ +typedef struct _PVRSRV_REGISTRY_INFO_ +{ + IMG_UINT32 ui32DevCookie; + IMG_PCHAR pszKey; + IMG_PCHAR pszValue; + IMG_PCHAR pszBuf; + IMG_UINT32 ui32BufSize; +} PVRSRV_REGISTRY_INFO, *PPVRSRV_REGISTRY_INFO; + + +PVRSRV_ERROR IMG_CALLCONV PVRSRVReadRegistryString (PPVRSRV_REGISTRY_INFO psRegInfo); +PVRSRV_ERROR IMG_CALLCONV PVRSRVWriteRegistryString (PPVRSRV_REGISTRY_INFO psRegInfo); + +#define PVRSRV_BC_FLAGS_YUVCSC_RANGE_MASK (0x01) +#define PVRSRV_BC_FLAGS_YUVCSC_RANGE_SHIFT (0x00) +#define PVRSRV_BC_FLAGS_YUVCSC_CONFORMANT_RANGE (0 << PVRSRV_BC_FLAGS_YUVCSC_RANGE_SHIFT) +#define PVRSRV_BC_FLAGS_YUVCSC_FULL_RANGE (1 << PVRSRV_BC_FLAGS_YUVCSC_RANGE_SHIFT) + +#define PVRSRV_BC_FLAGS_YUVCSC_CS_MASK (0x02) +#define PVRSRV_BC_FLAGS_YUVCSC_CS_SHIFT (0x01) +#define PVRSRV_BC_FLAGS_YUVCSC_BT601 (0 << PVRSRV_BC_FLAGS_YUVCSC_CS_SHIFT) +#define PVRSRV_BC_FLAGS_YUVCSC_BT709 (1 << PVRSRV_BC_FLAGS_YUVCSC_CS_SHIFT) + +#define PVRSRV_BC_FLAGS_RGB_RANGE_MASK (0x04) +#define PVRSRV_BC_FLAGS_RGB_RANGE_SHIFT (0x02) +#define PVRSRV_BC_FLAGS_RGB_FULL_RANGE (0 << PVRSRV_BC_FLAGS_RGB_RANGE_SHIFT) +#define PVRSRV_BC_FLAGS_RGB_CONFORMANT_RANGE (1 << PVRSRV_BC_FLAGS_RGB_RANGE_SHIFT) + +#define PVRSRV_BC_FLAGS_YUVCSC_MASK (PVRSRV_BC_FLAGS_YUVCSC_RANGE_MASK | PVRSRV_BC_FLAGS_YUVCSC_CS_MASK | PVRSRV_BC_FLAGS_RGB_RANGE_MASK) + + +#define MAX_BUFFER_DEVICE_NAME_SIZE (50) /* arbitrary choice! 
*/ + +/* buffer information structure */ +typedef struct BUFFER_INFO_TAG +{ + IMG_UINT32 ui32BufferCount; + IMG_UINT32 ui32BufferDeviceID; + PVRSRV_PIXEL_FORMAT pixelformat; + IMG_UINT32 ui32ByteStride; + IMG_UINT32 ui32Width; + IMG_UINT32 ui32Height; + IMG_UINT32 ui32Flags; + IMG_CHAR szDeviceName[MAX_BUFFER_DEVICE_NAME_SIZE]; +} BUFFER_INFO; + +typedef enum _OVERLAY_DEINTERLACE_MODE_ +{ + WEAVE=0x0, + BOB_ODD, + BOB_EVEN, + BOB_EVEN_NONINTERLEAVED +} OVERLAY_DEINTERLACE_MODE; + +#endif /* __SERVICESEXT_H__ */ +/***************************************************************************** + End of file (servicesext.h) +*****************************************************************************/ diff --git a/sgx_km/eurasia_km/include4/sgx_options.h b/sgx_km/eurasia_km/include4/sgx_options.h new file mode 100644 index 0000000..230e8ad --- /dev/null +++ b/sgx_km/eurasia_km/include4/sgx_options.h @@ -0,0 +1,245 @@ +/*************************************************************************/ /*! +@Title +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +/* Each build option listed here is packed into a dword which + * provides up to 32 flags (or up to 28 flags plus a numeric + * value in the range 0-15 which corresponds to the number of + * cores minus one if SGX_FEATURE_MP is defined). The corresponding + * bit is set if the build option was enabled at compile time. + * + * In order to extract the enabled build flags the INTERNAL_TEST + * switch should be enabled in a client program which includes this + * header. Then the client can test specific build flags by reading + * the bit value at ##OPTIONNAME##_SET_OFFSET in SGX_BUILD_OPTIONS. + * + * IMPORTANT: add new options to unused bits or define a new dword + * (e.g. SGX_BUILD_OPTIONS2) so that the bitfield remains backwards + * compatible. 
+ */ + + +#if defined(DEBUG) || defined (INTERNAL_TEST) +#define DEBUG_SET_OFFSET OPTIONS_BIT0 +#define OPTIONS_BIT0 0x1U +#else +#define OPTIONS_BIT0 0x0 +#endif /* DEBUG */ + +#if defined(PDUMP) || defined (INTERNAL_TEST) +#define PDUMP_SET_OFFSET OPTIONS_BIT1 +#define OPTIONS_BIT1 (0x1U << 1) +#else +#define OPTIONS_BIT1 0x0 +#endif /* PDUMP */ + +#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG) || defined (INTERNAL_TEST) +#define PVRSRV_USSE_EDM_STATUS_DEBUG_SET_OFFSET OPTIONS_BIT2 +#define OPTIONS_BIT2 (0x1U << 2) +#else +#define OPTIONS_BIT2 0x0 +#endif /* PVRSRV_USSE_EDM_STATUS_DEBUG */ + +#if defined(SUPPORT_HW_RECOVERY) || defined (INTERNAL_TEST) +#define SUPPORT_HW_RECOVERY_SET_OFFSET OPTIONS_BIT3 +#define OPTIONS_BIT3 (0x1U << 3) +#else +#define OPTIONS_BIT3 0x0 +#endif /* SUPPORT_HW_RECOVERY */ + + + +#if defined(PVR_SECURE_HANDLES) || defined (INTERNAL_TEST) +#define PVR_SECURE_HANDLES_SET_OFFSET OPTIONS_BIT4 +#define OPTIONS_BIT4 (0x1U << 4) +#else +#define OPTIONS_BIT4 0x0 +#endif /* PVR_SECURE_HANDLES */ + +#if defined(SGX_BYPASS_SYSTEM_CACHE) || defined (INTERNAL_TEST) +#define SGX_BYPASS_SYSTEM_CACHE_SET_OFFSET OPTIONS_BIT5 +#define OPTIONS_BIT5 (0x1U << 5) +#else +#define OPTIONS_BIT5 0x0 +#endif /* SGX_BYPASS_SYSTEM_CACHE */ + +#if defined(SGX_DMS_AGE_ENABLE) || defined (INTERNAL_TEST) +#define SGX_DMS_AGE_ENABLE_SET_OFFSET OPTIONS_BIT6 +#define OPTIONS_BIT6 (0x1U << 6) +#else +#define OPTIONS_BIT6 0x0 +#endif /* SGX_DMS_AGE_ENABLE */ + +#if defined(SGX_FAST_DPM_INIT) || defined (INTERNAL_TEST) +#define SGX_FAST_DPM_INIT_SET_OFFSET OPTIONS_BIT8 +#define OPTIONS_BIT8 (0x1U << 8) +#else +#define OPTIONS_BIT8 0x0 +#endif /* SGX_FAST_DPM_INIT */ + +#if defined(SGX_FEATURE_WRITEBACK_DCU) || defined (INTERNAL_TEST) +#define SGX_FEATURE_DCU_SET_OFFSET OPTIONS_BIT9 +#define OPTIONS_BIT9 (0x1U << 9) +#else +#define OPTIONS_BIT9 0x0 +#endif /* SGX_FEATURE_WRITEBACK_DCU */ + +#if defined(SGX_FEATURE_MP) || defined (INTERNAL_TEST) +#define 
SGX_FEATURE_MP_SET_OFFSET OPTIONS_BIT10 +#define OPTIONS_BIT10 (0x1U << 10) +#else +#define OPTIONS_BIT10 0x0 +#endif /* SGX_FEATURE_MP */ + +#define OPTIONS_BIT11 0x0 + +#define OPTIONS_BIT12 0x0 + + +#if defined(SGX_FEATURE_SYSTEM_CACHE) || defined (INTERNAL_TEST) +#define SGX_FEATURE_SYSTEM_CACHE_SET_OFFSET OPTIONS_BIT13 +#define OPTIONS_BIT13 (0x1U << 13) +#else +#define OPTIONS_BIT13 0x0 +#endif /* SGX_FEATURE_SYSTEM_CACHE */ + +#if defined(SGX_SUPPORT_HWPROFILING) || defined (INTERNAL_TEST) +#define SGX_SUPPORT_HWPROFILING_SET_OFFSET OPTIONS_BIT14 +#define OPTIONS_BIT14 (0x1U << 14) +#else +#define OPTIONS_BIT14 0x0 +#endif /* SGX_SUPPORT_HWPROFILING */ + + + +#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT) || defined (INTERNAL_TEST) +#define SUPPORT_ACTIVE_POWER_MANAGEMENT_SET_OFFSET OPTIONS_BIT15 +#define OPTIONS_BIT15 (0x1U << 15) +#else +#define OPTIONS_BIT15 0x0 +#endif /* SUPPORT_ACTIVE_POWER_MANAGEMENT */ + +#if defined(SUPPORT_DISPLAYCONTROLLER_TILING) || defined (INTERNAL_TEST) +#define SUPPORT_DISPLAYCONTROLLER_TILING_SET_OFFSET OPTIONS_BIT16 +#define OPTIONS_BIT16 (0x1U << 16) +#else +#define OPTIONS_BIT16 0x0 +#endif /* SUPPORT_DISPLAYCONTROLLER_TILING */ + +#if defined(SUPPORT_PERCONTEXT_PB) || defined (INTERNAL_TEST) +#define SUPPORT_PERCONTEXT_PB_SET_OFFSET OPTIONS_BIT17 +#define OPTIONS_BIT17 (0x1U << 17) +#else +#define OPTIONS_BIT17 0x0 +#endif /* SUPPORT_PERCONTEXT_PB */ + +#if defined(SUPPORT_SGX_HWPERF) || defined (INTERNAL_TEST) +#define SUPPORT_SGX_HWPERF_SET_OFFSET OPTIONS_BIT18 +#define OPTIONS_BIT18 (0x1U << 18) +#else +#define OPTIONS_BIT18 0x0 +#endif /* SUPPORT_SGX_HWPERF */ + + + +#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE) || defined (INTERNAL_TEST) +#define SUPPORT_SGX_MMU_DUMMY_PAGE_SET_OFFSET OPTIONS_BIT19 +#define OPTIONS_BIT19 (0x1U << 19) +#else +#define OPTIONS_BIT19 0x0 +#endif /* SUPPORT_SGX_MMU_DUMMY_PAGE */ + +#if defined(SUPPORT_SGX_PRIORITY_SCHEDULING) || defined (INTERNAL_TEST) +#define 
SUPPORT_SGX_PRIORITY_SCHEDULING_SET_OFFSET OPTIONS_BIT20 +#define OPTIONS_BIT20 (0x1U << 20) +#else +#define OPTIONS_BIT20 0x0 +#endif /* SUPPORT_SGX_PRIORITY_SCHEDULING */ + +#if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) || defined (INTERNAL_TEST) +#define SUPPORT_SGX_LOW_LATENCY_SCHEDULING_SET_OFFSET OPTIONS_BIT21 +#define OPTIONS_BIT21 (0x1U << 21) +#else +#define OPTIONS_BIT21 0x0 +#endif /* SUPPORT_SGX_LOW_LATENCY_SCHEDULING */ + +#if defined(SGX_FEATURE_MP) || defined (INTERNAL_TEST) +#if defined(SGX_FEATURE_MP_CORE_COUNT) +#define OPTIONS_HIGHBYTE ((SGX_FEATURE_MP_CORE_COUNT-1) << SGX_FEATURE_MP_CORE_COUNT_SET_OFFSET) +#define SGX_FEATURE_MP_CORE_COUNT_SET_OFFSET 28UL +#define SGX_FEATURE_MP_CORE_COUNT_SET_MASK 0xFF +#else +#define OPTIONS_HIGHBYTE (((SGX_FEATURE_MP_CORE_COUNT_TA-1) << SGX_FEATURE_MP_CORE_COUNT_SET_OFFSET) |\ + ((SGX_FEATURE_MP_CORE_COUNT_3D-1) << SGX_FEATURE_MP_CORE_COUNT_SET_OFFSET_3D)) +#define SGX_FEATURE_MP_CORE_COUNT_SET_OFFSET 24UL +#define SGX_FEATURE_MP_CORE_COUNT_SET_OFFSET_3D 28UL +#define SGX_FEATURE_MP_CORE_COUNT_SET_MASK 0xFF +#endif +#else /* SGX_FEATURE_MP */ +#define OPTIONS_HIGHBYTE 0x0 +#endif /* SGX_FEATURE_MP */ + + + +#define SGX_BUILD_OPTIONS \ + OPTIONS_BIT0 |\ + OPTIONS_BIT1 |\ + OPTIONS_BIT2 |\ + OPTIONS_BIT3 |\ + OPTIONS_BIT4 |\ + OPTIONS_BIT5 |\ + OPTIONS_BIT6 |\ + OPTIONS_BIT8 |\ + OPTIONS_BIT9 |\ + OPTIONS_BIT10 |\ + OPTIONS_BIT11 |\ + OPTIONS_BIT12 |\ + OPTIONS_BIT13 |\ + OPTIONS_BIT14 |\ + OPTIONS_BIT15 |\ + OPTIONS_BIT16 |\ + OPTIONS_BIT17 |\ + OPTIONS_BIT18 |\ + OPTIONS_BIT19 |\ + OPTIONS_BIT20 |\ + OPTIONS_BIT21 |\ + OPTIONS_HIGHBYTE + diff --git a/sgx_km/eurasia_km/include4/sgxapi_km.h b/sgx_km/eurasia_km/include4/sgxapi_km.h new file mode 100644 index 0000000..1b9fd65 --- /dev/null +++ b/sgx_km/eurasia_km/include4/sgxapi_km.h @@ -0,0 +1,554 @@ +/*************************************************************************/ /*! +@Title SGX KM API Header +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@Description Exported SGX API details +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef __SGXAPI_KM_H__ +#define __SGXAPI_KM_H__ + +#if defined (__cplusplus) +extern "C" { +#endif + +#include "sgxdefs.h" + +#if (defined(__linux__) || defined(__QNXNTO__)) && !defined(USE_CODE) + #if defined(__KERNEL__) + #include + #else + #include + #endif +#endif + +/****************************************************************************** + Some defines... +******************************************************************************/ + +/* SGX Heap IDs, note: not all heaps are available to clients */ +#define SGX_UNDEFINED_HEAP_ID (~0LU) +#define SGX_GENERAL_HEAP_ID 0 +#define SGX_TADATA_HEAP_ID 1 +#define SGX_KERNEL_CODE_HEAP_ID 2 +#define SGX_KERNEL_DATA_HEAP_ID 3 +#define SGX_PIXELSHADER_HEAP_ID 4 +#define SGX_VERTEXSHADER_HEAP_ID 5 +#define SGX_PDSPIXEL_CODEDATA_HEAP_ID 6 +#define SGX_PDSVERTEX_CODEDATA_HEAP_ID 7 +#define SGX_SYNCINFO_HEAP_ID 8 +#define SGX_SHARED_3DPARAMETERS_HEAP_ID 9 +#define SGX_PERCONTEXT_3DPARAMETERS_HEAP_ID 10 +#if defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP) +#define SGX_GENERAL_MAPPING_HEAP_ID 11 +#endif +#if defined(SGX_FEATURE_2D_HARDWARE) +#define SGX_2D_HEAP_ID 12 +#endif +#if defined(SUPPORT_MEMORY_TILING) +#define SGX_VPB_TILED_HEAP_ID 14 +#endif + +#if defined(SGX_FEATURE_ADDRESS_SPACE_EXTENSION) + #define SGX_TEXTURE_HEAP_ID 15 + #define SGX_MAX_HEAP_ID 16 +#else + #define SGX_MAX_HEAP_ID 15 +#endif + +/* + * 
Keep SGX_3DPARAMETERS_HEAP_ID as TQ full custom + * shaders need it to select which heap to write + * their ISP controll stream to. + */ +#if (defined(SUPPORT_PERCONTEXT_PB) || defined(SUPPORT_HYBRID_PB)) +#define SGX_3DPARAMETERS_HEAP_ID SGX_PERCONTEXT_3DPARAMETERS_HEAP_ID +#else +#define SGX_3DPARAMETERS_HEAP_ID SGX_SHARED_3DPARAMETERS_HEAP_ID +#endif +/* Define for number of bytes between consecutive code base registers */ +#if defined(SGX543) || defined(SGX544) || defined(SGX554) +#define SGX_USE_CODE_SEGMENT_RANGE_BITS 23 +#else +#define SGX_USE_CODE_SEGMENT_RANGE_BITS 19 +#endif + +#define SGX_MAX_TA_STATUS_VALS 32 +#define SGX_MAX_3D_STATUS_VALS 4 + +/* sync info structure array size */ +#define SGX_MAX_SRC_SYNCS_TA 32 +#define SGX_MAX_DST_SYNCS_TA 1 +/* note: only one dst sync is supported by the 2D paths */ +#define SGX_MAX_SRC_SYNCS_TQ 6 +#define SGX_MAX_DST_SYNCS_TQ 2 + + +#if defined(SGX_FEATURE_EXTENDED_PERF_COUNTERS) +#define PVRSRV_SGX_HWPERF_NUM_COUNTERS 8 +#define PVRSRV_SGX_HWPERF_NUM_MISC_COUNTERS 11 +#else +#define PVRSRV_SGX_HWPERF_NUM_COUNTERS 9 +#define PVRSRV_SGX_HWPERF_NUM_MISC_COUNTERS 8 +#endif /* SGX543 */ + +#define PVRSRV_SGX_HWPERF_INVALID 0x1 + +#define PVRSRV_SGX_HWPERF_TRANSFER 0x2 +#define PVRSRV_SGX_HWPERF_TA 0x3 +#define PVRSRV_SGX_HWPERF_3D 0x4 +#define PVRSRV_SGX_HWPERF_2D 0x5 +#define PVRSRV_SGX_HWPERF_POWER 0x6 +#define PVRSRV_SGX_HWPERF_PERIODIC 0x7 +#define PVRSRV_SGX_HWPERF_3DSPM 0x8 +#define PVRSRV_SGX_HWPERF_TA_OCL 0x9 +#define PVRSRV_SGX_HWPERF_3D_OCL 0xA +#define PVRSRV_SGX_HWPERF_3DSPM_OCL 0xB + +#define PVRSRV_SGX_HWPERF_MK_EVENT 0x101 +#define PVRSRV_SGX_HWPERF_MK_TA 0x102 +#define PVRSRV_SGX_HWPERF_MK_3D 0x103 +#define PVRSRV_SGX_HWPERF_MK_2D 0x104 +#define PVRSRV_SGX_HWPERF_MK_TRANSFER_DUMMY 0x105 +#define PVRSRV_SGX_HWPERF_MK_TA_DUMMY 0x106 +#define PVRSRV_SGX_HWPERF_MK_3D_DUMMY 0x107 +#define PVRSRV_SGX_HWPERF_MK_2D_DUMMY 0x108 +#define PVRSRV_SGX_HWPERF_MK_TA_LOCKUP 0x109 +#define 
PVRSRV_SGX_HWPERF_MK_3D_LOCKUP 0x10A +#define PVRSRV_SGX_HWPERF_MK_2D_LOCKUP 0x10B +#define PVRSRV_SGX_HWPERF_MK_HK 0x10C + +#define PVRSRV_SGX_HWPERF_TYPE_STARTEND_BIT 28 +#define PVRSRV_SGX_HWPERF_TYPE_OP_MASK ((1UL << PVRSRV_SGX_HWPERF_TYPE_STARTEND_BIT) - 1) +#define PVRSRV_SGX_HWPERF_TYPE_OP_START (0UL << PVRSRV_SGX_HWPERF_TYPE_STARTEND_BIT) +#define PVRSRV_SGX_HWPERF_TYPE_OP_END (1Ul << PVRSRV_SGX_HWPERF_TYPE_STARTEND_BIT) + +#define PVRSRV_SGX_HWPERF_TYPE_TRANSFER_START (PVRSRV_SGX_HWPERF_TRANSFER | PVRSRV_SGX_HWPERF_TYPE_OP_START) +#define PVRSRV_SGX_HWPERF_TYPE_TRANSFER_END (PVRSRV_SGX_HWPERF_TRANSFER | PVRSRV_SGX_HWPERF_TYPE_OP_END) +#define PVRSRV_SGX_HWPERF_TYPE_TA_START (PVRSRV_SGX_HWPERF_TA | PVRSRV_SGX_HWPERF_TYPE_OP_START) +#define PVRSRV_SGX_HWPERF_TYPE_TA_END (PVRSRV_SGX_HWPERF_TA | PVRSRV_SGX_HWPERF_TYPE_OP_END) +#define PVRSRV_SGX_HWPERF_TYPE_3D_START (PVRSRV_SGX_HWPERF_3D | PVRSRV_SGX_HWPERF_TYPE_OP_START) +#define PVRSRV_SGX_HWPERF_TYPE_3D_END (PVRSRV_SGX_HWPERF_3D | PVRSRV_SGX_HWPERF_TYPE_OP_END) +#define PVRSRV_SGX_HWPERF_TYPE_2D_START (PVRSRV_SGX_HWPERF_2D | PVRSRV_SGX_HWPERF_TYPE_OP_START) +#define PVRSRV_SGX_HWPERF_TYPE_2D_END (PVRSRV_SGX_HWPERF_2D | PVRSRV_SGX_HWPERF_TYPE_OP_END) +#define PVRSRV_SGX_HWPERF_TYPE_POWER_START (PVRSRV_SGX_HWPERF_POWER | PVRSRV_SGX_HWPERF_TYPE_OP_START) +#define PVRSRV_SGX_HWPERF_TYPE_POWER_END (PVRSRV_SGX_HWPERF_POWER | PVRSRV_SGX_HWPERF_TYPE_OP_END) +#define PVRSRV_SGX_HWPERF_TYPE_PERIODIC (PVRSRV_SGX_HWPERF_PERIODIC) +#define PVRSRV_SGX_HWPERF_TYPE_3DSPM_START (PVRSRV_SGX_HWPERF_3DSPM | PVRSRV_SGX_HWPERF_TYPE_OP_START) +#define PVRSRV_SGX_HWPERF_TYPE_3DSPM_END (PVRSRV_SGX_HWPERF_3DSPM | PVRSRV_SGX_HWPERF_TYPE_OP_END) +#define PVRSRV_SGX_HWPERF_TYPE_3DSPM_OCL_START (PVRSRV_SGX_HWPERF_3DSPM_OCL | PVRSRV_SGX_HWPERF_TYPE_OP_START) +#define PVRSRV_SGX_HWPERF_TYPE_3DSPM_OCL_END (PVRSRV_SGX_HWPERF_3DSPM_OCL | PVRSRV_SGX_HWPERF_TYPE_OP_END) +#define PVRSRV_SGX_HWPERF_TYPE_TA_OCL_START (PVRSRV_SGX_HWPERF_TA_OCL | 
PVRSRV_SGX_HWPERF_TYPE_OP_START) +#define PVRSRV_SGX_HWPERF_TYPE_TA_OCL_END (PVRSRV_SGX_HWPERF_TA_OCL | PVRSRV_SGX_HWPERF_TYPE_OP_END) +#define PVRSRV_SGX_HWPERF_TYPE_3D_OCL_START (PVRSRV_SGX_HWPERF_3D_OCL | PVRSRV_SGX_HWPERF_TYPE_OP_START) +#define PVRSRV_SGX_HWPERF_TYPE_3D_OCL_END (PVRSRV_SGX_HWPERF_3D_OCL | PVRSRV_SGX_HWPERF_TYPE_OP_END) +#define PVRSRV_SGX_HWPERF_TYPE_MK_TRANSFER_DUMMY_START (PVRSRV_SGX_HWPERF_MK_TRANSFER_DUMMY | PVRSRV_SGX_HWPERF_TYPE_OP_START) +#define PVRSRV_SGX_HWPERF_TYPE_MK_TRANSFER_DUMMY_END (PVRSRV_SGX_HWPERF_MK_TRANSFER_DUMMY | PVRSRV_SGX_HWPERF_TYPE_OP_END) +#define PVRSRV_SGX_HWPERF_TYPE_MK_TA_DUMMY_START (PVRSRV_SGX_HWPERF_MK_TA_DUMMY | PVRSRV_SGX_HWPERF_TYPE_OP_START) +#define PVRSRV_SGX_HWPERF_TYPE_MK_TA_DUMMY_END (PVRSRV_SGX_HWPERF_MK_TA_DUMMY | PVRSRV_SGX_HWPERF_TYPE_OP_END) +#define PVRSRV_SGX_HWPERF_TYPE_MK_3D_DUMMY_START (PVRSRV_SGX_HWPERF_MK_3D_DUMMY | PVRSRV_SGX_HWPERF_TYPE_OP_START) +#define PVRSRV_SGX_HWPERF_TYPE_MK_3D_DUMMY_END (PVRSRV_SGX_HWPERF_MK_3D_DUMMY | PVRSRV_SGX_HWPERF_TYPE_OP_END) +#define PVRSRV_SGX_HWPERF_TYPE_MK_2D_DUMMY_START (PVRSRV_SGX_HWPERF_MK_2D_DUMMY | PVRSRV_SGX_HWPERF_TYPE_OP_START) +#define PVRSRV_SGX_HWPERF_TYPE_MK_2D_DUMMY_END (PVRSRV_SGX_HWPERF_MK_2D_DUMMY | PVRSRV_SGX_HWPERF_TYPE_OP_END) +#define PVRSRV_SGX_HWPERF_TYPE_MK_TA_LOCKUP (PVRSRV_SGX_HWPERF_MK_TA_LOCKUP) +#define PVRSRV_SGX_HWPERF_TYPE_MK_3D_LOCKUP (PVRSRV_SGX_HWPERF_MK_3D_LOCKUP) +#define PVRSRV_SGX_HWPERF_TYPE_MK_2D_LOCKUP (PVRSRV_SGX_HWPERF_MK_2D_LOCKUP) + +#define PVRSRV_SGX_HWPERF_TYPE_MK_EVENT_START (PVRSRV_SGX_HWPERF_MK_EVENT | PVRSRV_SGX_HWPERF_TYPE_OP_START) +#define PVRSRV_SGX_HWPERF_TYPE_MK_EVENT_END (PVRSRV_SGX_HWPERF_MK_EVENT | PVRSRV_SGX_HWPERF_TYPE_OP_END) +#define PVRSRV_SGX_HWPERF_TYPE_MK_TA_START (PVRSRV_SGX_HWPERF_MK_TA | PVRSRV_SGX_HWPERF_TYPE_OP_START) +#define PVRSRV_SGX_HWPERF_TYPE_MK_TA_END (PVRSRV_SGX_HWPERF_MK_TA | PVRSRV_SGX_HWPERF_TYPE_OP_END) +#define PVRSRV_SGX_HWPERF_TYPE_MK_3D_START 
(PVRSRV_SGX_HWPERF_MK_3D | PVRSRV_SGX_HWPERF_TYPE_OP_START) +#define PVRSRV_SGX_HWPERF_TYPE_MK_3D_END (PVRSRV_SGX_HWPERF_MK_3D | PVRSRV_SGX_HWPERF_TYPE_OP_END) +#define PVRSRV_SGX_HWPERF_TYPE_MK_2D_START (PVRSRV_SGX_HWPERF_MK_2D | PVRSRV_SGX_HWPERF_TYPE_OP_START) +#define PVRSRV_SGX_HWPERF_TYPE_MK_2D_END (PVRSRV_SGX_HWPERF_MK_2D | PVRSRV_SGX_HWPERF_TYPE_OP_END) +#define PVRSRV_SGX_HWPERF_TYPE_MK_HK_START (PVRSRV_SGX_HWPERF_MK_HK | PVRSRV_SGX_HWPERF_TYPE_OP_START) +#define PVRSRV_SGX_HWPERF_TYPE_MK_HK_END (PVRSRV_SGX_HWPERF_MK_HK | PVRSRV_SGX_HWPERF_TYPE_OP_END) + + +#define PVRSRV_SGX_HWPERF_STATUS_OFF (0x0) +#define PVRSRV_SGX_HWPERF_STATUS_RESET_COUNTERS (1UL << 0) +#define PVRSRV_SGX_HWPERF_STATUS_GRAPHICS_ON (1UL << 1) +#define PVRSRV_SGX_HWPERF_STATUS_PERIODIC_ON (1UL << 2) +#define PVRSRV_SGX_HWPERF_STATUS_MK_EXECUTION_ON (1UL << 3) + + +/*! + ***************************************************************************** + * One entry in the HWPerf Circular Buffer. + *****************************************************************************/ +typedef struct _PVRSRV_SGX_HWPERF_CB_ENTRY_ +{ + IMG_UINT32 ui32FrameNo; + IMG_UINT32 ui32PID; + IMG_UINT32 ui32RTData; + IMG_UINT32 ui32Type; + IMG_UINT32 ui32Ordinal; + IMG_UINT32 ui32Info; + IMG_UINT32 ui32Clocksx16; +#if defined(SUPPORT_PVRSRV_ANDROID_SYSTRACE) && defined(EUR_CR_TIMER) + IMG_UINT32 ui32SystraceIndex; +#endif + /* NOTE: There should always be at least as many 3D cores as TA cores. */ + IMG_UINT32 ui32Counters[SGX_FEATURE_MP_CORE_COUNT_3D][PVRSRV_SGX_HWPERF_NUM_COUNTERS]; + IMG_UINT32 ui32MiscCounters[SGX_FEATURE_MP_CORE_COUNT_3D][PVRSRV_SGX_HWPERF_NUM_MISC_COUNTERS]; +} PVRSRV_SGX_HWPERF_CB_ENTRY; + + +/* + Status values control structure +*/ +typedef struct _CTL_STATUS_ +{ + IMG_DEV_VIRTADDR sStatusDevAddr; + IMG_UINT32 ui32StatusValue; +} CTL_STATUS; + + +/*! 
+ List of possible requests/commands to SGXGetMiscInfo() +*/ +typedef enum _SGX_MISC_INFO_REQUEST_ +{ + SGX_MISC_INFO_REQUEST_CLOCKSPEED = 0, + SGX_MISC_INFO_REQUEST_CLOCKSPEED_SLCSIZE, + SGX_MISC_INFO_REQUEST_SGXREV, + SGX_MISC_INFO_REQUEST_DRIVER_SGXREV, +#if defined(SUPPORT_SGX_EDM_MEMORY_DEBUG) + SGX_MISC_INFO_REQUEST_MEMREAD, + SGX_MISC_INFO_REQUEST_MEMCOPY, +#endif /* SUPPORT_SGX_EDM_MEMORY_DEBUG */ + SGX_MISC_INFO_REQUEST_SET_HWPERF_STATUS, +#if defined(SGX_FEATURE_DATA_BREAKPOINTS) + SGX_MISC_INFO_REQUEST_SET_BREAKPOINT, + SGX_MISC_INFO_REQUEST_POLL_BREAKPOINT, + SGX_MISC_INFO_REQUEST_RESUME_BREAKPOINT, +#endif /* SGX_FEATURE_DATA_BREAKPOINTS */ + SGX_MISC_INFO_DUMP_DEBUG_INFO, + SGX_MISC_INFO_DUMP_DEBUG_INFO_FORCE_REGS, + SGX_MISC_INFO_PANIC, + SGX_MISC_INFO_REQUEST_SPM, + SGX_MISC_INFO_REQUEST_ACTIVEPOWER, + SGX_MISC_INFO_REQUEST_LOCKUPS, +#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG) + SGX_MISC_INFO_REQUEST_EDM_STATUS_BUFFER_INFO, +#endif + SGX_MISC_INFO_REQUEST_FORCE_I32 = 0x7fffffff +} SGX_MISC_INFO_REQUEST; + + +/****************************************************************************** + * Struct for passing SGX core rev/features from ukernel to driver. + * This is accessed from the kernel part of the driver and microkernel; it is + * only accessed in user space during buffer allocation in srvinit. + ******************************************************************************/ +typedef struct _PVRSRV_SGX_MISCINFO_FEATURES +{ + IMG_UINT32 ui32CoreRev; /*!< SGX Core revision from HW register */ + IMG_UINT32 ui32CoreID; /*!< SGX Core ID from HW register */ + IMG_UINT32 ui32DDKVersion; /*!< software DDK version */ + IMG_UINT32 ui32DDKBuild; /*!< software DDK build no. */ + IMG_UINT32 ui32CoreIdSW; /*!< software core version (ID), e.g. 
SGX535, SGX540 */ + IMG_UINT32 ui32CoreRevSW; /*!< software core revision */ + IMG_UINT32 ui32BuildOptions; /*!< build options bit-field */ +#if defined(SUPPORT_SGX_EDM_MEMORY_DEBUG) + IMG_UINT32 ui32DeviceMemValue; /*!< device mem value read from ukernel */ +#elif defined(USE_64BIT_COMPAT) + IMG_UINT32 ui32Padding; +#endif +} PVRSRV_SGX_MISCINFO_FEATURES; + +typedef struct _PVRSRV_SGX_MISCINFO_QUERY_CLOCKSPEED_SLCSIZE +{ + IMG_UINT32 ui32SGXClockSpeed; + IMG_UINT32 ui32SGXSLCSize; +} PVRSRV_SGX_MISCINFO_QUERY_CLOCKSPEED_SLCSIZE; + +#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG) +/****************************************************************************** + * Struct for getting access to the EDM Status Buffer + ******************************************************************************/ +typedef struct _PVRSRV_SGX_MISCINFO_EDM_STATUS_BUFFER_INFO +{ + IMG_DEV_VIRTADDR sDevVAEDMStatusBuffer; /*!< DevVAddr of the EDM status buffer */ + IMG_HANDLE pvEDMStatusBuffer; /*!< CPUVAddr of the EDM status buffer */ +} PVRSRV_SGX_MISCINFO_EDM_STATUS_BUFFER_INFO; +#endif + + +/****************************************************************************** + * Struct for getting lock-up stats from the kernel driver + ******************************************************************************/ +typedef struct _PVRSRV_SGX_MISCINFO_LOCKUPS +{ + IMG_UINT32 ui32HostDetectedLockups; /*!< Host timer detected lockups */ + IMG_UINT32 ui32uKernelDetectedLockups; /*!< Microkernel detected lockups */ +} PVRSRV_SGX_MISCINFO_LOCKUPS; + + +/****************************************************************************** + * Struct for getting lock-up stats from the kernel driver + ******************************************************************************/ +typedef struct _PVRSRV_SGX_MISCINFO_ACTIVEPOWER +{ + IMG_UINT32 ui32NumActivePowerEvents; /*!< active power events */ +#if defined(USE_64BIT_COMPAT) + IMG_UINT32 ui32Padding; +#endif +} PVRSRV_SGX_MISCINFO_ACTIVEPOWER; + + 
 * Struct for getting SPM stats from the kernel driver
+ ****************************************************************************** + * Structure for setting the hardware performance status + *****************************************************************************/ +typedef struct _PVRSRV_SGX_MISCINFO_SET_HWPERF_STATUS +{ + /* See PVRSRV_SGX_HWPERF_STATUS_* */ + IMG_UINT32 ui32NewHWPerfStatus; + + #if defined(SGX_FEATURE_EXTENDED_PERF_COUNTERS) + /* Specifies the HW's active group selectors */ + IMG_UINT32 aui32PerfGroup[PVRSRV_SGX_HWPERF_NUM_COUNTERS]; + /* Specifies the HW's active bit selectors */ + IMG_UINT32 aui32PerfBit[PVRSRV_SGX_HWPERF_NUM_COUNTERS]; + /* Specifies the HW's counter bit selectors */ + IMG_UINT32 ui32PerfCounterBitSelect; + /* Specifies the HW's sum_mux selectors */ + IMG_UINT32 ui32PerfSumMux; + #else + /* Specifies the HW's active group */ + IMG_UINT32 ui32PerfGroup; + #endif /* SGX_FEATURE_EXTENDED_PERF_COUNTERS */ +} PVRSRV_SGX_MISCINFO_SET_HWPERF_STATUS; + + +/*! + ****************************************************************************** + * Structure for misc SGX commands in services + *****************************************************************************/ +typedef struct _SGX_MISC_INFO_ +{ + SGX_MISC_INFO_REQUEST eRequest; /*!< Command request to SGXGetMiscInfo() */ +#if defined(USE_64BIT_COMPAT) + IMG_UINT32 ui32Padding; +#endif +#if defined(SUPPORT_SGX_EDM_MEMORY_DEBUG) + IMG_DEV_VIRTADDR sDevVAddrSrc; /*!< dev virtual addr for mem read */ + IMG_DEV_VIRTADDR sDevVAddrDest; /*!< dev virtual addr for mem write */ + IMG_HANDLE hDevMemContext; /*!< device memory context for mem debug */ +#endif + union + { + IMG_UINT32 reserved; /*!< Unused: ensures valid code in the case everything else is compiled out */ + PVRSRV_SGX_MISCINFO_FEATURES sSGXFeatures; + IMG_UINT32 ui32SGXClockSpeed; + PVRSRV_SGX_MISCINFO_QUERY_CLOCKSPEED_SLCSIZE sQueryClockSpeedSLCSize; + PVRSRV_SGX_MISCINFO_ACTIVEPOWER sActivePower; + PVRSRV_SGX_MISCINFO_LOCKUPS sLockups; + PVRSRV_SGX_MISCINFO_SPM sSPM; 
+#if defined(SGX_FEATURE_DATA_BREAKPOINTS) + SGX_BREAKPOINT_INFO sSGXBreakpointInfo; +#endif + PVRSRV_SGX_MISCINFO_SET_HWPERF_STATUS sSetHWPerfStatus; + +#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG) + PVRSRV_SGX_MISCINFO_EDM_STATUS_BUFFER_INFO sEDMStatusBufferInfo; +#endif + } uData; +} SGX_MISC_INFO; + +#if defined(SGX_FEATURE_2D_HARDWARE) +/* + * The largest number of source sync objects that can be associated with a blit + * command. Allows for src, pattern, and mask + */ +#define PVRSRV_MAX_BLT_SRC_SYNCS 3 +#endif + + +#define SGX_KICKTA_DUMPBITMAP_MAX_NAME_LENGTH 256 + +/* + Structure for dumping bitmaps +*/ +typedef struct _SGX_KICKTA_DUMPBITMAP_ +{ + IMG_DEV_VIRTADDR sDevBaseAddr; + IMG_UINT32 ui32Flags; + IMG_UINT32 ui32Width; + IMG_UINT32 ui32Height; + IMG_UINT32 ui32Stride; + IMG_UINT32 ui32PDUMPFormat; + IMG_UINT32 ui32BytesPP; + IMG_CHAR pszName[SGX_KICKTA_DUMPBITMAP_MAX_NAME_LENGTH]; +} SGX_KICKTA_DUMPBITMAP, *PSGX_KICKTA_DUMPBITMAP; + +#define PVRSRV_SGX_PDUMP_CONTEXT_MAX_BITMAP_ARRAY_SIZE (16) + +/*! 
+ ****************************************************************************** + * Data required only when dumping parameters + *****************************************************************************/ +typedef struct _PVRSRV_SGX_PDUMP_CONTEXT_ +{ + /* cache control word for micro kernel cache flush/invalidates */ + IMG_UINT32 ui32CacheControl; + +} PVRSRV_SGX_PDUMP_CONTEXT; + + +typedef struct _SGX_KICKTA_DUMP_ROFF_ +{ + IMG_HANDLE hKernelMemInfo; /*< Buffer handle */ + IMG_UINT32 uiAllocIndex; /*< Alloc index for LDDM */ + IMG_UINT32 ui32Offset; /*< Byte offset to value to dump */ + IMG_UINT32 ui32Value; /*< Actual value to dump */ + IMG_PCHAR pszName; /*< Name of buffer */ +} SGX_KICKTA_DUMP_ROFF, *PSGX_KICKTA_DUMP_ROFF; + +typedef struct _SGX_KICKTA_DUMP_BUFFER_ +{ + IMG_UINT32 ui32SpaceUsed; + IMG_UINT32 ui32Start; /*< Byte offset of start to dump */ + IMG_UINT32 ui32End; /*< Byte offset of end of dump (non-inclusive) */ + IMG_UINT32 ui32BufferSize; /*< Size of buffer */ + IMG_UINT32 ui32BackEndLength; /*< Size of back end portion, if End < Start */ + IMG_UINT32 uiAllocIndex; + IMG_HANDLE hKernelMemInfo; /*< MemInfo handle for the circular buffer */ + IMG_HANDLE hLinAddr; +#if defined(SUPPORT_SGX_NEW_STATUS_VALS) + IMG_HANDLE hCtrlKernelMemInfo; /*< MemInfo handle for the control structure of the + circular buffer */ + IMG_DEV_VIRTADDR sCtrlDevVAddr; /*< Device virtual address of the memory in the + control structure to be checked */ +#if defined(USE_64BIT_COMPAT) + IMG_UINT32 ui32Padding; +#endif +#endif + IMG_HANDLE hName; /*< Name of buffer */ + +#if defined (__QNXNTO__) + IMG_UINT32 ui32NameLength; /*< Number of characters in buffer name */ +#endif +} SGX_KICKTA_DUMP_BUFFER, *PSGX_KICKTA_DUMP_BUFFER; + +#ifdef PDUMP +/* + PDUMP version of above kick structure +*/ +typedef struct _SGX_KICKTA_PDUMP_ +{ + // Bitmaps to dump + PSGX_KICKTA_DUMPBITMAP psPDumpBitmapArray; + IMG_UINT32 ui32PDumpBitmapSize; + + // Misc buffers to dump (e.g. TA, PDS etc..) 
+ PSGX_KICKTA_DUMP_BUFFER psBufferArray; + IMG_UINT32 ui32BufferArraySize; + + // Roffs to dump + PSGX_KICKTA_DUMP_ROFF psROffArray; + IMG_UINT32 ui32ROffArraySize; +} SGX_KICKTA_PDUMP, *PSGX_KICKTA_PDUMP; +#endif /* PDUMP */ + +#if defined(TRANSFER_QUEUE) +#if defined(SGX_FEATURE_2D_HARDWARE) +/* Maximum size of ctrl stream for 2d blit command (in 32 bit words) */ +#define SGX_MAX_2D_BLIT_CMD_SIZE 26 +#define SGX_MAX_2D_SRC_SYNC_OPS 3 +#endif +#define SGX_MAX_TRANSFER_STATUS_VALS 2 +#define SGX_MAX_TRANSFER_SYNC_OPS 5 +#endif + +#if defined (__cplusplus) +} +#endif + +#endif /* __SGXAPI_KM_H__ */ + +/****************************************************************************** + End of file (sgxapi_km.h) +******************************************************************************/ diff --git a/sgx_km/eurasia_km/include4/sgxscript.h b/sgx_km/eurasia_km/include4/sgxscript.h new file mode 100644 index 0000000..4d103cb --- /dev/null +++ b/sgx_km/eurasia_km/include4/sgxscript.h @@ -0,0 +1,108 @@ +/*************************************************************************/ /*! +@Title SGX kernel services structues/functions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description SGX initialisation script definitions. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#ifndef __SGXSCRIPT_H__ +#define __SGXSCRIPT_H__ + +#include "sgxfeaturedefs.h" +#if defined (__cplusplus) +extern "C" { +#endif + +#define SGX_MAX_INIT_COMMANDS 64 +#define SGX_MAX_PRINT_COMMANDS 64 +#define SGX_MAX_DEINIT_COMMANDS 16 + +typedef enum _SGX_INIT_OPERATION +{ + SGX_INIT_OP_ILLEGAL = 0, + SGX_INIT_OP_WRITE_HW_REG, + SGX_INIT_OP_READ_HW_REG, + SGX_INIT_OP_PRINT_HW_REG, +#if defined(PDUMP) + SGX_INIT_OP_PDUMP_HW_REG, +#endif + SGX_INIT_OP_HALT, + SGX_INIT_FORCE_I32 = 0x7FFFFFFF +} SGX_INIT_OPERATION; + +typedef union _SGX_INIT_COMMAND +{ + SGX_INIT_OPERATION eOp; + struct IMG_COMPAT { + SGX_INIT_OPERATION eOp; + IMG_UINT32 ui32Offset; + IMG_UINT32 ui32Value; + } sWriteHWReg; + struct IMG_COMPAT { + SGX_INIT_OPERATION eOp; + IMG_UINT32 ui32Offset; + IMG_UINT32 ui32Value; + } sReadHWReg; +#if defined(PDUMP) + struct IMG_COMPAT { + SGX_INIT_OPERATION eOp; + IMG_UINT32 ui32Offset; + IMG_UINT32 ui32Value; + } sPDumpHWReg; +#endif +} SGX_INIT_COMMAND; + +typedef struct IMG_COMPAT _SGX_INIT_SCRIPTS_ +{ + SGX_INIT_COMMAND asInitCommandsPart1[SGX_MAX_INIT_COMMANDS]; + SGX_INIT_COMMAND asInitCommandsPart2[SGX_MAX_INIT_COMMANDS]; + SGX_INIT_COMMAND asDeinitCommands[SGX_MAX_DEINIT_COMMANDS]; +#if defined(SGX_FEATURE_MP) + SGX_INIT_COMMAND asSGXREGDebugCommandsMaster[SGX_MAX_PRINT_COMMANDS]; +#endif + SGX_INIT_COMMAND asSGXREGDebugCommandsSlave[SGX_MAX_PRINT_COMMANDS]; +} SGX_INIT_SCRIPTS; + +#if defined(__cplusplus) +} +#endif + +#endif /* __SGXSCRIPT_H__ */ + +/***************************************************************************** + End of file (sgxscript.h) +*****************************************************************************/ diff --git a/sgx_km/eurasia_km/services4/include/env/linux/pvr_drm_shared.h b/sgx_km/eurasia_km/services4/include/env/linux/pvr_drm_shared.h new file mode 100644 index 0000000..4b84f6b --- /dev/null +++ 
b/sgx_km/eurasia_km/services4/include/env/linux/pvr_drm_shared.h @@ -0,0 +1,76 @@ +/*************************************************************************/ /*! +@Title PowerVR drm driver shared definitions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#if !defined(__PVR_DRM_SHARED_H__) +#define __PVR_DRM_SHARED_H__ + +#if defined(SUPPORT_DRI_DRM) + +typedef struct drm_pvr_unpriv_cmd_tag +{ + uint32_t cmd; + uint32_t res; +} drm_pvr_unpriv_cmd; + +/* DRM command numbers, relative to DRM_COMMAND_BASE */ +#if defined(SUPPORT_DRI_DRM_EXT) +#define PVR_DRM_SRVKM_CMD DRM_PVR_RESERVED1 /* Used for PVR Services ioctls */ +#define PVR_DRM_DISP_CMD DRM_PVR_RESERVED2 /* Reserved for display class driver */ +#define PVR_DRM_BC_CMD DRM_PVR_RESERVED3 /* Reserved for buffer class driver */ +#define PVR_DRM_IS_MASTER_CMD DRM_PVR_RESERVED4 /* Are we the DRM master? */ +#define PVR_DRM_UNPRIV_CMD DRM_PVR_RESERVED5 /* PVR driver unprivileged ioctls */ +#define PVR_DRM_DBGDRV_CMD DRM_PVR_RESERVED6 /* Debug driver (PDUMP) ioctls */ +#else /* defined(SUPPORT_DRI_DRM_EXT) */ +#define PVR_DRM_SRVKM_CMD 0 /* Used for PVR Services ioctls */ +#define PVR_DRM_DISP_CMD 1 /* Reserved for display class driver */ +#define PVR_DRM_BC_CMD 2 /* Reserved for buffer class driver */ +#define PVR_DRM_IS_MASTER_CMD 3 /* Are we the DRM master? 
*/ +#define PVR_DRM_UNPRIV_CMD 4 /* PVR driver unprivileged ioctls */ +#define PVR_DRM_DBGDRV_CMD 5 /* Debug driver (PDUMP) ioctls */ +#endif /* defined(SUPPORT_DRI_DRM_EXT) */ + +/* Subcommands of PVR_DRM_UNPRIV_CMD */ +#define PVR_DRM_UNPRIV_INIT_SUCCESFUL 0 /* PVR Services init successful */ + +#endif + +#endif /* defined(__PVR_DRM_SHARED_H__) */ + + diff --git a/sgx_km/eurasia_km/services4/include/ion_sys_private.h b/sgx_km/eurasia_km/services4/include/ion_sys_private.h new file mode 100644 index 0000000..8b2c467 --- /dev/null +++ b/sgx_km/eurasia_km/services4/include/ion_sys_private.h @@ -0,0 +1,50 @@ +/*************************************************************************/ /*! +@File ion_sys_private.h +@Title System-specific private data for ion support code +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include SUPPORT_ION_HEADER + +typedef struct +{ + ion_phys_addr_t uiHeapBase; + size_t uiHeapSize; + IMG_CPU_PHYADDR sPCIAddrRangeStart; +} ION_TC_PRIVATE_DATA; diff --git a/sgx_km/eurasia_km/services4/include/kernelbuffer.h b/sgx_km/eurasia_km/services4/include/kernelbuffer.h new file mode 100644 index 0000000..6d8aed5 --- /dev/null +++ b/sgx_km/eurasia_km/services4/include/kernelbuffer.h @@ -0,0 +1,97 @@ +/*************************************************************************/ /*! +@Title buffer device class API structures and prototypes + for kernel services to kernel 3rd party buffer device driver +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@Description provides display device class API structures and prototypes +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined (__KERNELBUFFER_H__) +#define __KERNELBUFFER_H__ + +#if defined (__cplusplus) +extern "C" { +#endif + +/* + Function table and pointers for SRVKM->BUFFER +*/ +typedef PVRSRV_ERROR (*PFN_OPEN_BC_DEVICE)(IMG_UINT32, IMG_HANDLE*); +typedef PVRSRV_ERROR (*PFN_CLOSE_BC_DEVICE)(IMG_UINT32, IMG_HANDLE); +typedef PVRSRV_ERROR (*PFN_GET_BC_INFO)(IMG_HANDLE, BUFFER_INFO*); +typedef PVRSRV_ERROR (*PFN_GET_BC_BUFFER)(IMG_HANDLE, IMG_UINT32, PVRSRV_SYNC_DATA*, IMG_HANDLE*); + +typedef struct PVRSRV_BC_SRV2BUFFER_KMJTABLE_TAG +{ + IMG_UINT32 ui32TableSize; + PFN_OPEN_BC_DEVICE pfnOpenBCDevice; + PFN_CLOSE_BC_DEVICE pfnCloseBCDevice; + PFN_GET_BC_INFO pfnGetBCInfo; + PFN_GET_BC_BUFFER pfnGetBCBuffer; + PFN_GET_BUFFER_ADDR pfnGetBufferAddr; + +} PVRSRV_BC_SRV2BUFFER_KMJTABLE; + + +/* + Function table and pointers for BUFFER->SRVKM +*/ +typedef PVRSRV_ERROR (*PFN_BC_REGISTER_BUFFER_DEV)(PVRSRV_BC_SRV2BUFFER_KMJTABLE*, IMG_UINT32*); +typedef IMG_VOID (*PFN_BC_SCHEDULE_DEVICES)(IMG_VOID); +typedef PVRSRV_ERROR (*PFN_BC_REMOVE_BUFFER_DEV)(IMG_UINT32); + +typedef struct PVRSRV_BC_BUFFER2SRV_KMJTABLE_TAG +{ + IMG_UINT32 ui32TableSize; + PFN_BC_REGISTER_BUFFER_DEV pfnPVRSRVRegisterBCDevice; + PFN_BC_SCHEDULE_DEVICES pfnPVRSRVScheduleDevices; + PFN_BC_REMOVE_BUFFER_DEV pfnPVRSRVRemoveBCDevice; + +} PVRSRV_BC_BUFFER2SRV_KMJTABLE, *PPVRSRV_BC_BUFFER2SRV_KMJTABLE; + +/* 
function to retrieve kernel services function table from kernel services */ +typedef IMG_BOOL (*PFN_BC_GET_PVRJTABLE) (PPVRSRV_BC_BUFFER2SRV_KMJTABLE); + +/* Prototype for platforms that access the JTable via linkage */ +IMG_IMPORT IMG_BOOL PVRGetBufferClassJTable(PVRSRV_BC_BUFFER2SRV_KMJTABLE *psJTable); + +#if defined (__cplusplus) +} +#endif + +#endif/* #if !defined (__KERNELBUFFER_H__) */ diff --git a/sgx_km/eurasia_km/services4/include/kerneldisplay.h b/sgx_km/eurasia_km/services4/include/kerneldisplay.h new file mode 100644 index 0000000..fe6ec15 --- /dev/null +++ b/sgx_km/eurasia_km/services4/include/kerneldisplay.h @@ -0,0 +1,239 @@ +/*************************************************************************/ /*! +@Title Display device class API structures and prototypes +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Provides display device class API structures and prototypes + for kernel services to kernel 3rd party display. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined (__KERNELDISPLAY_H__) +#define __KERNELDISPLAY_H__ + +#if defined(SUPPORT_PVRSRV_DEVICE_CLASS) + +#if defined (__cplusplus) +extern "C" { +#endif + +typedef PVRSRV_ERROR (*PFN_OPEN_DC_DEVICE)(IMG_UINT32, IMG_HANDLE*, PVRSRV_SYNC_DATA*); +typedef PVRSRV_ERROR (*PFN_CLOSE_DC_DEVICE)(IMG_HANDLE); +typedef PVRSRV_ERROR (*PFN_ENUM_DC_FORMATS)(IMG_HANDLE, IMG_UINT32*, DISPLAY_FORMAT*); +typedef PVRSRV_ERROR (*PFN_ENUM_DC_DIMS)(IMG_HANDLE, + DISPLAY_FORMAT*, + IMG_UINT32*, + DISPLAY_DIMS*); +typedef PVRSRV_ERROR (*PFN_GET_DC_SYSTEMBUFFER)(IMG_HANDLE, IMG_HANDLE*); +typedef PVRSRV_ERROR (*PFN_GET_DC_INFO)(IMG_HANDLE, DISPLAY_INFO*); +typedef PVRSRV_ERROR (*PFN_CREATE_DC_SWAPCHAIN)(IMG_HANDLE, + IMG_UINT32, + DISPLAY_SURF_ATTRIBUTES*, + DISPLAY_SURF_ATTRIBUTES*, + IMG_UINT32, + PVRSRV_SYNC_DATA**, + IMG_UINT32, + IMG_HANDLE*, + IMG_UINT32*); +typedef PVRSRV_ERROR (*PFN_DESTROY_DC_SWAPCHAIN)(IMG_HANDLE, + IMG_HANDLE); +typedef PVRSRV_ERROR (*PFN_SET_DC_DSTRECT)(IMG_HANDLE, IMG_HANDLE, IMG_RECT*); +typedef PVRSRV_ERROR (*PFN_SET_DC_SRCRECT)(IMG_HANDLE, IMG_HANDLE, IMG_RECT*); +typedef PVRSRV_ERROR (*PFN_SET_DC_DSTCK)(IMG_HANDLE, IMG_HANDLE, IMG_UINT32); +typedef PVRSRV_ERROR (*PFN_SET_DC_SRCCK)(IMG_HANDLE, IMG_HANDLE, IMG_UINT32); +typedef PVRSRV_ERROR (*PFN_GET_DC_BUFFERS)(IMG_HANDLE, + IMG_HANDLE, + IMG_UINT32*, + IMG_HANDLE*); +typedef PVRSRV_ERROR (*PFN_SWAP_TO_DC_BUFFER)(IMG_HANDLE, + IMG_HANDLE, + IMG_UINT32, + IMG_HANDLE, + IMG_UINT32, + IMG_RECT*); +typedef IMG_VOID (*PFN_QUERY_SWAP_COMMAND_ID)(IMG_HANDLE, IMG_HANDLE, IMG_HANDLE, IMG_HANDLE, IMG_UINT16*, IMG_BOOL*); +typedef IMG_VOID (*PFN_SET_DC_STATE)(IMG_HANDLE, IMG_UINT32); + +/* + Function table for SRVKM->DISPLAY +*/ +typedef struct PVRSRV_DC_SRV2DISP_KMJTABLE_TAG +{ + IMG_UINT32 ui32TableSize; + PFN_OPEN_DC_DEVICE pfnOpenDCDevice; + PFN_CLOSE_DC_DEVICE pfnCloseDCDevice; + PFN_ENUM_DC_FORMATS pfnEnumDCFormats; + 
PFN_ENUM_DC_DIMS pfnEnumDCDims; + PFN_GET_DC_SYSTEMBUFFER pfnGetDCSystemBuffer; + PFN_GET_DC_INFO pfnGetDCInfo; + PFN_GET_BUFFER_ADDR pfnGetBufferAddr; + PFN_CREATE_DC_SWAPCHAIN pfnCreateDCSwapChain; + PFN_DESTROY_DC_SWAPCHAIN pfnDestroyDCSwapChain; + PFN_SET_DC_DSTRECT pfnSetDCDstRect; + PFN_SET_DC_SRCRECT pfnSetDCSrcRect; + PFN_SET_DC_DSTCK pfnSetDCDstColourKey; + PFN_SET_DC_SRCCK pfnSetDCSrcColourKey; + PFN_GET_DC_BUFFERS pfnGetDCBuffers; + PFN_SWAP_TO_DC_BUFFER pfnSwapToDCBuffer; + PFN_SET_DC_STATE pfnSetDCState; + +} PVRSRV_DC_SRV2DISP_KMJTABLE; + +/* ISR callback pfn prototype */ +typedef IMG_BOOL (*PFN_ISR_HANDLER)(IMG_VOID*); + +/* + functions exported by kernel services for use by 3rd party kernel display class device driver +*/ +typedef PVRSRV_ERROR (*PFN_DC_REGISTER_DISPLAY_DEV)(PVRSRV_DC_SRV2DISP_KMJTABLE*, IMG_UINT32*); +typedef PVRSRV_ERROR (*PFN_DC_REMOVE_DISPLAY_DEV)(IMG_UINT32); +typedef PVRSRV_ERROR (*PFN_DC_OEM_FUNCTION)(IMG_UINT32, IMG_VOID*, IMG_UINT32, IMG_VOID*, IMG_UINT32); +typedef PVRSRV_ERROR (*PFN_DC_REGISTER_COMMANDPROCLIST)(IMG_UINT32, PPFN_CMD_PROC,IMG_UINT32[][2], IMG_UINT32); +typedef PVRSRV_ERROR (*PFN_DC_REMOVE_COMMANDPROCLIST)(IMG_UINT32, IMG_UINT32); +typedef IMG_VOID (*PFN_DC_CMD_COMPLETE)(IMG_HANDLE, IMG_BOOL); +typedef PVRSRV_ERROR (*PFN_DC_REGISTER_SYS_ISR)(PFN_ISR_HANDLER, IMG_VOID*, IMG_UINT32, IMG_UINT32); +typedef PVRSRV_ERROR (*PFN_DC_REGISTER_POWER)(IMG_UINT32, PFN_PRE_POWER, PFN_POST_POWER, + PFN_PRE_CLOCKSPEED_CHANGE, PFN_POST_CLOCKSPEED_CHANGE, + IMG_HANDLE, PVRSRV_DEV_POWER_STATE, PVRSRV_DEV_POWER_STATE); + +typedef struct _PVRSRV_KERNEL_MEM_INFO_* PDC_MEM_INFO; + +typedef PVRSRV_ERROR (*PFN_DC_MEMINFO_GET_CPU_VADDR)(PDC_MEM_INFO, IMG_CPU_VIRTADDR *pVAddr); +typedef PVRSRV_ERROR (*PFN_DC_MEMINFO_GET_CPU_PADDR)(PDC_MEM_INFO, IMG_SIZE_T uByteOffset, IMG_CPU_PHYADDR *pPAddr); +typedef PVRSRV_ERROR (*PFN_DC_MEMINFO_GET_BYTE_SIZE)(PDC_MEM_INFO, IMG_SIZE_T *uByteSize); +typedef IMG_BOOL 
(*PFN_DC_MEMINFO_IS_PHYS_CONTIG)(PDC_MEM_INFO); + +/* + Function table for DISPLAY->SRVKM +*/ +typedef struct PVRSRV_DC_DISP2SRV_KMJTABLE_TAG +{ + IMG_UINT32 ui32TableSize; + PFN_DC_REGISTER_DISPLAY_DEV pfnPVRSRVRegisterDCDevice; + PFN_DC_REMOVE_DISPLAY_DEV pfnPVRSRVRemoveDCDevice; + PFN_DC_OEM_FUNCTION pfnPVRSRVOEMFunction; + PFN_DC_REGISTER_COMMANDPROCLIST pfnPVRSRVRegisterCmdProcList; + PFN_DC_REMOVE_COMMANDPROCLIST pfnPVRSRVRemoveCmdProcList; + PFN_DC_CMD_COMPLETE pfnPVRSRVCmdComplete; + PFN_DC_REGISTER_SYS_ISR pfnPVRSRVRegisterSystemISRHandler; + PFN_DC_REGISTER_POWER pfnPVRSRVRegisterPowerDevice; + PFN_DC_MEMINFO_GET_CPU_VADDR pfnPVRSRVDCMemInfoGetCpuVAddr; + PFN_DC_MEMINFO_GET_CPU_PADDR pfnPVRSRVDCMemInfoGetCpuPAddr; + PFN_DC_MEMINFO_GET_BYTE_SIZE pfnPVRSRVDCMemInfoGetByteSize; + PFN_DC_MEMINFO_IS_PHYS_CONTIG pfnPVRSRVDCMemInfoIsPhysContig; + +} PVRSRV_DC_DISP2SRV_KMJTABLE, *PPVRSRV_DC_DISP2SRV_KMJTABLE; + + +typedef struct DISPLAYCLASS_FLIP_COMMAND_TAG +{ + /* Ext Device Handle */ + IMG_HANDLE hExtDevice; + + /* Ext SwapChain Handle */ + IMG_HANDLE hExtSwapChain; + + /* number of vsync intervals between successive flips */ + IMG_UINT32 ui32SwapInterval; + + /* Ext Buffer Handle (Buffer to Flip to) */ + IMG_HANDLE hExtBuffer; + + /* private tag */ + IMG_HANDLE hPrivateTag; + + /* number of clip rects */ + IMG_UINT32 ui32ClipRectCount; + + /* clip rects */ + IMG_RECT *psClipRect; + +} DISPLAYCLASS_FLIP_COMMAND; + + +typedef struct DISPLAYCLASS_FLIP_COMMAND2_TAG +{ + /* Ext Device Handle */ + IMG_HANDLE hExtDevice; + + /* Ext SwapChain Handle */ + IMG_HANDLE hExtSwapChain; + + /* number of vsync intervals between successive flips */ + IMG_UINT32 ui32SwapInterval; + + /* Unused field, padding for compatibility with above structure */ + IMG_HANDLE hUnused; + + /* private data from userspace */ + IMG_PVOID pvPrivData; + + /* length of private data in bytes */ + IMG_UINT32 ui32PrivDataLength; + + /* meminfos */ + PDC_MEM_INFO *ppsMemInfos; + + /* number of 
meminfos */ + IMG_UINT32 ui32NumMemInfos; + +} DISPLAYCLASS_FLIP_COMMAND2; + +/* start command IDs from 0 */ +#define DC_FLIP_COMMAND 0 + +/* States used in PFN_SET_DC_STATE */ +#define DC_STATE_NO_FLUSH_COMMANDS 0 +#define DC_STATE_FLUSH_COMMANDS 1 + +/* function to retrieve kernel services function table from kernel services */ +typedef IMG_BOOL (*PFN_DC_GET_PVRJTABLE)(PPVRSRV_DC_DISP2SRV_KMJTABLE); + +/* Prototype for platforms that access the JTable via linkage */ +IMG_IMPORT IMG_BOOL PVRGetDisplayClassJTable(PVRSRV_DC_DISP2SRV_KMJTABLE *psJTable); + + +#if defined (__cplusplus) +} +#endif + +#endif /* defined(SUPPORT_PVRSRV_DEVICE_CLASS) */ + +#endif/* #if !defined (__KERNELDISPLAY_H__) */ + +/****************************************************************************** + End of file (kerneldisplay.h) +******************************************************************************/ diff --git a/sgx_km/eurasia_km/services4/include/pdump.h b/sgx_km/eurasia_km/services4/include/pdump.h new file mode 100644 index 0000000..566a118 --- /dev/null +++ b/sgx_km/eurasia_km/services4/include/pdump.h @@ -0,0 +1,51 @@ +/*************************************************************************/ /*! +@Title PDUMP flags definitions. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#ifndef _SERVICES_PDUMP_H_ +#define _SERVICES_PDUMP_H_ + +#define PDUMP_FLAGS_NEVER 0x08000000U +#define PDUMP_FLAGS_LASTFRAME 0x10000000U +#define PDUMP_FLAGS_RESETLFBUFFER 0x20000000U +#define PDUMP_FLAGS_CONTINUOUS 0x40000000U +#define PDUMP_FLAGS_PERSISTENT 0x80000000U + +#endif /* _SERVICES_PDUMP_H_ */ + diff --git a/sgx_km/eurasia_km/services4/include/pvr_bridge.h b/sgx_km/eurasia_km/services4/include/pvr_bridge.h new file mode 100644 index 0000000..bffbd37 --- /dev/null +++ b/sgx_km/eurasia_km/services4/include/pvr_bridge.h @@ -0,0 +1,1696 @@ +/*************************************************************************/ /*! +@Title PVR Bridge Functionality +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Header for the PVR Bridge code +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef __PVR_BRIDGE_H__ +#define __PVR_BRIDGE_H__ + +#if defined (__cplusplus) +extern "C" { +#endif + +#include "servicesint.h" + +/* + * Bridge Cmd Ids + */ + + +#ifdef __linux__ + + #include <asm/ioctl.h> + /*!< Nov 2006: according to ioctl-number.txt 'g' wasn't in use. 
*/ + #define PVRSRV_IOC_GID 'g' + #define PVRSRV_IO(INDEX) _IO(PVRSRV_IOC_GID, INDEX, PVRSRV_BRIDGE_PACKAGE) + #define PVRSRV_IOW(INDEX) _IOW(PVRSRV_IOC_GID, INDEX, PVRSRV_BRIDGE_PACKAGE) + #define PVRSRV_IOR(INDEX) _IOR(PVRSRV_IOC_GID, INDEX, PVRSRV_BRIDGE_PACKAGE) + #define PVRSRV_IOWR(INDEX) _IOWR(PVRSRV_IOC_GID, INDEX, PVRSRV_BRIDGE_PACKAGE) + +#else /* __linux__ */ + + #if defined(__QNXNTO__) + #define PVRSRV_IOC_GID (0x0UL) + #else + #error Unknown platform: Cannot define ioctls + #endif + + #define PVRSRV_IO(INDEX) (PVRSRV_IOC_GID + (INDEX)) + #define PVRSRV_IOW(INDEX) (PVRSRV_IOC_GID + (INDEX)) + #define PVRSRV_IOR(INDEX) (PVRSRV_IOC_GID + (INDEX)) + #define PVRSRV_IOWR(INDEX) (PVRSRV_IOC_GID + (INDEX)) + + #define PVRSRV_BRIDGE_BASE PVRSRV_IOC_GID +#endif /* __linux__ */ + + +/* + * Note *REMEMBER* to update PVRSRV_BRIDGE_LAST_CMD (below) if you add any new + * bridge commands! + * The command number of PVRSRV_BRIDGE_UM_KM_COMPAT_CHECK needs to be maintained as 0 across previous ddks, for compatibility check command to execute successfully + */ + +#define PVRSRV_BRIDGE_UMKM_CMD_FIRST 0UL +#define PVRSRV_BRIDGE_UM_KM_COMPAT_CHECK PVRSRV_IOWR(0) +#define PVRSRV_BRIDGE_UMKM_CMD_LAST (0) + +#define PVRSRV_BRIDGE_CORE_CMD_FIRST (PVRSRV_BRIDGE_UMKM_CMD_LAST + 1) +#define PVRSRV_BRIDGE_ENUM_DEVICES PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+0) /*!< enumerate device bridge index */ +#define PVRSRV_BRIDGE_ACQUIRE_DEVICEINFO PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+1) /*!< acquire device data bridge index */ +#define PVRSRV_BRIDGE_RELEASE_DEVICEINFO PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+2) /*!< release device data bridge index */ +#define PVRSRV_BRIDGE_CREATE_DEVMEMCONTEXT PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+3) /*!< create device addressable memory context */ +#define PVRSRV_BRIDGE_DESTROY_DEVMEMCONTEXT PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+4) /*!< destroy device addressable memory context */ +#define PVRSRV_BRIDGE_GET_DEVMEM_HEAPINFO 
PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+5) /*!< get device memory heap info */ +#define PVRSRV_BRIDGE_ALLOC_DEVICEMEM PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+6) /*!< alloc device memory bridge index */ +#define PVRSRV_BRIDGE_FREE_DEVICEMEM PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+7) /*!< free device memory bridge index */ +#define PVRSRV_BRIDGE_GETFREE_DEVICEMEM PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+8) /*!< get free device memory bridge index */ +#define PVRSRV_BRIDGE_CREATE_COMMANDQUEUE PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+9) /*!< create Cmd Q bridge index */ +#define PVRSRV_BRIDGE_DESTROY_COMMANDQUEUE PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+10) /*!< destroy Cmd Q bridge index */ +#define PVRSRV_BRIDGE_MHANDLE_TO_MMAP_DATA PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+11) /*!< generate mmap data from a memory handle */ +#define PVRSRV_BRIDGE_CONNECT_SERVICES PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+12) /*!< services connect bridge index */ +#define PVRSRV_BRIDGE_DISCONNECT_SERVICES PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+13) /*!< services disconnect bridge index */ +#define PVRSRV_BRIDGE_WRAP_DEVICE_MEM PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+14) /*!< wrap device memory bridge index */ +#define PVRSRV_BRIDGE_GET_DEVICEMEMINFO PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+15) /*!< read the kernel meminfo record */ +#define PVRSRV_BRIDGE_RESERVE_DEV_VIRTMEM PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+16) +#define PVRSRV_BRIDGE_FREE_DEV_VIRTMEM PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+17) +#define PVRSRV_BRIDGE_MAP_EXT_MEMORY PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+18) +#define PVRSRV_BRIDGE_UNMAP_EXT_MEMORY PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+19) +#define PVRSRV_BRIDGE_MAP_DEV_MEMORY PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+20) +#define PVRSRV_BRIDGE_UNMAP_DEV_MEMORY PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+21) +#define PVRSRV_BRIDGE_MAP_MEM_INFO_TO_USER PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+22) +#define PVRSRV_BRIDGE_UNMAP_MEM_INFO_FROM_USER 
/* Core bridge command indices (continued).
 * NOTE(review): the first macro below completes a definition whose name
 * appears at the end of the previous (extraction-split) line. */
#define PVRSRV_BRIDGE_UNMAP_MEM_INFO_FROM_USER	PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+23)
#define PVRSRV_BRIDGE_EXPORT_DEVICEMEM			PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+24)
#define PVRSRV_BRIDGE_RELEASE_MMAP_DATA			PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+25)
#define PVRSRV_BRIDGE_CHG_DEV_MEM_ATTRIBS		PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+26)
#define PVRSRV_BRIDGE_MAP_DEV_MEMORY_2			PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+27)
#define PVRSRV_BRIDGE_EXPORT_DEVICEMEM_2		PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+28)
/* The end of the core command range depends on which optional importers are
 * compiled in; each *_CMD_LAST chains into the next range's *_CMD_FIRST. */
#if defined (SUPPORT_ION)
#define PVRSRV_BRIDGE_MAP_ION_HANDLE			PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+29)
#define PVRSRV_BRIDGE_UNMAP_ION_HANDLE			PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+30)
#define PVRSRV_BRIDGE_ION_CMD_LAST				(PVRSRV_BRIDGE_CORE_CMD_FIRST+30)
#else
#define PVRSRV_BRIDGE_ION_CMD_LAST				(PVRSRV_BRIDGE_CORE_CMD_FIRST+28)
#endif
#if defined (SUPPORT_DMABUF)
#define PVRSRV_BRIDGE_DMABUF_CMD_FIRST			(PVRSRV_BRIDGE_ION_CMD_LAST+1)
#define PVRSRV_BRIDGE_MAP_DMABUF				PVRSRV_IOWR(PVRSRV_BRIDGE_DMABUF_CMD_FIRST+0)
#define PVRSRV_BRIDGE_UNMAP_DMABUF				PVRSRV_IOWR(PVRSRV_BRIDGE_DMABUF_CMD_FIRST+1)
#define PVRSRV_BRIDGE_DMABUF_CMD_LAST			(PVRSRV_BRIDGE_DMABUF_CMD_FIRST+1)
#else
#define PVRSRV_BRIDGE_DMABUF_CMD_LAST			PVRSRV_BRIDGE_ION_CMD_LAST
#endif
#define PVRSRV_BRIDGE_CORE_CMD_LAST				PVRSRV_BRIDGE_DMABUF_CMD_LAST
/* SIM */
#define PVRSRV_BRIDGE_SIM_CMD_FIRST				(PVRSRV_BRIDGE_CORE_CMD_LAST+1)
#define PVRSRV_BRIDGE_PROCESS_SIMISR_EVENT		PVRSRV_IOWR(PVRSRV_BRIDGE_SIM_CMD_FIRST+0)	/*!< RTSIM pseudo ISR */
#define PVRSRV_BRIDGE_REGISTER_SIM_PROCESS		PVRSRV_IOWR(PVRSRV_BRIDGE_SIM_CMD_FIRST+1)	/*!< Register RTSIM process thread */
#define PVRSRV_BRIDGE_UNREGISTER_SIM_PROCESS	PVRSRV_IOWR(PVRSRV_BRIDGE_SIM_CMD_FIRST+2)	/*!< Unregister RTSIM process thread */
#define PVRSRV_BRIDGE_SIM_CMD_LAST				(PVRSRV_BRIDGE_SIM_CMD_FIRST+2)

/* User Mapping */
#define PVRSRV_BRIDGE_MAPPING_CMD_FIRST			(PVRSRV_BRIDGE_SIM_CMD_LAST+1)
#define PVRSRV_BRIDGE_MAPPHYSTOUSERSPACE		PVRSRV_IOWR(PVRSRV_BRIDGE_MAPPING_CMD_FIRST+0)	/*!< map CPU phys to user space */
#define PVRSRV_BRIDGE_UNMAPPHYSTOUSERSPACE		PVRSRV_IOWR(PVRSRV_BRIDGE_MAPPING_CMD_FIRST+1)	/*!< unmap CPU phys to user space */
#define PVRSRV_BRIDGE_GETPHYSTOUSERSPACEMAP		PVRSRV_IOWR(PVRSRV_BRIDGE_MAPPING_CMD_FIRST+2)	/*!< get user copy of Phys to Lin lookup table */
#define PVRSRV_BRIDGE_MAPPING_CMD_LAST			(PVRSRV_BRIDGE_MAPPING_CMD_FIRST+2)

#define PVRSRV_BRIDGE_STATS_CMD_FIRST			(PVRSRV_BRIDGE_MAPPING_CMD_LAST+1)
#define PVRSRV_BRIDGE_GET_FB_STATS				PVRSRV_IOWR(PVRSRV_BRIDGE_STATS_CMD_FIRST+0)	/*!< Get FB memory stats */
#define PVRSRV_BRIDGE_STATS_CMD_LAST			(PVRSRV_BRIDGE_STATS_CMD_FIRST+0)

/* API to retrieve misc. info. from services */
#define PVRSRV_BRIDGE_MISC_CMD_FIRST			(PVRSRV_BRIDGE_STATS_CMD_LAST+1)
#define PVRSRV_BRIDGE_GET_MISC_INFO				PVRSRV_IOWR(PVRSRV_BRIDGE_MISC_CMD_FIRST+0)	/*!< misc. info. */
#define PVRSRV_BRIDGE_RELEASE_MISC_INFO			PVRSRV_IOWR(PVRSRV_BRIDGE_MISC_CMD_FIRST+1)	/*!< misc. info. */
#define PVRSRV_BRIDGE_MISC_CMD_LAST				(PVRSRV_BRIDGE_MISC_CMD_FIRST+1)

/* Overlay ioctls */

#if defined (SUPPORT_OVERLAY_ROTATE_BLIT)
#define PVRSRV_BRIDGE_OVERLAY_CMD_FIRST			(PVRSRV_BRIDGE_MISC_CMD_LAST+1)
#define PVRSRV_BRIDGE_INIT_3D_OVL_BLT_RES		PVRSRV_IOWR(PVRSRV_BRIDGE_OVERLAY_CMD_FIRST+0)	/*!< 3D Overlay rotate blit init */
#define PVRSRV_BRIDGE_DEINIT_3D_OVL_BLT_RES		PVRSRV_IOWR(PVRSRV_BRIDGE_OVERLAY_CMD_FIRST+1)	/*!< 3D Overlay rotate blit deinit */
#define PVRSRV_BRIDGE_OVERLAY_CMD_LAST			(PVRSRV_BRIDGE_OVERLAY_CMD_FIRST+1)
#else
#define PVRSRV_BRIDGE_OVERLAY_CMD_LAST			PVRSRV_BRIDGE_MISC_CMD_LAST
#endif

/* PDUMP */
#if defined(PDUMP)
#define PVRSRV_BRIDGE_PDUMP_CMD_FIRST			(PVRSRV_BRIDGE_OVERLAY_CMD_LAST+1)
#define PVRSRV_BRIDGE_PDUMP_INIT				PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+0)	/*!< pdump command structure */
#define PVRSRV_BRIDGE_PDUMP_MEMPOL				PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+1)	/*!< pdump command structure */
#define PVRSRV_BRIDGE_PDUMP_DUMPMEM				PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+2)	/*!< pdump command structure */
#define PVRSRV_BRIDGE_PDUMP_REG					PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+3)	/*!< pdump command structure */
#define PVRSRV_BRIDGE_PDUMP_REGPOL				PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+4)	/*!< pdump command structure */
#define PVRSRV_BRIDGE_PDUMP_COMMENT				PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+5)	/*!< pdump command structure */
#define PVRSRV_BRIDGE_PDUMP_SETFRAME			PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+6)	/*!< pdump command structure */
#define PVRSRV_BRIDGE_PDUMP_ISCAPTURING			PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+7)	/*!< pdump command structure */
#define PVRSRV_BRIDGE_PDUMP_DUMPBITMAP			PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+8)	/*!< pdump command structure */
#define PVRSRV_BRIDGE_PDUMP_DUMPREADREG			PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+9)	/*!< pdump command structure */
#define PVRSRV_BRIDGE_PDUMP_SYNCPOL				PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+10)	/*!< pdump command structure */
#define PVRSRV_BRIDGE_PDUMP_DUMPSYNC			PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+11)	/*!< pdump command structure */
#define PVRSRV_BRIDGE_PDUMP_MEMPAGES			PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+12)	/*!< pdump command structure */
#define PVRSRV_BRIDGE_PDUMP_DRIVERINFO			PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+13)	/*!< pdump command structure */
/* NOTE(review): index +14 is unused (gap between DRIVERINFO at +13 and
 * DUMPPDDEVPADDR at +15) despite the no-gap note in the #else branch below —
 * presumably a removed command; confirm against the bridge dispatch table. */
#define PVRSRV_BRIDGE_PDUMP_DUMPPDDEVPADDR		PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+15)	/*!< pdump command structure */
#define PVRSRV_BRIDGE_PDUMP_CYCLE_COUNT_REG_READ	PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+16)
#define PVRSRV_BRIDGE_PDUMP_STARTINITPHASE		PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+17)
#define PVRSRV_BRIDGE_PDUMP_STOPINITPHASE		PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+18)
#define PVRSRV_BRIDGE_PDUMP_CMD_LAST			(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+18)
#else
/* Note we are careful here not to leave a large gap in the ioctl numbers.
 * (Some ports may use these values to index into an array where large gaps can
 * waste memory) */
#define PVRSRV_BRIDGE_PDUMP_CMD_LAST			PVRSRV_BRIDGE_OVERLAY_CMD_LAST
#endif

/* DisplayClass APIs */
#define PVRSRV_BRIDGE_OEM_CMD_FIRST				(PVRSRV_BRIDGE_PDUMP_CMD_LAST+1)
#define PVRSRV_BRIDGE_GET_OEMJTABLE				PVRSRV_IOWR(PVRSRV_BRIDGE_OEM_CMD_FIRST+0)	/*!< Get OEM Jtable */
#define PVRSRV_BRIDGE_OEM_CMD_LAST				(PVRSRV_BRIDGE_OEM_CMD_FIRST+0)

/* device class enum */
#define PVRSRV_BRIDGE_DEVCLASS_CMD_FIRST		(PVRSRV_BRIDGE_OEM_CMD_LAST+1)
#define PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY	PVRSRV_IOWR(PVRSRV_BRIDGE_DEVCLASS_CMD_FIRST+0)
#define PVRSRV_BRIDGE_UNMAP_DEVICECLASS_MEMORY	PVRSRV_IOWR(PVRSRV_BRIDGE_DEVCLASS_CMD_FIRST+1)
#define PVRSRV_BRIDGE_ENUM_CLASS				PVRSRV_IOWR(PVRSRV_BRIDGE_DEVCLASS_CMD_FIRST+2)
#define PVRSRV_BRIDGE_DEVCLASS_CMD_LAST			(PVRSRV_BRIDGE_DEVCLASS_CMD_FIRST+2)

/* display class API */
#define PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST		(PVRSRV_BRIDGE_DEVCLASS_CMD_LAST+1)
#define PVRSRV_BRIDGE_OPEN_DISPCLASS_DEVICE		PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+0)
#define PVRSRV_BRIDGE_CLOSE_DISPCLASS_DEVICE	PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+1)
#define PVRSRV_BRIDGE_ENUM_DISPCLASS_FORMATS	PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+2)
#define PVRSRV_BRIDGE_ENUM_DISPCLASS_DIMS		PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+3)
#define PVRSRV_BRIDGE_GET_DISPCLASS_SYSBUFFER	PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+4)
#define PVRSRV_BRIDGE_GET_DISPCLASS_INFO		PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+5)
#define PVRSRV_BRIDGE_CREATE_DISPCLASS_SWAPCHAIN	PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+6)
#define PVRSRV_BRIDGE_DESTROY_DISPCLASS_SWAPCHAIN	PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+7)
#define PVRSRV_BRIDGE_SET_DISPCLASS_DSTRECT		PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+8)
#define PVRSRV_BRIDGE_SET_DISPCLASS_SRCRECT		PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+9)
#define PVRSRV_BRIDGE_SET_DISPCLASS_DSTCOLOURKEY	PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+10)
#define PVRSRV_BRIDGE_SET_DISPCLASS_SRCCOLOURKEY	PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+11)
#define PVRSRV_BRIDGE_GET_DISPCLASS_BUFFERS		PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+12)
#define PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_BUFFER	PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+13)
#define PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_BUFFER2	PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+14)
#define PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_SYSTEM	PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+15)
#define PVRSRV_BRIDGE_DISPCLASS_CMD_LAST		(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+15)

/* buffer class API */
#define PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST		(PVRSRV_BRIDGE_DISPCLASS_CMD_LAST+1)
#define PVRSRV_BRIDGE_OPEN_BUFFERCLASS_DEVICE	PVRSRV_IOWR(PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST+0)
#define PVRSRV_BRIDGE_CLOSE_BUFFERCLASS_DEVICE	PVRSRV_IOWR(PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST+1)
#define PVRSRV_BRIDGE_GET_BUFFERCLASS_INFO		PVRSRV_IOWR(PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST+2)
#define PVRSRV_BRIDGE_GET_BUFFERCLASS_BUFFER	PVRSRV_IOWR(PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST+3)
#define PVRSRV_BRIDGE_BUFCLASS_CMD_LAST			(PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST+3)

/* Wrap/Unwrap external memory */
#define PVRSRV_BRIDGE_WRAP_CMD_FIRST			(PVRSRV_BRIDGE_BUFCLASS_CMD_LAST+1)
#define PVRSRV_BRIDGE_WRAP_EXT_MEMORY			PVRSRV_IOWR(PVRSRV_BRIDGE_WRAP_CMD_FIRST+0)
#define PVRSRV_BRIDGE_UNWRAP_EXT_MEMORY			PVRSRV_IOWR(PVRSRV_BRIDGE_WRAP_CMD_FIRST+1)
#define PVRSRV_BRIDGE_WRAP_CMD_LAST				(PVRSRV_BRIDGE_WRAP_CMD_FIRST+1)

/* Shared memory */
#define PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST		(PVRSRV_BRIDGE_WRAP_CMD_LAST+1)
#define PVRSRV_BRIDGE_ALLOC_SHARED_SYS_MEM		PVRSRV_IOWR(PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST+0)
#define PVRSRV_BRIDGE_FREE_SHARED_SYS_MEM		PVRSRV_IOWR(PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST+1)
#define PVRSRV_BRIDGE_MAP_MEMINFO_MEM			PVRSRV_IOWR(PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST+2)
#define PVRSRV_BRIDGE_UNMAP_MEMINFO_MEM			PVRSRV_IOWR(PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST+3)
#define PVRSRV_BRIDGE_SHAREDMEM_CMD_LAST		(PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST+3)

/* Initialisation Service support */
#define PVRSRV_BRIDGE_INITSRV_CMD_FIRST			(PVRSRV_BRIDGE_SHAREDMEM_CMD_LAST+1)
#define PVRSRV_BRIDGE_INITSRV_CONNECT			PVRSRV_IOWR(PVRSRV_BRIDGE_INITSRV_CMD_FIRST+0)
#define PVRSRV_BRIDGE_INITSRV_DISCONNECT		PVRSRV_IOWR(PVRSRV_BRIDGE_INITSRV_CMD_FIRST+1)
#define PVRSRV_BRIDGE_INITSRV_CMD_LAST			(PVRSRV_BRIDGE_INITSRV_CMD_FIRST+1)

/* Event Objects */
#define PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST	(PVRSRV_BRIDGE_INITSRV_CMD_LAST+1)
#define PVRSRV_BRIDGE_EVENT_OBJECT_WAIT			PVRSRV_IOWR(PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST+0)
#define PVRSRV_BRIDGE_EVENT_OBJECT_OPEN			PVRSRV_IOWR(PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST+1)
#define PVRSRV_BRIDGE_EVENT_OBJECT_CLOSE		PVRSRV_IOWR(PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST+2)
#define PVRSRV_BRIDGE_EVENT_OBJECT_CMD_LAST		(PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST+2)

/* Sync ops */
#define PVRSRV_BRIDGE_SYNC_OPS_CMD_FIRST		(PVRSRV_BRIDGE_EVENT_OBJECT_CMD_LAST+1)
#define PVRSRV_BRIDGE_CREATE_SYNC_INFO_MOD_OBJ	PVRSRV_IOWR(PVRSRV_BRIDGE_SYNC_OPS_CMD_FIRST+0)
#define PVRSRV_BRIDGE_DESTROY_SYNC_INFO_MOD_OBJ	PVRSRV_IOWR(PVRSRV_BRIDGE_SYNC_OPS_CMD_FIRST+1)
#define PVRSRV_BRIDGE_MODIFY_PENDING_SYNC_OPS	PVRSRV_IOWR(PVRSRV_BRIDGE_SYNC_OPS_CMD_FIRST+2)
#define PVRSRV_BRIDGE_MODIFY_COMPLETE_SYNC_OPS	PVRSRV_IOWR(PVRSRV_BRIDGE_SYNC_OPS_CMD_FIRST+3)
#define PVRSRV_BRIDGE_SYNC_OPS_TAKE_TOKEN		PVRSRV_IOWR(PVRSRV_BRIDGE_SYNC_OPS_CMD_FIRST+4)
#define PVRSRV_BRIDGE_SYNC_OPS_FLUSH_TO_TOKEN	PVRSRV_IOWR(PVRSRV_BRIDGE_SYNC_OPS_CMD_FIRST+5)
#define PVRSRV_BRIDGE_SYNC_OPS_FLUSH_TO_MOD_OBJ	PVRSRV_IOWR(PVRSRV_BRIDGE_SYNC_OPS_CMD_FIRST+6)
#define PVRSRV_BRIDGE_SYNC_OPS_FLUSH_TO_DELTA	PVRSRV_IOWR(PVRSRV_BRIDGE_SYNC_OPS_CMD_FIRST+7)
#define PVRSRV_BRIDGE_ALLOC_SYNC_INFO			PVRSRV_IOWR(PVRSRV_BRIDGE_SYNC_OPS_CMD_FIRST+8)
#define PVRSRV_BRIDGE_FREE_SYNC_INFO			PVRSRV_IOWR(PVRSRV_BRIDGE_SYNC_OPS_CMD_FIRST+9)
#define PVRSRV_BRIDGE_SYNC_OPS_CMD_LAST			(PVRSRV_BRIDGE_SYNC_OPS_CMD_FIRST+9)

/* For sgx_bridge.h (msvdx_bridge.h should probably use these defines too) */
#define PVRSRV_BRIDGE_LAST_NON_DEVICE_CMD		(PVRSRV_BRIDGE_SYNC_OPS_CMD_LAST+1)


/******************************************************************************
 * Bridge flags
 *****************************************************************************/
#define PVRSRV_KERNEL_MODE_CLIENT 1

/******************************************************************************
 * Generic bridge structures
 *****************************************************************************/

/******************************************************************************
 * bridge return structure
 *****************************************************************************/
typedef struct IMG_COMPAT PVRSRV_BRIDGE_RETURN_TAG
{
	PVRSRV_ERROR eError;
	IMG_HANDLE hData;
} PVRSRV_BRIDGE_RETURN;


/******************************************************************************
 * bridge packaging structure
 *****************************************************************************/
typedef struct PVRSRV_BRIDGE_PACKAGE_TAG
{
	IMG_UINT32 ui32BridgeID;     /*!< ioctl/drvesc index */
	IMG_UINT32 ui32Size;         /*!< size of structure */
	IMG_HANDLE hParamIn;         /*!< input data buffer */
	IMG_HANDLE hParamOut;        /*!< output data buffer */
	IMG_UINT32 ui32InBufferSize; /*!< size of input data buffer */
	IMG_UINT32 ui32OutBufferSize;/*!< size of output data buffer */

	IMG_HANDLE hKernelServices;  /*!< kernel services handle */
} PVRSRV_BRIDGE_PACKAGE;


/******************************************************************************
 * Input structures for IOCTL/DRVESC
 *****************************************************************************/


/******************************************************************************
 * 'bridge in' connect to services
 *****************************************************************************/
typedef struct IMG_COMPAT PVRSRV_BRIDGE_IN_CONNECT_SERVICES_TAG
{
	IMG_UINT32 ui32Flags;
} PVRSRV_BRIDGE_IN_CONNECT_SERVICES;

/******************************************************************************
 * 'bridge in' acquire device info
 *****************************************************************************/
typedef struct IMG_COMPAT PVRSRV_BRIDGE_IN_ACQUIRE_DEVICEINFO_TAG
{
	IMG_UINT32 uiDevIndex;
	PVRSRV_DEVICE_TYPE eDeviceType;
} PVRSRV_BRIDGE_IN_ACQUIRE_DEVICEINFO;


/******************************************************************************
 * 'bridge in' enum class
 *****************************************************************************/
typedef struct IMG_COMPAT PVRSRV_BRIDGE_IN_ENUMCLASS_TAG
{
	PVRSRV_DEVICE_CLASS eDeviceClass;
} PVRSRV_BRIDGE_IN_ENUMCLASS;


/******************************************************************************
 * 'bridge in' close display class device
 *****************************************************************************/
typedef struct PVRSRV_BRIDGE_IN_CLOSE_DISPCLASS_DEVICE_TAG
{
	IMG_HANDLE hDeviceKM; /* device handle returned by OPEN_DISPCLASS_DEVICE */
} PVRSRV_BRIDGE_IN_CLOSE_DISPCLASS_DEVICE;


/******************************************************************************
 * 'bridge in' enum display class formats
 *****************************************************************************/
typedef struct PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_FORMATS_TAG
{
	IMG_HANDLE hDeviceKM;
} PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_FORMATS;


/******************************************************************************
 * 'bridge in' get display class sysbuffer
 *****************************************************************************/
typedef struct PVRSRV_BRIDGE_IN_GET_DISPCLASS_SYSBUFFER_TAG
{
	IMG_HANDLE hDeviceKM;
} PVRSRV_BRIDGE_IN_GET_DISPCLASS_SYSBUFFER;


/******************************************************************************
 * 'bridge in' display class info
 *****************************************************************************/
typedef struct PVRSRV_BRIDGE_IN_GET_DISPCLASS_INFO_TAG
{
	IMG_HANDLE hDeviceKM;
} PVRSRV_BRIDGE_IN_GET_DISPCLASS_INFO;


/******************************************************************************
 * 'bridge in' close buffer class device
 *****************************************************************************/
typedef struct PVRSRV_BRIDGE_IN_CLOSE_BUFFERCLASS_DEVICE_TAG
{
	IMG_HANDLE hDeviceKM;
} PVRSRV_BRIDGE_IN_CLOSE_BUFFERCLASS_DEVICE;


/******************************************************************************
 * 'bridge in' get buffer class info
 * (original comment said "close buffer class device" — copy/paste artefact)
 *****************************************************************************/
typedef struct PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_INFO_TAG
{
	IMG_HANDLE hDeviceKM;
} PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_INFO;

/******************************************************************************
 * 'bridge in' get device memory heap info
 *****************************************************************************/
typedef struct PVRSRV_BRIDGE_IN_GET_DEVMEM_HEAPINFO_TAG
{
	IMG_HANDLE hDevCookie;
	IMG_HANDLE hDevMemContext;
} PVRSRV_BRIDGE_IN_GET_DEVMEM_HEAPINFO;


/******************************************************************************
 * 'bridge in' create device memory context
 *****************************************************************************/
typedef struct PVRSRV_BRIDGE_IN_CREATE_DEVMEMCONTEXT_TAG
{
	IMG_HANDLE hDevCookie;
} PVRSRV_BRIDGE_IN_CREATE_DEVMEMCONTEXT;


/******************************************************************************
 * 'bridge in' destroy device memory context
 *****************************************************************************/
typedef struct PVRSRV_BRIDGE_IN_DESTROY_DEVMEMCONTEXT_TAG
{
	IMG_HANDLE hDevCookie;
	IMG_HANDLE hDevMemContext;
} PVRSRV_BRIDGE_IN_DESTROY_DEVMEMCONTEXT;


/******************************************************************************
 * 'bridge in' alloc device memory
 *****************************************************************************/
typedef struct IMG_COMPAT PVRSRV_BRIDGE_IN_ALLOCDEVICEMEM_TAG
{
	IMG_HANDLE hDevCookie;
	IMG_HANDLE hDevMemHeap;

	IMG_HANDLE hPrivData;
	IMG_UINT32 ui32PrivDataLength;
	IMG_UINT32 ui32Attribs;

	IMG_SIZE_T uSize;
	IMG_SIZE_T uAlignment;

	/* sparse-allocation parameters (chunked virtual/physical layout) */
	IMG_UINT32 ui32ChunkSize;
	IMG_UINT32 ui32NumVirtChunks;
	IMG_UINT32 ui32NumPhysChunks;
	IMG_HANDLE hMapChunk;
} PVRSRV_BRIDGE_IN_ALLOCDEVICEMEM;

/******************************************************************************
 * 'bridge in' free device memory
 *****************************************************************************/
typedef struct PVRSRV_BRIDGE_IN_FREEDEVICEMEM_TAG
{
	IMG_HANDLE hDevCookie;
	IMG_HANDLE hKernelMemInfo;
} PVRSRV_BRIDGE_IN_FREEDEVICEMEM;

/******************************************************************************
 * 'bridge in' export device memory
 *****************************************************************************/
typedef struct PVRSRV_BRIDGE_IN_EXPORTDEVICEMEM_TAG
{
	IMG_HANDLE hDevCookie;
	IMG_HANDLE hKernelMemInfo;
} PVRSRV_BRIDGE_IN_EXPORTDEVICEMEM;

/******************************************************************************
 * 'bridge in' map ion handle
 *****************************************************************************/
#define ION_IMPORT_MAX_FDS 3
#define ION_IMPORT_MAX_CHUNK_COUNT 3
typedef struct _PVRSRV_BRIDGE_IN_MAP_ION_HANDLE_
{
	IMG_UINT32 ui32NumFDs;
	IMG_INT32  ai32BufferFDs[ION_IMPORT_MAX_FDS];
	IMG_UINT32 ui32Attribs;
	IMG_UINT32 ui32ChunkCount;
	IMG_SIZE_T auiOffset[ION_IMPORT_MAX_CHUNK_COUNT];
	IMG_SIZE_T auiSize[ION_IMPORT_MAX_CHUNK_COUNT];
	IMG_HANDLE hDevCookie;
	IMG_HANDLE hDevMemHeap;
} PVRSRV_BRIDGE_IN_MAP_ION_HANDLE;

/******************************************************************************
 * 'bridge in' unmap ion handle
 *****************************************************************************/
typedef struct PVRSRV_BRIDGE_IN_UNMAP_ION_HANDLE_TAG
{
	IMG_HANDLE hKernelMemInfo;
} PVRSRV_BRIDGE_IN_UNMAP_ION_HANDLE;

/******************************************************************************
 * 'bridge in' map dmabuf
 *****************************************************************************/
typedef struct _PVRSRV_BRIDGE_IN_MAP_DMABUF_
{
	IMG_UINT32 ui32BridgeFlags; /* Must be first member of structure */
	IMG_INT32  i32FD;
	IMG_SIZE_T uiOffset;
	IMG_SIZE_T uiSize;
	IMG_UINT32 ui32Attribs;
	IMG_HANDLE hDevCookie;
	IMG_HANDLE hDevMemHeap;
} PVRSRV_BRIDGE_IN_MAP_DMABUF;

/******************************************************************************
 * 'bridge in' unmap dmabuf
 *****************************************************************************/
typedef struct IMG_COMPAT PVRSRV_BRIDGE_IN_UNMAP_DMABUF_TAG
{
	IMG_UINT32 ui32BridgeFlags; /* Must be first member of structure */
	IMG_HANDLE hKernelMemInfo;
} PVRSRV_BRIDGE_IN_UNMAP_DMABUF;

/******************************************************************************
 * 'bridge in' get free device memory
 *****************************************************************************/
typedef struct IMG_COMPAT PVRSRV_BRIDGE_IN_GETFREEDEVICEMEM_TAG
{
	IMG_UINT32 ui32Flags;
} PVRSRV_BRIDGE_IN_GETFREEDEVICEMEM;

/******************************************************************************
 * 'bridge in' create Cmd Q
 *****************************************************************************/
typedef struct IMG_COMPAT PVRSRV_BRIDGE_IN_CREATECOMMANDQUEUE_TAG
{
	IMG_HANDLE hDevCookie;
	IMG_SIZE_T uQueueSize;
} PVRSRV_BRIDGE_IN_CREATECOMMANDQUEUE;


/******************************************************************************
 * 'bridge in' destroy Cmd Q
 *****************************************************************************/
typedef struct PVRSRV_BRIDGE_IN_DESTROYCOMMANDQUEUE_TAG
{
	IMG_HANDLE hDevCookie;
	PVRSRV_QUEUE_INFO *psQueueInfo;
} PVRSRV_BRIDGE_IN_DESTROYCOMMANDQUEUE;


/******************************************************************************
 * 'bridge in' get full map data
 *****************************************************************************/
typedef struct PVRSRV_BRIDGE_IN_MHANDLE_TO_MMAP_DATA_TAG
{
	IMG_HANDLE hMHandle; /* Handle associated with the memory that needs to be mapped */
} PVRSRV_BRIDGE_IN_MHANDLE_TO_MMAP_DATA;


/******************************************************************************
 * 'bridge in' release map data
 *****************************************************************************/
typedef struct PVRSRV_BRIDGE_IN_RELEASE_MMAP_DATA_TAG
{
	IMG_HANDLE hMHandle; /* Handle associated with the memory that needs to be mapped */
} PVRSRV_BRIDGE_IN_RELEASE_MMAP_DATA;


/******************************************************************************
 * 'bridge in' reserve vm
 *****************************************************************************/
typedef struct PVRSRV_BRIDGE_IN_RESERVE_DEV_VIRTMEM_TAG
{
	IMG_HANDLE hDevMemHeap;
	IMG_DEV_VIRTADDR *psDevVAddr;
	IMG_SIZE_T uSize;
	IMG_SIZE_T uAlignment;
} PVRSRV_BRIDGE_IN_RESERVE_DEV_VIRTMEM;

/******************************************************************************
 * 'bridge out' connect to services
 *****************************************************************************/
typedef struct PVRSRV_BRIDGE_OUT_CONNECT_SERVICES_TAG
{
	PVRSRV_ERROR eError;
	IMG_UINT8 ui8KernelArch;
	IMG_UINT64 hKernelServices; /* actually an IMG_HANDLE returned from KM */
} PVRSRV_BRIDGE_OUT_CONNECT_SERVICES;

/******************************************************************************
 * 'bridge out' reserve vm
 *****************************************************************************/
typedef struct PVRSRV_BRIDGE_OUT_RESERVE_DEV_VIRTMEM_TAG
{
	PVRSRV_ERROR eError;
	IMG_HANDLE hKernelMemInfo;
	IMG_HANDLE hKernelSyncInfo;
	PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
	PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;

} PVRSRV_BRIDGE_OUT_RESERVE_DEV_VIRTMEM;


/******************************************************************************
 * 'bridge in' free vm
 *****************************************************************************/
typedef struct PVRSRV_BRIDGE_IN_FREE_DEV_VIRTMEM_TAG
{
	IMG_HANDLE hKernelMemInfo;
	PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
	PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
} PVRSRV_BRIDGE_IN_FREE_DEV_VIRTMEM;

/******************************************************************************
 * 'bridge in' map dev memory allocation to another heap
 *****************************************************************************/
typedef struct PVRSRV_BRIDGE_IN_MAP_DEV_MEMORY_TAG
{
	IMG_HANDLE hKernelMemInfo;
	IMG_HANDLE hDstDevMemHeap;
} PVRSRV_BRIDGE_IN_MAP_DEV_MEMORY;

/******************************************************************************
 * 'bridge out' map dev memory allocation to another heap
 *****************************************************************************/
typedef struct IMG_COMPAT PVRSRV_BRIDGE_OUT_MAP_DEV_MEMORY_TAG
{
	PVRSRV_ERROR eError;
	PVRSRV_CLIENT_MEM_INFO sDstClientMemInfo;
	PVRSRV_CLIENT_SYNC_INFO sDstClientSyncInfo;
} PVRSRV_BRIDGE_OUT_MAP_DEV_MEMORY;


/******************************************************************************
 * 'bridge in' unmap dev memory allocation
 *****************************************************************************/
typedef struct PVRSRV_BRIDGE_IN_UNMAP_DEV_MEMORY_TAG
{
	IMG_HANDLE hKernelMemInfo;
} PVRSRV_BRIDGE_IN_UNMAP_DEV_MEMORY;


/******************************************************************************
 * 'bridge in' map pages
 *****************************************************************************/
typedef struct PVRSRV_BRIDGE_IN_MAP_EXT_MEMORY_TAG
{
	IMG_HANDLE hKernelMemInfo;
	IMG_SYS_PHYADDR *psSysPAddr;
	IMG_UINT32 ui32Flags;
} PVRSRV_BRIDGE_IN_MAP_EXT_MEMORY;

/******************************************************************************
 * 'bridge in' unmap pages
 *****************************************************************************/
typedef struct PVRSRV_BRIDGE_IN_UNMAP_EXT_MEMORY_TAG
{
	IMG_UINT32 ui32BridgeFlags; /* Must be first member of structure */
	PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
	PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
	IMG_UINT32 ui32Flags;
} PVRSRV_BRIDGE_IN_UNMAP_EXT_MEMORY;

/******************************************************************************
 * 'bridge in' map device class buffer pages
 *****************************************************************************/
typedef struct PVRSRV_BRIDGE_IN_MAP_DEVICECLASS_MEMORY_TAG
{
	IMG_HANDLE hDeviceClassBuffer;
	IMG_HANDLE hDevMemContext;
} PVRSRV_BRIDGE_IN_MAP_DEVICECLASS_MEMORY;

/******************************************************************************
 * 'bridge out' map device class buffer pages
 *****************************************************************************/
typedef struct IMG_COMPAT PVRSRV_BRIDGE_OUT_MAP_DEVICECLASS_MEMORY_TAG
{
	PVRSRV_ERROR eError;
	PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
	PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
	IMG_HANDLE hKernelMemInfo;
	IMG_HANDLE hMappingInfo;
} PVRSRV_BRIDGE_OUT_MAP_DEVICECLASS_MEMORY;


/******************************************************************************
 * 'bridge in' unmap device class buffer pages
 *****************************************************************************/
typedef struct PVRSRV_BRIDGE_IN_UNMAP_DEVICECLASS_MEMORY_TAG
{
	IMG_HANDLE hKernelMemInfo;
} PVRSRV_BRIDGE_IN_UNMAP_DEVICECLASS_MEMORY;


/******************************************************************************
 * 'bridge in' pdump memory poll
 *****************************************************************************/
typedef struct IMG_COMPAT PVRSRV_BRIDGE_IN_PDUMP_MEMPOL_TAG
{
	IMG_HANDLE hKernelMemInfo;
	IMG_UINT32 ui32Offset;
	IMG_UINT32 ui32Value;
	IMG_UINT32 ui32Mask;
	PDUMP_POLL_OPERATOR eOperator;
	IMG_UINT32 ui32Flags;
} PVRSRV_BRIDGE_IN_PDUMP_MEMPOL;

/******************************************************************************
 * 'bridge in' pdump sync poll
 *****************************************************************************/
typedef struct IMG_COMPAT PVRSRV_BRIDGE_IN_PDUMP_SYNCPOL_TAG
{
	IMG_HANDLE hKernelSyncInfo;
	IMG_BOOL bIsRead;
	IMG_BOOL bUseLastOpDumpVal;
	IMG_UINT32 ui32Value;
	IMG_UINT32 ui32Mask;
} PVRSRV_BRIDGE_IN_PDUMP_SYNCPOL;


/******************************************************************************
 * 'bridge in' pdump dump memory
 *****************************************************************************/
typedef struct IMG_COMPAT PVRSRV_BRIDGE_IN_PDUMP_DUMPMEM_TAG
{
	IMG_HANDLE hLinAddr;
	IMG_HANDLE hAltLinAddr;
	IMG_HANDLE hKernelMemInfo;
	IMG_UINT32 ui32Offset;
	IMG_UINT32 ui32Bytes;
	IMG_UINT32 ui32Flags;
} PVRSRV_BRIDGE_IN_PDUMP_DUMPMEM;


/******************************************************************************
 * 'bridge in' pdump dump sync
 *****************************************************************************/
typedef struct IMG_COMPAT PVRSRV_BRIDGE_IN_PDUMP_DUMPSYNC_TAG
{
	IMG_HANDLE hAltLinAddr;
	IMG_HANDLE hKernelSyncInfo;
	IMG_UINT32 ui32Offset;
	IMG_UINT32 ui32Bytes;
} PVRSRV_BRIDGE_IN_PDUMP_DUMPSYNC;


/******************************************************************************
 * 'bridge in' pdump dump reg
 *****************************************************************************/
typedef struct IMG_COMPAT PVRSRV_BRIDGE_IN_PDUMP_DUMPREG_TAG
{
	IMG_HANDLE hDevCookie;
	PVRSRV_HWREG sHWReg;
	IMG_UINT32 ui32Flags;
	IMG_CHAR szRegRegion[32];
} PVRSRV_BRIDGE_IN_PDUMP_DUMPREG;

/******************************************************************************
 * 'bridge in' pdump reg poll
 * (original banner said "pdump dump reg" — copy/paste artefact)
 *****************************************************************************/
typedef struct IMG_COMPAT PVRSRV_BRIDGE_IN_PDUMP_REGPOL_TAG
{
	IMG_HANDLE hDevCookie;
	PVRSRV_HWREG sHWReg;
	IMG_UINT32 ui32Mask;
	IMG_UINT32 ui32Flags;
	IMG_CHAR szRegRegion[32];
} PVRSRV_BRIDGE_IN_PDUMP_REGPOL;

/******************************************************************************
 * 'bridge in' pdump dump PD reg
 *****************************************************************************/
typedef struct PVRSRV_BRIDGE_IN_PDUMP_DUMPPDREG_TAG
{
	PVRSRV_HWREG sHWReg;
	IMG_UINT32 ui32Flags;
} PVRSRV_BRIDGE_IN_PDUMP_DUMPPDREG;

/******************************************************************************
 * 'bridge in' pdump dump mem pages
 *****************************************************************************/
typedef struct IMG_COMPAT PVRSRV_BRIDGE_IN_PDUMP_MEMPAGES_TAG
{
	IMG_HANDLE hDevCookie;
	IMG_HANDLE hKernelMemInfo;
	IMG_DEV_PHYADDR *pPages;
	IMG_UINT32 ui32NumPages;
	IMG_DEV_VIRTADDR sDevVAddr;
	IMG_UINT32 ui32Start;
	IMG_UINT32 ui32Length;
	IMG_UINT32 ui32Flags;
} PVRSRV_BRIDGE_IN_PDUMP_MEMPAGES;

/******************************************************************************
 * 'bridge in' pdump dump comment
 *****************************************************************************/
typedef struct IMG_COMPAT PVRSRV_BRIDGE_IN_PDUMP_COMMENT_TAG
{
	IMG_CHAR szComment[PVRSRV_PDUMP_MAX_COMMENT_SIZE];
	IMG_UINT32 ui32Flags;
} PVRSRV_BRIDGE_IN_PDUMP_COMMENT;


/******************************************************************************
 * 'bridge in' pdump set frame
 *****************************************************************************/
typedef struct IMG_COMPAT PVRSRV_BRIDGE_IN_PDUMP_SETFRAME_TAG
{
	IMG_UINT32 ui32Frame;
} PVRSRV_BRIDGE_IN_PDUMP_SETFRAME;


/******************************************************************************
 * 'bridge in' pdump dump bitmap
 *****************************************************************************/

typedef struct IMG_COMPAT PVRSRV_BRIDGE_IN_PDUMP_BITMAP_TAG
{
	IMG_HANDLE hDevCookie;
	IMG_CHAR szFileName[PVRSRV_PDUMP_MAX_FILENAME_SIZE];
	IMG_UINT32 ui32FileOffset;
	IMG_UINT32 ui32Width;
	IMG_UINT32 ui32Height;
	IMG_UINT32 ui32StrideInBytes;
	IMG_DEV_VIRTADDR sDevBaseAddr;
	IMG_HANDLE hDevMemContext;
	IMG_UINT32 ui32Size;
	PDUMP_PIXEL_FORMAT ePixelFormat;
	PDUMP_MEM_FORMAT eMemFormat;
	IMG_UINT32 ui32Flags;
} PVRSRV_BRIDGE_IN_PDUMP_BITMAP;


/******************************************************************************
 * 'bridge in' pdump dump read reg
 *****************************************************************************/
typedef struct IMG_COMPAT PVRSRV_BRIDGE_IN_PDUMP_READREG_TAG
{
	IMG_HANDLE hDevCookie;
	IMG_CHAR szFileName[PVRSRV_PDUMP_MAX_FILENAME_SIZE];
	IMG_UINT32 ui32FileOffset;
	IMG_UINT32 ui32Address;
	IMG_UINT32 ui32Size;
	IMG_UINT32 ui32Flags;
	IMG_CHAR szRegRegion[32];
} PVRSRV_BRIDGE_IN_PDUMP_READREG;

/******************************************************************************
 * 'bridge in' pdump dump driver-info
 *****************************************************************************/
typedef struct IMG_COMPAT PVRSRV_BRIDGE_IN_PDUMP_DRIVERINFO_TAG
{
	IMG_CHAR szString[PVRSRV_PDUMP_MAX_COMMENT_SIZE];
	IMG_BOOL bContinuous;
} PVRSRV_BRIDGE_IN_PDUMP_DRIVERINFO;

/* 'bridge in' pdump dump PD device physical address */
typedef struct IMG_COMPAT PVRSRV_BRIDGE_IN_PDUMP_DUMPPDDEVPADDR_TAG
{
	IMG_HANDLE hKernelMemInfo;
	IMG_UINT32 ui32Offset;
	IMG_DEV_PHYADDR sPDDevPAddr;
} PVRSRV_BRIDGE_IN_PDUMP_DUMPPDDEVPADDR;

/******************************************************************************
 * 'bridge in' pdump cycle count register read
 * NOTE(review): the struct tag is spelled "PDUM_IN" (missing 'P'), unlike the
 * typedef name. Kept byte-identical here since the tag may be referenced
 * elsewhere — confirm before renaming.
 *****************************************************************************/
typedef struct IMG_COMPAT PVRSRV_BRIDGE_PDUM_IN_CYCLE_COUNT_REG_READ_TAG
{
	IMG_HANDLE hDevCookie;
	IMG_UINT32 ui32RegOffset;
	IMG_BOOL bLastFrame;
} PVRSRV_BRIDGE_IN_PDUMP_CYCLE_COUNT_REG_READ;

/*****************************************************************************
 * Output structures for BRIDGEs
 ****************************************************************************/

/******************************************************************************
 * 'bridge out' enum. devices
 *****************************************************************************/
typedef struct PVRSRV_BRIDGE_OUT_ENUMDEVICE_TAG
{
	PVRSRV_ERROR eError;
	IMG_UINT32 ui32NumDevices;
	PVRSRV_DEVICE_IDENTIFIER asDeviceIdentifier[PVRSRV_MAX_DEVICES];
} PVRSRV_BRIDGE_OUT_ENUMDEVICE;


/******************************************************************************
 * 'bridge out' acquire device info
 *****************************************************************************/
typedef struct IMG_COMPAT PVRSRV_BRIDGE_OUT_ACQUIRE_DEVICEINFO_TAG
{
	PVRSRV_ERROR eError;
	IMG_HANDLE hDevCookie;
} PVRSRV_BRIDGE_OUT_ACQUIRE_DEVICEINFO;


/******************************************************************************
 * 'bridge out' enum. class devices
 *****************************************************************************/
typedef struct PVRSRV_BRIDGE_OUT_ENUMCLASS_TAG
{
	PVRSRV_ERROR eError;
	IMG_UINT32 ui32NumDevices;
	IMG_UINT32 ui32DevID[PVRSRV_MAX_DEVICES];
} PVRSRV_BRIDGE_OUT_ENUMCLASS;


/******************************************************************************
 * 'bridge in' open display class devices
 *****************************************************************************/
typedef struct IMG_COMPAT PVRSRV_BRIDGE_IN_OPEN_DISPCLASS_DEVICE_TAG
{
	IMG_UINT32 ui32DeviceID;
	IMG_HANDLE hDevCookie;
} PVRSRV_BRIDGE_IN_OPEN_DISPCLASS_DEVICE;

/******************************************************************************
 * 'bridge out' open display class devices
 *****************************************************************************/
typedef struct IMG_COMPAT PVRSRV_BRIDGE_OUT_OPEN_DISPCLASS_DEVICE_TAG
{
	PVRSRV_ERROR eError;
	IMG_HANDLE hDeviceKM;
} PVRSRV_BRIDGE_OUT_OPEN_DISPCLASS_DEVICE;


/******************************************************************************
 * 'bridge in' wrap pages
 *****************************************************************************/
typedef struct PVRSRV_BRIDGE_IN_WRAP_EXT_MEMORY_TAG
{
	IMG_HANDLE hDevCookie;
	IMG_HANDLE hDevMemContext;
	IMG_VOID *pvLinAddr;
	IMG_SIZE_T uByteSize;
	IMG_SIZE_T uPageOffset;
	IMG_BOOL bPhysContig;
	IMG_UINT32 ui32NumPageTableEntries;
	IMG_SYS_PHYADDR *psSysPAddr;
	IMG_UINT32 ui32Flags;
} PVRSRV_BRIDGE_IN_WRAP_EXT_MEMORY;

/******************************************************************************
 * 'bridge out' wrap pages
 *****************************************************************************/
typedef struct IMG_COMPAT PVRSRV_BRIDGE_OUT_WRAP_EXT_MEMORY_TAG
{
	PVRSRV_ERROR eError;
	PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
	PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
} PVRSRV_BRIDGE_OUT_WRAP_EXT_MEMORY;

/******************************************************************************
 * 'bridge in' unwrap pages
 *****************************************************************************/
typedef struct PVRSRV_BRIDGE_IN_UNWRAP_EXT_MEMORY_TAG
{
	IMG_HANDLE hKernelMemInfo;
} PVRSRV_BRIDGE_IN_UNWRAP_EXT_MEMORY;


/* fixed limits for display/buffer class enumeration results */
#define PVRSRV_MAX_DC_DISPLAY_FORMATS			10
#define PVRSRV_MAX_DC_DISPLAY_DIMENSIONS		10
#define PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS			4
#define PVRSRV_MAX_DC_CLIP_RECTS				32

/******************************************************************************
 * 'bridge out' enum display class formats
 *****************************************************************************/
typedef struct IMG_COMPAT PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_FORMATS_TAG
{
	PVRSRV_ERROR eError;
	IMG_UINT32 ui32Count;
	DISPLAY_FORMAT asFormat[PVRSRV_MAX_DC_DISPLAY_FORMATS];
} PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_FORMATS;


/******************************************************************************
 * 'bridge in' enum display class dims
 * (struct continues beyond this extraction boundary)
 *****************************************************************************/
typedef struct PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_DIMS_TAG
{
	IMG_HANDLE hDeviceKM;
	DISPLAY_FORMAT sFormat;
+}PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_DIMS; + + +/****************************************************************************** + * 'bridge out' enum display class dims + *****************************************************************************/ +typedef struct PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_DIMS_TAG +{ + PVRSRV_ERROR eError; + IMG_UINT32 ui32Count; + DISPLAY_DIMS asDim[PVRSRV_MAX_DC_DISPLAY_DIMENSIONS]; +}PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_DIMS; + + +/****************************************************************************** + * 'bridge out' get display class info + *****************************************************************************/ +typedef struct PVRSRV_BRIDGE_OUT_GET_DISPCLASS_INFO_TAG +{ + PVRSRV_ERROR eError; + DISPLAY_INFO sDisplayInfo; +}PVRSRV_BRIDGE_OUT_GET_DISPCLASS_INFO; + + +/****************************************************************************** + * 'bridge out' get display class system buffer + *****************************************************************************/ +typedef struct IMG_COMPAT PVRSRV_BRIDGE_OUT_GET_DISPCLASS_SYSBUFFER_TAG +{ + PVRSRV_ERROR eError; + IMG_HANDLE hBuffer; +}PVRSRV_BRIDGE_OUT_GET_DISPCLASS_SYSBUFFER; + + +/****************************************************************************** + * 'bridge in' create swap chain + *****************************************************************************/ +typedef struct IMG_COMPAT PVRSRV_BRIDGE_IN_CREATE_DISPCLASS_SWAPCHAIN_TAG +{ + IMG_HANDLE hDeviceKM; + DISPLAY_SURF_ATTRIBUTES sDstSurfAttrib; + DISPLAY_SURF_ATTRIBUTES sSrcSurfAttrib; + IMG_UINT32 ui32Flags; + IMG_UINT32 ui32BufferCount; + IMG_UINT32 ui32OEMFlags; + IMG_UINT32 ui32SwapChainID; +#if defined(PVR_ANDROID_NATIVE_WINDOW_HAS_FENCE) + IMG_INT32 i32TimelineFd; + IMG_INT32 i32Unused; +#endif /* defined(PVR_ANDROID_NATIVE_WINDOW_HAS_FENCE) */ +} PVRSRV_BRIDGE_IN_CREATE_DISPCLASS_SWAPCHAIN; + + +/****************************************************************************** + * 'bridge out' 
create swap chain + *****************************************************************************/ +typedef struct IMG_COMPAT PVRSRV_BRIDGE_OUT_CREATE_DISPCLASS_SWAPCHAIN_TAG +{ + PVRSRV_ERROR eError; + IMG_UINT32 ui32SwapChainID; + IMG_HANDLE hSwapChain; +} PVRSRV_BRIDGE_OUT_CREATE_DISPCLASS_SWAPCHAIN; + + +/****************************************************************************** + * 'bridge in' destroy swap chain + *****************************************************************************/ +typedef struct PVRSRV_BRIDGE_IN_DESTROY_DISPCLASS_SWAPCHAIN_TAG +{ + IMG_HANDLE hDeviceKM; + IMG_HANDLE hSwapChain; +} PVRSRV_BRIDGE_IN_DESTROY_DISPCLASS_SWAPCHAIN; + + +/****************************************************************************** + * 'bridge in' set DST/SRC rect + *****************************************************************************/ +typedef struct PVRSRV_BRIDGE_IN_SET_DISPCLASS_RECT_TAG +{ + IMG_HANDLE hDeviceKM; + IMG_HANDLE hSwapChain; + IMG_RECT sRect; +} PVRSRV_BRIDGE_IN_SET_DISPCLASS_RECT; + + +/****************************************************************************** + * 'bridge in' set DST/SRC colourkey + *****************************************************************************/ +typedef struct IMG_COMPAT PVRSRV_BRIDGE_IN_SET_DISPCLASS_COLOURKEY_TAG +{ + IMG_HANDLE hDeviceKM; + IMG_HANDLE hSwapChain; + IMG_UINT32 ui32CKColour; +} PVRSRV_BRIDGE_IN_SET_DISPCLASS_COLOURKEY; + + +/****************************************************************************** + * 'bridge in' get buffers (from swapchain) + *****************************************************************************/ +typedef struct PVRSRV_BRIDGE_IN_GET_DISPCLASS_BUFFERS_TAG +{ + IMG_HANDLE hDeviceKM; + IMG_HANDLE hSwapChain; +} PVRSRV_BRIDGE_IN_GET_DISPCLASS_BUFFERS; + + +/****************************************************************************** + * 'bridge out' get buffers (from swapchain) + 
*****************************************************************************/ +typedef struct PVRSRV_BRIDGE_OUT_GET_DISPCLASS_BUFFERS_TAG +{ + PVRSRV_ERROR eError; + IMG_UINT32 ui32BufferCount; + IMG_HANDLE ahBuffer[PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS]; + IMG_SYS_PHYADDR asPhyAddr[PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS]; +} PVRSRV_BRIDGE_OUT_GET_DISPCLASS_BUFFERS; + + +/****************************************************************************** + * 'bridge in' swap to buffer + *****************************************************************************/ +typedef struct PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_BUFFER_TAG +{ + IMG_HANDLE hDeviceKM; + IMG_HANDLE hBuffer; + IMG_HANDLE hPrivateTag; + IMG_UINT32 ui32SwapInterval; + IMG_UINT32 ui32ClipRectCount; + IMG_RECT sClipRect[PVRSRV_MAX_DC_CLIP_RECTS]; +} PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_BUFFER; + + +/****************************************************************************** + * 'bridge in' swap to buffer 2 + *****************************************************************************/ +typedef struct PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_BUFFER2_TAG +{ + IMG_HANDLE hDeviceKM; + IMG_HANDLE hSwapChain; + + IMG_UINT32 ui32SwapInterval; + IMG_UINT32 ui32NumMemInfos; + IMG_HANDLE hKernelMemInfos; + IMG_HANDLE hKernelSyncInfos; + IMG_UINT32 ui32PrivDataLength; + IMG_HANDLE hPrivData; +} PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_BUFFER2; + + +/****************************************************************************** + * 'bridge out' swap to buffer 2 + *****************************************************************************/ +typedef struct IMG_COMPAT PVRSRV_BRIDGE_OUT_SWAP_DISPCLASS_TO_BUFFER2_TAG +{ + PVRSRV_ERROR eError; + IMG_HANDLE hFence; +} PVRSRV_BRIDGE_OUT_SWAP_DISPCLASS_TO_BUFFER2; + + +/****************************************************************************** + * 'bridge in' swap to system buffer (primary) + *****************************************************************************/ +typedef struct 
PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_SYSTEM_TAG +{ + IMG_HANDLE hDeviceKM; + IMG_HANDLE hSwapChain; +} PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_SYSTEM; + + +/****************************************************************************** + * 'bridge in' open buffer class device + *****************************************************************************/ +typedef struct IMG_COMPAT PVRSRV_BRIDGE_IN_OPEN_BUFFERCLASS_DEVICE_TAG +{ + IMG_UINT32 ui32DeviceID; + IMG_HANDLE hDevCookie; +} PVRSRV_BRIDGE_IN_OPEN_BUFFERCLASS_DEVICE; + + +/****************************************************************************** + * 'bridge out' open buffer class device + *****************************************************************************/ +typedef struct IMG_COMPAT PVRSRV_BRIDGE_OUT_OPEN_BUFFERCLASS_DEVICE_TAG +{ + PVRSRV_ERROR eError; + IMG_HANDLE hDeviceKM; +} PVRSRV_BRIDGE_OUT_OPEN_BUFFERCLASS_DEVICE; + + +/****************************************************************************** + * 'bridge out' get buffer class info + *****************************************************************************/ +typedef struct IMG_COMPAT PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_INFO_TAG +{ + PVRSRV_ERROR eError; + BUFFER_INFO sBufferInfo; +} PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_INFO; + + +/****************************************************************************** + * 'bridge in' get buffer class buffer + *****************************************************************************/ +typedef struct IMG_COMPAT PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_BUFFER_TAG +{ + IMG_HANDLE hDeviceKM; + IMG_UINT32 ui32BufferIndex; +} PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_BUFFER; + + +/****************************************************************************** + * 'bridge out' get buffer class buffer + *****************************************************************************/ +typedef struct IMG_COMPAT PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_BUFFER_TAG +{ + PVRSRV_ERROR eError; + IMG_HANDLE hBuffer; +} 
PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_BUFFER; + + +/****************************************************************************** + * 'bridge out' get heap info + *****************************************************************************/ +typedef struct PVRSRV_BRIDGE_OUT_GET_DEVMEM_HEAPINFO_TAG +{ + PVRSRV_ERROR eError; + IMG_UINT32 ui32ClientHeapCount; + PVRSRV_HEAP_INFO sHeapInfo[PVRSRV_MAX_CLIENT_HEAPS]; +} PVRSRV_BRIDGE_OUT_GET_DEVMEM_HEAPINFO; + + +/****************************************************************************** + * 'bridge out' create device memory context + *****************************************************************************/ +typedef struct PVRSRV_BRIDGE_OUT_CREATE_DEVMEMCONTEXT_TAG +{ + PVRSRV_ERROR eError; + IMG_UINT32 ui32ClientHeapCount; + IMG_HANDLE hDevMemContext; + PVRSRV_HEAP_INFO sHeapInfo[PVRSRV_MAX_CLIENT_HEAPS]; +} PVRSRV_BRIDGE_OUT_CREATE_DEVMEMCONTEXT; + + +/****************************************************************************** + * 'bridge out' create device memory heap + *****************************************************************************/ +typedef struct IMG_COMPAT PVRSRV_BRIDGE_OUT_CREATE_DEVMEMHEAP_TAG +{ + PVRSRV_ERROR eError; + IMG_HANDLE hDevMemHeap; +} PVRSRV_BRIDGE_OUT_CREATE_DEVMEMHEAP; + + +/****************************************************************************** + * 'bridge out' alloc device memory + *****************************************************************************/ +typedef struct IMG_COMPAT PVRSRV_BRIDGE_OUT_ALLOCDEVICEMEM_TAG +{ + PVRSRV_ERROR eError; + PVRSRV_CLIENT_MEM_INFO sClientMemInfo; + PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo; +} PVRSRV_BRIDGE_OUT_ALLOCDEVICEMEM; + + +/****************************************************************************** + * 'bridge out' free device memory + *****************************************************************************/ +typedef struct PVRSRV_BRIDGE_OUT_FREEDEVICEMEM_TAG +{ + PVRSRV_ERROR eError; +#if defined 
(PVRSRV_DEVMEM_TIME_STATS) + IMG_UINT32 ui32TimeToDevUnmap; +#endif + +} PVRSRV_BRIDGE_OUT_FREEDEVICEMEM; + + +/****************************************************************************** + * 'bridge out' export device memory + *****************************************************************************/ +typedef struct IMG_COMPAT PVRSRV_BRIDGE_OUT_EXPORTDEVICEMEM_TAG +{ + PVRSRV_ERROR eError; + IMG_HANDLE hMemInfo; +#if defined(SUPPORT_MEMINFO_IDS) + IMG_UINT64 ui64Stamp; +#endif +} PVRSRV_BRIDGE_OUT_EXPORTDEVICEMEM; + + +/****************************************************************************** + * 'bridge out' map ion handle + *****************************************************************************/ +typedef struct IMG_COMPAT _PVRSRV_BRIDGE_OUT_MAP_ION_HANDLE_ +{ + PVRSRV_ERROR eError; + PVRSRV_CLIENT_MEM_INFO sClientMemInfo; + PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo; + IMG_SIZE_T uiIonBufferSize; +} PVRSRV_BRIDGE_OUT_MAP_ION_HANDLE; + +/****************************************************************************** + * 'bridge out' map dmabuf + *****************************************************************************/ +typedef struct IMG_COMPAT _PVRSRV_BRIDGE_OUT_MAP_DMABUF_ +{ + PVRSRV_ERROR eError; + IMG_HANDLE hKernelMemInfo; + PVRSRV_CLIENT_MEM_INFO sClientMemInfo; + PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo; + IMG_SIZE_T uiSize; + IMG_SIZE_T uiOffset; +} PVRSRV_BRIDGE_OUT_MAP_DMABUF; + +/****************************************************************************** + * 'bridge out' map meminfo to user mode + *****************************************************************************/ +typedef struct PVRSRV_BRIDGE_OUT_MAPMEMINFOTOUSER_TAG +{ + PVRSRV_ERROR eError; + IMG_PVOID pvLinAddr; + IMG_HANDLE hMappingInfo; +}PVRSRV_BRIDGE_OUT_MAPMEMINFOTOUSER; + + +/****************************************************************************** + * 'bridge out' get free device memory + 
*****************************************************************************/ +typedef struct IMG_COMPAT PVRSRV_BRIDGE_OUT_GETFREEDEVICEMEM_TAG +{ + PVRSRV_ERROR eError; + IMG_SIZE_T uTotal; + IMG_SIZE_T uFree; + IMG_SIZE_T uLargestBlock; +} PVRSRV_BRIDGE_OUT_GETFREEDEVICEMEM; + + +//#ifdef LINUX +/****************************************************************************** + * 'bridge out' get full map data + *****************************************************************************/ +#include "pvrmmap.h" +typedef struct IMG_COMPAT PVRSRV_BRIDGE_OUT_MHANDLE_TO_MMAP_DATA_TAG +{ + PVRSRV_ERROR eError; + + /* This is the offset you should pass to mmap(2) so that + * the driver can look up the full details for the mapping + * request. */ + IMG_UINTPTR_T uiMMapOffset; + + /* This is the byte offset you should add to the mapping you + * get from mmap */ + IMG_UINTPTR_T uiByteOffset; + + /* This is the real size of the mapping that will be created + * which should be passed to mmap _and_ munmap. 
*/ + IMG_SIZE_T uiRealByteSize; + + /* User mode address associated with mapping */ + IMG_UINTPTR_T uiUserVAddr; + +} PVRSRV_BRIDGE_OUT_MHANDLE_TO_MMAP_DATA; + +typedef struct PVRSRV_BRIDGE_OUT_RELEASE_MMAP_DATA_TAG +{ + PVRSRV_ERROR eError; + + /* Flag that indicates whether the mapping should be destroyed */ + IMG_BOOL bMUnmap; + + /* User mode address associated with mapping */ + IMG_UINTPTR_T uiUserVAddr; + + /* Size of mapping */ + IMG_SIZE_T uiRealByteSize; +} PVRSRV_BRIDGE_OUT_RELEASE_MMAP_DATA; +//#endif + +typedef struct PVRSRV_BRIDGE_IN_COMPAT_CHECK +{ + IMG_UINT32 ui32DDKVersion; + IMG_UINT32 ui32DDKBuild; +#if defined(SUPPORT_TI_VERSION_STRING) + IMG_CHAR szTIVersion[64]; +#endif +} PVRSRV_BRIDGE_IN_COMPAT_CHECK; + +/****************************************************************************** + * 'bridge in' get misc info + *****************************************************************************/ +typedef struct IMG_COMPAT PVRSRV_BRIDGE_IN_GET_MISC_INFO_TAG +{ + PVRSRV_MISC_INFO sMiscInfo; +}PVRSRV_BRIDGE_IN_GET_MISC_INFO; + + +/****************************************************************************** + * 'bridge out' get misc info + *****************************************************************************/ +typedef struct IMG_COMPAT PVRSRV_BRIDGE_OUT_GET_MISC_INFO_TAG +{ + PVRSRV_ERROR eError; + PVRSRV_MISC_INFO sMiscInfo; +}PVRSRV_BRIDGE_OUT_GET_MISC_INFO; + + +/****************************************************************************** + * 'bridge in' release misc info + *****************************************************************************/ +typedef struct PVRSRV_BRIDGE_IN_RELEASE_MISC_INFO_TAG +{ + PVRSRV_MISC_INFO sMiscInfo; +}PVRSRV_BRIDGE_IN_RELEASE_MISC_INFO; + + +/****************************************************************************** + * 'bridge out' release misc info + *****************************************************************************/ +typedef struct PVRSRV_BRIDGE_OUT_RELEASE_MISC_INFO_TAG +{ + 
PVRSRV_ERROR eError; + PVRSRV_MISC_INFO sMiscInfo; +}PVRSRV_BRIDGE_OUT_RELEASE_MISC_INFO; + + +/****************************************************************************** + * 'bridge out' PDUMP is capturing + *****************************************************************************/ + +typedef struct PVRSRV_BRIDGE_OUT_PDUMP_ISCAPTURING_TAG +{ + PVRSRV_ERROR eError; + IMG_BOOL bIsCapturing; +} PVRSRV_BRIDGE_OUT_PDUMP_ISCAPTURING; + +/****************************************************************************** + * 'bridge in' get FB mem stats + *****************************************************************************/ +typedef struct PVRSRV_BRIDGE_IN_GET_FB_STATS_TAG +{ + IMG_SIZE_T uTotal; + IMG_SIZE_T uAvailable; +} PVRSRV_BRIDGE_IN_GET_FB_STATS; + + +/****************************************************************************** + * 'bridge in' Map CPU Physical to User Space + *****************************************************************************/ +typedef struct PVRSRV_BRIDGE_IN_MAPPHYSTOUSERSPACE_TAG +{ + IMG_HANDLE hDevCookie; + IMG_SYS_PHYADDR sSysPhysAddr; + IMG_UINT32 uiSizeInBytes; +} PVRSRV_BRIDGE_IN_MAPPHYSTOUSERSPACE; + + +/****************************************************************************** + * 'bridge out' Map CPU Physical to User Space + *****************************************************************************/ +typedef struct PVRSRV_BRIDGE_OUT_MAPPHYSTOUSERSPACE_TAG +{ + IMG_PVOID pvUserAddr; + IMG_UINT32 uiActualSize; + IMG_PVOID pvProcess; +} PVRSRV_BRIDGE_OUT_MAPPHYSTOUSERSPACE; + + +/****************************************************************************** + * 'bridge in' Unmap CPU Physical to User Space + *****************************************************************************/ +typedef struct PVRSRV_BRIDGE_IN_UNMAPPHYSTOUSERSPACE_TAG +{ + IMG_HANDLE hDevCookie; + IMG_PVOID pvUserAddr; + IMG_PVOID pvProcess; +} PVRSRV_BRIDGE_IN_UNMAPPHYSTOUSERSPACE; + + 
+/****************************************************************************** + * 'bridge out' Get user space pointer to Phys to Lin lookup table + *****************************************************************************/ +typedef struct PVRSRV_BRIDGE_OUT_GETPHYSTOUSERSPACEMAP_TAG +{ + IMG_PVOID *ppvTbl; + IMG_UINT32 uiTblSize; +} PVRSRV_BRIDGE_OUT_GETPHYSTOUSERSPACEMAP; + + +/****************************************************************************** + * 'bridge in' Register RTSIM process thread + *****************************************************************************/ +typedef struct PVRSRV_BRIDGE_IN_REGISTER_SIM_PROCESS_TAG +{ + IMG_HANDLE hDevCookie; + IMG_PVOID pvProcess; +} PVRSRV_BRIDGE_IN_REGISTER_SIM_PROCESS; + + +/****************************************************************************** + * 'bridge out' Register RTSIM process thread + *****************************************************************************/ +typedef struct PVRSRV_BRIDGE_OUT_REGISTER_SIM_PROCESS_TAG +{ + IMG_SYS_PHYADDR sRegsPhysBase; /*!< Physical address of current device register */ + IMG_VOID *pvRegsBase; /*!< User mode linear address of SGX device registers */ + IMG_PVOID pvProcess; + IMG_UINT32 ulNoOfEntries; + IMG_PVOID pvTblLinAddr; +} PVRSRV_BRIDGE_OUT_REGISTER_SIM_PROCESS; + + +/****************************************************************************** + * 'bridge in' Unregister RTSIM process thread + *****************************************************************************/ +typedef struct PVRSRV_BRIDGE_IN_UNREGISTER_SIM_PROCESS_TAG +{ + IMG_HANDLE hDevCookie; + IMG_PVOID pvProcess; + IMG_VOID *pvRegsBase; /*!< User mode linear address of SGX device registers */ + +} PVRSRV_BRIDGE_IN_UNREGISTER_SIM_PROCESS; + +/****************************************************************************** + * 'bridge in' process simulator ISR event + *****************************************************************************/ +typedef struct 
PVRSRV_BRIDGE_IN_PROCESS_SIMISR_EVENT_TAG +{ + IMG_HANDLE hDevCookie; + IMG_UINT32 ui32StatusAndMask; + PVRSRV_ERROR eError; +} PVRSRV_BRIDGE_IN_PROCESS_SIMISR_EVENT; + +/****************************************************************************** + * 'bridge in' initialisation server disconnect + *****************************************************************************/ +typedef struct IMG_COMPAT PVRSRV_BRIDGE_IN_INITSRV_DISCONNECT_TAG +{ + IMG_BOOL bInitSuccesful; +} PVRSRV_BRIDGE_IN_INITSRV_DISCONNECT; + + +typedef struct PVRSRV_BRIDGE_IN_ALLOC_SHARED_SYS_MEM_TAG +{ + IMG_UINT32 ui32Flags; + IMG_SIZE_T uSize; +}PVRSRV_BRIDGE_IN_ALLOC_SHARED_SYS_MEM; + +typedef struct PVRSRV_BRIDGE_OUT_ALLOC_SHARED_SYS_MEM_TAG +{ + PVRSRV_ERROR eError; + IMG_HANDLE hKernelMemInfo; + PVRSRV_CLIENT_MEM_INFO sClientMemInfo; +}PVRSRV_BRIDGE_OUT_ALLOC_SHARED_SYS_MEM; + +typedef struct PVRSRV_BRIDGE_IN_FREE_SHARED_SYS_MEM_TAG +{ + IMG_HANDLE hKernelMemInfo; + PVRSRV_CLIENT_MEM_INFO sClientMemInfo; +}PVRSRV_BRIDGE_IN_FREE_SHARED_SYS_MEM; + +typedef struct PVRSRV_BRIDGE_OUT_FREE_SHARED_SYS_MEM_TAG +{ + PVRSRV_ERROR eError; +}PVRSRV_BRIDGE_OUT_FREE_SHARED_SYS_MEM; + +typedef struct PVRSRV_BRIDGE_IN_MAP_MEMINFO_MEM_TAG +{ + IMG_HANDLE hKernelMemInfo; +}PVRSRV_BRIDGE_IN_MAP_MEMINFO_MEM; + +typedef struct PVRSRV_BRIDGE_OUT_MAP_MEMINFO_MEM_TAG +{ + PVRSRV_CLIENT_MEM_INFO sClientMemInfo; + PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo; + IMG_HANDLE hKernelMemInfo; + PVRSRV_ERROR eError; +}PVRSRV_BRIDGE_OUT_MAP_MEMINFO_MEM; + +typedef struct PVRSRV_BRIDGE_IN_UNMAP_MEMINFO_MEM_TAG +{ + PVRSRV_CLIENT_MEM_INFO sClientMemInfo; +}PVRSRV_BRIDGE_IN_UNMAP_MEMINFO_MEM; + +typedef struct PVRSRV_BRIDGE_OUT_UNMAP_MEMINFO_MEM_TAG +{ + PVRSRV_ERROR eError; +}PVRSRV_BRIDGE_OUT_UNMAP_MEMINFO_MEM; + +typedef struct PVRSRV_BRIDGE_IN_EVENT_OBJECT_WAI_TAG +{ + IMG_HANDLE hOSEventKM; +} PVRSRV_BRIDGE_IN_EVENT_OBJECT_WAIT; + +typedef struct PVRSRV_BRIDGE_IN_EVENT_OBJECT_OPEN_TAG +{ + PVRSRV_EVENTOBJECT sEventObject; 
+} PVRSRV_BRIDGE_IN_EVENT_OBJECT_OPEN; + +typedef struct IMG_COMPAT PVRSRV_BRIDGE_OUT_EVENT_OBJECT_OPEN_TAG +{ + IMG_HANDLE hOSEvent; + PVRSRV_ERROR eError; +} PVRSRV_BRIDGE_OUT_EVENT_OBJECT_OPEN; + +typedef struct PVRSRV_BRIDGE_IN_EVENT_OBJECT_CLOSE_TAG +{ + PVRSRV_EVENTOBJECT sEventObject; + IMG_HANDLE hOSEventKM; +} PVRSRV_BRIDGE_IN_EVENT_OBJECT_CLOSE; + +typedef struct IMG_COMPAT PVRSRV_BRIDGE_OUT_CREATE_SYNC_INFO_MOD_OBJ_TAG +{ + PVRSRV_ERROR eError; + IMG_HANDLE hKernelSyncInfoModObj; +} PVRSRV_BRIDGE_OUT_CREATE_SYNC_INFO_MOD_OBJ; + +typedef struct PVRSRV_BRIDGE_IN_DESTROY_SYNC_INFO_MOD_OBJ +{ + IMG_HANDLE hKernelSyncInfoModObj; +} PVRSRV_BRIDGE_IN_DESTROY_SYNC_INFO_MOD_OBJ; + +typedef struct IMG_COMPAT PVRSRV_BRIDGE_IN_MODIFY_PENDING_SYNC_OPS_TAG +{ + IMG_HANDLE hKernelSyncInfoModObj; + IMG_HANDLE hKernelSyncInfo; + IMG_UINT32 ui32ModifyFlags; +} PVRSRV_BRIDGE_IN_MODIFY_PENDING_SYNC_OPS; + +typedef struct PVRSRV_BRIDGE_IN_MODIFY_COMPLETE_SYNC_OPS_TAG +{ + IMG_HANDLE hKernelSyncInfoModObj; +} PVRSRV_BRIDGE_IN_MODIFY_COMPLETE_SYNC_OPS; + +typedef struct PVRSRV_BRIDGE_OUT_MODIFY_PENDING_SYNC_OPS_TAG +{ + PVRSRV_ERROR eError; + /* The following variable are used to return the PRE-INCREMENTED op vals */ + IMG_UINT32 ui32ReadOpsPending; + IMG_UINT32 ui32WriteOpsPending; + IMG_UINT32 ui32ReadOps2Pending; +} PVRSRV_BRIDGE_OUT_MODIFY_PENDING_SYNC_OPS; + +typedef struct PVRSRV_BRIDGE_IN_SYNC_OPS_TAKE_TOKEN_TAG +{ + IMG_HANDLE hKernelSyncInfo; +} PVRSRV_BRIDGE_IN_SYNC_OPS_TAKE_TOKEN; + +typedef struct PVRSRV_BRIDGE_OUT_SYNC_OPS_TAKE_TOKEN_TAG +{ + PVRSRV_ERROR eError; + IMG_UINT32 ui32ReadOpsPending; + IMG_UINT32 ui32WriteOpsPending; + IMG_UINT32 ui32ReadOps2Pending; +} PVRSRV_BRIDGE_OUT_SYNC_OPS_TAKE_TOKEN; + +typedef struct IMG_COMPAT PVRSRV_BRIDGE_IN_SYNC_OPS_FLUSH_TO_TOKEN_TAG +{ + IMG_HANDLE hKernelSyncInfo; + IMG_UINT32 ui32ReadOpsPendingSnapshot; + IMG_UINT32 ui32WriteOpsPendingSnapshot; + IMG_UINT32 ui32ReadOps2PendingSnapshot; +} 
PVRSRV_BRIDGE_IN_SYNC_OPS_FLUSH_TO_TOKEN; + +typedef struct PVRSRV_BRIDGE_IN_SYNC_OPS_FLUSH_TO_MOD_OBJ_TAG +{ + IMG_HANDLE hKernelSyncInfoModObj; +} PVRSRV_BRIDGE_IN_SYNC_OPS_FLUSH_TO_MOD_OBJ; + +typedef struct IMG_COMPAT PVRSRV_BRIDGE_IN_SYNC_OPS_FLUSH_TO_DELTA_TAG +{ + IMG_HANDLE hKernelSyncInfo; + IMG_UINT32 ui32Delta; +} PVRSRV_BRIDGE_IN_SYNC_OPS_FLUSH_TO_DELTA; + +typedef struct PVRSRV_BRIDGE_IN_ALLOC_SYNC_INFO_TAG +{ + IMG_HANDLE hDevCookie; +} PVRSRV_BRIDGE_IN_ALLOC_SYNC_INFO; + +typedef struct IMG_COMPAT PVRSRV_BRIDGE_OUT_ALLOC_SYNC_INFO_TAG +{ + PVRSRV_ERROR eError; + IMG_HANDLE hKernelSyncInfo; +} PVRSRV_BRIDGE_OUT_ALLOC_SYNC_INFO; + +typedef struct PVRSRV_BRIDGE_IN_FREE_SYNC_INFO_TAG +{ + IMG_HANDLE hKernelSyncInfo; +} PVRSRV_BRIDGE_IN_FREE_SYNC_INFO; + +typedef struct IMG_COMPAT PVRSRV_BRIDGE_IN_CHG_DEV_MEM_ATTRIBS_TAG +{ + IMG_SID hKernelMemInfo; + IMG_UINT32 ui32Attribs; +} PVRSRV_BRIDGE_IN_CHG_DEV_MEM_ATTRIBS; + + +#if defined (__cplusplus) +} +#endif + +#endif /* __PVR_BRIDGE_H__ */ + +/****************************************************************************** + End of file (pvr_bridge.h) +******************************************************************************/ + diff --git a/sgx_km/eurasia_km/services4/include/pvr_bridge_km.h b/sgx_km/eurasia_km/services4/include/pvr_bridge_km.h new file mode 100644 index 0000000..7dc4352 --- /dev/null +++ b/sgx_km/eurasia_km/services4/include/pvr_bridge_km.h @@ -0,0 +1,422 @@ +/*************************************************************************/ /*! +@Title PVR Bridge Functionality +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Header for the PVR Bridge code +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef __PVR_BRIDGE_KM_H_ +#define __PVR_BRIDGE_KM_H_ + +#if defined (__cplusplus) +extern "C" { +#endif + +#include "pvr_bridge.h" +#include "perproc.h" + +/****************************************************************************** + * Function prototypes + *****************************************************************************/ +#if defined(__linux__) +PVRSRV_ERROR LinuxBridgeInit(IMG_VOID); +IMG_VOID LinuxBridgeDeInit(IMG_VOID); + +#if defined(SUPPORT_MEMINFO_IDS) +extern IMG_UINT64 g_ui64MemInfoID; +#endif + +#endif +IMG_IMPORT +IMG_VOID IMG_CALLCONV PVRSRVCompatCheckKM(PVRSRV_BRIDGE_IN_COMPAT_CHECK *psUserModeDDKDetails, PVRSRV_BRIDGE_RETURN *psRetOUT); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVEnumerateDevicesKM(IMG_UINT32 *pui32NumDevices, + PVRSRV_DEVICE_IDENTIFIER *psDevIdList); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVAcquireDeviceDataKM(IMG_UINT32 uiDevIndex, + PVRSRV_DEVICE_TYPE eDeviceType, + IMG_HANDLE *phDevCookie); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateCommandQueueKM(IMG_SIZE_T uQueueSize, + PVRSRV_QUEUE_INFO **ppsQueueInfo); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyCommandQueueKM(PVRSRV_QUEUE_INFO *psQueueInfo); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDeviceMemHeapsKM(IMG_HANDLE hDevCookie, + PVRSRV_HEAP_INFO *psHeapInfo); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateDeviceMemContextKM(IMG_HANDLE hDevCookie, + PVRSRV_PER_PROCESS_DATA *psPerProc, + IMG_HANDLE *phDevMemContext, + IMG_UINT32 *pui32ClientHeapCount, + PVRSRV_HEAP_INFO *psHeapInfo, + IMG_BOOL *pbCreated, + IMG_BOOL *pbShared); + + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyDeviceMemContextKM(IMG_HANDLE hDevCookie, + IMG_HANDLE hDevMemContext, + IMG_BOOL *pbDestroyed); + + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDeviceMemHeapInfoKM(IMG_HANDLE hDevCookie, + IMG_HANDLE hDevMemContext, + IMG_UINT32 *pui32ClientHeapCount, + 
PVRSRV_HEAP_INFO *psHeapInfo, + IMG_BOOL *pbShared + ); + + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV _PVRSRVAllocDeviceMemKM(IMG_HANDLE hDevCookie, + PVRSRV_PER_PROCESS_DATA *psPerProc, + IMG_HANDLE hDevMemHeap, + IMG_UINT32 ui32Flags, + IMG_SIZE_T ui32Size, + IMG_SIZE_T ui32Alignment, + IMG_PVOID pvPrivData, + IMG_UINT32 ui32PrivDataLength, + IMG_UINT32 ui32ChunkSize, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 ui32NumPhysChunks, + IMG_BOOL *pabMapChunk, + PVRSRV_KERNEL_MEM_INFO **ppsMemInfo); + + +#if defined(PVRSRV_LOG_MEMORY_ALLOCS) + #define PVRSRVAllocDeviceMemKM(devCookie, perProc, devMemHeap, flags, size, alignment, privdata, privdatalength, \ + chunksize, numvirtchunks, numphyschunks, mapchunk, memInfo, logStr) \ + (PVR_TRACE(("PVRSRVAllocDeviceMemKM(" #devCookie ", " #perProc ", " #devMemHeap ", " #flags ", " #size \ + ", " #alignment "," #memInfo "): " logStr " (size = 0x%x)", size)),\ + _PVRSRVAllocDeviceMemKM(devCookie, perProc, devMemHeap, flags, size, alignment, privdata, privdatalength, \ + chunksize, numvirtchunks, numphyschunks, mapchunk, memInfo)) +#else + #define PVRSRVAllocDeviceMemKM(devCookie, perProc, devMemHeap, flags, size, alignment, privdata, privdatalength, \ + chunksize, numvirtchunks, numphyschunks, mapchunk, memInfo, logStr) \ + _PVRSRVAllocDeviceMemKM(devCookie, perProc, devMemHeap, flags, size, alignment, privdata, privdatalength, \ + chunksize, numvirtchunks, numphyschunks, mapchunk, memInfo) +#endif + + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVFreeDeviceMemKM(IMG_HANDLE hDevCookie, + PVRSRV_KERNEL_MEM_INFO *psMemInfo); + +#if defined(SUPPORT_ION) +IMG_IMPORT +PVRSRV_ERROR PVRSRVMapIonHandleKM(PVRSRV_PER_PROCESS_DATA *psPerProc, + IMG_HANDLE hDevCookie, + IMG_HANDLE hDevMemHeap, + IMG_UINT32 ui32NumFDs, + IMG_INT32 *pai32BufferFDs, + IMG_UINT32 ui32Flags, + IMG_UINT32 ui32ChunkCount, + IMG_SIZE_T *pauiOffset, + IMG_SIZE_T *pauiSize, + IMG_SIZE_T *puiIonBufferSize, + PVRSRV_KERNEL_MEM_INFO **ppsKernelMemInfo, + IMG_UINT64 
*pui64Stamp); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapIonHandleKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo); +#endif /* SUPPORT_ION */ + +#if defined(SUPPORT_DMABUF) +IMG_IMPORT +PVRSRV_ERROR PVRSRVMapDmaBufKM(PVRSRV_PER_PROCESS_DATA *psPerProc, + IMG_HANDLE hDevCookie, + IMG_HANDLE hDevMemHeap, + IMG_UINT32 ui32Flags, + IMG_INT32 ui32DmaBufFD, + IMG_SIZE_T uiDmaBufOffset, + IMG_SIZE_T uiDmaBufSize, + PVRSRV_KERNEL_MEM_INFO **ppsKernelMemInfo, + IMG_SIZE_T *puiSize, + IMG_SIZE_T *puiMemInfoOffset, + IMG_UINT64 *pui64Stamp); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapDmaBufKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo); +#endif /* SUPPORT_DMABUF */ + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVDissociateDeviceMemKM(IMG_HANDLE hDevCookie, + PVRSRV_KERNEL_MEM_INFO *psMemInfo); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVReserveDeviceVirtualMemKM(IMG_HANDLE hDevMemHeap, + IMG_DEV_VIRTADDR *psDevVAddr, + IMG_SIZE_T ui32Size, + IMG_SIZE_T ui32Alignment, + PVRSRV_KERNEL_MEM_INFO **ppsMemInfo); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVFreeDeviceVirtualMemKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVMapDeviceMemoryKM(PVRSRV_PER_PROCESS_DATA *psPerProc, + PVRSRV_KERNEL_MEM_INFO *psSrcMemInfo, + IMG_HANDLE hDstDevMemHeap, + PVRSRV_KERNEL_MEM_INFO **ppsDstMemInfo); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapDeviceMemoryKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVWrapExtMemoryKM(IMG_HANDLE hDevCookie, + PVRSRV_PER_PROCESS_DATA *psPerProc, + IMG_HANDLE hDevMemContext, + IMG_SIZE_T ui32ByteSize, + IMG_SIZE_T ui32PageOffset, + IMG_BOOL bPhysContig, + IMG_SYS_PHYADDR *psSysAddr, + IMG_VOID *pvLinAddr, + IMG_UINT32 ui32Flags, + PVRSRV_KERNEL_MEM_INFO **ppsMemInfo); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVUnwrapExtMemoryKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo); + +#if defined(SUPPORT_PVRSRV_DEVICE_CLASS) + +IMG_IMPORT +PVRSRV_ERROR PVRSRVEnumerateDCKM(PVRSRV_DEVICE_CLASS 
DeviceClass, + IMG_UINT32 *pui32DevCount, + IMG_UINT32 *pui32DevID ); + +IMG_IMPORT +PVRSRV_ERROR PVRSRVOpenDCDeviceKM(PVRSRV_PER_PROCESS_DATA *psPerProc, + IMG_UINT32 ui32DeviceID, + IMG_HANDLE hDevCookie, + IMG_HANDLE *phDeviceKM); + +IMG_IMPORT +PVRSRV_ERROR PVRSRVCloseDCDeviceKM(IMG_HANDLE hDeviceKM); + +IMG_IMPORT +PVRSRV_ERROR PVRSRVEnumDCFormatsKM(IMG_HANDLE hDeviceKM, + IMG_UINT32 *pui32Count, + DISPLAY_FORMAT *psFormat); + +IMG_IMPORT +PVRSRV_ERROR PVRSRVEnumDCDimsKM(IMG_HANDLE hDeviceKM, + DISPLAY_FORMAT *psFormat, + IMG_UINT32 *pui32Count, + DISPLAY_DIMS *psDim); + +IMG_IMPORT +PVRSRV_ERROR PVRSRVGetDCSystemBufferKM(IMG_HANDLE hDeviceKM, + IMG_HANDLE *phBuffer); + +IMG_IMPORT +PVRSRV_ERROR PVRSRVGetDCInfoKM(IMG_HANDLE hDeviceKM, + DISPLAY_INFO *psDisplayInfo); +IMG_IMPORT +PVRSRV_ERROR PVRSRVCreateDCSwapChainKM(PVRSRV_PER_PROCESS_DATA *psPerProc, + IMG_HANDLE hDeviceKM, + IMG_UINT32 ui32Flags, + DISPLAY_SURF_ATTRIBUTES *psDstSurfAttrib, + DISPLAY_SURF_ATTRIBUTES *psSrcSurfAttrib, + IMG_UINT32 ui32BufferCount, + IMG_UINT32 ui32OEMFlags, + IMG_HANDLE *phSwapChain, + IMG_UINT32 *pui32SwapChainID +#if defined(PVR_ANDROID_NATIVE_WINDOW_HAS_FENCE) + ,IMG_INT32 i32TimelineFd +#endif + ); +IMG_IMPORT +PVRSRV_ERROR PVRSRVDestroyDCSwapChainKM(IMG_HANDLE hSwapChain); +IMG_IMPORT +PVRSRV_ERROR PVRSRVSetDCDstRectKM(IMG_HANDLE hDeviceKM, + IMG_HANDLE hSwapChain, + IMG_RECT *psRect); +IMG_IMPORT +PVRSRV_ERROR PVRSRVSetDCSrcRectKM(IMG_HANDLE hDeviceKM, + IMG_HANDLE hSwapChain, + IMG_RECT *psRect); +IMG_IMPORT +PVRSRV_ERROR PVRSRVSetDCDstColourKeyKM(IMG_HANDLE hDeviceKM, + IMG_HANDLE hSwapChain, + IMG_UINT32 ui32CKColour); +IMG_IMPORT +PVRSRV_ERROR PVRSRVSetDCSrcColourKeyKM(IMG_HANDLE hDeviceKM, + IMG_HANDLE hSwapChain, + IMG_UINT32 ui32CKColour); +IMG_IMPORT +PVRSRV_ERROR PVRSRVGetDCBuffersKM(IMG_HANDLE hDeviceKM, + IMG_HANDLE hSwapChain, + IMG_UINT32 *pui32BufferCount, + IMG_HANDLE *phBuffer, + IMG_SYS_PHYADDR *psPhyAddr); +IMG_IMPORT +PVRSRV_ERROR 
PVRSRVSwapToDCBufferKM(IMG_HANDLE hDeviceKM, + IMG_HANDLE hBuffer, + IMG_UINT32 ui32SwapInterval, + IMG_HANDLE hPrivateTag, + IMG_UINT32 ui32ClipRectCount, + IMG_RECT *psClipRect); +IMG_IMPORT +PVRSRV_ERROR PVRSRVSwapToDCBuffer2KM(IMG_HANDLE hDeviceKM, + IMG_HANDLE hBuffer, + IMG_UINT32 ui32SwapInterval, + PVRSRV_KERNEL_MEM_INFO **ppsMemInfos, + PVRSRV_KERNEL_SYNC_INFO **ppsSyncInfos, + IMG_UINT32 ui32NumMemSyncInfos, + IMG_PVOID pvPrivData, + IMG_UINT32 ui32PrivDataLength, + IMG_HANDLE *phFence); +IMG_IMPORT +PVRSRV_ERROR PVRSRVSwapToDCSystemKM(IMG_HANDLE hDeviceKM, + IMG_HANDLE hSwapChain); + +IMG_IMPORT +PVRSRV_ERROR PVRSRVOpenBCDeviceKM(PVRSRV_PER_PROCESS_DATA *psPerProc, + IMG_UINT32 ui32DeviceID, + IMG_HANDLE hDevCookie, + IMG_HANDLE *phDeviceKM); +IMG_IMPORT +PVRSRV_ERROR PVRSRVCloseBCDeviceKM(IMG_HANDLE hDeviceKM); + +IMG_IMPORT +PVRSRV_ERROR PVRSRVGetBCInfoKM(IMG_HANDLE hDeviceKM, + BUFFER_INFO *psBufferInfo); +IMG_IMPORT +PVRSRV_ERROR PVRSRVGetBCBufferKM(IMG_HANDLE hDeviceKM, + IMG_UINT32 ui32BufferIndex, + IMG_HANDLE *phBuffer); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVMapDeviceClassMemoryKM(PVRSRV_PER_PROCESS_DATA *psPerProc, + IMG_HANDLE hDevMemContext, + IMG_HANDLE hDeviceClassBuffer, + PVRSRV_KERNEL_MEM_INFO **ppsMemInfo, + IMG_HANDLE *phOSMapInfo); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapDeviceClassMemoryKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo); + +#endif /* defined(SUPPORT_PVRSRV_DEVICE_CLASS) */ + +IMG_EXPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVChangeDeviceMemoryAttributesKM(IMG_HANDLE hKernelMemInfo, + IMG_UINT32 ui32Attribs); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVGetFreeDeviceMemKM(IMG_UINT32 ui32Flags, + IMG_SIZE_T *pui32Total, + IMG_SIZE_T *pui32Free, + IMG_SIZE_T *pui32LargestBlock); +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVAllocSyncInfoKM(IMG_HANDLE hDevCookie, + IMG_HANDLE hDevMemContext, + PVRSRV_KERNEL_SYNC_INFO **ppsKernelSyncInfo); +IMG_IMPORT +IMG_VOID IMG_CALLCONV 
PVRSRVAcquireSyncInfoKM(PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo);
+IMG_IMPORT
+IMG_VOID IMG_CALLCONV PVRSRVReleaseSyncInfoKM(PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo);
+
+IMG_IMPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVGetMiscInfoKM(PVRSRV_MISC_INFO *psMiscInfo);
+
+/*!
+ * *****************************************************************************
+ * @brief Allocates memory on behalf of a userspace process that is addressable
+ *        by the kernel. The memory is suitable for mapping into
+ *        user space and it is possible to entirely dissociate the memory
+ *        from the userspace process via PVRSRVDissociateSharedSysMemoryKM.
+ *
+ * @param psPerProc
+ * @param ui32Flags
+ * @param ui32Size
+ * @param ppsKernelMemInfo
+ *
+ * @return PVRSRV_ERROR
+ ********************************************************************************/
+IMG_IMPORT PVRSRV_ERROR
+PVRSRVAllocSharedSysMemoryKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
+                             IMG_UINT32 ui32Flags,
+                             IMG_SIZE_T ui32Size,
+                             PVRSRV_KERNEL_MEM_INFO **ppsKernelMemInfo);
+
+/*!
+ * *****************************************************************************
+ * @brief Frees memory allocated via PVRSRVAllocSharedSysMemoryKM (Note you must
+ *        be sure any additional kernel references you created have been
+ *        removed before freeing the memory)
+ *
+ * @param psKernelMemInfo
+ *
+ * @return PVRSRV_ERROR
+ ********************************************************************************/
+IMG_IMPORT PVRSRV_ERROR
+PVRSRVFreeSharedSysMemoryKM(PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo);
+
+/*!
+******************************************************************************
+
+ @brief Dissociates memory from the process that allocates it. Intended for
+        transferring the ownership of system memory from a particular process
+        to the kernel. Unlike PVRSRVDissociateDeviceMemKM, ownership is not
+        transferred to the kernel context, so the Resource Manager will not
+        automatically clean up such memory.
+ + @param psKernelMemInfo: + + @return PVRSRV_ERROR: +******************************************************************************/ +IMG_IMPORT PVRSRV_ERROR +PVRSRVDissociateMemFromResmanKM(PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo); + +#if defined (__cplusplus) +} +#endif + +#endif /* __PVR_BRIDGE_KM_H_ */ + +/****************************************************************************** + End of file (pvr_bridge_km.h) +******************************************************************************/ diff --git a/sgx_km/eurasia_km/services4/include/pvrmmap.h b/sgx_km/eurasia_km/services4/include/pvrmmap.h new file mode 100644 index 0000000..c18c872 --- /dev/null +++ b/sgx_km/eurasia_km/services4/include/pvrmmap.h @@ -0,0 +1,73 @@ +/*************************************************************************/ /*! +@Title Main include file for PVRMMAP library. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#ifndef __PVRMMAP_H__ +#define __PVRMMAP_H__ + +/*! + ************************************************************************** + @brief map kernel memory into user memory. + + @param hModule - a handle to the device supplying the kernel memory + @param ppvLinAddr - pointer to where the user mode address should be placed + @param pvLinAddrKM - the base of kernel address range to map + @param phMappingInfo - pointer to mapping information handle + @param hMHandle - handle associated with memory to be mapped + + @return PVRSRV_OK, or error code. 
+ ***************************************************************************/
+
+PVRSRV_ERROR PVRPMapKMem(IMG_HANDLE hModule, IMG_VOID **ppvLinAddr, IMG_VOID *pvLinAddrKM, IMG_HANDLE *phMappingInfo, IMG_HANDLE hMHandle);
+
+
+/*!
+ **************************************************************************
+ @brief Removes a kernel to userspace memory mapping.
+
+ @param hModule - a handle to the device supplying the kernel memory
+ @param hMappingInfo - mapping information handle
+ @param hMHandle - handle associated with memory to be mapped
+
+ @return IMG_BOOL indicating success or otherwise.
+ ***************************************************************************/
+IMG_BOOL PVRUnMapKMem(IMG_HANDLE hModule, IMG_HANDLE hMappingInfo, IMG_HANDLE hMHandle);
+
+#endif /* __PVRMMAP_H__ */
+
diff --git a/sgx_km/eurasia_km/services4/include/pvrsrv_errors.h b/sgx_km/eurasia_km/services4/include/pvrsrv_errors.h
new file mode 100644
index 0000000..2e95810
--- /dev/null
+++ b/sgx_km/eurasia_km/services4/include/pvrsrv_errors.h
@@ -0,0 +1,311 @@
+/*************************************************************************/ /*!
+@Title          error code to string translation utility
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    error code to string translation utility
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined (__PVRSRV_ERRORS_H__) +#define __PVRSRV_ERRORS_H__ + +#if defined (__cplusplus) +extern "C" { +#endif + +/* + NOTE: TO BE INCLUDED ONLY ONCE IN THE UM AND KM SERVICES MODULES + PROVIDES IMPLEMENTATIONS OF + + MUST BE KEPT IN SYNC WITH SERVICESEXT.H + + PVRSRVGetErrorString + PVRSRVGetErrorStringKM + Specifically, we have + + resources.c: + IMG_EXPORT + const IMG_CHAR *PVRSRVGetErrorString(PVRSRV_ERROR eError) + { + #include "pvrsrv_errors.h" + } + + pvrsrv.c: + IMG_EXPORT + const IMG_CHAR *PVRSRVGetErrorStringKM(PVRSRV_ERROR eError) + { + #include "pvrsrv_errors.h" + } +*/ + switch (eError) + { + case PVRSRV_OK: return "No Errors"; + case PVRSRV_ERROR_OUT_OF_MEMORY: return "PVRSRV_ERROR_OUT_OF_MEMORY - Unable to allocate required memory"; + case PVRSRV_ERROR_TOO_FEW_BUFFERS: return "PVRSRV_ERROR_TOO_FEW_BUFFERS"; + case PVRSRV_ERROR_INVALID_PARAMS: return "PVRSRV_ERROR_INVALID_PARAMS"; + case PVRSRV_ERROR_INIT_FAILURE: return "PVRSRV_ERROR_INIT_FAILURE"; + case PVRSRV_ERROR_CANT_REGISTER_CALLBACK: return "PVRSRV_ERROR_CANT_REGISTER_CALLBACK"; + case PVRSRV_ERROR_INVALID_DEVICE: return "PVRSRV_ERROR_INVALID_DEVICE"; + case PVRSRV_ERROR_NOT_OWNER: return "PVRSRV_ERROR_NOT_OWNER"; + case PVRSRV_ERROR_BAD_MAPPING: return "PVRSRV_ERROR_BAD_MAPPING"; + case PVRSRV_ERROR_TIMEOUT: return "PVRSRV_ERROR_TIMEOUT"; + case PVRSRV_ERROR_FLIP_CHAIN_EXISTS: return "PVRSRV_ERROR_FLIP_CHAIN_EXISTS"; + case PVRSRV_ERROR_INVALID_SWAPINTERVAL: return "PVRSRV_ERROR_INVALID_SWAPINTERVAL"; + case PVRSRV_ERROR_SCENE_INVALID: return "PVRSRV_ERROR_SCENE_INVALID"; + case PVRSRV_ERROR_STREAM_ERROR: return "PVRSRV_ERROR_STREAM_ERROR"; + case PVRSRV_ERROR_FAILED_DEPENDENCIES: return "PVRSRV_ERROR_FAILED_DEPENDENCIES"; + case PVRSRV_ERROR_CMD_NOT_PROCESSED: return "PVRSRV_ERROR_CMD_NOT_PROCESSED"; + case PVRSRV_ERROR_CMD_TOO_BIG: return "PVRSRV_ERROR_CMD_TOO_BIG"; + case 
PVRSRV_ERROR_DEVICE_REGISTER_FAILED: return "PVRSRV_ERROR_DEVICE_REGISTER_FAILED"; + case PVRSRV_ERROR_TOOMANYBUFFERS: return "PVRSRV_ERROR_TOOMANYBUFFERS"; + case PVRSRV_ERROR_NOT_SUPPORTED: return "PVRSRV_ERROR_NOT_SUPPORTED - fix"; + case PVRSRV_ERROR_PROCESSING_BLOCKED: return "PVRSRV_ERROR_PROCESSING_BLOCKED"; + + case PVRSRV_ERROR_CANNOT_FLUSH_QUEUE: return "PVRSRV_ERROR_CANNOT_FLUSH_QUEUE"; + case PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE: return "PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE"; + case PVRSRV_ERROR_CANNOT_GET_RENDERDETAILS: return "PVRSRV_ERROR_CANNOT_GET_RENDERDETAILS"; + case PVRSRV_ERROR_RETRY: return "PVRSRV_ERROR_RETRY"; + + case PVRSRV_ERROR_DDK_VERSION_MISMATCH: return "PVRSRV_ERROR_DDK_VERSION_MISMATCH"; + case PVRSRV_ERROR_BUILD_MISMATCH: return "PVRSRV_ERROR_BUILD_MISMATCH"; + case PVRSRV_ERROR_CORE_REVISION_MISMATCH: return "PVRSRV_ERROR_CORE_REVISION_MISMATCH"; + + case PVRSRV_ERROR_UPLOAD_TOO_BIG: return "PVRSRV_ERROR_UPLOAD_TOO_BIG"; + + case PVRSRV_ERROR_INVALID_FLAGS: return "PVRSRV_ERROR_INVALID_FLAGS"; + case PVRSRV_ERROR_FAILED_TO_REGISTER_PROCESS: return "PVRSRV_ERROR_FAILED_TO_REGISTER_PROCESS"; + + case PVRSRV_ERROR_UNABLE_TO_LOAD_LIBRARY: return "PVRSRV_ERROR_UNABLE_TO_LOAD_LIBRARY"; + case PVRSRV_ERROR_UNABLE_GET_FUNC_ADDR: return "PVRSRV_ERROR_UNABLE_GET_FUNC_ADDR"; + case PVRSRV_ERROR_UNLOAD_LIBRARY_FAILED: return "PVRSRV_ERROR_UNLOAD_LIBRARY_FAILED"; + + case PVRSRV_ERROR_BRIDGE_CALL_FAILED: return "PVRSRV_ERROR_BRIDGE_CALL_FAILED"; + case PVRSRV_ERROR_IOCTL_CALL_FAILED: return "PVRSRV_ERROR_IOCTL_CALL_FAILED"; + + case PVRSRV_ERROR_MMU_CONTEXT_NOT_FOUND: return "PVRSRV_ERROR_MMU_CONTEXT_NOT_FOUND"; + case PVRSRV_ERROR_BUFFER_DEVICE_NOT_FOUND: return "PVRSRV_ERROR_BUFFER_DEVICE_NOT_FOUND"; + case PVRSRV_ERROR_BUFFER_DEVICE_ALREADY_PRESENT:return "PVRSRV_ERROR_BUFFER_DEVICE_ALREADY_PRESENT"; + + case PVRSRV_ERROR_PCI_DEVICE_NOT_FOUND: return "PVRSRV_ERROR_PCI_DEVICE_NOT_FOUND"; + case PVRSRV_ERROR_PCI_CALL_FAILED: return 
"PVRSRV_ERROR_PCI_CALL_FAILED"; + case PVRSRV_ERROR_PCI_REGION_TOO_SMALL: return "PVRSRV_ERROR_PCI_REGION_TOO_SMALL"; + case PVRSRV_ERROR_PCI_REGION_UNAVAILABLE: return "PVRSRV_ERROR_PCI_REGION_UNAVAILABLE"; + case PVRSRV_ERROR_BAD_REGION_SIZE_MISMATCH: return "PVRSRV_ERROR_BAD_REGION_SIZE_MISMATCH"; + + case PVRSRV_ERROR_REGISTER_BASE_NOT_SET: return "PVRSRV_ERROR_REGISTER_BASE_NOT_SET"; + + case PVRSRV_ERROR_BM_BAD_SHAREMEM_HANDLE: return "PVRSRV_ERROR_BM_BAD_SHAREMEM_HANDLE"; + + case PVRSRV_ERROR_FAILED_TO_ALLOC_USER_MEM: return "PVRSRV_ERROR_FAILED_TO_ALLOC_USER_MEM"; + case PVRSRV_ERROR_FAILED_TO_ALLOC_VP_MEMORY: return "PVRSRV_ERROR_FAILED_TO_ALLOC_VP_MEMORY"; + case PVRSRV_ERROR_FAILED_TO_MAP_SHARED_PBDESC: return "PVRSRV_ERROR_FAILED_TO_MAP_SHARED_PBDESC"; + case PVRSRV_ERROR_FAILED_TO_GET_PHYS_ADDR: return "PVRSRV_ERROR_FAILED_TO_GET_PHYS_ADDR"; + + case PVRSRV_ERROR_FAILED_TO_ALLOC_VIRT_MEMORY: return "PVRSRV_ERROR_FAILED_TO_ALLOC_VIRT_MEMORY"; + case PVRSRV_ERROR_FAILED_TO_COPY_VIRT_MEMORY: return "PVRSRV_ERROR_FAILED_TO_COPY_VIRT_MEMORY"; + + case PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES: return "PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES"; + case PVRSRV_ERROR_FAILED_TO_FREE_PAGES: return "PVRSRV_ERROR_FAILED_TO_FREE_PAGES"; + case PVRSRV_ERROR_FAILED_TO_COPY_PAGES: return "PVRSRV_ERROR_FAILED_TO_COPY_PAGES"; + case PVRSRV_ERROR_UNABLE_TO_LOCK_PAGES: return "PVRSRV_ERROR_UNABLE_TO_LOCK_PAGES"; + case PVRSRV_ERROR_UNABLE_TO_UNLOCK_PAGES: return "PVRSRV_ERROR_UNABLE_TO_UNLOCK_PAGES"; + case PVRSRV_ERROR_STILL_MAPPED: return "PVRSRV_ERROR_STILL_MAPPED"; + case PVRSRV_ERROR_MAPPING_NOT_FOUND: return "PVRSRV_ERROR_MAPPING_NOT_FOUND"; + case PVRSRV_ERROR_PHYS_ADDRESS_EXCEEDS_32BIT: return "PVRSRV_ERROR_PHYS_ADDRESS_EXCEEDS_32BIT"; + case PVRSRV_ERROR_FAILED_TO_MAP_PAGE_TABLE: return "PVRSRV_ERROR_FAILED_TO_MAP_PAGE_TABLE"; + + case PVRSRV_ERROR_INVALID_SEGMENT_BLOCK: return "PVRSRV_ERROR_INVALID_SEGMENT_BLOCK"; + case PVRSRV_ERROR_INVALID_SGXDEVDATA: return 
"PVRSRV_ERROR_INVALID_SGXDEVDATA"; + case PVRSRV_ERROR_INVALID_DEVINFO: return "PVRSRV_ERROR_INVALID_DEVINFO"; + case PVRSRV_ERROR_INVALID_MEMINFO: return "PVRSRV_ERROR_INVALID_MEMINFO"; + case PVRSRV_ERROR_INVALID_MISCINFO: return "PVRSRV_ERROR_INVALID_MISCINFO"; + case PVRSRV_ERROR_UNKNOWN_IOCTL: return "PVRSRV_ERROR_UNKNOWN_IOCTL"; + case PVRSRV_ERROR_INVALID_CONTEXT: return "PVRSRV_ERROR_INVALID_CONTEXT"; + case PVRSRV_ERROR_UNABLE_TO_DESTROY_CONTEXT: return "PVRSRV_ERROR_UNABLE_TO_DESTROY_CONTEXT"; + case PVRSRV_ERROR_INVALID_HEAP: return "PVRSRV_ERROR_INVALID_HEAP"; + case PVRSRV_ERROR_INVALID_KERNELINFO: return "PVRSRV_ERROR_INVALID_KERNELINFO"; + case PVRSRV_ERROR_UNKNOWN_POWER_STATE: return "PVRSRV_ERROR_UNKNOWN_POWER_STATE"; + case PVRSRV_ERROR_INVALID_HANDLE_TYPE: return "PVRSRV_ERROR_INVALID_HANDLE_TYPE"; + case PVRSRV_ERROR_INVALID_WRAP_TYPE: return "PVRSRV_ERROR_INVALID_WRAP_TYPE"; + case PVRSRV_ERROR_INVALID_PHYS_ADDR: return "PVRSRV_ERROR_INVALID_PHYS_ADDR"; + case PVRSRV_ERROR_INVALID_CPU_ADDR: return "PVRSRV_ERROR_INVALID_CPU_ADDR"; + case PVRSRV_ERROR_INVALID_HEAPINFO: return "PVRSRV_ERROR_INVALID_HEAPINFO"; + case PVRSRV_ERROR_INVALID_PERPROC: return "PVRSRV_ERROR_INVALID_PERPROC"; + case PVRSRV_ERROR_FAILED_TO_RETRIEVE_HEAPINFO: return "PVRSRV_ERROR_FAILED_TO_RETRIEVE_HEAPINFO"; + case PVRSRV_ERROR_INVALID_MAP_REQUEST: return "PVRSRV_ERROR_INVALID_MAP_REQUEST"; + case PVRSRV_ERROR_INVALID_UNMAP_REQUEST: return "PVRSRV_ERROR_INVALID_UNMAP_REQUEST"; + case PVRSRV_ERROR_UNABLE_TO_FIND_MAPPING_HEAP: return "PVRSRV_ERROR_UNABLE_TO_FIND_MAPPING_HEAP"; + case PVRSRV_ERROR_MAPPING_STILL_IN_USE: return "PVRSRV_ERROR_MAPPING_STILL_IN_USE"; + + case PVRSRV_ERROR_EXCEEDED_HW_LIMITS: return "PVRSRV_ERROR_EXCEEDED_HW_LIMITS"; + case PVRSRV_ERROR_NO_STAGING_BUFFER_ALLOCATED: return "PVRSRV_ERROR_NO_STAGING_BUFFER_ALLOCATED"; + + case PVRSRV_ERROR_UNABLE_TO_CREATE_PERPROC_AREA:return "PVRSRV_ERROR_UNABLE_TO_CREATE_PERPROC_AREA"; + case 
PVRSRV_ERROR_UNABLE_TO_CREATE_EVENT: return "PVRSRV_ERROR_UNABLE_TO_CREATE_EVENT"; + case PVRSRV_ERROR_UNABLE_TO_ENABLE_EVENT: return "PVRSRV_ERROR_UNABLE_TO_ENABLE_EVENT"; + case PVRSRV_ERROR_UNABLE_TO_REGISTER_EVENT: return "PVRSRV_ERROR_UNABLE_TO_REGISTER_EVENT"; + case PVRSRV_ERROR_UNABLE_TO_DESTROY_EVENT: return "PVRSRV_ERROR_UNABLE_TO_DESTROY_EVENT"; + case PVRSRV_ERROR_UNABLE_TO_CREATE_THREAD: return "PVRSRV_ERROR_UNABLE_TO_CREATE_THREAD"; + case PVRSRV_ERROR_UNABLE_TO_CLOSE_THREAD: return "PVRSRV_ERROR_UNABLE_TO_CLOSE_THREAD"; + case PVRSRV_ERROR_THREAD_READ_ERROR: return "PVRSRV_ERROR_THREAD_READ_ERROR"; + case PVRSRV_ERROR_UNABLE_TO_REGISTER_ISR_HANDLER:return "PVRSRV_ERROR_UNABLE_TO_REGISTER_ISR_HANDLER"; + case PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR: return "PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR"; + case PVRSRV_ERROR_UNABLE_TO_UNINSTALL_ISR: return "PVRSRV_ERROR_UNABLE_TO_UNINSTALL_ISR"; + case PVRSRV_ERROR_ISR_ALREADY_INSTALLED: return "PVRSRV_ERROR_ISR_ALREADY_INSTALLED"; + case PVRSRV_ERROR_ISR_NOT_INSTALLED: return "PVRSRV_ERROR_ISR_NOT_INSTALLED"; + case PVRSRV_ERROR_UNABLE_TO_INITIALISE_INTERRUPT:return "PVRSRV_ERROR_UNABLE_TO_INITIALISE_INTERRUPT"; + case PVRSRV_ERROR_UNABLE_TO_RETRIEVE_INFO: return "PVRSRV_ERROR_UNABLE_TO_RETRIEVE_INFO"; + case PVRSRV_ERROR_UNABLE_TO_DO_BACKWARDS_BLIT: return "PVRSRV_ERROR_UNABLE_TO_DO_BACKWARDS_BLIT"; + case PVRSRV_ERROR_UNABLE_TO_CLOSE_SERVICES: return "PVRSRV_ERROR_UNABLE_TO_CLOSE_SERVICES"; + case PVRSRV_ERROR_UNABLE_TO_REGISTER_CONTEXT: return "PVRSRV_ERROR_UNABLE_TO_REGISTER_CONTEXT"; + case PVRSRV_ERROR_UNABLE_TO_REGISTER_RESOURCE: return "PVRSRV_ERROR_UNABLE_TO_REGISTER_RESOURCE"; + + case PVRSRV_ERROR_INVALID_CCB_COMMAND: return "PVRSRV_ERROR_INVALID_CCB_COMMAND"; + + case PVRSRV_ERROR_UNABLE_TO_LOCK_RESOURCE: return "PVRSRV_ERROR_UNABLE_TO_LOCK_RESOURCE"; + case PVRSRV_ERROR_INVALID_LOCK_ID: return "PVRSRV_ERROR_INVALID_LOCK_ID"; + case PVRSRV_ERROR_RESOURCE_NOT_LOCKED: return 
"PVRSRV_ERROR_RESOURCE_NOT_LOCKED"; + + case PVRSRV_ERROR_FLIP_FAILED: return "PVRSRV_ERROR_FLIP_FAILED"; + case PVRSRV_ERROR_UNBLANK_DISPLAY_FAILED: return "PVRSRV_ERROR_UNBLANK_DISPLAY_FAILED"; + + case PVRSRV_ERROR_TIMEOUT_POLLING_FOR_VALUE: return "PVRSRV_ERROR_TIMEOUT_POLLING_FOR_VALUE"; + + case PVRSRV_ERROR_CREATE_RENDER_CONTEXT_FAILED: return "PVRSRV_ERROR_CREATE_RENDER_CONTEXT_FAILED"; + case PVRSRV_ERROR_UNKNOWN_PRIMARY_FRAG: return "PVRSRV_ERROR_UNKNOWN_PRIMARY_FRAG"; + case PVRSRV_ERROR_UNEXPECTED_SECONDARY_FRAG: return "PVRSRV_ERROR_UNEXPECTED_SECONDARY_FRAG"; + case PVRSRV_ERROR_UNEXPECTED_PRIMARY_FRAG: return "PVRSRV_ERROR_UNEXPECTED_PRIMARY_FRAG"; + + case PVRSRV_ERROR_UNABLE_TO_INSERT_FENCE_ID: return "PVRSRV_ERROR_UNABLE_TO_INSERT_FENCE_ID"; + + case PVRSRV_ERROR_BLIT_SETUP_FAILED: return "PVRSRV_ERROR_BLIT_SETUP_FAILED"; + + case PVRSRV_ERROR_PDUMP_NOT_AVAILABLE: return "PVRSRV_ERROR_PDUMP_NOT_AVAILABLE"; + case PVRSRV_ERROR_PDUMP_BUFFER_FULL: return "PVRSRV_ERROR_PDUMP_BUFFER_FULL"; + case PVRSRV_ERROR_PDUMP_BUF_OVERFLOW: return "PVRSRV_ERROR_PDUMP_BUF_OVERFLOW"; + case PVRSRV_ERROR_PDUMP_NOT_ACTIVE: return "PVRSRV_ERROR_PDUMP_NOT_ACTIVE"; + case PVRSRV_ERROR_INCOMPLETE_LINE_OVERLAPS_PAGES:return "PVRSRV_ERROR_INCOMPLETE_LINE_OVERLAPS_PAGES"; + + case PVRSRV_ERROR_MUTEX_DESTROY_FAILED: return "PVRSRV_ERROR_MUTEX_DESTROY_FAILED"; + case PVRSRV_ERROR_MUTEX_INTERRUPTIBLE_ERROR: return "PVRSRV_ERROR_MUTEX_INTERRUPTIBLE_ERROR"; + + case PVRSRV_ERROR_INSUFFICIENT_SCRIPT_SPACE: return "PVRSRV_ERROR_INSUFFICIENT_SCRIPT_SPACE"; + case PVRSRV_ERROR_INSUFFICIENT_SPACE_FOR_COMMAND:return "PVRSRV_ERROR_INSUFFICIENT_SPACE_FOR_COMMAND"; + + case PVRSRV_ERROR_PROCESS_NOT_INITIALISED: return "PVRSRV_ERROR_PROCESS_NOT_INITIALISED"; + case PVRSRV_ERROR_PROCESS_NOT_FOUND: return "PVRSRV_ERROR_PROCESS_NOT_FOUND"; + case PVRSRV_ERROR_SRV_CONNECT_FAILED: return "PVRSRV_ERROR_SRV_CONNECT_FAILED"; + case PVRSRV_ERROR_SRV_DISCONNECT_FAILED: return 
"PVRSRV_ERROR_SRV_DISCONNECT_FAILED"; + case PVRSRV_ERROR_DEINT_PHASE_FAILED: return "PVRSRV_ERROR_DEINT_PHASE_FAILED"; + case PVRSRV_ERROR_INIT2_PHASE_FAILED: return "PVRSRV_ERROR_INIT2_PHASE_FAILED"; + + case PVRSRV_ERROR_NO_DC_DEVICES_FOUND: return "PVRSRV_ERROR_NO_DC_DEVICES_FOUND"; + case PVRSRV_ERROR_UNABLE_TO_OPEN_DC_DEVICE: return "PVRSRV_ERROR_UNABLE_TO_OPEN_DC_DEVICE"; + case PVRSRV_ERROR_UNABLE_TO_REMOVE_DEVICE: return "PVRSRV_ERROR_UNABLE_TO_REMOVE_DEVICE"; + case PVRSRV_ERROR_NO_DEVICEDATA_FOUND: return "PVRSRV_ERROR_NO_DEVICEDATA_FOUND"; + case PVRSRV_ERROR_NO_DEVICENODE_FOUND: return "PVRSRV_ERROR_NO_DEVICENODE_FOUND"; + case PVRSRV_ERROR_NO_CLIENTNODE_FOUND: return "PVRSRV_ERROR_NO_CLIENTNODE_FOUND"; + case PVRSRV_ERROR_FAILED_TO_PROCESS_QUEUE: return "PVRSRV_ERROR_FAILED_TO_PROCESS_QUEUE"; + + case PVRSRV_ERROR_UNABLE_TO_INIT_TASK: return "PVRSRV_ERROR_UNABLE_TO_INIT_TASK"; + case PVRSRV_ERROR_UNABLE_TO_SCHEDULE_TASK: return "PVRSRV_ERROR_UNABLE_TO_SCHEDULE_TASK"; + case PVRSRV_ERROR_UNABLE_TO_KILL_TASK: return "PVRSRV_ERROR_UNABLE_TO_KILL_TASK"; + + case PVRSRV_ERROR_UNABLE_TO_ENABLE_TIMER: return "PVRSRV_ERROR_UNABLE_TO_ENABLE_TIMER"; + case PVRSRV_ERROR_UNABLE_TO_DISABLE_TIMER: return "PVRSRV_ERROR_UNABLE_TO_DISABLE_TIMER"; + case PVRSRV_ERROR_UNABLE_TO_REMOVE_TIMER: return "PVRSRV_ERROR_UNABLE_TO_REMOVE_TIMER"; + + case PVRSRV_ERROR_UNKNOWN_PIXEL_FORMAT: return "PVRSRV_ERROR_UNKNOWN_PIXEL_FORMAT"; + case PVRSRV_ERROR_UNKNOWN_SCRIPT_OPERATION: return "PVRSRV_ERROR_UNKNOWN_SCRIPT_OPERATION"; + + case PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE: return "PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE"; + case PVRSRV_ERROR_HANDLE_NOT_ALLOCATED: return "PVRSRV_ERROR_HANDLE_NOT_ALLOCATED"; + case PVRSRV_ERROR_HANDLE_TYPE_MISMATCH: return "PVRSRV_ERROR_HANDLE_TYPE_MISMATCH"; + case PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE: return "PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE"; + case PVRSRV_ERROR_HANDLE_NOT_SHAREABLE: return "PVRSRV_ERROR_HANDLE_NOT_SHAREABLE"; + case 
PVRSRV_ERROR_HANDLE_NOT_FOUND: return "PVRSRV_ERROR_HANDLE_NOT_FOUND"; + case PVRSRV_ERROR_INVALID_SUBHANDLE: return "PVRSRV_ERROR_INVALID_SUBHANDLE"; + case PVRSRV_ERROR_HANDLE_BATCH_IN_USE: return "PVRSRV_ERROR_HANDLE_BATCH_IN_USE"; + case PVRSRV_ERROR_HANDLE_BATCH_COMMIT_FAILURE: return "PVRSRV_ERROR_HANDLE_BATCH_COMMIT_FAILURE"; + + case PVRSRV_ERROR_UNABLE_TO_CREATE_HASH_TABLE: return "PVRSRV_ERROR_UNABLE_TO_CREATE_HASH_TABLE"; + case PVRSRV_ERROR_INSERT_HASH_TABLE_DATA_FAILED:return "PVRSRV_ERROR_INSERT_HASH_TABLE_DATA_FAILED"; + + case PVRSRV_ERROR_UNSUPPORTED_BACKING_STORE: return "PVRSRV_ERROR_UNSUPPORTED_BACKING_STORE"; + case PVRSRV_ERROR_UNABLE_TO_DESTROY_BM_HEAP: return "PVRSRV_ERROR_UNABLE_TO_DESTROY_BM_HEAP"; + + case PVRSRV_ERROR_UNKNOWN_INIT_SERVER_STATE: return "PVRSRV_ERROR_UNKNOWN_INIT_SERVER_STATE"; + + case PVRSRV_ERROR_NO_FREE_DEVICEIDS_AVALIABLE: return "PVRSRV_ERROR_NO_FREE_DEVICEIDS_AVALIABLE"; + case PVRSRV_ERROR_INVALID_DEVICEID: return "PVRSRV_ERROR_INVALID_DEVICEID"; + case PVRSRV_ERROR_DEVICEID_NOT_FOUND: return "PVRSRV_ERROR_DEVICEID_NOT_FOUND"; + + case PVRSRV_ERROR_MEMORY_TEST_FAILED: return "PVRSRV_ERROR_MEMORY_TEST_FAILED"; + case PVRSRV_ERROR_CPUPADDR_TEST_FAILED: return "PVRSRV_ERROR_CPUPADDR_TEST_FAILED"; + case PVRSRV_ERROR_COPY_TEST_FAILED: return "PVRSRV_ERROR_COPY_TEST_FAILED"; + + case PVRSRV_ERROR_SEMAPHORE_NOT_INITIALISED: return "PVRSRV_ERROR_SEMAPHORE_NOT_INITIALISED"; + + case PVRSRV_ERROR_UNABLE_TO_RELEASE_CLOCK: return "PVRSRV_ERROR_UNABLE_TO_RELEASE_CLOCK"; + case PVRSRV_ERROR_CLOCK_REQUEST_FAILED: return "PVRSRV_ERROR_CLOCK_REQUEST_FAILED"; + case PVRSRV_ERROR_DISABLE_CLOCK_FAILURE: return "PVRSRV_ERROR_DISABLE_CLOCK_FAILURE"; + case PVRSRV_ERROR_UNABLE_TO_SET_CLOCK_RATE: return "PVRSRV_ERROR_UNABLE_TO_SET_CLOCK_RATE"; + case PVRSRV_ERROR_UNABLE_TO_ROUND_CLOCK_RATE: return "PVRSRV_ERROR_UNABLE_TO_ROUND_CLOCK_RATE"; + case PVRSRV_ERROR_UNABLE_TO_ENABLE_CLOCK: return "PVRSRV_ERROR_UNABLE_TO_ENABLE_CLOCK"; + case 
PVRSRV_ERROR_UNABLE_TO_GET_CLOCK: return "PVRSRV_ERROR_UNABLE_TO_GET_CLOCK"; + case PVRSRV_ERROR_UNABLE_TO_GET_PARENT_CLOCK: return "PVRSRV_ERROR_UNABLE_TO_GET_PARENT_CLOCK"; + case PVRSRV_ERROR_UNABLE_TO_GET_SYSTEM_CLOCK: return "PVRSRV_ERROR_UNABLE_TO_GET_SYSTEM_CLOCK"; + + case PVRSRV_ERROR_UNKNOWN_SGL_ERROR: return "PVRSRV_ERROR_UNKNOWN_SGL_ERROR"; + case PVRSRV_ERROR_BAD_SYNC_STATE: return "PVRSRV_ERROR_BAD_SYNC_STATE"; + + case PVRSRV_ERROR_CACHE_INVALIDATE_FAILED: return "PVRSRV_ERROR_CACHE_INVALIDATE_FAILED"; + + case PVRSRV_ERROR_FORCE_I32: return "PVRSRV_ERROR_FORCE_I32"; + + default: + return "Unknown PVRSRV error number"; + } + +#if defined (__cplusplus) +} +#endif +#endif /* __PVRSRV_ERRORS_H__ */ + +/***************************************************************************** + End of file (pvrsrv_errors.h) +*****************************************************************************/ diff --git a/sgx_km/eurasia_km/services4/include/servicesint.h b/sgx_km/eurasia_km/services4/include/servicesint.h new file mode 100644 index 0000000..ca93a12 --- /dev/null +++ b/sgx_km/eurasia_km/services4/include/servicesint.h @@ -0,0 +1,616 @@ +/*************************************************************************/ /*! +@Title Services Internal Header +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description services internal details +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined (__SERVICESINT_H__) +#define __SERVICESINT_H__ + +#if defined (__cplusplus) +extern "C" { +#endif + +#include "services.h" +#include "sysinfo.h" + +#define HWREC_DEFAULT_TIMEOUT (500) + +#define DRIVERNAME_MAXLENGTH (100) + +/* + helper macros: +*/ +#define ALIGNSIZE(size, alignshift) (((size) + ((1UL << (alignshift))-1)) & ~((1UL << (alignshift))-1)) + +#ifndef MAX +#define MAX(a,b) (((a) > (b)) ? (a) : (b)) +#endif +#ifndef MIN +#define MIN(a,b) (((a) < (b)) ? (a) : (b)) +#endif + +/* + Note: + MAX_CLEANUP_TRYS is set to try and be around the frame rate + as for every try we will kick the uKernel which we want to avoid + doing too often (as we risk flooding the uKernel trace buffer + with requests and losing important information from before the + cleanup requests started). +*/ +#define MAX_CLEANUP_TIME_US (MAX_HW_TIME_US * 4) +#define MAX_CLEANUP_TRYS 100 +#define MAX_CLEANUP_TIME_WAIT_US (MAX_CLEANUP_TIME_US/MAX_CLEANUP_TRYS) + +typedef enum _PVRSRV_MEMTYPE_ +{ + PVRSRV_MEMTYPE_UNKNOWN = 0, + PVRSRV_MEMTYPE_DEVICE = 1, + PVRSRV_MEMTYPE_DEVICECLASS = 2, + PVRSRV_MEMTYPE_WRAPPED = 3, + PVRSRV_MEMTYPE_MAPPED = 4, + PVRSRV_MEMTYPE_ION = 5, + PVRSRV_MEMTYPE_DMABUF = 6, + PVRSRV_MEMTYPE_ALLOC = 7, + PVRSRV_MEMTYPE_FREE = 8 +} PVRSRV_MEMTYPE; + +#if defined (MEM_TRACK_INFO_DEBUG) +/* Threshold on number of latest operations to track*/ +#define MAX_MEM_TRACK_OPS 512 +typedef struct _PVRSRV_MEM_TRACK_INFO_ +{ + IMG_DEV_VIRTADDR sDevVAddr; + IMG_SIZE_T uSize; + IMG_UINT32 ui32Pid; + IMG_UINT32 ui32RefCount; + PVRSRV_MEMTYPE eOp; + IMG_UINT32 ui32TimeStampUSecs; + IMG_CHAR asTaskName[128]; + IMG_CHAR heapId[128]; + struct _PVRSRV_MEM_TRACK_INFO_ *next; + struct _PVRSRV_MEM_TRACK_INFO_ *prev; + +} PVRSRV_MEM_TRACK_INFO; +#endif +/* + Kernel Memory Information structure +*/ +typedef struct _PVRSRV_KERNEL_MEM_INFO_ +{ + /* Kernel Mode CPU Virtual address */ + IMG_PVOID 
pvLinAddrKM; + + /* Device Virtual Address */ + IMG_DEV_VIRTADDR sDevVAddr; + + /* allocation flags */ + IMG_UINT32 ui32Flags; + + /* Size of the allocated buffer in bytes */ + IMG_SIZE_T uAllocSize; + + /* Internal implementation details. Do not use outside services code. */ + PVRSRV_MEMBLK sMemBlk; + + /* Address of the backup buffer used in a save/restore of the surface */ + IMG_PVOID pvSysBackupBuffer; + + /* refcount for allocation, wrapping and mapping */ + IMG_UINT32 ui32RefCount; + + /* Set when free call ocured and a mapping was still open */ + IMG_BOOL bPendingFree; + + +#if defined(SUPPORT_MEMINFO_IDS) + #if !defined(USE_CODE) + /* Globally unique "stamp" for allocation (not re-used until wrap) */ + IMG_UINT64 ui64Stamp; + #else /* !defined(USE_CODE) */ + IMG_UINT32 dummy1; + IMG_UINT32 dummy2; + #endif /* !defined(USE_CODE) */ +#endif /* defined(SUPPORT_MEMINFO_IDS) */ + + /* ptr to associated kernel sync info - NULL if no sync */ + struct _PVRSRV_KERNEL_SYNC_INFO_ *psKernelSyncInfo; + + IMG_HANDLE hIonSyncInfo; + +#if defined(SUPPORT_DMABUF) + IMG_HANDLE hDmaBufSyncInfo; +#endif + + PVRSRV_MEMTYPE memType; + + /* + To activate the "share mem workaround", add PVRSRV_MEM_XPROC to + the flags for the allocation. This will cause the "map" API to + call use Alloc Device Mem but will share the underlying memory + block and sync data. + */ + struct { + /* Record whether the workaround is active for this + allocation. The rest of the fields in this struct are + undefined unless this is true */ + IMG_BOOL bInUse; + + /* Store the device cookie handle from the original + allocation, as it is not present on the "Map" API. 
*/ + IMG_HANDLE hDevCookieInt; + + /* This is an index into a static array which store + information about the underlying allocation */ + IMG_UINT32 ui32ShareIndex; + + /* Original arguments as supplied to original + "PVRSRVAllocDeviceMem" call, such that a new call to this + function can be later constructed */ + IMG_UINT32 ui32OrigReqAttribs; + IMG_UINT32 ui32OrigReqSize; + IMG_UINT32 ui32OrigReqAlignment; + } sShareMemWorkaround; +#if defined (MEM_TRACK_INFO_DEBUG) + IMG_CHAR heapId[128]; +#endif +#if defined (PVRSRV_DEVMEM_TIME_STATS) + IMG_UINT32 ui32TimeToDevMap; + IMG_UINT32 *pui32TimeToDevUnmap; /* API user to provide space for storing "unmap" time */ +#endif +} PVRSRV_KERNEL_MEM_INFO; + + +/* + Kernel Sync Info structure +*/ +typedef struct _PVRSRV_KERNEL_SYNC_INFO_ +{ + /* kernel sync data */ + PVRSRV_SYNC_DATA *psSyncData; + + /* Device accessible WriteOp Info */ + IMG_DEV_VIRTADDR sWriteOpsCompleteDevVAddr; + + /* Device accessible ReadOp Info */ + IMG_DEV_VIRTADDR sReadOpsCompleteDevVAddr; + + /* Device accessible ReadOp Info */ + IMG_DEV_VIRTADDR sReadOps2CompleteDevVAddr; + + /* meminfo for sync data */ + PVRSRV_KERNEL_MEM_INFO *psSyncDataMemInfoKM; + + /* Reference count for deferring destruction of syncinfo when it is shared */ + /* NB: This is only done for devicemem.c (alloc/map/wrap etc), and + not (presently) for deviceclass memory */ + IMG_PVOID pvRefCount; + + /* Resman cleanup, for those created with explicit API */ + IMG_HANDLE hResItem; + + /* Unique ID of the sync object */ + IMG_UINT32 ui32UID; + +#if defined(SUPPORT_DMABUF) + IMG_HANDLE hFenceContext; +#endif + /* Pointer for list manager */ + struct _PVRSRV_KERNEL_SYNC_INFO_ *psNext; + struct _PVRSRV_KERNEL_SYNC_INFO_ **ppsThis; + +#if defined(SUPPORT_PER_SYNC_DEBUG) +#define PER_SYNC_HISTORY 10 + IMG_UINT32 ui32OperationMask; + IMG_UINT32 aui32OpInfo[PER_SYNC_HISTORY]; + IMG_UINT32 aui32ReadOpSample[PER_SYNC_HISTORY]; + IMG_UINT32 aui32WriteOpSample[PER_SYNC_HISTORY]; + IMG_UINT32 
aui32ReadOp2Sample[PER_SYNC_HISTORY]; + IMG_UINT32 ui32HistoryIndex; +#endif + +} PVRSRV_KERNEL_SYNC_INFO; + +/*! + ***************************************************************************** + * This is a device addressable version of a pvrsrv_sync_oject + * - any hw cmd may have an unlimited number of these + ****************************************************************************/ +typedef struct _PVRSRV_DEVICE_SYNC_OBJECT_ +{ + /* KEEP THESE 6 VARIABLES TOGETHER FOR UKERNEL BLOCK LOAD */ + IMG_UINT32 ui32ReadOpsPendingVal; + IMG_DEV_VIRTADDR sReadOpsCompleteDevVAddr; + IMG_UINT32 ui32WriteOpsPendingVal; + IMG_DEV_VIRTADDR sWriteOpsCompleteDevVAddr; + IMG_UINT32 ui32ReadOps2PendingVal; + IMG_DEV_VIRTADDR sReadOps2CompleteDevVAddr; +} PVRSRV_DEVICE_SYNC_OBJECT; + +/*! + ***************************************************************************** + * encapsulates a single sync object + * - any cmd may have an unlimited number of these + ****************************************************************************/ +typedef struct _PVRSRV_SYNC_OBJECT +{ + PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfoKM; + IMG_UINT32 ui32WriteOpsPending; + IMG_UINT32 ui32ReadOpsPending; + IMG_UINT32 ui32ReadOps2Pending; + +}PVRSRV_SYNC_OBJECT, *PPVRSRV_SYNC_OBJECT; + +/*! + ***************************************************************************** + * The `one size fits all' generic command. + ****************************************************************************/ +typedef struct _PVRSRV_COMMAND +{ + IMG_SIZE_T uCmdSize; /*!< total size of command */ + IMG_UINT32 ui32DevIndex; /*!< device type - 16bit enum (exported by system) */ + IMG_UINT32 CommandType; /*!< command type */ + IMG_UINT32 ui32DstSyncCount; /*!< number of dst sync objects */ + IMG_UINT32 ui32SrcSyncCount; /*!< number of src sync objects */ + PVRSRV_SYNC_OBJECT *psDstSync; /*!< dst sync ptr list, allocated on + back of this structure, i.e. 
is resident in Q */ + PVRSRV_SYNC_OBJECT *psSrcSync; /*!< src sync ptr list, allocated on + back of this structure, i.e. is resident in Q */ + IMG_SIZE_T uDataSize; /*!< Size of Cmd Data Packet + - only required in terms of allocating Q space */ + IMG_UINT32 ui32ProcessID; /*!< Process ID for debugging */ + IMG_VOID *pvData; /*!< data to be passed to Cmd Handler function, + allocated on back of this structure, i.e. is resident in Q */ + PFN_QUEUE_COMMAND_COMPLETE pfnCommandComplete; /*!< Command complete callback */ + IMG_HANDLE hCallbackData; /*!< Command complete callback data */ + +#if defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) || defined(PVR_ANDROID_NATIVE_WINDOW_HAS_FENCE) + IMG_VOID *pvCleanupFence; /*!< Sync fence to 'put' after timeline inc() */ + IMG_VOID *pvTimeline; /*!< Android sync timeline to inc() */ +#endif +}PVRSRV_COMMAND, *PPVRSRV_COMMAND; + + +/*! + ***************************************************************************** + * Circular command buffer structure forming the queue of pending commands. + * + * Queues are implemented as circular comamnd buffers (CCBs). + * The buffer is allocated as a specified size, plus the size of the largest supported command. + * The extra size allows commands to be added without worrying about wrapping around at the end. + * + * Commands are added to the CCB by client processes and consumed within + * kernel mode code running from within an L/MISR typically. + * + * The process of adding a command to a queue is as follows:- + * - A `lock' is acquired to prevent other processes from adding commands to a queue + * - Data representing the command to be executed, along with it's PVRSRV_SYNC_INFO + * dependencies is written to the buffer representing the queue at the queues + * current WriteOffset. + * - The PVRSRV_SYNC_INFO that the command depends on are updated to reflect + * the addition of the new command. + * - The WriteOffset is incremented by the size of the command added. 
+ * - If the WriteOffset now lies beyound the declared buffer size, it is + * reset to zero. + * - The semaphore is released. + * + *****************************************************************************/ +typedef struct _PVRSRV_QUEUE_INFO_ +{ + IMG_VOID *pvLinQueueKM; /*!< Pointer to the command buffer in the kernel's + address space */ + + IMG_VOID *pvLinQueueUM; /*!< Pointer to the command buffer in the user's + address space */ + + volatile IMG_SIZE_T uReadOffset; /*!< Index into the buffer at which commands are being + consumed */ + + volatile IMG_SIZE_T uWriteOffset; /*!< Index into the buffer at which commands are being + added */ + + IMG_UINT32 *pui32KickerAddrKM; /*!< kicker address in the kernel's + address space*/ + + IMG_UINT32 *pui32KickerAddrUM; /*!< kicker address in the user's + address space */ + + IMG_SIZE_T uQueueSize; /*!< Size in bytes of the buffer - excluding the safety allocation */ + + IMG_UINT32 ui32ProcessID; /*!< Process ID required by resource locking */ + + IMG_HANDLE hMemBlock[2]; + +#if defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) + IMG_UINT32 ui32FenceValue; /*!< 'Target' timeline value when fence signals */ + IMG_VOID *pvTimeline; /*!< Android struct sync_timeline object */ +#elif defined(PVR_ANDROID_NATIVE_WINDOW_HAS_FENCE) + IMG_UINT32 ui32FenceValue; /*!< 'Target' timeline value when fence signals */ + IMG_INT32 i32TimelineFd; /*!< Fd for time line */ +#endif + + struct _PVRSRV_QUEUE_INFO_ *psNextKM; /*!< The next queue in the system */ +}PVRSRV_QUEUE_INFO; + + +typedef struct _PVRSRV_HEAP_INFO_KM_ +{ + IMG_UINT32 ui32HeapID; + IMG_DEV_VIRTADDR sDevVAddrBase; + + IMG_HANDLE hDevMemHeap; + IMG_UINT32 ui32HeapByteSize; + IMG_UINT32 ui32Attribs; + IMG_UINT32 ui32XTileStride; +}PVRSRV_HEAP_INFO_KM; + + +/* + Event Object information structure +*/ +typedef struct _PVRSRV_EVENTOBJECT_KM_ +{ + /* globally unique name of the event object */ + IMG_CHAR szName[EVENTOBJNAME_MAXLENGTH]; + /* kernel specific handle for the event object 
*/ + IMG_HANDLE hOSEventKM; + +} PVRSRV_EVENTOBJECT_KM; + + +/*! + ****************************************************************************** + * Structure to retrieve misc. information from services + *****************************************************************************/ +typedef struct _PVRSRV_MISC_INFO_KM_ +{ + IMG_UINT32 ui32StateRequest; /*!< requested State Flags */ + IMG_UINT32 ui32StatePresent; /*!< Present/Valid State Flags */ + + /*!< SOC Timer register */ + IMG_VOID *pvSOCTimerRegisterKM; + IMG_VOID *pvSOCTimerRegisterUM; + IMG_HANDLE hSOCTimerRegisterOSMemHandle; + IMG_HANDLE hSOCTimerRegisterMappingInfo; + + /*!< SOC Clock Gating registers */ + IMG_VOID *pvSOCClockGateRegs; + IMG_UINT32 ui32SOCClockGateRegsSize; + + /* Memory Stats/DDK version string depending on ui32StateRequest flags */ + IMG_CHAR *pszMemoryStr; + IMG_UINT32 ui32MemoryStrLen; + + /* global event object */ + PVRSRV_EVENTOBJECT_KM sGlobalEventObject;//FIXME: should be private to services + IMG_HANDLE hOSGlobalEvent; + + /* Note: add misc. 
items as required */ + IMG_UINT32 aui32DDKVersion[4]; + + /*!< CPU cache flush controls: */ + struct + { + /*!< Defer the CPU cache op to the next HW op to be submitted (else flush now) */ + IMG_BOOL bDeferOp; + + /*!< Type of cache operation to perform */ + PVRSRV_MISC_INFO_CPUCACHEOP_TYPE eCacheOpType; + + /*!< Meminfo (or meminfo handle) to flush */ + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo; + + /*!< Offset in MemInfo to start cache op */ + IMG_VOID *pvBaseVAddr; + + /*!< Length of range to perform cache op */ + IMG_UINT32 ui32Length; + } sCacheOpCtl; + + /*!< Meminfo refcount controls: */ + struct + { + /*!< Meminfo (or meminfo handle) to get refcount for */ + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo; + + /*!< Resulting refcount */ + IMG_UINT32 ui32RefCount; + } sGetRefCountCtl; +} PVRSRV_MISC_INFO_KM; + + +/* insert command function pointer */ +typedef PVRSRV_ERROR (*PFN_INSERT_CMD) (PVRSRV_QUEUE_INFO*, + PVRSRV_COMMAND**, + IMG_UINT32, + IMG_UINT16, + IMG_UINT32, + PVRSRV_KERNEL_SYNC_INFO*[], + IMG_UINT32, + PVRSRV_KERNEL_SYNC_INFO*[], + IMG_UINT32); +/* submit command function pointer */ +typedef PVRSRV_ERROR (*PFN_SUBMIT_CMD) (PVRSRV_QUEUE_INFO*, PVRSRV_COMMAND*, IMG_BOOL); + + +/*********************************************************************** + Device Class Structures +***********************************************************************/ + +/* + Generic Device Class Buffer + - details common between DC and BC +*/ +typedef struct PVRSRV_DEVICECLASS_BUFFER_TAG +{ + PFN_GET_BUFFER_ADDR pfnGetBufferAddr; + IMG_HANDLE hDevMemContext; + IMG_HANDLE hExtDevice; + IMG_HANDLE hExtBuffer; + PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo; + IMG_UINT32 ui32MemMapRefCount; +} PVRSRV_DEVICECLASS_BUFFER; + + +/* + Common Device Class client services information structure +*/ +typedef struct PVRSRV_CLIENT_DEVICECLASS_INFO_TAG +{ + IMG_HANDLE hDeviceKM; + IMG_HANDLE hServices; +#if defined(PVR_ANDROID_NATIVE_WINDOW_HAS_FENCE) + IMG_INT32 i32TimelineFd; + IMG_INT32 
i32Unused; +#endif +} PVRSRV_CLIENT_DEVICECLASS_INFO; + + +typedef enum +{ + PVRSRV_FREE_CALLBACK_ORIGIN_ALLOCATOR, + PVRSRV_FREE_CALLBACK_ORIGIN_IMPORTER, + PVRSRV_FREE_CALLBACK_ORIGIN_EXTERNAL, +} +PVRSRV_FREE_CALLBACK_ORIGIN; + + +IMG_IMPORT +PVRSRV_ERROR FreeMemCallBackCommon(PVRSRV_KERNEL_MEM_INFO *psMemInfo, + IMG_UINT32 ui32Param, + PVRSRV_FREE_CALLBACK_ORIGIN eCallbackOrigin); + + +IMG_IMPORT +PVRSRV_ERROR PVRSRVQueueCommand(IMG_HANDLE hQueueInfo, + PVRSRV_COMMAND *psCommand); + + +/*! + * ***************************************************************************** + * @Description Allocates system memory on behalf of a userspace process that is + * addressable by the kernel; suitable for mapping into the current + * user space process; suitable for mapping into other userspace + * processes and it is possible to entirely disassociate the system + * memory from the current userspace process via a call to + * PVRSRVDissociateSharedSysMemoryKM. + * + * @Input psConnection + * @Input ui32Flags + * @Input ui32Size + * @Output ppsClientMemInfo + * + * @Return PVRSRV_ERROR + ********************************************************************************/ +IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV +PVRSRVAllocSharedSysMem(const PVRSRV_CONNECTION *psConnection, + IMG_UINT32 ui32Flags, + IMG_SIZE_T uSize, + PVRSRV_CLIENT_MEM_INFO **ppsClientMemInfo); + +/*! + * ***************************************************************************** + * @Description Frees memory allocated via PVRSRVAllocSharedMemory (Note you must + * be sure any additional kernel references you created have been + * removed before freeing the memory) + * + * @Input psConnection + * @Input psClientMemInfo + * + * @Return PVRSRV_ERROR + ********************************************************************************/ +IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV +PVRSRVFreeSharedSysMem(const PVRSRV_CONNECTION *psConnection, + PVRSRV_CLIENT_MEM_INFO *psClientMemInfo); + +/*! 
+ * ***************************************************************************** + * @Description Removes any userspace reference to the shared system memory, except + * that the memory will remain registered with the services resource + * manager so if the process dies/exits the actuall shared memory will + * still be freed. + * If you need to move ownership of shared memory from userspace + * to kernel space then before unrefing a shared piece of memory you can + * take a copy of psClientMemInfo->hKernelMemInfo; call + * PVRSRVUnrefSharedSysMem; then use some mechanism (specialised bridge + * function) to request that the kernel remove any resource manager + * reference to the shared memory and assume responsaility for the meminfo + * in one atomic operation. (Note to aid with such a kernel space bridge + * function see PVRSRVDissociateSharedSysMemoryKM) + * + * @Input psConnection + * @Input psClientMemInfo + * + * @Return PVRSRV_ERROR + ********************************************************************************/ +IMG_IMPORT PVRSRV_ERROR +PVRSRVUnrefSharedSysMem(const PVRSRV_CONNECTION *psConnection, + PVRSRV_CLIENT_MEM_INFO *psClientMemInfo); + +/*! + * ***************************************************************************** + * @Description For shared system or device memory that is owned by the kernel, you can + * use this function to map the underlying memory into a client using a + * handle for the KernelMemInfo. 
+ * + * @Input psConnection + * @Input hKernelMemInfo + * @Output ppsClientMemInfo + * + * @Return PVRSRV_ERROR + ********************************************************************************/ +IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV +PVRSRVMapMemInfoMem(const PVRSRV_CONNECTION *psConnection, + IMG_HANDLE hKernelMemInfo, + PVRSRV_CLIENT_MEM_INFO **ppsClientMemInfo); +#if defined(MEM_TRACK_INFO_DEBUG) +IMG_IMPORT IMG_VOID PVRSRVPrintMemTrackInfo(IMG_UINT32 ui32FaultAddr); +IMG_IMPORT IMG_VOID PVRSRVAddMemTrackInfo(PVRSRV_MEM_TRACK_INFO *psMemTrackInfo); +IMG_IMPORT IMG_VOID PVRSRVFreeMemOps(IMG_VOID); +#endif + + +#if defined (__cplusplus) +} +#endif +#endif /* __SERVICESINT_H__ */ + +/***************************************************************************** + End of file (servicesint.h) +*****************************************************************************/ diff --git a/sgx_km/eurasia_km/services4/include/sgx_bridge.h b/sgx_km/eurasia_km/services4/include/sgx_bridge.h new file mode 100644 index 0000000..93b38f9 --- /dev/null +++ b/sgx_km/eurasia_km/services4/include/sgx_bridge.h @@ -0,0 +1,577 @@ +/*************************************************************************/ /*! +@Title SGX Bridge Functionality +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Header for the sgx Brdige code +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(__SGX_BRIDGE_H__) +#define __SGX_BRIDGE_H__ + +#include "sgxapi_km.h" +#include "sgxinfo.h" +#include "pvr_bridge.h" + +#if defined (__cplusplus) +extern "C" { +#endif + + +/* + * Bridge Cmd Ids + */ + +/* *REMEMBER* to update PVRSRV_BRIDGE_LAST_SGX_CMD if you add/remove a command! + * Also you need to ensure all PVRSRV_BRIDGE_SGX_CMD_BASE+ offsets are sequential! + */ + +#define PVRSRV_BRIDGE_SGX_CMD_BASE (PVRSRV_BRIDGE_LAST_NON_DEVICE_CMD+1) +#define PVRSRV_BRIDGE_SGX_GETCLIENTINFO PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+0) +#define PVRSRV_BRIDGE_SGX_RELEASECLIENTINFO PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+1) +#define PVRSRV_BRIDGE_SGX_GETINTERNALDEVINFO PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+2) +#define PVRSRV_BRIDGE_SGX_DOKICK PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+3) +#define PVRSRV_BRIDGE_SGX_GETPHYSPAGEADDR PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+4) +#define PVRSRV_BRIDGE_SGX_READREGISTRYDWORD PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+5) + +#define PVRSRV_BRIDGE_SGX_2DQUERYBLTSCOMPLETE PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+9) + +#if defined(TRANSFER_QUEUE) +#define PVRSRV_BRIDGE_SGX_SUBMITTRANSFER PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+13) +#endif +#define PVRSRV_BRIDGE_SGX_GETMISCINFO PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+14) +#define PVRSRV_BRIDGE_SGXINFO_FOR_SRVINIT PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+15) +#define PVRSRV_BRIDGE_SGX_DEVINITPART2 PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+16) + +#define PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+17) +#define PVRSRV_BRIDGE_SGX_UNREFSHAREDPBDESC PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+18) +#define PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+19) +#define PVRSRV_BRIDGE_SGX_REGISTER_HW_RENDER_CONTEXT PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+20) +#define PVRSRV_BRIDGE_SGX_FLUSH_HW_RENDER_TARGET PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+21) +#define 
PVRSRV_BRIDGE_SGX_UNREGISTER_HW_RENDER_CONTEXT PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+22) +#if defined(SGX_FEATURE_2D_HARDWARE) +#define PVRSRV_BRIDGE_SGX_SUBMIT2D PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+23) +#define PVRSRV_BRIDGE_SGX_REGISTER_HW_2D_CONTEXT PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+24) +#define PVRSRV_BRIDGE_SGX_UNREGISTER_HW_2D_CONTEXT PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+25) +#endif +#define PVRSRV_BRIDGE_SGX_REGISTER_HW_TRANSFER_CONTEXT PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+26) +#define PVRSRV_BRIDGE_SGX_UNREGISTER_HW_TRANSFER_CONTEXT PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+27) + +#define PVRSRV_BRIDGE_SGX_SCHEDULE_PROCESS_QUEUES PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+28) + +#define PVRSRV_BRIDGE_SGX_READ_HWPERF_CB PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+29) +#define PVRSRV_BRIDGE_SGX_SET_RENDER_CONTEXT_PRIORITY PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+30) +#define PVRSRV_BRIDGE_SGX_SET_TRANSFER_CONTEXT_PRIORITY PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+31) + +#if defined(PDUMP) +#define PVRSRV_BRIDGE_SGX_PDUMP_BUFFER_ARRAY PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+32) +#define PVRSRV_BRIDGE_SGX_PDUMP_3D_SIGNATURE_REGISTERS PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+33) +#define PVRSRV_BRIDGE_SGX_PDUMP_COUNTER_REGISTERS PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+34) +#define PVRSRV_BRIDGE_SGX_PDUMP_TA_SIGNATURE_REGISTERS PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+35) +#define PVRSRV_BRIDGE_SGX_PDUMP_HWPERFCB PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+36) +#define PVRSRV_BRIDGE_SGX_PDUMP_SAVEMEM PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+37) +#endif + + + +/* *REMEMBER* to update PVRSRV_BRIDGE_LAST_SGX_CMD if you add/remove a command! + * You need to ensure all PVRSRV_BRIDGE_SGX_CMD_BASE+ offsets are sequential! 
+ */ +#define PVRSRV_BRIDGE_LAST_SGX_CMD (PVRSRV_BRIDGE_SGX_CMD_BASE+37) + +/***************************************************************************** + * Input structures for IOCTL/DRVESC + *****************************************************************************/ + +/*! + ***************************************************************************** + * `bridge in' SGX Get Phys Page Addr + *****************************************************************************/ +typedef struct PVRSRV_BRIDGE_IN_GETPHYSPAGEADDR +{ + IMG_HANDLE hDevMemHeap; + IMG_DEV_VIRTADDR sDevVAddr; +}PVRSRV_BRIDGE_IN_GETPHYSPAGEADDR; + +/*! + ***************************************************************************** + * `bridge out' SGX Get Phys Page Addr + *****************************************************************************/ +typedef struct PVRSRV_BRIDGE_OUT_GETPHYSPAGEADDR +{ + PVRSRV_ERROR eError; + IMG_DEV_PHYADDR DevPAddr; + IMG_CPU_PHYADDR CpuPAddr; +}PVRSRV_BRIDGE_OUT_GETPHYSPAGEADDR; + +/*! + ***************************************************************************** + * `bridge in' set transfer context priority + *****************************************************************************/ +typedef struct PVRSRV_BRIDGE_IN_SGX_SET_TRANSFER_CONTEXT_PRIORITY_TAG + { + IMG_HANDLE hDevCookie; + IMG_HANDLE hHWTransferContext; + IMG_UINT32 ui32Priority; + IMG_UINT32 ui32OffsetOfPriorityField; +}PVRSRV_BRIDGE_IN_SGX_SET_TRANSFER_CONTEXT_PRIORITY; + +/*! + ***************************************************************************** + * `bridge in' set render context priority + *****************************************************************************/ +typedef struct PVRSRV_BRIDGE_IN_SGX_SET_RENDER_CONTEXT_PRIORITY_TAG +{ + IMG_HANDLE hDevCookie; + IMG_HANDLE hHWRenderContext; + IMG_UINT32 ui32Priority; + IMG_UINT32 ui32OffsetOfPriorityField; +}PVRSRV_BRIDGE_IN_SGX_SET_RENDER_CONTEXT_PRIORITY; + +/*! 
+ ***************************************************************************** + * `bridge in' Get Client Info + *****************************************************************************/ +typedef struct PVRSRV_BRIDGE_IN_GETCLIENTINFO_TAG +{ + IMG_HANDLE hDevCookie; +}PVRSRV_BRIDGE_IN_GETCLIENTINFO; + +/*! + ***************************************************************************** + * `bridge out' Get internal device info + *****************************************************************************/ +typedef struct IMG_COMPAT PVRSRV_BRIDGE_OUT_GETINTERNALDEVINFO_TAG +{ + SGX_INTERNAL_DEVINFO sSGXInternalDevInfo; + PVRSRV_ERROR eError; +}PVRSRV_BRIDGE_OUT_GETINTERNALDEVINFO; + +/*! + ***************************************************************************** + * `bridge in' Get internal device info + *****************************************************************************/ +typedef struct PVRSRV_BRIDGE_IN_GETINTERNALDEVINFO_TAG +{ + IMG_HANDLE hDevCookie; +}PVRSRV_BRIDGE_IN_GETINTERNALDEVINFO; + +/*! + ***************************************************************************** + * `bridge out' Get Client Info + *****************************************************************************/ +typedef struct PVRSRV_BRIDGE_OUT_GETCLIENTINFO_TAG +{ + SGX_CLIENT_INFO sClientInfo; + PVRSRV_ERROR eError; +}PVRSRV_BRIDGE_OUT_GETCLIENTINFO; + +/*! + ***************************************************************************** + * `bridge in' Release Client Info + *****************************************************************************/ +typedef struct PVRSRV_BRIDGE_IN_RELEASECLIENTINFO_TAG +{ + IMG_HANDLE hDevCookie; +}PVRSRV_BRIDGE_IN_RELEASECLIENTINFO; + +/*! 
+ ***************************************************************************** + * `bridge in' Pdump ISP mem Pol + *****************************************************************************/ +typedef struct PVRSRV_BRIDGE_IN_ISPBREAKPOLL_TAG +{ + IMG_HANDLE hDevCookie; +}PVRSRV_BRIDGE_IN_ISPBREAKPOLL; + +/*! + ***************************************************************************** + * `bridge in' KickTA + *****************************************************************************/ +typedef struct PVRSRV_BRIDGE_IN_DOKICK_TAG +{ + IMG_HANDLE hDevCookie; + SGX_CCB_KICK sCCBKick; +}PVRSRV_BRIDGE_IN_DOKICK; + +/*! + ***************************************************************************** + * `bridge in' SGXScheduleProcessQueues + *****************************************************************************/ +typedef struct PVRSRV_BRIDGE_IN_SGX_SCHEDULE_PROCESS_QUEUES_TAG +{ + IMG_HANDLE hDevCookie; +}PVRSRV_BRIDGE_IN_SGX_SCHEDULE_PROCESS_QUEUES; + + +#if defined(TRANSFER_QUEUE) +/*! + ***************************************************************************** + * `bridge in' SubmitTransfer + *****************************************************************************/ +typedef struct IMG_COMPAT PVRSRV_BRIDGE_IN_SUBMITTRANSFER_TAG +{ + IMG_HANDLE hDevCookie; + PVRSRV_TRANSFER_SGX_KICK sKick; +}PVRSRV_BRIDGE_IN_SUBMITTRANSFER; + +#if defined(SGX_FEATURE_2D_HARDWARE) +/*! + ***************************************************************************** + * `bridge in' Submit2D + *****************************************************************************/ +typedef struct IMG_COMPAT PVRSRV_BRIDGE_IN_SUBMIT2D_TAG +{ + IMG_UINT32 ui32BridgeFlags; /* Must be first member of structure */ + IMG_HANDLE hDevCookie; + PVRSRV_2D_SGX_KICK sKick; +} PVRSRV_BRIDGE_IN_SUBMIT2D; +#endif +#endif + +/*! 
+ ***************************************************************************** + * `bridge in' ReadRegistryString + *****************************************************************************/ +typedef struct PVRSRV_BRIDGE_IN_READREGDWORD_TAG +{ + IMG_HANDLE hDevCookie; + IMG_PCHAR pszKey; + IMG_PCHAR pszValue; +}PVRSRV_BRIDGE_IN_READREGDWORD; + +/*! + ***************************************************************************** + * `bridge out' ReadRegistryString + *****************************************************************************/ +typedef struct PVRSRV_BRIDGE_OUT_READREGDWORD_TAG +{ + PVRSRV_ERROR eError; + IMG_UINT32 ui32Data; +}PVRSRV_BRIDGE_OUT_READREGDWORD; + + +/*! + ***************************************************************************** + * `bridge in' SGXGetMiscInfo + *****************************************************************************/ +typedef struct PVRSRV_BRIDGE_IN_SGXGETMISCINFO_TAG +{ + IMG_HANDLE hDevCookie; + IMG_HANDLE hMiscInfo; +}PVRSRV_BRIDGE_IN_SGXGETMISCINFO; + +/*! + ***************************************************************************** + * `bridge in' SGXGetInfoForSrvInit + *****************************************************************************/ +typedef struct PVRSRV_BRIDGE_IN_SGXINFO_FOR_SRVINIT_TAG +{ + IMG_HANDLE hDevCookie; +}PVRSRV_BRIDGE_IN_SGXINFO_FOR_SRVINIT; + +/*! + ***************************************************************************** + * `bridge out' SGXGetInfoForSrvInit + *****************************************************************************/ +typedef struct IMG_COMPAT PVRSRV_BRIDGE_OUT_SGXINFO_FOR_SRVINIT_TAG +{ + PVRSRV_ERROR eError; + SGX_BRIDGE_INFO_FOR_SRVINIT sInitInfo; +}PVRSRV_BRIDGE_OUT_SGXINFO_FOR_SRVINIT; + +/*! 
+ ***************************************************************************** + * `bridge in' SGXDevInitPart2 + *****************************************************************************/ +typedef struct IMG_COMPAT PVRSRV_BRIDGE_IN_SGXDEVINITPART2_TAG +{ + IMG_HANDLE hDevCookie; + SGX_BRIDGE_INIT_INFO sInitInfo; +}PVRSRV_BRIDGE_IN_SGXDEVINITPART2; + +/*! + ***************************************************************************** + * `bridge out' SGXDevInitPart2 + *****************************************************************************/ +typedef struct PVRSRV_BRIDGE_OUT_SGXDEVINITPART2_TAG +{ + PVRSRV_ERROR eError; + IMG_UINT32 ui32KMBuildOptions; +}PVRSRV_BRIDGE_OUT_SGXDEVINITPART2; + +/*! + ***************************************************************************** + * `bridge in' 2D query blits complete + *****************************************************************************/ +typedef struct IMG_COMPAT PVRSRV_BRIDGE_IN_2DQUERYBLTSCOMPLETE_TAG +{ + IMG_HANDLE hDevCookie; + IMG_HANDLE hKernSyncInfo; + IMG_BOOL bWaitForComplete; +}PVRSRV_BRIDGE_IN_2DQUERYBLTSCOMPLETE; + + +#define PVRSRV_BRIDGE_SGX_SHAREDPBDESC_MAX_SUBMEMINFOS 10 + +typedef struct PVRSRV_BRIDGE_IN_SGXFINDSHAREDPBDESC_TAG +{ + IMG_HANDLE hDevCookie; + IMG_BOOL bLockOnFailure; + IMG_UINT32 ui32TotalPBSize; +}PVRSRV_BRIDGE_IN_SGXFINDSHAREDPBDESC; + +typedef struct PVRSRV_BRIDGE_OUT_SGXFINDSHAREDPBDESC_TAG +{ + IMG_HANDLE hKernelMemInfo; + IMG_HANDLE hSharedPBDesc; + IMG_HANDLE hSharedPBDescKernelMemInfoHandle; + IMG_HANDLE hHWPBDescKernelMemInfoHandle; + IMG_HANDLE hBlockKernelMemInfoHandle; + IMG_HANDLE hHWBlockKernelMemInfoHandle; + IMG_HANDLE ahSharedPBDescSubKernelMemInfoHandles[PVRSRV_BRIDGE_SGX_SHAREDPBDESC_MAX_SUBMEMINFOS]; + IMG_UINT32 ui32SharedPBDescSubKernelMemInfoHandlesCount; + PVRSRV_ERROR eError; +}PVRSRV_BRIDGE_OUT_SGXFINDSHAREDPBDESC; + +typedef struct PVRSRV_BRIDGE_IN_SGXUNREFSHAREDPBDESC_TAG +{ + IMG_HANDLE hSharedPBDesc; 
+}PVRSRV_BRIDGE_IN_SGXUNREFSHAREDPBDESC; + +typedef struct PVRSRV_BRIDGE_OUT_SGXUNREFSHAREDPBDESC_TAG +{ + PVRSRV_ERROR eError; +}PVRSRV_BRIDGE_OUT_SGXUNREFSHAREDPBDESC; + + +typedef struct PVRSRV_BRIDGE_IN_SGXADDSHAREDPBDESC_TAG +{ + IMG_UINT32 ui32TotalPBSize; + IMG_HANDLE hDevCookie; + IMG_HANDLE hSharedPBDescKernelMemInfo; + IMG_HANDLE hHWPBDescKernelMemInfo; + IMG_HANDLE hBlockKernelMemInfo; + IMG_HANDLE hHWBlockKernelMemInfo; + IMG_HANDLE *phKernelMemInfoHandles; + IMG_UINT32 ui32KernelMemInfoHandlesCount; + IMG_DEV_VIRTADDR sHWPBDescDevVAddr; +}PVRSRV_BRIDGE_IN_SGXADDSHAREDPBDESC; + +typedef struct PVRSRV_BRIDGE_OUT_SGXADDSHAREDPBDESC_TAG +{ + PVRSRV_ERROR eError; + IMG_HANDLE hSharedPBDesc; +}PVRSRV_BRIDGE_OUT_SGXADDSHAREDPBDESC; + + +#ifdef PDUMP +typedef struct IMG_COMPAT PVRSRV_BRIDGE_IN_PDUMP_BUFFER_ARRAY_TAG +{ + IMG_HANDLE hBufferArray; + IMG_UINT32 ui32BufferArrayLength; + IMG_BOOL bDumpPolls; +} PVRSRV_BRIDGE_IN_PDUMP_BUFFER_ARRAY; + +typedef struct IMG_COMPAT PVRSRV_BRIDGE_IN_PDUMP_3D_SIGNATURE_REGISTERS_TAG +{ + IMG_HANDLE hDevCookie; + IMG_HANDLE hDevMemContext; + IMG_UINT32 ui32DumpFrameNum; + IMG_BOOL bLastFrame; + IMG_HANDLE hRegisters; + IMG_UINT32 ui32NumRegisters; +}PVRSRV_BRIDGE_IN_PDUMP_3D_SIGNATURE_REGISTERS; + +typedef struct IMG_COMPAT PVRSRV_BRIDGE_IN_PDUMPCOUNTER_REGISTERS_TAG +{ + IMG_HANDLE hDevCookie; + IMG_UINT32 ui32DumpFrameNum; + IMG_BOOL bLastFrame; + IMG_HANDLE hRegisters; + IMG_UINT32 ui32NumRegisters; +}PVRSRV_BRIDGE_IN_PDUMP_COUNTER_REGISTERS; + +typedef struct IMG_COMPAT PVRSRV_BRIDGE_IN_PDUMP_TA_SIGNATURE_REGISTERS_TAG +{ + IMG_HANDLE hDevCookie; + IMG_UINT32 ui32DumpFrameNum; + IMG_UINT32 ui32TAKickCount; + IMG_BOOL bLastFrame; + IMG_HANDLE hRegisters; + IMG_UINT32 ui32NumRegisters; +}PVRSRV_BRIDGE_IN_PDUMP_TA_SIGNATURE_REGISTERS; + +typedef struct IMG_COMPAT PVRSRV_BRIDGE_IN_PDUMP_HWPERFCB_TAG +{ + IMG_HANDLE hDevCookie; + IMG_HANDLE hDevMemContext; + IMG_CHAR szFileName[PVRSRV_PDUMP_MAX_FILENAME_SIZE]; + IMG_UINT32 
ui32FileOffset; + IMG_UINT32 ui32PDumpFlags; + +}PVRSRV_BRIDGE_IN_PDUMP_HWPERFCB; + +typedef struct IMG_COMPAT PVRSRV_BRIDGE_IN_PDUMP_SAVEMEM +{ + IMG_HANDLE hDevCookie; + IMG_CHAR szFileName[PVRSRV_PDUMP_MAX_FILENAME_SIZE]; + IMG_UINT32 ui32FileOffset; + IMG_DEV_VIRTADDR sDevVAddr; + IMG_UINT32 ui32Size; + IMG_HANDLE hDevMemContext; + IMG_UINT32 ui32PDumpFlags; + +}PVRSRV_BRIDGE_IN_PDUMP_SAVEMEM; + +#endif + +typedef struct IMG_COMPAT PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_RENDER_CONTEXT_TAG +{ + IMG_HANDLE hDevCookie; + IMG_HANDLE pHWRenderContextCpuVAddr; + IMG_UINT32 ui32HWRenderContextSize; + IMG_UINT32 ui32OffsetToPDDevPAddr; + IMG_HANDLE hDevMemContext; +}PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_RENDER_CONTEXT; + +typedef struct PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_RENDER_CONTEXT_TAG +{ + PVRSRV_ERROR eError; + IMG_DEV_VIRTADDR sHWRenderContextDevVAddr; + IMG_HANDLE hHWRenderContext; +}PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_RENDER_CONTEXT; + +typedef struct IMG_COMPAT PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_RENDER_CONTEXT_TAG +{ + IMG_BOOL bForceCleanup; + IMG_HANDLE hDevCookie; + IMG_HANDLE hHWRenderContext; +}PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_RENDER_CONTEXT; + +typedef struct IMG_COMPAT PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_TRANSFER_CONTEXT_TAG +{ + IMG_HANDLE hDevCookie; + IMG_HANDLE pHWTransferContextCpuVAddr; + IMG_UINT32 ui32HWTransferContextSize; + IMG_UINT32 ui32OffsetToPDDevPAddr; + IMG_HANDLE hDevMemContext; +}PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_TRANSFER_CONTEXT; + +typedef struct PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_TRANSFER_CONTEXT_TAG +{ + PVRSRV_ERROR eError; + IMG_DEV_VIRTADDR sHWTransferContextDevVAddr; + IMG_HANDLE hHWTransferContext; +}PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_TRANSFER_CONTEXT; + +typedef struct IMG_COMPAT PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_TRANSFER_CONTEXT_TAG +{ + IMG_BOOL bForceCleanup; + IMG_HANDLE hDevCookie; + IMG_HANDLE hHWTransferContext; +}PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_TRANSFER_CONTEXT; + +typedef struct IMG_COMPAT 
PVRSRV_BRIDGE_IN_SGX_FLUSH_HW_RENDER_TARGET_TAG +{ + IMG_HANDLE hDevCookie; + IMG_DEV_VIRTADDR sHWRTDataSetDevVAddr; +}PVRSRV_BRIDGE_IN_SGX_FLUSH_HW_RENDER_TARGET; + +/*! + ***************************************************************************** + * SGX 2D specific defines + *****************************************************************************/ +#if defined(SGX_FEATURE_2D_HARDWARE) +typedef struct IMG_COMPAT PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_2D_CONTEXT_TAG +{ + IMG_UINT32 ui32BridgeFlags; /* Must be first member of structure */ + IMG_HANDLE hDevCookie; + IMG_HANDLE hHW2DContextCpuVAddr; + IMG_UINT32 ui32HW2DContextSize; + IMG_UINT32 ui32OffsetToPDDevPAddr; + IMG_HANDLE hDevMemContext; +}PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_2D_CONTEXT; + +typedef struct IMG_COMPAT PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_2D_CONTEXT_TAG +{ + PVRSRV_ERROR eError; + IMG_HANDLE hHW2DContext; + IMG_DEV_VIRTADDR sHW2DContextDevVAddr; +}PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_2D_CONTEXT; + +typedef struct IMG_COMPAT PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_2D_CONTEXT_TAG +{ + IMG_UINT32 ui32BridgeFlags; /* Must be first member of structure */ + IMG_BOOL bForceCleanup; + IMG_HANDLE hDevCookie; + IMG_HANDLE hHW2DContext; +}PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_2D_CONTEXT; + +#define SGX2D_MAX_BLT_CMD_SIZ 256 /* Maximum size of a blit command, in bytes */ +#endif /* SGX_FEATURE_2D_HARDWARE */ + + +/*! + ***************************************************************************** + * `bridge in' SGXReadHWPerfCB + *****************************************************************************/ +typedef struct IMG_COMPAT PVRSRV_BRIDGE_IN_SGX_READ_HWPERF_CB_TAG +{ + IMG_HANDLE hDevCookie; + IMG_UINT32 ui32ArraySize; + IMG_HANDLE hHWPerfCBData; /* PVRSRV_SGX_HWPERF_CB_ENTRY* */ +} PVRSRV_BRIDGE_IN_SGX_READ_HWPERF_CB; + +/*! 
+ ***************************************************************************** + * `bridge out' SGXReadHWPerfCB + *****************************************************************************/ +typedef struct PVRSRV_BRIDGE_OUT_SGX_READ_HWPERF_CB_TAG +{ + PVRSRV_ERROR eError; + IMG_UINT32 ui32DataCount; + IMG_UINT32 ui32ClockSpeed; + IMG_UINT32 ui32HostTimeStamp; +} PVRSRV_BRIDGE_OUT_SGX_READ_HWPERF_CB; + +#if defined (__cplusplus) +} +#endif + +#endif /* __SGX_BRIDGE_H__ */ + diff --git a/sgx_km/eurasia_km/services4/include/sgx_mkif_km.h b/sgx_km/eurasia_km/services4/include/sgx_mkif_km.h new file mode 100644 index 0000000..f67ceaa --- /dev/null +++ b/sgx_km/eurasia_km/services4/include/sgx_mkif_km.h @@ -0,0 +1,476 @@ +/*************************************************************************/ /*! +@Title SGX microkernel interface structures used by srvkm +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description SGX microkernel interface structures used by srvkm +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined (__SGX_MKIF_KM_H__) +#define __SGX_MKIF_KM_H__ + +#include "img_types.h" +#include "servicesint.h" +#include "sgxapi_km.h" + + +#if !defined (SGX_MP_CORE_SELECT) +/* MP register control macros */ +#if defined(SGX_FEATURE_MP) + #define SGX_REG_BANK_SHIFT (14) + #define SGX_REG_BANK_SIZE (1 << SGX_REG_BANK_SHIFT) + #define SGX_REG_BANK_BASE_INDEX (2) + #define SGX_REG_BANK_MASTER_INDEX (1) + #define SGX_MP_CORE_SELECT(x,i) (x + ((i + SGX_REG_BANK_BASE_INDEX) * SGX_REG_BANK_SIZE)) + #define SGX_MP_MASTER_SELECT(x) (x + (SGX_REG_BANK_MASTER_INDEX * SGX_REG_BANK_SIZE)) +#else + #define SGX_MP_CORE_SELECT(x,i) (x) +#endif /* SGX_FEATURE_MP */ +#endif + + +/*! 
+ ****************************************************************************** + * CCB command structure for SGX + *****************************************************************************/ +typedef struct _SGXMKIF_COMMAND_ +{ + IMG_UINT32 ui32ServiceAddress; /*!< address of the USE command handler */ + IMG_UINT32 ui32CacheControl; /*!< See SGXMKIF_CC_INVAL_* */ + IMG_UINT32 ui32Data[6]; /*!< array of other command control words */ +} SGXMKIF_COMMAND; + + +/*! + ****************************************************************************** + * CCB array of commands for SGX + *****************************************************************************/ +typedef struct _PVRSRV_SGX_KERNEL_CCB_ +{ + SGXMKIF_COMMAND asCommands[256]; /*!< array of commands */ +} PVRSRV_SGX_KERNEL_CCB; + + +/*! + ****************************************************************************** + * CCB control for SGX + *****************************************************************************/ +typedef struct _PVRSRV_SGX_CCB_CTL_ +{ + IMG_UINT32 ui32WriteOffset; /*!< write offset into array of commands (MUST be aligned to 16 bytes!) */ + IMG_UINT32 ui32ReadOffset; /*!< read offset into array of commands */ +} PVRSRV_SGX_CCB_CTL; + + +/*! 
+ ***************************************************************************** + * Control data for SGX + *****************************************************************************/ +typedef struct _SGXMKIF_HOST_CTL_ +{ +#if defined(PVRSRV_USSE_EDM_BREAKPOINTS) + IMG_UINT32 ui32BreakpointDisable; + IMG_UINT32 ui32Continue; +#endif + + volatile IMG_UINT32 ui32InitStatus; /*!< Microkernel Initialisation status */ + volatile IMG_UINT32 ui32PowerStatus; /*!< Microkernel Power Management status */ + volatile IMG_UINT32 ui32CleanupStatus; /*!< Microkernel Resource Management status */ +#if defined(FIX_HW_BRN_28889) + volatile IMG_UINT32 ui32InvalStatus; /*!< Microkernel BIF Cache Invalidate status */ +#endif +#if defined(SUPPORT_HW_RECOVERY) + IMG_UINT32 ui32uKernelDetectedLockups; /*!< counter relating to the number of lockups the uKernel has detected */ + IMG_UINT32 ui32HostDetectedLockups; /*!< counter relating to the number of lockups the host has detected */ + IMG_UINT32 ui32HWRecoverySampleRate; /*!< SGX lockup detection rate (in multiples of the timer period) */ +#endif /* SUPPORT_HW_RECOVERY*/ + IMG_UINT32 ui32uKernelTimerClock; /*!< SGX ukernel timer period (in clocks) */ + IMG_UINT32 ui32ActivePowManSampleRate; /*!< SGX Active Power latency period (in multiples of the timer period) */ + IMG_UINT32 ui32InterruptFlags; /*!< Interrupt flags - PVRSRV_USSE_EDM_INTERRUPT_xxx */ + IMG_UINT32 ui32InterruptClearFlags; /*!< Interrupt clear flags - PVRSRV_USSE_EDM_INTERRUPT_xxx */ + IMG_UINT32 ui32BPSetClearSignal; /*!< Breakpoint set/cear signal */ + + IMG_UINT32 ui32NumActivePowerEvents; /*!< counter for the number of active power events */ + + IMG_UINT32 ui32TimeWraps; /*!< to count time wraps in the Timer task*/ + IMG_UINT32 ui32HostClock; /*!< Host clock value at microkernel power-up time */ + IMG_UINT32 ui32AssertFail; /*!< Microkernel assert failure code */ + +#if defined(SGX_FEATURE_EXTENDED_PERF_COUNTERS) + IMG_UINT32 
aui32PerfGroup[PVRSRV_SGX_HWPERF_NUM_COUNTERS]; /*!< Specifies the HW's active group selectors */ + IMG_UINT32 aui32PerfBit[PVRSRV_SGX_HWPERF_NUM_COUNTERS]; /*!< Specifies the HW's active bit selectors */ + IMG_UINT32 ui32PerfCounterBitSelect; /*!< Specifies the HW's counter bit selectors */ + IMG_UINT32 ui32PerfSumMux; /*!< Specifies the HW's sum_mux selectors */ +#else + IMG_UINT32 ui32PerfGroup; /*!< Specifies the HW's active group */ +#endif /* SGX_FEATURE_EXTENDED_PERF_COUNTERS */ + + IMG_UINT32 ui32OpenCLDelayCount; /* Counter to keep track OpenCL task completion time in units of regular task time out events */ + IMG_UINT32 ui32InterruptCount; +#if defined(SUPPORT_PVRSRV_ANDROID_SYSTRACE) && defined(EUR_CR_TIMER) + volatile IMG_UINT32 ui32SystraceIndex; /*!< Current index for systrace */ + volatile IMG_UINT32 ui32SGXPoweredOn; /*!< if SGX is powered on */ + volatile IMG_UINT32 ui32TicksAtPowerUp; /*!< if SGX is powered on */ +#endif +} SGXMKIF_HOST_CTL; + +/* + * TA queue Kick flags + */ +/* Set in DoKickKM to indicate the command is ready to be processed */ +#define SGXMKIF_CMDTA_CTRLFLAGS_READY 0x00000001 +/*! + ****************************************************************************** + * Shared TA command structure. + * This structure is part of the TA command structure proper (SGXMKIF_CMDTA), + * and is accessed from the kernel part of the driver and the microkernel. + * There shouldn't be a need to access it from user space. 
+ *****************************************************************************/ +typedef struct _SGXMKIF_CMDTA_SHARED_ +{ + IMG_UINT32 ui32CtrlFlags; + + IMG_UINT32 ui32NumTAStatusVals; + IMG_UINT32 ui32Num3DStatusVals; + + /* KEEP THESE 4 VARIABLES TOGETHER FOR UKERNEL BLOCK LOAD */ + IMG_UINT32 ui32TATQSyncWriteOpsPendingVal; + IMG_DEV_VIRTADDR sTATQSyncWriteOpsCompleteDevVAddr; + IMG_UINT32 ui32TATQSyncReadOpsPendingVal; + IMG_DEV_VIRTADDR sTATQSyncReadOpsCompleteDevVAddr; + + /* KEEP THESE 4 VARIABLES TOGETHER FOR UKERNEL BLOCK LOAD */ + IMG_UINT32 ui323DTQSyncWriteOpsPendingVal; + IMG_DEV_VIRTADDR s3DTQSyncWriteOpsCompleteDevVAddr; + IMG_UINT32 ui323DTQSyncReadOpsPendingVal; + IMG_DEV_VIRTADDR s3DTQSyncReadOpsCompleteDevVAddr; + + /* sync criteria used for TA/3D dependency synchronisation */ + PVRSRV_DEVICE_SYNC_OBJECT sTA3DDependency; + + /* source dependency details */ + IMG_UINT32 ui32NumSrcSyncs; + PVRSRV_DEVICE_SYNC_OBJECT asSrcSyncs[SGX_MAX_SRC_SYNCS_TA]; + + CTL_STATUS sCtlTAStatusInfo[SGX_MAX_TA_STATUS_VALS]; + CTL_STATUS sCtl3DStatusInfo[SGX_MAX_3D_STATUS_VALS]; + +} SGXMKIF_CMDTA_SHARED; + +/* + * Services internal TQ limits + */ +#define SGXTQ_MAX_STATUS SGX_MAX_TRANSFER_STATUS_VALS + 2 + +/* + * Transfer queue Kick flags + */ +/* if set the uKernel won't update the sync objects on completion*/ +#define SGXMKIF_TQFLAGS_NOSYNCUPDATE 0x00000001 +/* if set the kernel won't advance the pending values*/ +#define SGXMKIF_TQFLAGS_KEEPPENDING 0x00000002 +/* in services equivalent for the same client flags*/ +#define SGXMKIF_TQFLAGS_TATQ_SYNC 0x00000004 +#define SGXMKIF_TQFLAGS_3DTQ_SYNC 0x00000008 +#if defined(SGX_FEATURE_FAST_RENDER_CONTEXT_SWITCH) +#define SGXMKIF_TQFLAGS_CTXSWITCH 0x00000010 +#endif +/* if set uKernel only updates syncobjects / status values*/ +#define SGXMKIF_TQFLAGS_DUMMYTRANSFER 0x00000020 + +/*! + ****************************************************************************** + * Shared Transfer Queue command structure. 
+ * This structure is placed at the start of the TQ command structure proper + * (SGXMKIF_TRANSFERCMD), and is accessed from the kernel part of the driver + * and the microkernel. + *****************************************************************************/ +typedef struct _SGXMKIF_TRANSFERCMD_SHARED_ +{ + /* need to be able to check read and write ops on src, and update reads */ + + IMG_UINT32 ui32NumSrcSyncs; + PVRSRV_DEVICE_SYNC_OBJECT asSrcSyncs[SGX_MAX_SRC_SYNCS_TQ]; + /* need to be able to check reads and writes on dest, and update writes */ + + IMG_UINT32 ui32NumDstSyncs; + PVRSRV_DEVICE_SYNC_OBJECT asDstSyncs[SGX_MAX_DST_SYNCS_TQ]; + /* KEEP THESE 4 VARIABLES TOGETHER FOR UKERNEL BLOCK LOAD */ + IMG_UINT32 ui32TASyncWriteOpsPendingVal; + IMG_DEV_VIRTADDR sTASyncWriteOpsCompleteDevVAddr; + IMG_UINT32 ui32TASyncReadOpsPendingVal; + IMG_DEV_VIRTADDR sTASyncReadOpsCompleteDevVAddr; + + /* KEEP THESE 4 VARIABLES TOGETHER FOR UKERNEL BLOCK LOAD */ + IMG_UINT32 ui323DSyncWriteOpsPendingVal; + IMG_DEV_VIRTADDR s3DSyncWriteOpsCompleteDevVAddr; + IMG_UINT32 ui323DSyncReadOpsPendingVal; + IMG_DEV_VIRTADDR s3DSyncReadOpsCompleteDevVAddr; + + IMG_UINT32 ui32NumStatusVals; + CTL_STATUS sCtlStatusInfo[SGXTQ_MAX_STATUS]; +} SGXMKIF_TRANSFERCMD_SHARED, *PSGXMKIF_TRANSFERCMD_SHARED; + + +#if defined(SGX_FEATURE_2D_HARDWARE) +typedef struct _SGXMKIF_2DCMD_SHARED_ { + /* need to be able to check read and write ops on src, and update reads */ + IMG_UINT32 ui32NumSrcSync; + PVRSRV_DEVICE_SYNC_OBJECT sSrcSyncData[SGX_MAX_2D_SRC_SYNC_OPS]; + + /* need to be able to check reads and writes on dest, and update writes */ + PVRSRV_DEVICE_SYNC_OBJECT sDstSyncData; + + /* need to be able to check reads and writes on TA ops, and update writes */ + PVRSRV_DEVICE_SYNC_OBJECT sTASyncData; + + /* need to be able to check reads and writes on 2D ops, and update writes */ + PVRSRV_DEVICE_SYNC_OBJECT s3DSyncData; + + IMG_UINT32 ui32NumStatusVals; + CTL_STATUS sCtlStatusInfo[SGXTQ_MAX_STATUS]; 
+} SGXMKIF_2DCMD_SHARED, *PSGXMKIF_2DCMD_SHARED; +#endif /* SGX_FEATURE_2D_HARDWARE */ + + +typedef struct _SGXMKIF_HWDEVICE_SYNC_LIST_ +{ + IMG_DEV_VIRTADDR sAccessDevAddr; + IMG_UINT32 ui32NumSyncObjects; + /* Must be the last variable in the structure */ + PVRSRV_DEVICE_SYNC_OBJECT asSyncData[1]; +} SGXMKIF_HWDEVICE_SYNC_LIST, *PSGXMKIF_HWDEVICE_SYNC_LIST; + + +/*! + ***************************************************************************** + * Microkernel initialisation status + *****************************************************************************/ +#define PVRSRV_USSE_EDM_INIT_COMPLETE (1UL << 0) /*!< ukernel initialisation complete */ + +/*! + ***************************************************************************** + * Microkernel power status definitions + *****************************************************************************/ +#define PVRSRV_USSE_EDM_POWMAN_IDLE_COMPLETE (1UL << 2) /*!< Signal from ukernel->Host indicating SGX is idle */ +#define PVRSRV_USSE_EDM_POWMAN_POWEROFF_COMPLETE (1UL << 3) /*!< Signal from ukernel->Host indicating SGX can be powered down */ +#define PVRSRV_USSE_EDM_POWMAN_POWEROFF_RESTART_IMMEDIATE (1UL << 4) /*!< Signal from ukernel->Host indicating there is work to be done immediately */ +#define PVRSRV_USSE_EDM_POWMAN_NO_WORK (1UL << 5) /*!< Signal from ukernel->Host indicating nothing to do */ + +/*! + ***************************************************************************** + * EDM interrupt defines + *****************************************************************************/ +#define PVRSRV_USSE_EDM_INTERRUPT_HWR (1UL << 0) /*!< EDM requesting hardware recovery */ +#define PVRSRV_USSE_EDM_INTERRUPT_ACTIVE_POWER (1UL << 1) /*!< EDM requesting to be powered down */ +#define PVRSRV_USSE_EDM_INTERRUPT_IDLE (1UL << 2) /*!< EDM indicating SGX idle */ + +/*! 
+ ***************************************************************************** + * EDM Resource management defines + *****************************************************************************/ +#define PVRSRV_USSE_EDM_CLEANUPCMD_COMPLETE (1UL << 0) /*!< Signal from EDM->Host indicating clean-up request completion */ +#define PVRSRV_USSE_EDM_CLEANUPCMD_BUSY (1UL << 1) /*!< Signal from EDM->Host indicating clean-up is blocked as the resource is busy */ +#define PVRSRV_USSE_EDM_CLEANUPCMD_DONE (1UL << 2) /*!< Signal from EDM->Host indicating clean-up has been done */ + +#if defined(FIX_HW_BRN_28889) +/*! + ***************************************************************************** + * EDM BIF Cache Invalidate defines + *****************************************************************************/ +#define PVRSRV_USSE_EDM_BIF_INVAL_COMPLETE (1UL << 0) /*!< Signal from EDM->Host indicating the BIF invalidate has started */ +#endif + +/*! + **************************************************************************** + * EDM / uKernel Get misc info defines + **************************************************************************** + */ +#define PVRSRV_USSE_MISCINFO_READY 0x1UL +#define PVRSRV_USSE_MISCINFO_GET_STRUCT_SIZES 0x2UL /*!< If set, getmiscinfo ukernel func returns structure sizes */ +#if defined(SUPPORT_SGX_EDM_MEMORY_DEBUG) +#define PVRSRV_USSE_MISCINFO_MEMREAD 0x4UL /*!< If set, getmiscinfo ukernel func reads arbitrary device mem */ +#define PVRSRV_USSE_MISCINFO_MEMWRITE 0x8UL /*!< If set, getmiscinfo ukernel func writes arbitrary device mem */ +#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS) +#define PVRSRV_USSE_MISCINFO_MEMREAD_FAIL 0x1UL << 31 /* If set, ukernel was unable to read from the mem context */ +#endif +#endif + + +/* Cleanup command control word */ +#define PVRSRV_CLEANUPCMD_RT 0x1U +#define PVRSRV_CLEANUPCMD_RC 0x2U +#define PVRSRV_CLEANUPCMD_TC 0x3U +#define PVRSRV_CLEANUPCMD_2DC 0x4U +#define PVRSRV_CLEANUPCMD_PB 0x5U + +/* Power 
command control word */ +#define PVRSRV_POWERCMD_POWEROFF 0x1U +#define PVRSRV_POWERCMD_IDLE 0x2U +#define PVRSRV_POWERCMD_RESUME 0x3U + +/* Context suspend command control word */ +#define PVRSRV_CTXSUSPCMD_SUSPEND 0x1U +#define PVRSRV_CTXSUSPCMD_RESUME 0x2U + + +#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS) +#define SGX_BIF_DIR_LIST_INDEX_EDM (SGX_FEATURE_BIF_NUM_DIRLISTS - 1) +#else +#define SGX_BIF_DIR_LIST_INDEX_EDM (0) +#endif + +/*! + ****************************************************************************** + * microkernel cache control requests + ******************************************************************************/ +#define SGXMKIF_CC_INVAL_BIF_PT 0x1 +#define SGXMKIF_CC_INVAL_BIF_PD 0x2 +#define SGXMKIF_CC_INVAL_BIF_SL 0x4 +#define SGXMKIF_CC_INVAL_DATA 0x8 + + +/*! + ****************************************************************************** + * SGX microkernel interface structure sizes + ******************************************************************************/ +typedef struct _SGX_MISCINFO_STRUCT_SIZES_ +{ +#if defined (SGX_FEATURE_2D_HARDWARE) + IMG_UINT32 ui32Sizeof_2DCMD; + IMG_UINT32 ui32Sizeof_2DCMD_SHARED; +#endif + IMG_UINT32 ui32Sizeof_CMDTA; + IMG_UINT32 ui32Sizeof_CMDTA_SHARED; + IMG_UINT32 ui32Sizeof_TRANSFERCMD; + IMG_UINT32 ui32Sizeof_TRANSFERCMD_SHARED; + IMG_UINT32 ui32Sizeof_3DREGISTERS; + IMG_UINT32 ui32Sizeof_HWPBDESC; + IMG_UINT32 ui32Sizeof_HWRENDERCONTEXT; + IMG_UINT32 ui32Sizeof_HWRENDERDETAILS; + IMG_UINT32 ui32Sizeof_HWRTDATA; + IMG_UINT32 ui32Sizeof_HWRTDATASET; + IMG_UINT32 ui32Sizeof_HWTRANSFERCONTEXT; + IMG_UINT32 ui32Sizeof_HOST_CTL; + IMG_UINT32 ui32Sizeof_COMMAND; +#if defined(USE_64BIT_COMPAT) + IMG_UINT32 ui32Padding; +#endif +} SGX_MISCINFO_STRUCT_SIZES; + + +#if defined(SUPPORT_SGX_EDM_MEMORY_DEBUG) +/*! 
+ ***************************************************************************** + * SGX misc info for accessing device memory from ukernel + ***************************************************************************** + */ +typedef struct _PVRSRV_SGX_MISCINFO_MEMACCESS +{ + IMG_DEV_VIRTADDR sDevVAddr; /*!< dev virtual addr for mem access */ + IMG_DEV_PHYADDR sPDDevPAddr; /*!< device physical addr of PD for the mem heap */ +} PVRSRV_SGX_MISCINFO_MEMACCESS; +#endif + +/*! + ***************************************************************************** + * SGX Misc Info structure used in the microkernel + * PVRSRV_SGX_MISCINFO_FEATURES is defined in sgxapi_km.h + ****************************************************************************/ +typedef struct _PVRSRV_SGX_MISCINFO_INFO +{ + IMG_UINT32 ui32MiscInfoFlags; + PVRSRV_SGX_MISCINFO_FEATURES sSGXFeatures; /*!< external info for client */ + SGX_MISCINFO_STRUCT_SIZES sSGXStructSizes; /*!< internal info: microkernel structure sizes */ +#if defined(SUPPORT_SGX_EDM_MEMORY_DEBUG) + PVRSRV_SGX_MISCINFO_MEMACCESS sSGXMemAccessSrc; /*!< internal info: for reading dev memory */ + PVRSRV_SGX_MISCINFO_MEMACCESS sSGXMemAccessDest; /*!< internal info: for writing dev memory */ +#endif +} PVRSRV_SGX_MISCINFO_INFO; + +#ifdef PVRSRV_USSE_EDM_STATUS_DEBUG +/*! + ***************************************************************************** + * Number of entries in the microkernel status buffer + *****************************************************************************/ +#define SGXMK_TRACE_BUFFER_SIZE 512 +#endif /* PVRSRV_USSE_EDM_STATUS_DEBUG */ + +#define SGXMKIF_HWPERF_CB_SIZE 0x100 /* must be 2^n*/ + +/*! + ***************************************************************************** + * One entry in the HWPerf Circular Buffer. 
+ *****************************************************************************/ +typedef struct _SGXMKIF_HWPERF_CB_ENTRY_ +{ + IMG_UINT32 ui32FrameNo; + IMG_UINT32 ui32PID; + IMG_UINT32 ui32RTData; + IMG_UINT32 ui32Type; + IMG_UINT32 ui32Ordinal; + IMG_UINT32 ui32Info; + IMG_UINT32 ui32TimeWraps; + IMG_UINT32 ui32Time; +#if defined(SUPPORT_PVRSRV_ANDROID_SYSTRACE) && defined(EUR_CR_TIMER) + IMG_UINT32 ui32SystraceIndex; +#endif + /* NOTE: There should always be at least as many 3D cores as TA cores. */ + IMG_UINT32 ui32Counters[SGX_FEATURE_MP_CORE_COUNT_3D][PVRSRV_SGX_HWPERF_NUM_COUNTERS]; + IMG_UINT32 ui32MiscCounters[SGX_FEATURE_MP_CORE_COUNT_3D][PVRSRV_SGX_HWPERF_NUM_MISC_COUNTERS]; +} SGXMKIF_HWPERF_CB_ENTRY; + +/*! + ***************************************************************************** + * The HWPerf Circular Buffer. + *****************************************************************************/ +typedef struct _SGXMKIF_HWPERF_CB_ +{ + IMG_UINT32 ui32Woff; + IMG_UINT32 ui32Roff; + IMG_UINT32 ui32Ordinal; + SGXMKIF_HWPERF_CB_ENTRY psHWPerfCBData[SGXMKIF_HWPERF_CB_SIZE]; +} SGXMKIF_HWPERF_CB; + + +#endif /* __SGX_MKIF_KM_H__ */ + +/****************************************************************************** + End of file (sgx_mkif_km.h) +******************************************************************************/ + + diff --git a/sgx_km/eurasia_km/services4/include/sgx_ukernel_status_codes.h b/sgx_km/eurasia_km/services4/include/sgx_ukernel_status_codes.h new file mode 100644 index 0000000..75f186d --- /dev/null +++ b/sgx_km/eurasia_km/services4/include/sgx_ukernel_status_codes.h @@ -0,0 +1,998 @@ +/*************************************************************************/ /*! +@File sgx_ukernel_status_codes.h +@Title SGX microkernel debug status codes +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@Description SGX microkernel debug status codes +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef __SGX_UKERNEL_STATUS_CODES_H__ +#define __SGX_UKERNEL_STATUS_CODES_H__ + +/* + NOTE: Do not add any conditional macros to this file! There must be + no use of #if defined(). This file is included in srvkm to print + stringified ukernel status codes, it must build identically to + srvinit. +*/ + +/* + Users of this header might define this macro to do something + clever; the primary use right now is to generate a switch/case + LUT for debugging in srvkm. If you add a new code, make sure it + has a corresponding MKTC_ST. +*/ +#ifndef MKTC_ST +#define MKTC_ST(x) +#endif + +/* + It would be nice to put these definitions into an enumeration, but USEASM + only has access to the C preprocessor so macros are required. +*/ + +/* + Bits 24-31 of these codes (0xAD) are a magic number used to help + distinguish between them and other debug information which can be + optionally dumped into the status buffer, e.g. sync object values. 
+*/ + +/* + Microkernel trace codes +*/ +#define MKTC_EHEVENT_3DMEMFREE 0xAD000001 +MKTC_ST(MKTC_EHEVENT_3DMEMFREE) +#define MKTC_EHEVENT_PIXELENDRENDER 0xAD000002 +MKTC_ST(MKTC_EHEVENT_PIXELENDRENDER) +#define MKTC_EHEVENT_ISPBREAKPOINT 0xAD000004 +MKTC_ST(MKTC_EHEVENT_ISPBREAKPOINT) +#define MKTC_EHEVENT_TAFINISHED 0xAD000005 +MKTC_ST(MKTC_EHEVENT_TAFINISHED) +#define MKTC_EHEVENT_OUTOFMEM 0xAD000007 +MKTC_ST(MKTC_EHEVENT_OUTOFMEM) +#define MKTC_EHEVENT_TATERMINATE 0xAD000008 +MKTC_ST(MKTC_EHEVENT_TATERMINATE) +#define MKTC_EHEVENT_TIMER 0xAD000009 +MKTC_ST(MKTC_EHEVENT_TIMER) +#define MKTC_EHEVENT_SWEVENT 0xAD00000A +MKTC_ST(MKTC_EHEVENT_SWEVENT) +#define MKTC_EHEVENT_2DCOMPLETE 0xAD00000B +MKTC_ST(MKTC_EHEVENT_2DCOMPLETE) + +#define MKTC_3DEVENT_3DMEMFREE 0xAD000100 +MKTC_ST(MKTC_3DEVENT_3DMEMFREE) +#define MKTC_3DEVENT_PIXELENDRENDER 0xAD000101 +MKTC_ST(MKTC_3DEVENT_PIXELENDRENDER) +#define MKTC_3DEVENT_ISPBREAKPOINT 0xAD000102 +MKTC_ST(MKTC_3DEVENT_ISPBREAKPOINT) +#define MKTC_3DEVENT_END 0xAD000104 +MKTC_ST(MKTC_3DEVENT_END) +#define MKTC_3DLB_3DMEMFREE 0xAD000180 +MKTC_ST(MKTC_3DLB_3DMEMFREE) +#define MKTC_3DLB_PIXELENDRENDER 0xAD000181 +MKTC_ST(MKTC_3DLB_PIXELENDRENDER) +#define MKTC_3DLB_ISPBREAKPOINT 0xAD000182 +MKTC_ST(MKTC_3DLB_ISPBREAKPOINT) +#define MKTC_3DLB_FIND3D 0xAD000183 +MKTC_ST(MKTC_3DLB_FIND3D) +#define MKTC_3DLB_END 0xAD000184 +MKTC_ST(MKTC_3DLB_END) + +#define MKTC_TAEVENT_TAFINISHED 0xAD000200 +MKTC_ST(MKTC_TAEVENT_TAFINISHED) +#define MKTC_TAEVENT_END 0xAD000202 +MKTC_ST(MKTC_TAEVENT_END) +#define MKTC_TALB_TAFINISHED 0xAD000280 +MKTC_ST(MKTC_TALB_TAFINISHED) +#define MKTC_TALB_FINDTA 0xAD000281 +MKTC_ST(MKTC_TALB_FINDTA) +#define MKTC_TALB_END 0xAD000282 +MKTC_ST(MKTC_TALB_END) + +#define MKTC_CRRL_WRITEOPSBLOCKED 0xAD000300 +MKTC_ST(MKTC_CRRL_WRITEOPSBLOCKED) +#define MKTC_CRRL_READOPSBLOCKED 0xAD000301 +MKTC_ST(MKTC_CRRL_READOPSBLOCKED) +#define MKTC_CRRL_FOUNDRENDER 0xAD000302 +MKTC_ST(MKTC_CRRL_FOUNDRENDER) +#define 
MKTC_CRRL_NORENDER 0xAD000303 +MKTC_ST(MKTC_CRRL_NORENDER) +#define MKTC_CRRL_TARC_DIFFERENT 0xAD000304 +MKTC_ST(MKTC_CRRL_TARC_DIFFERENT) +#define MKTC_CRRL_BLOCKEDRC 0xAD000309 +MKTC_ST(MKTC_CRRL_BLOCKEDRC) +#define MKTC_CRRL_BLOCKEDRTDATA 0xAD00030A +MKTC_ST(MKTC_CRRL_BLOCKEDRTDATA) +#define MKTC_CRRL_CONTEXT_SUSPENDED 0xAD00030B +MKTC_ST(MKTC_CRRL_CONTEXT_SUSPENDED) +#define MKTC_CRRL_TAWAITINGFORMEM 0xAD00030C +MKTC_ST(MKTC_CRRL_TAWAITINGFORMEM) +#define MKTC_CRRL_TAOOMBUTPRIOINV 0xAD00030D +MKTC_ST(MKTC_CRRL_TAOOMBUTPRIOINV) +#define MKTC_CRRL_READOPS2BLOCKED 0xAD00030E +MKTC_ST(MKTC_CRRL_READOPS2BLOCKED) +#define MKTC_CRRL_SRC_WRITEOPSBLOCKED 0xAD00030F +MKTC_ST(MKTC_CRRL_SRC_WRITEOPSBLOCKED) +#define MKTC_CRRL_SRC_READOPSBLOCKED 0xAD000310 +MKTC_ST(MKTC_CRRL_SRC_READOPSBLOCKED) +#define MKTC_CRRL_TQ_WRITEOPSBLOCKED 0xAD000311 +MKTC_ST(MKTC_CRRL_TQ_WRITEOPSBLOCKED) +#define MKTC_CRRL_TQ_READOPSBLOCKED 0xAD000312 +MKTC_ST(MKTC_CRRL_TQ_READOPSBLOCKED) + +#define MKTC_KICKRENDER_START 0xAD000400 +MKTC_ST(MKTC_KICKRENDER_START) +#define MKTC_KICKRENDER_OVERLAP 0xAD000401 +MKTC_ST(MKTC_KICKRENDER_OVERLAP) +#define MKTC_KICKRENDER_ISP_START 0xAD000402 +MKTC_ST(MKTC_KICKRENDER_ISP_START) +#define MKTC_KICKRENDER_RESUME 0xAD000403 +MKTC_ST(MKTC_KICKRENDER_RESUME) +#define MKTC_KICKRENDER_CONFIG_REGION_HDRS 0xAD000404 +MKTC_ST(MKTC_KICKRENDER_CONFIG_REGION_HDRS) +#define MKTC_KICKRENDER_END 0xAD000408 +MKTC_ST(MKTC_KICKRENDER_END) +#define MKTC_KICKRENDER_RENDERCONTEXT 0xAD000409 +MKTC_ST(MKTC_KICKRENDER_RENDERCONTEXT) +#define MKTC_KICKRENDER_RTDATA 0xAD00040A +MKTC_ST(MKTC_KICKRENDER_RTDATA) +#define MKTC_KICKRENDER_PID 0xAD00040B +MKTC_ST(MKTC_KICKRENDER_PID) + +#define MKTC_RENDERFINISHED_START 0xAD000500 +MKTC_ST(MKTC_RENDERFINISHED_START) +#define MKTC_RF_START_NEXT_MT 0xAD000501 +MKTC_ST(MKTC_RF_START_NEXT_MT) +#define MKTC_RF_ALL_MTS_DONE 0xAD000502 +MKTC_ST(MKTC_RF_ALL_MTS_DONE) +#define MKTC_RENDERFINISHED_END 0xAD000503 +MKTC_ST(MKTC_RENDERFINISHED_END) 
+#define MKTC_VISQUERY_START 0xAD000504 +MKTC_ST(MKTC_VISQUERY_START) +#define MKTC_VISQUERY_END 0xAD000505 +MKTC_ST(MKTC_VISQUERY_END) +#define MKTC_TRANSFERRENDERFINISHED_START 0xAD000508 +MKTC_ST(MKTC_TRANSFERRENDERFINISHED_START) +#define MKTC_TRANSFERRENDERFINISHED_END 0xAD000509 +MKTC_ST(MKTC_TRANSFERRENDERFINISHED_END) +#define MKTC_TRF_UPDATESTATUSVALS 0xAD00050A +MKTC_ST(MKTC_TRF_UPDATESTATUSVALS) +#define MKTC_TRF_UPDATESTATUSVALS_DONE 0xAD00050B +MKTC_ST(MKTC_TRF_UPDATESTATUSVALS_DONE) + +#define MKTC_PIXELENDRENDER_START 0xAD000600 +MKTC_ST(MKTC_PIXELENDRENDER_START) +#define MKTC_PIXELENDRENDER_AFTERLOCK 0xAD000601 +MKTC_ST(MKTC_PIXELENDRENDER_AFTERLOCK) +#define MKTC_PIXELENDRENDER_END 0xAD000602 +MKTC_ST(MKTC_PIXELENDRENDER_END) +#define MKTC_PIXELENDRENDER_TLQEND 0xAD000603 +MKTC_ST(MKTC_PIXELENDRENDER_TLQEND) + +#define MKTC_3DMEMFREE_START 0xAD000700 +MKTC_ST(MKTC_3DMEMFREE_START) +#define MKTC_3DMEMFREE_AFTERLOCK 0xAD000701 +MKTC_ST(MKTC_3DMEMFREE_AFTERLOCK) +#define MKTC_3DMEMFREE_TESTEOR 0xAD000702 +MKTC_ST(MKTC_3DMEMFREE_TESTEOR) +#define MKTC_3DMEMFREE_END 0xAD000703 +MKTC_ST(MKTC_3DMEMFREE_END) + +#define MKTC_KICKTA_START 0xAD000800 +MKTC_ST(MKTC_KICKTA_START) +#define MKTC_KICKTA_OVERLAP 0xAD000801 +MKTC_ST(MKTC_KICKTA_OVERLAP) +#define MKTC_KICKTA_RESETCONTEXT 0xAD000802 +MKTC_ST(MKTC_KICKTA_RESETCONTEXT) +#define MKTC_KICKTA_VDM_START 0xAD000803 +MKTC_ST(MKTC_KICKTA_VDM_START) +#define MKTC_KICKTA_END 0xAD000804 +MKTC_ST(MKTC_KICKTA_END) +#define MKTC_KICKTA_RENDERCONTEXT 0xAD000805 +MKTC_ST(MKTC_KICKTA_RENDERCONTEXT) +#define MKTC_KICKTA_RTDATA 0xAD000806 +MKTC_ST(MKTC_KICKTA_RTDATA) +#define MKTC_KICKTA_RESET_VDMCSSTATUS 0xAD000807 +MKTC_ST(MKTC_KICKTA_RESET_VDMCSSTATUS) +#define MKTC_KICKTA_RESET_BUFFERS 0xAD000808 +MKTC_ST(MKTC_KICKTA_RESET_BUFFERS) +#define MKTC_KICKTA_PID 0xAD000809 +MKTC_ST(MKTC_KICKTA_PID) +#define MKTC_KICKTA_TACMD_DEBUG 0xAD00080A +MKTC_ST(MKTC_KICKTA_TACMD_DEBUG) +#define MKTC_KICKTA_FREECONTEXT 0xAD00080B 
+MKTC_ST(MKTC_KICKTA_FREECONTEXT) +#define MKTC_KICKTA_PIM_PATCHING 0xAD00080C +MKTC_ST(MKTC_KICKTA_PIM_PATCHING) +#define MKTC_KICKTA_TPC_CHECK_START 0xAD00080D +MKTC_ST(MKTC_KICKTA_TPC_CHECK_START) +#define MKTC_KICKTA_TPC_CHECK_END 0xAD00080E +MKTC_ST(MKTC_KICKTA_TPC_CHECK_END) +#define MKTC_KICKTA_TPC_CHECK_CORE 0xAD00080F +MKTC_ST(MKTC_KICKTA_TPC_CHECK_CORE) +#define MKTC_KICKTA_TPC_CHECK_FAIL 0xAD000810 +MKTC_ST(MKTC_KICKTA_TPC_CHECK_FAIL) + +#define MKTC_KICKTA_CHKPT_START_DUMMY_CS 0xAD0008A1 +MKTC_ST(MKTC_KICKTA_CHKPT_START_DUMMY_CS) +#define MKTC_KICKTA_CHKPT_START_DUMMY_TAK 0xAD0008A2 +MKTC_ST(MKTC_KICKTA_CHKPT_START_DUMMY_TAK) +#define MKTC_KICKTA_CHKPT_WAIT_FOR_DUMMY_KICK 0xAD0008A3 +MKTC_ST(MKTC_KICKTA_CHKPT_WAIT_FOR_DUMMY_KICK) +#define MKTC_KICKTA_CHKPT_WAIT_NEXT_CORE 0xAD0008A4 +MKTC_ST(MKTC_KICKTA_CHKPT_WAIT_NEXT_CORE) +#define MKTC_KICKTA_CHKPT_RESET_COMPLETE 0xAD0008A5 +MKTC_ST(MKTC_KICKTA_CHKPT_RESET_COMPLETE) +#define MKTC_KICKTA_CHKPT_CHECK_SWITCH 0xAD0008A6 +MKTC_ST(MKTC_KICKTA_CHKPT_CHECK_SWITCH) + +#define MKTC_KICK_MEM_BURSTER_WA_VDM_START 0xAD0008B1 +MKTC_ST(MKTC_KICK_MEM_BURSTER_WA_VDM_START) +#define MKTC_TAFINISHED_MEM_BURSTER_WA 0xAD0008B2 +MKTC_ST(MKTC_TAFINISHED_MEM_BURSTER_WA) + +#define MKTC_HOSTKICK_START 0xAD000900 +MKTC_ST(MKTC_HOSTKICK_START) +#define MKTC_HOSTKICK_END 0xAD000901 +MKTC_ST(MKTC_HOSTKICK_END) +#define MKTC_HOSTKICK_PROCESS_QUEUES_END 0xAD000902 +MKTC_ST(MKTC_HOSTKICK_PROCESS_QUEUES_END) +#define MKTC_HOSTKICK_2D 0xAD000903 +MKTC_ST(MKTC_HOSTKICK_2D) +#define MKTC_HOSTKICK_TRANSFER 0xAD000904 +MKTC_ST(MKTC_HOSTKICK_TRANSFER) +#define MKTC_HOSTKICK_TA 0xAD000905 +MKTC_ST(MKTC_HOSTKICK_TA) +#define MKTC_HOSTKICK_PROCESS_QUEUES 0xAD000906 +MKTC_ST(MKTC_HOSTKICK_PROCESS_QUEUES) +#define MKTC_HOSTKICK_RESUME 0xAD000908 +MKTC_ST(MKTC_HOSTKICK_RESUME) +#define MKTC_HOSTKICK_POWEROFF 0xAD000909 +MKTC_ST(MKTC_HOSTKICK_POWEROFF) +#define MKTC_HOSTKICK_IDLE 0xAD00090A +MKTC_ST(MKTC_HOSTKICK_IDLE) +#define 
MKTC_HOSTKICK_CTXSUSPEND 0xAD00090B +MKTC_ST(MKTC_HOSTKICK_CTXSUSPEND) +#define MKTC_HOSTKICK_CTXRESUME 0xAD00090C +MKTC_ST(MKTC_HOSTKICK_CTXRESUME) + +#define MKTC_TIMER_POTENTIAL_TA_LOCKUP 0xAD000A00 +MKTC_ST(MKTC_TIMER_POTENTIAL_TA_LOCKUP) +#define MKTC_TIMER_POTENTIAL_3D_LOCKUP 0xAD000A01 +MKTC_ST(MKTC_TIMER_POTENTIAL_3D_LOCKUP) +#define MKTC_TIMER_CTAL_START 0xAD000A02 +MKTC_ST(MKTC_TIMER_CTAL_START) +#define MKTC_TIMER_CTAL_END 0xAD000A03 +MKTC_ST(MKTC_TIMER_CTAL_END) +#define MKTC_TIMER_C3DL_START 0xAD000A04 +MKTC_ST(MKTC_TIMER_C3DL_START) +#define MKTC_TIMER_C3DL_END 0xAD000A05 +MKTC_ST(MKTC_TIMER_C3DL_END) +#define MKTC_TIMER_LOCKUP 0xAD000A0A +MKTC_ST(MKTC_TIMER_LOCKUP) +#define MKTC_TIMER_NOT_TA_LOCKUP 0xAD000A0B +MKTC_ST(MKTC_TIMER_NOT_TA_LOCKUP) +#define MKTC_TIMER_NOT_3D_LOCKUP 0xAD000A0C +MKTC_ST(MKTC_TIMER_NOT_3D_LOCKUP) +#define MKTC_TIMER_2D_LOCKUP 0xAD000A0D +MKTC_ST(MKTC_TIMER_2D_LOCKUP) +#define MKTC_TIMER_POTENTIAL_2D_LOCKUP 0xAD000A10 +MKTC_ST(MKTC_TIMER_POTENTIAL_2D_LOCKUP) +#define MKTC_TIMER_C2DL_START 0xAD000A11 +MKTC_ST(MKTC_TIMER_C2DL_START) +#define MKTC_TIMER_C2DL_END 0xAD000A12 +MKTC_ST(MKTC_TIMER_C2DL_END) +#define MKTC_TIMER_NOT_2D_LOCKUP 0xAD000A13 +MKTC_ST(MKTC_TIMER_NOT_2D_LOCKUP) +#define MKTC_TIMER_ABORTALL 0xAD000A0E +MKTC_ST(MKTC_TIMER_ABORTALL) +#define MKTC_TIMER_END 0xAD000A0F +MKTC_ST(MKTC_TIMER_END) + +#define MKTC_HWR_START 0xAD000B00 +MKTC_ST(MKTC_HWR_START) +#define MKTC_HWR_END 0xAD000B01 +MKTC_ST(MKTC_HWR_END) +#define MKTC_HWR_HKS 0xAD000B02 +MKTC_ST(MKTC_HWR_HKS) +#define MKTC_HWR_PRL 0xAD000B03 +MKTC_ST(MKTC_HWR_PRL) +#define MKTC_HWR_PRL_DP 0xAD000B04 +MKTC_ST(MKTC_HWR_PRL_DP) +#define MKTC_HWR_CRL 0xAD000B05 +MKTC_ST(MKTC_HWR_CRL) +#define MKTC_HWR_CRL_DP 0xAD000B06 +MKTC_ST(MKTC_HWR_CRL_DP) +#define MKTC_HWR_TRL 0xAD000B07 +MKTC_ST(MKTC_HWR_TRL) +#define MKTC_HWR_TRL_DP 0xAD000B08 +MKTC_ST(MKTC_HWR_TRL_DP) +#define MKTC_HWR_ISC 0xAD000B09 +MKTC_ST(MKTC_HWR_ISC) +#define MKTC_HWR_2DL 0xAD000B0A 
+MKTC_ST(MKTC_HWR_2DL) +#define MKTC_HWR_CLEANUP 0xAD000B0B +MKTC_ST(MKTC_HWR_CLEANUP) + +#define MKTC_URSV_START 0xAD000C00 +MKTC_ST(MKTC_URSV_START) +#define MKTC_URSV_UPDATEWRITEOPS 0xAD000C01 +MKTC_ST(MKTC_URSV_UPDATEWRITEOPS) +#define MKTC_URSV_UPDATESTATUSVALS 0xAD000C03 +MKTC_ST(MKTC_URSV_UPDATESTATUSVALS) +#define MKTC_URSV_UPDATESTATUSVALS_DONE 0xAD000C04 +MKTC_ST(MKTC_URSV_UPDATESTATUSVALS_DONE) +#define MKTC_URSV_END 0xAD000C05 +MKTC_ST(MKTC_URSV_END) + +#define MKTC_STORETACONTEXT_START 0xAD000D00 +MKTC_ST(MKTC_STORETACONTEXT_START) +#define MKTC_STORETACONTEXT_END 0xAD000D01 +MKTC_ST(MKTC_STORETACONTEXT_END) +#define MKTC_LOADTACONTEXT_START 0xAD000D02 +MKTC_ST(MKTC_LOADTACONTEXT_START) +#define MKTC_LOADTACONTEXT_END 0xAD000D03 +MKTC_ST(MKTC_LOADTACONTEXT_END) +#define MKTC_STORE3DCONTEXT_START 0xAD000D04 +MKTC_ST(MKTC_STORE3DCONTEXT_START) +#define MKTC_STORE3DCONTEXT_END 0xAD000D05 +MKTC_ST(MKTC_STORE3DCONTEXT_END) +#define MKTC_LOAD3DCONTEXT_START 0xAD000D06 +MKTC_ST(MKTC_LOAD3DCONTEXT_START) +#define MKTC_LOAD3DCONTEXT_END 0xAD000D07 +MKTC_ST(MKTC_LOAD3DCONTEXT_END) + +#define MKTC_FINDTA_POWERREQUEST 0xAD000E00 +MKTC_ST(MKTC_FINDTA_POWERREQUEST) +#define MKTC_FINDTA_TA3D_OVERLAP_BLOCKED 0xAD000E01 +MKTC_ST(MKTC_FINDTA_TA3D_OVERLAP_BLOCKED) +#define MKTC_FINDTA_RTDATA_RENDERING 0xAD000E02 +MKTC_ST(MKTC_FINDTA_RTDATA_RENDERING) +#define MKTC_FINDTA_3DRC_DIFFERENT 0xAD000E03 +MKTC_ST(MKTC_FINDTA_3DRC_DIFFERENT) +#define MKTC_FINDTA_WRITEOPSBLOCKED 0xAD000E04 +MKTC_ST(MKTC_FINDTA_WRITEOPSBLOCKED) +#define MKTC_FINDTA_READOPSBLOCKED 0xAD000E05 +MKTC_ST(MKTC_FINDTA_READOPSBLOCKED) +#define MKTC_FINDTA_RESIZE_PB 0xAD000E06 +MKTC_ST(MKTC_FINDTA_RESIZE_PB) +#define MKTC_FINDTA_RESIZE_PB_BLOCKED 0xAD000E07 +MKTC_ST(MKTC_FINDTA_RESIZE_PB_BLOCKED) +#define MKTC_FINDTA_SHRINK_PB 0xAD000E08 +MKTC_ST(MKTC_FINDTA_SHRINK_PB) +#define MKTC_FINDTA_TAPB_DIFFERENT 0xAD000E09 +MKTC_ST(MKTC_FINDTA_TAPB_DIFFERENT) +#define MKTC_FINDTA_TACONTEXT_DIFFERENT 0xAD000E0A 
+MKTC_ST(MKTC_FINDTA_TACONTEXT_DIFFERENT) +#define MKTC_FINDTA_TA2D_OVERLAP_BLOCKED 0xAD000E0B +MKTC_ST(MKTC_FINDTA_TA2D_OVERLAP_BLOCKED) +#define MKTC_FINDTA_CONTEXT_SUSPENDED 0xAD000E0C +MKTC_ST(MKTC_FINDTA_CONTEXT_SUSPENDED) +#define MKTC_FINDTA_SRC_READOPSBLOCKED 0xAD000E0D +MKTC_ST(MKTC_FINDTA_SRC_READOPSBLOCKED) +#define MKTC_FINDTA_SRC_WRITEOPSBLOCKED 0xAD000E0E +MKTC_ST(MKTC_FINDTA_SRC_WRITEOPSBLOCKED) +#define MKTC_FINDTA_READOPS2BLOCKED 0xAD000E0F +MKTC_ST(MKTC_FINDTA_READOPS2BLOCKED) + + +#define MKTC_CTRL_SRCREADOPSBLOCKED 0xAD000F00 +MKTC_ST(MKTC_CTRL_SRCREADOPSBLOCKED) +#define MKTC_CTRL_SRCWRITEOPSBLOCKED 0xAD000F01 +MKTC_ST(MKTC_CTRL_SRCWRITEOPSBLOCKED) +#define MKTC_CTRL_DSTREADOPSBLOCKED 0xAD000F02 +MKTC_ST(MKTC_CTRL_DSTREADOPSBLOCKED) +#define MKTC_CTRL_DSTWRITEOPSBLOCKED 0xAD000F03 +MKTC_ST(MKTC_CTRL_DSTWRITEOPSBLOCKED) +#define MKTC_CTRL_TARC_DIFFERENT 0xAD000F04 +MKTC_ST(MKTC_CTRL_TARC_DIFFERENT) +#define MKTC_CTRL_CONTEXT_SUSPENDED 0xAD000F05 +MKTC_ST(MKTC_CTRL_CONTEXT_SUSPENDED) +#define MKTC_CTRL_SRCREADOPS2BLOCKED 0xAD000F06 +MKTC_ST(MKTC_CTRL_SRCREADOPS2BLOCKED) +#define MKTC_CTRL_3D_WRITEOPSBLOCKED 0xAD000F07 +MKTC_ST(MKTC_CTRL_3D_WRITEOPSBLOCKED) +#define MKTC_CTRL_3D_READOPSBLOCKED 0xAD000F08 +MKTC_ST(MKTC_CTRL_3D_READOPSBLOCKED) + +#define MKTC_DPTA_START 0xAD001000 +MKTC_ST(MKTC_DPTA_START) +#define MKTC_DPTA_UPDATESTATUSVALS 0xAD001001 +MKTC_ST(MKTC_DPTA_UPDATESTATUSVALS) +#define MKTC_DPTA_UPDATESTATUSVALS_DONE 0xAD001002 +MKTC_ST(MKTC_DPTA_UPDATESTATUSVALS_DONE) +#define MKTC_DPTA_NORENDER 0xAD001003 +MKTC_ST(MKTC_DPTA_NORENDER) +#define MKTC_DPTA_MEMFREE 0xAD001004 +MKTC_ST(MKTC_DPTA_MEMFREE) +#define MKTC_DPTA_INC_COMPLETECOUNT 0xAD001005 +MKTC_ST(MKTC_DPTA_INC_COMPLETECOUNT) + +#define MKTC_INVALDC 0xAD001100 +MKTC_ST(MKTC_INVALDC) +#define MKTC_INVALPT 0xAD001101 +MKTC_ST(MKTC_INVALPT) +#define MKTC_INVALSLC 0xAD001102 +MKTC_ST(MKTC_INVALSLC) +#define MKTC_INVALDATA 0xAD001103 +MKTC_ST(MKTC_INVALDATA) + +#define MKTC_RESTARTTA 
0xAD001200 +MKTC_ST(MKTC_RESTARTTA) +#define MKTC_CSABORTNONGBL 0xAD001201 +MKTC_ST(MKTC_CSABORTNONGBL) +#define MKTC_CSABORTALL 0xAD001202 +MKTC_ST(MKTC_CSABORTALL) +#define MKTC_CSRENDERINPROGRESS 0xAD001203 +MKTC_ST(MKTC_CSRENDERINPROGRESS) +#define MKTC_TATERMRENDERINPROGRESS 0xAD001204 +MKTC_ST(MKTC_TATERMRENDERINPROGRESS) +#define MKTC_RESTARTTANORENDER 0xAD001205 +MKTC_ST(MKTC_RESTARTTANORENDER) +#define MKTC_SPM_KICKRENDER 0xAD001206 +MKTC_ST(MKTC_SPM_KICKRENDER) +#define MKTC_SPM_RESUME_ABORTCOMPLETE 0xAD001208 +MKTC_ST(MKTC_SPM_RESUME_ABORTCOMPLETE) +#define MKTC_RESUMEVDM 0xAD001209 +MKTC_ST(MKTC_RESUMEVDM) +#define MKTC_REMOVE_RESERVE_MEM 0xAD00120A +MKTC_ST(MKTC_REMOVE_RESERVE_MEM) +#define MKTC_INCREASEZLSTHRESHOLD 0xAD00120B +MKTC_ST(MKTC_INCREASEZLSTHRESHOLD) +#define MKTC_CSFORCEABORTALL 0xAD00120C +MKTC_ST(MKTC_CSFORCEABORTALL) + +#define MKTC_DUMMY_DEPTH 0xAD00120D +MKTC_ST(MKTC_DUMMY_DEPTH) +#define MKTC_DUMMY_DEPTH_CS 0xAD00120E +MKTC_ST(MKTC_DUMMY_DEPTH_CS) + +#define MKTC_MTETE_OOM 0xAD00120F +MKTC_ST(MKTC_MTETE_OOM) +#define MKTC_MTETE_OOM_FIRST_STORE_REF 0xAD001210 +MKTC_ST(MKTC_MTETE_OOM_FIRST_STORE_REF) +#define MKTC_MERGE_STATE_TABLES 0xAD001211 +MKTC_ST(MKTC_MERGE_STATE_TABLES) +#define MKTC_NO_PAGES_LEFT_FOR_23055 0xAD001212 +MKTC_ST(MKTC_NO_PAGES_LEFT_FOR_23055) +#define MKTC_NO_STATE_MODS 0xAD001213 +MKTC_ST(MKTC_NO_STATE_MODS) +#define MKTC_FIND_MTE_PAGE_IN_STATE 0xAD001214 +MKTC_ST(MKTC_FIND_MTE_PAGE_IN_STATE) +#define MKTC_MTE_PAGE_FOUND 0xAD001215 +MKTC_ST(MKTC_MTE_PAGE_FOUND) +#define MKTC_MOVE_MTE_PAGE_TO_TA_STATE 0xAD001216 +MKTC_ST(MKTC_MOVE_MTE_PAGE_TO_TA_STATE) +#define MKTC_MOVE_MTE_PAGE_TO_TA_STATE_END 0xAD001217 +MKTC_ST(MKTC_MOVE_MTE_PAGE_TO_TA_STATE_END) +#define MKTC_ZERO_ZLS_THRESHOLD 0xAD001218 +MKTC_ST(MKTC_ZERO_ZLS_THRESHOLD) +#define MKTC_RESTORE_ZLS_THRESHOLD 0xAD001219 +MKTC_ST(MKTC_RESTORE_ZLS_THRESHOLD) +#define MKTC_FIND_MTE_PAGE_IN_CSM 0xAD00121A +MKTC_ST(MKTC_FIND_MTE_PAGE_IN_CSM) +#define 
MKTC_REISSUE_MTE_PAGE 0xAD00121B +MKTC_ST(MKTC_REISSUE_MTE_PAGE) +#define MKTC_REISSUE_MTE_PAGE_REQUIRED 0xAD00121C +MKTC_ST(MKTC_REISSUE_MTE_PAGE_REQUIRED) +#define MKTC_REISSUE_MTE_PAGE_END 0xAD00121D +MKTC_ST(MKTC_REISSUE_MTE_PAGE_END) +#define MKTC_RESET_TE_PSG 0xAD00121E +MKTC_ST(MKTC_RESET_TE_PSG) + +#define MKTC_OOM_WRITEOPSBLOCKED 0xAD00121F +MKTC_ST(MKTC_OOM_WRITEOPSBLOCKED) +#define MKTC_OOM_READOPSBLOCKED 0xAD001220 +MKTC_ST(MKTC_OOM_READOPSBLOCKED) +#define MKTC_OOM_SRC_WRITEOPSBLOCKED 0xAD001221 +MKTC_ST(MKTC_OOM_SRC_WRITEOPSBLOCKED) +#define MKTC_OOM_SRC_READOPSBLOCKED 0xAD001222 +MKTC_ST(MKTC_OOM_SRC_READOPSBLOCKED) +#define MKTC_OOM_SPM_DEADLOCK 0xAD001223 +MKTC_ST(MKTC_OOM_SPM_DEADLOCK) +#define MKTC_OOM_SPM_DEADLOCK_MEM_ADDED 0xAD001224 +MKTC_ST(MKTC_OOM_SPM_DEADLOCK_MEM_ADDED) +#define MKTC_RESET 0xAD001225 +MKTC_ST(MKTC_RESET) +#define MKTC_SPM_INVALID_ZLSCONFIG 0xAD001226 +MKTC_ST(MKTC_SPM_INVALID_ZLSCONFIG) +#define MKTC_SPM_DEADLOCK_RENDER_FINISHED 0xAD001227 +MKTC_ST(MKTC_SPM_DEADLOCK_RENDER_FINISHED) +#define MKTC_OOM_BLOCKEDRTDATA 0xAD001228 +MKTC_ST(MKTC_OOM_BLOCKEDRTDATA) + +#define MKTC_OOM_TYPE_MT 0xAD00122A +MKTC_ST(MKTC_OOM_TYPE_MT) +#define MKTC_OOM_TYPE_GLOBAL 0xAD001230 +MKTC_ST(MKTC_OOM_TYPE_GLOBAL) +#define MKTC_OOM_CAUSE_GBL_OOM 0xAD001231 +MKTC_ST(MKTC_OOM_CAUSE_GBL_OOM) +#define MKTC_OOM_RESTORE_LIST_SIZE 0xAD001232 +MKTC_ST(MKTC_OOM_RESTORE_LIST_SIZE) + +#define MKTC_CHECK_MTE_PAGE_REISSUE 0xAD001240 +MKTC_ST(MKTC_CHECK_MTE_PAGE_REISSUE) +#define MKTC_CPRI_VALID_ENTRIES 0xAD001241 +MKTC_ST(MKTC_CPRI_VALID_ENTRIES) +#define MKTC_CPRI_STORE_DPLIST 0xAD001242 +MKTC_ST(MKTC_CPRI_STORE_DPLIST) +#define MKTC_CPRI_STORE_OTPM_CSM 0xAD001243 +MKTC_ST(MKTC_CPRI_STORE_OTPM_CSM) +#define MKTC_CPRI_ABORT_MT_IDX 0xAD001244 +MKTC_ST(MKTC_CPRI_ABORT_MT_IDX) +#define MKTC_CPRI_ABORT_CORE_IDX 0xAD001245 +MKTC_ST(MKTC_CPRI_ABORT_CORE_IDX) +#define MKTC_CPRI_CSM_TABLE_DATA 0xAD001246 +MKTC_ST(MKTC_CPRI_CSM_TABLE_DATA) +#define 
MKTC_CPRI_PIM_DATA 0xAD001247 +MKTC_ST(MKTC_CPRI_PIM_DATA) +#define MKTC_CPRI_DO_CIRCULAR_TEST 0xAD001248 +MKTC_ST(MKTC_CPRI_DO_CIRCULAR_TEST) +#define MKTC_CPRI_WRITE_ENTRIES 0xAD001249 +MKTC_ST(MKTC_CPRI_WRITE_ENTRIES) + +#define MKTC_MTE_ENTRY_NOT_IN_ANY_LIST 0xAD001250 +MKTC_ST(MKTC_MTE_ENTRY_NOT_IN_ANY_LIST) + +#define MKTC_SPMAC_IGNORE_TERMINATE 0xAD001251 +MKTC_ST(MKTC_SPMAC_IGNORE_TERMINATE) + +#define MKTC_SPMAC_REQUEST_3D_TIMEOUT 0xAD001252 +MKTC_ST(MKTC_SPMAC_REQUEST_3D_TIMEOUT) +#define MKTC_SPMAC_3D_TIMEOUT_COMPLETE 0xAD001253 +MKTC_ST(MKTC_SPMAC_3D_TIMEOUT_COMPLETE) +#define MKTC_OOM_READOPS2BLOCKED 0xAD001254 +MKTC_ST(MKTC_OOM_READOPS2BLOCKED) + +/* PB Load/store status */ +#define MKTC_LOADTAPB_START 0xAD001300 +MKTC_ST(MKTC_LOADTAPB_START) +#define MKTC_LOADTAPB_END 0xAD001301 +MKTC_ST(MKTC_LOADTAPB_END) +#define MKTC_STORETAPB_START 0xAD001302 +MKTC_ST(MKTC_STORETAPB_START) +#define MKTC_STORETAPB_END 0xAD001303 +MKTC_ST(MKTC_STORETAPB_END) +#define MKTC_LOAD3DPB_START 0xAD001304 +MKTC_ST(MKTC_LOAD3DPB_START) +#define MKTC_LOAD3DPB_END 0xAD001305 +MKTC_ST(MKTC_LOAD3DPB_END) +#define MKTC_STORE3DPB_START 0xAD001306 +MKTC_ST(MKTC_STORE3DPB_START) +#define MKTC_STORE3DPB_END 0xAD001307 +MKTC_ST(MKTC_STORE3DPB_END) +#define MKTC_LOADTAPB_PAGETABLE_DONE 0xAD001308 +MKTC_ST(MKTC_LOADTAPB_PAGETABLE_DONE) +#define MKTC_LOAD3DPB_PAGETABLE_DONE 0xAD001309 +MKTC_ST(MKTC_LOAD3DPB_PAGETABLE_DONE) + +#define MKTC_TIMER_RC_CLEANUP 0xAD001400 +MKTC_ST(MKTC_TIMER_RC_CLEANUP) +#define MKTC_TIMER_RC_CLEANUP_DONE 0xAD001401 +MKTC_ST(MKTC_TIMER_RC_CLEANUP_DONE) +#define MKTC_TIMER_RC_CLEANUP_BUSY 0xAD001402 +MKTC_ST(MKTC_TIMER_RC_CLEANUP_BUSY) +#define MKTC_TIMER_RT_CLEANUP 0xAD001410 +MKTC_ST(MKTC_TIMER_RT_CLEANUP) +#define MKTC_TIMER_RT_CLEANUP_DONE 0xAD001411 +MKTC_ST(MKTC_TIMER_RT_CLEANUP_DONE) +#define MKTC_TIMER_RT_CLEANUP_PENDING 0xAD001412 +MKTC_ST(MKTC_TIMER_RT_CLEANUP_PENDING) +#define MKTC_TIMER_RT_CLEANUP_TIDYPARTIALLIST 0xAD001413 
+MKTC_ST(MKTC_TIMER_RT_CLEANUP_TIDYPARTIALLIST) +#define MKTC_TIMER_RT_CLEANUP_BUSY 0xAD001414 +MKTC_ST(MKTC_TIMER_RT_CLEANUP_BUSY) +#define MKTC_TIMER_TC_CLEANUP 0xAD001420 +MKTC_ST(MKTC_TIMER_TC_CLEANUP) +#define MKTC_TIMER_TC_CLEANUP_DONE 0xAD001421 +MKTC_ST(MKTC_TIMER_TC_CLEANUP_DONE) +#define MKTC_TIMER_TC_CLEANUP_BUSY 0xAD001422 +MKTC_ST(MKTC_TIMER_TC_CLEANUP_BUSY) +#define MKTC_TIMER_2DC_CLEANUP 0xAD001430 +MKTC_ST(MKTC_TIMER_2DC_CLEANUP) +#define MKTC_TIMER_2DC_CLEANUP_DONE 0xAD001431 +MKTC_ST(MKTC_TIMER_2DC_CLEANUP_DONE) +#define MKTC_TIMER_2DC_CLEANUP_BUSY 0xAD001432 +MKTC_ST(MKTC_TIMER_2DC_CLEANUP_BUSY) +#define MKTC_TIMER_SHAREDPBDESC_CLEANUP 0xAD001440 +MKTC_ST(MKTC_TIMER_SHAREDPBDESC_CLEANUP) + + +#define MKTC_TIMER_ISP_SWITCH_POTENTIAL_LOCKUP 0xAD001450 +MKTC_ST(MKTC_TIMER_ISP_SWITCH_POTENTIAL_LOCKUP) +#define MKTC_TIMER_ISP_SWITCH_FORCE_SWITCH 0xAD001451 +MKTC_ST(MKTC_TIMER_ISP_SWITCH_FORCE_SWITCH) + +#define MKTC_UTSO_UPDATEREADOPS 0xAD001600 +MKTC_ST(MKTC_UTSO_UPDATEREADOPS) +#define MKTC_UTSO_UPDATEWRITEOPS 0xAD001601 +MKTC_ST(MKTC_UTSO_UPDATEWRITEOPS) + +#define MKTC_TAFINISHED_UPDATESTATUSVALS 0xAD001700 +MKTC_ST(MKTC_TAFINISHED_UPDATESTATUSVALS) +#define MKTC_TAFINISHED_UPDATESTATUSVALS_DONE 0xAD001701 +MKTC_ST(MKTC_TAFINISHED_UPDATESTATUSVALS_DONE) +#define MKTC_TAFINISHED_NORENDER 0xAD001702 +MKTC_ST(MKTC_TAFINISHED_NORENDER) +#define MKTC_TAFINISHED_LASTKICK 0xAD001703 +MKTC_ST(MKTC_TAFINISHED_LASTKICK) +#define MKTC_TAFINISHED_FINDRENDER 0xAD001704 +MKTC_ST(MKTC_TAFINISHED_FINDRENDER) +#define MKTC_TAFINISHED_FINDTA 0xAD001705 +MKTC_ST(MKTC_TAFINISHED_FINDTA) +#define MKTC_TAFINISHED_END 0xAD001706 +MKTC_ST(MKTC_TAFINISHED_END) +#define MKTC_TAF_SPM_DEADLOCK_MEM_REMOVED 0xAD001707 +MKTC_ST(MKTC_TAF_SPM_DEADLOCK_MEM_REMOVED) +#define MKTC_TAF_RESERVE_MEM 0xAD001708 +MKTC_ST(MKTC_TAF_RESERVE_MEM) +#define MKTC_TAF_RESERVE_MEM_REQUEST_RENDER 0xAD001709 +MKTC_ST(MKTC_TAF_RESERVE_MEM_REQUEST_RENDER) +#define 
MKTC_TAF_RESERVE_FREE_RENDER_FINISHED 0xAD00170A +MKTC_ST(MKTC_TAF_RESERVE_FREE_RENDER_FINISHED) +#define MKTC_TAF_RESERVE_FREE_DUMMY_RENDER 0xAD00170B +MKTC_ST(MKTC_TAF_RESERVE_FREE_DUMMY_RENDER) +#define MKTC_TAF_DEBUG_SAS 0xAD00170C +MKTC_ST(MKTC_TAF_DEBUG_SAS) +#define MKTC_TAFINISHED_NOCONTEXTSWITCH 0xAD00170D +MKTC_ST(MKTC_TAFINISHED_NOCONTEXTSWITCH) + +#define MKTC_TAFINISHED_TERM_COMPLETE_START 0xAD001710 +MKTC_ST(MKTC_TAFINISHED_TERM_COMPLETE_START) +#define MKTC_TAFINISHED_TERM_COMPLETE_END 0xAD001711 +MKTC_ST(MKTC_TAFINISHED_TERM_COMPLETE_END) + +#define MKTC_TAFINISHED_DPMPAGERECYCLING 0xAD001720 +MKTC_ST(MKTC_TAFINISHED_DPMPAGERECYCLING) + +#define MKTC_2DEVENT_2DCOMPLETE 0xAD001800 +MKTC_ST(MKTC_2DEVENT_2DCOMPLETE) +#define MKTC_2DEVENT_END 0xAD001801 +MKTC_ST(MKTC_2DEVENT_END) +#define MKTC_2DLB_2DCOMPLETE 0xAD001802 +MKTC_ST(MKTC_2DLB_2DCOMPLETE) +#define MKTC_2DLB_FIND2D 0xAD001803 +MKTC_ST(MKTC_2DLB_FIND2D) +#define MKTC_2DLB_END 0xAD001804 +MKTC_ST(MKTC_2DLB_END) +#define MKTC_2DCOMPLETE_START 0xAD001805 +MKTC_ST(MKTC_2DCOMPLETE_START) +#define MKTC_2DCOMPLETE_END 0xAD001806 +MKTC_ST(MKTC_2DCOMPLETE_END) +#define MKTC_KICK2D_START 0xAD001807 +MKTC_ST(MKTC_KICK2D_START) +#define MKTC_KICK2D_END 0xAD001808 +MKTC_ST(MKTC_KICK2D_END) +#define MKTC_DUMMYPROC2D 0xAD001809 +MKTC_ST(MKTC_DUMMYPROC2D) +#define MKTC_FTD_SRCREADOPSBLOCKED 0xAD00180A +MKTC_ST(MKTC_FTD_SRCREADOPSBLOCKED) +#define MKTC_FTD_SRCWRITEOPSBLOCKED 0xAD00180B +MKTC_ST(MKTC_FTD_SRCWRITEOPSBLOCKED) +#define MKTC_FTD_DSTREADOPSBLOCKED 0xAD00180C +MKTC_ST(MKTC_FTD_DSTREADOPSBLOCKED) +#define MKTC_FTD_DSTWRITEOPSBLOCKED 0xAD00180D +MKTC_ST(MKTC_FTD_DSTWRITEOPSBLOCKED) +#define MKTC_FTD_TA2D_OVERLAP_BLOCKED 0xAD00180E +MKTC_ST(MKTC_FTD_TA2D_OVERLAP_BLOCKED) +#define MKTC_U2DSO_UPDATEREADOPS 0xAD00180F +MKTC_ST(MKTC_U2DSO_UPDATEREADOPS) +#define MKTC_U2DSO_UPDATEWRITEOPS 0xAD001810 +MKTC_ST(MKTC_U2DSO_UPDATEWRITEOPS) +#define MKTC_FTD_TAOPSBLOCKED 0xAD001811 +MKTC_ST(MKTC_FTD_TAOPSBLOCKED) 
+#define MKTC_KICK2D_2DSLAVEPORT 0xAD001812 +MKTC_ST(MKTC_KICK2D_2DSLAVEPORT) +#define MKTC_KICK2D_2DSLAVEPORT_DONE 0xAD001813 +MKTC_ST(MKTC_KICK2D_2DSLAVEPORT_DONE) +#define MKTC_FTD_CONTEXT_SUSPENDED 0xAD001814 +MKTC_ST(MKTC_FTD_CONTEXT_SUSPENDED) +#define MKTC_KICK2D_PID 0xAD001815 +MKTC_ST(MKTC_KICK2D_PID) +#define MKTC_FIND2D_ADDR_SPACE_DIFFERENT 0xAD001816 +MKTC_ST(MKTC_FIND2D_ADDR_SPACE_DIFFERENT) +#define MKTC_FTD_3DOPSBLOCKED 0xAD001817 +MKTC_ST(MKTC_FTD_3DOPSBLOCKED) +#define MKTC_FTD_DSTREADOPS2BLOCKED 0xAD001818 +MKTC_ST(MKTC_FTD_DSTREADOPS2BLOCKED) +#define MKTC_U2DSO_UPDATESTATUSVALS 0xAD001819 +MKTC_ST(MKTC_U2DSO_UPDATESTATUSVALS) +#define MKTC_U2DSO_UPDATESTATUSVALS_DONE 0xAD00181A +MKTC_ST(MKTC_U2DSO_UPDATESTATUSVALS_DONE) + +#define MKTC_FCM_START 0xAD001900 +MKTC_ST(MKTC_FCM_START) +#define MKTC_FCM_END 0xAD001901 +MKTC_ST(MKTC_FCM_END) +#define MKTC_FCM_PB_SAME 0xAD001902 +MKTC_ST(MKTC_FCM_PB_SAME) +#define MKTC_FCM_TQ_IN_PROGESS 0xAD001903 +MKTC_ST(MKTC_FCM_TQ_IN_PROGESS) +#define MKTC_FCM_TQ_MEMCONTEXT_DIFFERENT 0xAD001904 +MKTC_ST(MKTC_FCM_TQ_MEMCONTEXT_DIFFERENT) + +#define MKTC_TIMER_ACTIVE_POWER 0xAD001A00 +MKTC_ST(MKTC_TIMER_ACTIVE_POWER) +#define MKTC_TIMER_POWER_3D_ACTIVE 0xAD001A01 +MKTC_ST(MKTC_TIMER_POWER_3D_ACTIVE) +#define MKTC_TIMER_POWER_TA_ACTIVE 0xAD001A02 +MKTC_ST(MKTC_TIMER_POWER_TA_ACTIVE) +#define MKTC_TIMER_POWER_2D_ACTIVE 0xAD001A03 +MKTC_ST(MKTC_TIMER_POWER_2D_ACTIVE) +#define MKTC_TIMER_POWER_PENDING_EVENTS 0xAD001A04 +MKTC_ST(MKTC_TIMER_POWER_PENDING_EVENTS) +#define MKTC_TIMER_POWER_IDLE 0xAD001A05 +MKTC_ST(MKTC_TIMER_POWER_IDLE) +#define MKTC_TIMER_POWER_OFF 0xAD001A06 +MKTC_ST(MKTC_TIMER_POWER_OFF) +#define MKTC_TIMER_POWER_CCB_ERROR 0xAD001A07 +MKTC_ST(MKTC_TIMER_POWER_CCB_ERROR) +#define MKTC_TIMER_POWER_RESTART_IMMEDIATE 0xAD001A08 +MKTC_ST(MKTC_TIMER_POWER_RESTART_IMMEDIATE) + +#define MKTC_3DCONTEXT_SWITCH 0xAD001B00 +MKTC_ST(MKTC_3DCONTEXT_SWITCH) +#define MKTC_3DCONTEXT_SWITCH_END 0xAD001B01 
+MKTC_ST(MKTC_3DCONTEXT_SWITCH_END) + +#define MKTC_TACONTEXT_SWITCH 0xAD001C00 +MKTC_ST(MKTC_TACONTEXT_SWITCH) +#define MKTC_TACONTEXT_SWITCH_END 0xAD001C02 +MKTC_ST(MKTC_TACONTEXT_SWITCH_END) + +#define MKTC_GETMISCINFO_MEMREAD_START 0xAD001D00 +MKTC_ST(MKTC_GETMISCINFO_MEMREAD_START) +#define MKTC_GETMISCINFO_MEMREAD_END 0xAD001D01 +MKTC_ST(MKTC_GETMISCINFO_MEMREAD_END) +#define MKTC_GETMISCINFO_MEMWRITE_START 0xAD001D02 +MKTC_ST(MKTC_GETMISCINFO_MEMWRITE_START) +#define MKTC_GETMISCINFO_MEMWRITE_END 0xAD001D03 +MKTC_ST(MKTC_GETMISCINFO_MEMWRITE_END) + +#define MKTC_HALTTA 0xAD001E00 +MKTC_ST(MKTC_HALTTA) +#define MKTC_HTA_SET_FLAG 0xAD001E01 +MKTC_ST(MKTC_HTA_SET_FLAG) +#define MKTC_HTA_SAVE_COMPLEX_PTR 0xAD001E02 +MKTC_ST(MKTC_HTA_SAVE_COMPLEX_PTR) +#define MKTC_HALTTA_END 0xAD001E03 +MKTC_ST(MKTC_HALTTA_END) + +#define MKTC_RESUMETA 0xAD001F00 +MKTC_ST(MKTC_RESUMETA) +#define MKTC_RTA_CONTEXT_LOADED 0xAD001F01 +MKTC_ST(MKTC_RTA_CONTEXT_LOADED) +#define MKTC_RTA_MTE_STATE_KICKED 0xAD001F02 +MKTC_ST(MKTC_RTA_MTE_STATE_KICKED) +#define MKTC_RTA_CMPLX_GEOM_PRESENT 0xAD001F03 +MKTC_ST(MKTC_RTA_CMPLX_GEOM_PRESENT) +#define MKTC_RTA_CMPLX_STATE_KICKED 0xAD001F04 +MKTC_ST(MKTC_RTA_CMPLX_STATE_KICKED) +#define MKTC_RTA_CHECK_NEXT_SA_PROG 0xAD001F05 +MKTC_ST(MKTC_RTA_CHECK_NEXT_SA_PROG) +#define MKTC_RTA_CORE_COMPLETED 0xAD001F06 +MKTC_ST(MKTC_RTA_CORE_COMPLETED) +#define MKTC_RTA_DEBUG_SAS 0xAD001F07 +MKTC_ST(MKTC_RTA_DEBUG_SAS) +#define MKTC_RTA_MTE_STATE_RESTORE_TASK 0xAD001F08 +MKTC_ST(MKTC_RTA_MTE_STATE_RESTORE_TASK) +#define MKTC_RTA_SA_STATE_RESTORE_TASK 0xAD001F09 +MKTC_ST(MKTC_RTA_SA_STATE_RESTORE_TASK) +#define MKTC_RESUMETA_END 0xAD001F0F +MKTC_ST(MKTC_RESUMETA_END) + +#define MKTC_RENDERHALT 0xAD002000 +MKTC_ST(MKTC_RENDERHALT) +#define MKTC_RH_CLEARFLAGS 0xAD002001 +MKTC_ST(MKTC_RH_CLEARFLAGS) +#define MKTC_RH_CTRL_ADDR 0xAD002002 +MKTC_ST(MKTC_RH_CTRL_ADDR) +#define MKTC_RH_RGN_ADDR 0xAD002003 +MKTC_ST(MKTC_RH_RGN_ADDR) +#define MKTC_RH_EMPTY_TILE 
0xAD002004 +MKTC_ST(MKTC_RH_EMPTY_TILE) +#define MKTC_RH_EMPTY_LAST_TILE 0xAD002005 +MKTC_ST(MKTC_RH_EMPTY_LAST_TILE) +#define MKTC_RH_3D_TIMEOUT 0xAD002006 +MKTC_ST(MKTC_RH_3D_TIMEOUT) +#define MKTC_RH_NOT_EMPTY 0xAD002007 +MKTC_ST(MKTC_RH_NOT_EMPTY) +#define MKTC_RH_OBJECT_COMPLETE 0xAD002008 +MKTC_ST(MKTC_RH_OBJECT_COMPLETE) +#define MKTC_RH_STREAM_LINK 0xAD002009 +MKTC_ST(MKTC_RH_STREAM_LINK) +#define MKTC_RH_OBJECT_INCOMPLETE 0xAD00200A +MKTC_ST(MKTC_RH_OBJECT_INCOMPLETE) +#define MKTC_RH_PRIM_MASK_PRESENT 0xAD00200B +MKTC_ST(MKTC_RH_PRIM_MASK_PRESENT) +#define MKTC_RH_BYTE_MASK_PRESENT 0xAD00200C +MKTC_ST(MKTC_RH_BYTE_MASK_PRESENT) +#define MKTC_RH_BYTE_MASK_ZERO 0xAD00200D +MKTC_ST(MKTC_RH_BYTE_MASK_ZERO) +#define MKTC_RH_PRIM_MASK_ZERO 0xAD00200E +MKTC_ST(MKTC_RH_PRIM_MASK_ZERO) +#define MKTC_RH_INVALIDATE_OBJECTS 0xAD00200F +MKTC_ST(MKTC_RH_INVALIDATE_OBJECTS) +#define MKTC_RH_OBJECTS_INVALIDATED 0xAD002010 +MKTC_ST(MKTC_RH_OBJECTS_INVALIDATED) +#define MKTC_RH_DPM_RGN_PARSER_IDLE 0xAD002011 +MKTC_ST(MKTC_RH_DPM_RGN_PARSER_IDLE) +#define MKTC_RH_NEXT_RGN_BASE 0xAD002012 +MKTC_ST(MKTC_RH_NEXT_RGN_BASE) +#define MKTC_RH_OCC_EXIT 0xAD002013 +MKTC_ST(MKTC_RH_OCC_EXIT) +#define MKTC_RH_STILL_RUNNING 0xAD002020 +MKTC_ST(MKTC_RH_STILL_RUNNING) +#define MKTC_RH_CLEARMCI 0xAD002021 +MKTC_ST(MKTC_RH_CLEARMCI) +#define MKTC_RH_EOR 0xAD002022 +MKTC_ST(MKTC_RH_EOR) +#define MKTC_RENDERHALT_END 0xAD002030 +MKTC_ST(MKTC_RENDERHALT_END) + +#define MKTC_FIND3D_POWERREQUEST 0xAD002100 +MKTC_ST(MKTC_FIND3D_POWERREQUEST) + +#define MKTC_FIND2D_POWERREQUEST 0xAD002200 +MKTC_ST(MKTC_FIND2D_POWERREQUEST) + +#define MKTC_UKERNEL_INIT 0xAD002300 +MKTC_ST(MKTC_UKERNEL_INIT) +#define MKTC_UKERNEL_INIT_DCS_COMPLETE 0xAD002301 +MKTC_ST(MKTC_UKERNEL_INIT_DCS_COMPLETE) +#define MKTC_UKERNEL_INIT_VDMKICK_COMPLETE 0xAD002303 +MKTC_ST(MKTC_UKERNEL_INIT_VDMKICK_COMPLETE) + +#define MKTC_KICKTRANSFERRENDER_START 0xAD002400 +MKTC_ST(MKTC_KICKTRANSFERRENDER_START) +#define 
MKTC_KICKTRANSFERRENDER_ISP_START 0xAD002401 +MKTC_ST(MKTC_KICKTRANSFERRENDER_ISP_START) +#define MKTC_KICKTRANSFERRENDER_END 0xAD002402 +MKTC_ST(MKTC_KICKTRANSFERRENDER_END) +#define MKTC_DUMMYPROCTRANSFER 0xAD002403 +MKTC_ST(MKTC_DUMMYPROCTRANSFER) +#define MKTC_KTR_TQFENCE 0xAD002404 +MKTC_ST(MKTC_KTR_TQFENCE) +#define MKTC_KICKTRANSFERRENDER_PID 0xAD002405 +MKTC_ST(MKTC_KICKTRANSFERRENDER_PID) + +#define MKTC_HOSTKICK_CLEANUP_RT 0xAD002500 +MKTC_ST(MKTC_HOSTKICK_CLEANUP_RT) +#define MKTC_HOSTKICK_CLEANUP_RC 0xAD002501 +MKTC_ST(MKTC_HOSTKICK_CLEANUP_RC) +#define MKTC_HOSTKICK_CLEANUP_TC 0xAD002502 +MKTC_ST(MKTC_HOSTKICK_CLEANUP_TC) +#define MKTC_HOSTKICK_CLEANUP_2DC 0xAD002503 +MKTC_ST(MKTC_HOSTKICK_CLEANUP_2DC) +#define MKTC_HOSTKICK_CLEANUP_PB 0xAD002504 +MKTC_ST(MKTC_HOSTKICK_CLEANUP_PB) +#define MKTC_HOSTKICK_GETMISCINFO 0xAD002505 +MKTC_ST(MKTC_HOSTKICK_GETMISCINFO) +#define MKTC_HOSTKICK_DATABREAKPOINT 0xAD002506 +MKTC_ST(MKTC_HOSTKICK_DATABREAKPOINT) +#define MKTC_HOSTKICK_SETHWPERFSTATUS 0xAD002507 +MKTC_ST(MKTC_HOSTKICK_SETHWPERFSTATUS) + +#define MKTC_ZEROPC 0xAD002600 +MKTC_ST(MKTC_ZEROPC) + +#define MKTC_ASSERT_FAIL 0xAD002700 +MKTC_ST(MKTC_ASSERT_FAIL) + +#define MKTC_SDLB_ILLEGAL 0xAD002800 +MKTC_ST(MKTC_SDLB_ILLEGAL) + +#define MKTC_SPMEVENT_OUTOFMEM 0xAD002901 +MKTC_ST(MKTC_SPMEVENT_OUTOFMEM) +#define MKTC_SPMEVENT_TATERMINATE 0xAD002902 +MKTC_ST(MKTC_SPMEVENT_TATERMINATE) +#define MKTC_SPMEVENT_END 0xAD002904 +MKTC_ST(MKTC_SPMEVENT_END) + +#define MKTC_SPMLB_OUTOFMEM 0xAD002981 +MKTC_ST(MKTC_SPMLB_OUTOFMEM) +#define MKTC_SPMLB_TATERMINATE 0xAD002982 +MKTC_ST(MKTC_SPMLB_TATERMINATE) +#define MKTC_SPMLB_SPMRENDERFINSHED 0xAD002983 +MKTC_ST(MKTC_SPMLB_SPMRENDERFINSHED) +#define MKTC_SPMLB_END 0xAD002985 +MKTC_ST(MKTC_SPMLB_END) + +#define MKTC_SPM_CHECK_MT_DEADLOCK 0xAD002991 +MKTC_ST(MKTC_SPM_CHECK_MT_DEADLOCK) +#define MKTC_SPM_CHECK_GLOBAL_DEADLOCK 0xAD002992 +MKTC_ST(MKTC_SPM_CHECK_GLOBAL_DEADLOCK) +#define MKTC_SPM_RESERVE_ADDED 0xAD002993 
+MKTC_ST(MKTC_SPM_RESERVE_ADDED) +#define MKTC_SPM_FORCE_GLOBAL_OOM_FAILED 0xAD00299E +MKTC_ST(MKTC_SPM_FORCE_GLOBAL_OOM_FAILED) +#define MKTC_SPM_DEADLOCK_MEM_FAILED 0xAD00299F +MKTC_ST(MKTC_SPM_DEADLOCK_MEM_FAILED) + +#define MKTC_IBC_ILLEGAL 0xAD002A00 +MKTC_ST(MKTC_IBC_ILLEGAL) + +#define MKTC_HWP_CLEARCOUNTERS 0xAD002B00 +MKTC_ST(MKTC_HWP_CLEARCOUNTERS) + +#define MKTC_TA_FRAMENUM 0xAD002C00 +MKTC_ST(MKTC_TA_FRAMENUM) +#define MKTC_3D_FRAMENUM 0xAD002C01 +MKTC_ST(MKTC_3D_FRAMENUM) +#define MKTC_SPM3D_FRAMENUM 0xAD002C02 +MKTC_ST(MKTC_SPM3D_FRAMENUM) + +#define MKTC_HKTA_RENDERCONTEXT 0xAD002D00 +MKTC_ST(MKTC_HKTA_RENDERCONTEXT) +#define MKTC_IDLECORE_REFCOUNT_FAIL 0xAD002E00 +MKTC_ST(MKTC_IDLECORE_REFCOUNT_FAIL) + +#define MKTC_MCISTATE_NOT_CLEARED 0xAD002F00 +MKTC_ST(MKTC_MCISTATE_NOT_CLEARED) + +#define MKTC_LOWERED_TO_PDS_THRESHOLD 0xAD003000 +MKTC_ST(MKTC_LOWERED_TO_PDS_THRESHOLD) +#define MKTC_REDUCE_MAX_VTX_PARTITIONS 0xAD003001 +MKTC_ST(MKTC_REDUCE_MAX_VTX_PARTITIONS) +#define MKTC_KTAOVERRIDE_MAX_VTX_PARTITIONS 0xAD003002 +MKTC_ST(MKTC_KTAOVERRIDE_MAX_VTX_PARTITIONS) +#define MKTC_KTANOOVERRIDE_MAX_VTX_PARTITIONS 0xAD003003 +MKTC_ST(MKTC_KTANOOVERRIDE_MAX_VTX_PARTITIONS) + +#define MKTC_IPRB_NORENDERDETAILS 0xAD003010 +MKTC_ST(MKTC_IPRB_NORENDERDETAILS) +#define MKTC_IPRB_HAVERENDERDETAILS 0xAD003011 +MKTC_ST(MKTC_IPRB_HAVERENDERDETAILS) + +#define MKTC_RENDER_OUT_OF_ORDER 0xAD003020 +MKTC_ST(MKTC_RENDER_OUT_OF_ORDER) +#define MKTC_RENDER_NOT_OUT_OF_ORDER 0xAD003021 +MKTC_ST(MKTC_RENDER_NOT_OUT_OF_ORDER) + +#define MKTC_ZLS_IDLE_BEGIN 0xAD003030 +MKTC_ST(MKTC_ZLS_IDLE_BEGIN) +#define MKTC_ZLS_ISP_CLK_GATING_EN 0xAD003031 +MKTC_ST(MKTC_ZLS_ISP_CLK_GATING_EN) +#define MKTC_ZLS_IDLE_END 0xAD003032 +MKTC_ST(MKTC_ZLS_IDLE_END) + +#endif /* __SGX_UKERNEL_STATUS_CODES_H__ */ + +/****************************************************************************** + End of file (sgx_ukernel_status_codes.h) 
+******************************************************************************/ diff --git a/sgx_km/eurasia_km/services4/include/sgxinfo.h b/sgx_km/eurasia_km/services4/include/sgxinfo.h new file mode 100644 index 0000000..22a258d --- /dev/null +++ b/sgx_km/eurasia_km/services4/include/sgxinfo.h @@ -0,0 +1,341 @@ +/*************************************************************************/ /*! +@Title sgx services structures/functions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description inline functions/structures shared across UM and KM services components +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#if !defined (__SGXINFO_H__) +#define __SGXINFO_H__ + +#include "sgxscript.h" +#include "servicesint.h" +#include "services.h" +#include "sgxapi_km.h" +#include "sgx_mkif_km.h" + + +#define SGX_MAX_DEV_DATA 24 +#define SGX_MAX_INIT_MEM_HANDLES 18 + + +typedef struct _SGX_BRIDGE_INFO_FOR_SRVINIT +{ + IMG_DEV_PHYADDR sPDDevPAddr; + PVRSRV_HEAP_INFO asHeapInfo[PVRSRV_MAX_CLIENT_HEAPS]; +} SGX_BRIDGE_INFO_FOR_SRVINIT; + + +typedef enum _SGXMKIF_CMD_TYPE_ +{ + SGXMKIF_CMD_TA = 0, + SGXMKIF_CMD_TRANSFER = 1, + SGXMKIF_CMD_2D = 2, + SGXMKIF_CMD_POWER = 3, + SGXMKIF_CMD_CONTEXTSUSPEND = 4, + SGXMKIF_CMD_CLEANUP = 5, + SGXMKIF_CMD_GETMISCINFO = 6, + SGXMKIF_CMD_PROCESS_QUEUES = 7, + SGXMKIF_CMD_DATABREAKPOINT = 8, + SGXMKIF_CMD_SETHWPERFSTATUS = 9, + SGXMKIF_CMD_FLUSHPDCACHE = 10, + SGXMKIF_CMD_MAX = 11, + + SGXMKIF_CMD_FORCE_I32 = -1, + +} SGXMKIF_CMD_TYPE; + +typedef struct IMG_COMPAT _SGX_BRIDGE_INIT_INFO_ +{ + IMG_HANDLE hKernelCCBMemInfo; + IMG_HANDLE hKernelCCBCtlMemInfo; + IMG_HANDLE hKernelCCBEventKickerMemInfo; + IMG_HANDLE hKernelSGXHostCtlMemInfo; + IMG_HANDLE hKernelSGXTA3DCtlMemInfo; +#if defined(FIX_HW_BRN_31272) || 
defined(FIX_HW_BRN_31780) || defined(FIX_HW_BRN_33920) + IMG_HANDLE hKernelSGXPTLAWriteBackMemInfo; +#endif + IMG_HANDLE hKernelSGXMiscMemInfo; + + IMG_UINT32 aui32HostKickAddr[SGXMKIF_CMD_MAX]; + IMG_UINT32 ui32ClientBuildOptions; + + SGX_INIT_SCRIPTS sScripts; + + SGX_MISCINFO_STRUCT_SIZES sSGXStructSizes; + +#if defined(SGX_SUPPORT_HWPROFILING) + IMG_HANDLE hKernelHWProfilingMemInfo; +#endif +#if defined(SUPPORT_SGX_HWPERF) + IMG_HANDLE hKernelHWPerfCBMemInfo; +#endif + IMG_HANDLE hKernelTASigBufferMemInfo; + IMG_HANDLE hKernel3DSigBufferMemInfo; + + +#if defined(FIX_HW_BRN_31542) || defined(FIX_HW_BRN_36513) + IMG_HANDLE hKernelClearClipWAVDMStreamMemInfo; + IMG_HANDLE hKernelClearClipWAIndexStreamMemInfo; + IMG_HANDLE hKernelClearClipWAPDSMemInfo; + IMG_HANDLE hKernelClearClipWAUSEMemInfo; + IMG_HANDLE hKernelClearClipWAParamMemInfo; + IMG_HANDLE hKernelClearClipWAPMPTMemInfo; + IMG_HANDLE hKernelClearClipWATPCMemInfo; + IMG_HANDLE hKernelClearClipWAPSGRgnHdrMemInfo; +#endif + +#if defined(SGX_FEATURE_VDM_CONTEXT_SWITCH) && \ + defined(FIX_HW_BRN_33657) && defined(SUPPORT_SECURE_33657_FIX) + IMG_HANDLE hKernelVDMStateUpdateBufferMemInfo; +#endif +#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG) + IMG_HANDLE hKernelEDMStatusBufferMemInfo; +#endif + + IMG_UINT32 ui32EDMTaskReg0; + IMG_UINT32 ui32EDMTaskReg1; + + IMG_UINT32 ui32ClkGateCtl; + IMG_UINT32 ui32ClkGateCtl2; + IMG_UINT32 ui32ClkGateStatusReg; + IMG_UINT32 ui32ClkGateStatusMask; +#if defined(SGX_FEATURE_MP) + IMG_UINT32 ui32MasterClkGateStatusReg; + IMG_UINT32 ui32MasterClkGateStatusMask; + IMG_UINT32 ui32MasterClkGateStatus2Reg; + IMG_UINT32 ui32MasterClkGateStatus2Mask; +#endif /* SGX_FEATURE_MP */ + +#if defined(SGX_FEATURE_AUTOCLOCKGATING) + IMG_BOOL bDisableClockGating; +#elif defined(USE_64BIT_COMPAT) + IMG_UINT32 ui32Padding; +#endif + IMG_UINT32 ui32CacheControl; + + IMG_UINT32 asInitDevData[SGX_MAX_DEV_DATA]; + IMG_HANDLE asInitMemHandles[SGX_MAX_INIT_MEM_HANDLES]; + +} SGX_BRIDGE_INIT_INFO; + + 
+typedef struct _SGX_DEVICE_SYNC_LIST_ +{ + PSGXMKIF_HWDEVICE_SYNC_LIST psHWDeviceSyncList; + + IMG_HANDLE hKernelHWSyncListMemInfo; + PVRSRV_CLIENT_MEM_INFO *psHWDeviceSyncListClientMemInfo; + PVRSRV_CLIENT_MEM_INFO *psAccessResourceClientMemInfo; + + volatile IMG_UINT32 *pui32Lock; + + struct _SGX_DEVICE_SYNC_LIST_ *psNext; + + /* Must be the last variable in the structure */ + IMG_UINT32 ui32NumSyncObjects; + IMG_HANDLE ahSyncHandles[1]; +} SGX_DEVICE_SYNC_LIST, *PSGX_DEVICE_SYNC_LIST; + + +typedef struct _SGX_INTERNEL_STATUS_UPDATE_ +{ + CTL_STATUS sCtlStatus; + IMG_HANDLE hKernelMemInfo; +} SGX_INTERNEL_STATUS_UPDATE; + +typedef struct IMG_COMPAT _SGX_CCB_KICK_ +{ + SGXMKIF_COMMAND sCommand; + IMG_HANDLE hCCBKernelMemInfo; + + IMG_UINT32 ui32NumDstSyncObjects; + IMG_HANDLE hKernelHWSyncListMemInfo; + + /* DST syncs */ + IMG_HANDLE hDstSyncHandles; + + IMG_UINT32 ui32NumTAStatusVals; + IMG_UINT32 ui32Num3DStatusVals; + +#if defined(SUPPORT_SGX_NEW_STATUS_VALS) + SGX_INTERNEL_STATUS_UPDATE asTAStatusUpdate[SGX_MAX_TA_STATUS_VALS]; + SGX_INTERNEL_STATUS_UPDATE as3DStatusUpdate[SGX_MAX_3D_STATUS_VALS]; +#else + IMG_HANDLE ahTAStatusSyncInfo[SGX_MAX_TA_STATUS_VALS]; + IMG_HANDLE ah3DStatusSyncInfo[SGX_MAX_3D_STATUS_VALS]; +#endif + + IMG_BOOL bFirstKickOrResume; +#if defined(NO_HARDWARE) || defined(PDUMP) + IMG_BOOL bTerminateOrAbort; +#endif + IMG_BOOL bLastInScene; + + /* CCB offset of data structure associated with this kick */ + IMG_UINT32 ui32CCBOffset; + + /* SRC syncs */ + IMG_UINT32 ui32NumSrcSyncs; + IMG_HANDLE ahSrcKernelSyncInfo[SGX_MAX_SRC_SYNCS_TA]; + + /* TA/3D dependency data */ + IMG_BOOL bTADependency; + IMG_HANDLE hTA3DSyncInfo; + + IMG_HANDLE hTASyncInfo; + IMG_HANDLE h3DSyncInfo; +#if defined(PDUMP) + IMG_UINT32 ui32CCBDumpWOff; +#endif +#if defined(NO_HARDWARE) + IMG_UINT32 ui32WriteOpsPendingVal; +#endif + IMG_HANDLE hDevMemContext; + IMG_DEV_VIRTADDR sHWRTDataSetDevAddr; + IMG_DEV_VIRTADDR sHWRTDataDevAddr; + IMG_UINT32 ui32FrameNum; +#if 
defined(SUPPORT_PVRSRV_ANDROID_SYSTRACE) && defined(EUR_CR_TIMER) + IMG_BOOL bIsFirstKick; +#endif +} SGX_CCB_KICK; + + +/*! + ****************************************************************************** + * shared client/kernel device information structure for SGX + *****************************************************************************/ +#define SGX_KERNEL_USE_CODE_BASE_INDEX 15 + + +/*! + ****************************************************************************** + * Client device information structure for SGX + *****************************************************************************/ +typedef struct IMG_COMPAT _SGX_CLIENT_INFO_ +{ + IMG_UINT32 ui32ProcessID; /*!< ID of process controlling SGX device */ + IMG_UINT32 asDevData[SGX_MAX_DEV_DATA]; +} SGX_CLIENT_INFO; + +/*! + ****************************************************************************** + * Internal device information structure for SGX + *****************************************************************************/ +typedef struct IMG_COMPAT _SGX_INTERNAL_DEVINFO_ +{ + IMG_HANDLE hHostCtlKernelMemInfoHandle; + IMG_BOOL bForcePTOff; +} SGX_INTERNAL_DEVINFO; + + +typedef struct _SGX_INTERNAL_DEVINFO_KM_ +{ + IMG_UINT32 ui32Flags; + IMG_HANDLE hHostCtlKernelMemInfoHandle; + IMG_BOOL bForcePTOff; +} SGX_INTERNAL_DEVINFO_KM; + + +#if defined(TRANSFER_QUEUE) +typedef struct IMG_COMPAT _PVRSRV_TRANSFER_SGX_KICK_ +{ + IMG_HANDLE hCCBMemInfo; + IMG_UINT32 ui32SharedCmdCCBOffset; + + IMG_DEV_VIRTADDR sHWTransferContextDevVAddr; + + IMG_HANDLE hTASyncInfo; + IMG_HANDLE h3DSyncInfo; + + IMG_UINT32 ui32NumSrcSync; + IMG_HANDLE ahSrcSyncInfo[SGX_MAX_TRANSFER_SYNC_OPS]; + + IMG_UINT32 ui32NumDstSync; + IMG_HANDLE ahDstSyncInfo[SGX_MAX_TRANSFER_SYNC_OPS]; + + IMG_UINT32 ui32Flags; + + IMG_UINT32 ui32PDumpFlags; +#if defined(PDUMP) + IMG_UINT32 ui32CCBDumpWOff; +#endif + IMG_HANDLE hDevMemContext; +#if defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) || defined(PVR_ANDROID_NATIVE_WINDOW_HAS_FENCE) + /* 
Android >JB MR1 doesn't use ahSrcSyncInfo for synchronization */ + IMG_INT64 iFenceFd; +#endif +} PVRSRV_TRANSFER_SGX_KICK, *PPVRSRV_TRANSFER_SGX_KICK; + +#if defined(SGX_FEATURE_2D_HARDWARE) +typedef struct IMG_COMPAT _PVRSRV_2D_SGX_KICK_ +{ + IMG_HANDLE hCCBMemInfo; + IMG_UINT32 ui32SharedCmdCCBOffset; + + IMG_DEV_VIRTADDR sHW2DContextDevVAddr; + + IMG_UINT32 ui32NumSrcSync; + IMG_HANDLE ahSrcSyncInfo[SGX_MAX_2D_SRC_SYNC_OPS]; + + /* need to be able to check reads and writes on dest, and update writes */ + IMG_HANDLE hDstSyncInfo; + + /* need to be able to check reads and writes on TA ops, and update writes */ + IMG_HANDLE hTASyncInfo; + + /* need to be able to check reads and writes on 2D ops, and update writes */ + IMG_HANDLE h3DSyncInfo; + + IMG_UINT32 ui32PDumpFlags; +#if defined(PDUMP) + IMG_UINT32 ui32CCBDumpWOff; +#endif + IMG_HANDLE hDevMemContext; +#if defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) || defined(PVR_ANDROID_NATIVE_WINDOW_HAS_FENCE) + /* Android >JB MR1 doesn't use ahSrcSyncInfo for synchronization */ + IMG_INT iFenceFd; +#endif +} PVRSRV_2D_SGX_KICK, *PPVRSRV_2D_SGX_KICK; +#endif /* defined(SGX_FEATURE_2D_HARDWARE) */ +#endif /* defined(TRANSFER_QUEUE) */ + + +#endif /* __SGXINFO_H__ */ +/****************************************************************************** + End of file (sgxinfo.h) +******************************************************************************/ diff --git a/sgx_km/eurasia_km/services4/srvkm/bridged/bridged_pvr_bridge.c b/sgx_km/eurasia_km/services4/srvkm/bridged/bridged_pvr_bridge.c new file mode 100644 index 0000000..68a0f93 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/bridged/bridged_pvr_bridge.c @@ -0,0 +1,5024 @@ +/*************************************************************************/ /*! +@Title PVR Common Bridge Module (kernel side) +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@Description Receives calls from the user portion of services and + despatches them to functions in the kernel portion. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + + + +#include + +#include "img_defs.h" +#include "services.h" +#include "pvr_bridge_km.h" +#include "pvr_debug.h" +#include "ra.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_SGX) +#include "sgx_bridge.h" +#endif +#if defined(SUPPORT_VGX) +#include "vgx_bridge.h" +#endif +#if defined(SUPPORT_MSVDX) +#include "msvdx_bridge.h" +#endif +#include "perproc.h" +#include "device.h" +#include "buffer_manager.h" +#include "refcount.h" + +#include "pdump_km.h" +#include "syscommon.h" + +#include "bridged_pvr_bridge.h" +#if defined(SUPPORT_SGX) +#include "bridged_sgx_bridge.h" +#endif +#if defined(SUPPORT_VGX) +#include "bridged_vgx_bridge.h" +#endif +#if defined(SUPPORT_MSVDX) +#include "bridged_msvdx_bridge.h" +#endif + +#include "env_data.h" +#include "ttrace.h" +#include "ttrace_tokens.h" + +#if defined (__linux__) || defined(__QNXNTO__) +#include "mmap.h" +#endif + + +#if defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) || defined(PVR_ANDROID_NATIVE_WINDOW_HAS_FENCE) +#include +#include +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)) +#include +#elif (LINUX_VERSION_CODE < KERNEL_VERSION(4,9,0)) +#include <../drivers/staging/android/sync.h> +#else +#include <../drivers/dma-buf/sync_debug.h> +#endif +#endif + +#include "srvkm.h" + +/* FIXME: we should include an OS specific header here to allow configuration of + * which functions should be excluded (like the shared 
srvclient bridge code) + * so that ports may choose to override certain things. */ + +/* For the purpose of maintainability, it is intended that this file should not + * contain large amounts of OS specific #ifdefs. Headers are fine, and perhaps + * a few one liners, but for anything more, please find a way to add e.g. + * an osfunc.c abstraction or override the entire function in question within + * env,*,pvr_bridge_k.c + */ + + +PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY g_BridgeDispatchTable[BRIDGE_DISPATCH_TABLE_ENTRY_COUNT]; + +#if defined(DEBUG_BRIDGE_KM) +PVRSRV_BRIDGE_GLOBAL_STATS g_BridgeGlobalStats; +#endif + +#if defined(PVR_SECURE_HANDLES) +static IMG_BOOL abSharedDeviceMemHeap[PVRSRV_MAX_CLIENT_HEAPS]; +static IMG_BOOL *pbSharedDeviceMemHeap = abSharedDeviceMemHeap; +#else +static IMG_BOOL *pbSharedDeviceMemHeap = (IMG_BOOL*)IMG_NULL; +#endif + + +#if defined(DEBUG_BRIDGE_KM) +PVRSRV_ERROR +CopyFromUserWrapper(PVRSRV_PER_PROCESS_DATA *pProcData, + IMG_UINT32 ui32BridgeID, + IMG_VOID *pvDest, + IMG_VOID *pvSrc, + IMG_UINT32 ui32Size) +{ + g_BridgeDispatchTable[ui32BridgeID].ui32CopyFromUserTotalBytes+=ui32Size; + g_BridgeGlobalStats.ui32TotalCopyFromUserBytes+=ui32Size; + return OSCopyFromUser(pProcData, pvDest, pvSrc, ui32Size); +} +PVRSRV_ERROR +CopyToUserWrapper(PVRSRV_PER_PROCESS_DATA *pProcData, + IMG_UINT32 ui32BridgeID, + IMG_VOID *pvDest, + IMG_VOID *pvSrc, + IMG_UINT32 ui32Size) +{ + g_BridgeDispatchTable[ui32BridgeID].ui32CopyToUserTotalBytes+=ui32Size; + g_BridgeGlobalStats.ui32TotalCopyToUserBytes+=ui32Size; + return OSCopyToUser(pProcData, pvDest, pvSrc, ui32Size); +} +#endif + + +static IMG_INT +PVRSRVEnumerateDevicesBW(IMG_UINT32 ui32BridgeID, + IMG_VOID *psBridgeIn, + PVRSRV_BRIDGE_OUT_ENUMDEVICE *psEnumDeviceOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ENUM_DEVICES); + + PVR_UNREFERENCED_PARAMETER(psPerProc); + PVR_UNREFERENCED_PARAMETER(psBridgeIn); + + psEnumDeviceOUT->eError = + 
PVRSRVEnumerateDevicesKM(&psEnumDeviceOUT->ui32NumDevices, + psEnumDeviceOUT->asDeviceIdentifier); + + return 0; +} + +static IMG_INT +PVRSRVAcquireDeviceDataBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_ACQUIRE_DEVICEINFO *psAcquireDevInfoIN, + PVRSRV_BRIDGE_OUT_ACQUIRE_DEVICEINFO *psAcquireDevInfoOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_HANDLE hDevCookieInt; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ACQUIRE_DEVICEINFO); + + psAcquireDevInfoOUT->eError = + PVRSRVAcquireDeviceDataKM(psAcquireDevInfoIN->uiDevIndex, + psAcquireDevInfoIN->eDeviceType, + &hDevCookieInt); + if(psAcquireDevInfoOUT->eError != PVRSRV_OK) + { + return 0; + } + + /* + * Handle is not allocated in batch mode, as there is no resource + * allocation to undo if the handle allocation fails. + */ + psAcquireDevInfoOUT->eError = + PVRSRVAllocHandle(psPerProc->psHandleBase, + &psAcquireDevInfoOUT->hDevCookie, + hDevCookieInt, + PVRSRV_HANDLE_TYPE_DEV_NODE, + PVRSRV_HANDLE_ALLOC_FLAG_SHARED); + + return 0; +} + + +static IMG_INT +PVRSRVCreateDeviceMemContextBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_CREATE_DEVMEMCONTEXT *psCreateDevMemContextIN, + PVRSRV_BRIDGE_OUT_CREATE_DEVMEMCONTEXT *psCreateDevMemContextOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_HANDLE hDevCookieInt; + IMG_HANDLE hDevMemContextInt; + IMG_UINT32 i; + IMG_BOOL bCreated; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CREATE_DEVMEMCONTEXT); + + /* + * We potentially need one handle for the device memory context, + * and one handle for each client heap. 
+ */ + NEW_HANDLE_BATCH_OR_ERROR(psCreateDevMemContextOUT->eError, psPerProc, PVRSRV_MAX_CLIENT_HEAPS + 1) + + psCreateDevMemContextOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt, + psCreateDevMemContextIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + + if(psCreateDevMemContextOUT->eError != PVRSRV_OK) + { + return 0; + } + + psCreateDevMemContextOUT->eError = + PVRSRVCreateDeviceMemContextKM(hDevCookieInt, + psPerProc, + &hDevMemContextInt, + &psCreateDevMemContextOUT->ui32ClientHeapCount, + &psCreateDevMemContextOUT->sHeapInfo[0], + &bCreated, + pbSharedDeviceMemHeap); + + if(psCreateDevMemContextOUT->eError != PVRSRV_OK) + { + return 0; + } + + /* + * Only allocate a handle if the device memory context was created. + * If an existing context was returned, lookup the existing + * handle. + */ + if(bCreated) + { + PVRSRVAllocHandleNR(psPerProc->psHandleBase, + &psCreateDevMemContextOUT->hDevMemContext, + hDevMemContextInt, + PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT, + PVRSRV_HANDLE_ALLOC_FLAG_NONE); + } + else + { + psCreateDevMemContextOUT->eError = + PVRSRVFindHandle(psPerProc->psHandleBase, + &psCreateDevMemContextOUT->hDevMemContext, + hDevMemContextInt, + PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT); + if(psCreateDevMemContextOUT->eError != PVRSRV_OK) + { + return 0; + } + } + + for(i = 0; i < psCreateDevMemContextOUT->ui32ClientHeapCount; i++) + { + IMG_HANDLE hDevMemHeapExt; + +#if defined(PVR_SECURE_HANDLES) + if(abSharedDeviceMemHeap[i]) +#endif + { + /* + * Heaps shared by everybody. These heaps are not + * created as part of the device memory context + * creation, and exist for the lifetime of the + * driver, hence, we use shared handles for these + * heaps. + */ + PVRSRVAllocHandleNR(psPerProc->psHandleBase, &hDevMemHeapExt, + psCreateDevMemContextOUT->sHeapInfo[i].hDevMemHeap, + PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP, + PVRSRV_HANDLE_ALLOC_FLAG_SHARED); + } +#if defined(PVR_SECURE_HANDLES) + else + { + /* + * Heaps belonging to this context. 
The handles for + * these are made subhandles of the memory context + * handle, so that they are automatically deallocated + * when the memory context handle is deallocated. + */ + if(bCreated) + { + PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, &hDevMemHeapExt, + psCreateDevMemContextOUT->sHeapInfo[i].hDevMemHeap, + PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP, + PVRSRV_HANDLE_ALLOC_FLAG_NONE, + psCreateDevMemContextOUT->hDevMemContext); + } + else + { + psCreateDevMemContextOUT->eError = + PVRSRVFindHandle(psPerProc->psHandleBase, + &hDevMemHeapExt, + psCreateDevMemContextOUT->sHeapInfo[i].hDevMemHeap, + PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP); + if(psCreateDevMemContextOUT->eError != PVRSRV_OK) + { + return 0; + } + } + } +#endif + psCreateDevMemContextOUT->sHeapInfo[i].hDevMemHeap = hDevMemHeapExt; + } + + COMMIT_HANDLE_BATCH_OR_ERROR(psCreateDevMemContextOUT->eError, psPerProc) + + return 0; +} + +static IMG_INT +PVRSRVDestroyDeviceMemContextBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_DESTROY_DEVMEMCONTEXT *psDestroyDevMemContextIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_HANDLE hDevCookieInt; + IMG_HANDLE hDevMemContextInt; + IMG_BOOL bDestroyed; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_DESTROY_DEVMEMCONTEXT); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt, + psDestroyDevMemContextIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemContextInt, + psDestroyDevMemContextIN->hDevMemContext, + PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT); + + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = + PVRSRVDestroyDeviceMemContextKM(hDevCookieInt, hDevMemContextInt, &bDestroyed); + + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + if(bDestroyed) + { + psRetOUT->eError = + PVRSRVReleaseHandle(psPerProc->psHandleBase, + 
psDestroyDevMemContextIN->hDevMemContext, + PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT); + } + + return 0; +} + + +static IMG_INT +PVRSRVGetDeviceMemHeapInfoBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_GET_DEVMEM_HEAPINFO *psGetDevMemHeapInfoIN, + PVRSRV_BRIDGE_OUT_GET_DEVMEM_HEAPINFO *psGetDevMemHeapInfoOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_HANDLE hDevCookieInt; + IMG_HANDLE hDevMemContextInt; + IMG_UINT32 i; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_DEVMEM_HEAPINFO); + + NEW_HANDLE_BATCH_OR_ERROR(psGetDevMemHeapInfoOUT->eError, psPerProc, PVRSRV_MAX_CLIENT_HEAPS) + + psGetDevMemHeapInfoOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt, + psGetDevMemHeapInfoIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + + if(psGetDevMemHeapInfoOUT->eError != PVRSRV_OK) + { + return 0; + } + + psGetDevMemHeapInfoOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemContextInt, + psGetDevMemHeapInfoIN->hDevMemContext, + PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT); + + if(psGetDevMemHeapInfoOUT->eError != PVRSRV_OK) + { + return 0; + } + + psGetDevMemHeapInfoOUT->eError = + PVRSRVGetDeviceMemHeapInfoKM(hDevCookieInt, + hDevMemContextInt, + &psGetDevMemHeapInfoOUT->ui32ClientHeapCount, + &psGetDevMemHeapInfoOUT->sHeapInfo[0], + pbSharedDeviceMemHeap); + + if(psGetDevMemHeapInfoOUT->eError != PVRSRV_OK) + { + return 0; + } + + for(i = 0; i < psGetDevMemHeapInfoOUT->ui32ClientHeapCount; i++) + { + IMG_HANDLE hDevMemHeapExt; + +#if defined(PVR_SECURE_HANDLES) + if(abSharedDeviceMemHeap[i]) +#endif + { + /* + * Heaps shared by everybody. These heaps are not + * created as part of the device memory context + * creation, and exist for the lifetime of the + * driver, hence, we use shared handles for these + * heaps. 
+ */ + PVRSRVAllocHandleNR(psPerProc->psHandleBase, &hDevMemHeapExt, + psGetDevMemHeapInfoOUT->sHeapInfo[i].hDevMemHeap, + PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP, + PVRSRV_HANDLE_ALLOC_FLAG_SHARED); + } +#if defined(PVR_SECURE_HANDLES) + else + { + /* + * Heaps belonging to this context. The handles for + * these are made subhandles of the memory context + * handle, so that they are automatically deallocated + * when the memory context handle is deallocated. + */ + psGetDevMemHeapInfoOUT->eError = + PVRSRVFindHandle(psPerProc->psHandleBase, + &hDevMemHeapExt, + psGetDevMemHeapInfoOUT->sHeapInfo[i].hDevMemHeap, + PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP); + if(psGetDevMemHeapInfoOUT->eError != PVRSRV_OK) + { + return 0; + } + } +#endif + psGetDevMemHeapInfoOUT->sHeapInfo[i].hDevMemHeap = hDevMemHeapExt; + } + + COMMIT_HANDLE_BATCH_OR_ERROR(psGetDevMemHeapInfoOUT->eError, psPerProc) + + return 0; +} + + +#if defined(OS_PVRSRV_ALLOC_DEVICE_MEM_BW) +/* customised version */ +IMG_INT +PVRSRVAllocDeviceMemBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_ALLOCDEVICEMEM *psAllocDeviceMemIN, + PVRSRV_BRIDGE_OUT_ALLOCDEVICEMEM *psAllocDeviceMemOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc); +#else +static IMG_INT +PVRSRVAllocDeviceMemBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_ALLOCDEVICEMEM *psAllocDeviceMemIN, + PVRSRV_BRIDGE_OUT_ALLOCDEVICEMEM *psAllocDeviceMemOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_KERNEL_MEM_INFO *psMemInfo; + IMG_HANDLE hDevCookieInt; + IMG_HANDLE hDevMemHeapInt; + IMG_UINT32 ui32ShareIndex; + IMG_BOOL bUseShareMemWorkaround; + IMG_BOOL *pabMapChunk = IMG_NULL; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ALLOC_DEVICEMEM); + + NEW_HANDLE_BATCH_OR_ERROR(psAllocDeviceMemOUT->eError, psPerProc, 2) + + /* Do same sanity checking */ + if (psAllocDeviceMemIN->ui32Attribs & PVRSRV_MEM_SPARSE) + { + if (psAllocDeviceMemIN->ui32NumPhysChunks > psAllocDeviceMemIN->ui32NumVirtChunks) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocDeviceMemBW: more physical 
chunks then virtual space")); + psAllocDeviceMemOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + return 0; + } + + if (psAllocDeviceMemIN->hMapChunk == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocDeviceMemBW: Called in sparse mapping mode but without MapChunk array")); + psAllocDeviceMemOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + return 0; + } + } + + psAllocDeviceMemOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt, + psAllocDeviceMemIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + + if(psAllocDeviceMemOUT->eError != PVRSRV_OK) + { + return 0; + } + + psAllocDeviceMemOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemHeapInt, + psAllocDeviceMemIN->hDevMemHeap, + PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP); + + if(psAllocDeviceMemOUT->eError != PVRSRV_OK) + { + return 0; + } + + /* Memory sharing workaround, version 2 */ + + bUseShareMemWorkaround = ((psAllocDeviceMemIN->ui32Attribs & PVRSRV_MEM_XPROC) != 0) ? IMG_TRUE : IMG_FALSE; + ui32ShareIndex = 7654321; /* stops MSVC compiler warning */ + + if (bUseShareMemWorkaround) + { + /* allocate a shared-surface ID, prior to the call to AllocDeviceMem */ + /* We could plumb in an extra argument, but for now, we'll keep the + shared-surface ID as a piece of global state, and rely upon the + bridge mutex to make it safe for us */ + + psAllocDeviceMemOUT->eError = + BM_XProcWorkaroundFindNewBufferAndSetShareIndex(&ui32ShareIndex); + if(psAllocDeviceMemOUT->eError != PVRSRV_OK) + { + return 0; + } + } + + /* Check access to private data, if provided */ + if(psAllocDeviceMemIN->hPrivData) + { + if(!OSAccessOK(PVR_VERIFY_READ, + psAllocDeviceMemIN->hPrivData, + psAllocDeviceMemIN->ui32PrivDataLength)) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocDeviceMemBW: Access check failed for pvPrivData")); + return -EFAULT; + } + } + + if (psAllocDeviceMemIN->ui32Attribs & PVRSRV_MEM_SPARSE) + { + /* Check access to the sparse mapping table, if provided */ + if(!OSAccessOK(PVR_VERIFY_READ, + 
psAllocDeviceMemIN->hMapChunk, + psAllocDeviceMemIN->ui32NumVirtChunks)) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocDeviceMemBW: Access check failed for pabMapChunk")); + return -EFAULT; + } + + psAllocDeviceMemOUT->eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(IMG_BOOL) * psAllocDeviceMemIN->ui32NumVirtChunks, + (IMG_VOID **) &pabMapChunk, + 0, + "MapChunk kernel copy"); + if (psAllocDeviceMemOUT->eError != PVRSRV_OK) + { + return 0; + } + + psAllocDeviceMemOUT->eError = OSCopyFromUser(psPerProc, + pabMapChunk, + psAllocDeviceMemIN->hMapChunk, + sizeof(IMG_BOOL) * psAllocDeviceMemIN->ui32NumVirtChunks); + if (psAllocDeviceMemOUT->eError != PVRSRV_OK) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(IMG_BOOL) * psAllocDeviceMemIN->ui32NumVirtChunks, + pabMapChunk, + 0); + return 0; + } + } + + + psAllocDeviceMemOUT->eError = + PVRSRVAllocDeviceMemKM(hDevCookieInt, + psPerProc, + hDevMemHeapInt, + psAllocDeviceMemIN->ui32Attribs, + psAllocDeviceMemIN->uSize, + psAllocDeviceMemIN->uAlignment, + psAllocDeviceMemIN->hPrivData, + psAllocDeviceMemIN->ui32PrivDataLength, + psAllocDeviceMemIN->ui32ChunkSize, + psAllocDeviceMemIN->ui32NumVirtChunks, + psAllocDeviceMemIN->ui32NumPhysChunks, + pabMapChunk, + &psMemInfo, + "" /*FIXME: add something meaningful*/); + if (psAllocDeviceMemOUT->eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocDeviceMemBW: PVRSRVAllocDeviceMemKM failed with eError = %d", psAllocDeviceMemOUT->eError)); + return -ENOMEM; + } + + if (bUseShareMemWorkaround) + { + PVR_ASSERT(ui32ShareIndex != 7654321); + BM_XProcWorkaroundUnsetShareIndex(ui32ShareIndex); + } + + if(psAllocDeviceMemOUT->eError != PVRSRV_OK) + { + return 0; + } + + psMemInfo->sShareMemWorkaround.bInUse = bUseShareMemWorkaround; + if (bUseShareMemWorkaround) + { + PVR_ASSERT(ui32ShareIndex != 7654321); + psMemInfo->sShareMemWorkaround.ui32ShareIndex = ui32ShareIndex; + psMemInfo->sShareMemWorkaround.hDevCookieInt = hDevCookieInt; + 
psMemInfo->sShareMemWorkaround.ui32OrigReqAttribs = psAllocDeviceMemIN->ui32Attribs; + psMemInfo->sShareMemWorkaround.ui32OrigReqSize = (IMG_UINT32)psAllocDeviceMemIN->uSize; + psMemInfo->sShareMemWorkaround.ui32OrigReqAlignment = (IMG_UINT32)psAllocDeviceMemIN->uAlignment; + } + + OSMemSet(&psAllocDeviceMemOUT->sClientMemInfo, + 0, + sizeof(psAllocDeviceMemOUT->sClientMemInfo)); + + psAllocDeviceMemOUT->sClientMemInfo.pvLinAddrKM = + psMemInfo->pvLinAddrKM; + +#if defined (__linux__) + psAllocDeviceMemOUT->sClientMemInfo.pvLinAddr = 0; +#else + psAllocDeviceMemOUT->sClientMemInfo.pvLinAddr = psMemInfo->pvLinAddrKM; +#endif + psAllocDeviceMemOUT->sClientMemInfo.sDevVAddr = psMemInfo->sDevVAddr; + psAllocDeviceMemOUT->sClientMemInfo.ui32Flags = psMemInfo->ui32Flags; + psAllocDeviceMemOUT->sClientMemInfo.uAllocSize = psMemInfo->uAllocSize; + psAllocDeviceMemOUT->sClientMemInfo.hMappingInfo = psMemInfo->sMemBlk.hOSMemHandle; +#if defined (PVRSRV_DEVMEM_TIME_STATS) + psAllocDeviceMemOUT->sClientMemInfo.sDevMemTimingStats.sDevMemMapTimes.ui32TimeToDevMap = psMemInfo->ui32TimeToDevMap; +#endif + PVRSRVAllocHandleNR(psPerProc->psHandleBase, + &psAllocDeviceMemOUT->sClientMemInfo.hKernelMemInfo, + psMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO, + PVRSRV_HANDLE_ALLOC_FLAG_NONE); + + + if(psAllocDeviceMemIN->ui32Attribs & PVRSRV_MEM_NO_SYNCOBJ) + { + /* signal no syncinfo */ + OSMemSet(&psAllocDeviceMemOUT->sClientSyncInfo, + 0, + sizeof (PVRSRV_CLIENT_SYNC_INFO)); + psAllocDeviceMemOUT->sClientMemInfo.psClientSyncInfo = IMG_NULL; + } + else + { + /* and setup the sync info */ + +#if !defined(PVRSRV_DISABLE_UM_SYNCOBJ_MAPPINGS) + psAllocDeviceMemOUT->sClientSyncInfo.psSyncData = + psMemInfo->psKernelSyncInfo->psSyncData; + psAllocDeviceMemOUT->sClientSyncInfo.sWriteOpsCompleteDevVAddr = + psMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr; + psAllocDeviceMemOUT->sClientSyncInfo.sReadOpsCompleteDevVAddr = + psMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr; + 
psAllocDeviceMemOUT->sClientSyncInfo.sReadOps2CompleteDevVAddr = + psMemInfo->psKernelSyncInfo->sReadOps2CompleteDevVAddr; + + psAllocDeviceMemOUT->sClientSyncInfo.hMappingInfo = + psMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle; +#endif + + PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, + &psAllocDeviceMemOUT->sClientSyncInfo.hKernelSyncInfo, + psMemInfo->psKernelSyncInfo, + PVRSRV_HANDLE_TYPE_SYNC_INFO, + PVRSRV_HANDLE_ALLOC_FLAG_NONE, + psAllocDeviceMemOUT->sClientMemInfo.hKernelMemInfo); + + psAllocDeviceMemOUT->sClientMemInfo.psClientSyncInfo = + &psAllocDeviceMemOUT->sClientSyncInfo; + } + + COMMIT_HANDLE_BATCH_OR_ERROR(psAllocDeviceMemOUT->eError, psPerProc) + + return 0; +} + +#endif /* OS_PVRSRV_ALLOC_DEVICE_MEM_BW */ + +static IMG_INT +PVRSRVFreeDeviceMemBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_FREEDEVICEMEM *psFreeDeviceMemIN, + PVRSRV_BRIDGE_OUT_FREEDEVICEMEM *psFreeDeviceMemOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_HANDLE hDevCookieInt; + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo; +#if defined (PVRSRV_DEVMEM_TIME_STATS) + IMG_UINT32 ui32TimeToDevUnmap; +#endif + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_FREE_DEVICEMEM); + + psFreeDeviceMemOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt, + psFreeDeviceMemIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + + if(psFreeDeviceMemOUT->eError != PVRSRV_OK) + { + return 0; + } + + psFreeDeviceMemOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + (IMG_PVOID *)&psKernelMemInfo, + psFreeDeviceMemIN->hKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + + if(psFreeDeviceMemOUT->eError != PVRSRV_OK) + { + return 0; + } + +#if defined (PVRSRV_DEVMEM_TIME_STATS) + psKernelMemInfo->pui32TimeToDevUnmap = &ui32TimeToDevUnmap; +#endif + + psFreeDeviceMemOUT->eError = PVRSRVFreeDeviceMemKM(hDevCookieInt, psKernelMemInfo); + + if(psFreeDeviceMemOUT->eError != PVRSRV_OK) + { + return 0; + } + +#if defined (PVRSRV_DEVMEM_TIME_STATS) + 
psFreeDeviceMemOUT->ui32TimeToDevUnmap = ui32TimeToDevUnmap; +#endif + + psFreeDeviceMemOUT->eError = + PVRSRVReleaseHandle(psPerProc->psHandleBase, + psFreeDeviceMemIN->hKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + + return 0; +} + + +static IMG_INT +PVRSRVExportDeviceMemBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_EXPORTDEVICEMEM *psExportDeviceMemIN, + PVRSRV_BRIDGE_OUT_EXPORTDEVICEMEM *psExportDeviceMemOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_HANDLE hDevCookieInt; + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo; + + PVR_ASSERT(ui32BridgeID == PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_EXPORT_DEVICEMEM) || + ui32BridgeID == PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_EXPORT_DEVICEMEM_2)); + PVR_UNREFERENCED_PARAMETER(ui32BridgeID); + + /* find the device cookie */ + psExportDeviceMemOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDevCookieInt, + psExportDeviceMemIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + + if(psExportDeviceMemOUT->eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVExportDeviceMemBW: can't find devcookie")); + return 0; + } + + /* find the kernel meminfo from the process handle list */ + psExportDeviceMemOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + (IMG_PVOID *)&psKernelMemInfo, + psExportDeviceMemIN->hKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + + if(psExportDeviceMemOUT->eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVExportDeviceMemBW: can't find kernel meminfo")); + return 0; + } + + /* see if it's already exported */ + psExportDeviceMemOUT->eError = + PVRSRVFindHandle(KERNEL_HANDLE_BASE, + &psExportDeviceMemOUT->hMemInfo, + psKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if(psExportDeviceMemOUT->eError == PVRSRV_OK) + { + /* it's already exported */ + PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVExportDeviceMemBW: allocation is already exported")); + return 0; + } + + /* export the allocation */ + psExportDeviceMemOUT->eError = PVRSRVAllocHandle(KERNEL_HANDLE_BASE, + 
&psExportDeviceMemOUT->hMemInfo, + psKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO, + PVRSRV_HANDLE_ALLOC_FLAG_NONE); + if (psExportDeviceMemOUT->eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVExportDeviceMemBW: failed to allocate handle from global handle list")); + return 0; + } + + /* mark the meminfo as 'exported' */ + psKernelMemInfo->ui32Flags |= PVRSRV_MEM_EXPORTED; + + return 0; +} + + +static IMG_INT +PVRSRVMapDeviceMemoryBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_MAP_DEV_MEMORY *psMapDevMemIN, + PVRSRV_BRIDGE_OUT_MAP_DEV_MEMORY *psMapDevMemOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_KERNEL_MEM_INFO *psSrcKernelMemInfo = IMG_NULL; + PVRSRV_KERNEL_MEM_INFO *psDstKernelMemInfo = IMG_NULL; + IMG_HANDLE hDstDevMemHeap = IMG_NULL; + + PVR_ASSERT(ui32BridgeID == PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_MAP_DEV_MEMORY) || + ui32BridgeID == PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_MAP_DEV_MEMORY_2)); + PVR_UNREFERENCED_PARAMETER(ui32BridgeID); + + NEW_HANDLE_BATCH_OR_ERROR(psMapDevMemOUT->eError, psPerProc, 2) + + /* lookup srcmeminfo handle */ + psMapDevMemOUT->eError = PVRSRVLookupHandle(KERNEL_HANDLE_BASE, + (IMG_VOID**)&psSrcKernelMemInfo, + psMapDevMemIN->hKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if(psMapDevMemOUT->eError != PVRSRV_OK) + { + return 0; + } + + /* lookup dev mem heap handle */ + psMapDevMemOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDstDevMemHeap, + psMapDevMemIN->hDstDevMemHeap, + PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP); + if(psMapDevMemOUT->eError != PVRSRV_OK) + { + return 0; + } + + /* check for workaround */ + if (psSrcKernelMemInfo->sShareMemWorkaround.bInUse) + { + PVR_DPF((PVR_DBG_MESSAGE, "using the mem wrap workaround.")); + + /* Ensure we get the same ID for this allocation, such that it + inherits the same physical block. Rather than add a lot of + plumbing to several APIs, we call into buffer manager directly + to set "global" state. 
This works only if we make + this allocation while holding the bridge mutex and don't + make any other allocations (because the state persists and + would affect other device memory allocations too). It is + important that we bracket the PVRSRVAllocDeviceMemKM() call + with this Set/Unset pair. */ + psMapDevMemOUT->eError = BM_XProcWorkaroundSetShareIndex(psSrcKernelMemInfo->sShareMemWorkaround.ui32ShareIndex); + if(psMapDevMemOUT->eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVMapDeviceMemoryBW(): failed to recycle shared buffer")); + return 0; + } + + psMapDevMemOUT->eError = + PVRSRVAllocDeviceMemKM(psSrcKernelMemInfo->sShareMemWorkaround.hDevCookieInt, + psPerProc, + hDstDevMemHeap, + psSrcKernelMemInfo->sShareMemWorkaround.ui32OrigReqAttribs | PVRSRV_MEM_NO_SYNCOBJ, + psSrcKernelMemInfo->sShareMemWorkaround.ui32OrigReqSize, + psSrcKernelMemInfo->sShareMemWorkaround.ui32OrigReqAlignment, + IMG_NULL, + 0, + /* FIXME: Do we need to be able to export sparse memory? */ + 0,0,0,IMG_NULL, /* No sparse mapping data */ + &psDstKernelMemInfo, + "" /*FIXME: add something meaningful*/); + /* counterpart of the above "SetShareIndex". 
NB: this must be + done in both the success and failure paths of the + AllocDeviceMemKM() call */ + BM_XProcWorkaroundUnsetShareIndex(psSrcKernelMemInfo->sShareMemWorkaround.ui32ShareIndex); + if(psMapDevMemOUT->eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVMapDeviceMemoryBW: Failed to create allocation for cross-process memory map")); + return 0; + } + + if(psSrcKernelMemInfo->psKernelSyncInfo) + { + PVRSRVKernelSyncInfoIncRef(psSrcKernelMemInfo->psKernelSyncInfo, psSrcKernelMemInfo); + } + + psDstKernelMemInfo->psKernelSyncInfo = psSrcKernelMemInfo->psKernelSyncInfo; + } + else + { + /* map the meminfo to the target heap and memory context */ + psMapDevMemOUT->eError = PVRSRVMapDeviceMemoryKM(psPerProc, + psSrcKernelMemInfo, + hDstDevMemHeap, + &psDstKernelMemInfo); + if(psMapDevMemOUT->eError != PVRSRV_OK) + { + return 0; + } + } + + /* copy the workaround info */ + psDstKernelMemInfo->sShareMemWorkaround = psSrcKernelMemInfo->sShareMemWorkaround; + + OSMemSet(&psMapDevMemOUT->sDstClientMemInfo, + 0, + sizeof(psMapDevMemOUT->sDstClientMemInfo)); + OSMemSet(&psMapDevMemOUT->sDstClientSyncInfo, + 0, + sizeof(psMapDevMemOUT->sDstClientSyncInfo)); + + psMapDevMemOUT->sDstClientMemInfo.pvLinAddrKM = + psDstKernelMemInfo->pvLinAddrKM; + + psMapDevMemOUT->sDstClientMemInfo.pvLinAddr = 0; + psMapDevMemOUT->sDstClientMemInfo.sDevVAddr = psDstKernelMemInfo->sDevVAddr; + psMapDevMemOUT->sDstClientMemInfo.ui32Flags = psDstKernelMemInfo->ui32Flags; + psMapDevMemOUT->sDstClientMemInfo.uAllocSize = psDstKernelMemInfo->uAllocSize; + psMapDevMemOUT->sDstClientMemInfo.hMappingInfo = psDstKernelMemInfo->sMemBlk.hOSMemHandle; + + /* allocate handle to the DST kernel meminfo */ + PVRSRVAllocHandleNR(psPerProc->psHandleBase, + &psMapDevMemOUT->sDstClientMemInfo.hKernelMemInfo, + psDstKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO, + PVRSRV_HANDLE_ALLOC_FLAG_NONE); + psMapDevMemOUT->sDstClientSyncInfo.hKernelSyncInfo = IMG_NULL; + + + /* and setup the sync info */ + 
if(psDstKernelMemInfo->psKernelSyncInfo) + { +#if !defined(PVRSRV_DISABLE_UM_SYNCOBJ_MAPPINGS) + psMapDevMemOUT->sDstClientSyncInfo.psSyncData = + psDstKernelMemInfo->psKernelSyncInfo->psSyncData; + psMapDevMemOUT->sDstClientSyncInfo.sWriteOpsCompleteDevVAddr = + psDstKernelMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr; + psMapDevMemOUT->sDstClientSyncInfo.sReadOpsCompleteDevVAddr = + psDstKernelMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr; + psMapDevMemOUT->sDstClientSyncInfo.sReadOps2CompleteDevVAddr = + psDstKernelMemInfo->psKernelSyncInfo->sReadOps2CompleteDevVAddr; + + psMapDevMemOUT->sDstClientSyncInfo.hMappingInfo = + psDstKernelMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle; +#endif + + psMapDevMemOUT->sDstClientMemInfo.psClientSyncInfo = &psMapDevMemOUT->sDstClientSyncInfo; + /* + * The sync info is associated with the device buffer, + * and not allocated here. It isn't exported when created, + * hence the handle allocation rather than a lookup. 
+ */ + PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, + &psMapDevMemOUT->sDstClientSyncInfo.hKernelSyncInfo, + psDstKernelMemInfo->psKernelSyncInfo, + PVRSRV_HANDLE_TYPE_SYNC_INFO, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + psMapDevMemOUT->sDstClientMemInfo.hKernelMemInfo); + } + + COMMIT_HANDLE_BATCH_OR_ERROR(psMapDevMemOUT->eError, psPerProc) + + return 0; +} + + +static IMG_INT +PVRSRVUnmapDeviceMemoryBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_UNMAP_DEV_MEMORY *psUnmapDevMemIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo = IMG_NULL; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_UNMAP_DEV_MEMORY); + + psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + (IMG_VOID**)&psKernelMemInfo, + psUnmapDevMemIN->hKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + if (psKernelMemInfo->sShareMemWorkaround.bInUse) + { + psRetOUT->eError = PVRSRVFreeDeviceMemKM(psKernelMemInfo->sShareMemWorkaround.hDevCookieInt, psKernelMemInfo); + if(psRetOUT->eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVUnmapDeviceMemoryBW: internal error, should expect FreeDeviceMem to fail")); + return 0; + } + } + else + { + psRetOUT->eError = PVRSRVUnmapDeviceMemoryKM(psKernelMemInfo); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + } + + psRetOUT->eError = PVRSRVReleaseHandle(psPerProc->psHandleBase, + psUnmapDevMemIN->hKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + + return 0; +} + +#if defined(SUPPORT_PVRSRV_DEVICE_CLASS) + +static IMG_INT +PVRSRVMapDeviceClassMemoryBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_MAP_DEVICECLASS_MEMORY *psMapDevClassMemIN, + PVRSRV_BRIDGE_OUT_MAP_DEVICECLASS_MEMORY *psMapDevClassMemOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_KERNEL_MEM_INFO *psMemInfo; + IMG_HANDLE hOSMapInfo; + IMG_HANDLE hDeviceClassBufferInt; + IMG_HANDLE hDevMemContextInt; + PVRSRV_HANDLE_TYPE eHandleType; + + 
PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY); + + NEW_HANDLE_BATCH_OR_ERROR(psMapDevClassMemOUT->eError, psPerProc, 2) + + /* + * The buffer to be mapped can belong to a 3rd party display or + * buffer driver, and we don't know which type we have at this + * point. + */ + psMapDevClassMemOUT->eError = + PVRSRVLookupHandleAnyType(psPerProc->psHandleBase, + &hDeviceClassBufferInt, + &eHandleType, + psMapDevClassMemIN->hDeviceClassBuffer); + + if(psMapDevClassMemOUT->eError != PVRSRV_OK) + { + return 0; + } + + /* get the device memory context */ + psMapDevClassMemOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDevMemContextInt, + psMapDevClassMemIN->hDevMemContext, + PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT); + + if(psMapDevClassMemOUT->eError != PVRSRV_OK) + { + return 0; + } + + /* Having looked up the handle, now check its type */ + switch(eHandleType) + { +#if defined(PVR_SECURE_HANDLES) + case PVRSRV_HANDLE_TYPE_DISP_BUFFER: + case PVRSRV_HANDLE_TYPE_BUF_BUFFER: +#else + case PVRSRV_HANDLE_TYPE_NONE: +#endif + break; + default: + psMapDevClassMemOUT->eError = PVRSRV_ERROR_INVALID_HANDLE_TYPE; + return 0; + } + + psMapDevClassMemOUT->eError = + PVRSRVMapDeviceClassMemoryKM(psPerProc, + hDevMemContextInt, + hDeviceClassBufferInt, + &psMemInfo, + &hOSMapInfo); + if(psMapDevClassMemOUT->eError != PVRSRV_OK) + { + return 0; + } + + OSMemSet(&psMapDevClassMemOUT->sClientMemInfo, + 0, + sizeof(psMapDevClassMemOUT->sClientMemInfo)); + OSMemSet(&psMapDevClassMemOUT->sClientSyncInfo, + 0, + sizeof(psMapDevClassMemOUT->sClientSyncInfo)); + + psMapDevClassMemOUT->sClientMemInfo.pvLinAddrKM = + psMemInfo->pvLinAddrKM; + + psMapDevClassMemOUT->sClientMemInfo.pvLinAddr = 0; + psMapDevClassMemOUT->sClientMemInfo.sDevVAddr = psMemInfo->sDevVAddr; + psMapDevClassMemOUT->sClientMemInfo.ui32Flags = psMemInfo->ui32Flags; + psMapDevClassMemOUT->sClientMemInfo.uAllocSize = psMemInfo->uAllocSize; + 
psMapDevClassMemOUT->sClientMemInfo.hMappingInfo = psMemInfo->sMemBlk.hOSMemHandle; + + PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, + &psMapDevClassMemOUT->sClientMemInfo.hKernelMemInfo, + psMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO, + PVRSRV_HANDLE_ALLOC_FLAG_NONE, + psMapDevClassMemIN->hDeviceClassBuffer); + + psMapDevClassMemOUT->sClientSyncInfo.hKernelSyncInfo = IMG_NULL; + + /* and setup the sync info */ + if(psMemInfo->psKernelSyncInfo) + { +#if !defined(PVRSRV_DISABLE_UM_SYNCOBJ_MAPPINGS) + psMapDevClassMemOUT->sClientSyncInfo.psSyncData = + psMemInfo->psKernelSyncInfo->psSyncData; + psMapDevClassMemOUT->sClientSyncInfo.sWriteOpsCompleteDevVAddr = + psMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr; + psMapDevClassMemOUT->sClientSyncInfo.sReadOpsCompleteDevVAddr = + psMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr; + psMapDevClassMemOUT->sClientSyncInfo.sReadOps2CompleteDevVAddr = + psMemInfo->psKernelSyncInfo->sReadOps2CompleteDevVAddr; + + psMapDevClassMemOUT->sClientSyncInfo.hMappingInfo = + psMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle; +#endif + + psMapDevClassMemOUT->sClientMemInfo.psClientSyncInfo = &psMapDevClassMemOUT->sClientSyncInfo; + /* + * The sync info is associated with the device buffer, + * and not allocated here. It isn't exported when + * created, hence the handle allocation rather than a + * lookup. 
+ */ + PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, + &psMapDevClassMemOUT->sClientSyncInfo.hKernelSyncInfo, + psMemInfo->psKernelSyncInfo, + PVRSRV_HANDLE_TYPE_SYNC_INFO, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + psMapDevClassMemOUT->sClientMemInfo.hKernelMemInfo); + } + + COMMIT_HANDLE_BATCH_OR_ERROR(psMapDevClassMemOUT->eError, psPerProc) + + return 0; +} + +static IMG_INT +PVRSRVUnmapDeviceClassMemoryBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_UNMAP_DEVICECLASS_MEMORY *psUnmapDevClassMemIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_VOID *pvKernelMemInfo; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_UNMAP_DEVICECLASS_MEMORY); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, &pvKernelMemInfo, + psUnmapDevClassMemIN->hKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = PVRSRVUnmapDeviceClassMemoryKM(pvKernelMemInfo); + + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = + PVRSRVReleaseHandle(psPerProc->psHandleBase, + psUnmapDevClassMemIN->hKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + + return 0; +} + +#endif /* defined(SUPPORT_PVRSRV_DEVICE_CLASS) */ + + +#if defined(OS_PVRSRV_WRAP_EXT_MEM_BW) +IMG_INT +PVRSRVWrapExtMemoryBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_WRAP_EXT_MEMORY *psWrapExtMemIN, + PVRSRV_BRIDGE_OUT_WRAP_EXT_MEMORY *psWrapExtMemOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc); +#else /* OS_PVRSRV_WRAP_EXT_MEM_BW */ +static IMG_INT +PVRSRVWrapExtMemoryBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_WRAP_EXT_MEMORY *psWrapExtMemIN, + PVRSRV_BRIDGE_OUT_WRAP_EXT_MEMORY *psWrapExtMemOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_HANDLE hDevCookieInt; + IMG_HANDLE hDevMemContextInt; + PVRSRV_KERNEL_MEM_INFO *psMemInfo; + IMG_SYS_PHYADDR *psSysPAddr = IMG_NULL; + IMG_UINT32 ui32PageTableSize = 0; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_WRAP_EXT_MEMORY); + + 
NEW_HANDLE_BATCH_OR_ERROR(psWrapExtMemOUT->eError, psPerProc, 2) + + /* + * FIXME: This needs reworking - don't use the user supplied page + * table list, get the list from the OS. + */ + psWrapExtMemOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt, + psWrapExtMemIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + if(psWrapExtMemOUT->eError != PVRSRV_OK) + { + return 0; + } + + /* get the device memory context */ + psWrapExtMemOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemContextInt, + psWrapExtMemIN->hDevMemContext, + PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT); + + if(psWrapExtMemOUT->eError != PVRSRV_OK) + { + return 0; + } + + if(psWrapExtMemIN->ui32NumPageTableEntries) + { + ui32PageTableSize = psWrapExtMemIN->ui32NumPageTableEntries + * sizeof(IMG_SYS_PHYADDR); + + ASSIGN_AND_EXIT_ON_ERROR(psWrapExtMemOUT->eError, + OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + ui32PageTableSize, + (IMG_VOID **)&psSysPAddr, 0, + "Page Table")); + + if(CopyFromUserWrapper(psPerProc, + ui32BridgeID, + psSysPAddr, + psWrapExtMemIN->psSysPAddr, + ui32PageTableSize) != PVRSRV_OK) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32PageTableSize, (IMG_VOID *)psSysPAddr, 0); + /*not nulling pointer, out of scope*/ + return -EFAULT; + } + } + + psWrapExtMemOUT->eError = + PVRSRVWrapExtMemoryKM(hDevCookieInt, + psPerProc, + hDevMemContextInt, + psWrapExtMemIN->uByteSize, + psWrapExtMemIN->uPageOffset, + psWrapExtMemIN->bPhysContig, + psSysPAddr, + psWrapExtMemIN->pvLinAddr, + psWrapExtMemIN->ui32Flags, + &psMemInfo); + + if(psWrapExtMemIN->ui32NumPageTableEntries) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, + ui32PageTableSize, + (IMG_VOID *)psSysPAddr, 0); + /*not nulling pointer, out of scope*/ + } + + if(psWrapExtMemOUT->eError != PVRSRV_OK) + { + return 0; + } + + psWrapExtMemOUT->sClientMemInfo.pvLinAddrKM = + psMemInfo->pvLinAddrKM; + + /* setup the mem info */ + psWrapExtMemOUT->sClientMemInfo.pvLinAddr = 0; + psWrapExtMemOUT->sClientMemInfo.sDevVAddr = 
psMemInfo->sDevVAddr; + psWrapExtMemOUT->sClientMemInfo.ui32Flags = psMemInfo->ui32Flags; + psWrapExtMemOUT->sClientMemInfo.uAllocSize = psMemInfo->uAllocSize; + psWrapExtMemOUT->sClientMemInfo.hMappingInfo = psMemInfo->sMemBlk.hOSMemHandle; + + PVRSRVAllocHandleNR(psPerProc->psHandleBase, + &psWrapExtMemOUT->sClientMemInfo.hKernelMemInfo, + psMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO, + PVRSRV_HANDLE_ALLOC_FLAG_NONE); + + + /* setup the sync info */ +#if !defined(PVRSRV_DISABLE_UM_SYNCOBJ_MAPPINGS) + psWrapExtMemOUT->sClientSyncInfo.psSyncData = + psMemInfo->psKernelSyncInfo->psSyncData; + psWrapExtMemOUT->sClientSyncInfo.sWriteOpsCompleteDevVAddr = + psMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr; + psWrapExtMemOUT->sClientSyncInfo.sReadOpsCompleteDevVAddr = + psMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr; + psWrapExtMemOUT->sClientSyncInfo.sReadOps2CompleteDevVAddr = + psMemInfo->psKernelSyncInfo->sReadOps2CompleteDevVAddr; + + psWrapExtMemOUT->sClientSyncInfo.hMappingInfo = + psMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle; +#endif + + psWrapExtMemOUT->sClientMemInfo.psClientSyncInfo = &psWrapExtMemOUT->sClientSyncInfo; + + PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, + &psWrapExtMemOUT->sClientSyncInfo.hKernelSyncInfo, + (IMG_HANDLE)psMemInfo->psKernelSyncInfo, + PVRSRV_HANDLE_TYPE_SYNC_INFO, + PVRSRV_HANDLE_ALLOC_FLAG_NONE, + psWrapExtMemOUT->sClientMemInfo.hKernelMemInfo); + + COMMIT_HANDLE_BATCH_OR_ERROR(psWrapExtMemOUT->eError, psPerProc) + + return 0; +} +#endif /* OS_PVRSRV_WRAP_EXT_MEM_BW */ + +static IMG_INT +PVRSRVUnwrapExtMemoryBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_UNWRAP_EXT_MEMORY *psUnwrapExtMemIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_VOID *pvMemInfo; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_UNWRAP_EXT_MEMORY); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvMemInfo, + psUnwrapExtMemIN->hKernelMemInfo, + 
PVRSRV_HANDLE_TYPE_MEM_INFO); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = + PVRSRVUnwrapExtMemoryKM((PVRSRV_KERNEL_MEM_INFO *)pvMemInfo); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = + PVRSRVReleaseHandle(psPerProc->psHandleBase, + psUnwrapExtMemIN->hKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + + return 0; +} + +#if defined(SUPPORT_ION) +static IMG_INT +PVRSRVMapIonHandleBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_MAP_ION_HANDLE *psMapIonIN, + PVRSRV_BRIDGE_OUT_MAP_ION_HANDLE *psMapIonOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo; + IMG_UINT64 ui64Stamp; + + psMapIonOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &psMapIonIN->hDevCookie, + psMapIonIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + if (psMapIonOUT->eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to lookup device node handle", __FUNCTION__)); + return 0; + } + + psMapIonOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &psMapIonIN->hDevMemHeap, + psMapIonIN->hDevMemHeap, + PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP); + if (psMapIonOUT->eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to lookup memory context handle", __FUNCTION__)); + return 0; + } + + psMapIonOUT->eError = PVRSRVMapIonHandleKM(psPerProc, + psMapIonIN->hDevCookie, + psMapIonIN->hDevMemHeap, + psMapIonIN->ui32NumFDs, + psMapIonIN->ai32BufferFDs, + psMapIonIN->ui32Attribs, + psMapIonIN->ui32ChunkCount, + psMapIonIN->auiOffset, + psMapIonIN->auiSize, + &psMapIonOUT->uiIonBufferSize, + &psKernelMemInfo, + &ui64Stamp); + if (psMapIonOUT->eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to map ion handle", __FUNCTION__)); + return 0; + } + + OSMemSet(&psMapIonOUT->sClientMemInfo, + 0, + sizeof(psMapIonOUT->sClientMemInfo)); + + psMapIonOUT->sClientMemInfo.pvLinAddrKM = + psKernelMemInfo->pvLinAddrKM; + + psMapIonOUT->sClientMemInfo.pvLinAddr = 0; + 
psMapIonOUT->sClientMemInfo.sDevVAddr = psKernelMemInfo->sDevVAddr; + psMapIonOUT->sClientMemInfo.ui32Flags = psKernelMemInfo->ui32Flags; + psMapIonOUT->sClientMemInfo.uAllocSize = psKernelMemInfo->uAllocSize; + + /* No mapping info, we map through ion */ + psMapIonOUT->sClientMemInfo.hMappingInfo = IMG_NULL; + +#if defined(SUPPORT_MEMINFO_IDS) + psMapIonOUT->sClientMemInfo.ui64Stamp = ui64Stamp; +#endif + + PVRSRVAllocHandleNR(psPerProc->psHandleBase, + &psMapIonOUT->sClientMemInfo.hKernelMemInfo, + psKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO, + PVRSRV_HANDLE_ALLOC_FLAG_NONE); + + if(psMapIonIN->ui32Attribs & PVRSRV_MEM_NO_SYNCOBJ) + { + /* signal no syncinfo */ + OSMemSet(&psMapIonOUT->sClientSyncInfo, + 0, + sizeof (PVRSRV_CLIENT_SYNC_INFO)); + psMapIonOUT->sClientMemInfo.psClientSyncInfo = IMG_NULL; + } + else + { + /* and setup the sync info */ +#if !defined(PVRSRV_DISABLE_UM_SYNCOBJ_MAPPINGS) + psMapIonOUT->sClientSyncInfo.psSyncData = + psKernelMemInfo->psKernelSyncInfo->psSyncData; + psMapIonOUT->sClientSyncInfo.sWriteOpsCompleteDevVAddr = + psKernelMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr; + psMapIonOUT->sClientSyncInfo.sReadOpsCompleteDevVAddr = + psKernelMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr; + psMapIonOUT->sClientSyncInfo.sReadOps2CompleteDevVAddr = + psKernelMemInfo->psKernelSyncInfo->sReadOps2CompleteDevVAddr; + + psMapIonOUT->sClientSyncInfo.hMappingInfo = + psKernelMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle; +#endif + + PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, + &psMapIonOUT->sClientSyncInfo.hKernelSyncInfo, + psKernelMemInfo->psKernelSyncInfo, + PVRSRV_HANDLE_TYPE_SYNC_INFO, + PVRSRV_HANDLE_ALLOC_FLAG_NONE, + psMapIonOUT->sClientMemInfo.hKernelMemInfo); + + psMapIonOUT->sClientMemInfo.psClientSyncInfo = + &psMapIonOUT->sClientSyncInfo; + } + return 0; +} + +static IMG_INT +PVRSRVUnmapIonHandleBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_UNMAP_ION_HANDLE *psUnmapIonIN, + 
PVRSRV_BRIDGE_RETURN *psUnmapIonOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_VOID *pvKernelMemInfo; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_UNMAP_ION_HANDLE); + + psUnmapIonOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvKernelMemInfo, + psUnmapIonIN->hKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + + if(psUnmapIonOUT->eError != PVRSRV_OK) + { + return 0; + } + + psUnmapIonOUT->eError = PVRSRVUnmapIonHandleKM(pvKernelMemInfo); + + if(psUnmapIonOUT->eError != PVRSRV_OK) + { + return 0; + } + + psUnmapIonOUT->eError = + PVRSRVReleaseHandle(psPerProc->psHandleBase, + psUnmapIonIN->hKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + + return 0; +} +#endif /* SUPPORT_ION */ + +#if defined(SUPPORT_DMABUF) +static IMG_INT +PVRSRVMapDmaBufBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_MAP_DMABUF *psMapDmaBufIN, + PVRSRV_BRIDGE_OUT_MAP_DMABUF *psMapDmaBufOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo; + IMG_UINT64 ui64Stamp; + + psMapDmaBufOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &psMapDmaBufIN->hDevCookie, + psMapDmaBufIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + if (psMapDmaBufOUT->eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to lookup device node handle", __FUNCTION__)); + return 0; + } + + psMapDmaBufOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &psMapDmaBufIN->hDevMemHeap, + psMapDmaBufIN->hDevMemHeap, + PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP); + if (psMapDmaBufOUT->eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to lookup memory context handle", __FUNCTION__)); + return 0; + } + + psMapDmaBufOUT->eError = PVRSRVMapDmaBufKM(psPerProc, + psMapDmaBufIN->hDevCookie, + psMapDmaBufIN->hDevMemHeap, + psMapDmaBufIN->ui32Attribs, + psMapDmaBufIN->i32FD, + psMapDmaBufIN->uiOffset, + psMapDmaBufIN->uiSize, + &psKernelMemInfo, + &psMapDmaBufOUT->uiSize, + &psMapDmaBufOUT->uiOffset, + &ui64Stamp); + if (psMapDmaBufOUT->eError != 
PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to map dma-buf handle", __FUNCTION__)); + return 0; + } + + OSMemSet(&psMapDmaBufOUT->sClientMemInfo, + 0, + sizeof(psMapDmaBufOUT->sClientMemInfo)); + + psMapDmaBufOUT->sClientMemInfo.pvLinAddrKM = + psKernelMemInfo->pvLinAddrKM; + + psMapDmaBufOUT->sClientMemInfo.pvLinAddr = 0; + psMapDmaBufOUT->sClientMemInfo.sDevVAddr = psKernelMemInfo->sDevVAddr; + psMapDmaBufOUT->sClientMemInfo.ui32Flags = psKernelMemInfo->ui32Flags; + psMapDmaBufOUT->sClientMemInfo.uAllocSize = psKernelMemInfo->uAllocSize; + + /* No mapping info, we map through dma_buf */ + psMapDmaBufOUT->sClientMemInfo.hMappingInfo = IMG_NULL; + +#if defined(SUPPORT_MEMINFO_IDS) + psMapDmaBufOUT->sClientMemInfo.ui64Stamp = ui64Stamp; +#endif + + PVRSRVAllocHandleNR(psPerProc->psHandleBase, + &psMapDmaBufOUT->sClientMemInfo.hKernelMemInfo, + psKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO, + PVRSRV_HANDLE_ALLOC_FLAG_NONE); + + if(psMapDmaBufIN->ui32Attribs & PVRSRV_MEM_NO_SYNCOBJ) + { + /* signal no syncinfo */ + OSMemSet(&psMapDmaBufOUT->sClientSyncInfo, + 0, + sizeof (PVRSRV_CLIENT_SYNC_INFO)); + psMapDmaBufOUT->sClientMemInfo.psClientSyncInfo = IMG_NULL; + } + else + { + /* and setup the sync info */ +#if !defined(PVRSRV_DISABLE_UM_SYNCOBJ_MAPPINGS) + psMapDmaBufOUT->sClientSyncInfo.psSyncData = + psKernelMemInfo->psKernelSyncInfo->psSyncData; + psMapDmaBufOUT->sClientSyncInfo.sWriteOpsCompleteDevVAddr = + psKernelMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr; + psMapDmaBufOUT->sClientSyncInfo.sReadOpsCompleteDevVAddr = + psKernelMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr; + psMapDmaBufOUT->sClientSyncInfo.sReadOps2CompleteDevVAddr = + psKernelMemInfo->psKernelSyncInfo->sReadOps2CompleteDevVAddr; + + psMapDmaBufOUT->sClientSyncInfo.hMappingInfo = + psKernelMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle; +#endif + + /* + * If the DMA Buffer is imported into the same process that + * exported it, there will be two 
handles for the same sync + * info, hence the PVRSRV_HANDLE_ALLOC_FLAG_MULTI flag. + */ + PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, + &psMapDmaBufOUT->sClientSyncInfo.hKernelSyncInfo, + psKernelMemInfo->psKernelSyncInfo, + PVRSRV_HANDLE_TYPE_SYNC_INFO, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + psMapDmaBufOUT->sClientMemInfo.hKernelMemInfo); + + psMapDmaBufOUT->sClientMemInfo.psClientSyncInfo = + &psMapDmaBufOUT->sClientSyncInfo; + } + return 0; +} + +static IMG_INT +PVRSRVUnmapDmaBufBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_UNMAP_DMABUF *psUnmapDmaBufIN, + PVRSRV_BRIDGE_RETURN *psUnmapDmaBufOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_VOID *pvKernelMemInfo; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_UNMAP_DMABUF); + + psUnmapDmaBufOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvKernelMemInfo, + psUnmapDmaBufIN->hKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + + if(psUnmapDmaBufOUT->eError != PVRSRV_OK) + { + return 0; + } + + psUnmapDmaBufOUT->eError = PVRSRVUnmapDmaBufKM(pvKernelMemInfo); + + if(psUnmapDmaBufOUT->eError != PVRSRV_OK) + { + return 0; + } + + psUnmapDmaBufOUT->eError = + PVRSRVReleaseHandle(psPerProc->psHandleBase, + psUnmapDmaBufIN->hKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + + return 0; +} +#endif /* SUPPORT_DMABUF */ + +static IMG_INT +PVRSRVGetFreeDeviceMemBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_GETFREEDEVICEMEM *psGetFreeDeviceMemIN, + PVRSRV_BRIDGE_OUT_GETFREEDEVICEMEM *psGetFreeDeviceMemOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GETFREE_DEVICEMEM); + + PVR_UNREFERENCED_PARAMETER(psPerProc); + + psGetFreeDeviceMemOUT->eError = + PVRSRVGetFreeDeviceMemKM(psGetFreeDeviceMemIN->ui32Flags, + &psGetFreeDeviceMemOUT->uTotal, + &psGetFreeDeviceMemOUT->uFree, + &psGetFreeDeviceMemOUT->uLargestBlock); + + return 0; +} + +static IMG_INT +PVRMMapOSMemHandleToMMapDataBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_MHANDLE_TO_MMAP_DATA 
*psMMapDataIN, + PVRSRV_BRIDGE_OUT_MHANDLE_TO_MMAP_DATA *psMMapDataOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_MHANDLE_TO_MMAP_DATA); + +#if defined (__linux__) || defined (__QNXNTO__) + psMMapDataOUT->eError = + PVRMMapOSMemHandleToMMapData(psPerProc, + psMMapDataIN->hMHandle, + &psMMapDataOUT->uiMMapOffset, + &psMMapDataOUT->uiByteOffset, + &psMMapDataOUT->uiRealByteSize, + &psMMapDataOUT->uiUserVAddr); +#else + PVR_UNREFERENCED_PARAMETER(psPerProc); + PVR_UNREFERENCED_PARAMETER(psMMapDataIN); + + psMMapDataOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; +#endif + return 0; +} + + +static IMG_INT +PVRMMapReleaseMMapDataBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_RELEASE_MMAP_DATA *psMMapDataIN, + PVRSRV_BRIDGE_OUT_RELEASE_MMAP_DATA *psMMapDataOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_RELEASE_MMAP_DATA); + +#if defined (__linux__) || defined (__QNXNTO__) + psMMapDataOUT->eError = + PVRMMapReleaseMMapData(psPerProc, + psMMapDataIN->hMHandle, + &psMMapDataOUT->bMUnmap, + &psMMapDataOUT->uiRealByteSize, + &psMMapDataOUT->uiUserVAddr); +#else + + PVR_UNREFERENCED_PARAMETER(psPerProc); + PVR_UNREFERENCED_PARAMETER(psMMapDataIN); + + psMMapDataOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; +#endif + return 0; +} + + +static IMG_INT +PVRSRVChangeDeviceMemoryAttributesBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_CHG_DEV_MEM_ATTRIBS *psChgMemAttribIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVR_UNREFERENCED_PARAMETER(ui32BridgeID); + PVR_UNREFERENCED_PARAMETER(psChgMemAttribIN); + PVR_UNREFERENCED_PARAMETER(psRetOUT); + PVR_UNREFERENCED_PARAMETER(psPerProc); + + return 0; +} + +#ifdef PDUMP +static IMG_INT +PDumpIsCaptureFrameBW(IMG_UINT32 ui32BridgeID, + IMG_VOID *psBridgeIn, + PVRSRV_BRIDGE_OUT_PDUMP_ISCAPTURING *psPDumpIsCapturingOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, 
PVRSRV_BRIDGE_PDUMP_ISCAPTURING); + PVR_UNREFERENCED_PARAMETER(psBridgeIn); + PVR_UNREFERENCED_PARAMETER(psPerProc); + + psPDumpIsCapturingOUT->bIsCapturing = PDumpIsCaptureFrameKM(); + psPDumpIsCapturingOUT->eError = PVRSRV_OK; + + return 0; +} + +static IMG_INT +PDumpCommentBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_PDUMP_COMMENT *psPDumpCommentIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_COMMENT); + PVR_UNREFERENCED_PARAMETER(psPerProc); + + psRetOUT->eError = PDumpCommentKM(&psPDumpCommentIN->szComment[0], + psPDumpCommentIN->ui32Flags); + return 0; +} + +static IMG_INT +PDumpSetFrameBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_PDUMP_SETFRAME *psPDumpSetFrameIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_SETFRAME); + PVR_UNREFERENCED_PARAMETER(psPerProc); + + psRetOUT->eError = PDumpSetFrameKM(psPDumpSetFrameIN->ui32Frame); + + return 0; +} + +static IMG_INT +PDumpRegWithFlagsBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_PDUMP_DUMPREG *psPDumpRegDumpIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_DEVICE_NODE *psDeviceNode; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_REG); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + (IMG_VOID **)&psDeviceNode, + psPDumpRegDumpIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = PDumpRegWithFlagsKM (psPDumpRegDumpIN->szRegRegion, + psPDumpRegDumpIN->sHWReg.ui32RegAddr, + psPDumpRegDumpIN->sHWReg.ui32RegVal, + psPDumpRegDumpIN->ui32Flags); + + return 0; +} + +static IMG_INT +PDumpRegPolBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_PDUMP_REGPOL *psPDumpRegPolIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_DEVICE_NODE *psDeviceNode; + + 
PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_REGPOL); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + (IMG_VOID **)&psDeviceNode, + psPDumpRegPolIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + + psRetOUT->eError = + PDumpRegPolWithFlagsKM(psPDumpRegPolIN->szRegRegion, + psPDumpRegPolIN->sHWReg.ui32RegAddr, + psPDumpRegPolIN->sHWReg.ui32RegVal, + psPDumpRegPolIN->ui32Mask, + psPDumpRegPolIN->ui32Flags, + PDUMP_POLL_OPERATOR_EQUAL); + + return 0; +} + +static IMG_INT +PDumpMemPolBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_PDUMP_MEMPOL *psPDumpMemPolIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_VOID *pvMemInfo; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_MEMPOL); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvMemInfo, + psPDumpMemPolIN->hKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = + PDumpMemPolKM(((PVRSRV_KERNEL_MEM_INFO *)pvMemInfo), + psPDumpMemPolIN->ui32Offset, + psPDumpMemPolIN->ui32Value, + psPDumpMemPolIN->ui32Mask, + psPDumpMemPolIN->eOperator, + psPDumpMemPolIN->ui32Flags, + MAKEUNIQUETAG(pvMemInfo)); + + return 0; +} + +static IMG_INT +PDumpMemBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_PDUMP_DUMPMEM *psPDumpMemDumpIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_VOID *pvMemInfo; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DUMPMEM); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvMemInfo, + psPDumpMemDumpIN->hKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = + PDumpMemUM(psPerProc, + psPDumpMemDumpIN->hAltLinAddr, + psPDumpMemDumpIN->hLinAddr, + pvMemInfo, + psPDumpMemDumpIN->ui32Offset, + psPDumpMemDumpIN->ui32Bytes, + 
psPDumpMemDumpIN->ui32Flags, + MAKEUNIQUETAG(pvMemInfo)); + + return 0; +} + +static IMG_INT +PDumpBitmapBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_PDUMP_BITMAP *psPDumpBitmapIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_DEVICE_NODE *psDeviceNode; + IMG_HANDLE hDevMemContextInt; + + PVR_UNREFERENCED_PARAMETER(ui32BridgeID); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, (IMG_VOID **)&psDeviceNode, + psPDumpBitmapIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + + psRetOUT->eError = + PVRSRVLookupHandle( psPerProc->psHandleBase, + &hDevMemContextInt, + psPDumpBitmapIN->hDevMemContext, + PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT); + + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = + PDumpBitmapKM(psDeviceNode, + &psPDumpBitmapIN->szFileName[0], + psPDumpBitmapIN->ui32FileOffset, + psPDumpBitmapIN->ui32Width, + psPDumpBitmapIN->ui32Height, + psPDumpBitmapIN->ui32StrideInBytes, + psPDumpBitmapIN->sDevBaseAddr, + hDevMemContextInt, + psPDumpBitmapIN->ui32Size, + psPDumpBitmapIN->ePixelFormat, + psPDumpBitmapIN->eMemFormat, + psPDumpBitmapIN->ui32Flags); + + return 0; +} + +static IMG_INT +PDumpReadRegBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_PDUMP_READREG *psPDumpReadRegIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_DEVICE_NODE *psDeviceNode; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DUMPREADREG); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, (IMG_VOID **)&psDeviceNode, + psPDumpReadRegIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + + psRetOUT->eError = + PDumpReadRegKM(&psPDumpReadRegIN->szRegRegion[0], + &psPDumpReadRegIN->szFileName[0], + psPDumpReadRegIN->ui32FileOffset, + psPDumpReadRegIN->ui32Address, + psPDumpReadRegIN->ui32Size, + psPDumpReadRegIN->ui32Flags); + + return 0; +} + +static IMG_INT +PDumpMemPagesBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_PDUMP_MEMPAGES 
*psPDumpMemPagesIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_DEVICE_NODE *psDeviceNode; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_MEMPAGES); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + (IMG_VOID **)&psDeviceNode, + psPDumpMemPagesIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + + return 0; +} + +static IMG_INT +PDumpDriverInfoBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_PDUMP_DRIVERINFO *psPDumpDriverInfoIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_UINT32 ui32PDumpFlags; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DRIVERINFO); + PVR_UNREFERENCED_PARAMETER(psPerProc); + + ui32PDumpFlags = 0; + if(psPDumpDriverInfoIN->bContinuous) + { + ui32PDumpFlags |= PDUMP_FLAGS_CONTINUOUS; + } + psRetOUT->eError = + PDumpDriverInfoKM(&psPDumpDriverInfoIN->szString[0], + ui32PDumpFlags); + + return 0; +} + +static IMG_INT +PDumpSyncDumpBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_PDUMP_DUMPSYNC *psPDumpSyncDumpIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_UINT32 ui32Bytes = psPDumpSyncDumpIN->ui32Bytes; + IMG_VOID *pvSyncInfo; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DUMPSYNC); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, &pvSyncInfo, + psPDumpSyncDumpIN->hKernelSyncInfo, + PVRSRV_HANDLE_TYPE_SYNC_INFO); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = + PDumpMemUM(psPerProc, + psPDumpSyncDumpIN->hAltLinAddr, + IMG_NULL, + ((PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo)->psSyncDataMemInfoKM, + psPDumpSyncDumpIN->ui32Offset, + ui32Bytes, + 0, + MAKEUNIQUETAG(((PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo)->psSyncDataMemInfoKM)); + + return 0; +} + +static IMG_INT +PDumpSyncPolBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_PDUMP_SYNCPOL *psPDumpSyncPolIN, + 
PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_UINT32 ui32Offset; + IMG_VOID *pvSyncInfo; + IMG_UINT32 ui32Value; + IMG_UINT32 ui32Mask; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_SYNCPOL); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvSyncInfo, + psPDumpSyncPolIN->hKernelSyncInfo, + PVRSRV_HANDLE_TYPE_SYNC_INFO); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + if(psPDumpSyncPolIN->bIsRead) + { + ui32Offset = offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete); + } + else + { + ui32Offset = offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete); + } + + /* FIXME: Move this code to somewhere outside of the bridge */ + if (psPDumpSyncPolIN->bUseLastOpDumpVal) + { + if(psPDumpSyncPolIN->bIsRead) + { + ui32Value = ((PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo)->psSyncData->ui32LastReadOpDumpVal; + } + else + { + ui32Value = ((PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo)->psSyncData->ui32LastOpDumpVal; + } + ui32Mask = 0xffffffff; + } + else + { + ui32Value = psPDumpSyncPolIN->ui32Value; + ui32Mask = psPDumpSyncPolIN->ui32Mask; + } + + psRetOUT->eError = + PDumpMemPolKM(((PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo)->psSyncDataMemInfoKM, + ui32Offset, + ui32Value, + ui32Mask, + PDUMP_POLL_OPERATOR_EQUAL, + 0, + MAKEUNIQUETAG(((PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo)->psSyncDataMemInfoKM)); + + return 0; +} + + +static IMG_INT +PDumpCycleCountRegReadBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_PDUMP_CYCLE_COUNT_REG_READ *psPDumpCycleCountRegReadIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_DEVICE_NODE *psDeviceNode; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_CYCLE_COUNT_REG_READ); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + (IMG_VOID **)&psDeviceNode, + psPDumpCycleCountRegReadIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + 
PDumpCycleCountRegRead(&psDeviceNode->sDevId, + psPDumpCycleCountRegReadIN->ui32RegOffset, + psPDumpCycleCountRegReadIN->bLastFrame); + + psRetOUT->eError = PVRSRV_OK; + + return 0; +} + +static IMG_INT +PDumpPDDevPAddrBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_PDUMP_DUMPPDDEVPADDR *psPDumpPDDevPAddrIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_VOID *pvMemInfo; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DUMPPDDEVPADDR); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, &pvMemInfo, + psPDumpPDDevPAddrIN->hKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = + PDumpPDDevPAddrKM((PVRSRV_KERNEL_MEM_INFO *)pvMemInfo, + psPDumpPDDevPAddrIN->ui32Offset, + psPDumpPDDevPAddrIN->sPDDevPAddr, + MAKEUNIQUETAG(pvMemInfo), + PDUMP_PD_UNIQUETAG); + return 0; +} + +static IMG_INT +PDumpStartInitPhaseBW(IMG_UINT32 ui32BridgeID, + IMG_VOID *psBridgeIn, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_STARTINITPHASE); + PVR_UNREFERENCED_PARAMETER(psBridgeIn); + PVR_UNREFERENCED_PARAMETER(psPerProc); + + psRetOUT->eError = PDumpStartInitPhaseKM(); + + return 0; +} + +static IMG_INT +PDumpStopInitPhaseBW(IMG_UINT32 ui32BridgeID, + IMG_VOID *psBridgeIn, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_STOPINITPHASE); + PVR_UNREFERENCED_PARAMETER(psBridgeIn); + PVR_UNREFERENCED_PARAMETER(psPerProc); + + psRetOUT->eError = PDumpStopInitPhaseKM(); + + return 0; +} + +#endif /* PDUMP */ + + +static IMG_INT +PVRSRVGetMiscInfoBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_GET_MISC_INFO *psGetMiscInfoIN, + PVRSRV_BRIDGE_OUT_GET_MISC_INFO *psGetMiscInfoOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_ERROR eError; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, 
PVRSRV_BRIDGE_GET_MISC_INFO); + + OSMemCopy(&psGetMiscInfoOUT->sMiscInfo, + &psGetMiscInfoIN->sMiscInfo, + sizeof(PVRSRV_MISC_INFO)); + + if (((psGetMiscInfoIN->sMiscInfo.ui32StateRequest & PVRSRV_MISC_INFO_MEMSTATS_PRESENT) != 0) && + ((psGetMiscInfoIN->sMiscInfo.ui32StateRequest & PVRSRV_MISC_INFO_DDKVERSION_PRESENT) != 0) && + ((psGetMiscInfoIN->sMiscInfo.ui32StateRequest & PVRSRV_MISC_INFO_FREEMEM_PRESENT) != 0)) + { + /* Client must choose which of memstats and DDK version will be written to + * kernel side buffer */ + psGetMiscInfoOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + return 0; + } + + if (((psGetMiscInfoIN->sMiscInfo.ui32StateRequest & PVRSRV_MISC_INFO_MEMSTATS_PRESENT) != 0) || + ((psGetMiscInfoIN->sMiscInfo.ui32StateRequest & PVRSRV_MISC_INFO_DDKVERSION_PRESENT) != 0) || + ((psGetMiscInfoIN->sMiscInfo.ui32StateRequest & PVRSRV_MISC_INFO_FREEMEM_PRESENT) != 0)) + { + /* Alloc kernel side buffer to write into */ + ASSIGN_AND_EXIT_ON_ERROR(psGetMiscInfoOUT->eError, + OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + psGetMiscInfoOUT->sMiscInfo.ui32MemoryStrLen, + (IMG_VOID **)&psGetMiscInfoOUT->sMiscInfo.pszMemoryStr, 0, + "Output string buffer")); + + psGetMiscInfoOUT->eError = PVRSRVGetMiscInfoKM(&psGetMiscInfoOUT->sMiscInfo); + + /* Copy result to user */ + eError = CopyToUserWrapper(psPerProc, ui32BridgeID, + psGetMiscInfoIN->sMiscInfo.pszMemoryStr, + psGetMiscInfoOUT->sMiscInfo.pszMemoryStr, + psGetMiscInfoOUT->sMiscInfo.ui32MemoryStrLen); + + /* Free kernel side buffer again */ + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, + psGetMiscInfoOUT->sMiscInfo.ui32MemoryStrLen, + (IMG_VOID *)psGetMiscInfoOUT->sMiscInfo.pszMemoryStr, 0); + + /* Replace output buffer pointer with input pointer, as both are expected + * to point to the same userspace memory. + */ + psGetMiscInfoOUT->sMiscInfo.pszMemoryStr = psGetMiscInfoIN->sMiscInfo.pszMemoryStr; + + if(eError != PVRSRV_OK) + { + /* Do error check at the end as we always have to free and reset the + * pointer. 
+ */ + PVR_DPF((PVR_DBG_ERROR, "PVRSRVGetMiscInfoBW Error copy to user")); + return -EFAULT; + } + } + else + { + psGetMiscInfoOUT->eError = PVRSRVGetMiscInfoKM(&psGetMiscInfoOUT->sMiscInfo); + } + + /* Return on error so exit status of PVRSRVGetMiscInfoKM is propagated to client. + * Don't alloc handles for event object or timer; if error exit status is returned + * the handles should not be used (even if not null) */ + if (psGetMiscInfoOUT->eError != PVRSRV_OK) + { + return 0; + } + + /* + * The handles are not allocated in batch mode as they are shared + * (a shared handle is allocated at most once), and there is no + * resource allocation to undo if the handle allocation fails. + */ + if (psGetMiscInfoIN->sMiscInfo.ui32StateRequest & PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT) + { + psGetMiscInfoOUT->eError = PVRSRVAllocHandle(psPerProc->psHandleBase, + &psGetMiscInfoOUT->sMiscInfo.sGlobalEventObject.hOSEventKM, + psGetMiscInfoOUT->sMiscInfo.sGlobalEventObject.hOSEventKM, + PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT, + PVRSRV_HANDLE_ALLOC_FLAG_SHARED); + + if (psGetMiscInfoOUT->eError != PVRSRV_OK) + { + return 0; + } + + } + + if (psGetMiscInfoOUT->sMiscInfo.hSOCTimerRegisterOSMemHandle) + { + /* Allocate handle for SOC OSMemHandle */ + psGetMiscInfoOUT->eError = PVRSRVAllocHandle(psPerProc->psHandleBase, + &psGetMiscInfoOUT->sMiscInfo.hSOCTimerRegisterOSMemHandle, + psGetMiscInfoOUT->sMiscInfo.hSOCTimerRegisterOSMemHandle, + PVRSRV_HANDLE_TYPE_SOC_TIMER, + PVRSRV_HANDLE_ALLOC_FLAG_SHARED); + + if (psGetMiscInfoOUT->eError != PVRSRV_OK) + { + return 0; + } + } + + return 0; +} + +static IMG_INT +PVRSRVConnectBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_CONNECT_SERVICES *psConnectServicesIN, + PVRSRV_BRIDGE_OUT_CONNECT_SERVICES *psConnectServicesOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CONNECT_SERVICES); + +#if defined(PDUMP) + /* Store the per process connection info. 
+ * The Xserver initially connects via PVR2D which sets the persistent flag. + * But, later the Xserver may connect via SGL which doesn't carry the flag (in + * general SGL clients aren't persistent). So we OR in the flag so if it was set + * before it remains set. + */ + if ((psConnectServicesIN->ui32Flags & SRV_FLAGS_PERSIST) != 0) + { + psPerProc->bPDumpPersistent = IMG_TRUE; + } + +#if defined(SUPPORT_PDUMP_MULTI_PROCESS) + /* Select whether this client is our 'active' target for pdumping in a + * multi-process environment. + * NOTE: only 1 active target is supported at present. + */ + if ((psConnectServicesIN->ui32Flags & SRV_FLAGS_PDUMP_ACTIVE) != 0) + { + psPerProc->bPDumpActive = IMG_TRUE; + } +#endif /* SUPPORT_PDUMP_MULTI_PROCESS */ +#else + PVR_UNREFERENCED_PARAMETER(psConnectServicesIN); +#endif + psConnectServicesOUT->ui8KernelArch = (sizeof(void *) == 8) ? 64 : 32; + psConnectServicesOUT->hKernelServices = (IMG_UINT64)(uintptr_t)(void *) psPerProc->hPerProcData; + psConnectServicesOUT->eError = PVRSRV_OK; + + return 0; +} + +static IMG_INT +PVRSRVDisconnectBW(IMG_UINT32 ui32BridgeID, + IMG_VOID *psBridgeIn, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVR_UNREFERENCED_PARAMETER(psPerProc); + PVR_UNREFERENCED_PARAMETER(psBridgeIn); + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_DISCONNECT_SERVICES); + + /* just return OK, per-process data is cleaned up by resmgr */ + psRetOUT->eError = PVRSRV_OK; + + return 0; +} + +#if defined(SUPPORT_PVRSRV_DEVICE_CLASS) + +static IMG_INT +PVRSRVEnumerateDCBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_ENUMCLASS *psEnumDispClassIN, + PVRSRV_BRIDGE_OUT_ENUMCLASS *psEnumDispClassOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVR_UNREFERENCED_PARAMETER(psPerProc); + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ENUM_CLASS); + + psEnumDispClassOUT->eError = + PVRSRVEnumerateDCKM(psEnumDispClassIN->eDeviceClass, + &psEnumDispClassOUT->ui32NumDevices, + 
&psEnumDispClassOUT->ui32DevID[0]); + + return 0; +} + +static IMG_INT +PVRSRVOpenDCDeviceBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_OPEN_DISPCLASS_DEVICE *psOpenDispClassDeviceIN, + PVRSRV_BRIDGE_OUT_OPEN_DISPCLASS_DEVICE *psOpenDispClassDeviceOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_HANDLE hDevCookieInt; + IMG_HANDLE hDispClassInfoInt; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_OPEN_DISPCLASS_DEVICE); + + NEW_HANDLE_BATCH_OR_ERROR(psOpenDispClassDeviceOUT->eError, psPerProc, 1) + + psOpenDispClassDeviceOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDevCookieInt, + psOpenDispClassDeviceIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + if(psOpenDispClassDeviceOUT->eError != PVRSRV_OK) + { + return 0; + } + + psOpenDispClassDeviceOUT->eError = + PVRSRVOpenDCDeviceKM(psPerProc, + psOpenDispClassDeviceIN->ui32DeviceID, + hDevCookieInt, + &hDispClassInfoInt); + + if(psOpenDispClassDeviceOUT->eError != PVRSRV_OK) + { + return 0; + } + + PVRSRVAllocHandleNR(psPerProc->psHandleBase, + &psOpenDispClassDeviceOUT->hDeviceKM, + hDispClassInfoInt, + PVRSRV_HANDLE_TYPE_DISP_INFO, + PVRSRV_HANDLE_ALLOC_FLAG_NONE); + COMMIT_HANDLE_BATCH_OR_ERROR(psOpenDispClassDeviceOUT->eError, psPerProc) + + return 0; +} + +static IMG_INT +PVRSRVCloseDCDeviceBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_CLOSE_DISPCLASS_DEVICE *psCloseDispClassDeviceIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_VOID *pvDispClassInfoInt; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CLOSE_DISPCLASS_DEVICE); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvDispClassInfoInt, + psCloseDispClassDeviceIN->hDeviceKM, + PVRSRV_HANDLE_TYPE_DISP_INFO); + + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = PVRSRVCloseDCDeviceKM(pvDispClassInfoInt); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = + PVRSRVReleaseHandle(psPerProc->psHandleBase, + 
psCloseDispClassDeviceIN->hDeviceKM, + PVRSRV_HANDLE_TYPE_DISP_INFO); + return 0; +} + +static IMG_INT +PVRSRVEnumDCFormatsBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_FORMATS *psEnumDispClassFormatsIN, + PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_FORMATS *psEnumDispClassFormatsOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_VOID *pvDispClassInfoInt; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ENUM_DISPCLASS_FORMATS); + + psEnumDispClassFormatsOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvDispClassInfoInt, + psEnumDispClassFormatsIN->hDeviceKM, + PVRSRV_HANDLE_TYPE_DISP_INFO); + if(psEnumDispClassFormatsOUT->eError != PVRSRV_OK) + { + return 0; + } + + psEnumDispClassFormatsOUT->eError = + PVRSRVEnumDCFormatsKM(pvDispClassInfoInt, + &psEnumDispClassFormatsOUT->ui32Count, + psEnumDispClassFormatsOUT->asFormat); + + return 0; +} + +static IMG_INT +PVRSRVEnumDCDimsBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_DIMS *psEnumDispClassDimsIN, + PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_DIMS *psEnumDispClassDimsOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_VOID *pvDispClassInfoInt; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ENUM_DISPCLASS_DIMS); + + psEnumDispClassDimsOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvDispClassInfoInt, + psEnumDispClassDimsIN->hDeviceKM, + PVRSRV_HANDLE_TYPE_DISP_INFO); + + if(psEnumDispClassDimsOUT->eError != PVRSRV_OK) + { + return 0; + } + + psEnumDispClassDimsOUT->eError = + PVRSRVEnumDCDimsKM(pvDispClassInfoInt, + &psEnumDispClassDimsIN->sFormat, + &psEnumDispClassDimsOUT->ui32Count, + psEnumDispClassDimsOUT->asDim); + + return 0; +} + +#if defined(SUPPORT_PVRSRV_GET_DC_SYSTEM_BUFFER) +static IMG_INT +PVRSRVGetDCSystemBufferBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_GET_DISPCLASS_SYSBUFFER *psGetDispClassSysBufferIN, //IMG_HANDLE *phGetDispClassSysBufferIN, + PVRSRV_BRIDGE_OUT_GET_DISPCLASS_SYSBUFFER *psGetDispClassSysBufferOUT, + 
PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_HANDLE hBufferInt; + IMG_VOID *pvDispClassInfoInt; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_DISPCLASS_SYSBUFFER); + + NEW_HANDLE_BATCH_OR_ERROR(psGetDispClassSysBufferOUT->eError, psPerProc, 1) + + psGetDispClassSysBufferOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvDispClassInfoInt, + psGetDispClassSysBufferIN->hDeviceKM, + PVRSRV_HANDLE_TYPE_DISP_INFO); + if(psGetDispClassSysBufferOUT->eError != PVRSRV_OK) + { + return 0; + } + + psGetDispClassSysBufferOUT->eError = + PVRSRVGetDCSystemBufferKM(pvDispClassInfoInt, + &hBufferInt); + + if(psGetDispClassSysBufferOUT->eError != PVRSRV_OK) + { + return 0; + } + + /* PRQA S 1461 6 */ /* ignore warning about enum type being converted */ + PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, + &psGetDispClassSysBufferOUT->hBuffer, + hBufferInt, + PVRSRV_HANDLE_TYPE_DISP_BUFFER, + (PVRSRV_HANDLE_ALLOC_FLAG)(PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE | PVRSRV_HANDLE_ALLOC_FLAG_SHARED), + psGetDispClassSysBufferIN->hDeviceKM); + + COMMIT_HANDLE_BATCH_OR_ERROR(psGetDispClassSysBufferOUT->eError, psPerProc) + + return 0; +} +#endif + +static IMG_INT +PVRSRVGetDCInfoBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_GET_DISPCLASS_INFO *psGetDispClassInfoIN, + PVRSRV_BRIDGE_OUT_GET_DISPCLASS_INFO *psGetDispClassInfoOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_VOID *pvDispClassInfo; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_DISPCLASS_INFO); + + psGetDispClassInfoOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvDispClassInfo, + psGetDispClassInfoIN->hDeviceKM, + PVRSRV_HANDLE_TYPE_DISP_INFO); + if(psGetDispClassInfoOUT->eError != PVRSRV_OK) + { + return 0; + } + + psGetDispClassInfoOUT->eError = + PVRSRVGetDCInfoKM(pvDispClassInfo, + &psGetDispClassInfoOUT->sDisplayInfo); + + return 0; +} + +static IMG_INT +PVRSRVCreateDCSwapChainBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_CREATE_DISPCLASS_SWAPCHAIN 
*psCreateDispClassSwapChainIN, + PVRSRV_BRIDGE_OUT_CREATE_DISPCLASS_SWAPCHAIN *psCreateDispClassSwapChainOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_VOID *pvDispClassInfo; + IMG_HANDLE hSwapChainInt; + IMG_UINT32 ui32SwapChainID; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CREATE_DISPCLASS_SWAPCHAIN); + + NEW_HANDLE_BATCH_OR_ERROR(psCreateDispClassSwapChainOUT->eError, psPerProc, 1) + + psCreateDispClassSwapChainOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvDispClassInfo, + psCreateDispClassSwapChainIN->hDeviceKM, + PVRSRV_HANDLE_TYPE_DISP_INFO); + + if(psCreateDispClassSwapChainOUT->eError != PVRSRV_OK) + { + return 0; + } + + /* Get ui32SwapChainID from input */ + ui32SwapChainID = psCreateDispClassSwapChainIN->ui32SwapChainID; + + psCreateDispClassSwapChainOUT->eError = + PVRSRVCreateDCSwapChainKM(psPerProc, pvDispClassInfo, + psCreateDispClassSwapChainIN->ui32Flags, + &psCreateDispClassSwapChainIN->sDstSurfAttrib, + &psCreateDispClassSwapChainIN->sSrcSurfAttrib, + psCreateDispClassSwapChainIN->ui32BufferCount, + psCreateDispClassSwapChainIN->ui32OEMFlags, + &hSwapChainInt, + &ui32SwapChainID +#if defined(PVR_ANDROID_NATIVE_WINDOW_HAS_FENCE) + ,psCreateDispClassSwapChainIN->i32TimelineFd +#endif + ); + + if(psCreateDispClassSwapChainOUT->eError != PVRSRV_OK) + { + return 0; + } + + /* Pass ui32SwapChainID to output */ + psCreateDispClassSwapChainOUT->ui32SwapChainID = ui32SwapChainID; + + PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, + &psCreateDispClassSwapChainOUT->hSwapChain, + hSwapChainInt, + PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN, + PVRSRV_HANDLE_ALLOC_FLAG_NONE, + psCreateDispClassSwapChainIN->hDeviceKM); + + COMMIT_HANDLE_BATCH_OR_ERROR(psCreateDispClassSwapChainOUT->eError, psPerProc) + + return 0; +} + +static IMG_INT +PVRSRVDestroyDCSwapChainBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_DESTROY_DISPCLASS_SWAPCHAIN *psDestroyDispClassSwapChainIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA 
*psPerProc) +{ + IMG_VOID *pvSwapChain; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_DESTROY_DISPCLASS_SWAPCHAIN); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, &pvSwapChain, + psDestroyDispClassSwapChainIN->hSwapChain, + PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = + PVRSRVDestroyDCSwapChainKM(pvSwapChain); + + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = + PVRSRVReleaseHandle(psPerProc->psHandleBase, + psDestroyDispClassSwapChainIN->hSwapChain, + PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN); + + return 0; +} + +static IMG_INT +PVRSRVSetDCDstRectBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_SET_DISPCLASS_RECT *psSetDispClassDstRectIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_VOID *pvDispClassInfo; + IMG_VOID *pvSwapChain; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SET_DISPCLASS_DSTRECT); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvDispClassInfo, + psSetDispClassDstRectIN->hDeviceKM, + PVRSRV_HANDLE_TYPE_DISP_INFO); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvSwapChain, + psSetDispClassDstRectIN->hSwapChain, + PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN); + + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = + PVRSRVSetDCDstRectKM(pvDispClassInfo, + pvSwapChain, + &psSetDispClassDstRectIN->sRect); + + return 0; +} + +static IMG_INT +PVRSRVSetDCSrcRectBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_SET_DISPCLASS_RECT *psSetDispClassSrcRectIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_VOID *pvDispClassInfo; + IMG_VOID *pvSwapChain; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SET_DISPCLASS_SRCRECT); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvDispClassInfo, + 
psSetDispClassSrcRectIN->hDeviceKM, + PVRSRV_HANDLE_TYPE_DISP_INFO); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvSwapChain, + psSetDispClassSrcRectIN->hSwapChain, + PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = + PVRSRVSetDCSrcRectKM(pvDispClassInfo, + pvSwapChain, + &psSetDispClassSrcRectIN->sRect); + + return 0; +} + +static IMG_INT +PVRSRVSetDCDstColourKeyBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_SET_DISPCLASS_COLOURKEY *psSetDispClassColKeyIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_VOID *pvDispClassInfo; + IMG_VOID *pvSwapChain; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SET_DISPCLASS_DSTCOLOURKEY); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvDispClassInfo, + psSetDispClassColKeyIN->hDeviceKM, + PVRSRV_HANDLE_TYPE_DISP_INFO); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvSwapChain, + psSetDispClassColKeyIN->hSwapChain, + PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = + PVRSRVSetDCDstColourKeyKM(pvDispClassInfo, + pvSwapChain, + psSetDispClassColKeyIN->ui32CKColour); + + return 0; +} + +static IMG_INT +PVRSRVSetDCSrcColourKeyBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_SET_DISPCLASS_COLOURKEY *psSetDispClassColKeyIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_VOID *pvDispClassInfo; + IMG_VOID *pvSwapChain; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SET_DISPCLASS_SRCCOLOURKEY); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvDispClassInfo, + psSetDispClassColKeyIN->hDeviceKM, + PVRSRV_HANDLE_TYPE_DISP_INFO); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = + 
PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvSwapChain, + psSetDispClassColKeyIN->hSwapChain, + PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = + PVRSRVSetDCSrcColourKeyKM(pvDispClassInfo, + pvSwapChain, + psSetDispClassColKeyIN->ui32CKColour); + + return 0; +} + +static IMG_INT +PVRSRVGetDCBuffersBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_GET_DISPCLASS_BUFFERS *psGetDispClassBuffersIN, + PVRSRV_BRIDGE_OUT_GET_DISPCLASS_BUFFERS *psGetDispClassBuffersOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_VOID *pvDispClassInfo; + IMG_VOID *pvSwapChain; + IMG_UINT32 i; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_DISPCLASS_BUFFERS); + + NEW_HANDLE_BATCH_OR_ERROR(psGetDispClassBuffersOUT->eError, psPerProc, PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS) + + psGetDispClassBuffersOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvDispClassInfo, + psGetDispClassBuffersIN->hDeviceKM, + PVRSRV_HANDLE_TYPE_DISP_INFO); + if(psGetDispClassBuffersOUT->eError != PVRSRV_OK) + { + return 0; + } + + psGetDispClassBuffersOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvSwapChain, + psGetDispClassBuffersIN->hSwapChain, + PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN); + if(psGetDispClassBuffersOUT->eError != PVRSRV_OK) + { + return 0; + } + + + psGetDispClassBuffersOUT->eError = + PVRSRVGetDCBuffersKM(pvDispClassInfo, + pvSwapChain, + &psGetDispClassBuffersOUT->ui32BufferCount, + psGetDispClassBuffersOUT->ahBuffer, + psGetDispClassBuffersOUT->asPhyAddr); + if (psGetDispClassBuffersOUT->eError != PVRSRV_OK) + { + return 0; + } + + PVR_ASSERT(psGetDispClassBuffersOUT->ui32BufferCount <= PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS); + + for(i = 0; i < psGetDispClassBuffersOUT->ui32BufferCount; i++) + { + IMG_HANDLE hBufferExt; + + /* PRQA S 1461 15 */ /* ignore warning about enum type being converted */ + PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, + &hBufferExt, + 
psGetDispClassBuffersOUT->ahBuffer[i], + PVRSRV_HANDLE_TYPE_DISP_BUFFER, + (PVRSRV_HANDLE_ALLOC_FLAG)(PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE | PVRSRV_HANDLE_ALLOC_FLAG_SHARED), + psGetDispClassBuffersIN->hSwapChain); + + psGetDispClassBuffersOUT->ahBuffer[i] = hBufferExt; + } + + + COMMIT_HANDLE_BATCH_OR_ERROR(psGetDispClassBuffersOUT->eError, psPerProc) + + return 0; +} + +static IMG_INT +PVRSRVSwapToDCBufferBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_BUFFER *psSwapDispClassBufferIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_VOID *pvDispClassInfo; + IMG_VOID *pvSwapChainBuf; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_BUFFER); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvDispClassInfo, + psSwapDispClassBufferIN->hDeviceKM, + PVRSRV_HANDLE_TYPE_DISP_INFO); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = + PVRSRVLookupSubHandle(psPerProc->psHandleBase, + &pvSwapChainBuf, + psSwapDispClassBufferIN->hBuffer, + PVRSRV_HANDLE_TYPE_DISP_BUFFER, + psSwapDispClassBufferIN->hDeviceKM); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + + psRetOUT->eError = + PVRSRVSwapToDCBufferKM(pvDispClassInfo, + pvSwapChainBuf, + psSwapDispClassBufferIN->ui32SwapInterval, + psSwapDispClassBufferIN->hPrivateTag, + psSwapDispClassBufferIN->ui32ClipRectCount, + psSwapDispClassBufferIN->sClipRect); + + return 0; +} + +static IMG_INT +PVRSRVSwapToDCBuffer2BW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_BUFFER2 *psSwapDispClassBufferIN, + PVRSRV_BRIDGE_OUT_SWAP_DISPCLASS_TO_BUFFER2 *psSwapDispClassBufferOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_VOID *pvPrivData = IMG_NULL; + IMG_HANDLE hFence = IMG_NULL; + IMG_VOID *pvDispClassInfo; + IMG_VOID *pvSwapChain; + IMG_HANDLE *phKernelMemInfos; + IMG_HANDLE *phKernelSyncInfos; + IMG_UINT32 i; + +#if defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) || 
defined(PVR_ANDROID_NATIVE_WINDOW_HAS_FENCE) + int iReleaseFd; +#if (LINUX_VERSION_CODE > KERNEL_VERSION(4,2,0)) + iReleaseFd = get_unused_fd_flags(0); +#else + iReleaseFd = get_unused_fd(); +#endif + + if(iReleaseFd < 0) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to find unused fd (%d)", + __func__, iReleaseFd)); + return 0; + } +#endif /* defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) */ + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_BUFFER2); + + psSwapDispClassBufferOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvDispClassInfo, + psSwapDispClassBufferIN->hDeviceKM, + PVRSRV_HANDLE_TYPE_DISP_INFO); + if(psSwapDispClassBufferOUT->eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVSwapToDCBuffer2BW: Failed to look up DISP_INFO handle")); + return 0; + } + + psSwapDispClassBufferOUT->eError = + PVRSRVLookupSubHandle(psPerProc->psHandleBase, + &pvSwapChain, + psSwapDispClassBufferIN->hSwapChain, + PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN, + psSwapDispClassBufferIN->hDeviceKM); + if(psSwapDispClassBufferOUT->eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVSwapToDCBuffer2BW: Failed to look up DISP_BUFFER handle")); + return 0; + } + + if(!OSAccessOK(PVR_VERIFY_WRITE, + psSwapDispClassBufferIN->hKernelMemInfos, + sizeof(IMG_HANDLE) * psSwapDispClassBufferIN->ui32NumMemInfos)) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVSwapToDCBuffer2BW: Access check failed for ppsKernelMemInfos")); + return -EFAULT; + } + + if(!OSAccessOK(PVR_VERIFY_WRITE, + psSwapDispClassBufferIN->hKernelSyncInfos, + sizeof(IMG_HANDLE) * psSwapDispClassBufferIN->ui32NumMemInfos)) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVSwapToDCBuffer2BW: Access check failed for ppsKernelSyncInfos")); + return -EFAULT; + } + + phKernelMemInfos = (IMG_HANDLE *)psSwapDispClassBufferIN->hKernelMemInfos; + phKernelSyncInfos = (IMG_HANDLE *)psSwapDispClassBufferIN->hKernelSyncInfos; + for (i = 0; i < psSwapDispClassBufferIN->ui32NumMemInfos; i++) + { + PVRSRV_KERNEL_MEM_INFO 
*psKernelMemInfo; + + psSwapDispClassBufferOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + (IMG_PVOID *)&psKernelMemInfo, + phKernelMemInfos[i], + PVRSRV_HANDLE_TYPE_MEM_INFO); + if(psSwapDispClassBufferOUT->eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVSwapToDCBuffer2BW: Failed to look up MEM_INFO handle")); + return 0; + } + phKernelMemInfos[i] = psKernelMemInfo; + +#if !(defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) || defined(PVR_ANDROID_NATIVE_WINDOW_HAS_FENCE)) + { + PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo; + + psSwapDispClassBufferOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + (IMG_PVOID *)&psKernelSyncInfo, + phKernelSyncInfos[i], + PVRSRV_HANDLE_TYPE_SYNC_INFO); + if(psSwapDispClassBufferOUT->eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVSwapToDCBuffer2BW: Failed to look up SYNC_INFO handle")); + return 0; + } + phKernelSyncInfos[i] = psKernelSyncInfo; + } +#endif /* !defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) */ + } + + if(psSwapDispClassBufferIN->ui32PrivDataLength > 0) + { + if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP | PVRSRV_SWAP_BUFFER_ALLOCATION, + psSwapDispClassBufferIN->ui32PrivDataLength, + (IMG_VOID **)&pvPrivData, IMG_NULL, + "Swap Command Private Data") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBuffer2BW: Failed to allocate private data space")); + return -ENOMEM; + } + + if(CopyFromUserWrapper(psPerProc, + ui32BridgeID, + pvPrivData, + psSwapDispClassBufferIN->hPrivData, + psSwapDispClassBufferIN->ui32PrivDataLength) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVSwapToDCBuffer2BW: Failed to copy private data")); + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP | PVRSRV_SWAP_BUFFER_ALLOCATION, + psSwapDispClassBufferIN->ui32PrivDataLength, + pvPrivData, IMG_NULL); + return -EFAULT; + } + } + + psSwapDispClassBufferOUT->eError = + PVRSRVSwapToDCBuffer2KM(pvDispClassInfo, + pvSwapChain, + psSwapDispClassBufferIN->ui32SwapInterval, + (PVRSRV_KERNEL_MEM_INFO**) 
psSwapDispClassBufferIN->hKernelMemInfos,
+								(PVRSRV_KERNEL_SYNC_INFO**)
+								psSwapDispClassBufferIN->hKernelSyncInfos,
+								psSwapDispClassBufferIN->ui32NumMemInfos,
+								pvPrivData,
+								psSwapDispClassBufferIN->ui32PrivDataLength,
+								&hFence);
+
+	if(psSwapDispClassBufferOUT->eError != PVRSRV_OK)
+	{
+		/* Free with the same flags the buffer was allocated with
+		 * (PVRSRV_SWAP_BUFFER_ALLOCATION was missing here), and only when
+		 * an allocation was actually made (ui32PrivDataLength may be 0) */
+		if(pvPrivData != IMG_NULL)
+		{
+			OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP | PVRSRV_SWAP_BUFFER_ALLOCATION,
+					  psSwapDispClassBufferIN->ui32PrivDataLength,
+					  pvPrivData, IMG_NULL);
+		}
+	}
+
+#if defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC)
+	if(hFence)
+	{
+		struct sync_fence *psFence = hFence;
+		sync_fence_install(psFence, iReleaseFd);
+		psSwapDispClassBufferOUT->hFence = (IMG_HANDLE)(uintptr_t)iReleaseFd;
+	}
+	else
+	{
+		psSwapDispClassBufferOUT->hFence = (IMG_HANDLE)-1;
+		put_unused_fd(iReleaseFd);
+	}
+#elif defined(PVR_ANDROID_NATIVE_WINDOW_HAS_FENCE)
+	if(hFence)
+	{
+		/* sync_file_create() can fail under memory pressure; the old code
+		 * dereferenced the result unconditionally (NULL deref) */
+		struct sync_file *psSyncFile = sync_file_create(hFence);
+		if(psSyncFile)
+		{
+			fd_install(iReleaseFd, psSyncFile->file);
+			psSwapDispClassBufferOUT->hFence = (IMG_HANDLE)(uintptr_t)iReleaseFd;
+		}
+		else
+		{
+			PVR_DPF((PVR_DBG_ERROR, "PVRSRVSwapToDCBuffer2BW: sync_file_create failed"));
+			psSwapDispClassBufferOUT->hFence = (IMG_HANDLE)-1;
+			put_unused_fd(iReleaseFd);
+		}
+	}
+	else
+	{
+		psSwapDispClassBufferOUT->hFence = (IMG_HANDLE)-1;
+		put_unused_fd(iReleaseFd);
+	}
+#else /* defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) */
+	psSwapDispClassBufferOUT->hFence = (IMG_HANDLE)-1;
+#endif /* defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) */
+
+	return 0;
+}
+
+
+
+static IMG_INT
+PVRSRVSwapToDCSystemBW(IMG_UINT32 ui32BridgeID,
+					   PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_SYSTEM *psSwapDispClassSystemIN,
+					   PVRSRV_BRIDGE_RETURN *psRetOUT,
+					   PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+	IMG_VOID *pvDispClassInfo;
+	IMG_VOID *pvSwapChain;
+
+	PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_SYSTEM);
+
+	psRetOUT->eError =
+		PVRSRVLookupHandle(psPerProc->psHandleBase,
+						   &pvDispClassInfo,
+						   psSwapDispClassSystemIN->hDeviceKM,
+						   PVRSRV_HANDLE_TYPE_DISP_INFO);
+	if(psRetOUT->eError != PVRSRV_OK)
+	{
+		return 0;
+	}
+
+	psRetOUT->eError =
+		PVRSRVLookupSubHandle(psPerProc->psHandleBase,
+						  &pvSwapChain,
+						  psSwapDispClassSystemIN->hSwapChain,
+						  PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN,
+
psSwapDispClassSystemIN->hDeviceKM); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + psRetOUT->eError = + PVRSRVSwapToDCSystemKM(pvDispClassInfo, + pvSwapChain); + + return 0; +} + +static IMG_INT +PVRSRVOpenBCDeviceBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_OPEN_BUFFERCLASS_DEVICE *psOpenBufferClassDeviceIN, + PVRSRV_BRIDGE_OUT_OPEN_BUFFERCLASS_DEVICE *psOpenBufferClassDeviceOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_HANDLE hDevCookieInt; + IMG_HANDLE hBufClassInfo; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_OPEN_BUFFERCLASS_DEVICE); + + NEW_HANDLE_BATCH_OR_ERROR(psOpenBufferClassDeviceOUT->eError, psPerProc, 1) + + psOpenBufferClassDeviceOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDevCookieInt, + psOpenBufferClassDeviceIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + if(psOpenBufferClassDeviceOUT->eError != PVRSRV_OK) + { + return 0; + } + + psOpenBufferClassDeviceOUT->eError = + PVRSRVOpenBCDeviceKM(psPerProc, + psOpenBufferClassDeviceIN->ui32DeviceID, + hDevCookieInt, + &hBufClassInfo); + if(psOpenBufferClassDeviceOUT->eError != PVRSRV_OK) + { + return 0; + } + + PVRSRVAllocHandleNR(psPerProc->psHandleBase, + &psOpenBufferClassDeviceOUT->hDeviceKM, + hBufClassInfo, + PVRSRV_HANDLE_TYPE_BUF_INFO, + PVRSRV_HANDLE_ALLOC_FLAG_NONE); + + COMMIT_HANDLE_BATCH_OR_ERROR(psOpenBufferClassDeviceOUT->eError, psPerProc) + + return 0; +} + +static IMG_INT +PVRSRVCloseBCDeviceBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_CLOSE_BUFFERCLASS_DEVICE *psCloseBufferClassDeviceIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_VOID *pvBufClassInfo; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CLOSE_BUFFERCLASS_DEVICE); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvBufClassInfo, + psCloseBufferClassDeviceIN->hDeviceKM, + PVRSRV_HANDLE_TYPE_BUF_INFO); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = + 
PVRSRVCloseBCDeviceKM(pvBufClassInfo); + + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = PVRSRVReleaseHandle(psPerProc->psHandleBase, + psCloseBufferClassDeviceIN->hDeviceKM, + PVRSRV_HANDLE_TYPE_BUF_INFO); + + return 0; +} + +static IMG_INT +PVRSRVGetBCInfoBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_INFO *psGetBufferClassInfoIN, + PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_INFO *psGetBufferClassInfoOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_VOID *pvBufClassInfo; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_BUFFERCLASS_INFO); + + psGetBufferClassInfoOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvBufClassInfo, + psGetBufferClassInfoIN->hDeviceKM, + PVRSRV_HANDLE_TYPE_BUF_INFO); + if(psGetBufferClassInfoOUT->eError != PVRSRV_OK) + { + return 0; + } + + psGetBufferClassInfoOUT->eError = + PVRSRVGetBCInfoKM(pvBufClassInfo, + &psGetBufferClassInfoOUT->sBufferInfo); + return 0; +} + +static IMG_INT +PVRSRVGetBCBufferBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_BUFFER *psGetBufferClassBufferIN, + PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_BUFFER *psGetBufferClassBufferOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_VOID *pvBufClassInfo; + IMG_HANDLE hBufferInt; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_BUFFERCLASS_BUFFER); + + NEW_HANDLE_BATCH_OR_ERROR(psGetBufferClassBufferOUT->eError, psPerProc, 1) + + psGetBufferClassBufferOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvBufClassInfo, + psGetBufferClassBufferIN->hDeviceKM, + PVRSRV_HANDLE_TYPE_BUF_INFO); + if(psGetBufferClassBufferOUT->eError != PVRSRV_OK) + { + return 0; + } + + psGetBufferClassBufferOUT->eError = + PVRSRVGetBCBufferKM(pvBufClassInfo, + psGetBufferClassBufferIN->ui32BufferIndex, + &hBufferInt); + + if(psGetBufferClassBufferOUT->eError != PVRSRV_OK) + { + return 0; + } + + /* PRQA S 1461 6 */ /* ignore warning about enum type being converted */ + 
PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, + &psGetBufferClassBufferOUT->hBuffer, + hBufferInt, + PVRSRV_HANDLE_TYPE_BUF_BUFFER, + (PVRSRV_HANDLE_ALLOC_FLAG)(PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE | PVRSRV_HANDLE_ALLOC_FLAG_SHARED), + psGetBufferClassBufferIN->hDeviceKM); + + COMMIT_HANDLE_BATCH_OR_ERROR(psGetBufferClassBufferOUT->eError, psPerProc) + + return 0; +} + +#endif /* defined(SUPPORT_PVRSRV_DEVICE_CLASS) */ + +static IMG_INT +PVRSRVAllocSharedSysMemoryBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_ALLOC_SHARED_SYS_MEM *psAllocSharedSysMemIN, + PVRSRV_BRIDGE_OUT_ALLOC_SHARED_SYS_MEM *psAllocSharedSysMemOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ALLOC_SHARED_SYS_MEM); + + NEW_HANDLE_BATCH_OR_ERROR(psAllocSharedSysMemOUT->eError, psPerProc, 1) + + psAllocSharedSysMemOUT->eError = + PVRSRVAllocSharedSysMemoryKM(psPerProc, + psAllocSharedSysMemIN->ui32Flags, + psAllocSharedSysMemIN->uSize, + &psKernelMemInfo); + if(psAllocSharedSysMemOUT->eError != PVRSRV_OK) + { + return 0; + } + + OSMemSet(&psAllocSharedSysMemOUT->sClientMemInfo, + 0, + sizeof(psAllocSharedSysMemOUT->sClientMemInfo)); + + psAllocSharedSysMemOUT->sClientMemInfo.pvLinAddrKM = + psKernelMemInfo->pvLinAddrKM; + + psAllocSharedSysMemOUT->sClientMemInfo.pvLinAddr = 0; + psAllocSharedSysMemOUT->sClientMemInfo.ui32Flags = + psKernelMemInfo->ui32Flags; + psAllocSharedSysMemOUT->sClientMemInfo.uAllocSize = + psKernelMemInfo->uAllocSize; + psAllocSharedSysMemOUT->sClientMemInfo.hMappingInfo = psKernelMemInfo->sMemBlk.hOSMemHandle; + + PVRSRVAllocHandleNR(psPerProc->psHandleBase, + &psAllocSharedSysMemOUT->sClientMemInfo.hKernelMemInfo, + psKernelMemInfo, + PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO, + PVRSRV_HANDLE_ALLOC_FLAG_NONE); + + COMMIT_HANDLE_BATCH_OR_ERROR(psAllocSharedSysMemOUT->eError, psPerProc) + + return 0; +} + +static IMG_INT +PVRSRVFreeSharedSysMemoryBW(IMG_UINT32 ui32BridgeID, + 
PVRSRV_BRIDGE_IN_FREE_SHARED_SYS_MEM *psFreeSharedSysMemIN, + PVRSRV_BRIDGE_OUT_FREE_SHARED_SYS_MEM *psFreeSharedSysMemOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_FREE_SHARED_SYS_MEM); + + psFreeSharedSysMemOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + (IMG_VOID **)&psKernelMemInfo, + psFreeSharedSysMemIN->hKernelMemInfo, + PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO); + + if(psFreeSharedSysMemOUT->eError != PVRSRV_OK) + return 0; + + psFreeSharedSysMemOUT->eError = + PVRSRVFreeSharedSysMemoryKM(psKernelMemInfo); + if(psFreeSharedSysMemOUT->eError != PVRSRV_OK) + return 0; + + psFreeSharedSysMemOUT->eError = + PVRSRVReleaseHandle(psPerProc->psHandleBase, + psFreeSharedSysMemIN->hKernelMemInfo, + PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO); + return 0; +} + +static IMG_INT +PVRSRVMapMemInfoMemBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_MAP_MEMINFO_MEM *psMapMemInfoMemIN, + PVRSRV_BRIDGE_OUT_MAP_MEMINFO_MEM *psMapMemInfoMemOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo; + PVRSRV_HANDLE_TYPE eHandleType; + IMG_HANDLE hParent; + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_MAP_MEMINFO_MEM); + + NEW_HANDLE_BATCH_OR_ERROR(psMapMemInfoMemOUT->eError, psPerProc, 2) + + psMapMemInfoMemOUT->eError = + PVRSRVLookupHandleAnyType(psPerProc->psHandleBase, + (IMG_VOID **)&psKernelMemInfo, + &eHandleType, + psMapMemInfoMemIN->hKernelMemInfo); + if(psMapMemInfoMemOUT->eError != PVRSRV_OK) + { + return 0; + } + + switch (eHandleType) + { +#if defined(PVR_SECURE_HANDLES) + case PVRSRV_HANDLE_TYPE_MEM_INFO: + case PVRSRV_HANDLE_TYPE_MEM_INFO_REF: + case PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO: +#else + case PVRSRV_HANDLE_TYPE_NONE: +#endif + break; + default: + psMapMemInfoMemOUT->eError = PVRSRV_ERROR_INVALID_HANDLE_TYPE; + return 0; + } + + /* + * To prevent the building up of deep chains of subhandles, parent + * the new 
meminfo off the parent of the input meminfo, if it has + * a parent. + */ + psMapMemInfoMemOUT->eError = + PVRSRVGetParentHandle(psPerProc->psHandleBase, + &hParent, + psMapMemInfoMemIN->hKernelMemInfo, + eHandleType); + if (psMapMemInfoMemOUT->eError != PVRSRV_OK) + { + return 0; + } + if (hParent == IMG_NULL) + { + hParent = psMapMemInfoMemIN->hKernelMemInfo; + } + + OSMemSet(&psMapMemInfoMemOUT->sClientMemInfo, + 0, + sizeof(psMapMemInfoMemOUT->sClientMemInfo)); + + psMapMemInfoMemOUT->sClientMemInfo.pvLinAddrKM = + psKernelMemInfo->pvLinAddrKM; + + psMapMemInfoMemOUT->sClientMemInfo.pvLinAddr = 0; + psMapMemInfoMemOUT->sClientMemInfo.sDevVAddr = + psKernelMemInfo->sDevVAddr; + psMapMemInfoMemOUT->sClientMemInfo.ui32Flags = + psKernelMemInfo->ui32Flags; + psMapMemInfoMemOUT->sClientMemInfo.uAllocSize = + psKernelMemInfo->uAllocSize; + psMapMemInfoMemOUT->sClientMemInfo.hMappingInfo = psKernelMemInfo->sMemBlk.hOSMemHandle; + + PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, + &psMapMemInfoMemOUT->sClientMemInfo.hKernelMemInfo, + psKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO_REF, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + hParent); + + if(psKernelMemInfo->ui32Flags & PVRSRV_MEM_NO_SYNCOBJ) + { + /* signal no syncinfo */ + OSMemSet(&psMapMemInfoMemOUT->sClientSyncInfo, + 0, + sizeof (PVRSRV_CLIENT_SYNC_INFO)); + } + else + { + /* and setup the sync info */ +#if !defined(PVRSRV_DISABLE_UM_SYNCOBJ_MAPPINGS) + psMapMemInfoMemOUT->sClientSyncInfo.psSyncData = + psKernelMemInfo->psKernelSyncInfo->psSyncData; + psMapMemInfoMemOUT->sClientSyncInfo.sWriteOpsCompleteDevVAddr = + psKernelMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr; + psMapMemInfoMemOUT->sClientSyncInfo.sReadOpsCompleteDevVAddr = + psKernelMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr; + psMapMemInfoMemOUT->sClientSyncInfo.sReadOps2CompleteDevVAddr = + psKernelMemInfo->psKernelSyncInfo->sReadOps2CompleteDevVAddr; + + psMapMemInfoMemOUT->sClientSyncInfo.hMappingInfo = + 
psKernelMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle; +#endif + + psMapMemInfoMemOUT->sClientMemInfo.psClientSyncInfo = &psMapMemInfoMemOUT->sClientSyncInfo; + + PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, + &psMapMemInfoMemOUT->sClientSyncInfo.hKernelSyncInfo, + psKernelMemInfo->psKernelSyncInfo, + PVRSRV_HANDLE_TYPE_SYNC_INFO, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + psMapMemInfoMemOUT->sClientMemInfo.hKernelMemInfo); + } + + COMMIT_HANDLE_BATCH_OR_ERROR(psMapMemInfoMemOUT->eError, psPerProc) + + return 0; +} + + + +IMG_INT +DummyBW(IMG_UINT32 ui32BridgeID, + IMG_VOID *psBridgeIn, + IMG_VOID *psBridgeOut, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ +#if !defined(DEBUG) + PVR_UNREFERENCED_PARAMETER(ui32BridgeID); +#endif + PVR_UNREFERENCED_PARAMETER(psBridgeIn); + PVR_UNREFERENCED_PARAMETER(psBridgeOut); + PVR_UNREFERENCED_PARAMETER(psPerProc); + +#if defined(DEBUG_BRIDGE_KM) + PVR_DPF((PVR_DBG_ERROR, "%s: BRIDGE ERROR: BridgeID %u (%s) mapped to " + "Dummy Wrapper (probably not what you want!)", + __FUNCTION__, ui32BridgeID, g_BridgeDispatchTable[ui32BridgeID].pszIOCName)); +#else + PVR_DPF((PVR_DBG_ERROR, "%s: BRIDGE ERROR: BridgeID %u mapped to " + "Dummy Wrapper (probably not what you want!)", + __FUNCTION__, ui32BridgeID)); +#endif + return -ENOTTY; +} + + +/*! + * ***************************************************************************** + * @brief A wrapper for filling in the g_BridgeDispatchTable array that does + * error checking. 
+ * + * @param ui32Index + * @param pszIOCName + * @param pfFunction + * @param pszFunctionName + * + * @return + ********************************************************************************/ +IMG_VOID +_SetDispatchTableEntry(IMG_UINT32 ui32Index, + const IMG_CHAR *pszIOCName, + BridgeWrapperFunction pfFunction, + const IMG_CHAR *pszFunctionName) +{ + static IMG_UINT uiPrevIndex = ~0U; /* -1 */ +#if !defined(DEBUG) + PVR_UNREFERENCED_PARAMETER(pszIOCName); +#endif +#if !defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE) && !defined(DEBUG_BRIDGE_KM) + PVR_UNREFERENCED_PARAMETER(pszFunctionName); +#endif + +#if defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE) + /* INTEGRATION_POINT: Enable this to dump out the dispatch table entries */ + PVR_DPF((PVR_DBG_WARNING, "%s: %d %s %s", __FUNCTION__, ui32Index, pszIOCName, pszFunctionName)); +#endif + + /* We should never be over-writing a previous entry. + * If we are, tell the world about it. + * NOTE: This shouldn't be debug only since switching from debug->release + * etc is likly to modify the available ioctls and thus be a point where + * mistakes are exposed. This isn't run at at a performance critical time. + */ + if(g_BridgeDispatchTable[ui32Index].pfFunction) + { +#if defined(DEBUG_BRIDGE_KM) + PVR_DPF((PVR_DBG_ERROR, + "%s: BUG!: Adding dispatch table entry for %s clobbers an existing entry for %s", + __FUNCTION__, pszIOCName, g_BridgeDispatchTable[ui32Index].pszIOCName)); +#else + PVR_DPF((PVR_DBG_ERROR, + "%s: BUG!: Adding dispatch table entry for %s clobbers an existing entry (index=%u)", + __FUNCTION__, pszIOCName, ui32Index)); +#endif + PVR_DPF((PVR_DBG_ERROR, "NOTE: Enabling DEBUG_BRIDGE_KM_DISPATCH_TABLE may help debug this issue.")); + } + + /* Any gaps are sub-optimal in-terms of memory usage, but we are mainly + * interested in spotting any large gap of wasted memory that could be + * accidentally introduced. + * + * This will currently flag up any gaps > 5 entries. 
+ * + * NOTE: This shouldn't be debug only since switching from debug->release + * etc is likly to modify the available ioctls and thus be a point where + * mistakes are exposed. This isn't run at at a performance critical time. + */ +// if((uiPrevIndex != (IMG_UINT)-1) && + if((uiPrevIndex != ~0U) && + ((ui32Index >= uiPrevIndex + DISPATCH_TABLE_GAP_THRESHOLD) || + (ui32Index <= uiPrevIndex))) + { +#if defined(DEBUG_BRIDGE_KM) + PVR_DPF((PVR_DBG_WARNING, + "%s: There is a gap in the dispatch table between indices %u (%s) and %u (%s)", + __FUNCTION__, uiPrevIndex, g_BridgeDispatchTable[uiPrevIndex].pszIOCName, + ui32Index, pszIOCName)); +#else + PVR_DPF((PVR_DBG_WARNING, + "%s: There is a gap in the dispatch table between indices %u and %u (%s)", + __FUNCTION__, (IMG_UINT)uiPrevIndex, (IMG_UINT)ui32Index, pszIOCName)); +#endif + PVR_DPF((PVR_DBG_ERROR, "NOTE: Enabling DEBUG_BRIDGE_KM_DISPATCH_TABLE may help debug this issue.")); + } + + g_BridgeDispatchTable[ui32Index].pfFunction = pfFunction; +#if defined(DEBUG_BRIDGE_KM) + g_BridgeDispatchTable[ui32Index].pszIOCName = pszIOCName; + g_BridgeDispatchTable[ui32Index].pszFunctionName = pszFunctionName; + g_BridgeDispatchTable[ui32Index].ui32CallCount = 0; + g_BridgeDispatchTable[ui32Index].ui32CopyFromUserTotalBytes = 0; +#endif + + uiPrevIndex = ui32Index; +} + +static IMG_INT +PVRSRVInitSrvConnectBW(IMG_UINT32 ui32BridgeID, + IMG_VOID *psBridgeIn, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVR_UNREFERENCED_PARAMETER(psBridgeIn); + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_INITSRV_CONNECT); + PVR_UNREFERENCED_PARAMETER(psBridgeIn); + + /* PRQA S 3415 1 */ /* side effects needed - if any step fails */ + if((OSProcHasPrivSrvInit() == IMG_FALSE) || PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_RUNNING) || PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_RAN)) + { + psRetOUT->eError = PVRSRV_ERROR_SRV_CONNECT_FAILED; + return 0; + } + +#if defined (__linux__) || defined 
(__QNXNTO__) + PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_RUNNING, IMG_TRUE); +#endif + psPerProc->bInitProcess = IMG_TRUE; + + psRetOUT->eError = PVRSRV_OK; + + return 0; +} + + +static IMG_INT +PVRSRVInitSrvDisconnectBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_INITSRV_DISCONNECT *psInitSrvDisconnectIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_INITSRV_DISCONNECT); + + if(!psPerProc->bInitProcess) + { + psRetOUT->eError = PVRSRV_ERROR_SRV_DISCONNECT_FAILED; + return 0; + } + + psPerProc->bInitProcess = IMG_FALSE; + +#if defined(SUPPORT_PDUMP_MULTI_PROCESS) + psPerProc->bPDumpActive = IMG_FALSE; +#endif + + PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_RUNNING, IMG_FALSE); + PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_RAN, IMG_TRUE); + + psRetOUT->eError = PVRSRVFinaliseSystem(psInitSrvDisconnectIN->bInitSuccesful); + + PVRSRVSetInitServerState( PVRSRV_INIT_SERVER_SUCCESSFUL , + ((psRetOUT->eError == PVRSRV_OK) && (psInitSrvDisconnectIN->bInitSuccesful)) + ? 
IMG_TRUE : IMG_FALSE); + + return 0; +} + + +static IMG_INT +PVRSRVEventObjectWaitBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_EVENT_OBJECT_WAIT *psEventObjectWaitIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_HANDLE hOSEventKM; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_EVENT_OBJECT_WAIT); + + psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hOSEventKM, + psEventObjectWaitIN->hOSEventKM, + PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT); + + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = OSEventObjectWaitKM(hOSEventKM); + + return 0; +} + + +static IMG_INT +PVRSRVEventObjectOpenBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_EVENT_OBJECT_OPEN *psEventObjectOpenIN, + PVRSRV_BRIDGE_OUT_EVENT_OBJECT_OPEN *psEventObjectOpenOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_EVENT_OBJECT_OPEN); + + NEW_HANDLE_BATCH_OR_ERROR(psEventObjectOpenOUT->eError, psPerProc, 1) + + psEventObjectOpenOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &psEventObjectOpenIN->sEventObject.hOSEventKM, + psEventObjectOpenIN->sEventObject.hOSEventKM, + PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT); + + if(psEventObjectOpenOUT->eError != PVRSRV_OK) + { + return 0; + } + + psEventObjectOpenOUT->eError = OSEventObjectOpenKM(&psEventObjectOpenIN->sEventObject, &psEventObjectOpenOUT->hOSEvent); + + if(psEventObjectOpenOUT->eError != PVRSRV_OK) + { + return 0; + } + + PVRSRVAllocHandleNR(psPerProc->psHandleBase, + &psEventObjectOpenOUT->hOSEvent, + psEventObjectOpenOUT->hOSEvent, + PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI); + + COMMIT_HANDLE_BATCH_OR_ERROR(psEventObjectOpenOUT->eError, psPerProc) + + return 0; +} + + +static IMG_INT +PVRSRVEventObjectCloseBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_EVENT_OBJECT_CLOSE *psEventObjectCloseIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ 
+ IMG_HANDLE hOSEventKM; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_EVENT_OBJECT_CLOSE); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &psEventObjectCloseIN->sEventObject.hOSEventKM, + psEventObjectCloseIN->sEventObject.hOSEventKM, + PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, + &hOSEventKM, + psEventObjectCloseIN->hOSEventKM, + PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT); + + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = OSEventObjectCloseKM(&psEventObjectCloseIN->sEventObject, hOSEventKM); + + return 0; +} + + +typedef struct _MODIFY_SYNC_OP_INFO +{ + IMG_HANDLE hResItem; + PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo; + IMG_UINT32 ui32ModifyFlags; + IMG_UINT32 ui32ReadOpsPendingSnapShot; + IMG_UINT32 ui32WriteOpsPendingSnapShot; + IMG_UINT32 ui32ReadOps2PendingSnapShot; +} MODIFY_SYNC_OP_INFO; + + +static PVRSRV_ERROR DoQuerySyncOpsSatisfied(PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo, + IMG_UINT32 ui32ReadOpsPendingSnapShot, + IMG_UINT32 ui32WriteOpsPendingSnapShot, + IMG_UINT32 ui32ReadOps2PendingSnapShot) +{ + IMG_UINT32 ui32WriteOpsPending; + IMG_UINT32 ui32ReadOpsPending; + IMG_UINT32 ui32ReadOps2Pending; + + /* + * + * We wait until the complete count reaches _or_moves_past_ the + * snapshot value. + * + */ + + if (!psKernelSyncInfo) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* + let p be the pending ops count + let c be the complete ops count + let p' be the previously taken snapshot + + if p exceeds c by an amount greater than that by which + p exceeds p', then the condition is not yet satisfied. 
+ + Note that (p - c) can never be negative, and neither can (p - p') + so we can do the comparison using unsigned arithmetic + */ + ui32WriteOpsPending = psKernelSyncInfo->psSyncData->ui32WriteOpsPending; + ui32ReadOpsPending = psKernelSyncInfo->psSyncData->ui32ReadOpsPending; + ui32ReadOps2Pending = psKernelSyncInfo->psSyncData->ui32ReadOps2Pending; + + if((ui32WriteOpsPending - ui32WriteOpsPendingSnapShot >= + ui32WriteOpsPending - psKernelSyncInfo->psSyncData->ui32WriteOpsComplete) && + (ui32ReadOpsPending - ui32ReadOpsPendingSnapShot >= + ui32ReadOpsPending - psKernelSyncInfo->psSyncData->ui32ReadOpsComplete) && + (ui32ReadOps2Pending - ui32ReadOps2PendingSnapShot >= + ui32ReadOps2Pending - psKernelSyncInfo->psSyncData->ui32ReadOps2Complete)) + { +#if defined(PDUMP) && !defined(SUPPORT_VGX) + /* pdump the sync pol: reads */ + PDumpComment("Poll for read ops complete to reach value (pdump: %u, actual snapshot: %u)", + psKernelSyncInfo->psSyncData->ui32LastReadOpDumpVal, + ui32ReadOpsPendingSnapShot); + PDumpMemPolKM(psKernelSyncInfo->psSyncDataMemInfoKM, + offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete), + psKernelSyncInfo->psSyncData->ui32LastReadOpDumpVal, + 0xFFFFFFFF, + PDUMP_POLL_OPERATOR_EQUAL, /* * see "NB" below */ + 0, + MAKEUNIQUETAG(psKernelSyncInfo->psSyncDataMemInfoKM)); + + /* pdump the sync pol: writes */ + PDumpComment("Poll for write ops complete to reach value (pdump: %u, actual snapshot: %u)", + psKernelSyncInfo->psSyncData->ui32LastOpDumpVal, + ui32WriteOpsPendingSnapShot); + PDumpMemPolKM(psKernelSyncInfo->psSyncDataMemInfoKM, + offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete), + psKernelSyncInfo->psSyncData->ui32LastOpDumpVal, + 0xFFFFFFFF, + PDUMP_POLL_OPERATOR_EQUAL, /* * see "NB" below */ + 0, + MAKEUNIQUETAG(psKernelSyncInfo->psSyncDataMemInfoKM)); + /* NB: FIXME -- really need to POL on an expression to + accurately reflect the condition we need to check. How to + do this in PDUMP? 
*/
+#endif
+		return PVRSRV_OK;
+	}
+	else
+	{
+		return PVRSRV_ERROR_RETRY;
+	}
+}
+
+
+/*
+ * Apply the completion increments recorded in a modification object to its
+ * attached sync object.  Fails with PVRSRV_ERROR_BAD_SYNC_STATE unless the
+ * complete counters have caught up with the pending snapshots taken when the
+ * modification was queued (i.e. the ops the object was waiting on have
+ * flushed).
+ */
+static PVRSRV_ERROR DoModifyCompleteSyncOps(MODIFY_SYNC_OP_INFO *psModSyncOpInfo)
+{
+	PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
+
+	psKernelSyncInfo = psModSyncOpInfo->psKernelSyncInfo;
+
+	if (!psKernelSyncInfo)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* If user has used the API correctly, we will always have reached the pending snapshot.
+	   We should catch this error on the client side of the bridge and report it in an obvious way */
+	if((psModSyncOpInfo->ui32WriteOpsPendingSnapShot != psKernelSyncInfo->psSyncData->ui32WriteOpsComplete)
+	   || (psModSyncOpInfo->ui32ReadOpsPendingSnapShot != psKernelSyncInfo->psSyncData->ui32ReadOpsComplete))
+	{
+		return PVRSRV_ERROR_BAD_SYNC_STATE;
+	}
+
+	/* update the WOpComplete */
+	if(psModSyncOpInfo->ui32ModifyFlags & PVRSRV_MODIFYSYNCOPS_FLAGS_WO_INC)
+	{
+		psKernelSyncInfo->psSyncData->ui32WriteOpsComplete++;
+	}
+
+	/* update the ROpComplete */
+	if(psModSyncOpInfo->ui32ModifyFlags & PVRSRV_MODIFYSYNCOPS_FLAGS_RO_INC)
+	{
+		psKernelSyncInfo->psSyncData->ui32ReadOpsComplete++;
+	}
+
+	/* update the ROp2Complete */
+	if(psModSyncOpInfo->ui32ModifyFlags & PVRSRV_MODIFYSYNCOPS_FLAGS_RO2_INC)
+	{
+		psKernelSyncInfo->psSyncData->ui32ReadOps2Complete++;
+	}
+
+	PVR_TTRACE(PVRSRV_TRACE_GROUP_MODOBJ, PVRSRV_TRACE_CLASS_CMD_COMP_START, MODOBJ_TOKEN_COMPLETE_PENDING);
+	PVR_TTRACE_SYNC_OBJECT(PVRSRV_TRACE_GROUP_MODOBJ, MODOBJ_TOKEN_SYNC_UPDATE,
+			psKernelSyncInfo, PVRSRV_SYNCOP_COMPLETE);
+	PVR_TTRACE(PVRSRV_TRACE_GROUP_MODOBJ, PVRSRV_TRACE_CLASS_CMD_COMP_END, MODOBJ_TOKEN_COMPLETE_PENDING);
+
+	return PVRSRV_OK;
+}
+
+
+/*
+ * ResMan cleanup callback for a modification object.  If the object is still
+ * attached to a sync object, poll (bounded by MAX_HW_TIME_US) until the ops
+ * outstanding at queue time have flushed, apply the completion increments and
+ * drop the sync object reference; then free the modification object itself
+ * and re-kick the services-managed devices.
+ */
+static PVRSRV_ERROR ModifyCompleteSyncOpsCallBack(IMG_PVOID pvParam,
+												  IMG_UINT32 ui32Param,
+												  IMG_BOOL bDummy)
+{
+	MODIFY_SYNC_OP_INFO *psModSyncOpInfo;
+
+	PVR_UNREFERENCED_PARAMETER(ui32Param);
+	PVR_UNREFERENCED_PARAMETER(bDummy);
+
+	if (!pvParam)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "ModifyCompleteSyncOpsCallBack: invalid parameter"));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	psModSyncOpInfo = (MODIFY_SYNC_OP_INFO*)pvParam;
+
+	if (psModSyncOpInfo->psKernelSyncInfo)
+	{
+		LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+		{
+			if (DoQuerySyncOpsSatisfied(psModSyncOpInfo->psKernelSyncInfo,
+										psModSyncOpInfo->ui32ReadOpsPendingSnapShot,
+										psModSyncOpInfo->ui32WriteOpsPendingSnapShot,
+										psModSyncOpInfo->ui32ReadOps2PendingSnapShot) == PVRSRV_OK)
+			{
+				goto OpFlushedComplete;
+			}
+			PVR_DPF((PVR_DBG_WARNING, "ModifyCompleteSyncOpsCallBack: waiting for current Ops to flush"));
+			OSSleepms(1);
+		} END_LOOP_UNTIL_TIMEOUT();
+
+		PVR_DPF((PVR_DBG_ERROR, "ModifyCompleteSyncOpsCallBack: timeout whilst waiting for current Ops to flush."));
+		PVR_DPF((PVR_DBG_ERROR, "  Write ops pending snapshot = %d, write ops complete = %d",
+				 psModSyncOpInfo->ui32WriteOpsPendingSnapShot,
+				 psModSyncOpInfo->psKernelSyncInfo->psSyncData->ui32WriteOpsComplete));
+		PVR_DPF((PVR_DBG_ERROR, "  Read ops pending snapshot = %d, read ops complete = %d",
+				 psModSyncOpInfo->ui32ReadOpsPendingSnapShot,
+				 psModSyncOpInfo->psKernelSyncInfo->psSyncData->ui32ReadOpsComplete));
+		PVR_DPF((PVR_DBG_ERROR, "  Read ops pending snapshot = %d, read ops2 complete = %d",
+				 psModSyncOpInfo->ui32ReadOps2PendingSnapShot,
+				 psModSyncOpInfo->psKernelSyncInfo->psSyncData->ui32ReadOps2Complete));
+		return PVRSRV_ERROR_TIMEOUT;
+
+OpFlushedComplete:
+		DoModifyCompleteSyncOps(psModSyncOpInfo);
+		PVRSRVKernelSyncInfoDecRef(psModSyncOpInfo->psKernelSyncInfo, IMG_NULL);
+	}
+
+	OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(MODIFY_SYNC_OP_INFO), (IMG_VOID *)psModSyncOpInfo, 0);
+
+	/* re-kick all services managed devices */
+	PVRSRVScheduleDeviceCallbacks();
+
+	return PVRSRV_OK;
+}
+
+
+/*
+ * Bridge wrapper: allocate an (initially empty) sync modification object,
+ * wrap it in a per-process handle and register it with ResMan so that
+ * ModifyCompleteSyncOpsCallBack runs on process teardown.
+ */
+static IMG_INT
+PVRSRVCreateSyncInfoModObjBW(IMG_UINT32 ui32BridgeID,
+							 IMG_VOID *psBridgeIn,
+							 PVRSRV_BRIDGE_OUT_CREATE_SYNC_INFO_MOD_OBJ *psCreateSyncInfoModObjOUT,
+							 PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+	MODIFY_SYNC_OP_INFO *psModSyncOpInfo;
+
+	PVR_UNREFERENCED_PARAMETER(psBridgeIn);
+
+	PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CREATE_SYNC_INFO_MOD_OBJ);
+
+	NEW_HANDLE_BATCH_OR_ERROR(psCreateSyncInfoModObjOUT->eError, psPerProc, 1)
+
+	ASSIGN_AND_EXIT_ON_ERROR(psCreateSyncInfoModObjOUT->eError,
+			OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+					sizeof(MODIFY_SYNC_OP_INFO),
+					(IMG_VOID **)&psModSyncOpInfo, 0,
+					"ModSyncOpInfo (MODIFY_SYNC_OP_INFO)"));
+
+	psModSyncOpInfo->psKernelSyncInfo = IMG_NULL; /* mark it as empty */
+
+	psCreateSyncInfoModObjOUT->eError = PVRSRVAllocHandle(psPerProc->psHandleBase,
+			&psCreateSyncInfoModObjOUT->hKernelSyncInfoModObj,
+			psModSyncOpInfo,
+			PVRSRV_HANDLE_TYPE_SYNC_INFO_MOD_OBJ,
+			PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE);
+
+	if (psCreateSyncInfoModObjOUT->eError != PVRSRV_OK)
+	{
+		/* Handle allocation failed: nothing references the tracking struct
+		   (no handle, no ResMan registration), so free it here to avoid a
+		   memory leak. */
+		OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(MODIFY_SYNC_OP_INFO), (IMG_VOID *)psModSyncOpInfo, 0);
+		return 0;
+	}
+
+	/* NOTE(review): the ResManRegisterRes result is not checked here —
+	   confirm whether a NULL hResItem is tolerated by the cleanup paths. */
+	psModSyncOpInfo->hResItem = ResManRegisterRes(psPerProc->hResManContext,
+			RESMAN_TYPE_MODIFY_SYNC_OPS,
+			psModSyncOpInfo,
+			0,
+			&ModifyCompleteSyncOpsCallBack);
+
+	COMMIT_HANDLE_BATCH_OR_ERROR(psCreateSyncInfoModObjOUT->eError, psPerProc)
+
+	return 0;
+}
+
+
+/*
+ * Bridge wrapper: destroy an EMPTY sync modification object.  Rejects the
+ * call with PVRSRV_ERROR_INVALID_PARAMS while a sync object is still
+ * attached; otherwise releases the handle and frees the object via ResMan.
+ */
+static IMG_INT
+PVRSRVDestroySyncInfoModObjBW(IMG_UINT32 ui32BridgeID,
+							  PVRSRV_BRIDGE_IN_DESTROY_SYNC_INFO_MOD_OBJ *psDestroySyncInfoModObjIN,
+							  PVRSRV_BRIDGE_RETURN *psDestroySyncInfoModObjOUT,
+							  PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+	MODIFY_SYNC_OP_INFO *psModSyncOpInfo;
+
+	PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_DESTROY_SYNC_INFO_MOD_OBJ);
+
+	psDestroySyncInfoModObjOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+			(IMG_VOID**)&psModSyncOpInfo,
+			psDestroySyncInfoModObjIN->hKernelSyncInfoModObj,
+			PVRSRV_HANDLE_TYPE_SYNC_INFO_MOD_OBJ);
+	if (psDestroySyncInfoModObjOUT->eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVDestroySyncInfoModObjBW: PVRSRVLookupHandle failed"));
+		return 0;
+	}
+
+	if(psModSyncOpInfo->psKernelSyncInfo != IMG_NULL)
+	{
+		/* Not empty */
+		psDestroySyncInfoModObjOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+		return 0;
+	}
+
+	psDestroySyncInfoModObjOUT->eError =
PVRSRVReleaseHandle(psPerProc->psHandleBase,
+			psDestroySyncInfoModObjIN->hKernelSyncInfoModObj,
+			PVRSRV_HANDLE_TYPE_SYNC_INFO_MOD_OBJ);
+
+	if (psDestroySyncInfoModObjOUT->eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVDestroySyncInfoModObjBW: PVRSRVReleaseHandle failed"));
+		return 0;
+	}
+
+	/* ResMan frees the MODIFY_SYNC_OP_INFO via ModifyCompleteSyncOpsCallBack */
+	psDestroySyncInfoModObjOUT->eError = ResManFreeResByPtr(psModSyncOpInfo->hResItem, CLEANUP_WITH_POLL);
+	if (psDestroySyncInfoModObjOUT->eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVDestroySyncInfoModObjBW: ResManFreeResByPtr failed"));
+		return 0;
+	}
+
+	return 0;
+}
+
+
+/*
+ * Bridge wrapper: attach a sync object to an empty modification object,
+ * snapshot its pending counters, take the requested read/write/read2 pending
+ * ops on it, and return the PRE-increment pending values to the caller.
+ * Completion is later applied by PVRSRVModifyCompleteSyncOpsBW (or by the
+ * ResMan callback on process teardown).
+ */
+static IMG_INT
+PVRSRVModifyPendingSyncOpsBW(IMG_UINT32 ui32BridgeID,
+							 PVRSRV_BRIDGE_IN_MODIFY_PENDING_SYNC_OPS *psModifySyncOpsIN,
+							 PVRSRV_BRIDGE_OUT_MODIFY_PENDING_SYNC_OPS *psModifySyncOpsOUT,
+							 PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+	PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
+	MODIFY_SYNC_OP_INFO *psModSyncOpInfo;
+
+	PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_MODIFY_PENDING_SYNC_OPS);
+
+	psModifySyncOpsOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+			(IMG_VOID**)&psModSyncOpInfo,
+			psModifySyncOpsIN->hKernelSyncInfoModObj,
+			PVRSRV_HANDLE_TYPE_SYNC_INFO_MOD_OBJ);
+	if (psModifySyncOpsOUT->eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVModifyPendingSyncOpsBW: PVRSRVLookupHandle failed"));
+		return 0;
+	}
+
+	psModifySyncOpsOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+			(IMG_VOID**)&psKernelSyncInfo,
+			psModifySyncOpsIN->hKernelSyncInfo,
+			PVRSRV_HANDLE_TYPE_SYNC_INFO);
+	if (psModifySyncOpsOUT->eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVModifyPendingSyncOpsBW: PVRSRVLookupHandle failed"));
+		return 0;
+	}
+
+	if(psModSyncOpInfo->psKernelSyncInfo)
+	{
+		/* SyncInfoModification is not empty */
+		psModifySyncOpsOUT->eError = PVRSRV_ERROR_RETRY;
+		PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVModifyPendingSyncOpsBW: SyncInfo Modification object is not empty"));
+		return 0;
+	}
+
+	/* Should never happen, but check to be sure */
+	if (psKernelSyncInfo == IMG_NULL)
+	{
+		psModifySyncOpsOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+		PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVModifyPendingSyncOpsBW: SyncInfo bad handle"));
+		return 0;
+	}
+
+	/* Hold a reference for as long as the mod object points at the sync
+	   object; dropped again when the completion is applied. */
+	PVRSRVKernelSyncInfoIncRef(psKernelSyncInfo, IMG_NULL);
+	/* setup info to store in resman */
+	psModSyncOpInfo->psKernelSyncInfo = psKernelSyncInfo;
+	psModSyncOpInfo->ui32ModifyFlags = psModifySyncOpsIN->ui32ModifyFlags;
+	psModSyncOpInfo->ui32ReadOpsPendingSnapShot = psKernelSyncInfo->psSyncData->ui32ReadOpsPending;
+	psModSyncOpInfo->ui32WriteOpsPendingSnapShot = psKernelSyncInfo->psSyncData->ui32WriteOpsPending;
+	psModSyncOpInfo->ui32ReadOps2PendingSnapShot = psKernelSyncInfo->psSyncData->ui32ReadOps2Pending;
+
+	/* We return PRE-INCREMENTED versions of all sync Op Values */
+
+	psModifySyncOpsOUT->ui32ReadOpsPending = psKernelSyncInfo->psSyncData->ui32ReadOpsPending;
+	psModifySyncOpsOUT->ui32WriteOpsPending = psKernelSyncInfo->psSyncData->ui32WriteOpsPending;
+	psModifySyncOpsOUT->ui32ReadOps2Pending = psKernelSyncInfo->psSyncData->ui32ReadOps2Pending;
+
+	/* Timeline trace: record which class of op is being sampled (only the
+	   first matching flag is traced; the take-ops below honour all flags). */
+	PVR_TTRACE(PVRSRV_TRACE_GROUP_MODOBJ, PVRSRV_TRACE_CLASS_CMD_START, MODOBJ_TOKEN_MODIFY_PENDING);
+	if(psModifySyncOpsIN->ui32ModifyFlags & PVRSRV_MODIFYSYNCOPS_FLAGS_WO_INC)
+	{
+		PVR_TTRACE_SYNC_OBJECT(PVRSRV_TRACE_GROUP_MODOBJ, MODOBJ_TOKEN_WRITE_SYNC,
+				psKernelSyncInfo, PVRSRV_SYNCOP_SAMPLE);
+	}
+	else if (psModifySyncOpsIN->ui32ModifyFlags & PVRSRV_MODIFYSYNCOPS_FLAGS_RO_INC)
+	{
+		PVR_TTRACE_SYNC_OBJECT(PVRSRV_TRACE_GROUP_MODOBJ, MODOBJ_TOKEN_READ_SYNC,
+				psKernelSyncInfo, PVRSRV_SYNCOP_SAMPLE);
+	}
+	else if (psModifySyncOpsIN->ui32ModifyFlags & PVRSRV_MODIFYSYNCOPS_FLAGS_RO2_INC)
+	{
+		PVR_TTRACE_SYNC_OBJECT(PVRSRV_TRACE_GROUP_MODOBJ, MODOBJ_TOKEN_READ2_SYNC,
+				psKernelSyncInfo, PVRSRV_SYNCOP_SAMPLE);
+	}
+	else
+	{
+		PVR_TTRACE_SYNC_OBJECT(PVRSRV_TRACE_GROUP_MODOBJ, MODOBJ_TOKEN_READ_WRITE_SYNC,
+				psKernelSyncInfo, PVRSRV_SYNCOP_SAMPLE);
+	}
+	PVR_TTRACE(PVRSRV_TRACE_GROUP_MODOBJ,
+			PVRSRV_TRACE_CLASS_CMD_END, MODOBJ_TOKEN_MODIFY_PENDING);
+
+	if(psModifySyncOpsIN->ui32ModifyFlags & PVRSRV_MODIFYSYNCOPS_FLAGS_WO_INC)
+	{
+		SyncTakeWriteOp(psKernelSyncInfo, SYNC_OP_CLASS_MODOBJ);
+	}
+
+	if(psModifySyncOpsIN->ui32ModifyFlags & PVRSRV_MODIFYSYNCOPS_FLAGS_RO_INC)
+	{
+		SyncTakeReadOp(psKernelSyncInfo, SYNC_OP_CLASS_MODOBJ);
+	}
+
+	if(psModifySyncOpsIN->ui32ModifyFlags & PVRSRV_MODIFYSYNCOPS_FLAGS_RO2_INC)
+	{
+		SyncTakeReadOp2(psKernelSyncInfo, SYNC_OP_CLASS_MODOBJ);
+	}
+
+	/* pull the resman item to the front of the list */
+	psModifySyncOpsOUT->eError = ResManDissociateRes(psModSyncOpInfo->hResItem,
+			psPerProc->hResManContext);
+
+	if (psModifySyncOpsOUT->eError != PVRSRV_OK)
+	{
+		/* NOTE(review): message looks copy-pasted — this reports a
+		   ResManDissociateRes failure, not a PVRSRVLookupHandle failure. */
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVModifyPendingSyncOpsBW: PVRSRVLookupHandle failed"));
+		return 0;
+	}
+
+	return 0;
+}
+
+
+/*
+ * Bridge wrapper: apply the recorded completion increments to the attached
+ * sync object, then detach it (dropping the reference taken in
+ * PVRSRVModifyPendingSyncOpsBW) and re-kick the services-managed devices.
+ */
+static IMG_INT
+PVRSRVModifyCompleteSyncOpsBW(IMG_UINT32 ui32BridgeID,
+							  PVRSRV_BRIDGE_IN_MODIFY_COMPLETE_SYNC_OPS *psModifySyncOpsIN,
+							  PVRSRV_BRIDGE_RETURN *psModifySyncOpsOUT,
+							  PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+	MODIFY_SYNC_OP_INFO *psModSyncOpInfo;
+
+	PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_MODIFY_COMPLETE_SYNC_OPS);
+
+	psModifySyncOpsOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+			(IMG_VOID**)&psModSyncOpInfo,
+			psModifySyncOpsIN->hKernelSyncInfoModObj,
+			PVRSRV_HANDLE_TYPE_SYNC_INFO_MOD_OBJ);
+	if (psModifySyncOpsOUT->eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVModifyCompleteSyncOpsBW: PVRSRVLookupHandle failed"));
+		return 0;
+	}
+
+	if(psModSyncOpInfo->psKernelSyncInfo == IMG_NULL)
+	{
+		/* Empty */
+		psModifySyncOpsOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+		return 0;
+	}
+
+	psModifySyncOpsOUT->eError = DoModifyCompleteSyncOps(psModSyncOpInfo);
+
+	if (psModifySyncOpsOUT->eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVModifyCompleteSyncOpsBW: DoModifyCompleteSyncOps failed"));
+		return 0;
+	}
+
+	PVRSRVKernelSyncInfoDecRef(psModSyncOpInfo->psKernelSyncInfo, IMG_NULL);
+	psModSyncOpInfo->psKernelSyncInfo = IMG_NULL;
+
+	/* re-kick all services managed devices */
+	PVRSRVScheduleDeviceCallbacks();
+
+	return 0;
+}
+
+
+/*
+ * Bridge wrapper: sample the current pending counters of a sync object
+ * without modifying it; the returned token is later passed to
+ * PVRSRVSyncOpsFlushToTokenBW to poll for those ops having completed.
+ */
+static IMG_INT
+PVRSRVSyncOpsTakeTokenBW(IMG_UINT32 ui32BridgeID,
+						 PVRSRV_BRIDGE_IN_SYNC_OPS_TAKE_TOKEN *psSyncOpsTakeTokenIN,
+						 PVRSRV_BRIDGE_OUT_SYNC_OPS_TAKE_TOKEN *psSyncOpsTakeTokenOUT,
+						 PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+	PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
+
+	PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SYNC_OPS_TAKE_TOKEN);
+
+	psSyncOpsTakeTokenOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+			(IMG_VOID**)&psKernelSyncInfo,
+			psSyncOpsTakeTokenIN->hKernelSyncInfo,
+			PVRSRV_HANDLE_TYPE_SYNC_INFO);
+	if (psSyncOpsTakeTokenOUT->eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVSyncOpsTakeTokenBW: PVRSRVLookupHandle failed"));
+		return 0;
+	}
+
+	/* We return PRE-INCREMENTED versions of all sync Op Values */
+
+	psSyncOpsTakeTokenOUT->ui32ReadOpsPending = psKernelSyncInfo->psSyncData->ui32ReadOpsPending;
+	psSyncOpsTakeTokenOUT->ui32WriteOpsPending = psKernelSyncInfo->psSyncData->ui32WriteOpsPending;
+	psSyncOpsTakeTokenOUT->ui32ReadOps2Pending = psKernelSyncInfo->psSyncData->ui32ReadOps2Pending;
+
+	return 0;
+}
+
+
+/*
+ * Bridge wrapper: non-blocking poll — check whether the ops captured in a
+ * previously-taken token have completed.  Returns PVRSRV_ERROR_RETRY via
+ * eError (not a failure) while they are still outstanding.
+ */
+static IMG_INT
+PVRSRVSyncOpsFlushToTokenBW(IMG_UINT32 ui32BridgeID,
+							PVRSRV_BRIDGE_IN_SYNC_OPS_FLUSH_TO_TOKEN *psSyncOpsFlushToTokenIN,
+							PVRSRV_BRIDGE_RETURN *psSyncOpsFlushToTokenOUT,
+							PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+	PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
+	IMG_UINT32 ui32ReadOpsPendingSnapshot;
+	IMG_UINT32 ui32WriteOpsPendingSnapshot;
+	IMG_UINT32 ui32ReadOps2PendingSnapshot;
+
+	PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SYNC_OPS_FLUSH_TO_TOKEN);
+
+	psSyncOpsFlushToTokenOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+			(IMG_VOID**)&psKernelSyncInfo,
+			psSyncOpsFlushToTokenIN->hKernelSyncInfo,
+			PVRSRV_HANDLE_TYPE_SYNC_INFO);
+	if (psSyncOpsFlushToTokenOUT->eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
"PVRSRVSyncOpsFlushToTokenBW: PVRSRVLookupHandle failed"));
+		return 0;
+	}
+
+	ui32ReadOpsPendingSnapshot = psSyncOpsFlushToTokenIN->ui32ReadOpsPendingSnapshot;
+	ui32WriteOpsPendingSnapshot = psSyncOpsFlushToTokenIN->ui32WriteOpsPendingSnapshot;
+	ui32ReadOps2PendingSnapshot = psSyncOpsFlushToTokenIN->ui32ReadOps2PendingSnapshot;
+
+	psSyncOpsFlushToTokenOUT->eError = DoQuerySyncOpsSatisfied(psKernelSyncInfo,
+			ui32ReadOpsPendingSnapshot,
+			ui32WriteOpsPendingSnapshot,
+			ui32ReadOps2PendingSnapshot);
+
+	/* PVRSRV_ERROR_RETRY means "still pending" and is passed back to the
+	   caller without logging. */
+	if (psSyncOpsFlushToTokenOUT->eError != PVRSRV_OK && psSyncOpsFlushToTokenOUT->eError != PVRSRV_ERROR_RETRY)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVSyncOpsFlushToTokenBW: DoQuerySyncOpsSatisfied failed"));
+		return 0;
+	}
+
+	return 0;
+}
+
+
+/*
+ * Bridge wrapper: non-blocking poll against the snapshots stored inside a
+ * (non-empty) sync modification object, using the same RETRY convention as
+ * PVRSRVSyncOpsFlushToTokenBW.
+ */
+static IMG_INT
+PVRSRVSyncOpsFlushToModObjBW(IMG_UINT32 ui32BridgeID,
+							 PVRSRV_BRIDGE_IN_SYNC_OPS_FLUSH_TO_MOD_OBJ *psSyncOpsFlushToModObjIN,
+							 PVRSRV_BRIDGE_RETURN *psSyncOpsFlushToModObjOUT,
+							 PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+	MODIFY_SYNC_OP_INFO *psModSyncOpInfo;
+
+	PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SYNC_OPS_FLUSH_TO_MOD_OBJ);
+
+	psSyncOpsFlushToModObjOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+			(IMG_VOID**)&psModSyncOpInfo,
+			psSyncOpsFlushToModObjIN->hKernelSyncInfoModObj,
+			PVRSRV_HANDLE_TYPE_SYNC_INFO_MOD_OBJ);
+	if (psSyncOpsFlushToModObjOUT->eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVSyncOpsFlushToModObjBW: PVRSRVLookupHandle failed"));
+		return 0;
+	}
+
+	if(psModSyncOpInfo->psKernelSyncInfo == IMG_NULL)
+	{
+		/* Empty */
+		psSyncOpsFlushToModObjOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+		return 0;
+	}
+
+	psSyncOpsFlushToModObjOUT->eError = DoQuerySyncOpsSatisfied(psModSyncOpInfo->psKernelSyncInfo,
+			psModSyncOpInfo->ui32ReadOpsPendingSnapShot,
+			psModSyncOpInfo->ui32WriteOpsPendingSnapShot,
+			psModSyncOpInfo->ui32ReadOps2PendingSnapShot);
+
+	if (psSyncOpsFlushToModObjOUT->eError != PVRSRV_OK && psSyncOpsFlushToModObjOUT->eError != PVRSRV_ERROR_RETRY)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVSyncOpsFlushToModObjBW: DoQuerySyncOpsSatisfied failed"));
+		return 0;
+	}
+
+	return 0;
+}
+
+
+/*
+ * Bridge wrapper: succeed (eError = PVRSRV_OK) when both the read and write
+ * pending/complete deltas of a sync object are within ui32Delta, otherwise
+ * report PVRSRV_ERROR_RETRY.  Under PDUMP, also records GREATEREQUAL polls
+ * for the read/write complete values.
+ */
+static IMG_INT
+PVRSRVSyncOpsFlushToDeltaBW(IMG_UINT32 ui32BridgeID,
+							PVRSRV_BRIDGE_IN_SYNC_OPS_FLUSH_TO_DELTA *psSyncOpsFlushToDeltaIN,
+							PVRSRV_BRIDGE_RETURN *psSyncOpsFlushToDeltaOUT,
+							PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+	PVRSRV_KERNEL_SYNC_INFO *psSyncInfo;
+	IMG_UINT32 ui32DeltaRead;
+	IMG_UINT32 ui32DeltaWrite;
+
+	PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SYNC_OPS_FLUSH_TO_DELTA);
+
+	psSyncOpsFlushToDeltaOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+			(IMG_VOID**)&psSyncInfo,
+			psSyncOpsFlushToDeltaIN->hKernelSyncInfo,
+			PVRSRV_HANDLE_TYPE_SYNC_INFO);
+	if (psSyncOpsFlushToDeltaOUT->eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVSyncOpsFlushToDeltaBW: PVRSRVLookupHandle failed"));
+		return 0;
+	}
+
+	/* FIXME: there's logic here in the bridge-wrapper - this needs to be moved to
+	   a better place */
+
+	ui32DeltaRead = psSyncInfo->psSyncData->ui32ReadOpsPending - psSyncInfo->psSyncData->ui32ReadOpsComplete;
+	ui32DeltaWrite = psSyncInfo->psSyncData->ui32WriteOpsPending - psSyncInfo->psSyncData->ui32WriteOpsComplete;
+
+	if (ui32DeltaRead <= psSyncOpsFlushToDeltaIN->ui32Delta && ui32DeltaWrite <= psSyncOpsFlushToDeltaIN->ui32Delta)
+	{
+#if defined(PDUMP) && !defined(SUPPORT_VGX)
+		/* pdump the sync pol: reads */
+		PDumpComment("Poll for read ops complete to delta (%u)",
+				psSyncOpsFlushToDeltaIN->ui32Delta);
+		psSyncOpsFlushToDeltaOUT->eError =
+			PDumpMemPolKM(psSyncInfo->psSyncDataMemInfoKM,
+					offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete),
+					psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
+					0xFFFFFFFF,
+					PDUMP_POLL_OPERATOR_GREATEREQUAL,
+					0,
+					MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
+
+		/* pdump the sync pol: writes */
+		PDumpComment("Poll for write ops complete to delta (%u)",
+				psSyncOpsFlushToDeltaIN->ui32Delta);
+		psSyncOpsFlushToDeltaOUT->eError =
+			PDumpMemPolKM(psSyncInfo->psSyncDataMemInfoKM,
+					offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete),
+					psSyncInfo->psSyncData->ui32LastOpDumpVal,
+					0xFFFFFFFF,
+					PDUMP_POLL_OPERATOR_GREATEREQUAL,
+					0,
+					MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
+#endif
+
+		/* NOTE(review): this unconditionally overwrites any error returned
+		   by the PDumpMemPolKM calls above — confirm that PDUMP poll
+		   failures are intentionally ignored here. */
+		psSyncOpsFlushToDeltaOUT->eError = PVRSRV_OK;
+	}
+	else
+	{
+		psSyncOpsFlushToDeltaOUT->eError = PVRSRV_ERROR_RETRY;
+	}
+
+	return 0;
+}
+
+
+/*
+ * ResMan cleanup callback for a sync object allocated through
+ * PVRSRVAllocSyncInfoBW: simply drops the allocation reference.
+ */
+static PVRSRV_ERROR
+FreeSyncInfoCallback(IMG_PVOID pvParam,
+					 IMG_UINT32 ui32Param,
+					 IMG_BOOL bDummy)
+{
+	PVRSRV_KERNEL_SYNC_INFO *psSyncInfo;
+
+	PVR_UNREFERENCED_PARAMETER(ui32Param);
+	PVR_UNREFERENCED_PARAMETER(bDummy);
+
+	psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)pvParam;
+
+	PVRSRVKernelSyncInfoDecRef(psSyncInfo, IMG_NULL);
+
+	return PVRSRV_OK;
+}
+
+
+/*
+ * Bridge wrapper: allocate a sync object in the device's kernel memory
+ * context, wrap it in a per-process handle and register ResMan cleanup.
+ * Error paths fall through labelled exits so the handle batch is always
+ * committed with the final eError.
+ */
+static IMG_INT
+PVRSRVAllocSyncInfoBW(IMG_UINT32 ui32BridgeID,
+					  PVRSRV_BRIDGE_IN_ALLOC_SYNC_INFO *psAllocSyncInfoIN,
+					  PVRSRV_BRIDGE_OUT_ALLOC_SYNC_INFO *psAllocSyncInfoOUT,
+					  PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+	PVRSRV_KERNEL_SYNC_INFO *psSyncInfo;
+	PVRSRV_ERROR eError;
+	PVRSRV_DEVICE_NODE *psDeviceNode;
+	IMG_HANDLE hDevMemContext;
+
+	PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ALLOC_SYNC_INFO);
+
+	NEW_HANDLE_BATCH_OR_ERROR(psAllocSyncInfoOUT->eError, psPerProc, 1)
+
+	eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+			(IMG_HANDLE *)&psDeviceNode,
+			psAllocSyncInfoIN->hDevCookie,
+			PVRSRV_HANDLE_TYPE_DEV_NODE);
+	if(eError != PVRSRV_OK)
+	{
+		goto allocsyncinfo_errorexit;
+	}
+
+	hDevMemContext = psDeviceNode->sDevMemoryInfo.pBMKernelContext;
+
+	eError = PVRSRVAllocSyncInfoKM(psDeviceNode,
+			hDevMemContext,
+			&psSyncInfo);
+
+	if (eError != PVRSRV_OK)
+	{
+		goto allocsyncinfo_errorexit;
+	}
+
+	eError = PVRSRVAllocHandle(psPerProc->psHandleBase,
+			&psAllocSyncInfoOUT->hKernelSyncInfo,
+			psSyncInfo,
+			PVRSRV_HANDLE_TYPE_SYNC_INFO,
+			PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE);
+
+	if(eError != PVRSRV_OK)
+	{
+		goto allocsyncinfo_errorexit_freesyncinfo;
+	}
+
+	/* NOTE(review): the ResManRegisterRes result is not checked — confirm a
+	   NULL hResItem is handled by PVRSRVFreeSyncInfoBW / process teardown. */
+	psSyncInfo->hResItem =
+		ResManRegisterRes(psPerProc->hResManContext,
+				RESMAN_TYPE_SYNC_INFO,
+				psSyncInfo,
+				0,
+				&FreeSyncInfoCallback);
+
+	/* Success */
+	goto allocsyncinfo_commit;
+
+	/* Error handling */
+ allocsyncinfo_errorexit_freesyncinfo:
+	PVRSRVKernelSyncInfoDecRef(psSyncInfo, IMG_NULL);
+
+ allocsyncinfo_errorexit:
+
+	/* Common exit */
+ allocsyncinfo_commit:
+	psAllocSyncInfoOUT->eError = eError;
+	COMMIT_HANDLE_BATCH_OR_ERROR(eError, psPerProc);
+
+	return 0;
+}
+
+
+/*
+ * Bridge wrapper: release the per-process handle for a sync object and free
+ * it via ResMan (which invokes FreeSyncInfoCallback).
+ */
+static IMG_INT
+PVRSRVFreeSyncInfoBW(IMG_UINT32 ui32BridgeID,
+					 PVRSRV_BRIDGE_IN_FREE_SYNC_INFO *psFreeSyncInfoIN,
+					 PVRSRV_BRIDGE_RETURN *psFreeSyncInfoOUT,
+					 PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+	PVRSRV_KERNEL_SYNC_INFO *psSyncInfo;
+	PVRSRV_ERROR eError;
+
+	PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_FREE_SYNC_INFO);
+
+	eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+			(IMG_VOID**)&psSyncInfo,
+			psFreeSyncInfoIN->hKernelSyncInfo,
+			PVRSRV_HANDLE_TYPE_SYNC_INFO);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVFreeSyncInfoBW: PVRSRVLookupHandle failed"));
+		psFreeSyncInfoOUT->eError = eError;
+		return 0;
+	}
+
+	eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
+			psFreeSyncInfoIN->hKernelSyncInfo,
+			PVRSRV_HANDLE_TYPE_SYNC_INFO);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVFreeSyncInfoBW: PVRSRVReleaseHandle failed"));
+		psFreeSyncInfoOUT->eError = eError;
+		return 0;
+	}
+
+	eError = ResManFreeResByPtr(psSyncInfo->hResItem, CLEANUP_WITH_POLL);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVFreeSyncInfoBW: ResManFreeResByPtr failed"));
+		psFreeSyncInfoOUT->eError = eError;
+		return 0;
+	}
+
+	return 0;
+}
+
+
+/*
+ * Populate the bridge dispatch table.  Real handlers are installed for
+ * supported ioctls; obsolete or unsupported entries get DummyBW.
+ */
+PVRSRV_ERROR
+CommonBridgeInit(IMG_VOID)
+{
+	IMG_UINT32 i;
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_ENUM_DEVICES, PVRSRVEnumerateDevicesBW);
+	SetDispatchTableEntry(PVRSRV_BRIDGE_ACQUIRE_DEVICEINFO, PVRSRVAcquireDeviceDataBW);
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RELEASE_DEVICEINFO, DummyBW);
+
SetDispatchTableEntry(PVRSRV_BRIDGE_CREATE_DEVMEMCONTEXT, PVRSRVCreateDeviceMemContextBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_DESTROY_DEVMEMCONTEXT, PVRSRVDestroyDeviceMemContextBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DEVMEM_HEAPINFO, PVRSRVGetDeviceMemHeapInfoBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_ALLOC_DEVICEMEM, PVRSRVAllocDeviceMemBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_FREE_DEVICEMEM, PVRSRVFreeDeviceMemBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_GETFREE_DEVICEMEM, PVRSRVGetFreeDeviceMemBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_CREATE_COMMANDQUEUE, DummyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_DESTROY_COMMANDQUEUE, DummyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_MHANDLE_TO_MMAP_DATA, PVRMMapOSMemHandleToMMapDataBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_CONNECT_SERVICES, PVRSRVConnectBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_DISCONNECT_SERVICES, PVRSRVDisconnectBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_WRAP_DEVICE_MEM, DummyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DEVICEMEMINFO, DummyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_RESERVE_DEV_VIRTMEM , DummyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_FREE_DEV_VIRTMEM, DummyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_EXT_MEMORY, DummyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAP_EXT_MEMORY, DummyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_DEV_MEMORY, PVRSRVMapDeviceMemoryBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAP_DEV_MEMORY, PVRSRVUnmapDeviceMemoryBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_MEM_INFO_TO_USER, DummyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAP_MEM_INFO_FROM_USER, DummyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_EXPORT_DEVICEMEM, PVRSRVExportDeviceMemBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_RELEASE_MMAP_DATA, PVRMMapReleaseMMapDataBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_CHG_DEV_MEM_ATTRIBS, PVRSRVChangeDeviceMemoryAttributesBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_DEV_MEMORY_2, PVRSRVMapDeviceMemoryBW); + 
SetDispatchTableEntry(PVRSRV_BRIDGE_EXPORT_DEVICEMEM_2, PVRSRVExportDeviceMemBW); +#if defined(SUPPORT_ION) + SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_ION_HANDLE, PVRSRVMapIonHandleBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAP_ION_HANDLE, PVRSRVUnmapIonHandleBW); +#endif +#if defined(SUPPORT_DMABUF) + SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_DMABUF, PVRSRVMapDmaBufBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAP_DMABUF, PVRSRVUnmapDmaBufBW); +#endif + + /* SIM */ + SetDispatchTableEntry(PVRSRV_BRIDGE_PROCESS_SIMISR_EVENT, DummyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_REGISTER_SIM_PROCESS, DummyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_UNREGISTER_SIM_PROCESS, DummyBW); + + /* User Mapping */ + SetDispatchTableEntry(PVRSRV_BRIDGE_MAPPHYSTOUSERSPACE, DummyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAPPHYSTOUSERSPACE, DummyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_GETPHYSTOUSERSPACEMAP, DummyBW); + + SetDispatchTableEntry(PVRSRV_BRIDGE_GET_FB_STATS, DummyBW); + + /* API to retrieve misc. info. 
from services */ + SetDispatchTableEntry(PVRSRV_BRIDGE_GET_MISC_INFO, PVRSRVGetMiscInfoBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_RELEASE_MISC_INFO, DummyBW); + + /* Overlay ioctls */ +#if defined (SUPPORT_OVERLAY_ROTATE_BLIT) + SetDispatchTableEntry(PVRSRV_BRIDGE_INIT_3D_OVL_BLT_RES, DummyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_DEINIT_3D_OVL_BLT_RES, DummyBW); +#endif + + + /* PDUMP */ +#if defined(PDUMP) + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_INIT, DummyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_MEMPOL, PDumpMemPolBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPMEM, PDumpMemBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_REG, PDumpRegWithFlagsBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_REGPOL, PDumpRegPolBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_COMMENT, PDumpCommentBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_SETFRAME, PDumpSetFrameBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_ISCAPTURING, PDumpIsCaptureFrameBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPBITMAP, PDumpBitmapBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPREADREG, PDumpReadRegBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_SYNCPOL, PDumpSyncPolBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPSYNC, PDumpSyncDumpBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_MEMPAGES, PDumpMemPagesBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DRIVERINFO, PDumpDriverInfoBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPPDDEVPADDR, PDumpPDDevPAddrBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_CYCLE_COUNT_REG_READ, PDumpCycleCountRegReadBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_STARTINITPHASE, PDumpStartInitPhaseBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_STOPINITPHASE, PDumpStopInitPhaseBW); +#endif /* defined(PDUMP) */ + + /* DisplayClass APIs */ + SetDispatchTableEntry(PVRSRV_BRIDGE_GET_OEMJTABLE, DummyBW); + +#if defined(SUPPORT_PVRSRV_DEVICE_CLASS) + SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY, 
PVRSRVMapDeviceClassMemoryBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAP_DEVICECLASS_MEMORY, PVRSRVUnmapDeviceClassMemoryBW); + + /* device class enum */ + SetDispatchTableEntry(PVRSRV_BRIDGE_ENUM_CLASS, PVRSRVEnumerateDCBW); + + /* display class API */ + SetDispatchTableEntry(PVRSRV_BRIDGE_OPEN_DISPCLASS_DEVICE, PVRSRVOpenDCDeviceBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_CLOSE_DISPCLASS_DEVICE, PVRSRVCloseDCDeviceBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_ENUM_DISPCLASS_FORMATS, PVRSRVEnumDCFormatsBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_ENUM_DISPCLASS_DIMS, PVRSRVEnumDCDimsBW); +#if defined(SUPPORT_PVRSRV_GET_DC_SYSTEM_BUFFER) + SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DISPCLASS_SYSBUFFER, PVRSRVGetDCSystemBufferBW); +#else + SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DISPCLASS_SYSBUFFER, DummyBW); +#endif + SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DISPCLASS_INFO, PVRSRVGetDCInfoBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_CREATE_DISPCLASS_SWAPCHAIN, PVRSRVCreateDCSwapChainBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_DESTROY_DISPCLASS_SWAPCHAIN, PVRSRVDestroyDCSwapChainBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SET_DISPCLASS_DSTRECT, PVRSRVSetDCDstRectBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SET_DISPCLASS_SRCRECT, PVRSRVSetDCSrcRectBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SET_DISPCLASS_DSTCOLOURKEY, PVRSRVSetDCDstColourKeyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SET_DISPCLASS_SRCCOLOURKEY, PVRSRVSetDCSrcColourKeyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DISPCLASS_BUFFERS, PVRSRVGetDCBuffersBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_BUFFER, PVRSRVSwapToDCBufferBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_BUFFER2, PVRSRVSwapToDCBuffer2BW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_SYSTEM, PVRSRVSwapToDCSystemBW); + + /* buffer class API */ + SetDispatchTableEntry(PVRSRV_BRIDGE_OPEN_BUFFERCLASS_DEVICE, PVRSRVOpenBCDeviceBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_CLOSE_BUFFERCLASS_DEVICE, PVRSRVCloseBCDeviceBW); 
+ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_BUFFERCLASS_INFO, PVRSRVGetBCInfoBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_GET_BUFFERCLASS_BUFFER, PVRSRVGetBCBufferBW); +#else /* defined(SUPPORT_PVRSRV_DEVICE_CLASS) */ + SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY, DummyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAP_DEVICECLASS_MEMORY, DummyBW); + + /* device class enum */ + SetDispatchTableEntry(PVRSRV_BRIDGE_ENUM_CLASS, DummyBW); + + /* display class API */ + SetDispatchTableEntry(PVRSRV_BRIDGE_OPEN_DISPCLASS_DEVICE, DummyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_CLOSE_DISPCLASS_DEVICE, DummyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_ENUM_DISPCLASS_FORMATS, DummyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_ENUM_DISPCLASS_DIMS, DummyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DISPCLASS_SYSBUFFER, DummyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DISPCLASS_INFO, DummyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_CREATE_DISPCLASS_SWAPCHAIN, DummyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_DESTROY_DISPCLASS_SWAPCHAIN, DummyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SET_DISPCLASS_DSTRECT, DummyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SET_DISPCLASS_SRCRECT, DummyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SET_DISPCLASS_DSTCOLOURKEY, DummyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SET_DISPCLASS_SRCCOLOURKEY, DummyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DISPCLASS_BUFFERS, DummyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_BUFFER, DummyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_BUFFER2, DummyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_SYSTEM, DummyBW); + + /* buffer class API */ + SetDispatchTableEntry(PVRSRV_BRIDGE_OPEN_BUFFERCLASS_DEVICE, DummyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_CLOSE_BUFFERCLASS_DEVICE, DummyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_GET_BUFFERCLASS_INFO, DummyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_GET_BUFFERCLASS_BUFFER, DummyBW); +#endif /* 
defined(SUPPORT_PVRSRV_DEVICE_CLASS) */ + + /* Wrap/Unwrap external memory */ + SetDispatchTableEntry(PVRSRV_BRIDGE_WRAP_EXT_MEMORY, PVRSRVWrapExtMemoryBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_UNWRAP_EXT_MEMORY, PVRSRVUnwrapExtMemoryBW); + + /* Shared memory */ + SetDispatchTableEntry(PVRSRV_BRIDGE_ALLOC_SHARED_SYS_MEM, PVRSRVAllocSharedSysMemoryBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_FREE_SHARED_SYS_MEM, PVRSRVFreeSharedSysMemoryBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_MEMINFO_MEM, PVRSRVMapMemInfoMemBW); + + /* Intialisation Service support */ + SetDispatchTableEntry(PVRSRV_BRIDGE_INITSRV_CONNECT, &PVRSRVInitSrvConnectBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_INITSRV_DISCONNECT, &PVRSRVInitSrvDisconnectBW); + + /* Event Object */ + SetDispatchTableEntry(PVRSRV_BRIDGE_EVENT_OBJECT_WAIT, &PVRSRVEventObjectWaitBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_EVENT_OBJECT_OPEN, &PVRSRVEventObjectOpenBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_EVENT_OBJECT_CLOSE, &PVRSRVEventObjectCloseBW); + + SetDispatchTableEntry(PVRSRV_BRIDGE_CREATE_SYNC_INFO_MOD_OBJ, PVRSRVCreateSyncInfoModObjBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_DESTROY_SYNC_INFO_MOD_OBJ, PVRSRVDestroySyncInfoModObjBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_MODIFY_PENDING_SYNC_OPS, PVRSRVModifyPendingSyncOpsBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_MODIFY_COMPLETE_SYNC_OPS, PVRSRVModifyCompleteSyncOpsBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC_OPS_TAKE_TOKEN, PVRSRVSyncOpsTakeTokenBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC_OPS_FLUSH_TO_TOKEN, PVRSRVSyncOpsFlushToTokenBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC_OPS_FLUSH_TO_MOD_OBJ, PVRSRVSyncOpsFlushToModObjBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC_OPS_FLUSH_TO_DELTA, PVRSRVSyncOpsFlushToDeltaBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_ALLOC_SYNC_INFO, PVRSRVAllocSyncInfoBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_FREE_SYNC_INFO, PVRSRVFreeSyncInfoBW); + +#if defined (SUPPORT_SGX) + SetSGXDispatchTableEntry(); +#endif +#if defined 
(SUPPORT_VGX) + SetVGXDispatchTableEntry(); +#endif +#if defined (SUPPORT_MSVDX) + SetMSVDXDispatchTableEntry(); +#endif + + /* A safety net to help ensure there won't be any un-initialised dispatch + * table entries... */ + /* Note: This is specifically done _after_ setting all the dispatch entries + * so that SetDispatchTableEntry can detect mistakes where entries + * overlap */ + for(i=0;iui32BridgeID; + IMG_INT err = -EFAULT; + +#if defined(DEBUG_TRACE_BRIDGE_KM) + PVR_DPF((PVR_DBG_ERROR, "%s: %s", + __FUNCTION__, + g_BridgeDispatchTable[ui32BridgeID].pszIOCName)); +#endif + +#if defined(DEBUG_BRIDGE_KM) + g_BridgeDispatchTable[ui32BridgeID].ui32CallCount++; + g_BridgeGlobalStats.ui32IOCTLCount++; +#endif + + if(!psPerProc->bInitProcess) + { + if(PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_RAN)) + { + if(!PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_SUCCESSFUL)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Initialisation failed. Driver unusable.", + __FUNCTION__)); + goto return_fault; + } + } + else + { + if(PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_RUNNING)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Initialisation is in progress", + __FUNCTION__)); + goto return_fault; + } + else + { + /* Only certain operations are allowed */ + switch(ui32BridgeID) + { + case PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_CONNECT_SERVICES): + case PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_DISCONNECT_SERVICES): + case PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_INITSRV_CONNECT): + case PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_INITSRV_DISCONNECT): + case PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_UM_KM_COMPAT_CHECK): + break; + default: + PVR_DPF((PVR_DBG_ERROR, "%s: Driver initialisation not completed yet.", + __FUNCTION__)); + goto return_fault; + } + } + } + } + +#if defined(__linux__) + { + /* This should be moved into the linux specific code */ + SYS_DATA *psSysData; + + SysAcquireData(&psSysData); + + /* We have already set up some static buffers to store our ioctl data... 
*/ + psBridgeIn = ((ENV_DATA *)psSysData->pvEnvSpecificData)->pvBridgeData; + psBridgeOut = (IMG_PVOID)((IMG_PBYTE)psBridgeIn + PVRSRV_MAX_BRIDGE_IN_SIZE); + + /* check we are not using a bigger bridge than allocated */ + if((psBridgePackageKM->ui32InBufferSize > PVRSRV_MAX_BRIDGE_IN_SIZE) || + (psBridgePackageKM->ui32OutBufferSize > PVRSRV_MAX_BRIDGE_OUT_SIZE)) + { + goto return_fault; + } + + + if(psBridgePackageKM->ui32InBufferSize > 0) + { + if(!OSAccessOK(PVR_VERIFY_READ, + psBridgePackageKM->hParamIn, + psBridgePackageKM->ui32InBufferSize)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid pvParamIn pointer", __FUNCTION__)); + } + + if(CopyFromUserWrapper(psPerProc, + ui32BridgeID, + psBridgeIn, + psBridgePackageKM->hParamIn, + psBridgePackageKM->ui32InBufferSize) + != PVRSRV_OK) + { + goto return_fault; + } + } + } +#else + psBridgeIn = (IMG_VOID*)(IMG_UINTPTR_T)psBridgePackageKM->hParamIn; + psBridgeOut = (IMG_VOID*)(IMG_UINTPTR_T)psBridgePackageKM->hParamOut; +#endif + + if(ui32BridgeID >= (BRIDGE_DISPATCH_TABLE_ENTRY_COUNT)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: ui32BridgeID = %d is out if range!", + __FUNCTION__, ui32BridgeID)); + goto return_fault; + } + + if( ui32BridgeID == PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_UM_KM_COMPAT_CHECK)) + PVRSRVCompatCheckKM(psBridgeIn, psBridgeOut); + else + { + pfBridgeHandler = + (BridgeWrapperFunction)g_BridgeDispatchTable[ui32BridgeID].pfFunction; + err = pfBridgeHandler(ui32BridgeID, + psBridgeIn, + psBridgeOut, + psPerProc); + if(err < 0) + { + goto return_fault; + } + } + +#if defined(__linux__) + /* This should be moved into the linux specific code */ + if(CopyToUserWrapper(psPerProc, + ui32BridgeID, + psBridgePackageKM->hParamOut, + psBridgeOut, + psBridgePackageKM->ui32OutBufferSize) + != PVRSRV_OK) + { + goto return_fault; + } +#endif + + err = 0; +return_fault: + + ReleaseHandleBatch(psPerProc); + return err; +} + +/****************************************************************************** + End of file 
(bridged_pvr_bridge.c) +******************************************************************************/ diff --git a/sgx_km/eurasia_km/services4/srvkm/bridged/bridged_pvr_bridge.h b/sgx_km/eurasia_km/services4/srvkm/bridged/bridged_pvr_bridge.h new file mode 100644 index 0000000..0a1cc6e --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/bridged/bridged_pvr_bridge.h @@ -0,0 +1,257 @@ +/*************************************************************************/ /*! +@Title PVR Bridge Functionality +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Header for the PVR Bridge code +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef __BRIDGED_PVR_BRIDGE_H__ +#define __BRIDGED_PVR_BRIDGE_H__ + +#include "pvr_bridge.h" + +#if defined(__cplusplus) +extern "C" { +#endif + +#if defined(__linux__) +#define PVRSRV_GET_BRIDGE_ID(X) _IOC_NR(X) +#else +#define PVRSRV_GET_BRIDGE_ID(X) ((X) - PVRSRV_IOWR(PVRSRV_BRIDGE_UMKM_CMD_FIRST)) +#endif + +#ifndef ENOMEM +#define ENOMEM 12 +#endif +#ifndef EFAULT +#define EFAULT 14 +#endif +#ifndef ENOTTY +#define ENOTTY 25 +#endif + +#if defined(DEBUG_BRIDGE_KM) +PVRSRV_ERROR +CopyFromUserWrapper(PVRSRV_PER_PROCESS_DATA *pProcData, + IMG_UINT32 ui32BridgeID, + IMG_VOID *pvDest, + IMG_VOID *pvSrc, + IMG_UINT32 ui32Size); +PVRSRV_ERROR +CopyToUserWrapper(PVRSRV_PER_PROCESS_DATA *pProcData, + IMG_UINT32 ui32BridgeID, + IMG_VOID *pvDest, + IMG_VOID *pvSrc, + IMG_UINT32 ui32Size); +#else +#define CopyFromUserWrapper(pProcData, ui32BridgeID, pvDest, pvSrc, ui32Size) \ + OSCopyFromUser(pProcData, pvDest, pvSrc, ui32Size) +#define CopyToUserWrapper(pProcData, ui32BridgeID, pvDest, pvSrc, ui32Size) \ + OSCopyToUser(pProcData, pvDest, pvSrc, ui32Size) +#endif + + +#define ASSIGN_AND_RETURN_ON_ERROR(error, src, res) \ + do \ + { \ + 
(error) = (src); \ + if ((error) != PVRSRV_OK) \ + { \ + return (res); \ + } \ + } while ((error) != PVRSRV_OK); + +#define ASSIGN_AND_EXIT_ON_ERROR(error, src) \ + ASSIGN_AND_RETURN_ON_ERROR(error, src, 0) + +#if defined(PVR_SECURE_HANDLES) +#ifdef INLINE_IS_PRAGMA +#pragma inline(NewHandleBatch) +#endif +static INLINE PVRSRV_ERROR +NewHandleBatch(PVRSRV_PER_PROCESS_DATA *psPerProc, + IMG_UINT32 ui32BatchSize) +{ + PVRSRV_ERROR eError; + + PVR_ASSERT(!psPerProc->bHandlesBatched); + + eError = PVRSRVNewHandleBatch(psPerProc->psHandleBase, ui32BatchSize); + + if (eError == PVRSRV_OK) + { + psPerProc->bHandlesBatched = IMG_TRUE; + } + + return eError; +} + +#define NEW_HANDLE_BATCH_OR_ERROR(error, psPerProc, ui32BatchSize) \ + ASSIGN_AND_EXIT_ON_ERROR(error, NewHandleBatch(psPerProc, ui32BatchSize)) + +#ifdef INLINE_IS_PRAGMA +#pragma inline(CommitHandleBatch) +#endif +static INLINE PVRSRV_ERROR +CommitHandleBatch(PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVR_ASSERT(psPerProc->bHandlesBatched); + + psPerProc->bHandlesBatched = IMG_FALSE; + + return PVRSRVCommitHandleBatch(psPerProc->psHandleBase); +} + + +#define COMMIT_HANDLE_BATCH_OR_ERROR(error, psPerProc) \ + ASSIGN_AND_EXIT_ON_ERROR(error, CommitHandleBatch(psPerProc)) + +#ifdef INLINE_IS_PRAGMA +#pragma inline(ReleaseHandleBatch) +#endif +static INLINE IMG_VOID +ReleaseHandleBatch(PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + if (psPerProc->bHandlesBatched) + { + psPerProc->bHandlesBatched = IMG_FALSE; + + PVRSRVReleaseHandleBatch(psPerProc->psHandleBase); + } +} +#else /* defined(PVR_SECURE_HANDLES) */ +#define NEW_HANDLE_BATCH_OR_ERROR(error, psPerProc, ui32BatchSize) +#define COMMIT_HANDLE_BATCH_OR_ERROR(error, psPerProc) +#define ReleaseHandleBatch(psPerProc) +#endif /* defined(PVR_SECURE_HANDLES) */ + +IMG_INT +DummyBW(IMG_UINT32 ui32BridgeID, + IMG_VOID *psBridgeIn, + IMG_VOID *psBridgeOut, + PVRSRV_PER_PROCESS_DATA *psPerProc); + +typedef IMG_INT (*BridgeWrapperFunction)(IMG_UINT32 ui32BridgeID, + IMG_VOID 
*psBridgeIn, + IMG_VOID *psBridgeOut, + PVRSRV_PER_PROCESS_DATA *psPerProc); + +typedef struct _PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY +{ + BridgeWrapperFunction pfFunction; /*!< The wrapper function that validates the ioctl + arguments before calling into srvkm proper */ +#if defined(DEBUG_BRIDGE_KM) + const IMG_CHAR *pszIOCName; /*!< Name of the ioctl: e.g. "PVRSRV_BRIDGE_CONNECT_SERVICES" */ + const IMG_CHAR *pszFunctionName; /*!< Name of the wrapper function: e.g. "PVRSRVConnectBW" */ + IMG_UINT32 ui32CallCount; /*!< The total number of times the ioctl has been called */ + IMG_UINT32 ui32CopyFromUserTotalBytes; /*!< The total number of bytes copied from + userspace within this ioctl */ + IMG_UINT32 ui32CopyToUserTotalBytes; /*!< The total number of bytes copied from + userspace within this ioctl */ +#endif +}PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY; + +#if defined(SUPPORT_VGX) || defined(SUPPORT_MSVDX) + #if defined(SUPPORT_VGX) + #define BRIDGE_DISPATCH_TABLE_ENTRY_COUNT (PVRSRV_BRIDGE_LAST_VGX_CMD+1) + #define PVRSRV_BRIDGE_LAST_DEVICE_CMD PVRSRV_BRIDGE_LAST_VGX_CMD + #else + #define BRIDGE_DISPATCH_TABLE_ENTRY_COUNT (PVRSRV_BRIDGE_LAST_MSVDX_CMD+1) + #define PVRSRV_BRIDGE_LAST_DEVICE_CMD PVRSRV_BRIDGE_LAST_MSVDX_CMD + #endif +#else + #if defined(SUPPORT_SGX) + #define BRIDGE_DISPATCH_TABLE_ENTRY_COUNT (PVRSRV_BRIDGE_LAST_SGX_CMD+1) + #define PVRSRV_BRIDGE_LAST_DEVICE_CMD PVRSRV_BRIDGE_LAST_SGX_CMD + #else + #define BRIDGE_DISPATCH_TABLE_ENTRY_COUNT (PVRSRV_BRIDGE_LAST_NON_DEVICE_CMD+1) + #define PVRSRV_BRIDGE_LAST_DEVICE_CMD PVRSRV_BRIDGE_LAST_NON_DEVICE_CMD + #endif +#endif + +extern PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY g_BridgeDispatchTable[BRIDGE_DISPATCH_TABLE_ENTRY_COUNT]; + +IMG_VOID +_SetDispatchTableEntry(IMG_UINT32 ui32Index, + const IMG_CHAR *pszIOCName, + BridgeWrapperFunction pfFunction, + const IMG_CHAR *pszFunctionName); + + +/* PRQA S 0884,3410 2*/ /* macro relies on the lack of brackets */ +#define SetDispatchTableEntry(ui32Index, pfFunction) \ + 
_SetDispatchTableEntry(PVRSRV_GET_BRIDGE_ID(ui32Index), #ui32Index, (BridgeWrapperFunction)pfFunction, #pfFunction) + +#define DISPATCH_TABLE_GAP_THRESHOLD 5 + +#if defined(DEBUG) +#define PVRSRV_BRIDGE_ASSERT_CMD(X, Y) PVR_ASSERT(X == PVRSRV_GET_BRIDGE_ID(Y)) +#else +#define PVRSRV_BRIDGE_ASSERT_CMD(X, Y) PVR_UNREFERENCED_PARAMETER(X) +#endif + + +#if defined(DEBUG_BRIDGE_KM) +typedef struct _PVRSRV_BRIDGE_GLOBAL_STATS +{ + IMG_UINT32 ui32IOCTLCount; + IMG_UINT32 ui32TotalCopyFromUserBytes; + IMG_UINT32 ui32TotalCopyToUserBytes; +}PVRSRV_BRIDGE_GLOBAL_STATS; + +/* OS specific code way want to report the stats held here and within the + * BRIDGE_DISPATCH_TABLE_ENTRYs (E.g. on Linux we report these via a + * proc entry /proc/pvr/bridge_stats. Ref printLinuxBridgeStats()) */ +extern PVRSRV_BRIDGE_GLOBAL_STATS g_BridgeGlobalStats; +#endif + + +PVRSRV_ERROR CommonBridgeInit(IMG_VOID); + +IMG_INT BridgedDispatchKM(PVRSRV_PER_PROCESS_DATA * psPerProc, + PVRSRV_BRIDGE_PACKAGE * psBridgePackageKM); + +#if defined (__cplusplus) +} +#endif + +#endif /* __BRIDGED_PVR_BRIDGE_H__ */ + +/****************************************************************************** + End of file (bridged_pvr_bridge.h) +******************************************************************************/ diff --git a/sgx_km/eurasia_km/services4/srvkm/bridged/bridged_support.c b/sgx_km/eurasia_km/services4/srvkm/bridged/bridged_support.c new file mode 100644 index 0000000..2ccdc66 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/bridged/bridged_support.c @@ -0,0 +1,113 @@ +/*************************************************************************/ /*! +@Title PVR Bridge Support Functions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description User/kernel mode bridge support. The functions in here + may be used beyond the bridge code proper (e.g. Linux + mmap interface). 
+@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "img_defs.h" +#include "servicesint.h" +#include "bridged_support.h" + + +/* + * Derive the internal OS specific memory handle from a secure + * handle. + */ +PVRSRV_ERROR +PVRSRVLookupOSMemHandle(PVRSRV_HANDLE_BASE *psHandleBase, IMG_HANDLE *phOSMemHandle, IMG_HANDLE hMHandle) +{ + IMG_HANDLE hMHandleInt; + PVRSRV_HANDLE_TYPE eHandleType; + PVRSRV_ERROR eError; + + /* + * We don't know the type of the handle at this point, so we use + * PVRSRVLookupHandleAnyType to look it up. 
+ */ + eError = PVRSRVLookupHandleAnyType(psHandleBase, &hMHandleInt, + &eHandleType, + hMHandle); + if(eError != PVRSRV_OK) + { + return eError; + } + + switch(eHandleType) + { +#if defined(PVR_SECURE_HANDLES) + case PVRSRV_HANDLE_TYPE_MEM_INFO: + case PVRSRV_HANDLE_TYPE_MEM_INFO_REF: + case PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO: + { + PVRSRV_KERNEL_MEM_INFO *psMemInfo = (PVRSRV_KERNEL_MEM_INFO *)hMHandleInt; + + *phOSMemHandle = psMemInfo->sMemBlk.hOSMemHandle; + + break; + } + case PVRSRV_HANDLE_TYPE_SYNC_INFO: + { + PVRSRV_KERNEL_SYNC_INFO *psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)hMHandleInt; + PVRSRV_KERNEL_MEM_INFO *psMemInfo = psSyncInfo->psSyncDataMemInfoKM; + + *phOSMemHandle = psMemInfo->sMemBlk.hOSMemHandle; + + break; + } + case PVRSRV_HANDLE_TYPE_SOC_TIMER: + { + *phOSMemHandle = (IMG_VOID *)hMHandleInt; + break; + } +#else + case PVRSRV_HANDLE_TYPE_NONE: + *phOSMemHandle = (IMG_VOID *)hMHandleInt; + break; +#endif + default: + return PVRSRV_ERROR_BAD_MAPPING; + } + + return PVRSRV_OK; +} +/****************************************************************************** + End of file (bridged_support.c) +******************************************************************************/ diff --git a/sgx_km/eurasia_km/services4/srvkm/bridged/bridged_support.h b/sgx_km/eurasia_km/services4/srvkm/bridged/bridged_support.h new file mode 100644 index 0000000..e32fa88 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/bridged/bridged_support.h @@ -0,0 +1,68 @@ +/*************************************************************************/ /*! +@Title PVR Bridge Support +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description User/kernel mode bridge support. The functions in here + may be used beyond the bridge code proper (e.g. Linux + mmap interface). +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef __BRIDGED_SUPPORT_H__ +#define __BRIDGED_SUPPORT_H__ + +#include "handle.h" + +#if defined(__cplusplus) +extern "C" { +#endif + +/* + * Derive the internal OS specific memory handle from a secure + * handle. + */ +PVRSRV_ERROR PVRSRVLookupOSMemHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phOSMemHandle, IMG_HANDLE hMHandle); + +#if defined (__cplusplus) +} +#endif + +#endif /* __BRIDGED_SUPPORT_H__ */ + +/****************************************************************************** + End of file (bridged_support.h) +******************************************************************************/ diff --git a/sgx_km/eurasia_km/services4/srvkm/bridged/sgx/bridged_sgx_bridge.c b/sgx_km/eurasia_km/services4/srvkm/bridged/sgx/bridged_sgx_bridge.c new file mode 100644 index 0000000..d0a10ad --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/bridged/sgx/bridged_sgx_bridge.c @@ -0,0 +1,3058 @@ +/*************************************************************************/ /*! +@Title SGX Common Bridge Module (kernel side) +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Receives calls from the user portion of services and + despatches them to functions in the kernel portion. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + + + +#include + +#include "img_defs.h" + +#if defined(SUPPORT_SGX) + +#include "services.h" +#include "pvr_debug.h" +#include "pvr_bridge.h" +#include "sgx_bridge.h" +#include "perproc.h" +#include "power.h" +#include "pvr_bridge_km.h" +#include "sgx_bridge_km.h" +#include "sgx_options.h" + +#if defined(SUPPORT_MSVDX) + #include "msvdx_bridge.h" +#endif + +#include "bridged_pvr_bridge.h" +#include "bridged_sgx_bridge.h" +#include "sgxutils.h" +#include "buffer_manager.h" +#include "pdump_km.h" + +static IMG_INT +SGXGetClientInfoBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_GETCLIENTINFO *psGetClientInfoIN, + PVRSRV_BRIDGE_OUT_GETCLIENTINFO *psGetClientInfoOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_HANDLE hDevCookieInt; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_GETCLIENTINFO); + + psGetClientInfoOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDevCookieInt, + psGetClientInfoIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + if(psGetClientInfoOUT->eError != PVRSRV_OK) + { + return 0; + } + + psGetClientInfoOUT->eError = + SGXGetClientInfoKM(hDevCookieInt, + &psGetClientInfoOUT->sClientInfo); + return 0; +} + +static IMG_INT +SGXReleaseClientInfoBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_RELEASECLIENTINFO *psReleaseClientInfoIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_SGXDEV_INFO *psDevInfo; + IMG_HANDLE hDevCookieInt; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_RELEASECLIENTINFO); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDevCookieInt, + psReleaseClientInfoIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookieInt)->pvDevice; + + PVR_ASSERT(psDevInfo->ui32ClientRefCount > 0); + + /* + * psDevInfo->ui32ClientRefCount can be zero if 
an error occurred before SGXGetClientInfo is called + */ + if (psDevInfo->ui32ClientRefCount > 0) + { + psDevInfo->ui32ClientRefCount--; + } + + psRetOUT->eError = PVRSRV_OK; + + return 0; +} + + +static IMG_INT +SGXGetInternalDevInfoBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_GETINTERNALDEVINFO *psSGXGetInternalDevInfoIN, + PVRSRV_BRIDGE_OUT_GETINTERNALDEVINFO *psSGXGetInternalDevInfoOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_HANDLE hDevCookieInt; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_GETINTERNALDEVINFO); + + psSGXGetInternalDevInfoOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDevCookieInt, + psSGXGetInternalDevInfoIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + if(psSGXGetInternalDevInfoOUT->eError != PVRSRV_OK) + { + return 0; + } + + psSGXGetInternalDevInfoOUT->eError = + SGXGetInternalDevInfoKM(hDevCookieInt, + &psSGXGetInternalDevInfoOUT->sSGXInternalDevInfo); + + /* + * Handle is not allocated in batch mode, as there is no resource + * allocation to undo if the handle allocation fails. 
+ */ + psSGXGetInternalDevInfoOUT->eError = + PVRSRVAllocHandle(psPerProc->psHandleBase, + &psSGXGetInternalDevInfoOUT->sSGXInternalDevInfo.hHostCtlKernelMemInfoHandle, + psSGXGetInternalDevInfoOUT->sSGXInternalDevInfo.hHostCtlKernelMemInfoHandle, + PVRSRV_HANDLE_TYPE_MEM_INFO, + PVRSRV_HANDLE_ALLOC_FLAG_SHARED); + + return 0; +} + + +static IMG_INT +SGXDoKickBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_DOKICK *psDoKickIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_HANDLE hDevCookieInt; + IMG_UINT32 i; + IMG_INT ret = 0; + IMG_UINT32 ui32NumDstSyncs; + IMG_HANDLE *phKernelSyncInfoHandles = IMG_NULL; + IMG_HANDLE *pahDstSyncHandles; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_DOKICK); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDevCookieInt, + psDoKickIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &psDoKickIN->sCCBKick.hCCBKernelMemInfo, + psDoKickIN->sCCBKick.hCCBKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + if(psDoKickIN->sCCBKick.hTA3DSyncInfo != IMG_NULL) + { + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &psDoKickIN->sCCBKick.hTA3DSyncInfo, + psDoKickIN->sCCBKick.hTA3DSyncInfo, + PVRSRV_HANDLE_TYPE_SYNC_INFO); + + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + } + + if(psDoKickIN->sCCBKick.hTASyncInfo != IMG_NULL) + { + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &psDoKickIN->sCCBKick.hTASyncInfo, + psDoKickIN->sCCBKick.hTASyncInfo, + PVRSRV_HANDLE_TYPE_SYNC_INFO); + + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + } + +#if defined(FIX_HW_BRN_31620) + /* We need to lookup the mem context and pass it through */ + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &psDoKickIN->sCCBKick.hDevMemContext, + 
psDoKickIN->sCCBKick.hDevMemContext, + PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT); + + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } +#endif + + if(psDoKickIN->sCCBKick.h3DSyncInfo != IMG_NULL) + { + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &psDoKickIN->sCCBKick.h3DSyncInfo, + psDoKickIN->sCCBKick.h3DSyncInfo, + PVRSRV_HANDLE_TYPE_SYNC_INFO); + + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + } + + /* texture dependency details */ + if (psDoKickIN->sCCBKick.ui32NumSrcSyncs > SGX_MAX_SRC_SYNCS_TA) + { + psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + return 0; + } + +#if !(defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) || defined(PVR_ANDROID_NATIVE_WINDOW_HAS_FENCE)) + for(i=0; isCCBKick.ui32NumSrcSyncs; i++) + { + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &psDoKickIN->sCCBKick.ahSrcKernelSyncInfo[i], + psDoKickIN->sCCBKick.ahSrcKernelSyncInfo[i], + PVRSRV_HANDLE_TYPE_SYNC_INFO); + + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + } +#endif /* !defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) */ + + if (psDoKickIN->sCCBKick.ui32NumTAStatusVals > SGX_MAX_TA_STATUS_VALS) + { + psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + return 0; + } + for (i = 0; i < psDoKickIN->sCCBKick.ui32NumTAStatusVals; i++) + { + psRetOUT->eError = +#if defined(SUPPORT_SGX_NEW_STATUS_VALS) + PVRSRVLookupHandle(psPerProc->psHandleBase, + &psDoKickIN->sCCBKick.asTAStatusUpdate[i].hKernelMemInfo, + psDoKickIN->sCCBKick.asTAStatusUpdate[i].hKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + + +#else + PVRSRVLookupHandle(psPerProc->psHandleBase, + &psDoKickIN->sCCBKick.ahTAStatusSyncInfo[i], + psDoKickIN->sCCBKick.ahTAStatusSyncInfo[i], + PVRSRV_HANDLE_TYPE_SYNC_INFO); +#endif + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + } + + if (psDoKickIN->sCCBKick.ui32Num3DStatusVals > SGX_MAX_3D_STATUS_VALS) + { + psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + return 0; + } + for(i = 0; i < 
psDoKickIN->sCCBKick.ui32Num3DStatusVals; i++) + { + psRetOUT->eError = +#if defined(SUPPORT_SGX_NEW_STATUS_VALS) + PVRSRVLookupHandle(psPerProc->psHandleBase, + &psDoKickIN->sCCBKick.as3DStatusUpdate[i].hKernelMemInfo, + psDoKickIN->sCCBKick.as3DStatusUpdate[i].hKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + +#else + PVRSRVLookupHandle(psPerProc->psHandleBase, + &psDoKickIN->sCCBKick.ah3DStatusSyncInfo[i], + psDoKickIN->sCCBKick.ah3DStatusSyncInfo[i], + PVRSRV_HANDLE_TYPE_SYNC_INFO); +#endif + + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + } + + ui32NumDstSyncs = psDoKickIN->sCCBKick.ui32NumDstSyncObjects; + + if(ui32NumDstSyncs > 0) + { + if(!OSAccessOK(PVR_VERIFY_READ, + (IMG_HANDLE*)psDoKickIN->sCCBKick.hDstSyncHandles, + ui32NumDstSyncs * sizeof(IMG_HANDLE))) + { + PVR_DPF((PVR_DBG_ERROR, "%s: SGXDoKickBW:" + " Invalid pasDstSyncHandles pointer", __FUNCTION__)); + return -EFAULT; + } + + psRetOUT->eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + ui32NumDstSyncs * sizeof(IMG_HANDLE), + (IMG_VOID **)&phKernelSyncInfoHandles, + 0, + "Array of Synchronization Info Handles"); + if (psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + if(CopyFromUserWrapper(psPerProc, + ui32BridgeID, + phKernelSyncInfoHandles, + (IMG_HANDLE*)psDoKickIN->sCCBKick.hDstSyncHandles, + ui32NumDstSyncs * sizeof(IMG_HANDLE)) != PVRSRV_OK) + { + ret = -EFAULT; + goto PVRSRV_BRIDGE_SGX_DOKICK_RETURN_RESULT; + } + + /* Set sCCBKick.hDstSyncHandles to point to the local memory */ + psDoKickIN->sCCBKick.hDstSyncHandles = (IMG_HANDLE)phKernelSyncInfoHandles; + pahDstSyncHandles = psDoKickIN->sCCBKick.hDstSyncHandles; + for( i = 0; i < ui32NumDstSyncs; i++) + { + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &pahDstSyncHandles[i], + pahDstSyncHandles[i], + PVRSRV_HANDLE_TYPE_SYNC_INFO); + + if(psRetOUT->eError != PVRSRV_OK) + { + goto PVRSRV_BRIDGE_SGX_DOKICK_RETURN_RESULT; + } + + } + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + 
    /* ---- continuation of SGXDoKickBW (function opens before this chunk) ---- */
            &psDoKickIN->sCCBKick.hKernelHWSyncListMemInfo,
            psDoKickIN->sCCBKick.hKernelHWSyncListMemInfo,
            PVRSRV_HANDLE_TYPE_MEM_INFO);

        if(psRetOUT->eError != PVRSRV_OK)
        {
            goto PVRSRV_BRIDGE_SGX_DOKICK_RETURN_RESULT;
        }
    }

    /* All user-supplied handles validated and translated in place;
     * hand the CCB kick to the kernel-mode services. */
    psRetOUT->eError =
        SGXDoKickKM(hDevCookieInt,
                    &psDoKickIN->sCCBKick);

PVRSRV_BRIDGE_SGX_DOKICK_RETURN_RESULT:

    /* Free the temporary dst-sync handle array (allocated earlier in this
     * function, outside the visible chunk; size recomputed the same way). */
    if(phKernelSyncInfoHandles)
    {
        OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
                  ui32NumDstSyncs * sizeof(IMG_HANDLE),
                  (IMG_VOID *)phKernelSyncInfoHandles,
                  0);
        /*not nulling pointer, out of scope*/
    }
    return ret;
}


/*
 * Bridge handler: ask the SGX microkernel to (re)schedule its process
 * queues.
 *
 * ui32BridgeID      - dispatch ID, checked against
 *                     PVRSRV_BRIDGE_SGX_SCHEDULE_PROCESS_QUEUES
 * psScheduleProcQIN - bridge input holding the per-process device cookie
 * psRetOUT          - bridge return; eError carries the PVRSRV status
 * psPerProc         - per-process data owning the handle base
 *
 * Always returns 0; failures are reported via psRetOUT->eError (standard
 * convention for all *BW bridge wrappers in this file).
 */
static IMG_INT
SGXScheduleProcessQueuesBW(IMG_UINT32 ui32BridgeID,
                           PVRSRV_BRIDGE_IN_SGX_SCHEDULE_PROCESS_QUEUES *psScheduleProcQIN,
                           PVRSRV_BRIDGE_RETURN *psRetOUT,
                           PVRSRV_PER_PROCESS_DATA *psPerProc)
{
    IMG_HANDLE hDevCookieInt;

    PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_SCHEDULE_PROCESS_QUEUES);

    /* Translate the per-process device cookie into the kernel object. */
    psRetOUT->eError =
        PVRSRVLookupHandle(psPerProc->psHandleBase,
                           &hDevCookieInt,
                           psScheduleProcQIN->hDevCookie,
                           PVRSRV_HANDLE_TYPE_DEV_NODE);

    if(psRetOUT->eError != PVRSRV_OK)
    {
        return 0;
    }

    psRetOUT->eError = SGXScheduleProcessQueuesKM(hDevCookieInt);

    return 0;
}


#if defined(TRANSFER_QUEUE)
/*
 * Bridge handler: submit a transfer-queue kick to SGX.
 * Validates and translates (in place) every user-supplied handle in the
 * kick structure, bound-checks the sync-op counts against the fixed-size
 * arrays, then forwards the kick to SGXSubmitTransferKM.
 * Always returns 0; failures are reported via psRetOUT->eError.
 */
static IMG_INT
SGXSubmitTransferBW(IMG_UINT32 ui32BridgeID,
                    PVRSRV_BRIDGE_IN_SUBMITTRANSFER *psSubmitTransferIN,
                    PVRSRV_BRIDGE_RETURN *psRetOUT,
                    PVRSRV_PER_PROCESS_DATA *psPerProc)
{
    IMG_HANDLE hDevCookieInt;
    PVRSRV_TRANSFER_SGX_KICK *psKick;
    IMG_UINT32 i;

    PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_SUBMITTRANSFER);
    PVR_UNREFERENCED_PARAMETER(ui32BridgeID);

    psKick = &psSubmitTransferIN->sKick;

#if defined(FIX_HW_BRN_31620)
    /* We need to lookup the mem context and pass it through */
    psRetOUT->eError =
        PVRSRVLookupHandle(psPerProc->psHandleBase,
                           &psKick->hDevMemContext,
                           psKick->hDevMemContext,
                           PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);

    if(psRetOUT->eError != PVRSRV_OK)
    {
        return 0;
    }
#endif

    psRetOUT->eError =
        PVRSRVLookupHandle(psPerProc->psHandleBase,
                           &hDevCookieInt,
                           psSubmitTransferIN->hDevCookie,
                           PVRSRV_HANDLE_TYPE_DEV_NODE);
    if(psRetOUT->eError != PVRSRV_OK)
    {
        return 0;
    }

    /* Command circular buffer backing this transfer kick. */
    psRetOUT->eError =
        PVRSRVLookupHandle(psPerProc->psHandleBase,
                           &psKick->hCCBMemInfo,
                           psKick->hCCBMemInfo,
                           PVRSRV_HANDLE_TYPE_MEM_INFO);
    if(psRetOUT->eError != PVRSRV_OK)
    {
        return 0;
    }

    /* Optional TA-dependency sync object. */
    if (psKick->hTASyncInfo != IMG_NULL)
    {
        psRetOUT->eError =
            PVRSRVLookupHandle(psPerProc->psHandleBase,
                               &psKick->hTASyncInfo,
                               psKick->hTASyncInfo,
                               PVRSRV_HANDLE_TYPE_SYNC_INFO);
        if(psRetOUT->eError != PVRSRV_OK)
        {
            return 0;
        }
    }

    /* Optional 3D-dependency sync object. */
    if (psKick->h3DSyncInfo != IMG_NULL)
    {
        psRetOUT->eError =
            PVRSRVLookupHandle(psPerProc->psHandleBase,
                               &psKick->h3DSyncInfo,
                               psKick->h3DSyncInfo,
                               PVRSRV_HANDLE_TYPE_SYNC_INFO);
        if(psRetOUT->eError != PVRSRV_OK)
        {
            return 0;
        }
    }

    /* Bound-check the user-supplied count BEFORE indexing the fixed-size
     * handle array (prevents out-of-bounds access). */
    if (psKick->ui32NumSrcSync > SGX_MAX_TRANSFER_SYNC_OPS)
    {
        psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
        return 0;
    }
    for (i = 0; i < psKick->ui32NumSrcSync; i++)
    {
        psRetOUT->eError =
            PVRSRVLookupHandle(psPerProc->psHandleBase,
                               &psKick->ahSrcSyncInfo[i],
                               psKick->ahSrcSyncInfo[i],
                               PVRSRV_HANDLE_TYPE_SYNC_INFO);
        if(psRetOUT->eError != PVRSRV_OK)
        {
            return 0;
        }
    }

    /* Same bound check for the destination sync handles. */
    if (psKick->ui32NumDstSync > SGX_MAX_TRANSFER_SYNC_OPS)
    {
        psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
        return 0;
    }
    for (i = 0; i < psKick->ui32NumDstSync; i++)
    {
        psRetOUT->eError =
            PVRSRVLookupHandle(psPerProc->psHandleBase,
                               &psKick->ahDstSyncInfo[i],
                               psKick->ahDstSyncInfo[i],
                               PVRSRV_HANDLE_TYPE_SYNC_INFO);
        if(psRetOUT->eError != PVRSRV_OK)
        {
            return 0;
        }
    }

    psRetOUT->eError = SGXSubmitTransferKM(hDevCookieInt, psKick);

    return 0;
}

/*
 * Bridge handler: change the scheduling priority of a HW transfer context.
 * Looks up the device node and the transfer-context handle, then forwards
 * to SGXSetTransferContextPriorityKM.
 * Always returns 0; failures are reported via psRetOUT->eError.
 */
static IMG_INT
SGXSetTransferContextPriorityBW(IMG_UINT32 ui32BridgeID,
                                PVRSRV_BRIDGE_IN_SGX_SET_TRANSFER_CONTEXT_PRIORITY *psSGXSetTransferContextPriorityIN,
                                PVRSRV_BRIDGE_RETURN *psRetOUT,
                                PVRSRV_PER_PROCESS_DATA *psPerProc)
{
    IMG_HANDLE hDevCookieInt;
    IMG_HANDLE hTransferContextInt;

    PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_SET_TRANSFER_CONTEXT_PRIORITY);

    psRetOUT->eError =
        PVRSRVLookupHandle(psPerProc->psHandleBase,
                           &hDevCookieInt,
                           psSGXSetTransferContextPriorityIN->hDevCookie,
                           PVRSRV_HANDLE_TYPE_DEV_NODE);

    if(psRetOUT->eError != PVRSRV_OK)
    {
        return 0;
    }

    psRetOUT->eError =
        PVRSRVLookupHandle(psPerProc->psHandleBase,
                           &hTransferContextInt,
                           psSGXSetTransferContextPriorityIN->hHWTransferContext,
                           PVRSRV_HANDLE_TYPE_SGX_HW_TRANSFER_CONTEXT);

    if(psRetOUT->eError != PVRSRV_OK)
    {
        return 0;
    }

    /* NOTE(review): ui32OffsetOfPriorityField comes straight from user
     * space - confirm the KM call range-checks it before use. */
    psRetOUT->eError = SGXSetTransferContextPriorityKM(
            hDevCookieInt,
            hTransferContextInt,
            psSGXSetTransferContextPriorityIN->ui32Priority,
            psSGXSetTransferContextPriorityIN->ui32OffsetOfPriorityField);

    return 0;
}

/*
 * Bridge handler: change the scheduling priority of a HW render context.
 * Structurally mirrors SGXSetTransferContextPriorityBW, using the
 * render-context handle type instead.
 * Always returns 0; failures are reported via psRetOUT->eError.
 */
static IMG_INT
SGXSetRenderContextPriorityBW(IMG_UINT32 ui32BridgeID,
                              PVRSRV_BRIDGE_IN_SGX_SET_RENDER_CONTEXT_PRIORITY *psSGXSetRenderContextPriorityIN,
                              PVRSRV_BRIDGE_RETURN *psRetOUT,
                              PVRSRV_PER_PROCESS_DATA *psPerProc)
{
    IMG_HANDLE hDevCookieInt;
    IMG_HANDLE hRenderContextInt;

    PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_SET_RENDER_CONTEXT_PRIORITY);

    psRetOUT->eError =
        PVRSRVLookupHandle(psPerProc->psHandleBase,
                           &hDevCookieInt,
                           psSGXSetRenderContextPriorityIN->hDevCookie,
                           PVRSRV_HANDLE_TYPE_DEV_NODE);

    if(psRetOUT->eError != PVRSRV_OK)
    {
        return 0;
    }

    psRetOUT->eError =
        PVRSRVLookupHandle(psPerProc->psHandleBase,
                           &hRenderContextInt,
                           psSGXSetRenderContextPriorityIN->hHWRenderContext,
                           PVRSRV_HANDLE_TYPE_SGX_HW_RENDER_CONTEXT);

    if(psRetOUT->eError != PVRSRV_OK)
    {
        return 0;
    }

    /* NOTE(review): ui32OffsetOfPriorityField is user-controlled - confirm
     * the KM call range-checks it before use. */
    psRetOUT->eError = SGXSetRenderContextPriorityKM(
            hDevCookieInt,
            hRenderContextInt,
            psSGXSetRenderContextPriorityIN->ui32Priority,
            psSGXSetRenderContextPriorityIN->ui32OffsetOfPriorityField);

    return 0;
}


#if defined(SGX_FEATURE_2D_HARDWARE)
+static IMG_INT +SGXSubmit2DBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_SUBMIT2D *psSubmit2DIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_HANDLE hDevCookieInt; + PVRSRV_2D_SGX_KICK *psKick; + IMG_UINT32 i; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_SUBMIT2D); + PVR_UNREFERENCED_PARAMETER(ui32BridgeID); + + psKick = &psSubmit2DIN->sKick; + +#if defined(FIX_HW_BRN_31620) + /* We need to lookup the mem context and pass it through */ + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &psKick->hDevMemContext, + psKick->hDevMemContext, + PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT); + + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } +#endif + + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDevCookieInt, + psSubmit2DIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &psKick->hCCBMemInfo, + psKick->hCCBMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + if (psKick->hTASyncInfo != IMG_NULL) + { + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &psKick->hTASyncInfo, + psKick->hTASyncInfo, + PVRSRV_HANDLE_TYPE_SYNC_INFO); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + } + + if (psKick->h3DSyncInfo != IMG_NULL) + { + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &psKick->h3DSyncInfo, + psKick->h3DSyncInfo, + PVRSRV_HANDLE_TYPE_SYNC_INFO); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + } + + if (psKick->ui32NumSrcSync > SGX_MAX_2D_SRC_SYNC_OPS) + { + psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + return 0; + } + for (i = 0; i < psKick->ui32NumSrcSync; i++) + { + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &psKick->ahSrcSyncInfo[i], + psKick->ahSrcSyncInfo[i], + PVRSRV_HANDLE_TYPE_SYNC_INFO); + if(psRetOUT->eError != 
PVRSRV_OK)
        {
            /* (continuation of SGXSubmit2DBW's src-sync lookup loop) */
            return 0;
        }
    }

    /* Optional destination sync object (single handle for 2D blits). */
    if (psKick->hDstSyncInfo != IMG_NULL)
    {
        psRetOUT->eError =
            PVRSRVLookupHandle(psPerProc->psHandleBase,
                               &psKick->hDstSyncInfo,
                               psKick->hDstSyncInfo,
                               PVRSRV_HANDLE_TYPE_SYNC_INFO);
        if(psRetOUT->eError != PVRSRV_OK)
        {
            return 0;
        }
    }

    psRetOUT->eError =
        SGXSubmit2DKM(hDevCookieInt, psKick);

    return 0;
}
#endif /* #if defined(SGX_FEATURE_2D_HARDWARE) */
#endif /* #if defined(TRANSFER_QUEUE) */


/*
 * Bridge handler: service a miscellaneous-info request against the SGX
 * device.  Copies the user's SGX_MISC_INFO into kernel space, services it
 * via SGXGetMiscInfoKM, and copies the result back.
 *
 * Returns 0 on the normal paths (errors reported via psRetOUT->eError),
 * or -EFAULT when the device node is NULL or a user copy fails.
 */
static IMG_INT
SGXGetMiscInfoBW(IMG_UINT32 ui32BridgeID,
                 PVRSRV_BRIDGE_IN_SGXGETMISCINFO *psSGXGetMiscInfoIN,
                 PVRSRV_BRIDGE_RETURN *psRetOUT,
                 PVRSRV_PER_PROCESS_DATA *psPerProc)
{
    IMG_HANDLE hDevCookieInt;
    IMG_HANDLE hDevMemContextInt = 0;
    PVRSRV_SGXDEV_INFO *psDevInfo;
    SGX_MISC_INFO sMiscInfo;     /* kernel-space copy of the user's request */
    PVRSRV_DEVICE_NODE *psDeviceNode;

    PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
                             PVRSRV_BRIDGE_SGX_GETMISCINFO);

    psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
                                          &hDevCookieInt,
                                          psSGXGetMiscInfoIN->hDevCookie,
                                          PVRSRV_HANDLE_TYPE_DEV_NODE);

    if(psRetOUT->eError != PVRSRV_OK)
    {
        return 0;
    }

#if defined(SUPPORT_SGX_EDM_MEMORY_DEBUG)
    /* Lookup handle for dev mem context */
    /* NOTE(review): hMiscInfo is a USER-SPACE pointer, yet eRequest and
     * hDevMemContext are dereferenced here directly, before the
     * CopyFromUserWrapper below.  That is both an unguarded user-memory
     * access and a TOCTOU window (user can change eRequest between this
     * read and the copy).  Should go through the copied sMiscInfo instead
     * - confirm and fix in the upstream DDK source, not in this patch. */
    if (((SGX_MISC_INFO*)psSGXGetMiscInfoIN->hMiscInfo)->eRequest == SGX_MISC_INFO_REQUEST_MEMREAD)
    {
        psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
                                              &hDevMemContextInt,
                                              ((SGX_MISC_INFO*)psSGXGetMiscInfoIN->hMiscInfo)->hDevMemContext,
                                              PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);

        if(psRetOUT->eError != PVRSRV_OK)
        {
            return 0;
        }
    }
#endif
    /* device node is required for scheduling a CCB command */
    psDeviceNode = hDevCookieInt;
    PVR_ASSERT(psDeviceNode != IMG_NULL);
    if (psDeviceNode == IMG_NULL)
    {
        return -EFAULT;
    }

    psDevInfo = psDeviceNode->pvDevice;

    /* Copy psMiscInfo to kernel space */
    psRetOUT->eError = CopyFromUserWrapper(psPerProc,
                                           ui32BridgeID,
                                           &sMiscInfo,
                                           (SGX_MISC_INFO*)psSGXGetMiscInfoIN->hMiscInfo,
                                           sizeof(SGX_MISC_INFO));
    if (psRetOUT->eError != PVRSRV_OK)
    {
        return -EFAULT;
    }

    {
        /* Service the request against the kernel-space copy. */
        psRetOUT->eError = SGXGetMiscInfoKM(psDevInfo, &sMiscInfo, psDeviceNode, hDevMemContextInt);

        if (psRetOUT->eError != PVRSRV_OK)
        {
            return 0;
        }
    }

    /* Copy back misc info to user address space */
    psRetOUT->eError = CopyToUserWrapper(psPerProc,
                                         ui32BridgeID,
                                         (SGX_MISC_INFO*)psSGXGetMiscInfoIN->hMiscInfo,
                                         &sMiscInfo,
                                         sizeof(SGX_MISC_INFO));
    if (psRetOUT->eError != PVRSRV_OK)
    {
        return -EFAULT;
    }
    return 0;
}


/*
 * Bridge handler: read entries from the SGX hardware-performance circular
 * buffer into a kernel bounce buffer, then copy them out to the user's
 * array.  (Function continues past the end of this chunk.)
 */
static IMG_INT
SGXReadHWPerfCBBW(IMG_UINT32 ui32BridgeID,
                  PVRSRV_BRIDGE_IN_SGX_READ_HWPERF_CB *psSGXReadHWPerfCBIN,
                  PVRSRV_BRIDGE_OUT_SGX_READ_HWPERF_CB *psSGXReadHWPerfCBOUT,
                  PVRSRV_PER_PROCESS_DATA *psPerProc)
{
    IMG_HANDLE hDevCookieInt;
    PVRSRV_SGX_HWPERF_CB_ENTRY *psAllocated, *psHWPerfCBData;
    IMG_HANDLE hAllocatedHandle;
    IMG_UINT32 ui32AllocatedSize;

    PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_READ_HWPERF_CB);

    psSGXReadHWPerfCBOUT->eError =PVRSRVLookupHandle(psPerProc->psHandleBase,
                           &hDevCookieInt,
                           psSGXReadHWPerfCBIN->hDevCookie,
                           PVRSRV_HANDLE_TYPE_DEV_NODE);

    if(psSGXReadHWPerfCBOUT->eError != PVRSRV_OK)
    {
        return 0;
    }
    psHWPerfCBData = (PVRSRV_SGX_HWPERF_CB_ENTRY *)psSGXReadHWPerfCBIN->hHWPerfCBData;
    /* NOTE(review): ui32ArraySize is user-controlled and this 32-bit
     * multiplication is unchecked - a large value can wrap and produce an
     * undersized allocation.  Confirm an upper bound is enforced before
     * this point, or that SGXReadHWPerfCBKM limits writes accordingly. */
    ui32AllocatedSize = psSGXReadHWPerfCBIN->ui32ArraySize *
                            sizeof(psHWPerfCBData[0]);
    ASSIGN_AND_EXIT_ON_ERROR(psSGXReadHWPerfCBOUT->eError,
                        OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
                        ui32AllocatedSize,
                        (IMG_VOID **)&psAllocated,
                        &hAllocatedHandle,
                        "Array of Hardware Performance Circular Buffer Data"));

    psSGXReadHWPerfCBOUT->eError = SGXReadHWPerfCBKM(hDevCookieInt,
                                                     psSGXReadHWPerfCBIN->ui32ArraySize,
                                                     psAllocated,
                                                     &psSGXReadHWPerfCBOUT->ui32DataCount,
                                                     &psSGXReadHWPerfCBOUT->ui32ClockSpeed,
                                                     &psSGXReadHWPerfCBOUT->ui32HostTimeStamp);
    if (psSGXReadHWPerfCBOUT->eError == PVRSRV_OK)
    {
        /* Copy the harvested entries out to the user's array. */
        psSGXReadHWPerfCBOUT->eError = CopyToUserWrapper(psPerProc,
                                                         ui32BridgeID,
                                                         psHWPerfCBData,
psAllocated, + ui32AllocatedSize); + } + + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, + ui32AllocatedSize, + psAllocated, + hAllocatedHandle); + /*not nulling pointer, out of scope*/ + + return 0; +} + + +static IMG_INT +SGXDevInitPart2BW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_SGXDEVINITPART2 *psSGXDevInitPart2IN, + PVRSRV_BRIDGE_OUT_SGXDEVINITPART2 *psSGXDevInitPart2OUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_HANDLE hDevCookieInt; + PVRSRV_ERROR eError; + IMG_BOOL bDissociateFailed = IMG_FALSE; + IMG_BOOL bLookupFailed = IMG_FALSE; + IMG_BOOL bReleaseFailed = IMG_FALSE; + IMG_HANDLE hDummy; + IMG_UINT32 i; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_DEVINITPART2); + + /* Report the kernel-side build options to UM */ + psSGXDevInitPart2OUT->ui32KMBuildOptions = SGX_BUILD_OPTIONS; + + if(!psPerProc->bInitProcess) + { + psSGXDevInitPart2OUT->eError = PVRSRV_ERROR_PROCESS_NOT_INITIALISED; + return 0; + } + + psSGXDevInitPart2OUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDevCookieInt, + psSGXDevInitPart2IN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + if(psSGXDevInitPart2OUT->eError != PVRSRV_OK) + { + return 0; + } + + /* Check all the meminfo handles */ + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDummy, + psSGXDevInitPart2IN->sInitInfo.hKernelCCBMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bLookupFailed = IMG_TRUE; + } + + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDummy, + psSGXDevInitPart2IN->sInitInfo.hKernelCCBCtlMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bLookupFailed = IMG_TRUE; + } + + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDummy, + psSGXDevInitPart2IN->sInitInfo.hKernelCCBEventKickerMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bLookupFailed = IMG_TRUE; + } + + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDummy, + psSGXDevInitPart2IN->sInitInfo.hKernelSGXHostCtlMemInfo, + 
PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bLookupFailed = IMG_TRUE; + } + + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDummy, + psSGXDevInitPart2IN->sInitInfo.hKernelSGXTA3DCtlMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bLookupFailed = IMG_TRUE; + } + +#if defined(FIX_HW_BRN_31272) || defined(FIX_HW_BRN_31780) || defined(FIX_HW_BRN_33920) + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDummy, + psSGXDevInitPart2IN->sInitInfo.hKernelSGXPTLAWriteBackMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bLookupFailed = IMG_TRUE; + } +#endif + + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDummy, + psSGXDevInitPart2IN->sInitInfo.hKernelSGXMiscMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bLookupFailed = IMG_TRUE; + } + +#if defined(SGX_SUPPORT_HWPROFILING) + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDummy, + psSGXDevInitPart2IN->sInitInfo.hKernelHWProfilingMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bLookupFailed = IMG_TRUE; + } +#endif + +#if defined(SUPPORT_SGX_HWPERF) + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDummy, + psSGXDevInitPart2IN->sInitInfo.hKernelHWPerfCBMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "SGXDevInitPart2BW: Failed to look up HWPerf meminfo (possibly due to SUPPORT_SGX_HWPERF option mismatch)")); + bLookupFailed = IMG_TRUE; + } +#endif + + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDummy, + psSGXDevInitPart2IN->sInitInfo.hKernelTASigBufferMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bLookupFailed = IMG_TRUE; + } + + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDummy, + psSGXDevInitPart2IN->sInitInfo.hKernel3DSigBufferMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bLookupFailed = IMG_TRUE; + } + + +#if 
defined(FIX_HW_BRN_31542) || defined(FIX_HW_BRN_36513) + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDummy, + psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAVDMStreamMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bLookupFailed = IMG_TRUE; + } + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDummy, + psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAIndexStreamMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bLookupFailed = IMG_TRUE; + } + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDummy, + psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAPDSMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bLookupFailed = IMG_TRUE; + } + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDummy, + psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAUSEMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bLookupFailed = IMG_TRUE; + } + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDummy, + psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAParamMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bLookupFailed = IMG_TRUE; + } + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDummy, + psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAPMPTMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bLookupFailed = IMG_TRUE; + } + + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDummy, + psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWATPCMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bLookupFailed = IMG_TRUE; + } + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDummy, + psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAPSGRgnHdrMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bLookupFailed = IMG_TRUE; + } +#endif + +#if defined(SGX_FEATURE_VDM_CONTEXT_SWITCH) && \ + defined(FIX_HW_BRN_33657) && defined(SUPPORT_SECURE_33657_FIX) + 
eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDummy, + psSGXDevInitPart2IN->sInitInfo.hKernelVDMStateUpdateBufferMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bLookupFailed = IMG_TRUE; + } +#endif + +#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG) + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDummy, + psSGXDevInitPart2IN->sInitInfo.hKernelEDMStatusBufferMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bLookupFailed = IMG_TRUE; + } +#endif + + for (i = 0; i < SGX_MAX_INIT_MEM_HANDLES; i++) + { + IMG_HANDLE hHandle = psSGXDevInitPart2IN->sInitInfo.asInitMemHandles[i]; + + if (hHandle == IMG_NULL) + { + continue; + } + + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDummy, + hHandle, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bLookupFailed = IMG_TRUE; + } + } + + if (bLookupFailed) + { + PVR_DPF((PVR_DBG_ERROR, "SGXDevInitPart2BW: A handle lookup failed")); + psSGXDevInitPart2OUT->eError = PVRSRV_ERROR_INIT2_PHASE_FAILED; + return 0; + } + + /* Lookup and release the device memory handles */ + eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, + &psSGXDevInitPart2IN->sInitInfo.hKernelCCBMemInfo, + psSGXDevInitPart2IN->sInitInfo.hKernelCCBMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bReleaseFailed = IMG_TRUE; + } + + eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, + &psSGXDevInitPart2IN->sInitInfo.hKernelCCBCtlMemInfo, + psSGXDevInitPart2IN->sInitInfo.hKernelCCBCtlMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bReleaseFailed = IMG_TRUE; + } + + eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, + &psSGXDevInitPart2IN->sInitInfo.hKernelCCBEventKickerMemInfo, + psSGXDevInitPart2IN->sInitInfo.hKernelCCBEventKickerMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bReleaseFailed = IMG_TRUE; + } + + + eError = 
PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, + &psSGXDevInitPart2IN->sInitInfo.hKernelSGXHostCtlMemInfo, + psSGXDevInitPart2IN->sInitInfo.hKernelSGXHostCtlMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bReleaseFailed = IMG_TRUE; + } + + eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, + &psSGXDevInitPart2IN->sInitInfo.hKernelSGXTA3DCtlMemInfo, + psSGXDevInitPart2IN->sInitInfo.hKernelSGXTA3DCtlMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bReleaseFailed = IMG_TRUE; + } + +#if defined(FIX_HW_BRN_31272) || defined(FIX_HW_BRN_31780) || defined(FIX_HW_BRN_33920) + eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, + &psSGXDevInitPart2IN->sInitInfo.hKernelSGXPTLAWriteBackMemInfo, + psSGXDevInitPart2IN->sInitInfo.hKernelSGXPTLAWriteBackMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bReleaseFailed = IMG_TRUE; + } +#endif + + eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, + &psSGXDevInitPart2IN->sInitInfo.hKernelSGXMiscMemInfo, + psSGXDevInitPart2IN->sInitInfo.hKernelSGXMiscMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bReleaseFailed = IMG_TRUE; + } + + +#if defined(SGX_SUPPORT_HWPROFILING) + eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, + &psSGXDevInitPart2IN->sInitInfo.hKernelHWProfilingMemInfo, + psSGXDevInitPart2IN->sInitInfo.hKernelHWProfilingMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bReleaseFailed = IMG_TRUE; + } +#endif + +#if defined(SUPPORT_SGX_HWPERF) + eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, + &psSGXDevInitPart2IN->sInitInfo.hKernelHWPerfCBMemInfo, + psSGXDevInitPart2IN->sInitInfo.hKernelHWPerfCBMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bReleaseFailed = IMG_TRUE; + } +#endif + + eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, + &psSGXDevInitPart2IN->sInitInfo.hKernelTASigBufferMemInfo, + 
psSGXDevInitPart2IN->sInitInfo.hKernelTASigBufferMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bReleaseFailed = IMG_TRUE; + } + + eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, + &psSGXDevInitPart2IN->sInitInfo.hKernel3DSigBufferMemInfo, + psSGXDevInitPart2IN->sInitInfo.hKernel3DSigBufferMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bReleaseFailed = IMG_TRUE; + } + + +#if defined(FIX_HW_BRN_31542) || defined(FIX_HW_BRN_36513) + eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, + &psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAVDMStreamMemInfo, + psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAVDMStreamMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bReleaseFailed = IMG_TRUE; + } + eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, + &psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAIndexStreamMemInfo, + psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAIndexStreamMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bReleaseFailed = IMG_TRUE; + } + eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, + &psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAPDSMemInfo, + psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAPDSMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bReleaseFailed = IMG_TRUE; + } + eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, + &psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAUSEMemInfo, + psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAUSEMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bReleaseFailed = IMG_TRUE; + } + eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, + &psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAParamMemInfo, + psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAParamMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bReleaseFailed = IMG_TRUE; + } + eError = 
PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, + &psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAPMPTMemInfo, + psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAPMPTMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bReleaseFailed = IMG_TRUE; + } + eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, + &psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWATPCMemInfo, + psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWATPCMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bReleaseFailed = IMG_TRUE; + } + eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, + &psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAPSGRgnHdrMemInfo, + psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAPSGRgnHdrMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bReleaseFailed = IMG_TRUE; + } +#endif +#if defined(SGX_FEATURE_VDM_CONTEXT_SWITCH) && \ + defined(FIX_HW_BRN_33657) && defined(SUPPORT_SECURE_33657_FIX) + eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, + &psSGXDevInitPart2IN->sInitInfo.hKernelVDMStateUpdateBufferMemInfo, + psSGXDevInitPart2IN->sInitInfo.hKernelVDMStateUpdateBufferMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bReleaseFailed = IMG_TRUE; + } +#endif + +#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG) + eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, + &psSGXDevInitPart2IN->sInitInfo.hKernelEDMStatusBufferMemInfo, + psSGXDevInitPart2IN->sInitInfo.hKernelEDMStatusBufferMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bReleaseFailed = IMG_TRUE; + } +#endif + + for (i = 0; i < SGX_MAX_INIT_MEM_HANDLES; i++) + { + IMG_HANDLE *phHandle = &psSGXDevInitPart2IN->sInitInfo.asInitMemHandles[i]; + + if (*phHandle == IMG_NULL) + continue; + + eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, + phHandle, + *phHandle, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bReleaseFailed = 
IMG_TRUE; + } + } + + if (bReleaseFailed) + { + PVR_DPF((PVR_DBG_ERROR, "SGXDevInitPart2BW: A handle release failed")); + psSGXDevInitPart2OUT->eError = PVRSRV_ERROR_INIT2_PHASE_FAILED; + /* + * Given that we checked the handles before release, a release + * failure is unexpected. + */ + PVR_DBG_BREAK; + return 0; + } + + /* Dissociate device memory from caller */ + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelCCBMemInfo); + if (eError != PVRSRV_OK) + { + bDissociateFailed = IMG_TRUE; + } + + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelCCBCtlMemInfo); + if (eError != PVRSRV_OK) + { + bDissociateFailed = IMG_TRUE; + } + + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelCCBEventKickerMemInfo); + if (eError != PVRSRV_OK) + { + bDissociateFailed = IMG_TRUE; + } + + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelSGXHostCtlMemInfo); + if (eError != PVRSRV_OK) + { + bDissociateFailed = IMG_TRUE; + } + + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelSGXTA3DCtlMemInfo); + if (eError != PVRSRV_OK) + { + bDissociateFailed = IMG_TRUE; + } + +#if defined(FIX_HW_BRN_31272) || defined(FIX_HW_BRN_31780) || defined(FIX_HW_BRN_33920) + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelSGXPTLAWriteBackMemInfo); + if (eError != PVRSRV_OK) + { + bDissociateFailed = IMG_TRUE; + } +#endif + + /* Dissociate SGX MiscInfo buffer from user space */ + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelSGXMiscMemInfo); + if (eError != PVRSRV_OK) + { + bDissociateFailed = IMG_TRUE; + } + + +#if defined(SGX_SUPPORT_HWPROFILING) + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelHWProfilingMemInfo); + bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK); +#endif 
+ +#if defined(SUPPORT_SGX_HWPERF) + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelHWPerfCBMemInfo); + if (eError != PVRSRV_OK) + { + bDissociateFailed = IMG_TRUE; + } +#endif + + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelTASigBufferMemInfo); + if (eError != PVRSRV_OK) + { + bDissociateFailed = IMG_TRUE; + } + + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernel3DSigBufferMemInfo); + if (eError != PVRSRV_OK) + { + bDissociateFailed = IMG_TRUE; + } + +#if defined(FIX_HW_BRN_31542) || defined(FIX_HW_BRN_36513) + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAVDMStreamMemInfo); + bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK); + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAIndexStreamMemInfo); + bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK); + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAPDSMemInfo); + bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK); + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAUSEMemInfo); + bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK); + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAParamMemInfo); + bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK); + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAPMPTMemInfo); + bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK); + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWATPCMemInfo); + bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK); + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAPSGRgnHdrMemInfo); + 
bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK); +#endif + +#if defined(SGX_FEATURE_VDM_CONTEXT_SWITCH) && \ + defined(FIX_HW_BRN_33657) && defined(SUPPORT_SECURE_33657_FIX) + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelVDMStateUpdateBufferMemInfo); + bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK); +#endif + +#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG) + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelEDMStatusBufferMemInfo); + bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK); +#endif + + for (i = 0; i < SGX_MAX_INIT_MEM_HANDLES; i++) + { + IMG_HANDLE hHandle = psSGXDevInitPart2IN->sInitInfo.asInitMemHandles[i]; + + if (hHandle == IMG_NULL) + continue; + + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, hHandle); + if (eError != PVRSRV_OK) + { + bDissociateFailed = IMG_TRUE; + } + } + + /* If any dissociations failed, free all the device memory passed in */ + if(bDissociateFailed) + { + PVRSRVFreeDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelCCBMemInfo); + PVRSRVFreeDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelCCBCtlMemInfo); + PVRSRVFreeDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelSGXHostCtlMemInfo); + PVRSRVFreeDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelSGXTA3DCtlMemInfo); +#if defined(FIX_HW_BRN_31272) || defined(FIX_HW_BRN_31780) || defined(FIX_HW_BRN_33920) + PVRSRVFreeDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelSGXPTLAWriteBackMemInfo); +#endif + PVRSRVFreeDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelSGXMiscMemInfo); + + for (i = 0; i < SGX_MAX_INIT_MEM_HANDLES; i++) + { + IMG_HANDLE hHandle = psSGXDevInitPart2IN->sInitInfo.asInitMemHandles[i]; + + if (hHandle == IMG_NULL) + continue; + + PVRSRVFreeDeviceMemKM(hDevCookieInt, (PVRSRV_KERNEL_MEM_INFO *)hHandle); + + } + + PVR_DPF((PVR_DBG_ERROR, "SGXDevInitPart2BW: A dissociate failed")); 

        /* (continuation of SGXDevInitPart2BW's dissociate-failure path) */
        psSGXDevInitPart2OUT->eError = PVRSRV_ERROR_INIT2_PHASE_FAILED;

        /* A dissociation failure is unexpected */
        PVR_DBG_BREAK;
        return 0;
    }

    /* All handles validated, released and dissociated - run part 2 of
     * device initialisation proper. */
    psSGXDevInitPart2OUT->eError =
        DevInitSGXPart2KM(psPerProc,
                          hDevCookieInt,
                          &psSGXDevInitPart2IN->sInitInfo);

    return 0;
}


/*
 * Bridge handler: register a HW render context with the SGX services and
 * return a per-process handle for it.
 * Always returns 0; failures are reported via the OUT structure's eError.
 */
static IMG_INT
SGXRegisterHWRenderContextBW(IMG_UINT32 ui32BridgeID,
                             PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_RENDER_CONTEXT *psSGXRegHWRenderContextIN,
                             PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_RENDER_CONTEXT *psSGXRegHWRenderContextOUT,
                             PVRSRV_PER_PROCESS_DATA *psPerProc)
{
    IMG_HANDLE hDevCookieInt;
//  PVRSRV_SGXDEV_INFO *psDevInfo;
    IMG_HANDLE hHWRenderContextInt;

    PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_REGISTER_HW_RENDER_CONTEXT);

    /* Reserve space for the one handle allocated below. */
    NEW_HANDLE_BATCH_OR_ERROR(psSGXRegHWRenderContextOUT->eError, psPerProc, 1);

    psSGXRegHWRenderContextOUT->eError =
        PVRSRVLookupHandle(psPerProc->psHandleBase,
                           &hDevCookieInt,
                           psSGXRegHWRenderContextIN->hDevCookie,
                           PVRSRV_HANDLE_TYPE_DEV_NODE);
    if(psSGXRegHWRenderContextOUT->eError != PVRSRV_OK)
    {
        return 0;
    }

    hHWRenderContextInt =
        SGXRegisterHWRenderContextKM(hDevCookieInt,
                                     psSGXRegHWRenderContextIN->pHWRenderContextCpuVAddr,
                                     psSGXRegHWRenderContextIN->ui32HWRenderContextSize,
                                     psSGXRegHWRenderContextIN->ui32OffsetToPDDevPAddr,
                                     psSGXRegHWRenderContextIN->hDevMemContext,
                                     &psSGXRegHWRenderContextOUT->sHWRenderContextDevVAddr,
                                     psPerProc);

    if (hHWRenderContextInt == IMG_NULL)
    {
        psSGXRegHWRenderContextOUT->eError = PVRSRV_ERROR_UNABLE_TO_REGISTER_CONTEXT;
        return 0;
    }

    /* Wrap the kernel context in a per-process handle for the caller. */
    PVRSRVAllocHandleNR(psPerProc->psHandleBase,
                        &psSGXRegHWRenderContextOUT->hHWRenderContext,
                        hHWRenderContextInt,
                        PVRSRV_HANDLE_TYPE_SGX_HW_RENDER_CONTEXT,
                        PVRSRV_HANDLE_ALLOC_FLAG_NONE);

    COMMIT_HANDLE_BATCH_OR_ERROR(psSGXRegHWRenderContextOUT->eError, psPerProc);

    return 0;
}


/*
 * Bridge handler: unregister a HW render context and release its
 * per-process handle.
 * Always returns 0; failures are reported via psRetOUT->eError.
 */
static IMG_INT
SGXUnregisterHWRenderContextBW(IMG_UINT32 ui32BridgeID,
                               PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_RENDER_CONTEXT *psSGXUnregHWRenderContextIN,
                               PVRSRV_BRIDGE_RETURN *psRetOUT,
                               PVRSRV_PER_PROCESS_DATA *psPerProc)
{
    IMG_HANDLE hHWRenderContextInt;

    PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_UNREGISTER_HW_RENDER_CONTEXT);

    psRetOUT->eError =
        PVRSRVLookupHandle(psPerProc->psHandleBase,
                           &hHWRenderContextInt,
                           psSGXUnregHWRenderContextIN->hHWRenderContext,
                           PVRSRV_HANDLE_TYPE_SGX_HW_RENDER_CONTEXT);
    if(psRetOUT->eError != PVRSRV_OK)
    {
        return 0;
    }

    psRetOUT->eError = SGXUnregisterHWRenderContextKM(hHWRenderContextInt,
                                                      psSGXUnregHWRenderContextIN->bForceCleanup);
    if(psRetOUT->eError != PVRSRV_OK)
    {
        return 0;
    }

    /* Only drop the per-process handle once the KM teardown succeeded. */
    psRetOUT->eError =
        PVRSRVReleaseHandle(psPerProc->psHandleBase,
                            psSGXUnregHWRenderContextIN->hHWRenderContext,
                            PVRSRV_HANDLE_TYPE_SGX_HW_RENDER_CONTEXT);

    return 0;
}


/*
 * Bridge handler: register a HW transfer context.  Mirrors
 * SGXRegisterHWRenderContextBW with transfer-context types.
 * Always returns 0; failures are reported via the OUT structure's eError.
 */
static IMG_INT
SGXRegisterHWTransferContextBW(IMG_UINT32 ui32BridgeID,
                               PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_TRANSFER_CONTEXT *psSGXRegHWTransferContextIN,
                               PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_TRANSFER_CONTEXT *psSGXRegHWTransferContextOUT,
                               PVRSRV_PER_PROCESS_DATA *psPerProc)
{
    IMG_HANDLE hDevCookieInt;
    IMG_HANDLE hHWTransferContextInt;

    PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_REGISTER_HW_TRANSFER_CONTEXT);

    NEW_HANDLE_BATCH_OR_ERROR(psSGXRegHWTransferContextOUT->eError, psPerProc, 1);

    psSGXRegHWTransferContextOUT->eError =
        PVRSRVLookupHandle(psPerProc->psHandleBase,
                           &hDevCookieInt,
                           psSGXRegHWTransferContextIN->hDevCookie,
                           PVRSRV_HANDLE_TYPE_DEV_NODE);
    if(psSGXRegHWTransferContextOUT->eError != PVRSRV_OK)
    {
        return 0;
    }

    hHWTransferContextInt =
        SGXRegisterHWTransferContextKM(hDevCookieInt,
                                       psSGXRegHWTransferContextIN->pHWTransferContextCpuVAddr,
                                       psSGXRegHWTransferContextIN->ui32HWTransferContextSize,
                                       psSGXRegHWTransferContextIN->ui32OffsetToPDDevPAddr,
                                       psSGXRegHWTransferContextIN->hDevMemContext,
                                       &psSGXRegHWTransferContextOUT->sHWTransferContextDevVAddr,
                                       psPerProc);

    if (hHWTransferContextInt == IMG_NULL)
    {
        psSGXRegHWTransferContextOUT->eError = PVRSRV_ERROR_UNABLE_TO_REGISTER_CONTEXT;
        return 0;
    }

    PVRSRVAllocHandleNR(psPerProc->psHandleBase,
                        &psSGXRegHWTransferContextOUT->hHWTransferContext,
                        hHWTransferContextInt,
                        PVRSRV_HANDLE_TYPE_SGX_HW_TRANSFER_CONTEXT,
                        PVRSRV_HANDLE_ALLOC_FLAG_NONE);

    COMMIT_HANDLE_BATCH_OR_ERROR(psSGXRegHWTransferContextOUT->eError, psPerProc);

    return 0;
}


/*
 * Bridge handler: unregister a HW transfer context and release its
 * per-process handle.  Mirrors SGXUnregisterHWRenderContextBW.
 * Always returns 0; failures are reported via psRetOUT->eError.
 */
static IMG_INT
SGXUnregisterHWTransferContextBW(IMG_UINT32 ui32BridgeID,
                                 PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_TRANSFER_CONTEXT *psSGXUnregHWTransferContextIN,
                                 PVRSRV_BRIDGE_RETURN *psRetOUT,
                                 PVRSRV_PER_PROCESS_DATA *psPerProc)
{
    IMG_HANDLE hHWTransferContextInt;

    PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_UNREGISTER_HW_TRANSFER_CONTEXT);

    psRetOUT->eError =
        PVRSRVLookupHandle(psPerProc->psHandleBase,
                           &hHWTransferContextInt,
                           psSGXUnregHWTransferContextIN->hHWTransferContext,
                           PVRSRV_HANDLE_TYPE_SGX_HW_TRANSFER_CONTEXT);
    if(psRetOUT->eError != PVRSRV_OK)
    {
        return 0;
    }

    psRetOUT->eError = SGXUnregisterHWTransferContextKM(hHWTransferContextInt,
                                                        psSGXUnregHWTransferContextIN->bForceCleanup);
    if(psRetOUT->eError != PVRSRV_OK)
    {
        return 0;
    }

    psRetOUT->eError =
        PVRSRVReleaseHandle(psPerProc->psHandleBase,
                            psSGXUnregHWTransferContextIN->hHWTransferContext,
                            PVRSRV_HANDLE_TYPE_SGX_HW_TRANSFER_CONTEXT);

    return 0;
}


#if defined(SGX_FEATURE_2D_HARDWARE)
/*
 * Bridge handler: register a HW 2D context (2D-core variants only).
 * Mirrors SGXRegisterHWRenderContextBW with 2D-context types.
 * Always returns 0; failures are reported via the OUT structure's eError.
 */
static IMG_INT
SGXRegisterHW2DContextBW(IMG_UINT32 ui32BridgeID,
                         PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_2D_CONTEXT *psSGXRegHW2DContextIN,
                         PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_2D_CONTEXT *psSGXRegHW2DContextOUT,
                         PVRSRV_PER_PROCESS_DATA *psPerProc)
{
    IMG_HANDLE hDevCookieInt;
    IMG_HANDLE hHW2DContextInt;

    PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_REGISTER_HW_2D_CONTEXT);

    NEW_HANDLE_BATCH_OR_ERROR(psSGXRegHW2DContextOUT->eError, psPerProc, 1);

    psSGXRegHW2DContextOUT->eError =
        PVRSRVLookupHandle(psPerProc->psHandleBase,
                           &hDevCookieInt,
                           psSGXRegHW2DContextIN->hDevCookie,
                           PVRSRV_HANDLE_TYPE_DEV_NODE);
    if(psSGXRegHW2DContextOUT->eError != PVRSRV_OK)
    {
        return 0;
    }

    hHW2DContextInt =
        SGXRegisterHW2DContextKM(hDevCookieInt,
                                 (IMG_CPU_VIRTADDR*)psSGXRegHW2DContextIN->hHW2DContextCpuVAddr,
                                 psSGXRegHW2DContextIN->ui32HW2DContextSize,
                                 psSGXRegHW2DContextIN->ui32OffsetToPDDevPAddr,
                                 psSGXRegHW2DContextIN->hDevMemContext,
                                 &psSGXRegHW2DContextOUT->sHW2DContextDevVAddr,
                                 psPerProc);

    if (hHW2DContextInt == IMG_NULL)
    {
        psSGXRegHW2DContextOUT->eError = PVRSRV_ERROR_UNABLE_TO_REGISTER_CONTEXT;
        return 0;
    }

    PVRSRVAllocHandleNR(psPerProc->psHandleBase,
                        &psSGXRegHW2DContextOUT->hHW2DContext,
                        hHW2DContextInt,
                        PVRSRV_HANDLE_TYPE_SGX_HW_2D_CONTEXT,
                        PVRSRV_HANDLE_ALLOC_FLAG_NONE);

    COMMIT_HANDLE_BATCH_OR_ERROR(psSGXRegHW2DContextOUT->eError, psPerProc);

    return 0;
}


/*
 * Bridge handler: unregister a HW 2D context and release its per-process
 * handle.  Mirrors SGXUnregisterHWRenderContextBW.
 * Always returns 0; failures are reported via psRetOUT->eError.
 */
static IMG_INT
SGXUnregisterHW2DContextBW(IMG_UINT32 ui32BridgeID,
                           PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_2D_CONTEXT *psSGXUnregHW2DContextIN,
                           PVRSRV_BRIDGE_RETURN *psRetOUT,
                           PVRSRV_PER_PROCESS_DATA *psPerProc)
{
    IMG_HANDLE hHW2DContextInt;

    PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_UNREGISTER_HW_2D_CONTEXT);

    psRetOUT->eError =
        PVRSRVLookupHandle(psPerProc->psHandleBase,
                           &hHW2DContextInt,
                           psSGXUnregHW2DContextIN->hHW2DContext,
                           PVRSRV_HANDLE_TYPE_SGX_HW_2D_CONTEXT);
    if(psRetOUT->eError != PVRSRV_OK)
    {
        return 0;
    }

    psRetOUT->eError = SGXUnregisterHW2DContextKM(hHW2DContextInt,
                                                  psSGXUnregHW2DContextIN->bForceCleanup);
    if(psRetOUT->eError != PVRSRV_OK)
    {
        return 0;
    }

    psRetOUT->eError =
        PVRSRVReleaseHandle(psPerProc->psHandleBase,
                            psSGXUnregHW2DContextIN->hHW2DContext,
                            PVRSRV_HANDLE_TYPE_SGX_HW_2D_CONTEXT);

    return 0;
}
#endif /* #if defined(SGX_FEATURE_2D_HARDWARE) */

static IMG_INT
SGXFlushHWRenderTargetBW(IMG_UINT32 ui32BridgeID,
PVRSRV_BRIDGE_IN_SGX_FLUSH_HW_RENDER_TARGET *psSGXFlushHWRenderTargetIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_HANDLE hDevCookieInt; +// PVRSRV_SGXDEV_INFO *psDevInfo; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_FLUSH_HW_RENDER_TARGET); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDevCookieInt, + psSGXFlushHWRenderTargetIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + +// psDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookieInt)->pvDevice; + + psRetOUT->eError = SGXFlushHWRenderTargetKM(hDevCookieInt, psSGXFlushHWRenderTargetIN->sHWRTDataSetDevVAddr, IMG_FALSE); + + return 0; +} + + +static IMG_INT +SGX2DQueryBlitsCompleteBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_2DQUERYBLTSCOMPLETE *ps2DQueryBltsCompleteIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_HANDLE hDevCookieInt; + IMG_VOID *pvSyncInfo; + PVRSRV_SGXDEV_INFO *psDevInfo; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_2DQUERYBLTSCOMPLETE); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDevCookieInt, + ps2DQueryBltsCompleteIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvSyncInfo, + ps2DQueryBltsCompleteIN->hKernSyncInfo, + PVRSRV_HANDLE_TYPE_SYNC_INFO); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookieInt)->pvDevice; + + psRetOUT->eError = + SGX2DQueryBlitsCompleteKM(psDevInfo, + (PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo, + ps2DQueryBltsCompleteIN->bWaitForComplete); + + return 0; +} + + +static IMG_INT +SGXFindSharedPBDescBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_SGXFINDSHAREDPBDESC *psSGXFindSharedPBDescIN, + PVRSRV_BRIDGE_OUT_SGXFINDSHAREDPBDESC 
*psSGXFindSharedPBDescOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_HANDLE hDevCookieInt; + PVRSRV_KERNEL_MEM_INFO *psSharedPBDescKernelMemInfo; + PVRSRV_KERNEL_MEM_INFO *psHWPBDescKernelMemInfo; + PVRSRV_KERNEL_MEM_INFO *psBlockKernelMemInfo; + PVRSRV_KERNEL_MEM_INFO *psHWBlockKernelMemInfo; + PVRSRV_KERNEL_MEM_INFO **ppsSharedPBDescSubKernelMemInfos = IMG_NULL; + IMG_UINT32 ui32SharedPBDescSubKernelMemInfosCount = 0; + IMG_UINT32 i; + IMG_HANDLE hSharedPBDesc = IMG_NULL; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC); + + NEW_HANDLE_BATCH_OR_ERROR(psSGXFindSharedPBDescOUT->eError, psPerProc, PVRSRV_BRIDGE_SGX_SHAREDPBDESC_MAX_SUBMEMINFOS + 4); + + psSGXFindSharedPBDescOUT->hSharedPBDesc = IMG_NULL; + + psSGXFindSharedPBDescOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDevCookieInt, + psSGXFindSharedPBDescIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + if(psSGXFindSharedPBDescOUT->eError != PVRSRV_OK) + goto PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC_EXIT; + + psSGXFindSharedPBDescOUT->eError = + SGXFindSharedPBDescKM(psPerProc, hDevCookieInt, + psSGXFindSharedPBDescIN->bLockOnFailure, + psSGXFindSharedPBDescIN->ui32TotalPBSize, + &hSharedPBDesc, + &psSharedPBDescKernelMemInfo, + &psHWPBDescKernelMemInfo, + &psBlockKernelMemInfo, + &psHWBlockKernelMemInfo, + &ppsSharedPBDescSubKernelMemInfos, + &ui32SharedPBDescSubKernelMemInfosCount); + if(psSGXFindSharedPBDescOUT->eError != PVRSRV_OK) + goto PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC_EXIT; + + PVR_ASSERT(ui32SharedPBDescSubKernelMemInfosCount + <= PVRSRV_BRIDGE_SGX_SHAREDPBDESC_MAX_SUBMEMINFOS); + + psSGXFindSharedPBDescOUT->ui32SharedPBDescSubKernelMemInfoHandlesCount = + ui32SharedPBDescSubKernelMemInfosCount; + + if(hSharedPBDesc == IMG_NULL) + { + psSGXFindSharedPBDescOUT->hSharedPBDescKernelMemInfoHandle = 0; + /* It's not an error if we don't find a buffer, + * we just return NULL */ + goto PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC_EXIT; + } + + 
PVRSRVAllocHandleNR(psPerProc->psHandleBase, + &psSGXFindSharedPBDescOUT->hSharedPBDesc, + hSharedPBDesc, + PVRSRV_HANDLE_TYPE_SHARED_PB_DESC, + PVRSRV_HANDLE_ALLOC_FLAG_NONE); + + /* + * We allocate handles of type PVRSRV_HANDLE_TYPE_MEM_INFO_REF here, + * as the process doesn't own the underlying memory, and so should + * only be allowed a restricted set of operations on it, such as + * mapping it into its address space. + */ + PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, + &psSGXFindSharedPBDescOUT->hSharedPBDescKernelMemInfoHandle, + psSharedPBDescKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO_REF, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + psSGXFindSharedPBDescOUT->hSharedPBDesc); + + PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, + &psSGXFindSharedPBDescOUT->hHWPBDescKernelMemInfoHandle, + psHWPBDescKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO_REF, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + psSGXFindSharedPBDescOUT->hSharedPBDesc); + + PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, + &psSGXFindSharedPBDescOUT->hBlockKernelMemInfoHandle, + psBlockKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO_REF, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + psSGXFindSharedPBDescOUT->hSharedPBDesc); + + PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, + &psSGXFindSharedPBDescOUT->hHWBlockKernelMemInfoHandle, + psHWBlockKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO_REF, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + psSGXFindSharedPBDescOUT->hSharedPBDesc); + + + for(i=0; ipsHandleBase, + &psSGXFindSharedPBDescOut->ahSharedPBDescSubKernelMemInfoHandles[i], + ppsSharedPBDescSubKernelMemInfos[i], + PVRSRV_HANDLE_TYPE_MEM_INFO_REF, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + psSGXFindSharedPBDescOUT->hSharedPBDescKernelMemInfoHandle); + } + +PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC_EXIT: + if (ppsSharedPBDescSubKernelMemInfos != IMG_NULL) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(PVRSRV_KERNEL_MEM_INFO *) * ui32SharedPBDescSubKernelMemInfosCount, + ppsSharedPBDescSubKernelMemInfos, + IMG_NULL); + } + + 
if(psSGXFindSharedPBDescOUT->eError != PVRSRV_OK) + { + if(hSharedPBDesc != IMG_NULL) + { + SGXUnrefSharedPBDescKM(hSharedPBDesc); + } + } + else + { + COMMIT_HANDLE_BATCH_OR_ERROR(psSGXFindSharedPBDescOUT->eError, psPerProc); + } + + return 0; +} + + +static IMG_INT +SGXUnrefSharedPBDescBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_SGXUNREFSHAREDPBDESC *psSGXUnrefSharedPBDescIN, + PVRSRV_BRIDGE_OUT_SGXUNREFSHAREDPBDESC *psSGXUnrefSharedPBDescOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_HANDLE hSharedPBDesc; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_UNREFSHAREDPBDESC); + + psSGXUnrefSharedPBDescOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &hSharedPBDesc, + psSGXUnrefSharedPBDescIN->hSharedPBDesc, + PVRSRV_HANDLE_TYPE_SHARED_PB_DESC); + if(psSGXUnrefSharedPBDescOUT->eError != PVRSRV_OK) + { + return 0; + } + + psSGXUnrefSharedPBDescOUT->eError = + SGXUnrefSharedPBDescKM(hSharedPBDesc); + + if(psSGXUnrefSharedPBDescOUT->eError != PVRSRV_OK) + { + return 0; + } + + psSGXUnrefSharedPBDescOUT->eError = + PVRSRVReleaseHandle(psPerProc->psHandleBase, + psSGXUnrefSharedPBDescIN->hSharedPBDesc, + PVRSRV_HANDLE_TYPE_SHARED_PB_DESC); + + return 0; +} + + +static IMG_INT +SGXAddSharedPBDescBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_SGXADDSHAREDPBDESC *psSGXAddSharedPBDescIN, + PVRSRV_BRIDGE_OUT_SGXADDSHAREDPBDESC *psSGXAddSharedPBDescOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_HANDLE hDevCookieInt; + PVRSRV_KERNEL_MEM_INFO *psSharedPBDescKernelMemInfo; + PVRSRV_KERNEL_MEM_INFO *psHWPBDescKernelMemInfo; + PVRSRV_KERNEL_MEM_INFO *psBlockKernelMemInfo; + PVRSRV_KERNEL_MEM_INFO *psHWBlockKernelMemInfo; + IMG_UINT32 ui32KernelMemInfoHandlesCount = + psSGXAddSharedPBDescIN->ui32KernelMemInfoHandlesCount; + IMG_INT ret = 0; + IMG_HANDLE *phKernelMemInfoHandles = IMG_NULL; + PVRSRV_KERNEL_MEM_INFO **ppsKernelMemInfos = IMG_NULL; + IMG_UINT32 i; + PVRSRV_ERROR eError; + IMG_HANDLE hSharedPBDesc = IMG_NULL; + + 
PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC); + + NEW_HANDLE_BATCH_OR_ERROR(psSGXAddSharedPBDescOUT->eError, psPerProc, 1); + + psSGXAddSharedPBDescOUT->hSharedPBDesc = IMG_NULL; + + PVR_ASSERT(ui32KernelMemInfoHandlesCount + <= PVRSRV_BRIDGE_SGX_SHAREDPBDESC_MAX_SUBMEMINFOS); + + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDevCookieInt, + psSGXAddSharedPBDescIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + if(eError != PVRSRV_OK) + { + goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT; + } + + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + (IMG_VOID **)&psSharedPBDescKernelMemInfo, + psSGXAddSharedPBDescIN->hSharedPBDescKernelMemInfo, + PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO); + if(eError != PVRSRV_OK) + { + goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT; + } + + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + (IMG_VOID **)&psHWPBDescKernelMemInfo, + psSGXAddSharedPBDescIN->hHWPBDescKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if(eError != PVRSRV_OK) + { + goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT; + } + + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + (IMG_VOID **)&psBlockKernelMemInfo, + psSGXAddSharedPBDescIN->hBlockKernelMemInfo, + PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO); + if(eError != PVRSRV_OK) + { + goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT; + } + + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + (IMG_VOID **)&psHWBlockKernelMemInfo, + psSGXAddSharedPBDescIN->hHWBlockKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if(eError != PVRSRV_OK) + { + goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT; + } + + + if(!OSAccessOK(PVR_VERIFY_READ, + psSGXAddSharedPBDescIN->phKernelMemInfoHandles, + ui32KernelMemInfoHandlesCount * sizeof(IMG_HANDLE))) + { + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC:" + " Invalid phKernelMemInfos pointer", __FUNCTION__)); + ret = -EFAULT; + goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT; + } + + 
eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + ui32KernelMemInfoHandlesCount * sizeof(IMG_HANDLE), + (IMG_VOID **)&phKernelMemInfoHandles, + 0, + "Array of Handles"); + if (eError != PVRSRV_OK) + { + goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT; + } + + if(CopyFromUserWrapper(psPerProc, + ui32BridgeID, + phKernelMemInfoHandles, + psSGXAddSharedPBDescIN->phKernelMemInfoHandles, + ui32KernelMemInfoHandlesCount * sizeof(IMG_HANDLE)) + != PVRSRV_OK) + { + ret = -EFAULT; + goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT; + } + + eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + ui32KernelMemInfoHandlesCount * sizeof(PVRSRV_KERNEL_MEM_INFO *), + (IMG_VOID **)&ppsKernelMemInfos, + 0, + "Array of pointers to Kernel Memory Info"); + if (eError != PVRSRV_OK) + { + goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT; + } + + for(i=0; ipsHandleBase, + (IMG_VOID **)&ppsKernelMemInfos[i], + phKernelMemInfoHandles[i], + PVRSRV_HANDLE_TYPE_MEM_INFO); + if(eError != PVRSRV_OK) + { + goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT; + } + } + + /* + * Release all the handles we've just looked up, as none + * of the associated resources will be valid for access via + * those handles once we return from SGXAddSharedPBDesc. 
+ */ + /* PRQA S 3198 2 */ /* override redundant warning as PVR_ASSERT is ignored by QAC */ + eError = PVRSRVReleaseHandle(psPerProc->psHandleBase, + psSGXAddSharedPBDescIN->hSharedPBDescKernelMemInfo, + PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO); + PVR_ASSERT(eError == PVRSRV_OK); + + /* PRQA S 3198 2 */ /* override redundant warning as PVR_ASSERT is ignored by QAC */ + eError = PVRSRVReleaseHandle(psPerProc->psHandleBase, + psSGXAddSharedPBDescIN->hHWPBDescKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + PVR_ASSERT(eError == PVRSRV_OK); + + /* PRQA S 3198 2 */ /* override redundant warning as PVR_ASSERT is ignored by QAC */ + eError = PVRSRVReleaseHandle(psPerProc->psHandleBase, + psSGXAddSharedPBDescIN->hBlockKernelMemInfo, + PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO); + PVR_ASSERT(eError == PVRSRV_OK); + + /* PRQA S 3198 2 */ /* override redundant warning as PVR_ASSERT is ignored by QAC */ + eError = PVRSRVReleaseHandle(psPerProc->psHandleBase, + psSGXAddSharedPBDescIN->hHWBlockKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + PVR_ASSERT(eError == PVRSRV_OK); + + for(i=0; ipsHandleBase, + phKernelMemInfoHandles[i], + PVRSRV_HANDLE_TYPE_MEM_INFO); + PVR_ASSERT(eError == PVRSRV_OK); + } + + eError = SGXAddSharedPBDescKM(psPerProc, hDevCookieInt, + psSharedPBDescKernelMemInfo, + psHWPBDescKernelMemInfo, + psBlockKernelMemInfo, + psHWBlockKernelMemInfo, + psSGXAddSharedPBDescIN->ui32TotalPBSize, + &hSharedPBDesc, + ppsKernelMemInfos, + ui32KernelMemInfoHandlesCount, + psSGXAddSharedPBDescIN->sHWPBDescDevVAddr); + + + if (eError != PVRSRV_OK) + { + goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT; + } + + PVRSRVAllocHandleNR(psPerProc->psHandleBase, + &psSGXAddSharedPBDescOUT->hSharedPBDesc, + hSharedPBDesc, + PVRSRV_HANDLE_TYPE_SHARED_PB_DESC, + PVRSRV_HANDLE_ALLOC_FLAG_NONE); + +PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT: + + if(phKernelMemInfoHandles) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, + psSGXAddSharedPBDescIN->ui32KernelMemInfoHandlesCount * 
sizeof(IMG_HANDLE), + (IMG_VOID *)phKernelMemInfoHandles, + 0); + } + if(ppsKernelMemInfos) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, + psSGXAddSharedPBDescIN->ui32KernelMemInfoHandlesCount * sizeof(PVRSRV_KERNEL_MEM_INFO *), + (IMG_VOID *)ppsKernelMemInfos, + 0); + } + + if(ret == 0 && eError == PVRSRV_OK) + { + COMMIT_HANDLE_BATCH_OR_ERROR(psSGXAddSharedPBDescOUT->eError, psPerProc); + } + + psSGXAddSharedPBDescOUT->eError = eError; + + return ret; +} + +static IMG_INT +SGXGetInfoForSrvinitBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_SGXINFO_FOR_SRVINIT *psSGXInfoForSrvinitIN, + PVRSRV_BRIDGE_OUT_SGXINFO_FOR_SRVINIT *psSGXInfoForSrvinitOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_HANDLE hDevCookieInt; + IMG_UINT32 i; + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGXINFO_FOR_SRVINIT); + + NEW_HANDLE_BATCH_OR_ERROR(psSGXInfoForSrvinitOUT->eError, psPerProc, PVRSRV_MAX_CLIENT_HEAPS); + + if(!psPerProc->bInitProcess) + { + psSGXInfoForSrvinitOUT->eError = PVRSRV_ERROR_PROCESS_NOT_INITIALISED; + return 0; + } + + psSGXInfoForSrvinitOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt, + psSGXInfoForSrvinitIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + + if(psSGXInfoForSrvinitOUT->eError != PVRSRV_OK) + { + return 0; + } + + psSGXInfoForSrvinitOUT->eError = + SGXGetInfoForSrvinitKM(hDevCookieInt, + &psSGXInfoForSrvinitOUT->sInitInfo); + + if(psSGXInfoForSrvinitOUT->eError != PVRSRV_OK) + { + return 0; + } + + for(i = 0; i < PVRSRV_MAX_CLIENT_HEAPS; i++) + { + PVRSRV_HEAP_INFO *psHeapInfo; + + psHeapInfo = &psSGXInfoForSrvinitOUT->sInitInfo.asHeapInfo[i]; + + if (psHeapInfo->ui32HeapID != (IMG_UINT32)SGX_UNDEFINED_HEAP_ID) + { + IMG_HANDLE hDevMemHeapExt; + + if (psHeapInfo->hDevMemHeap != IMG_NULL) + { + /* Allocate heap handle */ + PVRSRVAllocHandleNR(psPerProc->psHandleBase, + &hDevMemHeapExt, + psHeapInfo->hDevMemHeap, + PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP, + PVRSRV_HANDLE_ALLOC_FLAG_SHARED); + psHeapInfo->hDevMemHeap = 
hDevMemHeapExt; + } + } + } + + COMMIT_HANDLE_BATCH_OR_ERROR(psSGXInfoForSrvinitOUT->eError, psPerProc); + + return 0; +} + +#if defined(PDUMP) +// PRQA S 5120++ +/***************************************************************************** + FUNCTION : DumpBufferArray + PURPOSE : PDUMP information in stored buffer array + PARAMETERS : + RETURNS : +*****************************************************************************/ +static IMG_VOID +DumpBufferArray(PVRSRV_PER_PROCESS_DATA *psPerProc, + PSGX_KICKTA_DUMP_BUFFER psBufferArray, + IMG_UINT32 ui32BufferArrayLength, + IMG_BOOL bDumpPolls) +{ + IMG_UINT32 i; + + for (i=0; ihName; + if (!pszName) + { + pszName = "Nameless buffer"; + } + + hUniqueTag = MAKEUNIQUETAG((PVRSRV_KERNEL_MEM_INFO *)psBuffer->hKernelMemInfo); + + #if defined(SUPPORT_SGX_NEW_STATUS_VALS) + psCtrlMemInfoKM = ((PVRSRV_KERNEL_MEM_INFO *)psBuffer->hCtrlKernelMemInfo); + ui32Offset = psBuffer->sCtrlDevVAddr.uiAddr - psCtrlMemInfoKM->sDevVAddr.uiAddr; + #else + psCtrlMemInfoKM = ((PVRSRV_KERNEL_MEM_INFO *)psBuffer->hKernelMemInfo)->psKernelSyncInfo->psSyncDataMemInfoKM; + ui32Offset = offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete); + #endif + + if (psBuffer->ui32Start <= psBuffer->ui32End) + { + if (bDumpPolls) + { + PDUMPCOMMENTWITHFLAGS(0, "Wait for %s space\r\n", pszName); + PDUMPCBP(psCtrlMemInfoKM, + ui32Offset, + psBuffer->ui32Start, + psBuffer->ui32SpaceUsed, + psBuffer->ui32BufferSize, + 0, + MAKEUNIQUETAG(psCtrlMemInfoKM)); + } + + PDUMPCOMMENTWITHFLAGS(0, "%s\r\n", pszName); + PDUMPMEMUM(psPerProc, + IMG_NULL, + psBuffer->hLinAddr, + (PVRSRV_KERNEL_MEM_INFO*)psBuffer->hKernelMemInfo, + psBuffer->ui32Start, + psBuffer->ui32End - psBuffer->ui32Start, + 0, + hUniqueTag); + } + else + { + /* + Range of data wraps the end of the buffer so it needs to be dumped in two sections + */ + + if (bDumpPolls) + { + PDUMPCOMMENTWITHFLAGS(0, "Wait for %s space\r\n", pszName); + PDUMPCBP(psCtrlMemInfoKM, + ui32Offset, + psBuffer->ui32Start, + 
psBuffer->ui32BackEndLength, + psBuffer->ui32BufferSize, + 0, + MAKEUNIQUETAG(psCtrlMemInfoKM)); + } + PDUMPCOMMENTWITHFLAGS(0, "%s (part 1)\r\n", pszName); + PDUMPMEMUM(psPerProc, + IMG_NULL, + psBuffer->hLinAddr, + (PVRSRV_KERNEL_MEM_INFO*)psBuffer->hKernelMemInfo, + psBuffer->ui32Start, + psBuffer->ui32BackEndLength, + 0, + hUniqueTag); + + if (bDumpPolls) + { + PDUMPMEMPOL(psCtrlMemInfoKM, + ui32Offset, + 0, + 0xFFFFFFFF, + PDUMP_POLL_OPERATOR_NOTEQUAL, + 0, + MAKEUNIQUETAG(psCtrlMemInfoKM)); + + PDUMPCOMMENTWITHFLAGS(0, "Wait for %s space\r\n", pszName); + PDUMPCBP(psCtrlMemInfoKM, + ui32Offset, + 0, + psBuffer->ui32End, + psBuffer->ui32BufferSize, + 0, + MAKEUNIQUETAG(psCtrlMemInfoKM)); + } + PDUMPCOMMENTWITHFLAGS(0, "%s (part 2)\r\n", pszName); + PDUMPMEMUM(psPerProc, + IMG_NULL, + psBuffer->hLinAddr, + (PVRSRV_KERNEL_MEM_INFO*)psBuffer->hKernelMemInfo, + 0, + psBuffer->ui32End, + 0, + hUniqueTag); + } + } +} +static IMG_INT +SGXPDumpBufferArrayBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_PDUMP_BUFFER_ARRAY *psPDumpBufferArrayIN, + IMG_VOID *psBridgeOut, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_UINT32 i; +#if defined(__QNXNTO__) + const IMG_UINT32 NAME_BUFFER_SIZE = 30; + IMG_PCHAR pszNameBuffer, pszName; + IMG_UINT32 ui32NameBufferArraySize, ui32NameLength; +#endif + SGX_KICKTA_DUMP_BUFFER *psKickTADumpBuffer; + IMG_UINT32 ui32BufferArrayLength = + psPDumpBufferArrayIN->ui32BufferArrayLength; + IMG_UINT32 ui32BufferArraySize = + ui32BufferArrayLength * sizeof(SGX_KICKTA_DUMP_BUFFER); + PVRSRV_ERROR eError = PVRSRV_ERROR_TOO_FEW_BUFFERS; + + PVR_UNREFERENCED_PARAMETER(psBridgeOut); + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_PDUMP_BUFFER_ARRAY); + + if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + ui32BufferArraySize, + (IMG_PVOID *)&psKickTADumpBuffer, 0, + "Array of Kick Tile Accelerator Dump Buffer") != PVRSRV_OK) + { + return -ENOMEM; + } + + if(CopyFromUserWrapper(psPerProc, + ui32BridgeID, + psKickTADumpBuffer, + 
psPDumpBufferArrayIN->hBufferArray, + ui32BufferArraySize) != PVRSRV_OK) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32BufferArraySize, psKickTADumpBuffer, 0); + /*not nulling pointer, out of scope*/ + return -EFAULT; + } + +#if defined (__QNXNTO__) + ui32NameBufferArraySize = ui32BufferArrayLength * NAME_BUFFER_SIZE; + if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, ui32NameBufferArraySize, + (IMG_PVOID *)&pszNameBuffer, 0, + "Kick Tile Accelerator Dump Buffer names") != PVRSRV_OK) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32BufferArraySize, psKickTADumpBuffer, 0); + return -ENOMEM; + } + + pszName = pszNameBuffer; + + for(i = 0; i < ui32BufferArrayLength; i++) + { + if (psKickTADumpBuffer[i].hName) + { + ui32NameLength = psKickTADumpBuffer[i].ui32NameLength; + if (ui32NameLength >= NAME_BUFFER_SIZE) + { + ui32NameLength = NAME_BUFFER_SIZE - 1; + } + + if (ui32NameLength && + (CopyFromUserWrapper(psPerProc, ui32BridgeID, pszName, + psKickTADumpBuffer[i].hName, ui32NameLength + 1) == PVRSRV_OK)) + { + pszName[NAME_BUFFER_SIZE - 1] = 0; + psKickTADumpBuffer[i].hName = (IMG_HANDLE)pszName; + pszName += NAME_BUFFER_SIZE; + } + else + { + PVR_DPF((PVR_DBG_WARNING, "Failed to read PDUMP buffer name")); + psKickTADumpBuffer[i].hName = 0; + } + } + } +#endif + + for(i = 0; i < ui32BufferArrayLength; i++) + { + IMG_VOID *pvMemInfo; + + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvMemInfo, + psKickTADumpBuffer[i].hKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRV_BRIDGE_SGX_PDUMP_BUFFER_ARRAY: " + "PVRSRVLookupHandle failed (%d)", eError)); + break; + } + psKickTADumpBuffer[i].hKernelMemInfo = pvMemInfo; + +#if defined(SUPPORT_SGX_NEW_STATUS_VALS) + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvMemInfo, + psKickTADumpBuffer[i].hCtrlKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRV_BRIDGE_SGX_PDUMP_BUFFER_ARRAY: " + 
"PVRSRVLookupHandle failed (%d)", eError)); + break; + } + psKickTADumpBuffer[i].hCtrlKernelMemInfo = pvMemInfo; +#endif + + } + + if(eError == PVRSRV_OK) + { + DumpBufferArray(psPerProc, + psKickTADumpBuffer, + ui32BufferArrayLength, + psPDumpBufferArrayIN->bDumpPolls); + } + + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32BufferArraySize, psKickTADumpBuffer, 0); +#if defined (__QNXNTO__) + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32NameBufferArraySize, pszNameBuffer, 0); +#endif + /*not nulling pointer, out of scope*/ + + return 0; +} + +static IMG_INT +SGXPDump3DSignatureRegistersBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_PDUMP_3D_SIGNATURE_REGISTERS *psPDump3DSignatureRegistersIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_UINT32 ui32RegisterArraySize = psPDump3DSignatureRegistersIN->ui32NumRegisters * sizeof(IMG_UINT32); + IMG_UINT32 *pui32Registers = IMG_NULL; + PVRSRV_SGXDEV_INFO *psDevInfo; +#if defined(SGX_FEATURE_MP) && defined(FIX_HW_BRN_27270) + IMG_UINT32 ui32RegVal = 0; +#endif + PVRSRV_DEVICE_NODE *psDeviceNode; + IMG_HANDLE hDevMemContextInt = 0; + IMG_UINT32 ui32MMUContextID; + IMG_INT ret = -EFAULT; + + PVR_UNREFERENCED_PARAMETER(psRetOUT); + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_PDUMP_3D_SIGNATURE_REGISTERS); + + if (ui32RegisterArraySize == 0) + { + goto ExitNoError; + } + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + (IMG_VOID**)&psDeviceNode, + psPDump3DSignatureRegistersIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + if(psRetOUT->eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PDumpTASignatureRegistersBW: hDevCookie lookup failed")); + goto Exit; + } + + psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice; + +#if defined(SGX_FEATURE_MP) && defined(FIX_HW_BRN_27270) + /* Enable all cores available */ + ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_CORE); + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_CORE, (SGX_FEATURE_MP_CORE_COUNT - 
1) << EUR_CR_MASTER_CORE_ENABLE_SHIFT); +#if defined(PDUMP) + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_MASTER_CORE, (SGX_FEATURE_MP_CORE_COUNT - 1) << EUR_CR_MASTER_CORE_ENABLE_SHIFT, + psPDump3DSignatureRegistersIN->bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0); +#endif +#endif + + if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + ui32RegisterArraySize, + (IMG_PVOID *)&pui32Registers, 0, + "Array of Registers") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PDump3DSignatureRegistersBW: OSAllocMem failed")); + goto Exit; + } + + if(CopyFromUserWrapper(psPerProc, + ui32BridgeID, + pui32Registers, + psPDump3DSignatureRegistersIN->hRegisters, + ui32RegisterArraySize) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PDump3DSignatureRegistersBW: CopyFromUserWrapper failed")); + goto Exit; + } + + PDump3DSignatureRegisters(&psDeviceNode->sDevId, + psPDump3DSignatureRegistersIN->ui32DumpFrameNum, + psPDump3DSignatureRegistersIN->bLastFrame, + pui32Registers, + psPDump3DSignatureRegistersIN->ui32NumRegisters); + + psRetOUT->eError = + PVRSRVLookupHandle( psPerProc->psHandleBase, + &hDevMemContextInt, + psPDump3DSignatureRegistersIN->hDevMemContext, + PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + /* look up the MMU context ID */ + PVR_ASSERT(psDeviceNode->pfnMMUGetContextID != IMG_NULL); + ui32MMUContextID = psDeviceNode->pfnMMUGetContextID((IMG_HANDLE)psDeviceNode->sDevMemoryInfo.pBMKernelContext); + + PDumpSignatureBuffer(&psDeviceNode->sDevId, + "out.tasig", "TA", 0, + psDevInfo->psKernelTASigBufferMemInfo->sDevVAddr, + (IMG_UINT32)psDevInfo->psKernelTASigBufferMemInfo->uAllocSize, + ui32MMUContextID, + 0 /*ui32PDumpFlags*/); + PDumpSignatureBuffer(&psDeviceNode->sDevId, + "out.3dsig", "3D", 0, + psDevInfo->psKernel3DSigBufferMemInfo->sDevVAddr, + (IMG_UINT32)psDevInfo->psKernel3DSigBufferMemInfo->uAllocSize, + ui32MMUContextID, + 0 /*ui32PDumpFlags*/); + +ExitNoError: + psRetOUT->eError = PVRSRV_OK; + ret = 0; +Exit: + if (pui32Registers 
!= IMG_NULL) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32RegisterArraySize, pui32Registers, 0); + } + +#if defined(SGX_FEATURE_MP) && defined(FIX_HW_BRN_27270) + if (psDevInfo != IMG_NULL) + { + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_CORE, ui32RegVal); +#if defined(PDUMP) + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_MASTER_CORE, ui32RegVal, + psPDump3DSignatureRegistersIN->bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0); +#endif + } +#endif + + return ret; +} + +static IMG_INT +SGXPDumpCounterRegistersBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_PDUMP_COUNTER_REGISTERS *psPDumpCounterRegistersIN, + IMG_VOID *psBridgeOut, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_UINT32 ui32RegisterArraySize = psPDumpCounterRegistersIN->ui32NumRegisters * sizeof(IMG_UINT32); + IMG_UINT32 *pui32Registers = IMG_NULL; + PVRSRV_DEVICE_NODE *psDeviceNode ; + IMG_INT ret = -EFAULT; + + PVR_UNREFERENCED_PARAMETER(psBridgeOut); + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_PDUMP_COUNTER_REGISTERS); + + if (ui32RegisterArraySize == 0) + { + goto ExitNoError; + } + + if(PVRSRVLookupHandle(psPerProc->psHandleBase, + (IMG_VOID**)&psDeviceNode, + psPDumpCounterRegistersIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "SGXPDumpCounterRegistersBW: hDevCookie lookup failed")); + ret = -ENOMEM; + goto Exit; + } + + if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + ui32RegisterArraySize, + (IMG_PVOID *)&pui32Registers, 0, + "Array of Registers") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PDumpCounterRegistersBW: OSAllocMem failed")); + ret = -ENOMEM; + goto Exit; + } + + if(CopyFromUserWrapper(psPerProc, + ui32BridgeID, + pui32Registers, + psPDumpCounterRegistersIN->hRegisters, + ui32RegisterArraySize) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PDumpCounterRegistersBW: CopyFromUserWrapper failed")); + goto Exit; + } + + PDumpCounterRegisters(&psDeviceNode->sDevId, + psPDumpCounterRegistersIN->ui32DumpFrameNum, + 
psPDumpCounterRegistersIN->bLastFrame, + pui32Registers, + psPDumpCounterRegistersIN->ui32NumRegisters); + +ExitNoError: + ret = 0; +Exit: + if (pui32Registers != IMG_NULL) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32RegisterArraySize, pui32Registers, 0); + } + + return ret; +} + +static IMG_INT +SGXPDumpTASignatureRegistersBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_PDUMP_TA_SIGNATURE_REGISTERS *psPDumpTASignatureRegistersIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_UINT32 ui32RegisterArraySize = psPDumpTASignatureRegistersIN->ui32NumRegisters * sizeof(IMG_UINT32); + IMG_UINT32 *pui32Registers = IMG_NULL; +#if defined(SGX_FEATURE_MP) && defined(FIX_HW_BRN_27270) + PVRSRV_SGXDEV_INFO *psDevInfo = IMG_NULL; + IMG_UINT32 ui32RegVal = 0; +#endif + PVRSRV_DEVICE_NODE *psDeviceNode; + IMG_INT ret = -EFAULT; + + PVR_UNREFERENCED_PARAMETER(psRetOUT); + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_PDUMP_TA_SIGNATURE_REGISTERS); + + if (ui32RegisterArraySize == 0) + { + goto ExitNoError; + } + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, (IMG_VOID**)&psDeviceNode, + psPDumpTASignatureRegistersIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + if(psRetOUT->eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PDumpTASignatureRegistersBW: hDevCookie lookup failed")); + goto Exit; + } + +#if defined(SGX_FEATURE_MP) && defined(FIX_HW_BRN_27270) + + psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice; + + /* Enable all cores available */ + ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_CORE); + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_CORE, (SGX_FEATURE_MP_CORE_COUNT - 1) << EUR_CR_MASTER_CORE_ENABLE_SHIFT); +#if defined(PDUMP) + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_MASTER_CORE, (SGX_FEATURE_MP_CORE_COUNT - 1) << EUR_CR_MASTER_CORE_ENABLE_SHIFT, + psPDumpTASignatureRegistersIN->bLastFrame ? 
PDUMP_FLAGS_LASTFRAME : 0); +#endif +#endif + + if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + ui32RegisterArraySize, + (IMG_PVOID *)&pui32Registers, 0, + "Array of Registers") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PDumpTASignatureRegistersBW: OSAllocMem failed")); + ret = -ENOMEM; + goto Exit; + } + + if(CopyFromUserWrapper(psPerProc, + ui32BridgeID, + pui32Registers, + psPDumpTASignatureRegistersIN->hRegisters, + ui32RegisterArraySize) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PDumpTASignatureRegistersBW: CopyFromUserWrapper failed")); + goto Exit; + } + + PDumpTASignatureRegisters(&psDeviceNode->sDevId, + psPDumpTASignatureRegistersIN->ui32DumpFrameNum, + psPDumpTASignatureRegistersIN->ui32TAKickCount, + psPDumpTASignatureRegistersIN->bLastFrame, + pui32Registers, + psPDumpTASignatureRegistersIN->ui32NumRegisters); + +ExitNoError: + psRetOUT->eError = PVRSRV_OK; + ret = 0; +Exit: + if (pui32Registers != IMG_NULL) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32RegisterArraySize, pui32Registers, 0); + } + +#if defined(SGX_FEATURE_MP) && defined(FIX_HW_BRN_27270) + if (psDevInfo != IMG_NULL) + { + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_CORE, ui32RegVal); +#if defined(PDUMP) + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_MASTER_CORE, ui32RegVal, + psPDumpTASignatureRegistersIN->bLastFrame ? 
PDUMP_FLAGS_LASTFRAME : 0); +#endif + } +#endif + + return ret; +} +//PRQA S 5120-- + + +static IMG_INT +SGXPDumpHWPerfCBBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_PDUMP_HWPERFCB *psPDumpHWPerfCBIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ +#if defined(SUPPORT_SGX_HWPERF) +#if defined(__linux__) || defined(__QNXNTO__) + PVRSRV_SGXDEV_INFO *psDevInfo; + PVRSRV_DEVICE_NODE *psDeviceNode; + IMG_HANDLE hDevMemContextInt = 0; + IMG_UINT32 ui32MMUContextID = 0; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_PDUMP_HWPERFCB); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, (IMG_VOID**)&psDeviceNode, + psPDumpHWPerfCBIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psDevInfo = psDeviceNode->pvDevice; + + psRetOUT->eError = + PVRSRVLookupHandle( psPerProc->psHandleBase, + &hDevMemContextInt, + psPDumpHWPerfCBIN->hDevMemContext, + PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + /* look up the MMU context ID */ + PVR_ASSERT(psDeviceNode->pfnMMUGetContextID != IMG_NULL); + ui32MMUContextID = psDeviceNode->pfnMMUGetContextID(hDevMemContextInt); + + PDumpHWPerfCBKM(&psDeviceNode->sDevId, + &psPDumpHWPerfCBIN->szFileName[0], + psPDumpHWPerfCBIN->ui32FileOffset, + psDevInfo->psKernelHWPerfCBMemInfo->sDevVAddr, + psDevInfo->psKernelHWPerfCBMemInfo->uAllocSize, + ui32MMUContextID, + psPDumpHWPerfCBIN->ui32PDumpFlags); + + return 0; +#else + PVR_UNREFERENCED_PARAMETER(ui32BridgeID); + PVR_UNREFERENCED_PARAMETER(psPDumpHWPerfCBIN); + PVR_UNREFERENCED_PARAMETER(psRetOUT); + PVR_UNREFERENCED_PARAMETER(psPerProc); + return 0; +#endif +#else + PVR_UNREFERENCED_PARAMETER(ui32BridgeID); + PVR_UNREFERENCED_PARAMETER(psPDumpHWPerfCBIN); + PVR_UNREFERENCED_PARAMETER(psRetOUT); + PVR_UNREFERENCED_PARAMETER(psPerProc); + return -EFAULT; +#endif /* defined(SUPPORT_SGX_HWPERF) */ +} + + +static IMG_INT 
+SGXPDumpSaveMemBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_PDUMP_SAVEMEM *psPDumpSaveMem, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_DEVICE_NODE *psDeviceNode; + IMG_HANDLE hDevMemContextInt = 0; + IMG_UINT32 ui32MMUContextID; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_PDUMP_SAVEMEM); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + (IMG_VOID**)&psDeviceNode, + psPDumpSaveMem->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = + PVRSRVLookupHandle( psPerProc->psHandleBase, + &hDevMemContextInt, + psPDumpSaveMem->hDevMemContext, + PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + /* look up the MMU context ID */ + PVR_ASSERT(psDeviceNode->pfnMMUGetContextID != IMG_NULL); + ui32MMUContextID = psDeviceNode->pfnMMUGetContextID(hDevMemContextInt); + + PDumpSaveMemKM(&psDeviceNode->sDevId, + &psPDumpSaveMem->szFileName[0], + psPDumpSaveMem->ui32FileOffset, + psPDumpSaveMem->sDevVAddr, + psPDumpSaveMem->ui32Size, + ui32MMUContextID, + psPDumpSaveMem->ui32PDumpFlags); + return 0; +} + +#endif /* PDUMP */ + + +/* PRQA S 0313,3635 END_SET_SGX */ /* function macro required this format */ +IMG_VOID SetSGXDispatchTableEntry(IMG_VOID) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_GETCLIENTINFO, SGXGetClientInfoBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_RELEASECLIENTINFO, SGXReleaseClientInfoBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_GETINTERNALDEVINFO, SGXGetInternalDevInfoBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_DOKICK, SGXDoKickBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_GETPHYSPAGEADDR, DummyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_READREGISTRYDWORD, DummyBW); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_2DQUERYBLTSCOMPLETE, SGX2DQueryBlitsCompleteBW); + +#if defined(TRANSFER_QUEUE) + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_SUBMITTRANSFER, 
SGXSubmitTransferBW); +#endif + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_GETMISCINFO, SGXGetMiscInfoBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SGXINFO_FOR_SRVINIT , SGXGetInfoForSrvinitBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_DEVINITPART2, SGXDevInitPart2BW); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC, SGXFindSharedPBDescBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_UNREFSHAREDPBDESC, SGXUnrefSharedPBDescBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC, SGXAddSharedPBDescBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_REGISTER_HW_RENDER_CONTEXT, SGXRegisterHWRenderContextBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_FLUSH_HW_RENDER_TARGET, SGXFlushHWRenderTargetBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_UNREGISTER_HW_RENDER_CONTEXT, SGXUnregisterHWRenderContextBW); +#if defined(SGX_FEATURE_2D_HARDWARE) + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_SUBMIT2D, SGXSubmit2DBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_REGISTER_HW_2D_CONTEXT, SGXRegisterHW2DContextBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_UNREGISTER_HW_2D_CONTEXT, SGXUnregisterHW2DContextBW); +#endif + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_REGISTER_HW_TRANSFER_CONTEXT, SGXRegisterHWTransferContextBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_UNREGISTER_HW_TRANSFER_CONTEXT, SGXUnregisterHWTransferContextBW); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_SCHEDULE_PROCESS_QUEUES, SGXScheduleProcessQueuesBW); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_READ_HWPERF_CB, SGXReadHWPerfCBBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_SET_RENDER_CONTEXT_PRIORITY, SGXSetRenderContextPriorityBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_SET_TRANSFER_CONTEXT_PRIORITY, SGXSetTransferContextPriorityBW); + +#if defined(PDUMP) + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_PDUMP_BUFFER_ARRAY, SGXPDumpBufferArrayBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_PDUMP_3D_SIGNATURE_REGISTERS, SGXPDump3DSignatureRegistersBW); + 
SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_PDUMP_COUNTER_REGISTERS, SGXPDumpCounterRegistersBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_PDUMP_TA_SIGNATURE_REGISTERS, SGXPDumpTASignatureRegistersBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_PDUMP_HWPERFCB, SGXPDumpHWPerfCBBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_PDUMP_SAVEMEM, SGXPDumpSaveMemBW); +#endif +} +/* PRQA L:END_SET_SGX */ /* end of setup overrides */ + +#endif /* SUPPORT_SGX */ diff --git a/sgx_km/eurasia_km/services4/srvkm/bridged/sgx/bridged_sgx_bridge.h b/sgx_km/eurasia_km/services4/srvkm/bridged/sgx/bridged_sgx_bridge.h new file mode 100644 index 0000000..3cb6282 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/bridged/sgx/bridged_sgx_bridge.h @@ -0,0 +1,61 @@ +/*************************************************************************/ /*! +@Title SGX Bridge Functionality +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Header for the PVR Bridge code +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef __BRIDGED_SGX_BRIDGE_H__ +#define __BRIDGED_SGX_BRIDGE_H__ + +#if defined (__cplusplus) +extern "C" { +#endif + + +IMG_VOID SetSGXDispatchTableEntry(IMG_VOID); + +#if defined (__cplusplus) +} +#endif + +#endif /* __BRIDGED_SGX_BRIDGE_H__ */ + +/****************************************************************************** + End of file (bridged_sgx_bridge.h) +******************************************************************************/ diff --git a/sgx_km/eurasia_km/services4/srvkm/common/buffer_manager.c b/sgx_km/eurasia_km/services4/srvkm/common/buffer_manager.c new file mode 100644 index 0000000..bd21023 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/common/buffer_manager.c @@ -0,0 +1,3485 @@ +/*************************************************************************/ /*! +@Title Buffer management functions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Manages buffers mapped into two memory spaces - cpu and device, + either of which can be virtual or physical. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "services_headers.h" + +#include "sysconfig.h" +#include "hash.h" +#include "ra.h" +#include "pdump_km.h" +#include "lists.h" + +static IMG_BOOL +ZeroBuf(BM_BUF *pBuf, BM_MAPPING *pMapping, IMG_SIZE_T uBytes, IMG_UINT32 ui32Flags); +static IMG_VOID +BM_FreeMemory (IMG_VOID *pH, IMG_UINTPTR_T base, BM_MAPPING *psMapping); +static IMG_BOOL +BM_ImportMemory(IMG_VOID *pH, IMG_SIZE_T uSize, + IMG_SIZE_T *pActualSize, BM_MAPPING **ppsMapping, + IMG_UINT32 ui32Flags, IMG_PVOID pvPrivData, + IMG_UINT32 ui32PrivDataLength, IMG_UINTPTR_T *pBase); + +static IMG_BOOL +DevMemoryAlloc (BM_CONTEXT *pBMContext, + BM_MAPPING *pMapping, + IMG_SIZE_T *pActualSize, + IMG_UINT32 ui32Flags, + IMG_UINT32 dev_vaddr_alignment, + IMG_DEV_VIRTADDR *pDevVAddr); +static IMG_VOID +DevMemoryFree (BM_MAPPING *pMapping); + +/*! +****************************************************************************** + + @Function AllocMemory + + @Description Allocate a buffer mapped into both cpu and device virtual + address spaces. This is now quite simple: + + 1. Choose whence to get the memory; + 2. Obtain memory from that source; + 3. Work out the actual buffer addresses in other spaces. + + In choosing whence to get the memory we work like this: + + 1. If an import arena exists, use unless BP_CONTIGUOUS is set; + 2. Use a contiguous pool. + + @Input pBMContext - BM context + @Input psBMHeap - BM heap + @Input psDevVAddr - device virtual address (optional) + @Input uSize - requested buffer size in bytes. + @Input ui32Flags - property flags for the buffer. + @Input uDevVAddrAlignment - required device virtual address + alignment, or 0. + @Input pvPrivData - opaque private data passed through to allocator + @Input ui32PrivDataLength - length of opaque private data + + @Output pui32TimeToDevMap - Time taken in us to map the allocated + buffer to device MMU. 
+ @Output pBuf - receives a pointer to a descriptor of the allocated + buffer. + @Return IMG_TRUE - Success + IMG_FALSE - Failed. + + *****************************************************************************/ +static IMG_BOOL +AllocMemory (BM_CONTEXT *pBMContext, + BM_HEAP *psBMHeap, + IMG_DEV_VIRTADDR *psDevVAddr, + IMG_SIZE_T uSize, + IMG_UINT32 ui32Flags, + IMG_UINT32 uDevVAddrAlignment, + IMG_PVOID pvPrivData, + IMG_UINT32 ui32PrivDataLength, + IMG_UINT32 ui32ChunkSize, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 ui32NumPhysChunks, + IMG_BOOL *pabMapChunk, + #if defined (PVRSRV_DEVMEM_TIME_STATS) + IMG_UINT32 *pui32TimeToDevMap, + #endif + BM_BUF *pBuf) +{ + BM_MAPPING *pMapping; + IMG_UINTPTR_T uOffset; + RA_ARENA *pArena = IMG_NULL; + + PVR_DPF ((PVR_DBG_MESSAGE, + "AllocMemory (uSize=0x%" SIZE_T_FMT_LEN "x, ui32Flags=0x%x, align=0x%x)", + uSize, ui32Flags, uDevVAddrAlignment)); + + /* + Decision depends on combination of DevVaddr generation + and backing RAM requirement + */ + if(ui32Flags & PVRSRV_MEM_RAM_BACKED_ALLOCATION) + { + if(ui32Flags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR) + { + /* user supplied DevVAddr, RAM backing */ + PVR_DPF ((PVR_DBG_ERROR, "AllocMemory: combination of DevVAddr management and RAM backing mode unsupported")); + return IMG_FALSE; + } + + /* BM supplied DevVAddr, RAM Backing */ + + /* check heap attributes */ + if(psBMHeap->ui32Attribs + & (PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG + |PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG)) + { + /* specify arena (VM+RAM)*/ + pArena = psBMHeap->pImportArena; + PVR_ASSERT(psBMHeap->sDevArena.psDeviceMemoryHeapInfo->ui32Attribs & PVRSRV_MEM_RAM_BACKED_ALLOCATION); + } + else + { + PVR_DPF ((PVR_DBG_ERROR, "AllocMemory: backing store type doesn't match heap")); + return IMG_FALSE; + } + + /* Now allocate from the arena we chose above. 
*/ + if (ui32Flags & PVRSRV_MEM_SPARSE) + { + IMG_BOOL bSuccess; + IMG_SIZE_T uActualSize; +#if defined (PVRSRV_DEVMEM_TIME_STATS) + IMG_UINT64 ui64TimeStart; +#endif + + /* Allocate physical memory */ + bSuccess = BM_ImportMemory(psBMHeap, + ui32ChunkSize * ui32NumPhysChunks, + &uActualSize, + &pMapping, + ui32Flags, + pvPrivData, + ui32PrivDataLength, + IMG_NULL); /* We allocate VM space */ + + if (!bSuccess) + { + PVR_DPF((PVR_DBG_ERROR, "AllocMemory: BM_ImportMemory failed")); + return IMG_FALSE; + } + + if (uActualSize != ui32ChunkSize * ui32NumPhysChunks) + { + /* + Most likely the chunksize was not host page multiple so + return with an error + */ + PVR_DPF((PVR_DBG_ERROR, "AllocMemory: Failed to allocate memory for sparse allocation")); + BM_FreeMemory(pArena, IMG_NULL, pMapping); + return IMG_FALSE; + } + + pMapping->uSizeVM = ui32ChunkSize * ui32NumVirtChunks; + uSize = pMapping->uSizeVM; + pMapping->ui32ChunkSize = ui32ChunkSize; + pMapping->ui32NumVirtChunks = ui32NumVirtChunks; + pMapping->ui32NumPhysChunks = ui32NumPhysChunks; + pMapping->pabMapChunk = pabMapChunk; + +#if defined (PVRSRV_DEVMEM_TIME_STATS) + ui64TimeStart = OSClockMonotonicus(); +#endif + /* Allocate VA space and map in the physical memory */ + bSuccess = DevMemoryAlloc (pBMContext, + pMapping, + IMG_NULL, + ui32Flags, + uDevVAddrAlignment, + &pMapping->DevVAddr); + if (!bSuccess) + { + PVR_DPF((PVR_DBG_ERROR, + "AllocMemory: Failed to allocate device memory")); + BM_FreeMemory(pArena, IMG_NULL, pMapping); + return IMG_FALSE; + } + +#if defined (PVRSRV_DEVMEM_TIME_STATS) + pMapping->ui32TimeToDevMap = OSClockMonotonicus() - ui64TimeStart; +#endif + /* uDevVAddrAlignment is currently set to zero so QAC generates warning which we override */ + /* PRQA S 3356,3358 1 */ + PVR_ASSERT (uDevVAddrAlignment>1?(pMapping->DevVAddr.uiAddr%uDevVAddrAlignment)==0:1); + pBuf->DevVAddr.uiAddr = pMapping->DevVAddr.uiAddr; + } + else + { + if (!RA_Alloc(pArena, + uSize, + IMG_NULL, + (IMG_VOID*) 
&pMapping, + ui32Flags, + uDevVAddrAlignment, + 0, + pvPrivData, + ui32PrivDataLength, + (IMG_UINTPTR_T *)&(pBuf->DevVAddr.uiAddr))) + { + PVR_DPF((PVR_DBG_ERROR, "AllocMemory: RA_Alloc(0x%" SIZE_T_FMT_LEN "x) FAILED", uSize)); + return IMG_FALSE; + } + } + + uOffset = pBuf->DevVAddr.uiAddr - pMapping->DevVAddr.uiAddr; + if(pMapping->CpuVAddr) + { + pBuf->CpuVAddr = (IMG_VOID*) ((IMG_UINTPTR_T)pMapping->CpuVAddr + uOffset); + } + else + { + pBuf->CpuVAddr = IMG_NULL; + } + + if(uSize == pMapping->uSizeVM) + { + pBuf->hOSMemHandle = pMapping->hOSMemHandle; + } + else + { + if(OSGetSubMemHandle(pMapping->hOSMemHandle, + uOffset, + uSize, + psBMHeap->ui32Attribs, + &pBuf->hOSMemHandle)!=PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "AllocMemory: OSGetSubMemHandle FAILED")); + return IMG_FALSE; + } + } + + /* for hm_contiguous and hm_wrapped memory, the pMapping + * will have a physical address, else 0 */ + pBuf->CpuPAddr.uiAddr = pMapping->CpuPAddr.uiAddr + uOffset; + + if(ui32Flags & PVRSRV_MEM_ZERO) + { + if(!ZeroBuf(pBuf, pMapping, uSize, psBMHeap->ui32Attribs | ui32Flags)) + { + return IMG_FALSE; + } + } + } + else + { + if(ui32Flags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR) + { + /* user supplied DevVAddr, no RAM backing */ + PVR_ASSERT(psDevVAddr != IMG_NULL); + + if (psDevVAddr == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "AllocMemory: invalid parameter - psDevVAddr")); + return IMG_FALSE; + } + + /* just make space in the pagetables */ + pBMContext->psDeviceNode->pfnMMUAlloc (psBMHeap->pMMUHeap, + uSize, + IMG_NULL, + PVRSRV_MEM_USER_SUPPLIED_DEVVADDR, + uDevVAddrAlignment, + psDevVAddr); + + /* setup buf */ + pBuf->DevVAddr = *psDevVAddr; + } + else + { + IMG_BOOL bResult; + /* BM supplied DevVAddr, no RAM Backing */ + + /* just make space in the pagetables */ + bResult = pBMContext->psDeviceNode->pfnMMUAlloc (psBMHeap->pMMUHeap, + uSize, + IMG_NULL, + 0, + uDevVAddrAlignment, + &pBuf->DevVAddr); + + if(!bResult) + { + PVR_DPF((PVR_DBG_ERROR, "AllocMemory: MMUAlloc 
failed")); + return IMG_FALSE; + } + } + + /* allocate a mocked-up mapping */ + if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof (struct _BM_MAPPING_), + (IMG_PVOID *)&pMapping, IMG_NULL, + "Buffer Manager Mapping") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "AllocMemory: OSAllocMem(0x%" SIZE_T_FMT_LEN "x) FAILED", sizeof(*pMapping))); + return IMG_FALSE; + } + + /* setup buf */ + pBuf->CpuVAddr = IMG_NULL; + pBuf->hOSMemHandle = 0; + pBuf->CpuPAddr.uiAddr = 0; + + /* setup mapping */ + pMapping->CpuVAddr = IMG_NULL; + pMapping->CpuPAddr.uiAddr = 0; + pMapping->DevVAddr = pBuf->DevVAddr; + pMapping->psSysAddr = IMG_NULL; + pMapping->uSize = uSize; + pMapping->hOSMemHandle = 0; + } + +#if defined (PVRSRV_DEVMEM_TIME_STATS) + *pui32TimeToDevMap = pMapping->ui32TimeToDevMap; +#endif + + /* Record the arena pointer in the mapping. */ + pMapping->pArena = pArena; + + /* record the heap */ + pMapping->pBMHeap = psBMHeap; + pBuf->pMapping = pMapping; + + /* output some stats */ + PVR_DPF ((PVR_DBG_MESSAGE, + "AllocMemory: pMapping=%p: DevV=%08X CpuV=%p CpuP=" CPUPADDR_FMT " uSize=0x%" SIZE_T_FMT_LEN "x", + pMapping, + pMapping->DevVAddr.uiAddr, + pMapping->CpuVAddr, + pMapping->CpuPAddr.uiAddr, + pMapping->uSize)); + + PVR_DPF ((PVR_DBG_MESSAGE, + "AllocMemory: pBuf=%p: DevV=%08X CpuV=%p CpuP=" CPUPADDR_FMT " uSize=0x%" SIZE_T_FMT_LEN "x", + pBuf, + pBuf->DevVAddr.uiAddr, + pBuf->CpuVAddr, + pBuf->CpuPAddr.uiAddr, + uSize)); + + /* Verify virtual device address alignment */ + PVR_ASSERT(((pBuf->DevVAddr.uiAddr) & (uDevVAddrAlignment - 1)) == 0); + + return IMG_TRUE; +} + + +/*! +****************************************************************************** + + @Function WrapMemory + + @Description Allocate a buffer mapped into both cpu and device virtual + address spaces. + + @Input psBMHeap - BM heap + @Input uSize - requested buffer size in bytes. + @Input ui32BaseOffset - Offset from page of wrap. + @Input bPhysContig - Is the wrap physically contiguous. 
+ @Input psAddr - List of pages to wrap. + @Input pvCPUVAddr - Optional CPU Kernel virtual address (page aligned) of memory to wrap + @Input ui32Flags - property flags for the buffer. + @Output Buf - receives a pointer to a descriptor of the allocated + buffer. + @Return IMG_TRUE - Success + IMG_FALSE - Failed. + + *****************************************************************************/ +static IMG_BOOL +WrapMemory (BM_HEAP *psBMHeap, + IMG_SIZE_T uSize, + IMG_SIZE_T uiBaseOffset, + IMG_BOOL bPhysContig, + IMG_SYS_PHYADDR *psAddr, + IMG_VOID *pvCPUVAddr, + IMG_UINT32 ui32Flags, + BM_BUF *pBuf) +{ + IMG_DEV_VIRTADDR DevVAddr = {0}; + BM_MAPPING *pMapping; + IMG_BOOL bResult; + IMG_SIZE_T const uPageSize = HOST_PAGESIZE(); + /* We should not pass down R/W flags into the OS layers so create ui32Attribs */ + IMG_UINT32 ui32Attribs = ui32Flags & ~(PVRSRV_MEM_READ | PVRSRV_MEM_WRITE); + + PVR_DPF ((PVR_DBG_MESSAGE, + "WrapMemory(psBMHeap=%p, size=0x%" SIZE_T_FMT_LEN "x, offset=0x%" SIZE_T_FMT_LEN + "x, bPhysContig=0x%x, sysPAddr=0x" SYSPADDR_FMT ", pvCPUVAddr = 0x%p, flags=0x%x)", + psBMHeap, + uSize, + uiBaseOffset, + bPhysContig, + psAddr->uiAddr, + pvCPUVAddr, + ui32Flags)); + + PVR_ASSERT((psAddr->uiAddr & (uPageSize - 1)) == 0); + /* Only need lower 12 bits of the cpu addr - don't care what size a void* is */ + PVR_ASSERT(((IMG_UINTPTR_T)pvCPUVAddr & (uPageSize - 1)) == 0); + + uSize += uiBaseOffset; + uSize = HOST_PAGEALIGN (uSize); + + /* allocate a mocked-up mapping */ + if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(*pMapping), + (IMG_PVOID *)&pMapping, IMG_NULL, + "Mocked-up mapping") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSAllocMem(0x%" SIZE_T_FMT_LEN "x) FAILED", sizeof(*pMapping))); + return IMG_FALSE; + } + + OSMemSet(pMapping, 0, sizeof (*pMapping)); + + pMapping->uSize = uSize; + pMapping->uSizeVM = uSize; + pMapping->pBMHeap = psBMHeap; + + if(pvCPUVAddr) + { + pMapping->CpuVAddr = pvCPUVAddr; + + if (bPhysContig) + { + 
pMapping->eCpuMemoryOrigin = hm_wrapped_virtaddr; + pMapping->CpuPAddr = SysSysPAddrToCpuPAddr(psAddr[0]); + + if(OSRegisterMem(pMapping->CpuPAddr, + pMapping->CpuVAddr, + pMapping->uSize, + ui32Attribs, + &pMapping->hOSMemHandle) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSRegisterMem Phys=0x" CPUPADDR_FMT ", Size=%" SIZE_T_FMT_LEN "u) failed", + pMapping->CpuPAddr.uiAddr, pMapping->uSize)); + goto fail_cleanup; + } + } + else + { + pMapping->eCpuMemoryOrigin = hm_wrapped_scatter_virtaddr; + pMapping->psSysAddr = psAddr; + + if(OSRegisterDiscontigMem(pMapping->psSysAddr, + pMapping->CpuVAddr, + pMapping->uSize, + ui32Attribs, + &pMapping->hOSMemHandle) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSRegisterDiscontigMem Size=0x%" SIZE_T_FMT_LEN "u) failed", + pMapping->uSize)); + goto fail_cleanup; + } + } + } + else + { + if (bPhysContig) + { + pMapping->eCpuMemoryOrigin = hm_wrapped; + pMapping->CpuPAddr = SysSysPAddrToCpuPAddr(psAddr[0]); + + if(OSReservePhys(pMapping->CpuPAddr, + pMapping->uSize, + ui32Attribs, + IMG_NULL, + &pMapping->CpuVAddr, + &pMapping->hOSMemHandle) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSReservePhys Phys=0x" CPUPADDR_FMT ", Size=%" SIZE_T_FMT_LEN "u) failed", + pMapping->CpuPAddr.uiAddr, pMapping->uSize)); + goto fail_cleanup; + } + } + else + { + pMapping->eCpuMemoryOrigin = hm_wrapped_scatter; + pMapping->psSysAddr = psAddr; + + if(OSReserveDiscontigPhys(pMapping->psSysAddr, + pMapping->uSize, + ui32Attribs, + &pMapping->CpuVAddr, + &pMapping->hOSMemHandle) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSReserveDiscontigPhys Size=%" SIZE_T_FMT_LEN "u) failed", + pMapping->uSize)); + goto fail_cleanup; + } + } + } + + /* + * Allocate device memory for this buffer. 
+ */ + bResult = DevMemoryAlloc(psBMHeap->pBMContext, + pMapping, + IMG_NULL, + ui32Flags, + IMG_CAST_TO_DEVVADDR_UINT(uPageSize), + &DevVAddr); + if (!bResult) + { + PVR_DPF((PVR_DBG_ERROR, + "WrapMemory: DevMemoryAlloc(0x%" SIZE_T_FMT_LEN "x) failed", + pMapping->uSize)); + goto fail_cleanup; + } + + /* + * Determine the offset of this allocation within the underlying + * dual mapped chunk of memory, we can assume that all three + * addresses associated with this allocation are placed at the same + * offset within the underlying chunk. + */ + pBuf->CpuPAddr.uiAddr = pMapping->CpuPAddr.uiAddr + uiBaseOffset; + if(!uiBaseOffset) + { + pBuf->hOSMemHandle = pMapping->hOSMemHandle; + } + else + { + if(OSGetSubMemHandle(pMapping->hOSMemHandle, + uiBaseOffset, + (pMapping->uSize - uiBaseOffset), + ui32Attribs, + &pBuf->hOSMemHandle)!=PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSGetSubMemHandle failed")); + goto fail_cleanup; + } + } + if(pMapping->CpuVAddr) + { + pBuf->CpuVAddr = (IMG_VOID*) ((IMG_UINTPTR_T)pMapping->CpuVAddr + uiBaseOffset); + } + pBuf->DevVAddr.uiAddr = pMapping->DevVAddr.uiAddr + IMG_CAST_TO_DEVVADDR_UINT(uiBaseOffset); + + if(ui32Flags & PVRSRV_MEM_ZERO) + { + if(!ZeroBuf(pBuf, pMapping, uSize, ui32Flags)) + { + goto fail_cleanup; + } + } + + PVR_DPF ((PVR_DBG_MESSAGE, "DevVaddr.uiAddr=%08X", DevVAddr.uiAddr)); + PVR_DPF ((PVR_DBG_MESSAGE, + "WrapMemory: DevV=%08X CpuP=" CPUPADDR_FMT " uSize=0x%" SIZE_T_FMT_LEN "x", + pMapping->DevVAddr.uiAddr, pMapping->CpuPAddr.uiAddr, pMapping->uSize)); + PVR_DPF ((PVR_DBG_MESSAGE, + "WrapMemory: DevV=%08X CpuP=" CPUPADDR_FMT " uSize=0x%" SIZE_T_FMT_LEN "x", + pBuf->DevVAddr.uiAddr, pBuf->CpuPAddr.uiAddr, uSize)); + + pBuf->pMapping = pMapping; + return IMG_TRUE; + +fail_cleanup: + if(uiBaseOffset && pBuf->hOSMemHandle) + { + OSReleaseSubMemHandle(pBuf->hOSMemHandle, ui32Attribs); + } + + /* pMapping must be valid: if the allocation failed, we'd have returned */ + if(pMapping->CpuVAddr || 
pMapping->hOSMemHandle) + { + switch(pMapping->eCpuMemoryOrigin) + { + case hm_wrapped: + OSUnReservePhys(pMapping->CpuVAddr, pMapping->uSize, ui32Attribs, pMapping->hOSMemHandle); + break; + case hm_wrapped_virtaddr: + OSUnRegisterMem(pMapping->CpuVAddr, pMapping->uSize, ui32Attribs, pMapping->hOSMemHandle); + break; + case hm_wrapped_scatter: + OSUnReserveDiscontigPhys(pMapping->CpuVAddr, pMapping->uSize, ui32Attribs, pMapping->hOSMemHandle); + break; + case hm_wrapped_scatter_virtaddr: + OSUnRegisterDiscontigMem(pMapping->CpuVAddr, pMapping->uSize, ui32Attribs, pMapping->hOSMemHandle); + break; + default: + break; + } + + } + + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), pMapping, IMG_NULL); + /*not nulling pointer, out of scope*/ + + return IMG_FALSE; +} + + +static IMG_BOOL +ZeroBuf(BM_BUF *pBuf, BM_MAPPING *pMapping, IMG_SIZE_T uBytes, IMG_UINT32 ui32Flags) +{ + IMG_VOID *pvCpuVAddr; + + if(pBuf->CpuVAddr) + { + OSMemSet(pBuf->CpuVAddr, 0, uBytes); + } + else if(pMapping->eCpuMemoryOrigin == hm_contiguous + || pMapping->eCpuMemoryOrigin == hm_wrapped) + { + pvCpuVAddr = OSMapPhysToLin(pBuf->CpuPAddr, + uBytes, + PVRSRV_HAP_KERNEL_ONLY + | (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK), + IMG_NULL); + if(!pvCpuVAddr) + { + PVR_DPF((PVR_DBG_ERROR, "ZeroBuf: OSMapPhysToLin for contiguous buffer failed")); + return IMG_FALSE; + } + OSMemSet(pvCpuVAddr, 0, uBytes); + OSUnMapPhysToLin(pvCpuVAddr, + uBytes, + PVRSRV_HAP_KERNEL_ONLY + | (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK), + IMG_NULL); + } + else + { + IMG_SIZE_T uBytesRemaining = uBytes; + IMG_SIZE_T uCurrentOffset = 0; + IMG_CPU_PHYADDR CpuPAddr; + + /* Walk through the pBuf one page at a time and use + * transient mappings to zero the memory */ + + PVR_ASSERT(pBuf->hOSMemHandle); + + while(uBytesRemaining > 0) + { + IMG_SIZE_T uBlockBytes = MIN(uBytesRemaining, HOST_PAGESIZE()); + CpuPAddr = OSMemHandleToCpuPAddr(pBuf->hOSMemHandle, uCurrentOffset); + /* If the CpuPAddr isn't page aligned then start by 
writing up to the next page + * boundary (or uBytesRemaining if less), so that subsequent iterations can + * copy full physical pages. */ + if(CpuPAddr.uiAddr & (HOST_PAGESIZE() -1)) + { + uBlockBytes = + MIN(uBytesRemaining, (IMG_UINT32)(HOST_PAGEALIGN(CpuPAddr.uiAddr) - CpuPAddr.uiAddr)); + } + + pvCpuVAddr = OSMapPhysToLin(CpuPAddr, + uBlockBytes, + PVRSRV_HAP_KERNEL_ONLY + | (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK), + IMG_NULL); + if(!pvCpuVAddr) + { + PVR_DPF((PVR_DBG_ERROR, "ZeroBuf: OSMapPhysToLin while zeroing non-contiguous memory FAILED")); + return IMG_FALSE; + } + OSMemSet(pvCpuVAddr, 0, uBlockBytes); + OSUnMapPhysToLin(pvCpuVAddr, + uBlockBytes, + PVRSRV_HAP_KERNEL_ONLY + | (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK), + IMG_NULL); + + uBytesRemaining -= uBlockBytes; + uCurrentOffset += uBlockBytes; + } + } + + return IMG_TRUE; +} + +/*! +****************************************************************************** + + @Function FreeBuf + + @Description Free a buffer previously allocated with BM_Alloc() or unwrap + one previous wrapped with BM_Wrap(). + The buffer is identified by the buffer descriptor pBuf + returned at allocation. Note the double indirection when + passing the buffer. + + + @Input pBuf - buffer descriptor to free. + @Input ui32Flags - flags + @Input bFromAllocator - Is this being called by the + allocator? + @Output pui32TimeToDevUnmap - When not NULL, this receives time taken in + microseconds to unmap the buffer from Device MMU + + @Return None. 
 *****************************************************************************/
static IMG_VOID
FreeBuf (BM_BUF *pBuf, IMG_UINT32 ui32Flags, IMG_BOOL bFromAllocator
#if defined (PVRSRV_DEVMEM_TIME_STATS)
		 ,IMG_UINT32 *pui32TimeToDevUnmap
#endif
		 )
{
	BM_MAPPING *pMapping;
	PVRSRV_DEVICE_NODE *psDeviceNode;

	PVR_DPF ((PVR_DBG_MESSAGE,
			"FreeBuf: pBuf=0x%p: DevVAddr=%08X CpuVAddr=0x%p CpuPAddr=" CPUPADDR_FMT,
			pBuf, pBuf->DevVAddr.uiAddr,
			pBuf->CpuVAddr, pBuf->CpuPAddr.uiAddr));

	/* record mapping */
	pMapping = pBuf->pMapping;

#if defined (PVRSRV_DEVMEM_TIME_STATS)
	/* Pass in the storage where we wish to receive the device "unmap" timing info */
	pMapping->pui32TimeToDevUnmap = pui32TimeToDevUnmap;

	/* Initialise unmap timing to 'zero'. If the mapping is deleted from Device MMU
	 * we'll get legit value on return */
	if (pMapping->pui32TimeToDevUnmap)
	{
		*(pMapping->pui32TimeToDevUnmap) = 0;
	}
#endif

	psDeviceNode = pMapping->pBMHeap->pBMContext->psDeviceNode;
	if (psDeviceNode->pfnCacheInvalidate)
	{
		psDeviceNode->pfnCacheInvalidate(psDeviceNode);
	}

	/* Resources are only released once both the export count and the
	 * reference count have dropped to zero; the repeated
	 * (ExportCount == 0 && RefCount == 0) guards below all enforce this. */
	if(ui32Flags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR)
	{
		/* Submemhandle is required by exported mappings */
		if ((pBuf->ui32ExportCount == 0) && (pBuf->ui32RefCount == 0))
		{
			/* user supplied Device Virtual Address */
			if(ui32Flags & PVRSRV_MEM_RAM_BACKED_ALLOCATION)
			{
				/* RAM backed allocation */
				PVR_DPF ((PVR_DBG_ERROR, "FreeBuf: combination of DevVAddr management and RAM backing mode unsupported"));
			}
			else
			{
				/* free the mocked-up mapping */
				OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), pMapping, IMG_NULL);
				pBuf->pMapping = IMG_NULL; /*nulling pointer alias*/
			}
		}
	}
	else
	{
		/* BM supplied Device Virtual Address */
		if(pBuf->hOSMemHandle != pMapping->hOSMemHandle)
		{
			/* Submemhandle is required by exported mappings */
			if ((pBuf->ui32ExportCount == 0) && (pBuf->ui32RefCount == 0))
			{
				OSReleaseSubMemHandle(pBuf->hOSMemHandle, ui32Flags);
			}
		}
		if(ui32Flags & PVRSRV_MEM_RAM_BACKED_ALLOCATION)
		{
			/* Submemhandle is required by exported mappings */
			if ((pBuf->ui32ExportCount == 0) && (pBuf->ui32RefCount == 0))
			{
				/*
					RAM backed allocation
					Note: currently no need to distinguish between hm_env and hm_contiguous
				*/
				PVR_ASSERT(pBuf->ui32ExportCount == 0);
				if (pBuf->pMapping->ui32Flags & PVRSRV_MEM_SPARSE)
				{
					IMG_UINT32 ui32FreeSize = sizeof(IMG_BOOL) * pBuf->pMapping->ui32NumVirtChunks;
					IMG_PVOID pvFreePtr = pBuf->pMapping->pabMapChunk;

					/* With sparse allocations we don't go through the sub-alloc RA */
					BM_FreeMemory(pBuf->pMapping->pBMHeap, pBuf->DevVAddr.uiAddr, pBuf->pMapping);
					OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
							  ui32FreeSize,
							  pvFreePtr,
							  IMG_NULL);
				}
				else
				{
					RA_Free (pBuf->pMapping->pArena, pBuf->DevVAddr.uiAddr, IMG_FALSE);
				}
			}
		}
		else
		{
			if ((pBuf->ui32ExportCount == 0) && (pBuf->ui32RefCount == 0))
			{
				/* Wrapped memory: undo the CPU-side registration that
				 * matches how the memory was originally wrapped. */
				switch (pMapping->eCpuMemoryOrigin)
				{
					case hm_wrapped:
						OSUnReservePhys(pMapping->CpuVAddr, pMapping->uSize, ui32Flags, pMapping->hOSMemHandle);
						break;
					case hm_wrapped_virtaddr:
						OSUnRegisterMem(pMapping->CpuVAddr, pMapping->uSize, ui32Flags, pMapping->hOSMemHandle);
						break;
					case hm_wrapped_scatter:
						OSUnReserveDiscontigPhys(pMapping->CpuVAddr, pMapping->uSize, ui32Flags, pMapping->hOSMemHandle);
						break;
					case hm_wrapped_scatter_virtaddr:
						OSUnRegisterDiscontigMem(pMapping->CpuVAddr, pMapping->uSize, ui32Flags, pMapping->hOSMemHandle);
						break;
					default:
						break;
				}
			}
			/* Device MMU unmap happens only on the allocator-initiated path. */
			if (bFromAllocator)
				DevMemoryFree (pMapping);

			if ((pBuf->ui32ExportCount == 0) && (pBuf->ui32RefCount == 0))
			{
				/* free the mocked-up mapping */
				OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), pMapping, IMG_NULL);
				pBuf->pMapping = IMG_NULL; /*nulling pointer alias*/
			}
		}
	}


	if ((pBuf->ui32ExportCount == 0) && (pBuf->ui32RefCount == 0))
	{
		OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_BUF), pBuf, IMG_NULL);
		/*not nulling pointer, copy on stack*/
	}
}

/*!
******************************************************************************

	@Function	BM_DestroyContext_AnyCb

	@Description	Destroy a buffer manager heap.
					Checks (via RA_TestDelete) that the heap's import arena is
					empty, i.e. that all allocations have been freed; does not
					itself free anything.

	@Input	psBMHeap

	@Return	PVRSRV_ERROR - PVRSRV_ERROR_UNABLE_TO_DESTROY_BM_HEAP when the
	                       import arena still has live imports.

 *****************************************************************************/
static PVRSRV_ERROR BM_DestroyContext_AnyCb(BM_HEAP *psBMHeap)
{
	if(psBMHeap->ui32Attribs
			& (PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG
			|PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG))
	{
		if (psBMHeap->pImportArena)
		{
			IMG_BOOL bTestDelete = RA_TestDelete(psBMHeap->pImportArena);
			if (!bTestDelete)
			{
				PVR_DPF ((PVR_DBG_ERROR, "BM_DestroyContext_AnyCb: RA_TestDelete failed"));
				return PVRSRV_ERROR_UNABLE_TO_DESTROY_BM_HEAP;
			}
		}
	}
	return PVRSRV_OK;
}


/*!
******************************************************************************

	@Function	BM_DestroyContext

	@Description	Destroy a buffer manager context. All allocated buffers must be
					freed before calling this function.  This function is called
					also to perform cleanup during aborted initialisations so it's
					fairly careful not to assume any given resource has really been
					created/allocated.

	@Return	PVRSRV_ERROR

 *****************************************************************************/
PVRSRV_ERROR
BM_DestroyContext(IMG_HANDLE hBMContext,
				  IMG_BOOL *pbDestroyed)
{
	PVRSRV_ERROR eError;
	BM_CONTEXT *pBMContext = (BM_CONTEXT*)hBMContext;

	PVR_DPF ((PVR_DBG_MESSAGE, "BM_DestroyContext"));

	if (pbDestroyed != IMG_NULL)
	{
		*pbDestroyed = IMG_FALSE;
	}

	/*
		Exit straight away if it's an invalid context handle
	*/
	if (pBMContext == IMG_NULL)
	{
		PVR_DPF ((PVR_DBG_ERROR, "BM_DestroyContext: Invalid handle"));
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	/* Contexts are refcounted; only the final release tears anything down. */
	pBMContext->ui32RefCount--;

	if (pBMContext->ui32RefCount > 0)
	{
		/* Just return if there are more references to this context */
		return PVRSRV_OK;
	}

	/*
		Check whether there is a bug in the client which brought it here before
		all the allocations have been freed.
	*/
	eError = List_BM_HEAP_PVRSRV_ERROR_Any(pBMContext->psBMHeap, &BM_DestroyContext_AnyCb);
	if(eError != PVRSRV_OK)
	{
		PVR_DPF ((PVR_DBG_ERROR, "BM_DestroyContext: List_BM_HEAP_PVRSRV_ERROR_Any failed"));
		return eError;
	}
	else
	{
		/* free the device memory context; the actual teardown is performed by
		 * the resman callback (BM_DestroyContextCallBack). */
		eError = ResManFreeResByPtr(pBMContext->hResItem, CLEANUP_WITH_POLL);
		if(eError != PVRSRV_OK)
		{
			PVR_DPF ((PVR_DBG_ERROR, "BM_DestroyContext: ResManFreeResByPtr failed %d",eError));
			return eError;
		}

		/* mark context as destroyed */
		if (pbDestroyed != IMG_NULL)
		{
			*pbDestroyed = IMG_TRUE;
		}
	}

	return PVRSRV_OK;
}


/*!
******************************************************************************

	@Function	BM_DestroyContextCallBack_AnyVaCb

	@Description	Destroy Device memory context

	@Input	psBMHeap - heap to be freed.
	@Input	va - list of variable arguments with the following contents:
			- psDeviceNode
	@Return	PVRSRV_ERROR

 *****************************************************************************/
static PVRSRV_ERROR BM_DestroyContextCallBack_AnyVaCb(BM_HEAP *psBMHeap, va_list va)
{
	PVRSRV_DEVICE_NODE *psDeviceNode;
	psDeviceNode = va_arg(va, PVRSRV_DEVICE_NODE*);

	/* Free up the import arenas */
	if(psBMHeap->ui32Attribs
			& (PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG
			|PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG))
	{
		if (psBMHeap->pImportArena)
		{
			RA_Delete (psBMHeap->pImportArena);
		}
	}
	else
	{
		PVR_DPF((PVR_DBG_ERROR, "BM_DestroyContext: backing store type unsupported"));
		return PVRSRV_ERROR_UNSUPPORTED_BACKING_STORE;
	}

	/* Free up the MMU Heaps */
	psDeviceNode->pfnMMUDelete(psBMHeap->pMMUHeap);

	/* Free Heap memory */
	OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_HEAP), psBMHeap, IMG_NULL);
	/*not nulling pointer, copy on stack*/

	return PVRSRV_OK;
}


/*!
******************************************************************************

	@Function	BM_DestroyContextCallBack

	@Description	Destroy Device memory context (resman cleanup callback for
					the RESMAN_TYPE_DEVICEMEM_CONTEXT resource registered in
					BM_CreateContext).

	@Input	pvParam - opaque void ptr param (the BM_CONTEXT)
	@Input	ui32Param - opaque unsigned long param (unused)

	@Return	PVRSRV_ERROR

 *****************************************************************************/
static PVRSRV_ERROR BM_DestroyContextCallBack(IMG_PVOID pvParam,
											  IMG_UINT32 ui32Param,
											  IMG_BOOL bDummy)
{
	BM_CONTEXT *pBMContext = pvParam;
	PVRSRV_DEVICE_NODE *psDeviceNode;
	PVRSRV_ERROR eError;
/*	BM_CONTEXT **ppBMContext;
	BM_HEAP *psBMHeap, *psTmpBMHeap;*/

	PVR_UNREFERENCED_PARAMETER(ui32Param);
	PVR_UNREFERENCED_PARAMETER(bDummy);

	/*
		Get DeviceNode from BMcontext
	*/
	psDeviceNode = pBMContext->psDeviceNode;

	/*
		Free the import arenas and heaps
	*/
	eError = List_BM_HEAP_PVRSRV_ERROR_Any_va(pBMContext->psBMHeap,
											&BM_DestroyContextCallBack_AnyVaCb,
											psDeviceNode);
	if (eError != PVRSRV_OK)
	{
		return eError;
	}
	/*
		'Finalise' the MMU
	*/
	if (pBMContext->psMMUContext)
	{
		psDeviceNode->pfnMMUFinalise(pBMContext->psMMUContext);
	}

	/*
		Free up generic, useful resources - if they were allocated.
	*/
	if (pBMContext->pBufferHash)
	{
		HASH_Delete(pBMContext->pBufferHash);
	}

	if (pBMContext == psDeviceNode->sDevMemoryInfo.pBMKernelContext)
	{
		/* Freeing the kernel context */
		psDeviceNode->sDevMemoryInfo.pBMKernelContext = IMG_NULL;
	}
	else
	{
		if (pBMContext->ppsThis != IMG_NULL)
		{
			/*
			 * Remove context from the linked list
			 */
			List_BM_CONTEXT_Remove(pBMContext);
		}
	}

	OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_CONTEXT), pBMContext, IMG_NULL);
	/*not nulling pointer, copy on stack*/

	return PVRSRV_OK;
}


/* List-walk callback: returns the existing BM context (with its refcount
 * bumped) when it belongs to the resman context passed in the va_list;
 * IMG_NULL keeps the walk going. */
static IMG_HANDLE BM_CreateContext_IncRefCount_AnyVaCb(BM_CONTEXT *pBMContext, va_list va)
{
	PRESMAN_CONTEXT hResManContext;
	hResManContext = va_arg(va, PRESMAN_CONTEXT);
	if(ResManFindResourceByPtr(hResManContext, pBMContext->hResItem) == PVRSRV_OK)
	{
		/* just increment the refcount and return the memory context found for this process */
		pBMContext->ui32RefCount++;
		return pBMContext;
	}
	return IMG_NULL;
}

/* List-walk callback: inserts shared/shared-exported heaps into the new
 * context's MMU page directory; other heap types are skipped. */
static IMG_VOID BM_CreateContext_InsertHeap_ForEachVaCb(BM_HEAP *psBMHeap, va_list va)
{
	PVRSRV_DEVICE_NODE *psDeviceNode;
	BM_CONTEXT *pBMContext;
	psDeviceNode = va_arg(va, PVRSRV_DEVICE_NODE*);
	pBMContext = va_arg(va, BM_CONTEXT*);
	switch(psBMHeap->sDevArena.DevMemHeapType)
	{
		case DEVICE_MEMORY_HEAP_SHARED:
		case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
		{
			/* insert the heap into the device's MMU page directory/table */
			psDeviceNode->pfnMMUInsertHeap(pBMContext->psMMUContext, psBMHeap->pMMUHeap);
			break;
		}
	}
}

/*!
******************************************************************************

	@Function	BM_CreateContext

	@Description	Creates and initialises a buffer manager context. This function must be called
					before any other buffer manager functions.

	@Return	valid BM context handle - Success
			IMG_NULL - Failed

 *****************************************************************************/
IMG_HANDLE
BM_CreateContext(PVRSRV_DEVICE_NODE *psDeviceNode,
				 IMG_DEV_PHYADDR *psPDDevPAddr,
				 PVRSRV_PER_PROCESS_DATA *psPerProc,
				 IMG_BOOL *pbCreated)
{
	BM_CONTEXT *pBMContext;
/*	BM_HEAP *psBMHeap;*/
	DEVICE_MEMORY_INFO *psDevMemoryInfo;
	IMG_BOOL bKernelContext;
	PRESMAN_CONTEXT hResManContext;

	PVR_DPF((PVR_DBG_MESSAGE, "BM_CreateContext"));

	/* A NULL per-process pointer means this is the kernel's own context. */
	if (psPerProc == IMG_NULL)
	{
		bKernelContext = IMG_TRUE;
		hResManContext = psDeviceNode->hResManContext;
	}
	else
	{
		bKernelContext = IMG_FALSE;
		hResManContext = psPerProc->hResManContext;
	}

	if (pbCreated != IMG_NULL)
	{
		*pbCreated = IMG_FALSE;
	}

	/* setup the device memory info. */
	psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;

	if (bKernelContext == IMG_FALSE)
	{
		/* Reuse an existing context for this process if one is found;
		 * the callback increments its refcount. */
		IMG_HANDLE res = (IMG_HANDLE) List_BM_CONTEXT_Any_va(psDevMemoryInfo->pBMContext,
															 &BM_CreateContext_IncRefCount_AnyVaCb,
															 hResManContext);
		if (res)
		{
			return res;
		}
	}

	/* allocate a BM context */
	if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
					 sizeof (struct _BM_CONTEXT_),
					 (IMG_PVOID *)&pBMContext, IMG_NULL,
					 "Buffer Manager Context") != PVRSRV_OK)
	{
		PVR_DPF ((PVR_DBG_ERROR, "BM_CreateContext: Alloc failed"));
		return IMG_NULL;
	}
	OSMemSet(pBMContext, 0, sizeof (BM_CONTEXT));

	/* store the associated devicenode */
	pBMContext->psDeviceNode = psDeviceNode;

	/* This hash table is used to store BM_Wraps in a global way */
	/* INTEGRATION_POINT: 32 is an abitrary limit on the number of hashed BM_wraps */
	pBMContext->pBufferHash = HASH_Create(32);
	if (pBMContext->pBufferHash==IMG_NULL)
	{
		PVR_DPF ((PVR_DBG_ERROR, "BM_CreateContext: HASH_Create failed"));
		goto cleanup;
	}

	if((IMG_NULL == psDeviceNode->pfnMMUInitialise) || (psDeviceNode->pfnMMUInitialise(psDeviceNode,
											&pBMContext->psMMUContext,
											psPDDevPAddr) != PVRSRV_OK))
	{
		PVR_DPF((PVR_DBG_ERROR, "BM_CreateContext: MMUInitialise failed"));
		goto cleanup;
	}

	if(bKernelContext)
	{
		/* just save the kernel context */
		PVR_ASSERT(psDevMemoryInfo->pBMKernelContext == IMG_NULL);
		psDevMemoryInfo->pBMKernelContext = pBMContext;
	}
	else
	{
		/*
			On the creation of each new context we must
			insert the kernel context's 'shared' and 'shared_exported'
			heaps into the new context
			- check the kernel context and heaps exist
		*/
		PVR_ASSERT(psDevMemoryInfo->pBMKernelContext);

		if (psDevMemoryInfo->pBMKernelContext == IMG_NULL)
		{
			PVR_DPF((PVR_DBG_ERROR, "BM_CreateContext: psDevMemoryInfo->pBMKernelContext invalid"));
			goto cleanup;
		}

		PVR_ASSERT(psDevMemoryInfo->pBMKernelContext->psBMHeap);

		/*
			insert the kernel heaps structures into the new context's shared heap list
			Note. this will include the kernel only heaps but these will not actually
			be imported into the context nor returned to the client
		*/
		pBMContext->psBMSharedHeap = psDevMemoryInfo->pBMKernelContext->psBMHeap;

		/*
			insert the shared heaps into the MMU page directory/table
			for the new context
		*/
		List_BM_HEAP_ForEach_va(pBMContext->psBMSharedHeap,
								&BM_CreateContext_InsertHeap_ForEachVaCb,
								psDeviceNode,
								pBMContext);

		/* Finally, insert the new context into the list of BM contexts */
		List_BM_CONTEXT_Insert(&psDevMemoryInfo->pBMContext, pBMContext);
	}

	/* Increment the refcount, as creation is successful */
	pBMContext->ui32RefCount++;

	/* register with resman */
	pBMContext->hResItem = ResManRegisterRes(hResManContext,
											RESMAN_TYPE_DEVICEMEM_CONTEXT,
											pBMContext,
											0,
											&BM_DestroyContextCallBack);
	if (pBMContext->hResItem == IMG_NULL)
	{
		PVR_DPF ((PVR_DBG_ERROR, "BM_CreateContext: ResManRegisterRes failed"));
		goto cleanup;
	}

	if (pbCreated != IMG_NULL)
	{
		*pbCreated = IMG_TRUE;
	}
	return (IMG_HANDLE)pBMContext;

cleanup:
	/* The destroy callback tolerates partially-initialised contexts. */
	(IMG_VOID)BM_DestroyContextCallBack(pBMContext, 0, CLEANUP_WITH_POLL);

	return IMG_NULL;
}


/* List-walk callback: returns the heap whose HeapID matches the
 * DEVICE_MEMORY_HEAP_INFO passed in the va_list, or IMG_NULL to continue. */
static IMG_VOID *BM_CreateHeap_AnyVaCb(BM_HEAP *psBMHeap, va_list va)
{
	DEVICE_MEMORY_HEAP_INFO *psDevMemHeapInfo;
	psDevMemHeapInfo = va_arg(va, DEVICE_MEMORY_HEAP_INFO*);
	if (psBMHeap->sDevArena.ui32HeapID == psDevMemHeapInfo->ui32HeapID)
	{
		/* Match - just return already created heap */
		return psBMHeap;
	}
	else
	{
		return IMG_NULL;
	}
}

/*!
******************************************************************************

	@Function	BM_CreateHeap

	@Description	Creates and initialises a BM heap for a given BM context.

	@Return
		valid heap handle - success
		IMG_NULL - failure


 *****************************************************************************/
IMG_HANDLE
BM_CreateHeap (IMG_HANDLE hBMContext,
			   DEVICE_MEMORY_HEAP_INFO *psDevMemHeapInfo)
{
	BM_CONTEXT *pBMContext = (BM_CONTEXT*)hBMContext;
	PVRSRV_DEVICE_NODE *psDeviceNode;
	BM_HEAP *psBMHeap;

	PVR_DPF((PVR_DBG_MESSAGE, "BM_CreateHeap"));

	if(!pBMContext)
	{
		PVR_DPF((PVR_DBG_ERROR, "BM_CreateHeap: BM_CONTEXT null"));
		return IMG_NULL;
	}

	psDeviceNode = pBMContext->psDeviceNode;

	/*
	 * Ensure that the heap size is a multiple of the data page size.
	 */
	PVR_ASSERT((psDevMemHeapInfo->ui32HeapSize & (psDevMemHeapInfo->ui32DataPageSize - 1)) == 0);
	PVR_ASSERT(psDevMemHeapInfo->ui32HeapSize > 0);

	/*
	We may be being asked to create a heap in a context which already has one.
	Test for refcount > 0 because PVRSRVGetDeviceMemHeapInfoKM doesn't increment the refcount.
+ This does mean that the first call to PVRSRVCreateDeviceMemContextKM will first try to find + heaps that we already know don't exist + */ + if(pBMContext->ui32RefCount > 0) + { + psBMHeap = (BM_HEAP*)List_BM_HEAP_Any_va(pBMContext->psBMHeap, + &BM_CreateHeap_AnyVaCb, + psDevMemHeapInfo); + + if (psBMHeap) + { + return psBMHeap; + } + } + + + if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof (BM_HEAP), + (IMG_PVOID *)&psBMHeap, IMG_NULL, + "Buffer Manager Heap") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "BM_CreateHeap: Alloc failed")); + return IMG_NULL; + } + + OSMemSet (psBMHeap, 0, sizeof (BM_HEAP)); + + psBMHeap->sDevArena.ui32HeapID = psDevMemHeapInfo->ui32HeapID; + psBMHeap->sDevArena.pszName = psDevMemHeapInfo->pszName; + psBMHeap->sDevArena.BaseDevVAddr = psDevMemHeapInfo->sDevVAddrBase; + psBMHeap->sDevArena.ui32Size = psDevMemHeapInfo->ui32HeapSize; + psBMHeap->sDevArena.DevMemHeapType = psDevMemHeapInfo->DevMemHeapType; + psBMHeap->sDevArena.ui32DataPageSize = psDevMemHeapInfo->ui32DataPageSize; + psBMHeap->sDevArena.psDeviceMemoryHeapInfo = psDevMemHeapInfo; + psBMHeap->ui32Attribs = psDevMemHeapInfo->ui32Attribs; +#if defined(SUPPORT_MEMORY_TILING) + psBMHeap->ui32XTileStride = psDevMemHeapInfo->ui32XTileStride; +#endif + + /* tie the heap to the context */ + psBMHeap->pBMContext = pBMContext; + + psBMHeap->pMMUHeap = psDeviceNode->pfnMMUCreate (pBMContext->psMMUContext, + &psBMHeap->sDevArena, + &psBMHeap->pVMArena, + &psBMHeap->psMMUAttrib); + if (!psBMHeap->pMMUHeap) + { + PVR_DPF((PVR_DBG_ERROR, "BM_CreateHeap: MMUCreate failed")); + goto ErrorExit; + } + + /* memory is allocated from the OS as required */ + psBMHeap->pImportArena = RA_Create (psDevMemHeapInfo->pszBSName, + 0, 0, IMG_NULL, + MIN(HOST_PAGESIZE(), psBMHeap->sDevArena.ui32DataPageSize), + &BM_ImportMemory, + &BM_FreeMemory, + IMG_NULL, + psBMHeap); + if(psBMHeap->pImportArena == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "BM_CreateHeap: RA_Create failed")); + goto ErrorExit; + } + + 
if(psBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG) + { + /* + memory comes from a device memory contiguous allocator (ra) + Note: these arenas are shared across the system so don't delete + as part of heap destroy + */ + psBMHeap->pLocalDevMemArena = psDevMemHeapInfo->psLocalDevMemArena; + if(psBMHeap->pLocalDevMemArena == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "BM_CreateHeap: LocalDevMemArena null")); + goto ErrorExit; + } + } + + /* insert heap into head of the heap list */ + List_BM_HEAP_Insert(&pBMContext->psBMHeap, psBMHeap); + + return (IMG_HANDLE)psBMHeap; + + /* handle error case */ +ErrorExit: + + /* Free up the MMU if we created one */ + if (psBMHeap->pMMUHeap != IMG_NULL) + { + psDeviceNode->pfnMMUDelete (psBMHeap->pMMUHeap); + /* don't finalise psMMUContext as we don't own it */ + } + + /* Free the Heap memory */ + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_HEAP), psBMHeap, IMG_NULL); + /*not nulling pointer, out of scope*/ + + return IMG_NULL; +} + +/*! +****************************************************************************** + + @Function BM_DestroyHeap + + @Description Destroys a BM heap + + @Return + valid heap handle - success + IMG_NULL - failure + + + *****************************************************************************/ +IMG_VOID +BM_DestroyHeap (IMG_HANDLE hDevMemHeap) +{ + BM_HEAP* psBMHeap = (BM_HEAP*)hDevMemHeap; + PVRSRV_DEVICE_NODE *psDeviceNode = psBMHeap->pBMContext->psDeviceNode; + + PVR_DPF((PVR_DBG_MESSAGE, "BM_DestroyHeap")); + + if(psBMHeap) + { + /* Free up the import arenas */ + if(psBMHeap->ui32Attribs + & (PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG + |PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG)) + { + if (psBMHeap->pImportArena) + { + RA_Delete (psBMHeap->pImportArena); + } + } + else + { + PVR_DPF((PVR_DBG_ERROR, "BM_DestroyHeap: backing store type unsupported")); + return; + } + + /* Free up the MMU Heap */ + psDeviceNode->pfnMMUDelete (psBMHeap->pMMUHeap); + + /* remove from the heap list */ + 
List_BM_HEAP_Remove(psBMHeap); + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_HEAP), psBMHeap, IMG_NULL); + } + else + { + PVR_DPF ((PVR_DBG_ERROR, "BM_DestroyHeap: invalid heap handle")); + } +} + + +/*! +****************************************************************************** + + @Function BM_Reinitialise + + @Description Reinitialise the buffer manager after a power down event. + + @Return IMG_TRUE - Success + IMG_FALSE - Failed + + *****************************************************************************/ +IMG_BOOL +BM_Reinitialise (PVRSRV_DEVICE_NODE *psDeviceNode) +{ + + PVR_DPF((PVR_DBG_MESSAGE, "BM_Reinitialise")); + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + + /* FIXME: Need to reenable all contexts + List_BM_CONTEXT_ForEach(psDeviceNode->sDevMemoryInfo.pBMContext, MMU_Enable); + */ + + return IMG_TRUE; +} + +/*! +****************************************************************************** + + @Function BM_Alloc + + @Description Allocate a buffer mapped into both cpu and device virtual + memory maps. + + @Input hDevMemHeap + @Input psDevVAddr - device virtual address specified by caller (optional) + @Input uSize - require size in bytes of the buffer. + @Input pui32Flags - bit mask of buffer property flags. + @Input uDevVAddrAlignment - required alignment in bytes, or 0. + @Input pvPrivData - opaque private data passed through to allocator + @Input ui32PrivDataLength - length of opaque private data + + @Output phBuf - receives buffer handle + @Output pui32Flags - bit mask of heap property flags. 

	@Return	IMG_TRUE - Success
			IMG_FALSE - Failure

 *****************************************************************************/
IMG_BOOL
BM_Alloc (  IMG_HANDLE			hDevMemHeap,
			IMG_DEV_VIRTADDR	*psDevVAddr,
			IMG_SIZE_T			uSize,
			IMG_UINT32			*pui32Flags,
			IMG_UINT32			uDevVAddrAlignment,
			IMG_PVOID			pvPrivData,
			IMG_UINT32			ui32PrivDataLength,
			IMG_UINT32			ui32ChunkSize,
			IMG_UINT32			ui32NumVirtChunks,
			IMG_UINT32			ui32NumPhysChunks,
			IMG_BOOL			*pabMapChunk,
#if defined (PVRSRV_DEVMEM_TIME_STATS)
			IMG_UINT32			*pui32TimeToDevMap,
#endif
			BM_HANDLE			*phBuf)
{
	BM_BUF *pBuf;
	BM_CONTEXT *pBMContext;
	BM_HEAP *psBMHeap;
	SYS_DATA *psSysData;
	IMG_UINT32 ui32Flags;

	/* pui32Flags is both an input (requested properties) and an output
	 * (heap attributes merged in below), so it must be non-NULL. */
	if (pui32Flags == IMG_NULL)
	{
		PVR_DPF((PVR_DBG_ERROR, "BM_Alloc: invalid parameter"));
		PVR_DBG_BREAK;
		return IMG_FALSE;
	}

	ui32Flags = *pui32Flags;

	PVR_DPF ((PVR_DBG_MESSAGE,
		  "BM_Alloc (uSize=0x%" SIZE_T_FMT_LEN "x, ui32Flags=0x%x, uDevVAddrAlignment=0x%x)",
			uSize, ui32Flags, uDevVAddrAlignment));

	SysAcquireData(&psSysData);

	psBMHeap = (BM_HEAP*)hDevMemHeap;
	pBMContext = psBMHeap->pBMContext;

	if(uDevVAddrAlignment == 0)
	{
		uDevVAddrAlignment = 1;
	}

	/*
	 * Allocate something in which to record the allocation's details.
	 */
	if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
					sizeof (BM_BUF),
					(IMG_PVOID *)&pBuf, IMG_NULL,
					"Buffer Manager buffer") != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR, "BM_Alloc: BM_Buf alloc FAILED"));
		return IMG_FALSE;
	}
	OSMemSet(pBuf, 0, sizeof (BM_BUF));

	/*
	 * Allocate the memory itself now.
	 */
	if (AllocMemory(pBMContext,
					psBMHeap,
					psDevVAddr,
					uSize,
					ui32Flags,
					uDevVAddrAlignment,
					pvPrivData,
					ui32PrivDataLength,
					ui32ChunkSize,
					ui32NumVirtChunks,
					ui32NumPhysChunks,
					pabMapChunk,
#if defined (PVRSRV_DEVMEM_TIME_STATS)
					pui32TimeToDevMap,
#endif
					pBuf) != IMG_TRUE)
	{
		OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof (BM_BUF), pBuf, IMG_NULL);
		/* not nulling pointer, out of scope */
		PVR_DPF((PVR_DBG_ERROR, "BM_Alloc: AllocMemory FAILED"));
		return IMG_FALSE;
	}

	PVR_DPF ((PVR_DBG_MESSAGE,
		  "BM_Alloc (uSize=0x%" SIZE_T_FMT_LEN "x, ui32Flags=0x%x)",
		  uSize, ui32Flags));

	/*
	 * Assign the handle and return.
	 */
	pBuf->ui32RefCount = 1;
	*phBuf = (BM_HANDLE)pBuf;
	*pui32Flags = ui32Flags | psBMHeap->ui32Attribs;

	/*
	 * If the user has specified heap CACHETYPE flags themselves,
	 * override any CACHETYPE flags inherited from the heap.
	 */
	if(ui32Flags & PVRSRV_HAP_CACHETYPE_MASK)
	{
		*pui32Flags &= ~PVRSRV_HAP_CACHETYPE_MASK;
		*pui32Flags |= (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK);
	}

	return IMG_TRUE;
}



#if defined(PVR_LMA)
/*!
******************************************************************************

	@Function	ValidSysPAddrArrayForDev

	@Description	Verify the array of system address is accessible
					by the given device.
+ + @Input psDeviceNode + @Input psSysPAddr - system address array + @Input uPageSize - size of address array + + @Return IMG_BOOL + + *****************************************************************************/ +static IMG_BOOL +ValidSysPAddrArrayForDev(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_SYS_PHYADDR *psSysPAddr, IMG_UINT32 ui32PageCount, IMG_SIZE_T uPageSize) +{ + IMG_UINT32 i; + + for (i = 0; i < ui32PageCount; i++) + { + IMG_SYS_PHYADDR sStartSysPAddr = psSysPAddr[i]; + IMG_SYS_PHYADDR sEndSysPAddr; + + if (!SysVerifySysPAddrToDevPAddr(psDeviceNode->sDevId.eDeviceType, sStartSysPAddr)) + { + return IMG_FALSE; + } + + sEndSysPAddr.uiAddr = sStartSysPAddr.uiAddr + uPageSize; + + if (!SysVerifySysPAddrToDevPAddr(psDeviceNode->sDevId.eDeviceType, sEndSysPAddr)) + { + return IMG_FALSE; + } + } + + return IMG_TRUE; +} + +/*! +****************************************************************************** + + @Function ValidSysPAddrRangeForDev + + @Description Verify a system address range is accessible + by the given device. + + @Input psDeviceNode + @Input sStartSysPAddr - starting system address + @Input ui32Range - length of address range + + @Return IMG_BOOL + + *****************************************************************************/ +static IMG_BOOL +ValidSysPAddrRangeForDev(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_SYS_PHYADDR sStartSysPAddr, IMG_SIZE_T uRange) +{ + IMG_SYS_PHYADDR sEndSysPAddr; + + if (!SysVerifySysPAddrToDevPAddr(psDeviceNode->sDevId.eDeviceType, sStartSysPAddr)) + { + return IMG_FALSE; + } + + sEndSysPAddr.uiAddr = sStartSysPAddr.uiAddr + uRange; + + if (!SysVerifySysPAddrToDevPAddr(psDeviceNode->sDevId.eDeviceType, sEndSysPAddr)) + { + return IMG_FALSE; + } + + return IMG_TRUE; +} + +#define WRAP_MAPPING_SIZE(uByteSize, uPageOffset) HOST_PAGEALIGN((uByteSize) + (uPageOffset)) + +#define WRAP_PAGE_COUNT(uByteSize, uPageOffset, uHostPageSize) (WRAP_MAPPING_SIZE(uByteSize, uPageOffset) / (uHostPageSize)) + +#endif + + +/*! 
******************************************************************************

	@Function	BM_Wrap

	@Description	Create a buffer which wraps user provided system physical
					memory.
					The wrapped memory must be page aligned. BM_Wrap will
					roundup the size to a multiple of cpu pages.

	@Input	uSize - size of memory to wrap.
	@Input	ui32Offset - Offset into page of memory to wrap.
	@Input	bPhysContig - Is the wrap physically contiguous.
	@Input	psSysAddr - list of system physical page addresses of memory to wrap.
	@Input	pvCPUVAddr - optional CPU kernel virtual address (Page aligned) of memory to wrap.
	@Input	ui32Flags - bit mask of buffer property flags.
	@output	phBuf - receives the buffer handle.

	@Return	IMG_TRUE - Success.
			IMG_FALSE - Failed

 *****************************************************************************/
IMG_BOOL
BM_Wrap (	IMG_HANDLE hDevMemHeap,
			IMG_SIZE_T uSize,
			IMG_SIZE_T uOffset,
			IMG_BOOL bPhysContig,
			IMG_SYS_PHYADDR *psSysAddr,
			IMG_VOID *pvCPUVAddr,
			IMG_UINT32 *pui32Flags,
			BM_HANDLE *phBuf)
{
	BM_BUF *pBuf;
	BM_CONTEXT *psBMContext;
	BM_HEAP *psBMHeap;
	SYS_DATA *psSysData;
	IMG_SYS_PHYADDR sHashAddress;
	IMG_UINT32 ui32Flags;

	psBMHeap = (BM_HEAP*)hDevMemHeap;
	psBMContext = psBMHeap->pBMContext;

	/* Start from the heap's cache-type and map-type attributes, then let
	 * any caller-supplied cache-type and read/write bits override them. */
	ui32Flags = psBMHeap->ui32Attribs & (PVRSRV_HAP_CACHETYPE_MASK | PVRSRV_HAP_MAPTYPE_MASK);

	if ((pui32Flags != IMG_NULL) && ((*pui32Flags & PVRSRV_HAP_CACHETYPE_MASK) != 0))
	{
		ui32Flags &= ~PVRSRV_HAP_CACHETYPE_MASK;
		ui32Flags |= *pui32Flags & PVRSRV_HAP_CACHETYPE_MASK;
	}

	if ((pui32Flags != IMG_NULL) && ((*pui32Flags & (PVRSRV_MEM_READ | PVRSRV_MEM_WRITE)) != 0))
	{
		ui32Flags &= ~(PVRSRV_MEM_READ | PVRSRV_MEM_WRITE);
		ui32Flags |= *pui32Flags & (PVRSRV_MEM_READ | PVRSRV_MEM_WRITE);
	}

	PVR_DPF ((PVR_DBG_MESSAGE,
		  "BM_Wrap (uSize=0x%" SIZE_T_FMT_LEN "x, uOffset=0x%" SIZE_T_FMT_LEN
		  "x, bPhysContig=0x%x, syspAddr=0x" SYSPADDR_FMT ", pvCPUVAddr=0x%p, ui32Flags=0x%x)",
			uSize,
			uOffset,
			bPhysContig,
			psSysAddr->uiAddr,
			pvCPUVAddr,
			ui32Flags));

	SysAcquireData(&psSysData);

#if defined(PVR_LMA)
	/* On local-memory systems, verify the wrapped addresses are visible
	 * to this device before doing anything else. */
	if (bPhysContig)
	{
		if (!ValidSysPAddrRangeForDev(psBMContext->psDeviceNode, *psSysAddr, WRAP_MAPPING_SIZE(uSize, uOffset)))
		{
			PVR_DPF((PVR_DBG_ERROR, "BM_Wrap: System address range invalid for device"));
			return IMG_FALSE;
		}
	}
	else
	{
		IMG_SIZE_T uHostPageSize = HOST_PAGESIZE();

		if (!ValidSysPAddrArrayForDev(psBMContext->psDeviceNode, psSysAddr, WRAP_PAGE_COUNT(uSize, uOffset, uHostPageSize), uHostPageSize))
		{
			PVR_DPF((PVR_DBG_ERROR, "BM_Wrap: Array of system addresses invalid for device"));
			return IMG_FALSE;
		}
	}
#endif
	/*
	 * Insert the System Physical Address of the first page into the hash so we can optimise multiple wraps of the
	 * same memory.
	 */
	sHashAddress = psSysAddr[0];

	/* Add the in-page offset to ensure a unique hash */
	sHashAddress.uiAddr += uOffset;

	/* See if this address has already been wrapped, note that the cast is ok as this is only local mem */
	pBuf = (BM_BUF *)HASH_Retrieve(psBMContext->pBufferHash, (IMG_UINTPTR_T)sHashAddress.uiAddr);

	if(pBuf)
	{
		IMG_SIZE_T uMappingSize = HOST_PAGEALIGN (uSize + uOffset);

		/* Check base address, size and contiguity type match */
		if(pBuf->pMapping->uSize == uMappingSize && (pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped ||
												pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped_virtaddr))
		{
			PVR_DPF((PVR_DBG_MESSAGE,
					"BM_Wrap (Matched previous Wrap! uSize=0x%" SIZE_T_FMT_LEN "x, uOffset=0x%" SIZE_T_FMT_LEN "x, SysAddr=" SYSPADDR_FMT ")",
					uSize,
					uOffset,
					sHashAddress.uiAddr));

			/* Re-wrap of identical memory: just take another reference. */
			PVRSRVBMBufIncRef(pBuf);
			*phBuf = (BM_HANDLE)pBuf;
			if(pui32Flags)
				*pui32Flags = ui32Flags;

			return IMG_TRUE;
		}
		else
		{
			/* Otherwise removed that item from the hash table
			   (a workaround for buffer device class) */
			HASH_Remove(psBMContext->pBufferHash, (IMG_UINTPTR_T)sHashAddress.uiAddr);
		}
	}

	/*
	 * Allocate something in which to record the allocation's details.
	 */
	if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
						sizeof (BM_BUF),
						(IMG_PVOID *)&pBuf, IMG_NULL,
						"Buffer Manager buffer") != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR, "BM_Wrap: BM_Buf alloc FAILED"));
		return IMG_FALSE;
	}
	OSMemSet(pBuf, 0, sizeof (BM_BUF));

	/*
	 * Actually perform the memory wrap.
	 */
	if (WrapMemory (psBMHeap, uSize, uOffset, bPhysContig, psSysAddr, pvCPUVAddr, ui32Flags, pBuf) != IMG_TRUE)
	{
		PVR_DPF((PVR_DBG_ERROR, "BM_Wrap: WrapMemory FAILED"));
		OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof (BM_BUF), pBuf, IMG_NULL);
		/*not nulling pointer, out of scope*/
		return IMG_FALSE;
	}

	/* Only insert the buffer in the hash table if it is contiguous - allows for optimisation of multiple wraps
	 * of the same contiguous buffer.
	 */
	if(pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped || pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped_virtaddr)
	{
		/* Have we calculated the right Hash key ? */
		PVR_ASSERT(SysSysPAddrToCpuPAddr(sHashAddress).uiAddr == pBuf->CpuPAddr.uiAddr);

		if (!HASH_Insert (psBMContext->pBufferHash, (IMG_UINTPTR_T)sHashAddress.uiAddr, (IMG_UINTPTR_T)pBuf))
		{
			FreeBuf (pBuf, ui32Flags, IMG_TRUE
#if defined (PVRSRV_DEVMEM_TIME_STATS)
					, IMG_NULL
#endif
					);
			PVR_DPF((PVR_DBG_ERROR, "BM_Wrap: HASH_Insert FAILED"));
			return IMG_FALSE;
		}
	}

	PVR_DPF ((PVR_DBG_MESSAGE,
			"BM_Wrap (uSize=0x%" SIZE_T_FMT_LEN "x, ui32Flags=0x%x, devVAddr=%08X)",
			uSize, ui32Flags, pBuf->DevVAddr.uiAddr));

	/*
	 * Assign the handle and return.
	 */
	pBuf->ui32RefCount = 1;
	*phBuf = (BM_HANDLE)pBuf;
	if(pui32Flags)
	{
		/* need to override the heap attributes SINGLE PROC to MULT_PROC. */
		*pui32Flags = (ui32Flags & ~PVRSRV_HAP_MAPTYPE_MASK) | PVRSRV_HAP_MULTI_PROCESS;
	}

	return IMG_TRUE;
}

/*!
******************************************************************************

	@Function	BM_Export

	@Description	Export a buffer previously allocated via BM_Alloc.

	@Input	hBuf - buffer handle.
	@Input	ui32Flags - flags

	@Return	None.

 *****************************************************************************/

IMG_VOID
BM_Export (BM_HANDLE hBuf)
{
	BM_BUF *pBuf = (BM_BUF *)hBuf;

	PVRSRVBMBufIncExport(pBuf);
}

/*!
******************************************************************************
	@Function	BM_FreeExport

	@Description	Free a buffer previously exported via BM_Export.
					Drops the export count; FreeBuf only releases resources
					once both the export and reference counts reach zero.

	@Input	hBuf - buffer handle.
	@Input	ui32Flags - flags

	@Return	None.
**************************************************************************/
IMG_VOID
BM_FreeExport(BM_HANDLE hBuf,
		IMG_UINT32 ui32Flags)
{
	BM_BUF *pBuf = (BM_BUF *)hBuf;

	PVRSRVBMBufDecExport(pBuf);
	FreeBuf (pBuf, ui32Flags, IMG_FALSE
#if defined (PVRSRV_DEVMEM_TIME_STATS)
			, IMG_NULL
#endif
			);
}

/*!
+****************************************************************************** + @Function BM_Free + + @Description Free a buffer previously allocated via BM_Alloc + + @Input hBuf - buffer handle. + @Input ui32Flags - flags + + @Return None. +**************************************************************************/ +IMG_VOID +BM_Free (BM_HANDLE hBuf, + IMG_UINT32 ui32Flags + #if defined (PVRSRV_DEVMEM_TIME_STATS) + ,IMG_UINT32 *pui32TimeToDevUnmap + #endif + ) +{ + BM_BUF *pBuf = (BM_BUF *)hBuf; + SYS_DATA *psSysData; + IMG_SYS_PHYADDR sHashAddr; + + PVR_DPF ((PVR_DBG_MESSAGE, "BM_Free (h=0x%p)", hBuf)); + PVR_ASSERT (pBuf!=IMG_NULL); + + if (pBuf == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "BM_Free: invalid parameter")); + return; + } + + SysAcquireData(&psSysData); + + PVRSRVBMBufDecRef(pBuf); + if(pBuf->ui32RefCount == 0) + { + if(pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped || pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped_virtaddr) + { + sHashAddr = SysCpuPAddrToSysPAddr(pBuf->CpuPAddr); + + HASH_Remove (pBuf->pMapping->pBMHeap->pBMContext->pBufferHash, (IMG_UINTPTR_T)sHashAddr.uiAddr); + } + FreeBuf (pBuf, ui32Flags, IMG_TRUE + #if defined(PVRSRV_DEVMEM_TIME_STATS) + ,pui32TimeToDevUnmap + #endif + ); + } +} + + +/*! +****************************************************************************** + + @Function BM_HandleToCpuVaddr + + @Description Retrieve the cpu virtual address associated with a buffer. + + @Input buffer handle. + + @Return buffers cpu virtual address, or NULL if none exists + + *****************************************************************************/ +IMG_CPU_VIRTADDR +BM_HandleToCpuVaddr (BM_HANDLE hBuf) +{ + BM_BUF *pBuf = (BM_BUF *)hBuf; + + PVR_ASSERT (pBuf != IMG_NULL); + if (pBuf == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "BM_HandleToCpuVaddr: invalid parameter")); + return IMG_NULL; + } + + PVR_DPF ((PVR_DBG_MESSAGE, + "BM_HandleToCpuVaddr(h=0x%p)=0x%p", + hBuf, pBuf->CpuVAddr)); + return pBuf->CpuVAddr; +} + + +/*! 
+****************************************************************************** + + @Function BM_HandleToDevVaddr + + @Description Retrieve the device virtual address associated with a buffer. + + @Input hBuf - buffer handle. + + @Return buffers device virtual address. + + *****************************************************************************/ +IMG_DEV_VIRTADDR +BM_HandleToDevVaddr (BM_HANDLE hBuf) +{ + BM_BUF *pBuf = (BM_BUF *)hBuf; + + PVR_ASSERT (pBuf != IMG_NULL); + if (pBuf == IMG_NULL) + { + IMG_DEV_VIRTADDR DevVAddr = {0}; + PVR_DPF((PVR_DBG_ERROR, "BM_HandleToDevVaddr: invalid parameter")); + return DevVAddr; + } + + PVR_DPF ((PVR_DBG_MESSAGE, "BM_HandleToDevVaddr(h=0x%p)=%08X", hBuf, pBuf->DevVAddr.uiAddr)); + return pBuf->DevVAddr; +} + + +/*! +****************************************************************************** + + @Function BM_HandleToSysPaddr + + @Description Retrieve the system physical address associated with a buffer. + + @Input hBuf - buffer handle. + + @Return buffers system physical address. + + *****************************************************************************/ +IMG_SYS_PHYADDR +BM_HandleToSysPaddr (BM_HANDLE hBuf) +{ + BM_BUF *pBuf = (BM_BUF *)hBuf; + + PVR_ASSERT (pBuf != IMG_NULL); + + if (pBuf == IMG_NULL) + { + IMG_SYS_PHYADDR PhysAddr = {0}; + PVR_DPF((PVR_DBG_ERROR, "BM_HandleToSysPaddr: invalid parameter")); + return PhysAddr; + } + + PVR_DPF ((PVR_DBG_MESSAGE, "BM_HandleToSysPaddr(h=0x%p)=" CPUPADDR_FMT, hBuf, pBuf->CpuPAddr.uiAddr)); + return SysCpuPAddrToSysPAddr (pBuf->CpuPAddr); +} + +/*! +****************************************************************************** + + @Function BM_HandleToOSMemHandle + + @Description Retrieve the underlying memory handle associated with a buffer. + + @Input hBuf - buffer handle. + + @Return OS Specific memory handle. 
+ + *****************************************************************************/ +IMG_HANDLE +BM_HandleToOSMemHandle(BM_HANDLE hBuf) +{ + BM_BUF *pBuf = (BM_BUF *)hBuf; + + PVR_ASSERT (pBuf != IMG_NULL); + + if (pBuf == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "BM_HandleToOSMemHandle: invalid parameter")); + return IMG_NULL; + } + + PVR_DPF ((PVR_DBG_MESSAGE, + "BM_HandleToOSMemHandle(h=0x%p)=0x%p", + hBuf, pBuf->hOSMemHandle)); + return pBuf->hOSMemHandle; +} + +/*! +****************************************************************************** + + @Function DevMemoryAlloc + + @Description Allocate device memory for a given physical/virtual memory + mapping. We handle the main cases where device MMU mappings + are required - these are the dynamic cases: all wrappings of + host OS memory and host OS imports for SYS_MMU_NORMAL mode. + + If no MMU support is required then we simply map device virtual + space as device physical space. + + @Input pBMContext - the pager to allocate from. + @Output pMapping - the mapping descriptor to be filled in for this + allocation. + @Output pActualSize - the actual size of the block allocated in + bytes. + @Input ui32Flags - allocation flags + @Input dev_vaddr_alignment - required device virtual address + alignment, or 0. + @Output pDevVAddr - receives the device virtual base address of the + allocated block. + @Return IMG_TRUE - Success + IMG_FALSE - Failed. 
+ + *****************************************************************************/ +static IMG_BOOL +DevMemoryAlloc (BM_CONTEXT *pBMContext, + BM_MAPPING *pMapping, + IMG_SIZE_T *pActualSize, + IMG_UINT32 ui32Flags, + IMG_UINT32 dev_vaddr_alignment, + IMG_DEV_VIRTADDR *pDevVAddr) +{ + PVRSRV_DEVICE_NODE *psDeviceNode; +#ifdef PDUMP + IMG_UINT32 ui32PDumpSize = (IMG_UINT32)pMapping->uSize; + IMG_UINT32 ui32PDumpFlags; +#endif + + psDeviceNode = pBMContext->psDeviceNode; + +#ifdef PDUMP +#if defined(SUPPORT_PDUMP_MULTI_PROCESS) + ui32PDumpFlags = psDeviceNode->pfnMMUIsHeapShared(pMapping->pBMHeap->pMMUHeap) + ? PDUMP_FLAGS_PERSISTENT : PDUMP_FLAGS_CONTINUOUS; +#else + ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS; +#endif +#endif + + if(ui32Flags & PVRSRV_MEM_INTERLEAVED) + { + /* double the size */ + pMapping->uSize *= 2; + } + +#ifdef PDUMP + if(ui32Flags & PVRSRV_MEM_DUMMY) + { + /* only one page behind a dummy allocation */ + ui32PDumpSize = pMapping->pBMHeap->sDevArena.ui32DataPageSize; + } +#endif + + /* Check we haven't fall through a gap */ + PVR_ASSERT(pMapping->uSizeVM != 0); + /* allocate device linear space */ + if (!psDeviceNode->pfnMMUAlloc (pMapping->pBMHeap->pMMUHeap, + pMapping->uSizeVM, + pActualSize, + 0, + dev_vaddr_alignment, + &(pMapping->DevVAddr))) + { + PVR_DPF((PVR_DBG_ERROR, "DevMemoryAlloc ERROR MMU_Alloc")); + return IMG_FALSE; + } + +#ifdef SUPPORT_SGX_MMU_BYPASS + EnableHostAccess(pBMContext->psMMUContext); +#endif + +#if defined(PDUMP) + /* pdump the memory allocate */ + PDUMPMALLOCPAGES(&psDeviceNode->sDevId, + pMapping->DevVAddr.uiAddr, + pMapping->CpuVAddr, + pMapping->hOSMemHandle, + ui32PDumpSize, + pMapping->pBMHeap->sDevArena.ui32DataPageSize, + (IMG_HANDLE)pMapping, + ui32PDumpFlags); +#endif + + switch (pMapping->eCpuMemoryOrigin) + { + case hm_wrapped: + case hm_wrapped_virtaddr: + case hm_contiguous: + { + if (ui32Flags & PVRSRV_MEM_SPARSE) + { + /* Check if this device supports sparse mappings */ + 
PVR_ASSERT(psDeviceNode->pfnMMUMapPagesSparse != IMG_NULL); + psDeviceNode->pfnMMUMapPagesSparse(pMapping->pBMHeap->pMMUHeap, + pMapping->DevVAddr, + SysCpuPAddrToSysPAddr (pMapping->CpuPAddr), + pMapping->ui32ChunkSize, + pMapping->ui32NumVirtChunks, + pMapping->ui32NumPhysChunks, + pMapping->pabMapChunk, + ui32Flags, + (IMG_HANDLE)pMapping); + } + else + { + psDeviceNode->pfnMMUMapPages ( pMapping->pBMHeap->pMMUHeap, + pMapping->DevVAddr, + SysCpuPAddrToSysPAddr (pMapping->CpuPAddr), + pMapping->uSize, + ui32Flags, + (IMG_HANDLE)pMapping); + } + *pDevVAddr = pMapping->DevVAddr; + break; + } + case hm_env: + { + if (ui32Flags & PVRSRV_MEM_SPARSE) + { + /* Check if this device supports sparse mappings */ + PVR_ASSERT(psDeviceNode->pfnMMUMapShadowSparse != IMG_NULL); + psDeviceNode->pfnMMUMapShadowSparse(pMapping->pBMHeap->pMMUHeap, + pMapping->DevVAddr, + pMapping->ui32ChunkSize, + pMapping->ui32NumVirtChunks, + pMapping->ui32NumPhysChunks, + pMapping->pabMapChunk, + pMapping->CpuVAddr, + pMapping->hOSMemHandle, + pDevVAddr, + ui32Flags, + (IMG_HANDLE)pMapping); + } + else + { + psDeviceNode->pfnMMUMapShadow ( pMapping->pBMHeap->pMMUHeap, + pMapping->DevVAddr, + pMapping->uSize, + pMapping->CpuVAddr, + pMapping->hOSMemHandle, + pDevVAddr, + ui32Flags, + (IMG_HANDLE)pMapping); + } + break; + } + case hm_wrapped_scatter: + case hm_wrapped_scatter_virtaddr: + { + psDeviceNode->pfnMMUMapScatter (pMapping->pBMHeap->pMMUHeap, + pMapping->DevVAddr, + pMapping->psSysAddr, + pMapping->uSize, + ui32Flags, + (IMG_HANDLE)pMapping); + + *pDevVAddr = pMapping->DevVAddr; + break; + } + default: + PVR_DPF((PVR_DBG_ERROR, + "Illegal value %d for pMapping->eCpuMemoryOrigin", + pMapping->eCpuMemoryOrigin)); + return IMG_FALSE; + } + +#ifdef SUPPORT_SGX_MMU_BYPASS + DisableHostAccess(pBMContext->psMMUContext); +#endif + + return IMG_TRUE; +} + +static IMG_VOID +DevMemoryFree (BM_MAPPING *pMapping) +{ + PVRSRV_DEVICE_NODE *psDeviceNode; + IMG_DEV_PHYADDR sDevPAddr; +#ifdef PDUMP + 
IMG_UINT32 ui32PSize; + IMG_UINT32 ui32PDumpFlags; +#endif + + psDeviceNode = pMapping->pBMHeap->pBMContext->psDeviceNode; + sDevPAddr = psDeviceNode->pfnMMUGetPhysPageAddr(pMapping->pBMHeap->pMMUHeap, pMapping->DevVAddr); + +#ifdef PDUMP +#if defined(SUPPORT_PDUMP_MULTI_PROCESS) + ui32PDumpFlags = psDeviceNode->pfnMMUIsHeapShared(pMapping->pBMHeap->pMMUHeap) + ? PDUMP_FLAGS_PERSISTENT : PDUMP_FLAGS_CONTINUOUS; +#else + ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS; +#endif +#endif + + if (sDevPAddr.uiAddr != 0) + { +#ifdef PDUMP + /* pdump the memory free */ + if(pMapping->ui32Flags & PVRSRV_MEM_DUMMY) + { + /* physical memory size differs in the case of Dummy allocations */ + ui32PSize = pMapping->pBMHeap->sDevArena.ui32DataPageSize; + } + else + { + ui32PSize = (IMG_UINT32)pMapping->uSize; + } + + PDUMPFREEPAGES(pMapping->pBMHeap, + pMapping->DevVAddr, + ui32PSize, + pMapping->pBMHeap->sDevArena.ui32DataPageSize, + (IMG_HANDLE)pMapping, + (pMapping->ui32Flags & PVRSRV_MEM_INTERLEAVED) ? IMG_TRUE : IMG_FALSE, + (pMapping->ui32Flags & PVRSRV_MEM_SPARSE) ? IMG_TRUE : IMG_FALSE, + ui32PDumpFlags); +#endif + } + PVR_ASSERT(pMapping->uSizeVM != 0); + psDeviceNode->pfnMMUFree (pMapping->pBMHeap->pMMUHeap, pMapping->DevVAddr, IMG_CAST_TO_DEVVADDR_UINT(pMapping->uSizeVM)); +} + +/* If this array grows larger, it might be preferable to use a hashtable rather than an array. 
*/ +#ifndef XPROC_WORKAROUND_NUM_SHAREABLES +#define XPROC_WORKAROUND_NUM_SHAREABLES 200 +#endif + +#define XPROC_WORKAROUND_BAD_SHAREINDEX 0773407734 + +#define XPROC_WORKAROUND_UNKNOWN 0 +#define XPROC_WORKAROUND_ALLOC 1 +#define XPROC_WORKAROUND_MAP 2 + +static IMG_UINT32 gXProcWorkaroundShareIndex = XPROC_WORKAROUND_BAD_SHAREINDEX; +static IMG_UINT32 gXProcWorkaroundState = XPROC_WORKAROUND_UNKNOWN; + +/* PRQA S 0686 10 */ /* force compiler to init structure */ +XPROC_DATA gXProcWorkaroundShareData[XPROC_WORKAROUND_NUM_SHAREABLES] = {{0}}; + +PVRSRV_ERROR BM_XProcWorkaroundSetShareIndex(IMG_UINT32 ui32Index) +{ + /* if you fail this assertion - did you acquire the mutex? + did you call "set" exactly once? + did you call "unset" exactly once per set? + */ + if (gXProcWorkaroundShareIndex != XPROC_WORKAROUND_BAD_SHAREINDEX) + { + PVR_DPF((PVR_DBG_ERROR, "No, it's already set!")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + gXProcWorkaroundShareIndex = ui32Index; + gXProcWorkaroundState = XPROC_WORKAROUND_MAP; + + return PVRSRV_OK; +} + +PVRSRV_ERROR BM_XProcWorkaroundUnsetShareIndex(IMG_UINT32 ui32Index) +{ + /* if you fail this assertion - did you acquire the mutex? + did you call "set" exactly once? + did you call "unset" exactly once per set? + */ + if (gXProcWorkaroundShareIndex == XPROC_WORKAROUND_BAD_SHAREINDEX) + { + PVR_DPF((PVR_DBG_ERROR, "huh? how can it be bad??")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + if (gXProcWorkaroundShareIndex != ui32Index) + { + PVR_DPF((PVR_DBG_ERROR, "gXProcWorkaroundShareIndex == 0x%08x != 0x%08x == ui32Index", gXProcWorkaroundShareIndex, ui32Index)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + gXProcWorkaroundShareIndex = XPROC_WORKAROUND_BAD_SHAREINDEX; + gXProcWorkaroundState = XPROC_WORKAROUND_UNKNOWN; + + return PVRSRV_OK; +} + +PVRSRV_ERROR BM_XProcWorkaroundFindNewBufferAndSetShareIndex(IMG_UINT32 *pui32Index) +{ + /* if you fail this assertion - did you acquire the mutex? + did you call "set" exactly once? 
+ did you call "unset" exactly once per set? + */ + if (gXProcWorkaroundShareIndex != XPROC_WORKAROUND_BAD_SHAREINDEX) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + for (*pui32Index = 0; *pui32Index < XPROC_WORKAROUND_NUM_SHAREABLES; (*pui32Index)++) + { + if (gXProcWorkaroundShareData[*pui32Index].ui32RefCount == 0) + { + gXProcWorkaroundShareIndex = *pui32Index; + gXProcWorkaroundState = XPROC_WORKAROUND_ALLOC; + return PVRSRV_OK; + } + } + + PVR_DPF((PVR_DBG_ERROR, "ran out of shared buffers")); + return PVRSRV_ERROR_OUT_OF_MEMORY; +} + +static PVRSRV_ERROR +XProcWorkaroundAllocShareable(RA_ARENA *psArena, + IMG_UINT32 ui32AllocFlags, + IMG_UINT32 ui32Size, + IMG_UINT32 ui32PageSize, + IMG_PVOID pvPrivData, + IMG_UINT32 ui32PrivDataLength, + IMG_VOID **ppvCpuVAddr, + IMG_HANDLE *phOSMemHandle) +{ + if ((ui32AllocFlags & PVRSRV_MEM_XPROC) == 0) + { + PVR_DPF((PVR_DBG_VERBOSE, "XProcWorkaroundAllocShareable: bad flags")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + if (gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].ui32RefCount > 0) + { + PVR_DPF((PVR_DBG_VERBOSE, + "XProcWorkaroundAllocShareable: re-using previously allocated pages")); + + ui32AllocFlags &= ~PVRSRV_HAP_MAPTYPE_MASK; + ui32AllocFlags |= PVRSRV_HAP_SINGLE_PROCESS; + + if (ui32AllocFlags != gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].ui32AllocFlags) + { + PVR_DPF((PVR_DBG_ERROR, + "Can't! Flags don't match! (I had 0x%08x, you gave 0x%08x)", + gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].ui32AllocFlags, + ui32AllocFlags)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + if (ui32Size != gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].ui32Size) + { + PVR_DPF((PVR_DBG_ERROR, + "Can't! Size doesn't match!")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + if (ui32PageSize != gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].ui32PageSize) + { + PVR_DPF((PVR_DBG_ERROR, + "Can't! 
Page Size doesn't match!")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + *ppvCpuVAddr = gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].pvCpuVAddr; + *phOSMemHandle = gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].hOSMemHandle; + + BM_XProcIndexAcquire(gXProcWorkaroundShareIndex); + + return PVRSRV_OK; + } + else + { + if (gXProcWorkaroundState != XPROC_WORKAROUND_ALLOC) + { + PVR_DPF((PVR_DBG_ERROR, + "XPROC workaround in bad state! About to allocate memory from non-alloc state! (%d)", + gXProcWorkaroundState)); + } + PVR_ASSERT(gXProcWorkaroundState == XPROC_WORKAROUND_ALLOC); + + if (psArena != IMG_NULL) + { + IMG_CPU_PHYADDR sCpuPAddr; + IMG_SYS_PHYADDR sSysPAddr; + + PVR_DPF((PVR_DBG_VERBOSE, + "XProcWorkaroundAllocShareable: making a NEW allocation from local mem")); + + if (!RA_Alloc (psArena, + ui32Size, + IMG_NULL, + IMG_NULL, + 0, + ui32PageSize, + 0, + pvPrivData, + ui32PrivDataLength, + (IMG_UINTPTR_T *)&sSysPAddr.uiAddr)) + { + PVR_DPF((PVR_DBG_ERROR, "XProcWorkaroundAllocShareable: RA_Alloc(0x%x) FAILED", ui32Size)); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr); + if(OSReservePhys(sCpuPAddr, + ui32Size, + ui32AllocFlags, + IMG_NULL, + (IMG_VOID **)&gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].pvCpuVAddr, + &gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].hOSMemHandle) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "XProcWorkaroundAllocShareable: OSReservePhys failed")); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].sSysPAddr = sSysPAddr; + } + else + { + PVR_DPF((PVR_DBG_VERBOSE, + "XProcWorkaroundAllocShareable: making a NEW allocation from OS")); + + ui32AllocFlags &= ~PVRSRV_HAP_MAPTYPE_MASK; + ui32AllocFlags |= PVRSRV_HAP_SINGLE_PROCESS; + + /* allocate pages from the OS RAM */ + if (OSAllocPages(ui32AllocFlags, + ui32Size, + ui32PageSize, + pvPrivData, + ui32PrivDataLength, + IMG_NULL, /* FIXME: to support cross process 
sparse allocations */ + (IMG_VOID **)&gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].pvCpuVAddr, + &gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].hOSMemHandle) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "XProcWorkaroundAllocShareable: OSAllocPages(0x%x) failed", + ui32PageSize)); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + } + + gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].psArena = psArena; + gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].ui32AllocFlags = ui32AllocFlags; + gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].ui32Size = ui32Size; + gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].ui32PageSize = ui32PageSize; + + *ppvCpuVAddr = gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].pvCpuVAddr; + *phOSMemHandle = gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].hOSMemHandle; + + BM_XProcIndexAcquire(gXProcWorkaroundShareIndex); + + return PVRSRV_OK; + } +} + +static PVRSRV_ERROR XProcWorkaroundHandleToSI(IMG_HANDLE hOSMemHandle, IMG_UINT32 *pui32SI) +{ + IMG_UINT32 ui32SI; + IMG_BOOL bFound; + IMG_BOOL bErrorDups; + + bFound = IMG_FALSE; + bErrorDups = IMG_FALSE; + + for (ui32SI = 0; ui32SI < XPROC_WORKAROUND_NUM_SHAREABLES; ui32SI++) + { + if (gXProcWorkaroundShareData[ui32SI].ui32RefCount>0 && gXProcWorkaroundShareData[ui32SI].hOSMemHandle == hOSMemHandle) + { + if (bFound) + { + bErrorDups = IMG_TRUE; + } + else + { + *pui32SI = ui32SI; + bFound = IMG_TRUE; + } + } + } + + if (bErrorDups || !bFound) + { + return PVRSRV_ERROR_BM_BAD_SHAREMEM_HANDLE; + } + + return PVRSRV_OK; +} + +#if defined(PVRSRV_REFCOUNT_DEBUG) +IMG_VOID _BM_XProcIndexAcquireDebug(const IMG_CHAR *pszFile, IMG_INT iLine, IMG_UINT32 ui32Index) +#else +IMG_VOID _BM_XProcIndexAcquire(IMG_UINT32 ui32Index) +#endif +{ +#if defined(PVRSRV_REFCOUNT_DEBUG) + PVRSRVBMXProcIncRef2(pszFile, iLine, ui32Index); +#else + PVRSRVBMXProcIncRef(ui32Index); +#endif +} + +#if defined(PVRSRV_REFCOUNT_DEBUG) +IMG_VOID _BM_XProcIndexReleaseDebug(const 
IMG_CHAR *pszFile, IMG_INT iLine, IMG_UINT32 ui32Index) +#else +IMG_VOID _BM_XProcIndexRelease(IMG_UINT32 ui32Index) +#endif +{ +#if defined(PVRSRV_REFCOUNT_DEBUG) + PVRSRVBMXProcDecRef2(pszFile, iLine, ui32Index); +#else + PVRSRVBMXProcDecRef(ui32Index); +#endif + + PVR_DPF((PVR_DBG_VERBOSE, "Reduced refcount of SI[%d] from %d to %d", + ui32Index, gXProcWorkaroundShareData[ui32Index].ui32RefCount+1, gXProcWorkaroundShareData[ui32Index].ui32RefCount)); + + if (gXProcWorkaroundShareData[ui32Index].ui32RefCount == 0) + { + if (gXProcWorkaroundShareData[ui32Index].psArena != IMG_NULL) + { + IMG_SYS_PHYADDR sSysPAddr; + + if (gXProcWorkaroundShareData[ui32Index].pvCpuVAddr != IMG_NULL) + { + OSUnReservePhys(gXProcWorkaroundShareData[ui32Index].pvCpuVAddr, + gXProcWorkaroundShareData[ui32Index].ui32Size, + gXProcWorkaroundShareData[ui32Index].ui32AllocFlags, + gXProcWorkaroundShareData[ui32Index].hOSMemHandle); + } + sSysPAddr = gXProcWorkaroundShareData[ui32Index].sSysPAddr; + RA_Free (gXProcWorkaroundShareData[ui32Index].psArena, + (IMG_UINTPTR_T)sSysPAddr.uiAddr, + IMG_FALSE); + } + else + { + PVR_DPF((PVR_DBG_VERBOSE, "freeing OS memory")); + OSFreePages(gXProcWorkaroundShareData[ui32Index].ui32AllocFlags, + gXProcWorkaroundShareData[ui32Index].ui32PageSize, + gXProcWorkaroundShareData[ui32Index].pvCpuVAddr, + gXProcWorkaroundShareData[ui32Index].hOSMemHandle); + } + } +} + +static IMG_VOID XProcWorkaroundFreeShareable(IMG_HANDLE hOSMemHandle) +{ + IMG_UINT32 ui32SI = (IMG_UINT32)((IMG_UINTPTR_T)hOSMemHandle & 0xffffU); + PVRSRV_ERROR eError; + + eError = XProcWorkaroundHandleToSI(hOSMemHandle, &ui32SI); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "bad handle")); + return; + } + + BM_XProcIndexRelease(ui32SI); +} + + +/*! +****************************************************************************** + + @Function BM_ImportMemory + + @Description Provide a resource allocator with a source of pages of memory + from the Host OS's own allocation. 
Allocates a block of pages + larger than requested, allowing the resource allocator to + operate a small cache of pre-allocated pages. + + @Input pH - buffer manager handle, note the void type is dictated + by the generic nature of the resource allocator interface. + @Input uRequestSize - requested size in bytes + @Output pActualSize - receives the actual size allocated in bytes + which may be >= requested size + @Output ppsMapping - receives the arbitrary user reference + associated with the underlying storage. + @Input ui32Flags - bit mask of allocation flags + @Input pvPrivData - opaque private data passed through to allocator + @Input ui32PrivDataLength - length of opaque private data + @Output pBase - receives a pointer to the allocated storage. + + @Return IMG_TRUE - success + IMG_FALSE - failed + + *****************************************************************************/ +static IMG_BOOL +BM_ImportMemory (IMG_VOID *pH, + IMG_SIZE_T uRequestSize, + IMG_SIZE_T *pActualSize, + BM_MAPPING **ppsMapping, + IMG_UINT32 ui32Flags, + IMG_PVOID pvPrivData, + IMG_UINT32 ui32PrivDataLength, + IMG_UINTPTR_T *pBase) +{ + BM_MAPPING *pMapping; + BM_HEAP *pBMHeap = pH; + BM_CONTEXT *pBMContext = pBMHeap->pBMContext; + IMG_BOOL bResult; + IMG_SIZE_T uSize; + IMG_SIZE_T uPSize; + IMG_SIZE_T uDevVAddrAlignment = 0; /* ? 
*/ + + PVR_DPF ((PVR_DBG_MESSAGE, + "BM_ImportMemory (pBMContext=0x%p, uRequestSize=0x%" SIZE_T_FMT_LEN + "x, ui32Flags=0x%x, uAlign=0x%" SIZE_T_FMT_LEN "x)", + pBMContext, uRequestSize, ui32Flags, uDevVAddrAlignment)); + + PVR_ASSERT (ppsMapping != IMG_NULL); + PVR_ASSERT (pBMContext != IMG_NULL); + + if (ppsMapping == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "BM_ImportMemory: invalid parameter")); + goto fail_exit; + } + + uSize = HOST_PAGEALIGN (uRequestSize); + PVR_ASSERT (uSize >= uRequestSize); + + if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof (BM_MAPPING), + (IMG_PVOID *)&pMapping, IMG_NULL, + "Buffer Manager Mapping") != PVRSRV_OK) + { + PVR_DPF ((PVR_DBG_ERROR, "BM_ImportMemory: failed BM_MAPPING alloc")); + goto fail_exit; + } + + pMapping->hOSMemHandle = 0; + pMapping->CpuVAddr = 0; + pMapping->DevVAddr.uiAddr = 0; + pMapping->CpuPAddr.uiAddr = 0; + pMapping->uSize = uSize; + if ((ui32Flags & PVRSRV_MEM_SPARSE) == 0) + { + pMapping->uSizeVM = uSize; + } + pMapping->pBMHeap = pBMHeap; + pMapping->ui32Flags = ui32Flags; +#if defined (PVRSRV_DEVMEM_TIME_STATS) + pMapping->pui32TimeToDevUnmap = IMG_NULL; +#endif + + /* + * If anyone wants to know, pass back the actual size of our allocation. + * There could be up to an extra page's worth of memory which will be marked + * as free in the RA. 
+ */ + if (pActualSize) + { + *pActualSize = uSize; + } + + /* if it's a dummy allocation only use one physical page */ + if(pMapping->ui32Flags & PVRSRV_MEM_DUMMY) + { + uPSize = pBMHeap->sDevArena.ui32DataPageSize; + } + else + { + uPSize = pMapping->uSize; + } + + if (ui32Flags & PVRSRV_MEM_XPROC) + { + IMG_UINT32 ui32Attribs = pBMHeap->ui32Attribs | PVRSRV_MEM_XPROC; + IMG_BOOL bBadBackingStoreType; + + bBadBackingStoreType = IMG_TRUE; + + if ((ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG) != 0) + { + uDevVAddrAlignment = MAX(pBMHeap->sDevArena.ui32DataPageSize, HOST_PAGESIZE()); + + + if (uPSize % uDevVAddrAlignment != 0) + { + PVR_DPF((PVR_DBG_ERROR, "Cannot use this memory sharing workaround with allocations that might be suballocated")); + goto fail_mapping_alloc; + } + uDevVAddrAlignment = 0; /* FIXME: find out why it doesn't work if alignment is specified */ + + /* If the user has specified heap CACHETYPE flags, use them to + * override the flags inherited from the heap. + */ + if (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK) + { + ui32Attribs &= ~PVRSRV_HAP_CACHETYPE_MASK; + ui32Attribs |= (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK); + } + + /* allocate "shared" pages. 
*/ + if (XProcWorkaroundAllocShareable(IMG_NULL, + ui32Attribs, + (IMG_UINT32)uPSize, + pBMHeap->sDevArena.ui32DataPageSize, + pvPrivData, + ui32PrivDataLength, + (IMG_VOID **)&pMapping->CpuVAddr, + &pMapping->hOSMemHandle) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "BM_ImportMemory: XProcWorkaroundAllocShareable(0x%" SIZE_T_FMT_LEN "x) failed", + uPSize)); + goto fail_mapping_alloc; + } + + /* specify how page addresses are derived */ + /* it works just like "env" now - no need to record + it as shareable, as we use the actual hOSMemHandle + and only divert to our wrapper layer based on Attribs */ + pMapping->eCpuMemoryOrigin = hm_env; + bBadBackingStoreType = IMG_FALSE; + } + + if ((ui32Attribs & PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG) != 0) + { + uDevVAddrAlignment = pBMHeap->sDevArena.ui32DataPageSize; + + if (uPSize % uDevVAddrAlignment != 0) + { + PVR_DPF((PVR_DBG_ERROR, "Cannot use this memory sharing workaround with allocations that might be suballocated")); + goto fail_mapping_alloc; + } + uDevVAddrAlignment = 0; /* FIXME: find out why it doesn't work if alignment is specified */ + + /* If the user has specified heap CACHETYPE flags, use them to + * override the flags inherited from the heap. + */ + if (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK) + { + ui32Attribs &= ~PVRSRV_HAP_CACHETYPE_MASK; + ui32Attribs |= (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK); + } + + /* allocate "shared" pages. 
*/ + if (XProcWorkaroundAllocShareable(pBMHeap->pLocalDevMemArena, + ui32Attribs, + (IMG_UINT32)uPSize, + pBMHeap->sDevArena.ui32DataPageSize, + pvPrivData, + ui32PrivDataLength, + (IMG_VOID **)&pMapping->CpuVAddr, + &pMapping->hOSMemHandle) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "BM_ImportMemory: XProcWorkaroundAllocShareable(0x%" SIZE_T_FMT_LEN "x) failed", + uPSize)); + goto fail_mapping_alloc; + } + + /* specify how page addresses are derived */ + /* it works just like "env" now - no need to record + it as shareable, as we use the actual hOSMemHandle + and only divert to our wrapper layer based on Attribs */ + pMapping->eCpuMemoryOrigin = hm_env; + bBadBackingStoreType = IMG_FALSE; + } + + if (bBadBackingStoreType) + { + PVR_DPF((PVR_DBG_ERROR, "Cannot use this memory sharing workaround with this type of backing store")); + goto fail_mapping_alloc; + } + } + else + + /* + What type of backing store do we have? + */ + if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG) + { + IMG_UINT32 ui32Attribs = pBMHeap->ui32Attribs; + + /* The allocation code needs to know this is a sparse mapping */ + if (pMapping->ui32Flags & PVRSRV_MEM_SPARSE) + { + ui32Attribs |= PVRSRV_MEM_SPARSE; + } + + /* If the user has specified heap CACHETYPE flags, use them to + * override the flags inherited from the heap. 
+ */ + if (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK) + { + ui32Attribs &= ~PVRSRV_HAP_CACHETYPE_MASK; + ui32Attribs |= (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK); + } + + if (pMapping->ui32Flags & PVRSRV_MEM_ALLOCATENONCACHEDMEM) + { + ui32Attribs &= ~PVRSRV_MEM_ALLOCATENONCACHEDMEM; + ui32Attribs |= (pMapping->ui32Flags & PVRSRV_MEM_ALLOCATENONCACHEDMEM); + } + + /* allocate pages from the OS RAM */ + if (OSAllocPages(ui32Attribs, + uPSize, + pBMHeap->sDevArena.ui32DataPageSize, + pvPrivData, + ui32PrivDataLength, + pMapping, + (IMG_VOID **)&pMapping->CpuVAddr, + &pMapping->hOSMemHandle) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "BM_ImportMemory: OSAllocPages(0x%" SIZE_T_FMT_LEN "x) failed", + uPSize)); + goto fail_mapping_alloc; + } + + /* specify how page addresses are derived */ + pMapping->eCpuMemoryOrigin = hm_env; + } + else if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG) + { + IMG_SYS_PHYADDR sSysPAddr; + IMG_UINT32 ui32Attribs = pBMHeap->ui32Attribs; + + /* The allocation code needs to know this is a sparse mapping */ + if (pMapping->ui32Flags & PVRSRV_MEM_SPARSE) + { + ui32Attribs |= PVRSRV_MEM_SPARSE; + } + + /* allocate pages from the local device memory allocator */ + PVR_ASSERT(pBMHeap->pLocalDevMemArena != IMG_NULL); + + /* If the user has specified heap CACHETYPE flags, use them to + * override the flags inherited from the heap. 
+ */ + if (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK) + { + ui32Attribs &= ~PVRSRV_HAP_CACHETYPE_MASK; + ui32Attribs |= (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK); + } + + if (!RA_Alloc (pBMHeap->pLocalDevMemArena, + uPSize, + IMG_NULL, + IMG_NULL, + 0, + pBMHeap->sDevArena.ui32DataPageSize, + 0, + pvPrivData, + ui32PrivDataLength, + (IMG_UINTPTR_T *)&sSysPAddr.uiAddr)) + { + PVR_DPF((PVR_DBG_ERROR, "BM_ImportMemory: RA_Alloc(0x%" SIZE_T_FMT_LEN "x) FAILED", uPSize)); + goto fail_mapping_alloc; + } + + /* derive the CPU virtual address */ + pMapping->CpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr); + if(OSReservePhys(pMapping->CpuPAddr, + uPSize, + ui32Attribs, + pMapping, + &pMapping->CpuVAddr, + &pMapping->hOSMemHandle) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "BM_ImportMemory: OSReservePhys failed")); + goto fail_dev_mem_alloc; + } + + /* specify how page addresses are derived */ + pMapping->eCpuMemoryOrigin = hm_contiguous; + } + else + { + PVR_DPF((PVR_DBG_ERROR, "BM_ImportMemory: Invalid backing store type")); + goto fail_mapping_alloc; + } + + /* + * Allocate some device memory for what we just allocated. 
+ */ + if ((ui32Flags & PVRSRV_MEM_SPARSE) == 0) + { +#if defined (PVRSRV_DEVMEM_TIME_STATS) + IMG_UINT64 ui64TimeStart; + ui64TimeStart = OSClockMonotonicus(); +#endif + bResult = DevMemoryAlloc (pBMContext, + pMapping, + IMG_NULL, + ui32Flags, + (IMG_UINT32)uDevVAddrAlignment, + &pMapping->DevVAddr); + if (!bResult) + { + PVR_DPF((PVR_DBG_ERROR, + "BM_ImportMemory: DevMemoryAlloc(0x%" SIZE_T_FMT_LEN "x) failed", + pMapping->uSize)); + goto fail_dev_mem_alloc; + } +#if defined (PVRSRV_DEVMEM_TIME_STATS) + pMapping->ui32TimeToDevMap = OSClockMonotonicus() - ui64TimeStart; +#endif + + /* uDevVAddrAlignment is currently set to zero so QAC generates warning which we override */ + /* PRQA S 3356,3358 1 */ + PVR_ASSERT (uDevVAddrAlignment>1?(pMapping->DevVAddr.uiAddr%uDevVAddrAlignment)==0:1); + PVR_ASSERT(pBase); + *pBase = pMapping->DevVAddr.uiAddr; + } + + *ppsMapping = pMapping; + + PVR_DPF ((PVR_DBG_MESSAGE, "BM_ImportMemory: IMG_TRUE")); + return IMG_TRUE; + +fail_dev_mem_alloc: + /* pMapping must be valid: if the allocation failed, we'd have jumped to fail_exit */ + if (pMapping->CpuVAddr || pMapping->hOSMemHandle) + { + /* the size is double the actual size for interleaved allocations */ + if(pMapping->ui32Flags & PVRSRV_MEM_INTERLEAVED) + { + pMapping->uSize /= 2; + } + + if(pMapping->ui32Flags & PVRSRV_MEM_DUMMY) + { + uPSize = pBMHeap->sDevArena.ui32DataPageSize; + } + else + { + uPSize = pMapping->uSize; + } + + if (ui32Flags & PVRSRV_MEM_XPROC) + { + XProcWorkaroundFreeShareable(pMapping->hOSMemHandle); + } + else + if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG) + { + OSFreePages(pBMHeap->ui32Attribs, + uPSize, + (IMG_VOID *)pMapping->CpuVAddr, + pMapping->hOSMemHandle); + } + else + { + IMG_SYS_PHYADDR sSysPAddr; + + if(pMapping->CpuVAddr) + { + OSUnReservePhys(pMapping->CpuVAddr, + uPSize, + pBMHeap->ui32Attribs, + pMapping->hOSMemHandle); + } + sSysPAddr = SysCpuPAddrToSysPAddr(pMapping->CpuPAddr); + RA_Free (pBMHeap->pLocalDevMemArena, 
(IMG_UINTPTR_T)sSysPAddr.uiAddr, IMG_FALSE); + } + } +fail_mapping_alloc: + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), pMapping, IMG_NULL); + /*not nulling pointer, out of scope*/ +fail_exit: + return IMG_FALSE; +} + + +/*! +****************************************************************************** + + @Function BM_FreeMemory + + @Description Free a block of pages previously allocated via + BM_ImportMemory. + + @Input h - buffer manager handle, not the void type as dictated by + the generic nature of the resource allocator interface. + @Input _base - base address of blocks to free. + @Input psMapping - arbitrary user reference associated with the + underlying storage provided by BM_ImportMemory + @Return None + + *****************************************************************************/ +static IMG_VOID +BM_FreeMemory (IMG_VOID *h, IMG_UINTPTR_T _base, BM_MAPPING *psMapping) +{ + BM_HEAP *pBMHeap = h; + IMG_SIZE_T uPSize; + + PVR_UNREFERENCED_PARAMETER (_base); + + PVR_DPF ((PVR_DBG_MESSAGE, + "BM_FreeMemory (h=0x%p, base=0x" UINTPTR_FMT ", psMapping=0x%p)", + h, _base, psMapping)); + + PVR_ASSERT (psMapping != IMG_NULL); + + if (psMapping == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "BM_FreeMemory: invalid parameter")); + return; + } + + /* + Only free the virtual memory if we got as far a allocating it. 
+ This NULL check should be safe as we always have a guard page + at virtual address 0x00000000 + */ + if (psMapping->DevVAddr.uiAddr) + { +#if defined (PVRSRV_DEVMEM_TIME_STATS) + unsigned long ulTimeStart = 0; + if (psMapping->pui32TimeToDevUnmap) + { + /* NON-NULL pointer signifies user has provided space to get timings info in */ + ulTimeStart = OSClockMonotonicus(); + } +#endif + DevMemoryFree (psMapping); +#if defined (PVRSRV_DEVMEM_TIME_STATS) + if (psMapping->pui32TimeToDevUnmap) + { + *(psMapping->pui32TimeToDevUnmap) = OSClockMonotonicus() - ulTimeStart; + } +#endif + } + + /* the size is double the actual for interleaved */ + if((psMapping->ui32Flags & PVRSRV_MEM_INTERLEAVED) != 0) + { + psMapping->uSize /= 2; + } + + if(psMapping->ui32Flags & PVRSRV_MEM_DUMMY) + { + uPSize = psMapping->pBMHeap->sDevArena.ui32DataPageSize; + } + else + { + uPSize = psMapping->uSize; + } + + if (psMapping->ui32Flags & PVRSRV_MEM_XPROC) + { + XProcWorkaroundFreeShareable(psMapping->hOSMemHandle); + } + else + if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG) + { + OSFreePages(pBMHeap->ui32Attribs, + uPSize, + (IMG_VOID *) psMapping->CpuVAddr, + psMapping->hOSMemHandle); + } + else if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG) + { + IMG_SYS_PHYADDR sSysPAddr; + + OSUnReservePhys(psMapping->CpuVAddr, uPSize, pBMHeap->ui32Attribs, psMapping->hOSMemHandle); + + sSysPAddr = SysCpuPAddrToSysPAddr(psMapping->CpuPAddr); + + RA_Free (pBMHeap->pLocalDevMemArena, (IMG_UINTPTR_T)sSysPAddr.uiAddr, IMG_FALSE); + } + else + { + PVR_DPF((PVR_DBG_ERROR, "BM_FreeMemory: Invalid backing store type")); + } + + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), psMapping, IMG_NULL); + /*not nulling pointer, copy on stack*/ + + PVR_DPF((PVR_DBG_MESSAGE, + "..BM_FreeMemory (h=0x%p, base=0x" UINTPTR_FMT ")", + h, _base)); +} + +/*! 
+****************************************************************************** + + @Function BM_GetPhysPageAddr + + @Description + + @Input psMemInfo + + @Input sDevVPageAddr + + @Output psDevPAddr + + @Return IMG_VOID + +******************************************************************************/ + +IMG_VOID BM_GetPhysPageAddr(PVRSRV_KERNEL_MEM_INFO *psMemInfo, + IMG_DEV_VIRTADDR sDevVPageAddr, + IMG_DEV_PHYADDR *psDevPAddr) +{ + PVRSRV_DEVICE_NODE *psDeviceNode; + + PVR_DPF((PVR_DBG_MESSAGE, "BM_GetPhysPageAddr")); + + PVR_ASSERT(psMemInfo && psDevPAddr); + + /* check it's a page address */ + PVR_ASSERT((sDevVPageAddr.uiAddr & 0xFFF) == 0); + + /* PRQA S 0505 4 */ /* PVR_ASSERT should catch NULL ptrs */ + psDeviceNode = ((BM_BUF*)psMemInfo->sMemBlk.hBuffer)->pMapping->pBMHeap->pBMContext->psDeviceNode; + + *psDevPAddr = psDeviceNode->pfnMMUGetPhysPageAddr(((BM_BUF*)psMemInfo->sMemBlk.hBuffer)->pMapping->pBMHeap->pMMUHeap, + sDevVPageAddr); +} + + +/*! +****************************************************************************** + @Function BM_GetMMUContext + + @Description utility function to return the MMU context + + @Input hDevMemHeap - the Dev mem heap handle + + @Return MMU context, else NULL +**************************************************************************/ +MMU_CONTEXT* BM_GetMMUContext(IMG_HANDLE hDevMemHeap) +{ + BM_HEAP *pBMHeap = (BM_HEAP*)hDevMemHeap; + + PVR_DPF((PVR_DBG_VERBOSE, "BM_GetMMUContext")); + + return pBMHeap->pBMContext->psMMUContext; +} + +/*! 
+****************************************************************************** + @Function BM_GetMMUContextFromMemContext + + @Description utility function to return the MMU context + + @Input hDevMemContext - the Dev mem context handle + + @Return MMU context, else NULL +**************************************************************************/ +MMU_CONTEXT* BM_GetMMUContextFromMemContext(IMG_HANDLE hDevMemContext) +{ + BM_CONTEXT *pBMContext = (BM_CONTEXT*)hDevMemContext; + + PVR_DPF ((PVR_DBG_VERBOSE, "BM_GetMMUContextFromMemContext")); + + return pBMContext->psMMUContext; +} + +/*! +****************************************************************************** + @Function BM_GetMMUHeap + + @Description utility function to return the MMU heap handle + + @Input hDevMemHeap - the Dev mem heap handle + + @Return MMU heap handle, else NULL +**************************************************************************/ +IMG_HANDLE BM_GetMMUHeap(IMG_HANDLE hDevMemHeap) +{ + PVR_DPF((PVR_DBG_VERBOSE, "BM_GetMMUHeap")); + + return (IMG_HANDLE)((BM_HEAP*)hDevMemHeap)->pMMUHeap; +} + + +/*! +****************************************************************************** + @Function BM_GetDeviceNode + + @Description utility function to return the devicenode from the BM Context + + @Input hDevMemContext - the Dev Mem Context + + @Return MMU heap handle, else NULL +**************************************************************************/ +PVRSRV_DEVICE_NODE* BM_GetDeviceNode(IMG_HANDLE hDevMemContext) +{ + PVR_DPF((PVR_DBG_VERBOSE, "BM_GetDeviceNode")); + + return ((BM_CONTEXT*)hDevMemContext)->psDeviceNode; +} + + +/*! 
+****************************************************************************** + @Function BM_GetMappingHandle + + @Description utility function to return the mapping handle from a meminfo + + @Input psMemInfo - kernel meminfo + + @Return mapping handle, else NULL +**************************************************************************/ +IMG_HANDLE BM_GetMappingHandle(PVRSRV_KERNEL_MEM_INFO *psMemInfo) +{ + PVR_DPF((PVR_DBG_VERBOSE, "BM_GetMappingHandle")); + + return ((BM_BUF*)psMemInfo->sMemBlk.hBuffer)->pMapping->hOSMemHandle; +} + +/*! +****************************************************************************** + @Function BM_MappingHandleFromBuffer + + @Description utility function to get the BM mapping handle from a BM buffer + + @Input hBuffer - Handle to BM buffer + + @Return BM mapping handle +**************************************************************************/ +IMG_HANDLE BM_MappingHandleFromBuffer(IMG_HANDLE hBuffer) +{ + BM_BUF *psBuffer; + + PVR_ASSERT(hBuffer != IMG_NULL); + psBuffer = hBuffer; + return psBuffer->pMapping; +} + +/*! +****************************************************************************** + @Function BM_GetVirtualSize + + @Description utility function to get the VM size of a BM mapping + + @Input hBMHandle - Handle to BM mapping + + @Return VM size of mapping +**************************************************************************/ +IMG_UINT32 BM_GetVirtualSize(IMG_HANDLE hBMHandle) +{ + BM_MAPPING *psMapping; + + PVR_ASSERT(hBMHandle != IMG_NULL); + psMapping = hBMHandle; + return psMapping->ui32ChunkSize * psMapping->ui32NumVirtChunks; +} + +/*! 
+****************************************************************************** + @Function BM_MapPageAtOffset + + @Description utility function check if the specified offset in a BM mapping + is a page that needs to be mapped + + @Input hBMHandle - Handle to BM mapping + + @Input ui32Offset - Offset into allocation + + @Return IMG_TRUE if the page should be mapped +**************************************************************************/ +IMG_BOOL BM_MapPageAtOffset(IMG_HANDLE hBMHandle, IMG_UINT32 ui32Offset) +{ + BM_MAPPING *psMapping; + IMG_UINT32 ui32ChunkIndex; + + PVR_ASSERT(hBMHandle != IMG_NULL); + psMapping = hBMHandle; + + ui32ChunkIndex = ui32Offset / psMapping->ui32ChunkSize; + /* Check for overrun */ + PVR_ASSERT(ui32ChunkIndex <= psMapping->ui32NumVirtChunks); + return psMapping->pabMapChunk[ui32ChunkIndex]; +} + +/*! +****************************************************************************** + @Function BM_VirtOffsetToPhysical + + @Description utility function find of physical offset of a sparse allocation + from its virtual offset. 
+ + @Input hBMHandle - Handle to BM mapping + + @Input ui32VirtOffset - Virtual offset into allocation + + @Output pui32PhysOffset - Physical offset + + @Return IMG_TRUE if the virtual offset is physically backed +**************************************************************************/ +IMG_BOOL BM_VirtOffsetToPhysical(IMG_HANDLE hBMHandle, + IMG_UINT32 ui32VirtOffset, + IMG_UINT32 *pui32PhysOffset) +{ + BM_MAPPING *psMapping; + IMG_UINT32 ui32ChunkOffset; + IMG_UINT32 ui32PhysOffset = 0; + IMG_UINT32 i; + + PVR_ASSERT(hBMHandle != IMG_NULL); + psMapping = hBMHandle; + + ui32ChunkOffset = ui32VirtOffset / psMapping->ui32ChunkSize; + if (!psMapping->pabMapChunk[ui32ChunkOffset]) + { + return IMG_FALSE; + } + + for (i=0;ipabMapChunk[i]) + { + ui32PhysOffset += psMapping->ui32ChunkSize; + } + } + *pui32PhysOffset = ui32PhysOffset; + + return IMG_TRUE; +} +/****************************************************************************** + End of file (buffer_manager.c) +******************************************************************************/ diff --git a/sgx_km/eurasia_km/services4/srvkm/common/deviceclass.c b/sgx_km/eurasia_km/services4/srvkm/common/deviceclass.c new file mode 100644 index 0000000..b6ab4da --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/common/deviceclass.c @@ -0,0 +1,2945 @@ +/*************************************************************************/ /*! +@File +@Title Device class services functions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Kernel services functions for device class devices +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "services_headers.h" +#include "buffer_manager.h" +#include "kernelbuffer.h" +#include "kerneldisplay.h" +#include "pvr_bridge_km.h" +#include "pdump_km.h" +#include "deviceid.h" + +#include "lists.h" + +#if defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) || defined(PVR_ANDROID_NATIVE_WINDOW_HAS_FENCE) +#include "pvr_sync_common.h" +#endif + +PVRSRV_ERROR AllocateDeviceID(SYS_DATA *psSysData, IMG_UINT32 *pui32DevID); +PVRSRV_ERROR FreeDeviceID(SYS_DATA *psSysData, IMG_UINT32 ui32DevID); + +/*********************************************************************** + Local Display Class Structures +************************************************************************/ +typedef struct PVRSRV_DC_SRV2DISP_KMJTABLE_TAG *PPVRSRV_DC_SRV2DISP_KMJTABLE; + +/* + Display Class Buffer Info +*/ +typedef struct PVRSRV_DC_BUFFER_TAG +{ + /* BC/DC common details - THIS MUST BE THE FIRST MEMBER */ + PVRSRV_DEVICECLASS_BUFFER sDeviceClassBuffer; + + struct PVRSRV_DISPLAYCLASS_INFO_TAG *psDCInfo; + struct PVRSRV_DC_SWAPCHAIN_TAG *psSwapChain; +} PVRSRV_DC_BUFFER; + +/* + Display Device Class kernel swapchain information structure +*/ +typedef struct PVRSRV_DC_SWAPCHAIN_TAG +{ + IMG_HANDLE hExtSwapChain; + IMG_UINT32 ui32SwapChainID; + IMG_UINT32 ui32RefCount; + IMG_UINT32 ui32Flags; + PVRSRV_QUEUE_INFO *psQueue; + PVRSRV_DC_BUFFER asBuffer[PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS]; + IMG_UINT32 ui32BufferCount; + PVRSRV_DC_BUFFER *psLastFlipBuffer; + IMG_UINT32 ui32MinSwapInterval; + IMG_UINT32 ui32MaxSwapInterval; +#if !defined(SUPPORT_DC_CMDCOMPLETE_WHEN_NO_LONGER_DISPLAYED) + PVRSRV_KERNEL_SYNC_INFO **ppsLastSyncInfos; + IMG_UINT32 ui32LastNumSyncInfos; +#endif /* !defined(SUPPORT_DC_CMDCOMPLETE_WHEN_NO_LONGER_DISPLAYED) */ + struct PVRSRV_DISPLAYCLASS_INFO_TAG *psDCInfo; + struct PVRSRV_DC_SWAPCHAIN_TAG *psNext; +} PVRSRV_DC_SWAPCHAIN; + + +/* + Display Device Class kernel swapchain referecne 
structure +*/ +typedef struct PVRSRV_DC_SWAPCHAIN_REF_TAG +{ + struct PVRSRV_DC_SWAPCHAIN_TAG *psSwapChain; + IMG_HANDLE hResItem; +} PVRSRV_DC_SWAPCHAIN_REF; + + +/* + Display Device Class kernel services information structure +*/ +typedef struct PVRSRV_DISPLAYCLASS_INFO_TAG +{ + IMG_UINT32 ui32RefCount; + IMG_UINT32 ui32DeviceID; + IMG_HANDLE hExtDevice; + PPVRSRV_DC_SRV2DISP_KMJTABLE psFuncTable; + IMG_HANDLE hDevMemContext; + PVRSRV_DC_BUFFER sSystemBuffer; + struct PVRSRV_DC_SWAPCHAIN_TAG *psDCSwapChainShared; +} PVRSRV_DISPLAYCLASS_INFO; + + +/* + Per-context Display Device Class kernel services information structure +*/ +typedef struct PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO_TAG +{ + PVRSRV_DISPLAYCLASS_INFO *psDCInfo; + PRESMAN_ITEM hResItem; +} PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO; + + +/*********************************************************************** + Local Buffer Class Structures +************************************************************************/ +typedef struct PVRSRV_BC_SRV2BUFFER_KMJTABLE_TAG *PPVRSRV_BC_SRV2BUFFER_KMJTABLE; + +/* + Buffer Class Buffer Info +*/ +typedef struct PVRSRV_BC_BUFFER_TAG +{ + /* BC/DC common details - THIS MUST BE THE FIRST MEMBER */ + PVRSRV_DEVICECLASS_BUFFER sDeviceClassBuffer; + + struct PVRSRV_BUFFERCLASS_INFO_TAG *psBCInfo; +} PVRSRV_BC_BUFFER; + + +/* + Buffer Device Class kernel services information structure +*/ +typedef struct PVRSRV_BUFFERCLASS_INFO_TAG +{ + IMG_UINT32 ui32RefCount; + IMG_UINT32 ui32DeviceID; + IMG_HANDLE hExtDevice; + PPVRSRV_BC_SRV2BUFFER_KMJTABLE psFuncTable; + IMG_HANDLE hDevMemContext; + /* buffer info returned from 3rd party driver */ + IMG_UINT32 ui32BufferCount; + PVRSRV_BC_BUFFER *psBuffer; + +} PVRSRV_BUFFERCLASS_INFO; + + +/* + Per-context Buffer Device Class kernel services information structure +*/ +typedef struct PVRSRV_BUFFERCLASS_PERCONTEXT_INFO_TAG +{ + PVRSRV_BUFFERCLASS_INFO *psBCInfo; + IMG_HANDLE hResItem; +} PVRSRV_BUFFERCLASS_PERCONTEXT_INFO; + + +/*! 
+****************************************************************************** + @Function DCDeviceHandleToDCInfo + + @Description + + Convert a client-visible 3rd party device class handle to an internal + PVRSRV_DISPLAYCLASS_INFO pointer. + + @Input hDeviceKM - handle to display class device, returned from OpenDCDevice + + @Return + success: pointer to PVRSRV_DISPLAYCLASS_INFO + failure: IMG_NULL +******************************************************************************/ +static PVRSRV_DISPLAYCLASS_INFO* DCDeviceHandleToDCInfo (IMG_HANDLE hDeviceKM) +{ + PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *psDCPerContextInfo; + + psDCPerContextInfo = (PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *)hDeviceKM; + + return psDCPerContextInfo->psDCInfo; +} + + +/*! +****************************************************************************** + @Function BCDeviceHandleToBCInfo + + @Description + + Convert a client-visible 3rd party buffer class handle to an internal + PVRSRV_BUFFERCLASS_INFO pointer. + + @Input hDeviceKM - handle to buffer class device, returned from OpenBCDevice + + @Return + success: pointer to PVRSRV_BUFFERCLASS_INFO + failure: IMG_NULL +******************************************************************************/ +static PVRSRV_BUFFERCLASS_INFO* BCDeviceHandleToBCInfo (IMG_HANDLE hDeviceKM) +{ + PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *psBCPerContextInfo; + + psBCPerContextInfo = (PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *)hDeviceKM; + + return psBCPerContextInfo->psBCInfo; +} + +/*! +****************************************************************************** + @Function PVRSRVEnumerateDCKM_ForEachVaCb + + @Description + + Enumerates the device node (if is of the same class as given). 
+ + @Input psDeviceNode - The device node to be enumerated + va - variable arguments list, with: + pui32DevCount - The device count pointer (to be increased) + ppui32DevID - The pointer to the device IDs pointer (to be updated and increased) + peDeviceClass - The pointer to the device class of the psDeviceNode's to be enumerated. +******************************************************************************/ +static IMG_VOID PVRSRVEnumerateDCKM_ForEachVaCb(PVRSRV_DEVICE_NODE *psDeviceNode, va_list va) +{ + IMG_UINT *pui32DevCount; + IMG_UINT32 **ppui32DevID; + PVRSRV_DEVICE_CLASS peDeviceClass; + + pui32DevCount = va_arg(va, IMG_UINT*); + ppui32DevID = va_arg(va, IMG_UINT32**); + peDeviceClass = va_arg(va, PVRSRV_DEVICE_CLASS); + + if ((psDeviceNode->sDevId.eDeviceClass == peDeviceClass) + && (psDeviceNode->sDevId.eDeviceType == PVRSRV_DEVICE_TYPE_EXT)) + { + (*pui32DevCount)++; + if(*ppui32DevID) + { + *(*ppui32DevID)++ = psDeviceNode->sDevId.ui32DeviceIndex; + } + } +} + + +/*! +****************************************************************************** + + @Function PVRSRVEnumerateDCKM + + @Description + + Enumerates devices available in a given class. 
+ On first call, pass valid ptr for pui32DevCount and IMG_NULL for pui32DevID, + On second call, pass same ptr for pui32DevCount and client allocated ptr + for pui32DevID device id list + + @Input hServices - handle for services connection + @Input ui32DevClass - device class identifier + @Output pui32DevCount - number of devices available in class + @Output pui32DevID - list of device ids in the device class + + @Return + success: handle to matching display class device + failure: IMG_NULL + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR PVRSRVEnumerateDCKM (PVRSRV_DEVICE_CLASS DeviceClass, + IMG_UINT32 *pui32DevCount, + IMG_UINT32 *pui32DevID ) +{ + /*PVRSRV_DEVICE_NODE *psDeviceNode;*/ + IMG_UINT ui32DevCount = 0; + SYS_DATA *psSysData; + + SysAcquireData(&psSysData); + + /* search devonode list for devices in specified class and return the device ids */ + List_PVRSRV_DEVICE_NODE_ForEach_va(psSysData->psDeviceNodeList, + &PVRSRVEnumerateDCKM_ForEachVaCb, + &ui32DevCount, + &pui32DevID, + DeviceClass); + + if(pui32DevCount) + { + *pui32DevCount = ui32DevCount; + } + else if(pui32DevID == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVEnumerateDCKM: Invalid parameters")); + return (PVRSRV_ERROR_INVALID_PARAMS); + } + + return PVRSRV_OK; +} + + +/*! 
+****************************************************************************** + + @Function PVRSRVRegisterDCDeviceKM + + @Description + + registers an external device with the system + + @Input psFuncTable : device function table + + @Output pui32DeviceID : unique device key (for case of multiple identical devices) + + @Return PVRSRV_ERROR : + +******************************************************************************/ +static +PVRSRV_ERROR PVRSRVRegisterDCDeviceKM (PVRSRV_DC_SRV2DISP_KMJTABLE *psFuncTable, + IMG_UINT32 *pui32DeviceID) +{ + PVRSRV_DISPLAYCLASS_INFO *psDCInfo = IMG_NULL; + PVRSRV_DEVICE_NODE *psDeviceNode = IMG_NULL; + SYS_DATA *psSysData; + + /* + IN: + - name of client side ext. device driver library for subsequent loading + - predefined list of callbacks into kernel ext. device driver (based on class type) + + FUNCTION TASKS: + - allocate display device class info structure + - hang ext.device kernel callbacks on this structure (pfnKSwapToSystem) + + OUT: + - DEVICE_ID + - pass back devinfo? 
no + + Q&A: + - DEVICE_ID passed in or allocated - assume allocate + */ + + SysAcquireData(&psSysData); + + /* + If we got this far we're doing dynamic enumeration + or first time static registration + */ + + /* Allocate device control block */ + if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP, + sizeof(*psDCInfo), + (IMG_VOID **)&psDCInfo, IMG_NULL, + "Display Class Info") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterDCDeviceKM: Failed psDCInfo alloc")); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + OSMemSet (psDCInfo, 0, sizeof(*psDCInfo)); + + /* setup the display device information structure */ + if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP, + sizeof(PVRSRV_DC_SRV2DISP_KMJTABLE), + (IMG_VOID **)&psDCInfo->psFuncTable, IMG_NULL, + "Function table for SRVKM->DISPLAY") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterDCDeviceKM: Failed psFuncTable alloc")); + goto ErrorExit; + } + OSMemSet (psDCInfo->psFuncTable, 0, sizeof(PVRSRV_DC_SRV2DISP_KMJTABLE)); + + /* copy the jump table */ + *psDCInfo->psFuncTable = *psFuncTable; + + /* Allocate device node */ + if(OSAllocMem( PVRSRV_OS_NON_PAGEABLE_HEAP, + sizeof(PVRSRV_DEVICE_NODE), + (IMG_VOID **)&psDeviceNode, IMG_NULL, + "Device Node") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterDCDeviceKM: Failed psDeviceNode alloc")); + goto ErrorExit; + } + OSMemSet (psDeviceNode, 0, sizeof(PVRSRV_DEVICE_NODE)); + + psDeviceNode->pvDevice = (IMG_VOID*)psDCInfo; + psDeviceNode->ui32pvDeviceSize = sizeof(*psDCInfo); + psDeviceNode->ui32RefCount = 1; + psDeviceNode->sDevId.eDeviceType = PVRSRV_DEVICE_TYPE_EXT; + psDeviceNode->sDevId.eDeviceClass = PVRSRV_DEVICE_CLASS_DISPLAY; + psDeviceNode->psSysData = psSysData; + + /* allocate a unique device id */ + if (AllocateDeviceID(psSysData, &psDeviceNode->sDevId.ui32DeviceIndex) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterBCDeviceKM: Failed to allocate Device ID")); + goto ErrorExit; + } + psDCInfo->ui32DeviceID = psDeviceNode->sDevId.ui32DeviceIndex; + if 
(pui32DeviceID) + { + *pui32DeviceID = psDeviceNode->sDevId.ui32DeviceIndex; + } + + /* Register the device with the system */ + SysRegisterExternalDevice(psDeviceNode); + + /* and finally insert the device into the dev-list */ + List_PVRSRV_DEVICE_NODE_Insert(&psSysData->psDeviceNodeList, psDeviceNode); + + return PVRSRV_OK; + +ErrorExit: + + if(psDCInfo->psFuncTable) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DC_SRV2DISP_KMJTABLE), psDCInfo->psFuncTable, IMG_NULL); + psDCInfo->psFuncTable = IMG_NULL; + } + + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DISPLAYCLASS_INFO), psDCInfo, IMG_NULL); + /*not nulling pointer, out of scope*/ + + if(psDeviceNode) + { + OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, sizeof(PVRSRV_DEVICE_NODE), psDeviceNode, IMG_NULL); + } + + return PVRSRV_ERROR_OUT_OF_MEMORY; +} + +/*! +****************************************************************************** + + @Function PVRSRVRemoveDCDeviceKM + + @Description + + Removes external device from services system record + + @Input ui32DeviceIndex : unique device key (for case of multiple identical devices) + + @Return PVRSRV_ERROR : + +******************************************************************************/ +static PVRSRV_ERROR PVRSRVRemoveDCDeviceKM(IMG_UINT32 ui32DevIndex) +{ + SYS_DATA *psSysData; + PVRSRV_DEVICE_NODE *psDeviceNode; + PVRSRV_DISPLAYCLASS_INFO *psDCInfo; + + SysAcquireData(&psSysData); + + /*search the node matching the devindex and display class*/ + psDeviceNode = (PVRSRV_DEVICE_NODE*) + List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList, + &MatchDeviceKM_AnyVaCb, + ui32DevIndex, + IMG_FALSE, + PVRSRV_DEVICE_CLASS_DISPLAY); + if (!psDeviceNode) + { + /*device not found*/ + PVR_DPF((PVR_DBG_ERROR,"PVRSRVRemoveDCDeviceKM: requested device %d not present", ui32DevIndex)); + return PVRSRV_ERROR_NO_DEVICENODE_FOUND; + } + + /* setup DCInfo ptr */ + psDCInfo = (PVRSRV_DISPLAYCLASS_INFO*)psDeviceNode->pvDevice; + + /* + The device can only be removed if 
there are + no open connections in the Services interface + */ + if(psDCInfo->ui32RefCount == 0) + { + /* + Remove from the device list. + */ + List_PVRSRV_DEVICE_NODE_Remove(psDeviceNode); + + /* Unregister the device with the system */ + SysRemoveExternalDevice(psDeviceNode); + + /* + OK found a device with a matching devindex + remove registration information + */ + PVR_ASSERT(psDCInfo->ui32RefCount == 0); + (IMG_VOID)FreeDeviceID(psSysData, ui32DevIndex); + (IMG_VOID)OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DC_SRV2DISP_KMJTABLE), psDCInfo->psFuncTable, IMG_NULL); + psDCInfo->psFuncTable = IMG_NULL; + (IMG_VOID)OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DISPLAYCLASS_INFO), psDCInfo, IMG_NULL); + /*not nulling original pointer, overwritten*/ + (IMG_VOID)OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DEVICE_NODE), psDeviceNode, IMG_NULL); + /*not nulling pointer, out of scope*/ + } + else + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVRemoveDCDeviceKM: failed as %d Services DC API connections are still open", psDCInfo->ui32RefCount)); + return PVRSRV_ERROR_UNABLE_TO_REMOVE_DEVICE; + } + + return PVRSRV_OK; +} + + +/*! +****************************************************************************** + + @Function PVRSRVRegisterBCDeviceKM + + @Description + + registers an external device with the system + + @Input psFuncTable : device function table + @Input ui32DeviceIndex : unique device key (for case of multiple identical devices) + + @Return PVRSRV_ERROR : + +******************************************************************************/ +static +PVRSRV_ERROR PVRSRVRegisterBCDeviceKM (PVRSRV_BC_SRV2BUFFER_KMJTABLE *psFuncTable, + IMG_UINT32 *pui32DeviceID) +{ + PVRSRV_BUFFERCLASS_INFO *psBCInfo = IMG_NULL; + PVRSRV_DEVICE_NODE *psDeviceNode = IMG_NULL; + SYS_DATA *psSysData; + /* + IN: + - name of client side ext. device driver library for subsequent loading + - predefined list of callbacks into kernel ext. 
device driver (based on class type) + + FUNCTION TASKS: + - allocate buffer device class info structure + + OUT: + - DEVICE_ID + - pass back devinfo? no + + Q&A: + - DEVICE_ID passed in or allocated - assume allcoate + */ + + SysAcquireData(&psSysData); + + /* + If we got this far we're doing dynamic enumeration + or first time static registration + */ + + /* Allocate device control block */ + if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP, + sizeof(*psBCInfo), + (IMG_VOID **)&psBCInfo, IMG_NULL, + "Buffer Class Info") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterBCDeviceKM: Failed psBCInfo alloc")); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + OSMemSet (psBCInfo, 0, sizeof(*psBCInfo)); + + /* setup the buffer device information structure */ + if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP, + sizeof(PVRSRV_BC_SRV2BUFFER_KMJTABLE), + (IMG_VOID **)&psBCInfo->psFuncTable, IMG_NULL, + "Function table for SRVKM->BUFFER") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterBCDeviceKM: Failed psFuncTable alloc")); + goto ErrorExit; + } + OSMemSet (psBCInfo->psFuncTable, 0, sizeof(PVRSRV_BC_SRV2BUFFER_KMJTABLE)); + + /* copy the jump table */ + *psBCInfo->psFuncTable = *psFuncTable; + + /* Allocate device node */ + if(OSAllocMem( PVRSRV_OS_NON_PAGEABLE_HEAP, + sizeof(PVRSRV_DEVICE_NODE), + (IMG_VOID **)&psDeviceNode, IMG_NULL, + "Device Node") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterBCDeviceKM: Failed psDeviceNode alloc")); + goto ErrorExit; + } + OSMemSet (psDeviceNode, 0, sizeof(PVRSRV_DEVICE_NODE)); + + psDeviceNode->pvDevice = (IMG_VOID*)psBCInfo; + psDeviceNode->ui32pvDeviceSize = sizeof(*psBCInfo); + psDeviceNode->ui32RefCount = 1; + psDeviceNode->sDevId.eDeviceType = PVRSRV_DEVICE_TYPE_EXT; + psDeviceNode->sDevId.eDeviceClass = PVRSRV_DEVICE_CLASS_BUFFER; + psDeviceNode->psSysData = psSysData; + + /* allocate a unique device id */ + if (AllocateDeviceID(psSysData, &psDeviceNode->sDevId.ui32DeviceIndex) != PVRSRV_OK) + { + 
PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterBCDeviceKM: Failed to allocate Device ID")); + goto ErrorExit; + } + psBCInfo->ui32DeviceID = psDeviceNode->sDevId.ui32DeviceIndex; + if (pui32DeviceID) + { + *pui32DeviceID = psDeviceNode->sDevId.ui32DeviceIndex; + } + + /* and finally insert the device into the dev-list */ + List_PVRSRV_DEVICE_NODE_Insert(&psSysData->psDeviceNodeList, psDeviceNode); + + return PVRSRV_OK; + +ErrorExit: + + if(psBCInfo->psFuncTable) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_BC_SRV2BUFFER_KMJTABLE), psBCInfo->psFuncTable, IMG_NULL); + psBCInfo->psFuncTable = IMG_NULL; + } + + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_BUFFERCLASS_INFO), psBCInfo, IMG_NULL); + /*not nulling shared pointer, wasn't allocated to this point*/ + + if(psDeviceNode) + { + OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, sizeof(PVRSRV_DEVICE_NODE), psDeviceNode, IMG_NULL); + } + + return PVRSRV_ERROR_OUT_OF_MEMORY; +} + + +/*! +****************************************************************************** + + @Function PVRSRVRemoveBCDeviceKM + + @Description + + Removes external device from services system record + + @Input ui32DeviceIndex : unique device key (for case of multiple identical devices) + + @Return PVRSRV_ERROR : + +******************************************************************************/ +static PVRSRV_ERROR PVRSRVRemoveBCDeviceKM(IMG_UINT32 ui32DevIndex) +{ + SYS_DATA *psSysData; + PVRSRV_DEVICE_NODE *psDevNode; + PVRSRV_BUFFERCLASS_INFO *psBCInfo; + + SysAcquireData(&psSysData); + + /*search the device node with the devindex and buffer class*/ + psDevNode = (PVRSRV_DEVICE_NODE*) + List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList, + &MatchDeviceKM_AnyVaCb, + ui32DevIndex, + IMG_FALSE, + PVRSRV_DEVICE_CLASS_BUFFER); + + if (!psDevNode) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVRemoveBCDeviceKM: requested device %d not present", ui32DevIndex)); + return PVRSRV_ERROR_NO_DEVICENODE_FOUND; + } + + /* set-up devnode ptr */ +/* psDevNode = 
*(ppsDevNode); */ + /* setup BCInfo ptr */ + psBCInfo = (PVRSRV_BUFFERCLASS_INFO*)psDevNode->pvDevice; + + /* + The device can only be removed if there are + no open connections in the Services interface + */ + if(psBCInfo->ui32RefCount == 0) + { + /* + Remove from the device list. + */ + List_PVRSRV_DEVICE_NODE_Remove(psDevNode); + + /* + OK found a device with a matching devindex + remove registration information + */ + (IMG_VOID)FreeDeviceID(psSysData, ui32DevIndex); + (IMG_VOID)OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_BC_SRV2BUFFER_KMJTABLE), psBCInfo->psFuncTable, IMG_NULL); + psBCInfo->psFuncTable = IMG_NULL; + (IMG_VOID)OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_BUFFERCLASS_INFO), psBCInfo, IMG_NULL); + /*not nulling pointer, copy on stack*/ + (IMG_VOID)OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DEVICE_NODE), psDevNode, IMG_NULL); + /*not nulling pointer, out of scope*/ + } + else + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVRemoveBCDeviceKM: failed as %d Services BC API connections are still open", psBCInfo->ui32RefCount)); + return PVRSRV_ERROR_UNABLE_TO_REMOVE_DEVICE; + } + + return PVRSRV_OK; +} + + + +/*! +****************************************************************************** + + @Function PVRSRVCloseDCDeviceKM + + @Description + + Closes a connection to the Display Class device + + @Input hDeviceKM : device handle + + @Return PVRSRV_ERROR : + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR PVRSRVCloseDCDeviceKM (IMG_HANDLE hDeviceKM) +{ + PVRSRV_ERROR eError; + PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *psDCPerContextInfo; + + psDCPerContextInfo = (PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *)hDeviceKM; + + /* Remove the item from the resman list and trigger the callback. 
*/ + eError = ResManFreeResByPtr(psDCPerContextInfo->hResItem, CLEANUP_WITH_POLL); + + return eError; +} + + +static PVRSRV_ERROR CloseDCDeviceCallBack(IMG_PVOID pvParam, + IMG_UINT32 ui32Param, + IMG_BOOL bDummy) +{ + PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *psDCPerContextInfo; + PVRSRV_DISPLAYCLASS_INFO *psDCInfo; + + PVR_UNREFERENCED_PARAMETER(ui32Param); + PVR_UNREFERENCED_PARAMETER(bDummy); + + psDCPerContextInfo = (PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *)pvParam; + psDCInfo = psDCPerContextInfo->psDCInfo; + + if(psDCInfo->sSystemBuffer.sDeviceClassBuffer.ui32MemMapRefCount != 0) + { + PVR_DPF((PVR_DBG_MESSAGE,"CloseDCDeviceCallBack: system buffer (0x%p) still mapped (refcount = %d)", + &psDCInfo->sSystemBuffer.sDeviceClassBuffer, + psDCInfo->sSystemBuffer.sDeviceClassBuffer.ui32MemMapRefCount)); + } + + psDCInfo->ui32RefCount--; + if(psDCInfo->ui32RefCount == 0) + { + /* close the external device */ + psDCInfo->psFuncTable->pfnCloseDCDevice(psDCInfo->hExtDevice); + + PVRSRVKernelSyncInfoDecRef(psDCInfo->sSystemBuffer.sDeviceClassBuffer.psKernelSyncInfo, IMG_NULL); + + psDCInfo->hDevMemContext = IMG_NULL; + psDCInfo->hExtDevice = IMG_NULL; + } + + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO), psDCPerContextInfo, IMG_NULL); + /*not nulling pointer, copy on stack*/ + + return PVRSRV_OK; +} + + +/*! 
+****************************************************************************** + + @Function PVRSRVOpenDCDeviceKM + + @Description + + Opens a connection to the Display Class device, associating the connection + with a Device Memory Context for a services managed device + + @Input psPerProc : Per-process data + @Input ui32DeviceID : unique device index + @Input hDevCookie : devcookie used to derive the Device Memory + Context into BC surfaces will be mapped into + @Outut phDeviceKM : handle to the DC device + + @Return PVRSRV_ERROR : + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR PVRSRVOpenDCDeviceKM (PVRSRV_PER_PROCESS_DATA *psPerProc, + IMG_UINT32 ui32DeviceID, + IMG_HANDLE hDevCookie, + IMG_HANDLE *phDeviceKM) +{ + PVRSRV_DISPLAYCLASS_INFO *psDCInfo; + PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *psDCPerContextInfo; + PVRSRV_DEVICE_NODE *psDeviceNode; + SYS_DATA *psSysData; + PVRSRV_ERROR eError; + + if(!phDeviceKM || !hDevCookie) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenDCDeviceKM: Invalid params")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + SysAcquireData(&psSysData); + + /* find the matching devicenode */ + psDeviceNode = (PVRSRV_DEVICE_NODE*) + List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList, + &MatchDeviceKM_AnyVaCb, + ui32DeviceID, + IMG_FALSE, + PVRSRV_DEVICE_CLASS_DISPLAY); + if (!psDeviceNode) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenDCDeviceKM: no devnode matching index %d", ui32DeviceID)); + return PVRSRV_ERROR_NO_DEVICENODE_FOUND; + } + psDCInfo = (PVRSRV_DISPLAYCLASS_INFO*)psDeviceNode->pvDevice; + + /* + Allocate the per-context DC Info before calling the external device, + to make error handling easier. 
+ */ + if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(*psDCPerContextInfo), + (IMG_VOID **)&psDCPerContextInfo, IMG_NULL, + "Display Class per Context Info") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenDCDeviceKM: Failed psDCPerContextInfo alloc")); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + OSMemSet(psDCPerContextInfo, 0, sizeof(*psDCPerContextInfo)); + + if(psDCInfo->ui32RefCount++ == 0) + { + psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevCookie; + + /* store the device kernel context to map into */ + psDCInfo->hDevMemContext = (IMG_HANDLE)psDeviceNode->sDevMemoryInfo.pBMKernelContext; + + /* create a syncinfo for the device's system surface */ + eError = PVRSRVAllocSyncInfoKM(IMG_NULL, + (IMG_HANDLE)psDeviceNode->sDevMemoryInfo.pBMKernelContext, + &psDCInfo->sSystemBuffer.sDeviceClassBuffer.psKernelSyncInfo); + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenDCDeviceKM: Failed sync info alloc")); + goto ErrorExit; + } + + /* open the external device */ + eError = psDCInfo->psFuncTable->pfnOpenDCDevice(ui32DeviceID, + &psDCInfo->hExtDevice, + (PVRSRV_SYNC_DATA*)psDCInfo->sSystemBuffer.sDeviceClassBuffer.psKernelSyncInfo->psSyncDataMemInfoKM->pvLinAddrKM); + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenDCDeviceKM: Failed to open external DC device")); + goto ErrorExitSyncInfo; + } + + psDCPerContextInfo->psDCInfo = psDCInfo; + eError = PVRSRVGetDCSystemBufferKM(psDCPerContextInfo, IMG_NULL); + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenDCDeviceKM: Failed to get system buffer")); + goto ErrorExitCloseDevice; + } + psDCInfo->sSystemBuffer.sDeviceClassBuffer.ui32MemMapRefCount = 0; + } + else + { + psDCPerContextInfo->psDCInfo = psDCInfo; + } + + psDCPerContextInfo->hResItem = ResManRegisterRes(psPerProc->hResManContext, + RESMAN_TYPE_DISPLAYCLASS_DEVICE, + psDCPerContextInfo, + 0, + &CloseDCDeviceCallBack); + + /* return a reference to the DCPerContextInfo */ + *phDeviceKM = 
(IMG_HANDLE)psDCPerContextInfo; + + return PVRSRV_OK; + +ErrorExitCloseDevice: + psDCInfo->psFuncTable->pfnCloseDCDevice(psDCInfo->hExtDevice); + +ErrorExitSyncInfo: + PVRSRVKernelSyncInfoDecRef(psDCInfo->sSystemBuffer.sDeviceClassBuffer.psKernelSyncInfo, IMG_NULL); + +ErrorExit: + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO), psDCPerContextInfo, IMG_NULL); + psDCInfo->ui32RefCount = 0; + return eError; +} + + +/*! +****************************************************************************** + + @Function PVRSRVEnumDCFormatsKM + + @Description + + Enumerates the devices pixel formats + + @Input hDeviceKM : device handle + @Output pui32Count : number of pixel formats + @Output psFormat : format list + + @Return PVRSRV_ERROR : + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR PVRSRVEnumDCFormatsKM (IMG_HANDLE hDeviceKM, + IMG_UINT32 *pui32Count, + DISPLAY_FORMAT *psFormat) +{ + PVRSRV_DISPLAYCLASS_INFO *psDCInfo; + + if(!hDeviceKM || !pui32Count || !psFormat) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVEnumDCFormatsKM: Invalid parameters")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM); + + /* call into the display device driver to get info */ + return psDCInfo->psFuncTable->pfnEnumDCFormats(psDCInfo->hExtDevice, pui32Count, psFormat); +} + + + +/*! 
+****************************************************************************** + + @Function PVRSRVEnumDCDimsKM + + @Description + + Enumerates the devices mode dimensions for a given pixel format + + @Input hDeviceKM : device handle + @Input psFormat : pixel format + @Output pui32Count : number of dimensions + @Output psDim : dimensions list + + @Return PVRSRV_ERROR : + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR PVRSRVEnumDCDimsKM (IMG_HANDLE hDeviceKM, + DISPLAY_FORMAT *psFormat, + IMG_UINT32 *pui32Count, + DISPLAY_DIMS *psDim) +{ + PVRSRV_DISPLAYCLASS_INFO *psDCInfo; + + if(!hDeviceKM || !pui32Count || !psFormat) // psDim==NULL to query number of dims + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVEnumDCDimsKM: Invalid parameters")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM); + + /* call into the display device driver to get info */ + return psDCInfo->psFuncTable->pfnEnumDCDims(psDCInfo->hExtDevice, psFormat, pui32Count, psDim); +} + + +/*! 
+****************************************************************************** + + @Function PVRSRVGetDCSystemBufferKM + + @Description + + Get the primary surface and optionally return its buffer handle + + @Input hDeviceKM : device handle + @Output phBuffer : Optional buffer handle + + @Return PVRSRV_ERROR : + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR PVRSRVGetDCSystemBufferKM (IMG_HANDLE hDeviceKM, + IMG_HANDLE *phBuffer) +{ + PVRSRV_ERROR eError; + PVRSRV_DISPLAYCLASS_INFO *psDCInfo; + IMG_HANDLE hExtBuffer; + + if(!hDeviceKM) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetDCSystemBufferKM: Invalid parameters")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM); + + /* call into the display device driver to get info */ + eError = psDCInfo->psFuncTable->pfnGetDCSystemBuffer(psDCInfo->hExtDevice, &hExtBuffer); + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetDCSystemBufferKM: Failed to get valid buffer handle from external driver")); + return eError; + } + + /* save the new info */ + psDCInfo->sSystemBuffer.sDeviceClassBuffer.pfnGetBufferAddr = psDCInfo->psFuncTable->pfnGetBufferAddr; + psDCInfo->sSystemBuffer.sDeviceClassBuffer.hDevMemContext = psDCInfo->hDevMemContext; + psDCInfo->sSystemBuffer.sDeviceClassBuffer.hExtDevice = psDCInfo->hExtDevice; + psDCInfo->sSystemBuffer.sDeviceClassBuffer.hExtBuffer = hExtBuffer; + + psDCInfo->sSystemBuffer.psDCInfo = psDCInfo; + + /* return handle */ + if (phBuffer) + { + *phBuffer = (IMG_HANDLE)&(psDCInfo->sSystemBuffer); + } + + return PVRSRV_OK; +} + + +/****************************************************************************** + + @Function PVRSRVGetDCInfoKM + + @Description + + Gets Display Class device Info + + @Input hDeviceKM : device handle + @Output psDisplayInfo + + @Return PVRSRV_ERROR : + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR 
PVRSRVGetDCInfoKM (IMG_HANDLE hDeviceKM, + DISPLAY_INFO *psDisplayInfo) +{ + PVRSRV_DISPLAYCLASS_INFO *psDCInfo; + PVRSRV_ERROR eError; + + if(!hDeviceKM || !psDisplayInfo) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetDCInfoKM: Invalid parameters")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM); + + /* call into the display device driver to get info */ + eError = psDCInfo->psFuncTable->pfnGetDCInfo(psDCInfo->hExtDevice, psDisplayInfo); + if (eError != PVRSRV_OK) + { + return eError; + } + + if (psDisplayInfo->ui32MaxSwapChainBuffers > PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS) + { + psDisplayInfo->ui32MaxSwapChainBuffers = PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS; + } + + return PVRSRV_OK; +} + + +IMG_EXPORT +PVRSRV_ERROR PVRSRVDestroyDCSwapChainKM(IMG_HANDLE hSwapChainRef) +{ + PVRSRV_ERROR eError; + PVRSRV_DC_SWAPCHAIN_REF *psSwapChainRef; + + if(!hSwapChainRef) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVDestroyDCSwapChainKM: Invalid parameters")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psSwapChainRef = hSwapChainRef; + + eError = ResManFreeResByPtr(psSwapChainRef->hResItem, CLEANUP_WITH_POLL); + + return eError; +} + + +static PVRSRV_ERROR DestroyDCSwapChain(PVRSRV_DC_SWAPCHAIN *psSwapChain) +{ + PVRSRV_ERROR eError; + PVRSRV_DISPLAYCLASS_INFO *psDCInfo = psSwapChain->psDCInfo; + IMG_UINT32 i; + + /* Update shared swapchains list */ + if( psDCInfo->psDCSwapChainShared ) + { + if( psDCInfo->psDCSwapChainShared == psSwapChain ) + { + psDCInfo->psDCSwapChainShared = psSwapChain->psNext; + } + else + { + PVRSRV_DC_SWAPCHAIN *psCurrentSwapChain; + psCurrentSwapChain = psDCInfo->psDCSwapChainShared; + while( psCurrentSwapChain->psNext ) + { + if( psCurrentSwapChain->psNext != psSwapChain ) + { + psCurrentSwapChain = psCurrentSwapChain->psNext; + continue; + } + psCurrentSwapChain->psNext = psSwapChain->psNext; + break; + } + } + } + + /* Destroy command queue before swapchain - it may use the swapchain when commands are flushed. 
*/ + PVRSRVDestroyCommandQueueKM(psSwapChain->psQueue); + + /* call into the display device driver to destroy a swapchain */ + eError = psDCInfo->psFuncTable->pfnDestroyDCSwapChain(psDCInfo->hExtDevice, + psSwapChain->hExtSwapChain); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"DestroyDCSwapChainCallBack: Failed to destroy DC swap chain")); + return eError; + } + + /* free the resources */ + for(i=0; i<psSwapChain->ui32BufferCount; i++) + { + if(psSwapChain->asBuffer[i].sDeviceClassBuffer.psKernelSyncInfo) + { + PVRSRVKernelSyncInfoDecRef(psSwapChain->asBuffer[i].sDeviceClassBuffer.psKernelSyncInfo, IMG_NULL); + } + } + +#if !defined(SUPPORT_DC_CMDCOMPLETE_WHEN_NO_LONGER_DISPLAYED) + if (psSwapChain->ppsLastSyncInfos) + { + for (i = 0; i < psSwapChain->ui32LastNumSyncInfos; i++) + { + if (psSwapChain->ppsLastSyncInfos[i]) + { + PVRSRVKernelSyncInfoDecRef(psSwapChain->ppsLastSyncInfos[i], IMG_NULL); + psSwapChain->ppsLastSyncInfos[i] = IMG_NULL; + } + } + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_KERNEL_SYNC_INFO *) * psSwapChain->ui32LastNumSyncInfos, + psSwapChain->ppsLastSyncInfos, IMG_NULL); + } +#endif /* !defined(SUPPORT_DC_CMDCOMPLETE_WHEN_NO_LONGER_DISPLAYED) */ + + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DC_SWAPCHAIN), psSwapChain, IMG_NULL); + /*not nulling pointer, copy on stack*/ + + return eError; +} + + +static PVRSRV_ERROR DestroyDCSwapChainRefCallBack(IMG_PVOID pvParam, + IMG_UINT32 ui32Param, + IMG_BOOL bDummy) +{ + PVRSRV_DC_SWAPCHAIN_REF *psSwapChainRef = (PVRSRV_DC_SWAPCHAIN_REF *) pvParam; + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_UINT32 i; + + PVR_UNREFERENCED_PARAMETER(ui32Param); + PVR_UNREFERENCED_PARAMETER(bDummy); + + for (i = 0; i < psSwapChainRef->psSwapChain->ui32BufferCount; i++) + { + if (psSwapChainRef->psSwapChain->asBuffer[i].sDeviceClassBuffer.ui32MemMapRefCount != 0) + { + PVR_DPF((PVR_DBG_ERROR, "DestroyDCSwapChainRefCallBack: swapchain (0x%p) still mapped (ui32MemMapRefCount = %d)", + 
&psSwapChainRef->psSwapChain->asBuffer[i].sDeviceClassBuffer, + psSwapChainRef->psSwapChain->asBuffer[i].sDeviceClassBuffer.ui32MemMapRefCount)); + } + } + + if(--psSwapChainRef->psSwapChain->ui32RefCount == 0) + { + eError = DestroyDCSwapChain(psSwapChainRef->psSwapChain); + } + + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DC_SWAPCHAIN_REF), psSwapChainRef, IMG_NULL); + return eError; +} + +static PVRSRV_DC_SWAPCHAIN* PVRSRVFindSharedDCSwapChainKM(PVRSRV_DISPLAYCLASS_INFO *psDCInfo, + IMG_UINT32 ui32SwapChainID) +{ + PVRSRV_DC_SWAPCHAIN *psCurrentSwapChain; + + for(psCurrentSwapChain = psDCInfo->psDCSwapChainShared; + psCurrentSwapChain; + psCurrentSwapChain = psCurrentSwapChain->psNext) + { + if(psCurrentSwapChain->ui32SwapChainID == ui32SwapChainID) + return psCurrentSwapChain; + } + return IMG_NULL; +} + +static PVRSRV_ERROR PVRSRVCreateDCSwapChainRefKM(PVRSRV_PER_PROCESS_DATA *psPerProc, + PVRSRV_DC_SWAPCHAIN *psSwapChain, + PVRSRV_DC_SWAPCHAIN_REF **ppsSwapChainRef) +{ + PVRSRV_DC_SWAPCHAIN_REF *psSwapChainRef = IMG_NULL; + + /* Allocate swapchain reference structre*/ + if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP, + sizeof(PVRSRV_DC_SWAPCHAIN_REF), + (IMG_VOID **)&psSwapChainRef, IMG_NULL, + "Display Class Swapchain Reference") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainRefKM: Failed psSwapChainRef alloc")); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + OSMemSet (psSwapChainRef, 0, sizeof(PVRSRV_DC_SWAPCHAIN_REF)); + + /* Bump refcount */ + psSwapChain->ui32RefCount++; + + /* Create reference resource */ + psSwapChainRef->psSwapChain = psSwapChain; + psSwapChainRef->hResItem = ResManRegisterRes(psPerProc->hResManContext, + RESMAN_TYPE_DISPLAYCLASS_SWAPCHAIN_REF, + psSwapChainRef, + 0, + &DestroyDCSwapChainRefCallBack); + if (psSwapChainRef->hResItem == IMG_NULL) + { + PVR_DPF ((PVR_DBG_ERROR, "PVRSRVCreateDCSwapChainRefKM: ResManRegisterRes failed")); + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DC_SWAPCHAIN_REF), psSwapChainRef, 
IMG_NULL); + return PVRSRV_ERROR_INVALID_PARAMS; + } + *ppsSwapChainRef = psSwapChainRef; + + return PVRSRV_OK; +} + + +IMG_EXPORT +PVRSRV_ERROR PVRSRVCreateDCSwapChainKM (PVRSRV_PER_PROCESS_DATA *psPerProc, + IMG_HANDLE hDeviceKM, + IMG_UINT32 ui32Flags, + DISPLAY_SURF_ATTRIBUTES *psDstSurfAttrib, + DISPLAY_SURF_ATTRIBUTES *psSrcSurfAttrib, + IMG_UINT32 ui32BufferCount, + IMG_UINT32 ui32OEMFlags, + IMG_HANDLE *phSwapChainRef, + IMG_UINT32 *pui32SwapChainID +#if defined(PVR_ANDROID_NATIVE_WINDOW_HAS_FENCE) + ,IMG_INT32 i32TimelineFd +#endif + ) +{ + PVRSRV_DISPLAYCLASS_INFO *psDCInfo; + PVRSRV_DC_SWAPCHAIN *psSwapChain = IMG_NULL; + PVRSRV_DC_SWAPCHAIN_REF *psSwapChainRef = IMG_NULL; + PVRSRV_SYNC_DATA *apsSyncData[PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS]; + PVRSRV_QUEUE_INFO *psQueue = IMG_NULL; + PVRSRV_ERROR eError; + IMG_UINT32 i; + DISPLAY_INFO sDisplayInfo; + + if(!hDeviceKM + || !psDstSurfAttrib + || !psSrcSurfAttrib + || !phSwapChainRef + || !pui32SwapChainID) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Invalid parameters")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + OSMemSet (apsSyncData, 0, sizeof(PVRSRV_SYNC_DATA *) * PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS); + + if (ui32BufferCount > PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Too many buffers")); + return PVRSRV_ERROR_TOOMANYBUFFERS; + } + + psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM); + + if( ui32Flags & PVRSRV_CREATE_SWAPCHAIN_QUERY ) + { + /* Query - use pui32SwapChainID as input */ + psSwapChain = PVRSRVFindSharedDCSwapChainKM(psDCInfo, *pui32SwapChainID ); + if( psSwapChain ) + { + /* Create new reference */ + eError = PVRSRVCreateDCSwapChainRefKM(psPerProc, + psSwapChain, + &psSwapChainRef); + if( eError != PVRSRV_OK ) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Couldn't create swap chain reference")); + return eError; + } + + *phSwapChainRef = (IMG_HANDLE)psSwapChainRef; + return PVRSRV_OK; + } + 
PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: No shared SwapChain found for query")); + return PVRSRV_ERROR_FLIP_CHAIN_EXISTS; + } + + /* Allocate swapchain control structure for srvkm */ + if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP, + sizeof(PVRSRV_DC_SWAPCHAIN), + (IMG_VOID **)&psSwapChain, IMG_NULL, + "Display Class Swapchain") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Failed psSwapChain alloc")); + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto ErrorExit; + } + OSMemSet (psSwapChain, 0, sizeof(PVRSRV_DC_SWAPCHAIN)); + + /* Create a command queue for the swapchain */ + eError = PVRSRVCreateCommandQueueKM(1024, &psQueue); + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Failed to create CmdQueue")); + goto ErrorExit; + } + +#if defined(PVR_ANDROID_NATIVE_WINDOW_HAS_FENCE) + psQueue->i32TimelineFd = i32TimelineFd; +#endif + + /* store the Queue */ + psSwapChain->psQueue = psQueue; + + /* Create a Sync Object for each surface in the swapchain */ + for(i=0; i<ui32BufferCount; i++) + { + eError = PVRSRVAllocSyncInfoKM(IMG_NULL, + psDCInfo->hDevMemContext, + &psSwapChain->asBuffer[i].sDeviceClassBuffer.psKernelSyncInfo); + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Failed to alloc syninfo for psSwapChain")); + goto ErrorExit; + } + + /* setup common device class info */ + psSwapChain->asBuffer[i].sDeviceClassBuffer.pfnGetBufferAddr = psDCInfo->psFuncTable->pfnGetBufferAddr; + psSwapChain->asBuffer[i].sDeviceClassBuffer.hDevMemContext = psDCInfo->hDevMemContext; + psSwapChain->asBuffer[i].sDeviceClassBuffer.hExtDevice = psDCInfo->hExtDevice; + + /* save off useful ptrs */ + psSwapChain->asBuffer[i].psDCInfo = psDCInfo; + psSwapChain->asBuffer[i].psSwapChain = psSwapChain; + + /* syncinfos must be passed as array of syncdata ptrs to the 3rd party driver */ + apsSyncData[i] = (PVRSRV_SYNC_DATA*)psSwapChain->asBuffer[i].sDeviceClassBuffer.psKernelSyncInfo->psSyncDataMemInfoKM->pvLinAddrKM; + } + + psSwapChain->ui32BufferCount = ui32BufferCount; + 
psSwapChain->psDCInfo = psDCInfo; + +#if defined(PDUMP) + PDUMPCOMMENT("Allocate DC swap chain (SwapChainID == %u, BufferCount == %u)", + *pui32SwapChainID, + ui32BufferCount); + PDUMPCOMMENT(" Src surface dimensions == %u x %u", + psSrcSurfAttrib->sDims.ui32Width, + psSrcSurfAttrib->sDims.ui32Height); + PDUMPCOMMENT(" Dst surface dimensions == %u x %u", + psDstSurfAttrib->sDims.ui32Width, + psDstSurfAttrib->sDims.ui32Height); +#endif + + eError = psDCInfo->psFuncTable->pfnGetDCInfo(psDCInfo->hExtDevice, &sDisplayInfo); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Failed to get DC info")); + return eError; + } + + psSwapChain->ui32MinSwapInterval = sDisplayInfo.ui32MinSwapInterval; + psSwapChain->ui32MaxSwapInterval = sDisplayInfo.ui32MaxSwapInterval; + + /* call into the display device driver to create a swapchain */ + eError = psDCInfo->psFuncTable->pfnCreateDCSwapChain(psDCInfo->hExtDevice, + ui32Flags, + psDstSurfAttrib, + psSrcSurfAttrib, + ui32BufferCount, + apsSyncData, + ui32OEMFlags, + &psSwapChain->hExtSwapChain, + &psSwapChain->ui32SwapChainID); + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Failed to create 3rd party SwapChain")); + PDUMPCOMMENT("Swapchain allocation failed."); + goto ErrorExit; + } + + /* Create new reference */ + eError = PVRSRVCreateDCSwapChainRefKM(psPerProc, + psSwapChain, + &psSwapChainRef); + if( eError != PVRSRV_OK ) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Couldn't create swap chain reference")); + PDUMPCOMMENT("Swapchain allocation failed."); + goto ErrorExit; + } + + psSwapChain->ui32RefCount = 1; + psSwapChain->ui32Flags = ui32Flags; + + /* Save pointer in DC structure if it's shared struct */ + if( ui32Flags & PVRSRV_CREATE_SWAPCHAIN_SHARED ) + { + if(! 
psDCInfo->psDCSwapChainShared ) + { + psDCInfo->psDCSwapChainShared = psSwapChain; + } + else + { + PVRSRV_DC_SWAPCHAIN *psOldHead = psDCInfo->psDCSwapChainShared; + psDCInfo->psDCSwapChainShared = psSwapChain; + psSwapChain->psNext = psOldHead; + } + } + + /* We create swapchain - pui32SwapChainID is output */ + *pui32SwapChainID = psSwapChain->ui32SwapChainID; + + /* return the swapchain reference handle */ + *phSwapChainRef= (IMG_HANDLE)psSwapChainRef; + + return eError; + +ErrorExit: + + for(i=0; i<ui32BufferCount; i++) + { + if(psSwapChain->asBuffer[i].sDeviceClassBuffer.psKernelSyncInfo) + { + PVRSRVKernelSyncInfoDecRef(psSwapChain->asBuffer[i].sDeviceClassBuffer.psKernelSyncInfo, IMG_NULL); + } + } + + if(psQueue) + { + PVRSRVDestroyCommandQueueKM(psQueue); + } + + if(psSwapChain) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DC_SWAPCHAIN), psSwapChain, IMG_NULL); + /*not nulling pointer, out of scope*/ + } + + return eError; +} + + + + +IMG_EXPORT +PVRSRV_ERROR PVRSRVSetDCDstRectKM(IMG_HANDLE hDeviceKM, + IMG_HANDLE hSwapChainRef, + IMG_RECT *psRect) +{ + PVRSRV_DISPLAYCLASS_INFO *psDCInfo; + PVRSRV_DC_SWAPCHAIN *psSwapChain; + + if(!hDeviceKM || !hSwapChainRef) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVSetDCDstRectKM: Invalid parameters")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM); + psSwapChain = ((PVRSRV_DC_SWAPCHAIN_REF*)hSwapChainRef)->psSwapChain; + + return psDCInfo->psFuncTable->pfnSetDCDstRect(psDCInfo->hExtDevice, + psSwapChain->hExtSwapChain, + psRect); +} + + +IMG_EXPORT +PVRSRV_ERROR PVRSRVSetDCSrcRectKM(IMG_HANDLE hDeviceKM, + IMG_HANDLE hSwapChainRef, + IMG_RECT *psRect) +{ + PVRSRV_DISPLAYCLASS_INFO *psDCInfo; + PVRSRV_DC_SWAPCHAIN *psSwapChain; + + if(!hDeviceKM || !hSwapChainRef) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVSetDCSrcRectKM: Invalid parameters")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM); + psSwapChain = ((PVRSRV_DC_SWAPCHAIN_REF*)hSwapChainRef)->psSwapChain; + + return 
psDCInfo->psFuncTable->pfnSetDCSrcRect(psDCInfo->hExtDevice, + psSwapChain->hExtSwapChain, + psRect); +} + + +IMG_EXPORT +PVRSRV_ERROR PVRSRVSetDCDstColourKeyKM(IMG_HANDLE hDeviceKM, + IMG_HANDLE hSwapChainRef, + IMG_UINT32 ui32CKColour) +{ + PVRSRV_DISPLAYCLASS_INFO *psDCInfo; + PVRSRV_DC_SWAPCHAIN *psSwapChain; + + if(!hDeviceKM || !hSwapChainRef) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVSetDCDstColourKeyKM: Invalid parameters")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM); + psSwapChain = ((PVRSRV_DC_SWAPCHAIN_REF*)hSwapChainRef)->psSwapChain; + + return psDCInfo->psFuncTable->pfnSetDCDstColourKey(psDCInfo->hExtDevice, + psSwapChain->hExtSwapChain, + ui32CKColour); +} + + +IMG_EXPORT +PVRSRV_ERROR PVRSRVSetDCSrcColourKeyKM(IMG_HANDLE hDeviceKM, + IMG_HANDLE hSwapChainRef, + IMG_UINT32 ui32CKColour) +{ + PVRSRV_DISPLAYCLASS_INFO *psDCInfo; + PVRSRV_DC_SWAPCHAIN *psSwapChain; + + if(!hDeviceKM || !hSwapChainRef) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVSetDCSrcColourKeyKM: Invalid parameters")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM); + psSwapChain = ((PVRSRV_DC_SWAPCHAIN_REF*)hSwapChainRef)->psSwapChain; + + return psDCInfo->psFuncTable->pfnSetDCSrcColourKey(psDCInfo->hExtDevice, + psSwapChain->hExtSwapChain, + ui32CKColour); +} + + +IMG_EXPORT +PVRSRV_ERROR PVRSRVGetDCBuffersKM(IMG_HANDLE hDeviceKM, + IMG_HANDLE hSwapChainRef, + IMG_UINT32 *pui32BufferCount, + IMG_HANDLE *phBuffer, + IMG_SYS_PHYADDR *psPhyAddr) +{ + PVRSRV_DISPLAYCLASS_INFO *psDCInfo; + PVRSRV_DC_SWAPCHAIN *psSwapChain; + IMG_HANDLE ahExtBuffer[PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS]; + PVRSRV_ERROR eError; + IMG_UINT32 i; + + if(!hDeviceKM || !hSwapChainRef || !phBuffer || !psPhyAddr) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetDCBuffersKM: Invalid parameters")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM); + psSwapChain = 
((PVRSRV_DC_SWAPCHAIN_REF*)hSwapChainRef)->psSwapChain; + + /* call into the display device driver to get info */ + eError = psDCInfo->psFuncTable->pfnGetDCBuffers(psDCInfo->hExtDevice, + psSwapChain->hExtSwapChain, + pui32BufferCount, + ahExtBuffer); + + PVR_ASSERT(*pui32BufferCount <= PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS); + + /* + populate the srvkm's buffer structure with the 3rd party buffer handles + and return the services buffer handles + */ + for(i=0; i<*pui32BufferCount; i++) + { + psSwapChain->asBuffer[i].sDeviceClassBuffer.hExtBuffer = ahExtBuffer[i]; + phBuffer[i] = (IMG_HANDLE)&psSwapChain->asBuffer[i]; + } + + return eError; +} + + +IMG_EXPORT +PVRSRV_ERROR PVRSRVSwapToDCBufferKM(IMG_HANDLE hDeviceKM, + IMG_HANDLE hBuffer, + IMG_UINT32 ui32SwapInterval, + IMG_HANDLE hPrivateTag, + IMG_UINT32 ui32ClipRectCount, + IMG_RECT *psClipRect) +{ + PVRSRV_ERROR eError; + PVRSRV_DISPLAYCLASS_INFO *psDCInfo; + PVRSRV_DC_BUFFER *psBuffer; + PVRSRV_QUEUE_INFO *psQueue; + DISPLAYCLASS_FLIP_COMMAND *psFlipCmd; + IMG_UINT32 i; + IMG_BOOL bAddReferenceToLast = IMG_TRUE; + IMG_UINT16 ui16SwapCommandID = DC_FLIP_COMMAND; + IMG_UINT32 ui32NumSrcSyncs = 1; + PVRSRV_KERNEL_SYNC_INFO *apsSrcSync[2]; + PVRSRV_COMMAND *psCommand; + SYS_DATA *psSysData; + + if(!hDeviceKM || !hBuffer || !psClipRect) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBufferKM: Invalid parameters")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psBuffer = (PVRSRV_DC_BUFFER*)hBuffer; + psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM); + + /* Validate swap interval against limits */ + if(ui32SwapInterval < psBuffer->psSwapChain->ui32MinSwapInterval || + ui32SwapInterval > psBuffer->psSwapChain->ui32MaxSwapInterval) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBufferKM: Invalid swap interval. 
Requested %u, Allowed range %u-%u", + ui32SwapInterval, psBuffer->psSwapChain->ui32MinSwapInterval, psBuffer->psSwapChain->ui32MaxSwapInterval)); + return PVRSRV_ERROR_INVALID_SWAPINTERVAL; + } + + /* get the queue from the buffer structure */ + psQueue = psBuffer->psSwapChain->psQueue; + + /* specify the syncs */ + apsSrcSync[0] = psBuffer->sDeviceClassBuffer.psKernelSyncInfo; + if(bAddReferenceToLast && psBuffer->psSwapChain->psLastFlipBuffer && + psBuffer != psBuffer->psSwapChain->psLastFlipBuffer) + { + apsSrcSync[1] = psBuffer->psSwapChain->psLastFlipBuffer->sDeviceClassBuffer.psKernelSyncInfo; + ui32NumSrcSyncs++; + } + + /* insert the command (header) */ + eError = PVRSRVInsertCommandKM (psQueue, + &psCommand, + psDCInfo->ui32DeviceID, + ui16SwapCommandID, + 0, + IMG_NULL, + ui32NumSrcSyncs, + apsSrcSync, + sizeof(DISPLAYCLASS_FLIP_COMMAND) + (sizeof(IMG_RECT) * ui32ClipRectCount), + IMG_NULL, + IMG_NULL, + IMG_NULL); + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBufferKM: Failed to get space in queue")); + goto Exit; + } + + /* setup the flip command */ + psFlipCmd = (DISPLAYCLASS_FLIP_COMMAND*)psCommand->pvData; + + /* Ext Device Handle */ + psFlipCmd->hExtDevice = psDCInfo->hExtDevice; + + /* Ext SwapChain Handle */ + psFlipCmd->hExtSwapChain = psBuffer->psSwapChain->hExtSwapChain; + + /* Ext Buffer Handle (Buffer to Flip to) */ + psFlipCmd->hExtBuffer = psBuffer->sDeviceClassBuffer.hExtBuffer; + + /* private tag */ + psFlipCmd->hPrivateTag = hPrivateTag; + + /* setup the clip rects */ + psFlipCmd->ui32ClipRectCount = ui32ClipRectCount; + /* cliprect memory appends the command structure */ + psFlipCmd->psClipRect = (IMG_RECT*)((IMG_UINT8*)psFlipCmd + sizeof(DISPLAYCLASS_FLIP_COMMAND)); // PRQA S 3305 + /* copy the clip rects */ + for(i=0; i<ui32ClipRectCount; i++) + { + psFlipCmd->psClipRect[i] = psClipRect[i]; + } + + /* number of vsyncs between successive flips */ + psFlipCmd->ui32SwapInterval = ui32SwapInterval; + + SysAcquireData(&psSysData); + + /* Because we might be 
composing just software surfaces, without + * any SGX renders since the last frame, we won't necessarily + * have cleaned/flushed the CPU caches before the buffers need + * to be displayed. + * + * Doing so now is safe because InsertCommand bumped ROP2 on the + * affected buffers (preventing more SW renders starting) but the + * display won't start to process the buffers until SubmitCommand. + */ + { + if(psSysData->ePendingCacheOpType == PVRSRV_MISC_INFO_CPUCACHEOP_FLUSH) + { + OSFlushCPUCacheKM(); + } + else if(psSysData->ePendingCacheOpType == PVRSRV_MISC_INFO_CPUCACHEOP_CLEAN) + { + OSCleanCPUCacheKM(); + } + + psSysData->ePendingCacheOpType = PVRSRV_MISC_INFO_CPUCACHEOP_NONE; + } + + /* submit the command */ + eError = PVRSRVSubmitCommandKM (psQueue, psCommand); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBufferKM: Failed to submit command")); + goto Exit; + } + + /* + Schedule an MISR to process it + */ + eError = OSScheduleMISR(psSysData); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBufferKM: Failed to schedule MISR")); + goto Exit; + } + + /* update the last flip buffer */ + psBuffer->psSwapChain->psLastFlipBuffer = psBuffer; + +Exit: + + if(eError == PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE) + { + eError = PVRSRV_ERROR_RETRY; + } + + return eError; +} + +typedef struct _CALLBACK_DATA_ +{ + IMG_PVOID pvPrivData; + IMG_UINT32 ui32PrivDataLength; + IMG_PVOID ppvMemInfos; + IMG_UINT32 ui32NumMemInfos; +} CALLBACK_DATA; + +static IMG_VOID FreePrivateData(IMG_HANDLE hCallbackData) +{ + CALLBACK_DATA *psCallbackData = hCallbackData; + + if(psCallbackData->ui32PrivDataLength) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP | PVRSRV_SWAP_BUFFER_ALLOCATION, + psCallbackData->ui32PrivDataLength, + psCallbackData->pvPrivData, IMG_NULL); + } + + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP | PVRSRV_SWAP_BUFFER_ALLOCATION, + sizeof(IMG_VOID *) * psCallbackData->ui32NumMemInfos, + psCallbackData->ppvMemInfos, IMG_NULL); + + 
OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP | PVRSRV_SWAP_BUFFER_ALLOCATION, + sizeof(CALLBACK_DATA), + hCallbackData, IMG_NULL); +} + +IMG_EXPORT +PVRSRV_ERROR PVRSRVSwapToDCBuffer2KM(IMG_HANDLE hDeviceKM, + IMG_HANDLE hSwapChain, + IMG_UINT32 ui32SwapInterval, + PVRSRV_KERNEL_MEM_INFO **ppsMemInfos, + PVRSRV_KERNEL_SYNC_INFO **ppsSyncInfos, + IMG_UINT32 ui32NumMemSyncInfos, + IMG_PVOID pvPrivData, + IMG_UINT32 ui32PrivDataLength, + IMG_HANDLE *phFence) +{ + IMG_UINT32 ui32NumSyncInfos = ui32NumMemSyncInfos; + IMG_UINT32 ui32NumMemInfos = ui32NumMemSyncInfos; + PVRSRV_KERNEL_SYNC_INFO **ppsCompiledSyncInfos; + IMG_UINT32 i, ui32NumCompiledSyncInfos; + DISPLAYCLASS_FLIP_COMMAND2 *psFlipCmd; + PVRSRV_DISPLAYCLASS_INFO *psDCInfo; + PVRSRV_DC_SWAPCHAIN *psSwapChain; + CALLBACK_DATA *psCallbackData; + PVRSRV_QUEUE_INFO *psQueue; + PVRSRV_COMMAND *psCommand; + IMG_PVOID *ppvMemInfos; + PVRSRV_ERROR eError; + SYS_DATA *psSysData; +#if defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) + struct sync_fence *apsFence[SGX_MAX_SRC_SYNCS_TA] = {}; +#elif defined(PVR_ANDROID_NATIVE_WINDOW_HAS_FENCE) + struct fence *apsFence[SGX_MAX_SRC_SYNCS_TA] = {}; +#endif /* defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) */ + + if(!hDeviceKM || !hSwapChain || !ppsMemInfos || !ppsSyncInfos || ui32NumMemSyncInfos < 1) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBuffer2KM: Invalid parameters")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psSwapChain = ((PVRSRV_DC_SWAPCHAIN_REF*)hSwapChain)->psSwapChain; + psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM); + + /* Validate swap interval against limits */ + if(ui32SwapInterval < psSwapChain->ui32MinSwapInterval || + ui32SwapInterval > psSwapChain->ui32MaxSwapInterval) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBuffer2KM: Invalid swap interval. 
Requested %u, Allowed range %u-%u", + ui32SwapInterval, psSwapChain->ui32MinSwapInterval, psSwapChain->ui32MaxSwapInterval)); + return PVRSRV_ERROR_INVALID_SWAPINTERVAL; + } + + eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP | PVRSRV_SWAP_BUFFER_ALLOCATION, + sizeof(CALLBACK_DATA), + (IMG_VOID **)&psCallbackData, IMG_NULL, + "PVRSRVSwapToDCBuffer2KM callback data"); + if (eError != PVRSRV_OK) + { + return eError; + } + + psCallbackData->pvPrivData = pvPrivData; + psCallbackData->ui32PrivDataLength = ui32PrivDataLength; + + if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP | PVRSRV_SWAP_BUFFER_ALLOCATION, + sizeof(void *) * ui32NumMemInfos, + (void **)&ppvMemInfos, IMG_NULL, + "Swap Command Meminfos") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBuffer2KM: Failed to allocate space for meminfo list")); + psCallbackData->ppvMemInfos = IMG_NULL; + goto Exit; + } + + for(i = 0; i < ui32NumMemInfos; i++) + { + ppvMemInfos[i] = ppsMemInfos[i]; + } + + psCallbackData->ppvMemInfos = ppvMemInfos; + psCallbackData->ui32NumMemInfos = ui32NumMemInfos; + +#if defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) || defined(PVR_ANDROID_NATIVE_WINDOW_HAS_FENCE) + eError = PVRSyncFencesToSyncInfos(ppsSyncInfos, &ui32NumSyncInfos, apsFence); + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBuffer2KM: PVRSyncFencesToSyncInfos failed")); + goto Exit; + } +#endif /* defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) */ + + /* get the queue from the buffer structure */ + psQueue = psSwapChain->psQueue; + +#if !defined(SUPPORT_DC_CMDCOMPLETE_WHEN_NO_LONGER_DISPLAYED) + if(psSwapChain->ppsLastSyncInfos) + { + IMG_UINT32 ui32NumUniqueSyncInfos = psSwapChain->ui32LastNumSyncInfos; + IMG_BOOL *abUnique; + IMG_UINT32 j; + + if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(IMG_BOOL) * psSwapChain->ui32LastNumSyncInfos, + (IMG_VOID **)&abUnique, IMG_NULL, + "Unique booleans") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBuffer2KM: Failed to allocate space for unique booleans")); + 
goto Exit; + } + + for(j = 0; j < psSwapChain->ui32LastNumSyncInfos; j++) + { + abUnique[j] = IMG_TRUE; + for(i = 0; i < ui32NumSyncInfos; i++) + { + PVR_ASSERT(psSwapChain->ppsLastSyncInfos[j]); + PVR_ASSERT(ppsSyncInfos[i]); + if(psSwapChain->ppsLastSyncInfos[j] == ppsSyncInfos[i]) + { + abUnique[j] = IMG_FALSE; + ui32NumUniqueSyncInfos--; + break; + } + } + } + + ui32NumCompiledSyncInfos = ui32NumSyncInfos + ui32NumUniqueSyncInfos; + + if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(PVRSRV_KERNEL_SYNC_INFO *) * ui32NumCompiledSyncInfos, + (IMG_VOID **)&ppsCompiledSyncInfos, IMG_NULL, + "Compiled syncinfos") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBuffer2KM: Failed to allocate space for meminfo list")); + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(IMG_BOOL) * psSwapChain->ui32LastNumSyncInfos, + (IMG_VOID *)abUnique, IMG_NULL); +#if defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) + for(i = 0; i < SGX_MAX_SRC_SYNCS_TA && apsFence[i]; i++) + sync_fence_put(apsFence[i]); +#elif defined(PVR_ANDROID_NATIVE_WINDOW_HAS_FENCE) + for(i = 0; i < SGX_MAX_SRC_SYNCS_TA && apsFence[i]; i++) + fence_put(apsFence[i]); +#endif + goto Exit; + } + + OSMemCopy(ppsCompiledSyncInfos, ppsSyncInfos, sizeof(PVRSRV_KERNEL_SYNC_INFO *) * ui32NumSyncInfos); + for(j = 0, i = ui32NumSyncInfos; j < psSwapChain->ui32LastNumSyncInfos; j++) + { + if(abUnique[j]) + { + ppsCompiledSyncInfos[i] = psSwapChain->ppsLastSyncInfos[j]; + i++; + } + } + + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(IMG_BOOL) * psSwapChain->ui32LastNumSyncInfos, + (IMG_VOID *)abUnique, IMG_NULL); + } + else +#endif /* !defined(SUPPORT_DC_CMDCOMPLETE_WHEN_NO_LONGER_DISPLAYED) */ + { + IMG_UINT32 j, ui32Missing = 0; + + /* Older synchronization schemes would just pass down the syncinfos + * hanging off of the meminfos. So we would expect identical lists. + * However, newer drivers may send down additional synchronization + * i.e. for TQ fence operations. 
In such a case we need to allocate + * more space for the compiled syncinfos to ensure everything is + * ROP2 synchronized. + */ + for(i = 0; i < ui32NumMemInfos; i++) + { + for(j = 0; j < ui32NumSyncInfos; j++) + { + if(ppsSyncInfos[j] == ppsMemInfos[i]->psKernelSyncInfo) + break; + } + + if(j == ui32NumSyncInfos) + ui32Missing++; + } + + if(ui32Missing) + { + IMG_UINT32 k; + + ui32NumCompiledSyncInfos = ui32NumSyncInfos + ui32Missing; + + if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(PVRSRV_KERNEL_SYNC_INFO *) * ui32NumCompiledSyncInfos, + (IMG_VOID **)&ppsCompiledSyncInfos, IMG_NULL, + "Compiled syncinfos") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBuffer2KM: Failed to allocate space for meminfo list")); +#if defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) + for(i = 0; i < SGX_MAX_SRC_SYNCS_TA && apsFence[i]; i++) + sync_fence_put(apsFence[i]); +#elif defined(PVR_ANDROID_NATIVE_WINDOW_HAS_FENCE) + for(i = 0; i < SGX_MAX_SRC_SYNCS_TA && apsFence[i]; i++) + fence_put(apsFence[i]); +#endif + goto Exit; + } + + for(i = 0; i < ui32NumSyncInfos; i++) + { + ppsCompiledSyncInfos[i] = ppsSyncInfos[i]; + } + + k = i; + for(i = 0; i < ui32NumMemInfos; i++) + { + for(j = 0; j < ui32NumSyncInfos; j++) + { + if(ppsSyncInfos[j] == ppsMemInfos[i]->psKernelSyncInfo) + break; + } + + if(j == ui32NumSyncInfos) + { + /* Insert the unique one */ + PVR_ASSERT(k < ui32NumCompiledSyncInfos); + ppsCompiledSyncInfos[k] = ppsMemInfos[i]->psKernelSyncInfo; + k++; + } + } + + PVR_ASSERT(k == ui32NumCompiledSyncInfos); + + /* As a further complication, if we have multiple displays, we + * might see the same layer/meminfo submitted twice. This is + * valid, as the layer might be needed by two separate pipes, + * but we should not use the meminfo's synchronization twice + * because this will deadlock the queue processor. + * + * For now, work over the meminfo end of the compiled syncs + * list and collapse any duplicates. 
We can assume the fence + * sync part of the array has already been de-duplicated. + */ + k = ui32NumSyncInfos; + for(i = ui32NumSyncInfos; i < ui32NumCompiledSyncInfos; i++) + { + /* Compare the i'th entry with all that follow */ + for(j = i + 1; j < ui32NumCompiledSyncInfos; j++) + { + if(ppsCompiledSyncInfos[i] == ppsCompiledSyncInfos[j]) + break; + } + + if(j == ui32NumCompiledSyncInfos) + { + /* No duplicate found. Use this entry */ + ppsCompiledSyncInfos[k] = ppsCompiledSyncInfos[i]; + k++; + } + } + ui32NumCompiledSyncInfos = k; + } + else + { + ppsCompiledSyncInfos = ppsSyncInfos; + ui32NumCompiledSyncInfos = ui32NumSyncInfos; + } + } + + /* insert the command (header) */ + eError = PVRSRVInsertCommandKM (psQueue, + &psCommand, + psDCInfo->ui32DeviceID, + DC_FLIP_COMMAND, + 0, + IMG_NULL, + ui32NumCompiledSyncInfos, + ppsCompiledSyncInfos, + sizeof(DISPLAYCLASS_FLIP_COMMAND2), + FreePrivateData, + psCallbackData, + phFence); + +#if defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) + /* InsertCommand bumped the refcount on the raw sync objects, so we + * can put the fences now. Even if the fences are deleted, the syncs + * will persist. 
+ */ + for(i = 0; i < SGX_MAX_SRC_SYNCS_TA && apsFence[i]; i++) + sync_fence_put(apsFence[i]); +#elif defined(PVR_ANDROID_NATIVE_WINDOW_HAS_FENCE) + for(i = 0; i < SGX_MAX_SRC_SYNCS_TA && apsFence[i]; i++) + fence_put(apsFence[i]); +#endif + + if (ppsCompiledSyncInfos != ppsSyncInfos) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(PVRSRV_KERNEL_SYNC_INFO *) * ui32NumCompiledSyncInfos, + (IMG_VOID *)ppsCompiledSyncInfos, + IMG_NULL); + } + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBuffer2KM: Failed to get space in queue")); + goto Exit; + } + + /* setup the flip command */ + psFlipCmd = (DISPLAYCLASS_FLIP_COMMAND2*)psCommand->pvData; + + /* Ext Device Handle */ + psFlipCmd->hExtDevice = psDCInfo->hExtDevice; + + /* Ext SwapChain Handle */ + psFlipCmd->hExtSwapChain = psSwapChain->hExtSwapChain; + + /* number of vsyncs between successive flips */ + psFlipCmd->ui32SwapInterval = ui32SwapInterval; + + /* Opaque private data, if supplied */ + psFlipCmd->pvPrivData = pvPrivData; + psFlipCmd->ui32PrivDataLength = ui32PrivDataLength; + + psFlipCmd->ppsMemInfos = (PDC_MEM_INFO *)ppvMemInfos; + psFlipCmd->ui32NumMemInfos = ui32NumMemInfos; + + /* Even though this is "unused", we have to initialize it, + * as the display controller might NULL-test it. + */ + psFlipCmd->hUnused = IMG_NULL; + + SysAcquireData(&psSysData); + + /* Because we might be composing just software surfaces, without + * any SGX renders since the last frame, we won't necessarily + * have cleaned/flushed the CPU caches before the buffers need + * to be displayed. + * + * Doing so now is safe because InsertCommand bumped ROP2 on the + * affected buffers (preventing more SW renders starting) but the + * display won't start to process the buffers until SubmitCommand. 
+ */ + { + if(psSysData->ePendingCacheOpType == PVRSRV_MISC_INFO_CPUCACHEOP_FLUSH) + { + OSFlushCPUCacheKM(); + } + else if(psSysData->ePendingCacheOpType == PVRSRV_MISC_INFO_CPUCACHEOP_CLEAN) + { + OSCleanCPUCacheKM(); + } + + psSysData->ePendingCacheOpType = PVRSRV_MISC_INFO_CPUCACHEOP_NONE; + } + + /* submit the command */ + PVRSRVSubmitCommandKM (psQueue, psCommand); + + /* The command has been submitted and so psCallbackData will be freed by the callback */ + psCallbackData = IMG_NULL; + + /* + Schedule an MISR to process it + */ + OSScheduleMISR(psSysData); + +#if !defined(SUPPORT_DC_CMDCOMPLETE_WHEN_NO_LONGER_DISPLAYED) + /* Reallocate the syncinfo list if it was too small */ + if (psSwapChain->ui32LastNumSyncInfos < ui32NumSyncInfos) + { + if (psSwapChain->ppsLastSyncInfos) + { + for (i = 0; i < psSwapChain->ui32LastNumSyncInfos; i++) + { + if (psSwapChain->ppsLastSyncInfos[i]) + { + PVRSRVKernelSyncInfoDecRef(psSwapChain->ppsLastSyncInfos[i], IMG_NULL); + psSwapChain->ppsLastSyncInfos[i] = IMG_NULL; + } + } + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_KERNEL_SYNC_INFO *) * psSwapChain->ui32LastNumSyncInfos, + psSwapChain->ppsLastSyncInfos, IMG_NULL); + } + + if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(PVRSRV_KERNEL_SYNC_INFO *) * ui32NumSyncInfos, + (IMG_VOID **)&psSwapChain->ppsLastSyncInfos, IMG_NULL, + "Last syncinfos") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBuffer2KM: Failed to allocate space for meminfo list")); +#if defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) + sync_fence_put(*phFence); + *phFence = IMG_NULL; +#elif defined(PVR_ANDROID_NATIVE_WINDOW_HAS_FENCE) + fence_put(*phFence); + *phFence = IMG_NULL; +#endif + goto Exit; + } + } + + for (i = 0; i < psSwapChain->ui32LastNumSyncInfos; i++) + { + if (psSwapChain->ppsLastSyncInfos[i]) + { + PVRSRVKernelSyncInfoDecRef(psSwapChain->ppsLastSyncInfos[i], IMG_NULL); + psSwapChain->ppsLastSyncInfos[i] = IMG_NULL; + } + } + + psSwapChain->ui32LastNumSyncInfos = 
ui32NumSyncInfos; + + for(i = 0; i < ui32NumSyncInfos; i++) + { + psSwapChain->ppsLastSyncInfos[i] = ppsSyncInfos[i]; + PVRSRVKernelSyncInfoIncRef(psSwapChain->ppsLastSyncInfos[i], IMG_NULL); + } +#endif /* !defined(SUPPORT_DC_CMDCOMPLETE_WHEN_NO_LONGER_DISPLAYED) */ + +Exit: + if (psCallbackData) + { + if(psCallbackData->ppvMemInfos) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP | PVRSRV_SWAP_BUFFER_ALLOCATION, + sizeof(IMG_VOID *) * psCallbackData->ui32NumMemInfos, + psCallbackData->ppvMemInfos, IMG_NULL); + } + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP | PVRSRV_SWAP_BUFFER_ALLOCATION, sizeof(CALLBACK_DATA), psCallbackData, IMG_NULL); + } + if(eError == PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE) + { + eError = PVRSRV_ERROR_RETRY; + } + + return eError; +} + + +IMG_EXPORT +PVRSRV_ERROR PVRSRVSwapToDCSystemKM(IMG_HANDLE hDeviceKM, + IMG_HANDLE hSwapChainRef) +{ + PVRSRV_ERROR eError; + PVRSRV_QUEUE_INFO *psQueue; + PVRSRV_DISPLAYCLASS_INFO *psDCInfo; + PVRSRV_DC_SWAPCHAIN *psSwapChain; + PVRSRV_DC_SWAPCHAIN_REF *psSwapChainRef; + DISPLAYCLASS_FLIP_COMMAND *psFlipCmd; + IMG_UINT32 ui32NumSrcSyncs = 1; + PVRSRV_KERNEL_SYNC_INFO *apsSrcSync[2]; + PVRSRV_COMMAND *psCommand; + IMG_BOOL bAddReferenceToLast = IMG_TRUE; + IMG_UINT16 ui16SwapCommandID = DC_FLIP_COMMAND; + SYS_DATA *psSysData; + + if(!hDeviceKM || !hSwapChainRef) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCSystemKM: Invalid parameters")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM); + psSwapChainRef = (PVRSRV_DC_SWAPCHAIN_REF*)hSwapChainRef; + psSwapChain = psSwapChainRef->psSwapChain; + + /* + If more than 1 reference to the swapchain exists then + ignore any request to swap to the system buffer + */ + if (psSwapChain->ui32RefCount > 1) + { + return PVRSRV_OK; + } + + /* get the queue from the buffer structure */ + psQueue = psSwapChain->psQueue; + + /* specify the syncs */ + apsSrcSync[0] = psDCInfo->sSystemBuffer.sDeviceClassBuffer.psKernelSyncInfo; + if(bAddReferenceToLast && 
psSwapChain->psLastFlipBuffer) + { + /* Make sure we don't make a double dependency on the same server */ + if (apsSrcSync[0] != psSwapChain->psLastFlipBuffer->sDeviceClassBuffer.psKernelSyncInfo) + { + apsSrcSync[1] = psSwapChain->psLastFlipBuffer->sDeviceClassBuffer.psKernelSyncInfo; + ui32NumSrcSyncs++; + } + } + + /* insert the command (header) */ + eError = PVRSRVInsertCommandKM (psQueue, + &psCommand, + psDCInfo->ui32DeviceID, + ui16SwapCommandID, + 0, + IMG_NULL, + ui32NumSrcSyncs, + apsSrcSync, + sizeof(DISPLAYCLASS_FLIP_COMMAND), + IMG_NULL, + IMG_NULL, + IMG_NULL); + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCSystemKM: Failed to get space in queue")); + goto Exit; + } + + /* setup the flip command */ + psFlipCmd = (DISPLAYCLASS_FLIP_COMMAND*)psCommand->pvData; + + /* Ext Device Handle */ + psFlipCmd->hExtDevice = psDCInfo->hExtDevice; + + /* Ext SwapChain Handle */ + psFlipCmd->hExtSwapChain = psSwapChain->hExtSwapChain; + + /* Ext Buffer Handle (Buffer to Flip to) */ + psFlipCmd->hExtBuffer = psDCInfo->sSystemBuffer.sDeviceClassBuffer.hExtBuffer; + + /* private tag */ + psFlipCmd->hPrivateTag = IMG_NULL; + + /* setup the clip rects */ + psFlipCmd->ui32ClipRectCount = 0; + + psFlipCmd->ui32SwapInterval = 1; + + /* submit the command */ + eError = PVRSRVSubmitCommandKM (psQueue, psCommand); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCSystemKM: Failed to submit command")); + goto Exit; + } + + /* Schedule an MISR to process it */ + SysAcquireData(&psSysData); + eError = OSScheduleMISR(psSysData); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCSystemKM: Failed to schedule MISR")); + goto Exit; + } + + /* update the last flip buffer */ + psSwapChain->psLastFlipBuffer = &psDCInfo->sSystemBuffer; + + eError = PVRSRV_OK; + +Exit: + + if(eError == PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE) + { + eError = PVRSRV_ERROR_RETRY; + } + + return eError; +} + + +/*! 
+****************************************************************************** + + @Function PVRSRVRegisterSystemISRHandler + + @Description + + registers an external ISR to be called of the back of a system ISR + + @Input ppfnISRHandler : ISR pointer + + @Input hISRHandlerData : Callback data + + @Input ui32ISRSourceMask : ISR Mask + + @Input ui32DeviceID : unique device key + + @Return PVRSRV_ERROR : + +******************************************************************************/ +static +PVRSRV_ERROR PVRSRVRegisterSystemISRHandler (PFN_ISR_HANDLER pfnISRHandler, + IMG_VOID *pvISRHandlerData, + IMG_UINT32 ui32ISRSourceMask, + IMG_UINT32 ui32DeviceID) +{ + SYS_DATA *psSysData; + PVRSRV_DEVICE_NODE *psDevNode; + + PVR_UNREFERENCED_PARAMETER(ui32ISRSourceMask); + + SysAcquireData(&psSysData); + + /* Find Dev Node (just using the device id, ignore the class) */ + psDevNode = (PVRSRV_DEVICE_NODE*) + List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList, + &MatchDeviceKM_AnyVaCb, + ui32DeviceID, + IMG_TRUE); + + if (psDevNode == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterSystemISRHandler: Failed to get psDevNode")); + PVR_DBG_BREAK; + return PVRSRV_ERROR_NO_DEVICENODE_FOUND; + } + + /* set up data before enabling the ISR */ + psDevNode->pvISRData = (IMG_VOID*) pvISRHandlerData; + + /* enable the ISR */ + psDevNode->pfnDeviceISR = pfnISRHandler; + + return PVRSRV_OK; +} + +/*! +****************************************************************************** + + @Function PVRSRVSetDCState_ForEachVaCb + + @Description + + If the device node is a display, calls its set state function. + + @Input psDeviceNode - the device node + va - variable argument list with: + ui32State - the state to be set. 
+ +******************************************************************************/ +static +IMG_VOID PVRSRVSetDCState_ForEachVaCb(PVRSRV_DEVICE_NODE *psDeviceNode, va_list va) +{ + PVRSRV_DISPLAYCLASS_INFO *psDCInfo; + IMG_UINT32 ui32State; + ui32State = va_arg(va, IMG_UINT32); + + if (psDeviceNode->sDevId.eDeviceClass == PVRSRV_DEVICE_CLASS_DISPLAY) + { + psDCInfo = (PVRSRV_DISPLAYCLASS_INFO *)psDeviceNode->pvDevice; + if (psDCInfo->psFuncTable->pfnSetDCState && psDCInfo->hExtDevice) + { + psDCInfo->psFuncTable->pfnSetDCState(psDCInfo->hExtDevice, ui32State); + } + } +} + + +/*! +****************************************************************************** + + @Function PVRSRVSetDCState + + @Description + + Calls the display driver(s) to put them into the specified state. + + @Input ui32State: new DC state - one of DC_STATE_* + +******************************************************************************/ +IMG_VOID IMG_CALLCONV PVRSRVSetDCState(IMG_UINT32 ui32State) +{ +/* PVRSRV_DISPLAYCLASS_INFO *psDCInfo; + PVRSRV_DEVICE_NODE *psDeviceNode; */ + SYS_DATA *psSysData; + + SysAcquireData(&psSysData); + + List_PVRSRV_DEVICE_NODE_ForEach_va(psSysData->psDeviceNodeList, + &PVRSRVSetDCState_ForEachVaCb, + ui32State); +} + +static PVRSRV_ERROR +PVRSRVDCMemInfoGetCpuVAddr(PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo, + IMG_CPU_VIRTADDR *pVAddr) +{ + *pVAddr = psKernelMemInfo->pvLinAddrKM; + return PVRSRV_OK; +} + +static PVRSRV_ERROR +PVRSRVDCMemInfoGetCpuPAddr(PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo, + IMG_SIZE_T uByteOffset, IMG_CPU_PHYADDR *pPAddr) +{ + *pPAddr = OSMemHandleToCpuPAddr(psKernelMemInfo->sMemBlk.hOSMemHandle, uByteOffset); + return PVRSRV_OK; +} + +static PVRSRV_ERROR +PVRSRVDCMemInfoGetByteSize(PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo, + IMG_SIZE_T *uByteSize) +{ + *uByteSize = psKernelMemInfo->uAllocSize; + return PVRSRV_OK; +} + +static IMG_BOOL +PVRSRVDCMemInfoIsPhysContig(PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo) +{ + return 
OSMemHandleIsPhysContig(psKernelMemInfo->sMemBlk.hOSMemHandle); +} + +/*! +****************************************************************************** + + @Function PVRGetDisplayClassJTable + + @Description + + Sets up function table for 3rd party Display Class Device to call through + + @Input psJTable : pointer to function pointer table memory + + @Return PVRSRV_ERROR : + +******************************************************************************/ +IMG_EXPORT +IMG_BOOL PVRGetDisplayClassJTable(PVRSRV_DC_DISP2SRV_KMJTABLE *psJTable) +{ + psJTable->ui32TableSize = sizeof(PVRSRV_DC_DISP2SRV_KMJTABLE); + psJTable->pfnPVRSRVRegisterDCDevice = &PVRSRVRegisterDCDeviceKM; + psJTable->pfnPVRSRVRemoveDCDevice = &PVRSRVRemoveDCDeviceKM; + psJTable->pfnPVRSRVOEMFunction = &SysOEMFunction; + psJTable->pfnPVRSRVRegisterCmdProcList = &PVRSRVRegisterCmdProcListKM; + psJTable->pfnPVRSRVRemoveCmdProcList = &PVRSRVRemoveCmdProcListKM; + psJTable->pfnPVRSRVCmdComplete = &PVRSRVCommandCompleteKM; + psJTable->pfnPVRSRVRegisterSystemISRHandler = &PVRSRVRegisterSystemISRHandler; + psJTable->pfnPVRSRVRegisterPowerDevice = &PVRSRVRegisterPowerDevice; + psJTable->pfnPVRSRVDCMemInfoGetCpuVAddr = &PVRSRVDCMemInfoGetCpuVAddr; + psJTable->pfnPVRSRVDCMemInfoGetCpuPAddr = &PVRSRVDCMemInfoGetCpuPAddr; + psJTable->pfnPVRSRVDCMemInfoGetByteSize = &PVRSRVDCMemInfoGetByteSize; + psJTable->pfnPVRSRVDCMemInfoIsPhysContig = &PVRSRVDCMemInfoIsPhysContig; + return IMG_TRUE; +} + + + +/****************************************************************************** + + @Function PVRSRVCloseBCDeviceKM + + @Description + + Closes a connection to the Buffer Class device + + @Input hDeviceKM : device handle + + @Return PVRSRV_ERROR : + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR PVRSRVCloseBCDeviceKM (IMG_HANDLE hDeviceKM) +{ + PVRSRV_ERROR eError; + PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *psBCPerContextInfo; + + psBCPerContextInfo = 
(PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *)hDeviceKM; + + /* Remove the item from the resman list and trigger the callback. */ + eError = ResManFreeResByPtr(psBCPerContextInfo->hResItem, CLEANUP_WITH_POLL); + + return eError; +} + + +static PVRSRV_ERROR CloseBCDeviceCallBack(IMG_PVOID pvParam, + IMG_UINT32 ui32Param, + IMG_BOOL bDummy) +{ + PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *psBCPerContextInfo; + PVRSRV_BUFFERCLASS_INFO *psBCInfo; + IMG_UINT32 i; + + PVR_UNREFERENCED_PARAMETER(ui32Param); + PVR_UNREFERENCED_PARAMETER(bDummy); + + psBCPerContextInfo = (PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *)pvParam; + + psBCInfo = psBCPerContextInfo->psBCInfo; + + for (i = 0; i < psBCInfo->ui32BufferCount; i++) + { + if (psBCInfo->psBuffer[i].sDeviceClassBuffer.ui32MemMapRefCount != 0) + { + PVR_DPF((PVR_DBG_ERROR, "CloseBCDeviceCallBack: buffer %d (0x%p) still mapped (ui32MemMapRefCount = %d)", + i, + &psBCInfo->psBuffer[i].sDeviceClassBuffer, + psBCInfo->psBuffer[i].sDeviceClassBuffer.ui32MemMapRefCount)); + return PVRSRV_ERROR_STILL_MAPPED; + } + } + + psBCInfo->ui32RefCount--; + if(psBCInfo->ui32RefCount == 0) + { + /* close the external device */ + psBCInfo->psFuncTable->pfnCloseBCDevice(psBCInfo->ui32DeviceID, psBCInfo->hExtDevice); + + /* free syncinfos */ + for(i=0; iui32BufferCount; i++) + { + if(psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo) + { + PVRSRVKernelSyncInfoDecRef(psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo, IMG_NULL); + } + } + + /* free buffers */ + if(psBCInfo->psBuffer) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_BC_BUFFER) * psBCInfo->ui32BufferCount, psBCInfo->psBuffer, IMG_NULL); + psBCInfo->psBuffer = IMG_NULL; + } + } + + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_BUFFERCLASS_PERCONTEXT_INFO), psBCPerContextInfo, IMG_NULL); + /*not nulling pointer, copy on stack*/ + + return PVRSRV_OK; +} + + +/*! 
+****************************************************************************** + + @Function PVRSRVOpenBCDeviceKM + + @Description + + Opens a connection to the Buffer Class device, associating the connection + with a Device Memory Context for a services managed device + + @Input psPerProc : Per-process data + @Input ui32DeviceID : unique device index + @Input hDevCookie : devcookie used to derive the Device Memory + Context into BC surfaces will be mapped into + @Outut phDeviceKM : handle to the DC device + + @Return PVRSRV_ERROR : + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR PVRSRVOpenBCDeviceKM (PVRSRV_PER_PROCESS_DATA *psPerProc, + IMG_UINT32 ui32DeviceID, + IMG_HANDLE hDevCookie, + IMG_HANDLE *phDeviceKM) +{ + PVRSRV_BUFFERCLASS_INFO *psBCInfo; + PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *psBCPerContextInfo; + PVRSRV_DEVICE_NODE *psDeviceNode; + SYS_DATA *psSysData; + IMG_UINT32 i; + PVRSRV_ERROR eError; + + if(!phDeviceKM || !hDevCookie) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM: Invalid params")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + SysAcquireData(&psSysData); + + /* find the matching devicenode */ + psDeviceNode = (PVRSRV_DEVICE_NODE*) + List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList, + &MatchDeviceKM_AnyVaCb, + ui32DeviceID, + IMG_FALSE, + PVRSRV_DEVICE_CLASS_BUFFER); + if (!psDeviceNode) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM: No devnode matching index %d", ui32DeviceID)); + return PVRSRV_ERROR_NO_DEVICENODE_FOUND; + } + psBCInfo = (PVRSRV_BUFFERCLASS_INFO*)psDeviceNode->pvDevice; + +/* +FoundDevice: +*/ + /* + Allocate the per-context BC Info before calling the external device, + to make error handling easier. 
+ */ + if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(*psBCPerContextInfo), + (IMG_VOID **)&psBCPerContextInfo, IMG_NULL, + "Buffer Class per Context Info") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM: Failed psBCPerContextInfo alloc")); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + OSMemSet(psBCPerContextInfo, 0, sizeof(*psBCPerContextInfo)); + + if(psBCInfo->ui32RefCount++ == 0) + { + BUFFER_INFO sBufferInfo; + + psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevCookie; + + /* store the device kernel context to map into */ + psBCInfo->hDevMemContext = (IMG_HANDLE)psDeviceNode->sDevMemoryInfo.pBMKernelContext; + + /* open the external device */ + eError = psBCInfo->psFuncTable->pfnOpenBCDevice(ui32DeviceID, &psBCInfo->hExtDevice); + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM: Failed to open external BC device")); + goto ErrorExit; + } + + /* get information about the buffers */ + eError = psBCInfo->psFuncTable->pfnGetBCInfo(psBCInfo->hExtDevice, &sBufferInfo); + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM : Failed to get BC Info")); + goto ErrorExitCloseDevice; + } + + /* interpret and store info */ + psBCInfo->ui32BufferCount = sBufferInfo.ui32BufferCount; + + /* allocate BC buffers */ + eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(PVRSRV_BC_BUFFER) * sBufferInfo.ui32BufferCount, + (IMG_VOID **)&psBCInfo->psBuffer, + IMG_NULL, + "Array of Buffer Class Buffer"); + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM: Failed to allocate BC buffers")); + goto ErrorExitCloseDevice; + } + OSMemSet (psBCInfo->psBuffer, + 0, + sizeof(PVRSRV_BC_BUFFER) * sBufferInfo.ui32BufferCount); + + for(i=0; iui32BufferCount; i++) + { + /* create a syncinfo for the device's system surface */ + eError = PVRSRVAllocSyncInfoKM(IMG_NULL, + psBCInfo->hDevMemContext, + &psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo); + if(eError != PVRSRV_OK) + { + 
PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM: Failed sync info alloc")); + goto ErrorExitBuffers; + } + + /* + get the buffers from the buffer class + drivers by index, passing-in the syncdata objects + */ + eError = psBCInfo->psFuncTable->pfnGetBCBuffer(psBCInfo->hExtDevice, + i, + psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo->psSyncData, + &psBCInfo->psBuffer[i].sDeviceClassBuffer.hExtBuffer); + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM: Failed to get BC buffers")); + goto ErrorExitBuffers; + } + + /* setup common device class info */ + psBCInfo->psBuffer[i].sDeviceClassBuffer.pfnGetBufferAddr = psBCInfo->psFuncTable->pfnGetBufferAddr; + psBCInfo->psBuffer[i].sDeviceClassBuffer.hDevMemContext = psBCInfo->hDevMemContext; + psBCInfo->psBuffer[i].sDeviceClassBuffer.hExtDevice = psBCInfo->hExtDevice; + psBCInfo->psBuffer[i].sDeviceClassBuffer.ui32MemMapRefCount = 0; + } + } + + psBCPerContextInfo->psBCInfo = psBCInfo; + psBCPerContextInfo->hResItem = ResManRegisterRes(psPerProc->hResManContext, + RESMAN_TYPE_BUFFERCLASS_DEVICE, + psBCPerContextInfo, + 0, + &CloseBCDeviceCallBack); + + /* return a reference to the BCPerContextInfo */ + *phDeviceKM = (IMG_HANDLE)psBCPerContextInfo; + + return PVRSRV_OK; + +ErrorExitBuffers: + /* free syncinfos */ + for(i=0; iui32BufferCount; i++) + { + if(psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo) + { + PVRSRVKernelSyncInfoDecRef(psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo, IMG_NULL); + } + } + + /* free buffers */ + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_BC_BUFFER), psBCInfo->psBuffer, IMG_NULL); + psBCInfo->psBuffer = IMG_NULL; + +ErrorExitCloseDevice: + psBCInfo->psFuncTable->pfnCloseBCDevice(psBCInfo->ui32DeviceID, psBCInfo->hExtDevice); + +ErrorExit: + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_BUFFERCLASS_PERCONTEXT_INFO), psBCPerContextInfo, IMG_NULL); + psBCInfo->ui32RefCount = 0; + return eError; +} + + + + 
+/****************************************************************************** + + @Function PVRSRVGetBCInfoKM + + @Description + + Gets Buffer Class device Info + + @Input hDeviceKM : device handle + @Output psBufferInfo + + @Return PVRSRV_ERROR : + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR PVRSRVGetBCInfoKM (IMG_HANDLE hDeviceKM, + BUFFER_INFO *psBufferInfo) +{ + PVRSRV_BUFFERCLASS_INFO *psBCInfo; + PVRSRV_ERROR eError; + + if(!hDeviceKM || !psBufferInfo) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetBCInfoKM: Invalid parameters")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psBCInfo = BCDeviceHandleToBCInfo(hDeviceKM); + + eError = psBCInfo->psFuncTable->pfnGetBCInfo(psBCInfo->hExtDevice, psBufferInfo); + + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetBCInfoKM : Failed to get BC Info")); + return eError; + } + + return PVRSRV_OK; +} + + +/****************************************************************************** + + @Function PVRSRVGetBCBufferKM + + @Description + + Gets Buffer Class Buffer Handle + + @Input hDeviceKM : device handle + @Output psBufferInfo + + @Return PVRSRV_ERROR : + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR PVRSRVGetBCBufferKM (IMG_HANDLE hDeviceKM, + IMG_UINT32 ui32BufferIndex, + IMG_HANDLE *phBuffer) +{ + PVRSRV_BUFFERCLASS_INFO *psBCInfo; + + if(!hDeviceKM || !phBuffer) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetBCBufferKM: Invalid parameters")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psBCInfo = BCDeviceHandleToBCInfo(hDeviceKM); + + if(ui32BufferIndex < psBCInfo->ui32BufferCount) + { + *phBuffer = (IMG_HANDLE)&psBCInfo->psBuffer[ui32BufferIndex]; + } + else + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetBCBufferKM: Buffer index %d out of range (%d)", ui32BufferIndex,psBCInfo->ui32BufferCount)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + return PVRSRV_OK; +} + + +/*! 
+****************************************************************************** + + @Function PVRGetBufferClassJTable + + @Description + + Sets up function table for 3rd party Buffer Class Device to call through + + @Input psJTable : pointer to function pointer table memory + + @Return PVRSRV_ERROR : + +******************************************************************************/ +IMG_EXPORT +IMG_BOOL PVRGetBufferClassJTable(PVRSRV_BC_BUFFER2SRV_KMJTABLE *psJTable) +{ + psJTable->ui32TableSize = sizeof(PVRSRV_BC_BUFFER2SRV_KMJTABLE); + + psJTable->pfnPVRSRVRegisterBCDevice = &PVRSRVRegisterBCDeviceKM; + psJTable->pfnPVRSRVScheduleDevices = &PVRSRVScheduleDevicesKM; + psJTable->pfnPVRSRVRemoveBCDevice = &PVRSRVRemoveBCDeviceKM; + + return IMG_TRUE; +} + +/****************************************************************************** + End of file (deviceclass.c) +******************************************************************************/ diff --git a/sgx_km/eurasia_km/services4/srvkm/common/deviceid.h b/sgx_km/eurasia_km/services4/srvkm/common/deviceid.h new file mode 100644 index 0000000..1cf9f0f --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/common/deviceid.h @@ -0,0 +1,51 @@ +/*************************************************************************/ /*! +@Title Device ID helpers +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef __DEVICEID_H__ +#define __DEVICEID_H__ + +#include "services.h" +#include "syscommon.h" + +PVRSRV_ERROR AllocateDeviceID(SYS_DATA *psSysData, IMG_UINT32 *pui32DevID); +PVRSRV_ERROR FreeDeviceID(SYS_DATA *psSysData, IMG_UINT32 ui32DevID); + +#endif /* __DEVICEID_H__ */ diff --git a/sgx_km/eurasia_km/services4/srvkm/common/devicemem.c b/sgx_km/eurasia_km/services4/srvkm/common/devicemem.c new file mode 100644 index 0000000..dcbbb7c --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/common/devicemem.c @@ -0,0 +1,3491 @@ +/*************************************************************************/ /*! +@Title Device addressable memory functions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Device addressable memory APIs +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include + +#include "services_headers.h" +#include "buffer_manager.h" +#include "pdump_km.h" +#include "pvr_bridge_km.h" +#include "osfunc.h" +#include "devicemem.h" + +#if defined(SUPPORT_ION) +#include "ion.h" +#include "env_perproc.h" +#include "ion_sync.h" + +/* Start size of the g_IonSyncHash hash table */ +#define ION_SYNC_HASH_SIZE 20 +HASH_TABLE *g_psIonSyncHash = IMG_NULL; +#endif + +#if defined(SUPPORT_DMABUF) +#include "dmabuf.h" +#include "dmabuf_sync.h" +#include "pvr_linux_fence.h" + +/* Start size of the g_DmaBufSyncHash hash table */ +#define DMABUF_SYNC_HASH_SIZE 20 +HASH_TABLE *g_psDmaBufSyncHash = IMG_NULL; +#endif + +#include "lists.h" + +/* local function prototypes */ +static PVRSRV_ERROR AllocDeviceMem(IMG_HANDLE hDevCookie, + IMG_HANDLE hDevMemHeap, + IMG_UINT32 ui32Flags, + IMG_SIZE_T ui32Size, + IMG_SIZE_T ui32Alignment, + IMG_PVOID pvPrivData, + IMG_UINT32 ui32PrivDataLength, + IMG_UINT32 ui32ChunkSize, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 ui32NumPhysChunks, + IMG_BOOL *pabMapChunk, + PVRSRV_KERNEL_MEM_INFO **ppsMemInfo); + +/* local structures */ + +/* + structure stored in resman to store references + to the SRC and DST meminfo +*/ +typedef struct _RESMAN_MAP_DEVICE_MEM_DATA_ +{ + /* the DST meminfo created by the map */ + PVRSRV_KERNEL_MEM_INFO *psMemInfo; + /* SRC meminfo */ + PVRSRV_KERNEL_MEM_INFO *psSrcMemInfo; +} RESMAN_MAP_DEVICE_MEM_DATA; + +#if defined(SUPPORT_PVRSRV_DEVICE_CLASS) + +/* + map device class resman memory storage structure +*/ +typedef struct _PVRSRV_DC_MAPINFO_ +{ + PVRSRV_KERNEL_MEM_INFO *psMemInfo; + PVRSRV_DEVICE_NODE *psDeviceNode; + IMG_UINT32 ui32RangeIndex; + IMG_UINT32 ui32TilingStride; + PVRSRV_DEVICECLASS_BUFFER *psDeviceClassBuffer; +} PVRSRV_DC_MAPINFO; + +#endif /* defined(SUPPORT_PVRSRV_DEVICE_CLASS) */ + +static IMG_UINT32 g_ui32SyncUID = 0; +#if defined (MEM_TRACK_INFO_DEBUG) +PVRSRV_MEM_TRACK_INFO 
*g_psMemTrackInfoHead = IMG_NULL; +PVRSRV_MEM_TRACK_INFO *g_psMemTrackInfoTail = IMG_NULL; +IMG_UINT32 g_ui32NumOfOpsRecorded = 0; +#endif + +static PVRSRV_KERNEL_SYNC_INFO *g_psSyncInfoList = IMG_NULL; + +#if defined (MEM_TRACK_INFO_DEBUG) +/*! +****************************************************************************** + + @Function PVRSRVAddMemTrackInfo + + @Description + + Adds the current psMemTrackInfo instance to the head of list represented by gMemTrackInfo + + @Input psMemTrackInfo : + @Output + + @Return + +******************************************************************************/ +IMG_EXPORT +IMG_VOID IMG_CALLCONV PVRSRVAddMemTrackInfo(PVRSRV_MEM_TRACK_INFO *psMemTrackInfo) +{ + g_ui32NumOfOpsRecorded++; + psMemTrackInfo->next = g_psMemTrackInfoHead; + psMemTrackInfo->prev = IMG_NULL; + if(g_psMemTrackInfoHead) + { + g_psMemTrackInfoHead->prev = psMemTrackInfo; + } + else + g_psMemTrackInfoTail = psMemTrackInfo; + g_psMemTrackInfoHead = psMemTrackInfo; + if(g_ui32NumOfOpsRecorded > MAX_MEM_TRACK_OPS) + { + PVRSRV_MEM_TRACK_INFO *psFreePtr; + psFreePtr = g_psMemTrackInfoTail; + g_psMemTrackInfoTail = g_psMemTrackInfoTail->prev; + g_psMemTrackInfoTail->next = IMG_NULL; + OSFreeMem(PVRSRV_PAGEABLE_SELECT, + sizeof(PVRSRV_MEM_TRACK_INFO), + psFreePtr, IMG_NULL); + g_ui32NumOfOpsRecorded--; + } +} + +/*! 
+****************************************************************************** + + @Function PVRSRVPrintMemTrackInfo + + @Description + + Dumps the mem tracking info + + @Input ui32FaultAddr: + @Output + + @Return + +******************************************************************************/ +IMG_EXPORT +IMG_VOID IMG_CALLCONV PVRSRVPrintMemTrackInfo(IMG_UINT32 ui32FaultAddr) +{ + PVRSRV_MEM_TRACK_INFO *psMemTrackInfo; + static const IMG_CHAR * const apszMemOpNames[] = {"UNKNOWN", "DEVICE", "DEVICECLASS", "WRAPPED", "MAPPED", "ION", "DMA-BUF", "ALLOC", "FREE"}; + psMemTrackInfo = g_psMemTrackInfoHead; + + PVR_DPF((PVR_DBG_MESSAGE,"PVRSRVMemTrackInfo: Dumping mem tracking info\n")); + PVR_DPF((PVR_DBG_MESSAGE,"DevVAddr | Size | Memory Op | Process ID | Ref Count | Task Name | Heap ID | Time Stamp(uSec)\n")); + while(psMemTrackInfo) + { + if((ui32FaultAddr >= psMemTrackInfo->sDevVAddr.uiAddr) && + (ui32FaultAddr < (psMemTrackInfo->sDevVAddr.uiAddr + psMemTrackInfo->uSize))) + { + PVR_DPF((PVR_DBG_MESSAGE,"***************************\n")); + } + PVR_DPF((PVR_DBG_MESSAGE,"0x%-8x | 0x%-8zx | %-13s | %-11d | %-6u | %-15s | %10s | %-15u ", + psMemTrackInfo->sDevVAddr.uiAddr, + psMemTrackInfo->uSize, + apszMemOpNames[psMemTrackInfo->eOp], + psMemTrackInfo->ui32Pid, + psMemTrackInfo->ui32RefCount, + psMemTrackInfo->asTaskName, + psMemTrackInfo->heapId, + psMemTrackInfo->ui32TimeStampUSecs)); + psMemTrackInfo = psMemTrackInfo->next; + } +} +#endif + +/*! 
+****************************************************************************** + + @Function PVRSRVGetDeviceMemHeapsKM + + @Description + + Gets the device shared memory heaps + + @Input hDevCookie : + @Output phDevMemContext : ptr to handle to memory context + @Output psHeapInfo : ptr to array of heap info + + @Return PVRSRV_DEVICE_NODE, valid devnode or IMG_NULL + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDeviceMemHeapsKM(IMG_HANDLE hDevCookie, + PVRSRV_HEAP_INFO *psHeapInfo) +{ + PVRSRV_DEVICE_NODE *psDeviceNode; + IMG_UINT32 ui32HeapCount; + DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap; + IMG_UINT32 i; + + if (hDevCookie == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVGetDeviceMemHeapsKM: hDevCookie invalid")); + PVR_DBG_BREAK; + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevCookie; + + /* Setup useful pointers */ + ui32HeapCount = psDeviceNode->sDevMemoryInfo.ui32HeapCount; + psDeviceMemoryHeap = psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeap; + + /* check we don't exceed the max number of heaps */ + PVR_ASSERT(ui32HeapCount <= PVRSRV_MAX_CLIENT_HEAPS); + + /* retrieve heap information */ + for(i=0; i 0) denotes a tiled heap */ + psHeapInfo[i].ui32XTileStride = psDeviceMemoryHeap[i].ui32XTileStride; + } + + for(; i < PVRSRV_MAX_CLIENT_HEAPS; i++) + { + OSMemSet(psHeapInfo + i, 0, sizeof(*psHeapInfo)); + psHeapInfo[i].ui32HeapID = (IMG_UINT32)PVRSRV_UNDEFINED_HEAP_ID; + } + + return PVRSRV_OK; +} + +/*! 
+****************************************************************************** + + @Function PVRSRVCreateDeviceMemContextKM + + @Description + + Creates a device memory context + + @Input hDevCookie : + @Input psPerProc : Per-process data + @Output phDevMemContext : ptr to handle to memory context + @Output pui32ClientHeapCount : ptr to heap count + @Output psHeapInfo : ptr to array of heap info + + @Return PVRSRV_DEVICE_NODE, valid devnode or IMG_NULL + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateDeviceMemContextKM(IMG_HANDLE hDevCookie, + PVRSRV_PER_PROCESS_DATA *psPerProc, + IMG_HANDLE *phDevMemContext, + IMG_UINT32 *pui32ClientHeapCount, + PVRSRV_HEAP_INFO *psHeapInfo, + IMG_BOOL *pbCreated, + IMG_BOOL *pbShared) +{ + PVRSRV_DEVICE_NODE *psDeviceNode; + IMG_UINT32 ui32HeapCount, ui32ClientHeapCount=0; + DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap; + IMG_HANDLE hDevMemContext; + IMG_HANDLE hDevMemHeap; + IMG_DEV_PHYADDR sPDDevPAddr; + IMG_UINT32 i; + +#if !defined(PVR_SECURE_HANDLES) + PVR_UNREFERENCED_PARAMETER(pbShared); +#endif + + if (hDevCookie == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVCreateDeviceMemContextKM: hDevCookie invalid")); + PVR_DBG_BREAK; + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevCookie; + + /* + Setup useful pointers + */ + ui32HeapCount = psDeviceNode->sDevMemoryInfo.ui32HeapCount; + psDeviceMemoryHeap = psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeap; + + /* + check we don't exceed the max number of heaps + */ + PVR_ASSERT(ui32HeapCount <= PVRSRV_MAX_CLIENT_HEAPS); + + /* + Create a memory context for the caller + */ + hDevMemContext = BM_CreateContext(psDeviceNode, + &sPDDevPAddr, + psPerProc, + pbCreated); + if (hDevMemContext == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDeviceMemContextKM: Failed BM_CreateContext")); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + /* create the per context 
heaps */ + for(i=0; i 0) + { + hDevMemHeap = BM_CreateHeap(hDevMemContext, + &psDeviceMemoryHeap[i]); + if (hDevMemHeap == IMG_NULL) + { + BM_DestroyContext(hDevMemContext, IMG_NULL); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + } + else + { + hDevMemHeap = IMG_NULL; + } + + /* return information about the heap */ + psHeapInfo[ui32ClientHeapCount].ui32HeapID = psDeviceMemoryHeap[i].ui32HeapID; + psHeapInfo[ui32ClientHeapCount].hDevMemHeap = hDevMemHeap; + psHeapInfo[ui32ClientHeapCount].sDevVAddrBase = psDeviceMemoryHeap[i].sDevVAddrBase; + psHeapInfo[ui32ClientHeapCount].ui32HeapByteSize = psDeviceMemoryHeap[i].ui32HeapSize; + psHeapInfo[ui32ClientHeapCount].ui32Attribs = psDeviceMemoryHeap[i].ui32Attribs; + #if defined(SUPPORT_MEMORY_TILING) + psHeapInfo[ui32ClientHeapCount].ui32XTileStride = psDeviceMemoryHeap[i].ui32XTileStride; + #else + psHeapInfo[ui32ClientHeapCount].ui32XTileStride = 0; + #endif +#if defined(PVR_SECURE_HANDLES) + pbShared[ui32ClientHeapCount] = IMG_FALSE; +#endif + + ui32ClientHeapCount++; + break; + } + } + } + + /* return shared_exported and per context heap information to the caller */ + *pui32ClientHeapCount = ui32ClientHeapCount; + *phDevMemContext = hDevMemContext; + + return PVRSRV_OK; +} + +IMG_EXPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyDeviceMemContextKM(IMG_HANDLE hDevCookie, + IMG_HANDLE hDevMemContext, + IMG_BOOL *pbDestroyed) +{ + PVR_UNREFERENCED_PARAMETER(hDevCookie); + + return BM_DestroyContext(hDevMemContext, pbDestroyed); +} + + + + +/*! 
+****************************************************************************** + + @Function PVRSRVGetDeviceMemHeapInfoKM + + @Description + + gets heap info + + @Input hDevCookie : + @Input hDevMemContext : ptr to handle to memory context + @Output pui32ClientHeapCount : ptr to heap count + @Output psHeapInfo : ptr to array of heap info + + @Return + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDeviceMemHeapInfoKM(IMG_HANDLE hDevCookie, + IMG_HANDLE hDevMemContext, + IMG_UINT32 *pui32ClientHeapCount, + PVRSRV_HEAP_INFO *psHeapInfo, + IMG_BOOL *pbShared) +{ + PVRSRV_DEVICE_NODE *psDeviceNode; + IMG_UINT32 ui32HeapCount, ui32ClientHeapCount=0; + DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap; + IMG_HANDLE hDevMemHeap; + IMG_UINT32 i; + +#if !defined(PVR_SECURE_HANDLES) + PVR_UNREFERENCED_PARAMETER(pbShared); +#endif + + if (hDevCookie == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVGetDeviceMemHeapInfoKM: hDevCookie invalid")); + PVR_DBG_BREAK; + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevCookie; + + /* + Setup useful pointers + */ + ui32HeapCount = psDeviceNode->sDevMemoryInfo.ui32HeapCount; + psDeviceMemoryHeap = psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeap; + + /* + check we don't exceed the max number of heaps + */ + PVR_ASSERT(ui32HeapCount <= PVRSRV_MAX_CLIENT_HEAPS); + + /* create the per context heaps */ + for(i=0; i 0) + { + hDevMemHeap = BM_CreateHeap(hDevMemContext, + &psDeviceMemoryHeap[i]); + + if (hDevMemHeap == IMG_NULL) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + } + else + { + hDevMemHeap = IMG_NULL; + } + + /* return information about the heap */ + psHeapInfo[ui32ClientHeapCount].ui32HeapID = psDeviceMemoryHeap[i].ui32HeapID; + psHeapInfo[ui32ClientHeapCount].hDevMemHeap = hDevMemHeap; + psHeapInfo[ui32ClientHeapCount].sDevVAddrBase = psDeviceMemoryHeap[i].sDevVAddrBase; + 
psHeapInfo[ui32ClientHeapCount].ui32HeapByteSize = psDeviceMemoryHeap[i].ui32HeapSize; + psHeapInfo[ui32ClientHeapCount].ui32Attribs = psDeviceMemoryHeap[i].ui32Attribs; + psHeapInfo[ui32ClientHeapCount].ui32XTileStride = psDeviceMemoryHeap[i].ui32XTileStride; +#if defined(PVR_SECURE_HANDLES) + pbShared[ui32ClientHeapCount] = IMG_FALSE; +#endif + + ui32ClientHeapCount++; + break; + } + } + } + + /* return shared_exported and per context heap information to the caller */ + *pui32ClientHeapCount = ui32ClientHeapCount; + + return PVRSRV_OK; +} + + +/*! +****************************************************************************** + + @Function AllocDeviceMem + + @Description + + Allocates device memory + + @Input hDevCookie : + + @Input hDevMemHeap + + @Input ui32Flags : Some combination of PVRSRV_MEM_ flags + + @Input ui32Size : Number of bytes to allocate + + @Input ui32Alignment : Alignment of allocation + + @Input pvPrivData : Opaque private data passed through to allocator + + @Input ui32PrivDataLength : Length of opaque private data + + @Output **ppsMemInfo : On success, receives a pointer to the created MEM_INFO structure + + @Return PVRSRV_ERROR : + +******************************************************************************/ +static PVRSRV_ERROR AllocDeviceMem(IMG_HANDLE hDevCookie, + IMG_HANDLE hDevMemHeap, + IMG_UINT32 ui32Flags, + IMG_SIZE_T ui32Size, + IMG_SIZE_T ui32Alignment, + IMG_PVOID pvPrivData, + IMG_UINT32 ui32PrivDataLength, + IMG_UINT32 ui32ChunkSize, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 ui32NumPhysChunks, + IMG_BOOL *pabMapChunk, + PVRSRV_KERNEL_MEM_INFO **ppsMemInfo) +{ + PVRSRV_KERNEL_MEM_INFO *psMemInfo; + BM_HANDLE hBuffer; + /* Pointer to implementation details within the mem_info */ + PVRSRV_MEMBLK *psMemBlock; + IMG_BOOL bBMError; + + PVR_UNREFERENCED_PARAMETER(hDevCookie); + + *ppsMemInfo = IMG_NULL; + + if(OSAllocMem(PVRSRV_PAGEABLE_SELECT, + sizeof(PVRSRV_KERNEL_MEM_INFO), + (IMG_VOID **)&psMemInfo, IMG_NULL, + "Kernel 
Memory Info") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"AllocDeviceMem: Failed to alloc memory for block")); + return (PVRSRV_ERROR_OUT_OF_MEMORY); + } + + OSMemSet(psMemInfo, 0, sizeof(*psMemInfo)); + + psMemBlock = &(psMemInfo->sMemBlk); + + /* BM supplied Device Virtual Address with physical backing RAM */ + psMemInfo->ui32Flags = ui32Flags | PVRSRV_MEM_RAM_BACKED_ALLOCATION; + + bBMError = BM_Alloc (hDevMemHeap, + IMG_NULL, + ui32Size, + &psMemInfo->ui32Flags, + IMG_CAST_TO_DEVVADDR_UINT(ui32Alignment), + pvPrivData, + ui32PrivDataLength, + ui32ChunkSize, + ui32NumVirtChunks, + ui32NumPhysChunks, + pabMapChunk, + #if defined (PVRSRV_DEVMEM_TIME_STATS) + &psMemInfo->ui32TimeToDevMap, + #endif + &hBuffer); + + if (!bBMError) + { + PVR_DPF((PVR_DBG_ERROR,"AllocDeviceMem: BM_Alloc Failed")); + OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(PVRSRV_KERNEL_MEM_INFO), psMemInfo, IMG_NULL); + /*not nulling pointer, out of scope*/ + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + /* Fill in "Implementation dependant" section of mem info */ + psMemBlock->sDevVirtAddr = BM_HandleToDevVaddr(hBuffer); + psMemBlock->hOSMemHandle = BM_HandleToOSMemHandle(hBuffer); + + /* Convert from BM_HANDLE to external IMG_HANDLE */ + psMemBlock->hBuffer = (IMG_HANDLE)hBuffer; + + /* Fill in the public fields of the MEM_INFO structure */ + + psMemInfo->pvLinAddrKM = BM_HandleToCpuVaddr(hBuffer); + + psMemInfo->sDevVAddr = psMemBlock->sDevVirtAddr; + + if (ui32Flags & PVRSRV_MEM_SPARSE) + { + psMemInfo->uAllocSize = ui32ChunkSize * ui32NumVirtChunks; + } + else + { + psMemInfo->uAllocSize = ui32Size; + } + + /* Clear the Backup buffer pointer as we do not have one at this point. We only allocate this as we are going up/down */ + psMemInfo->pvSysBackupBuffer = IMG_NULL; + + /* + * Setup the output. + */ + *ppsMemInfo = psMemInfo; + + /* + * And I think we're done for now.... 
+ */ + return (PVRSRV_OK); +} + +static PVRSRV_ERROR FreeDeviceMem2(PVRSRV_KERNEL_MEM_INFO *psMemInfo, PVRSRV_FREE_CALLBACK_ORIGIN eCallbackOrigin) +{ + BM_HANDLE hBuffer; + + if (!psMemInfo) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + hBuffer = psMemInfo->sMemBlk.hBuffer; + + switch(eCallbackOrigin) + { + case PVRSRV_FREE_CALLBACK_ORIGIN_ALLOCATOR: + BM_Free(hBuffer, + psMemInfo->ui32Flags + #if defined(PVRSRV_DEVMEM_TIME_STATS) + ,psMemInfo->pui32TimeToDevUnmap + #endif + ); + break; + case PVRSRV_FREE_CALLBACK_ORIGIN_IMPORTER: + BM_FreeExport(hBuffer, psMemInfo->ui32Flags); + break; + default: + break; + } + + if (psMemInfo->pvSysBackupBuffer && + eCallbackOrigin == PVRSRV_FREE_CALLBACK_ORIGIN_ALLOCATOR) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, psMemInfo->uAllocSize, psMemInfo->pvSysBackupBuffer, IMG_NULL); + psMemInfo->pvSysBackupBuffer = IMG_NULL; + } + + if (psMemInfo->ui32RefCount == 0) + OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(PVRSRV_KERNEL_MEM_INFO), psMemInfo, IMG_NULL); + + return(PVRSRV_OK); +} + +static PVRSRV_ERROR FreeDeviceMem(PVRSRV_KERNEL_MEM_INFO *psMemInfo) +{ + BM_HANDLE hBuffer; + + if (!psMemInfo) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + hBuffer = psMemInfo->sMemBlk.hBuffer; + + BM_Free(hBuffer, psMemInfo->ui32Flags + #if defined(PVRSRV_DEVMEM_TIME_STATS) + , IMG_NULL + #endif + ); + + if(psMemInfo->pvSysBackupBuffer) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, psMemInfo->uAllocSize, psMemInfo->pvSysBackupBuffer, IMG_NULL); + psMemInfo->pvSysBackupBuffer = IMG_NULL; + } + + OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(PVRSRV_KERNEL_MEM_INFO), psMemInfo, IMG_NULL); + + return(PVRSRV_OK); +} + + +/*! 
+****************************************************************************** + + @Function PVRSRVAllocSyncInfoKM + + @Description + + Allocates a sync info + + @Return PVRSRV_ERROR : + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVAllocSyncInfoKM(IMG_HANDLE hDevCookie, + IMG_HANDLE hDevMemContext, + PVRSRV_KERNEL_SYNC_INFO **ppsKernelSyncInfo) +{ + IMG_HANDLE hSyncDevMemHeap; + DEVICE_MEMORY_INFO *psDevMemoryInfo; + BM_CONTEXT *pBMContext; + PVRSRV_ERROR eError; + PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo; + PVRSRV_SYNC_DATA *psSyncData; + + eError = OSAllocMem(PVRSRV_PAGEABLE_SELECT, + sizeof(PVRSRV_KERNEL_SYNC_INFO), + (IMG_VOID **)&psKernelSyncInfo, IMG_NULL, + "Kernel Synchronization Info"); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVAllocSyncInfoKM: Failed to alloc memory")); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + eError = OSAtomicAlloc(&psKernelSyncInfo->pvRefCount); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVAllocSyncInfoKM: Failed to allocate atomic")); + OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(PVRSRV_KERNEL_SYNC_INFO), psKernelSyncInfo, IMG_NULL); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + + /* Get the devnode from the devheap */ + pBMContext = (BM_CONTEXT*)hDevMemContext; + psDevMemoryInfo = &pBMContext->psDeviceNode->sDevMemoryInfo; + + /* and choose a heap for the syncinfo */ + hSyncDevMemHeap = psDevMemoryInfo->psDeviceMemoryHeap[psDevMemoryInfo->ui32SyncHeapID].hDevMemHeap; + + /* + Cache consistent flag would be unnecessary if the heap attributes were + changed to specify it. 
+ */ + eError = AllocDeviceMem(hDevCookie, + hSyncDevMemHeap, + PVRSRV_MEM_CACHE_CONSISTENT, + sizeof(PVRSRV_SYNC_DATA), + sizeof(IMG_UINT32), + IMG_NULL, + 0, + 0, 0, 0, IMG_NULL, /* Sparse mapping args, not required */ + &psKernelSyncInfo->psSyncDataMemInfoKM); + + if (eError != PVRSRV_OK) + { + + PVR_DPF((PVR_DBG_ERROR,"PVRSRVAllocSyncInfoKM: Failed to alloc memory")); + OSAtomicFree(psKernelSyncInfo->pvRefCount); + OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(PVRSRV_KERNEL_SYNC_INFO), psKernelSyncInfo, IMG_NULL); + /*not nulling pointer, out of scope*/ + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + /* init sync data */ + psKernelSyncInfo->psSyncData = psKernelSyncInfo->psSyncDataMemInfoKM->pvLinAddrKM; + psSyncData = psKernelSyncInfo->psSyncData; + + psSyncData->ui32WriteOpsPending = 0; + psSyncData->ui32WriteOpsComplete = 0; + psSyncData->ui32ReadOpsPending = 0; + psSyncData->ui32ReadOpsComplete = 0; + psSyncData->ui32ReadOps2Pending = 0; + psSyncData->ui32ReadOps2Complete = 0; + psSyncData->ui32LastOpDumpVal = 0; + psSyncData->ui32LastReadOpDumpVal = 0; + psSyncData->ui64LastWrite = 0; + +#if defined(SUPPORT_DMABUF) + psKernelSyncInfo->hFenceContext = IMG_NULL; +#endif + +#if defined(SUPPORT_PER_SYNC_DEBUG) + psKernelSyncInfo->ui32OperationMask = 0; + memset(psKernelSyncInfo->aui32OpInfo, 0, sizeof(psKernelSyncInfo->aui32OpInfo)); + memset(psKernelSyncInfo->aui32ReadOpSample, 0, sizeof(psKernelSyncInfo->aui32ReadOpSample)); + memset(psKernelSyncInfo->aui32WriteOpSample, 0, sizeof(psKernelSyncInfo->aui32WriteOpSample)); + memset(psKernelSyncInfo->aui32ReadOp2Sample, 0, sizeof(psKernelSyncInfo->aui32ReadOp2Sample)); + psKernelSyncInfo->ui32HistoryIndex = 0; +#endif + + /* + Note: + PDumping here means that we PDump syncs that we might not + need to know about for the multi-process but this + unavoidable as there is no point where we can PDump + that guarantees it will be initialised before we us it + (e.g. 
kick time is too late as the client might have + issued a POL on it before that point) + */ +#if defined(PDUMP) + PDUMPCOMMENTWITHFLAGS( +#if defined(SUPPORT_PDUMP_MULTI_PROCESS) + PDUMP_FLAGS_PERSISTENT, +#else + PDUMP_FLAGS_CONTINUOUS, +#endif + "Allocating kernel sync object"); + PDUMPMEM(psKernelSyncInfo->psSyncDataMemInfoKM->pvLinAddrKM, + psKernelSyncInfo->psSyncDataMemInfoKM, + 0, + (IMG_UINT32)psKernelSyncInfo->psSyncDataMemInfoKM->uAllocSize, +#if defined(SUPPORT_PDUMP_MULTI_PROCESS) + PDUMP_FLAGS_PERSISTENT, +#else + PDUMP_FLAGS_CONTINUOUS, +#endif + MAKEUNIQUETAG(psKernelSyncInfo->psSyncDataMemInfoKM)); +#endif + + psKernelSyncInfo->sWriteOpsCompleteDevVAddr.uiAddr = psKernelSyncInfo->psSyncDataMemInfoKM->sDevVAddr.uiAddr + offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete); + psKernelSyncInfo->sReadOpsCompleteDevVAddr.uiAddr = psKernelSyncInfo->psSyncDataMemInfoKM->sDevVAddr.uiAddr + offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete); + psKernelSyncInfo->sReadOps2CompleteDevVAddr.uiAddr = psKernelSyncInfo->psSyncDataMemInfoKM->sDevVAddr.uiAddr + offsetof(PVRSRV_SYNC_DATA, ui32ReadOps2Complete); + psKernelSyncInfo->ui32UID = g_ui32SyncUID++; + + /* syncinfo meminfo has no syncinfo! */ + psKernelSyncInfo->psSyncDataMemInfoKM->psKernelSyncInfo = IMG_NULL; + + OSAtomicInc(psKernelSyncInfo->pvRefCount); + + /* Add the SyncInfo to a global list */ + List_PVRSRV_KERNEL_SYNC_INFO_Insert(&g_psSyncInfoList, psKernelSyncInfo); + + /* return result */ + *ppsKernelSyncInfo = psKernelSyncInfo; + + return PVRSRV_OK; +} + +IMG_EXPORT +IMG_VOID PVRSRVAcquireSyncInfoKM(PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo) +{ + OSAtomicInc(psKernelSyncInfo->pvRefCount); +} + +/*! 
+****************************************************************************** + + @Function PVRSRVReleaseSyncInfoKM + + @Description + + Frees a sync info + + @Return PVRSRV_ERROR : + +******************************************************************************/ +IMG_EXPORT +IMG_VOID IMG_CALLCONV PVRSRVReleaseSyncInfoKM(PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo) +{ + if (OSAtomicDecAndTest(psKernelSyncInfo->pvRefCount)) + { + /* Remove the SyncInfo from the global list */ + List_PVRSRV_KERNEL_SYNC_INFO_Remove(psKernelSyncInfo); + + #if defined(PDUMP) + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Wait for write ops to flush to PDump value (%d)", + psKernelSyncInfo->psSyncData->ui32LastOpDumpVal); + PDUMPMEMPOL(psKernelSyncInfo->psSyncDataMemInfoKM, + offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete), + psKernelSyncInfo->psSyncData->ui32LastOpDumpVal, + 0xffffffff, + PDUMP_POLL_OPERATOR_EQUAL, + PDUMP_FLAGS_CONTINUOUS, + MAKEUNIQUETAG(psKernelSyncInfo->psSyncDataMemInfoKM)); + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Wait for read ops to flush to PDump value (%d)", + psKernelSyncInfo->psSyncData->ui32LastReadOpDumpVal); + PDUMPMEMPOL(psKernelSyncInfo->psSyncDataMemInfoKM, + offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete), + psKernelSyncInfo->psSyncData->ui32LastReadOpDumpVal, + 0xffffffff, + PDUMP_POLL_OPERATOR_EQUAL, + PDUMP_FLAGS_CONTINUOUS, + MAKEUNIQUETAG(psKernelSyncInfo->psSyncDataMemInfoKM)); + #endif + + FreeDeviceMem(psKernelSyncInfo->psSyncDataMemInfoKM); + + /* Catch anyone who is trying to access the freed structure */ + psKernelSyncInfo->psSyncDataMemInfoKM = IMG_NULL; + psKernelSyncInfo->psSyncData = IMG_NULL; + OSAtomicFree(psKernelSyncInfo->pvRefCount); + (IMG_VOID)OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(PVRSRV_KERNEL_SYNC_INFO), psKernelSyncInfo, IMG_NULL); + /*not nulling pointer, copy on stack*/ + } +} + +/*! 
+****************************************************************************** + + @Function freeExternal + + @Description + + Code for freeing meminfo elements that are specific to external types memory + + @Input psMemInfo : Kernel meminfo + + @Return PVRSRV_ERROR : + +******************************************************************************/ + +static IMG_VOID freeExternal(PVRSRV_KERNEL_MEM_INFO *psMemInfo) +{ + IMG_HANDLE hOSWrapMem = psMemInfo->sMemBlk.hOSWrapMem; + + /* free the page addr array if req'd */ + if(psMemInfo->sMemBlk.psIntSysPAddr) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(IMG_SYS_PHYADDR), psMemInfo->sMemBlk.psIntSysPAddr, IMG_NULL); + psMemInfo->sMemBlk.psIntSysPAddr = IMG_NULL; + } + + /* Mem type dependent stuff */ + if (psMemInfo->memType == PVRSRV_MEMTYPE_WRAPPED) + { + if(hOSWrapMem) + { + OSReleasePhysPageAddr(hOSWrapMem); + } + } +#if defined(SUPPORT_ION) + else if (psMemInfo->memType == PVRSRV_MEMTYPE_ION) + { + if (hOSWrapMem) + { + IonUnimportBufferAndReleasePhysAddr(hOSWrapMem); + } + } +#endif +#if defined(SUPPORT_DMABUF) + else if (psMemInfo->memType == PVRSRV_MEMTYPE_DMABUF) + { + if (hOSWrapMem) + { + DmaBufUnimportAndReleasePhysAddr(hOSWrapMem); + } + } +#endif +} + +/*! +****************************************************************************** + + @Function FreeMemCallBackCommon + + @Description + + Common code for freeing device mem (called for freeing, unwrapping and unmapping) + + @Input psMemInfo : Kernel meminfo + @Input ui32Param : packet size + @Input uibFromAllocatorParam : Are we being called by the original allocator? 
+ + @Return PVRSRV_ERROR : + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR FreeMemCallBackCommon(PVRSRV_KERNEL_MEM_INFO *psMemInfo, + IMG_UINT32 ui32Param, + PVRSRV_FREE_CALLBACK_ORIGIN eCallbackOrigin) +{ + PVRSRV_ERROR eError = PVRSRV_OK; +#if defined (MEM_TRACK_INFO_DEBUG) + PVRSRV_MEM_TRACK_INFO *psMemTrackInfo; +#endif + PVR_UNREFERENCED_PARAMETER(ui32Param); + + /* decrement the refcount */ + PVRSRVKernelMemInfoDecRef(psMemInfo); + +#if defined (MEM_TRACK_INFO_DEBUG) + eError = OSAllocMem(PVRSRV_PAGEABLE_SELECT, + sizeof(PVRSRV_MEM_TRACK_INFO), + (IMG_VOID **)&psMemTrackInfo, IMG_NULL, + "Mem tracking info"); + if (eError != PVRSRV_OK) + return eError; + psMemTrackInfo->sDevVAddr = psMemInfo->sDevVAddr; + psMemTrackInfo->uSize = psMemInfo->uAllocSize; + psMemTrackInfo->ui32Pid = OSGetCurrentProcessIDKM(); + psMemTrackInfo->ui32RefCount = psMemInfo->ui32RefCount; + psMemTrackInfo->eOp = PVRSRV_MEMTYPE_FREE; + psMemTrackInfo->ui32TimeStampUSecs = OSGetCurrentTimeInUSecsKM(); + + OSGetCurrentProcessNameKM(psMemTrackInfo->asTaskName, 128); + + OSStringCopy(psMemTrackInfo->heapId, psMemInfo->heapId); + PVRSRVAddMemTrackInfo(psMemTrackInfo); +#endif + + /* check no other processes has this meminfo mapped */ + if (psMemInfo->ui32RefCount == 0) + { + if((psMemInfo->ui32Flags & PVRSRV_MEM_EXPORTED) != 0) + { + IMG_HANDLE hMemInfo = IMG_NULL; + + /* find the handle */ + eError = PVRSRVFindHandle(KERNEL_HANDLE_BASE, + &hMemInfo, + psMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "FreeMemCallBackCommon: can't find exported meminfo in the global handle list")); + return eError; + } + + /* release the handle */ + eError = PVRSRVReleaseHandle(KERNEL_HANDLE_BASE, + hMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "FreeMemCallBackCommon: PVRSRVReleaseHandle failed for exported meminfo")); + return eError; + } + } + + 
switch(psMemInfo->memType) + { + /* Fall through: Free only what we should for each memory type */ + case PVRSRV_MEMTYPE_WRAPPED: + case PVRSRV_MEMTYPE_ION: + case PVRSRV_MEMTYPE_DMABUF: + freeExternal(psMemInfo); + case PVRSRV_MEMTYPE_DEVICE: + case PVRSRV_MEMTYPE_DEVICECLASS: +#if defined(SUPPORT_ION) + if (psMemInfo->hIonSyncInfo) + { + /* + For syncs attached to Ion imported buffers we handle + things a little differently + */ + PVRSRVIonBufferSyncInfoDecRef(psMemInfo->hIonSyncInfo, psMemInfo); + } + else +#endif +#if defined(SUPPORT_DMABUF) + if (psMemInfo->hDmaBufSyncInfo) + { + PVRSRVDmaBufSyncInfoDecRef(psMemInfo->hDmaBufSyncInfo, psMemInfo); + } + else +#endif + { + if (psMemInfo->psKernelSyncInfo) + { + PVRSRVKernelSyncInfoDecRef(psMemInfo->psKernelSyncInfo, psMemInfo); + } + } + break; + default: + PVR_DPF((PVR_DBG_ERROR, "FreeMemCallBackCommon: Unknown memType")); + eError = PVRSRV_ERROR_INVALID_MEMINFO; + } + } + + /* + * FreeDeviceMem2 will do the right thing, freeing + * the virtual memory info when the allocator calls + * but only releaseing the physical pages when everyone + * is done. + */ + + if (eError == PVRSRV_OK) + { + eError = FreeDeviceMem2(psMemInfo, eCallbackOrigin); + } + + return eError; +} + +/*! +****************************************************************************** + + @Function FreeDeviceMemCallBack + + @Description + + ResMan call back to free device memory + + @Input pvParam : data packet + @Input ui32Param : packet size + + @Return PVRSRV_ERROR : + +******************************************************************************/ +static PVRSRV_ERROR FreeDeviceMemCallBack(IMG_PVOID pvParam, + IMG_UINT32 ui32Param, + IMG_BOOL bDummy) +{ + PVRSRV_KERNEL_MEM_INFO *psMemInfo = (PVRSRV_KERNEL_MEM_INFO *)pvParam; + + PVR_UNREFERENCED_PARAMETER(bDummy); + + return FreeMemCallBackCommon(psMemInfo, ui32Param, + PVRSRV_FREE_CALLBACK_ORIGIN_ALLOCATOR); +} + + +/*! 
******************************************************************************

 @Function	PVRSRVFreeDeviceMemKM

 @Description

 Frees memory allocated with PVRAllocDeviceMem, including the mem_info structure

 @Input    psMemInfo : meminfo to free (may be IMG_NULL)

 @Return   PVRSRV_ERROR :

******************************************************************************/
IMG_EXPORT
PVRSRV_ERROR IMG_CALLCONV PVRSRVFreeDeviceMemKM(IMG_HANDLE hDevCookie,
						PVRSRV_KERNEL_MEM_INFO *psMemInfo)
{
	PVRSRV_ERROR eError;

	PVR_UNREFERENCED_PARAMETER(hDevCookie);

	if (!psMemInfo)
	{
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	if (psMemInfo->sMemBlk.hResItem != IMG_NULL)
	{
		/* Normal path: resman owns the allocation and will invoke
		   FreeDeviceMemCallBack for us. */
		eError = ResManFreeResByPtr(psMemInfo->sMemBlk.hResItem, CLEANUP_WITH_POLL);
	}
	else
	{
		/* PVRSRV_MEM_NO_RESMAN */
		eError = FreeDeviceMemCallBack(psMemInfo, 0, CLEANUP_WITH_POLL);
	}

	return eError;
}


/*!
******************************************************************************

 @Function	PVRSRVAllocDeviceMemKM

 @Description

 Allocates device memory

 @Input    hDevCookie :
 @Input    psPerProc : Per-process data
 @Input    hDevMemHeap
 @Input    ui32Flags : Some combination of PVRSRV_MEM_ flags
 @Input    ui32Size :  Number of bytes to allocate
 @Input    ui32Alignment :
 @Output   **ppsMemInfo : On success, receives a pointer to the created MEM_INFO structure

 @Return   PVRSRV_ERROR :

******************************************************************************/
IMG_EXPORT
PVRSRV_ERROR IMG_CALLCONV _PVRSRVAllocDeviceMemKM(IMG_HANDLE hDevCookie,
						  PVRSRV_PER_PROCESS_DATA *psPerProc,
						  IMG_HANDLE hDevMemHeap,
						  IMG_UINT32 ui32Flags,
						  IMG_SIZE_T ui32Size,
						  IMG_SIZE_T ui32Alignment,
						  IMG_PVOID pvPrivData,
						  IMG_UINT32 ui32PrivDataLength,
						  IMG_UINT32 ui32ChunkSize,
						  IMG_UINT32 ui32NumVirtChunks,
						  IMG_UINT32 ui32NumPhysChunks,
						  IMG_BOOL *pabMapChunk,
						  PVRSRV_KERNEL_MEM_INFO **ppsMemInfo)
{
	PVRSRV_KERNEL_MEM_INFO *psMemInfo;
	PVRSRV_ERROR eError;
	BM_HEAP *psBMHeap;
	IMG_HANDLE hDevMemContext;
#if defined (MEM_TRACK_INFO_DEBUG)
	PVRSRV_MEM_TRACK_INFO *psMemTrackInfo;
	IMG_UINT32 i;
	IMG_CHAR *pszName = "Heap not found";
	DEVICE_MEMORY_INFO *psDevMemoryInfo;
	DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
#endif

	/* Reject: no heap, a zero-size non-sparse request, or a sparse request
	   with any of its chunk parameters missing/zero. */
	if (!hDevMemHeap ||
		((ui32Size == 0) && ((ui32Flags & PVRSRV_MEM_SPARSE) == 0)) ||
		(((ui32ChunkSize == 0) || (ui32NumVirtChunks == 0) || (ui32NumPhysChunks == 0) ||
		(pabMapChunk == IMG_NULL )) && (ui32Flags & PVRSRV_MEM_SPARSE)))
	{
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	/* Sparse alloc input validation */
	if (ui32Flags & PVRSRV_MEM_SPARSE)
	{
		IMG_UINT32 i;
		IMG_UINT32 ui32Check = 0;

		if (ui32NumVirtChunks < ui32NumPhysChunks)
		{
			return PVRSRV_ERROR_INVALID_PARAMS;
		}

		/* NOTE(review): the patch text from here down to the
		   "psKernelSyncInfo = IMG_NULL" assignment is corrupted in this
		   extract (everything between "for (i=0;i" and "psKernelSyncInfo"
		   appears to have been eaten by an HTML-style "<...>" strip).
		   The missing span should contain the pabMapChunk validation
		   loop, the actual device memory allocation and the meminfo
		   setup.  Recover it from the pristine source before applying. */
		for (i=0;ipsKernelSyncInfo = IMG_NULL;
	}
	else
	{
		/*
			allocate a syncinfo but don't register with resman
			because the holding devicemem will handle the syncinfo
		*/
		psBMHeap = (BM_HEAP*)hDevMemHeap;
		hDevMemContext = (IMG_HANDLE)psBMHeap->pBMContext;
		eError = PVRSRVAllocSyncInfoKM(hDevCookie,
					       hDevMemContext,
					       &psMemInfo->psKernelSyncInfo);
		if(eError != PVRSRV_OK)
		{
			goto free_mainalloc;
		}
	}
#if defined (MEM_TRACK_INFO_DEBUG)
	/* Resolve the heap name for the tracking record below. */
	psBMHeap = (BM_HEAP*)hDevMemHeap;
	hDevMemContext = (IMG_HANDLE)psBMHeap->pBMContext;
	psDevMemoryInfo = &((BM_CONTEXT*)hDevMemContext)->psDeviceNode->sDevMemoryInfo;
	psDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap;

	/* NOTE(review): loop header corrupted in this extract -- presumably
	   originally "for(i=0; i<...; i++) { if(... == psBMHeap->ui32MappingHeapID)".
	   Recover from the pristine source before applying. */
	for(i=0; iui32MappingHeapID)
	{
		pszName = psDeviceMemoryHeap[i].pszName;
		break;
	}
	}

	OSStringCopy(psMemInfo->heapId, pszName);
#endif
	/*
	 * Setup the output.
	 */
	*ppsMemInfo = psMemInfo;

	if (ui32Flags & PVRSRV_MEM_NO_RESMAN)
	{
		psMemInfo->sMemBlk.hResItem = IMG_NULL;
	}
	else
	{
		/* register with the resman */
		psMemInfo->sMemBlk.hResItem = ResManRegisterRes(psPerProc->hResManContext,
								RESMAN_TYPE_DEVICEMEM_ALLOCATION,
								psMemInfo,
								0,
								&FreeDeviceMemCallBack);
		if (psMemInfo->sMemBlk.hResItem == IMG_NULL)
		{
			/* sync info and main allocation are released at free_mainalloc */
			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
			goto free_mainalloc;
		}
	}

	/* increment the refcount */
	PVRSRVKernelMemInfoIncRef(psMemInfo);

	psMemInfo->memType = PVRSRV_MEMTYPE_DEVICE;

#if defined (MEM_TRACK_INFO_DEBUG)
	/* Record an ALLOC event in the memory-tracking list.
	 * NOTE(review): on failure this returns without undoing the resman
	 * registration / refcount above -- debug-only path, confirm intent. */
	eError = OSAllocMem(PVRSRV_PAGEABLE_SELECT,
			sizeof(PVRSRV_MEM_TRACK_INFO),
			(IMG_VOID **)&psMemTrackInfo, IMG_NULL,
			"Mem tracking info");
	if (eError != PVRSRV_OK)
		return eError;
	psMemTrackInfo->sDevVAddr = psMemInfo->sDevVAddr;
	psMemTrackInfo->uSize = psMemInfo->uAllocSize;
	psMemTrackInfo->ui32Pid = OSGetCurrentProcessIDKM();
	psMemTrackInfo->ui32RefCount = psMemInfo->ui32RefCount;
	psMemTrackInfo->eOp = PVRSRV_MEMTYPE_ALLOC;
	psMemTrackInfo->ui32TimeStampUSecs = OSGetCurrentTimeInUSecsKM();

	OSGetCurrentProcessNameKM(psMemTrackInfo->asTaskName, 128);

	OSStringCopy(psMemTrackInfo->heapId, psMemInfo->heapId);

	PVRSRVAddMemTrackInfo(psMemTrackInfo);
#endif
	/*
	 * And I think we're done for now....
	 */
	return (PVRSRV_OK);

free_mainalloc:
	/* Unified error path: release the sync (if any), then the device memory. */
	if (psMemInfo->psKernelSyncInfo)
	{
		PVRSRVKernelSyncInfoDecRef(psMemInfo->psKernelSyncInfo, psMemInfo);
	}
	FreeDeviceMem(psMemInfo);

	return eError;
}

#if defined(SUPPORT_ION)
/* Resman callback used to unmap Ion imports: same shape as
   FreeDeviceMemCallBack but registered under RESMAN_TYPE_DEVICEMEM_ION. */
static PVRSRV_ERROR IonUnmapCallback(IMG_PVOID  pvParam,
				     IMG_UINT32 ui32Param,
				     IMG_BOOL   bDummy)
{
	PVRSRV_KERNEL_MEM_INFO *psMemInfo = (PVRSRV_KERNEL_MEM_INFO *)pvParam;

	PVR_UNREFERENCED_PARAMETER(bDummy);

	return FreeMemCallBackCommon(psMemInfo, ui32Param, PVRSRV_FREE_CALLBACK_ORIGIN_ALLOCATOR);
}

/* Look up (or lazily create) the per-buffer sync info for an Ion import and
   take a reference on it.  hUnique identifies the underlying buffer, so all
   imports of the same buffer share a single sync object via g_psIonSyncHash. */
PVRSRV_ERROR PVRSRVIonBufferSyncAcquire(IMG_HANDLE hUnique,
					IMG_HANDLE hDevCookie,
					IMG_HANDLE hDevMemContext,
					PVRSRV_ION_SYNC_INFO **ppsIonSyncInfo)
{
	PVRSRV_ION_SYNC_INFO *psIonSyncInfo;
	PVRSRV_ERROR eError;
	IMG_BOOL bRet;

	/* Check the hash to see if we already have a sync for this buffer */
	psIonSyncInfo = (PVRSRV_ION_SYNC_INFO *) HASH_Retrieve(g_psIonSyncHash, (IMG_UINTPTR_T) hUnique);
	if (psIonSyncInfo == 0)
	{
		/* This buffer is new to us, create the syncinfo for it */
		eError = OSAllocMem(PVRSRV_PAGEABLE_SELECT,
				sizeof(PVRSRV_ION_SYNC_INFO),
				(IMG_VOID **)&psIonSyncInfo, IMG_NULL,
				"Ion Synchronization Info");
		if (eError != PVRSRV_OK)
		{
			return eError;
		}

		eError = PVRSRVAllocSyncInfoKM(hDevCookie,
					       hDevMemContext,
					       &psIonSyncInfo->psSyncInfo);
		if (eError != PVRSRV_OK)
		{
			OSFreeMem(PVRSRV_PAGEABLE_SELECT,
				  sizeof(PVRSRV_ION_SYNC_INFO),
				  psIonSyncInfo,
				  IMG_NULL);

			return eError;
		}
#if defined(SUPPORT_MEMINFO_IDS)
		psIonSyncInfo->ui64Stamp = ++g_ui64MemInfoID;
#else
		psIonSyncInfo->ui64Stamp = 0;
#endif
		bRet = HASH_Insert(g_psIonSyncHash, (IMG_UINTPTR_T) hUnique, (IMG_UINTPTR_T) psIonSyncInfo);
		if (!bRet)
		{
			eError = PVRSRV_ERROR_OUT_OF_MEMORY;

			PVRSRVKernelSyncInfoDecRef(psIonSyncInfo->psSyncInfo, IMG_NULL);
			OSFreeMem(PVRSRV_PAGEABLE_SELECT,
				  sizeof(PVRSRV_ION_SYNC_INFO),
				  psIonSyncInfo,
				  IMG_NULL);

			return eError;
		}

		psIonSyncInfo->ui32RefCount = 0;
		psIonSyncInfo->hUnique = hUnique;
	}

	psIonSyncInfo->ui32RefCount++;
	*ppsIonSyncInfo = psIonSyncInfo;
	return PVRSRV_OK;
}

/* Drop one reference on an Ion buffer sync; on the last reference remove it
   from the hash and free the kernel sync info plus the wrapper structure. */
IMG_VOID PVRSRVIonBufferSyncRelease(PVRSRV_ION_SYNC_INFO *psIonSyncInfo)
{
	psIonSyncInfo->ui32RefCount--;

	if (psIonSyncInfo->ui32RefCount == 0)
	{
		PVRSRV_ION_SYNC_INFO *psLookup;
		/*
			If we're holding the last reference to the syncinfo
			then free it
		*/
		psLookup = (PVRSRV_ION_SYNC_INFO *) HASH_Remove(g_psIonSyncHash, (IMG_UINTPTR_T) psIonSyncInfo->hUnique);
		PVR_ASSERT(psLookup == psIonSyncInfo);
		PVRSRVKernelSyncInfoDecRef(psIonSyncInfo->psSyncInfo, IMG_NULL);
		OSFreeMem(PVRSRV_PAGEABLE_SELECT,
			  sizeof(PVRSRV_ION_SYNC_INFO),
			  psIonSyncInfo,
			  IMG_NULL);
	}
}

/*!
******************************************************************************

 @Function	PVRSRVMapIonHandleKM

 @Description

 Map an ION buffer into the specified device memory context

 @Input    psPerProc : PerProcess data
 @Input    hDevCookie : Device node cookie
 @Input    hDevMemHeap : Heap ion handles are mapped into
 @Input    ui32NumBuffers : Number of ion handles to map. (If one handle is being
                            mapped, this should be 1, not 0.)
 @Input    phIon : Array of ui32NumBuffers ion handles (fds)
 @Input    ui32Flags : Mapping flags
 @Input    ui32ChunkCount : If ui32NumBuffers is 1, this is the number of
                            "chunks" specified to be mapped into device-virtual
                            address space. If ui32NumBuffers > 1, it is ignored.
 @Input    pauiOffset : Array of offsets in device-virtual address space to map
                        "chunks" of physical from the ion allocation.
 @Input    pauiSize : Array of sizes in bytes of device-virtual address space to
                      map "chunks" of physical from the ion allocation.
 @Input    puiIonBufferSize : Size in bytes of resulting device-virtual mapping.
 @Output   ppsKernelMemInfo: Output kernel meminfo if successful

 @Return   PVRSRV_ERROR :

******************************************************************************/
IMG_EXPORT
PVRSRV_ERROR PVRSRVMapIonHandleKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
				  IMG_HANDLE hDevCookie,
				  IMG_HANDLE hDevMemHeap,
				  IMG_UINT32 ui32NumFDs,
				  IMG_INT32  *pi32BufferFDs,
				  IMG_UINT32 ui32Flags,
				  IMG_UINT32 ui32ChunkCount,
				  IMG_SIZE_T *pauiOffset,
				  IMG_SIZE_T *pauiSize,
				  IMG_SIZE_T *puiIonBufferSize,
				  PVRSRV_KERNEL_MEM_INFO **ppsKernelMemInfo,
				  IMG_UINT64 *pui64Stamp)
{
	PVRSRV_ENV_PER_PROCESS_DATA *psPerProcEnv = PVRSRVProcessPrivateData(psPerProc);
	PVRSRV_DEVICE_NODE *psDeviceNode;
	PVRSRV_KERNEL_MEM_INFO *psNewKernelMemInfo;
	IMG_SYS_PHYADDR *pasSysPhysAddr;
	IMG_SYS_PHYADDR *pasAdjustedSysPhysAddr;
	PVRSRV_MEMBLK *psMemBlock;
	PVRSRV_ERROR eError;
	IMG_HANDLE hPriv;
	IMG_HANDLE hUnique;
	BM_HANDLE hBuffer;
	IMG_SIZE_T uiMapSize = 0;
	IMG_SIZE_T uiAdjustOffset = 0;
	IMG_UINT32 ui32PageCount;
	IMG_UINT32 i;
	/* PVRSRV_MEM_NO_SYNCOBJ suppresses creation of a sync object */
	IMG_BOOL bAllocSync = (ui32Flags & PVRSRV_MEM_NO_SYNCOBJ)?IMG_FALSE:IMG_TRUE;

	if ((hDevCookie == IMG_NULL) || (ui32ChunkCount == 0)
	||  (hDevMemHeap == IMG_NULL) || (ppsKernelMemInfo == IMG_NULL)
	||  (psPerProcEnv == IMG_NULL))
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: Invalid params", __FUNCTION__));
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	/* NOTE(review): the patch text from here to the error check below is
	   corrupted in this extract (the FD validation loop, chunk-size
	   accumulation, meminfo allocation and the start of the
	   "...ImportBufferAndAcquirePhysAddr(psPerProcEnv->psIONClient, ..."
	   call were eaten by an HTML-style "<...>" strip).  Recover the span
	   from the pristine source before applying. */
	for (i=0;ipsIONClient,
			ui32NumFDs,
			pi32BufferFDs,
			&ui32PageCount,
			&pasSysPhysAddr,
			&psNewKernelMemInfo->pvLinAddrKM,
			&hPriv,
			&hUnique);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get ion buffer/buffer phys addr", __FUNCTION__));
		goto exitFailedImport;
	}

	/*
		Make sure the number of pages detected by the ion import are at least
		the size of the total chunked region
	*/
	if(ui32PageCount * PAGE_SIZE < uiMapSize)
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: ion allocator returned fewer page addresses "
				"than specified chunk size(s)", __FUNCTION__));
		eError = PVRSRV_ERROR_INVALID_PARAMS;
		goto exitFailedAdjustedAlloc;
	}

	/*
		An Ion buffer might have a number of "chunks" in it which need to be
		mapped virtually continuous so we need to create a new array of
		addresses based on this chunk data for the actual wrap
	*/
	/* OSAllocMem() must be provided non-zero value for size argument */
	/* NOTE(review): the "!= 0" binds inside the multiply here; it is
	   truth-equivalent to the presumably intended
	   "sizeof(...) * (uiMapSize/HOST_PAGESIZE()) != 0" but reads oddly. */
	PVR_ASSERT(sizeof(IMG_SYS_PHYADDR) * (uiMapSize/HOST_PAGESIZE() != 0));
	if(OSAllocMem(PVRSRV_PAGEABLE_SELECT,
		      sizeof(IMG_SYS_PHYADDR) * (uiMapSize/HOST_PAGESIZE()),
		      (IMG_VOID **)&pasAdjustedSysPhysAddr, IMG_NULL,
		      "Ion adjusted system address array") != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"%s: Failed to alloc memory for adjusted array", __FUNCTION__));
		goto exitFailedAdjustedAlloc;
	}
	OSMemSet(pasAdjustedSysPhysAddr, 0, sizeof(IMG_SYS_PHYADDR) * (uiMapSize/HOST_PAGESIZE()));

	/* NOTE(review): corrupted again -- the chunk copy loop, the BM_Wrap
	   call and "psMemBlock = &psNewKernelMemInfo->" were stripped between
	   "for (i=0;i" and "sMemBlk;".  Recover from the pristine source. */
	for (i=0;isMemBlk;
	psMemBlock->sDevVirtAddr = BM_HandleToDevVaddr(hBuffer);
	psMemBlock->hOSMemHandle = BM_HandleToOSMemHandle(hBuffer);
	psMemBlock->hBuffer = (IMG_HANDLE) hBuffer;
	psMemBlock->hOSWrapMem = hPriv;			/* Saves creating a new element as we know hOSWrapMem will not be used */
	psMemBlock->psIntSysPAddr = pasAdjustedSysPhysAddr;

	psNewKernelMemInfo->ui32Flags = ui32Flags;
	psNewKernelMemInfo->sDevVAddr = psMemBlock->sDevVirtAddr;
	psNewKernelMemInfo->uAllocSize = uiMapSize;
	psNewKernelMemInfo->memType = PVRSRV_MEMTYPE_ION;
	PVRSRVKernelMemInfoIncRef(psNewKernelMemInfo);

	/* Clear the Backup buffer pointer as we do not have one at this point.
	   We only allocate this as we are going up/down */
	psNewKernelMemInfo->pvSysBackupBuffer = IMG_NULL;

	if (!bAllocSync)
	{
		psNewKernelMemInfo->psKernelSyncInfo = IMG_NULL;
	}
	else
	{
		PVRSRV_ION_SYNC_INFO *psIonSyncInfo;
		BM_HEAP *psBMHeap;
		IMG_HANDLE hDevMemContext;

		psBMHeap = (BM_HEAP*)hDevMemHeap;
		hDevMemContext = (IMG_HANDLE)psBMHeap->pBMContext;

		/* Per-buffer shared sync: see PVRSRVIonBufferSyncAcquire */
		eError = PVRSRVIonBufferSyncInfoIncRef(hUnique,
						       hDevCookie,
						       hDevMemContext,
						       &psIonSyncInfo,
						       psNewKernelMemInfo);
		if(eError != PVRSRV_OK)
		{
			goto exitFailedSync;
		}
		psNewKernelMemInfo->hIonSyncInfo = psIonSyncInfo;
		psNewKernelMemInfo->psKernelSyncInfo = IonBufferSyncGetKernelSyncInfo(psIonSyncInfo);
		*pui64Stamp = IonBufferSyncGetStamp(psIonSyncInfo);
	}

	/* register with the resman */
	psNewKernelMemInfo->sMemBlk.hResItem = ResManRegisterRes(psPerProc->hResManContext,
								 RESMAN_TYPE_DEVICEMEM_ION,
								 psNewKernelMemInfo,
								 0,
								 &IonUnmapCallback);
	if (psNewKernelMemInfo->sMemBlk.hResItem == IMG_NULL)
	{
		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
		goto exitFailedResman;
	}

	psNewKernelMemInfo->memType = PVRSRV_MEMTYPE_ION;

	/*
		As the user doesn't tell us the size, just the "chunk" information
		return actual size of the Ion buffer so we can mmap it.
	*/
	*puiIonBufferSize = ui32PageCount * HOST_PAGESIZE();
	*ppsKernelMemInfo = psNewKernelMemInfo;
	return PVRSRV_OK;

	/* Error paths unwind in strict reverse order of acquisition. */
exitFailedResman:
	if (psNewKernelMemInfo->psKernelSyncInfo)
	{
		PVRSRVIonBufferSyncInfoDecRef(psNewKernelMemInfo->hIonSyncInfo, psNewKernelMemInfo);
	}
exitFailedSync:
	BM_Free(hBuffer, ui32Flags
	#if defined (PVRSRV_DEVMEM_TIME_STATS)
	, IMG_NULL
	#endif
	);
exitFailedWrap:
	OSFreeMem(PVRSRV_PAGEABLE_SELECT,
		  sizeof(IMG_SYS_PHYADDR) * uiAdjustOffset,
		  pasAdjustedSysPhysAddr,
		  IMG_NULL);
exitFailedAdjustedAlloc:
	IonUnimportBufferAndReleasePhysAddr(hPriv);
exitFailedImport:
	OSFreeMem(PVRSRV_PAGEABLE_SELECT,
		  sizeof(PVRSRV_KERNEL_MEM_INFO),
		  psNewKernelMemInfo,
		  IMG_NULL);

	return eError;
}

/*!
******************************************************************************

 @Function	PVRSRVUnmapIonHandleKM

 @Description

 Frees an ion buffer mapped with PVRSRVMapIonHandleKM, including the mem_info structure

 @Input    psMemInfo :

 @Return   PVRSRV_ERROR :

******************************************************************************/
IMG_EXPORT
PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapIonHandleKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo)
{
	if (!psMemInfo)
	{
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	/* Resman will run IonUnmapCallback for us. */
	return ResManFreeResByPtr(psMemInfo->sMemBlk.hResItem, CLEANUP_WITH_POLL);
}
#endif	/* SUPPORT_ION */

#if defined(SUPPORT_DMABUF)
/* Resman callback used to unmap dma-buf imports; delegates to the shared
   free path (same shape as IonUnmapCallback). */
static PVRSRV_ERROR DmaBufUnmapCallback(IMG_PVOID  pvParam,
					IMG_UINT32 ui32Param,
					IMG_BOOL   bDummy)
{
	PVRSRV_KERNEL_MEM_INFO *psMemInfo = (PVRSRV_KERNEL_MEM_INFO *)pvParam;

	PVR_UNREFERENCED_PARAMETER(bDummy);

	return FreeMemCallBackCommon(psMemInfo, ui32Param, PVRSRV_FREE_CALLBACK_ORIGIN_ALLOCATOR);
}

/* Look up (or lazily create) the per-buffer sync info for a dma-buf import
   and take a reference.  Unlike the Ion variant this also creates a Linux
   fence context bound to the sync object. */
PVRSRV_ERROR PVRSRVDmaBufSyncAcquire(IMG_HANDLE hUnique,
				     IMG_HANDLE hPriv,
				     IMG_HANDLE hDevCookie,
				     IMG_HANDLE hDevMemContext,
				     PVRSRV_DMABUF_SYNC_INFO **ppsDmaBufSyncInfo)
{
	PVRSRV_DMABUF_SYNC_INFO *psDmaBufSyncInfo;
	PVRSRV_ERROR eError;
	IMG_BOOL bRet;

	/*
	 * If the import has a unique handle, check the hash to see if we
	 * already have a sync for the buffer.
	 */
	psDmaBufSyncInfo = (PVRSRV_DMABUF_SYNC_INFO *) HASH_Retrieve(g_psDmaBufSyncHash, (IMG_UINTPTR_T)hUnique);
	if (!psDmaBufSyncInfo)
	{
		/* Create the syncinfo for the import */
		eError = OSAllocMem(PVRSRV_PAGEABLE_SELECT,
				sizeof(PVRSRV_DMABUF_SYNC_INFO),
				(IMG_VOID **)&psDmaBufSyncInfo, IMG_NULL,
				"DMA-BUF Synchronization Info");
		if (eError != PVRSRV_OK)
		{
			goto ErrorAllocDmaBufSyncInfo;
		}

		eError = PVRSRVAllocSyncInfoKM(hDevCookie,
					       hDevMemContext,
					       &psDmaBufSyncInfo->psSyncInfo);
		if (eError != PVRSRV_OK)
		{
			goto ErrorAllocSyncInfo;
		}
#if defined(SUPPORT_MEMINFO_IDS)
		psDmaBufSyncInfo->ui64Stamp = ++g_ui64MemInfoID;
#else
		psDmaBufSyncInfo->ui64Stamp = 0;
#endif
		psDmaBufSyncInfo->ui32RefCount = 0;
		psDmaBufSyncInfo->hUnique = hUnique;

		psDmaBufSyncInfo->psSyncInfo->hFenceContext = PVRLinuxFenceContextCreate(psDmaBufSyncInfo->psSyncInfo, hPriv);
		if (!psDmaBufSyncInfo->psSyncInfo->hFenceContext)
		{
			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
			goto ErrorCreateFenceContext;
		}

		bRet = HASH_Insert(g_psDmaBufSyncHash, (IMG_UINTPTR_T)hUnique, (IMG_UINTPTR_T) psDmaBufSyncInfo);
		if (!bRet)
		{
			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
			goto ErrorHashInsert;
		}
	}

	psDmaBufSyncInfo->ui32RefCount++;
	*ppsDmaBufSyncInfo = psDmaBufSyncInfo;
	return PVRSRV_OK;

	/* Error labels unwind in reverse order of acquisition. */
ErrorHashInsert:
	PVRLinuxFenceContextDestroy(psDmaBufSyncInfo->psSyncInfo->hFenceContext);
ErrorCreateFenceContext:
	PVRSRVKernelSyncInfoDecRef(psDmaBufSyncInfo->psSyncInfo, IMG_NULL);
ErrorAllocSyncInfo:
	OSFreeMem(PVRSRV_PAGEABLE_SELECT,
		  sizeof(PVRSRV_DMABUF_SYNC_INFO),
		  psDmaBufSyncInfo,
		  IMG_NULL);
ErrorAllocDmaBufSyncInfo:
	return eError;
}

/* Drop one reference on a dma-buf sync; on the last reference remove it from
   the hash, destroy its fence context and free the sync + wrapper. */
IMG_VOID PVRSRVDmaBufSyncRelease(PVRSRV_DMABUF_SYNC_INFO *psDmaBufSyncInfo)
{
	psDmaBufSyncInfo->ui32RefCount--;

	if (psDmaBufSyncInfo->ui32RefCount == 0)
	{
		PVRSRV_DMABUF_SYNC_INFO *psLookup;

		/*
			If we're holding the last reference to the syncinfo
			then free it
		*/

		psLookup = (PVRSRV_DMABUF_SYNC_INFO *) HASH_Remove(g_psDmaBufSyncHash, (IMG_UINTPTR_T) psDmaBufSyncInfo->hUnique);
		PVR_ASSERT(psLookup == psDmaBufSyncInfo);
		(void)psLookup;	/* silence unused warning in release builds */

		PVRLinuxFenceContextDestroy(psDmaBufSyncInfo->psSyncInfo->hFenceContext);
		PVRSRVKernelSyncInfoDecRef(psDmaBufSyncInfo->psSyncInfo, IMG_NULL);
		OSFreeMem(PVRSRV_PAGEABLE_SELECT,
			  sizeof(PVRSRV_DMABUF_SYNC_INFO),
			  psDmaBufSyncInfo,
			  IMG_NULL);
	}
}

/*!
******************************************************************************

 @Function	PVRSRVMapDmaBufKM

 @Description

 Map a dma_buf into the specified device memory context

 @Input    psPerProc : PerProcess data.
 @Input    hDevCookie : Device node cookie.
 @Input    hDevMemHeap : Heap the buffers are mapped into.
 @Input    ui32Flags : Mapping flags.
 @Input    i32DmaBufFD : DMA Buf FD.
 @Input    uiDmaBufOffset : Offset into DMA Buf.
 @Input    uiDmaBufSize : Size of DMA Buf chunk.
 @Output   ppsKernelMemInfo: Output kernel meminfo if successful.
 @Output   puiSize : Size in bytes of resulting device-virtual mapping.
 @Output   puiMemInfoOffset : Array of offsets of each chunk in the meminfo.
 @Return   PVRSRV_ERROR :

******************************************************************************/
IMG_EXPORT
PVRSRV_ERROR PVRSRVMapDmaBufKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
			       IMG_HANDLE hDevCookie,
			       IMG_HANDLE hDevMemHeap,
			       IMG_UINT32 ui32Flags,
			       IMG_INT32 i32DmaBufFD,
			       IMG_SIZE_T uiDmaBufOffset,
			       IMG_SIZE_T uiDmaBufSize,
			       PVRSRV_KERNEL_MEM_INFO **ppsKernelMemInfo,
			       IMG_SIZE_T *puiSize,
			       IMG_SIZE_T *puiMemInfoOffset,
			       IMG_UINT64 *pui64Stamp)
{
	PVRSRV_DEVICE_NODE *psDeviceNode;
	PVRSRV_KERNEL_MEM_INFO *psNewKernelMemInfo;
	IMG_SYS_PHYADDR *pasSysPhysAddr;
	PVRSRV_MEMBLK *psMemBlock;
	PVRSRV_ERROR eError;
	IMG_HANDLE hPriv;
	IMG_HANDLE hUnique;
	BM_HANDLE hBuffer;
	IMG_SIZE_T uiMapSize = 0;
	IMG_UINT32 ui32PageCount;
	/* PVRSRV_MEM_NO_SYNCOBJ suppresses creation of a sync object */
	IMG_BOOL bAllocSync = (ui32Flags & PVRSRV_MEM_NO_SYNCOBJ) ? IMG_FALSE : IMG_TRUE;

	if ((hDevCookie == IMG_NULL) || (hDevMemHeap == IMG_NULL)
	||  (ppsKernelMemInfo == IMG_NULL))
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: Invalid params", __FUNCTION__));
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	/* NOTE(review): psDeviceNode is assigned but appears unused in the
	   visible body -- confirm before removing. */
	psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevCookie;

	if (OSAllocMem(PVRSRV_PAGEABLE_SELECT,
		       sizeof(PVRSRV_KERNEL_MEM_INFO),
		       (IMG_VOID **)&psNewKernelMemInfo, IMG_NULL,
		       "Kernel Memory Info") != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"%s: Failed to alloc memory for block", __FUNCTION__));
		return PVRSRV_ERROR_OUT_OF_MEMORY;
	}
	OSMemSet(psNewKernelMemInfo, 0, sizeof(PVRSRV_KERNEL_MEM_INFO));

	/* Import the DMA Buffer */
	eError = DmaBufImportAndAcquirePhysAddr(i32DmaBufFD,
						uiDmaBufOffset,
						uiDmaBufSize,
						&ui32PageCount,
						&pasSysPhysAddr,
						puiMemInfoOffset,
						&psNewKernelMemInfo->pvLinAddrKM,
						&hPriv,
						&hUnique);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get dma-buf phys addr", __FUNCTION__));
		goto exitFailedImport;
	}

	uiMapSize = ui32PageCount * HOST_PAGESIZE();

	/* Wrap the returned addresses into our memory context */
	if (!BM_Wrap(hDevMemHeap,
		     uiMapSize,
		     0,
		     IMG_FALSE,
		     pasSysPhysAddr,
		     IMG_NULL,
		     &ui32Flags,	/* This function clobbers our bits in ui32Flags */
		     &hBuffer))
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to wrap dma-buf", __FUNCTION__));
		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
		goto exitFailedWrap;
	}

	/* Fill in "Implementation dependant" section of mem info */
	psMemBlock = &psNewKernelMemInfo->sMemBlk;
	psMemBlock->sDevVirtAddr = BM_HandleToDevVaddr(hBuffer);
	psMemBlock->hOSMemHandle = BM_HandleToOSMemHandle(hBuffer);
	psMemBlock->hBuffer = (IMG_HANDLE) hBuffer;
	psMemBlock->hOSWrapMem = hPriv;			/* Saves creating a new element as we know hOSWrapMem will not be used */
	psMemBlock->psIntSysPAddr = pasSysPhysAddr;

	psNewKernelMemInfo->ui32Flags = ui32Flags;
	psNewKernelMemInfo->sDevVAddr = psMemBlock->sDevVirtAddr;
	psNewKernelMemInfo->uAllocSize = uiMapSize;
	psNewKernelMemInfo->memType = PVRSRV_MEMTYPE_DMABUF;
	PVRSRVKernelMemInfoIncRef(psNewKernelMemInfo);

	if (!bAllocSync)
	{
		psNewKernelMemInfo->psKernelSyncInfo = IMG_NULL;
	}
	else
	{
		PVRSRV_DMABUF_SYNC_INFO *psDmaBufSyncInfo;
		BM_HEAP *psBMHeap;
		IMG_HANDLE hDevMemContext;

		psBMHeap = (BM_HEAP*)hDevMemHeap;
		hDevMemContext = (IMG_HANDLE)psBMHeap->pBMContext;

		/* Per-buffer shared sync: see PVRSRVDmaBufSyncAcquire */
		eError = PVRSRVDmaBufSyncInfoIncRef(hUnique,
						    hPriv,
						    hDevCookie,
						    hDevMemContext,
						    &psDmaBufSyncInfo,
						    psNewKernelMemInfo);
		if(eError != PVRSRV_OK)
		{
			goto exitFailedSync;
		}
		psNewKernelMemInfo->hDmaBufSyncInfo = psDmaBufSyncInfo;
		psNewKernelMemInfo->psKernelSyncInfo = psDmaBufSyncInfo->psSyncInfo;
		*pui64Stamp = DmaBufSyncGetStamp(psDmaBufSyncInfo);
	}

	/* register with the resman */
	psNewKernelMemInfo->sMemBlk.hResItem = ResManRegisterRes(psPerProc->hResManContext,
								 RESMAN_TYPE_DEVICEMEM_DMABUF,
								 psNewKernelMemInfo,
								 0,
								 &DmaBufUnmapCallback);
	if (psNewKernelMemInfo->sMemBlk.hResItem == IMG_NULL)
	{
		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
		goto exitFailedResman;
	}

	psNewKernelMemInfo->memType = PVRSRV_MEMTYPE_DMABUF;

	/* Return actual size of the imported memory so we can mmap it */
	*puiSize = uiMapSize;
	*ppsKernelMemInfo = psNewKernelMemInfo;
	return PVRSRV_OK;

	/* Error paths unwind in strict reverse order of acquisition;
	   DmaBufUnimportAndReleasePhysAddr also releases pasSysPhysAddr. */
exitFailedResman:
	if (psNewKernelMemInfo->psKernelSyncInfo)
	{
		PVRSRVDmaBufSyncInfoDecRef(psNewKernelMemInfo->hDmaBufSyncInfo, psNewKernelMemInfo);
	}
exitFailedSync:
	BM_Free(hBuffer, ui32Flags
	#if defined (PVRSRV_DEVMEM_TIME_STATS)
	, IMG_NULL
	#endif
	);

exitFailedWrap:
	DmaBufUnimportAndReleasePhysAddr(hPriv);
exitFailedImport:
	OSFreeMem(PVRSRV_PAGEABLE_SELECT,
		  sizeof(PVRSRV_KERNEL_MEM_INFO),
		  psNewKernelMemInfo,
		  IMG_NULL);

	return eError;
}

/*!
******************************************************************************

 @Function	PVRSRVUnmapDmaBufKM

 @Description

 Releases a dma_buf mapped with PVRSRVMapDmaBufKM, including the mem_info structure

 @Input    psMemInfo :

 @Return   PVRSRV_ERROR :

******************************************************************************/
IMG_EXPORT
PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapDmaBufKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo)
{
	if (!psMemInfo)
	{
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	/* Resman will run DmaBufUnmapCallback for us. */
	return ResManFreeResByPtr(psMemInfo->sMemBlk.hResItem, CLEANUP_WITH_POLL);
}
#endif	/* SUPPORT_DMABUF */

/*!
******************************************************************************

 @Function	PVRSRVDissociateDeviceMemKM

 @Description

 Dissociates memory from the process that allocates it. Intended for
 transferring the ownership of device memory from a particular process
 to the kernel.
+ + @Input psMemInfo : + + @Return PVRSRV_ERROR : + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVDissociateDeviceMemKM(IMG_HANDLE hDevCookie, + PVRSRV_KERNEL_MEM_INFO *psMemInfo) +{ + PVRSRV_ERROR eError; + PVRSRV_DEVICE_NODE *psDeviceNode = hDevCookie; + + PVR_UNREFERENCED_PARAMETER(hDevCookie); + + if (!psMemInfo) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + eError = ResManDissociateRes(psMemInfo->sMemBlk.hResItem, psDeviceNode->hResManContext); + + PVR_ASSERT(eError == PVRSRV_OK); + + return eError; +} + + +/*! +****************************************************************************** + + @Function PVRSRVGetFreeDeviceMemKM + + @Description + + Determines how much memory remains available in the system with the specified + capabilities. + + @Input ui32Flags : + + @Output pui32Total : + + @Output pui32Free : + + @Output pui32LargestBlock : + + @Return PVRSRV_ERROR : + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVGetFreeDeviceMemKM(IMG_UINT32 ui32Flags, + IMG_SIZE_T *pui32Total, + IMG_SIZE_T *pui32Free, + IMG_SIZE_T *pui32LargestBlock) +{ + /* TO BE IMPLEMENTED */ + + PVR_UNREFERENCED_PARAMETER(ui32Flags); + PVR_UNREFERENCED_PARAMETER(pui32Total); + PVR_UNREFERENCED_PARAMETER(pui32Free); + PVR_UNREFERENCED_PARAMETER(pui32LargestBlock); + + return PVRSRV_OK; +} + + + + +/*! +****************************************************************************** + @Function PVRSRVUnwrapExtMemoryKM + + @Description On last unwrap of a given meminfo, unmaps physical pages from a + wrapped allocation, and frees the associated device address space. 
+ Note: this can only unmap memory mapped by PVRSRVWrapExtMemory + + @Input psMemInfo - mem info describing the wrapped allocation + @Return None +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVUnwrapExtMemoryKM (PVRSRV_KERNEL_MEM_INFO *psMemInfo) +{ + if (!psMemInfo) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + return ResManFreeResByPtr(psMemInfo->sMemBlk.hResItem, CLEANUP_WITH_POLL); +} + + +/*! +****************************************************************************** + @Function UnwrapExtMemoryCallBack + + @Description Resman callback to unwrap memory + + @Input pvParam - opaque void ptr param + @Input ui32Param - opaque unsigned long param + @Return PVRSRV_ERROR +******************************************************************************/ +static PVRSRV_ERROR UnwrapExtMemoryCallBack(IMG_PVOID pvParam, + IMG_UINT32 ui32Param, + IMG_BOOL bDummy) +{ + PVRSRV_KERNEL_MEM_INFO *psMemInfo = (PVRSRV_KERNEL_MEM_INFO *)pvParam; + + PVR_UNREFERENCED_PARAMETER(bDummy); + + return FreeMemCallBackCommon(psMemInfo, ui32Param, + PVRSRV_FREE_CALLBACK_ORIGIN_ALLOCATOR); +} + + +/*! +****************************************************************************** + @Function PVRSRVWrapExtMemoryKM + + @Description Allocates a Device Virtual Address in the shared mapping heap + and maps physical pages into that allocation. Note, if the pages are + already mapped into the heap, the existing allocation is returned. 
+ + @Input hDevCookie - Device cookie + @Input psPerProc - Per-process data + @Input hDevMemContext - device memory context + @Input uByteSize - Size of allocation + @Input uPageOffset - Offset into the first page of the memory to be wrapped + @Input bPhysContig - whether the underlying memory is physically contiguous + @Input psExtSysPAddr - The list of Device Physical page addresses + @Input pvLinAddr - ptr to buffer to wrap + @Output ppsMemInfo - mem info describing the wrapped allocation + @Return None +******************************************************************************/ + +IMG_EXPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVWrapExtMemoryKM(IMG_HANDLE hDevCookie, + PVRSRV_PER_PROCESS_DATA *psPerProc, + IMG_HANDLE hDevMemContext, + IMG_SIZE_T uByteSize, + IMG_SIZE_T uPageOffset, + IMG_BOOL bPhysContig, + IMG_SYS_PHYADDR *psExtSysPAddr, + IMG_VOID *pvLinAddr, + IMG_UINT32 ui32Flags, + PVRSRV_KERNEL_MEM_INFO **ppsMemInfo) +{ + PVRSRV_KERNEL_MEM_INFO *psMemInfo = IMG_NULL; + DEVICE_MEMORY_INFO *psDevMemoryInfo; + IMG_SIZE_T ui32HostPageSize = HOST_PAGESIZE(); + IMG_HANDLE hDevMemHeap = IMG_NULL; + PVRSRV_DEVICE_NODE* psDeviceNode; + BM_HANDLE hBuffer; + PVRSRV_MEMBLK *psMemBlock; + IMG_BOOL bBMError; + BM_HEAP *psBMHeap; + PVRSRV_ERROR eError; + IMG_VOID *pvPageAlignedCPUVAddr; + IMG_SYS_PHYADDR *psIntSysPAddr = IMG_NULL; + IMG_HANDLE hOSWrapMem = IMG_NULL; + DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;IMG_UINT32 i; +#if defined (MEM_TRACK_INFO_DEBUG) + PVRSRV_MEM_TRACK_INFO *psMemTrackInfo; + IMG_CHAR *pszName="Heap not found"; +#endif + IMG_SIZE_T uPageCount = 0; + + PVR_DPF ((PVR_DBG_MESSAGE, + "PVRSRVWrapExtMemoryKM (uSize=0x%" SIZE_T_FMT_LEN "x, uPageOffset=0x%" + SIZE_T_FMT_LEN "x, bPhysContig=%d, extSysPAddr=" SYSPADDR_FMT + ", pvLinAddr=%p, ui32Flags=%u)", + uByteSize, + uPageOffset, + bPhysContig, + psExtSysPAddr?psExtSysPAddr->uiAddr:0x0, + pvLinAddr, + ui32Flags)); + + psDeviceNode = (PVRSRV_DEVICE_NODE*)hDevCookie; + PVR_ASSERT(psDeviceNode != 
IMG_NULL); + + if (psDeviceNode == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVWrapExtMemoryKM: invalid parameter")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + if(pvLinAddr) + { + /* derive the page offset from the cpu ptr (in case it's not supplied) */ + uPageOffset = (IMG_UINTPTR_T)pvLinAddr & (ui32HostPageSize - 1); + + /* get the pagecount and the page aligned base ptr */ + uPageCount = HOST_PAGEALIGN(uByteSize + uPageOffset) / ui32HostPageSize; + pvPageAlignedCPUVAddr = (IMG_VOID *)((IMG_UINTPTR_T)pvLinAddr - uPageOffset); + + /* allocate array of SysPAddr to hold page addresses */ + if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + uPageCount * sizeof(IMG_SYS_PHYADDR), + (IMG_VOID **)&psIntSysPAddr, IMG_NULL, + "Array of Page Addresses") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVWrapExtMemoryKM: Failed to alloc memory for block")); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + eError = OSAcquirePhysPageAddr(pvPageAlignedCPUVAddr, + uPageCount * ui32HostPageSize, + psIntSysPAddr, + &hOSWrapMem); + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVWrapExtMemoryKM: Failed to alloc memory for block")); + eError = PVRSRV_ERROR_OUT_OF_MEMORY;//FIXME: need better error code + goto ErrorExitPhase1; + } + + /* replace the supplied page address list */ + psExtSysPAddr = psIntSysPAddr; + + /* assume memory is not physically contiguous; + we shouldn't trust what the user says here + */ + bPhysContig = IMG_FALSE; + } +#if !defined(__QNXNTO__) + else + { + if (psExtSysPAddr) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVWrapExtMemoryKM: invalid parameter, physical address passing is not supported")); + } + else + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVWrapExtMemoryKM: invalid parameter, no address specified")); + } + return PVRSRV_ERROR_INVALID_PARAMS; + } +#endif + + + /* Choose the heap to map to */ + psDevMemoryInfo = &((BM_CONTEXT*)hDevMemContext)->psDeviceNode->sDevMemoryInfo; + psDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap; + for(i=0; iui32MappingHeapID) 
+ { + if(psDeviceMemoryHeap[i].DevMemHeapType == DEVICE_MEMORY_HEAP_PERCONTEXT) + { + if (psDeviceMemoryHeap[i].ui32HeapSize > 0) + { + hDevMemHeap = BM_CreateHeap(hDevMemContext, &psDeviceMemoryHeap[i]); + #if defined (MEM_TRACK_INFO_DEBUG) + pszName = psDeviceMemoryHeap[i].pszName; + #endif + } + else + { + hDevMemHeap = IMG_NULL; + } + } + else + { + hDevMemHeap = psDevMemoryInfo->psDeviceMemoryHeap[i].hDevMemHeap; + #if defined (MEM_TRACK_INFO_DEBUG) + pszName = psDeviceMemoryHeap[i].pszName; + #endif + } + break; + } + } + + if(hDevMemHeap == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVWrapExtMemoryKM: unable to find mapping heap")); + eError = PVRSRV_ERROR_UNABLE_TO_FIND_MAPPING_HEAP; + goto ErrorExitPhase2; + } + + if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(PVRSRV_KERNEL_MEM_INFO), + (IMG_VOID **)&psMemInfo, IMG_NULL, + "Kernel Memory Info") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVWrapExtMemoryKM: Failed to alloc memory for block")); + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto ErrorExitPhase2; + } + + OSMemSet(psMemInfo, 0, sizeof(*psMemInfo)); + /* + Force the memory to be read/write. 
This used to be done in the BM, but + ion imports don't want this behaviour + */ + psMemInfo->ui32Flags = ui32Flags | PVRSRV_MEM_READ | PVRSRV_MEM_WRITE; + + psMemBlock = &(psMemInfo->sMemBlk); + + bBMError = BM_Wrap(hDevMemHeap, + uByteSize, + uPageOffset, + bPhysContig, + psExtSysPAddr, + IMG_NULL, + &psMemInfo->ui32Flags, + &hBuffer); + if (!bBMError) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVWrapExtMemoryKM: BM_Wrap Failed")); + eError = PVRSRV_ERROR_BAD_MAPPING; + goto ErrorExitPhase3; + } + + /* Fill in "Implementation dependant" section of mem info */ + psMemBlock->sDevVirtAddr = BM_HandleToDevVaddr(hBuffer); + psMemBlock->hOSMemHandle = BM_HandleToOSMemHandle(hBuffer); + psMemBlock->hOSWrapMem = hOSWrapMem; + psMemBlock->psIntSysPAddr = psIntSysPAddr; + + /* Convert from BM_HANDLE to external IMG_HANDLE */ + psMemBlock->hBuffer = (IMG_HANDLE)hBuffer; + + /* Fill in the public fields of the MEM_INFO structure */ + psMemInfo->pvLinAddrKM = BM_HandleToCpuVaddr(hBuffer); + psMemInfo->sDevVAddr = psMemBlock->sDevVirtAddr; + psMemInfo->uAllocSize = uByteSize; + + /* Clear the Backup buffer pointer as we do not have one at this point. 
+ We only allocate this as we are going up/down + */ + psMemInfo->pvSysBackupBuffer = IMG_NULL; + + /* + allocate a syncinfo but don't register with resman + because the holding devicemem will handle the syncinfo + */ + psBMHeap = (BM_HEAP*)hDevMemHeap; + hDevMemContext = (IMG_HANDLE)psBMHeap->pBMContext; + eError = PVRSRVAllocSyncInfoKM(hDevCookie, + hDevMemContext, + &psMemInfo->psKernelSyncInfo); + if(eError != PVRSRV_OK) + { + FreeDeviceMem(psMemInfo); + /* FreeDeviceMem will free the meminfo so jump straight to ErrorExitPhase2 */ + goto ErrorExitPhase2; + } + + /* increment the refcount */ + PVRSRVKernelMemInfoIncRef(psMemInfo); + + psMemInfo->memType = PVRSRV_MEMTYPE_WRAPPED; + + /* Register Resource */ + psMemInfo->sMemBlk.hResItem = ResManRegisterRes(psPerProc->hResManContext, + RESMAN_TYPE_DEVICEMEM_WRAP, + psMemInfo, + 0, + &UnwrapExtMemoryCallBack); +#if defined (MEM_TRACK_INFO_DEBUG) + OSStringCopy(psMemInfo->heapId, pszName); +#endif + /* return the meminfo */ + *ppsMemInfo = psMemInfo; + +#if defined (MEM_TRACK_INFO_DEBUG) + eError = OSAllocMem(PVRSRV_PAGEABLE_SELECT, + sizeof(PVRSRV_MEM_TRACK_INFO), + (IMG_VOID **)&psMemTrackInfo, IMG_NULL, + "Mem tracking info"); + if (eError != PVRSRV_OK) + return eError; + psMemTrackInfo->sDevVAddr = psMemInfo->sDevVAddr; + psMemTrackInfo->uSize = psMemInfo->uAllocSize; + psMemTrackInfo->ui32Pid = OSGetCurrentProcessIDKM(); + psMemTrackInfo->ui32RefCount = psMemInfo->ui32RefCount; + psMemTrackInfo->eOp = PVRSRV_MEMTYPE_WRAPPED; + psMemTrackInfo->ui32TimeStampUSecs = OSGetCurrentTimeInUSecsKM(); + + OSGetCurrentProcessNameKM(psMemTrackInfo->asTaskName, 128); + + OSStringCopy(psMemTrackInfo->heapId, psMemInfo->heapId); + PVRSRVAddMemTrackInfo(psMemTrackInfo); +#endif + + return PVRSRV_OK; + + /* error handling: */ +ErrorExitPhase3: + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_KERNEL_MEM_INFO), psMemInfo, IMG_NULL); + /*not nulling pointer, out of scope*/ + +ErrorExitPhase2: + if(hOSWrapMem) + { + 
OSReleasePhysPageAddr(hOSWrapMem); + } + +ErrorExitPhase1: + if(psIntSysPAddr) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, uPageCount * sizeof(IMG_SYS_PHYADDR), psIntSysPAddr, IMG_NULL); + /*not nulling shared pointer, uninitialized to this point*/ + } + + return eError; +} + + +/*! +****************************************************************************** + + @Function PVRSRVUnmapDeviceMemoryKM + + @Description + Unmaps an existing allocation previously mapped by PVRSRVMapDeviceMemory + + @Input psMemInfo + + @Return PVRSRV_ERROR : + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapDeviceMemoryKM (PVRSRV_KERNEL_MEM_INFO *psMemInfo) +{ + if (!psMemInfo) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + return ResManFreeResByPtr(psMemInfo->sMemBlk.hResItem, CLEANUP_WITH_POLL); +} + + +/*! +****************************************************************************** + @Function UnmapDeviceMemoryCallBack + + @Description Resman callback to unmap memory memory previously mapped + from one allocation to another + + @Input pvParam - opaque void ptr param + @Input ui32Param - opaque unsigned long param + @Return PVRSRV_ERROR +******************************************************************************/ +static PVRSRV_ERROR UnmapDeviceMemoryCallBack(IMG_PVOID pvParam, + IMG_UINT32 ui32Param, + IMG_BOOL bDummy) +{ + PVRSRV_ERROR eError; + RESMAN_MAP_DEVICE_MEM_DATA *psMapData = pvParam; + + PVR_UNREFERENCED_PARAMETER(ui32Param); + PVR_UNREFERENCED_PARAMETER(bDummy); + + if(psMapData->psMemInfo->sMemBlk.psIntSysPAddr) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(IMG_SYS_PHYADDR), psMapData->psMemInfo->sMemBlk.psIntSysPAddr, IMG_NULL); + psMapData->psMemInfo->sMemBlk.psIntSysPAddr = IMG_NULL; + } + + if( psMapData->psMemInfo->psKernelSyncInfo ) + { + PVRSRVKernelSyncInfoDecRef(psMapData->psMemInfo->psKernelSyncInfo, psMapData->psMemInfo); + } + + eError = 
FreeDeviceMem(psMapData->psMemInfo); + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"UnmapDeviceMemoryCallBack: Failed to free DST meminfo")); + return eError; + } + + /* This will only free the src psMemInfo if we hold the last reference */ + eError = FreeMemCallBackCommon(psMapData->psSrcMemInfo, 0, + PVRSRV_FREE_CALLBACK_ORIGIN_IMPORTER); + + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(RESMAN_MAP_DEVICE_MEM_DATA), psMapData, IMG_NULL); + /*not nulling pointer, copy on stack*/ + + return eError; +} + + +/*! +****************************************************************************** + + @Function PVRSRVMapDeviceMemoryKM + + @Description + Maps an existing allocation to a specific device address space and heap + Note: it's valid to map from one physical device to another + + @Input psPerProc : Per-process data + @Input psSrcMemInfo + @Input hDstDevMemHeap + @Input ppsDstMemInfo + + @Return PVRSRV_ERROR : + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVMapDeviceMemoryKM(PVRSRV_PER_PROCESS_DATA *psPerProc, + PVRSRV_KERNEL_MEM_INFO *psSrcMemInfo, + IMG_HANDLE hDstDevMemHeap, + PVRSRV_KERNEL_MEM_INFO **ppsDstMemInfo) +{ + PVRSRV_ERROR eError; + IMG_UINT32 i; + IMG_SIZE_T uPageCount, uPageOffset; + IMG_SIZE_T ui32HostPageSize = HOST_PAGESIZE(); + IMG_SYS_PHYADDR *psSysPAddr = IMG_NULL; + IMG_DEV_PHYADDR sDevPAddr; + BM_BUF *psBuf; + IMG_DEV_VIRTADDR sDevVAddr; + PVRSRV_KERNEL_MEM_INFO *psMemInfo = IMG_NULL; + BM_HANDLE hBuffer; + PVRSRV_MEMBLK *psMemBlock; + IMG_BOOL bBMError; + PVRSRV_DEVICE_NODE *psDeviceNode; + IMG_VOID *pvPageAlignedCPUVAddr; + RESMAN_MAP_DEVICE_MEM_DATA *psMapData = IMG_NULL; +#if defined (MEM_TRACK_INFO_DEBUG) + PVRSRV_MEM_TRACK_INFO *psMemTrackInfo; + DEVICE_MEMORY_INFO *psDevMemoryInfo; + DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap; + BM_HEAP *psBMHeap; + IMG_HANDLE hDevMemContext; +#endif + + /* check params */ + if(!psSrcMemInfo || !hDstDevMemHeap || 
!ppsDstMemInfo) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceMemoryKM: invalid parameters")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* initialise the Dst Meminfo to NULL*/ + *ppsDstMemInfo = IMG_NULL; + + uPageOffset = psSrcMemInfo->sDevVAddr.uiAddr & (ui32HostPageSize - 1); + uPageCount = HOST_PAGEALIGN(psSrcMemInfo->uAllocSize + uPageOffset) / ui32HostPageSize; + pvPageAlignedCPUVAddr = (IMG_VOID *)((IMG_UINTPTR_T)psSrcMemInfo->pvLinAddrKM - uPageOffset); + + /* + allocate array of SysPAddr to hold SRC allocation page addresses + */ + if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + uPageCount*sizeof(IMG_SYS_PHYADDR), + (IMG_VOID **)&psSysPAddr, IMG_NULL, + "Array of Page Addresses") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceMemoryKM: Failed to alloc memory for block")); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + psBuf = psSrcMemInfo->sMemBlk.hBuffer; + + /* get the device node */ + psDeviceNode = psBuf->pMapping->pBMHeap->pBMContext->psDeviceNode; + + /* build a list of physical page addresses */ + sDevVAddr.uiAddr = psSrcMemInfo->sDevVAddr.uiAddr - IMG_CAST_TO_DEVVADDR_UINT(uPageOffset); + for(i=0; isDevId.eDeviceType, sDevPAddr); + + /* advance the DevVaddr one page */ + sDevVAddr.uiAddr += IMG_CAST_TO_DEVVADDR_UINT(ui32HostPageSize); + } + + /* allocate the resman map data */ + if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(RESMAN_MAP_DEVICE_MEM_DATA), + (IMG_VOID **)&psMapData, IMG_NULL, + "Resource Manager Map Data") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceMemoryKM: Failed to alloc resman map data")); + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto ErrorExit; + } + + if(OSAllocMem(PVRSRV_PAGEABLE_SELECT, + sizeof(PVRSRV_KERNEL_MEM_INFO), + (IMG_VOID **)&psMemInfo, IMG_NULL, + "Kernel Memory Info") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceMemoryKM: Failed to alloc memory for block")); + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto ErrorExit; + } + + OSMemSet(psMemInfo, 0, sizeof(*psMemInfo)); + + /* + 
Force the memory to be read/write. This used to be done in the BM, but + ion imports don't want this behaviour + */ + psMemInfo->ui32Flags = psSrcMemInfo->ui32Flags | PVRSRV_MEM_READ | PVRSRV_MEM_WRITE; + + psMemBlock = &(psMemInfo->sMemBlk); + + bBMError = BM_Wrap(hDstDevMemHeap, + psSrcMemInfo->uAllocSize, + uPageOffset, + IMG_FALSE, + psSysPAddr, + pvPageAlignedCPUVAddr, + &psMemInfo->ui32Flags, + &hBuffer); + + if (!bBMError) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceMemoryKM: BM_Wrap Failed")); + eError = PVRSRV_ERROR_BAD_MAPPING; + goto ErrorExit; + } + + /* Fill in "Implementation dependant" section of mem info */ + psMemBlock->sDevVirtAddr = BM_HandleToDevVaddr(hBuffer); + psMemBlock->hOSMemHandle = BM_HandleToOSMemHandle(hBuffer); + + /* Convert from BM_HANDLE to external IMG_HANDLE */ + psMemBlock->hBuffer = (IMG_HANDLE)hBuffer; + + /* Store page list */ + psMemBlock->psIntSysPAddr = psSysPAddr; + + /* patch up the CPU VAddr into the meminfo */ + psMemInfo->pvLinAddrKM = psSrcMemInfo->pvLinAddrKM; + + /* Fill in the public fields of the MEM_INFO structure */ + psMemInfo->sDevVAddr = psMemBlock->sDevVirtAddr; + psMemInfo->uAllocSize = psSrcMemInfo->uAllocSize; + psMemInfo->psKernelSyncInfo = psSrcMemInfo->psKernelSyncInfo; + + /* reference the same ksi that the original meminfo referenced */ + if(psMemInfo->psKernelSyncInfo) + { + PVRSRVKernelSyncInfoIncRef(psMemInfo->psKernelSyncInfo, psMemInfo); + } + + /* Clear the Backup buffer pointer as we do not have one at this point. 
+ We only allocate this as we are going up/down + */ + psMemInfo->pvSysBackupBuffer = IMG_NULL; + + /* increment our refcount */ + PVRSRVKernelMemInfoIncRef(psMemInfo); + + /* increment the src refcount */ + PVRSRVKernelMemInfoIncRef(psSrcMemInfo); + + /* Tell the buffer manager about the export */ + BM_Export(psSrcMemInfo->sMemBlk.hBuffer); + + psMemInfo->memType = PVRSRV_MEMTYPE_MAPPED; + + /* setup the resman map data */ + psMapData->psMemInfo = psMemInfo; + psMapData->psSrcMemInfo = psSrcMemInfo; + + /* Register Resource */ + psMemInfo->sMemBlk.hResItem = ResManRegisterRes(psPerProc->hResManContext, + RESMAN_TYPE_DEVICEMEM_MAPPING, + psMapData, + 0, + &UnmapDeviceMemoryCallBack); +#if defined (MEM_TRACK_INFO_DEBUG) + psBMHeap = (BM_HEAP*)hDstDevMemHeap; + hDevMemContext = (IMG_HANDLE)psBMHeap->pBMContext; + psDevMemoryInfo = &((BM_CONTEXT*)hDevMemContext)->psDeviceNode->sDevMemoryInfo; + psDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap; + + for(i=0; iui32MappingHeapID) + break; + } + + if(i == PVRSRV_MAX_CLIENT_HEAPS) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceMemoryKM: unable to find mapping heap")); + eError = PVRSRV_ERROR_UNABLE_TO_FIND_MAPPING_HEAP; + OSStringCopy(psMemInfo->heapId, "Heap not found"); + } + else + OSStringCopy(psMemInfo->heapId, psDeviceMemoryHeap[i].pszName); +#endif + + *ppsDstMemInfo = psMemInfo; + +#if defined (MEM_TRACK_INFO_DEBUG) + eError = OSAllocMem(PVRSRV_PAGEABLE_SELECT, + sizeof(PVRSRV_MEM_TRACK_INFO), + (IMG_VOID **)&psMemTrackInfo, IMG_NULL, + "Mem tracking info"); + if (eError != PVRSRV_OK) + return eError; + psMemTrackInfo->sDevVAddr = psMemInfo->sDevVAddr; + psMemTrackInfo->uSize = psMemInfo->uAllocSize; + psMemTrackInfo->ui32Pid = OSGetCurrentProcessIDKM(); + psMemTrackInfo->ui32RefCount = psMemInfo->ui32RefCount; + psMemTrackInfo->eOp = PVRSRV_MEMTYPE_MAPPED; + psMemTrackInfo->ui32TimeStampUSecs = OSGetCurrentTimeInUSecsKM(); + + OSGetCurrentProcessNameKM(psMemTrackInfo->asTaskName, 128); + 
OSStringCopy(psMemTrackInfo->heapId, psMemInfo->heapId); + + PVRSRVAddMemTrackInfo(psMemTrackInfo); +#endif + return PVRSRV_OK; + + /* error handling: */ + +ErrorExit: + + if(psSysPAddr) + { + /* Free the page address list */ + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(IMG_SYS_PHYADDR), psSysPAddr, IMG_NULL); + /*not nulling shared pointer, holding structure could be not initialized*/ + } + + if(psMemInfo) + { + /* Free the page address list */ + OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(PVRSRV_KERNEL_MEM_INFO), psMemInfo, IMG_NULL); + /*not nulling shared pointer, holding structure could be not initialized*/ + } + + if(psMapData) + { + /* Free the resman map data */ + OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(RESMAN_MAP_DEVICE_MEM_DATA), psMapData, IMG_NULL); + /*not nulling pointer, out of scope*/ + } + + return eError; +} + +#if defined(SUPPORT_PVRSRV_DEVICE_CLASS) + +/*! +****************************************************************************** + @Function PVRSRVUnmapDeviceClassMemoryKM + + @Description unmaps physical pages from devices address space at a specified + Device Virtual Address. + Note: this can only unmap memory mapped by + PVRSRVMapDeviceClassMemoryKM + + @Input psMemInfo - mem info describing the device virtual address + to unmap RAM from + @Return None +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapDeviceClassMemoryKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo) +{ + if (!psMemInfo) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + return ResManFreeResByPtr(psMemInfo->sMemBlk.hResItem, CLEANUP_WITH_POLL); +} + + +/*! 
+****************************************************************************** + @Function UnmapDeviceClassMemoryCallBack + + @Description Resman callback to unmap device class memory + + @Input pvParam - opaque void ptr param + @Input ui32Param - opaque unsigned long param + @Return PVRSRV_ERROR +******************************************************************************/ +static PVRSRV_ERROR UnmapDeviceClassMemoryCallBack(IMG_PVOID pvParam, + IMG_UINT32 ui32Param, + IMG_BOOL bDummy) +{ + PVRSRV_DC_MAPINFO *psDCMapInfo = pvParam; + PVRSRV_KERNEL_MEM_INFO *psMemInfo; + + PVR_UNREFERENCED_PARAMETER(ui32Param); + PVR_UNREFERENCED_PARAMETER(bDummy); + + psMemInfo = psDCMapInfo->psMemInfo; + +#if defined(SUPPORT_MEMORY_TILING) + if(psDCMapInfo->ui32TilingStride > 0) + { + PVRSRV_DEVICE_NODE *psDeviceNode = psDCMapInfo->psDeviceNode; + + if (psDeviceNode->pfnFreeMemTilingRange(psDeviceNode, + psDCMapInfo->ui32RangeIndex) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"UnmapDeviceClassMemoryCallBack: FreeMemTilingRange failed")); + } + } +#endif + + (psDCMapInfo->psDeviceClassBuffer->ui32MemMapRefCount)--; + + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DC_MAPINFO), psDCMapInfo, IMG_NULL); + + return FreeMemCallBackCommon(psMemInfo, ui32Param, + PVRSRV_FREE_CALLBACK_ORIGIN_ALLOCATOR); +} + + +/*! 
+****************************************************************************** + @Function PVRSRVMapDeviceClassMemoryKM + + @Description maps physical pages for DeviceClass buffers into a devices + address space at a specified and pre-allocated Device + Virtual Address + + @Input psPerProc - Per-process data + @Input hDevMemContext - Device memory context + @Input hDeviceClassBuffer - Device Class Buffer (Surface) handle + @Input hDevMemContext - device memory context to which mapping + is made + @Output ppsMemInfo - mem info describing the mapped memory + @Output phOSMapInfo - OS specific mapping information + @Return None +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVMapDeviceClassMemoryKM(PVRSRV_PER_PROCESS_DATA *psPerProc, + IMG_HANDLE hDevMemContext, + IMG_HANDLE hDeviceClassBuffer, + PVRSRV_KERNEL_MEM_INFO **ppsMemInfo, + IMG_HANDLE *phOSMapInfo) +{ + PVRSRV_ERROR eError; + PVRSRV_DEVICE_NODE* psDeviceNode; + PVRSRV_KERNEL_MEM_INFO *psMemInfo = IMG_NULL; + PVRSRV_DEVICECLASS_BUFFER *psDeviceClassBuffer; + IMG_SYS_PHYADDR *psSysPAddr; + IMG_VOID *pvCPUVAddr, *pvPageAlignedCPUVAddr; + IMG_BOOL bPhysContig; + BM_CONTEXT *psBMContext; + DEVICE_MEMORY_INFO *psDevMemoryInfo; + DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap; + IMG_HANDLE hDevMemHeap = IMG_NULL; + IMG_UINT32 ui32ByteSize; + IMG_SIZE_T uOffset; + IMG_SIZE_T uPageSize = HOST_PAGESIZE(); + BM_HANDLE hBuffer; + PVRSRV_MEMBLK *psMemBlock; + IMG_BOOL bBMError; + IMG_UINT32 i; + PVRSRV_DC_MAPINFO *psDCMapInfo = IMG_NULL; +#if defined (MEM_TRACK_INFO_DEBUG) + PVRSRV_MEM_TRACK_INFO *psMemTrackInfo; + IMG_CHAR *pszName = "Heap not found"; +#endif + if(!hDeviceClassBuffer || !ppsMemInfo || !phOSMapInfo || !hDevMemContext) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceClassMemoryKM: invalid parameters")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* allocate resman storage structure */ + if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + 
sizeof(PVRSRV_DC_MAPINFO), + (IMG_VOID **)&psDCMapInfo, IMG_NULL, + "PVRSRV_DC_MAPINFO") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceClassMemoryKM: Failed to alloc memory for psDCMapInfo")); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + OSMemSet(psDCMapInfo, 0, sizeof(PVRSRV_DC_MAPINFO)); + + psDeviceClassBuffer = (PVRSRV_DEVICECLASS_BUFFER*)hDeviceClassBuffer; + + /* + call into external driver to get info so we can map a meminfo + Notes: + It's expected that third party displays will only support + physically contiguous display surfaces. However, it's possible + a given display may have an MMU and therefore support non-contig' + display surfaces. + + If surfaces are contiguous, ext driver should return: + - a CPU virtual address, or IMG_NULL where the surface is not mapped to CPU + - (optional) an OS Mapping handle for KM->UM surface mapping + - the size in bytes + - a single system physical address + + If surfaces are non-contiguous, ext driver should return: + - a CPU virtual address + - (optional) an OS Mapping handle for KM->UM surface mapping + - the size in bytes (must be multiple of 4kB) + - a list of system physical addresses (at 4kB intervals) + */ + eError = psDeviceClassBuffer->pfnGetBufferAddr(psDeviceClassBuffer->hExtDevice, + psDeviceClassBuffer->hExtBuffer, + &psSysPAddr, + &ui32ByteSize, + &pvCPUVAddr, + phOSMapInfo, + &bPhysContig, + &psDCMapInfo->ui32TilingStride); + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceClassMemoryKM: unable to get buffer address")); + goto ErrorExitPhase1; + } + + /* Choose the heap to map to */ + psBMContext = (BM_CONTEXT*)psDeviceClassBuffer->hDevMemContext; + psDeviceNode = psBMContext->psDeviceNode; + psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo; + psDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap; + for(i=0; iui32MappingHeapID) + { + if(psDeviceMemoryHeap[i].DevMemHeapType == DEVICE_MEMORY_HEAP_PERCONTEXT) + { + if (psDeviceMemoryHeap[i].ui32HeapSize > 0) + { + 
hDevMemHeap = BM_CreateHeap(hDevMemContext, &psDeviceMemoryHeap[i]); + #if defined (MEM_TRACK_INFO_DEBUG) + pszName = psDeviceMemoryHeap[i].pszName; + #endif + } + else + { + hDevMemHeap = IMG_NULL; + } + } + else + { + hDevMemHeap = psDevMemoryInfo->psDeviceMemoryHeap[i].hDevMemHeap; + #if defined (MEM_TRACK_INFO_DEBUG) + pszName = psDeviceMemoryHeap[i].pszName; + #endif + } + break; + } + } + + if(hDevMemHeap == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceClassMemoryKM: unable to find mapping heap")); + eError = PVRSRV_ERROR_UNABLE_TO_FIND_RESOURCE; + goto ErrorExitPhase1; + } + + /* Only need lower 12 bits of the cpu addr - don't care what size a void* is */ + uOffset = ((IMG_UINTPTR_T)pvCPUVAddr) & (uPageSize - 1); + pvPageAlignedCPUVAddr = (IMG_VOID *)((IMG_UINTPTR_T)pvCPUVAddr - uOffset); + + eError = OSAllocMem(PVRSRV_PAGEABLE_SELECT, + sizeof(PVRSRV_KERNEL_MEM_INFO), + (IMG_VOID **)&psMemInfo, IMG_NULL, + "Kernel Memory Info"); + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceClassMemoryKM: Failed to alloc memory for block")); + goto ErrorExitPhase1; + } + + OSMemSet(psMemInfo, 0, sizeof(*psMemInfo)); + + /* + Force the memory to be read/write. 
This used to be done in the BM, but + ion imports don't want this behaviour + */ + psMemInfo->ui32Flags |= PVRSRV_MEM_READ | PVRSRV_MEM_WRITE; + + psMemBlock = &(psMemInfo->sMemBlk); + + bBMError = BM_Wrap(hDevMemHeap, + ui32ByteSize, + uOffset, + bPhysContig, + psSysPAddr, + pvPageAlignedCPUVAddr, + &psMemInfo->ui32Flags, + &hBuffer); + + if (!bBMError) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceClassMemoryKM: BM_Wrap Failed")); + /*not nulling pointer, out of scope*/ + eError = PVRSRV_ERROR_BAD_MAPPING; + goto ErrorExitPhase2; + } + + /* Fill in "Implementation dependant" section of mem info */ + psMemBlock->sDevVirtAddr = BM_HandleToDevVaddr(hBuffer); + psMemBlock->hOSMemHandle = BM_HandleToOSMemHandle(hBuffer); + + /* Convert from BM_HANDLE to external IMG_HANDLE */ + psMemBlock->hBuffer = (IMG_HANDLE)hBuffer; + + /* patch up the CPU VAddr into the meminfo - use the address from the BM, not the one from the deviceclass + api, to ensure user mode mapping is possible + */ + psMemInfo->pvLinAddrKM = BM_HandleToCpuVaddr(hBuffer); + + /* Fill in the public fields of the MEM_INFO structure */ + psMemInfo->sDevVAddr = psMemBlock->sDevVirtAddr; + psMemInfo->uAllocSize = ui32ByteSize; + psMemInfo->psKernelSyncInfo = psDeviceClassBuffer->psKernelSyncInfo; + + PVR_ASSERT(psMemInfo->psKernelSyncInfo != IMG_NULL); + if (psMemInfo->psKernelSyncInfo) + { + PVRSRVKernelSyncInfoIncRef(psMemInfo->psKernelSyncInfo, psMemInfo); + } + + /* Clear the Backup buffer pointer as we do not have one at this point. 
+ We only allocate this as we are going up/down + */ + psMemInfo->pvSysBackupBuffer = IMG_NULL; + + /* setup DCMapInfo */ + psDCMapInfo->psMemInfo = psMemInfo; + psDCMapInfo->psDeviceClassBuffer = psDeviceClassBuffer; + +#if defined(SUPPORT_MEMORY_TILING) + psDCMapInfo->psDeviceNode = psDeviceNode; + + if(psDCMapInfo->ui32TilingStride > 0) + { + /* try to acquire a tiling range on this device */ + eError = psDeviceNode->pfnAllocMemTilingRange(psDeviceNode, + psMemInfo, + psDCMapInfo->ui32TilingStride, + &psDCMapInfo->ui32RangeIndex); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceClassMemoryKM: AllocMemTilingRange failed")); + + if (psMemInfo->psKernelSyncInfo) + { + PVRSRVKernelSyncInfoDecRef(psMemInfo->psKernelSyncInfo, psMemInfo); + } + FreeDeviceMem(psMemInfo); + + /* FreeDeviceMem will free the meminfo so jump straight to the final exit */ + goto ErrorExitPhase1; + } + } +#endif + + /* Register Resource */ + psMemInfo->sMemBlk.hResItem = ResManRegisterRes(psPerProc->hResManContext, + RESMAN_TYPE_DEVICECLASSMEM_MAPPING, + psDCMapInfo, + 0, + &UnmapDeviceClassMemoryCallBack); + + (psDeviceClassBuffer->ui32MemMapRefCount)++; + PVRSRVKernelMemInfoIncRef(psMemInfo); + + psMemInfo->memType = PVRSRV_MEMTYPE_DEVICECLASS; +#if defined (MEM_TRACK_INFO_DEBUG) + OSStringCopy(psMemInfo->heapId, pszName); +#endif + + /* return the meminfo */ + *ppsMemInfo = psMemInfo; +#if defined (MEM_TRACK_INFO_DEBUG) + eError = OSAllocMem(PVRSRV_PAGEABLE_SELECT, + sizeof(PVRSRV_MEM_TRACK_INFO), + (IMG_VOID **)&psMemTrackInfo, IMG_NULL, + "Mem tracking info"); + if (eError != PVRSRV_OK) + return eError; + psMemTrackInfo->sDevVAddr = psMemInfo->sDevVAddr; + psMemTrackInfo->uSize = psMemInfo->uAllocSize; + psMemTrackInfo->ui32Pid = OSGetCurrentProcessIDKM(); + psMemTrackInfo->ui32RefCount = psMemInfo->ui32RefCount; + psMemTrackInfo->eOp = PVRSRV_MEMTYPE_DEVICECLASS; + psMemTrackInfo->ui32TimeStampUSecs = OSGetCurrentTimeInUSecsKM(); + + 
OSGetCurrentProcessNameKM(psMemTrackInfo->asTaskName, 128); + + OSStringCopy(psMemTrackInfo->heapId, psMemInfo->heapId); + PVRSRVAddMemTrackInfo(psMemTrackInfo); +#endif + +#if defined(SUPPORT_PDUMP_MULTI_PROCESS) + /* If the 3PDD supplies a kernel virtual address, we can PDUMP it */ + if(psMemInfo->pvLinAddrKM) + { + /* FIXME: + * Initialise the display surface here when it is mapped into Services. + * Otherwise there is a risk that pdump toolchain will assign previously + * used physical pages, leading to visual artefacts on the unrendered surface + * (e.g. during LLS rendering). + * + * A better method is to pdump the allocation from the DC driver, so the + * BM_Wrap pdumps only the virtual memory which better represents the driver + * behaviour. + */ + PDUMPCOMMENT("Dump display surface"); + PDUMPMEM(IMG_NULL, psMemInfo, uOffset, psMemInfo->uAllocSize, PDUMP_FLAGS_CONTINUOUS, ((BM_BUF*)psMemInfo->sMemBlk.hBuffer)->pMapping); + } +#endif + return PVRSRV_OK; + +ErrorExitPhase2: + OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(PVRSRV_KERNEL_MEM_INFO), psMemInfo, IMG_NULL); + +ErrorExitPhase1: + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DC_MAPINFO), psDCMapInfo, IMG_NULL); + return eError; +} + +#endif /* defined(SUPPORT_PVRSRV_DEVICE_CLASS) */ + +IMG_EXPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVChangeDeviceMemoryAttributesKM(IMG_HANDLE hKernelMemInfo, IMG_UINT32 ui32Attribs) +{ + PVRSRV_KERNEL_MEM_INFO *psKMMemInfo; + + if (hKernelMemInfo == IMG_NULL) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psKMMemInfo = (PVRSRV_KERNEL_MEM_INFO *)hKernelMemInfo; + + if (ui32Attribs & PVRSRV_CHANGEDEVMEM_ATTRIBS_CACHECOHERENT) + { + psKMMemInfo->ui32Flags |= PVRSRV_MEM_CACHE_CONSISTENT; + } + else + { + psKMMemInfo->ui32Flags &= ~PVRSRV_MEM_CACHE_CONSISTENT; + } + + return PVRSRV_OK; +} + +PVRSRV_ERROR IMG_CALLCONV PVRSRVInitDeviceMem(IMG_VOID) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + +#if defined(SUPPORT_ION) + /* + For Ion buffers we need to store which ones we know about so 
+ we don't give the same buffer a different sync + */ + g_psIonSyncHash = HASH_Create(ION_SYNC_HASH_SIZE); + if (!g_psIonSyncHash) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto Error; + } +#endif +#if defined(SUPPORT_DMABUF) + g_psDmaBufSyncHash = HASH_Create(DMABUF_SYNC_HASH_SIZE); + if (!g_psDmaBufSyncHash) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto Error; + } +#endif + +#if defined(SUPPORT_ION) || defined(SUPPORT_DMABUF) + return PVRSRV_OK; +Error: +#endif +#if defined(SUPPORT_DMABUF) + if (g_psDmaBufSyncHash) + { + HASH_Delete(g_psDmaBufSyncHash); + } +#endif +#if defined(SUPPORT_ION) + if (g_psIonSyncHash) + { + HASH_Delete(g_psIonSyncHash); + } +#endif + return eError; +} + +IMG_VOID IMG_CALLCONV PVRSRVDeInitDeviceMem(IMG_VOID) +{ +#if defined(SUPPORT_DMABUF) + HASH_Delete(g_psDmaBufSyncHash); +#endif +#if defined(SUPPORT_ION) + HASH_Delete(g_psIonSyncHash); +#endif +} + +#if defined(MEM_TRACK_INFO_DEBUG) +/*! +****************************************************************************** + + @Function PVRSRVFreeMemOps + + @Description + Frees the list of tracked mem ops represented by g_psMemTrackInfoHead + + @Input + @Output + + @Return + +******************************************************************************/ +IMG_VOID IMG_CALLCONV PVRSRVFreeMemOps(IMG_VOID) +{ + PVRSRV_MEM_TRACK_INFO *psFreePtr; + while(g_psMemTrackInfoHead) + { + psFreePtr = g_psMemTrackInfoHead; + g_psMemTrackInfoHead = g_psMemTrackInfoHead->next; + OSFreeMem(PVRSRV_PAGEABLE_SELECT, + sizeof(PVRSRV_MEM_TRACK_INFO), + psFreePtr, IMG_NULL); + } +} +#endif + +static PVRSRV_ERROR PVRSRVDumpSync(PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo) +{ + PVR_LOG(("\tSyncInfo %d:", psKernelSyncInfo->ui32UID)); + PVR_LOG(("\t\tWrite ops (0x%08x): P/C = %d/%d (0x%08x/0x%08x)", + psKernelSyncInfo->sWriteOpsCompleteDevVAddr.uiAddr, + psKernelSyncInfo->psSyncData->ui32WriteOpsPending, + psKernelSyncInfo->psSyncData->ui32WriteOpsComplete, + psKernelSyncInfo->psSyncData->ui32WriteOpsPending, 
+ psKernelSyncInfo->psSyncData->ui32WriteOpsComplete)); + PVR_LOG(("\t\tRead ops (0x%08x): P/C = %d/%d (0x%08x/0x%08x)", + psKernelSyncInfo->sReadOpsCompleteDevVAddr.uiAddr, + psKernelSyncInfo->psSyncData->ui32ReadOpsPending, + psKernelSyncInfo->psSyncData->ui32ReadOpsComplete, + psKernelSyncInfo->psSyncData->ui32ReadOpsPending, + psKernelSyncInfo->psSyncData->ui32ReadOpsComplete)); + PVR_LOG(("\t\tRead ops 2 (0x%08x): P/C = %d/%d (0x%08x/0x%08x)", + psKernelSyncInfo->sReadOps2CompleteDevVAddr.uiAddr, + psKernelSyncInfo->psSyncData->ui32ReadOps2Pending, + psKernelSyncInfo->psSyncData->ui32ReadOps2Complete, + psKernelSyncInfo->psSyncData->ui32ReadOps2Pending, + psKernelSyncInfo->psSyncData->ui32ReadOps2Complete)); + +#if defined(SUPPORT_PER_SYNC_DEBUG) + { + IMG_UINT32 i; + PVR_LOG(("\t\t --- Per sync debug ---")); + + PVR_LOG(("\t\tOperationMask = 0x%08x", psKernelSyncInfo->ui32OperationMask)); + + + for (i=0;iui32HistoryIndex) % PER_SYNC_HISTORY; + IMG_UINT32 ui32OpInfo = psKernelSyncInfo->aui32OpInfo[ui32Index]; + + if (ui32OpInfo & SYNC_OP_HAS_DATA) + { + IMG_UINT32 ui32OpClass = (ui32OpInfo & SYNC_OP_CLASS_MASK) >> SYNC_OP_CLASS_SHIFT; + IMG_UINT32 ui32OpType = (ui32OpInfo & SYNC_OP_TYPE_MASK) >> SYNC_OP_TYPE_SHIFT; + IMG_CHAR *pzClass; + IMG_CHAR *pzType; + + PVR_LOG(("\t\tOperation last - %d\n", PER_SYNC_HISTORY - i)); + + switch(ui32OpClass) + { + case SYNC_OP_CLASS_MODOBJ: + pzClass = "MODOBJ"; + break; + case SYNC_OP_CLASS_QUEUE: + pzClass = "QUEUE"; + break; + case SYNC_OP_CLASS_KICKTA: + pzClass = "KICKTA"; + break; + case SYNC_OP_CLASS_TQ_3D: + pzClass = "TQ_3D"; + break; + case SYNC_OP_CLASS_TQ_2D: + pzClass = "TQ_2D"; + break; + default: + pzClass = "Unknown"; + } + switch(ui32OpType) + { + case SYNC_OP_TYPE_READOP: + pzType = "READOP"; + break; + case SYNC_OP_TYPE_WRITEOP: + pzType = "WRITEOP"; + break; + case SYNC_OP_TYPE_READOP2: + pzType = "READOP2"; + break; + default: + pzType = "Unknown"; + } + PVR_LOG(("\t\t\tui32OpType = 0x%08x", 
ui32OpInfo)); + PVR_LOG(("\t\t\t\t%s, %s, %s, %s", + pzClass, + pzType, + (ui32OpInfo & SYNC_OP_TAKE) ?"TAKE":"No TAKE", + (ui32OpInfo & SYNC_OP_ROLLBACK) ?"ROLLBACK":"No ROLLBACK")); + + PVR_LOG(("\t\t\ti32ReadOpSample = %d (0x%08x)", + psKernelSyncInfo->aui32ReadOpSample[ui32Index], + psKernelSyncInfo->aui32ReadOpSample[ui32Index])); + PVR_LOG(("\t\t\taui32WriteOpSample = %d (0x%08x)", + psKernelSyncInfo->aui32WriteOpSample[ui32Index], + psKernelSyncInfo->aui32WriteOpSample[ui32Index])); + PVR_LOG(("\t\t\taui32ReadOp2Sample = %d (0x%08x)", + psKernelSyncInfo->aui32ReadOp2Sample[ui32Index], + psKernelSyncInfo->aui32ReadOp2Sample[ui32Index])); + } + } + } +#endif + return PVRSRV_OK; +} + + +static PVRSRV_ERROR PVRSRVDumpActiveSync(PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo) +{ + if ((psKernelSyncInfo->psSyncData->ui32WriteOpsComplete != psKernelSyncInfo->psSyncData->ui32WriteOpsPending) || + (psKernelSyncInfo->psSyncData->ui32ReadOpsComplete != psKernelSyncInfo->psSyncData->ui32ReadOpsPending) || + (psKernelSyncInfo->psSyncData->ui32ReadOps2Complete != psKernelSyncInfo->psSyncData->ui32ReadOps2Pending)) + { + PVRSRVDumpSync(psKernelSyncInfo); + } + return PVRSRV_OK; +} + + +IMG_EXPORT +IMG_VOID IMG_CALLCONV PVRSRVDumpSyncs(IMG_BOOL bActiveOnly) +{ + if (bActiveOnly) + { + PVR_LOG(("Active syncs")); + List_PVRSRV_KERNEL_SYNC_INFO_PVRSRV_ERROR_Any(g_psSyncInfoList, PVRSRVDumpActiveSync); + } + else + { + PVR_LOG(("All syncs")); + List_PVRSRV_KERNEL_SYNC_INFO_PVRSRV_ERROR_Any(g_psSyncInfoList, PVRSRVDumpSync); + } +} +/****************************************************************************** + End of file (devicemem.c) +******************************************************************************/ diff --git a/sgx_km/eurasia_km/services4/srvkm/common/handle.c b/sgx_km/eurasia_km/services4/srvkm/common/handle.c new file mode 100644 index 0000000..b1ac749 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/common/handle.c @@ -0,0 +1,2548 @@ 
+/*************************************************************************/ /*! +@Title Resource Handle Manager +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Provide resource handle management +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if defined(PVR_SECURE_HANDLES) +/* See handle.h for a description of the handle API. */ + +/* + * There is no locking here. It is assumed the code is used in a single + * threaded environment. In particular, it is assumed that the code will + * never be called from an interrupt handler. + * + * The implementation supports movable handle structures, allowing the address + * of a handle structure to change without having to fix up pointers in + * any of the handle structures. For example, the linked list mechanism + * used to link subhandles together uses handle array indices rather than + * pointers to the structures themselves. 
+ */ + +#include + +#include "services_headers.h" +#include "handle.h" + +#ifdef DEBUG +#define HANDLE_BLOCK_SHIFT 2 +#else +#define HANDLE_BLOCK_SHIFT 8 +#endif + +#define DIVIDE_BY_BLOCK_SIZE(i) (((IMG_UINT32)(i)) >> HANDLE_BLOCK_SHIFT) +#define MULTIPLY_BY_BLOCK_SIZE(i) (((IMG_UINT32)(i)) << HANDLE_BLOCK_SHIFT) + +#define HANDLE_BLOCK_SIZE MULTIPLY_BY_BLOCK_SIZE(1) +#define HANDLE_SUB_BLOCK_MASK (HANDLE_BLOCK_SIZE - 1) +#define HANDLE_BLOCK_MASK (~(HANDLE_SUB_BLOCK_MASK)) + +#define HANDLE_HASH_TAB_INIT_SIZE 32 + +#define INDEX_IS_VALID(psBase, i) ((i) < (psBase)->ui32TotalHandCount) + +/* Valid handles are never NULL, but handle array indices are based from 0 */ +#define INDEX_TO_HANDLE(i) ((IMG_HANDLE)((IMG_UINTPTR_T)(i) + 1)) +#define HANDLE_TO_INDEX(h) ((IMG_UINT32)(IMG_UINTPTR_T)(h) - 1) + + +#define INDEX_TO_BLOCK_INDEX(i) DIVIDE_BY_BLOCK_SIZE(i) +#define BLOCK_INDEX_TO_INDEX(i) MULTIPLY_BY_BLOCK_SIZE(i) +#define INDEX_TO_SUB_BLOCK_INDEX(i) ((i) & HANDLE_SUB_BLOCK_MASK) + +#define INDEX_TO_INDEX_STRUCT_PTR(psArray, i) (&((psArray)[INDEX_TO_BLOCK_INDEX(i)])) +#define BASE_AND_INDEX_TO_INDEX_STRUCT_PTR(psBase, i) INDEX_TO_INDEX_STRUCT_PTR((psBase)->psHandleArray, i) + +#define INDEX_TO_FREE_HAND_BLOCK_COUNT(psBase, i) (BASE_AND_INDEX_TO_INDEX_STRUCT_PTR(psBase, i)->ui32FreeHandBlockCount) + +#define INDEX_TO_HANDLE_STRUCT_PTR(psBase, i) (BASE_AND_INDEX_TO_INDEX_STRUCT_PTR(psBase, i)->psHandle + INDEX_TO_SUB_BLOCK_INDEX(i)) + +#define HANDLE_TO_HANDLE_STRUCT_PTR(psBase, h) (INDEX_TO_HANDLE_STRUCT_PTR(psBase, HANDLE_TO_INDEX(h))) + +#define HANDLE_PTR_TO_INDEX(psHandle) ((psHandle)->ui32Index) +#define HANDLE_PTR_TO_HANDLE(psHandle) INDEX_TO_HANDLE(HANDLE_PTR_TO_INDEX(psHandle)) + +#define ROUND_DOWN_TO_MULTIPLE_OF_BLOCK_SIZE(a) (HANDLE_BLOCK_MASK & (a)) +#define ROUND_UP_TO_MULTIPLE_OF_BLOCK_SIZE(a) ROUND_DOWN_TO_MULTIPLE_OF_BLOCK_SIZE((a) + HANDLE_BLOCK_SIZE - 1) + +#define DEFAULT_MAX_HANDLE 0x7fffffffu +#define DEFAULT_MAX_INDEX_PLUS_ONE 
ROUND_DOWN_TO_MULTIPLE_OF_BLOCK_SIZE(DEFAULT_MAX_HANDLE) + +#define HANDLES_BATCHED(psBase) ((psBase)->ui32HandBatchSize != 0) + +#define HANDLE_ARRAY_SIZE(handleCount) DIVIDE_BY_BLOCK_SIZE(ROUND_UP_TO_MULTIPLE_OF_BLOCK_SIZE(handleCount)) + +#define SET_FLAG(v, f) ((IMG_VOID)((v) |= (f))) +#define CLEAR_FLAG(v, f) ((IMG_VOID)((v) &= ~(f))) +#define TEST_FLAG(v, f) ((IMG_BOOL)(((v) & (f)) != 0)) + +#define TEST_ALLOC_FLAG(psHandle, f) TEST_FLAG((psHandle)->eFlag, f) + +#define SET_INTERNAL_FLAG(psHandle, f) SET_FLAG((psHandle)->eInternalFlag, f) +#define CLEAR_INTERNAL_FLAG(psHandle, f) CLEAR_FLAG((psHandle)->eInternalFlag, f) +#define TEST_INTERNAL_FLAG(psHandle, f) TEST_FLAG((psHandle)->eInternalFlag, f) + +#define BATCHED_HANDLE(psHandle) TEST_INTERNAL_FLAG(psHandle, INTERNAL_HANDLE_FLAG_BATCHED) + +#define SET_BATCHED_HANDLE(psHandle) SET_INTERNAL_FLAG(psHandle, INTERNAL_HANDLE_FLAG_BATCHED) + +#define SET_UNBATCHED_HANDLE(psHandle) CLEAR_INTERNAL_FLAG(psHandle, INTERNAL_HANDLE_FLAG_BATCHED) + +#define BATCHED_HANDLE_PARTIALLY_FREE(psHandle) TEST_INTERNAL_FLAG(psHandle, INTERNAL_HANDLE_FLAG_BATCHED_PARTIALLY_FREE) + +#define SET_BATCHED_HANDLE_PARTIALLY_FREE(psHandle) SET_INTERNAL_FLAG(psHandle, INTERNAL_HANDLE_FLAG_BATCHED_PARTIALLY_FREE) + +#define HANDLE_STRUCT_IS_FREE(psHandle) ((psHandle)->eType == PVRSRV_HANDLE_TYPE_NONE && (psHandle)->eInternalFlag == INTERNAL_HANDLE_FLAG_NONE) + +#ifdef MIN +#undef MIN +#endif + +#define MIN(x, y) (((x) < (y)) ? (x) : (y)) + +/* + * Linked list structure. Used for both the list head and list items. + * Array indices, rather than pointers, are used to point to the next and + * previous items on the list. 
 */
struct sHandleList
{
	IMG_UINT32 ui32Prev;	/* index of previous item; equals own index when list is empty */
	IMG_UINT32 ui32Next;	/* index of next item; equals own index when list is empty */
	IMG_HANDLE hParent;	/* owning (parent) handle, or IMG_NULL for an unlinked entry */
};

enum ePVRSRVInternalHandleFlag
{
	INTERNAL_HANDLE_FLAG_NONE =			0x00,
	INTERNAL_HANDLE_FLAG_BATCHED =			0x01,	/* allocated as part of a handle batch */
	INTERNAL_HANDLE_FLAG_BATCHED_PARTIALLY_FREE =	0x02,	/* freed whilst batch still open; final free at commit/release */
};

/* Handle structure */
struct sHandle
{
	/* Handle type; PVRSRV_HANDLE_TYPE_NONE means the slot is not in use */
	PVRSRV_HANDLE_TYPE eType;

	/* Pointer to the data that the handle represents */
	IMG_VOID *pvData;

	/*
	 * When handles are on the free list, the value of the "next index
	 * plus one field" has the following meaning:
	 * zero - next handle is the one that follows this one,
	 * nonzero - the index of the next handle is the value minus one.
	 * This scheme means handle space can be initialised to all zeros.
	 *
	 * When this field is used to link together handles on a list
	 * other than the free list, zero indicates the end of the
	 * list, with nonzero the same as above.
	 */
	IMG_UINT32 ui32NextIndexPlusOne;

	/* Internal flags (see enum ePVRSRVInternalHandleFlag) */
	enum ePVRSRVInternalHandleFlag eInternalFlag;

	/* Flags specified when the handle was allocated */
	PVRSRV_HANDLE_ALLOC_FLAG eFlag;

	/* Index of this handle in the handle array */
	IMG_UINT32 ui32Index;

	/* List head for subhandles of this handle */
	struct sHandleList sChildren;

	/* List entry for sibling subhandles */
	struct sHandleList sSiblings;
};

/* Handle array index structure.
 * The handle array is an array of index structures, reallocated as the number of
 * handles increases.
 * NOTE: There is one index structure per block of handles.
 */
struct sHandleIndex
{
	/* Pointer to first handle structure in the block */
	struct sHandle *psHandle;

	/* Block allocation cookie returned from OSAllocMem for the block of handles */
	IMG_HANDLE hBlockAlloc;

	/* Number of free handles in block */
	IMG_UINT32 ui32FreeHandBlockCount;
};

struct _PVRSRV_HANDLE_BASE_
{
	/* Handle returned from OSAllocMem for handle base allocation */
	IMG_HANDLE hBaseBlockAlloc;

	/* Handle returned from OSAllocMem for handle array allocation */
	IMG_HANDLE hArrayBlockAlloc;

	/* Pointer to array of pointers to handle structures */
	struct sHandleIndex *psHandleArray;

	/*
	 * Pointer to handle hash table.
	 * The hash table is used to do reverse lookups, converting data
	 * pointers to handles.
	 */
	HASH_TABLE *psHashTab;

	/* Number of free handles */
	IMG_UINT32 ui32FreeHandCount;

	/*
	 * If purging is not enabled, this is the array index of first free
	 * handle.
	 * If purging is enabled, this is the index to start searching for
	 * a free handle from. In this case it is usually zero, unless
	 * the handle array size has been increased due to lack of
	 * handles.
	 */
	IMG_UINT32 ui32FirstFreeIndex;

	/* Maximum handle index, plus one */
	IMG_UINT32 ui32MaxIndexPlusOne;

	/* Total number of handles, free and allocated */
	IMG_UINT32 ui32TotalHandCount;

	/*
	 * Index of the last free index, plus one. Not used if purging
	 * is enabled.
	 */
	IMG_UINT32 ui32LastFreeIndexPlusOne;

	/* Size of current handle batch, or zero if batching not enabled */
	IMG_UINT32 ui32HandBatchSize;

	/* Number of handles prior to start of current batch */
	IMG_UINT32 ui32TotalHandCountPreBatch;

	/* Index of first handle in batch, plus one */
	IMG_UINT32 ui32FirstBatchIndexPlusOne;

	/* Number of handle allocation failures in batch */
	IMG_UINT32 ui32BatchHandAllocFailures;

	/* Purging enabled.
	 * If purging is enabled, the size of the table can be reduced
	 * by removing free space at the end of the table. To make
	 * purging more likely to succeed, handles are allocated as
	 * far to the front of the table as possible. The first free
	 * handle is found by a linear search from the start of the table,
	 * and so no free handle list management is done.
	 */
	IMG_BOOL bPurgingEnabled;
};

/*
 * The key for the handle hash table is an array of three elements, the
 * pointer to the resource, the resource type, and the process ID. The
 * eHandKey enumeration gives the array indices of the elements making
 * up the key.
 */
enum eHandKey {
	HAND_KEY_DATA = 0,
	HAND_KEY_TYPE,
	HAND_KEY_PARENT,
	HAND_KEY_LEN		/* Must be last item in list */
};

/*
 * Kernel handle base structure. For handles that are not allocated on
 * behalf of a particular process
 */
PVRSRV_HANDLE_BASE *gpsKernelHandleBase = IMG_NULL;

/* HAND_KEY is the type of the hash table key */
typedef IMG_UINTPTR_T HAND_KEY[HAND_KEY_LEN];

/*!
******************************************************************************

 @Function	HandleListInit

 @Description	Initialise a linked list structure embedded in a handle
		structure.  A freshly initialised entry points at itself
		in both directions (the empty-list representation).

 @Input		ui32Index - index of handle in the handle array
		psList - pointer to linked list structure
		hParent - parent handle, or IMG_NULL

******************************************************************************/
#ifdef INLINE_IS_PRAGMA
#pragma inline(HandleListInit)
#endif
static INLINE
IMG_VOID HandleListInit(IMG_UINT32 ui32Index, struct sHandleList *psList, IMG_HANDLE hParent)
{
	psList->ui32Next = ui32Index;
	psList->ui32Prev = ui32Index;
	psList->hParent = hParent;
}

/*!
******************************************************************************

 @Function	InitParentList

 @Description	Initialise the children list head in a handle structure.
		The children are the subhandles of this handle.
+ + @Input psHandle - pointer to handle structure + +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(InitParentList) +#endif +static INLINE +IMG_VOID InitParentList(struct sHandle *psHandle) +{ + IMG_UINT32 ui32Parent = HANDLE_PTR_TO_INDEX(psHandle); + + HandleListInit(ui32Parent, &psHandle->sChildren, INDEX_TO_HANDLE(ui32Parent)); +} + +/*! +****************************************************************************** + + @Function InitChildEntry + + @Description Initialise the child list entry in a handle structure. + The list entry is used to link together subhandles of + a given handle. + + @Input psHandle - pointer to handle structure + +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(InitChildEntry) +#endif +static INLINE +IMG_VOID InitChildEntry(struct sHandle *psHandle) +{ + HandleListInit(HANDLE_PTR_TO_INDEX(psHandle), &psHandle->sSiblings, IMG_NULL); +} + +/*! +****************************************************************************** + + @Function HandleListIsEmpty + + @Description Determine whether a given linked list is empty. + + @Input ui32Index - index of the handle containing the list head + psList - pointer to the list head + + @Return IMG_TRUE if the list is empty, IMG_FALSE if it isn't. + +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(HandleListIsEmpty) +#endif +static INLINE +IMG_BOOL HandleListIsEmpty(IMG_UINT32 ui32Index, struct sHandleList *psList) +{ + IMG_BOOL bIsEmpty; + + bIsEmpty = (IMG_BOOL)(psList->ui32Next == ui32Index); + +#ifdef DEBUG + { + IMG_BOOL bIsEmpty2; + + bIsEmpty2 = (IMG_BOOL)(psList->ui32Prev == ui32Index); + PVR_ASSERT(bIsEmpty == bIsEmpty2); + } +#endif + + return bIsEmpty; +} + +#ifdef DEBUG +/*! 
+****************************************************************************** + + @Function NoChildren + + @Description Determine whether a handle has any subhandles + + @Input psHandle - pointer to handle structure + + @Return IMG_TRUE if the handle has no subhandles, IMG_FALSE if it does. + +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(NoChildren) +#endif +static INLINE +IMG_BOOL NoChildren(struct sHandle *psHandle) +{ + PVR_ASSERT(psHandle->sChildren.hParent == HANDLE_PTR_TO_HANDLE(psHandle)); + + return HandleListIsEmpty(HANDLE_PTR_TO_INDEX(psHandle), &psHandle->sChildren); +} + +/*! +****************************************************************************** + + @Function NoParent + + @Description Determine whether a handle is a subhandle + + @Input psHandle - pointer to handle structure + + @Return IMG_TRUE if the handle is not a subhandle, IMG_FALSE if it is. + +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(NoParent) +#endif +static INLINE +IMG_BOOL NoParent(struct sHandle *psHandle) +{ + if (HandleListIsEmpty(HANDLE_PTR_TO_INDEX(psHandle), &psHandle->sSiblings)) + { + PVR_ASSERT(psHandle->sSiblings.hParent == IMG_NULL); + + return IMG_TRUE; + } + else + { + PVR_ASSERT(psHandle->sSiblings.hParent != IMG_NULL); + } + return IMG_FALSE; +} +#endif /*DEBUG*/ +/*! +****************************************************************************** + + @Function ParentHandle + + @Description Determine the parent of a handle + + @Input psHandle - pointer to handle structure + + @Return Parent handle, or IMG_NULL if the handle is not a subhandle. 
+ +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(ParentHandle) +#endif +static INLINE +IMG_HANDLE ParentHandle(struct sHandle *psHandle) +{ + return psHandle->sSiblings.hParent; +} + +/* + * The LIST_PTR_FROM_INDEX_AND_OFFSET macro is used to generate either a + * pointer to the subhandle list head, or a pointer to the linked list + * structure of an item on a subhandle list. + * The list head is itself on the list, but is at a different offset + * in the handle structure to the linked list structure for items on + * the list. The two linked list structures are differentiated by + * the third parameter, containing the parent index. The parent field + * in the list head structure references the handle structure that contains + * it. For items on the list, the parent field in the linked list structure + * references the parent handle, which will be different from the handle + * containing the linked list structure. + */ +#define LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, i, p, po, eo) \ + ((struct sHandleList *)((IMG_CHAR *)(INDEX_TO_HANDLE_STRUCT_PTR(psBase, i)) + (((i) == (p)) ? (po) : (eo)))) + +/*! +****************************************************************************** + + @Function HandleListInsertBefore + + @Description Insert a handle before a handle currently on the list. 
+ + @Input ui32InsIndex - index of handle to be inserted after + psIns - pointer to handle structure to be inserted after + uiParentOffset - offset to list head struct in handle structure + ui32EntryIndex - index of handle to be inserted + psEntry - pointer to handle structure of item to be inserted + uiEntryOffset - offset of list item struct in handle structure + +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(HandleListInsertBefore) +#endif +static INLINE +IMG_VOID HandleListInsertBefore(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32InsIndex, struct sHandleList *psIns, IMG_SIZE_T uiParentOffset, IMG_UINT32 ui32EntryIndex, struct sHandleList *psEntry, IMG_SIZE_T uiEntryOffset, IMG_UINT32 ui32ParentIndex) +{ + /* PRQA S 3305 7 */ /*override stricter alignment warning */ + struct sHandleList *psPrevIns = LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, psIns->ui32Prev, ui32ParentIndex, uiParentOffset, uiEntryOffset); + + PVR_ASSERT(psEntry->hParent == IMG_NULL); + PVR_ASSERT(ui32InsIndex == psPrevIns->ui32Next); + PVR_ASSERT(LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, ui32ParentIndex, ui32ParentIndex, uiParentOffset, uiParentOffset)->hParent == INDEX_TO_HANDLE(ui32ParentIndex)); + + psEntry->ui32Prev = psIns->ui32Prev; + psIns->ui32Prev = ui32EntryIndex; + psEntry->ui32Next = ui32InsIndex; + psPrevIns->ui32Next = ui32EntryIndex; + + psEntry->hParent = INDEX_TO_HANDLE(ui32ParentIndex); +} + +/*! 
+****************************************************************************** + + @Function AdoptChild + + @Description Assign a subhandle to a handle + + @Input psParent - pointer to handle structure of parent handle + psChild - pointer to handle structure of child subhandle + +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(AdoptChild) +#endif +static INLINE +IMG_VOID AdoptChild(PVRSRV_HANDLE_BASE *psBase, struct sHandle *psParent, struct sHandle *psChild) +{ + IMG_UINT32 ui32Parent = HANDLE_TO_INDEX(psParent->sChildren.hParent); + + PVR_ASSERT(ui32Parent == HANDLE_PTR_TO_INDEX(psParent)); + + HandleListInsertBefore(psBase, ui32Parent, &psParent->sChildren, offsetof(struct sHandle, sChildren), HANDLE_PTR_TO_INDEX(psChild), &psChild->sSiblings, offsetof(struct sHandle, sSiblings), ui32Parent); + +} + +/*! +****************************************************************************** + + @Function HandleListRemove + + @Description Remove a handle from a list + + @Input ui32EntryIndex - index of handle to be removed + psEntry - pointer to handle structure of item to be removed + uiEntryOffset - offset of list item struct in handle structure + uiParentOffset - offset to list head struct in handle structure + +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(HandleListRemove) +#endif +static INLINE +IMG_VOID HandleListRemove(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32EntryIndex, struct sHandleList *psEntry, IMG_SIZE_T uiEntryOffset, IMG_SIZE_T uiParentOffset) +{ + if (!HandleListIsEmpty(ui32EntryIndex, psEntry)) + { + /* PRQA S 3305 3 */ /*override stricter alignment warning */ + struct sHandleList *psPrev = LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, psEntry->ui32Prev, HANDLE_TO_INDEX(psEntry->hParent), uiParentOffset, uiEntryOffset); + struct sHandleList *psNext = LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, psEntry->ui32Next, 
HANDLE_TO_INDEX(psEntry->hParent), uiParentOffset, uiEntryOffset); + + /* + * The list head is on the list, and we don't want to + * remove it. + */ + PVR_ASSERT(psEntry->hParent != IMG_NULL); + + psPrev->ui32Next = psEntry->ui32Next; + psNext->ui32Prev = psEntry->ui32Prev; + + HandleListInit(ui32EntryIndex, psEntry, IMG_NULL); + } +} + +/*! +****************************************************************************** + + @Function UnlinkFromParent + + @Description Remove a subhandle from its parents list + + @Input psHandle - pointer to handle structure of child subhandle + +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(UnlinkFromParent) +#endif +static INLINE +IMG_VOID UnlinkFromParent(PVRSRV_HANDLE_BASE *psBase, struct sHandle *psHandle) +{ + HandleListRemove(psBase, HANDLE_PTR_TO_INDEX(psHandle), &psHandle->sSiblings, offsetof(struct sHandle, sSiblings), offsetof(struct sHandle, sChildren)); +} + +/*! +****************************************************************************** + + @Function HandleListIterate + + @Description Iterate over the items in a list + + @Input psHead - pointer to list head + uiParentOffset - offset to list head struct in handle structure + uiEntryOffset - offset of list item struct in handle structure + pfnIterFunc - function to be called for each handle in the list + +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(HandleListIterate) +#endif +static INLINE +PVRSRV_ERROR HandleListIterate(PVRSRV_HANDLE_BASE *psBase, struct sHandleList *psHead, IMG_SIZE_T uiParentOffset, IMG_SIZE_T uiEntryOffset, PVRSRV_ERROR (*pfnIterFunc)(PVRSRV_HANDLE_BASE *, struct sHandle *)) +{ + IMG_UINT32 ui32Index; + IMG_UINT32 ui32Parent = HANDLE_TO_INDEX(psHead->hParent); + + PVR_ASSERT(psHead->hParent != IMG_NULL); + + /* + * Follow the next chain from the list head until we reach + * the list head again, 
which signifies the end of the list. + */ + for(ui32Index = psHead->ui32Next; ui32Index != ui32Parent; ) + { + struct sHandle *psHandle = INDEX_TO_HANDLE_STRUCT_PTR(psBase, ui32Index); + /* PRQA S 3305 2 */ /*override stricter alignment warning */ + struct sHandleList *psEntry = LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, ui32Index, ui32Parent, uiParentOffset, uiEntryOffset); + PVRSRV_ERROR eError; + + PVR_ASSERT(psEntry->hParent == psHead->hParent); + /* + * Get the next index now, in case the list item is + * modified by the iteration function. + */ + ui32Index = psEntry->ui32Next; + + eError = (*pfnIterFunc)(psBase, psHandle); + if (eError != PVRSRV_OK) + { + return eError; + } + } + + return PVRSRV_OK; +} + +/*! +****************************************************************************** + + @Function IterateOverChildren + + @Description Iterate over the subhandles of a parent handle + + @Input psParent - pointer to parent handle structure + pfnIterFunc - function to be called for each subhandle + +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(IterateOverChildren) +#endif +static INLINE +PVRSRV_ERROR IterateOverChildren(PVRSRV_HANDLE_BASE *psBase, struct sHandle *psParent, PVRSRV_ERROR (*pfnIterFunc)(PVRSRV_HANDLE_BASE *, struct sHandle *)) +{ + return HandleListIterate(psBase, &psParent->sChildren, offsetof(struct sHandle, sChildren), offsetof(struct sHandle, sSiblings), pfnIterFunc); +} + +/*! +****************************************************************************** + + @Function GetHandleStructure + + @Description Get the handle structure for a given handle + + @Input psBase - pointer to handle base structure + ppsHandle - location to return pointer to handle structure + hHandle - handle from client + eType - handle type or PVRSRV_HANDLE_TYPE_NONE if the + handle type is not to be checked. 
+ + @Output ppsHandle - points to a pointer to the handle structure + + @Return Error code or PVRSRV_OK + +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(GetHandleStructure) +#endif +static INLINE +PVRSRV_ERROR GetHandleStructure(PVRSRV_HANDLE_BASE *psBase, struct sHandle **ppsHandle, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType) +{ + IMG_UINT32 ui32Index = HANDLE_TO_INDEX(hHandle); + struct sHandle *psHandle; + + /* Check handle index is in range */ + if (!INDEX_IS_VALID(psBase, ui32Index)) + { + PVR_DPF((PVR_DBG_ERROR, "GetHandleStructure: Handle index out of range (%u >= %u)", ui32Index, psBase->ui32TotalHandCount)); + return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE; + } + + psHandle = INDEX_TO_HANDLE_STRUCT_PTR(psBase, ui32Index); + if (psHandle->eType == PVRSRV_HANDLE_TYPE_NONE) + { + PVR_DPF((PVR_DBG_ERROR, "GetHandleStructure: Handle not allocated (index: %u)", ui32Index)); + return PVRSRV_ERROR_HANDLE_NOT_ALLOCATED; + } + + /* + * Unless PVRSRV_HANDLE_TYPE_NONE was passed in to this function, + * check handle is of the correct type. + */ + if (eType != PVRSRV_HANDLE_TYPE_NONE && eType != psHandle->eType) + { + PVR_DPF((PVR_DBG_ERROR, "GetHandleStructure: Handle type mismatch (%d != %d)", eType, psHandle->eType)); + return PVRSRV_ERROR_HANDLE_TYPE_MISMATCH; + } + + /* Return the handle structure */ + *ppsHandle = psHandle; + + return PVRSRV_OK; +} + +/*! 
+****************************************************************************** + + @Function ParentIfPrivate + + @Description Return the parent handle if the handle was allocated + with PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE, else return + IMG_NULL + + @Input psHandle - pointer to handle + + @Return Parent handle, or IMG_NULL + +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(ParentIfPrivate) +#endif +static INLINE +IMG_HANDLE ParentIfPrivate(struct sHandle *psHandle) +{ + return TEST_ALLOC_FLAG(psHandle, PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE) ? + ParentHandle(psHandle) : IMG_NULL; +} + +/*! +****************************************************************************** + + @Function InitKey + + @Description Initialise a hash table key for the current process + + @Input psBase - pointer to handle base structure + aKey - pointer to key + pvData - pointer to the resource the handle represents + eType - type of resource + +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(InitKey) +#endif +static INLINE +IMG_VOID InitKey(HAND_KEY aKey, PVRSRV_HANDLE_BASE *psBase, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, IMG_HANDLE hParent) +{ + PVR_UNREFERENCED_PARAMETER(psBase); + + aKey[HAND_KEY_DATA] = (IMG_UINTPTR_T)pvData; + aKey[HAND_KEY_TYPE] = (IMG_UINTPTR_T)eType; + aKey[HAND_KEY_PARENT] = (IMG_UINTPTR_T)hParent; +} + +/*! +****************************************************************************** + + @Function ReallocHandleArray + + @Description Reallocate the handle array + + @Input psBase - handle base. + phBlockAlloc - pointer to block allocation handle. 
		ui32NewCount - new handle count
		ui32OldCount - old handle count

 @Return	Error code or PVRSRV_OK

******************************************************************************/
static
PVRSRV_ERROR ReallocHandleArray(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32NewCount)
{
	struct sHandleIndex *psOldArray = psBase->psHandleArray;
	IMG_HANDLE hOldArrayBlockAlloc = psBase->hArrayBlockAlloc;
	IMG_UINT32 ui32OldCount = psBase->ui32TotalHandCount;
	struct sHandleIndex *psNewArray = IMG_NULL;
	IMG_HANDLE hNewArrayBlockAlloc = IMG_NULL;
	PVRSRV_ERROR eError;
	PVRSRV_ERROR eReturn = PVRSRV_OK;
	IMG_UINT32 ui32Index;

	/* Nothing to do if the size is unchanged */
	if (ui32NewCount == ui32OldCount)
	{
		return PVRSRV_OK;
	}

	/* Shrinking the table is only permitted when purging is enabled */
	if (ui32NewCount != 0 && !psBase->bPurgingEnabled &&
		ui32NewCount < ui32OldCount)
	{
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	/* Both counts must be multiples of the handle block size */
	if (((ui32OldCount % HANDLE_BLOCK_SIZE) != 0) ||
		((ui32NewCount % HANDLE_BLOCK_SIZE) != 0))
	{
		PVR_ASSERT((ui32OldCount % HANDLE_BLOCK_SIZE) == 0);
		PVR_ASSERT((ui32NewCount % HANDLE_BLOCK_SIZE) == 0);

		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	if (ui32NewCount != 0)
	{
		/* Allocate new handle array */
		eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
			HANDLE_ARRAY_SIZE(ui32NewCount) * sizeof(struct sHandleIndex),
			(IMG_VOID **)&psNewArray,
			&hNewArrayBlockAlloc,
			"Memory Area");
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR, "ReallocHandleArray: Couldn't allocate new handle array (%d)", eError));
			eReturn = eError;
			goto error;
		}

		/* Carry over the index entries that survive the resize */
		if (ui32OldCount != 0)
		{
			OSMemCopy(psNewArray, psOldArray, HANDLE_ARRAY_SIZE(MIN(ui32NewCount, ui32OldCount)) * sizeof(struct sHandleIndex));
		}
	}

	/*
	 * If the new handle array is smaller than the old one, free
	 * unused handle structures
	 */
	/* NOTE(review): blocks are allocated from PVRSRV_OS_NON_PAGEABLE_HEAP but
	 * freed with PVRSRV_OS_PAGEABLE_HEAP here and below — presumably OSFreeMem
	 * ignores the heap type on this platform; confirm before changing. */
	for(ui32Index = ui32NewCount; ui32Index < ui32OldCount; ui32Index += HANDLE_BLOCK_SIZE)
	{
		struct sHandleIndex *psIndex = INDEX_TO_INDEX_STRUCT_PTR(psOldArray, ui32Index);

		eError = OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
				sizeof(struct sHandle) * HANDLE_BLOCK_SIZE,
				psIndex->psHandle,
				psIndex->hBlockAlloc);
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR, "ReallocHandleArray: Couldn't free handle structures (%d)", eError));
		}
	}

	/*
	 * If the new handle array is bigger than the old one, allocate
	 * new handle structures
	 */
	for(ui32Index = ui32OldCount; ui32Index < ui32NewCount; ui32Index += HANDLE_BLOCK_SIZE)
	{
		/* PRQA S 0505 1 */ /* psNewArray is never NULL, see assert earlier */
		struct sHandleIndex *psIndex = INDEX_TO_INDEX_STRUCT_PTR(psNewArray, ui32Index);

		eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
			sizeof(struct sHandle) * HANDLE_BLOCK_SIZE,
			(IMG_VOID **)&psIndex->psHandle,
			&psIndex->hBlockAlloc,
			"Memory Area");
		if (eError != PVRSRV_OK)
		{
			/* Record the failure but keep going so cleanup sees a consistent array */
			psIndex->psHandle = IMG_NULL;
			PVR_DPF((PVR_DBG_ERROR, "ReallocHandleArray: Couldn't allocate handle structures (%d)", eError));
			eReturn = eError;
		}
		else
		{
			IMG_UINT32 ui32SubIndex;

			psIndex->ui32FreeHandBlockCount = HANDLE_BLOCK_SIZE;

			/* Initialise every handle in the new block to the free state */
			for(ui32SubIndex = 0; ui32SubIndex < HANDLE_BLOCK_SIZE; ui32SubIndex++)
			{
				struct sHandle *psHandle = psIndex->psHandle + ui32SubIndex;

				psHandle->ui32Index = ui32SubIndex + ui32Index;
				psHandle->eType = PVRSRV_HANDLE_TYPE_NONE;
				psHandle->eInternalFlag = INTERNAL_HANDLE_FLAG_NONE;
				psHandle->ui32NextIndexPlusOne = 0;
			}
		}
	}
	if (eReturn != PVRSRV_OK)
	{
		goto error;
	}

#ifdef	DEBUG_MAX_HANDLE_COUNT
	/* Force handle failure to test error exit code */
	if (ui32NewCount > DEBUG_MAX_HANDLE_COUNT)
	{
		PVR_DPF((PVR_DBG_ERROR, "ReallocHandleArray: Max handle count (%u) reached", DEBUG_MAX_HANDLE_COUNT));
		eReturn = PVRSRV_ERROR_OUT_OF_MEMORY;
		goto error;
	}
#endif

	if (psOldArray != IMG_NULL)
	{
		/* Free old handle array */
		eError = OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
				HANDLE_ARRAY_SIZE(ui32OldCount) * sizeof(struct sHandleIndex),
				psOldArray,
				hOldArrayBlockAlloc);
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR, "ReallocHandleArray: Couldn't free old handle array (%d)", eError));
		}
	}

	/* Commit the new array to the base */
	psBase->psHandleArray = psNewArray;
	psBase->hArrayBlockAlloc = hNewArrayBlockAlloc;
	psBase->ui32TotalHandCount = ui32NewCount;

	if (ui32NewCount > ui32OldCount)
	{
		/* Check for wraparound */
		PVR_ASSERT(psBase->ui32FreeHandCount + (ui32NewCount - ui32OldCount) > psBase->ui32FreeHandCount);

		/* PRQA S 3382 1 */ /* ui32NewCount always > ui32OldCount */
		psBase->ui32FreeHandCount += (ui32NewCount - ui32OldCount);

		/*
		 * If purging is enabled, there is no free handle list
		 * management, but as an optimization, when allocating
		 * new handles, we use ui32FirstFreeIndex to point to
		 * the first handle in a newly allocated block.
		 */
		if (psBase->ui32FirstFreeIndex == 0)
		{
			PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne == 0);

			psBase->ui32FirstFreeIndex = ui32OldCount;
		}
		else
		{
			if (!psBase->bPurgingEnabled)
			{
				/* Append the new block to the free handle chain */
				PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne != 0);
				PVR_ASSERT(INDEX_TO_HANDLE_STRUCT_PTR(psBase, psBase->ui32LastFreeIndexPlusOne - 1)->ui32NextIndexPlusOne == 0);

				INDEX_TO_HANDLE_STRUCT_PTR(psBase, psBase->ui32LastFreeIndexPlusOne - 1)->ui32NextIndexPlusOne = ui32OldCount + 1;
			}
		}

		if (!psBase->bPurgingEnabled)
		{
			psBase->ui32LastFreeIndexPlusOne = ui32NewCount;
		}
	}
	else
	{
		/* Shrinking: only legal for purge (or full teardown to zero) */
		PVR_ASSERT(ui32NewCount == 0 || psBase->bPurgingEnabled);
		PVR_ASSERT(ui32NewCount == 0 || psBase->ui32FirstFreeIndex <= ui32NewCount);
		PVR_ASSERT(psBase->ui32FreeHandCount - (ui32OldCount - ui32NewCount) < psBase->ui32FreeHandCount);

		/* PRQA S 3382 1 */ /* ui32OldCount always >= ui32NewCount */
		psBase->ui32FreeHandCount -= (ui32OldCount - ui32NewCount);

		if (ui32NewCount == 0)
		{
			psBase->ui32FirstFreeIndex = 0;
			psBase->ui32LastFreeIndexPlusOne = 0;
		}
	}

	PVR_ASSERT(psBase->ui32FirstFreeIndex <= psBase->ui32TotalHandCount);

	return PVRSRV_OK;

error:
	PVR_ASSERT(eReturn != PVRSRV_OK);

	if (psNewArray != IMG_NULL)
	{
		/* Free any new handle structures that were allocated */
		for(ui32Index = ui32OldCount; ui32Index < ui32NewCount; ui32Index += HANDLE_BLOCK_SIZE)
		{
			struct sHandleIndex *psIndex = INDEX_TO_INDEX_STRUCT_PTR(psNewArray, ui32Index);
			if (psIndex->psHandle != IMG_NULL)
			{
				eError = OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
						sizeof(struct sHandle) * HANDLE_BLOCK_SIZE,
						psIndex->psHandle,
						psIndex->hBlockAlloc);
				if (eError != PVRSRV_OK)
				{
					PVR_DPF((PVR_DBG_ERROR, "ReallocHandleArray: Couldn't free handle structures (%d)", eError));
				}
			}
		}

		/* Free new handle array */
		eError = OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
				HANDLE_ARRAY_SIZE(ui32NewCount) * sizeof(struct sHandleIndex),
				psNewArray,
				hNewArrayBlockAlloc);
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR, "ReallocHandleArray: Couldn't free new handle array (%d)", eError));
		}
	}

	return eReturn;
}

/*!
******************************************************************************

 @Function	FreeHandleArray

 @Description	Frees the handle array.
		The memory containing the array of handle structure
		pointers is deallocated.

 @Input		psBase - pointer to handle base structure

 @Return	Error code or PVRSRV_OK

******************************************************************************/
static PVRSRV_ERROR FreeHandleArray(PVRSRV_HANDLE_BASE *psBase)
{
	/* Resizing to zero releases all blocks and the index array itself */
	return ReallocHandleArray(psBase, 0);
}

/*!
******************************************************************************

 @Function	FreeHandle

 @Description	Free a handle structure.
+
+ @Input psBase - pointer to handle base structure
+ psHandle - pointer to handle structure
+
+ @Return PVRSRV_OK or PVRSRV_ERROR
+
+******************************************************************************/
+static PVRSRV_ERROR FreeHandle(PVRSRV_HANDLE_BASE *psBase, struct sHandle *psHandle)
+{
+	HAND_KEY aKey;
+	IMG_UINT32 ui32Index = HANDLE_PTR_TO_INDEX(psHandle);
+	PVRSRV_ERROR eError;
+
+	/*
+	 * If a handle allocated in batch mode is freed whilst still
+	 * in batch mode, the type is set to PVRSRV_HANDLE_TYPE_NONE further
+	 * down, to indicate the handle will not be used, but not actually
+	 * freed. The Free is completed when this function is called a
+	 * second time as part of the batch commit or release.
+	 */
+
+	InitKey(aKey, psBase, psHandle->pvData, psHandle->eType, ParentIfPrivate(psHandle));
+
+	/*
+	 * MULTI handles are never entered in the hash table, and a
+	 * partially-freed batched handle was already removed on the
+	 * first pass, so only remove the hash entry in the other cases.
+	 */
+	if (!TEST_ALLOC_FLAG(psHandle, PVRSRV_HANDLE_ALLOC_FLAG_MULTI) && !BATCHED_HANDLE_PARTIALLY_FREE(psHandle))
+	{
+		IMG_HANDLE hHandle;
+		hHandle = (IMG_HANDLE) HASH_Remove_Extended(psBase->psHashTab, aKey);
+
+
+		PVR_ASSERT(hHandle != IMG_NULL);
+		PVR_ASSERT(hHandle == INDEX_TO_HANDLE(ui32Index));
+		/* hHandle is only used by the asserts above in release builds */
+		PVR_UNREFERENCED_PARAMETER(hHandle);
+	}
+
+	/* Unlink handle from parent */
+	UnlinkFromParent(psBase, psHandle);
+
+	/* Free children (recursive: FreeHandle is passed as the callback) */
+	eError = IterateOverChildren(psBase, psHandle, FreeHandle);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "FreeHandle: Error whilst freeing subhandles (%d)", eError));
+		return eError;
+	}
+
+	/*
+	 * Clear the type here, so that a handle can no longer be looked
+	 * up if it is only partially freed.
+	 */
+	psHandle->eType = PVRSRV_HANDLE_TYPE_NONE;
+
+	if (BATCHED_HANDLE(psHandle) && !BATCHED_HANDLE_PARTIALLY_FREE(psHandle))
+	{
+		/* PRQA S 1474,4130 1 */ /* ignore warnings about enum types being modified */
+		SET_BATCHED_HANDLE_PARTIALLY_FREE(psHandle);
+		/*
+		 * If the handle was allocated in batch mode, delay the free
+		 * until the batch commit or release.
+		 */
+		return PVRSRV_OK;
+	}
+
+	/* No free list management if purging is enabled */
+	if (!psBase->bPurgingEnabled)
+	{
+		if (psBase->ui32FreeHandCount == 0)
+		{
+			/* Free list was empty: this handle becomes its head */
+			PVR_ASSERT(psBase->ui32FirstFreeIndex == 0);
+			PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne == 0);
+
+			psBase->ui32FirstFreeIndex = ui32Index;
+		}
+		else
+		{
+			/*
+			 * Put the handle pointer on the end of the free
+			 * handle pointer linked list.
+			 */
+			PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne != 0);
+			PVR_ASSERT(INDEX_TO_HANDLE_STRUCT_PTR(psBase, psBase->ui32LastFreeIndexPlusOne - 1)->ui32NextIndexPlusOne == 0);
+			INDEX_TO_HANDLE_STRUCT_PTR(psBase, psBase->ui32LastFreeIndexPlusOne - 1)->ui32NextIndexPlusOne = ui32Index + 1;
+		}
+
+		PVR_ASSERT(psHandle->ui32NextIndexPlusOne == 0);
+
+		/* Update the end of the free handle linked list */
+		psBase->ui32LastFreeIndexPlusOne = ui32Index + 1;
+	}
+
+	/* Maintain global and per-block free counts */
+	psBase->ui32FreeHandCount++;
+	INDEX_TO_FREE_HAND_BLOCK_COUNT(psBase, ui32Index)++;
+
+	PVR_ASSERT(INDEX_TO_FREE_HAND_BLOCK_COUNT(psBase, ui32Index)<= HANDLE_BLOCK_SIZE);
+
+#ifdef DEBUG
+	/* Cross-check: sum of per-block free counts must match the total */
+	{
+		IMG_UINT32 ui32BlockedIndex;
+		IMG_UINT32 ui32FreeHandCount = 0;
+
+		for (ui32BlockedIndex = 0; ui32BlockedIndex < psBase->ui32TotalHandCount; ui32BlockedIndex += HANDLE_BLOCK_SIZE)
+		{
+			ui32FreeHandCount += INDEX_TO_FREE_HAND_BLOCK_COUNT(psBase, ui32BlockedIndex);
+		}
+
+		PVR_ASSERT(ui32FreeHandCount == psBase->ui32FreeHandCount);
+	}
+#endif
+
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function FreeAllHandles
+
+ @Description Free all handles for a given handle base. Walks every
+ slot in the handle array and frees any entry whose type is not
+ PVRSRV_HANDLE_TYPE_NONE (i.e. any live handle).
+
+ @Input psBase - pointer to handle base structure
+
+ @Return PVRSRV_OK or PVRSRV_ERROR
+
+******************************************************************************/
+static PVRSRV_ERROR FreeAllHandles(PVRSRV_HANDLE_BASE *psBase)
+{
+	IMG_UINT32 i;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	/* Fast path: every handle is already free, nothing to do */
+	if (psBase->ui32FreeHandCount == psBase->ui32TotalHandCount)
+	{
+		return eError;
+	}
+
+	for (i = 0; i < psBase->ui32TotalHandCount; i++)
+	{
+		struct sHandle *psHandle;
+
+		psHandle = INDEX_TO_HANDLE_STRUCT_PTR(psBase, i);
+
+		/* PVRSRV_HANDLE_TYPE_NONE marks an already-free slot */
+		if (psHandle->eType != PVRSRV_HANDLE_TYPE_NONE)
+		{
+			eError = FreeHandle(psBase, psHandle);
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "FreeAllHandles: FreeHandle failed (%d)", eError));
+				break;
+			}
+
+			/* Break out of loop if all the handles free.
+			 * (FreeHandle also frees subhandles, so more than one
+			 * slot may become free in a single iteration.) */
+			if (psBase->ui32FreeHandCount == psBase->ui32TotalHandCount)
+			{
+				break;
+			}
+		}
+	}
+
+	return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function FreeHandleBase
+
+ @Description Free a handle base.
+
+ @Input psHandleBase - pointer to handle base
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+static PVRSRV_ERROR FreeHandleBase(PVRSRV_HANDLE_BASE *psBase)
+{
+	PVRSRV_ERROR eError;
+
+	/* An open batch would leave batched handles behind; release it first */
+	if (HANDLES_BATCHED(psBase))
+	{
+		PVR_DPF((PVR_DBG_WARNING, "FreeHandleBase: Uncommitted/Unreleased handle batch"));
+		PVRSRVReleaseHandleBatch(psBase);
+	}
+
+	/* Free all allocated handles */
+	eError = FreeAllHandles(psBase);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "FreeHandleBase: Couldn't free handles (%d)", eError));
+		return eError;
+	}
+
+	/* Free the handle array */
+	eError = FreeHandleArray(psBase);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "FreeHandleBase: Couldn't free handle array (%d)", eError));
+		return eError;
+	}
+
+	if (psBase->psHashTab != IMG_NULL)
+	{
+		/* Free the hash table */
+		HASH_Delete(psBase->psHashTab);
+	}
+
+	/* Finally release the base structure itself */
+	eError = OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
+			sizeof(*psBase),
+			psBase,
+			psBase->hBaseBlockAlloc);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "FreeHandleBase: Couldn't free handle base (%d)", eError));
+		return eError;
+	}
+
+	return PVRSRV_OK;
+}
+
+/*!
+****************************************************************************** + + @Function FindHandle + + @Description Find handle corresponding to a resource pointer + + @Input psBase - pointer to handle base structure + pvData - pointer to resource to be associated with the handle + eType - the type of resource + + @Return the handle, or IMG_NULL if not found + +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(FindHandle) +#endif +static INLINE +IMG_HANDLE FindHandle(PVRSRV_HANDLE_BASE *psBase, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, IMG_HANDLE hParent) +{ + HAND_KEY aKey; + + PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE); + + InitKey(aKey, psBase, pvData, eType, hParent); + + return (IMG_HANDLE) HASH_Retrieve_Extended(psBase->psHashTab, aKey); +} + +/*! +****************************************************************************** + + @Function IncreaseHandleArraySize + + @Description Allocate some more free handles + + @Input psBase - pointer to handle base structure + ui32Delta - number of new handles required + + @Return Error code or PVRSRV_OK + +******************************************************************************/ +static PVRSRV_ERROR IncreaseHandleArraySize(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32Delta) +{ + PVRSRV_ERROR eError; + IMG_UINT32 ui32DeltaAdjusted = ROUND_UP_TO_MULTIPLE_OF_BLOCK_SIZE(ui32Delta); + IMG_UINT32 ui32NewTotalHandCount = psBase->ui32TotalHandCount + ui32DeltaAdjusted; + + PVR_ASSERT(ui32Delta != 0); + + /* + * Check new count against max handle index, and check for wrap around. 
+ */ + if (ui32NewTotalHandCount > psBase->ui32MaxIndexPlusOne || ui32NewTotalHandCount <= psBase->ui32TotalHandCount) + { + ui32NewTotalHandCount = psBase->ui32MaxIndexPlusOne; + + ui32DeltaAdjusted = ui32NewTotalHandCount - psBase->ui32TotalHandCount; + + if (ui32DeltaAdjusted < ui32Delta) + { + PVR_DPF((PVR_DBG_ERROR, "IncreaseHandleArraySize: Maximum handle limit reached (%d)", psBase->ui32MaxIndexPlusOne)); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + } + + PVR_ASSERT(ui32DeltaAdjusted >= ui32Delta); + + /* Realloc handle pointer array */ + eError = ReallocHandleArray(psBase, ui32NewTotalHandCount); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "IncreaseHandleArraySize: ReallocHandleArray failed (%d)", eError)); + return eError; + } + + return PVRSRV_OK; +} + +/*! +****************************************************************************** + + @Function EnsureFreeHandles + + @Description Ensure there are enough free handles + + @Input psBase - pointer to handle base structure + ui32Free - number of free handles required + + @Return Error code or PVRSRV_OK + +******************************************************************************/ +static PVRSRV_ERROR EnsureFreeHandles(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32Free) +{ + PVRSRV_ERROR eError; + + if (ui32Free > psBase->ui32FreeHandCount) + { + IMG_UINT32 ui32FreeHandDelta = ui32Free - psBase->ui32FreeHandCount; + eError = IncreaseHandleArraySize(psBase, ui32FreeHandDelta); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "EnsureFreeHandles: Couldn't allocate %u handles to ensure %u free handles (IncreaseHandleArraySize failed with error %d)", ui32FreeHandDelta, ui32Free, eError)); + + return eError; + } + } + + return PVRSRV_OK; +} + +/*! 
+****************************************************************************** + + @Function AllocHandle + + @Description Allocate a new handle + + @Input phHandle - location for new handle + pvData - pointer to resource to be associated with the handle + eType - the type of resource + hParent - parent handle or IMG_NULL + + @Output phHandle - points to new handle + + @Return Error code or PVRSRV_OK + +******************************************************************************/ +static PVRSRV_ERROR AllocHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent) +{ + IMG_UINT32 ui32NewIndex = DEFAULT_MAX_INDEX_PLUS_ONE; + struct sHandle *psNewHandle = IMG_NULL; + IMG_HANDLE hHandle; + HAND_KEY aKey; + PVRSRV_ERROR eError; + + /* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */ + PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE); + PVR_ASSERT(psBase != IMG_NULL); + PVR_ASSERT(psBase->psHashTab != IMG_NULL); + + if (!TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_MULTI)) + { + /* Handle must not already exist */ + PVR_ASSERT(FindHandle(psBase, pvData, eType, hParent) == IMG_NULL); + } + + if (psBase->ui32FreeHandCount == 0 && HANDLES_BATCHED(psBase)) + { + PVR_DPF((PVR_DBG_WARNING, "AllocHandle: Handle batch size (%u) was too small, allocating additional space", psBase->ui32HandBatchSize)); + } + + /* Ensure there is a free handle */ + eError = EnsureFreeHandles(psBase, 1); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "AllocHandle: EnsureFreeHandles failed (%d)", eError)); + return eError; + } + PVR_ASSERT(psBase->ui32FreeHandCount != 0); + + if (!psBase->bPurgingEnabled) + { + /* Array index of first free handle */ + ui32NewIndex = psBase->ui32FirstFreeIndex; + + /* Get handle array entry */ + psNewHandle = INDEX_TO_HANDLE_STRUCT_PTR(psBase, ui32NewIndex); + } + else + { + IMG_UINT32 ui32BlockedIndex; + + /* + * If purging is enabled, we always try to allocate 
handles + * at the front of the array, to increase the chances that + * the size of the handle array can be reduced by a purge. + * No linked list of free handles is kept; we search for + * free handles as required. + */ + + /* + * ui32FirstFreeIndex should only be set when a new batch of + * handle structures is allocated, and should always be a + * multiple of the block size. + */ + PVR_ASSERT((psBase->ui32FirstFreeIndex % HANDLE_BLOCK_SIZE) == 0); + + for (ui32BlockedIndex = ROUND_DOWN_TO_MULTIPLE_OF_BLOCK_SIZE(psBase->ui32FirstFreeIndex); ui32BlockedIndex < psBase->ui32TotalHandCount; ui32BlockedIndex += HANDLE_BLOCK_SIZE) + { + struct sHandleIndex *psIndex = BASE_AND_INDEX_TO_INDEX_STRUCT_PTR(psBase, ui32BlockedIndex); + + if (psIndex->ui32FreeHandBlockCount == 0) + { + continue; + } + + for (ui32NewIndex = ui32BlockedIndex; ui32NewIndex < ui32BlockedIndex + HANDLE_BLOCK_SIZE; ui32NewIndex++) + { + psNewHandle = INDEX_TO_HANDLE_STRUCT_PTR(psBase, ui32NewIndex); + if (HANDLE_STRUCT_IS_FREE(psNewHandle)) + { + break; + } + } + } + psBase->ui32FirstFreeIndex = 0; + PVR_ASSERT(ui32NewIndex < psBase->ui32TotalHandCount); + } + PVR_ASSERT(psNewHandle != IMG_NULL); + + /* Handle to be returned to client */ + hHandle = INDEX_TO_HANDLE(ui32NewIndex); + + /* + * If a data pointer can be associated with multiple handles, we + * don't put the handle in the hash table, as the data pointer + * may not map to a unique handle + */ + if (!TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_MULTI)) + { + /* Initialise hash key */ + InitKey(aKey, psBase, pvData, eType, hParent); + + /* Put the new handle in the hash table */ + if (!HASH_Insert_Extended(psBase->psHashTab, aKey, (IMG_UINTPTR_T)hHandle)) + { + PVR_DPF((PVR_DBG_ERROR, "AllocHandle: Couldn't add handle to hash table")); + + return PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE; + } + } + + psBase->ui32FreeHandCount--; + + PVR_ASSERT(INDEX_TO_FREE_HAND_BLOCK_COUNT(psBase, ui32NewIndex) <= HANDLE_BLOCK_SIZE); + 
PVR_ASSERT(INDEX_TO_FREE_HAND_BLOCK_COUNT(psBase, ui32NewIndex) > 0); + + INDEX_TO_FREE_HAND_BLOCK_COUNT(psBase, ui32NewIndex)--; + + /* No free list management if purging is enabled */ + if (!psBase->bPurgingEnabled) + { + /* Check whether the last free handle has been allocated */ + if (psBase->ui32FreeHandCount == 0) + { + PVR_ASSERT(psBase->ui32FirstFreeIndex == ui32NewIndex); + PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne == (ui32NewIndex + 1)); + + psBase->ui32LastFreeIndexPlusOne = 0; + psBase->ui32FirstFreeIndex = 0; + } + else + { + /* + * Update the first free handle index. + * If the "next free index plus one" field in the new + * handle structure is zero, the next free index is + * the index of the new handle plus one. This + * convention has been adopted to simplify the + * initialisation of freshly allocated handle + * space. + */ + psBase->ui32FirstFreeIndex = (psNewHandle->ui32NextIndexPlusOne == 0) ? + ui32NewIndex + 1 : + psNewHandle->ui32NextIndexPlusOne - 1; + } + } + + /* Initialise the newly allocated handle */ + PVR_ASSERT(psNewHandle->ui32Index == ui32NewIndex); + + /* PRQA S 0505 1 */ /* psNewHandle is never NULL, see assert earlier */ + psNewHandle->eType = eType; + psNewHandle->pvData = pvData; + psNewHandle->eInternalFlag = INTERNAL_HANDLE_FLAG_NONE; + psNewHandle->eFlag = eFlag; + + InitParentList(psNewHandle); +#if defined(DEBUG) + PVR_ASSERT(NoChildren(psNewHandle)); +#endif + + InitChildEntry(psNewHandle); +#if defined(DEBUG) + PVR_ASSERT(NoParent(psNewHandle)); +#endif + + if (HANDLES_BATCHED(psBase)) + { + /* Add handle to batch list */ + psNewHandle->ui32NextIndexPlusOne = psBase->ui32FirstBatchIndexPlusOne; + + psBase->ui32FirstBatchIndexPlusOne = ui32NewIndex + 1; + + /* PRQA S 1474 1 */ /* ignore warnings about enum types being modified */ + SET_BATCHED_HANDLE(psNewHandle); + } + else + { + psNewHandle->ui32NextIndexPlusOne = 0; + } + + /* Return the new handle to the client */ + *phHandle = hHandle; + + return PVRSRV_OK; +} + 
+/*! +****************************************************************************** + + @Function PVRSRVAllocHandle + + @Description Allocate a handle + + @Input phHandle - location for new handle + pvData - pointer to resource to be associated with the handle + eType - the type of resource + + @Output phHandle - points to new handle + + @Return Error code or PVRSRV_OK + +******************************************************************************/ +PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag) +{ + IMG_HANDLE hHandle; + PVRSRV_ERROR eError; + + *phHandle = IMG_NULL; + + if (HANDLES_BATCHED(psBase)) + { + /* + * Increment the counter in case of failure. It will be + * decremented on success. + */ + psBase->ui32BatchHandAllocFailures++; + } + + /* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */ + PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE); + + if (!TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_MULTI)) + { + /* See if there is already a handle for this data pointer */ + hHandle = FindHandle(psBase, pvData, eType, IMG_NULL); + if (hHandle != IMG_NULL) + { + struct sHandle *psHandle; + + eError = GetHandleStructure(psBase, &psHandle, hHandle, eType); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocHandle: Lookup of existing handle failed")); + return eError; + } + + /* + * If the client is willing to share a handle, and the + * existing handle is marked as shareable, return the + * existing handle. + */ + if (TEST_FLAG(psHandle->eFlag & eFlag, PVRSRV_HANDLE_ALLOC_FLAG_SHARED)) + { + *phHandle = hHandle; + eError = PVRSRV_OK; + goto exit_ok; + } + + return PVRSRV_ERROR_HANDLE_NOT_SHAREABLE; + } + } + + eError = AllocHandle(psBase, phHandle, pvData, eType, eFlag, IMG_NULL); + +exit_ok: + if (HANDLES_BATCHED(psBase) && (eError == PVRSRV_OK)) + { + psBase->ui32BatchHandAllocFailures--; + } + + return eError; +} + +/*! 
+****************************************************************************** + + @Function PVRSRVAllocSubHandle + + @Description Allocate a subhandle + + @Input phHandle - location for new subhandle + pvData - pointer to resource to be associated with the subhandle + eType - the type of resource + hParent - parent handle + + @Output phHandle - points to new subhandle + + @Return Error code or PVRSRV_OK + +******************************************************************************/ +PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent) +{ + struct sHandle *psPHand; + struct sHandle *psCHand; + PVRSRV_ERROR eError; + IMG_HANDLE hParentKey; + IMG_HANDLE hHandle; + + *phHandle = IMG_NULL; + + if (HANDLES_BATCHED(psBase)) + { + /* + * Increment the counter in case of failure. It will be + * decremented on success. + */ + psBase->ui32BatchHandAllocFailures++; + } + + /* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */ + PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE); + + hParentKey = TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE) ? 
+ hParent : IMG_NULL; + + /* Lookup the parent handle */ + eError = GetHandleStructure(psBase, &psPHand, hParent, PVRSRV_HANDLE_TYPE_NONE); + if (eError != PVRSRV_OK) + { + return eError; + } + + if (!TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_MULTI)) + { + /* See if there is already a handle for this data pointer */ + hHandle = FindHandle(psBase, pvData, eType, hParentKey); + if (hHandle != IMG_NULL) + { + struct sHandle *psCHandle; + PVRSRV_ERROR eErr; + + eErr = GetHandleStructure(psBase, &psCHandle, hHandle, eType); + if (eErr != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocSubHandle: Lookup of existing handle failed")); + return eErr; + } + + PVR_ASSERT(hParentKey != IMG_NULL && ParentHandle(HANDLE_TO_HANDLE_STRUCT_PTR(psBase, hHandle)) == hParent); + + /* + * If the client is willing to share a handle, the + * existing handle is marked as shareable, and the + * existing handle has the same parent, return the + * existing handle. + */ + if (TEST_FLAG(psCHandle->eFlag & eFlag, PVRSRV_HANDLE_ALLOC_FLAG_SHARED) && ParentHandle(HANDLE_TO_HANDLE_STRUCT_PTR(psBase, hHandle)) == hParent) + { + *phHandle = hHandle; + goto exit_ok; + } + return PVRSRV_ERROR_HANDLE_NOT_SHAREABLE; + } + } + + eError = AllocHandle(psBase, &hHandle, pvData, eType, eFlag, hParentKey); + if (eError != PVRSRV_OK) + { + return eError; + } + + /* + * Get the parent handle structure again, in case the handle + * structure has moved (depending on the implementation + * of AllocHandle). + */ + psPHand = HANDLE_TO_HANDLE_STRUCT_PTR(psBase, hParent); + + psCHand = HANDLE_TO_HANDLE_STRUCT_PTR(psBase, hHandle); + + AdoptChild(psBase, psPHand, psCHand); + + *phHandle = hHandle; + +exit_ok: + if (HANDLES_BATCHED(psBase)) + { + psBase->ui32BatchHandAllocFailures--; + } + + return PVRSRV_OK; +} + +/*! 
+******************************************************************************
+
+ @Function PVRSRVFindHandle
+
+ @Description Find handle corresponding to a resource pointer
+
+ @Input phHandle - location for returned handle
+ pvData - pointer to resource to be associated with the handle
+ eType - the type of resource
+
+ @Output phHandle - points to handle
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType)
+{
+	IMG_HANDLE hHandle;
+
+	/* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */
+	PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
+
+	/* See if there is a handle for this data pointer.
+	 * (Top-level lookup: the parent key is IMG_NULL.) */
+	hHandle = (IMG_HANDLE) FindHandle(psBase, pvData, eType, IMG_NULL);
+	if (hHandle == IMG_NULL)
+	{
+		return PVRSRV_ERROR_HANDLE_NOT_FOUND;
+	}
+
+	*phHandle = hHandle;
+
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVLookupHandleAnyType
+
+ @Description Lookup the data pointer and type corresponding to a handle
+
+ @Input ppvData - location to return data pointer
+ peType - location to return handle type
+ hHandle - handle from client
+
+ @Output ppvData - points to the data pointer
+ peType - points to handle type
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVLookupHandleAnyType(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, PVRSRV_HANDLE_TYPE *peType, IMG_HANDLE hHandle)
+{
+	struct sHandle *psHandle;
+	PVRSRV_ERROR eError;
+
+	/* Passing PVRSRV_HANDLE_TYPE_NONE disables the type check in
+	 * GetHandleStructure, so any handle type is accepted here */
+	eError = GetHandleStructure(psBase, &psHandle, hHandle, PVRSRV_HANDLE_TYPE_NONE);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVLookupHandleAnyType: Error looking up handle (%d)", eError));
+		OSDumpStack();
+		return eError;
+	}
+
+	*ppvData = psHandle->pvData;
+	*peType = psHandle->eType;
+
+	return PVRSRV_OK;
+}
+
+/*!
+****************************************************************************** + + @Function PVRSRVLookupHandle + + @Description Lookup the data pointer corresponding to a handle + + @Input ppvData - location to return data pointer + hHandle - handle from client + eType - handle type + + @Output ppvData - points to the data pointer + + @Return Error code or PVRSRV_OK + +******************************************************************************/ +PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType) +{ + struct sHandle *psHandle; + PVRSRV_ERROR eError; + + PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE); + + eError = GetHandleStructure(psBase, &psHandle, hHandle, eType); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVLookupHandle: Error looking up handle (%d)", eError)); + OSDumpStack(); + return eError; + } + + *ppvData = psHandle->pvData; + + return PVRSRV_OK; +} + +/*! +****************************************************************************** + + @Function PVRSRVLookupSubHandle + + @Description Lookup the data pointer corresponding to a subhandle + + @Input ppvData - location to return data pointer + hHandle - handle from client + eType - handle type + hAncestor - ancestor handle + + @Output ppvData - points to the data pointer + + @Return Error code or PVRSRV_OK + +******************************************************************************/ +PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType, IMG_HANDLE hAncestor) +{ + struct sHandle *psPHand; + struct sHandle *psCHand; + PVRSRV_ERROR eError; + + PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE); + + eError = GetHandleStructure(psBase, &psCHand, hHandle, eType); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVLookupSubHandle: Error looking up subhandle (%d)", eError)); + OSDumpStack(); + return eError; + } + + /* Look for 
hAncestor among the handle's ancestors */ + for (psPHand = psCHand; ParentHandle(psPHand) != hAncestor; ) + { + eError = GetHandleStructure(psBase, &psPHand, ParentHandle(psPHand), PVRSRV_HANDLE_TYPE_NONE); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVLookupSubHandle: Subhandle doesn't belong to given ancestor")); + return PVRSRV_ERROR_INVALID_SUBHANDLE; + } + } + + *ppvData = psCHand->pvData; + + return PVRSRV_OK; +} + +/*! +****************************************************************************** + + @Function PVRSRVGetParentHandle + + @Description Lookup the parent of a handle + + @Input phParent - location for returning parent handle + hHandle - handle for which the parent handle is required + eType - handle type + hParent - parent handle + + @Output *phParent - parent handle, or IMG_NULL if there is no parent + + @Return Error code or PVRSRV_OK. Note that not having a parent is + not regarded as an error. + +******************************************************************************/ +PVRSRV_ERROR PVRSRVGetParentHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *phParent, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType) +{ + struct sHandle *psHandle; + PVRSRV_ERROR eError; + + PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE); + + eError = GetHandleStructure(psBase, &psHandle, hHandle, eType); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVGetParentHandle: Error looking up subhandle (%d)", eError)); + OSDumpStack(); + return eError; + } + + *phParent = ParentHandle(psHandle); + + return PVRSRV_OK; +} + +/*! 
+******************************************************************************
+
+ @Function PVRSRVLookupAndReleaseHandle
+
+ @Description Lookup the data pointer corresponding to a handle, then
+ release the handle in the same operation.
+
+ @Input ppvData - location to return data pointer
+ hHandle - handle from client
+ eType - handle type
+
+ @Output ppvData - points to the data pointer
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVLookupAndReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType)
+{
+	struct sHandle *psHandle;
+	PVRSRV_ERROR eError;
+
+	/* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */
+	PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
+
+	eError = GetHandleStructure(psBase, &psHandle, hHandle, eType);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVLookupAndReleaseHandle: Error looking up handle (%d)", eError));
+		OSDumpStack();
+		return eError;
+	}
+
+	/* Capture the data pointer before the handle slot is recycled */
+	*ppvData = psHandle->pvData;
+
+	eError = FreeHandle(psBase, psHandle);
+
+	return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVReleaseHandle
+
+ @Description Release a handle that is no longer needed
+
+ @Input hHandle - handle from client
+ eType - handle type
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType)
+{
+	struct sHandle *psHandle;
+	PVRSRV_ERROR eError;
+
+	/* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */
+	PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
+
+	eError = GetHandleStructure(psBase, &psHandle, hHandle, eType);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVReleaseHandle: Error looking up handle (%d)", eError));
+		OSDumpStack();
+		return eError;
+	}
+
+	eError = FreeHandle(psBase, psHandle);
+
+	return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVNewHandleBatch
+
+ @Description Start a new handle batch. Pre-allocates enough free
+ handles for the batch so that subsequent batched allocations should
+ not fail.
+
+ @Input psBase - handle base
+ @Input ui32BatchSize - handle batch size
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVNewHandleBatch(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32BatchSize)
+{
+	PVRSRV_ERROR eError;
+
+	/* Nested batches are not supported */
+	if (HANDLES_BATCHED(psBase))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVNewHandleBatch: There is a handle batch already in use (size %u)", psBase->ui32HandBatchSize));
+		return PVRSRV_ERROR_HANDLE_BATCH_IN_USE;
+	}
+
+	if (ui32BatchSize == 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVNewHandleBatch: Invalid batch size (%u)", ui32BatchSize));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* Reserve the batch's worth of free handles up front */
+	eError = EnsureFreeHandles(psBase, ui32BatchSize);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVNewHandleBatch: EnsureFreeHandles failed (error %d)", eError));
+		return eError;
+	}
+
+	/* A non-zero batch size puts the base into batch mode
+	 * (asserted via HANDLES_BATCHED below) */
+	psBase->ui32HandBatchSize = ui32BatchSize;
+
+	/* Record current number of handles */
+	psBase->ui32TotalHandCountPreBatch = psBase->ui32TotalHandCount;
+
+	PVR_ASSERT(psBase->ui32BatchHandAllocFailures == 0);
+
+	PVR_ASSERT(psBase->ui32FirstBatchIndexPlusOne == 0);
+
+	PVR_ASSERT(HANDLES_BATCHED(psBase));
+
+	return PVRSRV_OK;
+}
+
+/*!
+****************************************************************************** + + @Function PVRSRVHandleBatchCommitOrRelease + + @Description Release a handle batch + + @Input psBase - handle base + bCommit - commit handles + + @Return none + +******************************************************************************/ +static PVRSRV_ERROR PVRSRVHandleBatchCommitOrRelease(PVRSRV_HANDLE_BASE *psBase, IMG_BOOL bCommit) +{ + + IMG_UINT32 ui32IndexPlusOne; + IMG_BOOL bCommitBatch = bCommit; + + if (!HANDLES_BATCHED(psBase)) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVHandleBatchCommitOrRelease: There is no handle batch")); + return PVRSRV_ERROR_INVALID_PARAMS; + + } + + if (psBase->ui32BatchHandAllocFailures != 0) + { + if (bCommit) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVHandleBatchCommitOrRelease: Attempting to commit batch with handle allocation failures.")); + } + bCommitBatch = IMG_FALSE; + } + /* + * The whole point of batched handles is to avoid handle allocation + * failures. + */ + PVR_ASSERT(psBase->ui32BatchHandAllocFailures == 0 || !bCommit); + + ui32IndexPlusOne = psBase->ui32FirstBatchIndexPlusOne; + while(ui32IndexPlusOne != 0) + { + struct sHandle *psHandle = INDEX_TO_HANDLE_STRUCT_PTR(psBase, ui32IndexPlusOne - 1); + IMG_UINT32 ui32NextIndexPlusOne = psHandle->ui32NextIndexPlusOne; + PVR_ASSERT(BATCHED_HANDLE(psHandle)); + + psHandle->ui32NextIndexPlusOne = 0; + + if (!bCommitBatch || BATCHED_HANDLE_PARTIALLY_FREE(psHandle)) + { + PVRSRV_ERROR eError; + + /* + * We need a complete free here. If the handle + * is not partially free, set the handle as + * unbatched to avoid a partial free. 
+ */ + if (!BATCHED_HANDLE_PARTIALLY_FREE(psHandle)) + { + /* PRQA S 1474,4130 1 */ /* ignore warnings about enum types being modified */ + SET_UNBATCHED_HANDLE(psHandle); /* PRQA S 4130 */ /* mis-use of enums FIXME*/ + } + + eError = FreeHandle(psBase, psHandle); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVHandleBatchCommitOrRelease: Error freeing handle (%d)", eError)); + } + PVR_ASSERT(eError == PVRSRV_OK); + } + else + { + /* PRQA S 1474,4130 1 */ /* ignore warnings about enum types being modified */ + SET_UNBATCHED_HANDLE(psHandle); + } + + ui32IndexPlusOne = ui32NextIndexPlusOne; + } + +#ifdef DEBUG + if (psBase->ui32TotalHandCountPreBatch != psBase->ui32TotalHandCount) + { + IMG_UINT32 ui32Delta = psBase->ui32TotalHandCount - psBase->ui32TotalHandCountPreBatch; + + PVR_ASSERT(psBase->ui32TotalHandCount > psBase->ui32TotalHandCountPreBatch); + + PVR_DPF((PVR_DBG_WARNING, "PVRSRVHandleBatchCommitOrRelease: The batch size was too small. Batch size was %u, but needs to be %u", psBase->ui32HandBatchSize, psBase->ui32HandBatchSize + ui32Delta)); + + } +#endif + + psBase->ui32HandBatchSize = 0; + psBase->ui32FirstBatchIndexPlusOne = 0; + psBase->ui32TotalHandCountPreBatch = 0; + psBase->ui32BatchHandAllocFailures = 0; + + if (psBase->ui32BatchHandAllocFailures != 0 && bCommit) + { + PVR_ASSERT(!bCommitBatch); + + return PVRSRV_ERROR_HANDLE_BATCH_COMMIT_FAILURE; + } + + return PVRSRV_OK; +} + +/*! +****************************************************************************** + + @Function PVRSRVCommitHandleBatch + + @Description Commit a handle batch + + @Input psBase - handle base + + @Return Error code or PVRSRV_OK + +******************************************************************************/ +PVRSRV_ERROR PVRSRVCommitHandleBatch(PVRSRV_HANDLE_BASE *psBase) +{ + return PVRSRVHandleBatchCommitOrRelease(psBase, IMG_TRUE); +} + +/*! 
+******************************************************************************
+
+ @Function	PVRSRVReleaseHandleBatch
+
+ @Description	Release a handle batch
+
+ @Input		psBase - handle base
+
+ @Return	none
+
+******************************************************************************/
+IMG_VOID PVRSRVReleaseHandleBatch(PVRSRV_HANDLE_BASE *psBase)
+{
+	(IMG_VOID) PVRSRVHandleBatchCommitOrRelease(psBase, IMG_FALSE);
+}
+
+/*!
+******************************************************************************
+
+ @Function	PVRSRVSetMaxHandle
+
+ @Description	Set maximum handle number for given handle base
+
+ @Input		psBase - pointer to handle base structure
+		ui32MaxHandle - Maximum handle number
+
+ @Return	Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVSetMaxHandle(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32MaxHandle)
+{
+	IMG_UINT32 ui32MaxHandleRounded;
+
+	/* The limit cannot be changed while a handle batch is open */
+	if (HANDLES_BATCHED(psBase))
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVSetMaxHandle: Limit cannot be set whilst in batch mode"));
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* Validate the limit */
+	/* NOTE(review): the message below quotes 0 as the lower bound, but 0 is
+	 * rejected by this check, so the effective valid range is [1, max] */
+	if (ui32MaxHandle == 0 || ui32MaxHandle > DEFAULT_MAX_HANDLE)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVSetMaxHandle: Limit must be between %u and %u, inclusive", 0, DEFAULT_MAX_HANDLE));
+
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* The limit can only be set if no handles have been allocated */
+	if (psBase->ui32TotalHandCount != 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVSetMaxHandle: Limit cannot be set because handles have already been allocated"));
+
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	ui32MaxHandleRounded = ROUND_DOWN_TO_MULTIPLE_OF_BLOCK_SIZE(ui32MaxHandle);
+
+	/*
+	 * Allow the maximum number of handles to be reduced, but never to
+	 * zero.
+ */ + if (ui32MaxHandleRounded != 0 && ui32MaxHandleRounded < psBase->ui32MaxIndexPlusOne) + { + psBase->ui32MaxIndexPlusOne = ui32MaxHandleRounded; + } + + PVR_ASSERT(psBase->ui32MaxIndexPlusOne != 0); + PVR_ASSERT(psBase->ui32MaxIndexPlusOne <= DEFAULT_MAX_INDEX_PLUS_ONE); + PVR_ASSERT((psBase->ui32MaxIndexPlusOne % HANDLE_BLOCK_SIZE) == 0); + + return PVRSRV_OK; +} + +/*! +****************************************************************************** + + @Function PVRSRVGetMaxHandle + + @Description Get maximum handle number for given handle base + + @Input psBase - pointer to handle base structure + + @Output Maximum handle number, or 0 if handle limits not + supported. + + @Return Error code or PVRSRV_OK + +******************************************************************************/ +IMG_UINT32 PVRSRVGetMaxHandle(PVRSRV_HANDLE_BASE *psBase) +{ + return psBase->ui32MaxIndexPlusOne; +} + +/*! +****************************************************************************** + + @Function PVRSRVEnableHandlePurging + + @Description Enable purging for a given handle base + + @Input psBase - pointer to handle base structure + + @Return Error code or PVRSRV_OK + +******************************************************************************/ +PVRSRV_ERROR PVRSRVEnableHandlePurging(PVRSRV_HANDLE_BASE *psBase) +{ + if (psBase->bPurgingEnabled) + { + PVR_DPF((PVR_DBG_WARNING, "PVRSRVEnableHandlePurging: Purging already enabled")); + return PVRSRV_OK; + } + + /* Purging can only be enabled if no handles have been allocated */ + if (psBase->ui32TotalHandCount != 0) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVEnableHandlePurging: Handles have already been allocated")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psBase->bPurgingEnabled = IMG_TRUE; + + return PVRSRV_OK; +} + +/*! 
+****************************************************************************** + + @Function PVRSRVPurgeHandles + + @Description Purge handles for a given handle base + + @Input psBase - pointer to handle base structure + + @Return Error code or PVRSRV_OK + +******************************************************************************/ +PVRSRV_ERROR PVRSRVPurgeHandles(PVRSRV_HANDLE_BASE *psBase) +{ + IMG_UINT32 ui32BlockIndex; + IMG_UINT32 ui32NewHandCount; + + if (!psBase->bPurgingEnabled) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVPurgeHandles: Purging not enabled for this handle base")); + return PVRSRV_ERROR_NOT_SUPPORTED; + } + + if (HANDLES_BATCHED(psBase)) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVPurgeHandles: Purging not allowed whilst in batch mode")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + PVR_ASSERT((psBase->ui32TotalHandCount % HANDLE_BLOCK_SIZE) == 0); + + for (ui32BlockIndex = INDEX_TO_BLOCK_INDEX(psBase->ui32TotalHandCount); ui32BlockIndex != 0; ui32BlockIndex--) + { + if (psBase->psHandleArray[ui32BlockIndex - 1].ui32FreeHandBlockCount != HANDLE_BLOCK_SIZE) + { + break; + } + } + ui32NewHandCount = BLOCK_INDEX_TO_INDEX(ui32BlockIndex); + + /* + * Check for a suitable decrease in the handle count. + */ + if (ui32NewHandCount <= (psBase->ui32TotalHandCount/2)) + { + PVRSRV_ERROR eError; + + // PVR_TRACE((" PVRSRVPurgeHandles: reducing number of handles from %u to %u", psBase->ui32TotalHandCount, ui32NewHandCount)); + + eError = ReallocHandleArray(psBase, ui32NewHandCount); + if (eError != PVRSRV_OK) + { + return eError; + } + } + + return PVRSRV_OK; +} + +/*! 
+****************************************************************************** + + @Function PVRSRVAllocHandleBase + + @Description Allocate a handle base structure for a process + + @Input ppsBase - pointer to handle base structure pointer + + @Output ppsBase - points to handle base structure pointer + + @Return Error code or PVRSRV_OK + +******************************************************************************/ +PVRSRV_ERROR PVRSRVAllocHandleBase(PVRSRV_HANDLE_BASE **ppsBase) +{ + PVRSRV_HANDLE_BASE *psBase; + IMG_HANDLE hBlockAlloc; + PVRSRV_ERROR eError; + + eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, + sizeof(*psBase), + (IMG_PVOID *)&psBase, + &hBlockAlloc, + "Handle Base"); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocHandleBase: Couldn't allocate handle base (%d)", eError)); + return eError; + } + OSMemSet(psBase, 0, sizeof(*psBase)); + + /* Create hash table */ + psBase->psHashTab = HASH_Create_Extended(HANDLE_HASH_TAB_INIT_SIZE, sizeof(HAND_KEY), HASH_Func_Default, HASH_Key_Comp_Default); + if (psBase->psHashTab == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocHandleBase: Couldn't create data pointer hash table\n")); + (IMG_VOID)PVRSRVFreeHandleBase(psBase); + return PVRSRV_ERROR_UNABLE_TO_CREATE_HASH_TABLE; + } + + psBase->hBaseBlockAlloc = hBlockAlloc; + + psBase->ui32MaxIndexPlusOne = DEFAULT_MAX_INDEX_PLUS_ONE; + + *ppsBase = psBase; + + return PVRSRV_OK; +} + +/*! 
+****************************************************************************** + + @Function PVRSRVFreeHandleBase + + @Description Free a handle base structure + + @Input psBase - pointer to handle base structure + + @Return Error code or PVRSRV_OK + +******************************************************************************/ +PVRSRV_ERROR PVRSRVFreeHandleBase(PVRSRV_HANDLE_BASE *psBase) +{ + PVRSRV_ERROR eError; + + PVR_ASSERT(psBase != gpsKernelHandleBase); + + eError = FreeHandleBase(psBase); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVFreeHandleBase: FreeHandleBase failed (%d)", eError)); + } + + return eError; +} + +/*! +****************************************************************************** + + @Function PVRSRVHandleInit + + @Description Initialise handle management + + @Return Error code or PVRSRV_OK + +******************************************************************************/ +PVRSRV_ERROR PVRSRVHandleInit(IMG_VOID) +{ + PVRSRV_ERROR eError; + + PVR_ASSERT(gpsKernelHandleBase == IMG_NULL); + + eError = PVRSRVAllocHandleBase(&gpsKernelHandleBase); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVHandleInit: PVRSRVAllocHandleBase failed (%d)", eError)); + goto error; + } + + eError = PVRSRVEnableHandlePurging(gpsKernelHandleBase); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVHandleInit: PVRSRVEnableHandlePurging failed (%d)", eError)); + goto error; + } + + return PVRSRV_OK; +error: + (IMG_VOID) PVRSRVHandleDeInit(); + return eError; +} + +/*! 
+****************************************************************************** + + @Function PVRSRVHandleDeInit + + @Description De-initialise handle management + + @Return Error code or PVRSRV_OK + +******************************************************************************/ +PVRSRV_ERROR PVRSRVHandleDeInit(IMG_VOID) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + if (gpsKernelHandleBase != IMG_NULL) + { + eError = FreeHandleBase(gpsKernelHandleBase); + if (eError == PVRSRV_OK) + { + gpsKernelHandleBase = IMG_NULL; + } + else + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVHandleDeInit: FreeHandleBase failed (%d)", eError)); + } + } + + return eError; +} +#else +/* disable warning about empty module */ +#endif /* #if defined(PVR_SECURE_HANDLES) */ +/****************************************************************************** + End of file (handle.c) +******************************************************************************/ diff --git a/sgx_km/eurasia_km/services4/srvkm/common/hash.c b/sgx_km/eurasia_km/services4/srvkm/common/hash.c new file mode 100644 index 0000000..8dcedf9 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/common/hash.c @@ -0,0 +1,739 @@ +/*************************************************************************/ /*! +@Title Self scaling hash tables. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description + Implements simple self scaling hash tables. Hash collisions are + handled by chaining entries together. Hash tables are increased in + size when they become more than (50%?) full and decreased in size + when less than (25%?) full. Hash tables are never decreased below + their initial size. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "pvr_debug.h" +#include "img_defs.h" +#include "services.h" +#include "servicesint.h" +#include "hash.h" +#include "osfunc.h" + +#define PRIVATE_MAX(a,b) ((a)>(b)?(a):(b)) + +#define KEY_TO_INDEX(pHash, key, uSize) \ + ((pHash)->pfnHashFunc((pHash)->uKeySize, (key), (uSize)) % (uSize)) + +#define KEY_COMPARE(pHash, pKey1, pKey2) \ + ((pHash)->pfnKeyComp((pHash)->uKeySize, (pKey1), (pKey2))) + +/* Each entry in a hash table is placed into a bucket */ +struct _BUCKET_ +{ + /* the next bucket on the same chain */ + struct _BUCKET_ *pNext; + + /* entry value */ + IMG_UINTPTR_T v; + + /* entry key */ + IMG_UINTPTR_T k[]; /* PRQA S 0642 */ /* override dynamic array declaration warning */ +}; +typedef struct _BUCKET_ BUCKET; + +struct _HASH_TABLE_ +{ + /* the hash table array */ + BUCKET **ppBucketTable; + + /* current size of the hash table */ + IMG_UINT32 uSize; + + /* number of entries currently in the hash table */ + IMG_UINT32 uCount; + + /* the minimum size that the hash table should be re-sized to */ + IMG_UINT32 uMinimumSize; + + /* size of key in bytes */ + IMG_UINT32 uKeySize; + + /* hash function */ + HASH_FUNC *pfnHashFunc; + + /* key comparison function */ + HASH_KEY_COMP *pfnKeyComp; +}; + +/*! +****************************************************************************** + @Function HASH_Func_Default + + @Description Hash function intended for hashing keys composed of + IMG_UINTPTR_T arrays. + + @Input uKeySize - the size of the hash key, in bytes. + @Input pKey - a pointer to the key to hash. + @Input uHashTabLen - the length of the hash table. + + @Return the hash value. 
+******************************************************************************/ +IMG_UINT32 +HASH_Func_Default (IMG_SIZE_T uKeySize, IMG_VOID *pKey, IMG_UINT32 uHashTabLen) +{ + IMG_UINTPTR_T *p = (IMG_UINTPTR_T *)pKey; + IMG_UINT32 uKeyLen = (IMG_UINT32)(uKeySize / sizeof(IMG_UINTPTR_T)); + IMG_UINT32 ui; + IMG_UINT32 uHashKey = 0; + + PVR_UNREFERENCED_PARAMETER(uHashTabLen); + + PVR_ASSERT((uKeySize % sizeof(IMG_UINTPTR_T)) == 0); + + for (ui = 0; ui < uKeyLen; ui++) + { + IMG_UINT32 uHashPart = (IMG_UINT32)*p++; + + uHashPart += (uHashPart << 12); + uHashPart ^= (uHashPart >> 22); + uHashPart += (uHashPart << 4); + uHashPart ^= (uHashPart >> 9); + uHashPart += (uHashPart << 10); + uHashPart ^= (uHashPart >> 2); + uHashPart += (uHashPart << 7); + uHashPart ^= (uHashPart >> 12); + + uHashKey += uHashPart; + } + + return uHashKey; +} + +/*! +****************************************************************************** + @Function HASH_Key_Comp_Default + + @Description Compares keys composed of IMG_UINTPTR_T arrays. + + @Input uKeySize - the size of the hash key, in bytes. + @Input pKey1 - pointer to first hash key to compare. + @Input pKey2 - pointer to second hash key to compare. + @Return IMG_TRUE - the keys match. + IMG_FALSE - the keys don't match. +******************************************************************************/ +IMG_BOOL +HASH_Key_Comp_Default (IMG_SIZE_T uKeySize, IMG_VOID *pKey1, IMG_VOID *pKey2) +{ + IMG_UINTPTR_T *p1 = (IMG_UINTPTR_T *)pKey1; + IMG_UINTPTR_T *p2 = (IMG_UINTPTR_T *)pKey2; + IMG_UINT32 uKeyLen = (IMG_UINT32)(uKeySize / sizeof(IMG_UINTPTR_T)); + IMG_UINT32 ui; + + PVR_ASSERT((uKeySize % sizeof(IMG_UINTPTR_T)) == 0); + + for (ui = 0; ui < uKeyLen; ui++) + { + if (*p1++ != *p2++) + return IMG_FALSE; + } + + return IMG_TRUE; +} + +/*! +****************************************************************************** + @Function _ChainInsert + + @Description Insert a bucket into the appropriate hash table chain. 
+ + @Input pBucket - the bucket + @Input ppBucketTable - the hash table + @Input uSize - the size of the hash table + + @Return PVRSRV_ERROR +******************************************************************************/ +static PVRSRV_ERROR +_ChainInsert (HASH_TABLE *pHash, BUCKET *pBucket, BUCKET **ppBucketTable, IMG_UINT32 uSize) +{ + IMG_UINT32 uIndex; + + PVR_ASSERT (pBucket != IMG_NULL); + PVR_ASSERT (ppBucketTable != IMG_NULL); + PVR_ASSERT (uSize != 0); + + if ((pBucket == IMG_NULL) || (ppBucketTable == IMG_NULL) || (uSize == 0)) + { + PVR_DPF((PVR_DBG_ERROR, "_ChainInsert: invalid parameter")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + uIndex = KEY_TO_INDEX(pHash, pBucket->k, uSize); /* PRQA S 0432,0541 */ /* ignore dynamic array warning */ + pBucket->pNext = ppBucketTable[uIndex]; + ppBucketTable[uIndex] = pBucket; + + return PVRSRV_OK; +} + +/*! +****************************************************************************** + @Function _Rehash + + @Description Iterate over every entry in an old hash table and + rehash into the new table. + + @Input ppOldTable - the old hash table + @Input uOldSize - the size of the old hash table + @Input ppNewTable - the new hash table + @Input uNewSize - the size of the new hash table + + @Return None +******************************************************************************/ +static PVRSRV_ERROR +_Rehash (HASH_TABLE *pHash, + BUCKET **ppOldTable, IMG_UINT32 uOldSize, + BUCKET **ppNewTable, IMG_UINT32 uNewSize) +{ + IMG_UINT32 uIndex; + for (uIndex=0; uIndex< uOldSize; uIndex++) + { + BUCKET *pBucket; + pBucket = ppOldTable[uIndex]; + while (pBucket != IMG_NULL) + { + PVRSRV_ERROR eError; + BUCKET *pNextBucket = pBucket->pNext; + eError = _ChainInsert (pHash, pBucket, ppNewTable, uNewSize); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "_Rehash: call to _ChainInsert failed")); + return eError; + } + pBucket = pNextBucket; + } + } + return PVRSRV_OK; +} + +/*! 
+****************************************************************************** + @Function _Resize + + @Description Attempt to resize a hash table, failure to allocate a + new larger hash table is not considered a hard failure. + We simply continue and allow the table to fill up, the + effect is to allow hash chains to become longer. + + @Input pHash - Hash table to resize. + @Input uNewSize - Required table size. + @Return IMG_TRUE Success + IMG_FALSE Failed +******************************************************************************/ +static IMG_BOOL +_Resize (HASH_TABLE *pHash, IMG_UINT32 uNewSize) +{ + if (uNewSize != pHash->uSize) + { + BUCKET **ppNewTable; + IMG_UINT32 uIndex; + + PVR_DPF ((PVR_DBG_MESSAGE, + "HASH_Resize: oldsize=0x%x newsize=0x%x count=0x%x", + pHash->uSize, uNewSize, pHash->uCount)); + + OSAllocMem(PVRSRV_PAGEABLE_SELECT, + sizeof (BUCKET *) * uNewSize, + (IMG_PVOID*)&ppNewTable, IMG_NULL, + "Hash Table Buckets"); + if (ppNewTable == IMG_NULL) + return IMG_FALSE; + + for (uIndex=0; uIndexppBucketTable, pHash->uSize, ppNewTable, uNewSize) != PVRSRV_OK) + { + OSFreeMem (PVRSRV_PAGEABLE_SELECT, sizeof(BUCKET *) * uNewSize, ppNewTable, IMG_NULL); + return IMG_FALSE; + } + + OSFreeMem (PVRSRV_PAGEABLE_SELECT, sizeof(BUCKET *)*pHash->uSize, pHash->ppBucketTable, IMG_NULL); + /*not nulling pointer, being reassigned just below*/ + pHash->ppBucketTable = ppNewTable; + pHash->uSize = uNewSize; + } + return IMG_TRUE; +} + + +/*! +****************************************************************************** + @Function HASH_Create_Extended + + @Description Create a self scaling hash table, using the supplied + key size, and the supplied hash and key comparsion + functions. + + @Input uInitialLen - initial and minimum length of the + hash table, where the length refers to the number + of entries in the hash table, not its size in + bytes. + @Input uKeySize - the size of the key, in bytes. + @Input pfnHashFunc - pointer to hash function. 
+ @Input pfnKeyComp - pointer to key comparsion function. + @Return IMG_NULL or hash table handle. +******************************************************************************/ +HASH_TABLE * HASH_Create_Extended (IMG_UINT32 uInitialLen, IMG_SIZE_T uKeySize, HASH_FUNC *pfnHashFunc, HASH_KEY_COMP *pfnKeyComp) +{ + HASH_TABLE *pHash; + IMG_UINT32 uIndex; + + PVR_DPF ((PVR_DBG_MESSAGE, "HASH_Create_Extended: InitialSize=0x%x", uInitialLen)); + + if(OSAllocMem(PVRSRV_PAGEABLE_SELECT, + sizeof(HASH_TABLE), + (IMG_VOID **)&pHash, IMG_NULL, + "Hash Table") != PVRSRV_OK) + { + return IMG_NULL; + } + + pHash->uCount = 0; + pHash->uSize = uInitialLen; + pHash->uMinimumSize = uInitialLen; + pHash->uKeySize = (IMG_UINT32)uKeySize; + pHash->pfnHashFunc = pfnHashFunc; + pHash->pfnKeyComp = pfnKeyComp; + + OSAllocMem(PVRSRV_PAGEABLE_SELECT, + sizeof (BUCKET *) * pHash->uSize, + (IMG_PVOID*)&pHash->ppBucketTable, IMG_NULL, + "Hash Table Buckets"); + + if (pHash->ppBucketTable == IMG_NULL) + { + OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(HASH_TABLE), pHash, IMG_NULL); + /*not nulling pointer, out of scope*/ + return IMG_NULL; + } + + for (uIndex=0; uIndexuSize; uIndex++) + pHash->ppBucketTable[uIndex] = IMG_NULL; + return pHash; +} + +/*! +****************************************************************************** + @Function HASH_Create + + @Description Create a self scaling hash table with a key + consisting of a single IMG_UINTPTR_T, and using + the default hash and key comparison functions. + + @Input uInitialLen - initial and minimum length of the + hash table, where the length refers to the + number of entries in the hash table, not its size + in bytes. + @Return IMG_NULL or hash table handle. +******************************************************************************/ +HASH_TABLE * HASH_Create (IMG_UINT32 uInitialLen) +{ + return HASH_Create_Extended(uInitialLen, sizeof(IMG_UINTPTR_T), + &HASH_Func_Default, &HASH_Key_Comp_Default); +} + +/*! 
+****************************************************************************** + @Function HASH_Delete + + @Description Delete a hash table created by HASH_Create_Extended or + HASH_Create. All entries in the table must have been + removed before calling this function. + + @Input pHash - hash table + + @Return None +******************************************************************************/ +IMG_VOID +HASH_Delete (HASH_TABLE *pHash) +{ + if (pHash != IMG_NULL) + { + PVR_DPF ((PVR_DBG_MESSAGE, "HASH_Delete")); + + PVR_ASSERT (pHash->uCount==0); + if(pHash->uCount != 0) + { + PVR_DPF ((PVR_DBG_ERROR, "HASH_Delete: leak detected in hash table!")); + PVR_DPF ((PVR_DBG_ERROR, "Likely Cause: client drivers not freeing allocations before destroying devmemcontext")); + } + OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(BUCKET *)*pHash->uSize, pHash->ppBucketTable, IMG_NULL); + pHash->ppBucketTable = IMG_NULL; + OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(HASH_TABLE), pHash, IMG_NULL); + /*not nulling pointer, copy on stack*/ + } +} + +/*! +****************************************************************************** + @Function HASH_Insert_Extended + + @Description Insert a key value pair into a hash table created + with HASH_Create_Extended. + + @Input pHash - the hash table. + @Input pKey - pointer to the key. + @Input v - the value associated with the key. 
+ + @Return IMG_TRUE - success + IMG_FALSE - failure +******************************************************************************/ +IMG_BOOL +HASH_Insert_Extended (HASH_TABLE *pHash, IMG_VOID *pKey, IMG_UINTPTR_T v) +{ + BUCKET *pBucket; + + PVR_DPF ((PVR_DBG_MESSAGE, + "HASH_Insert_Extended: Hash=0x%p, pKey=0x%p, v=0x" UINTPTR_FMT, + pHash, pKey, v)); + + PVR_ASSERT (pHash != IMG_NULL); + + if (pHash == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "HASH_Insert_Extended: invalid parameter")); + return IMG_FALSE; + } + + if(OSAllocMem(PVRSRV_PAGEABLE_SELECT, + sizeof(BUCKET) + pHash->uKeySize, + (IMG_VOID **)&pBucket, IMG_NULL, + "Hash Table entry") != PVRSRV_OK) + { + return IMG_FALSE; + } + + pBucket->v = v; + /* PRQA S 0432,0541 1 */ /* ignore warning about dynamic array k (linux)*/ + OSMemCopy(pBucket->k, pKey, pHash->uKeySize); + if (_ChainInsert (pHash, pBucket, pHash->ppBucketTable, pHash->uSize) != PVRSRV_OK) + { + OSFreeMem(PVRSRV_PAGEABLE_SELECT, + sizeof(BUCKET) + pHash->uKeySize, + pBucket, IMG_NULL); + return IMG_FALSE; + } + + pHash->uCount++; + + /* check if we need to think about re-balencing */ + if (pHash->uCount << 1 > pHash->uSize) + { + /* Ignore the return code from _Resize because the hash table is + still in a valid state and although not ideally sized, it is still + functional */ + _Resize (pHash, pHash->uSize << 1); + } + + + return IMG_TRUE; +} + +/*! +****************************************************************************** + @Function HASH_Insert + + @Description Insert a key value pair into a hash table created with + HASH_Create. + + @Input pHash - the hash table. + @Input k - the key value. + @Input v - the value associated with the key. + + @Return IMG_TRUE - success. + IMG_FALSE - failure. 
+******************************************************************************/ +IMG_BOOL +HASH_Insert (HASH_TABLE *pHash, IMG_UINTPTR_T k, IMG_UINTPTR_T v) +{ + PVR_DPF ((PVR_DBG_MESSAGE, + "HASH_Insert: Hash=0x%p, k=0x" UINTPTR_FMT ", v=0x" UINTPTR_FMT, + pHash, k, v)); + + return HASH_Insert_Extended(pHash, &k, v); +} + +/*! +****************************************************************************** + @Function HASH_Remove_Extended + + @Description Remove a key from a hash table created with + HASH_Create_Extended. + + @Input pHash - the hash table. + @Input pKey - pointer to key. + + @Return 0 if the key is missing, or the value associated + with the key. +******************************************************************************/ +IMG_UINTPTR_T +HASH_Remove_Extended(HASH_TABLE *pHash, IMG_VOID *pKey) +{ + BUCKET **ppBucket; + IMG_UINT32 uIndex; + + PVR_DPF ((PVR_DBG_MESSAGE, "HASH_Remove_Extended: Hash=0x%p, pKey=0x%p", + pHash, pKey)); + + PVR_ASSERT (pHash != IMG_NULL); + + if (pHash == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "HASH_Remove_Extended: Null hash table")); + return 0; + } + + uIndex = KEY_TO_INDEX(pHash, pKey, pHash->uSize); + + for (ppBucket = &(pHash->ppBucketTable[uIndex]); *ppBucket != IMG_NULL; ppBucket = &((*ppBucket)->pNext)) + { + /* PRQA S 0432,0541 1 */ /* ignore warning about dynamic array k */ + if (KEY_COMPARE(pHash, (*ppBucket)->k, pKey)) + { + BUCKET *pBucket = *ppBucket; + IMG_UINTPTR_T v = pBucket->v; + (*ppBucket) = pBucket->pNext; + + OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(BUCKET) + pHash->uKeySize, pBucket, IMG_NULL); + /*not nulling original pointer, already overwritten*/ + + pHash->uCount--; + + /* check if we need to think about re-balencing */ + if (pHash->uSize > (pHash->uCount << 2) && + pHash->uSize > pHash->uMinimumSize) + { + /* Ignore the return code from _Resize because the + hash table is still in a valid state and although + not ideally sized, it is still functional */ + _Resize (pHash, + PRIVATE_MAX 
(pHash->uSize >> 1, + pHash->uMinimumSize)); + } + + PVR_DPF ((PVR_DBG_MESSAGE, + "HASH_Remove_Extended: Hash=0x%p, pKey=0x%p = 0x" UINTPTR_FMT, + pHash, pKey, v)); + return v; + } + } + PVR_DPF ((PVR_DBG_MESSAGE, + "HASH_Remove_Extended: Hash=0x%p, pKey=0x%p = 0x0 !!!!", + pHash, pKey)); + return 0; +} + +/*! +****************************************************************************** + @Function HASH_Remove + + @Description Remove a key value pair from a hash table created + with HASH_Create. + + @Input pHash - the hash table + @Input k - the key + + @Return 0 if the key is missing, or the value associated + with the key. +******************************************************************************/ +IMG_UINTPTR_T +HASH_Remove (HASH_TABLE *pHash, IMG_UINTPTR_T k) +{ + PVR_DPF ((PVR_DBG_MESSAGE, "HASH_Remove: Hash=0x%p, k=0x" UINTPTR_FMT, + pHash, k)); + + return HASH_Remove_Extended(pHash, &k); +} + +/*! +****************************************************************************** + @Function HASH_Retrieve_Extended + + @Description Retrieve a value from a hash table created with + HASH_Create_Extended. + + @Input pHash - the hash table. + @Input pKey - pointer to the key. + + @Return 0 if the key is missing, or the value associated with + the key. 
+******************************************************************************/ +IMG_UINTPTR_T +HASH_Retrieve_Extended (HASH_TABLE *pHash, IMG_VOID *pKey) +{ + BUCKET **ppBucket; + IMG_UINT32 uIndex; + + PVR_DPF ((PVR_DBG_MESSAGE, "HASH_Retrieve_Extended: Hash=0x%p, pKey=0x%p", + pHash, pKey)); + + PVR_ASSERT (pHash != IMG_NULL); + + if (pHash == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "HASH_Retrieve_Extended: Null hash table")); + return 0; + } + + uIndex = KEY_TO_INDEX(pHash, pKey, pHash->uSize); + + for (ppBucket = &(pHash->ppBucketTable[uIndex]); *ppBucket != IMG_NULL; ppBucket = &((*ppBucket)->pNext)) + { + /* PRQA S 0432,0541 1 */ /* ignore warning about dynamic array k */ + if (KEY_COMPARE(pHash, (*ppBucket)->k, pKey)) + { + BUCKET *pBucket = *ppBucket; + IMG_UINTPTR_T v = pBucket->v; + + PVR_DPF ((PVR_DBG_MESSAGE, + "HASH_Retrieve: Hash=0x%p, pKey=0x%p = 0x" UINTPTR_FMT, + pHash, pKey, v)); + return v; + } + } + PVR_DPF ((PVR_DBG_MESSAGE, + "HASH_Retrieve: Hash=0x%p, pKey=0x%p = 0x0 !!!!", + pHash, pKey)); + return 0; +} + +/*! +****************************************************************************** + @Function HASH_Retrieve + + @Description Retrieve a value from a hash table created with + HASH_Create. + + @Input pHash - the hash table + @Input k - the key + @Return 0 if the key is missing, or the value associated with + the key. +******************************************************************************/ +IMG_UINTPTR_T +HASH_Retrieve (HASH_TABLE *pHash, IMG_UINTPTR_T k) +{ + PVR_DPF ((PVR_DBG_MESSAGE, "HASH_Retrieve: Hash=0x%p, k=0x" UINTPTR_FMT, + pHash, k)); + return HASH_Retrieve_Extended(pHash, &k); +} + +/*! 
+******************************************************************************
+ @Function	HASH_Iterate
+
+ @Description	Iterate over every entry in the hash table
+
+ @Input		pHash - the hash table
+ @Input		pfnCallback - callback invoked with each (key, value) entry
+
+ @Return	Callback error if any, otherwise PVRSRV_OK
+******************************************************************************/
+PVRSRV_ERROR
+HASH_Iterate(HASH_TABLE *pHash, HASH_pfnCallback pfnCallback)
+{
+	IMG_UINT32 uIndex;
+	for (uIndex=0; uIndex < pHash->uSize; uIndex++)
+	{
+		BUCKET *pBucket;
+		pBucket = pHash->ppBucketTable[uIndex];
+		while (pBucket != IMG_NULL)
+		{
+			PVRSRV_ERROR eError;
+			/* pNext is captured before the callback runs */
+			BUCKET *pNextBucket = pBucket->pNext;
+
+			eError = pfnCallback((IMG_UINTPTR_T) ((IMG_VOID *) *(pBucket->k)), (IMG_UINTPTR_T) pBucket->v);
+
+			/* The callback might want us to break out early */
+			if (eError != PVRSRV_OK)
+				return eError;
+
+			pBucket = pNextBucket;
+		}
+	}
+	return PVRSRV_OK;
+}
+
+#ifdef HASH_TRACE
+/*!
+******************************************************************************
+ @Function	HASH_Dump
+
+ @Description	To dump the contents of a hash table in human readable
+		form.
+ + @Input pHash - the hash table + + @Return None +******************************************************************************/ +IMG_VOID +HASH_Dump (HASH_TABLE *pHash) +{ + IMG_UINT32 uIndex; + IMG_UINT32 uMaxLength=0; + IMG_UINT32 uEmptyCount=0; + + PVR_ASSERT (pHash != IMG_NULL); + for (uIndex=0; uIndexuSize; uIndex++) + { + BUCKET *pBucket; + IMG_UINT32 uLength = 0; + if (pHash->ppBucketTable[uIndex] == IMG_NULL) + { + uEmptyCount++; + } + for (pBucket=pHash->ppBucketTable[uIndex]; + pBucket != IMG_NULL; + pBucket = pBucket->pNext) + { + uLength++; + } + uMaxLength = PRIVATE_MAX (uMaxLength, uLength); + } + + PVR_TRACE(("hash table: uMinimumSize=%d size=%d count=%d", + pHash->uMinimumSize, pHash->uSize, pHash->uCount)); + PVR_TRACE((" empty=%d max=%d", uEmptyCount, uMaxLength)); +} +#endif diff --git a/sgx_km/eurasia_km/services4/srvkm/common/lists.c b/sgx_km/eurasia_km/services4/srvkm/common/lists.c new file mode 100644 index 0000000..4ffdb0d --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/common/lists.c @@ -0,0 +1,159 @@ +/*************************************************************************/ /*! +@Title Linked list shared functions implementation +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implementation of the list iterators for types shared among + more than one file in the services code. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#include "lists.h" +#include "services_headers.h" + +/*=================================================================== + LIST ITERATOR FUNCTIONS USED IN MORE THAN ONE FILE (those used just + once are implemented locally). + ===================================================================*/ + +IMPLEMENT_LIST_ANY_VA(BM_HEAP) +IMPLEMENT_LIST_ANY_2(BM_HEAP, PVRSRV_ERROR, PVRSRV_OK) +IMPLEMENT_LIST_ANY_VA_2(BM_HEAP, PVRSRV_ERROR, PVRSRV_OK) +IMPLEMENT_LIST_FOR_EACH_VA(BM_HEAP) +IMPLEMENT_LIST_REMOVE(BM_HEAP) +IMPLEMENT_LIST_INSERT(BM_HEAP) + +IMPLEMENT_LIST_ANY_VA(BM_CONTEXT) +IMPLEMENT_LIST_ANY_VA_2(BM_CONTEXT, IMG_HANDLE, IMG_NULL) +IMPLEMENT_LIST_ANY_VA_2(BM_CONTEXT, PVRSRV_ERROR, PVRSRV_OK) +IMPLEMENT_LIST_FOR_EACH(BM_CONTEXT) +IMPLEMENT_LIST_REMOVE(BM_CONTEXT) +IMPLEMENT_LIST_INSERT(BM_CONTEXT) + +IMPLEMENT_LIST_ANY_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK) +IMPLEMENT_LIST_ANY_VA(PVRSRV_DEVICE_NODE) +IMPLEMENT_LIST_ANY_VA_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK) +IMPLEMENT_LIST_FOR_EACH(PVRSRV_DEVICE_NODE) +IMPLEMENT_LIST_FOR_EACH_VA(PVRSRV_DEVICE_NODE) +IMPLEMENT_LIST_INSERT(PVRSRV_DEVICE_NODE) +IMPLEMENT_LIST_REMOVE(PVRSRV_DEVICE_NODE) + +IMPLEMENT_LIST_ANY_VA(PVRSRV_POWER_DEV) +IMPLEMENT_LIST_ANY_VA_2(PVRSRV_POWER_DEV, PVRSRV_ERROR, PVRSRV_OK) +IMPLEMENT_LIST_INSERT(PVRSRV_POWER_DEV) +IMPLEMENT_LIST_REMOVE(PVRSRV_POWER_DEV) + +IMPLEMENT_LIST_ANY_2(PVRSRV_KERNEL_SYNC_INFO, PVRSRV_ERROR, PVRSRV_OK); +IMPLEMENT_LIST_INSERT(PVRSRV_KERNEL_SYNC_INFO) +IMPLEMENT_LIST_REMOVE(PVRSRV_KERNEL_SYNC_INFO) + +/*=================================================================== + BELOW ARE IMPLEMENTED SOME COMMON CALLBACKS USED IN DIFFERENT FILES + ===================================================================*/ + + +/*! 
+******************************************************************************
+ @Function	MatchDeviceKM_AnyVaCb
+ @Description	Matches a device node with an id and optionally a class.
+
+ @Input	psDeviceNode - Pointer to the device node.
+ @Input	va - Variable argument list, with the following values:
+			# ui32DevIndex - Index of the device to match.
+			# bIgnoreClass - Flag indicating if there's
+					no need to check the device class.
+			# eDevClass - Device class, ONLY present if
+					bIgnoreClass was IMG_FALSE.
+
+ @Return The pointer to the device node if it matches, IMG_NULL
+		otherwise.
+******************************************************************************/
+IMG_VOID* MatchDeviceKM_AnyVaCb(PVRSRV_DEVICE_NODE* psDeviceNode, va_list va)
+{
+	IMG_UINT32 ui32DevIndex;
+	IMG_BOOL bIgnoreClass;
+	PVRSRV_DEVICE_CLASS eDevClass;
+
+	ui32DevIndex = va_arg(va, IMG_UINT32);
+	bIgnoreClass = va_arg(va, IMG_BOOL);
+	if (!bIgnoreClass)
+	{
+		eDevClass = va_arg(va, PVRSRV_DEVICE_CLASS);
+	}
+	else
+	{
+		/* This value is never used: short-circuit evaluation of the first
+		   clause of the match below stops because bIgnoreClass is true, but
+		   the compiler complains if it's not initialised. */
+		eDevClass = PVRSRV_DEVICE_CLASS_FORCE_I32;
+	}
+
+	if ((bIgnoreClass || psDeviceNode->sDevId.eDeviceClass == eDevClass) &&
+		psDeviceNode->sDevId.ui32DeviceIndex == ui32DevIndex)
+	{
+		return psDeviceNode;
+	}
+	return IMG_NULL;
+}
+
+/*!
+******************************************************************************
+
+ @Function	MatchPowerDeviceIndex_AnyVaCb
+
+ @Description
+ Matches a power device with its device index.
+
+ @Input va : variable argument list with:
+			ui32DeviceIndex : device index
+
+ @Return the pointer to the device it matched, IMG_NULL otherwise.
+
+******************************************************************************/
+IMG_VOID* MatchPowerDeviceIndex_AnyVaCb(PVRSRV_POWER_DEV *psPowerDev, va_list va)
+{
+	IMG_UINT32 ui32DeviceIndex;
+
+	ui32DeviceIndex = va_arg(va, IMG_UINT32);
+
+	if (psPowerDev->ui32DeviceIndex == ui32DeviceIndex)
+	{
+		return psPowerDev;
+	}
+	else
+	{
+		return IMG_NULL; /* index did not match */
+	}
+}
diff --git a/sgx_km/eurasia_km/services4/srvkm/common/mem.c b/sgx_km/eurasia_km/services4/srvkm/common/mem.c
new file mode 100644
index 0000000..cccdd24
--- /dev/null
+++ b/sgx_km/eurasia_km/services4/srvkm/common/mem.c
@@ -0,0 +1,175 @@
+/*************************************************************************/ /*!
+@Title System memory functions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description System memory allocation APIs
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/
+
+#include "services_headers.h"
+#include "pvr_bridge_km.h"
+
+
+/* Resman cleanup callback for shared system memory: frees the backing pages
+   and then the PVRSRV_KERNEL_MEM_INFO header itself. pvParam is the
+   PVRSRV_KERNEL_MEM_INFO; ui32Param and bDummy are unused. */
+static PVRSRV_ERROR
+FreeSharedSysMemCallBack(IMG_PVOID pvParam,
+						 IMG_UINT32 ui32Param,
+						 IMG_BOOL bDummy)
+{
+	PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo = pvParam;
+
+	PVR_UNREFERENCED_PARAMETER(ui32Param);
+	PVR_UNREFERENCED_PARAMETER(bDummy);
+
+	OSFreePages(psKernelMemInfo->ui32Flags,
+				psKernelMemInfo->uAllocSize,
+				psKernelMemInfo->pvLinAddrKM,
+				psKernelMemInfo->sMemBlk.hOSMemHandle);
+
+	OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
+			  sizeof(PVRSRV_KERNEL_MEM_INFO),
+			  psKernelMemInfo,
+			  IMG_NULL);
+	/* not NULLing the pointer: pvParam is a copy on this callback's stack */
+
+	return PVRSRV_OK;
+}
+
+
+/* Allocates page-backed shared system memory, forcing a multi-process
+   mapping, and registers the allocation with the resource manager so it is
+   reclaimed on process teardown. On success *ppsKernelMemInfo receives the
+   new meminfo; returns PVRSRV_ERROR_OUT_OF_MEMORY on allocation failure. */
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVAllocSharedSysMemoryKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
+							 IMG_UINT32 ui32Flags,
+							 IMG_SIZE_T uSize,
+							 PVRSRV_KERNEL_MEM_INFO **ppsKernelMemInfo)
+{
+	PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
+
+	if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+				  sizeof(PVRSRV_KERNEL_MEM_INFO),
+				  (IMG_VOID **)&psKernelMemInfo, IMG_NULL,
+				  "Kernel Memory Info") != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVAllocSharedSysMemoryKM: Failed to alloc memory for meminfo"));
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	OSMemSet(psKernelMemInfo, 0, sizeof(*psKernelMemInfo));
+
+	/* replace whatever mapping type the caller asked for with MULTI_PROCESS */
+	ui32Flags &= ~PVRSRV_HAP_MAPTYPE_MASK;
+	ui32Flags |= PVRSRV_HAP_MULTI_PROCESS;
+	psKernelMemInfo->ui32Flags = ui32Flags;
+	psKernelMemInfo->uAllocSize = uSize;
+
+	if(OSAllocPages(psKernelMemInfo->ui32Flags,
+					psKernelMemInfo->uAllocSize,
+					(IMG_UINT32)HOST_PAGESIZE(),
+					IMG_NULL,
+					0,
+					IMG_NULL,
+					&psKernelMemInfo->pvLinAddrKM,
+					&psKernelMemInfo->sMemBlk.hOSMemHandle)
+		!= PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocSharedSysMemoryKM: Failed to alloc memory for block"));
+		OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
+				  sizeof(PVRSRV_KERNEL_MEM_INFO),
+				  psKernelMemInfo,
+				  0);
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	/* register with the resman */
+	psKernelMemInfo->sMemBlk.hResItem =
+				ResManRegisterRes(psPerProc->hResManContext,
+								  RESMAN_TYPE_SHARED_MEM_INFO,
+								  psKernelMemInfo,
+								  0,
+								  &FreeSharedSysMemCallBack);
+
+	*ppsKernelMemInfo = psKernelMemInfo;
+
+	return PVRSRV_OK;
+}
+
+
+/* Frees shared system memory: via the resman if the allocation was
+   registered, otherwise by invoking the cleanup callback directly. */
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVFreeSharedSysMemoryKM(PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo)
+{
+	PVRSRV_ERROR eError;
+
+	if(psKernelMemInfo->sMemBlk.hResItem)
+	{
+		eError = ResManFreeResByPtr(psKernelMemInfo->sMemBlk.hResItem, CLEANUP_WITH_POLL);
+	}
+	else
+	{
+		eError = FreeSharedSysMemCallBack(psKernelMemInfo, 0, CLEANUP_WITH_POLL);
+	}
+
+	return eError;
+}
+
+
+/* Detaches the allocation from the resource manager so it is no longer
+   auto-freed on process teardown; the caller then owns the memory. */
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVDissociateMemFromResmanKM(PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	if(!psKernelMemInfo)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	if(psKernelMemInfo->sMemBlk.hResItem)
+	{
+		eError = ResManDissociateRes(psKernelMemInfo->sMemBlk.hResItem, IMG_NULL);
+
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"PVRSRVDissociateMemFromResmanKM: ResManDissociateRes failed"));
+			PVR_DBG_BREAK;
+			return eError;
+		}
+
+		psKernelMemInfo->sMemBlk.hResItem = IMG_NULL; /* ownership left resman */
+	}
+
+	return eError;
+}
+
+/******************************************************************************
+ End of file (mem.c)
+******************************************************************************/
diff --git a/sgx_km/eurasia_km/services4/srvkm/common/mem_debug.c b/sgx_km/eurasia_km/services4/srvkm/common/mem_debug.c
new file mode 100644
index 0000000..f79e7cb
--- /dev/null
+++ b/sgx_km/eurasia_km/services4/srvkm/common/mem_debug.c
@@ -0,0 +1,276 @@
+/*************************************************************************/ /*!
+@Title Memory debugging routines.
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Adds extra memory to the allocations to trace the memory bounds
+ and other runtime information.
+@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef MEM_DEBUG_C
+#define MEM_DEBUG_C
+
+#if defined(PVRSRV_DEBUG_OS_MEMORY)
+
+#include "img_types.h"
+#include "services_headers.h"
+
+#if defined (__cplusplus)
+extern "C"
+{
+#endif
+
+#define STOP_ON_ERROR 0
+
+	/*
+	   Allocated Memory Layout - the OSMEM_DEBUG_INFO header carries the
+	   0xB1-filled guard region (sGuardRegionBefore, see the allocation
+	   wrapper below); the user data is pre-filled with 0xBB and followed
+	   by a 0xB2-filled guard region:
+
+	   --------- \
+	   Status [OSMEM_DEBUG_INFO] |- TEST_BUFFER_PADDING_STATUS
+	   --------- <
+	   [0xBB]* [raw bytes] |- ui32Size
+	   --------- <
+	   [0xB2]* [raw bytes] |- TEST_BUFFER_PADDING_AFTER
+	   --------- /
+	 */
+
+	/* Returns IMG_TRUE iff every one of the uSize bytes at pvAddr equals
+	   ui8Pattern - used below to validate the guard regions. */
+	IMG_BOOL MemCheck(const IMG_PVOID pvAddr, const IMG_UINT8 ui8Pattern, IMG_SIZE_T uSize)
+	{
+		IMG_UINT8 *pui8Addr;
+		for (pui8Addr = (IMG_UINT8*)pvAddr; uSize > 0; uSize--, pui8Addr++)
+		{
+			if (*pui8Addr != ui8Pattern)
+			{
+				return IMG_FALSE;
+			}
+		}
+		return IMG_TRUE;
+	}
+
+	/*
+	   This function expects the pointer to the user data, not the debug data.
+	 */
+	IMG_VOID OSCheckMemDebug(IMG_PVOID pvCpuVAddr, IMG_SIZE_T uSize, const IMG_CHAR *pszFileName, const IMG_UINT32 uLine)
+	{
+		OSMEM_DEBUG_INFO const *psInfo = (OSMEM_DEBUG_INFO *)((IMG_UINTPTR_T)pvCpuVAddr - TEST_BUFFER_PADDING_STATUS);
+
+		/* invalid pointer: psInfo is bogus here, so it must not be
+		   dereferenced - log what we can and bail out */
+		if (pvCpuVAddr == IMG_NULL)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "Pointer 0x%p : null pointer"
+					" - referenced %s:%d",
+					pvCpuVAddr,
+					pszFileName, uLine));
+			while (STOP_ON_ERROR);
+			return;
+		}
+
+		/* align: use IMG_UINTPTR_T (not IMG_UINT32) so the low-bit test is
+		   not performed on a truncated pointer on 64-bit builds */
+		if (((IMG_UINTPTR_T)pvCpuVAddr&3) != 0)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "Pointer 0x%p : invalid alignment"
+					" - referenced %s:%d - allocated %s:%d",
+					pvCpuVAddr,
+					pszFileName, uLine,
+					psInfo->sFileName, psInfo->uLineNo));
+			while (STOP_ON_ERROR);
+		}
+
+		/*check guard region before*/
+		if (!MemCheck((IMG_PVOID)psInfo->sGuardRegionBefore, 0xB1, sizeof(psInfo->sGuardRegionBefore)))
+		{
+			PVR_DPF((PVR_DBG_ERROR, "Pointer 0x%p : guard region before overwritten"
+					" - referenced %s:%d - allocated %s:%d",
+					pvCpuVAddr,
+					pszFileName, uLine,
+					psInfo->sFileName, psInfo->uLineNo));
+			while (STOP_ON_ERROR);
+		}
+
+		/*check size*/
+		if (uSize != psInfo->uSize)
+		{
+			PVR_DPF((PVR_DBG_WARNING,
+					"Pointer 0x%p : supplied size was different to stored size (0x%"
+					SIZE_T_FMT_LEN "X != 0x%" SIZE_T_FMT_LEN "X)"
+					" - referenced %s:%d - allocated %s:%d",
+					pvCpuVAddr, uSize, psInfo->uSize,
+					pszFileName, uLine,
+					psInfo->sFileName, psInfo->uLineNo));
+			while (STOP_ON_ERROR);
+		}
+
+		/*check size parity*/
+		if ((0x01234567 ^ psInfo->uSizeParityCheck) != psInfo->uSize)
+		{
+			PVR_DPF((PVR_DBG_WARNING,
+					"Pointer 0x%p : stored size parity error (0x%"
+					SIZE_T_FMT_LEN "X != 0x%" SIZE_T_FMT_LEN "X)"
+					" - referenced %s:%d - allocated %s:%d",
+					pvCpuVAddr, psInfo->uSize, 0x01234567 ^ psInfo->uSizeParityCheck,
+					pszFileName, uLine,
+					psInfo->sFileName, psInfo->uLineNo));
+			while (STOP_ON_ERROR);
+		}
+		else
+		{
+			/*the stored size is ok, so we use it instead the supplied uSize*/
+			uSize = psInfo->uSize;
+		}
+
+		/*check padding after*/
+		if (uSize)
+		{
+			if (!MemCheck((IMG_VOID*)((IMG_UINTPTR_T)pvCpuVAddr + uSize), 0xB2, TEST_BUFFER_PADDING_AFTER))
+			{
+				PVR_DPF((PVR_DBG_ERROR, "Pointer 0x%p : guard region after overwritten"
+						" - referenced from %s:%d - allocated from %s:%d",
+						pvCpuVAddr,
+						pszFileName, uLine,
+						psInfo->sFileName, psInfo->uLineNo));
+			}
+		}
+
+		/* allocated... */
+		if (psInfo->eValid != isAllocated)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "Pointer 0x%p : not allocated (freed? %d)"
+					" - referenced %s:%d - freed %s:%d",
+					pvCpuVAddr, psInfo->eValid == isFree,
+					pszFileName, uLine,
+					psInfo->sFileName, psInfo->uLineNo));
+			while (STOP_ON_ERROR);
+		}
+	}
+
+	/* Bounded copy of a file name into the 128-byte sFileName field.
+	   Always NUL-terminates, even when pSrc is 127 chars or longer. */
+	IMG_VOID debug_strcpy(IMG_CHAR *pDest, const IMG_CHAR *pSrc)
+	{
+		IMG_SIZE_T i = 0;
+
+		for (; i < 128; i++) /*changed to 128 to match the filename array size*/
+		{
+			*pDest = (i == 127) ? '\0' : *pSrc; /* force NUL in the last slot */
+			if (*pDest == '\0') break;
+			pDest++;
+			pSrc++;
+		}
+	}
+
+	/* Wraps the real allocator: grows the request by TEST_BUFFER_PADDING,
+	   fills the guard/user regions with their patterns and records the
+	   allocation site in the OSMEM_DEBUG_INFO header. */
+	PVRSRV_ERROR OSAllocMem_Debug_Wrapper(IMG_UINT32 ui32Flags,
+										  IMG_UINT32 ui32Size,
+										  IMG_PVOID *ppvCpuVAddr,
+										  IMG_HANDLE *phBlockAlloc,
+										  IMG_CHAR *pszFilename,
+										  IMG_UINT32 ui32Line)
+	{
+		OSMEM_DEBUG_INFO *psInfo;
+
+		PVRSRV_ERROR eError;
+
+		eError = OSAllocMem_Debug_Linux_Memory_Allocations(ui32Flags,
+														   ui32Size + TEST_BUFFER_PADDING,
+														   ppvCpuVAddr,
+														   phBlockAlloc,
+														   pszFilename,
+														   ui32Line);
+
+		if (eError != PVRSRV_OK)
+		{
+			return eError;
+		}
+
+		OSMemSet((IMG_CHAR *)(*ppvCpuVAddr) + TEST_BUFFER_PADDING_STATUS, 0xBB, ui32Size);
+		OSMemSet((IMG_CHAR *)(*ppvCpuVAddr) + ui32Size + TEST_BUFFER_PADDING_STATUS, 0xB2, TEST_BUFFER_PADDING_AFTER);
+
+		/*fill the dbg info struct*/
+		psInfo = (OSMEM_DEBUG_INFO *)(*ppvCpuVAddr);
+
+		OSMemSet(psInfo->sGuardRegionBefore, 0xB1, sizeof(psInfo->sGuardRegionBefore));
+		debug_strcpy(psInfo->sFileName, pszFilename);
+		psInfo->uLineNo = ui32Line;
+		psInfo->eValid = isAllocated;
+		psInfo->uSize = ui32Size;
+		psInfo->uSizeParityCheck = 0x01234567 ^ ui32Size;
+
+		/*point to the user data section*/
+	/* NOTE(review): the cast binds before '+', so this is GNU-style
+	   arithmetic on a void pointer; likely intended as
+	   (IMG_PVOID)((IMG_UINTPTR_T)*ppvCpuVAddr + TEST_BUFFER_PADDING_STATUS)
+	   - same result under GCC, confirm before building with other compilers */
+	*ppvCpuVAddr = (IMG_PVOID) ((IMG_UINTPTR_T)*ppvCpuVAddr)+TEST_BUFFER_PADDING_STATUS;
+
+#ifdef PVRSRV_LOG_MEMORY_ALLOCS
+	/*this is here to simplify the surrounding logging macro, that is an
+	  expression - maybe the macro should be an expression */
+	PVR_TRACE(("Allocated pointer (after debug info): 0x%p from %s:%d", *ppvCpuVAddr, pszFilename, ui32Line));
+#endif
+
+	return PVRSRV_OK;
+	}
+
+	/* Wraps the real free: validates the guard regions, poisons the user
+	   data with 0xBF, marks the header freed (recording the free site) and
+	   releases the whole padded allocation. */
+	PVRSRV_ERROR OSFreeMem_Debug_Wrapper(IMG_UINT32 ui32Flags,
+										 IMG_UINT32 ui32Size,
+										 IMG_PVOID pvCpuVAddr,
+										 IMG_HANDLE hBlockAlloc,
+										 IMG_CHAR *pszFilename,
+										 IMG_UINT32 ui32Line)
+	{
+		OSMEM_DEBUG_INFO *psInfo;
+
+		/*check dbginfo (arg pointing to user memory)*/
+		OSCheckMemDebug(pvCpuVAddr, ui32Size, pszFilename, ui32Line);
+
+		/*mark memory as freed*/
+		OSMemSet(pvCpuVAddr, 0xBF, ui32Size + TEST_BUFFER_PADDING_AFTER);
+
+		/*point to the starting address of the total allocated memory*/
+		psInfo = (OSMEM_DEBUG_INFO *)((IMG_UINTPTR_T) pvCpuVAddr - TEST_BUFFER_PADDING_STATUS);
+
+		/*update dbg info struct*/
+		psInfo->uSize = 0;
+		psInfo->uSizeParityCheck = 0;
+		psInfo->eValid = isFree;
+		psInfo->uLineNo = ui32Line;
+		debug_strcpy(psInfo->sFileName, pszFilename);
+
+		return OSFreeMem_Debug_Linux_Memory_Allocations(ui32Flags, ui32Size + TEST_BUFFER_PADDING, psInfo, hBlockAlloc, pszFilename, ui32Line);
+	}
+
+#if defined (__cplusplus)
+
+}
+#endif
+
+#endif /* PVRSRV_DEBUG_OS_MEMORY */
+
+#endif /* MEM_DEBUG_C */
diff --git a/sgx_km/eurasia_km/services4/srvkm/common/metrics.c b/sgx_km/eurasia_km/services4/srvkm/common/metrics.c
new file mode 100644
index 0000000..7370ec1
--- /dev/null
+++ b/sgx_km/eurasia_km/services4/srvkm/common/metrics.c
@@ -0,0 +1,209 @@
+/*************************************************************************/ /*!
+@Title Time measuring functions.
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/
+
+#include "services_headers.h"
+#include "metrics.h"
+
+/* VGX: */
+#if defined(SUPPORT_VGX)
+#include "vgxapi_km.h"
+#endif
+
+/* SGX: */
+#if defined(SUPPORT_SGX)
+#include "sgxapi_km.h"
+#endif
+
+#if defined(DEBUG) || defined(TIMING)
+
+/* Memory-mapped timer register; only wired up for __sh__ builds (see
+   PVRSRVSetupMetricTimers), NULL everywhere else. */
+static volatile IMG_UINT32 *pui32TimerRegister = 0;
+
+#define PVRSRV_TIMER_TOTAL_IN_TICKS(X) asTimers[X].ui32Total
+#define PVRSRV_TIMER_TOTAL_IN_MS(X) ((1000*asTimers[X].ui32Total)/ui32TicksPerMS)
+#define PVRSRV_TIMER_COUNT(X) asTimers[X].ui32Count
+
+
+Temporal_Data asTimers[PVRSRV_NUM_TIMERS];
+
+
+/***********************************************************************************
+ Function Name : PVRSRVTimeNow
+ Inputs : None
+ Outputs : None
+ Returns : Current timer register value
+ Description : Returns the current timer register value; 0 when no timer
+ register has been set up for this platform (non-__sh__ builds).
+************************************************************************************/
+IMG_UINT32 PVRSRVTimeNow(IMG_VOID)
+{
+	if (!pui32TimerRegister)
+	{
+		static IMG_BOOL bFirstTime = IMG_TRUE;
+
+		/* warn only once to avoid flooding the log */
+		if (bFirstTime)
+		{
+			PVR_DPF((PVR_DBG_ERROR,"PVRSRVTimeNow: No timer register set up"));
+
+			bFirstTime = IMG_FALSE;
+		}
+
+		return 0;
+	}
+
+#if defined(__sh__)
+
+	/* the SH timer counts down, so invert to get an increasing value */
+	return (0xffffffff-*pui32TimerRegister);
+
+#else /* defined(__sh__) */
+
+	return 0;
+
+#endif /* defined(__sh__) */
+}
+
+
+/***********************************************************************************
+ Function Name : PVRSRVGetCPUFreq
+ Inputs : None
+ Outputs : None
+ Returns : CPU timer frequency
+ Description : Returns the CPU timer frequency, measured by sampling the
+ timer across a one-second busy wait. NOTE(review): on
+ platforms where PVRSRVTimeNow returns 0 this yields 0 -
+ callers must check (see PVRSRVOutputMetricTotals).
+************************************************************************************/
+static IMG_UINT32 PVRSRVGetCPUFreq(IMG_VOID)
+{
+	IMG_UINT32 ui32Time1, ui32Time2;
+
+	ui32Time1 = PVRSRVTimeNow();
+
+	OSWaitus(1000000);
+
+	ui32Time2 = PVRSRVTimeNow();
+
+	PVR_DPF((PVR_DBG_WARNING, "PVRSRVGetCPUFreq: timer frequency = %d Hz", ui32Time2 - ui32Time1));
+
+	return (ui32Time2 - ui32Time1);
+}
+
+
+/***********************************************************************************
+ Function Name : PVRSRVSetupMetricTimers
+ Inputs : pvDevInfo (unused)
+ Outputs : None
+ Returns : None
+ Description : Resets all metric timers and sets up the timer register
+ (hardware setup exists only for __sh__ builds)
+************************************************************************************/
+IMG_VOID PVRSRVSetupMetricTimers(IMG_VOID *pvDevInfo)
+{
+	IMG_UINT32 ui32Loop;
+
+	PVR_UNREFERENCED_PARAMETER(pvDevInfo);
+
+	for(ui32Loop=0; ui32Loop < (PVRSRV_NUM_TIMERS); ui32Loop++)
+	{
+		asTimers[ui32Loop].ui32Total = 0;
+		asTimers[ui32Loop].ui32Count = 0;
+	}
+
+	#if defined(__sh__)
+
+	/* timer control register */
+	// clock / 1024 when TIMER_DIVISOR = 4
+	// underflow int disabled
+	// we get approx 38 uS per timer tick
+	*TCR_2 = TIMER_DIVISOR;
+
+	/* reset the timer counter to 0 */
+	*TCOR_2 = *TCNT_2 = (IMG_UINT)0xffffffff;
+
+	/* start timer 2 */
+	*TST_REG |= (IMG_UINT8)0x04;
+
+	pui32TimerRegister = (IMG_UINT32 *)TCNT_2;
+
+	#else /* defined(__sh__) */
+
+	pui32TimerRegister = 0;
+
+	#endif /* defined(__sh__) */
+}
+
+
+/***********************************************************************************
+ Function Name : PVRSRVOutputMetricTotals
+ Inputs : None
+ Outputs : None
+ Returns : None
+ Description : Displays final metric data; bails out if the measured
+ timer frequency is zero (no timer source on this platform)
+************************************************************************************/
+IMG_VOID PVRSRVOutputMetricTotals(IMG_VOID)
+{
+	IMG_UINT32 ui32TicksPerMS, ui32Loop;
+
+	ui32TicksPerMS = PVRSRVGetCPUFreq();
+
+	if (!ui32TicksPerMS)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVOutputMetricTotals: Failed to get CPU Freq"));
+		return;
+	}
+
+	for(ui32Loop=0; ui32Loop < (PVRSRV_NUM_TIMERS); ui32Loop++)
+	{
+		/* the top bit is set while a timer is running */
+		if (asTimers[ui32Loop].ui32Count & 0x80000000L)
+		{
+			PVR_DPF((PVR_DBG_WARNING,"PVRSRVOutputMetricTotals: Timer %u is still ON", ui32Loop));
+		}
+	}
+#if 0
+	/*
+	** EXAMPLE TIMER OUTPUT
+	*/
+	PVR_DPF((PVR_DBG_ERROR," Timer(%u): Total = %u",PVRSRV_TIMER_EXAMPLE_1,
PVRSRV_TIMER_TOTAL_IN_TICKS(PVRSRV_TIMER_EXAMPLE_1))); + PVR_DPF((PVR_DBG_ERROR," Timer(%u): Time = %ums",PVRSRV_TIMER_EXAMPLE_1, PVRSRV_TIMER_TOTAL_IN_MS(PVRSRV_TIMER_EXAMPLE_1))); + PVR_DPF((PVR_DBG_ERROR," Timer(%u): Count = %u",PVRSRV_TIMER_EXAMPLE_1, PVRSRV_TIMER_COUNT(PVRSRV_TIMER_EXAMPLE_1))); +#endif +} + +#endif /* defined(DEBUG) || defined(TIMING) */ + +/****************************************************************************** + End of file (metrics.c) +******************************************************************************/ + diff --git a/sgx_km/eurasia_km/services4/srvkm/common/osfunc_common.c b/sgx_km/eurasia_km/services4/srvkm/common/osfunc_common.c new file mode 100644 index 0000000..19ba9ea --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/common/osfunc_common.c @@ -0,0 +1,48 @@ +/*************************************************************************/ /*! +@Title Wrapper layer for osfunc routines that have common code. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Adds extra memory to the allocations to trace the memory bounds + and other runtime information. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "img_types.h" +#include "services_headers.h" +#include "osfunc.h" + + diff --git a/sgx_km/eurasia_km/services4/srvkm/common/pdump_common.c b/sgx_km/eurasia_km/services4/srvkm/common/pdump_common.c new file mode 100644 index 0000000..af56752 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/common/pdump_common.c @@ -0,0 +1,3117 @@ +/*************************************************************************/ /*! +@Title Common PDump functions +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if defined(PDUMP) +#include + +#include "services_headers.h" +#include "perproc.h" + +/* pdump headers */ +#include "pdump_km.h" +#include "pdump_int.h" + +/* Allow temporary buffer size override */ +#if !defined(PDUMP_TEMP_BUFFER_SIZE) +#define PDUMP_TEMP_BUFFER_SIZE (64 * 1024U) +#endif + +/* DEBUG */ +#if 1 +#define PDUMP_DBG(a) PDumpOSDebugPrintf (a) +#else +#define PDUMP_DBG(a) +#endif + + +#define PTR_PLUS(t, p, x) ((t)(((IMG_CHAR *)(p)) + (x))) +#define VPTR_PLUS(p, x) PTR_PLUS(IMG_VOID *, p, x) +#define VPTR_INC(p, x) ((p) = VPTR_PLUS(p, x)) +#define MAX_PDUMP_MMU_CONTEXTS (32) +static IMG_VOID *gpvTempBuffer = IMG_NULL; +static IMG_HANDLE ghTempBufferBlockAlloc; +static IMG_UINT16 gui16MMUContextUsage = 0; + +#if defined(PDUMP_DEBUG_OUTFILES) +/* counter increments each time debug write is called */ +IMG_UINT32 g_ui32EveryLineCounter = 1U; +#endif + +#if defined(SUPPORT_PDUMP_MULTI_PROCESS) + + +IMG_BOOL _PDumpIsProcessActive(IMG_VOID) +{ + PVRSRV_PER_PROCESS_DATA* psPerProc = PVRSRVFindPerProcessData(); + if(psPerProc == IMG_NULL) + { + return IMG_FALSE; + } + return psPerProc->bPDumpActive; +} + +#endif /* SUPPORT_PDUMP_MULTI_PROCESS */ + +#if defined(PDUMP_DEBUG_OUTFILES) +static INLINE +IMG_UINT32 _PDumpGetPID(IMG_VOID) +{ + PVRSRV_PER_PROCESS_DATA* psPerProc = PVRSRVFindPerProcessData(); + if(psPerProc == IMG_NULL) + { + /* Kernel PID */ + 
return 0; + } + return psPerProc->ui32PID; +} +#endif /* PDUMP_DEBUG_OUTFILES */ + +/************************************************************************** + * Function Name : GetTempBuffer + * Inputs : None + * Outputs : None + * Returns : Temporary buffer address, or IMG_NULL + * Description : Get temporary buffer address. +**************************************************************************/ +static IMG_VOID *GetTempBuffer(IMG_VOID) +{ + /* + * Allocate the temporary buffer, it it hasn't been allocated already. + * Return the address of the temporary buffer, or IMG_NULL if it + * couldn't be allocated. + * It is expected that the buffer will be allocated once, at driver + * load time, and left in place until the driver unloads. + */ + + if (gpvTempBuffer == IMG_NULL) + { + PVRSRV_ERROR eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + PDUMP_TEMP_BUFFER_SIZE, + &gpvTempBuffer, + &ghTempBufferBlockAlloc, + "PDUMP Temporary Buffer"); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "GetTempBuffer: OSAllocMem failed: %d", eError)); + } + } + + return gpvTempBuffer; +} + +static IMG_VOID FreeTempBuffer(IMG_VOID) +{ + + if (gpvTempBuffer != IMG_NULL) + { + PVRSRV_ERROR eError = OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, + PDUMP_TEMP_BUFFER_SIZE, + gpvTempBuffer, + ghTempBufferBlockAlloc); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "FreeTempBuffer: OSFreeMem failed: %d", eError)); + } + else + { + gpvTempBuffer = IMG_NULL; + } + } +} + +IMG_VOID PDumpInitCommon(IMG_VOID) +{ + /* Allocate temporary buffer for copying from user space */ + (IMG_VOID) GetTempBuffer(); + + /* Call environment specific PDump initialisation */ + PDumpInit(); +} + +IMG_VOID PDumpDeInitCommon(IMG_VOID) +{ + /* Free temporary buffer */ + FreeTempBuffer(); + + /* Call environment specific PDump Deinitialisation */ + PDumpDeInit(); +} + +IMG_BOOL PDumpIsSuspended(IMG_VOID) +{ + return PDumpOSIsSuspended(); +} + +IMG_BOOL PDumpIsCaptureFrameKM(IMG_VOID) +{ +#if 
defined(SUPPORT_PDUMP_MULTI_PROCESS) + if( _PDumpIsProcessActive() ) + { + return PDumpOSIsCaptureFrameKM(); + } + return IMG_FALSE; +#else + return PDumpOSIsCaptureFrameKM(); +#endif +} + +PVRSRV_ERROR PDumpSetFrameKM(IMG_UINT32 ui32Frame) +{ +#if defined(SUPPORT_PDUMP_MULTI_PROCESS) + if( _PDumpIsProcessActive() ) + { + return PDumpOSSetFrameKM(ui32Frame); + } + return PVRSRV_OK; +#else + return PDumpOSSetFrameKM(ui32Frame); +#endif +} + +static IMG_BOOL _PDumpWillCapture(IMG_UINT32 ui32Flags) +{ + /* + FIXME: + We really need to know if the PDump client is connected so we can + check if the continuous data will be saved or not. + */ + if ((ui32Flags & PDUMP_FLAGS_PERSISTENT) || (ui32Flags & PDUMP_FLAGS_CONTINUOUS)) + { + return IMG_TRUE; + } + else + { + return PDumpIsCaptureFrameKM(); + } +} + +IMG_BOOL PDumpWillCapture(IMG_UINT32 ui32Flags) +{ +#if defined(SUPPORT_PDUMP_MULTI_PROCESS) + if( _PDumpIsProcessActive() ) + { + return _PDumpWillCapture(ui32Flags); + } + return PVRSRV_OK; +#else + return _PDumpWillCapture(ui32Flags); +#endif +} + +/************************************************************************** + * Function Name : PDumpRegWithFlagsKM + * Inputs : pszPDumpDevName, Register offset, and value to write + * Outputs : None + * Returns : PVRSRV_ERROR + * Description : Create a PDUMP string, which represents a register write +**************************************************************************/ +PVRSRV_ERROR PDumpRegWithFlagsKM(IMG_CHAR *pszPDumpRegName, + IMG_UINT32 ui32Reg, + IMG_UINT32 ui32Data, + IMG_UINT32 ui32Flags) +{ + PVRSRV_ERROR eErr; + PDUMP_GET_SCRIPT_STRING() + + PDUMP_LOCK(); + PDUMP_DBG(("PDumpRegWithFlagsKM")); + + eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "WRW :%s:0x%08X 0x%08X\r\n", + pszPDumpRegName, ui32Reg, ui32Data); + if(eErr != PVRSRV_OK) + { + PDUMP_UNLOCK(); + return eErr; + } + PDumpOSWriteString2(hScript, ui32Flags); + + PDUMP_UNLOCK(); + return PVRSRV_OK; +} + 
+/************************************************************************** + * Function Name : PDumpRegKM + * Inputs : Register offset, and value to write + * Outputs : None + * Returns : PVRSRV_ERROR + * Description : Create a PDUMP string, which represents a register write +**************************************************************************/ +PVRSRV_ERROR PDumpRegKM(IMG_CHAR *pszPDumpRegName, + IMG_UINT32 ui32Reg, + IMG_UINT32 ui32Data) +{ + return PDumpRegWithFlagsKM(pszPDumpRegName, ui32Reg, ui32Data, PDUMP_FLAGS_CONTINUOUS); +} + +/************************************************************************** + * Function Name : PDumpRegPolWithFlagsKM + * Inputs : Description of what this register read is trying to do + * pszPDumpDevName + * Register offset + * expected value + * mask for that value + * Outputs : None + * Returns : None + * Description : Create a PDUMP string which represents a register read + * with the expected value +**************************************************************************/ +PVRSRV_ERROR PDumpRegPolWithFlagsKM(IMG_CHAR *pszPDumpRegName, + IMG_UINT32 ui32RegAddr, + IMG_UINT32 ui32RegValue, + IMG_UINT32 ui32Mask, + IMG_UINT32 ui32Flags, + PDUMP_POLL_OPERATOR eOperator) +{ + /* Timings correct for linux and XP */ + #define POLL_DELAY 1000U + #define POLL_COUNT_LONG (2000000000U / POLL_DELAY) + #define POLL_COUNT_SHORT (1000000U / POLL_DELAY) + + PVRSRV_ERROR eErr; + IMG_UINT32 ui32PollCount; + PDUMP_GET_SCRIPT_STRING(); + + PDUMP_LOCK(); + PDUMP_DBG(("PDumpRegPolWithFlagsKM")); + + ui32PollCount = POLL_COUNT_LONG; + + eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "POL :%s:0x%08X 0x%08X 0x%08X %d %u %d\r\n", + pszPDumpRegName, ui32RegAddr, ui32RegValue, + ui32Mask, eOperator, ui32PollCount, POLL_DELAY); + if(eErr != PVRSRV_OK) + { + PDUMP_UNLOCK(); + return eErr; + } + PDumpOSWriteString2(hScript, ui32Flags); + + PDUMP_UNLOCK(); + return PVRSRV_OK; +} + + 
+/************************************************************************** + * Function Name : PDumpRegPol + * Inputs : Description of what this register read is trying to do + * pszPDumpDevName + Register offset + * expected value + * mask for that value + * Outputs : None + * Returns : None + * Description : Create a PDUMP string which represents a register read + * with the expected value +**************************************************************************/ +PVRSRV_ERROR PDumpRegPolKM(IMG_CHAR *pszPDumpRegName, IMG_UINT32 ui32RegAddr, IMG_UINT32 ui32RegValue, IMG_UINT32 ui32Mask, PDUMP_POLL_OPERATOR eOperator) +{ + return PDumpRegPolWithFlagsKM(pszPDumpRegName, ui32RegAddr, ui32RegValue, ui32Mask, PDUMP_FLAGS_CONTINUOUS, eOperator); +} + +/************************************************************************** + * Function Name : PDumpMallocPages + * Inputs : psDevID, ui32DevVAddr, pvLinAddr, ui32NumBytes, hOSMemHandle + * : hUniqueTag + * Outputs : None + * Returns : None + * Description : Malloc memory pages + +FIXME: This function assumes pvLinAddr is the address of the start of the +block for this hOSMemHandle. +If this isn't true, the call to PDumpOSCPUVAddrToDevPAddr below will be +incorrect. (Consider using OSMemHandleToCPUPAddr() instead?) +The only caller at the moment is in buffer_manager.c, which does the right +thing. 
+**************************************************************************/ +PVRSRV_ERROR PDumpMallocPages (PVRSRV_DEVICE_IDENTIFIER *psDevID, + IMG_UINT32 ui32DevVAddr, + IMG_CPU_VIRTADDR pvLinAddr, + IMG_HANDLE hOSMemHandle, + IMG_UINT32 ui32NumBytes, + IMG_UINT32 ui32PageSize, + IMG_HANDLE hUniqueTag, + IMG_UINT32 ui32Flags) +{ + PVRSRV_ERROR eErr; + IMG_PUINT8 pui8LinAddr; + IMG_UINT32 ui32Offset; + IMG_UINT32 ui32NumPages; + IMG_DEV_PHYADDR sDevPAddr; + IMG_UINT32 ui32Page; + IMG_UINT32 ui32PageSizeShift = 0; + IMG_UINT32 ui32PageSizeTmp; + PDUMP_GET_SCRIPT_STRING(); + + PDUMP_LOCK(); + + /* However, lin addr is only required in non-linux OSes */ +#if !defined(LINUX) + PVR_ASSERT(((IMG_UINTPTR_T)pvLinAddr & (ui32PageSize -1)) == 0); +#endif + + PVR_ASSERT(((IMG_UINT32) ui32DevVAddr & (ui32PageSize -1)) == 0); + PVR_ASSERT(((IMG_UINT32) ui32NumBytes & (ui32PageSize -1)) == 0); + + /* + Compute the amount to right-shift in order to divide by the page-size. + Required for 32-bit PAE kernels (thus phys addresses are 64-bits) where + 64-bit division is unsupported. + */ + ui32PageSizeTmp = ui32PageSize; + while (ui32PageSizeTmp >>= 1) + ui32PageSizeShift++; + + /* + Write a comment to the PDump2 script streams indicating the memory allocation + */ + eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "-- MALLOC :%s:VA_%08X 0x%08X %u (%d pages)\r\n", + psDevID->pszPDumpDevName, ui32DevVAddr, ui32NumBytes, ui32PageSize, ui32NumBytes / ui32PageSize); + if(eErr != PVRSRV_OK) + { + PDUMP_UNLOCK(); + return eErr; + } + PDumpOSWriteString2(hScript, ui32Flags); + + /* + Write to the MMU script stream indicating the memory allocation + */ + pui8LinAddr = (IMG_PUINT8) pvLinAddr; + ui32Offset = 0; + ui32NumPages = ui32NumBytes >> ui32PageSizeShift; + while (ui32NumPages) + { + ui32NumPages--; + + /* See FIXME in function header. + * Currently: linux pdump uses OSMemHandle and Offset + * other OSes use the LinAddr. 
+ */ + /* Calculate the device physical address for this page */ + PDumpOSCPUVAddrToDevPAddr(psDevID->eDeviceType, + hOSMemHandle, + ui32Offset, + pui8LinAddr, + ui32PageSize, + &sDevPAddr); + ui32Page = (IMG_UINT32)(sDevPAddr.uiAddr >> ui32PageSizeShift); + /* increment kernel virtual address */ + pui8LinAddr += ui32PageSize; + ui32Offset += ui32PageSize; + + sDevPAddr.uiAddr = ui32Page * ui32PageSize; + + eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "MALLOC :%s:PA_" UINTPTR_FMT DEVPADDR_FMT " %u %u 0x" DEVPADDR_FMT "\r\n", + psDevID->pszPDumpDevName, + (IMG_UINTPTR_T)hUniqueTag, + sDevPAddr.uiAddr, + ui32PageSize, + ui32PageSize, + sDevPAddr.uiAddr); + if(eErr != PVRSRV_OK) + { + PDUMP_UNLOCK(); + return eErr; + } + PDumpOSWriteString2(hScript, ui32Flags); + } + + PDUMP_UNLOCK(); + return PVRSRV_OK; +} + + +/************************************************************************** + * Function Name : PDumpMallocPageTable + * Inputs : psDevId, pvLinAddr, ui32NumBytes, hUniqueTag + * Outputs : None + * Returns : None + * Description : Malloc memory page table +**************************************************************************/ +PVRSRV_ERROR PDumpMallocPageTable (PVRSRV_DEVICE_IDENTIFIER *psDevId, + IMG_HANDLE hOSMemHandle, + IMG_UINT32 ui32Offset, + IMG_CPU_VIRTADDR pvLinAddr, + IMG_UINT32 ui32PTSize, + IMG_UINT32 ui32Flags, + IMG_HANDLE hUniqueTag) +{ + PVRSRV_ERROR eErr; + IMG_DEV_PHYADDR sDevPAddr; + PDUMP_GET_SCRIPT_STRING(); + + PDUMP_LOCK(); + PVR_ASSERT(((IMG_UINTPTR_T)pvLinAddr & (ui32PTSize - 1)) == 0); + + ui32Flags |= PDUMP_FLAGS_CONTINUOUS; + + /* + Write a comment to the PDump2 script streams indicating the memory allocation + */ + eErr = PDumpOSBufprintf(hScript, + ui32MaxLen, + "-- MALLOC :%s:PAGE_TABLE 0x%08X %u\r\n", + psDevId->pszPDumpDevName, + ui32PTSize, + ui32PTSize); + if(eErr != PVRSRV_OK) + { + PDUMP_UNLOCK(); + return eErr; + } + PDumpOSWriteString2(hScript, ui32Flags); + + /* + Write to the MMU script stream indicating the memory 
allocation + */ + // FIXME: we'll never need more than a 4k page for a pagetable + // fixing to 1 page for now. + // note: when the mmu code supports packed pagetables the PTs + // will be as small as 16bytes + + PDumpOSCPUVAddrToDevPAddr(psDevId->eDeviceType, + hOSMemHandle, /* um - does this mean the pvLinAddr would be ignored? Is that safe? */ + ui32Offset, + (IMG_PUINT8) pvLinAddr, + ui32PTSize, + &sDevPAddr); + + eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "MALLOC :%s:PA_" UINTPTR_FMT DEVPADDR_FMT + " 0x%X %u 0x" DEVPADDR_FMT "\r\n", + psDevId->pszPDumpDevName, + (IMG_UINTPTR_T)hUniqueTag, + sDevPAddr.uiAddr, + ui32PTSize,//size + ui32PTSize,//alignment + sDevPAddr.uiAddr); + if(eErr != PVRSRV_OK) + { + PDUMP_UNLOCK(); + return eErr; + } + PDumpOSWriteString2(hScript, ui32Flags); + + PDUMP_UNLOCK(); + return PVRSRV_OK; +} + +/************************************************************************** + * Function Name : PDumpFreePages + * Inputs : psBMHeap, sDevVAddr, ui32NumBytes, hUniqueTag, + bInterLeaved + * Outputs : None + * Returns : None + * Description : Free memory pages +**************************************************************************/ +PVRSRV_ERROR PDumpFreePages (BM_HEAP *psBMHeap, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_UINT32 ui32NumBytes, + IMG_UINT32 ui32PageSize, + IMG_HANDLE hUniqueTag, + IMG_BOOL bInterleaved, + IMG_BOOL bSparse, + IMG_UINT32 ui32Flags) +{ + PVRSRV_ERROR eErr; + IMG_UINT32 ui32NumPages, ui32PageCounter; + IMG_DEV_PHYADDR sDevPAddr; + PVRSRV_DEVICE_NODE *psDeviceNode; + PDUMP_GET_SCRIPT_STRING(); + + PDUMP_LOCK(); + PVR_ASSERT(((IMG_UINT32) sDevVAddr.uiAddr & (ui32PageSize - 1)) == 0); + PVR_ASSERT(((IMG_UINT32) ui32NumBytes & (ui32PageSize - 1)) == 0); + + psDeviceNode = psBMHeap->pBMContext->psDeviceNode; + + /* + Write a comment to the PDUMP2 script streams indicating the memory free + */ + eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "-- FREE :%s:VA_%08X\r\n", + psDeviceNode->sDevId.pszPDumpDevName, 
sDevVAddr.uiAddr); + if(eErr != PVRSRV_OK) + { + PDUMP_UNLOCK(); + return eErr; + } + + PDumpOSWriteString2(hScript, ui32Flags); + + /* + Write to the MMU script stream indicating the memory free + */ + ui32NumPages = ui32NumBytes / ui32PageSize; + for (ui32PageCounter = 0; ui32PageCounter < ui32NumPages; ui32PageCounter++) + { + if (!bInterleaved || (ui32PageCounter % 2) == 0) + { + sDevPAddr = psDeviceNode->pfnMMUGetPhysPageAddr(psBMHeap->pMMUHeap, sDevVAddr); + + /* With sparse mappings we expect spaces */ + if (bSparse && (sDevPAddr.uiAddr == 0)) + { + continue; + } + + PVR_ASSERT(sDevPAddr.uiAddr != 0); + + eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "FREE :%s:PA_" UINTPTR_FMT DEVPADDR_FMT "\r\n", + psDeviceNode->sDevId.pszPDumpDevName, + (IMG_UINTPTR_T)hUniqueTag, + sDevPAddr.uiAddr); + if(eErr != PVRSRV_OK) + { + PDUMP_UNLOCK(); + return eErr; + } + PDumpOSWriteString2(hScript, ui32Flags); + } + else + { + /* Gap pages in an interleaved allocation should be ignored. */ + } + + sDevVAddr.uiAddr += ui32PageSize; + } + + PDUMP_UNLOCK(); + return PVRSRV_OK; +} + +/************************************************************************** + * Function Name : PDumpFreePageTable + * Inputs : psDevID, pvLinAddr, ui32NumBytes, hUniqueTag + * Outputs : None + * Returns : None + * Description : Free memory page table +**************************************************************************/ +PVRSRV_ERROR PDumpFreePageTable (PVRSRV_DEVICE_IDENTIFIER *psDevID, + IMG_HANDLE hOSMemHandle, + IMG_CPU_VIRTADDR pvLinAddr, + IMG_UINT32 ui32PTSize, + IMG_UINT32 ui32Flags, + IMG_HANDLE hUniqueTag) +{ + PVRSRV_ERROR eErr; + IMG_DEV_PHYADDR sDevPAddr; + PDUMP_GET_SCRIPT_STRING(); + PVR_UNREFERENCED_PARAMETER(ui32PTSize); + + PDUMP_LOCK(); + + /* override QAC warning about wrap around */ + PVR_ASSERT(((IMG_UINTPTR_T)pvLinAddr & (ui32PTSize-1UL)) == 0); /* PRQA S 3382 */ + + /* + Write a comment to the PDUMP2 script streams indicating the memory free + */ + eErr = 
PDumpOSBufprintf(hScript, ui32MaxLen, "-- FREE :%s:PAGE_TABLE\r\n", psDevID->pszPDumpDevName); + if(eErr != PVRSRV_OK) + { + PDUMP_UNLOCK(); + return eErr; + } + PDumpOSWriteString2(hScript, ui32Flags); + + /* + Write to the MMU script stream indicating the memory free + */ + // FIXME: we'll never need more than a 4k page for a pagetable + // fixing to 1 page for now. + // note: when the mmu code supports packed pagetables the PTs + // will be as small as 16bytes + + PDumpOSCPUVAddrToDevPAddr(psDevID->eDeviceType, + hOSMemHandle, /* um - does this mean the pvLinAddr would be ignored? Is that safe? */ + 0, + (IMG_PUINT8) pvLinAddr, + ui32PTSize, + &sDevPAddr); + + { + eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "FREE :%s:PA_" UINTPTR_FMT DEVPADDR_FMT "\r\n", + psDevID->pszPDumpDevName, + (IMG_UINTPTR_T)hUniqueTag, + sDevPAddr.uiAddr); + if(eErr != PVRSRV_OK) + { + PDUMP_UNLOCK(); + return eErr; + } + PDumpOSWriteString2(hScript, ui32Flags); + } + + PDUMP_UNLOCK(); + return PVRSRV_OK; +} + +/************************************************************************** + * Function Name : PDumpPDRegWithFlags + * Inputs : psMMUAttrib + * : ui32Reg + * : ui32Data + * : hUniqueTag + * Outputs : None + * Returns : None + * Description : Kernel Services internal pdump memory API + * Used for registers specifying physical addresses + e.g. 
MMU page directory register +**************************************************************************/ +PVRSRV_ERROR PDumpPDRegWithFlags(PDUMP_MMU_ATTRIB *psMMUAttrib, + IMG_UINT32 ui32Reg, + IMG_UINT32 ui32Data, + IMG_UINT32 ui32Flags, + IMG_HANDLE hUniqueTag) +{ + PVRSRV_ERROR eErr; + IMG_CHAR *pszRegString; + IMG_DEV_PHYADDR sDevPAddr; + + PDUMP_GET_SCRIPT_STRING() + + PDUMP_LOCK(); + if(psMMUAttrib->pszPDRegRegion != IMG_NULL) + { + pszRegString = psMMUAttrib->pszPDRegRegion; + } + else + { + pszRegString = psMMUAttrib->sDevId.pszPDumpRegName; + } + + /* + Write to the MMU script stream indicating the physical page directory + */ +#if defined(SGX_FEATURE_36BIT_MMU) + sDevPAddr.uiAddr = ((ui32Data & psMMUAttrib->ui32PDEMask) << psMMUAttrib->ui32PDEAlignShift); + + eErr = PDumpOSBufprintf(hScript, ui32MaxLen, + "WRW :%s:$1 :%s:PA_" UINTPTR_FMT DEVPADDR_FMT ":0x%08X\r\n", + psMMUAttrib->sDevId.pszPDumpDevName, + psMMUAttrib->sDevId.pszPDumpDevName, + (IMG_UINTPTR_T)hUniqueTag, + sDevPAddr.uiAddr, + ui32Data & ~psMMUAttrib->ui32PDEMask); + if(eErr != PVRSRV_OK) + { + PDUMP_UNLOCK(); + return eErr; + } + PDumpOSWriteString2(hScript, ui32Flags); + eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "SHR :%s:$1 :%s:$1 0x4\r\n", + psMMUAttrib->sDevId.pszPDumpDevName, + psMMUAttrib->sDevId.pszPDumpDevName); + if(eErr != PVRSRV_OK) + { + PDUMP_UNLOCK(); + return eErr; + } + PDumpOSWriteString2(hScript, ui32Flags); + eErr = PDumpOSBufprintf(hScript, ui32MaxLen, + "WRW :%s:0x%08X: %s:$1\r\n", + pszRegString, + ui32Reg, + psMMUAttrib->sDevId.pszPDumpDevName); + if(eErr != PVRSRV_OK) + { + PDUMP_UNLOCK(); + return eErr; + } + PDumpOSWriteString2(hScript, ui32Flags); +#else + sDevPAddr.uiAddr = ((ui32Data & psMMUAttrib->ui32PDEMask) << psMMUAttrib->ui32PDEAlignShift); + + eErr = PDumpOSBufprintf(hScript, + ui32MaxLen, + "WRW :%s:0x%08X :%s:PA_" UINTPTR_FMT DEVPADDR_FMT ":0x%08X\r\n", + pszRegString, + ui32Reg, + psMMUAttrib->sDevId.pszPDumpDevName, + (IMG_UINTPTR_T)hUniqueTag, + 
sDevPAddr.uiAddr, + ui32Data & ~psMMUAttrib->ui32PDEMask); + if(eErr != PVRSRV_OK) + { + PDUMP_UNLOCK(); + return eErr; + } + PDumpOSWriteString2(hScript, ui32Flags); +#endif + + PDUMP_UNLOCK(); + return PVRSRV_OK; +} + +/************************************************************************** + * Function Name : PDumpPDReg + * Inputs : psMMUAttrib + : ui32Reg + * : ui32Data + * : hUniqueTag + * Outputs : None + * Returns : PVRSRV_ERROR + * Description : Kernel Services internal pdump memory API + * Used for registers specifying physical addresses + e.g. MMU page directory register +**************************************************************************/ +PVRSRV_ERROR PDumpPDReg (PDUMP_MMU_ATTRIB *psMMUAttrib, + IMG_UINT32 ui32Reg, + IMG_UINT32 ui32Data, + IMG_HANDLE hUniqueTag) +{ + return PDumpPDRegWithFlags(psMMUAttrib, ui32Reg, ui32Data, PDUMP_FLAGS_CONTINUOUS, hUniqueTag); +} + +/************************************************************************** + * Function Name : PDumpMemPolKM + * Inputs : psMemInfo + * : ui32Offset + * : ui32Value + * : ui32Mask + * : eOperator + * : ui32Flags + * : hUniqueTag + * Outputs : None + * Returns : PVRSRV_ERROR + * Description : Implements Client pdump memory poll API +**************************************************************************/ +PVRSRV_ERROR PDumpMemPolKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo, + IMG_UINT32 ui32Offset, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask, + PDUMP_POLL_OPERATOR eOperator, + IMG_UINT32 ui32Flags, + IMG_HANDLE hUniqueTag) +{ + #define MEMPOLL_DELAY (1000) + #define MEMPOLL_COUNT (2000000000 / MEMPOLL_DELAY) + + PVRSRV_ERROR eErr; + IMG_UINT32 ui32PageOffset; + IMG_UINT8 *pui8LinAddr; + IMG_DEV_PHYADDR sDevPAddr; + IMG_DEV_VIRTADDR sDevVPageAddr; + PDUMP_MMU_ATTRIB *psMMUAttrib; + + PDUMP_GET_SCRIPT_STRING(); + + PDUMP_LOCK(); + if (PDumpOSIsSuspended()) + { + PDUMP_UNLOCK(); + return PVRSRV_OK; + } + + /* Check the offset and size don't exceed the bounds of the allocation */ + 
PVR_ASSERT((ui32Offset + sizeof(IMG_UINT32)) <= psMemInfo->uAllocSize); + + psMMUAttrib = ((BM_BUF*)psMemInfo->sMemBlk.hBuffer)->pMapping->pBMHeap->psMMUAttrib; + + /* + Write a comment to the PDump2 script streams indicating the virtual memory pol + */ + eErr = PDumpOSBufprintf(hScript, + ui32MaxLen, + "-- POL :%s:VA_%08X 0x%08X 0x%08X %d %d %d\r\n", + psMMUAttrib->sDevId.pszPDumpDevName, + psMemInfo->sDevVAddr.uiAddr + ui32Offset, + ui32Value, + ui32Mask, + eOperator, + MEMPOLL_COUNT, + MEMPOLL_DELAY); + if(eErr != PVRSRV_OK) + { + PDUMP_UNLOCK(); + return eErr; + } + PDumpOSWriteString2(hScript, ui32Flags); + + + pui8LinAddr = psMemInfo->pvLinAddrKM; + + /* Advance address by offset */ + pui8LinAddr += ui32Offset; + + /* + query the buffer manager for the physical pages that back the + virtual address + */ + PDumpOSCPUVAddrToPhysPages(psMemInfo->sMemBlk.hOSMemHandle, + ui32Offset, + pui8LinAddr, + psMMUAttrib->ui32DataPageMask, + &ui32PageOffset); + + /* calculate the DevV page address */ + sDevVPageAddr.uiAddr = psMemInfo->sDevVAddr.uiAddr + ui32Offset - ui32PageOffset; + + PVR_ASSERT((sDevVPageAddr.uiAddr & psMMUAttrib->ui32DataPageMask) == 0); + + /* get the physical page address based on the device virtual address */ + BM_GetPhysPageAddr(psMemInfo, sDevVPageAddr, &sDevPAddr); + + /* convert DevP page address to byte address */ + sDevPAddr.uiAddr += ui32PageOffset; + + eErr = PDumpOSBufprintf(hScript, + ui32MaxLen, + "POL :%s:PA_" UINTPTR_FMT DEVPADDR_FMT ":0x%08X 0x%08X 0x%08X %d %d %d\r\n", + psMMUAttrib->sDevId.pszPDumpDevName, + (IMG_UINTPTR_T)hUniqueTag, + sDevPAddr.uiAddr & ~(psMMUAttrib->ui32DataPageMask), + (unsigned int)(sDevPAddr.uiAddr & (psMMUAttrib->ui32DataPageMask)), + ui32Value, + ui32Mask, + eOperator, + MEMPOLL_COUNT, + MEMPOLL_DELAY); + if(eErr != PVRSRV_OK) + { + PDUMP_UNLOCK(); + return eErr; + } + PDumpOSWriteString2(hScript, ui32Flags); + + PDUMP_UNLOCK(); + return PVRSRV_OK; +} + 
+/************************************************************************** + * Function Name : _PDumpMemIntKM + * Inputs : psMemInfo + * : ui32Offset + * : ui32Bytes + * : ui32Flags + * : hUniqueTag + * Outputs : None + * Returns : PVRSRV_ERROR + * Description : Implements Client pdump mem API +**************************************************************************/ +static PVRSRV_ERROR _PDumpMemIntKM(IMG_PVOID pvAltLinAddr, + PVRSRV_KERNEL_MEM_INFO *psMemInfo, + IMG_UINT32 ui32Offset, + IMG_UINT32 ui32PhyOffset, + IMG_UINT32 ui32Bytes, + IMG_UINT32 ui32Flags, + IMG_HANDLE hUniqueTag) +{ + PVRSRV_ERROR eErr; + IMG_UINT32 ui32NumPages; + IMG_UINT32 ui32PageByteOffset; + IMG_UINT32 ui32BlockBytes; + IMG_UINT8* pui8LinAddr; + IMG_UINT8* pui8DataLinAddr = IMG_NULL; + IMG_DEV_VIRTADDR sDevVPageAddr; + IMG_DEV_VIRTADDR sDevVAddr; + IMG_DEV_PHYADDR sDevPAddr; + IMG_UINT32 ui32ParamOutPos; + PDUMP_MMU_ATTRIB *psMMUAttrib; + IMG_UINT32 ui32DataPageSize; + PDUMP_GET_SCRIPT_AND_FILE_STRING(); + + PDUMP_LOCK(); + /* PRQA S 3415 1 */ /* side effects desired */ + if (ui32Bytes == 0 || PDumpOSIsSuspended()) + { + PDUMP_UNLOCK(); + return PVRSRV_OK; + } + + psMMUAttrib = ((BM_BUF*)psMemInfo->sMemBlk.hBuffer)->pMapping->pBMHeap->psMMUAttrib; + + /* + check the offset and size don't exceed the bounds of the allocation + */ + PVR_ASSERT((ui32Offset + ui32Bytes) <= psMemInfo->uAllocSize); + + if (!PDumpOSJTInitialised()) + { + PDUMP_UNLOCK(); + return PVRSRV_ERROR_PDUMP_NOT_AVAILABLE; + } + + /* setup memory addresses */ + if(pvAltLinAddr) + { + pui8DataLinAddr = pvAltLinAddr; + } + else if(psMemInfo->pvLinAddrKM) + { + pui8DataLinAddr = (IMG_UINT8 *)psMemInfo->pvLinAddrKM + ui32Offset; + } + pui8LinAddr = (IMG_UINT8 *)psMemInfo->pvLinAddrKM; + sDevVAddr = psMemInfo->sDevVAddr; + + /* advance address by offset */ + sDevVAddr.uiAddr += ui32Offset; + pui8LinAddr += ui32Offset; + + PVR_ASSERT(pui8DataLinAddr); + + PDumpOSCheckForSplitting(PDumpOSGetStream(PDUMP_STREAM_PARAM2), 
ui32Bytes, ui32Flags); + + ui32ParamOutPos = PDumpOSGetStreamOffset(PDUMP_STREAM_PARAM2); + + /* + write the binary data up-front. + */ + if(!PDumpOSWriteString(PDumpOSGetStream(PDUMP_STREAM_PARAM2), + pui8DataLinAddr, + ui32Bytes, + ui32Flags)) + { + PDUMP_UNLOCK(); + return PVRSRV_ERROR_PDUMP_BUFFER_FULL; + } + + if (PDumpOSGetParamFileNum() == 0) + { + eErr = PDumpOSSprintf(pszFileName, ui32MaxLenFileName, "%%0%%.prm"); + } + else + { + eErr = PDumpOSSprintf(pszFileName, ui32MaxLenFileName, "%%0%%_%u.prm", PDumpOSGetParamFileNum()); + } + if(eErr != PVRSRV_OK) + { + PDUMP_UNLOCK(); + return eErr; + } + + /* + Write a comment to the PDump2 script streams indicating the virtual memory load + */ + eErr = PDumpOSBufprintf(hScript, + ui32MaxLenScript, + "-- LDB :%s:VA_" UINTPTR_FMT "%08X:0x%08X 0x%08X 0x%08X %s\r\n", + psMMUAttrib->sDevId.pszPDumpDevName, + (IMG_UINTPTR_T)hUniqueTag, + psMemInfo->sDevVAddr.uiAddr, + ui32Offset, + ui32Bytes, + ui32ParamOutPos, + pszFileName); + if(eErr != PVRSRV_OK) + { + PDUMP_UNLOCK(); + return eErr; + } + PDumpOSWriteString2(hScript, ui32Flags); + + /* + query the buffer manager for the physical pages that back the + virtual address + */ + + if (psMemInfo->ui32Flags & PVRSRV_MEM_SPARSE) + { + PDumpOSCPUVAddrToPhysPages(psMemInfo->sMemBlk.hOSMemHandle, + ui32PhyOffset, + pui8LinAddr, + psMMUAttrib->ui32DataPageMask, + &ui32PageByteOffset); + } + else + { + PDumpOSCPUVAddrToPhysPages(psMemInfo->sMemBlk.hOSMemHandle, + ui32Offset, + pui8LinAddr, + psMMUAttrib->ui32DataPageMask, + &ui32PageByteOffset); + } + ui32DataPageSize = psMMUAttrib->ui32DataPageMask + 1; + ui32NumPages = (ui32PageByteOffset + ui32Bytes + psMMUAttrib->ui32DataPageMask) / ui32DataPageSize; + + while(ui32NumPages) + { + ui32NumPages--; + + /* calculate the DevV page address */ + sDevVPageAddr.uiAddr = sDevVAddr.uiAddr - ui32PageByteOffset; + + if (ui32DataPageSize <= PDUMP_TEMP_BUFFER_SIZE) + { + /* if a page fits within temp buffer, we should dump in page-aligned 
chunks. */ + PVR_ASSERT((sDevVPageAddr.uiAddr & psMMUAttrib->ui32DataPageMask) == 0); + } + + /* get the physical page address based on the device virtual address */ + BM_GetPhysPageAddr(psMemInfo, sDevVPageAddr, &sDevPAddr); + + /* convert DevP page address to byte address */ + sDevPAddr.uiAddr += ui32PageByteOffset; + + /* how many bytes to dump from this page */ + if (ui32PageByteOffset + ui32Bytes > ui32DataPageSize) + { + /* dump up to the page boundary */ + ui32BlockBytes = ui32DataPageSize - ui32PageByteOffset; + } + else + { + /* dump what's left */ + ui32BlockBytes = ui32Bytes; + } + + eErr = PDumpOSBufprintf(hScript, + ui32MaxLenScript, + "LDB :%s:PA_" UINTPTR_FMT DEVPADDR_FMT ":0x%08X 0x%08X 0x%08X %s\r\n", + psMMUAttrib->sDevId.pszPDumpDevName, + (IMG_UINTPTR_T)hUniqueTag, + sDevPAddr.uiAddr & ~(psMMUAttrib->ui32DataPageMask), + (unsigned int)(sDevPAddr.uiAddr & (psMMUAttrib->ui32DataPageMask)), + ui32BlockBytes, + ui32ParamOutPos, + pszFileName); + if(eErr != PVRSRV_OK) + { + PDUMP_UNLOCK(); + return eErr; + } + PDumpOSWriteString2(hScript, ui32Flags); + + /* update details for next page */ + +#if defined(SGX_FEATURE_VARIABLE_MMU_PAGE_SIZE) + /* page may be larger than pdump temporary buffer */ + ui32PageByteOffset = (ui32PageByteOffset + ui32BlockBytes) % ui32DataPageSize; +#else + /* page offset 0 after first page dump */ + ui32PageByteOffset = 0; +#endif + /* bytes left over */ + ui32Bytes -= ui32BlockBytes; /* PRQA S 3382 */ /* QAC missed MIN test */ + /* advance devVaddr */ + sDevVAddr.uiAddr += ui32BlockBytes; + /* advance the cpuVaddr */ + pui8LinAddr += ui32BlockBytes; + /* update the file write offset */ + ui32ParamOutPos += ui32BlockBytes; + } + + PDUMP_UNLOCK(); + return PVRSRV_OK; +} + +/************************************************************************** + * Function Name : PDumpMemKM + * Inputs : psMemInfo + * : ui32Offset + * : ui32Bytes + * : ui32Flags + * : hUniqueTag + * Outputs : None + * Returns : PVRSRV_ERROR + * Description 
: Implements Client pdump mem API +**************************************************************************/ +PVRSRV_ERROR PDumpMemKM(IMG_PVOID pvAltLinAddr, + PVRSRV_KERNEL_MEM_INFO *psMemInfo, + IMG_UINT32 ui32Offset, + IMG_UINT32 ui32Bytes, + IMG_UINT32 ui32Flags, + IMG_HANDLE hUniqueTag) +{ + /* + For now we don't support dumping sparse allocations that + are from within the kernel, or are from UM but without a + alternative linear address + */ + PVR_ASSERT((psMemInfo->ui32Flags & PVRSRV_MEM_SPARSE) == 0); + + if (psMemInfo->ui32Flags & PVRSRV_MEM_SPARSE) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + else + { + return _PDumpMemIntKM(pvAltLinAddr, + psMemInfo, + ui32Offset, + 0, + ui32Bytes, + ui32Flags, + hUniqueTag); + } +} + +PVRSRV_ERROR PDumpMemPDEntriesKM(PDUMP_MMU_ATTRIB *psMMUAttrib, + IMG_HANDLE hOSMemHandle, + IMG_CPU_VIRTADDR pvLinAddr, + IMG_UINT32 ui32Bytes, + IMG_UINT32 ui32Flags, + IMG_BOOL bInitialisePages, + IMG_HANDLE hUniqueTag1, + IMG_HANDLE hUniqueTag2) +{ + PDUMP_MMU_ATTRIB sMMUAttrib; + + /* Override the (variable) PT size since PDs are always 4K in size */ + sMMUAttrib = *psMMUAttrib; + sMMUAttrib.ui32PTSize = (IMG_UINT32)HOST_PAGESIZE(); + return PDumpMemPTEntriesKM( &sMMUAttrib, + hOSMemHandle, + pvLinAddr, + ui32Bytes, + ui32Flags, + bInitialisePages, + hUniqueTag1, + hUniqueTag2); +} + +/************************************************************************** + * Function Name : PDumpMemPTEntriesKM + * Inputs : psMMUAttrib - MMU attributes for pdump + * : pvLinAddr - CPU address of PT base + * : ui32Bytes - size + * : ui32Flags - pdump flags + * : bInitialisePages - whether to initialise pages from file + * : hUniqueTag1 - ID for PT physical page + * : hUniqueTag2 - ID for target physical page (if !bInitialisePages) + * Outputs : None + * Returns : PVRSRV_ERROR + * Description : Kernel Services internal pdump memory API + * Used for memory without DevVAddress mappings + e.g. 
MMU page tables + FIXME: This function doesn't support non-4k data pages, + e.g. dummy data page +**************************************************************************/ +PVRSRV_ERROR PDumpMemPTEntriesKM(PDUMP_MMU_ATTRIB *psMMUAttrib, + IMG_HANDLE hOSMemHandle, + IMG_CPU_VIRTADDR pvLinAddr, + IMG_UINT32 ui32Bytes, + IMG_UINT32 ui32Flags, + IMG_BOOL bInitialisePages, + IMG_HANDLE hUniqueTag1, + IMG_HANDLE hUniqueTag2) +{ + PVRSRV_ERROR eErr; + IMG_UINT32 ui32NumPages; + IMG_UINT32 ui32PageOffset; + IMG_UINT32 ui32BlockBytes; + IMG_UINT8* pui8LinAddr; + IMG_DEV_PHYADDR sDevPAddr; + IMG_DEV_PHYADDR sDevPAddrTmp; + IMG_CPU_PHYADDR sCpuPAddr; + IMG_UINT32 ui32Offset; + IMG_UINT32 ui32ParamOutPos; + IMG_UINT32 ui32PageMask; /* mask for the physical page backing the PT */ + +#if !defined(SGX_FEATURE_36BIT_MMU) + IMG_DEV_PHYADDR sDevPAddrTmp2; +#endif + PDUMP_GET_SCRIPT_AND_FILE_STRING(); + + PDUMP_LOCK(); + + + if (PDumpOSIsSuspended()) + { + PDUMP_UNLOCK(); + return PVRSRV_OK; + } + + if (!PDumpOSJTInitialised()) + { + PDUMP_UNLOCK(); + return PVRSRV_ERROR_PDUMP_NOT_AVAILABLE; + } + + if (!pvLinAddr) + { + PDUMP_UNLOCK(); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + PDumpOSCheckForSplitting(PDumpOSGetStream(PDUMP_STREAM_PARAM2), ui32Bytes, ui32Flags); + + ui32ParamOutPos = PDumpOSGetStreamOffset(PDUMP_STREAM_PARAM2); + + if (bInitialisePages) + { + /* + write the binary data up-front + Use the 'continuous' memory stream + */ + if (!PDumpOSWriteString(PDumpOSGetStream(PDUMP_STREAM_PARAM2), + pvLinAddr, + ui32Bytes, + ui32Flags | PDUMP_FLAGS_CONTINUOUS)) + { + PDUMP_UNLOCK(); + return PVRSRV_ERROR_PDUMP_BUFFER_FULL; + } + + if (PDumpOSGetParamFileNum() == 0) + { + eErr = PDumpOSSprintf(pszFileName, ui32MaxLenFileName, "%%0%%.prm"); + } + else + { + eErr = PDumpOSSprintf(pszFileName, ui32MaxLenFileName, "%%0%%_%u.prm", PDumpOSGetParamFileNum()); + } + if(eErr != PVRSRV_OK) + { + PDUMP_UNLOCK(); + return eErr; + } + } + + /* + Mask for the physical page address backing 
the PT + The PT size can be less than 4k with variable page size support + The PD size is always 4k + FIXME: This won't work for dumping the dummy data page + */ + ui32PageMask = psMMUAttrib->ui32PTSize - 1; + + /* + Write to the MMU script stream indicating the physical page table entries + */ + /* physical pages that back the virtual address */ + ui32PageOffset = (IMG_UINT32)((IMG_UINTPTR_T)pvLinAddr & (psMMUAttrib->ui32PTSize - 1)); + ui32NumPages = (ui32PageOffset + ui32Bytes + psMMUAttrib->ui32PTSize - 1) / psMMUAttrib->ui32PTSize; + pui8LinAddr = (IMG_UINT8*) pvLinAddr; + + while (ui32NumPages) + { + ui32NumPages--; + /* FIXME: if we used OSMemHandleToCPUPAddr() here, we might be + able to lose the lin addr arg. At least one thing that + would need to be done here is to pass in an offset, as the + calling function doesn't necessarily give us the lin addr + of the start of the mem area. Probably best to keep the + lin addr arg for now - but would be nice to remove the + redundancy */ + sCpuPAddr = OSMapLinToCPUPhys(hOSMemHandle, pui8LinAddr); + sDevPAddr = SysCpuPAddrToDevPAddr(psMMUAttrib->sDevId.eDeviceType, sCpuPAddr); + + /* how many bytes to dump from this page */ + if (ui32PageOffset + ui32Bytes > psMMUAttrib->ui32PTSize) + { + /* dump up to the page boundary */ + ui32BlockBytes = psMMUAttrib->ui32PTSize - ui32PageOffset; + } + else + { + /* dump what's left */ + ui32BlockBytes = ui32Bytes; + } + + /* + Write a comment to the MMU script stream indicating the page table load + */ + + if (bInitialisePages) + { + eErr = PDumpOSBufprintf(hScript, + ui32MaxLenScript, + "LDB :%s:PA_" UINTPTR_FMT DEVPADDR_FMT ":0x%08X 0x%08X 0x%08X %s\r\n", + psMMUAttrib->sDevId.pszPDumpDevName, + (IMG_UINTPTR_T)hUniqueTag1, + sDevPAddr.uiAddr & ~ui32PageMask, + (unsigned int)(sDevPAddr.uiAddr & ui32PageMask), + ui32BlockBytes, + ui32ParamOutPos, + pszFileName); + if(eErr != PVRSRV_OK) + { + PDUMP_UNLOCK(); + return eErr; + } + PDumpOSWriteString2(hScript, ui32Flags | 
PDUMP_FLAGS_CONTINUOUS); + } + else + { + for (ui32Offset = 0; ui32Offset < ui32BlockBytes; ui32Offset += sizeof(IMG_UINT32)) + { + IMG_UINT32 ui32PTE = *((IMG_UINT32 *)(IMG_UINTPTR_T)(pui8LinAddr + ui32Offset)); /* PRQA S 3305 */ /* strict pointer */ + + if ((ui32PTE & psMMUAttrib->ui32PDEMask) != 0) + { + /* PT entry points to non-null page */ +#if defined(SGX_FEATURE_36BIT_MMU) + sDevPAddrTmp.uiAddr = ((ui32PTE & psMMUAttrib->ui32PDEMask) << psMMUAttrib->ui32PTEAlignShift); + + eErr = PDumpOSBufprintf(hScript, + ui32MaxLenScript, + "WRW :%s:$1 :%s:PA_" UINTPTR_FMT DEVPADDR_FMT ":0x0\r\n", + psMMUAttrib->sDevId.pszPDumpDevName, + psMMUAttrib->sDevId.pszPDumpDevName, + (IMG_UINTPTR_T)hUniqueTag2, + sDevPAddrTmp.uiAddr); + if(eErr != PVRSRV_OK) + { + PDUMP_UNLOCK(); + return eErr; + } + PDumpOSWriteString2(hScript, ui32Flags | PDUMP_FLAGS_CONTINUOUS); + eErr = PDumpOSBufprintf(hScript, ui32MaxLenScript, "SHR :%s:$1 :%s:$1 0x4\r\n", + psMMUAttrib->sDevId.pszPDumpDevName, + psMMUAttrib->sDevId.pszPDumpDevName); + if(eErr != PVRSRV_OK) + { + PDUMP_UNLOCK(); + return eErr; + } + PDumpOSWriteString2(hScript, ui32Flags | PDUMP_FLAGS_CONTINUOUS); + eErr = PDumpOSBufprintf(hScript, ui32MaxLenScript, "OR :%s:$1 :%s:$1 0x%08X\r\n", + psMMUAttrib->sDevId.pszPDumpDevName, + psMMUAttrib->sDevId.pszPDumpDevName, + ui32PTE & ~psMMUAttrib->ui32PDEMask); + if(eErr != PVRSRV_OK) + { + PDUMP_UNLOCK(); + return eErr; + } + PDumpOSWriteString2(hScript, ui32Flags | PDUMP_FLAGS_CONTINUOUS); + sDevPAddrTmp.uiAddr = (sDevPAddr.uiAddr + ui32Offset) & ~ui32PageMask; + + eErr = PDumpOSBufprintf(hScript, + ui32MaxLenScript, + "WRW :%s:PA_" UINTPTR_FMT DEVPADDR_FMT ":0x%08X :%s:$1\r\n", + psMMUAttrib->sDevId.pszPDumpDevName, + (IMG_UINTPTR_T)hUniqueTag1, + sDevPAddrTmp.uiAddr, + (unsigned int)((sDevPAddr.uiAddr + ui32Offset) & ui32PageMask), + psMMUAttrib->sDevId.pszPDumpDevName); + if(eErr != PVRSRV_OK) + { + PDUMP_UNLOCK(); + return eErr; + } + PDumpOSWriteString2(hScript, ui32Flags | 
PDUMP_FLAGS_CONTINUOUS); +#else + sDevPAddrTmp.uiAddr = (sDevPAddr.uiAddr + ui32Offset) & ~ui32PageMask; + sDevPAddrTmp2.uiAddr = (ui32PTE & psMMUAttrib->ui32PDEMask) << psMMUAttrib->ui32PTEAlignShift; + + eErr = PDumpOSBufprintf(hScript, + ui32MaxLenScript, + "WRW :%s:PA_" UINTPTR_FMT DEVPADDR_FMT ":0x%08X :%s:PA_" UINTPTR_FMT DEVPADDR_FMT ":0x%08X\r\n", + psMMUAttrib->sDevId.pszPDumpDevName, + (IMG_UINTPTR_T)hUniqueTag1, + sDevPAddrTmp.uiAddr, + (unsigned int)((sDevPAddr.uiAddr + ui32Offset) & ui32PageMask), + psMMUAttrib->sDevId.pszPDumpDevName, + (IMG_UINTPTR_T)hUniqueTag2, + sDevPAddrTmp2.uiAddr, + (unsigned int)(ui32PTE & ~psMMUAttrib->ui32PDEMask)); + if(eErr != PVRSRV_OK) + { + PDUMP_UNLOCK(); + return eErr; + } + PDumpOSWriteString2(hScript, ui32Flags | PDUMP_FLAGS_CONTINUOUS); +#endif + } + else + { +#if !defined(FIX_HW_BRN_31620) + PVR_ASSERT((ui32PTE & psMMUAttrib->ui32PTEValid) == 0UL); +#endif + sDevPAddrTmp.uiAddr = (sDevPAddr.uiAddr + ui32Offset) & ~ui32PageMask; + + eErr = PDumpOSBufprintf(hScript, + ui32MaxLenScript, + "WRW :%s:PA_" UINTPTR_FMT DEVPADDR_FMT ":0x%08X 0x%08X" UINTPTR_FMT "\r\n", + psMMUAttrib->sDevId.pszPDumpDevName, + (IMG_UINTPTR_T)hUniqueTag1, + sDevPAddrTmp.uiAddr, + (unsigned int)((sDevPAddr.uiAddr + ui32Offset) & ui32PageMask), + ui32PTE << psMMUAttrib->ui32PTEAlignShift, + (IMG_UINTPTR_T)hUniqueTag2); + if(eErr != PVRSRV_OK) + { + PDUMP_UNLOCK(); + return eErr; + } + PDumpOSWriteString2(hScript, ui32Flags | PDUMP_FLAGS_CONTINUOUS); + } + } + } + + /* update details for next page */ + + /* page offset 0 after first page dump */ + ui32PageOffset = 0; + /* bytes left over */ + ui32Bytes -= ui32BlockBytes; + /* advance the cpuVaddr */ + pui8LinAddr += ui32BlockBytes; + /* update the file write offset */ + ui32ParamOutPos += ui32BlockBytes; + } + + PDUMP_UNLOCK(); + return PVRSRV_OK; +} + +PVRSRV_ERROR PDumpPDDevPAddrKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo, + IMG_UINT32 ui32Offset, + IMG_DEV_PHYADDR sPDDevPAddr, + IMG_HANDLE 
hUniqueTag1, + IMG_HANDLE hUniqueTag2) +{ + PVRSRV_ERROR eErr; + IMG_UINT32 ui32PageByteOffset; + IMG_DEV_VIRTADDR sDevVAddr; + IMG_DEV_VIRTADDR sDevVPageAddr; + IMG_DEV_PHYADDR sDevPAddr; + IMG_UINT32 ui32Flags = PDUMP_FLAGS_CONTINUOUS; + IMG_UINT32 ui32ParamOutPos; + PDUMP_MMU_ATTRIB *psMMUAttrib; + IMG_UINT32 ui32PageMask; /* mask for the physical page backing the PT */ + IMG_DEV_PHYADDR sDevPAddrTmp; + + PDUMP_GET_SCRIPT_AND_FILE_STRING(); + + PDUMP_LOCK(); + if (!PDumpOSJTInitialised()) + { + PDUMP_UNLOCK(); + return PVRSRV_ERROR_PDUMP_NOT_AVAILABLE; + } + + psMMUAttrib = ((BM_BUF*)psMemInfo->sMemBlk.hBuffer)->pMapping->pBMHeap->psMMUAttrib; + ui32PageMask = psMMUAttrib->ui32PTSize - 1; + + ui32ParamOutPos = PDumpOSGetStreamOffset(PDUMP_STREAM_PARAM2); + + /* Write the PD phys addr to the param stream up front */ + if(!PDumpOSWriteString(PDumpOSGetStream(PDUMP_STREAM_PARAM2), + (IMG_UINT8 *)&sPDDevPAddr, + sizeof(IMG_DEV_PHYADDR), + ui32Flags)) + { + PDUMP_UNLOCK(); + return PVRSRV_ERROR_PDUMP_BUFFER_FULL; + } + + if (PDumpOSGetParamFileNum() == 0) + { + eErr = PDumpOSSprintf(pszFileName, ui32MaxLenFileName, "%%0%%.prm"); + } + else + { + eErr = PDumpOSSprintf(pszFileName, ui32MaxLenFileName, "%%0%%_%u.prm", PDumpOSGetParamFileNum()); + } + if(eErr != PVRSRV_OK) + { + PDUMP_UNLOCK(); + return eErr; + } + + /* Write a comment indicating the PD phys addr write, so that the offsets + * into the param stream increase in correspondence with the number of bytes + * written. 
*/ + sDevPAddrTmp.uiAddr = sPDDevPAddr.uiAddr & ~ui32PageMask; + + eErr = PDumpOSBufprintf(hScript, + ui32MaxLenScript, + "-- LDB :%s:PA_0x" UINTPTR_FMT DEVPADDR_FMT ":0x%08X 0x%08" SIZE_T_FMT_LEN "X 0x%08X %s\r\n", + psMMUAttrib->sDevId.pszPDumpDevName, + (IMG_UINTPTR_T)hUniqueTag1, + sDevPAddrTmp.uiAddr, + (unsigned int)(sPDDevPAddr.uiAddr & ui32PageMask), + sizeof(IMG_DEV_PHYADDR), + ui32ParamOutPos, + pszFileName); + if(eErr != PVRSRV_OK) + { + PDUMP_UNLOCK(); + return eErr; + } + PDumpOSWriteString2(hScript, ui32Flags); + + sDevVAddr = psMemInfo->sDevVAddr; + ui32PageByteOffset = sDevVAddr.uiAddr & ui32PageMask; + + sDevVPageAddr.uiAddr = sDevVAddr.uiAddr - ui32PageByteOffset; + PVR_ASSERT((sDevVPageAddr.uiAddr & 0xFFF) == 0); + + BM_GetPhysPageAddr(psMemInfo, sDevVPageAddr, &sDevPAddr); + sDevPAddr.uiAddr += ui32PageByteOffset + ui32Offset; + +#if defined(SGX_FEATURE_36BIT_MMU) + sDevPAddrTmp.uiAddr = sPDDevPAddr.uiAddr & ~ui32PageMask; + + eErr = PDumpOSBufprintf(hScript, + ui32MaxLenScript, + "WRW :%s:$1 :%s:PA_" UINTPTR_FMT DEVPADDR_FMT ":0x%08X\r\n", + psMMUAttrib->sDevId.pszPDumpDevName, + psMMUAttrib->sDevId.pszPDumpDevName, + (IMG_UINTPTR_T)hUniqueTag2, + sDevPAddrTmp.uiAddr, + (unsigned int)(sPDDevPAddr.uiAddr & ui32PageMask)); + if(eErr != PVRSRV_OK) + { + PDUMP_UNLOCK(); + return eErr; + } + PDumpOSWriteString2(hScript, ui32Flags); + + eErr = PDumpOSBufprintf(hScript, ui32MaxLenScript, "SHR :%s:$1 :%s:$1 0x4\r\n", + psMMUAttrib->sDevId.pszPDumpDevName, + psMMUAttrib->sDevId.pszPDumpDevName); + if(eErr != PVRSRV_OK) + { + PDUMP_UNLOCK(); + return eErr; + } + + PDumpOSWriteString2(hScript, ui32Flags); + sDevPAddrTmp.uiAddr = sDevPAddr.uiAddr & ~(psMMUAttrib->ui32DataPageMask); + + eErr = PDumpOSBufprintf(hScript, + ui32MaxLenScript, + "WRW :%s:PA_" UINTPTR_FMT DEVPADDR_FMT ":0x%08X :%s:$1\r\n", + psMMUAttrib->sDevId.pszPDumpDevName, + (IMG_UINTPTR_T)hUniqueTag1, + sDevPAddrTmp.uiAddr, + (unsigned int)((sDevPAddr.uiAddr) & 
(psMMUAttrib->ui32DataPageMask)), + psMMUAttrib->sDevId.pszPDumpDevName); + if(eErr != PVRSRV_OK) + { + PDUMP_UNLOCK(); + return eErr; + } +#else + eErr = PDumpOSBufprintf(hScript, + ui32MaxLenScript, + "WRW :%s:PA_" UINTPTR_FMT DEVPADDR_FMT ":0x%08X :%s:PA_" UINTPTR_FMT DEVPADDR_FMT ":0x%08X \r\n", + psMMUAttrib->sDevId.pszPDumpDevName, + (IMG_UINTPTR_T)hUniqueTag1, + sDevPAddr.uiAddr & ~ui32PageMask, + (unsigned int)(sDevPAddr.uiAddr & ui32PageMask), + psMMUAttrib->sDevId.pszPDumpDevName, + (IMG_UINTPTR_T)hUniqueTag2, + sPDDevPAddr.uiAddr & psMMUAttrib->ui32PDEMask, + (unsigned int)(sPDDevPAddr.uiAddr & ~psMMUAttrib->ui32PDEMask)); + if(eErr != PVRSRV_OK) + { + PDUMP_UNLOCK(); + return eErr; + } +#endif + PDumpOSWriteString2(hScript, ui32Flags); + + PDUMP_UNLOCK(); + return PVRSRV_OK; +} + +/************************************************************************** + * Function Name : PDumpCommentKM + * Inputs : pszComment, ui32Flags + * Outputs : None + * Returns : None + * Description : Dumps a comment +**************************************************************************/ +PVRSRV_ERROR PDumpCommentKM(IMG_CHAR *pszComment, IMG_UINT32 ui32Flags) +{ + PVRSRV_ERROR eErr; + IMG_CHAR pszCommentPrefix[] = "-- "; /* prefix for comments */ +#if defined(PDUMP_DEBUG_OUTFILES) + IMG_CHAR pszTemp[256]; +#endif + IMG_UINT32 ui32LenCommentPrefix; + PDUMP_GET_SCRIPT_STRING(); + + PDUMP_LOCK(); + PDUMP_DBG(("PDumpCommentKM")); + + /* Put \r \n sequence at the end if it isn't already there */ + PDumpOSVerifyLineEnding(pszComment, ui32MaxLen); + + /* Length of string excluding terminating NULL character */ + ui32LenCommentPrefix = PDumpOSBuflen(pszCommentPrefix, sizeof(pszCommentPrefix)); + + /* Ensure output file is available for writing */ + /* FIXME: is this necessary? 
*/ + if (!PDumpOSWriteString(PDumpOSGetStream(PDUMP_STREAM_SCRIPT2), + (IMG_UINT8*)pszCommentPrefix, + ui32LenCommentPrefix, + ui32Flags)) + { +#if defined(PDUMP_DEBUG_OUTFILES) + if(ui32Flags & PDUMP_FLAGS_CONTINUOUS) + { + PVR_DPF((PVR_DBG_WARNING, "Incomplete comment, %d: %s (continuous set)", + g_ui32EveryLineCounter, pszComment)); + PDUMP_UNLOCK(); + return PVRSRV_ERROR_PDUMP_BUFFER_FULL; + } + else if(ui32Flags & PDUMP_FLAGS_PERSISTENT) + { + PVR_DPF((PVR_DBG_WARNING, "Incomplete comment, %d: %s (persistent set)", + g_ui32EveryLineCounter, pszComment)); + PDUMP_UNLOCK(); + return PVRSRV_ERROR_CMD_NOT_PROCESSED; + } + else + { + PVR_DPF((PVR_DBG_WARNING, "Incomplete comment, %d: %s", + g_ui32EveryLineCounter, pszComment)); + PDUMP_UNLOCK(); + return PVRSRV_ERROR_CMD_NOT_PROCESSED; + } +#else + PVR_DPF((PVR_DBG_WARNING, "Incomplete comment, %s", + pszComment)); + PDUMP_UNLOCK(); + return PVRSRV_ERROR_CMD_NOT_PROCESSED; +#endif + } + +#if defined(PDUMP_DEBUG_OUTFILES) + /* Prefix comment with PID and line number */ + eErr = PDumpOSSprintf(pszTemp, 256, "%d-%d %s", + _PDumpGetPID(), + g_ui32EveryLineCounter, + pszComment); + + /* Append the comment to the script stream */ + eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "%s", + pszTemp); +#else + eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "%s", + pszComment); +#endif + if( (eErr != PVRSRV_OK) && + (eErr != PVRSRV_ERROR_PDUMP_BUF_OVERFLOW)) + { + PDUMP_UNLOCK(); + return eErr; + } + PDumpOSWriteString2(hScript, ui32Flags); + + PDUMP_UNLOCK(); + return PVRSRV_OK; +} + +/************************************************************************** + * Function Name : PDumpCommentWithFlags + * Inputs : psPDev - PDev for PDump device + * : pszFormat - format string for comment + * : ... 
- args for format string
+ * Outputs         : None
+ * Returns         : None
+ * Description     : PDumps a comment
+**************************************************************************/
+PVRSRV_ERROR PDumpCommentWithFlags(IMG_UINT32 ui32Flags, IMG_CHAR * pszFormat, ...)
+{
+	PVRSRV_ERROR eErr;
+	PDUMP_va_list ap;
+	PDUMP_GET_MSG_STRING();
+
+	PDUMP_LOCK_MSG();
+	/* Construct the string: format the varargs into the shared message buffer */
+	PDUMP_va_start(ap, pszFormat);
+	eErr = PDumpOSVSprintf(pszMsg, ui32MaxLen, pszFormat, ap);
+	PDUMP_va_end(ap);
+
+	if(eErr != PVRSRV_OK)
+	{
+		PDUMP_UNLOCK_MSG();
+		return eErr;
+	}
+	/* Emit the formatted text with the caller-supplied pdump flags */
+	eErr = PDumpCommentKM(pszMsg, ui32Flags);
+	PDUMP_UNLOCK_MSG();
+	return eErr;
+}
+
+/**************************************************************************
+ * Function Name  : PDumpComment
+ * Inputs         : psPDev - PDev for PDump device
+ *                : pszFormat - format string for comment
+ *                : ... - args for format string
+ * Outputs        : None
+ * Returns        : None
+ * Description    : PDumps a comment (always with PDUMP_FLAGS_CONTINUOUS)
+**************************************************************************/
+PVRSRV_ERROR PDumpComment(IMG_CHAR *pszFormat, ...)
+{
+	PVRSRV_ERROR eErr;
+	PDUMP_va_list ap;
+	PDUMP_GET_MSG_STRING();
+
+	PDUMP_LOCK_MSG();
+	/* Construct the string */
+	PDUMP_va_start(ap, pszFormat);
+	eErr = PDumpOSVSprintf(pszMsg, ui32MaxLen, pszFormat, ap);
+	PDUMP_va_end(ap);
+
+	if(eErr != PVRSRV_OK)
+	{
+		PDUMP_UNLOCK_MSG();
+		return eErr;
+	}
+	/* Same as PDumpCommentWithFlags, but the comment is always dumped
+	   continuously rather than only in capture frames */
+	eErr = PDumpCommentKM(pszMsg, PDUMP_FLAGS_CONTINUOUS);
+	PDUMP_UNLOCK_MSG();
+	return eErr;
+}
+
+/**************************************************************************
+ * Function Name  : PDumpDriverInfoKM
+ * Inputs         : pszString, ui32Flags
+ * Outputs        : None
+ * Returns        : None
+ * Description    : Writes a driver-info string to the DRIVERINFO stream
+**************************************************************************/
+PVRSRV_ERROR PDumpDriverInfoKM(IMG_CHAR *pszString, IMG_UINT32 ui32Flags)
+{
+	PVRSRV_ERROR eErr;
+	IMG_UINT32 ui32MsgLen;
+	PDUMP_GET_MSG_STRING();
+
+	PDUMP_LOCK_MSG();
+	/* Construct the string */
+	eErr = PDumpOSSprintf(pszMsg, ui32MaxLen, "%s", pszString);
+	if(eErr != PVRSRV_OK)
+	{
+		PDUMP_UNLOCK_MSG();
+		return eErr;
+	}
+
+	/* Put \r \n sequence at the end if it isn't already there */
+	PDumpOSVerifyLineEnding(pszMsg, ui32MaxLen);
+	ui32MsgLen = PDumpOSBuflen(pszMsg, ui32MaxLen);
+
+	if (!PDumpOSWriteString(PDumpOSGetStream(PDUMP_STREAM_DRIVERINFO),
+						  (IMG_UINT8*)pszMsg,
+						  ui32MsgLen,
+						  ui32Flags))
+	{
+		/* Continuous writes must not be dropped; report buffer-full distinctly
+		   so the caller can retry */
+		if (ui32Flags & PDUMP_FLAGS_CONTINUOUS)
+		{
+			PDUMP_UNLOCK_MSG();
+			return PVRSRV_ERROR_PDUMP_BUFFER_FULL;
+		}
+		else
+		{
+			PDUMP_UNLOCK_MSG();
+			return PVRSRV_ERROR_CMD_NOT_PROCESSED;
+		}
+	}
+
+	PDUMP_UNLOCK_MSG();
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PDumpBitmapKM
+
+ @Description
+
+ Dumps a bitmap from device memory to a file
+
+ @Input psDevId
+ @Input pszFileName
+ @Input ui32FileOffset
+ @Input ui32Width
+ @Input ui32Height
+ @Input ui32StrideInBytes
+ @Input sDevBaseAddr
+ @Input ui32Size
+ @Input ePixelFormat
+ @Input eMemFormat
+ @Input ui32PDumpFlags
+
+ @Return PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR PDumpBitmapKM(	PVRSRV_DEVICE_NODE *psDeviceNode,
+							IMG_CHAR *pszFileName,
+							IMG_UINT32 ui32FileOffset,
+							IMG_UINT32 ui32Width,
+							IMG_UINT32 ui32Height,
+							IMG_UINT32 ui32StrideInBytes,
+							IMG_DEV_VIRTADDR sDevBaseAddr,
+							IMG_HANDLE hDevMemContext,
+							IMG_UINT32 ui32Size,
+							PDUMP_PIXEL_FORMAT ePixelFormat,
+							PDUMP_MEM_FORMAT eMemFormat,
+							IMG_UINT32 ui32PDumpFlags)
+{
+	PVRSRV_DEVICE_IDENTIFIER *psDevId = &psDeviceNode->sDevId;
+	IMG_UINT32 ui32MMUContextID;
+	PVRSRV_ERROR eErr;
+	PDUMP_GET_SCRIPT_STRING();
+
+	/* Comment first: PDumpCommentWithFlags takes the pdump lock internally
+	   (via PDumpCommentKM), so it must run before PDUMP_LOCK below */
+	PDumpCommentWithFlags(ui32PDumpFlags, "\r\n-- Dump bitmap of render\r\n");
+
+	PDUMP_LOCK();
+	/* find MMU context ID */
+	ui32MMUContextID = psDeviceNode->pfnMMUGetContextID( hDevMemContext );
+
+	/* SII command: save an image from device-virtual memory, annotated with
+	   geometry and pixel/memory format so the bitmap can be decoded offline */
+	eErr = PDumpOSBufprintf(hScript,
+			ui32MaxLen,
+			"SII %s %s.bin :%s:v%x:0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\r\n",
+			pszFileName,
+			pszFileName,
+			psDevId->pszPDumpDevName,
+			ui32MMUContextID,
+			sDevBaseAddr.uiAddr,
+			ui32Size,
+			ui32FileOffset,
+			ePixelFormat,
+			ui32Width,
+			ui32Height,
+			ui32StrideInBytes,
+			eMemFormat);
+	if(eErr != PVRSRV_OK)
+	{
+		PDUMP_UNLOCK();
+		return eErr;
+	}
+
+	PDumpOSWriteString2( hScript, ui32PDumpFlags);
+
+	PDUMP_UNLOCK();
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PDumpReadRegKM
+
+ @Description
+
+ Dumps a read from a device register to a file
+
+ @Input psConnection : connection info
+ @Input pszFileName
+ @Input ui32FileOffset
+ @Input ui32Address
+ @Input ui32Size
+ @Input ui32PDumpFlags
+
+ @Return PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR PDumpReadRegKM ( IMG_CHAR *pszPDumpRegName,
+								IMG_CHAR *pszFileName,
+								IMG_UINT32 ui32FileOffset,
+								IMG_UINT32 ui32Address,
+								IMG_UINT32 ui32Size,
+								IMG_UINT32 ui32PDumpFlags)
+{
+	PVRSRV_ERROR eErr;
+	PDUMP_GET_SCRIPT_STRING();
+	/* ui32Size is accepted for API symmetry but not used by the SAB command */
+	PVR_UNREFERENCED_PARAMETER(ui32Size);
+
+	PDUMP_LOCK();
+	/* SAB command: save the register contents to pszFileName at the given offset */
+	eErr = PDumpOSBufprintf(hScript,
+			ui32MaxLen,
+			"SAB :%s:0x%08X 0x%08X %s\r\n",
+			pszPDumpRegName,
+			ui32Address,
+			ui32FileOffset,
+			pszFileName);
+	if(eErr != PVRSRV_OK)
+	{
+		PDUMP_UNLOCK();
+		return eErr;
+	}
+
+	PDumpOSWriteString2( hScript, ui32PDumpFlags);
+
+	PDUMP_UNLOCK();
+	return PVRSRV_OK;
+}
+
+/*****************************************************************************
+ @name		PDumpTestNextFrame
+ @brief		Tests whether the next frame will be pdumped
+ @param		ui32CurrentFrame
+ @return	bFrameDumped
+*****************************************************************************/
+IMG_BOOL PDumpTestNextFrame(IMG_UINT32 ui32CurrentFrame)
+{
+	IMG_BOOL	bFrameDumped;
+
+	/*
+		Temporarily advance to the next frame, query whether that frame
+		would be captured, then restore the current frame number.
+	*/
+	(IMG_VOID) PDumpSetFrameKM(ui32CurrentFrame + 1);
+	bFrameDumped = PDumpIsCaptureFrameKM();
+	(IMG_VOID) PDumpSetFrameKM(ui32CurrentFrame);
+
+	return bFrameDumped;
+}
+
+/*****************************************************************************
+ @name		PDumpSignatureRegister
+ @brief		Dumps a single signature register
+ @param		psDevId - device ID
+ @param		ui32Address	- The register address
+ @param		ui32Size - The amount of data to be dumped in bytes
+ @param		pui32FileOffset - Offset of dump in output file
+ @param		ui32Flags - 
Flags
+ @return	none
+*****************************************************************************/
+static PVRSRV_ERROR PDumpSignatureRegister (PVRSRV_DEVICE_IDENTIFIER *psDevId,
+									IMG_CHAR	*pszFileName,
+									IMG_UINT32		ui32Address,
+									IMG_UINT32		ui32Size,
+									IMG_UINT32		*pui32FileOffset,
+									IMG_UINT32		ui32Flags)
+{
+	PVRSRV_ERROR eErr;
+	PDUMP_GET_SCRIPT_STRING();
+
+	/* NOTE(review): no PDUMP_LOCK here - callers of PDumpRegisterRange take
+	   the pdump lock before iterating (see PDump3DSignatureRegisters et al) */
+	eErr = PDumpOSBufprintf(hScript,
+			ui32MaxLen,
+			"SAB :%s:0x%08X 0x%08X %s\r\n",
+			psDevId->pszPDumpRegName,
+			ui32Address,
+			*pui32FileOffset,
+			pszFileName);
+	if(eErr != PVRSRV_OK)
+	{
+		return eErr;
+	}
+
+	PDumpOSWriteString2(hScript, ui32Flags);
+	/* Advance the file offset past the bytes this register occupies */
+	*pui32FileOffset += ui32Size;
+
+	return PVRSRV_OK;
+}
+
+/*****************************************************************************
+ @name		PDumpRegisterRange
+ @brief		Dumps a list of signature registers to a file
+ @param		psDevId - device ID
+ @param		pszFileName - target filename for dump
+ @param		pui32Registers - register list
+ @param		ui32NumRegisters - number of regs to dump
+ @param		pui32FileOffset - file offset
+ @param		ui32Size - size of write in bytes
+ @param		ui32Flags - pdump flags
+ @return	none
+ *****************************************************************************/
+static IMG_VOID PDumpRegisterRange(PVRSRV_DEVICE_IDENTIFIER *psDevId,
+									IMG_CHAR *pszFileName,
+									IMG_UINT32 *pui32Registers,
+									IMG_UINT32 ui32NumRegisters,
+									IMG_UINT32 *pui32FileOffset,
+									IMG_UINT32 ui32Size,
+									IMG_UINT32 ui32Flags)
+{
+	IMG_UINT32 i;
+	/* NOTE(review): the PVRSRV_ERROR returned by PDumpSignatureRegister is
+	   discarded for each register (this function returns void) - confirm a
+	   failed script write is acceptable to ignore here */
+	for (i = 0; i < ui32NumRegisters; i++)
+	{
+		PDumpSignatureRegister(psDevId, pszFileName, pui32Registers[i], ui32Size, pui32FileOffset, ui32Flags);
+	}
+}
+
+/*****************************************************************************
+ @name		PDump3DSignatureRegisters
+ @brief		Dumps the signature registers for 3D modules...
+ @param		psDevId - device ID info
+ @param		pui32Registers - register list
+ @param		ui32NumRegisters - number of regs to dump
+ @return	Error
+*****************************************************************************/
+PVRSRV_ERROR PDump3DSignatureRegisters(PVRSRV_DEVICE_IDENTIFIER *psDevId,
+									IMG_UINT32 ui32DumpFrameNum,
+									IMG_BOOL bLastFrame,
+									IMG_UINT32 *pui32Registers,
+									IMG_UINT32 ui32NumRegisters)
+{
+	PVRSRV_ERROR eErr;
+	IMG_UINT32	ui32FileOffset, ui32Flags;
+	PDUMP_GET_FILE_STRING();
+
+	/* Only tag the writes as LASTFRAME when dumping the final frame */
+	ui32Flags = bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0;
+	ui32FileOffset = 0;
+
+	PDumpCommentWithFlags(ui32Flags, "\r\n-- Dump 3D signature registers\r\n");
+	/* 3D signatures for frame N are written to "outN_3d.sig" */
+	eErr = PDumpOSSprintf(pszFileName, ui32MaxLen, "out%u_3d.sig", ui32DumpFrameNum);
+	if(eErr != PVRSRV_OK)
+	{
+		return eErr;
+	}
+
+	/*
+		Note:
+		PDumpCommentWithFlags will take the lock so we defer the lock
+		taking until here
+	*/
+	PDUMP_LOCK();
+	PDumpRegisterRange(psDevId,
+						pszFileName,
+						pui32Registers,
+						ui32NumRegisters,
+						&ui32FileOffset,
+						sizeof(IMG_UINT32),
+						ui32Flags);
+
+	PDUMP_UNLOCK();
+	return PVRSRV_OK;
+}
+
+/*****************************************************************************
+ @name		PDumpTASignatureRegisters
+ @brief		Dumps the TA signature registers
+ @param		psDevId - device id info
+ @param		ui32DumpFrameNum - frame number
+ @param		ui32TAKickCount - TA kick counter
+ @param		bLastFrame
+ @param		pui32Registers - register list
+ @param		ui32NumRegisters - number of regs to dump
+ @return	Error
+*****************************************************************************/
+PVRSRV_ERROR PDumpTASignatureRegisters	(PVRSRV_DEVICE_IDENTIFIER *psDevId,
+									IMG_UINT32	ui32DumpFrameNum,
+									IMG_UINT32	ui32TAKickCount,
+									IMG_BOOL	bLastFrame,
+									IMG_UINT32	*pui32Registers,
+									IMG_UINT32	ui32NumRegisters)
+{
+	PVRSRV_ERROR eErr;
+	IMG_UINT32	ui32FileOffset, ui32Flags;
+	PDUMP_GET_FILE_STRING();
+
+	ui32Flags = bLastFrame ? 
PDUMP_FLAGS_LASTFRAME : 0;
+	/* Each TA kick appends a fresh set of registers after the previous ones */
+	ui32FileOffset = ui32TAKickCount * ui32NumRegisters * sizeof(IMG_UINT32);
+
+	PDumpCommentWithFlags(ui32Flags, "\r\n-- Dump TA signature registers\r\n");
+	/* TA signatures for frame N are written to "outN_ta.sig" */
+	eErr = PDumpOSSprintf(pszFileName, ui32MaxLen, "out%u_ta.sig", ui32DumpFrameNum);
+	if(eErr != PVRSRV_OK)
+	{
+		return eErr;
+	}
+
+	/*
+		Note:
+		PDumpCommentWithFlags will take the lock so we defer the lock
+		taking until here
+	*/
+	PDUMP_LOCK();
+	PDumpRegisterRange(psDevId,
+						pszFileName,
+						pui32Registers,
+						ui32NumRegisters,
+						&ui32FileOffset,
+						sizeof(IMG_UINT32),
+						ui32Flags);
+	PDUMP_UNLOCK();
+	return PVRSRV_OK;
+}
+
+/*****************************************************************************
+ @name		PDumpCounterRegisters
+ @brief		Dumps the performance counters
+ @param		psDevId - device id info
+ @param		ui32DumpFrameNum - frame number
+ @param		bLastFrame
+ @param		pui32Registers - register list
+ @param		ui32NumRegisters - number of regs to dump
+ @return	Error
+*****************************************************************************/
+PVRSRV_ERROR PDumpCounterRegisters (PVRSRV_DEVICE_IDENTIFIER *psDevId,
+								IMG_UINT32	ui32DumpFrameNum,
+								IMG_BOOL	bLastFrame,
+								IMG_UINT32	*pui32Registers,
+								IMG_UINT32	ui32NumRegisters)
+{
+	PVRSRV_ERROR eErr;
+	IMG_UINT32	ui32FileOffset, ui32Flags;
+	PDUMP_GET_FILE_STRING();
+
+	ui32Flags = bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0UL;
+	ui32FileOffset = 0UL;
+
+	PDumpCommentWithFlags(ui32Flags, "\r\n-- Dump counter registers\r\n");
+	/* Performance counters for frame N are written to "outN.perf" */
+	eErr = PDumpOSSprintf(pszFileName, ui32MaxLen, "out%u.perf", ui32DumpFrameNum);
+	if(eErr != PVRSRV_OK)
+	{
+		return eErr;
+	}
+	/*
+		Note:
+		PDumpCommentWithFlags will take the lock so we defer the lock
+		taking until here
+	*/
+	PDUMP_LOCK();
+	PDumpRegisterRange(psDevId,
+						pszFileName,
+						pui32Registers,
+						ui32NumRegisters,
+						&ui32FileOffset,
+						sizeof(IMG_UINT32),
+						ui32Flags);
+
+	PDUMP_UNLOCK();
+	return PVRSRV_OK;
+}
+
+/*****************************************************************************
+ @name		PDumpRegRead
+ @brief		Dump signature register read to script
+ @param		pszPDumpRegName - pdump register-bank name
+ @param		ui32RegOffset - register offset
+ @param		ui32Flags - pdump flags
+ @return	Error
+*****************************************************************************/
+PVRSRV_ERROR PDumpRegRead(IMG_CHAR *pszPDumpRegName,
+							const IMG_UINT32 ui32RegOffset,
+							IMG_UINT32 ui32Flags)
+{
+	PVRSRV_ERROR eErr;
+	PDUMP_GET_SCRIPT_STRING();
+
+	PDUMP_LOCK();
+	/* RDW command: record a read of the register in the script stream */
+	eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "RDW :%s:0x%X\r\n",
+							pszPDumpRegName,
+							ui32RegOffset);
+	if(eErr != PVRSRV_OK)
+	{
+		PDUMP_UNLOCK();
+		return eErr;
+	}
+	PDumpOSWriteString2(hScript, ui32Flags);
+
+	PDUMP_UNLOCK();
+	return PVRSRV_OK;
+}
+
+/*****************************************************************************
+ @name		PDumpSaveMemKM
+ @brief		Save device memory to a file
+ @param		psDevId
+ @param		pszFileName
+ @param		ui32FileOffset
+ @param		sDevBaseAddr
+ @param		ui32Size
+ @param		ui32PDumpFlags
+ @return	Error
+*****************************************************************************/
+PVRSRV_ERROR PDumpSaveMemKM (PVRSRV_DEVICE_IDENTIFIER *psDevId,
+							 IMG_CHAR			*pszFileName,
+							 IMG_UINT32			ui32FileOffset,
+							 IMG_DEV_VIRTADDR	sDevBaseAddr,
+							 IMG_UINT32 		ui32Size,
+							 IMG_UINT32 		ui32MMUContextID,
+							 IMG_UINT32			ui32PDumpFlags)
+{
+	PVRSRV_ERROR eErr;
+	
PDUMP_GET_SCRIPT_STRING();
+
+	PDUMP_LOCK();
+	/* SAB with a device-virtual source (v<context>): save ui32Size bytes of
+	   device memory at sDevBaseAddr into "<pszFileName>.bin" at ui32FileOffset */
+	eErr = PDumpOSBufprintf(hScript,
+							ui32MaxLen,
+							"SAB :%s:v%x:0x%08X 0x%08X 0x%08X %s.bin\r\n",
+							psDevId->pszPDumpDevName,
+							ui32MMUContextID,
+							sDevBaseAddr.uiAddr,
+							ui32Size,
+							ui32FileOffset,
+							pszFileName);
+	if(eErr != PVRSRV_OK)
+	{
+		PDUMP_UNLOCK();
+		return eErr;
+	}
+
+	PDumpOSWriteString2(hScript, ui32PDumpFlags);
+
+	PDUMP_UNLOCK();
+	return PVRSRV_OK;
+}
+
+/*****************************************************************************
+ @name		PDumpCycleCountRegRead
+ @brief		Dump counter register read to script
+ @param		ui32RegOffset - register offset
+ @param		bLastFrame
+ @return	Error
+*****************************************************************************/
+PVRSRV_ERROR PDumpCycleCountRegRead(PVRSRV_DEVICE_IDENTIFIER *psDevId,
+									const IMG_UINT32 ui32RegOffset,
+									IMG_BOOL bLastFrame)
+{
+	PVRSRV_ERROR eErr;
+	PDUMP_GET_SCRIPT_STRING();
+
+	PDUMP_LOCK();
+	/* RDW command: record a read of the cycle-count register */
+	eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "RDW :%s:0x%X\r\n",
+							psDevId->pszPDumpRegName,
+							ui32RegOffset);
+	if(eErr != PVRSRV_OK)
+	{
+		PDUMP_UNLOCK();
+		return eErr;
+	}
+	PDumpOSWriteString2(hScript, bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0);
+
+	PDUMP_UNLOCK();
+	return PVRSRV_OK;
+}
+
+
+/*!
+******************************************************************************
+
+ @Function	PDumpSignatureBuffer
+
+ @Description
+
+ Dumps a signature registers buffer
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PDumpSignatureBuffer (PVRSRV_DEVICE_IDENTIFIER *psDevId,
+								   IMG_CHAR *pszFileName,
+								   IMG_CHAR *pszBufferType,
+								   IMG_UINT32 ui32FileOffset,
+								   IMG_DEV_VIRTADDR sDevBaseAddr,
+								   IMG_UINT32 ui32Size,
+								   IMG_UINT32 ui32MMUContextID,
+								   IMG_UINT32 ui32PDumpFlags)
+{
+	/* Describe the buffer layout in the pdump script before saving the raw
+	   contents, so the resulting dump is self-documenting */
+	PDumpCommentWithFlags(ui32PDumpFlags, "\r\n-- Dump microkernel %s signature Buffer\r\n",
+						  pszBufferType);
+	PDumpCommentWithFlags(ui32PDumpFlags, "Buffer format (sizes in 32-bit words):\r\n");
+	PDumpCommentWithFlags(ui32PDumpFlags, "\tNumber of signatures per sample (1)\r\n");
+	PDumpCommentWithFlags(ui32PDumpFlags, "\tNumber of samples (1)\r\n");
+	PDumpCommentWithFlags(ui32PDumpFlags, "\tSignature register offsets (1 * number of signatures)\r\n");
+	PDumpCommentWithFlags(ui32PDumpFlags, "\tSignature sample values (number of samples * number of signatures)\r\n");
+	PDumpCommentWithFlags(ui32PDumpFlags, "Note: If buffer is full, last sample is final state after test completed\r\n");
+	/* Save the buffer itself from device-virtual memory to the target file */
+	return PDumpSaveMemKM(psDevId, pszFileName, ui32FileOffset, sDevBaseAddr, ui32Size,
+						  ui32MMUContextID, ui32PDumpFlags);
+}
+
+
+/*!
******************************************************************************

 @Function	PDumpHWPerfCBKM

 @Description

 Dumps the HW Perf Circular Buffer

 @Return   PVRSRV_ERROR

******************************************************************************/
PVRSRV_ERROR PDumpHWPerfCBKM (PVRSRV_DEVICE_IDENTIFIER *psDevId,
							  IMG_CHAR			*pszFileName,
							  IMG_UINT32		ui32FileOffset,
							  IMG_DEV_VIRTADDR	sDevBaseAddr,
							  IMG_UINT32 		ui32Size,
							  IMG_UINT32		ui32MMUContextID,
							  IMG_UINT32 		ui32PDumpFlags)
{
	/* Annotate the script, then delegate the actual dump to PDumpSaveMemKM. */
	PDumpCommentWithFlags(ui32PDumpFlags, "\r\n-- Dump Hardware Performance Circular Buffer\r\n");
	return PDumpSaveMemKM(psDevId, pszFileName, ui32FileOffset, sDevBaseAddr, ui32Size,
						  ui32MMUContextID, ui32PDumpFlags);
}


/*****************************************************************************
 FUNCTION	: PDumpCBP

 PURPOSE	: Dump CBP (circular-buffer poll) command to script.
			  Translates the read-offset location of a circular buffer from
			  its kernel virtual / device virtual address to a device
			  physical address and emits a "CBP" line for it.

 PARAMETERS	: psROffMemInfo - mem info holding the read-offset word
			  ui32ROffOffset - byte offset of the read-offset word
			  ui32WPosVal / ui32PacketSize / ui32BufferSize - CBP operands
			  ui32Flags - PDump flags for the script write
			  hUniqueTag - tag identifying the allocation in the script

 RETURNS	: None
*****************************************************************************/
PVRSRV_ERROR PDumpCBP(PPVRSRV_KERNEL_MEM_INFO	psROffMemInfo,
					  IMG_UINT32				ui32ROffOffset,
					  IMG_UINT32				ui32WPosVal,
					  IMG_UINT32				ui32PacketSize,
					  IMG_UINT32				ui32BufferSize,
					  IMG_UINT32				ui32Flags,
					  IMG_HANDLE				hUniqueTag)
{
	PVRSRV_ERROR eErr;
	IMG_UINT32 ui32PageOffset;
	IMG_UINT8 *pui8LinAddr;
	IMG_DEV_VIRTADDR sDevVAddr;
	IMG_DEV_PHYADDR sDevPAddr;
	IMG_DEV_VIRTADDR sDevVPageAddr;
	//IMG_CPU_PHYADDR CpuPAddr;
	PDUMP_MMU_ATTRIB *psMMUAttrib;
	/* Presumably declares the local script buffer (hScript/ui32MaxLen)
	 * used below — TODO confirm against pdump_km.h. */
	PDUMP_GET_SCRIPT_STRING();

	PDUMP_LOCK();
	psMMUAttrib = ((BM_BUF*)psROffMemInfo->sMemBlk.hBuffer)->pMapping->pBMHeap->psMMUAttrib;

	/* Check the offset and size don't exceed the bounds of the allocation */
	PVR_ASSERT((ui32ROffOffset + sizeof(IMG_UINT32)) <= psROffMemInfo->uAllocSize);

	pui8LinAddr = psROffMemInfo->pvLinAddrKM;
	sDevVAddr = psROffMemInfo->sDevVAddr;

	/* Advance addresses by offset */
	pui8LinAddr += ui32ROffOffset;
	sDevVAddr.uiAddr += ui32ROffOffset;

	/*
		query the buffer manager for the physical pages that back the
		virtual address
	*/
	PDumpOSCPUVAddrToPhysPages(psROffMemInfo->sMemBlk.hOSMemHandle,
							   ui32ROffOffset,
							   pui8LinAddr,
							   psMMUAttrib->ui32DataPageMask,
							   &ui32PageOffset);

	/* calculate the DevV page address */
	sDevVPageAddr.uiAddr = sDevVAddr.uiAddr - ui32PageOffset;

	/* NOTE(review): asserts 4K alignment; presumably data pages here are
	 * always 4K — confirm for configurations with larger page sizes. */
	PVR_ASSERT((sDevVPageAddr.uiAddr & 0xFFF) == 0);

	/* get the physical page address based on the device virtual address */
	BM_GetPhysPageAddr(psROffMemInfo, sDevVPageAddr, &sDevPAddr);

	/* convert DevP page address to byte address */
	sDevPAddr.uiAddr += ui32PageOffset;

	/* Emit the CBP line: physical page base, then in-page offset and the
	 * write position / packet size / buffer size operands. */
	eErr = PDumpOSBufprintf(hScript,
							ui32MaxLen,
							"CBP :%s:PA_" UINTPTR_FMT DEVPADDR_FMT ":0x%08X 0x%08X 0x%08X 0x%08X\r\n",
							psMMUAttrib->sDevId.pszPDumpDevName,
							(IMG_UINTPTR_T)hUniqueTag,
							sDevPAddr.uiAddr & ~(psMMUAttrib->ui32DataPageMask),
							(unsigned int)(sDevPAddr.uiAddr & (psMMUAttrib->ui32DataPageMask)),
							ui32WPosVal,
							ui32PacketSize,
							ui32BufferSize);
	if(eErr != PVRSRV_OK)
	{
		/* Lock is released on every exit path. */
		PDUMP_UNLOCK();
		return eErr;
	}
	PDumpOSWriteString2(hScript, ui32Flags);

	PDUMP_UNLOCK();
	return PVRSRV_OK;
}


/**************************************************************************
 * Function Name  : PDumpIDLWithFlags
 * Inputs         : Idle time in clocks
 * Outputs        : None
 * Returns        : Error
 * Description    : Dump IDL command to script
**************************************************************************/
PVRSRV_ERROR PDumpIDLWithFlags(IMG_UINT32 ui32Clocks, IMG_UINT32 ui32Flags)
{
	PVRSRV_ERROR eErr;
	PDUMP_GET_SCRIPT_STRING();

	PDUMP_LOCK();
	PDUMP_DBG(("PDumpIDLWithFlags"));

	eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "IDL %u\r\n", ui32Clocks);
	if(eErr != PVRSRV_OK)
	{
		PDUMP_UNLOCK();
		return eErr;
	}
	PDumpOSWriteString2(hScript, ui32Flags);

	PDUMP_UNLOCK();
	return PVRSRV_OK;
}


/**************************************************************************
 * Function Name  : PDumpIDL
 * Inputs         : Idle time in clocks
 * Outputs        : None
 * Returns        : Error
 * Description    : Dump IDL command to script
**************************************************************************/
/* Convenience wrapper: IDL with the CONTINUOUS flag. */
PVRSRV_ERROR PDumpIDL(IMG_UINT32 ui32Clocks)
{
	return PDumpIDLWithFlags(ui32Clocks, PDUMP_FLAGS_CONTINUOUS);
}

/**************************************************************************
 * Function Name  : PDumpMemUM
 * Inputs         : pvAltLinAddrUM
 *				  : pvLinAddrUM
 *				  : psMemInfo
 *				  : ui32Offset
 *				  : ui32Bytes
 *				  : ui32Flags
 *				  : hUniqueTag
 * Outputs        : None
 * Returns        : PVRSRV_ERROR
 * Description    : Dump user mode memory. If a kernel mapping exists the
 *                  dump is taken directly from it; otherwise the user-mode
 *                  data is copied into a shared temp buffer a chunk at a
 *                  time (page at a time for sparse allocations) and dumped
 *                  from there.
**************************************************************************/
PVRSRV_ERROR PDumpMemUM(PVRSRV_PER_PROCESS_DATA *psPerProc,
						IMG_PVOID pvAltLinAddrUM,
						IMG_PVOID pvLinAddrUM,
						PVRSRV_KERNEL_MEM_INFO *psMemInfo,
						IMG_UINT32 ui32Offset,
						IMG_UINT32 ui32Bytes,
						IMG_UINT32 ui32Flags,
						IMG_HANDLE hUniqueTag)
{
	IMG_VOID *pvAddrUM;
	IMG_VOID *pvAddrKM;
	PVRSRV_ERROR eError;

	if (psMemInfo->pvLinAddrKM != IMG_NULL && pvAltLinAddrUM == IMG_NULL)
	{
		/*
		 * There is a kernel virtual address for the memory that is
		 * being dumped, and no alternate user mode linear address.
		 */
		return PDumpMemKM(IMG_NULL,
						  psMemInfo,
						  ui32Offset,
						  ui32Bytes,
						  ui32Flags,
						  hUniqueTag);
	}

	/* Prefer the alternate UM address; otherwise the UM address advanced by
	 * the offset; NULL if neither was supplied (rejected below). */
	pvAddrUM = (pvAltLinAddrUM != IMG_NULL) ? pvAltLinAddrUM : ((pvLinAddrUM != IMG_NULL) ? VPTR_PLUS(pvLinAddrUM, ui32Offset) : IMG_NULL);

	pvAddrKM = GetTempBuffer();

	/*
	 * The memory to be dumped needs to be copied in from
	 * the client. Dump the memory, a buffer at a time.
	 */
	PVR_ASSERT(pvAddrUM != IMG_NULL && pvAddrKM != IMG_NULL);
	if (pvAddrUM == IMG_NULL || pvAddrKM == IMG_NULL)
	{
		PVR_DPF((PVR_DBG_ERROR, "PDumpMemUM: Nothing to dump"));
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	if (ui32Bytes > PDUMP_TEMP_BUFFER_SIZE)
	{
		PDumpCommentWithFlags(ui32Flags, "Dumping 0x%08x bytes of memory, in blocks of 0x%08x bytes", ui32Bytes, (IMG_UINT32)PDUMP_TEMP_BUFFER_SIZE);
	}

	if (psMemInfo->ui32Flags & PVRSRV_MEM_SPARSE)
	{
		/*
			In case of sparse mappings we can't just copy the full range as not
			all pages are valid, instead we walk a page at a time only dumping
			if the a page exists at that address
		*/
		IMG_UINT32 ui32BytesRemain = ui32Bytes;
		IMG_UINT32 ui32BytesToCopy = 0;
		IMG_UINT32 ui32InPageStart = ui32Offset & (~HOST_PAGEMASK);
		IMG_UINT32 ui32PageOffset = ui32Offset & (HOST_PAGEMASK);
		IMG_UINT32 ui32InPhyPageStart = 0;

		if(ui32InPageStart != 0)
		{
			IMG_UINT32 ui32DummyInPageStart = 0;

			/* Count how many of the pages before the start offset are
			 * actually mapped, to establish the initial physical offset. */
			while(ui32DummyInPageStart != ui32InPageStart)
			{
				if (BM_MapPageAtOffset(BM_MappingHandleFromBuffer(psMemInfo->sMemBlk.hBuffer), ui32DummyInPageStart))
				{
					ui32InPhyPageStart += HOST_PAGESIZE();
				}
				ui32DummyInPageStart += HOST_PAGESIZE();
			}
		}

		do
		{
			/* First iteration may start mid-page; later ones are page-aligned
			 * (ui32PageOffset is reset to 0 at the bottom of the loop). */
			ui32BytesToCopy = MIN(HOST_PAGESIZE() - ui32PageOffset, ui32BytesRemain);

			if (BM_MapPageAtOffset(BM_MappingHandleFromBuffer(psMemInfo->sMemBlk.hBuffer), ui32InPageStart))
			{
				eError = OSCopyFromUser(psPerProc,
							pvAddrKM,
							pvAddrUM,
							ui32BytesToCopy);
				if (eError != PVRSRV_OK)
				{
					PVR_DPF((PVR_DBG_ERROR, "PDumpMemUM: OSCopyFromUser failed (%d)", eError));
					return eError;
				}

				/*
					At this point we know we're dumping a valid page so call
					the internal function
				*/
				eError = _PDumpMemIntKM(pvAddrKM,
										psMemInfo,
										ui32PageOffset + ui32InPageStart,
										ui32PageOffset + ui32InPhyPageStart,
										ui32BytesToCopy,
										ui32Flags,
										hUniqueTag);

				if (eError != PVRSRV_OK)
				{
					/*
					 * If writing fails part way through, then some
					 * investigation is needed.
					 */
					if (ui32BytesToCopy != 0)
					{
						PVR_DPF((PVR_DBG_ERROR, "PDumpMemUM: PDumpMemKM failed (%d)", eError));
					}
					PVR_ASSERT(ui32BytesToCopy == 0);
					return eError;
				}
				ui32InPhyPageStart += HOST_PAGESIZE();
			}

			/* Advance even over unmapped pages: the UM pointer and remaining
			 * count still move by the bytes the page would have held. */
			VPTR_INC(pvAddrUM, ui32BytesToCopy);
			ui32BytesRemain -= ui32BytesToCopy;
			ui32InPageStart += HOST_PAGESIZE();
			ui32PageOffset = 0;

		} while(ui32BytesRemain);
	}
	else
	{
		/* Dense allocation: copy and dump in temp-buffer-sized chunks. */
		IMG_UINT32 ui32CurrentOffset = ui32Offset;
		IMG_UINT32 ui32BytesDumped;

		for (ui32BytesDumped = 0; ui32BytesDumped < ui32Bytes;)
		{
			IMG_UINT32 ui32BytesToDump = MIN(PDUMP_TEMP_BUFFER_SIZE, ui32Bytes - ui32BytesDumped);

			eError = OSCopyFromUser(psPerProc,
						pvAddrKM,
						pvAddrUM,
						ui32BytesToDump);
			if (eError != PVRSRV_OK)
			{
				PVR_DPF((PVR_DBG_ERROR, "PDumpMemUM: OSCopyFromUser failed (%d)", eError));
				return eError;
			}

			eError = PDumpMemKM(pvAddrKM,
							   psMemInfo,
							   ui32CurrentOffset,
							   ui32BytesToDump,
							   ui32Flags,
							   hUniqueTag);

			if (eError != PVRSRV_OK)
			{
				/*
				 * If writing fails part way through, then some
				 * investigation is needed.
				 */
				if (ui32BytesDumped != 0)
				{
					PVR_DPF((PVR_DBG_ERROR, "PDumpMemUM: PDumpMemKM failed (%d)", eError));
				}
				PVR_ASSERT(ui32BytesDumped == 0);
				return eError;
			}

			VPTR_INC(pvAddrUM, ui32BytesToDump);
			ui32CurrentOffset += ui32BytesToDump;
			ui32BytesDumped += ui32BytesToDump;
		}
	}

	return PVRSRV_OK;
}


/**************************************************************************
 * Function Name  : _PdumpAllocMMUContext
 * Inputs         : pui32MMUContextID
 * Outputs        : None
 * Returns        : PVRSRV_ERROR
 * Description    : pdump util to allocate MMU contexts
**************************************************************************/
static PVRSRV_ERROR _PdumpAllocMMUContext(IMG_UINT32 *pui32MMUContextID)
{
	IMG_UINT32 i;

	/* there are MAX_PDUMP_MMU_CONTEXTS contexts available, find one */
	/* NOTE(review): the source text is corrupted from here — the loop
	 * condition (presumably "i < MAX_PDUMP_MMU_CONTEXTS"), the rest of this
	 * function, and the beginning of at least one following function have
	 * been lost in extraction. The statements after the truncation point
	 * are the tail of a different function (the "SAB" write below suggests
	 * a save-memory routine operating on psMemInfo). Recover the missing
	 * text from the original patch before relying on this region. */
	for(i=0; ipvLinAddrKM & psMMUAttrib->ui32DataPageMask);

	/* calculate the DevV page address */
	sDevVPageAddr.uiAddr = uiAddr - ui32PageOffset;

	/* get the physical page address based on the device virtual address */
	BM_GetPhysPageAddr(psMemInfo, sDevVPageAddr, &sDevPAddr);

	/* convert DevP page address to byte address */
	sDevPAddr.uiAddr += ui32PageOffset;

	/* NOTE(review): unlike PDumpCBP, the PDumpOSBufprintf return value is
	 * ignored here — confirm this is intentional. */
	PDumpOSBufprintf(hScript,
			 ui32MaxLen,
			 "SAB :%s:PA_" UINTPTR_FMT DEVPADDR_FMT ":0x%08X 0x%08X 0x%08X %s\r\n",
			 psMMUAttrib->sDevId.pszPDumpDevName,
			 (IMG_UINTPTR_T)hUniqueTag,
			 (sDevPAddr.uiAddr & ~psMMUAttrib->ui32DataPageMask),
			 (unsigned int)(sDevPAddr.uiAddr & psMMUAttrib->ui32DataPageMask),
			 ui32Size,
			 ui32FileOffset,
			 pszFileName);

	PDumpOSWriteString2(hScript, ui32PDumpFlags);

	PDUMP_UNLOCK();
	return PVRSRV_OK;
}

/*****************************************************************************
 FUNCTION	: PDumpRegBasedCBP

 PURPOSE	: Dump CBP command to script, addressed by a named register
			  rather than a physical address.

 PARAMETERS	:

 RETURNS	: None
*****************************************************************************/
PVRSRV_ERROR PDumpRegBasedCBP(IMG_CHAR		*pszPDumpRegName,
							  IMG_UINT32	ui32RegOffset,
							  IMG_UINT32	ui32WPosVal,
							  IMG_UINT32	ui32PacketSize,
							  IMG_UINT32	ui32BufferSize,
							  IMG_UINT32	ui32Flags)
{
	PDUMP_GET_SCRIPT_STRING();

	PDUMP_LOCK();

	/* NOTE(review): PDumpOSBufprintf return value is ignored here, unlike
	 * PDumpCBP above — confirm this is intentional. */
	PDumpOSBufprintf(hScript,
			 ui32MaxLen,
			 "CBP :%s:0x%08X 0x%08X 0x%08X 0x%08X\r\n",
			 pszPDumpRegName,
			 ui32RegOffset,
			 ui32WPosVal,
			 ui32PacketSize,
			 ui32BufferSize);
	PDumpOSWriteString2(hScript, ui32Flags);

	PDUMP_UNLOCK();
	return PVRSRV_OK;
}


/****************************************************
 * Non-uitron code here.
 * For example, code communicating with dbg driver.
 ***************************************************/
/* PRQA S 5087 1 */ /* include file needed here */
#include "syscommon.h"

/**************************************************************************
 * Function Name : PDumpConnectionNotify
 * Description   : Called by the debugdrv to tell Services that pdump has
 *                 connected
 *                 NOTE: No debugdrv on uitron.
 **************************************************************************/
IMG_EXPORT IMG_VOID PDumpConnectionNotify(IMG_VOID)
{
	SYS_DATA	*psSysData;
	PVRSRV_DEVICE_NODE	*psThis;
	PVR_DPF((PVR_DBG_WARNING, "PDump has connected."));

	/* Loop over all known devices */
	SysAcquireData(&psSysData);

	psThis = psSysData->psDeviceNodeList;
	while (psThis)
	{
		if (psThis->pfnPDumpInitDevice)
		{
			/* Reset pdump according to connected device */
			psThis->pfnPDumpInitDevice(psThis);
		}
		psThis = psThis->psNext;
	}
}

/*****************************************************************************
 * Function Name : DbgWrite
 * Inputs        : psStream - debug stream to write to
					pui8Data - buffer
					ui32BCount - buffer length
					ui32Flags - flags, e.g. continuous, LF
 * Outputs       : None
 * Returns       : Bytes written
 * Description   : Write a block of data to a debug stream
				   NOTE: No debugdrv on uitron.
 *****************************************************************************/
IMG_UINT32 DbgWrite(PDBG_STREAM psStream, IMG_UINT8 *pui8Data, IMG_UINT32 ui32BCount, IMG_UINT32 ui32Flags)
{
	IMG_UINT32	ui32BytesWritten = 0;
	IMG_UINT32	ui32Off = 0;
	PDBG_STREAM_CONTROL psCtrl = psStream->psCtrl;

	/* Return immediately if marked as "never" */
	if ((ui32Flags & PDUMP_FLAGS_NEVER) != 0)
	{
		return ui32BCount;
	}

#if defined(SUPPORT_PDUMP_MULTI_PROCESS)
	/* Return if process is not marked for pdumping, unless it's persistent.
	 */
	if ( (_PDumpIsProcessActive() == IMG_FALSE ) &&
		 ((ui32Flags & PDUMP_FLAGS_PERSISTENT) == 0) && psCtrl->bInitPhaseComplete)
	{
		return ui32BCount;
	}
#endif

	/* Send persistent data first ...
	 * If we're still initialising the params will be captured to the
	 * init stream in the call to pfnDBGDrivWrite2 below.
	 */
	if ( ((ui32Flags & PDUMP_FLAGS_PERSISTENT) != 0) && (psCtrl->bInitPhaseComplete) )
	{
		while (ui32BCount > 0)
		{
			/*
				Params marked as persistent should be appended to the init phase.
				For example window system mem mapping of the primary surface.
			*/
			ui32BytesWritten = PDumpOSDebugDriverWrite(	psStream,
											PDUMP_WRITE_MODE_PERSISTENT,
											&pui8Data[ui32Off], ui32BCount, 1, 0);

			/* 0 means the debugdrv buffer is full: yield so the pdump
			 * client can drain it, then retry. */
			if (ui32BytesWritten == 0)
			{
				PVR_DPF((PVR_DBG_ERROR, "DbgWrite: Failed to send persistent data"));
				PDumpOSReleaseExecution();
			}

			if (ui32BytesWritten != 0xFFFFFFFFU)
			{
				ui32Off += ui32BytesWritten;
				ui32BCount -= ui32BytesWritten;
			}
			else
			{
				/* 0xFFFFFFFF is the debugdrv's hard-error sentinel. */
				PVR_DPF((PVR_DBG_ERROR, "DbgWrite: Failed to send persistent data"));
				if( (psCtrl->ui32Flags & DEBUG_FLAGS_READONLY) != 0)
				{
					/* suspend pdump to prevent flooding kernel log buffer */
					PDumpSuspendKM();
				}
				return 0xFFFFFFFFU;
			}
		}

		/* reset buffer counters */
		ui32BCount = ui32Off; ui32Off = 0; ui32BytesWritten = 0;
	}

	while (((IMG_UINT32) ui32BCount > 0) && (ui32BytesWritten != 0xFFFFFFFFU))
	{
		/* If we're in the init phase we treat persisent as meaning continuous */
		if (((ui32Flags & PDUMP_FLAGS_CONTINUOUS) != 0) || ((ui32Flags & PDUMP_FLAGS_PERSISTENT) != 0))
		{
			/*
				If pdump client (or its equivalent) isn't running then throw continuous data away.
			*/
			if (((psCtrl->ui32CapMode & DEBUG_CAPMODE_FRAMED) != 0) &&
				 (psCtrl->ui32Start == 0xFFFFFFFFU) &&
				 (psCtrl->ui32End == 0xFFFFFFFFU) &&
				  psCtrl->bInitPhaseComplete)
			{
				/* Pretend the whole buffer was consumed. */
				ui32BytesWritten = ui32BCount;
			}
			else
			{
				ui32BytesWritten = PDumpOSDebugDriverWrite(	psStream,
												PDUMP_WRITE_MODE_CONTINUOUS,
												&pui8Data[ui32Off], ui32BCount, 1, 0);
			}
		}
		else
		{
			if (ui32Flags & PDUMP_FLAGS_LASTFRAME)
			{
				IMG_UINT32	ui32DbgFlags;

				ui32DbgFlags = 0;
				if (ui32Flags & PDUMP_FLAGS_RESETLFBUFFER)
				{
					ui32DbgFlags |= WRITELF_FLAGS_RESETBUF;
				}

				ui32BytesWritten = PDumpOSDebugDriverWrite(	psStream,
												PDUMP_WRITE_MODE_LASTFRAME,
												&pui8Data[ui32Off], ui32BCount, 1, ui32DbgFlags);
			}
			else
			{
				ui32BytesWritten = PDumpOSDebugDriverWrite(	psStream,
												PDUMP_WRITE_MODE_BINCM,
												&pui8Data[ui32Off], ui32BCount, 1, 0);
			}
		}

		/*
			If the debug driver's buffers are full so no data could be written then yield
			execution so pdump can run and empty them.
		*/
		if (ui32BytesWritten == 0)
		{
			if (ui32Flags & PDUMP_FLAGS_CONTINUOUS)
			{
				/* NOTE(review): pui8Data is not guaranteed to be
				 * NUL-terminated; this %s may over-read — confirm callers
				 * always pass terminated script text on this path. */
				PVR_DPF((PVR_DBG_ERROR, "Buffer is full during writing of %s", &pui8Data[ui32Off]));
			}
			PDumpOSReleaseExecution();
		}

		if (ui32BytesWritten != 0xFFFFFFFFU)
		{
			ui32Off += ui32BytesWritten;
			ui32BCount -= ui32BytesWritten;
		}
		else
		{
			if (ui32Flags & PDUMP_FLAGS_CONTINUOUS)
			{
				PVR_DPF((PVR_DBG_ERROR, "Error during writing of %s", &pui8Data[ui32Off]));
			}
		}
		/* loop exits when i) all data is written, or ii) an unrecoverable error occurs */
	}

	return ui32BytesWritten;
}



#else	/* defined(PDUMP) */
/* disable warning about empty module */
#endif	/* defined(PDUMP) */
/*****************************************************************************
 End of file (pdump_common.c)
*****************************************************************************/
diff --git a/sgx_km/eurasia_km/services4/srvkm/common/perproc.c b/sgx_km/eurasia_km/services4/srvkm/common/perproc.c
new file mode 100644
index 0000000..3918bb2 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/common/perproc.c @@ -0,0 +1,398 @@ +/*************************************************************************/ /*! +@Title Per-process storage +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Manage per-process storage +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 

EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/ /**************************************************************************/

#include "services_headers.h"
#include "resman.h"
#include "handle.h"
#include "perproc.h"
#include "osperproc.h"
#if defined(TTRACE)
#include "ttrace.h"
#endif

/* Initial bucket count for the PID -> per-process-data hash table. */
#define	HASH_TAB_INIT_SIZE 32

/* PID -> PVRSRV_PER_PROCESS_DATA* map; created in PVRSRVPerProcessDataInit. */
static HASH_TABLE *psHashTab = IMG_NULL;

/*!
******************************************************************************

 @Function	FreePerProcessData

 @Description	Free a per-process data area: removes it from the hash table,
				releases its handle base, per-process handle, OS private data
				and finally the structure itself.

 @Input		psPerProc - pointer to per-process data area

 @Return	Error code, or PVRSRV_OK

******************************************************************************/
static PVRSRV_ERROR FreePerProcessData(PVRSRV_PER_PROCESS_DATA *psPerProc)
{
	PVRSRV_ERROR eError;
	IMG_UINTPTR_T uiPerProc;

	PVR_ASSERT(psPerProc != IMG_NULL);

	if (psPerProc == IMG_NULL)
	{
		PVR_DPF((PVR_DBG_ERROR, "FreePerProcessData: invalid parameter"));
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	uiPerProc = HASH_Remove(psHashTab, (IMG_UINTPTR_T)psPerProc->ui32PID);
	if (uiPerProc == 0)
	{
		PVR_DPF((PVR_DBG_ERROR, "FreePerProcessData: Couldn't find process in per-process data hash table"));
		/*
		 * We must have failed early in the per-process data area
		 * creation, before the process ID was set.
		 */
		PVR_ASSERT(psPerProc->ui32PID == 0);
	}
	else
	{
		PVR_ASSERT((PVRSRV_PER_PROCESS_DATA *)uiPerProc == psPerProc);
		PVR_ASSERT(((PVRSRV_PER_PROCESS_DATA *)uiPerProc)->ui32PID == psPerProc->ui32PID);
	}

	/* Free handle base for this process */
	if (psPerProc->psHandleBase != IMG_NULL)
	{
		eError = PVRSRVFreeHandleBase(psPerProc->psHandleBase);
		if (eError != PVRSRV_OK)
		{
			/* NOTE(review): returning here leaves the entry already removed
			 * from the hash table but the structure still allocated. */
			PVR_DPF((PVR_DBG_ERROR, "FreePerProcessData: Couldn't free handle base for process (%d)", eError));
			return eError;
		}
	}

	/* Release handle for per-process data area */
	if (psPerProc->hPerProcData != IMG_NULL)
	{
		eError = PVRSRVReleaseHandle(KERNEL_HANDLE_BASE, psPerProc->hPerProcData, PVRSRV_HANDLE_TYPE_PERPROC_DATA);

		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR, "FreePerProcessData: Couldn't release per-process data handle (%d)", eError));
			return eError;
		}
	}

	/* Call environment specific per process deinit function */
	eError = OSPerProcessPrivateDataDeInit(psPerProc->hOsPrivateData);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR, "FreePerProcessData: OSPerProcessPrivateDataDeInit failed (%d)", eError));
		return eError;
	}

	eError = OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
					   sizeof(*psPerProc),
					   psPerProc,
					   psPerProc->hBlockAlloc);
	/*not nulling pointer, copy on stack*/
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR, "FreePerProcessData: Couldn't free per-process data (%d)", eError));
		return eError;
	}

	return PVRSRV_OK;
}


/*!
******************************************************************************

 @Function	PVRSRVPerProcessData

 @Description	Return per-process data area

 @Input		ui32PID - process ID

 @Return	Pointer to per-process data area, or IMG_NULL on error.

******************************************************************************/
PVRSRV_PER_PROCESS_DATA *PVRSRVPerProcessData(IMG_UINT32 ui32PID)
{
	PVRSRV_PER_PROCESS_DATA *psPerProc;

	PVR_ASSERT(psHashTab != IMG_NULL);

	/* Look for existing per-process data area */
	psPerProc = (PVRSRV_PER_PROCESS_DATA *)HASH_Retrieve(psHashTab, (IMG_UINTPTR_T)ui32PID);
	return psPerProc;
}


/*!
******************************************************************************

 @Function	PVRSRVPerProcessDataConnect

 @Description	Allocate per-process data area, or increment refcount if one
				already exists for this PID.

 @Input		ui32PID - process ID
			ui32Flags - connection flags from the client

 @Return	PVRSRV_ERROR

******************************************************************************/
PVRSRV_ERROR PVRSRVPerProcessDataConnect(IMG_UINT32	ui32PID, IMG_UINT32 ui32Flags)
{
	PVRSRV_PER_PROCESS_DATA *psPerProc;
	IMG_HANDLE hBlockAlloc;
	PVRSRV_ERROR eError = PVRSRV_OK;

	if (psHashTab == IMG_NULL)
	{
		return PVRSRV_ERROR_INIT_FAILURE;
	}

	/* Look for existing per-process data area */
	psPerProc = (PVRSRV_PER_PROCESS_DATA *)HASH_Retrieve(psHashTab, (IMG_UINTPTR_T)ui32PID);

	if (psPerProc == IMG_NULL)
	{
		/* Allocate per-process data area */
		eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
							sizeof(*psPerProc),
							(IMG_PVOID *)&psPerProc,
							&hBlockAlloc,
							"Per Process Data");
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: Couldn't allocate per-process data (%d)", eError));
			return eError;
		}
		OSMemSet(psPerProc, 0, sizeof(*psPerProc));
		psPerProc->hBlockAlloc = hBlockAlloc;

		/* Insert before ui32PID is set: on failure, FreePerProcessData
		 * relies on ui32PID still being 0 (see its assert). */
		if (!HASH_Insert(psHashTab, (IMG_UINTPTR_T)ui32PID, (IMG_UINTPTR_T)psPerProc))
		{
			PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: Couldn't insert per-process data into hash table"));
			eError = PVRSRV_ERROR_INSERT_HASH_TABLE_DATA_FAILED;
			goto failure;
		}

		psPerProc->ui32PID = ui32PID;
		psPerProc->ui32RefCount = 0;

#if defined(SUPPORT_PDUMP_MULTI_PROCESS)
		/* NOTE(review): exact equality, not a bitmask test — a client
		 * combining SRV_FLAGS_PDUMP_ACTIVE with other flags would not be
		 * marked active. Confirm callers never combine flags here. */
		if (ui32Flags == SRV_FLAGS_PDUMP_ACTIVE)
		{
			psPerProc->bPDumpActive = IMG_TRUE;
		}
#else
		PVR_UNREFERENCED_PARAMETER(ui32Flags);
#endif

		/* Call environment specific per process init function */
		eError = OSPerProcessPrivateDataInit(&psPerProc->hOsPrivateData);
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: OSPerProcessPrivateDataInit failed (%d)", eError));
			goto failure;
		}

		/* Allocate a handle for the per-process data area */
		eError = PVRSRVAllocHandle(KERNEL_HANDLE_BASE,
								   &psPerProc->hPerProcData,
								   psPerProc,
								   PVRSRV_HANDLE_TYPE_PERPROC_DATA,
								   PVRSRV_HANDLE_ALLOC_FLAG_NONE);
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: Couldn't allocate handle for per-process data (%d)", eError));
			goto failure;
		}

		/* Allocate handle base for this process */
		eError = PVRSRVAllocHandleBase(&psPerProc->psHandleBase);
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: Couldn't allocate handle base for process (%d)", eError));
			goto failure;
		}

		/* Set per-process handle options */
		eError = OSPerProcessSetHandleOptions(psPerProc->psHandleBase);
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: Couldn't set handle options (%d)", eError));
			goto failure;
		}

		/* Create a resource manager context for the process */
		eError = PVRSRVResManConnect(psPerProc, &psPerProc->hResManContext);
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: Couldn't register with the resource manager"));
			goto failure;
		}
#if defined (TTRACE)
		PVRSRVTimeTraceBufferCreate(ui32PID);
#endif
	}

	/* NOTE(review): refcount is not atomic; presumably all connect/disconnect
	 * calls are serialised by the bridge lock — confirm. */
	psPerProc->ui32RefCount++;
	PVR_DPF((PVR_DBG_MESSAGE,
			"PVRSRVPerProcessDataConnect: Process 0x%x has ref-count %d",
			ui32PID, psPerProc->ui32RefCount));

	return eError;

failure:
	/* Best-effort rollback; original error code is preserved. */
	(IMG_VOID)FreePerProcessData(psPerProc);
	return eError;
}


/*!
******************************************************************************

 @Function	PVRSRVPerProcessDataDisconnect

 @Description	Decrement refcount for per-process data area,
				and free the resources if necessary.

 @Input		ui32PID - process ID

 @Return	IMG_VOID

******************************************************************************/
IMG_VOID PVRSRVPerProcessDataDisconnect(IMG_UINT32	ui32PID)
{
	PVRSRV_ERROR eError;
	PVRSRV_PER_PROCESS_DATA *psPerProc;

	PVR_ASSERT(psHashTab != IMG_NULL);

	psPerProc = (PVRSRV_PER_PROCESS_DATA *)HASH_Retrieve(psHashTab, (IMG_UINTPTR_T)ui32PID);
	if (psPerProc == IMG_NULL)
	{
		PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataDealloc: Couldn't locate per-process data for PID %u", ui32PID));
	}
	else
	{
		psPerProc->ui32RefCount--;
		if (psPerProc->ui32RefCount == 0)
		{
			PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVPerProcessDataDisconnect: "
					"Last close from process 0x%x received", ui32PID));

			/* Close the Resource Manager connection */
			PVRSRVResManDisconnect(psPerProc->hResManContext, IMG_FALSE);

#if defined (TTRACE)
			PVRSRVTimeTraceBufferDestroy(ui32PID);
#endif

			/* Free the per-process data */
			eError = FreePerProcessData(psPerProc);
			if (eError != PVRSRV_OK)
			{
				PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataDisconnect: Error freeing per-process data"));
			}
		}
	}

	/* Sweep the global pool even if the PID lookup failed above. */
	eError = PVRSRVPurgeHandles(KERNEL_HANDLE_BASE);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataDisconnect: Purge of global handle pool failed (%d)", eError));
	}
}


/*!
******************************************************************************

 @Function	PVRSRVPerProcessDataInit

 @Description	Initialise per-process data management: creates the global
				PID -> per-process-data hash table. Must be called before
				any connect/lookup.

 @Return	Error code, or PVRSRV_OK

******************************************************************************/
PVRSRV_ERROR PVRSRVPerProcessDataInit(IMG_VOID)
{
	PVR_ASSERT(psHashTab == IMG_NULL);

	/* Create hash table */
	psHashTab = HASH_Create(HASH_TAB_INIT_SIZE);
	if (psHashTab == IMG_NULL)
	{
		PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataInit: Couldn't create per-process data hash table"));
		return PVRSRV_ERROR_UNABLE_TO_CREATE_HASH_TABLE;
	}

	return PVRSRV_OK;
}

/*!
******************************************************************************

 @Function	PVRSRVPerProcessDataDeInit

 @Description	De-initialise per-process data management. Idempotent: safe
				to call when initialisation never happened.

 @Return	Error code, or PVRSRV_OK

******************************************************************************/
PVRSRV_ERROR PVRSRVPerProcessDataDeInit(IMG_VOID)
{
	/* Destroy per-process data area hash table */
	if (psHashTab != IMG_NULL)
	{
		/* Free the hash table */
		HASH_Delete(psHashTab);
		psHashTab = IMG_NULL;
	}

	return PVRSRV_OK;
}

/******************************************************************************
 End of file (perproc.c)
******************************************************************************/
diff --git a/sgx_km/eurasia_km/services4/srvkm/common/power.c b/sgx_km/eurasia_km/services4/srvkm/common/power.c
new file mode 100644
index 0000000..511a690
--- /dev/null
+++ b/sgx_km/eurasia_km/services4/srvkm/common/power.c
@@ -0,0 +1,996 @@
/*************************************************************************/ /*!
@Title          Power management functions
@Copyright      Copyright (c) Imagination Technologies Ltd.
All Rights Reserved +@Description Main APIs for power management functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 

EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/ /**************************************************************************/

#include "services_headers.h"
#include "pdump_km.h"

#include "lists.h"

/* Module-global flags tracking the state of the services init server.
 * NOTE(review): plain IMG_BOOLs, not atomics — presumably only mutated
 * under the bridge lock; confirm. */
static IMG_BOOL gbInitServerRunning	= IMG_FALSE;
static IMG_BOOL gbInitServerRan		= IMG_FALSE;
static IMG_BOOL gbInitSuccessful	= IMG_FALSE;

/*!
******************************************************************************

 @Function	PVRSRVSetInitServerState

 @Description	Sets given services init state.

 @Input		eInitServerState : a services init state
 @Input		bState : a state to set

 @Return	PVRSRV_ERROR

******************************************************************************/
IMG_EXPORT
PVRSRV_ERROR PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_STATE eInitServerState, IMG_BOOL bState)
{

	switch(eInitServerState)
	{
		case PVRSRV_INIT_SERVER_RUNNING:
			gbInitServerRunning	= bState;
			break;
		case PVRSRV_INIT_SERVER_RAN:
			gbInitServerRan	= bState;
			break;
		case PVRSRV_INIT_SERVER_SUCCESSFUL:
			gbInitSuccessful = bState;
			break;
		default:
			PVR_DPF((PVR_DBG_ERROR,
				"PVRSRVSetInitServerState : Unknown state %x", eInitServerState));
			return PVRSRV_ERROR_UNKNOWN_INIT_SERVER_STATE;
	}

	return PVRSRV_OK;
}

/*!
******************************************************************************

 @Function	PVRSRVGetInitServerState

 @Description	Tests whether a given services init state was run.

 @Input		eInitServerState : a services init state

 @Return	IMG_BOOL

******************************************************************************/
IMG_EXPORT
IMG_BOOL PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_STATE eInitServerState)
{
	IMG_BOOL	bReturnVal;

	switch(eInitServerState)
	{
		case PVRSRV_INIT_SERVER_RUNNING:
			bReturnVal = gbInitServerRunning;
			break;
		case PVRSRV_INIT_SERVER_RAN:
			bReturnVal = gbInitServerRan;
			break;
		case PVRSRV_INIT_SERVER_SUCCESSFUL:
			bReturnVal = gbInitSuccessful;
			break;
		default:
			PVR_DPF((PVR_DBG_ERROR,
				"PVRSRVGetInitServerState : Unknown state %x", eInitServerState));
			/* Unknown states read as "not run" rather than erroring. */
			bReturnVal = IMG_FALSE;
	}

	return bReturnVal;
}

/*!
******************************************************************************

 @Function	_IsSystemStatePowered

 @Description	Tests whether a given system state represents powered-up.

 @Input		eSystemPowerState : a system power state

 @Return	IMG_BOOL

******************************************************************************/
static IMG_BOOL _IsSystemStatePowered(PVRSRV_SYS_POWER_STATE eSystemPowerState)
{
	/* States ordered below D2 (e.g. D0/D1) count as powered; relies on the
	 * enum's declaration order. */
	return (IMG_BOOL)(eSystemPowerState < PVRSRV_SYS_POWER_STATE_D2);
}


/*!
+****************************************************************************** + + @Function PVRSRVPowerLock + + @Description Obtain the mutex for power transitions + + @Input ui32CallerID : KERNEL_ID or ISR_ID + @Input bSystemPowerEvent : Only pass IMG_TRUE if the lock is for a + system power state change + + @Return PVRSRV_ERROR IMG_CALLCONV + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR PVRSRVPowerLock(IMG_UINT32 ui32CallerID, + IMG_BOOL bSystemPowerEvent) +{ + PVRSRV_ERROR eError; + SYS_DATA *psSysData; + IMG_UINT32 ui32Timeout = 1000000; + IMG_BOOL bTryLock = (ui32CallerID == ISR_ID); + + SysAcquireData(&psSysData); + + eError = OSPowerLockWrap(bTryLock); + if (eError != PVRSRV_OK) + { + return eError; + } + + do + { + eError = OSLockResource(&psSysData->sPowerStateChangeResource, + ui32CallerID); + if (eError == PVRSRV_OK) + { + break; + } + else if (bTryLock) + { + /* + ISR failed to acquire lock so it must be held by a kernel thread. + */ + eError = PVRSRV_ERROR_RETRY; + break; + } + + OSWaitus(1); + ui32Timeout--; + } while (ui32Timeout > 0); + + if (eError != PVRSRV_OK) + { + OSPowerLockUnwrap(); + } + + /* PRQA S 3415 3 */ /* side effects desired */ + if ((eError == PVRSRV_OK) && + !bSystemPowerEvent && + !_IsSystemStatePowered(psSysData->eCurrentPowerState)) + { + /* Reject device power state change due to system power state. */ + PVRSRVPowerUnlock(ui32CallerID); + eError = PVRSRV_ERROR_RETRY; + } + + return eError; +} + + +/*! 
+****************************************************************************** + + @Function PVRSRVPowerUnlock + + @Description Release the mutex for power transitions + + @Input ui32CallerID : KERNEL_ID or ISR_ID + + @Return PVRSRV_ERROR + +******************************************************************************/ +IMG_EXPORT +IMG_VOID PVRSRVPowerUnlock(IMG_UINT32 ui32CallerID) +{ + OSUnlockResource(&gpsSysData->sPowerStateChangeResource, ui32CallerID); + OSPowerLockUnwrap(); +} + + +/*! +****************************************************************************** + + @Function PVRSRVDevicePrePowerStateKM_AnyVaCb + + @Description + + Perform device-specific processing required before a power transition + + @Input psPowerDevice : the device + @Input va : variable argument list with: + bAllDevices : IMG_TRUE - All devices + IMG_FALSE - Use ui32DeviceIndex + ui32DeviceIndex : device index + eNewPowerState : New power state + + @Return PVRSRV_ERROR + +******************************************************************************/ +static PVRSRV_ERROR PVRSRVDevicePrePowerStateKM_AnyVaCb(PVRSRV_POWER_DEV *psPowerDevice, va_list va) +{ + PVRSRV_DEV_POWER_STATE eNewDevicePowerState; + PVRSRV_ERROR eError; + + /*Variable Argument variables*/ + IMG_BOOL bAllDevices; + IMG_UINT32 ui32DeviceIndex; + PVRSRV_DEV_POWER_STATE eNewPowerState; + + /* WARNING: if types were not aligned to 4 bytes, this could be dangerous. */ + bAllDevices = va_arg(va, IMG_BOOL); + ui32DeviceIndex = va_arg(va, IMG_UINT32); + eNewPowerState = va_arg(va, PVRSRV_DEV_POWER_STATE); + + if (bAllDevices || (ui32DeviceIndex == psPowerDevice->ui32DeviceIndex)) + { + eNewDevicePowerState = (eNewPowerState == PVRSRV_DEV_POWER_STATE_DEFAULT) ? + psPowerDevice->eDefaultPowerState : eNewPowerState; + + if (psPowerDevice->eCurrentPowerState != eNewDevicePowerState) + { + if (psPowerDevice->pfnPrePower != IMG_NULL) + { + /* Call the device's power callback. 
*/ + eError = psPowerDevice->pfnPrePower(psPowerDevice->hDevCookie, + eNewDevicePowerState, + psPowerDevice->eCurrentPowerState); + if (eError != PVRSRV_OK) + { + return eError; + } + } + + /* Do any required system-layer processing. */ + eError = SysDevicePrePowerState(psPowerDevice->ui32DeviceIndex, + eNewDevicePowerState, + psPowerDevice->eCurrentPowerState); + if (eError != PVRSRV_OK) + { + return eError; + } + } + } + + return PVRSRV_OK; +} + +/*! +****************************************************************************** + + @Function PVRSRVDevicePrePowerStateKM + + @Description + + Perform device-specific processing required before a power transition + + @Input bAllDevices : IMG_TRUE - All devices + IMG_FALSE - Use ui32DeviceIndex + @Input ui32DeviceIndex : device index + @Input eNewPowerState : New power state + + @Return PVRSRV_ERROR + +******************************************************************************/ +static +PVRSRV_ERROR PVRSRVDevicePrePowerStateKM(IMG_BOOL bAllDevices, + IMG_UINT32 ui32DeviceIndex, + PVRSRV_DEV_POWER_STATE eNewPowerState) +{ + PVRSRV_ERROR eError; + SYS_DATA *psSysData; + + SysAcquireData(&psSysData); + + /* Loop through the power devices. */ + eError = List_PVRSRV_POWER_DEV_PVRSRV_ERROR_Any_va(psSysData->psPowerDeviceList, + &PVRSRVDevicePrePowerStateKM_AnyVaCb, + bAllDevices, + ui32DeviceIndex, + eNewPowerState); + + return eError; +} + +/*! 
+****************************************************************************** + + @Function PVRSRVDevicePostPowerStateKM_AnyVaCb + + @Description + + Perform device-specific processing required after a power transition + + @Input psPowerDevice : the device + @Input va : variable argument list with: + bAllDevices : IMG_TRUE - All devices + IMG_FALSE - Use ui32DeviceIndex + ui32DeviceIndex : device index + eNewPowerState : New power state + + @Return PVRSRV_ERROR + +******************************************************************************/ +static PVRSRV_ERROR PVRSRVDevicePostPowerStateKM_AnyVaCb(PVRSRV_POWER_DEV *psPowerDevice, va_list va) +{ + PVRSRV_DEV_POWER_STATE eNewDevicePowerState; + PVRSRV_ERROR eError; + + /*Variable Argument variables*/ + IMG_BOOL bAllDevices; + IMG_UINT32 ui32DeviceIndex; + PVRSRV_DEV_POWER_STATE eNewPowerState; + + /* WARNING: if types were not aligned to 4 bytes, this could be dangerous. */ + bAllDevices = va_arg(va, IMG_BOOL); + ui32DeviceIndex = va_arg(va, IMG_UINT32); + eNewPowerState = va_arg(va, PVRSRV_DEV_POWER_STATE); + + if (bAllDevices || (ui32DeviceIndex == psPowerDevice->ui32DeviceIndex)) + { + eNewDevicePowerState = (eNewPowerState == PVRSRV_DEV_POWER_STATE_DEFAULT) ? + psPowerDevice->eDefaultPowerState : eNewPowerState; + + if (psPowerDevice->eCurrentPowerState != eNewDevicePowerState) + { + /* Do any required system-layer processing. */ + eError = SysDevicePostPowerState(psPowerDevice->ui32DeviceIndex, + eNewDevicePowerState, + psPowerDevice->eCurrentPowerState); + if (eError != PVRSRV_OK) + { + return eError; + } + + if (psPowerDevice->pfnPostPower != IMG_NULL) + { + /* Call the device's power callback. */ + eError = psPowerDevice->pfnPostPower(psPowerDevice->hDevCookie, + eNewDevicePowerState, + psPowerDevice->eCurrentPowerState); + if (eError != PVRSRV_OK) + { + return eError; + } + } + + psPowerDevice->eCurrentPowerState = eNewDevicePowerState; + } + } + return PVRSRV_OK; +} + +/*! 
+****************************************************************************** + + @Function PVRSRVDevicePostPowerStateKM + + @Description + + Perform device-specific processing required after a power transition + + @Input bAllDevices : IMG_TRUE - All devices + IMG_FALSE - Use ui32DeviceIndex + @Input ui32DeviceIndex : device index + @Input eNewPowerState : New power state + + @Return PVRSRV_ERROR + +******************************************************************************/ +static +PVRSRV_ERROR PVRSRVDevicePostPowerStateKM(IMG_BOOL bAllDevices, + IMG_UINT32 ui32DeviceIndex, + PVRSRV_DEV_POWER_STATE eNewPowerState) +{ + PVRSRV_ERROR eError; + SYS_DATA *psSysData; + + SysAcquireData(&psSysData); + + /* Loop through the power devices. */ + eError = List_PVRSRV_POWER_DEV_PVRSRV_ERROR_Any_va(psSysData->psPowerDeviceList, + &PVRSRVDevicePostPowerStateKM_AnyVaCb, + bAllDevices, + ui32DeviceIndex, + eNewPowerState); + + return eError; +} + + +/*! +****************************************************************************** + + @Function PVRSRVSetDevicePowerStateKM + + @Description Set the Device into a new state + + @Input ui32DeviceIndex : device index + @Input eNewPowerState : New power state + @Input ui32CallerID : KERNEL_ID or ISR_ID + @Input bRetainMutex : If true, the power mutex is retained on exit + + @Return PVRSRV_ERROR + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR PVRSRVSetDevicePowerStateKM(IMG_UINT32 ui32DeviceIndex, + PVRSRV_DEV_POWER_STATE eNewPowerState) +{ + PVRSRV_ERROR eError; + SYS_DATA *psSysData; + + SysAcquireData(&psSysData); + + #if defined(PDUMP) + if (eNewPowerState == PVRSRV_DEV_POWER_STATE_DEFAULT) + { + /* + Pdump a power-up regardless of the default state. + Then disable pdump and transition to the default power state. + This ensures that a power-up is always present in the pdump when necessary. 
+ */ + eError = PVRSRVDevicePrePowerStateKM(IMG_FALSE, ui32DeviceIndex, PVRSRV_DEV_POWER_STATE_ON); + if(eError != PVRSRV_OK) + { + goto Exit; + } + + eError = PVRSRVDevicePostPowerStateKM(IMG_FALSE, ui32DeviceIndex, PVRSRV_DEV_POWER_STATE_ON); + + if (eError != PVRSRV_OK) + { + goto Exit; + } + + PDUMPSUSPEND(); + } + #endif /* PDUMP */ + + eError = PVRSRVDevicePrePowerStateKM(IMG_FALSE, ui32DeviceIndex, eNewPowerState); + if(eError != PVRSRV_OK) + { + if (eNewPowerState == PVRSRV_DEV_POWER_STATE_DEFAULT) + { + PDUMPRESUME(); + } + goto Exit; + } + + eError = PVRSRVDevicePostPowerStateKM(IMG_FALSE, ui32DeviceIndex, eNewPowerState); + + if (eNewPowerState == PVRSRV_DEV_POWER_STATE_DEFAULT) + { + PDUMPRESUME(); + } + +Exit: + + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "PVRSRVSetDevicePowerStateKM : Transition to %d FAILED 0x%x", eNewPowerState, eError)); + } + + return eError; +} + + +/*! +****************************************************************************** + + @Function PVRSRVSystemPrePowerStateKM + + @Description Perform processing required before a system power transition + + @Input eNewSysPowerState : + + @Return PVRSRV_ERROR + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR PVRSRVSystemPrePowerStateKM(PVRSRV_SYS_POWER_STATE eNewSysPowerState) +{ + PVRSRV_ERROR eError; + SYS_DATA *psSysData; + PVRSRV_DEV_POWER_STATE eNewDevicePowerState; + + SysAcquireData(&psSysData); + + /* This mutex is unlocked in PVRSRVSystemPostPowerStateKM() */ + eError = PVRSRVPowerLock(KERNEL_ID, IMG_TRUE); + if(eError != PVRSRV_OK) + { + return eError; + } + + if (_IsSystemStatePowered(eNewSysPowerState) != + _IsSystemStatePowered(psSysData->eCurrentPowerState)) + { + if (_IsSystemStatePowered(eNewSysPowerState)) + { + /* Return device back to its default state. 
*/ + eNewDevicePowerState = PVRSRV_DEV_POWER_STATE_DEFAULT; + } + else + { + eNewDevicePowerState = PVRSRV_DEV_POWER_STATE_OFF; + } + + /* Perform device-specific transitions. */ + eError = PVRSRVDevicePrePowerStateKM(IMG_TRUE, 0, eNewDevicePowerState); + if (eError != PVRSRV_OK) + { + goto ErrorExit; + } + } + + if (eNewSysPowerState != psSysData->eCurrentPowerState) + { + /* Perform system-specific power transitions. */ + eError = SysSystemPrePowerState(eNewSysPowerState); + if (eError != PVRSRV_OK) + { + goto ErrorExit; + } + } + + return eError; + +ErrorExit: + + PVR_DPF((PVR_DBG_ERROR, + "PVRSRVSystemPrePowerStateKM: Transition from %d to %d FAILED 0x%x", + psSysData->eCurrentPowerState, eNewSysPowerState, eError)); + + /* save the power state for the re-attempt */ + psSysData->eFailedPowerState = eNewSysPowerState; + + PVRSRVPowerUnlock(KERNEL_ID); + + return eError; +} + + +/*! +****************************************************************************** + + @Function PVRSRVSystemPostPowerStateKM + + @Description Perform processing required after a system power transition + + @Input eNewSysPowerState : + + @Return PVRSRV_ERROR + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR PVRSRVSystemPostPowerStateKM(PVRSRV_SYS_POWER_STATE eNewSysPowerState) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + SYS_DATA *psSysData; + PVRSRV_DEV_POWER_STATE eNewDevicePowerState; + + SysAcquireData(&psSysData); + + if (eNewSysPowerState != psSysData->eCurrentPowerState) + { + /* Perform system-specific power transitions. */ + eError = SysSystemPostPowerState(eNewSysPowerState); + if (eError != PVRSRV_OK) + { + goto Exit; + } + } + + if (_IsSystemStatePowered(eNewSysPowerState) != + _IsSystemStatePowered(psSysData->eCurrentPowerState)) + { + if (_IsSystemStatePowered(eNewSysPowerState)) + { + /* Return device back to its default state. 
*/ + eNewDevicePowerState = PVRSRV_DEV_POWER_STATE_DEFAULT; + } + else + { + eNewDevicePowerState = PVRSRV_DEV_POWER_STATE_OFF; + } + + /* Perform device-specific power transitions. */ + eError = PVRSRVDevicePostPowerStateKM(IMG_TRUE, 0, eNewDevicePowerState); + if (eError != PVRSRV_OK) + { + goto Exit; + } + } + + PVR_DPF((PVR_DBG_MESSAGE, + "PVRSRVSystemPostPowerStateKM: System Power Transition from %d to %d OK", + psSysData->eCurrentPowerState, eNewSysPowerState)); + + psSysData->eCurrentPowerState = eNewSysPowerState; + +Exit: + + PVRSRVPowerUnlock(KERNEL_ID); + + /* PRQA S 3415 2 */ /* side effects desired */ + if (_IsSystemStatePowered(eNewSysPowerState) && + PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_SUCCESSFUL)) + { + /* + Reprocess the devices' queues in case commands were blocked during + the power transition. + */ + PVRSRVScheduleDeviceCallbacks(); + } + + return eError; +} + + +/*! +****************************************************************************** + + @Function PVRSRVSetPowerStateKM + + @Description Set the system into a new state + + @Input eNewPowerState : + + @Return PVRSRV_ERROR + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE eNewSysPowerState) +{ + PVRSRV_ERROR eError; + SYS_DATA *psSysData; + + SysAcquireData(&psSysData); + + eError = PVRSRVSystemPrePowerStateKM(eNewSysPowerState); + if(eError != PVRSRV_OK) + { + goto ErrorExit; + } + + eError = PVRSRVSystemPostPowerStateKM(eNewSysPowerState); + if(eError != PVRSRV_OK) + { + goto ErrorExit; + } + + /* save new power state */ + psSysData->eFailedPowerState = PVRSRV_SYS_POWER_STATE_Unspecified; + + return PVRSRV_OK; + +ErrorExit: + + PVR_DPF((PVR_DBG_ERROR, + "PVRSRVSetPowerStateKM: Transition from %d to %d FAILED 0x%x", + psSysData->eCurrentPowerState, eNewSysPowerState, eError)); + + /* save the power state for the re-attempt */ + psSysData->eFailedPowerState = eNewSysPowerState; 
+ + return eError; +} + + +/*! +****************************************************************************** + + @Function PVRSRVRegisterPowerDevice + + @Description + + Registers a device with the power manager. Passes Pre/Post Power handlers + and private device handle to be passed to power handlers + + @Input ui32DeviceIndex : device index + @Input pfnPrePower : Pre power transition handler + @Input pfnPostPower : Post power transition handler + @Input pfnPreClockSpeedChange : Pre clock speed transition handler (if required) + @Input pfnPostClockSpeedChange : Post clock speed transition handler (if required) + @Input hDevCookie : Dev cookie for dev power handlers + @Input eCurrentPowerState : Current power state of the device + @Input eDefaultPowerState : Default power state of the device + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR PVRSRVRegisterPowerDevice(IMG_UINT32 ui32DeviceIndex, + PFN_PRE_POWER pfnPrePower, + PFN_POST_POWER pfnPostPower, + PFN_PRE_CLOCKSPEED_CHANGE pfnPreClockSpeedChange, + PFN_POST_CLOCKSPEED_CHANGE pfnPostClockSpeedChange, + IMG_HANDLE hDevCookie, + PVRSRV_DEV_POWER_STATE eCurrentPowerState, + PVRSRV_DEV_POWER_STATE eDefaultPowerState) +{ + PVRSRV_ERROR eError; + SYS_DATA *psSysData; + PVRSRV_POWER_DEV *psPowerDevice; + + if (pfnPrePower == IMG_NULL && + pfnPostPower == IMG_NULL) + { + return PVRSRVRemovePowerDevice(ui32DeviceIndex); + } + + SysAcquireData(&psSysData); + + eError = OSAllocMem( PVRSRV_OS_NON_PAGEABLE_HEAP, + sizeof(PVRSRV_POWER_DEV), + (IMG_VOID **)&psPowerDevice, IMG_NULL, + "Power Device"); + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterPowerDevice: Failed to alloc PVRSRV_POWER_DEV")); + return eError; + } + + /* setup device for power manager */ + psPowerDevice->pfnPrePower = pfnPrePower; + psPowerDevice->pfnPostPower = pfnPostPower; + psPowerDevice->pfnPreClockSpeedChange = pfnPreClockSpeedChange; + 
psPowerDevice->pfnPostClockSpeedChange = pfnPostClockSpeedChange; + psPowerDevice->hDevCookie = hDevCookie; + psPowerDevice->ui32DeviceIndex = ui32DeviceIndex; + psPowerDevice->eCurrentPowerState = eCurrentPowerState; + psPowerDevice->eDefaultPowerState = eDefaultPowerState; + + /* insert into power device list */ + List_PVRSRV_POWER_DEV_Insert(&(psSysData->psPowerDeviceList), psPowerDevice); + + return (PVRSRV_OK); +} + + +/*! +****************************************************************************** + + @Function PVRSRVRemovePowerDevice + + @Description + + Removes device from power management register. Device is located by Device Index + + @Input ui32DeviceIndex : device index + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR PVRSRVRemovePowerDevice (IMG_UINT32 ui32DeviceIndex) +{ + SYS_DATA *psSysData; + PVRSRV_POWER_DEV *psPowerDev; + + SysAcquireData(&psSysData); + + /* find device in list and remove it */ + psPowerDev = (PVRSRV_POWER_DEV*) + List_PVRSRV_POWER_DEV_Any_va(psSysData->psPowerDeviceList, + &MatchPowerDeviceIndex_AnyVaCb, + ui32DeviceIndex); + + if (psPowerDev) + { + List_PVRSRV_POWER_DEV_Remove(psPowerDev); + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_POWER_DEV), psPowerDev, IMG_NULL); + /*not nulling pointer, copy on stack*/ + } + + return (PVRSRV_OK); +} + + +/*! +****************************************************************************** + + @Function PVRSRVIsDevicePowered + + @Description + + Whether the device is powered, for the purposes of lockup detection. 
+ + @Input ui32DeviceIndex : device index + + @Return IMG_BOOL + +******************************************************************************/ +IMG_EXPORT +IMG_BOOL PVRSRVIsDevicePowered(IMG_UINT32 ui32DeviceIndex) +{ + SYS_DATA *psSysData; + PVRSRV_POWER_DEV *psPowerDevice; + + SysAcquireData(&psSysData); + + /* PRQA S 3415 2 */ /* order not important */ + if (OSIsResourceLocked(&psSysData->sPowerStateChangeResource, KERNEL_ID) || + OSIsResourceLocked(&psSysData->sPowerStateChangeResource, ISR_ID)) + { + return IMG_FALSE; + } + + psPowerDevice = (PVRSRV_POWER_DEV*) + List_PVRSRV_POWER_DEV_Any_va(psSysData->psPowerDeviceList, + &MatchPowerDeviceIndex_AnyVaCb, + ui32DeviceIndex); + return (psPowerDevice && (psPowerDevice->eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON)) + ? IMG_TRUE : IMG_FALSE; +} + + +/*! +****************************************************************************** + + @Function PVRSRVDevicePreClockSpeedChange + + @Description + + Notification from system layer that a device clock speed change is about to happen. + + @Input ui32DeviceIndex : device index + @Input bIdleDevice : whether the device should be idled + @Input pvInfo + + @Return IMG_VOID + +******************************************************************************/ +PVRSRV_ERROR PVRSRVDevicePreClockSpeedChange(IMG_UINT32 ui32DeviceIndex, + IMG_BOOL bIdleDevice, + IMG_VOID *pvInfo) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + SYS_DATA *psSysData; + PVRSRV_POWER_DEV *psPowerDevice; + + PVR_UNREFERENCED_PARAMETER(pvInfo); + + SysAcquireData(&psSysData); + + if (bIdleDevice) + { + /* This mutex is released in PVRSRVDevicePostClockSpeedChange. 
*/ + eError = PVRSRVPowerLock(KERNEL_ID, IMG_FALSE); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVDevicePreClockSpeedChange : failed to acquire lock, error:0x%x", eError)); + return eError; + } + } + + /*search the device and then do the pre clock speed change*/ + psPowerDevice = (PVRSRV_POWER_DEV*) + List_PVRSRV_POWER_DEV_Any_va(psSysData->psPowerDeviceList, + &MatchPowerDeviceIndex_AnyVaCb, + ui32DeviceIndex); + + if (psPowerDevice && psPowerDevice->pfnPostClockSpeedChange) + { + eError = psPowerDevice->pfnPreClockSpeedChange(psPowerDevice->hDevCookie, + bIdleDevice, + psPowerDevice->eCurrentPowerState); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "PVRSRVDevicePreClockSpeedChange : Device %u failed, error:0x%x", + ui32DeviceIndex, eError)); + } + } + + if (bIdleDevice && eError != PVRSRV_OK) + { + PVRSRVPowerUnlock(KERNEL_ID); + } + + return eError; +} + + +/*! +****************************************************************************** + + @Function PVRSRVDevicePostClockSpeedChange + + @Description + + Notification from system layer that a device clock speed change has just happened. 
+ + @Input ui32DeviceIndex : device index + @Input bIdleDevice : whether the device had been idled + @Input pvInfo + + @Return IMG_VOID + +******************************************************************************/ +IMG_VOID PVRSRVDevicePostClockSpeedChange(IMG_UINT32 ui32DeviceIndex, + IMG_BOOL bIdleDevice, + IMG_VOID *pvInfo) +{ + PVRSRV_ERROR eError; + SYS_DATA *psSysData; + PVRSRV_POWER_DEV *psPowerDevice; + + PVR_UNREFERENCED_PARAMETER(pvInfo); + + SysAcquireData(&psSysData); + + /*search the device and then do the post clock speed change*/ + psPowerDevice = (PVRSRV_POWER_DEV*) + List_PVRSRV_POWER_DEV_Any_va(psSysData->psPowerDeviceList, + &MatchPowerDeviceIndex_AnyVaCb, + ui32DeviceIndex); + + if (psPowerDevice && psPowerDevice->pfnPostClockSpeedChange) + { + eError = psPowerDevice->pfnPostClockSpeedChange(psPowerDevice->hDevCookie, + bIdleDevice, + psPowerDevice->eCurrentPowerState); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "PVRSRVDevicePostClockSpeedChange : Device %u failed, error:0x%x", + ui32DeviceIndex, eError)); + } + } + + + if (bIdleDevice) + { + /* This mutex was acquired in PVRSRVDevicePreClockSpeedChange. */ + PVRSRVPowerUnlock(KERNEL_ID); + } +} + +/****************************************************************************** + End of file (power.c) +******************************************************************************/ diff --git a/sgx_km/eurasia_km/services4/srvkm/common/pvrsrv.c b/sgx_km/eurasia_km/services4/srvkm/common/pvrsrv.c new file mode 100644 index 0000000..e823db4 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/common/pvrsrv.c @@ -0,0 +1,1909 @@ +/*************************************************************************/ /*! +@Title core services functions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Main APIs for core services functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "services_headers.h" +#include "buffer_manager.h" +#include "pvr_bridge_km.h" +#include "handle.h" +#include "perproc.h" +#include "pdump_km.h" +#include "deviceid.h" +#include "ra.h" +#if defined(TTRACE) +#include "ttrace.h" +#endif +#include "perfkm.h" +#include "devicemem.h" + +#include "pvrversion.h" + +#include "lists.h" + +IMG_UINT32 g_ui32InitFlags; + +/* mark which parts of Services were initialised */ +#define INIT_DATA_ENABLE_PDUMPINIT 0x1U +#define INIT_DATA_ENABLE_TTRACE 0x2U +#define INIT_DATA_ENABLE_DEVMEM 0x4U + +/*! +****************************************************************************** + + @Function AllocateDeviceID + + @Description + + allocates a device id from the pool of valid ids + + @input psSysData : system data + + @input pui32DevID : device id to return + + @Return device id + +******************************************************************************/ +PVRSRV_ERROR AllocateDeviceID(SYS_DATA *psSysData, IMG_UINT32 *pui32DevID) +{ + SYS_DEVICE_ID* psDeviceWalker; + SYS_DEVICE_ID* psDeviceEnd; + + psDeviceWalker = &psSysData->sDeviceID[0]; + psDeviceEnd = psDeviceWalker + psSysData->ui32NumDevices; + + /* find a free ID */ + while (psDeviceWalker < psDeviceEnd) + { + if (!psDeviceWalker->bInUse) + { + psDeviceWalker->bInUse = IMG_TRUE; + *pui32DevID = psDeviceWalker->uiID; + return PVRSRV_OK; + } + psDeviceWalker++; + } + + PVR_DPF((PVR_DBG_ERROR,"AllocateDeviceID: No free and valid device IDs available!")); + + /* Should never get here: sDeviceID[] may have been setup too small */ + PVR_ASSERT(psDeviceWalker < psDeviceEnd); + + return PVRSRV_ERROR_NO_FREE_DEVICEIDS_AVALIABLE; +} + + +/*! 
+****************************************************************************** + + @Function FreeDeviceID + + @Description + + frees a device id from the pool of valid ids + + @input psSysData : system data + + @input ui32DevID : device id to free + + @Return device id + +******************************************************************************/ +PVRSRV_ERROR FreeDeviceID(SYS_DATA *psSysData, IMG_UINT32 ui32DevID) +{ + SYS_DEVICE_ID* psDeviceWalker; + SYS_DEVICE_ID* psDeviceEnd; + + psDeviceWalker = &psSysData->sDeviceID[0]; + psDeviceEnd = psDeviceWalker + psSysData->ui32NumDevices; + + /* find the ID to free */ + while (psDeviceWalker < psDeviceEnd) + { + /* if matching id and in use, free */ + if ( + (psDeviceWalker->uiID == ui32DevID) && + (psDeviceWalker->bInUse) + ) + { + psDeviceWalker->bInUse = IMG_FALSE; + return PVRSRV_OK; + } + psDeviceWalker++; + } + + PVR_DPF((PVR_DBG_ERROR,"FreeDeviceID: no matching dev ID that is in use!")); + + /* should never get here */ + PVR_ASSERT(psDeviceWalker < psDeviceEnd); + + return PVRSRV_ERROR_INVALID_DEVICEID; +} +/*! 
+******************************************************************************
+
+ @Function	PVRSRVCompatCheckKM
+
+ @Description	UM/KM ddk branch Compatibility check function
+
+ @input		psUserModeDDKDetails: User mode DDK version
+
+ @output	In case of incompatibility, returns PVRSRV_ERROR_DDK_VERSION_MISMATCH
+
+ @Return	IMG_VOID (the check result is reported via psRetOUT->eError,
+			not a return value)
+
+******************************************************************************/
+IMG_VOID IMG_CALLCONV PVRSRVCompatCheckKM(PVRSRV_BRIDGE_IN_COMPAT_CHECK *psUserModeDDKDetails, PVRSRV_BRIDGE_RETURN *psRetOUT)
+{
+#if defined(SUPPORT_TI_VERSION_STRING)
+	SYS_DATA *psSysData;
+
+	SysAcquireData(&psSysData);
+#endif
+
+	/* Compare the UM-reported packed version (major<<16 | minor<<8) and build
+	   number against this KM's compile-time PVRVERSION values; any mismatch
+	   rejects the connection with PVRSRV_ERROR_DDK_VERSION_MISMATCH. */
+	if(psUserModeDDKDetails->ui32DDKVersion != ((PVRVERSION_MAJ << 16) | (PVRVERSION_MIN << 8))
+		|| (psUserModeDDKDetails->ui32DDKBuild != PVRVERSION_BUILD))
+	{
+		psRetOUT->eError = PVRSRV_ERROR_DDK_VERSION_MISMATCH;
+		PVR_LOG(("(FAIL) UM-KM DDK Mismatch UM-(%d) KM-(%d).",
+			psUserModeDDKDetails->ui32DDKBuild, PVRVERSION_BUILD));
+	}
+	else
+	{
+		psRetOUT->eError = PVRSRV_OK;
+		PVR_LOG(("UM DDK-(%d) and KM DDK-(%d) match. [ OK ]",
+			psUserModeDDKDetails->ui32DDKBuild ,PVRVERSION_BUILD));
+	}
+
+#if defined(SUPPORT_TI_VERSION_STRING)
+	/* If TI version string is supported, populate it here. This will show
+	 * up in proc entry.
+	 * Probably a bad place to do this. FIXME
+	 */
+	/* NOTE(review): copies a fixed 64 bytes; assumes both szTIVersion buffers
+	 * are at least 64 bytes and that the UM string is NUL-terminated within
+	 * that length -- TODO confirm against the PVRSRV_BRIDGE_IN_COMPAT_CHECK
+	 * and SYS_DATA declarations. */
+	memcpy(psSysData->szTIVersion, psUserModeDDKDetails->szTIVersion, 64);
+#endif
+}
+
+/*!
+****************************************************************************** + + @Function ReadHWReg + + @Description + + register access function + + @input pvLinRegBaseAddr : lin addr of register block base + + @input ui32Offset : byte offset from register base + + @Return register value + +******************************************************************************/ +#ifndef ReadHWReg +IMG_EXPORT +IMG_UINT32 ReadHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset) +{ + return *(volatile IMG_UINT32*)((IMG_UINTPTR_T)pvLinRegBaseAddr+ui32Offset); +} +#endif + + +/*! +****************************************************************************** + + @Function WriteHWReg + + @Description + + register access function + + @input pvLinRegBaseAddr : lin addr of register block base + + @input ui32Offset : byte offset from register base + + @input ui32Value : value to write to register + + @Return register value : original reg. value + +******************************************************************************/ +#ifndef WriteHWReg +IMG_EXPORT +IMG_VOID WriteHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value) +{ + PVR_DPF((PVR_DBG_MESSAGE,"WriteHWReg Base:%p, Offset: %x, Value %x", + pvLinRegBaseAddr,ui32Offset,ui32Value)); + + *(IMG_UINT32*)((IMG_UINTPTR_T)pvLinRegBaseAddr+ui32Offset) = ui32Value; +} +#endif + + +/*! 
+****************************************************************************** + + @Function WriteHWRegs + + @Description + + register access function + + @input pvLinRegBaseAddr : lin addr of register block base + + @input ui32Count : register count + + @input psHWRegs : address/value register list + + @Return none + +******************************************************************************/ +#ifndef WriteHWRegs +IMG_EXPORT +IMG_VOID WriteHWRegs(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Count, PVRSRV_HWREG *psHWRegs) +{ + while (ui32Count) + { + WriteHWReg (pvLinRegBaseAddr, psHWRegs->ui32RegAddr, psHWRegs->ui32RegVal); + psHWRegs++; + ui32Count--; + } +} +#endif + +/*! +****************************************************************************** + @Function PVRSRVEnumerateDCKM_ForEachVaCb + + @Description + + Enumerates the device node (if is of the same class as given). + + @Input psDeviceNode - The device node to be enumerated + va - variable arguments list, with: + pui32DevCount - The device count pointer (to be increased) + ppui32DevID - The pointer to the device IDs pointer (to be updated and increased) +******************************************************************************/ +static IMG_VOID PVRSRVEnumerateDevicesKM_ForEachVaCb(PVRSRV_DEVICE_NODE *psDeviceNode, va_list va) +{ + IMG_UINT *pui32DevCount; + PVRSRV_DEVICE_IDENTIFIER **ppsDevIdList; + + pui32DevCount = va_arg(va, IMG_UINT*); + ppsDevIdList = va_arg(va, PVRSRV_DEVICE_IDENTIFIER**); + + if (psDeviceNode->sDevId.eDeviceType != PVRSRV_DEVICE_TYPE_EXT) + { + *(*ppsDevIdList) = psDeviceNode->sDevId; + (*ppsDevIdList)++; + (*pui32DevCount)++; + } +} + + + +/*! +****************************************************************************** + + @Function PVRSRVEnumerateDevicesKM + + @Description + This function will enumerate all the devices supported by the + PowerVR services within the target system. 
+ The function returns a list of the device ID strcutres stored either in + the services or constructed in the user mode glue component in certain + environments. The number of devices in the list is also returned. + + In a binary layered component which does not support dynamic runtime selection, + the glue code should compile to return the supported devices statically, + e.g. multiple instances of the same device if multiple devices are supported, + or the target combination of MBX and display device. + + In the case of an environment (for instance) where one MBX1 may connect to two + display devices this code would enumerate all three devices and even + non-dynamic MBX1 selection code should retain the facility to parse the list + to find the index of the MBX device + + @output pui32NumDevices : On success, contains the number of devices present + in the system + + @output psDevIdList : Pointer to called supplied buffer to receive the + list of PVRSRV_DEVICE_IDENTIFIER + + @return PVRSRV_ERROR : PVRSRV_NO_ERROR + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVEnumerateDevicesKM(IMG_UINT32 *pui32NumDevices, + PVRSRV_DEVICE_IDENTIFIER *psDevIdList) +{ + SYS_DATA *psSysData; +/* PVRSRV_DEVICE_NODE *psDeviceNode; */ + IMG_UINT32 i; + + if (!pui32NumDevices || !psDevIdList) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVEnumerateDevicesKM: Invalid params")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + SysAcquireData(&psSysData); + + /* + setup input buffer to be `empty' + */ + for (i=0; ipsDeviceNodeList, + &PVRSRVEnumerateDevicesKM_ForEachVaCb, + pui32NumDevices, + &psDevIdList); + + + return PVRSRV_OK; +} + + +/*! 
+****************************************************************************** + + @Function PVRSRVInit + + @Description Initialise services + + @Input psSysData : sysdata structure + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR IMG_CALLCONV PVRSRVInit(PSYS_DATA psSysData) +{ + PVRSRV_ERROR eError; + + /* Initialise Resource Manager */ + eError = ResManInit(); + if (eError != PVRSRV_OK) + { + goto Error; + } + + eError = PVRSRVPerProcessDataInit(); + if(eError != PVRSRV_OK) + { + goto Error; + } + + /* Initialise handles */ + eError = PVRSRVHandleInit(); + if(eError != PVRSRV_OK) + { + goto Error; + } + + /* Initialise Power Manager Lock */ + eError = OSCreateResource(&psSysData->sPowerStateChangeResource); + if (eError != PVRSRV_OK) + { + goto Error; + } + + /* Initialise system power state */ + psSysData->eCurrentPowerState = PVRSRV_SYS_POWER_STATE_D0; + psSysData->eFailedPowerState = PVRSRV_SYS_POWER_STATE_Unspecified; + + /* Create an event object */ + if((eError = OSAllocMem( PVRSRV_PAGEABLE_SELECT, + sizeof(PVRSRV_EVENTOBJECT) , + (IMG_VOID **)&psSysData->psGlobalEventObject, 0, + "Event Object")) != PVRSRV_OK) + { + + goto Error; + } + + if((eError = OSEventObjectCreateKM("PVRSRV_GLOBAL_EVENTOBJECT", psSysData->psGlobalEventObject)) != PVRSRV_OK) + { + goto Error; + } + + /* Store OS high res timer fallbacks, the system is free to overide these */ + psSysData->pfnHighResTimerCreate = OSFuncHighResTimerCreate; + psSysData->pfnHighResTimerGetus = OSFuncHighResTimerGetus; + psSysData->pfnHighResTimerDestroy = OSFuncHighResTimerDestroy; + +#if defined(TTRACE) + eError = PVRSRVTimeTraceInit(); + if (eError != PVRSRV_OK) + goto Error; + g_ui32InitFlags |= INIT_DATA_ENABLE_TTRACE; +#endif + +#if defined(PDUMP) + /* Initialise pdump */ + PDUMPINIT(); + g_ui32InitFlags |= INIT_DATA_ENABLE_PDUMPINIT; +#endif + +#if defined(SUPPORT_ION) || defined(SUPPORT_DMABUF) + eError = 
PVRSRVInitDeviceMem(); + if (eError != PVRSRV_OK) + goto Error; + g_ui32InitFlags |= INIT_DATA_ENABLE_DEVMEM; +#endif + + PERFINIT(); + return eError; + +Error: + PVRSRVDeInit(psSysData); + return eError; +} + + + +/*! +****************************************************************************** + + @Function PVRSRVDeInit + + @Description De-Initialise services + + @Input psSysData : sysdata structure + + @Return PVRSRV_ERROR : + +******************************************************************************/ +IMG_VOID IMG_CALLCONV PVRSRVDeInit(PSYS_DATA psSysData) +{ + PVRSRV_ERROR eError; + + PVR_UNREFERENCED_PARAMETER(psSysData); + + if (psSysData == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeInit: PVRSRVHandleDeInit failed - invalid param")); + return; + } + + PERFDEINIT(); + + +#if defined(SUPPORT_ION) || defined(SUPPORT_DMABUF) + if ((g_ui32InitFlags & INIT_DATA_ENABLE_DEVMEM) > 0) + { + PVRSRVDeInitDeviceMem(); + } +#endif + +#if defined(MEM_TRACK_INFO_DEBUG) + /* Free the list of memory operations */ + PVRSRVFreeMemOps(); +#endif + +#if defined(TTRACE) + /* deinitialise ttrace */ + if ((g_ui32InitFlags & INIT_DATA_ENABLE_TTRACE) > 0) + { + PVRSRVTimeTraceDeinit(); + } +#endif + +#if defined(PDUMP) + /* deinitialise pdump */ + if( (g_ui32InitFlags & INIT_DATA_ENABLE_PDUMPINIT) > 0) + { + PDUMPDEINIT(); + } +#endif + + /* destroy event object */ + if(psSysData->psGlobalEventObject) + { + OSEventObjectDestroyKM(psSysData->psGlobalEventObject); + OSFreeMem( PVRSRV_PAGEABLE_SELECT, + sizeof(PVRSRV_EVENTOBJECT), + psSysData->psGlobalEventObject, + 0); + psSysData->psGlobalEventObject = IMG_NULL; + } + + eError = PVRSRVHandleDeInit(); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeInit: PVRSRVHandleDeInit failed")); + } + + eError = PVRSRVPerProcessDataDeInit(); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeInit: PVRSRVPerProcessDataDeInit failed")); + } + + ResManDeInit(); +} + + +/*! 
+****************************************************************************** + + @Function PVRSRVRegisterDevice + + @Description + + registers a device with the system + + @Input psSysData : sysdata structure + + @Input pfnRegisterDevice : device registration function + + @Input ui32SOCInterruptBit : SoC interrupt bit for this device + + @Output pui32DeviceIndex : unique device key (for case of multiple identical devices) + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR IMG_CALLCONV PVRSRVRegisterDevice(PSYS_DATA psSysData, + PVRSRV_ERROR (*pfnRegisterDevice)(PVRSRV_DEVICE_NODE*), + IMG_UINT32 ui32SOCInterruptBit, + IMG_UINT32 *pui32DeviceIndex) +{ + PVRSRV_ERROR eError; + PVRSRV_DEVICE_NODE *psDeviceNode; + + /* Allocate device node */ + if(OSAllocMem( PVRSRV_OS_NON_PAGEABLE_HEAP, + sizeof(PVRSRV_DEVICE_NODE), + (IMG_VOID **)&psDeviceNode, IMG_NULL, + "Device Node") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterDevice : Failed to alloc memory for psDeviceNode")); + return (PVRSRV_ERROR_OUT_OF_MEMORY); + } + OSMemSet (psDeviceNode, 0, sizeof(PVRSRV_DEVICE_NODE)); + + eError = pfnRegisterDevice(psDeviceNode); + if (eError != PVRSRV_OK) + { + OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, + sizeof(PVRSRV_DEVICE_NODE), psDeviceNode, IMG_NULL); + /*not nulling pointer, out of scope*/ + PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterDevice : Failed to register device")); + return (PVRSRV_ERROR_DEVICE_REGISTER_FAILED); + } + + /* + make the refcount 1 and test on this to initialise device + at acquiredevinfo. 
On release if refcount is 1, deinitialise + and when refcount is 0 (sysdata de-alloc) deallocate the device + structures + */ + psDeviceNode->ui32RefCount = 1; + psDeviceNode->psSysData = psSysData; + psDeviceNode->ui32SOCInterruptBit = ui32SOCInterruptBit; + + /* all devices need a unique identifier */ + AllocateDeviceID(psSysData, &psDeviceNode->sDevId.ui32DeviceIndex); + + /* and finally insert the device into the dev-list */ + List_PVRSRV_DEVICE_NODE_Insert(&psSysData->psDeviceNodeList, psDeviceNode); + + /* and copy back index */ + *pui32DeviceIndex = psDeviceNode->sDevId.ui32DeviceIndex; + + return PVRSRV_OK; +} + + +/*! +****************************************************************************** + + @Function PVRSRVInitialiseDevice + + @Description + + initialises device by index + + @Input ui32DevIndex : Index to the required device + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR IMG_CALLCONV PVRSRVInitialiseDevice (IMG_UINT32 ui32DevIndex) +{ + PVRSRV_DEVICE_NODE *psDeviceNode; + SYS_DATA *psSysData; + PVRSRV_ERROR eError; + + PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVInitialiseDevice")); + + SysAcquireData(&psSysData); + + /* Find device in the list */ + psDeviceNode = (PVRSRV_DEVICE_NODE*) + List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList, + &MatchDeviceKM_AnyVaCb, + ui32DevIndex, + IMG_TRUE); + if(!psDeviceNode) + { + /* Devinfo not in the list */ + PVR_DPF((PVR_DBG_ERROR,"PVRSRVInitialiseDevice: requested device is not present")); + return PVRSRV_ERROR_INIT_FAILURE; + } +/* +FoundDevice: +*/ + + PVR_ASSERT (psDeviceNode->ui32RefCount > 0); + + /* + Create the device's resource manager context. 
+ */ + eError = PVRSRVResManConnect(IMG_NULL, &psDeviceNode->hResManContext); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVInitialiseDevice: Failed PVRSRVResManConnect call")); + return eError; + } + + /* Initialise the device */ + if(psDeviceNode->pfnInitDevice != IMG_NULL) + { + eError = psDeviceNode->pfnInitDevice(psDeviceNode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVInitialiseDevice: Failed InitDevice call")); + return eError; + } + } + + return PVRSRV_OK; +} + + +static PVRSRV_ERROR PVRSRVFinaliseSystem_SetPowerState_AnyCb(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_ERROR eError; + + eError = PVRSRVPowerLock(KERNEL_ID, IMG_FALSE); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVFinaliseSystem: Failed PVRSRVPowerLock call (device index: %d)", psDeviceNode->sDevId.ui32DeviceIndex)); + return eError; + } + + eError = PVRSRVSetDevicePowerStateKM(psDeviceNode->sDevId.ui32DeviceIndex, + PVRSRV_DEV_POWER_STATE_DEFAULT); + PVRSRVPowerUnlock(KERNEL_ID); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVFinaliseSystem: Failed PVRSRVSetDevicePowerStateKM call (device index: %d)", psDeviceNode->sDevId.ui32DeviceIndex)); + } + return eError; +} + +/*wraps the PVRSRVDevInitCompatCheck call and prints a debugging message if failed*/ +static PVRSRV_ERROR PVRSRVFinaliseSystem_CompatCheck_AnyCb(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_ERROR eError; + eError = PVRSRVDevInitCompatCheck(psDeviceNode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVFinaliseSystem: Failed PVRSRVDevInitCompatCheck call (device index: %d)", psDeviceNode->sDevId.ui32DeviceIndex)); + } + return eError; +} + + +/*! +****************************************************************************** + + @Function PVRSRVFinaliseSystem + + @Description + + Final part of system initialisation. 
+ + @Input ui32DevIndex : Index to the required device + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR IMG_CALLCONV PVRSRVFinaliseSystem(IMG_BOOL bInitSuccessful) +{ +/* PVRSRV_DEVICE_NODE *psDeviceNode;*/ + SYS_DATA *psSysData; + PVRSRV_ERROR eError; + + PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVFinaliseSystem")); + + SysAcquireData(&psSysData); + + if (bInitSuccessful) + { + eError = SysFinalise(); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVFinaliseSystem: SysFinalise failed (%d)", eError)); + return eError; + } + + /* Place all devices into their default power state. */ + eError = List_PVRSRV_DEVICE_NODE_PVRSRV_ERROR_Any(psSysData->psDeviceNodeList, + &PVRSRVFinaliseSystem_SetPowerState_AnyCb); + if (eError != PVRSRV_OK) + { + return eError; + } + + /* Verify microkernel compatibility for devices */ + eError = List_PVRSRV_DEVICE_NODE_PVRSRV_ERROR_Any(psSysData->psDeviceNodeList, + &PVRSRVFinaliseSystem_CompatCheck_AnyCb); + if (eError != PVRSRV_OK) + { + return eError; + } + } + + /* Some platforms call this too early in the boot phase. */ + PDUMPENDINITPHASE(); + + return PVRSRV_OK; +} + + +PVRSRV_ERROR PVRSRVDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + /* Only check devices which specify a compatibility check callback */ + if (psDeviceNode->pfnInitDeviceCompatCheck) + return psDeviceNode->pfnInitDeviceCompatCheck(psDeviceNode); + else + return PVRSRV_OK; +} + +/*! +****************************************************************************** + + @Function PVRSRVAcquireDeviceDataKM + + @Description + + Matchs a device given a device type and a device index. + + @input psDeviceNode :The device node to be matched. + + @Input va : Variable argument list with: + eDeviceType : Required device type. 
If type is unknown use ui32DevIndex + to locate device data + + ui32DevIndex : Index to the required device obtained from the + PVRSRVEnumerateDevice function + + @Return PVRSRV_ERROR : + +******************************************************************************/ +static IMG_VOID * PVRSRVAcquireDeviceDataKM_Match_AnyVaCb(PVRSRV_DEVICE_NODE *psDeviceNode, va_list va) +{ + PVRSRV_DEVICE_TYPE eDeviceType; + IMG_UINT32 ui32DevIndex; + + eDeviceType = va_arg(va, PVRSRV_DEVICE_TYPE); + ui32DevIndex = va_arg(va, IMG_UINT32); + + if ((eDeviceType != PVRSRV_DEVICE_TYPE_UNKNOWN && + psDeviceNode->sDevId.eDeviceType == eDeviceType) || + (eDeviceType == PVRSRV_DEVICE_TYPE_UNKNOWN && + psDeviceNode->sDevId.ui32DeviceIndex == ui32DevIndex)) + { + return psDeviceNode; + } + else + { + return IMG_NULL; + } +} + +/*! +****************************************************************************** + + @Function PVRSRVAcquireDeviceDataKM + + @Description + + Returns device information + + @Input ui32DevIndex : Index to the required device obtained from the + PVRSRVEnumerateDevice function + + @Input eDeviceType : Required device type. 
If type is unknown use ui32DevIndex + to locate device data + + @Output *phDevCookie : Dev Cookie + + + @Return PVRSRV_ERROR : + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVAcquireDeviceDataKM (IMG_UINT32 ui32DevIndex, + PVRSRV_DEVICE_TYPE eDeviceType, + IMG_HANDLE *phDevCookie) +{ + PVRSRV_DEVICE_NODE *psDeviceNode; + SYS_DATA *psSysData; + + PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVAcquireDeviceDataKM")); + + SysAcquireData(&psSysData); + + /* Find device in the list */ + psDeviceNode = List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList, + &PVRSRVAcquireDeviceDataKM_Match_AnyVaCb, + eDeviceType, + ui32DevIndex); + + + if (!psDeviceNode) + { + /* device can't be found in the list so it isn't in the system */ + PVR_DPF((PVR_DBG_ERROR,"PVRSRVAcquireDeviceDataKM: requested device is not present")); + return PVRSRV_ERROR_INIT_FAILURE; + } + +/*FoundDevice:*/ + + PVR_ASSERT (psDeviceNode->ui32RefCount > 0); + + /* return the dev cookie? */ + if (phDevCookie) + { + *phDevCookie = (IMG_HANDLE)psDeviceNode; + } + + return PVRSRV_OK; +} + + +/*! 
******************************************************************************

 @Function	PVRSRVDeinitialiseDevice

 @Description

 This De-inits device: powers it off, frees its dissociated device memory,
 runs the device de-init callback, disconnects its resman context and
 removes the node from the device list.

 @Input	   ui32DevIndex : Index to the required device

 @Return   PVRSRV_ERROR  :

******************************************************************************/
PVRSRV_ERROR IMG_CALLCONV PVRSRVDeinitialiseDevice(IMG_UINT32 ui32DevIndex)
{
	PVRSRV_DEVICE_NODE	*psDeviceNode;
	SYS_DATA			*psSysData;
	PVRSRV_ERROR		eError;

	SysAcquireData(&psSysData);

	psDeviceNode = (PVRSRV_DEVICE_NODE*)
					 List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList,
													&MatchDeviceKM_AnyVaCb,
													ui32DevIndex,
													IMG_TRUE);

	if (!psDeviceNode)
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeinitialiseDevice: requested device %d is not present", ui32DevIndex));
		return PVRSRV_ERROR_DEVICEID_NOT_FOUND;
	}

	eError = PVRSRVPowerLock(KERNEL_ID, IMG_FALSE);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeinitialiseDevice: Failed PVRSRVPowerLock call"));
		return eError;
	}

	/*
		Power down the device if necessary.
	*/
	eError = PVRSRVSetDevicePowerStateKM(ui32DevIndex,
										 PVRSRV_DEV_POWER_STATE_OFF);
	PVRSRVPowerUnlock(KERNEL_ID);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeinitialiseDevice: Failed PVRSRVSetDevicePowerStateKM call"));
		return eError;
	}

	/*
		Free the dissociated device memory.
	*/
	eError = ResManFreeResByCriteria(psDeviceNode->hResManContext,
									 RESMAN_CRITERIA_RESTYPE,
									 RESMAN_TYPE_DEVICEMEM_ALLOCATION,
									 IMG_NULL, 0);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeinitialiseDevice: Failed ResManFreeResByCriteria call"));
		return eError;
	}

	/*
		De-init the device.
	*/
	if(psDeviceNode->pfnDeInitDevice != IMG_NULL)
	{
		eError = psDeviceNode->pfnDeInitDevice(psDeviceNode);
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeinitialiseDevice: Failed DeInitDevice call"));
			return eError;
		}
	}

	/*
		Close the device's resource manager context.
	*/
	PVRSRVResManDisconnect(psDeviceNode->hResManContext, IMG_TRUE);
	psDeviceNode->hResManContext = IMG_NULL;

	/* remove node from list */
	List_PVRSRV_DEVICE_NODE_Remove(psDeviceNode);

	/* deallocate id and memory */
	(IMG_VOID)FreeDeviceID(psSysData, ui32DevIndex);
	OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
			  sizeof(PVRSRV_DEVICE_NODE), psDeviceNode, IMG_NULL);
	/*not nulling pointer, out of scope*/

	return (PVRSRV_OK);
}


/* Polls a memory location until (*addr & mask) == value or the timeout
   expires. bAllowPreemption selects sleeping (OSSleepms) over busy-waiting
   (OSWaitus) between polls. Returns PVRSRV_OK on match, otherwise
   PVRSRV_ERROR_TIMEOUT. */
IMG_EXPORT
PVRSRV_ERROR IMG_CALLCONV PollForValueKM (volatile IMG_UINT32*	pui32LinMemAddr,
										  IMG_UINT32			ui32Value,
										  IMG_UINT32			ui32Mask,
										  IMG_UINT32			ui32Timeoutus,
										  IMG_UINT32			ui32PollPeriodus,
										  IMG_BOOL				bAllowPreemption)
{
#if defined (EMULATOR)
	{
		PVR_UNREFERENCED_PARAMETER(bAllowPreemption);
		#if !defined(__linux__)
		PVR_UNREFERENCED_PARAMETER(ui32PollPeriodus);
		#endif

		/* For the Emulator we want the system to stop when a lock-up is detected so the state can be analysed.
		 * Also the Emulator is much slower than real silicon so timeouts are not valid.
		 */
		if((*pui32LinMemAddr & ui32Mask) == ui32Value)
		{
			return PVRSRV_OK;
		}

		do
		{
			#if defined(__linux__)
			OSWaitus(ui32PollPeriodus);
			#else
			OSReleaseThreadQuanta();
			#endif

			if((*pui32LinMemAddr & ui32Mask) == ui32Value)
			{
				return PVRSRV_OK;
			}

		/* ui32Timeoutus is never decremented here, so a non-zero timeout
		   polls forever by design on the Emulator */
		} while (ui32Timeoutus); /* Endless loop only for the Emulator */
	}
#else
	{
		IMG_UINT32	ui32ActualValue = 0xFFFFFFFFU; /* Initialiser only required to prevent incorrect warning */

		if (bAllowPreemption)
		{
			/* OSSleepms below divides by 1000; sub-millisecond periods
			   would sleep for 0ms */
			PVR_ASSERT(ui32PollPeriodus >= 1000);
		}

		ui32ActualValue = (*pui32LinMemAddr & ui32Mask);
		if(ui32ActualValue == ui32Value)
		{
			return PVRSRV_OK;
		}

		/* PRQA S 3415,4109 1 */ /* macro format critical - leave alone */
		LOOP_UNTIL_TIMEOUT(ui32Timeoutus)
		{
			if (bAllowPreemption)
			{
				OSSleepms(ui32PollPeriodus / 1000);
			}
			else
			{
				OSWaitus(ui32PollPeriodus);
			}

			ui32ActualValue = (*pui32LinMemAddr & ui32Mask);
			if(ui32ActualValue == ui32Value)
			{
				return PVRSRV_OK;
			}

		} END_LOOP_UNTIL_TIMEOUT();

		PVR_DPF((PVR_DBG_ERROR,"PollForValueKM: Timeout. Expected 0x%x but found 0x%x (mask 0x%x).",
				ui32Value, ui32ActualValue, ui32Mask));
	}
#endif /* #if defined (EMULATOR) */

	return PVRSRV_ERROR_TIMEOUT;
}


/*Level 3 of the loop nesting: appends RA stats for one BM heap's arenas
  to the caller's string buffer. ui32Mode selects full stats vs free-mem
  only. */
static IMG_VOID PVRSRVGetMiscInfoKM_RA_GetStats_ForEachVaCb(BM_HEAP *psBMHeap, va_list va)
{
	IMG_CHAR **ppszStr;
	IMG_UINT32 *pui32StrLen;
	IMG_UINT32 ui32Mode;
	PVRSRV_ERROR (*pfnGetStats)(RA_ARENA *, IMG_CHAR **, IMG_UINT32 *);

	ppszStr = va_arg(va, IMG_CHAR**);
	pui32StrLen = va_arg(va, IMG_UINT32*);
	ui32Mode = va_arg(va, IMG_UINT32);

	/* Would be better to pass fn pointer in the variable args list
	 * but MS C compiler complains with error C2066: In ANSI C,
	 * it is not legal to cast between a function pointer and a data pointer.
	 */
	switch(ui32Mode)
	{
		case PVRSRV_MISC_INFO_MEMSTATS_PRESENT:
			pfnGetStats = &RA_GetStats;
			break;
		case PVRSRV_MISC_INFO_FREEMEM_PRESENT:
			pfnGetStats = &RA_GetStatsFreeMem;
			break;
		default:
			/* unrecognised mode: emit nothing */
			return;
	}

	if(psBMHeap->pImportArena)
	{
		pfnGetStats(psBMHeap->pImportArena,
					ppszStr,
					pui32StrLen);
	}

	if(psBMHeap->pVMArena)
	{
		pfnGetStats(psBMHeap->pVMArena,
					ppszStr,
					pui32StrLen);
	}
}

/*Level 2 of the loop nesting: prints a header for one application memory
  context, then iterates its heaps via the level-3 callback. */
static PVRSRV_ERROR PVRSRVGetMiscInfoKM_BMContext_AnyVaCb(BM_CONTEXT *psBMContext, va_list va)
{

	IMG_UINT32 *pui32StrLen;
	IMG_INT32 *pi32Count;
	IMG_CHAR **ppszStr;
	IMG_UINT32 ui32Mode;

	pui32StrLen = va_arg(va, IMG_UINT32*);
	pi32Count = va_arg(va, IMG_INT32*);
	ppszStr = va_arg(va, IMG_CHAR**);
	ui32Mode = va_arg(va, IMG_UINT32);

	/* NOTE(review): the `100` literal assumes at least 100 bytes remain;
	   CHECK_SPACE presumably enforces this - confirm the macro definition */
	CHECK_SPACE(*pui32StrLen);
	*pi32Count = OSSNPrintf(*ppszStr, 100, "\nApplication Context (hDevMemContext) %p:\n",
						   (IMG_HANDLE)psBMContext);
	UPDATE_SPACE(*ppszStr, *pi32Count, *pui32StrLen);

	List_BM_HEAP_ForEach_va(psBMContext->psBMHeap,
							&PVRSRVGetMiscInfoKM_RA_GetStats_ForEachVaCb,
							ppszStr,
							pui32StrLen,
							ui32Mode);
	return PVRSRV_OK;
}


/*level 1 of the loop nesting: prints a header for one device, dumps its
  kernel context heaps, then iterates the application contexts. */
static PVRSRV_ERROR PVRSRVGetMiscInfoKM_Device_AnyVaCb(PVRSRV_DEVICE_NODE *psDeviceNode, va_list va)
{
	IMG_UINT32 *pui32StrLen;
	IMG_INT32 *pi32Count;
	IMG_CHAR **ppszStr;
	IMG_UINT32 ui32Mode;

	pui32StrLen = va_arg(va, IMG_UINT32*);
	pi32Count = va_arg(va, IMG_INT32*);
	ppszStr = va_arg(va, IMG_CHAR**);
	ui32Mode = va_arg(va, IMG_UINT32);

	CHECK_SPACE(*pui32StrLen);
	*pi32Count = OSSNPrintf(*ppszStr, 100, "\n\nDevice Type %d:\n", psDeviceNode->sDevId.eDeviceType);
	UPDATE_SPACE(*ppszStr, *pi32Count, *pui32StrLen);

	/* kernel context: */
	if(psDeviceNode->sDevMemoryInfo.pBMKernelContext)
	{
		CHECK_SPACE(*pui32StrLen);
		*pi32Count = OSSNPrintf(*ppszStr, 100, "\nKernel Context:\n");
		UPDATE_SPACE(*ppszStr, *pi32Count, *pui32StrLen);

		List_BM_HEAP_ForEach_va(psDeviceNode->sDevMemoryInfo.pBMKernelContext->psBMHeap,
								&PVRSRVGetMiscInfoKM_RA_GetStats_ForEachVaCb,
								ppszStr,
								pui32StrLen,
								ui32Mode);
	}

	/* double loop app contexts:heaps */
	return List_BM_CONTEXT_PVRSRV_ERROR_Any_va(psDeviceNode->sDevMemoryInfo.pBMContext,
												&PVRSRVGetMiscInfoKM_BMContext_AnyVaCb,
												pui32StrLen,
												pi32Count,
												ppszStr,
												ui32Mode);
}


/*!
******************************************************************************

 @Function	PVRSRVGetMiscInfoKM

 @Description
	Retrieves misc. info. Each flag set in psMiscInfo->ui32StateRequest
	selects one piece of information; the corresponding bit is set in
	ui32StatePresent for each item actually returned.

 @Output	PVRSRV_MISC_INFO

 @Return	PVRSRV_ERROR  :

******************************************************************************/
IMG_EXPORT
PVRSRV_ERROR IMG_CALLCONV PVRSRVGetMiscInfoKM(PVRSRV_MISC_INFO *psMiscInfo)
{
	SYS_DATA *psSysData;

	if(!psMiscInfo)
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetMiscInfoKM: invalid parameters"));
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	psMiscInfo->ui32StatePresent = 0;

	/* do a basic check for uninitialised request flag */
	if(psMiscInfo->ui32StateRequest & ~(PVRSRV_MISC_INFO_TIMER_PRESENT
								|PVRSRV_MISC_INFO_CLOCKGATE_PRESENT
								|PVRSRV_MISC_INFO_MEMSTATS_PRESENT
								|PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT
								|PVRSRV_MISC_INFO_DDKVERSION_PRESENT
								|PVRSRV_MISC_INFO_CPUCACHEOP_PRESENT
								|PVRSRV_MISC_INFO_RESET_PRESENT
								|PVRSRV_MISC_INFO_FREEMEM_PRESENT
								|PVRSRV_MISC_INFO_GET_REF_COUNT_PRESENT
								|PVRSRV_MISC_INFO_GET_PAGE_SIZE_PRESENT
								|PVRSRV_MISC_INFO_FORCE_SWAP_TO_SYSTEM_PRESENT))
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetMiscInfoKM: invalid state request flags"));
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	SysAcquireData(&psSysData);

	/* return SOC Timer registers */
	if(((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_TIMER_PRESENT) != 0UL) &&
		(psSysData->pvSOCTimerRegisterKM != IMG_NULL))
	{
		psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_TIMER_PRESENT;
		psMiscInfo->pvSOCTimerRegisterKM = psSysData->pvSOCTimerRegisterKM;
		psMiscInfo->hSOCTimerRegisterOSMemHandle = psSysData->hSOCTimerRegisterOSMemHandle;
	}
	else
	{
		psMiscInfo->pvSOCTimerRegisterKM = IMG_NULL;
		psMiscInfo->hSOCTimerRegisterOSMemHandle = IMG_NULL;
	}

	/* return SOC Clock Gating registers */
	if(((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_CLOCKGATE_PRESENT) != 0UL) &&
		(psSysData->pvSOCClockGateRegsBase != IMG_NULL))
	{
		psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_CLOCKGATE_PRESENT;
		psMiscInfo->pvSOCClockGateRegs = psSysData->pvSOCClockGateRegsBase;
		psMiscInfo->ui32SOCClockGateRegsSize = psSysData->ui32SOCClockGateRegsSize;
	}

	/* memory stats */
	if(((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_MEMSTATS_PRESENT) != 0UL) &&
		(psMiscInfo->pszMemoryStr != IMG_NULL))
	{
		RA_ARENA			**ppArena;
		IMG_CHAR			*pszStr;
		IMG_UINT32			ui32StrLen;
		IMG_INT32			i32Count;

		pszStr = psMiscInfo->pszMemoryStr;
		ui32StrLen = psMiscInfo->ui32MemoryStrLen;

		psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_MEMSTATS_PRESENT;

		/* Local backing stores */
		ppArena = &psSysData->apsLocalDevMemArena[0];
		while(*ppArena)
		{
			CHECK_SPACE(ui32StrLen);
			i32Count = OSSNPrintf(pszStr, 100, "\nLocal Backing Store:\n");
			UPDATE_SPACE(pszStr, i32Count, ui32StrLen);

			RA_GetStats(*ppArena,
						&pszStr,
						&ui32StrLen);
			/* advance through the array */
			ppArena++;
		}

		/*triple loop; devices:contexts:heaps*/
		List_PVRSRV_DEVICE_NODE_PVRSRV_ERROR_Any_va(psSysData->psDeviceNodeList,
													&PVRSRVGetMiscInfoKM_Device_AnyVaCb,
													&ui32StrLen,
													&i32Count,
													&pszStr,
													PVRSRV_MISC_INFO_MEMSTATS_PRESENT);

		/* attach a new line and string terminate */
		i32Count = OSSNPrintf(pszStr, 100, "\n");
		UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
	}

	/* Lean version of mem stats: only show free mem on each RA */
	if(((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_FREEMEM_PRESENT) != 0)
		&& psMiscInfo->pszMemoryStr)
	{
		IMG_CHAR			*pszStr;
		IMG_UINT32			ui32StrLen;
		IMG_INT32			i32Count;

		pszStr = psMiscInfo->pszMemoryStr;
		ui32StrLen = psMiscInfo->ui32MemoryStrLen;

		psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_FREEMEM_PRESENT;

		/* triple loop over devices:contexts:heaps */
		List_PVRSRV_DEVICE_NODE_PVRSRV_ERROR_Any_va(psSysData->psDeviceNodeList,
													&PVRSRVGetMiscInfoKM_Device_AnyVaCb,
													&ui32StrLen,
													&i32Count,
													&pszStr,
													PVRSRV_MISC_INFO_FREEMEM_PRESENT);

		i32Count = OSSNPrintf(pszStr, 100, "\n");
		UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
	}

	if(((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT) != 0UL) &&
		(psSysData->psGlobalEventObject != IMG_NULL))
	{
		psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT;
		psMiscInfo->sGlobalEventObject = *psSysData->psGlobalEventObject;
	}

	/* DDK version and memstats not supported in same call to GetMiscInfo */

	if (((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_DDKVERSION_PRESENT) != 0UL)
		&& ((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_MEMSTATS_PRESENT) == 0UL)
		&& (psMiscInfo->pszMemoryStr != IMG_NULL))
	{
		IMG_CHAR	*pszStr;
		IMG_UINT32	ui32StrLen;
		IMG_UINT32 	ui32LenStrPerNum = 12; /* string length per UI32: 10 digits + '.' + '\0' = 12 bytes */
		IMG_INT32	i32Count;
		IMG_INT		i;
		psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_DDKVERSION_PRESENT;

		/* construct DDK string */
		psMiscInfo->aui32DDKVersion[0] = PVRVERSION_MAJ;
		psMiscInfo->aui32DDKVersion[1] = PVRVERSION_MIN;
		psMiscInfo->aui32DDKVersion[2] = PVRVERSION_BUILD_HI;
		psMiscInfo->aui32DDKVersion[3] = PVRVERSION_BUILD_LO;

		pszStr = psMiscInfo->pszMemoryStr;
		ui32StrLen = psMiscInfo->ui32MemoryStrLen;

		/* emit "MAJ.MIN.HI.LO" */
		for (i=0; i<4; i++)
		{
			if (ui32StrLen < ui32LenStrPerNum)
			{
				return PVRSRV_ERROR_INVALID_PARAMS;
			}

			i32Count = OSSNPrintf(pszStr, ui32LenStrPerNum, "%u", psMiscInfo->aui32DDKVersion[i]);
			UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
			if (i != 3)
			{
				i32Count = OSSNPrintf(pszStr, 2, ".");
				UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
			}
		}
	}

	if((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_CPUCACHEOP_PRESENT) != 0UL)
	{
		psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_CPUCACHEOP_PRESENT;

		if(psMiscInfo->sCacheOpCtl.bDeferOp)
		{
			/* For now, assume deferred ops are "full" cache ops,
			 * and we don't need (or expect) a meminfo.
			 */
			psSysData->ePendingCacheOpType = psMiscInfo->sCacheOpCtl.eCacheOpType;
		}
		else
		{
			PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
			PVRSRV_PER_PROCESS_DATA *psPerProc;

			if(!psMiscInfo->sCacheOpCtl.u.hKernelMemInfo)
			{
				PVR_DPF((PVR_DBG_WARNING, "PVRSRVGetMiscInfoKM: "
						 "Ignoring non-deferred cache op with no meminfo"));
				return PVRSRV_ERROR_INVALID_PARAMS;
			}

			if(psSysData->ePendingCacheOpType != PVRSRV_MISC_INFO_CPUCACHEOP_NONE)
			{
				PVR_DPF((PVR_DBG_WARNING, "PVRSRVGetMiscInfoKM: "
						 "Deferred cache op is pending. It is unlikely you want "
						 "to combine deferred cache ops with immediate ones"));
			}

			psPerProc = PVRSRVFindPerProcessData();

			if(PVRSRVLookupHandle(psPerProc->psHandleBase,
								  (IMG_PVOID *)&psKernelMemInfo,
								  psMiscInfo->sCacheOpCtl.u.hKernelMemInfo,
								  PVRSRV_HANDLE_TYPE_MEM_INFO) != PVRSRV_OK)
			{
				PVR_DPF((PVR_DBG_ERROR, "PVRSRVGetMiscInfoKM: "
						 "Can't find kernel meminfo"));
				return PVRSRV_ERROR_INVALID_PARAMS;
			}

			if(psMiscInfo->sCacheOpCtl.eCacheOpType == PVRSRV_MISC_INFO_CPUCACHEOP_FLUSH)
			{
				if(psMiscInfo->sCacheOpCtl.ui32Length!=0)
				{
					if(!OSFlushCPUCacheRangeKM(psKernelMemInfo->sMemBlk.hOSMemHandle,
											   0,
											   psMiscInfo->sCacheOpCtl.pvBaseVAddr,
											   psMiscInfo->sCacheOpCtl.ui32Length))
					{
						return PVRSRV_ERROR_CACHEOP_FAILED;
					}
				}
			}
			else if(psMiscInfo->sCacheOpCtl.eCacheOpType == PVRSRV_MISC_INFO_CPUCACHEOP_CLEAN)
			{
				if(psMiscInfo->sCacheOpCtl.ui32Length!=0)
				{
					if(!OSCleanCPUCacheRangeKM(psKernelMemInfo->sMemBlk.hOSMemHandle,
											   0,
											   psMiscInfo->sCacheOpCtl.pvBaseVAddr,
											   psMiscInfo->sCacheOpCtl.ui32Length))
					{
						return PVRSRV_ERROR_CACHEOP_FAILED;
					}
				}
			}
		}
	}

	if((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_GET_REF_COUNT_PRESENT) != 0UL)
	{
		PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
		PVRSRV_PER_PROCESS_DATA *psPerProc;

		psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_GET_REF_COUNT_PRESENT;

		psPerProc = PVRSRVFindPerProcessData();

		if(PVRSRVLookupHandle(psPerProc->psHandleBase,
							  (IMG_PVOID *)&psKernelMemInfo,
							  psMiscInfo->sGetRefCountCtl.u.hKernelMemInfo,
							  PVRSRV_HANDLE_TYPE_MEM_INFO) != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR, "PVRSRVGetMiscInfoKM: "
					 "Can't find kernel meminfo"));
			return PVRSRV_ERROR_INVALID_PARAMS;
		}

		psMiscInfo->sGetRefCountCtl.ui32RefCount = psKernelMemInfo->ui32RefCount;
	}

	if ((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_GET_PAGE_SIZE_PRESENT) != 0UL)
	{
		psMiscInfo->ui32PageSize = HOST_PAGESIZE();
		psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_GET_PAGE_SIZE_PRESENT;
	}

#if defined(PVRSRV_RESET_ON_HWTIMEOUT)
	if((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_RESET_PRESENT) != 0UL)
	{
		PVR_LOG(("User requested OS reset"));
		OSPanic();
	}
#endif /* #if defined(PVRSRV_RESET_ON_HWTIMEOUT) */

#if defined(SUPPORT_PVRSRV_DEVICE_CLASS)
	if ((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_FORCE_SWAP_TO_SYSTEM_PRESENT) != 0UL)
	{
		PVRSRVProcessQueues(IMG_TRUE);
		psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_FORCE_SWAP_TO_SYSTEM_PRESENT;
	}
#endif /* defined(SUPPORT_PVRSRV_DEVICE_CLASS) */

	return PVRSRV_OK;
}


/*!
******************************************************************************

 @Function	PVRSRVDeviceLISR

 @Description
	OS-independent Device Low-level Interrupt Service Routine

 @Input psDeviceNode

 @Return IMG_BOOL  : Whether any interrupts were serviced

******************************************************************************/
IMG_BOOL IMG_CALLCONV PVRSRVDeviceLISR(PVRSRV_DEVICE_NODE *psDeviceNode)
{
	SYS_DATA			*psSysData;
	IMG_BOOL			bStatus = IMG_FALSE;
	IMG_UINT32			ui32InterruptSource;

	if(!psDeviceNode)
	{
		PVR_DPF((PVR_DBG_ERROR, "PVRSRVDeviceLISR: Invalid params\n"));
		goto out;
	}
	psSysData = psDeviceNode->psSysData;

	/* query the SOC/system to see whether this device was the source of the interrupt */
	ui32InterruptSource = SysGetInterruptSource(psSysData, psDeviceNode);
	if(ui32InterruptSource & psDeviceNode->ui32SOCInterruptBit)
	{
		if(psDeviceNode->pfnDeviceISR != IMG_NULL)
		{
			bStatus = (*psDeviceNode->pfnDeviceISR)(psDeviceNode->pvISRData);
		}

		SysClearInterrupts(psSysData, psDeviceNode->ui32SOCInterruptBit);
	}

out:
	return bStatus;
}

/* Per-device callback for PVRSRVSystemLISR: dispatches the device ISR if
   this device raised an interrupt, and accumulates the SOC clear bits. */
static IMG_VOID PVRSRVSystemLISR_ForEachVaCb(PVRSRV_DEVICE_NODE *psDeviceNode, va_list va)
{

	IMG_BOOL *pbStatus;
	IMG_UINT32 *pui32InterruptSource;
	IMG_UINT32 *pui32ClearInterrupts;

	pbStatus = va_arg(va, IMG_BOOL*);
	pui32InterruptSource = va_arg(va, IMG_UINT32*);
	pui32ClearInterrupts = va_arg(va, IMG_UINT32*);


	if(psDeviceNode->pfnDeviceISR != IMG_NULL)
	{
		if(*pui32InterruptSource & psDeviceNode->ui32SOCInterruptBit)
		{
			if((*psDeviceNode->pfnDeviceISR)(psDeviceNode->pvISRData))
			{
				/* Record if serviced any interrupts. */
				*pbStatus = IMG_TRUE;
			}
			/* Combine the SOC clear bits. */
			*pui32ClearInterrupts |= psDeviceNode->ui32SOCInterruptBit;
		}
	}
}

/*!
******************************************************************************

 @Function	PVRSRVSystemLISR

 @Description
	OS-independent System Low-level Interrupt Service Routine

 @Input pvSysData

 @Return IMG_BOOL  : Whether any interrupts were serviced

******************************************************************************/
IMG_BOOL IMG_CALLCONV PVRSRVSystemLISR(IMG_VOID *pvSysData)
{
	SYS_DATA			*psSysData = pvSysData;
	IMG_BOOL			bStatus = IMG_FALSE;
	IMG_UINT32			ui32InterruptSource;
	IMG_UINT32			ui32ClearInterrupts = 0;

	if(!psSysData)
	{
		PVR_DPF((PVR_DBG_ERROR, "PVRSRVSystemLISR: Invalid params\n"));
	}
	else
	{
		/* query SOC for source of interrupts */
		ui32InterruptSource = SysGetInterruptSource(psSysData, IMG_NULL);

		/* only proceed if PVR interrupts */
		if(ui32InterruptSource)
		{
			/* traverse the devices' ISR handlers */
			List_PVRSRV_DEVICE_NODE_ForEach_va(psSysData->psDeviceNodeList,
												&PVRSRVSystemLISR_ForEachVaCb,
												&bStatus,
												&ui32InterruptSource,
												&ui32ClearInterrupts);

			SysClearInterrupts(psSysData, ui32ClearInterrupts);
		}
	}
	return bStatus;
}


/* Per-device callback for PVRSRVMISR: dispatches the device MISR if one
   was registered. */
static IMG_VOID PVRSRVMISR_ForEachCb(PVRSRV_DEVICE_NODE *psDeviceNode)
{
	if(psDeviceNode->pfnDeviceMISR != IMG_NULL)
	{
		(*psDeviceNode->pfnDeviceMISR)(psDeviceNode->pvISRData);
	}
}

/*!
+****************************************************************************** + + @Function PVRSRVMISR + + @Input pvSysData + + @Description + OS-independent Medium-level Interrupt Service Routine + +******************************************************************************/ +IMG_VOID IMG_CALLCONV PVRSRVMISR(IMG_VOID *pvSysData) +{ + SYS_DATA *psSysData = pvSysData; +/* PVRSRV_DEVICE_NODE *psDeviceNode; */ + + if(!psSysData) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVMISR: Invalid params\n")); + return; + } + + /* Traverse the devices' MISR handlers. */ + List_PVRSRV_DEVICE_NODE_ForEach(psSysData->psDeviceNodeList, + &PVRSRVMISR_ForEachCb); + +#if defined(SUPPORT_PVRSRV_DEVICE_CLASS) + /* Process the queues. */ + if (PVRSRVProcessQueues(IMG_FALSE) == PVRSRV_ERROR_PROCESSING_BLOCKED) + { + PVRSRVProcessQueues(IMG_FALSE); + } +#endif /* defined(SUPPORT_PVRSRV_DEVICE_CLASS) */ + + /* signal global event object */ + if (psSysData->psGlobalEventObject) + { + IMG_HANDLE hOSEventKM = psSysData->psGlobalEventObject->hOSEventKM; + if(hOSEventKM) + { + OSEventObjectSignalKM(hOSEventKM); + } + } +} + + +/*! +****************************************************************************** + + @Function PVRSRVProcessConnect + + @Description Inform services that a process has connected. + + @Input ui32PID - process ID + + @Return PVRSRV_ERROR + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVProcessConnect(IMG_UINT32 ui32PID, IMG_UINT32 ui32Flags) +{ + return PVRSRVPerProcessDataConnect(ui32PID, ui32Flags); +} + + +/*! +****************************************************************************** + + @Function PVRSRVProcessDisconnect + + @Description Inform services that a process has disconnected. 
+ + @Input ui32PID - process ID + + @Return IMG_VOID + +******************************************************************************/ +IMG_EXPORT +IMG_VOID IMG_CALLCONV PVRSRVProcessDisconnect(IMG_UINT32 ui32PID) +{ + PVRSRVPerProcessDataDisconnect(ui32PID); +} + + +/*! +****************************************************************************** + + @Function PVRSRVSaveRestoreLiveSegments + + @Input pArena - the arena the segment was originally allocated from. + pbyBuffer - the system memory buffer set to null to get the size needed. + puiBufSize - size of system memory buffer. + bSave - IMG_TRUE if a save is required + + @Description + Function to save or restore Resources Live segments + +******************************************************************************/ +PVRSRV_ERROR IMG_CALLCONV PVRSRVSaveRestoreLiveSegments(IMG_HANDLE hArena, IMG_PBYTE pbyBuffer, + IMG_SIZE_T *puiBufSize, IMG_BOOL bSave) +{ + IMG_SIZE_T uiBytesSaved = 0; + IMG_PVOID pvLocalMemCPUVAddr; + RA_SEGMENT_DETAILS sSegDetails; + + if (hArena == IMG_NULL) + { + return (PVRSRV_ERROR_INVALID_PARAMS); + } + + sSegDetails.uiSize = 0; + sSegDetails.sCpuPhyAddr.uiAddr = 0; + sSegDetails.hSegment = 0; + + /* walk the arena segments and write live one to the buffer */ + while (RA_GetNextLiveSegment(hArena, &sSegDetails)) + { + if (pbyBuffer == IMG_NULL) + { + /* calc buffer required */ + uiBytesSaved += sizeof(sSegDetails.uiSize) + sSegDetails.uiSize; + } + else + { + if ((uiBytesSaved + sizeof(sSegDetails.uiSize) + sSegDetails.uiSize) > *puiBufSize) + { + return (PVRSRV_ERROR_OUT_OF_MEMORY); + } + + PVR_DPF(( + PVR_DBG_MESSAGE, + "PVRSRVSaveRestoreLiveSegments: Base " CPUPADDR_FMT " size %" SIZE_T_FMT_LEN "x", + sSegDetails.sCpuPhyAddr.uiAddr, + sSegDetails.uiSize)); + + /* Map the device's local memory area onto the host. 
*/ + pvLocalMemCPUVAddr = OSMapPhysToLin(sSegDetails.sCpuPhyAddr, + sSegDetails.uiSize, + PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED, + IMG_NULL); + if (pvLocalMemCPUVAddr == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVSaveRestoreLiveSegments: Failed to map local memory to host")); + return (PVRSRV_ERROR_OUT_OF_MEMORY); + } + + if (bSave) + { + /* write segment size then segment data */ + OSMemCopy(pbyBuffer, &sSegDetails.uiSize, sizeof(sSegDetails.uiSize)); + pbyBuffer += sizeof(sSegDetails.uiSize); + + OSMemCopy(pbyBuffer, pvLocalMemCPUVAddr, sSegDetails.uiSize); + pbyBuffer += sSegDetails.uiSize; + } + else + { + IMG_UINT32 uiSize; + /* reag segment size and validate */ + OSMemCopy(&uiSize, pbyBuffer, sizeof(sSegDetails.uiSize)); + + if (uiSize != sSegDetails.uiSize) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVSaveRestoreLiveSegments: Segment size error")); + } + else + { + pbyBuffer += sizeof(sSegDetails.uiSize); + + OSMemCopy(pvLocalMemCPUVAddr, pbyBuffer, sSegDetails.uiSize); + pbyBuffer += sSegDetails.uiSize; + } + } + + + uiBytesSaved += sizeof(sSegDetails.uiSize) + sSegDetails.uiSize; + + OSUnMapPhysToLin(pvLocalMemCPUVAddr, + sSegDetails.uiSize, + PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED, + IMG_NULL); + } + } + + if (pbyBuffer == IMG_NULL) + { + *puiBufSize = uiBytesSaved; + } + + return (PVRSRV_OK); +} + + +/*! + ****************************************************************************** + + @Function PVRSRVGetErrorStringKM + + @Description Returns a text string relating to the PVRSRV_ERROR enum. 
+ + @Note case statement used rather than an indexed arrary to ensure text is + synchronised with the correct enum + + @Input eError : PVRSRV_ERROR enum + + @Return const IMG_CHAR * : Text string + + @Note Must be kept in sync with servicesext.h + +******************************************************************************/ + +IMG_EXPORT +const IMG_CHAR *PVRSRVGetErrorStringKM(PVRSRV_ERROR eError) +{ +/* PRQA S 5087 1 */ /* include file required here */ +#include "pvrsrv_errors.h" +} + +static IMG_VOID PVRSRVCommandCompleteCallbacks_ForEachCb(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + if(psDeviceNode->pfnDeviceCommandComplete != IMG_NULL) + { + /* Call the device's callback function. */ + (*psDeviceNode->pfnDeviceCommandComplete)(psDeviceNode); + } +} + +/*! +****************************************************************************** + + @Function PVRSRVScheduleDeviceCallbacks + + @Description Schedule all device callbacks + + @Return IMG_VOID + +******************************************************************************/ +IMG_VOID PVRSRVScheduleDeviceCallbacks(IMG_VOID) +{ + SYS_DATA *psSysData; +/* PVRSRV_DEVICE_NODE *psDeviceNode;*/ + + SysAcquireData(&psSysData); + + /*for all the device, invoke the callback function*/ + List_PVRSRV_DEVICE_NODE_ForEach(psSysData->psDeviceNodeList, + &PVRSRVCommandCompleteCallbacks_ForEachCb); +} + +/*! +****************************************************************************** + + @Function PVRSRVScheduleDevices + + @Description Schedules all Services-Managed Devices to check their pending + command queues. The intention is that ScheduleDevices be called by the + 3rd party BC driver after it has finished writing new data to its output + texture. 
+ + @Return IMG_VOID + +******************************************************************************/ +IMG_EXPORT +IMG_VOID PVRSRVScheduleDevicesKM(IMG_VOID) +{ + PVRSRVScheduleDeviceCallbacks(); +} + +/***************************************************************************** + End of file (pvrsrv.c) +*****************************************************************************/ diff --git a/sgx_km/eurasia_km/services4/srvkm/common/queue.c b/sgx_km/eurasia_km/services4/srvkm/common/queue.c new file mode 100644 index 0000000..c12d5e2 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/common/queue.c @@ -0,0 +1,1764 @@ +/*************************************************************************/ /*! +@Title Kernel side command queue functions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "services_headers.h" +#include "pvr_bridge_km.h" + +#include "lists.h" +#include "ttrace.h" + +#if defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) || defined(PVR_ANDROID_NATIVE_WINDOW_HAS_FENCE) +#include +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)) +#include +#elif (LINUX_VERSION_CODE < KERNEL_VERSION(4,9,0)) +#include <../drivers/staging/android/sw_sync.h> +#else +#include <../drivers/dma-buf/sync_debug.h> +#endif + +#if defined(PVR_ANDROID_NATIVE_WINDOW_HAS_FENCE) +#include "pvrsrv_sync_server.h" +#endif + +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) +#include +#include + +typedef struct _PVR_QUEUE_SYNC_KERNEL_SYNC_INFO_ +{ + /* Base services sync info structure */ + PVRSRV_KERNEL_SYNC_INFO *psBase; + + struct list_head sHead; +} PVR_QUEUE_SYNC_KERNEL_SYNC_INFO; + +static IMG_BOOL PVRSyncIsSyncInfoInUse(PVRSRV_KERNEL_SYNC_INFO *psSyncInfo) +{ + return !(psSyncInfo->psSyncData->ui32WriteOpsPending == psSyncInfo->psSyncData->ui32WriteOpsComplete && + psSyncInfo->psSyncData->ui32ReadOpsPending == psSyncInfo->psSyncData->ui32ReadOpsComplete && + psSyncInfo->psSyncData->ui32ReadOps2Pending == psSyncInfo->psSyncData->ui32ReadOps2Complete); +} + +/* Defer Workqueue for releasing command kernel sync info */ +static struct workqueue_struct *gpsWorkQueue; + +/* Linux work struct for workqueue. */ +static struct work_struct gsWork; + +/* The "defer-free" sync object list. 
*/ +static LIST_HEAD(gSyncInfoFreeList); +static DEFINE_SPINLOCK(gSyncInfoFreeListLock); + +static void PVRSyncWorkQueueFunction(struct work_struct *data) +{ + struct list_head sFreeList, *psEntry, *n; + PVR_QUEUE_SYNC_KERNEL_SYNC_INFO *psSyncInfo; + + INIT_LIST_HEAD(&sFreeList); + spin_lock(&gSyncInfoFreeListLock); + list_for_each_safe(psEntry, n, &gSyncInfoFreeList) + { + psSyncInfo = container_of(psEntry, PVR_QUEUE_SYNC_KERNEL_SYNC_INFO, sHead); + + if(!PVRSyncIsSyncInfoInUse(psSyncInfo->psBase)) + list_move_tail(psEntry, &sFreeList); + } + spin_unlock(&gSyncInfoFreeListLock); + + list_for_each_safe(psEntry, n, &sFreeList) + { + psSyncInfo = container_of(psEntry, PVR_QUEUE_SYNC_KERNEL_SYNC_INFO, sHead); + + list_del(psEntry); + + PVRSRVKernelSyncInfoDecRef(psSyncInfo->psBase, IMG_NULL); + } +} +#endif + +#if defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) +static struct sync_fence *AllocQueueFence(struct sw_sync_timeline *psTimeline, IMG_UINT32 ui32FenceValue, const char *szName) +{ + struct sync_fence *psFence = IMG_NULL; + struct sync_pt *psPt; + + psPt = sw_sync_pt_create(psTimeline, ui32FenceValue); + if(psPt) + { + psFence = sync_fence_create(szName, psPt); + if(!psFence) + { + sync_pt_free(psPt); + } + } + + return psFence; +} +#endif /* defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) */ + +#endif /* defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) || defined(PVR_ANDROID_NATIVE_WINDOW_HAS_FENCE) */ + +/* + * The number of commands of each type which can be in flight at once. + */ + +#define DC_MAX_SUPPORTED_QUEUES 1 +#if defined(SUPPORT_DC_CMDCOMPLETE_WHEN_NO_LONGER_DISPLAYED) +#define DC_NUM_COMMANDS_PER_QUEUE 2 +#else +#define DC_NUM_COMMANDS_PER_QUEUE 1 +#endif + +#define DC_NUM_COMMANDS_PER_TYPE (DC_NUM_COMMANDS_PER_QUEUE * DC_MAX_SUPPORTED_QUEUES) + +static IMG_UINT32 ui32NoOfSwapchainCreated = 0; + +/* + * List of private command processing function pointer tables and command + * complete tables for a device in the system. 
+ * Each table is allocated when the device registers its private command + * processing functions. + */ +typedef struct _DEVICE_COMMAND_DATA_ +{ + PFN_CMD_PROC pfnCmdProc; + PCOMMAND_COMPLETE_DATA apsCmdCompleteData[DC_NUM_COMMANDS_PER_TYPE]; + IMG_UINT32 ui32CCBOffset; + IMG_UINT32 ui32MaxDstSyncCount; /*!< Maximum number of dest syncs */ + IMG_UINT32 ui32MaxSrcSyncCount; /*!< Maximum number of source syncs */ +} DEVICE_COMMAND_DATA; + + +#if defined(__linux__) && defined(__KERNEL__) + +#include "proc.h" + +/***************************************************************************** + FUNCTION : ProcSeqShowQueue + + PURPOSE : Print the content of queue element to /proc file + (See env/linux/proc.c:CreateProcReadEntrySeq) + + PARAMETERS : sfile - /proc seq_file + el - Element to print +*****************************************************************************/ +void ProcSeqShowQueue(struct seq_file *sfile,void* el) +{ + PVRSRV_QUEUE_INFO *psQueue = (PVRSRV_QUEUE_INFO*)el; + IMG_INT cmds = 0; + IMG_SIZE_T uReadOffset; + IMG_SIZE_T uWriteOffset; + PVRSRV_COMMAND *psCmd; + + if(el == PVR_PROC_SEQ_START_TOKEN) + { + seq_printf( sfile, + "Command Queues\n" + "Queue CmdPtr Pid Command Size DevInd DSC SSC #Data ...\n"); + return; + } + + uReadOffset = psQueue->uReadOffset; + uWriteOffset = psQueue->uWriteOffset; + + while (uReadOffset != uWriteOffset) + { + psCmd= (PVRSRV_COMMAND *)((IMG_UINTPTR_T)psQueue->pvLinQueueKM + uReadOffset); + + seq_printf(sfile, "%p %p %5u %6u %3" SIZE_T_FMT_LEN "u %5u %2u %2u %3" SIZE_T_FMT_LEN "u \n", + psQueue, + psCmd, + psCmd->ui32ProcessID, + psCmd->CommandType, + psCmd->uCmdSize, + psCmd->ui32DevIndex, + psCmd->ui32DstSyncCount, + psCmd->ui32SrcSyncCount, + psCmd->uDataSize); + { + IMG_UINT32 i; + for (i = 0; i < psCmd->ui32SrcSyncCount; i++) + { + PVRSRV_SYNC_DATA *psSyncData = psCmd->psSrcSync[i].psKernelSyncInfoKM->psSyncData; + seq_printf(sfile, " Sync %u: ROP/ROC: 0x%x/0x%x WOP/WOC: 0x%x/0x%x ROC-VA: 0x%x WOC-VA: 0x%x\n", + i, 
+ psCmd->psSrcSync[i].ui32ReadOps2Pending, + psSyncData->ui32ReadOps2Complete, + psCmd->psSrcSync[i].ui32WriteOpsPending, + psSyncData->ui32WriteOpsComplete, + psCmd->psSrcSync[i].psKernelSyncInfoKM->sReadOps2CompleteDevVAddr.uiAddr, + psCmd->psSrcSync[i].psKernelSyncInfoKM->sWriteOpsCompleteDevVAddr.uiAddr); + } + } + + /* taken from UPDATE_QUEUE_ROFF in queue.h */ + uReadOffset += psCmd->uCmdSize; + uReadOffset &= psQueue->uQueueSize - 1; + cmds++; + } + + if (cmds == 0) + { + seq_printf(sfile, "%p \n", psQueue); + } +} + +/***************************************************************************** + FUNCTION : ProcSeqOff2ElementQueue + + PURPOSE : Transale offset to element (/proc stuff) + + PARAMETERS : sfile - /proc seq_file + off - the offset into the buffer + + RETURNS : element to print +*****************************************************************************/ +void* ProcSeqOff2ElementQueue(struct seq_file * sfile, loff_t off) +{ + PVRSRV_QUEUE_INFO *psQueue = IMG_NULL; + SYS_DATA *psSysData; + + PVR_UNREFERENCED_PARAMETER(sfile); + + if(!off) + { + return PVR_PROC_SEQ_START_TOKEN; + } + + + psSysData = SysAcquireDataNoCheck(); + if (psSysData != IMG_NULL) + { + for (psQueue = psSysData->psQueueList; (((--off) > 0) && (psQueue != IMG_NULL)); psQueue = psQueue->psNextKM); + } + + return psQueue; +} +#endif /* __linux__ && __KERNEL__ */ + +/*! + * Macro to return space in given command queue + */ +#define GET_SPACE_IN_CMDQ(psQueue) \ + ((((psQueue)->uReadOffset - (psQueue)->uWriteOffset) \ + + ((psQueue)->uQueueSize - 1)) & ((psQueue)->uQueueSize - 1)) + +/*! + * Macro to Write Offset in given command queue + */ +#define UPDATE_QUEUE_WOFF(psQueue, uSize) \ + (psQueue)->uWriteOffset = ((psQueue)->uWriteOffset + (uSize)) \ + & ((psQueue)->uQueueSize - 1); + +/*! + * Check if an ops complete value has gone past the pending value. + * This can happen when dummy processing multiple operations, e.g. hardware recovery. 
+ */ +#define SYNCOPS_STALE(ui32OpsComplete, ui32OpsPending) \ + ((ui32OpsComplete) >= (ui32OpsPending)) + +/*! +**************************************************************************** + @Function : PVRSRVGetWriteOpsPending + + @Description : Gets the next operation to wait for in a sync object + + @Input : psSyncInfo - pointer to sync information struct + @Input : bIsReadOp - Is this a read or write op + + @Return : Next op value +*****************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVGetWriteOpsPending) +#endif +static INLINE +IMG_UINT32 PVRSRVGetWriteOpsPending(PVRSRV_KERNEL_SYNC_INFO *psSyncInfo, IMG_BOOL bIsReadOp) +{ + IMG_UINT32 ui32WriteOpsPending; + + if(bIsReadOp) + { + ui32WriteOpsPending = psSyncInfo->psSyncData->ui32WriteOpsPending; + } + else + { + /* + Note: This needs to be atomic and is provided the + kernel driver is single threaded (non-rentrant) + */ + ui32WriteOpsPending = SyncTakeWriteOp(psSyncInfo, SYNC_OP_CLASS_QUEUE); + } + + return ui32WriteOpsPending; +} + +/*! 
+***************************************************************************** + @Function : PVRSRVGetReadOpsPending + + @Description : Gets the number of pending read ops + + @Input : psSyncInfo - pointer to sync information struct + @Input : bIsReadOp - Is this a read or write op + + @Return : Next op value +*****************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVGetReadOpsPending) +#endif +static INLINE +IMG_UINT32 PVRSRVGetReadOpsPending(PVRSRV_KERNEL_SYNC_INFO *psSyncInfo, IMG_BOOL bIsReadOp) +{ + IMG_UINT32 ui32ReadOpsPending; + + if(bIsReadOp) + { + ui32ReadOpsPending = SyncTakeReadOp2(psSyncInfo, SYNC_OP_CLASS_QUEUE); + } + else + { + ui32ReadOpsPending = psSyncInfo->psSyncData->ui32ReadOps2Pending; + } + + return ui32ReadOpsPending; +} + +static IMG_VOID QueueDumpCmdComplete(COMMAND_COMPLETE_DATA *psCmdCompleteData, + IMG_UINT32 i, + IMG_BOOL bIsSrc) +{ + PVRSRV_SYNC_OBJECT *psSyncObject; + + psSyncObject = bIsSrc ? psCmdCompleteData->psSrcSync : psCmdCompleteData->psDstSync; + + if (psCmdCompleteData->bInUse) + { + PVR_LOG(("\t%s %u: ROC DevVAddr:0x%X ROP:0x%x ROC:0x%x, WOC DevVAddr:0x%X WOP:0x%x WOC:0x%x", + bIsSrc ? "SRC" : "DEST", i, + psSyncObject[i].psKernelSyncInfoKM->sReadOps2CompleteDevVAddr.uiAddr, + psSyncObject[i].psKernelSyncInfoKM->psSyncData->ui32ReadOps2Pending, + psSyncObject[i].psKernelSyncInfoKM->psSyncData->ui32ReadOps2Complete, + psSyncObject[i].psKernelSyncInfoKM->sWriteOpsCompleteDevVAddr.uiAddr, + psSyncObject[i].psKernelSyncInfoKM->psSyncData->ui32WriteOpsPending, + psSyncObject[i].psKernelSyncInfoKM->psSyncData->ui32WriteOpsComplete)) + } + else + { + PVR_LOG(("\t%s %u: (Not in use)", bIsSrc ? 
"SRC" : "DEST", i)) + } +} + + +static IMG_VOID QueueDumpDebugInfo_ForEachCb(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + if (psDeviceNode->sDevId.eDeviceClass == PVRSRV_DEVICE_CLASS_DISPLAY) + { + IMG_UINT32 ui32CmdCounter, ui32SyncCounter; + SYS_DATA *psSysData; + DEVICE_COMMAND_DATA *psDeviceCommandData; + PCOMMAND_COMPLETE_DATA psCmdCompleteData; + + SysAcquireData(&psSysData); + + psDeviceCommandData = psSysData->apsDeviceCommandData[psDeviceNode->sDevId.ui32DeviceIndex]; + + if (psDeviceCommandData != IMG_NULL) + { + for (ui32CmdCounter = 0; ui32CmdCounter < DC_NUM_COMMANDS_PER_TYPE; ui32CmdCounter++) + { + psCmdCompleteData = psDeviceCommandData[DC_FLIP_COMMAND].apsCmdCompleteData[ui32CmdCounter]; + + PVR_LOG(("Flip Command Complete Data %u for display device %u:", + ui32CmdCounter, psDeviceNode->sDevId.ui32DeviceIndex)) + + for (ui32SyncCounter = 0; + ui32SyncCounter < psCmdCompleteData->ui32SrcSyncCount; + ui32SyncCounter++) + { + QueueDumpCmdComplete(psCmdCompleteData, ui32SyncCounter, IMG_TRUE); + } + + for (ui32SyncCounter = 0; + ui32SyncCounter < psCmdCompleteData->ui32DstSyncCount; + ui32SyncCounter++) + { + QueueDumpCmdComplete(psCmdCompleteData, ui32SyncCounter, IMG_FALSE); + } + } + } + else + { + PVR_LOG(("There is no Command Complete Data for display device %u", psDeviceNode->sDevId.ui32DeviceIndex)) + } + } +} + + +IMG_VOID QueueDumpDebugInfo(IMG_VOID) +{ + SYS_DATA *psSysData; + SysAcquireData(&psSysData); + List_PVRSRV_DEVICE_NODE_ForEach(psSysData->psDeviceNodeList, &QueueDumpDebugInfo_ForEachCb); +} + + +/***************************************************************************** + Kernel-side functions of User->Kernel transitions +******************************************************************************/ + +static IMG_SIZE_T NearestPower2(IMG_SIZE_T uValue) +{ + IMG_SIZE_T uTemp, uResult = 1; + + if(!uValue) + return 0; + + uTemp = uValue - 1; + while(uTemp) + { + uResult <<= 1; + uTemp >>= 1; + } + + return uResult; +} + + +/*! 
+****************************************************************************** + + @Function PVRSRVCreateCommandQueueKM + + @Description + Creates a new command queue into which render/blt commands etc can be + inserted. + + @Input uQueueSize : + + @Output ppsQueueInfo : + + @Return PVRSRV_ERROR : + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateCommandQueueKM(IMG_SIZE_T uQueueSize, + PVRSRV_QUEUE_INFO **ppsQueueInfo) +{ + PVRSRV_QUEUE_INFO *psQueueInfo; + IMG_SIZE_T uPower2QueueSize = NearestPower2(uQueueSize); + SYS_DATA *psSysData; + PVRSRV_ERROR eError; + IMG_HANDLE hMemBlock; + + if (ui32NoOfSwapchainCreated >= DC_NUM_COMMANDS_PER_TYPE) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateCommandQueueKM: Swapchain already exists, increment DC_MAX_SUPPORTED_QUEUES to support more than one swapchain")); + return PVRSRV_ERROR_FLIP_CHAIN_EXISTS; + } + + SysAcquireData(&psSysData); + + /* allocate an internal queue info structure */ + eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, + sizeof(PVRSRV_QUEUE_INFO), + (IMG_VOID **)&psQueueInfo, &hMemBlock, + "Queue Info"); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateCommandQueueKM: Failed to alloc queue struct")); + goto ErrorExit; + } + OSMemSet(psQueueInfo, 0, sizeof(PVRSRV_QUEUE_INFO)); + + psQueueInfo->hMemBlock[0] = hMemBlock; + psQueueInfo->ui32ProcessID = OSGetCurrentProcessIDKM(); + + /* allocate the command queue buffer - allow for overrun */ + eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, + uPower2QueueSize + PVRSRV_MAX_CMD_SIZE, + &psQueueInfo->pvLinQueueKM, &hMemBlock, + "Command Queue"); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateCommandQueueKM: Failed to alloc queue buffer")); + goto ErrorExit; + } + + psQueueInfo->hMemBlock[1] = hMemBlock; + psQueueInfo->pvLinQueueUM = psQueueInfo->pvLinQueueKM; + + /* Sanity check: Should be zeroed by OSMemSet */ + 
PVR_ASSERT(psQueueInfo->uReadOffset == 0); + PVR_ASSERT(psQueueInfo->uWriteOffset == 0); + + psQueueInfo->uQueueSize = uPower2QueueSize; + +#if defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) + psQueueInfo->pvTimeline = sw_sync_timeline_create("pvr_queue_proc"); + if(psQueueInfo->pvTimeline == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateCommandQueueKM: sw_sync_timeline_create() failed")); + goto ErrorExit; + } +#endif + + /* if this is the first q, create a lock resource for the q list */ + if (psSysData->psQueueList == IMG_NULL) + { + eError = OSCreateResource(&psSysData->sQProcessResource); + if (eError != PVRSRV_OK) + { + goto ErrorExit; + } + } + + /* Ensure we don't corrupt queue list, by blocking access */ +#if !defined(PVR_LINUX_USING_WORKQUEUES) && defined(__linux__) + eError = OSLockResourceAndBlockMISR(&psSysData->sQProcessResource, + KERNEL_ID); +#else /* !defined(PVR_LINUX_USING_WORKQUEUES) && defined(__linux__) */ + eError = OSLockResource(&psSysData->sQProcessResource, + KERNEL_ID); +#endif /* !defined(PVR_LINUX_USING_WORKQUEUES) && defined(__linux__) */ + if (eError != PVRSRV_OK) + { + goto ErrorExit; + } + + psQueueInfo->psNextKM = psSysData->psQueueList; + psSysData->psQueueList = psQueueInfo; + +#if !defined(PVR_LINUX_USING_WORKQUEUES) && defined(__linux__) + eError = OSUnlockResourceAndUnblockMISR(&psSysData->sQProcessResource, KERNEL_ID); +#else /* !defined(PVR_LINUX_USING_WORKQUEUES) && defined(__linux__) */ + eError = OSUnlockResource(&psSysData->sQProcessResource, KERNEL_ID); +#endif /* !defined(PVR_LINUX_USING_WORKQUEUES) && defined(__linux__) */ + if (eError != PVRSRV_OK) + { + goto ErrorExit; + } + + *ppsQueueInfo = psQueueInfo; + +#if (defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) || defined(PVR_ANDROID_NATIVE_WINDOW_HAS_FENCE)) && defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) + if(!ui32NoOfSwapchainCreated) + { + gpsWorkQueue = create_freezable_workqueue("flip_pvr_sync_workqueue"); + if(!gpsWorkQueue) + { + PVR_DPF((PVR_DBG_ERROR, "%s: 
Failed to create flip_pvr_sync workqueue", __func__)); + goto ErrorExit; + } + INIT_WORK(&gsWork, PVRSyncWorkQueueFunction); + } +#endif + ui32NoOfSwapchainCreated++; + + return PVRSRV_OK; + +ErrorExit: + + if(psQueueInfo) + { + if(psQueueInfo->pvLinQueueKM) + { + OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, + psQueueInfo->uQueueSize, + psQueueInfo->pvLinQueueKM, + psQueueInfo->hMemBlock[1]); + psQueueInfo->pvLinQueueKM = IMG_NULL; + } + + OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, + sizeof(PVRSRV_QUEUE_INFO), + psQueueInfo, + psQueueInfo->hMemBlock[0]); + /*not nulling pointer, out of scope*/ + } + + return eError; +} + + +/*! +****************************************************************************** + + @Function PVRSRVDestroyCommandQueueKM + + @Description Destroys a command queue + + @Input psQueueInfo : + + @Return PVRSRV_ERROR + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyCommandQueueKM(PVRSRV_QUEUE_INFO *psQueueInfo) +{ + PVRSRV_QUEUE_INFO *psQueue; + SYS_DATA *psSysData; + PVRSRV_ERROR eError; + IMG_BOOL bTimeout = IMG_TRUE; + + SysAcquireData(&psSysData); + + psQueue = psSysData->psQueueList; + + /* PRQA S 3415,4109 1 */ /* macro format critical - leave alone */ + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + if(psQueueInfo->uReadOffset == psQueueInfo->uWriteOffset) + { + bTimeout = IMG_FALSE; + break; + } + OSSleepms(1); + } END_LOOP_UNTIL_TIMEOUT(); + + if (bTimeout) + { + /* The command queue could not be flushed within the timeout period. + Allow the queue to be destroyed before returning the error code. 
*/ + PVR_DPF((PVR_DBG_ERROR,"PVRSRVDestroyCommandQueueKM : Failed to empty queue")); + eError = PVRSRV_ERROR_CANNOT_FLUSH_QUEUE; + goto ErrorExit; + } + + /* Ensure we don't corrupt queue list, by blocking access */ +#if !defined(PVR_LINUX_USING_WORKQUEUES) && defined(__linux__) + eError = OSLockResourceAndBlockMISR(&psSysData->sQProcessResource, + KERNEL_ID); +#else /* !defined(PVR_LINUX_USING_WORKQUEUES) && defined(__linux__) */ + eError = OSLockResource(&psSysData->sQProcessResource, + KERNEL_ID); +#endif /* !defined(PVR_LINUX_USING_WORKQUEUES) && defined(__linux__) */ + if (eError != PVRSRV_OK) + { + goto ErrorExit; + } + + ui32NoOfSwapchainCreated--; + +#if defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) + sync_timeline_destroy(psQueueInfo->pvTimeline); +#endif + + if(psQueue == psQueueInfo) + { + psSysData->psQueueList = psQueueInfo->psNextKM; + + OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, + NearestPower2(psQueueInfo->uQueueSize) + PVRSRV_MAX_CMD_SIZE, + psQueueInfo->pvLinQueueKM, + psQueueInfo->hMemBlock[1]); + psQueueInfo->pvLinQueueKM = IMG_NULL; + OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, + sizeof(PVRSRV_QUEUE_INFO), + psQueueInfo, + psQueueInfo->hMemBlock[0]); + /* PRQA S 3199 1 */ /* see note */ + psQueueInfo = IMG_NULL; /*it's a copy on stack, but null it because the function doesn't end right here*/ + } + else + { + while(psQueue) + { + if(psQueue->psNextKM == psQueueInfo) + { + psQueue->psNextKM = psQueueInfo->psNextKM; + + OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, + psQueueInfo->uQueueSize, + psQueueInfo->pvLinQueueKM, + psQueueInfo->hMemBlock[1]); + psQueueInfo->pvLinQueueKM = IMG_NULL; + OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, + sizeof(PVRSRV_QUEUE_INFO), + psQueueInfo, + psQueueInfo->hMemBlock[0]); + /* PRQA S 3199 1 */ /* see note */ + psQueueInfo = IMG_NULL; /*it's a copy on stack, but null it because the function doesn't end right here*/ + break; + } + psQueue = psQueue->psNextKM; + } + + if(!psQueue) + { +#if !defined(PVR_LINUX_USING_WORKQUEUES) && 
defined(__linux__) + eError = OSUnlockResourceAndUnblockMISR(&psSysData->sQProcessResource, KERNEL_ID); +#else /* !defined(PVR_LINUX_USING_WORKQUEUES) && defined(__linux__) */ + eError = OSUnlockResource(&psSysData->sQProcessResource, KERNEL_ID); +#endif /* !defined(PVR_LINUX_USING_WORKQUEUES) && defined(__linux__) */ + if (eError != PVRSRV_OK) + { + goto ErrorExit; + } + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto ErrorExit; + } + } + + /* unlock the Q list lock resource */ +#if !defined(PVR_LINUX_USING_WORKQUEUES) && defined(__linux__) + eError = OSUnlockResourceAndUnblockMISR(&psSysData->sQProcessResource, KERNEL_ID); +#else /* !defined(PVR_LINUX_USING_WORKQUEUES) && defined(__linux__) */ + eError = OSUnlockResource(&psSysData->sQProcessResource, KERNEL_ID); +#endif /* !defined(PVR_LINUX_USING_WORKQUEUES) && defined(__linux__) */ + if (eError != PVRSRV_OK) + { + goto ErrorExit; + } + + /* if the Q list is now empty, destroy the Q list lock resource */ + if (psSysData->psQueueList == IMG_NULL) + { + eError = OSDestroyResource(&psSysData->sQProcessResource); + if (eError != PVRSRV_OK) + { + goto ErrorExit; + } + } + +#if defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) && defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) + if(!ui32NoOfSwapchainCreated && gpsWorkQueue) + { + destroy_workqueue(gpsWorkQueue); + } +#endif +ErrorExit: + + return eError; +} + + +/*! 
+***************************************************************************** + + @Function : PVRSRVGetQueueSpaceKM + + @Description : Waits for queue access rights and checks for available space in + queue for task param structure + + @Input : psQueue - pointer to queue information struct + @Input : ui32ParamSize - size of task data structure + @Output : ppvSpace + + @Return : PVRSRV_ERROR +*****************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVGetQueueSpaceKM(PVRSRV_QUEUE_INFO *psQueue, + IMG_SIZE_T uParamSize, + IMG_VOID **ppvSpace) +{ + /* round to 4byte units */ + uParamSize = (uParamSize + 3) & 0xFFFFFFFC; + + if (uParamSize > PVRSRV_MAX_CMD_SIZE) + { + PVR_DPF((PVR_DBG_WARNING,"PVRSRVGetQueueSpace: max command size is %d bytes", PVRSRV_MAX_CMD_SIZE)); + return PVRSRV_ERROR_CMD_TOO_BIG; + } + + if (GET_SPACE_IN_CMDQ(psQueue) > uParamSize) + { + *ppvSpace = (IMG_VOID *)((IMG_UINTPTR_T)psQueue->pvLinQueueUM + psQueue->uWriteOffset); + } + else + { + *ppvSpace = IMG_NULL; + return PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE; + } + + return PVRSRV_OK; +} + + +/*! +***************************************************************************** + @Function PVRSRVInsertCommandKM + + @Description : + command insertion utility + - waits for space in the queue for a new command + - fills in generic command information + - returns a pointer to the caller who's expected to then fill + in the private data. + The caller should follow PVRSRVInsertCommand with PVRSRVSubmitCommand + which will update the queue's write offset so the command can be + executed. 
+ + @Input psQueue : pointer to queue information struct + + @Output ppvCmdData : holds pointer to space in queue for private cmd data + + @Return PVRSRV_ERROR +*****************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVInsertCommandKM(PVRSRV_QUEUE_INFO *psQueue, + PVRSRV_COMMAND **ppsCommand, + IMG_UINT32 ui32DevIndex, + IMG_UINT16 CommandType, + IMG_UINT32 ui32DstSyncCount, + PVRSRV_KERNEL_SYNC_INFO *apsDstSync[], + IMG_UINT32 ui32SrcSyncCount, + PVRSRV_KERNEL_SYNC_INFO *apsSrcSync[], + IMG_SIZE_T uDataByteSize, + PFN_QUEUE_COMMAND_COMPLETE pfnCommandComplete, + IMG_HANDLE hCallbackData, + IMG_HANDLE *phFence) +{ + PVRSRV_ERROR eError; + PVRSRV_COMMAND *psCommand; + IMG_SIZE_T uCommandSize; + IMG_UINT32 i; + SYS_DATA *psSysData; + DEVICE_COMMAND_DATA *psDeviceCommandData; + +#if !(defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) || defined(PVR_ANDROID_NATIVE_WINDOW_HAS_FENCE)) + PVR_UNREFERENCED_PARAMETER(phFence); +#endif + + /* Check that we've got enough space in our command complete data for this command */ + SysAcquireData(&psSysData); + psDeviceCommandData = psSysData->apsDeviceCommandData[ui32DevIndex]; + + if ((psDeviceCommandData[CommandType].ui32MaxDstSyncCount < ui32DstSyncCount) || + (psDeviceCommandData[CommandType].ui32MaxSrcSyncCount < ui32SrcSyncCount)) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVInsertCommandKM: Too many syncs")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* Round up to nearest 32 bit size so pointer arithmetic works */ + uDataByteSize = (uDataByteSize + 3UL) & ~3UL; + + /* calc. 
command size */ + uCommandSize = sizeof(PVRSRV_COMMAND) + + ((ui32DstSyncCount + ui32SrcSyncCount) * sizeof(PVRSRV_SYNC_OBJECT)) + + uDataByteSize; + + /* wait for space in queue */ + eError = PVRSRVGetQueueSpaceKM (psQueue, uCommandSize, (IMG_VOID**)&psCommand); + if(eError != PVRSRV_OK) + { + return eError; + } + +#if defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) + if(phFence != IMG_NULL) + { + struct sync_fence *psRetireFence, *psCleanupFence; + + /* New command? New timeline target */ + psQueue->ui32FenceValue++; + + psRetireFence = AllocQueueFence(psQueue->pvTimeline, psQueue->ui32FenceValue, "pvr_queue_retire"); + if(!psRetireFence) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVInsertCommandKM: sync_fence_create() failed")); + psQueue->ui32FenceValue--; + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* This similar to the retire fence, except that it is destroyed + * when a display command completes, rather than at the whim of + * userspace. It is used to keep the timeline alive. + */ + psCleanupFence = AllocQueueFence(psQueue->pvTimeline, psQueue->ui32FenceValue, "pvr_queue_cleanup"); + if(!psCleanupFence) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVInsertCommandKM: sync_fence_create() #2 failed")); + sync_fence_put(psRetireFence); + psQueue->ui32FenceValue--; + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psCommand->pvCleanupFence = psCleanupFence; + psCommand->pvTimeline = psQueue->pvTimeline; + *phFence = psRetireFence; + } + else + { + psCommand->pvTimeline = IMG_NULL; + psCommand->pvCleanupFence = IMG_NULL; + } +#elif defined(PVR_ANDROID_NATIVE_WINDOW_HAS_FENCE) + if(phFence != IMG_NULL) + { + struct fence *psRetireFence, *psCleanupFence; + + SyncSWGetTimelineObj(psQueue->i32TimelineFd, &psCommand->pvTimeline); + if(psCommand->pvTimeline == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVInsertCommandKM: timeline get failed")); + return PVRSRV_ERROR_STREAM_ERROR; + } + + /* New command? 
New timeline target */ + psQueue->ui32FenceValue++; + + psRetireFence = SyncSWTimelineFenceCreateKM(psQueue->i32TimelineFd, psQueue->ui32FenceValue, "pvr_queue_retire"); + if(!psRetireFence) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVInsertCommandKM: sync_file_create() failed")); + psQueue->ui32FenceValue--; + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* This similar to the retire fence, except that it is destroyed + * when a display command completes, rather than at the whim of + * userspace. It is used to keep the timeline alive. + */ + psCleanupFence = SyncSWTimelineFenceCreateKM(psQueue->i32TimelineFd, psQueue->ui32FenceValue, "pvr_queue_cleanup"); + if(!psCleanupFence) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVInsertCommandKM: sync_file_create() #2 failed")); + SyncSWTimelineFenceReleaseKM(psRetireFence); + psQueue->ui32FenceValue--; + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psCommand->pvCleanupFence = psCleanupFence; + *phFence = psRetireFence; + } + else + { + psCommand->pvTimeline = IMG_NULL; + psCommand->pvCleanupFence = IMG_NULL; + } + +#endif /* defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) */ + + psCommand->ui32ProcessID = OSGetCurrentProcessIDKM(); + + /* setup the command */ + psCommand->uCmdSize = uCommandSize; /* this may change if cmd shrinks */ + psCommand->ui32DevIndex = ui32DevIndex; + psCommand->CommandType = CommandType; + psCommand->ui32DstSyncCount = ui32DstSyncCount; + psCommand->ui32SrcSyncCount = ui32SrcSyncCount; + /* override QAC warning about stricter pointers */ + /* PRQA S 3305 END_PTR_ASSIGNMENTS */ + psCommand->psDstSync = (PVRSRV_SYNC_OBJECT*)(((IMG_UINTPTR_T)psCommand) + sizeof(PVRSRV_COMMAND)); + + + psCommand->psSrcSync = (PVRSRV_SYNC_OBJECT*)(((IMG_UINTPTR_T)psCommand->psDstSync) + + (ui32DstSyncCount * sizeof(PVRSRV_SYNC_OBJECT))); + + psCommand->pvData = (PVRSRV_SYNC_OBJECT*)(((IMG_UINTPTR_T)psCommand->psSrcSync) + + (ui32SrcSyncCount * sizeof(PVRSRV_SYNC_OBJECT))); +/* PRQA L:END_PTR_ASSIGNMENTS */ + + psCommand->uDataSize = 
uDataByteSize;/* this may change if cmd shrinks */ + + psCommand->pfnCommandComplete = pfnCommandComplete; + psCommand->hCallbackData = hCallbackData; + + PVR_TTRACE(PVRSRV_TRACE_GROUP_QUEUE, PVRSRV_TRACE_CLASS_CMD_START, QUEUE_TOKEN_INSERTKM); + PVR_TTRACE_UI32(PVRSRV_TRACE_GROUP_QUEUE, PVRSRV_TRACE_CLASS_NONE, + QUEUE_TOKEN_COMMAND_TYPE, CommandType); + + /* setup dst sync objects and their sync dependencies */ + for (i=0; ipsBase = apsDstSync[i]; + spin_lock(&gSyncInfoFreeListLock); + list_add_tail(&psQueueSync->sHead, &gSyncInfoFreeList); + spin_unlock(&gSyncInfoFreeListLock); +#endif + PVR_TTRACE_SYNC_OBJECT(PVRSRV_TRACE_GROUP_QUEUE, QUEUE_TOKEN_DST_SYNC, + apsDstSync[i], PVRSRV_SYNCOP_SAMPLE); + + psCommand->psDstSync[i].psKernelSyncInfoKM = apsDstSync[i]; + psCommand->psDstSync[i].ui32WriteOpsPending = PVRSRVGetWriteOpsPending(apsDstSync[i], IMG_FALSE); + psCommand->psDstSync[i].ui32ReadOps2Pending = PVRSRVGetReadOpsPending(apsDstSync[i], IMG_FALSE); + + PVRSRVKernelSyncInfoIncRef(apsDstSync[i], IMG_NULL); + + PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVInsertCommandKM: Dst %u RO-VA:0x%x WO-VA:0x%x ROP:0x%x WOP:0x%x", + i, psCommand->psDstSync[i].psKernelSyncInfoKM->sReadOps2CompleteDevVAddr.uiAddr, + psCommand->psDstSync[i].psKernelSyncInfoKM->sWriteOpsCompleteDevVAddr.uiAddr, + psCommand->psDstSync[i].ui32ReadOps2Pending, + psCommand->psDstSync[i].ui32WriteOpsPending)); + } + + /* setup src sync objects and their sync dependencies */ + for (i=0; ipsBase = apsSrcSync[i]; + spin_lock(&gSyncInfoFreeListLock); + list_add_tail(&psQueueSync->sHead, &gSyncInfoFreeList); + spin_unlock(&gSyncInfoFreeListLock); +#endif + + PVR_TTRACE_SYNC_OBJECT(PVRSRV_TRACE_GROUP_QUEUE, QUEUE_TOKEN_DST_SYNC, + apsSrcSync[i], PVRSRV_SYNCOP_SAMPLE); + + psCommand->psSrcSync[i].psKernelSyncInfoKM = apsSrcSync[i]; + psCommand->psSrcSync[i].ui32WriteOpsPending = PVRSRVGetWriteOpsPending(apsSrcSync[i], IMG_TRUE); + psCommand->psSrcSync[i].ui32ReadOps2Pending = PVRSRVGetReadOpsPending(apsSrcSync[i], 
IMG_TRUE); + + PVRSRVKernelSyncInfoIncRef(apsSrcSync[i], IMG_NULL); + + PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVInsertCommandKM: Src %u RO-VA:0x%x WO-VA:0x%x ROP:0x%x WOP:0x%x", + i, psCommand->psSrcSync[i].psKernelSyncInfoKM->sReadOps2CompleteDevVAddr.uiAddr, + psCommand->psSrcSync[i].psKernelSyncInfoKM->sWriteOpsCompleteDevVAddr.uiAddr, + psCommand->psSrcSync[i].ui32ReadOps2Pending, + psCommand->psSrcSync[i].ui32WriteOpsPending)); + } + PVR_TTRACE(PVRSRV_TRACE_GROUP_QUEUE, PVRSRV_TRACE_CLASS_CMD_END, QUEUE_TOKEN_INSERTKM); + + /* return pointer to caller to fill out private data */ + *ppsCommand = psCommand; + + return PVRSRV_OK; +} + + +/*! +******************************************************************************* + @Function : PVRSRVSubmitCommandKM + + @Description : + updates the queue's write offset so the command can be executed. + + @Input : psQueue - queue command is in + @Input : psCommand + + @Return : PVRSRV_ERROR +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVSubmitCommandKM(PVRSRV_QUEUE_INFO *psQueue, + PVRSRV_COMMAND *psCommand) +{ + /* override QAC warnings about stricter pointers */ + /* PRQA S 3305 END_PTR_ASSIGNMENTS2 */ + /* patch pointers in the command to be kernel pointers */ + if (psCommand->ui32DstSyncCount > 0) + { + psCommand->psDstSync = (PVRSRV_SYNC_OBJECT*)(((IMG_UINTPTR_T)psQueue->pvLinQueueKM) + + psQueue->uWriteOffset + sizeof(PVRSRV_COMMAND)); + } + + if (psCommand->ui32SrcSyncCount > 0) + { + psCommand->psSrcSync = (PVRSRV_SYNC_OBJECT*)(((IMG_UINTPTR_T)psQueue->pvLinQueueKM) + + psQueue->uWriteOffset + sizeof(PVRSRV_COMMAND) + + (psCommand->ui32DstSyncCount * sizeof(PVRSRV_SYNC_OBJECT))); + } + + psCommand->pvData = (PVRSRV_SYNC_OBJECT*)(((IMG_UINTPTR_T)psQueue->pvLinQueueKM) + + psQueue->uWriteOffset + sizeof(PVRSRV_COMMAND) + + (psCommand->ui32DstSyncCount * sizeof(PVRSRV_SYNC_OBJECT)) + + (psCommand->ui32SrcSyncCount * sizeof(PVRSRV_SYNC_OBJECT))); + 
+/* PRQA L:END_PTR_ASSIGNMENTS2 */ + + /* update write offset before releasing access lock */ + UPDATE_QUEUE_WOFF(psQueue, psCommand->uCmdSize); + + return PVRSRV_OK; +} + +/*! +****************************************************************************** + + @Function CheckIfSyncIsQueued + + @Description Check if the specificed sync object is already queued and + can safely be given to the display controller. + This check is required as a 3rd party displayclass device can + have several flips "in flight" and we need to ensure that we + keep their pipeline full and don't deadlock waiting for them + to complete an operation on a surface. + + @Input psSysData : system data + @Input psCmdData : COMMAND_COMPLETE_DATA structure + + @Return PVRSRV_ERROR + +******************************************************************************/ +static +PVRSRV_ERROR CheckIfSyncIsQueued(PVRSRV_SYNC_OBJECT *psSync, COMMAND_COMPLETE_DATA *psCmdData) +{ + IMG_UINT32 k; + + if (psCmdData->bInUse) + { + for (k=0;kui32SrcSyncCount;k++) + { + if (psSync->psKernelSyncInfoKM == psCmdData->psSrcSync[k].psKernelSyncInfoKM) + { + PVRSRV_SYNC_DATA *psSyncData = psSync->psKernelSyncInfoKM->psSyncData; + IMG_UINT32 ui32WriteOpsComplete = psSyncData->ui32WriteOpsComplete; + + /* + We still need to ensure that we don't we don't give a command + to the display controller if writes are outstanding on it + */ + if (ui32WriteOpsComplete == psSync->ui32WriteOpsPending) + { + return PVRSRV_OK; + } + else + { + if (SYNCOPS_STALE(ui32WriteOpsComplete, psSync->ui32WriteOpsPending)) + { + PVR_DPF((PVR_DBG_WARNING, + "CheckIfSyncIsQueued: Stale syncops psSyncData:0x%p ui32WriteOpsComplete:0x%x ui32WriteOpsPending:0x%x", + psSyncData, ui32WriteOpsComplete, psSync->ui32WriteOpsPending)); + return PVRSRV_OK; + } + } + } + } + } + return PVRSRV_ERROR_FAILED_DEPENDENCIES; +} + +/*! 
+****************************************************************************** + + @Function PVRSRVProcessCommand + + @Description Tries to process a command + + @Input psSysData : system data + @Input psCommand : PVRSRV_COMMAND structure + @Input bFlush : Check for stale dependencies (only used for HW recovery) + + @Return PVRSRV_ERROR + +******************************************************************************/ +static +PVRSRV_ERROR PVRSRVProcessCommand(SYS_DATA *psSysData, + PVRSRV_COMMAND *psCommand, + IMG_BOOL bFlush) +{ + PVRSRV_SYNC_OBJECT *psWalkerObj; + PVRSRV_SYNC_OBJECT *psEndObj; + IMG_UINT32 i; + COMMAND_COMPLETE_DATA *psCmdCompleteData; + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_UINT32 ui32WriteOpsComplete; + IMG_UINT32 ui32ReadOpsComplete; + DEVICE_COMMAND_DATA *psDeviceCommandData; + IMG_UINT32 ui32CCBOffset; + + /* satisfy sync dependencies on the DST(s) */ + psWalkerObj = psCommand->psDstSync; + psEndObj = psWalkerObj + psCommand->ui32DstSyncCount; + while (psWalkerObj < psEndObj) + { + PVRSRV_SYNC_DATA *psSyncData = psWalkerObj->psKernelSyncInfoKM->psSyncData; + + ui32WriteOpsComplete = psSyncData->ui32WriteOpsComplete; + ui32ReadOpsComplete = psSyncData->ui32ReadOps2Complete; + /* fail if reads or writes are not up to date */ + if ((ui32WriteOpsComplete != psWalkerObj->ui32WriteOpsPending) + || (ui32ReadOpsComplete != psWalkerObj->ui32ReadOps2Pending)) + { + if (!bFlush || + !SYNCOPS_STALE(ui32WriteOpsComplete, psWalkerObj->ui32WriteOpsPending) || + !SYNCOPS_STALE(ui32ReadOpsComplete, psWalkerObj->ui32ReadOps2Pending)) + { + return PVRSRV_ERROR_FAILED_DEPENDENCIES; + } + } + + psWalkerObj++; + } + + /* satisfy sync dependencies on the SRC(s) */ + psWalkerObj = psCommand->psSrcSync; + psEndObj = psWalkerObj + psCommand->ui32SrcSyncCount; + while (psWalkerObj < psEndObj) + { + PVRSRV_SYNC_DATA *psSyncData = psWalkerObj->psKernelSyncInfoKM->psSyncData; + + ui32ReadOpsComplete = psSyncData->ui32ReadOps2Complete; + ui32WriteOpsComplete = 
psSyncData->ui32WriteOpsComplete; + /* fail if writes are not up to date */ + if ((ui32WriteOpsComplete != psWalkerObj->ui32WriteOpsPending) + || (ui32ReadOpsComplete != psWalkerObj->ui32ReadOps2Pending)) + { + if (!bFlush && + SYNCOPS_STALE(ui32WriteOpsComplete, psWalkerObj->ui32WriteOpsPending) && + SYNCOPS_STALE(ui32ReadOpsComplete, psWalkerObj->ui32ReadOps2Pending)) + { + PVR_DPF((PVR_DBG_WARNING, + "PVRSRVProcessCommand: Stale syncops psSyncData:0x%p ui32WriteOpsComplete:0x%x ui32WriteOpsPending:0x%x", + psSyncData, ui32WriteOpsComplete, psWalkerObj->ui32WriteOpsPending)); + } + + if (!bFlush || + !SYNCOPS_STALE(ui32WriteOpsComplete, psWalkerObj->ui32WriteOpsPending) || + !SYNCOPS_STALE(ui32ReadOpsComplete, psWalkerObj->ui32ReadOps2Pending)) + { + IMG_UINT32 j; + PVRSRV_ERROR eError; + IMG_BOOL bFound = IMG_FALSE; + + psDeviceCommandData = psSysData->apsDeviceCommandData[psCommand->ui32DevIndex]; + for (j=0;jCommandType].apsCmdCompleteData[j]); + + if (eError == PVRSRV_OK) + { + bFound = IMG_TRUE; + } + } + if (!bFound) + return PVRSRV_ERROR_FAILED_DEPENDENCIES; + } + } + psWalkerObj++; + } + + /* validate device type */ + if (psCommand->ui32DevIndex >= SYS_DEVICE_COUNT) + { + PVR_DPF((PVR_DBG_ERROR, + "PVRSRVProcessCommand: invalid DeviceType 0x%x", + psCommand->ui32DevIndex)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* fish out the appropriate storage structure for the duration of the command */ + psDeviceCommandData = psSysData->apsDeviceCommandData[psCommand->ui32DevIndex]; + ui32CCBOffset = psDeviceCommandData[psCommand->CommandType].ui32CCBOffset; + psCmdCompleteData = psDeviceCommandData[psCommand->CommandType].apsCmdCompleteData[ui32CCBOffset]; + if (psCmdCompleteData->bInUse) + { + /* can use this to protect against concurrent execution of same command */ + return PVRSRV_ERROR_FAILED_DEPENDENCIES; + } + + /* mark the structure as in use */ + psCmdCompleteData->bInUse = IMG_TRUE; + + /* copy src updates over */ + 
psCmdCompleteData->ui32DstSyncCount = psCommand->ui32DstSyncCount; + for (i=0; iui32DstSyncCount; i++) + { + psCmdCompleteData->psDstSync[i] = psCommand->psDstSync[i]; + + PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVProcessCommand: Dst %u RO-VA:0x%x WO-VA:0x%x ROP:0x%x WOP:0x%x (CCB:%u)", + i, psCmdCompleteData->psDstSync[i].psKernelSyncInfoKM->sReadOps2CompleteDevVAddr.uiAddr, + psCmdCompleteData->psDstSync[i].psKernelSyncInfoKM->sWriteOpsCompleteDevVAddr.uiAddr, + psCmdCompleteData->psDstSync[i].ui32ReadOps2Pending, + psCmdCompleteData->psDstSync[i].ui32WriteOpsPending, + ui32CCBOffset)); + } + + psCmdCompleteData->pfnCommandComplete = psCommand->pfnCommandComplete; + psCmdCompleteData->hCallbackData = psCommand->hCallbackData; + +#if defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) || defined(PVR_ANDROID_NATIVE_WINDOW_HAS_FENCE) + psCmdCompleteData->pvCleanupFence = psCommand->pvCleanupFence; + psCmdCompleteData->pvTimeline = psCommand->pvTimeline; +#endif + + /* copy dst updates over */ + psCmdCompleteData->ui32SrcSyncCount = psCommand->ui32SrcSyncCount; + for (i=0; iui32SrcSyncCount; i++) + { + psCmdCompleteData->psSrcSync[i] = psCommand->psSrcSync[i]; + + PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVProcessCommand: Src %u RO-VA:0x%x WO-VA:0x%x ROP:0x%x WOP:0x%x (CCB:%u)", + i, psCmdCompleteData->psSrcSync[i].psKernelSyncInfoKM->sReadOps2CompleteDevVAddr.uiAddr, + psCmdCompleteData->psSrcSync[i].psKernelSyncInfoKM->sWriteOpsCompleteDevVAddr.uiAddr, + psCmdCompleteData->psSrcSync[i].ui32ReadOps2Pending, + psCmdCompleteData->psSrcSync[i].ui32WriteOpsPending, + ui32CCBOffset)); + } + + /* + call the cmd specific handler: + it should: + - check the cmd specific dependencies + - setup private cmd complete structure + - execute cmd on HW + - store psCmdCompleteData `cookie' and later pass as + argument to Generic Command Complete Callback + + n.b. 
ui32DataSize (packet size) is useful for packet validation + */ + if (psDeviceCommandData[psCommand->CommandType].pfnCmdProc((IMG_HANDLE)psCmdCompleteData, + (IMG_UINT32)psCommand->uDataSize, + psCommand->pvData) == IMG_FALSE) + { + /* + clean-up: + free cmd complete structure + */ + psCmdCompleteData->bInUse = IMG_FALSE; + eError = PVRSRV_ERROR_CMD_NOT_PROCESSED; + PVR_LOG(("Failed to submit command from queue processor, this could cause sync wedge!")); + } + else + { + /* Increment the CCB offset */ + psDeviceCommandData[psCommand->CommandType].ui32CCBOffset = (ui32CCBOffset + 1) % DC_NUM_COMMANDS_PER_TYPE; + } + + return eError; +} + + +static IMG_VOID PVRSRVProcessQueues_ForEachCb(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + if (psDeviceNode->bReProcessDeviceCommandComplete && + psDeviceNode->pfnDeviceCommandComplete != IMG_NULL) + { + (*psDeviceNode->pfnDeviceCommandComplete)(psDeviceNode); + } +} + +/*! +****************************************************************************** + + @Function PVRSRVProcessQueues + + @Description Tries to process a command from each Q + + @input ui32CallerID - used to distinguish between async ISR/DPC type calls + the synchronous services driver + @input bFlush - flush commands with stale dependencies (only used for HW recovery) + + @Return PVRSRV_ERROR + +******************************************************************************/ + +IMG_EXPORT +PVRSRV_ERROR PVRSRVProcessQueues(IMG_BOOL bFlush) +{ + PVRSRV_QUEUE_INFO *psQueue; + SYS_DATA *psSysData; + PVRSRV_COMMAND *psCommand; +/* PVRSRV_DEVICE_NODE *psDeviceNode;*/ + + SysAcquireData(&psSysData); + + /* Ensure we don't corrupt queue list, by blocking access. 
This is required for OSs where + multiple ISR threads may exist simultaneously (eg WinXP DPC routines) + */ + if (psSysData->psQueueList == IMG_NULL) + { + PVR_DPF((PVR_DBG_MESSAGE,"No Queues installed - cannot process commands")); + return PVRSRV_OK; + } + +#if !defined(PVR_LINUX_USING_WORKQUEUES) && defined(__linux__) + while (OSLockResourceAndBlockMISR(&psSysData->sQProcessResource, ISR_ID) != PVRSRV_OK) +#else /* !defined(PVR_LINUX_USING_WORKQUEUES) && defined(__linux__) */ + while (OSLockResource(&psSysData->sQProcessResource, ISR_ID) != PVRSRV_OK) +#endif /* !defined(PVR_LINUX_USING_WORKQUEUES) && defined(__linux__) */ + { + OSWaitus(1); + }; + + psQueue = psSysData->psQueueList; + + if(!psQueue) + { + PVR_DPF((PVR_DBG_MESSAGE,"No Queues installed - cannot process commands")); + } + + if (bFlush) + { + PVRSRVSetDCState(DC_STATE_FLUSH_COMMANDS); + } + + while (psQueue) + { + while (psQueue->uReadOffset != psQueue->uWriteOffset) + { + psCommand = (PVRSRV_COMMAND*)((IMG_UINTPTR_T)psQueue->pvLinQueueKM + psQueue->uReadOffset); + + if (PVRSRVProcessCommand(psSysData, psCommand, bFlush) == PVRSRV_OK) + { + /* processed cmd so update queue */ + UPDATE_QUEUE_ROFF(psQueue, psCommand->uCmdSize) + continue; + } + + break; + } + psQueue = psQueue->psNextKM; + } + + if (bFlush) + { + PVRSRVSetDCState(DC_STATE_NO_FLUSH_COMMANDS); + } + + /* Re-process command complete handlers if necessary. 
*/ + List_PVRSRV_DEVICE_NODE_ForEach(psSysData->psDeviceNodeList, + &PVRSRVProcessQueues_ForEachCb); + +#if !defined(PVR_LINUX_USING_WORKQUEUES) && defined(__linux__) + OSUnlockResourceAndUnblockMISR(&psSysData->sQProcessResource, ISR_ID); +#else /* !defined(PVR_LINUX_USING_WORKQUEUES) && defined(__linux__) */ + OSUnlockResource(&psSysData->sQProcessResource, ISR_ID); +#endif /* !defined(PVR_LINUX_USING_WORKQUEUES) && defined(__linux__) */ + + return PVRSRV_OK; +} + +#if defined(SYS_OMAP_HAS_DVFS_FRAMEWORK) +extern void sgxfreq_notif_sgx_frame_done(void); +#endif /* (SYS_OMAP4_HAS_DVFS_FRAMEWORK) */ + +/*! +****************************************************************************** + + @Function PVRSRVCommandCompleteKM + + @Description Updates non-private command complete sync objects + + @Input hCmdCookie : command cookie + @Input bScheduleMISR : boolean to schedule MISR + + @Return PVRSRV_ERROR + +******************************************************************************/ +IMG_EXPORT +IMG_VOID PVRSRVCommandCompleteKM(IMG_HANDLE hCmdCookie, + IMG_BOOL bScheduleMISR) +{ + IMG_UINT32 i; + COMMAND_COMPLETE_DATA *psCmdCompleteData = (COMMAND_COMPLETE_DATA *)hCmdCookie; + SYS_DATA *psSysData; + +#if defined(SYS_OMAP_HAS_DVFS_FRAMEWORK) + sgxfreq_notif_sgx_frame_done(); +#endif /* (SYS_OMAP_HAS_DVFS_FRAMEWORK) */ + + SysAcquireData(&psSysData); + + PVR_TTRACE(PVRSRV_TRACE_GROUP_QUEUE, PVRSRV_TRACE_CLASS_CMD_COMP_START, + QUEUE_TOKEN_COMMAND_COMPLETE); + + /* update DST(s) syncs */ + for (i=0; iui32DstSyncCount; i++) + { + psCmdCompleteData->psDstSync[i].psKernelSyncInfoKM->psSyncData->ui32WriteOpsComplete++; + +#if !((defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) || defined(PVR_ANDROID_NATIVE_WINDOW_HAS_FENCE)) && defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)) + PVRSRVKernelSyncInfoDecRef(psCmdCompleteData->psDstSync[i].psKernelSyncInfoKM, IMG_NULL); +#endif + + PVR_TTRACE_SYNC_OBJECT(PVRSRV_TRACE_GROUP_QUEUE, QUEUE_TOKEN_UPDATE_DST, + 
psCmdCompleteData->psDstSync[i].psKernelSyncInfoKM, + PVRSRV_SYNCOP_COMPLETE); + + PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVCommandCompleteKM: Dst %u RO-VA:0x%x WO-VA:0x%x ROP:0x%x WOP:0x%x", + i, psCmdCompleteData->psDstSync[i].psKernelSyncInfoKM->sReadOps2CompleteDevVAddr.uiAddr, + psCmdCompleteData->psDstSync[i].psKernelSyncInfoKM->sWriteOpsCompleteDevVAddr.uiAddr, + psCmdCompleteData->psDstSync[i].ui32ReadOps2Pending, + psCmdCompleteData->psDstSync[i].ui32WriteOpsPending)); + } + + /* update SRC(s) syncs */ + for (i=0; iui32SrcSyncCount; i++) + { + psCmdCompleteData->psSrcSync[i].psKernelSyncInfoKM->psSyncData->ui32ReadOps2Complete++; + +#if !((defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) || defined(PVR_ANDROID_NATIVE_WINDOW_HAS_FENCE)) && defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)) + PVRSRVKernelSyncInfoDecRef(psCmdCompleteData->psSrcSync[i].psKernelSyncInfoKM, IMG_NULL); +#endif + + PVR_TTRACE_SYNC_OBJECT(PVRSRV_TRACE_GROUP_QUEUE, QUEUE_TOKEN_UPDATE_SRC, + psCmdCompleteData->psSrcSync[i].psKernelSyncInfoKM, + PVRSRV_SYNCOP_COMPLETE); + + PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVCommandCompleteKM: Src %u RO-VA:0x%x WO-VA:0x%x ROP:0x%x WOP:0x%x", + i, psCmdCompleteData->psSrcSync[i].psKernelSyncInfoKM->sReadOps2CompleteDevVAddr.uiAddr, + psCmdCompleteData->psSrcSync[i].psKernelSyncInfoKM->sWriteOpsCompleteDevVAddr.uiAddr, + psCmdCompleteData->psSrcSync[i].ui32ReadOps2Pending, + psCmdCompleteData->psSrcSync[i].ui32WriteOpsPending)); + } + +#if (defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) || defined(PVR_ANDROID_NATIVE_WINDOW_HAS_FENCE)) && defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) + if(psCmdCompleteData->ui32DstSyncCount || psCmdCompleteData->ui32SrcSyncCount) + { + /* Add work to worker thread for checking and freeing of kernel sync */ + queue_work(gpsWorkQueue, &gsWork); + } +#endif + + PVR_TTRACE(PVRSRV_TRACE_GROUP_QUEUE, PVRSRV_TRACE_CLASS_CMD_COMP_END, + QUEUE_TOKEN_COMMAND_COMPLETE); + + if (psCmdCompleteData->pfnCommandComplete) + { + 
psCmdCompleteData->pfnCommandComplete(psCmdCompleteData->hCallbackData); + } + +#if defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) + if(psCmdCompleteData->pvTimeline) + { + sw_sync_timeline_inc(psCmdCompleteData->pvTimeline, 1); + sync_fence_put(psCmdCompleteData->pvCleanupFence); + } +#elif defined(PVR_ANDROID_NATIVE_WINDOW_HAS_FENCE) + if(psCmdCompleteData->pvTimeline) + { + SyncSWTimelineAdvanceKM(psCmdCompleteData->pvTimeline); + SyncSWTimelineReleaseKM(psCmdCompleteData->pvTimeline); + psCmdCompleteData->pvTimeline = IMG_NULL; + + psCmdCompleteData->pvCleanupFence = IMG_NULL; + } +#endif /* defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) */ + + /* free command complete storage */ + psCmdCompleteData->bInUse = IMG_FALSE; + + /* FIXME: This may cause unrelated devices to be woken up. */ + PVRSRVScheduleDeviceCallbacks(); + + if(bScheduleMISR) + { + OSScheduleMISR(psSysData); + } +} + + +/*! +****************************************************************************** + + @Function PVRSRVRegisterCmdProcListKM + + @Description + + registers a list of private command processing functions with the Command + Queue Manager + + @Input ui32DevIndex : device index + + @Input ppfnCmdProcList : function ptr table of private command processors + + @Input ui32MaxSyncsPerCmd : max number of syncobjects used by command + + @Input ui32CmdCount : number of entries in function ptr table + + @Return PVRSRV_ERROR + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR PVRSRVRegisterCmdProcListKM(IMG_UINT32 ui32DevIndex, + PFN_CMD_PROC *ppfnCmdProcList, + IMG_UINT32 ui32MaxSyncsPerCmd[][2], + IMG_UINT32 ui32CmdCount) +{ + SYS_DATA *psSysData; + PVRSRV_ERROR eError; + IMG_UINT32 ui32CmdCounter, ui32CmdTypeCounter; + IMG_SIZE_T ui32AllocSize; + DEVICE_COMMAND_DATA *psDeviceCommandData; + COMMAND_COMPLETE_DATA *psCmdCompleteData; + + /* validate device type */ + if(ui32DevIndex >= SYS_DEVICE_COUNT) + { + PVR_DPF((PVR_DBG_ERROR, + 
"PVRSRVRegisterCmdProcListKM: invalid DeviceType 0x%x", + ui32DevIndex)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* acquire system data structure */ + SysAcquireData(&psSysData); + + /* array of pointers for each command store */ + ui32AllocSize = ui32CmdCount * sizeof(*psDeviceCommandData); + eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, + ui32AllocSize, + (IMG_VOID **)&psDeviceCommandData, IMG_NULL, + "Array of Pointers for Command Store"); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterCmdProcListKM: Failed to alloc CC data")); + goto ErrorExit; + } + + psSysData->apsDeviceCommandData[ui32DevIndex] = psDeviceCommandData; + + for (ui32CmdTypeCounter = 0; ui32CmdTypeCounter < ui32CmdCount; ui32CmdTypeCounter++) + { + psDeviceCommandData[ui32CmdTypeCounter].pfnCmdProc = ppfnCmdProcList[ui32CmdTypeCounter]; + psDeviceCommandData[ui32CmdTypeCounter].ui32CCBOffset = 0; + psDeviceCommandData[ui32CmdTypeCounter].ui32MaxDstSyncCount = ui32MaxSyncsPerCmd[ui32CmdTypeCounter][0]; + psDeviceCommandData[ui32CmdTypeCounter].ui32MaxSrcSyncCount = ui32MaxSyncsPerCmd[ui32CmdTypeCounter][1]; + for (ui32CmdCounter = 0; ui32CmdCounter < DC_NUM_COMMANDS_PER_TYPE; ui32CmdCounter++) + { + /* + allocate storage for the sync update on command complete + */ + ui32AllocSize = sizeof(COMMAND_COMPLETE_DATA) /* space for one GENERIC_CMD_COMPLETE */ + + ((ui32MaxSyncsPerCmd[ui32CmdTypeCounter][0] + + ui32MaxSyncsPerCmd[ui32CmdTypeCounter][1]) + * sizeof(PVRSRV_SYNC_OBJECT)); /* space for max sync objects */ + + eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, + ui32AllocSize, + (IMG_VOID **)&psCmdCompleteData, + IMG_NULL, + "Command Complete Data"); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterCmdProcListKM: Failed to alloc cmd %d", ui32CmdTypeCounter)); + goto ErrorExit; + } + + psDeviceCommandData[ui32CmdTypeCounter].apsCmdCompleteData[ui32CmdCounter] = psCmdCompleteData; + + /* clear memory */ + OSMemSet(psCmdCompleteData, 0x00, 
ui32AllocSize); + + /* setup sync pointers */ + psCmdCompleteData->psDstSync = (PVRSRV_SYNC_OBJECT*) + (((IMG_UINTPTR_T)psCmdCompleteData) + + sizeof(COMMAND_COMPLETE_DATA)); + psCmdCompleteData->psSrcSync = (PVRSRV_SYNC_OBJECT*) + (((IMG_UINTPTR_T)psCmdCompleteData->psDstSync) + + (sizeof(PVRSRV_SYNC_OBJECT) * ui32MaxSyncsPerCmd[ui32CmdTypeCounter][0])); + + psCmdCompleteData->ui32AllocSize = (IMG_UINT32)ui32AllocSize; + } + } + + return PVRSRV_OK; + +ErrorExit: + + /* clean-up if things went wrong */ + if (PVRSRVRemoveCmdProcListKM(ui32DevIndex, ui32CmdCount) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "PVRSRVRegisterCmdProcListKM: Failed to clean up after error, device 0x%x", + ui32DevIndex)); + } + + return eError; +} + + +/*! +****************************************************************************** + + @Function PVRSRVRemoveCmdProcListKM + + @Description + + removes a list of private command processing functions and data from the + Queue Manager + + @Input ui32DevIndex : device index + + @Input ui32CmdCount : number of entries in function ptr table + + @Return PVRSRV_ERROR + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR PVRSRVRemoveCmdProcListKM(IMG_UINT32 ui32DevIndex, + IMG_UINT32 ui32CmdCount) +{ + SYS_DATA *psSysData; + IMG_UINT32 ui32CmdTypeCounter, ui32CmdCounter; + DEVICE_COMMAND_DATA *psDeviceCommandData; + COMMAND_COMPLETE_DATA *psCmdCompleteData; + IMG_SIZE_T ui32AllocSize; + + /* validate device type */ + if(ui32DevIndex >= SYS_DEVICE_COUNT) + { + PVR_DPF((PVR_DBG_ERROR, + "PVRSRVRemoveCmdProcListKM: invalid DeviceType 0x%x", + ui32DevIndex)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* acquire system data structure */ + SysAcquireData(&psSysData); + + psDeviceCommandData = psSysData->apsDeviceCommandData[ui32DevIndex]; + if(psDeviceCommandData != IMG_NULL) + { + for (ui32CmdTypeCounter = 0; ui32CmdTypeCounter < ui32CmdCount; ui32CmdTypeCounter++) + { + for (ui32CmdCounter = 
0; ui32CmdCounter < DC_NUM_COMMANDS_PER_TYPE; ui32CmdCounter++) + { + psCmdCompleteData = psDeviceCommandData[ui32CmdTypeCounter].apsCmdCompleteData[ui32CmdCounter]; + + /* free the cmd complete structure array entries */ + if (psCmdCompleteData != IMG_NULL) + { + PVR_ASSERT(psCmdCompleteData->bInUse == IMG_FALSE); + OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, psCmdCompleteData->ui32AllocSize, + psCmdCompleteData, IMG_NULL); + psDeviceCommandData[ui32CmdTypeCounter].apsCmdCompleteData[ui32CmdCounter] = IMG_NULL; + } + } + } + + /* free the cmd complete structure array for the device */ + ui32AllocSize = ui32CmdCount * sizeof(*psDeviceCommandData); + OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, ui32AllocSize, psDeviceCommandData, IMG_NULL); + psSysData->apsDeviceCommandData[ui32DevIndex] = IMG_NULL; + } + + return PVRSRV_OK; +} + +/****************************************************************************** + End of file (queue.c) +******************************************************************************/ diff --git a/sgx_km/eurasia_km/services4/srvkm/common/ra.c b/sgx_km/eurasia_km/services4/srvkm/common/ra.c new file mode 100644 index 0000000..9816a9a --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/common/ra.c @@ -0,0 +1,2217 @@ +/*************************************************************************/ /*! +@Title Resource Allocator +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description + Implements generic resource allocation. The resource + allocator was originally intended to manage address spaces in + practice the resource allocator is generic and can manages arbitrary + sets of integers. + + Resources are allocated from arenas. Arena's can be created with an + initial span of resources. Further resources spans can be added to + arenas. A call back mechanism allows an arena to request further + resource spans on demand. + + Each arena maintains an ordered list of resource segments each + described by a boundary tag. 
Each boundary tag describes a segment + of resources which are either 'free', available for allocation, or + 'busy' currently allocated. Adjacent 'free' segments are always + coallesced to avoid fragmentation. + + For allocation, all 'free' segments are kept on lists of 'free' + segments in a table index by pvr_log2(segment size). ie Each table index + n holds 'free' segments in the size range 2**(n-1) -> 2**n. + + Allocation policy is based on an *almost* best fit + stratedy. Choosing any segment from the appropriate table entry + guarantees that we choose a segment which is with a power of 2 of + the size we are allocating. + + Allocated segments are inserted into a self scaling hash table which + maps the base resource of the span to the relevant boundary + tag. This allows the code to get back to the bounary tag without + exporting explicit boundary tag references through the API. + + Each arena has an associated quantum size, all allocations from the + arena are made in multiples of the basic quantum. + + On resource exhaustion in an arena, a callback if provided will be + used to request further resources. Resouces spans allocated by the + callback mechanism are delimited by special boundary tag markers of + zero span, 'span' markers. Span markers are never coallesced. Span + markers are used to detect when an imported span is completely free + and can be deallocated by the callback mechanism. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +/* Issues: + * - flags, flags are passed into the resource allocator but are not currently used. + * - determination, of import size, is currently braindead. + * - debug code should be moved out to own module and #ifdef'd + */ + +#include "services_headers.h" +#include "hash.h" +#include "ra.h" +#include "buffer_manager.h" +#include "osfunc.h" + +#if defined(__linux__) && defined(__KERNEL__) +#include +#include "proc.h" +#endif + +#ifdef USE_BM_FREESPACE_CHECK +#include +#endif + +/* The initial, and minimum size of the live address -> boundary tag + structure hash table. The value 64 is a fairly arbitrary + choice. The hash table resizes on demand so the value choosen is + not critical. */ +#define MINIMUM_HASH_SIZE (64) + +#if defined(VALIDATE_ARENA_TEST) + +/* This test validates the doubly linked ordered list of boundary tags, by +checking that adjacent members of the list have compatible eResourceSpan +and eResourceType values. */ + +typedef enum RESOURCE_DESCRIPTOR_TAG { + + RESOURCE_SPAN_LIVE = 10, + RESOURCE_SPAN_FREE, + IMPORTED_RESOURCE_SPAN_START, + IMPORTED_RESOURCE_SPAN_LIVE, + IMPORTED_RESOURCE_SPAN_FREE, + IMPORTED_RESOURCE_SPAN_END, + +} RESOURCE_DESCRIPTOR; + +typedef enum RESOURCE_TYPE_TAG { + + IMPORTED_RESOURCE_TYPE = 20, + NON_IMPORTED_RESOURCE_TYPE + +} RESOURCE_TYPE; + + +static IMG_UINT32 ui32BoundaryTagID = 0; + +IMG_UINT32 ValidateArena(RA_ARENA *pArena); +#endif + +/* boundary tags, used to describe a resource segment */ +struct _BT_ +{ + enum bt_type + { + btt_span, /* span markers */ + btt_free, /* free resource segment */ + btt_live /* allocated resource segment */ + } type; + + /* The base resource and extent of this segment */ + IMG_UINTPTR_T base; + IMG_SIZE_T uSize; + + /* doubly linked ordered list of all segments within the arena */ + struct _BT_ *pNextSegment; + struct _BT_ *pPrevSegment; + /* doubly linked un-ordered list of free segments. 
*/ + struct _BT_ *pNextFree; + struct _BT_ *pPrevFree; + /* a user reference associated with this span, user references are + * currently only provided in the callback mechanism */ + BM_MAPPING *psMapping; + +#if defined(VALIDATE_ARENA_TEST) + RESOURCE_DESCRIPTOR eResourceSpan; + RESOURCE_TYPE eResourceType; + + /* This variable provides a reference (used in debug messages) to incompatible + boundary tags within the doubly linked ordered list. */ + IMG_UINT32 ui32BoundaryTagID; +#endif + +}; +typedef struct _BT_ BT; + + +/* resource allocation arena */ +struct _RA_ARENA_ +{ + /* arena name for diagnostics output */ + IMG_CHAR *name; + + /* allocations within this arena are quantum sized */ + IMG_SIZE_T uQuantum; + + /* import interface, if provided */ + IMG_BOOL (*pImportAlloc)(IMG_VOID *, + IMG_SIZE_T uSize, + IMG_SIZE_T *pActualSize, + BM_MAPPING **ppsMapping, + IMG_UINT32 uFlags, + IMG_PVOID pvPrivData, + IMG_UINT32 ui32PrivDataLength, + IMG_UINTPTR_T *pBase); + IMG_VOID (*pImportFree) (IMG_VOID *, + IMG_UINTPTR_T, + BM_MAPPING *psMapping); + IMG_VOID (*pBackingStoreFree) (IMG_VOID *, IMG_SIZE_T, IMG_SIZE_T, IMG_HANDLE); + + /* arbitrary handle provided by arena owner to be passed into the + * import alloc and free hooks */ + IMG_VOID *pImportHandle; + + /* head of list of free boundary tags for indexed by pvr_log2 of the + boundary tag size */ +#define FREE_TABLE_LIMIT 32 + + /* power-of-two table of free lists */ + BT *aHeadFree [FREE_TABLE_LIMIT]; + + /* resource ordered segment list */ + BT *pHeadSegment; + BT *pTailSegment; + + /* segment address to boundary tag hash table */ + HASH_TABLE *pSegmentHash; + +#ifdef RA_STATS + RA_STATISTICS sStatistics; +#endif + +#if defined(CONFIG_PROC_FS) && defined(DEBUG) +#define PROC_NAME_SIZE 64 + + struct pvr_proc_dir_entry* pProcInfo; + struct pvr_proc_dir_entry* pProcSegs; + + IMG_BOOL bInitProcEntry; +#endif +}; +/* #define ENABLE_RA_DUMP 1 */ +#if defined(ENABLE_RA_DUMP) +IMG_VOID RA_Dump (RA_ARENA *pArena); 
+#endif + +#if defined(CONFIG_PROC_FS) && defined(DEBUG) +static void RA_ProcSeqShowInfo(struct seq_file *sfile, void* el); +static void* RA_ProcSeqOff2ElementInfo(struct seq_file * sfile, loff_t off); + +static void RA_ProcSeqShowRegs(struct seq_file *sfile, void* el); +static void* RA_ProcSeqOff2ElementRegs(struct seq_file * sfile, loff_t off); +#endif /* defined(CONFIG_PROC_FS) && defined(DEBUG) */ + +#ifdef USE_BM_FREESPACE_CHECK +IMG_VOID CheckBMFreespace(IMG_VOID); +#endif + +#if defined(CONFIG_PROC_FS) && defined(DEBUG) +static IMG_CHAR *ReplaceSpaces(IMG_CHAR * const pS) +{ + IMG_CHAR *pT; + + for(pT = pS; *pT != 0; pT++) + { + if (*pT == ' ' || *pT == '\t') + { + *pT = '_'; + } + } + + return pS; +} +#endif + +/*! +****************************************************************************** + @Function _RequestAllocFail + + @Description Default callback allocator used if no callback is + specified, always fails to allocate further resources to the + arena. + + @Input _h - callback handle + @Input _uSize - requested allocation size + @Output _pActualSize - actual allocation size + @Input _pRef - user reference + @Input _uflags - allocation flags + @Input _pvPrivData - private data + @Input _ui32PrivDataLength - private data length + @Input _pBase - receives allocated base + + @Return IMG_FALSE, this function always fails to allocate. 
+******************************************************************************/ +static IMG_BOOL +_RequestAllocFail (IMG_VOID *_h, + IMG_SIZE_T _uSize, + IMG_SIZE_T *_pActualSize, + BM_MAPPING **_ppsMapping, + IMG_UINT32 _uFlags, + IMG_PVOID _pvPrivData, + IMG_UINT32 _ui32PrivDataLength, + IMG_UINTPTR_T *_pBase) +{ + PVR_UNREFERENCED_PARAMETER (_h); + PVR_UNREFERENCED_PARAMETER (_uSize); + PVR_UNREFERENCED_PARAMETER (_pActualSize); + PVR_UNREFERENCED_PARAMETER (_ppsMapping); + PVR_UNREFERENCED_PARAMETER (_uFlags); + PVR_UNREFERENCED_PARAMETER (_pBase); + PVR_UNREFERENCED_PARAMETER (_pvPrivData); + PVR_UNREFERENCED_PARAMETER (_ui32PrivDataLength); + + return IMG_FALSE; +} + +/*! +****************************************************************************** + @Function pvr_log2 + + @Description Computes the floor of the log base 2 of a unsigned integer + + @Input n - unsigned integer + + @Return Floor(Log2(n)) +******************************************************************************/ +static IMG_UINT32 +pvr_log2 (IMG_SIZE_T n) +{ + IMG_UINT32 l = 0; + n>>=1; + while (n>0) + { + n>>=1; + l++; + } + return l; +} + +/*! +****************************************************************************** + @Function _SegmentListInsertAfter + + @Description Insert a boundary tag into an arena segment list after a + specified boundary tag. + + @Input pArena - the arena. + @Input pInsertionPoint - the insertion point. + @Input pBT - the boundary tag to insert. 
+ + @Return PVRSRV_ERROR +******************************************************************************/ +static PVRSRV_ERROR +_SegmentListInsertAfter (RA_ARENA *pArena, + BT *pInsertionPoint, + BT *pBT) +{ + PVR_ASSERT (pArena != IMG_NULL); + PVR_ASSERT (pInsertionPoint != IMG_NULL); + + if ((pInsertionPoint == IMG_NULL) || (pArena == IMG_NULL)) + { + PVR_DPF ((PVR_DBG_ERROR,"_SegmentListInsertAfter: invalid parameters")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + pBT->pNextSegment = pInsertionPoint->pNextSegment; + pBT->pPrevSegment = pInsertionPoint; + if (pInsertionPoint->pNextSegment == IMG_NULL) + pArena->pTailSegment = pBT; + else + pInsertionPoint->pNextSegment->pPrevSegment = pBT; + pInsertionPoint->pNextSegment = pBT; + + return PVRSRV_OK; +} + +/*! +****************************************************************************** + @Function _SegmentListInsert + + @Description Insert a boundary tag into an arena segment list at the + appropriate point. + + @Input pArena - the arena. + @Input pBT - the boundary tag to insert. + + @Return None +******************************************************************************/ +static PVRSRV_ERROR +_SegmentListInsert (RA_ARENA *pArena, BT *pBT) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + /* insert into the segment chain */ + if (pArena->pHeadSegment == IMG_NULL) + { + pArena->pHeadSegment = pArena->pTailSegment = pBT; + pBT->pNextSegment = pBT->pPrevSegment = IMG_NULL; + } + else + { + BT *pBTScan; + + if (pBT->base < pArena->pHeadSegment->base) + { + /* The base address of pBT is less than the base address of the boundary tag + at the head of the list - so insert this boundary tag at the head. */ + pBT->pNextSegment = pArena->pHeadSegment; + pArena->pHeadSegment->pPrevSegment = pBT; + pArena->pHeadSegment = pBT; + pBT->pPrevSegment = IMG_NULL; + } + else + { + + /* The base address of pBT is greater than or equal to that of the boundary tag + at the head of the list. 
Search for the insertion point: pBT must be inserted + before the first boundary tag with a greater base value - or at the end of the list. + */ + pBTScan = pArena->pHeadSegment; + + while ((pBTScan->pNextSegment != IMG_NULL) && (pBT->base >= pBTScan->pNextSegment->base)) + { + pBTScan = pBTScan->pNextSegment; + } + + eError = _SegmentListInsertAfter (pArena, pBTScan, pBT); + if (eError != PVRSRV_OK) + { + return eError; + } + } + } + return eError; +} + +/*! +****************************************************************************** + @Function _SegmentListRemove + + @Description Remove a boundary tag from an arena segment list. + + @Input pArena - the arena. + @Input pBT - the boundary tag to remove. + + @Return None +******************************************************************************/ +static IMG_VOID +_SegmentListRemove (RA_ARENA *pArena, BT *pBT) +{ + if (pBT->pPrevSegment == IMG_NULL) + pArena->pHeadSegment = pBT->pNextSegment; + else + pBT->pPrevSegment->pNextSegment = pBT->pNextSegment; + + if (pBT->pNextSegment == IMG_NULL) + pArena->pTailSegment = pBT->pPrevSegment; + else + pBT->pNextSegment->pPrevSegment = pBT->pPrevSegment; +} + +/*! +****************************************************************************** + @Function _SegmentSplit + + @Description Split a segment into two, maintain the arena segment list. The + boundary tag should not be in the free table. Neither the + original or the new neighbour bounary tag will be in the free + table. + + @Input pArena - the arena. + @Input pBT - the boundary tag to split. + @Input uSize - the required segment size of boundary tag after + splitting. + + @Return New neighbour boundary tag. 
/*!
******************************************************************************
 @Function	_SegmentSplit

 @Description	Split a segment into two, maintaining the arena segment list.
		The boundary tag should not be in the free table.  Neither the
		original nor the new neighbour boundary tag will be in the free
		table afterwards: pBT keeps [base, base+uSize) and the new tag
		takes the remainder.

 @Input		pArena - the arena.
 @Input		pBT - the boundary tag to split.
 @Input		uSize - the required segment size of boundary tag after
		splitting.

 @Return	New neighbour boundary tag, or IMG_NULL on allocation failure.
******************************************************************************/
static BT *
_SegmentSplit (RA_ARENA *pArena, BT *pBT, IMG_SIZE_T uSize)
{
	BT *pNeighbour;

	PVR_ASSERT (pArena != IMG_NULL);

	if (pArena == IMG_NULL)
	{
		PVR_DPF ((PVR_DBG_ERROR,"_SegmentSplit: invalid parameter - pArena"));
		return IMG_NULL;
	}

	if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
				  sizeof(BT),
				  (IMG_VOID **)&pNeighbour, IMG_NULL,
				  "Boundary Tag") != PVRSRV_OK)
	{
		return IMG_NULL;
	}

	OSMemSet(pNeighbour, 0, sizeof(BT));

#if defined(VALIDATE_ARENA_TEST)
	pNeighbour->ui32BoundaryTagID = ++ui32BoundaryTagID;
#endif

	/* link the new tag immediately after pBT in the ordered segment list */
	pNeighbour->pPrevSegment = pBT;
	pNeighbour->pNextSegment = pBT->pNextSegment;
	if (pBT->pNextSegment == IMG_NULL)
		pArena->pTailSegment = pNeighbour;
	else
		pBT->pNextSegment->pPrevSegment = pNeighbour;
	pBT->pNextSegment = pNeighbour;

	/* the new tag covers the tail of the original segment; note it shares
	   the same BM_MAPPING reference as the segment it was split from */
	pNeighbour->type = btt_free;
	pNeighbour->uSize = pBT->uSize - uSize;
	pNeighbour->base = pBT->base + uSize;
	pNeighbour->psMapping = pBT->psMapping;
	pBT->uSize = uSize;

#if defined(VALIDATE_ARENA_TEST)
	/* the split tag inherits the imported/non-imported classification */
	if (pNeighbour->pPrevSegment->eResourceType == IMPORTED_RESOURCE_TYPE)
	{
		pNeighbour->eResourceType = IMPORTED_RESOURCE_TYPE;
		pNeighbour->eResourceSpan = IMPORTED_RESOURCE_SPAN_FREE;
	}
	else if (pNeighbour->pPrevSegment->eResourceType == NON_IMPORTED_RESOURCE_TYPE)
	{
		pNeighbour->eResourceType = NON_IMPORTED_RESOURCE_TYPE;
		pNeighbour->eResourceSpan = RESOURCE_SPAN_FREE;
	}
	else
	{
		PVR_DPF ((PVR_DBG_ERROR,"_SegmentSplit: pNeighbour->pPrevSegment->eResourceType unrecognized"));
		PVR_DBG_BREAK;
	}
#endif

	return pNeighbour;
}

/*!
******************************************************************************
 @Function	_FreeListInsert

 @Description	Insert a boundary tag at the head of the free list bucket
		chosen by pvr_log2(size), and mark it free.

 @Input		pArena - the arena.
 @Input		pBT - the boundary tag.

 @Return	None

******************************************************************************/
static IMG_VOID
_FreeListInsert (RA_ARENA *pArena, BT *pBT)
{
	IMG_UINT32 uIndex;
	uIndex = pvr_log2 (pBT->uSize);
	pBT->type = btt_free;
	/* push onto the front of the doubly-linked bucket list */
	pBT->pNextFree = pArena->aHeadFree [uIndex];
	pBT->pPrevFree = IMG_NULL;
	if (pArena->aHeadFree[uIndex] != IMG_NULL)
		pArena->aHeadFree[uIndex]->pPrevFree = pBT;
	pArena->aHeadFree [uIndex] = pBT;
}

/*!
******************************************************************************
 @Function	_FreeListRemove

 @Description	Remove a boundary tag from its free list bucket.  The bucket
		is recomputed from the tag's current size, so the size must
		not have changed since insertion.

 @Input		pArena - the arena.
 @Input		pBT - the boundary tag.

 @Return	None

******************************************************************************/
static IMG_VOID
_FreeListRemove (RA_ARENA *pArena, BT *pBT)
{
	IMG_UINT32 uIndex;
	uIndex = pvr_log2 (pBT->uSize);
	if (pBT->pNextFree != IMG_NULL)
		pBT->pNextFree->pPrevFree = pBT->pPrevFree;
	if (pBT->pPrevFree == IMG_NULL)
		pArena->aHeadFree[uIndex] = pBT->pNextFree;
	else
		pBT->pPrevFree->pNextFree = pBT->pNextFree;
}

/*!
******************************************************************************
 @Function	_BuildSpanMarker

 @Description	Construct a span marker boundary tag (type btt_span) used to
		delimit imported resource spans.  Note: despite the arena
		parameter named in older comments, the marker is not linked
		into any arena here.

 @Input		base - the base of the boundary tag.
 @Input		uSize - the extent recorded in the marker.

 @Return	span marker boundary tag, or IMG_NULL on allocation failure

******************************************************************************/
static BT *
_BuildSpanMarker (IMG_UINTPTR_T base, IMG_SIZE_T uSize)
{
	BT *pBT;

	if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
				  sizeof(BT),
				  (IMG_VOID **)&pBT, IMG_NULL,
				  "Boundary Tag") != PVRSRV_OK)
	{
		return IMG_NULL;
	}

	OSMemSet(pBT, 0, sizeof(BT));

#if defined(VALIDATE_ARENA_TEST)
	pBT->ui32BoundaryTagID = ++ui32BoundaryTagID;
#endif

	pBT->type = btt_span;
	pBT->base = base;
	pBT->uSize = uSize;
	pBT->psMapping = IMG_NULL;

	return pBT;
}

/*!
******************************************************************************
 @Function	_BuildBT

 @Description	Construct a boundary tag for a free segment (type btt_free).
		The caller is responsible for linking it into the segment
		list and free table.

 @Input		base - the base of the resource segment.
 @Input		uSize - the extent of the resource segment.

 @Return	boundary tag, or IMG_NULL on allocation failure

******************************************************************************/
static BT *
_BuildBT (IMG_UINTPTR_T base, IMG_SIZE_T uSize)
{
	BT *pBT;

	if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
				  sizeof(BT),
				  (IMG_VOID **)&pBT, IMG_NULL,
				  "Boundary Tag") != PVRSRV_OK)
	{
		return IMG_NULL;
	}

	OSMemSet(pBT, 0, sizeof(BT));

#if defined(VALIDATE_ARENA_TEST)
	pBT->ui32BoundaryTagID = ++ui32BoundaryTagID;
#endif

	pBT->type = btt_free;
	pBT->base = base;
	pBT->uSize = uSize;

	return pBT;
}
+ + @Return New bucket pointer + IMG_NULL failure + +******************************************************************************/ +static BT * +_InsertResource (RA_ARENA *pArena, IMG_UINTPTR_T base, IMG_SIZE_T uSize) +{ + BT *pBT; + PVR_ASSERT (pArena!=IMG_NULL); + if (pArena == IMG_NULL) + { + PVR_DPF ((PVR_DBG_ERROR,"_InsertResource: invalid parameter - pArena")); + return IMG_NULL; + } + + pBT = _BuildBT (base, uSize); + if (pBT != IMG_NULL) + { + +#if defined(VALIDATE_ARENA_TEST) + pBT->eResourceSpan = RESOURCE_SPAN_FREE; + pBT->eResourceType = NON_IMPORTED_RESOURCE_TYPE; +#endif + + if (_SegmentListInsert (pArena, pBT) != PVRSRV_OK) + { + PVR_DPF ((PVR_DBG_ERROR,"_InsertResource: call to _SegmentListInsert failed")); + return IMG_NULL; + } + _FreeListInsert (pArena, pBT); +#ifdef RA_STATS + pArena->sStatistics.uTotalResourceCount+=uSize; + pArena->sStatistics.uFreeResourceCount+=uSize; + pArena->sStatistics.uSpanCount++; +#endif + } + return pBT; +} + +/*! +****************************************************************************** + @Function _InsertResourceSpan + + @Description Add a free resource span to an arena, complete with span markers. + + @Input pArena - the arena. + @Input base - the base of the resource segment. + @Input uSize - the extent of the resource segment. + + @Return the boundary tag representing the free resource segment, + or IMG_NULL on failure. 
/*!
******************************************************************************
 @Function	_InsertResourceSpan

 @Description	Add a free resource span to an arena, complete with span
		markers: a btt_span start marker, the free segment itself,
		and a zero-sized btt_span end marker, inserted in that order
		into the segment list.

 @Input		pArena - the arena.
 @Input		base - the base of the resource segment.
 @Input		uSize - the extent of the resource segment.

 @Return	the boundary tag representing the free resource segment,
		or IMG_NULL on failure.
******************************************************************************/
static BT *
_InsertResourceSpan (RA_ARENA *pArena, IMG_UINTPTR_T base, IMG_SIZE_T uSize)
{
	PVRSRV_ERROR eError;
	BT *pSpanStart;
	BT *pSpanEnd;
	BT *pBT;

	PVR_ASSERT (pArena != IMG_NULL);
	if (pArena == IMG_NULL)
	{
		PVR_DPF ((PVR_DBG_ERROR,"_InsertResourceSpan: invalid parameter - pArena"));
		return IMG_NULL;
	}

	PVR_DPF ((PVR_DBG_MESSAGE,
			  "RA_InsertResourceSpan: arena='%s', base=0x" UINTPTR_FMT ", size=0x%" SIZE_T_FMT_LEN "x",
			  pArena->name, base, uSize));

	/* allocate all three tags up front; goto-cleanup unwinds in reverse */
	pSpanStart = _BuildSpanMarker (base, uSize);
	if (pSpanStart == IMG_NULL)
	{
		goto fail_start;
	}

#if defined(VALIDATE_ARENA_TEST)
	pSpanStart->eResourceSpan = IMPORTED_RESOURCE_SPAN_START;
	pSpanStart->eResourceType = IMPORTED_RESOURCE_TYPE;
#endif

	/* end marker has zero extent; it exists only to delimit the span */
	pSpanEnd = _BuildSpanMarker (base + uSize, 0);
	if (pSpanEnd == IMG_NULL)
	{
		goto fail_end;
	}

#if defined(VALIDATE_ARENA_TEST)
	pSpanEnd->eResourceSpan = IMPORTED_RESOURCE_SPAN_END;
	pSpanEnd->eResourceType = IMPORTED_RESOURCE_TYPE;
#endif

	pBT = _BuildBT (base, uSize);
	if (pBT == IMG_NULL)
	{
		goto fail_bt;
	}

#if defined(VALIDATE_ARENA_TEST)
	pBT->eResourceSpan = IMPORTED_RESOURCE_SPAN_FREE;
	pBT->eResourceType = IMPORTED_RESOURCE_TYPE;
#endif

	eError = _SegmentListInsert (pArena, pSpanStart);
	if (eError != PVRSRV_OK)
	{
		goto fail_SegListInsert;
	}

	eError = _SegmentListInsertAfter (pArena, pSpanStart, pBT);
	if (eError != PVRSRV_OK)
	{
		goto fail_SegListInsert;
	}

	_FreeListInsert (pArena, pBT);

	eError = _SegmentListInsertAfter (pArena, pBT, pSpanEnd);
	if (eError != PVRSRV_OK)
	{
		goto fail_SegListInsert;
	}

#ifdef RA_STATS
	pArena->sStatistics.uTotalResourceCount+=uSize;
/*	pArena->sStatistics.uFreeResourceCount+=uSize;
	This has got to be wrong as uFreeResourceCount ends
	up larger than uTotalResourceCount by uTotalResourceCount
	- allocated memory
*/
#endif
	return pBT;

	/* NOTE(review): tags already linked into the segment list are not
	   unlinked on the fail_SegListInsert path — presumably unreachable in
	   practice; verify before relying on this path */
  fail_SegListInsert:
	OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pBT, IMG_NULL);
	/*not nulling pointer, out of scope*/
  fail_bt:
	OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pSpanEnd, IMG_NULL);
	/*not nulling pointer, out of scope*/
  fail_end:
	OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pSpanStart, IMG_NULL);
	/*not nulling pointer, out of scope*/
  fail_start:
	return IMG_NULL;
}

/*!
******************************************************************************
 @Function	_FreeBT

 @Description	Free a boundary tag taking care of the segment list and the
		boundary tag free table.  Coalesces with free neighbours,
		optionally releases backing store for the quantum-aligned
		region that is now wholly free, and returns a completely
		free imported span to the import callback.

 @Input		pArena - the arena.
 @Input		pBT - the boundary tag to free.
 @Input		bFreeBackingStore - Should backing for the memory be freed
		as well.
 @Return	None
******************************************************************************/
static IMG_VOID
_FreeBT (RA_ARENA *pArena, BT *pBT, IMG_BOOL bFreeBackingStore)
{
	BT *pNeighbour;
	IMG_UINTPTR_T uOrigBase;
	IMG_SIZE_T uOrigSize;

	PVR_ASSERT (pArena!=IMG_NULL);
	PVR_ASSERT (pBT!=IMG_NULL);

	if ((pArena == IMG_NULL) || (pBT == IMG_NULL))
	{
		PVR_DPF ((PVR_DBG_ERROR,"_FreeBT: invalid parameter"));
		return;
	}

#ifdef RA_STATS
	pArena->sStatistics.uLiveSegmentCount--;
	pArena->sStatistics.uFreeSegmentCount++;
	pArena->sStatistics.uFreeResourceCount+=pBT->uSize;
#endif

	/* remember the original extent before coalescing grows pBT */
	uOrigBase = pBT->base;
	uOrigSize = pBT->uSize;

	/* try and coalesce with left neighbour; the neighbour must be free and
	   exactly adjacent (span markers never satisfy the adjacency test for
	   both sides because the end marker has zero size at base+uSize) */
	pNeighbour = pBT->pPrevSegment;
	if (pNeighbour!=IMG_NULL
		&& pNeighbour->type == btt_free
		&& pNeighbour->base + pNeighbour->uSize == pBT->base)
	{
		_FreeListRemove (pArena, pNeighbour);
		_SegmentListRemove (pArena, pNeighbour);
		pBT->base = pNeighbour->base;
		pBT->uSize += pNeighbour->uSize;
		OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pNeighbour, IMG_NULL);
		/*not nulling original pointer, already overwritten*/
#ifdef RA_STATS
		pArena->sStatistics.uFreeSegmentCount--;
#endif
	}

	/* try to coalesce with right neighbour */
	pNeighbour = pBT->pNextSegment;
	if (pNeighbour!=IMG_NULL
		&& pNeighbour->type == btt_free
		&& pBT->base + pBT->uSize == pNeighbour->base)
	{
		_FreeListRemove (pArena, pNeighbour);
		_SegmentListRemove (pArena, pNeighbour);
		pBT->uSize += pNeighbour->uSize;
		OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pNeighbour, IMG_NULL);
		/*not nulling original pointer, already overwritten*/
#ifdef RA_STATS
		pArena->sStatistics.uFreeSegmentCount--;
#endif
	}

	/* try to free backing store memory. */
	if (pArena->pBackingStoreFree != IMG_NULL && bFreeBackingStore)
	{
		IMG_UINTPTR_T uRoundedStart, uRoundedEnd;

		/* Work out the first address we might be able to free. */
		uRoundedStart = (uOrigBase / pArena->uQuantum) * pArena->uQuantum;
		/* If a span is still using that address then leave it. */
		if (uRoundedStart < pBT->base)
		{
			uRoundedStart += pArena->uQuantum;
		}

		/* Work out the last address we might be able to free. */
		uRoundedEnd = ((uOrigBase + uOrigSize + pArena->uQuantum - 1) / pArena->uQuantum) * pArena->uQuantum;
		/* If a span is still using that addres then leave it. */
		if (uRoundedEnd > (pBT->base + pBT->uSize))
		{
			uRoundedEnd -= pArena->uQuantum;
		}

		if (uRoundedStart < uRoundedEnd)
		{
			pArena->pBackingStoreFree(pArena->pImportHandle, (IMG_SIZE_T)uRoundedStart, (IMG_SIZE_T)uRoundedEnd, (IMG_HANDLE)0);
		}
	}

	/* if pBT is now bracketed by the span start and end markers, the whole
	   imported span is free: unlink all three tags and hand the span back
	   to the import callback */
	if (pBT->pNextSegment!=IMG_NULL && pBT->pNextSegment->type == btt_span
		&& pBT->pPrevSegment!=IMG_NULL && pBT->pPrevSegment->type == btt_span)
	{
		BT *next = pBT->pNextSegment;
		BT *prev = pBT->pPrevSegment;
		_SegmentListRemove (pArena, next);
		_SegmentListRemove (pArena, prev);
		_SegmentListRemove (pArena, pBT);
		pArena->pImportFree (pArena->pImportHandle, pBT->base, pBT->psMapping);
#ifdef RA_STATS
		pArena->sStatistics.uSpanCount--;
		pArena->sStatistics.uExportCount++;
		pArena->sStatistics.uFreeSegmentCount--;
		pArena->sStatistics.uFreeResourceCount-=pBT->uSize;
		pArena->sStatistics.uTotalResourceCount-=pBT->uSize;
#endif
		OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), next, IMG_NULL);
		/*not nulling original pointer, already overwritten*/
		OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), prev, IMG_NULL);
		/*not nulling original pointer, already overwritten*/
		OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pBT, IMG_NULL);
		/*not nulling pointer, copy on stack*/
	}
	else
		_FreeListInsert (pArena, pBT);
}
+ @Input flags - allocation flags + @Input uAlignment - required uAlignment, or 0 + @Input uAlignmentOffset + @Output base - allocated resource base + + @Return IMG_FALSE failure + IMG_TRUE success +******************************************************************************/ +static IMG_BOOL +_AttemptAllocAligned (RA_ARENA *pArena, + IMG_SIZE_T uSize, + BM_MAPPING **ppsMapping, + IMG_UINT32 uFlags, + IMG_UINT32 uAlignment, + IMG_UINT32 uAlignmentOffset, + IMG_UINTPTR_T *base) +{ + IMG_UINT32 uIndex; + PVR_ASSERT (pArena!=IMG_NULL); + if (pArena == IMG_NULL) + { + PVR_DPF ((PVR_DBG_ERROR,"_AttemptAllocAligned: invalid parameter - pArena")); + return IMG_FALSE; + } + + if (uAlignment>1) + uAlignmentOffset %= uAlignment; + + /* search for a near fit free boundary tag, start looking at the + pvr_log2 free table for our required size and work on up the + table. */ + uIndex = pvr_log2 (uSize); + + while (uIndex < FREE_TABLE_LIMIT && pArena->aHeadFree[uIndex]==IMG_NULL) + uIndex++; + + while (uIndex < FREE_TABLE_LIMIT) + { + if (pArena->aHeadFree[uIndex]!=IMG_NULL) + { + /* we have a cached free boundary tag */ + BT *pBT; + + pBT = pArena->aHeadFree [uIndex]; + while (pBT!=IMG_NULL) + { + IMG_UINTPTR_T aligned_base; + + if (uAlignment>1) + aligned_base = (pBT->base + uAlignmentOffset + uAlignment - 1) / uAlignment * uAlignment - uAlignmentOffset; + else + aligned_base = pBT->base; + PVR_DPF ((PVR_DBG_MESSAGE, + "RA_AttemptAllocAligned: pBT-base=0x" UINTPTR_FMT " " + "pBT-size=0x%" SIZE_T_FMT_LEN "x alignedbase=0x" + UINTPTR_FMT " size=0x%" SIZE_T_FMT_LEN "x", + pBT->base, + pBT->uSize, + aligned_base, + uSize)); + + if (pBT->base + pBT->uSize >= aligned_base + uSize) + { + if(!pBT->psMapping || pBT->psMapping->ui32Flags == uFlags) + { + _FreeListRemove (pArena, pBT); + + PVR_ASSERT (pBT->type == btt_free); + +#ifdef RA_STATS + pArena->sStatistics.uLiveSegmentCount++; + pArena->sStatistics.uFreeSegmentCount--; + pArena->sStatistics.uFreeResourceCount-=pBT->uSize; 
+#endif + + /* with uAlignment we might need to discard the front of this segment */ + if (aligned_base > pBT->base) + { + BT *pNeighbour; + pNeighbour = _SegmentSplit (pArena, pBT, (IMG_SIZE_T)(aligned_base - pBT->base)); + /* partition the buffer, create a new boundary tag */ + if (pNeighbour==IMG_NULL) + { + PVR_DPF ((PVR_DBG_ERROR,"_AttemptAllocAligned: Front split failed")); + /* Put pBT back in the list */ + _FreeListInsert (pArena, pBT); + return IMG_FALSE; + } + + _FreeListInsert (pArena, pBT); + #ifdef RA_STATS + pArena->sStatistics.uFreeSegmentCount++; + pArena->sStatistics.uFreeResourceCount+=pBT->uSize; + #endif + pBT = pNeighbour; + } + + /* the segment might be too big, if so, discard the back of the segment */ + if (pBT->uSize > uSize) + { + BT *pNeighbour; + pNeighbour = _SegmentSplit (pArena, pBT, uSize); + /* partition the buffer, create a new boundary tag */ + if (pNeighbour==IMG_NULL) + { + PVR_DPF ((PVR_DBG_ERROR,"_AttemptAllocAligned: Back split failed")); + /* Put pBT back in the list */ + _FreeListInsert (pArena, pBT); + return IMG_FALSE; + } + + _FreeListInsert (pArena, pNeighbour); + #ifdef RA_STATS + pArena->sStatistics.uFreeSegmentCount++; + pArena->sStatistics.uFreeResourceCount+=pNeighbour->uSize; + #endif + } + + pBT->type = btt_live; + +#if defined(VALIDATE_ARENA_TEST) + if (pBT->eResourceType == IMPORTED_RESOURCE_TYPE) + { + pBT->eResourceSpan = IMPORTED_RESOURCE_SPAN_LIVE; + } + else if (pBT->eResourceType == NON_IMPORTED_RESOURCE_TYPE) + { + pBT->eResourceSpan = RESOURCE_SPAN_LIVE; + } + else + { + PVR_DPF ((PVR_DBG_ERROR,"_AttemptAllocAligned ERROR: pBT->eResourceType unrecognized")); + PVR_DBG_BREAK; + } +#endif + if (!HASH_Insert (pArena->pSegmentHash, pBT->base, (IMG_UINTPTR_T) pBT)) + { + _FreeBT (pArena, pBT, IMG_FALSE); + return IMG_FALSE; + } + + if (ppsMapping!=IMG_NULL) + *ppsMapping = pBT->psMapping; + + *base = pBT->base; + + return IMG_TRUE; + } + else + { + PVR_DPF ((PVR_DBG_MESSAGE, + "AttemptAllocAligned: mismatch 
in flags. Import has %x, request was %x", pBT->psMapping->ui32Flags, uFlags)); + + } + } + pBT = pBT->pNextFree; + } + + } + uIndex++; + } + + return IMG_FALSE; +} + + + +/*! +****************************************************************************** + @Function RA_Create + + @Description To create a resource arena. + + @Input name - the name of the arena for diagnostic purposes. + @Input base - the base of an initial resource span or 0. + @Input uSize - the size of an initial resource span or 0. + @Input uQuantum - the arena allocation quantum. + @Input alloc - a resource allocation callback or 0. + @Input free - a resource de-allocation callback or 0. + @Input backingstore_free - a callback to free resources for spans or 0. + @Input pImportHandle - handle passed to alloc and free or 0. + + @Return arena handle, or IMG_NULL. +******************************************************************************/ +RA_ARENA * +RA_Create (IMG_CHAR *name, + IMG_UINTPTR_T base, + IMG_SIZE_T uSize, + BM_MAPPING *psMapping, + IMG_SIZE_T uQuantum, + IMG_BOOL (*imp_alloc)(IMG_VOID *, IMG_SIZE_T uSize, IMG_SIZE_T *pActualSize, + BM_MAPPING **ppsMapping, IMG_UINT32 _flags, + IMG_PVOID pvPrivData, IMG_UINT32 ui32PrivDataLength, + IMG_UINTPTR_T *pBase), + IMG_VOID (*imp_free) (IMG_VOID *, IMG_UINTPTR_T, BM_MAPPING *), + IMG_VOID (*backingstore_free) (IMG_VOID*, IMG_SIZE_T, IMG_SIZE_T, IMG_HANDLE), + IMG_VOID *pImportHandle) +{ + RA_ARENA *pArena; + BT *pBT; + IMG_INT i; + + PVR_DPF ((PVR_DBG_MESSAGE, + "RA_Create: name='%s', base=0x" UINTPTR_FMT ", uSize=0x%" SIZE_T_FMT_LEN "x, alloc=0x%p, free=0x%p", + name, base, uSize, imp_alloc, imp_free)); + + + if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof (*pArena), + (IMG_VOID **)&pArena, IMG_NULL, + "Resource Arena") != PVRSRV_OK) + { + goto arena_fail; + } + + pArena->name = name; + pArena->pImportAlloc = (imp_alloc!=IMG_NULL) ? 
imp_alloc : &_RequestAllocFail; + pArena->pImportFree = imp_free; + pArena->pBackingStoreFree = backingstore_free; + pArena->pImportHandle = pImportHandle; + for (i=0; iaHeadFree[i] = IMG_NULL; + pArena->pHeadSegment = IMG_NULL; + pArena->pTailSegment = IMG_NULL; + pArena->uQuantum = uQuantum; + +#ifdef RA_STATS + pArena->sStatistics.uSpanCount = 0; + pArena->sStatistics.uLiveSegmentCount = 0; + pArena->sStatistics.uFreeSegmentCount = 0; + pArena->sStatistics.uFreeResourceCount = 0; + pArena->sStatistics.uTotalResourceCount = 0; + pArena->sStatistics.uCumulativeAllocs = 0; + pArena->sStatistics.uCumulativeFrees = 0; + pArena->sStatistics.uImportCount = 0; + pArena->sStatistics.uExportCount = 0; +#endif + +#if defined(CONFIG_PROC_FS) && defined(DEBUG) + if(strcmp(pArena->name,"") != 0) + { + IMG_INT ret; + IMG_CHAR szProcInfoName[PROC_NAME_SIZE]; + IMG_CHAR szProcSegsName[PROC_NAME_SIZE]; + struct pvr_proc_dir_entry* (*pfnCreateProcEntrySeq)(const IMG_CHAR *, + IMG_VOID*, + pvr_next_proc_seq_t, + pvr_show_proc_seq_t, + pvr_off2element_proc_seq_t, + pvr_startstop_proc_seq_t, + pvr_proc_write_t); + + pArena->bInitProcEntry = !PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_SUCCESSFUL); + + /* Don't put shared heap info into a per process /proc subdirectory */ + pfnCreateProcEntrySeq = pArena->bInitProcEntry ? 
CreateProcEntrySeq : CreatePerProcessProcEntrySeq; + + ret = snprintf(szProcInfoName, sizeof(szProcInfoName), "ra_info_%s", pArena->name); + if (ret > 0 && ret < sizeof(szProcInfoName)) + { + pArena->pProcInfo = pfnCreateProcEntrySeq(ReplaceSpaces(szProcInfoName), pArena, NULL, + RA_ProcSeqShowInfo, RA_ProcSeqOff2ElementInfo, NULL, NULL); + } + else + { + pArena->pProcInfo = 0; + PVR_DPF((PVR_DBG_ERROR, "RA_Create: couldn't create ra_info proc entry for arena %s", pArena->name)); + } + + ret = snprintf(szProcSegsName, sizeof(szProcSegsName), "ra_segs_%s", pArena->name); + if (ret > 0 && ret < sizeof(szProcInfoName)) + { + pArena->pProcSegs = pfnCreateProcEntrySeq(ReplaceSpaces(szProcSegsName), pArena, NULL, + RA_ProcSeqShowRegs, RA_ProcSeqOff2ElementRegs, NULL, NULL); + } + else + { + pArena->pProcSegs = 0; + PVR_DPF((PVR_DBG_ERROR, "RA_Create: couldn't create ra_segs proc entry for arena %s", pArena->name)); + } + } +#endif /* defined(CONFIG_PROC_FS) && defined(DEBUG) */ + + pArena->pSegmentHash = HASH_Create (MINIMUM_HASH_SIZE); + if (pArena->pSegmentHash==IMG_NULL) + { + goto hash_fail; + } + if (uSize>0) + { + uSize = (uSize + uQuantum - 1) / uQuantum * uQuantum; + pBT = _InsertResource (pArena, base, uSize); + if (pBT == IMG_NULL) + { + goto insert_fail; + } + pBT->psMapping = psMapping; + + } + return pArena; + +insert_fail: + HASH_Delete (pArena->pSegmentHash); +hash_fail: + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(RA_ARENA), pArena, IMG_NULL); + /*not nulling pointer, out of scope*/ +arena_fail: + return IMG_NULL; +} + +/*! +****************************************************************************** + @Function RA_Delete + + @Description To delete a resource arena. All resources allocated from + the arena must be freed before deleting the arena. + + @Input pArena - the arena to delete. 
+ + @Return None +******************************************************************************/ +IMG_VOID +RA_Delete (RA_ARENA *pArena) +{ + IMG_UINT32 uIndex; + + PVR_ASSERT(pArena != IMG_NULL); + + if (pArena == IMG_NULL) + { + PVR_DPF ((PVR_DBG_ERROR,"RA_Delete: invalid parameter - pArena")); + return; + } + + PVR_DPF ((PVR_DBG_MESSAGE, + "RA_Delete: name='%s'", pArena->name)); + + for (uIndex=0; uIndexaHeadFree[uIndex] = IMG_NULL; + + while (pArena->pHeadSegment != IMG_NULL) + { + BT *pBT = pArena->pHeadSegment; + + if (pBT->type != btt_free) + { + PVR_DPF ((PVR_DBG_ERROR,"RA_Delete: allocations still exist in the arena that is being destroyed")); + PVR_DPF ((PVR_DBG_ERROR,"Likely Cause: client drivers not freeing allocations before destroying devmemcontext")); + PVR_DPF ((PVR_DBG_ERROR,"RA_Delete: base = 0x" UINTPTR_FMT " size=0x%" SIZE_T_FMT_LEN "x", pBT->base, pBT->uSize)); + } + + _SegmentListRemove (pArena, pBT); + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pBT, IMG_NULL); + /*not nulling original pointer, it has changed*/ +#ifdef RA_STATS + pArena->sStatistics.uSpanCount--; +#endif + } +#if defined(CONFIG_PROC_FS) && defined(DEBUG) + { + IMG_VOID (*pfnRemoveProcEntrySeq)(struct pvr_proc_dir_entry*); + + pfnRemoveProcEntrySeq = pArena->bInitProcEntry ? RemoveProcEntrySeq : RemovePerProcessProcEntrySeq; + + if (pArena->pProcInfo != 0) + { + pfnRemoveProcEntrySeq( pArena->pProcInfo ); + } + + if (pArena->pProcSegs != 0) + { + pfnRemoveProcEntrySeq( pArena->pProcSegs ); + } + } +#endif + HASH_Delete (pArena->pSegmentHash); + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(RA_ARENA), pArena, IMG_NULL); + /*not nulling pointer, copy on stack*/ +} + +/*! +****************************************************************************** + @Function RA_TestDelete + + @Description To test whether it is safe to delete a resource arena. If any + allocations have not been freed, the RA must not be deleted. + + @Input pArena - the arena to test. 
+ + @Return IMG_BOOL - IMG_TRUE if is safe to go on and call RA_Delete. +******************************************************************************/ +IMG_BOOL +RA_TestDelete (RA_ARENA *pArena) +{ + PVR_ASSERT(pArena != IMG_NULL); + + if (pArena != IMG_NULL) + { + while (pArena->pHeadSegment != IMG_NULL) + { + BT *pBT = pArena->pHeadSegment; + if (pBT->type != btt_free) + { + PVR_DPF ((PVR_DBG_ERROR,"RA_TestDelete: detected resource leak!")); + PVR_DPF ((PVR_DBG_ERROR,"RA_TestDelete: base = 0x" UINTPTR_FMT " size=0x%" SIZE_T_FMT_LEN "x", pBT->base, pBT->uSize)); + return IMG_FALSE; + } + } + } + + return IMG_TRUE; +} + +/*! +****************************************************************************** + @Function RA_Add + + @Description To add a resource span to an arena. The span must not + overlapp with any span previously added to the arena. + + @Input pArena - the arena to add a span into. + @Input base - the base of the span. + @Input uSize - the extent of the span. + + @Return IMG_TRUE - Success + IMG_FALSE - failure +******************************************************************************/ +IMG_BOOL +RA_Add (RA_ARENA *pArena, IMG_UINTPTR_T base, IMG_SIZE_T uSize) +{ + PVR_ASSERT (pArena != IMG_NULL); + + if (pArena == IMG_NULL) + { + PVR_DPF ((PVR_DBG_ERROR,"RA_Add: invalid parameter - pArena")); + return IMG_FALSE; + } + + PVR_DPF ((PVR_DBG_MESSAGE, + "RA_Add: name='%s', base=0x" UINTPTR_FMT ", size=0x%" SIZE_T_FMT_LEN "x", pArena->name, base, uSize)); + + uSize = (uSize + pArena->uQuantum - 1) / pArena->uQuantum * pArena->uQuantum; + return ((IMG_BOOL)(_InsertResource (pArena, base, uSize) != IMG_NULL)); +} + +/*! +****************************************************************************** + @Function RA_Alloc + + @Description To allocate resource from an arena. + + @Input pArena - the arena + @Input uRequestSize - the size of resource segment requested. 
+ @Output pActualSize - the actual size of resource segment + allocated, typcially rounded up by quantum. + @Output ppsMapping - the user reference associated with allocated resource span. + @Input uFlags - flags influencing allocation policy. + @Input uAlignment - the uAlignment constraint required for the + allocated segment, use 0 if uAlignment not required. + @Input uAlignmentOffset + @Input pvPrivData - opaque private data passed through to allocator + @Input ui32PrivDataLength - length of opaque private data + + @Output base - allocated base resource + + @Return IMG_TRUE - success + IMG_FALSE - failure +******************************************************************************/ +IMG_BOOL +RA_Alloc (RA_ARENA *pArena, + IMG_SIZE_T uRequestSize, + IMG_SIZE_T *pActualSize, + BM_MAPPING **ppsMapping, + IMG_UINT32 uFlags, + IMG_UINT32 uAlignment, + IMG_UINT32 uAlignmentOffset, + IMG_PVOID pvPrivData, + IMG_UINT32 ui32PrivDataLength, + IMG_UINTPTR_T *base) +{ + IMG_BOOL bResult; + IMG_SIZE_T uSize = uRequestSize; + + PVR_ASSERT (pArena!=IMG_NULL); + + if (pArena == IMG_NULL) + { + PVR_DPF ((PVR_DBG_ERROR,"RA_Alloc: invalid parameter - pArena")); + return IMG_FALSE; + } + +#if defined(VALIDATE_ARENA_TEST) + ValidateArena(pArena); +#endif + +#ifdef USE_BM_FREESPACE_CHECK + CheckBMFreespace(); +#endif + + if (pActualSize != IMG_NULL) + { + *pActualSize = uSize; + } + + PVR_DPF ((PVR_DBG_MESSAGE, + "RA_Alloc: arena='%s', size=0x%" SIZE_T_FMT_LEN "x(0x%" SIZE_T_FMT_LEN "x), alignment=0x%x, offset=0x%x", + pArena->name, uSize, uRequestSize, uAlignment, uAlignmentOffset)); + + /* if allocation failed then we might have an import source which + can provide more resource, else we will have to fail the + allocation to the caller. 
*/ + bResult = _AttemptAllocAligned (pArena, uSize, ppsMapping, uFlags, + uAlignment, uAlignmentOffset, base); + if (!bResult) + { + BM_MAPPING *psImportMapping; + IMG_UINTPTR_T import_base; + IMG_SIZE_T uImportSize = uSize; + + /* + Ensure that we allocate sufficient space to meet the uAlignment + constraint + */ + if (uAlignment > pArena->uQuantum) + { + uImportSize += (uAlignment - 1); + } + + /* ensure that we import according to the quanta of this arena */ + uImportSize = ((uImportSize + pArena->uQuantum - 1)/pArena->uQuantum)*pArena->uQuantum; + + bResult = + pArena->pImportAlloc (pArena->pImportHandle, uImportSize, &uImportSize, + &psImportMapping, uFlags, + pvPrivData, ui32PrivDataLength, &import_base); + if (bResult) + { + BT *pBT; + pBT = _InsertResourceSpan (pArena, import_base, uImportSize); + /* successfully import more resource, create a span to + represent it and retry the allocation attempt */ + if (pBT == IMG_NULL) + { + /* insufficient resources to insert the newly acquired span, + so free it back again */ + pArena->pImportFree(pArena->pImportHandle, import_base, + psImportMapping); + PVR_DPF ((PVR_DBG_MESSAGE, + "RA_Alloc: name='%s', size=0x%" SIZE_T_FMT_LEN "x failed!", + pArena->name, uSize)); + /* RA_Dump (arena); */ + return IMG_FALSE; + } + pBT->psMapping = psImportMapping; +#ifdef RA_STATS + pArena->sStatistics.uFreeSegmentCount++; + pArena->sStatistics.uFreeResourceCount += uImportSize; + pArena->sStatistics.uImportCount++; + pArena->sStatistics.uSpanCount++; +#endif + bResult = _AttemptAllocAligned(pArena, uSize, ppsMapping, uFlags, + uAlignment, uAlignmentOffset, + base); + if (!bResult) + { + PVR_DPF ((PVR_DBG_MESSAGE, + "RA_Alloc: name='%s' uAlignment failed!", + pArena->name)); + } + } + } +#if defined PVRSRV_DEVMEM_TIME_STATS + else + { + /* If sub-allocation succeeded, the allocation was already mapped to Device MMU. 
So change timing to '0' */ + if (ppsMapping) + { + (*ppsMapping)->ui32TimeToDevMap = 0; + } + } +#endif + +#ifdef RA_STATS + if (bResult) + { + pArena->sStatistics.uCumulativeAllocs++; + } +#endif + + PVR_DPF ((PVR_DBG_MESSAGE, + "RA_Alloc: name='%s', size=0x%" SIZE_T_FMT_LEN "x, *base=0x" UINTPTR_FMT " = %d", + pArena->name, uSize, *base, bResult)); + + /* RA_Dump (pArena); + ra_stats (pArena); + */ + +#if defined(VALIDATE_ARENA_TEST) + ValidateArena(pArena); +#endif + + return bResult; +} + + +#if defined(VALIDATE_ARENA_TEST) + +/*! +****************************************************************************** + @Function ValidateArena + + @Description Validate an arena by checking that adjacent members of the + double linked ordered list are compatible. PVR_DBG_BREAK and + PVR_DPF messages are used when an error is detected. + NOTE: A DEBUG build is required for PVR_DBG_BREAK to operate. + + @Input pArena - the arena + + @Return 0 +******************************************************************************/ +IMG_UINT32 ValidateArena(RA_ARENA *pArena) +{ + BT* pSegment; + RESOURCE_DESCRIPTOR eNextSpan; + + pSegment = pArena->pHeadSegment; + + if (pSegment == IMG_NULL) + { + return 0; + } + + if (pSegment->eResourceType == IMPORTED_RESOURCE_TYPE) + { + PVR_ASSERT(pSegment->eResourceSpan == IMPORTED_RESOURCE_SPAN_START); + + while (pSegment->pNextSegment) + { + eNextSpan = pSegment->pNextSegment->eResourceSpan; + + switch (pSegment->eResourceSpan) + { + case IMPORTED_RESOURCE_SPAN_LIVE: + + if (!((eNextSpan == IMPORTED_RESOURCE_SPAN_LIVE) || + (eNextSpan == IMPORTED_RESOURCE_SPAN_FREE) || + (eNextSpan == IMPORTED_RESOURCE_SPAN_END))) + { + /* error - next span must be live, free or end */ + PVR_DPF((PVR_DBG_ERROR, + "ValidateArena ERROR: adjacent boundary tags %d (base=0x" UINTPTR_FMT + ") and %d (base=0x" UINTPTR_FMT ") are incompatible (arena: %s)", + pSegment->ui32BoundaryTagID, + pSegment->base, + pSegment->pNextSegment->ui32BoundaryTagID, + 
pSegment->pNextSegment->base, + pArena->name)); + + PVR_DBG_BREAK; + } + break; + + case IMPORTED_RESOURCE_SPAN_FREE: + + if (!((eNextSpan == IMPORTED_RESOURCE_SPAN_LIVE) || + (eNextSpan == IMPORTED_RESOURCE_SPAN_END))) + { + /* error - next span must be live or end */ + PVR_DPF((PVR_DBG_ERROR, + "ValidateArena ERROR: adjacent boundary tags %d (base=0x" UINTPTR_FMT + ") and %d (base=0x" UINTPTR_FMT ") are incompatible (arena: %s)", + pSegment->ui32BoundaryTagID, + pSegment->base, + pSegment->pNextSegment->ui32BoundaryTagID, + pSegment->pNextSegment->base, + pArena->name)); + + PVR_DBG_BREAK; + } + break; + + case IMPORTED_RESOURCE_SPAN_END: + + if ((eNextSpan == IMPORTED_RESOURCE_SPAN_LIVE) || + (eNextSpan == IMPORTED_RESOURCE_SPAN_FREE) || + (eNextSpan == IMPORTED_RESOURCE_SPAN_END)) + { + /* error - next span cannot be live, free or end */ + PVR_DPF((PVR_DBG_ERROR, + "ValidateArena ERROR: adjacent boundary tags %d (base=0x" UINTPTR_FMT + ") and %d (base=0x" UINTPTR_FMT ") are incompatible (arena: %s)", + pSegment->ui32BoundaryTagID, + pSegment->base, + pSegment->pNextSegment->ui32BoundaryTagID, + pSegment->pNextSegment->base, + pArena->name)); + + PVR_DBG_BREAK; + } + break; + + + case IMPORTED_RESOURCE_SPAN_START: + + if (!((eNextSpan == IMPORTED_RESOURCE_SPAN_LIVE) || + (eNextSpan == IMPORTED_RESOURCE_SPAN_FREE))) + { + /* error - next span must be live or free */ + PVR_DPF((PVR_DBG_ERROR, + "ValidateArena ERROR: adjacent boundary tags %d (base=0x" UINTPTR_FMT + ") and %d (base=0x" UINTPTR_FMT ") are incompatible (arena: %s)", + pSegment->ui32BoundaryTagID, + pSegment->base, + pSegment->pNextSegment->ui32BoundaryTagID, + pSegment->pNextSegment->base, + pArena->name)); + + PVR_DBG_BREAK; + } + break; + + default: + PVR_DPF((PVR_DBG_ERROR, + "ValidateArena ERROR: adjacent boundary tags %d (base=0x" UINTPTR_FMT + ") and %d (base=0x" UINTPTR_FMT ") are incompatible (arena: %s)", + pSegment->ui32BoundaryTagID, + pSegment->base, + 
pSegment->pNextSegment->ui32BoundaryTagID, + pSegment->pNextSegment->base, + pArena->name)); + + PVR_DBG_BREAK; + break; + } + pSegment = pSegment->pNextSegment; + } + } + else if (pSegment->eResourceType == NON_IMPORTED_RESOURCE_TYPE) + { + PVR_ASSERT((pSegment->eResourceSpan == RESOURCE_SPAN_FREE) || (pSegment->eResourceSpan == RESOURCE_SPAN_LIVE)); + + while (pSegment->pNextSegment) + { + eNextSpan = pSegment->pNextSegment->eResourceSpan; + + switch (pSegment->eResourceSpan) + { + case RESOURCE_SPAN_LIVE: + + if (!((eNextSpan == RESOURCE_SPAN_FREE) || + (eNextSpan == RESOURCE_SPAN_LIVE))) + { + /* error - next span must be free or live */ + PVR_DPF((PVR_DBG_ERROR, + "ValidateArena ERROR: adjacent boundary tags %d (base=0x" UINTPTR_FMT + ") and %d (base=0x" UINTPTR_FMT ") are incompatible (arena: %s)", + pSegment->ui32BoundaryTagID, + pSegment->base, + pSegment->pNextSegment->ui32BoundaryTagID, + pSegment->pNextSegment->base, + pArena->name)); + + PVR_DBG_BREAK; + } + break; + + case RESOURCE_SPAN_FREE: + + if (!((eNextSpan == RESOURCE_SPAN_FREE) || + (eNextSpan == RESOURCE_SPAN_LIVE))) + { + /* error - next span must be free or live */ + PVR_DPF((PVR_DBG_ERROR, + "ValidateArena ERROR: adjacent boundary tags %d (base=0x" UINTPTR_FMT + ") and %d (base=0x" UINTPTR_FMT ") are incompatible (arena: %s)", + pSegment->ui32BoundaryTagID, + pSegment->base, + pSegment->pNextSegment->ui32BoundaryTagID, + pSegment->pNextSegment->base, + pArena->name)); + + PVR_DBG_BREAK; + } + break; + + default: + PVR_DPF((PVR_DBG_ERROR, + "ValidateArena ERROR: adjacent boundary tags %d (base=0x" UINTPTR_FMT + ") and %d (base=0x" UINTPTR_FMT ") are incompatible (arena: %s)", + pSegment->ui32BoundaryTagID, + pSegment->base, + pSegment->pNextSegment->ui32BoundaryTagID, + pSegment->pNextSegment->base, + pArena->name)); + + PVR_DBG_BREAK; + break; + } + pSegment = pSegment->pNextSegment; + } + + } + else + { + PVR_DPF ((PVR_DBG_ERROR,"ValidateArena ERROR: pSegment->eResourceType 
unrecognized")); + + PVR_DBG_BREAK; + } + + return 0; +} + +#endif + + +/*! +****************************************************************************** + @Function RA_Free + + @Description To free a resource segment. + + @Input pArena - the arena the segment was originally allocated from. + @Input base - the base of the resource span to free. + @Input bFreeBackingStore - Should backing store memory be freed. + + @Return None +******************************************************************************/ +IMG_VOID +RA_Free (RA_ARENA *pArena, IMG_UINTPTR_T base, IMG_BOOL bFreeBackingStore) +{ + BT *pBT; + + PVR_ASSERT (pArena != IMG_NULL); + + if (pArena == IMG_NULL) + { + PVR_DPF ((PVR_DBG_ERROR,"RA_Free: invalid parameter - pArena")); + return; + } + +#ifdef USE_BM_FREESPACE_CHECK + CheckBMFreespace(); +#endif + + PVR_DPF ((PVR_DBG_MESSAGE, + "RA_Free: name='%s', base=0x" UINTPTR_FMT, pArena->name, base)); + + pBT = (BT *) HASH_Remove (pArena->pSegmentHash, base); + PVR_ASSERT (pBT != IMG_NULL); + + if (pBT) + { + PVR_ASSERT (pBT->base == base); + +#ifdef RA_STATS + pArena->sStatistics.uCumulativeFrees++; +#endif + +#ifdef USE_BM_FREESPACE_CHECK +{ + IMG_BYTE* p; + IMG_BYTE* endp; + + p = (IMG_BYTE*)pBT->base + SysGetDevicePhysOffset(); + endp = (IMG_BYTE*)((IMG_UINT32)(p + pBT->uSize)); + while ((IMG_UINT32)p & 3) + { + *p++ = 0xAA; + } + while (p < (IMG_BYTE*)((IMG_UINT32)endp & 0xfffffffc)) + { + *(IMG_UINT32*)p = 0xAAAAAAAA; + p += sizeof(IMG_UINT32); + } + while (p < endp) + { + *p++ = 0xAA; + } + PVR_DPF((PVR_DBG_MESSAGE, + "BM_FREESPACE_CHECK: RA_Free Cleared %p to %p (size=0x%" SIZE_T_FMT_LEN "x)", + (IMG_BYTE*)pBT->base + SysGetDevicePhysOffset(), + endp - 1, + pBT->uSize)); +} +#endif + _FreeBT (pArena, pBT, bFreeBackingStore); + } +} + + +/*! 
+****************************************************************************** + @Function RA_GetNextLiveSegment + + @Description Returns details of the next live resource segments + + @Input pArena - the arena the segment was originally allocated from. + @InOut psSegDetails - rtn details of segments + + @Return IMG_TRUE if operation succeeded +******************************************************************************/ +IMG_BOOL RA_GetNextLiveSegment(IMG_HANDLE hArena, RA_SEGMENT_DETAILS *psSegDetails) +{ + BT *pBT; + + if (psSegDetails->hSegment) + { + pBT = (BT *)psSegDetails->hSegment; + } + else + { + RA_ARENA *pArena = (RA_ARENA *)hArena; + + pBT = pArena->pHeadSegment; + } + /* walk the arena segments and write live one to the buffer */ + while (pBT != IMG_NULL) + { + if (pBT->type == btt_live) + { + psSegDetails->uiSize = pBT->uSize; + psSegDetails->sCpuPhyAddr.uiAddr = pBT->base; + psSegDetails->hSegment = (IMG_HANDLE)pBT->pNextSegment; + + return IMG_TRUE; + } + + pBT = pBT->pNextSegment; + } + + psSegDetails->uiSize = 0; + psSegDetails->sCpuPhyAddr.uiAddr = 0; + psSegDetails->hSegment = (IMG_HANDLE)IMG_UNDEF; + + return IMG_FALSE; +} + + +#ifdef USE_BM_FREESPACE_CHECK +RA_ARENA* pJFSavedArena = IMG_NULL; + +IMG_VOID CheckBMFreespace(IMG_VOID) +{ + BT *pBT; + IMG_BYTE* p; + IMG_BYTE* endp; + + if (pJFSavedArena != IMG_NULL) + { + for (pBT=pJFSavedArena->pHeadSegment; pBT!=IMG_NULL; pBT=pBT->pNextSegment) + { + if (pBT->type == btt_free) + { + p = (IMG_BYTE*)pBT->base + SysGetDevicePhysOffset(); + endp = (IMG_BYTE*)((IMG_UINT32)(p + pBT->uSize) & 0xfffffffc); + + while ((IMG_UINT32)p & 3) + { + if (*p++ != 0xAA) + { + fprintf(stderr,"BM_FREESPACE_CHECK: Blank space at %08X has changed to 0x%x\n",p,*(IMG_UINT32*)p); + for (;;); + break; + } + } + while (p < endp) + { + if (*(IMG_UINT32*)p != 0xAAAAAAAA) + { + fprintf(stderr,"BM_FREESPACE_CHECK: Blank space at %08X has changed to 0x%x\n",p,*(IMG_UINT32*)p); + for (;;); + break; + } + p += 4; + } + } + } 
+ } +} +#endif + + +#if (defined(CONFIG_PROC_FS) && defined(DEBUG)) || defined (RA_STATS) +static IMG_CHAR * +_BTType (IMG_INT eType) +{ + switch (eType) + { + case btt_span: return "span"; + case btt_free: return "free"; + case btt_live: return "live"; + } + return "junk"; +} +#endif /*defined(CONFIG_PROC_FS) && defined(DEBUG)*/ + +#if defined(ENABLE_RA_DUMP) +/*! +****************************************************************************** + @Function RA_Dump + + @Description To dump a readable description of an arena. Diagnostic only. + + @Input pArena - the arena to dump. + + @Return None +******************************************************************************/ +IMG_VOID +RA_Dump (RA_ARENA *pArena) +{ + BT *pBT; + PVR_ASSERT (pArena != IMG_NULL); + PVR_DPF ((PVR_DBG_MESSAGE,"Arena '%s':", pArena->name)); + PVR_DPF ((PVR_DBG_MESSAGE," alloc=%p free=%p handle=%p quantum=%d", + pArena->pImportAlloc, pArena->pImportFree, pArena->pImportHandle, + pArena->uQuantum)); + PVR_DPF ((PVR_DBG_MESSAGE," segment Chain:")); + if (pArena->pHeadSegment != IMG_NULL && + pArena->pHeadSegment->pPrevSegment != IMG_NULL) + PVR_DPF ((PVR_DBG_MESSAGE," error: head boundary tag has invalid pPrevSegment")); + if (pArena->pTailSegment != IMG_NULL && + pArena->pTailSegment->pNextSegment != IMG_NULL) + PVR_DPF ((PVR_DBG_MESSAGE," error: tail boundary tag has invalid pNextSegment")); + + for (pBT=pArena->pHeadSegment; pBT!=IMG_NULL; pBT=pBT->pNextSegment) + { + PVR_DPF ((PVR_DBG_MESSAGE,"\tbase=0x" UINTPTR_FMT " size=0x%" SIZE_T_FMT_LEN "x type=%s", + pBT->base, pBT->uSize, _BTType (pBT->type))); + } + +#ifdef HASH_TRACE + HASH_Dump (pArena->pSegmentHash); +#endif +} +#endif /* #if defined(ENABLE_RA_DUMP) */ + + +#if defined(CONFIG_PROC_FS) && defined(DEBUG) + +static void RA_ProcSeqShowInfo(struct seq_file *sfile, void* el) +{ + RA_ARENA *pArena = (RA_ARENA *)PVRProcGetData(sfile->private); + IMG_UINTPTR_T off = (IMG_UINTPTR_T)el; + + switch (off) + { + case 1: + seq_printf(sfile, 
"quantum\t\t\t%" SIZE_T_FMT_LEN "u\n", pArena->uQuantum); + break; + case 2: + seq_printf(sfile, "import_handle\t\t%p\n", pArena->pImportHandle); + break; +#ifdef RA_STATS + case 3: + seq_printf(sfile,"span count\t\t%" SIZE_T_FMT_LEN "u\n", pArena->sStatistics.uSpanCount); + break; + case 4: + seq_printf(sfile, "live segment count\t%" SIZE_T_FMT_LEN "u\n", pArena->sStatistics.uLiveSegmentCount); + break; + case 5: + seq_printf(sfile, "free segment count\t%" SIZE_T_FMT_LEN "u\n", pArena->sStatistics.uFreeSegmentCount); + break; + case 6: + seq_printf(sfile, "free resource count\t%" SIZE_T_FMT_LEN "u (0x%" SIZE_T_FMT_LEN "x)\n", + pArena->sStatistics.uFreeResourceCount, + pArena->sStatistics.uFreeResourceCount); + break; + case 7: + seq_printf(sfile, "total allocs\t\t%" SIZE_T_FMT_LEN "u\n", pArena->sStatistics.uCumulativeAllocs); + break; + case 8: + seq_printf(sfile, "total frees\t\t%" SIZE_T_FMT_LEN "u\n", pArena->sStatistics.uCumulativeFrees); + break; + case 9: + seq_printf(sfile, "import count\t\t%" SIZE_T_FMT_LEN "u\n", pArena->sStatistics.uImportCount); + break; + case 10: + seq_printf(sfile, "export count\t\t%" SIZE_T_FMT_LEN "u\n", pArena->sStatistics.uExportCount); + break; +#endif + } + +} + +static void* RA_ProcSeqOff2ElementInfo(struct seq_file * sfile, loff_t off) +{ +#ifdef RA_STATS + if(off <= 9) +#else + if(off <= 1) +#endif + return (void*)(IMG_UINTPTR_T)(off+1); + return 0; +} + +static void RA_ProcSeqShowRegs(struct seq_file *sfile, void* el) +{ + RA_ARENA *pArena = (RA_ARENA *)PVRProcGetData(sfile->private); + BT *pBT = (BT*)el; + + if (el == PVR_PROC_SEQ_START_TOKEN) + { + seq_printf(sfile, "Arena \"%s\"\nBase Size Type Ref\n", pArena->name); + return; + } + + if (pBT) + { + seq_printf(sfile, "%p %" SIZE_T_FMT_LEN "x %4s %p\n", + (IMG_PVOID)pBT->base, pBT->uSize, _BTType (pBT->type), + pBT->psMapping); + } +} + +static void* RA_ProcSeqOff2ElementRegs(struct seq_file * sfile, loff_t off) +{ + RA_ARENA *pArena = (RA_ARENA 
*)PVRProcGetData(sfile->private); + BT *pBT = 0; + + if(off == 0) + return PVR_PROC_SEQ_START_TOKEN; + + for (pBT=pArena->pHeadSegment; --off && pBT; pBT=pBT->pNextSegment); + + return (void*)pBT; +} +#endif /* defined(CONFIG_PROC_FS) && defined(DEBUG) */ + + +#ifdef RA_STATS +/*! +****************************************************************************** + @Function RA_GetStats + + @Description Gets the arena stats and places in client buffer + + @Input pArena - the arena to print statistics for. + @Input ppszStr - caller string to fill + @Input pui32StrLen - length of caller string + + @Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RA_GetStats(RA_ARENA *pArena, + IMG_CHAR **ppszStr, + IMG_UINT32 *pui32StrLen) +{ + IMG_CHAR *pszStr = *ppszStr; + IMG_UINT32 ui32StrLen = *pui32StrLen; + IMG_INT32 i32Count; + BT *pBT; + + CHECK_SPACE(ui32StrLen); + i32Count = OSSNPrintf(pszStr, 100, "\nArena '%s':\n", pArena->name); + UPDATE_SPACE(pszStr, i32Count, ui32StrLen); + + + CHECK_SPACE(ui32StrLen); + i32Count = OSSNPrintf(pszStr, 100, " allocCB=%p freeCB=%p handle=%p quantum=%" SIZE_T_FMT_LEN "u\n", + pArena->pImportAlloc, + pArena->pImportFree, + pArena->pImportHandle, + pArena->uQuantum); + UPDATE_SPACE(pszStr, i32Count, ui32StrLen); + + CHECK_SPACE(ui32StrLen); + i32Count = OSSNPrintf(pszStr, 100, "span count\t\t%" SIZE_T_FMT_LEN "u\n", + pArena->sStatistics.uSpanCount); + UPDATE_SPACE(pszStr, i32Count, ui32StrLen); + + CHECK_SPACE(ui32StrLen); + i32Count = OSSNPrintf(pszStr, 100, "live segment count\t%" SIZE_T_FMT_LEN "u\n", + pArena->sStatistics.uLiveSegmentCount); + UPDATE_SPACE(pszStr, i32Count, ui32StrLen); + + CHECK_SPACE(ui32StrLen); + i32Count = OSSNPrintf(pszStr, 100, "free segment count\t%" SIZE_T_FMT_LEN "u\n", + pArena->sStatistics.uFreeSegmentCount); + UPDATE_SPACE(pszStr, i32Count, ui32StrLen); + + CHECK_SPACE(ui32StrLen); + i32Count = OSSNPrintf(pszStr, 100, "free resource count\t%" 
SIZE_T_FMT_LEN "u (0x%" SIZE_T_FMT_LEN "x)\n", + pArena->sStatistics.uFreeResourceCount, + pArena->sStatistics.uFreeResourceCount); + UPDATE_SPACE(pszStr, i32Count, ui32StrLen); + + CHECK_SPACE(ui32StrLen); + i32Count = OSSNPrintf(pszStr, 100, "total allocs\t\t%" SIZE_T_FMT_LEN "u\n", + pArena->sStatistics.uCumulativeAllocs); + UPDATE_SPACE(pszStr, i32Count, ui32StrLen); + + CHECK_SPACE(ui32StrLen); + i32Count = OSSNPrintf(pszStr, 100, "total frees\t\t%" SIZE_T_FMT_LEN "u\n", + pArena->sStatistics.uCumulativeFrees); + UPDATE_SPACE(pszStr, i32Count, ui32StrLen); + + CHECK_SPACE(ui32StrLen); + i32Count = OSSNPrintf(pszStr, 100, "import count\t\t%" SIZE_T_FMT_LEN "u\n", + pArena->sStatistics.uImportCount); + UPDATE_SPACE(pszStr, i32Count, ui32StrLen); + + CHECK_SPACE(ui32StrLen); + i32Count = OSSNPrintf(pszStr, 100, "export count\t\t%" SIZE_T_FMT_LEN "u\n", + pArena->sStatistics.uExportCount); + UPDATE_SPACE(pszStr, i32Count, ui32StrLen); + + CHECK_SPACE(ui32StrLen); + i32Count = OSSNPrintf(pszStr, 100, " segment Chain:\n"); + UPDATE_SPACE(pszStr, i32Count, ui32StrLen); + + if (pArena->pHeadSegment != IMG_NULL && + pArena->pHeadSegment->pPrevSegment != IMG_NULL) + { + CHECK_SPACE(ui32StrLen); + i32Count = OSSNPrintf(pszStr, 100, " error: head boundary tag has invalid pPrevSegment\n"); + UPDATE_SPACE(pszStr, i32Count, ui32StrLen); + } + + if (pArena->pTailSegment != IMG_NULL && + pArena->pTailSegment->pNextSegment != IMG_NULL) + { + CHECK_SPACE(ui32StrLen); + i32Count = OSSNPrintf(pszStr, 100, " error: tail boundary tag has invalid pNextSegment\n"); + UPDATE_SPACE(pszStr, i32Count, ui32StrLen); + } + + for (pBT=pArena->pHeadSegment; pBT!=IMG_NULL; pBT=pBT->pNextSegment) + { + CHECK_SPACE(ui32StrLen); + i32Count = OSSNPrintf(pszStr, 100, "\tbase=0x%p size=0x%" SIZE_T_FMT_LEN "x type=%s ref=%p\n", + (void *)pBT->base, + pBT->uSize, + _BTType(pBT->type), + pBT->psMapping); + UPDATE_SPACE(pszStr, i32Count, ui32StrLen); + } + + *ppszStr = pszStr; + *pui32StrLen = 
ui32StrLen; + + return PVRSRV_OK; +} + +PVRSRV_ERROR RA_GetStatsFreeMem(RA_ARENA *pArena, + IMG_CHAR **ppszStr, + IMG_UINT32 *pui32StrLen) +{ + IMG_CHAR *pszStr = *ppszStr; + IMG_UINT32 ui32StrLen = *pui32StrLen; + IMG_INT32 i32Count; + CHECK_SPACE(ui32StrLen); + i32Count = OSSNPrintf(pszStr, 100, "Bytes free: Arena %-30s: %" SIZE_T_FMT_LEN "u (0x%" SIZE_T_FMT_LEN "x)\n", pArena->name, + pArena->sStatistics.uFreeResourceCount, + pArena->sStatistics.uFreeResourceCount); + UPDATE_SPACE(pszStr, i32Count, ui32StrLen); + *ppszStr = pszStr; + *pui32StrLen = ui32StrLen; + + return PVRSRV_OK; +} +#endif + +/****************************************************************************** + End of file (ra.c) +******************************************************************************/ + + + + diff --git a/sgx_km/eurasia_km/services4/srvkm/common/refcount.c b/sgx_km/eurasia_km/services4/srvkm/common/refcount.c new file mode 100644 index 0000000..aacb94c --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/common/refcount.c @@ -0,0 +1,760 @@ +/*************************************************************************/ /*! +@Title Services reference count debugging +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if defined(PVRSRV_REFCOUNT_DEBUG) + +#include "services_headers.h" + +#ifndef __linux__ +#warning Reference count debugging is not thread-safe on this platform +#define PVRSRV_LOCK_CCB() +#define PVRSRV_UNLOCK_CCB() +#else /* __linux__ */ +#include +static DEFINE_SPINLOCK(gsCCBLock); +#define PVRSRV_LOCK_CCB() \ + { \ + unsigned long flags; \ + spin_lock_irqsave(&gsCCBLock, flags); +#define PVRSRV_UNLOCK_CCB() \ + spin_unlock_irqrestore(&gsCCBLock, flags); \ + } +#endif /* __linux__ */ + +#define PVRSRV_REFCOUNT_CCB_MAX 512 +#define PVRSRV_REFCOUNT_CCB_MESG_MAX 80 + +#define PVRSRV_REFCOUNT_CCB_DEBUG_SYNCINFO (1U << 0) +#define PVRSRV_REFCOUNT_CCB_DEBUG_MEMINFO (1U << 1) +#define PVRSRV_REFCOUNT_CCB_DEBUG_BM_BUF (1U << 2) +#define PVRSRV_REFCOUNT_CCB_DEBUG_BM_BUF2 (1U << 3) +#define PVRSRV_REFCOUNT_CCB_DEBUG_BM_XPROC (1U << 4) + +#if defined(__linux__) +#define PVRSRV_REFCOUNT_CCB_DEBUG_MMAP (1U << 16) +#define PVRSRV_REFCOUNT_CCB_DEBUG_MMAP2 (1U << 17) +#define PVRSRV_REFCOUNT_CCB_DEBUG_ION_SYNC (1U << 18) +#define PVRSRV_REFCOUNT_CCB_DEBUG_DMABUF_SYNC (1U << 19) +#else +#define PVRSRV_REFCOUNT_CCB_DEBUG_MMAP 0 +#define PVRSRV_REFCOUNT_CCB_DEBUG_MMAP2 0 +#define PVRSRV_REFCOUNT_CCB_DEBUG_ION_SYNC 0 +#define PVRSRV_REFCOUNT_CCB_DEBUG_DMABUF_SYNC 0 +#endif + +#define PVRSRV_REFCOUNT_CCB_DEBUG_ALL ~0U + +/*static const IMG_UINT guiDebugMask = PVRSRV_REFCOUNT_CCB_DEBUG_ALL;*/ +static const IMG_UINT guiDebugMask = + PVRSRV_REFCOUNT_CCB_DEBUG_SYNCINFO | +#if defined(SUPPORT_ION) + PVRSRV_REFCOUNT_CCB_DEBUG_ION_SYNC | +#endif +#if defined(SUPPORT_DMABUF) + PVRSRV_REFCOUNT_CCB_DEBUG_DMABUF_SYNC | +#endif + PVRSRV_REFCOUNT_CCB_DEBUG_MMAP2; + +typedef struct +{ + const IMG_CHAR *pszFile; + IMG_INT iLine; + IMG_UINT32 ui32PID; + IMG_CHAR pcMesg[PVRSRV_REFCOUNT_CCB_MESG_MAX]; +} +PVRSRV_REFCOUNT_CCB; + +static PVRSRV_REFCOUNT_CCB gsRefCountCCB[PVRSRV_REFCOUNT_CCB_MAX]; +static IMG_UINT 
giOffset; + +static const IMG_CHAR gszHeader[] = + /* 10 20 30 40 50 60 70 + * 345678901234567890123456789012345678901234567890123456789012345678901 + */ + "TYPE SYNCINFO MEMINFO MEMHANDLE OTHER REF REF' SIZE PID"; + /* NCINFO deadbeef deadbeef deadbeef deadbeef 1234 1234 deadbeef */ + +#define PVRSRV_REFCOUNT_CCB_FMT_STRING "%8.8s %8p %8p %8p %8p %.4d %.4d %.8x" + +IMG_INTERNAL +void PVRSRVDumpRefCountCCB(void) +{ + int i; + + PVRSRV_LOCK_CCB(); + + PVR_LOG(("%s", gszHeader)); + + for(i = 0; i < PVRSRV_REFCOUNT_CCB_MAX; i++) + { + PVRSRV_REFCOUNT_CCB *psRefCountCCBEntry = + &gsRefCountCCB[(giOffset + i) % PVRSRV_REFCOUNT_CCB_MAX]; + + /* Early on, we won't have MAX_REFCOUNT_CCB_SIZE messages */ + if(!psRefCountCCBEntry->pszFile) + continue; + + PVR_LOG(("%s %d %s:%d", psRefCountCCBEntry->pcMesg, + psRefCountCCBEntry->ui32PID, + psRefCountCCBEntry->pszFile, + psRefCountCCBEntry->iLine)); + } + + PVRSRV_UNLOCK_CCB(); +} + +IMG_INTERNAL +void PVRSRVKernelSyncInfoIncRef2(const IMG_CHAR *pszFile, IMG_INT iLine, + PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo, + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo) +{ + IMG_UINT32 ui32RefValue = OSAtomicRead(psKernelSyncInfo->pvRefCount); + + if(!(guiDebugMask & PVRSRV_REFCOUNT_CCB_DEBUG_SYNCINFO)) + goto skip; + + PVRSRV_LOCK_CCB(); + + gsRefCountCCB[giOffset].pszFile = pszFile; + gsRefCountCCB[giOffset].iLine = iLine; + gsRefCountCCB[giOffset].ui32PID = OSGetCurrentProcessIDKM(); + snprintf(gsRefCountCCB[giOffset].pcMesg, + PVRSRV_REFCOUNT_CCB_MESG_MAX - 1, + PVRSRV_REFCOUNT_CCB_FMT_STRING, + "SYNCINFO", + psKernelSyncInfo, + psKernelMemInfo, + NULL, + (psKernelMemInfo) ? psKernelMemInfo->sMemBlk.hOSMemHandle : NULL, + ui32RefValue, + ui32RefValue + 1, + (psKernelMemInfo) ? 
psKernelMemInfo->uAllocSize : 0); + gsRefCountCCB[giOffset].pcMesg[PVRSRV_REFCOUNT_CCB_MESG_MAX - 1] = 0; + giOffset = (giOffset + 1) % PVRSRV_REFCOUNT_CCB_MAX; + + PVRSRV_UNLOCK_CCB(); + +skip: + PVRSRVAcquireSyncInfoKM(psKernelSyncInfo); +} + +IMG_INTERNAL +void PVRSRVKernelSyncInfoDecRef2(const IMG_CHAR *pszFile, IMG_INT iLine, + PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo, + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo) +{ + IMG_UINT32 ui32RefValue = OSAtomicRead(psKernelSyncInfo->pvRefCount); + + if(!(guiDebugMask & PVRSRV_REFCOUNT_CCB_DEBUG_SYNCINFO)) + goto skip; + + PVRSRV_LOCK_CCB(); + + gsRefCountCCB[giOffset].pszFile = pszFile; + gsRefCountCCB[giOffset].iLine = iLine; + gsRefCountCCB[giOffset].ui32PID = OSGetCurrentProcessIDKM(); + snprintf(gsRefCountCCB[giOffset].pcMesg, + PVRSRV_REFCOUNT_CCB_MESG_MAX - 1, + PVRSRV_REFCOUNT_CCB_FMT_STRING, + "SYNCINFO", + psKernelSyncInfo, + psKernelMemInfo, + (psKernelMemInfo) ? psKernelMemInfo->sMemBlk.hOSMemHandle : NULL, + NULL, + ui32RefValue, + ui32RefValue - 1, + (psKernelMemInfo) ? 
psKernelMemInfo->uAllocSize : 0); + gsRefCountCCB[giOffset].pcMesg[PVRSRV_REFCOUNT_CCB_MESG_MAX - 1] = 0; + giOffset = (giOffset + 1) % PVRSRV_REFCOUNT_CCB_MAX; + + PVRSRV_UNLOCK_CCB(); + +skip: + PVRSRVReleaseSyncInfoKM(psKernelSyncInfo); +} + +IMG_INTERNAL +void PVRSRVKernelMemInfoIncRef2(const IMG_CHAR *pszFile, IMG_INT iLine, + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo) +{ + if(!(guiDebugMask & PVRSRV_REFCOUNT_CCB_DEBUG_MEMINFO)) + goto skip; + + PVRSRV_LOCK_CCB(); + + gsRefCountCCB[giOffset].pszFile = pszFile; + gsRefCountCCB[giOffset].iLine = iLine; + gsRefCountCCB[giOffset].ui32PID = OSGetCurrentProcessIDKM(); + snprintf(gsRefCountCCB[giOffset].pcMesg, + PVRSRV_REFCOUNT_CCB_MESG_MAX - 1, + PVRSRV_REFCOUNT_CCB_FMT_STRING, + "MEMINFO", + psKernelMemInfo->psKernelSyncInfo, + psKernelMemInfo, + psKernelMemInfo->sMemBlk.hOSMemHandle, + NULL, + psKernelMemInfo->ui32RefCount, + psKernelMemInfo->ui32RefCount + 1, + psKernelMemInfo->uAllocSize); + gsRefCountCCB[giOffset].pcMesg[PVRSRV_REFCOUNT_CCB_MESG_MAX - 1] = 0; + giOffset = (giOffset + 1) % PVRSRV_REFCOUNT_CCB_MAX; + + PVRSRV_UNLOCK_CCB(); + +skip: + psKernelMemInfo->ui32RefCount++; +} + +IMG_INTERNAL +void PVRSRVKernelMemInfoDecRef2(const IMG_CHAR *pszFile, IMG_INT iLine, + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo) +{ + if(!(guiDebugMask & PVRSRV_REFCOUNT_CCB_DEBUG_MEMINFO)) + goto skip; + + PVRSRV_LOCK_CCB(); + + gsRefCountCCB[giOffset].pszFile = pszFile; + gsRefCountCCB[giOffset].iLine = iLine; + gsRefCountCCB[giOffset].ui32PID = OSGetCurrentProcessIDKM(); + snprintf(gsRefCountCCB[giOffset].pcMesg, + PVRSRV_REFCOUNT_CCB_MESG_MAX - 1, + PVRSRV_REFCOUNT_CCB_FMT_STRING, + "MEMINFO", + psKernelMemInfo->psKernelSyncInfo, + psKernelMemInfo, + psKernelMemInfo->sMemBlk.hOSMemHandle, + NULL, + psKernelMemInfo->ui32RefCount, + psKernelMemInfo->ui32RefCount - 1, + psKernelMemInfo->uAllocSize); + gsRefCountCCB[giOffset].pcMesg[PVRSRV_REFCOUNT_CCB_MESG_MAX - 1] = 0; + giOffset = (giOffset + 1) % PVRSRV_REFCOUNT_CCB_MAX; + + 
PVRSRV_UNLOCK_CCB(); + +skip: + psKernelMemInfo->ui32RefCount--; +} + +IMG_INTERNAL +void PVRSRVBMBufIncRef2(const IMG_CHAR *pszFile, IMG_INT iLine, BM_BUF *pBuf) +{ + if(!(guiDebugMask & PVRSRV_REFCOUNT_CCB_DEBUG_BM_BUF)) + goto skip; + + PVRSRV_LOCK_CCB(); + + gsRefCountCCB[giOffset].pszFile = pszFile; + gsRefCountCCB[giOffset].iLine = iLine; + gsRefCountCCB[giOffset].ui32PID = OSGetCurrentProcessIDKM(); + snprintf(gsRefCountCCB[giOffset].pcMesg, + PVRSRV_REFCOUNT_CCB_MESG_MAX - 1, + PVRSRV_REFCOUNT_CCB_FMT_STRING, + "BM_BUF", + NULL, + NULL, + BM_HandleToOSMemHandle(pBuf), + pBuf, + pBuf->ui32RefCount, + pBuf->ui32RefCount + 1, + (pBuf->pMapping) ? pBuf->pMapping->uSize : 0); + gsRefCountCCB[giOffset].pcMesg[PVRSRV_REFCOUNT_CCB_MESG_MAX - 1] = 0; + giOffset = (giOffset + 1) % PVRSRV_REFCOUNT_CCB_MAX; + + PVRSRV_UNLOCK_CCB(); + +skip: + pBuf->ui32RefCount++; +} + +IMG_INTERNAL +void PVRSRVBMBufDecRef2(const IMG_CHAR *pszFile, IMG_INT iLine, BM_BUF *pBuf) +{ + if(!(guiDebugMask & PVRSRV_REFCOUNT_CCB_DEBUG_BM_BUF)) + goto skip; + + PVRSRV_LOCK_CCB(); + + gsRefCountCCB[giOffset].pszFile = pszFile; + gsRefCountCCB[giOffset].iLine = iLine; + gsRefCountCCB[giOffset].ui32PID = OSGetCurrentProcessIDKM(); + snprintf(gsRefCountCCB[giOffset].pcMesg, + PVRSRV_REFCOUNT_CCB_MESG_MAX - 1, + PVRSRV_REFCOUNT_CCB_FMT_STRING, + "BM_BUF", + NULL, + NULL, + BM_HandleToOSMemHandle(pBuf), + pBuf, + pBuf->ui32RefCount, + pBuf->ui32RefCount - 1, + (pBuf->pMapping) ? 
pBuf->pMapping->uSize : 0); + gsRefCountCCB[giOffset].pcMesg[PVRSRV_REFCOUNT_CCB_MESG_MAX - 1] = 0; + giOffset = (giOffset + 1) % PVRSRV_REFCOUNT_CCB_MAX; + + PVRSRV_UNLOCK_CCB(); + +skip: + pBuf->ui32RefCount--; +} + +IMG_INTERNAL +void PVRSRVBMBufIncExport2(const IMG_CHAR *pszFile, IMG_INT iLine, BM_BUF *pBuf) +{ + if(!(guiDebugMask & PVRSRV_REFCOUNT_CCB_DEBUG_BM_BUF2)) + goto skip; + + PVRSRV_LOCK_CCB(); + + gsRefCountCCB[giOffset].pszFile = pszFile; + gsRefCountCCB[giOffset].iLine = iLine; + gsRefCountCCB[giOffset].ui32PID = OSGetCurrentProcessIDKM(); + snprintf(gsRefCountCCB[giOffset].pcMesg, + PVRSRV_REFCOUNT_CCB_MESG_MAX - 1, + PVRSRV_REFCOUNT_CCB_FMT_STRING, + "BM_BUF2", + NULL, + NULL, + BM_HandleToOSMemHandle(pBuf), + pBuf, + pBuf->ui32ExportCount, + pBuf->ui32ExportCount + 1, + (pBuf->pMapping) ? pBuf->pMapping->uSize : 0); + gsRefCountCCB[giOffset].pcMesg[PVRSRV_REFCOUNT_CCB_MESG_MAX - 1] = 0; + giOffset = (giOffset + 1) % PVRSRV_REFCOUNT_CCB_MAX; + + PVRSRV_UNLOCK_CCB(); + +skip: + pBuf->ui32ExportCount++; +} + +IMG_INTERNAL +void PVRSRVBMBufDecExport2(const IMG_CHAR *pszFile, IMG_INT iLine, BM_BUF *pBuf) +{ + if(!(guiDebugMask & PVRSRV_REFCOUNT_CCB_DEBUG_BM_BUF2)) + goto skip; + + PVRSRV_LOCK_CCB(); + + gsRefCountCCB[giOffset].pszFile = pszFile; + gsRefCountCCB[giOffset].iLine = iLine; + gsRefCountCCB[giOffset].ui32PID = OSGetCurrentProcessIDKM(); + snprintf(gsRefCountCCB[giOffset].pcMesg, + PVRSRV_REFCOUNT_CCB_MESG_MAX - 1, + PVRSRV_REFCOUNT_CCB_FMT_STRING, + "BM_BUF2", + NULL, + NULL, + BM_HandleToOSMemHandle(pBuf), + pBuf, + pBuf->ui32ExportCount, + pBuf->ui32ExportCount - 1, + (pBuf->pMapping) ? 
pBuf->pMapping->uSize : 0); + gsRefCountCCB[giOffset].pcMesg[PVRSRV_REFCOUNT_CCB_MESG_MAX - 1] = 0; + giOffset = (giOffset + 1) % PVRSRV_REFCOUNT_CCB_MAX; + + PVRSRV_UNLOCK_CCB(); + +skip: + pBuf->ui32ExportCount--; +} + +IMG_INTERNAL +void PVRSRVBMXProcIncRef2(const IMG_CHAR *pszFile, IMG_INT iLine, IMG_UINT32 ui32Index) +{ + if(!(guiDebugMask & PVRSRV_REFCOUNT_CCB_DEBUG_BM_XPROC)) + goto skip; + + PVRSRV_LOCK_CCB(); + + gsRefCountCCB[giOffset].pszFile = pszFile; + gsRefCountCCB[giOffset].iLine = iLine; + gsRefCountCCB[giOffset].ui32PID = OSGetCurrentProcessIDKM(); + snprintf(gsRefCountCCB[giOffset].pcMesg, + PVRSRV_REFCOUNT_CCB_MESG_MAX - 1, + PVRSRV_REFCOUNT_CCB_FMT_STRING, + "BM_XPROC", + NULL, + NULL, + gXProcWorkaroundShareData[ui32Index].hOSMemHandle, + (IMG_VOID *) ui32Index, + gXProcWorkaroundShareData[ui32Index].ui32RefCount, + gXProcWorkaroundShareData[ui32Index].ui32RefCount + 1, + gXProcWorkaroundShareData[ui32Index].ui32Size); + gsRefCountCCB[giOffset].pcMesg[PVRSRV_REFCOUNT_CCB_MESG_MAX - 1] = 0; + giOffset = (giOffset + 1) % PVRSRV_REFCOUNT_CCB_MAX; + + PVRSRV_UNLOCK_CCB(); + +skip: + gXProcWorkaroundShareData[ui32Index].ui32RefCount++; +} + +IMG_INTERNAL +void PVRSRVBMXProcDecRef2(const IMG_CHAR *pszFile, IMG_INT iLine, IMG_UINT32 ui32Index) +{ + if(!(guiDebugMask & PVRSRV_REFCOUNT_CCB_DEBUG_BM_XPROC)) + goto skip; + + PVRSRV_LOCK_CCB(); + + gsRefCountCCB[giOffset].pszFile = pszFile; + gsRefCountCCB[giOffset].iLine = iLine; + gsRefCountCCB[giOffset].ui32PID = OSGetCurrentProcessIDKM(); + snprintf(gsRefCountCCB[giOffset].pcMesg, + PVRSRV_REFCOUNT_CCB_MESG_MAX - 1, + PVRSRV_REFCOUNT_CCB_FMT_STRING, + "BM_XPROC", + NULL, + NULL, + gXProcWorkaroundShareData[ui32Index].hOSMemHandle, + (IMG_VOID *) ui32Index, + gXProcWorkaroundShareData[ui32Index].ui32RefCount, + gXProcWorkaroundShareData[ui32Index].ui32RefCount - 1, + gXProcWorkaroundShareData[ui32Index].ui32Size); + gsRefCountCCB[giOffset].pcMesg[PVRSRV_REFCOUNT_CCB_MESG_MAX - 1] = 0; + giOffset = 
(giOffset + 1) % PVRSRV_REFCOUNT_CCB_MAX; + + PVRSRV_UNLOCK_CCB(); + +skip: + gXProcWorkaroundShareData[ui32Index].ui32RefCount--; +} + +#if defined(__linux__) + +/* mmap refcounting is Linux specific */ + +IMG_INTERNAL +void PVRSRVOffsetStructIncRef2(const IMG_CHAR *pszFile, IMG_INT iLine, + PKV_OFFSET_STRUCT psOffsetStruct) +{ + if(!(guiDebugMask & PVRSRV_REFCOUNT_CCB_DEBUG_MMAP)) + goto skip; + + PVRSRV_LOCK_CCB(); + + gsRefCountCCB[giOffset].pszFile = pszFile; + gsRefCountCCB[giOffset].iLine = iLine; + gsRefCountCCB[giOffset].ui32PID = OSGetCurrentProcessIDKM(); + snprintf(gsRefCountCCB[giOffset].pcMesg, + PVRSRV_REFCOUNT_CCB_MESG_MAX - 1, + PVRSRV_REFCOUNT_CCB_FMT_STRING, + "MMAP", + NULL, + NULL, + psOffsetStruct->psLinuxMemArea, + psOffsetStruct, + psOffsetStruct->ui32RefCount, + psOffsetStruct->ui32RefCount + 1, + psOffsetStruct->uiRealByteSize); + gsRefCountCCB[giOffset].pcMesg[PVRSRV_REFCOUNT_CCB_MESG_MAX - 1] = 0; + giOffset = (giOffset + 1) % PVRSRV_REFCOUNT_CCB_MAX; + + PVRSRV_UNLOCK_CCB(); + +skip: + psOffsetStruct->ui32RefCount++; +} + +IMG_INTERNAL +void PVRSRVOffsetStructDecRef2(const IMG_CHAR *pszFile, IMG_INT iLine, + PKV_OFFSET_STRUCT psOffsetStruct) +{ + if(!(guiDebugMask & PVRSRV_REFCOUNT_CCB_DEBUG_MMAP)) + goto skip; + + PVRSRV_LOCK_CCB(); + + gsRefCountCCB[giOffset].pszFile = pszFile; + gsRefCountCCB[giOffset].iLine = iLine; + gsRefCountCCB[giOffset].ui32PID = OSGetCurrentProcessIDKM(); + snprintf(gsRefCountCCB[giOffset].pcMesg, + PVRSRV_REFCOUNT_CCB_MESG_MAX - 1, + PVRSRV_REFCOUNT_CCB_FMT_STRING, + "MMAP", + NULL, + NULL, + psOffsetStruct->psLinuxMemArea, + psOffsetStruct, + psOffsetStruct->ui32RefCount, + psOffsetStruct->ui32RefCount - 1, + psOffsetStruct->uiRealByteSize); + gsRefCountCCB[giOffset].pcMesg[PVRSRV_REFCOUNT_CCB_MESG_MAX - 1] = 0; + giOffset = (giOffset + 1) % PVRSRV_REFCOUNT_CCB_MAX; + + PVRSRV_UNLOCK_CCB(); + +skip: + psOffsetStruct->ui32RefCount--; +} + +IMG_INTERNAL +void PVRSRVOffsetStructIncMapped2(const IMG_CHAR 
*pszFile, IMG_INT iLine, + PKV_OFFSET_STRUCT psOffsetStruct) +{ + if(!(guiDebugMask & PVRSRV_REFCOUNT_CCB_DEBUG_MMAP2)) + goto skip; + + PVRSRV_LOCK_CCB(); + + gsRefCountCCB[giOffset].pszFile = pszFile; + gsRefCountCCB[giOffset].iLine = iLine; + gsRefCountCCB[giOffset].ui32PID = OSGetCurrentProcessIDKM(); + snprintf(gsRefCountCCB[giOffset].pcMesg, + PVRSRV_REFCOUNT_CCB_MESG_MAX - 1, + PVRSRV_REFCOUNT_CCB_FMT_STRING, + "MMAP2", + NULL, + NULL, + psOffsetStruct->psLinuxMemArea, + psOffsetStruct, + psOffsetStruct->ui32Mapped, + psOffsetStruct->ui32Mapped + 1, + psOffsetStruct->uiRealByteSize); + gsRefCountCCB[giOffset].pcMesg[PVRSRV_REFCOUNT_CCB_MESG_MAX - 1] = 0; + giOffset = (giOffset + 1) % PVRSRV_REFCOUNT_CCB_MAX; + + PVRSRV_UNLOCK_CCB(); + +skip: + psOffsetStruct->ui32Mapped++; +} + +IMG_INTERNAL +void PVRSRVOffsetStructDecMapped2(const IMG_CHAR *pszFile, IMG_INT iLine, + PKV_OFFSET_STRUCT psOffsetStruct) +{ + if(!(guiDebugMask & PVRSRV_REFCOUNT_CCB_DEBUG_MMAP2)) + goto skip; + + PVRSRV_LOCK_CCB(); + + gsRefCountCCB[giOffset].pszFile = pszFile; + gsRefCountCCB[giOffset].iLine = iLine; + gsRefCountCCB[giOffset].ui32PID = OSGetCurrentProcessIDKM(); + snprintf(gsRefCountCCB[giOffset].pcMesg, + PVRSRV_REFCOUNT_CCB_MESG_MAX - 1, + PVRSRV_REFCOUNT_CCB_FMT_STRING, + "MMAP2", + NULL, + NULL, + psOffsetStruct->psLinuxMemArea, + psOffsetStruct, + psOffsetStruct->ui32Mapped, + psOffsetStruct->ui32Mapped - 1, + psOffsetStruct->uiRealByteSize); + gsRefCountCCB[giOffset].pcMesg[PVRSRV_REFCOUNT_CCB_MESG_MAX - 1] = 0; + giOffset = (giOffset + 1) % PVRSRV_REFCOUNT_CCB_MAX; + + PVRSRV_UNLOCK_CCB(); + +skip: + psOffsetStruct->ui32Mapped--; +} + +#if defined(SUPPORT_ION) +PVRSRV_ERROR PVRSRVIonBufferSyncInfoIncRef2(const IMG_CHAR *pszFile, IMG_INT iLine, + IMG_HANDLE hUnique, + IMG_HANDLE hDevCookie, + IMG_HANDLE hDevMemContext, + PVRSRV_ION_SYNC_INFO **ppsIonSyncInfo, + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo) +{ + PVRSRV_ERROR eError; + + /* + We have to do the call 1st as we need 
to Ion syninfo which it returns + */ + eError = PVRSRVIonBufferSyncAcquire(hUnique, + hDevCookie, + hDevMemContext, + ppsIonSyncInfo); + + if (eError == PVRSRV_OK) + { + if(!(guiDebugMask & PVRSRV_REFCOUNT_CCB_DEBUG_ION_SYNC)) + goto skip; + + PVRSRV_LOCK_CCB(); + + gsRefCountCCB[giOffset].pszFile = pszFile; + gsRefCountCCB[giOffset].iLine = iLine; + gsRefCountCCB[giOffset].ui32PID = OSGetCurrentProcessIDKM(); + snprintf(gsRefCountCCB[giOffset].pcMesg, + PVRSRV_REFCOUNT_CCB_MESG_MAX - 1, + PVRSRV_REFCOUNT_CCB_FMT_STRING, + "ION_SYNC", + (*ppsIonSyncInfo)->psSyncInfo, + psKernelMemInfo, + NULL, + *ppsIonSyncInfo, + (*ppsIonSyncInfo)->ui32RefCount - 1, + (*ppsIonSyncInfo)->ui32RefCount, + 0); + gsRefCountCCB[giOffset].pcMesg[PVRSRV_REFCOUNT_CCB_MESG_MAX - 1] = 0; + giOffset = (giOffset + 1) % PVRSRV_REFCOUNT_CCB_MAX; + + PVRSRV_UNLOCK_CCB(); + } + +skip: + return eError; +} + +void PVRSRVIonBufferSyncInfoDecRef2(const IMG_CHAR *pszFile, IMG_INT iLine, + PVRSRV_ION_SYNC_INFO *psIonSyncInfo, + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo) +{ + if(!(guiDebugMask & PVRSRV_REFCOUNT_CCB_DEBUG_ION_SYNC)) + goto skip; + + PVRSRV_LOCK_CCB(); + + gsRefCountCCB[giOffset].pszFile = pszFile; + gsRefCountCCB[giOffset].iLine = iLine; + gsRefCountCCB[giOffset].ui32PID = OSGetCurrentProcessIDKM(); + snprintf(gsRefCountCCB[giOffset].pcMesg, + PVRSRV_REFCOUNT_CCB_MESG_MAX - 1, + PVRSRV_REFCOUNT_CCB_FMT_STRING, + "ION_SYNC", + psIonSyncInfo->psSyncInfo, + psKernelMemInfo, + NULL, + psIonSyncInfo, + psIonSyncInfo->ui32RefCount, + psIonSyncInfo->ui32RefCount - 1, + 0); + gsRefCountCCB[giOffset].pcMesg[PVRSRV_REFCOUNT_CCB_MESG_MAX - 1] = 0; + giOffset = (giOffset + 1) % PVRSRV_REFCOUNT_CCB_MAX; + + PVRSRV_UNLOCK_CCB(); +skip: + PVRSRVIonBufferSyncRelease(psIonSyncInfo); +} + +#endif /* defined (SUPPORT_ION) */ + +#if defined(SUPPORT_DMABUF) +PVRSRV_ERROR PVRSRVDmaBufSyncInfoIncRef2(const IMG_CHAR *pszFile, IMG_INT iLine, + IMG_HANDLE hUnique, + IMG_HANDLE hPriv, + IMG_HANDLE hDevCookie, + 
IMG_HANDLE hDevMemContext, + PVRSRV_DMABUF_SYNC_INFO **ppsDmaBufSyncInfo, + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo) +{ + PVRSRV_ERROR eError; + + eError = PVRSRVDmaBufSyncAcquire(hUnique, + hPriv, + hDevCookie, + hDevMemContext, + ppsDmaBufSyncInfo); + + if (eError == PVRSRV_OK) + { + if(!(guiDebugMask & PVRSRV_REFCOUNT_CCB_DEBUG_DMABUF_SYNC)) + goto skip; + + PVRSRV_LOCK_CCB(); + + gsRefCountCCB[giOffset].pszFile = pszFile; + gsRefCountCCB[giOffset].iLine = iLine; + gsRefCountCCB[giOffset].ui32PID = OSGetCurrentProcessIDKM(); + snprintf(gsRefCountCCB[giOffset].pcMesg, + PVRSRV_REFCOUNT_CCB_MESG_MAX - 1, + PVRSRV_REFCOUNT_CCB_FMT_STRING, + "DMA-BUF_SYNC", + (*ppsDmaBufSyncInfo)->psSyncInfo, + psKernelMemInfo, + NULL, + *ppsDmaBufSyncInfo, + (*ppsDmaBufSyncInfo)->ui32RefCount - 1, + (*ppsDmaBufSyncInfo)->ui32RefCount, + 0); + gsRefCountCCB[giOffset].pcMesg[PVRSRV_REFCOUNT_CCB_MESG_MAX - 1] = 0; + giOffset = (giOffset + 1) % PVRSRV_REFCOUNT_CCB_MAX; + + PVRSRV_UNLOCK_CCB(); + } + +skip: + return eError; +} + +void PVRSRVDmaBufSyncInfoDecRef2(const IMG_CHAR *pszFile, IMG_INT iLine, + PVRSRV_DMABUF_SYNC_INFO *psDmaBufSyncInfo, + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo) +{ + if(!(guiDebugMask & PVRSRV_REFCOUNT_CCB_DEBUG_DMABUF_SYNC)) + goto skip; + + PVRSRV_LOCK_CCB(); + + gsRefCountCCB[giOffset].pszFile = pszFile; + gsRefCountCCB[giOffset].iLine = iLine; + gsRefCountCCB[giOffset].ui32PID = OSGetCurrentProcessIDKM(); + snprintf(gsRefCountCCB[giOffset].pcMesg, + PVRSRV_REFCOUNT_CCB_MESG_MAX - 1, + PVRSRV_REFCOUNT_CCB_FMT_STRING, + "DMA-BUF_SYNC", + psDmaBufSyncInfo->psSyncInfo, + psKernelMemInfo, + NULL, + psDmaBufSyncInfo, + psDmaBufSyncInfo->ui32RefCount, + psDmaBufSyncInfo->ui32RefCount - 1, + 0); + gsRefCountCCB[giOffset].pcMesg[PVRSRV_REFCOUNT_CCB_MESG_MAX - 1] = 0; + giOffset = (giOffset + 1) % PVRSRV_REFCOUNT_CCB_MAX; + + PVRSRV_UNLOCK_CCB(); +skip: + PVRSRVDmaBufSyncRelease(psDmaBufSyncInfo); +} +#endif /* defined (SUPPORT_DMABUF) */ + +#endif /* 
defined(__linux__) */ + +#endif /* defined(PVRSRV_REFCOUNT_DEBUG) */ diff --git a/sgx_km/eurasia_km/services4/srvkm/common/resman.c b/sgx_km/eurasia_km/services4/srvkm/common/resman.c new file mode 100644 index 0000000..a8b8a2f --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/common/resman.c @@ -0,0 +1,990 @@ +/*************************************************************************/ /*! +@Title Resource Manager +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Provide resource management +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. 
+ +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#include "services_headers.h" +#include "resman.h" + +#ifdef __linux__ +#include + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)) +#ifndef AUTOCONF_INCLUDED +#include +#endif +#endif + +#include +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9) +#include +#else +#include +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) +#include +#else +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27) +#include +#else +#include +#endif +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) +static DEFINE_MUTEX(lock); +#define DOWN(m) mutex_lock(m) +#define UP(m) mutex_unlock(m) +#else +static DECLARE_MUTEX(lock); +#define DOWN(m) down(m) +#define UP(m) up(m) +#endif + +#define ACQUIRE_SYNC_OBJ do { \ + if (in_interrupt()) { \ + printk("ISR cannot take RESMAN mutex\n"); \ + BUG(); \ + } \ + else DOWN(&lock); \ +} while (0) +#define RELEASE_SYNC_OBJ UP(&lock) + +#else + +#define ACQUIRE_SYNC_OBJ +#define RELEASE_SYNC_OBJ + +#endif + +#define RESMAN_SIGNATURE 0x12345678 + +/****************************************************************************** + * resman structures + *****************************************************************************/ + +/* resman item structure */ +typedef struct _RESMAN_ITEM_ +{ +#ifdef DEBUG + IMG_UINT32 ui32Signature; +#endif + 
struct _RESMAN_ITEM_ **ppsThis; /*!< list navigation */ + struct _RESMAN_ITEM_ *psNext; /*!< list navigation */ + + IMG_UINT32 ui32Flags; /*!< flags */ + IMG_UINT32 ui32ResType;/*!< res type */ + + IMG_PVOID pvParam; /*!< param1 for callback */ + IMG_UINT32 ui32Param; /*!< param2 for callback */ + + RESMAN_FREE_FN pfnFreeResource;/*!< resman item free callback */ +} RESMAN_ITEM; + + +/* resman context structure */ +typedef struct _RESMAN_CONTEXT_ +{ +#ifdef DEBUG + IMG_UINT32 ui32Signature; +#endif + struct _RESMAN_CONTEXT_ **ppsThis;/*!< list navigation */ + struct _RESMAN_CONTEXT_ *psNext;/*!< list navigation */ + + PVRSRV_PER_PROCESS_DATA *psPerProc; /* owner of resources */ + + RESMAN_ITEM *psResItemList;/*!< res item list for context */ + +} RESMAN_CONTEXT; + + +/* resman list structure */ +typedef struct +{ + RESMAN_CONTEXT *psContextList; /*!< resman context list */ + +} RESMAN_LIST, *PRESMAN_LIST; /* PRQA S 3205 */ + + +PRESMAN_LIST gpsResList = IMG_NULL; + +#include "lists.h" /* PRQA S 5087 */ /* include lists.h required here */ + +static IMPLEMENT_LIST_ANY_VA(RESMAN_ITEM) +static IMPLEMENT_LIST_ANY_VA_2(RESMAN_ITEM, IMG_BOOL, IMG_FALSE) +static IMPLEMENT_LIST_INSERT(RESMAN_ITEM) +static IMPLEMENT_LIST_REMOVE(RESMAN_ITEM) +static IMPLEMENT_LIST_REVERSE(RESMAN_ITEM) + +static IMPLEMENT_LIST_REMOVE(RESMAN_CONTEXT) +static IMPLEMENT_LIST_INSERT(RESMAN_CONTEXT) + + +#define PRINT_RESLIST(x, y, z) + +/******************************************************** Forword references */ + +static PVRSRV_ERROR FreeResourceByPtr(RESMAN_ITEM *psItem, IMG_BOOL bExecuteCallback, IMG_BOOL bForceCleanup); + +static PVRSRV_ERROR FreeResourceByCriteria(PRESMAN_CONTEXT psContext, + IMG_UINT32 ui32SearchCriteria, + IMG_UINT32 ui32ResType, + IMG_PVOID pvParam, + IMG_UINT32 ui32Param, + IMG_BOOL bExecuteCallback); + + +#ifdef DEBUG + static IMG_VOID ValidateResList(PRESMAN_LIST psResList); + #define VALIDATERESLIST() ValidateResList(gpsResList) +#else + #define VALIDATERESLIST() 
+#endif + + + + + + +/*! +****************************************************************************** + + @Function ResManInit + + @Description initialises the resman + + @Return none + +******************************************************************************/ +PVRSRV_ERROR ResManInit(IMG_VOID) +{ + if (gpsResList == IMG_NULL) + { + /* If not already initialised */ + if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(*gpsResList), + (IMG_VOID **)&gpsResList, IMG_NULL, + "Resource Manager List") != PVRSRV_OK) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + /* Init list, the linked list has dummy entries at both ends */ + gpsResList->psContextList = IMG_NULL; + + /* Check resource list */ + VALIDATERESLIST(); + } + + return PVRSRV_OK; +} + + +/*! +****************************************************************************** + + @Function ResManDeInit + + @Description de-initialises the resman + + @Return none + +******************************************************************************/ +IMG_VOID ResManDeInit(IMG_VOID) +{ + if (gpsResList != IMG_NULL) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(*gpsResList), gpsResList, IMG_NULL); + gpsResList = IMG_NULL; + } +} + + +/*! +****************************************************************************** + + @Function PVRSRVResManConnect + + @Description Opens a connection to the Resource Manager + + @input hPerProc - Per-process data (if applicable) + @output phResManContext - Resman context + + @Return error code or PVRSRV_OK + +******************************************************************************/ +PVRSRV_ERROR PVRSRVResManConnect(IMG_HANDLE hPerProc, + PRESMAN_CONTEXT *phResManContext) +{ + PVRSRV_ERROR eError; + PRESMAN_CONTEXT psResManContext; + + /*Acquire resource list sync object*/ + ACQUIRE_SYNC_OBJ; + + /*Check resource list*/ + VALIDATERESLIST(); + + /* Allocate memory for the new context. 
*/ + eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(*psResManContext), + (IMG_VOID **)&psResManContext, IMG_NULL, + "Resource Manager Context"); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVResManConnect: ERROR allocating new RESMAN context struct")); + + /* Check resource list */ + VALIDATERESLIST(); + + /* Release resource list sync object */ + RELEASE_SYNC_OBJ; + + return eError; + } + +#ifdef DEBUG + psResManContext->ui32Signature = RESMAN_SIGNATURE; +#endif /* DEBUG */ + psResManContext->psResItemList = IMG_NULL; + psResManContext->psPerProc = hPerProc; + + /* Insert new context struct after the dummy first entry */ + List_RESMAN_CONTEXT_Insert(&gpsResList->psContextList, psResManContext); + + /* Check resource list */ + VALIDATERESLIST(); + + /* Release resource list sync object */ + RELEASE_SYNC_OBJ; + + *phResManContext = psResManContext; + + return PVRSRV_OK; +} + + +/*! +****************************************************************************** + + @Function PVRSRVResManDisconnect + + @Description Closes a Resource Manager connection and frees all resources + + @input hResManContext - Resman context + @input bKernelContext - IMG_TRUE for kernel contexts + + @Return IMG_VOID + +******************************************************************************/ +IMG_VOID PVRSRVResManDisconnect(PRESMAN_CONTEXT psResManContext, + IMG_BOOL bKernelContext) +{ + /* Acquire resource list sync object */ + ACQUIRE_SYNC_OBJ; + + /* Check resource list */ + VALIDATERESLIST(); + + /* Print and validate resource list */ + PRINT_RESLIST(gpsResList, psResManContext, IMG_TRUE); + + /* Free all auto-freed resources in order */ + + if (!bKernelContext) + { + /* OS specific User-mode Mappings: */ + FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_OS_USERMODE_MAPPING, 0, 0, IMG_TRUE); + + /* VGX types: */ + FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DMA_CLIENT_FIFO_DATA, 0, 0, IMG_TRUE); + + 
/* Event Object */ + FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_EVENT_OBJECT, 0, 0, IMG_TRUE); + + /* syncobject state (Read/Write Complete values) */ + /* Must be FIFO, so we reverse the list, twice */ + List_RESMAN_ITEM_Reverse(&psResManContext->psResItemList); + FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_MODIFY_SYNC_OPS, 0, 0, IMG_TRUE); + List_RESMAN_ITEM_Reverse(&psResManContext->psResItemList); // (could survive without this - all following items would be cleared up "fifo" too) + + /* SGX types: */ + FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_HW_RENDER_CONTEXT, 0, 0, IMG_TRUE); + FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_HW_TRANSFER_CONTEXT, 0, 0, IMG_TRUE); + FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_HW_2D_CONTEXT, 0, 0, IMG_TRUE); + FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_TRANSFER_CONTEXT, 0, 0, IMG_TRUE); + FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_SHARED_PB_DESC_CREATE_LOCK, 0, 0, IMG_TRUE); + FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_SHARED_PB_DESC, 0, 0, IMG_TRUE); + + /* COMMON types: */ + FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_SYNC_INFO, 0, 0, IMG_TRUE); + FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DEVICECLASSMEM_MAPPING, 0, 0, IMG_TRUE); + FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DEVICEMEM_WRAP, 0, 0, IMG_TRUE); + FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DEVICEMEM_MAPPING, 0, 0, IMG_TRUE); + FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_KERNEL_DEVICEMEM_ALLOCATION, 0, 0, IMG_TRUE); + FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DEVICEMEM_ALLOCATION, 0, 0, IMG_TRUE); +#if 
defined(SUPPORT_ION) + FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DEVICEMEM_ION, 0, 0, IMG_TRUE); +#endif +#if defined(SUPPORT_DMABUF) + FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DEVICEMEM_DMABUF, 0, 0, IMG_TRUE); +#endif + FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DEVICEMEM_CONTEXT, 0, 0, IMG_TRUE); + FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_SHARED_MEM_INFO, 0, 0, IMG_TRUE); + + /* DISPLAY CLASS types: */ + FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DISPLAYCLASS_SWAPCHAIN_REF, 0, 0, IMG_TRUE); + FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DISPLAYCLASS_DEVICE, 0, 0, IMG_TRUE); + + /* BUFFER CLASS types: */ + FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_BUFFERCLASS_DEVICE, 0, 0, IMG_TRUE); + } + + /* Ensure that there are no resources left */ + PVR_ASSERT(psResManContext->psResItemList == IMG_NULL); + + /* Remove the context struct from the list */ + List_RESMAN_CONTEXT_Remove(psResManContext); + + /* Free the context struct */ + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(RESMAN_CONTEXT), psResManContext, IMG_NULL); + /*not nulling pointer, copy on stack*/ + + + /* Check resource list */ + VALIDATERESLIST(); + + /* Print and validate resource list */ + PRINT_RESLIST(gpsResList, psResManContext, IMG_FALSE); + + /* Release resource list sync object */ + RELEASE_SYNC_OBJ; +} + + +/*! 
******************************************************************************
 @Function	 ResManRegisterRes

 @Description : Inform the resource manager that the given resource has
				been allocated and freeing of it will be the responsibility
				of the resource manager

 @input 	psResManContext - resman context
 @input 	ui32ResType - identify what kind of resource it is
 @input 	pvParam - address of resource
 @input 	ui32Param - size of resource
 @input 	pfnFreeResource - pointer to function that frees this resource

 @Return   On success a pointer to an opaque data structure that represents
			the allocated resource, else NULL

**************************************************************************/
PRESMAN_ITEM ResManRegisterRes(PRESMAN_CONTEXT	psResManContext,
							   IMG_UINT32		ui32ResType,
							   IMG_PVOID		pvParam,
							   IMG_UINT32		ui32Param,
							   RESMAN_FREE_FN	pfnFreeResource)
{
	PRESMAN_ITEM	psNewResItem;

	PVR_ASSERT(psResManContext != IMG_NULL);
	PVR_ASSERT(ui32ResType != 0);

	/* Defensive check mirroring the assert: fail gracefully in release builds */
	if (psResManContext == IMG_NULL)
	{
		PVR_DPF((PVR_DBG_ERROR, "ResManRegisterRes: invalid parameter - psResManContext"));
		return (PRESMAN_ITEM) IMG_NULL;
	}

	/* Acquire resource list sync object */
	ACQUIRE_SYNC_OBJ;

	/* Check resource list */
	VALIDATERESLIST();

	PVR_DPF((PVR_DBG_MESSAGE, "ResManRegisterRes: register resource "
			"Context 0x%p, ResType 0x%x, pvParam 0x%p, ui32Param 0x%x, "
			"FreeFunc %p",
			psResManContext,
			ui32ResType,
			pvParam,
			ui32Param,
			pfnFreeResource));

	/* Allocate memory for the new resource structure */
	if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
				   sizeof(RESMAN_ITEM), (IMG_VOID **)&psNewResItem,
				   IMG_NULL,
				   "Resource Manager Item") != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR, "ResManRegisterRes: "
				"ERROR allocating new resource item"));

		/* Release resource list sync object */
		RELEASE_SYNC_OBJ;

		return((PRESMAN_ITEM)IMG_NULL);
	}

	/* Fill in details about this resource */
#ifdef DEBUG
	/* Signature lets ValidateResList()/asserts detect stale or corrupt items */
	psNewResItem->ui32Signature = RESMAN_SIGNATURE;
#endif /* DEBUG */
	psNewResItem->ui32ResType = ui32ResType;
	psNewResItem->pvParam = pvParam;
	psNewResItem->ui32Param = ui32Param;
	psNewResItem->pfnFreeResource = pfnFreeResource;
	psNewResItem->ui32Flags = 0;

	/* Insert new structure after dummy first entry */
	List_RESMAN_ITEM_Insert(&psResManContext->psResItemList, psNewResItem);

	/* Check resource list */
	VALIDATERESLIST();

	/* Release resource list sync object */
	RELEASE_SYNC_OBJ;

	return(psNewResItem);
}

/*!
******************************************************************************
 @Function	 ResManFreeResByPtr

 @Description	   frees a resource by matching on pointer type

 @inputs	 psResItem - pointer to resource item to free
			 bForceCleanup	- ignored uKernel re-sync

 @Return   PVRSRV_ERROR
**************************************************************************/
PVRSRV_ERROR ResManFreeResByPtr(RESMAN_ITEM	*psResItem, IMG_BOOL bForceCleanup)
{
	PVRSRV_ERROR eError;

	PVR_ASSERT(psResItem != IMG_NULL);

	/* A NULL item is treated as "already freed", not an error */
	if (psResItem == IMG_NULL)
	{
		PVR_DPF((PVR_DBG_MESSAGE, "ResManFreeResByPtr: NULL ptr - nothing to do"));
		return PVRSRV_OK;
	}

	PVR_DPF((PVR_DBG_MESSAGE, "ResManFreeResByPtr: freeing resource at %p",
			psResItem));

	/*Acquire resource list sync object*/
	ACQUIRE_SYNC_OBJ;

	/*Check resource list*/
	VALIDATERESLIST();

	/*Free resource (executes the item's registered free callback)*/
	eError = FreeResourceByPtr(psResItem, IMG_TRUE, bForceCleanup);

	/*Check resource list*/
	VALIDATERESLIST();

	/*Release resource list sync object*/
	RELEASE_SYNC_OBJ;

	return(eError);
}


/*!
******************************************************************************
 @Function	 ResManFreeResByCriteria

 @Description	   frees a resource by matching on criteria

 @inputs	 hResManContext - handle for resman context
 @inputs	 ui32SearchCriteria - indicates which parameters should be
			 used in search for resources to free
 @inputs	 ui32ResType - identify what kind of resource to free
 @inputs	 pvParam - address of resource to be freed
 @inputs	 ui32Param - size of resource to be freed

 @Return	PVRSRV_ERROR
**************************************************************************/
PVRSRV_ERROR ResManFreeResByCriteria(PRESMAN_CONTEXT	psResManContext,
									 IMG_UINT32			ui32SearchCriteria,
									 IMG_UINT32			ui32ResType,
									 IMG_PVOID			pvParam,
									 IMG_UINT32			ui32Param)
{
	PVRSRV_ERROR	eError;

	PVR_ASSERT(psResManContext != IMG_NULL);

	/* Acquire resource list sync object */
	ACQUIRE_SYNC_OBJ;

	/* Check resource list */
	VALIDATERESLIST();

	PVR_DPF((PVR_DBG_MESSAGE, "ResManFreeResByCriteria: "
			"Context 0x%p, Criteria 0x%x, Type 0x%x, Addr 0x%p, Param 0x%x",
			psResManContext, ui32SearchCriteria, ui32ResType,
			pvParam, ui32Param));

	/* Free resources by criteria for this context; IMG_TRUE means each
	   matched item's registered free callback is executed */
	eError = FreeResourceByCriteria(psResManContext, ui32SearchCriteria,
									ui32ResType, pvParam, ui32Param,
									IMG_TRUE);

	/* Check resource list */
	VALIDATERESLIST();

	/* Release resource list sync object */
	RELEASE_SYNC_OBJ;

	return eError;
}


/*!
******************************************************************************
 @Function	 ResManDissociateRes

 @Description	   Moves a resource from one context to another.

 @inputs	 psResItem - pointer to resource item to dissociate
 @inputs	 psNewResManContext - new resman context for the resource

 @Return	PVRSRV_ERROR
**************************************************************************/
PVRSRV_ERROR ResManDissociateRes(RESMAN_ITEM		*psResItem,
							 PRESMAN_CONTEXT	psNewResManContext)
{
	PVRSRV_ERROR eError = PVRSRV_OK;

	PVR_ASSERT(psResItem != IMG_NULL);

	if (psResItem == IMG_NULL)
	{
		PVR_DPF((PVR_DBG_ERROR, "ResManDissociateRes: invalid parameter - psResItem"));
		PVR_DBG_BREAK;
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

#ifdef DEBUG /* QAC fix */
	PVR_ASSERT(psResItem->ui32Signature == RESMAN_SIGNATURE);
#endif

	if (psNewResManContext != IMG_NULL)
	{
		/* Remove this item from its old resource list */
		List_RESMAN_ITEM_Remove(psResItem);

		/* Re-insert into new list */
		List_RESMAN_ITEM_Insert(&psNewResManContext->psResItemList, psResItem);

	}
	else
	{
		/* No destination context: free the resource instead of moving it.
		   NOTE(review): FreeResourceByPtr releases/reacquires the list sync
		   object internally, implying the caller holds it - confirm the
		   locking contract expected on this path. */
		eError = FreeResourceByPtr(psResItem, IMG_FALSE, CLEANUP_WITH_POLL);
		if(eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR, "ResManDissociateRes: failed to free resource by pointer"));
			return eError;
		}
	}

	return eError;
}

/*!
******************************************************************************
 @Function	 ResManFindResourceByPtr_AnyVaCb

 @Description
			Compares the resman item with a given pointer.

 @inputs	 psCurItem - The item to check
 @inputs	 va - Variable argument list with:
					psItem - pointer to resource item to find

 @Return	IMG_BOOL
**************************************************************************/
static IMG_BOOL ResManFindResourceByPtr_AnyVaCb(RESMAN_ITEM *psCurItem, va_list va)
{
	RESMAN_ITEM		*psItem;

	psItem = va_arg(va, RESMAN_ITEM*);

	/* Pointer identity is the match criterion */
	return (IMG_BOOL)(psCurItem == psItem);
}


/*!
******************************************************************************
 @Function	 ResManFindResourceByPtr

 @Description
			Attempts to find a resource in the list for this context

 @inputs	 hResManContext - handle for resman context
 @inputs	 psItem - pointer to resource item to find

 @Return	PVRSRV_ERROR
**************************************************************************/
IMG_INTERNAL PVRSRV_ERROR ResManFindResourceByPtr(PRESMAN_CONTEXT	psResManContext,
												  RESMAN_ITEM		*psItem)
{
/*	RESMAN_ITEM		*psCurItem;*/

	PVRSRV_ERROR	eResult;

	PVR_ASSERT(psResManContext != IMG_NULL);
	PVR_ASSERT(psItem != IMG_NULL);

	if ((psItem == IMG_NULL) || (psResManContext == IMG_NULL))
	{
		PVR_DPF((PVR_DBG_ERROR, "ResManFindResourceByPtr: invalid parameter"));
		PVR_DBG_BREAK;
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

#ifdef DEBUG	/* QAC fix */
	PVR_ASSERT(psItem->ui32Signature == RESMAN_SIGNATURE);
#endif

	/* Acquire resource list sync object */
	ACQUIRE_SYNC_OBJ;

	PVR_DPF((PVR_DBG_MESSAGE,
			"FindResourceByPtr: psItem=%p, psItem->psNext=%p",
			psItem, psItem->psNext));

	PVR_DPF((PVR_DBG_MESSAGE,
			"FindResourceByPtr: Resource Ctx 0x%p, Type 0x%x, Addr 0x%p, "
			"Param 0x%x, FnCall %p, Flags 0x%x",
			psResManContext,
			psItem->ui32ResType,
			psItem->pvParam,
			psItem->ui32Param,
			psItem->pfnFreeResource,
			psItem->ui32Flags));

	/* Search resource items starting at after the first dummy item */
	if(List_RESMAN_ITEM_IMG_BOOL_Any_va(psResManContext->psResItemList,
										&ResManFindResourceByPtr_AnyVaCb,
										psItem))
	{
		/* Item is owned by this context */
		eResult = PVRSRV_OK;
	}
	else
	{
		/* Item was not found in this context's list */
		eResult = PVRSRV_ERROR_NOT_OWNER;
	}

	/* Release resource list sync object */
	RELEASE_SYNC_OBJ;

/*	return PVRSRV_ERROR_NOT_OWNER;*/
	return eResult;
}

/*!
******************************************************************************
 @Function	 FreeResourceByPtr

 @Description
			Frees a resource and removes it from the list
			NOTE : this function must be called with the resource
			list sync object held

 @inputs	 psItem - pointer to resource item to free
			 bExecuteCallback - execute callback?
			 bForceCleanup - skips uKernel re-sync

 @Return	PVRSRV_ERROR
**************************************************************************/
static PVRSRV_ERROR FreeResourceByPtr(RESMAN_ITEM	*psItem,
									  IMG_BOOL		bExecuteCallback,
									  IMG_BOOL		bForceCleanup)
{
	PVRSRV_ERROR eError = PVRSRV_OK;

	PVR_ASSERT(psItem != IMG_NULL);

	if (psItem == IMG_NULL)
	{
		PVR_DPF((PVR_DBG_ERROR, "FreeResourceByPtr: invalid parameter"));
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

#ifdef DEBUG	/* QAC fix */
	PVR_ASSERT(psItem->ui32Signature == RESMAN_SIGNATURE);
#endif

	PVR_DPF((PVR_DBG_MESSAGE,
			"FreeResourceByPtr: psItem=%p, psItem->psNext=%p",
			psItem, psItem->psNext));

	PVR_DPF((PVR_DBG_MESSAGE,
			"FreeResourceByPtr: Type 0x%x, Addr 0x%p, "
			"Param 0x%x, FnCall %p, Flags 0x%x",
			psItem->ui32ResType,
			psItem->pvParam,
			psItem->ui32Param,
			psItem->pfnFreeResource, psItem->ui32Flags));

	/* Release resource list sync object just in case the free routine calls the resource manager */
	RELEASE_SYNC_OBJ;

	/* Call the freeing routine */
	if (bExecuteCallback)
	{
		eError = psItem->pfnFreeResource(psItem->pvParam, psItem->ui32Param, bForceCleanup);
		/* PVRSRV_ERROR_RETRY is not a failure: the item is retained so the
		   caller can retry once the microkernel has caught up */
		if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
		{
			PVR_DPF((PVR_DBG_ERROR, "FreeResourceByPtr: ERROR calling FreeResource function"));
		}
	}

	/* Acquire resource list sync object */
	ACQUIRE_SYNC_OBJ;

	if (eError != PVRSRV_ERROR_RETRY)
	{
		/* Remove this item from the resource list */
		List_RESMAN_ITEM_Remove(psItem);

		/* Free memory for the resource item */
		OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(RESMAN_ITEM), psItem, IMG_NULL);
	}

	return(eError);
}

/*!
******************************************************************************
 @Function	 FreeResourceByCriteria_AnyVaCb

 @Description
			Matches a resource manager item with a given criteria.

 @inputs	 psCurItem - the item to be matched
 @inputs	 va - a variable argument list with:
			 ui32SearchCriteria - indicates which parameters should be used
			 in the search for resources to free
			 ui32ResType - identify what kind of resource to free
			 pvParam - address of resource to be freed
			 ui32Param - size of resource to be freed


 @Return	psCurItem if matched, IMG_NULL otherwise.
**************************************************************************/
static IMG_VOID* FreeResourceByCriteria_AnyVaCb(RESMAN_ITEM *psCurItem, va_list va)
{
	IMG_UINT32 ui32SearchCriteria;
	IMG_UINT32 ui32ResType;
	IMG_PVOID pvParam;
	IMG_UINT32 ui32Param;

	ui32SearchCriteria = va_arg(va, IMG_UINT32);
	ui32ResType = va_arg(va, IMG_UINT32);
	pvParam = va_arg(va, IMG_PVOID);
	ui32Param = va_arg(va, IMG_UINT32);

	/*check that for all conditions are either disabled or eval to true*/
	if(
	/* Check resource type */
		(((ui32SearchCriteria & RESMAN_CRITERIA_RESTYPE) == 0UL) ||
		(psCurItem->ui32ResType == ui32ResType))
	&&
	/* Check address */
		(((ui32SearchCriteria & RESMAN_CRITERIA_PVOID_PARAM) == 0UL) ||
		(psCurItem->pvParam == pvParam))
	&&
	/* Check size */
		(((ui32SearchCriteria & RESMAN_CRITERIA_UI32_PARAM) == 0UL) ||
		(psCurItem->ui32Param == ui32Param))
		)
	{
		return psCurItem;
	}
	else
	{
		return IMG_NULL;
	}
}

/*!
******************************************************************************
 @Function	 FreeResourceByCriteria

 @Description
			Frees all resources that match the given criteria for the
			context.

			NOTE : this function must be called with the resource
			list sync object held

 @inputs	 psResManContext - pointer to resman context
 @inputs	 ui32SearchCriteria - indicates which parameters should be used
			 in the search for resources to free
 @inputs	 ui32ResType - identify what kind of resource to free
 @inputs	 pvParam - address of resource to be freed
 @inputs	 ui32Param - size of resource to be freed
 @inputs	 bExecuteCallback - execute callback?

 @Return	PVRSRV_ERROR
**************************************************************************/
static PVRSRV_ERROR FreeResourceByCriteria(PRESMAN_CONTEXT	psResManContext,
										   IMG_UINT32		ui32SearchCriteria,
										   IMG_UINT32		ui32ResType,
										   IMG_PVOID		pvParam,
										   IMG_UINT32		ui32Param,
										   IMG_BOOL			bExecuteCallback)
{
	PRESMAN_ITEM	psCurItem;
	PVRSRV_ERROR	eError = PVRSRV_OK;

	/* Search resource items starting at after the first dummy item */
	/*while we get a match and not an error*/
	while((psCurItem = (PRESMAN_ITEM)
				List_RESMAN_ITEM_Any_va(psResManContext->psResItemList,
										&FreeResourceByCriteria_AnyVaCb,
										ui32SearchCriteria,
										ui32ResType,
										pvParam,
										ui32Param)) != IMG_NULL
		  && eError == PVRSRV_OK)
	{
		do
		{
			eError = FreeResourceByPtr(psCurItem, bExecuteCallback, CLEANUP_WITH_POLL);
			if (eError == PVRSRV_ERROR_RETRY)
			{
				/* uKernel still busy with this resource: drop all locks so
				   other threads and SGX can make progress, then retry */
				RELEASE_SYNC_OBJ;
				OSReleaseBridgeLock();
				/* Give a chance for other threads to come in and SGX to do more work */
				OSSleepms(MAX_CLEANUP_TIME_WAIT_US/1000);
				OSReacquireBridgeLock();
				ACQUIRE_SYNC_OBJ;
			}
		} while (eError == PVRSRV_ERROR_RETRY);
	}

	return eError;
}


#ifdef DEBUG
/*!
******************************************************************************
 @Function	 ValidateResList

 @Description
			Walks the resource list checking the pointers
			NOTE : this function must be called with the resource
			list sync object held

 @Return	none
**************************************************************************/
static IMG_VOID ValidateResList(PRESMAN_LIST psResList)
{
	PRESMAN_ITEM		psCurItem, *ppsThisItem;
	PRESMAN_CONTEXT		psCurContext, *ppsThisContext;

	/* check we're initialised */
	if (psResList == IMG_NULL)
	{
		PVR_DPF((PVR_DBG_MESSAGE, "ValidateResList: resman not initialised yet"));
		return;
	}

	psCurContext = psResList->psContextList;
	ppsThisContext = &psResList->psContextList;

	/* Walk the context list */
	while(psCurContext != IMG_NULL)
	{
		/* Check current item */
		PVR_ASSERT(psCurContext->ui32Signature == RESMAN_SIGNATURE);
		/* Each node's back-pointer must address the link that points at it */
		if (psCurContext->ppsThis != ppsThisContext)
		{
			PVR_DPF((PVR_DBG_WARNING,
					"psCC=%p psCC->ppsThis=%p psCC->psNext=%p ppsTC=%p",
					psCurContext,
					psCurContext->ppsThis,
					psCurContext->psNext,
					ppsThisContext));
			PVR_ASSERT(psCurContext->ppsThis == ppsThisContext);
		}

		/* Walk the list for this context */
		psCurItem = psCurContext->psResItemList;
		ppsThisItem = &psCurContext->psResItemList;
		while(psCurItem != IMG_NULL)
		{
			/* Check current item */
			PVR_ASSERT(psCurItem->ui32Signature == RESMAN_SIGNATURE);
			if (psCurItem->ppsThis != ppsThisItem)
			{
				PVR_DPF((PVR_DBG_WARNING,
						"psCurItem=%p psCurItem->ppsThis=%p psCurItem->psNext=%p ppsThisItem=%p",
						psCurItem,
						psCurItem->ppsThis,
						psCurItem->psNext,
						ppsThisItem));
				PVR_ASSERT(psCurItem->ppsThis == ppsThisItem);
			}

			/* Move to next item */
			ppsThisItem = &psCurItem->psNext;
			psCurItem = psCurItem->psNext;
		}

		/* Move to next context */
		ppsThisContext = &psCurContext->psNext;
		psCurContext = psCurContext->psNext;
	}
}
#endif /* DEBUG */


+/****************************************************************************** + End of file (resman.c) +******************************************************************************/ diff --git a/sgx_km/eurasia_km/services4/srvkm/common/ttrace.c b/sgx_km/eurasia_km/services4/srvkm/common/ttrace.c new file mode 100644 index 0000000..d1dd788 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/common/ttrace.c @@ -0,0 +1,601 @@ +/*************************************************************************/ /*! +@Title Timed Trace functions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#if defined (TTRACE) + +#include "services_headers.h" +#include "ttrace.h" + +#if defined(PVRSRV_NEED_PVR_DPF) +#define CHECKSIZE(n,m) \ + if ((n & m) != n) \ + PVR_DPF((PVR_DBG_ERROR,"Size check failed for " #m)) +#else +#define CHECKSIZE(n,m) +#endif + +#define TIME_TRACE_HASH_TABLE_SIZE 32 + +HASH_TABLE *g_psBufferTable; +IMG_UINT32 g_ui32HostUID; +IMG_HANDLE g_psTimer; + +/* Trace buffer struct */ +typedef struct +{ + IMG_UINT32 ui32Woff; /* Offset to where next item will be written */ + IMG_UINT32 ui32Roff; /* Offset to where to start reading from */ + IMG_UINT32 ui32ByteCount; /* Number of bytes in buffer */ + IMG_UINT8 ui8Data[0]; +} sTimeTraceBuffer; + +/*! 
+****************************************************************************** + + @Function PVRSRVTimeTraceItemSize + + @Description + + Calculate the size of a trace item + + @Input psTraceItem : Trace item + + @Return size of trace item + +******************************************************************************/ +static IMG_UINT32 +PVRSRVTimeTraceItemSize(IMG_UINT32 *psTraceItem) +{ + IMG_UINT32 ui32Size = PVRSRV_TRACE_ITEM_SIZE; + + ui32Size += READ_HEADER(SIZE, psTraceItem[PVRSRV_TRACE_DATA_HEADER]); + + return ui32Size; +} + +/*! +****************************************************************************** + + @Function PVRSRVTimeTraceAllocItem + + @Description + + Allocate a trace item from the buffer of the current process + + @Output ppsTraceItem : Pointer to allocated trace item + + @Input ui32Size : Size of data packet to be allocated + + @Return none + +******************************************************************************/ +static IMG_VOID +PVRSRVTimeTraceAllocItem(IMG_UINT32 **pui32Item, IMG_UINT32 ui32Size) +{ + IMG_UINT32 ui32PID = OSGetCurrentProcessIDKM(); + IMG_UINT32 ui32AllocOffset; + sTimeTraceBuffer *psBuffer = (sTimeTraceBuffer *) HASH_Retrieve(g_psBufferTable, (IMG_UINTPTR_T) ui32PID); + + /* The caller only asks for extra data space */ + ui32Size += PVRSRV_TRACE_ITEM_SIZE; + + /* Always round to 32-bit */ + ui32Size = ((ui32Size - 1) & (~0x3)) + 0x04; + + if (!psBuffer) + { + PVRSRV_ERROR eError; + + PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVTimeTraceAllocItem: Creating buffer for PID %u", ui32PID)); + eError = PVRSRVTimeTraceBufferCreate(ui32PID); + if (eError != PVRSRV_OK) + { + *pui32Item = IMG_NULL; + PVR_DPF((PVR_DBG_ERROR, "PVRSRVTimeTraceAllocItem: Failed to create buffer")); + return; + } + + psBuffer = (sTimeTraceBuffer *) HASH_Retrieve(g_psBufferTable, (IMG_UINTPTR_T) ui32PID); + if (psBuffer == IMG_NULL) + { + *pui32Item = NULL; + PVR_DPF((PVR_DBG_ERROR, "PVRSRVTimeTraceAllocItem: Failed to retrieve buffer")); + return; 
+ } + } + + /* Can't allocate more then buffer size */ + if (ui32Size >= TIME_TRACE_BUFFER_SIZE) + { + *pui32Item = NULL; + PVR_DPF((PVR_DBG_ERROR, "PVRSRVTimeTraceAllocItem: Error trace item too large (%d)", ui32Size)); + return; + } + + /* FIXME: Enter critical section? */ + + /* Always ensure we have enough space to write a padding message */ + if ((psBuffer->ui32Woff + ui32Size + PVRSRV_TRACE_ITEM_SIZE) > TIME_TRACE_BUFFER_SIZE) + { + IMG_UINT32 *ui32WriteEOB = (IMG_UINT32 *) &psBuffer->ui8Data[psBuffer->ui32Woff]; + IMG_UINT32 ui32Remain = TIME_TRACE_BUFFER_SIZE - psBuffer->ui32Woff; + + /* Not enough space at the end of the buffer, back to the start */ + *ui32WriteEOB++ = WRITE_HEADER(GROUP, PVRSRV_TRACE_GROUP_PADDING); + *ui32WriteEOB++ = 0; /* Don't need timestamp */ + *ui32WriteEOB++ = 0; /* Don't need UID */ + *ui32WriteEOB = WRITE_HEADER(SIZE, (ui32Remain - PVRSRV_TRACE_ITEM_SIZE)); + psBuffer->ui32ByteCount += ui32Remain; + psBuffer->ui32Woff = ui32AllocOffset = 0; + } + else + ui32AllocOffset = psBuffer->ui32Woff; + + psBuffer->ui32Woff = psBuffer->ui32Woff + ui32Size; + psBuffer->ui32ByteCount += ui32Size; + + /* This allocation will start overwriting past our read pointer, move the read pointer along */ + while (psBuffer->ui32ByteCount > TIME_TRACE_BUFFER_SIZE) + { + IMG_UINT32 *psReadItem = (IMG_UINT32 *) &psBuffer->ui8Data[psBuffer->ui32Roff]; + IMG_UINT32 ui32ReadSize; + + ui32ReadSize = PVRSRVTimeTraceItemSize(psReadItem); + psBuffer->ui32Roff = (psBuffer->ui32Roff + ui32ReadSize) & (TIME_TRACE_BUFFER_SIZE - 1); + psBuffer->ui32ByteCount -= ui32ReadSize; + } + + *pui32Item = (IMG_UINT32 *) &psBuffer->ui8Data[ui32AllocOffset]; + /* FIXME: Exit critical section? */ +} + +/*! +****************************************************************************** + + @Function PVRSRVTimeTraceBufferCreate + + @Description + + Create a trace buffer. + + Note: We assume that this will only be called once per process. 
+ + @Input ui32PID : PID of the process that is creating the buffer + + @Return none + +******************************************************************************/ +PVRSRV_ERROR PVRSRVTimeTraceBufferCreate(IMG_UINT32 ui32PID) +{ + sTimeTraceBuffer *psBuffer; + PVRSRV_ERROR eError = PVRSRV_OK; + + eError = OSAllocMem(PVRSRV_PAGEABLE_SELECT, + sizeof(sTimeTraceBuffer) + TIME_TRACE_BUFFER_SIZE, + (IMG_VOID **)&psBuffer, IMG_NULL, + "Time Trace Buffer"); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVTimeTraceBufferCreate: Error allocating trace buffer")); + return eError; + } + + OSMemSet(psBuffer, 0, TIME_TRACE_BUFFER_SIZE); + + if (!HASH_Insert(g_psBufferTable, (IMG_UINTPTR_T) ui32PID, (IMG_UINTPTR_T) psBuffer)) + { + OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(sTimeTraceBuffer) + TIME_TRACE_BUFFER_SIZE, + psBuffer, NULL); + PVR_DPF((PVR_DBG_ERROR, "PVRSRVTimeTraceBufferCreate: Error adding trace buffer to hash table")); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + return eError; +} + +/*! +****************************************************************************** + + @Function PVRSRVTimeTraceBufferDestroy + + @Description + + Destroy a trace buffer. + + Note: We assume that this will only be called once per process. 
+ + @Input ui32PID : PID of the process that is creating the buffer + + @Return none + +******************************************************************************/ +PVRSRV_ERROR PVRSRVTimeTraceBufferDestroy(IMG_UINT32 ui32PID) +{ +#if !defined(TTRACE_KEEP_BUFFER_ON_EXIT) + sTimeTraceBuffer *psBuffer; + +#if defined(DUMP_TTRACE_BUFFERS_ON_EXIT) + PVRSRVDumpTimeTraceBuffers(); +#endif + psBuffer = (sTimeTraceBuffer *) HASH_Retrieve(g_psBufferTable, (IMG_UINTPTR_T) ui32PID); + if (psBuffer) + { + OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(sTimeTraceBuffer) + TIME_TRACE_BUFFER_SIZE, + psBuffer, NULL); + HASH_Remove(g_psBufferTable, (IMG_UINTPTR_T) ui32PID); + return PVRSRV_OK; + } + + PVR_DPF((PVR_DBG_ERROR, "PVRSRVTimeTraceBufferDestroy: Can't find trace buffer in hash table")); + return PVRSRV_ERROR_INVALID_PARAMS; +#else + return PVRSRV_OK; +#endif +} + +/*! +****************************************************************************** + + @Function PVRSRVTimeTraceInit + + @Description + + Initialise the timed trace subsystem. 
+ + @Return Error + +******************************************************************************/ +PVRSRV_ERROR PVRSRVTimeTraceInit(IMG_VOID) +{ + g_psBufferTable = HASH_Create(TIME_TRACE_HASH_TABLE_SIZE); + + /* Create hash table to store the per process buffers in */ + if (!g_psBufferTable) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVTimeTraceInit: Error creating hash table")); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + /* Create the kernel buffer */ + PVRSRVTimeTraceBufferCreate(KERNEL_ID); + + g_psTimer = OSFuncHighResTimerCreate(); + + if (!g_psTimer) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVTimeTraceInit: Error creating timer")); + return PVRSRV_ERROR_INIT_FAILURE; + } + return PVRSRV_OK; +} + +static PVRSRV_ERROR _PVRSRVTimeTraceBufferDestroy(IMG_UINTPTR_T hKey, IMG_UINTPTR_T hData) +{ + PVR_UNREFERENCED_PARAMETER(hData); + PVR_DPF((PVR_DBG_MESSAGE, "_PVRSRVTimeTraceBufferDestroy: Destroying buffer for PID %u", (IMG_UINT32) hKey)); + + PVRSRVTimeTraceBufferDestroy(hKey); + return PVRSRV_OK; +} + +/*! +****************************************************************************** + + @Function PVRSRVTimeTraceDeinit + + @Description + + De-initialise the timed trace subsystem. + + @Return Error + +******************************************************************************/ +IMG_VOID PVRSRVTimeTraceDeinit(IMG_VOID) +{ + PVRSRVTimeTraceBufferDestroy(KERNEL_ID); + /* Free any buffers the where created at alloc item time */ + HASH_Iterate(g_psBufferTable, _PVRSRVTimeTraceBufferDestroy); + HASH_Delete(g_psBufferTable); + OSFuncHighResTimerDestroy(g_psTimer); +} + +/*! +****************************************************************************** + + @Function PVRSRVTimeTraceWriteHeader + + @Description + + Write the header for a trace item. 

 @Input		pui32TraceItem : Pointer to trace item

 @Input		ui32Group : Trace item's group ID

 @Input		ui32Class : Trace item's class ID

 @Input		ui32Token : Trace item's ui32Token ID

 @Input		ui32Size : Trace item's data payload size

 @Input		ui32Type : Trace item's data type

 @Input		ui32Count : Trace item's data count

 @Return	Pointer to data payload space, or NULL if no data payload

******************************************************************************/
#ifdef INLINE_IS_PRAGMA
#pragma inline(PVRSRVTimeTraceWriteHeader)
#endif
static INLINE IMG_VOID *PVRSRVTimeTraceWriteHeader(IMG_UINT32 *pui32TraceItem, IMG_UINT32 ui32Group,
						IMG_UINT32 ui32Class, IMG_UINT32 ui32Token,
						IMG_UINT32 ui32Size, IMG_UINT32 ui32Type,
						IMG_UINT32 ui32Count)
{
	/* Sanity check arg's: each value must fit inside its header mask */
	CHECKSIZE(ui32Group, PVRSRV_TRACE_GROUP_MASK);
	CHECKSIZE(ui32Class, PVRSRV_TRACE_CLASS_MASK);
	CHECKSIZE(ui32Token, PVRSRV_TRACE_TOKEN_MASK);

	CHECKSIZE(ui32Size, PVRSRV_TRACE_SIZE_MASK);
	CHECKSIZE(ui32Type, PVRSRV_TRACE_TYPE_MASK);
	CHECKSIZE(ui32Count, PVRSRV_TRACE_COUNT_MASK);

	/* Trace header: pack group, class and token into a single word */
	pui32TraceItem[PVRSRV_TRACE_HEADER] = WRITE_HEADER(GROUP, ui32Group);
	pui32TraceItem[PVRSRV_TRACE_HEADER] |= WRITE_HEADER(CLASS, ui32Class);
	pui32TraceItem[PVRSRV_TRACE_HEADER] |= WRITE_HEADER(TOKEN, ui32Token);

	/* Data header: pack payload size, element type and element count */
	pui32TraceItem[PVRSRV_TRACE_DATA_HEADER] = WRITE_HEADER(SIZE, ui32Size);
	pui32TraceItem[PVRSRV_TRACE_DATA_HEADER] |= WRITE_HEADER(TYPE, ui32Type);
	pui32TraceItem[PVRSRV_TRACE_DATA_HEADER] |= WRITE_HEADER(COUNT, ui32Count);

	/* Timestamp plus a monotonically increasing host UID for ordering */
	pui32TraceItem[PVRSRV_TRACE_TIMESTAMP] = OSFuncHighResTimerGetus(g_psTimer);
	pui32TraceItem[PVRSRV_TRACE_HOSTUID] = g_ui32HostUID++;

	return ui32Size?((IMG_VOID *) &pui32TraceItem[PVRSRV_TRACE_DATA_PAYLOAD]):NULL;
}

/*!
******************************************************************************

 @Function	PVRSRVTimeTraceArray

 @Description

 Write trace item with an array of data

 @Input		ui32Group : Trace item's group ID

 @Input		ui32Class : Trace item's class ID

 @Input		ui32Token : Trace item's ui32Token ID

 @Input		ui32Type : Trace item's data type

 @Input		ui32Count : Trace item's data count

 @Input		pui8Data : Pointer to data array

 @Return	None

******************************************************************************/
IMG_VOID PVRSRVTimeTraceArray(IMG_UINT32 ui32Group, IMG_UINT32 ui32Class, IMG_UINT32 ui32Token,
							IMG_UINT32 ui32Type, IMG_UINT32 ui32Count, IMG_UINT8 *pui8Data)
{
	IMG_UINT32 *pui32TraceItem;
	IMG_UINT32 ui32Size, ui32TypeSize;
	IMG_UINT8 *ui8Ptr;

	/* Only the 1st 4 sizes are for ui types, others are "special" */
	switch (ui32Type)
	{
		case PVRSRV_TRACE_TYPE_UI8:	ui32TypeSize = 1;
				break;
		case PVRSRV_TRACE_TYPE_UI16:	ui32TypeSize = 2;
				break;
		case PVRSRV_TRACE_TYPE_UI32:	ui32TypeSize = 4;
				break;
		case PVRSRV_TRACE_TYPE_UI64:	ui32TypeSize = 8;
				break;
		default:
			PVR_DPF((PVR_DBG_ERROR, "Unsupported size\n"));
			return;
	}

	ui32Size = ui32TypeSize * ui32Count;

	/* Allocate space from the buffer */
	PVRSRVTimeTraceAllocItem(&pui32TraceItem, ui32Size);

	if (!pui32TraceItem)
	{
		PVR_DPF((PVR_DBG_ERROR, "Can't find buffer\n"));
		return;
	}

	ui8Ptr = PVRSRVTimeTraceWriteHeader(pui32TraceItem, ui32Group, ui32Class, ui32Token,
										ui32Size, ui32Type, ui32Count);

	/* ui8Ptr is NULL when the payload size is zero (ui32Count == 0) */
	if (ui8Ptr)
	{
		OSMemCopy(ui8Ptr, pui8Data, ui32Size);
	}
}

/*!
******************************************************************************

 @Function	PVRSRVTimeTraceSyncObject

 @Description

 Write trace item with a sync object

 @Input		ui32Group : Trace item's group ID

 @Input		ui32Token : Trace item's ui32Token ID

 @Input		psSync : Sync object

 @Input		ui8SyncOp : Sync object operation

 @Return	None

******************************************************************************/
IMG_VOID PVRSRVTimeTraceSyncObject(IMG_UINT32 ui32Group, IMG_UINT32 ui32Token,
								   PVRSRV_KERNEL_SYNC_INFO *psSync, IMG_UINT8 ui8SyncOp)
{
	IMG_UINT32 *pui32TraceItem;
	IMG_UINT32 *ui32Ptr;
	IMG_UINT32 ui32Size = PVRSRV_TRACE_TYPE_SYNC_SIZE;


	PVRSRVTimeTraceAllocItem(&pui32TraceItem, ui32Size);

	if (!pui32TraceItem)
	{
		PVR_DPF((PVR_DBG_ERROR, "Can't find buffer\n"));
		return;
	}

	/* ui32Size is a non-zero constant, so the payload pointer is valid here */
	ui32Ptr = PVRSRVTimeTraceWriteHeader(pui32TraceItem, ui32Group, PVRSRV_TRACE_CLASS_SYNC,
					ui32Token, ui32Size, PVRSRV_TRACE_TYPE_SYNC, 1);

	/* Snapshot the sync object's counters and completion addresses */
	ui32Ptr[PVRSRV_TRACE_SYNC_UID] = psSync->ui32UID;
	ui32Ptr[PVRSRV_TRACE_SYNC_WOP] = psSync->psSyncData->ui32WriteOpsPending;
	ui32Ptr[PVRSRV_TRACE_SYNC_WOC] = psSync->psSyncData->ui32WriteOpsComplete;
	ui32Ptr[PVRSRV_TRACE_SYNC_ROP] = psSync->psSyncData->ui32ReadOpsPending;
	ui32Ptr[PVRSRV_TRACE_SYNC_ROC] = psSync->psSyncData->ui32ReadOpsComplete;
	ui32Ptr[PVRSRV_TRACE_SYNC_RO2P] = psSync->psSyncData->ui32ReadOps2Pending;
	ui32Ptr[PVRSRV_TRACE_SYNC_RO2C] = psSync->psSyncData->ui32ReadOps2Complete;
	ui32Ptr[PVRSRV_TRACE_SYNC_WO_DEV_VADDR] = psSync->sWriteOpsCompleteDevVAddr.uiAddr;
	ui32Ptr[PVRSRV_TRACE_SYNC_RO_DEV_VADDR] = psSync->sReadOpsCompleteDevVAddr.uiAddr;
	ui32Ptr[PVRSRV_TRACE_SYNC_RO2_DEV_VADDR] = psSync->sReadOps2CompleteDevVAddr.uiAddr;
	ui32Ptr[PVRSRV_TRACE_SYNC_OP] = ui8SyncOp;
}

/*!
******************************************************************************

 @Function	PVRSRVDumpTimeTraceBuffer

 @Description

 Dump the contents of the trace buffer.

 @Input		hKey : Hash key the buffer was stored under (the owning PID)

 @Input		hData : Hash data (pointer to the sTimeTraceBuffer to dump)

 @Return	Error

******************************************************************************/
static PVRSRV_ERROR PVRSRVDumpTimeTraceBuffer(IMG_UINTPTR_T hKey, IMG_UINTPTR_T hData)
{
	sTimeTraceBuffer *psBuffer = (sTimeTraceBuffer *) hData;
	IMG_UINT32 ui32ByteCount = psBuffer->ui32ByteCount;
	IMG_UINT32 ui32Walker = psBuffer->ui32Roff;
	IMG_UINT32 ui32Read, ui32LineLen, ui32EOL, ui32MinLine;

	PVR_LOG(("TTB for PID %u:\n", (IMG_UINT32) hKey));

	/* Walk from the read offset, printing up to four 32-bit words per line
	   and wrapping at the end of the ring buffer */
	while (ui32ByteCount)
	{
		IMG_UINT32 *pui32Buffer = (IMG_UINT32 *) &psBuffer->ui8Data[ui32Walker];

		ui32LineLen = (ui32ByteCount/sizeof(IMG_UINT32));
		ui32EOL = (TIME_TRACE_BUFFER_SIZE - ui32Walker)/sizeof(IMG_UINT32);
		ui32MinLine = (ui32LineLen < ui32EOL)?ui32LineLen:ui32EOL;

		if (ui32MinLine >= 4)
		{
			PVR_LOG(("\t(TTB-%X) %08X %08X %08X %08X [", ui32ByteCount,
					pui32Buffer[0], pui32Buffer[1], pui32Buffer[2], pui32Buffer[3]));
			ui32Read = 4 * sizeof(IMG_UINT32);
		}
		else if (ui32MinLine >= 3)
		{
			PVR_LOG(("\t(TTB-%X) %08X %08X %08X [", ui32ByteCount,
					pui32Buffer[0], pui32Buffer[1], pui32Buffer[2]));
			ui32Read = 3 * sizeof(IMG_UINT32);
		}
		else if (ui32MinLine >= 2)
		{
			PVR_LOG(("\t(TTB-%X) %08X %08X [", ui32ByteCount,
					pui32Buffer[0], pui32Buffer[1]));
			ui32Read = 2 * sizeof(IMG_UINT32);
		}
		else
		{
			PVR_LOG(("\t(TTB-%X) %08X [", ui32ByteCount,
					pui32Buffer[0]));
			ui32Read = sizeof(IMG_UINT32);
		}

		/* NOTE(review): wrap mask assumes TIME_TRACE_BUFFER_SIZE is a power
		   of two - confirm against its definition */
		ui32Walker = (ui32Walker + ui32Read) & (TIME_TRACE_BUFFER_SIZE - 1);
		ui32ByteCount -= ui32Read;
	}

	return PVRSRV_OK;
}

/*!
******************************************************************************

 @Function	PVRSRVDumpTimeTraceBuffers

 @Description

 Dump the contents of all the trace buffers.
+ + @Return None + +******************************************************************************/ +IMG_VOID PVRSRVDumpTimeTraceBuffers(IMG_VOID) +{ + HASH_Iterate(g_psBufferTable, PVRSRVDumpTimeTraceBuffer); +} + +#endif /* TTRACE */ diff --git a/sgx_km/eurasia_km/services4/srvkm/devices/sgx/mmu.c b/sgx_km/eurasia_km/services4/srvkm/devices/sgx/mmu.c new file mode 100644 index 0000000..774d151 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/devices/sgx/mmu.c @@ -0,0 +1,4734 @@ +/*************************************************************************/ /*! +@Title MMU Management +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements basic low level control of MMU. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "sgxdefs.h" +#include "sgxmmu.h" +#include "services_headers.h" +#include "buffer_manager.h" +#include "hash.h" +#include "ra.h" +#include "pdump_km.h" +#include "sgxapi_km.h" +#include "sgxinfo.h" +#include "sgxinfokm.h" +#include "mmu.h" +#include "sgxconfig.h" +#include "sgx_bridge_km.h" +#include "pdump_osfunc.h" + +#define UINT32_MAX_VALUE 0xFFFFFFFFUL + +/* + MMU performs device virtual to physical translation. 
+ terminology: + page directory (PD) + pagetable (PT) + data page (DP) + + Incoming 32bit Device Virtual Addresses are deconstructed into 3 fields: + --------------------------------------------------------- + | PD Index/tag: | PT Index: | DP offset: | + | bits 31:22 | bits 21:n | bits (n-1):0 | + --------------------------------------------------------- + where typically n=12 for a standard 4k DP + but n=16 for a 64k DP + + MMU page directory (PD), pagetable (PT) and data page (DP) config: + PD: + - always one page per address space + - up to 4k in size to span 4Gb (32bit) + - contains up to 1024 32bit entries + - entries are indexed by the top 12 bits of an incoming 32bit device virtual address + - the PD entry selected contains the physical address of the PT to + perform the next stage of the V to P translation + + PT: + - size depends on the DP size, e.g. 4k DPs have 4k PTs but 16k DPs have 1k PTs + - each PT always spans 4Mb of device virtual address space irrespective of DP size + - number of entries in a PT depend on DP size and ranges from 1024 to 4 entries + - entries are indexed by the PT Index field of the device virtual address (21:n) + - the PT entry selected contains the physical address of the DP to access + + DP: + - size varies from 4k to 4M in multiple of 4 steppings + - DP offset field of the device virtual address ((n-1):0) is used as a byte offset + to address into the DP itself +*/ + +#define SGX_MAX_PD_ENTRIES (1<<(SGX_FEATURE_ADDRESS_SPACE_SIZE - SGX_MMU_PT_SHIFT - SGX_MMU_PAGE_SHIFT)) + +#if defined(FIX_HW_BRN_31620) +/* Sim doesn't use the address mask */ +#define SGX_MMU_PDE_DUMMY_PAGE (0)//(0x00000020U) +#define SGX_MMU_PTE_DUMMY_PAGE (0)//(0x00000020U) + +/* 4MB adress range per page table */ +#define BRN31620_PT_ADDRESS_RANGE_SHIFT 22 +#define BRN31620_PT_ADDRESS_RANGE_SIZE (1 << BRN31620_PT_ADDRESS_RANGE_SHIFT) + +/* 64MB address range per PDE cache line */ +#define BRN31620_PDE_CACHE_FILL_SHIFT 26 +#define 
BRN31620_PDE_CACHE_FILL_SIZE (1 << BRN31620_PDE_CACHE_FILL_SHIFT) +#define BRN31620_PDE_CACHE_FILL_MASK (BRN31620_PDE_CACHE_FILL_SIZE - 1) + +/* Page Directory Enteries per cache line */ +#define BRN31620_PDES_PER_CACHE_LINE_SHIFT (BRN31620_PDE_CACHE_FILL_SHIFT - BRN31620_PT_ADDRESS_RANGE_SHIFT) +#define BRN31620_PDES_PER_CACHE_LINE_SIZE (1 << BRN31620_PDES_PER_CACHE_LINE_SHIFT) +#define BRN31620_PDES_PER_CACHE_LINE_MASK (BRN31620_PDES_PER_CACHE_LINE_SIZE - 1) + +/* Macros for working out offset for dummy pages */ +#define BRN31620_DUMMY_PAGE_OFFSET (1 * SGX_MMU_PAGE_SIZE) +#define BRN31620_DUMMY_PDE_INDEX (BRN31620_DUMMY_PAGE_OFFSET / BRN31620_PT_ADDRESS_RANGE_SIZE) +#define BRN31620_DUMMY_PTE_INDEX ((BRN31620_DUMMY_PAGE_OFFSET - (BRN31620_DUMMY_PDE_INDEX * BRN31620_PT_ADDRESS_RANGE_SIZE))/SGX_MMU_PAGE_SIZE) + +/* Cache number of cache lines */ +#define BRN31620_CACHE_FLUSH_SHIFT (32 - BRN31620_PDE_CACHE_FILL_SHIFT) +#define BRN31620_CACHE_FLUSH_SIZE (1 << BRN31620_CACHE_FLUSH_SHIFT) + +/* Cache line bits in a UINT32 */ +#define BRN31620_CACHE_FLUSH_BITS_SHIFT 5 +#define BRN31620_CACHE_FLUSH_BITS_SIZE (1 << BRN31620_CACHE_FLUSH_BITS_SHIFT) +#define BRN31620_CACHE_FLUSH_BITS_MASK (BRN31620_CACHE_FLUSH_BITS_SIZE - 1) + +/* Cache line index in array */ +#define BRN31620_CACHE_FLUSH_INDEX_BITS (BRN31620_CACHE_FLUSH_SHIFT - BRN31620_CACHE_FLUSH_BITS_SHIFT) +#define BRN31620_CACHE_FLUSH_INDEX_SIZE (1 << BRN31620_CACHE_FLUSH_INDEX_BITS) + +#define BRN31620_DUMMY_PAGE_SIGNATURE 0xFEEBEE01 +#endif + +typedef struct _MMU_PT_INFO_ +{ + /* note: may need a union here to accommodate a PT page address for local memory */ + IMG_VOID *hPTPageOSMemHandle; + IMG_CPU_VIRTADDR PTPageCpuVAddr; + /* Map of reserved PTEs. + * Reserved PTEs are like "valid" PTEs in that they (and the DevVAddrs they represent) + * cannot be assigned to another allocation but their "reserved" status persists through + * any amount of mapping and unmapping, until the allocation is finally destroyed. 
+ * + * Reserved and Valid are independent. + * When a PTE is first reserved, it will have Reserved=1 and Valid=0. + * When the PTE is actually mapped, it will have Reserved=1 and Valid=1. + * When the PTE is unmapped, it will have Reserved=1 and Valid=0. + * At this point, the PT will can not be destroyed because although there is + * not an active mapping on the PT, it is known a PTE is reserved for use. + * + * The above sequence of mapping and unmapping may repeat any number of times + * until the allocation is unmapped and destroyed which causes the PTE to have + * Valid=0 and Reserved=0. + */ + /* Number of PTEs set up. + * i.e. have a valid SGX Phys Addr and the "VALID" PTE bit == 1 + */ + IMG_UINT32 ui32ValidPTECount; +} MMU_PT_INFO; + +#define MMU_CONTEXT_NAME_SIZE 50 +struct _MMU_CONTEXT_ +{ + /* the device node */ + PVRSRV_DEVICE_NODE *psDeviceNode; + + /* Page Directory CPUVirt and DevPhys Addresses */ + IMG_CPU_VIRTADDR pvPDCpuVAddr; + IMG_DEV_PHYADDR sPDDevPAddr; + + IMG_VOID *hPDOSMemHandle; + + /* information about dynamically allocated pagetables */ + MMU_PT_INFO *apsPTInfoList[SGX_MAX_PD_ENTRIES]; + + PVRSRV_SGXDEV_INFO *psDevInfo; + +#if defined(PDUMP) + IMG_UINT32 ui32PDumpMMUContextID; +#if defined(SUPPORT_PDUMP_MULTI_PROCESS) + IMG_BOOL bPDumpActive; +#endif +#endif + + IMG_UINT32 ui32PID; + IMG_CHAR szName[MMU_CONTEXT_NAME_SIZE]; + +#if defined (FIX_HW_BRN_31620) + IMG_UINT32 ui32PDChangeMask[BRN31620_CACHE_FLUSH_INDEX_SIZE]; + IMG_UINT32 ui32PDCacheRangeRefCount[BRN31620_CACHE_FLUSH_SIZE]; + MMU_PT_INFO *apsPTInfoListSave[SGX_MAX_PD_ENTRIES]; +#endif + struct _MMU_CONTEXT_ *psNext; +}; + +struct _MMU_HEAP_ +{ + /* MMU context */ + MMU_CONTEXT *psMMUContext; + + /* + heap specific details: + */ + /* the Base PD index for the heap */ + IMG_UINT32 ui32PDBaseIndex; + /* number of pagetables in this heap */ + IMG_UINT32 ui32PageTableCount; + /* total number of pagetable entries in this heap which may be mapped to data pages */ + IMG_UINT32 
ui32PTETotalUsable; + /* PD entry DP size control field */ + IMG_UINT32 ui32PDEPageSizeCtrl; + + /* + Data Page (DP) Details: + */ + /* size in bytes of a data page */ + IMG_UINT32 ui32DataPageSize; + /* bit width of the data page offset addressing field */ + IMG_UINT32 ui32DataPageBitWidth; + /* bit mask of the data page offset addressing field */ + IMG_UINT32 ui32DataPageMask; + + /* + PageTable (PT) Details: + */ + /* bit shift to base of PT addressing field */ + IMG_UINT32 ui32PTShift; + /* bit width of the PT addressing field */ + IMG_UINT32 ui32PTBitWidth; + /* bit mask of the PT addressing field */ + IMG_UINT32 ui32PTMask; + /* size in bytes of a pagetable */ + IMG_UINT32 ui32PTSize; + /* Allocated PT Entries per PT */ + IMG_UINT32 ui32PTNumEntriesAllocated; + /* Usable PT Entries per PT (may be different to num allocated for 4MB data page) */ + IMG_UINT32 ui32PTNumEntriesUsable; + + /* + PageDirectory Details: + */ + /* bit shift to base of PD addressing field */ + IMG_UINT32 ui32PDShift; + /* bit width of the PD addressing field */ + IMG_UINT32 ui32PDBitWidth; + /* bit mask of the PT addressing field */ + IMG_UINT32 ui32PDMask; + + /* + Arena Info: + */ + RA_ARENA *psVMArena; + DEV_ARENA_DESCRIPTOR *psDevArena; + + /* If we have sparse mappings then we can't do PT level sanity checks */ + IMG_BOOL bHasSparseMappings; +#if defined(PDUMP) + PDUMP_MMU_ATTRIB sMMUAttrib; +#endif +}; + + + +#if defined (SUPPORT_SGX_MMU_DUMMY_PAGE) +#define DUMMY_DATA_PAGE_SIGNATURE 0xDEADBEEF +#endif + +/* local prototypes: */ +static IMG_VOID +_DeferredFreePageTable (MMU_HEAP *pMMUHeap, IMG_UINT32 ui32PTIndex, IMG_BOOL bOSFreePT); + +#if defined (MEM_TRACK_INFO_DEBUG) +IMG_IMPORT IMG_VOID PVRSRVPrintMemTrackInfo(IMG_UINT32 ui32FaultAddr); +#endif + +#if defined(PDUMP) +static IMG_VOID +MMU_PDumpPageTables (MMU_HEAP *pMMUHeap, + IMG_DEV_VIRTADDR DevVAddr, + IMG_SIZE_T uSize, + IMG_BOOL bForUnmap, + IMG_HANDLE hUniqueTag); +#endif /* #if defined(PDUMP) */ + +/* This option tests 
page table memory, for use during device bring-up. */ +#define PAGE_TEST 0 +#if PAGE_TEST +static IMG_VOID PageTest(IMG_VOID* pMem, IMG_DEV_PHYADDR sDevPAddr); +#endif + +/* This option dumps out the PT if an assert fails */ +#define PT_DUMP 1 + +/* This option sanity checks page table PTE valid count matches active PTEs */ +#define PT_DEBUG 0 +#if (PT_DEBUG || PT_DUMP) && defined(PVRSRV_NEED_PVR_DPF) +static IMG_VOID DumpPT(MMU_PT_INFO *psPTInfoList) +{ + IMG_UINT32 *p = (IMG_UINT32*)psPTInfoList->PTPageCpuVAddr; + IMG_UINT32 i; + + /* 1024 entries in a 4K page table */ + for(i = 0; i < 1024; i += 8) + { + PVR_LOG(("%08X %08X %08X %08X %08X %08X %08X %08X", + p[i + 0], p[i + 1], p[i + 2], p[i + 3], + p[i + 4], p[i + 5], p[i + 6], p[i + 7])); + } +} +#else /* (PT_DEBUG || PT_DUMP) && defined(PVRSRV_NEED_PVR_DPF) */ +static INLINE IMG_VOID DumpPT(MMU_PT_INFO *psPTInfoList) +{ + PVR_UNREFERENCED_PARAMETER(psPTInfoList); +} +#endif /* (PT_DEBUG || PT_DUMP) && defined(PVRSRV_NEED_PVR_DPF) */ + +#if PT_DEBUG +static IMG_VOID CheckPT(MMU_PT_INFO *psPTInfoList) +{ + IMG_UINT32 *p = (IMG_UINT32*) psPTInfoList->PTPageCpuVAddr; + IMG_UINT32 i, ui32Count = 0; + + /* 1024 entries in a 4K page table */ + for(i = 0; i < 1024; i++) + if(p[i] & SGX_MMU_PTE_VALID) + ui32Count++; + + if(psPTInfoList->ui32ValidPTECount != ui32Count) + { + PVR_DPF((PVR_DBG_ERROR, "ui32ValidPTECount: %u ui32Count: %u", + psPTInfoList->ui32ValidPTECount, ui32Count)); + DumpPT(psPTInfoList); + PVR_DBG_BREAK; + } +} +#else /* PT_DEBUG */ +static INLINE IMG_VOID CheckPT(MMU_PT_INFO *psPTInfoList) +{ + PVR_UNREFERENCED_PARAMETER(psPTInfoList); +} +#endif /* PT_DEBUG */ + +/* + Debug functionality that allows us to make the CPU + mapping of pagetable memory readonly and only make + it read/write when we alter it. 
This allows us + to check that our memory isn't being overwritten +*/ +#if defined(__linux__) && defined(PVRSRV_MMU_MAKE_READWRITE_ON_DEMAND) + +#include + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)) +#ifndef AUTOCONF_INCLUDED +#include +#endif +#else +#include +#endif + +#include +#include +#include +#include +#include + +static IMG_VOID MakeKernelPageReadWrite(IMG_PVOID ulCPUVAddr) +{ + pgd_t *psPGD; + pud_t *psPUD; + pmd_t *psPMD; + pte_t *psPTE; + pte_t ptent; + IMG_UINT32 ui32CPUVAddr = (IMG_UINT32) ulCPUVAddr; + + psPGD = pgd_offset_k(ui32CPUVAddr); + if (pgd_none(*psPGD) || pgd_bad(*psPGD)) + { + PVR_ASSERT(0); + } + + psPUD = pud_offset(psPGD, ui32CPUVAddr); + if (pud_none(*psPUD) || pud_bad(*psPUD)) + { + PVR_ASSERT(0); + } + + psPMD = pmd_offset(psPUD, ui32CPUVAddr); + if (pmd_none(*psPMD) || pmd_bad(*psPMD)) + { + PVR_ASSERT(0); + } + psPTE = (pte_t *)pte_offset_kernel(psPMD, ui32CPUVAddr); + + ptent = ptep_modify_prot_start(&init_mm, ui32CPUVAddr, psPTE); + ptent = pte_mkwrite(ptent); + ptep_modify_prot_commit(&init_mm, ui32CPUVAddr, psPTE, ptent); + + flush_tlb_all(); +} + +static IMG_VOID MakeKernelPageReadOnly(IMG_PVOID ulCPUVAddr) +{ + pgd_t *psPGD; + pud_t *psPUD; + pmd_t *psPMD; + pte_t *psPTE; + pte_t ptent; + IMG_UINT32 ui32CPUVAddr = (IMG_UINT32) ulCPUVAddr; + + OSWriteMemoryBarrier(); + + psPGD = pgd_offset_k(ui32CPUVAddr); + if (pgd_none(*psPGD) || pgd_bad(*psPGD)) + { + PVR_ASSERT(0); + } + + psPUD = pud_offset(psPGD, ui32CPUVAddr); + if (pud_none(*psPUD) || pud_bad(*psPUD)) + { + PVR_ASSERT(0); + } + + psPMD = pmd_offset(psPUD, ui32CPUVAddr); + if (pmd_none(*psPMD) || pmd_bad(*psPMD)) + { + PVR_ASSERT(0); + } + + psPTE = (pte_t *)pte_offset_kernel(psPMD, ui32CPUVAddr); + + ptent = ptep_modify_prot_start(&init_mm, ui32CPUVAddr, psPTE); + ptent = pte_wrprotect(ptent); + ptep_modify_prot_commit(&init_mm, ui32CPUVAddr, psPTE, ptent); + + flush_tlb_all(); + +} + +#else /* defined(PVRSRV_MMU_MAKE_READWRITE_ON_DEMAND) */ + +static INLINE 
IMG_VOID MakeKernelPageReadWrite(IMG_PVOID ulCPUVAddr) +{ + PVR_UNREFERENCED_PARAMETER(ulCPUVAddr); +} + +static INLINE IMG_VOID MakeKernelPageReadOnly(IMG_PVOID ulCPUVAddr) +{ + PVR_UNREFERENCED_PARAMETER(ulCPUVAddr); +} + +#endif /* defined(PVRSRV_MMU_MAKE_READWRITE_ON_DEMAND) */ + +/*___________________________________________________________________________ + + Information for SUPPORT_PDUMP_MULTI_PROCESS feature. + + The client marked for pdumping will set the bPDumpActive flag in + the MMU Context (see MMU_Initialise). + + Shared heap allocations should be persistent so all apps which + are pdumped will see the allocation. Persistent flag over-rides + the bPDumpActive flag (see pdump_common.c/DbgWrite function). + + The idea is to dump PT,DP for shared heap allocations, but only + dump the PDE if the allocation is mapped into the kernel or active + client context. This ensures if a background app allocates on a + shared heap then all clients can access it in the pdump toolchain. + + + + PD PT DP + +-+ + | |---> +-+ + +-+ | |---> +-+ + +-+ + + + +-+ + + PD allocation/free: pdump flags are 0 (only need PD for active apps) + PT allocation/free: pdump flags are 0 + unless PT is for a shared heap, in which case persistent is set + PD entries (MMU init/insert shared heap): + only pdump if PDE is on the active MMU context, flags are 0 + PD entries (PT alloc): + pdump flags are 0 if kernel heap + pdump flags are 0 if shared heap and PDE is on active MMU context + otherwise ignore. + PT entries pdump flags are 0 + unless PTE is for a shared heap, in which case persistent is set + + NOTE: PDump common code:- + PDumpMallocPages and PDumpMemKM also set the persistent flag for + shared heap allocations. + + ___________________________________________________________________________ +*/ + + +/*! +****************************************************************************** + FUNCTION: MMU_IsHeapShared + + PURPOSE: Is this heap shared? 
+ PARAMETERS: In: pMMU_Heap + RETURNS: true if heap is shared +******************************************************************************/ +IMG_BOOL MMU_IsHeapShared(MMU_HEAP* pMMUHeap) +{ + switch(pMMUHeap->psDevArena->DevMemHeapType) + { + case DEVICE_MEMORY_HEAP_SHARED : + case DEVICE_MEMORY_HEAP_SHARED_EXPORTED : + return IMG_TRUE; + case DEVICE_MEMORY_HEAP_PERCONTEXT : + case DEVICE_MEMORY_HEAP_KERNEL : + return IMG_FALSE; + default: + { + PVR_DPF((PVR_DBG_ERROR, "MMU_IsHeapShared: ERROR invalid heap type")); + return IMG_FALSE; + } + } +} + +#ifdef SUPPORT_SGX_MMU_BYPASS +/*! +****************************************************************************** + FUNCTION: EnableHostAccess + + PURPOSE: Enables Host accesses to device memory, by passing the device + MMU address translation + + PARAMETERS: In: psMMUContext + RETURNS: None +******************************************************************************/ +IMG_VOID +EnableHostAccess (MMU_CONTEXT *psMMUContext) +{ + IMG_UINT32 ui32RegVal; + IMG_VOID *pvRegsBaseKM = psMMUContext->psDevInfo->pvRegsBaseKM; + + /* + bypass the MMU for the host port requestor, + conserving bypass state of other requestors + */ + ui32RegVal = OSReadHWReg(pvRegsBaseKM, EUR_CR_BIF_CTRL); + + OSWriteHWReg(pvRegsBaseKM, + EUR_CR_BIF_CTRL, + ui32RegVal | EUR_CR_BIF_CTRL_MMU_BYPASS_HOST_MASK); + /* assume we're not wiping-out any other bits */ + PDUMPREG(SGX_PDUMPREG_NAME, EUR_CR_BIF_CTRL, EUR_CR_BIF_CTRL_MMU_BYPASS_HOST_MASK); +} + +/*! 
+****************************************************************************** + FUNCTION: DisableHostAccess + + PURPOSE: Disables Host accesses to device memory, by passing the device + MMU address translation + + PARAMETERS: In: psMMUContext + RETURNS: None +******************************************************************************/ +IMG_VOID +DisableHostAccess (MMU_CONTEXT *psMMUContext) +{ + IMG_UINT32 ui32RegVal; + IMG_VOID *pvRegsBaseKM = psMMUContext->psDevInfo->pvRegsBaseKM; + + /* + disable MMU-bypass for the host port requestor, + conserving bypass state of other requestors + and flushing all caches/tlbs + */ + OSWriteHWReg(pvRegsBaseKM, + EUR_CR_BIF_CTRL, + ui32RegVal & ~EUR_CR_BIF_CTRL_MMU_BYPASS_HOST_MASK); + /* assume we're not wiping-out any other bits */ + PDUMPREG(SGX_PDUMPREG_NAME, EUR_CR_BIF_CTRL, 0); +} +#endif + + +#if defined(SGX_FEATURE_SYSTEM_CACHE) +/*! +****************************************************************************** + FUNCTION: MMU_InvalidateSystemLevelCache + + PURPOSE: Invalidates the System Level Cache to purge stale PDEs and PTEs + + PARAMETERS: In: psDevInfo + RETURNS: None + +******************************************************************************/ +static IMG_VOID MMU_InvalidateSystemLevelCache(PVRSRV_SGXDEV_INFO *psDevInfo) +{ + #if defined(SGX_FEATURE_MP) + psDevInfo->ui32CacheControl |= SGXMKIF_CC_INVAL_BIF_SL; + #else + /* The MMU always bypasses the SLC */ + PVR_UNREFERENCED_PARAMETER(psDevInfo); + #endif /* SGX_FEATURE_MP */ +} +#endif /* SGX_FEATURE_SYSTEM_CACHE */ + +/*! 
+****************************************************************************** + FUNCTION: MMU_InvalidateDirectoryCache + + PURPOSE: Invalidates the page directory cache + page table cache + requestor TLBs + + PARAMETERS: In: psDevInfo + RETURNS: None + +******************************************************************************/ +IMG_VOID MMU_InvalidateDirectoryCache(PVRSRV_SGXDEV_INFO *psDevInfo) +{ + psDevInfo->ui32CacheControl |= SGXMKIF_CC_INVAL_BIF_PD; + #if defined(SGX_FEATURE_SYSTEM_CACHE) + MMU_InvalidateSystemLevelCache(psDevInfo); + #endif /* SGX_FEATURE_SYSTEM_CACHE */ +} + + +/*! +****************************************************************************** + FUNCTION: MMU_InvalidatePageTableCache + + PURPOSE: Invalidates the page table cache + requestor TLBs + + PARAMETERS: In: psDevInfo + RETURNS: None + +******************************************************************************/ +static IMG_VOID MMU_InvalidatePageTableCache(PVRSRV_SGXDEV_INFO *psDevInfo) +{ + psDevInfo->ui32CacheControl |= SGXMKIF_CC_INVAL_BIF_PT; + #if defined(SGX_FEATURE_SYSTEM_CACHE) + MMU_InvalidateSystemLevelCache(psDevInfo); + #endif /* SGX_FEATURE_SYSTEM_CACHE */ +} + +#if defined(FIX_HW_BRN_31620) +/*! +****************************************************************************** + FUNCTION: BRN31620InvalidatePageTableEntry + + PURPOSE: Frees page tables in PDE cache line chunks re-wiring the + dummy page when required + + PARAMETERS: In: psMMUContext, ui32PDIndex, ui32PTIndex + RETURNS: None + +******************************************************************************/ +static IMG_VOID BRN31620InvalidatePageTableEntry(MMU_CONTEXT *psMMUContext, IMG_UINT32 ui32PDIndex, IMG_UINT32 ui32PTIndex, IMG_UINT32 *pui32PTE) +{ + PVRSRV_SGXDEV_INFO *psDevInfo = psMMUContext->psDevInfo; + + /* + * Note: We can't tell at this stage if this PT will be freed before + * the end of the function so we always wire up the dummy page to + * to the PT. 
+ */ + if (((ui32PDIndex % (BRN31620_PDE_CACHE_FILL_SIZE/BRN31620_PT_ADDRESS_RANGE_SIZE)) == BRN31620_DUMMY_PDE_INDEX) + && (ui32PTIndex == BRN31620_DUMMY_PTE_INDEX)) + { + *pui32PTE = (psDevInfo->sBRN31620DummyPageDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT) + | SGX_MMU_PTE_DUMMY_PAGE + | SGX_MMU_PTE_READONLY + | SGX_MMU_PTE_VALID; + } + else + { + *pui32PTE = 0; + } +} + +/*! +****************************************************************************** + FUNCTION: BRN31620FreePageTable + + PURPOSE: Frees page tables in PDE cache line chunks re-wiring the + dummy page when required + + PARAMETERS: In: psMMUContext, ui32PDIndex + RETURNS: IMG_TRUE if we freed any PT's + +******************************************************************************/ +static IMG_BOOL BRN31620FreePageTable(MMU_HEAP *psMMUHeap, IMG_UINT32 ui32PDIndex) +{ + MMU_CONTEXT *psMMUContext = psMMUHeap->psMMUContext; + PVRSRV_SGXDEV_INFO *psDevInfo = psMMUContext->psDevInfo; + IMG_UINT32 ui32PDCacheLine = ui32PDIndex >> BRN31620_PDES_PER_CACHE_LINE_SHIFT; + IMG_UINT32 bFreePTs = IMG_FALSE; + IMG_UINT32 *pui32Tmp; + + PVR_ASSERT(psMMUHeap != IMG_NULL); + + /* + * Clear the PT info for this PD index so even if we don't + * free the memory here apsPTInfoList[PDIndex] will trigger + * an "allocation" in _DeferredAllocPagetables which + * bumps up the refcount. 
+ */ + PVR_ASSERT(psMMUContext->apsPTInfoListSave[ui32PDIndex] == IMG_NULL); + + psMMUContext->apsPTInfoListSave[ui32PDIndex] = psMMUContext->apsPTInfoList[ui32PDIndex]; + psMMUContext->apsPTInfoList[ui32PDIndex] = IMG_NULL; + + /* Check if this was the last PT in the cache line */ + if (--psMMUContext->ui32PDCacheRangeRefCount[ui32PDCacheLine] == 0) + { + IMG_UINT32 i; + IMG_UINT32 ui32PDIndexStart = ui32PDCacheLine * BRN31620_PDES_PER_CACHE_LINE_SIZE; + IMG_UINT32 ui32PDIndexEnd = ui32PDIndexStart + BRN31620_PDES_PER_CACHE_LINE_SIZE; + IMG_UINT32 ui32PDBitMaskIndex, ui32PDBitMaskShift; + + /* Free all PT's in cache line */ + for (i=ui32PDIndexStart;iapsPTInfoList[i] = psMMUContext->apsPTInfoListSave[i]; + psMMUContext->apsPTInfoListSave[i] = IMG_NULL; + _DeferredFreePageTable(psMMUHeap, i - psMMUHeap->ui32PDBaseIndex, IMG_TRUE); + } + + ui32PDBitMaskIndex = ui32PDCacheLine >> BRN31620_CACHE_FLUSH_BITS_SHIFT; + ui32PDBitMaskShift = ui32PDCacheLine & BRN31620_CACHE_FLUSH_BITS_MASK; + + /* Check if this is a shared heap */ + if (MMU_IsHeapShared(psMMUHeap)) + { + /* Mark the remove of the Page Table from all memory contexts */ + MMU_CONTEXT *psMMUContextWalker = (MMU_CONTEXT*) psMMUHeap->psMMUContext->psDevInfo->pvMMUContextList; + + while(psMMUContextWalker) + { + psMMUContextWalker->ui32PDChangeMask[ui32PDBitMaskIndex] |= 1 << ui32PDBitMaskShift; + + /* + * We've just cleared a cache line's worth of PDE's so we need + * to wire up the dummy PT + */ + MakeKernelPageReadWrite(psMMUContextWalker->pvPDCpuVAddr); + pui32Tmp = (IMG_UINT32 *) psMMUContextWalker->pvPDCpuVAddr; + pui32Tmp[ui32PDIndexStart + BRN31620_DUMMY_PDE_INDEX] = (psDevInfo->sBRN31620DummyPTDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT) + | SGX_MMU_PDE_PAGE_SIZE_4K + | SGX_MMU_PDE_DUMMY_PAGE + | SGX_MMU_PDE_VALID; + MakeKernelPageReadOnly(psMMUContextWalker->pvPDCpuVAddr); + + PDUMPCOMMENT("BRN31620 Re-wire dummy PT due to releasing PT allocation block"); + PDUMPPDENTRIES(&psMMUHeap->sMMUAttrib, 
psMMUContextWalker->hPDOSMemHandle, (IMG_VOID*)&pui32Tmp[ui32PDIndexStart + BRN31620_DUMMY_PDE_INDEX], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PT_UNIQUETAG, PDUMP_PT_UNIQUETAG); + psMMUContextWalker = psMMUContextWalker->psNext; + } + } + else + { + psMMUContext->ui32PDChangeMask[ui32PDBitMaskIndex] |= 1 << ui32PDBitMaskShift; + + /* + * We've just cleared a cache line's worth of PDE's so we need + * to wire up the dummy PT + */ + MakeKernelPageReadWrite(psMMUContext->pvPDCpuVAddr); + pui32Tmp = (IMG_UINT32 *) psMMUContext->pvPDCpuVAddr; + pui32Tmp[ui32PDIndexStart + BRN31620_DUMMY_PDE_INDEX] = (psDevInfo->sBRN31620DummyPTDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT) + | SGX_MMU_PDE_PAGE_SIZE_4K + | SGX_MMU_PDE_DUMMY_PAGE + | SGX_MMU_PDE_VALID; + MakeKernelPageReadOnly(psMMUContext->pvPDCpuVAddr); + + PDUMPCOMMENT("BRN31620 Re-wire dummy PT due to releasing PT allocation block"); + PDUMPPDENTRIES(&psMMUHeap->sMMUAttrib, psMMUContext->hPDOSMemHandle, (IMG_VOID*)&pui32Tmp[ui32PDIndexStart + BRN31620_DUMMY_PDE_INDEX], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PT_UNIQUETAG, PDUMP_PT_UNIQUETAG); + } + /* We've freed a cachline's worth of PDE's so trigger a PD cache flush */ + bFreePTs = IMG_TRUE; + } + + return bFreePTs; +} +#endif + +/*! +****************************************************************************** + FUNCTION: _AllocPageTableMemory + + PURPOSE: Allocate physical memory for a page table + + PARAMETERS: In: pMMUHeap - the mmu + In: psPTInfoList - PT info + Out: psDevPAddr - device physical address for new PT + RETURNS: IMG_TRUE - Success + IMG_FALSE - Failed +******************************************************************************/ +static IMG_BOOL +_AllocPageTableMemory (MMU_HEAP *pMMUHeap, + MMU_PT_INFO *psPTInfoList, + IMG_DEV_PHYADDR *psDevPAddr) +{ + IMG_DEV_PHYADDR sDevPAddr; + IMG_CPU_PHYADDR sCpuPAddr; + + /* + depending on the specific system, pagetables are allocated from system memory + or device local memory. 
For now, just look for at least a valid local heap/arena + */ + if(pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->psLocalDevMemArena == IMG_NULL) + { + //FIXME: replace with an RA, this allocator only handles 4k allocs + if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY, + pMMUHeap->ui32PTSize, + SGX_MMU_PAGE_SIZE,//FIXME: assume 4K page size for now (wastes memory for smaller pagetables + IMG_NULL, + 0, + IMG_NULL, + (IMG_VOID **)&psPTInfoList->PTPageCpuVAddr, + &psPTInfoList->hPTPageOSMemHandle) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "_AllocPageTableMemory: ERROR call to OSAllocPages failed")); + return IMG_FALSE; + } + + /* + Force the page to read only, we will make it read/write as + and when we need to + */ + MakeKernelPageReadOnly(psPTInfoList->PTPageCpuVAddr); + + /* translate address to device physical */ + if(psPTInfoList->PTPageCpuVAddr) + { + sCpuPAddr = OSMapLinToCPUPhys(psPTInfoList->hPTPageOSMemHandle, + psPTInfoList->PTPageCpuVAddr); + } + else + { + /* This isn't used in all cases since not all ports currently support + * OSMemHandleToCpuPAddr() */ + sCpuPAddr = OSMemHandleToCpuPAddr(psPTInfoList->hPTPageOSMemHandle, 0); + } + + sDevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr); + } + else + { + /* + We cannot use IMG_SYS_PHYADDR here, as that is 64-bit for 32-bit PAE builds. + The physical address in this call to RA_Alloc is specifically the SysPAddr + of local (card) space, and it is highly unlikely we would ever need to + support > 4GB of local (card) memory (this does assume that such local + memory will be mapped into System physical memory space at a low address so + that any and all local memory exists within the 4GB SYSPAddr range). 
+ */ + IMG_UINTPTR_T uiLocalPAddr; + IMG_SYS_PHYADDR sSysPAddr; + + /* + just allocate from the first local memory arena + (unlikely to be more than one local mem area(?)) + */ + //FIXME: just allocate a 4K page for each PT for now + if(RA_Alloc(pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->psLocalDevMemArena, + SGX_MMU_PAGE_SIZE,//pMMUHeap->ui32PTSize, + IMG_NULL, + IMG_NULL, + 0, + SGX_MMU_PAGE_SIZE,//pMMUHeap->ui32PTSize, + 0, + IMG_NULL, + 0, + &uiLocalPAddr)!= IMG_TRUE) + { + PVR_DPF((PVR_DBG_ERROR, "_AllocPageTableMemory: ERROR call to RA_Alloc failed")); + return IMG_FALSE; + } + + /* Munge the local PAddr back into the SysPAddr */ + sSysPAddr.uiAddr = uiLocalPAddr; + + /* derive the CPU virtual address */ + sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr); + /* note: actual ammount is pMMUHeap->ui32PTSize but must be a multiple of 4k pages */ + psPTInfoList->PTPageCpuVAddr = OSMapPhysToLin(sCpuPAddr, + SGX_MMU_PAGE_SIZE, + PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY, + &psPTInfoList->hPTPageOSMemHandle); + if(!psPTInfoList->PTPageCpuVAddr) + { + PVR_DPF((PVR_DBG_ERROR, "_AllocPageTableMemory: ERROR failed to map page tables")); + return IMG_FALSE; + } + + /* translate address to device physical */ + sDevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr); + + #if PAGE_TEST + PageTest(psPTInfoList->PTPageCpuVAddr, sDevPAddr); + #endif + } + + PVR_ASSERT(psPTInfoList->PTPageCpuVAddr != IMG_NULL); + + MakeKernelPageReadWrite(psPTInfoList->PTPageCpuVAddr); +#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE) + { + IMG_UINT32 *pui32Tmp; + IMG_UINT32 i; + + pui32Tmp = (IMG_UINT32*)psPTInfoList->PTPageCpuVAddr; + /* point the new PT entries to the dummy data page */ + for(i=0; iui32PTNumEntriesUsable; i++) + { + pui32Tmp[i] = (pMMUHeap->psMMUContext->psDevInfo->sDummyDataDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT) + | SGX_MMU_PTE_VALID; + } + /* zero the remaining allocated entries, if any */ + for(; iui32PTNumEntriesAllocated; i++) + { + pui32Tmp[i] = 0; + } + 
} +#else + /* Zero the page table. */ + OSMemSet(psPTInfoList->PTPageCpuVAddr, 0, pMMUHeap->ui32PTSize); +#endif + MakeKernelPageReadOnly(psPTInfoList->PTPageCpuVAddr); + +#if defined(PDUMP) + { + IMG_UINT32 ui32Flags = 0; +#if defined(SUPPORT_PDUMP_MULTI_PROCESS) + /* make sure shared heap PT allocs are always pdumped */ + ui32Flags |= ( MMU_IsHeapShared(pMMUHeap) ) ? PDUMP_FLAGS_PERSISTENT : 0; +#endif + /* pdump the PT malloc */ + PDUMPMALLOCPAGETABLE(&pMMUHeap->psMMUContext->psDeviceNode->sDevId, psPTInfoList->hPTPageOSMemHandle, 0, psPTInfoList->PTPageCpuVAddr, pMMUHeap->ui32PTSize, ui32Flags, PDUMP_PT_UNIQUETAG); + /* pdump the PT Pages */ + PDUMPMEMPTENTRIES(&pMMUHeap->sMMUAttrib, psPTInfoList->hPTPageOSMemHandle, psPTInfoList->PTPageCpuVAddr, pMMUHeap->ui32PTSize, ui32Flags, IMG_TRUE, PDUMP_PT_UNIQUETAG, PDUMP_PT_UNIQUETAG); + } +#endif + + /* return the DevPAddr */ + *psDevPAddr = sDevPAddr; + + return IMG_TRUE; +} + + +/*! +****************************************************************************** + FUNCTION: _FreePageTableMemory + + PURPOSE: Free physical memory for a page table + + PARAMETERS: In: pMMUHeap - the mmu + In: psPTInfoList - PT info to free + RETURNS: NONE +******************************************************************************/ +static IMG_VOID +_FreePageTableMemory (MMU_HEAP *pMMUHeap, MMU_PT_INFO *psPTInfoList) +{ + /* + free the PT page: + depending on the specific system, pagetables are allocated from system memory + or device local memory. 
For now, just look for at least a valid local heap/arena + */ + if(pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->psLocalDevMemArena == IMG_NULL) + { + /* Force the page to read write before we free it*/ + MakeKernelPageReadWrite(psPTInfoList->PTPageCpuVAddr); + + //FIXME: replace with an RA, this allocator only handles 4k allocs + OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY, + pMMUHeap->ui32PTSize, + psPTInfoList->PTPageCpuVAddr, + psPTInfoList->hPTPageOSMemHandle); + } + else + { + IMG_SYS_PHYADDR sSysPAddr; + IMG_CPU_PHYADDR sCpuPAddr; + + /* derive the system physical address */ + sCpuPAddr = OSMapLinToCPUPhys(psPTInfoList->hPTPageOSMemHandle, + psPTInfoList->PTPageCpuVAddr); + sSysPAddr = SysCpuPAddrToSysPAddr (sCpuPAddr); + + /* unmap the CPU mapping */ + /* note: actual ammount is pMMUHeap->ui32PTSize but must be a multiple of 4k pages */ + OSUnMapPhysToLin(psPTInfoList->PTPageCpuVAddr, + SGX_MMU_PAGE_SIZE, + PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY, + psPTInfoList->hPTPageOSMemHandle); + + /* + just free from the first local memory arena + (unlikely to be more than one local mem area(?)) + Note that the cast to IMG_UINTPTR_T is ok as we're local mem. + */ + RA_Free (pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->psLocalDevMemArena, (IMG_UINTPTR_T)sSysPAddr.uiAddr, IMG_FALSE); + } +} + + + +/*! +****************************************************************************** + FUNCTION: _DeferredFreePageTable + + PURPOSE: Free one page table associated with an MMU. + + PARAMETERS: In: pMMUHeap - the mmu heap + In: ui32PTIndex - index of the page table to free relative + to the base of heap. 
+ RETURNS: None +******************************************************************************/ +static IMG_VOID +_DeferredFreePageTable (MMU_HEAP *pMMUHeap, IMG_UINT32 ui32PTIndex, IMG_BOOL bOSFreePT) +{ + IMG_UINT32 *pui32PDEntry; + IMG_UINT32 i; + IMG_UINT32 ui32PDIndex; + SYS_DATA *psSysData; + MMU_PT_INFO **ppsPTInfoList; + + SysAcquireData(&psSysData); + + /* find the index/offset in PD entries */ + ui32PDIndex = pMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> pMMUHeap->ui32PDShift; + + /* set the base PT info */ + ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex]; + + { +#if PT_DEBUG + if(ppsPTInfoList[ui32PTIndex] && ppsPTInfoList[ui32PTIndex]->ui32ValidPTECount > 0) + { + DumpPT(ppsPTInfoList[ui32PTIndex]); + /* Fall-through, will fail assert */ + } +#endif + + /* Assert that all mappings have gone */ + PVR_ASSERT(ppsPTInfoList[ui32PTIndex] == IMG_NULL || ppsPTInfoList[ui32PTIndex]->ui32ValidPTECount == 0); + } + +#if defined(PDUMP) + { + IMG_UINT32 ui32Flags = 0; +#if defined(SUPPORT_PDUMP_MULTI_PROCESS) + ui32Flags |= ( MMU_IsHeapShared(pMMUHeap) ) ? 
PDUMP_FLAGS_PERSISTENT : 0; +#endif + /* pdump the PT free */ + PDUMPCOMMENT("Free page table (page count == %08X)", pMMUHeap->ui32PageTableCount); + if(ppsPTInfoList[ui32PTIndex] && ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr) + { + PDUMPFREEPAGETABLE(&pMMUHeap->psMMUContext->psDeviceNode->sDevId, ppsPTInfoList[ui32PTIndex]->hPTPageOSMemHandle, ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr, pMMUHeap->ui32PTSize, ui32Flags, PDUMP_PT_UNIQUETAG); + } + } +#endif + + switch(pMMUHeap->psDevArena->DevMemHeapType) + { + case DEVICE_MEMORY_HEAP_SHARED : + case DEVICE_MEMORY_HEAP_SHARED_EXPORTED : + { + /* Remove Page Table from all memory contexts */ + MMU_CONTEXT *psMMUContext = (MMU_CONTEXT*)pMMUHeap->psMMUContext->psDevInfo->pvMMUContextList; + + while(psMMUContext) + { + /* get the PD CPUVAddr base and advance to the first entry */ + MakeKernelPageReadWrite(psMMUContext->pvPDCpuVAddr); + pui32PDEntry = (IMG_UINT32*)psMMUContext->pvPDCpuVAddr; + pui32PDEntry += ui32PDIndex; + +#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE) + /* point the PD entry to the dummy PT */ + pui32PDEntry[ui32PTIndex] = (psMMUContext->psDevInfo->sDummyPTDevPAddr.uiAddr + >>SGX_MMU_PDE_ADDR_ALIGNSHIFT) + | SGX_MMU_PDE_PAGE_SIZE_4K + | SGX_MMU_PDE_VALID; +#else + /* free the entry */ + if(bOSFreePT) + { + pui32PDEntry[ui32PTIndex] = 0; + } +#endif + MakeKernelPageReadOnly(psMMUContext->pvPDCpuVAddr); + #if defined(PDUMP) + /* pdump the PD Page modifications */ + #if defined(SUPPORT_PDUMP_MULTI_PROCESS) + if(psMMUContext->bPDumpActive) + #endif + { + PDUMPPDENTRIES(&pMMUHeap->sMMUAttrib, psMMUContext->hPDOSMemHandle, (IMG_VOID*)&pui32PDEntry[ui32PTIndex], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PT_UNIQUETAG, PDUMP_PT_UNIQUETAG); + } + #endif + /* advance to next context */ + psMMUContext = psMMUContext->psNext; + } + break; + } + case DEVICE_MEMORY_HEAP_PERCONTEXT : + case DEVICE_MEMORY_HEAP_KERNEL : + { + MakeKernelPageReadWrite(pMMUHeap->psMMUContext->pvPDCpuVAddr); + /* Remove Page Table from this memory 
context only */ + pui32PDEntry = (IMG_UINT32*)pMMUHeap->psMMUContext->pvPDCpuVAddr; + pui32PDEntry += ui32PDIndex; + +#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE) + /* point the PD entry to the dummy PT */ + pui32PDEntry[ui32PTIndex] = (pMMUHeap->psMMUContext->psDevInfo->sDummyPTDevPAddr.uiAddr + >>SGX_MMU_PDE_ADDR_ALIGNSHIFT) + | SGX_MMU_PDE_PAGE_SIZE_4K + | SGX_MMU_PDE_VALID; +#else + /* free the entry */ + if(bOSFreePT) + { + pui32PDEntry[ui32PTIndex] = 0; + } +#endif + MakeKernelPageReadOnly(pMMUHeap->psMMUContext->pvPDCpuVAddr); + + /* pdump the PD Page modifications */ + PDUMPPDENTRIES(&pMMUHeap->sMMUAttrib, pMMUHeap->psMMUContext->hPDOSMemHandle, (IMG_VOID*)&pui32PDEntry[ui32PTIndex], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG); + break; + } + default: + { + PVR_DPF((PVR_DBG_ERROR, "_DeferredFreePagetable: ERROR invalid heap type")); + return; + } + } + + /* clear the PT entries in each PT page */ + if(ppsPTInfoList[ui32PTIndex] != IMG_NULL) + { + if(ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr != IMG_NULL) + { + IMG_PUINT32 pui32Tmp; + + MakeKernelPageReadWrite(ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr); + pui32Tmp = (IMG_UINT32*)ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr; + + /* clear the entries */ + for(i=0; + (i<pMMUHeap->ui32PTETotalUsable) && (i<pMMUHeap->ui32PTNumEntriesUsable); + i++) + { + /* over-allocated PT entries for 4MB data page case should never be non-zero */ + pui32Tmp[i] = 0; + } + MakeKernelPageReadOnly(ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr); + + /* + free the pagetable memory + */ + if(bOSFreePT) + { + _FreePageTableMemory(pMMUHeap, ppsPTInfoList[ui32PTIndex]); + } + + /* + decrement the PT Entry Count by the number + of entries we've cleared in this pass + */ + pMMUHeap->ui32PTETotalUsable -= i; + } + else + { + /* decrement the PT Entry Count by a page's worth of entries */ + pMMUHeap->ui32PTETotalUsable -= pMMUHeap->ui32PTNumEntriesUsable; + } + + if(bOSFreePT) + { + /* free the pt info */ + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 
+ sizeof(MMU_PT_INFO), + ppsPTInfoList[ui32PTIndex], + IMG_NULL); + ppsPTInfoList[ui32PTIndex] = IMG_NULL; + } + } + else + { + /* decrement the PT Entry Count by a page's worth of usable entries */ + pMMUHeap->ui32PTETotalUsable -= pMMUHeap->ui32PTNumEntriesUsable; + } + + PDUMPCOMMENT("Finished free page table (page count == %08X)", pMMUHeap->ui32PageTableCount); +} + +/*! +****************************************************************************** + FUNCTION: _DeferredFreePageTables + + PURPOSE: Free the page tables associated with an MMU. + + PARAMETERS: In: pMMUHeap - the mmu + RETURNS: None +******************************************************************************/ +static IMG_VOID +_DeferredFreePageTables (MMU_HEAP *pMMUHeap) +{ + IMG_UINT32 i; +#if defined(FIX_HW_BRN_31620) + MMU_CONTEXT *psMMUContext = pMMUHeap->psMMUContext; + IMG_BOOL bInvalidateDirectoryCache = IMG_FALSE; + IMG_UINT32 ui32PDIndex; + IMG_UINT32 *pui32Tmp; + IMG_UINT32 j; +#endif +#if defined(PDUMP) + PDUMPCOMMENT("Free PTs (MMU Context ID == %u, PDBaseIndex == %u, PT count == 0x%x)", + pMMUHeap->psMMUContext->ui32PDumpMMUContextID, + pMMUHeap->ui32PDBaseIndex, + pMMUHeap->ui32PageTableCount); +#endif +#if defined(FIX_HW_BRN_31620) + for(i=0; i<pMMUHeap->ui32PageTableCount; i++) + { + ui32PDIndex = (pMMUHeap->ui32PDBaseIndex + i); + + if (psMMUContext->apsPTInfoList[ui32PDIndex]) + { + if (psMMUContext->apsPTInfoList[ui32PDIndex]->PTPageCpuVAddr) + { + /* + * We have to do this to setup the dummy page as + * not all heaps are PD cache size or aligned + */ + for (j=0;j<SGX_MMU_PT_SIZE;j++) + { + pui32Tmp = (IMG_UINT32 *)psMMUContext->apsPTInfoList[ui32PDIndex]->PTPageCpuVAddr; + BRN31620InvalidatePageTableEntry(psMMUContext, ui32PDIndex, j, &pui32Tmp[j]); + } + } + /* Free the PT and NULL's out the PTInfo */ + if (BRN31620FreePageTable(pMMUHeap, ui32PDIndex) == IMG_TRUE) + { + bInvalidateDirectoryCache = IMG_TRUE; + } + } + } + + /* + * Due to freeing PT's in chunks we might need to flush the PT cache + * rather then the directory cache + */ + if 
(bInvalidateDirectoryCache) + { + MMU_InvalidateDirectoryCache(pMMUHeap->psMMUContext->psDevInfo); + } + else + { + MMU_InvalidatePageTableCache(pMMUHeap->psMMUContext->psDevInfo); + } +#else + for(i=0; iui32PageTableCount; i++) + { + _DeferredFreePageTable(pMMUHeap, i, IMG_TRUE); + } + MMU_InvalidateDirectoryCache(pMMUHeap->psMMUContext->psDevInfo); +#endif +} + + +/*! +****************************************************************************** + FUNCTION: _DeferredAllocPagetables + + PURPOSE: allocates page tables at time of allocation + + PARAMETERS: In: pMMUHeap - the mmu heap + DevVAddr - devVAddr of allocation + ui32Size - size of allocation + RETURNS: IMG_TRUE - Success + IMG_FALSE - Failed +******************************************************************************/ +static IMG_BOOL +_DeferredAllocPagetables(MMU_HEAP *pMMUHeap, IMG_DEV_VIRTADDR DevVAddr, IMG_UINT32 ui32Size) +{ + IMG_UINT32 ui32PageTableCount; + IMG_UINT32 ui32PDIndex; + IMG_UINT32 i; + IMG_UINT32 *pui32PDEntry; + MMU_PT_INFO **ppsPTInfoList; + SYS_DATA *psSysData; + IMG_DEV_VIRTADDR sHighDevVAddr; +#if defined(FIX_HW_BRN_31620) + IMG_BOOL bFlushSystemCache = IMG_FALSE; + IMG_BOOL bSharedPT = IMG_FALSE; + IMG_DEV_VIRTADDR sDevVAddrRequestStart; + IMG_DEV_VIRTADDR sDevVAddrRequestEnd; + IMG_UINT32 ui32PDRequestStart; + IMG_UINT32 ui32PDRequestEnd; + IMG_UINT32 ui32ModifiedCachelines[BRN31620_CACHE_FLUSH_INDEX_SIZE]; +#endif + + /* Check device linear address */ +#if SGX_FEATURE_ADDRESS_SPACE_SIZE < 32 + PVR_ASSERT(DevVAddr.uiAddr < (1<> pMMUHeap->ui32PDShift; + + /* how many PDs does the allocation occupy? 
*/ + /* first check for overflows */ + if((UINT32_MAX_VALUE - DevVAddr.uiAddr) + < (ui32Size + pMMUHeap->ui32DataPageMask + pMMUHeap->ui32PTMask)) + { + /* detected overflow, clamp to highest address, reserve all PDs */ + sHighDevVAddr.uiAddr = UINT32_MAX_VALUE; + ui32PageTableCount = 1024; + } + else + { + sHighDevVAddr.uiAddr = DevVAddr.uiAddr + + ui32Size + + pMMUHeap->ui32DataPageMask + + pMMUHeap->ui32PTMask; + + ui32PageTableCount = sHighDevVAddr.uiAddr >> pMMUHeap->ui32PDShift; + } + + + /* Fix allocation of last 4MB */ + if (ui32PageTableCount == 0) + ui32PageTableCount = 1024; + +#if defined(FIX_HW_BRN_31620) + for (i=0;i> pMMUHeap->ui32PDShift; + } + + ui32PDIndex = DevVAddr.uiAddr >> pMMUHeap->ui32PDShift; + + /* Fix allocation of last 4MB */ + if (ui32PageTableCount == 0) + ui32PageTableCount = 1024; +#endif + + ui32PageTableCount -= ui32PDIndex; + + /* get the PD CPUVAddr base and advance to the first entry */ + pui32PDEntry = (IMG_UINT32*)pMMUHeap->psMMUContext->pvPDCpuVAddr; + pui32PDEntry += ui32PDIndex; + + /* and advance to the first PT info list */ + ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex]; + +#if defined(PDUMP) + { + IMG_UINT32 ui32Flags = 0; + + /* pdump the PD Page modifications */ + if( MMU_IsHeapShared(pMMUHeap) ) + { + ui32Flags |= PDUMP_FLAGS_CONTINUOUS; + } + PDUMPCOMMENTWITHFLAGS(ui32Flags, "Alloc PTs (MMU Context ID == %u, PDBaseIndex == %u, Size == 0x%x, Shared = %s)", + pMMUHeap->psMMUContext->ui32PDumpMMUContextID, + pMMUHeap->ui32PDBaseIndex, + ui32Size, + MMU_IsHeapShared(pMMUHeap)?"True":"False"); + PDUMPCOMMENTWITHFLAGS(ui32Flags, "Alloc page table (page count == %08X)", ui32PageTableCount); + PDUMPCOMMENTWITHFLAGS(ui32Flags, "Page directory mods (page count == %08X)", ui32PageTableCount); + } +#endif + /* walk the psPTInfoList to see what needs allocating: */ + for(i=0; ipsMMUContext->apsPTInfoListSave[ui32PDIndex + i]) + { + /* Only make this PTInfo "live" if it's requested */ + if (((ui32PDIndex + 
i) >= ui32PDRequestStart) && ((ui32PDIndex + i) <= ui32PDRequestEnd)) + { + IMG_UINT32 ui32PDCacheLine = (ui32PDIndex + i) >> BRN31620_PDES_PER_CACHE_LINE_SHIFT; + + ppsPTInfoList[i] = pMMUHeap->psMMUContext->apsPTInfoListSave[ui32PDIndex + i]; + pMMUHeap->psMMUContext->apsPTInfoListSave[ui32PDIndex + i] = IMG_NULL; + + pMMUHeap->psMMUContext->ui32PDCacheRangeRefCount[ui32PDCacheLine]++; + } + } + else + { +#endif + OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof (MMU_PT_INFO), + (IMG_VOID **)&ppsPTInfoList[i], IMG_NULL, + "MMU Page Table Info"); + if (ppsPTInfoList[i] == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "_DeferredAllocPagetables: ERROR call to OSAllocMem failed")); + return IMG_FALSE; + } + OSMemSet (ppsPTInfoList[i], 0, sizeof(MMU_PT_INFO)); +#if defined(FIX_HW_BRN_31620) + } +#endif + } +#if defined(FIX_HW_BRN_31620) + /* Only try to allocate if ppsPTInfoList[i] is valid */ + if (ppsPTInfoList[i]) + { +#endif + if(ppsPTInfoList[i]->hPTPageOSMemHandle == IMG_NULL + && ppsPTInfoList[i]->PTPageCpuVAddr == IMG_NULL) + { + IMG_DEV_PHYADDR sDevPAddr = { 0 }; +#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE) + IMG_UINT32 *pui32Tmp; + IMG_UINT32 j; +#else +#if !defined(FIX_HW_BRN_31620) + /* no page table has been allocated so allocate one */ + PVR_ASSERT(pui32PDEntry[i] == 0); +#endif +#endif + if(_AllocPageTableMemory (pMMUHeap, ppsPTInfoList[i], &sDevPAddr) != IMG_TRUE) + { + PVR_DPF((PVR_DBG_ERROR, "_DeferredAllocPagetables: ERROR call to _AllocPageTableMemory failed")); + return IMG_FALSE; + } +#if defined(FIX_HW_BRN_31620) + bFlushSystemCache = IMG_TRUE; + /* Bump up the page table count if required */ + { + IMG_UINT32 ui32PD; + IMG_UINT32 ui32PDCacheLine; + IMG_UINT32 ui32PDBitMaskIndex; + IMG_UINT32 ui32PDBitMaskShift; + + ui32PD = ui32PDIndex + i; + ui32PDCacheLine = ui32PD >> BRN31620_PDES_PER_CACHE_LINE_SHIFT; + ui32PDBitMaskIndex = ui32PDCacheLine >> BRN31620_CACHE_FLUSH_BITS_SHIFT; + ui32PDBitMaskShift = ui32PDCacheLine & BRN31620_CACHE_FLUSH_BITS_MASK; + 
ui32ModifiedCachelines[ui32PDBitMaskIndex] |= 1 << ui32PDBitMaskShift; + + /* Add 1 to ui32PD as we want the count, not a range */ + if ((pMMUHeap->ui32PDBaseIndex + pMMUHeap->ui32PageTableCount) < (ui32PD + 1)) + { + pMMUHeap->ui32PageTableCount = (ui32PD + 1) - pMMUHeap->ui32PDBaseIndex; + } + + if (((ui32PDIndex + i) >= ui32PDRequestStart) && ((ui32PDIndex + i) <= ui32PDRequestEnd)) + { + pMMUHeap->psMMUContext->ui32PDCacheRangeRefCount[ui32PDCacheLine]++; + } + } +#endif + switch(pMMUHeap->psDevArena->DevMemHeapType) + { + case DEVICE_MEMORY_HEAP_SHARED : + case DEVICE_MEMORY_HEAP_SHARED_EXPORTED : + { + /* insert Page Table into all memory contexts */ + MMU_CONTEXT *psMMUContext = (MMU_CONTEXT*)pMMUHeap->psMMUContext->psDevInfo->pvMMUContextList; +#if defined(SUPPORT_PDUMP_MULTI_PROCESS) + PVRSRV_SGXDEV_INFO *psDevInfo = psMMUContext->psDevInfo; +#endif + while(psMMUContext) + { + MakeKernelPageReadWrite(psMMUContext->pvPDCpuVAddr); + /* get the PD CPUVAddr base and advance to the first entry */ + pui32PDEntry = (IMG_UINT32*)psMMUContext->pvPDCpuVAddr; + pui32PDEntry += ui32PDIndex; + + /* insert the page, specify the data page size and make the pde valid */ + pui32PDEntry[i] = (IMG_UINT32)(sDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT) + | pMMUHeap->ui32PDEPageSizeCtrl + | SGX_MMU_PDE_VALID; + MakeKernelPageReadOnly(psMMUContext->pvPDCpuVAddr); + #if defined(PDUMP) + /* pdump the PD Page modifications */ + #if defined(SUPPORT_PDUMP_MULTI_PROCESS) + if(psMMUContext->bPDumpActive) + #endif + { +#if defined(SUPPORT_PDUMP_MULTI_PROCESS) + /* + Any modification of the uKernel memory context + needs to be PDumped when we're multi-process + */ + IMG_UINT32 ui32HeapFlags = ( psMMUContext->sPDDevPAddr.uiAddr == psDevInfo->sKernelPDDevPAddr.uiAddr ) ? 
PDUMP_FLAGS_PERSISTENT : 0; +#else + IMG_UINT32 ui32HeapFlags = 0; +#endif + PDUMPPDENTRIES(&pMMUHeap->sMMUAttrib, psMMUContext->hPDOSMemHandle, (IMG_VOID*)&pui32PDEntry[i], sizeof(IMG_UINT32), ui32HeapFlags, IMG_FALSE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG); + } + #endif /* PDUMP */ + /* advance to next context */ + psMMUContext = psMMUContext->psNext; + } +#if defined(FIX_HW_BRN_31620) + bSharedPT = IMG_TRUE; +#endif + break; + } + case DEVICE_MEMORY_HEAP_PERCONTEXT : + case DEVICE_MEMORY_HEAP_KERNEL : + { + MakeKernelPageReadWrite(pMMUHeap->psMMUContext->pvPDCpuVAddr); + /* insert Page Table into only this memory context */ + pui32PDEntry[i] = (IMG_UINT32)(sDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT) + | pMMUHeap->ui32PDEPageSizeCtrl + | SGX_MMU_PDE_VALID; + MakeKernelPageReadOnly(pMMUHeap->psMMUContext->pvPDCpuVAddr); + /* pdump the PD Page modifications */ + PDUMPPDENTRIES(&pMMUHeap->sMMUAttrib, pMMUHeap->psMMUContext->hPDOSMemHandle, (IMG_VOID*)&pui32PDEntry[i], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG); + break; + } + default: + { + PVR_DPF((PVR_DBG_ERROR, "_DeferredAllocPagetables: ERROR invalid heap type")); + return IMG_FALSE; + } + } + +#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS) + /* This is actually not to do with multiple mem contexts, but to do with the directory cache. 
+ In the 1 context implementation of the MMU, the directory "cache" is actually a copy of the + page directory memory, and requires updating whenever the page directory changes, even if there + was no previous value in a particular entry + */ + MMU_InvalidateDirectoryCache(pMMUHeap->psMMUContext->psDevInfo); +#endif +#if defined(FIX_HW_BRN_31620) + /* If this PT is not in the requested range then save it and null out the main PTInfo */ + if (((ui32PDIndex + i) < ui32PDRequestStart) || ((ui32PDIndex + i) > ui32PDRequestEnd)) + { + pMMUHeap->psMMUContext->apsPTInfoListSave[ui32PDIndex + i] = ppsPTInfoList[i]; + ppsPTInfoList[i] = IMG_NULL; + } +#endif + } + else + { +#if !defined(FIX_HW_BRN_31620) + /* already have an allocated PT */ + PVR_ASSERT(pui32PDEntry[i] != 0); +#endif + } +#if defined(FIX_HW_BRN_31620) + } +#endif + } + + #if defined(SGX_FEATURE_SYSTEM_CACHE) + #if defined(FIX_HW_BRN_31620) + /* This function might not allocate any new PT's so check before flushing */ + if (bFlushSystemCache) + #endif + { + MMU_InvalidateSystemLevelCache(pMMUHeap->psMMUContext->psDevInfo); + } + #endif /* SGX_FEATURE_SYSTEM_CACHE */ + + #if defined(FIX_HW_BRN_31620) + /* Handle the last 4MB roll over */ + sHighDevVAddr.uiAddr = sHighDevVAddr.uiAddr - 1; + + /* Update our PD flush mask if required */ + if (bFlushSystemCache) + { + MMU_CONTEXT *psMMUContext; + + if (bSharedPT) + { + MMU_CONTEXT *psMMUContext = (MMU_CONTEXT*)pMMUHeap->psMMUContext->psDevInfo->pvMMUContextList; + + while(psMMUContext) + { + for (i=0;i<BRN31620_CACHE_FLUSH_INDEX_SIZE;i++) + { + psMMUContext->ui32PDChangeMask[i] |= ui32ModifiedCachelines[i]; + } + + /* advance to next context */ + psMMUContext = psMMUContext->psNext; + } + } + else + { + for (i=0;i<BRN31620_CACHE_FLUSH_INDEX_SIZE;i++) + { + pMMUHeap->psMMUContext->ui32PDChangeMask[i] |= ui32ModifiedCachelines[i]; + } + } + + /* + * Always hook up the dummy page when we allocate a new range of PTs. + * It might be this is overwritten before the SGX access the dummy page + * but we don't care, it's a lot simpler to add this logic here. 
+ */ + psMMUContext = pMMUHeap->psMMUContext; + for (i=0;ipsDevInfo; + MMU_PT_INFO *psTempPTInfo = IMG_NULL; + IMG_UINT32 *pui32Tmp; + + ui32PDIndex = (((i * BRN31620_CACHE_FLUSH_BITS_SIZE) + j) * BRN31620_PDES_PER_CACHE_LINE_SIZE) + BRN31620_DUMMY_PDE_INDEX; + + /* The PT for the dummy page might not be "live". If not get it from the saved pointer */ + if (psMMUContext->apsPTInfoList[ui32PDIndex]) + { + psTempPTInfo = psMMUContext->apsPTInfoList[ui32PDIndex]; + } + else + { + psTempPTInfo = psMMUContext->apsPTInfoListSave[ui32PDIndex]; + } + + PVR_ASSERT(psTempPTInfo != IMG_NULL); + + MakeKernelPageReadWrite(psTempPTInfo->PTPageCpuVAddr); + pui32Tmp = (IMG_UINT32 *) psTempPTInfo->PTPageCpuVAddr; + PVR_ASSERT(pui32Tmp != IMG_NULL); + pui32Tmp[BRN31620_DUMMY_PTE_INDEX] = (psDevInfo->sBRN31620DummyPageDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT) + | SGX_MMU_PTE_DUMMY_PAGE + | SGX_MMU_PTE_READONLY + | SGX_MMU_PTE_VALID; + MakeKernelPageReadOnly(psTempPTInfo->PTPageCpuVAddr); + PDUMPCOMMENT("BRN31620 Dump PTE for dummy page after wireing up new PT"); + PDUMPMEMPTENTRIES(&pMMUHeap->sMMUAttrib, psTempPTInfo->hPTPageOSMemHandle, (IMG_VOID *) &pui32Tmp[BRN31620_DUMMY_PTE_INDEX], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PT_UNIQUETAG, PDUMP_PT_UNIQUETAG); + } + } + } + } + #endif + + return IMG_TRUE; +} + + +#if defined(PDUMP) +/*! + * FUNCTION: MMU_GetPDumpContextID + * + * RETURNS: pdump MMU context ID + */ +IMG_UINT32 MMU_GetPDumpContextID(IMG_HANDLE hDevMemContext) +{ + BM_CONTEXT *pBMContext = hDevMemContext; + PVR_ASSERT(pBMContext); + /* PRQA S 0505 1 */ /* PVR_ASSERT should catch NULL ptr */ + return pBMContext->psMMUContext->ui32PDumpMMUContextID; +} + +/*! + * FUNCTION: MMU_SetPDumpAttribs + * + * PURPOSE: Called from MMU_Initialise and MMU_Create. + * Sets up device-specific attributes for pdumping. + * FIXME: breaks variable size PTs. Really need separate per context + * and per heap attribs. 
+ * + * INPUT: psDeviceNode - used to access deviceID + * INPUT: ui32DataPageMask - data page mask + * INPUT: ui32PTSize - PT size + * + * OUTPUT: psMMUAttrib - pdump MMU attributes + * + * RETURNS: none + */ +#if defined(SGX_FEATURE_VARIABLE_MMU_PAGE_SIZE) +# error "FIXME: breaks variable size pagetables" +#endif +static IMG_VOID MMU_SetPDumpAttribs(PDUMP_MMU_ATTRIB *psMMUAttrib, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32DataPageMask, + IMG_UINT32 ui32PTSize) +{ + /* Sets up device ID, contains pdump memspace name */ + psMMUAttrib->sDevId = psDeviceNode->sDevId; + + psMMUAttrib->pszPDRegRegion = IMG_NULL; + psMMUAttrib->ui32DataPageMask = ui32DataPageMask; + + psMMUAttrib->ui32PTEValid = SGX_MMU_PTE_VALID; + psMMUAttrib->ui32PTSize = ui32PTSize; + psMMUAttrib->ui32PTEAlignShift = SGX_MMU_PTE_ADDR_ALIGNSHIFT; + + psMMUAttrib->ui32PDEMask = SGX_MMU_PDE_ADDR_MASK; + psMMUAttrib->ui32PDEAlignShift = SGX_MMU_PDE_ADDR_ALIGNSHIFT; +} +#endif /* PDUMP */ + +/*! +****************************************************************************** + FUNCTION: MMU_Initialise + + PURPOSE: Called from BM_CreateContext. + Allocates the top level Page Directory 4k Page for the new context. 
+ + PARAMETERS: None + RETURNS: PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR +MMU_Initialise (PVRSRV_DEVICE_NODE *psDeviceNode, MMU_CONTEXT **ppsMMUContext, IMG_DEV_PHYADDR *psPDDevPAddr) +{ + IMG_UINT32 *pui32Tmp; + IMG_UINT32 i; + IMG_CPU_VIRTADDR pvPDCpuVAddr = IMG_NULL; + IMG_DEV_PHYADDR sPDDevPAddr = {0}; + IMG_CPU_PHYADDR sCpuPAddr; + MMU_CONTEXT *psMMUContext; + IMG_HANDLE hPDOSMemHandle = IMG_NULL; + SYS_DATA *psSysData; + PVRSRV_SGXDEV_INFO *psDevInfo; + PVRSRV_ERROR eError = PVRSRV_OK; +#if defined(PDUMP) + PDUMP_MMU_ATTRIB sMMUAttrib; +#endif + PVR_DPF ((PVR_DBG_MESSAGE, "MMU_Initialise")); + + SysAcquireData(&psSysData); +#if defined(PDUMP) + /* Note: these attribs are on the stack, used only to pdump the MMU context + * creation. */ + MMU_SetPDumpAttribs(&sMMUAttrib, psDeviceNode, + SGX_MMU_PAGE_MASK, + SGX_MMU_PT_SIZE * sizeof(IMG_UINT32)); +#endif + + OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof (MMU_CONTEXT), + (IMG_VOID **)&psMMUContext, IMG_NULL, + "MMU Context"); + if (psMMUContext == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to OSAllocMem failed")); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + OSMemSet (psMMUContext, 0, sizeof(MMU_CONTEXT)); + + /* stick the devinfo in the context for subsequent use */ + psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice; + psMMUContext->psDevInfo = psDevInfo; + + /* record device node for subsequent use */ + psMMUContext->psDeviceNode = psDeviceNode; + + /* allocate 4k page directory page for the new context */ + if(psDeviceNode->psLocalDevMemArena == IMG_NULL) + { + if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY, + SGX_MMU_PAGE_SIZE, + SGX_MMU_PAGE_SIZE, + IMG_NULL, + 0, + IMG_NULL, + &pvPDCpuVAddr, + &hPDOSMemHandle) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to OSAllocPages failed")); + eError = PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES; + goto exit_setting_values; + } + + 
if(pvPDCpuVAddr) + { + sCpuPAddr = OSMapLinToCPUPhys(hPDOSMemHandle, + pvPDCpuVAddr); + } + else + { + /* This is not used in all cases, since not all ports currently + * support OSMemHandleToCpuPAddr */ + sCpuPAddr = OSMemHandleToCpuPAddr(hPDOSMemHandle, 0); + } + sPDDevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr); + + #if PAGE_TEST + PageTest(pvPDCpuVAddr, sPDDevPAddr); + #endif + +#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE) + /* Allocate dummy PT and Data pages for the first context to be created */ + if(!psDevInfo->pvMMUContextList) + { + /* Dummy PT page */ + if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY, + SGX_MMU_PAGE_SIZE, + SGX_MMU_PAGE_SIZE, + IMG_NULL, + 0, + IMG_NULL, + &psDevInfo->pvDummyPTPageCpuVAddr, + &psDevInfo->hDummyPTPageOSMemHandle) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to OSAllocPages failed")); + eError = PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES; + goto exit_setting_values; + } + + if(psDevInfo->pvDummyPTPageCpuVAddr) + { + sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->hDummyPTPageOSMemHandle, + psDevInfo->pvDummyPTPageCpuVAddr); + } + else + { + /* This is not used in all cases, since not all ports currently + * support OSMemHandleToCpuPAddr */ + sCpuPAddr = OSMemHandleToCpuPAddr(psDevInfo->hDummyPTPageOSMemHandle, 0); + } + psDevInfo->sDummyPTDevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr); + + /* Dummy Data page */ + if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY, + SGX_MMU_PAGE_SIZE, + SGX_MMU_PAGE_SIZE, + IMG_NULL, + 0, + IMG_NULL, + &psDevInfo->pvDummyDataPageCpuVAddr, + &psDevInfo->hDummyDataPageOSMemHandle) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to OSAllocPages failed")); + eError = PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES; + goto exit_setting_values; + } + + if(psDevInfo->pvDummyDataPageCpuVAddr) + { + sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->hDummyPTPageOSMemHandle, + psDevInfo->pvDummyDataPageCpuVAddr); + 
} + else + { + sCpuPAddr = OSMemHandleToCpuPAddr(psDevInfo->hDummyDataPageOSMemHandle, 0); + } + psDevInfo->sDummyDataDevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr); + } +#endif /* #if defined(SUPPORT_SGX_MMU_DUMMY_PAGE) */ +#if defined(FIX_HW_BRN_31620) + /* Allocate dummy Data pages for the first context to be created */ + if(!psDevInfo->pvMMUContextList) + { + IMG_UINT32 j; + /* Allocate dummy page */ + if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY, + SGX_MMU_PAGE_SIZE, + SGX_MMU_PAGE_SIZE, + IMG_NULL, + 0, + IMG_NULL, + &psDevInfo->pvBRN31620DummyPageCpuVAddr, + &psDevInfo->hBRN31620DummyPageOSMemHandle) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to OSAllocPages failed")); + eError = PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES; + goto exit_setting_values; + } + + /* Get a physical address */ + if(psDevInfo->pvBRN31620DummyPageCpuVAddr) + { + sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->hBRN31620DummyPageOSMemHandle, + psDevInfo->pvBRN31620DummyPageCpuVAddr); + } + else + { + sCpuPAddr = OSMemHandleToCpuPAddr(psDevInfo->hBRN31620DummyPageOSMemHandle, 0); + } + + pui32Tmp = (IMG_UINT32 *)psDevInfo->pvBRN31620DummyPageCpuVAddr; + for(j=0; j<(SGX_MMU_PAGE_SIZE/4); j++) + { + pui32Tmp[j] = BRN31620_DUMMY_PAGE_SIGNATURE; + } + + psDevInfo->sBRN31620DummyPageDevPAddr = SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr); + PDUMPMALLOCPAGETABLE(&psDeviceNode->sDevId, psDevInfo->hBRN31620DummyPageOSMemHandle, 0, psDevInfo->pvBRN31620DummyPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, PDUMP_PT_UNIQUETAG); + + /* Allocate dummy PT */ + if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY, + SGX_MMU_PAGE_SIZE, + SGX_MMU_PAGE_SIZE, + IMG_NULL, + 0, + IMG_NULL, + &psDevInfo->pvBRN31620DummyPTCpuVAddr, + &psDevInfo->hBRN31620DummyPTOSMemHandle) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to OSAllocPages failed")); + eError = PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES; + goto 
exit_setting_values; + } + + /* Get a physical address */ + if(psDevInfo->pvBRN31620DummyPTCpuVAddr) + { + sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->hBRN31620DummyPTOSMemHandle, + psDevInfo->pvBRN31620DummyPTCpuVAddr); + } + else + { + sCpuPAddr = OSMemHandleToCpuPAddr(psDevInfo->hBRN31620DummyPTOSMemHandle, 0); + } + + OSMemSet(psDevInfo->pvBRN31620DummyPTCpuVAddr,0,SGX_MMU_PAGE_SIZE); + psDevInfo->sBRN31620DummyPTDevPAddr = SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr); + PDUMPMALLOCPAGETABLE(&psDeviceNode->sDevId, psDevInfo->hBRN31620DummyPTOSMemHandle, 0, psDevInfo->pvBRN31620DummyPTCpuVAddr, SGX_MMU_PAGE_SIZE, 0, PDUMP_PT_UNIQUETAG); + } +#endif + } + else + { + /* + We cannot use IMG_SYS_PHYADDR here, as that is 64-bit for 32-bit PAE builds. + The physical address in this call to RA_Alloc is specifically the SysPAddr + of local (card) space, and it is highly unlikely we would ever need to + support > 4GB of local (card) memory (this does assume that such local + memory will be mapped into System physical memory space at a low address so + that any and all local memory exists within the 4GB SYSPAddr range). 
+ */ + IMG_UINTPTR_T uiLocalPAddr; + IMG_SYS_PHYADDR sSysPAddr; + + /* allocate from the device's local memory arena */ + if(RA_Alloc(psDeviceNode->psLocalDevMemArena, + SGX_MMU_PAGE_SIZE, + IMG_NULL, + IMG_NULL, + 0, + SGX_MMU_PAGE_SIZE, + 0, + IMG_NULL, + 0, + &uiLocalPAddr)!= IMG_TRUE) + { + PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to RA_Alloc failed")); + eError = PVRSRV_ERROR_FAILED_TO_ALLOC_VIRT_MEMORY; + goto exit_setting_values; + } + + /* Munge the local PAddr back into the SysPAddr */ + sSysPAddr.uiAddr = uiLocalPAddr; + + /* derive the CPU virtual address */ + sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr); + sPDDevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysPAddr); + pvPDCpuVAddr = OSMapPhysToLin(sCpuPAddr, + SGX_MMU_PAGE_SIZE, + PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY, + &hPDOSMemHandle); + if(!pvPDCpuVAddr) + { + PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR failed to map page tables")); + eError = PVRSRV_ERROR_FAILED_TO_MAP_PAGE_TABLE; + goto exit_setting_values; + } + + #if PAGE_TEST + PageTest(pvPDCpuVAddr, sPDDevPAddr); + #endif + +#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE) + /* Allocate dummy PT and Data pages for the first context to be created */ + if(!psDevInfo->pvMMUContextList) + { + /* Dummy PT page */ + if(RA_Alloc(psDeviceNode->psLocalDevMemArena, + SGX_MMU_PAGE_SIZE, + IMG_NULL, + IMG_NULL, + 0, + SGX_MMU_PAGE_SIZE, + 0, + IMG_NULL, + 0, + &uiLocalPAddr)!= IMG_TRUE) + { + PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to RA_Alloc failed")); + eError = PVRSRV_ERROR_FAILED_TO_ALLOC_VIRT_MEMORY; + goto exit_setting_values; + } + + /* Munge the local PAddr back into the SysPAddr */ + sSysPAddr.uiAddr = uiLocalPAddr; + + /* derive the CPU virtual address */ + sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr); + psDevInfo->sDummyPTDevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysPAddr); + psDevInfo->pvDummyPTPageCpuVAddr = OSMapPhysToLin(sCpuPAddr, + SGX_MMU_PAGE_SIZE, + 
PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY, + &psDevInfo->hDummyPTPageOSMemHandle); + if(!psDevInfo->pvDummyPTPageCpuVAddr) + { + PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR failed to map page tables")); + eError = PVRSRV_ERROR_FAILED_TO_MAP_PAGE_TABLE; + goto exit_setting_values; + } + + /* Dummy Data page */ + if(RA_Alloc(psDeviceNode->psLocalDevMemArena, + SGX_MMU_PAGE_SIZE, + IMG_NULL, + IMG_NULL, + 0, + SGX_MMU_PAGE_SIZE, + 0, + IMG_NULL, + 0, + &uiLocalPAddr)!= IMG_TRUE) + { + PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to RA_Alloc failed")); + eError = PVRSRV_ERROR_FAILED_TO_ALLOC_VIRT_MEMORY; + goto exit_setting_values; + } + + /* Munge the local PAddr back into the SysPAddr */ + sSysPAddr.uiAddr = uiLocalPAddr; + + /* derive the CPU virtual address */ + sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr); + psDevInfo->sDummyDataDevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysPAddr); + psDevInfo->pvDummyDataPageCpuVAddr = OSMapPhysToLin(sCpuPAddr, + SGX_MMU_PAGE_SIZE, + PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY, + &psDevInfo->hDummyDataPageOSMemHandle); + if(!psDevInfo->pvDummyDataPageCpuVAddr) + { + PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR failed to map page tables")); + eError = PVRSRV_ERROR_FAILED_TO_MAP_PAGE_TABLE; + goto exit_setting_values; + } + } +#endif /* #if defined(SUPPORT_SGX_MMU_DUMMY_PAGE) */ +#if defined(FIX_HW_BRN_31620) + /* Allocate dummy PT and Data pages for the first context to be created */ + if(!psDevInfo->pvMMUContextList) + { + IMG_UINT32 j; + /* Allocate dummy page */ + if(RA_Alloc(psDeviceNode->psLocalDevMemArena, + SGX_MMU_PAGE_SIZE, + IMG_NULL, + IMG_NULL, + 0, + SGX_MMU_PAGE_SIZE, + 0, + IMG_NULL, + 0, + &uiLocalPAddr)!= IMG_TRUE) + { + PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to RA_Alloc failed")); + eError = PVRSRV_ERROR_FAILED_TO_ALLOC_VIRT_MEMORY; + goto exit_setting_values; + } + + /* Munge the local PAddr back into the SysPAddr */ + sSysPAddr.uiAddr = uiLocalPAddr; + + /* derive 
the CPU virtual address */ + sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr); + psDevInfo->sBRN31620DummyPageDevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysPAddr); + psDevInfo->pvBRN31620DummyPageCpuVAddr = OSMapPhysToLin(sCpuPAddr, + SGX_MMU_PAGE_SIZE, + PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY, + &psDevInfo->hBRN31620DummyPageOSMemHandle); + if(!psDevInfo->pvBRN31620DummyPageCpuVAddr) + { + PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR failed to map page tables")); + eError = PVRSRV_ERROR_FAILED_TO_MAP_PAGE_TABLE; + goto exit_setting_values; + } + + MakeKernelPageReadWrite(psDevInfo->pvBRN31620DummyPageCpuVAddr); + pui32Tmp = (IMG_UINT32 *)psDevInfo->pvBRN31620DummyPageCpuVAddr; + for(j=0; j<(SGX_MMU_PAGE_SIZE/4); j++) + { + pui32Tmp[j] = BRN31620_DUMMY_PAGE_SIGNATURE; + } + MakeKernelPageReadOnly(psDevInfo->pvBRN31620DummyPageCpuVAddr); + PDUMPMALLOCPAGETABLE(&psDeviceNode->sDevId, psDevInfo->hBRN31620DummyPageOSMemHandle, 0, psDevInfo->pvBRN31620DummyPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, PDUMP_PT_UNIQUETAG); + + /* Allocate dummy PT */ + if(RA_Alloc(psDeviceNode->psLocalDevMemArena, + SGX_MMU_PAGE_SIZE, + IMG_NULL, + IMG_NULL, + 0, + SGX_MMU_PAGE_SIZE, + 0, + IMG_NULL, + 0, + &uiLocalPAddr)!= IMG_TRUE) + { + PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to RA_Alloc failed")); + eError = PVRSRV_ERROR_FAILED_TO_ALLOC_VIRT_MEMORY; + goto exit_setting_values; + } + + /* Munge the local PAddr back into the SysPAddr */ + sSysPAddr.uiAddr = uiLocalPAddr; + + /* derive the CPU virtual address */ + sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr); + psDevInfo->sBRN31620DummyPTDevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysPAddr); + psDevInfo->pvBRN31620DummyPTCpuVAddr = OSMapPhysToLin(sCpuPAddr, + SGX_MMU_PAGE_SIZE, + PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY, + &psDevInfo->hBRN31620DummyPTOSMemHandle); + + if(!psDevInfo->pvBRN31620DummyPTCpuVAddr) + { + PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR failed to map page tables")); + 
eError = PVRSRV_ERROR_FAILED_TO_MAP_PAGE_TABLE; + goto exit_setting_values; + } + + OSMemSet(psDevInfo->pvBRN31620DummyPTCpuVAddr,0,SGX_MMU_PAGE_SIZE); + PDUMPMALLOCPAGETABLE(&psDeviceNode->sDevId, psDevInfo->hBRN31620DummyPTOSMemHandle, 0, psDevInfo->pvBRN31620DummyPTCpuVAddr, SGX_MMU_PAGE_SIZE, 0, PDUMP_PT_UNIQUETAG); + } +#endif /* #if defined(FIX_HW_BRN_31620) */ + } + +#if defined(FIX_HW_BRN_31620) + if (!psDevInfo->pvMMUContextList) + { + /* Save the kernel MMU context which is always the 1st to be created */ + psDevInfo->hKernelMMUContext = psMMUContext; + PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: saving kernel mmu context: %p", psMMUContext)); + } +#endif + +#if defined(PDUMP) +#if defined(SUPPORT_PDUMP_MULTI_PROCESS) + /* Find out if this context is for the active pdump client. + * If it is, need to ensure PD entries are pdumped whenever another + * process allocates from a shared heap. */ + { + PVRSRV_PER_PROCESS_DATA* psPerProc = PVRSRVFindPerProcessData(); + if(psPerProc == IMG_NULL) + { + /* changes to the kernel context PD/PTs should be pdumped */ + psMMUContext->bPDumpActive = IMG_TRUE; + } + else + { + psMMUContext->bPDumpActive = psPerProc->bPDumpActive; + } + } +#endif /* SUPPORT_PDUMP_MULTI_PROCESS */ + /* pdump the PD malloc */ + PDUMPCOMMENT("Alloc page directory for new MMU context (PDDevPAddr == 0x" DEVPADDR_FMT ")", sPDDevPAddr.uiAddr); + PDUMPMALLOCPAGETABLE(&psDeviceNode->sDevId, hPDOSMemHandle, 0, pvPDCpuVAddr, SGX_MMU_PAGE_SIZE, 0, PDUMP_PD_UNIQUETAG); +#endif /* PDUMP */ + +#ifdef SUPPORT_SGX_MMU_BYPASS + EnableHostAccess(psMMUContext); +#endif + + if (pvPDCpuVAddr) + { + pui32Tmp = (IMG_UINT32 *)pvPDCpuVAddr; + } + else + { + PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: pvPDCpuVAddr invalid")); + eError = PVRSRV_ERROR_INVALID_CPU_ADDR; + goto exit_setting_values; + } + + +#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE) + MakeKernelPageReadWrite(pvPDCpuVAddr); + /* wire-up the new PD to the dummy PT */ + for(i=0; 
isDummyPTDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT) + | SGX_MMU_PDE_PAGE_SIZE_4K + | SGX_MMU_PDE_VALID; + } + MakeKernelPageReadOnly(pvPDCpuVAddr); + + if(!psDevInfo->pvMMUContextList) + { + /* + if we've just allocated the dummy pages + wire up the dummy PT to the dummy data page + */ + MakeKernelPageReadWrite(psDevInfo->pvDummyPTPageCpuVAddr); + pui32Tmp = (IMG_UINT32 *)psDevInfo->pvDummyPTPageCpuVAddr; + for(i=0; isDummyDataDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT) + | SGX_MMU_PTE_VALID; + } + MakeKernelPageReadOnly(psDevInfo->pvDummyPTPageCpuVAddr); + /* pdump the Dummy PT Page */ + PDUMPCOMMENT("Dummy Page table contents"); + PDUMPMEMPTENTRIES(&sMMUAttrib, psDevInfo->hDummyPTPageOSMemHandle, psDevInfo->pvDummyPTPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG); + + /* + write a signature to the dummy data page + */ + MakeKernelPageReadWrite(psDevInfo->pvDummyDataPageCpuVAddr); + pui32Tmp = (IMG_UINT32 *)psDevInfo->pvDummyDataPageCpuVAddr; + for(i=0; i<(SGX_MMU_PAGE_SIZE/4); i++) + { + pui32Tmp[i] = DUMMY_DATA_PAGE_SIGNATURE; + } + MakeKernelPageReadOnly(psDevInfo->pvDummyDataPageCpuVAddr); + /* pdump the Dummy Data Page */ + PDUMPCOMMENT("Dummy Data Page contents"); + PDUMPMEMPTENTRIES(&sMMUAttrib, psDevInfo->hDummyDataPageOSMemHandle, psDevInfo->pvDummyDataPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG); + } +#else /* #if defined(SUPPORT_SGX_MMU_DUMMY_PAGE) */ + /* initialise the PD to invalid address state */ + MakeKernelPageReadWrite(pvPDCpuVAddr); + for(i=0; ibPDumpActive) +#endif /* SUPPORT_PDUMP_MULTI_PROCESS */ + { + /* pdump the PD Page */ + PDUMPCOMMENT("Page directory contents"); + PDUMPPDENTRIES(&sMMUAttrib, hPDOSMemHandle, pvPDCpuVAddr, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG); + } +#endif +#if defined(FIX_HW_BRN_31620) + { + IMG_UINT32 i; + IMG_UINT32 ui32PDCount = 0; + IMG_UINT32 *pui32PT; + pui32Tmp = (IMG_UINT32 *)pvPDCpuVAddr; + + 
PDUMPCOMMENT("BRN31620 Set up dummy PT"); + + MakeKernelPageReadWrite(psDevInfo->pvBRN31620DummyPTCpuVAddr); + pui32PT = (IMG_UINT32 *) psDevInfo->pvBRN31620DummyPTCpuVAddr; + pui32PT[BRN31620_DUMMY_PTE_INDEX] = (psDevInfo->sBRN31620DummyPageDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT) + | SGX_MMU_PTE_DUMMY_PAGE + | SGX_MMU_PTE_READONLY + | SGX_MMU_PTE_VALID; + MakeKernelPageReadOnly(psDevInfo->pvBRN31620DummyPTCpuVAddr); + +#if defined(PDUMP) + /* Dump initial contents */ + PDUMPCOMMENT("BRN31620 Dump dummy PT contents"); + PDUMPMEMPTENTRIES(&sMMUAttrib, psDevInfo->hBRN31620DummyPTOSMemHandle, psDevInfo->pvBRN31620DummyPTCpuVAddr, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG); + PDUMPCOMMENT("BRN31620 Dump dummy page contents"); + PDUMPMEMPTENTRIES(&sMMUAttrib, psDevInfo->hBRN31620DummyPageOSMemHandle, psDevInfo->pvBRN31620DummyPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG); + + /* Dump the wiring */ + for(i=0;ihBRN31620DummyPTOSMemHandle, &pui32PT[i], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG); + } +#endif + PDUMPCOMMENT("BRN31620 Dump PDE wire up"); + /* Walk the PD wireing up the PT's */ + for(i=0;isBRN31620DummyPTDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT) + | SGX_MMU_PDE_PAGE_SIZE_4K + | SGX_MMU_PDE_DUMMY_PAGE + | SGX_MMU_PDE_VALID; + MakeKernelPageReadOnly(pvPDCpuVAddr); + } + PDUMPMEMPTENTRIES(&sMMUAttrib, hPDOSMemHandle, (IMG_VOID *) &pui32Tmp[i], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PT_UNIQUETAG, PDUMP_PT_UNIQUETAG); + ui32PDCount++; + if (ui32PDCount == BRN31620_PDES_PER_CACHE_LINE_SIZE) + { + /* Reset PT count */ + ui32PDCount = 0; + } + } + + + /* pdump the Dummy PT Page */ + PDUMPCOMMENT("BRN31620 dummy Page table contents"); + PDUMPMEMPTENTRIES(&sMMUAttrib, psDevInfo->hBRN31620DummyPageOSMemHandle, psDevInfo->pvBRN31620DummyPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG); + } +#endif +#if defined(PDUMP) + /* 
pdump set MMU context */ + { + /* default MMU type is 1, 4k page */ + IMG_UINT32 ui32MMUType = 1; + + #if defined(SGX_FEATURE_36BIT_MMU) + ui32MMUType = 3; + #else + #if defined(SGX_FEATURE_VARIABLE_MMU_PAGE_SIZE) + ui32MMUType = 2; + #endif + #endif + + eError = PDumpSetMMUContext(PVRSRV_DEVICE_TYPE_SGX, + psDeviceNode->sDevId.pszPDumpDevName, + &psMMUContext->ui32PDumpMMUContextID, + ui32MMUType, + PDUMP_PT_UNIQUETAG, + hPDOSMemHandle, + pvPDCpuVAddr); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to PDumpSetMMUContext failed")); + goto exit_setting_values; + } + } + + /* PDump the context ID */ + PDUMPCOMMENT("Set MMU context complete (MMU Context ID == %u)", psMMUContext->ui32PDumpMMUContextID); +#endif + +#if defined(FIX_HW_BRN_31620) + for(i=0;iui32PDChangeMask[i] = 0; + } + + for(i=0;iui32PDCacheRangeRefCount[i] = 0; + } + + for(i=0;iapsPTInfoListSave[i] = IMG_NULL; + } +#endif + +exit_setting_values: + + /* store PD info in the MMU context */ + psMMUContext->pvPDCpuVAddr = pvPDCpuVAddr; + psMMUContext->sPDDevPAddr = sPDDevPAddr; + psMMUContext->hPDOSMemHandle = hPDOSMemHandle; + + /* Get some process information to aid debug */ + psMMUContext->ui32PID = OSGetCurrentProcessIDKM(); + psMMUContext->szName[0] = '\0'; + OSGetCurrentProcessNameKM(psMMUContext->szName, MMU_CONTEXT_NAME_SIZE); + + /* return context */ + *ppsMMUContext = psMMUContext; + + /* return the PD DevVAddr */ + *psPDDevPAddr = sPDDevPAddr; + + + /* add the new MMU context onto the list of MMU contexts */ + psMMUContext->psNext = (MMU_CONTEXT*)psDevInfo->pvMMUContextList; + psDevInfo->pvMMUContextList = (IMG_VOID*)psMMUContext; + +#ifdef SUPPORT_SGX_MMU_BYPASS + DisableHostAccess(psMMUContext); +#endif + + return eError; +} + +/*! +****************************************************************************** + FUNCTION: MMU_Finalise + + PURPOSE: Finalise the mmu module, deallocate all resources. 
+ + PARAMETERS: In: psMMUContext - MMU context to deallocate + RETURNS: None. +******************************************************************************/ +IMG_VOID +MMU_Finalise (MMU_CONTEXT *psMMUContext) +{ + IMG_UINT32 *pui32Tmp, i; + SYS_DATA *psSysData; + MMU_CONTEXT **ppsMMUContext; +#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE) || defined(FIX_HW_BRN_31620) + PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO*)psMMUContext->psDevInfo; + MMU_CONTEXT *psMMUContextList = (MMU_CONTEXT*)psDevInfo->pvMMUContextList; +#endif + + SysAcquireData(&psSysData); + +#if defined(PDUMP) + { + IMG_UINT32 ui32Flags = PDUMP_FLAGS_CONTINUOUS; + /* pdump the MMU context clear */ + PDUMPCOMMENT("Clear MMU context (MMU Context ID == %u)", psMMUContext->ui32PDumpMMUContextID); + PDUMPCLEARMMUCONTEXT(PVRSRV_DEVICE_TYPE_SGX, psMMUContext->psDeviceNode->sDevId.pszPDumpDevName, psMMUContext->ui32PDumpMMUContextID, 2); + + /* pdump the PD free */ + PDUMPCOMMENT("Free page directory (PDDevPAddr == 0x" DEVPADDR_FMT ")", + psMMUContext->sPDDevPAddr.uiAddr); + + PDUMPFREEPAGETABLE(&psMMUContext->psDeviceNode->sDevId, psMMUContext->hPDOSMemHandle, psMMUContext->pvPDCpuVAddr, SGX_MMU_PAGE_SIZE, ui32Flags, PDUMP_PT_UNIQUETAG); +#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE) + PDUMPFREEPAGETABLE(&psMMUContext->psDeviceNode->sDevId, psDevInfo->hDummyPTPageOSMemHandle, psDevInfo->pvDummyPTPageCpuVAddr, SGX_MMU_PAGE_SIZE, ui32Flags, PDUMP_PT_UNIQUETAG); + PDUMPFREEPAGETABLE(&psMMUContext->psDeviceNode->sDevId, psDevInfo->hDummyDataPageOSMemHandle, psDevInfo->pvDummyDataPageCpuVAddr, SGX_MMU_PAGE_SIZE, ui32Flags, PDUMP_PT_UNIQUETAG); +#endif + } +#endif /* PDUMP */ + + pui32Tmp = (IMG_UINT32 *)psMMUContext->pvPDCpuVAddr; + + if (pui32Tmp) + { + MakeKernelPageReadWrite(psMMUContext->pvPDCpuVAddr); + /* initialise the PD to invalid address state */ + for(i=0; ipvPDCpuVAddr); + } + + /* + free the PD: + depending on the specific system, the PD is allocated from system memory + or device local memory. 
For now, just look for at least a valid local heap/arena + */ + if(psMMUContext->psDeviceNode->psLocalDevMemArena == IMG_NULL) + { +#if defined(FIX_HW_BRN_31620) + PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO*)psMMUContext->psDevInfo; +#endif + MakeKernelPageReadWrite(psMMUContext->pvPDCpuVAddr); + OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY, + SGX_MMU_PAGE_SIZE, + psMMUContext->pvPDCpuVAddr, + psMMUContext->hPDOSMemHandle); + +#if defined(FIX_HW_BRN_31620) + /* If this is the _last_ MMU context it must be the uKernel */ + if (!psMMUContextList->psNext) + { + PDUMPFREEPAGETABLE(&psMMUContext->psDeviceNode->sDevId, psDevInfo->hBRN31620DummyPageOSMemHandle, psDevInfo->pvBRN31620DummyPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, PDUMP_PT_UNIQUETAG); + OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY, + SGX_MMU_PAGE_SIZE, + psDevInfo->pvBRN31620DummyPageCpuVAddr, + psDevInfo->hBRN31620DummyPageOSMemHandle); + + PDUMPFREEPAGETABLE(&psMMUContext->psDeviceNode->sDevId, psDevInfo->hBRN31620DummyPTOSMemHandle, psDevInfo->pvBRN31620DummyPTCpuVAddr, SGX_MMU_PAGE_SIZE, 0, PDUMP_PT_UNIQUETAG); + OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY, + SGX_MMU_PAGE_SIZE, + psDevInfo->pvBRN31620DummyPTCpuVAddr, + psDevInfo->hBRN31620DummyPTOSMemHandle); + + } +#endif +#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE) + /* if this is the last context free the dummy pages too */ + if(!psMMUContextList->psNext) + { + OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY, + SGX_MMU_PAGE_SIZE, + psDevInfo->pvDummyPTPageCpuVAddr, + psDevInfo->hDummyPTPageOSMemHandle); + OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY, + SGX_MMU_PAGE_SIZE, + psDevInfo->pvDummyDataPageCpuVAddr, + psDevInfo->hDummyDataPageOSMemHandle); + } +#endif + } + else + { + IMG_SYS_PHYADDR sSysPAddr; + IMG_CPU_PHYADDR sCpuPAddr; + + /* derive the system physical address */ + sCpuPAddr = OSMapLinToCPUPhys(psMMUContext->hPDOSMemHandle, + psMMUContext->pvPDCpuVAddr); + 
sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr); + + /* unmap the CPU mapping */ + OSUnMapPhysToLin(psMMUContext->pvPDCpuVAddr, + SGX_MMU_PAGE_SIZE, + PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY, + psMMUContext->hPDOSMemHandle); + /* and free the memory, Note that the cast to IMG_UINTPTR_T is ok as we're local mem. */ + RA_Free (psMMUContext->psDeviceNode->psLocalDevMemArena, (IMG_UINTPTR_T)sSysPAddr.uiAddr, IMG_FALSE); + +#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE) + /* if this is the last context free the dummy pages too */ + if(!psMMUContextList->psNext) + { + /* free the Dummy PT Page */ + sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->hDummyPTPageOSMemHandle, + psDevInfo->pvDummyPTPageCpuVAddr); + sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr); + + /* unmap the CPU mapping */ + OSUnMapPhysToLin(psDevInfo->pvDummyPTPageCpuVAddr, + SGX_MMU_PAGE_SIZE, + PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY, + psDevInfo->hDummyPTPageOSMemHandle); + /* and free the memory */ + RA_Free (psMMUContext->psDeviceNode->psLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE); + + /* free the Dummy Data Page */ + sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->hDummyDataPageOSMemHandle, + psDevInfo->pvDummyDataPageCpuVAddr); + sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr); + + /* unmap the CPU mapping */ + OSUnMapPhysToLin(psDevInfo->pvDummyDataPageCpuVAddr, + SGX_MMU_PAGE_SIZE, + PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY, + psDevInfo->hDummyDataPageOSMemHandle); + /* and free the memory */ + RA_Free (psMMUContext->psDeviceNode->psLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE); + } +#endif +#if defined(FIX_HW_BRN_31620) + /* if this is the last context free the dummy pages too */ + if(!psMMUContextList->psNext) + { + /* free the Page */ + PDUMPFREEPAGETABLE(&psMMUContext->psDeviceNode->sDevId, psDevInfo->hBRN31620DummyPageOSMemHandle, psDevInfo->pvBRN31620DummyPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, PDUMP_PT_UNIQUETAG); + + sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->hBRN31620DummyPageOSMemHandle, + 
psDevInfo->pvBRN31620DummyPageCpuVAddr); + sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr); + + /* unmap the CPU mapping */ + OSUnMapPhysToLin(psDevInfo->pvBRN31620DummyPageCpuVAddr, + SGX_MMU_PAGE_SIZE, + PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY, + psDevInfo->hBRN31620DummyPageOSMemHandle); + /* and free the memory */ + RA_Free (psMMUContext->psDeviceNode->psLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE); + + /* free the Dummy PT */ + PDUMPFREEPAGETABLE(&psMMUContext->psDeviceNode->sDevId, psDevInfo->hBRN31620DummyPTOSMemHandle, psDevInfo->pvBRN31620DummyPTCpuVAddr, SGX_MMU_PAGE_SIZE, 0, PDUMP_PT_UNIQUETAG); + + sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->hBRN31620DummyPTOSMemHandle, + psDevInfo->pvBRN31620DummyPTCpuVAddr); + sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr); + + /* unmap the CPU mapping */ + OSUnMapPhysToLin(psDevInfo->pvBRN31620DummyPTCpuVAddr, + SGX_MMU_PAGE_SIZE, + PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY, + psDevInfo->hBRN31620DummyPTOSMemHandle); + /* and free the memory */ + RA_Free (psMMUContext->psDeviceNode->psLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE); + } +#endif + } + + PVR_DPF ((PVR_DBG_MESSAGE, "MMU_Finalise")); + + /* remove the MMU context from the list of MMU contexts */ + ppsMMUContext = (MMU_CONTEXT**)&psMMUContext->psDevInfo->pvMMUContextList; + while(*ppsMMUContext) + { + if(*ppsMMUContext == psMMUContext) + { + /* remove item from the list */ + *ppsMMUContext = psMMUContext->psNext; + break; + } + + /* advance to next next */ + ppsMMUContext = &((*ppsMMUContext)->psNext); + } + + /* free the context itself. */ + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(MMU_CONTEXT), psMMUContext, IMG_NULL); + /*not nulling pointer, copy on stack*/ +} + + +/*! +****************************************************************************** + FUNCTION: MMU_InsertHeap + + PURPOSE: Copies PDEs from shared/exported heap into current MMU context. 
+ + PARAMETERS: In: psMMUContext - the mmu + In: psMMUHeap - a shared/exported heap + + RETURNS: None +******************************************************************************/ +IMG_VOID +MMU_InsertHeap(MMU_CONTEXT *psMMUContext, MMU_HEAP *psMMUHeap) +{ + IMG_UINT32 *pui32PDCpuVAddr = (IMG_UINT32 *) psMMUContext->pvPDCpuVAddr; + IMG_UINT32 *pui32KernelPDCpuVAddr = (IMG_UINT32 *) psMMUHeap->psMMUContext->pvPDCpuVAddr; + IMG_UINT32 ui32PDEntry; +#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS) + IMG_BOOL bInvalidateDirectoryCache = IMG_FALSE; +#endif + + /* advance to the first entry */ + pui32PDCpuVAddr += psMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> psMMUHeap->ui32PDShift; + pui32KernelPDCpuVAddr += psMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> psMMUHeap->ui32PDShift; + + /* + update the PD range relating to the heap's + device virtual address range + */ +#if defined(PDUMP) + PDUMPCOMMENT("Page directory shared heap range copy"); + PDUMPCOMMENT(" (Source heap MMU Context ID == %u, PT count == 0x%x)", + psMMUHeap->psMMUContext->ui32PDumpMMUContextID, + psMMUHeap->ui32PageTableCount); + PDUMPCOMMENT(" (Destination MMU Context ID == %u)", psMMUContext->ui32PDumpMMUContextID); +#endif /* PDUMP */ +#ifdef SUPPORT_SGX_MMU_BYPASS + EnableHostAccess(psMMUContext); +#endif + + for (ui32PDEntry = 0; ui32PDEntry < psMMUHeap->ui32PageTableCount; ui32PDEntry++) + { +#if (!defined(SUPPORT_SGX_MMU_DUMMY_PAGE)) && (!defined(FIX_HW_BRN_31620)) + /* check we have invalidated target PDEs */ + PVR_ASSERT(pui32PDCpuVAddr[ui32PDEntry] == 0); +#endif + MakeKernelPageReadWrite(psMMUContext->pvPDCpuVAddr); + /* copy over the PDEs */ + pui32PDCpuVAddr[ui32PDEntry] = pui32KernelPDCpuVAddr[ui32PDEntry]; + MakeKernelPageReadOnly(psMMUContext->pvPDCpuVAddr); + if (pui32PDCpuVAddr[ui32PDEntry]) + { + /* Ensure the shared heap allocation is mapped into the context/PD + * for the active pdump process/app. The PTs and backing physical + * should also be pdumped (elsewhere). 
+ * MALLOC (PT) + * LDB (init PT) + * MALLOC (data page) + * WRW (PTE->data page) + * LDB (init data page) -- could be useful to ensure page is initialised + */ + #if defined(PDUMP) + //PDUMPCOMMENT("MMU_InsertHeap: Mapping shared heap to new context %d (%s)", psMMUContext->ui32PDumpMMUContextID, (psMMUContext->bPDumpActive) ? "active" : ""); + #if defined(SUPPORT_PDUMP_MULTI_PROCESS) + if(psMMUContext->bPDumpActive) + #endif /* SUPPORT_PDUMP_MULTI_PROCESS */ + { + PDUMPPDENTRIES(&psMMUHeap->sMMUAttrib, psMMUContext->hPDOSMemHandle, (IMG_VOID *) &pui32PDCpuVAddr[ui32PDEntry], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG); + } + #endif +#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS) + bInvalidateDirectoryCache = IMG_TRUE; +#endif + } + } + +#ifdef SUPPORT_SGX_MMU_BYPASS + DisableHostAccess(psMMUContext); +#endif + +#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS) + if (bInvalidateDirectoryCache) + { + /* This is actually not to do with multiple mem contexts, but to do with the directory cache. + In the 1 context implementation of the MMU, the directory "cache" is actually a copy of the + page directory memory, and requires updating whenever the page directory changes, even if there + was no previous value in a particular entry + */ + MMU_InvalidateDirectoryCache(psMMUContext->psDevInfo); + } +#endif +} + + +/*! +****************************************************************************** + FUNCTION: MMU_UnmapPagesAndFreePTs + + PURPOSE: unmap pages, invalidate virtual address and try to free the PTs + + PARAMETERS: In: psMMUHeap - the mmu. + In: sDevVAddr - the device virtual address. 
+ In: ui32PageCount - page count + In: hUniqueTag - A unique ID for use as a tag identifier + + RETURNS: None +******************************************************************************/ +static IMG_VOID +MMU_UnmapPagesAndFreePTs (MMU_HEAP *psMMUHeap, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_UINT32 ui32PageCount, + IMG_HANDLE hUniqueTag) +{ + IMG_DEV_VIRTADDR sTmpDevVAddr; + IMG_UINT32 i; + IMG_UINT32 ui32PDIndex; + IMG_UINT32 ui32PTIndex; + IMG_UINT32 *pui32Tmp; + IMG_BOOL bInvalidateDirectoryCache = IMG_FALSE; + +#if !defined (PDUMP) + PVR_UNREFERENCED_PARAMETER(hUniqueTag); +#endif + /* setup tmp devvaddr to base of allocation */ + sTmpDevVAddr = sDevVAddr; + + for(i=0; i> psMMUHeap->ui32PDShift; + + /* and advance to the first PT info list */ + ppsPTInfoList = &psMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex]; + + { + /* find the index/offset of the first PT in the first PT page */ + ui32PTIndex = (sTmpDevVAddr.uiAddr & psMMUHeap->ui32PTMask) >> psMMUHeap->ui32PTShift; + + /* Is the PT page valid? */ + if (!ppsPTInfoList[0]) + { + /* + With sparse mappings we expect that the PT could be freed + before we reach the end of it as the unmapped pages don't + bump ui32ValidPTECount so it can reach zero before we reach + the end of the PT. + */ + if (!psMMUHeap->bHasSparseMappings) + { + PVR_DPF((PVR_DBG_MESSAGE, "MMU_UnmapPagesAndFreePTs: Invalid PT for alloc at VAddr:0x%08X (VaddrIni:0x%08X AllocPage:%u) PDIdx:%u PTIdx:%u",sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr,i, ui32PDIndex, ui32PTIndex )); + } + + /* advance the sTmpDevVAddr by one page */ + sTmpDevVAddr.uiAddr += psMMUHeap->ui32DataPageSize; + + /* Try to unmap the remaining allocation pages */ + continue; + } + + /* setup pointer to the first entry in the PT page */ + pui32Tmp = (IMG_UINT32*)ppsPTInfoList[0]->PTPageCpuVAddr; + + /* Is PTPageCpuVAddr valid ? 
*/ + if (!pui32Tmp) + { + continue; + } + + CheckPT(ppsPTInfoList[0]); + + /* Decrement the valid page count only if the current page is valid*/ + if (pui32Tmp[ui32PTIndex] & SGX_MMU_PTE_VALID) + { + ppsPTInfoList[0]->ui32ValidPTECount--; + } + else + { + if (!psMMUHeap->bHasSparseMappings) + { + PVR_DPF((PVR_DBG_MESSAGE, "MMU_UnmapPagesAndFreePTs: Page is already invalid for alloc at VAddr:0x%08X (VAddrIni:0x%08X AllocPage:%u) PDIdx:%u PTIdx:%u",sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr,i, ui32PDIndex, ui32PTIndex )); + } + } + + /* The page table count should not go below zero */ + PVR_ASSERT((IMG_INT32)ppsPTInfoList[0]->ui32ValidPTECount >= 0); + MakeKernelPageReadWrite(ppsPTInfoList[0]->PTPageCpuVAddr); +#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE) + /* point the PT entry to the dummy data page */ + pui32Tmp[ui32PTIndex] = (psMMUHeap->psMMUContext->psDevInfo->sDummyDataDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT) + | SGX_MMU_PTE_VALID; +#else + /* invalidate entry */ +#if defined(FIX_HW_BRN_31620) + BRN31620InvalidatePageTableEntry(psMMUHeap->psMMUContext, ui32PDIndex, ui32PTIndex, &pui32Tmp[ui32PTIndex]); +#else + pui32Tmp[ui32PTIndex] = 0; +#endif +#endif + MakeKernelPageReadOnly(ppsPTInfoList[0]->PTPageCpuVAddr); + CheckPT(ppsPTInfoList[0]); + } + + /* + Free a page table if we can. 
+ */ + if (ppsPTInfoList[0] && (ppsPTInfoList[0]->ui32ValidPTECount == 0) + ) + { +#if defined(FIX_HW_BRN_31620) + if (BRN31620FreePageTable(psMMUHeap, ui32PDIndex) == IMG_TRUE) + { + bInvalidateDirectoryCache = IMG_TRUE; + } +#else + _DeferredFreePageTable(psMMUHeap, ui32PDIndex - psMMUHeap->ui32PDBaseIndex, IMG_TRUE); + bInvalidateDirectoryCache = IMG_TRUE; +#endif + } + + /* advance the sTmpDevVAddr by one page */ + sTmpDevVAddr.uiAddr += psMMUHeap->ui32DataPageSize; + } + + if(bInvalidateDirectoryCache) + { + MMU_InvalidateDirectoryCache(psMMUHeap->psMMUContext->psDevInfo); + } + else + { + MMU_InvalidatePageTableCache(psMMUHeap->psMMUContext->psDevInfo); + } + +#if defined(PDUMP) + MMU_PDumpPageTables(psMMUHeap, + sDevVAddr, + psMMUHeap->ui32DataPageSize * ui32PageCount, + IMG_TRUE, + hUniqueTag); +#endif /* #if defined(PDUMP) */ +} + + +/*! +****************************************************************************** + FUNCTION: MMU_FreePageTables + + PURPOSE: Call back from RA_Free to zero page table entries used by freed + spans. + + PARAMETERS: In: pvMMUHeap + In: ui32Start + In: ui32End + In: hUniqueTag - A unique ID for use as a tag identifier + RETURNS: +******************************************************************************/ +static IMG_VOID MMU_FreePageTables(IMG_PVOID pvMMUHeap, + IMG_SIZE_T uStart, + IMG_SIZE_T uEnd, + IMG_HANDLE hUniqueTag) +{ + MMU_HEAP *pMMUHeap = (MMU_HEAP*)pvMMUHeap; + IMG_DEV_VIRTADDR Start; + + Start.uiAddr = (IMG_UINT32)uStart; + + MMU_UnmapPagesAndFreePTs(pMMUHeap, Start, (IMG_UINT32)((uEnd - uStart) >> pMMUHeap->ui32PTShift), hUniqueTag); +} + +/*! +****************************************************************************** + FUNCTION: MMU_Create + + PURPOSE: Create an mmu device virtual heap. 
+ + PARAMETERS: In: psMMUContext - MMU context + In: psDevArena - device memory resource arena + Out: ppsVMArena - virtual mapping arena + RETURNS: MMU_HEAP + RETURNS: +******************************************************************************/ +MMU_HEAP * +MMU_Create (MMU_CONTEXT *psMMUContext, + DEV_ARENA_DESCRIPTOR *psDevArena, + RA_ARENA **ppsVMArena, + PDUMP_MMU_ATTRIB **ppsMMUAttrib) +{ + MMU_HEAP *pMMUHeap; + IMG_UINT32 ui32ScaleSize; + + PVR_UNREFERENCED_PARAMETER(ppsMMUAttrib); + + PVR_ASSERT (psDevArena != IMG_NULL); + + if (psDevArena == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "MMU_Create: invalid parameter")); + return IMG_NULL; + } + + OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof (MMU_HEAP), + (IMG_VOID **)&pMMUHeap, IMG_NULL, + "MMU Heap"); + if (pMMUHeap == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "MMU_Create: ERROR call to OSAllocMem failed")); + return IMG_NULL; + } + + pMMUHeap->psMMUContext = psMMUContext; + pMMUHeap->psDevArena = psDevArena; + + /* + generate page table and data page mask and shift values + based on the data page size + */ + switch(pMMUHeap->psDevArena->ui32DataPageSize) + { + case 0x1000: + ui32ScaleSize = 0; + pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_4K; + break; +#if defined(SGX_FEATURE_VARIABLE_MMU_PAGE_SIZE) + case 0x4000: + ui32ScaleSize = 2; + pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_16K; + break; + case 0x10000: + ui32ScaleSize = 4; + pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_64K; + break; + case 0x40000: + ui32ScaleSize = 6; + pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_256K; + break; + case 0x100000: + ui32ScaleSize = 8; + pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_1M; + break; + case 0x400000: + ui32ScaleSize = 10; + pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_4M; + break; +#endif /* #if defined(SGX_FEATURE_VARIABLE_MMU_PAGE_SIZE) */ + default: + PVR_DPF((PVR_DBG_ERROR, "MMU_Create: invalid data page size")); + goto ErrorFreeHeap; + } + + /* 
number of bits of address offset into the data page */ + pMMUHeap->ui32DataPageSize = psDevArena->ui32DataPageSize; + pMMUHeap->ui32DataPageBitWidth = SGX_MMU_PAGE_SHIFT + ui32ScaleSize; + pMMUHeap->ui32DataPageMask = pMMUHeap->ui32DataPageSize - 1; + /* number of bits of address indexing into a pagetable */ + pMMUHeap->ui32PTShift = pMMUHeap->ui32DataPageBitWidth; + pMMUHeap->ui32PTBitWidth = SGX_MMU_PT_SHIFT - ui32ScaleSize; + pMMUHeap->ui32PTMask = SGX_MMU_PT_MASK & (SGX_MMU_PT_MASK<ui32PTSize = (IMG_UINT32)(1UL<ui32PTBitWidth) * sizeof(IMG_UINT32); + + /* note: PT size must be at least 4 entries, even for 4Mb data page size */ + if(pMMUHeap->ui32PTSize < 4 * sizeof(IMG_UINT32)) + { + pMMUHeap->ui32PTSize = 4 * sizeof(IMG_UINT32); + } + pMMUHeap->ui32PTNumEntriesAllocated = pMMUHeap->ui32PTSize >> 2; + + /* find the number of actual PT entries per PD entry range. For 4MB data + * pages we only use the first entry although the PT has 16 byte allocation/alignment + * (due to 4 LSbits of the PDE are reserved for control) */ + pMMUHeap->ui32PTNumEntriesUsable = (IMG_UINT32)(1UL << pMMUHeap->ui32PTBitWidth); + + /* number of bits of address indexing into a page directory */ + pMMUHeap->ui32PDShift = pMMUHeap->ui32PTBitWidth + pMMUHeap->ui32PTShift; + pMMUHeap->ui32PDBitWidth = SGX_FEATURE_ADDRESS_SPACE_SIZE - pMMUHeap->ui32PTBitWidth - pMMUHeap->ui32DataPageBitWidth; + pMMUHeap->ui32PDMask = SGX_MMU_PD_MASK & (SGX_MMU_PD_MASK>>(32-SGX_FEATURE_ADDRESS_SPACE_SIZE)); + + /* External system cache violates this rule */ +#if !defined (SUPPORT_EXTERNAL_SYSTEM_CACHE) + /* + The heap must start on a PT boundary to avoid PT sharing across heaps + The only exception is the first heap which can start at any address + from 0 to the end of the first PT boundary + */ + if(psDevArena->BaseDevVAddr.uiAddr > (pMMUHeap->ui32DataPageMask | pMMUHeap->ui32PTMask)) + { + /* + if for some reason the first heap starts after the end of the first PT boundary + but is not aligned to a PT 
boundary then the assert will trigger unncessarily + */ + PVR_ASSERT ((psDevArena->BaseDevVAddr.uiAddr + & (pMMUHeap->ui32DataPageMask + | pMMUHeap->ui32PTMask)) == 0); + } +#endif + /* how many PT entries do we need? */ + pMMUHeap->ui32PTETotalUsable = pMMUHeap->psDevArena->ui32Size >> pMMUHeap->ui32PTShift; + + /* calculate the PD Base index for the Heap (required for page mapping) */ + pMMUHeap->ui32PDBaseIndex = (pMMUHeap->psDevArena->BaseDevVAddr.uiAddr & pMMUHeap->ui32PDMask) >> pMMUHeap->ui32PDShift; + + /* + how many page tables? + round up to nearest entries to the nearest page table sized block + */ + pMMUHeap->ui32PageTableCount = (pMMUHeap->ui32PTETotalUsable + pMMUHeap->ui32PTNumEntriesUsable - 1) + >> pMMUHeap->ui32PTBitWidth; + PVR_ASSERT(pMMUHeap->ui32PageTableCount > 0); + + /* Create the arena */ + pMMUHeap->psVMArena = RA_Create(psDevArena->pszName, + psDevArena->BaseDevVAddr.uiAddr, + psDevArena->ui32Size, + IMG_NULL, + MIN(HOST_PAGESIZE(), pMMUHeap->ui32DataPageSize), + IMG_NULL, + IMG_NULL, + &MMU_FreePageTables, + pMMUHeap); + + if (pMMUHeap->psVMArena == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "MMU_Create: ERROR call to RA_Create failed")); + goto ErrorFreePagetables; + } + +#if defined(PDUMP) + /* setup per-heap PDUMP MMU attributes */ + MMU_SetPDumpAttribs(&pMMUHeap->sMMUAttrib, + psMMUContext->psDeviceNode, + pMMUHeap->ui32DataPageMask, + pMMUHeap->ui32PTSize); + *ppsMMUAttrib = &pMMUHeap->sMMUAttrib; + + PDUMPCOMMENT("Create MMU device from arena %s (Size == 0x%x, DataPageSize == 0x%x, BaseDevVAddr == 0x%x)", + psDevArena->pszName, + psDevArena->ui32Size, + pMMUHeap->ui32DataPageSize, + psDevArena->BaseDevVAddr.uiAddr); +#endif /* PDUMP */ + + /* + And return the RA for VM arena management + */ + *ppsVMArena = pMMUHeap->psVMArena; + + return pMMUHeap; + + /* drop into here if errors */ +ErrorFreePagetables: + _DeferredFreePageTables (pMMUHeap); + +ErrorFreeHeap: + OSFreeMem (PVRSRV_OS_PAGEABLE_HEAP, sizeof(MMU_HEAP), pMMUHeap, IMG_NULL); 
+ /*not nulling pointer, out of scope*/ + + return IMG_NULL; +} + +/*! +****************************************************************************** + FUNCTION: MMU_Delete + + PURPOSE: Delete an MMU device virtual heap. + + PARAMETERS: In: pMMUHeap - The MMU heap to delete. + RETURNS: +******************************************************************************/ +IMG_VOID +MMU_Delete (MMU_HEAP *pMMUHeap) +{ + if (pMMUHeap != IMG_NULL) + { + PVR_DPF ((PVR_DBG_MESSAGE, "MMU_Delete")); + + if(pMMUHeap->psVMArena) + { + RA_Delete (pMMUHeap->psVMArena); + } + +#if defined(PDUMP) + PDUMPCOMMENT("Delete MMU device from arena %s (BaseDevVAddr == 0x%x, PT count for deferred free == 0x%x)", + pMMUHeap->psDevArena->pszName, + pMMUHeap->psDevArena->BaseDevVAddr.uiAddr, + pMMUHeap->ui32PageTableCount); +#endif /* PDUMP */ + +#ifdef SUPPORT_SGX_MMU_BYPASS + EnableHostAccess(pMMUHeap->psMMUContext); +#endif + _DeferredFreePageTables (pMMUHeap); +#ifdef SUPPORT_SGX_MMU_BYPASS + DisableHostAccess(pMMUHeap->psMMUContext); +#endif + + OSFreeMem (PVRSRV_OS_PAGEABLE_HEAP, sizeof(MMU_HEAP), pMMUHeap, IMG_NULL); + /*not nulling pointer, copy on stack*/ + } +} + +/*! +****************************************************************************** + FUNCTION: MMU_Alloc + PURPOSE: Allocate space in an mmu's virtual address space. + PARAMETERS: In: pMMUHeap - MMU to allocate on. + In: uSize - Size in bytes to allocate. + Out: pActualSize - If non null receives actual size allocated. + In: uFlags - Allocation flags. + In: uDevVAddrAlignment - Required alignment. + Out: DevVAddr - Receives base address of allocation. 
+ RETURNS: IMG_TRUE - Success + IMG_FALSE - Failure +******************************************************************************/ +IMG_BOOL +MMU_Alloc (MMU_HEAP *pMMUHeap, + IMG_SIZE_T uSize, + IMG_SIZE_T *pActualSize, + IMG_UINT32 uFlags, + IMG_UINT32 uDevVAddrAlignment, + IMG_DEV_VIRTADDR *psDevVAddr) +{ + IMG_BOOL bStatus; + + PVR_DPF ((PVR_DBG_MESSAGE, + "MMU_Alloc: uSize=0x%" SIZE_T_FMT_LEN "x, flags=0x%x, align=0x%x", + uSize, uFlags, uDevVAddrAlignment)); + + /* + Only allocate a VM address if the caller did not supply one + */ + if((uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR) == 0) + { + IMG_UINTPTR_T uiAddr; + + bStatus = RA_Alloc (pMMUHeap->psVMArena, + uSize, + pActualSize, + IMG_NULL, + 0, + uDevVAddrAlignment, + 0, + IMG_NULL, + 0, + &uiAddr); + if(!bStatus) + { + IMG_CHAR asCurrentProcessName[128]; + + PVR_DPF((PVR_DBG_ERROR,"MMU_Alloc: RA_Alloc of VMArena failed")); + OSGetCurrentProcessNameKM(asCurrentProcessName, 128); + PVR_DPF((PVR_DBG_ERROR,"MMU_Alloc: Alloc of DevVAddr failed from heap %s ID%d, pid: %d, task: %s", + pMMUHeap->psDevArena->pszName, + pMMUHeap->psDevArena->ui32HeapID, + OSGetCurrentProcessIDKM(), + asCurrentProcessName)); + #if defined (MEM_TRACK_INFO_DEBUG) + PVRSRVPrintMemTrackInfo(0); + #endif + + return bStatus; + } + + psDevVAddr->uiAddr = IMG_CAST_TO_DEVVADDR_UINT(uiAddr); + } + + #ifdef SUPPORT_SGX_MMU_BYPASS + EnableHostAccess(pMMUHeap->psMMUContext); + #endif + + /* allocate page tables to cover allocation as required */ + bStatus = _DeferredAllocPagetables(pMMUHeap, *psDevVAddr, (IMG_UINT32)uSize); + + #ifdef SUPPORT_SGX_MMU_BYPASS + DisableHostAccess(pMMUHeap->psMMUContext); + #endif + + if (!bStatus) + { + PVR_DPF((PVR_DBG_ERROR,"MMU_Alloc: _DeferredAllocPagetables failed")); + PVR_DPF((PVR_DBG_ERROR,"MMU_Alloc: Failed to alloc pagetable(s) for DevVAddr 0x%8.8x from heap %s ID%d", + psDevVAddr->uiAddr, + pMMUHeap->psDevArena->pszName, + pMMUHeap->psDevArena->ui32HeapID)); + if((uFlags & 
PVRSRV_MEM_USER_SUPPLIED_DEVVADDR) == 0) + { + /* free the VM address */ + RA_Free (pMMUHeap->psVMArena, psDevVAddr->uiAddr, IMG_FALSE); + } + } + + return bStatus; +} + +/*! +****************************************************************************** + FUNCTION: MMU_Free + PURPOSE: Free space in an mmu's virtual address space. + PARAMETERS: In: pMMUHeap - MMU to deallocate on. + In: DevVAddr - Base address to deallocate. + RETURNS: None +******************************************************************************/ +IMG_VOID +MMU_Free (MMU_HEAP *pMMUHeap, IMG_DEV_VIRTADDR DevVAddr, IMG_UINT32 ui32Size) +{ + PVR_ASSERT (pMMUHeap != IMG_NULL); + + if (pMMUHeap == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "MMU_Free: invalid parameter")); + return; + } + + PVR_DPF((PVR_DBG_MESSAGE, "MMU_Free: Freeing DevVAddr 0x%08X from heap %s ID%d", + DevVAddr.uiAddr, + pMMUHeap->psDevArena->pszName, + pMMUHeap->psDevArena->ui32HeapID)); + + if((DevVAddr.uiAddr >= pMMUHeap->psDevArena->BaseDevVAddr.uiAddr) && + (DevVAddr.uiAddr + ui32Size <= pMMUHeap->psDevArena->BaseDevVAddr.uiAddr + pMMUHeap->psDevArena->ui32Size)) + { + RA_Free (pMMUHeap->psVMArena, DevVAddr.uiAddr, IMG_TRUE); + return; + } + + PVR_DPF((PVR_DBG_ERROR,"MMU_Free: Couldn't free DevVAddr %08X from heap %s ID%d (not in range of heap))", + DevVAddr.uiAddr, + pMMUHeap->psDevArena->pszName, + pMMUHeap->psDevArena->ui32HeapID)); +} + +/*! +****************************************************************************** + FUNCTION: MMU_Enable + + PURPOSE: Enable an mmu. Establishes pages tables and takes the mmu out + of bypass and waits for the mmu to acknowledge enabled. + + PARAMETERS: In: pMMUHeap - the mmu + RETURNS: None +******************************************************************************/ +IMG_VOID +MMU_Enable (MMU_HEAP *pMMUHeap) +{ + PVR_UNREFERENCED_PARAMETER(pMMUHeap); + /* SGX mmu is always enabled (stub function) */ +} + +/*! 
+****************************************************************************** + FUNCTION: MMU_Disable + + PURPOSE: Disable an mmu, takes the mmu into bypass. + + PARAMETERS: In: pMMUHeap - the mmu + RETURNS: None +******************************************************************************/ +IMG_VOID +MMU_Disable (MMU_HEAP *pMMUHeap) +{ + PVR_UNREFERENCED_PARAMETER(pMMUHeap); + /* SGX mmu is always enabled (stub function) */ +} + +#if defined(FIX_HW_BRN_31620) +/*! +****************************************************************************** + FUNCTION: MMU_GetCacheFlushRange + + PURPOSE: Gets device physical address of the mmu context. + + PARAMETERS: In: pMMUContext - the mmu context + Out: pui32RangeMask - Bit mask showing which PD cache + lines have changed + RETURNS: None +******************************************************************************/ + +IMG_VOID MMU_GetCacheFlushRange(MMU_CONTEXT *pMMUContext, IMG_UINT32 *pui32RangeMask) +{ + IMG_UINT32 i; + + for (i=0;iui32PDChangeMask[i]; + + /* Clear bit mask for the next set of allocations */ + pMMUContext->ui32PDChangeMask[i] = 0; + } +} + +/*! +****************************************************************************** + FUNCTION: MMU_GetPDPhysAddr + + PURPOSE: Gets device physical address of the mmu contexts PD. + + PARAMETERS: In: pMMUContext - the mmu context + Out: psDevPAddr - Address of PD + RETURNS: None +******************************************************************************/ + +IMG_VOID MMU_GetPDPhysAddr(MMU_CONTEXT *pMMUContext, IMG_DEV_PHYADDR *psDevPAddr) +{ + *psDevPAddr = pMMUContext->sPDDevPAddr; +} + +#endif +#if defined(PDUMP) +/*! +****************************************************************************** + FUNCTION: MMU_PDumpPageTables + + PURPOSE: PDump the linear mapping for a range of pages at a specified + virtual address. + + PARAMETERS: In: pMMUHeap - the mmu. + In: DevVAddr - the device virtual address. 
+ In: uSize - size of memory range in bytes + In: hUniqueTag - A unique ID for use as a tag identifier + RETURNS: None +******************************************************************************/ +static IMG_VOID +MMU_PDumpPageTables (MMU_HEAP *pMMUHeap, + IMG_DEV_VIRTADDR DevVAddr, + IMG_SIZE_T uSize, + IMG_BOOL bForUnmap, + IMG_HANDLE hUniqueTag) +{ + IMG_UINT32 ui32NumPTEntries; + IMG_UINT32 ui32PTIndex; + IMG_UINT32 *pui32PTEntry; + + MMU_PT_INFO **ppsPTInfoList; + IMG_UINT32 ui32PDIndex; + IMG_UINT32 ui32PTDumpCount; + +#if defined(FIX_HW_BRN_31620) + PVRSRV_SGXDEV_INFO *psDevInfo = pMMUHeap->psMMUContext->psDevInfo; +#endif + /* find number of PT entries to dump */ + ui32NumPTEntries = (IMG_UINT32)((uSize + pMMUHeap->ui32DataPageMask) >> pMMUHeap->ui32PTShift); + + /* find the index/offset in PD entries */ + ui32PDIndex = DevVAddr.uiAddr >> pMMUHeap->ui32PDShift; + + /* set the base PT info */ + ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex]; + + /* find the index/offset of the first PT entry in the first PT page */ + ui32PTIndex = (DevVAddr.uiAddr & pMMUHeap->ui32PTMask) >> pMMUHeap->ui32PTShift; + + /* pdump the PT Page modification */ + PDUMPCOMMENT("Page table mods (num entries == %08X) %s", ui32NumPTEntries, bForUnmap ? "(for unmap)" : ""); + + /* walk the PT pages, dumping as we go */ + while(ui32NumPTEntries > 0) + { + MMU_PT_INFO* psPTInfo = *ppsPTInfoList++; + + if(ui32NumPTEntries <= pMMUHeap->ui32PTNumEntriesUsable - ui32PTIndex) + { + ui32PTDumpCount = ui32NumPTEntries; + } + else + { + ui32PTDumpCount = pMMUHeap->ui32PTNumEntriesUsable - ui32PTIndex; + } + + if (psPTInfo) + { +#if defined(FIX_HW_BRN_31620) + IMG_UINT32 i; +#endif + IMG_UINT32 ui32Flags = 0; +#if defined(SUPPORT_PDUMP_MULTI_PROCESS) + ui32Flags |= ( MMU_IsHeapShared(pMMUHeap) ) ? 
PDUMP_FLAGS_PERSISTENT : 0; +#endif + pui32PTEntry = (IMG_UINT32*)psPTInfo->PTPageCpuVAddr; +#if defined(FIX_HW_BRN_31620) + if ((ui32PDIndex % (BRN31620_PDE_CACHE_FILL_SIZE/BRN31620_PT_ADDRESS_RANGE_SIZE)) == BRN31620_DUMMY_PDE_INDEX) + { + for (i=ui32PTIndex;i<(ui32PTIndex + ui32PTDumpCount);i++) + { + if (pui32PTEntry[i] == ((psDevInfo->sBRN31620DummyPageDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT) + | SGX_MMU_PTE_DUMMY_PAGE + | SGX_MMU_PTE_READONLY + | SGX_MMU_PTE_VALID)) + { + PDUMPMEMPTENTRIES(&pMMUHeap->sMMUAttrib, psPTInfo->hPTPageOSMemHandle, (IMG_VOID *) &pui32PTEntry[i], sizeof(IMG_UINT32), ui32Flags, IMG_FALSE, PDUMP_PT_UNIQUETAG, PDUMP_PD_UNIQUETAG); + } + else + { + PDUMPMEMPTENTRIES(&pMMUHeap->sMMUAttrib, psPTInfo->hPTPageOSMemHandle, (IMG_VOID *) &pui32PTEntry[i], sizeof(IMG_UINT32), ui32Flags, IMG_FALSE, PDUMP_PT_UNIQUETAG, hUniqueTag); + } + } + } + else +#endif + { + PDUMPMEMPTENTRIES(&pMMUHeap->sMMUAttrib, psPTInfo->hPTPageOSMemHandle, (IMG_VOID *) &pui32PTEntry[ui32PTIndex], ui32PTDumpCount * sizeof(IMG_UINT32), ui32Flags, IMG_FALSE, PDUMP_PT_UNIQUETAG, hUniqueTag); + } + } + + /* decrement PT entries left */ + ui32NumPTEntries -= ui32PTDumpCount; + + /* reset offset in page */ + ui32PTIndex = 0; + +#if defined(FIX_HW_BRN_31620) + /* For 31620 we need to know which PD index we're working on */ + ui32PDIndex++; +#endif + } + + PDUMPCOMMENT("Finished page table mods %s", bForUnmap ? "(for unmap)" : ""); +} +#endif /* #if defined(PDUMP) */ + + +/*! +****************************************************************************** + FUNCTION: MMU_MapPage + + PURPOSE: Create a mapping for one page at a specified virtual address. + + PARAMETERS: In: pMMUHeap - the mmu. + In: DevVAddr - the device virtual address. + In: DevPAddr - the device physical address of the page to map. 
+ In: ui32MemFlags - BM r/w/cache flags + RETURNS: None +******************************************************************************/ +static IMG_VOID +MMU_MapPage (MMU_HEAP *pMMUHeap, + IMG_DEV_VIRTADDR DevVAddr, + IMG_DEV_PHYADDR DevPAddr, + IMG_UINT32 ui32MemFlags) +{ + IMG_UINT32 ui32Index; + IMG_UINT32 *pui32Tmp; + IMG_UINT32 ui32MMUFlags = 0; + MMU_PT_INFO **ppsPTInfoList; + + /* check the physical alignment of the memory to map */ + PVR_ASSERT((DevPAddr.uiAddr & pMMUHeap->ui32DataPageMask) == 0); + + /* + unravel the read/write/cache flags + */ + if(((PVRSRV_MEM_READ|PVRSRV_MEM_WRITE) & ui32MemFlags) == (PVRSRV_MEM_READ|PVRSRV_MEM_WRITE)) + { + /* read/write */ + ui32MMUFlags = 0; + } + else if(PVRSRV_MEM_READ & ui32MemFlags) + { + /* read only */ + ui32MMUFlags |= SGX_MMU_PTE_READONLY; + } + else if(PVRSRV_MEM_WRITE & ui32MemFlags) + { + /* write only */ + ui32MMUFlags |= SGX_MMU_PTE_WRITEONLY; + } + + /* cache coherency */ + if(PVRSRV_MEM_CACHE_CONSISTENT & ui32MemFlags) + { + ui32MMUFlags |= SGX_MMU_PTE_CACHECONSISTENT; + } + +#if !defined(FIX_HW_BRN_25503) + /* EDM protection */ + if(PVRSRV_MEM_EDM_PROTECT & ui32MemFlags) + { + ui32MMUFlags |= SGX_MMU_PTE_EDMPROTECT; + } +#endif + + /* + we receive a device physical address for the page that is to be mapped + and a device virtual address representing where it should be mapped to + */ + + /* find the index/offset in PD entries */ + ui32Index = DevVAddr.uiAddr >> pMMUHeap->ui32PDShift; + + /* and advance to the first PT info list */ + ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32Index]; + + CheckPT(ppsPTInfoList[0]); + + /* find the index/offset of the first PT in the first PT page */ + ui32Index = (DevVAddr.uiAddr & pMMUHeap->ui32PTMask) >> pMMUHeap->ui32PTShift; + + /* setup pointer to the first entry in the PT page */ + pui32Tmp = (IMG_UINT32*)ppsPTInfoList[0]->PTPageCpuVAddr; + +#if !defined(SUPPORT_SGX_MMU_DUMMY_PAGE) + { + IMG_UINT32 uTmp = pui32Tmp[ui32Index]; + + /* Is the current 
page already valid? (should not be unless it was allocated and not deallocated) */ +#if defined(FIX_HW_BRN_31620) + if ((uTmp & SGX_MMU_PTE_VALID) && ((DevVAddr.uiAddr & BRN31620_PDE_CACHE_FILL_MASK) != BRN31620_DUMMY_PAGE_OFFSET)) +#else + if ((uTmp & SGX_MMU_PTE_VALID) != 0) +#endif + + { + PVR_DPF((PVR_DBG_ERROR, "MMU_MapPage: Page is already valid for alloc at VAddr:0x%08X PDIdx:%u PTIdx:%u", + DevVAddr.uiAddr, + DevVAddr.uiAddr >> pMMUHeap->ui32PDShift, + ui32Index )); + PVR_DPF((PVR_DBG_ERROR, "MMU_MapPage: Page table entry value: 0x%08X", uTmp)); + + PVR_DPF((PVR_DBG_ERROR, "MMU_MapPage: Physical page to map: 0x" DEVPADDR_FMT, + DevPAddr.uiAddr)); + +#if PT_DUMP + DumpPT(ppsPTInfoList[0]); +#endif + } +#if !defined(FIX_HW_BRN_31620) + PVR_ASSERT((uTmp & SGX_MMU_PTE_VALID) == 0); +#endif + } +#endif + + /* One more valid entry in the page table. */ + ppsPTInfoList[0]->ui32ValidPTECount++; + + MakeKernelPageReadWrite(ppsPTInfoList[0]->PTPageCpuVAddr); + /* map in the physical page */ + pui32Tmp[ui32Index] = ((IMG_UINT32)(DevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT) + & ((~pMMUHeap->ui32DataPageMask)>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)) + | SGX_MMU_PTE_VALID + | ui32MMUFlags; + MakeKernelPageReadOnly(ppsPTInfoList[0]->PTPageCpuVAddr); + CheckPT(ppsPTInfoList[0]); +} + + +/*! +****************************************************************************** + FUNCTION: MMU_MapScatter + + PURPOSE: Create a linear mapping for a range of pages at a specified + virtual address. + + PARAMETERS: In: pMMUHeap - the mmu. + In: DevVAddr - the device virtual address. + In: psSysAddr - the device physical address of the page to + map. + In: uSize - size of memory range in bytes + In: ui32MemFlags - page table flags. 
+ In: hUniqueTag - A unique ID for use as a tag identifier + RETURNS: None +******************************************************************************/ +IMG_VOID +MMU_MapScatter (MMU_HEAP *pMMUHeap, + IMG_DEV_VIRTADDR DevVAddr, + IMG_SYS_PHYADDR *psSysAddr, + IMG_SIZE_T uSize, + IMG_UINT32 ui32MemFlags, + IMG_HANDLE hUniqueTag) +{ +#if defined(PDUMP) + IMG_DEV_VIRTADDR MapBaseDevVAddr; +#endif /*PDUMP*/ + IMG_UINT32 uCount, i, j; + IMG_UINT32 ui32NumDevicePages; + IMG_DEV_PHYADDR DevPAddr; + + PVR_ASSERT (pMMUHeap != IMG_NULL); + +#if defined(PDUMP) + MapBaseDevVAddr = DevVAddr; +#else + PVR_UNREFERENCED_PARAMETER(hUniqueTag); +#endif /*PDUMP*/ + + PVR_ASSERT((HOST_PAGESIZE() % pMMUHeap->ui32DataPageSize) == 0); + + ui32NumDevicePages = HOST_PAGESIZE() / pMMUHeap->ui32DataPageSize; + + for (i=0, uCount=0; uCountui32DataPageMask) == 0); + + for(j=0; j< ui32NumDevicePages; j++) + { + DevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysAddr); + + MMU_MapPage (pMMUHeap, DevVAddr, DevPAddr, ui32MemFlags); + + PVR_DPF ((PVR_DBG_MESSAGE, + "MMU_MapScatter: devVAddr=%x, SysAddr=" SYSPADDR_FMT ", size=0x%x/0x%" SIZE_T_FMT_LEN "x", + DevVAddr.uiAddr, sSysAddr.uiAddr, (uCount + j*pMMUHeap->ui32DataPageSize), uSize)); + + DevVAddr.uiAddr += pMMUHeap->ui32DataPageSize; + sSysAddr.uiAddr += pMMUHeap->ui32DataPageSize; + } + } + +#if (SGX_FEATURE_PT_CACHE_ENTRIES_PER_LINE > 1) + MMU_InvalidatePageTableCache(pMMUHeap->psMMUContext->psDevInfo); +#endif + +#if defined(PDUMP) + MMU_PDumpPageTables (pMMUHeap, MapBaseDevVAddr, uSize, IMG_FALSE, hUniqueTag); +#endif /* #if defined(PDUMP) */ +} + +/*! +****************************************************************************** + FUNCTION: MMU_MapPages + + PURPOSE: Create a linear mapping for a ranege of pages at a specified + virtual address. + + PARAMETERS: In: pMMUHeap - the mmu. + In: DevVAddr - the device virtual address. + In: SysPAddr - the system physical address of the page to + map. 
+ In: uSize - size of memory range in bytes + In: ui32MemFlags - page table flags. + In: hUniqueTag - A unique ID for use as a tag identifier + RETURNS: None +******************************************************************************/ +IMG_VOID +MMU_MapPages (MMU_HEAP *pMMUHeap, + IMG_DEV_VIRTADDR DevVAddr, + IMG_SYS_PHYADDR SysPAddr, + IMG_SIZE_T uSize, + IMG_UINT32 ui32MemFlags, + IMG_HANDLE hUniqueTag) +{ + IMG_DEV_PHYADDR DevPAddr; +#if defined(PDUMP) + IMG_DEV_VIRTADDR MapBaseDevVAddr; +#endif /*PDUMP*/ + IMG_UINT32 uCount; + IMG_UINT32 ui32VAdvance; + IMG_UINT32 ui32PAdvance; + + PVR_ASSERT (pMMUHeap != IMG_NULL); + + PVR_DPF ((PVR_DBG_MESSAGE, "MMU_MapPages: heap:%s, heap_id:%d devVAddr=%08X, SysPAddr=" SYSPADDR_FMT ", size=0x%" SIZE_T_FMT_LEN "x", + pMMUHeap->psDevArena->pszName, + pMMUHeap->psDevArena->ui32HeapID, + DevVAddr.uiAddr, + SysPAddr.uiAddr, + uSize)); + + /* set the virtual and physical advance */ + ui32VAdvance = pMMUHeap->ui32DataPageSize; + ui32PAdvance = pMMUHeap->ui32DataPageSize; + +#if defined(PDUMP) + MapBaseDevVAddr = DevVAddr; +#else + PVR_UNREFERENCED_PARAMETER(hUniqueTag); +#endif /*PDUMP*/ + + DevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, SysPAddr); + + /* check the physical alignment of the memory to map */ + PVR_ASSERT((DevPAddr.uiAddr & pMMUHeap->ui32DataPageMask) == 0); + + /* + for dummy allocations there is only one physical + page backing the virtual range + */ + if(ui32MemFlags & PVRSRV_MEM_DUMMY) + { + ui32PAdvance = 0; + } + + for (uCount=0; uCount 1) + MMU_InvalidatePageTableCache(pMMUHeap->psMMUContext->psDevInfo); +#endif + +#if defined(PDUMP) + MMU_PDumpPageTables (pMMUHeap, MapBaseDevVAddr, uSize, IMG_FALSE, hUniqueTag); +#endif /* #if defined(PDUMP) */ +} + + +/*! +****************************************************************************** + FUNCTION: MMU_MapPagesSparse + + PURPOSE: Create a linear mapping for a ranege of pages at a specified + virtual address. 
+ + PARAMETERS: In: pMMUHeap - the mmu. + In: DevVAddr - the device virtual address. + In: SysPAddr - the system physical address of the page to + map. + In: ui32ChunkSize - Size of the chunk (must be page multiple) + In: ui32NumVirtChunks - Number of virtual chunks + In: ui32NumPhysChunks - Number of physical chunks + In: pabMapChunk - Mapping array + In: ui32MemFlags - page table flags. + In: hUniqueTag - A unique ID for use as a tag identifier + RETURNS: None +******************************************************************************/ +IMG_VOID +MMU_MapPagesSparse (MMU_HEAP *pMMUHeap, + IMG_DEV_VIRTADDR DevVAddr, + IMG_SYS_PHYADDR SysPAddr, + IMG_UINT32 ui32ChunkSize, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 ui32NumPhysChunks, + IMG_BOOL *pabMapChunk, + IMG_UINT32 ui32MemFlags, + IMG_HANDLE hUniqueTag) +{ + IMG_DEV_PHYADDR DevPAddr; +#if defined(PDUMP) + IMG_DEV_VIRTADDR MapBaseDevVAddr; +#endif /*PDUMP*/ + IMG_UINT32 uCount; + IMG_UINT32 ui32VAdvance; + IMG_UINT32 ui32PAdvance; + IMG_SIZE_T uSizeVM = ui32ChunkSize * ui32NumVirtChunks; +#if !defined(PVRSRV_NEED_PVR_DPF) + PVR_UNREFERENCED_PARAMETER(ui32NumPhysChunks); +#endif + + PVR_ASSERT (pMMUHeap != IMG_NULL); + + PVR_DPF ((PVR_DBG_MESSAGE, "MMU_MapPagesSparse: heap:%s, heap_id:%d devVAddr=%08X, SysPAddr=" SYSPADDR_FMT ", VM space=0x%" SIZE_T_FMT_LEN "x, PHYS space=0x%x", + pMMUHeap->psDevArena->pszName, + pMMUHeap->psDevArena->ui32HeapID, + DevVAddr.uiAddr, + SysPAddr.uiAddr, + uSizeVM, + ui32ChunkSize * ui32NumPhysChunks)); + + /* set the virtual and physical advance */ + ui32VAdvance = pMMUHeap->ui32DataPageSize; + ui32PAdvance = pMMUHeap->ui32DataPageSize; + +#if defined(PDUMP) + MapBaseDevVAddr = DevVAddr; +#else + PVR_UNREFERENCED_PARAMETER(hUniqueTag); +#endif /*PDUMP*/ + + DevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, SysPAddr); + + /* check the physical alignment of the memory to map */ + PVR_ASSERT((DevPAddr.uiAddr & pMMUHeap->ui32DataPageMask) == 0); + + /* + for dummy 
allocations there is only one physical + page backing the virtual range + */ + if(ui32MemFlags & PVRSRV_MEM_DUMMY) + { + ui32PAdvance = 0; + } + + for (uCount=0; uCountbHasSparseMappings = IMG_TRUE; + +#if (SGX_FEATURE_PT_CACHE_ENTRIES_PER_LINE > 1) + MMU_InvalidatePageTableCache(pMMUHeap->psMMUContext->psDevInfo); +#endif + +#if defined(PDUMP) + MMU_PDumpPageTables (pMMUHeap, MapBaseDevVAddr, uSizeVM, IMG_FALSE, hUniqueTag); +#endif /* #if defined(PDUMP) */ +} + +/*! +****************************************************************************** + FUNCTION: MMU_MapShadow + + PURPOSE: Create a mapping for a range of pages from either a CPU + virtual adddress, (or if NULL a hOSMemHandle) to a specified + device virtual address. + + PARAMETERS: In: pMMUHeap - the mmu. + In: MapBaseDevVAddr - A page aligned device virtual address + to start mapping from. + In: uByteSize - A page aligned mapping length in bytes. + In: CpuVAddr - A page aligned CPU virtual address. + In: hOSMemHandle - An alternative OS specific memory handle + for mapping RAM without a CPU virtual + address + Out: pDevVAddr - deprecated - It used to return a byte aligned + device virtual address corresponding to the + cpu virtual address (When CpuVAddr wasn't + constrained to be page aligned.) Now it just + returns MapBaseDevVAddr. Unaligned semantics + can easily be handled above this API if required. + In: hUniqueTag - A unique ID for use as a tag identifier + In: ui32MemFlags - page table flags. 
+ RETURNS: None +******************************************************************************/ +IMG_VOID +MMU_MapShadow (MMU_HEAP *pMMUHeap, + IMG_DEV_VIRTADDR MapBaseDevVAddr, + IMG_SIZE_T uByteSize, + IMG_CPU_VIRTADDR CpuVAddr, + IMG_HANDLE hOSMemHandle, + IMG_DEV_VIRTADDR *pDevVAddr, + IMG_UINT32 ui32MemFlags, + IMG_HANDLE hUniqueTag) +{ + IMG_UINT32 i; + IMG_UINT32 uOffset = 0; + IMG_DEV_VIRTADDR MapDevVAddr; + IMG_UINT32 ui32VAdvance; + IMG_UINT32 ui32PAdvance; + +#if !defined (PDUMP) + PVR_UNREFERENCED_PARAMETER(hUniqueTag); +#endif + + PVR_DPF ((PVR_DBG_MESSAGE, + "MMU_MapShadow: DevVAddr:%08X, Bytes:0x%" SIZE_T_FMT_LEN "x, CPUVAddr:%p", + MapBaseDevVAddr.uiAddr, + uByteSize, + CpuVAddr)); + + /* set the virtual and physical advance */ + ui32VAdvance = pMMUHeap->ui32DataPageSize; + ui32PAdvance = pMMUHeap->ui32DataPageSize; + + /* note: can't do useful check on the CPU Addr other than it being at least 4k alignment */ + PVR_ASSERT(((IMG_UINTPTR_T)CpuVAddr & (SGX_MMU_PAGE_SIZE - 1)) == 0); + PVR_ASSERT(((IMG_UINT32)uByteSize & pMMUHeap->ui32DataPageMask) == 0); + pDevVAddr->uiAddr = MapBaseDevVAddr.uiAddr; + + /* + for dummy allocations there is only one physical + page backing the virtual range + */ + if(ui32MemFlags & PVRSRV_MEM_DUMMY) + { + ui32PAdvance = 0; + } + + /* Loop through cpu memory and map page by page */ + MapDevVAddr = MapBaseDevVAddr; + for (i=0; iui32DataPageMask) == 0); + + PVR_DPF ((PVR_DBG_MESSAGE, + "Offset=0x%x: CpuVAddr=%p, CpuPAddr=" CPUPADDR_FMT ", DevVAddr=%08X, DevPAddr=" DEVPADDR_FMT, + uOffset, + (IMG_PVOID)((IMG_UINTPTR_T)CpuVAddr + uOffset), + CpuPAddr.uiAddr, + MapDevVAddr.uiAddr, + DevPAddr.uiAddr)); + + MMU_MapPage (pMMUHeap, MapDevVAddr, DevPAddr, ui32MemFlags); + + /* loop update */ + MapDevVAddr.uiAddr += ui32VAdvance; + uOffset += ui32PAdvance; + } + +#if (SGX_FEATURE_PT_CACHE_ENTRIES_PER_LINE > 1) + MMU_InvalidatePageTableCache(pMMUHeap->psMMUContext->psDevInfo); +#endif + +#if defined(PDUMP) + MMU_PDumpPageTables 
(pMMUHeap, MapBaseDevVAddr, uByteSize, IMG_FALSE, hUniqueTag); +#endif /* #if defined(PDUMP) */ +} + +/*! +****************************************************************************** + FUNCTION: MMU_MapShadowSparse + + PURPOSE: Create a mapping for a range of pages from either a CPU + virtual adddress, (or if NULL a hOSMemHandle) to a specified + device virtual address. + + PARAMETERS: In: pMMUHeap - the mmu. + In: MapBaseDevVAddr - A page aligned device virtual address + to start mapping from. + In: ui32ChunkSize - Size of the chunk (must be page multiple) + In: ui32NumVirtChunks - Number of virtual chunks + In: ui32NumPhysChunks - Number of physical chunks + In: pabMapChunk - Mapping array + In: CpuVAddr - A page aligned CPU virtual address. + In: hOSMemHandle - An alternative OS specific memory handle + for mapping RAM without a CPU virtual + address + Out: pDevVAddr - deprecated - It used to return a byte aligned + device virtual address corresponding to the + cpu virtual address (When CpuVAddr wasn't + constrained to be page aligned.) Now it just + returns MapBaseDevVAddr. Unaligned semantics + can easily be handled above this API if required. + In: hUniqueTag - A unique ID for use as a tag identifier + In: ui32MemFlags - page table flags. 
+ RETURNS: None +******************************************************************************/ +IMG_VOID +MMU_MapShadowSparse (MMU_HEAP *pMMUHeap, + IMG_DEV_VIRTADDR MapBaseDevVAddr, + IMG_UINT32 ui32ChunkSize, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 ui32NumPhysChunks, + IMG_BOOL *pabMapChunk, + IMG_CPU_VIRTADDR CpuVAddr, + IMG_HANDLE hOSMemHandle, + IMG_DEV_VIRTADDR *pDevVAddr, + IMG_UINT32 ui32MemFlags, + IMG_HANDLE hUniqueTag) +{ + IMG_UINT32 i; + IMG_UINT32 uOffset = 0; + IMG_DEV_VIRTADDR MapDevVAddr; + IMG_UINT32 ui32VAdvance; + IMG_UINT32 ui32PAdvance; + IMG_SIZE_T uiSizeVM = ui32ChunkSize * ui32NumVirtChunks; + IMG_UINT32 ui32ChunkIndex = 0; + IMG_UINT32 ui32ChunkOffset = 0; +#if !defined(PVRSRV_NEED_PVR_DPF) + PVR_UNREFERENCED_PARAMETER(ui32NumPhysChunks); +#endif +#if !defined (PDUMP) + PVR_UNREFERENCED_PARAMETER(hUniqueTag); +#endif + + PVR_DPF ((PVR_DBG_MESSAGE, + "MMU_MapShadowSparse: DevVAddr:%08X, VM space:0x%" SIZE_T_FMT_LEN "x, CPUVAddr:%p PHYS space:0x%x", + MapBaseDevVAddr.uiAddr, + uiSizeVM, + CpuVAddr, + ui32ChunkSize * ui32NumPhysChunks)); + + /* set the virtual and physical advance */ + ui32VAdvance = pMMUHeap->ui32DataPageSize; + ui32PAdvance = pMMUHeap->ui32DataPageSize; + + /* note: can't do useful check on the CPU Addr other than it being at least 4k alignment */ + PVR_ASSERT(((IMG_UINTPTR_T)CpuVAddr & (SGX_MMU_PAGE_SIZE - 1)) == 0); + PVR_ASSERT(((IMG_UINT32)uiSizeVM & pMMUHeap->ui32DataPageMask) == 0); + pDevVAddr->uiAddr = MapBaseDevVAddr.uiAddr; + + /* Shouldn't come through the sparse interface */ + PVR_ASSERT((ui32MemFlags & PVRSRV_MEM_DUMMY) == 0); + + /* Loop through cpu memory and map page by page */ + MapDevVAddr = MapBaseDevVAddr; + for (i=0; iui32DataPageMask) == 0); + + PVR_DPF ((PVR_DBG_MESSAGE, + "Offset=0x%x: CpuVAddr=%p, CpuPAddr=" CPUPADDR_FMT ", DevVAddr=%08X, DevPAddr=" DEVPADDR_FMT, + uOffset, + (void *)((IMG_UINTPTR_T)CpuVAddr + uOffset), + CpuPAddr.uiAddr, + MapDevVAddr.uiAddr, + DevPAddr.uiAddr)); + + 
MMU_MapPage (pMMUHeap, MapDevVAddr, DevPAddr, ui32MemFlags); + uOffset += ui32PAdvance; + } + + /* loop update */ + MapDevVAddr.uiAddr += ui32VAdvance; + + if (ui32ChunkOffset == ui32ChunkSize) + { + ui32ChunkIndex++; + ui32ChunkOffset = 0; + } + } + + pMMUHeap->bHasSparseMappings = IMG_TRUE; + +#if (SGX_FEATURE_PT_CACHE_ENTRIES_PER_LINE > 1) + MMU_InvalidatePageTableCache(pMMUHeap->psMMUContext->psDevInfo); +#endif + +#if defined(PDUMP) + MMU_PDumpPageTables (pMMUHeap, MapBaseDevVAddr, uiSizeVM, IMG_FALSE, hUniqueTag); +#endif /* #if defined(PDUMP) */ +} + +/*! +****************************************************************************** + FUNCTION: MMU_UnmapPages + + PURPOSE: unmap pages and invalidate virtual address + + PARAMETERS: In: psMMUHeap - the mmu. + In: sDevVAddr - the device virtual address. + In: ui32PageCount - page count + In: hUniqueTag - A unique ID for use as a tag identifier + + RETURNS: None +******************************************************************************/ +IMG_VOID +MMU_UnmapPages (MMU_HEAP *psMMUHeap, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_UINT32 ui32PageCount, + IMG_HANDLE hUniqueTag) +{ + IMG_UINT32 uPageSize = psMMUHeap->ui32DataPageSize; + IMG_DEV_VIRTADDR sTmpDevVAddr; + IMG_UINT32 i; + IMG_UINT32 ui32PDIndex; + IMG_UINT32 ui32PTIndex; + IMG_UINT32 *pui32Tmp; + +#if !defined (PDUMP) + PVR_UNREFERENCED_PARAMETER(hUniqueTag); +#endif + + /* setup tmp devvaddr to base of allocation */ + sTmpDevVAddr = sDevVAddr; + + for(i=0; i> psMMUHeap->ui32PDShift; + + /* and advance to the first PT info list */ + ppsPTInfoList = &psMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex]; + + /* find the index/offset of the first PT in the first PT page */ + ui32PTIndex = (sTmpDevVAddr.uiAddr & psMMUHeap->ui32PTMask) >> psMMUHeap->ui32PTShift; + + /* Is the PT page valid? 
*/ + if ((!ppsPTInfoList[0]) && (!psMMUHeap->bHasSparseMappings)) + { + PVR_DPF((PVR_DBG_ERROR, "MMU_UnmapPages: ERROR Invalid PT for alloc at VAddr:0x%08X (VaddrIni:0x%08X AllocPage:%u) PDIdx:%u PTIdx:%u", + sTmpDevVAddr.uiAddr, + sDevVAddr.uiAddr, + i, + ui32PDIndex, + ui32PTIndex)); + + /* advance the sTmpDevVAddr by one page */ + sTmpDevVAddr.uiAddr += uPageSize; + + /* Try to unmap the remaining allocation pages */ + continue; + } + + CheckPT(ppsPTInfoList[0]); + + /* setup pointer to the first entry in the PT page */ + pui32Tmp = (IMG_UINT32*)ppsPTInfoList[0]->PTPageCpuVAddr; + + /* Decrement the valid page count only if the current page is valid*/ + if (pui32Tmp[ui32PTIndex] & SGX_MMU_PTE_VALID) + { + ppsPTInfoList[0]->ui32ValidPTECount--; + } + else + { + PVR_DPF((PVR_DBG_ERROR, "MMU_UnmapPages: Page is already invalid for alloc at VAddr:0x%08X (VAddrIni:0x%08X AllocPage:%u) PDIdx:%u PTIdx:%u", + sTmpDevVAddr.uiAddr, + sDevVAddr.uiAddr, + i, + ui32PDIndex, + ui32PTIndex)); + PVR_DPF((PVR_DBG_ERROR, "MMU_UnmapPages: Page table entry value: 0x%08X", pui32Tmp[ui32PTIndex])); + } + + /* The page table count should not go below zero */ + PVR_ASSERT((IMG_INT32)ppsPTInfoList[0]->ui32ValidPTECount >= 0); + + MakeKernelPageReadWrite(ppsPTInfoList[0]->PTPageCpuVAddr); +#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE) + /* point the PT entry to the dummy data page */ + pui32Tmp[ui32PTIndex] = (psMMUHeap->psMMUContext->psDevInfo->sDummyDataDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT) + | SGX_MMU_PTE_VALID; +#else + /* invalidate entry */ +#if defined(FIX_HW_BRN_31620) + BRN31620InvalidatePageTableEntry(psMMUHeap->psMMUContext, ui32PDIndex, ui32PTIndex, &pui32Tmp[ui32PTIndex]); +#else + pui32Tmp[ui32PTIndex] = 0; +#endif +#endif + MakeKernelPageReadOnly(ppsPTInfoList[0]->PTPageCpuVAddr); + + CheckPT(ppsPTInfoList[0]); + + /* advance the sTmpDevVAddr by one page */ + sTmpDevVAddr.uiAddr += uPageSize; + } + + MMU_InvalidatePageTableCache(psMMUHeap->psMMUContext->psDevInfo); + 
+#if defined(PDUMP) + MMU_PDumpPageTables (psMMUHeap, sDevVAddr, uPageSize*ui32PageCount, IMG_TRUE, hUniqueTag); +#endif /* #if defined(PDUMP) */ +} + + +/*! +****************************************************************************** + FUNCTION: MMU_GetPhysPageAddr + + PURPOSE: extracts physical address from MMU page tables + + PARAMETERS: In: pMMUHeap - the mmu + PARAMETERS: In: sDevVPageAddr - the virtual address to extract physical + page mapping from + RETURNS: None +******************************************************************************/ +IMG_DEV_PHYADDR +MMU_GetPhysPageAddr(MMU_HEAP *pMMUHeap, IMG_DEV_VIRTADDR sDevVPageAddr) +{ + IMG_UINT32 *pui32PageTable; + IMG_UINT32 ui32Index; + IMG_DEV_PHYADDR sDevPAddr; + MMU_PT_INFO **ppsPTInfoList; + + /* find the index/offset in PD entries */ + ui32Index = sDevVPageAddr.uiAddr >> pMMUHeap->ui32PDShift; + + /* and advance to the first PT info list */ + ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32Index]; + if (!ppsPTInfoList[0]) + { + /* Heaps with sparse mappings are allowed invalid pages */ + if (!pMMUHeap->bHasSparseMappings) + { + PVR_DPF((PVR_DBG_ERROR,"MMU_GetPhysPageAddr: Not mapped in at 0x%08x", sDevVPageAddr.uiAddr)); + } + sDevPAddr.uiAddr = 0; + return sDevPAddr; + } + + /* find the index/offset of the first PT in the first PT page */ + ui32Index = (sDevVPageAddr.uiAddr & pMMUHeap->ui32PTMask) >> pMMUHeap->ui32PTShift; + + /* setup pointer to the first entry in the PT page */ + pui32PageTable = (IMG_UINT32*)ppsPTInfoList[0]->PTPageCpuVAddr; + + /* read back physical page */ + sDevPAddr.uiAddr = pui32PageTable[ui32Index]; + + /* Mask off non-address bits */ + sDevPAddr.uiAddr &= ~(pMMUHeap->ui32DataPageMask>>SGX_MMU_PTE_ADDR_ALIGNSHIFT); + + /* and align the address */ + sDevPAddr.uiAddr <<= SGX_MMU_PTE_ADDR_ALIGNSHIFT; + + return sDevPAddr; +} + + +IMG_DEV_PHYADDR MMU_GetPDDevPAddr(MMU_CONTEXT *pMMUContext) +{ + return (pMMUContext->sPDDevPAddr); +} + + +/*! 
+****************************************************************************** + FUNCTION: SGXGetPhysPageAddr + + PURPOSE: Gets DEV and CPU physical address of sDevVAddr + + PARAMETERS: In: hDevMemHeap - device mem heap handle + PARAMETERS: In: sDevVAddr - the base virtual address to unmap from + PARAMETERS: Out: pDevPAddr - DEV physical address + PARAMETERS: Out: pCpuPAddr - CPU physical address + RETURNS: None +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR SGXGetPhysPageAddrKM (IMG_HANDLE hDevMemHeap, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_DEV_PHYADDR *pDevPAddr, + IMG_CPU_PHYADDR *pCpuPAddr) +{ + MMU_HEAP *pMMUHeap; + IMG_DEV_PHYADDR DevPAddr; + + /* + Get MMU Heap From hDevMemHeap + */ + pMMUHeap = (MMU_HEAP*)BM_GetMMUHeap(hDevMemHeap); + + DevPAddr = MMU_GetPhysPageAddr(pMMUHeap, sDevVAddr); + pCpuPAddr->uiAddr = DevPAddr.uiAddr; /* SysDevPAddrToCPUPAddr(DevPAddr) */ + pDevPAddr->uiAddr = DevPAddr.uiAddr; + + return (pDevPAddr->uiAddr != 0) ? PVRSRV_OK : PVRSRV_ERROR_INVALID_PARAMS; +} + + +/*! +****************************************************************************** + FUNCTION: SGXGetMMUPDAddrKM + + PURPOSE: Gets PD device physical address of hDevMemContext + + PARAMETERS: In: hDevCookie - device cookie + PARAMETERS: In: hDevMemContext - memory context + PARAMETERS: Out: psPDDevPAddr - MMU PD address + RETURNS: None +******************************************************************************/ +PVRSRV_ERROR SGXGetMMUPDAddrKM(IMG_HANDLE hDevCookie, + IMG_HANDLE hDevMemContext, + IMG_DEV_PHYADDR *psPDDevPAddr) +{ + if (!hDevCookie || !hDevMemContext || !psPDDevPAddr) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* return the address */ + *psPDDevPAddr = ((BM_CONTEXT*)hDevMemContext)->psMMUContext->sPDDevPAddr; + + return PVRSRV_OK; +} + +/*! 
+****************************************************************************** + FUNCTION: MMU_BIFResetPDAlloc + + PURPOSE: Allocate a dummy Page Directory, Page Table and Page which can + be used for dynamic dummy page mapping during SGX reset. + Note: since this is only used for hardware recovery, no + pdumping is performed. + + PARAMETERS: In: psDevInfo - device info + RETURNS: PVRSRV_OK or error +******************************************************************************/ +PVRSRV_ERROR MMU_BIFResetPDAlloc(PVRSRV_SGXDEV_INFO *psDevInfo) +{ + PVRSRV_ERROR eError; + SYS_DATA *psSysData; + RA_ARENA *psLocalDevMemArena; + IMG_HANDLE hOSMemHandle = IMG_NULL; + IMG_BYTE *pui8MemBlock = IMG_NULL; + IMG_SYS_PHYADDR sMemBlockSysPAddr; + IMG_CPU_PHYADDR sMemBlockCpuPAddr; + + SysAcquireData(&psSysData); + + psLocalDevMemArena = psSysData->apsLocalDevMemArena[0]; + + /* allocate 3 pages - for the PD, PT and dummy page */ + if(psLocalDevMemArena == IMG_NULL) + { + /* UMA system */ + eError = OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY, + 3 * SGX_MMU_PAGE_SIZE, + SGX_MMU_PAGE_SIZE, + IMG_NULL, + 0, + IMG_NULL, + (IMG_VOID **)&pui8MemBlock, + &hOSMemHandle); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "MMU_BIFResetPDAlloc: ERROR call to OSAllocPages failed")); + return eError; + } + + /* translate address to device physical */ + if(pui8MemBlock) + { + sMemBlockCpuPAddr = OSMapLinToCPUPhys(hOSMemHandle, + pui8MemBlock); + } + else + { + /* This isn't used in all cases since not all ports currently support + * OSMemHandleToCpuPAddr() */ + sMemBlockCpuPAddr = OSMemHandleToCpuPAddr(hOSMemHandle, 0); + } + } + else + { + /* non-UMA system */ + + /* + We cannot use IMG_SYS_PHYADDR here, as that is 64-bit for 32-bit PAE builds. 
+ The physical address in this call to RA_Alloc is specifically the SysPAddr + of local (card) space, and it is highly unlikely we would ever need to + support > 4GB of local (card) memory (this does assume that such local + memory will be mapped into System physical memory space at a low address so + that any and all local memory exists within the 4GB SYSPAddr range). + */ + IMG_UINTPTR_T uiLocalPAddr; + + if(RA_Alloc(psLocalDevMemArena, + 3 * SGX_MMU_PAGE_SIZE, + IMG_NULL, + IMG_NULL, + 0, + SGX_MMU_PAGE_SIZE, + 0, + IMG_NULL, + 0, + &uiLocalPAddr) != IMG_TRUE) + { + PVR_DPF((PVR_DBG_ERROR, "MMU_BIFResetPDAlloc: ERROR call to RA_Alloc failed")); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + /* Munge the local PAddr back into the SysPAddr */ + sMemBlockSysPAddr.uiAddr = uiLocalPAddr; + + /* derive the CPU virtual address */ + sMemBlockCpuPAddr = SysSysPAddrToCpuPAddr(sMemBlockSysPAddr); + pui8MemBlock = OSMapPhysToLin(sMemBlockCpuPAddr, + SGX_MMU_PAGE_SIZE * 3, + PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY, + &hOSMemHandle); + if(!pui8MemBlock) + { + PVR_DPF((PVR_DBG_ERROR, "MMU_BIFResetPDAlloc: ERROR failed to map page tables")); + return PVRSRV_ERROR_BAD_MAPPING; + } + } + + PVR_ASSERT(pui8MemBlock != IMG_NULL); + + psDevInfo->hBIFResetPDOSMemHandle = hOSMemHandle; + psDevInfo->sBIFResetPDDevPAddr = SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sMemBlockCpuPAddr); + psDevInfo->sBIFResetPTDevPAddr.uiAddr = psDevInfo->sBIFResetPDDevPAddr.uiAddr + SGX_MMU_PAGE_SIZE; + psDevInfo->sBIFResetPageDevPAddr.uiAddr = psDevInfo->sBIFResetPTDevPAddr.uiAddr + SGX_MMU_PAGE_SIZE; + /* override pointer cast warnings */ + /* PRQA S 3305,509 2 */ + psDevInfo->pui32BIFResetPD = (IMG_UINT32 *)pui8MemBlock; + psDevInfo->pui32BIFResetPT = (IMG_UINT32 *)(pui8MemBlock + SGX_MMU_PAGE_SIZE); + + /* Invalidate entire PD and PT. */ + OSMemSet(psDevInfo->pui32BIFResetPD, 0, SGX_MMU_PAGE_SIZE); + OSMemSet(psDevInfo->pui32BIFResetPT, 0, SGX_MMU_PAGE_SIZE); + /* Fill dummy page with markers. 
*/ + OSMemSet(pui8MemBlock + (2 * SGX_MMU_PAGE_SIZE), 0xDB, SGX_MMU_PAGE_SIZE); + + return PVRSRV_OK; +} + +/*! +****************************************************************************** + FUNCTION: MMU_BIFResetPDFree + + PURPOSE: Free resources allocated in MMU_BIFResetPDAlloc. + + PARAMETERS: In: psDevInfo - device info + RETURNS: +******************************************************************************/ +IMG_VOID MMU_BIFResetPDFree(PVRSRV_SGXDEV_INFO *psDevInfo) +{ + SYS_DATA *psSysData; + RA_ARENA *psLocalDevMemArena; + IMG_SYS_PHYADDR sPDSysPAddr; + + SysAcquireData(&psSysData); + + psLocalDevMemArena = psSysData->apsLocalDevMemArena[0]; + + /* free the page directory */ + if(psLocalDevMemArena == IMG_NULL) + { + OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY, + 3 * SGX_MMU_PAGE_SIZE, + psDevInfo->pui32BIFResetPD, + psDevInfo->hBIFResetPDOSMemHandle); + } + else + { + OSUnMapPhysToLin(psDevInfo->pui32BIFResetPD, + 3 * SGX_MMU_PAGE_SIZE, + PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY, + psDevInfo->hBIFResetPDOSMemHandle); + + sPDSysPAddr = SysDevPAddrToSysPAddr(PVRSRV_DEVICE_TYPE_SGX, psDevInfo->sBIFResetPDDevPAddr); + /* Note that the cast to IMG_UINTPTR_T is ok as we're local mem. 
*/ + RA_Free(psLocalDevMemArena, (IMG_UINTPTR_T)sPDSysPAddr.uiAddr, IMG_FALSE); + } +} + +IMG_VOID MMU_CheckFaultAddr(PVRSRV_SGXDEV_INFO *psDevInfo, IMG_UINT32 ui32PDDevPAddr, IMG_UINT32 ui32FaultAddr) +{ + MMU_CONTEXT *psMMUContext = psDevInfo->pvMMUContextList; + + while (psMMUContext && (psMMUContext->sPDDevPAddr.uiAddr != ui32PDDevPAddr)) + { + psMMUContext = psMMUContext->psNext; + } + + if (psMMUContext) + { + IMG_UINT32 ui32PTIndex; + IMG_UINT32 ui32PDIndex; + + PVR_LOG(("Found MMU context for page fault 0x%08x", ui32FaultAddr)); + PVR_LOG(("GPU memory context is for PID=%d (%s)", psMMUContext->ui32PID, psMMUContext->szName)); + + ui32PTIndex = (ui32FaultAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT; + ui32PDIndex = (ui32FaultAddr & SGX_MMU_PD_MASK) >> (SGX_MMU_PT_SHIFT + SGX_MMU_PAGE_SHIFT); + + if (psMMUContext->apsPTInfoList[ui32PDIndex]) + { + if (psMMUContext->apsPTInfoList[ui32PDIndex]->PTPageCpuVAddr) + { + IMG_UINT32 *pui32Ptr = psMMUContext->apsPTInfoList[ui32PDIndex]->PTPageCpuVAddr; + IMG_UINT32 ui32PTE = pui32Ptr[ui32PTIndex]; + + PVR_LOG(("PDE valid: PTE = 0x%08x (PhysAddr = 0x%08x, %s)", + ui32PTE, + ui32PTE & SGX_MMU_PTE_ADDR_MASK, + ui32PTE & SGX_MMU_PTE_VALID?"valid":"Invalid")); + } + else + { + PVR_LOG(("Found PT info but no CPU address")); + } + } + else + { + PVR_LOG(("No PDE found")); + } + } +} + +#if defined(SUPPORT_EXTERNAL_SYSTEM_CACHE) +/*! 
+****************************************************************************** + FUNCTION: MMU_MapExtSystemCacheRegs + + PURPOSE: maps external system cache control registers into SGX MMU + + PARAMETERS: In: psDeviceNode - device node + RETURNS: +******************************************************************************/ +PVRSRV_ERROR MMU_MapExtSystemCacheRegs(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + IMG_UINT32 *pui32PT; + PVRSRV_SGXDEV_INFO *psDevInfo; + IMG_UINT32 ui32PDIndex; + IMG_UINT32 ui32PTIndex; + PDUMP_MMU_ATTRIB sMMUAttrib; + + psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice; + + sMMUAttrib = psDevInfo->sMMUAttrib; +#if defined(PDUMP) + MMU_SetPDumpAttribs(&sMMUAttrib, psDeviceNode, + SGX_MMU_PAGE_MASK, + SGX_MMU_PT_SIZE * sizeof(IMG_UINT32)); +#endif + +#if defined(PDUMP) + { + IMG_CHAR szScript[128]; + + sprintf(szScript, "MALLOC :EXTSYSCACHE:PA_%08X%08X %u %u 0x%p\r\n", 0, psDevInfo->sExtSysCacheRegsDevPBase.uiAddr, SGX_MMU_PAGE_SIZE, SGX_MMU_PAGE_SIZE, psDevInfo->sExtSysCacheRegsDevPBase.uiAddr); + PDumpOSWriteString2(szScript, PDUMP_FLAGS_CONTINUOUS); + } +#endif + + ui32PDIndex = (SGX_EXT_SYSTEM_CACHE_REGS_DEVVADDR_BASE & SGX_MMU_PD_MASK) >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT); + ui32PTIndex = (SGX_EXT_SYSTEM_CACHE_REGS_DEVVADDR_BASE & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT; + + pui32PT = (IMG_UINT32 *) psDeviceNode->sDevMemoryInfo.pBMKernelContext->psMMUContext->apsPTInfoList[ui32PDIndex]->PTPageCpuVAddr; + + MakeKernelPageReadWrite(pui32PT); + /* map the PT to the registers */ + pui32PT[ui32PTIndex] = (psDevInfo->sExtSysCacheRegsDevPBase.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT) + | SGX_MMU_PTE_VALID; + MakeKernelPageReadOnly(pui32PT); +#if defined(PDUMP) + /* Add the entery to the PT */ + { + IMG_DEV_PHYADDR sDevPAddr; + IMG_CPU_PHYADDR sCpuPAddr; + IMG_UINT32 ui32PageMask; + IMG_UINT32 ui32PTE; + PVRSRV_ERROR eErr; + + PDUMP_GET_SCRIPT_AND_FILE_STRING(); + + ui32PageMask = sMMUAttrib.ui32PTSize - 1; + sCpuPAddr = 
OSMapLinToCPUPhys(psDeviceNode->sDevMemoryInfo.pBMKernelContext->psMMUContext->apsPTInfoList[ui32PDIndex]->hPTPageOSMemHandle, &pui32PT[ui32PTIndex]); + sDevPAddr = SysCpuPAddrToDevPAddr(sMMUAttrib.sDevId.eDeviceType, sCpuPAddr); + ui32PTE = *((IMG_UINT32 *) (&pui32PT[ui32PTIndex])); + + eErr = PDumpOSBufprintf(hScript, + ui32MaxLenScript, + "WRW :%s:PA_%p%p:0x%08X :%s:PA_%p%08X:0x%08X\r\n", + sMMUAttrib.sDevId.pszPDumpDevName, + PDUMP_PT_UNIQUETAG, + (IMG_PVOID)((sDevPAddr.uiAddr) & ~ui32PageMask), + (sDevPAddr.uiAddr) & ui32PageMask, + "EXTSYSCACHE", + PDUMP_PD_UNIQUETAG, + (ui32PTE & sMMUAttrib.ui32PDEMask) << sMMUAttrib.ui32PTEAlignShift, + ui32PTE & ~sMMUAttrib.ui32PDEMask); + if(eErr != PVRSRV_OK) + { + return eErr; + } + PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS); + } +#endif + + return PVRSRV_OK; +} + + +/*! +****************************************************************************** + FUNCTION: MMU_UnmapExtSystemCacheRegs + + PURPOSE: unmaps external system cache control registers + + PARAMETERS: In: psDeviceNode - device node + RETURNS: +******************************************************************************/ +PVRSRV_ERROR MMU_UnmapExtSystemCacheRegs(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + SYS_DATA *psSysData; + RA_ARENA *psLocalDevMemArena; + PVRSRV_SGXDEV_INFO *psDevInfo; + IMG_UINT32 ui32PDIndex; + IMG_UINT32 ui32PTIndex; + IMG_UINT32 *pui32PT; + PDUMP_MMU_ATTRIB sMMUAttrib; + + psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice; + + sMMUAttrib = psDevInfo->sMMUAttrib; + +#if defined(PDUMP) + MMU_SetPDumpAttribs(&sMMUAttrib, psDeviceNode, + SGX_MMU_PAGE_MASK, + SGX_MMU_PT_SIZE * sizeof(IMG_UINT32)); +#endif + SysAcquireData(&psSysData); + + psLocalDevMemArena = psSysData->apsLocalDevMemArena[0]; + + /* unmap the MMU page table from the PD */ + ui32PDIndex = (SGX_EXT_SYSTEM_CACHE_REGS_DEVVADDR_BASE & SGX_MMU_PD_MASK) >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT); + ui32PTIndex = (SGX_EXT_SYSTEM_CACHE_REGS_DEVVADDR_BASE & 
SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT; + + /* Only unmap it if the PT hasn't already been freed */ + if (psDeviceNode->sDevMemoryInfo.pBMKernelContext->psMMUContext->apsPTInfoList[ui32PDIndex]) + { + if (psDeviceNode->sDevMemoryInfo.pBMKernelContext->psMMUContext->apsPTInfoList[ui32PDIndex]->PTPageCpuVAddr) + { + pui32PT = (IMG_UINT32 *) psDeviceNode->sDevMemoryInfo.pBMKernelContext->psMMUContext->apsPTInfoList[ui32PDIndex]->PTPageCpuVAddr; + } + } + + MakeKernelPageReadWrite(pui32PT); + pui32PT[ui32PTIndex] = 0; + MakeKernelPageReadOnly(pui32PT); + + PDUMPMEMPTENTRIES(&sMMUAttrib, psDeviceNode->sDevMemoryInfo.pBMKernelContext->psMMUContext->hPDOSMemHandle, &pui32PT[ui32PTIndex], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG); + + return PVRSRV_OK; +} +#endif + + +#if PAGE_TEST +/*! +****************************************************************************** + FUNCTION: PageTest + + PURPOSE: Tests page table memory, for use during device bring-up. + + PARAMETERS: In: void* pMem - page address (CPU mapped) + PARAMETERS: In: IMG_DEV_PHYADDR sDevPAddr - page device phys address + RETURNS: None, provides debug output and breaks if an error is detected. 
+******************************************************************************/ +static IMG_VOID PageTest(IMG_VOID* pMem, IMG_DEV_PHYADDR sDevPAddr) +{ + volatile IMG_UINT32 ui32WriteData; + volatile IMG_UINT32 ui32ReadData; + volatile IMG_UINT32 *pMem32 = (volatile IMG_UINT32 *)pMem; + IMG_INT n; + IMG_BOOL bOK=IMG_TRUE; + + ui32WriteData = 0xffffffff; + + for (n=0; n<1024; n++) + { + pMem32[n] = ui32WriteData; + ui32ReadData = pMem32[n]; + + if (ui32WriteData != ui32ReadData) + { + // Mem fault + PVR_DPF ((PVR_DBG_ERROR, "Error - memory page test failed at device phys address 0x" DEVPADDR_FMT, sDevPAddr.uiAddr + (n<<2) )); + PVR_DBG_BREAK; + bOK = IMG_FALSE; + } + } + + ui32WriteData = 0; + + for (n=0; n<1024; n++) + { + pMem32[n] = ui32WriteData; + ui32ReadData = pMem32[n]; + + if (ui32WriteData != ui32ReadData) + { + // Mem fault + PVR_DPF ((PVR_DBG_ERROR, "Error - memory page test failed at device phys address 0x" DEVPADDR_FMT, sDevPAddr.uiAddr + (n<<2))); + PVR_DBG_BREAK; + bOK = IMG_FALSE; + } + } + + if (bOK) + { + PVR_DPF ((PVR_DBG_VERBOSE, "MMU Page 0x" DEVPADDR_FMT " is OK", sDevPAddr.uiAddr)); + } + else + { + PVR_DPF ((PVR_DBG_VERBOSE, "MMU Page 0x" DEVPADDR_FMT " *** FAILED ***", sDevPAddr.uiAddr)); + } +} +#endif + +/****************************************************************************** + End of file (mmu.c) +******************************************************************************/ + + diff --git a/sgx_km/eurasia_km/services4/srvkm/devices/sgx/mmu.h b/sgx_km/eurasia_km/services4/srvkm/devices/sgx/mmu.h new file mode 100644 index 0000000..3c849fc --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/devices/sgx/mmu.h @@ -0,0 +1,501 @@ +/*************************************************************************/ /*! +@Title MMU Management +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements basic low level control of MMU. 
+@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef _MMU_H_ +#define _MMU_H_ + +#include "sgxinfokm.h" + +/* +****************************************************************************** + FUNCTION: MMU_Initialise + + PURPOSE: Initialise the mmu module. + + PARAMETERS: None + RETURNS: PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR +MMU_Initialise (PVRSRV_DEVICE_NODE *psDeviceNode, MMU_CONTEXT **ppsMMUContext, IMG_DEV_PHYADDR *psPDDevPAddr); + +/* +****************************************************************************** + FUNCTION: MMU_Finalise + + PURPOSE: Finalise the mmu module, deallocate all resources. + + PARAMETERS: None. + RETURNS: None. +******************************************************************************/ +IMG_VOID +MMU_Finalise (MMU_CONTEXT *psMMUContext); + + +/* +****************************************************************************** + FUNCTION: MMU_InsertHeap + + PURPOSE: Inserts shared heap into the specified context + from the kernel context + + PARAMETERS: None. + RETURNS: None. +******************************************************************************/ +IMG_VOID +MMU_InsertHeap(MMU_CONTEXT *psMMUContext, MMU_HEAP *psMMUHeap); + +/* +****************************************************************************** + FUNCTION: MMU_Create + + PURPOSE: Create an mmu device. 
+ + PARAMETERS: In: psMMUContext - + In: psDevArena - + Out: ppsVMArena + RETURNS: MMU_HEAP +******************************************************************************/ +MMU_HEAP * +MMU_Create (MMU_CONTEXT *psMMUContext, + DEV_ARENA_DESCRIPTOR *psDevArena, + RA_ARENA **ppsVMArena, + PDUMP_MMU_ATTRIB **ppsMMUAttrib); + +/* +****************************************************************************** + FUNCTION: MMU_Delete + + PURPOSE: Delete an mmu device. + + PARAMETERS: In: pMMUHeap - The mmu to delete. + RETURNS: +******************************************************************************/ +IMG_VOID +MMU_Delete (MMU_HEAP *pMMUHeap); + +/* +****************************************************************************** + FUNCTION: MMU_Alloc + PURPOSE: Allocate space in an mmu's virtual address space. + PARAMETERS: In: pMMUHeap - MMU to allocate on. + In: uSize - Size in bytes to allocate. + Out: pActualSize - If non null receives actual size allocated. + In: uFlags - Allocation flags. + In: uDevVAddrAlignment - Required alignment. + Out: pDevVAddr - Receives base address of allocation. + RETURNS: IMG_TRUE - Success + IMG_FALSE - Failure +******************************************************************************/ +IMG_BOOL +MMU_Alloc (MMU_HEAP *pMMUHeap, + IMG_SIZE_T uSize, + IMG_SIZE_T *pActualSize, + IMG_UINT32 uFlags, + IMG_UINT32 uDevVAddrAlignment, + IMG_DEV_VIRTADDR *pDevVAddr); + +/* +****************************************************************************** + FUNCTION: MMU_Free + PURPOSE: Frees space in an mmu's virtual address space. + PARAMETERS: In: pMMUHeap - MMU to free on. + In: DevVAddr - Base address of allocation. 
+ RETURNS: IMG_TRUE - Success + IMG_FALSE - Failure +******************************************************************************/ +IMG_VOID +MMU_Free (MMU_HEAP *pMMUHeap, + IMG_DEV_VIRTADDR DevVAddr, + IMG_UINT32 ui32Size); + +/* +****************************************************************************** + FUNCTION: MMU_Enable + + PURPOSE: Enable an mmu. Establishes pages tables and takes the mmu out + of bypass and waits for the mmu to acknowledge enabled. + + PARAMETERS: In: pMMUHeap - the mmu + RETURNS: None +******************************************************************************/ +IMG_VOID +MMU_Enable (MMU_HEAP *pMMUHeap); + +/* +****************************************************************************** + FUNCTION: MMU_Disable + + PURPOSE: Disable an mmu, takes the mmu into bypass. + + PARAMETERS: In: pMMUHeap - the mmu + RETURNS: None +******************************************************************************/ +IMG_VOID +MMU_Disable (MMU_HEAP *pMMUHeap); + +/* +****************************************************************************** + FUNCTION: MMU_MapPages + + PURPOSE: Create a mapping for a range of pages from a device physical + adddress to a specified device virtual address. + + PARAMETERS: In: pMMUHeap - the mmu. + In: DevVAddr - the device virtual address. + In: SysPAddr - the system physical address of the page to map. + In: uSize - size of memory range in bytes + In: ui32MemFlags - page table flags. 
+ In: hUniqueTag - A unique ID for use as a tag identifier + RETURNS: None +******************************************************************************/ +IMG_VOID +MMU_MapPages (MMU_HEAP *pMMUHeap, + IMG_DEV_VIRTADDR DevVAddr, + IMG_SYS_PHYADDR SysPAddr, + IMG_SIZE_T uSize, + IMG_UINT32 ui32MemFlags, + IMG_HANDLE hUniqueTag); + +/* +****************************************************************************** + FUNCTION: MMU_MapPagesSparse + + PURPOSE: Create a mapping for a range of pages from a device physical + adddress to a specified device virtual address. + + PARAMETERS: In: pMMUHeap - the mmu. + In: DevVAddr - the device virtual address. + In: SysPAddr - the system physical address of the page to map. + In: ui32ChunkSize - Size of the chunk (must be page multiple) + In: ui32NumVirtChunks - Number of virtual chunks + In: ui32NumPhysChunks - Number of physical chunks + In: pabMapChunk - Mapping array + In: ui32MemFlags - page table flags. + In: hUniqueTag - A unique ID for use as a tag identifier + RETURNS: None +******************************************************************************/ +IMG_VOID +MMU_MapPagesSparse (MMU_HEAP *pMMUHeap, + IMG_DEV_VIRTADDR DevVAddr, + IMG_SYS_PHYADDR SysPAddr, + IMG_UINT32 ui32ChunkSize, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 ui32NumPhysChunks, + IMG_BOOL *pabMapChunk, + IMG_UINT32 ui32MemFlags, + IMG_HANDLE hUniqueTag); + +/* +****************************************************************************** + FUNCTION: MMU_MapShadow + + PURPOSE: Create a mapping for a range of pages from a CPU virtual + adddress to a specified device virtual address. + + PARAMETERS: In: pMMUHeap - the mmu. + In: MapBaseDevVAddr - A page aligned device virtual address + to start mapping from. + In: uByteSize - A page aligned mapping length in bytes. + In: CpuVAddr - A page aligned CPU virtual address. 
+ In: hOSMemHandle - An alternative OS specific memory handle + for mapping RAM without a CPU virtual + address + Out: pDevVAddr - deprecated + In: hUniqueTag - A unique ID for use as a tag identifier + In: ui32MemFlags - page table flags. + RETURNS: None +******************************************************************************/ +IMG_VOID +MMU_MapShadow (MMU_HEAP * pMMUHeap, + IMG_DEV_VIRTADDR MapBaseDevVAddr, + IMG_SIZE_T uByteSize, + IMG_CPU_VIRTADDR CpuVAddr, + IMG_HANDLE hOSMemHandle, + IMG_DEV_VIRTADDR * pDevVAddr, + IMG_UINT32 ui32MemFlags, + IMG_HANDLE hUniqueTag); + +/* +****************************************************************************** + FUNCTION: MMU_MapShadowSparse + + PURPOSE: Create a mapping for a range of pages from a CPU virtual + adddress to a specified device virtual address. + + PARAMETERS: In: pMMUHeap - the mmu. + In: MapBaseDevVAddr - A page aligned device virtual address + to start mapping from. + In: ui32ChunkSize - Size of the chunk (must be page multiple) + In: ui32NumVirtChunks - Number of virtual chunks + In: ui32NumPhysChunks - Number of physical chunks + In: pabMapChunk - Mapping array + In: CpuVAddr - A page aligned CPU virtual address. + In: hOSMemHandle - An alternative OS specific memory handle + for mapping RAM without a CPU virtual + address + Out: pDevVAddr - deprecated + In: hUniqueTag - A unique ID for use as a tag identifier + In: ui32MemFlags - page table flags. 
+ RETURNS: None +******************************************************************************/ +IMG_VOID +MMU_MapShadowSparse (MMU_HEAP * pMMUHeap, + IMG_DEV_VIRTADDR MapBaseDevVAddr, + IMG_UINT32 ui32ChunkSize, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 ui32NumPhysChunks, + IMG_BOOL * pabMapChunk, + IMG_CPU_VIRTADDR CpuVAddr, + IMG_HANDLE hOSMemHandle, + IMG_DEV_VIRTADDR * pDevVAddr, + IMG_UINT32 ui32MemFlags, + IMG_HANDLE hUniqueTag); + +/* +****************************************************************************** + FUNCTION: MMU_UnmapPages + + PURPOSE: unmaps pages and invalidates virtual address. + + PARAMETERS: In: psMMUHeap - the mmu. + In: sDevVAddr - the device virtual address. + In: ui32PageCount - page count. + RETURNS: None +******************************************************************************/ +IMG_VOID +MMU_UnmapPages (MMU_HEAP *psMMUHeap, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_UINT32 ui32PageCount, + IMG_HANDLE hUniqueTag); + +/* +****************************************************************************** + FUNCTION: MMU_MapScatter + + PURPOSE: Create a mapping for a list of pages to a specified device + virtual address. + + PARAMETERS: In: pMMUHeap - the mmu. + In: DevVAddr - the device virtual address. + In: psSysAddr - the list of physical addresses of the pages to + map. 
+ RETURNS: None +******************************************************************************/ +IMG_VOID +MMU_MapScatter (MMU_HEAP *pMMUHeap, + IMG_DEV_VIRTADDR DevVAddr, + IMG_SYS_PHYADDR *psSysAddr, + IMG_SIZE_T uSize, + IMG_UINT32 ui32MemFlags, + IMG_HANDLE hUniqueTag); + + +/* +****************************************************************************** + FUNCTION: MMU_GetPhysPageAddr + + PURPOSE: extracts physical address from MMU page tables + + PARAMETERS: In: pMMUHeap - the mmu + PARAMETERS: In: sDevVPageAddr - the virtual address to extract physical + page mapping from + RETURNS: IMG_DEV_PHYADDR +******************************************************************************/ +IMG_DEV_PHYADDR +MMU_GetPhysPageAddr(MMU_HEAP *pMMUHeap, IMG_DEV_VIRTADDR sDevVPageAddr); + + +/* +****************************************************************************** + FUNCTION: MMU_GetPDDevPAddr + + PURPOSE: returns PD given the MMU context (SGX to MMU API) + + PARAMETERS: In: pMMUContext - the mmu + RETURNS: IMG_DEV_PHYADDR +******************************************************************************/ +IMG_DEV_PHYADDR +MMU_GetPDDevPAddr(MMU_CONTEXT *pMMUContext); + + +#ifdef SUPPORT_SGX_MMU_BYPASS +/* +****************************************************************************** + FUNCTION: EnableHostAccess + + PURPOSE: Enables Host accesses to device memory, by passing the device + MMU address translation + + PARAMETERS: In: psMMUContext + RETURNS: None +******************************************************************************/ +IMG_VOID +EnableHostAccess (MMU_CONTEXT *psMMUContext); + + +/* +****************************************************************************** + FUNCTION: DisableHostAccess + + PURPOSE: Disables Host accesses to device memory, by passing the device + MMU address translation + + PARAMETERS: In: psMMUContext + RETURNS: None +******************************************************************************/ +IMG_VOID +DisableHostAccess 
(MMU_CONTEXT *psMMUContext); +#endif + +/* +****************************************************************************** + FUNCTION: MMU_InvalidateDirectoryCache + + PURPOSE: Invalidates the page directory cache + + PARAMETERS: In: psDevInfo + RETURNS: None +******************************************************************************/ +IMG_VOID MMU_InvalidateDirectoryCache(PVRSRV_SGXDEV_INFO *psDevInfo); + +/* +****************************************************************************** + FUNCTION: MMU_BIFResetPDAlloc + + PURPOSE: Allocate a dummy Page Directory which causes all virtual + addresses to page fault. + + PARAMETERS: In: psDevInfo - device info + RETURNS: PVRSRV_OK or error +******************************************************************************/ +PVRSRV_ERROR MMU_BIFResetPDAlloc(PVRSRV_SGXDEV_INFO *psDevInfo); + +/* +****************************************************************************** + FUNCTION: MMU_BIFResetPDFree + + PURPOSE: Free resources allocated in MMU_BIFResetPDAlloc. 
+ + PARAMETERS: In: psDevInfo - device info + RETURNS: +******************************************************************************/ +IMG_VOID MMU_BIFResetPDFree(PVRSRV_SGXDEV_INFO *psDevInfo); + +#if defined(SUPPORT_EXTERNAL_SYSTEM_CACHE) +/* +****************************************************************************** + FUNCTION: MMU_MapExtSystemCacheRegs + + PURPOSE: maps external system cache control registers into SGX MMU + + PARAMETERS: In: psDeviceNode - device node + RETURNS: +******************************************************************************/ +PVRSRV_ERROR MMU_MapExtSystemCacheRegs(PVRSRV_DEVICE_NODE *psDeviceNode); + +/* +****************************************************************************** + FUNCTION: MMU_UnmapExtSystemCacheRegs + + PURPOSE: unmaps external system cache control registers + + PARAMETERS: In: psDeviceNode - device node + RETURNS: +******************************************************************************/ +PVRSRV_ERROR MMU_UnmapExtSystemCacheRegs(PVRSRV_DEVICE_NODE *psDeviceNode); +#endif /* #if defined(SUPPORT_EXTERNAL_SYSTEM_CACHE) */ + +/* +****************************************************************************** + FUNCTION: MMU_IsHeapShared + + PURPOSE: Is this heap shared? + PARAMETERS: In: pMMU_Heap + RETURNS: true if heap is shared +******************************************************************************/ +IMG_BOOL MMU_IsHeapShared(MMU_HEAP* pMMU_Heap); + +#if defined(FIX_HW_BRN_31620) +/* +****************************************************************************** + FUNCTION: MMU_GetCacheFlushRange + + PURPOSE: Gets device physical address of the mmu context. 
+ + PARAMETERS: In: pMMUContext - the mmu context + Out: pui32RangeMask - Bit mask showing which PD cache + lines have changed + RETURNS: None +******************************************************************************/ +IMG_VOID MMU_GetCacheFlushRange(MMU_CONTEXT *pMMUContext, IMG_UINT32 *pui32RangeMask); + +/* +****************************************************************************** + FUNCTION: MMU_GetPDPhysAddr + + PURPOSE: Gets device physical address of the mmu contexts PD. + + PARAMETERS: In: pMMUContext - the mmu context + Out: psDevPAddr - Address of PD + RETURNS: None +******************************************************************************/ +IMG_VOID MMU_GetPDPhysAddr(MMU_CONTEXT *pMMUContext, IMG_DEV_PHYADDR *psDevPAddr); + +#endif + + +IMG_VOID MMU_CheckFaultAddr(PVRSRV_SGXDEV_INFO *psDevInfo, IMG_UINT32 ui32PDDevPAddr, IMG_UINT32 ui32RegVal); + +#if defined(PDUMP) +/* +****************************************************************************** + FUNCTION: MMU_GetPDumpContextID + + PURPOSE: translates device mem context to unique pdump identifier + + PARAMETERS: In: hDevMemContext - device memory per-process context + RETURNS: context identifier used internally in pdump +******************************************************************************/ +IMG_UINT32 MMU_GetPDumpContextID(IMG_HANDLE hDevMemContext); +#endif /* #ifdef PDUMP */ + +#endif /* #ifndef _MMU_H_ */ diff --git a/sgx_km/eurasia_km/services4/srvkm/devices/sgx/pb.c b/sgx_km/eurasia_km/services4/srvkm/devices/sgx/pb.c new file mode 100644 index 0000000..26e2ded --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/devices/sgx/pb.c @@ -0,0 +1,493 @@ +/*************************************************************************/ /*! +@Title Parameter Buffer management functions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+#include <stddef.h> + +#include "services_headers.h"
requested with different size (0x%x) from existing shared PB (0x%x) - requested size ignored", + ui32TotalPBSize, psStubPBDesc->ui32TotalPBSize)); + } + + if(OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, + sizeof(PVRSRV_KERNEL_MEM_INFO *) + * psStubPBDesc->ui32SubKernelMemInfosCount, + (IMG_VOID **)&ppsSharedPBDescSubKernelMemInfos, + IMG_NULL, + "Array of Kernel Memory Info") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "SGXFindSharedPBDescKM: OSAllocMem failed")); + + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto ExitNotFound; + } + + psResItem = ResManRegisterRes(psPerProc->hResManContext, + RESMAN_TYPE_SHARED_PB_DESC, + psStubPBDesc, + 0, + &SGXCleanupSharedPBDescCallback); + + if (psResItem == IMG_NULL) + { + OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, + sizeof(PVRSRV_KERNEL_MEM_INFO *) * psStubPBDesc->ui32SubKernelMemInfosCount, + ppsSharedPBDescSubKernelMemInfos, + 0); + /*not nulling pointer, out of scope*/ + + PVR_DPF((PVR_DBG_ERROR, "SGXFindSharedPBDescKM: ResManRegisterRes failed")); + + eError = PVRSRV_ERROR_UNABLE_TO_REGISTER_RESOURCE; + goto ExitNotFound; + } + + *ppsSharedPBDescKernelMemInfo = psStubPBDesc->psSharedPBDescKernelMemInfo; + *ppsHWPBDescKernelMemInfo = psStubPBDesc->psHWPBDescKernelMemInfo; + *ppsBlockKernelMemInfo = psStubPBDesc->psBlockKernelMemInfo; + *ppsHWBlockKernelMemInfo = psStubPBDesc->psHWBlockKernelMemInfo; + + *ui32SharedPBDescSubKernelMemInfosCount = + psStubPBDesc->ui32SubKernelMemInfosCount; + + *pppsSharedPBDescSubKernelMemInfos = ppsSharedPBDescSubKernelMemInfos; + + for(i=0; iui32SubKernelMemInfosCount; i++) + { + ppsSharedPBDescSubKernelMemInfos[i] = + psStubPBDesc->ppsSubKernelMemInfos[i]; + } + + psStubPBDesc->ui32RefCount++; + *phSharedPBDesc = (IMG_HANDLE)psResItem; + return PVRSRV_OK; + } + + eError = PVRSRV_OK; + if (bLockOnFailure) + { + if (psResItemCreateSharedPB == IMG_NULL) + { + psResItemCreateSharedPB = ResManRegisterRes(psPerProc->hResManContext, + RESMAN_TYPE_SHARED_PB_DESC_CREATE_LOCK, + psPerProc, + 0, + 
&SGXCleanupSharedPBDescCreateLockCallback); + + if (psResItemCreateSharedPB == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "SGXFindSharedPBDescKM: ResManRegisterRes failed")); + + eError = PVRSRV_ERROR_UNABLE_TO_REGISTER_RESOURCE; + goto ExitNotFound; + } + PVR_ASSERT(psPerProcCreateSharedPB == IMG_NULL); + psPerProcCreateSharedPB = psPerProc; + } + else + { + eError = PVRSRV_ERROR_PROCESSING_BLOCKED; + } + } +ExitNotFound: + *phSharedPBDesc = IMG_NULL; + + return eError; +} + + +static PVRSRV_ERROR +SGXCleanupSharedPBDescKM(PVRSRV_STUB_PBDESC *psStubPBDescIn) +{ + /*PVRSRV_STUB_PBDESC **ppsStubPBDesc;*/ + IMG_UINT32 i; + PVRSRV_DEVICE_NODE *psDeviceNode; + + psDeviceNode = (PVRSRV_DEVICE_NODE*)psStubPBDescIn->hDevCookie; + + psStubPBDescIn->ui32RefCount--; + if (psStubPBDescIn->ui32RefCount == 0) + { + IMG_DEV_VIRTADDR sHWPBDescDevVAddr = psStubPBDescIn->sHWPBDescDevVAddr; + List_PVRSRV_STUB_PBDESC_Remove(psStubPBDescIn); + for(i=0 ; iui32SubKernelMemInfosCount; i++) + { + PVRSRVFreeDeviceMemKM(psStubPBDescIn->hDevCookie, + psStubPBDescIn->ppsSubKernelMemInfos[i]); + } + + OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, + sizeof(PVRSRV_KERNEL_MEM_INFO *) * psStubPBDescIn->ui32SubKernelMemInfosCount, + psStubPBDescIn->ppsSubKernelMemInfos, + 0); + psStubPBDescIn->ppsSubKernelMemInfos = IMG_NULL; + + PVRSRVFreeSharedSysMemoryKM(psStubPBDescIn->psBlockKernelMemInfo); + + PVRSRVFreeDeviceMemKM(psStubPBDescIn->hDevCookie, psStubPBDescIn->psHWBlockKernelMemInfo); + + PVRSRVFreeDeviceMemKM(psStubPBDescIn->hDevCookie, psStubPBDescIn->psHWPBDescKernelMemInfo); + + PVRSRVFreeSharedSysMemoryKM(psStubPBDescIn->psSharedPBDescKernelMemInfo); + + OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, + sizeof(PVRSRV_STUB_PBDESC), + psStubPBDescIn, + 0); + /*not nulling pointer, copy on stack*/ + + /* signal the microkernel to clear its sTAHWPBDesc and s3DHWPBDesc values in sTA3DCtl */ + SGXCleanupRequest(psDeviceNode, + &sHWPBDescDevVAddr, + PVRSRV_CLEANUPCMD_PB, + CLEANUP_WITH_POLL); + } + return 
PVRSRV_OK; + /*return PVRSRV_ERROR_INVALID_PARAMS;*/ +} + +static PVRSRV_ERROR SGXCleanupSharedPBDescCallback(IMG_PVOID pvParam, IMG_UINT32 ui32Param, IMG_BOOL bDummy) +{ + PVRSRV_STUB_PBDESC *psStubPBDesc = (PVRSRV_STUB_PBDESC *)pvParam; + + PVR_UNREFERENCED_PARAMETER(ui32Param); + PVR_UNREFERENCED_PARAMETER(bDummy); + + return SGXCleanupSharedPBDescKM(psStubPBDesc); +} + +static PVRSRV_ERROR SGXCleanupSharedPBDescCreateLockCallback(IMG_PVOID pvParam, IMG_UINT32 ui32Param, IMG_BOOL bDummy) +{ +#ifdef DEBUG + PVRSRV_PER_PROCESS_DATA *psPerProc = (PVRSRV_PER_PROCESS_DATA *)pvParam; + PVR_ASSERT(psPerProc == psPerProcCreateSharedPB); +#else + PVR_UNREFERENCED_PARAMETER(pvParam); +#endif + + PVR_UNREFERENCED_PARAMETER(ui32Param); + PVR_UNREFERENCED_PARAMETER(bDummy); + + psPerProcCreateSharedPB = IMG_NULL; + psResItemCreateSharedPB = IMG_NULL; + + return PVRSRV_OK; +} + + +IMG_EXPORT PVRSRV_ERROR +SGXUnrefSharedPBDescKM(IMG_HANDLE hSharedPBDesc) +{ + PVR_ASSERT(hSharedPBDesc != IMG_NULL); + + return ResManFreeResByPtr(hSharedPBDesc, CLEANUP_WITH_POLL); +} + + +IMG_EXPORT PVRSRV_ERROR +SGXAddSharedPBDescKM(PVRSRV_PER_PROCESS_DATA *psPerProc, + IMG_HANDLE hDevCookie, + PVRSRV_KERNEL_MEM_INFO *psSharedPBDescKernelMemInfo, + PVRSRV_KERNEL_MEM_INFO *psHWPBDescKernelMemInfo, + PVRSRV_KERNEL_MEM_INFO *psBlockKernelMemInfo, + PVRSRV_KERNEL_MEM_INFO *psHWBlockKernelMemInfo, + IMG_UINT32 ui32TotalPBSize, + IMG_HANDLE *phSharedPBDesc, + PVRSRV_KERNEL_MEM_INFO **ppsSharedPBDescSubKernelMemInfos, + IMG_UINT32 ui32SharedPBDescSubKernelMemInfosCount, + IMG_DEV_VIRTADDR sHWPBDescDevVAddr) +{ + PVRSRV_STUB_PBDESC *psStubPBDesc=IMG_NULL; + PVRSRV_ERROR eRet = PVRSRV_ERROR_INVALID_PERPROC; + IMG_UINT32 i; + PVRSRV_SGXDEV_INFO *psSGXDevInfo; + PRESMAN_ITEM psResItem; + + /* + * The caller must have previously called SGXFindSharedPBDesc with + * bLockOnFailure set, and not managed to find a suitable shared PB. 
+ */ + if (psPerProcCreateSharedPB != psPerProc) + { + goto NoAdd; + } + else + { + PVR_ASSERT(psResItemCreateSharedPB != IMG_NULL); + + ResManFreeResByPtr(psResItemCreateSharedPB, CLEANUP_WITH_POLL); + + PVR_ASSERT(psResItemCreateSharedPB == IMG_NULL); + PVR_ASSERT(psPerProcCreateSharedPB == IMG_NULL); + } + + psSGXDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookie)->pvDevice; + + psStubPBDesc = psSGXDevInfo->psStubPBDescListKM; + if (psStubPBDesc != IMG_NULL) + { + if(psStubPBDesc->ui32TotalPBSize != ui32TotalPBSize) + { + PVR_DPF((PVR_DBG_WARNING, + "SGXAddSharedPBDescKM: Shared PB requested with different size (0x%x) from existing shared PB (0x%x) - requested size ignored", + ui32TotalPBSize, psStubPBDesc->ui32TotalPBSize)); + + } + + /* + * We make the caller think the add was successful, + * but return the existing shared PB desc rather than + * a new one. + */ + psResItem = ResManRegisterRes(psPerProc->hResManContext, + RESMAN_TYPE_SHARED_PB_DESC, + psStubPBDesc, + 0, + &SGXCleanupSharedPBDescCallback); + if (psResItem == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "SGXAddSharedPBDescKM: " + "Failed to register existing shared " + "PBDesc with the resource manager")); + goto NoAddKeepPB; + } + + /* + * The caller will unreference the PB desc after + * a successful add, so up the reference count. 
+ */ + psStubPBDesc->ui32RefCount++; + + *phSharedPBDesc = (IMG_HANDLE)psResItem; + eRet = PVRSRV_OK; + goto NoAddKeepPB; + } + + if(OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, + sizeof(PVRSRV_STUB_PBDESC), + (IMG_VOID **)&psStubPBDesc, + 0, + "Stub Parameter Buffer Description") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "SGXAddSharedPBDescKM: Failed to alloc " + "StubPBDesc")); + eRet = PVRSRV_ERROR_OUT_OF_MEMORY; + goto NoAdd; + } + + + psStubPBDesc->ppsSubKernelMemInfos = IMG_NULL; + + if(OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, + sizeof(PVRSRV_KERNEL_MEM_INFO *) + * ui32SharedPBDescSubKernelMemInfosCount, + (IMG_VOID **)&psStubPBDesc->ppsSubKernelMemInfos, + 0, + "Array of Kernel Memory Info") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "SGXAddSharedPBDescKM: " + "Failed to alloc " + "StubPBDesc->ppsSubKernelMemInfos")); + eRet = PVRSRV_ERROR_OUT_OF_MEMORY; + goto NoAdd; + } + + if(PVRSRVDissociateMemFromResmanKM(psSharedPBDescKernelMemInfo) + != PVRSRV_OK) + { + goto NoAdd; + } + + if(PVRSRVDissociateMemFromResmanKM(psHWPBDescKernelMemInfo) + != PVRSRV_OK) + { + goto NoAdd; + } + + if(PVRSRVDissociateMemFromResmanKM(psBlockKernelMemInfo) + != PVRSRV_OK) + { + goto NoAdd; + } + + if(PVRSRVDissociateMemFromResmanKM(psHWBlockKernelMemInfo) + != PVRSRV_OK) + { + goto NoAdd; + } + + psStubPBDesc->ui32RefCount = 1; + psStubPBDesc->ui32TotalPBSize = ui32TotalPBSize; + psStubPBDesc->psSharedPBDescKernelMemInfo = psSharedPBDescKernelMemInfo; + psStubPBDesc->psHWPBDescKernelMemInfo = psHWPBDescKernelMemInfo; + psStubPBDesc->psBlockKernelMemInfo = psBlockKernelMemInfo; + psStubPBDesc->psHWBlockKernelMemInfo = psHWBlockKernelMemInfo; + + psStubPBDesc->ui32SubKernelMemInfosCount = + ui32SharedPBDescSubKernelMemInfosCount; + for(i=0; ippsSubKernelMemInfos[i] = ppsSharedPBDescSubKernelMemInfos[i]; + if(PVRSRVDissociateMemFromResmanKM(ppsSharedPBDescSubKernelMemInfos[i]) + != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "SGXAddSharedPBDescKM: " + "Failed to dissociate shared PBDesc 
" + "from process")); + goto NoAdd; + } + } + + psStubPBDesc->sHWPBDescDevVAddr = sHWPBDescDevVAddr; + + psResItem = ResManRegisterRes(psPerProc->hResManContext, + RESMAN_TYPE_SHARED_PB_DESC, + psStubPBDesc, + 0, + &SGXCleanupSharedPBDescCallback); + if (psResItem == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "SGXAddSharedPBDescKM: " + "Failed to register shared PBDesc " + " with the resource manager")); + goto NoAdd; + } + psStubPBDesc->hDevCookie = hDevCookie; + + /* Finally everything was prepared successfully so link the new + * PB in to place. */ + List_PVRSRV_STUB_PBDESC_Insert(&(psSGXDevInfo->psStubPBDescListKM), + psStubPBDesc); + + *phSharedPBDesc = (IMG_HANDLE)psResItem; + + return PVRSRV_OK; + +NoAdd: + if(psStubPBDesc) + { + if(psStubPBDesc->ppsSubKernelMemInfos) + { + OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, + sizeof(PVRSRV_KERNEL_MEM_INFO *) * ui32SharedPBDescSubKernelMemInfosCount, + psStubPBDesc->ppsSubKernelMemInfos, + 0); + psStubPBDesc->ppsSubKernelMemInfos = IMG_NULL; + } + OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, + sizeof(PVRSRV_STUB_PBDESC), + psStubPBDesc, + 0); + /*not nulling pointer, out of scope*/ + } + +NoAddKeepPB: + for (i = 0; i < ui32SharedPBDescSubKernelMemInfosCount; i++) + { + PVRSRVFreeDeviceMemKM(hDevCookie, ppsSharedPBDescSubKernelMemInfos[i]); + } + + PVRSRVFreeSharedSysMemoryKM(psSharedPBDescKernelMemInfo); + PVRSRVFreeDeviceMemKM(hDevCookie, psHWPBDescKernelMemInfo); + + PVRSRVFreeSharedSysMemoryKM(psBlockKernelMemInfo); + PVRSRVFreeDeviceMemKM(hDevCookie, psHWBlockKernelMemInfo); + + return eRet; +} + +/****************************************************************************** + End of file (pb.c) +******************************************************************************/ diff --git a/sgx_km/eurasia_km/services4/srvkm/devices/sgx/sgx_bridge_km.h b/sgx_km/eurasia_km/services4/srvkm/devices/sgx/sgx_bridge_km.h new file mode 100644 index 0000000..260a265 --- /dev/null +++ 
b/sgx_km/eurasia_km/services4/srvkm/devices/sgx/sgx_bridge_km.h @@ -0,0 +1,254 @@ +/*************************************************************************/ /*! +@Title SGX Bridge Functionality +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Header for the SGX Bridge code +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(__SGX_BRIDGE_KM_H__) +#define __SGX_BRIDGE_KM_H__ + +#include "sgxapi_km.h" +#include "sgxinfo.h" +#include "sgxinfokm.h" +#include "sgx_bridge.h" +#include "pvr_bridge.h" +#include "perproc.h" + +#if defined (__cplusplus) +extern "C" { +#endif + +IMG_IMPORT +PVRSRV_ERROR SGXSubmitTransferKM(IMG_HANDLE hDevHandle, PVRSRV_TRANSFER_SGX_KICK *psKick); + +#if defined(SGX_FEATURE_2D_HARDWARE) +IMG_IMPORT +PVRSRV_ERROR SGXSubmit2DKM(IMG_HANDLE hDevHandle, PVRSRV_2D_SGX_KICK *psKick); +#endif + +IMG_IMPORT +PVRSRV_ERROR SGXDoKickKM(IMG_HANDLE hDevHandle, + SGX_CCB_KICK *psCCBKick); + +IMG_IMPORT +PVRSRV_ERROR SGXGetPhysPageAddrKM(IMG_HANDLE hDevMemHeap, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_DEV_PHYADDR *pDevPAddr, + IMG_CPU_PHYADDR *pCpuPAddr); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV SGXGetMMUPDAddrKM(IMG_HANDLE hDevCookie, + IMG_HANDLE hDevMemContext, + IMG_DEV_PHYADDR *psPDDevPAddr); + +IMG_IMPORT +PVRSRV_ERROR SGXGetClientInfoKM(IMG_HANDLE hDevCookie, + SGX_CLIENT_INFO* psClientInfo); + +IMG_IMPORT +PVRSRV_ERROR SGXGetMiscInfoKM(PVRSRV_SGXDEV_INFO *psDevInfo, + SGX_MISC_INFO *psMiscInfo, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_HANDLE hDevMemContext); + +IMG_IMPORT +PVRSRV_ERROR SGXReadHWPerfCBKM(IMG_HANDLE hDevHandle, + IMG_UINT32 ui32ArraySize, + PVRSRV_SGX_HWPERF_CB_ENTRY *psHWPerfCBData, + IMG_UINT32 *pui32DataCount, + IMG_UINT32 
*pui32ClockSpeed, + IMG_UINT32 *pui32HostTimeStamp); + +IMG_IMPORT +PVRSRV_ERROR SGX2DQueryBlitsCompleteKM(PVRSRV_SGXDEV_INFO *psDevInfo, + PVRSRV_KERNEL_SYNC_INFO *psSyncInfo, + IMG_BOOL bWaitForComplete); + +IMG_IMPORT +PVRSRV_ERROR SGXGetInfoForSrvinitKM(IMG_HANDLE hDevHandle, + SGX_BRIDGE_INFO_FOR_SRVINIT *psInitInfo); + +IMG_IMPORT +PVRSRV_ERROR DevInitSGXPart2KM(PVRSRV_PER_PROCESS_DATA *psPerProc, + IMG_HANDLE hDevHandle, + SGX_BRIDGE_INIT_INFO *psInitInfo); + +/*! + * ***************************************************************************** + * @brief Looks for a parameter buffer description that corresponds to + * a buffer of size ui32TotalPBSize, optionally taking the lock + * needed for SharedPBCreation on failure. + * + * Note if a PB Desc is found then its internal reference counter + * is automatically incremented. It is your responsability to call + * SGXUnrefSharedPBDesc to decrement this reference and free associated + * resources when you are done. + * + * If bLockOnFailure is set, and a suitable shared PB isn't found, + * an internal flag is set, allowing this process to create a + * shared PB. Any other process calling this function with + * bLockOnFailure set, will receive the return code + * PVRSRV_ERROR_PROCESSING_BLOCKED, indicating that it needs + * to retry the function call. The internal flag is cleared + * when this process creates a shared PB. + * + * Note: You are responsible for freeing the list returned in + * pppsSharedPBDescSubKernelMemInfos + * via OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, + * sizeof(PVRSRV_KERNEL_MEM_INFO *) + * * ui32SharedPBDescSubKernelMemInfosCount, + * ppsSharedPBDescSubKernelMemInfos, + * NULL); + * + * @param[in] psPerProc + * @param[in] hDevCookie + * @param[in] bLockOnError + * @param[in] ui32TotalPBSize + * @param[in] phSharedPBDesc + * @param[out] ppsSharedPBDescKernelMemInfo + * @param[out] ppsHWPBDescKernelMemInfo + * @param[out] pppsSharedPBDescSubKernelMemInfos A list of integral sub meminfos. 
+ * @param[out] ui32SharedPBDescSubKernelMemInfosCount + * + * @return PVRSRV_ERROR + ********************************************************************************/ +/* disable QAC pointer level check for over 2 */ +/* PRQA S 5102++ */ +IMG_IMPORT PVRSRV_ERROR +SGXFindSharedPBDescKM(PVRSRV_PER_PROCESS_DATA *psPerProc, + IMG_HANDLE hDevCookie, + IMG_BOOL bLockOnFailure, + IMG_UINT32 ui32TotalPBSize, + IMG_HANDLE *phSharedPBDesc, + PVRSRV_KERNEL_MEM_INFO **ppsSharedPBDescKernelMemInfo, + PVRSRV_KERNEL_MEM_INFO **ppsHWPBDescKernelMemInfo, + PVRSRV_KERNEL_MEM_INFO **ppsBlockKernelMemInfo, + PVRSRV_KERNEL_MEM_INFO **ppsHWBlockKernelMemInfo, + PVRSRV_KERNEL_MEM_INFO ***pppsSharedPBDescSubKernelMemInfos, + IMG_UINT32 *ui32SharedPBDescSubKernelMemInfosCount); + +/*! + * ***************************************************************************** + * @brief Decrements the reference counter and frees all userspace resources + * associated with a SharedPBDesc. + * + * @param hSharedPBDesc + * + * @return PVRSRV_ERROR + ********************************************************************************/ +IMG_IMPORT PVRSRV_ERROR +SGXUnrefSharedPBDescKM(IMG_HANDLE hSharedPBDesc); + +/*! + * ***************************************************************************** + * @brief Links a new SharedPBDesc into a kernel managed list that can + * then be queried by other clients. + * + * As a side affect this function also dissociates the SharedPBDesc + * from the calling process so that the memory won't be freed if the + * process dies/exits. (The kernel assumes responsability over the + * memory at the same time) + * + * As well as the psSharedPBDescKernelMemInfo you must also pass + * a complete list of other meminfos that are integral to the + * shared PB description. (Although the kernel doesn't have direct + * access to the shared PB desc it still needs to be able to + * clean up all the associated resources when it is no longer + * in use.) 
+ * + * If the dissociation fails then all the memory associated with + * the psSharedPBDescKernelMemInfo and all entries in psKernelMemInfos + * will be freed by kernel services! Because of this, you are + * responsible for freeing the corresponding client meminfos _before_ + * calling SGXAddSharedPBDescKM. + * + * This function will return an error unless a succesful call to + * SGXFindSharedPBDesc, with bLockOnFailure set, has been made. + * + * @param psPerProc + * @param hDevCookie + * @param psSharedPBDescKernelMemInfo + * @param psHWPBDescKernelMemInfo + * @param psBlockKernelMemInfo + * @param ui32TotalPBSize The size of the associated parameter buffer + * @param ppsSharedPBDescSubKernelMemInfos A list of other meminfos integral to + * the shared PB description. + * @param ui32SharedPBDescSubKernelMemInfosCount The number of entires in + * psKernelMemInfos + * @param sHWPBDescDevVAddr The device virtual address of the HWPBDesc + * + * @return PVRSRV_ERROR + ********************************************************************************/ +IMG_IMPORT PVRSRV_ERROR +SGXAddSharedPBDescKM(PVRSRV_PER_PROCESS_DATA *psPerProc, + IMG_HANDLE hDevCookie, + PVRSRV_KERNEL_MEM_INFO *psSharedPBDescKernelMemInfo, + PVRSRV_KERNEL_MEM_INFO *psHWPBDescKernelMemInfo, + PVRSRV_KERNEL_MEM_INFO *psBlockKernelMemInfo, + PVRSRV_KERNEL_MEM_INFO *psHWBlockKernelMemInfo, + IMG_UINT32 ui32TotalPBSize, + IMG_HANDLE *phSharedPBDesc, + PVRSRV_KERNEL_MEM_INFO **psSharedPBDescSubKernelMemInfos, + IMG_UINT32 ui32SharedPBDescSubKernelMemInfosCount, + IMG_DEV_VIRTADDR sHWPBDescDevVAddr); + + +/*! + * ***************************************************************************** + * @brief Gets device information that is not intended to be passed + on beyond the srvclient libs. 
+ * + * @param[in] hDevCookie + * @param[out] psSGXInternalDevInfo + * + * @return + ********************************************************************************/ +IMG_IMPORT PVRSRV_ERROR +SGXGetInternalDevInfoKM(IMG_HANDLE hDevCookie, + SGX_INTERNAL_DEVINFO *psSGXInternalDevInfo); + +#if defined (__cplusplus) +} +#endif + +#endif /* __SGX_BRIDGE_KM_H__ */ + +/****************************************************************************** + End of file (sgx_bridge_km.h) +******************************************************************************/ diff --git a/sgx_km/eurasia_km/services4/srvkm/devices/sgx/sgxconfig.h b/sgx_km/eurasia_km/services4/srvkm/devices/sgx/sgxconfig.h new file mode 100644 index 0000000..009a8d7 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/devices/sgx/sgxconfig.h @@ -0,0 +1,645 @@ +/*************************************************************************/ /*! +@Title device configuration +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef __SGXCONFIG_H__ +#define __SGXCONFIG_H__ + +#include "sgxdefs.h" + +#define DEV_DEVICE_TYPE PVRSRV_DEVICE_TYPE_SGX +#define DEV_DEVICE_CLASS PVRSRV_DEVICE_CLASS_3D + +#define DEV_MAJOR_VERSION 1 +#define DEV_MINOR_VERSION 0 + +#if defined(SUPPORT_EXTERNAL_SYSTEM_CACHE) +#define SGX_KERNEL_DATA_HEAP_OFFSET 0x00001000 +#else +#define SGX_KERNEL_DATA_HEAP_OFFSET 0x00000000 +#endif + + +#if SGX_FEATURE_ADDRESS_SPACE_SIZE == 32 + +#if defined (SGX_FEATURE_ADDRESS_SPACE_EXTENSION) +/* + * Constraints: (Additional to above ones) + * -GENERAL, PDS, USE_CODE, KERNEL_DATA heaps should be within [0-256MB] range only. + * -(3DPARAMETERS_HEAP_BASE+TADATA_HEAP) <= 256MB, within same 256MB range. 
+ * + * BIF_REQ_BASE Setting: + * -PDS_REQ_BASE = USCE_REQ_BASE = VDM_REQ_BASE = 0x00000000 + * 3D Task: + * -ISPP_REQ_BASE = ISPZ_REQ_BASE = 3D_REQ_BASE = 3DPARAMETERS_HEAP_BASE + * TA Task: + * -TA_COMMON_REQ_BASE = TA_REQ_BASE = 3DPARAMETERS_HEAP_BASE + * TQ Task: + * -3D_REQ_BASE = ISPP_REQ_BASE = ISPZ_REQ_BASE = ISP stream CCB base + */ +#if defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP) + #define SGX_GENERAL_MAPPING_HEAP_BASE 0x00001000 + #define SGX_GENERAL_MAPPING_HEAP_SIZE (0x01400000-0x00001000-0x00001000) + + #define SGX_GENERAL_HEAP_BASE 0x01400000 + #define SGX_GENERAL_HEAP_SIZE (0x07000000-0x00001000) + +#else + #define SGX_GENERAL_HEAP_BASE 0x00001000 + #define SGX_GENERAL_HEAP_SIZE (0x08400000-0x00001000-0x00001000) +#endif + + #define SGX_PDSPIXEL_CODEDATA_HEAP_BASE 0x08400000 + #define SGX_PDSPIXEL_CODEDATA_HEAP_SIZE (0x02000000-0x00001000) + + #define SGX_KERNEL_CODE_HEAP_BASE 0x0A400000 + #define SGX_KERNEL_CODE_HEAP_SIZE (0x00080000-0x00001000) + + #define SGX_PDSVERTEX_CODEDATA_HEAP_BASE 0x0A800000 + #define SGX_PDSVERTEX_CODEDATA_HEAP_SIZE (0x01C00000-0x00001000) + + #define SGX_PIXELSHADER_HEAP_BASE 0x0C400000 + #define SGX_PIXELSHADER_HEAP_SIZE (0x00800000-0x00001000) + + #define SGX_VERTEXSHADER_HEAP_BASE 0x0CC00000 + #define SGX_VERTEXSHADER_HEAP_SIZE (0x00400000-0x00001000) + + #define SGX_KERNEL_DATA_HEAP_BASE (0x0D000000+SGX_KERNEL_DATA_HEAP_OFFSET) + #define SGX_KERNEL_DATA_HEAP_SIZE (0x02F00000-(0x00001000+SGX_KERNEL_DATA_HEAP_OFFSET)) + + /* ==== Heaps 256MB onwards ==== */ + + #define SGX_3DPARAMETERS_HEAP_SIZE 0x04000000 + + /* By default we split the PB 50/50 */ +#if !defined(HYBRID_SHARED_PB_SIZE) + #define HYBRID_SHARED_PB_SIZE (SGX_3DPARAMETERS_HEAP_SIZE >> 1) +#endif +#if defined(SUPPORT_HYBRID_PB) + #define SGX_SHARED_3DPARAMETERS_SIZE (HYBRID_SHARED_PB_SIZE) + #define SGX_SHARED_3DPARAMETERS_HEAP_SIZE (HYBRID_SHARED_PB_SIZE-0x00001000) + #define SGX_PERCONTEXT_3DPARAMETERS_HEAP_SIZE (SGX_3DPARAMETERS_HEAP_SIZE - 
SGX_SHARED_3DPARAMETERS_SIZE - 0x00001000) +#else +#if defined(SUPPORT_PERCONTEXT_PB) + #define SGX_SHARED_3DPARAMETERS_SIZE 0 + #define SGX_SHARED_3DPARAMETERS_HEAP_SIZE 0 + #define SGX_PERCONTEXT_3DPARAMETERS_HEAP_SIZE (SGX_3DPARAMETERS_HEAP_SIZE - 0x00001000) +#endif +#if defined(SUPPORT_SHARED_PB) + #define SGX_SHARED_3DPARAMETERS_SIZE SGX_3DPARAMETERS_HEAP_SIZE + #define SGX_SHARED_3DPARAMETERS_HEAP_SIZE (SGX_3DPARAMETERS_HEAP_SIZE - 0x00001000) + #define SGX_PERCONTEXT_3DPARAMETERS_HEAP_SIZE 0 +#endif +#endif + + #define SGX_SHARED_3DPARAMETERS_HEAP_BASE 0x10000000 + + /* Size is defined above */ + + #define SGX_PERCONTEXT_3DPARAMETERS_HEAP_BASE (SGX_SHARED_3DPARAMETERS_HEAP_BASE + SGX_SHARED_3DPARAMETERS_SIZE) + /* Size is defined above */ + + #define SGX_TADATA_HEAP_BASE 0x14000000 + #define SGX_TADATA_HEAP_SIZE (0x04000000-0x00001000) + + #define SGX_SYNCINFO_HEAP_BASE 0x18000000 + #define SGX_SYNCINFO_HEAP_SIZE (0x01000000-0x00001000) + + #define SGX_TEXTURE_HEAP_BASE 0x19000000 + #define SGX_TEXTURE_HEAP_SIZE (0xE6000000-0x00001000) + +#else /* defined(SGX_FEATURE_ADDRESS_SPACE_EXTENSION) */ +#if defined(FIX_HW_BRN_31620) + #if defined(SGX_FEATURE_2D_HARDWARE) + #define SGX_2D_HEAP_BASE 0x04000000 + #define SGX_2D_HEAP_SIZE (0x08000000-0x04000000-0x00001000) + #endif + + #define SGX_GENERAL_HEAP_BASE 0x08000000 + #define SGX_GENERAL_HEAP_SIZE (0xB8000000-0x00001000) + + /* + * For hybrid PB we have to split virtual PB range between the shared + * PB and percontext PB due to the fact we only have one heap config + * per device. + * If hybrid PB is enabled we split the space according to HYBRID_SHARED_PB_SIZE. + * i.e. HYBRID_SHARED_PB_SIZE defines the size of the shared PB and the + * remainder is the size of the percontext PB. 
+ * If hybrid PB is not enabled then we still create both heaps (helps keep + * the code clean) and define the size of the unused one to 0 + */ + + #define SGX_3DPARAMETERS_HEAP_SIZE 0x10000000 + + /* By default we split the PB 50/50 */ +#if !defined(HYBRID_SHARED_PB_SIZE) + #define HYBRID_SHARED_PB_SIZE (SGX_3DPARAMETERS_HEAP_SIZE >> 1) +#endif +#if defined(SUPPORT_HYBRID_PB) + #define SGX_SHARED_3DPARAMETERS_SIZE (HYBRID_SHARED_PB_SIZE) + #define SGX_SHARED_3DPARAMETERS_HEAP_SIZE (HYBRID_SHARED_PB_SIZE-0x00001000) + #define SGX_PERCONTEXT_3DPARAMETERS_HEAP_SIZE (SGX_3DPARAMETERS_HEAP_SIZE - SGX_SHARED_3DPARAMETERS_SIZE - 0x00001000) +#else +#if defined(SUPPORT_PERCONTEXT_PB) + #define SGX_SHARED_3DPARAMETERS_SIZE 0 + #define SGX_SHARED_3DPARAMETERS_HEAP_SIZE 0 + #define SGX_PERCONTEXT_3DPARAMETERS_HEAP_SIZE (SGX_3DPARAMETERS_HEAP_SIZE - 0x00001000) +#endif +#if defined(SUPPORT_SHARED_PB) + #define SGX_SHARED_3DPARAMETERS_SIZE SGX_3DPARAMETERS_HEAP_SIZE + #define SGX_SHARED_3DPARAMETERS_HEAP_SIZE (SGX_3DPARAMETERS_HEAP_SIZE - 0x00001000) + #define SGX_PERCONTEXT_3DPARAMETERS_HEAP_SIZE 0 +#endif +#endif + + #define SGX_SHARED_3DPARAMETERS_HEAP_BASE 0xC0000000 + /* Size is defined above */ + + #define SGX_PERCONTEXT_3DPARAMETERS_HEAP_BASE (SGX_SHARED_3DPARAMETERS_HEAP_BASE + SGX_SHARED_3DPARAMETERS_SIZE) + /* Size is defined above */ + + #define SGX_TADATA_HEAP_BASE 0xD0000000 + #define SGX_TADATA_HEAP_SIZE (0x0D000000-0x00001000) + + #define SGX_SYNCINFO_HEAP_BASE 0xE0000000 + #define SGX_SYNCINFO_HEAP_SIZE (0x01000000-0x00001000) + + #define SGX_PDSPIXEL_CODEDATA_HEAP_BASE 0xE4000000 + #define SGX_PDSPIXEL_CODEDATA_HEAP_SIZE (0x02000000-0x00001000) + + #define SGX_KERNEL_CODE_HEAP_BASE 0xE8000000 + #define SGX_KERNEL_CODE_HEAP_SIZE (0x00080000-0x00001000) + + #define SGX_PDSVERTEX_CODEDATA_HEAP_BASE 0xEC000000 + #define SGX_PDSVERTEX_CODEDATA_HEAP_SIZE (0x01C00000-0x00001000) + + #define SGX_KERNEL_DATA_HEAP_BASE (0xF0000000+SGX_KERNEL_DATA_HEAP_OFFSET) + #define 
SGX_KERNEL_DATA_HEAP_SIZE (0x03000000-(0x00001000+SGX_KERNEL_DATA_HEAP_OFFSET)) + + /* Actual Pixel and Vertex shared heaps sizes may be reduced by + * override - see SGX_USE_CODE_SEGMENT_RANGE_BITS.*/ + #define SGX_PIXELSHADER_HEAP_BASE 0xF4000000 + #define SGX_PIXELSHADER_HEAP_SIZE (0x05000000-0x00001000) + + #define SGX_VERTEXSHADER_HEAP_BASE 0xFC000000 + #define SGX_VERTEXSHADER_HEAP_SIZE (0x02000000-0x00001000) +#else /* FIX_HW_BRN_31620 */ + #if defined(SGX_FEATURE_2D_HARDWARE) + #define SGX_2D_HEAP_BASE 0x00100000 + #define SGX_2D_HEAP_SIZE (0x08000000-0x00100000-0x00001000) + #endif + + #if defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP) + #define SGX_GENERAL_MAPPING_HEAP_BASE 0x08000000 + #define SGX_GENERAL_MAPPING_HEAP_SIZE (0x08000000-0x00001000) + #endif + + #if !defined(SUPPORT_MEMORY_TILING) + #define SGX_GENERAL_HEAP_BASE 0x10000000 + #define SGX_GENERAL_HEAP_SIZE (0xC2000000-0x00001000) + #else + #include + /* Create heaps with memory tiling enabled. + * SGX HW limit is 10 heaps. + */ + /* Tiled heap space is taken from general heap */ + #define SGX_GENERAL_HEAP_BASE 0x10000000 + #define SGX_GENERAL_HEAP_SIZE (0xB5000000-0x00001000) + + #define SGX_VPB_TILED_HEAP_STRIDE TILING_TILE_STRIDE_2K + #define SGX_VPB_TILED_HEAP_BASE 0xC5000000 + #define SGX_VPB_TILED_HEAP_SIZE (0x0D000000-0x00001000) + + /* Check tiled heap base alignment */ + #if((SGX_VPB_TILED_HEAP_BASE & SGX_BIF_TILING_ADDR_INV_MASK) != 0) + #error "sgxconfig.h: SGX_VPB_TILED_HEAP has insufficient alignment" + #endif + + #endif /* SUPPORT_MEMORY_TILING */ + + /* + * For hybrid PB we have to split virtual PB range between the shared + * PB and percontext PB due to the fact we only have one heap config + * per device. + * If hybrid PB is enabled we split the space according to HYBRID_SHARED_PB_SIZE. + * i.e. HYBRID_SHARED_PB_SIZE defines the size of the shared PB and the + * remainder is the size of the percontext PB. 
+ * If hybrid PB is not enabled then we still create both heaps (helps keep + * the code clean) and define the size of the unused one to 0 + */ + + #define SGX_3DPARAMETERS_HEAP_SIZE 0x10000000 + + /* By default we split the PB 50/50 */ +#if !defined(HYBRID_SHARED_PB_SIZE) + #define HYBRID_SHARED_PB_SIZE (SGX_3DPARAMETERS_HEAP_SIZE >> 1) +#endif +#if defined(SUPPORT_HYBRID_PB) + #define SGX_SHARED_3DPARAMETERS_SIZE (HYBRID_SHARED_PB_SIZE) + #define SGX_SHARED_3DPARAMETERS_HEAP_SIZE (HYBRID_SHARED_PB_SIZE-0x00001000) + #define SGX_PERCONTEXT_3DPARAMETERS_HEAP_SIZE (SGX_3DPARAMETERS_HEAP_SIZE - SGX_SHARED_3DPARAMETERS_SIZE - 0x00001000) +#else +#if defined(SUPPORT_PERCONTEXT_PB) + #define SGX_SHARED_3DPARAMETERS_SIZE 0 + #define SGX_SHARED_3DPARAMETERS_HEAP_SIZE 0 + #define SGX_PERCONTEXT_3DPARAMETERS_HEAP_SIZE (SGX_3DPARAMETERS_HEAP_SIZE - 0x00001000) +#endif +#if defined(SUPPORT_SHARED_PB) + #define SGX_SHARED_3DPARAMETERS_SIZE SGX_3DPARAMETERS_HEAP_SIZE + #define SGX_SHARED_3DPARAMETERS_HEAP_SIZE (SGX_3DPARAMETERS_HEAP_SIZE - 0x00001000) + #define SGX_PERCONTEXT_3DPARAMETERS_HEAP_SIZE 0 +#endif +#endif + + #define SGX_SHARED_3DPARAMETERS_HEAP_BASE 0xD2000000 + /* Size is defined above */ + + #define SGX_PERCONTEXT_3DPARAMETERS_HEAP_BASE (SGX_SHARED_3DPARAMETERS_HEAP_BASE + SGX_SHARED_3DPARAMETERS_SIZE) + /* Size is defined above */ + + #define SGX_TADATA_HEAP_BASE 0xE2000000 + #define SGX_TADATA_HEAP_SIZE (0x0D000000-0x00001000) + + #define SGX_SYNCINFO_HEAP_BASE 0xEF000000 + #define SGX_SYNCINFO_HEAP_SIZE (0x01000000-0x00001000) + + #define SGX_PDSPIXEL_CODEDATA_HEAP_BASE 0xF0000000 + #define SGX_PDSPIXEL_CODEDATA_HEAP_SIZE (0x02000000-0x00001000) + + #define SGX_KERNEL_CODE_HEAP_BASE 0xF2000000 + #define SGX_KERNEL_CODE_HEAP_SIZE (0x00080000-0x00001000) + + #define SGX_PDSVERTEX_CODEDATA_HEAP_BASE 0xF2400000 + #define SGX_PDSVERTEX_CODEDATA_HEAP_SIZE (0x01C00000-0x00001000) + + #define SGX_KERNEL_DATA_HEAP_BASE (0xF4000000+SGX_KERNEL_DATA_HEAP_OFFSET) + #define 
SGX_KERNEL_DATA_HEAP_SIZE (0x05000000-(0x00001000+SGX_KERNEL_DATA_HEAP_OFFSET)) + + /* Actual Pixel and Vertex shared heaps sizes may be reduced by + * override - see SGX_USE_CODE_SEGMENT_RANGE_BITS.*/ + #define SGX_PIXELSHADER_HEAP_BASE 0xF9000000 + #define SGX_PIXELSHADER_HEAP_SIZE (0x05000000-0x00001000) + + #define SGX_VERTEXSHADER_HEAP_BASE 0xFE000000 + #define SGX_VERTEXSHADER_HEAP_SIZE (0x02000000-0x00001000) +#endif /* FIX_HW_BRN_31620 */ + /* signal we've identified the core by the build */ +#endif /* defined(SGX_FEATURE_ADDRESS_SPACE_EXTENSION) */ + #define SGX_CORE_IDENTIFIED +#endif /* SGX_FEATURE_ADDRESS_SPACE_SIZE == 32 */ + +#if SGX_FEATURE_ADDRESS_SPACE_SIZE == 28 + +#if defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP) + #define SGX_GENERAL_MAPPING_HEAP_BASE 0x00001000 + #define SGX_GENERAL_MAPPING_HEAP_SIZE (0x01800000-0x00001000-0x00001000) + + #define SGX_GENERAL_HEAP_BASE 0x01800000 + #define SGX_GENERAL_HEAP_SIZE (0x07000000-0x00001000) + +#else + #define SGX_GENERAL_HEAP_BASE 0x00001000 +#if defined(SUPPORT_LARGE_GENERAL_HEAP) + #define SGX_GENERAL_HEAP_SIZE (0x0B800000-0x00001000-0x00001000) +#else + #define SGX_GENERAL_HEAP_SIZE (0x08800000-0x00001000-0x00001000) +#endif +#endif + + /* + * For hybrid PB we have to split virtual PB range between the shared + * PB and percontext PB due to the fact we only have one heap config + * per device. + * If hybrid PB is enabled we split the space according to HYBRID_SHARED_PB_SIZE. + * i.e. HYBRID_SHARED_PB_SIZE defines the size of the shared PB and the + * remainder is the size of the percontext PB. 
+ * If hybrid PB is not enabled then we still create both heaps (helps keep + * the code clean) and define the size of the unused one to 0 + */ +#if defined(SUPPORT_LARGE_GENERAL_HEAP) + #define SGX_3DPARAMETERS_HEAP_SIZE 0x01000000 +#else + #define SGX_3DPARAMETERS_HEAP_SIZE 0x04000000 +#endif + + /* By default we split the PB 50/50 */ +#if !defined(HYBRID_SHARED_PB_SIZE) + #define HYBRID_SHARED_PB_SIZE (SGX_3DPARAMETERS_HEAP_SIZE >> 1) +#endif +#if defined(SUPPORT_HYBRID_PB) + #define SGX_SHARED_3DPARAMETERS_SIZE (HYBRID_SHARED_PB_SIZE) + #define SGX_SHARED_3DPARAMETERS_HEAP_SIZE (HYBRID_SHARED_PB_SIZE-0x00001000) + #define SGX_PERCONTEXT_3DPARAMETERS_HEAP_SIZE (SGX_3DPARAMETERS_HEAP_SIZE - SGX_SHARED_3DPARAMETERS_SIZE - 0x00001000) +#else +#if defined(SUPPORT_PERCONTEXT_PB) + #define SGX_SHARED_3DPARAMETERS_SIZE 0 + #define SGX_SHARED_3DPARAMETERS_HEAP_SIZE 0 + #define SGX_PERCONTEXT_3DPARAMETERS_HEAP_SIZE (SGX_3DPARAMETERS_HEAP_SIZE - 0x00001000) +#endif +#if defined(SUPPORT_SHARED_PB) + #define SGX_SHARED_3DPARAMETERS_SIZE SGX_3DPARAMETERS_HEAP_SIZE + #define SGX_SHARED_3DPARAMETERS_HEAP_SIZE (SGX_3DPARAMETERS_HEAP_SIZE - 0x00001000) + #define SGX_PERCONTEXT_3DPARAMETERS_HEAP_SIZE 0 +#endif +#endif + +#if defined(SUPPORT_LARGE_GENERAL_HEAP) + #define SGX_SHARED_3DPARAMETERS_HEAP_BASE 0x0B800000 +#else + #define SGX_SHARED_3DPARAMETERS_HEAP_BASE 0x08800000 +#endif + + /* Size is defined above */ + + #define SGX_PERCONTEXT_3DPARAMETERS_HEAP_BASE (SGX_SHARED_3DPARAMETERS_HEAP_BASE + SGX_SHARED_3DPARAMETERS_SIZE) + /* Size is defined above */ + + #define SGX_TADATA_HEAP_BASE 0x0C800000 + #define SGX_TADATA_HEAP_SIZE (0x01000000-0x00001000) + + #define SGX_SYNCINFO_HEAP_BASE 0x0D800000 + #define SGX_SYNCINFO_HEAP_SIZE (0x00400000-0x00001000) + + #define SGX_PDSPIXEL_CODEDATA_HEAP_BASE 0x0DC00000 + #define SGX_PDSPIXEL_CODEDATA_HEAP_SIZE (0x00800000-0x00001000) + + #define SGX_KERNEL_CODE_HEAP_BASE 0x0E400000 + #define SGX_KERNEL_CODE_HEAP_SIZE 
(0x00080000-0x00001000) + + #define SGX_PDSVERTEX_CODEDATA_HEAP_BASE 0x0E800000 + #define SGX_PDSVERTEX_CODEDATA_HEAP_SIZE (0x00800000-0x00001000) + + #define SGX_KERNEL_DATA_HEAP_BASE (0x0F000000+SGX_KERNEL_DATA_HEAP_OFFSET) + #define SGX_KERNEL_DATA_HEAP_SIZE (0x00400000-(0x00001000+SGX_KERNEL_DATA_HEAP_OFFSET)) + + #define SGX_PIXELSHADER_HEAP_BASE 0x0F400000 + #define SGX_PIXELSHADER_HEAP_SIZE (0x00500000-0x00001000) + + #define SGX_VERTEXSHADER_HEAP_BASE 0x0FC00000 + #define SGX_VERTEXSHADER_HEAP_SIZE (0x00200000-0x00001000) + + /* signal we've identified the core by the build */ + #define SGX_CORE_IDENTIFIED + +#endif /* SGX_FEATURE_ADDRESS_SPACE_SIZE == 28 */ + +#if !defined(SGX_CORE_IDENTIFIED) + #error "sgxconfig.h: ERROR: unspecified SGX Core version" +#endif + +#if !defined(SGX_FEATURE_ADDRESS_SPACE_EXTENSION) +/********************************************************************************* + * + * SGX_PDSPIXEL_CODEDATA_HEAP_BASE + 64MB range must include PDSVERTEX_CODEDATA and KERNEL_CODE heaps + * + ********************************************************************************/ +#if !defined (SGX_FEATURE_EDM_VERTEX_PDSADDR_FULL_RANGE) + #if ((SGX_KERNEL_CODE_HEAP_BASE + SGX_KERNEL_CODE_HEAP_SIZE - SGX_PDSPIXEL_CODEDATA_HEAP_BASE) > 0x4000000) + #error "sgxconfig.h: ERROR: SGX_KERNEL_CODE_HEAP_BASE out of range of SGX_PDSPIXEL_CODEDATA_HEAP_BASE" + #endif + + #if ((SGX_PDSVERTEX_CODEDATA_HEAP_BASE + SGX_PDSVERTEX_CODEDATA_HEAP_SIZE - SGX_PDSPIXEL_CODEDATA_HEAP_BASE) > 0x4000000) + #error "sgxconfig.h: ERROR: SGX_PDSVERTEX_CODEDATA_HEAP_BASE out of range of SGX_PDSPIXEL_CODEDATA_HEAP_BASE" + #endif +#endif + +/********************************************************************************* + * + * The General Mapping heap must be within the 2D requestor range of the 2D heap base + * + ********************************************************************************/ +#if defined(SGX_FEATURE_2D_HARDWARE) && defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP) + 
#if ((SGX_GENERAL_MAPPING_HEAP_BASE + SGX_GENERAL_MAPPING_HEAP_SIZE - SGX_2D_HEAP_BASE) >= EUR_CR_BIF_TWOD_REQ_BASE_ADDR_MASK) + #error "sgxconfig.h: ERROR: SGX_GENERAL_MAPPING_HEAP inaccessable by 2D requestor" + #endif +#endif + +/********************************************************************************* + * + * The kernel code heap base must be aligned to a USSE code page + * + ********************************************************************************/ +#if defined (EURASIA_USE_CODE_PAGE_SIZE) + #if ((SGX_KERNEL_CODE_HEAP_BASE & (EURASIA_USE_CODE_PAGE_SIZE - 1)) != 0) + #error "sgxconfig.h: ERROR: Kernel code heap base misalignment" + #endif +#endif + +/********************************************************************************* + * + * Heap overlap check + * + ********************************************************************************/ +#if defined(SGX_FEATURE_2D_HARDWARE) + #if defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP) + #if ((SGX_2D_HEAP_BASE + SGX_2D_HEAP_SIZE) >= SGX_GENERAL_MAPPING_HEAP_BASE) + #error "sgxconfig.h: ERROR: SGX_2D_HEAP overlaps SGX_GENERAL_MAPPING_HEAP" + #endif + #else + #if ((SGX_2D_HEAP_BASE + SGX_2D_HEAP_SIZE) >= SGX_GENERAL_HEAP_BASE) + #error "sgxconfig.h: ERROR: SGX_2D_HEAP overlaps SGX_GENERAL_HEAP_BASE" + #endif + #endif +#endif + +#if defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP) + #if ((SGX_GENERAL_MAPPING_HEAP_BASE + SGX_GENERAL_MAPPING_HEAP_SIZE) >= SGX_GENERAL_HEAP_BASE) + #error "sgxconfig.h: ERROR: SGX_GENERAL_MAPPING_HEAP overlaps SGX_GENERAL_HEAP" + #endif +#endif + +#if defined(SUPPORT_HYBRID_PB) + #if ((HYBRID_SHARED_PB_SIZE + 0x000001000) > SGX_3DPARAMETERS_HEAP_SIZE) + #error "sgxconfig.h: ERROR: HYBRID_SHARED_PB_SIZE too large" + #endif +#endif + +#if defined(SUPPORT_MEMORY_TILING) + #if ((SGX_GENERAL_HEAP_BASE + SGX_GENERAL_HEAP_SIZE) >= SGX_VPB_TILED_HEAP_BASE) + #error "sgxconfig.h: ERROR: SGX_GENERAL_HEAP overlaps SGX_VPB_TILED_HEAP" + #endif + #if ((SGX_VPB_TILED_HEAP_BASE + 
SGX_VPB_TILED_HEAP_SIZE) >= SGX_SHARED_3DPARAMETERS_HEAP_BASE) + #error "sgxconfig.h: ERROR: SGX_VPB_TILED_HEAP overlaps SGX_3DPARAMETERS_HEAP" + #endif +#else + #if ((SGX_GENERAL_HEAP_BASE + SGX_GENERAL_HEAP_SIZE) >= SGX_SHARED_3DPARAMETERS_HEAP_BASE) + #error "sgxconfig.h: ERROR: SGX_GENERAL_HEAP overlaps SGX_3DPARAMETERS_HEAP" + #endif +#endif + +#if (((SGX_PERCONTEXT_3DPARAMETERS_HEAP_BASE + SGX_PERCONTEXT_3DPARAMETERS_HEAP_SIZE) >= SGX_TADATA_HEAP_BASE) && (SGX_PERCONTEXT_3DPARAMETERS_HEAP_SIZE > 0)) + #error "sgxconfig.h: ERROR: SGX_PERCONTEXT_3DPARAMETERS_HEAP_BASE overlaps SGX_TADATA_HEAP" +#endif + +#if ((SGX_TADATA_HEAP_BASE + SGX_TADATA_HEAP_SIZE) >= SGX_SYNCINFO_HEAP_BASE) + #error "sgxconfig.h: ERROR: SGX_TADATA_HEAP overlaps SGX_SYNCINFO_HEAP" +#endif + +#if ((SGX_SYNCINFO_HEAP_BASE + SGX_SYNCINFO_HEAP_SIZE) >= SGX_PDSPIXEL_CODEDATA_HEAP_BASE) + #error "sgxconfig.h: ERROR: SGX_SYNCINFO_HEAP overlaps SGX_PDSPIXEL_CODEDATA_HEAP" +#endif + +#if ((SGX_PDSPIXEL_CODEDATA_HEAP_BASE + SGX_PDSPIXEL_CODEDATA_HEAP_SIZE) >= SGX_KERNEL_CODE_HEAP_BASE) + #error "sgxconfig.h: ERROR: SGX_PDSPIXEL_CODEDATA_HEAP overlaps SGX_KERNEL_CODE_HEAP" +#endif + +#if ((SGX_KERNEL_CODE_HEAP_BASE + SGX_KERNEL_CODE_HEAP_SIZE) >= SGX_PDSVERTEX_CODEDATA_HEAP_BASE) + #error "sgxconfig.h: ERROR: SGX_KERNEL_CODE_HEAP overlaps SGX_PDSVERTEX_CODEDATA_HEAP" +#endif + +#if ((SGX_PDSVERTEX_CODEDATA_HEAP_BASE + SGX_PDSVERTEX_CODEDATA_HEAP_SIZE) >= SGX_KERNEL_DATA_HEAP_BASE) + #error "sgxconfig.h: ERROR: SGX_PDSVERTEX_CODEDATA_HEAP overlaps SGX_KERNEL_DATA_HEAP" +#endif + +#if ((SGX_KERNEL_DATA_HEAP_BASE + SGX_KERNEL_DATA_HEAP_SIZE) >= SGX_PIXELSHADER_HEAP_BASE) + #error "sgxconfig.h: ERROR: SGX_KERNEL_DATA_HEAP overlaps SGX_PIXELSHADER_HEAP" +#endif + +#if ((SGX_PIXELSHADER_HEAP_BASE + SGX_PIXELSHADER_HEAP_SIZE) >= SGX_VERTEXSHADER_HEAP_BASE) + #error "sgxconfig.h: ERROR: SGX_PIXELSHADER_HEAP overlaps SGX_VERTEXSHADER_HEAP" +#endif + +#if ((SGX_VERTEXSHADER_HEAP_BASE + 
SGX_VERTEXSHADER_HEAP_SIZE) < SGX_VERTEXSHADER_HEAP_BASE) + #error "sgxconfig.h: ERROR: SGX_VERTEXSHADER_HEAP_BASE size cause wraparound" +#endif +#else /* !defined(SGX_FEATURE_ADDRESS_SPACE_EXTENSION) */ + +/********************************************************************************* + * + * SGX_PDSPIXEL_CODEDATA_HEAP_BASE + 64MB range must include PDSVERTEX_CODEDATA and KERNEL_CODE heaps + * + ********************************************************************************/ +#if !defined (SGX_FEATURE_EDM_VERTEX_PDSADDR_FULL_RANGE) + #if ((SGX_KERNEL_CODE_HEAP_BASE + SGX_KERNEL_CODE_HEAP_SIZE - SGX_PDSPIXEL_CODEDATA_HEAP_BASE) > 0x4000000) + #error "sgxconfig.h: ERROR: SGX_KERNEL_CODE_HEAP_BASE out of range of SGX_PDSPIXEL_CODEDATA_HEAP_BASE" + #endif + + #if ((SGX_PDSVERTEX_CODEDATA_HEAP_BASE + SGX_PDSVERTEX_CODEDATA_HEAP_SIZE - SGX_PDSPIXEL_CODEDATA_HEAP_BASE) > 0x4000000) + #error "sgxconfig.h: ERROR: SGX_PDSVERTEX_CODEDATA_HEAP_BASE out of range of SGX_PDSPIXEL_CODEDATA_HEAP_BASE" + #endif +#endif + +/********************************************************************************* + * + * The kernel code heap base must be aligned to a USSE code page + * + ********************************************************************************/ +#if defined (EURASIA_USE_CODE_PAGE_SIZE) + #if ((SGX_KERNEL_CODE_HEAP_BASE & (EURASIA_USE_CODE_PAGE_SIZE - 1)) != 0) + #error "sgxconfig.h: ERROR: Kernel code heap base misalignment" + #endif +#endif + +/********************************************************************************* + * + * Heap overlap check + * + ********************************************************************************/ +#if defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP) + #if ((SGX_GENERAL_MAPPING_HEAP_BASE + SGX_GENERAL_MAPPING_HEAP_SIZE) >= SGX_GENERAL_HEAP_BASE) + #error "sgxconfig.h: ERROR: SGX_GENERAL_MAPPING_HEAP overlaps SGX_GENERAL_HEAP" + #endif +#endif + +#if ((SGX_GENERAL_HEAP_BASE + SGX_GENERAL_HEAP_SIZE) >= 
SGX_PDSPIXEL_CODEDATA_HEAP_BASE) + #error "sgxconfig.h: ERROR: SGX_GENERAL_HEAP overlaps SGX_PDSPIXEL_CODEDATA_HEAP" +#endif + +#if ((SGX_PDSPIXEL_CODEDATA_HEAP_BASE + SGX_PDSPIXEL_CODEDATA_HEAP_SIZE) >= SGX_KERNEL_CODE_HEAP_BASE) + #error "sgxconfig.h: ERROR: SGX_PDSPIXEL_CODEDATA_HEAP overlaps SGX_KERNEL_CODE_HEAP" +#endif + +#if ((SGX_KERNEL_CODE_HEAP_BASE + SGX_KERNEL_CODE_HEAP_SIZE) >= SGX_PDSVERTEX_CODEDATA_HEAP_BASE) + #error "sgxconfig.h: ERROR: SGX_KERNEL_CODE_HEAP overlaps SGX_PDSVERTEX_CODEDATA_HEAP" +#endif + +#if ((SGX_PDSVERTEX_CODEDATA_HEAP_BASE + SGX_PDSVERTEX_CODEDATA_HEAP_SIZE) >= SGX_PIXELSHADER_HEAP_BASE) + #error "sgxconfig.h: ERROR: SGX_PDSVERTEX_CODEDATA_HEAP overlaps SGX_KERNEL_DATA_HEAP" +#endif + +#if ((SGX_PIXELSHADER_HEAP_BASE + SGX_PIXELSHADER_HEAP_SIZE) >= SGX_VERTEXSHADER_HEAP_BASE) + #error "sgxconfig.h: ERROR: SGX_PIXELSHADER_HEAP overlaps SGX_VERTEXSHADER_HEAP" +#endif + +#if ((SGX_VERTEXSHADER_HEAP_BASE + SGX_VERTEXSHADER_HEAP_SIZE) >= SGX_KERNEL_DATA_HEAP_BASE) + #error "sgxconfig.h: ERROR: SGX_VERTEXSHADER_HEAP_BASE size overlaps SGX_KERNEL_DATA_HEAP" +#endif + +/* check if last heap in 0-256MB spill out of 256MB */ +#if ((SGX_KERNEL_DATA_HEAP_BASE + SGX_KERNEL_DATA_HEAP_SIZE) > 0x0FFFFFFF) + #error "sgxconfig.h: ERROR: SGX_KERNEL_DATA_HEAP spill out of 256MB" +#endif + + +/* check for heaps out of 0-256MB range */ + + +#if ((SGX_SHARED_3DPARAMETERS_HEAP_BASE < 0x0FFFFFFF)) + #error "sgxconfig.h: ERROR: put SGX_SHARED_3DPARAMETERS_HEAP out side of 0-256MB" +#endif + +#if defined(SUPPORT_HYBRID_PB) + #if ((HYBRID_SHARED_PB_SIZE + 0x000001000) > SGX_3DPARAMETERS_HEAP_SIZE) + #error "sgxconfig.h: ERROR: HYBRID_SHARED_PB_SIZE too large" + #endif +#endif + +#if (((SGX_PERCONTEXT_3DPARAMETERS_HEAP_BASE + SGX_PERCONTEXT_3DPARAMETERS_HEAP_SIZE) >= SGX_TADATA_HEAP_BASE) && (SGX_PERCONTEXT_3DPARAMETERS_HEAP_SIZE > 0)) + #error "sgxconfig.h: ERROR: SGX_PERCONTEXT_3DPARAMETERS_HEAP_BASE overlaps SGX_TADATA_HEAP" +#endif + +#if
((SGX_TADATA_HEAP_BASE + SGX_TADATA_HEAP_SIZE) >= SGX_SYNCINFO_HEAP_BASE) + #error "sgxconfig.h: ERROR: SGX_TADATA_HEAP overlaps SGX_SYNCINFO_HEAP" +#endif + +#if ((SGX_SYNCINFO_HEAP_BASE + SGX_SYNCINFO_HEAP_SIZE) >= SGX_TEXTURE_HEAP_BASE) + #error "sgxconfig.h: ERROR: SGX_SYNCINFO_HEAP overlaps SGX_TEXTURE_HEAP_BASE" +#endif + +#if ((SGX_TEXTURE_HEAP_BASE + SGX_TEXTURE_HEAP_SIZE) < SGX_TEXTURE_HEAP_BASE) + #error "sgxconfig.h: ERROR: SGX_TEXTURE_HEAP size cause wraparound" +#endif + +#endif /* !defined(SGX_FEATURE_ADDRESS_SPACE_EXTENSION) */ +#endif /* __SGXCONFIG_H__ */ + +/***************************************************************************** + End of file (sgxconfig.h) +*****************************************************************************/ diff --git a/sgx_km/eurasia_km/services4/srvkm/devices/sgx/sgxinfokm.h b/sgx_km/eurasia_km/services4/srvkm/devices/sgx/sgxinfokm.h new file mode 100644 index 0000000..eb1b21d --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/devices/sgx/sgxinfokm.h @@ -0,0 +1,631 @@ +/*************************************************************************/ /*! +@Title SGX kernel services structures/functions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Structures and inline functions for KM services component +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software.
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#ifndef __SGXINFOKM_H__ +#define __SGXINFOKM_H__ + +#include "sgxdefs.h" +#include "device.h" +#include "power.h" +#include "sysconfig.h" +#include "sgxscript.h" +#include "sgxinfo.h" + +#if defined (__cplusplus) +extern "C" { +#endif + +/****************************************************************************/ +/* kernel only defines: */ +/****************************************************************************/ +/* SGXDeviceMap Flag defines */ +#define SGX_HOSTPORT_PRESENT 0x00000001UL + + +/* + SGX PDUMP register bank name (prefix) +*/ +#define SGX_PDUMPREG_NAME "SGXREG" + +/****************************************************************************/ +/* kernel only structures: */ +/****************************************************************************/ + +/*Forward declaration*/ +typedef struct _PVRSRV_STUB_PBDESC_ PVRSRV_STUB_PBDESC; +#if defined(SUPPORT_PVRSRV_ANDROID_SYSTRACE) && defined(EUR_CR_TIMER) +typedef struct _PVRSRV_SYSTRACE_JOB_ +{ + IMG_UINT32 ui32JobID; + IMG_UINT32 ui32FrameNum; + IMG_UINT32 ui32RTData; + +} PVRSRV_SYSTRACE_JOB; + +typedef struct _PVRSRV_SYSTRACE_CONTEXT_ +{ + IMG_UINT32 ui32PID; + IMG_UINT32 ui32CtxID; + + /*Every PID has a circular buffer of jobs*/ + IMG_UINT32 ui32Start; + IMG_UINT32 ui32End; + IMG_UINT32 ui32CurrentJobID; + PVRSRV_SYSTRACE_JOB asJobs[16]; + +} PVRSRV_SYSTRACE_CONTEXT; + +typedef struct _PVRSRV_SYSTRACE_TIMECORR_ +{ + IMG_UINT64 ui64HostTime; + IMG_UINT32 ui32SGXClocksx16; +} PVRSRV_SYSTRACE_TIMECORR; + +typedef struct _PVRSRV_SYSTRACE_DATA_ +{ + IMG_UINT32 ui32Index; + IMG_UINT32 ui32CurrentCtxID; + + PVRSRV_SYSTRACE_CONTEXT asSystraceContext[8]; + PVRSRV_SYSTRACE_TIMECORR asTimeCorrArray[32]; /* Array to store HostTime and corresponding SGXClks (Max value is PVRSRV_SYSTRACE_TIMEINDEX_LIMIT set in systrace.c */ + IMG_UINT32 ui32TimeCorrIndex; /* Global current index */ + + IMG_BOOL bLastPowerDown; /* During last 
MISR Device was powered off */ +} PVRSRV_SYSTRACE_DATA; +#endif + +typedef struct _PVRSRV_SGX_CCB_INFO_ *PPVRSRV_SGX_CCB_INFO; + +typedef struct _PVRSRV_SGXDEV_INFO_ +{ + PVRSRV_DEVICE_TYPE eDeviceType; + PVRSRV_DEVICE_CLASS eDeviceClass; + + IMG_UINT8 ui8VersionMajor; + IMG_UINT8 ui8VersionMinor; + IMG_UINT32 ui32CoreConfig; + IMG_UINT32 ui32CoreFlags; + + /* Kernel mode linear address of device registers */ + IMG_PVOID pvRegsBaseKM; + +#if defined(SGX_FEATURE_HOST_PORT) + /* Kernel mode linear address of host port */ + IMG_PVOID pvHostPortBaseKM; + /* HP size */ + IMG_UINT32 ui32HPSize; + /* HP syspaddr */ + IMG_SYS_PHYADDR sHPSysPAddr; +#endif + + /* FIXME: The alloc for this should go through OSAllocMem in future */ + IMG_HANDLE hRegMapping; + + /* System physical address of device registers*/ + IMG_SYS_PHYADDR sRegsPhysBase; + /* Register region size in bytes */ + IMG_UINT32 ui32RegSize; + +#if defined(SUPPORT_EXTERNAL_SYSTEM_CACHE) + /* external system cache register region size in bytes */ + IMG_UINT32 ui32ExtSysCacheRegsSize; + /* external system cache register device relative physical address */ + IMG_DEV_PHYADDR sExtSysCacheRegsDevPBase; + /* ptr to page table */ + IMG_UINT32 *pui32ExtSystemCacheRegsPT; + /* handle to page table alloc/mapping */ + IMG_HANDLE hExtSystemCacheRegsPTPageOSMemHandle; + /* sys phys addr of PT */ + IMG_SYS_PHYADDR sExtSystemCacheRegsPTSysPAddr; +#endif + + /* SGX clock speed */ + IMG_UINT32 ui32CoreClockSpeed; + IMG_UINT32 ui32uKernelTimerClock; + IMG_BOOL bSGXIdle; + + PVRSRV_STUB_PBDESC *psStubPBDescListKM; + + + /* kernel memory context info */ + IMG_DEV_PHYADDR sKernelPDDevPAddr; + + IMG_UINT32 ui32HeapCount; /*!< heap count */ + IMG_VOID *pvDeviceMemoryHeap; + PPVRSRV_KERNEL_MEM_INFO psKernelCCBMemInfo; /*!< meminfo for CCB in device accessible memory */ + PVRSRV_SGX_KERNEL_CCB *psKernelCCB; /*!< kernel mode linear address of CCB in device accessible memory */ + PPVRSRV_SGX_CCB_INFO psKernelCCBInfo; /*!< CCB information 
structure */ + PPVRSRV_KERNEL_MEM_INFO psKernelCCBCtlMemInfo; /*!< meminfo for CCB control in device accessible memory */ + PVRSRV_SGX_CCB_CTL *psKernelCCBCtl; /*!< kernel mode linear address of CCB control in device accessible memory */ + PPVRSRV_KERNEL_MEM_INFO psKernelCCBEventKickerMemInfo; /*!< meminfo for kernel CCB event kicker */ + IMG_UINT32 *pui32KernelCCBEventKicker; /*!< kernel mode linear address of kernel CCB event kicker */ +#if defined(PDUMP) + IMG_UINT32 ui32KernelCCBEventKickerDumpVal; /*!< pdump copy of the kernel CCB event kicker */ +#endif /* PDUMP */ + PVRSRV_KERNEL_MEM_INFO *psKernelSGXMiscMemInfo; /*!< kernel mode linear address of SGX misc info buffer */ + IMG_UINT32 aui32HostKickAddr[SGXMKIF_CMD_MAX]; /*!< ukernel host kick offsets */ +#if defined(SGX_SUPPORT_HWPROFILING) + PPVRSRV_KERNEL_MEM_INFO psKernelHWProfilingMemInfo; +#endif + PPVRSRV_KERNEL_MEM_INFO psKernelHWPerfCBMemInfo; /*!< Meminfo for hardware performance circular buffer */ + PPVRSRV_KERNEL_MEM_INFO psKernelTASigBufferMemInfo; /*!< Meminfo for TA signature buffer */ + PPVRSRV_KERNEL_MEM_INFO psKernel3DSigBufferMemInfo; /*!< Meminfo for 3D signature buffer */ +#if defined(SGX_FEATURE_VDM_CONTEXT_SWITCH) && \ + defined(FIX_HW_BRN_33657) && defined(SUPPORT_SECURE_33657_FIX) + PPVRSRV_KERNEL_MEM_INFO psKernelVDMStateUpdateBufferMemInfo; /*!< Meminfo for state update buffer */ +#endif +#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG) + PPVRSRV_KERNEL_MEM_INFO psKernelEDMStatusBufferMemInfo; /*!< Meminfo for EDM status buffer */ +#endif + /* Client reference count */ + IMG_UINT32 ui32ClientRefCount; + + /* cache control word for micro kernel cache flush/invalidates */ + IMG_UINT32 ui32CacheControl; + + /* client-side build options */ + IMG_UINT32 ui32ClientBuildOptions; + + /* client-side microkernel structure sizes */ + SGX_MISCINFO_STRUCT_SIZES sSGXStructSizes; + + /* + if we don't preallocate the pagetables we must + insert newly allocated page tables dynamically + */ + IMG_VOID
*pvMMUContextList; + + /* Copy of registry ForcePTOff entry */ + IMG_BOOL bForcePTOff; + + IMG_UINT32 ui32EDMTaskReg0; + IMG_UINT32 ui32EDMTaskReg1; + + IMG_UINT32 ui32ClkGateCtl; + IMG_UINT32 ui32ClkGateCtl2; + IMG_UINT32 ui32ClkGateStatusReg; + IMG_UINT32 ui32ClkGateStatusMask; +#if defined(SGX_FEATURE_MP) + IMG_UINT32 ui32MasterClkGateStatusReg; + IMG_UINT32 ui32MasterClkGateStatusMask; + IMG_UINT32 ui32MasterClkGateStatus2Reg; + IMG_UINT32 ui32MasterClkGateStatus2Mask; +#endif /* SGX_FEATURE_MP */ +#if defined(SGX_FEATURE_AUTOCLOCKGATING) + IMG_BOOL bDisableClockGating; +#endif + SGX_INIT_SCRIPTS sScripts; + + /* Members associated with dummy PD needed for BIF reset */ + IMG_HANDLE hBIFResetPDOSMemHandle; + IMG_DEV_PHYADDR sBIFResetPDDevPAddr; + IMG_DEV_PHYADDR sBIFResetPTDevPAddr; + IMG_DEV_PHYADDR sBIFResetPageDevPAddr; + IMG_UINT32 *pui32BIFResetPD; + IMG_UINT32 *pui32BIFResetPT; + + +#if defined(SUPPORT_HW_RECOVERY) + /* Timeout callback handle */ + IMG_HANDLE hTimer; + /* HW recovery Time stamp */ + IMG_UINT32 ui32TimeStamp; +#endif + + /* Number of SGX resets */ + IMG_UINT32 ui32NumResets; + + /* host control */ + PVRSRV_KERNEL_MEM_INFO *psKernelSGXHostCtlMemInfo; + SGXMKIF_HOST_CTL *psSGXHostCtl; + + /* TA/3D control */ + PVRSRV_KERNEL_MEM_INFO *psKernelSGXTA3DCtlMemInfo; + +#if defined(FIX_HW_BRN_31272) || defined(FIX_HW_BRN_31780) || defined(FIX_HW_BRN_33920) + PVRSRV_KERNEL_MEM_INFO *psKernelSGXPTLAWriteBackMemInfo; +#endif + + /* memory tiling range usage */ + IMG_UINT32 ui32MemTilingUsage; + + #if defined(PDUMP) + PVRSRV_SGX_PDUMP_CONTEXT sPDContext; + #endif + +#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE) + /* SGX MMU dummy page details */ + IMG_VOID *pvDummyPTPageCpuVAddr; + IMG_DEV_PHYADDR sDummyPTDevPAddr; + IMG_HANDLE hDummyPTPageOSMemHandle; + IMG_VOID *pvDummyDataPageCpuVAddr; + IMG_DEV_PHYADDR sDummyDataDevPAddr; + IMG_HANDLE hDummyDataPageOSMemHandle; +#endif +#if defined(PDUMP) + PDUMP_MMU_ATTRIB sMMUAttrib; +#endif + IMG_UINT32 
asSGXDevData[SGX_MAX_DEV_DATA]; + +#if defined(FIX_HW_BRN_31620) + /* Dummy page refs */ + IMG_VOID *pvBRN31620DummyPageCpuVAddr; + IMG_HANDLE hBRN31620DummyPageOSMemHandle; + IMG_DEV_PHYADDR sBRN31620DummyPageDevPAddr; + + /* Dummy PT refs */ + IMG_VOID *pvBRN31620DummyPTCpuVAddr; + IMG_HANDLE hBRN31620DummyPTOSMemHandle; + IMG_DEV_PHYADDR sBRN31620DummyPTDevPAddr; + + IMG_HANDLE hKernelMMUContext; +#endif + +#if defined(SUPPORT_PVRSRV_ANDROID_SYSTRACE) && defined(EUR_CR_TIMER) + IMG_BOOL bSystraceInitialised; + PVRSRV_SYSTRACE_DATA *psSystraceData; +#endif + +} PVRSRV_SGXDEV_INFO; + + +typedef struct _SGX_TIMING_INFORMATION_ +{ + IMG_UINT32 ui32CoreClockSpeed; + IMG_UINT32 ui32HWRecoveryFreq; + IMG_BOOL bEnableActivePM; + IMG_UINT32 ui32ActivePowManLatencyms; + IMG_UINT32 ui32uKernelFreq; +} SGX_TIMING_INFORMATION; + +/* FIXME Rename this structure to sg more generalised as it's been extended*/ +/* SGX device map */ +typedef struct _SGX_DEVICE_MAP_ +{ + IMG_UINT32 ui32Flags; + + /* Registers */ + IMG_SYS_PHYADDR sRegsSysPBase; + IMG_CPU_PHYADDR sRegsCpuPBase; + IMG_CPU_VIRTADDR pvRegsCpuVBase; + IMG_UINT32 ui32RegsSize; + +#if defined(SGX_FEATURE_HOST_PORT) + IMG_SYS_PHYADDR sHPSysPBase; + IMG_CPU_PHYADDR sHPCpuPBase; + IMG_UINT32 ui32HPSize; +#endif + + /* Local Device Memory Region: (if present) */ + IMG_SYS_PHYADDR sLocalMemSysPBase; + IMG_DEV_PHYADDR sLocalMemDevPBase; + IMG_CPU_PHYADDR sLocalMemCpuPBase; + IMG_UINT32 ui32LocalMemSize; + +#if defined(SUPPORT_EXTERNAL_SYSTEM_CACHE) + IMG_UINT32 ui32ExtSysCacheRegsSize; + IMG_DEV_PHYADDR sExtSysCacheRegsDevPBase; +#endif + + /* device interrupt IRQ */ + IMG_UINT32 ui32IRQ; + +#if !defined(SGX_DYNAMIC_TIMING_INFO) + /* timing information*/ + SGX_TIMING_INFORMATION sTimingInfo; +#endif +#if defined(PDUMP) + /* pdump memory region name */ + IMG_CHAR *pszPDumpDevName; +#endif +} SGX_DEVICE_MAP; + + +struct _PVRSRV_STUB_PBDESC_ +{ + IMG_UINT32 ui32RefCount; + IMG_UINT32 ui32TotalPBSize; + PVRSRV_KERNEL_MEM_INFO 
*psSharedPBDescKernelMemInfo; + PVRSRV_KERNEL_MEM_INFO *psHWPBDescKernelMemInfo; + PVRSRV_KERNEL_MEM_INFO **ppsSubKernelMemInfos; + IMG_UINT32 ui32SubKernelMemInfosCount; + IMG_HANDLE hDevCookie; + PVRSRV_KERNEL_MEM_INFO *psBlockKernelMemInfo; + PVRSRV_KERNEL_MEM_INFO *psHWBlockKernelMemInfo; + IMG_DEV_VIRTADDR sHWPBDescDevVAddr; + PVRSRV_STUB_PBDESC *psNext; + PVRSRV_STUB_PBDESC **ppsThis; +}; + +/*! + ****************************************************************************** + * CCB control structure for SGX + *****************************************************************************/ +typedef struct _PVRSRV_SGX_CCB_INFO_ +{ + PVRSRV_KERNEL_MEM_INFO *psCCBMemInfo; /*!< meminfo for CCB in device accessible memory */ + PVRSRV_KERNEL_MEM_INFO *psCCBCtlMemInfo; /*!< meminfo for CCB control in device accessible memory */ + SGXMKIF_COMMAND *psCommands; /*!< linear address of the array of commands */ + IMG_UINT32 *pui32WriteOffset; /*!< linear address of the write offset into array of commands */ + volatile IMG_UINT32 *pui32ReadOffset; /*!< linear address of the read offset into array of commands */ +#if defined(PDUMP) + IMG_UINT32 ui32CCBDumpWOff; /*!< for pdumping */ +#endif +} PVRSRV_SGX_CCB_INFO; + + +typedef struct _SGX_BRIDGE_INIT_INFO_KM_ +{ + IMG_HANDLE hKernelCCBMemInfo; + IMG_HANDLE hKernelCCBCtlMemInfo; + IMG_HANDLE hKernelCCBEventKickerMemInfo; + IMG_HANDLE hKernelSGXHostCtlMemInfo; + IMG_HANDLE hKernelSGXTA3DCtlMemInfo; +#if defined(FIX_HW_BRN_31272) || defined(FIX_HW_BRN_31780) || defined(FIX_HW_BRN_33920) + IMG_HANDLE hKernelSGXPTLAWriteBackMemInfo; +#endif + IMG_HANDLE hKernelSGXMiscMemInfo; + + IMG_UINT32 aui32HostKickAddr[SGXMKIF_CMD_MAX]; + + SGX_INIT_SCRIPTS sScripts; + + IMG_UINT32 ui32ClientBuildOptions; + SGX_MISCINFO_STRUCT_SIZES sSGXStructSizes; + +#if defined(SGX_SUPPORT_HWPROFILING) + IMG_HANDLE hKernelHWProfilingMemInfo; +#endif +#if defined(SUPPORT_SGX_HWPERF) + IMG_HANDLE hKernelHWPerfCBMemInfo; +#endif + IMG_HANDLE 
hKernelTASigBufferMemInfo; + IMG_HANDLE hKernel3DSigBufferMemInfo; + +#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG) + IMG_HANDLE hKernelEDMStatusBufferMemInfo; +#endif + + IMG_UINT32 ui32EDMTaskReg0; + IMG_UINT32 ui32EDMTaskReg1; + + IMG_UINT32 ui32ClkGateStatusReg; + IMG_UINT32 ui32ClkGateStatusMask; +#if defined(SGX_FEATURE_MP) +// IMG_UINT32 ui32MasterClkGateStatusReg; +// IMG_UINT32 ui32MasterClkGateStatusMask; +// IMG_UINT32 ui32MasterClkGateStatus2Reg; +// IMG_UINT32 ui32MasterClkGateStatus2Mask; +#endif /* SGX_FEATURE_MP */ + + IMG_UINT32 ui32CacheControl; + + IMG_UINT32 asInitDevData[SGX_MAX_DEV_DATA]; + IMG_HANDLE asInitMemHandles[SGX_MAX_INIT_MEM_HANDLES]; + +} SGX_BRIDGE_INIT_INFO_KM; + + +typedef struct _SGX_INTERNEL_STATUS_UPDATE_KM_ +{ + CTL_STATUS sCtlStatus; + IMG_HANDLE hKernelMemInfo; +} SGX_INTERNEL_STATUS_UPDATE_KM; + + +typedef struct _SGX_CCB_KICK_KM_ +{ + SGXMKIF_COMMAND sCommand; + IMG_HANDLE hCCBKernelMemInfo; + + IMG_UINT32 ui32NumDstSyncObjects; + IMG_HANDLE hKernelHWSyncListMemInfo; + + /* DST syncs */ + IMG_HANDLE *pahDstSyncHandles; + + IMG_UINT32 ui32NumTAStatusVals; + IMG_UINT32 ui32Num3DStatusVals; + +#if defined(SUPPORT_SGX_NEW_STATUS_VALS) + SGX_INTERNEL_STATUS_UPDATE_KM asTAStatusUpdate[SGX_MAX_TA_STATUS_VALS]; + SGX_INTERNEL_STATUS_UPDATE_KM as3DStatusUpdate[SGX_MAX_3D_STATUS_VALS]; +#else + IMG_HANDLE ahTAStatusSyncInfo[SGX_MAX_TA_STATUS_VALS]; + IMG_HANDLE ah3DStatusSyncInfo[SGX_MAX_3D_STATUS_VALS]; +#endif + + IMG_BOOL bFirstKickOrResume; +#if defined(NO_HARDWARE) || defined(PDUMP) + IMG_BOOL bTerminateOrAbort; +#endif + + /* CCB offset of data structure associated with this kick */ + IMG_UINT32 ui32CCBOffset; + + /* SRC syncs */ + IMG_UINT32 ui32NumSrcSyncs; + IMG_HANDLE ahSrcKernelSyncInfo[SGX_MAX_SRC_SYNCS_TA]; + + /* TA/3D dependency data */ + IMG_BOOL bTADependency; + IMG_HANDLE hTA3DSyncInfo; + + IMG_HANDLE hTASyncInfo; + IMG_HANDLE h3DSyncInfo; +#if defined(PDUMP) + IMG_UINT32 ui32CCBDumpWOff; +#endif +#if 
defined(NO_HARDWARE) + IMG_UINT32 ui32WriteOpsPendingVal; +#endif +} SGX_CCB_KICK_KM; + + +#if defined(TRANSFER_QUEUE) +typedef struct _PVRSRV_TRANSFER_SGX_KICK_KM_ +{ + IMG_HANDLE hCCBMemInfo; + IMG_UINT32 ui32SharedCmdCCBOffset; + + IMG_DEV_VIRTADDR sHWTransferContextDevVAddr; + + IMG_HANDLE hTASyncInfo; + IMG_HANDLE h3DSyncInfo; + + IMG_UINT32 ui32NumSrcSync; + IMG_HANDLE ahSrcSyncInfo[SGX_MAX_TRANSFER_SYNC_OPS]; + + IMG_UINT32 ui32NumDstSync; + IMG_HANDLE ahDstSyncInfo[SGX_MAX_TRANSFER_SYNC_OPS]; + + IMG_UINT32 ui32Flags; + + IMG_UINT32 ui32PDumpFlags; +#if defined(PDUMP) + IMG_UINT32 ui32CCBDumpWOff; +#endif +} PVRSRV_TRANSFER_SGX_KICK_KM, *PPVRSRV_TRANSFER_SGX_KICK_KM; + +#if defined(SGX_FEATURE_2D_HARDWARE) +typedef struct _PVRSRV_2D_SGX_KICK_KM_ +{ + IMG_HANDLE hCCBMemInfo; + IMG_UINT32 ui32SharedCmdCCBOffset; + + IMG_DEV_VIRTADDR sHW2DContextDevVAddr; + + IMG_UINT32 ui32NumSrcSync; + IMG_HANDLE ahSrcSyncInfo[SGX_MAX_2D_SRC_SYNC_OPS]; + + /* need to be able to check reads and writes on dest, and update writes */ + IMG_HANDLE hDstSyncInfo; + + /* need to be able to check reads and writes on TA ops, and update writes */ + IMG_HANDLE hTASyncInfo; + + /* need to be able to check reads and writes on 2D ops, and update writes */ + IMG_HANDLE h3DSyncInfo; + + IMG_UINT32 ui32PDumpFlags; +#if defined(PDUMP) + IMG_UINT32 ui32CCBDumpWOff; +#endif +} PVRSRV_2D_SGX_KICK_KM, *PPVRSRV_2D_SGX_KICK_KM; +#endif /* defined(SGX_FEATURE_2D_HARDWARE) */ +#endif /* #if defined(TRANSFER_QUEUE) */ + +/****************************************************************************/ +/* kernel only functions prototypes */ +/****************************************************************************/ +PVRSRV_ERROR SGXRegisterDevice (PVRSRV_DEVICE_NODE *psDeviceNode); + +IMG_VOID SGXOSTimer(IMG_VOID *pvData); + +IMG_VOID SGXReset(PVRSRV_SGXDEV_INFO *psDevInfo, + IMG_BOOL bHardwareRecovery, + IMG_UINT32 ui32PDUMPFlags); + +IMG_VOID SGXInitClocks(PVRSRV_SGXDEV_INFO *psDevInfo, + IMG_UINT32 
ui32PDUMPFlags); + +PVRSRV_ERROR SGXInitialise(PVRSRV_SGXDEV_INFO *psDevInfo, + IMG_BOOL bHardwareRecovery); +PVRSRV_ERROR SGXDeinitialise(IMG_HANDLE hDevCookie); + +PVRSRV_ERROR SGXPrePowerState(IMG_HANDLE hDevHandle, + PVRSRV_DEV_POWER_STATE eNewPowerState, + PVRSRV_DEV_POWER_STATE eCurrentPowerState); + +PVRSRV_ERROR SGXPostPowerState(IMG_HANDLE hDevHandle, + PVRSRV_DEV_POWER_STATE eNewPowerState, + PVRSRV_DEV_POWER_STATE eCurrentPowerState); + +PVRSRV_ERROR SGXPreClockSpeedChange(IMG_HANDLE hDevHandle, + IMG_BOOL bIdleDevice, + PVRSRV_DEV_POWER_STATE eCurrentPowerState); + +PVRSRV_ERROR SGXPostClockSpeedChange(IMG_HANDLE hDevHandle, + IMG_BOOL bIdleDevice, + PVRSRV_DEV_POWER_STATE eCurrentPowerState); + +IMG_VOID SGXPanic(PVRSRV_SGXDEV_INFO *psDevInfo); + +IMG_VOID RunSGXREGDebugScripts(PVRSRV_SGXDEV_INFO *psDevInfo); + +IMG_VOID SGXDumpDebugInfo (PVRSRV_SGXDEV_INFO *psDevInfo, + IMG_BOOL bDumpSGXRegs); + +PVRSRV_ERROR SGXDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode); + +#if defined(SGX_DYNAMIC_TIMING_INFO) +IMG_VOID SysGetSGXTimingInformation(SGX_TIMING_INFORMATION *psSGXTimingInfo); +#endif + +/****************************************************************************/ +/* kernel only functions: */ +/****************************************************************************/ +#if defined(NO_HARDWARE) +static INLINE IMG_VOID NoHardwareGenerateEvent(PVRSRV_SGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32StatusRegister, + IMG_UINT32 ui32StatusValue, + IMG_UINT32 ui32StatusMask) +{ + IMG_UINT32 ui32RegVal; + + ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, ui32StatusRegister); + + ui32RegVal &= ~ui32StatusMask; + ui32RegVal |= (ui32StatusValue & ui32StatusMask); + + OSWriteHWReg(psDevInfo->pvRegsBaseKM, ui32StatusRegister, ui32RegVal); +} +#endif + +#if defined(__cplusplus) +} +#endif + +#endif /* __SGXINFOKM_H__ */ + +/***************************************************************************** + End of file (sgxinfokm.h) 
+*****************************************************************************/ diff --git a/sgx_km/eurasia_km/services4/srvkm/devices/sgx/sgxinit.c b/sgx_km/eurasia_km/services4/srvkm/devices/sgx/sgxinit.c new file mode 100644 index 0000000..0277ac6 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/devices/sgx/sgxinit.c @@ -0,0 +1,3642 @@ +/*************************************************************************/ /*! +@Title Device specific initialisation routines +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. 
+ +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include + +#include "sgxdefs.h" +#include "sgxmmu.h" +#include "services_headers.h" +#include "buffer_manager.h" +#include "sgxapi_km.h" +#include "sgxinfo.h" +#include "sgx_mkif_km.h" +#include "sgxconfig.h" +#include "sysconfig.h" +#include "pvr_bridge_km.h" + +#include "sgx_bridge_km.h" + +#include "pdump_km.h" +#include "ra.h" +#include "mmu.h" +#include "handle.h" +#include "perproc.h" + +#include "sgxutils.h" +#include "pvrversion.h" +#include "sgx_options.h" + +#include "lists.h" +#include "srvkm.h" +#include "ttrace.h" + +#if defined(SUPPORT_PVRSRV_ANDROID_SYSTRACE) && defined(EUR_CR_TIMER) +#include "systrace.h" +#endif + +IMG_UINT32 g_ui32HostIRQCountSample = 0; + +#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG) + +static const IMG_CHAR *SGXUKernelStatusString(IMG_UINT32 code) +{ + switch(code) + { +#define MKTC_ST(x) \ + case x: \ + return #x; +#include "sgx_ukernel_status_codes.h" + default: + return "(Unknown)"; + } +} + +#endif /* defined(PVRSRV_USSE_EDM_STATUS_DEBUG) */ + +#define VAR(x) #x +/* PRQA S 0881 11 */ /* ignore 'order of evaluation' warning */ +#define CHECK_SIZE(NAME) \ +{ \ + if (psSGXStructSizes->ui32Sizeof_##NAME != psDevInfo->sSGXStructSizes.ui32Sizeof_##NAME) \ + { \ + PVR_DPF((PVR_DBG_ERROR, "SGXDevInitCompatCheck: Size check failed for 
SGXMKIF_%s (client) = %d bytes, (ukernel) = %d bytes\n", \ + VAR(NAME), \ + psDevInfo->sSGXStructSizes.ui32Sizeof_##NAME, \ + psSGXStructSizes->ui32Sizeof_##NAME )); \ + bStructSizesFailed = IMG_TRUE; \ + } \ +} + +#if defined (SYS_USING_INTERRUPTS) +IMG_BOOL SGX_ISRHandler(IMG_VOID *pvData); +#endif + + +static +PVRSRV_ERROR SGXGetMiscInfoUkernel(PVRSRV_SGXDEV_INFO *psDevInfo, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_HANDLE hDevMemContext); +#if defined(PDUMP) +static +PVRSRV_ERROR SGXResetPDump(PVRSRV_DEVICE_NODE *psDeviceNode); +#endif + +/*! +******************************************************************************* + + @Function SGXCommandComplete + + @Description + + SGX command complete handler + + @Input psDeviceNode - SGX device node + + @Return none + +******************************************************************************/ +static IMG_VOID SGXCommandComplete(PVRSRV_DEVICE_NODE *psDeviceNode) +{ +#if defined(OS_SUPPORTS_IN_LISR) + if (OSInLISR(psDeviceNode->psSysData)) + { + /* + * We shouldn't call SGXScheduleProcessQueuesKM in an + * LISR, as it may attempt to power up SGX. + * We assume that the LISR will schedule the MISR, which + * will test the following flag, and call + * SGXScheduleProcessQueuesKM if the flag is set. + */ + psDeviceNode->bReProcessDeviceCommandComplete = IMG_TRUE; + } + else + { + SGXScheduleProcessQueuesKM(psDeviceNode); + } +#else + SGXScheduleProcessQueuesKM(psDeviceNode); +#endif +} + +/*! +******************************************************************************* + + @Function DeinitDevInfo + + @Description + + Deinits DevInfo + + @Input none + + @Return none + +******************************************************************************/ +static IMG_UINT32 DeinitDevInfo(PVRSRV_SGXDEV_INFO *psDevInfo) +{ + if (psDevInfo->psKernelCCBInfo != IMG_NULL) + { + /* + Free CCB info. 
+ */ + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_SGX_CCB_INFO), psDevInfo->psKernelCCBInfo, IMG_NULL); + } + + return PVRSRV_OK; +} + +/*! +******************************************************************************* + + @Function InitDevInfo + + @Description + + Loads DevInfo + + @Input psDeviceNode + + @Return PVRSRV_ERROR + +******************************************************************************/ +static PVRSRV_ERROR InitDevInfo(PVRSRV_PER_PROCESS_DATA *psPerProc, + PVRSRV_DEVICE_NODE *psDeviceNode, + SGX_BRIDGE_INIT_INFO *psInitInfo) +{ + PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice; + PVRSRV_ERROR eError; + + PVRSRV_SGX_CCB_INFO *psKernelCCBInfo = IMG_NULL; + + PVR_UNREFERENCED_PARAMETER(psPerProc); + psDevInfo->sScripts = psInitInfo->sScripts; + + psDevInfo->psKernelCCBMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelCCBMemInfo; + psDevInfo->psKernelCCB = (PVRSRV_SGX_KERNEL_CCB *) psDevInfo->psKernelCCBMemInfo->pvLinAddrKM; + + psDevInfo->psKernelCCBCtlMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelCCBCtlMemInfo; + psDevInfo->psKernelCCBCtl = (PVRSRV_SGX_CCB_CTL *) psDevInfo->psKernelCCBCtlMemInfo->pvLinAddrKM; + + psDevInfo->psKernelCCBEventKickerMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelCCBEventKickerMemInfo; + psDevInfo->pui32KernelCCBEventKicker = (IMG_UINT32 *)psDevInfo->psKernelCCBEventKickerMemInfo->pvLinAddrKM; + + psDevInfo->psKernelSGXHostCtlMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelSGXHostCtlMemInfo; + psDevInfo->psSGXHostCtl = (SGXMKIF_HOST_CTL *)psDevInfo->psKernelSGXHostCtlMemInfo->pvLinAddrKM; + + psDevInfo->psKernelSGXTA3DCtlMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelSGXTA3DCtlMemInfo; + +#if defined(FIX_HW_BRN_31272) || defined(FIX_HW_BRN_31780) || defined(FIX_HW_BRN_33920) + psDevInfo->psKernelSGXPTLAWriteBackMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelSGXPTLAWriteBackMemInfo; +#endif + + psDevInfo->psKernelSGXMiscMemInfo = 
(PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelSGXMiscMemInfo; + +#if defined(SGX_SUPPORT_HWPROFILING) + psDevInfo->psKernelHWProfilingMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelHWProfilingMemInfo; +#endif +#if defined(SUPPORT_SGX_HWPERF) + psDevInfo->psKernelHWPerfCBMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelHWPerfCBMemInfo; +#endif + psDevInfo->psKernelTASigBufferMemInfo = psInitInfo->hKernelTASigBufferMemInfo; + psDevInfo->psKernel3DSigBufferMemInfo = psInitInfo->hKernel3DSigBufferMemInfo; +#if defined(SGX_FEATURE_VDM_CONTEXT_SWITCH) && \ + defined(FIX_HW_BRN_33657) && defined(SUPPORT_SECURE_33657_FIX) + psDevInfo->psKernelVDMStateUpdateBufferMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelVDMStateUpdateBufferMemInfo; +#endif +#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG) + psDevInfo->psKernelEDMStatusBufferMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelEDMStatusBufferMemInfo; +#endif + /* + * Assign client-side build options for later verification + */ + psDevInfo->ui32ClientBuildOptions = psInitInfo->ui32ClientBuildOptions; + + /* + * Assign microkernel IF structure sizes for later verification + */ + psDevInfo->sSGXStructSizes = psInitInfo->sSGXStructSizes; + + /* + Setup the kernel version of the CCB control + */ + eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, + sizeof(PVRSRV_SGX_CCB_INFO), + (IMG_VOID **)&psKernelCCBInfo, 0, + "SGX Circular Command Buffer Info"); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"InitDevInfo: Failed to alloc memory")); + goto failed_allockernelccb; + } + + + OSMemSet(psKernelCCBInfo, 0, sizeof(PVRSRV_SGX_CCB_INFO)); + psKernelCCBInfo->psCCBMemInfo = psDevInfo->psKernelCCBMemInfo; + psKernelCCBInfo->psCCBCtlMemInfo = psDevInfo->psKernelCCBCtlMemInfo; + psKernelCCBInfo->psCommands = psDevInfo->psKernelCCB->asCommands; + psKernelCCBInfo->pui32WriteOffset = &psDevInfo->psKernelCCBCtl->ui32WriteOffset; + psKernelCCBInfo->pui32ReadOffset = &psDevInfo->psKernelCCBCtl->ui32ReadOffset; + 
psDevInfo->psKernelCCBInfo = psKernelCCBInfo; + + /* + Copy the USE code addresses for the host kick. + */ + OSMemCopy(psDevInfo->aui32HostKickAddr, psInitInfo->aui32HostKickAddr, + SGXMKIF_CMD_MAX * sizeof(psDevInfo->aui32HostKickAddr[0])); + + psDevInfo->bForcePTOff = IMG_FALSE; + + psDevInfo->ui32CacheControl = psInitInfo->ui32CacheControl; + + psDevInfo->ui32EDMTaskReg0 = psInitInfo->ui32EDMTaskReg0; + psDevInfo->ui32EDMTaskReg1 = psInitInfo->ui32EDMTaskReg1; + psDevInfo->ui32ClkGateCtl = psInitInfo->ui32ClkGateCtl; + psDevInfo->ui32ClkGateCtl2 = psInitInfo->ui32ClkGateCtl2; + psDevInfo->ui32ClkGateStatusReg = psInitInfo->ui32ClkGateStatusReg; + psDevInfo->ui32ClkGateStatusMask = psInitInfo->ui32ClkGateStatusMask; +#if defined(SGX_FEATURE_MP) + psDevInfo->ui32MasterClkGateStatusReg = psInitInfo->ui32MasterClkGateStatusReg; + psDevInfo->ui32MasterClkGateStatusMask = psInitInfo->ui32MasterClkGateStatusMask; + psDevInfo->ui32MasterClkGateStatus2Reg = psInitInfo->ui32MasterClkGateStatus2Reg; + psDevInfo->ui32MasterClkGateStatus2Mask = psInitInfo->ui32MasterClkGateStatus2Mask; +#endif /* SGX_FEATURE_MP */ +#if defined(SGX_FEATURE_AUTOCLOCKGATING) + psDevInfo->bDisableClockGating = psInitInfo->bDisableClockGating; +#endif + + + /* Initialise Dev Data */ + OSMemCopy(&psDevInfo->asSGXDevData, &psInitInfo->asInitDevData, sizeof(psDevInfo->asSGXDevData)); + + return PVRSRV_OK; + +failed_allockernelccb: + DeinitDevInfo(psDevInfo); + + return eError; +} + + + + +static PVRSRV_ERROR SGXRunScript(PVRSRV_SGXDEV_INFO *psDevInfo, SGX_INIT_COMMAND *psScript, IMG_UINT32 ui32NumInitCommands) +{ + IMG_UINT32 ui32PC; + SGX_INIT_COMMAND *psComm; + + for (ui32PC = 0, psComm = psScript; + ui32PC < ui32NumInitCommands; + ui32PC++, psComm++) + { + switch (psComm->eOp) + { + case SGX_INIT_OP_WRITE_HW_REG: + { + OSWriteHWReg(psDevInfo->pvRegsBaseKM, psComm->sWriteHWReg.ui32Offset, psComm->sWriteHWReg.ui32Value); + PDUMPCOMMENT("SGXRunScript: Write HW reg operation"); + 
PDUMPREG(SGX_PDUMPREG_NAME, psComm->sWriteHWReg.ui32Offset, psComm->sWriteHWReg.ui32Value); + break; + } + case SGX_INIT_OP_READ_HW_REG: + { + psComm->sReadHWReg.ui32Value = OSReadHWReg(psDevInfo->pvRegsBaseKM, psComm->sReadHWReg.ui32Offset); +#if defined(PDUMP) + PDUMPCOMMENT("SGXRunScript: Read HW reg operation"); + PDumpRegRead(SGX_PDUMPREG_NAME, psComm->sReadHWReg.ui32Offset, PDUMP_FLAGS_CONTINUOUS); +#endif + break; + } + case SGX_INIT_OP_PRINT_HW_REG: + { + psComm->sReadHWReg.ui32Value = OSReadHWReg(psDevInfo->pvRegsBaseKM, psComm->sReadHWReg.ui32Offset); + PVR_LOG(("\t(SGXREG) 0x%08X : 0x%08X", psComm->sReadHWReg.ui32Offset, psComm->sReadHWReg.ui32Value)); + + break; + } + +#if defined(PDUMP) + case SGX_INIT_OP_PDUMP_HW_REG: + { + PDUMPCOMMENT("SGXRunScript: Dump HW reg operation"); + PDUMPREG(SGX_PDUMPREG_NAME, psComm->sPDumpHWReg.ui32Offset, psComm->sPDumpHWReg.ui32Value); + break; + } +#endif + case SGX_INIT_OP_HALT: + { + return PVRSRV_OK; + } + case SGX_INIT_OP_ILLEGAL: + /* FALLTHROUGH */ + default: + { + PVR_DPF((PVR_DBG_ERROR,"SGXRunScript: PC %d: Illegal command: %d", ui32PC, psComm->eOp)); + return PVRSRV_ERROR_UNKNOWN_SCRIPT_OPERATION; + } + } + + } + + return PVRSRV_ERROR_UNKNOWN_SCRIPT_OPERATION; +} + +/* Run scripts on given core */ +static PVRSRV_ERROR SGXRunScriptOnCore(PVRSRV_SGXDEV_INFO *psDevInfo, SGX_INIT_COMMAND *psScript, IMG_UINT32 ui32NumInitCommands, IMG_UINT32 ui32CoreNum) +{ + IMG_UINT32 ui32PC; + SGX_INIT_COMMAND *psComm; + +#if !defined(SGX_FEATURE_MP) + PVR_UNREFERENCED_PARAMETER(ui32CoreNum); +#endif + + for (ui32PC = 0, psComm = psScript; + ui32PC < ui32NumInitCommands; + ui32PC++, psComm++) + { + switch (psComm->eOp) + { + case SGX_INIT_OP_WRITE_HW_REG: + { + OSWriteHWReg(psDevInfo->pvRegsBaseKM, SGX_MP_CORE_SELECT(psComm->sWriteHWReg.ui32Offset,ui32CoreNum), psComm->sWriteHWReg.ui32Value); + PDUMPCOMMENT("SGXRunScriptOnCore: Write HW reg operation"); + PDUMPREG(SGX_PDUMPREG_NAME, 
SGX_MP_CORE_SELECT(psComm->sWriteHWReg.ui32Offset,ui32CoreNum), psComm->sWriteHWReg.ui32Value); + break; + } + case SGX_INIT_OP_READ_HW_REG: + { + psComm->sReadHWReg.ui32Value = OSReadHWReg(psDevInfo->pvRegsBaseKM, SGX_MP_CORE_SELECT(psComm->sReadHWReg.ui32Offset, ui32CoreNum)); +#if defined(PDUMP) + PDUMPCOMMENT("SGXRunScriptOnCore: Read HW reg operation"); + PDumpRegRead(SGX_PDUMPREG_NAME, SGX_MP_CORE_SELECT(psComm->sReadHWReg.ui32Offset,ui32CoreNum), PDUMP_FLAGS_CONTINUOUS); +#endif + break; + } + case SGX_INIT_OP_PRINT_HW_REG: + { + psComm->sReadHWReg.ui32Value = OSReadHWReg(psDevInfo->pvRegsBaseKM, SGX_MP_CORE_SELECT(psComm->sReadHWReg.ui32Offset,ui32CoreNum)); + PVR_LOG(("\t(SGXREG) 0x%08X : 0x%08X", SGX_MP_CORE_SELECT(psComm->sReadHWReg.ui32Offset, ui32CoreNum), psComm->sReadHWReg.ui32Value)); + + break; + } + +#if defined(PDUMP) + case SGX_INIT_OP_PDUMP_HW_REG: + { + PDUMPCOMMENT("SGXRunScriptOnCore: Dump HW reg operation"); + PDUMPREG(SGX_PDUMPREG_NAME, SGX_MP_CORE_SELECT(psComm->sPDumpHWReg.ui32Offset, ui32CoreNum), psComm->sPDumpHWReg.ui32Value); + break; + } +#endif + case SGX_INIT_OP_HALT: + { + return PVRSRV_OK; + } + case SGX_INIT_OP_ILLEGAL: + /* FALLTHROUGH */ + default: + { + PVR_DPF((PVR_DBG_ERROR,"SGXRunScriptOnCore: PC %d: Illegal command: %d", ui32PC, psComm->eOp)); + return PVRSRV_ERROR_UNKNOWN_SCRIPT_OPERATION; + } + } + + } + + return PVRSRV_ERROR_UNKNOWN_SCRIPT_OPERATION; +} + +#if defined(SUPPORT_MEMORY_TILING) +static PVRSRV_ERROR SGX_AllocMemTilingRangeInt(PVRSRV_SGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32Start, + IMG_UINT32 ui32End, + IMG_UINT32 ui32TilingStride, + IMG_UINT32 *pui32RangeIndex) +{ + IMG_UINT32 i; + IMG_UINT32 ui32Offset; + IMG_UINT32 ui32Val; + + /* HW supports 10 ranges */ + for(i=0; i < SGX_BIF_NUM_TILING_RANGES; i++) + { + if((psDevInfo->ui32MemTilingUsage & (1U << i)) == 0) + { + /* mark in use */ + psDevInfo->ui32MemTilingUsage |= 1U << i; + /* output range index if the caller wants it */ + if(pui32RangeIndex != 
IMG_NULL) + { + *pui32RangeIndex = i; + } + goto RangeAllocated; + } + } + + PVR_DPF((PVR_DBG_ERROR,"SGX_AllocMemTilingRange: all tiling ranges in use")); + return PVRSRV_ERROR_EXCEEDED_HW_LIMITS; + +RangeAllocated: + + /* An improperly aligned range could cause BIF not to tile some memory which is intended to be tiled, + * or cause BIF to tile some memory which is not intended to be. + */ + if(ui32Start & ~SGX_BIF_TILING_ADDR_MASK) + { + PVR_DPF((PVR_DBG_WARNING,"SGX_AllocMemTilingRangeInt: Tiling range start (0x%08X) fails" + "alignment test", ui32Start)); + } + if((ui32End + 0x00001000) & ~SGX_BIF_TILING_ADDR_MASK) + { + PVR_DPF((PVR_DBG_WARNING,"SGX_AllocMemTilingRangeInt: Tiling range end (0x%08X) fails" + "alignment test", ui32End)); + } + + ui32Offset = EUR_CR_BIF_TILE0 + (i<<2); + + ui32Val = ((ui32TilingStride << EUR_CR_BIF_TILE0_CFG_SHIFT) & EUR_CR_BIF_TILE0_CFG_MASK) + | (((ui32End>>SGX_BIF_TILING_ADDR_LSB) << EUR_CR_BIF_TILE0_MAX_ADDRESS_SHIFT) & EUR_CR_BIF_TILE0_MAX_ADDRESS_MASK) + | (((ui32Start>>SGX_BIF_TILING_ADDR_LSB) << EUR_CR_BIF_TILE0_MIN_ADDRESS_SHIFT) & EUR_CR_BIF_TILE0_MIN_ADDRESS_MASK) + | (EUR_CR_BIF_TILE0_ENABLE << EUR_CR_BIF_TILE0_CFG_SHIFT); + + OSWriteHWReg(psDevInfo->pvRegsBaseKM, ui32Offset, ui32Val); + PDUMPREG(SGX_PDUMPREG_NAME, ui32Offset, ui32Val); + +#if defined(SGX_FEATURE_BIF_WIDE_TILING_AND_4K_ADDRESS) + ui32Offset = EUR_CR_BIF_TILE0_ADDR_EXT + (i<<2); + + ui32Val = (((ui32End>>SGX_BIF_TILING_EXT_ADDR_LSB) << EUR_CR_BIF_TILE0_ADDR_EXT_MAX_SHIFT) & EUR_CR_BIF_TILE0_ADDR_EXT_MAX_MASK) + | (((ui32Start>>SGX_BIF_TILING_EXT_ADDR_LSB) << EUR_CR_BIF_TILE0_ADDR_EXT_MIN_SHIFT) & EUR_CR_BIF_TILE0_ADDR_EXT_MIN_MASK); + + OSWriteHWReg(psDevInfo->pvRegsBaseKM, ui32Offset, ui32Val); + PDUMPREG(SGX_PDUMPREG_NAME, ui32Offset, ui32Val); +#endif /* SGX_FEATURE_BIF_WIDE_TILING_AND_4K_ADDRESS */ + + return PVRSRV_OK; +} + +#endif /* SUPPORT_MEMORY_TILING */ + +/*! 
+******************************************************************************* + + @Function SGXInitialise + + @Description + + (client invoked) chip-reset and initialisation + + @Input pvDeviceNode - device info. structure + @Input bHardwareRecovery - true if recovering powered hardware, + false if powering up + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR SGXInitialise(PVRSRV_SGXDEV_INFO *psDevInfo, + IMG_BOOL bHardwareRecovery) +{ + PVRSRV_ERROR eError; + PVRSRV_KERNEL_MEM_INFO *psSGXHostCtlMemInfo = psDevInfo->psKernelSGXHostCtlMemInfo; + SGXMKIF_HOST_CTL *psSGXHostCtl = psSGXHostCtlMemInfo->pvLinAddrKM; + static IMG_BOOL bFirstTime = IMG_TRUE; +#if defined(PDUMP) + IMG_BOOL bPDumpIsSuspended = PDumpIsSuspended(); +#endif /* PDUMP */ + +#if defined(SGX_FEATURE_MP) + /* Slave core clocks must be enabled during reset */ +#else + SGXInitClocks(psDevInfo, PDUMP_FLAGS_CONTINUOUS); +#endif /* SGX_FEATURE_MP */ + + /* + Part 1 of the initialisation script runs before resetting SGX. + */ + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "SGX initialisation script part 1\n"); + eError = SGXRunScript(psDevInfo, psDevInfo->sScripts.asInitCommandsPart1, SGX_MAX_INIT_COMMANDS); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SGXInitialise: SGXRunScript (part 1) failed (%d)", eError)); + return eError; + } + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "End of SGX initialisation script part 1\n"); + + /* Reset the chip */ + psDevInfo->ui32NumResets++; + +#if !defined(SGX_FEATURE_MP) + bHardwareRecovery |= bFirstTime; +#endif /* SGX_FEATURE_MP */ + + SGXReset(psDevInfo, bHardwareRecovery, PDUMP_FLAGS_CONTINUOUS); + +#if defined(EUR_CR_POWER) +#if defined(SGX531) + /* + Disable half the pipes. + 531 has 2 pipes within a 4 pipe framework, so + the 2 redundant pipes must be disabled even + though they do not exist. 
+ */ + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_POWER, 1); + PDUMPREG(SGX_PDUMPREG_NAME, EUR_CR_POWER, 1); +#else + /* set the default pipe count (all fully enabled) */ + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_POWER, 0); + PDUMPREG(SGX_PDUMPREG_NAME, EUR_CR_POWER, 0); +#endif +#endif + + /* Initialise the kernel CCB event kicker value */ + *psDevInfo->pui32KernelCCBEventKicker = 0; +#if defined(PDUMP) + if (!bPDumpIsSuspended) + { + psDevInfo->ui32KernelCCBEventKickerDumpVal = 0; + PDUMPMEM(&psDevInfo->ui32KernelCCBEventKickerDumpVal, + psDevInfo->psKernelCCBEventKickerMemInfo, 0, + sizeof(*psDevInfo->pui32KernelCCBEventKicker), PDUMP_FLAGS_CONTINUOUS, + MAKEUNIQUETAG(psDevInfo->psKernelCCBEventKickerMemInfo)); + } +#endif /* PDUMP */ + +#if defined(SUPPORT_MEMORY_TILING) + { + /* Initialise EUR_CR_BIF_TILE registers for any tiling heaps */ + DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap = psDevInfo->pvDeviceMemoryHeap; + IMG_UINT32 i; + + psDevInfo->ui32MemTilingUsage = 0; + + for(i=0; iui32HeapCount; i++) + { + if(psDeviceMemoryHeap[i].ui32XTileStride > 0) + { + /* Set up the HW control registers */ + eError = SGX_AllocMemTilingRangeInt( + psDevInfo, + psDeviceMemoryHeap[i].sDevVAddrBase.uiAddr, + psDeviceMemoryHeap[i].sDevVAddrBase.uiAddr + + psDeviceMemoryHeap[i].ui32HeapSize, + psDeviceMemoryHeap[i].ui32XTileStride, + NULL); + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Unable to allocate SGX BIF tiling range for heap: %s", + psDeviceMemoryHeap[i].pszName)); + break; + } + } + } + } +#endif + + /* + Part 2 of the initialisation script runs after resetting SGX. 
+ */ + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "SGX initialisation script part 2\n"); + eError = SGXRunScript(psDevInfo, psDevInfo->sScripts.asInitCommandsPart2, SGX_MAX_INIT_COMMANDS); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SGXInitialise: SGXRunScript (part 2) failed (%d)", eError)); + return eError; + } + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "End of SGX initialisation script part 2\n"); + + /* Record the system timestamp for the microkernel */ + psSGXHostCtl->ui32HostClock = OSClockus(); + + psSGXHostCtl->ui32InitStatus = 0; +#if defined(PDUMP) + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, + "Reset the SGX microkernel initialisation status\n"); + PDUMPMEM(IMG_NULL, psSGXHostCtlMemInfo, + offsetof(SGXMKIF_HOST_CTL, ui32InitStatus), + sizeof(IMG_UINT32), PDUMP_FLAGS_CONTINUOUS, + MAKEUNIQUETAG(psSGXHostCtlMemInfo)); + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, + "Initialise the microkernel\n"); +#endif /* PDUMP */ + +#if defined(SGX_FEATURE_MULTI_EVENT_KICK) + OSWriteMemoryBarrier(); + OSWriteHWReg(psDevInfo->pvRegsBaseKM, + SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK2, 0), + EUR_CR_EVENT_KICK2_NOW_MASK); +#else + *psDevInfo->pui32KernelCCBEventKicker = (*psDevInfo->pui32KernelCCBEventKicker + 1) & 0xFF; + OSWriteMemoryBarrier(); + OSWriteHWReg(psDevInfo->pvRegsBaseKM, + SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK, 0), + EUR_CR_EVENT_KICK_NOW_MASK); +#endif /* SGX_FEATURE_MULTI_EVENT_KICK */ + + OSMemoryBarrier(); + +#if defined(PDUMP) + /* + Dump the host kick. 
+ */ + if (!bPDumpIsSuspended) + { +#if defined(SGX_FEATURE_MULTI_EVENT_KICK) + PDUMPREG(SGX_PDUMPREG_NAME, SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK2, 0), EUR_CR_EVENT_KICK2_NOW_MASK); +#else + psDevInfo->ui32KernelCCBEventKickerDumpVal = 1; + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, + "First increment of the SGX event kicker value\n"); + PDUMPMEM(&psDevInfo->ui32KernelCCBEventKickerDumpVal, + psDevInfo->psKernelCCBEventKickerMemInfo, + 0, + sizeof(IMG_UINT32), + PDUMP_FLAGS_CONTINUOUS, + MAKEUNIQUETAG(psDevInfo->psKernelCCBEventKickerMemInfo)); + PDUMPREG(SGX_PDUMPREG_NAME, SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK, 0), EUR_CR_EVENT_KICK_NOW_MASK); +#endif /* SGX_FEATURE_MULTI_EVENT_KICK */ + } +#endif /* PDUMP */ + +#if !defined(NO_HARDWARE) + /* + Wait for the microkernel to finish initialising. + */ + if (PollForValueKM(&psSGXHostCtl->ui32InitStatus, + PVRSRV_USSE_EDM_INIT_COMPLETE, + PVRSRV_USSE_EDM_INIT_COMPLETE, + MAX_HW_TIME_US, + MAX_HW_TIME_US/WAIT_TRY_COUNT, + IMG_FALSE) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "SGXInitialise: Wait for uKernel initialisation failed")); + + SGXDumpDebugInfo(psDevInfo, IMG_FALSE); + PVR_DBG_BREAK; + + return PVRSRV_ERROR_RETRY; + } +#endif /* NO_HARDWARE */ + +#if defined(PDUMP) + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, + "Wait for the SGX microkernel initialisation to complete"); + PDUMPMEMPOL(psSGXHostCtlMemInfo, + offsetof(SGXMKIF_HOST_CTL, ui32InitStatus), + PVRSRV_USSE_EDM_INIT_COMPLETE, + PVRSRV_USSE_EDM_INIT_COMPLETE, + PDUMP_POLL_OPERATOR_EQUAL, + PDUMP_FLAGS_CONTINUOUS, + MAKEUNIQUETAG(psSGXHostCtlMemInfo)); +#endif /* PDUMP */ + + PVR_ASSERT(psDevInfo->psKernelCCBCtl->ui32ReadOffset == psDevInfo->psKernelCCBCtl->ui32WriteOffset); + + bFirstTime = IMG_FALSE; + + return PVRSRV_OK; +} + +/*! +******************************************************************************* + + @Function SGXDeinitialise + + @Description + + (client invoked) chip-reset and deinitialisation + + @Input hDevCookie - device info. 
handle + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR SGXDeinitialise(IMG_HANDLE hDevCookie) + +{ + PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO *) hDevCookie; + PVRSRV_ERROR eError; + + /* Did SGXInitialise map the SGX registers in? */ + if (psDevInfo->pvRegsBaseKM == IMG_NULL) + { + return PVRSRV_OK; + } + + eError = SGXRunScript(psDevInfo, psDevInfo->sScripts.asDeinitCommands, SGX_MAX_DEINIT_COMMANDS); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SGXDeinitialise: SGXRunScript failed (%d)", eError)); + return eError; + } + + return PVRSRV_OK; +} + + +/*! +******************************************************************************* + + @Function DevInitSGXPart1 + + @Description + + Reset and initialise Chip + + @Input pvDeviceNode - device info. structure + + @Return PVRSRV_ERROR + +******************************************************************************/ +static PVRSRV_ERROR DevInitSGXPart1 (IMG_VOID *pvDeviceNode) +{ + IMG_HANDLE hDevMemHeap = IMG_NULL; + PVRSRV_SGXDEV_INFO *psDevInfo; + IMG_HANDLE hKernelDevMemContext; + IMG_DEV_PHYADDR sPDDevPAddr; + IMG_UINT32 i; + PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvDeviceNode; + DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap = psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeap; + PVRSRV_ERROR eError; + + /* pdump info about the core */ + PDUMPCOMMENT("SGX Core Version Information: %s", SGX_CORE_FRIENDLY_NAME); + + #if defined(SGX_FEATURE_MP) + #if !defined(SGX_FEATURE_MP_PLUS) + PDUMPCOMMENT("SGX Multi-processor: %d cores", SGX_FEATURE_MP_CORE_COUNT); + #else + PDUMPCOMMENT("SGX Multi-processor: %d TA cores, %d 3D cores", SGX_FEATURE_MP_CORE_COUNT_TA, SGX_FEATURE_MP_CORE_COUNT_3D); + #endif + #endif /* SGX_FEATURE_MP */ + +#if (SGX_CORE_REV == 0) + PDUMPCOMMENT("SGX Core Revision Information: head RTL"); +#else + PDUMPCOMMENT("SGX Core Revision Information: %d", SGX_CORE_REV); +#endif + + #if 
defined(SGX_FEATURE_SYSTEM_CACHE) + PDUMPCOMMENT("SGX System Level Cache is present\r\n"); + #if defined(SGX_BYPASS_SYSTEM_CACHE) + PDUMPCOMMENT("SGX System Level Cache is bypassed\r\n"); + #endif /* SGX_BYPASS_SYSTEM_CACHE */ + #endif /* SGX_FEATURE_SYSTEM_CACHE */ + + PDUMPCOMMENT("SGX Initialisation Part 1"); + + /* Allocate device control block */ + if(OSAllocMem( PVRSRV_OS_NON_PAGEABLE_HEAP, + sizeof(PVRSRV_SGXDEV_INFO), + (IMG_VOID **)&psDevInfo, IMG_NULL, + "SGX Device Info") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart1 : Failed to alloc memory for DevInfo")); + return (PVRSRV_ERROR_OUT_OF_MEMORY); + } + OSMemSet (psDevInfo, 0, sizeof(PVRSRV_SGXDEV_INFO)); + + /* setup info from jdisplayconfig.h (variations controlled by build) */ + psDevInfo->eDeviceType = DEV_DEVICE_TYPE; + psDevInfo->eDeviceClass = DEV_DEVICE_CLASS; + + /* Initialize SGX idle status */ + psDevInfo->bSGXIdle = IMG_TRUE; + + /* Store the devinfo as its needed by dynamically enumerated systems called from BM */ + psDeviceNode->pvDevice = (IMG_PVOID)psDevInfo; + + /* get heap info from the devnode */ + psDevInfo->ui32HeapCount = psDeviceNode->sDevMemoryInfo.ui32HeapCount; + psDevInfo->pvDeviceMemoryHeap = (IMG_VOID*)psDeviceMemoryHeap; + + /* create the kernel memory context */ + hKernelDevMemContext = BM_CreateContext(psDeviceNode, + &sPDDevPAddr, + IMG_NULL, + IMG_NULL); + if (hKernelDevMemContext == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart1: Failed BM_CreateContext")); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + psDevInfo->sKernelPDDevPAddr = sPDDevPAddr; + + /* create the kernel, shared and shared_exported heaps */ + for(i=0; isDevMemoryInfo.ui32HeapCount; i++) + { + switch(psDeviceMemoryHeap[i].DevMemHeapType) + { + case DEVICE_MEMORY_HEAP_KERNEL: + case DEVICE_MEMORY_HEAP_SHARED: + case DEVICE_MEMORY_HEAP_SHARED_EXPORTED: + { + /* Shared PB heap could be zero size */ + if (psDeviceMemoryHeap[i].ui32HeapSize > 0) + { + hDevMemHeap = BM_CreateHeap 
(hKernelDevMemContext, + &psDeviceMemoryHeap[i]); + /* + in the case of kernel context heaps just store + the heap handle in the heap info structure + */ + psDeviceMemoryHeap[i].hDevMemHeap = hDevMemHeap; + } + break; + } + } + } +#if defined(PDUMP) + if(hDevMemHeap) + { + /* set up the MMU pdump info */ + psDevInfo->sMMUAttrib = *((BM_HEAP*)hDevMemHeap)->psMMUAttrib; + } +#endif + eError = MMU_BIFResetPDAlloc(psDevInfo); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"DevInitSGX : Failed to alloc memory for BIF reset")); + return eError; + } + + return PVRSRV_OK; +} + +/*! +******************************************************************************* + + @Function SGXGetInfoForSrvinitKM + + @Description + + Get SGX related information necessary for initilisation server + + @Input hDevHandle - device handle + psInitInfo - pointer to structure for returned information + + @Output psInitInfo - pointer to structure containing returned information + + @Return PVRSRV_ERROR + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR SGXGetInfoForSrvinitKM(IMG_HANDLE hDevHandle, SGX_BRIDGE_INFO_FOR_SRVINIT *psInitInfo) +{ + PVRSRV_DEVICE_NODE *psDeviceNode; + PVRSRV_SGXDEV_INFO *psDevInfo; + PVRSRV_ERROR eError; + + PDUMPCOMMENT("SGXGetInfoForSrvinit"); + + psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevHandle; + psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice; + + psInitInfo->sPDDevPAddr = psDevInfo->sKernelPDDevPAddr; + + eError = PVRSRVGetDeviceMemHeapsKM(hDevHandle, &psInitInfo->asHeapInfo[0]); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SGXGetInfoForSrvinit: PVRSRVGetDeviceMemHeapsKM failed (%d)", eError)); + return eError; + } + + return eError; +} + +/*! +******************************************************************************* + + @Function DevInitSGXPart2KM + + @Description + + Reset and initialise Chip + + @Input pvDeviceNode - device info. 
structure + + @Return PVRSRV_ERROR + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR DevInitSGXPart2KM (PVRSRV_PER_PROCESS_DATA *psPerProc, + IMG_HANDLE hDevHandle, + SGX_BRIDGE_INIT_INFO *psInitInfo) +{ + PVRSRV_DEVICE_NODE *psDeviceNode; + PVRSRV_SGXDEV_INFO *psDevInfo; + PVRSRV_ERROR eError; + SGX_DEVICE_MAP *psSGXDeviceMap; + PVRSRV_DEV_POWER_STATE eDefaultPowerState; + + PDUMPCOMMENT("SGX Initialisation Part 2"); + + psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevHandle; + psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice; + + /* + Init devinfo + */ + eError = InitDevInfo(psPerProc, psDeviceNode, psInitInfo); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: Failed to load EDM program")); + goto failed_init_dev_info; + } + + + eError = SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE_SGX, + (IMG_VOID**)&psSGXDeviceMap); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: Failed to get device memory map!")); + return PVRSRV_ERROR_INIT_FAILURE; + } + + /* Registers already mapped? 
*/ + if (psSGXDeviceMap->pvRegsCpuVBase) + { + psDevInfo->pvRegsBaseKM = psSGXDeviceMap->pvRegsCpuVBase; + } + else + { + /* Map Regs */ + psDevInfo->pvRegsBaseKM = OSMapPhysToLin(psSGXDeviceMap->sRegsCpuPBase, + psSGXDeviceMap->ui32RegsSize, + PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED, + IMG_NULL); + if (!psDevInfo->pvRegsBaseKM) + { + PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: Failed to map in regs\n")); + return PVRSRV_ERROR_BAD_MAPPING; + } + } + psDevInfo->ui32RegSize = psSGXDeviceMap->ui32RegsSize; + psDevInfo->sRegsPhysBase = psSGXDeviceMap->sRegsSysPBase; + + +#if defined(SGX_FEATURE_HOST_PORT) + if (psSGXDeviceMap->ui32Flags & SGX_HOSTPORT_PRESENT) + { + /* Map Host Port */ + psDevInfo->pvHostPortBaseKM = OSMapPhysToLin(psSGXDeviceMap->sHPCpuPBase, + psSGXDeviceMap->ui32HPSize, + PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED, + IMG_NULL); + if (!psDevInfo->pvHostPortBaseKM) + { + PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: Failed to map in host port\n")); + return PVRSRV_ERROR_BAD_MAPPING; + } + psDevInfo->ui32HPSize = psSGXDeviceMap->ui32HPSize; + psDevInfo->sHPSysPAddr = psSGXDeviceMap->sHPSysPBase; + } +#endif/* #ifdef SGX_FEATURE_HOST_PORT */ + +#if defined (SYS_USING_INTERRUPTS) + + /* Set up ISR callback information. */ + psDeviceNode->pvISRData = psDeviceNode; + /* ISR handler address was set up earlier */ + PVR_ASSERT(psDeviceNode->pfnDeviceISR == SGX_ISRHandler); + +#endif /* SYS_USING_INTERRUPTS */ + + /* Prevent the microkernel being woken up before there is something to do. */ + psDevInfo->psSGXHostCtl->ui32PowerStatus |= PVRSRV_USSE_EDM_POWMAN_NO_WORK; + eDefaultPowerState = PVRSRV_DEV_POWER_STATE_OFF; + /* Register the device with the power manager. 
*/ + eError = PVRSRVRegisterPowerDevice (psDeviceNode->sDevId.ui32DeviceIndex, + &SGXPrePowerState, &SGXPostPowerState, + &SGXPreClockSpeedChange, &SGXPostClockSpeedChange, + (IMG_HANDLE)psDeviceNode, + PVRSRV_DEV_POWER_STATE_OFF, + eDefaultPowerState); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: failed to register device with power manager")); + return eError; + } + +#if defined(SUPPORT_EXTERNAL_SYSTEM_CACHE) + /* map the external system cache control registers into the SGX MMU */ + psDevInfo->ui32ExtSysCacheRegsSize = psSGXDeviceMap->ui32ExtSysCacheRegsSize; + psDevInfo->sExtSysCacheRegsDevPBase = psSGXDeviceMap->sExtSysCacheRegsDevPBase; + eError = MMU_MapExtSystemCacheRegs(psDeviceNode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SGXInitialise : Failed to map external system cache registers")); + return eError; + } +#endif /* SUPPORT_EXTERNAL_SYSTEM_CACHE */ + + /* + Initialise the Kernel CCB + */ + OSMemSet(psDevInfo->psKernelCCB, 0, sizeof(PVRSRV_SGX_KERNEL_CCB)); + OSMemSet(psDevInfo->psKernelCCBCtl, 0, sizeof(PVRSRV_SGX_CCB_CTL)); + OSMemSet(psDevInfo->pui32KernelCCBEventKicker, 0, sizeof(*psDevInfo->pui32KernelCCBEventKicker)); + PDUMPCOMMENT("Initialise Kernel CCB"); + PDUMPMEM(IMG_NULL, psDevInfo->psKernelCCBMemInfo, 0, sizeof(PVRSRV_SGX_KERNEL_CCB), PDUMP_FLAGS_CONTINUOUS, MAKEUNIQUETAG(psDevInfo->psKernelCCBMemInfo)); + PDUMPCOMMENT("Initialise Kernel CCB Control"); + PDUMPMEM(IMG_NULL, psDevInfo->psKernelCCBCtlMemInfo, 0, sizeof(PVRSRV_SGX_CCB_CTL), PDUMP_FLAGS_CONTINUOUS, MAKEUNIQUETAG(psDevInfo->psKernelCCBCtlMemInfo)); + PDUMPCOMMENT("Initialise Kernel CCB Event Kicker"); + PDUMPMEM(IMG_NULL, psDevInfo->psKernelCCBEventKickerMemInfo, 0, sizeof(*psDevInfo->pui32KernelCCBEventKicker), PDUMP_FLAGS_CONTINUOUS, MAKEUNIQUETAG(psDevInfo->psKernelCCBEventKickerMemInfo)); + + return PVRSRV_OK; + +failed_init_dev_info: + return eError; +} + +/*! 
+******************************************************************************* + + @Function DevDeInitSGX + + @Description + + Reset and deinitialise Chip + + @Input pvDeviceNode - device info. structure + + @Return PVRSRV_ERROR + +******************************************************************************/ +static PVRSRV_ERROR DevDeInitSGX (IMG_VOID *pvDeviceNode) +{ + PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvDeviceNode; + PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice; + PVRSRV_ERROR eError; + IMG_UINT32 ui32Heap; + DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap; + SGX_DEVICE_MAP *psSGXDeviceMap; + + if (!psDevInfo) + { + /* Can happen if DevInitSGX failed */ + PVR_DPF((PVR_DBG_ERROR,"DevDeInitSGX: Null DevInfo")); + return PVRSRV_OK; + } + +#if defined(SUPPORT_HW_RECOVERY) + if (psDevInfo->hTimer) + { + eError = OSRemoveTimer(psDevInfo->hTimer); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"DevDeInitSGX: Failed to remove timer")); + return eError; + } + psDevInfo->hTimer = IMG_NULL; + } +#endif /* SUPPORT_HW_RECOVERY */ + +#if defined(SUPPORT_EXTERNAL_SYSTEM_CACHE) + /* unmap the external system cache control registers */ + eError = MMU_UnmapExtSystemCacheRegs(psDeviceNode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"DevDeInitSGX: Failed to unmap ext system cache registers")); + return eError; + } +#endif /* SUPPORT_EXTERNAL_SYSTEM_CACHE */ + + MMU_BIFResetPDFree(psDevInfo); + + /* + DeinitDevInfo the DevInfo + */ + DeinitDevInfo(psDevInfo); + + /* Destroy heaps. 
*/ + psDeviceMemoryHeap = (DEVICE_MEMORY_HEAP_INFO *)psDevInfo->pvDeviceMemoryHeap; + for(ui32Heap=0; ui32HeapsDevMemoryInfo.ui32HeapCount; ui32Heap++) + { + switch(psDeviceMemoryHeap[ui32Heap].DevMemHeapType) + { + case DEVICE_MEMORY_HEAP_KERNEL: + case DEVICE_MEMORY_HEAP_SHARED: + case DEVICE_MEMORY_HEAP_SHARED_EXPORTED: + { + if (psDeviceMemoryHeap[ui32Heap].hDevMemHeap != IMG_NULL) + { + BM_DestroyHeap(psDeviceMemoryHeap[ui32Heap].hDevMemHeap); + } + break; + } + } + } + + /* Destroy the kernel context. */ + eError = BM_DestroyContext(psDeviceNode->sDevMemoryInfo.pBMKernelContext, IMG_NULL); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"DevDeInitSGX : Failed to destroy kernel context")); + return eError; + } + + /* remove the device from the power manager */ + eError = PVRSRVRemovePowerDevice (((PVRSRV_DEVICE_NODE*)pvDeviceNode)->sDevId.ui32DeviceIndex); + if (eError != PVRSRV_OK) + { + return eError; + } + + eError = SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE_SGX, + (IMG_VOID**)&psSGXDeviceMap); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"DevDeInitSGX: Failed to get device memory map!")); + return eError; + } + + /* Only unmap the registers if they were mapped here */ + if (!psSGXDeviceMap->pvRegsCpuVBase) + { + /* UnMap Regs */ + if (psDevInfo->pvRegsBaseKM != IMG_NULL) + { + OSUnMapPhysToLin(psDevInfo->pvRegsBaseKM, + psDevInfo->ui32RegSize, + PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED, + IMG_NULL); + } + } + +#if defined(SGX_FEATURE_HOST_PORT) + if (psSGXDeviceMap->ui32Flags & SGX_HOSTPORT_PRESENT) + { + /* unMap Host Port */ + if (psDevInfo->pvHostPortBaseKM != IMG_NULL) + { + OSUnMapPhysToLin(psDevInfo->pvHostPortBaseKM, + psDevInfo->ui32HPSize, + PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED, + IMG_NULL); + } + } +#endif /* #ifdef SGX_FEATURE_HOST_PORT */ + + /* DeAllocate devinfo */ + OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, + sizeof(PVRSRV_SGXDEV_INFO), + psDevInfo, + 0); + + psDeviceNode->pvDevice = IMG_NULL; + + if 
(psDeviceMemoryHeap != IMG_NULL) + { + /* Free the device memory heap info. */ + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(DEVICE_MEMORY_HEAP_INFO) * SGX_MAX_HEAP_ID, + psDeviceMemoryHeap, + 0); + } + + return PVRSRV_OK; +} + + +/*! +******************************************************************************* + + @Function SGXDumpDebugReg + + @Description + + Dump a single SGX debug register value + + @Input psDevInfo - SGX device info + @Input ui32CoreNum - processor number + @Input pszName - string used for logging + @Input ui32RegAddr - SGX register offset + + @Return IMG_VOID + +******************************************************************************/ +static IMG_VOID SGXDumpDebugReg (PVRSRV_SGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32CoreNum, + IMG_CHAR *pszName, + IMG_UINT32 ui32RegAddr) +{ + IMG_UINT32 ui32RegVal; + ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, SGX_MP_CORE_SELECT(ui32RegAddr, ui32CoreNum)); + PVR_LOG(("(P%u) %s%08X", ui32CoreNum, pszName, ui32RegVal)); +} + +#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS) || defined(FIX_HW_BRN_31620) +static INLINE IMG_UINT32 GetDirListBaseReg(IMG_UINT32 ui32Index) +{ + if (ui32Index == 0) + { + return EUR_CR_BIF_DIR_LIST_BASE0; + } + else + { + return (EUR_CR_BIF_DIR_LIST_BASE1 + ((ui32Index - 1) * 0x4)); + } +} +#endif + +/*! 
+ * ************************************************************************** + * @Function RunSGXREGDebugScripts + * @Description Runs the SGXREG debug scripts + * + * @Input PVRSRV_SGXDEV_INFO + * @Output + * @Return IMG_VOID + * **************************************************************************/ + +IMG_VOID RunSGXREGDebugScripts (PVRSRV_SGXDEV_INFO *psDevInfo) +{ + IMG_UINT32 ui32Core; + PVRSRV_ERROR eError; + + /* Run SGXREGDebug scripts */ +#if defined(SGX_FEATURE_MP) + PVR_LOG(("(HYD)")); + eError = SGXRunScript(psDevInfo, psDevInfo->sScripts.asSGXREGDebugCommandsMaster, SGX_MAX_PRINT_COMMANDS); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"RunSGXREGDebugScripts: SGXREGDebugCommandsMaster SGXRunScript failed (%d)", eError)); + } +#endif + + /* Run on each core */ + for (ui32Core = 0; ui32Core < SGX_FEATURE_MP_CORE_COUNT_3D; ui32Core++) + { + PVR_LOG(("(P%u)",ui32Core)); + eError = SGXRunScriptOnCore(psDevInfo, psDevInfo->sScripts.asSGXREGDebugCommandsSlave, SGX_MAX_PRINT_COMMANDS, ui32Core); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"RunSGXREGDebugScripts: SGXREGDebugCommandsSlave SGXRunScript failed (%d)", eError)); + } + } + /* Scripts end */ +} + + +/*! +******************************************************************************* + + @Function SGXDumpDebugInfo + + @Description + + Dump useful debugging info + + @Input psDevInfo - SGX device info + @Input bDumpSGXRegs - Whether to dump SGX debug registers. Must not be done + when SGX is not powered. 
+ + @Return IMG_VOID + +******************************************************************************/ +IMG_VOID SGXDumpDebugInfo (PVRSRV_SGXDEV_INFO *psDevInfo, + IMG_BOOL bDumpSGXRegs) +{ + IMG_UINT32 ui32CoreNum; + + PVR_LOG(("SGX debug (%s)", PVRVERSION_STRING)); + + if (bDumpSGXRegs) + { + PVR_DPF((PVR_DBG_ERROR,"SGX Register Base Address (Linear): 0x%p", psDevInfo->pvRegsBaseKM)); + PVR_DPF((PVR_DBG_ERROR,"SGX Register Base Address (Physical): 0x" SYSPADDR_FMT, psDevInfo->sRegsPhysBase.uiAddr)); + + /* Run SGXREGDebug Scripts */ + PVR_LOG(("Running SGXREG Debug Scripts:")); + RunSGXREGDebugScripts(psDevInfo); + + PVR_LOG(("SGX Register Dump:")); + SGXDumpDebugReg(psDevInfo, 0, "EUR_CR_CORE_ID: ", EUR_CR_CORE_ID); + SGXDumpDebugReg(psDevInfo, 0, "EUR_CR_CORE_REVISION: ", EUR_CR_CORE_REVISION); + + for (ui32CoreNum = 0; ui32CoreNum < SGX_FEATURE_MP_CORE_COUNT_3D; ui32CoreNum++) + { + SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_EVENT_STATUS: ", EUR_CR_EVENT_STATUS); + SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_EVENT_STATUS2: ", EUR_CR_EVENT_STATUS2); + SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_BIF_CTRL: ", EUR_CR_BIF_CTRL); + #if defined(EUR_CR_BIF_BANK0) + SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_BIF_BANK0: ", EUR_CR_BIF_BANK0); + #endif + SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_BIF_INT_STAT: ", EUR_CR_BIF_INT_STAT); + SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_BIF_FAULT: ", EUR_CR_BIF_FAULT); + SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_BIF_MEM_REQ_STAT: ", EUR_CR_BIF_MEM_REQ_STAT); + SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_CLKGATECTL: ", EUR_CR_CLKGATECTL); + #if defined(EUR_CR_PDS_PC_BASE) + SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_PDS_PC_BASE: ", EUR_CR_PDS_PC_BASE); + #endif + } + + #if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS) && !defined(FIX_HW_BRN_31620) + { + IMG_UINT32 ui32RegVal; + IMG_UINT32 ui32PDDevPAddr; + + /* + If there was a SGX pagefault check the page table too see if the + host 
thinks the fault is correct + */ + ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_INT_STAT); + #if defined(EUR_CR_BIF_INT_STAT_PF_N_RW_MASK) + if (ui32RegVal & EUR_CR_BIF_INT_STAT_PF_N_RW_MASK) + #else + if (ui32RegVal & EUR_CR_BIF_INT_STAT_FAULT_TYPE_MASK) + #endif + { + ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_FAULT); + ui32RegVal &= EUR_CR_BIF_FAULT_ADDR_MASK; + ui32PDDevPAddr = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_DIR_LIST_BASE0); + ui32PDDevPAddr &= EUR_CR_BIF_DIR_LIST_BASE0_ADDR_MASK; + MMU_CheckFaultAddr(psDevInfo, ui32PDDevPAddr, ui32RegVal); + } + } + #else + { + IMG_UINT32 ui32FaultAddress; + IMG_UINT32 ui32Bank0; + IMG_UINT32 ui32DirListIndex; + IMG_UINT32 ui32PDDevPAddr; + IMG_UINT32 ui32RegVal; + +#if defined(SGX_FEATURE_MP) + ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_BIF_INT_STAT); +#if defined(EUR_CR_BIF_INT_STAT_FAULT_TYPE_MASK) + if( ui32RegVal & EUR_CR_BIF_INT_STAT_FAULT_TYPE_MASK) +#else + if( ui32RegVal & EUR_CR_BIF_INT_STAT_PF_N_RW_MASK) +#endif + { + ui32FaultAddress = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_BIF_FAULT); + if(ui32FaultAddress) + { + ui32Bank0 = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_BANK0); + + /* Check the EDM's's memory context */ + ui32DirListIndex = (ui32Bank0 & EUR_CR_BIF_BANK0_INDEX_EDM_MASK) >> EUR_CR_BIF_BANK0_INDEX_EDM_SHIFT; + ui32PDDevPAddr = OSReadHWReg(psDevInfo->pvRegsBaseKM, + GetDirListBaseReg(ui32DirListIndex)); + PVR_LOG(("Checking EDM memory context (index = %d, PD = 0x%08x)", ui32DirListIndex, ui32PDDevPAddr)); + MMU_CheckFaultAddr(psDevInfo, ui32PDDevPAddr, ui32FaultAddress); + + /* Check the TA's memory context */ + ui32DirListIndex = (ui32Bank0 & EUR_CR_BIF_BANK0_INDEX_TA_MASK) >> EUR_CR_BIF_BANK0_INDEX_TA_SHIFT; + ui32PDDevPAddr = OSReadHWReg(psDevInfo->pvRegsBaseKM, + GetDirListBaseReg(ui32DirListIndex)); + PVR_LOG(("Checking TA memory context (index = %d, PD = 0x%08x)", ui32DirListIndex, ui32PDDevPAddr)); + 
MMU_CheckFaultAddr(psDevInfo, ui32PDDevPAddr, ui32FaultAddress); + + /* Check the 3D's memory context */ + ui32DirListIndex = (ui32Bank0 & EUR_CR_BIF_BANK0_INDEX_3D_MASK) >> EUR_CR_BIF_BANK0_INDEX_3D_SHIFT; + ui32PDDevPAddr = OSReadHWReg(psDevInfo->pvRegsBaseKM, + GetDirListBaseReg(ui32DirListIndex)); + PVR_LOG(("Checking 3D memory context (index = %d, PD = 0x%08x)", ui32DirListIndex, ui32PDDevPAddr)); + MMU_CheckFaultAddr(psDevInfo, ui32PDDevPAddr, ui32FaultAddress); + + #if defined(EUR_CR_BIF_BANK0_INDEX_2D_MASK) + /* Check the 2D's memory context */ + ui32DirListIndex = (ui32Bank0 & EUR_CR_BIF_BANK0_INDEX_2D_MASK) >> EUR_CR_BIF_BANK0_INDEX_2D_SHIFT; + ui32PDDevPAddr = OSReadHWReg(psDevInfo->pvRegsBaseKM, + GetDirListBaseReg(ui32DirListIndex)); + PVR_LOG(("Checking 2D memory context (index = %d, PD = 0x%08x)", ui32DirListIndex, ui32PDDevPAddr)); + MMU_CheckFaultAddr(psDevInfo, ui32PDDevPAddr, ui32FaultAddress); + #endif + + #if defined(EUR_CR_BIF_BANK0_INDEX_PTLA_MASK) + /* Check the 2D's memory context */ + ui32DirListIndex = (ui32Bank0 & EUR_CR_BIF_BANK0_INDEX_PTLA_MASK) >> EUR_CR_BIF_BANK0_INDEX_PTLA_SHIFT; + ui32PDDevPAddr = OSReadHWReg(psDevInfo->pvRegsBaseKM, + GetDirListBaseReg(ui32DirListIndex)); + PVR_LOG(("Checking PTLA memory context (index = %d, PD = 0x%08x)", ui32DirListIndex, ui32PDDevPAddr)); + MMU_CheckFaultAddr(psDevInfo, ui32PDDevPAddr, ui32FaultAddress); + #endif + + #if defined(EUR_CR_BIF_BANK0_INDEX_HOST_MASK) + /* Check the Host's memory context */ + ui32DirListIndex = (ui32Bank0 & EUR_CR_BIF_BANK0_INDEX_HOST_MASK) >> EUR_CR_BIF_BANK0_INDEX_HOST_SHIFT; + ui32PDDevPAddr = OSReadHWReg(psDevInfo->pvRegsBaseKM, + GetDirListBaseReg(ui32DirListIndex)); + PVR_LOG(("Checking Host memory context (index = %d, PD = 0x%08x)", ui32DirListIndex, ui32PDDevPAddr)); + MMU_CheckFaultAddr(psDevInfo, ui32PDDevPAddr, ui32FaultAddress); + + #endif + } + } +#endif + for (ui32CoreNum = 0; ui32CoreNum < SGX_FEATURE_MP_CORE_COUNT_3D; ui32CoreNum++) + { + + ui32RegVal 
= OSReadHWReg(psDevInfo->pvRegsBaseKM, SGX_MP_CORE_SELECT(EUR_CR_BIF_INT_STAT, ui32CoreNum)); + #if defined(EUR_CR_BIF_INT_STAT_FAULT_TYPE_MASK) + if( ui32RegVal & EUR_CR_BIF_INT_STAT_FAULT_TYPE_MASK) + #else + if( ui32RegVal & EUR_CR_BIF_INT_STAT_PF_N_RW_MASK) + #endif + { + ui32FaultAddress = OSReadHWReg(psDevInfo->pvRegsBaseKM, SGX_MP_CORE_SELECT(EUR_CR_BIF_FAULT, ui32CoreNum)); + ui32FaultAddress &= EUR_CR_BIF_FAULT_ADDR_MASK; + if(ui32FaultAddress) + { + ui32Bank0 = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_BANK0); + + /* Check the EDM's's memory context */ + ui32DirListIndex = (ui32Bank0 & EUR_CR_BIF_BANK0_INDEX_EDM_MASK) >> EUR_CR_BIF_BANK0_INDEX_EDM_SHIFT; + ui32PDDevPAddr = OSReadHWReg(psDevInfo->pvRegsBaseKM, + GetDirListBaseReg(ui32DirListIndex)); + PVR_LOG(("Checking EDM memory context (index = %d, PD = 0x%08x)", ui32DirListIndex, ui32PDDevPAddr)); + MMU_CheckFaultAddr(psDevInfo, ui32PDDevPAddr, ui32FaultAddress); + + /* Check the TA's memory context */ + ui32DirListIndex = (ui32Bank0 & EUR_CR_BIF_BANK0_INDEX_TA_MASK) >> EUR_CR_BIF_BANK0_INDEX_TA_SHIFT; + ui32PDDevPAddr = OSReadHWReg(psDevInfo->pvRegsBaseKM, + GetDirListBaseReg(ui32DirListIndex)); + PVR_LOG(("Checking TA memory context (index = %d, PD = 0x%08x)", ui32DirListIndex, ui32PDDevPAddr)); + MMU_CheckFaultAddr(psDevInfo, ui32PDDevPAddr, ui32FaultAddress); + + /* Check the 3D's memory context */ + ui32DirListIndex = (ui32Bank0 & EUR_CR_BIF_BANK0_INDEX_3D_MASK) >> EUR_CR_BIF_BANK0_INDEX_3D_SHIFT; + ui32PDDevPAddr = OSReadHWReg(psDevInfo->pvRegsBaseKM, + GetDirListBaseReg(ui32DirListIndex)); + PVR_LOG(("Checking 3D memory context (index = %d, PD = 0x%08x)", ui32DirListIndex, ui32PDDevPAddr)); + MMU_CheckFaultAddr(psDevInfo, ui32PDDevPAddr, ui32FaultAddress); + + #if defined(EUR_CR_BIF_BANK0_INDEX_2D_MASK) + /* Check the 2D's memory context */ + ui32DirListIndex = (ui32Bank0 & EUR_CR_BIF_BANK0_INDEX_2D_MASK) >> EUR_CR_BIF_BANK0_INDEX_2D_SHIFT; + ui32PDDevPAddr = 
OSReadHWReg(psDevInfo->pvRegsBaseKM, + GetDirListBaseReg(ui32DirListIndex)); + PVR_LOG(("Checking 2D memory context (index = %d, PD = 0x%08x)", ui32DirListIndex, ui32PDDevPAddr)); + MMU_CheckFaultAddr(psDevInfo, ui32PDDevPAddr, ui32FaultAddress); + #endif + + #if defined(EUR_CR_BIF_BANK0_INDEX_PTLA_MASK) + /* Check the 2D's memory context */ + ui32DirListIndex = (ui32Bank0 & EUR_CR_BIF_BANK0_INDEX_PTLA_MASK) >> EUR_CR_BIF_BANK0_INDEX_PTLA_SHIFT; + ui32PDDevPAddr = OSReadHWReg(psDevInfo->pvRegsBaseKM, + GetDirListBaseReg(ui32DirListIndex)); + PVR_LOG(("Checking PTLA memory context (index = %d, PD = 0x%08x)", ui32DirListIndex, ui32PDDevPAddr)); + MMU_CheckFaultAddr(psDevInfo, ui32PDDevPAddr, ui32FaultAddress); + #endif + + #if defined(EUR_CR_BIF_BANK0_INDEX_HOST_MASK) + /* Check the Host's memory context */ + ui32DirListIndex = (ui32Bank0 & EUR_CR_BIF_BANK0_INDEX_HOST_MASK) >> EUR_CR_BIF_BANK0_INDEX_HOST_SHIFT; + ui32PDDevPAddr = OSReadHWReg(psDevInfo->pvRegsBaseKM, + GetDirListBaseReg(ui32DirListIndex)); + PVR_LOG(("Checking Host memory context (index = %d, PD = 0x%08x)", ui32DirListIndex, ui32PDDevPAddr)); + MMU_CheckFaultAddr(psDevInfo, ui32PDDevPAddr, ui32FaultAddress); + + #endif + } + } + } + } + #endif + } + +#if defined(SUPPORT_PVRSRV_DEVICE_CLASS) + /* + Dump out the outstanding queue items. + */ + QueueDumpDebugInfo(); +#endif + + { + /* + Dump out the Host control. 
+ */ + SGXMKIF_HOST_CTL *psSGXHostCtl = psDevInfo->psSGXHostCtl; + IMG_UINT32 *pui32HostCtlBuffer = (IMG_UINT32 *)psSGXHostCtl; + IMG_UINT32 ui32LoopCounter; + + /* Report which defines are enabled that affect the HostCTL structure being dumped-out here */ + { + IMG_UINT32 ui32CtlFlags = 0; + #if defined(PVRSRV_USSE_EDM_BREAKPOINTS) + ui32CtlFlags = ui32CtlFlags | 0x0001; + #endif + #if defined(FIX_HW_BRN_28889) + ui32CtlFlags = ui32CtlFlags | 0x0002; + #endif + #if defined(SUPPORT_HW_RECOVERY) + ui32CtlFlags = ui32CtlFlags | 0x0004; + #endif + #if defined(SGX_FEATURE_EXTENDED_PERF_COUNTERS) + ui32CtlFlags = ui32CtlFlags | 0x0008; + #endif + PVR_LOG((" Host Ctl flags= %08x", ui32CtlFlags)); + } + + if (psSGXHostCtl->ui32AssertFail != 0) + { + PVR_LOG(("SGX Microkernel assert fail: 0x%08X", psSGXHostCtl->ui32AssertFail)); + psSGXHostCtl->ui32AssertFail = 0; + } + + PVR_LOG(("SGX Host control:")); + + for (ui32LoopCounter = 0; + ui32LoopCounter < sizeof(*psDevInfo->psSGXHostCtl) / sizeof(*pui32HostCtlBuffer); + ui32LoopCounter += 4) + { + PVR_LOG(("\t(HC-%" SIZE_T_FMT_LEN "X) 0x%08X 0x%08X 0x%08X 0x%08X", + ui32LoopCounter * sizeof(*pui32HostCtlBuffer), + pui32HostCtlBuffer[ui32LoopCounter + 0], pui32HostCtlBuffer[ui32LoopCounter + 1], + pui32HostCtlBuffer[ui32LoopCounter + 2], pui32HostCtlBuffer[ui32LoopCounter + 3])); + } + } + + { + /* + Dump out the TA/3D control. 
+ */ + IMG_UINT32 *pui32TA3DCtlBuffer = psDevInfo->psKernelSGXTA3DCtlMemInfo->pvLinAddrKM; + IMG_UINT32 ui32LoopCounter; + + PVR_LOG(("SGX TA/3D control:")); + + for (ui32LoopCounter = 0; + ui32LoopCounter < psDevInfo->psKernelSGXTA3DCtlMemInfo->uAllocSize / sizeof(*pui32TA3DCtlBuffer); + ui32LoopCounter += 4) + { + PVR_LOG(("\t(T3C-%" SIZE_T_FMT_LEN "X) 0x%08X 0x%08X 0x%08X 0x%08X", + ui32LoopCounter * sizeof(*pui32TA3DCtlBuffer), + pui32TA3DCtlBuffer[ui32LoopCounter + 0], pui32TA3DCtlBuffer[ui32LoopCounter + 1], + pui32TA3DCtlBuffer[ui32LoopCounter + 2], pui32TA3DCtlBuffer[ui32LoopCounter + 3])); + } + } + + #if defined(PVRSRV_USSE_EDM_STATUS_DEBUG) + { + IMG_UINT32 *pui32MKTraceBuffer = psDevInfo->psKernelEDMStatusBufferMemInfo->pvLinAddrKM; + IMG_UINT32 ui32LastStatusCode, ui32WriteOffset; + + ui32LastStatusCode = *pui32MKTraceBuffer; + pui32MKTraceBuffer++; + ui32WriteOffset = *pui32MKTraceBuffer; + pui32MKTraceBuffer++; + + PVR_LOG(("Last SGX microkernel status code: %08X %s", + ui32LastStatusCode, SGXUKernelStatusString(ui32LastStatusCode))); + + #if defined(PVRSRV_DUMP_MK_TRACE) + /* + Dump the raw microkernel trace buffer to the log. + */ + { + IMG_UINT32 ui32LoopCounter; + + for (ui32LoopCounter = 0; + ui32LoopCounter < SGXMK_TRACE_BUFFER_SIZE; + ui32LoopCounter++) + { + IMG_UINT32 *pui32BufPtr; + pui32BufPtr = pui32MKTraceBuffer + + (((ui32WriteOffset + ui32LoopCounter) % SGXMK_TRACE_BUFFER_SIZE) * 4); + PVR_LOG(("\t(MKT-%X) %08X %08X %08X %08X %s", ui32LoopCounter, + pui32BufPtr[2], pui32BufPtr[3], pui32BufPtr[1], pui32BufPtr[0], + SGXUKernelStatusString(pui32BufPtr[0]))); + } + } + #endif /* PVRSRV_DUMP_MK_TRACE */ + } + #endif /* PVRSRV_USSE_EDM_STATUS_DEBUG */ + + { + /* + Dump out the kernel CCB. 
+ */ + PVR_LOG(("SGX Kernel CCB WO:0x%X RO:0x%X", + psDevInfo->psKernelCCBCtl->ui32WriteOffset, + psDevInfo->psKernelCCBCtl->ui32ReadOffset)); + + #if defined(PVRSRV_DUMP_KERNEL_CCB) + { + IMG_UINT32 ui32LoopCounter; + + for (ui32LoopCounter = 0; + ui32LoopCounter < sizeof(psDevInfo->psKernelCCB->asCommands) / + sizeof(psDevInfo->psKernelCCB->asCommands[0]); + ui32LoopCounter++) + { + SGXMKIF_COMMAND *psCommand = &psDevInfo->psKernelCCB->asCommands[ui32LoopCounter]; + + PVR_LOG(("\t(KCCB-%X) %08X %08X - %08X %08X %08X %08X", ui32LoopCounter, + psCommand->ui32ServiceAddress, psCommand->ui32CacheControl, + psCommand->ui32Data[0], psCommand->ui32Data[1], + psCommand->ui32Data[2], psCommand->ui32Data[3])); + } + } + #endif /* PVRSRV_DUMP_KERNEL_CCB */ + } + #if defined (TTRACE) + PVRSRVDumpTimeTraceBuffers(); + #endif + + #if defined (SUPPORT_FORCE_SYNC_DUMP) + PVRSRVDumpSyncs(IMG_FALSE); + #else + PVRSRVDumpSyncs(IMG_TRUE); + #endif + + +#if defined (MEM_TRACK_INFO_DEBUG) + { + IMG_UINT32 ui32FaultAddress = OSReadHWReg(psDevInfo->pvRegsBaseKM, SGX_MP_CORE_SELECT(EUR_CR_BIF_FAULT, 0)); + PVRSRVPrintMemTrackInfo(ui32FaultAddress); + } +#endif + + +#if defined(PVRSRV_DEBUG_CCB_MAX) + PVRSRVDebugPrintfDumpCCB(); +#endif +} + + +#if defined(SYS_USING_INTERRUPTS) || defined(SUPPORT_HW_RECOVERY) +/*! +******************************************************************************* + + @Function HWRecoveryResetSGX + + @Description + + Resets SGX + + Note: may be called from an ISR so should not call pdump. 
+ + @Input psDevInfo - dev info + + @Input ui32Component - core component to reset + + @Return IMG_VOID + +******************************************************************************/ +static +IMG_VOID HWRecoveryResetSGX (PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32Component, + IMG_UINT32 ui32CallerID) +{ + PVRSRV_ERROR eError; + PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice; + SGXMKIF_HOST_CTL *psSGXHostCtl = (SGXMKIF_HOST_CTL *)psDevInfo->psSGXHostCtl; + +#if defined(SUPPORT_HWRECOVERY_TRACE_LIMIT) + static IMG_UINT32 ui32Clockinus = 0; + static IMG_UINT32 ui32HWRecoveryCount=0; + IMG_UINT32 ui32TempClockinus=0; +#endif + + PVR_UNREFERENCED_PARAMETER(ui32Component); + + /* + Ensure that hardware recovery is serialised with any power transitions. + */ + eError = PVRSRVPowerLock(ui32CallerID, IMG_FALSE); + if(eError != PVRSRV_OK) + { + /* + Unable to obtain lock because there is already a power transition + in progress. + */ + PVR_DPF((PVR_DBG_WARNING,"HWRecoveryResetSGX: Power transition in progress")); + return; + } + + psSGXHostCtl->ui32InterruptClearFlags |= PVRSRV_USSE_EDM_INTERRUPT_HWR; + + PVR_LOG(("HWRecoveryResetSGX: SGX Hardware Recovery triggered")); + +#if defined(SUPPORT_HWRECOVERY_TRACE_LIMIT) +/* + * The following defines are system specific and should be defined in + * the corresponding sysconfig.h file. The values indicated are examples only. + SYS_SGX_HWRECOVERY_TRACE_RESET_TIME_PERIOD 5000000 //(5 Seconds) + SYS_SGX_MAX_HWRECOVERY_OCCURANCE_COUNT 5 + */ + ui32TempClockinus = OSClockus(); + if((ui32TempClockinus-ui32Clockinus) < SYS_SGX_HWRECOVERY_TRACE_RESET_TIME_PERIOD){ + ui32HWRecoveryCount++; + if(SYS_SGX_MAX_HWRECOVERY_OCCURANCE_COUNT <= ui32HWRecoveryCount){ + OSPanic(); + } + }else{ + ui32Clockinus = ui32TempClockinus; + SGXDumpDebugInfo(psDeviceNode->pvDevice, IMG_TRUE); + ui32HWRecoveryCount = 0; + } +#else + SGXDumpDebugInfo(psDeviceNode->pvDevice, IMG_TRUE); +#endif + + /* Suspend pdumping. 
*/ + PDUMPSUSPEND(); + + /* Reset and re-initialise SGX. */ + eError = SGXInitialise(psDevInfo, IMG_TRUE); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"HWRecoveryResetSGX: SGXInitialise failed (%d)", eError)); + } + + /* Resume pdumping. */ + PDUMPRESUME(); + + PVRSRVPowerUnlock(ui32CallerID); + + /* Send a dummy kick so that we start processing again */ + SGXScheduleProcessQueuesKM(psDeviceNode); + +#if defined(SUPPORT_PVRSRV_DEVICE_CLASS) + /* Flush any old commands from the queues. */ + PVRSRVProcessQueues(IMG_TRUE); +#endif +} +#endif /* #if defined(SYS_USING_INTERRUPTS) || defined(SUPPORT_HW_RECOVERY) */ + + +#if defined(SUPPORT_HW_RECOVERY) +/*! +****************************************************************************** + + @Function SGXOSTimer + + @Description + + Timer function for SGX + + @Input pvData - private data + + @Return PVRSRV_ERROR + +******************************************************************************/ +IMG_VOID SGXOSTimer(IMG_VOID *pvData) +{ + PVRSRV_DEVICE_NODE *psDeviceNode = pvData; + PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + static IMG_UINT32 ui32EDMTasks = 0; + static IMG_UINT32 ui32LockupCounter = 0; /* To prevent false positives */ + static IMG_UINT32 ui32OpenCLDelayCounter = 0; + static IMG_UINT32 ui32NumResets = 0; +#if defined(FIX_HW_BRN_31093) + static IMG_BOOL bBRN31093Inval = IMG_FALSE; +#endif + IMG_UINT32 ui32CurrentEDMTasks; + IMG_UINT32 ui32CurrentOpenCLDelayCounter=0; + IMG_BOOL bLockup = IMG_FALSE; + IMG_BOOL bPoweredDown; + + /* increment a timestamp */ + psDevInfo->ui32TimeStamp++; + +#if defined(NO_HARDWARE) + bPoweredDown = IMG_TRUE; +#else + bPoweredDown = (SGXIsDevicePowered(psDeviceNode)) ? IMG_FALSE : IMG_TRUE; +#endif /* NO_HARDWARE */ + + /* + * Check whether EDM timer tasks are getting scheduled. If not, assume + * that SGX has locked up and reset the chip. 
+ */ + + /* Check whether the timer should be running */ + if (bPoweredDown) + { + ui32LockupCounter = 0; + #if defined(FIX_HW_BRN_31093) + bBRN31093Inval = IMG_FALSE; + #endif + } + else + { + /* The PDS timer should be running. */ + ui32CurrentEDMTasks = OSReadHWReg(psDevInfo->pvRegsBaseKM, psDevInfo->ui32EDMTaskReg0); + if (psDevInfo->ui32EDMTaskReg1 != 0) + { + ui32CurrentEDMTasks ^= OSReadHWReg(psDevInfo->pvRegsBaseKM, psDevInfo->ui32EDMTaskReg1); + } + if ((ui32CurrentEDMTasks == ui32EDMTasks) && + (psDevInfo->ui32NumResets == ui32NumResets)) + { + ui32LockupCounter++; + if (ui32LockupCounter == 3) + { + ui32LockupCounter = 0; + ui32CurrentOpenCLDelayCounter = (psDevInfo->psSGXHostCtl)->ui32OpenCLDelayCount; + if(0 != ui32CurrentOpenCLDelayCounter) + { + if(ui32OpenCLDelayCounter != ui32CurrentOpenCLDelayCounter){ + ui32OpenCLDelayCounter = ui32CurrentOpenCLDelayCounter; + }else{ + ui32OpenCLDelayCounter -= 1; + (psDevInfo->psSGXHostCtl)->ui32OpenCLDelayCount = ui32OpenCLDelayCounter; + } + goto SGX_NoUKernel_LockUp; + } + + + #if defined(FIX_HW_BRN_31093) + if (bBRN31093Inval == IMG_FALSE) + { + /* It could be a BIF hang so do a INVAL_PTE */ + #if defined(FIX_HW_BRN_29997) + IMG_UINT32 ui32BIFCtrl; + /* Pause the BIF before issuing the invalidate */ + ui32BIFCtrl = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL); + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32BIFCtrl | EUR_CR_BIF_CTRL_PAUSE_MASK); + /* delay for 200 clocks */ + SGXWaitClocks(psDevInfo, 200); + #endif + /* Flag that we have attempt to un-block the BIF */ + bBRN31093Inval = IMG_TRUE; + + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL_INVAL, EUR_CR_BIF_CTRL_INVAL_PTE_MASK); + /* delay for 200 clocks */ + SGXWaitClocks(psDevInfo, 200); + + #if defined(FIX_HW_BRN_29997) + /* un-pause the BIF by restoring the BIF_CTRL */ + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32BIFCtrl); + #endif + } + else + #endif + { + PVR_DPF((PVR_DBG_ERROR, "SGXOSTimer() detected 
SGX lockup (0x%x tasks)", ui32EDMTasks)); + + bLockup = IMG_TRUE; + (psDevInfo->psSGXHostCtl)->ui32OpenCLDelayCount = 0; + } + } + } + else + { + #if defined(FIX_HW_BRN_31093) + bBRN31093Inval = IMG_FALSE; + #endif + ui32LockupCounter = 0; + ui32EDMTasks = ui32CurrentEDMTasks; + ui32NumResets = psDevInfo->ui32NumResets; + } + } +SGX_NoUKernel_LockUp: + + if (bLockup) + { + SGXMKIF_HOST_CTL *psSGXHostCtl = (SGXMKIF_HOST_CTL *)psDevInfo->psSGXHostCtl; + + /* increment the counter so we know the host detected the lockup */ + psSGXHostCtl->ui32HostDetectedLockups ++; + + /* Reset the chip and process the queues. */ + HWRecoveryResetSGX(psDeviceNode, 0, ISR_ID); + } +} +#endif /* defined(SUPPORT_HW_RECOVERY) */ + + + +#if defined(SYS_USING_INTERRUPTS) + +/* + SGX ISR Handler +*/ +IMG_BOOL SGX_ISRHandler (IMG_VOID *pvData) +{ + IMG_BOOL bInterruptProcessed = IMG_FALSE; + + + /* Real Hardware */ + { + IMG_UINT32 ui32EventStatus, ui32EventEnable; + IMG_UINT32 ui32EventClear = 0; +#if defined(SGX_FEATURE_DATA_BREAKPOINTS) + IMG_UINT32 ui32EventStatus2, ui32EventEnable2; +#endif + IMG_UINT32 ui32EventClear2 = 0; + PVRSRV_DEVICE_NODE *psDeviceNode; + PVRSRV_SGXDEV_INFO *psDevInfo; + + /* check for null pointers */ + if(pvData == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "SGX_ISRHandler: Invalid params\n")); + return bInterruptProcessed; + } + + psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData; + psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice; + + ui32EventStatus = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS); + ui32EventEnable = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_ENABLE); + + /* test only the unmasked bits */ + ui32EventStatus &= ui32EventEnable; + +#if defined(SGX_FEATURE_DATA_BREAKPOINTS) + ui32EventStatus2 = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS2); + ui32EventEnable2 = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_ENABLE2); + + /* test only the unmasked bits */ + ui32EventStatus2 &= ui32EventEnable2; +#endif 
/* defined(SGX_FEATURE_DATA_BREAKPOINTS) */ + + /* Thought: is it better to insist that the bit assignment in + the "clear" register(s) matches that of the "status" register(s)? + It would greatly simplify this LISR */ + + if (ui32EventStatus & EUR_CR_EVENT_STATUS_SW_EVENT_MASK) + { + ui32EventClear |= EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_MASK; + } + +#if defined(SGX_FEATURE_DATA_BREAKPOINTS) + if (ui32EventStatus2 & EUR_CR_EVENT_STATUS2_DATA_BREAKPOINT_UNTRAPPED_MASK) + { + ui32EventClear2 |= EUR_CR_EVENT_HOST_CLEAR2_DATA_BREAKPOINT_UNTRAPPED_MASK; + } + + if (ui32EventStatus2 & EUR_CR_EVENT_STATUS2_DATA_BREAKPOINT_TRAPPED_MASK) + { + ui32EventClear2 |= EUR_CR_EVENT_HOST_CLEAR2_DATA_BREAKPOINT_TRAPPED_MASK; + } +#endif /* defined(SGX_FEATURE_DATA_BREAKPOINTS) */ + + if (ui32EventClear || ui32EventClear2) + { + bInterruptProcessed = IMG_TRUE; + + /* Clear master interrupt bit */ + ui32EventClear |= EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_MASK; + + /* clear the events */ + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR, ui32EventClear); + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR2, ui32EventClear2); + + /* + Sample the current count from the uKernel _after_ we've cleared the + interrupt. + */ + g_ui32HostIRQCountSample = psDevInfo->psSGXHostCtl->ui32InterruptCount; + } + } + + return bInterruptProcessed; +} + +/* + SGX Systrace Handler +*/ +#if defined(SUPPORT_PVRSRV_ANDROID_SYSTRACE) && defined(EUR_CR_TIMER) +static IMG_VOID SGXSystraceHandler(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice; + IMG_UINT32 ui32SgxClockSpeed, ui32DataCount, ui32HostTimestamp; + + /* NOTE: Not thread safe. 
MISR should only run in one thread */ + static PVRSRV_SGX_HWPERF_CB_ENTRY asSGXHWPerf[16]; + + if(SystraceIsCapturingHWData() && psDevInfo->bSystraceInitialised) + { + SGXReadHWPerfCBKM((IMG_HANDLE) psDeviceNode, + 16, + asSGXHWPerf, + (IMG_UINT32 *)&ui32DataCount, + (IMG_UINT32 *)&ui32SgxClockSpeed, + (IMG_UINT32 *)&ui32HostTimestamp); + + SystraceHWPerfPackets(psDevInfo, asSGXHWPerf, ui32DataCount, ui32SgxClockSpeed); + } + else if(SystraceIsCapturingHWData() && !psDevInfo->bSystraceInitialised) + { + SGX_MISC_INFO sSGXMiscInfo; + + if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_SYSTRACE_DATA), + (IMG_VOID **)&psDevInfo->psSystraceData, 0, + "Systrace data storage") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "SGXSystraceHandler: Failed to allocate systrace data")); + return; + } + + OSMemSet(psDevInfo->psSystraceData, 0, sizeof(PVRSRV_SYSTRACE_DATA)); + + /* Prepare the SGXMiscInfo request in order to stop recording data*/ + sSGXMiscInfo.eRequest = SGX_MISC_INFO_REQUEST_SET_HWPERF_STATUS; + sSGXMiscInfo.uData.sSetHWPerfStatus.ui32NewHWPerfStatus = PVRSRV_SGX_HWPERF_STATUS_GRAPHICS_ON | PVRSRV_SGX_HWPERF_STATUS_PERIODIC_ON; + + /* Call into SGX DDK KM Services*/ + SGXGetMiscInfoKM(psDevInfo, &sSGXMiscInfo, psDeviceNode, NULL); + + psDevInfo->bSystraceInitialised = IMG_TRUE; + + /* Initialize the first context to be 1 (0 is idle)*/ + psDevInfo->psSystraceData->ui32CurrentCtxID = 1; + + /* Initialize current GPU ticks and Host Time */ + SystraceInitializeTimeCorr(psDevInfo); + } + else if(psDevInfo->bSystraceInitialised) + { + SGX_MISC_INFO sSGXMiscInfo; + + /* Prepare the SGXMiscInfo request in order to stop recording data*/ + sSGXMiscInfo.eRequest = SGX_MISC_INFO_REQUEST_SET_HWPERF_STATUS; + sSGXMiscInfo.uData.sSetHWPerfStatus.ui32NewHWPerfStatus = 0; + + /* Call into SGX DDK KM Services*/ + SGXGetMiscInfoKM(psDevInfo, &sSGXMiscInfo, psDeviceNode, NULL); + + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_SYSTRACE_DATA), psDevInfo->psSystraceData, NULL); + 
psDevInfo->bSystraceInitialised = IMG_FALSE; + } +} +#endif + +/* + SGX MISR Handler +*/ +static IMG_VOID SGX_MISRHandler (IMG_VOID *pvData) +{ + PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData; + PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice; + SGXMKIF_HOST_CTL *psSGXHostCtl = (SGXMKIF_HOST_CTL *)psDevInfo->psSGXHostCtl; + + if (((psSGXHostCtl->ui32InterruptFlags & PVRSRV_USSE_EDM_INTERRUPT_HWR) != 0UL) && + ((psSGXHostCtl->ui32InterruptClearFlags & PVRSRV_USSE_EDM_INTERRUPT_HWR) == 0UL)) + { + HWRecoveryResetSGX(psDeviceNode, 0, ISR_ID); + } + + if (psDeviceNode->bReProcessDeviceCommandComplete) + { + SGXScheduleProcessQueuesKM(psDeviceNode); + } + + SGXTestActivePowerEvent(psDeviceNode, ISR_ID); + +#if defined(SUPPORT_PVRSRV_ANDROID_SYSTRACE) && defined(EUR_CR_TIMER) + SGXSystraceHandler(psDeviceNode); +#endif + +} +#endif /* #if defined (SYS_USING_INTERRUPTS) */ + +#if defined(SUPPORT_MEMORY_TILING) + +IMG_INTERNAL +PVRSRV_ERROR SGX_AllocMemTilingRange(PVRSRV_DEVICE_NODE *psDeviceNode, + PVRSRV_KERNEL_MEM_INFO *psMemInfo, + IMG_UINT32 ui32XTileStride, + IMG_UINT32 *pui32RangeIndex) +{ + return SGX_AllocMemTilingRangeInt(psDeviceNode->pvDevice, + psMemInfo->sDevVAddr.uiAddr, + psMemInfo->sDevVAddr.uiAddr + ((IMG_UINT32) psMemInfo->uAllocSize) + SGX_MMU_PAGE_SIZE - 1, + ui32XTileStride, + pui32RangeIndex); +} + +IMG_INTERNAL +PVRSRV_ERROR SGX_FreeMemTilingRange(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32RangeIndex) +{ + PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + IMG_UINT32 ui32Offset; + IMG_UINT32 ui32Val; + + if(ui32RangeIndex >= 10) + { + PVR_DPF((PVR_DBG_ERROR,"SGX_FreeMemTilingRange: invalid Range index ")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* clear the usage bit */ + psDevInfo->ui32MemTilingUsage &= ~(1<pvRegsBaseKM, ui32Offset, ui32Val); + PDUMPREG(SGX_PDUMPREG_NAME, ui32Offset, ui32Val); + + return PVRSRV_OK; +} + +#endif /* defined(SUPPORT_MEMORY_TILING) */ + + +static 
IMG_VOID SGXCacheInvalidate(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + #if defined(SGX_FEATURE_MP) + psDevInfo->ui32CacheControl |= SGXMKIF_CC_INVAL_BIF_SL; + #else + PVR_UNREFERENCED_PARAMETER(psDevInfo); + #endif /* SGX_FEATURE_MP */ +} + +/*! +******************************************************************************* + + @Function SGXRegisterDevice + + @Description + + Registers the device with the system + + @Input: psDeviceNode - device node + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR SGXRegisterDevice (PVRSRV_DEVICE_NODE *psDeviceNode) +{ + DEVICE_MEMORY_INFO *psDevMemoryInfo; + DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap; + + /* setup details that never change */ + psDeviceNode->sDevId.eDeviceType = DEV_DEVICE_TYPE; + psDeviceNode->sDevId.eDeviceClass = DEV_DEVICE_CLASS; +#if defined(PDUMP) + { + /* memory space names are set up in system code */ + SGX_DEVICE_MAP *psSGXDeviceMemMap; + SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE_SGX, + (IMG_VOID**)&psSGXDeviceMemMap); + + psDeviceNode->sDevId.pszPDumpDevName = psSGXDeviceMemMap->pszPDumpDevName; + PVR_ASSERT(psDeviceNode->sDevId.pszPDumpDevName != IMG_NULL); + } + + psDeviceNode->sDevId.pszPDumpRegName = SGX_PDUMPREG_NAME; +#endif /* PDUMP */ + + psDeviceNode->pfnInitDevice = &DevInitSGXPart1; + psDeviceNode->pfnDeInitDevice = &DevDeInitSGX; + + psDeviceNode->pfnInitDeviceCompatCheck = &SGXDevInitCompatCheck; +#if defined(PDUMP) + psDeviceNode->pfnPDumpInitDevice = &SGXResetPDump; + psDeviceNode->pfnMMUGetContextID = &MMU_GetPDumpContextID; +#endif + /* + MMU callbacks + */ + psDeviceNode->pfnMMUInitialise = &MMU_Initialise; + psDeviceNode->pfnMMUFinalise = &MMU_Finalise; + psDeviceNode->pfnMMUInsertHeap = &MMU_InsertHeap; + psDeviceNode->pfnMMUCreate = &MMU_Create; + psDeviceNode->pfnMMUDelete = &MMU_Delete; + psDeviceNode->pfnMMUAlloc = &MMU_Alloc; + 
psDeviceNode->pfnMMUFree = &MMU_Free; + psDeviceNode->pfnMMUMapPages = &MMU_MapPages; + psDeviceNode->pfnMMUMapShadow = &MMU_MapShadow; + psDeviceNode->pfnMMUUnmapPages = &MMU_UnmapPages; + psDeviceNode->pfnMMUMapScatter = &MMU_MapScatter; + psDeviceNode->pfnMMUGetPhysPageAddr = &MMU_GetPhysPageAddr; + psDeviceNode->pfnMMUGetPDDevPAddr = &MMU_GetPDDevPAddr; +#if defined(SUPPORT_PDUMP_MULTI_PROCESS) + psDeviceNode->pfnMMUIsHeapShared = &MMU_IsHeapShared; +#endif +#if defined(FIX_HW_BRN_31620) + psDeviceNode->pfnMMUGetCacheFlushRange = &MMU_GetCacheFlushRange; + psDeviceNode->pfnMMUGetPDPhysAddr = &MMU_GetPDPhysAddr; +#else + psDeviceNode->pfnMMUGetCacheFlushRange = IMG_NULL; + psDeviceNode->pfnMMUGetPDPhysAddr = IMG_NULL; +#endif + psDeviceNode->pfnMMUMapPagesSparse = &MMU_MapPagesSparse; + psDeviceNode->pfnMMUMapShadowSparse = &MMU_MapShadowSparse; + +#if defined (SYS_USING_INTERRUPTS) + /* + SGX ISR handler + */ + psDeviceNode->pfnDeviceISR = SGX_ISRHandler; + psDeviceNode->pfnDeviceMISR = SGX_MISRHandler; +#endif + +#if defined(SUPPORT_MEMORY_TILING) + psDeviceNode->pfnAllocMemTilingRange = SGX_AllocMemTilingRange; + psDeviceNode->pfnFreeMemTilingRange = SGX_FreeMemTilingRange; +#endif + + /* + SGX command complete handler + */ + psDeviceNode->pfnDeviceCommandComplete = &SGXCommandComplete; + + psDeviceNode->pfnCacheInvalidate = SGXCacheInvalidate; + + /* + and setup the device's memory map: + */ + psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo; + /* size of address space */ + psDevMemoryInfo->ui32AddressSpaceSizeLog2 = SGX_FEATURE_ADDRESS_SPACE_SIZE; + + /* flags, backing store details to be specified by system */ + psDevMemoryInfo->ui32Flags = 0; + + /* device memory heap info about each heap in a device address space */ + if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP, + sizeof(DEVICE_MEMORY_HEAP_INFO) * SGX_MAX_HEAP_ID, + (IMG_VOID **)&psDevMemoryInfo->psDeviceMemoryHeap, 0, + "Array of Device Memory Heap Info") != PVRSRV_OK) + { + 
PVR_DPF((PVR_DBG_ERROR,"SGXRegisterDevice : Failed to alloc memory for DEVICE_MEMORY_HEAP_INFO")); + return (PVRSRV_ERROR_OUT_OF_MEMORY); + } + OSMemSet(psDevMemoryInfo->psDeviceMemoryHeap, 0, sizeof(DEVICE_MEMORY_HEAP_INFO) * SGX_MAX_HEAP_ID); + + psDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap; + + /* + setup heaps + Note: backing store to be setup by system (defaults to UMA) + */ + + /************* general ***************/ + psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_GENERAL_HEAP_ID); + psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_GENERAL_HEAP_BASE; + psDeviceMemoryHeap->ui32HeapSize = SGX_GENERAL_HEAP_SIZE; + psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE + | PVRSRV_MEM_RAM_BACKED_ALLOCATION + | PVRSRV_HAP_SINGLE_PROCESS; + psDeviceMemoryHeap->pszName = "General"; + psDeviceMemoryHeap->pszBSName = "General BS"; + psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT; + /* set the default (4k). System can override these as required */ + psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE; +#if !defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP) && !defined(SGX5300) + /* specify the mapping heap ID for this device */ + psDevMemoryInfo->ui32MappingHeapID = (IMG_UINT32)(psDeviceMemoryHeap - psDevMemoryInfo->psDeviceMemoryHeap); +#endif + psDeviceMemoryHeap++;/* advance to the next heap */ + +#if defined(SGX_FEATURE_ADDRESS_SPACE_EXTENSION) + /************* Texture Heap ***************/ + psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_TEXTURE_HEAP_ID); + psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_TEXTURE_HEAP_BASE; + psDeviceMemoryHeap->ui32HeapSize = SGX_TEXTURE_HEAP_SIZE; + psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE + | PVRSRV_MEM_RAM_BACKED_ALLOCATION + | PVRSRV_HAP_SINGLE_PROCESS; + + psDeviceMemoryHeap->pszName = "Texture"; + psDeviceMemoryHeap->pszBSName = "Texture BS"; + psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT; + /* set the default 
(4k). System can override these as required */ + psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE; + /* The mapping heap ID should be texture heap for SGX5300 */ +#if !defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP) && defined(SGX5300) + /* specify the mapping heap ID for this device */ + psDevMemoryInfo->ui32MappingHeapID = (IMG_UINT32)(psDeviceMemoryHeap - psDevMemoryInfo->psDeviceMemoryHeap); +#endif + psDeviceMemoryHeap++;/* advance to the next heap */ +#endif + +#if defined(SUPPORT_MEMORY_TILING) + /************* VPB tiling ***************/ + psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_VPB_TILED_HEAP_ID); + psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_VPB_TILED_HEAP_BASE; + psDeviceMemoryHeap->ui32HeapSize = SGX_VPB_TILED_HEAP_SIZE; + psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE + | PVRSRV_MEM_RAM_BACKED_ALLOCATION + | PVRSRV_HAP_SINGLE_PROCESS; + psDeviceMemoryHeap->pszName = "VPB Tiled"; + psDeviceMemoryHeap->pszBSName = "VPB Tiled BS"; + psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT; + /* set the default (4k). 
System can override these as required */ + psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE; + psDeviceMemoryHeap->ui32XTileStride = SGX_VPB_TILED_HEAP_STRIDE; + PVR_DPF((PVR_DBG_WARNING, "VPB tiling heap tiling stride = 0x%x", psDeviceMemoryHeap->ui32XTileStride)); + psDeviceMemoryHeap++;/* advance to the next heap */ +#endif + + /************* TA data ***************/ + psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_TADATA_HEAP_ID); + psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_TADATA_HEAP_BASE; + psDeviceMemoryHeap->ui32HeapSize = SGX_TADATA_HEAP_SIZE; + psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE + | PVRSRV_MEM_RAM_BACKED_ALLOCATION + | PVRSRV_HAP_MULTI_PROCESS; + psDeviceMemoryHeap->pszName = "TA Data"; + psDeviceMemoryHeap->pszBSName = "TA Data BS"; + psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT; + /* set the default (4k). System can override these as required */ + psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE; + psDeviceMemoryHeap++;/* advance to the next heap */ + + + /************* kernel code ***************/ + psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_KERNEL_CODE_HEAP_ID); + psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_KERNEL_CODE_HEAP_BASE; + psDeviceMemoryHeap->ui32HeapSize = SGX_KERNEL_CODE_HEAP_SIZE; + psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE + | PVRSRV_MEM_RAM_BACKED_ALLOCATION + | PVRSRV_HAP_MULTI_PROCESS; + psDeviceMemoryHeap->pszName = "Kernel Code"; + psDeviceMemoryHeap->pszBSName = "Kernel Code BS"; + psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED; + /* set the default (4k). 
System can override these as required */ + psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE; + psDeviceMemoryHeap++;/* advance to the next heap */ + + + /************* Kernel Video Data ***************/ + psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_KERNEL_DATA_HEAP_ID); + psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_KERNEL_DATA_HEAP_BASE; + psDeviceMemoryHeap->ui32HeapSize = SGX_KERNEL_DATA_HEAP_SIZE; + psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE + | PVRSRV_MEM_RAM_BACKED_ALLOCATION + | PVRSRV_HAP_MULTI_PROCESS; + psDeviceMemoryHeap->pszName = "KernelData"; + psDeviceMemoryHeap->pszBSName = "KernelData BS"; + psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED; + /* set the default (4k). System can override these as required */ + psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE; + psDeviceMemoryHeap++;/* advance to the next heap */ + + + /************* PixelShaderUSSE ***************/ + psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_PIXELSHADER_HEAP_ID); + psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_PIXELSHADER_HEAP_BASE; + /* + The actual size of the pixel and vertex shader heap must be such that all + addresses are within range of the one of the USSE code base registers, but + the addressable range is hardware-dependent. + SGX_PIXELSHADER_HEAP_SIZE is defined to be the maximum possible size + to ensure that the heap layout is consistent across all SGXs. + */ + psDeviceMemoryHeap->ui32HeapSize = ((10 << SGX_USE_CODE_SEGMENT_RANGE_BITS) - 0x00001000); + PVR_ASSERT(psDeviceMemoryHeap->ui32HeapSize <= SGX_PIXELSHADER_HEAP_SIZE); + psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE + | PVRSRV_MEM_RAM_BACKED_ALLOCATION + | PVRSRV_HAP_SINGLE_PROCESS; + psDeviceMemoryHeap->pszName = "PixelShaderUSSE"; + psDeviceMemoryHeap->pszBSName = "PixelShaderUSSE BS"; + psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT; + /* set the default (4k). 
System can override these as required */ + psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE; + psDeviceMemoryHeap++;/* advance to the next heap */ + + + /************* VertexShaderUSSE ***************/ + psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_VERTEXSHADER_HEAP_ID); + psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_VERTEXSHADER_HEAP_BASE; + /* See comment above with PixelShaderUSSE ui32HeapSize */ + psDeviceMemoryHeap->ui32HeapSize = ((4 << SGX_USE_CODE_SEGMENT_RANGE_BITS) - 0x00001000); + PVR_ASSERT(psDeviceMemoryHeap->ui32HeapSize <= SGX_VERTEXSHADER_HEAP_SIZE); + psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE + | PVRSRV_MEM_RAM_BACKED_ALLOCATION + | PVRSRV_HAP_SINGLE_PROCESS; + psDeviceMemoryHeap->pszName = "VertexShaderUSSE"; + psDeviceMemoryHeap->pszBSName = "VertexShaderUSSE BS"; + psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT; + /* set the default (4k). System can override these as required */ + psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE; + psDeviceMemoryHeap++;/* advance to the next heap */ + + + /************* PDS Pixel Code/Data ***************/ + psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_PDSPIXEL_CODEDATA_HEAP_ID); + psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_PDSPIXEL_CODEDATA_HEAP_BASE; + psDeviceMemoryHeap->ui32HeapSize = SGX_PDSPIXEL_CODEDATA_HEAP_SIZE; + psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE + | PVRSRV_MEM_RAM_BACKED_ALLOCATION + | PVRSRV_HAP_SINGLE_PROCESS; + psDeviceMemoryHeap->pszName = "PDSPixelCodeData"; + psDeviceMemoryHeap->pszBSName = "PDSPixelCodeData BS"; + psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT; + /* set the default (4k). 
System can override these as required */ + psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE; + psDeviceMemoryHeap++;/* advance to the next heap */ + + + /************* PDS Vertex Code/Data ***************/ + psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_PDSVERTEX_CODEDATA_HEAP_ID); + psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_PDSVERTEX_CODEDATA_HEAP_BASE; + psDeviceMemoryHeap->ui32HeapSize = SGX_PDSVERTEX_CODEDATA_HEAP_SIZE; + psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE + | PVRSRV_MEM_RAM_BACKED_ALLOCATION + | PVRSRV_HAP_SINGLE_PROCESS; + psDeviceMemoryHeap->pszName = "PDSVertexCodeData"; + psDeviceMemoryHeap->pszBSName = "PDSVertexCodeData BS"; + psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT; + /* set the default (4k). System can override these as required */ + psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE; + psDeviceMemoryHeap++;/* advance to the next heap */ + + + /************* CacheCoherent ***************/ + psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_SYNCINFO_HEAP_ID); + psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_SYNCINFO_HEAP_BASE; + psDeviceMemoryHeap->ui32HeapSize = SGX_SYNCINFO_HEAP_SIZE; + psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE + | PVRSRV_MEM_RAM_BACKED_ALLOCATION + | PVRSRV_HAP_MULTI_PROCESS; + psDeviceMemoryHeap->pszName = "CacheCoherent"; + psDeviceMemoryHeap->pszBSName = "CacheCoherent BS"; + psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED; + /* set the default (4k). 
System can override these as required */ + psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE; + /* set the sync heap id */ + psDevMemoryInfo->ui32SyncHeapID = (IMG_UINT32)(psDeviceMemoryHeap - psDevMemoryInfo->psDeviceMemoryHeap); + psDeviceMemoryHeap++;/* advance to the next heap */ + + + /************* Shared 3D Parameters ***************/ + psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_SHARED_3DPARAMETERS_HEAP_ID); + psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_SHARED_3DPARAMETERS_HEAP_BASE; + psDeviceMemoryHeap->ui32HeapSize = SGX_SHARED_3DPARAMETERS_HEAP_SIZE; + psDeviceMemoryHeap->pszName = "Shared 3DParameters"; + psDeviceMemoryHeap->pszBSName = "Shared 3DParameters BS"; + psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE + | PVRSRV_MEM_RAM_BACKED_ALLOCATION + | PVRSRV_HAP_MULTI_PROCESS; + psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED; + + /* set the default (4k). System can override these as required */ + psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE; + psDeviceMemoryHeap++;/* advance to the next heap */ + + /************* Percontext 3D Parameters ***************/ + psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_PERCONTEXT_3DPARAMETERS_HEAP_ID); + psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_PERCONTEXT_3DPARAMETERS_HEAP_BASE; + psDeviceMemoryHeap->ui32HeapSize = SGX_PERCONTEXT_3DPARAMETERS_HEAP_SIZE; + psDeviceMemoryHeap->pszName = "Percontext 3DParameters"; + psDeviceMemoryHeap->pszBSName = "Percontext 3DParameters BS"; + psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE + | PVRSRV_MEM_RAM_BACKED_ALLOCATION + | PVRSRV_HAP_SINGLE_PROCESS; + psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT; + /* set the default (4k). 
System can override these as required */ + psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE; + psDeviceMemoryHeap++;/* advance to the next heap */ + + +#if defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP) + /************* General Mapping ***************/ + psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_GENERAL_MAPPING_HEAP_ID); + psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_GENERAL_MAPPING_HEAP_BASE; + psDeviceMemoryHeap->ui32HeapSize = SGX_GENERAL_MAPPING_HEAP_SIZE; + psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE + | PVRSRV_MEM_RAM_BACKED_ALLOCATION + | PVRSRV_HAP_MULTI_PROCESS; + psDeviceMemoryHeap->pszName = "GeneralMapping"; + psDeviceMemoryHeap->pszBSName = "GeneralMapping BS"; + #if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS) && defined(FIX_HW_BRN_23410) + /* + if((2D hardware is enabled) + && (multi-mem contexts enabled) + && (BRN23410 is present)) + - then don't make the heap per-context otherwise + the TA and 2D requestors must always be aligned to + the same address space which could affect performance + */ + psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED; + #else /* defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS) && defined(FIX_HW_BRN_23410) */ + psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT; + #endif /* defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS) && defined(FIX_HW_BRN_23410) */ + + /* set the default (4k). 
System can override these as required */
+	psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
+	/* specify the mapping heap ID for this device */
+	psDevMemoryInfo->ui32MappingHeapID = (IMG_UINT32)(psDeviceMemoryHeap - psDevMemoryInfo->psDeviceMemoryHeap);
+	psDeviceMemoryHeap++;/* advance to the next heap */
+#endif /* #if defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP) */
+
+
+#if defined(SGX_FEATURE_2D_HARDWARE)
+	/************* 2D HW Heap ***************/
+	psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_2D_HEAP_ID);
+	psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_2D_HEAP_BASE;
+	psDeviceMemoryHeap->ui32HeapSize = SGX_2D_HEAP_SIZE;
+	psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
+										| PVRSRV_MEM_RAM_BACKED_ALLOCATION
+										| PVRSRV_HAP_SINGLE_PROCESS;
+	psDeviceMemoryHeap->pszName = "2D";
+	psDeviceMemoryHeap->pszBSName = "2D BS";
+	psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
+	/* set the default (4k). System can override these as required */
+	psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
+	psDeviceMemoryHeap++;/* advance to the next heap */
+#endif /* #if defined(SGX_FEATURE_2D_HARDWARE) */
+
+
+	/* set the heap count from how far the cursor advanced past the array base */
+	psDevMemoryInfo->ui32HeapCount = (IMG_UINT32)(psDeviceMemoryHeap - psDevMemoryInfo->psDeviceMemoryHeap);
+
+	return PVRSRV_OK;
+}
+
+#if defined(PDUMP)
+/* PDUMP only: rewind the kernel CCB's pdump write offset to zero so a new
+   capture starts from the beginning of the CCB. */
+static
+PVRSRV_ERROR SGXResetPDump(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO *)(psDeviceNode->pvDevice);
+	psDevInfo->psKernelCCBInfo->ui32CCBDumpWOff = 0;
+	PVR_DPF((PVR_DBG_MESSAGE, "Reset pdump CCB write offset."));
+
+	return PVRSRV_OK;
+}
+#endif /* PDUMP */
+
+
+/*!
+*******************************************************************************
+
+ @Function	SGXGetClientInfoKM
+
+ @Description	Gets the client information
+
+ @Input hDevCookie
+
+ @Output psClientInfo
+
+ @Return   PVRSRV_ERROR :
+
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR SGXGetClientInfoKM(IMG_HANDLE					hDevCookie,
+								SGX_CLIENT_INFO*			psClientInfo)
+{
+	PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookie)->pvDevice;
+
+	/*
+		Track another client connection.
+		NOTE(review): the original comment claimed first-client initialisation
+		happens here, but this function only bumps the refcount - confirm where
+		that initialisation actually lives.
+	*/
+	psDevInfo->ui32ClientRefCount++;
+
+	/*
+		Copy information to the client info.
+	*/
+	psClientInfo->ui32ProcessID = OSGetCurrentProcessIDKM();
+
+	/*
+		Copy requested information.
+	*/
+	OSMemCopy(&psClientInfo->asDevData, &psDevInfo->asSGXDevData, sizeof(psClientInfo->asDevData));
+
+	/* just return OK */
+	return PVRSRV_OK;
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function	SGXPanic
+
+ @Description
+
+ Called when an unrecoverable situation is detected. Dumps SGX debug
+ information and tells the OS to panic.
+
+ @Input psDevInfo - SGX device info
+
+ @Return IMG_VOID
+
+******************************************************************************/
+IMG_VOID SGXPanic(PVRSRV_SGXDEV_INFO	*psDevInfo)
+{
+	PVR_LOG(("SGX panic"));
+	/* Dump without verbose register state (IMG_FALSE), then hand off to the
+	   OS panic path - this call does not return. */
+	SGXDumpDebugInfo(psDevInfo, IMG_FALSE);
+	OSPanic();
+}
+
+
+/*!
+******************************************************************************* + + @Function SGXDevInitCompatCheck + + @Description + + Check compatibility of host driver and microkernel (DDK and build options) + for SGX devices at services/device initialisation + + @Input psDeviceNode - device node + + @Return PVRSRV_ERROR - depending on mismatch found + +******************************************************************************/ +PVRSRV_ERROR SGXDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_ERROR eError; + PVRSRV_SGXDEV_INFO *psDevInfo; + IMG_UINT32 ui32BuildOptions, ui32BuildOptionsMismatch; +#if !defined(NO_HARDWARE) + PPVRSRV_KERNEL_MEM_INFO psMemInfo; + PVRSRV_SGX_MISCINFO_INFO *psSGXMiscInfoInt; /*!< internal misc info for ukernel */ + PVRSRV_SGX_MISCINFO_FEATURES *psSGXFeatures; + SGX_MISCINFO_STRUCT_SIZES *psSGXStructSizes; /*!< microkernel structure sizes */ + IMG_BOOL bStructSizesFailed; + + /* Exceptions list for core rev check, format is pairs of (hw rev, sw rev) */ + IMG_BOOL bCheckCoreRev; + const IMG_UINT32 aui32CoreRevExceptions[] = + { + 0x10100, 0x10101 + }; + const IMG_UINT32 ui32NumCoreExceptions = sizeof(aui32CoreRevExceptions) / (2*sizeof(IMG_UINT32)); + IMG_UINT i; +#endif + + /* Ensure it's a SGX device */ + if(psDeviceNode->sDevId.eDeviceType != PVRSRV_DEVICE_TYPE_SGX) + { + PVR_LOG(("(FAIL) SGXInit: Device not of type SGX")); + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto chk_exit; + } + + psDevInfo = psDeviceNode->pvDevice; + + /* + * 1. Check kernel-side and client-side build options + * 2. 
Check ukernel build options against kernel-side build options + */ + + /* + * Check KM build options against client-side host driver + */ + + ui32BuildOptions = (SGX_BUILD_OPTIONS); + if (ui32BuildOptions != psDevInfo->ui32ClientBuildOptions) + { + ui32BuildOptionsMismatch = ui32BuildOptions ^ psDevInfo->ui32ClientBuildOptions; + if ( (psDevInfo->ui32ClientBuildOptions & ui32BuildOptionsMismatch) != 0) + { + PVR_LOG(("(FAIL) SGXInit: Mismatch in client-side and KM driver build options.")); + PVR_LOG(("Extra options present in client-side driver: (0x%x). Please check sgx_options.h", + psDevInfo->ui32ClientBuildOptions & ui32BuildOptionsMismatch)); + } + + if ( (ui32BuildOptions & ui32BuildOptionsMismatch) != 0) + { + PVR_LOG(("(FAIL) SGXInit: Mismatch in client-side and KM driver build options.")); + PVR_LOG(("Extra options present in KM: (0x%x). Please check sgx_options.h", + ui32BuildOptions & ui32BuildOptionsMismatch)); + } + eError = PVRSRV_ERROR_BUILD_MISMATCH; + goto chk_exit; + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "SGXInit: Client-side and KM driver build options match. [ OK ]")); + } + +#if !defined (NO_HARDWARE) + psMemInfo = psDevInfo->psKernelSGXMiscMemInfo; + + /* Clear state (not strictly necessary since this is the first call) */ + psSGXMiscInfoInt = psMemInfo->pvLinAddrKM; + psSGXMiscInfoInt->ui32MiscInfoFlags &= ~PVRSRV_USSE_MISCINFO_GET_STRUCT_SIZES; + psSGXMiscInfoInt->ui32MiscInfoFlags |= PVRSRV_USSE_MISCINFO_GET_STRUCT_SIZES; + eError = SGXGetMiscInfoUkernel(psDevInfo, psDeviceNode, IMG_NULL); + + if(eError != PVRSRV_OK) + { + PVR_LOG(("(FAIL) SGXInit: Unable to validate hardware core revision")); + goto chk_exit; + } + psSGXFeatures = &((PVRSRV_SGX_MISCINFO_INFO*)(psMemInfo->pvLinAddrKM))->sSGXFeatures; + /* + * Check hardware core revision is compatible with the one in software + */ + if (psSGXFeatures->ui32CoreRevSW == 0) + { + /* + Head core revision cannot be checked. 
+ */ + PVR_LOG(("SGXInit: HW core rev (%x) check skipped.", + psSGXFeatures->ui32CoreRev)); + } + else + { + /* For some cores the hw/sw core revisions are expected not to match. For these + * exceptional cases the core rev compatibility check should be skipped. + */ + bCheckCoreRev = IMG_TRUE; + for(i=0; iui32CoreRev==aui32CoreRevExceptions[i]) && + (psSGXFeatures->ui32CoreRevSW==aui32CoreRevExceptions[i+1]) ) + { + PVR_LOG(("SGXInit: HW core rev (%x), SW core rev (%x) check skipped.", + psSGXFeatures->ui32CoreRev, + psSGXFeatures->ui32CoreRevSW)); + bCheckCoreRev = IMG_FALSE; + } + } + + if (bCheckCoreRev) + { + if (psSGXFeatures->ui32CoreRev != psSGXFeatures->ui32CoreRevSW) + { + PVR_LOG(("(FAIL) SGXInit: Incompatible HW core rev (%x) and SW core rev (%x).", + psSGXFeatures->ui32CoreRev, psSGXFeatures->ui32CoreRevSW)); + eError = PVRSRV_ERROR_BUILD_MISMATCH; + goto chk_exit; + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "SGXInit: HW core rev (%x) and SW core rev (%x) match. [ OK ]", + psSGXFeatures->ui32CoreRev, psSGXFeatures->ui32CoreRevSW)); + } + } + } + + /* + * Check ukernel structure sizes are the same as those in the driver + */ + psSGXStructSizes = &((PVRSRV_SGX_MISCINFO_INFO*)(psMemInfo->pvLinAddrKM))->sSGXStructSizes; + + bStructSizesFailed = IMG_FALSE; + + CHECK_SIZE(HOST_CTL); + CHECK_SIZE(COMMAND); +#if defined(SGX_FEATURE_2D_HARDWARE) + CHECK_SIZE(2DCMD); + CHECK_SIZE(2DCMD_SHARED); +#endif + CHECK_SIZE(CMDTA); + CHECK_SIZE(CMDTA_SHARED); + CHECK_SIZE(TRANSFERCMD); + CHECK_SIZE(TRANSFERCMD_SHARED); + + CHECK_SIZE(3DREGISTERS); + CHECK_SIZE(HWPBDESC); + CHECK_SIZE(HWRENDERCONTEXT); + CHECK_SIZE(HWRENDERDETAILS); + CHECK_SIZE(HWRTDATA); + CHECK_SIZE(HWRTDATASET); + CHECK_SIZE(HWTRANSFERCONTEXT); + + if (bStructSizesFailed == IMG_TRUE) + { + PVR_LOG(("(FAIL) SGXInit: Mismatch in SGXMKIF structure sizes.")); + eError = PVRSRV_ERROR_BUILD_MISMATCH; + goto chk_exit; + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "SGXInit: SGXMKIF structure sizes match. 
[ OK ]")); + } + + /* + * Check ukernel build options against KM host driver + */ + + ui32BuildOptions = psSGXFeatures->ui32BuildOptions; + if (ui32BuildOptions != (SGX_BUILD_OPTIONS)) + { + ui32BuildOptionsMismatch = ui32BuildOptions ^ (SGX_BUILD_OPTIONS); + if ( ((SGX_BUILD_OPTIONS) & ui32BuildOptionsMismatch) != 0) + { + PVR_LOG(("(FAIL) SGXInit: Mismatch in driver and microkernel build options; " + "extra options present in driver: (0x%x). Please check sgx_options.h", + (SGX_BUILD_OPTIONS) & ui32BuildOptionsMismatch )); + } + + if ( (ui32BuildOptions & ui32BuildOptionsMismatch) != 0) + { + PVR_LOG(("(FAIL) SGXInit: Mismatch in driver and microkernel build options; " + "extra options present in microkernel: (0x%x). Please check sgx_options.h", + ui32BuildOptions & ui32BuildOptionsMismatch )); + } + eError = PVRSRV_ERROR_BUILD_MISMATCH; + goto chk_exit; + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "SGXInit: Driver and microkernel build options match. [ OK ]")); + } +#endif // NO_HARDWARE + + eError = PVRSRV_OK; +chk_exit: +#if defined(IGNORE_SGX_INIT_COMPATIBILITY_CHECK) + return PVRSRV_OK; +#else + return eError; +#endif +} + +/* + * @Function SGXGetMiscInfoUkernel + * + * @Description Returns misc info (e.g. 
SGX build info/flags) from microkernel + * + * @Input psDevInfo : device info from init phase + * @Input psDeviceNode : device node, used for scheduling ukernel to query SGX features + * + * @Return PVRSRV_ERROR : + * + */ +static +PVRSRV_ERROR SGXGetMiscInfoUkernel(PVRSRV_SGXDEV_INFO *psDevInfo, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_HANDLE hDevMemContext) +{ + PVRSRV_ERROR eError; + SGXMKIF_COMMAND sCommandData; /* CCB command data */ + PVRSRV_SGX_MISCINFO_INFO *psSGXMiscInfoInt; /*!< internal misc info for ukernel */ + PVRSRV_SGX_MISCINFO_FEATURES *psSGXFeatures; /*!< sgx features for client */ + SGX_MISCINFO_STRUCT_SIZES *psSGXStructSizes; /*!< internal info: microkernel structure sizes */ + + PPVRSRV_KERNEL_MEM_INFO psMemInfo = psDevInfo->psKernelSGXMiscMemInfo; + + if (! psMemInfo->pvLinAddrKM) + { + PVR_DPF((PVR_DBG_ERROR, "SGXGetMiscInfoUkernel: Invalid address.")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + psSGXMiscInfoInt = psMemInfo->pvLinAddrKM; + psSGXFeatures = &psSGXMiscInfoInt->sSGXFeatures; + psSGXStructSizes = &psSGXMiscInfoInt->sSGXStructSizes; + + psSGXMiscInfoInt->ui32MiscInfoFlags &= ~PVRSRV_USSE_MISCINFO_READY; + + /* Reset SGX features */ + OSMemSet(psSGXFeatures, 0, sizeof(*psSGXFeatures)); + OSMemSet(psSGXStructSizes, 0, sizeof(*psSGXStructSizes)); + + /* set up buffer address for SGX features in CCB */ + sCommandData.ui32Data[1] = psMemInfo->sDevVAddr.uiAddr; /* device V addr of output buffer */ + + PDUMPCOMMENT("Microkernel kick for SGXGetMiscInfo"); + eError = SGXScheduleCCBCommandKM(psDeviceNode, + SGXMKIF_CMD_GETMISCINFO, + &sCommandData, + KERNEL_ID, + 0, + hDevMemContext, + IMG_FALSE); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "SGXGetMiscInfoUkernel: SGXScheduleCCBCommandKM failed.")); + return eError; + } + + /* FIXME: DWORD value to determine code path in ukernel? + * E.g. could use getMiscInfo to obtain register values for diagnostics? 
*/ + +#if !defined(NO_HARDWARE) + { + IMG_BOOL bExit; + + bExit = IMG_FALSE; + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + if ((psSGXMiscInfoInt->ui32MiscInfoFlags & PVRSRV_USSE_MISCINFO_READY) != 0) + { + bExit = IMG_TRUE; + break; + } + } END_LOOP_UNTIL_TIMEOUT(); + + /*if the loop exited because a timeout*/ + if (!bExit) + { + PVR_DPF((PVR_DBG_ERROR, "SGXGetMiscInfoUkernel: Timeout occurred waiting for misc info.")); + return PVRSRV_ERROR_TIMEOUT; + } + } +#endif /* NO_HARDWARE */ + + return PVRSRV_OK; +} + + + +/* + * @Function SGXGetMiscInfoKM + * + * @Description Returns miscellaneous SGX info + * + * @Input psDevInfo : device info from init phase + * @Input psDeviceNode : device node, used for scheduling ukernel to query SGX features + * + * @Output psMiscInfo : query request plus user-mode mem for holding returned data + * + * @Return PVRSRV_ERROR : + * + */ +IMG_EXPORT +PVRSRV_ERROR SGXGetMiscInfoKM(PVRSRV_SGXDEV_INFO *psDevInfo, + SGX_MISC_INFO *psMiscInfo, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_HANDLE hDevMemContext) +{ + PVRSRV_ERROR eError; + PPVRSRV_KERNEL_MEM_INFO psMemInfo = psDevInfo->psKernelSGXMiscMemInfo; + IMG_UINT32 *pui32MiscInfoFlags; + pui32MiscInfoFlags = &((PVRSRV_SGX_MISCINFO_INFO*)(psMemInfo->pvLinAddrKM))->ui32MiscInfoFlags; + +#if !defined(SUPPORT_SGX_EDM_MEMORY_DEBUG) + PVR_UNREFERENCED_PARAMETER(hDevMemContext); +#endif + + switch(psMiscInfo->eRequest) + { +#if defined(SGX_FEATURE_DATA_BREAKPOINTS) + case SGX_MISC_INFO_REQUEST_SET_BREAKPOINT: + { + IMG_UINT32 ui32MaskDM; + IMG_UINT32 ui32CtrlWEnable; + IMG_UINT32 ui32CtrlREnable; + IMG_UINT32 ui32CtrlTrapEnable; + IMG_UINT32 ui32RegVal; + IMG_UINT32 ui32StartRegVal; + IMG_UINT32 ui32EndRegVal; + SGXMKIF_COMMAND sCommandData; + + /* Set or Clear BP? 
*/ + if(psMiscInfo->uData.sSGXBreakpointInfo.bBPEnable) + { + /* set the break point */ + IMG_DEV_VIRTADDR sBPDevVAddr = psMiscInfo->uData.sSGXBreakpointInfo.sBPDevVAddr; + IMG_DEV_VIRTADDR sBPDevVAddrEnd = psMiscInfo->uData.sSGXBreakpointInfo.sBPDevVAddrEnd; + + /* BP address */ + ui32StartRegVal = sBPDevVAddr.uiAddr & EUR_CR_BREAKPOINT0_START_ADDRESS_MASK; + ui32EndRegVal = sBPDevVAddrEnd.uiAddr & EUR_CR_BREAKPOINT0_END_ADDRESS_MASK; + + ui32MaskDM = psMiscInfo->uData.sSGXBreakpointInfo.ui32DataMasterMask; + ui32CtrlWEnable = psMiscInfo->uData.sSGXBreakpointInfo.bWrite; + ui32CtrlREnable = psMiscInfo->uData.sSGXBreakpointInfo.bRead; + ui32CtrlTrapEnable = psMiscInfo->uData.sSGXBreakpointInfo.bTrapped; + + /* normal data BP */ + ui32RegVal = ((ui32MaskDM<uData.sSGXBreakpointInfo.ui32BPIndex; + sCommandData.ui32Data[1] = ui32StartRegVal; + sCommandData.ui32Data[2] = ui32EndRegVal; + sCommandData.ui32Data[3] = ui32RegVal; + + /* clear signal flags */ + psDevInfo->psSGXHostCtl->ui32BPSetClearSignal = 0; + + PDUMPCOMMENT("Microkernel kick for setting a data breakpoint"); + eError = SGXScheduleCCBCommandKM(psDeviceNode, + SGXMKIF_CMD_DATABREAKPOINT, + &sCommandData, + KERNEL_ID, + 0, + hDevMemContext, + IMG_FALSE); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "SGXGetMiscInfoKM: SGXScheduleCCBCommandKM failed.")); + return eError; + } + +#if defined(NO_HARDWARE) + /* clear signal flags */ + psDevInfo->psSGXHostCtl->ui32BPSetClearSignal = 0; +#else + { + IMG_BOOL bExit; + + bExit = IMG_FALSE; + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + if (psDevInfo->psSGXHostCtl->ui32BPSetClearSignal != 0) + { + bExit = IMG_TRUE; + /* clear signal flags */ + psDevInfo->psSGXHostCtl->ui32BPSetClearSignal = 0; + break; + } + } END_LOOP_UNTIL_TIMEOUT(); + + /*if the loop exited because a timeout*/ + if (!bExit) + { + PVR_DPF((PVR_DBG_ERROR, "SGXGetMiscInfoKM: Timeout occurred waiting BP set/clear")); + return PVRSRV_ERROR_TIMEOUT; + } + } +#endif /* NO_HARDWARE */ + + return 
PVRSRV_OK; + } + + case SGX_MISC_INFO_REQUEST_POLL_BREAKPOINT: + { + /* This request checks to see whether a breakpoint has + been trapped. If so, it returns the number of the + breakpoint number that was trapped in ui32BPIndex, + sTrappedBPDevVAddr to the address which was trapped, + and sets bTrappedBP. Otherwise, bTrappedBP will be + false, and other fields should be ignored. */ + /* The uKernel is not used, since if we are stopped on a + breakpoint, it is not possible to guarantee that the + uKernel would be able to run */ +#if !defined(NO_HARDWARE) +#if defined(SGX_FEATURE_MP) + IMG_BOOL bTrappedBPMaster; + IMG_UINT32 ui32CoreNum, ui32TrappedBPCoreNum; +#if defined(SGX_FEATURE_PERPIPE_BKPT_REGS) + IMG_UINT32 ui32PipeNum, ui32TrappedBPPipeNum; +/* ui32PipeNum is the pipe number plus 1, or 0 to represent "partition" */ +#define NUM_PIPES_PLUS_ONE (SGX_FEATURE_PERPIPE_BKPT_REGS_NUMPIPES+1) +#endif + IMG_BOOL bTrappedBPAny; +#endif /* defined(SGX_FEATURE_MP) */ + IMG_BOOL bFoundOne; + +#if defined(SGX_FEATURE_MP) + ui32TrappedBPCoreNum = 0; + bTrappedBPMaster = !!(EUR_CR_MASTER_BREAKPOINT_TRAPPED_MASK & OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_BREAKPOINT)); + bTrappedBPAny = bTrappedBPMaster; +#if defined(SGX_FEATURE_PERPIPE_BKPT_REGS) + ui32TrappedBPPipeNum = 0; /* just to keep the (incorrect) compiler happy */ +#endif + for (ui32CoreNum = 0; ui32CoreNum < SGX_FEATURE_MP_CORE_COUNT_3D; ui32CoreNum++) + { +#if defined(SGX_FEATURE_PERPIPE_BKPT_REGS) + /* FIXME: this macro makes the assumption that the PARTITION regs are the same + distance before the PIPE0 regs as the PIPE1 regs are after it, _and_ + assumes that the fields in the partition regs are in the same place + in the pipe regs. 
Need to validate these assumptions, or assert them */ +#define SGX_MP_CORE_PIPE_SELECT(r,c,p) \ + ((SGX_MP_CORE_SELECT(EUR_CR_PARTITION_##r,c) + p*(EUR_CR_PIPE0_##r-EUR_CR_PARTITION_##r))) + for (ui32PipeNum = 0; ui32PipeNum < NUM_PIPES_PLUS_ONE; ui32PipeNum++) + { + bFoundOne = + 0 != (EUR_CR_PARTITION_BREAKPOINT_TRAPPED_MASK & + OSReadHWReg(psDevInfo->pvRegsBaseKM, + SGX_MP_CORE_PIPE_SELECT(BREAKPOINT, + ui32CoreNum, + ui32PipeNum))); + if (bFoundOne) + { + bTrappedBPAny = IMG_TRUE; + ui32TrappedBPCoreNum = ui32CoreNum; + ui32TrappedBPPipeNum = ui32PipeNum; + } + } +#else /* defined(SGX_FEATURE_PERPIPE_BKPT_REGS) */ + bFoundOne = !!(EUR_CR_BREAKPOINT_TRAPPED_MASK & OSReadHWReg(psDevInfo->pvRegsBaseKM, SGX_MP_CORE_SELECT(EUR_CR_BREAKPOINT, ui32CoreNum))); + if (bFoundOne) + { + bTrappedBPAny = IMG_TRUE; + ui32TrappedBPCoreNum = ui32CoreNum; + } +#endif /* defined(SGX_FEATURE_PERPIPE_BKPT_REGS) */ + } + + psMiscInfo->uData.sSGXBreakpointInfo.bTrappedBP = bTrappedBPAny; +#else /* defined(SGX_FEATURE_MP) */ +#if defined(SGX_FEATURE_PERPIPE_BKPT_REGS) + #error Not yet considered the case for per-pipe regs in non-mp case +#endif + psMiscInfo->uData.sSGXBreakpointInfo.bTrappedBP = 0 != (EUR_CR_BREAKPOINT_TRAPPED_MASK & OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BREAKPOINT)); +#endif /* defined(SGX_FEATURE_MP) */ + + if (psMiscInfo->uData.sSGXBreakpointInfo.bTrappedBP) + { + IMG_UINT32 ui32Info0, ui32Info1; + +#if defined(SGX_FEATURE_MP) +#if defined(SGX_FEATURE_PERPIPE_BKPT_REGS) + ui32Info0 = OSReadHWReg(psDevInfo->pvRegsBaseKM, bTrappedBPMaster?EUR_CR_MASTER_BREAKPOINT_TRAP_INFO0:SGX_MP_CORE_PIPE_SELECT(BREAKPOINT_TRAP_INFO0, ui32TrappedBPCoreNum, ui32TrappedBPPipeNum)); + ui32Info1 = OSReadHWReg(psDevInfo->pvRegsBaseKM, bTrappedBPMaster?EUR_CR_MASTER_BREAKPOINT_TRAP_INFO1:SGX_MP_CORE_PIPE_SELECT(BREAKPOINT_TRAP_INFO1, ui32TrappedBPCoreNum, ui32TrappedBPPipeNum)); +#else /* defined(SGX_FEATURE_PERPIPE_BKPT_REGS) */ + ui32Info0 = OSReadHWReg(psDevInfo->pvRegsBaseKM, 
bTrappedBPMaster?EUR_CR_MASTER_BREAKPOINT_TRAP_INFO0:SGX_MP_CORE_SELECT(EUR_CR_BREAKPOINT_TRAP_INFO0, ui32TrappedBPCoreNum)); + ui32Info1 = OSReadHWReg(psDevInfo->pvRegsBaseKM, bTrappedBPMaster?EUR_CR_MASTER_BREAKPOINT_TRAP_INFO1:SGX_MP_CORE_SELECT(EUR_CR_BREAKPOINT_TRAP_INFO1, ui32TrappedBPCoreNum)); +#endif /* defined(SGX_FEATURE_PERPIPE_BKPT_REGS) */ +#else /* defined(SGX_FEATURE_MP) */ + ui32Info0 = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BREAKPOINT_TRAP_INFO0); + ui32Info1 = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BREAKPOINT_TRAP_INFO1); +#endif /* defined(SGX_FEATURE_MP) */ + +#ifdef SGX_FEATURE_PERPIPE_BKPT_REGS + psMiscInfo->uData.sSGXBreakpointInfo.ui32BPIndex = (ui32Info1 & EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_NUMBER_MASK) >> EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_NUMBER_SHIFT; + psMiscInfo->uData.sSGXBreakpointInfo.sTrappedBPDevVAddr.uiAddr = ui32Info0 & EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO0_ADDRESS_MASK; + psMiscInfo->uData.sSGXBreakpointInfo.ui32TrappedBPBurstLength = (ui32Info1 & EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_SIZE_MASK) >> EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_SIZE_SHIFT; + psMiscInfo->uData.sSGXBreakpointInfo.bTrappedBPRead = !!(ui32Info1 & EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_RNW_MASK); + psMiscInfo->uData.sSGXBreakpointInfo.ui32TrappedBPDataMaster = (ui32Info1 & EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_DATA_MASTER_MASK) >> EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_DATA_MASTER_SHIFT; + psMiscInfo->uData.sSGXBreakpointInfo.ui32TrappedBPTag = (ui32Info1 & EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_TAG_MASK) >> EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_TAG_SHIFT; +#else /* defined(SGX_FEATURE_PERPIPE_BKPT_REGS) */ + psMiscInfo->uData.sSGXBreakpointInfo.ui32BPIndex = (ui32Info1 & EUR_CR_BREAKPOINT_TRAP_INFO1_NUMBER_MASK) >> EUR_CR_BREAKPOINT_TRAP_INFO1_NUMBER_SHIFT; + psMiscInfo->uData.sSGXBreakpointInfo.sTrappedBPDevVAddr.uiAddr = ui32Info0 & EUR_CR_BREAKPOINT_TRAP_INFO0_ADDRESS_MASK; + 
psMiscInfo->uData.sSGXBreakpointInfo.ui32TrappedBPBurstLength = (ui32Info1 & EUR_CR_BREAKPOINT_TRAP_INFO1_SIZE_MASK) >> EUR_CR_BREAKPOINT_TRAP_INFO1_SIZE_SHIFT; + psMiscInfo->uData.sSGXBreakpointInfo.bTrappedBPRead = !!(ui32Info1 & EUR_CR_BREAKPOINT_TRAP_INFO1_RNW_MASK); + psMiscInfo->uData.sSGXBreakpointInfo.ui32TrappedBPDataMaster = (ui32Info1 & EUR_CR_BREAKPOINT_TRAP_INFO1_DATA_MASTER_MASK) >> EUR_CR_BREAKPOINT_TRAP_INFO1_DATA_MASTER_SHIFT; + psMiscInfo->uData.sSGXBreakpointInfo.ui32TrappedBPTag = (ui32Info1 & EUR_CR_BREAKPOINT_TRAP_INFO1_TAG_MASK) >> EUR_CR_BREAKPOINT_TRAP_INFO1_TAG_SHIFT; +#endif /* defined(SGX_FEATURE_PERPIPE_BKPT_REGS) */ +#if defined(SGX_FEATURE_MP) +#if defined(SGX_FEATURE_PERPIPE_BKPT_REGS) + /* mp, per-pipe regbanks */ + psMiscInfo->uData.sSGXBreakpointInfo.ui32CoreNum = bTrappedBPMaster?65535:(ui32TrappedBPCoreNum + (ui32TrappedBPPipeNum<<10)); +#else /* defined(SGX_FEATURE_PERPIPE_BKPT_REGS) */ + /* mp, regbanks unsplit */ + psMiscInfo->uData.sSGXBreakpointInfo.ui32CoreNum = bTrappedBPMaster?65535:ui32TrappedBPCoreNum; +#endif /* defined(SGX_FEATURE_PERPIPE_BKPT_REGS) */ +#else /* defined(SGX_FEATURE_MP) */ +#if defined(SGX_FEATURE_PERPIPE_BKPT_REGS) + /* non-mp, per-pipe regbanks */ +#error non-mp perpipe regs not yet supported +#else /* defined(SGX_FEATURE_PERPIPE_BKPT_REGS) */ + /* non-mp */ + psMiscInfo->uData.sSGXBreakpointInfo.ui32CoreNum = 65534; +#endif /* defined(SGX_FEATURE_PERPIPE_BKPT_REGS) */ +#endif /* defined(SGX_FEATURE_MP) */ + } +#endif /* !defined(NO_HARDWARE) */ + return PVRSRV_OK; + } + + case SGX_MISC_INFO_REQUEST_RESUME_BREAKPOINT: + { + /* This request resumes from the currently trapped breakpoint. 
*/ + /* Core number must be supplied */ + /* Polls for notify to be acknowledged by h/w */ +#if !defined(NO_HARDWARE) +#if defined(SGX_FEATURE_MP) + IMG_UINT32 ui32CoreNum; + IMG_BOOL bMaster; +#if defined(SGX_FEATURE_PERPIPE_BKPT_REGS) + IMG_UINT32 ui32PipeNum; +#endif +#endif /* defined(SGX_FEATURE_MP) */ + IMG_UINT32 ui32OldSeqNum, ui32NewSeqNum; + +#if defined(SGX_FEATURE_MP) +#if defined(SGX_FEATURE_PERPIPE_BKPT_REGS) + ui32PipeNum = psMiscInfo->uData.sSGXBreakpointInfo.ui32CoreNum >> 10; + ui32CoreNum = psMiscInfo->uData.sSGXBreakpointInfo.ui32CoreNum & 1023; + bMaster = psMiscInfo->uData.sSGXBreakpointInfo.ui32CoreNum > 32767; +#else /* defined(SGX_FEATURE_PERPIPE_BKPT_REGS) */ + ui32CoreNum = psMiscInfo->uData.sSGXBreakpointInfo.ui32CoreNum; + bMaster = ui32CoreNum > SGX_FEATURE_MP_CORE_COUNT_3D; +#endif /* defined(SGX_FEATURE_PERPIPE_BKPT_REGS) */ + if (bMaster) + { + /* master */ + /* EUR_CR_MASTER_BREAKPOINT_TRAPPED_MASK | EUR_CR_MASTER_BREAKPOINT_SEQNUM_MASK */ + ui32OldSeqNum = 0x1c & OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_BREAKPOINT); + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_BREAKPOINT_TRAP, EUR_CR_MASTER_BREAKPOINT_TRAP_WRNOTIFY_MASK | EUR_CR_MASTER_BREAKPOINT_TRAP_CONTINUE_MASK); + do + { + ui32NewSeqNum = 0x1c & OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_BREAKPOINT); + } + while (ui32OldSeqNum == ui32NewSeqNum); + } + else +#endif /* defined(SGX_FEATURE_MP) */ + { + /* core */ +#if defined(SGX_FEATURE_PERPIPE_BKPT_REGS) + ui32OldSeqNum = 0x1c & OSReadHWReg(psDevInfo->pvRegsBaseKM, SGX_MP_CORE_PIPE_SELECT(BREAKPOINT, ui32CoreNum, ui32PipeNum)); + OSWriteHWReg(psDevInfo->pvRegsBaseKM, SGX_MP_CORE_PIPE_SELECT(BREAKPOINT_TRAP, ui32CoreNum, ui32PipeNum), EUR_CR_PARTITION_BREAKPOINT_TRAP_WRNOTIFY_MASK | EUR_CR_PARTITION_BREAKPOINT_TRAP_CONTINUE_MASK); + do + { + ui32NewSeqNum = 0x1c & OSReadHWReg(psDevInfo->pvRegsBaseKM, SGX_MP_CORE_PIPE_SELECT(BREAKPOINT, ui32CoreNum, ui32PipeNum)); + } + while (ui32OldSeqNum == 
ui32NewSeqNum); +#else /* defined(SGX_FEATURE_PERPIPE_BKPT_REGS) */ + ui32OldSeqNum = 0x1c & OSReadHWReg(psDevInfo->pvRegsBaseKM, SGX_MP_CORE_SELECT(EUR_CR_BREAKPOINT, ui32CoreNum)); + OSWriteHWReg(psDevInfo->pvRegsBaseKM, SGX_MP_CORE_SELECT(EUR_CR_BREAKPOINT_TRAP, ui32CoreNum), EUR_CR_BREAKPOINT_TRAP_WRNOTIFY_MASK | EUR_CR_BREAKPOINT_TRAP_CONTINUE_MASK); + do + { + ui32NewSeqNum = 0x1c & OSReadHWReg(psDevInfo->pvRegsBaseKM, SGX_MP_CORE_SELECT(EUR_CR_BREAKPOINT, ui32CoreNum)); + } + while (ui32OldSeqNum == ui32NewSeqNum); +#endif /* defined(SGX_FEATURE_PERPIPE_BKPT_REGS) */ + } +#endif /* !defined(NO_HARDWARE) */ + return PVRSRV_OK; + } +#endif /* SGX_FEATURE_DATA_BREAKPOINTS) */ + + case SGX_MISC_INFO_REQUEST_CLOCKSPEED: + { + psMiscInfo->uData.ui32SGXClockSpeed = psDevInfo->ui32CoreClockSpeed; + return PVRSRV_OK; + } + + case SGX_MISC_INFO_REQUEST_CLOCKSPEED_SLCSIZE: + { + psMiscInfo->uData.sQueryClockSpeedSLCSize.ui32SGXClockSpeed = SYS_SGX_CLOCK_SPEED; +#if defined(SGX_FEATURE_SYSTEM_CACHE) && defined(SYS_SGX_SLC_SIZE) + psMiscInfo->uData.sQueryClockSpeedSLCSize.ui32SGXSLCSize = SYS_SGX_SLC_SIZE; +#else + psMiscInfo->uData.sQueryClockSpeedSLCSize.ui32SGXSLCSize = 0; +#endif /* defined(SGX_FEATURE_SYSTEM_CACHE) && defined(SYS_SGX_SLC_SIZE) */ + return PVRSRV_OK; + } + + case SGX_MISC_INFO_REQUEST_ACTIVEPOWER: + { + psMiscInfo->uData.sActivePower.ui32NumActivePowerEvents = psDevInfo->psSGXHostCtl->ui32NumActivePowerEvents; + return PVRSRV_OK; + } + + case SGX_MISC_INFO_REQUEST_LOCKUPS: + { +#if defined(SUPPORT_HW_RECOVERY) + psMiscInfo->uData.sLockups.ui32uKernelDetectedLockups = psDevInfo->psSGXHostCtl->ui32uKernelDetectedLockups; + psMiscInfo->uData.sLockups.ui32HostDetectedLockups = psDevInfo->psSGXHostCtl->ui32HostDetectedLockups; +#else + psMiscInfo->uData.sLockups.ui32uKernelDetectedLockups = 0; + psMiscInfo->uData.sLockups.ui32HostDetectedLockups = 0; +#endif + return PVRSRV_OK; + } + + case SGX_MISC_INFO_REQUEST_SPM: + { + /* this is dealt with in UM */ + 
return PVRSRV_OK; + } + + case SGX_MISC_INFO_REQUEST_SGXREV: + { + PVRSRV_SGX_MISCINFO_FEATURES *psSGXFeatures; +// PPVRSRV_KERNEL_MEM_INFO psMemInfo = psDevInfo->psKernelSGXMiscMemInfo; + + eError = SGXGetMiscInfoUkernel(psDevInfo, psDeviceNode, hDevMemContext); + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "An error occurred in SGXGetMiscInfoUkernel: %d\n", + eError)); + return eError; + } + psSGXFeatures = &((PVRSRV_SGX_MISCINFO_INFO*)(psMemInfo->pvLinAddrKM))->sSGXFeatures; + + /* Copy SGX features into misc info struct, to return to client */ + psMiscInfo->uData.sSGXFeatures = *psSGXFeatures; + + /* Debug output */ + PVR_DPF((PVR_DBG_MESSAGE, "SGXGetMiscInfoKM: Core 0x%x, sw ID 0x%x, sw Rev 0x%x\n", + psSGXFeatures->ui32CoreRev, + psSGXFeatures->ui32CoreIdSW, + psSGXFeatures->ui32CoreRevSW)); + PVR_DPF((PVR_DBG_MESSAGE, "SGXGetMiscInfoKM: DDK version 0x%x, DDK build 0x%x\n", + psSGXFeatures->ui32DDKVersion, + psSGXFeatures->ui32DDKBuild)); + + /* done! */ + return PVRSRV_OK; + } + + case SGX_MISC_INFO_REQUEST_DRIVER_SGXREV: + { + PVRSRV_SGX_MISCINFO_FEATURES *psSGXFeatures; + + psSGXFeatures = &((PVRSRV_SGX_MISCINFO_INFO*)(psMemInfo->pvLinAddrKM))->sSGXFeatures; + + /* Reset the misc information to prevent + * confusion with values returned from the ukernel + */ + OSMemSet(psMemInfo->pvLinAddrKM, 0, + sizeof(PVRSRV_SGX_MISCINFO_INFO)); + + psSGXFeatures->ui32DDKVersion = + (PVRVERSION_MAJ << 16) | + (PVRVERSION_MIN << 8); + psSGXFeatures->ui32DDKBuild = PVRVERSION_BUILD; + + /* Also report the kernel module build options -- used in SGXConnectionCheck() */ + psSGXFeatures->ui32BuildOptions = (SGX_BUILD_OPTIONS); + + /* Copy SGX features into misc info struct, to return to client */ + psMiscInfo->uData.sSGXFeatures = *psSGXFeatures; + return PVRSRV_OK; + } + +#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG) + case SGX_MISC_INFO_REQUEST_EDM_STATUS_BUFFER_INFO: + { + /* Report the EDM status buffer location in memory */ + 
psMiscInfo->uData.sEDMStatusBufferInfo.sDevVAEDMStatusBuffer = psDevInfo->psKernelEDMStatusBufferMemInfo->sDevVAddr; + psMiscInfo->uData.sEDMStatusBufferInfo.pvEDMStatusBuffer = psDevInfo->psKernelEDMStatusBufferMemInfo->pvLinAddrKM; + return PVRSRV_OK; + } +#endif + +#if defined(SUPPORT_SGX_EDM_MEMORY_DEBUG) + case SGX_MISC_INFO_REQUEST_MEMREAD: + case SGX_MISC_INFO_REQUEST_MEMCOPY: + { + PVRSRV_ERROR eError; + PVRSRV_SGX_MISCINFO_FEATURES *psSGXFeatures; + PVRSRV_SGX_MISCINFO_MEMACCESS *psSGXMemSrc; /* user-defined mem read */ + PVRSRV_SGX_MISCINFO_MEMACCESS *psSGXMemDest; /* user-defined mem write */ + + { + *pui32MiscInfoFlags &= ~PVRSRV_USSE_MISCINFO_MEMREAD; + /* Set the mem read flag; src is user-defined */ + *pui32MiscInfoFlags |= PVRSRV_USSE_MISCINFO_MEMREAD; + psSGXMemSrc = &((PVRSRV_SGX_MISCINFO_INFO*)(psMemInfo->pvLinAddrKM))->sSGXMemAccessSrc; + + if(psMiscInfo->sDevVAddrSrc.uiAddr != 0) + { + psSGXMemSrc->sDevVAddr = psMiscInfo->sDevVAddrSrc; /* src address */ + } + else + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + } + + if( psMiscInfo->eRequest == SGX_MISC_INFO_REQUEST_MEMCOPY) + { + *pui32MiscInfoFlags &= ~PVRSRV_USSE_MISCINFO_MEMWRITE; + /* Set the mem write flag; dest is user-defined */ + *pui32MiscInfoFlags |= PVRSRV_USSE_MISCINFO_MEMWRITE; + psSGXMemDest = &((PVRSRV_SGX_MISCINFO_INFO*)(psMemInfo->pvLinAddrKM))->sSGXMemAccessDest; + + if(psMiscInfo->sDevVAddrDest.uiAddr != 0) + { + psSGXMemDest->sDevVAddr = psMiscInfo->sDevVAddrDest; /* dest address */ + } + else + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + } + + /* Get physical address of PD for memory read (may need to switch context in microkernel) */ + if(psMiscInfo->hDevMemContext != IMG_NULL) + { + SGXGetMMUPDAddrKM( (IMG_HANDLE)psDeviceNode, hDevMemContext, &psSGXMemSrc->sPDDevPAddr); + + /* Single app will always use the same src and dest mem context */ + psSGXMemDest->sPDDevPAddr = psSGXMemSrc->sPDDevPAddr; + } + else + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* Submit the 
task to the ukernel */ + eError = SGXGetMiscInfoUkernel(psDevInfo, psDeviceNode); + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "An error occurred in SGXGetMiscInfoUkernel: %d\n", + eError)); + return eError; + } + psSGXFeatures = &((PVRSRV_SGX_MISCINFO_INFO*)(psMemInfo->pvLinAddrKM))->sSGXFeatures; + +#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS) + if(*pui32MiscInfoFlags & PVRSRV_USSE_MISCINFO_MEMREAD_FAIL) + { + return PVRSRV_ERROR_INVALID_MISCINFO; + } +#endif + /* Copy SGX features into misc info struct, to return to client */ + psMiscInfo->uData.sSGXFeatures = *psSGXFeatures; + return PVRSRV_OK; + } +#endif /* SUPPORT_SGX_EDM_MEMORY_DEBUG */ + +#if defined(SUPPORT_SGX_HWPERF) + case SGX_MISC_INFO_REQUEST_SET_HWPERF_STATUS: + { + PVRSRV_SGX_MISCINFO_SET_HWPERF_STATUS *psSetHWPerfStatus = &psMiscInfo->uData.sSetHWPerfStatus; + const IMG_UINT32 ui32ValidFlags = PVRSRV_SGX_HWPERF_STATUS_RESET_COUNTERS | + PVRSRV_SGX_HWPERF_STATUS_GRAPHICS_ON | + PVRSRV_SGX_HWPERF_STATUS_PERIODIC_ON | + PVRSRV_SGX_HWPERF_STATUS_MK_EXECUTION_ON; + SGXMKIF_COMMAND sCommandData = {0}; + + /* Check for valid flags */ + if ((psSetHWPerfStatus->ui32NewHWPerfStatus & ~ui32ValidFlags) != 0) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + #if defined(PDUMP) + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, + "SGX ukernel HWPerf status %u\n", + psSetHWPerfStatus->ui32NewHWPerfStatus); + #endif /* PDUMP */ + + /* Copy the new group selector(s) to the host ctl for the ukernel */ + #if defined(SGX_FEATURE_EXTENDED_PERF_COUNTERS) + OSMemCopy(&psDevInfo->psSGXHostCtl->aui32PerfGroup[0], + &psSetHWPerfStatus->aui32PerfGroup[0], + sizeof(psDevInfo->psSGXHostCtl->aui32PerfGroup)); + OSMemCopy(&psDevInfo->psSGXHostCtl->aui32PerfBit[0], + &psSetHWPerfStatus->aui32PerfBit[0], + sizeof(psDevInfo->psSGXHostCtl->aui32PerfBit)); + psDevInfo->psSGXHostCtl->ui32PerfCounterBitSelect = psSetHWPerfStatus->ui32PerfCounterBitSelect; + psDevInfo->psSGXHostCtl->ui32PerfSumMux = 
psSetHWPerfStatus->ui32PerfSumMux; + #if defined(PDUMP) + PDUMPMEM(IMG_NULL, psDevInfo->psKernelSGXHostCtlMemInfo, + offsetof(SGXMKIF_HOST_CTL, aui32PerfGroup), + sizeof(psDevInfo->psSGXHostCtl->aui32PerfGroup), + PDUMP_FLAGS_CONTINUOUS, + MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo)); + PDUMPMEM(IMG_NULL, psDevInfo->psKernelSGXHostCtlMemInfo, + offsetof(SGXMKIF_HOST_CTL, aui32PerfBit), + sizeof(psDevInfo->psSGXHostCtl->aui32PerfBit), + PDUMP_FLAGS_CONTINUOUS, + MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo)); + PDUMPMEM(IMG_NULL, psDevInfo->psKernelSGXHostCtlMemInfo, + offsetof(SGXMKIF_HOST_CTL, ui32PerfCounterBitSelect), + sizeof(psDevInfo->psSGXHostCtl->ui32PerfCounterBitSelect), + PDUMP_FLAGS_CONTINUOUS, + MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo)); + PDUMPMEM(IMG_NULL, psDevInfo->psKernelSGXHostCtlMemInfo, + offsetof(SGXMKIF_HOST_CTL, ui32PerfSumMux), + sizeof(psDevInfo->psSGXHostCtl->ui32PerfSumMux), + PDUMP_FLAGS_CONTINUOUS, + MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo)); + #endif /* PDUMP */ + #else + psDevInfo->psSGXHostCtl->ui32PerfGroup = psSetHWPerfStatus->ui32PerfGroup; + #if defined(PDUMP) + PDUMPMEM(IMG_NULL, psDevInfo->psKernelSGXHostCtlMemInfo, + offsetof(SGXMKIF_HOST_CTL, ui32PerfGroup), + sizeof(psDevInfo->psSGXHostCtl->ui32PerfGroup), + PDUMP_FLAGS_CONTINUOUS, + MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo)); + #endif /* PDUMP */ + #endif /* SGX_FEATURE_EXTENDED_PERF_COUNTERS */ + + /* Kick the ukernel to update the hardware state */ + sCommandData.ui32Data[0] = psSetHWPerfStatus->ui32NewHWPerfStatus; + eError = SGXScheduleCCBCommandKM(psDeviceNode, + SGXMKIF_CMD_SETHWPERFSTATUS, + &sCommandData, + KERNEL_ID, + 0, + hDevMemContext, + IMG_FALSE); + return eError; + } +#endif /* SUPPORT_SGX_HWPERF */ + + case SGX_MISC_INFO_DUMP_DEBUG_INFO: + { + PVR_LOG(("User requested SGX debug info")); + + /* Dump SGX debug data to the kernel log. 
*/ + SGXDumpDebugInfo(psDeviceNode->pvDevice, IMG_FALSE); + + return PVRSRV_OK; + } + + case SGX_MISC_INFO_DUMP_DEBUG_INFO_FORCE_REGS: + { + if(!OSProcHasPrivSrvInit()) + { + PVR_DPF((PVR_DBG_ERROR, "Insufficient privileges to dump SGX " + "debug info with registers")); + + return PVRSRV_ERROR_INVALID_MISCINFO; + } + + PVR_LOG(("User requested SGX debug info")); + + /* Dump SGX debug data to the kernel log. */ + SGXDumpDebugInfo(psDeviceNode->pvDevice, IMG_TRUE); + + return PVRSRV_OK; + } + +#if defined(DEBUG) + /* Don't allow user-mode to reboot the device in production drivers */ + case SGX_MISC_INFO_PANIC: + { + PVR_LOG(("User requested SGX panic")); + + SGXPanic(psDeviceNode->pvDevice); + + return PVRSRV_OK; + } +#endif + + default: + { + /* switch statement fell though, so: */ + return PVRSRV_ERROR_INVALID_PARAMS; + } + } +} + + +IMG_EXPORT +PVRSRV_ERROR SGXReadHWPerfCBKM(IMG_HANDLE hDevHandle, + IMG_UINT32 ui32ArraySize, + PVRSRV_SGX_HWPERF_CB_ENTRY *psClientHWPerfEntry, + IMG_UINT32 *pui32DataCount, + IMG_UINT32 *pui32ClockSpeed, + IMG_UINT32 *pui32HostTimeStamp) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; + PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + SGXMKIF_HWPERF_CB *psHWPerfCB = psDevInfo->psKernelHWPerfCBMemInfo->pvLinAddrKM; + IMG_UINT i; + + for (i = 0; + psHWPerfCB->ui32Woff != psHWPerfCB->ui32Roff && i < ui32ArraySize; + i++) + { + SGXMKIF_HWPERF_CB_ENTRY *psMKPerfEntry = &psHWPerfCB->psHWPerfCBData[psHWPerfCB->ui32Roff]; + + psClientHWPerfEntry[i].ui32FrameNo = psMKPerfEntry->ui32FrameNo; + psClientHWPerfEntry[i].ui32PID = psMKPerfEntry->ui32PID; + psClientHWPerfEntry[i].ui32RTData = psMKPerfEntry->ui32RTData; + psClientHWPerfEntry[i].ui32Type = psMKPerfEntry->ui32Type; + psClientHWPerfEntry[i].ui32Ordinal = psMKPerfEntry->ui32Ordinal; + psClientHWPerfEntry[i].ui32Info = psMKPerfEntry->ui32Info; + psClientHWPerfEntry[i].ui32Clocksx16 = SGXConvertTimeStamp(psDevInfo, + 
psMKPerfEntry->ui32TimeWraps, + psMKPerfEntry->ui32Time); + OSMemCopy(&psClientHWPerfEntry[i].ui32Counters[0][0], + &psMKPerfEntry->ui32Counters[0][0], + sizeof(psMKPerfEntry->ui32Counters)); + + OSMemCopy(&psClientHWPerfEntry[i].ui32MiscCounters[0][0], + &psMKPerfEntry->ui32MiscCounters[0][0], + sizeof(psMKPerfEntry->ui32MiscCounters)); + +#if defined(SUPPORT_PVRSRV_ANDROID_SYSTRACE) && defined(EUR_CR_TIMER) + psClientHWPerfEntry[i].ui32SystraceIndex = psMKPerfEntry->ui32SystraceIndex; +#endif + + psHWPerfCB->ui32Roff = (psHWPerfCB->ui32Roff + 1) & (SGXMKIF_HWPERF_CB_SIZE - 1); + } + + *pui32DataCount = i; + *pui32ClockSpeed = psDevInfo->ui32CoreClockSpeed; + *pui32HostTimeStamp = OSClockus(); + + return eError; +} + + +/****************************************************************************** + End of file (sgxinit.c) +******************************************************************************/ diff --git a/sgx_km/eurasia_km/services4/srvkm/devices/sgx/sgxkick.c b/sgx_km/eurasia_km/services4/srvkm/devices/sgx/sgxkick.c new file mode 100644 index 0000000..e15670d --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/devices/sgx/sgxkick.c @@ -0,0 +1,811 @@ +/*************************************************************************/ /*! +@Title Device specific kickTA routines +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include /* For the macro offsetof() */ +#include "services_headers.h" +#include "sgxinfo.h" +#include "sgxinfokm.h" +#if defined (PDUMP) +#include "sgxapi_km.h" +#include "pdump_km.h" +#endif +#include "sgx_bridge_km.h" +#include "osfunc.h" +#include "pvr_debug.h" +#include "sgxutils.h" +#include "ttrace.h" + +#if defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) || defined(PVR_ANDROID_NATIVE_WINDOW_HAS_FENCE) +#include "pvr_sync_common.h" +#endif + +#if defined(SUPPORT_PVRSRV_ANDROID_SYSTRACE) && defined(EUR_CR_TIMER) +#include "systrace.h" +#endif + +#if defined(SUPPORT_DMABUF) +#include "pvr_linux_fence.h" +#endif + +/*! +****************************************************************************** + + @Function SGXDoKickKM + + @Description + + Really kicks the TA + + @Input hDevHandle - Device handle + + @Return ui32Error - success or failure + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR SGXDoKickKM(IMG_HANDLE hDevHandle, SGX_CCB_KICK *psCCBKick) +{ + PVRSRV_ERROR eError; + PVRSRV_KERNEL_SYNC_INFO *psSyncInfo; + PVRSRV_KERNEL_MEM_INFO *psCCBMemInfo = (PVRSRV_KERNEL_MEM_INFO *) psCCBKick->hCCBKernelMemInfo; + SGXMKIF_CMDTA_SHARED *psTACmd; + IMG_UINT32 i; + IMG_HANDLE hDevMemContext = IMG_NULL; + IMG_HANDLE *pahDstSyncHandles; +#if defined(SUPPORT_DMABUF) + IMG_UINT32 ui32FenceTag = 0; + IMG_UINT32 ui32NumResvObjs = 0; + IMG_BOOL bBlockingFences = IMG_FALSE; +#endif +#if (defined(SUPPORT_PVRSRV_ANDROID_SYSTRACE) && defined(EUR_CR_TIMER)) + PVRSRV_DEVICE_NODE *psDeviceNode; + PVRSRV_SGXDEV_INFO *psDevInfo; + + psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevHandle; + psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice; +#endif +#if defined(FIX_HW_BRN_31620) + hDevMemContext = psCCBKick->hDevMemContext; +#endif + PVR_TTRACE(PVRSRV_TRACE_GROUP_KICK, PVRSRV_TRACE_CLASS_FUNCTION_ENTER, KICK_TOKEN_DOKICK); + + if 
(!CCB_OFFSET_IS_VALID(SGXMKIF_CMDTA_SHARED, psCCBMemInfo, psCCBKick, ui32CCBOffset)) + { + PVR_DPF((PVR_DBG_ERROR, "SGXDoKickKM: Invalid CCB offset")); + PVR_TTRACE(PVRSRV_TRACE_GROUP_KICK, PVRSRV_TRACE_CLASS_FUNCTION_EXIT, KICK_TOKEN_DOKICK); + return PVRSRV_ERROR_INVALID_PARAMS; + } + /* override QAC warning about stricter alignment */ + /* PRQA S 3305 1 */ + psTACmd = CCB_DATA_FROM_OFFSET(SGXMKIF_CMDTA_SHARED, psCCBMemInfo, psCCBKick, ui32CCBOffset); + + PVR_TTRACE(PVRSRV_TRACE_GROUP_KICK, PVRSRV_TRACE_CLASS_CMD_START, KICK_TOKEN_DOKICK); + + PVR_TTRACE_UI32(PVRSRV_TRACE_GROUP_KICK, PVRSRV_TRACE_CLASS_FRAMENUM, KICK_TOKEN_FRAMENUM, psCCBKick->ui32FrameNum); + PVR_TTRACE_UI32(PVRSRV_TRACE_GROUP_KICK, PVRSRV_TRACE_CLASS_NONE, KICK_TOKEN_RENDERCONTEXT, psCCBKick->sCommand.ui32Data[1]); + PVR_TTRACE_DEV_VIRTADDR(PVRSRV_TRACE_GROUP_KICK, PVRSRV_TRACE_CLASS_DEVVADDR, KICK_TOKEN_HWRTDATASET, psCCBKick->sHWRTDataSetDevAddr); + PVR_TTRACE_DEV_VIRTADDR(PVRSRV_TRACE_GROUP_KICK, PVRSRV_TRACE_CLASS_DEVVADDR, KICK_TOKEN_HWRTDATA, psCCBKick->sHWRTDataDevAddr); + +#if defined(TTRACE) + if (psCCBKick->bFirstKickOrResume) + { + PVR_TTRACE(PVRSRV_TRACE_GROUP_KICK, + PVRSRV_TRACE_CLASS_FLAGS, + KICK_TOKEN_FIRST_KICK); + } + + if (psCCBKick->bLastInScene) + { + PVR_TTRACE(PVRSRV_TRACE_GROUP_KICK, + PVRSRV_TRACE_CLASS_FLAGS, + KICK_TOKEN_LAST_KICK); + } +#endif + PVR_TTRACE_UI32(PVRSRV_TRACE_GROUP_KICK, PVRSRV_TRACE_CLASS_CCB, + KICK_TOKEN_CCB_OFFSET, psCCBKick->ui32CCBOffset); + + /* TA/3D dependency */ + if (psCCBKick->hTA3DSyncInfo) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hTA3DSyncInfo; + + PVR_TTRACE_SYNC_OBJECT(PVRSRV_TRACE_GROUP_KICK, KICK_TOKEN_TA3D_SYNC, + psSyncInfo, PVRSRV_SYNCOP_SAMPLE); + + psTACmd->sTA3DDependency.sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr; + + psTACmd->sTA3DDependency.ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending; + + if (psCCBKick->bTADependency) + { + SyncTakeWriteOp(psSyncInfo, 
SYNC_OP_CLASS_KICKTA); + } + } + + if (psCCBKick->hTASyncInfo != IMG_NULL) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hTASyncInfo; + + PVR_TTRACE_SYNC_OBJECT(PVRSRV_TRACE_GROUP_KICK, KICK_TOKEN_TA_SYNC, + psSyncInfo, PVRSRV_SYNCOP_SAMPLE); + + psTACmd->sTATQSyncReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr; + psTACmd->sTATQSyncWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr; + + psTACmd->ui32TATQSyncReadOpsPendingVal = SyncTakeReadOp(psSyncInfo, SYNC_OP_CLASS_KICKTA); + psTACmd->ui32TATQSyncWriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending; + } + + if (psCCBKick->h3DSyncInfo != IMG_NULL) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->h3DSyncInfo; + + PVR_TTRACE_SYNC_OBJECT(PVRSRV_TRACE_GROUP_KICK, KICK_TOKEN_3D_SYNC, + psSyncInfo, PVRSRV_SYNCOP_SAMPLE); + + psTACmd->s3DTQSyncReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr; + psTACmd->s3DTQSyncWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr; + + psTACmd->ui323DTQSyncReadOpsPendingVal = SyncTakeReadOp(psSyncInfo, SYNC_OP_CLASS_KICKTA); + psTACmd->ui323DTQSyncWriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending; + } + + psTACmd->ui32NumTAStatusVals = psCCBKick->ui32NumTAStatusVals; + if (psCCBKick->ui32NumTAStatusVals != 0) + { + /* Copy status vals over */ + for (i = 0; i < psCCBKick->ui32NumTAStatusVals; i++) + { +#if defined(SUPPORT_SGX_NEW_STATUS_VALS) + psTACmd->sCtlTAStatusInfo[i] = psCCBKick->asTAStatusUpdate[i].sCtlStatus; +#else + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ahTAStatusSyncInfo[i]; + psTACmd->sCtlTAStatusInfo[i].sStatusDevAddr = psSyncInfo->sReadOpsCompleteDevVAddr; + psTACmd->sCtlTAStatusInfo[i].ui32StatusValue = psSyncInfo->psSyncData->ui32ReadOpsPending; +#endif + } + } + + psTACmd->ui32Num3DStatusVals = psCCBKick->ui32Num3DStatusVals; + if (psCCBKick->ui32Num3DStatusVals != 0) + { + /* Copy status vals over */ + for (i = 0; i < psCCBKick->ui32Num3DStatusVals; i++) + { 
+#if defined(SUPPORT_SGX_NEW_STATUS_VALS) + psTACmd->sCtl3DStatusInfo[i] = psCCBKick->as3DStatusUpdate[i].sCtlStatus; +#else + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ah3DStatusSyncInfo[i]; + psTACmd->sCtl3DStatusInfo[i].sStatusDevAddr = psSyncInfo->sReadOpsCompleteDevVAddr; + psTACmd->sCtl3DStatusInfo[i].ui32StatusValue = psSyncInfo->psSyncData->ui32ReadOpsPending; +#endif + } + } + + + /* texture dependencies */ +#if defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) || defined(PVR_ANDROID_NATIVE_WINDOW_HAS_FENCE) + eError = PVRSyncPatchCCBKickSyncInfos(psCCBKick->ahSrcKernelSyncInfo, + psTACmd->asSrcSyncs, + &psCCBKick->ui32NumSrcSyncs); + if(eError != PVRSRV_OK) + { + /* We didn't kick yet, or perform PDUMP processing, so we should + * be able to trivially roll back any changes made to the sync + * data. If we don't do this, we'll wedge services cleanup. + */ + + if (psCCBKick->h3DSyncInfo != IMG_NULL) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->h3DSyncInfo; + psSyncInfo->psSyncData->ui32ReadOpsPending--; + } + + if (psCCBKick->hTASyncInfo != IMG_NULL) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hTASyncInfo; + psSyncInfo->psSyncData->ui32ReadOpsPending--; + } + + if (psCCBKick->hTA3DSyncInfo && psCCBKick->bTADependency) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hTA3DSyncInfo; + psSyncInfo->psSyncData->ui32WriteOpsPending--; + } + + PVR_DPF((PVR_DBG_ERROR, "SGXDoKickKM: PVRSyncPatchCCBKickSyncInfos failed.")); + PVR_TTRACE(PVRSRV_TRACE_GROUP_KICK, PVRSRV_TRACE_CLASS_FUNCTION_EXIT, + KICK_TOKEN_DOKICK); + return eError; + } +#else /* defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) */ +#if defined(SUPPORT_DMABUF) + ui32NumResvObjs = PVRLinuxFenceNumResvObjs(&bBlockingFences, + psCCBKick->ui32NumSrcSyncs, + psCCBKick->ahSrcKernelSyncInfo, + NULL, + psCCBKick->bFirstKickOrResume ? 
psCCBKick->ui32NumDstSyncObjects : 0, + (IMG_HANDLE*)psCCBKick->hDstSyncHandles, + NULL); + /* + * If there are no blocking fences, the GPU need not wait whilst + * the reservation objects are being processed. They can be processed + * later, after the kick. + */ + if (ui32NumResvObjs && bBlockingFences) + { + eError = PVRLinuxFenceProcess(&ui32FenceTag, + ui32NumResvObjs, + bBlockingFences, + psCCBKick->ui32NumSrcSyncs, + psCCBKick->ahSrcKernelSyncInfo, + NULL, + psCCBKick->bFirstKickOrResume ? psCCBKick->ui32NumDstSyncObjects : 0, + (IMG_HANDLE*)psCCBKick->hDstSyncHandles, + NULL); + if (eError != PVRSRV_OK) + { + return eError; + } + } +#endif + for (i=0; iui32NumSrcSyncs; i++) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahSrcKernelSyncInfo[i]; + + PVR_TTRACE_SYNC_OBJECT(PVRSRV_TRACE_GROUP_KICK, KICK_TOKEN_SRC_SYNC, + psSyncInfo, PVRSRV_SYNCOP_SAMPLE); + + psTACmd->asSrcSyncs[i].sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr; + psTACmd->asSrcSyncs[i].sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr; + + /* Get ui32ReadOpsPending snapshot and copy into the CCB - before incrementing. */ + psTACmd->asSrcSyncs[i].ui32ReadOpsPendingVal = SyncTakeReadOp(psSyncInfo, SYNC_OP_CLASS_KICKTA); + /* Copy ui32WriteOpsPending snapshot into the CCB. 
*/ + psTACmd->asSrcSyncs[i].ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending; + } +#endif /* defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) */ + psTACmd->ui32NumSrcSyncs = psCCBKick->ui32NumSrcSyncs; + + if (psCCBKick->bFirstKickOrResume && psCCBKick->ui32NumDstSyncObjects > 0) + { + PVRSRV_KERNEL_MEM_INFO *psHWDstSyncListMemInfo = + (PVRSRV_KERNEL_MEM_INFO *)psCCBKick->hKernelHWSyncListMemInfo; + SGXMKIF_HWDEVICE_SYNC_LIST *psHWDeviceSyncList = psHWDstSyncListMemInfo->pvLinAddrKM; + IMG_UINT32 ui32NumDstSyncs = psCCBKick->ui32NumDstSyncObjects; + + PVR_ASSERT(((PVRSRV_KERNEL_MEM_INFO *)psCCBKick->hKernelHWSyncListMemInfo)->uAllocSize >= (sizeof(SGXMKIF_HWDEVICE_SYNC_LIST) + + (sizeof(PVRSRV_DEVICE_SYNC_OBJECT) * ui32NumDstSyncs))); + + psHWDeviceSyncList->ui32NumSyncObjects = ui32NumDstSyncs; +#if defined(PDUMP) + if (PDumpIsCaptureFrameKM()) + { + PDUMPCOMMENT("HWDeviceSyncList for TACmd\r\n"); + PDUMPMEM(IMG_NULL, + psHWDstSyncListMemInfo, + 0, + sizeof(SGXMKIF_HWDEVICE_SYNC_LIST), + 0, + MAKEUNIQUETAG(psHWDstSyncListMemInfo)); + } +#endif + pahDstSyncHandles = psCCBKick->hDstSyncHandles; + for (i=0; ipsSyncData->ui64LastWrite = ui64KickCount; + + PVR_TTRACE_SYNC_OBJECT(PVRSRV_TRACE_GROUP_KICK, KICK_TOKEN_DST_SYNC, + psSyncInfo, PVRSRV_SYNCOP_SAMPLE); + + psHWDeviceSyncList->asSyncData[i].sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr; + psHWDeviceSyncList->asSyncData[i].sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr; + psHWDeviceSyncList->asSyncData[i].sReadOps2CompleteDevVAddr = psSyncInfo->sReadOps2CompleteDevVAddr; + + psHWDeviceSyncList->asSyncData[i].ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending; + psHWDeviceSyncList->asSyncData[i].ui32WriteOpsPendingVal = SyncTakeWriteOp(psSyncInfo, SYNC_OP_CLASS_KICKTA); + psHWDeviceSyncList->asSyncData[i].ui32ReadOps2PendingVal = psSyncInfo->psSyncData->ui32ReadOps2Pending; + + #if defined(PDUMP) + if (PDumpIsCaptureFrameKM()) + { + IMG_UINT32 
ui32ModifiedValue; + IMG_UINT32 ui32SyncOffset = offsetof(SGXMKIF_HWDEVICE_SYNC_LIST, asSyncData) + + (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)); + IMG_UINT32 ui32WOpsOffset = ui32SyncOffset + + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32WriteOpsPendingVal); + IMG_UINT32 ui32ROpsOffset = ui32SyncOffset + + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32ReadOpsPendingVal); + IMG_UINT32 ui32ROps2Offset = ui32SyncOffset + + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32ReadOps2PendingVal); + + PDUMPCOMMENT("HWDeviceSyncObject for RT: %i\r\n", i); + + PDUMPMEM(IMG_NULL, + psHWDstSyncListMemInfo, + ui32SyncOffset, + sizeof(PVRSRV_DEVICE_SYNC_OBJECT), + 0, + MAKEUNIQUETAG(psHWDstSyncListMemInfo)); + + psSyncInfo->psSyncData->ui32LastOpDumpVal++; + + ui32ModifiedValue = psSyncInfo->psSyncData->ui32LastOpDumpVal - 1; + + PDUMPCOMMENT("Modify RT %d WOpPendingVal in HWDevSyncList\r\n", i); + +#if defined(SUPPORT_PDUMP_SYNC_DEBUG) + PDUMPCOMMENT("TA Dst: PDump sync sample: uiAddr = 0x%08x, ui32LastOpDumpVal = 0x%08x\r\n", + psSyncInfo->sWriteOpsCompleteDevVAddr.uiAddr, + ui32ModifiedValue); +#endif + PDUMPMEM(&ui32ModifiedValue, + psHWDstSyncListMemInfo, + ui32WOpsOffset, + sizeof(IMG_UINT32), + 0, + MAKEUNIQUETAG(psHWDstSyncListMemInfo)); + + ui32ModifiedValue = 0; + PDUMPCOMMENT("Modify RT %d ROpsPendingVal in HWDevSyncList\r\n", i); + + PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal, + psHWDstSyncListMemInfo, + ui32ROpsOffset, + sizeof(IMG_UINT32), + 0, + MAKEUNIQUETAG(psHWDstSyncListMemInfo)); + + /* + * Force the ROps2Complete value to 0. 
+ */ + PDUMPCOMMENT("Modify RT %d ROps2PendingVal in HWDevSyncList\r\n", i); + PDUMPMEM(&ui32ModifiedValue, + psHWDstSyncListMemInfo, + ui32ROps2Offset, + sizeof(IMG_UINT32), + 0, + MAKEUNIQUETAG(psHWDstSyncListMemInfo)); +#if defined(SUPPORT_PDUMP_SYNC_DEBUG) + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_PERSISTENT, "TA Dst: PDump sync update: uiAddr = 0x%08x, ui32LastOpDumpVal = 0x%08x\r\n", + psSyncInfo->sWriteOpsCompleteDevVAddr.uiAddr, + psSyncInfo->psSyncData->ui32LastOpDumpVal); +#endif + } + #endif /* defined(PDUMP) */ + } + else + { + psHWDeviceSyncList->asSyncData[i].sWriteOpsCompleteDevVAddr.uiAddr = 0; + psHWDeviceSyncList->asSyncData[i].sReadOpsCompleteDevVAddr.uiAddr = 0; + psHWDeviceSyncList->asSyncData[i].sReadOps2CompleteDevVAddr.uiAddr = 0; + + psHWDeviceSyncList->asSyncData[i].ui32ReadOpsPendingVal = 0; + psHWDeviceSyncList->asSyncData[i].ui32ReadOps2PendingVal = 0; + psHWDeviceSyncList->asSyncData[i].ui32WriteOpsPendingVal = 0; + } + } + } + + /* + NOTE: THIS MUST BE THE LAST THING WRITTEN TO THE TA COMMAND! + Set the ready for so the uKernel will process the command. 
+ */ + psTACmd->ui32CtrlFlags |= SGXMKIF_CMDTA_CTRLFLAGS_READY; + +#if defined(PDUMP) + if (PDumpIsCaptureFrameKM()) + { + PDUMPCOMMENT("Shared part of TA command\r\n"); + + PDUMPMEM(psTACmd, + psCCBMemInfo, + psCCBKick->ui32CCBDumpWOff, + sizeof(SGXMKIF_CMDTA_SHARED), + 0, + MAKEUNIQUETAG(psCCBMemInfo)); + + for (i=0; iui32NumSrcSyncs; i++) + { + IMG_UINT32 ui32ModifiedValue; + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahSrcKernelSyncInfo[i]; + + psSyncInfo->psSyncData->ui32LastReadOpDumpVal++; + + ui32ModifiedValue = psSyncInfo->psSyncData->ui32LastReadOpDumpVal - 1; + + PDUMPCOMMENT("Modify SrcSync %d ROpsPendingVal\r\n", i); + + PDUMPMEM(&ui32ModifiedValue, + psCCBMemInfo, + psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, asSrcSyncs) + + (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32ReadOpsPendingVal), + sizeof(IMG_UINT32), + 0, + MAKEUNIQUETAG(psCCBMemInfo)); + + PDUMPCOMMENT("Modify SrcSync %d WOpPendingVal\r\n", i); + +#if defined(SUPPORT_PDUMP_SYNC_DEBUG) + PDUMPCOMMENT("TA Src: PDump sync sample: uiAddr = 0x%08x, ui32LastOpDumpVal = 0x%08x\r\n", + psSyncInfo->sWriteOpsCompleteDevVAddr.uiAddr, + psSyncInfo->psSyncData->ui32LastOpDumpVal); +#endif + + PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal, + psCCBMemInfo, + psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, asSrcSyncs) + + (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32WriteOpsPendingVal), + sizeof(IMG_UINT32), + 0, + MAKEUNIQUETAG(psCCBMemInfo)); + +#if defined(SUPPORT_PDUMP_SYNC_DEBUG) + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_PERSISTENT, "TA Src: PDump sync update: uiAddr = 0x%08x, ui32LastReadOpDumpVal = 0x%08x\r\n", + psSyncInfo->sReadOpsCompleteDevVAddr.uiAddr, + psSyncInfo->psSyncData->ui32LastReadOpDumpVal); +#endif + } + + if (psCCBKick->hTA3DSyncInfo != IMG_NULL) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hTA3DSyncInfo; + + PDUMPCOMMENT("Modify TA/3D dependency 
WOpPendingVal\r\n"); + +#if defined(SUPPORT_PDUMP_SYNC_DEBUG) + PDUMPCOMMENT("TA TADep: PDump sync sample: uiAddr = 0x%08x, ui32LastOpDumpVal = 0x%08x\r\n", + psSyncInfo->sWriteOpsCompleteDevVAddr.uiAddr, + psSyncInfo->psSyncData->ui32LastOpDumpVal); +#endif + + PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal, + psCCBMemInfo, + psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, sTA3DDependency.ui32WriteOpsPendingVal), + sizeof(IMG_UINT32), + 0, + MAKEUNIQUETAG(psCCBMemInfo)); + + if (psCCBKick->bTADependency) + { + psSyncInfo->psSyncData->ui32LastOpDumpVal++; + +#if defined(SUPPORT_PDUMP_SYNC_DEBUG) + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_PERSISTENT, "TA TADep: PDump sync update: uiAddr = 0x%08x, ui32LastOpDumpVal = 0x%08x\r\n", + psSyncInfo->sWriteOpsCompleteDevVAddr.uiAddr, + psSyncInfo->psSyncData->ui32LastOpDumpVal); +#endif + } + } + + if (psCCBKick->hTASyncInfo != IMG_NULL) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hTASyncInfo; + + PDUMPCOMMENT("Modify TA/TQ ROpPendingVal\r\n"); + + PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal, + psCCBMemInfo, + psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, ui32TATQSyncReadOpsPendingVal), + sizeof(IMG_UINT32), + 0, + MAKEUNIQUETAG(psCCBMemInfo)); + + PDUMPCOMMENT("Modify TA/TQ WOpPendingVal\r\n"); + +#if defined(SUPPORT_PDUMP_SYNC_DEBUG) + PDUMPCOMMENT("TA TATQ: PDump sync sample: uiAddr = 0x%08x, ui32LastOpDumpVal = 0x%08x\r\n", + psSyncInfo->sWriteOpsCompleteDevVAddr.uiAddr, + psSyncInfo->psSyncData->ui32LastOpDumpVal); +#endif + + PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal, + psCCBMemInfo, + psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, ui32TATQSyncWriteOpsPendingVal), + sizeof(IMG_UINT32), + 0, + MAKEUNIQUETAG(psCCBMemInfo)); + + psSyncInfo->psSyncData->ui32LastReadOpDumpVal++; + } + + if (psCCBKick->h3DSyncInfo != IMG_NULL) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->h3DSyncInfo; + + PDUMPCOMMENT("Modify 3D/TQ ROpPendingVal\r\n"); + + 
PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal, + psCCBMemInfo, + psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, ui323DTQSyncReadOpsPendingVal), + sizeof(IMG_UINT32), + 0, + MAKEUNIQUETAG(psCCBMemInfo)); + + PDUMPCOMMENT("Modify 3D/TQ WOpPendingVal\r\n"); + +#if defined(SUPPORT_PDUMP_SYNC_DEBUG) + PDUMPCOMMENT("TA 3DTQ: PDump sync sample: uiAddr = 0x%08x, ui32LastOpDumpVal = 0x%08x\r\n", + psSyncInfo->sWriteOpsCompleteDevVAddr.uiAddr, + psSyncInfo->psSyncData->ui32LastOpDumpVal); +#endif + + PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal, + psCCBMemInfo, + psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, ui323DTQSyncWriteOpsPendingVal), + sizeof(IMG_UINT32), + 0, + MAKEUNIQUETAG(psCCBMemInfo)); + + psSyncInfo->psSyncData->ui32LastReadOpDumpVal++; + } + + for (i = 0; i < psCCBKick->ui32NumTAStatusVals; i++) + { +#if !defined(SUPPORT_SGX_NEW_STATUS_VALS) + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ahTAStatusSyncInfo[i]; + PDUMPCOMMENT("Modify TA status value in TA cmd\r\n"); + PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal, + psCCBMemInfo, + psCCBKick->ui32CCBDumpWOff + (IMG_UINT32)offsetof(SGXMKIF_CMDTA_SHARED, sCtlTAStatusInfo[i].ui32StatusValue), + sizeof(IMG_UINT32), + 0, + MAKEUNIQUETAG(psCCBMemInfo)); +#endif + } + + for (i = 0; i < psCCBKick->ui32Num3DStatusVals; i++) + { +#if !defined(SUPPORT_SGX_NEW_STATUS_VALS) + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ah3DStatusSyncInfo[i]; + PDUMPCOMMENT("Modify 3D status value in TA cmd\r\n"); + PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal, + psCCBMemInfo, + psCCBKick->ui32CCBDumpWOff + (IMG_UINT32)offsetof(SGXMKIF_CMDTA_SHARED, sCtl3DStatusInfo[i].ui32StatusValue), + sizeof(IMG_UINT32), + 0, + MAKEUNIQUETAG(psCCBMemInfo)); +#endif + } + } +#endif /* defined(PDUMP) */ + + PVR_TTRACE(PVRSRV_TRACE_GROUP_KICK, PVRSRV_TRACE_CLASS_CMD_END, + KICK_TOKEN_DOKICK); + + eError = SGXScheduleCCBCommandKM(hDevHandle, SGXMKIF_CMD_TA, &psCCBKick->sCommand, KERNEL_ID, 
0, hDevMemContext, psCCBKick->bLastInScene); + if (eError == PVRSRV_ERROR_RETRY) + { +#if defined(SUPPORT_DMABUF) + if (ui32NumResvObjs && bBlockingFences) + { + PVRLinuxFenceRelease(ui32FenceTag, + psCCBKick->ui32NumSrcSyncs, + psCCBKick->ahSrcKernelSyncInfo, + NULL, + psCCBKick->bFirstKickOrResume ? psCCBKick->ui32NumDstSyncObjects : 0, + (IMG_HANDLE*)psCCBKick->hDstSyncHandles, + NULL); + } +#endif + if (psCCBKick->bFirstKickOrResume && psCCBKick->ui32NumDstSyncObjects > 0) + { + pahDstSyncHandles = psCCBKick->hDstSyncHandles; + for (i=0; i < psCCBKick->ui32NumDstSyncObjects; i++) + { + /* Client will retry, so undo the write ops pending increment done above. */ + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)pahDstSyncHandles[i]; + + if (psSyncInfo) + { + psSyncInfo->psSyncData->ui32WriteOpsPending--; + SyncRollBackWriteOp(psSyncInfo, SYNC_OP_CLASS_KICKTA); +#if defined(PDUMP) + if (PDumpIsCaptureFrameKM()) + { + psSyncInfo->psSyncData->ui32LastOpDumpVal--; + } +#endif + } + } + } + + for (i=0; iui32NumSrcSyncs; i++) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahSrcKernelSyncInfo[i]; + psSyncInfo->psSyncData->ui32ReadOpsPending--; + SyncRollBackReadOp(psSyncInfo, SYNC_OP_CLASS_KICKTA); + } + + if (psCCBKick->hTA3DSyncInfo) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hTA3DSyncInfo; + psSyncInfo->psSyncData->ui32ReadOpsPending--; + SyncRollBackReadOp(psSyncInfo, SYNC_OP_CLASS_KICKTA); + } + + if (psCCBKick->hTASyncInfo) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hTASyncInfo; + psSyncInfo->psSyncData->ui32ReadOpsPending--; + SyncRollBackReadOp(psSyncInfo, SYNC_OP_CLASS_KICKTA); + } + + if (psCCBKick->h3DSyncInfo) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->h3DSyncInfo; + psSyncInfo->psSyncData->ui32ReadOpsPending--; + SyncRollBackReadOp(psSyncInfo, SYNC_OP_CLASS_KICKTA); + } + + PVR_TTRACE(PVRSRV_TRACE_GROUP_KICK, PVRSRV_TRACE_CLASS_FUNCTION_EXIT, + KICK_TOKEN_DOKICK); + return eError; + } + else if (PVRSRV_OK != 
eError) + { + PVR_DPF((PVR_DBG_ERROR, "SGXDoKickKM: SGXScheduleCCBCommandKM failed.")); + PVR_TTRACE(PVRSRV_TRACE_GROUP_KICK, PVRSRV_TRACE_CLASS_FUNCTION_EXIT, + KICK_TOKEN_DOKICK); +#if defined(SUPPORT_DMABUF) && defined(NO_HARDWARE) + PVRLinuxFenceCheckAll(); +#endif + return eError; + } +#if defined(SUPPORT_DMABUF) + else if (ui32NumResvObjs && !bBlockingFences) + { + eError = PVRLinuxFenceProcess(&ui32FenceTag, + ui32NumResvObjs, + bBlockingFences, + psCCBKick->ui32NumSrcSyncs, + psCCBKick->ahSrcKernelSyncInfo, + NULL, + psCCBKick->bFirstKickOrResume ? psCCBKick->ui32NumDstSyncObjects : 0, + (IMG_HANDLE*)psCCBKick->hDstSyncHandles, + NULL); + if (eError != PVRSRV_OK) + { + return eError; + } + } +#endif + + +#if defined(NO_HARDWARE) + + + /* TA/3D dependency */ + if (psCCBKick->hTA3DSyncInfo) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hTA3DSyncInfo; + + if (psCCBKick->bTADependency) + { + psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending; + } + } + + if (psCCBKick->hTASyncInfo != IMG_NULL) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hTASyncInfo; + + psSyncInfo->psSyncData->ui32ReadOpsComplete = psSyncInfo->psSyncData->ui32ReadOpsPending; + } + + if (psCCBKick->h3DSyncInfo != IMG_NULL) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->h3DSyncInfo; + + psSyncInfo->psSyncData->ui32ReadOpsComplete = psSyncInfo->psSyncData->ui32ReadOpsPending; + } + + /* Copy status vals over */ + for (i = 0; i < psCCBKick->ui32NumTAStatusVals; i++) + { +#if defined(SUPPORT_SGX_NEW_STATUS_VALS) + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo = (PVRSRV_KERNEL_MEM_INFO*)psCCBKick->asTAStatusUpdate[i].hKernelMemInfo; + /* derive offset into meminfo and write the status value */ + *(IMG_UINT32*)((IMG_UINTPTR_T)psKernelMemInfo->pvLinAddrKM + + (psTACmd->sCtlTAStatusInfo[i].sStatusDevAddr.uiAddr + - psKernelMemInfo->sDevVAddr.uiAddr)) = psTACmd->sCtlTAStatusInfo[i].ui32StatusValue; +#else + psSyncInfo = 
(PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ahTAStatusSyncInfo[i]; + psSyncInfo->psSyncData->ui32ReadOpsComplete = psTACmd->sCtlTAStatusInfo[i].ui32StatusValue; +#endif + } + + /* texture dependencies */ + for (i=0; iui32NumSrcSyncs; i++) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahSrcKernelSyncInfo[i]; + psSyncInfo->psSyncData->ui32ReadOpsComplete = psSyncInfo->psSyncData->ui32ReadOpsPending; + } + + if (psCCBKick->bTerminateOrAbort) + { + if (psCCBKick->ui32NumDstSyncObjects > 0) + { + PVRSRV_KERNEL_MEM_INFO *psHWDstSyncListMemInfo = + (PVRSRV_KERNEL_MEM_INFO *)psCCBKick->hKernelHWSyncListMemInfo; + SGXMKIF_HWDEVICE_SYNC_LIST *psHWDeviceSyncList = psHWDstSyncListMemInfo->pvLinAddrKM; + pahDstSyncHandles = psCCBKick->hDstSyncHandles; + for (i=0; iui32NumDstSyncObjects; i++) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)pahDstSyncHandles[i]; + if (psSyncInfo) + psSyncInfo->psSyncData->ui32WriteOpsComplete = psHWDeviceSyncList->asSyncData[i].ui32WriteOpsPendingVal+1; + } + } + + /* Copy status vals over */ + for (i = 0; i < psCCBKick->ui32Num3DStatusVals; i++) + { +#if defined(SUPPORT_SGX_NEW_STATUS_VALS) + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo = (PVRSRV_KERNEL_MEM_INFO*)psCCBKick->as3DStatusUpdate[i].hKernelMemInfo; + /* derive offset into meminfo and write the status value */ + *(IMG_UINT32*)((IMG_UINTPTR_T)psKernelMemInfo->pvLinAddrKM + + (psTACmd->sCtl3DStatusInfo[i].sStatusDevAddr.uiAddr + - psKernelMemInfo->sDevVAddr.uiAddr)) = psTACmd->sCtl3DStatusInfo[i].ui32StatusValue; +#else + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ah3DStatusSyncInfo[i]; + psSyncInfo->psSyncData->ui32ReadOpsComplete = psTACmd->sCtl3DStatusInfo[i].ui32StatusValue; +#endif + } + } +#if defined(SUPPORT_DMABUF) + PVRLinuxFenceCheckAll(); +#endif +#endif + PVR_TTRACE(PVRSRV_TRACE_GROUP_KICK, PVRSRV_TRACE_CLASS_FUNCTION_EXIT, + KICK_TOKEN_DOKICK); +#if defined(SUPPORT_PVRSRV_ANDROID_SYSTRACE) && defined(EUR_CR_TIMER) + SystraceTAKick(psDevInfo, psCCBKick->ui32FrameNum, 
psCCBKick->sHWRTDataDevAddr.uiAddr, psCCBKick->bIsFirstKick); +#endif + return eError; +} + +/****************************************************************************** + End of file (sgxkick.c) +******************************************************************************/ diff --git a/sgx_km/eurasia_km/services4/srvkm/devices/sgx/sgxpower.c b/sgx_km/eurasia_km/services4/srvkm/devices/sgx/sgxpower.c new file mode 100644 index 0000000..75cdd06 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/devices/sgx/sgxpower.c @@ -0,0 +1,666 @@ +/*************************************************************************/ /*! +@Title Device specific power routines +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include + +#include "sgxdefs.h" +#include "services_headers.h" +#include "sgxapi_km.h" +#include "sgx_mkif_km.h" +#include "sgxutils.h" +#include "pdump_km.h" + +extern IMG_UINT32 g_ui32HostIRQCountSample; + +#if defined(SUPPORT_HW_RECOVERY) +static PVRSRV_ERROR SGXAddTimer(PVRSRV_DEVICE_NODE *psDeviceNode, + SGX_TIMING_INFORMATION *psSGXTimingInfo, + IMG_HANDLE *phTimer) +{ + /* + Install timer callback for HW recovery at 50 times lower + frequency than the microkernel timer. 
+ */ + *phTimer = OSAddTimer(SGXOSTimer, psDeviceNode, + 1000 * 50 / psSGXTimingInfo->ui32uKernelFreq); + if(*phTimer == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR,"SGXAddTimer : Failed to register timer callback function")); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + return PVRSRV_OK; +} +#endif /* SUPPORT_HW_RECOVERY*/ + + +/*! +****************************************************************************** + + @Function SGXUpdateTimingInfo + + @Description + + Derives the microkernel timing info from the system-supplied values + + @Input psDeviceNode : SGX Device node + + @Return PVRSRV_ERROR : + +******************************************************************************/ +static PVRSRV_ERROR SGXUpdateTimingInfo(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; +#if defined(SGX_DYNAMIC_TIMING_INFO) + SGX_TIMING_INFORMATION sSGXTimingInfo = {0}; +#else + SGX_DEVICE_MAP *psSGXDeviceMap; +#endif + IMG_UINT32 ui32ActivePowManSampleRate; + SGX_TIMING_INFORMATION *psSGXTimingInfo; + + +#if defined(SGX_DYNAMIC_TIMING_INFO) + psSGXTimingInfo = &sSGXTimingInfo; + SysGetSGXTimingInformation(psSGXTimingInfo); +#else + SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE_SGX, + (IMG_VOID**)&psSGXDeviceMap); + psSGXTimingInfo = &psSGXDeviceMap->sTimingInfo; +#endif + +#if defined(SUPPORT_HW_RECOVERY) + { + PVRSRV_ERROR eError; + IMG_UINT32 ui32OlduKernelFreq; + + if (psDevInfo->hTimer != IMG_NULL) + { + ui32OlduKernelFreq = psDevInfo->ui32CoreClockSpeed / psDevInfo->ui32uKernelTimerClock; + if (ui32OlduKernelFreq != psSGXTimingInfo->ui32uKernelFreq) + { + /* + The ukernel timer frequency has changed. 
+ */ + IMG_HANDLE hNewTimer; + + eError = SGXAddTimer(psDeviceNode, psSGXTimingInfo, &hNewTimer); + if (eError == PVRSRV_OK) + { + eError = OSRemoveTimer(psDevInfo->hTimer); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SGXUpdateTimingInfo: Failed to remove timer")); + } + psDevInfo->hTimer = hNewTimer; + } + else + { + /* Failed to allocate the new timer, leave the old one. */ + } + } + } + else + { + eError = SGXAddTimer(psDeviceNode, psSGXTimingInfo, &psDevInfo->hTimer); + if (eError != PVRSRV_OK) + { + return eError; + } + } + + psDevInfo->psSGXHostCtl->ui32HWRecoverySampleRate = + psSGXTimingInfo->ui32uKernelFreq / psSGXTimingInfo->ui32HWRecoveryFreq; + } +#endif /* SUPPORT_HW_RECOVERY*/ + + /* Copy the SGX clock speed for use in the kernel */ + psDevInfo->ui32CoreClockSpeed = psSGXTimingInfo->ui32CoreClockSpeed; + psDevInfo->ui32uKernelTimerClock = psSGXTimingInfo->ui32CoreClockSpeed / psSGXTimingInfo->ui32uKernelFreq; + + /* FIXME: no need to duplicate - remove it from psDevInfo */ + psDevInfo->psSGXHostCtl->ui32uKernelTimerClock = psDevInfo->ui32uKernelTimerClock; +#if defined(PDUMP) + PDUMPCOMMENT("Host Control - Microkernel clock"); + PDUMPMEM(IMG_NULL, psDevInfo->psKernelSGXHostCtlMemInfo, + offsetof(SGXMKIF_HOST_CTL, ui32uKernelTimerClock), + sizeof(IMG_UINT32), PDUMP_FLAGS_CONTINUOUS, + MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo)); +#endif /* PDUMP */ + + if (psSGXTimingInfo->bEnableActivePM) + { + ui32ActivePowManSampleRate = + psSGXTimingInfo->ui32uKernelFreq * psSGXTimingInfo->ui32ActivePowManLatencyms / 1000; + /* + ui32ActivePowerCounter has the value 0 when SGX is not idle. + When SGX becomes idle, the value of ui32ActivePowerCounter is changed from 0 to ui32ActivePowManSampleRate. + The ukernel timer routine decrements the value of ui32ActivePowerCounter if it is not 0. + When the ukernel timer decrements ui32ActivePowerCounter from 1 to 0, the ukernel timer will + request power down. 
+ Therefore the minimum value of ui32ActivePowManSampleRate is 1. + */ + ui32ActivePowManSampleRate += 1; + } + else + { + ui32ActivePowManSampleRate = 0; + } + + psDevInfo->psSGXHostCtl->ui32ActivePowManSampleRate = ui32ActivePowManSampleRate; +#if defined(PDUMP) + PDUMPMEM(IMG_NULL, psDevInfo->psKernelSGXHostCtlMemInfo, + offsetof(SGXMKIF_HOST_CTL, ui32ActivePowManSampleRate), + sizeof(IMG_UINT32), PDUMP_FLAGS_CONTINUOUS, + MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo)); +#endif /* PDUMP */ + + return PVRSRV_OK; +} + + +/*! +****************************************************************************** + + @Function SGXStartTimer + + @Description + + Start the microkernel timer + + @Input psDevInfo : SGX Device Info + + @Return IMG_VOID : + +******************************************************************************/ +static IMG_VOID SGXStartTimer(PVRSRV_SGXDEV_INFO *psDevInfo) +{ + #if defined(SUPPORT_HW_RECOVERY) + PVRSRV_ERROR eError; + + eError = OSEnableTimer(psDevInfo->hTimer); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SGXStartTimer : Failed to enable host timer")); + } + #else + PVR_UNREFERENCED_PARAMETER(psDevInfo); + #endif /* SUPPORT_HW_RECOVERY */ +} + + +#if defined(SGX_FEATURE_AUTOCLOCKGATING) +/*! +****************************************************************************** + + @Function SGXPollForClockGating + + @Description + + Wait until the SGX core clocks have gated. 
+ + @Input psDevInfo : SGX Device Info + @Input ui32Register : Offset of register to poll + @Input ui32Register : Value of register to poll for + @Input pszComment : Description of poll + + @Return IMG_VOID : + +******************************************************************************/ +static IMG_VOID SGXPollForClockGating (PVRSRV_SGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32Register, + IMG_UINT32 ui32RegisterValue, + IMG_CHAR *pszComment) +{ + PVR_UNREFERENCED_PARAMETER(psDevInfo); + PVR_UNREFERENCED_PARAMETER(ui32Register); + PVR_UNREFERENCED_PARAMETER(ui32RegisterValue); + PVR_UNREFERENCED_PARAMETER(pszComment); + + #if !defined(NO_HARDWARE) + PVR_ASSERT(psDevInfo != IMG_NULL); + + /* PRQA S 0505 1 */ /* QAC does not like assert() */ + if (PollForValueKM((IMG_UINT32 *)psDevInfo->pvRegsBaseKM + (ui32Register >> 2), + 0, + ui32RegisterValue, + MAX_HW_TIME_US, + MAX_HW_TIME_US/WAIT_TRY_COUNT, + IMG_FALSE) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SGXPollForClockGating: %s failed.", pszComment)); + SGXDumpDebugInfo(psDevInfo, IMG_FALSE); + PVR_DBG_BREAK; + } + #endif /* NO_HARDWARE */ + + PDUMPCOMMENT("%s", pszComment); + PDUMPREGPOL(SGX_PDUMPREG_NAME, ui32Register, 0, ui32RegisterValue, PDUMP_POLL_OPERATOR_EQUAL); +} +#endif + + +/*! 
+****************************************************************************** + + @Function SGXPrePowerState + + @Description + + does necessary preparation before power state transition + + @Input hDevHandle : SGX Device Node + @Input eNewPowerState : New power state + @Input eCurrentPowerState : Current power state + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR SGXPrePowerState (IMG_HANDLE hDevHandle, + PVRSRV_DEV_POWER_STATE eNewPowerState, + PVRSRV_DEV_POWER_STATE eCurrentPowerState) +{ + if ((eNewPowerState != eCurrentPowerState) && + (eNewPowerState != PVRSRV_DEV_POWER_STATE_ON)) + { + PVRSRV_ERROR eError; + PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; + PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + IMG_UINT32 ui32PowerCmd, ui32CompleteStatus; + SGXMKIF_COMMAND sCommand = {0}; +#if defined(SGX_FEATURE_AUTOCLOCKGATING) + IMG_UINT32 ui32Core; + IMG_UINT32 ui32CoresEnabled; +#endif + + #if defined(SUPPORT_HW_RECOVERY) + /* Disable timer callback for HW recovery */ + eError = OSDisableTimer(psDevInfo->hTimer); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SGXPrePowerState: Failed to disable timer")); + return eError; + } + #endif /* SUPPORT_HW_RECOVERY */ + + if (eNewPowerState == PVRSRV_DEV_POWER_STATE_OFF) + { + /* Request the ukernel to idle SGX and save its state. */ + ui32PowerCmd = PVRSRV_POWERCMD_POWEROFF; + ui32CompleteStatus = PVRSRV_USSE_EDM_POWMAN_POWEROFF_COMPLETE; + PDUMPCOMMENT("SGX power off request"); + } + else + { + /* Request the ukernel to idle SGX. 
*/ + ui32PowerCmd = PVRSRV_POWERCMD_IDLE; + ui32CompleteStatus = PVRSRV_USSE_EDM_POWMAN_IDLE_COMPLETE; + PDUMPCOMMENT("SGX idle request"); + } + + sCommand.ui32Data[1] = ui32PowerCmd; + + eError = SGXScheduleCCBCommand(psDeviceNode, SGXMKIF_CMD_POWER, &sCommand, KERNEL_ID, 0, IMG_NULL, IMG_FALSE); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SGXPrePowerState: Failed to submit power down command")); + return eError; + } + + /* Wait for the ukernel to complete processing. */ + #if !defined(NO_HARDWARE) + if (PollForValueKM(&psDevInfo->psSGXHostCtl->ui32PowerStatus, + ui32CompleteStatus, + ui32CompleteStatus, + MAX_HW_TIME_US, + MAX_HW_TIME_US/WAIT_TRY_COUNT, + IMG_FALSE) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SGXPrePowerState: Wait for SGX ukernel power transition failed.")); + SGXDumpDebugInfo(psDevInfo, IMG_FALSE); + PVR_DBG_BREAK; + } + #endif /* NO_HARDWARE */ + + if (psDevInfo->bSGXIdle == IMG_FALSE) + { + psDevInfo->bSGXIdle = IMG_TRUE; + SysSGXIdleEntered(); + } + + #if defined(PDUMP) + PDUMPCOMMENT("TA/3D CCB Control - Wait for power event on uKernel."); + PDUMPMEMPOL(psDevInfo->psKernelSGXHostCtlMemInfo, + offsetof(SGXMKIF_HOST_CTL, ui32PowerStatus), + ui32CompleteStatus, + ui32CompleteStatus, + PDUMP_POLL_OPERATOR_EQUAL, + 0, + MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo)); + #endif /* PDUMP */ + + /* Wait for the pending ukernel to host interrupts to come back. 
*/ + #if !defined(NO_HARDWARE) && defined(SUPPORT_LISR_MISR_SYNC) + if (PollForValueKM(&g_ui32HostIRQCountSample, + psDevInfo->psSGXHostCtl->ui32InterruptCount, + 0xffffffff, + MAX_HW_TIME_US, + MAX_HW_TIME_US/WAIT_TRY_COUNT, + IMG_FALSE) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SGXPrePowerState: Wait for pending interrupts failed.")); + SGXDumpDebugInfo(psDevInfo, IMG_FALSE); + PVR_DBG_BREAK; + } + #endif /* NO_HARDWARE && SUPPORT_LISR_MISR_SYNC*/ + +#if defined(SGX_FEATURE_AUTOCLOCKGATING) + if(psDevInfo->bDisableClockGating == IMG_FALSE) + { +#if defined(SGX_FEATURE_MP) + ui32CoresEnabled = ((OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_CORE) & EUR_CR_MASTER_CORE_ENABLE_MASK) >> EUR_CR_MASTER_CORE_ENABLE_SHIFT) + 1; +#else + ui32CoresEnabled = 1; +#endif + + for (ui32Core = 0; ui32Core < ui32CoresEnabled; ui32Core++) + { + /* Wait for SGX clock gating. */ + SGXPollForClockGating(psDevInfo, + SGX_MP_CORE_SELECT(psDevInfo->ui32ClkGateStatusReg, ui32Core), + psDevInfo->ui32ClkGateStatusMask, + "Wait for SGX clock gating"); + } + +#if defined(SGX_FEATURE_MP) + /* Wait for SGX master clock gating. */ + SGXPollForClockGating(psDevInfo, + psDevInfo->ui32MasterClkGateStatusReg, + psDevInfo->ui32MasterClkGateStatusMask, + "Wait for SGX master clock gating"); + + SGXPollForClockGating(psDevInfo, + psDevInfo->ui32MasterClkGateStatus2Reg, + psDevInfo->ui32MasterClkGateStatus2Mask, + "Wait for SGX master clock gating (2)"); +#endif /* SGX_FEATURE_MP */ + } +#endif /* defined(SGX_FEATURE_AUTOCLOCKGATING) */ + + if (eNewPowerState == PVRSRV_DEV_POWER_STATE_OFF) + { + /* Finally, de-initialise some registers. */ + eError = SGXDeinitialise(psDevInfo); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SGXPrePowerState: SGXDeinitialise failed: %u", eError)); + return eError; + } + } + } + + return PVRSRV_OK; +} + + +/*! 
+****************************************************************************** + + @Function SGXPostPowerState + + @Description + + does necessary preparation after power state transition + + @Input hDevHandle : SGX Device Node + @Input eNewPowerState : New power state + @Input eCurrentPowerState : Current power state + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR SGXPostPowerState (IMG_HANDLE hDevHandle, + PVRSRV_DEV_POWER_STATE eNewPowerState, + PVRSRV_DEV_POWER_STATE eCurrentPowerState) +{ + if ((eNewPowerState != eCurrentPowerState) && + (eCurrentPowerState != PVRSRV_DEV_POWER_STATE_ON)) + { + PVRSRV_ERROR eError; + PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; + PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + SGXMKIF_HOST_CTL *psSGXHostCtl = psDevInfo->psSGXHostCtl; + + /* Reset the power manager flags. */ + psSGXHostCtl->ui32PowerStatus = 0; + #if defined(PDUMP) + PDUMPCOMMENT("Host Control - Reset power status"); + PDUMPMEM(IMG_NULL, psDevInfo->psKernelSGXHostCtlMemInfo, + offsetof(SGXMKIF_HOST_CTL, ui32PowerStatus), + sizeof(IMG_UINT32), PDUMP_FLAGS_CONTINUOUS, + MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo)); + #endif /* PDUMP */ + + if (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_OFF) + { + /* + Coming up from off, re-initialise SGX. + */ + + /* + Re-generate the timing data required by SGX. + */ + eError = SGXUpdateTimingInfo(psDeviceNode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SGXPostPowerState: SGXUpdateTimingInfo failed")); + return eError; + } + + /* + Run the SGX init script. + */ + eError = SGXInitialise(psDevInfo, IMG_FALSE); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SGXPostPowerState: SGXInitialise failed")); + return eError; + } + } + else + { + /* + Coming up from idle, restart the ukernel. 
+ */ + SGXMKIF_COMMAND sCommand = {0}; + + sCommand.ui32Data[1] = PVRSRV_POWERCMD_RESUME; + eError = SGXScheduleCCBCommand(psDeviceNode, SGXMKIF_CMD_POWER, &sCommand, ISR_ID, 0, IMG_NULL, IMG_FALSE); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SGXPostPowerState failed to schedule CCB command: %u", eError)); + return eError; + } + } + + SGXStartTimer(psDevInfo); + } + + return PVRSRV_OK; +} + + +/*! +****************************************************************************** + + @Function SGXPreClockSpeedChange + + @Description + + Does processing required before an SGX clock speed change. + + @Input hDevHandle : SGX Device Node + @Input bIdleDevice : Whether the microkernel needs to be idled + @Input eCurrentPowerState : Power state of the device + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR SGXPreClockSpeedChange (IMG_HANDLE hDevHandle, + IMG_BOOL bIdleDevice, + PVRSRV_DEV_POWER_STATE eCurrentPowerState) +{ + PVRSRV_ERROR eError; + PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; + PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + PVR_UNREFERENCED_PARAMETER(psDevInfo); + + if (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON) + { + if (bIdleDevice) + { + /* + * Idle SGX. + */ + PDUMPSUSPEND(); + + eError = SGXPrePowerState(hDevHandle, PVRSRV_DEV_POWER_STATE_IDLE, + PVRSRV_DEV_POWER_STATE_ON); + + if (eError != PVRSRV_OK) + { + PDUMPRESUME(); + return eError; + } + } + else + { + #if defined(SUPPORT_HW_RECOVERY) + PVRSRV_ERROR eError; + + eError = OSDisableTimer(psDevInfo->hTimer); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SGXStartTimer : Failed to enable host timer")); + } + #endif /* SUPPORT_HW_RECOVERY */ + } + } + + PVR_DPF((PVR_DBG_MESSAGE,"SGXPreClockSpeedChange: SGX clock speed was %uHz", + psDevInfo->ui32CoreClockSpeed)); + + return PVRSRV_OK; +} + + +/*! 
+****************************************************************************** + + @Function SGXPostClockSpeedChange + + @Description + + Does processing required after an SGX clock speed change. + + @Input hDevHandle : SGX Device Node + @Input bIdleDevice : Whether the microkernel had been idled previously + @Input eCurrentPowerState : Power state of the device + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR SGXPostClockSpeedChange (IMG_HANDLE hDevHandle, + IMG_BOOL bIdleDevice, + PVRSRV_DEV_POWER_STATE eCurrentPowerState) +{ + PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; + PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + IMG_UINT32 ui32OldClockSpeed = psDevInfo->ui32CoreClockSpeed; + + PVR_UNREFERENCED_PARAMETER(ui32OldClockSpeed); + + if (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON) + { + PVRSRV_ERROR eError; + + /* + Re-generate the timing data required by SGX. + */ + eError = SGXUpdateTimingInfo(psDeviceNode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SGXPostPowerState: SGXUpdateTimingInfo failed")); + return eError; + } + + if (bIdleDevice) + { + /* + * Resume SGX. 
+ */ + eError = SGXPostPowerState(hDevHandle, PVRSRV_DEV_POWER_STATE_ON, + PVRSRV_DEV_POWER_STATE_IDLE); + + PDUMPRESUME(); + + if (eError != PVRSRV_OK) + { + return eError; + } + } + else + { + SGXStartTimer(psDevInfo); + } + } + + PVR_DPF((PVR_DBG_MESSAGE,"SGXPostClockSpeedChange: SGX clock speed changed from %uHz to %uHz", + ui32OldClockSpeed, psDevInfo->ui32CoreClockSpeed)); + + return PVRSRV_OK; +} + + +/****************************************************************************** + End of file (sgxpower.c) +******************************************************************************/ diff --git a/sgx_km/eurasia_km/services4/srvkm/devices/sgx/sgxreset.c b/sgx_km/eurasia_km/services4/srvkm/devices/sgx/sgxreset.c new file mode 100644 index 0000000..ecf0e62 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/devices/sgx/sgxreset.c @@ -0,0 +1,824 @@ +/*************************************************************************/ /*! +@Title Device specific reset routines +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "sgxdefs.h" +#include "sgxmmu.h" +#include "services_headers.h" +#include "sgxinfokm.h" +#include "sgxconfig.h" +#include "sgxutils.h" + +#include "pdump_km.h" + + +/*! +******************************************************************************* + + @Function SGXInitClocks + + @Description + Initialise the SGX clocks + + @Input psDevInfo - device info. 
structure + @Input ui32PDUMPFlags - flags to control PDUMP output + + @Return IMG_VOID + +******************************************************************************/ +IMG_VOID SGXInitClocks(PVRSRV_SGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32PDUMPFlags) +{ + IMG_UINT32 ui32RegVal; + +#if !defined(PDUMP) + PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags); +#endif /* PDUMP */ + + ui32RegVal = psDevInfo->ui32ClkGateCtl; + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_CLKGATECTL, ui32RegVal); + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_CLKGATECTL, ui32RegVal, ui32PDUMPFlags); + +#if defined(EUR_CR_CLKGATECTL2) + ui32RegVal = psDevInfo->ui32ClkGateCtl2; + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_CLKGATECTL2, ui32RegVal); + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_CLKGATECTL2, ui32RegVal, ui32PDUMPFlags); +#endif +} + + +/*! +******************************************************************************* + + @Function SGXResetInitBIFContexts + + @Description + Initialise the BIF memory contexts + + @Input psDevInfo - SGX Device Info + + @Return IMG_VOID + +******************************************************************************/ +static IMG_VOID SGXResetInitBIFContexts(PVRSRV_SGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32PDUMPFlags) +{ + IMG_UINT32 ui32RegVal; + +#if !defined(PDUMP) + PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags); +#endif /* PDUMP */ + + ui32RegVal = 0; + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal); + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags); + +#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS) + PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Initialise the BIF bank settings\r\n"); + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_BANK_SET, ui32RegVal); + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_BANK_SET, ui32RegVal, ui32PDUMPFlags); + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_BANK0, ui32RegVal); + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_BANK0, ui32RegVal, ui32PDUMPFlags); 
+#endif /* SGX_FEATURE_MULTIPLE_MEM_CONTEXTS */ + + PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Initialise the BIF directory list\r\n"); + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_DIR_LIST_BASE0, ui32RegVal); + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_DIR_LIST_BASE0, ui32RegVal, ui32PDUMPFlags); + +#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS) + { + IMG_UINT32 ui32DirList, ui32DirListReg; + + for (ui32DirList = 1; + ui32DirList < SGX_FEATURE_BIF_NUM_DIRLISTS; + ui32DirList++) + { + ui32DirListReg = EUR_CR_BIF_DIR_LIST_BASE1 + 4 * (ui32DirList - 1); + OSWriteHWReg(psDevInfo->pvRegsBaseKM, ui32DirListReg, ui32RegVal); + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, ui32DirListReg, ui32RegVal, ui32PDUMPFlags); + } + } +#endif /* SGX_FEATURE_MULTIPLE_MEM_CONTEXTS */ +} + + +/*! +******************************************************************************* + + @Function SGXResetSetupBIFContexts + + @Description + Configure the BIF for the EDM context + + @Input psDevInfo - SGX Device Info + + @Return IMG_VOID + +******************************************************************************/ +static IMG_VOID SGXResetSetupBIFContexts(PVRSRV_SGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32PDUMPFlags) +{ + IMG_UINT32 ui32RegVal; + +#if !defined(PDUMP) + PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags); +#endif /* PDUMP */ + + #if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS) + /* Set up EDM for bank 0 to point at kernel context */ + ui32RegVal = (SGX_BIF_DIR_LIST_INDEX_EDM << EUR_CR_BIF_BANK0_INDEX_EDM_SHIFT); + + #if defined(SGX_FEATURE_2D_HARDWARE) && !defined(SGX_FEATURE_PTLA) + /* Set up 2D core for bank 0 to point at kernel context */ + ui32RegVal |= (SGX_BIF_DIR_LIST_INDEX_EDM << EUR_CR_BIF_BANK0_INDEX_2D_SHIFT); + #endif /* SGX_FEATURE_2D_HARDWARE */ + + #if defined(FIX_HW_BRN_23410) + /* Set up TA core for bank 0 to point at kernel context to guarantee it is a valid context */ + ui32RegVal |= (SGX_BIF_DIR_LIST_INDEX_EDM << EUR_CR_BIF_BANK0_INDEX_TA_SHIFT); + #endif /* 
FIX_HW_BRN_23410 */ + + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_BANK0, ui32RegVal); + PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Set up EDM requestor page table in BIF\r\n"); + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_BANK0, ui32RegVal, ui32PDUMPFlags); + #endif /* defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS) */ + + { + IMG_UINT32 ui32EDMDirListReg; + + /* Set up EDM context with kernel page directory */ + #if (SGX_BIF_DIR_LIST_INDEX_EDM == 0) + ui32EDMDirListReg = EUR_CR_BIF_DIR_LIST_BASE0; + #else + /* Bases 0 and 1 are not necessarily contiguous */ + ui32EDMDirListReg = EUR_CR_BIF_DIR_LIST_BASE1 + 4 * (SGX_BIF_DIR_LIST_INDEX_EDM - 1); + #endif /* SGX_BIF_DIR_LIST_INDEX_EDM */ + + ui32RegVal = (IMG_UINT32)(psDevInfo->sKernelPDDevPAddr.uiAddr >> SGX_MMU_PDE_ADDR_ALIGNSHIFT); + +#if defined(FIX_HW_BRN_28011) + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_DIR_LIST_BASE0, ui32RegVal); + PDUMPPDREGWITHFLAGS(&psDevInfo->sMMUAttrib, EUR_CR_BIF_DIR_LIST_BASE0, ui32RegVal, ui32PDUMPFlags, PDUMP_PD_UNIQUETAG); +#endif + + OSWriteHWReg(psDevInfo->pvRegsBaseKM, ui32EDMDirListReg, ui32RegVal); + PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Initialise the EDM's directory list base\r\n"); + PDUMPPDREGWITHFLAGS(&psDevInfo->sMMUAttrib, ui32EDMDirListReg, ui32RegVal, ui32PDUMPFlags, PDUMP_PD_UNIQUETAG); + } +} + + +/*! +******************************************************************************* + + @Function SGXResetSleep + + @Description + + Sleep for a short time to allow reset register writes to complete. + Required because no status registers are available to poll on. 
+ + @Input psDevInfo - SGX Device Info + @Input ui32PDUMPFlags - flags to control PDUMP output + @Input bPDump - Pdump the sleep + + @Return Nothing + +******************************************************************************/ +static IMG_VOID SGXResetSleep(PVRSRV_SGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32PDUMPFlags, + IMG_BOOL bPDump) +{ +#if defined(PDUMP) || defined(EMULATOR) + IMG_UINT32 ui32ReadRegister; + + #if defined(SGX_FEATURE_MP) + ui32ReadRegister = EUR_CR_MASTER_SOFT_RESET; + #else + ui32ReadRegister = EUR_CR_SOFT_RESET; + #endif /* SGX_FEATURE_MP */ +#endif + +#if !defined(PDUMP) + PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags); +#endif /* PDUMP */ + + /* Sleep for 100 SGX clocks */ + SGXWaitClocks(psDevInfo, 100); + if (bPDump) + { + PDUMPIDLWITHFLAGS(30, ui32PDUMPFlags); +#if defined(PDUMP) + PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Read back to flush the register writes\r\n"); + PDumpRegRead(SGX_PDUMPREG_NAME, ui32ReadRegister, ui32PDUMPFlags); +#endif + } + +#if defined(EMULATOR) + /* + Read a register to make sure we wait long enough on the emulator... + */ + OSReadHWReg(psDevInfo->pvRegsBaseKM, ui32ReadRegister); +#endif +} + + +#if !defined(SGX_FEATURE_MP) +/*! +******************************************************************************* + + @Function SGXResetSoftReset + + @Description + + Write to the SGX soft reset register. 
+ + @Input psDevInfo - SGX Device Info + @Input bResetBIF - Include the BIF in the soft reset + @Input ui32PDUMPFlags - flags to control PDUMP output + @Input bPDump - Pdump the sleep + + @Return Nothing + +******************************************************************************/ +static IMG_VOID SGXResetSoftReset(PVRSRV_SGXDEV_INFO *psDevInfo, + IMG_BOOL bResetBIF, + IMG_UINT32 ui32PDUMPFlags, + IMG_BOOL bPDump) +{ + IMG_UINT32 ui32SoftResetRegVal; + + ui32SoftResetRegVal = + /* add common reset bits: */ + EUR_CR_SOFT_RESET_DPM_RESET_MASK | + EUR_CR_SOFT_RESET_TA_RESET_MASK | + EUR_CR_SOFT_RESET_USE_RESET_MASK | + EUR_CR_SOFT_RESET_ISP_RESET_MASK | + EUR_CR_SOFT_RESET_TSP_RESET_MASK; + +/* add conditional reset bits: */ +#ifdef EUR_CR_SOFT_RESET_TWOD_RESET_MASK + ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_TWOD_RESET_MASK; +#endif +#if defined(EUR_CR_SOFT_RESET_TE_RESET_MASK) + ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_TE_RESET_MASK; +#endif +#if defined(EUR_CR_SOFT_RESET_MTE_RESET_MASK) + ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_MTE_RESET_MASK; +#endif +#if defined(EUR_CR_SOFT_RESET_ISP2_RESET_MASK) + ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_ISP2_RESET_MASK; +#endif +#if defined(EUR_CR_SOFT_RESET_PDS_RESET_MASK) + ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_PDS_RESET_MASK; +#endif +#if defined(EUR_CR_SOFT_RESET_PBE_RESET_MASK) + ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_PBE_RESET_MASK; +#endif +#if defined(EUR_CR_SOFT_RESET_CACHEL2_RESET_MASK) + ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_CACHEL2_RESET_MASK; +#endif +#if defined(EUR_CR_SOFT_RESET_TCU_L2_RESET_MASK) + ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_TCU_L2_RESET_MASK; +#endif +#if defined(EUR_CR_SOFT_RESET_UCACHEL2_RESET_MASK) + ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_UCACHEL2_RESET_MASK; +#endif +#if defined(EUR_CR_SOFT_RESET_MADD_RESET_MASK) + ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_MADD_RESET_MASK; +#endif +#if defined(EUR_CR_SOFT_RESET_ITR_RESET_MASK) + ui32SoftResetRegVal |= 
EUR_CR_SOFT_RESET_ITR_RESET_MASK; +#endif +#if defined(EUR_CR_SOFT_RESET_TEX_RESET_MASK) + ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_TEX_RESET_MASK; +#endif +#if defined(EUR_CR_SOFT_RESET_IDXFIFO_RESET_MASK) + ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_IDXFIFO_RESET_MASK; +#endif +#if defined(EUR_CR_SOFT_RESET_VDM_RESET_MASK) + ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_VDM_RESET_MASK; +#endif +#if defined(EUR_CR_SOFT_RESET_DCU_L2_RESET_MASK) + ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_DCU_L2_RESET_MASK; +#endif +#if defined(EUR_CR_SOFT_RESET_DCU_L0L1_RESET_MASK) + ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_DCU_L0L1_RESET_MASK; +#endif + +#if !defined(PDUMP) + PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags); +#endif /* PDUMP */ + + if (bResetBIF) + { + ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_BIF_RESET_MASK; + } + + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_SOFT_RESET, ui32SoftResetRegVal); + if (bPDump) + { + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_SOFT_RESET, ui32SoftResetRegVal, ui32PDUMPFlags); + } +} + + +/*! +******************************************************************************* + + @Function SGXResetInvalDC + + @Description + + Invalidate the BIF Directory Cache and wait for the operation to complete. + + @Input psDevInfo - SGX Device Info + @Input ui32PDUMPFlags - flags to control PDUMP output + + @Return Nothing + +******************************************************************************/ +static IMG_VOID SGXResetInvalDC(PVRSRV_SGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32PDUMPFlags, + IMG_BOOL bPDump) +{ + IMG_UINT32 ui32RegVal; + + /* Invalidate BIF Directory cache. 
*/ +#if defined(EUR_CR_BIF_CTRL_INVAL) + ui32RegVal = EUR_CR_BIF_CTRL_INVAL_ALL_MASK; + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL_INVAL, ui32RegVal); + if (bPDump) + { + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_CTRL_INVAL, ui32RegVal, ui32PDUMPFlags); + } +#else + ui32RegVal = EUR_CR_BIF_CTRL_INVALDC_MASK; + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal); + if (bPDump) + { + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags); + } + + ui32RegVal = 0; + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal); + if (bPDump) + { + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags); + } +#endif + SGXResetSleep(psDevInfo, ui32PDUMPFlags, bPDump); + +#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS) + { + /* + Wait for the DC invalidate to complete - indicated by + outstanding reads reaching zero. + */ + if (PollForValueKM((IMG_UINT32 *)((IMG_UINT8*)psDevInfo->pvRegsBaseKM + EUR_CR_BIF_MEM_REQ_STAT), + 0, + EUR_CR_BIF_MEM_REQ_STAT_READS_MASK, + MAX_HW_TIME_US, + MAX_HW_TIME_US/WAIT_TRY_COUNT, + IMG_FALSE) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"Wait for DC invalidate failed.")); + PVR_DBG_BREAK; + } + + if (bPDump) + { + PDUMPREGPOLWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_MEM_REQ_STAT, 0, EUR_CR_BIF_MEM_REQ_STAT_READS_MASK, ui32PDUMPFlags, PDUMP_POLL_OPERATOR_EQUAL); + } + } +#endif /* SGX_FEATURE_MULTIPLE_MEM_CONTEXTS */ +} +#endif /* SGX_FEATURE_MP */ + + +/*! +******************************************************************************* + + @Function SGXReset + + @Description + + Reset chip + + @Input psDevInfo - device info. 
structure + @Input bHardwareRecovery - true if recovering powered hardware, + false if powering up + @Input ui32PDUMPFlags - flags to control PDUMP output + + @Return IMG_VOID + +******************************************************************************/ +IMG_VOID SGXReset(PVRSRV_SGXDEV_INFO *psDevInfo, + IMG_BOOL bHardwareRecovery, + IMG_UINT32 ui32PDUMPFlags) +#if !defined(SGX_FEATURE_MP) +{ + IMG_UINT32 ui32RegVal; +#if defined(EUR_CR_BIF_INT_STAT_FAULT_REQ_MASK) + const IMG_UINT32 ui32BifFaultMask = EUR_CR_BIF_INT_STAT_FAULT_REQ_MASK; +#else + const IMG_UINT32 ui32BifFaultMask = EUR_CR_BIF_INT_STAT_FAULT_MASK; +#endif + +#if !defined(PDUMP) + PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags); +#endif /* PDUMP */ + + PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Start of SGX reset sequence\r\n"); + +#if defined(FIX_HW_BRN_23944) + /* Pause the BIF. */ + ui32RegVal = EUR_CR_BIF_CTRL_PAUSE_MASK; + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal); + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags); + + SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE); + + ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_INT_STAT); + if (ui32RegVal & ui32BifFaultMask) + { + /* Page fault needs to be cleared before resetting the BIF. 
*/ + ui32RegVal = EUR_CR_BIF_CTRL_PAUSE_MASK | EUR_CR_BIF_CTRL_CLEAR_FAULT_MASK; + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal); + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags); + + SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE); + + ui32RegVal = EUR_CR_BIF_CTRL_PAUSE_MASK; + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal); + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags); + + SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE); + } +#endif /* defined(FIX_HW_BRN_23944) */ + + /* Reset all including BIF */ + SGXResetSoftReset(psDevInfo, IMG_TRUE, ui32PDUMPFlags, IMG_TRUE); + + SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE); + + /* + Initialise the BIF state. + */ +#if defined(SGX_FEATURE_36BIT_MMU) + /* enable 36bit addressing mode if the MMU supports it*/ + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_36BIT_ADDRESSING, EUR_CR_BIF_36BIT_ADDRESSING_ENABLE_MASK); + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_36BIT_ADDRESSING, EUR_CR_BIF_36BIT_ADDRESSING_ENABLE_MASK, ui32PDUMPFlags); +#else + #if defined(EUR_CR_BIF_36BIT_ADDRESSING) + OSWriteHWReg(psDevInfo->pvRegsBaseKM, + EUR_CR_BIF_36BIT_ADDRESSING, + 0); + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, + EUR_CR_BIF_36BIT_ADDRESSING, + 0, + ui32PDUMPFlags); + #endif +#endif + + SGXResetInitBIFContexts(psDevInfo, ui32PDUMPFlags); + +#if defined(EUR_CR_BIF_MEM_ARB_CONFIG) + /* + Initialise the memory arbiter to its default state + */ + ui32RegVal = (12UL << EUR_CR_BIF_MEM_ARB_CONFIG_PAGE_SIZE_SHIFT) | + (7UL << EUR_CR_BIF_MEM_ARB_CONFIG_BEST_CNT_SHIFT) | + (12UL << EUR_CR_BIF_MEM_ARB_CONFIG_TTE_THRESH_SHIFT); + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_MEM_ARB_CONFIG, ui32RegVal); + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_MEM_ARB_CONFIG, ui32RegVal, ui32PDUMPFlags); +#endif /* EUR_CR_BIF_MEM_ARB_CONFIG */ + +#if defined(SGX_FEATURE_SYSTEM_CACHE) + #if defined(SGX_BYPASS_SYSTEM_CACHE) + /* 
set the SLC to bypass all accesses */ + ui32RegVal = MNE_CR_CTRL_BYPASS_ALL_MASK; + #else + #if defined(FIX_HW_BRN_26620) + ui32RegVal = 0; + #else + /* set the SLC to bypass cache-coherent accesses */ + ui32RegVal = MNE_CR_CTRL_BYP_CC_MASK; + #endif + #if defined(FIX_HW_BRN_34028) + /* Bypass the MNE for the USEC requester */ + ui32RegVal |= (8 << MNE_CR_CTRL_BYPASS_SHIFT); + #endif + #endif /* SGX_BYPASS_SYSTEM_CACHE */ + OSWriteHWReg(psDevInfo->pvRegsBaseKM, MNE_CR_CTRL, ui32RegVal); + PDUMPREG(SGX_PDUMPREG_NAME, MNE_CR_CTRL, ui32RegVal); +#endif /* SGX_FEATURE_SYSTEM_CACHE */ + + if (bHardwareRecovery) + { + /* + Set all requestors to the dummy PD which forces all memory + accesses to page fault. + This enables us to flush out BIF requests from parts of SGX + which do not have their own soft reset. + Note: sBIFResetPDDevPAddr.uiAddr is a relative address (2GB max) + MSB is the bus master flag; 1 == enabled + */ + ui32RegVal = (IMG_UINT32)psDevInfo->sBIFResetPDDevPAddr.uiAddr; + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_DIR_LIST_BASE0, ui32RegVal); + + SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE); + + /* Bring BIF out of reset. */ + SGXResetSoftReset(psDevInfo, IMG_FALSE, ui32PDUMPFlags, IMG_TRUE); + SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE); + + SGXResetInvalDC(psDevInfo, ui32PDUMPFlags, IMG_FALSE); + + /* + Check for a page fault from parts of SGX which do not have a reset. + */ + for (;;) + { + IMG_UINT32 ui32BifIntStat = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_INT_STAT); + IMG_DEV_VIRTADDR sBifFault; + IMG_UINT32 ui32PDIndex, ui32PTIndex; + + if ((ui32BifIntStat & ui32BifFaultMask) == 0) + { + break; + } + + /* + There is a page fault, so reset the BIF again, map in the dummy page, + bring the BIF up and invalidate the Directory Cache. 
+ */ + sBifFault.uiAddr = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_FAULT); + PVR_DPF((PVR_DBG_WARNING, "SGXReset: Page fault 0x%x/0x%x", ui32BifIntStat, sBifFault.uiAddr)); + ui32PDIndex = sBifFault.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT); + ui32PTIndex = (sBifFault.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT; + + /* Put the BIF into reset. */ + SGXResetSoftReset(psDevInfo, IMG_TRUE, ui32PDUMPFlags, IMG_FALSE); + + /* Map in the dummy page. */ + psDevInfo->pui32BIFResetPD[ui32PDIndex] = (IMG_UINT32)(psDevInfo->sBIFResetPTDevPAddr.uiAddr + >>SGX_MMU_PDE_ADDR_ALIGNSHIFT) + | SGX_MMU_PDE_PAGE_SIZE_4K + | SGX_MMU_PDE_VALID; + psDevInfo->pui32BIFResetPT[ui32PTIndex] = (IMG_UINT32)(psDevInfo->sBIFResetPageDevPAddr.uiAddr + >>SGX_MMU_PTE_ADDR_ALIGNSHIFT) + | SGX_MMU_PTE_VALID; + + /* Clear outstanding events. */ + ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS); + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR, ui32RegVal); + ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS2); + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR2, ui32RegVal); + + SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE); + + /* Bring the BIF out of reset. */ + SGXResetSoftReset(psDevInfo, IMG_FALSE, ui32PDUMPFlags, IMG_FALSE); + SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE); + + /* Invalidate Directory Cache. */ + SGXResetInvalDC(psDevInfo, ui32PDUMPFlags, IMG_FALSE); + + /* Unmap the dummy page and try again. */ + psDevInfo->pui32BIFResetPD[ui32PDIndex] = 0; + psDevInfo->pui32BIFResetPT[ui32PTIndex] = 0; + } + } + else + { + /* Bring BIF out of reset. */ + SGXResetSoftReset(psDevInfo, IMG_FALSE, ui32PDUMPFlags, IMG_TRUE); + SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE); + } + + /* + Initialise the BIF memory contexts before bringing the rest of SGX out of reset. 
+ */ + SGXResetSetupBIFContexts(psDevInfo, ui32PDUMPFlags); + +#if defined(SGX_FEATURE_2D_HARDWARE) && !defined(SGX_FEATURE_PTLA) + /* check that the heap base has the right alignment (1Mb) */ + #if ((SGX_2D_HEAP_BASE & ~EUR_CR_BIF_TWOD_REQ_BASE_ADDR_MASK) != 0) + #error "SGXReset: SGX_2D_HEAP_BASE doesn't match EUR_CR_BIF_TWOD_REQ_BASE_ADDR_MASK alignment" + #endif + /* Set up 2D requestor base */ + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_TWOD_REQ_BASE, SGX_2D_HEAP_BASE); + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_TWOD_REQ_BASE, SGX_2D_HEAP_BASE, ui32PDUMPFlags); +#endif + +#if defined(SGX_FEATURE_ADDRESS_SPACE_EXTENSION) + /*TODO: Set up USEC requestor base, here its set to ZERO (hard-coded) */ + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_USEC_REQ_BASE, 0); + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_USEC_REQ_BASE, 0, ui32PDUMPFlags); +#endif + + /* Invalidate BIF Directory cache. */ + SGXResetInvalDC(psDevInfo, ui32PDUMPFlags, IMG_TRUE); + + PVR_DPF((PVR_DBG_MESSAGE,"Soft Reset of SGX")); + + /* Take chip out of reset */ + ui32RegVal = 0; + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_SOFT_RESET, ui32RegVal); + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_SOFT_RESET, ui32RegVal, ui32PDUMPFlags); + + /* wait a bit */ + SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE); + + PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "End of SGX reset sequence\r\n"); +} + +#else + +{ + IMG_UINT32 ui32RegVal; + + PVR_UNREFERENCED_PARAMETER(bHardwareRecovery); + +#if !defined(PDUMP) + PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags); +#endif /* PDUMP */ + + PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Start of SGX MP reset sequence\r\n"); + + /* Put hydra into soft reset */ + ui32RegVal = EUR_CR_MASTER_SOFT_RESET_BIF_RESET_MASK | + EUR_CR_MASTER_SOFT_RESET_IPF_RESET_MASK | + EUR_CR_MASTER_SOFT_RESET_DPM_RESET_MASK | + EUR_CR_MASTER_SOFT_RESET_VDM_RESET_MASK; + + if (bHardwareRecovery) + { + ui32RegVal |= EUR_CR_MASTER_SOFT_RESET_MCI_RESET_MASK; + } + +#if 
defined(SGX_FEATURE_PTLA) + ui32RegVal |= EUR_CR_MASTER_SOFT_RESET_PTLA_RESET_MASK; +#endif +#if defined(SGX_FEATURE_SYSTEM_CACHE) + ui32RegVal |= EUR_CR_MASTER_SOFT_RESET_SLC_RESET_MASK; +#endif + + /* Hard reset the slave cores */ + ui32RegVal |= EUR_CR_MASTER_SOFT_RESET_CORE_RESET_MASK(0) | + EUR_CR_MASTER_SOFT_RESET_CORE_RESET_MASK(1) | + EUR_CR_MASTER_SOFT_RESET_CORE_RESET_MASK(2) | + EUR_CR_MASTER_SOFT_RESET_CORE_RESET_MASK(3); + + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_SOFT_RESET, ui32RegVal); + PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Soft reset hydra partition, hard reset the cores\r\n"); + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_MASTER_SOFT_RESET, ui32RegVal, ui32PDUMPFlags); + + SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE); + + ui32RegVal = 0; + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_BIF_CTRL, ui32RegVal); + PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Initialise the hydra BIF control\r\n"); + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_MASTER_BIF_CTRL, ui32RegVal, ui32PDUMPFlags); + +#if defined(SGX_FEATURE_SYSTEM_CACHE) + #if defined(SGX_BYPASS_SYSTEM_CACHE) + ui32RegVal = EUR_CR_MASTER_SLC_CTRL_BYPASS_ALL_MASK; + #else + ui32RegVal = EUR_CR_MASTER_SLC_CTRL_USSE_INVAL_REQ0_MASK | + #if defined(FIX_HW_BRN_30954) + EUR_CR_MASTER_SLC_CTRL_DISABLE_REORDERING_MASK | + #endif + #if defined(PVR_SLC_8KB_ADDRESS_MODE) + (4 << EUR_CR_MASTER_SLC_CTRL_ADDR_DECODE_MODE_SHIFT) | + #endif + #if defined(FIX_HW_BRN_33809) + (1 << EUR_CR_MASTER_SLC_CTRL_ADDR_DECODE_MODE_SHIFT) | + #endif + (0xC << EUR_CR_MASTER_SLC_CTRL_ARB_PAGE_SIZE_SHIFT); + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_SLC_CTRL, ui32RegVal); + PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Initialise the hydra SLC control\r\n"); + PDUMPREG(SGX_PDUMPREG_NAME, EUR_CR_MASTER_SLC_CTRL, ui32RegVal); + + ui32RegVal = EUR_CR_MASTER_SLC_CTRL_BYPASS_BYP_CC_MASK; + #if defined(FIX_HW_BRN_31620) + ui32RegVal |= EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_MMU_MASK; + #endif + #if 
defined(FIX_HW_BRN_31195) + ui32RegVal |= EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_USE0_MASK | + EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_USE1_MASK | + EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_USE2_MASK | + EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_USE3_MASK | + EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_TA_MASK; + #endif + #endif /* SGX_BYPASS_SYSTEM_CACHE */ + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_SLC_CTRL_BYPASS, ui32RegVal); + PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Initialise the hydra SLC bypass control\r\n"); + PDUMPREG(SGX_PDUMPREG_NAME, EUR_CR_MASTER_SLC_CTRL_BYPASS, ui32RegVal); +#endif /* SGX_FEATURE_SYSTEM_CACHE */ + + SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE); + + /* Remove the resets */ + ui32RegVal = 0; + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_SOFT_RESET, ui32RegVal); + PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Remove the resets from all of SGX\r\n"); + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_MASTER_SOFT_RESET, ui32RegVal, ui32PDUMPFlags); + + SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE); + + PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Turn on the slave cores' clock gating\r\n"); + SGXInitClocks(psDevInfo, ui32PDUMPFlags); + + SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE); + + PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Initialise the slave BIFs\r\n"); + +#if defined(FIX_HW_BRN_31278) || defined(FIX_HW_BRN_31620) || defined(FIX_HW_BRN_31671) || defined(FIX_HW_BRN_32085) + #if defined(FIX_HW_BRN_31278) || defined(FIX_HW_BRN_32085) + /* disable prefetch */ + ui32RegVal = (1<pvRegsBaseKM, EUR_CR_MASTER_BIF_MMU_CTRL, ui32RegVal); + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_MASTER_BIF_MMU_CTRL, ui32RegVal, ui32PDUMPFlags); + + #if defined(FIX_HW_BRN_31278) || defined(FIX_HW_BRN_32085) + /* disable prefetch */ + ui32RegVal = (1<pvRegsBaseKM, SGX_MP_CORE_SELECT(EUR_CR_BIF_MMU_CTRL, ui32Core), ui32RegVal); + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, SGX_MP_CORE_SELECT(EUR_CR_BIF_MMU_CTRL, ui32Core), ui32RegVal, ui32PDUMPFlags); + } + } +#endif + + 
SGXResetInitBIFContexts(psDevInfo, ui32PDUMPFlags); + SGXResetSetupBIFContexts(psDevInfo, ui32PDUMPFlags); + + PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "End of SGX MP reset sequence\r\n"); +} +#endif /* SGX_FEATURE_MP */ + + +/****************************************************************************** + End of file (sgxreset.c) +******************************************************************************/ diff --git a/sgx_km/eurasia_km/services4/srvkm/devices/sgx/sgxtransfer.c b/sgx_km/eurasia_km/services4/srvkm/devices/sgx/sgxtransfer.c new file mode 100644 index 0000000..a13630b --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/devices/sgx/sgxtransfer.c @@ -0,0 +1,1007 @@ +/*************************************************************************/ /*! +@Title Device specific transfer queue routines +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if defined(TRANSFER_QUEUE) + +#include <stddef.h> + +#include "sgxdefs.h" +#include "services_headers.h" +#include "buffer_manager.h" +#include "sgxinfo.h" +#include "sysconfig.h" +#include "pdump_km.h" +#include "mmu.h" +#include "pvr_bridge.h" +#include "sgx_bridge_km.h" +#include "sgxinfokm.h" +#include "osfunc.h" +#include "pvr_debug.h" +#include "sgxutils.h" +#include "ttrace.h" + +#if defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) || defined(PVR_ANDROID_NATIVE_WINDOW_HAS_FENCE) +#include "pvr_sync_common.h" +#endif + +#if defined(SUPPORT_DMABUF) +#include "pvr_linux_fence.h" +#endif + +IMG_EXPORT PVRSRV_ERROR SGXSubmitTransferKM(IMG_HANDLE hDevHandle, PVRSRV_TRANSFER_SGX_KICK *psKick) +{ + PVRSRV_KERNEL_MEM_INFO *psCCBMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psKick->hCCBMemInfo; + SGXMKIF_COMMAND sCommand = {0}; + SGXMKIF_TRANSFERCMD_SHARED *psSharedTransferCmd; + PVRSRV_KERNEL_SYNC_INFO *psSyncInfo; + PVRSRV_ERROR eError; + IMG_UINT32 loop; + IMG_HANDLE hDevMemContext = IMG_NULL; + IMG_BOOL abSrcSyncEnable[SGX_MAX_TRANSFER_SYNC_OPS]; + IMG_UINT32 ui32RealSrcSyncNum = 0; + IMG_BOOL abDstSyncEnable[SGX_MAX_TRANSFER_SYNC_OPS]; + IMG_UINT32 ui32RealDstSyncNum = 0; + +#if defined(SUPPORT_DMABUF) + IMG_UINT32 ui32FenceTag = 0; + IMG_UINT32 ui32NumResvObjs = 0; + IMG_BOOL bBlockingFences = IMG_FALSE; +#endif + + +#if defined(FIX_HW_BRN_31620) + hDevMemContext = psKick->hDevMemContext; +#endif + PVR_TTRACE(PVRSRV_TRACE_GROUP_TRANSFER, PVRSRV_TRACE_CLASS_FUNCTION_ENTER, TRANSFER_TOKEN_SUBMIT); + + for (loop = 0; loop < SGX_MAX_TRANSFER_SYNC_OPS; loop++) + { + abSrcSyncEnable[loop] = IMG_TRUE; + abDstSyncEnable[loop] = IMG_TRUE; + } + + if (!CCB_OFFSET_IS_VALID(SGXMKIF_TRANSFERCMD_SHARED, psCCBMemInfo, psKick, ui32SharedCmdCCBOffset)) + { + PVR_DPF((PVR_DBG_ERROR, "SGXSubmitTransferKM: Invalid CCB offset")); + PVR_TTRACE(PVRSRV_TRACE_GROUP_TRANSFER, PVRSRV_TRACE_CLASS_FUNCTION_EXIT, +
TRANSFER_TOKEN_SUBMIT); + return PVRSRV_ERROR_INVALID_PARAMS; + } + /* override QAC warning about stricter alignment */ + /* PRQA S 3305 1 */ + psSharedTransferCmd = CCB_DATA_FROM_OFFSET(SGXMKIF_TRANSFERCMD_SHARED, psCCBMemInfo, psKick, ui32SharedCmdCCBOffset); + + PVR_TTRACE(PVRSRV_TRACE_GROUP_TRANSFER, PVRSRV_TRACE_CLASS_CMD_START, TRANSFER_TOKEN_SUBMIT); + PVR_TTRACE_UI32(PVRSRV_TRACE_GROUP_TRANSFER, PVRSRV_TRACE_CLASS_CCB, + TRANSFER_TOKEN_CCB_OFFSET, psKick->ui32SharedCmdCCBOffset); + + if (psKick->hTASyncInfo != IMG_NULL) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hTASyncInfo; + + PVR_TTRACE_SYNC_OBJECT(PVRSRV_TRACE_GROUP_TRANSFER, TRANSFER_TOKEN_TA_SYNC, + psSyncInfo, PVRSRV_SYNCOP_SAMPLE); + + psSharedTransferCmd->ui32TASyncWriteOpsPendingVal = SyncTakeWriteOp(psSyncInfo, SYNC_OP_CLASS_TQ_3D); + psSharedTransferCmd->ui32TASyncReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending; + + psSharedTransferCmd->sTASyncWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr; + psSharedTransferCmd->sTASyncReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr; + } + else + { + psSharedTransferCmd->sTASyncWriteOpsCompleteDevVAddr.uiAddr = 0; + psSharedTransferCmd->sTASyncReadOpsCompleteDevVAddr.uiAddr = 0; + } + + if (psKick->h3DSyncInfo != IMG_NULL) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->h3DSyncInfo; + + PVR_TTRACE_SYNC_OBJECT(PVRSRV_TRACE_GROUP_TRANSFER, TRANSFER_TOKEN_3D_SYNC, + psSyncInfo, PVRSRV_SYNCOP_SAMPLE); + + psSharedTransferCmd->ui323DSyncWriteOpsPendingVal = SyncTakeWriteOp(psSyncInfo,SYNC_OP_CLASS_TQ_3D); + psSharedTransferCmd->ui323DSyncReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending; + + psSharedTransferCmd->s3DSyncWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr; + psSharedTransferCmd->s3DSyncReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr; + } + else + { + psSharedTransferCmd->s3DSyncWriteOpsCompleteDevVAddr.uiAddr = 0; + 
psSharedTransferCmd->s3DSyncReadOpsCompleteDevVAddr.uiAddr = 0; + } + + /* filter out multiple occurrences of the same sync object from srcs or dests + * note : the same sync can still be used to synchronize both src and dst. + */ + for (loop = 0; loop < MIN(SGX_MAX_TRANSFER_SYNC_OPS, psKick->ui32NumSrcSync); loop++) + { + IMG_UINT32 i; + + PVRSRV_KERNEL_SYNC_INFO * psMySyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[loop]; + + for (i = 0; i < loop; i++) + { + if (abSrcSyncEnable[i]) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[i]; + + if (psSyncInfo->sWriteOpsCompleteDevVAddr.uiAddr == psMySyncInfo->sWriteOpsCompleteDevVAddr.uiAddr) + { + PVR_DPF((PVR_DBG_WARNING, "SGXSubmitTransferKM : Same src synchronized multiple times!")); + abSrcSyncEnable[loop] = IMG_FALSE; + break; + } + } + } + if (abSrcSyncEnable[loop]) + { + ui32RealSrcSyncNum++; + } + } + for (loop = 0; loop < MIN(SGX_MAX_TRANSFER_SYNC_OPS, psKick->ui32NumDstSync); loop++) + { + IMG_UINT32 i; + + PVRSRV_KERNEL_SYNC_INFO * psMySyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahDstSyncInfo[loop]; + + for (i = 0; i < loop; i++) + { + if (abDstSyncEnable[i]) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahDstSyncInfo[i]; + + if (psSyncInfo->sWriteOpsCompleteDevVAddr.uiAddr == psMySyncInfo->sWriteOpsCompleteDevVAddr.uiAddr) + { + PVR_DPF((PVR_DBG_WARNING, "SGXSubmitTransferKM : Same dst synchronized multiple times!")); + abDstSyncEnable[loop] = IMG_FALSE; + break; + } + } + } + if (abDstSyncEnable[loop]) + { + ui32RealDstSyncNum++; + } + } + + if ((psKick->ui32Flags & SGXMKIF_TQFLAGS_KEEPPENDING) == 0UL) + { + IMG_UINT32 i = 0; + +#if defined(SUPPORT_DMABUF) + ui32NumResvObjs = PVRLinuxFenceNumResvObjs(&bBlockingFences, + psKick->ui32NumSrcSync, + psKick->ahSrcSyncInfo, + abSrcSyncEnable, + psKick->ui32NumDstSync, + psKick->ahDstSyncInfo, + abDstSyncEnable); + /* + * If there are no blocking fences, the GPU need not wait + * whilst the reservation objects are being 
processed. They + * can be processed later, after the kick. + */ + if (ui32NumResvObjs && bBlockingFences) + { + eError = PVRLinuxFenceProcess(&ui32FenceTag, + ui32NumResvObjs, + bBlockingFences, + psKick->ui32NumSrcSync, + psKick->ahSrcSyncInfo, + abSrcSyncEnable, + psKick->ui32NumDstSync, + psKick->ahDstSyncInfo, + abDstSyncEnable); + if (eError != PVRSRV_OK) + { + return eError; + } + } +#endif + for (loop = 0; loop < psKick->ui32NumSrcSync; loop++) + { + if (abSrcSyncEnable[loop]) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[loop]; + + PVR_TTRACE_SYNC_OBJECT(PVRSRV_TRACE_GROUP_TRANSFER, TRANSFER_TOKEN_SRC_SYNC, + psSyncInfo, PVRSRV_SYNCOP_SAMPLE); + + psSharedTransferCmd->asSrcSyncs[i].ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending; + psSharedTransferCmd->asSrcSyncs[i].ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending; + + psSharedTransferCmd->asSrcSyncs[i].sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr; + psSharedTransferCmd->asSrcSyncs[i].sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr; + i++; + } + } + PVR_ASSERT(i == ui32RealSrcSyncNum); + + i = 0; + for (loop = 0; loop < psKick->ui32NumDstSync; loop++) + { + if (abDstSyncEnable[loop]) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahDstSyncInfo[loop]; + + psSyncInfo->psSyncData->ui64LastWrite = ui64KickCount; + + PVR_TTRACE_SYNC_OBJECT(PVRSRV_TRACE_GROUP_TRANSFER, TRANSFER_TOKEN_DST_SYNC, + psSyncInfo, PVRSRV_SYNCOP_SAMPLE); + + psSharedTransferCmd->asDstSyncs[i].ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending; + psSharedTransferCmd->asDstSyncs[i].ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending; + psSharedTransferCmd->asDstSyncs[i].ui32ReadOps2PendingVal = psSyncInfo->psSyncData->ui32ReadOps2Pending; + + psSharedTransferCmd->asDstSyncs[i].sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr; + psSharedTransferCmd->asDstSyncs[i].sReadOpsCompleteDevVAddr 
= psSyncInfo->sReadOpsCompleteDevVAddr; + psSharedTransferCmd->asDstSyncs[i].sReadOps2CompleteDevVAddr = psSyncInfo->sReadOps2CompleteDevVAddr; + i++; + } + } + PVR_ASSERT(i == ui32RealDstSyncNum); + + /* + * We allow source and destination sync objects to be the + * same, which is why the read/write pending updates are delayed + * until the transfer command has been updated with the current + * values from the objects. + */ + for (loop = 0; loop < psKick->ui32NumSrcSync; loop++) + { + if (abSrcSyncEnable[loop]) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[loop]; + SyncTakeReadOp(psSyncInfo, SYNC_OP_CLASS_TQ_3D); + } + } + for (loop = 0; loop < psKick->ui32NumDstSync; loop++) + { + if (abDstSyncEnable[loop]) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahDstSyncInfo[loop]; + SyncTakeWriteOp(psSyncInfo, SYNC_OP_CLASS_TQ_3D); + } + } + +#if defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) || defined(PVR_ANDROID_NATIVE_WINDOW_HAS_FENCE) + if (ui32RealDstSyncNum <= (SGX_MAX_DST_SYNCS_TQ - 1) && psKick->iFenceFd > 0) + { + IMG_HANDLE ahSyncInfo[SGX_MAX_SRC_SYNCS_TA]; + PVRSRV_DEVICE_SYNC_OBJECT *apsDevSyncs = &psSharedTransferCmd->asDstSyncs[ui32RealDstSyncNum]; + IMG_UINT32 ui32NumSrcSyncs = 1; + IMG_UINT32 i; + ahSyncInfo[0] = (IMG_HANDLE)(uintptr_t)(psKick->iFenceFd - 1); + + eError = PVRSyncPatchTransferSyncInfos(ahSyncInfo, apsDevSyncs, &ui32NumSrcSyncs); + if (eError != PVRSRV_OK) + { + /* We didn't kick yet, or perform PDUMP processing, so we should + * be able to trivially roll back any changes made to the sync + * data. If we don't do this, we'll wedge services cleanup. 
+ */ + + for (loop = 0; loop < psKick->ui32NumDstSync; loop++) + { + if (abDstSyncEnable[loop]) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahDstSyncInfo[loop]; + psSyncInfo->psSyncData->ui32WriteOpsPending--; + } + } + + for (loop = 0; loop < psKick->ui32NumSrcSync; loop++) + { + if (abSrcSyncEnable[loop]) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[loop]; + psSyncInfo->psSyncData->ui32ReadOpsPending--; + } + } + + if (psKick->h3DSyncInfo != IMG_NULL) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->h3DSyncInfo; + psSyncInfo->psSyncData->ui32WriteOpsPending++; + } + + if (psKick->hTASyncInfo != IMG_NULL) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hTASyncInfo; + psSyncInfo->psSyncData->ui32WriteOpsPending--; + } + + PVR_DPF((PVR_DBG_ERROR, "SGXSubmitTransferKM: PVRSyncPatchTransferKickSyncInfos failed.")); + PVR_TTRACE(PVRSRV_TRACE_GROUP_TRANSFER, PVRSRV_TRACE_CLASS_FUNCTION_EXIT, + TRANSFER_TOKEN_SUBMIT); + return eError; + } + + /* Find a free dst sync to slot in our extra sync */ + for (loop = 0; loop < psKick->ui32NumDstSync; loop++) + { + if (abDstSyncEnable[loop]) + break; + } + + /* We shouldn't be in this code path if ui32RealDstSyncNum + * didn't allow for at least two free synchronization slots. 
+ */ + PVR_ASSERT(loop + ui32NumSrcSyncs <= SGX_MAX_TRANSFER_SYNC_OPS); + + /* Slot in the extra dst syncs */ + for (i = 0; i < ui32NumSrcSyncs; i++) + { + psKick->ahDstSyncInfo[loop + i] = ahSyncInfo[i]; + abDstSyncEnable[loop + i] = IMG_TRUE; + psKick->ui32NumDstSync++; + ui32RealDstSyncNum++; + } + } +#endif /* defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) */ + } + + psSharedTransferCmd->ui32NumSrcSyncs = ui32RealSrcSyncNum; + psSharedTransferCmd->ui32NumDstSyncs = ui32RealDstSyncNum; + +#if defined(PDUMP) + if (PDumpWillCapture(psKick->ui32PDumpFlags)) + { + PDUMPCOMMENT("Shared part of transfer command\r\n"); + PDUMPMEM(psSharedTransferCmd, + psCCBMemInfo, + psKick->ui32CCBDumpWOff, + sizeof(SGXMKIF_TRANSFERCMD_SHARED), + psKick->ui32PDumpFlags, + MAKEUNIQUETAG(psCCBMemInfo)); + + if ((psKick->ui32Flags & SGXMKIF_TQFLAGS_KEEPPENDING) == 0UL) + { + IMG_UINT32 i = 0; + + for (loop = 0; loop < psKick->ui32NumSrcSync; loop++) + { + if (abSrcSyncEnable[loop]) + { + IMG_UINT32 ui32PDumpReadOp2 = 0; + psSyncInfo = psKick->ahSrcSyncInfo[loop]; + + PDUMPCOMMENT("Tweak src surface write op in transfer cmd\r\n"); +#if defined(SUPPORT_PDUMP_SYNC_DEBUG) + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_PERSISTENT, "TQ Src: PDump sync sample: uiAddr = 0x%08x, ui32LastOpDumpVal = 0x%08x\r\n", + psSyncInfo->sWriteOpsCompleteDevVAddr.uiAddr, + psSyncInfo->psSyncData->ui32LastOpDumpVal); +#endif + PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal, + psCCBMemInfo, + psKick->ui32CCBDumpWOff + (IMG_UINT32)(offsetof(SGXMKIF_TRANSFERCMD_SHARED, asSrcSyncs) + i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32WriteOpsPendingVal)), + sizeof(psSyncInfo->psSyncData->ui32LastOpDumpVal), + psKick->ui32PDumpFlags, + MAKEUNIQUETAG(psCCBMemInfo)); + + PDUMPCOMMENT("Tweak src surface read op in transfer cmd\r\n"); + PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal, + psCCBMemInfo, + psKick->ui32CCBDumpWOff + (IMG_UINT32)(offsetof(SGXMKIF_TRANSFERCMD_SHARED, asSrcSyncs) + 
i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32ReadOpsPendingVal)), + sizeof(psSyncInfo->psSyncData->ui32LastReadOpDumpVal), + psKick->ui32PDumpFlags, + MAKEUNIQUETAG(psCCBMemInfo)); + + PDUMPCOMMENT("Tweak srv surface read op2 in transfer cmd\r\n"); + PDUMPMEM(&ui32PDumpReadOp2, + psCCBMemInfo, + psKick->ui32CCBDumpWOff + (IMG_UINT32)(offsetof(SGXMKIF_TRANSFERCMD_SHARED, asSrcSyncs) + i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32ReadOps2PendingVal)), + sizeof(ui32PDumpReadOp2), + psKick->ui32PDumpFlags, + MAKEUNIQUETAG(psCCBMemInfo)); + i++; + } + } + + i = 0; + for (loop = 0; loop < psKick->ui32NumDstSync; loop++) + { + if (abDstSyncEnable[i]) + { + IMG_UINT32 ui32PDumpReadOp2 = 0; + psSyncInfo = psKick->ahDstSyncInfo[loop]; + + PDUMPCOMMENT("Tweak dest surface write op in transfer cmd\r\n"); +#if defined(SUPPORT_PDUMP_SYNC_DEBUG) + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_PERSISTENT, "TQ Dst: PDump sync sample: uiAddr = 0x%08x, ui32LastOpDumpVal = 0x%08x\r\n", + psSyncInfo->sWriteOpsCompleteDevVAddr.uiAddr, + psSyncInfo->psSyncData->ui32LastOpDumpVal); +#endif + PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal, + psCCBMemInfo, + psKick->ui32CCBDumpWOff + (IMG_UINT32)(offsetof(SGXMKIF_TRANSFERCMD_SHARED, asDstSyncs) + i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32WriteOpsPendingVal)), + sizeof(psSyncInfo->psSyncData->ui32LastOpDumpVal), + psKick->ui32PDumpFlags, + MAKEUNIQUETAG(psCCBMemInfo)); + + PDUMPCOMMENT("Tweak dest surface read op in transfer cmd\r\n"); + PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal, + psCCBMemInfo, + psKick->ui32CCBDumpWOff + (IMG_UINT32)(offsetof(SGXMKIF_TRANSFERCMD_SHARED, asDstSyncs) + i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32ReadOpsPendingVal)), + sizeof(psSyncInfo->psSyncData->ui32LastReadOpDumpVal), + psKick->ui32PDumpFlags, + MAKEUNIQUETAG(psCCBMemInfo)); + + PDUMPCOMMENT("Tweak dest 
surface read op2 in transfer cmd\r\n"); + PDUMPMEM(&ui32PDumpReadOp2, + psCCBMemInfo, + psKick->ui32CCBDumpWOff + (IMG_UINT32)(offsetof(SGXMKIF_TRANSFERCMD_SHARED, asDstSyncs) + i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32ReadOps2PendingVal)), + sizeof(ui32PDumpReadOp2), + psKick->ui32PDumpFlags, + MAKEUNIQUETAG(psCCBMemInfo)); + i++; + } + } + + /* + * We allow the first source and destination sync objects to be the + * same, which is why the read/write pending updates are delayed + * until the transfer command has been updated with the current + * values from the objects. + */ + for (loop = 0; loop < (psKick->ui32NumSrcSync); loop++) + { + if (abSrcSyncEnable[loop]) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[loop]; + psSyncInfo->psSyncData->ui32LastReadOpDumpVal++; +#if defined(SUPPORT_PDUMP_SYNC_DEBUG) + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_PERSISTENT, + "TQ Src: PDump sync update: uiAddr = 0x%08x, ui32LastReadOpDumpVal = 0x%08x\r\n", + psSyncInfo->sReadOpsCompleteDevVAddr.uiAddr, + psSyncInfo->psSyncData->ui32LastReadOpDumpVal); +#endif + } + } + + for (loop = 0; loop < (psKick->ui32NumDstSync); loop++) + { + if (abDstSyncEnable[loop]) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahDstSyncInfo[loop]; + psSyncInfo->psSyncData->ui32LastOpDumpVal++; +#if defined(SUPPORT_PDUMP_SYNC_DEBUG) + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_PERSISTENT, + "TQ Dst: PDump sync update: uiAddr = 0x%08x, ui32LastOpDumpVal = 0x%08x\r\n", + psSyncInfo->sWriteOpsCompleteDevVAddr.uiAddr, + psSyncInfo->psSyncData->ui32LastOpDumpVal); +#endif + } + } + } + + if (psKick->hTASyncInfo != IMG_NULL) + { + psSyncInfo = psKick->hTASyncInfo; + + PDUMPCOMMENT("Tweak TA/TQ surface write op in transfer cmd\r\n"); +#if defined(SUPPORT_PDUMP_SYNC_DEBUG) + PDUMPCOMMENT("TQ TA/TQ: PDump sync sample: uiAddr = 0x%08x, ui32LastOpDumpVal = 0x%08x\r\n", + psSyncInfo->sWriteOpsCompleteDevVAddr.uiAddr, + psSyncInfo->psSyncData->ui32LastOpDumpVal); 
+#endif + PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal, + psCCBMemInfo, + psKick->ui32CCBDumpWOff + (IMG_UINT32)(offsetof(SGXMKIF_TRANSFERCMD_SHARED, ui32TASyncWriteOpsPendingVal)), + sizeof(psSyncInfo->psSyncData->ui32LastOpDumpVal), + psKick->ui32PDumpFlags, + MAKEUNIQUETAG(psCCBMemInfo)); + + PDUMPCOMMENT("Tweak TA/TQ surface read op in transfer cmd\r\n"); + PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal, + psCCBMemInfo, + psKick->ui32CCBDumpWOff + (IMG_UINT32)(offsetof(SGXMKIF_TRANSFERCMD_SHARED, ui32TASyncReadOpsPendingVal)), + sizeof(psSyncInfo->psSyncData->ui32LastReadOpDumpVal), + psKick->ui32PDumpFlags, + MAKEUNIQUETAG(psCCBMemInfo)); + + psSyncInfo->psSyncData->ui32LastOpDumpVal++; + } + + if (psKick->h3DSyncInfo != IMG_NULL) + { + psSyncInfo = psKick->h3DSyncInfo; + + PDUMPCOMMENT("Tweak 3D/TQ surface write op in transfer cmd\r\n"); +#if defined(SUPPORT_PDUMP_SYNC_DEBUG) + PDUMPCOMMENT("TQ 3D/TQ: PDump sync sample: uiAddr = 0x%08x, ui32LastOpDumpVal = 0x%08x\r\n", + psSyncInfo->sWriteOpsCompleteDevVAddr.uiAddr, + psSyncInfo->psSyncData->ui32LastOpDumpVal); +#endif + PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal, + psCCBMemInfo, + psKick->ui32CCBDumpWOff + (IMG_UINT32)(offsetof(SGXMKIF_TRANSFERCMD_SHARED, ui323DSyncWriteOpsPendingVal)), + sizeof(psSyncInfo->psSyncData->ui32LastOpDumpVal), + psKick->ui32PDumpFlags, + MAKEUNIQUETAG(psCCBMemInfo)); + + PDUMPCOMMENT("Tweak 3D/TQ surface read op in transfer cmd\r\n"); + PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal, + psCCBMemInfo, + psKick->ui32CCBDumpWOff + (IMG_UINT32)(offsetof(SGXMKIF_TRANSFERCMD_SHARED, ui323DSyncReadOpsPendingVal)), + sizeof(psSyncInfo->psSyncData->ui32LastReadOpDumpVal), + psKick->ui32PDumpFlags, + MAKEUNIQUETAG(psCCBMemInfo)); + + psSyncInfo->psSyncData->ui32LastOpDumpVal++; + } + } +#endif + + sCommand.ui32Data[1] = psKick->sHWTransferContextDevVAddr.uiAddr; + + PVR_TTRACE(PVRSRV_TRACE_GROUP_TRANSFER, PVRSRV_TRACE_CLASS_CMD_END, + 
TRANSFER_TOKEN_SUBMIT); + + eError = SGXScheduleCCBCommandKM(hDevHandle, SGXMKIF_CMD_TRANSFER, &sCommand, KERNEL_ID, psKick->ui32PDumpFlags, hDevMemContext, IMG_FALSE); + + if (eError == PVRSRV_ERROR_RETRY) + { + /* Client will retry, so undo the sync ops pending increment(s) done above. */ + if ((psKick->ui32Flags & SGXMKIF_TQFLAGS_KEEPPENDING) == 0UL) + { +#if defined(SUPPORT_DMABUF) + if (ui32NumResvObjs && bBlockingFences) + { + PVRLinuxFenceRelease(ui32FenceTag, + psKick->ui32NumSrcSync, + psKick->ahSrcSyncInfo, + abSrcSyncEnable, + psKick->ui32NumDstSync, + psKick->ahDstSyncInfo, + abDstSyncEnable); + } +#endif + for (loop = 0; loop < psKick->ui32NumSrcSync; loop++) + { + if (abSrcSyncEnable[loop]) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[loop]; + SyncRollBackReadOp(psSyncInfo, SYNC_OP_CLASS_TQ_3D); +#if defined(PDUMP) + if (PDumpIsCaptureFrameKM() + || ((psKick->ui32PDumpFlags & PDUMP_FLAGS_CONTINUOUS) != 0)) + { + psSyncInfo->psSyncData->ui32LastReadOpDumpVal--; + } +#endif + } + } + for (loop = 0; loop < psKick->ui32NumDstSync; loop++) + { + if (abDstSyncEnable[loop]) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahDstSyncInfo[loop]; + SyncRollBackWriteOp(psSyncInfo, SYNC_OP_CLASS_TQ_3D); +#if defined(PDUMP) + if (PDumpIsCaptureFrameKM() + || ((psKick->ui32PDumpFlags & PDUMP_FLAGS_CONTINUOUS) != 0)) + { + psSyncInfo->psSyncData->ui32LastOpDumpVal--; + } +#endif + } + } + } + + /* Command needed to be synchronised with the TA? */ + if (psKick->hTASyncInfo != IMG_NULL) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hTASyncInfo; + SyncRollBackWriteOp(psSyncInfo, SYNC_OP_CLASS_TQ_3D); + } + + /* Command needed to be synchronised with the 3D? 
*/ + if (psKick->h3DSyncInfo != IMG_NULL) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->h3DSyncInfo; + SyncRollBackWriteOp(psSyncInfo, SYNC_OP_CLASS_TQ_3D); + } + } + else if (PVRSRV_OK != eError) + { + PVR_DPF((PVR_DBG_ERROR, "SGXSubmitTransferKM: SGXScheduleCCBCommandKM failed.")); + PVR_TTRACE(PVRSRV_TRACE_GROUP_TRANSFER, PVRSRV_TRACE_CLASS_FUNCTION_EXIT, + TRANSFER_TOKEN_SUBMIT); +#if defined(SUPPORT_DMABUF) && defined(NO_HARDWARE) + PVRLinuxFenceCheckAll(); +#endif + return eError; + } +#if defined(SUPPORT_DMABUF) + else if (ui32NumResvObjs && !bBlockingFences) + { + eError = PVRLinuxFenceProcess(&ui32FenceTag, + ui32NumResvObjs, + bBlockingFences, + psKick->ui32NumSrcSync, + psKick->ahSrcSyncInfo, + abSrcSyncEnable, + psKick->ui32NumDstSync, + psKick->ahDstSyncInfo, + abDstSyncEnable); + if (eError != PVRSRV_OK) + { + return eError; + } + } +#endif + +#if defined(NO_HARDWARE) + if ((psKick->ui32Flags & SGXMKIF_TQFLAGS_NOSYNCUPDATE) == 0) + { + /* Update sync objects pretending that we have done the job*/ + for (loop = 0; loop < psKick->ui32NumSrcSync; loop++) + { + if (abSrcSyncEnable[loop]) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[loop]; + psSyncInfo->psSyncData->ui32ReadOpsComplete = psSyncInfo->psSyncData->ui32ReadOpsPending; + } + } + + for (loop = 0; loop < psKick->ui32NumDstSync; loop++) + { + if (abDstSyncEnable[loop]) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahDstSyncInfo[loop]; + psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending; + } + } + + if (psKick->hTASyncInfo != IMG_NULL) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hTASyncInfo; + + psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending; + } + + if (psKick->h3DSyncInfo != IMG_NULL) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->h3DSyncInfo; + + psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending; + } + } +#endif + 
PVR_TTRACE(PVRSRV_TRACE_GROUP_TRANSFER, PVRSRV_TRACE_CLASS_FUNCTION_EXIT, + TRANSFER_TOKEN_SUBMIT); + return eError; +} + +#if defined(SGX_FEATURE_2D_HARDWARE) +IMG_EXPORT PVRSRV_ERROR SGXSubmit2DKM(IMG_HANDLE hDevHandle, PVRSRV_2D_SGX_KICK *psKick) + +{ + PVRSRV_KERNEL_MEM_INFO *psCCBMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psKick->hCCBMemInfo; + SGXMKIF_COMMAND sCommand = {0}; + SGXMKIF_2DCMD_SHARED *ps2DCmd; + PVRSRV_KERNEL_SYNC_INFO *psSyncInfo; + PVRSRV_ERROR eError; + IMG_UINT32 i; + IMG_HANDLE hDevMemContext = IMG_NULL; +#if defined(FIX_HW_BRN_31620) + hDevMemContext = psKick->hDevMemContext; +#endif + + if (!CCB_OFFSET_IS_VALID(SGXMKIF_2DCMD_SHARED, psCCBMemInfo, psKick, ui32SharedCmdCCBOffset)) + { + PVR_DPF((PVR_DBG_ERROR, "SGXSubmit2DKM: Invalid CCB offset")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + /* override QAC warning about stricter alignment */ + /* PRQA S 3305 1 */ + ps2DCmd = CCB_DATA_FROM_OFFSET(SGXMKIF_2DCMD_SHARED, psCCBMemInfo, psKick, ui32SharedCmdCCBOffset); + + /* Command needs to be synchronised with the TA? */ + if (psKick->hTASyncInfo != IMG_NULL) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hTASyncInfo; + + ps2DCmd->sTASyncData.ui32WriteOpsPendingVal = SyncTakeWriteOp(psSyncInfo, SYNC_OP_CLASS_TQ_2D); + ps2DCmd->sTASyncData.ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending; + + ps2DCmd->sTASyncData.sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr; + ps2DCmd->sTASyncData.sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr; + } + else + { + ps2DCmd->sTASyncData.sWriteOpsCompleteDevVAddr.uiAddr = 0; + ps2DCmd->sTASyncData.sReadOpsCompleteDevVAddr.uiAddr = 0; + } + + /* Command needs to be synchronised with the 3D? 
*/ + if (psKick->h3DSyncInfo != IMG_NULL) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->h3DSyncInfo; + + ps2DCmd->s3DSyncData.ui32WriteOpsPendingVal = SyncTakeWriteOp(psSyncInfo, SYNC_OP_CLASS_TQ_2D); + ps2DCmd->s3DSyncData.ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending; + + ps2DCmd->s3DSyncData.sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr; + ps2DCmd->s3DSyncData.sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr; + } + else + { + ps2DCmd->s3DSyncData.sWriteOpsCompleteDevVAddr.uiAddr = 0; + ps2DCmd->s3DSyncData.sReadOpsCompleteDevVAddr.uiAddr = 0; + } + + /* + * We allow the first source and destination sync objects to be the + * same, which is why the read/write pending updates are delayed + * until the transfer command has been updated with the current + * values from the objects. + */ + ps2DCmd->ui32NumSrcSync = psKick->ui32NumSrcSync; + + for (i = 0; i < psKick->ui32NumSrcSync; i++) + { + psSyncInfo = psKick->ahSrcSyncInfo[i]; + + ps2DCmd->sSrcSyncData[i].ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending; + ps2DCmd->sSrcSyncData[i].ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending; + + ps2DCmd->sSrcSyncData[i].sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr; + ps2DCmd->sSrcSyncData[i].sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr; + } + + if (psKick->hDstSyncInfo != IMG_NULL) + { + psSyncInfo = psKick->hDstSyncInfo; + + ps2DCmd->sDstSyncData.ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending; + ps2DCmd->sDstSyncData.ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending; + ps2DCmd->sDstSyncData.ui32ReadOps2PendingVal = psSyncInfo->psSyncData->ui32ReadOps2Pending; + + ps2DCmd->sDstSyncData.sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr; + ps2DCmd->sDstSyncData.sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr; + ps2DCmd->sDstSyncData.sReadOps2CompleteDevVAddr = 
psSyncInfo->sReadOps2CompleteDevVAddr; + + /* We can do this immediately as we only have one */ + SyncTakeWriteOp(psSyncInfo, SYNC_OP_CLASS_TQ_2D); + } + else + { + ps2DCmd->sDstSyncData.sWriteOpsCompleteDevVAddr.uiAddr = 0; + ps2DCmd->sDstSyncData.sReadOpsCompleteDevVAddr.uiAddr = 0; + ps2DCmd->sDstSyncData.sReadOps2CompleteDevVAddr.uiAddr = 0; + } + + /* Read/Write ops pending updates, delayed from above */ + for (i = 0; i < psKick->ui32NumSrcSync; i++) + { + psSyncInfo = psKick->ahSrcSyncInfo[i]; + SyncTakeReadOp(psSyncInfo, SYNC_OP_CLASS_TQ_2D); + } + +#if defined(PDUMP) + if (PDumpWillCapture(psKick->ui32PDumpFlags)) + { + /* Pdump the command from the per context CCB */ + PDUMPCOMMENT("Shared part of 2D command\r\n"); + PDUMPMEM(ps2DCmd, + psCCBMemInfo, + psKick->ui32CCBDumpWOff, + sizeof(SGXMKIF_2DCMD_SHARED), + psKick->ui32PDumpFlags, + MAKEUNIQUETAG(psCCBMemInfo)); + + for (i = 0; i < psKick->ui32NumSrcSync; i++) + { + psSyncInfo = psKick->ahSrcSyncInfo[i]; + + PDUMPCOMMENT("Tweak src surface write op in 2D cmd\r\n"); + PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal, + psCCBMemInfo, + psKick->ui32CCBDumpWOff + (IMG_UINT32)offsetof(SGXMKIF_2DCMD_SHARED, sSrcSyncData[i].ui32WriteOpsPendingVal), + sizeof(psSyncInfo->psSyncData->ui32LastOpDumpVal), + psKick->ui32PDumpFlags, + MAKEUNIQUETAG(psCCBMemInfo)); + + PDUMPCOMMENT("Tweak src surface read op in 2D cmd\r\n"); + PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal, + psCCBMemInfo, + psKick->ui32CCBDumpWOff + (IMG_UINT32)offsetof(SGXMKIF_2DCMD_SHARED, sSrcSyncData[i].ui32ReadOpsPendingVal), + sizeof(psSyncInfo->psSyncData->ui32LastReadOpDumpVal), + psKick->ui32PDumpFlags, + MAKEUNIQUETAG(psCCBMemInfo)); + } + + if (psKick->hDstSyncInfo != IMG_NULL) + { + IMG_UINT32 ui32PDumpReadOp2 = 0; + psSyncInfo = psKick->hDstSyncInfo; + + PDUMPCOMMENT("Tweak dest surface write op in 2D cmd\r\n"); + PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal, + psCCBMemInfo, + psKick->ui32CCBDumpWOff + 
(IMG_UINT32)offsetof(SGXMKIF_2DCMD_SHARED, sDstSyncData.ui32WriteOpsPendingVal), + sizeof(psSyncInfo->psSyncData->ui32LastOpDumpVal), + psKick->ui32PDumpFlags, + MAKEUNIQUETAG(psCCBMemInfo)); + + PDUMPCOMMENT("Tweak dest surface read op in 2D cmd\r\n"); + PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal, + psCCBMemInfo, + psKick->ui32CCBDumpWOff + (IMG_UINT32)offsetof(SGXMKIF_2DCMD_SHARED, sDstSyncData.ui32ReadOpsPendingVal), + sizeof(psSyncInfo->psSyncData->ui32LastReadOpDumpVal), + psKick->ui32PDumpFlags, + MAKEUNIQUETAG(psCCBMemInfo)); + PDUMPCOMMENT("Tweak dest surface read op2 in 2D cmd\r\n"); + PDUMPMEM(&ui32PDumpReadOp2, + psCCBMemInfo, + psKick->ui32CCBDumpWOff + (IMG_UINT32)offsetof(SGXMKIF_2DCMD_SHARED, sDstSyncData.ui32ReadOps2PendingVal), + sizeof(ui32PDumpReadOp2), + psKick->ui32PDumpFlags, + MAKEUNIQUETAG(psCCBMemInfo)); + } + + /* Read/Write ops pending updates, delayed from above */ + for (i = 0; i < psKick->ui32NumSrcSync; i++) + { + psSyncInfo = psKick->ahSrcSyncInfo[i]; + psSyncInfo->psSyncData->ui32LastReadOpDumpVal++; +#if defined(SUPPORT_PDUMP_SYNC_DEBUG) + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_PERSISTENT, + "TQ2D Src: PDump sync update: uiAddr = 0x%08x, ui32LastReadOpDumpVal = 0x%08x\r\n", + psSyncInfo->sReadOpsCompleteDevVAddr.uiAddr, + psSyncInfo->psSyncData->ui32LastReadOpDumpVal); +#endif + } + + if (psKick->hDstSyncInfo != IMG_NULL) + { + psSyncInfo = psKick->hDstSyncInfo; + psSyncInfo->psSyncData->ui32LastOpDumpVal++; +#if defined(SUPPORT_PDUMP_SYNC_DEBUG) + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_PERSISTENT, + "TQ2D Dst: PDump sync update: uiAddr = 0x%08x, ui32LastOpDumpVal = 0x%08x\r\n", + psSyncInfo->sWriteOpsCompleteDevVAddr.uiAddr, + psSyncInfo->psSyncData->ui32LastOpDumpVal); +#endif + } + } +#endif + + sCommand.ui32Data[1] = psKick->sHW2DContextDevVAddr.uiAddr; + + eError = SGXScheduleCCBCommandKM(hDevHandle, SGXMKIF_CMD_2D, &sCommand, KERNEL_ID, psKick->ui32PDumpFlags, hDevMemContext, IMG_FALSE); + + if (eError == 
PVRSRV_ERROR_RETRY) + { + /* Client will retry, so undo the write ops pending increment + done above. + */ +#if defined(PDUMP) + if (PDumpIsCaptureFrameKM()) + { + for (i = 0; i < psKick->ui32NumSrcSync; i++) + { + psSyncInfo = psKick->ahSrcSyncInfo[i]; + psSyncInfo->psSyncData->ui32LastReadOpDumpVal--; + } + + if (psKick->hDstSyncInfo != IMG_NULL) + { + psSyncInfo = psKick->hDstSyncInfo; + psSyncInfo->psSyncData->ui32LastOpDumpVal--; + } + } +#endif + + for (i = 0; i < psKick->ui32NumSrcSync; i++) + { + psSyncInfo = psKick->ahSrcSyncInfo[i]; + SyncRollBackReadOp(psSyncInfo, SYNC_OP_CLASS_TQ_2D); + } + + if (psKick->hDstSyncInfo != IMG_NULL) + { + psSyncInfo = psKick->hDstSyncInfo; + SyncRollBackWriteOp(psSyncInfo, SYNC_OP_CLASS_TQ_2D); + } + + /* Command needed to be synchronised with the TA? */ + if (psKick->hTASyncInfo != IMG_NULL) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hTASyncInfo; + + SyncRollBackWriteOp(psSyncInfo, SYNC_OP_CLASS_TQ_2D); + } + + /* Command needed to be synchronised with the 3D? 
*/ + if (psKick->h3DSyncInfo != IMG_NULL) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->h3DSyncInfo; + + SyncRollBackWriteOp(psSyncInfo, SYNC_OP_CLASS_TQ_2D); + } + } + + + + +#if defined(NO_HARDWARE) + /* Update sync objects pretending that we have done the job*/ + for(i = 0; i < psKick->ui32NumSrcSync; i++) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[i]; + psSyncInfo->psSyncData->ui32ReadOpsComplete = psSyncInfo->psSyncData->ui32ReadOpsPending; + } + + if (psKick->hDstSyncInfo != IMG_NULL) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hDstSyncInfo; + + psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending; + } + + if (psKick->hTASyncInfo != IMG_NULL) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hTASyncInfo; + + psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending; + } + + if (psKick->h3DSyncInfo != IMG_NULL) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->h3DSyncInfo; + + psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending; + } +#if defined(SUPPORT_DMABUF) + PVRLinuxFenceCheckAll(); +#endif +#endif + + return eError; +} +#endif /* SGX_FEATURE_2D_HARDWARE */ +#endif /* TRANSFER_QUEUE */ diff --git a/sgx_km/eurasia_km/services4/srvkm/devices/sgx/sgxutils.c b/sgx_km/eurasia_km/services4/srvkm/devices/sgx/sgxutils.c new file mode 100644 index 0000000..4e0d683 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/devices/sgx/sgxutils.c @@ -0,0 +1,1976 @@ +/*************************************************************************/ /*! +@Title Device specific utility routines +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Device specific functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include + +#include "sgxdefs.h" +#include "services_headers.h" +#include "buffer_manager.h" +#include "sgx_bridge_km.h" +#include "sgxapi_km.h" +#include "sgxinfo.h" +#include "sgx_mkif_km.h" +#include "sysconfig.h" +#include "pdump_km.h" +#include "mmu.h" +#include "pvr_bridge_km.h" +#include "osfunc.h" +#include "pvr_debug.h" +#include "sgxutils.h" +#include "ttrace.h" +#include "sgxmmu.h" + +#ifdef __linux__ +#include // sprintf +#include // strncpy, strlen +#else +#include +#endif + +IMG_UINT64 ui64KickCount; + + +#if defined(SYS_CUSTOM_POWERDOWN) +PVRSRV_ERROR SysPowerDownMISR(PVRSRV_DEVICE_NODE * psDeviceNode, IMG_UINT32 ui32CallerID); +#endif + + + +/*! +****************************************************************************** + + @Function SGXPostActivePowerEvent + + @Description + + post power event functionality (e.g. restart) + + @Input psDeviceNode : SGX Device Node + @Input ui32CallerID - KERNEL_ID or ISR_ID + + @Return IMG_VOID : + +******************************************************************************/ +static IMG_VOID SGXPostActivePowerEvent(PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_UINT32 ui32CallerID) +{ + PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + SGXMKIF_HOST_CTL *psSGXHostCtl = psDevInfo->psSGXHostCtl; + + /* Update the counter for stats. */ + psSGXHostCtl->ui32NumActivePowerEvents++; + + if ((psSGXHostCtl->ui32PowerStatus & PVRSRV_USSE_EDM_POWMAN_POWEROFF_RESTART_IMMEDIATE) != 0) + { + PVR_DPF((PVR_DBG_MESSAGE, "SGXPostActivePowerEvent: SGX requests immediate restart")); + + /* + Events were queued during the active power + request, so SGX will need to be restarted. + */ + if (ui32CallerID == ISR_ID) + { + psDeviceNode->bReProcessDeviceCommandComplete = IMG_TRUE; + } + else + { + SGXScheduleProcessQueuesKM(psDeviceNode); + } + } +} + + +/*! 
+****************************************************************************** + + @Function SGXTestActivePowerEvent + + @Description + + Checks whether the microkernel has generated an active power event. If so, + perform the power transition. + + @Input psDeviceNode : SGX Device Node + @Input ui32CallerID - KERNEL_ID or ISR_ID + + @Return IMG_VOID : + +******************************************************************************/ +IMG_VOID SGXTestActivePowerEvent (PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32CallerID) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + SGXMKIF_HOST_CTL *psSGXHostCtl = psDevInfo->psSGXHostCtl; + + /* + * Quickly check (without lock) if there is an IDLE or APM event we should handle. + * This check fails most of the time so we don't want to incur lock overhead. + * Check the flags in the reverse order that microkernel clears them to prevent + * us from seeing an inconsistent state. + */ + if ((((psSGXHostCtl->ui32InterruptClearFlags & PVRSRV_USSE_EDM_INTERRUPT_IDLE) == 0) && + ((psSGXHostCtl->ui32InterruptFlags & PVRSRV_USSE_EDM_INTERRUPT_IDLE) != 0)) || + (((psSGXHostCtl->ui32InterruptClearFlags & PVRSRV_USSE_EDM_INTERRUPT_ACTIVE_POWER) == 0) && + ((psSGXHostCtl->ui32InterruptFlags & PVRSRV_USSE_EDM_INTERRUPT_ACTIVE_POWER) != 0))) + { + eError = PVRSRVPowerLock(ui32CallerID, IMG_FALSE); + if (eError == PVRSRV_ERROR_RETRY) + { + return; + } + else if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SGXTestActivePowerEvent failed to acquire lock - " + "ui32CallerID:%d eError:%u", ui32CallerID, eError)); + return; + } + + /* + * Check again (with lock) if IDLE event has been cleared or handled. A race + * condition may allow multiple threads to pass the quick check. 
+ */ + if(((psSGXHostCtl->ui32InterruptClearFlags & PVRSRV_USSE_EDM_INTERRUPT_IDLE) == 0) && + ((psSGXHostCtl->ui32InterruptFlags & PVRSRV_USSE_EDM_INTERRUPT_IDLE) != 0)) + { + psSGXHostCtl->ui32InterruptClearFlags |= PVRSRV_USSE_EDM_INTERRUPT_IDLE; + if (psDevInfo->bSGXIdle == IMG_FALSE) + { + psDevInfo->bSGXIdle = IMG_TRUE; + SysSGXIdleEntered(); + } + } + + /* + * Check again (with lock) if APM event has been cleared or handled. A race + * condition may allow multiple threads to pass the quick check. + */ + if (((psSGXHostCtl->ui32InterruptClearFlags & PVRSRV_USSE_EDM_INTERRUPT_ACTIVE_POWER) == 0) && + ((psSGXHostCtl->ui32InterruptFlags & PVRSRV_USSE_EDM_INTERRUPT_ACTIVE_POWER) != 0)) + { + /* Microkernel is idle and is requesting to be powered down. */ + psSGXHostCtl->ui32InterruptClearFlags |= PVRSRV_USSE_EDM_INTERRUPT_ACTIVE_POWER; + +#if !defined(SUPPORT_PDUMP_MULTI_PROCESS) + /* Suspend pdumping. */ + PDUMPSUSPEND(); +#endif + +#if defined(SYS_CUSTOM_POWERDOWN) + /* + Some power down code cannot be executed inside an MISR on + some platforms that use mutexes inside the power code. 
+ */ + eError = SysPowerDownMISR(psDeviceNode, ui32CallerID); +#else + eError = PVRSRVSetDevicePowerStateKM(psDeviceNode->sDevId.ui32DeviceIndex, + PVRSRV_DEV_POWER_STATE_OFF); +#endif + if (eError == PVRSRV_OK) + { + SGXPostActivePowerEvent(psDeviceNode, ui32CallerID); + } +#if !defined(SUPPORT_PDUMP_MULTI_PROCESS) + /* Resume pdumping */ + PDUMPRESUME(); +#endif + } + + PVRSRVPowerUnlock(ui32CallerID); + } + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "SGXTestActivePowerEvent error:%u", eError)); + } +} + + +/****************************************************************************** + FUNCTION : SGXAcquireKernelCCBSlot + + PURPOSE : Attempts to obtain a slot in the Kernel CCB + + PARAMETERS : psCCB - the CCB + + RETURNS : Address of space if available, IMG_NULL otherwise +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(SGXAcquireKernelCCBSlot) +#endif +static INLINE SGXMKIF_COMMAND * SGXAcquireKernelCCBSlot(PVRSRV_SGX_CCB_INFO *psCCB) +{ + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + if(((*psCCB->pui32WriteOffset + 1) & 255) != *psCCB->pui32ReadOffset) + { + return &psCCB->psCommands[*psCCB->pui32WriteOffset]; + } + + OSSleepms(1); + } END_LOOP_UNTIL_TIMEOUT(); + + /* Time out on waiting for CCB space */ + return IMG_NULL; +} + +/*! 
+****************************************************************************** + + @Function SGXScheduleCCBCommand + + @Description - Submits a CCB command and kicks the ukernel (without + power management) + + @Input psDevInfo - pointer to device info + @Input eCmdType - see SGXMKIF_CMD_* + @Input psCommandData - kernel CCB command + @Input ui32CallerID - KERNEL_ID or ISR_ID + @Input ui32PDumpFlags + + @Return ui32Error - success or failure + +******************************************************************************/ +PVRSRV_ERROR SGXScheduleCCBCommand(PVRSRV_DEVICE_NODE *psDeviceNode, + SGXMKIF_CMD_TYPE eCmdType, + SGXMKIF_COMMAND *psCommandData, + IMG_UINT32 ui32CallerID, + IMG_UINT32 ui32PDumpFlags, + IMG_HANDLE hDevMemContext, + IMG_BOOL bLastInScene) +{ + PVRSRV_SGX_CCB_INFO *psKernelCCB; + PVRSRV_ERROR eError = PVRSRV_OK; + SGXMKIF_COMMAND *psSGXCommand; + PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + SGXMKIF_HOST_CTL *psSGXHostCtl = psDevInfo->psSGXHostCtl; +#if defined(FIX_HW_BRN_31620) + IMG_UINT32 ui32CacheMasks[4]; + IMG_UINT32 i; + MMU_CONTEXT *psMMUContext; +#endif +#if defined(PDUMP) + IMG_VOID *pvDumpCommand; + IMG_BOOL bPDumpIsSuspended = PDumpIsSuspended(); +#if defined(SUPPORT_PDUMP_MULTI_PROCESS) + IMG_BOOL bPDumpActive = _PDumpIsProcessActive(); +#else + IMG_BOOL bPDumpActive = IMG_TRUE; +#endif +#else + PVR_UNREFERENCED_PARAMETER(ui32CallerID); + PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); +#endif + +#if defined(FIX_HW_BRN_31620) + for(i=0;i<4;i++) + { + ui32CacheMasks[i] = 0; + } + + psMMUContext = psDevInfo->hKernelMMUContext; + psDeviceNode->pfnMMUGetCacheFlushRange(psMMUContext, &ui32CacheMasks[0]); + + /* Put the apps memory context in the bottom half */ + if (hDevMemContext) + { + BM_CONTEXT *psBMContext = (BM_CONTEXT *) hDevMemContext; + + psMMUContext = psBMContext->psMMUContext; + psDeviceNode->pfnMMUGetCacheFlushRange(psMMUContext, &ui32CacheMasks[2]); + } + + /* If we have an outstanding flush request then set the 
cachecontrol bit */ + if (ui32CacheMasks[0] || ui32CacheMasks[1] || ui32CacheMasks[2] || ui32CacheMasks[3]) + { + psDevInfo->ui32CacheControl |= SGXMKIF_CC_INVAL_BIF_PD; + } +#endif + +#if defined(FIX_HW_BRN_28889) + /* + If the data cache and bif cache need invalidating there has been a cleanup + request. Therefore, we need to send the invalidate separately and wait + for it to complete. + */ + if ( (eCmdType != SGXMKIF_CMD_PROCESS_QUEUES) && + ((psDevInfo->ui32CacheControl & SGXMKIF_CC_INVAL_DATA) != 0) && + ((psDevInfo->ui32CacheControl & (SGXMKIF_CC_INVAL_BIF_PT | SGXMKIF_CC_INVAL_BIF_PD)) != 0)) + { + #if defined(PDUMP) + PVRSRV_KERNEL_MEM_INFO *psSGXHostCtlMemInfo = psDevInfo->psKernelSGXHostCtlMemInfo; + #endif + SGXMKIF_HOST_CTL *psSGXHostCtl = psDevInfo->psSGXHostCtl; + SGXMKIF_COMMAND sCacheCommand = {0}; + + eError = SGXScheduleCCBCommand(psDeviceNode, + SGXMKIF_CMD_PROCESS_QUEUES, + &sCacheCommand, + ui32CallerID, + ui32PDumpFlags, + hDevMemContext, + bLastInScene); + if (eError != PVRSRV_OK) + { + goto Exit; + } + + /* Wait for the invalidate to happen */ + #if !defined(NO_HARDWARE) + if(PollForValueKM(&psSGXHostCtl->ui32InvalStatus, + PVRSRV_USSE_EDM_BIF_INVAL_COMPLETE, + PVRSRV_USSE_EDM_BIF_INVAL_COMPLETE, + 2 * MAX_HW_TIME_US, + MAX_HW_TIME_US/WAIT_TRY_COUNT, + IMG_FALSE) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SGXScheduleCCBCommand: Wait for uKernel to Invalidate BIF cache failed")); + PVR_DBG_BREAK; + } + #endif + + #if defined(PDUMP) + /* Pdump the poll as well. 
*/ + PDUMPCOMMENTWITHFLAGS(0, "Host Control - Poll for BIF cache invalidate request to complete"); + PDUMPMEMPOL(psSGXHostCtlMemInfo, + offsetof(SGXMKIF_HOST_CTL, ui32InvalStatus), + PVRSRV_USSE_EDM_BIF_INVAL_COMPLETE, + PVRSRV_USSE_EDM_BIF_INVAL_COMPLETE, + PDUMP_POLL_OPERATOR_EQUAL, + 0, + MAKEUNIQUETAG(psSGXHostCtlMemInfo)); + #endif /* PDUMP */ + + psSGXHostCtl->ui32InvalStatus &= ~(PVRSRV_USSE_EDM_BIF_INVAL_COMPLETE); + PDUMPMEM(IMG_NULL, psSGXHostCtlMemInfo, offsetof(SGXMKIF_HOST_CTL, ui32CleanupStatus), sizeof(IMG_UINT32), 0, MAKEUNIQUETAG(psSGXHostCtlMemInfo)); + } +#else + PVR_UNREFERENCED_PARAMETER(hDevMemContext); +#endif + +#if defined(FIX_HW_BRN_31620) + if ((eCmdType != SGXMKIF_CMD_FLUSHPDCACHE) && (psDevInfo->ui32CacheControl & SGXMKIF_CC_INVAL_BIF_PD)) + { + SGXMKIF_COMMAND sPDECacheCommand = {0}; + IMG_DEV_PHYADDR sDevPAddr; + + /* Put the kernel info in the top 1/2 of the data */ + psMMUContext = psDevInfo->hKernelMMUContext; + + psDeviceNode->pfnMMUGetPDPhysAddr(psMMUContext, &sDevPAddr); + sPDECacheCommand.ui32Data[0] = sDevPAddr.uiAddr | 1; + sPDECacheCommand.ui32Data[1] = ui32CacheMasks[0]; + sPDECacheCommand.ui32Data[2] = ui32CacheMasks[1]; + + /* Put the apps memory context in the bottom half */ + if (hDevMemContext) + { + BM_CONTEXT *psBMContext = (BM_CONTEXT *) hDevMemContext; + + psMMUContext = psBMContext->psMMUContext; + + psDeviceNode->pfnMMUGetPDPhysAddr(psMMUContext, &sDevPAddr); + /* Or in 1 to the lsb to show we have a valid context */ + sPDECacheCommand.ui32Data[3] = sDevPAddr.uiAddr | 1; + sPDECacheCommand.ui32Data[4] = ui32CacheMasks[2]; + sPDECacheCommand.ui32Data[5] = ui32CacheMasks[3]; + } + + /* Only do a kick if there is any update */ + if (sPDECacheCommand.ui32Data[1] | sPDECacheCommand.ui32Data[2] | sPDECacheCommand.ui32Data[4] | + sPDECacheCommand.ui32Data[5]) + { + eError = SGXScheduleCCBCommand(psDeviceNode, + SGXMKIF_CMD_FLUSHPDCACHE, + &sPDECacheCommand, + ui32CallerID, + ui32PDumpFlags, + hDevMemContext, + 
bLastInScene); + if (eError != PVRSRV_OK) + { + goto Exit; + } + } + } +#endif + psKernelCCB = psDevInfo->psKernelCCBInfo; + + psSGXCommand = SGXAcquireKernelCCBSlot(psKernelCCB); + + /* Wait for CCB space timed out */ + if(!psSGXCommand) + { + PVR_DPF((PVR_DBG_ERROR, "SGXScheduleCCBCommand: Wait for CCB space timed out")) ; + eError = PVRSRV_ERROR_TIMEOUT; + goto Exit; + } + + /* embed cache control word */ + psCommandData->ui32CacheControl = psDevInfo->ui32CacheControl; + +#if defined(PDUMP) + /* Accumulate any cache invalidates that may have happened */ + psDevInfo->sPDContext.ui32CacheControl |= psDevInfo->ui32CacheControl; +#endif + + /* and clear it */ + psDevInfo->ui32CacheControl = 0; + + /* Copy command data over */ + *psSGXCommand = *psCommandData; + + if (eCmdType >= SGXMKIF_CMD_MAX) + { + PVR_DPF((PVR_DBG_ERROR, "SGXScheduleCCBCommand: Unknown command type: %d", eCmdType)) ; + eError = PVRSRV_ERROR_INVALID_CCB_COMMAND; + goto Exit; + } + + if (eCmdType == SGXMKIF_CMD_2D || + eCmdType == SGXMKIF_CMD_TRANSFER || + ((eCmdType == SGXMKIF_CMD_TA) && bLastInScene)) + { + SYS_DATA *psSysData; + + /* CPU cache clean control */ + SysAcquireData(&psSysData); + + if(psSysData->ePendingCacheOpType == PVRSRV_MISC_INFO_CPUCACHEOP_FLUSH) + { + OSFlushCPUCacheKM(); + } + else if(psSysData->ePendingCacheOpType == PVRSRV_MISC_INFO_CPUCACHEOP_CLEAN) + { + OSCleanCPUCacheKM(); + } + + /* Clear the pending op */ + psSysData->ePendingCacheOpType = PVRSRV_MISC_INFO_CPUCACHEOP_NONE; + } + + PVR_ASSERT(eCmdType < SGXMKIF_CMD_MAX); + psSGXCommand->ui32ServiceAddress = psDevInfo->aui32HostKickAddr[eCmdType]; /* PRQA S 3689 */ /* misuse of enums for bounds checking */ + +#if defined(PDUMP) + if ((ui32CallerID != ISR_ID) && (bPDumpIsSuspended == IMG_FALSE) && + (bPDumpActive == IMG_TRUE) ) + { + /* Poll for space in the CCB. 
*/ + PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Poll for space in the Kernel CCB\r\n"); + PDUMPMEMPOL(psKernelCCB->psCCBCtlMemInfo, + offsetof(PVRSRV_SGX_CCB_CTL, ui32ReadOffset), + (psKernelCCB->ui32CCBDumpWOff + 1) & 0xff, + 0xff, + PDUMP_POLL_OPERATOR_NOTEQUAL, + ui32PDumpFlags, + MAKEUNIQUETAG(psKernelCCB->psCCBCtlMemInfo)); + + PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Kernel CCB command (type == %d)\r\n", eCmdType); + pvDumpCommand = (IMG_VOID *)((IMG_UINT8 *)psKernelCCB->psCCBMemInfo->pvLinAddrKM + (*psKernelCCB->pui32WriteOffset * sizeof(SGXMKIF_COMMAND))); + + PDUMPMEM(pvDumpCommand, + psKernelCCB->psCCBMemInfo, + psKernelCCB->ui32CCBDumpWOff * sizeof(SGXMKIF_COMMAND), + sizeof(SGXMKIF_COMMAND), + ui32PDumpFlags, + MAKEUNIQUETAG(psKernelCCB->psCCBMemInfo)); + + /* Overwrite cache control with pdump shadow */ + PDUMPMEM(&psDevInfo->sPDContext.ui32CacheControl, + psKernelCCB->psCCBMemInfo, + psKernelCCB->ui32CCBDumpWOff * sizeof(SGXMKIF_COMMAND) + + offsetof(SGXMKIF_COMMAND, ui32CacheControl), + sizeof(IMG_UINT32), + ui32PDumpFlags, + MAKEUNIQUETAG(psKernelCCB->psCCBMemInfo)); + + if (PDumpIsCaptureFrameKM() + || ((ui32PDumpFlags & PDUMP_FLAGS_CONTINUOUS) != 0)) + { + /* Clear cache invalidate shadow */ + psDevInfo->sPDContext.ui32CacheControl = 0; + } + } +#endif + +#if defined(FIX_HW_BRN_26620) && defined(SGX_FEATURE_SYSTEM_CACHE) && !defined(SGX_BYPASS_SYSTEM_CACHE) + /* Make sure the previous command has been read before send the next one */ + eError = PollForValueKM (psKernelCCB->pui32ReadOffset, + *psKernelCCB->pui32WriteOffset, + 0xFF, + MAX_HW_TIME_US, + MAX_HW_TIME_US/WAIT_TRY_COUNT, + IMG_FALSE); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "SGXScheduleCCBCommand: Timeout waiting for previous command to be read")) ; + eError = PVRSRV_ERROR_TIMEOUT; + goto Exit; + } +#endif + + /* + Increment the write offset + */ + *psKernelCCB->pui32WriteOffset = (*psKernelCCB->pui32WriteOffset + 1) & 255; + +#if defined(PDUMP) + if ((ui32CallerID != ISR_ID) 
&& (bPDumpIsSuspended == IMG_FALSE) && + (bPDumpActive == IMG_TRUE) ) + { + #if defined(FIX_HW_BRN_26620) && defined(SGX_FEATURE_SYSTEM_CACHE) && !defined(SGX_BYPASS_SYSTEM_CACHE) + PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Poll for previous Kernel CCB CMD to be read\r\n"); + PDUMPMEMPOL(psKernelCCB->psCCBCtlMemInfo, + offsetof(PVRSRV_SGX_CCB_CTL, ui32ReadOffset), + (psKernelCCB->ui32CCBDumpWOff), + 0xFF, + PDUMP_POLL_OPERATOR_EQUAL, + ui32PDumpFlags, + MAKEUNIQUETAG(psKernelCCB->psCCBCtlMemInfo)); + #endif + + if (PDumpIsCaptureFrameKM() + || ((ui32PDumpFlags & PDUMP_FLAGS_CONTINUOUS) != 0)) + { + psKernelCCB->ui32CCBDumpWOff = (psKernelCCB->ui32CCBDumpWOff + 1) & 0xFF; + psDevInfo->ui32KernelCCBEventKickerDumpVal = (psDevInfo->ui32KernelCCBEventKickerDumpVal + 1) & 0xFF; + } + + PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Kernel CCB write offset\r\n"); + PDUMPMEM(&psKernelCCB->ui32CCBDumpWOff, + psKernelCCB->psCCBCtlMemInfo, + offsetof(PVRSRV_SGX_CCB_CTL, ui32WriteOffset), + sizeof(IMG_UINT32), + ui32PDumpFlags, + MAKEUNIQUETAG(psKernelCCB->psCCBCtlMemInfo)); + PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Kernel CCB event kicker\r\n"); + PDUMPMEM(&psDevInfo->ui32KernelCCBEventKickerDumpVal, + psDevInfo->psKernelCCBEventKickerMemInfo, + 0, + sizeof(IMG_UINT32), + ui32PDumpFlags, + MAKEUNIQUETAG(psDevInfo->psKernelCCBEventKickerMemInfo)); + PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Kick the SGX microkernel\r\n"); + #if defined(FIX_HW_BRN_26620) && defined(SGX_FEATURE_SYSTEM_CACHE) && !defined(SGX_BYPASS_SYSTEM_CACHE) + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK2, 0), EUR_CR_EVENT_KICK2_NOW_MASK, ui32PDumpFlags); + #else + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK, 0), EUR_CR_EVENT_KICK_NOW_MASK, ui32PDumpFlags); + #endif + } +#endif + + *psDevInfo->pui32KernelCCBEventKicker = (*psDevInfo->pui32KernelCCBEventKicker + 1) & 0xFF; + + /* + * New command submission is considered a proper handling of any pending + * IDLE or 
APM event, so mark them as handled to prevent other host threads + * from taking action. + */ + psSGXHostCtl->ui32InterruptClearFlags |= PVRSRV_USSE_EDM_INTERRUPT_IDLE; + psSGXHostCtl->ui32InterruptClearFlags |= PVRSRV_USSE_EDM_INTERRUPT_ACTIVE_POWER; + + OSWriteMemoryBarrier(); + + /* Order is importent for post processor! */ + PVR_TTRACE_UI32(PVRSRV_TRACE_GROUP_MKSYNC, PVRSRV_TRACE_CLASS_NONE, + MKSYNC_TOKEN_KERNEL_CCB_OFFSET, *psKernelCCB->pui32WriteOffset); + PVR_TTRACE_UI32(PVRSRV_TRACE_GROUP_MKSYNC, PVRSRV_TRACE_CLASS_NONE, + MKSYNC_TOKEN_CORE_CLK, psDevInfo->ui32CoreClockSpeed); + PVR_TTRACE_UI32(PVRSRV_TRACE_GROUP_MKSYNC, PVRSRV_TRACE_CLASS_NONE, + MKSYNC_TOKEN_UKERNEL_CLK, psDevInfo->ui32uKernelTimerClock); + + +#if defined(FIX_HW_BRN_26620) && defined(SGX_FEATURE_SYSTEM_CACHE) && !defined(SGX_BYPASS_SYSTEM_CACHE) + OSWriteHWReg(psDevInfo->pvRegsBaseKM, + SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK2, 0), + EUR_CR_EVENT_KICK2_NOW_MASK); +#else + OSWriteHWReg(psDevInfo->pvRegsBaseKM, + SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK, 0), + EUR_CR_EVENT_KICK_NOW_MASK); +#endif + + OSMemoryBarrier(); + +#if defined(NO_HARDWARE) + /* Increment read offset */ + *psKernelCCB->pui32ReadOffset = (*psKernelCCB->pui32ReadOffset + 1) & 255; +#endif + + ui64KickCount++; +Exit: + return eError; +} + + +/*! 
******************************************************************************

 @Function	SGXScheduleCCBCommandKM

 @Description - Submits a CCB command and kicks the ukernel

 @Input psDeviceNode - pointer to SGX device node
 @Input eCmdType - see SGXMKIF_CMD_*
 @Input psCommandData - kernel CCB command
 @Input ui32CallerID - KERNEL_ID or ISR_ID
 @Input ui32PDumpFlags

 @Return ui32Error - success or failure

******************************************************************************/
PVRSRV_ERROR SGXScheduleCCBCommandKM(PVRSRV_DEVICE_NODE		*psDeviceNode,
									 SGXMKIF_CMD_TYPE		eCmdType,
									 SGXMKIF_COMMAND		*psCommandData,
									 IMG_UINT32				ui32CallerID,
									 IMG_UINT32				ui32PDumpFlags,
									 IMG_HANDLE				hDevMemContext,
									 IMG_BOOL				bLastInScene)
{
	PVRSRV_ERROR		eError;
	PVRSRV_SGXDEV_INFO	*psDevInfo = psDeviceNode->pvDevice;

	/* Power lock must be held while powering up and kicking the ukernel. */
	eError = PVRSRVPowerLock(ui32CallerID, IMG_FALSE);
	if (eError == PVRSRV_ERROR_RETRY)
	{
		if (ui32CallerID == ISR_ID)
		{
			SYS_DATA *psSysData;

			/*
				ISR failed to acquire lock so it must be held by a kernel thread.
				Bring up and kick SGX if necessary when the lock is available:
				the flag below is re-checked when the MISR scheduled here runs.
			*/
			psDeviceNode->bReProcessDeviceCommandComplete = IMG_TRUE;
			eError = PVRSRV_OK;

			SysAcquireData(&psSysData);
			OSScheduleMISR(psSysData);
		}
		else
		{
			/*
				Return to srvclient for retry.
			*/
		}

		return eError;
	}
	else if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"SGXScheduleCCBCommandKM failed to acquire lock - "
				 "ui32CallerID:%d eError:%u", ui32CallerID, eError));
		return eError;
	}

#if defined(SUPPORT_PDUMP_MULTI_PROCESS)
	PDUMP_LOCK();
#endif
	/* Note that a power-up has been dumped in the init phase. */
	PDUMPSUSPEND();

	/* Ensure that SGX is powered up before kicking the ukernel. */
	eError = PVRSRVSetDevicePowerStateKM(psDeviceNode->sDevId.ui32DeviceIndex,
										 PVRSRV_DEV_POWER_STATE_ON);

	PDUMPRESUME();
#if defined(SUPPORT_PDUMP_MULTI_PROCESS)
	PDUMP_UNLOCK();
#endif

	if (eError == PVRSRV_OK)
	{
		/* Power-up succeeded, so any deferred reprocessing is no longer needed. */
		psDeviceNode->bReProcessDeviceCommandComplete = IMG_FALSE;
	}
	else
	{
		PVR_DPF((PVR_DBG_ERROR,"SGXScheduleCCBCommandKM failed to power up device - "
				 "ui32CallerID:%d eError:%u", ui32CallerID, eError));
		PVRSRVPowerUnlock(ui32CallerID);
		return eError;
	}

	/* Tell the system layer a command is in flight before clearing the idle flag. */
	SysSGXCommandPending(psDevInfo->bSGXIdle);
	psDevInfo->bSGXIdle = IMG_FALSE;

	eError = SGXScheduleCCBCommand(psDeviceNode, eCmdType, psCommandData, ui32CallerID, ui32PDumpFlags, hDevMemContext, bLastInScene);

	PVRSRVPowerUnlock(ui32CallerID);
	return eError;
}


/*!
******************************************************************************

 @Function	SGXScheduleProcessQueuesKM

 @Description - Software command complete handler

 @Input psDeviceNode - SGX device node

******************************************************************************/
PVRSRV_ERROR SGXScheduleProcessQueuesKM(PVRSRV_DEVICE_NODE *psDeviceNode)
{
	PVRSRV_ERROR 		eError;
	PVRSRV_SGXDEV_INFO 	*psDevInfo = psDeviceNode->pvDevice;
	SGXMKIF_HOST_CTL	*psHostCtl;
	IMG_UINT32			ui32PowerStatus;
	SGXMKIF_COMMAND		sCommand = {0};

	if (psDevInfo->psKernelSGXHostCtlMemInfo == IMG_NULL)
	{
		/* Part2 hasn't run yet, we can't do anything */
		return PVRSRV_OK;
	}

	psHostCtl = psDevInfo->psKernelSGXHostCtlMemInfo->pvLinAddrKM;

	ui32PowerStatus = psHostCtl->ui32PowerStatus;
	if ((ui32PowerStatus & PVRSRV_USSE_EDM_POWMAN_NO_WORK) != 0)
	{
		/* The ukernel has no work to be done, so don't waste power. */
		return PVRSRV_OK;
	}

	eError = SGXScheduleCCBCommandKM(psDeviceNode, SGXMKIF_CMD_PROCESS_QUEUES, &sCommand, ISR_ID, 0, IMG_NULL, IMG_FALSE);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"SGXScheduleProcessQueuesKM failed to schedule CCB command: %u", eError));
		return eError;
	}

	return PVRSRV_OK;
}


/*!
******************************************************************************

 @Function	SGXIsDevicePowered

 @Description
	Whether the device is powered, for the purposes of lockup detection.

 @Input psDeviceNode - pointer to device node

 @Return IMG_BOOL : Whether device is powered

******************************************************************************/
IMG_BOOL SGXIsDevicePowered(PVRSRV_DEVICE_NODE *psDeviceNode)
{
	return PVRSRVIsDevicePowered(psDeviceNode->sDevId.ui32DeviceIndex);
}

/*!
*******************************************************************************

 @Function	SGXGetInternalDevInfoKM

 @Description
	Gets device information that is not intended to be passed
	on beyond the srvclient libs.

 @Input hDevCookie

 @Output psSGXInternalDevInfo

 @Return PVRSRV_ERROR :

******************************************************************************/
IMG_EXPORT
PVRSRV_ERROR SGXGetInternalDevInfoKM(IMG_HANDLE hDevCookie,
									 SGX_INTERNAL_DEVINFO *psSGXInternalDevInfo)
{
	PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookie)->pvDevice;

	psSGXInternalDevInfo->bForcePTOff = (IMG_BOOL)psDevInfo->bForcePTOff;

	/* This should be patched up by OS bridge code */
	psSGXInternalDevInfo->hHostCtlKernelMemInfoHandle =
		(IMG_HANDLE)psDevInfo->psKernelSGXHostCtlMemInfo;

	return PVRSRV_OK;
}


/*****************************************************************************
 FUNCTION	: SGXCleanupRequest

 PURPOSE	: Wait for the microkernel to clean up its references to either a
			  render context or render target.
+ + PARAMETERS : psDeviceNode - SGX device node + psHWDataDevVAddr - Device Address of the resource + ui32CleanupType - PVRSRV_CLEANUPCMD_* + bForceCleanup - Skips sync polling + + RETURNS : error status +*****************************************************************************/ +PVRSRV_ERROR SGXCleanupRequest(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_DEV_VIRTADDR *psHWDataDevVAddr, + IMG_UINT32 ui32CleanupType, + IMG_BOOL bForceCleanup) +{ + PVRSRV_ERROR eError; + PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + PVRSRV_KERNEL_MEM_INFO *psHostCtlMemInfo = psDevInfo->psKernelSGXHostCtlMemInfo; + SGXMKIF_HOST_CTL *psHostCtl = psHostCtlMemInfo->pvLinAddrKM; + + SGXMKIF_COMMAND sCommand = {0}; + + + if (bForceCleanup != FORCE_CLEANUP) + { + sCommand.ui32Data[0] = ui32CleanupType; + sCommand.ui32Data[1] = (psHWDataDevVAddr == IMG_NULL) ? 0 : psHWDataDevVAddr->uiAddr; + PDUMPCOMMENTWITHFLAGS(0, "Request ukernel resource clean-up, Type %u, Data 0x%X", sCommand.ui32Data[0], sCommand.ui32Data[1]); + + eError = SGXScheduleCCBCommandKM(psDeviceNode, SGXMKIF_CMD_CLEANUP, &sCommand, KERNEL_ID, 0, IMG_NULL, IMG_FALSE); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SGXCleanupRequest: Failed to submit clean-up command")); + SGXDumpDebugInfo(psDevInfo, IMG_FALSE); + PVR_DBG_BREAK; + return eError; + } + + /* Wait for the uKernel process the cleanup request */ + #if !defined(NO_HARDWARE) + if(PollForValueKM(&psHostCtl->ui32CleanupStatus, + PVRSRV_USSE_EDM_CLEANUPCMD_COMPLETE, + PVRSRV_USSE_EDM_CLEANUPCMD_COMPLETE, + 10 * MAX_HW_TIME_US, + 1000, + IMG_TRUE) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SGXCleanupRequest: Wait for uKernel to clean up (%u) failed", ui32CleanupType)); + eError = PVRSRV_ERROR_TIMEOUT; + SGXDumpDebugInfo(psDevInfo, IMG_FALSE); + PVR_DBG_BREAK; + } + #endif + + #if defined(PDUMP) + /* + Pdump the poll as well. 
+ Note: + We don't expect the cleanup to report busy as the client should have + ensured the resource has been finished with before requesting it's + cleanup. This isn't true of the abnormal termination case but we + don't expect to PDump that. Unless/until PDump has flow control + there isn't anything else we can do. + */ + PDUMPCOMMENTWITHFLAGS(0, "Host Control - Poll for clean-up request to complete"); + PDUMPMEMPOL(psHostCtlMemInfo, + offsetof(SGXMKIF_HOST_CTL, ui32CleanupStatus), + PVRSRV_USSE_EDM_CLEANUPCMD_COMPLETE | PVRSRV_USSE_EDM_CLEANUPCMD_DONE, + PVRSRV_USSE_EDM_CLEANUPCMD_COMPLETE | PVRSRV_USSE_EDM_CLEANUPCMD_DONE, + PDUMP_POLL_OPERATOR_EQUAL, + 0, + MAKEUNIQUETAG(psHostCtlMemInfo)); + #endif /* PDUMP */ + + if (eError != PVRSRV_OK) + { + return eError; + } + } + + if (psHostCtl->ui32CleanupStatus & PVRSRV_USSE_EDM_CLEANUPCMD_BUSY) + { + /* Only one flag should be set */ + PVR_ASSERT((psHostCtl->ui32CleanupStatus & PVRSRV_USSE_EDM_CLEANUPCMD_DONE) == 0); + eError = PVRSRV_ERROR_RETRY; + psHostCtl->ui32CleanupStatus &= ~(PVRSRV_USSE_EDM_CLEANUPCMD_COMPLETE | PVRSRV_USSE_EDM_CLEANUPCMD_BUSY); + } + else + { + eError = PVRSRV_OK; + psHostCtl->ui32CleanupStatus &= ~(PVRSRV_USSE_EDM_CLEANUPCMD_COMPLETE | PVRSRV_USSE_EDM_CLEANUPCMD_DONE); + } + + PDUMPMEM(IMG_NULL, psHostCtlMemInfo, offsetof(SGXMKIF_HOST_CTL, ui32CleanupStatus), sizeof(IMG_UINT32), 0, MAKEUNIQUETAG(psHostCtlMemInfo)); + + /* Request the cache invalidate */ +#if defined(SGX_FEATURE_SYSTEM_CACHE) + psDevInfo->ui32CacheControl |= (SGXMKIF_CC_INVAL_BIF_SL | SGXMKIF_CC_INVAL_DATA); +#else + psDevInfo->ui32CacheControl |= SGXMKIF_CC_INVAL_DATA; +#endif + return eError; +} + + +typedef struct _SGX_HW_RENDER_CONTEXT_CLEANUP_ +{ + PVRSRV_DEVICE_NODE *psDeviceNode; + PVRSRV_KERNEL_MEM_INFO *psHWRenderContextMemInfo; + IMG_HANDLE hBlockAlloc; + PRESMAN_ITEM psResItem; + IMG_BOOL bCleanupTimerRunning; + IMG_PVOID pvTimeData; +} SGX_HW_RENDER_CONTEXT_CLEANUP; + + +static PVRSRV_ERROR 
SGXCleanupHWRenderContextCallback(IMG_PVOID pvParam, + IMG_UINT32 ui32Param, + IMG_BOOL bForceCleanup) +{ + PVRSRV_ERROR eError; + SGX_HW_RENDER_CONTEXT_CLEANUP *psCleanup = pvParam; + + PVR_UNREFERENCED_PARAMETER(ui32Param); + + eError = SGXCleanupRequest(psCleanup->psDeviceNode, + &psCleanup->psHWRenderContextMemInfo->sDevVAddr, + PVRSRV_CLEANUPCMD_RC, + bForceCleanup); + + if (eError == PVRSRV_ERROR_RETRY) + { + if (!psCleanup->bCleanupTimerRunning) + { + OSTimeCreateWithUSOffset(&psCleanup->pvTimeData, MAX_CLEANUP_TIME_US); + psCleanup->bCleanupTimerRunning = IMG_TRUE; + } + else + { + if (OSTimeHasTimePassed(psCleanup->pvTimeData)) + { + eError = PVRSRV_ERROR_TIMEOUT_POLLING_FOR_VALUE; + psCleanup->bCleanupTimerRunning = IMG_FALSE; + OSTimeDestroy(psCleanup->pvTimeData); + } + } + } + else + { + if (psCleanup->bCleanupTimerRunning) + { + OSTimeDestroy(psCleanup->pvTimeData); + } + } + + if (eError != PVRSRV_ERROR_RETRY) + { + /* Free the Device Mem allocated */ + PVRSRVFreeDeviceMemKM(psCleanup->psDeviceNode, + psCleanup->psHWRenderContextMemInfo); + + /* Finally, free the cleanup structure itself */ + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(SGX_HW_RENDER_CONTEXT_CLEANUP), + psCleanup, + psCleanup->hBlockAlloc); + /*not nulling pointer, copy on stack*/ + } + + return eError; +} + +typedef struct _SGX_HW_TRANSFER_CONTEXT_CLEANUP_ +{ + PVRSRV_DEVICE_NODE *psDeviceNode; + PVRSRV_KERNEL_MEM_INFO *psHWTransferContextMemInfo; + IMG_HANDLE hBlockAlloc; + PRESMAN_ITEM psResItem; + IMG_BOOL bCleanupTimerRunning; + IMG_PVOID pvTimeData; +} SGX_HW_TRANSFER_CONTEXT_CLEANUP; + + +static PVRSRV_ERROR SGXCleanupHWTransferContextCallback(IMG_PVOID pvParam, + IMG_UINT32 ui32Param, + IMG_BOOL bForceCleanup) +{ + PVRSRV_ERROR eError; + SGX_HW_TRANSFER_CONTEXT_CLEANUP *psCleanup = (SGX_HW_TRANSFER_CONTEXT_CLEANUP *)pvParam; + + PVR_UNREFERENCED_PARAMETER(ui32Param); + + eError = SGXCleanupRequest(psCleanup->psDeviceNode, + &psCleanup->psHWTransferContextMemInfo->sDevVAddr, + 
PVRSRV_CLEANUPCMD_TC, + bForceCleanup); + + if (eError == PVRSRV_ERROR_RETRY) + { + if (!psCleanup->bCleanupTimerRunning) + { + OSTimeCreateWithUSOffset(&psCleanup->pvTimeData, MAX_CLEANUP_TIME_US); + psCleanup->bCleanupTimerRunning = IMG_TRUE; + } + else + { + if (OSTimeHasTimePassed(psCleanup->pvTimeData)) + { + eError = PVRSRV_ERROR_TIMEOUT_POLLING_FOR_VALUE; + psCleanup->bCleanupTimerRunning = IMG_FALSE; + OSTimeDestroy(psCleanup->pvTimeData); + } + } + } + else + { + if (psCleanup->bCleanupTimerRunning) + { + OSTimeDestroy(psCleanup->pvTimeData); + } + } + + if (eError != PVRSRV_ERROR_RETRY) + { + /* Free the Device Mem allocated */ + PVRSRVFreeDeviceMemKM(psCleanup->psDeviceNode, + psCleanup->psHWTransferContextMemInfo); + + /* Finally, free the cleanup structure itself */ + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(SGX_HW_TRANSFER_CONTEXT_CLEANUP), + psCleanup, + psCleanup->hBlockAlloc); + /*not nulling pointer, copy on stack*/ + } + + return eError; +} + +IMG_EXPORT +IMG_HANDLE SGXRegisterHWRenderContextKM(IMG_HANDLE hDeviceNode, + IMG_CPU_VIRTADDR *psHWRenderContextCpuVAddr, + IMG_UINT32 ui32HWRenderContextSize, + IMG_UINT32 ui32OffsetToPDDevPAddr, + IMG_HANDLE hDevMemContext, + IMG_DEV_VIRTADDR *psHWRenderContextDevVAddr, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_ERROR eError; + IMG_HANDLE hBlockAlloc; + SGX_HW_RENDER_CONTEXT_CLEANUP *psCleanup; + PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)hDeviceNode; + DEVICE_MEMORY_INFO *psDevMemoryInfo; + DEVICE_MEMORY_HEAP_INFO *psHeapInfo; + IMG_HANDLE hDevMemContextInt; + MMU_CONTEXT *psMMUContext; + IMG_DEV_PHYADDR sPDDevPAddr; + int iPtrByte; + IMG_UINT8 *pSrc; + IMG_UINT8 *pDst; + PRESMAN_ITEM psResItem; + IMG_UINT32 ui32PDDevPAddrInDirListFormat; + IMG_UINT8 *pStartPDDevPAddr, *pEndPDDevPAddr; + + eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(SGX_HW_RENDER_CONTEXT_CLEANUP), + (IMG_VOID **)&psCleanup, + &hBlockAlloc, + "SGX Hardware Render Context Cleanup"); + + if (eError != 
PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWRenderContextKM: Couldn't allocate memory for SGX_HW_RENDER_CONTEXT_CLEANUP structure")); + goto exit0; + } + + psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo; + psHeapInfo = &psDevMemoryInfo->psDeviceMemoryHeap[SGX_KERNEL_DATA_HEAP_ID]; + + eError = PVRSRVAllocDeviceMemKM(hDeviceNode, + psPerProc, + psHeapInfo->hDevMemHeap, + PVRSRV_MEM_READ | PVRSRV_MEM_WRITE + | PVRSRV_MEM_NO_SYNCOBJ | PVRSRV_MEM_EDM_PROTECT + | PVRSRV_MEM_CACHE_CONSISTENT, + ui32HWRenderContextSize, + 32, + IMG_NULL, + 0, + 0,0,0,IMG_NULL, /* No sparse mapping data */ + &psCleanup->psHWRenderContextMemInfo, + "HW Render Context"); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWRenderContextKM: Couldn't allocate device memory for HW Render Context")); + goto exit1; + } + /* Ensure that the offset of Page directory dev physical address field is within the allocated context memory */ + pStartPDDevPAddr = (IMG_UINT8 *)(psCleanup->psHWRenderContextMemInfo->pvLinAddrKM) + ui32OffsetToPDDevPAddr; + pEndPDDevPAddr = pStartPDDevPAddr + sizeof(ui32PDDevPAddrInDirListFormat) - 1; + + if (pStartPDDevPAddr < (IMG_UINT8 *)psCleanup->psHWRenderContextMemInfo->pvLinAddrKM || + pEndPDDevPAddr >= (IMG_UINT8 *)(psCleanup->psHWRenderContextMemInfo->pvLinAddrKM) + ui32HWRenderContextSize) + { + PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWRenderContextKM: Offset of page directory device physical address is invalid")); + goto exit2; + } + + eError = OSCopyFromUser(psPerProc, + psCleanup->psHWRenderContextMemInfo->pvLinAddrKM, + psHWRenderContextCpuVAddr, + ui32HWRenderContextSize); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWRenderContextKM: Couldn't copy user-mode copy of HWContext into device memory")); + goto exit2; + } + + /* Pass the DevVAddr of the new context back up through the bridge */ + psHWRenderContextDevVAddr->uiAddr = psCleanup->psHWRenderContextMemInfo->sDevVAddr.uiAddr; + + /* Retrieve the 
PDDevPAddr */ + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDevMemContextInt, + hDevMemContext, + PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWRenderContextKM: Can't lookup DevMem Context")); + goto exit2; + } + + psMMUContext = BM_GetMMUContextFromMemContext(hDevMemContextInt); + sPDDevPAddr = psDeviceNode->pfnMMUGetPDDevPAddr(psMMUContext); + + /* + The PDDevPAddr needs to be shifted-down, as the uKernel expects it in the + format it will be inserted into the DirList registers in. + */ + ui32PDDevPAddrInDirListFormat = (IMG_UINT32)(sPDDevPAddr.uiAddr >> SGX_MMU_PTE_ADDR_ALIGNSHIFT); + + /* + patch-in the Page-Directory Device-Physical address. Note that this is + copied-in one byte at a time, as we have no guarantee that the usermode- + provided ui32OffsetToPDDevPAddr is a validly-aligned address for the + current CPU architecture. + */ + pSrc = (IMG_UINT8 *)&ui32PDDevPAddrInDirListFormat; + pDst = (IMG_UINT8 *)psCleanup->psHWRenderContextMemInfo->pvLinAddrKM; + pDst += ui32OffsetToPDDevPAddr; + + for (iPtrByte = 0; iPtrByte < sizeof(ui32PDDevPAddrInDirListFormat); iPtrByte++) + { + pDst[iPtrByte] = pSrc[iPtrByte]; + } + +#if defined(PDUMP) + /* PDUMP the HW context */ + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "HW Render context struct"); + + PDUMPMEM( + IMG_NULL, + psCleanup->psHWRenderContextMemInfo, + 0, + ui32HWRenderContextSize, + PDUMP_FLAGS_CONTINUOUS, + MAKEUNIQUETAG(psCleanup->psHWRenderContextMemInfo)); + + /* PDUMP the PDDevPAddr */ + PDUMPCOMMENT("Page directory address in HW render context"); + PDUMPPDDEVPADDR( + psCleanup->psHWRenderContextMemInfo, + ui32OffsetToPDDevPAddr, + sPDDevPAddr, + MAKEUNIQUETAG(psCleanup->psHWRenderContextMemInfo), + PDUMP_PD_UNIQUETAG); +#endif + + psCleanup->hBlockAlloc = hBlockAlloc; + psCleanup->psDeviceNode = psDeviceNode; + psCleanup->bCleanupTimerRunning = IMG_FALSE; + + psResItem = ResManRegisterRes(psPerProc->hResManContext, + 
RESMAN_TYPE_HW_RENDER_CONTEXT, + (IMG_VOID *)psCleanup, + 0, + &SGXCleanupHWRenderContextCallback); + + if (psResItem == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWRenderContextKM: ResManRegisterRes failed")); + goto exit2; + } + + psCleanup->psResItem = psResItem; + + return (IMG_HANDLE)psCleanup; + +/* Error exit paths */ +exit2: + PVRSRVFreeDeviceMemKM(hDeviceNode, + psCleanup->psHWRenderContextMemInfo); +exit1: + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(SGX_HW_RENDER_CONTEXT_CLEANUP), + psCleanup, + psCleanup->hBlockAlloc); + /*not nulling pointer, out of scope*/ +exit0: + return IMG_NULL; +} + +IMG_EXPORT +PVRSRV_ERROR SGXUnregisterHWRenderContextKM(IMG_HANDLE hHWRenderContext, IMG_BOOL bForceCleanup) +{ + PVRSRV_ERROR eError; + SGX_HW_RENDER_CONTEXT_CLEANUP *psCleanup; + + PVR_ASSERT(hHWRenderContext != IMG_NULL); + + psCleanup = (SGX_HW_RENDER_CONTEXT_CLEANUP *)hHWRenderContext; + + if (psCleanup == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "SGXUnregisterHWRenderContextKM: invalid parameter")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + eError = ResManFreeResByPtr(psCleanup->psResItem, bForceCleanup); + + return eError; +} + + +IMG_EXPORT +IMG_HANDLE SGXRegisterHWTransferContextKM(IMG_HANDLE hDeviceNode, + IMG_CPU_VIRTADDR *psHWTransferContextCpuVAddr, + IMG_UINT32 ui32HWTransferContextSize, + IMG_UINT32 ui32OffsetToPDDevPAddr, + IMG_HANDLE hDevMemContext, + IMG_DEV_VIRTADDR *psHWTransferContextDevVAddr, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_ERROR eError; + IMG_HANDLE hBlockAlloc; + SGX_HW_TRANSFER_CONTEXT_CLEANUP *psCleanup; + PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)hDeviceNode; + DEVICE_MEMORY_INFO *psDevMemoryInfo; + DEVICE_MEMORY_HEAP_INFO *psHeapInfo; + IMG_HANDLE hDevMemContextInt; + MMU_CONTEXT *psMMUContext; + IMG_DEV_PHYADDR sPDDevPAddr; + int iPtrByte; + IMG_UINT8 *pSrc; + IMG_UINT8 *pDst; + PRESMAN_ITEM psResItem; + IMG_UINT32 ui32PDDevPAddrInDirListFormat; + IMG_UINT8 *pStartPDDevPAddr, *pEndPDDevPAddr; 
+ + eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(SGX_HW_TRANSFER_CONTEXT_CLEANUP), + (IMG_VOID **)&psCleanup, + &hBlockAlloc, + "SGX Hardware Transfer Context Cleanup"); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWTransferContextKM: Couldn't allocate memory for SGX_HW_TRANSFER_CONTEXT_CLEANUP structure")); + goto exit0; + } + + psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo; + psHeapInfo = &psDevMemoryInfo->psDeviceMemoryHeap[SGX_KERNEL_DATA_HEAP_ID]; + + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Allocate HW Transfer context"); + eError = PVRSRVAllocDeviceMemKM(hDeviceNode, + psPerProc, + psHeapInfo->hDevMemHeap, + PVRSRV_MEM_READ | PVRSRV_MEM_WRITE + | PVRSRV_MEM_NO_SYNCOBJ | PVRSRV_MEM_EDM_PROTECT + | PVRSRV_MEM_CACHE_CONSISTENT, + ui32HWTransferContextSize, + 32, + IMG_NULL, + 0, + 0,0,0,IMG_NULL, /* No sparse mapping data */ + &psCleanup->psHWTransferContextMemInfo, + "HW Render Context"); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWTransferContextKM: Couldn't allocate device memory for HW Render Context")); + goto exit1; + } + + /* Ensure that the offset of Page directory dev physical address field is within the allocated context memory */ + pStartPDDevPAddr = (IMG_UINT8 *)(psCleanup->psHWTransferContextMemInfo->pvLinAddrKM) + ui32OffsetToPDDevPAddr; + pEndPDDevPAddr = pStartPDDevPAddr + sizeof(ui32PDDevPAddrInDirListFormat) - 1; + + if (pStartPDDevPAddr < (IMG_UINT8 *)psCleanup->psHWTransferContextMemInfo->pvLinAddrKM || + pEndPDDevPAddr >= (IMG_UINT8 *)(psCleanup->psHWTransferContextMemInfo->pvLinAddrKM) + ui32HWTransferContextSize) + { + PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWTransferContextKM: Offset of page directory device physical address is invalid")); + goto exit2; + } + + eError = OSCopyFromUser(psPerProc, + psCleanup->psHWTransferContextMemInfo->pvLinAddrKM, + psHWTransferContextCpuVAddr, + ui32HWTransferContextSize); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, 
"SGXRegisterHWTransferContextKM: Couldn't copy user-mode copy of HWContext into device memory")); + goto exit2; + } + + /* Pass the DevVAddr of the new context back up through the bridge */ + psHWTransferContextDevVAddr->uiAddr = psCleanup->psHWTransferContextMemInfo->sDevVAddr.uiAddr; + + /* Retrieve the PDDevPAddr */ + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDevMemContextInt, + hDevMemContext, + PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWTransferContextKM: Can't lookup DevMem Context")); + goto exit2; + } + + psMMUContext = BM_GetMMUContextFromMemContext(hDevMemContextInt); + sPDDevPAddr = psDeviceNode->pfnMMUGetPDDevPAddr(psMMUContext); + + /* + The PDDevPAddr needs to be shifted-down, as the uKernel expects it in the + format it will be inserted into the DirList registers in. + */ + ui32PDDevPAddrInDirListFormat = (IMG_UINT32)(sPDDevPAddr.uiAddr >> SGX_MMU_PTE_ADDR_ALIGNSHIFT); + + /* + patch-in the Page-Directory Device-Physical address. Note that this is + copied-in one byte at a time, as we have no guarantee that the usermode- + provided ui32OffsetToPDDevPAddr is a validly-aligned address for the + current CPU architecture. 
+ */ + pSrc = (IMG_UINT8 *)&ui32PDDevPAddrInDirListFormat; + pDst = (IMG_UINT8 *)psCleanup->psHWTransferContextMemInfo->pvLinAddrKM; + pDst += ui32OffsetToPDDevPAddr; + + for (iPtrByte = 0; iPtrByte < sizeof(ui32PDDevPAddrInDirListFormat); iPtrByte++) + { + pDst[iPtrByte] = pSrc[iPtrByte]; + } + +#if defined(PDUMP) + /* PDUMP the HW Transfer Context */ + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "HW Transfer context struct"); + + PDUMPMEM( + IMG_NULL, + psCleanup->psHWTransferContextMemInfo, + 0, + ui32HWTransferContextSize, + PDUMP_FLAGS_CONTINUOUS, + MAKEUNIQUETAG(psCleanup->psHWTransferContextMemInfo)); + + /* PDUMP the PDDevPAddr */ + PDUMPCOMMENT("Page directory address in HW transfer context"); + + PDUMPPDDEVPADDR( + psCleanup->psHWTransferContextMemInfo, + ui32OffsetToPDDevPAddr, + sPDDevPAddr, + MAKEUNIQUETAG(psCleanup->psHWTransferContextMemInfo), + PDUMP_PD_UNIQUETAG); +#endif + + psCleanup->hBlockAlloc = hBlockAlloc; + psCleanup->psDeviceNode = psDeviceNode; + psCleanup->bCleanupTimerRunning = IMG_FALSE; + + psResItem = ResManRegisterRes(psPerProc->hResManContext, + RESMAN_TYPE_HW_TRANSFER_CONTEXT, + psCleanup, + 0, + &SGXCleanupHWTransferContextCallback); + + if (psResItem == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWTransferContextKM: ResManRegisterRes failed")); + goto exit2; + } + + psCleanup->psResItem = psResItem; + + return (IMG_HANDLE)psCleanup; + +/* Error exit paths */ +exit2: + PVRSRVFreeDeviceMemKM(hDeviceNode, + psCleanup->psHWTransferContextMemInfo); +exit1: + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(SGX_HW_TRANSFER_CONTEXT_CLEANUP), + psCleanup, + psCleanup->hBlockAlloc); + /*not nulling pointer, out of scope*/ + +exit0: + return IMG_NULL; +} + +IMG_EXPORT +PVRSRV_ERROR SGXUnregisterHWTransferContextKM(IMG_HANDLE hHWTransferContext, IMG_BOOL bForceCleanup) +{ + PVRSRV_ERROR eError; + SGX_HW_TRANSFER_CONTEXT_CLEANUP *psCleanup; + + PVR_ASSERT(hHWTransferContext != IMG_NULL); + + psCleanup = 
(SGX_HW_TRANSFER_CONTEXT_CLEANUP *)hHWTransferContext; + + if (psCleanup == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "SGXUnregisterHWTransferContextKM: invalid parameter")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + eError = ResManFreeResByPtr(psCleanup->psResItem, bForceCleanup); + + return eError; +} + +IMG_EXPORT +PVRSRV_ERROR SGXSetTransferContextPriorityKM( + IMG_HANDLE hDeviceNode, + IMG_HANDLE hHWTransferContext, + IMG_UINT32 ui32Priority, + IMG_UINT32 ui32OffsetOfPriorityField) +{ + SGX_HW_TRANSFER_CONTEXT_CLEANUP *psCleanup; + IMG_UINT8 *pSrc; + IMG_UINT8 *pDst; + int iPtrByte; + PVR_UNREFERENCED_PARAMETER(hDeviceNode); + + if (hHWTransferContext != IMG_NULL) + { + psCleanup = (SGX_HW_TRANSFER_CONTEXT_CLEANUP *)hHWTransferContext; + + if ((ui32OffsetOfPriorityField + sizeof(ui32Priority)) + >= psCleanup->psHWTransferContextMemInfo->uAllocSize) + { + PVR_DPF(( + PVR_DBG_ERROR, + "SGXSetTransferContextPriorityKM: invalid context prioirty offset")); + + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* + cannot be sure that offset (passed from user-land) is safe to deref + as a word-ptr on current CPU arch: copy one byte at a time. 
+ */ + pDst = (IMG_UINT8 *)psCleanup->psHWTransferContextMemInfo->pvLinAddrKM; + pDst += ui32OffsetOfPriorityField; + pSrc = (IMG_UINT8 *)&ui32Priority; + + for (iPtrByte = 0; iPtrByte < sizeof(ui32Priority); iPtrByte++) + { + pDst[iPtrByte] = pSrc[iPtrByte]; + } + } + return PVRSRV_OK; +} + +IMG_EXPORT +PVRSRV_ERROR SGXSetRenderContextPriorityKM( + IMG_HANDLE hDeviceNode, + IMG_HANDLE hHWRenderContext, + IMG_UINT32 ui32Priority, + IMG_UINT32 ui32OffsetOfPriorityField) +{ + SGX_HW_RENDER_CONTEXT_CLEANUP *psCleanup; + IMG_UINT8 *pSrc; + IMG_UINT8 *pDst; + int iPtrByte; + PVR_UNREFERENCED_PARAMETER(hDeviceNode); + + if (hHWRenderContext != IMG_NULL) + { + psCleanup = (SGX_HW_RENDER_CONTEXT_CLEANUP *)hHWRenderContext; + if ((ui32OffsetOfPriorityField + sizeof(ui32Priority)) + >= psCleanup->psHWRenderContextMemInfo->uAllocSize) + { + PVR_DPF(( + PVR_DBG_ERROR, + "SGXSetContextPriorityKM: invalid HWRenderContext prioirty offset")); + + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* + cannot be sure that offset (passed from user-land) is safe to deref + as a word-ptr on current CPU arch: copy one byte at a time. 
+ */ + pDst = (IMG_UINT8 *)psCleanup->psHWRenderContextMemInfo->pvLinAddrKM; + pDst += ui32OffsetOfPriorityField; + + pSrc = (IMG_UINT8 *)&ui32Priority; + + for (iPtrByte = 0; iPtrByte < sizeof(ui32Priority); iPtrByte++) + { + pDst[iPtrByte] = pSrc[iPtrByte]; + } + } + return PVRSRV_OK; +} + +#if defined(SGX_FEATURE_2D_HARDWARE) +typedef struct _SGX_HW_2D_CONTEXT_CLEANUP_ +{ + PVRSRV_DEVICE_NODE *psDeviceNode; + PVRSRV_KERNEL_MEM_INFO *psHW2DContextMemInfo; + IMG_HANDLE hBlockAlloc; + PRESMAN_ITEM psResItem; + IMG_BOOL bCleanupTimerRunning; + IMG_PVOID pvTimeData; +} SGX_HW_2D_CONTEXT_CLEANUP; + +static PVRSRV_ERROR SGXCleanupHW2DContextCallback(IMG_PVOID pvParam, + IMG_UINT32 ui32Param, + IMG_BOOL bForceCleanup) +{ + PVRSRV_ERROR eError; + SGX_HW_2D_CONTEXT_CLEANUP *psCleanup = (SGX_HW_2D_CONTEXT_CLEANUP *)pvParam; + + PVR_UNREFERENCED_PARAMETER(ui32Param); + + /* First, ensure the context is no longer being utilised */ + eError = SGXCleanupRequest(psCleanup->psDeviceNode, + &psCleanup->psHW2DContextMemInfo->sDevVAddr, + PVRSRV_CLEANUPCMD_2DC, + bForceCleanup); + + if (eError == PVRSRV_ERROR_RETRY) + { + if (!psCleanup->bCleanupTimerRunning) + { + OSTimeCreateWithUSOffset(&psCleanup->pvTimeData, MAX_CLEANUP_TIME_US); + psCleanup->bCleanupTimerRunning = IMG_TRUE; + } + else + { + if (OSTimeHasTimePassed(psCleanup->pvTimeData)) + { + eError = PVRSRV_ERROR_TIMEOUT_POLLING_FOR_VALUE; + psCleanup->bCleanupTimerRunning = IMG_FALSE; + OSTimeDestroy(psCleanup->pvTimeData); + } + } + } + else + { + if (psCleanup->bCleanupTimerRunning) + { + OSTimeDestroy(psCleanup->pvTimeData); + } + } + + if (eError != PVRSRV_ERROR_RETRY) + { + /* Free the Device Mem allocated */ + PVRSRVFreeDeviceMemKM(psCleanup->psDeviceNode, + psCleanup->psHW2DContextMemInfo); + + /* Finally, free the cleanup structure itself */ + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(SGX_HW_2D_CONTEXT_CLEANUP), + psCleanup, + psCleanup->hBlockAlloc); + /*not nulling pointer, copy on stack*/ + } + return eError; 
+}
+
+/*
+	Allocates kernel device memory for a HW 2D context, copies the usermode
+	context image into it, patches in the page-directory device-physical
+	address (byte-wise, since the caller-supplied offset may be unaligned),
+	and registers a resman item so the context is cleaned up automatically.
+	Returns an opaque cleanup handle, or IMG_NULL on failure.
+*/
+IMG_HANDLE SGXRegisterHW2DContextKM(IMG_HANDLE hDeviceNode,
+                                    IMG_CPU_VIRTADDR *psHW2DContextCpuVAddr,
+                                    IMG_UINT32 ui32HW2DContextSize,
+                                    IMG_UINT32 ui32OffsetToPDDevPAddr,
+                                    IMG_HANDLE hDevMemContext,
+                                    IMG_DEV_VIRTADDR *psHW2DContextDevVAddr,
+                                    PVRSRV_PER_PROCESS_DATA *psPerProc)
+{
+	PVRSRV_ERROR eError;
+	IMG_HANDLE hBlockAlloc;
+	SGX_HW_2D_CONTEXT_CLEANUP *psCleanup;
+	PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)hDeviceNode;
+	DEVICE_MEMORY_INFO *psDevMemoryInfo;
+	DEVICE_MEMORY_HEAP_INFO *psHeapInfo;
+	IMG_HANDLE hDevMemContextInt;
+	MMU_CONTEXT *psMMUContext;
+	IMG_DEV_PHYADDR sPDDevPAddr;
+	int iPtrByte;
+	IMG_UINT8 *pSrc;
+	IMG_UINT8 *pDst;
+	PRESMAN_ITEM psResItem;
+	IMG_UINT32 ui32PDDevPAddrInDirListFormat;
+	IMG_UINT8 *pStartPDDevPAddr, *pEndPDDevPAddr;
+
+	eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+	                    sizeof(SGX_HW_2D_CONTEXT_CLEANUP),
+	                    (IMG_VOID **)&psCleanup,
+	                    &hBlockAlloc,
+	                    "SGX Hardware 2D Context Cleanup");
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHW2DContextKM: Couldn't allocate memory for SGX_HW_2D_CONTEXT_CLEANUP structure"));
+		goto exit0;
+	}
+
+	psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
+	psHeapInfo = &psDevMemoryInfo->psDeviceMemoryHeap[SGX_KERNEL_DATA_HEAP_ID];
+
+	eError = PVRSRVAllocDeviceMemKM(hDeviceNode,
+	                                psPerProc,
+	                                psHeapInfo->hDevMemHeap,
+	                                PVRSRV_MEM_READ | PVRSRV_MEM_WRITE
+	                                | PVRSRV_MEM_NO_SYNCOBJ | PVRSRV_MEM_EDM_PROTECT
+	                                | PVRSRV_MEM_CACHE_CONSISTENT,
+	                                ui32HW2DContextSize,
+	                                32,
+	                                IMG_NULL,
+	                                0,
+	                                0,0,0,IMG_NULL, /* No sparse mapping data */
+	                                &psCleanup->psHW2DContextMemInfo,
+	                                "HW 2D Context");
+
+	if (eError != PVRSRV_OK)
+	{
+		/* FIX: message previously said "HW Render Context" (copy-paste from
+		   the render-context variant); this is the 2D context path. */
+		PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHW2DContextKM: Couldn't allocate device memory for HW 2D Context"));
+		goto exit1;
+	}
+
+	/* Ensure that the offset of Page directory dev physical address field is within the allocated context memory */
+	pStartPDDevPAddr = (IMG_UINT8 *)(psCleanup->psHW2DContextMemInfo->pvLinAddrKM) + ui32OffsetToPDDevPAddr;
+	pEndPDDevPAddr = pStartPDDevPAddr + sizeof(ui32PDDevPAddrInDirListFormat) - 1;
+
+	if (pStartPDDevPAddr < (IMG_UINT8 *)psCleanup->psHW2DContextMemInfo->pvLinAddrKM ||
+	    pEndPDDevPAddr >= (IMG_UINT8 *)(psCleanup->psHW2DContextMemInfo->pvLinAddrKM) + ui32HW2DContextSize)
+	{
+		/* FIX: message previously named SGXRegisterHWTransferContextKM
+		   (copy-paste); report the correct function. */
+		PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHW2DContextKM: Offset of page directory device physical address is invalid"));
+		goto exit2;
+	}
+
+	eError = OSCopyFromUser(psPerProc,
+	                        psCleanup->psHW2DContextMemInfo->pvLinAddrKM,
+	                        psHW2DContextCpuVAddr,
+	                        ui32HW2DContextSize);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHW2DContextKM: Couldn't copy user-mode copy of HWContext into device memory"));
+		goto exit2;
+	}
+
+	/* Pass the DevVAddr of the new context back up through the bridge */
+	psHW2DContextDevVAddr->uiAddr = psCleanup->psHW2DContextMemInfo->sDevVAddr.uiAddr;
+
+	/* Retrieve the PDDevPAddr */
+	eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
+	                            &hDevMemContextInt,
+	                            hDevMemContext,
+	                            PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHW2DContextKM: Can't lookup DevMem Context"));
+		goto exit2;
+	}
+
+	psMMUContext = BM_GetMMUContextFromMemContext(hDevMemContextInt);
+	sPDDevPAddr = psDeviceNode->pfnMMUGetPDDevPAddr(psMMUContext);
+
+	/*
+		The PDDevPAddr needs to be shifted-down, as the uKernel expects it in the
+		format it will be inserted into the DirList registers in.
+	*/
+	ui32PDDevPAddrInDirListFormat = sPDDevPAddr.uiAddr >> SGX_MMU_PTE_ADDR_ALIGNSHIFT;
+
+	/*
+		patch-in the Page-Directory Device-Physical address. Note that this is
+		copied-in one byte at a time, as we have no guarantee that the usermode-
+		provided ui32OffsetToPDDevPAddr is a validly-aligned address for the
+		current CPU architecture.
+	*/
+	pSrc = (IMG_UINT8 *)&ui32PDDevPAddrInDirListFormat;
+	pDst = (IMG_UINT8 *)psCleanup->psHW2DContextMemInfo->pvLinAddrKM;
+	pDst += ui32OffsetToPDDevPAddr;
+
+	for (iPtrByte = 0; iPtrByte < sizeof(ui32PDDevPAddrInDirListFormat); iPtrByte++)
+	{
+		pDst[iPtrByte] = pSrc[iPtrByte];
+	}
+
+#if defined(PDUMP)
+	/* PDUMP the HW 2D Context */
+	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "HW 2D context struct");
+
+	PDUMPMEM(
+		IMG_NULL,
+		psCleanup->psHW2DContextMemInfo,
+		0,
+		ui32HW2DContextSize,
+		PDUMP_FLAGS_CONTINUOUS,
+		MAKEUNIQUETAG(psCleanup->psHW2DContextMemInfo));
+
+	/* PDUMP the PDDevPAddr */
+	PDUMPCOMMENT("Page directory address in HW 2D transfer context");
+	PDUMPPDDEVPADDR(
+		psCleanup->psHW2DContextMemInfo,
+		ui32OffsetToPDDevPAddr,
+		sPDDevPAddr,
+		MAKEUNIQUETAG(psCleanup->psHW2DContextMemInfo),
+		PDUMP_PD_UNIQUETAG);
+#endif
+
+	psCleanup->hBlockAlloc = hBlockAlloc;
+	psCleanup->psDeviceNode = psDeviceNode;
+	psCleanup->bCleanupTimerRunning = IMG_FALSE;
+
+	psResItem = ResManRegisterRes(psPerProc->hResManContext,
+	                              RESMAN_TYPE_HW_2D_CONTEXT,
+	                              psCleanup,
+	                              0,
+	                              &SGXCleanupHW2DContextCallback);
+
+	if (psResItem == IMG_NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHW2DContextKM: ResManRegisterRes failed"));
+		goto exit2;
+	}
+
+	psCleanup->psResItem = psResItem;
+
+	return (IMG_HANDLE)psCleanup;
+
+/* Error exit paths */
+exit2:
+	PVRSRVFreeDeviceMemKM(hDeviceNode,
+	                      psCleanup->psHW2DContextMemInfo);
+exit1:
+	OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
+	          sizeof(SGX_HW_2D_CONTEXT_CLEANUP),
+	          psCleanup,
+	          psCleanup->hBlockAlloc);
+	/*not nulling pointer, out of scope*/
+exit0:
+	return IMG_NULL;
+}
+
+IMG_EXPORT
+PVRSRV_ERROR SGXUnregisterHW2DContextKM(IMG_HANDLE hHW2DContext, IMG_BOOL bForceCleanup)
+{
+	PVRSRV_ERROR eError;
+	SGX_HW_2D_CONTEXT_CLEANUP *psCleanup;
+
+	PVR_ASSERT(hHW2DContext != IMG_NULL);
+
+	if (hHW2DContext == IMG_NULL)
+	{
+		return (PVRSRV_ERROR_INVALID_PARAMS);
+	}
+
+	psCleanup = (SGX_HW_2D_CONTEXT_CLEANUP *)hHW2DContext;
+
+	
eError = ResManFreeResByPtr(psCleanup->psResItem, bForceCleanup); + + return eError; +} +#endif /* #if defined(SGX_FEATURE_2D_HARDWARE)*/ + +/*!**************************************************************************** + @Function SGX2DQuerySyncOpsCompleteKM + + @Input psSyncInfo : Sync object to be queried + + @Return IMG_TRUE - ops complete, IMG_FALSE - ops pending + +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(SGX2DQuerySyncOpsComplete) +#endif +static INLINE +IMG_BOOL SGX2DQuerySyncOpsComplete(PVRSRV_KERNEL_SYNC_INFO *psSyncInfo, + IMG_UINT32 ui32ReadOpsPending, + IMG_UINT32 ui32WriteOpsPending) +{ + PVRSRV_SYNC_DATA *psSyncData = psSyncInfo->psSyncData; + + return (IMG_BOOL)( + (psSyncData->ui32ReadOpsComplete >= ui32ReadOpsPending) && + (psSyncData->ui32WriteOpsComplete >= ui32WriteOpsPending) + ); +} + +/*!**************************************************************************** + @Function SGX2DQueryBlitsCompleteKM + + @Input psDevInfo : SGX device info structure + + @Input psSyncInfo : Sync object to be queried + + @Return PVRSRV_ERROR + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR SGX2DQueryBlitsCompleteKM(PVRSRV_SGXDEV_INFO *psDevInfo, + PVRSRV_KERNEL_SYNC_INFO *psSyncInfo, + IMG_BOOL bWaitForComplete) +{ + IMG_UINT32 ui32ReadOpsPending, ui32WriteOpsPending; + + PVR_UNREFERENCED_PARAMETER(psDevInfo); + + PVR_DPF((PVR_DBG_CALLTRACE, "SGX2DQueryBlitsCompleteKM: Start")); + + ui32ReadOpsPending = psSyncInfo->psSyncData->ui32ReadOpsPending; + ui32WriteOpsPending = psSyncInfo->psSyncData->ui32WriteOpsPending; + + if(SGX2DQuerySyncOpsComplete(psSyncInfo, ui32ReadOpsPending, ui32WriteOpsPending)) + { + /* Instant success */ + PVR_DPF((PVR_DBG_CALLTRACE, "SGX2DQueryBlitsCompleteKM: No wait. 
Blits complete.")); + return PVRSRV_OK; + } + + /* Not complete yet */ + if (!bWaitForComplete) + { + /* Just report not complete */ + PVR_DPF((PVR_DBG_CALLTRACE, "SGX2DQueryBlitsCompleteKM: No wait. Ops pending.")); + return PVRSRV_ERROR_CMD_NOT_PROCESSED; + } + + /* Start polling */ + PVR_DPF((PVR_DBG_MESSAGE, "SGX2DQueryBlitsCompleteKM: Ops pending. Start polling.")); + + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + OSSleepms(1); + + if(SGX2DQuerySyncOpsComplete(psSyncInfo, ui32ReadOpsPending, ui32WriteOpsPending)) + { + /* Success */ + PVR_DPF((PVR_DBG_CALLTRACE, "SGX2DQueryBlitsCompleteKM: Wait over. Blits complete.")); + return PVRSRV_OK; + } + + OSSleepms(1); + } END_LOOP_UNTIL_TIMEOUT(); + + /* Timed out */ + PVR_DPF((PVR_DBG_ERROR,"SGX2DQueryBlitsCompleteKM: Timed out. Ops pending.")); + +#if defined(PVRSRV_NEED_PVR_TRACE) + { + PVRSRV_SYNC_DATA *psSyncData = psSyncInfo->psSyncData; + + PVR_TRACE(("SGX2DQueryBlitsCompleteKM: Syncinfo: 0x%p, Syncdata: 0x%p", + psSyncInfo, psSyncData)); + + PVR_TRACE(("SGX2DQueryBlitsCompleteKM: Read ops complete: %d, Read ops pending: %d", psSyncData->ui32ReadOpsComplete, psSyncData->ui32ReadOpsPending)); + PVR_TRACE(("SGX2DQueryBlitsCompleteKM: Write ops complete: %d, Write ops pending: %d", psSyncData->ui32WriteOpsComplete, psSyncData->ui32WriteOpsPending)); + + } +#endif + + return PVRSRV_ERROR_TIMEOUT; +} + + +IMG_EXPORT +PVRSRV_ERROR SGXFlushHWRenderTargetKM(IMG_HANDLE psDeviceNode, + IMG_DEV_VIRTADDR sHWRTDataSetDevVAddr, + IMG_BOOL bForceCleanup) +{ + PVR_ASSERT(sHWRTDataSetDevVAddr.uiAddr != IMG_NULL); + + return SGXCleanupRequest(psDeviceNode, + &sHWRTDataSetDevVAddr, + PVRSRV_CLEANUPCMD_RT, + bForceCleanup); +} + + +IMG_UINT32 SGXConvertTimeStamp(PVRSRV_SGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32TimeWraps, + IMG_UINT32 ui32Time) +{ +#if defined(EUR_CR_TIMER) + PVR_UNREFERENCED_PARAMETER(psDevInfo); + PVR_UNREFERENCED_PARAMETER(ui32TimeWraps); + return ui32Time; +#else + IMG_UINT64 ui64Clocks; + IMG_UINT32 ui32Clocksx16; 
+ + ui64Clocks = ((IMG_UINT64)ui32TimeWraps * psDevInfo->ui32uKernelTimerClock) + + (psDevInfo->ui32uKernelTimerClock - (ui32Time & EUR_CR_EVENT_TIMER_VALUE_MASK)); + ui32Clocksx16 = (IMG_UINT32)(ui64Clocks / 16); + + return ui32Clocksx16; +#endif /* EUR_CR_TIMER */ +} + + +IMG_VOID SGXWaitClocks(PVRSRV_SGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32SGXClocks) +{ + /* + Round up to the next microsecond. + */ + OSWaitus(1 + (ui32SGXClocks * 1000000 / psDevInfo->ui32CoreClockSpeed)); +} + + + +/****************************************************************************** + End of file (sgxutils.c) +******************************************************************************/ diff --git a/sgx_km/eurasia_km/services4/srvkm/devices/sgx/sgxutils.h b/sgx_km/eurasia_km/services4/srvkm/devices/sgx/sgxutils.h new file mode 100644 index 0000000..fc2ef6f --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/devices/sgx/sgxutils.h @@ -0,0 +1,195 @@ +/*************************************************************************/ /*! +@Title Device specific utility routines declarations +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Inline functions/structures specific to SGX +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "perproc.h" +#include "sgxinfokm.h" + +/* PRQA S 3410 7 */ /* macros require the absence of some brackets */ +#define CCB_OFFSET_IS_VALID(type, psCCBMemInfo, psCCBKick, offset) \ + ((sizeof(type) <= (psCCBMemInfo)->uAllocSize) && \ + ((psCCBKick)->offset <= (psCCBMemInfo)->uAllocSize - sizeof(type))) + +#define CCB_DATA_FROM_OFFSET(type, psCCBMemInfo, psCCBKick, offset) \ + ((type *)(((IMG_CHAR *)(psCCBMemInfo)->pvLinAddrKM) + \ + (psCCBKick)->offset)) + +extern IMG_UINT64 ui64KickCount; + + +IMG_IMPORT +IMG_VOID SGXTestActivePowerEvent(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32CallerID); + +IMG_IMPORT +PVRSRV_ERROR SGXScheduleCCBCommand(PVRSRV_DEVICE_NODE *psDeviceNode, + SGXMKIF_CMD_TYPE eCommandType, + SGXMKIF_COMMAND *psCommandData, + IMG_UINT32 ui32CallerID, + IMG_UINT32 ui32PDumpFlags, + IMG_HANDLE hDevMemContext, + IMG_BOOL bLastInScene); +IMG_IMPORT +PVRSRV_ERROR SGXScheduleCCBCommandKM(PVRSRV_DEVICE_NODE *psDeviceNode, + SGXMKIF_CMD_TYPE eCommandType, + SGXMKIF_COMMAND *psCommandData, + IMG_UINT32 ui32CallerID, + IMG_UINT32 ui32PDumpFlags, + IMG_HANDLE hDevMemContext, + IMG_BOOL bLastInScene); + +IMG_IMPORT +PVRSRV_ERROR SGXScheduleProcessQueuesKM(PVRSRV_DEVICE_NODE *psDeviceNode); + +IMG_IMPORT +IMG_BOOL SGXIsDevicePowered(PVRSRV_DEVICE_NODE *psDeviceNode); + +IMG_IMPORT +IMG_HANDLE SGXRegisterHWRenderContextKM(IMG_HANDLE psDeviceNode, + IMG_CPU_VIRTADDR *psHWRenderContextCpuVAddr, + IMG_UINT32 ui32HWRenderContextSize, + IMG_UINT32 ui32OffsetToPDDevPAddr, + IMG_HANDLE hDevMemContext, + IMG_DEV_VIRTADDR *psHWRenderContextDevVAddr, + PVRSRV_PER_PROCESS_DATA *psPerProc); + +IMG_IMPORT +IMG_HANDLE SGXRegisterHWTransferContextKM(IMG_HANDLE psDeviceNode, + IMG_CPU_VIRTADDR *psHWTransferContextCpuVAddr, + IMG_UINT32 ui32HWTransferContextSize, + IMG_UINT32 ui32OffsetToPDDevPAddr, + IMG_HANDLE hDevMemContext, + IMG_DEV_VIRTADDR *psHWTransferContextDevVAddr, + 
PVRSRV_PER_PROCESS_DATA *psPerProc); + +IMG_IMPORT +PVRSRV_ERROR SGXFlushHWRenderTargetKM(IMG_HANDLE psSGXDevInfo, + IMG_DEV_VIRTADDR psHWRTDataSetDevVAddr, + IMG_BOOL bForceCleanup); + +IMG_IMPORT +PVRSRV_ERROR SGXUnregisterHWRenderContextKM(IMG_HANDLE hHWRenderContext, IMG_BOOL bForceCleanup); + +IMG_IMPORT +PVRSRV_ERROR SGXUnregisterHWTransferContextKM(IMG_HANDLE hHWTransferContext, IMG_BOOL bForceCleanup); + +IMG_IMPORT +PVRSRV_ERROR SGXSetRenderContextPriorityKM(IMG_HANDLE hDeviceNode, + IMG_HANDLE hHWRenderContext, + IMG_UINT32 ui32Priority, + IMG_UINT32 ui32OffsetOfPriorityField); + +IMG_IMPORT +PVRSRV_ERROR SGXSetTransferContextPriorityKM(IMG_HANDLE hDeviceNode, + IMG_HANDLE hHWTransferContext, + IMG_UINT32 ui32Priority, + IMG_UINT32 ui32OffsetOfPriorityField); + +#if defined(SGX_FEATURE_2D_HARDWARE) +IMG_IMPORT +IMG_HANDLE SGXRegisterHW2DContextKM(IMG_HANDLE psDeviceNode, + IMG_CPU_VIRTADDR *psHW2DContextCpuVAddr, + IMG_UINT32 ui32HW2DContextSize, + IMG_UINT32 ui32OffsetToPDDevPAddr, + IMG_HANDLE hDevMemContext, + IMG_DEV_VIRTADDR *psHW2DContextDevVAddr, + PVRSRV_PER_PROCESS_DATA *psPerProc); + +IMG_IMPORT +PVRSRV_ERROR SGXUnregisterHW2DContextKM(IMG_HANDLE hHW2DContext, IMG_BOOL bForceCleanup); +#endif + +IMG_UINT32 SGXConvertTimeStamp(PVRSRV_SGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32TimeWraps, + IMG_UINT32 ui32Time); + +/*! +******************************************************************************* + + @Function SGXWaitClocks + + @Description + + Wait for a specified number of SGX clock cycles to elapse. 
+ + @Input psDevInfo - SGX Device Info + @Input ui32SGXClocks - number of clock cycles to wait + + @Return IMG_VOID + +******************************************************************************/ +IMG_VOID SGXWaitClocks(PVRSRV_SGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32SGXClocks); + +PVRSRV_ERROR SGXCleanupRequest(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_DEV_VIRTADDR *psHWDataDevVAddr, + IMG_UINT32 ui32CleanupType, + IMG_BOOL bForceCleanup); + +IMG_IMPORT +PVRSRV_ERROR PVRSRVGetSGXRevDataKM(PVRSRV_DEVICE_NODE* psDeviceNode, IMG_UINT32 *pui32SGXCoreRev, + IMG_UINT32 *pui32SGXCoreID); + +/*! +****************************************************************************** + + @Function SGXContextSuspend + + @Description - Interface to the SGX microkernel to instruct it to suspend or + resume processing on a given context. This will interrupt current + processing of this context if a task is already running and is + interruptable. + + @Input psDeviceNode SGX device node + @Input psHWContextDevVAddr SGX virtual address of the context to be suspended + or resumed. 
Can be of type SGXMKIF_HWRENDERCONTEXT, + SGXMKIF_HWTRANSFERCONTEXT or SGXMKIF_HW2DCONTEXT + @Input bResume IMG_TRUE to put a context into suspend state, + IMG_FALSE to resume a previously suspended context + +******************************************************************************/ +PVRSRV_ERROR SGXContextSuspend(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_DEV_VIRTADDR *psHWContextDevVAddr, + IMG_BOOL bResume); + +/****************************************************************************** + End of file (sgxutils.h) +******************************************************************************/ diff --git a/sgx_km/eurasia_km/services4/srvkm/env/linux/Kbuild.mk b/sgx_km/eurasia_km/services4/srvkm/env/linux/Kbuild.mk new file mode 100644 index 0000000..53aad07 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/env/linux/Kbuild.mk @@ -0,0 +1,194 @@ +########################################################################### ### +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. 
+# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ########################################################################### + +pvrsrvkm-y += \ + services4/srvkm/env/linux/osfunc.o \ + services4/srvkm/env/linux/mutils.o \ + services4/srvkm/env/linux/mmap.o \ + services4/srvkm/env/linux/module.o \ + services4/srvkm/env/linux/pdump.o \ + services4/srvkm/env/linux/proc.o \ + services4/srvkm/env/linux/pvr_bridge_k.o \ + services4/srvkm/env/linux/pvr_debug.o \ + services4/srvkm/env/linux/mm.o \ + services4/srvkm/env/linux/mutex.o \ + services4/srvkm/env/linux/event.o \ + services4/srvkm/env/linux/osperproc.o \ + services4/srvkm/common/buffer_manager.o \ + services4/srvkm/common/devicemem.o \ + services4/srvkm/common/handle.o \ + services4/srvkm/common/hash.o \ + services4/srvkm/common/lists.o \ + services4/srvkm/common/mem.o \ + services4/srvkm/common/mem_debug.o \ + services4/srvkm/common/metrics.o \ + services4/srvkm/common/osfunc_common.o \ + services4/srvkm/common/pdump_common.o \ + services4/srvkm/common/perproc.o \ + services4/srvkm/common/power.o \ + services4/srvkm/common/pvrsrv.o \ + services4/srvkm/common/ra.o \ + services4/srvkm/common/refcount.o \ + services4/srvkm/common/resman.o \ + services4/srvkm/bridged/bridged_support.o \ + services4/srvkm/bridged/bridged_pvr_bridge.o \ + services4/system/$(PVR_SYSTEM)/sysconfig.o \ + services4/system/$(PVR_SYSTEM)/sysutils.o + +ifeq ($(SUPPORT_PVRSRV_DEVICE_CLASS),1) +pvrsrvkm-y += \ + services4/srvkm/common/deviceclass.o \ + services4/srvkm/common/queue.o +endif + +ifeq ($(SUPPORT_ION),1) +pvrsrvkm-y += \ + services4/srvkm/env/linux/ion.o +ifeq ($(LMA),1) +pvrsrvkm-y += \ + services4/srvkm/env/linux/lma_heap_ion.o +endif +endif + +ifeq ($(SUPPORT_DMABUF),1) +pvrsrvkm-y += \ + services4/srvkm/env/linux/dmabuf.o \ + services4/srvkm/env/linux/pvr_linux_fence.o +endif + +ifeq ($(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC),1) +pvrsrvkm-y += \ + services4/srvkm/env/linux/pvr_sync.o \ + services4/srvkm/env/linux/pvr_sync_common.o +endif + +ifeq 
($(PVR_ANDROID_NATIVE_WINDOW_HAS_FENCE),1) +pvrsrvkm-y += \ + services4/srvkm/env/linux/pvr_sync_common.o \ + services4/srvkm/env/linux/pvr_fence.o \ + services4/srvkm/env/linux/dma_fence_sync_native_server.o \ + services4/srvkm/env/linux/pvr_counting_timeline.o \ + services4/srvkm/env/linux/pvr_sw_fence.o +endif + +ifeq ($(TTRACE),1) +pvrsrvkm-y += \ + services4/srvkm/common/ttrace.o +endif + +ifeq ($(SUPPORT_PVRSRV_ANDROID_SYSTRACE),1) +pvrsrvkm-y += \ + services4/srvkm/env/linux/systrace.o +endif + +ifneq ($(W),1) +CFLAGS_osfunc.o := -Werror +CFLAGS_mutils.o := -Werror +CFLAGS_mmap.o := -Werror +CFLAGS_module.o := -Werror +CFLAGS_pdump.o := -Werror +CFLAGS_proc.o := -Werror +CFLAGS_pvr_bridge_k.o := -Werror +CFLAGS_pvr_debug.o := -Werror +CFLAGS_mm.o := -Werror +CFLAGS_mutex.o := -Werror +CFLAGS_event.o := -Werror +CFLAGS_osperproc.o := -Werror +CFLAGS_buffer_manager.o := -Werror +CFLAGS_devicemem.o := -Werror +CFLAGS_deviceclass.o := -Werror +CFLAGS_handle.o := -Werror +CFLAGS_hash.o := -Werror +CFLAGS_metrics.o := -Werror +CFLAGS_pvrsrv.o := -Werror +CFLAGS_queue.o := -Werror +CFLAGS_ra.o := -Werror +CFLAGS_resman.o := -Werror +CFLAGS_power.o := -Werror +CFLAGS_mem.o := -Werror +CFLAGS_pdump_common.o := -Werror +CFLAGS_bridged_support.o := -Werror +CFLAGS_bridged_pvr_bridge.o := -Werror +CFLAGS_perproc.o := -Werror +CFLAGS_lists.o := -Werror +CFLAGS_mem_debug.o := -Werror +CFLAGS_osfunc_common.o := -Werror +CFLAGS_refcount.o := -Werror +endif + +# SUPPORT_SGX==1 only + +pvrsrvkm-y += \ + services4/srvkm/bridged/sgx/bridged_sgx_bridge.o \ + services4/srvkm/devices/sgx/sgxinit.o \ + services4/srvkm/devices/sgx/sgxpower.o \ + services4/srvkm/devices/sgx/sgxreset.o \ + services4/srvkm/devices/sgx/sgxutils.o \ + services4/srvkm/devices/sgx/sgxkick.o \ + services4/srvkm/devices/sgx/sgxtransfer.o \ + services4/srvkm/devices/sgx/mmu.o \ + services4/srvkm/devices/sgx/pb.o + +ifneq ($(W),1) +CFLAGS_bridged_sgx_bridge.o := -Werror +CFLAGS_sgxinit.o := -Werror 
+CFLAGS_sgxpower.o := -Werror +CFLAGS_sgxreset.o := -Werror +CFLAGS_sgxutils.o := -Werror +CFLAGS_sgxkick.o := -Werror +CFLAGS_sgxtransfer.o := -Werror +CFLAGS_mmu.o := -Werror +CFLAGS_pb.o := -Werror +endif + +ifeq ($(SUPPORT_DRI_DRM),1) + +pvrsrvkm-y += \ + services4/srvkm/env/linux/pvr_drm.o + +ccflags-y += \ + -Iinclude/drm \ + -I$(TOP)/services4/include/env/linux \ + +ifeq ($(PVR_DRI_DRM_NOT_PCI),1) +ccflags-y += -I$(TOP)/services4/3rdparty/linux_drm +endif + +endif # SUPPORT_DRI_DRM diff --git a/sgx_km/eurasia_km/services4/srvkm/env/linux/Linux.mk b/sgx_km/eurasia_km/services4/srvkm/env/linux/Linux.mk new file mode 100644 index 0000000..535a1f9 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/env/linux/Linux.mk @@ -0,0 +1,45 @@ +########################################################################### ### +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. 
+# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +### ########################################################################### + +modules := srvkm + +srvkm_type := kernel_module +srvkm_target := pvrsrvkm.ko +srvkm_makefile := $(THIS_DIR)/Kbuild.mk diff --git a/sgx_km/eurasia_km/services4/srvkm/env/linux/dma_fence_sync_native_server.c b/sgx_km/eurasia_km/services4/srvkm/env/linux/dma_fence_sync_native_server.c new file mode 100644 index 0000000..b5cfe10 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/env/linux/dma_fence_sync_native_server.c @@ -0,0 +1,94 @@ +/*************************************************************************/ /*! +@File sync_native_server.c +@Title Native implementation of server fence sync interface. +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@Description The server implementation of software native synchronisation. +@License Strictly Confidential. +*/ /**************************************************************************/ + +#include +#include +#include +#include +#include + +#include "img_types.h" +#include "services_headers.h" +#include "servicesext.h" +#include "pvrsrv_sync_server.h" +#include "pvr_fence.h" +#include "pvr_counting_timeline.h" + +struct dma_fence* SyncSWTimelineFenceCreateKM(IMG_INT32 iSWTimeline, + IMG_UINT32 ui32NextSyncPtValue, + const IMG_CHAR *pszFenceName) +{ + PVRSRV_ERROR eError; + struct PVR_COUNTING_FENCE_TIMELINE *psSWTimeline; + struct dma_fence *psFence = NULL; + + psSWTimeline = pvr_sync_get_sw_timeline(iSWTimeline); + if (!psSWTimeline) + { + /* unrecognised timeline */ + PVR_DPF((PVR_DBG_ERROR, "%s: unrecognised timeline", __func__)); + goto ErrorOut; + } + + psFence = pvr_counting_fence_create(psSWTimeline, ui32NextSyncPtValue); + pvr_counting_fence_timeline_put(psSWTimeline); + if(!psFence) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto ErrorPutFence; + } + + return psFence; + +ErrorPutFence: + dma_fence_put(psFence); +ErrorOut: + return IMG_NULL; +} + +PVRSRV_ERROR SyncSWTimelineAdvanceKM(IMG_PVOID pvSWTimeline) +{ + pvr_counting_fence_timeline_inc(pvSWTimeline, 1); + return PVRSRV_OK; +} + +PVRSRV_ERROR SyncSWTimelineReleaseKM(IMG_PVOID pvSWTimeline) +{ + pvr_counting_fence_timeline_put(pvSWTimeline); + return PVRSRV_OK; +} + +PVRSRV_ERROR SyncSWTimelineFenceReleaseKM(IMG_PVOID pvSWFenceObj) +{ + dma_fence_put(pvSWFenceObj); + return PVRSRV_OK; +} + +PVRSRV_ERROR SyncSWGetTimelineObj(IMG_INT32 i32SWTimeline, IMG_PVOID *ppvSWTimelineObj) +{ + struct PVR_COUNTING_FENCE_TIMELINE *psSwTimeline = pvr_sync_get_sw_timeline(i32SWTimeline); + + if (psSwTimeline == NULL) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + *ppvSWTimelineObj = psSwTimeline; + return PVRSRV_OK; +} + +PVRSRV_ERROR SyncSWGetFenceObj(IMG_INT32 i32SWFence, IMG_PVOID 
*ppvSWFenceObj) +{ + struct dma_fence *psFence = sync_file_get_fence(i32SWFence); + + if(psFence == NULL) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + *ppvSWFenceObj = (IMG_PVOID*)psFence; + return PVRSRV_OK; +} diff --git a/sgx_km/eurasia_km/services4/srvkm/env/linux/dmabuf.c b/sgx_km/eurasia_km/services4/srvkm/env/linux/dmabuf.c new file mode 100644 index 0000000..0051bff --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/env/linux/dmabuf.c @@ -0,0 +1,328 @@ +/*************************************************************************/ /*! +@Title Dma_buf support code. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
/*
 * NOTE(review): this region was recovered from a mangled patch extraction;
 * the angle-bracket include names were lost and have been reconstructed from
 * the APIs used below (dma_buf_*, sg_*, IS_ERR_OR_NULL, LINUX_VERSION_CODE,
 * kzalloc) — confirm against the original file.
 */
#include <linux/version.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/scatterlist.h>
#if defined(SUPPORT_DRI_DRM)
#include <drm/drmP.h>
#endif

#include "services_headers.h"
#include "pvr_debug.h"
#include "linkage.h"
#include "pvr_bridge.h"

/* Per-import book-keeping for one dma_buf brought into services. */
struct dmabuf_import
{
	/* The imported buffer; holds the reference taken by dma_buf_get(). */
	struct dma_buf *dma_buf;

	/* This device's attachment to the buffer. */
	struct dma_buf_attachment *attachment;

	/* Scatter/gather table returned by dma_buf_map_attachment(). */
	struct sg_table *sg_table;

#if defined(PDUMP)
	/* Kernel virtual mapping of the buffer, only kept so PDUMP can
	 * capture its contents; NULL if the CPU-access/vmap step failed. */
	void *kvaddr;
#endif /* defined(PDUMP) */
};

/*
 * Tear down an import created by DmaBufImportAndAcquirePhysAddr() and free
 * the book-keeping structure.
 *
 * Also used on that function's error path, where only some stages may have
 * completed — hence every stage is checked (IS_ERR_OR_NULL) before being
 * undone.  The page-address array is NOT freed here; it is owned by the
 * caller of the import function.
 */
IMG_VOID DmaBufUnimportAndReleasePhysAddr(IMG_HANDLE hImport)
{
	struct dmabuf_import *import;

	import = (struct dmabuf_import *)hImport;

#if defined(PDUMP)
	if (import->kvaddr)
	{
		dma_buf_vunmap(import->dma_buf, import->kvaddr);
		/* Older kernels (< 4.6) take a start/len range here. */
		dma_buf_end_cpu_access(import->dma_buf,
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,6,0))
				       0, import->dma_buf->size,
#endif
				       DMA_BIDIRECTIONAL);
	}
#endif /* defined(PDUMP) */

	if (!IS_ERR_OR_NULL(import->sg_table))
	{
		dma_buf_unmap_attachment(import->attachment,
					 import->sg_table,
					 DMA_BIDIRECTIONAL);
	}

	if (!IS_ERR_OR_NULL(import->attachment))
	{
		dma_buf_detach(import->dma_buf, import->attachment);
	}

	if (!IS_ERR_OR_NULL(import->dma_buf))
	{
		dma_buf_put(import->dma_buf);
	}

	kfree(import);
}

/*
 * Import a dma_buf by file descriptor and return the system-physical address
 * of every page backing the requested [uiDmaBufOffset, +uiSize) window.
 *
 * i32FD             dma_buf file descriptor to import.
 * uiDmaBufOffset    byte offset into the buffer (need not be page aligned).
 * uiSize            window size in bytes; 0 means "whole buffer".
 * pui32PageCount    out: number of entries written to *ppsSysPhysAddr.
 * ppsSysPhysAddr    out: page-address array, allocated here, freed by caller.
 * puiMemInfoOffset  out: offset of the window within its first page.
 * ppvKernAddr       out: kernel mapping (PDUMP builds only, else NULL).
 * phImport          out: handle for DmaBufUnimportAndReleasePhysAddr().
 * phUnique          out: the underlying struct dma_buf pointer, used as a
 *                   unique key to detect repeat imports of the same buffer.
 *
 * Returns PVRSRV_OK or a PVRSRV_ERROR_* code; on failure all partially
 * acquired resources are released via DmaBufUnimportAndReleasePhysAddr().
 */
PVRSRV_ERROR DmaBufImportAndAcquirePhysAddr(const IMG_INT32 i32FD,
					    const IMG_SIZE_T uiDmaBufOffset,
					    const IMG_SIZE_T uiSize,
					    IMG_UINT32 *pui32PageCount,
					    IMG_SYS_PHYADDR **ppsSysPhysAddr,
					    IMG_SIZE_T *puiMemInfoOffset,
					    IMG_PVOID *ppvKernAddr,
					    IMG_HANDLE *phImport,
					    IMG_HANDLE *phUnique)
{
	struct dmabuf_import *import = NULL;
	struct device *dev = NULL;
	struct scatterlist *sg;
	size_t buf_size;
	size_t start_offset, end_offset, buf_offset, remainder;
	unsigned npages = 0;
	unsigned pti = 0;		/* next free slot in the page table */
	IMG_SYS_PHYADDR *spaddr = NULL;
	unsigned i;
	PVRSRV_ERROR eError = PVRSRV_ERROR_INVALID_PARAMS;
#if defined(PDUMP)
	int err;
#endif /* defined(PDUMP) */

	import = kzalloc(sizeof(*import), GFP_KERNEL);
	if (!import)
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: Out of memory", __func__));
		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
		goto error;
	}

	dev = PVRLDMGetDevice();
	if (!dev)
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: Couldn't get device", __func__));
		eError = PVRSRV_ERROR_NOT_SUPPORTED;
		goto error;
	}

	import->dma_buf = dma_buf_get((int)i32FD);
	if (IS_ERR(import->dma_buf))
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: dma_buf_get failed: %ld", __func__, PTR_ERR(import->dma_buf)));
		eError = PVRSRV_ERROR_BAD_MAPPING;
		goto error;
	}

	/* uiSize == 0 requests the whole buffer. */
	buf_size = uiSize ? uiSize : import->dma_buf->size;

	/* Reject windows past the end of the buffer; the second test catches
	 * arithmetic wrap-around of offset + size. */
	if ((uiDmaBufOffset + buf_size) > import->dma_buf->size ||
	    (size_t)(uiDmaBufOffset + buf_size) < buf_size)
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: Bad size and/or offset for FD", __func__));
		eError = PVRSRV_ERROR_INVALID_PARAMS;
		goto error;
	}

	import->attachment = dma_buf_attach(import->dma_buf, dev);
	if (IS_ERR(import->attachment))
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: dma_buf_attach failed: %ld", __func__, PTR_ERR(import->attachment)));
		eError = PVRSRV_ERROR_BAD_MAPPING;
		goto error;
	}

	import->sg_table = dma_buf_map_attachment(import->attachment,
						  DMA_BIDIRECTIONAL);
	if (IS_ERR(import->sg_table))
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: dma_buf_map_attachment failed: %ld", __func__, PTR_ERR(import->sg_table)));
		eError = PVRSRV_ERROR_BAD_MAPPING;
		goto error;
	}

	/* Page-align the window: round the start down and the end up. */
	start_offset = PAGE_MASK & uiDmaBufOffset;
	end_offset = PAGE_ALIGN(uiDmaBufOffset + buf_size);

	/* Where the requested data begins within the first page. */
	*puiMemInfoOffset = (uiDmaBufOffset - start_offset);

	npages = (end_offset - start_offset) >> PAGE_SHIFT;

	/* The following allocation will be freed by the caller */
	eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
			npages * sizeof(IMG_SYS_PHYADDR),
			(IMG_VOID **)&spaddr, IMG_NULL,
			"Array of Page Addresses");
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: OSAllocMem failed: %s", __func__, PVRSRVGetErrorStringKM(eError)));
		goto error;
	}

	buf_offset = 0;
	remainder = buf_size;
	/* NOTE(review): start_offset/end_offset are recomputed here with the
	 * same values as above — redundant but harmless; kept as-is. */
	start_offset = PAGE_MASK & uiDmaBufOffset;
	end_offset = PAGE_ALIGN(uiDmaBufOffset + buf_size);

	/* Walk the scatterlist, collecting the physical address of each page
	 * that overlaps the aligned window.  buf_offset tracks our position
	 * within the whole dma_buf; start_offset advances to the beginning
	 * of the not-yet-captured remainder. */
	for_each_sg(import->sg_table->sgl, sg, import->sg_table->nents, i)
	{
		if (buf_offset >= end_offset)
		{
			break;
		}

		if ((start_offset >= buf_offset) && (start_offset < buf_offset + sg_dma_len(sg)))
		{
			/* This segment contains the start of the remaining
			 * window: capture pages from it. */
			size_t sg_start;
			size_t sg_pos;
			size_t sg_remainder;

			sg_start = start_offset - buf_offset;

			sg_remainder = MIN(sg_dma_len(sg) - sg_start, remainder);

			for (sg_pos = sg_start; sg_pos < sg_start + sg_remainder; sg_pos += PAGE_SIZE)
			{
				IMG_CPU_PHYADDR cpaddr;

				/* Page-aligned CPU physical address of this
				 * position within the segment. */
				cpaddr.uiAddr = PAGE_MASK & (sg_phys(sg) + sg_pos);
				BUG_ON(pti >= npages);

				spaddr[pti++] = SysCpuPAddrToSysPAddr(cpaddr);
			}

			remainder -= sg_remainder;
			buf_offset += sg_dma_len(sg);
			start_offset = buf_offset;
		}
		else
		{
			/* Segment entirely before the window; skip it. */
			buf_offset += sg_dma_len(sg);
		}
	}
	/* Everything requested must have been covered by the scatterlist. */
	BUG_ON(remainder);

#if defined(PDUMP)
	/* Map the buffer into the kernel so PDUMP can read it.  Failure is
	 * non-fatal: the import still succeeds, just without a mapping. */
	err = dma_buf_begin_cpu_access(import->dma_buf,
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,6,0))
				       0, import->dma_buf->size,
#endif
				       DMA_BIDIRECTIONAL);
	if (err)
	{
		PVR_DPF((PVR_DBG_MESSAGE, "%s: dma_buf_begin_cpu_access failed: %d", __func__, err));
	}
	else
	{
		import->kvaddr = dma_buf_vmap(import->dma_buf);
		*ppvKernAddr = import->kvaddr;
	}
#else /* defined(PDUMP) */
	*ppvKernAddr = NULL;
#endif /* defined(PDUMP) */

	*pui32PageCount = pti;
	*ppsSysPhysAddr = spaddr;
	*phImport = (IMG_HANDLE)import;
	*phUnique = (IMG_HANDLE)import->dma_buf;

	return PVRSRV_OK;
error:
	if (import)
	{
		/* Releases whichever stages completed and frees 'import'. */
		DmaBufUnimportAndReleasePhysAddr((IMG_HANDLE)import);
	}

	if (spaddr)
	{
		OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
			  npages * sizeof(IMG_SYS_PHYADDR),
			  (IMG_VOID **)&spaddr, IMG_NULL);
	}

	return eError;
}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0))
/* Wrap the dma_buf's reservation object in a small heap struct so the sync
 * code can reach it via an opaque handle.  Returns NULL on allocation
 * failure; the caller must free the handle with DmaBufFreeNativeSyncHandle. */
IMG_HANDLE DmaBufGetNativeSyncHandle(IMG_HANDLE hImport)
{
	struct dmabuf_import *import;
	struct dmabuf_resvinfo *info;

	import = (struct dmabuf_import *)hImport;

	info = kzalloc(sizeof(*info), GFP_KERNEL);

	if (info)
	{
		info->resv = import->dma_buf->resv;
	}

	return (IMG_HANDLE)info;
}

/* Free a handle returned by DmaBufGetNativeSyncHandle (NULL is safe). */
void DmaBufFreeNativeSyncHandle(IMG_HANDLE hSync)
{
	struct dmabuf_resvinfo *info;

	info = (struct dmabuf_resvinfo *)hSync;

	kfree(info);
}
#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0)) */
/* Pre-3.17 kernels lack the reservation-object API: stubs only. */
IMG_HANDLE DmaBufGetNativeSyncHandle(IMG_HANDLE hImport)
{
	(void) hImport;

	return IMG_NULL;
}

void DmaBufFreeNativeSyncHandle(IMG_HANDLE hSync)
{
	(void) hSync;
}
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0)) */

#endif /* defined(SUPPORT_DMABUF) */
#ifndef __IMG_LINUX_DMABUF_H__
#define __IMG_LINUX_DMABUF_H__

#if defined(SUPPORT_DMABUF)

/* NOTE(review): the angle-bracket include name was lost in extraction;
 * <linux/version.h> reconstructed from the LINUX_VERSION_CODE use below —
 * confirm against the original file. */
#include <linux/version.h>

#include "img_types.h"
#include "servicesext.h"

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0))
/* NOTE(review): reconstructed; struct reservation_object lives in
 * <linux/reservation.h> on these kernels — confirm. */
#include <linux/reservation.h>
#include "pvr_bridge.h"

/* Opaque wrapper handed out by DmaBufGetNativeSyncHandle(): carries the
 * reservation object of an imported dma_buf for the native sync code. */
struct dmabuf_resvinfo
{
	struct reservation_object *resv;
};

/* Unwrap a DmaBufGetNativeSyncHandle() handle to its reservation object. */
static inline void *DmaBufGetReservationObject(IMG_HANDLE hSync)
{
	struct dmabuf_resvinfo *info;

	info = (struct dmabuf_resvinfo *)hSync;

	return info->resv;
}
#else
/* Pre-3.17 kernels have no reservation objects: always NULL. */
static inline void *DmaBufGetReservationObject(IMG_HANDLE hSync)
{
	(void) hSync;

	return (void *)0;
}
#endif

/* Module init/deinit hooks. */
PVRSRV_ERROR DmaBufInit(IMG_VOID);

IMG_VOID DmaBufDeinit(IMG_VOID);

/* Import a dma_buf FD and acquire the physical addresses of the pages
 * backing [uiDmaBufOffset, +uiSize); see dmabuf.c for full contract.
 * *ppsSysPhysAddr is allocated by the callee and freed by the caller. */
PVRSRV_ERROR DmaBufImportAndAcquirePhysAddr(const IMG_INT32 i32DmaBufFD,
					const IMG_SIZE_T uiDmaBufOffset,
					const IMG_SIZE_T uiSize,
					IMG_UINT32 *pui32PageCount,
					IMG_SYS_PHYADDR **ppsSysPhysAddr,
					IMG_SIZE_T *puiMemInfoOffset,
					IMG_PVOID *ppvKernAddr,
					IMG_HANDLE *phImport,
					IMG_HANDLE *phUnique);

/* Release everything acquired by DmaBufImportAndAcquirePhysAddr(). */
IMG_VOID DmaBufUnimportAndReleasePhysAddr(IMG_HANDLE hImport);

/* Create/free a native-sync handle wrapping the import's reservation
 * object (stubs returning NULL on pre-3.17 kernels). */
IMG_HANDLE DmaBufGetNativeSyncHandle(IMG_HANDLE hImport);
void DmaBufFreeNativeSyncHandle(IMG_HANDLE hSync);
#endif /* defined(SUPPORT_DMABUF) */

#endif /* __IMG_LINUX_DMABUF_H__ */
a/sgx_km/eurasia_km/services4/srvkm/env/linux/env_data.h b/sgx_km/eurasia_km/services4/srvkm/env/linux/env_data.h new file mode 100644 index 0000000..b838809 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/env/linux/env_data.h @@ -0,0 +1,93 @@ +/*************************************************************************/ /*! +@Title Environmental Data header file +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Linux-specific part of system data. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
#ifndef _ENV_DATA_
#define _ENV_DATA_

/* NOTE(review): the angle-bracket include names were lost in extraction;
 * <linux/interrupt.h> (tasklet_struct) and <linux/pci.h> (struct pci_dev,
 * DEVICE_COUNT_RESOURCE) reconstructed from the types used below — confirm
 * against the original file. */
#include <linux/interrupt.h>
#include <linux/pci.h>

#if defined(PVR_LINUX_MISR_USING_WORKQUEUE) || defined(PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE)
#include <linux/workqueue.h>
#endif

/*
 * Env data specific to linux - convenient place to put this
 */

/* Fairly arbitrary sizes - hopefully enough for all bridge calls */
#define PVRSRV_MAX_BRIDGE_IN_SIZE	0x1000
#define PVRSRV_MAX_BRIDGE_OUT_SIZE	0x1000

/* Per-device PCI state: the kernel pci_dev plus which BARs we claimed. */
typedef struct _PVR_PCI_DEV_TAG
{
	struct pci_dev		*psPCIDev;
	HOST_PCI_INIT_FLAGS	ePCIFlags;
	/* Tracks which PCI resources this driver marked in-use, so only
	 * those are released on teardown. */
	IMG_BOOL		abPCIResourceInUse[DEVICE_COUNT_RESOURCE];
} PVR_PCI_DEV;

/* Linux-specific environment data hung off the services device node. */
typedef struct _ENV_DATA_TAG
{
	/* Scratch buffers for marshalling bridge call parameters. */
	IMG_VOID		*pvBridgeData;
	struct pm_dev		*psPowerDevice;
	/* LISR/MISR (interrupt top/bottom half) registration state. */
	IMG_BOOL		bLISRInstalled;
	IMG_BOOL		bMISRInstalled;
	IMG_UINT32		ui32IRQ;
	IMG_VOID		*pvISRCookie;
#if defined(PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE)
	struct workqueue_struct	*psWorkQueue;
#endif
#if defined(PVR_LINUX_MISR_USING_WORKQUEUE) || defined(PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE)
	/* MISR deferred to process context via a work item... */
	struct work_struct	sMISRWork;
	IMG_VOID		*pvMISRData;
#else
	/* ...or run in softirq context as a tasklet. */
	struct tasklet_struct	sMISRTasklet;
#endif
#if defined (SUPPORT_ION)
	/* Driver-owned ion device and heaps (see ion.c). */
	IMG_HANDLE		hIonHeaps;
	IMG_HANDLE		hIonDev;
#endif
} ENV_DATA;

#endif /* _ENV_DATA_ */
/*****************************************************************************
 End of file (env_data.h)
*****************************************************************************/
+*****************************************************************************/ diff --git a/sgx_km/eurasia_km/services4/srvkm/env/linux/env_perproc.h b/sgx_km/eurasia_km/services4/srvkm/env/linux/env_perproc.h new file mode 100644 index 0000000..788e80f --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/env/linux/env_perproc.h @@ -0,0 +1,79 @@ +/*************************************************************************/ /*! +@Title OS specific per process data interface +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Linux per process data +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. 
#ifndef __ENV_PERPROC_H__
#define __ENV_PERPROC_H__

/* NOTE(review): the angle-bracket include name was lost in extraction;
 * <linux/list.h> reconstructed from the list_head use below — confirm
 * against the original file. */
#include <linux/list.h>

#include "proc.h"
#include "services.h"
#include "handle.h"

#define ION_CLIENT_NAME_SIZE	50

/* Linux-specific state kept for each process connected to services. */
typedef struct _PVRSRV_ENV_PER_PROCESS_DATA_
{
	/* Block allocator this structure was allocated from. */
	IMG_HANDLE		hBlockAlloc;
	/* This process's directory under /proc. */
	struct proc_dir_entry	*psProcDir;
#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
	struct list_head	sDRMAuthListHead;
#endif
#if defined(SUPPORT_ION)
	/* Per-process ion client and its (fixed-size) name buffer. */
	struct ion_client	*psIONClient;
	IMG_CHAR		azIonClientName[ION_CLIENT_NAME_SIZE];
#endif
} PVRSRV_ENV_PER_PROCESS_DATA;

/* Remove the per-process /proc directory created on connect. */
IMG_VOID RemovePerProcessProcDir(PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc);

/* mmap bookkeeping hooks called on process connect/disconnect. */
PVRSRV_ERROR LinuxMMapPerProcessConnect(PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc);

IMG_VOID LinuxMMapPerProcessDisconnect(PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc);

/* Configure handle-base options needed by the mmap code. */
PVRSRV_ERROR LinuxMMapPerProcessHandleOptions(PVRSRV_HANDLE_BASE *psHandleBase);

/* Private data of a process currently being torn down, or NULL. */
IMG_HANDLE LinuxTerminatingProcessPrivateData(IMG_VOID);

#endif /* __ENV_PERPROC_H__ */

/******************************************************************************
 End of file (env_perproc.h)
******************************************************************************/
mode 100644 index 0000000..c60f2d5 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/env/linux/event.c @@ -0,0 +1,413 @@ +/*************************************************************************/ /*! +@Title Event Object +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
#include <linux/version.h>

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38))
#ifndef AUTOCONF_INCLUDED
#include <linux/config.h>
#endif
#endif

/* NOTE(review): the remaining angle-bracket include names were lost in
 * extraction; the set below is reconstructed from the APIs used in this
 * translation unit (waitqueues, rwlocks, lists, jiffies, atomics) —
 * confirm against the original file. */
#include <asm/io.h>
#include <asm/page.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)) && (LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0))
#include <asm/system.h>
#endif
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>

#include "img_types.h"
#include "services_headers.h"
#include "mm.h"
#include "pvrmmap.h"
#include "mmap.h"
#include "env_data.h"
#include "mutex.h"
#include "lock.h"
#include "event.h"

/* A set of event objects sharing one signal source.  sLock is a
 * reader/writer spinlock: readers iterate (signal path), writers
 * add/remove entries (see the irqsave variants at the write sites). */
typedef struct PVRSRV_LINUX_EVENT_OBJECT_LIST_TAG
{
	rwlock_t		sLock;	/* guards sList */
	struct list_head	sList;	/* of PVRSRV_LINUX_EVENT_OBJECT.sList */

} PVRSRV_LINUX_EVENT_OBJECT_LIST;


/* One waiter.  A signal bumps sTimeStamp and wakes sWait; the waiter
 * compares sTimeStamp against ui32TimeStampPrevious to tell a real
 * signal from a spurious wakeup. */
typedef struct PVRSRV_LINUX_EVENT_OBJECT_TAG
{
	atomic_t	sTimeStamp;		/* incremented per signal */
	IMG_UINT32	ui32TimeStampPrevious;	/* last value consumed by a wait */
#if defined(DEBUG)
	IMG_UINT	ui32Stats;		/* wait-loop iterations (debug) */
#endif
	wait_queue_head_t sWait;		/* where the waiter sleeps */
	struct list_head sList;			/* link on the owning list */
	IMG_HANDLE	hResItem;		/* resman registration for cleanup */
	PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList; /* owner */
} PVRSRV_LINUX_EVENT_OBJECT;
+****************************************************************************** + + @Function LinuxEventObjectListCreate + + @Description + + Linux wait object list creation + + @Output hOSEventKM : Pointer to the event object list handle + + @Return PVRSRV_ERROR : Error code + +******************************************************************************/ +PVRSRV_ERROR LinuxEventObjectListCreate(IMG_HANDLE *phEventObjectList) +{ + PVRSRV_LINUX_EVENT_OBJECT_LIST *psEventObjectList; + + if(OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, sizeof(PVRSRV_LINUX_EVENT_OBJECT_LIST), + (IMG_VOID **)&psEventObjectList, IMG_NULL, + "Linux Event Object List") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectCreate: failed to allocate memory for event list")); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + INIT_LIST_HEAD(&psEventObjectList->sList); + + rwlock_init(&psEventObjectList->sLock); + + *phEventObjectList = (IMG_HANDLE *) psEventObjectList; + + return PVRSRV_OK; +} + +/*! +****************************************************************************** + + @Function LinuxEventObjectListDestroy + + @Description + + Linux wait object list destruction + + @Input hOSEventKM : Event object list handle + + @Return PVRSRV_ERROR : Error code + +******************************************************************************/ +PVRSRV_ERROR LinuxEventObjectListDestroy(IMG_HANDLE hEventObjectList) +{ + + PVRSRV_LINUX_EVENT_OBJECT_LIST *psEventObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST *) hEventObjectList ; + + if(psEventObjectList) + { + IMG_BOOL bListEmpty; + + read_lock(&psEventObjectList->sLock); + bListEmpty = list_empty(&psEventObjectList->sList); + read_unlock(&psEventObjectList->sLock); + + if (!bListEmpty) + { + PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectListDestroy: Event List is not empty")); + return PVRSRV_ERROR_UNABLE_TO_DESTROY_EVENT; + } + + OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, sizeof(PVRSRV_LINUX_EVENT_OBJECT_LIST), psEventObjectList, IMG_NULL); + /*not nulling 
pointer, copy on stack*/ + } + + return PVRSRV_OK; +} + + +/*! +****************************************************************************** + + @Function LinuxEventObjectDelete + + @Description + + Linux wait object removal + + @Input hOSEventObjectList : Event object list handle + @Input hOSEventObject : Event object handle + @Input bResManCallback : Called from the resman + + @Return PVRSRV_ERROR : Error code + +******************************************************************************/ +PVRSRV_ERROR LinuxEventObjectDelete(IMG_HANDLE hOSEventObjectList, IMG_HANDLE hOSEventObject) +{ + if(hOSEventObjectList) + { + if(hOSEventObject) + { + PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *)hOSEventObject; +#if defined(DEBUG) + PVR_DPF((PVR_DBG_MESSAGE, "LinuxEventObjectListDelete: Event object waits: %u", psLinuxEventObject->ui32Stats)); +#endif + if(ResManFreeResByPtr(psLinuxEventObject->hResItem, CLEANUP_WITH_POLL) != PVRSRV_OK) + { + return PVRSRV_ERROR_UNABLE_TO_DESTROY_EVENT; + } + + return PVRSRV_OK; + } + } + return PVRSRV_ERROR_UNABLE_TO_DESTROY_EVENT; + +} + +/*! 
+****************************************************************************** + + @Function LinuxEventObjectDeleteCallback + + @Description + + Linux wait object removal + + @Input hOSEventObject : Event object handle + + @Return PVRSRV_ERROR : Error code + +******************************************************************************/ +static PVRSRV_ERROR LinuxEventObjectDeleteCallback(IMG_PVOID pvParam, IMG_UINT32 ui32Param, IMG_BOOL bForceCleanup) +{ + PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = pvParam; + PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = psLinuxEventObject->psLinuxEventObjectList; + unsigned long ulLockFlags; + + PVR_UNREFERENCED_PARAMETER(ui32Param); + PVR_UNREFERENCED_PARAMETER(bForceCleanup); + + write_lock_irqsave(&psLinuxEventObjectList->sLock, ulLockFlags); + list_del(&psLinuxEventObject->sList); + write_unlock_irqrestore(&psLinuxEventObjectList->sLock, ulLockFlags); + +#if defined(DEBUG) + PVR_DPF((PVR_DBG_MESSAGE, "LinuxEventObjectDeleteCallback: Event object waits: %u", psLinuxEventObject->ui32Stats)); +#endif + + OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, sizeof(PVRSRV_LINUX_EVENT_OBJECT), psLinuxEventObject, IMG_NULL); + /*not nulling pointer, copy on stack*/ + + return PVRSRV_OK; +} +/*! 
/*!
******************************************************************************

 @Function	LinuxEventObjectAdd

 @Description

 Allocate a new event object, register it with resman (so it is cleaned
 up if the owning process dies) and link it onto the given list.

 @Input    hOSEventObjectList : Event object list handle
 @Output   phOSEventObject : Pointer to the event object handle

 @Return   PVRSRV_ERROR  :  Error code

******************************************************************************/
PVRSRV_ERROR LinuxEventObjectAdd(IMG_HANDLE hOSEventObjectList, IMG_HANDLE *phOSEventObject)
 {
	PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject;
	PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST*)hOSEventObjectList;
	IMG_UINT32 ui32PID = OSGetCurrentProcessIDKM();
	PVRSRV_PER_PROCESS_DATA *psPerProc;
	unsigned long ulLockFlags;

	psPerProc = PVRSRVPerProcessData(ui32PID);
	if (psPerProc == IMG_NULL)
	{
		PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectAdd: Couldn't find per-process data"));
		return PVRSRV_ERROR_OUT_OF_MEMORY;
	}

	/* allocate completion variable */
	if(OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, sizeof(PVRSRV_LINUX_EVENT_OBJECT),
		(IMG_VOID **)&psLinuxEventObject, IMG_NULL,
		"Linux Event Object") != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectAdd: failed to allocate memory "));
		return PVRSRV_ERROR_OUT_OF_MEMORY;
	}

	INIT_LIST_HEAD(&psLinuxEventObject->sList);

	/* Timestamp starts equal to ui32TimeStampPrevious: no pending signal. */
	atomic_set(&psLinuxEventObject->sTimeStamp, 0);
	psLinuxEventObject->ui32TimeStampPrevious = 0;

#if defined(DEBUG)
	psLinuxEventObject->ui32Stats = 0;
#endif
	init_waitqueue_head(&psLinuxEventObject->sWait);

	psLinuxEventObject->psLinuxEventObjectList = psLinuxEventObjectList;

	/* Cleanup path: LinuxEventObjectDeleteCallback unlinks and frees the
	 * object when the resource is released (explicitly or at process
	 * teardown).
	 * NOTE(review): the return value is not checked — if registration
	 * fails, hResItem would be invalid and the object never freed;
	 * confirm ResManRegisterRes cannot fail here. */
	psLinuxEventObject->hResItem = ResManRegisterRes(psPerProc->hResManContext,
													 RESMAN_TYPE_EVENT_OBJECT,
													 psLinuxEventObject,
													 0,
													 &LinuxEventObjectDeleteCallback);

	/* Writer side of the list lock: interrupts disabled, see the
	 * delete callback for the matching pattern. */
	write_lock_irqsave(&psLinuxEventObjectList->sLock, ulLockFlags);
	list_add(&psLinuxEventObject->sList, &psLinuxEventObjectList->sList);
	write_unlock_irqrestore(&psLinuxEventObjectList->sLock, ulLockFlags);

	*phOSEventObject = psLinuxEventObject;

	return PVRSRV_OK;
}

/*!
******************************************************************************

 @Function	LinuxEventObjectSignal

 @Description

 Signal every event object on the list: bump each object's timestamp and
 wake any task sleeping on its waitqueue.

 @Input    hOSEventObjectList : Event object list handle

 @Return   PVRSRV_ERROR  :  Error code

******************************************************************************/
PVRSRV_ERROR LinuxEventObjectSignal(IMG_HANDLE hOSEventObjectList)
{
	PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject;
	PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST*)hOSEventObjectList;
	struct list_head *psListEntry, *psList;

	psList = &psLinuxEventObjectList->sList;

	/*
	 * We don't take the write lock in interrupt context, so we don't
	 * need to use read_lock_irqsave.
	 */
	read_lock(&psLinuxEventObjectList->sLock);
	list_for_each(psListEntry, psList)
	{

		psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *)list_entry(psListEntry, PVRSRV_LINUX_EVENT_OBJECT, sList);

		/* Increment before waking so the waiter's timestamp compare
		 * in LinuxEventObjectWait sees the new value. */
		atomic_inc(&psLinuxEventObject->sTimeStamp);
		wake_up_interruptible(&psLinuxEventObject->sWait);
	}
	read_unlock(&psLinuxEventObjectList->sLock);

	return 	PVRSRV_OK;

}
/*!
******************************************************************************

 @Function	LinuxEventObjectWait

 @Description

 Block until the event object is signalled or the timeout expires.  A
 "signal" is detected as the atomic timestamp differing from the value
 consumed by the previous wait, which also catches signals delivered
 between waits.  The global bridge mutex is dropped while sleeping.

 @Input    hOSEventObject : Event object handle

 @Input   ui32MSTimeout : Time out value in msec

 @Return   PVRSRV_ERROR  :  Error code

******************************************************************************/
PVRSRV_ERROR LinuxEventObjectWait(IMG_HANDLE hOSEventObject, IMG_UINT32 ui32MSTimeout)
{
	IMG_UINT32 ui32TimeStamp;
	DEFINE_WAIT(sWait);

	PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *) hOSEventObject;

	IMG_UINT32 ui32TimeOutJiffies = msecs_to_jiffies(ui32MSTimeout);

	do
	{
		/* Queue ourselves BEFORE sampling the timestamp, so a signal
		 * arriving after the sample still wakes us. */
		prepare_to_wait(&psLinuxEventObject->sWait, &sWait, TASK_INTERRUPTIBLE);
		ui32TimeStamp = (IMG_UINT32)atomic_read(&psLinuxEventObject->sTimeStamp);

		/* Timestamp moved since our last consumed wait: signalled. */
		if(psLinuxEventObject->ui32TimeStampPrevious != ui32TimeStamp)
		{
			break;
		}

		/* Release the bridge lock while asleep so other bridge calls
		 * (including the one that will signal us) can run. */
		LinuxUnLockMutex(&gPVRSRVLock);

		/* Returns the jiffies remaining; 0 means the timeout expired. */
		ui32TimeOutJiffies = (IMG_UINT32)schedule_timeout((IMG_INT32)ui32TimeOutJiffies);

		LinuxLockMutexNested(&gPVRSRVLock, PVRSRV_LOCK_CLASS_BRIDGE);
#if defined(DEBUG)
		psLinuxEventObject->ui32Stats++;
#endif


	} while (ui32TimeOutJiffies);

	finish_wait(&psLinuxEventObject->sWait, &sWait);

	/* Consume the signal (or record the sampled value on timeout). */
	psLinuxEventObject->ui32TimeStampPrevious = ui32TimeStamp;

	return ui32TimeOutJiffies ? PVRSRV_OK : PVRSRV_ERROR_TIMEOUT;

}
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + + +PVRSRV_ERROR LinuxEventObjectListCreate(IMG_HANDLE *phEventObjectList); +PVRSRV_ERROR LinuxEventObjectListDestroy(IMG_HANDLE hEventObjectList); +PVRSRV_ERROR LinuxEventObjectAdd(IMG_HANDLE hOSEventObjectList, IMG_HANDLE *phOSEventObject); +PVRSRV_ERROR LinuxEventObjectDelete(IMG_HANDLE hOSEventObjectList, IMG_HANDLE hOSEventObject); +PVRSRV_ERROR LinuxEventObjectSignal(IMG_HANDLE hOSEventObjectList); +PVRSRV_ERROR LinuxEventObjectWait(IMG_HANDLE hOSEventObject, IMG_UINT32 ui32MSTimeout); diff --git a/sgx_km/eurasia_km/services4/srvkm/env/linux/ion.c b/sgx_km/eurasia_km/services4/srvkm/env/linux/ion.c new file mode 100644 index 0000000..8d3ddfa --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/env/linux/ion.c @@ -0,0 +1,580 @@ +/*************************************************************************/ /*! +@Title Ion driver inter-operability code. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "ion.h" + +/* Three possible configurations: + * + * - SUPPORT_ION && CONFIG_ION_OMAP + * Real ion support, but sharing with an SOC ion device. We need + * to co-share the heaps too. + * + * - SUPPORT_ION && !CONFIG_ION_OMAP + * "Reference" ion implementation. Creates its own ion device + * and heaps for the driver to use. 
+ */ + +#if defined(SUPPORT_ION) + +#include +#include +#include +#include + +#if defined(CONFIG_ION_OMAP) + +/* Real ion with sharing */ + +extern struct ion_device *omap_ion_device; +struct ion_device *gpsIonDev; + +PVRSRV_ERROR IonInit(IMG_VOID) +{ + gpsIonDev = omap_ion_device; + return PVRSRV_OK; +} + +IMG_VOID IonDeinit(IMG_VOID) +{ + gpsIonDev = IMG_NULL; +} + +#else /* defined(CONFIG_ION_OMAP) */ + +#if defined(CONFIG_ION_S5P) + +/* Real ion with sharing (s5pv210) */ + +extern struct ion_device *s5p_ion_device; +struct ion_device *gpsIonDev; + +PVRSRV_ERROR IonInit(IMG_VOID) +{ + gpsIonDev = s5p_ion_device; + return PVRSRV_OK; +} + +IMG_VOID IonDeinit(IMG_VOID) +{ + gpsIonDev = IMG_NULL; +} + +#else /* defined(CONFIG_ION_S5P) */ + +#if defined(CONFIG_ION_SUNXI) + +/* Real ion with sharing (sunxi) */ + +extern struct ion_device *sunxi_ion_device; +struct ion_device *gpsIonDev; + +PVRSRV_ERROR IonInit(IMG_VOID) +{ + gpsIonDev = sunxi_ion_device; + return PVRSRV_OK; +} + +IMG_VOID IonDeinit(IMG_VOID) +{ + gpsIonDev = IMG_NULL; +} + +#else /* defined(CONFIG_ION_SUNXI) */ + +#if defined(CONFIG_ION_INCDHAD1) + +/* Real ion with sharing (incdhad1) */ + +extern struct ion_device *incdhad1_ion_device; +struct ion_device *gpsIonDev; + +PVRSRV_ERROR IonInit(IMG_VOID) +{ + gpsIonDev = incdhad1_ion_device; + return PVRSRV_OK; +} + + +IMG_VOID IonDeinit(IMG_VOID) +{ + gpsIonDev = IMG_NULL; +} + +#else /* defined(CONFIG_ION_INCDHAD1) */ + +/* "Reference" ion implementation */ + +#include SUPPORT_ION_PRIV_HEADER +#include +#include "ion_sys_private.h" +#include "lma_heap_ion.h" + +static struct ion_heap **gapsIonHeaps; +struct ion_device *gpsIonDev; + +#if defined(LMA) +struct ion_platform_data gsTCIonConfig = { + .nr = 1, + .heaps = +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,4,39)) +#else + (struct ion_platform_heap []) +#endif + { + { + /* This heap must be first. The base address and size are filled + in from data passed down by sysconfig.c. 
*/ + .type = ION_HEAP_TYPE_CUSTOM, + .name = "tc_local_mem", + .id = ION_HEAP_TYPE_CUSTOM, + .base = 0, /* filled in later */ + .size = 0, /* filled in later */ + } + } +}; + +PVRSRV_ERROR IonInit(void *pvPrivateData) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + int i; + + ION_TC_PRIVATE_DATA sPrivateData = *(ION_TC_PRIVATE_DATA *)pvPrivateData; + + /* Fill in the heap base and size according to the private data. */ + gsTCIonConfig.heaps[0].base = sPrivateData.uiHeapBase; + gsTCIonConfig.heaps[0].size = sPrivateData.uiHeapSize; + + gapsIonHeaps = kzalloc(sizeof(struct ion_heap *) * gsTCIonConfig.nr, + GFP_KERNEL); + gpsIonDev = ion_device_create(NULL); + if (IS_ERR_OR_NULL(gpsIonDev)) + { + kfree(gapsIonHeaps); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + for (i = 0; i < gsTCIonConfig.nr; i++) + { + struct ion_platform_heap *psPlatHeapData = &gsTCIonConfig.heaps[i]; + + switch (psPlatHeapData->type) + { + case ION_HEAP_TYPE_CUSTOM: + /* Custom heap: this is used to mean a TC-specific heap, + which allocates from local memory. */ + gapsIonHeaps[i] = lma_heap_create(psPlatHeapData); + break; + default: + /* For any other type of heap, hand this to ion to create as + appropriate. We don't necessarily need any of these - + this just gives us the flexibility to have another kind + of heap if necessary. 
*/ + gapsIonHeaps[i] = ion_heap_create(psPlatHeapData); + break; + } + + if (IS_ERR_OR_NULL(gapsIonHeaps[i])) + { + printk("%s: Failed to create ion heap '%s'", __func__, psPlatHeapData->name); + IonDeinit(); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + ion_device_add_heap(gpsIonDev, gapsIonHeaps[i]); + } + + return eError; +} + +void IonDeinit(void) +{ + int i; + for (i = 0; i < gsTCIonConfig.nr; i++) + if (gapsIonHeaps[i]) + ion_heap_destroy(gapsIonHeaps[i]); + kfree(gapsIonHeaps); + ion_device_destroy(gpsIonDev); +} + +#else + +#if defined(ION_CARVEOUT_MEM_BASE) && defined(ION_CARVEOUT_MEM_SIZE) +/* Only define the carveout heap on boards with BASE and SIZE defined, + * otherwise crashes may be seen when empty cache flushes are issued + * (seen on the MIPS architecture). + */ +#define ION_HAS_CARVEOUT_HEAP +#endif + +static struct ion_platform_data gsGenericConfig = +{ +#if defined(ION_HAS_CARVEOUT_HEAP) + .nr = 3, +#else + .nr = 2, +#endif + .heaps = +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,39)) + (struct ion_platform_heap []) +#endif + { + { + .type = ION_HEAP_TYPE_SYSTEM, + .name = "system", + .id = ION_HEAP_TYPE_SYSTEM, + }, + { + .type = ION_HEAP_TYPE_DMA, + .name = "dma", + .id = ION_HEAP_TYPE_DMA, + }, +#if defined(ION_HAS_CARVEOUT_HEAP) + { + .type = ION_HEAP_TYPE_CARVEOUT, + .name = "carveout", + .id = ION_HEAP_TYPE_CARVEOUT, + .base = ION_CARVEOUT_MEM_BASE, + .size = ION_CARVEOUT_MEM_SIZE, + }, +#endif /* defined(ION_HAS_CARVEOUT_HEAP) */ + } +}; + +PVRSRV_ERROR IonInit(IMG_VOID) +{ + int uiHeapCount = gsGenericConfig.nr; + int uiError; + int i; + + gapsIonHeaps = kzalloc(sizeof(struct ion_heap *) * uiHeapCount, GFP_KERNEL); + /* Create the ion devicenode */ + gpsIonDev = ion_device_create(NULL); + if (IS_ERR_OR_NULL(gpsIonDev)) { + kfree(gapsIonHeaps); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + /* Register all the heaps */ + for (i = 0; i < gsGenericConfig.nr; i++) + { + struct ion_platform_heap *psPlatHeapData = &gsGenericConfig.heaps[i]; + + 
gapsIonHeaps[i] = ion_heap_create(psPlatHeapData); + if (IS_ERR_OR_NULL(gapsIonHeaps[i])) + { + uiError = PTR_ERR(gapsIonHeaps[i]); + goto failHeapCreate; + } + ion_device_add_heap(gpsIonDev, gapsIonHeaps[i]); + } + + return PVRSRV_OK; +failHeapCreate: + for (i = 0; i < uiHeapCount; i++) + { + if (gapsIonHeaps[i]) + { + ion_heap_destroy(gapsIonHeaps[i]); + } + } + kfree(gapsIonHeaps); + return PVRSRV_ERROR_OUT_OF_MEMORY; +} + +IMG_VOID IonDeinit(IMG_VOID) +{ + int uiHeapCount = gsGenericConfig.nr; + int i; + + for (i = 0; i < uiHeapCount; i++) + { + if (gapsIonHeaps[i]) + { + ion_heap_destroy(gapsIonHeaps[i]); + } + } + kfree(gapsIonHeaps); + ion_device_destroy(gpsIonDev); +} + +#endif /* defined(LMA) */ + +#endif /* defined(CONFIG_ION_INCDHAD1) */ + +#endif /* defined(CONFIG_ION_SUNXI) */ + +#endif /* defined(CONFIG_ION_S5P) */ + +#endif /* defined(CONFIG_ION_OMAP) */ + +#define MAX_IMPORT_ION_FDS 3 + +typedef struct _ION_IMPORT_DATA_ +{ + /* ion client handles are imported into */ + struct ion_client *psIonClient; + + /* Number of ion handles represented by this import */ + IMG_UINT32 ui32NumIonHandles; + + /* Array of ion handles in use by services */ + struct ion_handle *apsIonHandle[MAX_IMPORT_ION_FDS]; + + /* Array of physical addresses represented by these buffers */ + IMG_SYS_PHYADDR *psSysPhysAddr; + +#if defined(PDUMP) + /* If ui32NumBuffers is 1 and ion_map_kernel() is implemented by the + * allocator, this may be non-NULL. Otherwise it will be NULL. 
+ */ + IMG_PVOID pvKernAddr0; +#endif /* defined(PDUMP) */ +} +ION_IMPORT_DATA; + +PVRSRV_ERROR IonImportBufferAndAcquirePhysAddr(IMG_HANDLE hIonDev, + IMG_UINT32 ui32NumFDs, + IMG_INT32 *pai32BufferFDs, + IMG_UINT32 *pui32PageCount, + IMG_SYS_PHYADDR **ppsSysPhysAddr, + IMG_PVOID *ppvKernAddr0, + IMG_HANDLE *phPriv, + IMG_HANDLE *phUnique) +{ + struct scatterlist *psTemp, *psScatterList[MAX_IMPORT_ION_FDS] = {}; + PVRSRV_ERROR eError = PVRSRV_ERROR_OUT_OF_MEMORY; + struct ion_client *psIonClient = hIonDev; + IMG_UINT32 i, k, ui32PageCount = 0; + ION_IMPORT_DATA *psImportData; + + if(ui32NumFDs > MAX_IMPORT_ION_FDS) + { + printk(KERN_ERR "%s: More ion export fds passed in than supported " + "(%d provided, %d max)", __func__, ui32NumFDs, + MAX_IMPORT_ION_FDS); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psImportData = kzalloc(sizeof(ION_IMPORT_DATA), GFP_KERNEL); + if (psImportData == NULL) + { + goto exitFailKMallocImportData; + } + + /* Set up import data for free call */ + psImportData->psIonClient = psIonClient; + psImportData->ui32NumIonHandles = ui32NumFDs; + + for(i = 0; i < ui32NumFDs; i++) + { + int fd = (int)pai32BufferFDs[i]; + struct sg_table *psSgTable; + + psImportData->apsIonHandle[i] = ion_import_dma_buf(psIonClient, fd); + if (psImportData->apsIonHandle[i] == IMG_NULL) + { + eError = PVRSRV_ERROR_BAD_MAPPING; + goto exitFailImport; + } + + psSgTable = ion_sg_table(psIonClient, psImportData->apsIonHandle[i]); + psScatterList[i] = psSgTable->sgl; + if (psScatterList[i] == NULL) + { + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto exitFailImport; + } + + /* Although all heaps will provide an sg_table, the tables cannot + * always be trusted because sg_lists are just pointers to "struct + * page" values, and some memory e.g. carveout may not have valid + * "struct page" values. In particular, on ARM, carveout is + * generally reserved with memblock_remove(), which leaves the + * "struct page" entries uninitialized when SPARSEMEM is enabled. 
+ * The effect of this is that page_to_pfn(pfn_to_page(pfn)) != pfn. + * + * There's more discussion on this mailing list thread: + * http://lists.linaro.org/pipermail/linaro-mm-sig/2012-August/002440.html + * + * If the heap this buffer comes from implements ->phys(), it's + * probably a contiguous allocator. If the phys() function is + * implemented, we'll use it to check sg_table->sgl[0]. If we find + * they don't agree, we'll assume phys() is more reliable and use + * that. + * + * Some heaps out there will implement phys() even though they are + * not for physically contiguous allocations (so the sg_table must + * be used). Therefore use the sg_table if the phys() and first + * sg_table entry match. This should be reliable because for most + * contiguous allocators, the sg_table should be a single span + * from 'start' to 'start+size'. + * + * Also, ion prints out an error message if the heap doesn't implement + * ->phys(), which we want to avoid, so only use ->phys() if the + * sg_table contains a single span and therefore could plausibly + * be a contiguous allocator. 
+ */ + if(!sg_next(psScatterList[i])) + { + ion_phys_addr_t sPhyAddr; + size_t sLength; + + if(!ion_phys(psIonClient, psImportData->apsIonHandle[i], + &sPhyAddr, &sLength)) + { + BUG_ON(sLength & ~PAGE_MASK); + + if(sg_phys(psScatterList[i]) != sPhyAddr) + { + psScatterList[i] = IMG_NULL; + ui32PageCount += sLength / PAGE_SIZE; + } + } + } + + for(psTemp = psScatterList[i]; psTemp; psTemp = sg_next(psTemp)) + { + IMG_UINT32 j; + for (j = 0; j < psTemp->length; j += PAGE_SIZE) + { + ui32PageCount++; + } + } + } + + BUG_ON(ui32PageCount == 0); + + psImportData->psSysPhysAddr = kmalloc(sizeof(IMG_SYS_PHYADDR) * ui32PageCount, GFP_KERNEL); + if (psImportData->psSysPhysAddr == NULL) + { + goto exitFailImport; + } + + for(i = 0, k = 0; i < ui32NumFDs; i++) + { + if(psScatterList[i]) + { + for(psTemp = psScatterList[i]; psTemp; psTemp = sg_next(psTemp)) + { + IMG_UINT32 j; + for (j = 0; j < psTemp->length; j += PAGE_SIZE) + { + psImportData->psSysPhysAddr[k].uiAddr = sg_phys(psTemp) + j; + k++; + } + } + } + else + { + ion_phys_addr_t sPhyAddr; + size_t sLength, j; + + ion_phys(psIonClient, psImportData->apsIonHandle[i], + &sPhyAddr, &sLength); + + for(j = 0; j < sLength; j += PAGE_SIZE) + { + psImportData->psSysPhysAddr[k].uiAddr = sPhyAddr + j; + k++; + } + } + } + + *pui32PageCount = ui32PageCount; + *ppsSysPhysAddr = psImportData->psSysPhysAddr; + +#if defined(PDUMP) + if(ui32NumFDs == 1) + { + IMG_PVOID pvKernAddr0; + + pvKernAddr0 = ion_map_kernel(psIonClient, psImportData->apsIonHandle[0]); + if (IS_ERR(pvKernAddr0)) + { + pvKernAddr0 = IMG_NULL; + } + + psImportData->pvKernAddr0 = pvKernAddr0; + *ppvKernAddr0 = pvKernAddr0; + } + else +#endif /* defined(PDUMP) */ + { + *ppvKernAddr0 = NULL; + } + + *phPriv = psImportData; + *phUnique = (IMG_HANDLE)psImportData->psSysPhysAddr[0].uiAddr; + + return PVRSRV_OK; + +exitFailImport: + for(i = 0; psImportData->apsIonHandle[i] != NULL; i++) + { + ion_free(psIonClient, psImportData->apsIonHandle[i]); + } + 
kfree(psImportData); +exitFailKMallocImportData: + return eError; +} + +IMG_VOID IonUnimportBufferAndReleasePhysAddr(IMG_HANDLE hPriv) +{ + ION_IMPORT_DATA *psImportData = hPriv; + IMG_UINT32 i; + +#if defined(PDUMP) + if (psImportData->pvKernAddr0) + { + ion_unmap_kernel(psImportData->psIonClient, psImportData->apsIonHandle[0]); + } +#endif /* defined(PDUMP) */ + + for(i = 0; i < psImportData->ui32NumIonHandles; i++) + { + ion_free(psImportData->psIonClient, psImportData->apsIonHandle[i]); + } + + kfree(psImportData->psSysPhysAddr); + kfree(psImportData); +} + +#endif /* defined(SUPPORT_ION) */ diff --git a/sgx_km/eurasia_km/services4/srvkm/env/linux/ion.h b/sgx_km/eurasia_km/services4/srvkm/env/linux/ion.h new file mode 100644 index 0000000..145ae12 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/env/linux/ion.h @@ -0,0 +1,73 @@ +/*************************************************************************/ /*! +@Title Ion driver inter-operability code. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef __IMG_LINUX_ION_H__ +#define __IMG_LINUX_ION_H__ + +#if defined(SUPPORT_ION) + +#include SUPPORT_ION_HEADER + +#include "img_types.h" +#include "servicesext.h" + +#if defined(LMA) +PVRSRV_ERROR IonInit(void *pvPrivateData); +#else +PVRSRV_ERROR IonInit(IMG_VOID); +#endif + +IMG_VOID IonDeinit(IMG_VOID); + +PVRSRV_ERROR IonImportBufferAndAcquirePhysAddr(IMG_HANDLE hIonDev, + IMG_UINT32 ui32NumFDs, + IMG_INT32 *pi32BufferFDs, + IMG_UINT32 *pui32PageCount, + IMG_SYS_PHYADDR **ppsSysPhysAddr, + IMG_PVOID *ppvKernAddr0, + IMG_HANDLE *phPriv, + IMG_HANDLE *phUnique); + +IMG_VOID IonUnimportBufferAndReleasePhysAddr(IMG_HANDLE hPriv); + +#endif /* defined(SUPPORT_ION) */ + +#endif /* __IMG_LINUX_ION_H__ */ diff --git a/sgx_km/eurasia_km/services4/srvkm/env/linux/linkage.h b/sgx_km/eurasia_km/services4/srvkm/env/linux/linkage.h new file mode 100644 index 0000000..3c4b506 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/env/linux/linkage.h @@ -0,0 +1,77 @@ +/*************************************************************************/ /*! +@Title Linux specific Services code internal interfaces +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Interfaces between various parts of the Linux specific + Services code, that don't have any other obvious + header file to go into. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#ifndef __LINKAGE_H__ +#define __LINKAGE_H__ + +#if !defined(SUPPORT_DRI_DRM) +long PVRSRV_BridgeDispatchKM(struct file *file, unsigned int cmd, unsigned long arg); +#if defined(CONFIG_COMPAT) +long PVRSRV_BridgeCompatDispatchKM(struct file *file, unsigned int cmd, unsigned long arg); +#endif +#endif + +IMG_VOID PVRDPFInit(IMG_VOID); +PVRSRV_ERROR PVROSFuncInit(IMG_VOID); +IMG_VOID PVROSFuncDeInit(IMG_VOID); + +#ifdef DEBUG + +IMG_INT PVRDebugProcSetLevel(struct file *file, const IMG_CHAR *buffer, IMG_UINT32 count, IMG_VOID *data); +void ProcSeqShowDebugLevel(struct seq_file *sfile,void* el); + +#ifdef PVR_MANUAL_POWER_CONTROL +IMG_INT PVRProcSetPowerLevel(struct file *file, const IMG_CHAR *buffer, IMG_UINT32 count, IMG_VOID *data); + +void ProcSeqShowPowerLevel(struct seq_file *sfile,void* el); + +#endif /* PVR_MANUAL_POWER_CONTROL */ + +#endif /* DEBUG */ + +struct device *PVRLDMGetDevice(void); + +#endif /* __LINKAGE_H__ */ +/***************************************************************************** + End of file (linkage.h) +*****************************************************************************/ diff --git a/sgx_km/eurasia_km/services4/srvkm/env/linux/lma_heap_ion.h b/sgx_km/eurasia_km/services4/srvkm/env/linux/lma_heap_ion.h new file mode 100644 index 0000000..c76effb --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/env/linux/lma_heap_ion.h @@ -0,0 +1,45 @@ +/*************************************************************************/ /*! +@File lma_heap_ion.h +@Title Ion heap for local memory +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include SUPPORT_ION_HEADER + +struct ion_heap *lma_heap_create(struct ion_platform_heap *data); diff --git a/sgx_km/eurasia_km/services4/srvkm/env/linux/lock.h b/sgx_km/eurasia_km/services4/srvkm/env/linux/lock.h new file mode 100644 index 0000000..11adcaa --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/env/linux/lock.h @@ -0,0 +1,56 @@ +/*************************************************************************/ /*! +@Title Main driver lock +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description The main driver lock, held in most places in + the driver. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#ifndef __LOCK_H__ +#define __LOCK_H__ + +/* + * Main driver lock, used to ensure driver code is single threaded. + * There are some places where this lock must not be taken, such as + * in the mmap related deriver entry points. + */ +extern PVRSRV_LINUX_MUTEX gPVRSRVLock; + +#endif /* __LOCK_H__ */ +/***************************************************************************** + End of file (lock.h) +*****************************************************************************/ diff --git a/sgx_km/eurasia_km/services4/srvkm/env/linux/mm.c b/sgx_km/eurasia_km/services4/srvkm/env/linux/mm.c new file mode 100644 index 0000000..0e1be1a --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/env/linux/mm.c @@ -0,0 +1,2770 @@ +/*************************************************************************/ /*! +@Title Misc memory management utility functions for Linux +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)) +#ifndef AUTOCONF_INCLUDED +#include +#endif +#endif + +#if !defined(PVR_LINUX_MEM_AREA_POOL_MAX_PAGES) +#define PVR_LINUX_MEM_AREA_POOL_MAX_PAGES 0 +#endif + +#include +#include +#include +#include +#include +#include +#include +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)) +#include +#endif +#include +#include +#include + +#if defined(PVR_LINUX_MEM_AREA_POOL_ALLOW_SHRINK) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0)) +#include +#endif +#endif + +#include "img_defs.h" +#include "services.h" +#include "servicesint.h" +#include "syscommon.h" +#include "mutils.h" +#include "mm.h" +#include "pvrmmap.h" +#include "mmap.h" +#include "osfunc.h" +#include "pvr_debug.h" +#include "proc.h" +#include "mutex.h" +#include "lock.h" + +#if defined(DEBUG_LINUX_MEM_AREAS) || defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) + #include "lists.h" +#endif + +/* Decide whether or not DevMem allocs need __GFP_DMA32 */ +#ifndef SGX_FEATURE_36BIT_MMU + #if defined CONFIG_X86_PAE || defined CONFIG_ARM_LPAE || defined CONFIG_XPA || defined CONFIG_64BIT + #define PVR_USE_DMA32_FOR_DEVMEM_ALLOCS + #endif +#endif + +/* + * The page pool entry count is an atomic int so that the shrinker function + * can return it even when we can't take the lock that protects the page pool + * list. 
+ */ +static atomic_t g_sPagePoolEntryCount = ATOMIC_INIT(0); + +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) +typedef enum { + DEBUG_MEM_ALLOC_TYPE_KMALLOC = 0, + DEBUG_MEM_ALLOC_TYPE_VMALLOC, + DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES, + DEBUG_MEM_ALLOC_TYPE_IOREMAP, + DEBUG_MEM_ALLOC_TYPE_IO, + DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE, + DEBUG_MEM_ALLOC_TYPE_ION, +#if defined(PVR_LINUX_MEM_AREA_USE_VMAP) + DEBUG_MEM_ALLOC_TYPE_VMAP, +#endif + DEBUG_MEM_ALLOC_TYPE_SWAP, + DEBUG_MEM_ALLOC_TYPE_COUNT +} DEBUG_MEM_ALLOC_TYPE; + +typedef struct _DEBUG_MEM_ALLOC_REC +{ + DEBUG_MEM_ALLOC_TYPE eAllocType; + IMG_UINTPTR_T uiKey; /* Some unique value (private to the eAllocType) */ + IMG_VOID *pvCpuVAddr; + IMG_CPU_PHYADDR sCpuPAddr; + IMG_VOID *pvPrivateData; + IMG_SIZE_T uiBytes; + pid_t pid; + IMG_CHAR *pszFileName; + IMG_UINT32 ui32Line; + + struct _DEBUG_MEM_ALLOC_REC *psNext; + struct _DEBUG_MEM_ALLOC_REC **ppsThis; +} DEBUG_MEM_ALLOC_REC; + +static IMPLEMENT_LIST_ANY_VA_2(DEBUG_MEM_ALLOC_REC, IMG_BOOL, IMG_FALSE) +static IMPLEMENT_LIST_ANY_VA(DEBUG_MEM_ALLOC_REC) +static IMPLEMENT_LIST_FOR_EACH(DEBUG_MEM_ALLOC_REC) +static IMPLEMENT_LIST_INSERT(DEBUG_MEM_ALLOC_REC) +static IMPLEMENT_LIST_REMOVE(DEBUG_MEM_ALLOC_REC) + + +static DEBUG_MEM_ALLOC_REC *g_MemoryRecords; +static DEBUG_MEM_ALLOC_REC *g_SwapMemoryRecords; + +static IMG_UINT32 g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_COUNT]; +static IMG_UINT32 g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_COUNT]; + +/* vmalloc + kmalloc + alloc_pages + kmem_cache */ +static IMG_UINT32 g_SysRAMWaterMark; /* Doesn't include page pool */ +static IMG_UINT32 g_SysRAMHighWaterMark; /* *DOES* include page pool */ + +static inline IMG_UINT32 +SysRAMTrueWaterMark(void) +{ + return g_SysRAMWaterMark + PAGES_TO_BYTES(atomic_read(&g_sPagePoolEntryCount)) + g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_SWAP]; +} + +/* ioremap + io */ +static IMG_UINT32 g_IOMemWaterMark; +static IMG_UINT32 g_IOMemHighWaterMark; + +static IMG_VOID DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE 
eAllocType, + IMG_UINTPTR_T uiKey, + IMG_VOID *pvCpuVAddr, + IMG_CPU_PHYADDR sCpuPAddr, + IMG_VOID *pvPrivateData, + IMG_SIZE_T uiBytes, + IMG_CHAR *pszFileName, + IMG_UINT32 ui32Line); + +static IMG_VOID DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE eAllocType, IMG_UINTPTR_T uiKey, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line); + +static IMG_CHAR *DebugMemAllocRecordTypeToString(DEBUG_MEM_ALLOC_TYPE eAllocType); + + +static struct pvr_proc_dir_entry *g_SeqFileMemoryRecords; +static void* ProcSeqNextMemoryRecords(struct seq_file *sfile,void* el,loff_t off); +static void ProcSeqShowMemoryRecords(struct seq_file *sfile,void* el); +static void* ProcSeqOff2ElementMemoryRecords(struct seq_file * sfile, loff_t off); + +#endif + + +#if defined(DEBUG_LINUX_MEM_AREAS) +typedef struct _DEBUG_LINUX_MEM_AREA_REC +{ + LinuxMemArea *psLinuxMemArea; + IMG_UINT32 ui32Flags; + pid_t pid; + + struct _DEBUG_LINUX_MEM_AREA_REC *psNext; + struct _DEBUG_LINUX_MEM_AREA_REC **ppsThis; +}DEBUG_LINUX_MEM_AREA_REC; + + +static IMPLEMENT_LIST_ANY_VA(DEBUG_LINUX_MEM_AREA_REC) +static IMPLEMENT_LIST_FOR_EACH(DEBUG_LINUX_MEM_AREA_REC) +static IMPLEMENT_LIST_INSERT(DEBUG_LINUX_MEM_AREA_REC) +static IMPLEMENT_LIST_REMOVE(DEBUG_LINUX_MEM_AREA_REC) + + + + +static DEBUG_LINUX_MEM_AREA_REC *g_LinuxMemAreaRecords; +static IMG_UINT32 g_LinuxMemAreaCount; +static IMG_UINT32 g_LinuxMemAreaWaterMark; +static IMG_UINT32 g_LinuxMemAreaHighWaterMark; + + +static struct pvr_proc_dir_entry *g_SeqFileMemArea; + +static void* ProcSeqNextMemArea(struct seq_file *sfile,void* el,loff_t off); +static void ProcSeqShowMemArea(struct seq_file *sfile,void* el); +static void* ProcSeqOff2ElementMemArea(struct seq_file *sfile, loff_t off); +#endif + +#if defined(DEBUG_LINUX_MEM_AREAS) || defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) +static PVRSRV_LINUX_MUTEX g_sDebugMutex; +static PVRSRV_LINUX_MUTEX g_sSwapDebugMutex; +#endif + +#if (defined(DEBUG_LINUX_MEM_AREAS) || defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)) +static void 
ProcSeqStartstopDebugMutex(struct seq_file *sfile,IMG_BOOL start); +#endif + +typedef struct +{ + /* Linkage for page pool LRU list */ + struct list_head sPagePoolItem; + + struct page *psPage; +} LinuxPagePoolEntry; + +static LinuxKMemCache *g_PsLinuxMemAreaCache; +static LinuxKMemCache *g_PsLinuxPagePoolCache; + +static LIST_HEAD(g_sPagePoolList); +static int g_iPagePoolMaxEntries; + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) +static IMG_VOID ReservePages(IMG_VOID *pvAddress, IMG_SIZE_T uiLength); +static IMG_VOID UnreservePages(IMG_VOID *pvAddress, IMG_SIZE_T uiLength); +#endif + +static LinuxMemArea *LinuxMemAreaStructAlloc(IMG_VOID); +static IMG_VOID LinuxMemAreaStructFree(LinuxMemArea *psLinuxMemArea); +#if defined(DEBUG_LINUX_MEM_AREAS) +static IMG_VOID DebugLinuxMemAreaRecordAdd(LinuxMemArea *psLinuxMemArea, IMG_UINT32 ui32Flags); +static DEBUG_LINUX_MEM_AREA_REC *DebugLinuxMemAreaRecordFind(LinuxMemArea *psLinuxMemArea); +static IMG_VOID DebugLinuxMemAreaRecordRemove(LinuxMemArea *psLinuxMemArea); +#endif + + +static inline IMG_BOOL +AreaIsUncached(IMG_UINT32 ui32AreaFlags) +{ + return (ui32AreaFlags & (PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_UNCACHED)) != 0; +} + +static inline IMG_BOOL +CanFreeToPool(LinuxMemArea *psLinuxMemArea) +{ + return AreaIsUncached(psLinuxMemArea->ui32AreaFlags) && !psLinuxMemArea->bNeedsCacheInvalidate; +} + +IMG_VOID * +_KMallocWrapper(IMG_SIZE_T uiByteSize, gfp_t uFlags, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line, IMG_BOOL bSwapAlloc) +{ + IMG_VOID *pvRet; + pvRet = kmalloc(uiByteSize, uFlags); +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) + if (pvRet) + { + IMG_CPU_PHYADDR sCpuPAddr; + sCpuPAddr.uiAddr = 0; + + DebugMemAllocRecordAdd(bSwapAlloc ? 
DEBUG_MEM_ALLOC_TYPE_SWAP : DEBUG_MEM_ALLOC_TYPE_KMALLOC, + (IMG_UINTPTR_T)pvRet, + pvRet, + sCpuPAddr, + NULL, + uiByteSize, + pszFileName, + ui32Line + ); + } +#else + PVR_UNREFERENCED_PARAMETER(pszFileName); + PVR_UNREFERENCED_PARAMETER(ui32Line); + PVR_UNREFERENCED_PARAMETER(bSwapAlloc); +#endif + return pvRet; +} + +void +_KFreeWrapper(void *pvCpuVAddr, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line, IMG_BOOL bSwapAlloc) +{ +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) + DebugMemAllocRecordRemove( + bSwapAlloc ? DEBUG_MEM_ALLOC_TYPE_SWAP : DEBUG_MEM_ALLOC_TYPE_KMALLOC, + (IMG_UINTPTR_T)pvCpuVAddr, + pszFileName, + ui32Line); +#else + PVR_UNREFERENCED_PARAMETER(pszFileName); + PVR_UNREFERENCED_PARAMETER(ui32Line); + PVR_UNREFERENCED_PARAMETER(bSwapAlloc); +#endif + kfree(pvCpuVAddr); +} + + +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) +static IMG_VOID +DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE eAllocType, + IMG_UINTPTR_T uiKey, + IMG_VOID *pvCpuVAddr, + IMG_CPU_PHYADDR sCpuPAddr, + IMG_VOID *pvPrivateData, + IMG_SIZE_T uiBytes, + IMG_CHAR *pszFileName, + IMG_UINT32 ui32Line) +{ + DEBUG_MEM_ALLOC_REC *psRecord; + + psRecord = kmalloc(sizeof(DEBUG_MEM_ALLOC_REC), GFP_KERNEL); + + psRecord->eAllocType = eAllocType; + psRecord->uiKey = uiKey; + psRecord->pvCpuVAddr = pvCpuVAddr; + psRecord->sCpuPAddr.uiAddr = sCpuPAddr.uiAddr; + psRecord->pvPrivateData = pvPrivateData; + psRecord->pid = OSGetCurrentProcessIDKM(); + psRecord->uiBytes = uiBytes; + psRecord->pszFileName = pszFileName; + psRecord->ui32Line = ui32Line; + + if (eAllocType == DEBUG_MEM_ALLOC_TYPE_SWAP) + { + LinuxLockMutexNested(&g_sSwapDebugMutex, PVRSRV_LOCK_CLASS_MM_DEBUG); + + List_DEBUG_MEM_ALLOC_REC_Insert(&g_SwapMemoryRecords, psRecord); + + g_WaterMarkData[eAllocType] += uiBytes; + if (g_WaterMarkData[eAllocType] > g_HighWaterMarkData[eAllocType]) + { + g_HighWaterMarkData[eAllocType] = g_WaterMarkData[eAllocType]; + } + + LinuxUnLockMutex(&g_sSwapDebugMutex); + return; + } + + 
LinuxLockMutexNested(&g_sDebugMutex, PVRSRV_LOCK_CLASS_MM_DEBUG); + List_DEBUG_MEM_ALLOC_REC_Insert(&g_MemoryRecords, psRecord); + + g_WaterMarkData[eAllocType] += uiBytes; + if (g_WaterMarkData[eAllocType] > g_HighWaterMarkData[eAllocType]) + { + g_HighWaterMarkData[eAllocType] = g_WaterMarkData[eAllocType]; + } + + if (eAllocType == DEBUG_MEM_ALLOC_TYPE_KMALLOC + || eAllocType == DEBUG_MEM_ALLOC_TYPE_VMALLOC + || eAllocType == DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES + || eAllocType == DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE) + { + IMG_SIZE_T uSysRAMTrueWaterMark; + + g_SysRAMWaterMark += uiBytes; + uSysRAMTrueWaterMark = SysRAMTrueWaterMark(); + + if (uSysRAMTrueWaterMark > g_SysRAMHighWaterMark) + { + g_SysRAMHighWaterMark = uSysRAMTrueWaterMark; + } + } + else if (eAllocType == DEBUG_MEM_ALLOC_TYPE_IOREMAP + || eAllocType == DEBUG_MEM_ALLOC_TYPE_IO) + { + g_IOMemWaterMark += uiBytes; + if (g_IOMemWaterMark > g_IOMemHighWaterMark) + { + g_IOMemHighWaterMark = g_IOMemWaterMark; + } + } + + LinuxUnLockMutex(&g_sDebugMutex); +} + + +static IMG_BOOL DebugMemAllocRecordRemove_AnyVaCb(DEBUG_MEM_ALLOC_REC *psCurrentRecord, va_list va) +{ + DEBUG_MEM_ALLOC_TYPE eAllocType; + IMG_UINTPTR_T uiKey; + + eAllocType = va_arg(va, DEBUG_MEM_ALLOC_TYPE); + uiKey = va_arg(va, IMG_UINTPTR_T); + + if (psCurrentRecord->eAllocType == eAllocType + && psCurrentRecord->uiKey == uiKey) + { + g_WaterMarkData[eAllocType] -= psCurrentRecord->uiBytes; + + if (eAllocType == DEBUG_MEM_ALLOC_TYPE_KMALLOC + || eAllocType == DEBUG_MEM_ALLOC_TYPE_VMALLOC + || eAllocType == DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES + || eAllocType == DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE) + { + g_SysRAMWaterMark -= psCurrentRecord->uiBytes; + } + else if (eAllocType == DEBUG_MEM_ALLOC_TYPE_IOREMAP + || eAllocType == DEBUG_MEM_ALLOC_TYPE_IO) + { + g_IOMemWaterMark -= psCurrentRecord->uiBytes; + } + + List_DEBUG_MEM_ALLOC_REC_Remove(psCurrentRecord); + kfree(psCurrentRecord); + + return IMG_TRUE; + } + else + { + return IMG_FALSE; + } +} + + 
+static IMG_VOID +DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE eAllocType, IMG_UINTPTR_T uiKey, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line) +{ +/* DEBUG_MEM_ALLOC_REC **ppsCurrentRecord;*/ + + LinuxLockMutexNested(eAllocType == DEBUG_MEM_ALLOC_TYPE_SWAP ? &g_sSwapDebugMutex : &g_sDebugMutex, PVRSRV_LOCK_CLASS_MM_DEBUG); + + /* Locate the corresponding allocation entry */ + if (!List_DEBUG_MEM_ALLOC_REC_IMG_BOOL_Any_va(eAllocType == DEBUG_MEM_ALLOC_TYPE_SWAP ? g_SwapMemoryRecords : g_MemoryRecords, + DebugMemAllocRecordRemove_AnyVaCb, + eAllocType, + uiKey)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: couldn't find an entry for type=%s with uiKey=" UINTPTR_FMT " (called from %s, line %d\n", + __FUNCTION__, DebugMemAllocRecordTypeToString(eAllocType), uiKey, + pszFileName, ui32Line)); + } + + LinuxUnLockMutex(eAllocType == DEBUG_MEM_ALLOC_TYPE_SWAP ? &g_sSwapDebugMutex : &g_sDebugMutex); +} + + +static IMG_CHAR * +DebugMemAllocRecordTypeToString(DEBUG_MEM_ALLOC_TYPE eAllocType) +{ + IMG_CHAR *apszDebugMemoryRecordTypes[] = { + "KMALLOC", + "VMALLOC", + "ALLOC_PAGES", + "IOREMAP", + "IO", + "KMEM_CACHE_ALLOC", + "ION", +#if defined(PVR_LINUX_MEM_AREA_USE_VMAP) + "VMAP", +#endif + "SWAPALLOC", + }; + return apszDebugMemoryRecordTypes[eAllocType]; +} +#endif + + +static IMG_BOOL +AllocFlagsToPGProt(pgprot_t *pPGProtFlags, IMG_UINT32 ui32AllocFlags) +{ + pgprot_t PGProtFlags; + + switch (ui32AllocFlags & PVRSRV_HAP_CACHETYPE_MASK) + { + case PVRSRV_HAP_CACHED: + PGProtFlags = PAGE_KERNEL; + break; + case PVRSRV_HAP_WRITECOMBINE: + PGProtFlags = PGPROT_WC(PAGE_KERNEL); + break; + case PVRSRV_HAP_UNCACHED: + PGProtFlags = PGPROT_UC(PAGE_KERNEL); + break; + default: + PVR_DPF((PVR_DBG_ERROR, + "%s: Unknown mapping flags=0x%08x", + __FUNCTION__, ui32AllocFlags)); + dump_stack(); + return IMG_FALSE; + } + + *pPGProtFlags = PGProtFlags; + + return IMG_TRUE; +} + +IMG_VOID * +_VMallocWrapper(IMG_SIZE_T uiBytes, + IMG_UINT32 ui32AllocFlags, + IMG_CHAR *pszFileName, + IMG_UINT32 
ui32Line) +{ + pgprot_t PGProtFlags; + IMG_VOID *pvRet; + gfp_t gfp_mask; + + if (!AllocFlagsToPGProt(&PGProtFlags, ui32AllocFlags)) + { + return NULL; + } + + gfp_mask = GFP_KERNEL; + +#if defined(PVR_USE_DMA32_FOR_DEVMEM_ALLOCS) +#ifdef CONFIG_ZONE_DMA32 + gfp_mask |= __GFP_DMA32; +#else + gfp_mask |= __GFP_DMA; +#endif +#else + gfp_mask |= __GFP_HIGHMEM; +#endif + + /* Allocate virtually contiguous pages */ + pvRet = __vmalloc(uiBytes, gfp_mask, PGProtFlags); + +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) + if (pvRet) + { + IMG_CPU_PHYADDR sCpuPAddr; + sCpuPAddr.uiAddr = 0; + + DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_VMALLOC, + (IMG_UINTPTR_T)pvRet, + pvRet, + sCpuPAddr, + NULL, + PAGE_ALIGN(uiBytes), + pszFileName, + ui32Line + ); + } +#else + PVR_UNREFERENCED_PARAMETER(pszFileName); + PVR_UNREFERENCED_PARAMETER(ui32Line); +#endif + + return pvRet; +} + + +IMG_VOID +_VFreeWrapper(IMG_VOID *pvCpuVAddr, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line) +{ +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) + DebugMemAllocRecordRemove( + DEBUG_MEM_ALLOC_TYPE_VMALLOC, + (IMG_UINTPTR_T)pvCpuVAddr, + pszFileName, + ui32Line); +#else + PVR_UNREFERENCED_PARAMETER(pszFileName); + PVR_UNREFERENCED_PARAMETER(ui32Line); +#endif + vfree(pvCpuVAddr); +} + + +#if defined(PVR_LINUX_MEM_AREA_USE_VMAP) +static IMG_VOID * +_VMapWrapper(struct page **ppsPageList, IMG_UINT32 ui32NumPages, IMG_UINT32 ui32AllocFlags, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line) +{ + pgprot_t PGProtFlags; + IMG_VOID *pvRet; + + if (!AllocFlagsToPGProt(&PGProtFlags, ui32AllocFlags)) + { + return NULL; + } + + pvRet = vmap(ppsPageList, ui32NumPages, GFP_KERNEL | __GFP_HIGHMEM, PGProtFlags); + +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) + if (pvRet) + { + IMG_CPU_PHYADDR sCpuPAddr; + sCpuPAddr.uiAddr = 0; + + DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_VMAP, + (IMG_UINTPTR_T)pvRet, + pvRet, + sCpuPAddr, + NULL, + PAGES_TO_BYTES(ui32NumPages), + pszFileName, + ui32Line + ); + } +#else + 
PVR_UNREFERENCED_PARAMETER(pszFileName); + PVR_UNREFERENCED_PARAMETER(ui32Line); +#endif + + return pvRet; +} + +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) +#define VMapWrapper(ppsPageList, uBytes, ui32AllocFlags) _VMapWrapper(ppsPageList, uBytes, ui32AllocFlags, __FILE__, __LINE__) +#else +#define VMapWrapper(ppsPageList, uBytes, ui32AllocFlags) _VMapWrapper(ppsPageList, uBytes, ui32AllocFlags, NULL, 0) +#endif + + +static IMG_VOID +_VUnmapWrapper(IMG_VOID *pvCpuVAddr, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line) +{ +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) + DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_VMAP, + (IMG_UINTPTR_T)pvCpuVAddr, pszFileName, ui32Line); +#else + PVR_UNREFERENCED_PARAMETER(pszFileName); + PVR_UNREFERENCED_PARAMETER(ui32Line); +#endif + vunmap(pvCpuVAddr); +} + +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) +#define VUnmapWrapper(pvCpuVAddr) _VUnmapWrapper(pvCpuVAddr, __FILE__, __LINE__) +#else +#define VUnmapWrapper(pvCpuVAddr) _VUnmapWrapper(pvCpuVAddr, NULL, 0) +#endif + +#endif /* defined(PVR_LINUX_MEM_AREA_USE_VMAP) */ + + +IMG_VOID +_KMemCacheFreeWrapper(LinuxKMemCache *psCache, IMG_VOID *pvObject, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line) +{ +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) + DebugMemAllocRecordRemove( + DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE, + (IMG_UINTPTR_T)pvObject, + pszFileName, + ui32Line); +#else + PVR_UNREFERENCED_PARAMETER(pszFileName); + PVR_UNREFERENCED_PARAMETER(ui32Line); +#endif + + kmem_cache_free(psCache, pvObject); +} + + +const IMG_CHAR * +KMemCacheNameWrapper(LinuxKMemCache *psCache) +{ + PVR_UNREFERENCED_PARAMETER(psCache); + + /* In this case kmem_cache_t is an incomplete typedef, + * so we can't even de-reference to get the name member. 
It is also a GPL export symbol */ + return ""; +} + + +static LinuxPagePoolEntry * +LinuxPagePoolEntryAlloc(IMG_VOID) +{ + return KMemCacheAllocWrapper(g_PsLinuxPagePoolCache, GFP_KERNEL); +} + +static IMG_VOID +LinuxPagePoolEntryFree(LinuxPagePoolEntry *psPagePoolEntry) +{ + KMemCacheFreeWrapper(g_PsLinuxPagePoolCache, psPagePoolEntry); +} + + +static struct page * +AllocPageFromLinux(void) +{ + struct page *psPage; + gfp_t gfp_mask; + + gfp_mask = GFP_KERNEL; + +#if defined(PVR_USE_DMA32_FOR_DEVMEM_ALLOCS) +#ifdef CONFIG_ZONE_DMA32 + gfp_mask |= __GFP_DMA32; +#else + gfp_mask |= __GFP_DMA; +#endif +#else + gfp_mask |= __GFP_HIGHMEM; +#endif + + /* PF_DUMPCORE is treated by the VM as if the OOM killer was disabled */ + WARN_ON(current->flags & PF_DUMPCORE); + current->flags |= PF_DUMPCORE; + + psPage = alloc_pages(gfp_mask, 0); + current->flags &= ~PF_DUMPCORE; + if (!psPage) + { + return NULL; + + } +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) + /* Reserve those pages to allow them to be re-mapped to user space */ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)) + SetPageReserved(psPage); +#else + mem_map_reserve(psPage); +#endif +#endif + return psPage; +} + + +static IMG_VOID +FreePageToLinux(struct page *psPage) +{ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)) + ClearPageReserved(psPage); +#else + mem_map_reserve(psPage); +#endif +#endif + __free_pages(psPage, 0); +} + + +#if (PVR_LINUX_MEM_AREA_POOL_MAX_PAGES != 0) +static DEFINE_MUTEX(g_sPagePoolMutex); + +static inline void +PagePoolLock(void) +{ + mutex_lock(&g_sPagePoolMutex); +} + +static inline void +PagePoolUnlock(void) +{ + mutex_unlock(&g_sPagePoolMutex); +} + +static inline int +PagePoolTrylock(void) +{ + return mutex_trylock(&g_sPagePoolMutex); +} + +#else /* (PVR_LINUX_MEM_AREA_POOL_MAX_PAGES != 0) */ +static inline void +PagePoolLock(void) +{ +} + +static inline void +PagePoolUnlock(void) +{ +} + +static inline int 
+PagePoolTrylock(void) +{ + return 1; +} +#endif /* (PVR_LINUX_MEM_AREA_POOL_MAX_PAGES != 0) */ + + +static inline void +AddEntryToPool(LinuxPagePoolEntry *psPagePoolEntry) +{ + list_add_tail(&psPagePoolEntry->sPagePoolItem, &g_sPagePoolList); + atomic_inc(&g_sPagePoolEntryCount); +} + +static inline void +RemoveEntryFromPool(LinuxPagePoolEntry *psPagePoolEntry) +{ + list_del(&psPagePoolEntry->sPagePoolItem); + atomic_dec(&g_sPagePoolEntryCount); +} + +static inline LinuxPagePoolEntry * +RemoveFirstEntryFromPool(void) +{ + LinuxPagePoolEntry *psPagePoolEntry; + + if (list_empty(&g_sPagePoolList)) + { + PVR_ASSERT(atomic_read(&g_sPagePoolEntryCount) == 0); + + return NULL; + } + + PVR_ASSERT(atomic_read(&g_sPagePoolEntryCount) > 0); + + psPagePoolEntry = list_first_entry(&g_sPagePoolList, LinuxPagePoolEntry, sPagePoolItem); + + RemoveEntryFromPool(psPagePoolEntry); + + return psPagePoolEntry; +} + +static struct page * +AllocPage(IMG_UINT32 ui32AreaFlags, IMG_BOOL *pbFromPagePool) +{ + struct page *psPage = NULL; + + /* + * Only uncached allocations can come from the page pool. + * The page pool is currently used to reduce the cost of + * invalidating the CPU cache when uncached memory is allocated. 
+ */ + if (AreaIsUncached(ui32AreaFlags) && atomic_read(&g_sPagePoolEntryCount) != 0) + { + LinuxPagePoolEntry *psPagePoolEntry; + + PagePoolLock(); + psPagePoolEntry = RemoveFirstEntryFromPool(); + PagePoolUnlock(); + + /* List may have changed since we checked the counter */ + if (psPagePoolEntry) + { + psPage = psPagePoolEntry->psPage; + LinuxPagePoolEntryFree(psPagePoolEntry); + *pbFromPagePool = IMG_TRUE; + } + } + + if (!psPage) + { + psPage = AllocPageFromLinux(); + if (psPage) + { + *pbFromPagePool = IMG_FALSE; + } + } + + return psPage; + +} + +static IMG_VOID +FreePage(IMG_BOOL bToPagePool, struct page *psPage) +{ + /* Only uncached allocations can be freed to the page pool */ + if (bToPagePool && atomic_read(&g_sPagePoolEntryCount) < g_iPagePoolMaxEntries) + { + LinuxPagePoolEntry *psPagePoolEntry = LinuxPagePoolEntryAlloc(); + if (psPagePoolEntry) + { + psPagePoolEntry->psPage = psPage; + + PagePoolLock(); + AddEntryToPool(psPagePoolEntry); + PagePoolUnlock(); + + return; + } + } + + FreePageToLinux(psPage); +} + +static IMG_VOID +FreePagePool(IMG_VOID) +{ + LinuxPagePoolEntry *psPagePoolEntry, *psTempPoolEntry; + + PagePoolLock(); + +#if (PVR_LINUX_MEM_AREA_POOL_MAX_PAGES != 0) + PVR_TRACE(("%s: Freeing %d pages from pool", __FUNCTION__, atomic_read(&g_sPagePoolEntryCount))); +#else + PVR_ASSERT(atomic_read(&g_sPagePoolEntryCount) == 0); + PVR_ASSERT(list_empty(&g_sPagePoolList)); +#endif + + list_for_each_entry_safe(psPagePoolEntry, psTempPoolEntry, &g_sPagePoolList, sPagePoolItem) + { + RemoveEntryFromPool(psPagePoolEntry); + + FreePageToLinux(psPagePoolEntry->psPage); + LinuxPagePoolEntryFree(psPagePoolEntry); + } + + PVR_ASSERT(atomic_read(&g_sPagePoolEntryCount) == 0); + + PagePoolUnlock(); +} + +#if defined(PVR_LINUX_MEM_AREA_POOL_ALLOW_SHRINK) +#if defined(PVRSRV_NEED_PVR_ASSERT) +static struct shrinker g_sShrinker; +#endif + +static unsigned long +CountObjectsInPagePool(struct shrinker *psShrinker, struct shrink_control *psShrinkControl) +{ + 
PVR_ASSERT(psShrinker == &g_sShrinker); + (void)psShrinker; + (void)psShrinkControl; + + return atomic_read(&g_sPagePoolEntryCount); +} + +static unsigned long +ScanObjectsInPagePool(struct shrinker *psShrinker, struct shrink_control *psShrinkControl) +{ + unsigned long uNumToScan = psShrinkControl->nr_to_scan; + LinuxPagePoolEntry *psPagePoolEntry, *psTempPoolEntry; + + PVR_ASSERT(psShrinker == &g_sShrinker); + (void)psShrinker; + + PVR_TRACE(("%s: Number to scan: %ld", __FUNCTION__, uNumToScan)); + PVR_TRACE(("%s: Pages in pool before scan: %d", __FUNCTION__, atomic_read(&g_sPagePoolEntryCount))); + + if (!PagePoolTrylock()) + { + PVR_TRACE(("%s: Couldn't get page pool lock", __FUNCTION__)); + return -1; + } + + list_for_each_entry_safe(psPagePoolEntry, psTempPoolEntry, &g_sPagePoolList, sPagePoolItem) + { + RemoveEntryFromPool(psPagePoolEntry); + + FreePageToLinux(psPagePoolEntry->psPage); + LinuxPagePoolEntryFree(psPagePoolEntry); + + if (--uNumToScan == 0) + { + break; + } + } + + if (list_empty(&g_sPagePoolList)) + { + PVR_ASSERT(atomic_read(&g_sPagePoolEntryCount) == 0); + } + + PagePoolUnlock(); + + PVR_TRACE(("%s: Pages in pool after scan: %d", __FUNCTION__, atomic_read(&g_sPagePoolEntryCount))); + + return atomic_read(&g_sPagePoolEntryCount); +} +#endif /* defined(PVR_LINUX_MEM_AREA_POOL_ALLOW_SHRINK) */ + +static IMG_BOOL +AllocPages(IMG_UINT32 ui32AreaFlags, struct page ***pppsPageList, IMG_HANDLE *phBlockPageList, IMG_UINT32 ui32NumPages, IMG_BOOL *pbFromPagePool) +{ + struct page **ppsPageList; + IMG_HANDLE hBlockPageList; + IMG_INT32 i; /* Must be signed; see "for" loop conditions */ + PVRSRV_ERROR eError; + IMG_BOOL bFromPagePool = IMG_FALSE; + +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) + IMG_CPU_PHYADDR sCpuPAddr; +#endif + + eError = OSAllocMem(0, sizeof(*ppsPageList) * ui32NumPages, (IMG_VOID **)&ppsPageList, &hBlockPageList, + "Array of pages"); + if (eError != PVRSRV_OK) + { + goto failed_page_list_alloc; + } + + *pbFromPagePool = IMG_TRUE; + 
for(i = 0; i < (IMG_INT32)ui32NumPages; i++) + { + ppsPageList[i] = AllocPage(ui32AreaFlags, &bFromPagePool); + if (!ppsPageList[i]) + { + goto failed_alloc_pages; + } + *pbFromPagePool &= bFromPagePool; + } + + *pppsPageList = ppsPageList; + *phBlockPageList = hBlockPageList; + +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) + sCpuPAddr.uiAddr = 0; + + DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES, + (IMG_UINTPTR_T)ppsPageList, + 0, + sCpuPAddr, + NULL, + PAGES_TO_BYTES(ui32NumPages), + "unknown", + 0 + ); +#endif + + return IMG_TRUE; + +failed_alloc_pages: + for(i--; i >= 0; i--) + { + FreePage(*pbFromPagePool, ppsPageList[i]); + } + (IMG_VOID) OSFreeMem(0, sizeof(*ppsPageList) * ui32NumPages, ppsPageList, hBlockPageList); + +failed_page_list_alloc: + return IMG_FALSE; +} + + +static IMG_VOID +FreePages(IMG_BOOL bToPagePool, struct page **ppsPageList, IMG_HANDLE hBlockPageList, IMG_UINT32 ui32NumPages) +{ + IMG_INT32 i; + + for(i = 0; i < (IMG_INT32)ui32NumPages; i++) + { + FreePage(bToPagePool, ppsPageList[i]); + } + +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) + DebugMemAllocRecordRemove( + DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES, + (IMG_UINTPTR_T)ppsPageList, + __FILE__, + __LINE__); +#endif + + (IMG_VOID) OSFreeMem(0, sizeof(*ppsPageList) * ui32NumPages, ppsPageList, hBlockPageList); +} + + +LinuxMemArea * +NewVMallocLinuxMemArea(IMG_SIZE_T uBytes, IMG_UINT32 ui32AreaFlags) +{ + LinuxMemArea *psLinuxMemArea = NULL; + IMG_VOID *pvCpuVAddr; +#if defined(PVR_LINUX_MEM_AREA_USE_VMAP) + IMG_UINT32 ui32NumPages = 0; + struct page **ppsPageList = NULL; + IMG_HANDLE hBlockPageList; +#endif + IMG_BOOL bFromPagePool = IMG_FALSE; + + psLinuxMemArea = LinuxMemAreaStructAlloc(); + if (!psLinuxMemArea) + { + goto failed; + } + +#if defined(PVR_LINUX_MEM_AREA_USE_VMAP) + ui32NumPages = RANGE_TO_PAGES(uBytes); + + if (!AllocPages(ui32AreaFlags, &ppsPageList, &hBlockPageList, ui32NumPages, &bFromPagePool)) + { + goto failed; + } + + pvCpuVAddr = VMapWrapper(ppsPageList, 
ui32NumPages, ui32AreaFlags); +#else /* defined(PVR_LINUX_MEM_AREA_USE_VMAP) */ + pvCpuVAddr = VMallocWrapper(uBytes, ui32AreaFlags); + if (!pvCpuVAddr) + { + goto failed; + } +/* PG_reserved was deprecated in linux-2.6.15 */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) + /* Reserve those pages to allow them to be re-mapped to user space */ + ReservePages(pvCpuVAddr, uBytes); +#endif +#endif /* defined(PVR_LINUX_MEM_AREA_USE_VMAP) */ + + psLinuxMemArea->eAreaType = LINUX_MEM_AREA_VMALLOC; + psLinuxMemArea->uData.sVmalloc.pvVmallocAddress = pvCpuVAddr; +#if defined(PVR_LINUX_MEM_AREA_USE_VMAP) + psLinuxMemArea->uData.sVmalloc.ppsPageList = ppsPageList; + psLinuxMemArea->uData.sVmalloc.hBlockPageList = hBlockPageList; +#endif + psLinuxMemArea->uiByteSize = uBytes; + psLinuxMemArea->ui32AreaFlags = ui32AreaFlags; + INIT_LIST_HEAD(&psLinuxMemArea->sMMapOffsetStructList); + +#if defined(DEBUG_LINUX_MEM_AREAS) + DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags); +#endif + + /* This works around a problem where Linux will not invalidate + * the cache for physical memory it frees that is direct mapped. + * + * As a result, cache entries remain that may be subsequently flushed + * to these physical pages after they have been allocated for another + * purpose. For a subsequent cached use of this memory, that is not a + * problem, but if we are allocating uncached or write-combined memory, + * and bypassing the cache, it can cause subsequent uncached writes to + * the memory to be replaced with junk from the cache. + * + * If the pages are from our page cache, no cache invalidate is needed. + * + * This just handles the __vmalloc() case (when we have a kernel virtual + * address range). The alloc_pages() path is handled in mmap.c. 
+ */ + if (AreaIsUncached(ui32AreaFlags) && !bFromPagePool) + { + OSInvalidateCPUCacheRangeKM(psLinuxMemArea, 0, pvCpuVAddr, uBytes); + } + + return psLinuxMemArea; + +failed: + PVR_DPF((PVR_DBG_ERROR, "%s: failed!", __FUNCTION__)); +#if defined(PVR_LINUX_MEM_AREA_USE_VMAP) + if (ppsPageList) + { + FreePages(bFromPagePool, ppsPageList, hBlockPageList, ui32NumPages); + } +#endif + if (psLinuxMemArea) + { + LinuxMemAreaStructFree(psLinuxMemArea); + } + + return NULL; +} + + +IMG_VOID +FreeVMallocLinuxMemArea(LinuxMemArea *psLinuxMemArea) +{ +#if defined(PVR_LINUX_MEM_AREA_USE_VMAP) + IMG_UINT32 ui32NumPages; + struct page **ppsPageList; + IMG_HANDLE hBlockPageList; +#endif + + PVR_ASSERT(psLinuxMemArea); + PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_VMALLOC); + PVR_ASSERT(psLinuxMemArea->uData.sVmalloc.pvVmallocAddress); + +#if defined(DEBUG_LINUX_MEM_AREAS) + DebugLinuxMemAreaRecordRemove(psLinuxMemArea); +#endif + + PVR_DPF((PVR_DBG_MESSAGE,"%s: pvCpuVAddr: %p", + __FUNCTION__, psLinuxMemArea->uData.sVmalloc.pvVmallocAddress)); + +#if defined(PVR_LINUX_MEM_AREA_USE_VMAP) + VUnmapWrapper(psLinuxMemArea->uData.sVmalloc.pvVmallocAddress); + + ui32NumPages = RANGE_TO_PAGES(psLinuxMemArea->uiByteSize); + ppsPageList = psLinuxMemArea->uData.sVmalloc.ppsPageList; + hBlockPageList = psLinuxMemArea->uData.sVmalloc.hBlockPageList; + + FreePages(CanFreeToPool(psLinuxMemArea), ppsPageList, hBlockPageList, ui32NumPages); +#else +/* PG_reserved was deprecated in linux-2.6.15 */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) + UnreservePages(psLinuxMemArea->uData.sVmalloc.pvVmallocAddress, + psLinuxMemArea->uiByteSize); +#endif + + VFreeWrapper(psLinuxMemArea->uData.sVmalloc.pvVmallocAddress); +#endif /* defined(PVR_LINUX_MEM_AREA_USE_VMAP) */ + + LinuxMemAreaStructFree(psLinuxMemArea); +} + + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) +/* Reserve pages of memory in order that they're not automatically + deallocated after the last user reference dies. 
*/ +static IMG_VOID +ReservePages(IMG_VOID *pvAddress, IMG_SIZE_T uLength) +{ + IMG_VOID *pvPage; + IMG_VOID *pvEnd = pvAddress + uLength; + + for(pvPage = pvAddress; pvPage < pvEnd; pvPage += PAGE_SIZE) + { +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)) + SetPageReserved(vmalloc_to_page(pvPage)); +#else + mem_map_reserve(vmalloc_to_page(pvPage)); +#endif + } +} + + +/* Un-reserve pages of memory in order that they can be freed. */ +static IMG_VOID +UnreservePages(IMG_VOID *pvAddress, IMG_SIZE_T uLength) +{ + IMG_VOID *pvPage; + IMG_VOID *pvEnd = pvAddress + uLength; + + for(pvPage = pvAddress; pvPage < pvEnd; pvPage += PAGE_SIZE) + { +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)) + ClearPageReserved(vmalloc_to_page(pvPage)); +#else + mem_map_unreserve(vmalloc_to_page(pvPage)); +#endif + } +} +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) */ + + +IMG_VOID * +_IORemapWrapper(IMG_CPU_PHYADDR BasePAddr, + IMG_SIZE_T uBytes, + IMG_UINT32 ui32MappingFlags, + IMG_CHAR *pszFileName, + IMG_UINT32 ui32Line) +{ + IMG_VOID *pvIORemapCookie; + + switch (ui32MappingFlags & PVRSRV_HAP_CACHETYPE_MASK) + { + case PVRSRV_HAP_CACHED: + pvIORemapCookie = (IMG_VOID *)IOREMAP(BasePAddr.uiAddr, uBytes); + break; + case PVRSRV_HAP_WRITECOMBINE: + pvIORemapCookie = (IMG_VOID *)IOREMAP_WC(BasePAddr.uiAddr, uBytes); + break; + case PVRSRV_HAP_UNCACHED: + pvIORemapCookie = (IMG_VOID *)IOREMAP_UC(BasePAddr.uiAddr, uBytes); + break; + default: + PVR_DPF((PVR_DBG_ERROR, "IORemapWrapper: unknown mapping flags")); + return NULL; + } + +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) + if (pvIORemapCookie) + { + DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_IOREMAP, + (IMG_UINTPTR_T)pvIORemapCookie, + pvIORemapCookie, + BasePAddr, + NULL, + uBytes, + pszFileName, + ui32Line + ); + } +#else + PVR_UNREFERENCED_PARAMETER(pszFileName); + PVR_UNREFERENCED_PARAMETER(ui32Line); +#endif + + return pvIORemapCookie; +} + + +IMG_VOID +_IOUnmapWrapper(IMG_VOID *pvIORemapCookie, IMG_CHAR 
*pszFileName, IMG_UINT32 ui32Line) +{ +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) + DebugMemAllocRecordRemove( + DEBUG_MEM_ALLOC_TYPE_IOREMAP, + (IMG_UINTPTR_T)pvIORemapCookie, + pszFileName, + ui32Line); +#else + PVR_UNREFERENCED_PARAMETER(pszFileName); + PVR_UNREFERENCED_PARAMETER(ui32Line); +#endif + iounmap(pvIORemapCookie); +} + + +LinuxMemArea * +NewIORemapLinuxMemArea(IMG_CPU_PHYADDR BasePAddr, + IMG_SIZE_T uBytes, + IMG_UINT32 ui32AreaFlags) +{ + LinuxMemArea *psLinuxMemArea; + IMG_VOID *pvIORemapCookie; + + psLinuxMemArea = LinuxMemAreaStructAlloc(); + if (!psLinuxMemArea) + { + return NULL; + } + + pvIORemapCookie = IORemapWrapper(BasePAddr, uBytes, ui32AreaFlags); + if (!pvIORemapCookie) + { + LinuxMemAreaStructFree(psLinuxMemArea); + return NULL; + } + + psLinuxMemArea->eAreaType = LINUX_MEM_AREA_IOREMAP; + psLinuxMemArea->uData.sIORemap.pvIORemapCookie = pvIORemapCookie; + psLinuxMemArea->uData.sIORemap.CPUPhysAddr = BasePAddr; + psLinuxMemArea->uiByteSize = uBytes; + psLinuxMemArea->ui32AreaFlags = ui32AreaFlags; + INIT_LIST_HEAD(&psLinuxMemArea->sMMapOffsetStructList); + +#if defined(DEBUG_LINUX_MEM_AREAS) + DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags); +#endif + + return psLinuxMemArea; +} + + +IMG_VOID +FreeIORemapLinuxMemArea(LinuxMemArea *psLinuxMemArea) +{ + PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_IOREMAP); + +#if defined(DEBUG_LINUX_MEM_AREAS) + DebugLinuxMemAreaRecordRemove(psLinuxMemArea); +#endif + + IOUnmapWrapper(psLinuxMemArea->uData.sIORemap.pvIORemapCookie); + + LinuxMemAreaStructFree(psLinuxMemArea); +} + + +#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL) +/* + * Avoid using remap_pfn_range on RAM, if possible. On x86 systems, with + * PAT enabled, remap_pfn_range checks the page attributes requested by + * remap_pfn_range against those of the direct kernel mapping for those + * pages (if any). 
This is rather annoying if the pages have been obtained + * with alloc_pages, where we just ask for raw pages; we don't care about + * the direct mapping. This latter issue arises when device memory is + * exported from one process to another. Services implements this + * using memory wrapping, which ends up creating an external KV memory area. + */ +static IMG_BOOL +TreatExternalPagesAsContiguous(IMG_SYS_PHYADDR *psSysPhysAddr, IMG_SIZE_T uBytes, IMG_BOOL bPhysContig) +{ + IMG_UINT32 ui32; + IMG_UINT32 ui32AddrChk; + IMG_UINT32 ui32NumPages = RANGE_TO_PAGES(uBytes); + + /* + * If bPhysContig is IMG_TRUE, we must assume psSysPhysAddr points + * to the address of the first page, not an array of page addresses. + */ + for (ui32 = 0, ui32AddrChk = psSysPhysAddr[0].uiAddr; + ui32 < ui32NumPages; + ui32++, ui32AddrChk = (bPhysContig) ? (ui32AddrChk + PAGE_SIZE) : psSysPhysAddr[ui32].uiAddr) + { + if (!pfn_valid(PHYS_TO_PFN(ui32AddrChk))) + { + break; + } + } + if (ui32 == ui32NumPages) + { + return IMG_FALSE; + } + + if (!bPhysContig) + { + for (ui32 = 0, ui32AddrChk = psSysPhysAddr[0].uiAddr; + ui32 < ui32NumPages; + ui32++, ui32AddrChk += PAGE_SIZE) + { + if (psSysPhysAddr[ui32].uiAddr != ui32AddrChk) + { + return IMG_FALSE; + } + } + } + + return IMG_TRUE; +} +#endif + +LinuxMemArea *NewExternalKVLinuxMemArea(IMG_SYS_PHYADDR *pBasePAddr, IMG_VOID *pvCPUVAddr, IMG_SIZE_T uBytes, IMG_BOOL bPhysContig, IMG_UINT32 ui32AreaFlags) +{ + LinuxMemArea *psLinuxMemArea; + + psLinuxMemArea = LinuxMemAreaStructAlloc(); + if (!psLinuxMemArea) + { + return NULL; + } + + psLinuxMemArea->eAreaType = LINUX_MEM_AREA_EXTERNAL_KV; + psLinuxMemArea->uData.sExternalKV.pvExternalKV = pvCPUVAddr; + psLinuxMemArea->uData.sExternalKV.bPhysContig = +#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL) + (bPhysContig || TreatExternalPagesAsContiguous(pBasePAddr, uBytes, bPhysContig)) + ? 
IMG_TRUE : IMG_FALSE; +#else + bPhysContig; +#endif + if (psLinuxMemArea->uData.sExternalKV.bPhysContig) + { + psLinuxMemArea->uData.sExternalKV.uPhysAddr.SysPhysAddr = *pBasePAddr; + } + else + { + psLinuxMemArea->uData.sExternalKV.uPhysAddr.pSysPhysAddr = pBasePAddr; + } + psLinuxMemArea->uiByteSize = uBytes; + psLinuxMemArea->ui32AreaFlags = ui32AreaFlags; + INIT_LIST_HEAD(&psLinuxMemArea->sMMapOffsetStructList); + +#if defined(DEBUG_LINUX_MEM_AREAS) + DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags); +#endif + + return psLinuxMemArea; +} + + +IMG_VOID +FreeExternalKVLinuxMemArea(LinuxMemArea *psLinuxMemArea) +{ + PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_EXTERNAL_KV); + +#if defined(DEBUG_LINUX_MEM_AREAS) + DebugLinuxMemAreaRecordRemove(psLinuxMemArea); +#endif + + LinuxMemAreaStructFree(psLinuxMemArea); +} + + +LinuxMemArea * +NewIOLinuxMemArea(IMG_CPU_PHYADDR BasePAddr, + IMG_SIZE_T uBytes, + IMG_UINT32 ui32AreaFlags) +{ + LinuxMemArea *psLinuxMemArea = LinuxMemAreaStructAlloc(); + if (!psLinuxMemArea) + { + return NULL; + } + + /* Nothing to activly do. We just keep a record of the physical range. 
*/ + psLinuxMemArea->eAreaType = LINUX_MEM_AREA_IO; + psLinuxMemArea->uData.sIO.CPUPhysAddr.uiAddr = BasePAddr.uiAddr; + psLinuxMemArea->uiByteSize = uBytes; + psLinuxMemArea->ui32AreaFlags = ui32AreaFlags; + INIT_LIST_HEAD(&psLinuxMemArea->sMMapOffsetStructList); + +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) + DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_IO, + BasePAddr.uiAddr, + 0, + BasePAddr, + NULL, + uBytes, + "unknown", + 0 + ); +#endif + +#if defined(DEBUG_LINUX_MEM_AREAS) + DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags); +#endif + + return psLinuxMemArea; +} + + +IMG_VOID +FreeIOLinuxMemArea(LinuxMemArea *psLinuxMemArea) +{ + PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_IO); + +#if defined(DEBUG_LINUX_MEM_AREAS) + DebugLinuxMemAreaRecordRemove(psLinuxMemArea); +#endif + +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) + DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_IO, + psLinuxMemArea->uData.sIO.CPUPhysAddr.uiAddr, + __FILE__, + __LINE__); +#endif + + /* Nothing more to do than free the LinuxMemArea struct */ + + LinuxMemAreaStructFree(psLinuxMemArea); +} + + +LinuxMemArea * +NewAllocPagesLinuxMemArea(IMG_SIZE_T uBytes, IMG_UINT32 ui32AreaFlags) +{ + LinuxMemArea *psLinuxMemArea; + IMG_UINT32 ui32NumPages; + struct page **ppsPageList; + IMG_HANDLE hBlockPageList; + IMG_BOOL bFromPagePool; + + psLinuxMemArea = LinuxMemAreaStructAlloc(); + if (!psLinuxMemArea) + { + goto failed_area_alloc; + } + + ui32NumPages = RANGE_TO_PAGES(uBytes); + + if (!AllocPages(ui32AreaFlags, &ppsPageList, &hBlockPageList, ui32NumPages, &bFromPagePool)) + { + goto failed_alloc_pages; + } + + psLinuxMemArea->eAreaType = LINUX_MEM_AREA_ALLOC_PAGES; + psLinuxMemArea->uData.sPageList.ppsPageList = ppsPageList; + psLinuxMemArea->uData.sPageList.hBlockPageList = hBlockPageList; + psLinuxMemArea->uiByteSize = uBytes; + psLinuxMemArea->ui32AreaFlags = ui32AreaFlags; + INIT_LIST_HEAD(&psLinuxMemArea->sMMapOffsetStructList); + + /* We defer the cache flush to the first 
user mapping of this memory */ + psLinuxMemArea->bNeedsCacheInvalidate = AreaIsUncached(ui32AreaFlags) && !bFromPagePool; + +#if defined(DEBUG_LINUX_MEM_AREAS) + DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags); +#endif + + return psLinuxMemArea; + +failed_alloc_pages: + LinuxMemAreaStructFree(psLinuxMemArea); +failed_area_alloc: + PVR_DPF((PVR_DBG_ERROR, "%s: failed", __FUNCTION__)); + + return NULL; +} + + +IMG_VOID +FreeAllocPagesLinuxMemArea(LinuxMemArea *psLinuxMemArea) +{ + IMG_UINT32 ui32NumPages; + struct page **ppsPageList; + IMG_HANDLE hBlockPageList; + + PVR_ASSERT(psLinuxMemArea); + PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_ALLOC_PAGES); + +#if defined(DEBUG_LINUX_MEM_AREAS) + DebugLinuxMemAreaRecordRemove(psLinuxMemArea); +#endif + + ui32NumPages = RANGE_TO_PAGES(psLinuxMemArea->uiByteSize); + ppsPageList = psLinuxMemArea->uData.sPageList.ppsPageList; + hBlockPageList = psLinuxMemArea->uData.sPageList.hBlockPageList; + + FreePages(CanFreeToPool(psLinuxMemArea), ppsPageList, hBlockPageList, ui32NumPages); + + LinuxMemAreaStructFree(psLinuxMemArea); +} + +struct page* +LinuxMemAreaOffsetToPage(LinuxMemArea *psLinuxMemArea, + IMG_UINTPTR_T uByteOffset) +{ + IMG_UINTPTR_T uPageIndex; + IMG_CHAR *pui8Addr; + + switch (psLinuxMemArea->eAreaType) + { + case LINUX_MEM_AREA_ALLOC_PAGES: + uPageIndex = PHYS_TO_PFN(uByteOffset); + return psLinuxMemArea->uData.sPageList.ppsPageList[uPageIndex]; + + case LINUX_MEM_AREA_VMALLOC: + pui8Addr = psLinuxMemArea->uData.sVmalloc.pvVmallocAddress; + pui8Addr += uByteOffset; + return vmalloc_to_page(pui8Addr); + + case LINUX_MEM_AREA_SUB_ALLOC: + /* PRQA S 3670 3 */ /* ignore recursive warning */ + return LinuxMemAreaOffsetToPage(psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea, + psLinuxMemArea->uData.sSubAlloc.uiByteOffset + + uByteOffset); + default: + PVR_DPF((PVR_DBG_ERROR, + "%s: Unsupported request for struct page from LinuxMemArea with type=%s", + __FUNCTION__, 
LinuxMemAreaTypeToString(psLinuxMemArea->eAreaType))); + return NULL; + } +} + + +LinuxKMemCache * +KMemCacheCreateWrapper(IMG_CHAR *pszName, + size_t Size, + size_t Align, + IMG_UINT32 ui32Flags) +{ +#if defined(DEBUG_LINUX_SLAB_ALLOCATIONS) + ui32Flags |= SLAB_POISON|SLAB_RED_ZONE; +#endif + return kmem_cache_create(pszName, Size, Align, ui32Flags, NULL +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,22)) + , NULL +#endif /* (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,22) */ + ); +} + + +IMG_VOID +KMemCacheDestroyWrapper(LinuxKMemCache *psCache) +{ + kmem_cache_destroy(psCache); +} + + +IMG_VOID * +_KMemCacheAllocWrapper(LinuxKMemCache *psCache, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14)) + gfp_t Flags, +#else + IMG_INT Flags, +#endif + IMG_CHAR *pszFileName, + IMG_UINT32 ui32Line) +{ + IMG_VOID *pvRet; + +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) + IMG_CPU_PHYADDR sCpuPAddr; +#endif + + pvRet = kmem_cache_zalloc(psCache, Flags); + +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) + sCpuPAddr.uiAddr = 0; + DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE, + (IMG_UINTPTR_T)pvRet, + pvRet, + sCpuPAddr, + psCache, + kmem_cache_size(psCache), + pszFileName, + ui32Line + ); +#else + PVR_UNREFERENCED_PARAMETER(pszFileName); + PVR_UNREFERENCED_PARAMETER(ui32Line); +#endif + + return pvRet; +} + + +LinuxMemArea * +NewSubLinuxMemArea(LinuxMemArea *psParentLinuxMemArea, + IMG_UINTPTR_T uiByteOffset, + IMG_SIZE_T uBytes) +{ + LinuxMemArea *psLinuxMemArea; + + PVR_ASSERT((uiByteOffset + uBytes) <= psParentLinuxMemArea->uiByteSize); + + psLinuxMemArea = LinuxMemAreaStructAlloc(); + if (!psLinuxMemArea) + { + return NULL; + } + + psLinuxMemArea->eAreaType = LINUX_MEM_AREA_SUB_ALLOC; + psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea = psParentLinuxMemArea; + psLinuxMemArea->uData.sSubAlloc.uiByteOffset = uiByteOffset; + psLinuxMemArea->uiByteSize = uBytes; + psLinuxMemArea->ui32AreaFlags = psParentLinuxMemArea->ui32AreaFlags; + psLinuxMemArea->bNeedsCacheInvalidate 
= psParentLinuxMemArea->bNeedsCacheInvalidate; + INIT_LIST_HEAD(&psLinuxMemArea->sMMapOffsetStructList); + +#if defined(DEBUG_LINUX_MEM_AREAS) + { + DEBUG_LINUX_MEM_AREA_REC *psParentRecord; + psParentRecord = DebugLinuxMemAreaRecordFind(psParentLinuxMemArea); + PVR_ASSERT(psParentRecord != IMG_NULL); + DebugLinuxMemAreaRecordAdd(psLinuxMemArea, psParentRecord->ui32Flags); + } +#endif + + return psLinuxMemArea; +} + + +static IMG_VOID +FreeSubLinuxMemArea(LinuxMemArea *psLinuxMemArea) +{ + PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_SUB_ALLOC); + +#if defined(DEBUG_LINUX_MEM_AREAS) + DebugLinuxMemAreaRecordRemove(psLinuxMemArea); +#endif + + /* Nothing more to do than free the LinuxMemArea structure */ + + LinuxMemAreaStructFree(psLinuxMemArea); +} + + +static LinuxMemArea * +LinuxMemAreaStructAlloc(IMG_VOID) +{ +/* debug */ +#if 0 + LinuxMemArea *psLinuxMemArea; + psLinuxMemArea = kmem_cache_alloc(g_PsLinuxMemAreaCache, GFP_KERNEL); + printk(KERN_ERR "%s: psLinuxMemArea=%p\n", __FUNCTION__, psLinuxMemArea); + dump_stack(); + return psLinuxMemArea; +#else + return KMemCacheAllocWrapper(g_PsLinuxMemAreaCache, GFP_KERNEL); +#endif +} + + +static IMG_VOID +LinuxMemAreaStructFree(LinuxMemArea *psLinuxMemArea) +{ + KMemCacheFreeWrapper(g_PsLinuxMemAreaCache, psLinuxMemArea); + /* debug */ + //printk(KERN_ERR "%s(%p)\n", __FUNCTION__, psLinuxMemArea); +} + + +IMG_VOID +LinuxMemAreaDeepFree(LinuxMemArea *psLinuxMemArea) +{ + switch (psLinuxMemArea->eAreaType) + { + case LINUX_MEM_AREA_VMALLOC: + FreeVMallocLinuxMemArea(psLinuxMemArea); + break; + case LINUX_MEM_AREA_ALLOC_PAGES: + FreeAllocPagesLinuxMemArea(psLinuxMemArea); + break; + case LINUX_MEM_AREA_IOREMAP: + FreeIORemapLinuxMemArea(psLinuxMemArea); + break; + case LINUX_MEM_AREA_EXTERNAL_KV: + FreeExternalKVLinuxMemArea(psLinuxMemArea); + break; + case LINUX_MEM_AREA_IO: + FreeIOLinuxMemArea(psLinuxMemArea); + break; + case LINUX_MEM_AREA_SUB_ALLOC: + FreeSubLinuxMemArea(psLinuxMemArea); + break; + 
default: + PVR_DPF((PVR_DBG_ERROR, "%s: Unknown are type (%d)\n", + __FUNCTION__, psLinuxMemArea->eAreaType)); + break; + } +} + + +#if defined(DEBUG_LINUX_MEM_AREAS) +static IMG_VOID +DebugLinuxMemAreaRecordAdd(LinuxMemArea *psLinuxMemArea, IMG_UINT32 ui32Flags) +{ + DEBUG_LINUX_MEM_AREA_REC *psNewRecord; + const IMG_CHAR *pi8FlagsString; + + LinuxLockMutexNested(&g_sDebugMutex, PVRSRV_LOCK_CLASS_MM_DEBUG); + + if (psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC) + { + g_LinuxMemAreaWaterMark += psLinuxMemArea->uiByteSize; + if (g_LinuxMemAreaWaterMark > g_LinuxMemAreaHighWaterMark) + { + g_LinuxMemAreaHighWaterMark = g_LinuxMemAreaWaterMark; + } + } + g_LinuxMemAreaCount++; + + /* Create a new memory allocation record */ + psNewRecord = kmalloc(sizeof(DEBUG_LINUX_MEM_AREA_REC), GFP_KERNEL); + if (psNewRecord) + { + /* Record the allocation */ + psNewRecord->psLinuxMemArea = psLinuxMemArea; + psNewRecord->ui32Flags = ui32Flags; + psNewRecord->pid = OSGetCurrentProcessIDKM(); + + List_DEBUG_LINUX_MEM_AREA_REC_Insert(&g_LinuxMemAreaRecords, psNewRecord); + } + else + { + PVR_DPF((PVR_DBG_ERROR, + "%s: failed to allocate linux memory area record.", + __FUNCTION__)); + } + + /* Sanity check the flags */ + pi8FlagsString = HAPFlagsToString(ui32Flags); + if (strstr(pi8FlagsString, "UNKNOWN")) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: Unexpected flags (0x%08x) associated with psLinuxMemArea @ %p", + __FUNCTION__, + ui32Flags, + psLinuxMemArea)); + //dump_stack(); + } + + LinuxUnLockMutex(&g_sDebugMutex); +} + + + +static IMG_VOID* MatchLinuxMemArea_AnyVaCb(DEBUG_LINUX_MEM_AREA_REC *psCurrentRecord, + va_list va) +{ + LinuxMemArea *psLinuxMemArea; + + psLinuxMemArea = va_arg(va, LinuxMemArea*); + if (psCurrentRecord->psLinuxMemArea == psLinuxMemArea) + { + return psCurrentRecord; + } + else + { + return IMG_NULL; + } +} + + +static DEBUG_LINUX_MEM_AREA_REC * +DebugLinuxMemAreaRecordFind(LinuxMemArea *psLinuxMemArea) +{ + DEBUG_LINUX_MEM_AREA_REC *psCurrentRecord; + + 
LinuxLockMutexNested(&g_sDebugMutex, PVRSRV_LOCK_CLASS_MM_DEBUG); + psCurrentRecord = List_DEBUG_LINUX_MEM_AREA_REC_Any_va(g_LinuxMemAreaRecords, + MatchLinuxMemArea_AnyVaCb, + psLinuxMemArea); + +/*exit_unlock:*/ + LinuxUnLockMutex(&g_sDebugMutex); + + return psCurrentRecord; +} + + +static IMG_VOID +DebugLinuxMemAreaRecordRemove(LinuxMemArea *psLinuxMemArea) +{ + DEBUG_LINUX_MEM_AREA_REC *psCurrentRecord; + + LinuxLockMutexNested(&g_sDebugMutex, PVRSRV_LOCK_CLASS_MM_DEBUG); + + if (psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC) + { + g_LinuxMemAreaWaterMark -= psLinuxMemArea->uiByteSize; + } + g_LinuxMemAreaCount--; + + /* Locate the corresponding allocation entry */ + psCurrentRecord = List_DEBUG_LINUX_MEM_AREA_REC_Any_va(g_LinuxMemAreaRecords, + MatchLinuxMemArea_AnyVaCb, + psLinuxMemArea); + if (psCurrentRecord) + { + /* Unlink the allocation record */ + List_DEBUG_LINUX_MEM_AREA_REC_Remove(psCurrentRecord); + kfree(psCurrentRecord); + } + else + { + PVR_DPF((PVR_DBG_ERROR, "%s: couldn't find an entry for psLinuxMemArea=%p\n", + __FUNCTION__, psLinuxMemArea)); + } + + LinuxUnLockMutex(&g_sDebugMutex); +} +#endif + + +IMG_VOID * +LinuxMemAreaToCpuVAddr(LinuxMemArea *psLinuxMemArea) +{ + switch (psLinuxMemArea->eAreaType) + { + case LINUX_MEM_AREA_VMALLOC: + return psLinuxMemArea->uData.sVmalloc.pvVmallocAddress; + case LINUX_MEM_AREA_IOREMAP: + return psLinuxMemArea->uData.sIORemap.pvIORemapCookie; + case LINUX_MEM_AREA_EXTERNAL_KV: + return psLinuxMemArea->uData.sExternalKV.pvExternalKV; + case LINUX_MEM_AREA_SUB_ALLOC: + { + IMG_CHAR *pAddr = + LinuxMemAreaToCpuVAddr(psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea); /* PRQA S 3670 */ /* ignore recursive warning */ + if (!pAddr) + { + return NULL; + } + return pAddr + psLinuxMemArea->uData.sSubAlloc.uiByteOffset; + } + default: + return NULL; + } +} + + +IMG_CPU_PHYADDR +LinuxMemAreaToCpuPAddr(LinuxMemArea *psLinuxMemArea, IMG_UINTPTR_T uiByteOffset) +{ + IMG_CPU_PHYADDR CpuPAddr; + + 
CpuPAddr.uiAddr = 0; + + switch (psLinuxMemArea->eAreaType) + { + case LINUX_MEM_AREA_IOREMAP: + { + CpuPAddr = psLinuxMemArea->uData.sIORemap.CPUPhysAddr; + CpuPAddr.uiAddr += uiByteOffset; + break; + } + case LINUX_MEM_AREA_EXTERNAL_KV: + { + if (psLinuxMemArea->uData.sExternalKV.bPhysContig) + { + CpuPAddr = SysSysPAddrToCpuPAddr(psLinuxMemArea->uData.sExternalKV.uPhysAddr.SysPhysAddr); + CpuPAddr.uiAddr += uiByteOffset; + } + else + { + IMG_UINTPTR_T uiPageIndex = PHYS_TO_PFN(uiByteOffset); + IMG_SYS_PHYADDR SysPAddr = psLinuxMemArea->uData.sExternalKV.uPhysAddr.pSysPhysAddr[uiPageIndex]; + + CpuPAddr = SysSysPAddrToCpuPAddr(SysPAddr); + CpuPAddr.uiAddr += ADDR_TO_PAGE_OFFSET(uiByteOffset); + } + break; + } + case LINUX_MEM_AREA_IO: + { + CpuPAddr = psLinuxMemArea->uData.sIO.CPUPhysAddr; + CpuPAddr.uiAddr += uiByteOffset; + break; + } + case LINUX_MEM_AREA_VMALLOC: + { + IMG_CHAR *pCpuVAddr; + pCpuVAddr = + (IMG_CHAR *)psLinuxMemArea->uData.sVmalloc.pvVmallocAddress; + pCpuVAddr += uiByteOffset; + CpuPAddr.uiAddr = VMallocToPhys(pCpuVAddr); + break; + } + case LINUX_MEM_AREA_ALLOC_PAGES: + { + struct page *page; + IMG_UINTPTR_T uiPageIndex = PHYS_TO_PFN(uiByteOffset); + page = psLinuxMemArea->uData.sPageList.ppsPageList[uiPageIndex]; + CpuPAddr.uiAddr = page_to_phys(page); + CpuPAddr.uiAddr += ADDR_TO_PAGE_OFFSET(uiByteOffset); + break; + } + case LINUX_MEM_AREA_SUB_ALLOC: + { + CpuPAddr = + OSMemHandleToCpuPAddr(psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea, + psLinuxMemArea->uData.sSubAlloc.uiByteOffset + + uiByteOffset); + break; + } + default: + { + PVR_DPF((PVR_DBG_ERROR, "%s: Unknown LinuxMemArea type (%d)\n", + __FUNCTION__, psLinuxMemArea->eAreaType)); + PVR_ASSERT(CpuPAddr.uiAddr); + break; + } + } + + return CpuPAddr; +} + + +IMG_BOOL +LinuxMemAreaPhysIsContig(LinuxMemArea *psLinuxMemArea) +{ + switch (psLinuxMemArea->eAreaType) + { + case LINUX_MEM_AREA_IOREMAP: + case LINUX_MEM_AREA_IO: + return IMG_TRUE; + + case LINUX_MEM_AREA_EXTERNAL_KV: 
+ return psLinuxMemArea->uData.sExternalKV.bPhysContig; + + case LINUX_MEM_AREA_VMALLOC: + case LINUX_MEM_AREA_ALLOC_PAGES: + return IMG_FALSE; + + case LINUX_MEM_AREA_SUB_ALLOC: + /* PRQA S 3670 1 */ /* ignore recursive warning */ + return LinuxMemAreaPhysIsContig(psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea); + + default: + PVR_DPF((PVR_DBG_ERROR, "%s: Unknown LinuxMemArea type (%d)\n", + __FUNCTION__, psLinuxMemArea->eAreaType)); + break; + } + return IMG_FALSE; +} + + +const IMG_CHAR * +LinuxMemAreaTypeToString(LINUX_MEM_AREA_TYPE eMemAreaType) +{ + /* Note we explicitly check the types instead of e.g. + * using the type to index an array of strings so + * we remain orthogonal to enum changes */ + switch (eMemAreaType) + { + case LINUX_MEM_AREA_IOREMAP: + return "LINUX_MEM_AREA_IOREMAP"; + case LINUX_MEM_AREA_EXTERNAL_KV: + return "LINUX_MEM_AREA_EXTERNAL_KV"; + case LINUX_MEM_AREA_IO: + return "LINUX_MEM_AREA_IO"; + case LINUX_MEM_AREA_VMALLOC: + return "LINUX_MEM_AREA_VMALLOC"; + case LINUX_MEM_AREA_SUB_ALLOC: + return "LINUX_MEM_AREA_SUB_ALLOC"; + case LINUX_MEM_AREA_ALLOC_PAGES: + return "LINUX_MEM_AREA_ALLOC_PAGES"; + default: + PVR_ASSERT(0); + } + + return ""; +} + + +#if defined(DEBUG_LINUX_MEM_AREAS) || defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) +static void ProcSeqStartstopDebugMutex(struct seq_file *sfile, IMG_BOOL start) +{ + if (start) + { + LinuxLockMutexNested(&g_sDebugMutex, PVRSRV_LOCK_CLASS_MM_DEBUG); + } + else + { + LinuxUnLockMutex(&g_sDebugMutex); + } +} +#endif /* defined(DEBUG_LINUX_MEM_AREAS) || defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) */ + +#if defined(DEBUG_LINUX_MEM_AREAS) + +static IMG_VOID* DecOffMemAreaRec_AnyVaCb(DEBUG_LINUX_MEM_AREA_REC *psNode, va_list va) +{ + off_t *pOff = va_arg(va, off_t*); + if (--(*pOff)) + { + return IMG_NULL; + } + else + { + return psNode; + } +} + +/* seq_file version of generating output, for reference check proc.c:CreateProcReadEntrySeq */ +static void* ProcSeqNextMemArea(struct seq_file 
*sfile,void* el,loff_t off) +{ + DEBUG_LINUX_MEM_AREA_REC *psRecord; + psRecord = (DEBUG_LINUX_MEM_AREA_REC*) + List_DEBUG_LINUX_MEM_AREA_REC_Any_va(g_LinuxMemAreaRecords, + DecOffMemAreaRec_AnyVaCb, + &off); + return (void*)psRecord; +} + +static void* ProcSeqOff2ElementMemArea(struct seq_file * sfile, loff_t off) +{ + DEBUG_LINUX_MEM_AREA_REC *psRecord; + if (!off) + { + return PVR_PROC_SEQ_START_TOKEN; + } + + psRecord = (DEBUG_LINUX_MEM_AREA_REC*) + List_DEBUG_LINUX_MEM_AREA_REC_Any_va(g_LinuxMemAreaRecords, + DecOffMemAreaRec_AnyVaCb, + &off); + return (void*)psRecord; +} + + +static void ProcSeqShowMemArea(struct seq_file *sfile,void* el) +{ + DEBUG_LINUX_MEM_AREA_REC *psRecord = (DEBUG_LINUX_MEM_AREA_REC*)el; + if (el == PVR_PROC_SEQ_START_TOKEN) + { + +#if !defined(DEBUG_LINUX_XML_PROC_FILES) + seq_printf(sfile, + "Number of Linux Memory Areas: %u\n" + "At the current water mark these areas correspond to %u bytes (excluding SUB areas)\n" + "At the highest water mark these areas corresponded to %u bytes (excluding SUB areas)\n" + "\nDetails for all Linux Memory Areas:\n" + "%s %-24s %s %s %-8s %-5s %s\n", + g_LinuxMemAreaCount, + g_LinuxMemAreaWaterMark, + g_LinuxMemAreaHighWaterMark, + "psLinuxMemArea", + "LinuxMemType", + "CpuVAddr", + "CpuPAddr", + "Bytes", + "Pid", + "Flags" + ); +#else + seq_printf(sfile, + "\n" + "\t%u\n" + "\t\n" /* (excluding SUB areas) */ + "\t\n" /* (excluding SUB areas) */ + "\n", + g_LinuxMemAreaCount, + g_LinuxMemAreaWaterMark, + g_LinuxMemAreaHighWaterMark + ); +#endif + return; + } + + seq_printf(sfile, +#if !defined(DEBUG_LINUX_XML_PROC_FILES) + "%p %-24s %p " CPUPADDR_FMT " %" SIZE_T_FMT_LEN "u %-5u %08x=(%s)\n", +#else + "\n" + "\t%p\n" + "\t%s\n" + "\t%p\n" + "\t" CPUPADDR_FMT "\n" + "\t%" SIZE_T_FMT_LEN "d\n" + "\t%u\n" + "\t%08x\n" + "\t%s\n" + "\n", +#endif + psRecord->psLinuxMemArea, + LinuxMemAreaTypeToString(psRecord->psLinuxMemArea->eAreaType), + LinuxMemAreaToCpuVAddr(psRecord->psLinuxMemArea), + 
LinuxMemAreaToCpuPAddr(psRecord->psLinuxMemArea,0).uiAddr, + psRecord->psLinuxMemArea->uiByteSize, + psRecord->pid, + psRecord->ui32Flags, + HAPFlagsToString(psRecord->ui32Flags) + ); + +} +#endif /* DEBUG_LINUX_MEM_AREAS */ + + +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) + +static IMG_VOID* DecOffMemAllocRec_AnyVaCb(DEBUG_MEM_ALLOC_REC *psNode, va_list va) +{ + off_t *pOff = va_arg(va, off_t*); + if (--(*pOff)) + { + return IMG_NULL; + } + else + { + return psNode; + } +} + +/* seq_file version of generating output, for reference check proc.c:CreateProcReadEntrySeq */ +static void* ProcSeqNextMemoryRecords(struct seq_file *sfile,void* el,loff_t off) +{ + DEBUG_MEM_ALLOC_REC *psRecord; + psRecord = (DEBUG_MEM_ALLOC_REC*) + List_DEBUG_MEM_ALLOC_REC_Any_va(g_MemoryRecords, + DecOffMemAllocRec_AnyVaCb, + &off); +#if defined(DEBUG_LINUX_XML_PROC_FILES) + if (!psRecord) + { + seq_printf(sfile, "\n"); + } +#endif + + return (void*)psRecord; +} + +static void* ProcSeqOff2ElementMemoryRecords(struct seq_file *sfile, loff_t off) +{ + DEBUG_MEM_ALLOC_REC *psRecord; + if (!off) + { + return PVR_PROC_SEQ_START_TOKEN; + } + + psRecord = (DEBUG_MEM_ALLOC_REC*) + List_DEBUG_MEM_ALLOC_REC_Any_va(g_MemoryRecords, + DecOffMemAllocRec_AnyVaCb, + &off); + +#if defined(DEBUG_LINUX_XML_PROC_FILES) + if (!psRecord) + { + seq_printf(sfile, "\n"); + } +#endif + + return (void*)psRecord; +} + +static void ProcSeqShowMemoryRecords(struct seq_file *sfile,void* el) +{ + DEBUG_MEM_ALLOC_REC *psRecord = (DEBUG_MEM_ALLOC_REC*)el; + if (el == PVR_PROC_SEQ_START_TOKEN) + { +#if !defined(DEBUG_LINUX_XML_PROC_FILES) + /* NOTE: If you update this code, please also update the XML varient below + * too! 
*/ + + seq_printf(sfile, "%-60s: %d bytes\n", + "Current Water Mark of bytes allocated via kmalloc", + g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMALLOC] + g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_SWAP]); + seq_printf(sfile, "%-60s: %d bytes\n", + "Highest Water Mark of bytes allocated via kmalloc", + g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMALLOC] + g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_SWAP]); + seq_printf(sfile, "%-60s: %d bytes\n", + "Current Water Mark of bytes allocated via vmalloc", + g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMALLOC]); + seq_printf(sfile, "%-60s: %d bytes\n", + "Highest Water Mark of bytes allocated via vmalloc", + g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMALLOC]); + seq_printf(sfile, "%-60s: %d bytes\n", + "Current Water Mark of bytes allocated via alloc_pages", + g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]); + seq_printf(sfile, "%-60s: %d bytes\n", + "Highest Water Mark of bytes allocated via alloc_pages", + g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]); + seq_printf(sfile, "%-60s: %d bytes\n", + "Current Water Mark of bytes allocated via ioremap", + g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IOREMAP]); + seq_printf(sfile, "%-60s: %d bytes\n", + "Highest Water Mark of bytes allocated via ioremap", + g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_IOREMAP]); + seq_printf(sfile, "%-60s: %d bytes\n", + "Current Water Mark of bytes reserved for \"IO\" memory areas", + g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]); + seq_printf(sfile, "%-60s: %d bytes\n", + "Highest Water Mark of bytes allocated for \"IO\" memory areas", + g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]); + seq_printf(sfile, "%-60s: %d bytes\n", + "Current Water Mark of bytes allocated via kmem_cache_alloc", + g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]); + seq_printf(sfile, "%-60s: %d bytes\n", + "Highest Water Mark of bytes allocated via kmem_cache_alloc", + g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]); +#if defined(PVR_LINUX_MEM_AREA_USE_VMAP) + seq_printf(sfile, "%-60s: 
%d bytes\n", + "Current Water Mark of bytes mapped via vmap", + g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMAP]); + seq_printf(sfile, "%-60s: %d bytes\n", + "Highest Water Mark of bytes mapped via vmap", + g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMAP]); +#endif +#if (PVR_LINUX_MEM_AREA_POOL_MAX_PAGES != 0) + seq_printf(sfile, "%-60s: %d pages\n", + "Number of pages in page pool", + atomic_read(&g_sPagePoolEntryCount)); +#endif + seq_printf( sfile, "\n"); + seq_printf(sfile, "%-60s: %d bytes\n", + "The Current Water Mark for memory allocated from system RAM", + SysRAMTrueWaterMark()); + seq_printf(sfile, "%-60s: %d bytes\n", + "The Highest Water Mark for memory allocated from system RAM", + g_SysRAMHighWaterMark); + seq_printf(sfile, "%-60s: %d bytes\n", + "The Current Water Mark for memory allocated from IO memory", + g_IOMemWaterMark); + seq_printf(sfile, "%-60s: %d bytes\n", + "The Highest Water Mark for memory allocated from IO memory", + g_IOMemHighWaterMark); + + seq_printf( sfile, "\n"); + + seq_printf(sfile, "Details for all known allocations:\n" + "%-16s %-8s %-8s %-10s %-5s %-10s %s\n", + "Type", + "CpuVAddr", + "CpuPAddr", + "Bytes", + "PID", + "PrivateData", + "Filename:Line"); + +#else /* DEBUG_LINUX_XML_PROC_FILES */ + + /* Note: If you want to update the description property of a watermark + * ensure that the key property remains unchanged so that watermark data + * logged over time from different driver revisions may remain comparable + */ + seq_printf(sfile, "\n\n"); + seq_printf(sfile, + "\n", + g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMALLOC] + g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_SWAP]); + seq_printf(sfile, + "\n", + g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMALLOC] + g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_SWAP]); + seq_printf(sfile, + "\n", + g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMALLOC]); + seq_printf(sfile, + "\n", + g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMALLOC]); + seq_printf(sfile, + "\n", + g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]); 
+ seq_printf(sfile, + "\n", + g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]); + seq_printf(sfile, + "\n", + g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IOREMAP]); + seq_printf(sfile, + "\n", + g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_IOREMAP]); + seq_printf(sfile, + "\n", + g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]); + seq_printf(sfile, + "\n", + g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]); + seq_printf(sfile, + "\n", + g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]); + seq_printf(sfile, + "\n", + g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]); +#if defined(PVR_LINUX_MEM_AREA_USE_VMAP) + seq_printf(sfile, + "\n", + g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMAP]); + seq_printf(sfile, + "\n", + g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMAP]); +#endif + seq_printf(sfile, + "\n", + SysRAMTrueWaterMark()); + seq_printf(sfile, + "\n", + g_SysRAMHighWaterMark); + seq_printf(sfile, + "\n", + g_IOMemWaterMark); + seq_printf(sfile, + "\n", + g_IOMemHighWaterMark); + +#if (PVR_LINUX_MEM_AREA_POOL_MAX_PAGES != 0) + seq_printf(sfile, + "\n", + PAGES_TO_BYTES(atomic_read(&g_sPagePoolEntryCount))); +#endif + seq_printf(sfile, "\n"); + +#endif /* DEBUG_LINUX_XML_PROC_FILES */ + return; + } + + if (psRecord->eAllocType != DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE) + { + seq_printf(sfile, +#if !defined(DEBUG_LINUX_XML_PROC_FILES) + "%-16s %p " CPUPADDR_FMT " %" SIZE_T_FMT_LEN "u %-5d %-10s %s:%d\n", +#else + "\n" + "\t%s\n" + "\t%p\n" + "\t" CPUPADDR_FMT "\n" + "\t%" SIZE_T_FMT_LEN "u\n" + "\t%d\n" + "\t%s\n" + "\t%s\n" + "\t%d\n" + "\n", +#endif + DebugMemAllocRecordTypeToString(psRecord->eAllocType), + psRecord->pvCpuVAddr, + psRecord->sCpuPAddr.uiAddr, + psRecord->uiBytes, + psRecord->pid, + "NULL", + psRecord->pszFileName, + psRecord->ui32Line); + } + else + { + seq_printf(sfile, +#if !defined(DEBUG_LINUX_XML_PROC_FILES) + "%-16s %p " CPUPADDR_FMT " %" SIZE_T_FMT_LEN "u %-5d %-10s %s:%d\n", +#else + "\n" + "\t%s\n" + "\t%p\n" + "\t" CPUPADDR_FMT "\n" + "\t%" SIZE_T_FMT_LEN "u\n" + 
"\t%d\n" + "\t%s\n" + "\t%s\n" + "\t%d\n" + "\n", +#endif + DebugMemAllocRecordTypeToString(psRecord->eAllocType), + psRecord->pvCpuVAddr, + psRecord->sCpuPAddr.uiAddr, + psRecord->uiBytes, + psRecord->pid, + KMemCacheNameWrapper(psRecord->pvPrivateData), + psRecord->pszFileName, + psRecord->ui32Line); + } +} +#endif /* defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) */ + + +#if defined(DEBUG_LINUX_MEM_AREAS) || defined(DEBUG_LINUX_MMAP_AREAS) +/* This could be moved somewhere more general */ +const IMG_CHAR * +HAPFlagsToString(IMG_UINT32 ui32Flags) +{ + static IMG_CHAR szFlags[50]; + IMG_INT32 i32Pos = 0; + IMG_UINT32 ui32CacheTypeIndex, ui32MapTypeIndex; + IMG_CHAR *apszCacheTypes[] = { + "UNCACHED", + "CACHED", + "WRITECOMBINE", + "UNKNOWN" + }; + IMG_CHAR *apszMapType[] = { + "KERNEL_ONLY", + "SINGLE_PROCESS", + "MULTI_PROCESS", + "FROM_EXISTING_PROCESS", + "NO_CPU_VIRTUAL", + "UNKNOWN" + }; + + /* FIXME create an enum for the cache type that we can + * cast and select so we get compiler warnings when + * when this code isn't complete due to new flags */ + if (ui32Flags & PVRSRV_HAP_UNCACHED) { + ui32CacheTypeIndex = 0; + } else if (ui32Flags & PVRSRV_HAP_CACHED) { + ui32CacheTypeIndex = 1; + } else if (ui32Flags & PVRSRV_HAP_WRITECOMBINE) { + ui32CacheTypeIndex = 2; + } else { + ui32CacheTypeIndex = 3; + PVR_DPF((PVR_DBG_ERROR, "%s: unknown cache type (%u)", + __FUNCTION__, (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK))); + } + + /* FIXME create an enum for the map type that we can + * cast and select so we get compiler warnings when + * when this code isn't complete due to new flags */ + if (ui32Flags & PVRSRV_HAP_KERNEL_ONLY) { + ui32MapTypeIndex = 0; + } else if (ui32Flags & PVRSRV_HAP_SINGLE_PROCESS) { + ui32MapTypeIndex = 1; + } else if (ui32Flags & PVRSRV_HAP_MULTI_PROCESS) { + ui32MapTypeIndex = 2; + } else if (ui32Flags & PVRSRV_HAP_FROM_EXISTING_PROCESS) { + ui32MapTypeIndex = 3; + } else if (ui32Flags & PVRSRV_HAP_NO_CPU_VIRTUAL) { + ui32MapTypeIndex = 4; + } else 
{ + ui32MapTypeIndex = 5; + PVR_DPF((PVR_DBG_ERROR, "%s: unknown map type (%u)", + __FUNCTION__, (ui32Flags & PVRSRV_HAP_MAPTYPE_MASK))); + } + + i32Pos = sprintf(szFlags, "%s|", apszCacheTypes[ui32CacheTypeIndex]); + if (i32Pos <= 0) + { + PVR_DPF((PVR_DBG_ERROR, "%s: sprintf for cache type %u failed (%d)", + __FUNCTION__, ui32CacheTypeIndex, i32Pos)); + szFlags[0] = 0; + } + else + { + sprintf(szFlags + i32Pos, "%s", apszMapType[ui32MapTypeIndex]); + } + + return szFlags; +} +#endif + +#if defined(DEBUG_LINUX_MEM_AREAS) +static IMG_VOID LinuxMMCleanup_MemAreas_ForEachCb(DEBUG_LINUX_MEM_AREA_REC *psCurrentRecord) +{ + LinuxMemArea *psLinuxMemArea; + + psLinuxMemArea = psCurrentRecord->psLinuxMemArea; + PVR_DPF((PVR_DBG_ERROR, "%s: BUG!: Cleaning up Linux memory area (%p), type=%s, size=%"SIZE_T_FMT_LEN"d bytes", + __FUNCTION__, + psCurrentRecord->psLinuxMemArea, + LinuxMemAreaTypeToString(psCurrentRecord->psLinuxMemArea->eAreaType), + psCurrentRecord->psLinuxMemArea->uiByteSize)); + /* Note this will also remove psCurrentRecord from g_LinuxMemAreaRecords + * but that's ok since we have already got a pointer to the next area. */ + LinuxMemAreaDeepFree(psLinuxMemArea); +} +#endif + +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) +static IMG_VOID LinuxMMCleanup_MemRecords_ForEachVa(DEBUG_MEM_ALLOC_REC *psCurrentRecord) + +{ + +/* It's a bug if anything remains allocated at this point. We + * report an error, and simply brute force free anything we find. 
*/ + PVR_DPF((PVR_DBG_ERROR, "%s: BUG!: Cleaning up memory: " + "type=%s " + "CpuVAddr=%p " + "CpuPAddr=0x" CPUPADDR_FMT ", " + "allocated @ file=%s,line=%d", + __FUNCTION__, + DebugMemAllocRecordTypeToString(psCurrentRecord->eAllocType), + psCurrentRecord->pvCpuVAddr, + psCurrentRecord->sCpuPAddr.uiAddr, + psCurrentRecord->pszFileName, + psCurrentRecord->ui32Line)); + switch (psCurrentRecord->eAllocType) + { + case DEBUG_MEM_ALLOC_TYPE_SWAP: + _KFreeWrapper(psCurrentRecord->pvCpuVAddr, __FILE__, __LINE__, IMG_TRUE); + break; + case DEBUG_MEM_ALLOC_TYPE_KMALLOC: + KFreeWrapper(psCurrentRecord->pvCpuVAddr); + break; + case DEBUG_MEM_ALLOC_TYPE_IOREMAP: + IOUnmapWrapper(psCurrentRecord->pvCpuVAddr); + break; + case DEBUG_MEM_ALLOC_TYPE_IO: + /* Nothing needed except to free the record */ + DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_IO, psCurrentRecord->uiKey, __FILE__, __LINE__); + break; + case DEBUG_MEM_ALLOC_TYPE_VMALLOC: + VFreeWrapper(psCurrentRecord->pvCpuVAddr); + break; + case DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES: + DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES, psCurrentRecord->uiKey, __FILE__, __LINE__); + break; + case DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE: + KMemCacheFreeWrapper(psCurrentRecord->pvPrivateData, psCurrentRecord->pvCpuVAddr); + break; +#if defined(PVR_LINUX_MEM_AREA_USE_VMAP) + case DEBUG_MEM_ALLOC_TYPE_VMAP: + VUnmapWrapper(psCurrentRecord->pvCpuVAddr); + break; +#endif + default: + PVR_ASSERT(0); + } +} +#endif + + +#if defined(PVR_LINUX_MEM_AREA_POOL_ALLOW_SHRINK) +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0)) +static int +ShrinkPagePool(struct shrinker *psShrinker, struct shrink_control *psShrinkControl) +{ + if(psShrinkControl->nr_to_scan != 0) + { + return ScanObjectsInPagePool(psShrinker, psShrinkControl); + } + else + { + /* No pages are being reclaimed so just return the page count. 
*/ + return CountObjectsInPagePool(psShrinker, psShrinkControl); + } +} + +static struct shrinker g_sShrinker = +{ + .shrink = ShrinkPagePool, + .seeks = DEFAULT_SEEKS +}; +#else +static struct shrinker g_sShrinker = +{ + .count_objects = CountObjectsInPagePool, + .scan_objects = ScanObjectsInPagePool, + .seeks = DEFAULT_SEEKS +}; +#endif + +static IMG_BOOL g_bShrinkerRegistered; +#endif + +IMG_VOID +LinuxMMCleanup(IMG_VOID) +{ +#if defined(DEBUG_LINUX_MEM_AREAS) + { + if (g_LinuxMemAreaCount) + { + PVR_DPF((PVR_DBG_ERROR, "%s: BUG!: There are %d LinuxMemArea allocation unfreed (%d bytes)", + __FUNCTION__, g_LinuxMemAreaCount, g_LinuxMemAreaWaterMark)); + } + + List_DEBUG_LINUX_MEM_AREA_REC_ForEach(g_LinuxMemAreaRecords, LinuxMMCleanup_MemAreas_ForEachCb); + + if (g_SeqFileMemArea) + { + RemoveProcEntrySeq(g_SeqFileMemArea); + } + } +#endif + +#if defined(PVR_LINUX_MEM_AREA_POOL_ALLOW_SHRINK) + if (g_bShrinkerRegistered) + { + unregister_shrinker(&g_sShrinker); + } +#endif + + /* + * The page pool must be freed after any remaining mem areas, but before + * the remaining memory resources. + */ + FreePagePool(); + +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) + { + + /* + * It's a bug if anything remains allocated at this point. We + * report an error, and simply brute force free anything we find. 
+ */ + List_DEBUG_MEM_ALLOC_REC_ForEach(g_MemoryRecords, LinuxMMCleanup_MemRecords_ForEachVa); + List_DEBUG_MEM_ALLOC_REC_ForEach(g_SwapMemoryRecords, LinuxMMCleanup_MemRecords_ForEachVa); + + if (g_SeqFileMemoryRecords) + { + RemoveProcEntrySeq(g_SeqFileMemoryRecords); + } + } +#endif + + if (g_PsLinuxMemAreaCache) + { + KMemCacheDestroyWrapper(g_PsLinuxMemAreaCache); + } + + if (g_PsLinuxPagePoolCache) + { + KMemCacheDestroyWrapper(g_PsLinuxPagePoolCache); + } +} + +PVRSRV_ERROR +LinuxMMInit(IMG_VOID) +{ +#if defined(DEBUG_LINUX_MEM_AREAS) || defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) + LinuxInitMutex(&g_sDebugMutex); + LinuxInitMutex(&g_sSwapDebugMutex); +#endif + +#if defined(DEBUG_LINUX_MEM_AREAS) + { + g_SeqFileMemArea = CreateProcReadEntrySeq( + "mem_areas", + NULL, + ProcSeqNextMemArea, + ProcSeqShowMemArea, + ProcSeqOff2ElementMemArea, + ProcSeqStartstopDebugMutex + ); + if (!g_SeqFileMemArea) + { + goto failed; + } + } +#endif + + +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) + { + g_SeqFileMemoryRecords = CreateProcReadEntrySeq( + "meminfo", + NULL, + ProcSeqNextMemoryRecords, + ProcSeqShowMemoryRecords, + ProcSeqOff2ElementMemoryRecords, + ProcSeqStartstopDebugMutex + ); + if (!g_SeqFileMemoryRecords) + { + goto failed; + } + } +#endif + + g_PsLinuxMemAreaCache = KMemCacheCreateWrapper("img-mm", sizeof(LinuxMemArea), 0, 0); + if (!g_PsLinuxMemAreaCache) + { + PVR_DPF((PVR_DBG_ERROR,"%s: failed to allocate mem area kmem_cache", __FUNCTION__)); + goto failed; + } + +#if (PVR_LINUX_MEM_AREA_POOL_MAX_PAGES != 0) + g_iPagePoolMaxEntries = PVR_LINUX_MEM_AREA_POOL_MAX_PAGES; + if (g_iPagePoolMaxEntries <= 0 || g_iPagePoolMaxEntries > INT_MAX/2) + { + g_iPagePoolMaxEntries = INT_MAX/2; + PVR_TRACE(("%s: No limit set for page pool size", __FUNCTION__)); + } + else + { + PVR_TRACE(("%s: Maximum page pool size: %d", __FUNCTION__, g_iPagePoolMaxEntries)); + } + + g_PsLinuxPagePoolCache = KMemCacheCreateWrapper("img-mm-pool", sizeof(LinuxPagePoolEntry), 0, 0); + if 
(!g_PsLinuxPagePoolCache) + { + PVR_DPF((PVR_DBG_ERROR,"%s: failed to allocate page pool kmem_cache", __FUNCTION__)); + goto failed; + } +#endif + +#if defined(PVR_LINUX_MEM_AREA_POOL_ALLOW_SHRINK) + register_shrinker(&g_sShrinker); + g_bShrinkerRegistered = IMG_TRUE; +#endif + + return PVRSRV_OK; + +failed: + LinuxMMCleanup(); + return PVRSRV_ERROR_OUT_OF_MEMORY; +} + diff --git a/sgx_km/eurasia_km/services4/srvkm/env/linux/mm.h b/sgx_km/eurasia_km/services4/srvkm/env/linux/mm.h new file mode 100644 index 0000000..4e64dd6 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/env/linux/mm.h @@ -0,0 +1,675 @@ +/*************************************************************************/ /*! +@Title Linux Memory Management. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Declares various memory management utility functions + for Linux. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#ifndef __IMG_LINUX_MM_H__ +#define __IMG_LINUX_MM_H__ + +#include + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)) +#ifndef AUTOCONF_INCLUDED +#include +#endif +#endif + +#include +#include +#include + +#include + +#define PHYS_TO_PFN(phys) ((phys) >> PAGE_SHIFT) +#define PFN_TO_PHYS(pfn) ((pfn) << PAGE_SHIFT) + +#define RANGE_TO_PAGES(range) (((range) + (PAGE_SIZE - 1)) >> PAGE_SHIFT) + +#define ADDR_TO_PAGE_OFFSET(addr) (((unsigned long)(addr)) & (PAGE_SIZE - 1)) + +#define PAGES_TO_BYTES(pages) ((pages) << PAGE_SHIFT) + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)) +#define REMAP_PFN_RANGE(vma, addr, pfn, size, prot) remap_pfn_range(vma, addr, pfn, size, prot) +#else +#define REMAP_PFN_RANGE(vma, addr, pfn, size, prot) remap_page_range(vma, addr, PFN_TO_PHYS(pfn), size, prot) +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)) +#define IO_REMAP_PFN_RANGE(vma, addr, pfn, size, prot) io_remap_pfn_range(vma, addr, pfn, size, prot) +#else +#define IO_REMAP_PFN_RANGE(vma, addr, pfn, size, prot) io_remap_page_range(vma, addr, PFN_TO_PHYS(pfn), size, prot) +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)) +#define VM_INSERT_PAGE(vma, addr, page) vm_insert_page(vma, addr, page) +#else +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)) +#define VM_INSERT_PAGE(vma, addr, page) remap_pfn_range(vma, addr, page_to_pfn(page), PAGE_SIZE, vma->vm_page_prot); +#else +#define VM_INSERT_PAGE(vma, addr, page) remap_page_range(vma, addr, page_to_phys(page), PAGE_SIZE, vma->vm_page_prot); +#endif +#endif + +static inline IMG_UINTPTR_T VMallocToPhys(IMG_VOID *pCpuVAddr) +{ + return (page_to_phys(vmalloc_to_page(pCpuVAddr)) + ADDR_TO_PAGE_OFFSET(pCpuVAddr)); + +} + +typedef enum { + LINUX_MEM_AREA_IOREMAP, + LINUX_MEM_AREA_EXTERNAL_KV, + LINUX_MEM_AREA_IO, + LINUX_MEM_AREA_VMALLOC, + LINUX_MEM_AREA_ALLOC_PAGES, + LINUX_MEM_AREA_SUB_ALLOC, +#if 
defined(PVR_LINUX_MEM_AREA_USE_VMAP) + LINUX_MEM_AREA_VMAP, +#endif + LINUX_MEM_AREA_TYPE_COUNT +}LINUX_MEM_AREA_TYPE; + +typedef struct _LinuxMemArea LinuxMemArea; + + +/* FIXME - describe this structure. */ +struct _LinuxMemArea { + LINUX_MEM_AREA_TYPE eAreaType; + union _uData + { + struct _sIORemap + { + /* Note: The memory this represents is _not_ implicitly + * page aligned, neither is its size */ + IMG_CPU_PHYADDR CPUPhysAddr; + IMG_VOID *pvIORemapCookie; + }sIORemap; + struct _sExternalKV + { + /* Note: The memory this represents is _not_ implicitly + * page aligned, neither is its size */ + IMG_BOOL bPhysContig; + union { + /* + * SYSPhysAddr is valid if bPhysContig is true, else + * pSysPhysAddr is valid + */ + IMG_SYS_PHYADDR SysPhysAddr; + IMG_SYS_PHYADDR *pSysPhysAddr; + } uPhysAddr; + IMG_VOID *pvExternalKV; + }sExternalKV; + struct _sIO + { + /* Note: The memory this represents is _not_ implicitly + * page aligned, neither is its size */ + IMG_CPU_PHYADDR CPUPhysAddr; + }sIO; + struct _sVmalloc + { + /* Note the memory this represents _is_ implicitly + * page aligned _and_ so is its size */ + IMG_VOID *pvVmallocAddress; +#if defined(PVR_LINUX_MEM_AREA_USE_VMAP) + struct page **ppsPageList; + IMG_HANDLE hBlockPageList; +#endif + }sVmalloc; + struct _sPageList + { + /* Note the memory this represents _is_ implicitly + * page aligned _and_ so is its size */ + struct page **ppsPageList; + IMG_HANDLE hBlockPageList; + }sPageList; + struct _sSubAlloc + { + /* Note: The memory this represents is _not_ implicitly + * page aligned, neither is its size */ + LinuxMemArea *psParentLinuxMemArea; + IMG_UINTPTR_T uiByteOffset; + }sSubAlloc; + }uData; + + IMG_SIZE_T uiByteSize; /* Size of memory area */ + + IMG_UINT32 ui32AreaFlags; /* Flags passed at creation time */ + + IMG_BOOL bMMapRegistered; /* Registered with mmap code */ + + IMG_BOOL bNeedsCacheInvalidate; /* Cache should be invalidated on first map? 
*/ + + IMG_HANDLE hBMHandle; /* Handle back to BM for this allocation */ + + /* List entry for global list of areas registered for mmap */ + struct list_head sMMapItem; + + /* + * Head of list of all mmap offset structures associated with this + * memory area. + */ + struct list_head sMMapOffsetStructList; +}; + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)) +typedef kmem_cache_t LinuxKMemCache; +#else +typedef struct kmem_cache LinuxKMemCache; +#endif + + +/*! + ******************************************************************************* + * @Function LinuxMMInit + * + * @Description + * + * Initialise linux memory management code. + * This should be called during services initialisation. + * + * @Return none +******************************************************************************/ +PVRSRV_ERROR LinuxMMInit(IMG_VOID); + + +/*! + ******************************************************************************* + * + * @Function LinuxMMCleanup + * + * @Description + * + * Cleanup state for the linux memory management code. + * This should be called at services cleanup. + * + * @Return none +******************************************************************************/ +IMG_VOID LinuxMMCleanup(IMG_VOID); + + +/*! + ******************************************************************************* + * @brief Wrappers for kmalloc/kfree with optional /proc/pvr/km tracking + * They can also be used as more concise replacements for OSAllocMem + * in Linux specific code. 
+ * + * @param uByteSize + * + * @return + ******************************************************************************/ +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) +#define KMallocWrapper(uByteSize, uFlags) _KMallocWrapper(uByteSize, uFlags, __FILE__, __LINE__, IMG_FALSE) +#else +#define KMallocWrapper(uByteSize, uFlags) _KMallocWrapper(uByteSize, uFlags, NULL, 0, IMG_FALSE) +#endif +void *_KMallocWrapper(IMG_SIZE_T uByteSize, gfp_t uFlags, IMG_CHAR *szFileName, IMG_UINT32 ui32Line, IMG_BOOL bSwapAlloc); + +/*! + ******************************************************************************* + * @brief + * + * @param pvCpuVAddr + * + * @return + ******************************************************************************/ +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) +#define KFreeWrapper(pvCpuVAddr) _KFreeWrapper(pvCpuVAddr, __FILE__, __LINE__, IMG_FALSE) +#else +#define KFreeWrapper(pvCpuVAddr) _KFreeWrapper(pvCpuVAddr, NULL, 0, IMG_FALSE) +#endif +void _KFreeWrapper(void *pvCpuVAddr, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line, IMG_BOOL bSwapAlloc); + +/*! + ******************************************************************************* + * @brief + * + * @param uBytes + * @param ui32AllocFlags + * + * @return + ******************************************************************************/ +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) +#define VMallocWrapper(uiBytes, ui32AllocFlags) _VMallocWrapper(uiBytes, ui32AllocFlags, __FILE__, __LINE__) +#else +#define VMallocWrapper(uiBytes, ui32AllocFlags) _VMallocWrapper(uiBytes, ui32AllocFlags, NULL, 0) +#endif +IMG_VOID *_VMallocWrapper(IMG_SIZE_T uiBytes, IMG_UINT32 ui32AllocFlags, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line); + + +/*! 
+ ******************************************************************************* + * @brief + * + * @param pvCpuVAddr + * + * @return + ******************************************************************************/ +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) +#define VFreeWrapper(pvCpuVAddr) _VFreeWrapper(pvCpuVAddr, __FILE__, __LINE__) +#else +#define VFreeWrapper(pvCpuVAddr) _VFreeWrapper(pvCpuVAddr, NULL, 0) +#endif +IMG_VOID _VFreeWrapper(IMG_VOID *pvCpuVAddr, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line); + + +/*! + ******************************************************************************* + * @brief Allocates virtually contiguous pages + * + * @param uBytes number of bytes to reserve + * @param ui32AreaFlags Heap caching and mapping Flags + * + * @return Page-aligned address of virtual allocation or NULL on error + ******************************************************************************/ +LinuxMemArea *NewVMallocLinuxMemArea(IMG_SIZE_T uBytes, IMG_UINT32 ui32AreaFlags); + + +/*! + ******************************************************************************* + * @brief Deallocates virtually contiguous pages + * + * @param LinuxMemArea from NewVMallocLinuxMemArea + * + ******************************************************************************/ +IMG_VOID FreeVMallocLinuxMemArea(LinuxMemArea *psLinuxMemArea); + + +/*! 
+ ******************************************************************************* + * @brief Reserve physical IO memory and create a CPU virtual mapping for it + * + * @param BasePAddr + * @param uiBytes + * @param ui32MappingFlags + * + * @return + ******************************************************************************/ +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) +#define IORemapWrapper(BasePAddr, uiBytes, ui32MappingFlags) \ + _IORemapWrapper(BasePAddr, uiBytes, ui32MappingFlags, __FILE__, __LINE__) +#else +#define IORemapWrapper(BasePAddr, uiBytes, ui32MappingFlags) \ + _IORemapWrapper(BasePAddr, uiBytes, ui32MappingFlags, NULL, 0) +#endif +IMG_VOID *_IORemapWrapper(IMG_CPU_PHYADDR BasePAddr, + IMG_SIZE_T uiBytes, + IMG_UINT32 ui32MappingFlags, + IMG_CHAR *pszFileName, + IMG_UINT32 ui32Line); + + +/*! + ******************************************************************************* + * @brief Reserve physical IO memory and create a CPU virtual mapping for it + * + * @param BasePAddr + * @param uiBytes + * @param ui32AreaFlags Heap caching and mapping Flags + * + * @return + ******************************************************************************/ +LinuxMemArea *NewIORemapLinuxMemArea(IMG_CPU_PHYADDR BasePAddr, IMG_SIZE_T uiBytes, IMG_UINT32 ui32AreaFlags); + + +/*! + ******************************************************************************* + * @brief + * + * @param psLinuxMemArea + * + * @return + ********************************************************************************/ +IMG_VOID FreeIORemapLinuxMemArea(LinuxMemArea *psLinuxMemArea); + +/*! 
+ ******************************************************************************* + * @brief Register physical memory which already has a CPU virtual mapping + * + * @param pBasePAddr + * @param pvCPUVAddr + * @param bPhysContig + * @param uBytes + * @param ui32AreaFlags Heap caching and mapping Flags + * + * @return + ******************************************************************************/ +LinuxMemArea *NewExternalKVLinuxMemArea(IMG_SYS_PHYADDR *pBasePAddr, IMG_VOID *pvCPUVAddr, IMG_SIZE_T uBytes, IMG_BOOL bPhysContig, IMG_UINT32 ui32AreaFlags); + + +/*! + ******************************************************************************* + * @brief + * + * @param psLinuxMemArea + * + * @return + ******************************************************************************/ +IMG_VOID FreeExternalKVLinuxMemArea(LinuxMemArea *psLinuxMemArea); + + +/*! + ****************************************************************************** + * @brief Unmaps an IO memory mapping created using IORemap + * + * @param pvIORemapCookie + * + * @return + ******************************************************************************/ +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) +#define IOUnmapWrapper(pvIORemapCookie) \ + _IOUnmapWrapper(pvIORemapCookie, __FILE__, __LINE__) +#else +#define IOUnmapWrapper(pvIORemapCookie) \ + _IOUnmapWrapper(pvIORemapCookie, NULL, 0) +#endif +IMG_VOID _IOUnmapWrapper(IMG_VOID *pvIORemapCookie, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line); + + +/*! + ******************************************************************************* + * @brief + * + * @param psLinuxMemArea + * @param uByteOffset + * + * @return + ******************************************************************************/ +struct page *LinuxMemAreaOffsetToPage(LinuxMemArea *psLinuxMemArea, IMG_UINTPTR_T uByteOffset); + + +/*! 
+ ******************************************************************************* + * @brief + * + * @param pszName + * @param Size + * @param Align + * @param ui32Flags + * + * @return + ******************************************************************************/ +LinuxKMemCache *KMemCacheCreateWrapper(IMG_CHAR *pszName, size_t Size, size_t Align, IMG_UINT32 ui32Flags); + + +/*! + ******************************************************************************* + * @brief + * + * @param psCache + * + * @return + ******************************************************************************/ +IMG_VOID KMemCacheDestroyWrapper(LinuxKMemCache *psCache); + + +/*! + ******************************************************************************* + * @brief + * + * @param psCache + * @param Flags + * + * @return + ******************************************************************************/ +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) +#define KMemCacheAllocWrapper(psCache, Flags) _KMemCacheAllocWrapper(psCache, Flags, __FILE__, __LINE__) +#else +#define KMemCacheAllocWrapper(psCache, Flags) _KMemCacheAllocWrapper(psCache, Flags, NULL, 0) +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14)) +IMG_VOID *_KMemCacheAllocWrapper(LinuxKMemCache *psCache, gfp_t Flags, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line); +#else +IMG_VOID *_KMemCacheAllocWrapper(LinuxKMemCache *psCache, int Flags, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line); +#endif + +/*! 
+ ******************************************************************************* + * @brief + * + * @param psCache + * @param pvObject + * + * @return + ******************************************************************************/ +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) +#define KMemCacheFreeWrapper(psCache, pvObject) _KMemCacheFreeWrapper(psCache, pvObject, __FILE__, __LINE__) +#else +#define KMemCacheFreeWrapper(psCache, pvObject) _KMemCacheFreeWrapper(psCache, pvObject, NULL, 0) +#endif +IMG_VOID _KMemCacheFreeWrapper(LinuxKMemCache *psCache, IMG_VOID *pvObject, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line); + + +/*! + ******************************************************************************* + * @brief + * + * @param psCache + * + * @return + ******************************************************************************/ +const IMG_CHAR *KMemCacheNameWrapper(LinuxKMemCache *psCache); + + +/*! + ******************************************************************************* + * @brief + * + * @param BasePAddr + * @param uiBytes + * @param ui32AreaFlags Heap caching and mapping Flags + * + * @return + ******************************************************************************/ +LinuxMemArea *NewIOLinuxMemArea(IMG_CPU_PHYADDR BasePAddr, IMG_SIZE_T uiBytes, IMG_UINT32 ui32AreaFlags); + + +/*! + ******************************************************************************* + * @brief + * + * @param psLinuxMemArea + * + * @return + ******************************************************************************/ +IMG_VOID FreeIOLinuxMemArea(LinuxMemArea *psLinuxMemArea); + + +/*! + ******************************************************************************* + * @brief + * + * @param uiBytes + * @param ui32AreaFlags E.g Heap caching and mapping Flags + * + * @return + ******************************************************************************/ +LinuxMemArea *NewAllocPagesLinuxMemArea(IMG_SIZE_T uiBytes, IMG_UINT32 ui32AreaFlags); + + +/*! 
+ ******************************************************************************* + * @brief + * + * @param psLinuxMemArea + * + * @return + ******************************************************************************/ +IMG_VOID FreeAllocPagesLinuxMemArea(LinuxMemArea *psLinuxMemArea); + + +/*! + ******************************************************************************* + * @brief + * + * @param psParentLinuxMemArea + * @param uByteOffset + * @param uBytes + * + * @return + ******************************************************************************/ +LinuxMemArea *NewSubLinuxMemArea(LinuxMemArea *psParentLinuxMemArea, + IMG_UINTPTR_T uByteOffset, + IMG_SIZE_T uBytes); + + +/*! + ******************************************************************************* + * @brief + * + * @param psLinuxMemArea + * + * @return + ******************************************************************************/ +IMG_VOID LinuxMemAreaDeepFree(LinuxMemArea *psLinuxMemArea); + + +/*! + ******************************************************************************* + * @brief For debug builds, LinuxMemAreas are tracked in /proc + * + * @param psLinuxMemArea + * + ******************************************************************************/ +#if defined(LINUX_MEM_AREAS_DEBUG) +IMG_VOID LinuxMemAreaRegister(LinuxMemArea *psLinuxMemArea); +#else +#define LinuxMemAreaRegister(X) +#endif + + +/*! + ******************************************************************************* + * @brief + * + * @param psLinuxMemArea + * + * @return + ******************************************************************************/ +IMG_VOID *LinuxMemAreaToCpuVAddr(LinuxMemArea *psLinuxMemArea); + + +/*! 
+ ******************************************************************************* + * @brief + * + * @param psLinuxMemArea + * @param uByteOffset + * + * @return + ******************************************************************************/ +IMG_CPU_PHYADDR LinuxMemAreaToCpuPAddr(LinuxMemArea *psLinuxMemArea, IMG_UINTPTR_T uByteOffset); + + +#define LinuxMemAreaToCpuPFN(psLinuxMemArea, uByteOffset) PHYS_TO_PFN(LinuxMemAreaToCpuPAddr(psLinuxMemArea, uByteOffset).uiAddr) + +/*! + ******************************************************************************* + * @brief Indicate whether a LinuxMemArea is physically contiguous + * + * @param psLinuxMemArea + * + * @return IMG_TRUE if the physical address range is contiguous, else IMG_FALSE + ******************************************************************************/ +IMG_BOOL LinuxMemAreaPhysIsContig(LinuxMemArea *psLinuxMemArea); + +/*! + ******************************************************************************* + * @brief Return the real underlying LinuxMemArea + * + * @param psLinuxMemArea + * + * @return The real underlying LinuxMemArea + ******************************************************************************/ +static inline LinuxMemArea * +LinuxMemAreaRoot(LinuxMemArea *psLinuxMemArea) +{ + if(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_SUB_ALLOC) + { + return psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea; + } + else + { + return psLinuxMemArea; + } +} + + +/*! + ******************************************************************************* + * @brief Return type of real underlying LinuxMemArea + * + * @param psLinuxMemArea + * + * @return The areas eAreaType or for SUB areas; return the parents eAreaType. + ******************************************************************************/ +static inline LINUX_MEM_AREA_TYPE +LinuxMemAreaRootType(LinuxMemArea *psLinuxMemArea) +{ + return LinuxMemAreaRoot(psLinuxMemArea)->eAreaType; +} + + +/*! 
+ ******************************************************************************* + * @brief Converts the enum type of a LinuxMemArea to a const string + * + * @param eMemAreaType + * + * @return const string representation of type + ******************************************************************************/ +const IMG_CHAR *LinuxMemAreaTypeToString(LINUX_MEM_AREA_TYPE eMemAreaType); + + +/*! + ******************************************************************************* + * @brief + * + * @param ui32Flags + * + * @return + ******************************************************************************/ +#if defined(DEBUG) || defined(DEBUG_LINUX_MEM_AREAS) +const IMG_CHAR *HAPFlagsToString(IMG_UINT32 ui32Flags); +#endif + +#endif /* __IMG_LINUX_MM_H__ */ + diff --git a/sgx_km/eurasia_km/services4/srvkm/env/linux/mmap.c b/sgx_km/eurasia_km/services4/srvkm/env/linux/mmap.c new file mode 100644 index 0000000..3a2a16b --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/env/linux/mmap.c @@ -0,0 +1,1659 @@ +/*************************************************************************/ /*! +@Title Linux mmap interface +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)) +#ifndef AUTOCONF_INCLUDED +#include +#endif +#endif + +#include +#include +#include +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)) +#include +#endif +#include +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)) +#include +#endif +#include +#include +#include +#include +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)) +#include +#include +#endif +#if defined(SUPPORT_DRI_DRM) +#include +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,18,0)) +#include +#endif +#endif + +#ifdef CONFIG_ARCH_OMAP5 +#ifdef CONFIG_DSSCOMP +#include <../drivers/staging/omapdrm/omap_dmm_tiler.h> +#endif +#endif + +#include "services_headers.h" + +#include "pvrmmap.h" +#include "mutils.h" +#include "mmap.h" +#include "mm.h" +#include "proc.h" +#include "mutex.h" +#include "handle.h" +#include "perproc.h" +#include "env_perproc.h" +#include "bridged_support.h" +#if defined(SUPPORT_DRI_DRM) +#include "pvr_drm.h" +#endif + +#if !defined(PVR_SECURE_HANDLES) +#error "The mmap code requires PVR_SECURE_HANDLES" +#endif + +#if defined(SUPPORT_DRI_DRM) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3,18,0)) +static inline int drm_mmap(struct file *filp, struct vm_area_struct *vma) +{ + return drm_legacy_mmap(filp, vma); +} +#endif + +/* WARNING: + * The mmap code has its own mutex, to prevent a possible deadlock, + * when using gPVRSRVLock. + * The Linux kernel takes the mm->mmap_sem before calling the mmap + * entry points (PVRMMap, MMapVOpen, MMapVClose), but the ioctl + * entry point may take mm->mmap_sem during fault handling, or + * before calling get_user_pages. If gPVRSRVLock was used in the + * mmap entry points, a deadlock could result, due to the ioctl + * and mmap code taking the two locks in different orders. + * As a corollary to this, the mmap entry points must not call + * any driver code that relies on gPVRSRVLock is held. 
+ */ +PVRSRV_LINUX_MUTEX g_sMMapMutex; + +static LinuxKMemCache *g_psMemmapCache = NULL; +static LIST_HEAD(g_sMMapAreaList); +static LIST_HEAD(g_sMMapOffsetStructList); +#if defined(DEBUG_LINUX_MMAP_AREAS) +static IMG_UINT32 g_ui32RegisteredAreas = 0; +static IMG_SIZE_T g_uiTotalByteSize = 0; +#endif + + +#if defined(DEBUG_LINUX_MMAP_AREAS) +static struct pvr_proc_dir_entry *g_ProcMMap; +#endif /* defined(DEBUG_LINUX_MMAP_AREAS) */ + +#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL) +/* + * Now that we are using mmap2 in srvclient, almost (*) the full 32 + * bit offset is available. The range of values is divided into two. + * The first part of the range, from FIRST_PHYSICAL_PFN to + * LAST_PHYSICAL_PFN, is for raw page mappings (VM_PFNMAP). The + * resulting 43 bit (*) physical address range should be enough for + * the current range of processors we support. + * + * NB: (*) -- the above figures assume 4KB page size. The offset + * argument to mmap2() is in units of 4,096 bytes regardless of page + * size. Thus, we lose (PAGE_SHIFT-12) bits of resolution on other + * architectures. + * + * The second part of the range, from FIRST_SPECIAL_PFN to LAST_SPECIAL_PFN, + * is used for all other mappings. These other mappings will always + * consist of pages with associated page structures, and need not + * represent a contiguous range of physical addresses. + * + */ +#define MMAP2_PGOFF_RESOLUTION (32-PAGE_SHIFT+12) +#define RESERVED_PGOFF_BITS 1 +#define MAX_MMAP_HANDLE ((1UL<<(MMAP2_PGOFF_RESOLUTION-RESERVED_PGOFF_BITS))-1) + +#define FIRST_PHYSICAL_PFN 0 +#define LAST_PHYSICAL_PFN (FIRST_PHYSICAL_PFN + MAX_MMAP_HANDLE) +#define FIRST_SPECIAL_PFN (LAST_PHYSICAL_PFN + 1) +#define LAST_SPECIAL_PFN (FIRST_SPECIAL_PFN + MAX_MMAP_HANDLE) + +#else /* !defined(PVR_MAKE_ALL_PFNS_SPECIAL) */ + +/* + * Since we no longer have to worry about clashes with the mmap + * offsets used for pure PFN mappings (VM_PFNMAP), there is greater + * freedom in choosing the mmap handles. 
This is useful if the + * mmap offset space has to be shared with another driver component. + */ + +#if defined(PVR_MMAP_OFFSET_BASE) +#define FIRST_SPECIAL_PFN PVR_MMAP_OFFSET_BASE +#else +#define FIRST_SPECIAL_PFN_BASE 0x80000000UL +#define FIRST_SPECIAL_PFN (FIRST_SPECIAL_PFN_BASE >> (PAGE_SHIFT - 12)) +#endif + +#if defined(PVR_NUM_MMAP_HANDLES) +#define MAX_MMAP_HANDLE PVR_NUM_MMAP_HANDLES +#else +#define MAX_MMAP_HANDLE_BASE 0x7fffffffUL +#define MAX_MMAP_HANDLE (MAX_MMAP_HANDLE_BASE >> (PAGE_SHIFT - 12)) +#endif + +#endif /* !defined(PVR_MAKE_ALL_PFNS_SPECIAL) */ + +#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL) +static inline IMG_BOOL +PFNIsPhysical(IMG_UINT32 pfn) +{ + /* Unsigned, no need to compare >=0 */ + return (/*(pfn >= FIRST_PHYSICAL_PFN) &&*/ (pfn <= LAST_PHYSICAL_PFN)) ? IMG_TRUE : IMG_FALSE; +} + +static inline IMG_BOOL +PFNIsSpecial(IMG_UINT32 pfn) +{ + /* Unsigned, no need to compare <=MAX_UINT */ + return ((pfn >= FIRST_SPECIAL_PFN) /*&& (pfn <= LAST_SPECIAL_PFN)*/) ? IMG_TRUE : IMG_FALSE; +} +#endif + +#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL) +static inline IMG_HANDLE +MMapOffsetToHandle(IMG_UINT32 pfn) +{ + if (PFNIsPhysical(pfn)) + { + PVR_ASSERT(PFNIsPhysical(pfn)); + return IMG_NULL; + } + return (IMG_HANDLE)(pfn - FIRST_SPECIAL_PFN); +} +#endif + +static inline IMG_UINTPTR_T +HandleToMMapOffset(IMG_HANDLE hHandle) +{ + IMG_UINTPTR_T ulHandle = (IMG_UINTPTR_T)hHandle; + +#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL) + if (PFNIsSpecial(ulHandle)) + { + PVR_ASSERT(PFNIsSpecial(ulHandle)); + return 0; + } +#endif + return ulHandle + FIRST_SPECIAL_PFN; +} + +#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL) +/* + * Determine whether physical or special mappings will be used for + * a given memory area. At present, this decision is made on + * whether the mapping represents a contiguous range of physical + * addresses, which is a requirement for raw page mappings (VM_PFNMAP). 
 * In the VMA structure for such a mapping, vm_pgoff is the PFN
 * (page frame number, the physical address divided by the page size)
 * of the first page in the VMA. The second page is assumed to have
 * PFN (vm_pgoff + 1), the third (vm_pgoff + 2) and so on.
 */
static inline IMG_BOOL
LinuxMemAreaUsesPhysicalMap(LinuxMemArea *psLinuxMemArea)
{
	return LinuxMemAreaPhysIsContig(psLinuxMemArea);
}
#endif

#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL)
/* Return the calling thread's ID. */
static inline IMG_UINT32
GetCurrentThreadID(IMG_VOID)
{
	/*
	 * The PID is the thread ID, as each thread is a
	 * separate process.
	 */
	return (IMG_UINT32)current->pid;
}
#endif

/*
 * Create an offset structure, which is used to hold per-process
 * mmap data.
 *
 * psLinuxMemArea  : memory area the mapping will refer to (must already
 *                   be registered via PVRMMapRegisterArea).
 * uiOffset        : mmap offset (pgoff) chosen for this mapping.
 * uiRealByteSize  : page-aligned size of the mapping.
 *
 * Returns the new structure (linked onto the area's offset list), or
 * IMG_NULL on allocation failure. Caller must hold g_sMMapMutex.
 */
static PKV_OFFSET_STRUCT
CreateOffsetStruct(LinuxMemArea *psLinuxMemArea, IMG_UINTPTR_T uiOffset, IMG_SIZE_T uiRealByteSize)
{
	PKV_OFFSET_STRUCT psOffsetStruct;
#if defined(DEBUG) || defined(DEBUG_LINUX_MMAP_AREAS)
	const IMG_CHAR *pszName = LinuxMemAreaTypeToString(LinuxMemAreaRootType(psLinuxMemArea));
#endif

#if defined(DEBUG) || defined(DEBUG_LINUX_MMAP_AREAS)
	PVR_DPF((PVR_DBG_MESSAGE,
		 "%s(%s, psLinuxMemArea: 0x%p, ui32AllocFlags: 0x%8x)",
		 __FUNCTION__, pszName, psLinuxMemArea, psLinuxMemArea->ui32AreaFlags));
#endif

	/* Sub-allocations must not nest: the root of a sub-alloc area must
	 * itself be a real (non-sub-alloc) area. */
	PVR_ASSERT(psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC || LinuxMemAreaRoot(psLinuxMemArea)->eAreaType != LINUX_MEM_AREA_SUB_ALLOC);

	PVR_ASSERT(psLinuxMemArea->bMMapRegistered);

	psOffsetStruct = KMemCacheAllocWrapper(g_psMemmapCache, GFP_KERNEL);
	if(psOffsetStruct == IMG_NULL)
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRMMapRegisterArea: Couldn't alloc another mapping record from cache"));
		return IMG_NULL;
	}

	psOffsetStruct->uiMMapOffset = uiOffset;

	psOffsetStruct->psLinuxMemArea = psLinuxMemArea;

	psOffsetStruct->uiRealByteSize = uiRealByteSize;

	/*
	 * We store the TID in case two threads within a process
	 * generate the same offset structure, and both end up on the
	 * list of structures waiting to be mapped, at the same time.
	 * This could happen if two sub areas within the same page are
	 * being mapped at the same time.
	 * The TID allows the mmap entry point to distinguish which
	 * mapping is being done by which thread.
	 */
#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL)
	psOffsetStruct->ui32TID = GetCurrentThreadID();
#endif
	psOffsetStruct->ui32PID = OSGetCurrentProcessIDKM();

#if defined(DEBUG_LINUX_MMAP_AREAS)
	/* Extra entries to support proc filesystem debug info */
	psOffsetStruct->pszName = pszName;
#endif

	list_add_tail(&psOffsetStruct->sAreaItem, &psLinuxMemArea->sMMapOffsetStructList);

	return psOffsetStruct;
}


/*
 * Unlink an offset structure from its memory area (and, if still pending,
 * from the global to-be-mapped list) and return it to the slab cache.
 * Caller must hold g_sMMapMutex.
 */
static IMG_VOID
DestroyOffsetStruct(PKV_OFFSET_STRUCT psOffsetStruct)
{
#ifdef DEBUG
	IMG_CPU_PHYADDR CpuPAddr;
	CpuPAddr = LinuxMemAreaToCpuPAddr(psOffsetStruct->psLinuxMemArea, 0);
#endif

	list_del(&psOffsetStruct->sAreaItem);

	if (psOffsetStruct->bOnMMapList)
	{
		list_del(&psOffsetStruct->sMMapItem);
	}

#ifdef DEBUG
	PVR_DPF((PVR_DBG_MESSAGE, "%s: Table entry: "
		 "psLinuxMemArea=%p, CpuPAddr=0x" CPUPADDR_FMT,
		 __FUNCTION__,
		 psOffsetStruct->psLinuxMemArea,
		 CpuPAddr.uiAddr));
#endif

	KMemCacheFreeWrapper(g_psMemmapCache, psOffsetStruct);
}


/*
 * There are no alignment constraints for mapping requests made by user
 * mode Services. For this, and potentially other reasons, the
 * mapping created for a users request may look different to the
 * original request in terms of size and alignment.
 *
 * This function determines an offset that the user can add to the mapping
 * that is _actually_ created which will point to the memory they are
 * _really_ interested in.
+ * + */ +static inline IMG_VOID +DetermineUsersSizeAndByteOffset(LinuxMemArea *psLinuxMemArea, + IMG_SIZE_T *puiRealByteSize, + IMG_UINTPTR_T *puiByteOffset) +{ + IMG_UINTPTR_T uiPageAlignmentOffset; + IMG_CPU_PHYADDR CpuPAddr; + + CpuPAddr = LinuxMemAreaToCpuPAddr(psLinuxMemArea, 0); + uiPageAlignmentOffset = ADDR_TO_PAGE_OFFSET(CpuPAddr.uiAddr); + + *puiByteOffset = uiPageAlignmentOffset; + + *puiRealByteSize = PAGE_ALIGN(psLinuxMemArea->uiByteSize + uiPageAlignmentOffset); +} + + +/*! + ******************************************************************************* + + @Function PVRMMapOSMemHandleToMMapData + + @Description + + Determine various parameters needed to mmap a memory area, and to + locate the memory within the mapped area. + + @input psPerProc : Per-process data. + @input hMHandle : Memory handle. + @input puiMMapOffset : pointer to location for returned mmap offset. + @input puiByteOffset : pointer to location for returned byte offset. + @input puiRealByteSize : pointer to location for returned real byte size. + @input puiUserVaddr : pointer to location for returned user mode address. + + @output puiMMapOffset : points to mmap offset to be used in mmap2 sys call. + @output puiByteOffset : points to byte offset of start of memory + within mapped area returned by mmap2. + @output puiRealByteSize : points to size of area to be mapped. + @output puiUserVAddr : points to user mode address of start of + mapping, or 0 if it hasn't been mapped yet. + + @Return PVRSRV_ERROR : PVRSRV_OK, or error code. 

 ******************************************************************************/
PVRSRV_ERROR
PVRMMapOSMemHandleToMMapData(PVRSRV_PER_PROCESS_DATA *psPerProc,
			     IMG_HANDLE hMHandle,
			     IMG_UINTPTR_T *puiMMapOffset,
			     IMG_UINTPTR_T *puiByteOffset,
			     IMG_SIZE_T *puiRealByteSize,
			     IMG_UINTPTR_T *puiUserVAddr)
{
	LinuxMemArea *psLinuxMemArea;
	PKV_OFFSET_STRUCT psOffsetStruct;
	IMG_HANDLE hOSMemHandle;
	PVRSRV_ERROR eError;

	/* All offset-struct bookkeeping is protected by the mmap mutex,
	 * NOT gPVRSRVLock (see the deadlock warning at the top of the file). */
	LinuxLockMutexNested(&g_sMMapMutex, PVRSRV_LOCK_CLASS_MMAP);

	/* Handles are encoded into the mmap offset, so they must fit. */
	PVR_ASSERT(PVRSRVGetMaxHandle(psPerProc->psHandleBase) <= MAX_MMAP_HANDLE);

	eError = PVRSRVLookupOSMemHandle(psPerProc->psHandleBase, &hOSMemHandle, hMHandle);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: Lookup of handle %p failed", __FUNCTION__, hMHandle));

		goto exit_unlock;
	}

	psLinuxMemArea = (LinuxMemArea *)hOSMemHandle;

	/* Sparse mappings have to ask the BM for the virtual size */
	if (psLinuxMemArea->hBMHandle)
	{
		*puiRealByteSize = BM_GetVirtualSize(psLinuxMemArea->hBMHandle);
		*puiByteOffset = 0;
	}
	else
	{
		DetermineUsersSizeAndByteOffset(psLinuxMemArea,
						puiRealByteSize,
						puiByteOffset);
	}

	/* Check whether this memory area has already been mapped */
	list_for_each_entry(psOffsetStruct, &psLinuxMemArea->sMMapOffsetStructList, sAreaItem)
	{
		if (psPerProc->ui32PID == psOffsetStruct->ui32PID)
		{
			if (!psLinuxMemArea->hBMHandle)
			{
				PVR_ASSERT(*puiRealByteSize == psOffsetStruct->uiRealByteSize);
			}
			/*
			 * User mode locking is required to stop two threads racing to
			 * map the same memory area. The lock should prevent a
			 * second thread retrieving mmap data for a given handle,
			 * before the first thread has done the mmap.
			 * Without locking, both threads may attempt the mmap,
			 * and one of them will fail.
			 */
			*puiMMapOffset = psOffsetStruct->uiMMapOffset;
			*puiUserVAddr = psOffsetStruct->uiUserVAddr;
			PVRSRVOffsetStructIncRef(psOffsetStruct);

			eError = PVRSRV_OK;
			goto exit_unlock;
		}
	}

	/* Memory area won't have been mapped yet */
	*puiUserVAddr = 0;

#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL)
	if (LinuxMemAreaUsesPhysicalMap(psLinuxMemArea))
	{
		/* Contiguous area: the mmap offset is the raw starting PFN. */
		*puiMMapOffset = LinuxMemAreaToCpuPFN(psLinuxMemArea, 0);
		PVR_ASSERT(PFNIsPhysical(*puiMMapOffset));
	}
	else
#endif
	{
		/* Non-contiguous area: encode the handle as a special offset. */
		*puiMMapOffset = HandleToMMapOffset(hMHandle);
#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL)
		PVR_ASSERT(PFNIsSpecial(*puiMMapOffset));
#endif
	}

	psOffsetStruct = CreateOffsetStruct(psLinuxMemArea, *puiMMapOffset, *puiRealByteSize);
	if (psOffsetStruct == IMG_NULL)
	{
		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
		goto exit_unlock;
	}

	/*
	 * Offset structures representing physical mappings are added to
	 * a list, so that they can be located when the memory area is mapped.
	 */
	list_add_tail(&psOffsetStruct->sMMapItem, &g_sMMapOffsetStructList);

	psOffsetStruct->bOnMMapList = IMG_TRUE;

	PVRSRVOffsetStructIncRef(psOffsetStruct);

	eError = PVRSRV_OK;

	/* Need to scale up the offset to counter the shifting that
	   is done in the mmap2() syscall, as it expects the pgoff
	   argument to be in units of 4,096 bytes irrespective of
	   page size */
	*puiMMapOffset = *puiMMapOffset << (PAGE_SHIFT - 12);

exit_unlock:
	LinuxUnLockMutex(&g_sMMapMutex);

	return eError;
}


/*!
 *******************************************************************************

 @Function  PVRMMapReleaseMMapData

 @Description

 Release mmap data.

 @input psPerProc : Per-process data.
 @input hMHandle : Memory handle.
 @input pbMUnmap : pointer to location for munmap flag.
 @input puiUserVAddr : pointer to location for user mode address of mapping.
 @input puiByteSize : pointer to location for size of mapping.

 @Output pbMUnmap : points to flag that indicates whether an munmap is
		    required.
 @output puiUserVAddr : points to user mode address to munmap.

 @Return PVRSRV_ERROR : PVRSRV_OK, or error code.

 ******************************************************************************/
PVRSRV_ERROR
PVRMMapReleaseMMapData(PVRSRV_PER_PROCESS_DATA *psPerProc,
		       IMG_HANDLE hMHandle,
		       IMG_BOOL *pbMUnmap,
		       IMG_SIZE_T *puiRealByteSize,
		       IMG_UINTPTR_T *puiUserVAddr)
{
	LinuxMemArea *psLinuxMemArea;
	PKV_OFFSET_STRUCT psOffsetStruct;
	IMG_HANDLE hOSMemHandle;
	PVRSRV_ERROR eError;
	IMG_UINT32 ui32PID = OSGetCurrentProcessIDKM();

	LinuxLockMutexNested(&g_sMMapMutex, PVRSRV_LOCK_CLASS_MMAP);

	PVR_ASSERT(PVRSRVGetMaxHandle(psPerProc->psHandleBase) <= MAX_MMAP_HANDLE);

	eError = PVRSRVLookupOSMemHandle(psPerProc->psHandleBase, &hOSMemHandle, hMHandle);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: Lookup of handle %p failed", __FUNCTION__, hMHandle));

		goto exit_unlock;
	}

	psLinuxMemArea = (LinuxMemArea *)hOSMemHandle;

	/* Find the offset structure */
	list_for_each_entry(psOffsetStruct, &psLinuxMemArea->sMMapOffsetStructList, sAreaItem)
	{
		if (psOffsetStruct->ui32PID == ui32PID)
		{
			if (psOffsetStruct->ui32RefCount == 0)
			{
				PVR_DPF((PVR_DBG_ERROR, "%s: Attempt to release mmap data with zero reference count for offset struct 0x%p, memory area %p", __FUNCTION__, psOffsetStruct, psLinuxMemArea));
				eError = PVRSRV_ERROR_STILL_MAPPED;
				goto exit_unlock;
			}

			PVRSRVOffsetStructDecRef(psOffsetStruct);

			/* An munmap is only needed once the last reference is
			 * dropped AND the area was actually mapped by the user. */
			*pbMUnmap = (IMG_BOOL)((psOffsetStruct->ui32RefCount == 0) && (psOffsetStruct->uiUserVAddr != 0));

			*puiUserVAddr = (*pbMUnmap) ? psOffsetStruct->uiUserVAddr : 0;
			*puiRealByteSize = (*pbMUnmap) ? psOffsetStruct->uiRealByteSize : 0;

			eError = PVRSRV_OK;
			goto exit_unlock;
		}
	}

	/* MMap data not found */
	PVR_DPF((PVR_DBG_ERROR, "%s: Mapping data not found for handle %p (memory area %p)", __FUNCTION__, hMHandle, psLinuxMemArea));

	eError =  PVRSRV_ERROR_MAPPING_NOT_FOUND;

exit_unlock:
	LinuxUnLockMutex(&g_sMMapMutex);

	return eError;
}

/* Locate the pending offset structure for the current process matching the
 * given mmap offset and mapping size, or IMG_NULL if none is registered.
 * Caller must hold g_sMMapMutex. */
static inline PKV_OFFSET_STRUCT
FindOffsetStructByOffset(IMG_UINTPTR_T uiOffset, IMG_SIZE_T uiRealByteSize)
{
	PKV_OFFSET_STRUCT psOffsetStruct;
#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL)
	IMG_UINT32 ui32TID = GetCurrentThreadID();
#endif
	IMG_UINT32 ui32PID = OSGetCurrentProcessIDKM();

	list_for_each_entry(psOffsetStruct, &g_sMMapOffsetStructList, sMMapItem)
	{
		if (uiOffset == psOffsetStruct->uiMMapOffset && uiRealByteSize == psOffsetStruct->uiRealByteSize && psOffsetStruct->ui32PID == ui32PID)
		{
#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL)
			/*
			 * If the offset is physical, make sure the thread IDs match,
			 * as different threads may be mapping different memory areas
			 * with the same offset.
			 */
			if (!PFNIsPhysical(uiOffset) || psOffsetStruct->ui32TID == ui32TID)
#endif
			{
				return psOffsetStruct;
			}
		}
	}

	return IMG_NULL;
}


/*
 * Map a memory area into user space.
 * Note, the ui32ByteOffset is _not_ implicitly page aligned since
 * LINUX_MEM_AREA_SUB_ALLOC LinuxMemAreas have no alignment constraints.
 */
/* Returns IMG_TRUE on success. Recurses once for sub-allocations (mapping
 * through the root area), then maps either the whole contiguous range with
 * IO_REMAP_PFN_RANGE, or page by page via VM_INSERT_PAGE / vm_insert_mixed. */
static IMG_BOOL
DoMapToUser(LinuxMemArea *psLinuxMemArea,
	    struct vm_area_struct* ps_vma,
	    IMG_UINTPTR_T uiByteOffset)
{
	IMG_SIZE_T uiByteSize;

	if ((psLinuxMemArea->hBMHandle) && (uiByteOffset != 0))
	{
		/* Partial mapping of sparse allocations should never happen */
		return IMG_FALSE;
	}

	if (psLinuxMemArea->eAreaType == LINUX_MEM_AREA_SUB_ALLOC)
	{
		return DoMapToUser(LinuxMemAreaRoot(psLinuxMemArea),		/* PRQA S 3670 */ /* allow recursion */
				   ps_vma,
				   psLinuxMemArea->uData.sSubAlloc.uiByteOffset + uiByteOffset);
	}

	/*
	 * Note that ui32ByteSize may be larger than the size of the memory
	 * area being mapped, as the former is a multiple of the page size.
	 */
	uiByteSize = ps_vma->vm_end - ps_vma->vm_start;
	PVR_ASSERT(ADDR_TO_PAGE_OFFSET(uiByteSize) == 0);

#if defined (__sparc__)
	/*
	 * For LINUX_MEM_AREA_EXTERNAL_KV, we don't know where the address range
	 * we are being asked to map has come from, that is, whether it is memory
	 * or I/O. For all architectures other than SPARC, there is no distinction.
	 * Since we don't currently support SPARC, we won't worry about it.
	 */
#error "SPARC not supported"
#endif

#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL)
	if (PFNIsPhysical(ps_vma->vm_pgoff))
	{
		IMG_INT result;

		PVR_ASSERT(LinuxMemAreaPhysIsContig(psLinuxMemArea));
		PVR_ASSERT(LinuxMemAreaToCpuPFN(psLinuxMemArea, uiByteOffset) == ps_vma->vm_pgoff);
		/*
		 * Since the memory is contiguous, we can map the whole range in one
		 * go .
		 */

		PVR_ASSERT(psLinuxMemArea->hBMHandle == IMG_NULL);

		result = IO_REMAP_PFN_RANGE(ps_vma, ps_vma->vm_start, ps_vma->vm_pgoff, uiByteSize, ps_vma->vm_page_prot);

		if(result == 0)
		{
			return IMG_TRUE;
		}

		/* Fall through to the page-by-page path below on failure. */
		PVR_DPF((PVR_DBG_MESSAGE, "%s: Failed to map contiguous physical address range (%d), trying non-contiguous path", __FUNCTION__, result));
	}
#endif

	{
		/*
		 * Memory may be non-contiguous, so we map the range page,
		 * by page. Since VM_PFNMAP mappings are assumed to be physically
		 * contiguous, we can't legally use REMAP_PFN_RANGE (that is, we
		 * could, but the resulting VMA may confuse other bits of the kernel
		 * that attempt to interpret it).
		 * The only alternative is to use VM_INSERT_PAGE, which requires
		 * finding the page structure corresponding to each page, or
		 * if mixed maps are supported (VM_MIXEDMAP), vm_insert_mixed.
		 */
		IMG_UINTPTR_T ulVMAPos;
		IMG_UINTPTR_T uiByteEnd = uiByteOffset + uiByteSize;
		IMG_UINTPTR_T uiPA;
		/* uiAdjustedPA advances only over pages that are actually mapped,
		 * so sparse holes do not consume source pages. */
		IMG_UINTPTR_T uiAdjustedPA = uiByteOffset;
#if defined(PVR_MAKE_ALL_PFNS_SPECIAL)
		IMG_BOOL bMixedMap = IMG_FALSE;
#endif
		/* First pass, validate the page frame numbers */
		for(uiPA = uiByteOffset; uiPA < uiByteEnd; uiPA += PAGE_SIZE)
		{
			IMG_UINTPTR_T pfn;
			IMG_BOOL bMapPage = IMG_TRUE;

			if (psLinuxMemArea->hBMHandle)
			{
				if (!BM_MapPageAtOffset(psLinuxMemArea->hBMHandle, uiPA))
				{
					bMapPage = IMG_FALSE;
				}
			}

			if (bMapPage)
			{
				pfn =  LinuxMemAreaToCpuPFN(psLinuxMemArea, uiAdjustedPA);
				if (!pfn_valid(pfn))
				{
#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL)
					PVR_DPF((PVR_DBG_ERROR,"%s: Error - PFN invalid: 0x" UINTPTR_FMT, __FUNCTION__, pfn));
					return IMG_FALSE;
#else
					/* No struct page: must use a mixed map. */
					bMixedMap = IMG_TRUE;
#endif
				}
				else if (0 == page_count(pfn_to_page(pfn)))
				{
#if defined(PVR_MAKE_ALL_PFNS_SPECIAL)
					/* Page with zero refcount cannot go through
					 * vm_insert_page; treat as mixed. */
					bMixedMap = IMG_TRUE;
#endif
				}
				uiAdjustedPA += PAGE_SIZE;
			}
		}

#if defined(PVR_MAKE_ALL_PFNS_SPECIAL)
		if (bMixedMap)
		{
			ps_vma->vm_flags |= VM_MIXEDMAP;
		}
#endif
		/* Second pass, get the page structures and insert the pages */
		ulVMAPos = ps_vma->vm_start;
		uiAdjustedPA = uiByteOffset;
		for(uiPA = uiByteOffset; uiPA < uiByteEnd; uiPA += PAGE_SIZE)
		{
			IMG_UINTPTR_T pfn;
			IMG_INT result;
			IMG_BOOL bMapPage = IMG_TRUE;

			if (psLinuxMemArea->hBMHandle)
			{
				/* We have a sparse allocation, check if this page should be mapped */
				if (!BM_MapPageAtOffset(psLinuxMemArea->hBMHandle, uiPA))
				{
					bMapPage = IMG_FALSE;
				}
			}

			if (bMapPage)
			{
				pfn =  LinuxMemAreaToCpuPFN(psLinuxMemArea, uiAdjustedPA);

#if defined(PVR_MAKE_ALL_PFNS_SPECIAL)
				if (bMixedMap)
				{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,5,0))
					/* 4.5+ wraps the PFN in a pfn_t. */
					pfn_t pfns = { pfn };

					result = vm_insert_mixed(ps_vma, ulVMAPos, pfns);
#else
					result = vm_insert_mixed(ps_vma, ulVMAPos, pfn);
#endif
					if(result != 0)
					{
						PVR_DPF((PVR_DBG_ERROR,"%s: Error - vm_insert_mixed failed (%d)", __FUNCTION__, result));
						return IMG_FALSE;
					}
				}
				else
#endif
				{
					struct page *psPage;

					PVR_ASSERT(pfn_valid(pfn));

					psPage = pfn_to_page(pfn);

					result = VM_INSERT_PAGE(ps_vma,  ulVMAPos, psPage);
					if(result != 0)
					{
						PVR_DPF((PVR_DBG_ERROR,"%s: Error - VM_INSERT_PAGE failed (%d)", __FUNCTION__, result));
						return IMG_FALSE;
					}
				}
				uiAdjustedPA += PAGE_SIZE;
			}
			ulVMAPos += PAGE_SIZE;
		}
	}

	return IMG_TRUE;
}


/* Bump the mapped count on the VMA's offset structure; caller must hold
 * g_sMMapMutex. Shared across processes only if VM_DONTCOPY was lost. */
static IMG_VOID
MMapVOpenNoLock(struct vm_area_struct* ps_vma)
{
	PKV_OFFSET_STRUCT psOffsetStruct = (PKV_OFFSET_STRUCT)ps_vma->vm_private_data;

	PVR_ASSERT(psOffsetStruct != IMG_NULL);
	PVR_ASSERT(!psOffsetStruct->bOnMMapList);

	PVRSRVOffsetStructIncMapped(psOffsetStruct);

	if (psOffsetStruct->ui32Mapped > 1)
	{
		PVR_DPF((PVR_DBG_WARNING, "%s: Offset structure 0x%p is being shared across processes (psOffsetStruct->ui32Mapped: %u)", __FUNCTION__, psOffsetStruct, psOffsetStruct->ui32Mapped));
		PVR_ASSERT((ps_vma->vm_flags & VM_DONTCOPY) == 0);
	}

#if defined(DEBUG_LINUX_MMAP_AREAS)

	PVR_DPF((PVR_DBG_MESSAGE,
		 "%s: psLinuxMemArea 0x%p, KVAddress 0x%p MMapOffset " UINTPTR_FMT ", ui32Mapped %d",
		 __FUNCTION__,
		 psOffsetStruct->psLinuxMemArea,
		 LinuxMemAreaToCpuVAddr(psOffsetStruct->psLinuxMemArea),
		 psOffsetStruct->uiMMapOffset,
		 psOffsetStruct->ui32Mapped));
#endif
}


/*
 * Linux mmap open entry point.
+ */ +static void +MMapVOpen(struct vm_area_struct* ps_vma) +{ + LinuxLockMutexNested(&g_sMMapMutex, PVRSRV_LOCK_CLASS_MMAP); + + MMapVOpenNoLock(ps_vma); + + LinuxUnLockMutex(&g_sMMapMutex); +} + + +static IMG_VOID +MMapVCloseNoLock(struct vm_area_struct* ps_vma) +{ + PKV_OFFSET_STRUCT psOffsetStruct = (PKV_OFFSET_STRUCT)ps_vma->vm_private_data; + PVR_ASSERT(psOffsetStruct != IMG_NULL); + +#if defined(DEBUG_LINUX_MMAP_AREAS) + PVR_DPF((PVR_DBG_MESSAGE, + "%s: psLinuxMemArea %p, CpuVAddr %p uiMMapOffset " UINTPTR_FMT ", ui32Mapped %d", + __FUNCTION__, + psOffsetStruct->psLinuxMemArea, + LinuxMemAreaToCpuVAddr(psOffsetStruct->psLinuxMemArea), + psOffsetStruct->uiMMapOffset, + psOffsetStruct->ui32Mapped)); +#endif + + PVR_ASSERT(!psOffsetStruct->bOnMMapList); + PVRSRVOffsetStructDecMapped(psOffsetStruct); + if (psOffsetStruct->ui32Mapped == 0) + { + if (psOffsetStruct->ui32RefCount != 0) + { + PVR_DPF(( + PVR_DBG_MESSAGE, + "%s: psOffsetStruct %p has non-zero reference count (ui32RefCount = %u). User mode address of start of mapping: 0x" UINTPTR_FMT, + __FUNCTION__, + psOffsetStruct, + psOffsetStruct->ui32RefCount, + psOffsetStruct->uiUserVAddr)); + } + + DestroyOffsetStruct(psOffsetStruct); + } + + ps_vma->vm_private_data = NULL; +} + +/* + * Linux mmap close entry point. + */ +static void +MMapVClose(struct vm_area_struct* ps_vma) +{ + LinuxLockMutexNested(&g_sMMapMutex, PVRSRV_LOCK_CLASS_MMAP); + + MMapVCloseNoLock(ps_vma); + + LinuxUnLockMutex(&g_sMMapMutex); +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)) +/* + * This vma operation is used to read data from mmap regions. It is called + * by access_process_vm, which is called to handle PTRACE_PEEKDATA ptrace + * requests and reads from /proc//mem. 
+ */ +static int MMapVAccess(struct vm_area_struct *ps_vma, unsigned long addr, + void *buf, int len, int write) +{ + PKV_OFFSET_STRUCT psOffsetStruct; + LinuxMemArea *psLinuxMemArea; + unsigned long ulOffset; + int iRetVal = -EINVAL; + IMG_VOID *pvKernelAddr; + + LinuxLockMutexNested(&g_sMMapMutex, PVRSRV_LOCK_CLASS_MMAP); + + psOffsetStruct = (PKV_OFFSET_STRUCT)ps_vma->vm_private_data; + psLinuxMemArea = psOffsetStruct->psLinuxMemArea; + ulOffset = addr - ps_vma->vm_start; + + if (ulOffset+len > psLinuxMemArea->uiByteSize) + /* Out of range. We shouldn't get here, because the kernel will do + the necessary checks before calling access_process_vm. */ + goto exit_unlock; + + pvKernelAddr = LinuxMemAreaToCpuVAddr(psLinuxMemArea); + + if (pvKernelAddr) + { + memcpy(buf, pvKernelAddr+ulOffset, len); + iRetVal = len; + } + else + { + IMG_UINTPTR_T pfn, uiOffsetInPage; + struct page *page; + + pfn = LinuxMemAreaToCpuPFN(psLinuxMemArea, ulOffset); + + if (!pfn_valid(pfn)) + goto exit_unlock; + + page = pfn_to_page(pfn); + uiOffsetInPage = ADDR_TO_PAGE_OFFSET(ulOffset); + + if (uiOffsetInPage + len > PAGE_SIZE) + /* The region crosses a page boundary */ + goto exit_unlock; + + pvKernelAddr = kmap(page); + memcpy(buf, pvKernelAddr + uiOffsetInPage, len); + kunmap(page); + + iRetVal = len; + } + +exit_unlock: + LinuxUnLockMutex(&g_sMMapMutex); + return iRetVal; +} +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26) */ + +static struct vm_operations_struct MMapIOOps = +{ + .open=MMapVOpen, + .close=MMapVClose, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)) + .access=MMapVAccess, +#endif +}; + + +/*! + ******************************************************************************* + + @Function PVRMMap + + @Description + + Driver mmap entry point. + + @input pFile : unused. + @input ps_vma : pointer to linux memory area descriptor. + + @Return 0, or Linux error code. 

 ******************************************************************************/
int
PVRMMap(struct file* pFile, struct vm_area_struct* ps_vma)
{
	LinuxMemArea *psFlushMemArea = IMG_NULL;
	PKV_OFFSET_STRUCT psOffsetStruct;
	IMG_SIZE_T uiByteSize;
	IMG_VOID *pvBase = IMG_NULL;
	int iRetVal = 0;
	IMG_UINTPTR_T uiByteOffset = 0;	/* Keep compiler happy */
	IMG_SIZE_T uiFlushSize = 0;

	PVR_UNREFERENCED_PARAMETER(pFile);

	LinuxLockMutexNested(&g_sMMapMutex, PVRSRV_LOCK_CLASS_MMAP);

	uiByteSize = ps_vma->vm_end - ps_vma->vm_start;

	PVR_DPF((PVR_DBG_MESSAGE, "%s: Received mmap(2) request with ui32MMapOffset 0x" UINTPTR_FMT ","
			" and uiByteSize %" SIZE_T_FMT_LEN "u(0x%" SIZE_T_FMT_LEN "x)",
			__FUNCTION__,
			ps_vma->vm_pgoff,
			uiByteSize,
			uiByteSize));

	/* The offset struct must have been created beforehand by
	 * PVRMMapOSMemHandleToMMapData for this process/offset/size. */
	psOffsetStruct = FindOffsetStructByOffset(ps_vma->vm_pgoff, uiByteSize);

	if (psOffsetStruct == IMG_NULL)
	{
#if defined(SUPPORT_DRI_DRM)
		LinuxUnLockMutex(&g_sMMapMutex);

#if !defined(SUPPORT_DRI_DRM_EXT)
		/* Pass unknown requests onto the DRM module */
		return drm_mmap(pFile, ps_vma);
#else
		/*
		 * Indicate to caller that the request is not for us.
		 * Do not return this error elsewhere in this function, as the
		 * caller may use it as a clue as to whether the mmap request
		 * should be passed on to another component (e.g. drm_mmap).
		 */
		return -ENOENT;
#endif
#else
		PVR_UNREFERENCED_PARAMETER(pFile);

		PVR_DPF((PVR_DBG_ERROR,
			 "%s: Attempted to mmap unregistered area at vm_pgoff 0x%lx",
			 __FUNCTION__, ps_vma->vm_pgoff));
		iRetVal = -EINVAL;
#endif
		goto unlock_and_return;
	}

	/* Consume the pending entry: from here on failure paths must destroy
	 * the offset struct (see unlock_and_return). */
	list_del(&psOffsetStruct->sMMapItem);
	psOffsetStruct->bOnMMapList = IMG_FALSE;

	/* Only support shared writeable mappings */
	if (((ps_vma->vm_flags & VM_WRITE) != 0) &&
	    ((ps_vma->vm_flags & VM_SHARED) == 0))
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: Cannot mmap non-shareable writable areas", __FUNCTION__));
		iRetVal = -EINVAL;
		goto unlock_and_return;
	}

	PVR_DPF((PVR_DBG_MESSAGE, "%s: Mapped psLinuxMemArea 0x%p\n",
		 __FUNCTION__, psOffsetStruct->psLinuxMemArea));

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0))
	/* This is probably superfluous and implied by VM_IO */
	ps_vma->vm_flags |= VM_RESERVED;
#else
	ps_vma->vm_flags |= VM_DONTDUMP;
#endif
	ps_vma->vm_flags |= VM_IO;

	/*
	 * Disable mremap because our nopage handler assumes all
	 * page requests have already been validated.
	 */
	ps_vma->vm_flags |= VM_DONTEXPAND;

	/* Don't allow mapping to be inherited across a process fork */
	ps_vma->vm_flags |= VM_DONTCOPY;

	ps_vma->vm_private_data = (void *)psOffsetStruct;

	switch(psOffsetStruct->psLinuxMemArea->ui32AreaFlags & PVRSRV_HAP_CACHETYPE_MASK)
	{
		case PVRSRV_HAP_CACHED:
			/* This is the default, do nothing. */
			break;
		case PVRSRV_HAP_WRITECOMBINE:
			ps_vma->vm_page_prot = PGPROT_WC(ps_vma->vm_page_prot);
			break;
		case PVRSRV_HAP_UNCACHED:
			ps_vma->vm_page_prot = PGPROT_UC(ps_vma->vm_page_prot);
			break;
		default:
			PVR_DPF((PVR_DBG_ERROR, "%s: unknown cache type", __FUNCTION__));
			iRetVal = -EINVAL;
			goto unlock_and_return;
	}

#ifdef CONFIG_ARCH_OMAP5
	{
		IMG_BOOL bModPageProt = IMG_FALSE;

#ifdef CONFIG_DSSCOMP
		/* TILER memory needs device-shared memory attributes. */
		bModPageProt |= is_tiler_addr(LinuxMemAreaToCpuPAddr(psOffsetStruct->psLinuxMemArea, 0).uiAddr);
#endif /* CONFIG_DSSCOMP */

		if (bModPageProt)
		{
			ps_vma->vm_page_prot = __pgprot_modify(ps_vma->vm_page_prot,
							       L_PTE_MT_MASK,
							       L_PTE_MT_DEV_SHARED);
		}
	}
#endif /* CONFIG_ARCH_OMAP5 */

	/* Install open and close handlers for ref-counting */
	ps_vma->vm_ops = &MMapIOOps;

	if(!DoMapToUser(psOffsetStruct->psLinuxMemArea, ps_vma, 0))
	{
		iRetVal = -EAGAIN;
		goto unlock_and_return;
	}

	PVR_ASSERT(psOffsetStruct->uiUserVAddr == 0);

	psOffsetStruct->uiUserVAddr = ps_vma->vm_start;

	/* Compute the flush region (if necessary) inside the mmap mutex */
	if(psOffsetStruct->psLinuxMemArea->bNeedsCacheInvalidate)
	{
		psFlushMemArea = psOffsetStruct->psLinuxMemArea;

		/* Sparse mappings have to ask the BM for the virtual size */
		if (psFlushMemArea->hBMHandle)
		{
			pvBase = (IMG_VOID *)ps_vma->vm_start;
			uiByteOffset = 0;
			uiFlushSize = BM_GetVirtualSize(psFlushMemArea->hBMHandle);
		}
		else
		{
			IMG_SIZE_T uiDummyByteSize;

			DetermineUsersSizeAndByteOffset(psFlushMemArea,
							&uiDummyByteSize,
							&uiByteOffset);

			pvBase = (IMG_VOID *)ps_vma->vm_start + uiByteOffset;
			uiFlushSize = psFlushMemArea->uiByteSize;
		}

		psFlushMemArea->bNeedsCacheInvalidate = IMG_FALSE;
	}

	/* Call the open routine to increment the usage count */
	MMapVOpenNoLock(ps_vma);

	PVR_DPF((PVR_DBG_MESSAGE, "%s: Mapped area at offset 0x" UINTPTR_FMT "\n",
		 __FUNCTION__, (IMG_UINTPTR_T)ps_vma->vm_pgoff));

unlock_and_return:
	if (iRetVal != 0 && psOffsetStruct != IMG_NULL)
	{
		DestroyOffsetStruct(psOffsetStruct);
	}

	LinuxUnLockMutex(&g_sMMapMutex);

	/* The flush is deliberately done outside the mutex; the region was
	 * captured above while the mutex was still held. */
	if(psFlushMemArea && uiFlushSize)
	{
		OSInvalidateCPUCacheRangeKM(psFlushMemArea, uiByteOffset, pvBase,
					    uiFlushSize);
	}

	return iRetVal;
}


#if defined(DEBUG_LINUX_MMAP_AREAS)

/*
 * Lock MMap regions list (called on page start/stop while reading /proc/mmap)

 * sfile : seq_file that handles /proc file
 * start : TRUE if it's start, FALSE if it's stop
 *
*/
static void ProcSeqStartstopMMapRegistations(struct seq_file *sfile,IMG_BOOL start)
{
	if(start)
	{
		LinuxLockMutexNested(&g_sMMapMutex, PVRSRV_LOCK_CLASS_MMAP);
	}
	else
	{
		LinuxUnLockMutex(&g_sMMapMutex);
	}
}


/*
 * Convert offset (index from KVOffsetTable) to element
 * (called when reading /proc/mmap file)

 * sfile : seq_file that handles /proc file
 * off : index into the KVOffsetTable from which to print
 *
 * returns void* : Pointer to element that will be dumped
 *
*/
static void* ProcSeqOff2ElementMMapRegistrations(struct seq_file *sfile, loff_t off)
{
	LinuxMemArea *psLinuxMemArea;
	if(!off)
	{
		return PVR_PROC_SEQ_START_TOKEN;
	}

	/* Walk every registered area's offset list, counting down to the
	 * off'th entry overall (off is 1-based after the start token). */
	list_for_each_entry(psLinuxMemArea, &g_sMMapAreaList, sMMapItem)
	{
		PKV_OFFSET_STRUCT psOffsetStruct;

		list_for_each_entry(psOffsetStruct, &psLinuxMemArea->sMMapOffsetStructList, sAreaItem)
		{
			off--;
			if (off == 0)
			{
				PVR_ASSERT(psOffsetStruct->psLinuxMemArea == psLinuxMemArea);
				return (void*)psOffsetStruct;
			}
		}
	}
	return (void*)0;
}

/*
 * Gets next MMap element to show.
(called when reading /proc/mmap file)

 * sfile : seq_file that handles /proc file
 * el : actual element
 * off : index into the KVOffsetTable from which to print
 *
 * returns void* : Pointer to element to show (0 ends iteration)
*/
static void* ProcSeqNextMMapRegistrations(struct seq_file *sfile,void* el,loff_t off)
{
	return ProcSeqOff2ElementMMapRegistrations(sfile,off);
}

/*
 * Show MMap element (called when reading /proc/mmap file)

 * sfile : seq_file that handles /proc file
 * el : actual element
 *
*/
static void ProcSeqShowMMapRegistrations(struct seq_file *sfile, void *el)
{
	KV_OFFSET_STRUCT *psOffsetStruct = (KV_OFFSET_STRUCT*)el;
	LinuxMemArea *psLinuxMemArea;
	IMG_SIZE_T uiRealByteSize;
	IMG_UINTPTR_T uiByteOffset;

	if(el == PVR_PROC_SEQ_START_TOKEN)
	{
		/* Header line (or XML preamble in DEBUG_LINUX_XML_PROC_FILES
		 * builds) with the global statistics. */
		seq_printf( sfile,
#if !defined(DEBUG_LINUX_XML_PROC_FILES)
			  "Allocations registered for mmap: %u\n"
			  "In total these areas correspond to %" SIZE_T_FMT_LEN "u bytes\n"
			  "psLinuxMemArea "
			  "UserVAddr "
			  "KernelVAddr "
			  "CpuPAddr "
			  "MMapOffset "
			  "ByteLength "
			  "LinuxMemType             "
			  "Pid   Name     Flags\n",
#else
			  "\n"
			  "\t%u\n"
			  "\t%" SIZE_T_FMT_LEN "u\n"
			  "\n",
#endif
			  g_ui32RegisteredAreas,
			  g_uiTotalByteSize
			  );
		return;
	}

	psLinuxMemArea = psOffsetStruct->psLinuxMemArea;

	DetermineUsersSizeAndByteOffset(psLinuxMemArea,
					&uiRealByteSize,
					&uiByteOffset);

	seq_printf( sfile,
#if !defined(DEBUG_LINUX_XML_PROC_FILES)
		    "%p %p %p " CPUPADDR_FMT " " UINTPTR_FMT " %" SIZE_T_FMT_LEN "u %-24s %-5u %-8s %08x(%s)\n",
#else
		    "\n"
		    "\t%p\n"
		    "\t%p\n"
		    "\t%p\n"
		    "\t" CPUPADDR_FMT "\n"
		    "\t" UINTPTR_FMT "\n"
		    "\t%" SIZE_T_FMT_LEN "u\n"
		    "\t%-24s\n"
		    "\t%-5u\n"
		    "\t%-8s\n"
		    "\t%08x\n"
		    "\t%s\n"
		    "\n",
#endif
		    psLinuxMemArea,
		    (IMG_PVOID)(psOffsetStruct->uiUserVAddr + uiByteOffset),
		    LinuxMemAreaToCpuVAddr(psLinuxMemArea),
		    LinuxMemAreaToCpuPAddr(psLinuxMemArea,0).uiAddr,
		    (IMG_UINTPTR_T)psOffsetStruct->uiMMapOffset,
		    psLinuxMemArea->uiByteSize,
		    LinuxMemAreaTypeToString(psLinuxMemArea->eAreaType),
		    psOffsetStruct->ui32PID,
		    psOffsetStruct->pszName,
		    psLinuxMemArea->ui32AreaFlags,
		    HAPFlagsToString(psLinuxMemArea->ui32AreaFlags));
}

#endif


/*!
 *******************************************************************************

 @Function  PVRMMapRegisterArea

 @Description

 Register a memory area with the mmap code.

 @input psLinuxMemArea : pointer to memory area.

 @Return PVRSRV_OK, or PVRSRV_ERROR.

 ******************************************************************************/
PVRSRV_ERROR
PVRMMapRegisterArea(LinuxMemArea *psLinuxMemArea)
{
	PVRSRV_ERROR eError;
#if defined(DEBUG) || defined(DEBUG_LINUX_MMAP_AREAS)
	const IMG_CHAR *pszName = LinuxMemAreaTypeToString(LinuxMemAreaRootType(psLinuxMemArea));
#endif

	LinuxLockMutexNested(&g_sMMapMutex, PVRSRV_LOCK_CLASS_MMAP);

#if defined(DEBUG) || defined(DEBUG_LINUX_MMAP_AREAS)
	PVR_DPF((PVR_DBG_MESSAGE,
		 "%s(%s, psLinuxMemArea 0x%p, ui32AllocFlags 0x%8x)",
		 __FUNCTION__, pszName, psLinuxMemArea, psLinuxMemArea->ui32AreaFlags));
#endif

	PVR_ASSERT(psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC || LinuxMemAreaRoot(psLinuxMemArea)->eAreaType != LINUX_MEM_AREA_SUB_ALLOC);

	/* Check this mem area hasn't already been registered */
	if(psLinuxMemArea->bMMapRegistered)
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: psLinuxMemArea 0x%p is already registered",
			 __FUNCTION__, psLinuxMemArea));
		eError = PVRSRV_ERROR_INVALID_PARAMS;
		goto exit_unlock;
	}

	list_add_tail(&psLinuxMemArea->sMMapItem, &g_sMMapAreaList);

	psLinuxMemArea->bMMapRegistered = IMG_TRUE;

#if defined(DEBUG_LINUX_MMAP_AREAS)
	g_ui32RegisteredAreas++;
	/*
	 * Sub memory areas are excluded from g_ui32TotalByteSize so that we
	 * don't count memory twice, once for the parent and again for sub
	 * allocationis.
	 */
	if (psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC)
	{
		g_uiTotalByteSize += psLinuxMemArea->uiByteSize;
	}
#endif

	eError = PVRSRV_OK;

exit_unlock:
	LinuxUnLockMutex(&g_sMMapMutex);

	return eError;
}


/*!
 *******************************************************************************

 @Function  PVRMMapRemoveRegisteredArea

 @Description

 Unregister a memory area with the mmap code.

 @input psLinuxMemArea : pointer to memory area.

 @Return PVRSRV_OK, or PVRSRV_ERROR.

 ******************************************************************************/
PVRSRV_ERROR
PVRMMapRemoveRegisteredArea(LinuxMemArea *psLinuxMemArea)
{
	PVRSRV_ERROR eError;
	PKV_OFFSET_STRUCT psOffsetStruct, psTmpOffsetStruct;

	LinuxLockMutexNested(&g_sMMapMutex, PVRSRV_LOCK_CLASS_MMAP);

	PVR_ASSERT(psLinuxMemArea->bMMapRegistered);

	list_for_each_entry_safe(psOffsetStruct, psTmpOffsetStruct, &psLinuxMemArea->sMMapOffsetStructList, sAreaItem)
	{
		if (psOffsetStruct->ui32Mapped != 0)
		{
			PVR_DPF((PVR_DBG_ERROR, "%s: psOffsetStruct 0x%p for memory area 0x0x%p is still mapped; psOffsetStruct->ui32Mapped %u", __FUNCTION__, psOffsetStruct, psLinuxMemArea, psOffsetStruct->ui32Mapped));
			dump_stack();
			PVRSRVDumpRefCountCCB();
			eError = PVRSRV_ERROR_STILL_MAPPED;
			goto exit_unlock;
		}
		else
		{
			/*
			 * An offset structure is created when a call is made to get
			 * the mmap data for a physical mapping. If the data is never
			 * used for mmap, we will be left with an umapped offset
			 * structure.
+ */ + PVR_DPF((PVR_DBG_WARNING, "%s: psOffsetStruct 0x%p was never mapped", __FUNCTION__, psOffsetStruct)); + } + + PVR_ASSERT((psOffsetStruct->ui32Mapped == 0) && psOffsetStruct->bOnMMapList); + + DestroyOffsetStruct(psOffsetStruct); + } + + list_del(&psLinuxMemArea->sMMapItem); + + psLinuxMemArea->bMMapRegistered = IMG_FALSE; + +#if defined(DEBUG_LINUX_MMAP_AREAS) + g_ui32RegisteredAreas--; + if (psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC) + { + g_uiTotalByteSize -= psLinuxMemArea->uiByteSize; + } +#endif + + eError = PVRSRV_OK; + +exit_unlock: + LinuxUnLockMutex(&g_sMMapMutex); + return eError; +} + + +/*! + ******************************************************************************* + + @Function LinuxMMapPerProcessConnect + + @Description + + Per-process mmap initialisation code. + + @input psEnvPerProc : pointer to OS specific per-process data. + + @Return PVRSRV_OK, or PVRSRV_ERROR. + + ******************************************************************************/ +PVRSRV_ERROR +LinuxMMapPerProcessConnect(PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc) +{ + PVR_UNREFERENCED_PARAMETER(psEnvPerProc); + + return PVRSRV_OK; +} + +/*! + ******************************************************************************* + + @Function LinuxMMapPerProcessDisconnect + + @Description + + Per-process mmap deinitialisation code. + + @input psEnvPerProc : pointer to OS specific per-process data. 
+ + ******************************************************************************/ +IMG_VOID +LinuxMMapPerProcessDisconnect(PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc) +{ + PKV_OFFSET_STRUCT psOffsetStruct, psTmpOffsetStruct; + IMG_BOOL bWarn = IMG_FALSE; + IMG_UINT32 ui32PID = OSGetCurrentProcessIDKM(); + + PVR_UNREFERENCED_PARAMETER(psEnvPerProc); + + LinuxLockMutexNested(&g_sMMapMutex, PVRSRV_LOCK_CLASS_MMAP); + + list_for_each_entry_safe(psOffsetStruct, psTmpOffsetStruct, &g_sMMapOffsetStructList, sMMapItem) + { + if (psOffsetStruct->ui32PID == ui32PID) + { + if (!bWarn) + { + PVR_DPF((PVR_DBG_WARNING, "%s: process has unmapped offset structures. Removing them", __FUNCTION__)); + bWarn = IMG_TRUE; + } + PVR_ASSERT(psOffsetStruct->ui32Mapped == 0); + PVR_ASSERT(psOffsetStruct->bOnMMapList); + + DestroyOffsetStruct(psOffsetStruct); + } + } + + LinuxUnLockMutex(&g_sMMapMutex); +} + + +/*! + ******************************************************************************* + + @Function LinuxMMapPerProcessHandleOptions + + @Description + + Set secure handle options required by mmap code. + + @input psHandleBase : pointer to handle base. + + @Return PVRSRV_OK, or PVRSRV_ERROR. + + ******************************************************************************/ +PVRSRV_ERROR LinuxMMapPerProcessHandleOptions(PVRSRV_HANDLE_BASE *psHandleBase) +{ + PVRSRV_ERROR eError; + + eError = PVRSRVSetMaxHandle(psHandleBase, MAX_MMAP_HANDLE); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"%s: failed to set handle limit (%d)", __FUNCTION__, eError)); + return eError; + } + + return eError; +} + + +/*! 
+ ******************************************************************************* + + @Function PVRMMapInit + + @Description + + MMap initialisation code + + ******************************************************************************/ +IMG_VOID +PVRMMapInit(IMG_VOID) +{ + LinuxInitMutex(&g_sMMapMutex); + + g_psMemmapCache = KMemCacheCreateWrapper("img-mmap", sizeof(KV_OFFSET_STRUCT), 0, 0); + if (!g_psMemmapCache) + { + PVR_DPF((PVR_DBG_ERROR,"%s: failed to allocate kmem_cache", __FUNCTION__)); + goto error; + } + +#if defined(DEBUG_LINUX_MMAP_AREAS) + g_ProcMMap = CreateProcReadEntrySeq("mmap", NULL, + ProcSeqNextMMapRegistrations, + ProcSeqShowMMapRegistrations, + ProcSeqOff2ElementMMapRegistrations, + ProcSeqStartstopMMapRegistations + ); +#endif /* defined(DEBUG_LINUX_MMAP_AREAS) */ + return; + +error: + PVRMMapCleanup(); + return; +} + + +/*! + ******************************************************************************* + + @Function PVRMMapCleanup + + @Description + + Mmap deinitialisation code + + ******************************************************************************/ +IMG_VOID +PVRMMapCleanup(IMG_VOID) +{ + PVRSRV_ERROR eError; + + if (!list_empty(&g_sMMapAreaList)) + { + LinuxMemArea *psLinuxMemArea, *psTmpMemArea; + + PVR_DPF((PVR_DBG_ERROR, "%s: Memory areas are still registered with MMap", __FUNCTION__)); + + PVR_TRACE(("%s: Unregistering memory areas", __FUNCTION__)); + list_for_each_entry_safe(psLinuxMemArea, psTmpMemArea, &g_sMMapAreaList, sMMapItem) + { + eError = PVRMMapRemoveRegisteredArea(psLinuxMemArea); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: PVRMMapRemoveRegisteredArea failed (%d)", __FUNCTION__, eError)); + } + PVR_ASSERT(eError == PVRSRV_OK); + + LinuxMemAreaDeepFree(psLinuxMemArea); + } + } + PVR_ASSERT(list_empty((&g_sMMapAreaList))); + +#if defined(DEBUG_LINUX_MMAP_AREAS) + RemoveProcEntrySeq(g_ProcMMap); +#endif /* defined(DEBUG_LINUX_MMAP_AREAS) */ + + if(g_psMemmapCache) + { + 
KMemCacheDestroyWrapper(g_psMemmapCache); + g_psMemmapCache = NULL; + } +} diff --git a/sgx_km/eurasia_km/services4/srvkm/env/linux/mmap.h b/sgx_km/eurasia_km/services4/srvkm/env/linux/mmap.h new file mode 100644 index 0000000..72640e3 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/env/linux/mmap.h @@ -0,0 +1,232 @@ +/*************************************************************************/ /*! +@Title Linux mmap interface declaration +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. 
+ +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(__MMAP_H__) +#define __MMAP_H__ + +#include +#include + +#if defined(VM_MIXEDMAP) +/* + * Mixed maps allow us to avoid using raw PFN mappings (VM_PFNMAP) for + * pages without pages structures ("struct page"), giving us more + * freedom in choosing the mmap offset for mappings. Mixed maps also + * allow both the mmap and the wrap code to be simplified somewhat. + */ +#define PVR_MAKE_ALL_PFNS_SPECIAL +#endif + +#include "perproc.h" +#include "mm.h" + +/* + * This structure represents the relationship between an mmap2 file + * offset and a LinuxMemArea for a given process. + */ +typedef struct KV_OFFSET_STRUCT_TAG +{ + /* + * Mapping count. Incremented when the mapping is created, and + * if the mapping is inherited across a process fork. + */ + IMG_UINT32 ui32Mapped; + + /* + * Offset to be passed to mmap2 to map the associated memory area + * into user space. The offset may represent the page frame number + * of the first page in the area (if the area is physically + * contiguous), or it may represent the secure handle associated + * with the area. 
+ */ + IMG_UINTPTR_T uiMMapOffset; + + IMG_SIZE_T uiRealByteSize; + + /* Memory area associated with this offset structure */ + LinuxMemArea *psLinuxMemArea; + +#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL) + /* ID of the thread that owns this structure */ + IMG_UINT32 ui32TID; +#endif + + /* ID of the process that owns this structure */ + IMG_UINT32 ui32PID; + + /* + * For offsets that represent actual page frame numbers, this structure + * is temporarily put on a list so that it can be found from the + * driver mmap entry point. This flag indicates the structure is + * on the list. + */ + IMG_BOOL bOnMMapList; + + /* Reference count for this structure */ + IMG_UINT32 ui32RefCount; + + /* + * User mode address of start of mapping. This is not necessarily the + * first user mode address of the memory area. + */ + IMG_UINTPTR_T uiUserVAddr; + + /* Extra entries to support proc filesystem debug info */ +#if defined(DEBUG_LINUX_MMAP_AREAS) + const IMG_CHAR *pszName; +#endif + + /* List entry field for MMap list */ + struct list_head sMMapItem; + + /* List entry field for per-memory area list */ + struct list_head sAreaItem; +}KV_OFFSET_STRUCT, *PKV_OFFSET_STRUCT; + + + +/*! + ******************************************************************************* + * @Function Mmap initialisation code + ******************************************************************************/ +IMG_VOID PVRMMapInit(IMG_VOID); + + +/*! + ******************************************************************************* + * @Function Mmap de-initialisation code + ******************************************************************************/ +IMG_VOID PVRMMapCleanup(IMG_VOID); + + +/*! 
+ ******************************************************************************* + * @Function Registers a memory area with the mmap code + * + * @Input psLinuxMemArea + * + * @Return PVRSRV_ERROR status + ******************************************************************************/ +PVRSRV_ERROR PVRMMapRegisterArea(LinuxMemArea *psLinuxMemArea); + + +/*! + ******************************************************************************* + * @Function Unregisters a memory area from the mmap code + * + * @Input psLinuxMemArea + * + * @Return PVRSRV_ERROR status + ******************************************************************************/ +PVRSRV_ERROR PVRMMapRemoveRegisteredArea(LinuxMemArea *psLinuxMemArea); + + +/*! + ****************************************************************************** + * @Function When a userspace services client, requests to map a memory + * area to userspace, this function validates the request and + * returns the details that the client must use when calling mmap(2). + * + * @Input psPerProc Per process data. + * @Input hMHandle Handle associated with the memory to map. + * This is a (secure) handle to the OS specific + * memory handle structure (hOSMemHandle), or + * a handle to a structure that contains the + * memory handle. + * @Output pui32MMapOffset The page aligned offset that the client must + * pass to the mmap2 system call. + * @Output pui32ByteOffset The real mapping that will be created for the + * services client may have a different + * size/alignment from it request. This offset + * is returned to the client and should be added + * to virtual address returned from mmap2 to get + * the first address corresponding to its request. + * @Output pui32RealByteOffset The size that the mapping will really be, + * that the client must also pass to mmap/munmap. + * + * @Output pui32UserVAddr Pointer to returned user mode address of + * mapping. 
+ * @Return PVRSRV_ERROR + ******************************************************************************/ +PVRSRV_ERROR PVRMMapOSMemHandleToMMapData(PVRSRV_PER_PROCESS_DATA *psPerProc, + IMG_HANDLE hMHandle, + IMG_UINTPTR_T *puiMMapOffset, + IMG_UINTPTR_T *puiByteOffset, + IMG_SIZE_T *puiRealByteSize, + IMG_UINTPTR_T *puiUserVAddr); + +/*! + ******************************************************************************* + + @Function Release mmap data. + + @Input psPerProc Per-process data. + @Input hMHandle Memory handle. + + @Output pbMUnmap Flag that indicates whether an munmap is + required. + @Output pui32RealByteSize Location for size of mapping. + @Output pui32UserVAddr User mode address to munmap. + + @Return PVRSRV_ERROR + ******************************************************************************/ +PVRSRV_ERROR +PVRMMapReleaseMMapData(PVRSRV_PER_PROCESS_DATA *psPerProc, + IMG_HANDLE hMHandle, + IMG_BOOL *pbMUnmap, + IMG_SIZE_T *puiRealByteSize, + IMG_UINTPTR_T *puiUserVAddr); + +/*! + ******************************************************************************* + * @Function driver mmap entry point + * + * @Input pFile : user file structure + * + * @Input ps_vma : vm area structure + * + * @Return 0 for success, -errno for failure. + ******************************************************************************/ +int PVRMMap(struct file* pFile, struct vm_area_struct* ps_vma); + + +#endif /* __MMAP_H__ */ + diff --git a/sgx_km/eurasia_km/services4/srvkm/env/linux/module.c b/sgx_km/eurasia_km/services4/srvkm/env/linux/module.c new file mode 100644 index 0000000..fafd089 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/env/linux/module.c @@ -0,0 +1,1288 @@ +/*************************************************************************/ /*! +@Title Linux module setup +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)) +#ifndef AUTOCONF_INCLUDED +#include +#endif +#endif + +#if defined(SUPPORT_DRI_DRM) && !defined(SUPPORT_DRI_DRM_PLUGIN) +#define PVR_MOD_STATIC +#else + /* + * For LDM drivers, define PVR_LDM_MODULE to indicate generic LDM + * support is required, besides indicating the exact support + * required (e.g. platform, or PCI device). + */ + #if defined(LDM_PLATFORM) + #define PVR_LDM_PLATFORM_MODULE + #define PVR_LDM_DEVICE_CLASS + #define PVR_LDM_MODULE + #else + #if defined(LDM_PCI) + #define PVR_LDM_DEVICE_CLASS + #define PVR_LDM_PCI_MODULE + #define PVR_LDM_MODULE + #else + #if defined(SYS_SHARES_WITH_3PKM) + #define PVR_LDM_DEVICE_CLASS + #endif + #endif + #endif +#define PVR_MOD_STATIC static +#endif + +#if (defined(PVR_LDM_PLATFORM_PRE_REGISTERED) || defined(PVR_LDM_DEVICE_TREE)) && !defined(NO_HARDWARE) +#define PVR_USE_PRE_REGISTERED_PLATFORM_DEV +#endif + +#if defined(PVR_LDM_DEVICE_TREE) && !defined(NO_HARDWARE) +#define PVR_USE_DEVICE_TREE +#endif + +#include +#include +#include +#include + +#if defined(SUPPORT_DRI_DRM) +#include +#if defined(PVR_SECURE_DRM_AUTH_EXPORT) +#include "env_perproc.h" +#endif +#endif + +#if defined(PVR_LDM_PLATFORM_MODULE) +#include +#endif /* PVR_LDM_PLATFORM_MODULE */ + +#if defined(PVR_LDM_PCI_MODULE) +#include +#endif /* PVR_LDM_PCI_MODULE */ + +#if defined(PVR_LDM_DEVICE_CLASS) +#include +#endif /* PVR_LDM_DEVICE_CLASS */ + +#if defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL) +#include +#endif + +#include "img_defs.h" +#include "services.h" +#include "kerneldisplay.h" +#include "kernelbuffer.h" +#include "syscommon.h" +#include "pvrmmap.h" +#include "mutils.h" +#include "mm.h" +#include "mmap.h" +#include "mutex.h" +#include "pvr_debug.h" +#include "srvkm.h" +#include "perproc.h" +#include "handle.h" +#include "pvr_bridge_km.h" +#include "proc.h" +#include "pvrmodule.h" 
+#include "private_data.h" +#include "lock.h" +#include "linkage.h" +#include "buffer_manager.h" +#if defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) || defined(PVR_ANDROID_NATIVE_WINDOW_HAS_FENCE) +#include "pvr_sync_common.h" +#endif + +#if defined(SUPPORT_PVRSRV_ANDROID_SYSTRACE) && defined(EUR_CR_TIMER) +#include "systrace.h" +#endif + +#if defined(SUPPORT_DRI_DRM) +#include "pvr_drm.h" +#endif + +#if defined(SUPPORT_DMABUF) +#include "pvr_linux_fence.h" +#endif + +/* + * DRVNAME is the name we use to register our driver. + * DEVNAME is the name we use to register actual device nodes. + */ +#if defined(PVR_LDM_MODULE) +#define DRVNAME PVR_LDM_DRIVER_REGISTRATION_NAME +#endif +#define DEVNAME PVRSRV_MODNAME + +#if defined(SUPPORT_DRI_DRM) +#define PRIVATE_DATA(pFile) ((pFile)->driver_priv) +#else +#define PRIVATE_DATA(pFile) ((pFile)->private_data) +#endif + +/* + * This is all module configuration stuff required by the linux kernel. + */ +MODULE_SUPPORTED_DEVICE(DEVNAME); + +#if defined(PVRSRV_NEED_PVR_DPF) +#include +extern IMG_UINT32 gPVRDebugLevel; +module_param(gPVRDebugLevel, uint, 0644); +MODULE_PARM_DESC(gPVRDebugLevel, "Sets the level of debug output (default 0x7)"); +#endif /* defined(PVRSRV_NEED_PVR_DPF) */ + +#if !defined(__devinitdata) +#define __devinitdata +#endif +#if !defined(__devinit) +#define __devinit +#endif +#if !defined(__devexit) +#define __devexit +#endif +#if !defined(__devexit_p) +#define __devexit_p(x) (&(x)) +#endif + +#if defined(SUPPORT_PVRSRV_DEVICE_CLASS) +/* PRQA S 3207 2 */ /* ignore 'not used' warning */ +EXPORT_SYMBOL(PVRGetDisplayClassJTable); +EXPORT_SYMBOL(PVRGetBufferClassJTable); +#endif /* defined(SUPPORT_PVRSRV_DEVICE_CLASS) */ + +#if defined(PVR_LDM_DEVICE_CLASS) && !defined(SUPPORT_DRI_DRM) +/* + * Device class used for /sys entries (and udev device node creation) + */ +static struct class *psPvrClass; +#endif + +#if !defined(SUPPORT_DRI_DRM) +/* + * This is the major number we use for all nodes in /dev. 
+ */ +static int AssignedMajorNumber; + +/* + * These are the operations that will be associated with the device node + * we create. + * + * With gcc -W, specifying only the non-null members produces "missing + * initializer" warnings. +*/ +static int PVRSRVOpen(struct inode* pInode, struct file* pFile); +static int PVRSRVRelease(struct inode* pInode, struct file* pFile); + +static struct file_operations pvrsrv_fops = +{ + .owner=THIS_MODULE, + .unlocked_ioctl = PVRSRV_BridgeDispatchKM, +#if defined(CONFIG_COMPAT) + .compat_ioctl = PVRSRV_BridgeCompatDispatchKM, +#endif + .open=PVRSRVOpen, + .release=PVRSRVRelease, + .mmap=PVRMMap, +}; +#endif + +PVRSRV_LINUX_MUTEX gPVRSRVLock; + +/* PID of process being released */ +IMG_UINT32 gui32ReleasePID; + +#if defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL) +static IMG_UINT32 gPVRPowerLevel; +#endif + +#if defined(PVR_LDM_MODULE) + +#if defined(PVR_LDM_PLATFORM_MODULE) +#define LDM_DEV struct platform_device +#define LDM_DRV struct platform_driver +#endif /*PVR_LDM_PLATFORM_MODULE */ + +#if defined(PVR_LDM_PCI_MODULE) +#define LDM_DEV struct pci_dev +#define LDM_DRV struct pci_driver +#endif /* PVR_LDM_PCI_MODULE */ +/* + * This is the driver interface we support. 
+ */ +#if defined(PVR_LDM_PLATFORM_MODULE) +static int PVRSRVDriverRemove(LDM_DEV *device); +static int PVRSRVDriverProbe(LDM_DEV *device); +#endif +#if defined(PVR_LDM_PCI_MODULE) +static void PVRSRVDriverRemove(LDM_DEV *device); +static int PVRSRVDriverProbe(LDM_DEV *device, const struct pci_device_id *id); +#endif +static int PVRSRVDriverSuspend(LDM_DEV *device, pm_message_t state); +static void PVRSRVDriverShutdown(LDM_DEV *device); +static int PVRSRVDriverResume(LDM_DEV *device); + +#if defined(PVR_LDM_PCI_MODULE) +/* This structure is used by the Linux module code */ +struct pci_device_id powervr_id_table[] __devinitdata = { + {PCI_DEVICE(SYS_SGX_DEV_VENDOR_ID, SYS_SGX_DEV_DEVICE_ID)}, +#if defined (SYS_SGX_DEV1_DEVICE_ID) + {PCI_DEVICE(SYS_SGX_DEV_VENDOR_ID, SYS_SGX_DEV1_DEVICE_ID)}, +#endif + {0} +}; + +MODULE_DEVICE_TABLE(pci, powervr_id_table); +#endif + +#if defined(PVR_USE_DEVICE_TREE) +static struct of_device_id powervr_id_table[] = { + { + .compatible = SYS_SGX_DEV_NAME + }, + {} +}; +MODULE_DEVICE_TABLE(of, powervr_id_table); +#else +#if defined(PVR_USE_PRE_REGISTERED_PLATFORM_DEV) +static struct platform_device_id powervr_id_table[] __devinitdata = { + {SYS_SGX_DEV_NAME, 0}, + {} +}; +#endif +#endif + +static LDM_DRV powervr_driver = { +#if defined(PVR_LDM_PLATFORM_MODULE) + .driver = { + .name = DRVNAME, +#if defined(PVR_USE_DEVICE_TREE) + .of_match_table = powervr_id_table, +#endif + }, +#endif +#if defined(PVR_LDM_PCI_MODULE) + .name = DRVNAME, +#endif +#if (defined(PVR_LDM_PCI_MODULE) || defined(PVR_USE_PRE_REGISTERED_PLATFORM_DEV)) && !defined(PVR_USE_DEVICE_TREE) + .id_table = powervr_id_table, +#endif + .probe = PVRSRVDriverProbe, +#if defined(PVR_LDM_PLATFORM_MODULE) + .remove = PVRSRVDriverRemove, +#endif +#if defined(PVR_LDM_PCI_MODULE) + .remove = __devexit_p(PVRSRVDriverRemove), +#endif + .suspend = PVRSRVDriverSuspend, + .resume = PVRSRVDriverResume, + .shutdown = PVRSRVDriverShutdown, +}; + +LDM_DEV *gpsPVRLDMDev; + +#if 
defined(MODULE) && defined(PVR_LDM_PLATFORM_MODULE) && \ + !defined(PVR_USE_PRE_REGISTERED_PLATFORM_DEV) +static void PVRSRVDeviceRelease(struct device unref__ *pDevice) +{ +} + +static struct platform_device powervr_device = { + .name = DEVNAME, + .id = -1, + .dev = { + .release = PVRSRVDeviceRelease + } +}; +#endif + +/*! +****************************************************************************** + + @Function PVRSRVDriverProbe + + @Description + + See whether a given device is really one we can drive. The platform bus + handler has already established that we should be able to service this device + because of the name match. We probably don't need to do anything else. + + @input pDevice - the device for which a probe is requested + + @Return 0 for success or <0 for an error. + +*****************************************************************************/ +#if defined(PVR_LDM_PLATFORM_MODULE) +static int PVRSRVDriverProbe(LDM_DEV *pDevice) +#endif +#if defined(PVR_LDM_PCI_MODULE) +static int __devinit PVRSRVDriverProbe(LDM_DEV *pDevice, const struct pci_device_id *id) +#endif +{ + SYS_DATA *psSysData; + + PVR_TRACE(("PVRSRVDriverProbe(pDevice=%p)", pDevice)); + +#if 0 /* INTEGRATION_POINT */ + /* Some systems require device-specific system initialisation. + * E.g. this lets the OS track a device's dependencies on various + * system hardware. + * + * Note: some systems use this to enable HW that SysAcquireData + * will depend on, therefore it must be called first. + */ + if (PerDeviceSysInitialise((IMG_PVOID)pDevice) != PVRSRV_OK) + { + return -EINVAL; + } +#endif + /* SysInitialise only designed to be called once. + */ + psSysData = SysAcquireDataNoCheck(); + if (psSysData == IMG_NULL) + { + gpsPVRLDMDev = pDevice; + if (SysInitialise() != PVRSRV_OK) + { + return -ENODEV; + } + } + + return 0; +} + + +/*! 
+****************************************************************************** + + @Function PVRSRVDriverRemove + + @Description + + This call is the opposite of the probe call: it is called when the device is + being removed from the driver's control. See the file $KERNELDIR/drivers/ + base/bus.c:device_release_driver() for the call to this function. + + This is the correct place to clean up anything our driver did while it was + asoociated with the device. + + @input pDevice - the device for which driver detachment is happening + + @Return 0 for success or <0 for an error. + +*****************************************************************************/ +#if defined (PVR_LDM_PLATFORM_MODULE) +static int PVRSRVDriverRemove(LDM_DEV *pDevice) +#endif +#if defined(PVR_LDM_PCI_MODULE) +static void __devexit PVRSRVDriverRemove(LDM_DEV *pDevice) +#endif +{ + SYS_DATA *psSysData; + + PVR_TRACE(("PVRSRVDriverRemove(pDevice=%p)", pDevice)); + + SysAcquireData(&psSysData); + +#if defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL) + if (gPVRPowerLevel != 0) + { + if (PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D0) == PVRSRV_OK) + { + gPVRPowerLevel = 0; + } + } +#endif + (void) SysDeinitialise(psSysData); + + gpsPVRLDMDev = IMG_NULL; + +#if 0 /* INTEGRATION_POINT */ + /* See previous integration point for details. 
*/ + if (PerDeviceSysDeInitialise((IMG_PVOID)pDevice) != PVRSRV_OK) + { + return -EINVAL; + } +#endif + +#if defined (PVR_LDM_PLATFORM_MODULE) + return 0; +#endif +#if defined (PVR_LDM_PCI_MODULE) + return; +#endif +} +#endif /* defined(PVR_LDM_MODULE) */ + +#if !defined(SUPPORT_DRI_DRM) +struct device *PVRLDMGetDevice(void) +{ +#if defined(PVR_LDM_MODULE) + return &gpsPVRLDMDev->dev; +#else + return NULL; +#endif +} +#endif + +#if defined(PVR_LDM_MODULE) || defined(SUPPORT_DRI_DRM) +static PVRSRV_LINUX_MUTEX gsPMMutex; +static IMG_BOOL bDriverIsSuspended; +static IMG_BOOL bDriverIsShutdown; +#endif + +#if defined(PVR_LDM_MODULE) || defined(PVR_DRI_DRM_PLATFORM_DEV) +/*! +****************************************************************************** + + @Function PVRSRVDriverShutdown + + @Description + + Suspend device operation for system shutdown. This is called as part of the + system halt/reboot process. The driver is put into a quiescent state by + setting the power state to D3. + + @input pDevice - the device for which shutdown is requested + + @Return nothing + +*****************************************************************************/ +#if defined(SUPPORT_DRI_DRM) && !defined(PVR_DRI_DRM_PLATFORM_DEV) && \ + !defined(SUPPORT_DRI_DRM_PLUGIN) +void PVRSRVDriverShutdown(struct drm_device *pDevice) +#else +PVR_MOD_STATIC void PVRSRVDriverShutdown(LDM_DEV *pDevice) +#endif +{ + PVR_TRACE(("PVRSRVDriverShutdown(pDevice=%p)", pDevice)); + + LinuxLockMutexNested(&gsPMMutex, PVRSRV_LOCK_CLASS_POWER); + + if (!bDriverIsShutdown && !bDriverIsSuspended) + { +#if defined(ANDROID) + /* + * Take the bridge mutex, and never release it, to stop + * processes trying to use the driver after it has been + * shutdown. 
+ */ + LinuxLockMutexNested(&gPVRSRVLock, PVRSRV_LOCK_CLASS_BRIDGE); +#endif + (void) PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D3); + } + + bDriverIsShutdown = IMG_TRUE; + + /* The bridge mutex is held on exit */ + LinuxUnLockMutex(&gsPMMutex); +} + +#endif /* defined(PVR_LDM_MODULE) || defined(PVR_DRI_DRM_PLATFORM_DEV) */ + + +#if defined(PVR_LDM_MODULE) || defined(SUPPORT_DRI_DRM) +/*! +****************************************************************************** + + @Function PVRSRVDriverSuspend + + @Description + + For 2.6 kernels: + Suspend device operation. We always get three calls to this regardless of + the state (D1-D3) chosen. The order is SUSPEND_DISABLE, SUSPEND_SAVE_STATE + then SUSPEND_POWER_DOWN. We take action as soon as we get the disable call, + the other states not being handled by us yet. + + For MontaVista 2.4 kernels: + This call gets made once only when someone does something like + + # echo -e -n "suspend powerdown 0" >/sys.devices/legacy/pvrsrv0/power + + The 3rd, numeric parameter (0) in the above has no relevence and is not + passed into us. The state parameter is always zero and the level parameter + is always SUSPEND_POWER_DOWN. Vive la difference! + + @input pDevice - the device for which resume is requested + + @Return 0 for success or <0 for an error. 

*****************************************************************************/
#if defined(SUPPORT_DRI_DRM) && !defined(PVR_DRI_DRM_PLATFORM_DEV) && \
	!defined(SUPPORT_DRI_DRM_PLUGIN)
#if defined(SUPPORT_DRM_MODESET)
int PVRSRVDriverSuspend(struct pci_dev *pDevice, pm_message_t state)
#else
int PVRSRVDriverSuspend(struct drm_device *pDevice, pm_message_t state)
#endif
#else
PVR_MOD_STATIC int PVRSRVDriverSuspend(LDM_DEV *pDevice, pm_message_t state)
#endif
{
	int res = 0;
	/* With manual power control in a debug non-DRM build, the PM entry
	 * points are compiled out to no-ops (see /proc/pvr/power_control). */
#if !(defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL) && !defined(SUPPORT_DRI_DRM))
	PVR_TRACE(( "PVRSRVDriverSuspend(pDevice=%p)", pDevice));

	LinuxLockMutexNested(&gsPMMutex, PVRSRV_LOCK_CLASS_POWER);

	if (!bDriverIsSuspended && !bDriverIsShutdown)
	{
#if defined(ANDROID)
		/*
		 * The bridge mutex will be held until we resume.
		 * The lock doesn't need to be taken on (non-Android)
		 * Linux systems, as all user processes will have been
		 * suspended at this point. In any case, taking the mutex
		 * may result in possible lock ordering problems being
		 * flagged up by the kernel, as the Linux console lock may
		 * have already been taken at this point. If the 3rd party
		 * display driver is Linux Framebuffer based, the previous
		 * locking order may have been bridge mutex first, followed
		 * by the console lock.
		 */
		LinuxLockMutexNested(&gPVRSRVLock, PVRSRV_LOCK_CLASS_BRIDGE);
#endif
		if (PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D3) == PVRSRV_OK)
		{
			bDriverIsSuspended = IMG_TRUE;
		}
		else
		{
#if defined(ANDROID)
			/* Power-down failed: release the bridge mutex again. */
			LinuxUnLockMutex(&gPVRSRVLock);
#endif
			res = -EINVAL;
		}
	}

	LinuxUnLockMutex(&gsPMMutex);
#endif
	return res;
}


/*!
******************************************************************************

 @Function		PVRSRVDriverResume

 @Description

 Resume device operation following a lull due to earlier suspension. It is
 implicit we're returning to D0 (fully operational) state. We always get three
 calls to this using level thus: RESUME_POWER_ON, RESUME_RESTORE_STATE then
 RESUME_ENABLE. On 2.6 kernels We don't do anything until we get the enable
 call; on the MontaVista set-up we only ever get the RESUME_POWER_ON call.

 @input pDevice - the device for which resume is requested

 @Return 0 for success or <0 for an error.

*****************************************************************************/
#if defined(SUPPORT_DRI_DRM) && !defined(PVR_DRI_DRM_PLATFORM_DEV) && \
	!defined(SUPPORT_DRI_DRM_PLUGIN)
#if defined(SUPPORT_DRM_MODESET)
int PVRSRVDriverResume(struct pci_dev *pDevice)
#else
int PVRSRVDriverResume(struct drm_device *pDevice)
#endif
#else
PVR_MOD_STATIC int PVRSRVDriverResume(LDM_DEV *pDevice)
#endif
{
	int res = 0;
#if !(defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL) && !defined(SUPPORT_DRI_DRM))
	PVR_TRACE(("PVRSRVDriverResume(pDevice=%p)", pDevice));

	LinuxLockMutexNested(&gsPMMutex, PVRSRV_LOCK_CLASS_POWER);

	if (bDriverIsSuspended && !bDriverIsShutdown)
	{
		if (PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D0) == PVRSRV_OK)
		{
			bDriverIsSuspended = IMG_FALSE;
#if defined(ANDROID)
			/* Drop the bridge mutex taken at suspend time. */
			LinuxUnLockMutex(&gPVRSRVLock);
#endif
		}
		else
		{
#if defined(ANDROID)
			/* The bridge mutex is not released on failure */
#endif
			res = -EINVAL;
		}
	}

	LinuxUnLockMutex(&gsPMMutex);
#endif
	return res;
}
#endif /* defined(PVR_LDM_MODULE) || defined(SUPPORT_DRI_DRM) */


#if defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL) && !defined(SUPPORT_DRI_DRM)
/*
 * If PVR_LDM_PCI_MODULE is defined (and PVR_MANUAL_POWER_CONTROL is *NOT* defined),
 * the device can be suspended and resumed without suspending/resuming the
 * system, by writing values into the power/state sysfs file for the device.
 * To suspend:
 * echo -n 2 > power/state
 * To Resume:
 * echo -n 0 > power/state
 *
 * The problem with this approach is that the device is usually left
 * powered up; it is the responsibility of the bus driver to remove
 * the power.
 *
 * Defining PVR_MANUAL_POWER_CONTROL is intended to make it easier to
 * debug power management issues, especially when power is really removed
 * from the device. It is easier to debug the driver if it is not being
 * suspended/resumed with the rest of the system.
 *
 * When PVR_MANUAL_POWER_CONTROL is defined, the following proc entry is
 * created:
 * 	/proc/pvr/power_control
 * The driver suspend/resume entry points defined below no longer suspend or
 * resume the device. To suspend the device, type the following:
 * 	echo 2 > /proc/pvr/power_control
 * To resume the device, type:
 * 	echo 0 > /proc/pvr/power_control
 *
 * The following example shows how to suspend/resume the device independently
 * of the rest of the system.
 * Suspend the device:
 * 	echo 2 > /proc/pvr/power_control
 * Suspend the system. Then you should be able to suspend and resume
 * as normal. To resume the device type the following:
 * 	echo 0 > /proc/pvr/power_control
 */

/* Write handler for /proc/pvr/power_control.  Accepts exactly two bytes:
 * a single ASCII digit ('0' = D0/on, non-zero = D3/off) plus '\n'.
 * Returns the byte count consumed on success, -EINVAL on bad input or on
 * a power-state change failure. */
IMG_INT PVRProcSetPowerLevel(struct file *file, const IMG_CHAR *buffer, IMG_UINT32 count, IMG_VOID *data)
{
	IMG_CHAR data_buffer[2];
	IMG_UINT32 PVRPowerLevel;

	if (count != sizeof(data_buffer))
	{
		return -EINVAL;
	}
	else
	{
		if (copy_from_user(data_buffer, buffer, count))
			return -EINVAL;
		if (data_buffer[count - 1] != '\n')
			return -EINVAL;
		/* NOTE(review): no range check — any digit other than '0'
		 * (indeed any character) maps to "power off" via != 0. */
		PVRPowerLevel = data_buffer[0] - '0';
		if (PVRPowerLevel != gPVRPowerLevel)
		{
			if (PVRPowerLevel != 0)
			{
				if (PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D3) != PVRSRV_OK)
				{
					return -EINVAL;
				}
			}
			else
			{
				if (PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D0) != PVRSRV_OK)
				{
					return -EINVAL;
				}
			}

			gPVRPowerLevel = PVRPowerLevel;
		}
	}
	return (count);
}

/* Read handler for /proc/pvr/power_control: prints the current level.
 * NOTE(review): "%lu" assumes gPVRPowerLevel is unsigned long; its
 * declaration is outside this view — confirm the format matches. */
void ProcSeqShowPowerLevel(struct seq_file *sfile,void* el)
{
	seq_printf(sfile, "%lu\n", gPVRPowerLevel);
}

#endif

/*!
******************************************************************************

 @Function		PVRSRVOpen

 @Description

 Open the PVR services node - called when the relevant device node is open()ed.

 @input pInode - the inode for the file being openeded

 @input dev - the DRM device corresponding to this driver.

 @input pFile - the file handle data for the actual file being opened

 @Return 0 for success or <0 for an error.

*****************************************************************************/
#if defined(SUPPORT_DRI_DRM)
int PVRSRVOpen(struct drm_device unref__ *dev, struct drm_file *pFile)
#else
static int PVRSRVOpen(struct inode unref__ * pInode, struct file *pFile)
#endif
{
	PVRSRV_FILE_PRIVATE_DATA *psPrivateData;
	IMG_HANDLE hBlockAlloc;
	int iRet = -ENOMEM;
	PVRSRV_ERROR eError;
	IMG_UINT32 ui32PID;
#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
	PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc;
#endif

	/* All open/release/ioctl paths are serialised by the bridge mutex. */
	LinuxLockMutexNested(&gPVRSRVLock, PVRSRV_LOCK_CLASS_BRIDGE);

	ui32PID = OSGetCurrentProcessIDKM();

	/* Register (or refcount) this process with services. */
	if (PVRSRVProcessConnect(ui32PID, 0) != PVRSRV_OK)
		goto err_unlock;

#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
	psEnvPerProc = PVRSRVPerProcessPrivateData(ui32PID);
	if (psEnvPerProc == IMG_NULL)
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: No per-process private data", __FUNCTION__));
		/* NOTE(review): error paths below this point return without a
		 * matching PVRSRVProcessDisconnect — presumably balanced by
		 * later cleanup; verify against the services core. */
		goto err_unlock;
	}
#endif

	eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
						sizeof(PVRSRV_FILE_PRIVATE_DATA),
						(IMG_PVOID *)&psPrivateData,
						&hBlockAlloc,
						"File Private Data");

	if(eError != PVRSRV_OK)
		goto err_unlock;

	psPrivateData->hKernelMemInfo = NULL;
	/* Allow mmap offsets with the high bit set on this fd. */
	pFile->f_mode |= FMODE_UNSIGNED_OFFSET;
#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
	psPrivateData->psDRMFile = pFile;

	list_add_tail(&psPrivateData->sDRMAuthListItem, &psEnvPerProc->sDRMAuthListHead);
#endif
	psPrivateData->ui32OpenPID = ui32PID;
	psPrivateData->hBlockAlloc = hBlockAlloc;
	PRIVATE_DATA(pFile) = psPrivateData;
	iRet = 0;
err_unlock:
	LinuxUnLockMutex(&gPVRSRVLock);
	return iRet;
}


/*!
******************************************************************************

 @Function		PVRSRVRelease

 @Description

 Release access the PVR services node - called when a file is closed, whether
 at exit or using close(2) system call.

 @input pInode - the inode for the file being released

 @input pFile - the file handle data for the actual file being released

 @Return 0 for success or <0 for an error.

*****************************************************************************/
#if defined(SUPPORT_DRI_DRM)
void PVRSRVRelease(void *pvPrivData)
#else
static int PVRSRVRelease(struct inode unref__ * pInode, struct file *pFile)
#endif
{
	PVRSRV_FILE_PRIVATE_DATA *psPrivateData;
	int err = 0;

	LinuxLockMutexNested(&gPVRSRVLock, PVRSRV_LOCK_CLASS_BRIDGE);

#if defined(SUPPORT_DRI_DRM)
	psPrivateData = (PVRSRV_FILE_PRIVATE_DATA *)pvPrivData;
#else
	psPrivateData = PRIVATE_DATA(pFile);
#endif
	if (psPrivateData != IMG_NULL)
	{
#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
		list_del(&psPrivateData->sDRMAuthListItem);
#endif

		/* If this fd had a meminfo exported through it, drop that
		 * export reference before disconnecting the process. */
		if(psPrivateData->hKernelMemInfo)
		{
			PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;

			/* Look up the meminfo we just exported */
			if(PVRSRVLookupHandle(KERNEL_HANDLE_BASE,
								  (IMG_PVOID *)&psKernelMemInfo,
								  psPrivateData->hKernelMemInfo,
								  PVRSRV_HANDLE_TYPE_MEM_INFO) != PVRSRV_OK)
			{
				PVR_DPF((PVR_DBG_ERROR, "%s: Failed to look up export handle", __FUNCTION__));
				err = -EFAULT;
				goto err_unlock;
			}

			/* Tell the XProc about the export if required */
			if (psKernelMemInfo->sShareMemWorkaround.bInUse)
			{
				BM_XProcIndexRelease(psKernelMemInfo->sShareMemWorkaround.ui32ShareIndex);
			}

			/* This drops the psMemInfo refcount bumped on export */
			if(FreeMemCallBackCommon(psKernelMemInfo, 0,
									 PVRSRV_FREE_CALLBACK_ORIGIN_EXTERNAL) != PVRSRV_OK)
			{
				PVR_DPF((PVR_DBG_ERROR, "%s: FreeMemCallBackCommon failed", __FUNCTION__));
				err = -EFAULT;
				goto err_unlock;
			}
		}

		/* Usually this is the same as OSGetCurrentProcessIDKM(),
		 * but not necessarily (e.g. fork(), child closes last..)
		 */
		gui32ReleasePID = psPrivateData->ui32OpenPID;
		PVRSRVProcessDisconnect(psPrivateData->ui32OpenPID);
		gui32ReleasePID = 0;

		OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
				  sizeof(PVRSRV_FILE_PRIVATE_DATA),
				  psPrivateData, psPrivateData->hBlockAlloc);

#if !defined(SUPPORT_DRI_DRM)
		PRIVATE_DATA(pFile) = IMG_NULL; /*nulling shared pointer*/
#endif
	}

err_unlock:
	LinuxUnLockMutex(&gPVRSRVLock);
#if defined(SUPPORT_DRI_DRM)
	return;
#else
	return err;
#endif
}


/*!
******************************************************************************

 @Function		PVRCore_Init

 @Description

 Insert the driver into the kernel.

 The device major number is allocated by the kernel dynamically. This means
 that the device node (nominally /dev/pvrsrv) will need to be re-made at boot
 time if the number changes between subsequent loads of the module. While the
 number often stays constant between loads this is not guaranteed. The node
 is made as root on the shell with:

 mknod /dev/pvrsrv c nnn 0

 where nnn is the major number found in /proc/devices for DEVNAME and also
 reported by the PVR_DPF() - look at the boot log using dmesg' to see this).

 Currently the auto-generated script /etc/init.d/rc.pvr handles creation of
 the device. In other environments the device may be created either through
 devfs or sysfs.

 Readable proc-filesystem entries under /proc/pvr are created with
 CreateProcEntries(). These can be read at runtime to get information about
 the device (eg. 'cat /proc/pvr/vm')

 __init places the function in a special memory section that the kernel frees
 once the function has been run. Refer also to module_init() macro call below.

 @input none

 @Return none

*****************************************************************************/
#if defined(SUPPORT_DRI_DRM)
int PVRCore_Init(void)
#else
static int __init PVRCore_Init(void)
#endif
{
	int error;
#if !defined(PVR_LDM_MODULE)
	PVRSRV_ERROR eError;
#endif
#if !defined(SUPPORT_DRI_DRM) && defined(PVR_LDM_DEVICE_CLASS)
	struct device *psDev;
#endif



#if !defined(SUPPORT_DRI_DRM)
	/*
	 * Must come before attempting to print anything via Services.
	 * For DRM, the initialisation will already have been done.
	 */
	PVRDPFInit();
#endif
	PVR_TRACE(("PVRCore_Init"));

#if defined(PVR_LDM_MODULE) || defined(SUPPORT_DRI_DRM)
	LinuxInitMutex(&gsPMMutex);
#endif
	LinuxInitMutex(&gPVRSRVLock);

	if (CreateProcEntries ())
	{
		error = -ENOMEM;
		return error;
	}

	if (PVROSFuncInit() != PVRSRV_OK)
	{
		error = -ENOMEM;
		goto init_failed;
	}

	PVRLinuxMUtilsInit();

	if(LinuxMMInit() != PVRSRV_OK)
	{
		error = -ENOMEM;
		goto init_failed;
	}

#if defined(SUPPORT_DMABUF)
	if (PVRLinuxFenceInit())
	{
		error = -ENOMEM;
		goto init_failed;
	}
#endif

	LinuxBridgeInit();

	PVRMMapInit();

#if defined(PVR_LDM_MODULE)

#if defined(PVR_LDM_PLATFORM_MODULE) || defined(SUPPORT_DRI_DRM_PLUGIN)
	if ((error = platform_driver_register(&powervr_driver)) != 0)
	{
		PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to register platform driver (%d)", error));

		goto init_failed;
	}

#if defined(MODULE) && !defined(PVR_USE_PRE_REGISTERED_PLATFORM_DEV)
	if ((error = platform_device_register(&powervr_device)) != 0)
	{
		platform_driver_unregister(&powervr_driver);

		PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to register platform device (%d)", error));

		goto init_failed;
	}
#endif
#endif /* PVR_LDM_PLATFORM_MODULE */

#if defined(PVR_LDM_PCI_MODULE)
	if ((error = pci_register_driver(&powervr_driver)) != 0)
	{
		PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to register PCI driver (%d)", error));

		goto init_failed;
	}
#endif /* PVR_LDM_PCI_MODULE */
#endif /* defined(PVR_LDM_MODULE) */

#if !defined(PVR_LDM_MODULE)
	/*
	 * Drivers using LDM, will call SysInitialise in the probe/attach code
	 */
	if ((eError = SysInitialise()) != PVRSRV_OK)
	{
		error = -ENODEV;
#if defined(TCF_REV) && (TCF_REV == 110)
		if(eError == PVRSRV_ERROR_NOT_SUPPORTED)
		{
			printk("\nAtlas wrapper (FPGA image) version mismatch");
			error = -ENODEV;
		}
#endif
		goto init_failed;
	}
#endif /* !defined(PVR_LDM_MODULE) */

#if !defined(SUPPORT_DRI_DRM)
	AssignedMajorNumber = register_chrdev(0, DEVNAME, &pvrsrv_fops);

	if (AssignedMajorNumber <= 0)
	{
		PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to get major number"));

		error = -EBUSY;
		goto sys_deinit;
	}

	PVR_TRACE(("PVRCore_Init: major device %d", AssignedMajorNumber));

#if defined(PVR_LDM_DEVICE_CLASS)
	/*
	 * This code (using GPL symbols) facilitates automatic device
	 * node creation on platforms with udev (or similar).
	 */
	psPvrClass = class_create(THIS_MODULE, "pvr");

	if (IS_ERR(psPvrClass))
	{
		PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to create class (%ld)", PTR_ERR(psPvrClass)));
		error = -EBUSY;
		goto unregister_device;
	}

	psDev = device_create(psPvrClass, NULL, MKDEV(AssignedMajorNumber, 0),
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26))
						  NULL,
#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)) */
						  DEVNAME);
	if (IS_ERR(psDev))
	{
		PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to create device (%ld)", PTR_ERR(psDev)));
		error = -EBUSY;
		goto destroy_class;
	}
#endif /* defined(PVR_LDM_DEVICE_CLASS) */
#endif /* !defined(SUPPORT_DRI_DRM) */

#if defined(SUPPORT_PVRSRV_ANDROID_SYSTRACE) && defined(EUR_CR_TIMER)
	SystraceCreateFS();
#endif

#if defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) || defined(PVR_ANDROID_NATIVE_WINDOW_HAS_FENCE)
	PVRSyncDeviceInit();
#endif
	return 0;

	/* Error unwinding: labels run in reverse order of setup. */
#if !defined(SUPPORT_DRI_DRM)
#if defined(PVR_LDM_DEVICE_CLASS)
destroy_class:
	class_destroy(psPvrClass);
unregister_device:
	unregister_chrdev((IMG_UINT)AssignedMajorNumber, DEVNAME);
#endif
#endif
#if !defined(SUPPORT_DRI_DRM)
sys_deinit:
#endif
#if defined(PVR_LDM_MODULE)
#if defined(PVR_LDM_PCI_MODULE)
	pci_unregister_driver(&powervr_driver);
#endif

#if defined (PVR_LDM_PLATFORM_MODULE)
#if defined(MODULE) && !defined(PVR_USE_PRE_REGISTERED_PLATFORM_DEV)
	platform_device_unregister(&powervr_device);
#endif
	platform_driver_unregister(&powervr_driver);
#endif

#else /* defined(PVR_LDM_MODULE) */
	/* LDM drivers call SysDeinitialise during PVRSRVDriverRemove */
	{
		SYS_DATA *psSysData;

		psSysData = SysAcquireDataNoCheck();
		if (psSysData != IMG_NULL)
		{
			(void) SysDeinitialise(psSysData);
		}
	}
#endif /* defined(PVR_LDM_MODULE) */
init_failed:
	PVRMMapCleanup();
	LinuxMMCleanup();
	LinuxBridgeDeInit();
#if defined(SUPPORT_DMABUF)
	PVRLinuxFenceDeInit();
#endif
	PVROSFuncDeInit();
	RemoveProcEntries();

	return error;

} /*PVRCore_Init*/


/*!
*****************************************************************************

 @Function		PVRCore_Cleanup

 @Description

 Remove the driver from the kernel.

 There's no way we can get out of being unloaded other than panicking; we
 just do everything and plough on regardless of error.

 __exit places the function in a special memory section that the kernel frees
 once the function has been run. Refer also to module_exit() macro call below.

 Note that the for LDM on MontaVista kernels, the positioning of the driver
 de-registration is the opposite way around than would be suggested by the
 registration case or the 2,6 kernel case. This is the correct way to do it
 and the kernel panics if you change it. You have been warned.

 @input none

 @Return none

*****************************************************************************/
#if defined(SUPPORT_DRI_DRM)
void PVRCore_Cleanup(void)
#else
static void __exit PVRCore_Cleanup(void)
#endif
{
#if !defined(PVR_LDM_MODULE)
	SYS_DATA *psSysData;
#endif
	PVR_TRACE(("PVRCore_Cleanup"));

#if !defined(PVR_LDM_MODULE)
	SysAcquireData(&psSysData);
#endif

#if defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) || defined(PVR_ANDROID_NATIVE_WINDOW_HAS_FENCE)
	PVRSyncDeviceDeInit();
#endif

#if !defined(SUPPORT_DRI_DRM)

#if defined(PVR_LDM_DEVICE_CLASS)
	device_destroy(psPvrClass, MKDEV(AssignedMajorNumber, 0));
	class_destroy(psPvrClass);
#endif

	/* unregister_chrdev returned a value on kernels <= 2.6.22 and is
	 * void afterwards; this split-ifdef construct handles both. */
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,22))
	if (
#endif /* (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,22)) */
		unregister_chrdev((IMG_UINT)AssignedMajorNumber, DEVNAME)
#if !(LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,22))
		;
#else /* (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,22)) */
		)
	{
		PVR_DPF((PVR_DBG_ERROR," can't unregister device major %d", AssignedMajorNumber));
	}
#endif /* (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,22)) */
#endif /* !defined(SUPPORT_DRI_DRM) */

#if defined(PVR_LDM_MODULE)

#if defined(PVR_LDM_PCI_MODULE)
	pci_unregister_driver(&powervr_driver);
#endif

#if defined (PVR_LDM_PLATFORM_MODULE)
#if defined(MODULE) && !defined(PVR_USE_PRE_REGISTERED_PLATFORM_DEV)
	platform_device_unregister(&powervr_device);
#endif
	platform_driver_unregister(&powervr_driver);
#endif

#else /* defined(PVR_LDM_MODULE) */
#if defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL)
	if (gPVRPowerLevel != 0)
	{
		if (PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D0) == PVRSRV_OK)
		{
			gPVRPowerLevel = 0;
		}
	}
#endif
	/* LDM drivers call SysDeinitialise during PVRSRVDriverRemove */
	(void) SysDeinitialise(psSysData);
#endif /* defined(PVR_LDM_MODULE) */

	PVRMMapCleanup();

	LinuxMMCleanup();

	LinuxBridgeDeInit();

#if defined(SUPPORT_DMABUF)
	PVRLinuxFenceDeInit();
#endif

	PVROSFuncDeInit();

	RemoveProcEntries();

#if defined(SUPPORT_PVRSRV_ANDROID_SYSTRACE) && defined(EUR_CR_TIMER)
	SystraceDestroyFS();
#endif

	PVR_TRACE(("PVRCore_Cleanup: unloading"));
}

/*
 * These macro calls define the initialisation and removal functions of the
 * driver. Although they are prefixed `module_', they apply when compiling
 * statically as well; in both cases they define the function the kernel will
 * run to start/stop the driver.
*/
#if !defined(SUPPORT_DRI_DRM)
module_init(PVRCore_Init);
module_exit(PVRCore_Cleanup);
#endif
diff --git a/sgx_km/eurasia_km/services4/srvkm/env/linux/mutex.c b/sgx_km/eurasia_km/services4/srvkm/env/linux/mutex.c
new file mode 100644
index 0000000..56422d6
--- /dev/null
+++ b/sgx_km/eurasia_km/services4/srvkm/env/linux/mutex.c
@@ -0,0 +1,163 @@
/*************************************************************************/ /*!
@Title          Linux mutex interface
@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
@License        Dual MIT/GPLv2

The contents of this file are subject to the MIT license as set out below.
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ /**************************************************************************/

/* NOTE(review): the <...> targets of these #include directives were lost
 * when this patch text was extracted; restore from the upstream file
 * (linux/version.h, linux/errno.h, linux/mutex.h or asm/semaphore.h,
 * linux/module.h, etc.) before applying. */
#include
#include
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
#include
#else
#include
#endif
#include

#include
#include

#include "mutex.h"


/* On kernels >= 2.6.15 the wrappers map directly onto the kernel's
 * struct mutex API; on older kernels they are emulated with a semaphore
 * plus an externally tracked count (see the #else branch below). */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))

IMG_VOID LinuxInitMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
{
	mutex_init(psPVRSRVMutex);
}

IMG_VOID LinuxLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
{
	mutex_lock(psPVRSRVMutex);
}

/* uiLockClass is the lockdep subclass (see PVRSRV_MUTEX_LOCK_CLASS). */
IMG_VOID LinuxLockMutexNested(PVRSRV_LINUX_MUTEX *psPVRSRVMutex, unsigned int uiLockClass)
{
	mutex_lock_nested(psPVRSRVMutex, uiLockClass);
}

PVRSRV_ERROR LinuxLockMutexInterruptible(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
{
	if(mutex_lock_interruptible(psPVRSRVMutex) == -EINTR)
	{
		return PVRSRV_ERROR_MUTEX_INTERRUPTIBLE_ERROR;
	}
	else
	{
		return PVRSRV_OK;
	}
}

/* Returns non-zero if the lock was acquired, 0 if it was contended. */
IMG_INT32 LinuxTryLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
{
	return mutex_trylock(psPVRSRVMutex);
}

IMG_VOID LinuxUnLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
{
	mutex_unlock(psPVRSRVMutex);
}

IMG_BOOL LinuxIsLockedMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
{
	return (mutex_is_locked(psPVRSRVMutex)) ? IMG_TRUE : IMG_FALSE;
}


#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)) */


IMG_VOID LinuxInitMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
{
	init_MUTEX(&psPVRSRVMutex->sSemaphore);
	atomic_set(&psPVRSRVMutex->Count, 0);
}

/* Count goes negative while held; LinuxIsLockedMutex reads it. */
IMG_VOID LinuxLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
{
	down(&psPVRSRVMutex->sSemaphore);
	atomic_dec(&psPVRSRVMutex->Count);
}

/* No lockdep on these kernels, so the class is ignored. */
IMG_VOID LinuxLockMutexNested(PVRSRV_LINUX_MUTEX *psPVRSRVMutex, unsigned int uiLockClass)
{
	LinuxLockMutex(psPVRSRVMutex);
}

PVRSRV_ERROR LinuxLockMutexInterruptible(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
{
	if(down_interruptible(&psPVRSRVMutex->sSemaphore) == -EINTR)
	{
		/* The process was sent a signal while waiting for the semaphore
		 * (e.g. a kill signal from userspace)
		 */
		return PVRSRV_ERROR_MUTEX_INTERRUPTIBLE_ERROR;
	}else{
		atomic_dec(&psPVRSRVMutex->Count);
		return PVRSRV_OK;
	}
}

IMG_INT32 LinuxTryLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
{
	IMG_INT32 Status = down_trylock(&psPVRSRVMutex->sSemaphore);
	if(Status == 0)
	{
		atomic_dec(&psPVRSRVMutex->Count);
	}

	return Status == 0;
}

IMG_VOID LinuxUnLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
{
	atomic_inc(&psPVRSRVMutex->Count);
	up(&psPVRSRVMutex->sSemaphore);
}

IMG_BOOL LinuxIsLockedMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
{
	IMG_INT32 iCount;

	iCount = atomic_read(&psPVRSRVMutex->Count);

	return (IMG_BOOL)iCount;
}

#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)) */

diff --git a/sgx_km/eurasia_km/services4/srvkm/env/linux/mutex.h b/sgx_km/eurasia_km/services4/srvkm/env/linux/mutex.h
new file mode 100644
index 0000000..09ae46d
--- /dev/null
+++ b/sgx_km/eurasia_km/services4/srvkm/env/linux/mutex.h
@@ -0,0 +1,100 @@
/*************************************************************************/ /*!
@Title          Linux mutex interface
@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
@License        Dual MIT/GPLv2

The contents of this file are subject to the MIT license as set out below.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

Alternatively, the contents of this file may be used under the terms of
the GNU General Public License Version 2 ("GPL") in which case the provisions
of GPL are applicable instead of those above.

If you wish to allow use of your version of this file only under the terms of
GPL, and not to allow others to use your version of this file under the terms
of the MIT license, indicate your decision by deleting the provisions above
and replace them with the notice and other provisions required by GPL as set
out in the file called "GPL-COPYING" included in this distribution. If you do
not delete the provisions above, a recipient may use your version of this file
under the terms of either the MIT license or GPL.

This License is also included in this distribution in the file called
"MIT-COPYING".

EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/ /**************************************************************************/


#ifndef __INCLUDED_LINUX_MUTEX_H_
#define __INCLUDED_LINUX_MUTEX_H_

/* NOTE(review): the <...> include targets were lost in patch extraction;
 * restore linux/version.h plus linux/mutex.h / asm/semaphore.h from the
 * upstream file before applying. */
#include

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
#include
#else
#include
#endif



#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))

/* Modern kernels: the wrapper is just the kernel mutex. */
typedef struct mutex PVRSRV_LINUX_MUTEX;

#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)) */


typedef struct {
	struct semaphore sSemaphore;
	/* since Linux's struct semaphore is intended to be
	 * opaque we don't poke inside for the count and
	 * instead we track it outselves. (So we can implement
	 * LinuxIsLockedMutex)
	 */
	atomic_t Count;
}PVRSRV_LINUX_MUTEX;

#endif

/* Lockdep subclasses used with LinuxLockMutexNested; values establish the
 * permitted nesting order of the driver's mutexes. */
enum PVRSRV_MUTEX_LOCK_CLASS
{
	PVRSRV_LOCK_CLASS_POWER,
	PVRSRV_LOCK_CLASS_BRIDGE,
	PVRSRV_LOCK_CLASS_MMAP,
	PVRSRV_LOCK_CLASS_MM_DEBUG,
	PVRSRV_LOCK_CLASS_PVR_DEBUG,
};

extern IMG_VOID LinuxInitMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex);

extern IMG_VOID LinuxLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex);

extern IMG_VOID LinuxLockMutexNested(PVRSRV_LINUX_MUTEX *psPVRSRVMutex, unsigned int uiLockClass);

extern PVRSRV_ERROR LinuxLockMutexInterruptible(PVRSRV_LINUX_MUTEX *psPVRSRVMutex);

extern IMG_INT32 LinuxTryLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex);

extern IMG_VOID LinuxUnLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex);

extern IMG_BOOL LinuxIsLockedMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex);


#endif /* __INCLUDED_LINUX_MUTEX_H_ */

diff --git a/sgx_km/eurasia_km/services4/srvkm/env/linux/mutils.c b/sgx_km/eurasia_km/services4/srvkm/env/linux/mutils.c
new file mode 100644
index 0000000..3afa61b
--- /dev/null
+++ b/sgx_km/eurasia_km/services4/srvkm/env/linux/mutils.c
@@ -0,0 +1,179 @@
/*************************************************************************/ /*!
@Title          Linux memory interface support functions
@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
@License        Dual MIT/GPLv2

The contents of this file are subject to the MIT license as set out below.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

Alternatively, the contents of this file may be used under the terms of
the GNU General Public License Version 2 ("GPL") in which case the provisions
of GPL are applicable instead of those above.
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)) +#ifndef AUTOCONF_INCLUDED +#include +#endif +#endif + +#include +#include +#include +#include + +#include "img_defs.h" +#include "pvr_debug.h" +#include "mutils.h" + +#if defined(SUPPORT_LINUX_X86_PAT) + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0)) +#define _PAGE_CACHE_WC pgprot_val(cachemode2pgprot(_PAGE_CACHE_MODE_WC)) +#endif + +#define PAT_LINUX_X86_WC 1 + +#define PAT_X86_ENTRY_BITS 8 + +#define PAT_X86_BIT_PWT 1U +#define PAT_X86_BIT_PCD 2U +#define PAT_X86_BIT_PAT 4U +#define PAT_X86_BIT_MASK (PAT_X86_BIT_PAT | PAT_X86_BIT_PCD | PAT_X86_BIT_PWT) + +static IMG_BOOL g_write_combining_available = IMG_FALSE; + +#define PROT_TO_PAT_INDEX(v, B) ((v & _PAGE_ ## B) ? PAT_X86_BIT_ ## B : 0) + +static inline IMG_UINT +pvr_pat_index(pgprotval_t prot_val) +{ + IMG_UINT ret = 0; + pgprotval_t val = prot_val & _PAGE_CACHE_MASK; + + ret |= PROT_TO_PAT_INDEX(val, PAT); + ret |= PROT_TO_PAT_INDEX(val, PCD); + ret |= PROT_TO_PAT_INDEX(val, PWT); + + return ret; +} + +static inline IMG_UINT +pvr_pat_entry(u64 pat, IMG_UINT index) +{ + return (IMG_UINT)(pat >> (index * PAT_X86_ENTRY_BITS)) & PAT_X86_BIT_MASK; +} + +static IMG_VOID +PVRLinuxX86PATProbe(IMG_VOID) +{ + /* + * cpu_has_pat indicates whether PAT support is available on the CPU, + * but doesn't indicate if it has been enabled. + */ + if (boot_cpu_has(X86_FEATURE_PAT))/* PRQA S 3335 */ /* ignore 'no function declared' */ + { + u64 pat; + IMG_UINT pat_index; + IMG_UINT pat_entry; + + PVR_TRACE(("%s: PAT available", __FUNCTION__)); + /* + * There is no Linux API for finding out if write combining + * is avaialable through the PAT, so we take the direct + * approach, and see if the PAT MSR contains a write combining + * entry. 
+ */ + rdmsrl(MSR_IA32_CR_PAT, pat); + PVR_TRACE(("%s: Top 32 bits of PAT: 0x%.8x", __FUNCTION__, (IMG_UINT)(pat >> 32))); + PVR_TRACE(("%s: Bottom 32 bits of PAT: 0x%.8x", __FUNCTION__, (IMG_UINT)(pat))); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,0,0)) + pat_index = pvr_pat_index(_PAGE_CACHE_MODE_WC); +#else + pat_index = pvr_pat_index(_PAGE_CACHE_WC); +#endif + PVR_TRACE(("%s: PAT index for write combining: %u", __FUNCTION__, pat_index)); + + pat_entry = pvr_pat_entry(pat, pat_index); + PVR_TRACE(("%s: PAT entry for write combining: 0x%.2x (should be 0x%.2x)", __FUNCTION__, pat_entry, PAT_LINUX_X86_WC)); + +#if defined(SUPPORT_LINUX_X86_WRITECOMBINE) + g_write_combining_available = (IMG_BOOL)(pat_entry == PAT_LINUX_X86_WC); +#endif + } +#if defined(DEBUG) +#if defined(SUPPORT_LINUX_X86_WRITECOMBINE) + if (g_write_combining_available) + { + PVR_TRACE(("%s: Write combining available via PAT", __FUNCTION__)); + } + else + { + PVR_TRACE(("%s: Write combining not available", __FUNCTION__)); + } +#else /* defined(SUPPORT_LINUX_X86_WRITECOMBINE) */ + PVR_TRACE(("%s: Write combining disabled in driver build", __FUNCTION__)); +#endif /* defined(SUPPORT_LINUX_X86_WRITECOMBINE) */ +#endif /* DEBUG */ +} + +pgprot_t +pvr_pgprot_writecombine(pgprot_t prot) +{ + /* + * It would be worth checking from time to time to see if a + * pgprot_writecombine function (or similar) is introduced on Linux for + * x86 processors. If so, this function, and PVRLinuxX86PATProbe can be + * removed, and a macro used to select between pgprot_writecombine and + * pgprot_noncached, depending on the value of + * SUPPORT_LINUX_X86_WRITECOMBINE. + */ + /* PRQA S 0481,0482 2 */ /* scalar expressions */ + return (g_write_combining_available) ?
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,0,0)) + __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_MASK) | _PAGE_CACHE_MODE_WC) : pgprot_noncached(prot); +#else + __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_MASK) | _PAGE_CACHE_WC) : pgprot_noncached(prot); +#endif +} +#endif /* defined(SUPPORT_LINUX_X86_PAT) */ + +IMG_VOID +PVRLinuxMUtilsInit(IMG_VOID) +{ +#if defined(SUPPORT_LINUX_X86_PAT) + PVRLinuxX86PATProbe(); +#endif +} + diff --git a/sgx_km/eurasia_km/services4/srvkm/env/linux/mutils.h b/sgx_km/eurasia_km/services4/srvkm/env/linux/mutils.h new file mode 100644 index 0000000..2cafc7e --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/env/linux/mutils.h @@ -0,0 +1,128 @@ +/*************************************************************************/ /*! +@Title Memory management support utils +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Declares various memory management support functions + for Linux. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#ifndef __IMG_LINUX_MUTILS_H__ +#define __IMG_LINUX_MUTILS_H__ + +#include + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)) +#ifndef AUTOCONF_INCLUDED +#include +#endif +#endif + +#if !(defined(__i386__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))) +#if defined(SUPPORT_LINUX_X86_PAT) +#undef SUPPORT_LINUX_X86_PAT +#endif +#endif + +#if defined(SUPPORT_LINUX_X86_PAT) + pgprot_t pvr_pgprot_writecombine(pgprot_t prot); + #define PGPROT_WC(pv) pvr_pgprot_writecombine(pv) +#else + #if defined(__arm__) || defined(__aarch64__) || defined(__sh__) + #define PGPROT_WC(pv) pgprot_writecombine(pv) + #elif defined(__mips__) + #define PGPROT_WC(pv) pgprot_writecombine(pv) + #elif defined(__i386__) || defined(__x86_64) + /* PAT support supersedes this */ + #define PGPROT_WC(pv) pgprot_noncached(pv) + #else + #define PGPROT_WC(pv) pgprot_noncached(pv) + #error Unsupported architecture! + #endif +#endif + +#define PGPROT_UC(pv) pgprot_noncached(pv) + +#if defined(__i386__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)) + #define IOREMAP(pa, bytes) ioremap_cache(pa, bytes) +#else + #if defined(__arm__) || defined(__aarch64__) + #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,13,0)) + #define IOREMAP(pa, bytes) ioremap_cache(pa, bytes) + #else + #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)) + #define IOREMAP(pa, bytes) ioremap_cached(pa, bytes) + #else + #define IOREMAP(pa, bytes) ioremap(pa, bytes) + #endif + #endif + #else + #define IOREMAP(pa, bytes) ioremap(pa, bytes) + #endif +#endif + +#if defined(SUPPORT_LINUX_X86_PAT) + #if defined(SUPPORT_LINUX_X86_WRITECOMBINE) + #define IOREMAP_WC(pa, bytes) ioremap_wc(pa, bytes) + #else + #define IOREMAP_WC(pa, bytes) ioremap_nocache(pa, bytes) + #endif +#else + #if defined(__arm__) || defined(__aarch64__) + #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)) + #define IOREMAP_WC(pa, bytes) ioremap_wc(pa, bytes) + #else + #if (LINUX_VERSION_CODE 
>= KERNEL_VERSION(2,6,22)) + #define IOREMAP_WC(pa, bytes) ioremap_nocache(pa, bytes) + #else + #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)) || (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)) + #define IOREMAP_WC(pa, bytes) __ioremap(pa, bytes, L_PTE_BUFFERABLE) + #else + #define IOREMAP_WC(pa, bytes) __ioremap(pa, bytes, , L_PTE_BUFFERABLE, 1) + #endif + #endif + #endif + #else + #define IOREMAP_WC(pa, bytes) ioremap_nocache(pa, bytes) + #endif +#endif + +#define IOREMAP_UC(pa, bytes) ioremap_nocache(pa, bytes) + +IMG_VOID PVRLinuxMUtilsInit(IMG_VOID); + +#endif /* __IMG_LINUX_MUTILS_H__ */ + diff --git a/sgx_km/eurasia_km/services4/srvkm/env/linux/osfunc.c b/sgx_km/eurasia_km/services4/srvkm/env/linux/osfunc.c new file mode 100644 index 0000000..273b490 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/env/linux/osfunc.c @@ -0,0 +1,4850 @@ +/*************************************************************************/ /*! +@Title Environment related functions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)) +#ifndef AUTOCONF_INCLUDED +#include +#endif +#endif + +#include +#include +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)) && (LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0)) +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#if defined(MEM_TRACK_INFO_DEBUG) || defined (PVRSRV_DEVMEM_TIME_STATS) +#include +#endif +#include +#include +#include +#if defined(PVR_LINUX_MISR_USING_WORKQUEUE) || \ + defined(PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE) || \ + defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) || \ + defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE) || \ + defined(PVR_LINUX_USING_WORKQUEUES) +#include +#endif + +#include "img_types.h" +#include "services_headers.h" +#include "mm.h" +#include "pvrmmap.h" +#include "mmap.h" +#include "env_data.h" +#include "mutex.h" +#include "event.h" +#include "linkage.h" +#include "pvr_uaccess.h" +#include "lock.h" +#if defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) || defined(PVR_ANDROID_NATIVE_WINDOW_HAS_FENCE) +#include "pvr_sync_common.h" +#endif +#if defined (SUPPORT_ION) +#include "ion.h" +#endif +#if defined(SUPPORT_DMABUF) +#include "pvr_linux_fence.h" +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)) +#define ON_EACH_CPU(func, info, wait) on_each_cpu(func, info, wait) +#else +#define ON_EACH_CPU(func, info, wait) on_each_cpu(func, info, 0, wait) +#endif + +#if defined(PVR_LINUX_USING_WORKQUEUES) && !defined(CONFIG_PREEMPT) +/* + * Services spins at certain points waiting for events (e.g. swap + * chain destrucion). If those events rely on workqueues running, + * it needs to be possible to preempt the waiting thread. + * Removing the need for CONFIG_PREEMPT will require adding preemption + * points at various points in Services. 
+ */ +#error "A preemptible Linux kernel is required when using workqueues" +#endif + +#if defined(EMULATOR) +#define EVENT_OBJECT_TIMEOUT_MS (2000) +#else +#define EVENT_OBJECT_TIMEOUT_MS (100) +#endif /* EMULATOR */ + +#if !defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) +PVRSRV_ERROR OSAllocMem_Impl(IMG_UINT32 ui32Flags, IMG_SIZE_T uiSize, IMG_PVOID *ppvCpuVAddr, IMG_HANDLE *phBlockAlloc) +#else +PVRSRV_ERROR OSAllocMem_Impl(IMG_UINT32 ui32Flags, IMG_SIZE_T uiSize, IMG_PVOID *ppvCpuVAddr, IMG_HANDLE *phBlockAlloc, IMG_CHAR *pszFilename, IMG_UINT32 ui32Line) +#endif +{ + PVR_UNREFERENCED_PARAMETER(ui32Flags); + PVR_UNREFERENCED_PARAMETER(phBlockAlloc); + + if (uiSize > PAGE_SIZE) + { + /* Try to allocate the memory using vmalloc */ +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) + *ppvCpuVAddr = _VMallocWrapper(uiSize, PVRSRV_HAP_CACHED, pszFilename, ui32Line); +#else + *ppvCpuVAddr = VMallocWrapper(uiSize, PVRSRV_HAP_CACHED); +#endif + if (*ppvCpuVAddr) + { + return PVRSRV_OK; + } + } + +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) + *ppvCpuVAddr = _KMallocWrapper(uiSize, GFP_KERNEL | __GFP_NOWARN, pszFilename, ui32Line, ui32Flags & PVRSRV_SWAP_BUFFER_ALLOCATION); +#else + *ppvCpuVAddr = KMallocWrapper(uiSize, GFP_KERNEL | __GFP_NOWARN); +#endif + if (!*ppvCpuVAddr) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + return PVRSRV_OK; +} + +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,24)) + +static inline int is_vmalloc_addr(const void *pvCpuVAddr) +{ + unsigned long lAddr = (unsigned long)pvCpuVAddr; + return lAddr >= VMALLOC_START && lAddr < VMALLOC_END; +} + +#endif /* (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,24)) */ + +#if !defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) +PVRSRV_ERROR OSFreeMem_Impl(IMG_UINT32 ui32Flags, IMG_SIZE_T uiSize, IMG_PVOID pvCpuVAddr, IMG_HANDLE hBlockAlloc) +#else +PVRSRV_ERROR OSFreeMem_Impl(IMG_UINT32 ui32Flags, IMG_SIZE_T uiSize, IMG_PVOID pvCpuVAddr, IMG_HANDLE hBlockAlloc, IMG_CHAR *pszFilename, IMG_UINT32 ui32Line) +#endif +{ + 
PVR_UNREFERENCED_PARAMETER(ui32Flags); + PVR_UNREFERENCED_PARAMETER(uiSize); + PVR_UNREFERENCED_PARAMETER(hBlockAlloc); + + if (is_vmalloc_addr(pvCpuVAddr)) + { +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) + _VFreeWrapper(pvCpuVAddr, pszFilename, ui32Line); +#else + VFreeWrapper(pvCpuVAddr); +#endif + } + else + { +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) + _KFreeWrapper(pvCpuVAddr, pszFilename, ui32Line, ui32Flags & PVRSRV_SWAP_BUFFER_ALLOCATION); +#else + KFreeWrapper(pvCpuVAddr); +#endif + } + + return PVRSRV_OK; +} + + +PVRSRV_ERROR +OSAllocPages_Impl(IMG_UINT32 ui32AllocFlags, + IMG_SIZE_T uiSize, + IMG_UINT32 ui32PageSize, + IMG_PVOID pvPrivData, + IMG_UINT32 ui32PrivDataLength, + IMG_HANDLE hBMHandle, + IMG_VOID **ppvCpuVAddr, + IMG_HANDLE *phOSMemHandle) +{ + LinuxMemArea *psLinuxMemArea; + + PVR_UNREFERENCED_PARAMETER(ui32PageSize); + +#if 0 + /* For debug: force all OSAllocPages allocations to have a kernel + * virtual address */ + if(ui32AllocFlags & PVRSRV_HAP_SINGLE_PROCESS) + { + ui32AllocFlags &= ~PVRSRV_HAP_SINGLE_PROCESS; + ui32AllocFlags |= PVRSRV_HAP_MULTI_PROCESS; + } +#endif + + switch(ui32AllocFlags & PVRSRV_HAP_MAPTYPE_MASK) + { + case PVRSRV_HAP_KERNEL_ONLY: + { + psLinuxMemArea = NewVMallocLinuxMemArea(uiSize, ui32AllocFlags); + if(!psLinuxMemArea) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + break; + } + case PVRSRV_HAP_SINGLE_PROCESS: + { + /* Currently PVRSRV_HAP_SINGLE_PROCESS implies that we dont need a + * kernel virtual mapping, but will need a user space virtual mapping */ + + psLinuxMemArea = NewAllocPagesLinuxMemArea(uiSize, ui32AllocFlags); + if(!psLinuxMemArea) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + PVRMMapRegisterArea(psLinuxMemArea); + break; + } + + case PVRSRV_HAP_MULTI_PROCESS: + { + /* Currently PVRSRV_HAP_MULTI_PROCESS implies that we need a kernel + * virtual mapping and potentially multiple user space virtual + * mappings: Note: these eat into our limited kernel virtual + * address space. 
*/ + +#if defined(VIVT_CACHE) || defined(__sh__) + /* ARM9 caches are tagged with virtual pages, not physical. As we are going to + * share this memory in different address spaces, we don't want it to be cached. + * ARM11 has physical tagging, so we can cache this memory without fear of virtual + * address aliasing in the TLB, as long as the kernel supports cache colouring for + * VIPT architectures. */ + ui32AllocFlags &= ~PVRSRV_HAP_CACHED; +#endif + psLinuxMemArea = NewVMallocLinuxMemArea(uiSize, ui32AllocFlags); + if(!psLinuxMemArea) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + PVRMMapRegisterArea(psLinuxMemArea); + break; + } + default: + PVR_DPF((PVR_DBG_ERROR, "OSAllocPages: invalid flags 0x%x\n", ui32AllocFlags)); + *ppvCpuVAddr = NULL; + *phOSMemHandle = (IMG_HANDLE)0; + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* + In case of sparse mapping we need to handle back to the BM as it + knows the mapping info + */ + if (ui32AllocFlags & PVRSRV_MEM_SPARSE) + { + psLinuxMemArea->hBMHandle = hBMHandle; + } + + *ppvCpuVAddr = LinuxMemAreaToCpuVAddr(psLinuxMemArea); + *phOSMemHandle = psLinuxMemArea; + + LinuxMemAreaRegister(psLinuxMemArea); + + return PVRSRV_OK; +} + + +PVRSRV_ERROR +OSFreePages(IMG_UINT32 ui32AllocFlags, IMG_SIZE_T uiBytes, IMG_VOID *pvCpuVAddr, IMG_HANDLE hOSMemHandle) +{ + LinuxMemArea *psLinuxMemArea; + PVRSRV_ERROR eError; + + PVR_UNREFERENCED_PARAMETER(uiBytes); + PVR_UNREFERENCED_PARAMETER(pvCpuVAddr); + + psLinuxMemArea = (LinuxMemArea *)hOSMemHandle; + + switch(ui32AllocFlags & PVRSRV_HAP_MAPTYPE_MASK) + { + case PVRSRV_HAP_KERNEL_ONLY: + break; + case PVRSRV_HAP_SINGLE_PROCESS: + case PVRSRV_HAP_MULTI_PROCESS: + eError = PVRMMapRemoveRegisteredArea(psLinuxMemArea); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "OSFreePages(ui32AllocFlags=0x%08X, ui32Bytes=%" SIZE_T_FMT_LEN "u, " + "pvCpuVAddr=%p, hOSMemHandle=%p) FAILED!", + ui32AllocFlags, uiBytes, pvCpuVAddr, hOSMemHandle)); + return eError; + } + break; + default: + 
PVR_DPF((PVR_DBG_ERROR,"%s: invalid flags 0x%x\n", + __FUNCTION__, ui32AllocFlags)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + LinuxMemAreaDeepFree(psLinuxMemArea); + + return PVRSRV_OK; +} + + +PVRSRV_ERROR +OSGetSubMemHandle(IMG_HANDLE hOSMemHandle, + IMG_UINTPTR_T uiByteOffset, + IMG_SIZE_T uiBytes, + IMG_UINT32 ui32Flags, + IMG_HANDLE *phOSMemHandleRet) +{ + LinuxMemArea *psParentLinuxMemArea, *psLinuxMemArea; + PVRSRV_ERROR eError; + + psParentLinuxMemArea = (LinuxMemArea *)hOSMemHandle; + + psLinuxMemArea = NewSubLinuxMemArea(psParentLinuxMemArea, uiByteOffset, uiBytes); + if(!psLinuxMemArea) + { + *phOSMemHandleRet = NULL; + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + *phOSMemHandleRet = psLinuxMemArea; + + /* KERNEL_ONLY areas are never mmapable. */ + if(ui32Flags & PVRSRV_HAP_KERNEL_ONLY) + { + return PVRSRV_OK; + } + + eError = PVRMMapRegisterArea(psLinuxMemArea); + if(eError != PVRSRV_OK) + { + goto failed_register_area; + } + + return PVRSRV_OK; + +failed_register_area: + *phOSMemHandleRet = NULL; + LinuxMemAreaDeepFree(psLinuxMemArea); + return eError; +} + +PVRSRV_ERROR +OSReleaseSubMemHandle(IMG_VOID *hOSMemHandle, IMG_UINT32 ui32Flags) +{ + LinuxMemArea *psLinuxMemArea; + PVRSRV_ERROR eError; + + psLinuxMemArea = (LinuxMemArea *)hOSMemHandle; + PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_SUB_ALLOC); + + if((ui32Flags & PVRSRV_HAP_KERNEL_ONLY) == 0) + { + eError = PVRMMapRemoveRegisteredArea(psLinuxMemArea); + if(eError != PVRSRV_OK) + { + return eError; + } + } + LinuxMemAreaDeepFree(psLinuxMemArea); + + return PVRSRV_OK; +} + + +IMG_CPU_PHYADDR +OSMemHandleToCpuPAddr(IMG_VOID *hOSMemHandle, IMG_UINTPTR_T uiByteOffset) +{ + PVR_ASSERT(hOSMemHandle); + + return LinuxMemAreaToCpuPAddr(hOSMemHandle, uiByteOffset); +} + + +IMG_BOOL OSMemHandleIsPhysContig(IMG_VOID *hOSMemHandle) +{ + LinuxMemArea *psLinuxMemArea = (LinuxMemArea *)hOSMemHandle; + + PVR_ASSERT(psLinuxMemArea); + + if(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_EXTERNAL_KV) + 
return psLinuxMemArea->uData.sExternalKV.bPhysContig; + + return IMG_FALSE; +} + + +/*! +****************************************************************************** + + @Function OSMemCopy + + @Description Copies memory around + + @Input pvDst - pointer to dst + @Output pvSrc - pointer to src + @Input ui32Size - bytes to copy + + @Return none + +******************************************************************************/ +IMG_VOID OSMemCopy(IMG_VOID *pvDst, IMG_VOID *pvSrc, IMG_SIZE_T uiSize) +{ +#if defined(USE_UNOPTIMISED_MEMCPY) + IMG_UINT8 *Src,*Dst; + IMG_INT i; + + Src=(IMG_UINT8 *)pvSrc; + Dst=(IMG_UINT8 *)pvDst; + for(i=0;iui32Lock; + + if(*pui32Access) + { + if(psResource->ui32ID == ui32ID) + { + psResource->ui32ID = 0; + *pui32Access = 0; + } + else + { + PVR_DPF((PVR_DBG_MESSAGE,"OSBreakResourceLock: Resource is not locked for this process.")); + } + } + else + { + PVR_DPF((PVR_DBG_MESSAGE,"OSBreakResourceLock: Resource is not locked")); + } +} + + +/*! +****************************************************************************** + + @Function OSCreateResource + + @Description creates a OS dependant resource object + + @Input phResource - pointer to OS dependent resource + + @Return error status + +******************************************************************************/ +PVRSRV_ERROR OSCreateResource(PVRSRV_RESOURCE *psResource) +{ +#if !defined(PVR_LINUX_USING_WORKQUEUES) && defined(__linux__) + PVRSRV_ERROR eError = PVRSRV_OK; +#endif + + psResource->ui32ID = 0; + psResource->ui32Lock = 0; +#if !defined(PVR_LINUX_USING_WORKQUEUES) && defined(__linux__) + psResource->pOSSyncPrimitive = IMG_NULL; + + eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(spinlock_t), (IMG_VOID**)&psResource->pOSSyncPrimitive, IMG_NULL, + "Resource Spinlock"); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"OSCreateResource: Spinlock could not be alloc'd")); + return eError; + } + + spin_lock_init((spinlock_t*)psResource->pOSSyncPrimitive); +#endif 
/* !defined(PVR_LINUX_USING_WORKQUEUES) && defined(__linux__) */ + + return PVRSRV_OK; +} + + +/*! +****************************************************************************** + + @Function OSDestroyResource + + @Description destroys an OS dependant resource object + + @Input phResource - pointer to OS dependent resource + + @Return error status + +******************************************************************************/ +PVRSRV_ERROR OSDestroyResource (PVRSRV_RESOURCE *psResource) +{ +#if !defined(PVR_LINUX_USING_WORKQUEUES) && defined(__linux__) + if (psResource->pOSSyncPrimitive) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(spinlock_t), (IMG_VOID*)psResource->pOSSyncPrimitive, IMG_NULL); + } +#endif /* !defined(PVR_LINUX_USING_WORKQUEUES) && defined(__linux__) */ + + OSBreakResourceLock (psResource, psResource->ui32ID); + + return PVRSRV_OK; +} + + +/*! +****************************************************************************** + + @Function OSInitEnvData + + @Description Allocates space for env specific data + + @Input ppvEnvSpecificData - pointer to pointer in which to return + allocated data. + @Input ui32MMUMode - MMU mode. 
+ + @Return nothing + +******************************************************************************/ +PVRSRV_ERROR OSInitEnvData(IMG_PVOID *ppvEnvSpecificData) +{ + ENV_DATA *psEnvData; + PVRSRV_ERROR eError; + + /* allocate env specific data */ + eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(ENV_DATA), (IMG_VOID **)&psEnvData, IMG_NULL, + "Environment Data"); + if (eError != PVRSRV_OK) + { + return eError; + } + + eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, PVRSRV_MAX_BRIDGE_IN_SIZE + PVRSRV_MAX_BRIDGE_OUT_SIZE, + &psEnvData->pvBridgeData, IMG_NULL, + "Bridge Data"); + if (eError != PVRSRV_OK) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(ENV_DATA), psEnvData, IMG_NULL); + /*not nulling pointer, out of scope*/ + return eError; + } + + + /* ISR installation flags */ + psEnvData->bMISRInstalled = IMG_FALSE; + psEnvData->bLISRInstalled = IMG_FALSE; + + /* copy structure back */ + *ppvEnvSpecificData = psEnvData; + + return PVRSRV_OK; +} + + +/*! +****************************************************************************** + + @Function OSDeInitEnvData + + @Description frees env specific data memory + + @Input pvEnvSpecificData - pointer to private structure + + @Return PVRSRV_OK on success else PVRSRV_ERROR_OUT_OF_MEMORY + +******************************************************************************/ +PVRSRV_ERROR OSDeInitEnvData(IMG_PVOID pvEnvSpecificData) +{ + ENV_DATA *psEnvData = (ENV_DATA*)pvEnvSpecificData; + + PVR_ASSERT(!psEnvData->bMISRInstalled); + PVR_ASSERT(!psEnvData->bLISRInstalled); + + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, PVRSRV_MAX_BRIDGE_IN_SIZE + PVRSRV_MAX_BRIDGE_OUT_SIZE, psEnvData->pvBridgeData, IMG_NULL); + psEnvData->pvBridgeData = IMG_NULL; + + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(ENV_DATA), pvEnvSpecificData, IMG_NULL); + /*not nulling pointer, copy on stack*/ + + return PVRSRV_OK; +} + + +/*! 
+****************************************************************************** + + @Function OSReleaseThreadQuanta + + @Description + Releases thread quanta + + @Return nothing + +******************************************************************************/ +IMG_VOID OSReleaseThreadQuanta(IMG_VOID) +{ + schedule(); +} + +#if defined (PVRSRV_DEVMEM_TIME_STATS) +/*! +****************************************************************************** + + @Function OSClockMonotonicus + + @Description This function returns the raw monotonic clock time in microseconds + (i.e. un-affected by NTP or similar changes) + + @Input void + + @Return - monotonic clock time in (us) + +******************************************************************************/ +IMG_UINT64 OSClockMonotonicus(IMG_VOID) +{ + struct timespec ts; + + getrawmonotonic(&ts); + + return ((unsigned long)ts.tv_sec * 1000000ul + (unsigned long)ts.tv_nsec / 1000ul); +} +#endif + + +/*! +****************************************************************************** + + @Function OSClockus + + @Description + This function returns the clock in microseconds + + @Input void + + @Return - clock (us) + +******************************************************************************/ +IMG_UINT32 OSClockus(IMG_VOID) +{ + IMG_UINT32 time, j = jiffies; + + time = j * (1000000 / HZ); + + return time; +} + + +IMG_VOID OSWaitus(IMG_UINT32 ui32Timeus) +{ + udelay(ui32Timeus); +} + + +IMG_VOID OSSleepms(IMG_UINT32 ui32Timems) +{ + msleep(ui32Timems); +} + + +/*! +****************************************************************************** + + @Function OSFuncHighResTimerCreate + + @Description + This function creates a high res timer who's handle is returned + + @Input nothing + + @Return handle + +******************************************************************************/ +IMG_HANDLE OSFuncHighResTimerCreate(IMG_VOID) +{ + /* We don't need a handle, but we must return non-NULL */ + return (IMG_HANDLE) 1; +} + +/*! 
+****************************************************************************** + + @Function OSFuncHighResTimerGetus + + @Description + This function returns the current timestamp in us + + @Input nothing + + @Return handle + +******************************************************************************/ +IMG_UINT32 OSFuncHighResTimerGetus(IMG_HANDLE hTimer) +{ + return (IMG_UINT32) jiffies_to_usecs(jiffies); +} + +/*! +****************************************************************************** + + @Function OSFuncHighResTimerDestroy + + @Description + This function will destroy the high res timer + + @Input nothing + + @Return handle + +******************************************************************************/ +IMG_VOID OSFuncHighResTimerDestroy(IMG_HANDLE hTimer) +{ + PVR_UNREFERENCED_PARAMETER(hTimer); +} + +/*! +****************************************************************************** + + @Function OSGetCurrentProcessIDKM + + @Description Returns handle for current process + + @Return ID of current process + +*****************************************************************************/ +IMG_UINT32 OSGetCurrentProcessIDKM(IMG_VOID) +{ + if (in_interrupt()) + { + return KERNEL_ID; + } + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)) + return (IMG_UINT32)current->pgrp; +#else +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)) + return (IMG_UINT32)task_tgid_nr(current); +#else + return (IMG_UINT32)current->tgid; +#endif +#endif +} + +#if defined(MEM_TRACK_INFO_DEBUG) +/*! +****************************************************************************** + + @Function OSGetCurrentTimeInUSecsKM + + @Description Returns current time in usecs + + @Return ID of current process + +*****************************************************************************/ +IMG_UINT32 OSGetCurrentTimeInUSecsKM(IMG_VOID) +{ + struct timeval tv; + do_gettimeofday(&tv); + return (tv.tv_sec * 1000000 + tv.tv_usec); +} +#endif + +/*! 
+****************************************************************************** + + @Function OSGetPageSize + + @Description gets page size + + @Return page size + +******************************************************************************/ +IMG_UINT32 OSGetPageSize(IMG_VOID) +{ +#if defined(__sh__) + IMG_UINT32 ui32ReturnValue = PAGE_SIZE; + + return (ui32ReturnValue); +#else + return PAGE_SIZE; +#endif +} + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)) +/*! +****************************************************************************** + + @Function DeviceISRWrapper + + @Description wrapper for Device ISR function to conform to ISR OS interface + + @Return + +******************************************************************************/ +static irqreturn_t DeviceISRWrapper(int irq, void *dev_id +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) + , struct pt_regs *regs +#endif + ) +{ + PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE*)dev_id; + SYS_DATA *psSysData = psDeviceNode->psSysData; + ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData; + IMG_BOOL bStatus = IMG_FALSE; + + PVR_UNREFERENCED_PARAMETER(irq); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) + PVR_UNREFERENCED_PARAMETER(regs); +#endif + + if (psEnvData->bLISRInstalled) + { + bStatus = PVRSRVDeviceLISR(psDeviceNode); + if (bStatus) + { + OSScheduleMISR((IMG_VOID *)psSysData); + } + } + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)) + return bStatus ? IRQ_HANDLED : IRQ_NONE; +#endif +} + + + +/*! +****************************************************************************** + + @Function SystemISRWrapper + + @Description wrapper for System ISR function to conform to ISR OS interface + + @Input Interrupt - NT interrupt object. 
+ @Input Context - Context parameter + + @Return + +******************************************************************************/ +static irqreturn_t SystemISRWrapper(int irq, void *dev_id +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) + , struct pt_regs *regs +#endif + ) +{ + SYS_DATA *psSysData = (SYS_DATA *)dev_id; + ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData; + IMG_BOOL bStatus = IMG_FALSE; + + PVR_UNREFERENCED_PARAMETER(irq); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) + PVR_UNREFERENCED_PARAMETER(regs); +#endif + + if (psEnvData->bLISRInstalled) + { + bStatus = PVRSRVSystemLISR(psSysData); + if (bStatus) + { + OSScheduleMISR((IMG_VOID *)psSysData); + } + } + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)) + return bStatus ? IRQ_HANDLED : IRQ_NONE; +#endif +} +/*! +****************************************************************************** + + @Function OSInstallDeviceLISR + + @Description Installs a Device ISR + + @Input pvSysData + @Input ui32Irq - IRQ number + @Input pszISRName - ISR name + @Input pvDeviceNode - device node contains ISR function and data argument + + @Return error status + +******************************************************************************/ +PVRSRV_ERROR OSInstallDeviceLISR(IMG_VOID *pvSysData, + IMG_UINT32 ui32Irq, + IMG_CHAR *pszISRName, + IMG_VOID *pvDeviceNode) +{ + SYS_DATA *psSysData = (SYS_DATA*)pvSysData; + ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData; + + if (psEnvData->bLISRInstalled) + { + PVR_DPF((PVR_DBG_ERROR, "OSInstallDeviceLISR: An ISR has already been installed: IRQ %d cookie %p", psEnvData->ui32IRQ, psEnvData->pvISRCookie)); + return PVRSRV_ERROR_ISR_ALREADY_INSTALLED; + } + + PVR_TRACE(("Installing device LISR %s on IRQ %d with cookie %p", pszISRName, ui32Irq, pvDeviceNode)); + + if(request_irq(ui32Irq, DeviceISRWrapper, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)) + SA_SHIRQ +#else + IRQF_SHARED +#endif + , pszISRName, pvDeviceNode)) + { + 
PVR_DPF((PVR_DBG_ERROR,"OSInstallDeviceLISR: Couldn't install device LISR on IRQ %d", ui32Irq)); + + return PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR; + } + + psEnvData->ui32IRQ = ui32Irq; + psEnvData->pvISRCookie = pvDeviceNode; + psEnvData->bLISRInstalled = IMG_TRUE; + + return PVRSRV_OK; +} + +/*! +****************************************************************************** + + @Function OSUninstallDeviceLISR + + @Description Uninstalls a Device ISR + + @Input pvSysData - sysdata + + @Return error status + +******************************************************************************/ +PVRSRV_ERROR OSUninstallDeviceLISR(IMG_VOID *pvSysData) +{ + SYS_DATA *psSysData = (SYS_DATA*)pvSysData; + ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData; + + if (!psEnvData->bLISRInstalled) + { + PVR_DPF((PVR_DBG_ERROR, "OSUninstallDeviceLISR: No LISR has been installed")); + return PVRSRV_ERROR_ISR_NOT_INSTALLED; + } + + PVR_TRACE(("Uninstalling device LISR on IRQ %d with cookie %p", psEnvData->ui32IRQ, psEnvData->pvISRCookie)); + + psEnvData->bLISRInstalled = IMG_FALSE; + + free_irq(psEnvData->ui32IRQ, psEnvData->pvISRCookie); + + return PVRSRV_OK; +} + + +/*! 
+****************************************************************************** + + @Function OSInstallSystemLISR + + @Description Installs a System ISR + + @Input psSysData + @Input ui32Irq - IRQ number + + @Return error status + +******************************************************************************/ +PVRSRV_ERROR OSInstallSystemLISR(IMG_VOID *pvSysData, IMG_UINT32 ui32Irq) +{ + SYS_DATA *psSysData = (SYS_DATA*)pvSysData; + ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData; + + if (psEnvData->bLISRInstalled) + { + PVR_DPF((PVR_DBG_ERROR, "OSInstallSystemLISR: An LISR has already been installed: IRQ %d cookie %p", psEnvData->ui32IRQ, psEnvData->pvISRCookie)); + return PVRSRV_ERROR_ISR_ALREADY_INSTALLED; + } + + PVR_TRACE(("Installing system LISR on IRQ %d with cookie %p", ui32Irq, pvSysData)); + + if(request_irq(ui32Irq, SystemISRWrapper, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)) + SA_SHIRQ +#else + IRQF_SHARED +#endif + , PVRSRV_MODNAME, pvSysData)) + { + PVR_DPF((PVR_DBG_ERROR,"OSInstallSystemLISR: Couldn't install system LISR on IRQ %d", ui32Irq)); + + return PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR; + } + + psEnvData->ui32IRQ = ui32Irq; + psEnvData->pvISRCookie = pvSysData; + psEnvData->bLISRInstalled = IMG_TRUE; + + return PVRSRV_OK; +} + + +/*! 
+****************************************************************************** + + @Function OSUninstallSystemLISR + + @Description Uninstalls a System ISR + + @Input psSysData + + @Return error status + +******************************************************************************/ +PVRSRV_ERROR OSUninstallSystemLISR(IMG_VOID *pvSysData) +{ + SYS_DATA *psSysData = (SYS_DATA*)pvSysData; + ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData; + + if (!psEnvData->bLISRInstalled) + { + PVR_DPF((PVR_DBG_ERROR, "OSUninstallSystemLISR: No LISR has been installed")); + return PVRSRV_ERROR_ISR_NOT_INSTALLED; + } + + PVR_TRACE(("Uninstalling system LISR on IRQ %d with cookie %p", psEnvData->ui32IRQ, psEnvData->pvISRCookie)); + + psEnvData->bLISRInstalled = IMG_FALSE; + + free_irq(psEnvData->ui32IRQ, psEnvData->pvISRCookie); + + return PVRSRV_OK; +} + +#if defined(PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE) +/*! +****************************************************************************** + + @Function MISRWrapper + + @Description OS dependent MISR wrapper + + @Input psSysData + + @Return error status + +******************************************************************************/ +static void MISRWrapper( +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)) + void *data +#else + struct work_struct *data +#endif +) +{ + ENV_DATA *psEnvData = container_of(data, ENV_DATA, sMISRWork); + SYS_DATA *psSysData = (SYS_DATA *)psEnvData->pvMISRData; + + PVRSRVMISR(psSysData); + +#if defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) || defined(PVR_ANDROID_NATIVE_WINDOW_HAS_FENCE) + PVRSyncUpdateAllSyncs(); +#endif +#if defined(SUPPORT_DMABUF) + PVRLinuxFenceCheckAll(); +#endif +} + + +/*! 
+****************************************************************************** + + @Function OSInstallMISR + + @Description Installs an OS dependent MISR + + @Input psSysData + + @Return error status + +******************************************************************************/ +PVRSRV_ERROR OSInstallMISR(IMG_VOID *pvSysData) +{ + SYS_DATA *psSysData = (SYS_DATA*)pvSysData; + ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData; + + if (psEnvData->bMISRInstalled) + { + PVR_DPF((PVR_DBG_ERROR, "OSInstallMISR: An MISR has already been installed")); + return PVRSRV_ERROR_ISR_ALREADY_INSTALLED; + } + + PVR_TRACE(("Installing MISR with cookie %p", pvSysData)); + + psEnvData->psWorkQueue = create_singlethread_workqueue("pvr_workqueue"); + + if (psEnvData->psWorkQueue == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "OSInstallMISR: create_singlethreaded_workqueue failed")); + return PVRSRV_ERROR_UNABLE_TO_CREATE_THREAD; + } + + INIT_WORK(&psEnvData->sMISRWork, MISRWrapper +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)) + , (void *)&psEnvData->sMISRWork +#endif + ); + + psEnvData->pvMISRData = pvSysData; + psEnvData->bMISRInstalled = IMG_TRUE; + + return PVRSRV_OK; +} + + +/*! +****************************************************************************** + + @Function OSUninstallMISR + + @Description Uninstalls an OS dependent MISR + + @Input psSysData + + @Return error status + +******************************************************************************/ +PVRSRV_ERROR OSUninstallMISR(IMG_VOID *pvSysData) +{ + SYS_DATA *psSysData = (SYS_DATA*)pvSysData; + ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData; + + if (!psEnvData->bMISRInstalled) + { + PVR_DPF((PVR_DBG_ERROR, "OSUninstallMISR: No MISR has been installed")); + return PVRSRV_ERROR_ISR_NOT_INSTALLED; + } + + PVR_TRACE(("Uninstalling MISR")); + + destroy_workqueue(psEnvData->psWorkQueue); + + psEnvData->bMISRInstalled = IMG_FALSE; + + return PVRSRV_OK; +} + + +/*! 
+****************************************************************************** + + @Function OSScheduleMISR + + @Description Schedules an OS dependent MISR + + @Input pvSysData + + @Return error status + +******************************************************************************/ +PVRSRV_ERROR OSScheduleMISR(IMG_VOID *pvSysData) +{ + SYS_DATA *psSysData = (SYS_DATA*)pvSysData; + ENV_DATA *psEnvData = (ENV_DATA*)psSysData->pvEnvSpecificData; + + if (psEnvData->bMISRInstalled) + { + queue_work(psEnvData->psWorkQueue, &psEnvData->sMISRWork); + } + + return PVRSRV_OK; +} +#else /* defined(PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE) */ +#if defined(PVR_LINUX_MISR_USING_WORKQUEUE) +/*! +****************************************************************************** + + @Function MISRWrapper + + @Description OS dependent MISR wrapper + + @Input psSysData + + @Return error status + +******************************************************************************/ +static void MISRWrapper( +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)) + void *data +#else + struct work_struct *data +#endif +) +{ + ENV_DATA *psEnvData = container_of(data, ENV_DATA, sMISRWork); + SYS_DATA *psSysData = (SYS_DATA *)psEnvData->pvMISRData; + + PVRSRVMISR(psSysData); +} + + +/*! 
+****************************************************************************** + + @Function OSInstallMISR + + @Description Installs an OS dependent MISR + + @Input psSysData + + @Return error status + +******************************************************************************/ +PVRSRV_ERROR OSInstallMISR(IMG_VOID *pvSysData) +{ + SYS_DATA *psSysData = (SYS_DATA*)pvSysData; + ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData; + + if (psEnvData->bMISRInstalled) + { + PVR_DPF((PVR_DBG_ERROR, "OSInstallMISR: An MISR has already been installed")); + return PVRSRV_ERROR_ISR_ALREADY_INSTALLED; + } + + PVR_TRACE(("Installing MISR with cookie %p", pvSysData)); + + INIT_WORK(&psEnvData->sMISRWork, MISRWrapper +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)) + , (void *)&psEnvData->sMISRWork +#endif + ); + + psEnvData->pvMISRData = pvSysData; + psEnvData->bMISRInstalled = IMG_TRUE; + + return PVRSRV_OK; +} + + +/*! +****************************************************************************** + + @Function OSUninstallMISR + + @Description Uninstalls an OS dependent MISR + + @Input psSysData + + @Return error status + +******************************************************************************/ +PVRSRV_ERROR OSUninstallMISR(IMG_VOID *pvSysData) +{ + SYS_DATA *psSysData = (SYS_DATA*)pvSysData; + ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData; + + if (!psEnvData->bMISRInstalled) + { + PVR_DPF((PVR_DBG_ERROR, "OSUninstallMISR: No MISR has been installed")); + return PVRSRV_ERROR_ISR_NOT_INSTALLED; + } + + PVR_TRACE(("Uninstalling MISR")); + + flush_scheduled_work(); + + psEnvData->bMISRInstalled = IMG_FALSE; + + return PVRSRV_OK; +} + + +/*! 
+****************************************************************************** + + @Function OSScheduleMISR + + @Description Schedules an OS dependent MISR + + @Input pvSysData + + @Return error status + +******************************************************************************/ +PVRSRV_ERROR OSScheduleMISR(IMG_VOID *pvSysData) +{ + SYS_DATA *psSysData = (SYS_DATA*)pvSysData; + ENV_DATA *psEnvData = (ENV_DATA*)psSysData->pvEnvSpecificData; + + if (psEnvData->bMISRInstalled) + { + schedule_work(&psEnvData->sMISRWork); + } + + return PVRSRV_OK; +} + +#else /* #if defined(PVR_LINUX_MISR_USING_WORKQUEUE) */ + + +/*! +****************************************************************************** + + @Function MISRWrapper + + @Description OS dependent MISR wrapper + + @Input psSysData + + @Return error status + +******************************************************************************/ +static void MISRWrapper(unsigned long data) +{ + SYS_DATA *psSysData; + + psSysData = (SYS_DATA *)data; + + PVRSRVMISR(psSysData); +} + + +/*! +****************************************************************************** + + @Function OSInstallMISR + + @Description Installs an OS dependent MISR + + @Input psSysData + + @Return error status + +******************************************************************************/ +PVRSRV_ERROR OSInstallMISR(IMG_VOID *pvSysData) +{ + SYS_DATA *psSysData = (SYS_DATA*)pvSysData; + ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData; + + if (psEnvData->bMISRInstalled) + { + PVR_DPF((PVR_DBG_ERROR, "OSInstallMISR: An MISR has already been installed")); + return PVRSRV_ERROR_ISR_ALREADY_INSTALLED; + } + + PVR_TRACE(("Installing MISR with cookie %p", pvSysData)); + + tasklet_init(&psEnvData->sMISRTasklet, MISRWrapper, (unsigned long)pvSysData); + + psEnvData->bMISRInstalled = IMG_TRUE; + + return PVRSRV_OK; +} + + +/*! 
+****************************************************************************** + + @Function OSUninstallMISR + + @Description Uninstalls an OS dependent MISR + + @Input psSysData + + @Return error status + +******************************************************************************/ +PVRSRV_ERROR OSUninstallMISR(IMG_VOID *pvSysData) +{ + SYS_DATA *psSysData = (SYS_DATA*)pvSysData; + ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData; + + if (!psEnvData->bMISRInstalled) + { + PVR_DPF((PVR_DBG_ERROR, "OSUninstallMISR: No MISR has been installed")); + return PVRSRV_ERROR_ISR_NOT_INSTALLED; + } + + PVR_TRACE(("Uninstalling MISR")); + + tasklet_kill(&psEnvData->sMISRTasklet); + + psEnvData->bMISRInstalled = IMG_FALSE; + + return PVRSRV_OK; +} + +/*! +****************************************************************************** + + @Function OSScheduleMISR + + @Description Schedules an OS dependent MISR + + @Input pvSysData + + @Return error status + +******************************************************************************/ +PVRSRV_ERROR OSScheduleMISR(IMG_VOID *pvSysData) +{ + SYS_DATA *psSysData = (SYS_DATA*)pvSysData; + ENV_DATA *psEnvData = (ENV_DATA*)psSysData->pvEnvSpecificData; + + if (psEnvData->bMISRInstalled) + { + tasklet_schedule(&psEnvData->sMISRTasklet); + } + + return PVRSRV_OK; +} + +#endif /* #if defined(PVR_LINUX_MISR_USING_WORKQUEUE) */ +#endif /* #if defined(PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE) */ + +#endif /* #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)) */ + +IMG_VOID OSPanic(IMG_VOID) +{ + BUG(); +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)) +#define OS_TAS(p) xchg((p), 1) +#else +#define OS_TAS(p) tas(p) +#endif +/*! +****************************************************************************** + + @Function OSLockResource + + @Description locks an OS dependant Resource + + @Input phResource - pointer to OS dependent Resource + @Input bBlock - do we want to block? 
+ + @Return error status + +******************************************************************************/ +PVRSRV_ERROR OSLockResource ( PVRSRV_RESOURCE *psResource, + IMG_UINT32 ui32ID) + +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + if(!OS_TAS(&psResource->ui32Lock)) + psResource->ui32ID = ui32ID; + else + eError = PVRSRV_ERROR_UNABLE_TO_LOCK_RESOURCE; + + return eError; +} + + +/*! +****************************************************************************** + + @Function OSUnlockResource + + @Description unlocks an OS dependant resource + + @Input phResource - pointer to OS dependent resource structure + + @Return + +******************************************************************************/ +PVRSRV_ERROR OSUnlockResource (PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID) +{ + volatile IMG_UINT32 *pui32Access = (volatile IMG_UINT32 *)&psResource->ui32Lock; + PVRSRV_ERROR eError = PVRSRV_OK; + + if(*pui32Access) + { + if(psResource->ui32ID == ui32ID) + { + psResource->ui32ID = 0; + smp_mb(); + *pui32Access = 0; + } + else + { + PVR_DPF((PVR_DBG_ERROR,"OSUnlockResource: Resource %p is not locked with expected value.", psResource)); + PVR_DPF((PVR_DBG_MESSAGE,"Should be %x is actually %x", ui32ID, psResource->ui32ID)); + eError = PVRSRV_ERROR_INVALID_LOCK_ID; + } + } + else + { + PVR_DPF((PVR_DBG_ERROR,"OSUnlockResource: Resource %p is not locked", psResource)); + eError = PVRSRV_ERROR_RESOURCE_NOT_LOCKED; + } + + return eError; +} + + +#if !defined(PVR_LINUX_USING_WORKQUEUES) +/*! +****************************************************************************** + + @Function OSLockResourceAndBlockMISR + + @Description locks an OS dependant Resource and blocks MISR interrupts + + @Input phResource - pointer to OS dependent Resource + @Input bBlock - do we want to block? 
+ + @Return error status + +******************************************************************************/ +PVRSRV_ERROR OSLockResourceAndBlockMISR ( PVRSRV_RESOURCE *psResource, + IMG_UINT32 ui32ID) + +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + spin_lock_bh(psResource->pOSSyncPrimitive); + + if(!OS_TAS(&psResource->ui32Lock)) + psResource->ui32ID = ui32ID; + else + eError = PVRSRV_ERROR_UNABLE_TO_LOCK_RESOURCE; + + return eError; +} + + +/*! +****************************************************************************** + + @Function OSUnlockResourceAndUnblockMISR + + @Description unlocks an OS dependant resource and unblocks MISR interrupts + + @Input phResource - pointer to OS dependent resource structure + + @Return + +******************************************************************************/ +PVRSRV_ERROR OSUnlockResourceAndUnblockMISR (PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID) +{ + volatile IMG_UINT32 *pui32Access = (volatile IMG_UINT32 *)&psResource->ui32Lock; + PVRSRV_ERROR eError = PVRSRV_OK; + + if(*pui32Access) + { + if(psResource->ui32ID == ui32ID) + { + psResource->ui32ID = 0; + smp_mb(); + *pui32Access = 0; + spin_unlock_bh(psResource->pOSSyncPrimitive); + } + else + { + PVR_DPF((PVR_DBG_ERROR,"OSUnlockResourceAndUnblockMISR: Resource %p is not locked with expected value.", psResource)); + PVR_DPF((PVR_DBG_MESSAGE,"Should be %x is actually %x", ui32ID, psResource->ui32ID)); + eError = PVRSRV_ERROR_INVALID_LOCK_ID; + } + } + else + { + PVR_DPF((PVR_DBG_ERROR,"OSUnlockResourceAndUnblockMISR: Resource %p is not locked", psResource)); + eError = PVRSRV_ERROR_RESOURCE_NOT_LOCKED; + } + + return eError; +} +#endif + + +/*! 
+****************************************************************************** + + @Function OSIsResourceLocked + + @Description tests if resource is locked + + @Input phResource - pointer to OS dependent resource structure + + @Return error status + +******************************************************************************/ +IMG_BOOL OSIsResourceLocked (PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID) +{ + volatile IMG_UINT32 *pui32Access = (volatile IMG_UINT32 *)&psResource->ui32Lock; + + return (*(volatile IMG_UINT32 *)pui32Access == 1) && (psResource->ui32ID == ui32ID) + ? IMG_TRUE + : IMG_FALSE; +} + + +#if !defined(SYS_CUSTOM_POWERLOCK_WRAP) +PVRSRV_ERROR OSPowerLockWrap(IMG_BOOL bTryLock) +{ + PVR_UNREFERENCED_PARAMETER(bTryLock); + + return PVRSRV_OK; +} + +IMG_VOID OSPowerLockUnwrap (IMG_VOID) +{ +} +#endif /* SYS_CUSTOM_POWERLOCK_WRAP */ + + +IMG_CPU_PHYADDR OSMapLinToCPUPhys(IMG_HANDLE hOSMemHandle, + IMG_VOID *pvLinAddr) +{ + IMG_CPU_PHYADDR CpuPAddr; + LinuxMemArea *psLinuxMemArea; + IMG_UINTPTR_T uiByteOffset; + IMG_UINT32 ui32ByteOffset; + + PVR_ASSERT(hOSMemHandle != IMG_NULL); + + psLinuxMemArea = (LinuxMemArea *)hOSMemHandle; + + uiByteOffset = (IMG_UINTPTR_T)pvLinAddr - (IMG_UINTPTR_T)LinuxMemAreaToCpuVAddr(psLinuxMemArea); + ui32ByteOffset = (IMG_UINT32)uiByteOffset; + + CpuPAddr = LinuxMemAreaToCpuPAddr(hOSMemHandle, ui32ByteOffset); + + return CpuPAddr; +} + + +/*! 
+****************************************************************************** + + @Function OSMapPhysToLin + + @Description Maps the physical memory into linear addr range + + @Input BasePAddr : physical cpu address + + @Input ui32Bytes - bytes to map + + @Input ui32CacheType - cache type + + @Return : Linear addr of mapping on success, else NULL + + ******************************************************************************/ +IMG_VOID * +OSMapPhysToLin(IMG_CPU_PHYADDR BasePAddr, + IMG_SIZE_T uiBytes, + IMG_UINT32 ui32MappingFlags, + IMG_HANDLE *phOSMemHandle) +{ + if(ui32MappingFlags & PVRSRV_HAP_KERNEL_ONLY) + { + /* + * Provide some backwards compatibility, until all callers + * have been updated to pass a non-null OSMemHandle pointer. + * Such callers must not call OSMapLinToCPUPhys. + */ + if(phOSMemHandle == IMG_NULL) + { + IMG_VOID *pvIORemapCookie; + pvIORemapCookie = IORemapWrapper(BasePAddr, uiBytes, ui32MappingFlags); + if(pvIORemapCookie == IMG_NULL) + { + return IMG_NULL; + } + return pvIORemapCookie; + } + else + { + LinuxMemArea *psLinuxMemArea = NewIORemapLinuxMemArea(BasePAddr, uiBytes, ui32MappingFlags); + + if(psLinuxMemArea == IMG_NULL) + { + return IMG_NULL; + } + + *phOSMemHandle = (IMG_HANDLE)psLinuxMemArea; + return LinuxMemAreaToCpuVAddr(psLinuxMemArea); + } + } + + PVR_DPF((PVR_DBG_ERROR, + "OSMapPhysToLin should only be used with PVRSRV_HAP_KERNEL_ONLY " + " (Use OSReservePhys otherwise)")); + + return IMG_NULL; +} + +/*! 
+****************************************************************************** + @Function OSUnMapPhysToLin + @Description Unmaps memory that was mapped with OSMapPhysToLin + @Return TRUE on success, else FALSE +******************************************************************************/ +IMG_BOOL +OSUnMapPhysToLin(IMG_VOID *pvLinAddr, IMG_SIZE_T uiBytes, IMG_UINT32 ui32MappingFlags, IMG_HANDLE hOSMemHandle) +{ + PVR_UNREFERENCED_PARAMETER(uiBytes); + + if(ui32MappingFlags & PVRSRV_HAP_KERNEL_ONLY) + { + if (hOSMemHandle == IMG_NULL) + { + IOUnmapWrapper(pvLinAddr); + } + else + { + LinuxMemArea *psLinuxMemArea = (LinuxMemArea *)hOSMemHandle; + + PVR_ASSERT(LinuxMemAreaToCpuVAddr(psLinuxMemArea) == pvLinAddr); + + FreeIORemapLinuxMemArea(psLinuxMemArea); + } + + return IMG_TRUE; + } + + PVR_DPF((PVR_DBG_ERROR, + "OSUnMapPhysToLin should only be used with PVRSRV_HAP_KERNEL_ONLY " + " (Use OSUnReservePhys otherwise)")); + return IMG_FALSE; +} + +/*! +****************************************************************************** + @Function RegisterExternalMem + @Description Registers external memory for user mode mapping + @Return TRUE on success, else FALSE, MemHandle out +******************************************************************************/ +static PVRSRV_ERROR +RegisterExternalMem(IMG_SYS_PHYADDR *pBasePAddr, + IMG_VOID *pvCPUVAddr, + IMG_UINT32 ui32Bytes, + IMG_BOOL bPhysContig, + IMG_UINT32 ui32MappingFlags, + IMG_HANDLE *phOSMemHandle) +{ + LinuxMemArea *psLinuxMemArea; + + switch(ui32MappingFlags & PVRSRV_HAP_MAPTYPE_MASK) + { + case PVRSRV_HAP_KERNEL_ONLY: + { + psLinuxMemArea = NewExternalKVLinuxMemArea(pBasePAddr, pvCPUVAddr, ui32Bytes, bPhysContig, ui32MappingFlags); + + if(!psLinuxMemArea) + { + return PVRSRV_ERROR_BAD_MAPPING; + } + break; + } + case PVRSRV_HAP_SINGLE_PROCESS: + { + psLinuxMemArea = NewExternalKVLinuxMemArea(pBasePAddr, pvCPUVAddr, ui32Bytes, bPhysContig, ui32MappingFlags); + + if(!psLinuxMemArea) + { + return 
PVRSRV_ERROR_BAD_MAPPING; + } + PVRMMapRegisterArea(psLinuxMemArea); + break; + } + case PVRSRV_HAP_MULTI_PROCESS: + { + /* Currently PVRSRV_HAP_MULTI_PROCESS implies that we need a kernel + * virtual mapping and potentially multiple user space virtual mappings. + * Beware that the kernel virtual address space is a limited resource. + */ +#if defined(VIVT_CACHE) || defined(__sh__) + /* + * ARM9 caches are tagged with virtual pages, not physical. As we are going to + * share this memory in different address spaces, we don't want it to be cached. + * ARM11 has physical tagging, so we can cache this memory without fear of virtual + * address aliasing in the TLB, as long as the kernel supports cache colouring for + * VIPT architectures. + */ + ui32MappingFlags &= ~PVRSRV_HAP_CACHED; +#endif + psLinuxMemArea = NewExternalKVLinuxMemArea(pBasePAddr, pvCPUVAddr, ui32Bytes, bPhysContig, ui32MappingFlags); + + if(!psLinuxMemArea) + { + return PVRSRV_ERROR_BAD_MAPPING; + } + PVRMMapRegisterArea(psLinuxMemArea); + break; + } + default: + PVR_DPF((PVR_DBG_ERROR,"OSRegisterMem : invalid flags 0x%x\n", ui32MappingFlags)); + *phOSMemHandle = (IMG_HANDLE)0; + return PVRSRV_ERROR_INVALID_FLAGS; + } + + *phOSMemHandle = (IMG_HANDLE)psLinuxMemArea; + + LinuxMemAreaRegister(psLinuxMemArea); + + return PVRSRV_OK; +} + + +/*! 
+****************************************************************************** + @Function OSRegisterMem + @Description Registers external memory for user mode mapping + @Output phOSMemHandle - handle to registered memory + @Return TRUE on success, else FALSE +******************************************************************************/ +PVRSRV_ERROR +OSRegisterMem(IMG_CPU_PHYADDR BasePAddr, + IMG_VOID *pvCPUVAddr, + IMG_SIZE_T uiBytes, + IMG_UINT32 ui32MappingFlags, + IMG_HANDLE *phOSMemHandle) +{ + IMG_SYS_PHYADDR SysPAddr = SysCpuPAddrToSysPAddr(BasePAddr); + + return RegisterExternalMem(&SysPAddr, pvCPUVAddr, uiBytes, IMG_TRUE, ui32MappingFlags, phOSMemHandle); +} + + +PVRSRV_ERROR OSRegisterDiscontigMem(IMG_SYS_PHYADDR *pBasePAddr, IMG_VOID *pvCPUVAddr, IMG_SIZE_T uBytes, IMG_UINT32 ui32MappingFlags, IMG_HANDLE *phOSMemHandle) +{ + return RegisterExternalMem(pBasePAddr, pvCPUVAddr, uBytes, IMG_FALSE, ui32MappingFlags, phOSMemHandle); +} + + +/*! +****************************************************************************** + @Function OSUnRegisterMem + @Description UnRegisters external memory for user mode mapping + @Return TRUE on success, else FALSE +******************************************************************************/ +PVRSRV_ERROR +OSUnRegisterMem (IMG_VOID *pvCpuVAddr, + IMG_SIZE_T uiBytes, + IMG_UINT32 ui32MappingFlags, + IMG_HANDLE hOSMemHandle) +{ + LinuxMemArea *psLinuxMemArea = (LinuxMemArea *)hOSMemHandle; + PVRSRV_ERROR eError; + + PVR_UNREFERENCED_PARAMETER(pvCpuVAddr); + PVR_UNREFERENCED_PARAMETER(uiBytes); + + switch(ui32MappingFlags & PVRSRV_HAP_MAPTYPE_MASK) + { + case PVRSRV_HAP_KERNEL_ONLY: + break; + case PVRSRV_HAP_SINGLE_PROCESS: + case PVRSRV_HAP_MULTI_PROCESS: + { + eError = PVRMMapRemoveRegisteredArea(psLinuxMemArea); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s(%p, %" SIZE_T_FMT_LEN "u, 0x%08X, %p) FAILED!", + __FUNCTION__, pvCpuVAddr, uiBytes, + ui32MappingFlags, hOSMemHandle)); + return eError; + } + 
break; + } + default: + { + PVR_DPF((PVR_DBG_ERROR, "OSUnRegisterMem : invalid flags 0x%x", ui32MappingFlags)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + } + + LinuxMemAreaDeepFree(psLinuxMemArea); + + return PVRSRV_OK; +} + +PVRSRV_ERROR OSUnRegisterDiscontigMem(IMG_VOID *pvCpuVAddr, IMG_SIZE_T uBytes, IMG_UINT32 ui32Flags, IMG_HANDLE hOSMemHandle) +{ + return OSUnRegisterMem(pvCpuVAddr, uBytes, ui32Flags, hOSMemHandle); +} + +/*! +****************************************************************************** + @Function OSReservePhys + @Description Registers physical memory for user mode mapping + @Output ppvCpuVAddr + @Output phOsMemHandle handle to registered memory + @Return TRUE on success, else FALSE +******************************************************************************/ +PVRSRV_ERROR +OSReservePhys(IMG_CPU_PHYADDR BasePAddr, + IMG_SIZE_T uiBytes, + IMG_UINT32 ui32MappingFlags, + IMG_HANDLE hBMHandle, + IMG_VOID **ppvCpuVAddr, + IMG_HANDLE *phOSMemHandle) +{ + LinuxMemArea *psLinuxMemArea; + +#if 0 + /* For debug: force all OSReservePhys reservations to have a kernel + * virtual address */ + if(ui32MappingFlags & PVRSRV_HAP_SINGLE_PROCESS) + { + ui32MappingFlags &= ~PVRSRV_HAP_SINGLE_PROCESS; + ui32MappingFlags |= PVRSRV_HAP_MULTI_PROCESS; + } +#endif + + switch(ui32MappingFlags & PVRSRV_HAP_MAPTYPE_MASK) + { + case PVRSRV_HAP_KERNEL_ONLY: + { + /* Currently PVRSRV_HAP_KERNEL_ONLY implies that a kernel virtual + * mapping is required for the allocation and no user virtual + * mappings are allowed: Note these eat into our limited kernel + * virtual address space */ + psLinuxMemArea = NewIORemapLinuxMemArea(BasePAddr, uiBytes, ui32MappingFlags); + if(!psLinuxMemArea) + { + return PVRSRV_ERROR_BAD_MAPPING; + } + break; + } + case PVRSRV_HAP_SINGLE_PROCESS: + { + /* Currently this implies that we dont need a kernel virtual + * mapping, but will need a user space virtual mapping */ + psLinuxMemArea = NewIOLinuxMemArea(BasePAddr, uiBytes, 
ui32MappingFlags); + if(!psLinuxMemArea) + { + return PVRSRV_ERROR_BAD_MAPPING; + } + PVRMMapRegisterArea(psLinuxMemArea); + break; + } + case PVRSRV_HAP_MULTI_PROCESS: + { + /* Currently PVRSRV_HAP_MULTI_PROCESS implies that we need a kernel + * virtual mapping and potentially multiple user space virtual mappings. + * Beware that the kernel virtual address space is a limited resource. + */ +#if defined(VIVT_CACHE) || defined(__sh__) + /* + * ARM9 caches are tagged with virtual pages, not physical. As we are going to + * share this memory in different address spaces, we don't want it to be cached. + * ARM11 has physical tagging, so we can cache this memory without fear of virtual + * address aliasing in the TLB, as long as the kernel supports cache colouring for + * VIPT architectures. + */ + ui32MappingFlags &= ~PVRSRV_HAP_CACHED; +#endif + psLinuxMemArea = NewIORemapLinuxMemArea(BasePAddr, uiBytes, ui32MappingFlags); + if(!psLinuxMemArea) + { + return PVRSRV_ERROR_BAD_MAPPING; + } + PVRMMapRegisterArea(psLinuxMemArea); + break; + } + default: + PVR_DPF((PVR_DBG_ERROR,"OSMapPhysToLin : invalid flags 0x%x\n", ui32MappingFlags)); + *ppvCpuVAddr = NULL; + *phOSMemHandle = (IMG_HANDLE)0; + return PVRSRV_ERROR_INVALID_FLAGS; + } + + /* + In case of sparse mapping we need to handle back to the BM as it + knows the mapping info + */ + if (ui32MappingFlags & PVRSRV_MEM_SPARSE) + { + PVR_ASSERT(hBMHandle != IMG_NULL); + psLinuxMemArea->hBMHandle = hBMHandle; + } + + *phOSMemHandle = (IMG_HANDLE)psLinuxMemArea; + *ppvCpuVAddr = LinuxMemAreaToCpuVAddr(psLinuxMemArea); + + LinuxMemAreaRegister(psLinuxMemArea); + + return PVRSRV_OK; +} + +/*! 
+****************************************************************************** + @Function OSUnReservePhys + @Description UnRegisters physical memory for user mode mapping + @Return TRUE on success, else FALSE +******************************************************************************/ +PVRSRV_ERROR +OSUnReservePhys(IMG_VOID *pvCpuVAddr, + IMG_SIZE_T uiBytes, + IMG_UINT32 ui32MappingFlags, + IMG_HANDLE hOSMemHandle) +{ + LinuxMemArea *psLinuxMemArea; + PVRSRV_ERROR eError; + + PVR_UNREFERENCED_PARAMETER(pvCpuVAddr); + PVR_UNREFERENCED_PARAMETER(uiBytes); + + psLinuxMemArea = (LinuxMemArea *)hOSMemHandle; + + switch(ui32MappingFlags & PVRSRV_HAP_MAPTYPE_MASK) + { + case PVRSRV_HAP_KERNEL_ONLY: + break; + case PVRSRV_HAP_SINGLE_PROCESS: + case PVRSRV_HAP_MULTI_PROCESS: + { + eError = PVRMMapRemoveRegisteredArea(psLinuxMemArea); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s(%p, %" SIZE_T_FMT_LEN "u, 0x%08X, %p) FAILED!", + __FUNCTION__, pvCpuVAddr, uiBytes, + ui32MappingFlags, hOSMemHandle)); + return eError; + } + break; + } + default: + { + PVR_DPF((PVR_DBG_ERROR, "OSUnMapPhysToLin : invalid flags 0x%x", ui32MappingFlags)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + } + + LinuxMemAreaDeepFree(psLinuxMemArea); + + return PVRSRV_OK; +} + + +/*! +****************************************************************************** + @Function OSBaseAllocContigMemory + @Description Allocate a block of contiguous virtual non-paged memory. 
+ @Input ui32Size - number of bytes to allocate + @Output ppvLinAddr - pointer to variable that will receive the linear address of buffer + @Return PVRSRV_OK if allocation successed else returns PVRSRV_ERROR_OUT_OF_MEMORY + **************************************************************************/ +PVRSRV_ERROR OSBaseAllocContigMemory(IMG_SIZE_T uiSize, IMG_CPU_VIRTADDR *pvLinAddr, IMG_CPU_PHYADDR *psPhysAddr) +{ +#if !defined(NO_HARDWARE) + PVR_UNREFERENCED_PARAMETER(uiSize); + PVR_UNREFERENCED_PARAMETER(pvLinAddr); + PVR_UNREFERENCED_PARAMETER(psPhysAddr); + PVR_DPF((PVR_DBG_ERROR, "%s: Not available", __FUNCTION__)); + + return PVRSRV_ERROR_OUT_OF_MEMORY; +#else +/* + * On Linux, the returned virtual address should be used for CPU access, + * and not be remapped into the CPU virtual address using ioremap. The fact + * that the RAM is being managed by the kernel, and already has a virtual + * address, seems to lead to problems when the attributes of the memory are + * changed in the ioremap call (such as from cached to non-cached). + */ + IMG_VOID *pvKernLinAddr; + +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) + pvKernLinAddr = _KMallocWrapper(uiSize, GFP_KERNEL, __FILE__, __LINE__, IMG_FALSE); +#else + pvKernLinAddr = KMallocWrapper(uiSize, GFP_KERNEL); +#endif + if (!pvKernLinAddr) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + *pvLinAddr = pvKernLinAddr; + + psPhysAddr->uiAddr = virt_to_phys(pvKernLinAddr); + + return PVRSRV_OK; +#endif /* !defined(NO_HARDWARE) */ +} + + +/*! 
+****************************************************************************** + @Function OSBaseFreeContigMemory + @Description Frees memory allocated with OSBaseAllocContigMemory + @Input LinAddr - pointer to buffer allocated with OSBaseAllocContigMemory + **************************************************************************/ +PVRSRV_ERROR OSBaseFreeContigMemory(IMG_SIZE_T uiSize, IMG_CPU_VIRTADDR pvLinAddr, IMG_CPU_PHYADDR psPhysAddr) +{ +#if !defined(NO_HARDWARE) + PVR_UNREFERENCED_PARAMETER(uiSize); + PVR_UNREFERENCED_PARAMETER(pvLinAddr); + PVR_UNREFERENCED_PARAMETER(psPhysAddr.uiAddr); + + PVR_DPF((PVR_DBG_WARNING, "%s: Not available", __FUNCTION__)); +#else + PVR_UNREFERENCED_PARAMETER(uiSize); + PVR_UNREFERENCED_PARAMETER(psPhysAddr.uiAddr); + + KFreeWrapper(pvLinAddr); +#endif + return PVRSRV_OK; +} + +/*! +****************************************************************************** + + @Function OSWriteHWReg + + @Description + + register access function + + @input pvLinRegBaseAddr : lin addr of register block base + + @input ui32Offset : + + @input ui32Value : + + @Return none + +******************************************************************************/ + +IMG_UINT32 OSReadHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset) +{ +#if !defined(NO_HARDWARE) + return (IMG_UINT32) readl((IMG_PBYTE)pvLinRegBaseAddr+ui32Offset); +#else + return *(IMG_UINT32 *)((IMG_PBYTE)pvLinRegBaseAddr+ui32Offset); +#endif +} + +IMG_VOID OSWriteHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value) +{ +#if !defined(NO_HARDWARE) + writel(ui32Value, (IMG_PBYTE)pvLinRegBaseAddr+ui32Offset); +#else + *(IMG_UINT32 *)((IMG_PBYTE)pvLinRegBaseAddr+ui32Offset) = ui32Value; +#endif +} + +#if defined(CONFIG_PCI) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14)) + +/*! +****************************************************************************** + + @Function OSPCISetDev + + @Description + + Set a PCI device for subsequent use. 
+ + @input pvPCICookie : Pointer to OS specific PCI structure/cookie + + @input eFlags : Flags + + @Return Pointer to PCI device handle + +******************************************************************************/ +PVRSRV_PCI_DEV_HANDLE OSPCISetDev(IMG_VOID *pvPCICookie, HOST_PCI_INIT_FLAGS eFlags) +{ + int err; + IMG_UINT32 i; + PVR_PCI_DEV *psPVRPCI; + + PVR_TRACE(("OSPCISetDev")); + + if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(*psPVRPCI), (IMG_VOID **)&psPVRPCI, IMG_NULL, + "PCI Device") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "OSPCISetDev: Couldn't allocate PVR PCI structure")); + return IMG_NULL; + } + + psPVRPCI->psPCIDev = (struct pci_dev *)pvPCICookie; + psPVRPCI->ePCIFlags = eFlags; + + err = pci_enable_device(psPVRPCI->psPCIDev); + if (err != 0) + { + PVR_DPF((PVR_DBG_ERROR, "OSPCISetDev: Couldn't enable device (%d)", err)); + return IMG_NULL; + } + + if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER) /* PRQA S 3358 */ /* misuse of enums */ + { + pci_set_master(psPVRPCI->psPCIDev); + } + + if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_MSI) /* PRQA S 3358 */ /* misuse of enums */ + { +#if defined(CONFIG_PCI_MSI) + err = pci_enable_msi(psPVRPCI->psPCIDev); + if (err != 0) + { + PVR_DPF((PVR_DBG_WARNING, "OSPCISetDev: Couldn't enable MSI (%d)", err)); + psPVRPCI->ePCIFlags &= ~HOST_PCI_INIT_FLAG_MSI; /* PRQA S 1474,3358,4130 */ /* misuse of enums */ + } +#else + PVR_DPF((PVR_DBG_WARNING, "OSPCISetDev: MSI support not enabled in the kernel")); +#endif + } + + /* Initialise the PCI resource tracking array */ + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) + { + psPVRPCI->abPCIResourceInUse[i] = IMG_FALSE; + } + + return (PVRSRV_PCI_DEV_HANDLE)psPVRPCI; +} + +/*! +****************************************************************************** + + @Function OSPCIAcquireDev + + @Description + + Acquire a PCI device for subsequent use. 
+
+ @input ui16VendorID : Vendor PCI ID
+
+ @input ui16DeviceID : Device PCI ID
+
+ @input eFlags : Flags
+
+ @Return PCI device handle, or IMG_NULL on failure
+
+******************************************************************************/
+PVRSRV_PCI_DEV_HANDLE OSPCIAcquireDev(IMG_UINT16 ui16VendorID, IMG_UINT16 ui16DeviceID, HOST_PCI_INIT_FLAGS eFlags)
+{
+	struct pci_dev *psPCIDev;
+
+	psPCIDev = pci_get_device(ui16VendorID, ui16DeviceID, NULL);
+	if (psPCIDev == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "OSPCIAcquireDev: Couldn't acquire device"));
+		return IMG_NULL;
+	}
+
+	return OSPCISetDev((IMG_VOID *)psPCIDev, eFlags);
+}
+
+/*!
+******************************************************************************
+
+ @Function OSPCIIRQ
+
+ @Description
+
+ Get the interrupt number for the device.
+
+ @input hPVRPCI : PCI device handle
+
+ @input pui32IRQ : Pointer to where the interrupt number should be returned
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR OSPCIIRQ(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 *pui32IRQ)
+{
+	PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
+
+	*pui32IRQ = psPVRPCI->psPCIDev->irq;
+
+	return PVRSRV_OK;
+}
+
+/* Functions supported by OSPCIAddrRangeFunc */
+enum HOST_PCI_ADDR_RANGE_FUNC
+{
+	HOST_PCI_ADDR_RANGE_FUNC_LEN,
+	HOST_PCI_ADDR_RANGE_FUNC_START,
+	HOST_PCI_ADDR_RANGE_FUNC_END,
+	HOST_PCI_ADDR_RANGE_FUNC_REQUEST,
+	HOST_PCI_ADDR_RANGE_FUNC_RELEASE
+};
+
+/*!
+****************************************************************************** + + @Function OSPCIAddrRangeFunc + + @Description + + Internal support function for various address range related functions + + @input eFunc : Function to perform + + @input hPVRPCI : PCI device handle + + @input ui32Index : Address range index + + @Return function dependent + +******************************************************************************/ +static IMG_UINT32 OSPCIAddrRangeFunc(enum HOST_PCI_ADDR_RANGE_FUNC eFunc, + PVRSRV_PCI_DEV_HANDLE hPVRPCI, + IMG_UINT32 ui32Index) +{ + PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI; + + if (ui32Index >= DEVICE_COUNT_RESOURCE) + { + PVR_DPF((PVR_DBG_ERROR, "OSPCIAddrRangeFunc: Index out of range")); + return 0; + + } + + switch (eFunc) + { + case HOST_PCI_ADDR_RANGE_FUNC_LEN: + return pci_resource_len(psPVRPCI->psPCIDev, ui32Index); + case HOST_PCI_ADDR_RANGE_FUNC_START: + return pci_resource_start(psPVRPCI->psPCIDev, ui32Index); + case HOST_PCI_ADDR_RANGE_FUNC_END: + return pci_resource_end(psPVRPCI->psPCIDev, ui32Index); + case HOST_PCI_ADDR_RANGE_FUNC_REQUEST: + { + int err; + + err = pci_request_region(psPVRPCI->psPCIDev, (IMG_INT)ui32Index, PVRSRV_MODNAME); + if (err != 0) + { + PVR_DPF((PVR_DBG_ERROR, "OSPCIAddrRangeFunc: pci_request_region_failed (%d)", err)); + return 0; + } + psPVRPCI->abPCIResourceInUse[ui32Index] = IMG_TRUE; + return 1; + } + case HOST_PCI_ADDR_RANGE_FUNC_RELEASE: + if (psPVRPCI->abPCIResourceInUse[ui32Index]) + { + pci_release_region(psPVRPCI->psPCIDev, (IMG_INT)ui32Index); + psPVRPCI->abPCIResourceInUse[ui32Index] = IMG_FALSE; + } + return 1; + default: + PVR_DPF((PVR_DBG_ERROR, "OSPCIAddrRangeFunc: Unknown function")); + break; + } + + return 0; +} + +/*! 
+******************************************************************************
+
+ @Function OSPCIAddrRangeLen
+
+ @Description
+
+ Returns the length of a given address range
+
+ @input hPVRPCI : PCI device handle
+
+ @input ui32Index : Address range index
+
+ @Return Length of address range, or 0 if no such range
+
+******************************************************************************/
+IMG_UINT32 OSPCIAddrRangeLen(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
+{
+	return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_LEN, hPVRPCI, ui32Index);
+}
+
+/*!
+******************************************************************************
+
+ @Function OSPCIAddrRangeStart
+
+ @Description
+
+ Returns the start of a given address range
+
+ @input hPVRPCI : PCI device handle
+
+ @input ui32Index : Address range index
+
+ @Return Start of address range, or 0 if no such range
+
+******************************************************************************/
+IMG_UINT32 OSPCIAddrRangeStart(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
+{
+	return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_START, hPVRPCI, ui32Index);
+}
+
+/*!
+******************************************************************************
+
+ @Function OSPCIAddrRangeEnd
+
+ @Description
+
+ Returns the end of a given address range
+
+ @input hPVRPCI : PCI device handle
+
+ @input ui32Index : Address range index
+
+ @Return End of address range, or 0 if no such range
+
+******************************************************************************/
+IMG_UINT32 OSPCIAddrRangeEnd(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
+{
+	return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_END, hPVRPCI, ui32Index);
+}
+
+/*!
+****************************************************************************** + + @Function OSPCIRequestAddrRange + + @Description + + Request a given address range index for subsequent use + + @input hPVRPCI : PCI device handle + + @input ui32Index : Address range index + + @Return PVESRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR OSPCIRequestAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, + IMG_UINT32 ui32Index) +{ + return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_REQUEST, hPVRPCI, ui32Index) == 0 ? PVRSRV_ERROR_PCI_CALL_FAILED : PVRSRV_OK; +} + +/*! +****************************************************************************** + + @Function OSPCIReleaseAddrRange + + @Description + + Release a given address range that is no longer being used + + @input hPVRPCI : PCI device handle + + @input ui32Index : Address range index + + @Return PVESRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR OSPCIReleaseAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index) +{ + return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_RELEASE, hPVRPCI, ui32Index) == 0 ? PVRSRV_ERROR_PCI_CALL_FAILED : PVRSRV_OK; +} + +/*! 
+****************************************************************************** + + @Function OSPCIReleaseDev + + @Description + + Release a PCI device that is no longer being used + + @input hPVRPCI : PCI device handle + + @Return PVESRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR OSPCIReleaseDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI) +{ + PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI; + int i; + + PVR_TRACE(("OSPCIReleaseDev")); + + /* Release all PCI regions that are currently in use */ + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) + { + if (psPVRPCI->abPCIResourceInUse[i]) + { + PVR_TRACE(("OSPCIReleaseDev: Releasing Address range %d", i)); + pci_release_region(psPVRPCI->psPCIDev, i); + psPVRPCI->abPCIResourceInUse[i] = IMG_FALSE; + } + } + +#if defined(CONFIG_PCI_MSI) + if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_MSI) /* PRQA S 3358 */ /* misuse of enums */ + { + pci_disable_msi(psPVRPCI->psPCIDev); + } +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER) /* PRQA S 3358 */ /* misuse of enums */ + { + pci_clear_master(psPVRPCI->psPCIDev); + } +#endif + pci_disable_device(psPVRPCI->psPCIDev); + + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(*psPVRPCI), (IMG_VOID *)psPVRPCI, IMG_NULL); + /*not nulling pointer, copy on stack*/ + + return PVRSRV_OK; +} + +/*! 
+****************************************************************************** + + @Function OSPCISuspendDev + + @Description + + Prepare PCI device to be turned off by power management + + @input hPVRPCI : PCI device handle + + @Return PVESRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR OSPCISuspendDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI) +{ + PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI; + int i; + int err; + + PVR_TRACE(("OSPCISuspendDev")); + + /* Release all PCI regions that are currently in use */ + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) + { + if (psPVRPCI->abPCIResourceInUse[i]) + { + pci_release_region(psPVRPCI->psPCIDev, i); + } + } + + err = pci_save_state(psPVRPCI->psPCIDev); + if (err != 0) + { + PVR_DPF((PVR_DBG_ERROR, "OSPCISuspendDev: pci_save_state_failed (%d)", err)); + return PVRSRV_ERROR_PCI_CALL_FAILED; + } + + pci_disable_device(psPVRPCI->psPCIDev); + + err = pci_set_power_state(psPVRPCI->psPCIDev, pci_choose_state(psPVRPCI->psPCIDev, PMSG_SUSPEND)); + switch(err) + { + case 0: + break; + case -EIO: + PVR_DPF((PVR_DBG_WARNING, "OSPCISuspendDev: device doesn't support PCI PM")); + break; + case -EINVAL: + PVR_DPF((PVR_DBG_ERROR, "OSPCISuspendDev: can't enter requested power state")); + break; + default: + PVR_DPF((PVR_DBG_ERROR, "OSPCISuspendDev: pci_set_power_state failed (%d)", err)); + break; + } + + return PVRSRV_OK; +} + +/*! 
+****************************************************************************** + + @Function OSPCIResumeDev + + @Description + + Prepare a PCI device to be resumed by power management + + @input hPVRPCI : PCI device handle + + @input pvPCICookie : Pointer to OS specific PCI structure/cookie + + @input eFlags : Flags + + @Return PVESRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR OSPCIResumeDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI) +{ + PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI; + int err; + int i; + + PVR_TRACE(("OSPCIResumeDev")); + + err = pci_set_power_state(psPVRPCI->psPCIDev, pci_choose_state(psPVRPCI->psPCIDev, PMSG_ON)); + switch(err) + { + case 0: + break; + case -EIO: + PVR_DPF((PVR_DBG_WARNING, "OSPCIResumeDev: device doesn't support PCI PM")); + break; + case -EINVAL: + PVR_DPF((PVR_DBG_ERROR, "OSPCIResumeDev: can't enter requested power state")); + return PVRSRV_ERROR_UNKNOWN_POWER_STATE; + default: + PVR_DPF((PVR_DBG_ERROR, "OSPCIResumeDev: pci_set_power_state failed (%d)", err)); + return PVRSRV_ERROR_UNKNOWN_POWER_STATE; + } + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38)) + pci_restore_state(psPVRPCI->psPCIDev); +#else + err = pci_restore_state(psPVRPCI->psPCIDev); + if (err != 0) + { + PVR_DPF((PVR_DBG_ERROR, "OSPCIResumeDev: pci_restore_state failed (%d)", err)); + return PVRSRV_ERROR_PCI_CALL_FAILED; + } +#endif + + err = pci_enable_device(psPVRPCI->psPCIDev); + if (err != 0) + { + PVR_DPF((PVR_DBG_ERROR, "OSPCIResumeDev: Couldn't enable device (%d)", err)); + return PVRSRV_ERROR_PCI_CALL_FAILED; + } + + if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER) /* PRQA S 3358 */ /* misuse of enums */ + pci_set_master(psPVRPCI->psPCIDev); + + /* Restore the PCI resource tracking array */ + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) + { + if (psPVRPCI->abPCIResourceInUse[i]) + { + err = pci_request_region(psPVRPCI->psPCIDev, i, PVRSRV_MODNAME); + if (err != 0) + { + 
PVR_DPF((PVR_DBG_ERROR, "OSPCIResumeDev: pci_request_region_failed (region %d, error %d)", i, err)); + } + } + + } + + return PVRSRV_OK; +} + +#endif /* #if defined(CONFIG_PCI) && (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)) */ + +#define OS_MAX_TIMERS 8 + +/* Timer callback strucure used by OSAddTimer */ +typedef struct TIMER_CALLBACK_DATA_TAG +{ + IMG_BOOL bInUse; + PFN_TIMER_FUNC pfnTimerFunc; + IMG_VOID *pvData; + struct timer_list sTimer; + IMG_UINT32 ui32Delay; + IMG_BOOL bActive; +#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) || defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE) + struct work_struct sWork; +#endif +}TIMER_CALLBACK_DATA; + +#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) +static struct workqueue_struct *psTimerWorkQueue; +#endif + +static TIMER_CALLBACK_DATA sTimers[OS_MAX_TIMERS]; + +#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) || defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE) +DEFINE_MUTEX(sTimerStructLock); +#else +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39)) +/* The lock is used to control access to sTimers */ +/* PRQA S 0671,0685 1 */ /* C99 macro not understood by QAC */ +static spinlock_t sTimerStructLock = SPIN_LOCK_UNLOCKED; +#else +static DEFINE_SPINLOCK(sTimerStructLock); +#endif +#endif + +static void OSTimerCallbackBody(TIMER_CALLBACK_DATA *psTimerCBData) +{ + if (!psTimerCBData->bActive) + return; + + /* call timer callback */ + psTimerCBData->pfnTimerFunc(psTimerCBData->pvData); + + /* reset timer */ + mod_timer(&psTimerCBData->sTimer, psTimerCBData->ui32Delay + jiffies); +} + + +/*! 
+****************************************************************************** + + @Function OSTimerCallbackWrapper + + @Description + + OS specific timer callback wrapper function + + @Input ui32Data : timer callback data + + @Return NONE + +******************************************************************************/ +static IMG_VOID OSTimerCallbackWrapper(IMG_UINTPTR_T uiData) +{ + TIMER_CALLBACK_DATA *psTimerCBData = (TIMER_CALLBACK_DATA*)uiData; + +#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) || defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE) + int res; + +#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) + res = queue_work(psTimerWorkQueue, &psTimerCBData->sWork); +#else + res = schedule_work(&psTimerCBData->sWork); +#endif + if (res == 0) + { + PVR_DPF((PVR_DBG_WARNING, "OSTimerCallbackWrapper: work already queued")); + } +#else + OSTimerCallbackBody(psTimerCBData); +#endif +} + + +#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) || defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE) +static void OSTimerWorkQueueCallBack(struct work_struct *psWork) +{ + TIMER_CALLBACK_DATA *psTimerCBData = container_of(psWork, TIMER_CALLBACK_DATA, sWork); + + OSTimerCallbackBody(psTimerCBData); +} +#endif + +/*! 
+****************************************************************************** + + @Function OSAddTimer + + @Description + + OS specific function to install a timer callback + + @Input pfnTimerFunc : timer callback + + @Input *pvData :callback data + + @Input ui32MsTimeout: callback period + + @Return IMG_HANDLE : valid handle success, NULL failure + +******************************************************************************/ +IMG_HANDLE OSAddTimer(PFN_TIMER_FUNC pfnTimerFunc, IMG_VOID *pvData, IMG_UINT32 ui32MsTimeout) +{ + TIMER_CALLBACK_DATA *psTimerCBData; + IMG_UINTPTR_T ui; +#if !(defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) || defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE)) + unsigned long ulLockFlags; +#endif + + /* check callback */ + if(!pfnTimerFunc) + { + PVR_DPF((PVR_DBG_ERROR, "OSAddTimer: passed invalid callback")); + return IMG_NULL; + } + + /* Allocate timer callback data structure */ +#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) || defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE) + mutex_lock(&sTimerStructLock); +#else + spin_lock_irqsave(&sTimerStructLock, ulLockFlags); +#endif + for (ui = 0; ui < OS_MAX_TIMERS; ui++) + { + psTimerCBData = &sTimers[ui]; + if (!psTimerCBData->bInUse) + { + psTimerCBData->bInUse = IMG_TRUE; + break; + } + } +#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) || defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE) + mutex_unlock(&sTimerStructLock); +#else + spin_unlock_irqrestore(&sTimerStructLock, ulLockFlags); +#endif + if (ui >= OS_MAX_TIMERS) + { + PVR_DPF((PVR_DBG_ERROR, "OSAddTimer: all timers are in use")); + return IMG_NULL; + } + + psTimerCBData->pfnTimerFunc = pfnTimerFunc; + psTimerCBData->pvData = pvData; + psTimerCBData->bActive = IMG_FALSE; + + /* + HZ = ticks per second + ui32MsTimeout = required ms delay + ticks = (Hz * ui32MsTimeout) / 1000 + */ + psTimerCBData->ui32Delay = ((HZ * ui32MsTimeout) < 1000) + ? 
1 + : ((HZ * ui32MsTimeout) / 1000); + /* initialise object */ + init_timer(&psTimerCBData->sTimer); + + /* setup timer object */ + /* PRQA S 0307,0563 1 */ /* ignore warning about inconpartible ptr casting */ + psTimerCBData->sTimer.function = (IMG_VOID *)OSTimerCallbackWrapper; + psTimerCBData->sTimer.data = (IMG_UINTPTR_T)psTimerCBData; + + return (IMG_HANDLE)(ui + 1); +} + + +static inline TIMER_CALLBACK_DATA *GetTimerStructure(IMG_HANDLE hTimer) +{ + IMG_UINTPTR_T ui = ((IMG_UINTPTR_T)hTimer) - 1; + + PVR_ASSERT(ui < OS_MAX_TIMERS); + + return &sTimers[ui]; +} + +/*! +****************************************************************************** + + @Function OSRemoveTimer + + @Description + + OS specific function to remove a timer callback + + @Input hTimer : timer handle + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR OSRemoveTimer (IMG_HANDLE hTimer) +{ + TIMER_CALLBACK_DATA *psTimerCBData = GetTimerStructure(hTimer); + + PVR_ASSERT(psTimerCBData->bInUse); + PVR_ASSERT(!psTimerCBData->bActive); + + /* free timer callback data struct */ + psTimerCBData->bInUse = IMG_FALSE; + + return PVRSRV_OK; +} + + +/*! +****************************************************************************** + + @Function OSEnableTimer + + @Description + + OS specific function to enable a timer callback + + @Input hTimer : timer handle + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR OSEnableTimer (IMG_HANDLE hTimer) +{ + TIMER_CALLBACK_DATA *psTimerCBData = GetTimerStructure(hTimer); + + PVR_ASSERT(psTimerCBData->bInUse); + PVR_ASSERT(!psTimerCBData->bActive); + + /* Start timer arming */ + psTimerCBData->bActive = IMG_TRUE; + + /* set the expire time */ + psTimerCBData->sTimer.expires = psTimerCBData->ui32Delay + jiffies; + + /* Add the timer to the list */ + add_timer(&psTimerCBData->sTimer); + + return PVRSRV_OK; +} + + +/*! 
+****************************************************************************** + + @Function OSDisableTimer + + @Description + + OS specific function to disable a timer callback + + @Input hTimer : timer handle + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR OSDisableTimer (IMG_HANDLE hTimer) +{ + TIMER_CALLBACK_DATA *psTimerCBData = GetTimerStructure(hTimer); + + PVR_ASSERT(psTimerCBData->bInUse); + PVR_ASSERT(psTimerCBData->bActive); + + /* Stop timer from arming */ + psTimerCBData->bActive = IMG_FALSE; + smp_mb(); + +#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) + flush_workqueue(psTimerWorkQueue); +#endif +#if defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE) + flush_scheduled_work(); +#endif + + /* remove timer */ + del_timer_sync(&psTimerCBData->sTimer); + +#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) + /* + * This second flush is to catch the case where the timer ran + * before we managed to delete it, in which case, it will have + * queued more work for the workqueue. Since the bActive flag + * has been cleared, this second flush won't result in the + * timer being rearmed. + */ + flush_workqueue(psTimerWorkQueue); +#endif +#if defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE) + flush_scheduled_work(); +#endif + + return PVRSRV_OK; +} + + +/*! 
+****************************************************************************** + + @Function OSEventObjectCreateKM + + @Description + + OS specific function to create an event object + + @Input pszName : Globally unique event object name (if null name must be autogenerated) + + @Output psEventObject : OS event object info structure + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR OSEventObjectCreateKM(const IMG_CHAR *pszName, PVRSRV_EVENTOBJECT *psEventObject) +{ + + PVRSRV_ERROR eError = PVRSRV_OK; + + if(psEventObject) + { + if(pszName) + { + /* copy over the event object name */ + strncpy(psEventObject->szName, pszName, EVENTOBJNAME_MAXLENGTH); + } + else + { + /* autogenerate a name */ + static IMG_UINT16 ui16NameIndex = 0; + snprintf(psEventObject->szName, EVENTOBJNAME_MAXLENGTH, "PVRSRV_EVENTOBJECT_%d", ui16NameIndex++); + } + + if(LinuxEventObjectListCreate(&psEventObject->hOSEventKM) != PVRSRV_OK) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + } + + } + else + { + PVR_DPF((PVR_DBG_ERROR, "OSEventObjectCreateKM: psEventObject is not a valid pointer")); + eError = PVRSRV_ERROR_UNABLE_TO_CREATE_EVENT; + } + + return eError; + +} + + +/*! 
+****************************************************************************** + + @Function OSEventObjectDestroyKM + + @Description + + OS specific function to destroy an event object + + @Input psEventObject : OS event object info structure + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR OSEventObjectDestroyKM(PVRSRV_EVENTOBJECT *psEventObject) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + if(psEventObject) + { + if(psEventObject->hOSEventKM) + { + LinuxEventObjectListDestroy(psEventObject->hOSEventKM); + } + else + { + PVR_DPF((PVR_DBG_ERROR, "OSEventObjectDestroyKM: hOSEventKM is not a valid pointer")); + eError = PVRSRV_ERROR_INVALID_PARAMS; + } + } + else + { + PVR_DPF((PVR_DBG_ERROR, "OSEventObjectDestroyKM: psEventObject is not a valid pointer")); + eError = PVRSRV_ERROR_INVALID_PARAMS; + } + + return eError; +} + +/*! +****************************************************************************** + + @Function OSEventObjectWaitKM + + @Description + + OS specific function to wait for an event object. Called from client + + @Input hOSEventKM : OS and kernel specific handle to event object + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR OSEventObjectWaitKM(IMG_HANDLE hOSEventKM) +{ + PVRSRV_ERROR eError; + + if(hOSEventKM) + { + eError = LinuxEventObjectWait(hOSEventKM, EVENT_OBJECT_TIMEOUT_MS); + } + else + { + PVR_DPF((PVR_DBG_ERROR, "OSEventObjectWaitKM: hOSEventKM is not a valid handle")); + eError = PVRSRV_ERROR_INVALID_PARAMS; + } + + return eError; +} + +/*! +****************************************************************************** + + @Function OSEventObjectOpenKM + + @Description + + OS specific function to open an event object. 
Called from client + + @Input psEventObject : Pointer to an event object + @Output phOSEvent : OS and kernel specific handle to event object + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR OSEventObjectOpenKM(PVRSRV_EVENTOBJECT *psEventObject, + IMG_HANDLE *phOSEvent) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + if(psEventObject) + { + if(LinuxEventObjectAdd(psEventObject->hOSEventKM, phOSEvent) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectAdd: failed")); + eError = PVRSRV_ERROR_INVALID_PARAMS; + } + + } + else + { + PVR_DPF((PVR_DBG_ERROR, "OSEventObjectCreateKM: psEventObject is not a valid pointer")); + eError = PVRSRV_ERROR_INVALID_PARAMS; + } + + return eError; +} + +/*! +****************************************************************************** + + @Function OSEventObjectCloseKM + + @Description + + OS specific function to close an event object. Called from client + + @Input psEventObject : Pointer to an event object + @OInput hOSEventKM : OS and kernel specific handle to event object + + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR OSEventObjectCloseKM(PVRSRV_EVENTOBJECT *psEventObject, + IMG_HANDLE hOSEventKM) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + if(psEventObject) + { + if(LinuxEventObjectDelete(psEventObject->hOSEventKM, hOSEventKM) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectDelete: failed")); + eError = PVRSRV_ERROR_INVALID_PARAMS; + } + + } + else + { + PVR_DPF((PVR_DBG_ERROR, "OSEventObjectDestroyKM: psEventObject is not a valid pointer")); + eError = PVRSRV_ERROR_INVALID_PARAMS; + } + + return eError; + +} + +/*! +****************************************************************************** + + @Function OSEventObjectSignalKM + + @Description + + OS specific function to 'signal' an event object. 
Called from L/MISR + + @Input hOSEventKM : OS and kernel specific handle to event object + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR OSEventObjectSignalKM(IMG_HANDLE hOSEventKM) +{ + PVRSRV_ERROR eError; + + if(hOSEventKM) + { + eError = LinuxEventObjectSignal(hOSEventKM); + } + else + { + PVR_DPF((PVR_DBG_ERROR, "OSEventObjectSignalKM: hOSEventKM is not a valid handle")); + eError = PVRSRV_ERROR_INVALID_PARAMS; + } + + return eError; +} + +/*! +****************************************************************************** + + @Function OSProcHasPrivSrvInit + + @Description + + Does the process have sufficient privileges to initialise services? + + @Input none + + @Return IMG_BOOL : + +******************************************************************************/ +IMG_BOOL OSProcHasPrivSrvInit(IMG_VOID) +{ + return (capable(CAP_SYS_MODULE) != 0) ? IMG_TRUE : IMG_FALSE; +} + +/*! +****************************************************************************** + + @Function OSCopyToUser + + @Description + + Copy a block of data into user space + + @Input pvSrc + + @Output pvDest + + @Input ui32Bytes + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR OSCopyToUser(IMG_PVOID pvProcess, + IMG_VOID *pvDest, + IMG_VOID *pvSrc, + IMG_SIZE_T uiBytes) +{ + PVR_UNREFERENCED_PARAMETER(pvProcess); + + if(pvr_copy_to_user(pvDest, pvSrc, uiBytes)==0) + return PVRSRV_OK; + else + return PVRSRV_ERROR_FAILED_TO_COPY_VIRT_MEMORY; +} + +/*! 
+****************************************************************************** + + @Function OSCopyFromUser + + @Description + + Copy a block of data from the user space + + @Output pvDest + + @Input pvSrc + + @Input ui32Bytes + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR OSCopyFromUser( IMG_PVOID pvProcess, + IMG_VOID *pvDest, + IMG_VOID *pvSrc, + IMG_SIZE_T uiBytes) +{ + PVR_UNREFERENCED_PARAMETER(pvProcess); + + if(pvr_copy_from_user(pvDest, pvSrc, uiBytes)==0) + return PVRSRV_OK; + else + return PVRSRV_ERROR_FAILED_TO_COPY_VIRT_MEMORY; +} + +/*! +****************************************************************************** + + @Function OSAccessOK + + @Description + + Checks if a user space pointer is valide + + @Input eVerification + + @Input pvUserPtr + + @Input ui32Bytes + + @Return IMG_BOOL : + +******************************************************************************/ +IMG_BOOL OSAccessOK(IMG_VERIFY_TEST eVerification, IMG_VOID *pvUserPtr, IMG_SIZE_T uiBytes) +{ + IMG_INT linuxType; + + if (eVerification == PVR_VERIFY_READ) + { + linuxType = VERIFY_READ; + } + else + { + PVR_ASSERT(eVerification == PVR_VERIFY_WRITE); + linuxType = VERIFY_WRITE; + } + + return access_ok(linuxType, pvUserPtr, uiBytes); +} + +typedef enum _eWrapMemType_ +{ + WRAP_TYPE_NULL = 0, + WRAP_TYPE_GET_USER_PAGES, + WRAP_TYPE_FIND_VMA +} eWrapMemType; + +typedef struct _sWrapMemInfo_ +{ + eWrapMemType eType; + IMG_INT iNumPages; + IMG_INT iNumPagesMapped; + struct page **ppsPages; + IMG_SYS_PHYADDR *psPhysAddr; + IMG_INT iPageOffset; +#if defined(DEBUG) + IMG_UINTPTR_T uStartAddr; + IMG_UINTPTR_T uBeyondEndAddr; + struct vm_area_struct *psVMArea; +#endif +} sWrapMemInfo; + + +/*! 
+****************************************************************************** + + @Function *CPUVAddrToPFN + + @Description + + Find the PFN associated with a given CPU virtual address, and return + the associated page structure, if it exists. + The page in question must be present (i.e. no fault handling required), + and must be writable. A get_page is done on the returned page structure. + + @Input psVMArea - pointer to VM area structure + uCPUVAddr - CPU virtual address + pui32PFN - Pointer to returned PFN. + ppsPAge - Pointer to returned page structure pointer. + + @Output *pui32PFN - Set to PFN + *ppsPage - Pointer to the page structure if present, else NULL. + @Return IMG_TRUE if PFN lookup was succesful. + +******************************************************************************/ +static IMG_BOOL CPUVAddrToPFN(struct vm_area_struct *psVMArea, IMG_UINTPTR_T uCPUVAddr, IMG_UINT32 *pui32PFN, struct page **ppsPage) +{ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,10)) + pgd_t *psPGD; + pud_t *psPUD; + pmd_t *psPMD; + pte_t *psPTE; + struct mm_struct *psMM = psVMArea->vm_mm; + spinlock_t *psPTLock; + IMG_BOOL bRet = IMG_FALSE; + + *pui32PFN = 0; + *ppsPage = NULL; + + psPGD = pgd_offset(psMM, uCPUVAddr); + if (pgd_none(*psPGD) || pgd_bad(*psPGD)) + return bRet; + + psPUD = pud_offset(psPGD, uCPUVAddr); + if (pud_none(*psPUD) || pud_bad(*psPUD)) + return bRet; + + psPMD = pmd_offset(psPUD, uCPUVAddr); + if (pmd_none(*psPMD) || pmd_bad(*psPMD)) + return bRet; + + psPTE = (pte_t *)pte_offset_map_lock(psMM, psPMD, uCPUVAddr, &psPTLock); + + if ((pte_none(*psPTE) == 0) && (pte_present(*psPTE) != 0) && (pte_write(*psPTE) != 0)) + { + *pui32PFN = pte_pfn(*psPTE); + bRet = IMG_TRUE; + + if (pfn_valid(*pui32PFN)) + { + *ppsPage = pfn_to_page(*pui32PFN); + + get_page(*ppsPage); + } + } + + pte_unmap_unlock(psPTE, psPTLock); + + return bRet; +#else + return IMG_FALSE; +#endif +} + +/*! 
+****************************************************************************** + + @Function OSReleasePhysPageAddr + + @Description + + Release wrapped memory. + + @Input hOSWrapMem : Driver cookie + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR OSReleasePhysPageAddr(IMG_HANDLE hOSWrapMem) +{ + sWrapMemInfo *psInfo = (sWrapMemInfo *)hOSWrapMem; + IMG_INT i; + + if (psInfo == IMG_NULL) + { + PVR_DPF((PVR_DBG_WARNING, + "OSReleasePhysPageAddr: called with null wrap handle")); + return PVRSRV_OK; + } + + switch (psInfo->eType) + { + case WRAP_TYPE_NULL: + { + PVR_DPF((PVR_DBG_WARNING, + "OSReleasePhysPageAddr: called with wrap type WRAP_TYPE_NULL")); + break; + } + case WRAP_TYPE_GET_USER_PAGES: + { + for (i = 0; i < psInfo->iNumPagesMapped; i++) + { + struct page *psPage = psInfo->ppsPages[i]; + + PVR_ASSERT(psPage != NULL); + + /* + * If the number of pages mapped is not the same as + * the number of pages in the address range, then + * get_user_pages must have failed, so we are cleaning + * up after failure, and the pages can't be dirty. 
+ */ + if (psInfo->iNumPagesMapped == psInfo->iNumPages) + { + if (!PageReserved(psPage)) + { + SetPageDirty(psPage); + } + } +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,6,0)) + page_cache_release(psPage); +#else + put_page(psPage); +#endif + } + break; + } + case WRAP_TYPE_FIND_VMA: + { + for (i = 0; i < psInfo->iNumPages; i++) + { + if (psInfo->ppsPages[i] != IMG_NULL) + { + put_page(psInfo->ppsPages[i]); + } + } + break; + } + default: + { + PVR_DPF((PVR_DBG_ERROR, + "OSReleasePhysPageAddr: Unknown wrap type (%d)", psInfo->eType)); + return PVRSRV_ERROR_INVALID_WRAP_TYPE; + } + } + + if (psInfo->ppsPages != IMG_NULL) + { + kfree(psInfo->ppsPages); + } + + if (psInfo->psPhysAddr != IMG_NULL) + { + kfree(psInfo->psPhysAddr); + } + + kfree(psInfo); + + return PVRSRV_OK; +} + +#if defined(CONFIG_TI_TILER) || defined(CONFIG_DRM_OMAP_DMM_TILER) + +static IMG_UINT32 CPUAddrToTilerPhy(IMG_UINT32 uiAddr) +{ + IMG_UINT32 ui32PhysAddr = 0; + pte_t *ptep, pte; + pgd_t *pgd; + pmd_t *pmd; + pud_t *pud; + + pgd = pgd_offset(current->mm, uiAddr); + if (pgd_none(*pgd) || pgd_bad(*pgd)) + goto err_out; + + pud = pud_offset(pgd, uiAddr); + if (pud_none(*pud) || pud_bad(*pud)) + goto err_out; + + pmd = pmd_offset(pud, uiAddr); + if (pmd_none(*pmd) || pmd_bad(*pmd)) + goto err_out; + + ptep = pte_offset_map(pmd, uiAddr); + if (!ptep) + goto err_out; + + pte = *ptep; + if (!pte_present(pte)) + goto err_out; + + ui32PhysAddr = (pte & PAGE_MASK) | (~PAGE_MASK & uiAddr); + + /* If the physAddr is not in the TILER physical range + * then we don't proceed. + */ + if (ui32PhysAddr < 0x60000000 && ui32PhysAddr > 0x7fffffff) + { + PVR_DPF((PVR_DBG_ERROR, "CPUAddrToTilerPhy: Not in tiler range")); + ui32PhysAddr = 0; + goto err_out; + } + +err_out: + return ui32PhysAddr; +} + +#endif /* defined(CONFIG_TI_TILER) && defined(CONFIG_DRM_OMAP_DMM_TILER) */ + +/*! 
+****************************************************************************** + + @Function OSAcquirePhysPageAddr + + @Description + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR OSAcquirePhysPageAddr(IMG_VOID *pvCPUVAddr, + IMG_SIZE_T uiBytes, + IMG_SYS_PHYADDR *psSysPAddr, + IMG_HANDLE *phOSWrapMem) +{ + IMG_UINTPTR_T uStartAddrOrig = (IMG_UINTPTR_T) pvCPUVAddr; + IMG_SIZE_T uAddrRangeOrig = uiBytes; + IMG_UINTPTR_T uBeyondEndAddrOrig = uStartAddrOrig + uAddrRangeOrig; + IMG_UINTPTR_T uStartAddr; + IMG_SIZE_T uAddrRange; + IMG_UINTPTR_T uBeyondEndAddr; + IMG_UINTPTR_T uAddr; + IMG_INT i; + struct vm_area_struct *psVMArea; + sWrapMemInfo *psInfo = NULL; + IMG_BOOL bHavePageStructs = IMG_FALSE; + IMG_BOOL bHaveNoPageStructs = IMG_FALSE; + IMG_BOOL bMMapSemHeld = IMG_FALSE; + PVRSRV_ERROR eError = PVRSRV_ERROR_OUT_OF_MEMORY; + + /* Align start and end addresses to page boundaries */ + uStartAddr = uStartAddrOrig & PAGE_MASK; + uBeyondEndAddr = PAGE_ALIGN(uBeyondEndAddrOrig); + uAddrRange = uBeyondEndAddr - uStartAddr; + + /* + * Check for address range calculation overflow, and attempts to wrap + * zero bytes. 
+ */ + if (uBeyondEndAddr <= uStartAddr) + { + PVR_DPF((PVR_DBG_ERROR, + "OSAcquirePhysPageAddr: Invalid address range (start " UINTPTR_FMT ", length %" SIZE_T_FMT_LEN "x)", + uStartAddrOrig, uAddrRangeOrig)); + goto error; + } + + /* Allocate information structure */ + psInfo = kmalloc(sizeof(*psInfo), GFP_KERNEL); + if (psInfo == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "OSAcquirePhysPageAddr: Couldn't allocate information structure")); + goto error; + } + memset(psInfo, 0, sizeof(*psInfo)); + +#if defined(DEBUG) + psInfo->uStartAddr = uStartAddrOrig; + psInfo->uBeyondEndAddr = uBeyondEndAddrOrig; +#endif + + psInfo->iNumPages = (IMG_INT)(uAddrRange >> PAGE_SHIFT); + psInfo->iPageOffset = (IMG_INT)(uStartAddrOrig & ~PAGE_MASK); + + /* Allocate physical address array */ + psInfo->psPhysAddr = kmalloc((size_t)psInfo->iNumPages * sizeof(*psInfo->psPhysAddr), GFP_KERNEL); + if (psInfo->psPhysAddr == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "OSAcquirePhysPageAddr: Couldn't allocate page array")); + goto error; + } + memset(psInfo->psPhysAddr, 0, (size_t)psInfo->iNumPages * sizeof(*psInfo->psPhysAddr)); + + /* Allocate page array */ + psInfo->ppsPages = kmalloc((size_t)psInfo->iNumPages * sizeof(*psInfo->ppsPages), GFP_KERNEL); + if (psInfo->ppsPages == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "OSAcquirePhysPageAddr: Couldn't allocate page array")); + goto error; + } + memset(psInfo->ppsPages, 0, (size_t)psInfo->iNumPages * sizeof(*psInfo->ppsPages)); + + /* Default error code from now on */ + eError = PVRSRV_ERROR_BAD_MAPPING; + + /* Set the mapping type to aid clean up */ + psInfo->eType = WRAP_TYPE_GET_USER_PAGES; + + /* Lock down user memory */ + down_read(¤t->mm->mmap_sem); + bMMapSemHeld = IMG_TRUE; + + /* Get page list */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,6,0)) + psInfo->iNumPagesMapped = get_user_pages( + current, current->mm, + uStartAddr, psInfo->iNumPages, 1, 0, psInfo->ppsPages, NULL); +#else + psInfo->iNumPagesMapped = get_user_pages_remote( + current, 
current->mm, + uStartAddr, psInfo->iNumPages, FOLL_WRITE, psInfo->ppsPages, NULL, NULL); +#endif + if (psInfo->iNumPagesMapped >= 0) + { + /* See if we got all the pages we wanted */ + if (psInfo->iNumPagesMapped != psInfo->iNumPages) + { + PVR_TRACE(("OSAcquirePhysPageAddr: Couldn't map all the pages needed (wanted: %d, got %d)", psInfo->iNumPages, psInfo->iNumPagesMapped)); + + goto error; + } + + /* Build list of physical page addresses */ + for (i = 0; i < psInfo->iNumPages; i++) + { + IMG_CPU_PHYADDR CPUPhysAddr; + IMG_UINT32 ui32PFN; + + ui32PFN = page_to_pfn(psInfo->ppsPages[i]); + CPUPhysAddr.uiAddr = ui32PFN << PAGE_SHIFT; + if ((CPUPhysAddr.uiAddr >> PAGE_SHIFT) != ui32PFN) + { + PVR_DPF((PVR_DBG_ERROR, + "OSAcquirePhysPageAddr: Page frame number out of range (%x)", ui32PFN)); + + goto error; + } + psInfo->psPhysAddr[i] = SysCpuPAddrToSysPAddr(CPUPhysAddr); + psSysPAddr[i] = psInfo->psPhysAddr[i]; + + } + + goto exit; + } + + PVR_DPF((PVR_DBG_MESSAGE, "OSAcquirePhysPageAddr: get_user_pages failed (%d), using CPU page table", psInfo->iNumPagesMapped)); + + /* Reset some fields */ + psInfo->eType = WRAP_TYPE_NULL; + psInfo->iNumPagesMapped = 0; + memset(psInfo->ppsPages, 0, (size_t)psInfo->iNumPages * sizeof(*psInfo->ppsPages)); + + /* + * get_user_pages didn't work. If this is due to the address range + * representing memory mapped I/O, then we'll look for the pages + * in the appropriate memory region of the process. + */ + + /* Set the mapping type to aid clean up */ + psInfo->eType = WRAP_TYPE_FIND_VMA; + + psVMArea = find_vma(current->mm, uStartAddrOrig); + if (psVMArea == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "OSAcquirePhysPageAddr: Couldn't find memory region containing start address " UINTPTR_FMT, + uStartAddrOrig)); + + goto error; + } +#if defined(DEBUG) + psInfo->psVMArea = psVMArea; +#endif + + /* + * find_vma locates a region with an end point past a given + * virtual address. So check the address is actually in the region. 
+ */ + if (uStartAddrOrig < psVMArea->vm_start) + { + PVR_DPF((PVR_DBG_ERROR, + "OSAcquirePhysPageAddr: Start address " UINTPTR_FMT " is outside of the region returned by find_vma", + uStartAddrOrig)); + goto error; + } + + /* Now check the end address is in range */ + if (uBeyondEndAddrOrig > psVMArea->vm_end) + { + PVR_DPF((PVR_DBG_ERROR, + "OSAcquirePhysPageAddr: End address " UINTPTR_FMT " is outside of the region returned by find_vma", uBeyondEndAddrOrig)); + goto error; + } + + /* Does the region represent memory mapped I/O? */ + if (!(psVMArea->vm_flags & VM_IO)) + { + PVR_DPF((PVR_DBG_ERROR, + "OSAcquirePhysPageAddr: Memory region does not represent memory mapped I/O (VMA flags: 0x%lx)", psVMArea->vm_flags)); + goto error; + } + + /* We require read and write access */ + if ((psVMArea->vm_flags & (VM_READ | VM_WRITE)) != (VM_READ | VM_WRITE)) + { + PVR_DPF((PVR_DBG_ERROR, + "OSAcquirePhysPageAddr: No read/write access to memory region (VMA flags: 0x%lx)", psVMArea->vm_flags)); + goto error; + } + + for (uAddr = uStartAddrOrig, i = 0; uAddr < uBeyondEndAddrOrig; uAddr += PAGE_SIZE, i++) + { + IMG_CPU_PHYADDR CPUPhysAddr; + IMG_UINT32 ui32PFN = 0; + + PVR_ASSERT(i < psInfo->iNumPages); + + if (!CPUVAddrToPFN(psVMArea, uAddr, &ui32PFN, &psInfo->ppsPages[i])) + { + PVR_DPF((PVR_DBG_ERROR, + "OSAcquirePhysPageAddr: Invalid CPU virtual address")); + + goto error; + } + if (psInfo->ppsPages[i] == NULL) + { +#if defined(CONFIG_TI_TILER) || defined(CONFIG_DRM_OMAP_DMM_TILER) + /* This could be tiler memory.*/ + IMG_UINT32 ui32TilerAddr = CPUAddrToTilerPhy(uAddr); + if (ui32TilerAddr) + { + bHavePageStructs = IMG_TRUE; + psInfo->iNumPagesMapped++; + psInfo->psPhysAddr[i].uiAddr = ui32TilerAddr; + psSysPAddr[i].uiAddr = ui32TilerAddr; + continue; + } +#endif /* defined(CONFIG_TI_TILER) || defined(CONFIG_DRM_OMAP_DMM_TILER) */ + + bHaveNoPageStructs = IMG_TRUE; + } + else + { + bHavePageStructs = IMG_TRUE; + + psInfo->iNumPagesMapped++; + + PVR_ASSERT(ui32PFN == 
page_to_pfn(psInfo->ppsPages[i])); + } + + CPUPhysAddr.uiAddr = ui32PFN << PAGE_SHIFT; + if ((CPUPhysAddr.uiAddr >> PAGE_SHIFT) != ui32PFN) + { + PVR_DPF((PVR_DBG_ERROR, + "OSAcquirePhysPageAddr: Page frame number out of range (%x)", ui32PFN)); + + goto error; + } + + psInfo->psPhysAddr[i] = SysCpuPAddrToSysPAddr(CPUPhysAddr); + psSysPAddr[i] = psInfo->psPhysAddr[i]; + } + PVR_ASSERT(i == psInfo->iNumPages); + +#if defined(VM_MIXEDMAP) + if ((psVMArea->vm_flags & VM_MIXEDMAP) != 0) + { + goto exit; + } +#endif + + if (bHavePageStructs && bHaveNoPageStructs) + { + PVR_DPF((PVR_DBG_ERROR, + "OSAcquirePhysPageAddr: Region is VM_MIXEDMAP, but isn't marked as such")); + goto error; + } + + if (!bHaveNoPageStructs) + { + /* The ideal case; every page has a page structure */ + goto exit; + } + +#if defined(VM_PFNMAP) + if ((psVMArea->vm_flags & VM_PFNMAP) == 0) +#endif + { + PVR_DPF((PVR_DBG_ERROR, + "OSAcquirePhysPageAddr: Region is VM_PFNMAP, but isn't marked as such")); + goto error; + } + +exit: + PVR_ASSERT(bMMapSemHeld); + up_read(¤t->mm->mmap_sem); + bMMapSemHeld = IMG_FALSE; + + PVR_ASSERT(psInfo->eType != 0); + + if (bHaveNoPageStructs) + { +#if defined(PVR_ALLOW_NON_PAGE_STRUCT_MEMORY_IMPORT) + /* + * Allowing the GPU to access pages that can't be locked down is + * potentially unsafe. For recent versions of Linux, there are + * safer ways to get access to such memory, such as DMA Buffer + * sharing (DMABUF). + */ + PVR_DPF((PVR_DBG_MESSAGE, + "OSAcquirePhysPageAddr: Region contains pages which can't be locked down (no page structures)")); +#else + PVR_DPF((PVR_DBG_ERROR, + "OSAcquirePhysPageAddr: Region contains pages which can't be locked down (no page structures)")); + goto error; +#endif + } + + /* Return the cookie */ + *phOSWrapMem = (IMG_HANDLE)psInfo; + + return PVRSRV_OK; + +error: + if (bMMapSemHeld) + { + up_read(¤t->mm->mmap_sem); + } + OSReleasePhysPageAddr((IMG_HANDLE)psInfo); + + PVR_ASSERT(eError != PVRSRV_OK); + + return eError; +} + +#if ! 
(defined(__arm__) || defined(__aarch64__)) +# define USE_VIRTUAL_CACHE_OP +#elif LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) +# define USE_VIRTUAL_CACHE_OP +# if defined(CONFIG_OUTER_CACHE) +# define USE_PHYSICAL_CACHE_OP +# endif +#else +# define USE_PHYSICAL_CACHE_OP +#endif + +extern PVRSRV_LINUX_MUTEX g_sMMapMutex; + +/* g_sMMapMutex must be held while this function is called */ +static +IMG_VOID *FindMMapBaseVAddr(struct list_head *psMMapOffsetStructList, + IMG_VOID *pvRangeAddrStart, IMG_UINT32 ui32Length) +{ + PKV_OFFSET_STRUCT psOffsetStruct; + IMG_VOID *pvMinVAddr; + + /* There's no kernel-virtual for this type of allocation, so if + * we're flushing it, it must be user-virtual, and therefore + * have a mapping. + */ + list_for_each_entry(psOffsetStruct, psMMapOffsetStructList, sAreaItem) + { + if(OSGetCurrentProcessIDKM() != psOffsetStruct->ui32PID) + continue; + + pvMinVAddr = (IMG_VOID *)psOffsetStruct->uiUserVAddr; + + /* Within permissible range */ + if(pvRangeAddrStart >= pvMinVAddr && + ui32Length <= psOffsetStruct->uiRealByteSize) + return pvMinVAddr; + } + + return IMG_NULL; +} + +#if defined(USE_PHYSICAL_CACHE_OP) + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39) +typedef void (*PhysicalCacheOp_t)(phys_addr_t uStart, phys_addr_t uEnd); +#else +typedef void (*PhysicalCacheOp_t)(unsigned long ulStart, unsigned long ulEnd); +#endif + +/* + Note: use IMG_CPU_PHYADDR to return CPU Phys Addresses, and not just 'unsigned long', + as this is not big enough to hold physical addresses on 32-bit PAE devices. 
+*/ +typedef IMG_BOOL (*MemAreaToPhys_t)(LinuxMemArea *psLinuxMemArea, + IMG_VOID *pvRangeAddrStart, + IMG_UINT32 ui32PageNumOffset, + IMG_UINT32 ui32PageNum, + IMG_CPU_PHYADDR *psStart); + +static IMG_BOOL VMallocAreaToPhys(LinuxMemArea *psLinuxMemArea, + IMG_VOID *pvRangeAddrStart, + IMG_UINT32 ui32PageNumOffset, + IMG_UINT32 ui32PageNum, + IMG_CPU_PHYADDR *psStart) +{ + psStart->uiAddr = vmalloc_to_pfn(pvRangeAddrStart + ui32PageNum * PAGE_SIZE) << PAGE_SHIFT; + return IMG_TRUE; +} + +static IMG_BOOL ExternalKVAreaToPhys(LinuxMemArea *psLinuxMemArea, + IMG_VOID *pvRangeAddrStart, + IMG_UINT32 ui32PageNumOffset, + IMG_UINT32 ui32PageNum, + IMG_CPU_PHYADDR *psStart) +{ + IMG_SYS_PHYADDR SysPAddr; + SysPAddr = psLinuxMemArea->uData.sExternalKV.uPhysAddr.pSysPhysAddr[ui32PageNumOffset + ui32PageNum]; + *psStart = SysSysPAddrToCpuPAddr(SysPAddr); + return IMG_TRUE; +} + +static IMG_BOOL AllocPagesAreaToPhys(LinuxMemArea *psLinuxMemArea, + IMG_VOID *pvRangeAddrStart, + IMG_UINT32 ui32PageNumOffset, + IMG_UINT32 ui32PageNum, + IMG_CPU_PHYADDR *psStart) +{ + struct page *pPage; + + pPage = psLinuxMemArea->uData.sPageList.ppsPageList[ui32PageNumOffset + ui32PageNum]; + psStart->uiAddr = page_to_pfn(pPage) << PAGE_SHIFT; + return IMG_TRUE; +} + +static IMG_BOOL AllocPagesSparseAreaToPhys(LinuxMemArea *psLinuxMemArea, + IMG_VOID *pvRangeAddrStart, + IMG_UINT32 ui32PageNumOffset, + IMG_UINT32 ui32PageNum, + IMG_CPU_PHYADDR *psStart) +{ + IMG_UINT32 ui32VirtOffset = (ui32PageNumOffset + ui32PageNum) << PAGE_SHIFT; + IMG_UINT32 ui32PhysOffset; + struct page *pPage; + + if (BM_VirtOffsetToPhysical(psLinuxMemArea->hBMHandle, ui32VirtOffset, &ui32PhysOffset)) + { + PVR_ASSERT(ui32PhysOffset <= ui32VirtOffset); + pPage = psLinuxMemArea->uData.sPageList.ppsPageList[ui32PhysOffset >> PAGE_SHIFT]; + psStart->uiAddr = page_to_pfn(pPage) << PAGE_SHIFT; + return IMG_TRUE; + } + + return IMG_FALSE; +} + +static inline void DoPhysicalCacheOp(LinuxMemArea *psLinuxMemArea, + IMG_VOID 
*pvRangeAddrStart, + IMG_SIZE_T uiLength, + IMG_UINTPTR_T uPageNumOffset, + MemAreaToPhys_t pfnMemAreaToPhys, + PhysicalCacheOp_t pfnPhysicalCacheOp) +{ + IMG_CPU_PHYADDR sStart, sEnd; + unsigned long ulLength, ulStartOffset, ulEndOffset; + IMG_UINT32 i, ui32NumPages; + IMG_BOOL bValidPage; + + /* Length and offsets of flush region WRT page alignment */ + ulLength = (unsigned long)uiLength; + ulStartOffset = ((unsigned long)pvRangeAddrStart) & (PAGE_SIZE - 1); + ulEndOffset = ((unsigned long)pvRangeAddrStart + ulLength) & (PAGE_SIZE - 1); + + /* The affected pages, rounded up */ + ui32NumPages = (ulStartOffset + ulLength + PAGE_SIZE - 1) >> PAGE_SHIFT; + + for(i = 0; i < ui32NumPages; i++) + { + bValidPage = pfnMemAreaToPhys(psLinuxMemArea, pvRangeAddrStart, + uPageNumOffset, i, &sStart); + if (bValidPage) + { + sEnd.uiAddr = sStart.uiAddr + PAGE_SIZE; + + if(i == ui32NumPages - 1 && ulEndOffset != 0) + sEnd.uiAddr = sStart.uiAddr + ulEndOffset; + + if(i == 0) + sStart.uiAddr += ulStartOffset; + + pfnPhysicalCacheOp(sStart.uiAddr, sEnd.uiAddr); + } + } +} + +#endif /* defined(USE_PHYSICAL_CACHE_OP) */ + +#if defined(USE_VIRTUAL_CACHE_OP) +typedef void (*VirtualCacheOp_t)(const void *pvStart, const void *pvEnd); + +static inline void DoVirtualCacheOp(IMG_HANDLE hOSMemHandle, + IMG_UINT32 ui32ByteOffset, + IMG_VOID *pvRangeAddrStart, + IMG_UINT32 ui32Length, + VirtualCacheOp_t pfnVirtualCacheOp) +{ + LinuxMemArea *psLinuxMemArea = hOSMemHandle; + + if (!psLinuxMemArea->hBMHandle) + { + pfnVirtualCacheOp(pvRangeAddrStart, pvRangeAddrStart + ui32Length); + } + else + { + IMG_UINT32 ui32ByteRemain = ui32Length; + IMG_UINT32 ui32BytesToDo = PAGE_SIZE - (((IMG_UINTPTR_T) pvRangeAddrStart) & (~PAGE_MASK)); + IMG_UINT8 *pbDo = (IMG_UINT8 *) pvRangeAddrStart; + + while(ui32ByteRemain) + { + if (BM_MapPageAtOffset(psLinuxMemArea->hBMHandle, ui32ByteOffset + (ui32Length - ui32ByteRemain))) + { + pfnVirtualCacheOp(pbDo, pbDo + ui32BytesToDo); + } + pbDo += ui32BytesToDo; + 
ui32ByteRemain -= ui32BytesToDo; + ui32BytesToDo = MIN(ui32ByteRemain, PAGE_SIZE); + } + } +} +#endif /* defined(USE_VIRTUAL_CACHE_OP) */ + +static +IMG_BOOL CheckExecuteCacheOp(IMG_HANDLE hOSMemHandle, + IMG_UINT32 ui32ByteOffset, + IMG_VOID *pvVirtRangeStart, + IMG_SIZE_T uiLength +#if defined(USE_VIRTUAL_CACHE_OP) + , VirtualCacheOp_t pfnVirtualCacheOp +#endif +#if defined(USE_PHYSICAL_CACHE_OP) + , PhysicalCacheOp_t pfnPhysicalCacheOp +#endif + ) +{ + LinuxMemArea *psLinuxMemArea = (LinuxMemArea *)hOSMemHandle; + IMG_UINTPTR_T uiAreaOffset = 0; + struct list_head *psMMapOffsetStructList; + IMG_VOID *pvMinVAddr; +#if defined(USE_PHYSICAL_CACHE_OP) + MemAreaToPhys_t pfnMemAreaToPhys = IMG_NULL; + IMG_UINTPTR_T uPageNumOffset = 0; + IMG_VOID *pvPhysRangeStart = pvVirtRangeStart; +#endif + + PVR_ASSERT(psLinuxMemArea != IMG_NULL); + + LinuxLockMutexNested(&g_sMMapMutex, PVRSRV_LOCK_CLASS_MMAP); + + psMMapOffsetStructList = &psLinuxMemArea->sMMapOffsetStructList; + + /* + Don't check the length in the case of sparse mappings as + we only know the physical length not the virtual + */ + if (!psLinuxMemArea->hBMHandle) + { + PVR_ASSERT(uiLength <= psLinuxMemArea->uiByteSize); + } + + if(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_SUB_ALLOC) + { + uiAreaOffset = psLinuxMemArea->uData.sSubAlloc.uiByteOffset; + psLinuxMemArea = psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea; + } + + /* Recursion surely isn't possible? */ + PVR_ASSERT(psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC); + + switch(psLinuxMemArea->eAreaType) + { + case LINUX_MEM_AREA_VMALLOC: + { + if(is_vmalloc_addr(pvVirtRangeStart)) + { + pvMinVAddr = psLinuxMemArea->uData.sVmalloc.pvVmallocAddress + uiAreaOffset; + + /* Outside permissible range */ + if(pvVirtRangeStart < pvMinVAddr) + goto err_blocked; + } + else + { + /* If this isn't a vmalloc address, assume we're flushing by + * user-virtual. Compute the mmap base vaddr and use this to + * compute the offset in vmalloc space. 
+ */ + + pvMinVAddr = FindMMapBaseVAddr(psMMapOffsetStructList, + pvVirtRangeStart, uiLength); + if(!pvMinVAddr) + goto err_blocked; + +#if defined(USE_PHYSICAL_CACHE_OP) + /* + * We don't need to worry about cache aliasing here because + * we have already flushed the virtually-indexed caches (L1 + * etc.) by the supplied user-virtual addresses. + * + * The vmalloc address will only be used to determine + * affected physical pages for outer cache flushing. + */ + pvPhysRangeStart = psLinuxMemArea->uData.sVmalloc.pvVmallocAddress + + (uiAreaOffset & PAGE_MASK) + (pvVirtRangeStart - pvMinVAddr); +#endif + } + break; + } + + case LINUX_MEM_AREA_EXTERNAL_KV: + { + /* We'll only see bPhysContig for frame buffers, and we shouldn't + * be flushing those (they're write combined or uncached). + */ + if (psLinuxMemArea->uData.sExternalKV.bPhysContig == IMG_TRUE) + { + PVR_DPF((PVR_DBG_WARNING, "%s: Attempt to flush contiguous external memory", __func__)); + goto err_blocked; + } + + /* If it has a kernel virtual address, something odd has happened. + * We expect EXTERNAL_KV _only_ from the wrapping of ALLOC_PAGES. 
+ */ + if (psLinuxMemArea->uData.sExternalKV.pvExternalKV != IMG_NULL) + { + PVR_DPF((PVR_DBG_WARNING, "%s: Attempt to flush external memory with a kernel virtual address", __func__)); + goto err_blocked; + } + + pvMinVAddr = FindMMapBaseVAddr(psMMapOffsetStructList, + pvVirtRangeStart, uiLength); + if(!pvMinVAddr) + goto err_blocked; + + break; + } + + case LINUX_MEM_AREA_ALLOC_PAGES: + { + pvMinVAddr = FindMMapBaseVAddr(psMMapOffsetStructList, + pvVirtRangeStart, uiLength); + if(!pvMinVAddr) + goto err_blocked; + + break; + } + + default: + PVR_DBG_BREAK; + goto err_blocked; + } + +#if defined(USE_PHYSICAL_CACHE_OP) + switch(psLinuxMemArea->eAreaType) + { + case LINUX_MEM_AREA_VMALLOC: + { + pfnMemAreaToPhys = VMallocAreaToPhys; + break; + } + + case LINUX_MEM_AREA_EXTERNAL_KV: + { + uPageNumOffset = ((uiAreaOffset & PAGE_MASK) + (pvPhysRangeStart - pvMinVAddr)) >> PAGE_SHIFT; + pfnMemAreaToPhys = ExternalKVAreaToPhys; + break; + } + + case LINUX_MEM_AREA_ALLOC_PAGES: + { + uPageNumOffset = ((uiAreaOffset & PAGE_MASK) + (pvPhysRangeStart - pvMinVAddr)) >> PAGE_SHIFT; + + if (psLinuxMemArea->hBMHandle) + pfnMemAreaToPhys = AllocPagesSparseAreaToPhys; + else + pfnMemAreaToPhys = AllocPagesAreaToPhys; + + break; + } + + default: + PVR_DBG_BREAK; + } +#endif + +#if defined(USE_VIRTUAL_CACHE_OP) + DoVirtualCacheOp(hOSMemHandle, + ui32ByteOffset, + pvVirtRangeStart, + uiLength, + pfnVirtualCacheOp); +#endif + + LinuxUnLockMutex(&g_sMMapMutex); + +#if defined(USE_PHYSICAL_CACHE_OP) + PVR_ASSERT(pfnMemAreaToPhys != IMG_NULL); + + DoPhysicalCacheOp(psLinuxMemArea, + pvPhysRangeStart, + uiLength, + uPageNumOffset, + pfnMemAreaToPhys, + pfnPhysicalCacheOp); +#endif + + return IMG_TRUE; + +err_blocked: + PVR_DPF((PVR_DBG_WARNING, "%s: Blocked cache op on virtual range " + "%p-%p (type %d)", __func__, + pvVirtRangeStart, pvVirtRangeStart + uiLength, + psLinuxMemArea->eAreaType)); + LinuxUnLockMutex(&g_sMMapMutex); + return IMG_FALSE; +} + +#if defined(__i386__) || defined 
(__x86_64__) + +#define ROUND_UP(x,a) (((x) + (a) - 1) & ~((a) - 1)) + +static void per_cpu_cache_flush(void *arg) +{ + PVR_UNREFERENCED_PARAMETER(arg); + wbinvd(); +} + +static void x86_flush_cache_range(const void *pvStart, const void *pvEnd) +{ + IMG_BYTE *pbStart = (IMG_BYTE *)pvStart; + IMG_BYTE *pbEnd = (IMG_BYTE *)pvEnd; + IMG_BYTE *pbBase; + + pbEnd = (IMG_BYTE *)ROUND_UP((IMG_UINTPTR_T)pbEnd, + boot_cpu_data.x86_clflush_size); + + mb(); + for(pbBase = pbStart; pbBase < pbEnd; pbBase += boot_cpu_data.x86_clflush_size) + { + clflush(pbBase); + } + mb(); +} + +IMG_VOID OSCleanCPUCacheKM(IMG_VOID) +{ + /* No clean feature on x86 */ + ON_EACH_CPU(per_cpu_cache_flush, NULL, 1); +} + +IMG_VOID OSFlushCPUCacheKM(IMG_VOID) +{ + ON_EACH_CPU(per_cpu_cache_flush, NULL, 1); +} + +IMG_BOOL OSFlushCPUCacheRangeKM(IMG_HANDLE hOSMemHandle, + IMG_UINT32 ui32ByteOffset, + IMG_VOID *pvRangeAddrStart, + IMG_UINT32 ui32Length) +{ + /* Write-back and invalidate */ + return CheckExecuteCacheOp(hOSMemHandle, ui32ByteOffset, pvRangeAddrStart, ui32Length, + x86_flush_cache_range); +} + +IMG_BOOL OSCleanCPUCacheRangeKM(IMG_HANDLE hOSMemHandle, + IMG_UINT32 ui32ByteOffset, + IMG_VOID *pvRangeAddrStart, + IMG_UINT32 ui32Length) +{ + /* No clean feature on x86 */ + return CheckExecuteCacheOp(hOSMemHandle, ui32ByteOffset, pvRangeAddrStart, ui32Length, + x86_flush_cache_range); +} + +IMG_BOOL OSInvalidateCPUCacheRangeKM(IMG_HANDLE hOSMemHandle, + IMG_UINT32 ui32ByteOffset, + IMG_VOID *pvRangeAddrStart, + IMG_UINT32 ui32Length) +{ + /* No invalidate-only support */ + return CheckExecuteCacheOp(hOSMemHandle, ui32ByteOffset, pvRangeAddrStart, ui32Length, + x86_flush_cache_range); +} + +#elif defined(__arm__) || defined(__aarch64__) + +static void per_cpu_cache_flush(void *arg) +{ + PVR_UNREFERENCED_PARAMETER(arg); +#if defined(__aarch64__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0)) + /* + NOTE: Regarding arm64 global flush support on >= Linux v4.2: + - Global cache flush support is 
deprecated from v4.2 onwards + - Cache maintenance is done using UM/KM VA maintenance _only_ + - If you find that more time is spent in VA cache maintenance + - Implement arm64 assembly sequence for global flush here + - asm volatile (); + - If you do not want to implement the global cache assembly + - Disable KM cache maintenance support in UM cache.c + - Remove this PVR_LOG message + */ + PVR_LOG(("arm64: Global d-cache flush assembly not implemented")); +#else + flush_cache_all(); +#endif +} + +IMG_VOID OSCleanCPUCacheKM(IMG_VOID) +{ + /* No full (inner) cache clean op */ + ON_EACH_CPU(per_cpu_cache_flush, NULL, 1); +#if defined(CONFIG_OUTER_CACHE) + outer_clean_range(0, ULONG_MAX); +#endif +} + +IMG_VOID OSFlushCPUCacheKM(IMG_VOID) +{ + ON_EACH_CPU(per_cpu_cache_flush, NULL, 1); +#if defined(CONFIG_OUTER_CACHE) && \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) + /* To use the "deferred flush" (not clean) DDK feature you need a kernel + * implementation of outer_flush_all() for ARM CPUs with an outer cache + * controller (e.g. PL310, common with Cortex A9 and later). + * + * Reference DDKs don't require this functionality, as they will only + * clean the cache, never flush (clean+invalidate) it. 
+ */ + outer_flush_all(); +#endif +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34)) +static inline size_t pvr_dmac_range_len(const void *pvStart, const void *pvEnd) +{ + return (size_t)((char *)pvEnd - (char *)pvStart); +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) + +static void pvr_dmac_inv_range(const void *pvStart, const void *pvEnd) +{ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34)) + dmac_inv_range(pvStart, pvEnd); +#else + dmac_map_area(pvStart, pvr_dmac_range_len(pvStart, pvEnd), DMA_FROM_DEVICE); +#endif +} + +static void pvr_dmac_clean_range(const void *pvStart, const void *pvEnd) +{ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34)) + dmac_clean_range(pvStart, pvEnd); +#else + dmac_map_area(pvStart, pvr_dmac_range_len(pvStart, pvEnd), DMA_TO_DEVICE); +#endif +} + +#else /* LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) */ + +static void pvr_flush_range(phys_addr_t pStart, phys_addr_t pEnd) +{ +#if defined(__aarch64__) + struct dma_map_ops *dma_ops = get_dma_ops(PVRLDMGetDevice()); + dma_ops->sync_single_for_device(NULL, pStart, pEnd - pStart, DMA_TO_DEVICE); + dma_ops->sync_single_for_cpu(NULL, pStart, pEnd - pStart, DMA_FROM_DEVICE); +#else + arm_dma_ops.sync_single_for_device(NULL, pStart, pEnd - pStart, DMA_TO_DEVICE); + arm_dma_ops.sync_single_for_cpu(NULL, pStart, pEnd - pStart, DMA_FROM_DEVICE); +#endif +} + +static void pvr_clean_range(phys_addr_t pStart, phys_addr_t pEnd) +{ +#if defined(__aarch64__) + struct dma_map_ops *dma_ops = get_dma_ops(PVRLDMGetDevice()); + dma_ops->sync_single_for_device(NULL, pStart, pEnd - pStart, DMA_TO_DEVICE); +#else + arm_dma_ops.sync_single_for_device(NULL, pStart, pEnd - pStart, DMA_TO_DEVICE); +#endif + +} + +static void pvr_invalidate_range(phys_addr_t pStart, phys_addr_t pEnd) +{ +#if defined(__aarch64__) + struct dma_map_ops *dma_ops = get_dma_ops(PVRLDMGetDevice()); + dma_ops->sync_single_for_cpu(NULL, pStart, pEnd - pStart, DMA_FROM_DEVICE); +#else + 
arm_dma_ops.sync_single_for_cpu(NULL, pStart, pEnd - pStart, DMA_FROM_DEVICE); +#endif +} + +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) */ + +IMG_BOOL OSFlushCPUCacheRangeKM(IMG_HANDLE hOSMemHandle, + IMG_UINT32 ui32ByteOffset, + IMG_VOID *pvRangeAddrStart, + IMG_UINT32 ui32Length) +{ + return CheckExecuteCacheOp(hOSMemHandle, ui32ByteOffset, + pvRangeAddrStart, ui32Length, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0) + pvr_flush_range +#elif defined(CONFIG_OUTER_CACHE) + dmac_flush_range, outer_flush_range +#else + dmac_flush_range +#endif + ); +} + +IMG_BOOL OSCleanCPUCacheRangeKM(IMG_HANDLE hOSMemHandle, + IMG_UINT32 ui32ByteOffset, + IMG_VOID *pvRangeAddrStart, + IMG_UINT32 ui32Length) +{ + return CheckExecuteCacheOp(hOSMemHandle, ui32ByteOffset, + pvRangeAddrStart, ui32Length, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0) + pvr_clean_range +#elif defined(CONFIG_OUTER_CACHE) + pvr_dmac_clean_range, outer_clean_range +#else + pvr_dmac_clean_range +#endif + ); +} + +IMG_BOOL OSInvalidateCPUCacheRangeKM(IMG_HANDLE hOSMemHandle, + IMG_UINT32 ui32ByteOffset, + IMG_VOID *pvRangeAddrStart, + IMG_UINT32 ui32Length) +{ + return CheckExecuteCacheOp(hOSMemHandle, ui32ByteOffset, + pvRangeAddrStart, ui32Length, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0) + pvr_invalidate_range +#elif defined(CONFIG_OUTER_CACHE) + pvr_dmac_inv_range, outer_inv_range +#else + pvr_dmac_inv_range +#endif + ); +} + +#elif defined(__mips__) + +/* + * dmac cache functions are supposed to be used for dma + * memory which comes from dma-able memory. 
However examining + * the implementation of dmac cache functions and experimenting, + * can assert that dmac functions are safe to use for high-mem + * memory as well for our OS{Clean/Flush/Invalidate}Cache functions + * + */ + +static inline size_t pvr_dma_range_len(const void *pvStart, const void *pvEnd) +{ + return (size_t)((char *)pvEnd - (char *)pvStart); +} + +static void pvr_dma_cache_wback_inv(const void *pvStart, const void *pvEnd) +{ + size_t uLength = pvr_dma_range_len(pvStart, pvEnd); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) + dma_cache_sync(NULL, (void *)pvStart, uLength, DMA_BIDIRECTIONAL); +#else + dma_cache_wback_inv((unsigned long)pvStart, uLength); +#endif +} + +static void pvr_dma_cache_wback(const void *pvStart, const void *pvEnd) +{ + size_t uLength = pvr_dma_range_len(pvStart, pvEnd); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) + dma_cache_sync(NULL, (void *)pvStart, uLength, DMA_TO_DEVICE); +#else + dma_cache_wback((unsigned long)pvStart, uLength); +#endif +} + +static void pvr_dma_cache_inv(const void *pvStart, const void *pvEnd) +{ + size_t uLength = pvr_dma_range_len(pvStart, pvEnd); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) + dma_cache_sync(NULL, (void *)pvStart, uLength, DMA_FROM_DEVICE); +#else + dma_cache_inv((unsigned long)pvStart, uLength); +#endif +} + +IMG_VOID OSCleanCPUCacheKM(IMG_VOID) +{ + /* dmac functions flush full cache if size is larger than + * {s,d}-cache size. This is a workaround for the fact that + * __flush_cache_all is not an exported symbol. Please + * replace with custom function if available in latest + * version of linux being used. + * Arbitrary large number (1MB) which should be larger than + * mips {s,d}-cache sizes for some time in future. + * */ + pvr_dma_cache_wback(0, (const void *)0x200000); +} + +IMG_VOID OSFlushCPUCacheKM(IMG_VOID) +{ + /* dmac functions flush full cache if size is larger than + * {s,d}-cache size. 
This is a workaround for the fact that + * __flush_cache_all is not an exported symbol. Please + * replace with custom function if available in latest + * version of linux being used. + * Arbitrary large number (1MB) which should be larger than + * mips {s,d}-cache sizes for some time in future. + * */ + pvr_dma_cache_wback_inv(0, (const void *)0x200000); +} + +IMG_BOOL OSFlushCPUCacheRangeKM(IMG_HANDLE hOSMemHandle, + IMG_UINT32 ui32ByteOffset, + IMG_VOID *pvRangeAddrStart, + IMG_UINT32 ui32Length) +{ + return CheckExecuteCacheOp(hOSMemHandle, ui32ByteOffset, + pvRangeAddrStart, ui32Length, + pvr_dma_cache_wback_inv); +} + +IMG_BOOL OSCleanCPUCacheRangeKM(IMG_HANDLE hOSMemHandle, + IMG_UINT32 ui32ByteOffset, + IMG_VOID *pvRangeAddrStart, + IMG_UINT32 ui32Length) +{ + return CheckExecuteCacheOp(hOSMemHandle, ui32ByteOffset, + pvRangeAddrStart, ui32Length, + pvr_dma_cache_wback); +} + +IMG_BOOL OSInvalidateCPUCacheRangeKM(IMG_HANDLE hOSMemHandle, + IMG_UINT32 ui32ByteOffset, + IMG_VOID *pvRangeAddrStart, + IMG_UINT32 ui32Length) +{ + return CheckExecuteCacheOp(hOSMemHandle, ui32ByteOffset, + pvRangeAddrStart, ui32Length, + pvr_dma_cache_inv); +} + +#else + +#error "Implement CPU cache flush/clean/invalidate primitives for this CPU!" 
+ +#endif + +typedef struct _AtomicStruct +{ + atomic_t RefCount; +} AtomicStruct; + +PVRSRV_ERROR OSAtomicAlloc(IMG_PVOID *ppvRefCount) +{ + AtomicStruct *psRefCount; + + psRefCount = kmalloc(sizeof(AtomicStruct), GFP_KERNEL); + if (psRefCount == NULL) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + atomic_set(&psRefCount->RefCount, 0); + + *ppvRefCount = psRefCount; + return PVRSRV_OK; +} + +IMG_VOID OSAtomicFree(IMG_PVOID pvRefCount) +{ + AtomicStruct *psRefCount = pvRefCount; + + PVR_ASSERT(atomic_read(&psRefCount->RefCount) == 0); + kfree(psRefCount); +} + +IMG_VOID OSAtomicInc(IMG_PVOID pvRefCount) +{ + AtomicStruct *psRefCount = pvRefCount; + + atomic_inc(&psRefCount->RefCount); +} + +IMG_BOOL OSAtomicDecAndTest(IMG_PVOID pvRefCount) +{ + AtomicStruct *psRefCount = pvRefCount; + + return atomic_dec_and_test(&psRefCount->RefCount) ? IMG_TRUE:IMG_FALSE; +} + +IMG_UINT32 OSAtomicRead(IMG_PVOID pvRefCount) +{ + AtomicStruct *psRefCount = pvRefCount; + + return (IMG_UINT32) atomic_read(&psRefCount->RefCount); +} + +IMG_VOID OSReleaseBridgeLock(IMG_VOID) +{ + LinuxUnLockMutex(&gPVRSRVLock); +} + +IMG_VOID OSReacquireBridgeLock(IMG_VOID) +{ + LinuxLockMutexNested(&gPVRSRVLock, PVRSRV_LOCK_CLASS_BRIDGE); +} + +typedef struct _OSTime +{ + unsigned long ulTime; +} OSTime; + +PVRSRV_ERROR OSTimeCreateWithUSOffset(IMG_PVOID *pvRet, IMG_UINT32 ui32USOffset) +{ + OSTime *psOSTime; + + psOSTime = kmalloc(sizeof(OSTime), GFP_KERNEL); + if (psOSTime == IMG_NULL) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + psOSTime->ulTime = jiffies + usecs_to_jiffies(ui32USOffset); + *pvRet = psOSTime; + return PVRSRV_OK; +} + + +IMG_BOOL OSTimeHasTimePassed(IMG_PVOID pvData) +{ + OSTime *psOSTime = pvData; + + if (time_is_before_jiffies(psOSTime->ulTime)) + { + return IMG_TRUE; + } + return IMG_FALSE; +} + +IMG_VOID OSTimeDestroy(IMG_PVOID pvData) +{ + kfree(pvData); +} + +IMG_VOID OSGetCurrentProcessNameKM(IMG_CHAR *pszName, IMG_UINT32 ui32Size) +{ + strncpy(pszName, current->comm, 
MIN(ui32Size,TASK_COMM_LEN)); +} + +/* One time osfunc initialisation */ +PVRSRV_ERROR PVROSFuncInit(IMG_VOID) +{ +#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) + { + psTimerWorkQueue = create_workqueue("pvr_timer"); + if (psTimerWorkQueue == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: couldn't create timer workqueue", __FUNCTION__)); + return PVRSRV_ERROR_UNABLE_TO_CREATE_THREAD; + + } + } +#endif + +#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) || defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE) + { + IMG_UINT32 ui32i; + + for (ui32i = 0; ui32i < OS_MAX_TIMERS; ui32i++) + { + TIMER_CALLBACK_DATA *psTimerCBData = &sTimers[ui32i]; + + INIT_WORK(&psTimerCBData->sWork, OSTimerWorkQueueCallBack); + } + } +#endif + +#if defined(SUPPORT_ION) && !defined(LMA) + { + PVRSRV_ERROR eError; + eError = IonInit(); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: IonInit failed", __FUNCTION__)); + } + } +#endif + return PVRSRV_OK; +} + +/* + * Osfunc deinitialisation. + * Note that PVROSFuncInit may not have been called + */ +IMG_VOID PVROSFuncDeInit(IMG_VOID) +{ +#if defined (SUPPORT_ION) + IonDeinit(); +#endif +#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) + if (psTimerWorkQueue != NULL) + { + destroy_workqueue(psTimerWorkQueue); + } +#endif +} diff --git a/sgx_km/eurasia_km/services4/srvkm/env/linux/osperproc.c b/sgx_km/eurasia_km/services4/srvkm/env/linux/osperproc.c new file mode 100644 index 0000000..9b014dc --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/env/linux/osperproc.c @@ -0,0 +1,155 @@ +/*************************************************************************/ /*! +@Title Linux specific per process data functions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "services_headers.h" +#include "osperproc.h" + +#include "env_perproc.h" + +#if defined (SUPPORT_ION) +#include +#include "ion.h" +extern struct ion_device *gpsIonDev; +#endif + +extern IMG_UINT32 gui32ReleasePID; + +PVRSRV_ERROR OSPerProcessPrivateDataInit(IMG_HANDLE *phOsPrivateData) +{ + PVRSRV_ERROR eError; + IMG_HANDLE hBlockAlloc; + PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc; + + eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, + sizeof(PVRSRV_ENV_PER_PROCESS_DATA), + phOsPrivateData, + &hBlockAlloc, + "Environment per Process Data"); + + if (eError != PVRSRV_OK) + { + *phOsPrivateData = IMG_NULL; + + PVR_DPF((PVR_DBG_ERROR, "%s: OSAllocMem failed (%d)", __FUNCTION__, eError)); + return eError; + } + + psEnvPerProc = (PVRSRV_ENV_PER_PROCESS_DATA *)*phOsPrivateData; + OSMemSet(psEnvPerProc, 0, sizeof(*psEnvPerProc)); + + psEnvPerProc->hBlockAlloc = hBlockAlloc; + + /* Linux specific mmap processing */ + LinuxMMapPerProcessConnect(psEnvPerProc); + +#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT) + /* Linked list of PVRSRV_FILE_PRIVATE_DATA structures */ + INIT_LIST_HEAD(&psEnvPerProc->sDRMAuthListHead); +#endif + +#if defined(SUPPORT_ION) + OSSNPrintf(psEnvPerProc->azIonClientName, ION_CLIENT_NAME_SIZE, "pvr_ion_client-%d", OSGetCurrentProcessIDKM()); + psEnvPerProc->psIONClient = + ion_client_create(gpsIonDev, + psEnvPerProc->azIonClientName); + + if (IS_ERR_OR_NULL(psEnvPerProc->psIONClient)) + { + PVR_DPF((PVR_DBG_ERROR, "OSPerProcessPrivateDataInit: Couldn't create " + "ion client for per process data")); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } +#endif /* defined(SUPPORT_ION) */ + + return PVRSRV_OK; +} + +PVRSRV_ERROR OSPerProcessPrivateDataDeInit(IMG_HANDLE hOsPrivateData) +{ + PVRSRV_ERROR eError; + PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc; + + if (hOsPrivateData == IMG_NULL) + { + return PVRSRV_OK; + } + + psEnvPerProc = 
(PVRSRV_ENV_PER_PROCESS_DATA *)hOsPrivateData; + +#if defined(SUPPORT_ION) + if (psEnvPerProc->psIONClient) + { + ion_client_destroy(psEnvPerProc->psIONClient); + psEnvPerProc->psIONClient = IMG_NULL; + } +#endif /* defined(SUPPORT_ION) */ + + /* Linux specific mmap processing */ + LinuxMMapPerProcessDisconnect(psEnvPerProc); + + /* Remove per process /proc entries */ + RemovePerProcessProcDir(psEnvPerProc); + + eError = OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, + sizeof(PVRSRV_ENV_PER_PROCESS_DATA), + hOsPrivateData, + psEnvPerProc->hBlockAlloc); + /*not nulling pointer, copy on stack*/ + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: OSFreeMem failed (%d)", __FUNCTION__, eError)); + } + + return PVRSRV_OK; +} + +PVRSRV_ERROR OSPerProcessSetHandleOptions(PVRSRV_HANDLE_BASE *psHandleBase) +{ + return LinuxMMapPerProcessHandleOptions(psHandleBase); +} + +IMG_HANDLE LinuxTerminatingProcessPrivateData(IMG_VOID) +{ + if(!gui32ReleasePID) + return NULL; + return PVRSRVPerProcessPrivateData(gui32ReleasePID); +} diff --git a/sgx_km/eurasia_km/services4/srvkm/env/linux/pdump.c b/sgx_km/eurasia_km/services4/srvkm/env/linux/pdump.c new file mode 100644 index 0000000..548e8dc --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/env/linux/pdump.c @@ -0,0 +1,855 @@ +/*************************************************************************/ /*! +@Title Parameter dump macro target routines +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if defined (SUPPORT_SGX) || defined (SUPPORT_VGX) +#if defined (PDUMP) + +#include +#include +#if defined (SUPPORT_SGX) +#include "sgxdefs.h" /* Is this still needed? */ +#endif +#include "services_headers.h" + +#include "pvrversion.h" +#include "pvr_debug.h" + +#include "dbgdrvif.h" +#if defined (SUPPORT_SGX) +#include "sgxmmu.h"/* Is this still needed? */ +#endif +#include "mm.h" +#include "pdump_km.h" +#include "pdump_int.h" + +#include // sprintf +#include // strncpy, strlen +#include + +static IMG_BOOL PDumpWriteString2 (IMG_CHAR * pszString, IMG_UINT32 ui32Flags); +static IMG_BOOL PDumpWriteILock (PDBG_STREAM psStream, IMG_UINT8 *pui8Data, IMG_UINT32 ui32Count, IMG_UINT32 ui32Flags); +static IMG_VOID DbgSetFrame (PDBG_STREAM psStream, IMG_UINT32 ui32Frame); +static IMG_VOID DbgSetMarker (PDBG_STREAM psStream, IMG_UINT32 ui32Marker); + +#define PDUMP_DATAMASTER_PIXEL (1) +#define PDUMP_DATAMASTER_EDM (3) + +/* + Maximum file size to split output files +*/ +#define MAX_FILE_SIZE 0x40000000 + +static atomic_t gsPDumpSuspended = ATOMIC_INIT(0); + +static PDBGKM_SERVICE_TABLE gpfnDbgDrv = IMG_NULL; + +DEFINE_MUTEX(sPDumpLock); +DEFINE_MUTEX(sPDumpMsgLock); + +IMG_CHAR *pszStreamName[PDUMP_NUM_STREAMS] = { "ParamStream2", + "ScriptStream2", + "DriverInfoStream"}; +typedef struct PDBG_PDUMP_STATE_TAG +{ + PDBG_STREAM psStream[PDUMP_NUM_STREAMS]; + IMG_UINT32 ui32ParamFileNum; + + IMG_CHAR *pszMsg; + IMG_CHAR *pszScript; + IMG_CHAR *pszFile; + +} PDBG_PDUMP_STATE; + +static PDBG_PDUMP_STATE gsDBGPdumpState = {{IMG_NULL}, 0, IMG_NULL, IMG_NULL, IMG_NULL}; + +#define SZ_MSG_SIZE_MAX PVRSRV_PDUMP_MAX_COMMENT_SIZE-1 +#define SZ_SCRIPT_SIZE_MAX PVRSRV_PDUMP_MAX_COMMENT_SIZE-1 +#define SZ_FILENAME_SIZE_MAX PVRSRV_PDUMP_MAX_COMMENT_SIZE-1 + + + + +static inline IMG_BOOL PDumpSuspended(IMG_VOID) +{ + return (atomic_read(&gsPDumpSuspended) != 0) ? IMG_TRUE : IMG_FALSE; +} + +/*! 
+ * \name PDumpOSGetScriptString + */ +PVRSRV_ERROR PDumpOSGetScriptString(IMG_HANDLE *phScript, + IMG_UINT32 *pui32MaxLen) +{ + *phScript = (IMG_HANDLE)gsDBGPdumpState.pszScript; + *pui32MaxLen = SZ_SCRIPT_SIZE_MAX; + if ((!*phScript) || PDumpSuspended()) + { + return PVRSRV_ERROR_PDUMP_NOT_ACTIVE; + } + return PVRSRV_OK; +} + +/*! + * \name PDumpOSGetMessageString + */ +PVRSRV_ERROR PDumpOSGetMessageString(IMG_CHAR **ppszMsg, + IMG_UINT32 *pui32MaxLen) +{ + *ppszMsg = gsDBGPdumpState.pszMsg; + *pui32MaxLen = SZ_MSG_SIZE_MAX; + if ((!*ppszMsg) || PDumpSuspended()) + { + return PVRSRV_ERROR_PDUMP_NOT_ACTIVE; + } + return PVRSRV_OK; +} + +/*! + * \name PDumpOSGetFilenameString + */ +PVRSRV_ERROR PDumpOSGetFilenameString(IMG_CHAR **ppszFile, + IMG_UINT32 *pui32MaxLen) +{ + *ppszFile = gsDBGPdumpState.pszFile; + *pui32MaxLen = SZ_FILENAME_SIZE_MAX; + if ((!*ppszFile) || PDumpSuspended()) + { + return PVRSRV_ERROR_PDUMP_NOT_ACTIVE; + } + return PVRSRV_OK; +} + +/*! + * \name PDumpOSWriteString2 + */ +IMG_BOOL PDumpOSWriteString2(IMG_HANDLE hScript, IMG_UINT32 ui32Flags) +{ + return PDumpWriteString2(hScript, ui32Flags); +} + +/*! + * \name PDumpOSBufprintf + */ +PVRSRV_ERROR PDumpOSBufprintf(IMG_HANDLE hBuf, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR* pszFormat, ...) +{ + IMG_CHAR* pszBuf = hBuf; + IMG_INT32 n; + va_list vaArgs; + + va_start(vaArgs, pszFormat); + + n = vsnprintf(pszBuf, ui32ScriptSizeMax, pszFormat, vaArgs); + + va_end(vaArgs); + + if (n>=(IMG_INT32)ui32ScriptSizeMax || n==-1) /* glibc >= 2.1 or glibc 2.0 */ + { + PVR_DPF((PVR_DBG_ERROR, "Buffer overflow detected, pdump output may be incomplete.")); + + return PVRSRV_ERROR_PDUMP_BUF_OVERFLOW; + } + +#if defined(PDUMP_DEBUG_OUTFILES) + g_ui32EveryLineCounter++; +#endif + return PVRSRV_OK; +} + +/*! 
+ * \name PDumpOSVSprintf + */ +PVRSRV_ERROR PDumpOSVSprintf(IMG_CHAR *pszComment, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR* pszFormat, PDUMP_va_list vaArgs) +{ + IMG_INT32 n; + + n = vsnprintf(pszComment, ui32ScriptSizeMax, pszFormat, vaArgs); + + if (n>=(IMG_INT32)ui32ScriptSizeMax || n==-1) /* glibc >= 2.1 or glibc 2.0 */ + { + PVR_DPF((PVR_DBG_ERROR, "Buffer overflow detected, pdump output may be incomplete.")); + + return PVRSRV_ERROR_PDUMP_BUF_OVERFLOW; + } + + return PVRSRV_OK; +} + +/*! + * \name PDumpOSDebugPrintf + */ +IMG_VOID PDumpOSDebugPrintf(IMG_CHAR* pszFormat, ...) +{ + PVR_UNREFERENCED_PARAMETER(pszFormat); + + /* FIXME: Implement using services PVR_DBG or otherwise with kprintf */ +} + +/*! + * \name PDumpOSSprintf + */ +PVRSRV_ERROR PDumpOSSprintf(IMG_CHAR *pszComment, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR *pszFormat, ...) +{ + IMG_INT32 n; + va_list vaArgs; + + va_start(vaArgs, pszFormat); + + n = vsnprintf(pszComment, ui32ScriptSizeMax, pszFormat, vaArgs); + + va_end(vaArgs); + + if (n>=(IMG_INT32)ui32ScriptSizeMax || n==-1) /* glibc >= 2.1 or glibc 2.0 */ + { + PVR_DPF((PVR_DBG_ERROR, "Buffer overflow detected, pdump output may be incomplete.")); + + return PVRSRV_ERROR_PDUMP_BUF_OVERFLOW; + } + + return PVRSRV_OK; +} + +/*! + * \name PDumpOSBuflen + */ +IMG_UINT32 PDumpOSBuflen(IMG_HANDLE hBuffer, IMG_UINT32 ui32BufferSizeMax) +{ + IMG_CHAR* pszBuf = hBuffer; + IMG_UINT32 ui32Count = 0; + + while ((pszBuf[ui32Count]!=0) && (ui32Count= 1) && (pszBuf[ui32Count-1] != '\n') && (ui32Count= 2) && (pszBuf[ui32Count-2] != '\r') && (ui32CountpfnGetStreamOffset(psStream); +} + +/*! + * \name PDumpOSGetParamFileNum + */ +IMG_UINT32 PDumpOSGetParamFileNum(IMG_VOID) +{ + return gsDBGPdumpState.ui32ParamFileNum; +} + +/*! 
+ * \name PDumpOSWriteString + */ +IMG_BOOL PDumpOSWriteString(IMG_HANDLE hStream, + IMG_UINT8 *psui8Data, + IMG_UINT32 ui32Size, + IMG_UINT32 ui32Flags) +{ + PDBG_STREAM psStream = (PDBG_STREAM)hStream; + return PDumpWriteILock(psStream, + psui8Data, + ui32Size, + ui32Flags); +} + +/*! + * \name PDumpOSCheckForSplitting + */ +IMG_VOID PDumpOSCheckForSplitting(IMG_HANDLE hStream, IMG_UINT32 ui32Size, IMG_UINT32 ui32Flags) +{ + /* File size limit not implemented for this OS. + */ + PVR_UNREFERENCED_PARAMETER(hStream); + PVR_UNREFERENCED_PARAMETER(ui32Size); + PVR_UNREFERENCED_PARAMETER(ui32Flags); +} + +/*! + * \name PDumpOSJTInitialised + */ +IMG_BOOL PDumpOSJTInitialised(IMG_VOID) +{ + if(gpfnDbgDrv) + { + return IMG_TRUE; + } + return IMG_FALSE; +} + +/*! + * \name PDumpOSIsSuspended + */ +inline IMG_BOOL PDumpOSIsSuspended(IMG_VOID) +{ + return (atomic_read(&gsPDumpSuspended) != 0) ? IMG_TRUE : IMG_FALSE; +} + +/*! + * \name PDumpOSCPUVAddrToDevPAddr + */ +IMG_VOID PDumpOSCPUVAddrToDevPAddr(PVRSRV_DEVICE_TYPE eDeviceType, + IMG_HANDLE hOSMemHandle, + IMG_UINT32 ui32Offset, + IMG_UINT8 *pui8LinAddr, + IMG_UINT32 ui32PageSize, + IMG_DEV_PHYADDR *psDevPAddr) +{ + IMG_CPU_PHYADDR sCpuPAddr; + + PVR_UNREFERENCED_PARAMETER(pui8LinAddr); + PVR_UNREFERENCED_PARAMETER(ui32PageSize); /* for when no assert */ + + /* Caller must now always supply hOSMemHandle, even though we only (presently) + use it here in the linux implementation */ + + PVR_ASSERT (hOSMemHandle != IMG_NULL); + + sCpuPAddr = OSMemHandleToCpuPAddr(hOSMemHandle, ui32Offset); + PVR_ASSERT((sCpuPAddr.uiAddr & (ui32PageSize - 1)) == 0); + + /* convert CPU physical addr to device physical */ + *psDevPAddr = SysCpuPAddrToDevPAddr(eDeviceType, sCpuPAddr); +} + +/*! 
+ * \name PDumpOSCPUVAddrToPhysPages + */ +IMG_VOID PDumpOSCPUVAddrToPhysPages(IMG_HANDLE hOSMemHandle, + IMG_UINT32 ui32Offset, + IMG_PUINT8 pui8LinAddr, + IMG_UINTPTR_T ui32DataPageMask, + IMG_UINT32 *pui32PageOffset) +{ + if(hOSMemHandle) + { + /* + * If a Services memory handle is provided then use it. + */ + IMG_CPU_PHYADDR sCpuPAddr; + + PVR_UNREFERENCED_PARAMETER(pui8LinAddr); + + sCpuPAddr = OSMemHandleToCpuPAddr(hOSMemHandle, ui32Offset); + *pui32PageOffset = sCpuPAddr.uiAddr & ui32DataPageMask; + } + else + { + PVR_UNREFERENCED_PARAMETER(hOSMemHandle); + PVR_UNREFERENCED_PARAMETER(ui32Offset); + + *pui32PageOffset = ((IMG_UINTPTR_T)pui8LinAddr & ui32DataPageMask); + } +} + +/*! + * \name PDumpOSDebugDriverWrite + */ +IMG_UINT32 PDumpOSDebugDriverWrite( PDBG_STREAM psStream, + PDUMP_DDWMODE eDbgDrvWriteMode, + IMG_UINT8 *pui8Data, + IMG_UINT32 ui32BCount, + IMG_UINT32 ui32Level, + IMG_UINT32 ui32DbgDrvFlags) +{ + switch(eDbgDrvWriteMode) + { + case PDUMP_WRITE_MODE_CONTINUOUS: + PVR_UNREFERENCED_PARAMETER(ui32DbgDrvFlags); + return gpfnDbgDrv->pfnDBGDrivWrite2(psStream, pui8Data, ui32BCount, ui32Level); + case PDUMP_WRITE_MODE_LASTFRAME: + return gpfnDbgDrv->pfnWriteLF(psStream, pui8Data, ui32BCount, ui32Level, ui32DbgDrvFlags); + case PDUMP_WRITE_MODE_BINCM: + PVR_UNREFERENCED_PARAMETER(ui32DbgDrvFlags); + return gpfnDbgDrv->pfnWriteBINCM(psStream, pui8Data, ui32BCount, ui32Level); + case PDUMP_WRITE_MODE_PERSISTENT: + PVR_UNREFERENCED_PARAMETER(ui32DbgDrvFlags); + return gpfnDbgDrv->pfnWritePersist(psStream, pui8Data, ui32BCount, ui32Level); + default: + PVR_UNREFERENCED_PARAMETER(ui32DbgDrvFlags); + break; + } + return 0xFFFFFFFFU; +} + +/*! 
+ * \name PDumpOSReleaseExecution + */ +IMG_VOID PDumpOSReleaseExecution(IMG_VOID) +{ + OSReleaseThreadQuanta(); +} + +/************************************************************************** + * Function Name : PDumpInit + * Outputs : None + * Returns : + * Description : Reset connection to vldbgdrv + * Then try to connect to PDUMP streams +**************************************************************************/ +IMG_VOID PDumpInit(IMG_VOID) +{ + IMG_UINT32 i; + DBGKM_CONNECT_NOTIFIER sConnectNotifier; + + /* If we tried this earlier, then we might have connected to the driver + * But if pdump.exe was running then the stream connected would fail + */ + if (!gpfnDbgDrv) + { + DBGDrvGetServiceTable(&gpfnDbgDrv); + + + // If something failed then no point in trying to connect streams + if (gpfnDbgDrv == IMG_NULL) + { + return; + } + + /* + * Pass the connection notify callback + */ + sConnectNotifier.pfnConnectNotifier = &PDumpConnectionNotify; + gpfnDbgDrv->pfnSetConnectNotifier(sConnectNotifier); + + if(!gsDBGPdumpState.pszFile) + { + if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_FILENAME_SIZE_MAX, (IMG_PVOID *)&gsDBGPdumpState.pszFile, 0, + "Filename string") != PVRSRV_OK) + { + goto init_failed; + } + } + + if(!gsDBGPdumpState.pszMsg) + { + if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_MSG_SIZE_MAX, (IMG_PVOID *)&gsDBGPdumpState.pszMsg, 0, + "Message string") != PVRSRV_OK) + { + goto init_failed; + } + } + + if(!gsDBGPdumpState.pszScript) + { + if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_SCRIPT_SIZE_MAX, (IMG_PVOID *)&gsDBGPdumpState.pszScript, 0, + "Script string") != PVRSRV_OK) + { + goto init_failed; + } + } + + for(i=0; i < PDUMP_NUM_STREAMS; i++) + { + gsDBGPdumpState.psStream[i] = gpfnDbgDrv->pfnCreateStream(pszStreamName[i], + DEBUG_CAPMODE_FRAMED, + DEBUG_OUTMODE_STREAMENABLE, + 0, + 10); + + gpfnDbgDrv->pfnSetCaptureMode(gsDBGPdumpState.psStream[i],DEBUG_CAPMODE_FRAMED,0xFFFFFFFF, 0xFFFFFFFF, 1); + gpfnDbgDrv->pfnSetFrame(gsDBGPdumpState.psStream[i],0); + } 
+ + PDUMPCOMMENT("Driver Product Name: %s", VS_PRODUCT_NAME); + PDUMPCOMMENT("Driver Product Version: %s (%s)", PVRVERSION_STRING, PVRVERSION_FAMILY); + PDUMPCOMMENT("Start of Init Phase"); + } + + return; + +init_failed: + + if(gsDBGPdumpState.pszFile) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_FILENAME_SIZE_MAX, (IMG_PVOID) gsDBGPdumpState.pszFile, 0); + gsDBGPdumpState.pszFile = IMG_NULL; + } + + if(gsDBGPdumpState.pszScript) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_SCRIPT_SIZE_MAX, (IMG_PVOID) gsDBGPdumpState.pszScript, 0); + gsDBGPdumpState.pszScript = IMG_NULL; + } + + if(gsDBGPdumpState.pszMsg) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_MSG_SIZE_MAX, (IMG_PVOID) gsDBGPdumpState.pszMsg, 0); + gsDBGPdumpState.pszMsg = IMG_NULL; + } + + /* + * Remove the connection notify callback + */ + sConnectNotifier.pfnConnectNotifier = 0; + gpfnDbgDrv->pfnSetConnectNotifier(sConnectNotifier); + + gpfnDbgDrv = IMG_NULL; +} + + +IMG_VOID PDumpDeInit(IMG_VOID) +{ + IMG_UINT32 i; + DBGKM_CONNECT_NOTIFIER sConnectNotifier; + + for(i=0; i < PDUMP_NUM_STREAMS; i++) + { + gpfnDbgDrv->pfnDestroyStream(gsDBGPdumpState.psStream[i]); + } + + if(gsDBGPdumpState.pszFile) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_FILENAME_SIZE_MAX, (IMG_PVOID) gsDBGPdumpState.pszFile, 0); + gsDBGPdumpState.pszFile = IMG_NULL; + } + + if(gsDBGPdumpState.pszScript) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_SCRIPT_SIZE_MAX, (IMG_PVOID) gsDBGPdumpState.pszScript, 0); + gsDBGPdumpState.pszScript = IMG_NULL; + } + + if(gsDBGPdumpState.pszMsg) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_MSG_SIZE_MAX, (IMG_PVOID) gsDBGPdumpState.pszMsg, 0); + gsDBGPdumpState.pszMsg = IMG_NULL; + } + + /* + * Remove the connection notify callback + */ + sConnectNotifier.pfnConnectNotifier = 0; + gpfnDbgDrv->pfnSetConnectNotifier(sConnectNotifier); + + gpfnDbgDrv = IMG_NULL; +} + +/************************************************************************** + * Function Name : PDumpStartInitPhaseKM + * Inputs : None + * 
Outputs : None + * Returns : None + * Description : Resume init phase state +**************************************************************************/ +PVRSRV_ERROR PDumpStartInitPhaseKM(IMG_VOID) +{ + IMG_UINT32 i; + + if (gpfnDbgDrv) + { + PDUMPCOMMENT("Start Init Phase"); + for(i=0; i < PDUMP_NUM_STREAMS; i++) + { + gpfnDbgDrv->pfnStartInitPhase(gsDBGPdumpState.psStream[i]); + } + } + return PVRSRV_OK; +} + +/************************************************************************** + * Function Name : PDumpStopInitPhaseKM + * Inputs : None + * Outputs : None + * Returns : None + * Description : End init phase state +**************************************************************************/ +PVRSRV_ERROR PDumpStopInitPhaseKM(IMG_VOID) +{ + IMG_UINT32 i; + + if (gpfnDbgDrv) + { + PDUMPCOMMENT("Stop Init Phase"); + + for(i=0; i < PDUMP_NUM_STREAMS; i++) + { + gpfnDbgDrv->pfnStopInitPhase(gsDBGPdumpState.psStream[i]); + } + } + return PVRSRV_OK; +} + +/************************************************************************** + * Function Name : PDumpIsLastCaptureFrameKM + * Inputs : None + * Outputs : None + * Returns : True or false + * Description : Tests whether the current frame is being pdumped +**************************************************************************/ +IMG_BOOL PDumpIsLastCaptureFrameKM(IMG_VOID) +{ + return gpfnDbgDrv->pfnIsLastCaptureFrame(gsDBGPdumpState.psStream[PDUMP_STREAM_SCRIPT2]); +} + + +/************************************************************************** + * Function Name : PDumpIsCaptureFrameKM + * Inputs : None + * Outputs : None + * Returns : True or false + * Description : Tests whether the current frame is being pdumped +**************************************************************************/ +IMG_BOOL PDumpOSIsCaptureFrameKM(IMG_VOID) +{ + if (PDumpSuspended()) + { + return IMG_FALSE; + } + return gpfnDbgDrv->pfnIsCaptureFrame(gsDBGPdumpState.psStream[PDUMP_STREAM_SCRIPT2], IMG_FALSE); +} + 
+/************************************************************************** + * Function Name : PDumpOSSetFrameKM + * Inputs : ui32Frame - frame number to set + * Outputs : None + * Returns : PVRSRV_OK + * Description : Sets a frame +**************************************************************************/ +PVRSRV_ERROR PDumpOSSetFrameKM(IMG_UINT32 ui32Frame) +{ + IMG_UINT32 ui32Stream; + + for (ui32Stream = 0; ui32Stream < PDUMP_NUM_STREAMS; ui32Stream++) + { + if (gsDBGPdumpState.psStream[ui32Stream]) + { + DbgSetFrame(gsDBGPdumpState.psStream[ui32Stream], ui32Frame); + } + } + + return PVRSRV_OK; +} + + +/***************************************************************************** + FUNCTION : PDumpWriteString2 + + PURPOSE : + + PARAMETERS : + + RETURNS : +*****************************************************************************/ +static IMG_BOOL PDumpWriteString2(IMG_CHAR * pszString, IMG_UINT32 ui32Flags) +{ + return PDumpWriteILock(gsDBGPdumpState.psStream[PDUMP_STREAM_SCRIPT2], (IMG_UINT8 *) pszString, strlen(pszString), ui32Flags); +} + + +/***************************************************************************** + FUNCTION : PDumpWriteILock + + PURPOSE : Writes, making sure it all goes... 
+ + PARAMETERS : + + RETURNS : +*****************************************************************************/ +static IMG_BOOL PDumpWriteILock(PDBG_STREAM psStream, IMG_UINT8 *pui8Data, IMG_UINT32 ui32Count, IMG_UINT32 ui32Flags) +{ + IMG_UINT32 ui32Written = 0; + if ((psStream == IMG_NULL) || PDumpSuspended() || ((ui32Flags & PDUMP_FLAGS_NEVER) != 0)) + { + PVR_DPF((PVR_DBG_MESSAGE, "PDumpWriteILock: Failed to write 0x%x bytes to stream 0x%p", ui32Count, psStream)); + return IMG_TRUE; + } + + + /* + Set the stream marker to split output files + */ + + if (psStream == gsDBGPdumpState.psStream[PDUMP_STREAM_PARAM2]) + { + IMG_UINT32 ui32ParamOutPos = gpfnDbgDrv->pfnGetStreamOffset(gsDBGPdumpState.psStream[PDUMP_STREAM_PARAM2]); + + if (ui32ParamOutPos + ui32Count > MAX_FILE_SIZE) + { + if ((gsDBGPdumpState.psStream[PDUMP_STREAM_SCRIPT2] && PDumpWriteString2("\r\n-- Splitting pdump output file\r\n\r\n", ui32Flags))) + { + DbgSetMarker(gsDBGPdumpState.psStream[PDUMP_STREAM_PARAM2], ui32ParamOutPos); + gsDBGPdumpState.ui32ParamFileNum++; + } + } + } + + ui32Written = DbgWrite(psStream, pui8Data, ui32Count, ui32Flags); + + if (ui32Written == 0xFFFFFFFF) + { + return IMG_FALSE; + } + + return IMG_TRUE; +} + +/***************************************************************************** + FUNCTION : DbgSetFrame + + PURPOSE : Sets the frame in the stream + + PARAMETERS : psStream - Stream pointer + ui32Frame - Frame number to set + + RETURNS : None +*****************************************************************************/ +static IMG_VOID DbgSetFrame(PDBG_STREAM psStream, IMG_UINT32 ui32Frame) +{ + gpfnDbgDrv->pfnSetFrame(psStream, ui32Frame); +} + +/***************************************************************************** + FUNCTION : DbgSetMarker + + PURPOSE : Sets the marker of the stream to split output files + + PARAMETERS : psStream - Stream pointer + ui32Marker - Marker number to set + + RETURNS : None 
+*****************************************************************************/ +static IMG_VOID DbgSetMarker(PDBG_STREAM psStream, IMG_UINT32 ui32Marker) +{ + gpfnDbgDrv->pfnSetMarker(psStream, ui32Marker); +} + +IMG_VOID PDumpSuspendKM(IMG_VOID) +{ + atomic_inc(&gsPDumpSuspended); +} + +IMG_VOID PDumpResumeKM(IMG_VOID) +{ + atomic_dec(&gsPDumpSuspended); +} + +/* Set to 1 if you want to debug PDump locking issues */ +#define DEBUG_PDUMP_LOCKS 0 + +#if DEBUG_PDUMP_LOCKS +static IMG_UINT32 ui32Count=0; +static IMG_UINT32 aui32LockLine[2] = {0}; +static IMG_UINT32 aui32UnlockLine[2] = {0}; +static IMG_UINT32 ui32LockLineCount = 0; +static IMG_UINT32 ui32UnlockLineCount = 0; +#endif + +IMG_VOID PDumpOSLock(IMG_UINT32 ui32Line) +{ +#if DEBUG_PDUMP_LOCKS + aui32LockLine[ui32LockLineCount++ % 2] = ui32Line; + ui32Count++; + if (ui32Count == 2) + { + IMG_UINT32 i; + printk(KERN_ERR "Double lock\n"); + dump_stack(); + for (i=0;i<2;i++) + { + printk(KERN_ERR "Lock[%d] = %d, Unlock[%d] = %d\n", i, aui32LockLine[i],i, aui32UnlockLine[i]); + } + } +#endif + mutex_lock(&sPDumpLock); +} + +IMG_VOID PDumpOSUnlock(IMG_UINT32 ui32Line) +{ + mutex_unlock(&sPDumpLock); +#if DEBUG_PDUMP_LOCKS + aui32UnlockLine[ui32UnlockLineCount++ % 2] = ui32Line; + ui32Count--; +#endif +} + +IMG_VOID PDumpOSLockMessageBuffer(IMG_VOID) +{ + mutex_lock(&sPDumpMsgLock); +} + +IMG_VOID PDumpOSUnlockMessageBuffer(IMG_VOID) +{ + mutex_unlock(&sPDumpMsgLock); +} + +#endif /* #if defined (PDUMP) */ +#endif /* #if defined (SUPPORT_SGX) */ +/***************************************************************************** + End of file (PDUMP.C) +*****************************************************************************/ diff --git a/sgx_km/eurasia_km/services4/srvkm/env/linux/private_data.h b/sgx_km/eurasia_km/services4/srvkm/env/linux/private_data.h new file mode 100644 index 0000000..e2a6c74 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/env/linux/private_data.h @@ -0,0 +1,91 @@ 
+/*************************************************************************/ /*! +@Title Linux private data structure +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef __INCLUDED_PRIVATE_DATA_H_ +#define __INCLUDED_PRIVATE_DATA_H_ + +#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT) +#include +#include +#endif + +/* This structure is required in the rare case that a process creates + * a connection to services, but before closing the file descriptor, + * does a fork(). This fork() will duplicate the file descriptor in the + * child process. If the parent process dies before the child, this can + * cause the PVRSRVRelease() method to be called in a different process + * context than the original PVRSRVOpen(). This is bad because we need + * to update the per-process data reference count and/or free the + * per-process data. So we must keep a record of which PID's per-process + * data to inspect during ->release(). 
+ */ + +typedef struct +{ + /* PID that created this services connection */ + IMG_UINT32 ui32OpenPID; + + /* Global kernel MemInfo handle */ + IMG_HANDLE hKernelMemInfo; + +#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT) + /* The private data is on a list in the per-process data structure */ + struct list_head sDRMAuthListItem; + + struct drm_file *psDRMFile; +#endif + +#if defined(SUPPORT_MEMINFO_IDS) + /* Globally unique "stamp" for kernel MemInfo */ + IMG_UINT64 ui64Stamp; +#endif /* defined(SUPPORT_MEMINFO_IDS) */ + + /* Accounting for OSAllocMem */ + IMG_HANDLE hBlockAlloc; + +#if defined(SUPPORT_DRI_DRM_EXT) + IMG_PVOID pPriv; /*private data for extending this struct*/ +#endif +} +PVRSRV_FILE_PRIVATE_DATA; + +#endif /* __INCLUDED_PRIVATE_DATA_H_ */ + diff --git a/sgx_km/eurasia_km/services4/srvkm/env/linux/proc.c b/sgx_km/eurasia_km/services4/srvkm/env/linux/proc.c new file mode 100644 index 0000000..0f954c1 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/env/linux/proc.c @@ -0,0 +1,1059 @@ +/*************************************************************************/ /*! +@Title Proc files implementation. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Functions for creating and reading proc filesystem entries. + Proc filesystem support must be built into the kernel for + these functions to be any use. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/
+
+#include <linux/version.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38))
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#endif
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+
+#include "services_headers.h"
+
+#include "queue.h"
+#include "resman.h"
+#include "pvrmmap.h"
+#include "pvr_debug.h"
+#include "pvrversion.h"
+#include "proc.h"
+#include "perproc.h"
+#include "env_perproc.h"
+#include "linkage.h"
+
+#include "lists.h"
+
+struct pvr_proc_dir_entry {
+ struct proc_dir_entry *pde;
+
+ pvr_next_proc_seq_t *next;
+ pvr_show_proc_seq_t *show;
+ pvr_off2element_proc_seq_t *off2element;
+ pvr_startstop_proc_seq_t *startstop;
+
+ pvr_proc_write_t *write;
+
+ IMG_VOID *data;
+};
+
+// The proc entry for our /proc/pvr directory
+static struct proc_dir_entry * dir;
+
+static const IMG_CHAR PVRProcDirRoot[] = "pvr";
+
+static IMG_INT pvr_proc_open(struct inode *inode,struct file *file);
+static ssize_t pvr_proc_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos);
+
+static struct file_operations pvr_proc_operations =
+{
+ .open = pvr_proc_open,
+ .read = seq_read,
+ .write = pvr_proc_write,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static void *pvr_proc_seq_start (struct seq_file *m, loff_t *pos);
+static void *pvr_proc_seq_next (struct seq_file *m, void *v, loff_t *pos);
+static void pvr_proc_seq_stop (struct seq_file *m, void *v);
+static int pvr_proc_seq_show (struct seq_file *m, void *v);
+
+static struct seq_operations pvr_proc_seq_operations =
+{
+ .start = pvr_proc_seq_start,
+ .next = pvr_proc_seq_next,
+ .stop = pvr_proc_seq_stop,
+ .show = pvr_proc_seq_show,
+};
+
+#if defined(SUPPORT_PVRSRV_DEVICE_CLASS)
+static struct pvr_proc_dir_entry* g_pProcQueue;
+#endif
+static struct pvr_proc_dir_entry* g_pProcVersion;
+static struct pvr_proc_dir_entry* g_pProcSysNodes;
+
+#ifdef DEBUG
+static struct pvr_proc_dir_entry* g_pProcDebugLevel;
+#endif
+
+#ifdef PVR_MANUAL_POWER_CONTROL
+static struct pvr_proc_dir_entry* g_pProcPowerLevel;
+#endif
+
+
+static void ProcSeqShowVersion(struct seq_file *sfile,void* el);
+
+static void ProcSeqShowSysNodes(struct seq_file *sfile,void* el);
+static void* ProcSeqOff2ElementSysNodes(struct seq_file * sfile, loff_t off);
+
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)
+#define PDE_DATA(x) PDE(x)->data
+#endif
+
+#ifdef DEBUG
+
+/*!
+******************************************************************************
+
+ @Function : ProcSeq1ElementOff2Element
+
+ @Description
+
+ Helper Offset -> Element function for /proc files with only one entry
+ without header.
+
+ @Input sfile : seq_file object related to /proc/ file
+
+ @Input off : the offset into the buffer (id of object)
+
+ @Return : Pointer to element to be shown.
+
+*****************************************************************************/
+static void* ProcSeq1ElementOff2Element(struct seq_file *sfile, loff_t off)
+{
+ PVR_UNREFERENCED_PARAMETER(sfile);
+ // Return anything that is not PVR_PROC_SEQ_START_TOKEN and NULL
+ if(!off)
+ return (void*)2;
+ return NULL;
+}
+
+#endif
+
+/*!
+******************************************************************************
+
+ @Function : ProcSeq1ElementHeaderOff2Element
+
+ @Description
+
+ Helper Offset -> Element function for /proc files with only one entry
+ with header.
+
+ @Input sfile : seq_file object related to /proc/ file
+
+ @Input off : the offset into the buffer (id of object)
+
+ @Return : Pointer to element to be shown.
+
+*****************************************************************************/
+static void* ProcSeq1ElementHeaderOff2Element(struct seq_file *sfile, loff_t off)
+{
+ PVR_UNREFERENCED_PARAMETER(sfile);
+
+ if(!off)
+ {
+ return PVR_PROC_SEQ_START_TOKEN;
+ }
+
+ // Return anything that is not PVR_PROC_SEQ_START_TOKEN and NULL
+ if(off == 1)
+ return (void*)2;
+
+ return NULL;
+}
+
+
+/*!
+******************************************************************************
+
+ @Function : pvr_proc_open
+
+ @Description
+ File opening function passed to proc_dir_entry->proc_fops for /proc entries
+ created by CreateProcReadEntrySeq.
+
+ @Input inode : inode entry of opened /proc file
+
+ @Input file : file entry of opened /proc file
+
+ @Return : 0 if no errors
+
+*****************************************************************************/
+static IMG_INT pvr_proc_open(struct inode *inode,struct file *file)
+{
+ IMG_INT ret = seq_open(file, &pvr_proc_seq_operations);
+
+ struct seq_file *seq = (struct seq_file*)file->private_data;
+ struct pvr_proc_dir_entry* ppde = PDE_DATA(inode);
+
+ /* Add pointer to handlers to seq_file structure */
+ seq->private = ppde;
+ return ret;
+}
+
+/*!
+******************************************************************************
+
+ @Function : pvr_proc_write
+
+ @Description
+ File writing function passed to proc_dir_entry->proc_fops for /proc files.
+ It's exactly the same function that is used as default one (->fs/proc/generic.c),
+ it calls proc_dir_entry->write_proc for writing procedure.
+
+*****************************************************************************/
+static ssize_t pvr_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct inode *inode = file->f_path.dentry->d_inode;
+ struct pvr_proc_dir_entry * ppde;
+
+ PVR_UNREFERENCED_PARAMETER(ppos);
+ ppde = PDE_DATA(inode);
+
+ if (!ppde->write)
+ return -EIO;
+
+ return ppde->write(file, buffer, count, ppde->data);
+}
+
+
+/*!
+******************************************************************************
+
+ @Function : pvr_proc_seq_start
+
+ @Description
+ Seq_file start function. Detailed description of seq_file workflow can
+ be found here: http://tldp.org/LDP/lkmpg/2.6/html/x861.html.
+ This function uses off2element handler.
+ + @Input proc_seq_file : sequence file entry + + @Input pos : offset within file (id of entry) + + @Return : Pointer to element from we start enumeration (0 ends it) + +*****************************************************************************/ +static void *pvr_proc_seq_start (struct seq_file *proc_seq_file, loff_t *pos) +{ + struct pvr_proc_dir_entry *ppde = (struct pvr_proc_dir_entry*)proc_seq_file->private; + if(ppde->startstop != NULL) + ppde->startstop(proc_seq_file, IMG_TRUE); + return ppde->off2element(proc_seq_file, *pos); +} + +/*! +****************************************************************************** + + @Function : pvr_proc_seq_stop + + @Description + Seq_file stop function. Detailed description of seq_file workflow can + be found here: http://tldp.org/LDP/lkmpg/2.6/html/x861.html. + + @Input proc_seq_file : sequence file entry + + @Input v : current element pointer + +*****************************************************************************/ +static void pvr_proc_seq_stop (struct seq_file *proc_seq_file, void *v) +{ + struct pvr_proc_dir_entry *ppde = (struct pvr_proc_dir_entry*)proc_seq_file->private; + PVR_UNREFERENCED_PARAMETER(v); + + if(ppde->startstop != NULL) + ppde->startstop(proc_seq_file, IMG_FALSE); +} + +/*! +****************************************************************************** + + @Function : pvr_proc_seq_next + + @Description + Seq_file next element function. Detailed description of seq_file workflow can + be found here: http://tldp.org/LDP/lkmpg/2.6/html/x861.html. 
+ It uses supplied 'next' handler for fetching next element (or 0 if there is no one) + + @Input proc_seq_file : sequence file entry + + @Input pos : offset within file (id of entry) + + @Input v : current element pointer + + @Return : next element pointer (or 0 if end) + +*****************************************************************************/ +static void *pvr_proc_seq_next (struct seq_file *proc_seq_file, void *v, loff_t *pos) +{ + struct pvr_proc_dir_entry *ppde = (struct pvr_proc_dir_entry*)proc_seq_file->private; + (*pos)++; + if(ppde->next != NULL) + return ppde->next( proc_seq_file, v, *pos ); + return ppde->off2element(proc_seq_file, *pos); +} + +/*! +****************************************************************************** + + @Function : pvr_proc_seq_show + + @Description + Seq_file show element function. Detailed description of seq_file workflow can + be found here: http://tldp.org/LDP/lkmpg/2.6/html/x861.html. + It call proper 'show' handler to show (dump) current element using seq_* functions + + @Input proc_seq_file : sequence file entry + + @Input v : current element pointer + + @Return : 0 if everything is OK + +*****************************************************************************/ +static int pvr_proc_seq_show (struct seq_file *proc_seq_file, void *v) +{ + struct pvr_proc_dir_entry *ppde = (struct pvr_proc_dir_entry*)proc_seq_file->private; + ppde->show( proc_seq_file,v ); + return 0; +} + + + +/*! +****************************************************************************** + + @Function : CreateProcEntryInDirSeq + + @Description + + Create a file under the given directory. These dynamic files can be used at + runtime to get or set information about the device. Whis version uses seq_file + interface + + @Input pdir : parent directory + + @Input name : the name of the file to create + + @Input data : aditional data that will be passed to handlers + + @Input next_handler : the function to call to provide the next element. 
OPTIONAL, if not + supplied, then off2element function is used instead + + @Input show_handler : the function to call to show element + + @Input off2element_handler : the function to call when it is needed to translate offest to element + + @Input startstop_handler : the function to call when output memory page starts or stops. OPTIONAL. + + @Input whandler : the function to interpret writes from the user + + @Return Ptr to proc entry , 0 for failure + + +*****************************************************************************/ +static struct pvr_proc_dir_entry* CreateProcEntryInDirSeq(struct proc_dir_entry *pdir, + const IMG_CHAR * name, + IMG_VOID* data, + pvr_next_proc_seq_t next_handler, + pvr_show_proc_seq_t show_handler, + pvr_off2element_proc_seq_t off2element_handler, + pvr_startstop_proc_seq_t startstop_handler, + pvr_proc_write_t whandler) +{ + + struct pvr_proc_dir_entry * ppde; + mode_t mode; + + if (!dir) + { + PVR_DPF((PVR_DBG_ERROR, "CreateProcEntryInDirSeq: cannot make proc entry /proc/%s/%s: no parent", PVRProcDirRoot, name)); + return NULL; + } + + mode = S_IFREG; + + if (show_handler) + { + mode |= S_IRUGO; + } + + if (whandler) + { + mode |= S_IWUSR; + } + + ppde = kmalloc(sizeof(struct pvr_proc_dir_entry), GFP_KERNEL); + if (!ppde) + { + PVR_DPF((PVR_DBG_ERROR, "CreateProcEntryInDirSeq: cannot make proc entry /proc/%s/%s: no memory", PVRProcDirRoot, name)); + return NULL; + } + + ppde->next = next_handler; + ppde->show = show_handler; + ppde->off2element = off2element_handler; + ppde->startstop = startstop_handler; + ppde->write = whandler; + ppde->data = data; + + ppde->pde=proc_create_data(name, mode, pdir, &pvr_proc_operations, ppde); + + if (!ppde->pde) + { + PVR_DPF((PVR_DBG_ERROR, "CreateProcEntryInDirSeq: cannot make proc entry /proc/%s/%s: proc_create_data failed", PVRProcDirRoot, name)); + kfree(ppde); + return NULL; + } + return ppde; +} + + +/*! 
+****************************************************************************** + + @Function : CreateProcReadEntrySeq + + @Description + + Create a file under /proc/pvr. These dynamic files can be used at runtime + to get information about the device. Creation WILL fail if proc support is + not compiled into the kernel. That said, the Linux kernel is not even happy + to build without /proc support these days. This version uses seq_file structure + for handling content generation. + + @Input name : the name of the file to create + + @Input data : aditional data that will be passed to handlers + + @Input next_handler : the function to call to provide the next element. OPTIONAL, if not + supplied, then off2element function is used instead + + @Input show_handler : the function to call to show element + + @Input off2element_handler : the function to call when it is needed to translate offest to element + + @Input startstop_handler : the function to call when output memory page starts or stops. OPTIONAL. + + @Return Ptr to proc entry , 0 for failure + +*****************************************************************************/ +struct pvr_proc_dir_entry* CreateProcReadEntrySeq (const IMG_CHAR * name, + IMG_VOID* data, + pvr_next_proc_seq_t next_handler, + pvr_show_proc_seq_t show_handler, + pvr_off2element_proc_seq_t off2element_handler, + pvr_startstop_proc_seq_t startstop_handler) +{ + return CreateProcEntrySeq(name, + data, + next_handler, + show_handler, + off2element_handler, + startstop_handler, + NULL); +} + +/*! +****************************************************************************** + + @Function : CreateProcEntrySeq + + @Description + + @Description + + Create a file under /proc/pvr. These dynamic files can be used at runtime + to get information about the device. Creation WILL fail if proc support is + not compiled into the kernel. That said, the Linux kernel is not even happy + to build without /proc support these days. 
This version uses seq_file structure + for handling content generation and is fuller than CreateProcReadEntrySeq (it + supports write access); + + @Input name : the name of the file to create + + @Input data : aditional data that will be passed to handlers + + @Input next_handler : the function to call to provide the next element. OPTIONAL, if not + supplied, then off2element function is used instead + + @Input show_handler : the function to call to show element + + @Input off2element_handler : the function to call when it is needed to translate offest to element + + @Input startstop_handler : the function to call when output memory page starts or stops. OPTIONAL. + + @Input whandler : the function to interpret writes from the user + + @Return Ptr to proc entry , 0 for failure + +*****************************************************************************/ +struct pvr_proc_dir_entry* CreateProcEntrySeq (const IMG_CHAR * name, + IMG_VOID* data, + pvr_next_proc_seq_t next_handler, + pvr_show_proc_seq_t show_handler, + pvr_off2element_proc_seq_t off2element_handler, + pvr_startstop_proc_seq_t startstop_handler, + pvr_proc_write_t whandler) +{ + return CreateProcEntryInDirSeq(dir, + name, + data, + next_handler, + show_handler, + off2element_handler, + startstop_handler, + whandler); +} + + + +/*! +****************************************************************************** + + @Function : CreatePerProcessProcEntrySeq + + @Description + + Create a file under /proc/pvr/. Apart from the + directory where the file is created, this works the same way as + CreateProcEntry. It's seq_file version. + + + + @Input name : the name of the file to create + + @Input data : aditional data that will be passed to handlers + + @Input next_handler : the function to call to provide the next element. 
OPTIONAL, if not + supplied, then off2element function is used instead + + @Input show_handler : the function to call to show element + + @Input off2element_handler : the function to call when it is needed to translate offest to element + + @Input startstop_handler : the function to call when output memory page starts or stops. OPTIONAL. + + @Input whandler : the function to interpret writes from the user + + @Return Ptr to proc entry , 0 for failure + +*****************************************************************************/ +struct pvr_proc_dir_entry* CreatePerProcessProcEntrySeq (const IMG_CHAR * name, + IMG_VOID* data, + pvr_next_proc_seq_t next_handler, + pvr_show_proc_seq_t show_handler, + pvr_off2element_proc_seq_t off2element_handler, + pvr_startstop_proc_seq_t startstop_handler, + pvr_proc_write_t whandler) +{ + PVRSRV_ENV_PER_PROCESS_DATA *psPerProc; + IMG_UINT32 ui32PID; + + if (!dir) + { + PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntrySeq: /proc/%s doesn't exist", PVRProcDirRoot)); + return NULL; + } + + ui32PID = OSGetCurrentProcessIDKM(); + + psPerProc = PVRSRVPerProcessPrivateData(ui32PID); + if (!psPerProc) + { + PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntrySeq: no per process data")); + return NULL; + } + + if (!psPerProc->psProcDir) + { + IMG_CHAR dirname[16]; + IMG_INT ret; + + ret = snprintf(dirname, sizeof(dirname), "%u", ui32PID); + + if (ret <=0 || ret >= (IMG_INT)sizeof(dirname)) + { + PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntries: couldn't generate per process proc directory name \"%u\"", ui32PID)); + return NULL; + } + else + { + psPerProc->psProcDir = proc_mkdir(dirname, dir); + if (!psPerProc->psProcDir) + { + PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntries: couldn't create per process proc directory /proc/%s/%u", + PVRProcDirRoot, ui32PID)); + return NULL; + } + } + } + + return CreateProcEntryInDirSeq(psPerProc->psProcDir, name, data, next_handler, + 
show_handler,off2element_handler,startstop_handler,whandler); +} + + +/*! +****************************************************************************** + + @Function : CreateProcEntries + + @Description + + Create a directory /proc/pvr and the necessary entries within it. These + dynamic files can be used at runtime to get information about the device. + Creation might fail if proc support is not compiled into the kernel or if + there is no memory + + @Input none + + @Return nothing + +*****************************************************************************/ +IMG_INT CreateProcEntries(IMG_VOID) +{ + dir = proc_mkdir (PVRProcDirRoot, NULL); + + if (!dir) + { + PVR_DPF((PVR_DBG_ERROR, "CreateProcEntries: cannot make /proc/%s directory", PVRProcDirRoot)); + + return -ENOMEM; + } + +#if defined(SUPPORT_PVRSRV_DEVICE_CLASS) + g_pProcQueue = CreateProcReadEntrySeq("queue", NULL, NULL, ProcSeqShowQueue, ProcSeqOff2ElementQueue, NULL); +#endif + g_pProcVersion = CreateProcReadEntrySeq("version", NULL, NULL, ProcSeqShowVersion, ProcSeq1ElementHeaderOff2Element, NULL); + g_pProcSysNodes = CreateProcReadEntrySeq("nodes", NULL, NULL, ProcSeqShowSysNodes, ProcSeqOff2ElementSysNodes, NULL); + + if(!g_pProcVersion || !g_pProcSysNodes +#if defined(SUPPORT_PVRSRV_DEVICE_CLASS) + || !g_pProcQueue +#endif + ) + { + PVR_DPF((PVR_DBG_ERROR, "CreateProcEntries: couldn't make /proc/%s files", PVRProcDirRoot)); + + return -ENOMEM; + } + + +#ifdef DEBUG + + g_pProcDebugLevel = CreateProcEntrySeq("debug_level", NULL, NULL, + ProcSeqShowDebugLevel, + ProcSeq1ElementOff2Element, NULL, + (IMG_VOID*)PVRDebugProcSetLevel); + if(!g_pProcDebugLevel) + { + PVR_DPF((PVR_DBG_ERROR, "CreateProcEntries: couldn't make /proc/%s/debug_level", PVRProcDirRoot)); + + return -ENOMEM; + } + +#ifdef PVR_MANUAL_POWER_CONTROL + g_pProcPowerLevel = CreateProcEntrySeq("power_control", NULL, NULL, + ProcSeqShowPowerLevel, + ProcSeq1ElementOff2Element, NULL, + PVRProcSetPowerLevel); + if(!g_pProcPowerLevel) + 
{ + PVR_DPF((PVR_DBG_ERROR, "CreateProcEntries: couldn't make /proc/%s/power_control", PVRProcDirRoot)); + + return -ENOMEM; + } +#endif +#endif + + return 0; +} + + +/*! +****************************************************************************** + + @Function : RemoveProcEntrySeq + + @Description + + Remove a single node (created using *Seq function) under /proc/pvr. + + @Input proc_entry : structure returned by Create function. + + @Return nothing + +*****************************************************************************/ +IMG_VOID RemoveProcEntrySeq(struct pvr_proc_dir_entry* ppde) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) + remove_proc_entry(ppde->pde->name, dir); +#else + proc_remove(ppde->pde); +#endif + kfree(ppde); +} + + +/*! +****************************************************************************** + + @Function : RemovePerProcessProcEntrySeq + + @Description + + Remove a single node under the per process proc directory (created by *Seq function). + + Remove a single node (created using *Seq function) under /proc/pvr. + + @Input proc_entry : structure returned by Create function. + + @Return nothing + +*****************************************************************************/ +IMG_VOID RemovePerProcessProcEntrySeq(struct pvr_proc_dir_entry* ppde) +{ + PVRSRV_ENV_PER_PROCESS_DATA *psPerProc; + + psPerProc = LinuxTerminatingProcessPrivateData(); + if (!psPerProc) + { + psPerProc = PVRSRVFindPerProcessPrivateData(); + if (!psPerProc) + { + PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntries: can't remove proc entry, no per process data")); + return; + } + } + + if (psPerProc->psProcDir) + { + PVR_DPF((PVR_DBG_MESSAGE, "Removing per-process proc entry")); +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) + remove_proc_entry(ppde->pde->name, psPerProc->psProcDir); +#else + proc_remove(ppde->pde); +#endif + kfree(ppde); + } +} + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) +/*! 
+****************************************************************************** + + @Function : RemoveProcEntry + + @Description + + Remove a single node under /proc/pvr. + + @Input name : the name of the node to remove + + @Return nothing + +*****************************************************************************/ +static IMG_VOID RemoveProcEntry(const IMG_CHAR * name) +{ + if (dir) + { + remove_proc_entry(name, dir); + PVR_DPF((PVR_DBG_MESSAGE, "Removing /proc/%s/%s", PVRProcDirRoot, name)); + } +} + + +/*! +****************************************************************************** + + @Function : RemovePerProcessProcDir + + @Description + + Remove the per process directorty under /proc/pvr. + + @Input psPerProc : environment specific per process data + + @Return nothing + +*****************************************************************************/ +IMG_VOID RemovePerProcessProcDir(PVRSRV_ENV_PER_PROCESS_DATA *psPerProc) +{ + if (psPerProc->psProcDir) + { + while (psPerProc->psProcDir->subdir) + { + PVR_DPF((PVR_DBG_WARNING, "Belatedly removing /proc/%s/%s/%s", PVRProcDirRoot, psPerProc->psProcDir->name, psPerProc->psProcDir->subdir->name)); + + RemoveProcEntry(psPerProc->psProcDir->subdir->name); + } + RemoveProcEntry(psPerProc->psProcDir->name); + } +} +#else +IMG_VOID RemovePerProcessProcDir(PVRSRV_ENV_PER_PROCESS_DATA *psPerProc) +{ + proc_remove(psPerProc->psProcDir); +} +#endif +/*! +****************************************************************************** + + @Function : RemoveProcEntries + + Description + + Proc filesystem entry deletion - Remove all proc filesystem entries for + the driver. 
+ + @Input none + + @Return nothing + +*****************************************************************************/ +IMG_VOID RemoveProcEntries(IMG_VOID) +{ +#ifdef DEBUG + RemoveProcEntrySeq( g_pProcDebugLevel ); +#ifdef PVR_MANUAL_POWER_CONTROL + RemoveProcEntrySeq( g_pProcPowerLevel ); +#endif /* PVR_MANUAL_POWER_CONTROL */ +#endif + +#if defined(SUPPORT_PVRSRV_DEVICE_CLASS) + RemoveProcEntrySeq(g_pProcQueue); +#endif + RemoveProcEntrySeq(g_pProcVersion); + RemoveProcEntrySeq(g_pProcSysNodes); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) + while (dir->subdir) + { + PVR_DPF((PVR_DBG_WARNING, "Belatedly removing /proc/%s/%s", PVRProcDirRoot, dir->subdir->name)); + + RemoveProcEntry(dir->subdir->name); + } + remove_proc_entry(PVRProcDirRoot, NULL); +#else + proc_remove(dir); +#endif + +} + +/*************************************************************************/ /*! +@Function PVRProcGetData +@Description Extract data from PVR proc object. +@Input pointer to pvr_proc_dir_entr object +@Return pointer to data object passed in to Proc create function. 
+*/ /**************************************************************************/ +void *PVRProcGetData(struct pvr_proc_dir_entry *ppde) +{ + return ppde->data; +} + +/***************************************************************************** + FUNCTION : ProcSeqShowVersion + + PURPOSE : Print the content of version to /proc file + + PARAMETERS : sfile - /proc seq_file + el - Element to print +*****************************************************************************/ +static void ProcSeqShowVersion(struct seq_file *sfile, void* el) +{ + SYS_DATA *psSysData; + IMG_CHAR *pszSystemVersionString = "None"; + + if(el == PVR_PROC_SEQ_START_TOKEN) + { + seq_printf(sfile, + "Version %s (%s) %s\n", + PVRVERSION_STRING, + PVR_BUILD_TYPE, PVR_BUILD_DIR); + return; + } + + psSysData = SysAcquireDataNoCheck(); +#if defined(SUPPORT_TI_VERSION_STRING) + if(psSysData != IMG_NULL && psSysData->szTIVersion != IMG_NULL) + { + seq_printf( sfile, "UM Services Version: %s\n", + psSysData->szTIVersion); + } +#endif + if(psSysData != IMG_NULL && psSysData->pszVersionString != IMG_NULL) + { + pszSystemVersionString = psSysData->pszVersionString; + } + + seq_printf( sfile, "System Version String: %s\n", pszSystemVersionString); +} + +static const IMG_CHAR *deviceTypeToString(PVRSRV_DEVICE_TYPE deviceType) +{ + switch (deviceType) + { + default: + { + static IMG_CHAR text[10]; + + sprintf(text, "?%x", (IMG_UINT)deviceType); + + return text; + } + } +} + + +static const IMG_CHAR *deviceClassToString(PVRSRV_DEVICE_CLASS deviceClass) +{ + switch (deviceClass) + { + case PVRSRV_DEVICE_CLASS_3D: + { + return "3D"; + } + case PVRSRV_DEVICE_CLASS_DISPLAY: + { + return "display"; + } + case PVRSRV_DEVICE_CLASS_BUFFER: + { + return "buffer"; + } + default: + { + static IMG_CHAR text[10]; + + sprintf(text, "?%x", (IMG_UINT)deviceClass); + return text; + } + } +} + +static IMG_VOID* DecOffPsDev_AnyVaCb(PVRSRV_DEVICE_NODE *psNode, va_list va) +{ + off_t *pOff = va_arg(va, off_t*); + if (--(*pOff)) 
+ {
+ return IMG_NULL;
+ }
+ else
+ {
+ return psNode;
+ }
+}
+
+/*****************************************************************************
+ FUNCTION : ProcSeqShowSysNodes
+
+ PURPOSE : Print the content of version to /proc file
+
+ PARAMETERS : sfile - /proc seq_file
+ el - Element to print
+*****************************************************************************/
+static void ProcSeqShowSysNodes(struct seq_file *sfile,void* el)
+{
+ PVRSRV_DEVICE_NODE *psDevNode;
+
+ if(el == PVR_PROC_SEQ_START_TOKEN)
+ {
+ seq_printf( sfile,
+ "Registered nodes\n"
+ "Addr Type Class Index Ref pvDev Size Res\n");
+ return;
+ }
+
+ psDevNode = (PVRSRV_DEVICE_NODE*)el;
+
+ seq_printf( sfile,
+ "%p %-8s %-8s %4d %2u %p %3u %p\n",
+ psDevNode,
+ deviceTypeToString(psDevNode->sDevId.eDeviceType),
+ deviceClassToString(psDevNode->sDevId.eDeviceClass),
+ psDevNode->sDevId.eDeviceClass,
+ psDevNode->ui32RefCount,
+ psDevNode->pvDevice,
+ psDevNode->ui32pvDeviceSize,
+ psDevNode->hResManContext);
+}
+
+/*****************************************************************************
+ FUNCTION : ProcSeqOff2ElementSysNodes
+
+ PURPOSE : Translate offset to element (/proc stuff)
+
+ PARAMETERS : sfile - /proc seq_file
+ off - the offset into the buffer
+
+ RETURNS : element to print
+*****************************************************************************/
+static void* ProcSeqOff2ElementSysNodes(struct seq_file * sfile, loff_t off)
+{
+ SYS_DATA *psSysData;
+ PVRSRV_DEVICE_NODE *psDevNode = IMG_NULL;
+
+ PVR_UNREFERENCED_PARAMETER(sfile);
+
+ if(!off)
+ {
+ return PVR_PROC_SEQ_START_TOKEN;
+ }
+
+ psSysData = SysAcquireDataNoCheck();
+ if (psSysData != IMG_NULL)
+ {
+ /* Find Dev Node */
+ psDevNode = (PVRSRV_DEVICE_NODE*)
+ List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList,
+ DecOffPsDev_AnyVaCb,
+ &off);
+ }
+
+ /* Return anything that is not PVR_PROC_SEQ_START_TOKEN and NULL */
+ return (void*)psDevNode;
+}
+
+/***************************************************************************** + End of file (proc.c) +*****************************************************************************/ diff --git a/sgx_km/eurasia_km/services4/srvkm/env/linux/proc.h b/sgx_km/eurasia_km/services4/srvkm/env/linux/proc.h new file mode 100644 index 0000000..e3abec2 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/env/linux/proc.h @@ -0,0 +1,95 @@ +/*************************************************************************/ /*! +@Title Proc interface definition. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Functions for creating and reading proc filesystem entries. + Refer to proc.c +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef __SERVICES_PROC_H__ +#define __SERVICES_PROC_H__ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0)) +#include +#endif +#include +#include "img_defs.h" + +struct pvr_proc_dir_entry; + +#define PVR_PROC_SEQ_START_TOKEN (void*)1 +typedef void* (pvr_next_proc_seq_t)(struct seq_file *,void*,loff_t); +typedef void* (pvr_off2element_proc_seq_t)(struct seq_file *, loff_t); +typedef void (pvr_show_proc_seq_t)(struct seq_file *,void*); +typedef void (pvr_startstop_proc_seq_t)(struct seq_file *, IMG_BOOL start); + +typedef int (pvr_proc_write_t)(struct file *file, const char __user *buffer, + unsigned long count, void *data); + +IMG_INT CreateProcEntries(void); +void RemoveProcEntries(void); + +struct pvr_proc_dir_entry* CreateProcReadEntrySeq(const IMG_CHAR* name, + IMG_VOID* data, + pvr_next_proc_seq_t next_handler, + pvr_show_proc_seq_t show_handler, + pvr_off2element_proc_seq_t off2element_handler, + pvr_startstop_proc_seq_t startstop_handler); + +struct pvr_proc_dir_entry* CreateProcEntrySeq(const IMG_CHAR* name, + IMG_VOID* data, + pvr_next_proc_seq_t next_handler, + pvr_show_proc_seq_t show_handler, + 
pvr_off2element_proc_seq_t off2element_handler, + pvr_startstop_proc_seq_t startstop_handler, + pvr_proc_write_t whandler); + +struct pvr_proc_dir_entry* CreatePerProcessProcEntrySeq(const IMG_CHAR* name, + IMG_VOID* data, + pvr_next_proc_seq_t next_handler, + pvr_show_proc_seq_t show_handler, + pvr_off2element_proc_seq_t off2element_handler, + pvr_startstop_proc_seq_t startstop_handler, + pvr_proc_write_t whandler); + +void RemoveProcEntrySeq(struct pvr_proc_dir_entry* proc_entry); +void RemovePerProcessProcEntrySeq(struct pvr_proc_dir_entry* proc_entry); + +void *PVRProcGetData(struct pvr_proc_dir_entry* ppde); + +#endif diff --git a/sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_bridge_k.c b/sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_bridge_k.c new file mode 100644 index 0000000..a637e96 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_bridge_k.c @@ -0,0 +1,761 @@ +/*************************************************************************/ /*! +@Title PVR Bridge Module (kernel side) +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Receives calls from the user portion of services and + despatches them to functions in the kernel portion. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include + +#include "img_defs.h" +#include "services.h" +#include "pvr_bridge.h" +#include "perproc.h" +#include "mutex.h" +#include "syscommon.h" +#include "pvr_debug.h" +#include "proc.h" +#include "private_data.h" +#include "linkage.h" +#include "pvr_bridge_km.h" +#include "pvr_uaccess.h" +#include "refcount.h" +#include "buffer_manager.h" + +#if defined(SUPPORT_DRI_DRM) +#include +#include "pvr_drm.h" +#if defined(PVR_SECURE_DRM_AUTH_EXPORT) +#include "env_perproc.h" +#endif +#endif + +/* VGX: */ +#if defined(SUPPORT_VGX) +#include "vgx_bridge.h" +#endif + +/* SGX: */ +#if defined(SUPPORT_SGX) +#include "sgx_bridge.h" +#endif + +#include "bridged_pvr_bridge.h" + +#if defined(SUPPORT_DRI_DRM) +#define PRIVATE_DATA(pFile) ((pFile)->driver_priv) +#else +#define PRIVATE_DATA(pFile) ((pFile)->private_data) +#endif + +#if defined(DEBUG_BRIDGE_KM) + +static struct pvr_proc_dir_entry *g_ProcBridgeStats =0; +static void* ProcSeqNextBridgeStats(struct seq_file *sfile,void* el,loff_t off); +static void ProcSeqShowBridgeStats(struct seq_file *sfile,void* el); +static void* ProcSeqOff2ElementBridgeStats(struct seq_file * sfile, loff_t off); +static void ProcSeqStartstopBridgeStats(struct seq_file *sfile,IMG_BOOL start); + +#endif + +extern PVRSRV_LINUX_MUTEX gPVRSRVLock; + +#if defined(SUPPORT_MEMINFO_IDS) +IMG_UINT64 g_ui64MemInfoID; +#endif /* defined(SUPPORT_MEMINFO_IDS) */ + +PVRSRV_ERROR +LinuxBridgeInit(IMG_VOID) +{ +#if defined(DEBUG_BRIDGE_KM) + { + g_ProcBridgeStats = CreateProcReadEntrySeq( + "bridge_stats", + NULL, + ProcSeqNextBridgeStats, + ProcSeqShowBridgeStats, + ProcSeqOff2ElementBridgeStats, + ProcSeqStartstopBridgeStats + ); + if(!g_ProcBridgeStats) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + } +#endif + return CommonBridgeInit(); +} + +IMG_VOID +LinuxBridgeDeInit(IMG_VOID) +{ +#if defined(DEBUG_BRIDGE_KM) + RemoveProcEntrySeq(g_ProcBridgeStats); +#endif +} + +#if 
defined(DEBUG_BRIDGE_KM) + +/* + * Lock MMap regions list (called on page start/stop while reading /proc/mmap) + * + * sfile : seq_file that handles /proc file + * start : TRUE if it's start, FALSE if it's stop + * + */ +static void ProcSeqStartstopBridgeStats(struct seq_file *sfile,IMG_BOOL start) +{ + if(start) + { + LinuxLockMutexNested(&gPVRSRVLock, PVRSRV_LOCK_CLASS_BRIDGE); + } + else + { + LinuxUnLockMutex(&gPVRSRVLock); + } +} + + +/* + * Convert offset (index from KVOffsetTable) to element + * (called when reading /proc/mmap file) + + * sfile : seq_file that handles /proc file + * off : index into the KVOffsetTable from which to print + * + * returns void* : Pointer to element that will be dumped + * +*/ +static void* ProcSeqOff2ElementBridgeStats(struct seq_file *sfile, loff_t off) +{ + if(!off) + { + return PVR_PROC_SEQ_START_TOKEN; + } + + if(off > BRIDGE_DISPATCH_TABLE_ENTRY_COUNT) + { + return (void*)0; + } + + + return (void*)&g_BridgeDispatchTable[off-1]; +} + +/* + * Gets next MMap element to show. 
(called when reading /proc/mmap file) + + * sfile : seq_file that handles /proc file + * el : actual element + * off : index into the KVOffsetTable from which to print + * + * returns void* : Pointer to element to show (0 ends iteration) +*/ +static void* ProcSeqNextBridgeStats(struct seq_file *sfile,void* el,loff_t off) +{ + return ProcSeqOff2ElementBridgeStats(sfile,off); +} + + +/* + * Show MMap element (called when reading /proc/mmap file) + + * sfile : seq_file that handles /proc file + * el : actual element + * +*/ +static void ProcSeqShowBridgeStats(struct seq_file *sfile,void* el) +{ + PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *psEntry = ( PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY*)el; + + if(el == PVR_PROC_SEQ_START_TOKEN) + { + seq_printf(sfile, + "Total ioctl call count = %u\n" + "Total number of bytes copied via copy_from_user = %u\n" + "Total number of bytes copied via copy_to_user = %u\n" + "Total number of bytes copied via copy_*_user = %u\n\n" + "%-45s | %-40s | %10s | %20s | %10s\n", + g_BridgeGlobalStats.ui32IOCTLCount, + g_BridgeGlobalStats.ui32TotalCopyFromUserBytes, + g_BridgeGlobalStats.ui32TotalCopyToUserBytes, + g_BridgeGlobalStats.ui32TotalCopyFromUserBytes+g_BridgeGlobalStats.ui32TotalCopyToUserBytes, + "Bridge Name", + "Wrapper Function", + "Call Count", + "copy_from_user Bytes", + "copy_to_user Bytes" + ); + return; + } + + seq_printf(sfile, + "%-45s %-40s %-10u %-20u %-10u\n", + psEntry->pszIOCName, + psEntry->pszFunctionName, + psEntry->ui32CallCount, + psEntry->ui32CopyFromUserTotalBytes, + psEntry->ui32CopyToUserTotalBytes); +} +#endif /* DEBUG_BRIDGE_KM */ + + +#if defined(SUPPORT_DRI_DRM) +int +PVRSRV_BridgeDispatchKM(struct drm_device unref__ *dev, void *arg, struct drm_file *pFile) +#else +long +PVRSRV_BridgeDispatchKM(struct file *pFile, unsigned int unref__ ioctlCmd, unsigned long arg) +#endif +{ + IMG_UINT32 cmd; +#if !defined(SUPPORT_DRI_DRM) + PVRSRV_BRIDGE_PACKAGE *psBridgePackageUM = (PVRSRV_BRIDGE_PACKAGE *)arg; + PVRSRV_BRIDGE_PACKAGE 
sBridgePackageKM; +#endif + PVRSRV_BRIDGE_PACKAGE *psBridgePackageKM; + IMG_UINT32 ui32PID = OSGetCurrentProcessIDKM(); + PVRSRV_PER_PROCESS_DATA *psPerProc; + IMG_INT err = -EFAULT; + + LinuxLockMutexNested(&gPVRSRVLock, PVRSRV_LOCK_CLASS_BRIDGE); + +#if defined(SUPPORT_DRI_DRM) + psBridgePackageKM = (PVRSRV_BRIDGE_PACKAGE *)arg; + PVR_ASSERT(psBridgePackageKM != IMG_NULL); +#else + psBridgePackageKM = &sBridgePackageKM; + + if(!OSAccessOK(PVR_VERIFY_WRITE, + psBridgePackageUM, + sizeof(PVRSRV_BRIDGE_PACKAGE))) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Received invalid pointer to function arguments", + __FUNCTION__)); + + goto unlock_and_return; + } + + /* FIXME - Currently the CopyFromUserWrapper which collects stats about + * how much data is shifted to/from userspace isn't available to us + * here. */ + if(OSCopyFromUser(IMG_NULL, + psBridgePackageKM, + psBridgePackageUM, + sizeof(PVRSRV_BRIDGE_PACKAGE)) + != PVRSRV_OK) + { + goto unlock_and_return; + } +#endif + + cmd = psBridgePackageKM->ui32BridgeID; + + if(cmd != PVRSRV_BRIDGE_CONNECT_SERVICES) + { + PVRSRV_ERROR eError; + + eError = PVRSRVLookupHandle(KERNEL_HANDLE_BASE, + (IMG_PVOID *)&psPerProc, + psBridgePackageKM->hKernelServices, + PVRSRV_HANDLE_TYPE_PERPROC_DATA); + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid kernel services handle (%d)", + __FUNCTION__, eError)); + goto unlock_and_return; + } + + if(psPerProc->ui32PID != ui32PID) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Process %d tried to access data " + "belonging to process %d", __FUNCTION__, ui32PID, + psPerProc->ui32PID)); + goto unlock_and_return; + } + } + else + { + /* lookup per-process data for this process */ + psPerProc = PVRSRVPerProcessData(ui32PID); + if(psPerProc == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRV_BridgeDispatchKM: " + "Couldn't create per-process data area")); + goto unlock_and_return; + } + } + + psBridgePackageKM->ui32BridgeID = PVRSRV_GET_BRIDGE_ID(psBridgePackageKM->ui32BridgeID); + + switch(cmd) + { + 
case PVRSRV_BRIDGE_EXPORT_DEVICEMEM_2: + { + PVRSRV_FILE_PRIVATE_DATA *psPrivateData = PRIVATE_DATA(pFile); + + if(psPrivateData->hKernelMemInfo) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Can only export one MemInfo " + "per file descriptor", __FUNCTION__)); + err = -EINVAL; + goto unlock_and_return; + } + break; + } + + case PVRSRV_BRIDGE_MAP_DEV_MEMORY_2: + { + PVRSRV_BRIDGE_IN_MAP_DEV_MEMORY *psMapDevMemIN = + (PVRSRV_BRIDGE_IN_MAP_DEV_MEMORY *)(IMG_UINTPTR_T)psBridgePackageKM->hParamIn; + PVRSRV_FILE_PRIVATE_DATA *psPrivateData = PRIVATE_DATA(pFile); + + if(!psPrivateData->hKernelMemInfo) + { + PVR_DPF((PVR_DBG_ERROR, "%s: File descriptor has no " + "associated MemInfo handle", __FUNCTION__)); + err = -EINVAL; + goto unlock_and_return; + } + + if (pvr_put_user(psPrivateData->hKernelMemInfo, &psMapDevMemIN->hKernelMemInfo) != 0) + { + err = -EFAULT; + goto unlock_and_return; + } + break; + } + + default: + { + PVRSRV_FILE_PRIVATE_DATA *psPrivateData = PRIVATE_DATA(pFile); + + if(psPrivateData->hKernelMemInfo) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Import/Export handle tried " + "to use privileged service", __FUNCTION__)); + goto unlock_and_return; + } + break; + } + } + +#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT) + switch(cmd) + { + case PVRSRV_BRIDGE_MAP_DEV_MEMORY: + case PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY: + { + PVRSRV_FILE_PRIVATE_DATA *psPrivateData; + int authenticated = pFile->authenticated; + PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc; + + if (authenticated) + { + break; + } + + /* + * The DRM file structure we are using for Services + * is not one that DRI authentication was done on. + * Look for an authenticated file structure for + * this process, making sure the DRM master is the + * same as ours. 
+ */ + psEnvPerProc = (PVRSRV_ENV_PER_PROCESS_DATA *)PVRSRVProcessPrivateData(psPerProc); + if (psEnvPerProc == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Process private data not allocated", __FUNCTION__)); + err = -EFAULT; + goto unlock_and_return; + } + + list_for_each_entry(psPrivateData, &psEnvPerProc->sDRMAuthListHead, sDRMAuthListItem) + { + struct drm_file *psDRMFile = psPrivateData->psDRMFile; + + if (pFile->master == psDRMFile->master) + { + authenticated |= psDRMFile->authenticated; + if (authenticated) + { + break; + } + } + } + + if (!authenticated) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Not authenticated for mapping device or device class memory", __FUNCTION__)); + err = -EPERM; + goto unlock_and_return; + } + break; + } + default: + break; + } +#endif /* defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT) */ + + err = BridgedDispatchKM(psPerProc, psBridgePackageKM); + if(err != PVRSRV_OK) + goto unlock_and_return; + + switch(cmd) + { + case PVRSRV_BRIDGE_EXPORT_DEVICEMEM_2: + { + PVRSRV_BRIDGE_OUT_EXPORTDEVICEMEM *psExportDeviceMemOUT = + (PVRSRV_BRIDGE_OUT_EXPORTDEVICEMEM *)(IMG_UINTPTR_T)psBridgePackageKM->hParamOut; + PVRSRV_FILE_PRIVATE_DATA *psPrivateData = PRIVATE_DATA(pFile); + IMG_HANDLE hMemInfo; + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo; + + if (pvr_get_user(hMemInfo, &psExportDeviceMemOUT->hMemInfo) != 0) + { + err = -EFAULT; + goto unlock_and_return; + } + + /* Look up the meminfo we just exported */ + if(PVRSRVLookupHandle(KERNEL_HANDLE_BASE, + (IMG_PVOID *)&psKernelMemInfo, + hMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to look up export handle", __FUNCTION__)); + err = -EFAULT; + goto unlock_and_return; + } + + /* Bump the refcount; decremented on release of the fd */ + PVRSRVKernelMemInfoIncRef(psKernelMemInfo); + + /* Tell the XProc about the export if required */ + if (psKernelMemInfo->sShareMemWorkaround.bInUse) + { + 
BM_XProcIndexAcquire(psKernelMemInfo->sShareMemWorkaround.ui32ShareIndex); + } + + psPrivateData->hKernelMemInfo = hMemInfo; +#if defined(SUPPORT_MEMINFO_IDS) + psPrivateData->ui64Stamp = ++g_ui64MemInfoID; + + psKernelMemInfo->ui64Stamp = psPrivateData->ui64Stamp; + if (pvr_put_user(psPrivateData->ui64Stamp, &psExportDeviceMemOUT->ui64Stamp) != 0) + { + err = -EFAULT; + goto unlock_and_return; + } +#endif + break; + } + +#if defined(SUPPORT_MEMINFO_IDS) + case PVRSRV_BRIDGE_MAP_DEV_MEMORY: + case PVRSRV_BRIDGE_MAP_DEV_MEMORY_2: + { + PVRSRV_BRIDGE_OUT_MAP_DEV_MEMORY *psMapDeviceMemoryOUT = + (PVRSRV_BRIDGE_OUT_MAP_DEV_MEMORY *)(IMG_UINTPTR_T)psBridgePackageKM->hParamOut; + PVRSRV_FILE_PRIVATE_DATA *psPrivateData = PRIVATE_DATA(pFile); + if (pvr_put_user(psPrivateData->ui64Stamp, &psMapDeviceMemoryOUT->sDstClientMemInfo.ui64Stamp) != 0) + { + err = -EFAULT; + goto unlock_and_return; + } + break; + } + + case PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY: + { + PVRSRV_BRIDGE_OUT_MAP_DEVICECLASS_MEMORY *psDeviceClassMemoryOUT = + (PVRSRV_BRIDGE_OUT_MAP_DEVICECLASS_MEMORY *)(IMG_UINTPTR_T)psBridgePackageKM->hParamOut; + if (pvr_put_user(++g_ui64MemInfoID, &psDeviceClassMemoryOUT->sClientMemInfo.ui64Stamp) != 0) + { + err = -EFAULT; + goto unlock_and_return; + } + break; + } +#endif /* defined(SUPPORT_MEMINFO_IDS) */ + + default: + break; + } + +unlock_and_return: + LinuxUnLockMutex(&gPVRSRVLock); + return err; +} + +#if defined(CONFIG_COMPAT) +#if defined(SUPPORT_DRI_DRM) +int +PVRSRV_BridgeCompatDispatchKM(struct drm_device unref__ *dev, void *arg, struct drm_file *pFile) +#else +long PVRSRV_BridgeCompatDispatchKM(struct file *pFile, unsigned int unref__ ioctlCmd, unsigned long arg) +#endif +{ + struct bridge_package_from_32 + { + IMG_UINT32 bridge_id; /*!< ioctl bridge group */ + IMG_UINT32 size; /*!< size of structure */ + IMG_UINT64 addr_param_in; /*!< input data buffer */ + IMG_UINT64 addr_param_out; /*!< output data buffer */ + IMG_UINT32 in_buffer_size; /*!< size of 
input data buffer */ + IMG_UINT32 out_buffer_size; /*!< size of output data buffer */ + IMG_UINT64 hKernelServices; /*!< kernel servcies handle */ + }; + +#if !defined(SUPPORT_DRI_DRM) + struct bridge_package_from_32 params; + struct bridge_package_from_32 * const params_addr = ¶ms; +#endif + PVRSRV_BRIDGE_PACKAGE sBridgePackageKM; + PVRSRV_BRIDGE_PACKAGE *psBridgePackageKM; + IMG_UINT32 ui32PID = OSGetCurrentProcessIDKM(); + PVRSRV_PER_PROCESS_DATA *psPerProc; + IMG_INT err = -EFAULT; + + LinuxLockMutexNested(&gPVRSRVLock, PVRSRV_LOCK_CLASS_BRIDGE); +#if defined(SUPPORT_DRI_DRM) + sBridgePackageKM = (PVRSRV_BRIDGE_PACKAGE)(*(PVRSRV_BRIDGE_PACKAGE*)arg); + psBridgePackageKM = (PVRSRV_BRIDGE_PACKAGE *)arg; + PVR_ASSERT(psBridgePackageKM != IMG_NULL); +#else + if(!OSAccessOK(PVR_VERIFY_READ, (void *) arg, sizeof(struct bridge_package_from_32))) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Received invalid pointer to function arguments", __FUNCTION__)); + goto unlock_and_return; + } + + if(OSCopyFromUser(NULL, params_addr, (void*) arg, sizeof(struct bridge_package_from_32)) + != PVRSRV_OK) + { + goto unlock_and_return; + } + + sBridgePackageKM.ui32BridgeID = PVRSRV_GET_BRIDGE_ID(params_addr->bridge_id); + sBridgePackageKM.ui32Size = sizeof(sBridgePackageKM); + sBridgePackageKM.hParamIn = (IMG_HANDLE) ((IMG_UINTPTR_T) params_addr->addr_param_in); + sBridgePackageKM.hParamOut = (IMG_HANDLE) ((IMG_UINTPTR_T) params_addr->addr_param_out); + sBridgePackageKM.ui32InBufferSize = params_addr->in_buffer_size; + sBridgePackageKM.ui32OutBufferSize = params_addr->out_buffer_size; + sBridgePackageKM.hKernelServices = (IMG_HANDLE) ((IMG_SIZE_T) params_addr->hKernelServices); + + psBridgePackageKM = &sBridgePackageKM; +#endif + if(sBridgePackageKM.ui32BridgeID != PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_CONNECT_SERVICES)) + { + PVRSRV_ERROR eError; + + eError = PVRSRVLookupHandle(KERNEL_HANDLE_BASE, + (IMG_PVOID *)&psPerProc, + psBridgePackageKM->hKernelServices, + PVRSRV_HANDLE_TYPE_PERPROC_DATA); 
+ if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid kernel services handle (%d)", + __FUNCTION__, eError)); + goto unlock_and_return; + } + + if(psPerProc->ui32PID != ui32PID) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Process %d tried to access data " + "belonging to process %d", __FUNCTION__, ui32PID, + psPerProc->ui32PID)); + goto unlock_and_return; + } + } + else + { + /* lookup per-process data for this process */ + psPerProc = PVRSRVPerProcessData(ui32PID); + if(psPerProc == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRV_BridgeDispatchKM: " + "Couldn't create per-process data area")); + goto unlock_and_return; + } + } + + switch(sBridgePackageKM.ui32BridgeID) + { + case PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_EXPORT_DEVICEMEM_2): + { + PVRSRV_FILE_PRIVATE_DATA *psPrivateData = PRIVATE_DATA(pFile); + + if(psPrivateData->hKernelMemInfo) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Can only export one MemInfo " + "per file descriptor", __FUNCTION__)); + err = -EINVAL; + goto unlock_and_return; + } + break; + } + + case PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_MAP_DEV_MEMORY_2): + { + PVRSRV_BRIDGE_IN_MAP_DEV_MEMORY *psMapDevMemIN = + (PVRSRV_BRIDGE_IN_MAP_DEV_MEMORY *)(IMG_UINTPTR_T)psBridgePackageKM->hParamIn; + PVRSRV_FILE_PRIVATE_DATA *psPrivateData = PRIVATE_DATA(pFile); + + if(!psPrivateData->hKernelMemInfo) + { + PVR_DPF((PVR_DBG_ERROR, "%s: File descriptor has no " + "associated MemInfo handle", __FUNCTION__)); + err = -EINVAL; + goto unlock_and_return; + } + + if (pvr_put_user(psPrivateData->hKernelMemInfo, &psMapDevMemIN->hKernelMemInfo) != 0) + { + err = -EFAULT; + goto unlock_and_return; + } + break; + } + + default: + { + PVRSRV_FILE_PRIVATE_DATA *psPrivateData = PRIVATE_DATA(pFile); + + if(psPrivateData->hKernelMemInfo) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Import/Export handle tried " + "to use privileged service", __FUNCTION__)); + goto unlock_and_return; + } + break; + } + } + + err = BridgedDispatchKM(psPerProc, psBridgePackageKM); + if(err != PVRSRV_OK) + 
goto unlock_and_return; + + switch(sBridgePackageKM.ui32BridgeID) + { + case PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_EXPORT_DEVICEMEM_2): + { + PVRSRV_BRIDGE_OUT_EXPORTDEVICEMEM *psExportDeviceMemOUT = + (PVRSRV_BRIDGE_OUT_EXPORTDEVICEMEM *)(IMG_UINTPTR_T)psBridgePackageKM->hParamOut; + PVRSRV_FILE_PRIVATE_DATA *psPrivateData = PRIVATE_DATA(pFile); + IMG_HANDLE hMemInfo; + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo; + + if (pvr_get_user(hMemInfo, &psExportDeviceMemOUT->hMemInfo) != 0) + { + err = -EFAULT; + goto unlock_and_return; + } + + /* Look up the meminfo we just exported */ + if(PVRSRVLookupHandle(KERNEL_HANDLE_BASE, + (IMG_PVOID *)&psKernelMemInfo, + hMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to look up export handle", __FUNCTION__)); + err = -EFAULT; + goto unlock_and_return; + } + + /* Bump the refcount; decremented on release of the fd */ + PVRSRVKernelMemInfoIncRef(psKernelMemInfo); + + /* Tell the XProc about the export if required */ + if (psKernelMemInfo->sShareMemWorkaround.bInUse) + { + BM_XProcIndexAcquire(psKernelMemInfo->sShareMemWorkaround.ui32ShareIndex); + } + + psPrivateData->hKernelMemInfo = hMemInfo; +#if defined(SUPPORT_MEMINFO_IDS) + psPrivateData->ui64Stamp = ++g_ui64MemInfoID; + + psKernelMemInfo->ui64Stamp = psPrivateData->ui64Stamp; + if (pvr_put_user(psPrivateData->ui64Stamp, &psExportDeviceMemOUT->ui64Stamp) != 0) + { + err = -EFAULT; + goto unlock_and_return; + } +#endif + break; + } + +#if defined(SUPPORT_MEMINFO_IDS) + case PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_MAP_DEV_MEMORY): + case PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_MAP_DEV_MEMORY_2): + { + PVRSRV_BRIDGE_OUT_MAP_DEV_MEMORY *psMapDeviceMemoryOUT = + (PVRSRV_BRIDGE_OUT_MAP_DEV_MEMORY *)(IMG_UINTPTR_T)psBridgePackageKM->hParamOut; + PVRSRV_FILE_PRIVATE_DATA *psPrivateData = PRIVATE_DATA(pFile); + if (pvr_put_user(psPrivateData->ui64Stamp, &psMapDeviceMemoryOUT->sDstClientMemInfo.ui64Stamp) != 0) + { + err = -EFAULT; + goto 
unlock_and_return; + } + break; + } + + case PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY): + { + PVRSRV_BRIDGE_OUT_MAP_DEVICECLASS_MEMORY *psDeviceClassMemoryOUT = + (PVRSRV_BRIDGE_OUT_MAP_DEVICECLASS_MEMORY *)(IMG_UINTPTR_T)psBridgePackageKM->hParamOut; + if (pvr_put_user(++g_ui64MemInfoID, &psDeviceClassMemoryOUT->sClientMemInfo.ui64Stamp) != 0) + { + err = -EFAULT; + goto unlock_and_return; + } + break; + } +#endif /* defined(SUPPORT_MEMINFO_IDS) */ + + default: + break; + } + +unlock_and_return: + LinuxUnLockMutex(&gPVRSRVLock); + return err; +} +#endif /* defined(CONFIG_COMPAT) */ diff --git a/sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_counting_timeline.c b/sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_counting_timeline.c new file mode 100644 index 0000000..c0485a1 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_counting_timeline.c @@ -0,0 +1,158 @@ +/*************************************************************************/ /*! +@File +@Title PowerVR Linux software "counting" timeline fence implementation +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Strictly Confidential. 
+*/ /**************************************************************************/ + +#include +#include +#include +#include + +#include "img_types.h" +#include "services_headers.h" +#include "servicesext.h" +#include "pvr_counting_timeline.h" +#include "pvr_sw_fence.h" + +struct PVR_COUNTING_FENCE_TIMELINE { + char name[32]; + struct PVR_SW_FENCE_CONTEXT *psSwFenceCtx; + + spinlock_t sActive_fences_lock; + IMG_UINT64 ui64Current_value; /* guarded by active_fences_lock */ + struct list_head sActive_fences; + + struct kref sRef; +}; + +struct PVR_COUNTING_FENCE { + IMG_UINT64 ui64Value; + struct dma_fence *psFence; + struct list_head sActive_list_entry; +}; + +struct PVR_COUNTING_FENCE_TIMELINE *pvr_counting_fence_timeline_create(const char *name) +{ + struct PVR_COUNTING_FENCE_TIMELINE *psFenceTimeline = kmalloc(sizeof(*psFenceTimeline), GFP_KERNEL); + + if (!psFenceTimeline) + goto err_out; + + strlcpy(psFenceTimeline->name, name, sizeof(psFenceTimeline->name)); + + psFenceTimeline->psSwFenceCtx = pvr_sw_fence_context_create(psFenceTimeline->name, "pvr_sw_sync"); + if (!psFenceTimeline->psSwFenceCtx) + goto err_free_timeline; + + psFenceTimeline->ui64Current_value = 0; + kref_init(&psFenceTimeline->sRef); + spin_lock_init(&psFenceTimeline->sActive_fences_lock); + INIT_LIST_HEAD(&psFenceTimeline->sActive_fences); + +err_out: + return psFenceTimeline; + +err_free_timeline: + kfree(psFenceTimeline); + psFenceTimeline = NULL; + goto err_out; +} + +void pvr_counting_fence_timeline_force_complete(struct PVR_COUNTING_FENCE_TIMELINE *psFenceTimeline) +{ + struct list_head *entry, *tmp; + unsigned long flags; + + spin_lock_irqsave(&psFenceTimeline->sActive_fences_lock, flags); + + list_for_each_safe(entry, tmp, &psFenceTimeline->sActive_fences) + { + struct PVR_COUNTING_FENCE *psPvrCountingFence = list_entry(entry, struct PVR_COUNTING_FENCE, sActive_list_entry); + dma_fence_signal(psPvrCountingFence->psFence); + dma_fence_put(psPvrCountingFence->psFence); + 
psPvrCountingFence->psFence = NULL; + list_del(&psPvrCountingFence->sActive_list_entry); + kfree(psPvrCountingFence); + } + spin_unlock_irqrestore(&psFenceTimeline->sActive_fences_lock, flags); +} + +static void pvr_counting_fence_timeline_destroy(struct kref *kref) +{ + struct PVR_COUNTING_FENCE_TIMELINE *psFenceTimeline = container_of(kref, struct PVR_COUNTING_FENCE_TIMELINE, sRef); + + WARN_ON(!list_empty(&psFenceTimeline->sActive_fences)); + + pvr_sw_fence_context_destroy(psFenceTimeline->psSwFenceCtx); + kfree(psFenceTimeline); +} + +void pvr_counting_fence_timeline_put(struct PVR_COUNTING_FENCE_TIMELINE *psFenceTimeline) +{ + kref_put(&psFenceTimeline->sRef, pvr_counting_fence_timeline_destroy); +} + +struct PVR_COUNTING_FENCE_TIMELINE *pvr_counting_fence_timeline_get(struct PVR_COUNTING_FENCE_TIMELINE *psFenceTimeline) +{ + if (!psFenceTimeline) + return NULL; + kref_get(&psFenceTimeline->sRef); + return psFenceTimeline; +} + +struct dma_fence *pvr_counting_fence_create(struct PVR_COUNTING_FENCE_TIMELINE *psFenceTimeline, IMG_UINT64 ui64Value) +{ + unsigned long flags; + struct dma_fence *psSwFence; + struct PVR_COUNTING_FENCE *psCountFence = kmalloc(sizeof(*psCountFence), GFP_KERNEL); + + if (!psCountFence) + return NULL; + + psSwFence = pvr_sw_fence_create(psFenceTimeline->psSwFenceCtx); + if (!psSwFence) + goto err_free_fence; + + psCountFence->psFence = dma_fence_get(psSwFence); + psCountFence->ui64Value = ui64Value; + + spin_lock_irqsave(&psFenceTimeline->sActive_fences_lock, flags); + + list_add_tail(&psCountFence->sActive_list_entry, &psFenceTimeline->sActive_fences); + + spin_unlock_irqrestore(&psFenceTimeline->sActive_fences_lock, flags); + + return psSwFence; + +err_free_fence: + kfree(psCountFence); + return NULL; +} + +void pvr_counting_fence_timeline_inc(struct PVR_COUNTING_FENCE_TIMELINE *psFenceTimeline, IMG_UINT64 ui64Value) +{ + struct list_head *entry, *tmp; + unsigned long flags; + + spin_lock_irqsave(&psFenceTimeline->sActive_fences_lock, 
flags); + + psFenceTimeline->ui64Current_value += ui64Value; + + list_for_each_safe(entry, tmp, &psFenceTimeline->sActive_fences) + { + struct PVR_COUNTING_FENCE *psCountFence = list_entry(entry, struct PVR_COUNTING_FENCE, sActive_list_entry); + if (psCountFence->ui64Value <= psFenceTimeline->ui64Current_value) + { + dma_fence_signal(psCountFence->psFence); + dma_fence_put(psCountFence->psFence); + psCountFence->psFence = NULL; + list_del(&psCountFence->sActive_list_entry); + kfree(psCountFence); + } + } + + spin_unlock_irqrestore(&psFenceTimeline->sActive_fences_lock, flags); +} diff --git a/sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_counting_timeline.h b/sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_counting_timeline.h new file mode 100644 index 0000000..b8e2d96 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_counting_timeline.h @@ -0,0 +1,25 @@ +/*************************************************************************/ /*! +@File +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Strictly Confidential. 
+*/ /**************************************************************************/ + +#if !defined(__PVR_COUNTING_TIMELINE_H__) +#define __PVR_COUNTING_TIMELINE_H__ + +struct PVR_COUNTING_FENCE_TIMELINE; + +struct PVR_COUNTING_FENCE_TIMELINE *pvr_counting_fence_timeline_create(const char *name); + +void pvr_counting_fence_timeline_put(struct PVR_COUNTING_FENCE_TIMELINE *psFenceTimeline); + +struct PVR_COUNTING_FENCE_TIMELINE *pvr_counting_fence_timeline_get(struct PVR_COUNTING_FENCE_TIMELINE *psFenceTimeline); + +struct dma_fence *pvr_counting_fence_create(struct PVR_COUNTING_FENCE_TIMELINE *psFenceTimeline, u64 value); + +void pvr_counting_fence_timeline_inc(struct PVR_COUNTING_FENCE_TIMELINE *psFenceTimeline, u64 value); + +void pvr_counting_fence_timeline_force_complete(struct PVR_COUNTING_FENCE_TIMELINE *psFenceTimeline); + +#endif /* !defined(__PVR_COUNTING_TIMELINE_H__) */ diff --git a/sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_debug.c b/sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_debug.c new file mode 100644 index 0000000..621a30d --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_debug.c @@ -0,0 +1,522 @@ +/*************************************************************************/ /*! +@Title Debug Functionality +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Provides kernel side Debug Functionality +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)) +#ifndef AUTOCONF_INCLUDED +#include +#endif +#endif + +#include +#include +#include +#include +#include +#include +#include +#include // strncpy, strlen +#include +#include +#include "img_types.h" +#include "servicesext.h" +#include "pvr_debug.h" +#include "srvkm.h" +#include "mutex.h" +#include "linkage.h" +#include "pvr_uaccess.h" + +#if !defined(CONFIG_PREEMPT) +#define PVR_DEBUG_ALWAYS_USE_SPINLOCK +#endif + +#if defined(PVRSRV_NEED_PVR_DPF) + +/******** BUFFERED LOG MESSAGES ********/ + +/* Because we don't want to have to handle CCB wrapping, each buffered + * message is rounded up to PVRSRV_DEBUG_CCB_MESG_MAX bytes. This means + * there is the same fixed number of messages that can be stored, + * regardless of message length. + */ + +#if defined(PVRSRV_DEBUG_CCB_MAX) + +#define PVRSRV_DEBUG_CCB_MESG_MAX PVR_MAX_DEBUG_MESSAGE_LEN + +#include +#include + +typedef struct +{ + const IMG_CHAR *pszFile; + IMG_INT iLine; + IMG_UINT32 ui32TID; + IMG_CHAR pcMesg[PVRSRV_DEBUG_CCB_MESG_MAX]; + struct timeval sTimeVal; +} +PVRSRV_DEBUG_CCB; + +static PVRSRV_DEBUG_CCB gsDebugCCB[PVRSRV_DEBUG_CCB_MAX] = { { 0 } }; + +static IMG_UINT giOffset = 0; + +static PVRSRV_LINUX_MUTEX gsDebugCCBMutex; + +static void +AddToBufferCCB(const IMG_CHAR *pszFileName, IMG_UINT32 ui32Line, + const IMG_CHAR *szBuffer) +{ + LinuxLockMutex(&gsDebugCCBMutex); + + gsDebugCCB[giOffset].pszFile = pszFileName; + gsDebugCCB[giOffset].iLine = ui32Line; + gsDebugCCB[giOffset].ui32TID = current->tgid; + + do_gettimeofday(&gsDebugCCB[giOffset].sTimeVal); + + strncpy(gsDebugCCB[giOffset].pcMesg, szBuffer, PVRSRV_DEBUG_CCB_MESG_MAX - 1); + gsDebugCCB[giOffset].pcMesg[PVRSRV_DEBUG_CCB_MESG_MAX - 1] = 0; + + giOffset = (giOffset + 1) % PVRSRV_DEBUG_CCB_MAX; + + LinuxUnLockMutex(&gsDebugCCBMutex); +} + +IMG_EXPORT IMG_VOID PVRSRVDebugPrintfDumpCCB(void) +{ + int 
i; + + LinuxLockMutex(&gsDebugCCBMutex); + + for(i = 0; i < PVRSRV_DEBUG_CCB_MAX; i++) + { + PVRSRV_DEBUG_CCB *psDebugCCBEntry = + &gsDebugCCB[(giOffset + i) % PVRSRV_DEBUG_CCB_MAX]; + + /* Early on, we won't have PVRSRV_DEBUG_CCB_MAX messages */ + if(!psDebugCCBEntry->pszFile) + continue; + + printk("%s:%d:\t[%5ld.%6ld] %s\n", + psDebugCCBEntry->pszFile, + psDebugCCBEntry->iLine, + (long)psDebugCCBEntry->sTimeVal.tv_sec, + (long)psDebugCCBEntry->sTimeVal.tv_usec, + psDebugCCBEntry->pcMesg); + } + + LinuxUnLockMutex(&gsDebugCCBMutex); +} + +#else /* defined(PVRSRV_DEBUG_CCB_MAX) */ +static INLINE void +AddToBufferCCB(const IMG_CHAR *pszFileName, IMG_UINT32 ui32Line, + const IMG_CHAR *szBuffer) +{ + (void)pszFileName; + (void)szBuffer; + (void)ui32Line; +} + +IMG_EXPORT IMG_VOID PVRSRVDebugPrintfDumpCCB(void) +{ + /* Not available */ +} + +#endif /* defined(PVRSRV_DEBUG_CCB_MAX) */ + +#endif /* defined(PVRSRV_NEED_PVR_DPF) */ + +static IMG_BOOL VBAppend(IMG_CHAR *pszBuf, IMG_UINT32 ui32BufSiz, + const IMG_CHAR* pszFormat, va_list VArgs) + IMG_FORMAT_PRINTF(3, 0); + + +#if defined(PVRSRV_NEED_PVR_DPF) + +/* NOTE: Must NOT be static! Used in module.c.. 
*/ +IMG_UINT32 gPVRDebugLevel = + (DBGPRIV_FATAL | DBGPRIV_ERROR | DBGPRIV_WARNING | DBGPRIV_BUFFERED); + +#endif /* defined(PVRSRV_NEED_PVR_DPF) || defined(PVRSRV_NEED_PVR_TRACE) */ + +#define PVR_MAX_MSG_LEN PVR_MAX_DEBUG_MESSAGE_LEN + +#if !defined(PVR_DEBUG_ALWAYS_USE_SPINLOCK) +/* Message buffer for non-IRQ messages */ +static IMG_CHAR gszBufferNonIRQ[PVR_MAX_MSG_LEN + 1]; +#endif + +/* Message buffer for IRQ messages */ +static IMG_CHAR gszBufferIRQ[PVR_MAX_MSG_LEN + 1]; + +#if !defined(PVR_DEBUG_ALWAYS_USE_SPINLOCK) +/* The lock is used to control access to gszBufferNonIRQ */ +static PVRSRV_LINUX_MUTEX gsDebugMutexNonIRQ; +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39)) +/* The lock is used to control access to gszBufferIRQ */ +/* PRQA S 0671,0685 1 */ /* ignore warnings about C99 style initialisation */ +static spinlock_t gsDebugLockIRQ = SPIN_LOCK_UNLOCKED; +#else +static DEFINE_SPINLOCK(gsDebugLockIRQ); +#endif + +#if !defined(PVR_DEBUG_ALWAYS_USE_SPINLOCK) +#if !defined (USE_SPIN_LOCK) /* to keep QAC happy */ +#define USE_SPIN_LOCK (in_interrupt() || !preemptible()) +#endif +#endif + +static inline void GetBufferLock(unsigned long *pulLockFlags) +{ +#if !defined(PVR_DEBUG_ALWAYS_USE_SPINLOCK) + if (USE_SPIN_LOCK) +#endif + { + spin_lock_irqsave(&gsDebugLockIRQ, *pulLockFlags); + } +#if !defined(PVR_DEBUG_ALWAYS_USE_SPINLOCK) + else + { + LinuxLockMutexNested(&gsDebugMutexNonIRQ, PVRSRV_LOCK_CLASS_PVR_DEBUG); + } +#endif +} + +static inline void ReleaseBufferLock(unsigned long ulLockFlags) +{ +#if !defined(PVR_DEBUG_ALWAYS_USE_SPINLOCK) + if (USE_SPIN_LOCK) +#endif + { + spin_unlock_irqrestore(&gsDebugLockIRQ, ulLockFlags); + } +#if !defined(PVR_DEBUG_ALWAYS_USE_SPINLOCK) + else + { + LinuxUnLockMutex(&gsDebugMutexNonIRQ); + } +#endif +} + +static inline void SelectBuffer(IMG_CHAR **ppszBuf, IMG_UINT32 *pui32BufSiz) +{ +#if !defined(PVR_DEBUG_ALWAYS_USE_SPINLOCK) + if (USE_SPIN_LOCK) +#endif + { + *ppszBuf = gszBufferIRQ; + *pui32BufSiz = 
sizeof(gszBufferIRQ); + } +#if !defined(PVR_DEBUG_ALWAYS_USE_SPINLOCK) + else + { + *ppszBuf = gszBufferNonIRQ; + *pui32BufSiz = sizeof(gszBufferNonIRQ); + } +#endif +} + +/* + * Append a string to a buffer using formatted conversion. + * The function takes a variable number of arguments, pointed + * to by the var args list. + */ +static IMG_BOOL VBAppend(IMG_CHAR *pszBuf, IMG_UINT32 ui32BufSiz, const IMG_CHAR* pszFormat, va_list VArgs) +{ + IMG_UINT32 ui32Used; + IMG_UINT32 ui32Space; + IMG_INT32 i32Len; + + ui32Used = strlen(pszBuf); + BUG_ON(ui32Used >= ui32BufSiz); + ui32Space = ui32BufSiz - ui32Used; + + i32Len = vsnprintf(&pszBuf[ui32Used], ui32Space, pszFormat, VArgs); + pszBuf[ui32BufSiz - 1] = 0; + + /* Return true if string was truncated */ + return (i32Len < 0 || i32Len >= (IMG_INT32)ui32Space) ? IMG_TRUE : IMG_FALSE; +} + +/* Actually required for ReleasePrintf too */ + +IMG_VOID PVRDPFInit(IMG_VOID) +{ +#if !defined(PVR_DEBUG_ALWAYS_USE_SPINLOCK) + LinuxInitMutex(&gsDebugMutexNonIRQ); +#endif +#if defined(PVRSRV_DEBUG_CCB_MAX) + LinuxInitMutex(&gsDebugCCBMutex); +#endif +} + +/*! +****************************************************************************** + @Function PVRSRVReleasePrintf + @Description To output an important message to the user in release builds + @Input pszFormat - The message format string + @Input ... - Zero or more arguments for use by the format string + @Return None + ******************************************************************************/ +IMG_VOID PVRSRVReleasePrintf(const IMG_CHAR *pszFormat, ...) 
+{ + va_list vaArgs; + unsigned long ulLockFlags = 0; + IMG_CHAR *pszBuf; + IMG_UINT32 ui32BufSiz; + + SelectBuffer(&pszBuf, &ui32BufSiz); + + va_start(vaArgs, pszFormat); + + GetBufferLock(&ulLockFlags); + strncpy (pszBuf, "PVR_K: ", (ui32BufSiz -1)); + + if (VBAppend(pszBuf, ui32BufSiz, pszFormat, vaArgs)) + { + printk(KERN_INFO "PVR_K:(Message Truncated): %s\n", pszBuf); + } + else + { + printk(KERN_INFO "%s\n", pszBuf); + } + + ReleaseBufferLock(ulLockFlags); + va_end(vaArgs); +} + +#if defined(PVRSRV_NEED_PVR_TRACE) + +/*! +****************************************************************************** + @Function PVRTrace + @Description To output a debug message to the user + @Input pszFormat - The message format string + @Input ... - Zero or more arguments for use by the format string + @Return None + ******************************************************************************/ +IMG_VOID PVRSRVTrace(const IMG_CHAR* pszFormat, ...) +{ + va_list VArgs; + unsigned long ulLockFlags = 0; + IMG_CHAR *pszBuf; + IMG_UINT32 ui32BufSiz; + + SelectBuffer(&pszBuf, &ui32BufSiz); + + va_start(VArgs, pszFormat); + + GetBufferLock(&ulLockFlags); + + strncpy(pszBuf, "PVR: ", (ui32BufSiz -1)); + + if (VBAppend(pszBuf, ui32BufSiz, pszFormat, VArgs)) + { + printk(KERN_INFO "PVR_K:(Message Truncated): %s\n", pszBuf); + } + else + { + printk(KERN_INFO "%s\n", pszBuf); + } + + ReleaseBufferLock(ulLockFlags); + + va_end(VArgs); +} + +#endif /* defined(PVRSRV_NEED_PVR_TRACE) */ + +#if defined(PVRSRV_NEED_PVR_DPF) + +/*! +****************************************************************************** + @Function PVRSRVDebugPrintf + @Description To output a debug message to the user + @Input uDebugLevel - The current debug level + @Input pszFile - The source file generating the message + @Input uLine - The line of the source file + @Input pszFormat - The message format string + @Input ... 
- Zero or more arguments for use by the format string + @Return None + ******************************************************************************/ +IMG_VOID PVRSRVDebugPrintf ( + IMG_UINT32 ui32DebugLevel, + const IMG_CHAR* pszFullFileName, + IMG_UINT32 ui32Line, + const IMG_CHAR* pszFormat, + ... + ) +{ + IMG_BOOL bTrace; + const IMG_CHAR *pszFileName = pszFullFileName; + + bTrace = (IMG_BOOL)(ui32DebugLevel & DBGPRIV_CALLTRACE) ? IMG_TRUE : IMG_FALSE; + + if (gPVRDebugLevel & ui32DebugLevel) + { + va_list vaArgs; + unsigned long ulLockFlags = 0; + IMG_CHAR *pszBuf; + IMG_UINT32 ui32BufSiz; + + SelectBuffer(&pszBuf, &ui32BufSiz); + + va_start(vaArgs, pszFormat); + + GetBufferLock(&ulLockFlags); + + /* Add in the level of warning */ + if (bTrace == IMG_FALSE) + { + switch(ui32DebugLevel) + { + case DBGPRIV_FATAL: + { + strncpy (pszBuf, "PVR_K:(Fatal): ", (ui32BufSiz -1)); + break; + } + case DBGPRIV_ERROR: + { + strncpy (pszBuf, "PVR_K:(Error): ", (ui32BufSiz -1)); + break; + } + case DBGPRIV_WARNING: + { + strncpy (pszBuf, "PVR_K:(Warning): ", (ui32BufSiz -1)); + break; + } + case DBGPRIV_MESSAGE: + { + strncpy (pszBuf, "PVR_K:(Message): ", (ui32BufSiz -1)); + break; + } + case DBGPRIV_VERBOSE: + { + strncpy (pszBuf, "PVR_K:(Verbose): ", (ui32BufSiz -1)); + break; + } + case DBGPRIV_BUFFERED: + { + strncpy (pszBuf, "PVR_K: ", (ui32BufSiz -1)); + break; + } + default: + { + strncpy (pszBuf, "PVR_K:(Unknown message level): ", (ui32BufSiz -1)); + break; + } + } + } + else + { + strncpy (pszBuf, "PVR_K: ", (ui32BufSiz -1)); + } + + if (VBAppend(pszBuf, ui32BufSiz, pszFormat, vaArgs)) + { + printk(KERN_INFO "PVR_K:(Message Truncated): %s\n", pszBuf); + } + else + { + if (ui32DebugLevel & DBGPRIV_BUFFERED) + { + /* We don't need the full path here */ + const IMG_CHAR *pszShortName = strrchr(pszFileName, '/') + 1; + if(pszShortName) + pszFileName = pszShortName; + + AddToBufferCCB(pszFileName, ui32Line, pszBuf); + } + else + { + printk(KERN_INFO "%s\n", pszBuf); + } 
+ } + + ReleaseBufferLock(ulLockFlags); + + va_end (vaArgs); + } +} + +#endif /* PVRSRV_NEED_PVR_DPF */ + +#if defined(DEBUG) + +IMG_INT PVRDebugProcSetLevel(struct file *file, const IMG_CHAR *buffer, IMG_UINT32 count, IMG_VOID *data) +{ +#define _PROC_SET_BUFFER_SZ 6 + IMG_CHAR data_buffer[_PROC_SET_BUFFER_SZ]; + + PVR_UNREFERENCED_PARAMETER(file); + PVR_UNREFERENCED_PARAMETER(data); + + if (count > _PROC_SET_BUFFER_SZ) + { + return -EINVAL; + } + else + { + if (pvr_copy_from_user(data_buffer, buffer, count)) + return -EINVAL; + if (data_buffer[count - 1] != '\n') + return -EINVAL; + if (sscanf(data_buffer, "%u", &gPVRDebugLevel) == 0) + return -EINVAL; + gPVRDebugLevel &= (1 << DBGPRIV_DBGLEVEL_COUNT) - 1; + } + return (count); +} + +void ProcSeqShowDebugLevel(struct seq_file *sfile, void* el) +{ + PVR_UNREFERENCED_PARAMETER(el); + + seq_printf(sfile, "%u\n", gPVRDebugLevel); +} + +#endif /* defined(DEBUG) */ diff --git a/sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_drm.c b/sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_drm.c new file mode 100644 index 0000000..9e6b92c --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_drm.c @@ -0,0 +1,808 @@ +/*************************************************************************/ /*! +@Title PowerVR drm driver +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description linux module setup +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#if defined(SUPPORT_DRI_DRM) + +#include + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)) +#ifndef AUTOCONF_INCLUDED +#include +#endif +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "img_defs.h" +#include "services.h" +#include "kerneldisplay.h" +#include "kernelbuffer.h" +#include "syscommon.h" +#include "pvrmmap.h" +#include "mm.h" +#include "mmap.h" +#include "mutex.h" +#include "pvr_debug.h" +#include "srvkm.h" +#include "perproc.h" +#include "handle.h" +#include "pvr_bridge_km.h" +#include "pvr_bridge.h" +#include "pvrmodule.h" +#include "pvrversion.h" +#include "lock.h" +#include "linkage.h" +#include "pvr_drm.h" + +#if defined(PVR_DRI_DRM_NOT_PCI) +#include "pvr_drm_mod.h" +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,16,0)) +#define DRM_ARRAY_SIZE(x) ARRAY_SIZE(x) +#endif + +#if (defined(PVR_LDM_PLATFORM_PRE_REGISTERED) || defined(PVR_LDM_DEVICE_TREE)) && !defined(NO_HARDWARE) +#define PVR_USE_PRE_REGISTERED_PLATFORM_DEV +#endif + +#if defined(PVR_LDM_DEVICE_TREE) && !defined(NO_HARDWARE) +#define PVR_USE_DEVICE_TREE +#endif + +#if (defined(PVR_DRI_DRM_PLATFORM_DEV) && !defined(PVR_USE_PRE_REGISTERED_PLATFORM_DEV)) || defined(NO_HARDWARE) +#define PVR_DRM_NAME SYS_SGX_DEV_NAME +#else +#define PVR_DRM_NAME PVRSRV_MODNAME +#endif + +#define PVR_DRM_DESC "Imagination Technologies PVR DRM" + +#define PVR_DRM_DATE "20110701" + +#if defined(PVR_DRI_DRM_PLATFORM_DEV) && !defined(SUPPORT_DRI_DRM_PLUGIN) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)) +#define PVR_NEW_STYLE_DRM_PLATFORM_DEV +#else +#define PVR_OLD_STYLE_DRM_PLATFORM_DEV +#endif +#endif + +/* + * Prior to Linux 2.6.36, we couldn't do the release processing in post close + * when workqueues were being used, because drm_release held the big kernel + * lock (BKL) when it called post close. 
+ * If the resman needs to wait for processing being done by a workqueue, + * that processing won't complete whilst the lock is held by another thread, + * as the workqueue won't get scheduled. + */ +#undef PVR_DRI_DRM_USE_POST_CLOSE +#if (defined(SUPPORT_DRI_DRM_EXT) && !defined(PVR_LINUX_USING_WORKQUEUES)) || \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)) +#define PVR_DRI_DRM_USE_POST_CLOSE +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) +#define PVR_DRM_DRIVER_RENDER DRIVER_RENDER +#define PVR_DRM_RENDER_ALLOW DRM_RENDER_ALLOW +#else +#define PVR_DRM_DRIVER_RENDER 0 +#define PVR_DRM_RENDER_ALLOW 0 +#endif + +DECLARE_WAIT_QUEUE_HEAD(sWaitForInit); + +#if defined(SUPPORT_DRM_MODESET) +static struct drm_driver sPVRDrmDriver; +#endif + +/* Once bInitComplete and bInitFailed are set, they stay set */ +IMG_BOOL bInitComplete; +IMG_BOOL bInitFailed; + +#if !defined(PVR_DRI_DRM_NOT_PCI) && !defined(SUPPORT_DRI_DRM_PLUGIN) +#if defined(PVR_DRI_DRM_PLATFORM_DEV) +#if defined(PVR_NEW_STYLE_DRM_PLATFORM_DEV) && !defined(PVR_USE_PRE_REGISTERED_PLATFORM_DEV) +static struct platform_device *psPlatDev; +#endif +struct platform_device *gpsPVRLDMDev; +#else +struct pci_dev *gpsPVRLDMDev; +#endif +#endif + +struct drm_device *gpsPVRDRMDev; + +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,24)) +#error "Linux kernel version 2.6.25 or later required for PVR DRM support" +#endif + +#define PVR_DRM_FILE struct drm_file * + +#if !defined(SUPPORT_DRI_DRM_EXT) && !defined(SUPPORT_DRI_DRM_PLUGIN) +#if defined(PVR_USE_DEVICE_TREE) +static struct of_device_id asPlatIdList[] = { + { + .compatible = SYS_SGX_DEV_NAME + }, + {} +}; +MODULE_DEVICE_TABLE(of, asPlatIdList); +#else +#if defined(PVR_DRI_DRM_PLATFORM_DEV) +static struct platform_device_id asPlatIdList[] = { + {SYS_SGX_DEV_NAME, 0}, + {} +}; +#else /* defined(PVR_DRI_DRM_PLATFORM_DEV) */ +static struct pci_device_id asPciIdList[] = { +#if defined(PVR_DRI_DRM_NOT_PCI) + {1, 1, 1, 1, 0, 0, 0}, +#else /* 
defined(PVR_DRI_DRM_NOT_PCI) */ + {SYS_SGX_DEV_VENDOR_ID, SYS_SGX_DEV_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, +#if defined(SYS_SGX_DEV1_DEVICE_ID) + {SYS_SGX_DEV_VENDOR_ID, SYS_SGX_DEV1_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, +#endif /* defined(SYS_SGX_DEV1_DEVICE_ID) */ +#endif /* defined(PVR_DRI_DRM_NOT_PCI) */ + {0} +}; +#endif /* defined(PVR_DRI_DRM_PLATFORM_DEV) */ +#endif /* defined(PVR_DEVICE_TREE) */ +#endif /* !defined(SUPPORT_DRI_DRM_EXT) */ + +struct device * +PVRLDMGetDevice(void) +{ + return gpsPVRDRMDev->dev; +} + +DRI_DRM_STATIC int +PVRSRVDrmLoad(struct drm_device *dev, unsigned long flags) +{ + int iRes = 0; + + PVR_TRACE(("PVRSRVDrmLoad")); + + gpsPVRDRMDev = dev; +#if !defined(PVR_DRI_DRM_NOT_PCI) && !defined(SUPPORT_DRI_DRM_PLUGIN) +#if defined(PVR_DRI_DRM_PLATFORM_DEV) + gpsPVRLDMDev = dev->platformdev; +#else + gpsPVRLDMDev = dev->pdev; +#endif +#endif + +#if defined(PDUMP) + iRes = dbgdrv_init(); + if (iRes != 0) + { + goto exit; + } +#endif + /* Module initialisation */ + iRes = PVRCore_Init(); + if (iRes != 0) + { + goto exit_dbgdrv_cleanup; + } + +#if defined(DISPLAY_CONTROLLER) + iRes = PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _Init)(dev); + if (iRes != 0) + { + goto exit_pvrcore_cleanup; + } +#endif + goto exit; + +#if defined(DISPLAY_CONTROLLER) +exit_pvrcore_cleanup: + PVRCore_Cleanup(); +#endif +exit_dbgdrv_cleanup: +#if defined(PDUMP) + dbgdrv_cleanup(); +#endif +exit: + if (iRes != 0) + { + bInitFailed = IMG_TRUE; + } + bInitComplete = IMG_TRUE; + + wake_up_interruptible(&sWaitForInit); + + return iRes; +} + +DRI_DRM_STATIC int +PVRSRVDrmUnload(struct drm_device *dev) +{ + PVR_TRACE(("PVRSRVDrmUnload")); + +#if defined(DISPLAY_CONTROLLER) + PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _Cleanup)(dev); +#endif + + PVRCore_Cleanup(); + +#if defined(PDUMP) + dbgdrv_cleanup(); +#endif + + return 0; +} + +DRI_DRM_STATIC int +PVRSRVDrmOpen(struct drm_device *dev, struct drm_file *file) +{ + while (!bInitComplete) + { + DEFINE_WAIT(sWait); + 
+ prepare_to_wait(&sWaitForInit, &sWait, TASK_INTERRUPTIBLE); + + if (!bInitComplete) + { + PVR_TRACE(("%s: Waiting for module initialisation to complete", __FUNCTION__)); + + schedule(); + } + + finish_wait(&sWaitForInit, &sWait); + + if (signal_pending(current)) + { + return -ERESTARTSYS; + } + } + + if (bInitFailed) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Module initialisation failed", __FUNCTION__)); + return -EINVAL; + } + + return PVRSRVOpen(dev, file); +} + +#if defined(PVR_DRI_DRM_USE_POST_CLOSE) || defined(SUPPORT_DRI_DRM_PLUGIN) +#if defined(SUPPORT_DRI_DRM_PLUGIN) +DRI_DRM_STATIC int +PVRSRVDrmRelease(struct drm_device *dev, struct drm_file *file) +#else +DRI_DRM_STATIC void +PVRSRVDrmPostClose(struct drm_device *dev, struct drm_file *file) +#endif +{ + PVRSRVRelease(file->driver_priv); + + file->driver_priv = NULL; + +#if defined(SUPPORT_DRI_DRM_PLUGIN) + return 0; +#endif +} +#else +DRI_DRM_STATIC int +PVRSRVDrmRelease(struct inode *inode, struct file *filp) +{ + struct drm_file *file_priv = filp->private_data; + void *psDriverPriv = file_priv->driver_priv; + int ret; + + ret = drm_release(inode, filp); + + if (ret != 0) + { + /* + * An error means drm_release didn't call drm_lastclose, + * but it will have freed file_priv. 
+ */ + PVR_DPF((PVR_DBG_ERROR, "%s : drm_release failed: %d", + __FUNCTION__, ret)); + } + + PVRSRVRelease(psDriverPriv); + + return 0; +} +#endif + +DRI_DRM_STATIC int +PVRDRMIsMaster(struct drm_device *dev, void *arg, struct drm_file *pFile) +{ + return 0; +} + +#if defined(SUPPORT_DRI_DRM_EXT) +int +PVRDRM_Dummy_ioctl(struct drm_device *dev, void *arg, struct drm_file *pFile) +{ + return 0; +} +#endif + +DRI_DRM_STATIC int +PVRDRMUnprivCmd(struct drm_device *dev, void *arg, struct drm_file *pFile) +{ + int ret = 0; + + LinuxLockMutexNested(&gPVRSRVLock, PVRSRV_LOCK_CLASS_BRIDGE); + + if (arg == NULL) + { + ret = -EFAULT; + } + else + { + drm_pvr_unpriv_cmd *psArgs = (drm_pvr_unpriv_cmd *)arg; + + switch (psArgs->cmd) + { + case PVR_DRM_UNPRIV_INIT_SUCCESFUL: + psArgs->res = PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_SUCCESSFUL) ? 1 : 0; + break; + + default: + ret = -EFAULT; + } + + } + + LinuxUnLockMutex(&gPVRSRVLock); + + return ret; +} + +#if defined(DISPLAY_CONTROLLER) && defined(PVR_DISPLAY_CONTROLLER_DRM_IOCTL) +static int +PVRDRM_Display_ioctl(struct drm_device *dev, void *arg, struct drm_file *pFile) +{ + int res; + + LinuxLockMutexNested(&gPVRSRVLock, PVRSRV_LOCK_CLASS_BRIDGE); + + res = PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _Ioctl)(dev, arg, pFile); + + LinuxUnLockMutex(&gPVRSRVLock); + + return res; +} +#endif + +#if defined(SUPPORT_DRM_MODESET) +static int +PVRSRVPciProbe(struct pci_dev *dev, const struct pci_device_id *id) +{ + PVR_TRACE(("PVRSRVPciProbe")); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)) + return drm_get_pci_dev(dev, id, &sPVRDrmDriver); +#else + return drm_get_dev(dev, id, &sPVRDrmDriver); +#endif +} + +static void +PVRSRVPciRemove(struct pci_dev *dev) +{ + struct drm_device *psDrmDev; + + PVR_TRACE(("PVRSRVPciRemove")); + + psDrmDev = pci_get_drvdata(dev); + drm_put_dev(psDrmDev); +} +#endif + +/* + * For Linux 2.6.33 and above, the DRM ioctl entry point is of the unlocked + * variety. 
The big kernel lock is still taken for ioctls, unless + * the DRM_UNLOCKED flag is set. If you revise one of the driver specific + * ioctls, or add a new one, consider whether the gPVRSRVLock mutex needs + * to be taken. + */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33)) +#define PVR_DRM_FOPS_IOCTL .unlocked_ioctl +#define PVR_DRM_UNLOCKED DRM_UNLOCKED +#else +#define PVR_DRM_FOPS_IOCTL .ioctl +#define PVR_DRM_UNLOCKED 0 +#endif + +#if !defined(DRM_IOCTL_DEF_DRV) +#define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags) DRM_IOCTL_DEF(DRM_##ioctl, _func, _flags) +#endif + +#if !defined(SUPPORT_DRI_DRM_EXT) +struct drm_ioctl_desc sPVRDrmIoctls[] = { + DRM_IOCTL_DEF_DRV(PVR_SRVKM, PVRSRV_BridgeDispatchKM, PVR_DRM_RENDER_ALLOW | PVR_DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(PVR_IS_MASTER, PVRDRMIsMaster, PVR_DRM_RENDER_ALLOW | DRM_MASTER | PVR_DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(PVR_UNPRIV, PVRDRMUnprivCmd, PVR_DRM_RENDER_ALLOW | PVR_DRM_UNLOCKED), +#if defined(PDUMP) + DRM_IOCTL_DEF_DRV(PVR_DBGDRV, dbgdrv_ioctl, PVR_DRM_RENDER_ALLOW | PVR_DRM_UNLOCKED), +#endif +#if defined(DISPLAY_CONTROLLER) && defined(PVR_DISPLAY_CONTROLLER_DRM_IOCTL) + DRM_IOCTL_DEF_DRV(PVR_DISP, PVRDRM_Display_ioctl, DRM_MASTER | PVR_DRM_UNLOCKED) +#endif +}; + +#if !defined(SUPPORT_DRI_DRM_PLUGIN) +static int pvr_max_ioctl = DRM_ARRAY_SIZE(sPVRDrmIoctls); +#endif + +#if defined(PVR_DRI_DRM_PLATFORM_DEV) && !defined(SUPPORT_DRI_DRM_EXT) && \ + !defined(SUPPORT_DRI_DRM_PLUGIN) +static int PVRSRVDrmProbe(struct platform_device *pDevice); +static int PVRSRVDrmRemove(struct platform_device *pDevice); +#endif /* defined(PVR_DRI_DRM_PLATFORM_DEV) && !defined(SUPPORT_DRI_DRM_EXT) */ + +#if defined(SUPPORT_DRI_DRM_PLUGIN) +static PVRSRV_DRM_PLUGIN sPVRDrmPlugin = +{ + .name = PVR_DRM_NAME, + + .open = PVRSRVDrmOpen, + .load = PVRSRVDrmLoad, + .unload = PVRSRVDrmUnload, + + .release = PVRSRVDrmRelease, + + .mmap = PVRMMap, + + .ioctls = sPVRDrmIoctls, + .num_ioctls = DRM_ARRAY_SIZE(sPVRDrmIoctls), + .ioctl_start = 0 
+}; +#else /* defined(SUPPORT_DRI_DRM_PLUGIN) */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0)) +#if defined(CONFIG_COMPAT) +static long pvr_compat_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + unsigned int nr = DRM_IOCTL_NR(cmd); + + if (nr < DRM_COMMAND_BASE) + { + return drm_compat_ioctl(file, cmd, arg); + } + + return drm_ioctl(file, cmd, arg); +} +#endif /* defined(CONFIG_COMPAT) */ + +static const struct file_operations sPVRFileOps = +{ + .owner = THIS_MODULE, + .open = drm_open, +#if defined(PVR_DRI_DRM_USE_POST_CLOSE) + .release = drm_release, +#else + .release = PVRSRVDrmRelease, +#endif + PVR_DRM_FOPS_IOCTL = drm_ioctl, +#if defined(CONFIG_COMPAT) + .compat_ioctl = pvr_compat_ioctl, +#endif + .mmap = PVRMMap, + .poll = drm_poll, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0)) + .fasync = drm_fasync, +#endif +}; +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0)) */ + +static struct drm_driver sPVRDrmDriver = +{ + .driver_features = PVR_DRM_DRIVER_RENDER +#if defined(PVR_OLD_STYLE_DRM_PLATFORM_DEV) + | DRIVER_USE_PLATFORM_DEVICE +#endif + , + .dev_priv_size = 0, + .load = PVRSRVDrmLoad, + .unload = PVRSRVDrmUnload, + .open = PVRSRVDrmOpen, +#if defined(PVR_DRI_DRM_USE_POST_CLOSE) + .postclose = PVRSRVDrmPostClose, +#endif +#if !defined(PVR_DRI_DRM_PLATFORM_DEV) && !defined(SUPPORT_DRM_MODESET) + .suspend = PVRSRVDriverSuspend, + .resume = PVRSRVDriverResume, +#endif +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37)) + .get_map_ofs = drm_core_get_map_ofs, + .get_reg_ofs = drm_core_get_reg_ofs, +#endif + .ioctls = sPVRDrmIoctls, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0)) + .fops = &sPVRFileOps, +#else + .fops = + { + .owner = THIS_MODULE, + .open = drm_open, +#if defined(PVR_DRI_DRM_USE_POST_CLOSE) + .release = drm_release, +#else + .release = PVRSRVDrmRelease, +#endif + PVR_DRM_FOPS_IOCTL = drm_ioctl, + .mmap = PVRMMap, + .poll = drm_poll, + .fasync = drm_fasync, + }, +#endif /* (LINUX_VERSION_CODE >= 
KERNEL_VERSION(3,3,0)) */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39)) +#if defined(PVR_OLD_STYLE_DRM_PLATFORM_DEV) + .platform_driver = + { + .id_table = asPlatIdList, + .driver = + { + .name = PVR_DRM_NAME, + }, + .probe = PVRSRVDrmProbe, + .remove = PVRSRVDrmRemove, + .suspend = PVRSRVDriverSuspend, + .resume = PVRSRVDriverResume, + .shutdown = PVRSRVDriverShutdown, + }, +#else + .pci_driver = + { + .name = PVR_DRM_NAME, + .id_table = asPciIdList, +#if defined(SUPPORT_DRM_MODESET) + .probe = PVRSRVPciProbe, + .remove = PVRSRVPciRemove, + .suspend = PVRSRVDriverSuspend, + .resume = PVRSRVDriverResume, +#endif + }, +#endif +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,18,0)) && (LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0)) +#if defined(LDM_PLATFORM) + .set_busid = drm_platform_set_busid, +#else +#if defined(LDM_PCI) + .set_busid = drm_pci_set_busid, +#else + #error "LDM_PLATFORM or LDM_PCI must be set" +#endif +#endif +#endif + .name = "pvr", + .desc = PVR_DRM_DESC, + .date = PVR_DRM_DATE, + .major = PVRVERSION_MAJ, + .minor = PVRVERSION_MIN, + .patchlevel = PVRVERSION_BUILD, +}; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)) && !defined(PVR_DRI_DRM_PLATFORM_DEV) +static struct pci_driver sPVRPCIDriver = +{ + .name = PVR_DRM_NAME, + .id_table = asPciIdList, +#if defined(SUPPORT_DRM_MODESET) + .probe = PVRSRVPciProbe, + .remove = PVRSRVPciRemove, + .suspend = PVRSRVDriverSuspend, + .resume = PVRSRVDriverResume, +#endif +}; +#endif + +#if defined(PVR_NEW_STYLE_DRM_PLATFORM_DEV) +#if !defined(PVR_USE_PRE_REGISTERED_PLATFORM_DEV) +static struct platform_device_info sPVRPlatDeviceInfo = { + .name = PVR_DRM_NAME, + .id = -1, + .dma_mask = DMA_BIT_MASK(32) +}; +#endif + +static struct platform_driver sPVRPlatDriver = +{ +#if !defined(PVR_USE_DEVICE_TREE) + .id_table = asPlatIdList, +#endif + .driver = + { + .name = PVR_DRM_NAME, +#if defined(PVR_USE_DEVICE_TREE) + .of_match_table = asPlatIdList, +#endif + }, + .probe = PVRSRVDrmProbe, + .remove = 
PVRSRVDrmRemove, + .suspend = PVRSRVDriverSuspend, + .resume = PVRSRVDriverResume, + .shutdown = PVRSRVDriverShutdown, +}; +#endif + +#endif /* defined(SUPPORT_DRI_DRM_PLUGIN) */ + +#if defined(PVR_DRI_DRM_PLATFORM_DEV) && !defined(SUPPORT_DRI_DRM_EXT) && \ + !defined(SUPPORT_DRI_DRM_PLUGIN) +static int +PVRSRVDrmProbe(struct platform_device *pDevice) +{ + PVR_TRACE(("PVRSRVDrmProbe")); + +#if defined(PVR_NEW_STYLE_DRM_PLATFORM_DEV) + gpsPVRLDMDev = pDevice; + + return drm_platform_init(&sPVRDrmDriver, gpsPVRLDMDev); +#else + return drm_get_platform_dev(pDevice, &sPVRDrmDriver); +#endif +} + +static int +PVRSRVDrmRemove(struct platform_device *pDevice) +{ + PVR_TRACE(("PVRSRVDrmRemove")); + +#if defined(PVR_NEW_STYLE_DRM_PLATFORM_DEV) && (LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0)) + drm_platform_exit(&sPVRDrmDriver, gpsPVRLDMDev); +#else + drm_put_dev(gpsPVRDRMDev); +#endif + return 0; +} +#endif + +static int __init PVRSRVDrmInit(void) +{ + int iRes; +#if !defined(SUPPORT_DRI_DRM_PLUGIN) + sPVRDrmDriver.num_ioctls = pvr_max_ioctl; +#endif + +#if defined(SUPPORT_DRM_MODESET) + sPVRDrmDriver.driver_features |= DRIVER_MODESET; +#endif + + /* Must come before attempting to print anything via Services */ + PVRDPFInit(); + +#if defined(PVR_NEW_STYLE_DRM_PLATFORM_DEV) + iRes = platform_driver_register(&sPVRPlatDriver); +#if !defined(PVR_USE_PRE_REGISTERED_PLATFORM_DEV) + if (iRes == 0) + { + psPlatDev = platform_device_register_full(&sPVRPlatDeviceInfo); + if (IS_ERR(psPlatDev)) + { + iRes = PTR_ERR(psPlatDev); + psPlatDev = NULL; + platform_driver_unregister(&sPVRPlatDriver); + } + } +#endif +#else /* defined(PVR_NEW_STYLE_DRM_PLATFORM_DEV) */ +#if defined(SUPPORT_DRI_DRM_PLUGIN) + iRes = SysDRMRegisterPlugin(&sPVRDrmPlugin); +#else /* defined(SUPPORT_DRI_DRM_PLUGIN) */ +#if defined(PVR_DRI_DRM_NOT_PCI) + iRes = drm_pvr_dev_add(); + if (iRes != 0) + { + return iRes; + } +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)) +#if 
defined(PVR_DRI_DRM_PLATFORM_DEV) + iRes = drm_platform_init(&sPVRDrmDriver, gpsPVRLDMDev); +#else + iRes = drm_pci_init(&sPVRDrmDriver, &sPVRPCIDriver); +#endif +#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)) */ + iRes = drm_init(&sPVRDrmDriver); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)) */ + +#if defined(PVR_DRI_DRM_NOT_PCI) + if (iRes != 0) + { + drm_pvr_dev_remove(); + } +#endif +#endif /* defined(SUPPORT_DRI_DRM_PLUGIN) */ +#endif /* defined(PVR_NEW_STYLE_DRM_PLATFORM_DEV) */ + return iRes; +} + +static void __exit PVRSRVDrmExit(void) +{ +#if defined(PVR_NEW_STYLE_DRM_PLATFORM_DEV) +#if !defined(PVR_USE_PRE_REGISTERED_PLATFORM_DEV) + platform_device_unregister(psPlatDev); +#endif + platform_driver_unregister(&sPVRPlatDriver); +#else /* defined(PVR_NEW_STYLE_DRM_PLATFORM_DEV) */ +#if defined(SUPPORT_DRI_DRM_PLUGIN) + SysDRMUnregisterPlugin(&sPVRDrmPlugin); +#else /* defined(SUPPORT_DRI_DRM_PLUGIN) */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)) +#if defined(PVR_DRI_DRM_PLATFORM_DEV) + drm_platform_exit(&sPVRDrmDriver, gpsPVRLDMDev); +#else + drm_pci_exit(&sPVRDrmDriver, &sPVRPCIDriver); +#endif +#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)) */ + drm_exit(&sPVRDrmDriver); +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)) */ + +#if defined(PVR_DRI_DRM_NOT_PCI) + drm_pvr_dev_remove(); +#endif +#endif /* defined(SUPPORT_DRI_DRM_PLUGIN) */ +#endif /* defined(PVR_NEW_STYLE_DRM_PLATFORM_DEV) */ +} + +/* + * These macro calls define the initialisation and removal functions of the + * driver. Although they are prefixed `module_', they apply when compiling + * statically as well; in both cases they define the function the kernel will + * run to start/stop the driver. 
+*/ +module_init(PVRSRVDrmInit); +module_exit(PVRSRVDrmExit); +#endif /* !defined(SUPPORT_DRI_DRM_EXT) */ +#endif /* defined(SUPPORT_DRI_DRM) */ + + diff --git a/sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_drm.h b/sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_drm.h new file mode 100644 index 0000000..1d6e274 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_drm.h @@ -0,0 +1,185 @@ +/*************************************************************************/ /*! +@Title PowerVR drm driver +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description drm module +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#if !defined(__PVR_DRM_H__) +#define __PVR_DRM_H__ + +#if defined (PDUMP) +#include "linuxsrv.h" +#endif + +#include "pvr_drm_shared.h" + +#if defined(SUPPORT_DRI_DRM) + +#if defined(PVR_DISPLAY_CONTROLLER_DRM_IOCTL) +#include "3rdparty_dc_drm_shared.h" +#endif + +#define PVR_DRM_MAKENAME_HELPER(x, y) x ## y +#define PVR_DRM_MAKENAME(x, y) PVR_DRM_MAKENAME_HELPER(x, y) + +#if defined(PVR_DRI_DRM_PLATFORM_DEV) +#define LDM_DEV struct platform_device +#endif + +int PVRCore_Init(void); +void PVRCore_Cleanup(void); +int PVRSRVOpen(struct drm_device *dev, struct drm_file *pFile); +void PVRSRVRelease(void *pvPrivData); + +#if !defined(SUPPORT_DRI_DRM_PLUGIN) +#if defined(PVR_DRI_DRM_PLATFORM_DEV) +void PVRSRVDriverShutdown(LDM_DEV *pDevice); +int PVRSRVDriverSuspend(LDM_DEV *pDevice, pm_message_t state); +int PVRSRVDriverResume(LDM_DEV *pDevice); +#else +#if defined(SUPPORT_DRM_MODESET) +int PVRSRVDriverSuspend(struct pci_dev *pDevice, pm_message_t state); +int PVRSRVDriverResume(struct pci_dev *pDevice); +#else +int PVRSRVDriverSuspend(struct drm_device *pDevice, pm_message_t state); +int PVRSRVDriverResume(struct drm_device *pDevice); 
+#endif /* defined(SUPPORT_DRM_MODESET) */ +#endif /* defined(PVR_DRI_DRM_PLATFORM_DEV) */ +#endif /* !defined(SUPPORT_DRI_DRM_PLUGIN) */ + +int PVRSRV_BridgeDispatchKM(struct drm_device *dev, void *arg, struct drm_file *pFile); +int PVRSRV_BridgeCompatDispatchKM(struct drm_device *dev, void *arg, struct drm_file *pFile); + +#if defined(SUPPORT_DRI_DRM_EXT) +#define DRI_DRM_STATIC +/*Exported functions to common drm layer*/ +int PVRSRVDrmLoad(struct drm_device *dev, unsigned long flags); +int PVRSRVDrmUnload(struct drm_device *dev); +int PVRSRVDrmOpen(struct drm_device *dev, struct drm_file *file); +#if defined(PVR_LINUX_USING_WORKQUEUES) +DRI_DRM_STATIC int PVRSRVDrmRelease(struct inode *inode, struct file *filp); +#else +void PVRSRVDrmPostClose(struct drm_device *dev, struct drm_file *file); +#endif +int PVRDRMIsMaster(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile); +int PVRDRMUnprivCmd(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile); +int PVRDRM_Dummy_ioctl(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile); +#else +#define DRI_DRM_STATIC static +#endif /* defined(SUPPORT_DRI_DRM_EXT) */ + +#if defined(DISPLAY_CONTROLLER) +extern int PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _Init)(struct drm_device *); +extern void PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _Cleanup)(struct drm_device *); +extern int PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _Suspend)(struct drm_device *); +extern int PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _Resume)(struct drm_device *); +#if defined(PVR_DISPLAY_CONTROLLER_DRM_IOCTL) +extern int PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _Ioctl)(struct drm_device *dev, void *arg, struct drm_file *pFile); +#endif +#endif + +#if defined(PDUMP) +int dbgdrv_init(void); +void dbgdrv_cleanup(void); +IMG_INT dbgdrv_ioctl(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile); +#endif + +#if !defined(SUPPORT_DRI_DRM_EXT) +/* + * We need the command number names to begin with "DRM_" for newer versions + * of the macro used to 
fill out the DRM ioctl table. Similarly, the + * ioctl number names must begin with "DRM_IOCTL_". The suffixes for the + * two sets of strings must match (e.g. end with "PVR_SRVKM" in both + * cases). + */ + +#define DRM_PVR_SRVKM PVR_DRM_SRVKM_CMD +#define DRM_PVR_IS_MASTER PVR_DRM_IS_MASTER_CMD +#define DRM_PVR_UNPRIV PVR_DRM_UNPRIV_CMD +#define DRM_PVR_DBGDRV PVR_DRM_DBGDRV_CMD +#define DRM_PVR_DISP PVR_DRM_DISP_CMD + +/* + * Some versions of the kernel will dereference a NULL pointer if data is + * is passed to an ioctl that doesn't expect any, so avoid using the _IO + * macro, and use _IOW instead, specifying a dummy argument. +*/ +typedef struct { + char dummy[4]; +} drm_pvr_dummy_arg; + +/* IOCTL numbers */ +#define DRM_IOCTL_PVR_SRVKM DRM_IOWR(DRM_COMMAND_BASE + DRM_PVR_SRVKM, PVRSRV_BRIDGE_PACKAGE) +#define DRM_IOCTL_PVR_IS_MASTER DRM_IOW(DRM_COMMAND_BASE + DRM_PVR_IS_MASTER, drm_pvr_dummy_arg) +#define DRM_IOCTL_PVR_UNPRIV DRM_IOWR(DRM_COMMAND_BASE + DRM_PVR_UNPRIV, drm_pvr_unpriv_cmd) + +#if defined(PDUMP) +#define DRM_IOCTL_PVR_DBGDRV DRM_IOWR(DRM_COMMAND_BASE + DRM_PVR_DBGDRV, IOCTL_PACKAGE) +#endif + +#if defined(PVR_DISPLAY_CONTROLLER_DRM_IOCTL) +#define DRM_IOCTL_PVR_DISP DRM_IOWR(DRM_COMMAND_BASE + DRM_PVR_DISP, drm_pvr_display_cmd) +#endif +#endif /* !defined(SUPPORT_DRI_DRM_EXT) */ + +#if defined(SUPPORT_DRI_DRM_PLUGIN) +typedef struct { + char *name; + + int (*load)(struct drm_device *dev, unsigned long flags); + int (*unload)(struct drm_device *dev); + + int (*open)(struct drm_device *dev, struct drm_file *file); + int (*release)(struct drm_device *dev, struct drm_file *file); + + int (*mmap)(struct file* pFile, struct vm_area_struct* ps_vma); + + struct drm_ioctl_desc *ioctls; + int num_ioctls; + int ioctl_start; +} PVRSRV_DRM_PLUGIN; + +int SysDRMRegisterPlugin(PVRSRV_DRM_PLUGIN *psDRMPlugin); +void SysDRMUnregisterPlugin(PVRSRV_DRM_PLUGIN *psDRMPlugin); +#endif /* defined(SUPPORT_DRI_DRM_PLUGIN) */ + +#endif /* defined(SUPPORT_DRI_DRM) */ + 
+#endif /* defined(__PVR_DRM_H__) */ + + diff --git a/sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_fence.c b/sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_fence.c new file mode 100644 index 0000000..0eba5a6 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_fence.c @@ -0,0 +1,1681 @@ +/*************************************************************************/ /*! +@File +@Title PowerVR Linux fence interface +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Strictly Confidential. +*/ /**************************************************************************/ + +#include "pvr_sync_common.h" +#include "pvr_fence.h" +#include "pvr_counting_timeline.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "services_headers.h" +#include "sgxutils.h" +#include "ttrace.h" +#include "mutex.h" +#include "lock.h" + +//#define DEBUG_PRINT + +#if defined(DEBUG_PRINT) +#define DPF(fmt, ...) PVR_DPF((PVR_DBG_BUFFERED, fmt, __VA_ARGS__)) +#else +#define DPF(fmt, ...) do {} while(0) +#endif + +struct sw_sync_create_fence_data { + __u32 value; + char name[32]; + __s32 fence; +}; +#define SW_SYNC_IOC_MAGIC 'W' +#define SW_SYNC_IOC_CREATE_FENCE \ + (_IOWR(SW_SYNC_IOC_MAGIC, 0, struct sw_sync_create_fence_data)) +#define SW_SYNC_IOC_INC _IOW(SW_SYNC_IOC_MAGIC, 1, __u32) + +/* Gobal WQ for scheduling work */ +static struct workqueue_struct *gpsWorkQueue; + +/* Linux work struct for workqueue. */ +static struct work_struct gsWork; + +static const struct file_operations pvr_sync_fops; + +/* The "defer-free" object list. Driver global. */ +static LIST_HEAD(gSyncInfoFreeList); +static DEFINE_SPINLOCK(gSyncInfoFreeListLock); + +/* List of timelines, used by MISR callback to find signaled fences + * and also to kick the hardware if signalling may allow progress to be + * made. 
+ */ +static LIST_HEAD(gFenceCtxList); +static DEFINE_MUTEX(gFenceCtxListLock); + +/* Forward declare due to cyclic dependency on gsSyncFenceAllocFOps */ +struct PVR_ALLOC_SYNC_DATA *PVRSyncAllocFDGet(int fd); + +/* Global data for the sync driver */ +static struct { + /* Process that initialized the sync driver. House-keep this so + * the correct per-proc data is used during shutdown. This PID is + * conventionally whatever `pvrsrvctl' was when it was alive. + */ + IMG_UINT32 ui32Pid; + + /* Device cookie for services allocation functions. The device would + * ordinarily be SGX, and the first/only device in the system. + */ + IMG_HANDLE hDevCookie; + + /* Device memory context that all SYNC_INFOs allocated by this driver + * will be created in. Because SYNC_INFOs are placed in a shared heap, + * it does not matter from which process the create ioctl originates. + */ + IMG_HANDLE hDevMemContext; + struct PVR_FENCE_CONTEXT *psForeignFenceCtx; +} +gsSyncServicesConnection; + + +/* NOTE: Must only be called with services bridge mutex held */ +static void PVRSyncSWTakeOp(PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo) +{ + psKernelSyncInfo->psSyncData->ui32WriteOpsPending = 1; +} + +static void PVRSyncSWCompleteOp(PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo) +{ + psKernelSyncInfo->psSyncData->ui32WriteOpsComplete = 1; +} + +#define PVR_DUMPDEBUG_LOG(fmt, ...) 
\ + do { \ + PVR_DPF((PVR_DBG_ERROR, fmt "\n", ## __VA_ARGS__)); \ + } while (0) + +static IMG_BOOL PVRSyncIsSyncInfoInUse(PVRSRV_KERNEL_SYNC_INFO *psSyncInfo) +{ + + return !(psSyncInfo->psSyncData->ui32WriteOpsPending == + psSyncInfo->psSyncData->ui32WriteOpsComplete && + psSyncInfo->psSyncData->ui32ReadOpsPending == + psSyncInfo->psSyncData->ui32ReadOpsComplete && + psSyncInfo->psSyncData->ui32ReadOps2Pending == + psSyncInfo->psSyncData->ui32ReadOps2Complete); +} + +static inline bool +pvr_fence_sync_value_met(struct PVR_FENCE *psPVRFence) +{ + return !PVRSyncIsSyncInfoInUse(psPVRFence->psSyncData->psSyncInfo->psBase); +} + +static void PVRSyncReleaseSyncInfo(struct PVR_SYNC_KERNEL_SYNC_INFO *psSyncInfo) +{ + unsigned long flags; + + spin_lock_irqsave(&gSyncInfoFreeListLock, flags); + list_add_tail(&psSyncInfo->sHead, &gSyncInfoFreeList); + spin_unlock_irqrestore(&gSyncInfoFreeListLock, flags); + + queue_work(gpsWorkQueue, &gsWork); +} + +static void PVRSyncFreeSyncData(struct PVR_SYNC_DATA *psSyncData) +{ + PVRSyncReleaseSyncInfo(psSyncData->psSyncInfo); + psSyncData->psSyncInfo = NULL; + kfree(psSyncData); +} + +static void +pvr_fence_context_fences_dump(struct PVR_FENCE_CONTEXT *psFenceCtx) +{ + struct PVR_FENCE *psPVRFence; + unsigned long flags; + + spin_lock_irqsave(&psFenceCtx->sListLock, flags); + list_for_each_entry(psPVRFence, &psFenceCtx->sFenceList, sFenceHead) + { + PVR_DUMPDEBUG_LOG( + "f %llu: WOCVA=0x%.8X WriteOps P %d C %d ReadOps P %d C %d ReadOps2 P %d C %d, %s %s", + (u64) psPVRFence->psFenceCtx->ui64FenceCtx, + psPVRFence->psSyncData->psSyncInfo->psBase->sWriteOpsCompleteDevVAddr.uiAddr, + psPVRFence->psSyncData->psSyncInfo->psBase->psSyncData->ui32WriteOpsPending, + psPVRFence->psSyncData->psSyncInfo->psBase->psSyncData->ui32WriteOpsComplete, + psPVRFence->psSyncData->psSyncInfo->psBase->psSyncData->ui32ReadOpsPending, + psPVRFence->psSyncData->psSyncInfo->psBase->psSyncData->ui32ReadOpsComplete, + 
psPVRFence->psSyncData->psSyncInfo->psBase->psSyncData->ui32ReadOps2Pending, + psPVRFence->psSyncData->psSyncInfo->psBase->psSyncData->ui32ReadOps2Complete, + psPVRFence->pName, + (&psPVRFence->sBase != psPVRFence->psFence) ? "(foreign)" : ""); + } + spin_unlock_irqrestore(&psFenceCtx->sListLock, flags); +} + +static inline +IMG_UINT32 pvr_fence_context_seqno_next(struct PVR_FENCE_CONTEXT *psFenceCtx) +{ + return atomic_inc_return(&psFenceCtx->sSeqno) - 1; +} + +static inline void +pvr_fence_context_free_deferred(struct PVR_FENCE_CONTEXT *psFenceCtx) +{ + struct PVR_FENCE *psPVRFence, *psPVRFenceTmp; + LIST_HEAD(deferred_free_list); + unsigned long flags; +#if defined(DEBUG_PRINT) + PVRSRV_KERNEL_SYNC_INFO *psSyncInfo; +#endif + + spin_lock_irqsave(&psFenceCtx->sListLock, flags); + list_for_each_entry_safe(psPVRFence, psPVRFenceTmp, &psFenceCtx->sDeferredFreeList, sFenceHead) + { + list_move(&psPVRFence->sFenceHead, &deferred_free_list); + } + spin_unlock_irqrestore(&psFenceCtx->sListLock, flags); + + list_for_each_entry_safe(psPVRFence, psPVRFenceTmp, &deferred_free_list, sFenceHead) + { +#if defined(DEBUG_PRINT) + PVRSRV_KERNEL_SYNC_INFO *psSyncInfo = psPVRFence->psSyncData->psSyncInfo->psBase; +#endif + list_del(&psPVRFence->sFenceHead); + DPF("R( ): WOCVA=0x%.8X ROCVA=0x%.8X RO2CVA=0x%.8X " + "WOP/C=0x%x/0x%x ROP/C=0x%x/0x%x RO2P/C=0x%x/0x%x " + "S=0x%x, Name=%s", + psSyncInfo->sWriteOpsCompleteDevVAddr.uiAddr, + psSyncInfo->sReadOpsCompleteDevVAddr.uiAddr, + psSyncInfo->sReadOps2CompleteDevVAddr.uiAddr, + psSyncInfo->psSyncData->ui32WriteOpsPending, + psSyncInfo->psSyncData->ui32WriteOpsComplete, + psSyncInfo->psSyncData->ui32ReadOpsPending, + psSyncInfo->psSyncData->ui32ReadOpsComplete, + psSyncInfo->psSyncData->ui32ReadOps2Pending, + psSyncInfo->psSyncData->ui32ReadOps2Complete, + psPVRFence->psSyncData->ui32WOPSnapshot, + psPVRFence->pName); + PVRSyncFreeSyncData(psPVRFence->psSyncData); + dma_fence_free(&psPVRFence->sBase); + } +} + +static void 
+pvr_fence_context_destroy_work(struct work_struct *psData) +{ + struct PVR_FENCE_CONTEXT *psFenceCtx = + container_of(psData, struct PVR_FENCE_CONTEXT, sDestroyWork); + + pvr_fence_context_free_deferred(psFenceCtx); + + if (WARN_ON(!list_empty_careful(&psFenceCtx->sFenceList))) + { + PVR_DPF((PVR_DBG_ERROR, "List is not empty in pvr_fence_context_destroy_kref")); + pvr_fence_context_fences_dump(psFenceCtx); + } + + destroy_workqueue(psFenceCtx->psFenceWq); + + kfree(psFenceCtx); +} + +static void +pvr_fence_context_destroy_kref(struct kref *pKref) +{ + struct PVR_FENCE_CONTEXT *psFenceCtx = + container_of(pKref, struct PVR_FENCE_CONTEXT, sRef); + + schedule_work(&psFenceCtx->sDestroyWork); +} + +/** + * pvr_fence_context_destroy - destroys a context + * @fctx: PVR fence context to destroy + * + * Destroys a PVR fence context with the expectation that all fences have been + * destroyed. + */ +void +pvr_fence_context_destroy(struct PVR_FENCE_CONTEXT *psFenceCtx) +{ + mutex_lock(&gFenceCtxListLock); + list_del(&psFenceCtx->sFenceCtxList); + mutex_unlock(&gFenceCtxListLock); + + kref_put(&psFenceCtx->sRef, pvr_fence_context_destroy_kref); +} + +static void +pvr_fence_context_signal_fences(struct work_struct *psData) +{ + struct PVR_FENCE_CONTEXT *psFenceCtx = + container_of(psData, struct PVR_FENCE_CONTEXT, sSignalWork); + struct PVR_FENCE *psPVRFence, *psPVRTmp; + unsigned long flags; + LIST_HEAD(signal_list); + + /* + * We can't call fence_signal while holding the lock as we can end up + * in a situation whereby pvr_fence_foreign_signal_sync, which also + * takes the list lock, ends up being called as a result of the + * fence_signal below, i.e. fence_signal(fence) -> fence->callback() + * -> fence_signal(foreign_fence) -> foreign_fence->callback() where + * the foreign_fence callback is pvr_fence_foreign_signal_sync. + * + * So extract the items we intend to signal and add them to their own + * queue. 
+ */ + spin_lock_irqsave(&psFenceCtx->sListLock, flags); + list_for_each_entry_safe(psPVRFence, psPVRTmp, &psFenceCtx->sSignalList, sSignalHead) + { + if (pvr_fence_sync_value_met(psPVRFence)) + { + list_move(&psPVRFence->sSignalHead, &signal_list); + } + } + spin_unlock_irqrestore(&psFenceCtx->sListLock, flags); + + list_for_each_entry_safe(psPVRFence, psPVRTmp, &signal_list, sSignalHead) + { + + PVR_FENCE_TRACE(&psPVRFence->sBase, "signalled fence (%s) %p\n", psPVRFence->pName, psPVRFence); + list_del(&psPVRFence->sSignalHead); + dma_fence_signal(psPVRFence->psFence); + dma_fence_put(psPVRFence->psFence); + } + + /* + * Take this opportunity to free up any fence objects we + * have deferred freeing. + */ + pvr_fence_context_free_deferred(psFenceCtx); + + /* Put back ref taken duing queing of fence context work */ + kref_put(&psFenceCtx->sRef, pvr_fence_context_destroy_kref); +} + +IMG_INTERNAL +void PVRSyncUpdateAllSyncs(void) +{ + IMG_BOOL bNeedToProcessQueues = IMG_FALSE; + struct list_head *psEntry; + + /* Check to see if any syncs have signalled. If they have, it may unblock + * the GPU. Decide what is needed and optionally schedule queue + * processing. + */ + mutex_lock(&gFenceCtxListLock); + list_for_each(psEntry, &gFenceCtxList) + { + struct PVR_FENCE_CONTEXT *psFenceCtx = container_of(psEntry, struct PVR_FENCE_CONTEXT, sFenceCtxList); + + if(psFenceCtx->bSyncHasSignaled) + { + psFenceCtx->bSyncHasSignaled = IMG_FALSE; + bNeedToProcessQueues = IMG_TRUE; + } + /* + * We need to take a reference on fence context as this + * function and fence context destruction call can come + * in any order. And release it in after serving work. 
+ */ + kref_get(&psFenceCtx->sRef); + queue_work(psFenceCtx->psFenceWq, &psFenceCtx->sSignalWork); + } + mutex_unlock(&gFenceCtxListLock); + + if(bNeedToProcessQueues) + { + queue_work(gpsWorkQueue, &gsWork); + } +} + +/** + * pvr_fence_context_create - creates a PVR fence context + * @name: context name (used for debugging) + * + * Creates a PVR fence context that can be used to create PVR fences or to + * create PVR fences from an existing fence. + * + * pvr_fence_context_destroy should be called to clean up the fence context. + * + * Returns NULL if a context cannot be created. + */ +struct PVR_FENCE_CONTEXT * +pvr_fence_context_create(const char *pName) +{ + struct PVR_FENCE_CONTEXT *psFenceCtx; + + psFenceCtx = kzalloc(sizeof(*psFenceCtx), GFP_KERNEL); + if (!psFenceCtx) + return NULL; + + spin_lock_init(&psFenceCtx->sLock); + atomic_set(&psFenceCtx->sSeqno, 0); + INIT_WORK(&psFenceCtx->sSignalWork, pvr_fence_context_signal_fences); + INIT_WORK(&psFenceCtx->sDestroyWork, pvr_fence_context_destroy_work); + spin_lock_init(&psFenceCtx->sListLock); + INIT_LIST_HEAD(&psFenceCtx->sSignalList); + INIT_LIST_HEAD(&psFenceCtx->sFenceList); + INIT_LIST_HEAD(&psFenceCtx->sDeferredFreeList); + + psFenceCtx->ui64FenceCtx = dma_fence_context_alloc(1); + psFenceCtx->pName = pName; + psFenceCtx->bSyncHasSignaled = IMG_FALSE; + + psFenceCtx->psFenceWq = create_freezable_workqueue("pvr_fence_sync_workqueue"); + if (!psFenceCtx->psFenceWq) + { + PVR_DPF((PVR_DBG_ERROR,"%s: failed to create fence workqueue\n", __func__)); + goto err_destroy_workqueue; + } + + kref_init(&psFenceCtx->sRef); + + mutex_lock(&gFenceCtxListLock); + list_add_tail(&psFenceCtx->sFenceCtxList, &gFenceCtxList); + mutex_unlock(&gFenceCtxListLock); + + PVR_FENCE_CTX_TRACE(psFenceCtx, "created fence context (%s)\n", pName); + + return psFenceCtx; + +err_destroy_workqueue: + destroy_workqueue(psFenceCtx->psFenceWq); + kfree(psFenceCtx); + return NULL; +} + +static const char * +pvr_fence_get_driver_name(struct 
dma_fence *psFence) +{ + return PVR_LDM_DRIVER_REGISTRATION_NAME; +} + +static const char * +pvr_fence_get_timeline_name(struct dma_fence *psFence) +{ + struct PVR_FENCE *psPVRFence = to_pvr_fence(psFence); + + return psPVRFence->psFenceCtx->pName; +} + +static bool +pvr_fence_enable_signaling(struct dma_fence *psFence) +{ + struct PVR_FENCE *psPVRFence = to_pvr_fence(psFence); + struct PVR_FENCE_CONTEXT *psFenceCtx = psPVRFence->psFenceCtx; + unsigned long flags; + + WARN_ON_SMP(!spin_is_locked(&psFenceCtx->sLock)); + + if (pvr_fence_sync_value_met(psPVRFence)) + { + return false; + } + + dma_fence_get(&psPVRFence->sBase); + + spin_lock_irqsave(&psFenceCtx->sListLock, flags); + list_add_tail(&psPVRFence->sSignalHead, &psFenceCtx->sSignalList); + spin_unlock_irqrestore(&psFenceCtx->sListLock, flags); + + PVR_FENCE_TRACE(&psPVRFence->sBase, "signalling enabled (%p)\n", psPVRFence); + + return true; +} + +static bool +pvr_fence_is_signaled(struct dma_fence *psFence) +{ + struct PVR_FENCE *psPVRFence = to_pvr_fence(psFence); + + if(pvr_fence_sync_value_met(psPVRFence)) + { + psPVRFence->psFenceCtx->bSyncHasSignaled = IMG_TRUE; + return true; + } + else + { + return false; + } +} + +static void +pvr_fence_release(struct dma_fence *psFence) +{ + struct PVR_FENCE *psPVRFence = to_pvr_fence(psFence); + struct PVR_FENCE_CONTEXT *psFenceCtx = psPVRFence->psFenceCtx; + unsigned long flags; + + PVR_FENCE_TRACE(&psPVRFence->sBase, "released fence (%s) %p\n", psPVRFence->pName, psPVRFence); + + spin_lock_irqsave(&psFenceCtx->sListLock, flags); + list_move(&psPVRFence->sFenceHead, &psFenceCtx->sDeferredFreeList); + spin_unlock_irqrestore(&psFenceCtx->sListLock, flags); + + kref_put(&psFenceCtx->sRef, pvr_fence_context_destroy_kref); +} + +const struct dma_fence_ops pvr_fence_ops = { + .get_driver_name = pvr_fence_get_driver_name, + .get_timeline_name = pvr_fence_get_timeline_name, + .enable_signaling = pvr_fence_enable_signaling, + .signaled = pvr_fence_is_signaled, + .wait = 
dma_fence_default_wait, + .release = pvr_fence_release, +}; + +/** + * pvr_fence_create - creates a PVR fence + * @fctx: PVR fence context on which the PVR fence should be created + * @name: PVR fence name (used for debugging) + * + * Creates a PVR fence. + * + * Once the fence is finished with pvr_fence_destroy should be called. + * + * Returns NULL if a PVR fence cannot be created. + */ +struct PVR_FENCE * +pvr_fence_create(struct PVR_FENCE_CONTEXT *psFenceCtx, const char *name, struct PVR_SYNC_KERNEL_SYNC_INFO *psSyncInfo) +{ + struct PVR_FENCE *psPVRFence; + unsigned int seqno; + unsigned long flags; + + psPVRFence = kzalloc(sizeof(*psPVRFence), GFP_KERNEL); + if (!psPVRFence) + return NULL; + + psPVRFence->psSyncData = kmalloc(sizeof(struct PVR_SYNC_DATA), GFP_KERNEL); + if(!psPVRFence->psSyncData) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate PVR_SYNC_DATA", __func__)); + goto err_free_fence; + } + psPVRFence->psSyncData->psSyncInfo = psSyncInfo; + + INIT_LIST_HEAD(&psPVRFence->sFenceHead); + INIT_LIST_HEAD(&psPVRFence->sSignalHead); + psPVRFence->psFenceCtx = psFenceCtx; + psPVRFence->pName = name; + psPVRFence->psFence = &psPVRFence->sBase; + + seqno = pvr_fence_context_seqno_next(psFenceCtx); + dma_fence_init(&psPVRFence->sBase, &pvr_fence_ops, &psFenceCtx->sLock, + psFenceCtx->ui64FenceCtx, seqno); + + spin_lock_irqsave(&psFenceCtx->sListLock, flags); + list_add_tail(&psPVRFence->sFenceHead, &psFenceCtx->sFenceList); + spin_unlock_irqrestore(&psFenceCtx->sListLock, flags); + + kref_get(&psFenceCtx->sRef); + + PVR_FENCE_TRACE(&psPVRFence->sBase, "created fence (%s) %p\n", name, psPVRFence); + + return psPVRFence; + +err_free_fence: + kfree(psPVRFence); + return NULL; +} + +static const char * +pvr_fence_foreign_get_driver_name(struct dma_fence *psFence) +{ + return "unknown"; +} + +static const char * +pvr_fence_foreign_get_timeline_name(struct dma_fence *psFence) +{ + return "unknown"; +} + +static bool +pvr_fence_foreign_enable_signaling(struct 
dma_fence *psFence) +{ + WARN_ON("cannot enable signalling on foreign fence"); + return false; +} + +static signed long +pvr_fence_foreign_wait(struct dma_fence *psFence, bool intr, signed long timeout) +{ + WARN_ON("cannot wait on foreign fence"); + return 0; +} + +static void +pvr_fence_foreign_release(struct dma_fence *psFence) +{ + struct PVR_FENCE *psPVRFence = to_pvr_fence(psFence); + struct PVR_FENCE_CONTEXT *psFenceCtx = psPVRFence->psFenceCtx; + unsigned long flags; + + spin_lock_irqsave(&psFenceCtx->sListLock, flags); + list_move(&psPVRFence->sFenceHead, &psFenceCtx->sDeferredFreeList); + spin_unlock_irqrestore(&psFenceCtx->sListLock, flags); + + kref_put(&psFenceCtx->sRef, pvr_fence_context_destroy_kref); +} + +const struct dma_fence_ops pvr_fence_foreign_ops = { + .get_driver_name = pvr_fence_foreign_get_driver_name, + .get_timeline_name = pvr_fence_foreign_get_timeline_name, + .enable_signaling = pvr_fence_foreign_enable_signaling, + .wait = pvr_fence_foreign_wait, + .release = pvr_fence_foreign_release, +}; + +static void +pvr_fence_foreign_signal_sync(struct dma_fence *psFence, struct dma_fence_cb *psCb) +{ + struct PVR_FENCE *psPVRFence = container_of(psCb, struct PVR_FENCE, sFenceCb); + + if (WARN_ON_ONCE(is_pvr_fence(psFence))) + return; + + PVRSyncSWCompleteOp(psPVRFence->psSyncData->psSyncInfo->psBase); + + PVR_FENCE_TRACE(&psPVRFence->sBase, + "foreign fence %llu#%d signalled (%s)\n", + psPVRFence->psFenceCtx->ui64FenceCtx, + psPVRFence->psFenceCtx->sSeqno, psPVRFence->pName); + + psPVRFence->psFenceCtx->bSyncHasSignaled = IMG_TRUE; + + /* Drop the reference on the base fence */ + dma_fence_put(&psPVRFence->sBase); +} + +/** + * pvr_fence_create_from_fence - creates a PVR fence from a fence + * @fctx: PVR fence context on which the PVR fence should be created + * @fence: fence from which the PVR fence should be created + * @name: PVR fence name (used for debugging) + * + * Creates a PVR fence from an existing fence. 
If the fence is a foreign fence, + * i.e. one that doesn't originate from a PVR fence context, then a new PVR + * fence will be created. Otherwise, a reference will be taken on the underlying + * fence and the PVR fence will be returned. + * + * Once the fence is finished with pvr_fence_destroy should be called. + * + * Returns NULL if a PVR fence cannot be created. + */ +struct PVR_FENCE * +pvr_fence_create_from_fence(struct PVR_FENCE_CONTEXT *psFenceCtx, + struct dma_fence *psFence, + const char *name) +{ + struct PVR_FENCE *psPVRFence; + unsigned int seqno; + unsigned long flags; + struct PVR_SYNC_KERNEL_SYNC_INFO *psKernelSyncInfo; + int err; + + psPVRFence = kzalloc(sizeof(*psPVRFence), GFP_KERNEL); + if (!psPVRFence) + return NULL; + + psPVRFence->psSyncData = kmalloc(sizeof(struct PVR_SYNC_DATA), GFP_KERNEL); + if (!psPVRFence->psSyncData) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate PVR_SYNC_DATA", + __func__)); + err = -ENOMEM; + goto err_free_pvr_fence; + } + + psKernelSyncInfo = kmalloc(sizeof(struct PVR_SYNC_KERNEL_SYNC_INFO), GFP_KERNEL); + if (!psKernelSyncInfo) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate " + "PVR_SYNC_KERNEL_SYNC_INFO", __func__)); + err = -ENOMEM; + goto err_free_pvr_fence; + } + + /* Allocate a "shadow" SYNCINFO for this foreign fence and set it up to be + * completed by the callback. 
+ */ + err = PVRSRVAllocSyncInfoKM(gsSyncServicesConnection.hDevCookie, + gsSyncServicesConnection.hDevMemContext, + &psKernelSyncInfo->psBase); + if(err != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate syncinfo", __func__)); + goto err_free_sync_data; + } + + PVRSyncSWTakeOp(psKernelSyncInfo->psBase); + + INIT_LIST_HEAD(&psPVRFence->sFenceHead); + INIT_LIST_HEAD(&psPVRFence->sSignalHead); + psPVRFence->psFenceCtx = psFenceCtx; + psPVRFence->pName = name; + psPVRFence->psFence = psFence; + psPVRFence->psSyncData->psSyncInfo = psKernelSyncInfo; + /* + * We use the base fence to refcount the PVR fence and to do the + * necessary clean up once the refcount drops to 0. + */ + seqno = pvr_fence_context_seqno_next(psFenceCtx); + dma_fence_init(&psPVRFence->sBase, &pvr_fence_foreign_ops, &psFenceCtx->sLock, + psFenceCtx->ui64FenceCtx, seqno); + + /* + * Take an extra reference on the base fence that gets dropped when the + * foreign fence is signalled. + */ + dma_fence_get(&psPVRFence->sBase); + + spin_lock_irqsave(&psFenceCtx->sListLock, flags); + list_add_tail(&psPVRFence->sFenceHead, &psFenceCtx->sFenceList); + spin_unlock_irqrestore(&psFenceCtx->sListLock, flags); + kref_get(&psFenceCtx->sRef); + + PVR_FENCE_TRACE(&psPVRFence->sBase, + "created fence from foreign fence %llu#%d (%s)\n", + (u64) psPVRFence->psFenceCtx->ui64FenceCtx, + psPVRFence->psFenceCtx->sSeqno, name); + + err = dma_fence_add_callback(psFence, &psPVRFence->sFenceCb, + pvr_fence_foreign_signal_sync); + if (err) { + if (err != -ENOENT) + goto err_put_ref; + + PVRSyncSWCompleteOp(psKernelSyncInfo->psBase); + PVR_FENCE_TRACE(&psPVRFence->sBase, + "foreign fence %llu#%d already signaled (%s)\n", + (u64) psPVRFence->psFenceCtx->ui64FenceCtx, + psPVRFence->psFenceCtx->sSeqno, + name); + dma_fence_put(&psPVRFence->sBase); + } + + + return psPVRFence; + +err_put_ref: + kref_put(&psFenceCtx->sRef, pvr_fence_context_destroy_kref); + spin_lock_irqsave(&psFenceCtx->sListLock, flags); + 
list_del(&psPVRFence->sFenceHead); + spin_unlock_irqrestore(&psFenceCtx->sListLock, flags); + PVRSyncSWCompleteOp(psKernelSyncInfo->psBase); + PVRSRVReleaseSyncInfoKM(psKernelSyncInfo->psBase); +err_free_sync_data: + kfree(psPVRFence->psSyncData); +err_free_pvr_fence: + kfree(psPVRFence); + return NULL; +} + +/** + * pvr_fence_destroy - destroys a PVR fence + * @pvr_fence: PVR fence to destroy + * + * Destroys a PVR fence. Upon return, the PVR fence may still exist if something + * else still references the underlying fence, e.g. a reservation object, or if + * software signalling has been enabled and the fence hasn't yet been signalled. + */ +void +pvr_fence_destroy(struct PVR_FENCE *psPVRFence) +{ + PVR_FENCE_TRACE(&psPVRFence->sBase, "destroyed fence (%s)\n", psPVRFence->pName); + + dma_fence_put(&psPVRFence->sBase); +} + +static bool is_pvr_timeline(struct file *psFile) +{ + return psFile->f_op == &pvr_sync_fops; +} + +static struct PVR_SYNC_TIMELINE *pvr_sync_timeline_fget(int fd) +{ + struct file *psFile = fget(fd); + + if (!psFile) + return NULL; + + if (!is_pvr_timeline(psFile)) { + fput(psFile); + return NULL; + } + + return psFile->private_data; +} + +static void pvr_sync_timeline_fput(struct PVR_SYNC_TIMELINE *psTimeLine) +{ + fput(psTimeLine->psFile); +} + +static int PVRSyncOpen(struct inode *inode, struct file *psFile) +{ + struct PVR_FENCE_CONTEXT *psFenceCtx; + struct PVR_SYNC_TIMELINE *psTimeline; + char task_comm[TASK_COMM_LEN]; + int err = -ENOMEM; + + get_task_comm(task_comm, current); + + psTimeline = kzalloc(sizeof(*psTimeline), GFP_KERNEL); + if (!psTimeline) + goto err_out; + + strlcpy(psTimeline->name, task_comm, sizeof(psTimeline->name)); + + psFenceCtx = pvr_fence_context_create(psTimeline->name); + if (!psFenceCtx) { + PVR_DPF((PVR_DBG_ERROR, "pvr_fence: %s: pvr_fence_context_create failed\n", __func__)); + goto err_free_timeline; + } + + psTimeline->psSyncInfo = kmalloc(sizeof(struct PVR_SYNC_KERNEL_SYNC_INFO), GFP_KERNEL); + 
if(!psTimeline->psSyncInfo) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate PVR_SYNC_KERNEL_SYNC_INFO", __func__)); + goto err_free_fence; + } + + LinuxLockMutexNested(&gPVRSRVLock, PVRSRV_LOCK_CLASS_BRIDGE); + err = PVRSRVAllocSyncInfoKM(gsSyncServicesConnection.hDevCookie, + gsSyncServicesConnection.hDevMemContext, + &psTimeline->psSyncInfo->psBase); + LinuxUnLockMutex(&gPVRSRVLock); + + if (err != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate timeline syncinfo", + __func__)); + goto err_free_syncinfo; + } + + psTimeline->psFenceCtx = psFenceCtx; + psTimeline->psFile = psFile; + + psFile->private_data = psTimeline; + err = 0; +err_out: + return err; +err_free_syncinfo: + kfree(psTimeline->psSyncInfo); +err_free_fence: + pvr_fence_context_destroy(psFenceCtx); +err_free_timeline: + kfree(psTimeline); + goto err_out; +} + +static int PVRSyncRelease(struct inode *inode, struct file *psFile) +{ + struct PVR_SYNC_TIMELINE *psTimeline = psFile->private_data; + + if (psTimeline->pSWTimeline) + { + /* This makes sure any outstanding SW syncs are marked as + * complete at timeline close time. 
Otherwise it'll leak the + * timeline (as outstanding fences hold a ref) and possibly + * wedge the system if something is waiting on one of those + * fences + */ + pvr_counting_fence_timeline_force_complete(psTimeline->pSWTimeline); + pvr_counting_fence_timeline_put(psTimeline->pSWTimeline); + } + + pvr_fence_context_destroy(psTimeline->psFenceCtx); + PVRSyncReleaseSyncInfo(psTimeline->psSyncInfo); + kfree(psTimeline); + + return 0; +} + +static long PVRSyncIOCTLCreate(struct PVR_SYNC_TIMELINE *psTimeline, void __user *pvData) +{ + struct PVR_SYNC_KERNEL_SYNC_INFO *psProvidedSyncInfo = NULL; + struct PVR_ALLOC_SYNC_DATA *psAllocSyncData; + struct PVR_SYNC_CREATE_IOCTL_DATA sData; + int err = -EFAULT, iFd; + struct PVR_FENCE *psPVRFence; + struct sync_file *psSyncfile; + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(4,2,0)) + iFd = get_unused_fd_flags(0); +#else + iFd = get_unused_fd(); +#endif + if (iFd < 0) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to find unused fd (%d)", + __func__, iFd)); + goto err_out; + } + + if (!access_ok(VERIFY_READ, pvData, sizeof(sData))) + goto err_put_fd; + + if (copy_from_user(&sData, pvData, sizeof(sData))) + goto err_put_fd; + + if (sData.allocdSyncInfo < 0) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Requested to create a fence from " + " an invalid alloc'd fd (%d)", __func__, + sData.allocdSyncInfo)); + goto err_put_fd; + } + + psAllocSyncData = PVRSyncAllocFDGet(sData.allocdSyncInfo); + if (!psAllocSyncData) { + PVR_DPF((PVR_DBG_ERROR, "pvr_fence: %s: Failed to open supplied file fd (%d)\n", + __func__, sData.allocdSyncInfo)); + err = PVRSRV_ERROR_HANDLE_NOT_FOUND; + goto err_put_fd; + } + + /* Move the psSyncInfo to the newly created sync, to avoid attempting + * to create multiple syncs from the same allocation. 
+ */ + psProvidedSyncInfo = psAllocSyncData->psSyncInfo; + psAllocSyncData->psSyncInfo = NULL; + + if (psProvidedSyncInfo == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Alloc'd sync info is null - " + "possibly already CREATEd?", __func__)); + fput(psAllocSyncData->psFile); + goto err_put_fd; + } + fput(psAllocSyncData->psFile); + + sData.name[sizeof(sData.name) - 1] = '\0'; + + psPVRFence = pvr_fence_create(psAllocSyncData->psTimeline->psFenceCtx, sData.name, psProvidedSyncInfo); + if (!psPVRFence) + { + PVR_DPF((PVR_DBG_ERROR, "pvr_fence: %s: Failed to create new pvr_fence\n", __func__)); + err = PVRSRV_ERROR_OUT_OF_MEMORY; + goto err_put_fd; + } + + psPVRFence->psSyncData->ui32WOPSnapshot = psAllocSyncData->psTimeline->psSyncInfo->psBase->psSyncData->ui32WriteOpsPending; + + psSyncfile = sync_file_create(&psPVRFence->sBase); + if (!psSyncfile) { + PVR_DPF((PVR_DBG_ERROR, ": %s: Failed to create sync_file\n", __func__)); + err = PVRSRV_ERROR_OUT_OF_MEMORY; + goto err_destroy_fence; + } + + sData.fence = iFd; + + if (!access_ok(VERIFY_WRITE, pvData, sizeof(sData))) + { + goto err_destroy_fence; + } + + if (copy_to_user(pvData, &sData, sizeof(sData))) + { + goto err_destroy_fence; + } + + /* If the fence is a 'real' one, its signal status will be updated by + * the MISR calling PVRSyncUpdateAllSyncs(). However, if we created + * a 'fake' fence (for power optimization reasons) it has already + * completed, and needs to be marked signalled (as the MISR will + * never run for 'fake' fences). 
+ */ + if(psProvidedSyncInfo->psBase->psSyncData->ui32WriteOpsPending == 0) + { + psPVRFence->psFenceCtx->bSyncHasSignaled = IMG_TRUE; + } + + DPF("Create: WOCVA=0x%.8X ROCVA=0x%.8X RO2CVA=0x%.8X F=%p %s", + psProvidedSyncInfo->psBase->sWriteOpsCompleteDevVAddr.uiAddr, + psProvidedSyncInfo->psBase->sReadOpsCompleteDevVAddr.uiAddr, + psProvidedSyncInfo->psBase->sReadOps2CompleteDevVAddr.uiAddr, + psPVRFence, sData.name); + + fd_install(iFd, psSyncfile->file); + err = 0; +err_out: + return err; + +err_destroy_fence: + pvr_fence_destroy(psPVRFence); +err_put_fd: + put_unused_fd(iFd); + goto err_out; +} + +static long PVRSyncIOCTLRename(struct PVR_SYNC_TIMELINE *psTimeline, void __user *user_data) +{ + int err = 0; + struct PVR_SYNC_RENAME_IOCTL_DATA data; + + if (!access_ok(VERIFY_READ, user_data, sizeof(data))) { + err = -EFAULT; + goto err; + } + + if (copy_from_user(&data, user_data, sizeof(data))) { + err = -EFAULT; + goto err; + } + + data.szName[sizeof(data.szName) - 1] = '\0'; + strlcpy(psTimeline->name, data.szName, sizeof(psTimeline->name)); + +err: + return err; +} + +static long PVRSyncIOCTLForceSw(struct PVR_SYNC_TIMELINE *psTimeline, void **private_data) +{ + /* Already in SW mode? 
*/ + if (psTimeline->pSWTimeline) + return 0; + + /* Create a sw_sync timeline with the old GPU timeline's name */ + psTimeline->pSWTimeline = pvr_counting_fence_timeline_create(psTimeline->name); + + /* Don't add SW timeline to global timeline list */ + mutex_lock(&gFenceCtxListLock); + list_del(&psTimeline->psFenceCtx->sFenceCtxList); + mutex_unlock(&gFenceCtxListLock); + + if (!psTimeline->pSWTimeline) + return -ENOMEM; + + return 0; +} + +static long PVRSyncIOCTLCreateSwFence(struct PVR_SYNC_TIMELINE *psTimeline, void __user *user_data) +{ + struct sw_sync_create_fence_data sData; + struct sync_file *psSyncFile; + int fd = get_unused_fd_flags(0); + struct dma_fence *psFence; + int err = -EFAULT; + + if (fd < 0) + { + PVR_DPF((PVR_DBG_ERROR,"pvr_fence: %s: Failed to find unused fd (%d)", __func__, fd)); + goto err_out; + } + + if (copy_from_user(&sData, user_data, sizeof(sData))) + { + PVR_DPF((PVR_DBG_ERROR,"pvr_fence: %s: Failed copy from user", __func__)); + goto err_put_fd; + } + + psFence = pvr_counting_fence_create(psTimeline->pSWTimeline, sData.value); + if (!psFence) + { + PVR_DPF((PVR_DBG_ERROR,"pvr_fence: %s: Failed to create a sync point (%d)", __func__, fd)); + err = -ENOMEM; + goto err_put_fd; + } + + psSyncFile = sync_file_create(psFence); + if (!psSyncFile) + { + PVR_DPF((PVR_DBG_ERROR,"pvr_fence: %s: Failed to create a sync point (%d)", __func__, fd)); + err = -ENOMEM; + goto err_put_fence; + } + + sData.fence = fd; + + if (copy_to_user(user_data, &sData, sizeof(sData))) + { + PVR_DPF((PVR_DBG_ERROR,"pvr_fence: %s: Failed copy to user", __func__)); + goto err_put_fence; + } + + fd_install(fd, psSyncFile->file); + err = 0; +err_out: + return err; +err_put_fence: + dma_fence_put(psFence); +err_put_fd: + put_unused_fd(fd); + goto err_out; +} + +static long PVRSyncIOCTLSWInc(struct PVR_SYNC_TIMELINE *psTimeline, void __user *user_data) +{ + u32 value; + + if (copy_from_user(&value, user_data, sizeof(value))) + return -EFAULT; + + 
pvr_counting_fence_timeline_inc(psTimeline->pSWTimeline, value); + return 0; +} + +static int PVRSyncFenceAllocRelease(struct inode *inode, struct file *file) +{ + struct PVR_ALLOC_SYNC_DATA *psAllocSyncData = file->private_data; + + if(psAllocSyncData->psSyncInfo) + { + + DPF("R(a): WOCVA=0x%.8X ROCVA=0x%.8X RO2CVA=0x%.8X", + psAllocSyncData->psSyncInfo->psBase->sWriteOpsCompleteDevVAddr.uiAddr, + psAllocSyncData->psSyncInfo->psBase->sReadOpsCompleteDevVAddr.uiAddr, + psAllocSyncData->psSyncInfo->psBase->sReadOps2CompleteDevVAddr.uiAddr); + + PVRSyncReleaseSyncInfo(psAllocSyncData->psSyncInfo); + psAllocSyncData->psSyncInfo = NULL; + } + + kfree(psAllocSyncData); + return 0; +} + +static const struct file_operations gsSyncFenceAllocFOps = +{ + .release = PVRSyncFenceAllocRelease, +}; + +struct PVR_ALLOC_SYNC_DATA *PVRSyncAllocFDGet(int fd) +{ + struct file *file = fget(fd); + if (!file) + return NULL; + if (file->f_op != &gsSyncFenceAllocFOps) + goto err; + return file->private_data; +err: + fput(file); + return NULL; +} + +static long +PVRSyncIOCTLAlloc(struct PVR_SYNC_TIMELINE *psTimeline, void __user *pvData) +{ + struct PVR_ALLOC_SYNC_DATA *psAllocSyncData; + int err = -EFAULT, iFd; + struct PVR_SYNC_ALLOC_IOCTL_DATA sData; + PVRSRV_SYNC_DATA *psSyncData; + struct file *psFile; + PVRSRV_ERROR eError; + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(4,2,0)) + iFd = get_unused_fd_flags(0); +#else + iFd = get_unused_fd(); +#endif + if (iFd < 0) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to find unused fd (%d)", + __func__, iFd)); + goto err_out; + } + + if (!access_ok(VERIFY_READ, pvData, sizeof(sData))) + goto err_put_fd; + + if (copy_from_user(&sData, pvData, sizeof(sData))) + goto err_put_fd; + + psAllocSyncData = kmalloc(sizeof(struct PVR_ALLOC_SYNC_DATA), GFP_KERNEL); + if (!psAllocSyncData) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate PVR_ALLOC_SYNC_DATA", __func__)); + err = -ENOMEM; + goto err_put_fd; + } + + psAllocSyncData->psSyncInfo = 
kmalloc(sizeof(struct PVR_SYNC_KERNEL_SYNC_INFO), GFP_KERNEL); + if (!psAllocSyncData->psSyncInfo) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate PVR_SYNC_KERNEL_SYNC_INFO", __func__)); + err = -ENOMEM; + goto err_free_alloc_sync_data; + } + + LinuxLockMutexNested(&gPVRSRVLock, PVRSRV_LOCK_CLASS_BRIDGE); + eError = PVRSRVAllocSyncInfoKM(gsSyncServicesConnection.hDevCookie, + gsSyncServicesConnection.hDevMemContext, + &psAllocSyncData->psSyncInfo->psBase); + LinuxUnLockMutex(&gPVRSRVLock); + + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to alloc syncinfo (%d)", __func__, eError)); + err = -ENOMEM; + goto err_free_sync_info; + } + + psFile = anon_inode_getfile("pvr_fence_alloc", &gsSyncFenceAllocFOps, psAllocSyncData, 0); + if (!psFile) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create anon inode", __func__)); + err = -ENOMEM; + goto err_release_sync_info; + } + + sData.fence = iFd; + + /* Check if this timeline looks idle. If there are still TQs running + * on it, userspace shouldn't attempt any kind of power optimization + * (e.g. it must not dummy-process GPU fences). + * + * Determining idleness here is safe because the ALLOC and CREATE + * pvr_sync ioctls must be called under the gralloc module lock, so + * we can't be creating another new fence op while we are still + * processing this one. + * + * Take the bridge lock anyway so we can be sure that we read the + * timeline sync's pending value coherently. The complete value may + * be modified by the GPU, but worse-case we will decide we can't do + * the power optimization and will still be correct. 
+ */ + LinuxLockMutexNested(&gPVRSRVLock, PVRSRV_LOCK_CLASS_BRIDGE); + + psSyncData = psTimeline->psSyncInfo->psBase->psSyncData; + if(psSyncData->ui32WriteOpsPending == psSyncData->ui32WriteOpsComplete) + { + sData.bTimelineIdle = IMG_TRUE; + } + else + { + sData.bTimelineIdle = IMG_FALSE; + } + + LinuxUnLockMutex(&gPVRSRVLock); + + if (!access_ok(VERIFY_WRITE, pvData, sizeof(sData))) + goto err_release_file; + + if (copy_to_user(pvData, &sData, sizeof(sData))) + goto err_release_file; + + psAllocSyncData->psTimeline = psTimeline; + psAllocSyncData->psFile = psFile; + + DPF("A( ): WOCVA=0x%.8X ROCVA=0x%.8X RO2CVA=0x%.8X", + psAllocSyncData->psSyncInfo->psBase->sWriteOpsCompleteDevVAddr.uiAddr, + psAllocSyncData->psSyncInfo->psBase->sReadOpsCompleteDevVAddr.uiAddr, + psAllocSyncData->psSyncInfo->psBase->sReadOps2CompleteDevVAddr.uiAddr); + + fd_install(iFd, psFile); + err = 0; +err_out: + return err; +err_release_sync_info: + PVRSRVReleaseSyncInfoKM(psAllocSyncData->psSyncInfo->psBase); +err_free_sync_info: + kfree(psAllocSyncData->psSyncInfo); +err_free_alloc_sync_data: + kfree(psAllocSyncData); +err_put_fd: + put_unused_fd(iFd); + goto err_out; +err_release_file: + fput(psFile); + put_unused_fd(iFd); + goto err_out; +} + +static long +PVRSyncIOCTLDebug(struct PVR_SYNC_TIMELINE *psTimeline, void __user *pvData) +{ + struct PVR_SYNC_DEBUG_IOCTL_DATA sData; + PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo; + struct dma_fence *psFence; + struct PVR_FENCE *psPVRFence; + int err = -EFAULT; + PVR_SYNC_DEBUG *psMetaData; + + if(!access_ok(VERIFY_READ, pvData, sizeof(sData))) + goto err_out; + + if(copy_from_user(&sData, pvData, sizeof(sData))) + goto err_out; + + psMetaData = &sData.sSync[0].sMetaData; + + psFence = sync_file_get_fence(sData.iFenceFD); + if(!psFence) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get fence from fd", __func__)); + goto err_out; + } + + psPVRFence = to_pvr_fence(psFence); + /* Don't dump foreign fence */ + if(!psPVRFence) + return 0; + + 
psKernelSyncInfo = psPVRFence->psSyncData->psSyncInfo->psBase; + PVR_ASSERT(psKernelSyncInfo != NULL); + + /* The sync refcount is valid as long as the FenceFD stays open, + * so we can access it directly without worrying about it being + * freed. + */ + sData.sSync[0].sSyncData = *psKernelSyncInfo->psSyncData; + + psMetaData->ui32WriteOpsPendingSnapshot = psPVRFence->psSyncData->ui32WOPSnapshot; + + dma_fence_put(psFence); + + sData.ui32NumPoints = 1; + + if(!access_ok(VERIFY_WRITE, pvData, sizeof(sData))) + goto err_out; + + if(copy_to_user(pvData, &sData, sizeof(sData))) + goto err_out; + + err = 0; +err_out: + return err; +} + +static long +PVRSyncIOCTL(struct file *file, unsigned int cmd, unsigned long __user arg) +{ + void __user *user_data = (void __user *)arg; + long err = -ENOTTY; + struct PVR_SYNC_TIMELINE *psTimeline = file->private_data; + bool is_sw_timeline = psTimeline->pSWTimeline != NULL; + + if (!is_sw_timeline) { + + switch (cmd) { + case PVR_SYNC_IOC_CREATE_FENCE: + err = PVRSyncIOCTLCreate(psTimeline, user_data); + break; + case PVR_SYNC_IOC_DEBUG_FENCE: + err = PVRSyncIOCTLDebug(psTimeline, user_data); + break; + case PVR_SYNC_IOC_ALLOC_FENCE: + err = PVRSyncIOCTLAlloc(psTimeline, user_data); + break; + case PVR_SYNC_IOC_RENAME: + err = PVRSyncIOCTLRename(psTimeline, user_data); + break; + case PVR_SYNC_IOC_FORCE_SW_ONLY: + err = PVRSyncIOCTLForceSw(psTimeline, &file->private_data); + break; + default: + err = -ENOTTY; + } + } else { + + switch (cmd) { + case SW_SYNC_IOC_CREATE_FENCE: + err = PVRSyncIOCTLCreateSwFence(psTimeline, user_data); + break; + case SW_SYNC_IOC_INC: + err = PVRSyncIOCTLSWInc(psTimeline, user_data); + break; + default: + err = -ENOTTY; + } + } + + return err; +} + +static void PVRSyncWorkQueueFunction(struct work_struct *data) +{ + PVRSRV_DEVICE_NODE *psDevNode = + (PVRSRV_DEVICE_NODE*)gsSyncServicesConnection.hDevCookie; + struct list_head sFreeList, *psEntry, *n; + unsigned long flags; + + /* We lock the bridge mutex 
here for two reasons. + * + * Firstly, the SGXScheduleProcessQueuesKM and PVRSRVReleaseSyncInfoKM + * functions require that they are called under lock. Multiple threads + * into services are not allowed. + * + * Secondly, we need to ensure that when processing the defer-free list, + * the PVRSyncIsSyncInfoInUse() function is called *after* any freed + * sync was attached as a HW dependency (had ROP/ROP2 taken). This is + * because for 'foreign' sync timelines we allocate a new object and + * mark it for deletion immediately. If the 'foreign' sync_pt signals + * before the kick ioctl has completed, we can block it from being + * prematurely freed by holding the bridge mutex. + * + * NOTE: This code relies on the assumption that we can acquire a + * spinlock while a mutex is held and that other users of the spinlock + * do not need to hold the bridge mutex. + */ + LinuxLockMutexNested(&gPVRSRVLock, PVRSRV_LOCK_CLASS_BRIDGE); + + /* A completed SW operation may un-block the GPU */ + SGXScheduleProcessQueuesKM(psDevNode); + + /* We can't call PVRSRVReleaseSyncInfoKM directly in this loop because + * that will take the mmap mutex. We can't take mutexes while we have + * this list locked with a spinlock. So move all the items we want to + * free to another, local list (no locking required) and process it + * in a second loop. 
+ */ + + INIT_LIST_HEAD(&sFreeList); + spin_lock_irqsave(&gSyncInfoFreeListLock, flags); + list_for_each_safe(psEntry, n, &gSyncInfoFreeList) + { + struct PVR_SYNC_KERNEL_SYNC_INFO *psSyncInfo = + container_of(psEntry, struct PVR_SYNC_KERNEL_SYNC_INFO, sHead); + + if(!PVRSyncIsSyncInfoInUse(psSyncInfo->psBase)) + list_move_tail(psEntry, &sFreeList); + + } + spin_unlock_irqrestore(&gSyncInfoFreeListLock, flags); + + list_for_each_safe(psEntry, n, &sFreeList) + { + struct PVR_SYNC_KERNEL_SYNC_INFO *psSyncInfo = + container_of(psEntry, struct PVR_SYNC_KERNEL_SYNC_INFO, sHead); + + list_del(psEntry); + + DPF("F(d): WOCVA=0x%.8X ROCVA=0x%.8X RO2CVA=0x%.8X", + psSyncInfo->psBase->sWriteOpsCompleteDevVAddr.uiAddr, + psSyncInfo->psBase->sReadOpsCompleteDevVAddr.uiAddr, + psSyncInfo->psBase->sReadOps2CompleteDevVAddr.uiAddr); + + PVRSRVReleaseSyncInfoKM(psSyncInfo->psBase); + psSyncInfo->psBase = NULL; + + kfree(psSyncInfo); + } + + LinuxUnLockMutex(&gPVRSRVLock); +} + +static const struct file_operations pvr_sync_fops = { + .owner = THIS_MODULE, + .open = PVRSyncOpen, + .release = PVRSyncRelease, + .unlocked_ioctl = PVRSyncIOCTL, + .compat_ioctl = PVRSyncIOCTL, +}; + +static struct miscdevice pvr_sync_device = { + .minor = MISC_DYNAMIC_MINOR, + .name = "pvr_sync", + .fops = &pvr_sync_fops, +}; + +IMG_INTERNAL +int PVRSyncDeviceInit(void) +{ + int err = -1; + + if(PVRSyncInitServices() != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to initialise services", + __func__)); + goto err_out; + } + + gsSyncServicesConnection.psForeignFenceCtx = pvr_fence_context_create("foreign_sync"); + if (!gsSyncServicesConnection.psForeignFenceCtx) + { + PVR_DPF((PVR_DBG_ERROR,"pvr_fence: %s: Failed to create foreign sync context\n", + __func__)); + err = PVRSRV_ERROR_OUT_OF_MEMORY; + goto err_out; + } + + gpsWorkQueue = create_freezable_workqueue("pvr_sync_workqueue"); + if(!gpsWorkQueue) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create pvr_sync workqueue", __func__)); + goto 
err_deinit_services; + } + + INIT_WORK(&gsWork, PVRSyncWorkQueueFunction); + + err = misc_register(&pvr_sync_device); + if(err) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to register pvr_sync misc " + "device (err=%d)", __func__, err)); + goto err_destory_wq; + } + err = PVRSRV_OK; + +err_out: + return err; +err_destory_wq: + destroy_workqueue(gpsWorkQueue); +err_deinit_services: + pvr_fence_context_destroy(gsSyncServicesConnection.psForeignFenceCtx); + PVRSyncCloseServices(); + goto err_out; +} + +void PVRSyncDeviceDeInit(void) +{ + pvr_fence_cleanup(); + misc_deregister(&pvr_sync_device); + pvr_fence_context_destroy(gsSyncServicesConnection.psForeignFenceCtx); + destroy_workqueue(gpsWorkQueue); + PVRSyncCloseServices(); +} + +struct PVR_COUNTING_FENCE_TIMELINE *pvr_sync_get_sw_timeline(int fd) +{ + struct PVR_SYNC_TIMELINE *psTimeline; + struct PVR_COUNTING_FENCE_TIMELINE *psSwTimeline = NULL; + + psTimeline = pvr_sync_timeline_fget(fd); + if (!psTimeline) + return NULL; + + psSwTimeline = pvr_counting_fence_timeline_get(psTimeline->pSWTimeline); + + pvr_sync_timeline_fput(psTimeline); + return psSwTimeline; +} + +IMG_BOOL +ExpandAndDeDuplicateFenceSyncs(IMG_UINT32 ui32NumSyncs, + IMG_HANDLE aiFenceFds[], + IMG_UINT32 ui32SyncPointLimit, + struct dma_fence *apsFence[], + IMG_UINT32 *pui32NumRealSyncs, + PVRSRV_KERNEL_SYNC_INFO *apsSyncInfo[]) +{ + IMG_UINT32 i, ui32FenceIndex = 0; + IMG_BOOL bRet = IMG_TRUE; + + *pui32NumRealSyncs = 0; + + for(i = 0; i < ui32NumSyncs; i++) + { + struct PVR_FENCE *psPVRFence; + + /* Skip any invalid fence file descriptors without error */ + if((IMG_INT32)aiFenceFds[i] < 0) + continue; + + /* By converting a file descriptor to a struct sync_fence, we are + * taking a reference on the fence. We don't want the fence to go + * away until we have submitted the command, even if it signals + * before we dispatch the command, or the timeline(s) are destroyed. 
+ * + * This reference should be released by the caller of this function + * once hardware operations have been scheduled on the GPU sync_pts + * participating in this fence. When our MISR is scheduled, the + * defer-free list will be processed, cleaning up the SYNCINFO. + * + * Note that this reference *isn't* enough for non-GPU sync_pts. + * We'll take another reference on the fence for those operations + * later (the life-cycle requirements there are totally different). + * + * Fence lookup may fail here if the fd became invalid since it was + * patched in userspace. That's really a userspace driver bug, so + * just fail here instead of not synchronizing. + */ + apsFence[ui32FenceIndex] = sync_file_get_fence((IMG_INT32)aiFenceFds[i]); + if(!apsFence[ui32FenceIndex]) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get fence from fd=%d", + __func__, (IMG_SIZE_T)aiFenceFds[i])); + bRet = IMG_FALSE; + goto err_out; + } + + /* If this fence has any points from foreign timelines, we need to + * allocate a 'shadow' SYNCINFO and update it in software ourselves, + * so the ukernel can test the readiness of the dependency. + * + * It's tempting to just handle all fences like this (since most of + * the time they *will* be merged with sw_sync) but such 'shadow' + * syncs are slower. This is because we need to wait for the MISR to + * schedule to update the GPU part of the fence (normally the ukernel + * would be able to make the update directly). + */ + psPVRFence = to_pvr_fence(apsFence[ui32FenceIndex]); + if(!psPVRFence) + { + + psPVRFence = pvr_fence_create_from_fence(gsSyncServicesConnection.psForeignFenceCtx, apsFence[ui32FenceIndex], "foreign"); + if(psPVRFence) + { + if(!AddSyncInfoToArray(psPVRFence->psSyncData->psSyncInfo->psBase, ui32SyncPointLimit, + pui32NumRealSyncs, apsSyncInfo)) + { + /* Soft-fail. Stop synchronizing. 
*/ + goto err_out; + } + } + } + else + { + if(!AddSyncInfoToArray(psPVRFence->psSyncData->psSyncInfo->psBase, ui32SyncPointLimit, pui32NumRealSyncs, apsSyncInfo)) + goto err_out; + } + ui32FenceIndex++; + } + +err_out: + return bRet; +} + +PVRSRV_ERROR PVRSyncInitServices(void) +{ + IMG_BOOL bCreated, bShared[PVRSRV_MAX_CLIENT_HEAPS]; + PVRSRV_HEAP_INFO sHeapInfo[PVRSRV_MAX_CLIENT_HEAPS]; + IMG_UINT32 ui32ClientHeapCount = 0; + PVRSRV_PER_PROCESS_DATA *psPerProc; + PVRSRV_ERROR eError; + + LinuxLockMutexNested(&gPVRSRVLock, PVRSRV_LOCK_CLASS_BRIDGE); + + gsSyncServicesConnection.ui32Pid = OSGetCurrentProcessIDKM(); + + eError = PVRSRVProcessConnect(gsSyncServicesConnection.ui32Pid, 0); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRVProcessConnect failed", + __func__)); + goto err_unlock; + } + + psPerProc = PVRSRVFindPerProcessData(); + if (!psPerProc) + { + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRVFindPerProcessData failed", + __func__)); + goto err_disconnect; + } + + eError = PVRSRVAcquireDeviceDataKM(0, PVRSRV_DEVICE_TYPE_SGX, + &gsSyncServicesConnection.hDevCookie); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRVAcquireDeviceDataKM failed", + __func__)); + goto err_disconnect; + } + + if (!gsSyncServicesConnection.hDevCookie) + { + PVR_DPF((PVR_DBG_ERROR, "%s: hDevCookie is NULL", __func__)); + goto err_disconnect; + } + + eError = PVRSRVCreateDeviceMemContextKM(gsSyncServicesConnection.hDevCookie, + psPerProc, + &gsSyncServicesConnection.hDevMemContext, + &ui32ClientHeapCount, + &sHeapInfo[0], + &bCreated, + &bShared[0]); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRVCreateDeviceMemContextKM failed", + __func__)); + goto err_disconnect; + } + + if (!gsSyncServicesConnection.hDevMemContext) + { + PVR_DPF((PVR_DBG_ERROR, "%s: hDevMemContext is NULL", __func__)); + goto err_disconnect; + } + +err_unlock: + LinuxUnLockMutex(&gPVRSRVLock); + return eError; + +err_disconnect: + 
PVRSRVProcessDisconnect(gsSyncServicesConnection.ui32Pid); + goto err_unlock; +} + +void PVRSyncCloseServices(void) +{ + IMG_BOOL bDummy; + + LinuxLockMutexNested(&gPVRSRVLock, PVRSRV_LOCK_CLASS_BRIDGE); + + PVRSRVDestroyDeviceMemContextKM(gsSyncServicesConnection.hDevCookie, + gsSyncServicesConnection.hDevMemContext, + &bDummy); + gsSyncServicesConnection.hDevMemContext = NULL; + gsSyncServicesConnection.hDevCookie = NULL; + + PVRSRVProcessDisconnect(gsSyncServicesConnection.ui32Pid); + gsSyncServicesConnection.ui32Pid = 0; + + LinuxUnLockMutex(&gPVRSRVLock); +} diff --git a/sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_fence.h b/sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_fence.h new file mode 100644 index 0000000..f7e9b22 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_fence.h @@ -0,0 +1,252 @@ +/*************************************************************************/ /*! +@File +@Title PowerVR Linux fence interface +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Strictly Confidential. +*/ /**************************************************************************/ + +#if !defined(__PVR_FENCE_H__) +#define __PVR_FENCE_H__ + +#include +#include +#include +#include + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) +#include +#else +#include +#endif + +//#define PVR_FENCE_DEBUG 1 + +struct PVR_SYNC_KERNEL_SYNC_INFO +{ + /* Base services sync info structure */ + PVRSRV_KERNEL_SYNC_INFO *psBase; + + /* Sync points can go away when there are deferred hardware + * operations still outstanding. We must not free the SYNC_INFO + * until the hardware is finished, so we add it to a defer list + * which is processed periodically ("defer-free"). + * + * This is also used for "defer-free" of a timeline -- the process + * may destroy its timeline or terminate abnormally but the HW could + * still be using the sync object hanging off of the timeline. 
+ * + * Note that the defer-free list is global, not per-timeline. + */ + struct list_head sHead; +}; + +struct PVR_SYNC_DATA +{ + /* Every sync fence has a services sync object. This object is used + * by the hardware to enforce ordering -- it is attached as a source + * dependency to various commands. + */ + struct PVR_SYNC_KERNEL_SYNC_INFO *psSyncInfo; + + /* This is purely a debug feature. Record the WOP snapshot from the + * timeline synchronization object when a new fence is created. + */ + IMG_UINT32 ui32WOPSnapshot; +}; + +/* A PVR_ALLOC_SYNC_DATA is used to back an allocated, but not yet created + * and inserted into a timeline, sync data. This is required as we must + * allocate the syncinfo to be passed down with the transfer task used to + * implement fences in the hardware. + */ +struct PVR_ALLOC_SYNC_DATA +{ + struct PVR_SYNC_KERNEL_SYNC_INFO *psSyncInfo; + + /* A link to the timeline is required to add a per-timeline sync + * to the fence transfer task. + */ + struct PVR_SYNC_TIMELINE *psTimeline; + struct file *psFile; +}; + +/** + * PVR_FENCE_CONTEXT - PVR fence context used to create and manage PVR fences + * @sLock: protects the context and fences created on the context + * @pcName: fence context name (used for debugging) + * @ui64FenceCtx: fence context with which to associate fences + * @sSeqno: sequence number to use for the next fence + * @psFenceWq: work queue for signalled fence work + * @sSignalWork: work item used to signal fences when fence syncs are met + * @sListLock: protects the active and active foreign lists + * @sSignalList: list of fences waiting to be signalled + * @sFenceList: list of fences (used for debugging) + * @sDeferredFreeList: list of fences that we will free when we are no longer + * holding spinlocks. The frees get implemented when an update fence is + * signalled or the context is freed. + * @sFenceCtxList: list of all fence contexts 
+ */ +struct PVR_FENCE_CONTEXT +{ + spinlock_t sLock; + const char *pName; + + /* True if a sync fence on the fence context has signaled */ + IMG_BOOL bSyncHasSignaled; + + IMG_UINT64 ui64FenceCtx; + atomic_t sSeqno; + + struct workqueue_struct *psFenceWq; + struct work_struct sSignalWork; + + spinlock_t sListLock; + struct list_head sSignalList; + struct list_head sFenceList; + struct list_head sDeferredFreeList; + struct list_head sFenceCtxList; + + struct kref sRef; + struct workqueue_struct *psDestroyWq; + struct work_struct sDestroyWork; +}; + +/** + * PVR_FENCE - PVR fence that represents both native and foreign fences + * @sBase: fence structure + * @psFenceCtx: fence context on which this fence was created + * @pcName: fence name (used for debugging) + * @psFencefence: pointer to base fence structure or foreign fence + * @psSyncData: services sync data used by hardware + * @sFenceHead: entry on the context fence and deferred free list + * @sSignalHead: entry on the context signal list + * @sFenceCb: foreign fence callback to set the sync to signalled + */ +struct PVR_FENCE { + struct dma_fence sBase; + struct PVR_FENCE_CONTEXT *psFenceCtx; + const char *pName; + + struct dma_fence *psFence; + struct PVR_SYNC_DATA *psSyncData; + + struct list_head sFenceHead; + struct list_head sSignalHead; + struct dma_fence_cb sFenceCb; +}; + +/* This is the actual timeline metadata. We might keep this around after the + * base sync driver has destroyed the pvr_sync_timeline_wrapper object. + */ +struct PVR_SYNC_TIMELINE { + struct PVR_FENCE_CONTEXT *psFenceCtx; + struct file *psFile; + char name[32]; + struct PVR_COUNTING_FENCE_TIMELINE *pSWTimeline; + + /* Every timeline has a services sync object. This object must not + * be used by the hardware to enforce ordering -- that's what the + * per sync-point objects are for. This object is attached to every + * TQ scheduled on the timeline and is primarily useful for debugging. 
*/ + struct PVR_SYNC_KERNEL_SYNC_INFO *psSyncInfo; +}; + +extern const struct dma_fence_ops pvr_fence_ops; +extern const struct dma_fence_ops pvr_fence_foreign_ops; + +static inline bool is_our_fence(struct PVR_FENCE_CONTEXT *psFenceCtx, + struct dma_fence *psFence) +{ + return (psFence->context == psFenceCtx->ui64FenceCtx); +} + +static inline bool is_pvr_fence(struct dma_fence *psFence) +{ + return ((psFence->ops == &pvr_fence_ops) || + (psFence->ops == &pvr_fence_foreign_ops)); +} + +static inline struct PVR_FENCE *to_pvr_fence(struct dma_fence *psFence) +{ + if (is_pvr_fence(psFence)) + return container_of(psFence, struct PVR_FENCE, sBase); + + return NULL; +} + +struct PVR_FENCE_CONTEXT *pvr_fence_context_create(const char *pcName); +void pvr_fence_context_destroy(struct PVR_FENCE_CONTEXT *psFenceCtx); + +struct PVR_FENCE *pvr_fence_create(struct PVR_FENCE_CONTEXT *fctx, + const char *name, struct PVR_SYNC_KERNEL_SYNC_INFO *psSyncInfo); +struct PVR_FENCE *pvr_fence_create_from_fence(struct PVR_FENCE_CONTEXT *psFenceCtx, + struct dma_fence *psFence, + const char *pcName); +void pvr_fence_destroy(struct PVR_FENCE *psPvrFence); + +PVRSRV_ERROR PVRSyncInitServices(void); +void PVRSyncCloseServices(void); + +IMG_BOOL ExpandAndDeDuplicateFenceSyncs(IMG_UINT32 ui32NumSyncs, + IMG_HANDLE aiFenceFds[], + IMG_UINT32 ui32SyncPointLimit, + struct dma_fence *apsFence[], + IMG_UINT32 *pui32NumRealSyncs, + PVRSRV_KERNEL_SYNC_INFO *apsSyncInfo[]); + +struct PVR_ALLOC_SYNC_DATA *PVRSyncAllocFDGet(int fd); + +struct PVR_COUNTING_FENCE_TIMELINE *pvr_sync_get_sw_timeline(int fd); + +static inline void pvr_fence_cleanup(void) +{ + /* + * Ensure all PVR fence contexts have been destroyed, by flushing + * the global workqueue. + * For those versions of the DDK that don't use PVR fences, this isn't + * necessary, but should be harmless. + */ + flush_scheduled_work(); +} + +#if defined(PVR_FENCE_DEBUG) +#define PVR_FENCE_CTX_TRACE(c, fmt, ...) 
\ + do { \ + struct PVR_FENCE_CONTEXT *__fctx = (c); \ + pr_err("c %llu: (PVR) " fmt, (u64) __fctx->ui64FenceCtx, \ + ## __VA_ARGS__); \ + } while (0) +#else +#define PVR_FENCE_CTX_TRACE(c, fmt, ...) +#endif + +#define PVR_FENCE_CTX_WARN(c, fmt, ...) \ + do { \ + struct PVR_FENCE_CONTEXT *__fctx = (c); \ + pr_warn("c %llu: (PVR) " fmt, (u64) __fctx->ui64FenceCtx, \ + ## __VA_ARGS__); \ + } while (0) + +#define PVR_FENCE_CTX_ERR(c, fmt, ...) \ + do { \ + struct PVR_FENCE_CONTEXT *__fctx = (c); \ + pr_err("c %llu: (PVR) " fmt, (u64) __fctx->ui64FenceCtx, \ + ## __VA_ARGS__); \ + } while (0) + +#if defined(PVR_FENCE_DEBUG) +#define PVR_FENCE_TRACE(f, fmt, ...) \ + FENCE_ERR(f, "(PVR) " fmt, ## __VA_ARGS__) +#else +#define PVR_FENCE_TRACE(f, fmt, ...) +#endif + +#define PVR_FENCE_WARN(f, fmt, ...) \ + FENCE_WARN(f, "(PVR) " fmt, ## __VA_ARGS__) + +#define PVR_FENCE_ERR(f, fmt, ...) \ + FENCE_ERR(f, "(PVR) " fmt, ## __VA_ARGS__) + +#endif /* !defined(__PVR_FENCE_H__) */ diff --git a/sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_linux_fence.c b/sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_linux_fence.c new file mode 100644 index 0000000..dac4ba8 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_linux_fence.c @@ -0,0 +1,1533 @@ +/*************************************************************************/ /*! +@File +@Title Linux fence interface +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Linux module setup +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include + +#include "srvkm.h" +#include "syscommon.h" +#include "pvr_linux_fence.h" + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0)) +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "dmabuf.h" + +#define BLOCKED_ON_READ 1 +#define BLOCKED_ON_WRITE 2 + +struct pvr_fence_context +{ + struct mutex mutex; + PVRSRV_KERNEL_SYNC_INFO *psSyncInfo; + IMG_HANDLE hNativeSync; + struct work_struct fence_work; + struct list_head fence_frame_list; + struct list_head fence_context_notify_list; + struct list_head fence_context_list; +}; + +struct pvr_fence_frame; + +struct pvr_blocking_fence +{ + struct dma_fence *fence; + struct dma_fence_cb cb; + struct pvr_fence_frame *pvr_fence_frame; + bool installed; +}; + +struct pvr_fence_frame +{ + struct list_head fence_frame_list; + struct pvr_fence_context *pvr_fence_context; + u32 tag; + bool is_dst; + IMG_UINT32 ui32ReadOpsPending; + IMG_UINT32 ui32ReadOps2Pending; + IMG_UINT32 ui32WriteOpsPending; + int blocked_on; + struct pvr_blocking_fence *blocking_fences; + unsigned blocking_fence_count; + atomic_t blocking_count; + struct dma_fence *fence_to_signal; + bool unblock; + bool have_blocking_fences; +}; + +struct pvr_fence +{ + struct dma_fence fence; + spinlock_t lock; +}; + +static LIST_HEAD(fence_context_list); +static LIST_HEAD(fence_context_notify_list); +static DEFINE_MUTEX(pvr_fence_mutex); + +static struct workqueue_struct *workqueue; +static unsigned fence_context; +static atomic_t fence_seqno = ATOMIC_INIT(0); +static atomic_t fences_outstanding = ATOMIC_INIT(0); + +#if defined(DEBUG) +static atomic_t fences_allocated = ATOMIC_INIT(0); +static atomic_t fences_signalled = ATOMIC_INIT(0); +static atomic_t callbacks_installed = ATOMIC_INIT(0); +static atomic_t callbacks_called = ATOMIC_INIT(0); +#endif + +#if defined(PVR_DRM_DRIVER_NAME) +static const char *drvname = 
PVR_DRM_DRIVER_NAME; +#else +static const char *drvname = "pvr"; +#endif +static const char *timeline_name = "PVR"; + +static unsigned next_seqno(void) +{ + return atomic_inc_return(&fence_seqno) - 1; +} + +static const char *get_driver_name(struct dma_fence *fence) +{ + return drvname; +} + +static const char *get_timeline_name(struct dma_fence *fence) +{ + return timeline_name; +} + +static bool enable_signaling(struct dma_fence *fence) +{ + return true; +} + +static void release_fence(struct dma_fence *fence) +{ + struct pvr_fence *pvr_fence = container_of(fence, struct pvr_fence, fence); + kfree(pvr_fence); + + atomic_dec(&fences_outstanding); +} + +static struct dma_fence_ops fence_ops = +{ + .get_driver_name = get_driver_name, + .get_timeline_name = get_timeline_name, + .enable_signaling = enable_signaling, + .wait = dma_fence_default_wait, + .release = release_fence +}; + +static inline bool is_pvr_fence(const struct dma_fence *fence) +{ + return fence->ops == &fence_ops; +} + +static struct dma_fence *create_fence_to_signal(struct pvr_fence_frame *pvr_fence_frame) +{ + struct pvr_fence *pvr_fence; + unsigned seqno = next_seqno(); + + pvr_fence = kmalloc(sizeof(*pvr_fence), GFP_KERNEL); + if (!pvr_fence) + { + return NULL; + } + + spin_lock_init(&pvr_fence->lock); + + dma_fence_init(&pvr_fence->fence, &fence_ops, &pvr_fence->lock, fence_context, seqno); + + pvr_fence_frame->fence_to_signal = &pvr_fence->fence; + +#if defined(DEBUG) + atomic_inc(&fences_allocated); +#endif + atomic_inc(&fences_outstanding); + + return pvr_fence_frame->fence_to_signal; +} + +static inline bool is_blocking_fence(const struct dma_fence *fence) +{ + return fence && !is_pvr_fence(fence); +} + +static inline bool is_unsignalled_blocking_fence(const struct dma_fence *fence) +{ + return is_blocking_fence(fence) && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags); +} + +static void signal_and_put_fence(struct pvr_fence_frame *pvr_fence_frame) +{ + if 
(pvr_fence_frame->fence_to_signal) + { + dma_fence_signal(pvr_fence_frame->fence_to_signal); + dma_fence_put(pvr_fence_frame->fence_to_signal); + + pvr_fence_frame->fence_to_signal = NULL; + +#if defined(DEBUG) + atomic_inc(&fences_signalled); +#endif + } +} + +static void blocking_fence_signalled(struct dma_fence *fence, struct dma_fence_cb *cb) +{ + struct pvr_blocking_fence *pvr_blocking_fence = container_of(cb, struct pvr_blocking_fence, cb); + struct pvr_fence_frame *pvr_fence_frame = pvr_blocking_fence->pvr_fence_frame; + struct pvr_fence_context *pvr_fence_context = pvr_fence_frame->pvr_fence_context; + + if (atomic_dec_and_test(&pvr_fence_frame->blocking_count)) + { + queue_work(workqueue, &pvr_fence_context->fence_work); + } +#if defined(DEBUG) + atomic_inc(&callbacks_called); +#endif +} + +static bool allocate_blocking_fence_storage(struct pvr_fence_frame *pvr_fence_frame, unsigned count) +{ + pvr_fence_frame->blocking_fences = kzalloc(count * sizeof(*pvr_fence_frame->blocking_fences), GFP_KERNEL); + if (pvr_fence_frame->blocking_fences) + { + pvr_fence_frame->blocking_fence_count = count; + return true; + } + return false; +} + +static void free_blocking_fence_storage(struct pvr_fence_frame *pvr_fence_frame) +{ + if (pvr_fence_frame->blocking_fence_count) + { + kfree(pvr_fence_frame->blocking_fences); + pvr_fence_frame->blocking_fence_count = 0; + } +} + +static int install_and_get_blocking_fence(struct pvr_fence_frame *pvr_fence_frame, unsigned index, struct dma_fence *fence) +{ + struct pvr_blocking_fence *pvr_blocking_fence = &pvr_fence_frame->blocking_fences[index]; + int ret; + + BUG_ON(index >= pvr_fence_frame->blocking_fence_count); + + pvr_blocking_fence->fence = fence; + pvr_blocking_fence->pvr_fence_frame = pvr_fence_frame; + + atomic_inc(&pvr_fence_frame->blocking_count); + + ret = dma_fence_add_callback(pvr_blocking_fence->fence, + &pvr_blocking_fence->cb, + blocking_fence_signalled); + + pvr_blocking_fence->installed = !ret; + if 
(!pvr_blocking_fence->installed)
	{
		atomic_dec(&pvr_fence_frame->blocking_count);
		return 1;
	}
	else
	{
		dma_fence_get(fence);
#if defined(DEBUG)
		atomic_inc(&callbacks_installed);
#endif
		return 0;
	}
}

/* Remove the callback (if it was installed) and drop the fence reference
 * taken by install_and_get_blocking_fence(). If dma_fence_remove_callback()
 * returns false the callback has already run, and blocking_fence_signalled()
 * has decremented blocking_count for us.
 */
static void uninstall_and_put_blocking_fence(struct pvr_fence_frame *pvr_fence_frame, unsigned index)
{
	struct pvr_blocking_fence *pvr_blocking_fence = &pvr_fence_frame->blocking_fences[index];

	BUG_ON(index >= pvr_fence_frame->blocking_fence_count);

	if (pvr_blocking_fence->installed)
	{
		if (dma_fence_remove_callback(pvr_blocking_fence->fence, &pvr_blocking_fence->cb))
		{
			atomic_dec(&pvr_fence_frame->blocking_count);
		}
		dma_fence_put(pvr_blocking_fence->fence);
	}
}

/* Translate the install result into the frame's blocked_on state:
 * negative errno is passed through, 0 ("callbacks installed") maps to
 * BLOCKED_ON_WRITE/BLOCKED_ON_READ, and 1 ("nothing to wait on") maps to 0.
 */
static inline int update_reservation_object_return_value(int ret, bool blocked_on_write)
{
	return ret < 0 ? ret : (ret ? 0 : (blocked_on_write ? BLOCKED_ON_WRITE : BLOCKED_ON_READ));
}
update_reservation_return_value(ret, false); +} + +static int update_reservation_object_fences_src(struct pvr_fence_frame *pvr_fence_frame, + struct reservation_object *resv) +{ + struct dma_fence *fence_to_signal; + struct dma_fence *fence; + int ret; + + ret = reservation_object_reserve_shared(resv); + if (ret) + { + return ret; + } + + fence_to_signal = create_fence_to_signal(pvr_fence_frame); + if (!fence_to_signal) + { + return -ENOMEM; + } + + if (!pvr_fence_frame->have_blocking_fences) + { + reservation_object_add_shared_fence(resv, fence_to_signal); + + return 0; + } + + fence = reservation_object_get_excl(resv); + if (is_blocking_fence(fence)) + { + if (allocate_blocking_fence_storage(pvr_fence_frame, 1)) + { + ret = install_and_get_blocking_fence(pvr_fence_frame, 0, fence); + } + else + { + dma_fence_put(fence_to_signal); + return -ENOMEM; + } + } + else + { + ret = 1; + } + + reservation_object_add_shared_fence(resv, fence_to_signal); + + return update_reservation_return_value(ret, true); +} + +/* Must be called with pvr_fence_context mutex held */ +static void destroy_fence_frame(struct pvr_fence_frame *pvr_fence_frame) +{ + unsigned i; + + signal_and_put_fence(pvr_fence_frame); + + for (i = 0; i < pvr_fence_frame->blocking_fence_count; i++) + { + uninstall_and_put_blocking_fence(pvr_fence_frame, i); + } + free_blocking_fence_storage(pvr_fence_frame); + + list_del(&pvr_fence_frame->fence_frame_list); + + kfree(pvr_fence_frame); +} + +static inline bool sync_GE(const u32 a, const u32 b) +{ + return (a - b) < (U32_MAX / 2); +} +static inline bool sync_GT(const u32 a, const u32 b) +{ + return (a != b) && sync_GE(a, b); +} + +static bool sync_is_ready(struct pvr_fence_frame *pvr_fence_frame) +{ + PVRSRV_KERNEL_SYNC_INFO *psSyncInfo = pvr_fence_frame->pvr_fence_context->psSyncInfo; + + return (!pvr_fence_frame->is_dst || + sync_GE(psSyncInfo->psSyncData->ui32ReadOpsComplete, + pvr_fence_frame->ui32ReadOpsPending)) && + 
sync_GE(psSyncInfo->psSyncData->ui32ReadOps2Complete, + pvr_fence_frame->ui32ReadOps2Pending) && + sync_GE(psSyncInfo->psSyncData->ui32WriteOpsComplete, + pvr_fence_frame->ui32WriteOpsPending); +} + +static bool sync_gpu_read_op_is_complete(struct pvr_fence_frame *pvr_fence_frame) +{ + PVRSRV_KERNEL_SYNC_INFO *psSyncInfo = pvr_fence_frame->pvr_fence_context->psSyncInfo; + + /* + * If there aren't any blocking fences, we will have recorded the + * read ops pending value after it had been updated for the GPU + * op. + */ + return pvr_fence_frame->have_blocking_fences ? + sync_GT(psSyncInfo->psSyncData->ui32ReadOpsComplete, + pvr_fence_frame->ui32ReadOpsPending) : + sync_GE(psSyncInfo->psSyncData->ui32ReadOpsComplete, + pvr_fence_frame->ui32ReadOpsPending); +} + +static bool sync_gpu_write_op_is_complete(struct pvr_fence_frame *pvr_fence_frame) +{ + PVRSRV_KERNEL_SYNC_INFO *psSyncInfo = pvr_fence_frame->pvr_fence_context->psSyncInfo; + + /* + * If there aren't any blocking fences, we will have recorded the + * write ops pending value after it had been updated for the GPU + * op. + */ + return pvr_fence_frame->have_blocking_fences ? 
+ sync_GT(psSyncInfo->psSyncData->ui32WriteOpsComplete, + pvr_fence_frame->ui32WriteOpsPending) : + sync_GE(psSyncInfo->psSyncData->ui32WriteOpsComplete, + pvr_fence_frame->ui32WriteOpsPending); +} + +static void sync_complete_read_op(struct pvr_fence_frame *pvr_fence_frame) +{ + PVRSRV_KERNEL_SYNC_INFO *psSyncInfo = pvr_fence_frame->pvr_fence_context->psSyncInfo; + + psSyncInfo->psSyncData->ui32ReadOps2Complete = ++pvr_fence_frame->ui32ReadOps2Pending; +} + +static void sync_complete_write_op(struct pvr_fence_frame *pvr_fence_frame) +{ + PVRSRV_KERNEL_SYNC_INFO *psSyncInfo = pvr_fence_frame->pvr_fence_context->psSyncInfo; + + psSyncInfo->psSyncData->ui32WriteOpsComplete = ++pvr_fence_frame->ui32WriteOpsPending; +} + +static bool fence_work(struct pvr_fence_context *pvr_fence_context) +{ + bool schedule_device_callbacks = false; + + for(;;) + { + struct pvr_fence_frame *pvr_fence_frame; + bool reprocess = false; + bool next_frame = false; + + pvr_fence_frame = list_first_entry_or_null(&pvr_fence_context->fence_frame_list, struct pvr_fence_frame, fence_frame_list); + + if (pvr_fence_frame) + { + if (!atomic_read(&pvr_fence_frame->blocking_count) && sync_is_ready(pvr_fence_frame)) + { + switch (pvr_fence_frame->blocked_on) + { + case BLOCKED_ON_READ: + sync_complete_read_op(pvr_fence_frame); + pvr_fence_frame->blocked_on = 0; + schedule_device_callbacks = true; + reprocess = true; + break; + case BLOCKED_ON_WRITE: + sync_complete_write_op(pvr_fence_frame); + pvr_fence_frame->blocked_on = 0; + schedule_device_callbacks = true; + reprocess = true; + break; + default: + next_frame = pvr_fence_frame->is_dst ? 
sync_gpu_write_op_is_complete(pvr_fence_frame) : sync_gpu_read_op_is_complete(pvr_fence_frame); + break; + } + + if (pvr_fence_frame->unblock) + { + next_frame = true; + } + } + } + + if (next_frame) + { + destroy_fence_frame(pvr_fence_frame); + } + else + { + if (!reprocess) + { + break; + } + } + + } + + return schedule_device_callbacks; +} + +static void do_fence_work(struct work_struct *work) +{ + struct pvr_fence_context *pvr_fence_context = container_of(work, struct pvr_fence_context, fence_work); + bool schedule_device_callbacks; + + mutex_lock(&pvr_fence_context->mutex); + schedule_device_callbacks = fence_work(pvr_fence_context); + mutex_unlock(&pvr_fence_context->mutex); + + if (schedule_device_callbacks) + { + PVRSRVScheduleDeviceCallbacks(); + } + +} + +void PVRLinuxFenceContextDestroy(IMG_HANDLE hFenceContext) +{ + + struct pvr_fence_context *pvr_fence_context = (struct pvr_fence_context *)hFenceContext; + struct list_head *entry, *temp; + + mutex_lock(&pvr_fence_mutex); + mutex_lock(&pvr_fence_context->mutex); + + list_del(&pvr_fence_context->fence_context_list); + if (!list_empty(&pvr_fence_context->fence_context_notify_list)) + { + list_del(&pvr_fence_context->fence_context_notify_list); + } + mutex_unlock(&pvr_fence_mutex); + + list_for_each_safe(entry, temp, &pvr_fence_context->fence_frame_list) + { + struct pvr_fence_frame *pvr_fence_frame = list_entry(entry, struct pvr_fence_frame, fence_frame_list); + + destroy_fence_frame(pvr_fence_frame); + } + + mutex_unlock(&pvr_fence_context->mutex); + + flush_work(&pvr_fence_context->fence_work); + + mutex_destroy(&pvr_fence_context->mutex); + + DmaBufFreeNativeSyncHandle(pvr_fence_context->hNativeSync); + + kfree(pvr_fence_context); +} + +IMG_HANDLE PVRLinuxFenceContextCreate(PVRSRV_KERNEL_SYNC_INFO *psSyncInfo, IMG_HANDLE hImport) +{ + struct pvr_fence_context *pvr_fence_context; + + pvr_fence_context = kzalloc(sizeof(*pvr_fence_context), GFP_KERNEL); + if (!pvr_fence_context) + { + return NULL; + } + + 
pvr_fence_context->hNativeSync = DmaBufGetNativeSyncHandle(hImport); + if (!pvr_fence_context->hNativeSync) + { + kfree(pvr_fence_context); + return NULL; + } + + INIT_LIST_HEAD(&pvr_fence_context->fence_frame_list); + INIT_LIST_HEAD(&pvr_fence_context->fence_context_list); + INIT_LIST_HEAD(&pvr_fence_context->fence_context_notify_list); + + mutex_init(&pvr_fence_context->mutex); + + INIT_WORK(&pvr_fence_context->fence_work, do_fence_work); + + pvr_fence_context->psSyncInfo = psSyncInfo; + mutex_lock(&pvr_fence_mutex); + list_add_tail(&pvr_fence_context->fence_context_list, &fence_context_list); + mutex_unlock(&pvr_fence_mutex); + + return (IMG_HANDLE)pvr_fence_context; +} + +static int process_reservation_object(struct pvr_fence_context *pvr_fence_context, struct reservation_object *resv, bool is_dst, u32 tag, bool have_blocking_fences) +{ + PVRSRV_KERNEL_SYNC_INFO *psSyncInfo = pvr_fence_context->psSyncInfo; + struct pvr_fence_frame *pvr_fence_frame; + int ret; + + pvr_fence_frame = kzalloc(sizeof(*pvr_fence_frame), GFP_KERNEL); + if (!pvr_fence_frame) + { + return -ENOMEM; + } + + pvr_fence_frame->is_dst = is_dst; + pvr_fence_frame->tag = tag; + pvr_fence_frame->pvr_fence_context = pvr_fence_context; + pvr_fence_frame->have_blocking_fences = have_blocking_fences; + atomic_set(&pvr_fence_frame->blocking_count, 0); + INIT_LIST_HEAD(&pvr_fence_frame->fence_frame_list); + + ret = is_dst ? + update_reservation_object_fences_dst(pvr_fence_frame, resv) : + update_reservation_object_fences_src(pvr_fence_frame, resv); + if (ret < 0) + { + kfree(pvr_fence_frame); + return ret; + } + else + { + BUG_ON(ret && !have_blocking_fences); + + pvr_fence_frame->blocked_on = ret; + + + /* + * If there are no blocking fences, the ops pending values + * are recorded after being updated for the GPU operation, + * rather than before, so the test for completion of the + * operation is different for the two cases. 
+ */ + pvr_fence_frame->ui32ReadOpsPending = psSyncInfo->psSyncData->ui32ReadOpsPending; + + pvr_fence_frame->ui32ReadOps2Pending = (pvr_fence_frame->blocked_on == BLOCKED_ON_READ) ? SyncTakeReadOp2(psSyncInfo, SYNC_OP_CLASS_LINUX_FENCE) : psSyncInfo->psSyncData->ui32ReadOps2Pending; + + pvr_fence_frame->ui32WriteOpsPending = (pvr_fence_frame->blocked_on == BLOCKED_ON_WRITE) ? SyncTakeWriteOp(psSyncInfo, SYNC_OP_CLASS_LINUX_FENCE) : psSyncInfo->psSyncData->ui32WriteOpsPending; + + list_add_tail(&pvr_fence_frame->fence_frame_list, &pvr_fence_context->fence_frame_list); + } + + return 0; +} + +static int process_syncinfo(PVRSRV_KERNEL_SYNC_INFO *psSyncInfo, bool is_dst, u32 tag, bool have_blocking_fences) +{ + struct pvr_fence_context *pvr_fence_context = (struct pvr_fence_context *)psSyncInfo->hFenceContext; + struct reservation_object *resv; + int ret = 0; + + if (!pvr_fence_context) + { + return 0; + } + + mutex_lock(&pvr_fence_context->mutex); + if ((resv = DmaBufGetReservationObject(pvr_fence_context->hNativeSync))) + { + ret = process_reservation_object(pvr_fence_context, + resv, + is_dst, + tag, + have_blocking_fences); + } + mutex_unlock(&pvr_fence_context->mutex); + + mutex_lock(&pvr_fence_mutex); + mutex_lock(&pvr_fence_context->mutex); + if (list_empty(&pvr_fence_context->fence_context_notify_list)) + { + list_add_tail(&pvr_fence_context->fence_context_notify_list, &fence_context_notify_list); + queue_work(workqueue, &pvr_fence_context->fence_work); + } + mutex_unlock(&pvr_fence_context->mutex); + mutex_unlock(&pvr_fence_mutex); + + return ret; +} + + +static inline bool sync_enabled(const IMG_BOOL *pbEnabled, + const IMG_HANDLE *phSyncInfo, + unsigned index) +{ + return (!pbEnabled || pbEnabled[index]) && phSyncInfo && phSyncInfo[index]; +} + +static bool resv_is_blocking(struct reservation_object *resv, bool is_dst) +{ + struct reservation_object_list *flist; + struct dma_fence *fence; + bool blocking; + unsigned shared_count; + unsigned seq; + +retry: + 
shared_count = 0; + blocking = false; + + seq = read_seqcount_begin(&resv->seq); + rcu_read_lock(); + + flist = rcu_dereference(resv->fence); + if (read_seqcount_retry(&resv->seq, seq)) + { + goto unlock_retry; + } + + if (flist) + { + shared_count = flist->shared_count; + } + + if (is_dst) + { + unsigned i; + + for (i = 0; (i < shared_count) && !blocking; i++) + { + fence = rcu_dereference(flist->shared[i]); + + blocking = is_unsignalled_blocking_fence(fence); + } + } + + if (!blocking && (!is_dst || !shared_count)) + { + fence = rcu_dereference(resv->fence_excl); + if (read_seqcount_retry(&resv->seq, seq)) + { + goto unlock_retry; + } + + blocking = is_unsignalled_blocking_fence(fence); + } + + rcu_read_unlock(); + + return blocking; + +unlock_retry: + rcu_read_unlock(); + goto retry; +} + +static unsigned count_reservation_objects(unsigned num_syncs, + IMG_HANDLE *phSyncInfo, + const IMG_BOOL *pbEnabled, + bool is_dst, + bool *have_blocking_fences) +{ + unsigned i; + unsigned count = 0; + bool blocking_fences = false; + + for (i = 0; i < num_syncs; i++) + { + PVRSRV_KERNEL_SYNC_INFO *psSyncInfo; + struct pvr_fence_context *pvr_fence_context; + + if (!sync_enabled(pbEnabled, phSyncInfo, i)) + { + continue; + } + + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)phSyncInfo[i]; + pvr_fence_context = (struct pvr_fence_context *)psSyncInfo->hFenceContext; + if (pvr_fence_context) + { + struct reservation_object *resv; + + if ((resv = DmaBufGetReservationObject(pvr_fence_context->hNativeSync))) + { + count++; + + if (!blocking_fences) + { + blocking_fences = resv_is_blocking(resv, + is_dst); + } + } + } + } + + *have_blocking_fences = blocking_fences; + return count; +} + +static unsigned get_reservation_objects(unsigned num_resvs, + struct reservation_object **resvs, + unsigned num_syncs, + IMG_HANDLE *phSyncInfo, + const IMG_BOOL *pbEnabled) +{ + unsigned i; + unsigned count = 0; + + for (i = 0; i < num_syncs; i++) + { + PVRSRV_KERNEL_SYNC_INFO *psSyncInfo; + struct 
pvr_fence_context *pvr_fence_context; + + if (!sync_enabled(pbEnabled, phSyncInfo, i)) + { + continue; + } + + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)phSyncInfo[i]; + pvr_fence_context = (struct pvr_fence_context *)psSyncInfo->hFenceContext; + if (pvr_fence_context) + { + struct reservation_object *resv; + + if ((resv = DmaBufGetReservationObject(pvr_fence_context->hNativeSync))) + { + BUG_ON(count >= num_resvs); + resvs[count++] = resv; + } + } + } + + return count; +} + +static void get_all_reservation_objects(unsigned num_resvs, + struct reservation_object **resvs, + IMG_UINT32 ui32NumSrcSyncs, + IMG_HANDLE *phSrcSyncInfo, + const IMG_BOOL *pbSrcEnabled, + IMG_UINT32 ui32NumDstSyncs, + IMG_HANDLE *phDstSyncInfo, + const IMG_BOOL *pbDstEnabled) +{ + unsigned num_src_resvs; + + num_src_resvs = get_reservation_objects(num_resvs, + resvs, + ui32NumSrcSyncs, + phSrcSyncInfo, + pbSrcEnabled); + + get_reservation_objects(num_resvs - num_src_resvs, + &resvs[num_src_resvs], + ui32NumDstSyncs, + phDstSyncInfo, + pbDstEnabled); +} + +static void unlock_reservation_objects(unsigned num_resvs, + struct reservation_object **resvs) +{ + unsigned i; + + for (i = 0; i < num_resvs; i++) + { + if (resvs[i]) + { + ww_mutex_unlock(&(resvs[i]->lock)); + } + } +} + +static int lock_reservation_objects_no_retry(struct ww_acquire_ctx *ww_acquire_ctx, + bool interruptible, + unsigned num_resvs, + struct reservation_object **resvs, + struct reservation_object **contended_resv) +{ + unsigned i; + + for (i = 0; i < num_resvs; i++) + { + int ret; + + if (!resvs[i]) + { + continue; + } + if (resvs[i] == *contended_resv) + { + *contended_resv = NULL; + continue; + } + + ret = interruptible ? 
+ ww_mutex_lock_interruptible(&(resvs[i]->lock), ww_acquire_ctx) : + ww_mutex_lock(&(resvs[i]->lock), ww_acquire_ctx); + if (ret) + { + if (ret == -EALREADY) + { + resvs[i] = NULL; + continue; + } + + unlock_reservation_objects(i, resvs); + + if (*contended_resv) + { + ww_mutex_unlock(&((*contended_resv)->lock)); + *contended_resv = NULL; + } + + if (ret == -EDEADLK) + { + *contended_resv = resvs[i]; + } + + return ret; + } + } + + return 0; +} + +static int lock_reservation_objects(struct ww_acquire_ctx *ww_acquire_ctx, + bool interruptible, + unsigned num_resvs, + struct reservation_object **resvs) +{ + int ret; + struct reservation_object *contended_resv = NULL; + + do { + ret = lock_reservation_objects_no_retry(ww_acquire_ctx, + interruptible, + num_resvs, + resvs, + &contended_resv); + if (ret == -EDEADLK) + { + if (interruptible) + { + int res = ww_mutex_lock_slow_interruptible( + &(contended_resv->lock), + ww_acquire_ctx); + if (res) + { + return res; + } + } + else + { + ww_mutex_lock_slow(&(contended_resv->lock), ww_acquire_ctx); + } + } + } while (ret == -EDEADLK); + + return ret; +} + +static int process_syncinfos(u32 tag, + bool is_dst, + bool have_blocking_fences, + IMG_UINT32 ui32NumSyncs, + IMG_HANDLE *phSyncInfo, + const IMG_BOOL *pbEnabled) +{ + unsigned i; + + for (i = 0; i < ui32NumSyncs; i++) + { + if (sync_enabled(pbEnabled, phSyncInfo, i)) + { + PVRSRV_KERNEL_SYNC_INFO *psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)phSyncInfo[i]; + int ret; + + ret = process_syncinfo(psSyncInfo, + is_dst, + tag, + have_blocking_fences); + if (ret) + { + break; + } + } + } + + return 0; +} + +static int process_all_syncinfos(u32 tag, + IMG_UINT32 ui32NumSrcSyncs, + IMG_HANDLE *phSrcSyncInfo, + const IMG_BOOL *pbSrcEnabled, + IMG_UINT32 ui32NumDstSyncs, + IMG_HANDLE *phDstSyncInfo, + const IMG_BOOL *pbDstEnabled, + bool have_blocking_fences) +{ + int ret; + + ret = process_syncinfos(tag, + false, + have_blocking_fences, + ui32NumSrcSyncs, + phSrcSyncInfo, + 
pbSrcEnabled); + + if (ret) + { + return ret; + } + + ret = process_syncinfos(tag, + true, + have_blocking_fences, + ui32NumDstSyncs, + phDstSyncInfo, + pbDstEnabled); + + return ret; +} + +static void unblock_frames(u32 tag, + IMG_UINT32 ui32NumSyncs, + IMG_HANDLE *phSyncInfo, + const IMG_BOOL *pbEnabled) +{ + unsigned i; + + for (i = 0; i < ui32NumSyncs; i++) + { + PVRSRV_KERNEL_SYNC_INFO *psSyncInfo; + struct pvr_fence_context *pvr_fence_context; + struct list_head *entry; + + if (!sync_enabled(pbEnabled, phSyncInfo, i)) + { + continue; + } + + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)phSyncInfo[i]; + pvr_fence_context = (struct pvr_fence_context *)psSyncInfo->hFenceContext; + if (!pvr_fence_context) + { + continue; + } + + + mutex_lock(&pvr_fence_context->mutex); + + list_for_each(entry, &pvr_fence_context->fence_frame_list) + { + struct pvr_fence_frame *pvr_fence_frame = list_entry(entry, struct pvr_fence_frame, fence_frame_list); + + if (pvr_fence_frame->tag == tag) + { + pvr_fence_frame->unblock = true; + } + } + + mutex_unlock(&pvr_fence_context->mutex); + } + + return; +} + +static void unblock_all_frames(u32 tag, + IMG_UINT32 ui32NumSrcSyncs, + IMG_HANDLE *phSrcSyncInfo, + const IMG_BOOL *pbSrcEnabled, + IMG_UINT32 ui32NumDstSyncs, + IMG_HANDLE *phDstSyncInfo, + const IMG_BOOL *pbDstEnabled) +{ + unblock_frames(tag, + ui32NumSrcSyncs, + phSrcSyncInfo, + pbSrcEnabled); + + unblock_frames(tag, + ui32NumDstSyncs, + phDstSyncInfo, + pbDstEnabled); +} + +static PVRSRV_ERROR pvr_error(int error) +{ + switch(error) + { + case 0: + return PVRSRV_OK; + case -EINTR: + return PVRSRV_ERROR_RETRY; + case -ENOMEM: + return PVRSRV_ERROR_OUT_OF_MEMORY; + default: + break; + } + + return PVRSRV_ERROR_OUT_OF_MEMORY; +} + +static u32 new_frame_tag(void) +{ + static u32 frame_tag; + + return (++frame_tag) ? 
frame_tag : ++frame_tag; +} + +IMG_UINT32 PVRLinuxFenceNumResvObjs(IMG_BOOL *pbBlockingFences, + IMG_UINT32 ui32NumSrcSyncs, + IMG_HANDLE *phSrcSyncInfo, + const IMG_BOOL *pbSrcEnabled, + IMG_UINT32 ui32NumDstSyncs, + IMG_HANDLE *phDstSyncInfo, + const IMG_BOOL *pbDstEnabled) +{ + unsigned count; + bool blocking_fences_src, blocking_fences_dst; + + count = count_reservation_objects(ui32NumSrcSyncs, + phSrcSyncInfo, + pbSrcEnabled, + false, + &blocking_fences_src); + + count += count_reservation_objects(ui32NumDstSyncs, + phDstSyncInfo, + pbDstEnabled, + true, + &blocking_fences_dst); + + *pbBlockingFences = (IMG_BOOL) (blocking_fences_src | + blocking_fences_dst); + + return count; +} + +PVRSRV_ERROR PVRLinuxFenceProcess(IMG_UINT32 *pui32Tag, + IMG_UINT32 ui32NumResvObjs, + IMG_BOOL bBlockingFences, + IMG_UINT32 ui32NumSrcSyncs, + IMG_HANDLE *phSrcSyncInfo, + const IMG_BOOL *pbSrcEnabled, + IMG_UINT32 ui32NumDstSyncs, + IMG_HANDLE *phDstSyncInfo, + const IMG_BOOL *pbDstEnabled) +{ + u32 tag; + struct ww_acquire_ctx ww_acquire_ctx; + struct reservation_object **resvs = NULL; + int ret; + + if (!ui32NumResvObjs) + { + *pui32Tag = 0; + ret = 0; + goto exit; + } + tag = new_frame_tag(); + + resvs = kmalloc(ui32NumResvObjs * sizeof(*resvs), GFP_KERNEL); + if (!resvs) + { + ret = -ENOMEM; + goto exit; + } + + get_all_reservation_objects(ui32NumResvObjs, + resvs, + ui32NumSrcSyncs, + phSrcSyncInfo, + pbSrcEnabled, + ui32NumDstSyncs, + phDstSyncInfo, + pbDstEnabled); + + ww_acquire_init(&ww_acquire_ctx, &reservation_ww_class); + + /* + * If there are no blocking fences, we will be processing + * reservation objects after the GPU operation has been + * started, so returning an error that may result in the + * GPU operation being retried may be inappropriate. 
+ */ + ret = lock_reservation_objects(&ww_acquire_ctx, + (bool)bBlockingFences, + ui32NumResvObjs, + resvs); + if (ret) + { + ww_acquire_fini(&ww_acquire_ctx); + goto exit; + } + ww_acquire_done(&ww_acquire_ctx); + + ret = process_all_syncinfos(tag, + ui32NumSrcSyncs, + phSrcSyncInfo, + pbSrcEnabled, + ui32NumDstSyncs, + phDstSyncInfo, + pbDstEnabled, + (bool)bBlockingFences); + + unlock_reservation_objects(ui32NumResvObjs, resvs); + + ww_acquire_fini(&ww_acquire_ctx); + + if (ret) + { + unblock_all_frames(tag, + ui32NumSrcSyncs, + phSrcSyncInfo, + pbSrcEnabled, + ui32NumDstSyncs, + phDstSyncInfo, + pbDstEnabled); + } + else + { + *pui32Tag = tag; + } + +exit: + if (resvs) + { + kfree(resvs); + } + return pvr_error(ret); + +} + +void PVRLinuxFenceRelease(IMG_UINT32 uTag, + IMG_UINT32 ui32NumSrcSyncs, + IMG_HANDLE *phSrcSyncInfo, + const IMG_BOOL *pbSrcEnabled, + IMG_UINT32 ui32NumDstSyncs, + IMG_HANDLE *phDstSyncInfo, + const IMG_BOOL *pbDstEnabled) +{ + if (uTag) + { + unblock_all_frames(uTag, + ui32NumSrcSyncs, + phSrcSyncInfo, + pbSrcEnabled, + ui32NumDstSyncs, + phDstSyncInfo, + pbDstEnabled); + } +} + +void PVRLinuxFenceCheckAll(void) +{ + struct list_head *entry, *temp; + bool schedule_device_callbacks = false; + + mutex_lock(&pvr_fence_mutex); + list_for_each_safe(entry, temp, &fence_context_notify_list) + { + struct pvr_fence_context *pvr_fence_context = list_entry(entry, struct pvr_fence_context, fence_context_notify_list); + + mutex_lock(&pvr_fence_context->mutex); + if (list_empty(&pvr_fence_context->fence_frame_list)) + { + list_del_init(&pvr_fence_context->fence_context_notify_list); + } + else + { + if (fence_work(pvr_fence_context)) + { + schedule_device_callbacks = true; + } + } + mutex_unlock(&pvr_fence_context->mutex); + } + mutex_unlock(&pvr_fence_mutex); + + if (schedule_device_callbacks) + PVRSRVScheduleDeviceCallbacks(); +} + +void PVRLinuxFenceDeInit(void) +{ + unsigned fences_remaining; + bool contexts_leaked; + + if (workqueue) + { + 
destroy_workqueue(workqueue); + } + + fences_remaining = atomic_read(&fences_outstanding); + if (fences_remaining) + { + printk(KERN_WARNING "%s: %u fences leaked\n", + __func__, fences_remaining); + } + +#if defined(DEBUG) + printk(KERN_INFO "%s: %u fences allocated\n", + __func__, atomic_read(&fences_allocated)); + + printk(KERN_INFO "%s: %u fences signalled\n", + __func__, atomic_read(&fences_signalled)); + + printk(KERN_INFO "%s: %u callbacks installed\n", + __func__, atomic_read(&callbacks_installed)); + + printk(KERN_INFO "%s: %u callbacks called\n", + __func__, atomic_read(&callbacks_called)); +#endif + + mutex_lock(&pvr_fence_mutex); + contexts_leaked = !list_empty(&fence_context_list); + mutex_unlock(&pvr_fence_mutex); + + mutex_destroy(&pvr_fence_mutex); + + BUG_ON(contexts_leaked); +} + +int PVRLinuxFenceInit(void) +{ + workqueue = create_workqueue("PVR Linux Fence"); + if (!workqueue) + { + return -ENOMEM; + } + + fence_context = dma_fence_context_alloc(1); + + return 0; +} +#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17)) */ +IMG_HANDLE PVRLinuxFenceContextCreate(PVRSRV_KERNEL_SYNC_INFO *psSyncInfo, IMG_HANDLE hImport) +{ + (void) psSyncInfo; + (void) hImport; + + return (IMG_HANDLE)(IMG_UINTPTR_T)0xbad; +} + +void PVRLinuxFenceContextDestroy(IMG_HANDLE hFenceContext) +{ + (void) hFenceContext; +} + +IMG_UINT32 PVRLinuxFenceNumResvObjs(IMG_BOOL *pbBlockingFences, + IMG_UINT32 ui32NumSrcSyncs, + IMG_HANDLE *phSrcSyncInfo, + const IMG_BOOL *pbSrcEnabled, + IMG_UINT32 ui32NumDstSyncs, + IMG_HANDLE *phDstSyncInfo, + const IMG_BOOL *pbDstEnabled) +{ + (void) pbBlockingFences; + (void) ui32NumSrcSyncs; + (void) phSrcSyncInfo; + (void) pbSrcEnabled; + (void) ui32NumDstSyncs; + (void) phDstSyncInfo; + (void) pbDstEnabled; + + return PVRSRV_OK; +} + +PVRSRV_ERROR PVRLinuxFenceProcess(IMG_UINT32 *pui32Tag, + IMG_UINT32 ui32NumResvObjs, + IMG_BOOL bBlockingFences, + IMG_UINT32 ui32NumSrcSyncs, + IMG_HANDLE *phSrcSyncInfo, + const IMG_BOOL *pbSrcEnabled, + 
IMG_UINT32 ui32NumDstSyncs, + IMG_HANDLE *phDstSyncInfo, + const IMG_BOOL *pbDstEnabled) +{ + (void) pui32Tag; + (void) ui32NumResvObjs; + (void) bBlockingFences; + (void) ui32NumSrcSyncs; + (void) phSrcSyncInfo; + (void) pbSrcEnabled; + (void) ui32NumDstSyncs; + (void) phDstSyncInfo; + (void) pbDstEnabled; + + return PVRSRV_OK; +} + +void PVRLinuxFenceRelease(IMG_UINT32 ui32Tag, + IMG_UINT32 ui32NumSrcSyncs, + IMG_HANDLE *phSrcSyncInfo, + const IMG_BOOL *pbSrcEnabled, + IMG_UINT32 ui32NumDstSyncs, + IMG_HANDLE *phDstSyncInfo, + const IMG_BOOL *pbDstEnabled) +{ + (void) ui32Tag; + (void) ui32NumSrcSyncs; + (void) phSrcSyncInfo; + (void) pbSrcEnabled; + (void) ui32NumDstSyncs; + (void) phDstSyncInfo; + (void) pbDstEnabled; +} + +void PVRLinuxFenceCheckAll(void) +{ +} + +int PVRLinuxFenceInit(void) +{ + return 0; +} + +void PVRLinuxFenceDeInit(void) +{ +} +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17)) */ diff --git a/sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_linux_fence.h b/sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_linux_fence.h new file mode 100644 index 0000000..aed12ff --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_linux_fence.h @@ -0,0 +1,81 @@ +/*************************************************************************/ /*! +@File +@Title PowerVR Linux fence interface +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description drm module +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(__PVR_LINUX_FENCE_H__) +#define __PVR_LINUX_FENCE_H__ + +IMG_HANDLE PVRLinuxFenceContextCreate(PVRSRV_KERNEL_SYNC_INFO *psSyncInfo, IMG_HANDLE hImport); +void PVRLinuxFenceContextDestroy(IMG_HANDLE hFenceContext); + +IMG_UINT32 PVRLinuxFenceNumResvObjs(IMG_BOOL *pbBlockingFences, + IMG_UINT32 ui32NumSrcSyncs, + IMG_HANDLE *phSrcSyncInfo, + const IMG_BOOL *pbSrcEnabled, + IMG_UINT32 ui32NumDstSyncs, + IMG_HANDLE *phDstSyncInfo, + const IMG_BOOL *pbDstEnabled); + +PVRSRV_ERROR PVRLinuxFenceProcess(IMG_UINT32 *pui32Tag, + IMG_UINT32 ui32NumResvObjs, + IMG_BOOL bBlockingFences, + IMG_UINT32 ui32NumSrcSyncs, + IMG_HANDLE *phSrcSyncInfo, + const IMG_BOOL *pbSrcEnabled, + IMG_UINT32 ui32NumDstSyncs, + IMG_HANDLE *phDstSyncInfo, + const IMG_BOOL *pbDstEnabled); + +void PVRLinuxFenceRelease(IMG_UINT32 ui32Tag, + IMG_UINT32 ui32NumSrcSyncs, + IMG_HANDLE *phSrcSyncInfo, + const IMG_BOOL *pbSrcEnabled, + IMG_UINT32 ui32NumDstSyncs, + IMG_HANDLE *phDstSyncInfo, + const IMG_BOOL *pbDstEnabled); + +void PVRLinuxFenceCheckAll(void); + +int PVRLinuxFenceInit(void); +void PVRLinuxFenceDeInit(void); + +#endif /* !defined(__PVR_LINUX_FENCE_H__) */ diff --git a/sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_sw_fence.c b/sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_sw_fence.c new file mode 100644 index 0000000..7c3006d --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_sw_fence.c @@ -0,0 +1,141 @@ +/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */ +/* vi: set ts=8 sw=8 sts=8: */ +/*************************************************************************/ /*! +@File +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Strictly Confidential. 
+*/ /**************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include + +#include "img_types.h" +#include "services_headers.h" +#include "servicesext.h" +#include "pvr_sw_fence.h" + +struct PVR_SW_FENCE_CONTEXT +{ + struct kref sRef; + IMG_INT32 i32ContextId; + const char *psCtxName; + const char *psDriverName; + atomic_t sSeqno; + atomic_t sFenceCnt; +}; + +struct PVR_SW_FENCE +{ + struct dma_fence sBase; + struct PVR_SW_FENCE_CONTEXT *psSWFenceCtx; + spinlock_t sLock; +}; + +#define to_pvr_sw_fence(fence) container_of(fence, struct PVR_SW_FENCE, sBase) + +static inline unsigned +pvr_sw_fence_context_seqno_next(struct PVR_SW_FENCE_CONTEXT *psSWFenceCtx) +{ + return atomic_inc_return(&psSWFenceCtx->sSeqno) - 1; +} + +static const char * pvr_sw_fence_get_driver_name(struct dma_fence *psFence) +{ + struct PVR_SW_FENCE *psPVRSwFence = to_pvr_sw_fence(psFence); + + return psPVRSwFence->psSWFenceCtx->psDriverName; +} + +static const char * pvr_sw_fence_get_timeline_name(struct dma_fence *psFence) +{ + struct PVR_SW_FENCE *psPVRSwFence = to_pvr_sw_fence(psFence); + + return psPVRSwFence->psSWFenceCtx->psCtxName; +} + +static bool pvr_sw_fence_enable_signaling(struct dma_fence *psFence) +{ + return true; +} + +static void pvr_sw_fence_context_destroy_kref(struct kref *kref) +{ + struct PVR_SW_FENCE_CONTEXT *psPVRSwFence = container_of(kref, struct PVR_SW_FENCE_CONTEXT, sRef); + unsigned fence_count; + + fence_count = atomic_read(&psPVRSwFence->sFenceCnt); + if (WARN_ON(fence_count)) + pr_debug("%s context has %u fence(s) remaining\n", psPVRSwFence->psCtxName, fence_count); + + kfree(psPVRSwFence); +} + +static void pvr_sw_fence_release(struct dma_fence *psFence) +{ + struct PVR_SW_FENCE *psPVRSwFence = to_pvr_sw_fence(psFence); + + atomic_dec(&psPVRSwFence->psSWFenceCtx->sFenceCnt); + kref_put(&psPVRSwFence->psSWFenceCtx->sRef, + pvr_sw_fence_context_destroy_kref); + kfree(psPVRSwFence); +} 
+ +static struct dma_fence_ops pvr_sw_fence_ops = { + .get_driver_name = pvr_sw_fence_get_driver_name, + .get_timeline_name = pvr_sw_fence_get_timeline_name, + .enable_signaling = pvr_sw_fence_enable_signaling, + .wait = dma_fence_default_wait, + .release = pvr_sw_fence_release, +}; + +struct PVR_SW_FENCE_CONTEXT * +pvr_sw_fence_context_create(const char *context_name, const char *driver_name) +{ + struct PVR_SW_FENCE_CONTEXT *psSWFenceCtx; + + psSWFenceCtx = kmalloc(sizeof(*psSWFenceCtx), GFP_KERNEL); + if (!psSWFenceCtx) + return NULL; + + psSWFenceCtx->i32ContextId = dma_fence_context_alloc(1); + psSWFenceCtx->psCtxName = context_name; + psSWFenceCtx->psDriverName = driver_name; + atomic_set(&psSWFenceCtx->sSeqno, 0); + atomic_set(&psSWFenceCtx->sFenceCnt, 0); + kref_init(&psSWFenceCtx->sRef); + + return psSWFenceCtx; +} + +void pvr_sw_fence_context_destroy(struct PVR_SW_FENCE_CONTEXT *psSWFenceCtx) +{ + kref_put(&psSWFenceCtx->sRef, pvr_sw_fence_context_destroy_kref); +} + +struct dma_fence * +pvr_sw_fence_create(struct PVR_SW_FENCE_CONTEXT *psSWFenceCtx) +{ + struct PVR_SW_FENCE *psPVRSwFence; + unsigned int seqno; + + psPVRSwFence = kmalloc(sizeof(*psPVRSwFence), GFP_KERNEL); + if (!psPVRSwFence) + return NULL; + + spin_lock_init(&psPVRSwFence->sLock); + psPVRSwFence->psSWFenceCtx = psSWFenceCtx; + + seqno = pvr_sw_fence_context_seqno_next(psSWFenceCtx); + dma_fence_init(&psPVRSwFence->sBase, &pvr_sw_fence_ops, &psPVRSwFence->sLock, psSWFenceCtx->i32ContextId, seqno); + + atomic_inc(&psSWFenceCtx->sFenceCnt); + kref_get(&psSWFenceCtx->sRef); + + return &psPVRSwFence->sBase; +} diff --git a/sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_sw_fence.h b/sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_sw_fence.h new file mode 100644 index 0000000..35934ce --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_sw_fence.h @@ -0,0 +1,17 @@ +/*************************************************************************/ /*! 
+@File +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Strictly Confidential. +*/ /**************************************************************************/ + +#if !defined(__PVR_SW_FENCES_H__) +#define __PVR_SW_FENCES_H__ + +struct PVR_SW_FENCE_CONTEXT; + +struct PVR_SW_FENCE_CONTEXT *pvr_sw_fence_context_create(const char *name, const char *driver_name); +void pvr_sw_fence_context_destroy(struct PVR_SW_FENCE_CONTEXT *psSWFenceCtx); +struct dma_fence *pvr_sw_fence_create(struct PVR_SW_FENCE_CONTEXT *psSWFenceCtx); + +#endif /* !defined(__PVR_SW_FENCES_H__) */ diff --git a/sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_sync.c b/sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_sync.c new file mode 100644 index 0000000..56f5949 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_sync.c @@ -0,0 +1,1492 @@ +/*************************************************************************/ /*! +@File pvr_sync.c +@Title Kernel driver for Android's sync mechanism +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "pvr_sync_common.h" +#include "pvr_sync.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "services_headers.h" +#include "sgxutils.h" +#include "ttrace.h" +#include "mutex.h" +#include "lock.h" + +//#define DEBUG_PRINT + +#if defined(DEBUG_PRINT) +#define DPF(fmt, ...) PVR_DPF((PVR_DBG_BUFFERED, fmt, __VA_ARGS__)) +#else +#define DPF(fmt, ...) 
do {} while(0) +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)) + +static inline struct sync_timeline *sync_pt_parent(struct sync_pt *pt) +{ + return pt->parent; +} + +#define for_each_sync_pt(s, f, c) \ + (void)c; \ +list_for_each_entry((s), &(f)->pt_list_head, pt_list) + +#else /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)) */ + +#define for_each_sync_pt(s, f, c) \ + for ((c) = 0, (s) = (struct sync_pt *)(f)->cbs[0].sync_pt; \ + (c) < (f)->num_fences; \ + (c)++, (s) = (struct sync_pt *)(f)->cbs[c].sync_pt) + +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)) */ + +/* We can't support this code when the MISR runs in atomic context because + * PVRSyncFreeSync() may be called by sync_timeline_signal() which may be + * scheduled by the MISR. PVRSyncFreeSync() needs to protect the handle + * tables against modification and so uses the Linux bridge mutex. + * + * You can't lock a mutex in atomic context. + */ +#if !defined(PVR_LINUX_MISR_USING_WORKQUEUE) && \ + !defined(PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE) +#error The Android sync driver requires that the SGX MISR runs in wq context +#endif + +/* Multi-purpose workqueue. Various functions in the Google sync driver + * may call down to us in atomic context. However, sometimes we may need + * to lock a mutex. To work around this conflict, use the workqueue to + * defer whatever the operation was. + */ +static struct workqueue_struct *gpsWorkQueue; + +/* Linux work struct for workqueue. */ +static struct work_struct gsWork; + +/* List of timelines, used by MISR callback to find signalled sync points + * and also to kick the hardware if signalling may allow progress to be + * made. + */ +static LIST_HEAD(gTimelineList); +static DEFINE_MUTEX(gTimelineListLock); + +/* The "defer-free" object list. Driver global. */ +static LIST_HEAD(gSyncInfoFreeList); +static DEFINE_SPINLOCK(gSyncInfoFreeListLock); + +/* The "defer-put" object list. Driver global. 
*/ +static LIST_HEAD(gFencePutList); +static DEFINE_SPINLOCK(gFencePutListLock); + +/* Sync point stamp counter -- incremented on creation of a new sync point */ +static IMG_UINT64 gui64SyncPointStamp; + +/* Forward declare due to cyclic dependency on gsSyncFenceAllocFOps */ +struct PVR_ALLOC_SYNC_DATA *PVRSyncAllocFDGet(int fd); + +/* Global data relating to PVR services connection */ + +static struct +{ + /* Process that initialized the sync driver. House-keep this so + * the correct per-proc data is used during shutdown. This PID is + * conventionally whatever `pvrsrvctl' was when it was alive. + */ + IMG_UINT32 ui32Pid; + + /* Device cookie for services allocation functions. The device would + * ordinarily be SGX, and the first/only device in the system. + */ + IMG_HANDLE hDevCookie; + + /* Device memory context that all SYNC_INFOs allocated by this driver + * will be created in. Because SYNC_INFOs are placed in a shared heap, + * it does not matter from which process the create ioctl originates. 
+ */ + IMG_HANDLE hDevMemContext; +} +gsSyncServicesConnection; + +/* NOTE: Must only be called with services bridge mutex held */ +static void PVRSyncSWTakeOp(PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo) +{ + psKernelSyncInfo->psSyncData->ui32WriteOpsPending = 1; +} + +static void PVRSyncSWCompleteOp(PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo) +{ + psKernelSyncInfo->psSyncData->ui32WriteOpsComplete = 1; +} + +static struct PVR_SYNC * +PVRSyncCreateSync(struct PVR_SYNC_TIMELINE *obj, + struct PVR_SYNC_KERNEL_SYNC_INFO *psSyncInfo) +{ + struct PVR_SYNC *psPt = NULL; + + psPt = (struct PVR_SYNC *) + sync_pt_create(&obj->obj, sizeof(struct PVR_SYNC)); + if(!psPt) + { + PVR_DPF((PVR_DBG_ERROR, "%s: sync_pt_create failed", __func__)); + goto err_out; + } + + psPt->psSyncData = kmalloc(sizeof(struct PVR_SYNC_DATA), GFP_KERNEL); + if(!psPt->psSyncData) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate PVR_SYNC_DATA", + __func__)); + goto err_free_pt; + } + + atomic_set(&psPt->psSyncData->sRefcount, 1); + + psPt->psSyncData->ui32WOPSnapshot = + obj->psSyncInfo->psBase->psSyncData->ui32WriteOpsPending; + + psPt->psSyncData->psSyncInfo = psSyncInfo; + + /* Stamp the point and update the global counter under lock */ + mutex_lock(&obj->sTimelineLock); + psPt->psSyncData->ui64Stamp = gui64SyncPointStamp++; + mutex_unlock(&obj->sTimelineLock); + +err_out: + return psPt; +err_free_pt: + sync_pt_free((struct sync_pt *)psPt); + psPt = NULL; + goto err_out; +} + +static IMG_BOOL PVRSyncIsSyncInfoInUse(PVRSRV_KERNEL_SYNC_INFO *psSyncInfo) +{ + return !(psSyncInfo->psSyncData->ui32WriteOpsPending == + psSyncInfo->psSyncData->ui32WriteOpsComplete && + psSyncInfo->psSyncData->ui32ReadOpsPending == + psSyncInfo->psSyncData->ui32ReadOpsComplete && + psSyncInfo->psSyncData->ui32ReadOps2Pending == + psSyncInfo->psSyncData->ui32ReadOps2Complete); +} + +/* Releases a sync info by adding it to a deferred list to be freed later. 
*/ +static void +PVRSyncReleaseSyncInfo(struct PVR_SYNC_KERNEL_SYNC_INFO *psSyncInfo) +{ + unsigned long flags; + + spin_lock_irqsave(&gSyncInfoFreeListLock, flags); + list_add_tail(&psSyncInfo->sHead, &gSyncInfoFreeList); + spin_unlock_irqrestore(&gSyncInfoFreeListLock, flags); + + queue_work(gpsWorkQueue, &gsWork); +} + +static void PVRSyncFreeSyncData(struct PVR_SYNC_DATA *psSyncData) +{ + PVR_ASSERT(atomic_read(&psSyncData->sRefcount) == 0); + PVRSyncReleaseSyncInfo(psSyncData->psSyncInfo); + psSyncData->psSyncInfo = NULL; + kfree(psSyncData); +} + +static void PVRSyncFreeSync(struct sync_pt *psPt) +{ + struct PVR_SYNC *psSync = (struct PVR_SYNC *)psPt; +#if defined(DEBUG_PRINT) + PVRSRV_KERNEL_SYNC_INFO *psSyncInfo = + psSync->psSyncData->psSyncInfo->psBase; +#endif + + PVR_ASSERT(atomic_read(&psSync->psSyncData->sRefcount) > 0); + + /* Only free on the last reference */ + if (atomic_dec_return(&psSync->psSyncData->sRefcount) != 0) + return; + + DPF("R( ): WOCVA=0x%.8X ROCVA=0x%.8X RO2CVA=0x%.8X " + "WOP/C=0x%x/0x%x ROP/C=0x%x/0x%x RO2P/C=0x%x/0x%x " + "ID=%llu, S=0x%x, F=%p", + psSyncInfo->sWriteOpsCompleteDevVAddr.uiAddr, + psSyncInfo->sReadOpsCompleteDevVAddr.uiAddr, + psSyncInfo->sReadOps2CompleteDevVAddr.uiAddr, + psSyncInfo->psSyncData->ui32WriteOpsPending, + psSyncInfo->psSyncData->ui32WriteOpsComplete, + psSyncInfo->psSyncData->ui32ReadOpsPending, + psSyncInfo->psSyncData->ui32ReadOpsComplete, + psSyncInfo->psSyncData->ui32ReadOps2Pending, + psSyncInfo->psSyncData->ui32ReadOps2Complete, + psSync->psSyncData->ui64Stamp, + psSync->psSyncData->ui32WOPSnapshot, + psSync->pt.fence); + + PVRSyncFreeSyncData(psSync->psSyncData); + psSync->psSyncData = NULL; +} + +static struct sync_pt *PVRSyncDup(struct sync_pt *sync_pt) +{ + struct PVR_SYNC *psPt, *psParentPt = (struct PVR_SYNC *)sync_pt; + + psPt = (struct PVR_SYNC *) + sync_pt_create(sync_pt_parent(sync_pt), sizeof(struct PVR_SYNC)); + if(!psPt) + { + PVR_DPF((PVR_DBG_ERROR, "%s: sync_pt_create failed", 
__func__)); + goto err_out; + } + + psPt->psSyncData = psParentPt->psSyncData; + atomic_inc(&psPt->psSyncData->sRefcount); + + PVR_ASSERT(atomic_read(&psPt->psSyncData->sRefcount) > 1); + +err_out: + return (struct sync_pt*)psPt; +} + +static int PVRSyncHasSignaled(struct sync_pt *sync_pt) +{ + struct PVR_SYNC *psPt = (struct PVR_SYNC *)sync_pt; + struct PVR_SYNC_TIMELINE *psTimeline = + (struct PVR_SYNC_TIMELINE *) sync_pt_parent(sync_pt); + PVRSRV_SYNC_DATA *psSyncData = + psPt->psSyncData->psSyncInfo->psBase->psSyncData; + + if (psSyncData->ui32WriteOpsComplete >= psSyncData->ui32WriteOpsPending) + { + psTimeline->bSyncHasSignaled = IMG_TRUE; + return 1; + } + + return 0; +} + +static int PVRSyncCompare(struct sync_pt *a, struct sync_pt *b) +{ + IMG_UINT64 ui64StampA = ((struct PVR_SYNC *)a)->psSyncData->ui64Stamp; + IMG_UINT64 ui64StampB = ((struct PVR_SYNC *)b)->psSyncData->ui64Stamp; + + if (ui64StampA == ui64StampB) + return 0; + else if (ui64StampA > ui64StampB) + return 1; + else + return -1; +} + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)) +static void PVRSyncPrintTimeline(struct seq_file *s, + struct sync_timeline *psObj) +{ + struct PVR_SYNC_TIMELINE *psTimeline = (struct PVR_SYNC_TIMELINE *)psObj; + + seq_printf(s, "WOP/WOC=0x%x/0x%x", + psTimeline->psSyncInfo->psBase->psSyncData->ui32WriteOpsPending, + psTimeline->psSyncInfo->psBase->psSyncData->ui32WriteOpsComplete); +} + +static void PVRSyncPrint(struct seq_file *s, struct sync_pt *psPt) +{ + struct PVR_SYNC *psSync = (struct PVR_SYNC *)psPt; + PVRSRV_KERNEL_SYNC_INFO *psSyncInfo = + psSync->psSyncData->psSyncInfo->psBase; + + seq_printf(s, "ID=%llu, refs=%u, WOPSnapshot=0x%x, parent=%p", + psSync->psSyncData->ui64Stamp, + atomic_read(&psSync->psSyncData->sRefcount), + psSync->psSyncData->ui32WOPSnapshot, + sync_pt_parent(&psSync->pt)); + seq_printf(s, "\n WOP/WOC=0x%x/0x%x, " + "ROP/ROC=0x%x/0x%x, ROP2/ROC2=0x%x/0x%x, " + "WOC DevVA=0x%.8x, ROC DevVA=0x%.8x, " + "ROC2 DevVA=0x%.8x", + 
psSyncInfo->psSyncData->ui32WriteOpsPending, + psSyncInfo->psSyncData->ui32WriteOpsComplete, + psSyncInfo->psSyncData->ui32ReadOpsPending, + psSyncInfo->psSyncData->ui32ReadOpsComplete, + psSyncInfo->psSyncData->ui32ReadOps2Pending, + psSyncInfo->psSyncData->ui32ReadOps2Complete, + psSyncInfo->sWriteOpsCompleteDevVAddr.uiAddr, + psSyncInfo->sReadOpsCompleteDevVAddr.uiAddr, + psSyncInfo->sReadOps2CompleteDevVAddr.uiAddr); +} +#else /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)) */ +static void PVRSyncPrintTimeline(struct sync_timeline *psObj, + char *str, int size) +{ + struct PVR_SYNC_TIMELINE *psTimeline = (struct PVR_SYNC_TIMELINE *)psObj; + + snprintf(str, size, "WOP/WOC=0x%x/0x%x", + psTimeline->psSyncInfo->psBase->psSyncData->ui32WriteOpsPending, + psTimeline->psSyncInfo->psBase->psSyncData->ui32WriteOpsComplete); +} +static void PVRSyncPrint(struct sync_pt *psPt, char *str, int size) +{ + struct PVR_SYNC *psSync = (struct PVR_SYNC *)psPt; + PVRSRV_KERNEL_SYNC_INFO *psSyncInfo = + psSync->psSyncData->psSyncInfo->psBase; + + snprintf(str, size, "ID=%llu, refs=%u, WOPSnapshot=0x%x, parent=%p", + psSync->psSyncData->ui64Stamp, + atomic_read(&psSync->psSyncData->sRefcount), + psSync->psSyncData->ui32WOPSnapshot, + sync_pt_parent(&psSync->pt));//psSync->pt.parent); + snprintf(str, size, "\n WOP/WOC=0x%x/0x%x, " + "ROP/ROC=0x%x/0x%x, ROP2/ROC2=0x%x/0x%x, " + "WOC DevVA=0x%.8x, ROC DevVA=0x%.8x, " + "ROC2 DevVA=0x%.8x", + psSyncInfo->psSyncData->ui32WriteOpsPending, + psSyncInfo->psSyncData->ui32WriteOpsComplete, + psSyncInfo->psSyncData->ui32ReadOpsPending, + psSyncInfo->psSyncData->ui32ReadOpsComplete, + psSyncInfo->psSyncData->ui32ReadOps2Pending, + psSyncInfo->psSyncData->ui32ReadOps2Complete, + psSyncInfo->sWriteOpsCompleteDevVAddr.uiAddr, + psSyncInfo->sReadOpsCompleteDevVAddr.uiAddr, + psSyncInfo->sReadOps2CompleteDevVAddr.uiAddr); +} +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)) */ + +static void PVRSyncReleaseTimeline(struct sync_timeline 
*psObj) +{ + struct PVR_SYNC_TIMELINE *psTimeline = (struct PVR_SYNC_TIMELINE *)psObj; + + mutex_lock(&gTimelineListLock); + list_del(&psTimeline->sTimelineList); + mutex_unlock(&gTimelineListLock); + + DPF("R(t): WOCVA=0x%.8X ROCVA=0x%.8X RO2CVA=0x%.8X " + "WOP/C=0x%x/0x%x ROP/C=0x%x/0x%x RO2P/C=0x%x/0x%x T=%p", + psTimeline->psSyncInfo->psBase->sWriteOpsCompleteDevVAddr.uiAddr, + psTimeline->psSyncInfo->psBase->sReadOpsCompleteDevVAddr.uiAddr, + psTimeline->psSyncInfo->psBase->sReadOps2CompleteDevVAddr.uiAddr, + psTimeline->psSyncInfo->psBase->psSyncData->ui32WriteOpsPending, + psTimeline->psSyncInfo->psBase->psSyncData->ui32WriteOpsComplete, + psTimeline->psSyncInfo->psBase->psSyncData->ui32ReadOpsPending, + psTimeline->psSyncInfo->psBase->psSyncData->ui32ReadOpsComplete, + psTimeline->psSyncInfo->psBase->psSyncData->ui32ReadOps2Pending, + psTimeline->psSyncInfo->psBase->psSyncData->ui32ReadOps2Complete, + psTimeline); + + PVRSyncReleaseSyncInfo(psTimeline->psSyncInfo); + psTimeline->psSyncInfo = NULL; +} + +PVRSRV_ERROR PVRSyncInitServices(void) +{ + IMG_BOOL bCreated, bShared[PVRSRV_MAX_CLIENT_HEAPS]; + PVRSRV_HEAP_INFO sHeapInfo[PVRSRV_MAX_CLIENT_HEAPS]; + IMG_UINT32 ui32ClientHeapCount = 0; + PVRSRV_PER_PROCESS_DATA *psPerProc; + PVRSRV_ERROR eError; + + LinuxLockMutexNested(&gPVRSRVLock, PVRSRV_LOCK_CLASS_BRIDGE); + + gsSyncServicesConnection.ui32Pid = OSGetCurrentProcessIDKM(); + + eError = PVRSRVProcessConnect(gsSyncServicesConnection.ui32Pid, 0); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRVProcessConnect failed", + __func__)); + goto err_unlock; + } + + psPerProc = PVRSRVFindPerProcessData(); + if (!psPerProc) + { + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRVFindPerProcessData failed", + __func__)); + goto err_disconnect; + } + + eError = PVRSRVAcquireDeviceDataKM(0, PVRSRV_DEVICE_TYPE_SGX, + &gsSyncServicesConnection.hDevCookie); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRVAcquireDeviceDataKM failed", + 
__func__)); + goto err_disconnect; + } + + if (!gsSyncServicesConnection.hDevCookie) + { + PVR_DPF((PVR_DBG_ERROR, "%s: hDevCookie is NULL", __func__)); + goto err_disconnect; + } + + eError = PVRSRVCreateDeviceMemContextKM(gsSyncServicesConnection.hDevCookie, + psPerProc, + &gsSyncServicesConnection.hDevMemContext, + &ui32ClientHeapCount, + &sHeapInfo[0], + &bCreated, + &bShared[0]); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRVCreateDeviceMemContextKM failed", + __func__)); + goto err_disconnect; + } + + if (!gsSyncServicesConnection.hDevMemContext) + { + PVR_DPF((PVR_DBG_ERROR, "%s: hDevMemContext is NULL", __func__)); + goto err_disconnect; + } + +err_unlock: + LinuxUnLockMutex(&gPVRSRVLock); + return eError; + +err_disconnect: + PVRSRVProcessDisconnect(gsSyncServicesConnection.ui32Pid); + goto err_unlock; +} + +void PVRSyncCloseServices(void) +{ + IMG_BOOL bDummy; + + LinuxLockMutexNested(&gPVRSRVLock, PVRSRV_LOCK_CLASS_BRIDGE); + + PVRSRVDestroyDeviceMemContextKM(gsSyncServicesConnection.hDevCookie, + gsSyncServicesConnection.hDevMemContext, + &bDummy); + gsSyncServicesConnection.hDevMemContext = NULL; + gsSyncServicesConnection.hDevCookie = NULL; + + PVRSRVProcessDisconnect(gsSyncServicesConnection.ui32Pid); + gsSyncServicesConnection.ui32Pid = 0; + + LinuxUnLockMutex(&gPVRSRVLock); +} + +static struct sync_timeline_ops gsTimelineOps = +{ + .driver_name = "pvr_sync", + .dup = PVRSyncDup, + .has_signaled = PVRSyncHasSignaled, + .compare = PVRSyncCompare, + .release_obj = PVRSyncReleaseTimeline, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)) + .print_obj = PVRSyncPrintTimeline, + .print_pt = PVRSyncPrint, +#else /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)) */ + .timeline_value_str = PVRSyncPrintTimeline, + .pt_value_str = PVRSyncPrint, +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)) */ + .free_pt = PVRSyncFreeSync, +}; + +static struct PVR_SYNC_TIMELINE *PVRSyncCreateTimeline(const IMG_CHAR *pszName) +{ + struct 
PVR_SYNC_TIMELINE *psTimeline; + PVRSRV_ERROR eError; + + psTimeline = (struct PVR_SYNC_TIMELINE *) + sync_timeline_create(&gsTimelineOps, sizeof(struct PVR_SYNC_TIMELINE), + pszName); + if (!psTimeline) + { + PVR_DPF((PVR_DBG_ERROR, "%s: sync_timeline_create failed", __func__)); + goto err_out; + } + + psTimeline->psSyncInfo = + kmalloc(sizeof(struct PVR_SYNC_KERNEL_SYNC_INFO), GFP_KERNEL); + if(!psTimeline->psSyncInfo) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate " + "PVR_SYNC_KERNEL_SYNC_INFO", __func__)); + goto err_free_timeline; + } + + psTimeline->bSyncHasSignaled = IMG_FALSE; + + mutex_init(&psTimeline->sTimelineLock); + + LinuxLockMutexNested(&gPVRSRVLock, PVRSRV_LOCK_CLASS_BRIDGE); + eError = PVRSRVAllocSyncInfoKM(gsSyncServicesConnection.hDevCookie, + gsSyncServicesConnection.hDevMemContext, + &psTimeline->psSyncInfo->psBase); + LinuxUnLockMutex(&gPVRSRVLock); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate timeline syncinfo", + __func__)); + goto err_free_syncinfo; + } + + DPF("A(t): WOCVA=0x%.8X ROCVA=0x%.8X RO2CVA=0x%.8X T=%p %s", + psTimeline->psSyncInfo->psBase->sWriteOpsCompleteDevVAddr.uiAddr, + psTimeline->psSyncInfo->psBase->sReadOpsCompleteDevVAddr.uiAddr, + psTimeline->psSyncInfo->psBase->sReadOps2CompleteDevVAddr.uiAddr, + psTimeline, pszName); + +err_out: + return psTimeline; +err_free_syncinfo: + kfree(psTimeline->psSyncInfo); +err_free_timeline: + sync_timeline_destroy((struct sync_timeline *)psTimeline); + psTimeline = NULL; + goto err_out; +} + +static int PVRSyncOpen(struct inode *inode, struct file *file) +{ + struct PVR_SYNC_TIMELINE *psTimeline; + IMG_CHAR task_comm[TASK_COMM_LEN+1]; + + get_task_comm(task_comm, current); + + psTimeline = PVRSyncCreateTimeline(task_comm); + if (!psTimeline) + return -ENOMEM; + + mutex_lock(&gTimelineListLock); + list_add_tail(&psTimeline->sTimelineList, &gTimelineList); + mutex_unlock(&gTimelineListLock); + + file->private_data = psTimeline; + return 0; +} 

/* fops release handler for /dev/pvr_sync: tears down the per-fd timeline
 * that PVRSyncOpen() created and stored in file->private_data.
 *
 * NOTE(review): the timeline is not removed from gTimelineList here;
 * presumably the sync_timeline release callback does that — confirm.
 */
static int PVRSyncRelease(struct inode *inode, struct file *file)
{
    struct PVR_SYNC_TIMELINE *psTimeline = file->private_data;
    sync_timeline_destroy(&psTimeline->obj);
    return 0;
}

/* PVR_SYNC_IOC_CREATE_FENCE handler: turns a previously ALLOC'd sync fd
 * (sData.allocdSyncInfo) into a real sync_pt + sync_fence on this timeline
 * and installs the fence on a fresh fd returned in sData.fence.
 */
static long
PVRSyncIOCTLCreate(struct PVR_SYNC_TIMELINE *psObj, void __user *pvData)
{
    struct PVR_SYNC_KERNEL_SYNC_INFO *psProvidedSyncInfo = NULL;
    struct PVR_ALLOC_SYNC_DATA *psAllocSyncData;
    struct PVR_SYNC_CREATE_IOCTL_DATA sData;
    int err = -EFAULT, iFd;
    struct sync_fence *psFence;
    struct sync_pt *psPt;

    /* get_unused_fd() was removed from the kernel; use the flags variant
     * on newer kernels.
     */
#if (LINUX_VERSION_CODE > KERNEL_VERSION(4,2,0))
    iFd = get_unused_fd_flags(0);
#else
    iFd = get_unused_fd();
#endif
    if (iFd < 0)
    {
        PVR_DPF((PVR_DBG_ERROR, "%s: Failed to find unused fd (%d)",
                 __func__, iFd));
        goto err_out;
    }

    if (!access_ok(VERIFY_READ, pvData, sizeof(sData)))
        goto err_put_fd;

    if (copy_from_user(&sData, pvData, sizeof(sData)))
        goto err_put_fd;

    if (sData.allocdSyncInfo < 0)
    {
        PVR_DPF((PVR_DBG_ERROR, "%s: Requested to create a fence from "
                 " an invalid alloc'd fd (%d)", __func__,
                 sData.allocdSyncInfo));
        goto err_put_fd;
    }

    psAllocSyncData = PVRSyncAllocFDGet(sData.allocdSyncInfo);
    if (!psAllocSyncData)
    {
        PVR_DPF((PVR_DBG_ERROR, "%s: PVRSyncAllocFDGet returned NULL, "
                 "possibly fd passed to CREATE is not an "
                 "ALLOC'd sync?", __func__));
        goto err_put_fd;
    }

    /* Move the psSyncInfo to the newly created sync, to avoid attempting
     * to create multiple syncs from the same allocation.
     */
    psProvidedSyncInfo = psAllocSyncData->psSyncInfo;
    psAllocSyncData->psSyncInfo = NULL;

    if (psProvidedSyncInfo == NULL)
    {
        PVR_DPF((PVR_DBG_ERROR, "%s: Alloc'd sync info is null - "
                 "possibly already CREATEd?", __func__));
        fput(psAllocSyncData->psFile);
        goto err_put_fd;
    }

    /* Drop the reference PVRSyncAllocFDGet() took on the alloc file. */
    fput(psAllocSyncData->psFile);

    psPt = (struct sync_pt *)PVRSyncCreateSync(psObj, psProvidedSyncInfo);
    if (!psPt)
    {
        err = -ENOMEM;
        goto err_put_fd;
    }

    /* Defensively NUL-terminate the user-supplied fence name. */
    sData.name[sizeof(sData.name) - 1] = '\0';
    psFence = sync_fence_create(sData.name, psPt);
    if (!psFence)
    {
        /* The point was not attached to a fence, so free it directly. */
        sync_pt_free(psPt);
        err = -ENOMEM;
        goto err_put_fd;
    }

    sData.fence = iFd;

    if (!access_ok(VERIFY_WRITE, pvData, sizeof(sData)))
    {
        /* Putting the fence also frees psPt. */
        sync_fence_put(psFence);
        goto err_put_fd;
    }

    if (copy_to_user(pvData, &sData, sizeof(sData)))
    {
        sync_fence_put(psFence);
        goto err_put_fd;
    }

    /* If the fence is a 'real' one, its signal status will be updated by
     * the MISR calling PVRSyncUpdateAllSyncs(). However, if we created
     * a 'fake' fence (for power optimization reasons) it has already
     * completed, and needs to be marked signalled (as the MISR will
     * never run for 'fake' fences).
     */
    if(psProvidedSyncInfo->psBase->psSyncData->ui32WriteOpsPending == 0)
        sync_timeline_signal((struct sync_timeline *)psObj);

    DPF("C( ): WOCVA=0x%.8X ROCVA=0x%.8X RO2CVA=0x%.8X F=%p %s",
        psProvidedSyncInfo->psBase->sWriteOpsCompleteDevVAddr.uiAddr,
        psProvidedSyncInfo->psBase->sReadOpsCompleteDevVAddr.uiAddr,
        psProvidedSyncInfo->psBase->sReadOps2CompleteDevVAddr.uiAddr,
        psFence, sData.name);

    sync_fence_install(psFence, iFd);
    err = 0;
err_out:
    return err;

err_put_fd:
    put_unused_fd(iFd);
    goto err_out;
}

/* PVR_SYNC_IOC_DEBUG_FENCE handler: copies per-point sync data and debug
 * metadata for up to PVR_SYNC_DEBUG_MAX_POINTS points of the given fence
 * back to userspace. Foreign (non-PVR) points are skipped.
 */
static long
PVRSyncIOCTLDebug(struct PVR_SYNC_TIMELINE *psObj, void __user *pvData)
{
    struct PVR_SYNC_DEBUG_IOCTL_DATA sData;
    struct sync_fence *psFence;
    struct sync_pt *sync_pt;
    int i = 0, j, err = -EFAULT;

    if(!access_ok(VERIFY_READ, pvData, sizeof(sData)))
        goto err_out;

    if(copy_from_user(&sData, pvData, sizeof(sData)))
        goto err_out;

    psFence = sync_fence_fdget(sData.iFenceFD);
    if(!psFence)
    {
        PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get fence from fd", __func__));
        goto err_out;
    }

    for_each_sync_pt(sync_pt, psFence, j)
    {
        PVR_SYNC_DEBUG *psMetaData = &sData.sSync[i].sMetaData;
        PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
        struct PVR_SYNC_TIMELINE *psTimeline;
        struct PVR_SYNC *psPt;

        if(i == PVR_SYNC_DEBUG_MAX_POINTS)
        {
            PVR_DPF((PVR_DBG_WARNING, "%s: Fence merged with more than %d "
                     "points", __func__,
                     PVR_SYNC_DEBUG_MAX_POINTS));
            break;
        }

        psPt = (struct PVR_SYNC *)sync_pt;

        /* Don't dump foreign points */
        if(sync_pt_parent(&psPt->pt)->ops != &gsTimelineOps)
            continue;

        psTimeline = (struct PVR_SYNC_TIMELINE *)sync_pt_parent(&psPt->pt);
        psKernelSyncInfo = psPt->psSyncData->psSyncInfo->psBase;
        PVR_ASSERT(psKernelSyncInfo != NULL);

        /* The sync refcount is valid as long as the FenceFD stays open,
         * so we can access it directly without worrying about it being
         * freed.
         */
        sData.sSync[i].sSyncData = *psKernelSyncInfo->psSyncData;

        psMetaData->ui64Stamp = psPt->psSyncData->ui64Stamp;
        psMetaData->ui32WriteOpsPendingSnapshot = psPt->psSyncData->ui32WOPSnapshot;
        i++;
    }

    sync_fence_put(psFence);

    sData.ui32NumPoints = i;

    if(!access_ok(VERIFY_WRITE, pvData, sizeof(sData)))
        goto err_out;

    if(copy_to_user(pvData, &sData, sizeof(sData)))
        goto err_out;

    err = 0;
err_out:
    return err;
}

/* Release handler for the anonymous "pvr_sync_alloc" inode. If the sync
 * info was never consumed by a CREATE ioctl, release it here.
 *
 * NOTE(review): psSyncInfo is only declared when DEBUG_PRINT is defined,
 * yet it is referenced unconditionally by the DPF() below — presumably
 * DPF compiles to nothing without DEBUG_PRINT; confirm, otherwise this
 * does not build in non-debug configurations.
 */
static int PVRSyncFenceAllocRelease(struct inode *inode, struct file *file)
{
    struct PVR_ALLOC_SYNC_DATA *psAllocSyncData = file->private_data;

    if(psAllocSyncData->psSyncInfo)
    {
#if defined(DEBUG_PRINT)
        PVRSRV_KERNEL_SYNC_INFO *psSyncInfo =
            psAllocSyncData->psSyncInfo->psBase;
#endif

        DPF("R(a): WOCVA=0x%.8X ROCVA=0x%.8X RO2CVA=0x%.8X",
            psSyncInfo->sWriteOpsCompleteDevVAddr.uiAddr,
            psSyncInfo->sReadOpsCompleteDevVAddr.uiAddr,
            psSyncInfo->sReadOps2CompleteDevVAddr.uiAddr);

        PVRSyncReleaseSyncInfo(psAllocSyncData->psSyncInfo);
        psAllocSyncData->psSyncInfo = NULL;
    }

    kfree(psAllocSyncData);
    return 0;
}

/* fops for the anonymous alloc fd; also used by PVRSyncAllocFDGet() to
 * verify that a given fd really is an ALLOC'd sync fd.
 */
static const struct file_operations gsSyncFenceAllocFOps =
{
    .release = PVRSyncFenceAllocRelease,
};

/* Look up an ALLOC'd sync fd. On success, returns the PVR_ALLOC_SYNC_DATA
 * with a reference held on the backing file (caller must fput it);
 * returns NULL if the fd is invalid or not an alloc fd.
 */
struct PVR_ALLOC_SYNC_DATA *PVRSyncAllocFDGet(int fd)
{
    struct file *file = fget(fd);
    if (!file)
        return NULL;
    if (file->f_op != &gsSyncFenceAllocFOps)
        goto err;
    return file->private_data;
err:
    fput(file);
    return NULL;
}

/* PVR_SYNC_IOC_ALLOC_FENCE handler: allocates a services sync info and
 * wraps it in an anonymous fd (to be turned into a real fence later by
 * the CREATE ioctl). Also reports whether the timeline currently looks
 * idle so userspace can apply power optimizations.
 */
static long
PVRSyncIOCTLAlloc(struct PVR_SYNC_TIMELINE *psTimeline, void __user *pvData)
{
    struct PVR_ALLOC_SYNC_DATA *psAllocSyncData;
    int err = -EFAULT, iFd;
    struct PVR_SYNC_ALLOC_IOCTL_DATA sData;
    PVRSRV_SYNC_DATA *psSyncData;
    struct file *psFile;
    PVRSRV_ERROR eError;

#if (LINUX_VERSION_CODE > KERNEL_VERSION(4,2,0))
    iFd = get_unused_fd_flags(0);
#else
    iFd = get_unused_fd();
#endif
    if (iFd < 0)
    {
        PVR_DPF((PVR_DBG_ERROR, "%s: Failed to find unused fd (%d)",
                 __func__, iFd));
        goto err_out;
    }

    if (!access_ok(VERIFY_READ, pvData, sizeof(sData)))
        goto err_put_fd;

    if (copy_from_user(&sData, pvData, sizeof(sData)))
        goto err_put_fd;

    psAllocSyncData = kmalloc(sizeof(struct PVR_ALLOC_SYNC_DATA), GFP_KERNEL);
    if (!psAllocSyncData)
    {
        PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate PVR_ALLOC_SYNC_DATA",
                 __func__));
        err = -ENOMEM;
        goto err_put_fd;
    }

    psAllocSyncData->psSyncInfo =
        kmalloc(sizeof(struct PVR_SYNC_KERNEL_SYNC_INFO), GFP_KERNEL);
    if (!psAllocSyncData->psSyncInfo)
    {
        PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate "
                 "PVR_SYNC_KERNEL_SYNC_INFO", __func__));
        err = -ENOMEM;
        goto err_free_alloc_sync_data;
    }

    /* PVRSRVAllocSyncInfoKM must be called with the bridge lock held. */
    LinuxLockMutexNested(&gPVRSRVLock, PVRSRV_LOCK_CLASS_BRIDGE);
    eError = PVRSRVAllocSyncInfoKM(gsSyncServicesConnection.hDevCookie,
                                   gsSyncServicesConnection.hDevMemContext,
                                   &psAllocSyncData->psSyncInfo->psBase);
    LinuxUnLockMutex(&gPVRSRVLock);

    if (eError != PVRSRV_OK)
    {
        PVR_DPF((PVR_DBG_ERROR, "%s: Failed to alloc syncinfo (%d)",
                 __func__, eError));
        err = -ENOMEM;
        goto err_free_sync_info;
    }

    psFile = anon_inode_getfile("pvr_sync_alloc",
                                &gsSyncFenceAllocFOps, psAllocSyncData, 0);
    if (!psFile)
    {
        PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create anon inode",
                 __func__));
        err = -ENOMEM;
        goto err_release_sync_info;
    }

    sData.fence = iFd;

    /* Check if this timeline looks idle. If there are still TQs running
     * on it, userspace shouldn't attempt any kind of power optimization
     * (e.g. it must not dummy-process GPU fences).
     *
     * Determining idleness here is safe because the ALLOC and CREATE
     * pvr_sync ioctls must be called under the gralloc module lock, so
     * we can't be creating another new fence op while we are still
     * processing this one.
     *
     * Take the bridge lock anyway so we can be sure that we read the
     * timeline sync's pending value coherently. The complete value may
     * be modified by the GPU, but worse-case we will decide we can't do
     * the power optimization and will still be correct.
     */
    LinuxLockMutexNested(&gPVRSRVLock, PVRSRV_LOCK_CLASS_BRIDGE);

    psSyncData = psTimeline->psSyncInfo->psBase->psSyncData;
    if(psSyncData->ui32WriteOpsPending == psSyncData->ui32WriteOpsComplete)
        sData.bTimelineIdle = IMG_TRUE;
    else
        sData.bTimelineIdle = IMG_FALSE;

    LinuxUnLockMutex(&gPVRSRVLock);

    if (!access_ok(VERIFY_WRITE, pvData, sizeof(sData)))
        goto err_release_file;

    if (copy_to_user(pvData, &sData, sizeof(sData)))
        goto err_release_file;

    psAllocSyncData->psTimeline = psTimeline;
    psAllocSyncData->psFile = psFile;

    DPF("A( ): WOCVA=0x%.8X ROCVA=0x%.8X RO2CVA=0x%.8X",
        psAllocSyncData->psSyncInfo->psBase->sWriteOpsCompleteDevVAddr.uiAddr,
        psAllocSyncData->psSyncInfo->psBase->sReadOpsCompleteDevVAddr.uiAddr,
        psAllocSyncData->psSyncInfo->psBase->sReadOps2CompleteDevVAddr.uiAddr);

    fd_install(iFd, psFile);
    err = 0;
err_out:
    return err;
err_release_sync_info:
    PVRSRVReleaseSyncInfoKM(psAllocSyncData->psSyncInfo->psBase);
err_free_sync_info:
    kfree(psAllocSyncData->psSyncInfo);
err_free_alloc_sync_data:
    kfree(psAllocSyncData);
err_put_fd:
    put_unused_fd(iFd);
    goto err_out;
err_release_file:
    /* fput triggers PVRSyncFenceAllocRelease, which frees the sync info
     * and the alloc data.
     */
    fput(psFile);
    put_unused_fd(iFd);
    goto err_out;
}

/* ioctl dispatcher for /dev/pvr_sync (also registered as compat_ioctl). */
static long
PVRSyncIOCTL(struct file *file, unsigned int cmd, unsigned long __user arg)
{
    struct PVR_SYNC_TIMELINE *psTimeline = file->private_data;
    void __user *pvData = (void __user *)arg;

    switch (cmd)
    {
        case PVR_SYNC_IOC_CREATE_FENCE:
            return PVRSyncIOCTLCreate(psTimeline, pvData);
        case PVR_SYNC_IOC_DEBUG_FENCE:
            return PVRSyncIOCTLDebug(psTimeline, pvData);
        case PVR_SYNC_IOC_ALLOC_FENCE:
            return PVRSyncIOCTLAlloc(psTimeline, pvData);
        default:
            return -ENOTTY;
    }
}

/* Deferred work: kick the GPU queues and drain the two defer lists
 * (sync-info defer-free and fence defer-put) that cannot be processed in
 * irq context.
 */
static void PVRSyncWorkQueueFunction(struct work_struct *data)
{
    PVRSRV_DEVICE_NODE *psDevNode =
        (PVRSRV_DEVICE_NODE*)gsSyncServicesConnection.hDevCookie;
    struct list_head sFreeList, *psEntry, *n;
    unsigned long flags;

    /* We lock the bridge mutex here for two reasons.
     *
     * Firstly, the SGXScheduleProcessQueuesKM and PVRSRVReleaseSyncInfoKM
     * functions require that they are called under lock. Multiple threads
     * into services are not allowed.
     *
     * Secondly, we need to ensure that when processing the defer-free list,
     * the PVRSyncIsSyncInfoInUse() function is called *after* any freed
     * sync was attached as a HW dependency (had ROP/ROP2 taken). This is
     * because for 'foreign' sync timelines we allocate a new object and
     * mark it for deletion immediately. If the 'foreign' sync_pt signals
     * before the kick ioctl has completed, we can block it from being
     * prematurely freed by holding the bridge mutex.
     *
     * NOTE: This code relies on the assumption that we can acquire a
     * spinlock while a mutex is held and that other users of the spinlock
     * do not need to hold the bridge mutex.
     */
    LinuxLockMutexNested(&gPVRSRVLock, PVRSRV_LOCK_CLASS_BRIDGE);

    /* A completed SW operation may un-block the GPU */
    SGXScheduleProcessQueuesKM(psDevNode);

    /* We can't call PVRSRVReleaseSyncInfoKM directly in this loop because
     * that will take the mmap mutex. We can't take mutexes while we have
     * this list locked with a spinlock. So move all the items we want to
     * free to another, local list (no locking required) and process it
     * in a second loop.
     */

    INIT_LIST_HEAD(&sFreeList);
    spin_lock_irqsave(&gSyncInfoFreeListLock, flags);
    list_for_each_safe(psEntry, n, &gSyncInfoFreeList)
    {
        struct PVR_SYNC_KERNEL_SYNC_INFO *psSyncInfo =
            container_of(psEntry, struct PVR_SYNC_KERNEL_SYNC_INFO, sHead);

        if(!PVRSyncIsSyncInfoInUse(psSyncInfo->psBase))
            list_move_tail(psEntry, &sFreeList);

    }
    spin_unlock_irqrestore(&gSyncInfoFreeListLock, flags);

    list_for_each_safe(psEntry, n, &sFreeList)
    {
        struct PVR_SYNC_KERNEL_SYNC_INFO *psSyncInfo =
            container_of(psEntry, struct PVR_SYNC_KERNEL_SYNC_INFO, sHead);

        list_del(psEntry);

        DPF("F(d): WOCVA=0x%.8X ROCVA=0x%.8X RO2CVA=0x%.8X",
            psSyncInfo->psBase->sWriteOpsCompleteDevVAddr.uiAddr,
            psSyncInfo->psBase->sReadOpsCompleteDevVAddr.uiAddr,
            psSyncInfo->psBase->sReadOps2CompleteDevVAddr.uiAddr);

        PVRSRVReleaseSyncInfoKM(psSyncInfo->psBase);
        psSyncInfo->psBase = NULL;

        kfree(psSyncInfo);
    }

    LinuxUnLockMutex(&gPVRSRVLock);

    /* Copying from one list to another (so a spinlock isn't held) used to
     * work around the problem that PVRSyncReleaseSyncInfo() would hold the
     * services mutex. However, we no longer do this, so this code could
     * potentially be simplified.
     *
     * Note however that sync_fence_put must be called from process/WQ
     * context because it uses fput(), which is not allowed to be called
     * from interrupt context in kernels <3.6.
     */
    INIT_LIST_HEAD(&sFreeList);
    spin_lock_irqsave(&gFencePutListLock, flags);
    list_for_each_safe(psEntry, n, &gFencePutList)
    {
        list_move_tail(psEntry, &sFreeList);
    }
    spin_unlock_irqrestore(&gFencePutListLock, flags);

    list_for_each_safe(psEntry, n, &sFreeList)
    {
        struct PVR_SYNC_FENCE *psSyncFence =
            container_of(psEntry, struct PVR_SYNC_FENCE, sHead);

        list_del(psEntry);

        sync_fence_put(psSyncFence->psBase);
        psSyncFence->psBase = NULL;

        kfree(psSyncFence);
    }
}

static const struct file_operations gsPVRSyncFOps =
{
    .owner          = THIS_MODULE,
    .open           = PVRSyncOpen,
    .release        = PVRSyncRelease,
    .unlocked_ioctl = PVRSyncIOCTL,
    .compat_ioctl   = PVRSyncIOCTL,
};

static struct miscdevice gsPVRSyncDev =
{
    .minor = MISC_DYNAMIC_MINOR,
    .name  = "pvr_sync",
    .fops  = &gsPVRSyncFOps,
};

/* Module-init-time setup: services connection, defer-work queue, and the
 * /dev/pvr_sync misc device. Returns 0 on success or a negative errno.
 *
 * NOTE(review): if misc_register() fails, the error path only calls
 * PVRSyncCloseServices() — the workqueue created just above is leaked
 * (no destroy_workqueue on that path). Worth fixing upstream.
 */
IMG_INTERNAL
int PVRSyncDeviceInit(void)
{
    int err = -1;

    if(PVRSyncInitServices() != PVRSRV_OK)
    {
        PVR_DPF((PVR_DBG_ERROR, "%s: Failed to initialise services",
                 __func__));
        goto err_out;
    }

    gpsWorkQueue = create_freezable_workqueue("pvr_sync_workqueue");
    if(!gpsWorkQueue)
    {
        PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create pvr_sync workqueue",
                 __func__));
        goto err_deinit_services;
    }

    INIT_WORK(&gsWork, PVRSyncWorkQueueFunction);

    err = misc_register(&gsPVRSyncDev);
    if(err)
    {
        PVR_DPF((PVR_DBG_ERROR, "%s: Failed to register pvr_sync misc "
                 "device (err=%d)", __func__, err));
        goto err_deinit_services;
    }

    err = 0;
err_out:
    return err;
err_deinit_services:
    PVRSyncCloseServices();
    goto err_out;
}

/* Module-exit-time teardown; mirrors PVRSyncDeviceInit(). */
IMG_INTERNAL
void PVRSyncDeviceDeInit(void)
{
    misc_deregister(&gsPVRSyncDev);
    destroy_workqueue(gpsWorkQueue);
    PVRSyncCloseServices();
}

/* Called from the MISR: signal every timeline, and if any sync point
 * signalled, queue the deferred work (which kicks the GPU queues).
 */
IMG_INTERNAL
void PVRSyncUpdateAllSyncs(void)
{
    IMG_BOOL bNeedToProcessQueues = IMG_FALSE;
    struct list_head *psEntry;

    /* Check to see if any syncs have signalled. If they have, it may unblock
     * the GPU. Decide what is needed and optionally schedule queue
     * processing.
     */
    mutex_lock(&gTimelineListLock);
    list_for_each(psEntry, &gTimelineList)
    {
        struct PVR_SYNC_TIMELINE *psTimeline =
            container_of(psEntry, struct PVR_SYNC_TIMELINE, sTimelineList);

        sync_timeline_signal((struct sync_timeline *)psTimeline);

        /* bSyncHasSignaled is set as a side effect of the signal
         * callback; consume-and-clear it here.
         */
        if(psTimeline->bSyncHasSignaled)
        {
            psTimeline->bSyncHasSignaled = IMG_FALSE;
            bNeedToProcessQueues = IMG_TRUE;
        }
    }
    mutex_unlock(&gTimelineListLock);

    if(bNeedToProcessQueues)
        queue_work(gpsWorkQueue, &gsWork);
}

/* Two syncs are considered duplicates if they share the same WriteOps-
 * complete device virtual address.
 */
static IMG_BOOL
PVRSyncIsDuplicate(PVRSRV_KERNEL_SYNC_INFO *psA, PVRSRV_KERNEL_SYNC_INFO *psB)
{
    return psA->sWriteOpsCompleteDevVAddr.uiAddr ==
           psB->sWriteOpsCompleteDevVAddr.uiAddr ? IMG_TRUE : IMG_FALSE;
}

/* Async-wait callback for points on foreign (non-PVR) timelines: completes
 * the SW op on the shadow syncinfo, defers the fence put to WQ context
 * (may be running in irq context), and frees the waiter.
 */
static void ForeignSyncPtSignaled(struct sync_fence *fence,
                                  struct sync_fence_waiter *waiter)
{
    struct PVR_SYNC_FENCE_WAITER *psWaiter =
        (struct PVR_SYNC_FENCE_WAITER *)waiter;
    unsigned long flags;

    PVRSyncSWCompleteOp(psWaiter->psSyncInfo->psBase);

    DPF("R(f): WOCVA=0x%.8X ROCVA=0x%.8X RO2CVA=0x%.8X "
        "WOP/C=0x%x/0x%x ROP/C=0x%x/0x%x RO2P/C=0x%x/0x%x",
        psWaiter->psSyncInfo->psBase->sWriteOpsCompleteDevVAddr.uiAddr,
        psWaiter->psSyncInfo->psBase->sReadOpsCompleteDevVAddr.uiAddr,
        psWaiter->psSyncInfo->psBase->sReadOps2CompleteDevVAddr.uiAddr,
        psWaiter->psSyncInfo->psBase->psSyncData->ui32WriteOpsPending,
        psWaiter->psSyncInfo->psBase->psSyncData->ui32WriteOpsComplete,
        psWaiter->psSyncInfo->psBase->psSyncData->ui32ReadOpsPending,
        psWaiter->psSyncInfo->psBase->psSyncData->ui32ReadOpsComplete,
        psWaiter->psSyncInfo->psBase->psSyncData->ui32ReadOps2Pending,
        psWaiter->psSyncInfo->psBase->psSyncData->ui32ReadOps2Complete);

    PVRSyncReleaseSyncInfo(psWaiter->psSyncInfo);
    psWaiter->psSyncInfo = NULL;

    /* We can 'put' the fence now, but this function might be called in irq
     * context so we must defer to WQ.
     */
    spin_lock_irqsave(&gFencePutListLock, flags);
    list_add_tail(&psWaiter->psSyncFence->sHead, &gFencePutList);
    psWaiter->psSyncFence = NULL;
    spin_unlock_irqrestore(&gFencePutListLock, flags);

    /* The PVRSyncReleaseSyncInfo() call above already queued work */
    /*queue_work(gpsWorkQueue, &gsWork);*/

    kfree(psWaiter);
}

/* Creates a "shadow" services syncinfo for a fence containing foreign
 * sync points. The shadow sync is completed (by ForeignSyncPtSignaled)
 * when the foreign fence signals. Returns NULL on failure or if the
 * fence already signalled, in which case no synchronization is needed.
 */
static PVRSRV_KERNEL_SYNC_INFO *ForeignSyncPointToSyncInfo(int iFenceFd)
{
    PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
    struct PVR_SYNC_FENCE_WAITER *psWaiter;
    struct PVR_SYNC_FENCE *psSyncFence;
    struct sync_fence *psFence;
    PVRSRV_ERROR eError;
    int err;

    /* FIXME: Could optimize this function by pre-testing sync_wait(.., 0)
     * to determine if it has already signalled. We must avoid this
     * for now because the sync driver was broken in earlier kernels.
     */

    /* The custom waiter structure is freed in the waiter callback */
    psWaiter = kmalloc(sizeof(struct PVR_SYNC_FENCE_WAITER), GFP_KERNEL);
    if(!psWaiter)
    {
        PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate waiter", __func__));
        goto err_out;
    }

    psWaiter->psSyncInfo =
        kmalloc(sizeof(struct PVR_SYNC_KERNEL_SYNC_INFO), GFP_KERNEL);
    if(!psWaiter->psSyncInfo)
    {
        PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate "
                 "PVR_SYNC_KERNEL_SYNC_INFO", __func__));
        goto err_free_waiter;
    }

    /* We take another reference on the parent fence, each time we see a
     * 'foreign' sync_pt. This is to ensure the timeline, fence and sync_pts
     * from the foreign timeline cannot go away until the sync_pt signals.
     * In practice this also means they will not go away until the entire
     * fence signals. It means that we will always get a
     * sync_fence_wait_async() callback for these points.
     */
    psFence = sync_fence_fdget(iFenceFd);
    if(!psFence)
    {
        PVR_DPF((PVR_DBG_ERROR, "%s: Failed to take reference on fence",
                 __func__));
        goto err_free_syncinfo;
    }

    /* Allocate packet we can store this fence on (with a list head) so we
     * can add it to the defer-put list without allocating memory in irq
     * context.
     *
     * NOTE: At the moment we allocate one of these per sync_pts, but it
     * might be possible to optimize this to one per fence.
     */
    psSyncFence = kmalloc(sizeof(struct PVR_SYNC_FENCE), GFP_KERNEL);
    if(!psSyncFence)
    {
        PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate "
                 "PVR_SYNC_FENCE", __func__));
        goto err_sync_fence_put;
    }

    psSyncFence->psBase = psFence;
    psWaiter->psSyncFence = psSyncFence;

    /* Allocate a "shadow" SYNCINFO for this sync_pt and set it up to be
     * completed by the callback.
     */
    eError = PVRSRVAllocSyncInfoKM(gsSyncServicesConnection.hDevCookie,
                                   gsSyncServicesConnection.hDevMemContext,
                                   &psKernelSyncInfo);
    if(eError != PVRSRV_OK)
    {
        PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate syncinfo", __func__));
        goto err_free_sync_fence;
    }

    /* Make sure we take the SW operation before adding the waiter, to avoid
     * racing with parallel completes.
     */
    PVRSyncSWTakeOp(psKernelSyncInfo);

    sync_fence_waiter_init(&psWaiter->sWaiter, ForeignSyncPtSignaled);
    psWaiter->psSyncInfo->psBase = psKernelSyncInfo;

    err = sync_fence_wait_async(psFence, &psWaiter->sWaiter);
    if(err)
    {
        if(err < 0)
        {
            PVR_DPF((PVR_DBG_ERROR, "%s: Fence was in error state", __func__));
            /* Fall-thru */
        }

        /* -1 means the fence was broken, 1 means the fence already
         * signalled. In either case, roll back what we've done and
         * skip using this sync_pt for synchronization.
         */
        goto err_release_sync_info;
    }

    DPF("A(f): WOCVA=0x%.8X ROCVA=0x%.8X RO2CVA=0x%.8X F=%p",
        psKernelSyncInfo->sWriteOpsCompleteDevVAddr.uiAddr,
        psKernelSyncInfo->sReadOpsCompleteDevVAddr.uiAddr,
        psKernelSyncInfo->sReadOps2CompleteDevVAddr.uiAddr,
        psFence);

    /* NOTE: Don't use psWaiter after this point as it may asynchronously
     * signal before this function completes (and be freed already).
     */

    /* Even if the fence signals while we're hanging on to this, the sync
     * can't be freed until the bridge mutex is taken in the callback. The
     * bridge mutex won't be released by the caller of this function until
     * the GPU operation has been scheduled, which increments ROP,
     * preventing the sync from being freed when still in use by the GPU.
     */
    return psKernelSyncInfo;

/* Error labels intentionally cascade (fall through) to unwind in reverse
 * order of acquisition.
 */
err_release_sync_info:
    PVRSyncSWCompleteOp(psKernelSyncInfo);
    PVRSRVReleaseSyncInfoKM(psKernelSyncInfo);
err_free_sync_fence:
    kfree(psSyncFence);
err_sync_fence_put:
    sync_fence_put(psFence);
err_free_syncinfo:
    kfree(psWaiter->psSyncInfo);
err_free_waiter:
    kfree(psWaiter);
err_out:
    return NULL;
}

/* Returns IMG_TRUE if any point of the fence belongs to a timeline that
 * was not created by this driver (its ops are not gsTimelineOps).
 */
static IMG_BOOL FenceHasForeignPoints(struct sync_fence *psFence)
{
    struct sync_pt *psPt;
    int j;

    for_each_sync_pt(psPt, psFence, j)
    {
        if(sync_pt_parent(psPt)->ops != &gsTimelineOps)
            return IMG_TRUE;
    }

    return IMG_FALSE;
}

/* Expands an array of fence fds into the deduplicated list of kernel
 * sync infos they depend on, shadowing foreign fences via
 * ForeignSyncPointToSyncInfo(). The caller owns the references taken on
 * apsFence[] entries and must put them once HW ops are scheduled.
 *
 * NOTE(review): inside the per-fence loop, `j` is used both as the
 * for_each_sync_pt iteration variable and as the index of the inner
 * duplicate-scan loop, so the inner loop clobbers the outer iterator —
 * looks like a latent bug; confirm against the for_each_sync_pt
 * definition for this kernel version.
 *
 * NOTE(review): the error DPF below prints an IMG_SIZE_T with "%d" —
 * format/argument mismatch on 64-bit builds.
 */
IMG_BOOL
ExpandAndDeDuplicateFenceSyncs(IMG_UINT32 ui32NumSyncs,
                               IMG_HANDLE aiFenceFds[],
                               IMG_UINT32 ui32SyncPointLimit,
                               struct sync_fence *apsFence[],
                               IMG_UINT32 *pui32NumRealSyncs,
                               PVRSRV_KERNEL_SYNC_INFO *apsSyncInfo[])
{
    IMG_UINT32 i, j, ui32FenceIndex = 0;
    IMG_BOOL bRet = IMG_TRUE;
    struct sync_pt *psPt;

    *pui32NumRealSyncs = 0;

    for(i = 0; i < ui32NumSyncs; i++)
    {
        PVRSRV_KERNEL_SYNC_INFO *psSyncInfo;

        /* Skip any invalid fence file descriptors without error */
        if((IMG_INT32)aiFenceFds[i] < 0)
            continue;

        /* By converting a file descriptor to a struct sync_fence, we are
         * taking a reference on the fence. We don't want the fence to go
         * away until we have submitted the command, even if it signals
         * before we dispatch the command, or the timeline(s) are destroyed.
         *
         * This reference should be released by the caller of this function
         * once hardware operations have been scheduled on the GPU sync_pts
         * participating in this fence. When our MISR is scheduled, the
         * defer-free list will be processed, cleaning up the SYNCINFO.
         *
         * Note that this reference *isn't* enough for non-GPU sync_pts.
         * We'll take another reference on the fence for those operations
         * later (the life-cycle requirements there are totally different).
         *
         * Fence lookup may fail here if the fd became invalid since it was
         * patched in userspace. That's really a userspace driver bug, so
         * just fail here instead of not synchronizing.
         */
        apsFence[ui32FenceIndex] = sync_fence_fdget((IMG_INT32)aiFenceFds[i]);
        if(!apsFence[ui32FenceIndex])
        {
            PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get fence from fd=%d",
                     __func__, (IMG_SIZE_T)aiFenceFds[i]));
            bRet = IMG_FALSE;
            goto err_out;
        }

        /* If this fence has any points from foreign timelines, we need to
         * allocate a 'shadow' SYNCINFO and update it in software ourselves,
         * so the ukernel can test the readiness of the dependency.
         *
         * It's tempting to just handle all fences like this (since most of
         * the time they *will* be merged with sw_sync) but such 'shadow'
         * syncs are slower. This is because we need to wait for the MISR to
         * schedule to update the GPU part of the fence (normally the ukernel
         * would be able to make the update directly).
         */
        if(FenceHasForeignPoints(apsFence[ui32FenceIndex]))
        {
            psSyncInfo = ForeignSyncPointToSyncInfo((IMG_INT32)aiFenceFds[i]);
            if(psSyncInfo)
            {
                if(!AddSyncInfoToArray(psSyncInfo, ui32SyncPointLimit,
                                       pui32NumRealSyncs, apsSyncInfo))
                {
                    /* Soft-fail. Stop synchronizing. */
                    goto err_out;
                }
            }
            ui32FenceIndex++;
            continue;
        }

        /* FIXME: The ForeignSyncPointToSyncInfo() path optimizes away already
         * signalled fences. Consider optimizing this path too.
         */
        for_each_sync_pt(psPt, apsFence[ui32FenceIndex], j)
        {

            psSyncInfo =
                ((struct PVR_SYNC *)psPt)->psSyncData->psSyncInfo->psBase;

            /* Walk the current list of points and make sure this isn't a
             * duplicate. Duplicates will deadlock.
             */
            for(j = 0; j < *pui32NumRealSyncs; j++)
            {
                /* The point is from a different timeline so we must use it */
                if(!PVRSyncIsDuplicate(apsSyncInfo[j], psSyncInfo))
                    continue;

                /* There's no need to bump the real sync count as we either
                 * ignored the duplicate or replaced an previously counted
                 * entry.
                 */
                break;
            }

            if(j == *pui32NumRealSyncs)
            {
                /* It's not a duplicate; moving on.. */
                if(!AddSyncInfoToArray(psSyncInfo, ui32SyncPointLimit,
                                       pui32NumRealSyncs, apsSyncInfo))
                    goto err_out;
            }
        }

        ui32FenceIndex++;
    }

err_out:
    return bRet;
}
diff --git a/sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_sync.h b/sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_sync.h
new file mode 100644
index 0000000..853586b
--- /dev/null
+++ b/sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_sync.h
@@ -0,0 +1,201 @@
+/*************************************************************************/ /*!
+@File           pvr_sync.h
+@Title          Kernel driver for Android's sync mechanism
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ /**************************************************************************/

/* This is the IMG extension of a sync_timeline */

struct PVR_SYNC_TIMELINE
{
	/* Must be first: this struct is cast to/from struct sync_timeline. */
	struct sync_timeline obj;

	/* Needed to keep a global list of all timelines for MISR checks. */
	struct list_head sTimelineList;

	/* True if a sync point on the timeline has signaled */
	IMG_BOOL bSyncHasSignaled;

	/* A mutex, as we want to ensure that the comparison (and possible
	 * reset) of the highest SW fence value is atomic with the takeop,
	 * so both the SW fence value and the WOP snapshot should both have
	 * the same order for all SW syncs.
	 *
	 * This mutex also protects modifications to the fence stamp counter.
	 */
	struct mutex sTimelineLock;

	/* Every timeline has a services sync object. This object must not
	 * be used by the hardware to enforce ordering -- that's what the
	 * per sync-point objects are for. This object is attached to every
	 * TQ scheduled on the timeline and is primarily useful for debugging.
	 */
	struct PVR_SYNC_KERNEL_SYNC_INFO *psSyncInfo;
};

/* A PVR_SYNC_DATA is the basic guts of a sync point. It's kept separate
 * because sync points can be dup'ed, and we don't want to duplicate all
 * of the shared metadata.
 *
 * This is also used to back an allocated sync info, which can be passed to
 * the CREATE ioctl to insert the fence and add it to the timeline. This is
 * used as an intermediate step as a PVRSRV_KERNEL_SYNC_INFO is needed to
 * attach to the transfer task used as a fence in the hardware.
 */

struct PVR_SYNC_DATA
{
	/* Every sync point has a services sync object. This object is used
	 * by the hardware to enforce ordering -- it is attached as a source
	 * dependency to various commands.
	 */
	struct PVR_SYNC_KERNEL_SYNC_INFO *psSyncInfo;

	/* This refcount is incremented at create and dup time, and decremented
	 * at free time. It ensures the object doesn't start the defer-free
	 * process until it is no longer referenced.
	 */
	atomic_t sRefcount;

	/* This is purely a debug feature. Record the WOP snapshot from the
	 * timeline synchronization object when a new fence is created.
	 */
	IMG_UINT32 ui32WOPSnapshot;

	/* This is a globally unique ID for the sync point. If a sync point is
	 * duplicated, its stamp is copied over (seems counter-intuitive, but in
	 * nearly all cases a sync point is merged with another, the original
	 * is freed).
	 */
	IMG_UINT64 ui64Stamp;
};

/* This is the IMG extension of a sync_pt */

struct PVR_SYNC
{
	/* Must be first: this struct is cast to/from struct sync_pt. */
	struct sync_pt pt;
	struct PVR_SYNC_DATA *psSyncData;
};

struct PVR_SYNC_FENCE
{
	/* Base sync_fence structure */
	struct sync_fence *psBase;

	/* To ensure callbacks are always received for fences / sync_pts, even
	 * after the fence has been 'put' (freed), we must take a reference to
	 * the fence. We still need to 'put' the fence ourselves, but this might
	 * happen in irq context, where fput() is not allowed (in kernels <3.6).
	 * We must add the fence to a list which is processed in WQ context.
	 */
	struct list_head sHead;
};

/* Any sync point from a foreign (non-PVR) timeline needs to have a "shadow"
 * syncinfo. This is modelled as a software operation. The foreign driver
 * completes the operation by calling a callback we registered with it.
 *
 * Because we are allocating SYNCINFOs for each sync_pt, rather than each
 * fence, we need to extend the waiter struct slightly to include the
 * necessary metadata.
 */
struct PVR_SYNC_FENCE_WAITER
{
	/* Base sync driver waiter structure */
	struct sync_fence_waiter sWaiter;

	/* "Shadow" syncinfo backing the foreign driver's sync_pt */
	struct PVR_SYNC_KERNEL_SYNC_INFO *psSyncInfo;

	/* Optimizes lookup of fence for defer-put operation */
	struct PVR_SYNC_FENCE *psSyncFence;
};

/* Local wrapper around PVRSRV_KERNEL_SYNC_INFO to add a list head */

struct PVR_SYNC_KERNEL_SYNC_INFO
{
	/* Base services sync info structure */
	PVRSRV_KERNEL_SYNC_INFO *psBase;

	/* Sync points can go away when there are deferred hardware
	 * operations still outstanding. We must not free the SYNC_INFO
	 * until the hardware is finished, so we add it to a defer list
	 * which is processed periodically ("defer-free").
	 *
	 * This is also used for "defer-free" of a timeline -- the process
	 * may destroy its timeline or terminate abnormally but the HW could
	 * still be using the sync object hanging off of the timeline.
	 *
	 * Note that the defer-free list is global, not per-timeline.
	 */
	struct list_head sHead;
};

/* A PVR_ALLOC_SYNC_DATA is used to back an allocated, but not yet created
 * and inserted into a timeline, sync data. This is required as we must
 * allocate the syncinfo to be passed down with the transfer task used to
 * implement fences in the hardware.
 */
struct PVR_ALLOC_SYNC_DATA
{
	struct PVR_SYNC_KERNEL_SYNC_INFO *psSyncInfo;

	/* A link to the timeline is required to add a per-timeline sync
	 * to the fence transfer task.
	 */
	struct PVR_SYNC_TIMELINE *psTimeline;
	/* Anonymous inode file backing the alloc fd (see PVRSyncIOCTLAlloc). */
	struct file *psFile;
};


IMG_BOOL
ExpandAndDeDuplicateFenceSyncs(IMG_UINT32 ui32NumSyncs,
                               IMG_HANDLE aiFenceFds[],
                               IMG_UINT32 ui32SyncPointLimit,
                               struct sync_fence *apsFence[],
                               IMG_UINT32 *pui32NumRealSyncs,
                               PVRSRV_KERNEL_SYNC_INFO *apsSyncInfo[]);

PVRSRV_ERROR PVRSyncInitServices(void);
void PVRSyncCloseServices(void);

struct PVR_ALLOC_SYNC_DATA *PVRSyncAllocFDGet(int fd);

diff --git a/sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_sync_common.c b/sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_sync_common.c
new file mode 100644
index 0000000..b9120c1
--- /dev/null
+++ b/sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_sync_common.c
@@ -0,0 +1,379 @@
+/*************************************************************************/ /*!
+@File           pvr_sync.c
+@Title          Kernel driver for Android's sync mechanism
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License        Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "pvr_sync_common.h" +#if defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) +#include "pvr_sync.h" +#else +#include "pvr_fence.h" +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "services_headers.h" +#include "sgxutils.h" +#include "ttrace.h" +#include "mutex.h" +#include "lock.h" + +static void +CopyKernelSyncInfoToDeviceSyncObject(PVRSRV_KERNEL_SYNC_INFO *psSyncInfo, + PVRSRV_DEVICE_SYNC_OBJECT *psSyncObject) +{ + psSyncObject->sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr; + psSyncObject->sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr; + psSyncObject->sReadOps2CompleteDevVAddr = psSyncInfo->sReadOps2CompleteDevVAddr; + psSyncObject->ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending; + psSyncObject->ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending; + psSyncObject->ui32ReadOps2PendingVal = psSyncInfo->psSyncData->ui32ReadOps2Pending; +} + +IMG_BOOL +AddSyncInfoToArray(PVRSRV_KERNEL_SYNC_INFO *psSyncInfo, + IMG_UINT32 ui32SyncPointLimit, + IMG_UINT32 *pui32NumRealSyncs, + PVRSRV_KERNEL_SYNC_INFO *apsSyncInfo[]) +{ + /* Ran out of syncs. Not much userspace can do about this, since it + * could have been passed multiple merged syncs and doesn't know they + * were merged. Allow this through, but print a warning and stop + * synchronizing. 
+ */ + if(*pui32NumRealSyncs == ui32SyncPointLimit) + { + PVR_DPF((PVR_DBG_WARNING, "%s: Ran out of source syncs %d == %d", + __func__, *pui32NumRealSyncs, + ui32SyncPointLimit)); + return IMG_FALSE; + } + + apsSyncInfo[*pui32NumRealSyncs] = psSyncInfo; + (*pui32NumRealSyncs)++; + return IMG_TRUE; +} + +IMG_INTERNAL PVRSRV_ERROR +PVRSyncPatchCCBKickSyncInfos(IMG_HANDLE ahSyncs[SGX_MAX_SRC_SYNCS_TA], + PVRSRV_DEVICE_SYNC_OBJECT asDevSyncs[SGX_MAX_SRC_SYNCS_TA], + IMG_UINT32 *pui32NumSrcSyncs) +{ + PVRSRV_KERNEL_SYNC_INFO *apsSyncInfo[SGX_MAX_SRC_SYNCS_TA]; +#if defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) + struct sync_fence *apsFence[SGX_MAX_SRC_SYNCS_TA] = {}; +#else /* defined(PVR_ANDROID_NATIVE_WINDOW_HAS_FENCE) */ + struct dma_fence *apsFence[SGX_MAX_SRC_SYNCS_TA] = {}; +#endif + IMG_UINT32 i, ui32NumRealSrcSyncs; + PVRSRV_ERROR eError = PVRSRV_OK; + + if(!ExpandAndDeDuplicateFenceSyncs(*pui32NumSrcSyncs, + (IMG_HANDLE *)ahSyncs, + SGX_MAX_SRC_SYNCS_TA, + apsFence, + &ui32NumRealSrcSyncs, + apsSyncInfo)) + { + eError = PVRSRV_ERROR_HANDLE_NOT_FOUND; + goto err_put_fence; + } + + /* There should only be one destination sync for a transfer. + * Ultimately this will be patched to two (the sync_pt SYNCINFO, + * and the timeline's SYNCINFO for debugging). + */ + for(i = 0; i < ui32NumRealSrcSyncs; i++) + { + PVRSRV_KERNEL_SYNC_INFO *psSyncInfo = apsSyncInfo[i]; + + /* The following code is mostly the same as the texture dependencies + * handling in SGXDoKickKM, but we have to copy it here because it + * must be run while the fence is 'locked' by sync_fence_fdget. + */ + + PVR_TTRACE_SYNC_OBJECT(PVRSRV_TRACE_GROUP_KICK, KICK_TOKEN_SRC_SYNC, + psSyncInfo, PVRSRV_SYNCOP_SAMPLE); + + CopyKernelSyncInfoToDeviceSyncObject(psSyncInfo, &asDevSyncs[i]); + + /* Texture dependencies are read operations */ + psSyncInfo->psSyncData->ui32ReadOpsPending++; + + /* Finally, patch the sync back into the input array. + * NOTE: The syncs are protected here by the defer-free worker. 
+ */ + ahSyncs[i] = psSyncInfo; + } + + /* Updating this allows the PDUMP handling and ROP rollbacks to work + * correctly in SGXDoKickKM. + */ + *pui32NumSrcSyncs = ui32NumRealSrcSyncs; + +err_put_fence: + for(i = 0; i < SGX_MAX_SRC_SYNCS_TA && apsFence[i]; i++) +#if defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) + sync_fence_put(apsFence[i]); +#else + dma_fence_put(apsFence[i]); +#endif + return eError; +} + +/* Patching for TQ fence in queueBuffer() */ +IMG_INTERNAL PVRSRV_ERROR +PVRSyncPatchTransferSyncInfos(IMG_HANDLE ahSyncs[SGX_MAX_SRC_SYNCS_TA], + PVRSRV_DEVICE_SYNC_OBJECT asDevSyncs[SGX_MAX_SRC_SYNCS_TA], + IMG_UINT32 *pui32NumSrcSyncs) +{ + struct PVR_ALLOC_SYNC_DATA *psTransferSyncData; + PVRSRV_KERNEL_SYNC_INFO *psSyncInfo; + PVRSRV_ERROR eError = PVRSRV_OK; + + if (*pui32NumSrcSyncs != 1) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid number of syncs (%d), clamping " + "to 1", __func__, *pui32NumSrcSyncs)); + } + + psTransferSyncData = PVRSyncAllocFDGet((int)ahSyncs[0]); + + if (!psTransferSyncData) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get PVR_SYNC_DATA from " + "supplied fd", __func__)); + eError = PVRSRV_ERROR_HANDLE_NOT_FOUND; + goto err_out; + } + + /* There should only be one destination sync for a transfer. + * Ultimately this will be patched to two (the sync_pt SYNCINFO, + * and the timeline's SYNCINFO for debugging). + */ + psSyncInfo = psTransferSyncData->psSyncInfo->psBase; + + /* The following code is mostly the same as the texture dependencies + * handling in SGXDoKickKM, but we have to copy it here because it + * must be run while the fence is 'locked' by sync_fence_fdget. 
+ */ + + PVR_TTRACE_SYNC_OBJECT(PVRSRV_TRACE_GROUP_TRANSFER, TRANSFER_TOKEN_SRC_SYNC, + psSyncInfo, PVRSRV_SYNCOP_SAMPLE); + + CopyKernelSyncInfoToDeviceSyncObject(psSyncInfo, &asDevSyncs[0]); + CopyKernelSyncInfoToDeviceSyncObject(psTransferSyncData->psTimeline->psSyncInfo->psBase, + &asDevSyncs[1]); + + /* Treat fence TQs as write operations */ + psSyncInfo->psSyncData->ui32WriteOpsPending++; + psTransferSyncData->psTimeline->psSyncInfo->psBase->psSyncData->ui32WriteOpsPending++; + + /* Finally, patch the sync back into the input array. + * NOTE: The syncs are protected here by the defer-free worker. + */ + ahSyncs[0] = psSyncInfo; + ahSyncs[1] = psTransferSyncData->psTimeline->psSyncInfo->psBase; + + /* Updating this allows the PDUMP handling and ROP rollbacks to work + * correctly in SGXDoKickKM. + */ + *pui32NumSrcSyncs = 2; + + fput(psTransferSyncData->psFile); +err_out: + return eError; +} + + +/* NOTE: This returns an array of sync_fences which need to be 'put' + * or they will leak. 
+ */ +/* Display side patching */ +IMG_INTERNAL PVRSRV_ERROR +PVRSyncFencesToSyncInfos(PVRSRV_KERNEL_SYNC_INFO *apsSyncs[], + IMG_UINT32 *pui32NumSyncs, +#if defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) + struct sync_fence *apsFence[SGX_MAX_SRC_SYNCS_TA] +#else /* defined(PVR_ANDROID_NATIVE_WINDOW_HAS_FENCE) */ + struct dma_fence *apsFence[SGX_MAX_SRC_SYNCS_TA] +#endif + ) +{ + PVRSRV_KERNEL_SYNC_INFO *apsSyncInfo[SGX_MAX_SRC_SYNCS_TA]; + IMG_UINT32 i, ui32NumRealSrcSyncs; + PVRSRV_ERROR eError = PVRSRV_OK; + +#if defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) + memset(apsFence, 0, sizeof(struct sync_fence *) * SGX_MAX_SRC_SYNCS_TA); +#else /* defined(PVR_ANDROID_NATIVE_WINDOW_HAS_FENCE) */ + memset(apsFence, 0, sizeof(struct dma_fence *) * SGX_MAX_SRC_SYNCS_TA); +#endif + + if(!ExpandAndDeDuplicateFenceSyncs(*pui32NumSyncs, + (IMG_HANDLE *)apsSyncs, + *pui32NumSyncs, + apsFence, + &ui32NumRealSrcSyncs, + apsSyncInfo)) + { + for(i = 0; i < SGX_MAX_SRC_SYNCS_TA && apsFence[i]; i++) +#if defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) + sync_fence_put(apsFence[i]); +#else /* defined(PVR_ANDROID_NATIVE_WINDOW_HAS_FENCE) */ + dma_fence_put(apsFence[i]); +#endif + + return PVRSRV_ERROR_HANDLE_NOT_FOUND; + } + + /* We don't expect to see merged syncs here. Abort if that happens. + * Allow through cases where the same fence was specified more than + * once -- we can handle that without reallocation of memory. 
+ */ + PVR_ASSERT(ui32NumRealSrcSyncs <= *pui32NumSyncs); + + for(i = 0; i < ui32NumRealSrcSyncs; i++) + apsSyncs[i] = apsSyncInfo[i]; + + *pui32NumSyncs = ui32NumRealSrcSyncs; + //PVR_DPF((PVR_DBG_ERROR, "%s END HERE", __func__)); + return eError; +} + +/*PVRSRV_ERROR PVRSyncInitServices(void) +{ + IMG_BOOL bCreated, bShared[PVRSRV_MAX_CLIENT_HEAPS]; + PVRSRV_HEAP_INFO sHeapInfo[PVRSRV_MAX_CLIENT_HEAPS]; + IMG_UINT32 ui32ClientHeapCount = 0; + PVRSRV_PER_PROCESS_DATA *psPerProc; + PVRSRV_ERROR eError; + + LinuxLockMutexNested(&gPVRSRVLock, PVRSRV_LOCK_CLASS_BRIDGE); + + gsSyncServicesConnection.ui32Pid = OSGetCurrentProcessIDKM(); + + eError = PVRSRVProcessConnect(gsSyncServicesConnection.ui32Pid, 0); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRVProcessConnect failed", + __func__)); + goto err_unlock; + } + + psPerProc = PVRSRVFindPerProcessData(); + if (!psPerProc) + { + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRVFindPerProcessData failed", + __func__)); + goto err_disconnect; + } + + eError = PVRSRVAcquireDeviceDataKM(0, PVRSRV_DEVICE_TYPE_SGX, + &gsSyncServicesConnection.hDevCookie); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRVAcquireDeviceDataKM failed", + __func__)); + goto err_disconnect; + } + + if (!gsSyncServicesConnection.hDevCookie) + { + PVR_DPF((PVR_DBG_ERROR, "%s: hDevCookie is NULL", __func__)); + goto err_disconnect; + } + + eError = PVRSRVCreateDeviceMemContextKM(gsSyncServicesConnection.hDevCookie, + psPerProc, + &gsSyncServicesConnection.hDevMemContext, + &ui32ClientHeapCount, + &sHeapInfo[0], + &bCreated, + &bShared[0]); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRVCreateDeviceMemContextKM failed", + __func__)); + goto err_disconnect; + } + + if (!gsSyncServicesConnection.hDevMemContext) + { + PVR_DPF((PVR_DBG_ERROR, "%s: hDevMemContext is NULL", __func__)); + goto err_disconnect; + } + +err_unlock: + LinuxUnLockMutex(&gPVRSRVLock); + return eError; + +err_disconnect: + 
PVRSRVProcessDisconnect(gsSyncServicesConnection.ui32Pid); + goto err_unlock; +} + +void PVRSyncCloseServices(void) +{ + IMG_BOOL bDummy; + + LinuxLockMutexNested(&gPVRSRVLock, PVRSRV_LOCK_CLASS_BRIDGE); + + PVRSRVDestroyDeviceMemContextKM(gsSyncServicesConnection.hDevCookie, + gsSyncServicesConnection.hDevMemContext, + &bDummy); + gsSyncServicesConnection.hDevMemContext = NULL; + gsSyncServicesConnection.hDevCookie = NULL; + + PVRSRVProcessDisconnect(gsSyncServicesConnection.ui32Pid); + gsSyncServicesConnection.ui32Pid = 0; + + LinuxUnLockMutex(&gPVRSRVLock); +}*/ diff --git a/sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_sync_common.h b/sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_sync_common.h new file mode 100644 index 0000000..cdd9ed0 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_sync_common.h @@ -0,0 +1,90 @@ +/*************************************************************************/ /*! +@File pvr_sync_common.h +@Title Kernel sync driver +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Version numbers and strings for PVR Consumer services + components. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef _PVR_SYNC_COMMON_H +#define _PVR_SYNC_COMMON_H + +#include +#include + +#if !defined(__KERNEL__) || (LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)) +#include +#elif !defined(__KERNEL__) || (LINUX_VERSION_CODE < KERNEL_VERSION(4,9,0)) +#include <../drivers/staging/android/sync.h> +#else +#include <../drivers/dma-buf/sync_debug.h> +#endif + +#include "pvr_sync_user.h" +#include "servicesint.h" // PVRSRV_DEVICE_SYNC_OBJECT + +/* services4 internal interface */ + +int PVRSyncDeviceInit(void); +void PVRSyncDeviceDeInit(void); +void PVRSyncUpdateAllSyncs(void); + +IMG_BOOL +AddSyncInfoToArray(PVRSRV_KERNEL_SYNC_INFO *psSyncInfo, + IMG_UINT32 ui32SyncPointLimit, + IMG_UINT32 *pui32NumRealSyncs, + PVRSRV_KERNEL_SYNC_INFO *apsSyncInfo[]); +PVRSRV_ERROR +PVRSyncPatchCCBKickSyncInfos(IMG_HANDLE ahSyncs[SGX_MAX_SRC_SYNCS_TA], + PVRSRV_DEVICE_SYNC_OBJECT asDevSyncs[SGX_MAX_SRC_SYNCS_TA], + IMG_UINT32 *pui32NumSrcSyncs); +PVRSRV_ERROR +PVRSyncPatchTransferSyncInfos(IMG_HANDLE ahSyncs[SGX_MAX_SRC_SYNCS_TA], + PVRSRV_DEVICE_SYNC_OBJECT asDevSyncs[SGX_MAX_SRC_SYNCS_TA], + IMG_UINT32 *pui32NumSrcSyncs); +PVRSRV_ERROR +PVRSyncFencesToSyncInfos(PVRSRV_KERNEL_SYNC_INFO *apsSyncs[], + IMG_UINT32 *pui32NumSyncs, +#if defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) + struct sync_fence *apsFence[SGX_MAX_SRC_SYNCS_TA] +#else + struct dma_fence *apsFence[SGX_MAX_SRC_SYNCS_TA] +#endif + ); +#endif /* _PVR_SYNC_COMMON_H */ diff --git a/sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_sync_dma_fence.c b/sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_sync_dma_fence.c new file mode 100644 index 0000000..75dc943 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_sync_dma_fence.c @@ -0,0 +1,812 @@ +/*************************************************************************/ /*! 
+@File pvr_sync2_dma_fence.c +@Title Kernel driver for Android's sync mechanism +@Codingstyle LinuxKernel +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Strictly Confidential. +*/ /**************************************************************************/ + +#include "pvr_sync2.h" +#include "pvr_sync_common.h" +#include "pvr_sync_user.h" + +#include "pvr_counting_timeline.h" + +/* FIXME: Proper interface file? */ +#include +struct sw_sync_create_fence_data { + __u32 value; + char name[32]; + __s32 fence; +}; +#define SW_SYNC_IOC_MAGIC 'W' +#define SW_SYNC_IOC_CREATE_FENCE \ + (_IOWR(SW_SYNC_IOC_MAGIC, 0, struct sw_sync_create_fence_data)) +#define SW_SYNC_IOC_INC _IOW(SW_SYNC_IOC_MAGIC, 1, __u32) + +#include +#include +#include +#include +#include +#include + +#define DEBUG_OUTPUT 1 +#ifdef DEBUG_OUTPUT +#define DPF(fmt, ...) PVR_DPF((PVR_DBG_ERROR, "pvr_sync_dma_fence: " fmt "\n", __VA_ARGS__)) +#else +#define DPF(fmt, ...) do {} while (0) +#endif + +static struct workqueue_struct *gpsWorkQueue; + +/* Linux work struct for workqueue.
*/ +static struct work_struct gsWork; + +static const struct file_operations pvr_sync_fops; + +static bool is_pvr_timeline(struct file *file) +{ + return file->f_op == &pvr_sync_fops; +} + +static struct pvr_sync_timeline *pvr_sync_timeline_fget(int fd) +{ + struct file *file = fget(fd); + + if (!file) + return NULL; + + if (!is_pvr_timeline(file)) { + fput(file); + return NULL; + } + + return file->private_data; +} + +struct PVR_ALLOC_SYNC_DATA *PVRSyncAllocFDGet(int fd) +{ + struct file *file = fget(fd); + if (!file) + return NULL; + + if (!is_pvr_timeline(file)) + { + fput(file); + return NULL; + } + + return file->private_data; +} + +/* ioctl and fops handling */ + +static int pvr_sync_open(struct inode *inode, struct file *file) +{ + struct pvr_fence_context *fence_context; + struct pvr_sync_timeline *timeline; + char task_comm[TASK_COMM_LEN]; + PVRSRV_ERROR eError; + int err = -ENOMEM; + + get_task_comm(task_comm, current); + + timeline = kzalloc(sizeof(*timeline), GFP_KERNEL); + if (!timeline) + goto err_out; + + strlcpy(timeline->name, task_comm, sizeof(timeline->name)); + + PVR_DPF((PVR_DBG_ERROR, "BG: %s: pvr_sync_open ", timeline->name)); + fence_context = pvr_fence_context_create(timeline->name); + if (!fence_context) { + pr_err("pvr_sync2: %s: pvr_fence_context_create failed\n", + __func__); + goto err_free_timeline; + } + + timeline->psSyncInfo = kmalloc(sizeof(struct PVR_SYNC_KERNEL_SYNC_INFO), GFP_KERNEL); + if(!timeline->psSyncInfo) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate " + "PVR_SYNC_KERNEL_SYNC_INFO", __func__)); + goto err_free_fence; + } + + LinuxLockMutexNested(&gPVRSRVLock, PVRSRV_LOCK_CLASS_BRIDGE); + eError = PVRSRVAllocSyncInfoKM(gsSyncServicesConnection.hDevCookie, + gsSyncServicesConnection.hDevMemContext, + &timeline->psSyncInfo->psBase); + LinuxUnLockMutex(&gPVRSRVLock); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate timeline syncinfo", + __func__)); + goto err_free_syncinfo; + } + + 
timeline->fence_context = fence_context; + timeline->file = file; + + file->private_data = timeline; + err = 0; +err_out: + return err; +err_free_syncinfo: + kfree(timeline->psSyncInfo); +err_free_fence: + pvr_fence_context_destroy(fence_context); +err_free_timeline: + kfree(timeline); + goto err_out; +} + +static int pvr_sync_close(struct inode *inode, struct file *file) +{ + struct pvr_sync_timeline *timeline = file->private_data; + + if (timeline->sw_timeline) + { + /* This makes sure any outstanding SW syncs are marked as + * complete at timeline close time. Otherwise it'll leak the + * timeline (as outstanding fences hold a ref) and possibly + * wedge the system is something is waiting on one of those + * fences + */ + pvr_counting_fence_timeline_force_complete(timeline->sw_timeline); + pvr_counting_fence_timeline_put(timeline->sw_timeline); + } + + pvr_fence_context_destroy(timeline->fence_context); + kfree(timeline); + + return 0; +} + +static long PVRSyncIOCTLCreate(struct pvr_sync_timeline *psTimeline, void __user *pvData) +{ + struct PVR_SYNC_KERNEL_SYNC_INFO *psProvidedSyncInfo = NULL; + struct PVR_ALLOC_SYNC_DATA *psAllocSyncData; + struct PVR_SYNC_CREATE_IOCTL_DATA sData; + int err = -EFAULT, iFd; + struct sync_fence *psFence; + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(4,2,0)) + iFd = get_unused_fd_flags(0); +#else + iFd = get_unused_fd(); +#endif + if (iFd < 0) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to find unused fd (%d)", + __func__, iFd)); + goto err_out; + } + + if (!access_ok(VERIFY_READ, pvData, sizeof(sData))) + goto err_put_fd; + + if (copy_from_user(&sData, pvData, sizeof(sData))) + goto err_put_fd; + + if (sData.allocdSyncInfo < 0) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Requested to create a fence from " + " an invalid alloc'd fd (%d)", __func__, + sData.allocdSyncInfo)); + goto err_put_fd; + } + + psAllocSyncData = PVRSyncAllocFDGet(sData.allocdSyncInfo); + if (!psAllocSyncData) { + pr_err("pvr_sync2: %s: Failed to open supplied file fd 
(%d)\n", + __func__, new_fence_timeline); + err = PVRSRV_ERROR_HANDLE_NOT_FOUND; + goto err_put_fd; + } + + /* Move the psSyncInfo to the newly created sync, to avoid attempting + * to create multiple syncs from the same allocation. + */ + psProvidedSyncInfo = psAllocSyncData->psSyncInfo; + psAllocSyncData->psSyncInfo = NULL; + + if (psProvidedSyncInfo == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Alloc'd sync info is null - " + "possibly already CREATEd?", __func__)); + fput(psAllocSyncData->psFile); + goto err_put_fd; + } + fput(psAllocSyncData->psFile); + + sData.name[sizeof(sData.name) - 1] = '\0'; + pvr_fence = pvr_fence_create(timeline->fence_context, sData.name, psProvidedSyncInfo); + if (!pvr_fence) { + pr_err("pvr_sync2: %s: Failed to create new pvr_fence\n", + __func__); + err = PVRSRV_ERROR_OUT_OF_MEMORY; + goto err_put_fd; + } + + sync_file = sync_file_create(&pvr_fence->base); + if (!sync_file) { + pr_err(FILE_NAME ": %s: Failed to create sync_file\n", + __func__); + err = PVRSRV_ERROR_OUT_OF_MEMORY; + goto err_destroy_fence; + } + fence_put(&pvr_fence->base); + + sData.fence = iFd; + + if (!access_ok(VERIFY_WRITE, pvData, sizeof(sData))) + { + goto err_destroy_fence; + } + + if (copy_to_user(pvData, &sData, sizeof(sData))) + { + goto err_destroy_fence; + } + + DPF("C( ): WOCVA=0x%.8X ROCVA=0x%.8X RO2CVA=0x%.8X F=%p %s", + psProvidedSyncInfo->psBase->sWriteOpsCompleteDevVAddr.uiAddr, + psProvidedSyncInfo->psBase->sReadOpsCompleteDevVAddr.uiAddr, + psProvidedSyncInfo->psBase->sReadOps2CompleteDevVAddr.uiAddr, + psFence, sData.name); + + fd_install(iFd, sync_file->file); + err = 0; +err_out: + return err; + +err_destroy_fence: + pvr_fence_destroy(pvr_fence); +err_put_fd: + put_unused_fd(iFd); + goto err_out; +} + +static long pvr_sync_ioctl_rename(struct pvr_sync_timeline *timeline, void __user *user_data) +{ + int err = 0; + struct PVR_SYNC_RENAME_IOCTL_DATA data; + + if (!access_ok(VERIFY_READ, user_data, sizeof(data))) { + err = -EFAULT; + goto err; + } 
+ + if (copy_from_user(&data, user_data, sizeof(data))) { + err = -EFAULT; + goto err; + } + + data.szName[sizeof(data.szName) - 1] = '\0'; + strlcpy(timeline->name, data.szName, sizeof(timeline->name)); + +err: + return err; +} + +static long pvr_sync_ioctl_force_sw_only(struct pvr_sync_timeline *timeline, + void **private_data) +{ + /* Already in SW mode? */ + if (timeline->sw_timeline) + return 0; + /* Create a sw_sync timeline with the old GPU timeline's name */ + timeline->sw_timeline = pvr_counting_fence_timeline_create( + timeline->name); + if (!timeline->sw_timeline) + return -ENOMEM; + + return 0; +} + +static long pvr_sync_ioctl_sw_create_fence(struct pvr_sync_timeline *timeline, + void __user *user_data) +{ + struct sw_sync_create_fence_data data; + struct sync_file *sync_file; + int fd = get_unused_fd_flags(0); + struct fence *fence; + int err = -EFAULT; + + if (fd < 0) { + pr_err("pvr_sync2: %s: Failed to find unused fd (%d)\n", + __func__, fd); + goto err_out; + } + + if (copy_from_user(&data, user_data, sizeof(data))) { + pr_err("pvr_sync2: %s: Failed copy from user", __func__); + goto err_put_fd; + } + + fence = pvr_counting_fence_create(timeline->sw_timeline, data.value); + if (!fence) { + pr_err("pvr_sync2: %s: Failed to create a sync point (%d)\n", + __func__, fd); + err = -ENOMEM; + goto err_put_fd; + } + + sync_file = sync_file_create(fence); + if (!sync_file) { + pr_err("pvr_sync2: %s: Failed to create a sync point (%d)\n", + __func__, fd); + err = -ENOMEM; + goto err_put_fence; + } + + data.fence = fd; + + if (copy_to_user(user_data, &data, sizeof(data))) { + pr_err("pvr_sync2: %s: Failed copy to user", __func__); + goto err_put_fence; + } + + fd_install(fd, sync_file->file); + err = 0; +err_out: + return err; +err_put_fence: + fence_put(fence); +err_put_fd: + put_unused_fd(fd); + goto err_out; +} + +static long pvr_sync_ioctl_sw_inc(struct pvr_sync_timeline *timeline, + void __user *user_data) +{ + u32 value; + + if (copy_from_user(&value, 
user_data, sizeof(value))) + return -EFAULT; + + pvr_counting_fence_timeline_inc(timeline->sw_timeline, value); + return 0; +} + +static long +PVRSyncIOCTLAlloc(struct pvr_sync_timeline *psTimeline, void __user *pvData) +{ + struct PVR_ALLOC_SYNC_DATA *psAllocSyncData; + int err = -EFAULT, iFd; + struct PVR_SYNC_ALLOC_IOCTL_DATA sData; + PVRSRV_SYNC_DATA *psSyncData; + struct file *psFile; + PVRSRV_ERROR eError; + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(4,2,0)) + iFd = get_unused_fd_flags(0); +#else + iFd = get_unused_fd(); +#endif + if (iFd < 0) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to find unused fd (%d)", + __func__, iFd)); + goto err_out; + } + + if (!access_ok(VERIFY_READ, pvData, sizeof(sData))) + goto err_put_fd; + + if (copy_from_user(&sData, pvData, sizeof(sData))) + goto err_put_fd; + + psAllocSyncData = kmalloc(sizeof(struct PVR_ALLOC_SYNC_DATA), GFP_KERNEL); + if (!psAllocSyncData) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate PVR_ALLOC_SYNC_DATA", + __func__)); + err = -ENOMEM; + goto err_put_fd; + } + + psAllocSyncData->psSyncInfo = + kmalloc(sizeof(struct PVR_SYNC_KERNEL_SYNC_INFO), GFP_KERNEL); + if (!psAllocSyncData->psSyncInfo) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate " + "PVR_SYNC_KERNEL_SYNC_INFO", __func__)); + err = -ENOMEM; + goto err_free_alloc_sync_data; + } + + LinuxLockMutexNested(&gPVRSRVLock, PVRSRV_LOCK_CLASS_BRIDGE); + eError = PVRSRVAllocSyncInfoKM(gsSyncServicesConnection.hDevCookie, + gsSyncServicesConnection.hDevMemContext, + &psAllocSyncData->psSyncInfo->psBase); + LinuxUnLockMutex(&gPVRSRVLock); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to alloc syncinfo (%d)", + __func__, eError)); + err = -ENOMEM; + goto err_free_sync_info; + } + + psFile = anon_inode_getfile("pvr_fence_alloc", + &pvr_sync_fops, psAllocSyncData, 0); + if (!psFile) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create anon inode", + __func__)); + err = -ENOMEM; + goto err_release_sync_info; + } + + 
sData.fence = iFd; + + /* Check if this timeline looks idle. If there are still TQs running + * on it, userspace shouldn't attempt any kind of power optimization + * (e.g. it must not dummy-process GPU fences). + * + * Determining idleness here is safe because the ALLOC and CREATE + * pvr_sync ioctls must be called under the gralloc module lock, so + * we can't be creating another new fence op while we are still + * processing this one. + * + * Take the bridge lock anyway so we can be sure that we read the + * timeline sync's pending value coherently. The complete value may + * be modified by the GPU, but worse-case we will decide we can't do + * the power optimization and will still be correct. + */ + LinuxLockMutexNested(&gPVRSRVLock, PVRSRV_LOCK_CLASS_BRIDGE); + + psSyncData = psTimeline->psSyncInfo->psBase->psSyncData; + if(psSyncData->ui32WriteOpsPending == psSyncData->ui32WriteOpsComplete) + sData.bTimelineIdle = IMG_TRUE; + else + sData.bTimelineIdle = IMG_FALSE; + + LinuxUnLockMutex(&gPVRSRVLock); + + if (!access_ok(VERIFY_WRITE, pvData, sizeof(sData))) + goto err_release_file; + + if (copy_to_user(pvData, &sData, sizeof(sData))) + goto err_release_file; + + psAllocSyncData->psTimeline = psTimeline; + psAllocSyncData->psFile = psFile; + + DPF("A( ): WOCVA=0x%.8X ROCVA=0x%.8X RO2CVA=0x%.8X", + psAllocSyncData->psSyncInfo->psBase->sWriteOpsCompleteDevVAddr.uiAddr, + psAllocSyncData->psSyncInfo->psBase->sReadOpsCompleteDevVAddr.uiAddr, + psAllocSyncData->psSyncInfo->psBase->sReadOps2CompleteDevVAddr.uiAddr); + + fd_install(iFd, psFile); + err = 0; +err_out: + return err; +err_release_sync_info: + PVRSRVReleaseSyncInfoKM(psAllocSyncData->psSyncInfo->psBase); +err_free_sync_info: + kfree(psAllocSyncData->psSyncInfo); +err_free_alloc_sync_data: + kfree(psAllocSyncData); +err_put_fd: + put_unused_fd(iFd); + goto err_out; +err_release_file: + fput(psFile); + put_unused_fd(iFd); + goto err_out; +} + +static long +pvr_sync_ioctl(struct file *file, unsigned int cmd, 
unsigned long __user arg) +{ + void __user *user_data = (void __user *)arg; + long err = -ENOTTY; + struct pvr_sync_timeline *pvr = file->private_data; + bool is_sw_timeline = pvr->sw_timeline != NULL; + + if (!is_sw_timeline) { + + switch (cmd) { + case PVR_SYNC_IOC_CREATE_FENCE: + err = PVRSyncIOCTLCreate(pvr, user_data); + break; + /*case PVR_SYNC_IOC_DEBUG_FENCE: + err = PVRSyncIOCTLDebug(pvr, user_data); + break;*/ + case PVR_SYNC_IOC_ALLOC_FENCE: + err = PVRSyncIOCTLAlloc(pvr, user_data); + break; + case PVR_SYNC_IOC_RENAME: + err = pvr_sync_ioctl_rename(pvr, user_data); + break; + case PVR_SYNC_IOC_FORCE_SW_ONLY: + err = pvr_sync_ioctl_force_sw_only(pvr, &file->private_data); + break; + default: + err = -ENOTTY; + } + } else { + + switch (cmd) { + case SW_SYNC_IOC_CREATE_FENCE: + err = pvr_sync_ioctl_sw_create_fence(pvr, user_data); + break; + case SW_SYNC_IOC_INC: + err = pvr_sync_ioctl_sw_inc(pvr, user_data); + break; + default: + err = -ENOTTY; + } + } + + return err; +} + +static void PVRSyncWorkQueueFunction(struct work_struct *data) +{ + PVRSRV_DEVICE_NODE *psDevNode = + (PVRSRV_DEVICE_NODE*)gsSyncServicesConnection.hDevCookie; + struct list_head sFreeList, *psEntry, *n; + unsigned long flags; + + /* We lock the bridge mutex here for two reasons. + * + * Firstly, the SGXScheduleProcessQueuesKM and PVRSRVReleaseSyncInfoKM + * functions require that they are called under lock. Multiple threads + * into services are not allowed. + * + * Secondly, we need to ensure that when processing the defer-free list, + * the PVRSyncIsSyncInfoInUse() function is called *after* any freed + * sync was attached as a HW dependency (had ROP/ROP2 taken). This is + * because for 'foreign' sync timelines we allocate a new object and + * mark it for deletion immediately. If the 'foreign' sync_pt signals + * before the kick ioctl has completed, we can block it from being + * prematurely freed by holding the bridge mutex. 
+ * + * NOTE: This code relies on the assumption that we can acquire a + * spinlock while a mutex is held and that other users of the spinlock + * do not need to hold the bridge mutex. + */ + LinuxLockMutexNested(&gPVRSRVLock, PVRSRV_LOCK_CLASS_BRIDGE); + + /* A completed SW operation may un-block the GPU */ + SGXScheduleProcessQueuesKM(psDevNode); + + /* We can't call PVRSRVReleaseSyncInfoKM directly in this loop because + * that will take the mmap mutex. We can't take mutexes while we have + * this list locked with a spinlock. So move all the items we want to + * free to another, local list (no locking required) and process it + * in a second loop. + */ + + INIT_LIST_HEAD(&sFreeList); + spin_lock_irqsave(&gSyncInfoFreeListLock, flags); + list_for_each_safe(psEntry, n, &gSyncInfoFreeList) + { + struct PVR_SYNC_KERNEL_SYNC_INFO *psSyncInfo = + container_of(psEntry, struct PVR_SYNC_KERNEL_SYNC_INFO, sHead); + + if(!PVRSyncIsSyncInfoInUse(psSyncInfo->psBase)) + list_move_tail(psEntry, &sFreeList); + + } + spin_unlock_irqrestore(&gSyncInfoFreeListLock, flags); + + list_for_each_safe(psEntry, n, &sFreeList) + { + struct PVR_SYNC_KERNEL_SYNC_INFO *psSyncInfo = + container_of(psEntry, struct PVR_SYNC_KERNEL_SYNC_INFO, sHead); + + list_del(psEntry); + + DPF("F(d): WOCVA=0x%.8X ROCVA=0x%.8X RO2CVA=0x%.8X", + psSyncInfo->psBase->sWriteOpsCompleteDevVAddr.uiAddr, + psSyncInfo->psBase->sReadOpsCompleteDevVAddr.uiAddr, + psSyncInfo->psBase->sReadOps2CompleteDevVAddr.uiAddr); + + PVRSRVReleaseSyncInfoKM(psSyncInfo->psBase); + psSyncInfo->psBase = NULL; + + kfree(psSyncInfo); + } + + LinuxUnLockMutex(&gPVRSRVLock); +} + +static const struct file_operations pvr_sync_fops = { + .owner = THIS_MODULE, + .open = pvr_sync_open, + .release = pvr_sync_close, + .unlocked_ioctl = pvr_sync_ioctl, + .compat_ioctl = pvr_sync_ioctl, +}; + +static struct miscdevice pvr_sync_device = { + .minor = MISC_DYNAMIC_MINOR, + .name = "pvr_sync", + .fops = &pvr_sync_fops, +}; + +IMG_INTERNAL +int 
PVRSyncDeviceInit(void) +{ + int err = -1; + + DPF("%s", __func__); + + if(PVRSyncInitServices() != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to initialise services", + __func__)); + goto err_out; + } + + gsSyncServicesConnection.foreign_fence_context = pvr_fence_context_create("foreign_sync"); + if (!pvr_sync_data.foreign_fence_context) { + pr_err("pvr_sync2: %s: Failed to create foreign sync context\n", + __func__); + error = PVRSRV_ERROR_OUT_OF_MEMORY; + goto err_out; + } + + gpsWorkQueue = create_freezable_workqueue("pvr_sync_workqueue"); + if(!gpsWorkQueue) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create pvr_sync workqueue", + __func__)); + goto err_deinit_services; + } + + INIT_WORK(&gsWork, PVRSyncWorkQueueFunction); + + err = misc_register(&pvr_sync_device); + if(err) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to register pvr_sync misc " + "device (err=%d)", __func__, err)); + goto err_deinit_services; + } + error = PVRSRV_OK; + +err_out: + return err; +err_deinit_services: + pvr_fence_context_destroy(pvr_sync_data.foreign_fence_context); + PVRSyncCloseServices(); + goto err_out; +} + +void PVRSyncDeviceDeInit(void) +{ + DPF("%s", __func__); + misc_deregister(&pvr_sync_device); + pvr_fence_context_destroy(pvr_sync_data.foreign_fence_context); + PVRSyncCloseServices(); +} + +struct pvr_counting_fence_timeline *pvr_sync_get_sw_timeline(int fd) +{ + struct pvr_sync_timeline *timeline; + struct pvr_counting_fence_timeline *sw_timeline = NULL; + + timeline = pvr_sync_timeline_fget(fd); + if (!timeline) + return NULL; + + sw_timeline = pvr_counting_fence_timeline_get(timeline->sw_timeline); + + pvr_sync_timeline_fput(timeline); + return sw_timeline; +} + +IMG_BOOL +ExpandAndDeDuplicateFenceSyncs(IMG_UINT32 ui32NumSyncs, + IMG_HANDLE aiFenceFds[], + IMG_UINT32 ui32SyncPointLimit, + struct sync_fence *apsFence[], + IMG_UINT32 *pui32NumRealSyncs, + PVRSRV_KERNEL_SYNC_INFO *apsSyncInfo[]) +{ + IMG_UINT32 i, j, ui32FenceIndex = 0; + IMG_BOOL bRet = 
IMG_TRUE; + struct sync_pt *psPt; + + *pui32NumRealSyncs = 0; + + for(i = 0; i < ui32NumSyncs; i++) + { + PVRSRV_KERNEL_SYNC_INFO *psSyncInfo; + struct pvr_fence *fence; + + /* Skip any invalid fence file descriptors without error */ + if((IMG_INT32)aiFenceFds[i] < 0) + continue; + + /* By converting a file descriptor to a struct sync_fence, we are + * taking a reference on the fence. We don't want the fence to go + * away until we have submitted the command, even if it signals + * before we dispatch the command, or the timeline(s) are destroyed. + * + * This reference should be released by the caller of this function + * once hardware operations have been scheduled on the GPU sync_pts + * participating in this fence. When our MISR is scheduled, the + * defer-free list will be processed, cleaning up the SYNCINFO. + * + * Note that this reference *isn't* enough for non-GPU sync_pts. + * We'll take another reference on the fence for those operations + * later (the life-cycle requirements there are totally different). + * + * Fence lookup may fail here if the fd became invalid since it was + * patched in userspace. That's really a userspace driver bug, so + * just fail here instead of not synchronizing. + */ + apsFence[ui32FenceIndex] = sync_file_get_fence((IMG_INT32)aiFenceFds[i]); + if(!apsFence[ui32FenceIndex]) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get fence from fd=%d", + __func__, (IMG_SIZE_T)aiFenceFds[i])); + bRet = IMG_FALSE; + goto err_out; + } + + /* If this fence has any points from foreign timelines, we need to + * allocate a 'shadow' SYNCINFO and update it in software ourselves, + * so the ukernel can test the readiness of the dependency. + * + * It's tempting to just handle all fences like this (since most of + * the time they *will* be merged with sw_sync) but such 'shadow' + * syncs are slower. 
This is because we need to wait for the MISR to + * schedule to update the GPU part of the fence (normally the ukernel + * would be able to make the update directly). + */ + fence = to_pvr_fence(apsFence[ui32FenceIndex]); + if(!fence) + { + fence = pvr_fence_create_from_fence(gsSyncServicesConnection.foreign_fence_context, apsFence[ui32FenceIndex], "foreign"); + if(fence) + { + if(!AddSyncInfoToArray(fence->psSyncData->psSyncInfo, ui32SyncPointLimit, + pui32NumRealSyncs, apsSyncInfo)) + { + /* Soft-fail. Stop synchronizing. */ + goto err_out; + } + } + } + else + { + /* Walk the current list of points and make sure this isn't a + * duplicate. Duplicates will deadlock. + */ + for(j = 0; j < *pui32NumRealSyncs; j++) + { + /* The point is from a different timeline so we must use it */ + if(!PVRSyncIsDuplicate(apsSyncInfo[j], fence->psSyncData->psSyncInfo)) + continue; + + /* There's no need to bump the real sync count as we either + * ignored the duplicate or replaced a previously counted + * entry. + */ + break; + } + if(j == *pui32NumRealSyncs) + { + /* It's not a duplicate; moving on.. */ + if(!AddSyncInfoToArray(psSyncInfo, ui32SyncPointLimit, + pui32NumRealSyncs, apsSyncInfo)) + goto err_out; + } + } + ui32FenceIndex++; + } + +err_out: + return bRet; +} diff --git a/sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_uaccess.h b/sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_uaccess.h new file mode 100644 index 0000000..10e289b --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/env/linux/pvr_uaccess.h @@ -0,0 +1,89 @@ +/*************************************************************************/ /*! +@Title Utility functions for user space access +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below.
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#ifndef __PVR_UACCESS_H__ +#define __PVR_UACCESS_H__ + +#include +#include +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)) +#ifndef AUTOCONF_INCLUDED +#include +#endif +#endif + +#include +#include + +static inline unsigned long pvr_copy_to_user(void __user *pvTo, const void *pvFrom, unsigned long ulBytes) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33)) + if (access_ok(VERIFY_WRITE, pvTo, ulBytes)) + { + return __copy_to_user(pvTo, pvFrom, ulBytes); + } + return ulBytes; +#else + return copy_to_user(pvTo, pvFrom, ulBytes); +#endif +} + +static inline unsigned long pvr_copy_from_user(void *pvTo, const void __user *pvFrom, unsigned long ulBytes) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33)) + /* + * The compile time correctness checking introduced for copy_from_user in + * Linux 2.6.33 isn't fully compatible with our usage of the function. + */ + if (access_ok(VERIFY_READ, pvFrom, ulBytes)) + { + return __copy_from_user(pvTo, pvFrom, ulBytes); + } + return ulBytes; +#else + return copy_from_user(pvTo, pvFrom, ulBytes); +#endif +} + +#define pvr_put_user put_user +#define pvr_get_user get_user + +#endif /* __PVR_UACCESS_H__ */ + diff --git a/sgx_km/eurasia_km/services4/srvkm/env/linux/pvrsrv_sync_server.h b/sgx_km/eurasia_km/services4/srvkm/env/linux/pvrsrv_sync_server.h new file mode 100644 index 0000000..277d4b9 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/env/linux/pvrsrv_sync_server.h @@ -0,0 +1,29 @@ +#ifndef _PVRSRV_SYNC_SERVER_H_ +#define _PVRSRV_SYNC_SERVER_H_ + +#include "img_types.h" + +#define SYNC_SW_TIMELINE_MAX_LENGTH 32 +#define SYNC_SW_FENCE_MAX_LENGTH 32 + +/*****************************************************************************/ +/* */ +/* SW TIMELINE SPECIFIC FUNCTIONS */ +/* */ +/*****************************************************************************/ + +struct dma_fence* SyncSWTimelineFenceCreateKM(IMG_INT32 iSWTimeline, + 
IMG_UINT32 ui32NextSyncPtVal, + const IMG_CHAR *pszFenceName); + +PVRSRV_ERROR SyncSWTimelineAdvanceKM(IMG_PVOID pvSWTimelineObj); + +PVRSRV_ERROR SyncSWTimelineReleaseKM(IMG_PVOID pvSWTimelineObj); + +PVRSRV_ERROR SyncSWTimelineFenceReleaseKM(IMG_PVOID i32SWFenceObj); + +PVRSRV_ERROR SyncSWGetTimelineObj(IMG_INT32 iSWTimeline, IMG_PVOID *ppvSWTimelineObj); + +PVRSRV_ERROR SyncSWGetFenceObj(IMG_INT32 iSWFence, IMG_PVOID *ppvSWFenceObj); + +#endif /* _PVRSRV_SYNC_SERVER_H_ */ diff --git a/sgx_km/eurasia_km/services4/srvkm/env/linux/systrace.c b/sgx_km/eurasia_km/services4/srvkm/env/linux/systrace.c new file mode 100644 index 0000000..f09eba4 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/env/linux/systrace.c @@ -0,0 +1,376 @@ +/*************************************************************************/ /*! +@Title Systrace related functions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "systrace.h" + +#define CREATE_TRACE_POINTS +#include + +#include +#include +#include + +#include "img_types.h" + +#if defined(EUR_CR_TIMER) + +/*Kernel debugfs variables*/ +#define PVRSRV_SYSTRACE_TIMEINDEX_LIMIT 32 +static bool capture_hwperfdata; +static struct dentry *pvrdir_ret; + +static PVRSRV_SYSTRACE_ERROR CreateJob(PVRSRV_SYSTRACE_DATA *psSystraceData, IMG_UINT32 ui32PID, IMG_UINT32 ui32FrameNum, IMG_UINT32 ui32RTData) +{ + PVRSRV_SYSTRACE_CONTEXT *psContext = NULL; + PVRSRV_SYSTRACE_JOB *psJob = NULL; + IMG_UINT32 i = 0; + + if(psSystraceData == NULL) + return PVRSRV_SYSTRACE_NOT_INITIALISED; + + /*Look for the PID in the context CB*/ + for(i = 0; i < 8; ++i) + { + if(psSystraceData->asSystraceContext[i].ui32PID == ui32PID) + { + psContext = &(psSystraceData->asSystraceContext[i]); + break; + } + } + + /*If we find it lets check its jobs, otherwise we create it*/ + if(psContext == NULL) + { + psSystraceData->ui32Index = (psSystraceData->ui32Index+1)%8; + + psSystraceData->asSystraceContext[psSystraceData->ui32Index].ui32CtxID = psSystraceData->ui32CurrentCtxID; + ++psSystraceData->ui32CurrentCtxID; + psSystraceData->asSystraceContext[psSystraceData->ui32Index].ui32PID = ui32PID; + psSystraceData->asSystraceContext[psSystraceData->ui32Index].ui32Start = 0; + psSystraceData->asSystraceContext[psSystraceData->ui32Index].ui32End = 0; + psSystraceData->asSystraceContext[psSystraceData->ui32Index].ui32CurrentJobID = 0; + + psContext = &(psSystraceData->asSystraceContext[psSystraceData->ui32Index]); + } + + /*This is just done during the first kick so it must not be in the job list*/ + /*JobID not found, we create it*/ + psJob = &(psContext->asJobs[psContext->ui32End]); + psJob->ui32JobID = psContext->ui32CurrentJobID; + ++psContext->ui32CurrentJobID; + + psJob->ui32FrameNum = ui32FrameNum; + psJob->ui32RTData = ui32RTData; + /*Advance the CB*/ + psContext->ui32End = 
(psContext->ui32End + 1)%16; + if(psContext->ui32End == psContext->ui32Start) + psContext->ui32Start = (psContext->ui32Start + 1)%16; + + return PVRSRV_SYSTRACE_OK; +} + +static PVRSRV_SYSTRACE_ERROR GetCtxAndJobID(PVRSRV_SYSTRACE_DATA *psSystraceData, IMG_UINT32 ui32PID, IMG_UINT32 ui32FrameNum, IMG_UINT32 ui32RTData, + IMG_UINT32 *pui32CtxID, IMG_UINT32 *pui32JobID) +{ + PVRSRV_SYSTRACE_CONTEXT *psContext = NULL; + //PVRSRV_SYSTRACE_JOB *psJob = NULL; + IMG_UINT32 i = 0; + + if(psSystraceData == NULL) + return PVRSRV_SYSTRACE_NOT_INITIALISED; + + /*Look for the PID in the context CB*/ + for(i = 0; i < 8; ++i) + { + if(psSystraceData->asSystraceContext[i].ui32PID == ui32PID) + { + psContext = &(psSystraceData->asSystraceContext[i]); + break; + } + } + /*If we find it lets check its jobs, otherwise we create it*/ + if(psContext == NULL) + { + /*Don't create anything here*/ + return PVRSRV_SYSTRACE_JOB_NOT_FOUND; + } + /*Look for the JobID in the jobs CB otherwise create it and return ID*/ + for(i = 0; i < 16; ++i) + { + if((psContext->asJobs[i].ui32FrameNum == ui32FrameNum) && + (psContext->asJobs[i].ui32RTData == ui32RTData)) + { + *pui32CtxID = psContext->ui32CtxID; + *pui32JobID = psContext->asJobs[i].ui32JobID; + return PVRSRV_SYSTRACE_OK; + } + } + /*Not found*/ + return PVRSRV_SYSTRACE_JOB_NOT_FOUND; +} + +void SystraceCreateFS(void) +{ + struct dentry *capture_sgx_hwperfdata_ret; + + pvrdir_ret = debugfs_create_dir("pvr", NULL); + capture_sgx_hwperfdata_ret = debugfs_create_bool("gpu_tracing_on", S_IFREG | S_IRUGO | S_IWUSR, pvrdir_ret, &capture_hwperfdata); +} + +void SystraceDestroyFS(void) +{ + debugfs_remove_recursive(pvrdir_ret); +} + +IMG_BOOL SystraceIsCapturingHWData(void) +{ + return capture_hwperfdata; +} + +void SystraceTAKick(PVRSRV_SGXDEV_INFO *psDevInfo, IMG_UINT32 ui32FrameNum, IMG_UINT32 ui32RTData, IMG_BOOL bIsFirstKick) +{ + IMG_UINT32 ui32PID = OSGetCurrentProcessIDKM(); + IMG_UINT32 ui32JobID = 0; + IMG_UINT32 ui32CtxID = 0; + 
PVRSRV_SYSTRACE_ERROR eError = PVRSRV_SYSTRACE_OK; + + if(psDevInfo->bSystraceInitialised) + { + if(bIsFirstKick) + { + eError = CreateJob(psDevInfo->psSystraceData, ui32PID, ui32FrameNum, ui32RTData); + if(eError != PVRSRV_SYSTRACE_OK) + { + PVR_DPF((PVR_DBG_WARNING,"Systrace: Error creating a Job")); + } + } + + eError = GetCtxAndJobID(psDevInfo->psSystraceData, ui32PID, ui32FrameNum, ui32RTData, &ui32CtxID, &ui32JobID); + + if(eError != PVRSRV_SYSTRACE_OK) + { + PVR_DPF((PVR_DBG_WARNING,"Systrace: Job not found")); + } + + trace_gpu_job_enqueue(ui32CtxID, ui32JobID, "TA"); + } +} + +void SystraceInitializeTimeCorr(PVRSRV_SGXDEV_INFO *psDevInfo) +{ + IMG_UINT32 ui32CurrentIndex = psDevInfo->psSystraceData->ui32TimeCorrIndex; + SGXMKIF_HOST_CTL *psSGXHostCtl = (SGXMKIF_HOST_CTL *)psDevInfo->psSGXHostCtl; + + if(psSGXHostCtl->ui32TicksAtPowerUp != 0) + { + IMG_UINT32 ui32Clocksx16Difference = 0; + IMG_UINT64 ui64TimeDifference = 0; + IMG_UINT64 ui64HostTime = 0; + IMG_UINT32 ui32SGXClocksx16 = 0; + IMG_UINT32 ui32ClockMultiplier = 0; + + ui64HostTime = sched_clock(); + ui32SGXClocksx16 = OSReadHWReg(psDevInfo->pvRegsBaseKM, SGX_MP_CORE_SELECT(EUR_CR_TIMER,0)); + + if(ui32SGXClocksx16 > psSGXHostCtl->ui32TicksAtPowerUp) + { + + /* Get the ui32ClockMultipliertiplier per 1us*/ + ui32ClockMultiplier = (psDevInfo->ui32CoreClockSpeed) / (1000*1000); + + ui32Clocksx16Difference = ui32SGXClocksx16 - psSGXHostCtl->ui32TicksAtPowerUp; + + /* Multiply it by 16 and 1000 to convert from us to ns + * Breaking it in two steps to avoid overflow */ + ui64TimeDifference = (16 * ui32Clocksx16Difference) / ui32ClockMultiplier; + ui64TimeDifference = (unsigned long long)1000 * ui64TimeDifference; + + psDevInfo->psSystraceData->asTimeCorrArray[ui32CurrentIndex].ui64HostTime = ui64HostTime - ui64TimeDifference; + psDevInfo->psSystraceData->asTimeCorrArray[ui32CurrentIndex].ui32SGXClocksx16 = psSGXHostCtl->ui32TicksAtPowerUp; + + ui32CurrentIndex = (ui32CurrentIndex + 1) % 
PVRSRV_SYSTRACE_TIMEINDEX_LIMIT; + } + } + + /* Initialize with current GPU ticks and host clock */ + psDevInfo->psSystraceData->asTimeCorrArray[ui32CurrentIndex].ui64HostTime = sched_clock(); + psDevInfo->psSystraceData->asTimeCorrArray[ui32CurrentIndex].ui32SGXClocksx16 = OSReadHWReg(psDevInfo->pvRegsBaseKM, SGX_MP_CORE_SELECT(EUR_CR_TIMER,0)); + + psDevInfo->psSystraceData->ui32TimeCorrIndex = ui32CurrentIndex; + psSGXHostCtl->ui32SystraceIndex = psDevInfo->psSystraceData->ui32TimeCorrIndex; + psSGXHostCtl->ui32TicksAtPowerUp = 0; +} + +void SystraceUpdateTimeCorr(PVRSRV_SGXDEV_INFO *psDevInfo, IMG_UINT32 ui32ClockMultiplier) +{ + IMG_UINT32 ui32LastIndex = psDevInfo->psSystraceData->ui32TimeCorrIndex; + IMG_UINT32 ui32CurrentIndex = 0; + SGXMKIF_HOST_CTL *psSGXHostCtl = (SGXMKIF_HOST_CTL *)psDevInfo->psSGXHostCtl; + + if(psSGXHostCtl->ui32SGXPoweredOn) + { + if((psDevInfo->psSystraceData->bLastPowerDown == IMG_TRUE) && (psSGXHostCtl->ui32TicksAtPowerUp != 0)) + { + IMG_UINT32 ui32Clocksx16Difference = 0; + IMG_UINT64 ui64TimeDifference = 0; + IMG_UINT64 ui64HostTime = 0; + IMG_UINT32 ui32SGXClocksx16 = 0; + + ui64HostTime = sched_clock(); + ui32SGXClocksx16 = OSReadHWReg(psDevInfo->pvRegsBaseKM, SGX_MP_CORE_SELECT(EUR_CR_TIMER,0)); + + if(ui32SGXClocksx16 > psSGXHostCtl->ui32TicksAtPowerUp) + { + ui32CurrentIndex = ui32LastIndex; + + ui32Clocksx16Difference = ui32SGXClocksx16 - psSGXHostCtl->ui32TicksAtPowerUp; + + /* Multiply it by 16 and 1000 to convert from us to ns + * Breaking it in two steps to avoid overflow */ + ui64TimeDifference = (16 * ui32Clocksx16Difference) / ui32ClockMultiplier; + ui64TimeDifference = (unsigned long long)1000 * ui64TimeDifference; + + psDevInfo->psSystraceData->asTimeCorrArray[ui32CurrentIndex].ui64HostTime = ui64HostTime - ui64TimeDifference; + psDevInfo->psSystraceData->asTimeCorrArray[ui32CurrentIndex].ui32SGXClocksx16 = psSGXHostCtl->ui32TicksAtPowerUp; + } + } + ui32CurrentIndex = (ui32LastIndex + 1) % 
PVRSRV_SYSTRACE_TIMEINDEX_LIMIT; + + psDevInfo->psSystraceData->asTimeCorrArray[ui32CurrentIndex].ui64HostTime = sched_clock(); + psDevInfo->psSystraceData->asTimeCorrArray[ui32CurrentIndex].ui32SGXClocksx16 = OSReadHWReg(psDevInfo->pvRegsBaseKM, SGX_MP_CORE_SELECT(EUR_CR_TIMER,0)); + + psDevInfo->psSystraceData->ui32TimeCorrIndex = ui32CurrentIndex ; + psSGXHostCtl->ui32SystraceIndex = psDevInfo->psSystraceData->ui32TimeCorrIndex; + psSGXHostCtl->ui32TicksAtPowerUp = 0; + psDevInfo->psSystraceData->bLastPowerDown = IMG_FALSE; + } + else + { + if(psDevInfo->psSystraceData->bLastPowerDown != IMG_TRUE) + { + /* Device is powered down first time from powered up state, + * pre-increment index so it will be used for initial packets generated before first MISR after power up */ + + ui32CurrentIndex = (ui32LastIndex + 1) % PVRSRV_SYSTRACE_TIMEINDEX_LIMIT; + psDevInfo->psSystraceData->ui32TimeCorrIndex = ui32CurrentIndex ; + psSGXHostCtl->ui32SystraceIndex = psDevInfo->psSystraceData->ui32TimeCorrIndex; + } + psDevInfo->psSystraceData->bLastPowerDown = IMG_TRUE; + psSGXHostCtl->ui32TicksAtPowerUp = 0; + PVR_DPF((PVR_DBG_WARNING, "Systrace: Device PoweredOff, skipping update!")); + } +} + +void SystraceHWPerfPackets(PVRSRV_SGXDEV_INFO *psDevInfo, PVRSRV_SGX_HWPERF_CB_ENTRY* psSGXHWPerf, IMG_UINT32 ui32DataCount, IMG_UINT32 ui32SgxClockspeed) +{ + IMG_UINT32 ui32PID, ui32FrameNo, ui32EvtType, ui32RTData, ui32Clocksx16Difference, ui32ClockMultiplier, ui32SystraceIndex; + + IMG_UINT32 ui32SgxClocksx16 = 0; + IMG_UINT32 i = 0; + IMG_UINT64 ui64TimeDifference = 0; + IMG_UINT64 ui64PacketTimeStamp = 0; + + IMG_UINT32 ui32JobID = 0; + IMG_UINT32 ui32CtxID = 0; + + /* Get the ui32ClockMultipliertiplier per 1us*/ + ui32ClockMultiplier = (ui32SgxClockspeed)/(1000*1000); + + SystraceUpdateTimeCorr(psDevInfo, ui32ClockMultiplier); + + for(i = 0; i < ui32DataCount; ++i) + { + ui32SgxClocksx16 = psSGXHWPerf[i].ui32Clocksx16; + ui32EvtType = psSGXHWPerf[i].ui32Type; + ui32FrameNo = 
psSGXHWPerf[i].ui32FrameNo; + ui32PID = psSGXHWPerf[i].ui32PID; + ui32RTData = psSGXHWPerf[i].ui32RTData; + ui32SystraceIndex = psSGXHWPerf[i].ui32SystraceIndex; + + if ((ui32EvtType == PVRSRV_SGX_HWPERF_TYPE_TA_START) || + (ui32EvtType == PVRSRV_SGX_HWPERF_TYPE_TA_END) || + (ui32EvtType == PVRSRV_SGX_HWPERF_TYPE_3D_START) || + (ui32EvtType == PVRSRV_SGX_HWPERF_TYPE_3D_END)) + { + /*Get the JobID*/ + GetCtxAndJobID(psDevInfo->psSystraceData, ui32PID, ui32FrameNo, ui32RTData, &ui32CtxID, &ui32JobID); + if (ui32SgxClocksx16 < psDevInfo->psSystraceData->asTimeCorrArray[ui32SystraceIndex].ui32SGXClocksx16) + { + PVR_DPF((PVR_DBG_ERROR, "Systrace: Dropping current HW packet!")); + continue; + } + + ui32Clocksx16Difference = (ui32SgxClocksx16 - psDevInfo->psSystraceData->asTimeCorrArray[ui32SystraceIndex].ui32SGXClocksx16); + + /* Multiply it by 16 and 1000 to convert from us to ns + * Breaking it in two steps to avoid overflow */ + ui64TimeDifference = (16 * ui32Clocksx16Difference) / ui32ClockMultiplier; + ui64TimeDifference = (unsigned long long)1000 * ui64TimeDifference; + + /* Add the time diff to the last time-stamp, in nanoseconds*/ + ui64PacketTimeStamp = (unsigned long long) psDevInfo->psSystraceData->asTimeCorrArray[ui32SystraceIndex].ui64HostTime \ + + (unsigned long long)ui64TimeDifference; + + switch(ui32EvtType) + { + case PVRSRV_SGX_HWPERF_TYPE_TA_START: + trace_gpu_sched_switch("TA", ui64PacketTimeStamp, ui32CtxID, ui32FrameNo, ui32JobID); + break; + + case PVRSRV_SGX_HWPERF_TYPE_TA_END: + trace_gpu_sched_switch("TA", ui64PacketTimeStamp, 0, ui32FrameNo, ui32JobID); + break; + + case PVRSRV_SGX_HWPERF_TYPE_3D_START: + trace_gpu_sched_switch("3D", ui64PacketTimeStamp, ui32CtxID, ui32FrameNo, ui32JobID); + break; + + case PVRSRV_SGX_HWPERF_TYPE_3D_END: + trace_gpu_sched_switch("3D", ui64PacketTimeStamp, 0, ui32FrameNo, ui32JobID); + break; + + default: + break; + } + } + } +} +#endif /* #if defined(EUR_CR_TIMER) */ diff --git 
a/sgx_km/eurasia_km/services4/srvkm/env/linux/systrace.h b/sgx_km/eurasia_km/services4/srvkm/env/linux/systrace.h new file mode 100644 index 0000000..71c80dc --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/env/linux/systrace.h @@ -0,0 +1,70 @@ +/*************************************************************************/ /*! +@Title Systrace related functions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef _SYSTRACE_ +#define _SYSTRACE_ + +#include "img_defs.h" +#include "img_types.h" + +#include "services_headers.h" +#include "sgxapi_km.h" +#include "sgxinfo.h" +#include "sgxinfokm.h" + +typedef enum +{ + PVRSRV_SYSTRACE_OK = 0x00, + PVRSRV_SYSTRACE_NOT_INITIALISED, + PVRSRV_SYSTRACE_JOB_NOT_FOUND +} PVRSRV_SYSTRACE_ERROR; + + +void SystraceHWPerfPackets(PVRSRV_SGXDEV_INFO *psDevInfo, PVRSRV_SGX_HWPERF_CB_ENTRY* psSGXHWPerf, IMG_UINT32 ui32DataCount, IMG_UINT32 ui32SgxClockspeed); +void SystraceTAKick(PVRSRV_SGXDEV_INFO *psDevInfo, IMG_UINT32 ui32FrameNum, IMG_UINT32 ui32RTData, IMG_BOOL bIsFirstKick); +void SystraceInitializeTimeCorr(PVRSRV_SGXDEV_INFO *psDevInfo); +void SystraceUpdateTimeCorr(PVRSRV_SGXDEV_INFO *psDevInfo, IMG_UINT32 ui32ClockMultiplier); + +void SystraceCreateFS(void); +void SystraceDestroyFS(void); +IMG_BOOL SystraceIsCapturingHWData(void); + +#endif /* _SYSTRACE_ */ diff --git a/sgx_km/eurasia_km/services4/srvkm/hwdefs/mnemedefs.h b/sgx_km/eurasia_km/services4/srvkm/hwdefs/mnemedefs.h new file mode 100644 index 0000000..83a65f5 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/hwdefs/mnemedefs.h @@ -0,0 +1,117 @@ +/*************************************************************************/ /*! +@Title Hardware defs for MNEME. +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef _MNEMEDEFS_KM_H_ +#define _MNEMEDEFS_KM_H_ + +/* Register MNE_CR_CTRL */ +#define MNE_CR_CTRL 0x0D00 +#define MNE_CR_CTRL_BYP_CC_N_MASK 0x00010000U +#define MNE_CR_CTRL_BYP_CC_N_SHIFT 16 +#define MNE_CR_CTRL_BYP_CC_N_SIGNED 0 +#define MNE_CR_CTRL_BYP_CC_MASK 0x00008000U +#define MNE_CR_CTRL_BYP_CC_SHIFT 15 +#define MNE_CR_CTRL_BYP_CC_SIGNED 0 +#define MNE_CR_CTRL_USE_INVAL_REQ_MASK 0x00007800U +#define MNE_CR_CTRL_USE_INVAL_REQ_SHIFT 11 +#define MNE_CR_CTRL_USE_INVAL_REQ_SIGNED 0 +#define MNE_CR_CTRL_BYPASS_ALL_MASK 0x00000400U +#define MNE_CR_CTRL_BYPASS_ALL_SHIFT 10 +#define MNE_CR_CTRL_BYPASS_ALL_SIGNED 0 +#define MNE_CR_CTRL_BYPASS_MASK 0x000003E0U +#define MNE_CR_CTRL_BYPASS_SHIFT 5 +#define MNE_CR_CTRL_BYPASS_SIGNED 0 +#define MNE_CR_CTRL_PAUSE_MASK 0x00000010U +#define MNE_CR_CTRL_PAUSE_SHIFT 4 +#define MNE_CR_CTRL_PAUSE_SIGNED 0 +/* Register MNE_CR_USE_INVAL */ +#define MNE_CR_USE_INVAL 0x0D04 +#define MNE_CR_USE_INVAL_ADDR_MASK 0xFFFFFFFFU +#define MNE_CR_USE_INVAL_ADDR_SHIFT 0 +#define MNE_CR_USE_INVAL_ADDR_SIGNED 0 +/* Register MNE_CR_STAT */ +#define MNE_CR_STAT 0x0D08 +#define MNE_CR_STAT_PAUSED_MASK 0x00000400U +#define MNE_CR_STAT_PAUSED_SHIFT 10 +#define MNE_CR_STAT_PAUSED_SIGNED 0 +#define MNE_CR_STAT_READS_MASK 0x000003FFU +#define MNE_CR_STAT_READS_SHIFT 0 +#define MNE_CR_STAT_READS_SIGNED 0 +/* Register MNE_CR_STAT_STATS */ 
+#define MNE_CR_STAT_STATS 0x0D0C +#define MNE_CR_STAT_STATS_RST_MASK 0x000FFFF0U +#define MNE_CR_STAT_STATS_RST_SHIFT 4 +#define MNE_CR_STAT_STATS_RST_SIGNED 0 +#define MNE_CR_STAT_STATS_SEL_MASK 0x0000000FU +#define MNE_CR_STAT_STATS_SEL_SHIFT 0 +#define MNE_CR_STAT_STATS_SEL_SIGNED 0 +/* Register MNE_CR_STAT_STATS_OUT */ +#define MNE_CR_STAT_STATS_OUT 0x0D10 +#define MNE_CR_STAT_STATS_OUT_VALUE_MASK 0xFFFFFFFFU +#define MNE_CR_STAT_STATS_OUT_VALUE_SHIFT 0 +#define MNE_CR_STAT_STATS_OUT_VALUE_SIGNED 0 +/* Register MNE_CR_EVENT_STATUS */ +#define MNE_CR_EVENT_STATUS 0x0D14 +#define MNE_CR_EVENT_STATUS_INVAL_MASK 0x00000001U +#define MNE_CR_EVENT_STATUS_INVAL_SHIFT 0 +#define MNE_CR_EVENT_STATUS_INVAL_SIGNED 0 +/* Register MNE_CR_EVENT_CLEAR */ +#define MNE_CR_EVENT_CLEAR 0x0D18 +#define MNE_CR_EVENT_CLEAR_INVAL_MASK 0x00000001U +#define MNE_CR_EVENT_CLEAR_INVAL_SHIFT 0 +#define MNE_CR_EVENT_CLEAR_INVAL_SIGNED 0 +/* Register MNE_CR_CTRL_INVAL */ +#define MNE_CR_CTRL_INVAL 0x0D20 +#define MNE_CR_CTRL_INVAL_PREQ_PDS_MASK 0x00000008U +#define MNE_CR_CTRL_INVAL_PREQ_PDS_SHIFT 3 +#define MNE_CR_CTRL_INVAL_PREQ_PDS_SIGNED 0 +#define MNE_CR_CTRL_INVAL_PREQ_USEC_MASK 0x00000004U +#define MNE_CR_CTRL_INVAL_PREQ_USEC_SHIFT 2 +#define MNE_CR_CTRL_INVAL_PREQ_USEC_SIGNED 0 +#define MNE_CR_CTRL_INVAL_PREQ_CACHE_MASK 0x00000002U +#define MNE_CR_CTRL_INVAL_PREQ_CACHE_SHIFT 1 +#define MNE_CR_CTRL_INVAL_PREQ_CACHE_SIGNED 0 +#define MNE_CR_CTRL_INVAL_ALL_MASK 0x00000001U +#define MNE_CR_CTRL_INVAL_ALL_SHIFT 0 +#define MNE_CR_CTRL_INVAL_ALL_SIGNED 0 + +#endif /* _MNEMEDEFS_KM_H_ */ + diff --git a/sgx_km/eurasia_km/services4/srvkm/hwdefs/ocpdefs.h b/sgx_km/eurasia_km/services4/srvkm/hwdefs/ocpdefs.h new file mode 100644 index 0000000..07a6412 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/hwdefs/ocpdefs.h @@ -0,0 +1,308 @@ +/*************************************************************************/ /*! +@Title OCP HW definitions. 
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef _OCPDEFS_H_ +#define _OCPDEFS_H_ + +/* Register EUR_CR_OCP_REVISION */ +#define EUR_CR_OCP_REVISION 0xFE00 +#define EUR_CR_OCP_REVISION_REV_MASK 0xFFFFFFFFUL +#define EUR_CR_OCP_REVISION_REV_SHIFT 0 +#define EUR_CR_OCP_REVISION_REV_SIGNED 0 + +/* Register EUR_CR_OCP_HWINFO */ +#define EUR_CR_OCP_HWINFO 0xFE04 +#define EUR_CR_OCP_HWINFO_SYS_BUS_WIDTH_MASK 0x00000003UL +#define EUR_CR_OCP_HWINFO_SYS_BUS_WIDTH_SHIFT 0 +#define EUR_CR_OCP_HWINFO_SYS_BUS_WIDTH_SIGNED 0 + +#define EUR_CR_OCP_HWINFO_MEM_BUS_WIDTH_MASK 0x00000004UL +#define EUR_CR_OCP_HWINFO_MEM_BUS_WIDTH_SHIFT 2 +#define EUR_CR_OCP_HWINFO_MEM_BUS_WIDTH_SIGNED 0 + +/* Register EUR_CR_OCP_SYSCONFIG */ +#define EUR_CR_OCP_SYSCONFIG 0xFE10 +#define EUR_CR_OCP_SYSCONFIG_IDLE_MODE_MASK 0x0000000CUL +#define EUR_CR_OCP_SYSCONFIG_IDLE_MODE_SHIFT 2 +#define EUR_CR_OCP_SYSCONFIG_IDLE_MODE_SIGNED 0 + +#define EUR_CR_OCP_SYSCONFIG_STANDBY_MODE_MASK 0x00000030UL +#define EUR_CR_OCP_SYSCONFIG_STANDBY_MODE_SHIFT 4 +#define EUR_CR_OCP_SYSCONFIG_STANDBY_MODE_SIGNED 0 + +/* Register EUR_CR_OCP_IRQSTATUS_RAW_0 */ +#define EUR_CR_OCP_IRQSTATUS_RAW_0 0xFE24 +#define EUR_CR_OCP_IRQSTATUS_RAW_0_INIT_MASK 0x00000001UL +#define EUR_CR_OCP_IRQSTATUS_RAW_0_INIT_SHIFT 0 +#define EUR_CR_OCP_IRQSTATUS_RAW_0_INIT_SIGNED 0 + +/* Register EUR_CR_OCP_IRQSTATUS_RAW_1 */ +#define EUR_CR_OCP_IRQSTATUS_RAW_1 0xFE28 +#define 
EUR_CR_OCP_IRQSTATUS_RAW_1_TARGET_MASK 0x00000001UL +#define EUR_CR_OCP_IRQSTATUS_RAW_1_TARGET_SHIFT 0 +#define EUR_CR_OCP_IRQSTATUS_RAW_1_TARGET_SIGNED 0 + +/* Register EUR_CR_OCP_IRQSTATUS_RAW_2 */ +#define EUR_CR_OCP_IRQSTATUS_RAW_2 0xFE2C +#define EUR_CR_OCP_IRQSTATUS_RAW_2_SGXCORE_MASK 0x00000001UL +#define EUR_CR_OCP_IRQSTATUS_RAW_2_SGXCORE_SHIFT 0 +#define EUR_CR_OCP_IRQSTATUS_RAW_2_SGXCORE_SIGNED 0 + +/* Register EUR_CR_OCP_IRQSTATUS_0 */ +#define EUR_CR_OCP_IRQSTATUS_0 0xFE30 +#define EUR_CR_OCP_IRQSTATUS_0_INIT_MASK 0x00000001UL +#define EUR_CR_OCP_IRQSTATUS_0_INIT_SHIFT 0 +#define EUR_CR_OCP_IRQSTATUS_0_INIT_SIGNED 0 + +/* Register EUR_CR_OCP_IRQSTATUS_1 */ +#define EUR_CR_OCP_IRQSTATUS_1 0xFE34 +#define EUR_CR_OCP_IRQSTATUS_1_TARGET_MASK 0x00000001UL +#define EUR_CR_OCP_IRQSTATUS_1_TARGET_SHIFT 0 +#define EUR_CR_OCP_IRQSTATUS_1_TARGET_SIGNED 0 + +/* Register EUR_CR_OCP_IRQSTATUS_2 */ +#define EUR_CR_OCP_IRQSTATUS_2 0xFE38 +#define EUR_CR_OCP_IRQSTATUS_2_SGXCORE_MASK 0x00000001UL +#define EUR_CR_OCP_IRQSTATUS_2_SGXCORE_SHIFT 0 +#define EUR_CR_OCP_IRQSTATUS_2_SGXCORE_SIGNED 0 + +/* Register EUR_CR_OCP_IRQENABLE_SET_0 */ +#define EUR_CR_OCP_IRQENABLE_SET_0 0xFE3C +#define EUR_CR_OCP_IRQENABLE_SET_0_INIT_MASK 0x00000001UL +#define EUR_CR_OCP_IRQENABLE_SET_0_INIT_SHIFT 0 +#define EUR_CR_OCP_IRQENABLE_SET_0_INIT_SIGNED 0 + +/* Register EUR_CR_OCP_IRQENABLE_SET_1 */ +#define EUR_CR_OCP_IRQENABLE_SET_1 0xFE40 +#define EUR_CR_OCP_IRQENABLE_SET_1_TARGET_MASK 0x00000001UL +#define EUR_CR_OCP_IRQENABLE_SET_1_TARGET_SHIFT 0 +#define EUR_CR_OCP_IRQENABLE_SET_1_TARGET_SIGNED 0 + +/* Register EUR_CR_OCP_IRQENABLE_SET_2 */ +#define EUR_CR_OCP_IRQENABLE_SET_2 0xFE44 +#define EUR_CR_OCP_IRQENABLE_SET_2_SGXCORE_MASK 0x00000001UL +#define EUR_CR_OCP_IRQENABLE_SET_2_SGXCORE_SHIFT 0 +#define EUR_CR_OCP_IRQENABLE_SET_2_SGXCORE_SIGNED 0 + +/* Register EUR_CR_OCP_IRQENABLE_CLR_0 */ +#define EUR_CR_OCP_IRQENABLE_CLR_0 0xFE48 +#define EUR_CR_OCP_IRQENABLE_CLR_0_INIT_MASK 
0x00000001UL +#define EUR_CR_OCP_IRQENABLE_CLR_0_INIT_SHIFT 0 +#define EUR_CR_OCP_IRQENABLE_CLR_0_INIT_SIGNED 0 + +/* Register EUR_CR_OCP_IRQENABLE_CLR_1 */ +#define EUR_CR_OCP_IRQENABLE_CLR_1 0xFE4C +#define EUR_CR_OCP_IRQENABLE_CLR_1_TARGET_MASK 0x00000001UL +#define EUR_CR_OCP_IRQENABLE_CLR_1_TARGET_SHIFT 0 +#define EUR_CR_OCP_IRQENABLE_CLR_1_TARGET_SIGNED 0 + +/* Register EUR_CR_OCP_IRQENABLE_CLR_2 */ +#define EUR_CR_OCP_IRQENABLE_CLR_2 0xFE50 +#define EUR_CR_OCP_IRQENABLE_CLR_2_SGXCORE_MASK 0x00000001UL +#define EUR_CR_OCP_IRQENABLE_CLR_2_SGXCORE_SHIFT 0 +#define EUR_CR_OCP_IRQENABLE_CLR_2_SGXCORE_SIGNED 0 + +/* Register EUR_CR_OCP_PAGE_CONFIG */ +#define EUR_CR_OCP_PAGE_CONFIG 0xFF00 +#define EUR_CR_OCP_PAGE_CONFIG_MEM_PAGE_SIZE_MASK 0x00000001UL +#define EUR_CR_OCP_PAGE_CONFIG_MEM_PAGE_SIZE_SHIFT 0 +#define EUR_CR_OCP_PAGE_CONFIG_MEM_PAGE_SIZE_SIGNED 0 + +#define EUR_CR_OCP_PAGE_CONFIG_MEM_PAGE_CHECK_ENABLE_MASK 0x00000004UL +#define EUR_CR_OCP_PAGE_CONFIG_MEM_PAGE_CHECK_ENABLE_SHIFT 2 +#define EUR_CR_OCP_PAGE_CONFIG_MEM_PAGE_CHECK_ENABLE_SIGNED 0 + +#define EUR_CR_OCP_PAGE_CONFIG_SIZE_MASK 0x00000018UL +#define EUR_CR_OCP_PAGE_CONFIG_SIZE_SHIFT 3 +#define EUR_CR_OCP_PAGE_CONFIG_SIZE_SIGNED 0 + +/* Register EUR_CR_OCP_INTERRUPT_EVENT */ +#define EUR_CR_OCP_INTERRUPT_EVENT 0xFF04 +#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_RESP_UNEXPECTED_MASK 0x00000001UL +#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_RESP_UNEXPECTED_SHIFT 0 +#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_RESP_UNEXPECTED_SIGNED 0 + +#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_RESP_UNUSED_TAG_MASK 0x00000002UL +#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_RESP_UNUSED_TAG_SHIFT 1 +#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_RESP_UNUSED_TAG_SIGNED 0 + +#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_RESP_ERROR_MASK 0x00000004UL +#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_RESP_ERROR_SHIFT 2 +#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_RESP_ERROR_SIGNED 0 + +#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_PAGE_CROSS_ERROR_MASK 0x00000008UL 
+#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_PAGE_CROSS_ERROR_SHIFT 3 +#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_PAGE_CROSS_ERROR_SIGNED 0 + +#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_READ_TAG_FIFO_OVR_MASK 0x00000010UL +#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_READ_TAG_FIFO_OVR_SHIFT 4 +#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_READ_TAG_FIFO_OVR_SIGNED 0 + +#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_MEM_REQ_FIFO_OVR_MASK 0x00000020UL +#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_MEM_REQ_FIFO_OVR_SHIFT 5 +#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_MEM_REQ_FIFO_OVR_SIGNED 0 + +#define EUR_CR_OCP_INTERRUPT_EVENT_TARGET_RESP_FIFO_FULL_MASK 0x00000100UL +#define EUR_CR_OCP_INTERRUPT_EVENT_TARGET_RESP_FIFO_FULL_SHIFT 8 +#define EUR_CR_OCP_INTERRUPT_EVENT_TARGET_RESP_FIFO_FULL_SIGNED 0 + +#define EUR_CR_OCP_INTERRUPT_EVENT_TARGET_CMD_FIFO_FULL_MASK 0x00000200UL +#define EUR_CR_OCP_INTERRUPT_EVENT_TARGET_CMD_FIFO_FULL_SHIFT 9 +#define EUR_CR_OCP_INTERRUPT_EVENT_TARGET_CMD_FIFO_FULL_SIGNED 0 + +#define EUR_CR_OCP_INTERRUPT_EVENT_TARGET_INVALID_OCP_CMD_MASK 0x00000400UL +#define EUR_CR_OCP_INTERRUPT_EVENT_TARGET_INVALID_OCP_CMD_SHIFT 10 +#define EUR_CR_OCP_INTERRUPT_EVENT_TARGET_INVALID_OCP_CMD_SIGNED 0 + +/* Register EUR_CR_OCP_DEBUG_CONFIG */ +#define EUR_CR_OCP_DEBUG_CONFIG 0xFF08 +#define EUR_CR_OCP_DEBUG_CONFIG_FORCE_TARGET_IDLE_MASK 0x00000003UL +#define EUR_CR_OCP_DEBUG_CONFIG_FORCE_TARGET_IDLE_SHIFT 0 +#define EUR_CR_OCP_DEBUG_CONFIG_FORCE_TARGET_IDLE_SIGNED 0 + +#define EUR_CR_OCP_DEBUG_CONFIG_FORCE_INIT_IDLE_MASK 0x0000000CUL +#define EUR_CR_OCP_DEBUG_CONFIG_FORCE_INIT_IDLE_SHIFT 2 +#define EUR_CR_OCP_DEBUG_CONFIG_FORCE_INIT_IDLE_SIGNED 0 + +#define EUR_CR_OCP_DEBUG_CONFIG_FORCE_PASS_DATA_MASK 0x00000010UL +#define EUR_CR_OCP_DEBUG_CONFIG_FORCE_PASS_DATA_SHIFT 4 +#define EUR_CR_OCP_DEBUG_CONFIG_FORCE_PASS_DATA_SIGNED 0 + +#define EUR_CR_OCP_DEBUG_CONFIG_SELECT_INIT_IDLE_MASK 0x00000020UL +#define EUR_CR_OCP_DEBUG_CONFIG_SELECT_INIT_IDLE_SHIFT 5 +#define 
EUR_CR_OCP_DEBUG_CONFIG_SELECT_INIT_IDLE_SIGNED 0 + +#define EUR_CR_OCP_DEBUG_CONFIG_THALIA_INT_BYPASS_MASK 0x80000000UL +#define EUR_CR_OCP_DEBUG_CONFIG_THALIA_INT_BYPASS_SHIFT 31 +#define EUR_CR_OCP_DEBUG_CONFIG_THALIA_INT_BYPASS_SIGNED 0 + +/* Register EUR_CR_OCP_DEBUG_STATUS */ +#define EUR_CR_OCP_DEBUG_STATUS 0xFF0C +#define EUR_CR_OCP_DEBUG_STATUS_TARGET_MCONNECT_MASK 0x00000003UL +#define EUR_CR_OCP_DEBUG_STATUS_TARGET_MCONNECT_SHIFT 0 +#define EUR_CR_OCP_DEBUG_STATUS_TARGET_MCONNECT_SIGNED 0 + +#define EUR_CR_OCP_DEBUG_STATUS_TARGET_SCONNECT_MASK 0x00000004UL +#define EUR_CR_OCP_DEBUG_STATUS_TARGET_SCONNECT_SHIFT 2 +#define EUR_CR_OCP_DEBUG_STATUS_TARGET_SCONNECT_SIGNED 0 + +#define EUR_CR_OCP_DEBUG_STATUS_TARGET_SIDLEREQ_MASK 0x00000008UL +#define EUR_CR_OCP_DEBUG_STATUS_TARGET_SIDLEREQ_SHIFT 3 +#define EUR_CR_OCP_DEBUG_STATUS_TARGET_SIDLEREQ_SIGNED 0 + +#define EUR_CR_OCP_DEBUG_STATUS_TARGET_SDISCACK_MASK 0x00000030UL +#define EUR_CR_OCP_DEBUG_STATUS_TARGET_SDISCACK_SHIFT 4 +#define EUR_CR_OCP_DEBUG_STATUS_TARGET_SDISCACK_SIGNED 0 + +#define EUR_CR_OCP_DEBUG_STATUS_TARGET_SIDLEACK_MASK 0x000000C0UL +#define EUR_CR_OCP_DEBUG_STATUS_TARGET_SIDLEACK_SHIFT 6 +#define EUR_CR_OCP_DEBUG_STATUS_TARGET_SIDLEACK_SIGNED 0 + +#define EUR_CR_OCP_DEBUG_STATUS_INIT_MCONNECT0_MASK 0x00000300UL +#define EUR_CR_OCP_DEBUG_STATUS_INIT_MCONNECT0_SHIFT 8 +#define EUR_CR_OCP_DEBUG_STATUS_INIT_MCONNECT0_SIGNED 0 + +#define EUR_CR_OCP_DEBUG_STATUS_INIT_SCONNECT0_MASK 0x00000400UL +#define EUR_CR_OCP_DEBUG_STATUS_INIT_SCONNECT0_SHIFT 10 +#define EUR_CR_OCP_DEBUG_STATUS_INIT_SCONNECT0_SIGNED 0 + +#define EUR_CR_OCP_DEBUG_STATUS_INIT_SCONNECT1_MASK 0x00000800UL +#define EUR_CR_OCP_DEBUG_STATUS_INIT_SCONNECT1_SHIFT 11 +#define EUR_CR_OCP_DEBUG_STATUS_INIT_SCONNECT1_SIGNED 0 + +#define EUR_CR_OCP_DEBUG_STATUS_INIT_SCONNECT2_MASK 0x00001000UL +#define EUR_CR_OCP_DEBUG_STATUS_INIT_SCONNECT2_SHIFT 12 +#define EUR_CR_OCP_DEBUG_STATUS_INIT_SCONNECT2_SIGNED 0 + +#define 
EUR_CR_OCP_DEBUG_STATUS_INIT_MDISCACK_MASK 0x00006000UL +#define EUR_CR_OCP_DEBUG_STATUS_INIT_MDISCACK_SHIFT 13 +#define EUR_CR_OCP_DEBUG_STATUS_INIT_MDISCACK_SIGNED 0 + +#define EUR_CR_OCP_DEBUG_STATUS_INIT_MDISCREQ_MASK 0x00008000UL +#define EUR_CR_OCP_DEBUG_STATUS_INIT_MDISCREQ_SHIFT 15 +#define EUR_CR_OCP_DEBUG_STATUS_INIT_MDISCREQ_SIGNED 0 + +#define EUR_CR_OCP_DEBUG_STATUS_INIT_MWAIT_MASK 0x00010000UL +#define EUR_CR_OCP_DEBUG_STATUS_INIT_MWAIT_SHIFT 16 +#define EUR_CR_OCP_DEBUG_STATUS_INIT_MWAIT_SIGNED 0 + +#define EUR_CR_OCP_DEBUG_STATUS_INIT_MSTANDBY_MASK 0x00020000UL +#define EUR_CR_OCP_DEBUG_STATUS_INIT_MSTANDBY_SHIFT 17 +#define EUR_CR_OCP_DEBUG_STATUS_INIT_MSTANDBY_SIGNED 0 + +#define EUR_CR_OCP_DEBUG_STATUS_TARGET_CMD_OUT_MASK 0x001C0000UL +#define EUR_CR_OCP_DEBUG_STATUS_TARGET_CMD_OUT_SHIFT 18 +#define EUR_CR_OCP_DEBUG_STATUS_TARGET_CMD_OUT_SIGNED 0 + +#define EUR_CR_OCP_DEBUG_STATUS_WHICH_TARGET_REGISTER_MASK 0x03E00000UL +#define EUR_CR_OCP_DEBUG_STATUS_WHICH_TARGET_REGISTER_SHIFT 21 +#define EUR_CR_OCP_DEBUG_STATUS_WHICH_TARGET_REGISTER_SIGNED 0 + +#define EUR_CR_OCP_DEBUG_STATUS_RESP_ERROR_MASK 0x04000000UL +#define EUR_CR_OCP_DEBUG_STATUS_RESP_ERROR_SHIFT 26 +#define EUR_CR_OCP_DEBUG_STATUS_RESP_ERROR_SIGNED 0 + +#define EUR_CR_OCP_DEBUG_STATUS_CMD_FIFO_FULL_MASK 0x08000000UL +#define EUR_CR_OCP_DEBUG_STATUS_CMD_FIFO_FULL_SHIFT 27 +#define EUR_CR_OCP_DEBUG_STATUS_CMD_FIFO_FULL_SIGNED 0 + +#define EUR_CR_OCP_DEBUG_STATUS_RESP_FIFO_FULL_MASK 0x10000000UL +#define EUR_CR_OCP_DEBUG_STATUS_RESP_FIFO_FULL_SHIFT 28 +#define EUR_CR_OCP_DEBUG_STATUS_RESP_FIFO_FULL_SIGNED 0 + +#define EUR_CR_OCP_DEBUG_STATUS_TARGET_IDLE_MASK 0x20000000UL +#define EUR_CR_OCP_DEBUG_STATUS_TARGET_IDLE_SHIFT 29 +#define EUR_CR_OCP_DEBUG_STATUS_TARGET_IDLE_SIGNED 0 + +#define EUR_CR_OCP_DEBUG_STATUS_CMD_RESP_DEBUG_STATE_MASK 0x40000000UL +#define EUR_CR_OCP_DEBUG_STATUS_CMD_RESP_DEBUG_STATE_SHIFT 30 +#define EUR_CR_OCP_DEBUG_STATUS_CMD_RESP_DEBUG_STATE_SIGNED 0 + +#define 
EUR_CR_OCP_DEBUG_STATUS_CMD_DEBUG_STATE_MASK 0x80000000UL +#define EUR_CR_OCP_DEBUG_STATUS_CMD_DEBUG_STATE_SHIFT 31 +#define EUR_CR_OCP_DEBUG_STATUS_CMD_DEBUG_STATE_SIGNED 0 + + +#endif /* _OCPDEFS_H_ */ + +/***************************************************************************** + End of file (ocpdefs.h) +*****************************************************************************/ diff --git a/sgx_km/eurasia_km/services4/srvkm/hwdefs/sgx520defs.h b/sgx_km/eurasia_km/services4/srvkm/hwdefs/sgx520defs.h new file mode 100644 index 0000000..80c3363 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/hwdefs/sgx520defs.h @@ -0,0 +1,555 @@ +/*************************************************************************/ /*! +@Title Hardware defs for SGX520. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef _SGX520DEFS_KM_H_ +#define _SGX520DEFS_KM_H_ + +/* Register EUR_CR_CLKGATECTL */ +#define EUR_CR_CLKGATECTL 0x0000 +#define EUR_CR_CLKGATECTL_ISP_CLKG_MASK 0x00000030U +#define EUR_CR_CLKGATECTL_ISP_CLKG_SHIFT 4 +#define EUR_CR_CLKGATECTL_TSP_CLKG_MASK 0x00000300U +#define EUR_CR_CLKGATECTL_TSP_CLKG_SHIFT 8 +#define EUR_CR_CLKGATECTL_TA_CLKG_MASK 0x00003000U +#define EUR_CR_CLKGATECTL_TA_CLKG_SHIFT 12 +#define EUR_CR_CLKGATECTL_DPM_CLKG_MASK 0x00030000U +#define EUR_CR_CLKGATECTL_DPM_CLKG_SHIFT 16 +#define EUR_CR_CLKGATECTL_USE_CLKG_MASK 0x00300000U +#define EUR_CR_CLKGATECTL_USE_CLKG_SHIFT 20 +#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_MASK 0x01000000U +#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_SHIFT 24 +/* Register EUR_CR_CLKGATESTATUS */ +#define EUR_CR_CLKGATESTATUS 0x0004 +#define EUR_CR_CLKGATESTATUS_ISP_CLKS_MASK 0x00000010U +#define EUR_CR_CLKGATESTATUS_ISP_CLKS_SHIFT 4 +#define EUR_CR_CLKGATESTATUS_TSP_CLKS_MASK 0x00000100U +#define EUR_CR_CLKGATESTATUS_TSP_CLKS_SHIFT 8 +#define EUR_CR_CLKGATESTATUS_TA_CLKS_MASK 0x00001000U +#define EUR_CR_CLKGATESTATUS_TA_CLKS_SHIFT 12 +#define EUR_CR_CLKGATESTATUS_DPM_CLKS_MASK 0x00010000U +#define EUR_CR_CLKGATESTATUS_DPM_CLKS_SHIFT 16 +#define EUR_CR_CLKGATESTATUS_USE_CLKS_MASK 0x00100000U +#define EUR_CR_CLKGATESTATUS_USE_CLKS_SHIFT 20 +/* Register EUR_CR_CLKGATECTLOVR */ +#define EUR_CR_CLKGATECTLOVR 0x0008 +#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_MASK 0x00000030U +#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_SHIFT 4 +#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_MASK 0x00000300U +#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_SHIFT 8 +#define EUR_CR_CLKGATECTLOVR_TA_CLKO_MASK 0x00003000U +#define EUR_CR_CLKGATECTLOVR_TA_CLKO_SHIFT 12 +#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_MASK 0x00030000U +#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_SHIFT 16 +#define EUR_CR_CLKGATECTLOVR_USE_CLKO_MASK 0x00300000U +#define EUR_CR_CLKGATECTLOVR_USE_CLKO_SHIFT 20 +/* Register 
EUR_CR_CORE_ID */ +#define EUR_CR_CORE_ID 0x0010 +#define EUR_CR_CORE_ID_CONFIG_MASK 0x0000FFFFU +#define EUR_CR_CORE_ID_CONFIG_SHIFT 0 +#define EUR_CR_CORE_ID_ID_MASK 0xFFFF0000U +#define EUR_CR_CORE_ID_ID_SHIFT 16 +/* Register EUR_CR_CORE_REVISION */ +#define EUR_CR_CORE_REVISION 0x0014 +#define EUR_CR_CORE_REVISION_MAINTENANCE_MASK 0x000000FFU +#define EUR_CR_CORE_REVISION_MAINTENANCE_SHIFT 0 +#define EUR_CR_CORE_REVISION_MINOR_MASK 0x0000FF00U +#define EUR_CR_CORE_REVISION_MINOR_SHIFT 8 +#define EUR_CR_CORE_REVISION_MAJOR_MASK 0x00FF0000U +#define EUR_CR_CORE_REVISION_MAJOR_SHIFT 16 +#define EUR_CR_CORE_REVISION_DESIGNER_MASK 0xFF000000U +#define EUR_CR_CORE_REVISION_DESIGNER_SHIFT 24 +/* Register EUR_CR_DESIGNER_REV_FIELD1 */ +#define EUR_CR_DESIGNER_REV_FIELD1 0x0018 +#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_MASK 0xFFFFFFFFU +#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_SHIFT 0 +/* Register EUR_CR_DESIGNER_REV_FIELD2 */ +#define EUR_CR_DESIGNER_REV_FIELD2 0x001C +#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_MASK 0xFFFFFFFFU +#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_SHIFT 0 +/* Register EUR_CR_SOFT_RESET */ +#define EUR_CR_SOFT_RESET 0x0080 +#define EUR_CR_SOFT_RESET_BIF_RESET_MASK 0x00000001U +#define EUR_CR_SOFT_RESET_BIF_RESET_SHIFT 0 +#define EUR_CR_SOFT_RESET_DPM_RESET_MASK 0x00000004U +#define EUR_CR_SOFT_RESET_DPM_RESET_SHIFT 2 +#define EUR_CR_SOFT_RESET_TA_RESET_MASK 0x00000008U +#define EUR_CR_SOFT_RESET_TA_RESET_SHIFT 3 +#define EUR_CR_SOFT_RESET_USE_RESET_MASK 0x00000010U +#define EUR_CR_SOFT_RESET_USE_RESET_SHIFT 4 +#define EUR_CR_SOFT_RESET_ISP_RESET_MASK 0x00000020U +#define EUR_CR_SOFT_RESET_ISP_RESET_SHIFT 5 +#define EUR_CR_SOFT_RESET_TSP_RESET_MASK 0x00000040U +#define EUR_CR_SOFT_RESET_TSP_RESET_SHIFT 6 +/* Register EUR_CR_EVENT_HOST_ENABLE2 */ +#define EUR_CR_EVENT_HOST_ENABLE2 0x0110 +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_TA_MASK 0x00000010U +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_TA_SHIFT 4 
+#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_3D_MASK 0x00000008U +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_3D_SHIFT 3 +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_DL_MASK 0x00000004U +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_DL_SHIFT 2 +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_MASK 0x00000002U +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_SHIFT 1 +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_MASK 0x00000001U +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_SHIFT 0 +/* Register EUR_CR_EVENT_HOST_CLEAR2 */ +#define EUR_CR_EVENT_HOST_CLEAR2 0x0114 +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_TA_MASK 0x00000010U +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_TA_SHIFT 4 +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_3D_MASK 0x00000008U +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_3D_SHIFT 3 +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_DL_MASK 0x00000004U +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_DL_SHIFT 2 +#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_MASK 0x00000002U +#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_SHIFT 1 +#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_MASK 0x00000001U +#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_SHIFT 0 +/* Register EUR_CR_EVENT_STATUS2 */ +#define EUR_CR_EVENT_STATUS2 0x0118 +#define EUR_CR_EVENT_STATUS2_TRIG_TA_MASK 0x00000010U +#define EUR_CR_EVENT_STATUS2_TRIG_TA_SHIFT 4 +#define EUR_CR_EVENT_STATUS2_TRIG_3D_MASK 0x00000008U +#define EUR_CR_EVENT_STATUS2_TRIG_3D_SHIFT 3 +#define EUR_CR_EVENT_STATUS2_TRIG_DL_MASK 0x00000004U +#define EUR_CR_EVENT_STATUS2_TRIG_DL_SHIFT 2 +#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_MASK 0x00000002U +#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_SHIFT 1 +#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_MASK 0x00000001U +#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_SHIFT 0 +/* Register EUR_CR_EVENT_STATUS */ +#define EUR_CR_EVENT_STATUS 0x012C +#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_MASK 0x80000000U +#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_SHIFT 31 +#define EUR_CR_EVENT_STATUS_TIMER_MASK 0x20000000U 
+#define EUR_CR_EVENT_STATUS_TIMER_SHIFT 29 +#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_MASK 0x10000000U +#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_SHIFT 28 +#define EUR_CR_EVENT_STATUS_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000U +#define EUR_CR_EVENT_STATUS_MADD_CACHE_INVALCOMPLETE_SHIFT 26 +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000U +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25 +#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_MASK 0x01000000U +#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_SHIFT 24 +#define EUR_CR_EVENT_STATUS_ISP_END_TILE_MASK 0x00800000U +#define EUR_CR_EVENT_STATUS_ISP_END_TILE_SHIFT 23 +#define EUR_CR_EVENT_STATUS_DPM_INITEND_MASK 0x00400000U +#define EUR_CR_EVENT_STATUS_DPM_INITEND_SHIFT 22 +#define EUR_CR_EVENT_STATUS_OTPM_LOADED_MASK 0x00200000U +#define EUR_CR_EVENT_STATUS_OTPM_LOADED_SHIFT 21 +#define EUR_CR_EVENT_STATUS_OTPM_INV_MASK 0x00100000U +#define EUR_CR_EVENT_STATUS_OTPM_INV_SHIFT 20 +#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_MASK 0x00080000U +#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_SHIFT 19 +#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_MASK 0x00040000U +#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_SHIFT 18 +#define EUR_CR_EVENT_STATUS_ISP_HALT_MASK 0x00020000U +#define EUR_CR_EVENT_STATUS_ISP_HALT_SHIFT 17 +#define EUR_CR_EVENT_STATUS_ISP_VISIBILITY_FAIL_MASK 0x00010000U +#define EUR_CR_EVENT_STATUS_ISP_VISIBILITY_FAIL_SHIFT 16 +#define EUR_CR_EVENT_STATUS_BREAKPOINT_MASK 0x00008000U +#define EUR_CR_EVENT_STATUS_BREAKPOINT_SHIFT 15 +#define EUR_CR_EVENT_STATUS_SW_EVENT_MASK 0x00004000U +#define EUR_CR_EVENT_STATUS_SW_EVENT_SHIFT 14 +#define EUR_CR_EVENT_STATUS_TA_FINISHED_MASK 0x00002000U +#define EUR_CR_EVENT_STATUS_TA_FINISHED_SHIFT 13 +#define EUR_CR_EVENT_STATUS_TA_TERMINATE_MASK 0x00001000U +#define EUR_CR_EVENT_STATUS_TA_TERMINATE_SHIFT 12 +#define EUR_CR_EVENT_STATUS_TPC_CLEAR_MASK 0x00000800U +#define EUR_CR_EVENT_STATUS_TPC_CLEAR_SHIFT 11 +#define EUR_CR_EVENT_STATUS_TPC_FLUSH_MASK 
0x00000400U +#define EUR_CR_EVENT_STATUS_TPC_FLUSH_SHIFT 10 +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_MASK 0x00000200U +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_SHIFT 9 +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_MASK 0x00000100U +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_SHIFT 8 +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_MASK 0x00000080U +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_SHIFT 7 +#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_MASK 0x00000040U +#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_SHIFT 6 +#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_MASK 0x00000020U +#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_SHIFT 5 +#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_MASK 0x00000010U +#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_SHIFT 4 +#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_MASK 0x00000008U +#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_SHIFT 3 +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004U +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_SHIFT 2 +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002U +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_SHIFT 1 +#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_MASK 0x00000001U +#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_SHIFT 0 +/* Register EUR_CR_EVENT_HOST_ENABLE */ +#define EUR_CR_EVENT_HOST_ENABLE 0x0130 +#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_MASK 0x80000000U +#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_SHIFT 31 +#define EUR_CR_EVENT_HOST_ENABLE_TIMER_MASK 0x20000000U +#define EUR_CR_EVENT_HOST_ENABLE_TIMER_SHIFT 29 +#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_MASK 0x10000000U +#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_SHIFT 28 +#define EUR_CR_EVENT_HOST_ENABLE_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000U +#define EUR_CR_EVENT_HOST_ENABLE_MADD_CACHE_INVALCOMPLETE_SHIFT 26 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25 +#define 
EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_MASK 0x01000000U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_SHIFT 24 +#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_MASK 0x00800000U +#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_SHIFT 23 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_MASK 0x00400000U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_SHIFT 22 +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_MASK 0x00200000U +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_SHIFT 21 +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_MASK 0x00100000U +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_SHIFT 20 +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_MASK 0x00080000U +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_SHIFT 19 +#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_MASK 0x00040000U +#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_SHIFT 18 +#define EUR_CR_EVENT_HOST_ENABLE_ISP_HALT_MASK 0x00020000U +#define EUR_CR_EVENT_HOST_ENABLE_ISP_HALT_SHIFT 17 +#define EUR_CR_EVENT_HOST_ENABLE_ISP_VISIBILITY_FAIL_MASK 0x00010000U +#define EUR_CR_EVENT_HOST_ENABLE_ISP_VISIBILITY_FAIL_SHIFT 16 +#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_MASK 0x00008000U +#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_SHIFT 15 +#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_MASK 0x00004000U +#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_SHIFT 14 +#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_MASK 0x00002000U +#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_SHIFT 13 +#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_MASK 0x00001000U +#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_SHIFT 12 +#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_MASK 0x00000800U +#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_SHIFT 11 +#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_MASK 0x00000400U +#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_SHIFT 10 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_MASK 0x00000200U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_SHIFT 9 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_MASK 0x00000100U +#define 
EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_SHIFT 8 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_MASK 0x00000080U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_SHIFT 7 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_MASK 0x00000040U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_SHIFT 6 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_MASK 0x00000020U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_SHIFT 5 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_MASK 0x00000010U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_SHIFT 4 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_MASK 0x00000008U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_SHIFT 3 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_SHIFT 2 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_SHIFT 1 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_MASK 0x00000001U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_SHIFT 0 +/* Register EUR_CR_EVENT_HOST_CLEAR */ +#define EUR_CR_EVENT_HOST_CLEAR 0x0134 +#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_MASK 0x80000000U +#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_SHIFT 31 +#define EUR_CR_EVENT_HOST_CLEAR_TIMER_MASK 0x20000000U +#define EUR_CR_EVENT_HOST_CLEAR_TIMER_SHIFT 29 +#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_MASK 0x10000000U +#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_SHIFT 28 +#define EUR_CR_EVENT_HOST_CLEAR_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000U +#define EUR_CR_EVENT_HOST_CLEAR_MADD_CACHE_INVALCOMPLETE_SHIFT 26 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_MASK 0x01000000U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_SHIFT 24 +#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_MASK 0x00800000U +#define 
EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_SHIFT 23 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_MASK 0x00400000U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_SHIFT 22 +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_MASK 0x00200000U +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_SHIFT 21 +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_MASK 0x00100000U +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_SHIFT 20 +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_MASK 0x00080000U +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_SHIFT 19 +#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_MASK 0x00040000U +#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_SHIFT 18 +#define EUR_CR_EVENT_HOST_CLEAR_ISP_HALT_MASK 0x00020000U +#define EUR_CR_EVENT_HOST_CLEAR_ISP_HALT_SHIFT 17 +#define EUR_CR_EVENT_HOST_CLEAR_ISP_VISIBILITY_FAIL_MASK 0x00010000U +#define EUR_CR_EVENT_HOST_CLEAR_ISP_VISIBILITY_FAIL_SHIFT 16 +#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_MASK 0x00008000U +#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_SHIFT 15 +#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_MASK 0x00004000U +#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_SHIFT 14 +#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_MASK 0x00002000U +#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_SHIFT 13 +#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_MASK 0x00001000U +#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_SHIFT 12 +#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_MASK 0x00000800U +#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_SHIFT 11 +#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_MASK 0x00000400U +#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_SHIFT 10 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_MASK 0x00000200U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_SHIFT 9 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_MASK 0x00000100U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_SHIFT 8 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_MASK 0x00000080U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_SHIFT 7 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_MASK 
0x00000040U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_SHIFT 6 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_MASK 0x00000020U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_SHIFT 5 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_MASK 0x00000010U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_SHIFT 4 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_MASK 0x00000008U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_SHIFT 3 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_SHIFT 2 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_SHIFT 1 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_MASK 0x00000001U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_SHIFT 0 +/* Register EUR_CR_TIMER */ +#define EUR_CR_TIMER 0x0144 +#define EUR_CR_TIMER_VALUE_MASK 0xFFFFFFFFU +#define EUR_CR_TIMER_VALUE_SHIFT 0 +/* Register EUR_CR_USE_CODE_BASE_0 */ +#define EUR_CR_USE_CODE_BASE_0 0x0A0C +#define EUR_CR_USE_CODE_BASE_ADDR_00_MASK 0x000FFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_00_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_DM_00_MASK 0x00300000U +#define EUR_CR_USE_CODE_BASE_DM_00_SHIFT 20 +/* Register EUR_CR_USE_CODE_BASE_1 */ +#define EUR_CR_USE_CODE_BASE_1 0x0A10 +#define EUR_CR_USE_CODE_BASE_ADDR_01_MASK 0x000FFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_01_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_DM_01_MASK 0x00300000U +#define EUR_CR_USE_CODE_BASE_DM_01_SHIFT 20 +/* Register EUR_CR_USE_CODE_BASE_2 */ +#define EUR_CR_USE_CODE_BASE_2 0x0A14 +#define EUR_CR_USE_CODE_BASE_ADDR_02_MASK 0x000FFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_02_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_DM_02_MASK 0x00300000U +#define EUR_CR_USE_CODE_BASE_DM_02_SHIFT 20 +/* Register EUR_CR_USE_CODE_BASE_3 */ +#define EUR_CR_USE_CODE_BASE_3 0x0A18 +#define EUR_CR_USE_CODE_BASE_ADDR_03_MASK 0x000FFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_03_SHIFT 0 +#define 
EUR_CR_USE_CODE_BASE_DM_03_MASK 0x00300000U +#define EUR_CR_USE_CODE_BASE_DM_03_SHIFT 20 +/* Register EUR_CR_USE_CODE_BASE_4 */ +#define EUR_CR_USE_CODE_BASE_4 0x0A1C +#define EUR_CR_USE_CODE_BASE_ADDR_04_MASK 0x000FFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_04_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_DM_04_MASK 0x00300000U +#define EUR_CR_USE_CODE_BASE_DM_04_SHIFT 20 +/* Register EUR_CR_USE_CODE_BASE_5 */ +#define EUR_CR_USE_CODE_BASE_5 0x0A20 +#define EUR_CR_USE_CODE_BASE_ADDR_05_MASK 0x000FFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_05_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_DM_05_MASK 0x00300000U +#define EUR_CR_USE_CODE_BASE_DM_05_SHIFT 20 +/* Register EUR_CR_USE_CODE_BASE_6 */ +#define EUR_CR_USE_CODE_BASE_6 0x0A24 +#define EUR_CR_USE_CODE_BASE_ADDR_06_MASK 0x000FFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_06_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_DM_06_MASK 0x00300000U +#define EUR_CR_USE_CODE_BASE_DM_06_SHIFT 20 +/* Register EUR_CR_USE_CODE_BASE_7 */ +#define EUR_CR_USE_CODE_BASE_7 0x0A28 +#define EUR_CR_USE_CODE_BASE_ADDR_07_MASK 0x000FFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_07_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_DM_07_MASK 0x00300000U +#define EUR_CR_USE_CODE_BASE_DM_07_SHIFT 20 +/* Register EUR_CR_USE_CODE_BASE_8 */ +#define EUR_CR_USE_CODE_BASE_8 0x0A2C +#define EUR_CR_USE_CODE_BASE_ADDR_08_MASK 0x000FFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_08_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_DM_08_MASK 0x00300000U +#define EUR_CR_USE_CODE_BASE_DM_08_SHIFT 20 +/* Register EUR_CR_USE_CODE_BASE_9 */ +#define EUR_CR_USE_CODE_BASE_9 0x0A30 +#define EUR_CR_USE_CODE_BASE_ADDR_09_MASK 0x000FFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_09_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_DM_09_MASK 0x00300000U +#define EUR_CR_USE_CODE_BASE_DM_09_SHIFT 20 +/* Register EUR_CR_USE_CODE_BASE_10 */ +#define EUR_CR_USE_CODE_BASE_10 0x0A34 +#define EUR_CR_USE_CODE_BASE_ADDR_10_MASK 0x000FFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_10_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_DM_10_MASK 0x00300000U +#define 
EUR_CR_USE_CODE_BASE_DM_10_SHIFT 20 +/* Register EUR_CR_USE_CODE_BASE_11 */ +#define EUR_CR_USE_CODE_BASE_11 0x0A38 +#define EUR_CR_USE_CODE_BASE_ADDR_11_MASK 0x000FFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_11_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_DM_11_MASK 0x00300000U +#define EUR_CR_USE_CODE_BASE_DM_11_SHIFT 20 +/* Register EUR_CR_USE_CODE_BASE_12 */ +#define EUR_CR_USE_CODE_BASE_12 0x0A3C +#define EUR_CR_USE_CODE_BASE_ADDR_12_MASK 0x000FFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_12_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_DM_12_MASK 0x00300000U +#define EUR_CR_USE_CODE_BASE_DM_12_SHIFT 20 +/* Register EUR_CR_USE_CODE_BASE_13 */ +#define EUR_CR_USE_CODE_BASE_13 0x0A40 +#define EUR_CR_USE_CODE_BASE_ADDR_13_MASK 0x000FFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_13_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_DM_13_MASK 0x00300000U +#define EUR_CR_USE_CODE_BASE_DM_13_SHIFT 20 +/* Register EUR_CR_USE_CODE_BASE_14 */ +#define EUR_CR_USE_CODE_BASE_14 0x0A44 +#define EUR_CR_USE_CODE_BASE_ADDR_14_MASK 0x000FFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_14_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_DM_14_MASK 0x00300000U +#define EUR_CR_USE_CODE_BASE_DM_14_SHIFT 20 +/* Register EUR_CR_USE_CODE_BASE_15 */ +#define EUR_CR_USE_CODE_BASE_15 0x0A48 +#define EUR_CR_USE_CODE_BASE_ADDR_15_MASK 0x000FFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_15_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_DM_15_MASK 0x00300000U +#define EUR_CR_USE_CODE_BASE_DM_15_SHIFT 20 +/* Register EUR_CR_PDS_EXEC_BASE */ +#define EUR_CR_PDS_EXEC_BASE 0x0AB8 +#define EUR_CR_PDS_EXEC_BASE_ADDR_MASK 0x0FF00000U +#define EUR_CR_PDS_EXEC_BASE_ADDR_SHIFT 20 +/* Register EUR_CR_EVENT_KICKER */ +#define EUR_CR_EVENT_KICKER 0x0AC4 +#define EUR_CR_EVENT_KICKER_ADDRESS_MASK 0x0FFFFFF0U +#define EUR_CR_EVENT_KICKER_ADDRESS_SHIFT 4 +/* Register EUR_CR_EVENT_KICK */ +#define EUR_CR_EVENT_KICK 0x0AC8 +#define EUR_CR_EVENT_KICK_NOW_MASK 0x00000001U +#define EUR_CR_EVENT_KICK_NOW_SHIFT 0 +/* Register EUR_CR_EVENT_TIMER */ +#define EUR_CR_EVENT_TIMER 0x0ACC 
+#define EUR_CR_EVENT_TIMER_ENABLE_MASK 0x01000000U +#define EUR_CR_EVENT_TIMER_ENABLE_SHIFT 24 +#define EUR_CR_EVENT_TIMER_VALUE_MASK 0x00FFFFFFU +#define EUR_CR_EVENT_TIMER_VALUE_SHIFT 0 +/* Register EUR_CR_PDS_INV0 */ +#define EUR_CR_PDS_INV0 0x0AD0 +#define EUR_CR_PDS_INV0_DSC_MASK 0x00000001U +#define EUR_CR_PDS_INV0_DSC_SHIFT 0 +/* Register EUR_CR_PDS_INV1 */ +#define EUR_CR_PDS_INV1 0x0AD4 +#define EUR_CR_PDS_INV1_DSC_MASK 0x00000001U +#define EUR_CR_PDS_INV1_DSC_SHIFT 0 +/* Register EUR_CR_PDS_INV2 */ +#define EUR_CR_PDS_INV2 0x0AD8 +#define EUR_CR_PDS_INV2_DSC_MASK 0x00000001U +#define EUR_CR_PDS_INV2_DSC_SHIFT 0 +/* Register EUR_CR_PDS_INV3 */ +#define EUR_CR_PDS_INV3 0x0ADC +#define EUR_CR_PDS_INV3_DSC_MASK 0x00000001U +#define EUR_CR_PDS_INV3_DSC_SHIFT 0 +/* Register EUR_CR_PDS_INV_CSC */ +#define EUR_CR_PDS_INV_CSC 0x0AE0 +#define EUR_CR_PDS_INV_CSC_KICK_MASK 0x00000001U +#define EUR_CR_PDS_INV_CSC_KICK_SHIFT 0 +/* Register EUR_CR_PDS_PC_BASE */ +#define EUR_CR_PDS_PC_BASE 0x0B2C +#define EUR_CR_PDS_PC_BASE_ADDRESS_MASK 0x3FFFFFFFU +#define EUR_CR_PDS_PC_BASE_ADDRESS_SHIFT 0 +/* Register EUR_CR_BIF_CTRL */ +#define EUR_CR_BIF_CTRL 0x0C00 +#define EUR_CR_BIF_CTRL_NOREORDER_MASK 0x00000001U +#define EUR_CR_BIF_CTRL_NOREORDER_SHIFT 0 +#define EUR_CR_BIF_CTRL_PAUSE_MASK 0x00000002U +#define EUR_CR_BIF_CTRL_PAUSE_SHIFT 1 +#define EUR_CR_BIF_CTRL_FLUSH_MASK 0x00000004U +#define EUR_CR_BIF_CTRL_FLUSH_SHIFT 2 +#define EUR_CR_BIF_CTRL_INVALDC_MASK 0x00000008U +#define EUR_CR_BIF_CTRL_INVALDC_SHIFT 3 +#define EUR_CR_BIF_CTRL_CLEAR_FAULT_MASK 0x00000010U +#define EUR_CR_BIF_CTRL_CLEAR_FAULT_SHIFT 4 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_CACHE_MASK 0x00000100U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_CACHE_SHIFT 8 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_MASK 0x00000200U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_SHIFT 9 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_TE_MASK 0x00000400U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_TE_SHIFT 10 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_MASK 
0x00001000U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_SHIFT 12 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_MASK 0x00002000U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_SHIFT 13 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_MASK 0x00004000U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_SHIFT 14 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_MASK 0x00008000U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_SHIFT 15 +/* Register EUR_CR_BIF_INT_STAT */ +#define EUR_CR_BIF_INT_STAT 0x0C04 +#define EUR_CR_BIF_INT_STAT_FAULT_MASK 0x00003FFFU +#define EUR_CR_BIF_INT_STAT_FAULT_SHIFT 0 +#define EUR_CR_BIF_INT_STAT_PF_N_RW_MASK 0x00004000U +#define EUR_CR_BIF_INT_STAT_PF_N_RW_SHIFT 14 +#define EUR_CR_BIF_INT_STAT_FLUSH_COMPLETE_MASK 0x00008000U +#define EUR_CR_BIF_INT_STAT_FLUSH_COMPLETE_SHIFT 15 +/* Register EUR_CR_BIF_FAULT */ +#define EUR_CR_BIF_FAULT 0x0C08 +#define EUR_CR_BIF_FAULT_ADDR_MASK 0x0FFFF000U +#define EUR_CR_BIF_FAULT_ADDR_SHIFT 12 +/* Register EUR_CR_BIF_DIR_LIST_BASE0 */ +#define EUR_CR_BIF_DIR_LIST_BASE0 0x0C84 +#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_MASK 0xFFFFF000U +#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_SHIFT 12 +/* Register EUR_CR_BIF_TA_REQ_BASE */ +#define EUR_CR_BIF_TA_REQ_BASE 0x0C90 +#define EUR_CR_BIF_TA_REQ_BASE_ADDR_MASK 0x0FF00000U +#define EUR_CR_BIF_TA_REQ_BASE_ADDR_SHIFT 20 +/* Register EUR_CR_BIF_MEM_REQ_STAT */ +#define EUR_CR_BIF_MEM_REQ_STAT 0x0CA8 +#define EUR_CR_BIF_MEM_REQ_STAT_READS_MASK 0x000000FFU +#define EUR_CR_BIF_MEM_REQ_STAT_READS_SHIFT 0 +/* Register EUR_CR_BIF_3D_REQ_BASE */ +#define EUR_CR_BIF_3D_REQ_BASE 0x0CAC +#define EUR_CR_BIF_3D_REQ_BASE_ADDR_MASK 0x0FF00000U +#define EUR_CR_BIF_3D_REQ_BASE_ADDR_SHIFT 20 +/* Register EUR_CR_BIF_ZLS_REQ_BASE */ +#define EUR_CR_BIF_ZLS_REQ_BASE 0x0CB0 +#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_MASK 0x0FF00000U +#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_SHIFT 20 +/* Table EUR_CR_USE_CODE_BASE */ +/* Register EUR_CR_USE_CODE_BASE */ +#define EUR_CR_USE_CODE_BASE(X) (0x0A0C + (4 * (X))) +#define EUR_CR_USE_CODE_BASE_ADDR_MASK 
0x000FFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_DM_MASK 0x00300000U +#define EUR_CR_USE_CODE_BASE_DM_SHIFT 20 +/* Number of entries in table EUR_CR_USE_CODE_BASE */ +#define EUR_CR_USE_CODE_BASE_SIZE_UINT32 16 +#define EUR_CR_USE_CODE_BASE_NUM_ENTRIES 16 + +#endif /* _SGX520DEFS_KM_H_ */ + diff --git a/sgx_km/eurasia_km/services4/srvkm/hwdefs/sgx530defs.h b/sgx_km/eurasia_km/services4/srvkm/hwdefs/sgx530defs.h new file mode 100644 index 0000000..3223feb --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/hwdefs/sgx530defs.h @@ -0,0 +1,542 @@ +/*************************************************************************/ /*! +@Title Hardware defs for SGX530. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef _SGX530DEFS_KM_H_ +#define _SGX530DEFS_KM_H_ + +/* Register EUR_CR_CLKGATECTL */ +#define EUR_CR_CLKGATECTL 0x0000 +#define EUR_CR_CLKGATECTL_2D_CLKG_MASK 0x00000003U +#define EUR_CR_CLKGATECTL_2D_CLKG_SHIFT 0 +#define EUR_CR_CLKGATECTL_ISP_CLKG_MASK 0x00000030U +#define EUR_CR_CLKGATECTL_ISP_CLKG_SHIFT 4 +#define EUR_CR_CLKGATECTL_TSP_CLKG_MASK 0x00000300U +#define EUR_CR_CLKGATECTL_TSP_CLKG_SHIFT 8 +#define EUR_CR_CLKGATECTL_TA_CLKG_MASK 0x00003000U +#define EUR_CR_CLKGATECTL_TA_CLKG_SHIFT 12 +#define EUR_CR_CLKGATECTL_DPM_CLKG_MASK 0x00030000U +#define EUR_CR_CLKGATECTL_DPM_CLKG_SHIFT 16 +#define EUR_CR_CLKGATECTL_USE_CLKG_MASK 0x00300000U +#define EUR_CR_CLKGATECTL_USE_CLKG_SHIFT 20 +#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_MASK 0x01000000U +#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_SHIFT 24 +/* Register EUR_CR_CLKGATESTATUS */ +#define EUR_CR_CLKGATESTATUS 0x0004 +#define EUR_CR_CLKGATESTATUS_2D_CLKS_MASK 0x00000001U +#define EUR_CR_CLKGATESTATUS_2D_CLKS_SHIFT 0 +#define EUR_CR_CLKGATESTATUS_ISP_CLKS_MASK 0x00000010U +#define EUR_CR_CLKGATESTATUS_ISP_CLKS_SHIFT 4 +#define EUR_CR_CLKGATESTATUS_TSP_CLKS_MASK 0x00000100U +#define EUR_CR_CLKGATESTATUS_TSP_CLKS_SHIFT 8 +#define EUR_CR_CLKGATESTATUS_TA_CLKS_MASK 0x00001000U +#define EUR_CR_CLKGATESTATUS_TA_CLKS_SHIFT 12 +#define EUR_CR_CLKGATESTATUS_DPM_CLKS_MASK 0x00010000U +#define EUR_CR_CLKGATESTATUS_DPM_CLKS_SHIFT 16 +#define EUR_CR_CLKGATESTATUS_USE_CLKS_MASK 0x00100000U +#define EUR_CR_CLKGATESTATUS_USE_CLKS_SHIFT 20 +/* Register EUR_CR_CLKGATECTLOVR */ +#define EUR_CR_CLKGATECTLOVR 0x0008 +#define EUR_CR_CLKGATECTLOVR_2D_CLKO_MASK 0x00000003U +#define EUR_CR_CLKGATECTLOVR_2D_CLKO_SHIFT 0 +#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_MASK 0x00000030U +#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_SHIFT 4 +#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_MASK 0x00000300U +#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_SHIFT 8 +#define 
EUR_CR_CLKGATECTLOVR_TA_CLKO_MASK 0x00003000U +#define EUR_CR_CLKGATECTLOVR_TA_CLKO_SHIFT 12 +#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_MASK 0x00030000U +#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_SHIFT 16 +#define EUR_CR_CLKGATECTLOVR_USE_CLKO_MASK 0x00300000U +#define EUR_CR_CLKGATECTLOVR_USE_CLKO_SHIFT 20 +/* Register EUR_CR_CORE_ID */ +#define EUR_CR_CORE_ID 0x0010 +#define EUR_CR_CORE_ID_CONFIG_MASK 0x0000FFFFU +#define EUR_CR_CORE_ID_CONFIG_SHIFT 0 +#define EUR_CR_CORE_ID_ID_MASK 0xFFFF0000U +#define EUR_CR_CORE_ID_ID_SHIFT 16 +/* Register EUR_CR_CORE_REVISION */ +#define EUR_CR_CORE_REVISION 0x0014 +#define EUR_CR_CORE_REVISION_MAINTENANCE_MASK 0x000000FFU +#define EUR_CR_CORE_REVISION_MAINTENANCE_SHIFT 0 +#define EUR_CR_CORE_REVISION_MINOR_MASK 0x0000FF00U +#define EUR_CR_CORE_REVISION_MINOR_SHIFT 8 +#define EUR_CR_CORE_REVISION_MAJOR_MASK 0x00FF0000U +#define EUR_CR_CORE_REVISION_MAJOR_SHIFT 16 +#define EUR_CR_CORE_REVISION_DESIGNER_MASK 0xFF000000U +#define EUR_CR_CORE_REVISION_DESIGNER_SHIFT 24 +/* Register EUR_CR_DESIGNER_REV_FIELD1 */ +#define EUR_CR_DESIGNER_REV_FIELD1 0x0018 +#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_MASK 0xFFFFFFFFU +#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_SHIFT 0 +/* Register EUR_CR_DESIGNER_REV_FIELD2 */ +#define EUR_CR_DESIGNER_REV_FIELD2 0x001C +#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_MASK 0xFFFFFFFFU +#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_SHIFT 0 +/* Register EUR_CR_SOFT_RESET */ +#define EUR_CR_SOFT_RESET 0x0080 +#define EUR_CR_SOFT_RESET_BIF_RESET_MASK 0x00000001U +#define EUR_CR_SOFT_RESET_BIF_RESET_SHIFT 0 +#define EUR_CR_SOFT_RESET_TWOD_RESET_MASK 0x00000002U +#define EUR_CR_SOFT_RESET_TWOD_RESET_SHIFT 1 +#define EUR_CR_SOFT_RESET_DPM_RESET_MASK 0x00000004U +#define EUR_CR_SOFT_RESET_DPM_RESET_SHIFT 2 +#define EUR_CR_SOFT_RESET_TA_RESET_MASK 0x00000008U +#define EUR_CR_SOFT_RESET_TA_RESET_SHIFT 3 +#define EUR_CR_SOFT_RESET_USE_RESET_MASK 0x00000010U +#define 
EUR_CR_SOFT_RESET_USE_RESET_SHIFT 4 +#define EUR_CR_SOFT_RESET_ISP_RESET_MASK 0x00000020U +#define EUR_CR_SOFT_RESET_ISP_RESET_SHIFT 5 +#define EUR_CR_SOFT_RESET_TSP_RESET_MASK 0x00000040U +#define EUR_CR_SOFT_RESET_TSP_RESET_SHIFT 6 +/* Register EUR_CR_EVENT_HOST_ENABLE2 */ +#define EUR_CR_EVENT_HOST_ENABLE2 0x0110 +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_TA_MASK 0x00000010U +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_TA_SHIFT 4 +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_3D_MASK 0x00000008U +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_3D_SHIFT 3 +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_DL_MASK 0x00000004U +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_DL_SHIFT 2 +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_MASK 0x00000002U +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_SHIFT 1 +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_MASK 0x00000001U +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_SHIFT 0 +/* Register EUR_CR_EVENT_HOST_CLEAR2 */ +#define EUR_CR_EVENT_HOST_CLEAR2 0x0114 +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_TA_MASK 0x00000010U +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_TA_SHIFT 4 +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_3D_MASK 0x00000008U +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_3D_SHIFT 3 +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_DL_MASK 0x00000004U +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_DL_SHIFT 2 +#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_MASK 0x00000002U +#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_SHIFT 1 +#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_MASK 0x00000001U +#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_SHIFT 0 +/* Register EUR_CR_EVENT_STATUS2 */ +#define EUR_CR_EVENT_STATUS2 0x0118 +#define EUR_CR_EVENT_STATUS2_TRIG_TA_MASK 0x00000010U +#define EUR_CR_EVENT_STATUS2_TRIG_TA_SHIFT 4 +#define EUR_CR_EVENT_STATUS2_TRIG_3D_MASK 0x00000008U +#define EUR_CR_EVENT_STATUS2_TRIG_3D_SHIFT 3 +#define EUR_CR_EVENT_STATUS2_TRIG_DL_MASK 0x00000004U +#define EUR_CR_EVENT_STATUS2_TRIG_DL_SHIFT 2 +#define 
EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_MASK 0x00000002U +#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_SHIFT 1 +#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_MASK 0x00000001U +#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_SHIFT 0 +/* Register EUR_CR_EVENT_STATUS */ +#define EUR_CR_EVENT_STATUS 0x012CU +#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_MASK 0x80000000U +#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_SHIFT 31 +#define EUR_CR_EVENT_STATUS_TIMER_MASK 0x20000000U +#define EUR_CR_EVENT_STATUS_TIMER_SHIFT 29 +#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_MASK 0x10000000U +#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_SHIFT 28 +#define EUR_CR_EVENT_STATUS_TWOD_COMPLETE_MASK 0x08000000U +#define EUR_CR_EVENT_STATUS_TWOD_COMPLETE_SHIFT 27 +#define EUR_CR_EVENT_STATUS_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000U +#define EUR_CR_EVENT_STATUS_MADD_CACHE_INVALCOMPLETE_SHIFT 26 +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000U +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25 +#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_MASK 0x01000000U +#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_SHIFT 24 +#define EUR_CR_EVENT_STATUS_ISP_END_TILE_MASK 0x00800000U +#define EUR_CR_EVENT_STATUS_ISP_END_TILE_SHIFT 23 +#define EUR_CR_EVENT_STATUS_DPM_INITEND_MASK 0x00400000U +#define EUR_CR_EVENT_STATUS_DPM_INITEND_SHIFT 22 +#define EUR_CR_EVENT_STATUS_OTPM_LOADED_MASK 0x00200000U +#define EUR_CR_EVENT_STATUS_OTPM_LOADED_SHIFT 21 +#define EUR_CR_EVENT_STATUS_OTPM_INV_MASK 0x00100000U +#define EUR_CR_EVENT_STATUS_OTPM_INV_SHIFT 20 +#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_MASK 0x00080000U +#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_SHIFT 19 +#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_MASK 0x00040000U +#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_SHIFT 18 +#define EUR_CR_EVENT_STATUS_ISP_HALT_MASK 0x00020000U +#define EUR_CR_EVENT_STATUS_ISP_HALT_SHIFT 17 +#define EUR_CR_EVENT_STATUS_ISP_VISIBILITY_FAIL_MASK 0x00010000U +#define EUR_CR_EVENT_STATUS_ISP_VISIBILITY_FAIL_SHIFT 
16 +#define EUR_CR_EVENT_STATUS_BREAKPOINT_MASK 0x00008000U +#define EUR_CR_EVENT_STATUS_BREAKPOINT_SHIFT 15 +#define EUR_CR_EVENT_STATUS_SW_EVENT_MASK 0x00004000U +#define EUR_CR_EVENT_STATUS_SW_EVENT_SHIFT 14 +#define EUR_CR_EVENT_STATUS_TA_FINISHED_MASK 0x00002000U +#define EUR_CR_EVENT_STATUS_TA_FINISHED_SHIFT 13 +#define EUR_CR_EVENT_STATUS_TA_TERMINATE_MASK 0x00001000U +#define EUR_CR_EVENT_STATUS_TA_TERMINATE_SHIFT 12 +#define EUR_CR_EVENT_STATUS_TPC_CLEAR_MASK 0x00000800U +#define EUR_CR_EVENT_STATUS_TPC_CLEAR_SHIFT 11 +#define EUR_CR_EVENT_STATUS_TPC_FLUSH_MASK 0x00000400U +#define EUR_CR_EVENT_STATUS_TPC_FLUSH_SHIFT 10 +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_MASK 0x00000200U +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_SHIFT 9 +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_MASK 0x00000100U +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_SHIFT 8 +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_MASK 0x00000080U +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_SHIFT 7 +#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_MASK 0x00000040U +#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_SHIFT 6 +#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_MASK 0x00000020U +#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_SHIFT 5 +#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_MASK 0x00000010U +#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_SHIFT 4 +#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_MASK 0x00000008U +#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_SHIFT 3 +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004U +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_SHIFT 2 +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002U +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_SHIFT 1 +#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_MASK 0x00000001U +#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_SHIFT 0 +/* Register EUR_CR_EVENT_HOST_ENABLE */ +#define EUR_CR_EVENT_HOST_ENABLE 0x0130 +#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_MASK 0x80000000U +#define 
EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_SHIFT 31 +#define EUR_CR_EVENT_HOST_ENABLE_TIMER_MASK 0x20000000U +#define EUR_CR_EVENT_HOST_ENABLE_TIMER_SHIFT 29 +#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_MASK 0x10000000U +#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_SHIFT 28 +#define EUR_CR_EVENT_HOST_ENABLE_TWOD_COMPLETE_MASK 0x08000000U +#define EUR_CR_EVENT_HOST_ENABLE_TWOD_COMPLETE_SHIFT 27 +#define EUR_CR_EVENT_HOST_ENABLE_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000U +#define EUR_CR_EVENT_HOST_ENABLE_MADD_CACHE_INVALCOMPLETE_SHIFT 26 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_MASK 0x01000000U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_SHIFT 24 +#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_MASK 0x00800000U +#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_SHIFT 23 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_MASK 0x00400000U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_SHIFT 22 +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_MASK 0x00200000U +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_SHIFT 21 +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_MASK 0x00100000U +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_SHIFT 20 +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_MASK 0x00080000U +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_SHIFT 19 +#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_MASK 0x00040000U +#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_SHIFT 18 +#define EUR_CR_EVENT_HOST_ENABLE_ISP_HALT_MASK 0x00020000U +#define EUR_CR_EVENT_HOST_ENABLE_ISP_HALT_SHIFT 17 +#define EUR_CR_EVENT_HOST_ENABLE_ISP_VISIBILITY_FAIL_MASK 0x00010000U +#define EUR_CR_EVENT_HOST_ENABLE_ISP_VISIBILITY_FAIL_SHIFT 16 +#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_MASK 0x00008000U +#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_SHIFT 15 +#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_MASK 0x00004000U +#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_SHIFT 
14 +#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_MASK 0x00002000U +#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_SHIFT 13 +#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_MASK 0x00001000U +#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_SHIFT 12 +#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_MASK 0x00000800U +#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_SHIFT 11 +#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_MASK 0x00000400U +#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_SHIFT 10 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_MASK 0x00000200U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_SHIFT 9 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_MASK 0x00000100U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_SHIFT 8 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_MASK 0x00000080U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_SHIFT 7 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_MASK 0x00000040U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_SHIFT 6 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_MASK 0x00000020U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_SHIFT 5 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_MASK 0x00000010U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_SHIFT 4 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_MASK 0x00000008U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_SHIFT 3 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_SHIFT 2 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_SHIFT 1 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_MASK 0x00000001U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_SHIFT 0 +/* Register EUR_CR_EVENT_HOST_CLEAR */ +#define EUR_CR_EVENT_HOST_CLEAR 0x0134 +#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_MASK 0x80000000U +#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_SHIFT 31 +#define 
EUR_CR_EVENT_HOST_CLEAR_TIMER_MASK 0x20000000U +#define EUR_CR_EVENT_HOST_CLEAR_TIMER_SHIFT 29 +#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_MASK 0x10000000U +#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_SHIFT 28 +#define EUR_CR_EVENT_HOST_CLEAR_TWOD_COMPLETE_MASK 0x08000000U +#define EUR_CR_EVENT_HOST_CLEAR_TWOD_COMPLETE_SHIFT 27 +#define EUR_CR_EVENT_HOST_CLEAR_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000U +#define EUR_CR_EVENT_HOST_CLEAR_MADD_CACHE_INVALCOMPLETE_SHIFT 26 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_MASK 0x01000000U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_SHIFT 24 +#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_MASK 0x00800000U +#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_SHIFT 23 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_MASK 0x00400000U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_SHIFT 22 +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_MASK 0x00200000U +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_SHIFT 21 +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_MASK 0x00100000U +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_SHIFT 20 +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_MASK 0x00080000U +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_SHIFT 19 +#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_MASK 0x00040000U +#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_SHIFT 18 +#define EUR_CR_EVENT_HOST_CLEAR_ISP_HALT_MASK 0x00020000U +#define EUR_CR_EVENT_HOST_CLEAR_ISP_HALT_SHIFT 17 +#define EUR_CR_EVENT_HOST_CLEAR_ISP_VISIBILITY_FAIL_MASK 0x00010000U +#define EUR_CR_EVENT_HOST_CLEAR_ISP_VISIBILITY_FAIL_SHIFT 16 +#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_MASK 0x00008000U +#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_SHIFT 15 +#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_MASK 0x00004000U +#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_SHIFT 14 +#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_MASK 0x00002000U +#define 
EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_SHIFT 13 +#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_MASK 0x00001000U +#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_SHIFT 12 +#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_MASK 0x00000800U +#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_SHIFT 11 +#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_MASK 0x00000400U +#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_SHIFT 10 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_MASK 0x00000200U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_SHIFT 9 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_MASK 0x00000100U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_SHIFT 8 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_MASK 0x00000080U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_SHIFT 7 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_MASK 0x00000040U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_SHIFT 6 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_MASK 0x00000020U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_SHIFT 5 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_MASK 0x00000010U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_SHIFT 4 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_MASK 0x00000008U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_SHIFT 3 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_SHIFT 2 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_SHIFT 1 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_MASK 0x00000001U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_SHIFT 0 +/* Register EUR_CR_PDS_EXEC_BASE */ +#define EUR_CR_PDS_EXEC_BASE 0x0AB8 +#define EUR_CR_PDS_EXEC_BASE_ADDR_MASK 0x0FF00000U +#define EUR_CR_PDS_EXEC_BASE_ADDR_SHIFT 20 +/* Register EUR_CR_EVENT_KICKER */ +#define EUR_CR_EVENT_KICKER 0x0AC4 +#define EUR_CR_EVENT_KICKER_ADDRESS_MASK 0x0FFFFFF0U +#define EUR_CR_EVENT_KICKER_ADDRESS_SHIFT 
4 +/* Register EUR_CR_EVENT_KICK */ +#define EUR_CR_EVENT_KICK 0x0AC8 +#define EUR_CR_EVENT_KICK_NOW_MASK 0x00000001U +#define EUR_CR_EVENT_KICK_NOW_SHIFT 0 +/* Register EUR_CR_EVENT_TIMER */ +#define EUR_CR_EVENT_TIMER 0x0ACC +#define EUR_CR_EVENT_TIMER_ENABLE_MASK 0x01000000U +#define EUR_CR_EVENT_TIMER_ENABLE_SHIFT 24 +#define EUR_CR_EVENT_TIMER_VALUE_MASK 0x00FFFFFFU +#define EUR_CR_EVENT_TIMER_VALUE_SHIFT 0 +/* Register EUR_CR_PDS_INV0 */ +#define EUR_CR_PDS_INV0 0x0AD0 +#define EUR_CR_PDS_INV0_DSC_MASK 0x00000001U +#define EUR_CR_PDS_INV0_DSC_SHIFT 0 +/* Register EUR_CR_PDS_INV1 */ +#define EUR_CR_PDS_INV1 0x0AD4 +#define EUR_CR_PDS_INV1_DSC_MASK 0x00000001U +#define EUR_CR_PDS_INV1_DSC_SHIFT 0 +/* Register EUR_CR_PDS_INV2 */ +#define EUR_CR_PDS_INV2 0x0AD8 +#define EUR_CR_PDS_INV2_DSC_MASK 0x00000001U +#define EUR_CR_PDS_INV2_DSC_SHIFT 0 +/* Register EUR_CR_PDS_INV3 */ +#define EUR_CR_PDS_INV3 0x0ADC +#define EUR_CR_PDS_INV3_DSC_MASK 0x00000001U +#define EUR_CR_PDS_INV3_DSC_SHIFT 0 +/* Register EUR_CR_PDS_INV_CSC */ +#define EUR_CR_PDS_INV_CSC 0x0AE0 +#define EUR_CR_PDS_INV_CSC_KICK_MASK 0x00000001U +#define EUR_CR_PDS_INV_CSC_KICK_SHIFT 0 +/* Register EUR_CR_PDS_PC_BASE */ +#define EUR_CR_PDS_PC_BASE 0x0B2C +#define EUR_CR_PDS_PC_BASE_ADDRESS_MASK 0x3FFFFFFFU +#define EUR_CR_PDS_PC_BASE_ADDRESS_SHIFT 0 +/* Register EUR_CR_BIF_CTRL */ +#define EUR_CR_BIF_CTRL 0x0C00 +#define EUR_CR_BIF_CTRL_NOREORDER_MASK 0x00000001U +#define EUR_CR_BIF_CTRL_NOREORDER_SHIFT 0 +#define EUR_CR_BIF_CTRL_PAUSE_MASK 0x00000002U +#define EUR_CR_BIF_CTRL_PAUSE_SHIFT 1 +#define EUR_CR_BIF_CTRL_FLUSH_MASK 0x00000004U +#define EUR_CR_BIF_CTRL_FLUSH_SHIFT 2 +#define EUR_CR_BIF_CTRL_INVALDC_MASK 0x00000008U +#define EUR_CR_BIF_CTRL_INVALDC_SHIFT 3 +#define EUR_CR_BIF_CTRL_CLEAR_FAULT_MASK 0x00000010U +#define EUR_CR_BIF_CTRL_CLEAR_FAULT_SHIFT 4 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_CACHE_MASK 0x00000100U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_CACHE_SHIFT 8 +#define 
EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_MASK 0x00000200U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_SHIFT 9 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_TE_MASK 0x00000400U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_TE_SHIFT 10 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_TWOD_MASK 0x00000800U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_TWOD_SHIFT 11 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_MASK 0x00001000U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_SHIFT 12 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_MASK 0x00002000U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_SHIFT 13 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_MASK 0x00004000U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_SHIFT 14 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_MASK 0x00008000U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_SHIFT 15 +/* Register EUR_CR_BIF_INT_STAT */ +#define EUR_CR_BIF_INT_STAT 0x0C04 +#define EUR_CR_BIF_INT_STAT_FAULT_MASK 0x00003FFFU +#define EUR_CR_BIF_INT_STAT_FAULT_SHIFT 0 +#define EUR_CR_BIF_INT_STAT_PF_N_RW_MASK 0x00004000U +#define EUR_CR_BIF_INT_STAT_PF_N_RW_SHIFT 14 +#define EUR_CR_BIF_INT_STAT_FLUSH_COMPLETE_MASK 0x00008000U +#define EUR_CR_BIF_INT_STAT_FLUSH_COMPLETE_SHIFT 15 +/* Register EUR_CR_BIF_FAULT */ +#define EUR_CR_BIF_FAULT 0x0C08 +#define EUR_CR_BIF_FAULT_ADDR_MASK 0x0FFFF000U +#define EUR_CR_BIF_FAULT_ADDR_SHIFT 12 +/* Register EUR_CR_BIF_DIR_LIST_BASE0 */ +#define EUR_CR_BIF_DIR_LIST_BASE0 0x0C84 +#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_MASK 0xFFFFF000U +#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_SHIFT 12 +/* Register EUR_CR_BIF_TWOD_REQ_BASE */ +#define EUR_CR_BIF_TWOD_REQ_BASE 0x0C88 +#define EUR_CR_BIF_TWOD_REQ_BASE_ADDR_MASK 0x0FF00000U +#define EUR_CR_BIF_TWOD_REQ_BASE_ADDR_SHIFT 20 +/* Register EUR_CR_BIF_TA_REQ_BASE */ +#define EUR_CR_BIF_TA_REQ_BASE 0x0C90 +#define EUR_CR_BIF_TA_REQ_BASE_ADDR_MASK 0x0FF00000U +#define EUR_CR_BIF_TA_REQ_BASE_ADDR_SHIFT 20 +/* Register EUR_CR_BIF_MEM_REQ_STAT */ +#define EUR_CR_BIF_MEM_REQ_STAT 0x0CA8 +#define EUR_CR_BIF_MEM_REQ_STAT_READS_MASK 0x000000FFU +#define 
EUR_CR_BIF_MEM_REQ_STAT_READS_SHIFT 0 +/* Register EUR_CR_BIF_3D_REQ_BASE */ +#define EUR_CR_BIF_3D_REQ_BASE 0x0CAC +#define EUR_CR_BIF_3D_REQ_BASE_ADDR_MASK 0x0FF00000U +#define EUR_CR_BIF_3D_REQ_BASE_ADDR_SHIFT 20 +/* Register EUR_CR_BIF_ZLS_REQ_BASE */ +#define EUR_CR_BIF_ZLS_REQ_BASE 0x0CB0 +#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_MASK 0x0FF00000U +#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_SHIFT 20 +/* Register EUR_CR_2D_BLIT_STATUS */ +#define EUR_CR_2D_BLIT_STATUS 0x0E04 +#define EUR_CR_2D_BLIT_STATUS_COMPLETE_MASK 0x00FFFFFFU +#define EUR_CR_2D_BLIT_STATUS_COMPLETE_SHIFT 0 +#define EUR_CR_2D_BLIT_STATUS_BUSY_MASK 0x01000000U +#define EUR_CR_2D_BLIT_STATUS_BUSY_SHIFT 24 +/* Register EUR_CR_2D_VIRTUAL_FIFO_0 */ +#define EUR_CR_2D_VIRTUAL_FIFO_0 0x0E10 +#define EUR_CR_2D_VIRTUAL_FIFO_0_ENABLE_MASK 0x00000001U +#define EUR_CR_2D_VIRTUAL_FIFO_0_ENABLE_SHIFT 0 +#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MASK 0x0000000EU +#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_SHIFT 1 +#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_DIV_MASK 0x00000FF0U +#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_DIV_SHIFT 4 +#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MUL_MASK 0x0000F000U +#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MUL_SHIFT 12 +/* Register EUR_CR_2D_VIRTUAL_FIFO_1 */ +#define EUR_CR_2D_VIRTUAL_FIFO_1 0x0E14 +#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_ACC_MASK 0x00000FFFU +#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_ACC_SHIFT 0 +#define EUR_CR_2D_VIRTUAL_FIFO_1_MAX_ACC_MASK 0x00FFF000U +#define EUR_CR_2D_VIRTUAL_FIFO_1_MAX_ACC_SHIFT 12 +#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_METRIC_MASK 0xFF000000U +#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_METRIC_SHIFT 24 +/* Table EUR_CR_USE_CODE_BASE */ +/* Register EUR_CR_USE_CODE_BASE */ +#define EUR_CR_USE_CODE_BASE(X) (0x0A0C + (4 * (X))) +#define EUR_CR_USE_CODE_BASE_ADDR_MASK 0x00FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_DM_MASK 0x03000000U +#define EUR_CR_USE_CODE_BASE_DM_SHIFT 24 +/* Number of entries in table EUR_CR_USE_CODE_BASE */ 
+#define EUR_CR_USE_CODE_BASE_SIZE_UINT32 16 +#define EUR_CR_USE_CODE_BASE_NUM_ENTRIES 16 +#define EUR_CR_MNE_CR_CTRL 0x0D00 +#define EUR_CR_MNE_CR_CTRL_BYP_CC_N_MASK 0x00010000U +#define EUR_CR_MNE_CR_CTRL_BYP_CC_N_SHIFT 16 +#define EUR_CR_MNE_CR_CTRL_BYP_CC_MASK 0x00008000U +#define EUR_CR_MNE_CR_CTRL_BYP_CC_SHIFT 15 +#define EUR_CR_MNE_CR_CTRL_USE_INVAL_ADDR_MASK 0x00007800U +#define EUR_CR_MNE_CR_CTRL_USE_INVAL_ADDR_SHIFT 11 +#define EUR_CR_MNE_CR_CTRL_BYPASS_ALL_MASK 0x00000400U +#define EUR_CR_MNE_CR_CTRL_BYPASS_ALL_SHIFT 10 +#define EUR_CR_MNE_CR_CTRL_BYPASS_MASK 0x000003E0U +#define EUR_CR_MNE_CR_CTRL_BYPASS_SHIFT 5 +#define EUR_CR_MNE_CR_CTRL_PAUSE_MASK 0x00000010U +#define EUR_CR_MNE_CR_CTRL_PAUSE_SHIFT 4 +#define EUR_CR_MNE_CR_CTRL_INVAL_PREQ_MASK 0x0000000EU +#define EUR_CR_MNE_CR_CTRL_INVAL_PREQ_SHIFT 1 +#define EUR_CR_MNE_CR_CTRL_INVAL_PREQ_PDS_MASK (1UL< SGX_FEATURE_MP_CORE_COUNT_3D) +#error Number of TA cores larger than number of 3D cores not supported in current driver +#endif /* (SGX_FEATURE_MP_CORE_COUNT_TA > SGX_FEATURE_MP_CORE_COUNT_3D) */ +#else +#if defined(SGX_FEATURE_MP_CORE_COUNT) +#define SGX_FEATURE_MP_CORE_COUNT_TA (SGX_FEATURE_MP_CORE_COUNT) +#define SGX_FEATURE_MP_CORE_COUNT_3D (SGX_FEATURE_MP_CORE_COUNT) +#else +#error Either SGX_FEATURE_MP_CORE_COUNT or \ +both SGX_FEATURE_MP_CORE_COUNT_TA and SGX_FEATURE_MP_CORE_COUNT_3D \ +must be defined when SGX_FEATURE_MP is defined +#endif /* SGX_FEATURE_MP_CORE_COUNT */ +#endif /* defined(SGX_FEATURE_MP_CORE_COUNT_TA) && defined(SGX_FEATURE_MP_CORE_COUNT_3D) */ +#else +#define SGX_FEATURE_MP_CORE_COUNT (1) +#define SGX_FEATURE_MP_CORE_COUNT_TA (1) +#define SGX_FEATURE_MP_CORE_COUNT_3D (1) +#endif /* SGX_FEATURE_MP */ + +#if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && !defined(SUPPORT_SGX_PRIORITY_SCHEDULING) +#define SUPPORT_SGX_PRIORITY_SCHEDULING +#endif + +#include "img_types.h" + +/****************************************************************************** + End of file 
(sgxfeaturedefs.h) +******************************************************************************/ diff --git a/sgx_km/eurasia_km/services4/srvkm/hwdefs/sgxmmu.h b/sgx_km/eurasia_km/services4/srvkm/hwdefs/sgxmmu.h new file mode 100644 index 0000000..a6a907a --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/hwdefs/sgxmmu.h @@ -0,0 +1,99 @@ +/*************************************************************************/ /*! +@Title SGX MMU defines +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Provides SGX MMU declarations and macros +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. 
+ +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(__SGXMMU_KM_H__) +#define __SGXMMU_KM_H__ + +/* to be implemented */ + +/* SGX MMU maps 4Kb pages */ +#define SGX_MMU_PAGE_SHIFT (12) +#define SGX_MMU_PAGE_SIZE (1U<ui64Stamp; +} + +#endif /* __DMABUF_SYNC_H__ */ diff --git a/sgx_km/eurasia_km/services4/srvkm/include/handle.h b/sgx_km/eurasia_km/services4/srvkm/include/handle.h new file mode 100644 index 0000000..f636073 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/include/handle.h @@ -0,0 +1,547 @@ +/*************************************************************************/ /*! +@Title Handle Manager API +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Provide handle management +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#ifndef __HANDLE_H__ +#define __HANDLE_H__ + +/* + * Handle API + * ---------- + * The handle API is intended to provide handles for kernel resources, + * which can then be passed back to user space processes. + * + * The following functions comprise the API. Each function takes a + * pointer to a PVRSRV_HANDLE_BASE strcture, one of which is allocated + * for each process, and stored in the per-process data area. Use + * KERNEL_HANDLE_BASE for handles not allocated for a particular process, + * or for handles that need to be allocated before the PVRSRV_HANDLE_BASE + * structure for the process is available. + * + * PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase, + * IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, + * PVRSRV_HANDLE_ALLOC_FLAG eFlag); + * + * Allocate a handle phHandle, for the resource of type eType pointed to by + * pvData. + * + * For handles that have a definite lifetime, where the corresponding + * resource is explicitly created and destroyed, eFlag should be zero. + * + * If the resource is not explicitly created and destroyed, eFlag should be + * set to PVRSRV_HANDLE_ALLOC_FLAG_SHARED. For a given process, the same + * handle will be returned each time a handle for the resource is allocated + * with the PVRSRV_HANDLE_ALLOC_FLAG_SHARED flag. + * + * If a particular resource may be referenced multiple times by a + * given process, setting eFlag to PVRSRV_HANDLE_ALLOC_FLAG_MULTI + * will allow multiple handles to be allocated for the resource. + * Such handles cannot be found with PVRSRVFindHandle. 
+ * + * PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase, + * IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, + * PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent); + * + * This function is similar to PVRSRVAllocHandle, except that the allocated + * handles are associated with a parent handle, hParent, that has been + * allocated previously. Subhandles are automatically deallocated when their + * parent handle is dealloacted. + * Subhandles can be treated as ordinary handles. For example, they may + * have subhandles of their own, and may be explicity deallocated using + * PVRSRVReleaseHandle (see below). + * + * PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase, + * IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType); + * + * Find the handle previously allocated for the resource pointed to by + * pvData, of type eType. Handles allocated with the flag + * PVRSRV_HANDLE_ALLOC_FLAG_MULTI cannot be found using this + * function. + * + * PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase, + * IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType); + * + * Given a handle for a resource of type eType, return the pointer to the + * resource. + * + * PVRSRV_ERROR PVRSRVLookuSubHandle(PVRSRV_HANDLE_BASE *psBase, + * IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType, + * IMH_HANDLE hAncestor); + * + * Similar to PVRSRVLookupHandle, but checks the handle is a descendent + * of hAncestor. + * + * PVRSRV_ERROR PVRSRVLookupHandleAnyType(PVRSRV_HANDLE_BASE *psBase, + * IMG_PVOID *ppvData, PVRSRV_HANDLE_TYPE *peType, IMG_HANDLE hHandle); + * + * This function returns the resource pointer corresponding to the + * given handle, and the resource type in peType. This function is + * intended for situations where a handle may be one of several types, + * but the type isn't known beforehand. 
+ * + * PVRSRV_ERROR PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase, + * IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType); + * + * Deallocate a handle of given type. + * + * PVRSRV_ERROR PVRSRVLookupAndReleaseHandle(PVRSRV_HANDLE_BASE *psBase, + * IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType); + * + * This function combines the functionality of PVRSRVLookupHandle and + * PVRSRVReleaseHandle, deallocating the handle after looking it up. + * + * PVRSRV_ERROR PVRSRVGetParentHandle(PVRSRV_HANDLE_BASE *psBase, + * IMG_PVOID *phParent, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType); + * + * Return the parent of a handle in *phParent, or IMG_NULL if the handle has + * no parent. + * + * PVRSRV_ERROR PVRSRVNewHandleBatch(PVRSRV_HANDLE_BASE *psBase, + * IMG_UINT32 ui32BatchSize) + * + * Allocate a new handle batch. This preallocates ui32BatchSize handles. + * Batch mode simplifies the handling of handle allocation failures. + * The handle API is unchanged in batch mode, except that handles freed + * in batch mode will not be available for reallocation until the batch + * is committed or released (see below). + * + * PVRSRV_ERROR PVRSRVCommitHandleBatch(PVRSRV_HANDLE_BASE *psBase) + * void PVRSRVReleaseHandleBatch(PVRSRV_HANDLE_BASE *psBase) + * + * When handle allocation from a handle batch is complete, the + * batch must be committed by calling PVRSRVCommitHandleBatch. If + * an error occurred, and none of the handles in the batch are no + * longer needed, PVRSRVReleaseHandleBatch must be called. + * The macros PVRSRVAllocHandleNR, and PVRSRVAllocSubHandleNR + * are defined for use in batch mode. These work the same way + * as PVRSRVAllocHandle and PVRSRVAllocSubHandle, except that + * they don't return a value, relying on the fact that + * PVRSRVCommitHandleBatch will not commit any of the handles + * in a batch if there was an error allocating one of the + * handles in the batch. 
+ * + * PVRSRV_ERROR PVRSRVSetMaxHandle(PVRSRV_HANDLE_BASE *psBase, + * IMG_UINT32 ui32MaxHandle) + * Set the maximum handle number. This is intended to restrict the + * handle range so that it will fit within a given field width. For + * example, setting the maximum handle number to 0x7fffffff, would + * ensure the handles would fit within a 31 bit width field. This + * facility should be used with caution, as it restricts the number of + * handles that can be allocated. + * + * IMG_UINT32 PVRSRVGetMaxHandle(PVRSRV_HANDLE_BASE *psBase) + * Return the maximum handle number, or 0 if the setting of a limit + * is not supported. + * + * PVRSRV_ERROR PVRSRVEnableHandlePurging(PVRSRV_HANDLE_BASE *psBase) + * Allows unused handle space to be reclaimed, by calling + * PVRSRVPurgeHandles. Note that allocating handles may have a + * higher overhead if purging is enabled. + * + * PVRSRV_ERROR PVRSRVPurgeHandles((PVRSRV_HANDLE_BASE *psBase) + * Purge handles for a handle base that has purging enabled. 
+ */ + +#if defined (__cplusplus) +extern "C" { +#endif + +#include "img_types.h" +#include "hash.h" +#include "resman.h" + +typedef enum +{ + PVRSRV_HANDLE_TYPE_NONE = 0, + PVRSRV_HANDLE_TYPE_PERPROC_DATA, + PVRSRV_HANDLE_TYPE_DEV_NODE, + PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT, + PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP, + PVRSRV_HANDLE_TYPE_MEM_INFO, + PVRSRV_HANDLE_TYPE_SYNC_INFO, + PVRSRV_HANDLE_TYPE_DISP_INFO, + PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN, + PVRSRV_HANDLE_TYPE_BUF_INFO, + PVRSRV_HANDLE_TYPE_DISP_BUFFER, + PVRSRV_HANDLE_TYPE_BUF_BUFFER, + PVRSRV_HANDLE_TYPE_SGX_HW_RENDER_CONTEXT, + PVRSRV_HANDLE_TYPE_SGX_HW_TRANSFER_CONTEXT, + PVRSRV_HANDLE_TYPE_SGX_HW_2D_CONTEXT, + PVRSRV_HANDLE_TYPE_SHARED_PB_DESC, + PVRSRV_HANDLE_TYPE_MEM_INFO_REF, + PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO, + PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT, + PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT, + PVRSRV_HANDLE_TYPE_MMAP_INFO, + PVRSRV_HANDLE_TYPE_SOC_TIMER, + PVRSRV_HANDLE_TYPE_SYNC_INFO_MOD_OBJ, + PVRSRV_HANDLE_TYPE_RESITEM_INFO +} PVRSRV_HANDLE_TYPE; + +typedef enum +{ + /* No flags */ + PVRSRV_HANDLE_ALLOC_FLAG_NONE = 0, + /* Share a handle that already exists for a given data pointer */ + PVRSRV_HANDLE_ALLOC_FLAG_SHARED = 0x01, + /* Muliple handles can point at the given data pointer */ + PVRSRV_HANDLE_ALLOC_FLAG_MULTI = 0x02, + /* Subhandles are allocated in a private handle space */ + PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE = 0x04 +} PVRSRV_HANDLE_ALLOC_FLAG; + +struct _PVRSRV_HANDLE_BASE_; +typedef struct _PVRSRV_HANDLE_BASE_ PVRSRV_HANDLE_BASE; + +#if defined(PVR_SECURE_HANDLES) +extern PVRSRV_HANDLE_BASE *gpsKernelHandleBase; + +#define KERNEL_HANDLE_BASE (gpsKernelHandleBase) + +PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag); + +PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, 
IMG_HANDLE hParent); + +PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType); + +PVRSRV_ERROR PVRSRVLookupHandleAnyType(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, PVRSRV_HANDLE_TYPE *peType, IMG_HANDLE hHandle); + +PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType); + +PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType, IMG_HANDLE hAncestor); + +PVRSRV_ERROR PVRSRVGetParentHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *phParent, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType); + +PVRSRV_ERROR PVRSRVLookupAndReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType); + +PVRSRV_ERROR PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType); + +PVRSRV_ERROR PVRSRVNewHandleBatch(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32BatchSize); + +PVRSRV_ERROR PVRSRVCommitHandleBatch(PVRSRV_HANDLE_BASE *psBase); + +IMG_VOID PVRSRVReleaseHandleBatch(PVRSRV_HANDLE_BASE *psBase); + +PVRSRV_ERROR PVRSRVSetMaxHandle(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32MaxHandle); + +IMG_UINT32 PVRSRVGetMaxHandle(PVRSRV_HANDLE_BASE *psBase); + +PVRSRV_ERROR PVRSRVEnableHandlePurging(PVRSRV_HANDLE_BASE *psBase); + +PVRSRV_ERROR PVRSRVPurgeHandles(PVRSRV_HANDLE_BASE *psBase); + +PVRSRV_ERROR PVRSRVAllocHandleBase(PVRSRV_HANDLE_BASE **ppsBase); + +PVRSRV_ERROR PVRSRVFreeHandleBase(PVRSRV_HANDLE_BASE *psBase); + +PVRSRV_ERROR PVRSRVHandleInit(IMG_VOID); + +PVRSRV_ERROR PVRSRVHandleDeInit(IMG_VOID); + +#else /* #if defined (PVR_SECURE_HANDLES) */ + +#define KERNEL_HANDLE_BASE IMG_NULL + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVAllocHandle) +#endif +static INLINE +PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, 
PVRSRV_HANDLE_ALLOC_FLAG eFlag) +{ + PVR_UNREFERENCED_PARAMETER(eType); + PVR_UNREFERENCED_PARAMETER(eFlag); + PVR_UNREFERENCED_PARAMETER(psBase); + + *phHandle = pvData; + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVAllocSubHandle) +#endif +static INLINE +PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent) +{ + PVR_UNREFERENCED_PARAMETER(eType); + PVR_UNREFERENCED_PARAMETER(eFlag); + PVR_UNREFERENCED_PARAMETER(hParent); + PVR_UNREFERENCED_PARAMETER(psBase); + + *phHandle = pvData; + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVFindHandle) +#endif +static INLINE +PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType) +{ + PVR_UNREFERENCED_PARAMETER(eType); + PVR_UNREFERENCED_PARAMETER(psBase); + + *phHandle = pvData; + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVLookupHandleAnyType) +#endif +static INLINE +PVRSRV_ERROR PVRSRVLookupHandleAnyType(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, PVRSRV_HANDLE_TYPE *peType, IMG_HANDLE hHandle) +{ + PVR_UNREFERENCED_PARAMETER(psBase); + /* + * Unlike the other functions here, the returned results will need + * to be handled differently for the secure and non-secure cases. 
+ */ + *peType = PVRSRV_HANDLE_TYPE_NONE; + + *ppvData = hHandle; + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVLookupHandle) +#endif +static INLINE +PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType) +{ + PVR_UNREFERENCED_PARAMETER(psBase); + PVR_UNREFERENCED_PARAMETER(eType); + + *ppvData = hHandle; + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVLookupSubHandle) +#endif +static INLINE +PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType, IMG_HANDLE hAncestor) +{ + PVR_UNREFERENCED_PARAMETER(psBase); + PVR_UNREFERENCED_PARAMETER(eType); + PVR_UNREFERENCED_PARAMETER(hAncestor); + + *ppvData = hHandle; + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVGetParentHandle) +#endif +static INLINE +PVRSRV_ERROR PVRSRVGetParentHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *phParent, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType) +{ + PVR_UNREFERENCED_PARAMETER(psBase); + PVR_UNREFERENCED_PARAMETER(eType); + PVR_UNREFERENCED_PARAMETER(hHandle); + + *phParent = IMG_NULL; + + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVLookupAndReleaseHandle) +#endif +static INLINE +PVRSRV_ERROR PVRSRVLookupAndReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType) +{ + PVR_UNREFERENCED_PARAMETER(eType); + PVR_UNREFERENCED_PARAMETER(psBase); + + *ppvData = hHandle; + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVReleaseHandle) +#endif +static INLINE +PVRSRV_ERROR PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType) +{ + PVR_UNREFERENCED_PARAMETER(hHandle); + PVR_UNREFERENCED_PARAMETER(eType); + PVR_UNREFERENCED_PARAMETER(psBase); + + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVNewHandleBatch) +#endif +static 
INLINE +PVRSRV_ERROR PVRSRVNewHandleBatch(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32BatchSize) +{ + PVR_UNREFERENCED_PARAMETER(psBase); + PVR_UNREFERENCED_PARAMETER(ui32BatchSize); + + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVCommitHandleBatch) +#endif +static INLINE +PVRSRV_ERROR PVRSRVCommitHandleBatch(PVRSRV_HANDLE_BASE *psBase) +{ + PVR_UNREFERENCED_PARAMETER(psBase); + + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVReleaseHandleBatch) +#endif +static INLINE +IMG_VOID PVRSRVReleaseHandleBatch(PVRSRV_HANDLE_BASE *psBase) +{ + PVR_UNREFERENCED_PARAMETER(psBase); +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVSetMaxHandle) +#endif +static INLINE +PVRSRV_ERROR PVRSRVSetMaxHandle(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32MaxHandle) +{ + PVR_UNREFERENCED_PARAMETER(psBase); + PVR_UNREFERENCED_PARAMETER(ui32MaxHandle); + + return PVRSRV_ERROR_NOT_SUPPORTED; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVGetMaxHandle) +#endif +static INLINE +IMG_UINT32 PVRSRVGetMaxHandle(PVRSRV_HANDLE_BASE *psBase) +{ + PVR_UNREFERENCED_PARAMETER(psBase); + + return 0; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVEnableHandlePurging) +#endif +static INLINE +PVRSRV_ERROR PVRSRVEnableHandlePurging(PVRSRV_HANDLE_BASE *psBase) +{ + PVR_UNREFERENCED_PARAMETER(psBase); + + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVPurgeHandles) +#endif +static INLINE +PVRSRV_ERROR PVRSRVPurgeHandles(PVRSRV_HANDLE_BASE *psBase) +{ + PVR_UNREFERENCED_PARAMETER(psBase); + + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVAllocHandleBase) +#endif +static INLINE +PVRSRV_ERROR PVRSRVAllocHandleBase(PVRSRV_HANDLE_BASE **ppsBase) +{ + *ppsBase = IMG_NULL; + + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVFreeHandleBase) +#endif +static INLINE +PVRSRV_ERROR PVRSRVFreeHandleBase(PVRSRV_HANDLE_BASE *psBase) +{ + PVR_UNREFERENCED_PARAMETER(psBase); + + return PVRSRV_OK; +} + 
+#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVHandleInit) +#endif +static INLINE +PVRSRV_ERROR PVRSRVHandleInit(IMG_VOID) +{ + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVHandleDeInit) +#endif +static INLINE +PVRSRV_ERROR PVRSRVHandleDeInit(IMG_VOID) +{ + return PVRSRV_OK; +} + +#endif /* #if defined (PVR_SECURE_HANDLES) */ + +/* + * Versions of PVRSRVAllocHandle and PVRSRVAllocSubHandle with no return + * values. Intended for use with batched handle allocation, relying on + * CommitHandleBatch to detect handle allocation errors. + */ +#define PVRSRVAllocHandleNR(psBase, phHandle, pvData, eType, eFlag) \ + (IMG_VOID)PVRSRVAllocHandle(psBase, phHandle, pvData, eType, eFlag) + +#define PVRSRVAllocSubHandleNR(psBase, phHandle, pvData, eType, eFlag, hParent) \ + (IMG_VOID)PVRSRVAllocSubHandle(psBase, phHandle, pvData, eType, eFlag, hParent) + +#if defined (__cplusplus) +} +#endif + +#endif /* __HANDLE_H__ */ + +/****************************************************************************** + End of file (handle.h) +******************************************************************************/ diff --git a/sgx_km/eurasia_km/services4/srvkm/include/hash.h b/sgx_km/eurasia_km/services4/srvkm/include/hash.h new file mode 100644 index 0000000..1ed6fd0 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/include/hash.h @@ -0,0 +1,277 @@ +/*************************************************************************/ /*! +@Title Self scaling hash tables +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements simple self scaling hash tables. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef _HASH_H_ +#define _HASH_H_ + +#include "img_types.h" +#include "osfunc.h" + +#if defined (__cplusplus) +extern "C" { +#endif + +/* + * Keys passed to the comparison function are only guaranteed to + * be aligned on an IMG_UINTPTR_T boundary. + */ +typedef IMG_UINT32 HASH_FUNC(IMG_SIZE_T uKeySize, IMG_VOID *pKey, IMG_UINT32 uHashTabLen); +typedef IMG_BOOL HASH_KEY_COMP(IMG_SIZE_T uKeySize, IMG_VOID *pKey1, IMG_VOID *pKey2); + +typedef struct _HASH_TABLE_ HASH_TABLE; + +typedef PVRSRV_ERROR (*HASH_pfnCallback) ( + IMG_UINTPTR_T k, + IMG_UINTPTR_T v +); + +/*! +****************************************************************************** + @Function HASH_Func_Default + + @Description Hash function intended for hashing keys composed of + IMG_UINTPTR_T arrays. + + @Input uKeySize - the size of the hash key, in bytes. + @Input pKey - a pointer to the key to hash. + @Input uHashTabLen - the length of the hash table. + + @Return The hash value. +******************************************************************************/ +IMG_UINT32 HASH_Func_Default (IMG_SIZE_T uKeySize, IMG_VOID *pKey, IMG_UINT32 uHashTabLen); + +/*! +****************************************************************************** + @Function HASH_Key_Comp_Default + + @Description Compares keys composed of IMG_UINTPTR_T arrays. + + @Input uKeySize - the size of the hash key, in bytes. + @Input pKey1 - pointer to first hash key to compare. + @Input pKey2 - pointer to second hash key to compare. + + @Return IMG_TRUE - the keys match. + IMG_FALSE - the keys don't match. +******************************************************************************/ +IMG_BOOL HASH_Key_Comp_Default (IMG_SIZE_T uKeySize, IMG_VOID *pKey1, IMG_VOID *pKey2); + +/*!
+****************************************************************************** + @Function HASH_Create_Extended + + @Description Create a self scaling hash table, using the supplied + key size, and the supplied hash and key comparison + functions. + + @Input uInitialLen - initial and minimum length of the + hash table, where the length refers to the number + of entries in the hash table, not its size in + bytes. + @Input uKeySize - the size of the key, in bytes. + @Input pfnHashFunc - pointer to hash function. + @Input pfnKeyComp - pointer to key comparison function. + + @Return IMG_NULL or hash table handle. +******************************************************************************/ +HASH_TABLE * HASH_Create_Extended (IMG_UINT32 uInitialLen, IMG_SIZE_T uKeySize, HASH_FUNC *pfnHashFunc, HASH_KEY_COMP *pfnKeyComp); + +/*! +****************************************************************************** + @Function HASH_Create + + @Description Create a self scaling hash table with a key + consisting of a single IMG_UINTPTR_T, and using + the default hash and key comparison functions. + + @Input uInitialLen - initial and minimum length of the + hash table, where the length refers to the + number of entries in the hash table, not its size + in bytes. + + @Return IMG_NULL or hash table handle. +******************************************************************************/ +HASH_TABLE * HASH_Create (IMG_UINT32 uInitialLen); + +/*! +****************************************************************************** + @Function HASH_Delete + + @Description Delete a hash table created by HASH_Create_Extended or + HASH_Create. All entries in the table must have been + removed before calling this function. + + @Input pHash - hash table + + @Return None +******************************************************************************/ +IMG_VOID HASH_Delete (HASH_TABLE *pHash); + +/*!
+****************************************************************************** + @Function HASH_Insert_Extended + + @Description Insert a key value pair into a hash table created + with HASH_Create_Extended. + + @Input pHash - the hash table. + @Input pKey - pointer to the key. + @Input v - the value associated with the key. + + @Return IMG_TRUE - success. + IMG_FALSE - failure. +******************************************************************************/ +IMG_BOOL HASH_Insert_Extended (HASH_TABLE *pHash, IMG_VOID *pKey, IMG_UINTPTR_T v); + +/*! +****************************************************************************** + @Function HASH_Insert + + @Description Insert a key value pair into a hash table created with + HASH_Create. + + @Input pHash - the hash table. + @Input k - the key value. + @Input v - the value associated with the key. + + @Return IMG_TRUE - success. + IMG_FALSE - failure. +******************************************************************************/ +IMG_BOOL HASH_Insert (HASH_TABLE *pHash, IMG_UINTPTR_T k, IMG_UINTPTR_T v); + +/*! +****************************************************************************** + @Function HASH_Remove_Extended + + @Description Remove a key from a hash table created with + HASH_Create_Extended. + + @Input pHash - the hash table. + @Input pKey - pointer to key. + + @Return 0 if the key is missing, or the value associated + with the key. +******************************************************************************/ +IMG_UINTPTR_T HASH_Remove_Extended(HASH_TABLE *pHash, IMG_VOID *pKey); + +/*! +****************************************************************************** + @Function HASH_Remove + + @Description Remove a key value pair from a hash table created + with HASH_Create. + + @Input pHash - the hash table + @Input k - the key + + @Return 0 if the key is missing, or the value associated + with the key. 
+******************************************************************************/ +IMG_UINTPTR_T HASH_Remove (HASH_TABLE *pHash, IMG_UINTPTR_T k); + +/*! +****************************************************************************** + @Function HASH_Retrieve_Extended + + @Description Retrieve a value from a hash table created with + HASH_Create_Extended. + + @Input pHash - the hash table. + @Input pKey - pointer to the key. + + @Return 0 if the key is missing, or the value associated with + the key. +******************************************************************************/ +IMG_UINTPTR_T HASH_Retrieve_Extended (HASH_TABLE *pHash, IMG_VOID *pKey); + +/*! +****************************************************************************** + @Function HASH_Retrieve + + @Description Retrieve a value from a hash table created with + HASH_Create. + + @Input pHash - the hash table + @Input k - the key + + @Return 0 if the key is missing, or the value associated with + the key. +******************************************************************************/ +IMG_UINTPTR_T HASH_Retrieve (HASH_TABLE *pHash, IMG_UINTPTR_T k); + +/*! +****************************************************************************** + @Function HASH_Iterate + + @Description Iterate over every entry in the hash table + + @Input pHash - the hash table + @Input pfnCallback - the callback function applied to each entry + + @Return Callback error if any, otherwise PVRSRV_OK +******************************************************************************/ +PVRSRV_ERROR HASH_Iterate(HASH_TABLE *pHash, HASH_pfnCallback pfnCallback); + +#ifdef HASH_TRACE +/*! +****************************************************************************** + @Function HASH_Dump + + @Description Dump out some information about a hash table.
+ + @Input pHash - the hash table + + @Return None +******************************************************************************/ +IMG_VOID HASH_Dump (HASH_TABLE *pHash); +#endif + +#if defined (__cplusplus) +} +#endif + +#endif /* _HASH_H_ */ + +/****************************************************************************** + End of file (hash.h) +******************************************************************************/ + + diff --git a/sgx_km/eurasia_km/services4/srvkm/include/ion_sync.h b/sgx_km/eurasia_km/services4/srvkm/include/ion_sync.h new file mode 100644 index 0000000..8600a19 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/include/ion_sync.h @@ -0,0 +1,73 @@ +/*************************************************************************/ /*! +@Title Services Ion synchronisation integration +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "img_defs.h" +#include "img_types.h" +#include "servicesint.h" + +#ifndef __ION_SYNC_H__ +#define __ION_SYNC_H__ + +typedef struct _PVRSRV_ION_SYNC_INFO_ { + PVRSRV_KERNEL_SYNC_INFO *psSyncInfo; + IMG_HANDLE hUnique; + IMG_UINT32 ui32RefCount; + IMG_UINT64 ui64Stamp; +} PVRSRV_ION_SYNC_INFO; + +PVRSRV_ERROR PVRSRVIonBufferSyncAcquire(IMG_HANDLE hUnique, + IMG_HANDLE hDevCookie, + IMG_HANDLE hDevMemContext, + PVRSRV_ION_SYNC_INFO **ppsIonSyncInfo); + +IMG_VOID PVRSRVIonBufferSyncRelease(PVRSRV_ION_SYNC_INFO *psIonSyncInfo); + +static INLINE PVRSRV_KERNEL_SYNC_INFO *IonBufferSyncGetKernelSyncInfo(PVRSRV_ION_SYNC_INFO *psIonSyncInfo) +{ + return psIonSyncInfo->psSyncInfo; +} + +static INLINE IMG_UINT64 IonBufferSyncGetStamp(PVRSRV_ION_SYNC_INFO *psIonSyncInfo) +{ + return psIonSyncInfo->ui64Stamp; +} + +#endif /* __ION_SYNC_H__ */ diff --git a/sgx_km/eurasia_km/services4/srvkm/include/lists.h b/sgx_km/eurasia_km/services4/srvkm/include/lists.h new file mode 100644 index 0000000..31f5409 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/include/lists.h @@ -0,0 +1,353 @@ +/*************************************************************************/ /*! +@Title Linked list shared functions templates. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Definition of the linked list function templates. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef __LISTS_UTILS__ +#define __LISTS_UTILS__ + +/* instruct QAC to ignore warnings about the following custom formatted macros */ +/* PRQA S 0881,3410 ++ */ +#include <stdarg.h> +#include "img_types.h" + +/* + - USAGE - + + The list functions work with any structure that provides the fields psNext and + ppsThis. In order to make a function available for a given type, it is required + to use the function template macro that creates the actual code. + + There are 4 main types of functions: + - INSERT : given a pointer to the head pointer of the list and a pointer to + the node, inserts it as the new head. + - REMOVE : given a pointer to a node, removes it from its list. + - FOR EACH : apply a function over all the elements of a list. + - ANY : apply a function over the elements of a list, until one of them + returns a non null value, and then returns it. + + The two last functions can have a variable argument form, which allows to pass + additional parameters to the callback function. In order to do this, the + callback function must take two arguments, the first is the current node and + the second is a list of variable arguments (va_list). + + The ANY functions have also another form which specifies the return type of the + callback function and the default value returned by the callback function. + +*/ + +/*! +****************************************************************************** + @Function List_##TYPE##_ForEach + + @Description Apply a callback function to all the elements of a list. + + @Input psHead - the head of the list to be processed. + @Input pfnCallBack - the function to be applied to each element + of the list.
+ + @Return None +******************************************************************************/ +#define DECLARE_LIST_FOR_EACH(TYPE) \ +IMG_VOID List_##TYPE##_ForEach(TYPE *psHead, IMG_VOID(*pfnCallBack)(TYPE* psNode)) + +#define IMPLEMENT_LIST_FOR_EACH(TYPE) \ +IMG_VOID List_##TYPE##_ForEach(TYPE *psHead, IMG_VOID(*pfnCallBack)(TYPE* psNode))\ +{\ + while(psHead)\ + {\ + pfnCallBack(psHead);\ + psHead = psHead->psNext;\ + }\ +} + + +#define DECLARE_LIST_FOR_EACH_VA(TYPE) \ +IMG_VOID List_##TYPE##_ForEach_va(TYPE *psHead, IMG_VOID(*pfnCallBack)(TYPE* psNode, va_list va), ...) + +#define IMPLEMENT_LIST_FOR_EACH_VA(TYPE) \ +IMG_VOID List_##TYPE##_ForEach_va(TYPE *psHead, IMG_VOID(*pfnCallBack)(TYPE* psNode, va_list va), ...) \ +{\ + va_list ap;\ + while(psHead)\ + {\ + va_start(ap, pfnCallBack);\ + pfnCallBack(psHead, ap);\ + psHead = psHead->psNext;\ + va_end(ap);\ + }\ +} + + +/*! +****************************************************************************** + @Function List_##TYPE##_Any + + @Description Applies a callback function to the elements of a list until + the function returns a non null value, then returns it. + + @Input psHead - the head of the list to be processed. + @Input pfnCallBack - the function to be applied to each element + of the list. 
+ + @Return None +******************************************************************************/ +#define DECLARE_LIST_ANY(TYPE) \ +IMG_VOID* List_##TYPE##_Any(TYPE *psHead, IMG_VOID* (*pfnCallBack)(TYPE* psNode)) + +#define IMPLEMENT_LIST_ANY(TYPE) \ +IMG_VOID* List_##TYPE##_Any(TYPE *psHead, IMG_VOID* (*pfnCallBack)(TYPE* psNode))\ +{ \ + IMG_VOID *pResult;\ + TYPE *psNextNode;\ + pResult = IMG_NULL;\ + psNextNode = psHead;\ + while(psHead && !pResult)\ + {\ + psNextNode = psNextNode->psNext;\ + pResult = pfnCallBack(psHead);\ + psHead = psNextNode;\ + }\ + return pResult;\ +} + + +/*with variable arguments, that will be passed as a va_list to the callback function*/ + +#define DECLARE_LIST_ANY_VA(TYPE) \ +IMG_VOID* List_##TYPE##_Any_va(TYPE *psHead, IMG_VOID*(*pfnCallBack)(TYPE* psNode, va_list va), ...) + +#define IMPLEMENT_LIST_ANY_VA(TYPE) \ +IMG_VOID* List_##TYPE##_Any_va(TYPE *psHead, IMG_VOID*(*pfnCallBack)(TYPE* psNode, va_list va), ...)\ +{\ + va_list ap;\ + TYPE *psNextNode;\ + IMG_VOID* pResult = IMG_NULL;\ + while(psHead && !pResult)\ + {\ + psNextNode = psHead->psNext;\ + va_start(ap, pfnCallBack);\ + pResult = pfnCallBack(psHead, ap);\ + va_end(ap);\ + psHead = psNextNode;\ + }\ + return pResult;\ +} + +/*those ones are for extra type safety, so there's no need to use castings for the results*/ + +#define DECLARE_LIST_ANY_2(TYPE, RTYPE, CONTINUE) \ +RTYPE List_##TYPE##_##RTYPE##_Any(TYPE *psHead, RTYPE (*pfnCallBack)(TYPE* psNode)) + +#define IMPLEMENT_LIST_ANY_2(TYPE, RTYPE, CONTINUE) \ +RTYPE List_##TYPE##_##RTYPE##_Any(TYPE *psHead, RTYPE (*pfnCallBack)(TYPE* psNode))\ +{ \ + RTYPE result;\ + TYPE *psNextNode;\ + result = CONTINUE;\ + psNextNode = psHead;\ + while(psHead && result == CONTINUE)\ + {\ + psNextNode = psNextNode->psNext;\ + result = pfnCallBack(psHead);\ + psHead = psNextNode;\ + }\ + return result;\ +} + + +#define DECLARE_LIST_ANY_VA_2(TYPE, RTYPE, CONTINUE) \ +RTYPE List_##TYPE##_##RTYPE##_Any_va(TYPE *psHead, 
RTYPE(*pfnCallBack)(TYPE* psNode, va_list va), ...) + +#define IMPLEMENT_LIST_ANY_VA_2(TYPE, RTYPE, CONTINUE) \ +RTYPE List_##TYPE##_##RTYPE##_Any_va(TYPE *psHead, RTYPE(*pfnCallBack)(TYPE* psNode, va_list va), ...)\ +{\ + va_list ap;\ + TYPE *psNextNode;\ + RTYPE result = CONTINUE;\ + while(psHead && result == CONTINUE)\ + {\ + psNextNode = psHead->psNext;\ + va_start(ap, pfnCallBack);\ + result = pfnCallBack(psHead, ap);\ + va_end(ap);\ + psHead = psNextNode;\ + }\ + return result;\ +} + + +/*! +****************************************************************************** + @Function List_##TYPE##_Remove + + @Description Removes a given node from the list. + + @Input psNode - the pointer to the node to be removed. + + @Return None +******************************************************************************/ +#define DECLARE_LIST_REMOVE(TYPE) \ +IMG_VOID List_##TYPE##_Remove(TYPE *psNode) + +#define IMPLEMENT_LIST_REMOVE(TYPE) \ +IMG_VOID List_##TYPE##_Remove(TYPE *psNode)\ +{\ + (*psNode->ppsThis)=psNode->psNext;\ + if(psNode->psNext)\ + {\ + psNode->psNext->ppsThis = psNode->ppsThis;\ + }\ +} + +/*! +****************************************************************************** + @Function List_##TYPE##_Insert + + @Description Inserts a given node at the beginnning of the list. + + @Input psHead - The pointer to the pointer to the head node. + @Input psNode - The pointer to the node to be inserted. + + @Return None +******************************************************************************/ +#define DECLARE_LIST_INSERT(TYPE) \ +IMG_VOID List_##TYPE##_Insert(TYPE **ppsHead, TYPE *psNewNode) + +#define IMPLEMENT_LIST_INSERT(TYPE) \ +IMG_VOID List_##TYPE##_Insert(TYPE **ppsHead, TYPE *psNewNode)\ +{\ + psNewNode->ppsThis = ppsHead;\ + psNewNode->psNext = *ppsHead;\ + *ppsHead = psNewNode;\ + if(psNewNode->psNext)\ + {\ + psNewNode->psNext->ppsThis = &(psNewNode->psNext);\ + }\ +} + +/*! 
+****************************************************************************** + @Function List_##TYPE##_Reverse + + @Description Reverse a list in place + + @Input ppsHead - The pointer to the pointer to the head node. + + @Return None +******************************************************************************/ +#define DECLARE_LIST_REVERSE(TYPE) \ +IMG_VOID List_##TYPE##_Reverse(TYPE **ppsHead) + +#define IMPLEMENT_LIST_REVERSE(TYPE) \ +IMG_VOID List_##TYPE##_Reverse(TYPE **ppsHead)\ +{\ + TYPE *psTmpNode1; \ + TYPE *psTmpNode2; \ + TYPE *psCurNode; \ + psTmpNode1 = IMG_NULL; \ + psCurNode = *ppsHead; \ + while(psCurNode) { \ + psTmpNode2 = psCurNode->psNext; \ + psCurNode->psNext = psTmpNode1; \ + psTmpNode1 = psCurNode; \ + psCurNode = psTmpNode2; \ + if(psCurNode) \ + { \ + psTmpNode1->ppsThis = &(psCurNode->psNext); \ + } \ + else \ + { \ + psTmpNode1->ppsThis = ppsHead; \ + } \ + } \ + *ppsHead = psTmpNode1; \ +} + +#define IS_LAST_ELEMENT(x) ((x)->psNext == IMG_NULL) + +#include "services_headers.h" + +DECLARE_LIST_ANY_VA(BM_HEAP); +DECLARE_LIST_ANY_2(BM_HEAP, PVRSRV_ERROR, PVRSRV_OK); +DECLARE_LIST_ANY_VA_2(BM_HEAP, PVRSRV_ERROR, PVRSRV_OK); +DECLARE_LIST_FOR_EACH_VA(BM_HEAP); +DECLARE_LIST_REMOVE(BM_HEAP); +DECLARE_LIST_INSERT(BM_HEAP); + +DECLARE_LIST_ANY_VA(BM_CONTEXT); +DECLARE_LIST_ANY_VA_2(BM_CONTEXT, IMG_HANDLE, IMG_NULL); +DECLARE_LIST_ANY_VA_2(BM_CONTEXT, PVRSRV_ERROR, PVRSRV_OK); +DECLARE_LIST_FOR_EACH(BM_CONTEXT); +DECLARE_LIST_REMOVE(BM_CONTEXT); +DECLARE_LIST_INSERT(BM_CONTEXT); + +DECLARE_LIST_ANY_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK); +DECLARE_LIST_ANY_VA(PVRSRV_DEVICE_NODE); +DECLARE_LIST_ANY_VA_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK); +DECLARE_LIST_FOR_EACH(PVRSRV_DEVICE_NODE); +DECLARE_LIST_FOR_EACH_VA(PVRSRV_DEVICE_NODE); +DECLARE_LIST_INSERT(PVRSRV_DEVICE_NODE); +DECLARE_LIST_REMOVE(PVRSRV_DEVICE_NODE); + +DECLARE_LIST_ANY_VA(PVRSRV_POWER_DEV); +DECLARE_LIST_ANY_VA_2(PVRSRV_POWER_DEV, PVRSRV_ERROR, PVRSRV_OK); 
+DECLARE_LIST_INSERT(PVRSRV_POWER_DEV); +DECLARE_LIST_REMOVE(PVRSRV_POWER_DEV); + +DECLARE_LIST_ANY_2(PVRSRV_KERNEL_SYNC_INFO, PVRSRV_ERROR, PVRSRV_OK); +DECLARE_LIST_INSERT(PVRSRV_KERNEL_SYNC_INFO); +DECLARE_LIST_REMOVE(PVRSRV_KERNEL_SYNC_INFO); + +#undef DECLARE_LIST_ANY_2 +#undef DECLARE_LIST_ANY_VA +#undef DECLARE_LIST_ANY_VA_2 +#undef DECLARE_LIST_FOR_EACH +#undef DECLARE_LIST_FOR_EACH_VA +#undef DECLARE_LIST_INSERT +#undef DECLARE_LIST_REMOVE + +IMG_VOID* MatchDeviceKM_AnyVaCb(PVRSRV_DEVICE_NODE* psDeviceNode, va_list va); +IMG_VOID* MatchPowerDeviceIndex_AnyVaCb(PVRSRV_POWER_DEV *psPowerDev, va_list va); + +#endif + +/* re-enable warnings */ +/* PRQA S 0881,3410 -- */ diff --git a/sgx_km/eurasia_km/services4/srvkm/include/metrics.h b/sgx_km/eurasia_km/services4/srvkm/include/metrics.h new file mode 100644 index 0000000..18079cb --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/include/metrics.h @@ -0,0 +1,146 @@ +/*************************************************************************/ /*! +@Title Time measurement interface. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef _METRICS_ +#define _METRICS_ + + +#if defined (__cplusplus) +extern "C" { +#endif + + +#if defined(DEBUG) || defined(TIMING) + + +typedef struct +{ + IMG_UINT32 ui32Start; + IMG_UINT32 ui32Stop; + IMG_UINT32 ui32Total; + IMG_UINT32 ui32Count; +} Temporal_Data; + +extern Temporal_Data asTimers[]; + +extern IMG_UINT32 PVRSRVTimeNow(IMG_VOID); +extern IMG_VOID PVRSRVSetupMetricTimers(IMG_VOID *pvDevInfo); +extern IMG_VOID PVRSRVOutputMetricTotals(IMG_VOID); + + +#define PVRSRV_TIMER_DUMMY 0 + +#define PVRSRV_TIMER_EXAMPLE_1 1 +#define PVRSRV_TIMER_EXAMPLE_2 2 + + +#define PVRSRV_NUM_TIMERS (PVRSRV_TIMER_EXAMPLE_2 + 1) + +#define PVRSRV_TIME_START(X) { \ + asTimers[X].ui32Count += 1; \ + asTimers[X].ui32Count |= 0x80000000L; \ + asTimers[X].ui32Start = PVRSRVTimeNow(); \ + asTimers[X].ui32Stop = 0; \ + } + +#define PVRSRV_TIME_SUSPEND(X) { \ + asTimers[X].ui32Stop += PVRSRVTimeNow() - asTimers[X].ui32Start; \ + } + +#define PVRSRV_TIME_RESUME(X) { \ + asTimers[X].ui32Start = PVRSRVTimeNow(); \ + } + +#define PVRSRV_TIME_STOP(X) { \ + asTimers[X].ui32Stop += PVRSRVTimeNow() - asTimers[X].ui32Start; \ + asTimers[X].ui32Total += asTimers[X].ui32Stop; \ + asTimers[X].ui32Count &= 0x7FFFFFFFL; \ + } + +#define PVRSRV_TIME_RESET(X) { \ + asTimers[X].ui32Start = 0; \ + asTimers[X].ui32Stop = 0; \ + asTimers[X].ui32Total = 0; \ + asTimers[X].ui32Count = 0; \ + } + + +#if defined(__sh__) + +#define TST_REG ((volatile IMG_UINT8 *) (psDevInfo->pvSOCRegsBaseKM)) // timer start register + +#define TCOR_2 ((volatile IMG_UINT *) (psDevInfo->pvSOCRegsBaseKM+28)) // timer constant register_2 +#define TCNT_2 ((volatile IMG_UINT *) (psDevInfo->pvSOCRegsBaseKM+32)) // timer counter register_2 +#define TCR_2 ((volatile IMG_UINT16 *)(psDevInfo->pvSOCRegsBaseKM+36)) // timer control register_2 + +#define TIMER_DIVISOR 4 + +#endif /* defined(__sh__) */ + + + +#else /* defined(DEBUG) || defined(TIMING) 
*/ + + + +#define PVRSRV_TIME_START(X) +#define PVRSRV_TIME_SUSPEND(X) +#define PVRSRV_TIME_RESUME(X) +#define PVRSRV_TIME_STOP(X) +#define PVRSRV_TIME_RESET(X) + +#define PVRSRVSetupMetricTimers(X) +#define PVRSRVOutputMetricTotals() + + + +#endif /* defined(DEBUG) || defined(TIMING) */ + +#if defined(__cplusplus) +} +#endif + + +#endif /* _METRICS_ */ + +/************************************************************************** + End of file (metrics.h) +**************************************************************************/ diff --git a/sgx_km/eurasia_km/services4/srvkm/include/osfunc.h b/sgx_km/eurasia_km/services4/srvkm/include/osfunc.h new file mode 100644 index 0000000..61d4be3 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/include/osfunc.h @@ -0,0 +1,802 @@ +/*************************************************************************/ /*! +@Title OS functions header +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description OS specific API definitions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#ifdef DEBUG_RELEASE_BUILD +#pragma optimize( "", off ) +#define DEBUG 1 +#endif + +#ifndef __OSFUNC_H__ +#define __OSFUNC_H__ + +#if defined (__cplusplus) +extern "C" { +#endif + +#if defined(__linux__) && defined(__KERNEL__) +#include +#include +#if defined(__arm__) || defined(__aarch64__) +#include +#endif +#endif + + +/* setup conditional pageable / non-pageable select */ + /* Other OSs only need pageable */ + #define PVRSRV_PAGEABLE_SELECT PVRSRV_OS_PAGEABLE_HEAP + +/****************************************************************************** + * Static defines + *****************************************************************************/ +#define KERNEL_ID 0xffffffffL +#define POWER_MANAGER_ID 0xfffffffeL +#define ISR_ID 0xfffffffdL +#define TIMER_ID 0xfffffffcL + + +#define HOST_PAGESIZE OSGetPageSize +#define HOST_PAGEMASK (HOST_PAGESIZE()-1) +#define HOST_PAGEALIGN(addr) (((addr) + HOST_PAGEMASK) & ~HOST_PAGEMASK) + +/****************************************************************************** + * Host memory heaps + *****************************************************************************/ +#define PVRSRV_OS_HEAP_MASK 0xf /* host heap flags mask */ +#define PVRSRV_OS_PAGEABLE_HEAP 0x1 /* allocation pageable */ +#define PVRSRV_OS_NON_PAGEABLE_HEAP 0x2 /* allocation non pageable */ +#if defined (__linux__) && defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) +#define PVRSRV_SWAP_BUFFER_ALLOCATION 0x4 /* allocation for swap buffer */ +#else +#define PVRSRV_SWAP_BUFFER_ALLOCATION 0x0 +#endif + +#if defined (PVRSRV_DEVMEM_TIME_STATS) +IMG_UINT64 OSClockMonotonicus(IMG_VOID); +#endif +IMG_UINT32 OSClockus(IMG_VOID); +IMG_UINT32 OSGetPageSize(IMG_VOID); +PVRSRV_ERROR OSInstallDeviceLISR(IMG_VOID *pvSysData, + IMG_UINT32 ui32Irq, + IMG_CHAR *pszISRName, + IMG_VOID *pvDeviceNode); +PVRSRV_ERROR OSUninstallDeviceLISR(IMG_VOID *pvSysData); +PVRSRV_ERROR OSInstallSystemLISR(IMG_VOID 
*pvSysData, IMG_UINT32 ui32Irq); +PVRSRV_ERROR OSUninstallSystemLISR(IMG_VOID *pvSysData); +PVRSRV_ERROR OSInstallMISR(IMG_VOID *pvSysData); +PVRSRV_ERROR OSUninstallMISR(IMG_VOID *pvSysData); +IMG_CPU_PHYADDR OSMapLinToCPUPhys(IMG_HANDLE, IMG_VOID* pvLinAddr); +IMG_VOID OSMemCopy(IMG_VOID *pvDst, IMG_VOID *pvSrc, IMG_SIZE_T uiSize); +IMG_VOID *OSMapPhysToLin(IMG_CPU_PHYADDR BasePAddr, IMG_SIZE_T uBytes, IMG_UINT32 ui32Flags, IMG_HANDLE *phOSMemHandle); +IMG_BOOL OSUnMapPhysToLin(IMG_VOID *pvLinAddr, IMG_SIZE_T uBytes, IMG_UINT32 ui32Flags, IMG_HANDLE hOSMemHandle); + +PVRSRV_ERROR OSReservePhys(IMG_CPU_PHYADDR BasePAddr, IMG_SIZE_T uBytes, IMG_UINT32 ui32Flags, IMG_HANDLE hBMHandle, IMG_VOID **ppvCpuVAddr, IMG_HANDLE *phOSMemHandle); +PVRSRV_ERROR OSUnReservePhys(IMG_VOID *pvCpuVAddr, IMG_SIZE_T uBytes, IMG_UINT32 ui32Flags, IMG_HANDLE hOSMemHandle); + +/* Some terminology: + * + * FLUSH Flush w/ invalidate + * CLEAN Flush w/o invalidate + * INVALIDATE Invalidate w/o flush + */ + +#if defined(__linux__) && defined(__KERNEL__) + +IMG_VOID OSFlushCPUCacheKM(IMG_VOID); + +IMG_VOID OSCleanCPUCacheKM(IMG_VOID); + +IMG_BOOL OSFlushCPUCacheRangeKM(IMG_HANDLE hOSMemHandle, + IMG_UINT32 ui32ByteOffset, + IMG_VOID *pvRangeAddrStart, + IMG_UINT32 ui32Length); +IMG_BOOL OSCleanCPUCacheRangeKM(IMG_HANDLE hOSMemHandle, + IMG_UINT32 ui32ByteOffset, + IMG_VOID *pvRangeAddrStart, + IMG_UINT32 ui32Length); +IMG_BOOL OSInvalidateCPUCacheRangeKM(IMG_HANDLE hOSMemHandle, + IMG_UINT32 ui32ByteOffset, + IMG_VOID *pvRangeAddrStart, + IMG_UINT32 ui32Length); + +#else /* defined(__linux__) && defined(__KERNEL__) */ + +#ifdef INLINE_IS_PRAGMA +#pragma inline(OSFlushCPUCacheKM) +#endif +static INLINE IMG_VOID OSFlushCPUCacheKM(IMG_VOID) {} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(OSCleanCPUCacheKM) +#endif +static INLINE IMG_VOID OSCleanCPUCacheKM(IMG_VOID) {} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(OSFlushCPUCacheRangeKM) +#endif +static INLINE IMG_BOOL 
OSFlushCPUCacheRangeKM(IMG_HANDLE hOSMemHandle, + IMG_UINT32 ui32ByteOffset, + IMG_VOID *pvRangeAddrStart, + IMG_UINT32 ui32Length) +{ + PVR_UNREFERENCED_PARAMETER(hOSMemHandle); + PVR_UNREFERENCED_PARAMETER(ui32ByteOffset); + PVR_UNREFERENCED_PARAMETER(pvRangeAddrStart); + PVR_UNREFERENCED_PARAMETER(ui32Length); + return IMG_FALSE; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(OSCleanCPUCacheRangeKM) +#endif +static INLINE IMG_BOOL OSCleanCPUCacheRangeKM(IMG_HANDLE hOSMemHandle, + IMG_UINT32 ui32ByteOffset, + IMG_VOID *pvRangeAddrStart, + IMG_UINT32 ui32Length) +{ + PVR_UNREFERENCED_PARAMETER(hOSMemHandle); + PVR_UNREFERENCED_PARAMETER(ui32ByteOffset); + PVR_UNREFERENCED_PARAMETER(pvRangeAddrStart); + PVR_UNREFERENCED_PARAMETER(ui32Length); + return IMG_FALSE; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(OSInvalidateCPUCacheRangeKM) +#endif +static INLINE IMG_BOOL OSInvalidateCPUCacheRangeKM(IMG_HANDLE hOSMemHandle, + IMG_UINT32 ui32ByteOffset, + IMG_VOID *pvRangeAddrStart, + IMG_UINT32 ui32Length) +{ + PVR_UNREFERENCED_PARAMETER(hOSMemHandle); + PVR_UNREFERENCED_PARAMETER(ui32ByteOffset); + PVR_UNREFERENCED_PARAMETER(pvRangeAddrStart); + PVR_UNREFERENCED_PARAMETER(ui32Length); + return IMG_FALSE; +} + +#endif /* defined(__linux__) && defined(__KERNEL__) */ + +#if defined(__linux__) || defined(__QNXNTO__) +PVRSRV_ERROR OSRegisterDiscontigMem(IMG_SYS_PHYADDR *pBasePAddr, + IMG_VOID *pvCpuVAddr, + IMG_SIZE_T uBytes, + IMG_UINT32 ui32Flags, + IMG_HANDLE *phOSMemHandle); +PVRSRV_ERROR OSUnRegisterDiscontigMem(IMG_VOID *pvCpuVAddr, + IMG_SIZE_T uBytes, + IMG_UINT32 ui32Flags, + IMG_HANDLE hOSMemHandle); +#else /* defined(__linux__) */ +#ifdef INLINE_IS_PRAGMA +#pragma inline(OSRegisterDiscontigMem) +#endif +static INLINE PVRSRV_ERROR OSRegisterDiscontigMem(IMG_SYS_PHYADDR *pBasePAddr, + IMG_VOID *pvCpuVAddr, + IMG_SIZE_T uBytes, + IMG_UINT32 ui32Flags, + IMG_HANDLE *phOSMemHandle) +{ + PVR_UNREFERENCED_PARAMETER(pBasePAddr); + PVR_UNREFERENCED_PARAMETER(pvCpuVAddr); + 
PVR_UNREFERENCED_PARAMETER(uBytes); + PVR_UNREFERENCED_PARAMETER(ui32Flags); + PVR_UNREFERENCED_PARAMETER(phOSMemHandle); + + return PVRSRV_ERROR_NOT_SUPPORTED; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(OSUnRegisterDiscontigMem) +#endif +static INLINE PVRSRV_ERROR OSUnRegisterDiscontigMem(IMG_VOID *pvCpuVAddr, + IMG_SIZE_T uBytes, + IMG_UINT32 ui32Flags, + IMG_HANDLE hOSMemHandle) +{ + PVR_UNREFERENCED_PARAMETER(pvCpuVAddr); + PVR_UNREFERENCED_PARAMETER(uBytes); + PVR_UNREFERENCED_PARAMETER(ui32Flags); + PVR_UNREFERENCED_PARAMETER(hOSMemHandle); + + return PVRSRV_ERROR_NOT_SUPPORTED; +} +#endif /* defined(__linux__) */ + + +#if defined(__linux__) || defined(__QNXNTO__) +#ifdef INLINE_IS_PRAGMA +#pragma inline(OSReserveDiscontigPhys) +#endif +static INLINE PVRSRV_ERROR OSReserveDiscontigPhys(IMG_SYS_PHYADDR *pBasePAddr, IMG_SIZE_T uBytes, IMG_UINT32 ui32Flags, IMG_VOID **ppvCpuVAddr, IMG_HANDLE *phOSMemHandle) +{ +#if defined(__linux__) || defined(__QNXNTO__) + *ppvCpuVAddr = IMG_NULL; + return OSRegisterDiscontigMem(pBasePAddr, *ppvCpuVAddr, uBytes, ui32Flags, phOSMemHandle); +#else + extern IMG_CPU_PHYADDR SysSysPAddrToCpuPAddr(IMG_SYS_PHYADDR SysPAddr); + + /* + * On uITRON we know: + * 1. We will only be called with a non-contig physical if we + * already have a contiguous CPU linear + * 2. There is a one->one mapping of CpuPAddr -> CpuVAddr + * 3. Looking up the first CpuPAddr will find the first CpuVAddr + * 4. 
We don't need to unmap + */ + + return OSReservePhys(SysSysPAddrToCpuPAddr(pBasePAddr[0]), uBytes, ui32Flags, IMG_NULL, ppvCpuVAddr, phOSMemHandle); +#endif +} + +static INLINE PVRSRV_ERROR OSUnReserveDiscontigPhys(IMG_VOID *pvCpuVAddr, IMG_SIZE_T uBytes, IMG_UINT32 ui32Flags, IMG_HANDLE hOSMemHandle) +{ +#if defined(__linux__) || defined(__QNXNTO__) + OSUnRegisterDiscontigMem(pvCpuVAddr, uBytes, ui32Flags, hOSMemHandle); +#endif + /* We don't need to unmap */ + return PVRSRV_OK; +} +#else /* defined(__linux__) */ + + +#ifdef INLINE_IS_PRAGMA +#pragma inline(OSReserveDiscontigPhys) +#endif +static INLINE PVRSRV_ERROR OSReserveDiscontigPhys(IMG_SYS_PHYADDR *pBasePAddr, IMG_SIZE_T uBytes, IMG_UINT32 ui32Flags, IMG_VOID **ppvCpuVAddr, IMG_HANDLE *phOSMemHandle) +{ + PVR_UNREFERENCED_PARAMETER(pBasePAddr); + PVR_UNREFERENCED_PARAMETER(uBytes); + PVR_UNREFERENCED_PARAMETER(ui32Flags); + PVR_UNREFERENCED_PARAMETER(ppvCpuVAddr); + PVR_UNREFERENCED_PARAMETER(phOSMemHandle); + + return PVRSRV_ERROR_NOT_SUPPORTED; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(OSUnReserveDiscontigPhys) +#endif +static INLINE PVRSRV_ERROR OSUnReserveDiscontigPhys(IMG_VOID *pvCpuVAddr, IMG_SIZE_T uBytes, IMG_UINT32 ui32Flags, IMG_HANDLE hOSMemHandle) +{ + PVR_UNREFERENCED_PARAMETER(pvCpuVAddr); + PVR_UNREFERENCED_PARAMETER(uBytes); + PVR_UNREFERENCED_PARAMETER(ui32Flags); + PVR_UNREFERENCED_PARAMETER(hOSMemHandle); + + return PVRSRV_ERROR_NOT_SUPPORTED; +} +#endif /* defined(__linux__) */ + +PVRSRV_ERROR OSRegisterMem(IMG_CPU_PHYADDR BasePAddr, + IMG_VOID *pvCpuVAddr, + IMG_SIZE_T uBytes, + IMG_UINT32 ui32Flags, + IMG_HANDLE *phOSMemHandle); +PVRSRV_ERROR OSUnRegisterMem(IMG_VOID *pvCpuVAddr, + IMG_SIZE_T uBytes, + IMG_UINT32 ui32Flags, + IMG_HANDLE hOSMemHandle); + + + +#if defined(__linux__) || defined(__QNXNTO__) +PVRSRV_ERROR OSGetSubMemHandle(IMG_HANDLE hOSMemHandle, + IMG_UINTPTR_T uByteOffset, + IMG_SIZE_T uBytes, + IMG_UINT32 ui32Flags, + IMG_HANDLE *phOSMemHandleRet); +PVRSRV_ERROR 
OSReleaseSubMemHandle(IMG_HANDLE hOSMemHandle, IMG_UINT32 ui32Flags); +#else +#ifdef INLINE_IS_PRAGMA +#pragma inline(OSGetSubMemHandle) +#endif +static INLINE PVRSRV_ERROR OSGetSubMemHandle(IMG_HANDLE hOSMemHandle, + IMG_UINTPTR_T uByteOffset, + IMG_SIZE_T uBytes, + IMG_UINT32 ui32Flags, + IMG_HANDLE *phOSMemHandleRet) +{ + PVR_UNREFERENCED_PARAMETER(uByteOffset); + PVR_UNREFERENCED_PARAMETER(uBytes); + PVR_UNREFERENCED_PARAMETER(ui32Flags); + + *phOSMemHandleRet = hOSMemHandle; + return PVRSRV_OK; +} + +static INLINE PVRSRV_ERROR OSReleaseSubMemHandle(IMG_HANDLE hOSMemHandle, IMG_UINT32 ui32Flags) +{ + PVR_UNREFERENCED_PARAMETER(hOSMemHandle); + PVR_UNREFERENCED_PARAMETER(ui32Flags); + return PVRSRV_OK; +} +#endif + +IMG_UINT32 OSGetCurrentProcessIDKM(IMG_VOID); +#if defined (MEM_TRACK_INFO_DEBUG) +IMG_UINT32 OSGetCurrentTimeInUSecsKM(IMG_VOID); +#endif +IMG_UINTPTR_T OSGetCurrentThreadID( IMG_VOID ); +IMG_VOID OSMemSet(IMG_VOID *pvDest, IMG_UINT8 ui8Value, IMG_SIZE_T uSize); + +PVRSRV_ERROR OSAllocPages_Impl(IMG_UINT32 ui32Flags, IMG_SIZE_T uSize, IMG_UINT32 ui32PageSize, + IMG_PVOID pvPrivData, IMG_UINT32 ui32PrivDataLength, IMG_HANDLE hBMHandle, IMG_PVOID *ppvLinAddr, IMG_HANDLE *phPageAlloc); +PVRSRV_ERROR OSFreePages(IMG_UINT32 ui32Flags, IMG_SIZE_T uSize, IMG_PVOID pvLinAddr, IMG_HANDLE hPageAlloc); + + +/*--------------------- +The set of macros below follows this pattern: + +f(x) = if F -> f2(g(x)) + else -> g(x) + +g(x) = if G -> g2(h(x)) + else -> h(x) + +h(x) = ... 
+ +-----------------------*/ + +/*If level 3 wrapper is enabled, we add a PVR_TRACE and call the next level, else just call the next level*/ +#ifdef PVRSRV_LOG_MEMORY_ALLOCS + #define OSAllocMem(flags, size, linAddr, blockAlloc, logStr) \ + (PVR_TRACE(("OSAllocMem(" #flags ", " #size ", " #linAddr ", " #blockAlloc "): " logStr " (size = 0x%lx)", (unsigned long)size)), \ + OSAllocMem_Debug_Wrapper(flags, size, linAddr, blockAlloc, __FILE__, __LINE__)) + + #define OSAllocPages(flags, size, pageSize, privdata, privdatalength, bmhandle, linAddr, pageAlloc) \ + (PVR_TRACE(("OSAllocPages(" #flags ", " #size ", " #pageSize ", " #linAddr ", " #pageAlloc "): (size = 0x%lx)", (unsigned long)size)), \ + OSAllocPages_Impl(flags, size, pageSize, privdata, privdatalength, bmhandle, linAddr, pageAlloc)) + + #define OSFreeMem(flags, size, linAddr, blockAlloc) \ + (PVR_TRACE(("OSFreeMem(" #flags ", " #size ", " #linAddr ", " #blockAlloc "): (pointer = 0x%p)", linAddr)), \ + OSFreeMem_Debug_Wrapper(flags, size, linAddr, blockAlloc, __FILE__, __LINE__)) +#else + #define OSAllocMem(flags, size, linAddr, blockAlloc, logString) \ + OSAllocMem_Debug_Wrapper(flags, size, linAddr, blockAlloc, __FILE__, __LINE__) + + #define OSAllocPages OSAllocPages_Impl + + #define OSFreeMem(flags, size, linAddr, blockAlloc) \ + OSFreeMem_Debug_Wrapper(flags, size, linAddr, blockAlloc, __FILE__, __LINE__) +#endif + +/*If level 2 wrapper is enabled declare the function, +else alias to level 1 wrapper, else the wrapper function will be used*/ +#ifdef PVRSRV_DEBUG_OS_MEMORY + + PVRSRV_ERROR OSAllocMem_Debug_Wrapper(IMG_UINT32 ui32Flags, + IMG_UINT32 ui32Size, + IMG_PVOID *ppvCpuVAddr, + IMG_HANDLE *phBlockAlloc, + IMG_CHAR *pszFilename, + IMG_UINT32 ui32Line); + + PVRSRV_ERROR OSFreeMem_Debug_Wrapper(IMG_UINT32 ui32Flags, + IMG_UINT32 ui32Size, + IMG_PVOID pvCpuVAddr, + IMG_HANDLE hBlockAlloc, + IMG_CHAR *pszFilename, + IMG_UINT32 ui32Line); + + + typedef struct + { + IMG_UINT8 sGuardRegionBefore[8]; + 
IMG_CHAR sFileName[128]; + IMG_UINT32 uLineNo; + IMG_SIZE_T uSize; + IMG_SIZE_T uSizeParityCheck; + enum valid_tag + { isFree = 0x277260FF, + isAllocated = 0x260511AA + } eValid; + } OSMEM_DEBUG_INFO; + + #define TEST_BUFFER_PADDING_STATUS (sizeof(OSMEM_DEBUG_INFO)) + #define TEST_BUFFER_PADDING_AFTER (8) + #define TEST_BUFFER_PADDING (TEST_BUFFER_PADDING_STATUS + TEST_BUFFER_PADDING_AFTER) +#else + #define OSAllocMem_Debug_Wrapper OSAllocMem_Debug_Linux_Memory_Allocations + #define OSFreeMem_Debug_Wrapper OSFreeMem_Debug_Linux_Memory_Allocations +#endif + +/*If level 1 wrapper is enabled declare the functions with extra parameters +else alias to level 0 and declare the functions without the extra debugging parameters*/ +#if (defined(__linux__) || defined(__QNXNTO__)) && defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) + PVRSRV_ERROR OSAllocMem_Impl(IMG_UINT32 ui32Flags, IMG_SIZE_T uSize, IMG_PVOID *ppvLinAddr, IMG_HANDLE *phBlockAlloc, IMG_CHAR *pszFilename, IMG_UINT32 ui32Line); + PVRSRV_ERROR OSFreeMem_Impl(IMG_UINT32 ui32Flags, IMG_SIZE_T uSize, IMG_PVOID pvLinAddr, IMG_HANDLE hBlockAlloc, IMG_CHAR *pszFilename, IMG_UINT32 ui32Line); + + #define OSAllocMem_Debug_Linux_Memory_Allocations OSAllocMem_Impl + #define OSFreeMem_Debug_Linux_Memory_Allocations OSFreeMem_Impl +#else + PVRSRV_ERROR OSAllocMem_Impl(IMG_UINT32 ui32Flags, IMG_SIZE_T uSize, IMG_PVOID *ppvLinAddr, IMG_HANDLE *phBlockAlloc); + PVRSRV_ERROR OSFreeMem_Impl(IMG_UINT32 ui32Flags, IMG_SIZE_T uSize, IMG_PVOID pvLinAddr, IMG_HANDLE hBlockAlloc); + + #define OSAllocMem_Debug_Linux_Memory_Allocations(flags, size, addr, blockAlloc, file, line) \ + OSAllocMem_Impl(flags, size, addr, blockAlloc) + #define OSFreeMem_Debug_Linux_Memory_Allocations(flags, size, addr, blockAlloc, file, line) \ + OSFreeMem_Impl(flags, size, addr, blockAlloc) +#endif + + +#if defined(__linux__) || defined(__QNXNTO__) +IMG_CPU_PHYADDR OSMemHandleToCpuPAddr(IMG_VOID *hOSMemHandle, IMG_UINTPTR_T uiByteOffset); +#else +#ifdef 
INLINE_IS_PRAGMA +#pragma inline(OSMemHandleToCpuPAddr) +#endif +static INLINE IMG_CPU_PHYADDR OSMemHandleToCpuPAddr(IMG_HANDLE hOSMemHandle, IMG_UINTPTR_T uiByteOffset) +{ + IMG_CPU_PHYADDR sCpuPAddr; + PVR_UNREFERENCED_PARAMETER(hOSMemHandle); + PVR_UNREFERENCED_PARAMETER(uiByteOffset); + sCpuPAddr.uiAddr = 0; + return sCpuPAddr; +} +#endif + +#if defined(__linux__) +IMG_BOOL OSMemHandleIsPhysContig(IMG_VOID *hOSMemHandle); +#else +#ifdef INLINE_IS_PRAGMA +#pragma inline(OSMemHandleIsPhysContig) +#endif +static INLINE IMG_BOOL OSMemHandleIsPhysContig(IMG_HANDLE hOSMemHandle) +{ + PVR_UNREFERENCED_PARAMETER(hOSMemHandle); + return IMG_FALSE; +} +#endif + +PVRSRV_ERROR OSInitEnvData(IMG_PVOID *ppvEnvSpecificData); +PVRSRV_ERROR OSDeInitEnvData(IMG_PVOID pvEnvSpecificData); +IMG_CHAR* OSStringCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc); +IMG_INT32 OSSNPrintf(IMG_CHAR *pStr, IMG_SIZE_T uSize, const IMG_CHAR *pszFormat, ...) IMG_FORMAT_PRINTF(3, 4); +#define OSStringLength(pszString) strlen(pszString) + +PVRSRV_ERROR OSEventObjectCreateKM(const IMG_CHAR *pszName, + PVRSRV_EVENTOBJECT *psEventObject); +PVRSRV_ERROR OSEventObjectDestroyKM(PVRSRV_EVENTOBJECT *psEventObject); +PVRSRV_ERROR OSEventObjectSignalKM(IMG_HANDLE hOSEventKM); +PVRSRV_ERROR OSEventObjectWaitKM(IMG_HANDLE hOSEventKM); +PVRSRV_ERROR OSEventObjectOpenKM(PVRSRV_EVENTOBJECT *psEventObject, + IMG_HANDLE *phOSEvent); +PVRSRV_ERROR OSEventObjectCloseKM(PVRSRV_EVENTOBJECT *psEventObject, + IMG_HANDLE hOSEventKM); + + +PVRSRV_ERROR OSBaseAllocContigMemory(IMG_SIZE_T uSize, IMG_CPU_VIRTADDR *pLinAddr, IMG_CPU_PHYADDR *pPhysAddr); +PVRSRV_ERROR OSBaseFreeContigMemory(IMG_SIZE_T uSize, IMG_CPU_VIRTADDR LinAddr, IMG_CPU_PHYADDR PhysAddr); + +IMG_PVOID MapUserFromKernel(IMG_PVOID pvLinAddrKM,IMG_SIZE_T uSize,IMG_HANDLE *phMemBlock); +IMG_PVOID OSMapHWRegsIntoUserSpace(IMG_HANDLE hDevCookie, IMG_SYS_PHYADDR sRegAddr, IMG_UINT32 ulSize, IMG_PVOID *ppvProcess); +IMG_VOID OSUnmapHWRegsFromUserSpace(IMG_HANDLE 
hDevCookie, IMG_PVOID pvUserAddr, IMG_PVOID pvProcess); + +IMG_VOID UnmapUserFromKernel(IMG_PVOID pvLinAddrUM, IMG_SIZE_T uSize, IMG_HANDLE hMemBlock); + +PVRSRV_ERROR OSMapPhysToUserSpace(IMG_HANDLE hDevCookie, + IMG_SYS_PHYADDR sCPUPhysAddr, + IMG_SIZE_T uiSizeInBytes, + IMG_UINT32 ui32CacheFlags, + IMG_PVOID *ppvUserAddr, + IMG_SIZE_T *puiActualSize, + IMG_HANDLE hMappingHandle); + +PVRSRV_ERROR OSUnmapPhysToUserSpace(IMG_HANDLE hDevCookie, + IMG_PVOID pvUserAddr, + IMG_PVOID pvProcess); + +PVRSRV_ERROR OSLockResource(PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID); +PVRSRV_ERROR OSUnlockResource(PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID); + +#if !defined(PVR_LINUX_USING_WORKQUEUES) && defined(__linux__) +PVRSRV_ERROR OSLockResourceAndBlockMISR(PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID); +PVRSRV_ERROR OSUnlockResourceAndUnblockMISR(PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID); +#endif /* !defined(PVR_LINUX_USING_WORKQUEUES) && defined(__linux__) */ + +IMG_BOOL OSIsResourceLocked(PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID); +PVRSRV_ERROR OSCreateResource(PVRSRV_RESOURCE *psResource); +PVRSRV_ERROR OSDestroyResource(PVRSRV_RESOURCE *psResource); +IMG_VOID OSBreakResourceLock(PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID); + +#if defined(SYS_CUSTOM_POWERLOCK_WRAP) +#define OSPowerLockWrap SysPowerLockWrap +#define OSPowerLockUnwrap SysPowerLockUnwrap +#else +/****************************************************************************** + @Function OSPowerLockWrap + + @Description OS-specific wrapper around the power lock + + @Input bTryLock - don't block on lock contention + + @Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR OSPowerLockWrap(IMG_BOOL bTryLock); + +/****************************************************************************** + @Function OSPowerLockUnwrap + + @Description OS-specific wrapper around the power unlock + + @Return IMG_VOID 
+******************************************************************************/ +IMG_VOID OSPowerLockUnwrap(IMG_VOID); +#endif /* SYS_CUSTOM_POWERLOCK_WRAP */ + +/*! +****************************************************************************** + + @Function OSWaitus + + @Description + This function implements a busy wait of the specified microseconds + This function does NOT release thread quanta + + @Input ui32Timeus - (us) + + @Return IMG_VOID + +******************************************************************************/ +IMG_VOID OSWaitus(IMG_UINT32 ui32Timeus); + +/*! +****************************************************************************** + + @Function OSSleepms + + @Description + This function implements a sleep of the specified milliseconds + This function may allow pre-emption if implemented + + @Input ui32Timems - (ms) + + @Return IMG_VOID + +******************************************************************************/ +IMG_VOID OSSleepms(IMG_UINT32 ui32Timems); + +IMG_HANDLE OSFuncHighResTimerCreate(IMG_VOID); +IMG_UINT32 OSFuncHighResTimerGetus(IMG_HANDLE hTimer); +IMG_VOID OSFuncHighResTimerDestroy(IMG_HANDLE hTimer); +IMG_VOID OSReleaseThreadQuanta(IMG_VOID); +IMG_UINT32 OSPCIReadDword(IMG_UINT32 ui32Bus, IMG_UINT32 ui32Dev, IMG_UINT32 ui32Func, IMG_UINT32 ui32Reg); +IMG_VOID OSPCIWriteDword(IMG_UINT32 ui32Bus, IMG_UINT32 ui32Dev, IMG_UINT32 ui32Func, IMG_UINT32 ui32Reg, IMG_UINT32 ui32Value); + +IMG_IMPORT +IMG_UINT32 ReadHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset); + +IMG_IMPORT +IMG_VOID WriteHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value); + +IMG_IMPORT IMG_VOID WriteHWRegs(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Count, PVRSRV_HWREG *psHWRegs); + +#ifndef OSReadHWReg +IMG_UINT32 OSReadHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset); +#endif +#ifndef OSWriteHWReg +IMG_VOID OSWriteHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value); +#endif + +typedef 
IMG_VOID (*PFN_TIMER_FUNC)(IMG_VOID*); +IMG_HANDLE OSAddTimer(PFN_TIMER_FUNC pfnTimerFunc, IMG_VOID *pvData, IMG_UINT32 ui32MsTimeout); +PVRSRV_ERROR OSRemoveTimer (IMG_HANDLE hTimer); +PVRSRV_ERROR OSEnableTimer (IMG_HANDLE hTimer); +PVRSRV_ERROR OSDisableTimer (IMG_HANDLE hTimer); + +PVRSRV_ERROR OSGetSysMemSize(IMG_SIZE_T *puBytes); + +typedef enum _HOST_PCI_INIT_FLAGS_ +{ + HOST_PCI_INIT_FLAG_BUS_MASTER = 0x00000001, + HOST_PCI_INIT_FLAG_MSI = 0x00000002, + HOST_PCI_INIT_FLAG_FORCE_I32 = 0x7fffffff +} HOST_PCI_INIT_FLAGS; + +struct _PVRSRV_PCI_DEV_OPAQUE_STRUCT_; +typedef struct _PVRSRV_PCI_DEV_OPAQUE_STRUCT_ *PVRSRV_PCI_DEV_HANDLE; + +PVRSRV_PCI_DEV_HANDLE OSPCIAcquireDev(IMG_UINT16 ui16VendorID, IMG_UINT16 ui16DeviceID, HOST_PCI_INIT_FLAGS eFlags); +PVRSRV_PCI_DEV_HANDLE OSPCISetDev(IMG_VOID *pvPCICookie, HOST_PCI_INIT_FLAGS eFlags); +PVRSRV_ERROR OSPCIReleaseDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI); +PVRSRV_ERROR OSPCIIRQ(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 *pui32IRQ); +IMG_UINT32 OSPCIAddrRangeLen(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index); +IMG_UINT32 OSPCIAddrRangeStart(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index); +IMG_UINT32 OSPCIAddrRangeEnd(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index); +PVRSRV_ERROR OSPCIRequestAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index); +PVRSRV_ERROR OSPCIReleaseAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index); +PVRSRV_ERROR OSPCISuspendDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI); +PVRSRV_ERROR OSPCIResumeDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI); + +PVRSRV_ERROR OSScheduleMISR(IMG_VOID *pvSysData); + +/****************************************************************************** + + @Function OSPanic + + @Description Take action in response to an unrecoverable driver error + + @Input IMG_VOID + + @Return IMG_VOID + +******************************************************************************/ +IMG_VOID OSPanic(IMG_VOID); + +IMG_BOOL OSProcHasPrivSrvInit(IMG_VOID); + +typedef enum 
_img_verify_test +{ + PVR_VERIFY_WRITE = 0, + PVR_VERIFY_READ +} IMG_VERIFY_TEST; + +IMG_BOOL OSAccessOK(IMG_VERIFY_TEST eVerification, IMG_VOID *pvUserPtr, IMG_SIZE_T uBytes); + +PVRSRV_ERROR OSCopyToUser(IMG_PVOID pvProcess, IMG_VOID *pvDest, IMG_VOID *pvSrc, IMG_SIZE_T uBytes); +PVRSRV_ERROR OSCopyFromUser(IMG_PVOID pvProcess, IMG_VOID *pvDest, IMG_VOID *pvSrc, IMG_SIZE_T uBytes); + +#if defined(__linux__) || defined(__QNXNTO__) +PVRSRV_ERROR OSAcquirePhysPageAddr(IMG_VOID* pvCPUVAddr, + IMG_SIZE_T uBytes, + IMG_SYS_PHYADDR *psSysPAddr, + IMG_HANDLE *phOSWrapMem); +PVRSRV_ERROR OSReleasePhysPageAddr(IMG_HANDLE hOSWrapMem); +#else +#ifdef INLINE_IS_PRAGMA +#pragma inline(OSAcquirePhysPageAddr) +#endif +static INLINE PVRSRV_ERROR OSAcquirePhysPageAddr(IMG_VOID* pvCPUVAddr, + IMG_SIZE_T uBytes, + IMG_SYS_PHYADDR *psSysPAddr, + IMG_HANDLE *phOSWrapMem) +{ + PVR_UNREFERENCED_PARAMETER(pvCPUVAddr); + PVR_UNREFERENCED_PARAMETER(uBytes); + PVR_UNREFERENCED_PARAMETER(psSysPAddr); + PVR_UNREFERENCED_PARAMETER(phOSWrapMem); + return PVRSRV_OK; +} +#ifdef INLINE_IS_PRAGMA +#pragma inline(OSReleasePhysPageAddr) +#endif +static INLINE PVRSRV_ERROR OSReleasePhysPageAddr(IMG_HANDLE hOSWrapMem) +{ + PVR_UNREFERENCED_PARAMETER(hOSWrapMem); + return PVRSRV_OK; +} +#endif + +#if defined(__linux__) && defined(__KERNEL__) + +#define OS_SUPPORTS_IN_LISR + +static inline IMG_BOOL OSInLISR(IMG_VOID unref__ *pvSysData) +{ + PVR_UNREFERENCED_PARAMETER(pvSysData); + return (in_irq()) ? 
IMG_TRUE : IMG_FALSE; +} + +static inline IMG_VOID OSWriteMemoryBarrier(IMG_VOID) +{ + wmb(); +} + +static inline IMG_VOID OSMemoryBarrier(IMG_VOID) +{ + mb(); +} + +#else /* defined(__linux__) && defined(__KERNEL__) */ + +#ifdef INLINE_IS_PRAGMA +#pragma inline(OSWriteMemoryBarrier) +#endif +static INLINE IMG_VOID OSWriteMemoryBarrier(IMG_VOID) { } + +#ifdef INLINE_IS_PRAGMA +#pragma inline(OSMemoryBarrier) +#endif +static INLINE IMG_VOID OSMemoryBarrier(IMG_VOID) { } + +#endif /* defined(__linux__) && defined(__KERNEL__) */ + +/* Atomic functions */ +PVRSRV_ERROR OSAtomicAlloc(IMG_PVOID *ppvRefCount); +IMG_VOID OSAtomicFree(IMG_PVOID pvRefCount); +IMG_VOID OSAtomicInc(IMG_PVOID pvRefCount); +IMG_BOOL OSAtomicDecAndTest(IMG_PVOID pvRefCount); +IMG_UINT32 OSAtomicRead(IMG_PVOID pvRefCount); + +PVRSRV_ERROR OSTimeCreateWithUSOffset(IMG_PVOID *pvRet, IMG_UINT32 ui32MSOffset); +IMG_BOOL OSTimeHasTimePassed(IMG_PVOID pvData); +IMG_VOID OSTimeDestroy(IMG_PVOID pvData); + +#if defined(__linux__) +IMG_VOID OSReleaseBridgeLock(IMG_VOID); +IMG_VOID OSReacquireBridgeLock(IMG_VOID); +#else + +#ifdef INLINE_IS_PRAGMA +#pragma inline(OSReleaseBridgeLock) +#endif +static INLINE IMG_VOID OSReleaseBridgeLock(IMG_VOID) { } + +#ifdef INLINE_IS_PRAGMA +#pragma inline(OSReacquireBridgeLock) +#endif +static INLINE IMG_VOID OSReacquireBridgeLock(IMG_VOID) { } + +#endif + +#if defined(__linux__) +IMG_VOID OSGetCurrentProcessNameKM(IMG_CHAR *pszName, IMG_UINT32 ui32Size); +#else + +#ifdef INLINE_IS_PRAGMA +#pragma inline(OSGetCurrentProcessNameKM) +#endif +static INLINE IMG_VOID OSGetCurrentProcessNameKM(IMG_CHAR *pszName, IMG_UINT32 ui32Size) +{ + PVR_UNREFERENCED_PARAMETER(pszName); + PVR_UNREFERENCED_PARAMETER(ui32Size); +} + +#endif + +#if defined(__linux__) && defined(DEBUG) +#define OSDumpStack dump_stack +#else +#define OSDumpStack() +#endif + +#if defined (__cplusplus) +} +#endif + +#endif /* __OSFUNC_H__ */ + 
+/****************************************************************************** + End of file (osfunc.h) +******************************************************************************/ + diff --git a/sgx_km/eurasia_km/services4/srvkm/include/osperproc.h b/sgx_km/eurasia_km/services4/srvkm/include/osperproc.h new file mode 100644 index 0000000..0b962b4 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/include/osperproc.h @@ -0,0 +1,94 @@ +/*************************************************************************/ /*! +@Title OS specific per process data interface +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description OS specific per process data interface +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#ifndef __OSPERPROC_H__ +#define __OSPERPROC_H__ + +#if defined (__cplusplus) +extern "C" { +#endif + +#if defined(__linux__) || defined(__QNXNTO__) +PVRSRV_ERROR OSPerProcessPrivateDataInit(IMG_HANDLE *phOsPrivateData); +PVRSRV_ERROR OSPerProcessPrivateDataDeInit(IMG_HANDLE hOsPrivateData); + +PVRSRV_ERROR OSPerProcessSetHandleOptions(PVRSRV_HANDLE_BASE *psHandleBase); +#else /* defined(__linux__) */ +#ifdef INLINE_IS_PRAGMA +#pragma inline(OSPerProcessPrivateDataInit) +#endif +static INLINE PVRSRV_ERROR OSPerProcessPrivateDataInit(IMG_HANDLE *phOsPrivateData) +{ + PVR_UNREFERENCED_PARAMETER(phOsPrivateData); + + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(OSPerProcessPrivateDataDeInit) +#endif +static INLINE PVRSRV_ERROR OSPerProcessPrivateDataDeInit(IMG_HANDLE hOsPrivateData) +{ + PVR_UNREFERENCED_PARAMETER(hOsPrivateData); + + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(OSPerProcessSetHandleOptions) +#endif +static INLINE PVRSRV_ERROR OSPerProcessSetHandleOptions(PVRSRV_HANDLE_BASE *psHandleBase) +{ + PVR_UNREFERENCED_PARAMETER(psHandleBase); + + return PVRSRV_OK; +} +#endif /* 
defined(__linux__) */ + +#if defined (__cplusplus) +} +#endif + +#endif /* __OSPERPROC_H__ */ + +/****************************************************************************** + End of file (osperproc.h) +******************************************************************************/ diff --git a/sgx_km/eurasia_km/services4/srvkm/include/pdump_int.h b/sgx_km/eurasia_km/services4/srvkm/include/pdump_int.h new file mode 100644 index 0000000..a76fed0 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/include/pdump_int.h @@ -0,0 +1,100 @@ +/*************************************************************************/ /*! +@Title Parameter dump internal common functions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + + +#ifndef __PDUMP_INT_H__ +#define __PDUMP_INT_H__ + +#if defined (__cplusplus) +extern "C" { +#endif + +/* + * This file contains internal pdump utility functions which may be accessed + * from OS-specific code. The header should not be included outside of srvkm + * pdump files. + */ + +#if !defined(_UITRON) +/* + * No dbgdriver on uitron, so ignore any common functions for communicating + * with dbgdriver. + */ +#include "dbgdrvif.h" + +/* Callbacks which are registered with the debug driver. */ +IMG_EXPORT IMG_VOID PDumpConnectionNotify(IMG_VOID); + +#endif /* !defined(_UITRON) */ + +typedef enum +{ + /* Continuous writes are always captured in the dbgdrv; the buffer will + * expand if no client/sink process is running. 
+ */ + PDUMP_WRITE_MODE_CONTINUOUS = 0, + /* Last frame capture */ + PDUMP_WRITE_MODE_LASTFRAME, + /* Capture frame, binary data */ + PDUMP_WRITE_MODE_BINCM, + /* Persistent capture, append data to init phase */ + PDUMP_WRITE_MODE_PERSISTENT +} PDUMP_DDWMODE; + + +IMG_UINT32 DbgWrite(PDBG_STREAM psStream, IMG_UINT8 *pui8Data, IMG_UINT32 ui32BCount, IMG_UINT32 ui32Flags); + +IMG_UINT32 PDumpOSDebugDriverWrite( PDBG_STREAM psStream, + PDUMP_DDWMODE eDbgDrvWriteMode, + IMG_UINT8 *pui8Data, + IMG_UINT32 ui32BCount, + IMG_UINT32 ui32Level, + IMG_UINT32 ui32DbgDrvFlags); + +#if defined (__cplusplus) +} +#endif +#endif /* __PDUMP_INT_H__ */ + +/****************************************************************************** + End of file (pdump_int.h) +******************************************************************************/ + diff --git a/sgx_km/eurasia_km/services4/srvkm/include/pdump_km.h b/sgx_km/eurasia_km/services4/srvkm/include/pdump_km.h new file mode 100644 index 0000000..4d62154 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/include/pdump_km.h @@ -0,0 +1,446 @@ +/*************************************************************************/ /*! +@Title pdump functions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Main APIs for pdump functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#ifndef _PDUMP_KM_H_ +#define _PDUMP_KM_H_ + + +/* + * Include the OS abstraction APIs + */ +#include "pdump_osfunc.h" + +#if defined(__cplusplus) +extern "C" { +#endif + +/* + * Pull in pdump flags from services include + */ +#include "pdump.h" + +#define PDUMP_PD_UNIQUETAG (IMG_HANDLE)0 +#define PDUMP_PT_UNIQUETAG (IMG_HANDLE)0 + +/* + * PDump streams (common to all OSes) + */ +#define PDUMP_STREAM_PARAM2 0 +#define PDUMP_STREAM_SCRIPT2 1 +#define PDUMP_STREAM_DRIVERINFO 2 +#define PDUMP_NUM_STREAMS 3 + +#if defined(PDUMP_DEBUG_OUTFILES) +/* counter increments each time debug write is called */ +extern IMG_UINT32 g_ui32EveryLineCounter; +#endif + +#ifndef PDUMP +#define MAKEUNIQUETAG(hMemInfo) (0) +#endif + +IMG_BOOL _PDumpIsProcessActive(IMG_VOID); + +IMG_BOOL PDumpWillCapture(IMG_UINT32 ui32Flags); + +#ifdef PDUMP + +#define MAKEUNIQUETAG(hMemInfo) (((BM_BUF *)(((PVRSRV_KERNEL_MEM_INFO *)(hMemInfo))->sMemBlk.hBuffer))->pMapping) + + IMG_IMPORT PVRSRV_ERROR PDumpMemPolKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo, + IMG_UINT32 ui32Offset, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask, + PDUMP_POLL_OPERATOR eOperator, + IMG_UINT32 ui32Flags, + IMG_HANDLE hUniqueTag); + + IMG_IMPORT PVRSRV_ERROR PDumpMemUM(PVRSRV_PER_PROCESS_DATA *psProcData, + IMG_PVOID pvAltLinAddr, + IMG_PVOID pvLinAddr, + PVRSRV_KERNEL_MEM_INFO *psMemInfo, + IMG_UINT32 ui32Offset, + IMG_UINT32 ui32Bytes, + IMG_UINT32 ui32Flags, + IMG_HANDLE hUniqueTag); + + IMG_IMPORT PVRSRV_ERROR PDumpMemKM(IMG_PVOID pvAltLinAddr, + PVRSRV_KERNEL_MEM_INFO *psMemInfo, + IMG_UINT32 ui32Offset, + IMG_UINT32 ui32Bytes, + IMG_UINT32 ui32Flags, + IMG_HANDLE hUniqueTag); + PVRSRV_ERROR PDumpMemPagesKM(PVRSRV_DEVICE_IDENTIFIER *psDevID, + IMG_DEV_PHYADDR *pPages, + IMG_UINT32 ui32NumPages, + IMG_DEV_VIRTADDR sDevAddr, + IMG_UINT32 ui32Start, + IMG_UINT32 ui32Length, + IMG_UINT32 ui32Flags, + IMG_HANDLE hUniqueTag); + + PVRSRV_ERROR 
PDumpMemPDEntriesKM(PDUMP_MMU_ATTRIB *psMMUAttrib, + IMG_HANDLE hOSMemHandle, + IMG_CPU_VIRTADDR pvLinAddr, + IMG_UINT32 ui32Bytes, + IMG_UINT32 ui32Flags, + IMG_BOOL bInitialisePages, + IMG_HANDLE hUniqueTag1, + IMG_HANDLE hUniqueTag2); + + PVRSRV_ERROR PDumpMemPTEntriesKM(PDUMP_MMU_ATTRIB *psMMUAttrib, + IMG_HANDLE hOSMemHandle, + IMG_CPU_VIRTADDR pvLinAddr, + IMG_UINT32 ui32Bytes, + IMG_UINT32 ui32Flags, + IMG_BOOL bInitialisePages, + IMG_HANDLE hUniqueTag1, + IMG_HANDLE hUniqueTag2); + IMG_VOID PDumpInitCommon(IMG_VOID); + IMG_VOID PDumpDeInitCommon(IMG_VOID); + IMG_VOID PDumpInit(IMG_VOID); + IMG_VOID PDumpDeInit(IMG_VOID); + IMG_BOOL PDumpIsSuspended(IMG_VOID); + PVRSRV_ERROR PDumpStartInitPhaseKM(IMG_VOID); + PVRSRV_ERROR PDumpStopInitPhaseKM(IMG_VOID); + IMG_IMPORT PVRSRV_ERROR PDumpSetFrameKM(IMG_UINT32 ui32Frame); + IMG_IMPORT PVRSRV_ERROR PDumpCommentKM(IMG_CHAR *pszComment, IMG_UINT32 ui32Flags); + IMG_IMPORT PVRSRV_ERROR PDumpDriverInfoKM(IMG_CHAR *pszString, IMG_UINT32 ui32Flags); + + PVRSRV_ERROR PDumpRegWithFlagsKM(IMG_CHAR *pszPDumpRegName, + IMG_UINT32 ui32RegAddr, + IMG_UINT32 ui32RegValue, + IMG_UINT32 ui32Flags); + PVRSRV_ERROR PDumpRegPolWithFlagsKM(IMG_CHAR *pszPDumpRegName, + IMG_UINT32 ui32RegAddr, + IMG_UINT32 ui32RegValue, + IMG_UINT32 ui32Mask, + IMG_UINT32 ui32Flags, + PDUMP_POLL_OPERATOR eOperator); + PVRSRV_ERROR PDumpRegPolKM(IMG_CHAR *pszPDumpRegName, + IMG_UINT32 ui32RegAddr, + IMG_UINT32 ui32RegValue, + IMG_UINT32 ui32Mask, + PDUMP_POLL_OPERATOR eOperator); + + IMG_IMPORT PVRSRV_ERROR PDumpBitmapKM(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_CHAR *pszFileName, + IMG_UINT32 ui32FileOffset, + IMG_UINT32 ui32Width, + IMG_UINT32 ui32Height, + IMG_UINT32 ui32StrideInBytes, + IMG_DEV_VIRTADDR sDevBaseAddr, + IMG_HANDLE hDevMemContext, + IMG_UINT32 ui32Size, + PDUMP_PIXEL_FORMAT ePixelFormat, + PDUMP_MEM_FORMAT eMemFormat, + IMG_UINT32 ui32PDumpFlags); + IMG_IMPORT PVRSRV_ERROR PDumpReadRegKM(IMG_CHAR *pszPDumpRegName, + IMG_CHAR 
*pszFileName, + IMG_UINT32 ui32FileOffset, + IMG_UINT32 ui32Address, + IMG_UINT32 ui32Size, + IMG_UINT32 ui32PDumpFlags); + + PVRSRV_ERROR PDumpRegKM(IMG_CHAR* pszPDumpRegName, + IMG_UINT32 dwReg, + IMG_UINT32 dwData); + + PVRSRV_ERROR PDumpComment(IMG_CHAR* pszFormat, ...) IMG_FORMAT_PRINTF(1, 2); + PVRSRV_ERROR PDumpCommentWithFlags(IMG_UINT32 ui32Flags, + IMG_CHAR* pszFormat, + ...) IMG_FORMAT_PRINTF(2, 3); + + PVRSRV_ERROR PDumpPDReg(PDUMP_MMU_ATTRIB *psMMUAttrib, + IMG_UINT32 ui32Reg, + IMG_UINT32 ui32dwData, + IMG_HANDLE hUniqueTag); + PVRSRV_ERROR PDumpPDRegWithFlags(PDUMP_MMU_ATTRIB *psMMUAttrib, + IMG_UINT32 ui32Reg, + IMG_UINT32 ui32Data, + IMG_UINT32 ui32Flags, + IMG_HANDLE hUniqueTag); + + IMG_BOOL PDumpIsLastCaptureFrameKM(IMG_VOID); + IMG_IMPORT IMG_BOOL PDumpIsCaptureFrameKM(IMG_VOID); + + IMG_VOID PDumpMallocPagesPhys(PVRSRV_DEVICE_IDENTIFIER *psDevID, + IMG_UINT32 ui32DevVAddr, + IMG_PUINT32 pui32PhysPages, + IMG_UINT32 ui32NumPages, + IMG_HANDLE hUniqueTag); + PVRSRV_ERROR PDumpSetMMUContext(PVRSRV_DEVICE_TYPE eDeviceType, + IMG_CHAR *pszMemSpace, + IMG_UINT32 *pui32MMUContextID, + IMG_UINT32 ui32MMUType, + IMG_HANDLE hUniqueTag1, + IMG_HANDLE hOSMemHandle, + IMG_VOID *pvPDCPUAddr); + PVRSRV_ERROR PDumpClearMMUContext(PVRSRV_DEVICE_TYPE eDeviceType, + IMG_CHAR *pszMemSpace, + IMG_UINT32 ui32MMUContextID, + IMG_UINT32 ui32MMUType); + + PVRSRV_ERROR PDumpPDDevPAddrKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo, + IMG_UINT32 ui32Offset, + IMG_DEV_PHYADDR sPDDevPAddr, + IMG_HANDLE hUniqueTag1, + IMG_HANDLE hUniqueTag2); + + IMG_BOOL PDumpTestNextFrame(IMG_UINT32 ui32CurrentFrame); + + PVRSRV_ERROR PDumpSaveMemKM (PVRSRV_DEVICE_IDENTIFIER *psDevId, + IMG_CHAR *pszFileName, + IMG_UINT32 ui32FileOffset, + IMG_DEV_VIRTADDR sDevBaseAddr, + IMG_UINT32 ui32Size, + IMG_UINT32 ui32DataMaster, + IMG_UINT32 ui32PDumpFlags); + + PVRSRV_ERROR PDumpTASignatureRegisters(PVRSRV_DEVICE_IDENTIFIER *psDevId, + IMG_UINT32 ui32DumpFrameNum, + IMG_UINT32 ui32TAKickCount, + IMG_BOOL 
bLastFrame, + IMG_UINT32 *pui32Registers, + IMG_UINT32 ui32NumRegisters); + + PVRSRV_ERROR PDump3DSignatureRegisters(PVRSRV_DEVICE_IDENTIFIER *psDevId, + IMG_UINT32 ui32DumpFrameNum, + IMG_BOOL bLastFrame, + IMG_UINT32 *pui32Registers, + IMG_UINT32 ui32NumRegisters); + + PVRSRV_ERROR PDumpCounterRegisters(PVRSRV_DEVICE_IDENTIFIER *psDevId, + IMG_UINT32 ui32DumpFrameNum, + IMG_BOOL bLastFrame, + IMG_UINT32 *pui32Registers, + IMG_UINT32 ui32NumRegisters); + + PVRSRV_ERROR PDumpRegRead(IMG_CHAR *pszPDumpRegName, + const IMG_UINT32 dwRegOffset, + IMG_UINT32 ui32Flags); + + PVRSRV_ERROR PDumpCycleCountRegRead(PVRSRV_DEVICE_IDENTIFIER *psDevId, + const IMG_UINT32 dwRegOffset, + IMG_BOOL bLastFrame); + + PVRSRV_ERROR PDumpIDLWithFlags(IMG_UINT32 ui32Clocks, IMG_UINT32 ui32Flags); + PVRSRV_ERROR PDumpIDL(IMG_UINT32 ui32Clocks); + + PVRSRV_ERROR PDumpMallocPages(PVRSRV_DEVICE_IDENTIFIER *psDevID, + IMG_UINT32 ui32DevVAddr, + IMG_CPU_VIRTADDR pvLinAddr, + IMG_HANDLE hOSMemHandle, + IMG_UINT32 ui32NumBytes, + IMG_UINT32 ui32PageSize, + IMG_HANDLE hUniqueTag, + IMG_UINT32 ui32Flags); + PVRSRV_ERROR PDumpMallocPageTable(PVRSRV_DEVICE_IDENTIFIER *psDevId, + IMG_HANDLE hOSMemHandle, + IMG_UINT32 ui32Offset, + IMG_CPU_VIRTADDR pvLinAddr, + IMG_UINT32 ui32NumBytes, + IMG_UINT32 ui32Flags, + IMG_HANDLE hUniqueTag); + PVRSRV_ERROR PDumpFreePages(struct _BM_HEAP_ *psBMHeap, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_UINT32 ui32NumBytes, + IMG_UINT32 ui32PageSize, + IMG_HANDLE hUniqueTag, + IMG_BOOL bInterleaved, + IMG_BOOL bSparse, + IMG_UINT32 ui32Flags); + PVRSRV_ERROR PDumpFreePageTable(PVRSRV_DEVICE_IDENTIFIER *psDevID, + IMG_HANDLE hOSMemHandle, + IMG_CPU_VIRTADDR pvLinAddr, + IMG_UINT32 ui32NumBytes, + IMG_UINT32 ui32Flags, + IMG_HANDLE hUniqueTag); + + IMG_IMPORT PVRSRV_ERROR PDumpHWPerfCBKM(PVRSRV_DEVICE_IDENTIFIER *psDevId, + IMG_CHAR *pszFileName, + IMG_UINT32 ui32FileOffset, + IMG_DEV_VIRTADDR sDevBaseAddr, + IMG_UINT32 ui32Size, + IMG_UINT32 ui32MMUContextID, + IMG_UINT32 
ui32PDumpFlags); + + PVRSRV_ERROR PDumpSignatureBuffer(PVRSRV_DEVICE_IDENTIFIER *psDevId, + IMG_CHAR *pszFileName, + IMG_CHAR *pszBufferType, + IMG_UINT32 ui32FileOffset, + IMG_DEV_VIRTADDR sDevBaseAddr, + IMG_UINT32 ui32Size, + IMG_UINT32 ui32MMUContextID, + IMG_UINT32 ui32PDumpFlags); + + PVRSRV_ERROR PDumpCBP(PPVRSRV_KERNEL_MEM_INFO psROffMemInfo, + IMG_UINT32 ui32ROffOffset, + IMG_UINT32 ui32WPosVal, + IMG_UINT32 ui32PacketSize, + IMG_UINT32 ui32BufferSize, + IMG_UINT32 ui32Flags, + IMG_HANDLE hUniqueTag); + + PVRSRV_ERROR PDumpRegBasedCBP(IMG_CHAR *pszPDumpRegName, + IMG_UINT32 ui32RegOffset, + IMG_UINT32 ui32WPosVal, + IMG_UINT32 ui32PacketSize, + IMG_UINT32 ui32BufferSize, + IMG_UINT32 ui32Flags); + + IMG_VOID PDumpVGXMemToFile(IMG_CHAR *pszFileName, + IMG_UINT32 ui32FileOffset, + PVRSRV_KERNEL_MEM_INFO *psMemInfo, + IMG_UINT32 uiAddr, + IMG_UINT32 ui32Size, + IMG_UINT32 ui32PDumpFlags, + IMG_HANDLE hUniqueTag); + + IMG_VOID PDumpSuspendKM(IMG_VOID); + IMG_VOID PDumpResumeKM(IMG_VOID); + + /* New pdump common functions */ + PVRSRV_ERROR PDumpStoreMemToFile(PDUMP_MMU_ATTRIB *psMMUAttrib, + IMG_CHAR *pszFileName, + IMG_UINT32 ui32FileOffset, + PVRSRV_KERNEL_MEM_INFO *psMemInfo, + IMG_UINT32 uiAddr, + IMG_UINT32 ui32Size, + IMG_UINT32 ui32PDumpFlags, + IMG_HANDLE hUniqueTag); + + #define PDUMPMEMPOL PDumpMemPolKM + #define PDUMPMEM PDumpMemKM + #define PDUMPMEMPTENTRIES PDumpMemPTEntriesKM + #define PDUMPPDENTRIES PDumpMemPDEntriesKM + #define PDUMPMEMUM PDumpMemUM + #define PDUMPINIT PDumpInitCommon + #define PDUMPDEINIT PDumpDeInitCommon + #define PDUMPISLASTFRAME PDumpIsLastCaptureFrameKM + #define PDUMPTESTFRAME PDumpIsCaptureFrameKM + #define PDUMPTESTNEXTFRAME PDumpTestNextFrame + #define PDUMPREGWITHFLAGS PDumpRegWithFlagsKM + #define PDUMPREG PDumpRegKM + #define PDUMPCOMMENT PDumpComment + #define PDUMPCOMMENTWITHFLAGS PDumpCommentWithFlags + #define PDUMPREGPOL PDumpRegPolKM + #define PDUMPREGPOLWITHFLAGS PDumpRegPolWithFlagsKM + #define 
PDUMPMALLOCPAGES PDumpMallocPages + #define PDUMPMALLOCPAGETABLE PDumpMallocPageTable + #define PDUMPSETMMUCONTEXT PDumpSetMMUContext + #define PDUMPCLEARMMUCONTEXT PDumpClearMMUContext + #define PDUMPPDDEVPADDR PDumpPDDevPAddrKM + #define PDUMPFREEPAGES PDumpFreePages + #define PDUMPFREEPAGETABLE PDumpFreePageTable + #define PDUMPPDREG PDumpPDReg + #define PDUMPPDREGWITHFLAGS PDumpPDRegWithFlags + #define PDUMPCBP PDumpCBP + #define PDUMPREGBASEDCBP PDumpRegBasedCBP + #define PDUMPMALLOCPAGESPHYS PDumpMallocPagesPhys + #define PDUMPENDINITPHASE PDumpStopInitPhaseKM + #define PDUMPBITMAPKM PDumpBitmapKM + #define PDUMPDRIVERINFO PDumpDriverInfoKM + #define PDUMPIDLWITHFLAGS PDumpIDLWithFlags + #define PDUMPIDL PDumpIDL + #define PDUMPSUSPEND PDumpSuspendKM + #define PDUMPRESUME PDumpResumeKM + +#else +#if defined LINUX || defined (__QNXNTO__) || defined GCC_IA32 || defined GCC_ARM + #define PDUMPMEMPOL(args...) + #define PDUMPMEM(args...) + #define PDUMPMEMPTENTRIES(args...) + #define PDUMPPDENTRIES(args...) + #define PDUMPMEMUM(args...) + #define PDUMPINIT(args...) + #define PDUMPDEINIT(args...) + #define PDUMPISLASTFRAME(args...) + #define PDUMPTESTFRAME(args...) + #define PDUMPTESTNEXTFRAME(args...) + #define PDUMPREGWITHFLAGS(args...) + #define PDUMPREG(args...) + #define PDUMPCOMMENT(args...) + #define PDUMPREGPOL(args...) + #define PDUMPREGPOLWITHFLAGS(args...) + #define PDUMPMALLOCPAGES(args...) + #define PDUMPMALLOCPAGETABLE(args...) + #define PDUMPSETMMUCONTEXT(args...) + #define PDUMPCLEARMMUCONTEXT(args...) + #define PDUMPPDDEVPADDR(args...) + #define PDUMPFREEPAGES(args...) + #define PDUMPFREEPAGETABLE(args...) + #define PDUMPPDREG(args...) + #define PDUMPPDREGWITHFLAGS(args...) + #define PDUMPSYNC(args...) + #define PDUMPCOPYTOMEM(args...) + #define PDUMPWRITE(args...) + #define PDUMPCBP(args...) + #define PDUMPREGBASEDCBP(args...) + #define PDUMPCOMMENTWITHFLAGS(args...) + #define PDUMPMALLOCPAGESPHYS(args...) + #define PDUMPENDINITPHASE(args...) 
+ #define PDUMPMSVDXREG(args...) + #define PDUMPMSVDXREGWRITE(args...) + #define PDUMPMSVDXREGREAD(args...) + #define PDUMPMSVDXPOLEQ(args...) + #define PDUMPMSVDXPOL(args...) + #define PDUMPBITMAPKM(args...) + #define PDUMPDRIVERINFO(args...) + #define PDUMPIDLWITHFLAGS(args...) + #define PDUMPIDL(args...) + #define PDUMPSUSPEND(args...) + #define PDUMPRESUME(args...) + #define PDUMPMSVDXWRITEREF(args...) + #else + #error Compiler not specified + #endif +#endif + +#if defined (__cplusplus) +} +#endif + +#endif /* _PDUMP_KM_H_ */ + +/****************************************************************************** + End of file (pdump_km.h) +******************************************************************************/ diff --git a/sgx_km/eurasia_km/services4/srvkm/include/pdump_osfunc.h b/sgx_km/eurasia_km/services4/srvkm/include/pdump_osfunc.h new file mode 100644 index 0000000..9fc1fd8 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/include/pdump_osfunc.h @@ -0,0 +1,385 @@ +/*************************************************************************/ /*! +@Title OS-independent interface to helper functions for pdump +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include + +#if defined(__cplusplus) +extern "C" { +#endif + + +/* + * Some OSes (WinXP,CE) allocate the string on the stack, but some + * (Linux,Symbian) use a global variable/lock instead. + * Would be good to use the same across all OSes. + * + * A handle is returned which represents IMG_CHAR* type on all OSes except + * Symbian when it represents PDumpState* type. + * + * The allocated buffer length is also returned on OSes where it's + * supported (e.g. Linux). 
+ */ +#define MAX_PDUMP_STRING_LENGTH (256) + + +#if defined(__QNXNTO__) + +#define PDUMP_GET_SCRIPT_STRING() \ + IMG_CHAR pszScript[MAX_PDUMP_STRING_LENGTH]; \ + IMG_UINT32 ui32MaxLen = MAX_PDUMP_STRING_LENGTH-1; \ + IMG_HANDLE hScript = (IMG_HANDLE)pszScript; + +#define PDUMP_GET_MSG_STRING() \ + IMG_CHAR pszMsg[MAX_PDUMP_STRING_LENGTH]; \ + IMG_UINT32 ui32MaxLen = MAX_PDUMP_STRING_LENGTH-1; + +#define PDUMP_GET_FILE_STRING() \ + IMG_CHAR pszFileName[MAX_PDUMP_STRING_LENGTH]; \ + IMG_UINT32 ui32MaxLen = MAX_PDUMP_STRING_LENGTH-1; + +#define PDUMP_GET_SCRIPT_AND_FILE_STRING() \ + IMG_CHAR pszScript[MAX_PDUMP_STRING_LENGTH]; \ + IMG_CHAR pszFileName[MAX_PDUMP_STRING_LENGTH]; \ + IMG_UINT32 ui32MaxLenScript = MAX_PDUMP_STRING_LENGTH-1; \ + IMG_UINT32 ui32MaxLenFileName = MAX_PDUMP_STRING_LENGTH-1; \ + IMG_HANDLE hScript = (IMG_HANDLE)pszScript; + +#define PDUMP_LOCK(args...) +#define PDUMP_UNLOCK(args...) +#define PDUMP_LOCK_MSG(args...) +#define PDUMP_UNLOCK_MSG(args...) + +#else /* __QNXNTO__ */ + + + /* + * Linux + */ +#define PDUMP_GET_SCRIPT_STRING() \ + IMG_HANDLE hScript; \ + IMG_UINT32 ui32MaxLen; \ + PVRSRV_ERROR eError; \ + eError = PDumpOSGetScriptString(&hScript, &ui32MaxLen);\ + if(eError != PVRSRV_OK) return eError; + +#define PDUMP_GET_MSG_STRING() \ + IMG_CHAR *pszMsg; \ + IMG_UINT32 ui32MaxLen; \ + PVRSRV_ERROR eError; \ + eError = PDumpOSGetMessageString(&pszMsg, &ui32MaxLen);\ + if(eError != PVRSRV_OK) return eError; + +#define PDUMP_GET_FILE_STRING() \ + IMG_CHAR *pszFileName; \ + IMG_UINT32 ui32MaxLen; \ + PVRSRV_ERROR eError; \ + eError = PDumpOSGetFilenameString(&pszFileName, &ui32MaxLen);\ + if(eError != PVRSRV_OK) return eError; + +#define PDUMP_GET_SCRIPT_AND_FILE_STRING() \ + IMG_HANDLE hScript; \ + IMG_CHAR *pszFileName; \ + IMG_UINT32 ui32MaxLenScript; \ + IMG_UINT32 ui32MaxLenFileName; \ + PVRSRV_ERROR eError; \ + eError = PDumpOSGetScriptString(&hScript, &ui32MaxLenScript);\ + if(eError != PVRSRV_OK) return eError; \ + eError = 
PDumpOSGetFilenameString(&pszFileName, &ui32MaxLenFileName);\ + if(eError != PVRSRV_OK) return eError; + +#define PDUMP_LOCK() \ + PDumpOSLock(__LINE__); + +#define PDUMP_UNLOCK() \ + PDumpOSUnlock(__LINE__); + +#define PDUMP_LOCK_MSG() \ + PDumpOSLockMessageBuffer(); + +#define PDUMP_UNLOCK_MSG() \ + PDumpOSUnlockMessageBuffer(); + + /*! + * @name PDumpOSLock + * @brief Lock the PDump streams + * @return error none + */ + IMG_VOID PDumpOSLock(IMG_UINT32 ui32Line); + + /*! + * @name PDumpOSUnlock + * @brief Unlock the PDump streams + * @return error none + */ + IMG_VOID PDumpOSUnlock(IMG_UINT32 ui32Line); + + /*! + * @name PDumpOSLockMessageBuffer + * @brief Lock the PDump message buffer + * @return error none + */ + IMG_VOID PDumpOSLockMessageBuffer(IMG_VOID); + + /*! + * @name PDumpOSUnlockMessageBuffer + * @brief Unlock the PDump message buffer + * @return error none + */ + IMG_VOID PDumpOSUnlockMessageBuffer(IMG_VOID); + +#endif /* __QNXNTO__ */ + + /*! + * @name PDumpOSGetScriptString + * @brief Get the "script" buffer + * @param phScript - buffer handle for pdump script + * @param pui32MaxLen - max length of the script buffer + * FIXME: the max length should be internal to the OS-specific code + * @return error (always PVRSRV_OK on some OSes) + */ + PVRSRV_ERROR PDumpOSGetScriptString(IMG_HANDLE *phScript, IMG_UINT32 *pui32MaxLen); + + /*! + * @name PDumpOSGetMessageString + * @brief Get the "message" buffer + * @param ppszMsg - buffer pointer for pdump messages + * @param pui32MaxLen - max length of the message buffer + * FIXME: the max length should be internal to the OS-specific code + * @return error (always PVRSRV_OK on some OSes) + */ + PVRSRV_ERROR PDumpOSGetMessageString(IMG_CHAR **ppszMsg, IMG_UINT32 *pui32MaxLen); + + /*!
+ * @name PDumpOSGetFilenameString + * @brief Get the "filename" buffer + * @param ppszFile - buffer pointer for filename + * @param pui32MaxLen - max length of the filename buffer + * FIXME: the max length should be internal to the OS-specific code + * @return error (always PVRSRV_OK on some OSes) + */ + PVRSRV_ERROR PDumpOSGetFilenameString(IMG_CHAR **ppszFile, IMG_UINT32 *pui32MaxLen); + + +/* + * Define macro for processing variable args list in OS-independent + * manner. See e.g. PDumpComment(). + */ + +#define PDUMP_va_list va_list +#define PDUMP_va_start va_start +#define PDUMP_va_end va_end + + + +/*! + * @name PDumpOSGetStream + * @brief Get a handle to the labelled stream (cast the handle to PDBG_STREAM to use it) + * @param ePDumpStream - stream label + */ +IMG_HANDLE PDumpOSGetStream(IMG_UINT32 ePDumpStream); + +/*! + * @name PDumpOSGetStreamOffset + * @brief Return current offset within the labelled stream + * @param ePDumpStream - stream label + */ +IMG_UINT32 PDumpOSGetStreamOffset(IMG_UINT32 ePDumpStream); + +/*! + * @name PDumpOSGetParamFileNum + * @brief Return file number of the 'script' stream, in the case that the file was split + * @param ePDumpStream - stream label + */ +IMG_UINT32 PDumpOSGetParamFileNum(IMG_VOID); + +/*! + * @name PDumpOSCheckForSplitting + * @brief Check if the requested pdump params are too large for a single file + * @param hStream - pdump stream + * @param ui32Size - size of params to dump (bytes) + * @param ui32Flags - pdump flags + */ +IMG_VOID PDumpOSCheckForSplitting(IMG_HANDLE hStream, IMG_UINT32 ui32Size, IMG_UINT32 ui32Flags); + +/*! + * @name PDumpOSIsSuspended + * @brief Is the pdump stream busy? + * @return IMG_BOOL + */ +IMG_BOOL PDumpOSIsSuspended(IMG_VOID); + +/*! + * @name PDumpOSJTInitialised + * @brief Is the pdump jump table initialised? + * @return IMG_BOOL + */ +IMG_BOOL PDumpOSJTInitialised(IMG_VOID); + +/*! + * @name PDumpOSWriteString + * @brief General function for writing to pdump stream.
+ * Usually more convenient to use PDumpOSWriteString2 below. + * @param hDbgStream - pdump stream handle + * @param psui8Data - data to write + * @param ui32Size - size of write + * @param ui32Flags - pdump flags + * @return error + */ +IMG_BOOL PDumpOSWriteString(IMG_HANDLE hDbgStream, + IMG_UINT8 *psui8Data, + IMG_UINT32 ui32Size, + IMG_UINT32 ui32Flags); + +/*! + * @name PDumpOSWriteString2 + * @brief Write a string to the "script" output stream + * @param pszScript - buffer to write (ptr to state structure on Symbian) + * @param ui32Flags - pdump flags + * @return error + */ +IMG_BOOL PDumpOSWriteString2(IMG_HANDLE hScript, IMG_UINT32 ui32Flags); + +/*! + * @name PDumpOSBufprintf + * @brief Printf to OS-specific pdump state buffer + * @param hBuf - buffer handle to write into (ptr to state structure on Symbian) + * @param ui32ScriptSizeMax - maximum size of data to write (not supported on all OSes) + * @param pszFormat - format string + */ +PVRSRV_ERROR PDumpOSBufprintf(IMG_HANDLE hBuf, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR* pszFormat, ...) IMG_FORMAT_PRINTF(3, 4); + +/*! + * @name PDumpOSDebugPrintf + * @brief Debug message during pdumping + * @param pszFormat - format string + */ +IMG_VOID PDumpOSDebugPrintf(IMG_CHAR* pszFormat, ...) IMG_FORMAT_PRINTF(1, 2); + +/* + * Write into a IMG_CHAR* on all OSes. Can be allocated on the stack or heap. + */ +/*! + * @name PDumpOSSprintf + * @brief Printf to IMG char array + * @param pszComment - char array to print into + * @param pszFormat - format string + */ +PVRSRV_ERROR PDumpOSSprintf(IMG_CHAR *pszComment, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR *pszFormat, ...) IMG_FORMAT_PRINTF(3, 4); + +/*! + * @name PDumpOSVSprintf + * @brief Printf to IMG string using variable args (see stdarg.h). This is necessary + * because the ... notation does not support nested function calls. 
+ * @param pszMsg - char array to print into + * @param ui32ScriptSizeMax - maximum size of data to write (not supported on all OSes) + * @param pszFormat - format string + * @param vaArgs - variable args structure (from stdarg.h) + */ +PVRSRV_ERROR PDumpOSVSprintf(IMG_CHAR *pszMsg, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR* pszFormat, PDUMP_va_list vaArgs) IMG_FORMAT_PRINTF(3, 0); + +/*! + * @name PDumpOSBuflen + * @param hBuffer - handle to buffer (ptr to state structure on Symbian) + * @param ui32BufferSizeMax - max size of buffer (chars) + * @return length of buffer, will always be <= ui32BufferSizeMax + */ +IMG_UINT32 PDumpOSBuflen(IMG_HANDLE hBuffer, IMG_UINT32 ui32BufferSizeMax); + +/*! + * @name PDumpOSVerifyLineEnding + * @brief Put \r\n sequence at the end if it isn't already there + * @param hBuffer - handle to buffer + * @param ui32BufferSizeMax - max size of buffer (chars) + */ +IMG_VOID PDumpOSVerifyLineEnding(IMG_HANDLE hBuffer, IMG_UINT32 ui32BufferSizeMax); + +/*! + * @name PDumpOSCPUVAddrToDevPAddr + * @brief OS function to convert CPU virtual to device physical for dumping pages + * @param hOSMemHandle mem allocation handle (used if kernel virtual mem space is limited, e.g. linux) + * @param ui32Offset dword offset into allocation (for use with mem handle, e.g. linux) + * @param pui8LinAddr CPU linear addr (usually a kernel virtual address) + * @param ui32PageSize page size, used for assertion check + * @return psDevPAddr device physical addr + */ +IMG_VOID PDumpOSCPUVAddrToDevPAddr(PVRSRV_DEVICE_TYPE eDeviceType, + IMG_HANDLE hOSMemHandle, + IMG_UINT32 ui32Offset, + IMG_UINT8 *pui8LinAddr, + IMG_UINT32 ui32PageSize, + IMG_DEV_PHYADDR *psDevPAddr); + +/*! + * @name PDumpOSCPUVAddrToPhysPages + * @brief OS function to convert CPU virtual to backing physical pages + * @param hOSMemHandle mem allocation handle (used if kernel virtual mem space is limited, e.g.
linux) + * @param ui32Offset offset within mem allocation block + * @param pui8LinAddr CPU linear addr + * @param uiDataPageMask mask for data page (= data page size -1) + * @return pui32PageOffset CPU page offset (same as device page offset if page sizes equal) + */ +IMG_VOID PDumpOSCPUVAddrToPhysPages(IMG_HANDLE hOSMemHandle, + IMG_UINT32 ui32Offset, + IMG_PUINT8 pui8LinAddr, + IMG_UINTPTR_T uiDataPageMask, + IMG_UINT32 *pui32PageOffset); + +/*! + * @name PDumpOSReleaseExecution + * @brief OS function to switch to another process, to clear pdump buffers + */ +IMG_VOID PDumpOSReleaseExecution(IMG_VOID); + +/*! + * @name PDumpOSIsCaptureFrameKM + * @brief Is the current frame a capture frame? + */ +IMG_BOOL PDumpOSIsCaptureFrameKM(IMG_VOID); + +/*! + * @name PDumpOSSetFrameKM + * @brief Set frame counter + */ +PVRSRV_ERROR PDumpOSSetFrameKM(IMG_UINT32 ui32Frame); + +#if defined (__cplusplus) +} +#endif diff --git a/sgx_km/eurasia_km/services4/srvkm/include/perfkm.h b/sgx_km/eurasia_km/services4/srvkm/include/perfkm.h new file mode 100644 index 0000000..458a29b --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/include/perfkm.h @@ -0,0 +1,53 @@ +/*************************************************************************/ /*! +@Title Perf initialisation +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#ifndef _PERFKM_H_ +#define _PERFKM_H_ + +#include "img_types.h" + +#define PERFINIT() +#define PERFDEINIT() + +#endif /* _PERFKM_H_ */ + +/****************************************************************************** + End of file (perfkm.h) +******************************************************************************/ diff --git a/sgx_km/eurasia_km/services4/srvkm/include/perproc.h b/sgx_km/eurasia_km/services4/srvkm/include/perproc.h new file mode 100644 index 0000000..124f2f2 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/include/perproc.h @@ -0,0 +1,141 @@ +/*************************************************************************/ /*! +@Title Handle Manager API +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Perprocess data +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#ifndef __PERPROC_H__ +#define __PERPROC_H__ + +#if defined (__cplusplus) +extern "C" { +#endif + +#include "img_types.h" +#include "resman.h" + +#include "handle.h" + +typedef struct _PVRSRV_PER_PROCESS_DATA_ +{ + IMG_UINT32 ui32PID; + IMG_HANDLE hBlockAlloc; + PRESMAN_CONTEXT hResManContext; + IMG_HANDLE hPerProcData; + PVRSRV_HANDLE_BASE *psHandleBase; +#if defined (PVR_SECURE_HANDLES) + /* Handles are being allocated in batches */ + IMG_BOOL bHandlesBatched; +#endif /* PVR_SECURE_HANDLES */ + IMG_UINT32 ui32RefCount; + + /* True if the process is the initialisation server. 
*/ + IMG_BOOL bInitProcess; +#if defined(PDUMP) + /* True if pdump data from the process is 'persistent' */ + IMG_BOOL bPDumpPersistent; +#if defined(SUPPORT_PDUMP_MULTI_PROCESS) + /* True if this process is marked for pdumping. This flag is + * significant in a multi-app environment. + */ + IMG_BOOL bPDumpActive; +#endif /* SUPPORT_PDUMP_MULTI_PROCESS */ +#endif + /* + * OS specific data can be stored via this handle. + * See osperproc.h for a generic mechanism for initialising + * this field. + */ + IMG_HANDLE hOsPrivateData; +} PVRSRV_PER_PROCESS_DATA; + +PVRSRV_PER_PROCESS_DATA *PVRSRVPerProcessData(IMG_UINT32 ui32PID); + +PVRSRV_ERROR PVRSRVPerProcessDataConnect(IMG_UINT32 ui32PID, IMG_UINT32 ui32Flags); +IMG_VOID PVRSRVPerProcessDataDisconnect(IMG_UINT32 ui32PID); + +PVRSRV_ERROR PVRSRVPerProcessDataInit(IMG_VOID); +PVRSRV_ERROR PVRSRVPerProcessDataDeInit(IMG_VOID); + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVFindPerProcessData) +#endif +static INLINE +PVRSRV_PER_PROCESS_DATA *PVRSRVFindPerProcessData(IMG_VOID) +{ + return PVRSRVPerProcessData(OSGetCurrentProcessIDKM()); +} + + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVProcessPrivateData) +#endif +static INLINE +IMG_HANDLE PVRSRVProcessPrivateData(PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + return (psPerProc != IMG_NULL) ? 
psPerProc->hOsPrivateData : IMG_NULL; +} + + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVPerProcessPrivateData) +#endif +static INLINE +IMG_HANDLE PVRSRVPerProcessPrivateData(IMG_UINT32 ui32PID) +{ + return PVRSRVProcessPrivateData(PVRSRVPerProcessData(ui32PID)); +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVFindPerProcessPrivateData) +#endif +static INLINE +IMG_HANDLE PVRSRVFindPerProcessPrivateData(IMG_VOID) +{ + return PVRSRVProcessPrivateData(PVRSRVFindPerProcessData()); +} + +#if defined (__cplusplus) +} +#endif + +#endif /* __PERPROC_H__ */ + +/****************************************************************************** + End of file (perproc.h) +******************************************************************************/ diff --git a/sgx_km/eurasia_km/services4/srvkm/include/power.h b/sgx_km/eurasia_km/services4/srvkm/include/power.h new file mode 100644 index 0000000..0abaf75 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/include/power.h @@ -0,0 +1,140 @@ +/*************************************************************************/ /*! +@Title Power Management Functions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Main APIs for power management functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#ifndef POWER_H +#define POWER_H + +#if defined(__cplusplus) +extern "C" { +#endif + + +/*! 
+ ***************************************************************************** + * Power management + *****************************************************************************/ + +typedef struct _PVRSRV_POWER_DEV_TAG_ +{ + PFN_PRE_POWER pfnPrePower; + PFN_POST_POWER pfnPostPower; + PFN_PRE_CLOCKSPEED_CHANGE pfnPreClockSpeedChange; + PFN_POST_CLOCKSPEED_CHANGE pfnPostClockSpeedChange; + IMG_HANDLE hDevCookie; + IMG_UINT32 ui32DeviceIndex; + PVRSRV_DEV_POWER_STATE eDefaultPowerState; + PVRSRV_DEV_POWER_STATE eCurrentPowerState; + struct _PVRSRV_POWER_DEV_TAG_ *psNext; + struct _PVRSRV_POWER_DEV_TAG_ **ppsThis; + +} PVRSRV_POWER_DEV; + +typedef enum _PVRSRV_INIT_SERVER_STATE_ +{ + PVRSRV_INIT_SERVER_Unspecified = -1, + PVRSRV_INIT_SERVER_RUNNING = 0, + PVRSRV_INIT_SERVER_RAN = 1, + PVRSRV_INIT_SERVER_SUCCESSFUL = 2, + PVRSRV_INIT_SERVER_NUM = 3, + PVRSRV_INIT_SERVER_FORCE_I32 = 0x7fffffff + +} PVRSRV_INIT_SERVER_STATE, *PPVRSRV_INIT_SERVER_STATE; + +IMG_IMPORT +IMG_BOOL PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_STATE eInitServerState); + +IMG_IMPORT +PVRSRV_ERROR PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_STATE eInitServerState, IMG_BOOL bState); + + + +IMG_IMPORT +PVRSRV_ERROR PVRSRVPowerLock(IMG_UINT32 ui32CallerID, + IMG_BOOL bSystemPowerEvent); +IMG_IMPORT +IMG_VOID PVRSRVPowerUnlock(IMG_UINT32 ui32CallerID); + +IMG_IMPORT +PVRSRV_ERROR PVRSRVSetDevicePowerStateKM(IMG_UINT32 ui32DeviceIndex, + PVRSRV_DEV_POWER_STATE eNewPowerState); + +IMG_IMPORT +PVRSRV_ERROR PVRSRVSystemPrePowerStateKM(PVRSRV_SYS_POWER_STATE eNewPowerState); +IMG_IMPORT +PVRSRV_ERROR PVRSRVSystemPostPowerStateKM(PVRSRV_SYS_POWER_STATE eNewPowerState); + +IMG_IMPORT +PVRSRV_ERROR PVRSRVSetPowerStateKM (PVRSRV_SYS_POWER_STATE ePVRState); + +IMG_IMPORT +PVRSRV_ERROR PVRSRVRegisterPowerDevice(IMG_UINT32 ui32DeviceIndex, + PFN_PRE_POWER pfnPrePower, + PFN_POST_POWER pfnPostPower, + PFN_PRE_CLOCKSPEED_CHANGE pfnPreClockSpeedChange, + PFN_POST_CLOCKSPEED_CHANGE pfnPostClockSpeedChange, + 
IMG_HANDLE hDevCookie, + PVRSRV_DEV_POWER_STATE eCurrentPowerState, + PVRSRV_DEV_POWER_STATE eDefaultPowerState); + +IMG_IMPORT +PVRSRV_ERROR PVRSRVRemovePowerDevice (IMG_UINT32 ui32DeviceIndex); + +IMG_IMPORT +IMG_BOOL PVRSRVIsDevicePowered(IMG_UINT32 ui32DeviceIndex); + +IMG_IMPORT +PVRSRV_ERROR PVRSRVDevicePreClockSpeedChange(IMG_UINT32 ui32DeviceIndex, + IMG_BOOL bIdleDevice, + IMG_VOID *pvInfo); + +IMG_IMPORT +IMG_VOID PVRSRVDevicePostClockSpeedChange(IMG_UINT32 ui32DeviceIndex, + IMG_BOOL bIdleDevice, + IMG_VOID *pvInfo); + +#if defined (__cplusplus) +} +#endif +#endif /* POWER_H */ + +/****************************************************************************** + End of file (power.h) +******************************************************************************/ diff --git a/sgx_km/eurasia_km/services4/srvkm/include/queue.h b/sgx_km/eurasia_km/services4/srvkm/include/queue.h new file mode 100644 index 0000000..2af7a34 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/include/queue.h @@ -0,0 +1,154 @@ +/*************************************************************************/ /*! +@Title Command Queue API +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Internal structures and definitions for command queues +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef QUEUE_H +#define QUEUE_H + +#if defined(SUPPORT_PVRSRV_DEVICE_CLASS) + +#if defined(__cplusplus) +extern "C" { +#endif + +/*! + * Macro to Read Offset in given command queue + */ +#define UPDATE_QUEUE_ROFF(psQueue, uSize) \ + (psQueue)->uReadOffset = ((psQueue)->uReadOffset + (uSize)) \ + & ((psQueue)->uQueueSize - 1); + +/*! + generic cmd complete structure. + This structure represents the storage required between starting and finishing + a given cmd and is required to hold the generic sync object update data. 
+ note: for any given system we know what command types we support and + therefore how much storage is required for any number of commands in progress + */ + typedef struct _COMMAND_COMPLETE_DATA_ + { + IMG_BOOL bInUse; + /* ; */ /*!< TBD */ + IMG_UINT32 ui32DstSyncCount; /*!< number of dst sync objects */ + IMG_UINT32 ui32SrcSyncCount; /*!< number of src sync objects */ + PVRSRV_SYNC_OBJECT *psDstSync; /*!< dst sync ptr list, + allocated on back of this structure */ + PVRSRV_SYNC_OBJECT *psSrcSync; /*!< src sync ptr list, + allocated on back of this structure */ + IMG_UINT32 ui32AllocSize; /*!< allocated size*/ + PFN_QUEUE_COMMAND_COMPLETE pfnCommandComplete; /*!< Command complete callback */ + IMG_HANDLE hCallbackData; /*!< Command complete callback data */ + +#if defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) || defined(PVR_ANDROID_NATIVE_WINDOW_HAS_FENCE) + IMG_VOID *pvCleanupFence; /*!< Sync fence to 'put' after timeline inc() */ + IMG_VOID *pvTimeline; /*!< Android sync timeline to inc() */ +#endif + }COMMAND_COMPLETE_DATA, *PCOMMAND_COMPLETE_DATA; + +#if !defined(USE_CODE) +IMG_VOID QueueDumpDebugInfo(IMG_VOID); + +IMG_IMPORT +PVRSRV_ERROR PVRSRVProcessQueues (IMG_BOOL bFlush); + +#if defined(__linux__) && defined(__KERNEL__) +#include +#include +void* ProcSeqOff2ElementQueue(struct seq_file * sfile, loff_t off); +void ProcSeqShowQueue(struct seq_file *sfile,void* el); +#endif + + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateCommandQueueKM(IMG_SIZE_T uQueueSize, + PVRSRV_QUEUE_INFO **ppsQueueInfo); +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyCommandQueueKM(PVRSRV_QUEUE_INFO *psQueueInfo); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVInsertCommandKM(PVRSRV_QUEUE_INFO *psQueue, + PVRSRV_COMMAND **ppsCommand, + IMG_UINT32 ui32DevIndex, + IMG_UINT16 CommandType, + IMG_UINT32 ui32DstSyncCount, + PVRSRV_KERNEL_SYNC_INFO *apsDstSync[], + IMG_UINT32 ui32SrcSyncCount, + PVRSRV_KERNEL_SYNC_INFO *apsSrcSync[], + IMG_SIZE_T ui32DataByteSize, + 
PFN_QUEUE_COMMAND_COMPLETE pfnCommandComplete, + IMG_HANDLE hCallbackData, + IMG_HANDLE *phFence); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVGetQueueSpaceKM(PVRSRV_QUEUE_INFO *psQueue, + IMG_SIZE_T uParamSize, + IMG_VOID **ppvSpace); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVSubmitCommandKM(PVRSRV_QUEUE_INFO *psQueue, + PVRSRV_COMMAND *psCommand); + +IMG_IMPORT +IMG_VOID PVRSRVCommandCompleteKM(IMG_HANDLE hCmdCookie, IMG_BOOL bScheduleMISR); + +IMG_IMPORT +PVRSRV_ERROR PVRSRVRegisterCmdProcListKM(IMG_UINT32 ui32DevIndex, + PFN_CMD_PROC *ppfnCmdProcList, + IMG_UINT32 ui32MaxSyncsPerCmd[][2], + IMG_UINT32 ui32CmdCount); +IMG_IMPORT +PVRSRV_ERROR PVRSRVRemoveCmdProcListKM(IMG_UINT32 ui32DevIndex, + IMG_UINT32 ui32CmdCount); + +#endif /* !defined(USE_CODE) */ + + +#if defined (__cplusplus) +} +#endif + +#endif /* defined(SUPPORT_PVRSRV_DEVICE_CLASS) */ + +#endif /* QUEUE_H */ + +/****************************************************************************** + End of file (queue.h) +******************************************************************************/ diff --git a/sgx_km/eurasia_km/services4/srvkm/include/ra.h b/sgx_km/eurasia_km/services4/srvkm/include/ra.h new file mode 100644 index 0000000..aaeb345 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/include/ra.h @@ -0,0 +1,290 @@ +/*************************************************************************/ /*! +@Title Resource Allocator API +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef _RA_H_ +#define _RA_H_ + +#include "img_types.h" +#include "hash.h" +#include "osfunc.h" + +/** Resource arena. + * struct _RA_ARENA_ deliberately opaque + */ +typedef struct _RA_ARENA_ RA_ARENA; //PRQA S 3313 +typedef struct _BM_MAPPING_ BM_MAPPING; + + + +/** Enable support for arena statistics. */ +#define RA_STATS + + +/** Resource arena statistics. */ +struct _RA_STATISTICS_ +{ + /** total number of segments add to the arena */ + IMG_SIZE_T uSpanCount; + + /** number of current live segments within the arena */ + IMG_SIZE_T uLiveSegmentCount; + + /** number of current free segments within the arena */ + IMG_SIZE_T uFreeSegmentCount; + + /** total number of resource within the arena */ + IMG_SIZE_T uTotalResourceCount; + + /** number of free resource within the arena */ + IMG_SIZE_T uFreeResourceCount; + + /** total number of resources allocated from the arena */ + IMG_SIZE_T uCumulativeAllocs; + + /** total number of resources returned to the arena */ + IMG_SIZE_T uCumulativeFrees; + + /** total number of spans allocated by the callback mechanism */ + IMG_SIZE_T uImportCount; + + /** total number of spans deallocated by the callback mechanism */ + IMG_SIZE_T uExportCount; +}; +typedef struct _RA_STATISTICS_ RA_STATISTICS; + +struct _RA_SEGMENT_DETAILS_ +{ + IMG_SIZE_T uiSize; + IMG_CPU_PHYADDR sCpuPhyAddr; + IMG_HANDLE hSegment; +}; +typedef struct _RA_SEGMENT_DETAILS_ RA_SEGMENT_DETAILS; + +/** + * @Function RA_Create + * + * @Description + * + * To create a resource arena. + * + * @Input name - the name of the arena for diagnostic purposes. + * @Input base - the base of an initial resource span or 0. + * @Input uSize - the size of an initial resource span or 0. + * @Input pRef - the reference to return for the initial resource or 0. + * @Input uQuantum - the arena allocation quantum. + * @Input alloc - a resource allocation callback or 0. 
+ * @Input free - a resource de-allocation callback or 0. + * @Input import_handle - handle passed to alloc and free or 0. + * @Return arena handle, or IMG_NULL. + */ +RA_ARENA * +RA_Create (IMG_CHAR *name, + IMG_UINTPTR_T base, + IMG_SIZE_T uSize, + BM_MAPPING *psMapping, + IMG_SIZE_T uQuantum, + IMG_BOOL (*imp_alloc)(IMG_VOID *_h, + IMG_SIZE_T uSize, + IMG_SIZE_T *pActualSize, + BM_MAPPING **ppsMapping, + IMG_UINT32 uFlags, + IMG_PVOID pvPrivData, + IMG_UINT32 ui32PrivDataLength, + IMG_UINTPTR_T *pBase), + IMG_VOID (*imp_free) (IMG_VOID *, + IMG_UINTPTR_T, + BM_MAPPING *), + IMG_VOID (*backingstore_free) (IMG_VOID *, + IMG_SIZE_T, + IMG_SIZE_T, + IMG_HANDLE), + IMG_VOID *import_handle); + +/** + * @Function RA_Delete + * + * @Description + * + * To delete a resource arena. All resources allocated from the arena + * must be freed before deleting the arena. + * + * @Input pArena - the arena to delete. + * @Return None + */ +IMG_VOID +RA_Delete (RA_ARENA *pArena); + +/** + * @Function RA_TestDelete + * + * @Description + * + * To test whether it is safe to delete a resource arena. If any allocations + * have not been freed, the RA must not be deleted. + * + * @Input pArena - the arena to test. + * @Return IMG_BOOL - IMG_TRUE if is safe to go on and call RA_Delete. + */ +IMG_BOOL +RA_TestDelete (RA_ARENA *pArena); + +/** + * @Function RA_Add + * + * @Description + * + * To add a resource span to an arena. The span must not overlap with + * any span previously added to the arena. + * + * @Input pArena - the arena to add a span into. + * @Input base - the base of the span. + * @Input uSize - the extent of the span. + * @Return IMG_TRUE - success, IMG_FALSE - failure + */ +IMG_BOOL +RA_Add (RA_ARENA *pArena, IMG_UINTPTR_T base, IMG_SIZE_T uSize); + +/** + * @Function RA_Alloc + * + * @Description + * + * To allocate resource from an arena. + * + * @Input pArena - the arena + * @Input uRequestSize - the size of resource segment requested. 
+ * @Output pActualSize - the actual_size of resource segment allocated, + * typcially rounded up by quantum. + * @Output ppsMapping - the user reference associated with allocated + * resource span. + * @Input uFlags - flags influencing allocation policy. + * @Input uAlignment - the alignment constraint required for the + * allocated segment, use 0 if alignment not required. + * @Input uAlignmentOffset - the required alignment offset + * @Input pvPrivData - private data passed to OS allocator + * @Input ui32PrivData - length of private data + * + * @Output pBase - allocated base resource + * @Return IMG_TRUE - success, IMG_FALSE - failure + */ +IMG_BOOL +RA_Alloc (RA_ARENA *pArena, + IMG_SIZE_T uSize, + IMG_SIZE_T *pActualSize, + BM_MAPPING **ppsMapping, + IMG_UINT32 uFlags, + IMG_UINT32 uAlignment, + IMG_UINT32 uAlignmentOffset, + IMG_PVOID pvPrivData, + IMG_UINT32 ui32PrivDataLength, + IMG_UINTPTR_T *pBase); + +/** + * @Function RA_Free + * + * @Description To free a resource segment. + * + * @Input pArena - the arena the segment was originally allocated from. + * @Input base - the base of the resource span to free. + * @Input bFreeBackingStore - Should backing store memory be freed? + * + * @Return None + */ +IMG_VOID +RA_Free (RA_ARENA *pArena, IMG_UINTPTR_T base, IMG_BOOL bFreeBackingStore); + + +#ifdef RA_STATS + +#define CHECK_SPACE(total) \ +{ \ + if((total)<100) \ + return PVRSRV_ERROR_INVALID_PARAMS; \ +} + +#define UPDATE_SPACE(str, count, total) \ +{ \ + if((count) == -1) \ + return PVRSRV_ERROR_INVALID_PARAMS; \ + else \ + { \ + (str) += (count); \ + (total) -= (count); \ + } \ +} + + +/** + * @Function RA_GetNextLiveSegment + * + * @Description Returns details of the next live resource segments + * + * @Input pArena - the arena the segment was originally allocated from. 
+ * @Output psSegDetails - rtn details of segments + * + * @Return IMG_TRUE if operation succeeded + */ +IMG_BOOL RA_GetNextLiveSegment(IMG_HANDLE hArena, RA_SEGMENT_DETAILS *psSegDetails); + + +/** + * @Function RA_GetStats + * + * @Description gets stats on a given arena + * + * @Input pArena - the arena the segment was originally allocated from. + * @Input ppszStr - string to write stats to + * @Input pui32StrLen - length of string + * + * @Return PVRSRV_ERROR + */ +PVRSRV_ERROR RA_GetStats(RA_ARENA *pArena, + IMG_CHAR **ppszStr, + IMG_UINT32 *pui32StrLen); + +PVRSRV_ERROR RA_GetStatsFreeMem(RA_ARENA *pArena, + IMG_CHAR **ppszStr, + IMG_UINT32 *pui32StrLen); + +#endif /* #ifdef RA_STATS */ + +#endif + diff --git a/sgx_km/eurasia_km/services4/srvkm/include/refcount.h b/sgx_km/eurasia_km/services4/srvkm/include/refcount.h new file mode 100644 index 0000000..95493b7 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/include/refcount.h @@ -0,0 +1,293 @@ +/*************************************************************************/ /*! +@Title Services reference count debugging +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef __REFCOUNT_H__ +#define __REFCOUNT_H__ + +#include "pvr_bridge_km.h" +#if defined(SUPPORT_ION) +#include "ion_sync.h" +#endif /* defined(SUPPORT_ION) */ +#if defined(SUPPORT_DMABUF) +#include "dmabuf_sync.h" +#endif /* defined(SUPPORT_DMABUF) */ + +#if defined(PVRSRV_REFCOUNT_DEBUG) + +void PVRSRVDumpRefCountCCB(void); + +#define PVRSRVKernelSyncInfoIncRef(x...) \ + PVRSRVKernelSyncInfoIncRef2(__FILE__, __LINE__, x) +#define PVRSRVKernelSyncInfoDecRef(x...) 
\ + PVRSRVKernelSyncInfoDecRef2(__FILE__, __LINE__, x) +#define PVRSRVKernelMemInfoIncRef(x...) \ + PVRSRVKernelMemInfoIncRef2(__FILE__, __LINE__, x) +#define PVRSRVKernelMemInfoDecRef(x...) \ + PVRSRVKernelMemInfoDecRef2(__FILE__, __LINE__, x) +#define PVRSRVBMBufIncRef(x...) \ + PVRSRVBMBufIncRef2(__FILE__, __LINE__, x) +#define PVRSRVBMBufDecRef(x...) \ + PVRSRVBMBufDecRef2(__FILE__, __LINE__, x) +#define PVRSRVBMBufIncExport(x...) \ + PVRSRVBMBufIncExport2(__FILE__, __LINE__, x) +#define PVRSRVBMBufDecExport(x...) \ + PVRSRVBMBufDecExport2(__FILE__, __LINE__, x) + +void PVRSRVKernelSyncInfoIncRef2(const IMG_CHAR *pszFile, IMG_INT iLine, + PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo, + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo); +void PVRSRVKernelSyncInfoDecRef2(const IMG_CHAR *pszFile, IMG_INT iLine, + PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo, + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo); +void PVRSRVKernelMemInfoIncRef2(const IMG_CHAR *pszFile, IMG_INT iLine, + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo); +void PVRSRVKernelMemInfoDecRef2(const IMG_CHAR *pszFile, IMG_INT iLine, + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo); +void PVRSRVBMBufIncRef2(const IMG_CHAR *pszFile, + IMG_INT iLine, BM_BUF *pBuf); +void PVRSRVBMBufDecRef2(const IMG_CHAR *pszFile, + IMG_INT iLine, BM_BUF *pBuf); +void PVRSRVBMBufIncExport2(const IMG_CHAR *pszFile, + IMG_INT iLine, BM_BUF *pBuf); +void PVRSRVBMBufDecExport2(const IMG_CHAR *pszFile, + IMG_INT iLine, BM_BUF *pBuf); +void PVRSRVBMXProcIncRef2(const IMG_CHAR *pszFile, IMG_INT iLine, + IMG_UINT32 ui32Index); +void PVRSRVBMXProcDecRef2(const IMG_CHAR *pszFile, IMG_INT iLine, + IMG_UINT32 ui32Index); + +#if defined(__linux__) + +/* mmap refcounting is Linux specific */ +#include "mmap.h" + +#define PVRSRVOffsetStructIncRef(x...) \ + PVRSRVOffsetStructIncRef2(__FILE__, __LINE__, x) +#define PVRSRVOffsetStructDecRef(x...) \ + PVRSRVOffsetStructDecRef2(__FILE__, __LINE__, x) +#define PVRSRVOffsetStructIncMapped(x...) 
\ + PVRSRVOffsetStructIncMapped2(__FILE__, __LINE__, x) +#define PVRSRVOffsetStructDecMapped(x...) \ + PVRSRVOffsetStructDecMapped2(__FILE__, __LINE__, x) + +void PVRSRVOffsetStructIncRef2(const IMG_CHAR *pszFile, IMG_INT iLine, + PKV_OFFSET_STRUCT psOffsetStruct); +void PVRSRVOffsetStructDecRef2(const IMG_CHAR *pszFile, IMG_INT iLine, + PKV_OFFSET_STRUCT psOffsetStruct); +void PVRSRVOffsetStructIncMapped2(const IMG_CHAR *pszFile, IMG_INT iLine, + PKV_OFFSET_STRUCT psOffsetStruct); +void PVRSRVOffsetStructDecMapped2(const IMG_CHAR *pszFile, IMG_INT iLine, + PKV_OFFSET_STRUCT psOffsetStruct); + +#if defined(SUPPORT_ION) +#define PVRSRVIonBufferSyncInfoIncRef(x...) \ + PVRSRVIonBufferSyncInfoIncRef2(__FILE__, __LINE__, x) +#define PVRSRVIonBufferSyncInfoDecRef(x...) \ + PVRSRVIonBufferSyncInfoDecRef2(__FILE__, __LINE__, x) + +PVRSRV_ERROR PVRSRVIonBufferSyncInfoIncRef2(const IMG_CHAR *pszFile, IMG_INT iLine, + IMG_HANDLE hUnique, + IMG_HANDLE hDevCookie, + IMG_HANDLE hDevMemContext, + PVRSRV_ION_SYNC_INFO **ppsIonSyncInfo, + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo); +void PVRSRVIonBufferSyncInfoDecRef2(const IMG_CHAR *pszFile, IMG_INT iLine, + PVRSRV_ION_SYNC_INFO *psIonSyncInfo, + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo); +#endif /* defined (SUPPORT_ION) */ + +#if defined(SUPPORT_DMABUF) +#define PVRSRVDmaBufSyncInfoIncRef(x...) \ + PVRSRVDmaBufSyncInfoIncRef2(__FILE__, __LINE__, x) +#define PVRSRVDmaBufSyncInfoDecRef(x...) 
\ + PVRSRVDmaBufSyncInfoDecRef2(__FILE__, __LINE__, x) + +PVRSRV_ERROR PVRSRVDmaBufSyncInfoIncRef2(const IMG_CHAR *pszFile, IMG_INT iLine, + IMG_HANDLE hUnique, + IMG_HANDLE hPriv, + IMG_HANDLE hDevCookie, + IMG_HANDLE hDevMemContext, + PVRSRV_DMABUF_SYNC_INFO **ppsDmaBufSyncInfo, + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo); + +void PVRSRVDmaBufSyncInfoDecRef2(const IMG_CHAR *pszFile, IMG_INT iLine, + PVRSRV_DMABUF_SYNC_INFO *psDmaBufSyncInfo, + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo); +#endif /* defined (SUPPORT_DMABUF) */ + +#endif /* defined(__linux__) */ + +#else /* defined(PVRSRV_REFCOUNT_DEBUG) */ + +static INLINE void PVRSRVDumpRefCountCCB(void) { } + +static INLINE void PVRSRVKernelSyncInfoIncRef(PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo, + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo) +{ + PVR_UNREFERENCED_PARAMETER(psKernelMemInfo); + PVRSRVAcquireSyncInfoKM(psKernelSyncInfo); +} + +static INLINE void PVRSRVKernelSyncInfoDecRef(PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo, + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo) +{ + PVR_UNREFERENCED_PARAMETER(psKernelMemInfo); + PVRSRVReleaseSyncInfoKM(psKernelSyncInfo); +} + +static INLINE void PVRSRVKernelMemInfoIncRef(PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo) +{ + psKernelMemInfo->ui32RefCount++; +} + +static INLINE void PVRSRVKernelMemInfoDecRef(PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo) +{ + psKernelMemInfo->ui32RefCount--; +} + +static INLINE void PVRSRVBMBufIncRef(BM_BUF *pBuf) +{ + pBuf->ui32RefCount++; +} + +static INLINE void PVRSRVBMBufDecRef(BM_BUF *pBuf) +{ + pBuf->ui32RefCount--; +} + +static INLINE void PVRSRVBMBufIncExport(BM_BUF *pBuf) +{ + pBuf->ui32ExportCount++; +} + +static INLINE void PVRSRVBMBufDecExport(BM_BUF *pBuf) +{ + pBuf->ui32ExportCount--; +} + +static INLINE void PVRSRVBMXProcIncRef(IMG_UINT32 ui32Index) +{ + gXProcWorkaroundShareData[ui32Index].ui32RefCount++; +} + +static INLINE void PVRSRVBMXProcDecRef(IMG_UINT32 ui32Index) +{ + gXProcWorkaroundShareData[ui32Index].ui32RefCount--; +} + +#if 
defined(__linux__) + +/* mmap refcounting is Linux specific */ +#include "mmap.h" + +static INLINE void PVRSRVOffsetStructIncRef(PKV_OFFSET_STRUCT psOffsetStruct) +{ + psOffsetStruct->ui32RefCount++; +} + +static INLINE void PVRSRVOffsetStructDecRef(PKV_OFFSET_STRUCT psOffsetStruct) +{ + psOffsetStruct->ui32RefCount--; +} + +static INLINE void PVRSRVOffsetStructIncMapped(PKV_OFFSET_STRUCT psOffsetStruct) +{ + psOffsetStruct->ui32Mapped++; +} + +static INLINE void PVRSRVOffsetStructDecMapped(PKV_OFFSET_STRUCT psOffsetStruct) +{ + psOffsetStruct->ui32Mapped--; +} + +#if defined(SUPPORT_ION) +static INLINE PVRSRV_ERROR PVRSRVIonBufferSyncInfoIncRef(IMG_HANDLE hUnique, + IMG_HANDLE hDevCookie, + IMG_HANDLE hDevMemContext, + PVRSRV_ION_SYNC_INFO **ppsIonSyncInfo, + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo) +{ + PVR_UNREFERENCED_PARAMETER(psKernelMemInfo); + + return PVRSRVIonBufferSyncAcquire(hUnique, + hDevCookie, + hDevMemContext, + ppsIonSyncInfo); +} + +static INLINE void PVRSRVIonBufferSyncInfoDecRef(PVRSRV_ION_SYNC_INFO *psIonSyncInfo, + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo) +{ + PVR_UNREFERENCED_PARAMETER(psKernelMemInfo); + PVRSRVIonBufferSyncRelease(psIonSyncInfo); +} +#endif /* defined (SUPPORT_ION) */ + +#if defined(SUPPORT_DMABUF) +static INLINE PVRSRV_ERROR PVRSRVDmaBufSyncInfoIncRef(IMG_HANDLE hUnique, + IMG_HANDLE hPriv, + IMG_HANDLE hDevCookie, + IMG_HANDLE hDevMemContext, + PVRSRV_DMABUF_SYNC_INFO **ppsDmaBufSyncInfo, + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo) +{ + PVR_UNREFERENCED_PARAMETER(psKernelMemInfo); + + return PVRSRVDmaBufSyncAcquire(hUnique, + hPriv, + hDevCookie, + hDevMemContext, + ppsDmaBufSyncInfo); +} + +static INLINE void PVRSRVDmaBufSyncInfoDecRef(PVRSRV_DMABUF_SYNC_INFO *psDmaBufSyncInfo, + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo) +{ + PVR_UNREFERENCED_PARAMETER(psKernelMemInfo); + PVRSRVDmaBufSyncRelease(psDmaBufSyncInfo); +} +#endif /* defined (SUPPORT_DMABUF) */ + +#endif /* defined(__linux__) */ + +#endif /* 
defined(PVRSRV_REFCOUNT_DEBUG) */ + +#endif /* __REFCOUNT_H__ */ diff --git a/sgx_km/eurasia_km/services4/srvkm/include/resman.h b/sgx_km/eurasia_km/services4/srvkm/include/resman.h new file mode 100644 index 0000000..14af110 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/include/resman.h @@ -0,0 +1,153 @@ +/*************************************************************************/ /*! +@Title Resource Manager API +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Provide resource management +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. 
+ +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef __RESMAN_H__ +#define __RESMAN_H__ + +#if defined (__cplusplus) +extern "C" { +#endif + +/****************************************************************************** + * resman definitions + *****************************************************************************/ + +enum { + /* SGX: */ + RESMAN_TYPE_SHARED_PB_DESC = 1, /*!< Parameter buffer kernel stubs */ + RESMAN_TYPE_SHARED_PB_DESC_CREATE_LOCK, /*!< Shared parameter buffer creation lock */ + RESMAN_TYPE_HW_RENDER_CONTEXT, /*!< Hardware Render Context Resource */ + RESMAN_TYPE_HW_TRANSFER_CONTEXT, /*!< Hardware transfer Context Resource */ + RESMAN_TYPE_HW_2D_CONTEXT, /*!< Hardware 2D Context Resource */ + RESMAN_TYPE_TRANSFER_CONTEXT, /*!< Transfer Queue context */ + + /* VGX: */ + RESMAN_TYPE_DMA_CLIENT_FIFO_DATA, /*!< VGX DMA Client FIFO data */ + + /* DISPLAY CLASS: */ + RESMAN_TYPE_DISPLAYCLASS_SWAPCHAIN_REF, /*!< Display Class Swapchain Reference Resource */ + RESMAN_TYPE_DISPLAYCLASS_DEVICE, /*!< Display Class Device Resource */ + + /* BUFFER CLASS: */ + RESMAN_TYPE_BUFFERCLASS_DEVICE, /*!< Buffer Class Device Resource */ + + /* OS specific User mode Mappings: */ + RESMAN_TYPE_OS_USERMODE_MAPPING, /*!< OS specific User mode mappings */ + + /* COMMON: */ + RESMAN_TYPE_DEVICEMEM_CONTEXT, /*!< 
Device Memory Context Resource */ + RESMAN_TYPE_DEVICECLASSMEM_MAPPING, /*!< Device Memory Mapping Resource */ + RESMAN_TYPE_DEVICEMEM_MAPPING, /*!< Device Memory Mapping Resource */ + RESMAN_TYPE_DEVICEMEM_WRAP, /*!< Device Memory Wrap Resource */ + RESMAN_TYPE_DEVICEMEM_ALLOCATION, /*!< Device Memory Allocation Resource */ + RESMAN_TYPE_DEVICEMEM_ION, /*!< Device Memory Ion Resource */ + RESMAN_TYPE_DEVICEMEM_DMABUF, /*!< Device Memory dma-buf Resource */ + RESMAN_TYPE_EVENT_OBJECT, /*!< Event Object */ + RESMAN_TYPE_SHARED_MEM_INFO, /*!< Shared system memory meminfo */ + RESMAN_TYPE_MODIFY_SYNC_OPS, /*!< Syncobject synchronisation Resource*/ + RESMAN_TYPE_SYNC_INFO, /*!< Syncobject Resource*/ + + /* KERNEL: */ + RESMAN_TYPE_KERNEL_DEVICEMEM_ALLOCATION /*!< Device Memory Allocation Resource */ +}; + +#define RESMAN_CRITERIA_ALL 0x00000000 /*!< match by criteria all */ +#define RESMAN_CRITERIA_RESTYPE 0x00000001 /*!< match by criteria type */ +#define RESMAN_CRITERIA_PVOID_PARAM 0x00000002 /*!< match by criteria param1 */ +#define RESMAN_CRITERIA_UI32_PARAM 0x00000004 /*!< match by criteria param2 */ + +typedef PVRSRV_ERROR (*RESMAN_FREE_FN)(IMG_PVOID pvParam, IMG_UINT32 ui32Param, IMG_BOOL bForceCleanup); + +typedef struct _RESMAN_ITEM_ *PRESMAN_ITEM; +typedef struct _RESMAN_CONTEXT_ *PRESMAN_CONTEXT; + +/****************************************************************************** + * resman functions + *****************************************************************************/ + +/* + Note: + Resource cleanup can fail with retry in which case we don't remove + it from resman's list and either UM or KM will try to release the + resource at a later date (and will keep trying until a non-retry + error is returned) +*/ + +PVRSRV_ERROR ResManInit(IMG_VOID); +IMG_VOID ResManDeInit(IMG_VOID); + +PRESMAN_ITEM ResManRegisterRes(PRESMAN_CONTEXT hResManContext, + IMG_UINT32 ui32ResType, + IMG_PVOID pvParam, + IMG_UINT32 ui32Param, + RESMAN_FREE_FN pfnFreeResource); + 
+PVRSRV_ERROR ResManFreeResByPtr(PRESMAN_ITEM psResItem, + IMG_BOOL bForceCleanup); + +PVRSRV_ERROR ResManFreeResByCriteria(PRESMAN_CONTEXT hResManContext, + IMG_UINT32 ui32SearchCriteria, + IMG_UINT32 ui32ResType, + IMG_PVOID pvParam, + IMG_UINT32 ui32Param); + +PVRSRV_ERROR ResManDissociateRes(PRESMAN_ITEM psResItem, + PRESMAN_CONTEXT psNewResManContext); + +PVRSRV_ERROR ResManFindResourceByPtr(PRESMAN_CONTEXT hResManContext, + PRESMAN_ITEM psItem); + +PVRSRV_ERROR PVRSRVResManConnect(IMG_HANDLE hPerProc, + PRESMAN_CONTEXT *phResManContext); +IMG_VOID PVRSRVResManDisconnect(PRESMAN_CONTEXT hResManContext, + IMG_BOOL bKernelContext); + +#if defined (__cplusplus) +} +#endif + +#endif /* __RESMAN_H__ */ + +/****************************************************************************** + End of file (resman.h) +******************************************************************************/ + diff --git a/sgx_km/eurasia_km/services4/srvkm/include/services_headers.h b/sgx_km/eurasia_km/services4/srvkm/include/services_headers.h new file mode 100644 index 0000000..d09b8a8 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/include/services_headers.h @@ -0,0 +1,68 @@ +/*************************************************************************/ /*! +@Title Command queues and synchronisation +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Internal structures and definitions for command queues and + synchronisation +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + + +#ifndef SERVICES_HEADERS_H +#define SERVICES_HEADERS_H + +#ifdef DEBUG_RELEASE_BUILD +#pragma optimize( "", off ) +#define DEBUG 1 +#endif + +#include "img_defs.h" +#include "services.h" +#include "servicesint.h" +#include "power.h" +#include "resman.h" +#include "queue.h" +#include "srvkm.h" +#include "kerneldisplay.h" +#include "syscommon.h" +#include "pvr_debug.h" +#include "metrics.h" +#include "osfunc.h" +#include "refcount.h" + +#endif /* SERVICES_HEADERS_H */ + diff --git a/sgx_km/eurasia_km/services4/srvkm/include/srvkm.h b/sgx_km/eurasia_km/services4/srvkm/include/srvkm.h new file mode 100644 index 0000000..723037e --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/include/srvkm.h @@ -0,0 +1,273 @@ +/*************************************************************************/ /*! +@Title Services kernel module internal header file +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef SRVKM_H +#define SRVKM_H + +#include "servicesint.h" + +#if defined(__cplusplus) +extern "C" { +#endif + + /** Use PVR_DPF() unless message is necessary in release build + */ + #ifdef PVR_DISABLE_LOGGING + #define PVR_LOG(X) + #else + /* PRQA S 3410 1 */ /* this macro requires no brackets in order to work */ + #define PVR_LOG(X) PVRSRVReleasePrintf X; + #endif + + IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVReleasePrintf(const IMG_CHAR *pszFormat, ...) 
IMG_FORMAT_PRINTF(1, 2); + + IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV PVRSRVProcessConnect(IMG_UINT32 ui32PID, IMG_UINT32 ui32Flags); + IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVProcessDisconnect(IMG_UINT32 ui32PID); + + IMG_IMPORT IMG_VOID PVRSRVScheduleDevicesKM(IMG_VOID); + +#if defined(SUPPORT_PVRSRV_DEVICE_CLASS) + IMG_VOID IMG_CALLCONV PVRSRVSetDCState(IMG_UINT32 ui32State); +#endif + + PVRSRV_ERROR IMG_CALLCONV PVRSRVSaveRestoreLiveSegments(IMG_HANDLE hArena, IMG_PBYTE pbyBuffer, IMG_SIZE_T *puiBufSize, IMG_BOOL bSave); + + IMG_VOID PVRSRVScheduleDeviceCallbacks(IMG_VOID); + + IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVDumpSyncs(IMG_BOOL bActiveOnly); + +#define SYNC_OP_CLASS_MASK 0x0000ffffUL +#define SYNC_OP_CLASS_SHIFT 0 +#define SYNC_OP_CLASS_MODOBJ (1<<0) +#define SYNC_OP_CLASS_QUEUE (1<<1) +#define SYNC_OP_CLASS_KICKTA (1<<2) +#define SYNC_OP_CLASS_TQ_3D (1<<3) +#define SYNC_OP_CLASS_TQ_2D (1<<4) +#define SYNC_OP_CLASS_LINUX_FENCE (1<<5) +#define SYNC_OP_TYPE_MASK 0x00f0000UL +#define SYNC_OP_TYPE_SHIFT 16 +#define SYNC_OP_TYPE_READOP (1<<0) +#define SYNC_OP_TYPE_WRITEOP (1<<1) +#define SYNC_OP_TYPE_READOP2 (1<<2) + +#define SYNC_OP_HAS_DATA 0x80000000UL +#define SYNC_OP_TAKE 0x40000000UL +#define SYNC_OP_ROLLBACK 0x20000000UL + +#ifdef INLINE_IS_PRAGMA +#pragma inline(SyncTakeWriteOp) +#endif +static INLINE +IMG_UINT32 SyncTakeWriteOp(PVRSRV_KERNEL_SYNC_INFO *psKernelSync, IMG_UINT32 ui32OpType) +{ +#if defined(SUPPORT_PER_SYNC_DEBUG) + IMG_UINT32 ui32Index = psKernelSync->ui32HistoryIndex; + + /* Record a history of all the classes of operation taken on this sync */ + psKernelSync->ui32OperationMask |= (ui32OpType & SYNC_OP_CLASS_MASK) >> SYNC_OP_CLASS_SHIFT; + + /* Add this operation to the history buffer */ + psKernelSync->aui32OpInfo[ui32Index] = SYNC_OP_HAS_DATA | ui32OpType | (SYNC_OP_TYPE_WRITEOP << SYNC_OP_TYPE_SHIFT) | SYNC_OP_TAKE; + psKernelSync->aui32ReadOpSample[ui32Index] = psKernelSync->psSyncData->ui32ReadOpsPending; + 
psKernelSync->aui32WriteOpSample[ui32Index] = psKernelSync->psSyncData->ui32WriteOpsPending; + psKernelSync->aui32ReadOp2Sample[ui32Index] = psKernelSync->psSyncData->ui32ReadOps2Pending; + psKernelSync->ui32HistoryIndex++; + psKernelSync->ui32HistoryIndex = psKernelSync->ui32HistoryIndex % PER_SYNC_HISTORY; +#endif + PVR_UNREFERENCED_PARAMETER(ui32OpType); + return psKernelSync->psSyncData->ui32WriteOpsPending++; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(SyncTakeReadOp) +#endif +static INLINE +IMG_UINT32 SyncTakeReadOp(PVRSRV_KERNEL_SYNC_INFO *psKernelSync, IMG_UINT32 ui32OpType) +{ +#if defined(SUPPORT_PER_SYNC_DEBUG) + IMG_UINT32 ui32Index = psKernelSync->ui32HistoryIndex; + + /* Record a history of all the classes of operation taken on this sync */ + psKernelSync->ui32OperationMask |= (ui32OpType & SYNC_OP_CLASS_MASK) >> SYNC_OP_CLASS_SHIFT; + + /* Add this operation to the history buffer */ + psKernelSync->aui32OpInfo[ui32Index] = SYNC_OP_HAS_DATA | ui32OpType | (SYNC_OP_TYPE_READOP << SYNC_OP_TYPE_SHIFT) | SYNC_OP_TAKE; + psKernelSync->aui32ReadOpSample[ui32Index] = psKernelSync->psSyncData->ui32ReadOpsPending; + psKernelSync->aui32WriteOpSample[ui32Index] = psKernelSync->psSyncData->ui32WriteOpsPending; + psKernelSync->aui32ReadOp2Sample[ui32Index] = psKernelSync->psSyncData->ui32ReadOps2Pending; + psKernelSync->ui32HistoryIndex++; + psKernelSync->ui32HistoryIndex = psKernelSync->ui32HistoryIndex % PER_SYNC_HISTORY; +#endif + PVR_UNREFERENCED_PARAMETER(ui32OpType); + return psKernelSync->psSyncData->ui32ReadOpsPending++; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(SyncTakeReadOp2) +#endif +static INLINE +IMG_UINT32 SyncTakeReadOp2(PVRSRV_KERNEL_SYNC_INFO *psKernelSync, IMG_UINT32 ui32OpType) +{ +#if defined(SUPPORT_PER_SYNC_DEBUG) + IMG_UINT32 ui32Index = psKernelSync->ui32HistoryIndex; + + /* Record a history of all the classes of operation taken on this sync */ + psKernelSync->ui32OperationMask |= (ui32OpType & SYNC_OP_CLASS_MASK) >> 
SYNC_OP_CLASS_SHIFT; + + /* Add this operation to the history buffer */ + psKernelSync->aui32OpInfo[ui32Index] = SYNC_OP_HAS_DATA | ui32OpType | (SYNC_OP_TYPE_READOP2 << SYNC_OP_TYPE_SHIFT) | SYNC_OP_TAKE; + psKernelSync->aui32ReadOpSample[ui32Index] = psKernelSync->psSyncData->ui32ReadOpsPending; + psKernelSync->aui32WriteOpSample[ui32Index] = psKernelSync->psSyncData->ui32WriteOpsPending; + psKernelSync->aui32ReadOp2Sample[ui32Index] = psKernelSync->psSyncData->ui32ReadOps2Pending; + psKernelSync->ui32HistoryIndex++; + psKernelSync->ui32HistoryIndex = psKernelSync->ui32HistoryIndex % PER_SYNC_HISTORY; +#endif + PVR_UNREFERENCED_PARAMETER(ui32OpType); + return psKernelSync->psSyncData->ui32ReadOps2Pending++; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(SyncRollBackWriteOp) +#endif +static INLINE +IMG_UINT32 SyncRollBackWriteOp(PVRSRV_KERNEL_SYNC_INFO *psKernelSync, IMG_UINT32 ui32OpType) +{ +#if defined(SUPPORT_PER_SYNC_DEBUG) + IMG_UINT32 ui32Index = psKernelSync->ui32HistoryIndex; + + /* Record a history of all the classes of operation taken on this sync */ + psKernelSync->ui32OperationMask |= (ui32OpType & SYNC_OP_CLASS_MASK) >> SYNC_OP_CLASS_SHIFT; + + /* Add this operation to the history buffer */ + psKernelSync->aui32OpInfo[ui32Index] = SYNC_OP_HAS_DATA | ui32OpType | (SYNC_OP_TYPE_WRITEOP << SYNC_OP_TYPE_SHIFT) | SYNC_OP_ROLLBACK; + psKernelSync->aui32ReadOpSample[ui32Index] = psKernelSync->psSyncData->ui32ReadOpsPending; + psKernelSync->aui32WriteOpSample[ui32Index] = psKernelSync->psSyncData->ui32WriteOpsPending; + psKernelSync->aui32ReadOp2Sample[ui32Index] = psKernelSync->psSyncData->ui32ReadOps2Pending; + psKernelSync->ui32HistoryIndex++; + psKernelSync->ui32HistoryIndex = psKernelSync->ui32HistoryIndex % PER_SYNC_HISTORY; +#endif + PVR_UNREFERENCED_PARAMETER(ui32OpType); + return psKernelSync->psSyncData->ui32WriteOpsPending--; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(SyncRollBackReadOp) +#endif +static INLINE +IMG_UINT32 
SyncRollBackReadOp(PVRSRV_KERNEL_SYNC_INFO *psKernelSync, IMG_UINT32 ui32OpType) +{ +#if defined(SUPPORT_PER_SYNC_DEBUG) + IMG_UINT32 ui32Index = psKernelSync->ui32HistoryIndex; + + /* Record a history of all the classes of operation taken on this sync */ + psKernelSync->ui32OperationMask |= (ui32OpType & SYNC_OP_CLASS_MASK) >> SYNC_OP_CLASS_SHIFT; + + /* Add this operation to the history buffer */ + psKernelSync->aui32OpInfo[ui32Index] = SYNC_OP_HAS_DATA | ui32OpType | (SYNC_OP_TYPE_READOP << SYNC_OP_TYPE_SHIFT) | SYNC_OP_ROLLBACK; + psKernelSync->aui32ReadOpSample[ui32Index] = psKernelSync->psSyncData->ui32ReadOpsPending; + psKernelSync->aui32WriteOpSample[ui32Index] = psKernelSync->psSyncData->ui32WriteOpsPending; + psKernelSync->aui32ReadOp2Sample[ui32Index] = psKernelSync->psSyncData->ui32ReadOps2Pending; + psKernelSync->ui32HistoryIndex++; + psKernelSync->ui32HistoryIndex = psKernelSync->ui32HistoryIndex % PER_SYNC_HISTORY; +#endif + PVR_UNREFERENCED_PARAMETER(ui32OpType); + return psKernelSync->psSyncData->ui32ReadOpsPending--; +} + + + +#if defined (__cplusplus) +} +#endif + +/****************** +HIGHER LEVEL MACROS +*******************/ + +/*---------------------------------------------------------------------------- +Repeats the body of the loop for a certain minimum time, or until the body +exits by its own means (break, return, goto, etc.) + +Example of usage: + +LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) +{ + if(psQueueInfo->ui32ReadOffset == psQueueInfo->ui32WriteOffset) + { + bTimeout = IMG_FALSE; + break; + } + + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); +} END_LOOP_UNTIL_TIMEOUT(); + +-----------------------------------------------------------------------------*/ + +/* uiNotLastLoop will remain at 1 until the timeout has expired, at which time + * it will be decremented and the loop executed one final time. This is necessary + * when preemption is enabled. 
+ */ +/* PRQA S 3411,3431 12 */ /* critical format, leave alone */ +#define LOOP_UNTIL_TIMEOUT(TIMEOUT) \ +{\ + IMG_UINT32 uiOffset, uiStart, uiCurrent; \ + IMG_INT32 iNotLastLoop; \ + for(uiOffset = 0, uiStart = OSClockus(), uiCurrent = uiStart + 1, iNotLastLoop = 1;\ + ((uiCurrent - uiStart + uiOffset) < (TIMEOUT)) || iNotLastLoop--; \ + uiCurrent = OSClockus(), \ + uiOffset = uiCurrent < uiStart ? IMG_UINT32_MAX - uiStart : uiOffset, \ + uiStart = uiCurrent < uiStart ? 0 : uiStart) + +#define END_LOOP_UNTIL_TIMEOUT() \ +} + +/*! + ****************************************************************************** + + @Function PVRSRVGetErrorStringKM + + @Description Returns a text string relating to the PVRSRV_ERROR enum. + + ******************************************************************************/ +IMG_IMPORT +const IMG_CHAR *PVRSRVGetErrorStringKM(PVRSRV_ERROR eError); + +#endif /* SRVKM_H */ diff --git a/sgx_km/eurasia_km/services4/srvkm/include/ttrace.h b/sgx_km/eurasia_km/services4/srvkm/include/ttrace.h new file mode 100644 index 0000000..cb70ff8 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/include/ttrace.h @@ -0,0 +1,200 @@ +/*************************************************************************/ /*! +@Title Timed Trace header +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Timed Trace header. Contines structures and functions used + in the timed trace subsystem. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#include "services_headers.h" +#include "ttrace_common.h" +#include "ttrace_tokens.h" + +#ifndef __TTRACE_H__ +#define __TTRACE_H__ + +#if defined(TTRACE) + + #define PVR_TTRACE(group, class, token) \ + PVRSRVTimeTrace(group, class, token) + #define PVR_TTRACE_UI8(group, class, token, val) \ + PVRSRVTimeTraceUI8(group, class, token, val) + #define PVR_TTRACE_UI16(group, class, token, val) \ + PVRSRVTimeTraceUI16(group, class, token, val) + #define PVR_TTRACE_UI32(group, class, token, val) \ + PVRSRVTimeTraceUI32(group, class, token, val) + #define PVR_TTRACE_UI64(group, class, token, val) \ + PVRSRVTimeTraceUI64(group, class, token, val) + #define PVR_TTRACE_DEV_VIRTADDR(group, class, token, val) \ + PVRSRVTimeTraceDevVirtAddr(group, class, token, val) + #define PVR_TTRACE_CPU_PHYADDR(group, class, token, val) \ + PVRSRVTimeTraceCpuPhyAddr(group, class, token, val) + #define PVR_TTRACE_DEV_PHYADDR(group, class, token, val) \ + PVRSRVTimeTraceDevPhysAddr(group, class, token, val) + #define PVR_TTRACE_SYS_PHYADDR(group, class, token, val) \ + PVRSRVTimeTraceSysPhysAddr(group, class, token, val) + #define PVR_TTRACE_SYNC_OBJECT(group, token, syncobj, op) \ + PVRSRVTimeTraceSyncObject(group, token, syncobj, op) + +IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVTimeTraceArray(IMG_UINT32 ui32Group, IMG_UINT32 ui32Class, + IMG_UINT32 ui32Token, IMG_UINT32 ui32TypeSize, + IMG_UINT32 ui32Count, IMG_UINT8 *ui8Data); + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVTimeTrace) +#endif +static INLINE IMG_VOID PVRSRVTimeTrace(IMG_UINT32 ui32Group, IMG_UINT32 ui32Class, + IMG_UINT32 ui32Token) +{ + PVRSRVTimeTraceArray(ui32Group, ui32Class, ui32Token, 0, 0, NULL); +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVTimeTraceUI8) +#endif +static INLINE IMG_VOID PVRSRVTimeTraceUI8(IMG_UINT32 ui32Group, IMG_UINT32 ui32Class, + IMG_UINT32 ui32Token, IMG_UINT8 ui8Value) +{ + PVRSRVTimeTraceArray(ui32Group, 
ui32Class, ui32Token, PVRSRV_TRACE_TYPE_UI8, + 1, &ui8Value); +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVTimeTraceUI16) +#endif +static INLINE IMG_VOID PVRSRVTimeTraceUI16(IMG_UINT32 ui32Group, IMG_UINT32 ui32Class, + IMG_UINT32 ui32Token, IMG_UINT16 ui16Value) +{ + PVRSRVTimeTraceArray(ui32Group, ui32Class, ui32Token, PVRSRV_TRACE_TYPE_UI16, + 1, (IMG_UINT8 *) &ui16Value); +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVTimeTraceUI32) +#endif +static INLINE IMG_VOID PVRSRVTimeTraceUI32(IMG_UINT32 ui32Group, IMG_UINT32 ui32Class, + IMG_UINT32 ui32Token, IMG_UINT32 ui32Value) +{ + PVRSRVTimeTraceArray(ui32Group, ui32Class, ui32Token, PVRSRV_TRACE_TYPE_UI32, + 1, (IMG_UINT8 *) &ui32Value); +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVTimeTraceUI64) +#endif +static INLINE IMG_VOID PVRSRVTimeTraceUI64(IMG_UINT32 ui32Group, IMG_UINT32 ui32Class, + IMG_UINT32 ui32Token, IMG_UINT64 ui64Value) +{ + PVRSRVTimeTraceArray(ui32Group, ui32Class, ui32Token, PVRSRV_TRACE_TYPE_UI64, + 1, (IMG_UINT8 *) &ui64Value); +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVTimeTraceDevVirtAddr) +#endif +static INLINE IMG_VOID PVRSRVTimeTraceDevVirtAddr(IMG_UINT32 ui32Group, IMG_UINT32 ui32Class, + IMG_UINT32 ui32Token, IMG_DEV_VIRTADDR psVAddr) +{ + PVRSRVTimeTraceArray(ui32Group, ui32Class, ui32Token, PVRSRV_TRACE_TYPE_UI32, + 1, (IMG_UINT8 *) &psVAddr.uiAddr); +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVTimeTraceCpuPhyAddr) +#endif +static INLINE IMG_VOID PVRSRVTimeTraceCpuPhyAddr(IMG_UINT32 ui32Group, IMG_UINT32 ui32Class, + IMG_UINT32 ui32Token, IMG_CPU_PHYADDR psPAddr) +{ + PVRSRVTimeTraceArray(ui32Group, ui32Class, ui32Token, PVRSRV_TRACE_TYPE_UI32, + 1, (IMG_UINT8 *) &psPAddr.uiAddr); +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVTimeTraceDevPhysAddr) +#endif +static INLINE IMG_VOID PVRSRVTimeTraceDevPhysAddr(IMG_UINT32 ui32Group, IMG_UINT32 ui32Class, + IMG_UINT32 ui32Token, IMG_DEV_PHYADDR psPAddr) +{ + PVRSRVTimeTraceArray(ui32Group, ui32Class, 
ui32Token, PVRSRV_TRACE_TYPE_UI32, + 1, (IMG_UINT8 *) &psPAddr.uiAddr); +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVTimeTraceSysPhysAddr) +#endif +static INLINE IMG_VOID PVRSRVTimeTraceSysPhysAddr(IMG_UINT32 ui32Group, IMG_UINT32 ui32Class, + IMG_UINT32 ui32Token, IMG_SYS_PHYADDR psPAddr) +{ + PVRSRVTimeTraceArray(ui32Group, ui32Class, ui32Token, sizeof(psPAddr.uiAddr), + 1, (IMG_UINT8 *) &psPAddr.uiAddr); +} + +#else /* defined(PVRSRV_NEED_PVR_TIME_TRACE) */ + + #define PVR_TTRACE(group, class, token) \ + ((void) 0) + #define PVR_TTRACE_UI8(group, class, token, val) \ + ((void) 0) + #define PVR_TTRACE_UI16(group, class, token, val) \ + ((void) 0) + #define PVR_TTRACE_UI32(group, class, token, val) \ + ((void) 0) + #define PVR_TTRACE_UI64(group, class, token, val) \ + ((void) 0) + #define PVR_TTRACE_DEV_VIRTADDR(group, class, token, val) \ + ((void) 0) + #define PVR_TTRACE_CPU_PHYADDR(group, class, token, val) \ + ((void) 0) + #define PVR_TTRACE_DEV_PHYADDR(group, class, token, val) \ + ((void) 0) + #define PVR_TTRACE_SYS_PHYADDR(group, class, token, val) \ + ((void) 0) + #define PVR_TTRACE_SYNC_OBJECT(group, token, syncobj, op) \ + ((void) 0) + +#endif /* defined(PVRSRV_NEED_PVR_TIME_TRACE) */ + +IMG_IMPORT PVRSRV_ERROR PVRSRVTimeTraceInit(IMG_VOID); +IMG_IMPORT IMG_VOID PVRSRVTimeTraceDeinit(IMG_VOID); + +IMG_IMPORT IMG_VOID PVRSRVTimeTraceSyncObject(IMG_UINT32 ui32Group, IMG_UINT32 ui32Token, + PVRSRV_KERNEL_SYNC_INFO *psSync, IMG_UINT8 ui8SyncOp); +IMG_IMPORT PVRSRV_ERROR PVRSRVTimeTraceBufferCreate(IMG_UINT32 ui32PID); +IMG_IMPORT PVRSRV_ERROR PVRSRVTimeTraceBufferDestroy(IMG_UINT32 ui32PID); + +IMG_IMPORT IMG_VOID PVRSRVDumpTimeTraceBuffers(IMG_VOID); +#endif /* __TTRACE_H__ */ diff --git a/sgx_km/eurasia_km/services4/srvkm/include/ttrace_common.h b/sgx_km/eurasia_km/services4/srvkm/include/ttrace_common.h new file mode 100644 index 0000000..e149c20 --- /dev/null +++ b/sgx_km/eurasia_km/services4/srvkm/include/ttrace_common.h @@ -0,0 +1,151 @@ 
+/*************************************************************************/ /*! +@Title Timed Trace header +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Timed Trace common header. Contains shared defines and + structures which are shared with the post processing tool. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#include "img_types.h" + +#ifndef __TTRACE_COMMON_H__ +#define __TTRACE_COMMON_H__ + +/* + * Trace item + * ========== + * + * A trace item contains a trace header, a timestamp, a UID and a + * data header all of which are 32-bit and mandatory. If there + * is no data then the data header size is set to 0. + * + * Trace header + * ------------ + * 31 27 23 19 15 11 7 3 + * GGGG GGGG CCCC CCCC TTTT TTTT TTTT TTTT + * + * G = group + * Note: + * Group 0xff means the message is padding + * + * C = class + * T = Token + * + * Data header + *----------- + * 31 27 23 19 15 11 7 3 + * SSSS SSSS SSSS SSSS TTTT CCCC CCCC CCCC + * + * S = data packet size + * T = Type + * 0000 - 8 bit + * 0001 - 16 bit + * 0010 - 32 bit + * 0011 - 64 bit + * + * C = data item count + * + * Note: It might look strange having both the packet + * size and the data item count, but the idea + * is the you might have a "special" data type + * who's size might not be known by the post + * processing program and rather then fail + * processing the buffer after that point if we + * know the size we can just skip it and move to + * the next item. 
+ */ + + +#define PVRSRV_TRACE_HEADER 0 +#define PVRSRV_TRACE_TIMESTAMP 1 +#define PVRSRV_TRACE_HOSTUID 2 +#define PVRSRV_TRACE_DATA_HEADER 3 +#define PVRSRV_TRACE_DATA_PAYLOAD 4 + +#define PVRSRV_TRACE_ITEM_SIZE 16 + +#define PVRSRV_TRACE_GROUP_MASK 0xff +#define PVRSRV_TRACE_CLASS_MASK 0xff +#define PVRSRV_TRACE_TOKEN_MASK 0xffff + +#define PVRSRV_TRACE_GROUP_SHIFT 24 +#define PVRSRV_TRACE_CLASS_SHIFT 16 +#define PVRSRV_TRACE_TOKEN_SHIFT 0 + +#define PVRSRV_TRACE_SIZE_MASK 0xffff +#define PVRSRV_TRACE_TYPE_MASK 0xf +#define PVRSRV_TRACE_COUNT_MASK 0xfff + +#define PVRSRV_TRACE_SIZE_SHIFT 16 +#define PVRSRV_TRACE_TYPE_SHIFT 12 +#define PVRSRV_TRACE_COUNT_SHIFT 0 + + +#define WRITE_HEADER(n,m) \ + ((m & PVRSRV_TRACE_##n##_MASK) << PVRSRV_TRACE_##n##_SHIFT) + +#define READ_HEADER(n,m) \ + ((m & (PVRSRV_TRACE_##n##_MASK << PVRSRV_TRACE_##n##_SHIFT)) >> PVRSRV_TRACE_##n##_SHIFT) + + +#if defined(TTRACE_LARGE_BUFFER) +#define TIME_TRACE_BUFFER_SIZE 8192 +#else +#define TIME_TRACE_BUFFER_SIZE 4096 +#endif + +/* Type defines for trace items */ +#define PVRSRV_TRACE_TYPE_UI8 0 +#define PVRSRV_TRACE_TYPE_UI16 1 +#define PVRSRV_TRACE_TYPE_UI32 2 +#define PVRSRV_TRACE_TYPE_UI64 3 + +#define PVRSRV_TRACE_TYPE_SYNC 15 + #define PVRSRV_TRACE_SYNC_UID 0 + #define PVRSRV_TRACE_SYNC_WOP 1 + #define PVRSRV_TRACE_SYNC_WOC 2 + #define PVRSRV_TRACE_SYNC_ROP 3 + #define PVRSRV_TRACE_SYNC_ROC 4 + #define PVRSRV_TRACE_SYNC_WO_DEV_VADDR 5 + #define PVRSRV_TRACE_SYNC_RO_DEV_VADDR 6 + #define PVRSRV_TRACE_SYNC_OP 7 + #define PVRSRV_TRACE_SYNC_RO2P 8 + #define PVRSRV_TRACE_SYNC_RO2C 9 + #define PVRSRV_TRACE_SYNC_RO2_DEV_VADDR 10 +#define PVRSRV_TRACE_TYPE_SYNC_SIZE ((PVRSRV_TRACE_SYNC_RO2_DEV_VADDR + 1) * sizeof(IMG_UINT32)) + +#endif /* __TTRACE_COMMON_H__*/ diff --git a/sgx_km/eurasia_km/services4/srvkm/include/ttrace_tokens.h b/sgx_km/eurasia_km/services4/srvkm/include/ttrace_tokens.h new file mode 100644 index 0000000..5f89849 --- /dev/null +++ 
b/sgx_km/eurasia_km/services4/srvkm/include/ttrace_tokens.h @@ -0,0 +1,135 @@ +/*************************************************************************/ /*! +@Title Timed Trace header +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Timed Trace token header. Contains defines for all the tokens + used. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef __TTRACE_TOKENS_H__ +#define __TTRACE_TOKENS_H__ + +/* All defines should use decimal so as not to confuse the post processing tool */ + +/* Trace groups */ +#define PVRSRV_TRACE_GROUP_KICK 0 +#define PVRSRV_TRACE_GROUP_TRANSFER 1 +#define PVRSRV_TRACE_GROUP_QUEUE 2 +#define PVRSRV_TRACE_GROUP_POWER 3 +#define PVRSRV_TRACE_GROUP_MKSYNC 4 +#define PVRSRV_TRACE_GROUP_MODOBJ 5 + +#define PVRSRV_TRACE_GROUP_PADDING 255 + +/* Trace classes */ +#define PVRSRV_TRACE_CLASS_FUNCTION_ENTER 0 +#define PVRSRV_TRACE_CLASS_FUNCTION_EXIT 1 +#define PVRSRV_TRACE_CLASS_SYNC 2 +#define PVRSRV_TRACE_CLASS_CCB 3 +#define PVRSRV_TRACE_CLASS_CMD_START 4 +#define PVRSRV_TRACE_CLASS_CMD_END 5 +#define PVRSRV_TRACE_CLASS_CMD_COMP_START 6 +#define PVRSRV_TRACE_CLASS_CMD_COMP_END 7 +#define PVRSRV_TRACE_CLASS_FLAGS 8 +#define PVRSRV_TRACE_CLASS_DEVVADDR 9 +#define PVRSRV_TRACE_CLASS_FRAMENUM 10 + +#define PVRSRV_TRACE_CLASS_NONE 255 + +/* Operation about to happen on the sync object */ +#define PVRSRV_SYNCOP_SAMPLE 0 +#define PVRSRV_SYNCOP_COMPLETE 1 +#define PVRSRV_SYNCOP_DUMP 2 + +/* + * Trace tokens + * ------------ + * These only need to be unique within a group.
+ */ + +/* Kick group tokens */ +#define KICK_TOKEN_DOKICK 0 +#define KICK_TOKEN_CCB_OFFSET 1 +#define KICK_TOKEN_TA3D_SYNC 2 +#define KICK_TOKEN_TA_SYNC 3 +#define KICK_TOKEN_3D_SYNC 4 +#define KICK_TOKEN_SRC_SYNC 5 +#define KICK_TOKEN_DST_SYNC 6 +#define KICK_TOKEN_FIRST_KICK 7 +#define KICK_TOKEN_LAST_KICK 8 +#define KICK_TOKEN_HWRTDATASET 9 +#define KICK_TOKEN_HWRTDATA 10 +#define KICK_TOKEN_FRAMENUM 11 +#define KICK_TOKEN_RENDERCONTEXT 12 + +/* Transfer Queue group tokens */ +#define TRANSFER_TOKEN_SUBMIT 0 +#define TRANSFER_TOKEN_TA_SYNC 1 +#define TRANSFER_TOKEN_3D_SYNC 2 +#define TRANSFER_TOKEN_SRC_SYNC 3 +#define TRANSFER_TOKEN_DST_SYNC 4 +#define TRANSFER_TOKEN_CCB_OFFSET 5 + +/* Queue group tokens */ +#define QUEUE_TOKEN_GET_SPACE 0 +#define QUEUE_TOKEN_INSERTKM 1 +#define QUEUE_TOKEN_SUBMITKM 2 +#define QUEUE_TOKEN_PROCESS_COMMAND 3 +#define QUEUE_TOKEN_PROCESS_QUEUES 4 +#define QUEUE_TOKEN_COMMAND_COMPLETE 5 +#define QUEUE_TOKEN_UPDATE_DST 6 +#define QUEUE_TOKEN_UPDATE_SRC 7 +#define QUEUE_TOKEN_SRC_SYNC 8 +#define QUEUE_TOKEN_DST_SYNC 9 +#define QUEUE_TOKEN_COMMAND_TYPE 10 + +/* uKernel Sync tokens */ +#define MKSYNC_TOKEN_KERNEL_CCB_OFFSET 0 +#define MKSYNC_TOKEN_CORE_CLK 1 +#define MKSYNC_TOKEN_UKERNEL_CLK 2 + +/* ModObj tokens */ +#define MODOBJ_TOKEN_MODIFY_PENDING 0 +#define MODOBJ_TOKEN_COMPLETE_PENDING 1 +#define MODOBJ_TOKEN_READ_SYNC 2 +#define MODOBJ_TOKEN_WRITE_SYNC 3 +#define MODOBJ_TOKEN_READ_WRITE_SYNC 4 +#define MODOBJ_TOKEN_SYNC_UPDATE 5 +#define MODOBJ_TOKEN_READ2_SYNC 6 + +#endif /* __TTRACE_TOKENS_H__ */ diff --git a/sgx_km/eurasia_km/services4/system/include/syscommon.h b/sgx_km/eurasia_km/services4/system/include/syscommon.h new file mode 100644 index 0000000..79cdee3 --- /dev/null +++ b/sgx_km/eurasia_km/services4/system/include/syscommon.h @@ -0,0 +1,393 @@ +/*************************************************************************/ /*! 
+@Title Common System APIs and structures +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description This header provides common system-specific declarations and macros + that are supported by all system's +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef _SYSCOMMON_H +#define _SYSCOMMON_H + +#include "sysconfig.h" /* System specific system defines */ +#include "sysinfo.h" /* globally accessible system info */ +#include "servicesint.h" +#include "queue.h" +#include "power.h" +#include "resman.h" +#include "ra.h" +#include "device.h" +#include "buffer_manager.h" +#include "pvr_debug.h" +#include "services.h" + +#if defined(NO_HARDWARE) && defined(__linux__) && defined(__KERNEL__) +#include +#endif + +#if defined (__cplusplus) +extern "C" { +#endif + +/*! + **************************************************************************** + device id management structure + ****************************************************************************/ +typedef struct _SYS_DEVICE_ID_TAG +{ + IMG_UINT32 uiID; + IMG_BOOL bInUse; + +} SYS_DEVICE_ID; + + +/* + the max number of independent local backing stores services supports + (grow this number if ever required) +*/ +#define SYS_MAX_LOCAL_DEVMEM_ARENAS 4 + +typedef IMG_HANDLE (*PFN_HTIMER_CREATE) (IMG_VOID); +typedef IMG_UINT32 (*PFN_HTIMER_GETUS) (IMG_HANDLE); +typedef IMG_VOID (*PFN_HTIMER_DESTROY) (IMG_HANDLE); +/*! 
+ **************************************************************************** + Top level system data structure + ****************************************************************************/ +typedef struct _SYS_DATA_TAG_ +{ + IMG_UINT32 ui32NumDevices; /*!< number of devices in system */ + SYS_DEVICE_ID sDeviceID[SYS_DEVICE_COUNT]; + PVRSRV_DEVICE_NODE *psDeviceNodeList; /*!< list of private device info structures */ + PVRSRV_POWER_DEV *psPowerDeviceList; /*!< list of devices registered with the power manager */ + PVRSRV_RESOURCE sPowerStateChangeResource; /*!< lock for power state transitions */ + PVRSRV_SYS_POWER_STATE eCurrentPowerState; /*!< current Kernel services power state */ + PVRSRV_SYS_POWER_STATE eFailedPowerState; /*!< Kernel services power state (Failed to transition to) */ + IMG_UINT32 ui32CurrentOSPowerState; /*!< current OS specific power state */ + PVRSRV_QUEUE_INFO *psQueueList; /*!< list of all command queues in the system */ + PVRSRV_KERNEL_SYNC_INFO *psSharedSyncInfoList; /*!< list of cross process syncinfos */ + IMG_PVOID pvEnvSpecificData; /*!< Environment specific data */ + IMG_PVOID pvSysSpecificData; /*!< Unique to system, accessible at system layer only */ + PVRSRV_RESOURCE sQProcessResource; /*!< Command Q processing access lock */ + IMG_VOID *pvSOCRegsBase; /*!< SOC registers base linear address */ + IMG_HANDLE hSOCTimerRegisterOSMemHandle; /*!< SOC Timer register (if present) */ + IMG_UINT32 *pvSOCTimerRegisterKM; /*!< SOC Timer register (if present) */ + IMG_VOID *pvSOCClockGateRegsBase; /*!< SOC Clock gating registers (if present) */ + IMG_UINT32 ui32SOCClockGateRegsSize; + + struct _DEVICE_COMMAND_DATA_ *apsDeviceCommandData[SYS_DEVICE_COUNT]; + /*!< command complete data and callback function store for every command for every device */ + + RA_ARENA *apsLocalDevMemArena[SYS_MAX_LOCAL_DEVMEM_ARENAS]; /*!< RA Arenas for local device memory heap management */ + + IMG_CHAR *pszVersionString; /*!< Human readable string showing 
relevent system version info */ +#if defined(SUPPORT_TI_VERSION_STRING) + IMG_CHAR szTIVersion[64]; +#endif + PVRSRV_EVENTOBJECT *psGlobalEventObject; /*!< OS Global Event Object */ + + PVRSRV_MISC_INFO_CPUCACHEOP_TYPE ePendingCacheOpType; /*!< Deferred CPU cache op control */ + + PFN_HTIMER_CREATE pfnHighResTimerCreate; + PFN_HTIMER_GETUS pfnHighResTimerGetus; + PFN_HTIMER_DESTROY pfnHighResTimerDestroy; +} SYS_DATA; + + +/**************************************************************************** + * common function prototypes + ****************************************************************************/ + +#if defined (CUSTOM_DISPLAY_SEGMENT) +PVRSRV_ERROR SysGetDisplaySegmentAddress (IMG_VOID *pvDevInfo, IMG_VOID *pvPhysicalAddress, IMG_UINT32 *pui32Length); +#endif + +PVRSRV_ERROR SysInitialise(IMG_VOID); +PVRSRV_ERROR SysFinalise(IMG_VOID); + +PVRSRV_ERROR SysDeinitialise(SYS_DATA *psSysData); +PVRSRV_ERROR SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE eDeviceType, + IMG_VOID **ppvDeviceMap); + +IMG_VOID SysRegisterExternalDevice(PVRSRV_DEVICE_NODE *psDeviceNode); +IMG_VOID SysRemoveExternalDevice(PVRSRV_DEVICE_NODE *psDeviceNode); + +IMG_UINT32 SysGetInterruptSource(SYS_DATA *psSysData, + PVRSRV_DEVICE_NODE *psDeviceNode); + +IMG_VOID SysClearInterrupts(SYS_DATA* psSysData, IMG_UINT32 ui32ClearBits); + +PVRSRV_ERROR SysResetDevice(IMG_UINT32 ui32DeviceIndex); + +PVRSRV_ERROR SysSystemPrePowerState(PVRSRV_SYS_POWER_STATE eNewPowerState); +PVRSRV_ERROR SysSystemPostPowerState(PVRSRV_SYS_POWER_STATE eNewPowerState); +PVRSRV_ERROR SysDevicePrePowerState(IMG_UINT32 ui32DeviceIndex, + PVRSRV_DEV_POWER_STATE eNewPowerState, + PVRSRV_DEV_POWER_STATE eCurrentPowerState); +PVRSRV_ERROR SysDevicePostPowerState(IMG_UINT32 ui32DeviceIndex, + PVRSRV_DEV_POWER_STATE eNewPowerState, + PVRSRV_DEV_POWER_STATE eCurrentPowerState); + +IMG_VOID SysSGXIdleEntered(IMG_VOID); +IMG_VOID SysSGXCommandPending(IMG_BOOL bSGXIdle); + +#if defined(SYS_CUSTOM_POWERLOCK_WRAP) +PVRSRV_ERROR 
SysPowerLockWrap(IMG_BOOL bTryLock); +IMG_VOID SysPowerLockUnwrap(IMG_VOID); +#endif + +PVRSRV_ERROR SysOEMFunction ( IMG_UINT32 ui32ID, + IMG_VOID *pvIn, + IMG_UINT32 ulInSize, + IMG_VOID *pvOut, + IMG_UINT32 ulOutSize); + + +IMG_DEV_PHYADDR SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE eDeviceType, IMG_CPU_PHYADDR cpu_paddr); +IMG_DEV_PHYADDR SysSysPAddrToDevPAddr (PVRSRV_DEVICE_TYPE eDeviceType, IMG_SYS_PHYADDR SysPAddr); +IMG_SYS_PHYADDR SysDevPAddrToSysPAddr (PVRSRV_DEVICE_TYPE eDeviceType, IMG_DEV_PHYADDR SysPAddr); +IMG_CPU_PHYADDR SysSysPAddrToCpuPAddr (IMG_SYS_PHYADDR SysPAddr); +IMG_SYS_PHYADDR SysCpuPAddrToSysPAddr (IMG_CPU_PHYADDR cpu_paddr); +#if defined(PVR_LMA) +IMG_BOOL SysVerifyCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE eDeviceType, IMG_CPU_PHYADDR CpuPAddr); +IMG_BOOL SysVerifySysPAddrToDevPAddr (PVRSRV_DEVICE_TYPE eDeviceType, IMG_SYS_PHYADDR SysPAddr); +#endif + +extern SYS_DATA* gpsSysData; + + +#if !defined(USE_CODE) + +/*! +****************************************************************************** + + @Function SysAcquireData + + @Description returns a reference to the global sysdata + + @Input ppsSysData - pointer to copy reference into + + @Return ppsSysData updated + +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(SysAcquireData) +#endif +static INLINE IMG_VOID SysAcquireData(SYS_DATA **ppsSysData) +{ + /* Copy back the system information pointer */ + *ppsSysData = gpsSysData; + + /* + Verify we've not been called before being initialised. Instinctively + we should do this check first, but in the failing case we'll just write + null back and the compiler won't warn about an uninitialised variable. + */ + PVR_ASSERT (gpsSysData != IMG_NULL); +} + + +/*!
+****************************************************************************** + + @Function SysAcquireDataNoCheck + + @Description returns reference to to sysdata + creating one on first call + + @Input none + + @Return psSysData - pointer to copy reference into + +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(SysAcquireDataNoCheck) +#endif +static INLINE SYS_DATA * SysAcquireDataNoCheck(IMG_VOID) +{ + /* return pointer back system information pointer */ + return gpsSysData; +} + + +/*! +****************************************************************************** + + @Function SysInitialiseCommon + + @Description Performs system initialisation common to all systems + + @Input psSysData - pointer to system data + + @Return PVRSRV_ERROR : + +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(SysInitialiseCommon) +#endif +static INLINE PVRSRV_ERROR SysInitialiseCommon(SYS_DATA *psSysData) +{ + PVRSRV_ERROR eError; + + /* Initialise Services */ + eError = PVRSRVInit(psSysData); + + return eError; +} + +/*! +****************************************************************************** + + @Function SysDeinitialiseCommon + + @Description Performs system deinitialisation common to all systems + + @Input psSysData - pointer to system data + + @Return PVRSRV_ERROR : + +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(SysDeinitialiseCommon) +#endif +static INLINE IMG_VOID SysDeinitialiseCommon(SYS_DATA *psSysData) +{ + /* De-initialise Services */ + PVRSRVDeInit(psSysData); + + OSDestroyResource(&psSysData->sPowerStateChangeResource); +} +#endif /* !defined(USE_CODE) */ + + +/* + * SysReadHWReg and SysWriteHWReg differ from OSReadHWReg and OSWriteHWReg + * in that they are always intended for use with real hardware, even on + * NO_HARDWARE systems. 
+ */ +#if !(defined(NO_HARDWARE) && defined(__linux__) && defined(__KERNEL__)) +#define SysReadHWReg(p, o) OSReadHWReg(p, o) +#define SysWriteHWReg(p, o, v) OSWriteHWReg(p, o, v) +#else /* !(defined(NO_HARDWARE) && defined(__linux__)) */ +/*! +****************************************************************************** + + @Function SysReadHWReg + + @Description + + register read function + + @input pvLinRegBaseAddr : lin addr of register block base + + @input ui32Offset : + + @Return register value + +******************************************************************************/ +static inline IMG_UINT32 SysReadHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset) +{ + return (IMG_UINT32) readl(pvLinRegBaseAddr + ui32Offset); +} + +/*! +****************************************************************************** + + @Function SysWriteHWReg + + @Description + + register write function + + @input pvLinRegBaseAddr : lin addr of register block base + + @input ui32Offset : + + @input ui32Value : + + @Return none + +******************************************************************************/ +static inline IMG_VOID SysWriteHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value) +{ + writel(ui32Value, pvLinRegBaseAddr + ui32Offset); +} +#endif /* !(defined(NO_HARDWARE) && defined(__linux__)) */ + +#if defined(__cplusplus) +} +#endif + +#ifdef INLINE_IS_PRAGMA +#pragma inline(SysHighResTimerCreate) +#endif +static INLINE IMG_HANDLE SysHighResTimerCreate(IMG_VOID) +{ + SYS_DATA *psSysData; + + SysAcquireData(&psSysData); + return psSysData->pfnHighResTimerCreate(); +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(SysHighResTimerGetus) +#endif +static INLINE IMG_UINT32 SysHighResTimerGetus(IMG_HANDLE hTimer) +{ + SYS_DATA *psSysData; + + SysAcquireData(&psSysData); + return psSysData->pfnHighResTimerGetus(hTimer); +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(SysHighResTimerDestroy) +#endif +static INLINE IMG_VOID 
SysHighResTimerDestroy(IMG_HANDLE hTimer) +{ + SYS_DATA *psSysData; + + SysAcquireData(&psSysData); + psSysData->pfnHighResTimerDestroy(hTimer); +} +#endif + +/***************************************************************************** + End of file (syscommon.h) +*****************************************************************************/ diff --git a/sgx_km/eurasia_km/services4/system/omap/oemfuncs.h b/sgx_km/eurasia_km/services4/system/omap/oemfuncs.h new file mode 100644 index 0000000..0902042 --- /dev/null +++ b/sgx_km/eurasia_km/services4/system/omap/oemfuncs.h @@ -0,0 +1,80 @@ +/*************************************************************************/ /*! +@Title SGX kernel/client driver interface structures and prototypes +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(__OEMFUNCS_H__) +#define __OEMFUNCS_H__ + +#if defined (__cplusplus) +extern "C" { +#endif + +/* function in/out data structures: */ +typedef IMG_UINT32 (*PFN_SRV_BRIDGEDISPATCH)( IMG_UINT32 Ioctl, + IMG_BYTE *pInBuf, + IMG_UINT32 InBufLen, + IMG_BYTE *pOutBuf, + IMG_UINT32 OutBufLen, + IMG_UINT32 *pdwBytesTransferred); +/* + Function table for kernel 3rd party driver to kernel services +*/ +typedef struct PVRSRV_DC_OEM_JTABLE_TAG +{ + PFN_SRV_BRIDGEDISPATCH pfnOEMBridgeDispatch; + IMG_PVOID pvDummy1; + IMG_PVOID pvDummy2; + IMG_PVOID pvDummy3; + +} PVRSRV_DC_OEM_JTABLE; + +#define OEM_GET_EXT_FUNCS (1<<1) + +#if defined(__cplusplus) +} +#endif + +#endif /* __OEMFUNCS_H__ */ + +/***************************************************************************** + End of file (oemfuncs.h) +*****************************************************************************/ + + diff --git a/sgx_km/eurasia_km/services4/system/omap/sgxfreq.c b/sgx_km/eurasia_km/services4/system/omap/sgxfreq.c new file mode 100644 index 0000000..6e5571e --- /dev/null +++ b/sgx_km/eurasia_km/services4/system/omap/sgxfreq.c @@ -0,0 +1,846 @@ +/* + * Copyright (C) 2012 Texas Instruments, Inc + * +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) +#include +#include +#else +#include +#endif +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0)) +#include +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) +#include +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)) +#include +#endif + +#include "sgxfreq.h" + +static struct sgxfreq_data { + int freq_cnt; + unsigned long *freq_list; + unsigned long freq; + unsigned long freq_request; + unsigned long freq_limit; + unsigned long total_idle_time; + unsigned long total_active_time; + struct mutex freq_mutex; + struct list_head gov_list; + struct sgxfreq_governor *gov; + struct mutex gov_mutex; + struct sgxfreq_sgx_data sgx_data; + struct device *dev; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0)) + struct gpu_platform_data *pdata; +#else + struct clk *core_clk; + struct clk *gpu_clk; + struct clk *per_clk; + struct clk *gpu_core_clk; + struct clk *gpu_hyd_clk; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0)) + struct regulator *gpu_reg; +#endif +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) + struct notifier_block *clk_nb; +#endif +#endif +} sfd; + +/* Governor init/deinit functions */ +int onoff_init(void); +int onoff_deinit(void); +int activeidle_init(void); +int activeidle_deinit(void); +int on3demand_init(void); +int on3demand_deinit(void); +int userspace_init(void); +int userspace_deinit(void); + + +typedef int sgxfreq_gov_init_t(void); +sgxfreq_gov_init_t *sgxfreq_gov_init[] = { + onoff_init, + activeidle_init, + on3demand_init, + userspace_init, + NULL, +}; + +typedef int sgxfreq_gov_deinit_t(void); +sgxfreq_gov_deinit_t *sgxfreq_gov_deinit[] = { + onoff_deinit, + activeidle_deinit, + on3demand_deinit, + userspace_deinit, + NULL, +}; + +#define SGXFREQ_DEFAULT_GOV_NAME "on3demand" +static unsigned long _idle_curr_time; +static unsigned long _idle_prev_time; +static unsigned long _active_curr_time; +static unsigned long _active_prev_time; + +#if (defined(CONFIG_THERMAL) || 
defined(CONFIG_THERMAL_FRAMEWORK)) +int cool_init(void); +void cool_deinit(void); +#endif + +/*********************** begin sysfs interface ***********************/ + +struct kobject *sgxfreq_kobj; + +static ssize_t show_frequency_list(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + int i; + ssize_t count = 0; + + for (i = 0; i < sfd.freq_cnt; i++) + count += sprintf(&buf[count], "%lu ", sfd.freq_list[i]); + count += sprintf(&buf[count], "\n"); + + return count; +} + +static ssize_t show_frequency_request(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return sprintf(buf, "%lu\n", sfd.freq_request); +} + +static ssize_t show_frequency_limit(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return sprintf(buf, "%lu\n", sfd.freq_limit); +} + +static ssize_t show_frequency(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return sprintf(buf, "%lu\n", sfd.freq); +} + +static ssize_t show_stat(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return sprintf(buf, "gpu %lu %lu\n", + sfd.total_active_time, sfd.total_idle_time); +} + +static ssize_t show_governor_list(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + ssize_t i = 0; + struct sgxfreq_governor *t; + + list_for_each_entry(t, &sfd.gov_list, governor_list) { + if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char)) + - (SGXFREQ_NAME_LEN + 2))) + goto out; + i += scnprintf(&buf[i], SGXFREQ_NAME_LEN, "%s ", t->name); + } +out: + i += sprintf(&buf[i], "\n"); + return i; +} + +static ssize_t show_governor(struct device *dev, + struct device_attribute *attr, char *buf) +{ + if (sfd.gov) + return scnprintf(buf, SGXFREQ_NAME_LEN, "%s\n", sfd.gov->name); + + return sprintf(buf, "\n"); +} + +static ssize_t store_governor(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + int ret; + char name[16]; + + ret = sscanf(buf, "%15s", name); + if (ret != 1) + return 
-EINVAL; + + ret = sgxfreq_set_governor(name); + if (ret) + return ret; + else + return count; +} + +static DEVICE_ATTR(frequency_list, 0444, show_frequency_list, NULL); +static DEVICE_ATTR(frequency_request, 0444, show_frequency_request, NULL); +static DEVICE_ATTR(frequency_limit, 0444, show_frequency_limit, NULL); +static DEVICE_ATTR(frequency, 0444, show_frequency, NULL); +static DEVICE_ATTR(governor_list, 0444, show_governor_list, NULL); +static DEVICE_ATTR(governor, 0644, show_governor, store_governor); +static DEVICE_ATTR(stat, 0444, show_stat, NULL); + +static const struct attribute *sgxfreq_attributes[] = { + &dev_attr_frequency_list.attr, + &dev_attr_frequency_request.attr, + &dev_attr_frequency_limit.attr, + &dev_attr_frequency.attr, + &dev_attr_governor_list.attr, + &dev_attr_governor.attr, + &dev_attr_stat.attr, + NULL +}; + +/************************ end sysfs interface ************************/ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)) && (LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0)) +static int set_volt_for_freq(unsigned long freq) +{ + struct opp *opp; + unsigned long volt = 0; + int ret; + + if (sfd.gpu_reg) { + opp = opp_find_freq_exact(sfd.dev, freq, true); + if(IS_ERR(opp)) + { + int r = PTR_ERR(opp); + pr_err("sgxfreq: Couldn't find opp matching freq: %lu. 
Err: %d", + freq, r); + return -1; + } + + volt = opp_get_voltage(opp); + if (!volt) + { + pr_err("sgxfreq: Could find volt corresponding to freq: %lu\n", + freq); + return -1; + } + + ret = regulator_set_voltage_tol(sfd.gpu_reg, volt , 6000); + if (ret) { + pr_err("sgxfreq: Error(%d) setting volt: %lu for freq:%lu\n", + ret, volt, freq); + return ret; + } + } + + return 0; + +} +#endif + +static int __set_freq(void) +{ + unsigned long freq; + int ret = 0; + + freq = min(sfd.freq_request, sfd.freq_limit); + if (freq != sfd.freq) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)) +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0)) + if (freq > sfd.freq) { + /* Going up - must scale voltage before clocks */ + if (set_volt_for_freq(freq) != 0) { + pr_err("sgxfreq: Error setting voltage for freq: %lu\n", + freq); + goto err1; + } + } +#endif + + ret = clk_set_rate(sfd.gpu_core_clk, freq); + if (ret) { + pr_err("sgxfreq: Error(%d) setting gpu core clock rate: %lu\n", + ret, freq); + goto err2; + } + + ret = clk_set_rate(sfd.gpu_hyd_clk, freq); + if (ret) { + pr_err("sgxfreq: Error(%d) setting gpu hyd clock rate: %lu\n", + ret, freq); + goto err3; + } + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0)) + if (freq < sfd.freq) { + /* Going down - must scale voltage after clocks */ + if(set_volt_for_freq(freq) != 0) { + pr_err("sgxfreq: Error setting voltage for freq: %lu\n", + freq); + goto err4; + } + } +#endif +#elif (LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0)) + sfd.pdata->device_scale(sfd.dev, sfd.dev, freq); +#else + sfd.pdata->device_scale(sfd.dev, freq); +#endif + sfd.freq = freq; + + goto noerr; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)) +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0)) +err4: +#endif + ret |= clk_set_rate(sfd.gpu_hyd_clk, sfd.freq); + +err3: + ret |= clk_set_rate(sfd.gpu_core_clk, sfd.freq); +err2: +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0)) + if(freq > sfd.freq) + ret |= set_volt_for_freq(sfd.freq); +err1: +#endif +#endif 
+noerr: + return ret; + } + return ret; +} + +static struct sgxfreq_governor *__find_governor(const char *name) +{ + struct sgxfreq_governor *t; + + list_for_each_entry(t, &sfd.gov_list, governor_list) + if (!strncasecmp(name, t->name, SGXFREQ_NAME_LEN)) + return t; + + return NULL; +} + +static void __update_timing_info(bool active) +{ + struct timeval tv; + do_gettimeofday(&tv); + if(active) + { + if(sfd.sgx_data.active == true) { + _active_curr_time = __tv2msec(tv); + sfd.total_active_time += __delta32( + _active_curr_time, _active_prev_time); + SGXFREQ_TRACE("A->A TA:= %lums \tdA: %lums \tTI: %lums \tdI: %lums\n", + sfd.total_active_time, + __delta32(_active_curr_time, _active_prev_time), + sfd.total_active_time, + (unsigned long)0); + _active_prev_time = _active_curr_time; + } else { + _idle_curr_time = __tv2msec(tv); + _active_prev_time = _idle_curr_time; + sfd.total_idle_time += + __delta32(_idle_curr_time, _idle_prev_time); + SGXFREQ_TRACE("I->A TA:= %lums \tdA: %lums \tTI: %lums \tdI: %lums\n", + sfd.total_active_time, + (unsigned long)0, + sfd.total_idle_time, + __delta32(_idle_curr_time, _idle_prev_time)); + } + } else { + if(sfd.sgx_data.active == true) + { + _idle_prev_time = _active_curr_time = __tv2msec(tv); + sfd.total_active_time += + __delta32(_active_curr_time, _active_prev_time); + SGXFREQ_TRACE("A->I TA:= %lums \tdA: %lums \tTI: %lums \tdI: %lums\n", + sfd.total_active_time, + __delta32(_active_curr_time, _active_prev_time), + sfd.total_active_time, + (unsigned long)0); + } + else + { + _idle_curr_time = __tv2msec(tv); + sfd.total_idle_time += __delta32( + _idle_curr_time, _idle_prev_time); + SGXFREQ_TRACE("I->I TA:= %lums \tdA: %lums \tTI: %lums \tdI: %lums\n", + sfd.total_active_time, + (unsigned long)0, + sfd.total_idle_time, + __delta32(_idle_curr_time, _idle_prev_time)); + _idle_prev_time = _idle_curr_time; + } + } +} + +int sgxfreq_init(struct device *dev) +{ + int i, ret; + unsigned long freq; +#if (LINUX_VERSION_CODE >= 
KERNEL_VERSION(3,14,0)) + struct dev_pm_opp *opp; + struct device_node *np; + unsigned int voltage_latency; +#else + struct opp *opp; +#endif + struct timeval tv; + + sfd.dev = dev; + if (!sfd.dev) + return -EINVAL; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0)) + sfd.pdata = (struct gpu_platform_data *)dev->platform_data; + if (!sfd.pdata || + !sfd.pdata->opp_get_opp_count || + !sfd.pdata->opp_find_freq_ceil || + !sfd.pdata->device_scale) + return -EINVAL; +#endif + + rcu_read_lock(); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0)) + sfd.freq_cnt = sfd.pdata->opp_get_opp_count(dev); +#else + ret = of_init_opp_table(dev); + if (ret) { + pr_err("sgxfreq: failed to init OPP table: %d\n", ret); + return -EINVAL; + } +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0)) + sfd.freq_cnt = opp_get_opp_count(dev); +#else + sfd.freq_cnt = dev_pm_opp_get_opp_count(dev); +#endif +#endif + if (sfd.freq_cnt < 1) { + pr_err("sgxfreq: failed to get operating frequencies\n"); + rcu_read_unlock(); + return -ENODEV; + } + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) + np = of_node_get(dev->of_node); + sfd.clk_nb = of_pm_voltdm_notifier_register(dev, np, sfd.gpu_core_clk, "gpu", + &voltage_latency); + + if (IS_ERR(sfd.clk_nb)) { + ret = PTR_ERR(sfd.clk_nb); + /* defer probe if regulator is not yet registered */ + if (ret == -EPROBE_DEFER) { + dev_err(dev, + "gpu clock notifier not ready, retry\n"); + } else { + dev_err(dev, + "Failed to register gpu clock notifier: %d\n", + ret); + } + return ret; + } + +#endif + sfd.freq_list = kmalloc(sfd.freq_cnt * sizeof(unsigned long), GFP_ATOMIC); + if (!sfd.freq_list) { + rcu_read_unlock(); + return -ENOMEM; + } + + freq = 0; + for (i = 0; i < sfd.freq_cnt; i++) { +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0)) + opp = sfd.pdata->opp_find_freq_ceil(dev, &freq); +#elif (LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0)) + opp = opp_find_freq_ceil(dev, &freq); +#else + /* 3.14 and later kernels */ + opp = dev_pm_opp_find_freq_ceil(dev, 
&freq); +#endif + if (IS_ERR_OR_NULL(opp)) { + rcu_read_unlock(); + kfree(sfd.freq_list); + return -ENODEV; + } + sfd.freq_list[i] = freq; + freq++; + } + rcu_read_unlock(); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)) + sfd.core_clk = devm_clk_get(dev, "dpll_core_h14x2_ck"); + if (IS_ERR(sfd.core_clk)) { + ret = PTR_ERR(sfd.core_clk); + pr_err("sgxfreq: failed to get core clock: %d\n", ret); + return ret; + } + + sfd.gpu_clk = devm_clk_get(dev, "dpll_gpu_m2_ck"); + if (IS_ERR(sfd.gpu_clk)) { + ret = PTR_ERR(sfd.gpu_clk); + pr_err("sgxfreq: failed to get gpu clock: %d\n", ret); + return ret; + } + + sfd.per_clk = devm_clk_get(dev, "dpll_per_h14x2_ck"); + if (IS_ERR(sfd.per_clk)) { + ret = PTR_ERR(sfd.per_clk); + pr_err("sgxfreq: failed to get per clock: %d\n", ret); + return ret; + } + + sfd.gpu_core_clk = devm_clk_get(dev, "gpu_core_gclk_mux"); + if (IS_ERR(sfd.gpu_core_clk)) { + ret = PTR_ERR(sfd.gpu_core_clk); + pr_err("sgxfreq: failed to get gpu core clock: %d\n", ret); + return ret; + } + + sfd.gpu_hyd_clk = devm_clk_get(dev, "gpu_core_gclk_mux"); + if (IS_ERR(sfd.gpu_hyd_clk)) { + ret = PTR_ERR(sfd.gpu_hyd_clk); + pr_err("sgxfreq: failed to get gpu hyd clock: %d\n", ret); + return ret; + } + + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0)) + sfd.gpu_reg = devm_regulator_get(dev, "gpu"); + if (IS_ERR(sfd.gpu_reg)) { + if (PTR_ERR(sfd.gpu_reg) == -EPROBE_DEFER) { + dev_err(dev, "gpu regulator not ready, retry\n"); + return -EPROBE_DEFER; + } + pr_err("sgxfreq: failed to get gpu regulator: %ld\n", PTR_ERR(sfd.gpu_reg)); + sfd.gpu_reg = NULL; + } +#endif + + ret = clk_set_parent(sfd.gpu_hyd_clk, sfd.core_clk); + if (ret != 0) { + pr_err("sgxfreq: failed to set gpu_hyd_clk parent: %d\n", ret); + } + + ret = clk_set_parent(sfd.gpu_core_clk, sfd.core_clk); + if (ret != 0) { + pr_err("sgxfreq: failed to set gpu_core_clk parent: %d\n", ret); + } +#endif + + mutex_init(&sfd.freq_mutex); + sfd.freq_limit = sfd.freq_list[sfd.freq_cnt - 1]; + 
sgxfreq_set_freq_request(sfd.freq_list[sfd.freq_cnt - 1]); + sfd.sgx_data.clk_on = false; + sfd.sgx_data.active = false; + + mutex_init(&sfd.gov_mutex); + INIT_LIST_HEAD(&sfd.gov_list); + + sgxfreq_kobj = kobject_create_and_add("sgxfreq", &sfd.dev->kobj); + ret = sysfs_create_files(sgxfreq_kobj, sgxfreq_attributes); + if (ret) { + kfree(sfd.freq_list); + return ret; + } + +#if (defined(CONFIG_THERMAL) || defined(CONFIG_THERMAL_FRAMEWORK)) + cool_init(); +#endif + + for (i = 0; sgxfreq_gov_init[i] != NULL; i++) + sgxfreq_gov_init[i](); + + if (sgxfreq_set_governor(SGXFREQ_DEFAULT_GOV_NAME)) { + kfree(sfd.freq_list); + return -ENODEV; + } + do_gettimeofday(&tv); + _idle_prev_time = _active_curr_time = _idle_curr_time = + _active_prev_time = __tv2msec(tv); + + return 0; +} + +int sgxfreq_deinit(void) +{ + int i; + + sgxfreq_set_governor(NULL); + + sgxfreq_set_freq_request(sfd.freq_list[0]); + +#if (defined(CONFIG_THERMAL) || defined(CONFIG_THERMAL_FRAMEWORK)) + cool_deinit(); +#endif + + for (i = 0; sgxfreq_gov_deinit[i] != NULL; i++) + sgxfreq_gov_deinit[i](); + + sysfs_remove_files(sgxfreq_kobj, sgxfreq_attributes); + kobject_put(sgxfreq_kobj); + + kfree(sfd.freq_list); + + return 0; +} + +int sgxfreq_register_governor(struct sgxfreq_governor *governor) +{ + if (!governor) + return -EINVAL; + + list_add(&governor->governor_list, &sfd.gov_list); + + return 0; +} + +void sgxfreq_unregister_governor(struct sgxfreq_governor *governor) +{ + if (!governor) + return; + + list_del(&governor->governor_list); +} + +int sgxfreq_set_governor(const char *name) +{ + int ret = 0; + struct sgxfreq_governor *new_gov = 0; + + if (name) { + new_gov = __find_governor(name); + if (!new_gov) + return -EINVAL; + } + + mutex_lock(&sfd.gov_mutex); + + if (sfd.gov && sfd.gov->gov_stop) + sfd.gov->gov_stop(); + + if (new_gov && new_gov->gov_start) + ret = new_gov->gov_start(&sfd.sgx_data); + + if (ret) { + if (sfd.gov && sfd.gov->gov_start) + sfd.gov->gov_start(&sfd.sgx_data); + return 
-ENODEV; + } + sfd.gov = new_gov; + + mutex_unlock(&sfd.gov_mutex); + + return 0; +} + +int sgxfreq_get_freq_list(unsigned long **pfreq_list) +{ + *pfreq_list = sfd.freq_list; + + return sfd.freq_cnt; +} + +unsigned long sgxfreq_get_freq_min(void) +{ + return sfd.freq_list[0]; +} + +unsigned long sgxfreq_get_freq_max(void) +{ + return sfd.freq_list[sfd.freq_cnt - 1]; +} + +unsigned long sgxfreq_get_freq_floor(unsigned long freq) +{ + int i; + unsigned long f = 0; + + for (i = sfd.freq_cnt - 1; i >= 0; i--) { + f = sfd.freq_list[i]; + if (f <= freq) + return f; + } + + return f; +} + +unsigned long sgxfreq_get_freq_ceil(unsigned long freq) +{ + int i; + unsigned long f = 0; + + for (i = 0; i < sfd.freq_cnt; i++) { + f = sfd.freq_list[i]; + if (f >= freq) + return f; + } + + return f; +} + +unsigned long sgxfreq_get_freq(void) +{ + return sfd.freq; +} + +unsigned long sgxfreq_get_freq_request(void) +{ + return sfd.freq_request; +} + +unsigned long sgxfreq_get_freq_limit(void) +{ + return sfd.freq_limit; +} + +unsigned long sgxfreq_set_freq_request(unsigned long freq_request) +{ + freq_request = sgxfreq_get_freq_ceil(freq_request); + + mutex_lock(&sfd.freq_mutex); + + sfd.freq_request = freq_request; + __set_freq(); + + mutex_unlock(&sfd.freq_mutex); + + return freq_request; +} + +unsigned long sgxfreq_set_freq_limit(unsigned long freq_limit) +{ + freq_limit = sgxfreq_get_freq_ceil(freq_limit); + + mutex_lock(&sfd.freq_mutex); + + sfd.freq_limit = freq_limit; + __set_freq(); + + mutex_unlock(&sfd.freq_mutex); + + return freq_limit; +} + +unsigned long sgxfreq_get_total_active_time(void) +{ + __update_timing_info(sfd.sgx_data.active); + return sfd.total_active_time; +} + +unsigned long sgxfreq_get_total_idle_time(void) +{ + __update_timing_info(sfd.sgx_data.active); + return sfd.total_idle_time; +} + +/* + * sgx_clk_on, sgx_clk_off, sgx_active, and sgx_idle notifications are + * serialized by power lock. governor notif calls need sync with governor + * setting. 
+ */ +void sgxfreq_notif_sgx_clk_on(void) +{ + sfd.sgx_data.clk_on = true; + + mutex_lock(&sfd.gov_mutex); + + if (sfd.gov && sfd.gov->sgx_clk_on) + sfd.gov->sgx_clk_on(); + + mutex_unlock(&sfd.gov_mutex); +} + +void sgxfreq_notif_sgx_clk_off(void) +{ + sfd.sgx_data.clk_on = false; + + mutex_lock(&sfd.gov_mutex); + + if (sfd.gov && sfd.gov->sgx_clk_off) + sfd.gov->sgx_clk_off(); + + mutex_unlock(&sfd.gov_mutex); +} + + +void sgxfreq_notif_sgx_active(void) +{ + __update_timing_info(true); + + sfd.sgx_data.active = true; + + mutex_lock(&sfd.gov_mutex); + + if (sfd.gov && sfd.gov->sgx_active) + sfd.gov->sgx_active(); + + mutex_unlock(&sfd.gov_mutex); + +} + +void sgxfreq_notif_sgx_idle(void) +{ + + __update_timing_info(false); + + sfd.sgx_data.active = false; + + mutex_lock(&sfd.gov_mutex); + + if (sfd.gov && sfd.gov->sgx_idle) + sfd.gov->sgx_idle(); + + mutex_unlock(&sfd.gov_mutex); +} + +void sgxfreq_notif_sgx_frame_done(void) +{ + mutex_lock(&sfd.gov_mutex); + + if (sfd.gov && sfd.gov->sgx_frame_done) + sfd.gov->sgx_frame_done(); + + mutex_unlock(&sfd.gov_mutex); +} diff --git a/sgx_km/eurasia_km/services4/system/omap/sgxfreq.h b/sgx_km/eurasia_km/services4/system/omap/sgxfreq.h new file mode 100644 index 0000000..01ecf85 --- /dev/null +++ b/sgx_km/eurasia_km/services4/system/omap/sgxfreq.h @@ -0,0 +1,122 @@ +/* + * Copyright (C) 2012 Texas Instruments, Inc + * +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ + +#ifndef SGXFREQ_H +#define SGXFREQ_H + +#include +#include + +#define SGXFREQ_NAME_LEN 16 + +//#define SGXFREQ_DEBUG_FTRACE +#if defined(SGXFREQ_DEBUG_FTRACE) +#define SGXFREQ_TRACE(...) trace_printk(__VA_ARGS__) +#else +#define SGXFREQ_TRACE(...) +#endif + +struct sgxfreq_sgx_data { + bool clk_on; + bool active; +}; + +struct sgxfreq_governor { + char name[SGXFREQ_NAME_LEN]; + int (*gov_start) (struct sgxfreq_sgx_data *data); + void (*gov_stop) (void); + void (*sgx_clk_on) (void); + void (*sgx_clk_off) (void); + void (*sgx_active) (void); + void (*sgx_idle) (void); + void (*sgx_frame_done) (void); + struct list_head governor_list; +}; + +/* sgxfreq_init must be called before any other api */ +int sgxfreq_init(struct device *dev); +int sgxfreq_deinit(void); + +int sgxfreq_register_governor(struct sgxfreq_governor *governor); +void sgxfreq_unregister_governor(struct sgxfreq_governor *governor); + +int sgxfreq_set_governor(const char *name); + +int sgxfreq_get_freq_list(unsigned long **pfreq_list); + +unsigned long sgxfreq_get_freq_min(void); +unsigned long sgxfreq_get_freq_max(void); + +unsigned long sgxfreq_get_freq_floor(unsigned long freq); +unsigned long sgxfreq_get_freq_ceil(unsigned long freq); + +unsigned long sgxfreq_get_freq(void); +unsigned long sgxfreq_get_freq_request(void); +unsigned long sgxfreq_get_freq_limit(void); + +unsigned long sgxfreq_set_freq_request(unsigned long freq_request); +unsigned long sgxfreq_set_freq_limit(unsigned long freq_limit); + +unsigned long sgxfreq_get_total_active_time(void); +unsigned long sgxfreq_get_total_idle_time(void); + +/* Helper functions */ +static inline unsigned long __tv2msec(struct timeval tv) +{ + return (tv.tv_sec * 1000) + (tv.tv_usec / 1000); +} + +static inline unsigned long __delta32(unsigned long a, unsigned long b) +{ + if (a >= b) + return a - b; + else + return 1 + (0xFFFFFFFF - b) + a; +} + +/* External notifications to sgxfreq */ +void sgxfreq_notif_sgx_clk_on(void); +void 
sgxfreq_notif_sgx_clk_off(void); +void sgxfreq_notif_sgx_active(void); +void sgxfreq_notif_sgx_idle(void); +void sgxfreq_notif_sgx_frame_done(void); + +#endif diff --git a/sgx_km/eurasia_km/services4/system/omap/sgxfreq_activeidle.c b/sgx_km/eurasia_km/services4/system/omap/sgxfreq_activeidle.c new file mode 100644 index 0000000..bb9b6c7 --- /dev/null +++ b/sgx_km/eurasia_km/services4/system/omap/sgxfreq_activeidle.c @@ -0,0 +1,206 @@ +/* + * Copyright (C) 2012 Texas Instruments, Inc + * +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ + +#include +#include "sgxfreq.h" + +static int activeidle_start(struct sgxfreq_sgx_data *data); +static void activeidle_stop(void); +static void activeidle_sgx_active(void); +static void activeidle_sgx_idle(void); + +static struct activeidle_data { + unsigned long freq_active; + unsigned long freq_idle; + struct mutex mutex; + bool sgx_active; +} aid; + +static struct sgxfreq_governor activeidle_gov = { + .name = "activeidle", + .gov_start = activeidle_start, + .gov_stop = activeidle_stop, + .sgx_active = activeidle_sgx_active, + .sgx_idle = activeidle_sgx_idle, +}; + +/*********************** begin sysfs interface ***********************/ + +extern struct kobject *sgxfreq_kobj; + +static ssize_t show_freq_active(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return sprintf(buf, "%lu\n", aid.freq_active); +} + +static ssize_t store_freq_active(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int ret; + unsigned long freq; + + ret = sscanf(buf, "%lu", &freq); + if (ret != 1) + return -EINVAL; + + freq = sgxfreq_get_freq_ceil(freq); + + mutex_lock(&aid.mutex); + + aid.freq_active = freq; + if (aid.sgx_active) + sgxfreq_set_freq_request(aid.freq_active); + + mutex_unlock(&aid.mutex); + + return count; +} + +static ssize_t show_freq_idle(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return sprintf(buf, "%lu\n", aid.freq_idle); +} + +static ssize_t 
store_freq_idle(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int ret; + unsigned long freq; + + ret = sscanf(buf, "%lu", &freq); + if (ret != 1) + return -EINVAL; + + freq = sgxfreq_get_freq_floor(freq); + + mutex_lock(&aid.mutex); + + aid.freq_idle = freq; + if (!aid.sgx_active) + sgxfreq_set_freq_request(aid.freq_idle); + + mutex_unlock(&aid.mutex); + + return count; +} +static DEVICE_ATTR(freq_active, 0644, show_freq_active, store_freq_active); +static DEVICE_ATTR(freq_idle, 0644, show_freq_idle, store_freq_idle); + +static struct attribute *activeidle_attributes[] = { + &dev_attr_freq_active.attr, + &dev_attr_freq_idle.attr, + NULL +}; + +static struct attribute_group activeidle_attr_group = { + .attrs = activeidle_attributes, + .name = "activeidle", +}; + +/************************ end sysfs interface ************************/ + +int activeidle_init(void) +{ + int ret; + + mutex_init(&aid.mutex); + + ret = sgxfreq_register_governor(&activeidle_gov); + if (ret) + return ret; + + aid.freq_idle = sgxfreq_get_freq_min(); + aid.freq_active = sgxfreq_get_freq_max(); + + return 0; +} + +int activeidle_deinit(void) +{ + return 0; +} + +static int activeidle_start(struct sgxfreq_sgx_data *data) +{ + int ret; + + aid.sgx_active = data->active; + + ret = sysfs_create_group(sgxfreq_kobj, &activeidle_attr_group); + if (ret) + return ret; + + if (aid.sgx_active) + sgxfreq_set_freq_request(aid.freq_active); + else + sgxfreq_set_freq_request(aid.freq_idle); + + return 0; +} + +static void activeidle_stop(void) +{ + sysfs_remove_group(sgxfreq_kobj, &activeidle_attr_group); +} + +static void activeidle_sgx_active(void) +{ + mutex_lock(&aid.mutex); + + aid.sgx_active = true; + sgxfreq_set_freq_request(aid.freq_active); + + mutex_unlock(&aid.mutex); +} + +static void activeidle_sgx_idle(void) +{ + mutex_lock(&aid.mutex); + + aid.sgx_active = false; + sgxfreq_set_freq_request(aid.freq_idle); + + mutex_unlock(&aid.mutex); +} diff --git 
a/sgx_km/eurasia_km/services4/system/omap/sgxfreq_cool.c b/sgx_km/eurasia_km/services4/system/omap/sgxfreq_cool.c new file mode 100644 index 0000000..0940362 --- /dev/null +++ b/sgx_km/eurasia_km/services4/system/omap/sgxfreq_cool.c @@ -0,0 +1,216 @@ +/* + * Copyright (C) 2012 Texas Instruments, Inc + * +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)) + +#include + +static struct cool_data { + int freq_cnt; + unsigned long *freq_list; + unsigned long state; + struct thermal_cooling_device *cdev; +} cd; + +static int sgxfreq_get_max_state(struct thermal_cooling_device *cdev, + unsigned long *state) +{ + *state = cd.freq_cnt - 1; + return 0; +} + +static int sgxfreq_get_cur_state(struct thermal_cooling_device *cdev, + unsigned long *state) +{ + *state = cd.state; + return 0; +} + +static int sgxfreq_set_cur_state(struct thermal_cooling_device *cdev, + unsigned long state) +{ + int freq_max_index, freq_limit_index; + + freq_max_index = cd.freq_cnt - 1; + + freq_limit_index = freq_max_index - (unsigned int)state; + + if (freq_limit_index < 0) + freq_limit_index = 0; + + sgxfreq_set_freq_limit(cd.freq_list[freq_limit_index]); + + cd.state = state; + return 0; +} + + +static const struct thermal_cooling_device_ops sgxfreq_cooling_ops = { + .get_max_state = sgxfreq_get_max_state, + .get_cur_state = sgxfreq_get_cur_state, + .set_cur_state = sgxfreq_set_cur_state, +}; + +int cool_init(void) +{ + int ret; + struct thermal_zone_device *tz; + + cd.freq_cnt = sgxfreq_get_freq_list(&cd.freq_list); + if (!cd.freq_cnt || !cd.freq_list) + return -EINVAL; + + cd.cdev = thermal_cooling_device_register("gpu", (void *)NULL, &sgxfreq_cooling_ops); + + if(IS_ERR(cd.cdev)) { + pr_err("sgxfreq: Error while regeistering cooling device: %ld\n", 
PTR_ERR(cd.cdev)); + return -1; + } + + tz = thermal_zone_get_zone_by_name("gpu"); + if(IS_ERR(tz)) { + pr_err("sgxfreq: Error while trying to obtain zone device: %ld\n", PTR_ERR(tz)); + return -1; + } + + ret = thermal_zone_bind_cooling_device(tz, 0, cd.cdev, THERMAL_NO_LIMIT, THERMAL_NO_LIMIT); + if (ret) + { + pr_err("sgxfreq: Error binding cooling device: %d\n", ret); + } + + return 0; +} + +void cool_deinit(void) +{ + thermal_cooling_device_unregister(cd.cdev); +} +#else //if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)) +#include + +static int cool_device(struct thermal_dev *dev, int cooling_level); + +static struct cool_data { + int freq_cnt; + unsigned long *freq_list; +} cd; + +static struct thermal_dev_ops cool_dev_ops = { + .cool_device = cool_device, +}; + +static struct thermal_dev cool_dev = { + .name = "gpu_cooling.0", + .domain_name = "gpu", + .dev_ops = &cool_dev_ops, +}; + +static struct thermal_dev case_cool_dev = { + .name = "gpu_cooling.1", + .domain_name = "case", + .dev_ops = &cool_dev_ops, +}; + +static unsigned int gpu_cooling_level; +#if defined(CONFIG_CASE_TEMP_GOVERNOR) +static unsigned int case_cooling_level; +#endif + +int cool_init(void) +{ + int ret; + cd.freq_cnt = sgxfreq_get_freq_list(&cd.freq_list); + if (!cd.freq_cnt || !cd.freq_list) + return -EINVAL; + + ret = thermal_cooling_dev_register(&cool_dev); + if (ret) + return ret; + + return thermal_cooling_dev_register(&case_cool_dev); +} + +void cool_deinit(void) +{ + thermal_cooling_dev_unregister(&cool_dev); + thermal_cooling_dev_unregister(&case_cool_dev); +} + +static int cool_device(struct thermal_dev *dev, int cooling_level) +{ + int freq_max_index, freq_limit_index; + +#if defined(CONFIG_CASE_TEMP_GOVERNOR) + if (!strcmp(dev->domain_name, "case")) + { + int tmp = 0; + tmp = cooling_level - case_subzone_number; + if (tmp < 0) + tmp = 0; + case_cooling_level = tmp; + } + else +#endif + { + gpu_cooling_level = cooling_level; + } + + freq_max_index = cd.freq_cnt - 1; +#if 
defined(CONFIG_CASE_TEMP_GOVERNOR) + if (case_cooling_level > gpu_cooling_level) + { + freq_limit_index = freq_max_index - case_cooling_level; + } + else +#endif + { + freq_limit_index = freq_max_index - gpu_cooling_level; + } + + if (freq_limit_index < 0) + freq_limit_index = 0; + + sgxfreq_set_freq_limit(cd.freq_list[freq_limit_index]); + + return 0; +} +#endif //if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)) diff --git a/sgx_km/eurasia_km/services4/system/omap/sgxfreq_on3demand.c b/sgx_km/eurasia_km/services4/system/omap/sgxfreq_on3demand.c new file mode 100644 index 0000000..73a8b77 --- /dev/null +++ b/sgx_km/eurasia_km/services4/system/omap/sgxfreq_on3demand.c @@ -0,0 +1,295 @@ +/* + * Copyright (C) 2012 Texas Instruments, Inc + * +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ + +#include +#include +#include +#include "sgxfreq.h" + +static int on3demand_start(struct sgxfreq_sgx_data *data); +static void on3demand_stop(void); +static void on3demand_predict(void); + + +static struct sgxfreq_governor on3demand_gov = { + .name = "on3demand", + .gov_start = on3demand_start, + .gov_stop = on3demand_stop, + .sgx_frame_done = on3demand_predict +}; + +static struct on3demand_data { + unsigned int load; + unsigned int up_threshold; + unsigned int down_threshold; + unsigned int history_size; + unsigned long prev_total_idle; + unsigned long prev_total_active; + unsigned int low_load_cnt; + struct mutex mutex; +} odd; + +#define ON3DEMAND_DEFAULT_UP_THRESHOLD 80 +#define ON3DEMAND_DEFAULT_DOWN_THRESHOLD 30 +#define ON3DEMAND_DEFAULT_HISTORY_SIZE_THRESHOLD 5 + +/*FIXME: This should be dynamic and queried from platform */ +#define ON3DEMAND_FRAME_DONE_DEADLINE_MS 16 + + +/*********************** begin sysfs interface ***********************/ + +extern struct kobject *sgxfreq_kobj; + +static ssize_t show_down_threshold(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", odd.down_threshold); +} + +static ssize_t store_down_threshold(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + int ret; + unsigned int thres; + + ret = sscanf(buf, "%u", &thres); + if (ret != 1) + return -EINVAL; + + mutex_lock(&odd.mutex); + + if (thres <= 100) { + odd.down_threshold = thres; + odd.low_load_cnt = 0; + } else { + return -EINVAL; + } + + mutex_unlock(&odd.mutex); + + return count; +} + +static ssize_t show_up_threshold(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", odd.up_threshold); +} + +static ssize_t store_up_threshold(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + int ret; + unsigned int thres; + + ret = sscanf(buf, "%u", &thres); + if (ret != 1) + return -EINVAL; + + 
mutex_lock(&odd.mutex); + + if (thres <= 100) { + odd.up_threshold = thres; + odd.low_load_cnt = 0; + } else { + return -EINVAL; + } + + mutex_unlock(&odd.mutex); + + return count; +} + +static ssize_t show_history_size(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", odd.history_size); +} + +static ssize_t store_history_size(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + int ret; + unsigned int size; + + ret = sscanf(buf, "%u", &size); + if (ret != 1) + return -EINVAL; + + mutex_lock(&odd.mutex); + + if (size >= 1) { + odd.history_size = size; + odd.low_load_cnt = 0; + } else { + return -EINVAL; + } + + mutex_unlock(&odd.mutex); + + return count; +} + +static ssize_t show_load(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", odd.load); +} + +static DEVICE_ATTR(down_threshold, 0644, + show_down_threshold, store_down_threshold); +static DEVICE_ATTR(up_threshold, 0644, + show_up_threshold, store_up_threshold); +static DEVICE_ATTR(history_size, 0644, + show_history_size, store_history_size); +static DEVICE_ATTR(load, 0444, + show_load, NULL); + +static struct attribute *on3demand_attributes[] = { + &dev_attr_down_threshold.attr, + &dev_attr_up_threshold.attr, + &dev_attr_history_size.attr, + &dev_attr_load.attr, + NULL +}; + +static struct attribute_group on3demand_attr_group = { + .attrs = on3demand_attributes, + .name = "on3demand", +}; +/************************ end sysfs interface ************************/ + +int on3demand_init(void) +{ + int ret; + + mutex_init(&odd.mutex); + + ret = sgxfreq_register_governor(&on3demand_gov); + if (ret) + return ret; + + return 0; +} + +int on3demand_deinit(void) +{ + return 0; +} + +static int on3demand_start(struct sgxfreq_sgx_data *data) +{ + int ret; + + odd.load = 0; + odd.up_threshold = ON3DEMAND_DEFAULT_UP_THRESHOLD; + odd.down_threshold = ON3DEMAND_DEFAULT_DOWN_THRESHOLD; + 
odd.history_size = ON3DEMAND_DEFAULT_HISTORY_SIZE_THRESHOLD; + odd.prev_total_active = 0; + odd.prev_total_idle = 0; + odd.low_load_cnt = 0; + + ret = sysfs_create_group(sgxfreq_kobj, &on3demand_attr_group); + if (ret) + return ret; + + return 0; +} + +static void on3demand_stop(void) +{ + sysfs_remove_group(sgxfreq_kobj, &on3demand_attr_group); +} + +static void on3demand_predict(void) +{ + static unsigned short first_sample = 1; + unsigned long total_active, delta_active; + unsigned long total_idle, delta_idle; + unsigned long freq; + + if (first_sample == 1) { + first_sample = 0; + odd.prev_total_active = sgxfreq_get_total_active_time(); + odd.prev_total_idle = sgxfreq_get_total_idle_time(); + return; + } + + /* Sample new active and idle times */ + total_active = sgxfreq_get_total_active_time(); + total_idle = sgxfreq_get_total_idle_time(); + + /* Compute load */ + delta_active = __delta32(total_active, odd.prev_total_active); + delta_idle = __delta32(total_idle, odd.prev_total_idle); + + /* + * If SGX was active for longer than frame display time (1/fps), + * scale to highest possible frequency. 
+ */ + if (delta_active > ON3DEMAND_FRAME_DONE_DEADLINE_MS) { + odd.low_load_cnt = 0; + sgxfreq_set_freq_request(sgxfreq_get_freq_max()); + } + + if ((delta_active + delta_idle)) + odd.load = (100 * delta_active / (delta_active + delta_idle)); + odd.prev_total_active = total_active; + odd.prev_total_idle = total_idle; + + /* Scale GPU frequency on purpose */ + if (odd.load >= odd.up_threshold) { + odd.low_load_cnt = 0; + sgxfreq_set_freq_request(sgxfreq_get_freq_max()); + } else if (odd.load <= odd.down_threshold) { + if (odd.low_load_cnt == odd.history_size) { + /* Convert load to frequency */ + freq = (sgxfreq_get_freq() * odd.load) / 100; + sgxfreq_set_freq_request(freq); + odd.low_load_cnt = 0; + } else { + odd.low_load_cnt++; + } + } else { + odd.low_load_cnt = 0; + } +} diff --git a/sgx_km/eurasia_km/services4/system/omap/sgxfreq_onoff.c b/sgx_km/eurasia_km/services4/system/omap/sgxfreq_onoff.c new file mode 100644 index 0000000..40f33dd --- /dev/null +++ b/sgx_km/eurasia_km/services4/system/omap/sgxfreq_onoff.c @@ -0,0 +1,205 @@ +/* + * Copyright (C) 2012 Texas Instruments, Inc + * +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ + +#include +#include "sgxfreq.h" + +static int onoff_start(struct sgxfreq_sgx_data *data); +static void onoff_stop(void); +static void onoff_sgx_clk_on(void); +static void onoff_sgx_clk_off(void); + +static struct onoff_data { + unsigned long freq_off; + unsigned long freq_on; + struct mutex mutex; + bool sgx_clk_on; +} ood; + +static struct sgxfreq_governor onoff_gov = { + .name = "onoff", + .gov_start = onoff_start, + .gov_stop = onoff_stop, + .sgx_clk_on = onoff_sgx_clk_on, + .sgx_clk_off = onoff_sgx_clk_off, +}; + +/*********************** begin sysfs interface ***********************/ + +extern struct kobject *sgxfreq_kobj; + +static ssize_t show_freq_on(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return sprintf(buf, "%lu\n", ood.freq_on); +} + +static ssize_t store_freq_on(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int ret; + unsigned long freq; + + ret = sscanf(buf, "%lu", &freq); + if (ret != 1) + return -EINVAL; + + freq = sgxfreq_get_freq_ceil(freq); + + mutex_lock(&ood.mutex); + + ood.freq_on = freq; + if (ood.sgx_clk_on) + sgxfreq_set_freq_request(ood.freq_on); + + mutex_unlock(&ood.mutex); + + return count; +} + +static ssize_t show_freq_off(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return sprintf(buf, "%lu\n", ood.freq_off); +} + +static ssize_t store_freq_off(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int ret; + unsigned long freq; + + ret = sscanf(buf, "%lu", &freq); + if (ret != 1) + return -EINVAL; + + freq = sgxfreq_get_freq_floor(freq); + + mutex_lock(&ood.mutex); + + ood.freq_off = freq; + if (!ood.sgx_clk_on) + sgxfreq_set_freq_request(ood.freq_off); + + mutex_unlock(&ood.mutex); + + return count; +} +static DEVICE_ATTR(freq_on, 0644, show_freq_on, store_freq_on); +static DEVICE_ATTR(freq_off, 0644, show_freq_off, store_freq_off); + +static struct attribute *onoff_attributes[] = { + 
&dev_attr_freq_on.attr, + &dev_attr_freq_off.attr, + NULL +}; + +static struct attribute_group onoff_attr_group = { + .attrs = onoff_attributes, + .name = "onoff", +}; + +/************************ end sysfs interface ************************/ + +int onoff_init(void) +{ + int ret; + + mutex_init(&ood.mutex); + + ret = sgxfreq_register_governor(&onoff_gov); + if (ret) + return ret; + + ood.freq_off = sgxfreq_get_freq_min(); + ood.freq_on = sgxfreq_get_freq_max(); + + return 0; +} + +int onoff_deinit(void) +{ + return 0; +} + +static int onoff_start(struct sgxfreq_sgx_data *data) +{ + int ret; + + ood.sgx_clk_on = data->clk_on; + + ret = sysfs_create_group(sgxfreq_kobj, &onoff_attr_group); + if (ret) + return ret; + + if (ood.sgx_clk_on) + sgxfreq_set_freq_request(ood.freq_on); + else + sgxfreq_set_freq_request(ood.freq_off); + + return 0; +} + +static void onoff_stop(void) +{ + sysfs_remove_group(sgxfreq_kobj, &onoff_attr_group); +} + +static void onoff_sgx_clk_on(void) +{ + mutex_lock(&ood.mutex); + + ood.sgx_clk_on = true; + sgxfreq_set_freq_request(ood.freq_on); + + mutex_unlock(&ood.mutex); +} + +static void onoff_sgx_clk_off(void) +{ + mutex_lock(&ood.mutex); + + ood.sgx_clk_on = false; + sgxfreq_set_freq_request(ood.freq_off); + + mutex_unlock(&ood.mutex); +} + diff --git a/sgx_km/eurasia_km/services4/system/omap/sgxfreq_userspace.c b/sgx_km/eurasia_km/services4/system/omap/sgxfreq_userspace.c new file mode 100644 index 0000000..c7c9aec --- /dev/null +++ b/sgx_km/eurasia_km/services4/system/omap/sgxfreq_userspace.c @@ -0,0 +1,149 @@ +/* + * Copyright (C) 2012 Texas Instruments, Inc + * +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ + +#include +#include "sgxfreq.h" + + +static int userspace_start(struct sgxfreq_sgx_data *data); +static void userspace_stop(void); + + +static struct sgxfreq_governor userspace_gov = { + .name = "userspace", + .gov_start = userspace_start, + .gov_stop = userspace_stop, +}; + + +static struct userspace_data { + unsigned long freq_user; /* in Hz */ +} usd; + + +/*********************** begin sysfs interface ***********************/ + +extern struct kobject *sgxfreq_kobj; + + +static ssize_t show_frequency_set(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return sprintf(buf, "%lu\n", usd.freq_user); +} + + +static ssize_t store_frequency_set(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + int ret; + unsigned long freq; + + ret = sscanf(buf, "%lu", &freq); + if (ret != 1) + return -EINVAL; + + if (freq > sgxfreq_get_freq_max()) + freq = sgxfreq_get_freq_max(); + usd.freq_user = sgxfreq_set_freq_request(freq); + trace_printk("USERSPACE: new freq=%luHz.\n", usd.freq_user); + + return count; +} + + +static DEVICE_ATTR(frequency_set, 0644, + show_frequency_set, store_frequency_set); + + +static struct attribute *userspace_attributes[] = { + &dev_attr_frequency_set.attr, + NULL +}; + + +static struct attribute_group userspace_attr_group = { + .attrs = userspace_attributes, + .name = "userspace", +}; + +/************************ end sysfs interface ************************/ + + +int userspace_init(void) +{ + int ret; + + ret = sgxfreq_register_governor(&userspace_gov); + if (ret) + return ret; + return 0; +} + + +int userspace_deinit(void) +{ + return 0; +} + + +static int userspace_start(struct sgxfreq_sgx_data *data) +{ + int ret; + + usd.freq_user = sgxfreq_get_freq(); + + ret = sysfs_create_group(sgxfreq_kobj, &userspace_attr_group); + if (ret) + return ret; + + trace_printk("USERSPACE: started.\n"); + + return 0; +} + + +static void userspace_stop(void) +{ + sysfs_remove_group(sgxfreq_kobj, 
&userspace_attr_group); + + trace_printk("USERSPACE: stopped.\n"); +} diff --git a/sgx_km/eurasia_km/services4/system/omap/sysconfig.c b/sgx_km/eurasia_km/services4/system/omap/sysconfig.c new file mode 100644 index 0000000..3ebb7c8 --- /dev/null +++ b/sgx_km/eurasia_km/services4/system/omap/sysconfig.c @@ -0,0 +1,1267 @@ +/*************************************************************************/ /*! +@Title System Configuration +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description System Configuration functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. 
+ +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "sysconfig.h" +#include "services_headers.h" +#include "kerneldisplay.h" +#include "oemfuncs.h" +#include "sgxinfo.h" +#include "sgxinfokm.h" +#include "syslocal.h" + +#include "ocpdefs.h" + +/* top level system data anchor point*/ +SYS_DATA* gpsSysData = (SYS_DATA*)IMG_NULL; +SYS_DATA gsSysData; + +static SYS_SPECIFIC_DATA gsSysSpecificData; +SYS_SPECIFIC_DATA *gpsSysSpecificData; + +/* SGX structures */ +static IMG_UINT32 gui32SGXDeviceID; +static SGX_DEVICE_MAP gsSGXDeviceMap; +static PVRSRV_DEVICE_NODE *gpsSGXDevNode; + + +#if defined(NO_HARDWARE) || defined(SGX_OCP_REGS_ENABLED) +static IMG_CPU_VIRTADDR gsSGXRegsCPUVAddr; +#endif + +#if defined(PVR_LINUX_DYNAMIC_SGX_RESOURCE_INFO) +extern struct platform_device *gpsPVRLDMDev; +#endif + +IMG_UINT32 PVRSRV_BridgeDispatchKM(IMG_UINT32 Ioctl, + IMG_BYTE *pInBuf, + IMG_UINT32 InBufLen, + IMG_BYTE *pOutBuf, + IMG_UINT32 OutBufLen, + IMG_UINT32 *pdwBytesTransferred); + +#if defined(SGX_OCP_REGS_ENABLED) + +static IMG_CPU_VIRTADDR gpvOCPRegsLinAddr; + +static PVRSRV_ERROR EnableSGXClocksWrap(SYS_DATA *psSysData) +{ + PVRSRV_ERROR eError = EnableSGXClocks(psSysData); + +#if !defined(SGX_OCP_NO_INT_BYPASS) + if(eError == PVRSRV_OK) + { + OSWriteHWReg(gpvOCPRegsLinAddr, EUR_CR_OCP_SYSCONFIG, 0x14); + 
OSWriteHWReg(gpvOCPRegsLinAddr, EUR_CR_OCP_DEBUG_CONFIG, EUR_CR_OCP_DEBUG_CONFIG_THALIA_INT_BYPASS_MASK); + } +#endif + return eError; +} + +#else /* defined(SGX_OCP_REGS_ENABLED) */ + +static INLINE PVRSRV_ERROR EnableSGXClocksWrap(SYS_DATA *psSysData) +{ + return EnableSGXClocks(psSysData); +} + +#endif /* defined(SGX_OCP_REGS_ENABLED) */ + +static INLINE PVRSRV_ERROR EnableSystemClocksWrap(SYS_DATA *psSysData) +{ + PVRSRV_ERROR eError = EnableSystemClocks(psSysData); + +#if !defined(SUPPORT_ACTIVE_POWER_MANAGEMENT) + if(eError == PVRSRV_OK) + { + /* + * The SGX Clocks are enabled separately if active power + * management is enabled. + */ + eError = EnableSGXClocksWrap(psSysData); + if (eError != PVRSRV_OK) + { + DisableSystemClocks(psSysData); + } + } +#endif + + return eError; +} + +/*! +****************************************************************************** + + @Function SysLocateDevices + + @Description Specifies devices in the systems memory map + + @Input psSysData - sys data + + @Return PVRSRV_ERROR + +******************************************************************************/ +static PVRSRV_ERROR SysLocateDevices(SYS_DATA *psSysData) +{ +#if defined(NO_HARDWARE) + PVRSRV_ERROR eError; + IMG_CPU_PHYADDR sCpuPAddr; +#else +#if defined(PVR_LINUX_DYNAMIC_SGX_RESOURCE_INFO) + struct resource *dev_res; + int dev_irq; +#endif +#endif + + PVR_UNREFERENCED_PARAMETER(psSysData); + + /* SGX Device: */ + gsSGXDeviceMap.ui32Flags = 0x0; + +#if defined(NO_HARDWARE) + /* + * For no hardware, allocate some contiguous memory for the + * register block. 
+ */ + + /* Registers */ + gsSGXDeviceMap.ui32RegsSize = SYS_OMAP_SGX_REGS_SIZE; + + eError = OSBaseAllocContigMemory(gsSGXDeviceMap.ui32RegsSize, + &gsSGXRegsCPUVAddr, + &sCpuPAddr); + if(eError != PVRSRV_OK) + { + return eError; + } + gsSGXDeviceMap.sRegsCpuPBase = sCpuPAddr; + gsSGXDeviceMap.sRegsSysPBase = SysCpuPAddrToSysPAddr(gsSGXDeviceMap.sRegsCpuPBase); +#if defined(__linux__) + /* Indicate the registers are already mapped */ + gsSGXDeviceMap.pvRegsCpuVBase = gsSGXRegsCPUVAddr; +#else + /* + * FIXME: Could we just use the virtual address returned by + * OSBaseAllocContigMemory? + */ + gsSGXDeviceMap.pvRegsCpuVBase = IMG_NULL; +#endif + + OSMemSet(gsSGXRegsCPUVAddr, 0, gsSGXDeviceMap.ui32RegsSize); + + /* + device interrupt IRQ + Note: no interrupts available on no hardware system + */ + gsSGXDeviceMap.ui32IRQ = 0; + +#else /* defined(NO_HARDWARE) */ +#if defined(PVR_LINUX_DYNAMIC_SGX_RESOURCE_INFO) + /* get the resource and IRQ through platform resource API */ + dev_res = platform_get_resource(gpsPVRLDMDev, IORESOURCE_MEM, 0); + if (dev_res == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: platform_get_resource failed", __FUNCTION__)); + return PVRSRV_ERROR_INVALID_DEVICE; + } + + dev_irq = platform_get_irq(gpsPVRLDMDev, 0); + if (dev_irq < 0) + { + PVR_DPF((PVR_DBG_ERROR, "%s: platform_get_irq failed (%d)", __FUNCTION__, -dev_irq)); + return PVRSRV_ERROR_INVALID_DEVICE; + } + + gsSGXDeviceMap.sRegsSysPBase.uiAddr = dev_res->start; + gsSGXDeviceMap.sRegsCpuPBase = + SysSysPAddrToCpuPAddr(gsSGXDeviceMap.sRegsSysPBase); + PVR_TRACE(("SGX register base: 0x%lx", (unsigned long)gsSGXDeviceMap.sRegsCpuPBase.uiAddr)); + +#if defined(SGX544) && defined(SGX_FEATURE_MP) + /* FIXME: Workaround due to HWMOD change. Otherwise this region is too small. 
*/ + gsSGXDeviceMap.ui32RegsSize = SYS_OMAP_SGX_REGS_SIZE; +#else + gsSGXDeviceMap.ui32RegsSize = (unsigned int)(dev_res->end - dev_res->start); +#endif + PVR_TRACE(("SGX register size: %d",gsSGXDeviceMap.ui32RegsSize)); + + gsSGXDeviceMap.ui32IRQ = dev_irq; + PVR_TRACE(("SGX IRQ: %d", gsSGXDeviceMap.ui32IRQ)); +#else /* defined(PVR_LINUX_DYNAMIC_SGX_RESOURCE_INFO) */ + gsSGXDeviceMap.sRegsSysPBase.uiAddr = SYS_OMAP_SGX_REGS_SYS_PHYS_BASE; + gsSGXDeviceMap.sRegsCpuPBase = SysSysPAddrToCpuPAddr(gsSGXDeviceMap.sRegsSysPBase); + gsSGXDeviceMap.ui32RegsSize = SYS_OMAP_SGX_REGS_SIZE; + + gsSGXDeviceMap.ui32IRQ = SYS_OMAP_SGX_IRQ; + +#endif /* defined(PVR_LINUX_DYNAMIC_SGX_RESOURCE_INFO) */ +#if defined(SGX_OCP_REGS_ENABLED) + gsSGXRegsCPUVAddr = OSMapPhysToLin(gsSGXDeviceMap.sRegsCpuPBase, + gsSGXDeviceMap.ui32RegsSize, + PVRSRV_HAP_UNCACHED|PVRSRV_HAP_KERNEL_ONLY, + IMG_NULL); + + if (gsSGXRegsCPUVAddr == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR,"SysLocateDevices: Failed to map SGX registers")); + return PVRSRV_ERROR_BAD_MAPPING; + } + + /* Indicate the registers are already mapped */ + gsSGXDeviceMap.pvRegsCpuVBase = gsSGXRegsCPUVAddr; + gpvOCPRegsLinAddr = gsSGXRegsCPUVAddr; +#endif +#endif /* defined(NO_HARDWARE) */ + +#if defined(PDUMP) + { + /* initialise memory region name for pdumping */ + static IMG_CHAR pszPDumpDevName[] = "SGXMEM"; + gsSGXDeviceMap.pszPDumpDevName = pszPDumpDevName; + } +#endif + + /* add other devices here: */ + + + return PVRSRV_OK; +} + + +/*! 
+****************************************************************************** + + @Function SysCreateVersionString + + @Description Read the version string + + @Return IMG_CHAR * : Version string + +******************************************************************************/ +static IMG_CHAR *SysCreateVersionString(void) +{ + static IMG_CHAR aszVersionString[100]; + IMG_UINT32 ui32MaxStrLen; + SYS_DATA *psSysData; + IMG_UINT32 ui32SGXRevision; + IMG_INT32 i32Count; + + SysAcquireData(&psSysData); + + ui32SGXRevision = SGX_CORE_REV; + ui32MaxStrLen = 99; + + i32Count = OSSNPrintf(aszVersionString, ui32MaxStrLen + 1, + "SGX revision = %u", + (IMG_UINT)(ui32SGXRevision)); + if(i32Count == -1) + { + return IMG_NULL; + } + + return aszVersionString; +} + + +/*! +****************************************************************************** + + @Function SysInitialise + + @Description Initialises kernel services at 'driver load' time + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR SysInitialise(IMG_VOID) +{ + IMG_UINT32 i; + PVRSRV_ERROR eError; + PVRSRV_DEVICE_NODE *psDeviceNode; +#if !defined(PVR_NO_OMAP_TIMER) + IMG_CPU_PHYADDR TimerRegPhysBase; +#endif +#if !defined(SGX_DYNAMIC_TIMING_INFO) + SGX_TIMING_INFORMATION* psTimingInfo; +#endif + gpsSysData = &gsSysData; + OSMemSet(gpsSysData, 0, sizeof(SYS_DATA)); + + gpsSysSpecificData = &gsSysSpecificData; + OSMemSet(gpsSysSpecificData, 0, sizeof(SYS_SPECIFIC_DATA)); + + gpsSysData->pvSysSpecificData = gpsSysSpecificData; + + eError = OSInitEnvData(&gpsSysData->pvEnvSpecificData); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to setup env structure")); + (IMG_VOID)SysDeinitialise(gpsSysData); + gpsSysData = IMG_NULL; + return eError; + } + SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_ENVDATA); + + gpsSysData->ui32NumDevices = SYS_DEVICE_COUNT; + + /* init device ID's */ + for(i=0; 
isDeviceID[i].uiID = i; + gpsSysData->sDeviceID[i].bInUse = IMG_FALSE; + } + + gpsSysData->psDeviceNodeList = IMG_NULL; + gpsSysData->psQueueList = IMG_NULL; + + eError = SysInitialiseCommon(gpsSysData); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed in SysInitialiseCommon")); + (IMG_VOID)SysDeinitialise(gpsSysData); + gpsSysData = IMG_NULL; + return eError; + } + +#if !defined(SGX_DYNAMIC_TIMING_INFO) + /* Set up timing information*/ + psTimingInfo = &gsSGXDeviceMap.sTimingInfo; + psTimingInfo->ui32CoreClockSpeed = SYS_SGX_CLOCK_SPEED; + psTimingInfo->ui32HWRecoveryFreq = SYS_SGX_HWRECOVERY_TIMEOUT_FREQ; +#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT) + psTimingInfo->bEnableActivePM = IMG_TRUE; +#else + psTimingInfo->bEnableActivePM = IMG_FALSE; +#endif /* SUPPORT_ACTIVE_POWER_MANAGEMENT */ + psTimingInfo->ui32ActivePowManLatencyms = SYS_SGX_ACTIVE_POWER_LATENCY_MS; + psTimingInfo->ui32uKernelFreq = SYS_SGX_PDS_TIMER_FREQ; +#endif + + /* + Setup the Source Clock Divider value + */ + gpsSysSpecificData->ui32SrcClockDiv = 3; + + /* + Locate the devices within the system, specifying + the physical addresses of each devices components + (regs, mem, ports etc.) 
+ */ + eError = SysLocateDevices(gpsSysData); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to locate devices")); + (IMG_VOID)SysDeinitialise(gpsSysData); + gpsSysData = IMG_NULL; + return eError; + } + SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LOCATEDEV); + + eError = SysPMRuntimeRegister(); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to register with OSPM!")); + (IMG_VOID)SysDeinitialise(gpsSysData); + gpsSysData = IMG_NULL; + return eError; + } + SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_PM_RUNTIME); + + eError = SysDvfsInitialize(gpsSysSpecificData); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to initialize DVFS")); + (IMG_VOID)SysDeinitialise(gpsSysData); + gpsSysData = IMG_NULL; + return eError; + } + SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_DVFS_INIT); + + /* + Register devices with the system + This also sets up their memory maps/heaps + */ + eError = PVRSRVRegisterDevice(gpsSysData, SGXRegisterDevice, + DEVICE_SGX_INTERRUPT, &gui32SGXDeviceID); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to register device!")); + (IMG_VOID)SysDeinitialise(gpsSysData); + gpsSysData = IMG_NULL; + return eError; + } + SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_REGDEV); + + /* + Once all devices are registered, specify the backing store + and, if required, customise the memory heap config + */ + psDeviceNode = gpsSysData->psDeviceNodeList; + while(psDeviceNode) + { + /* perform any OEM SOC address space customisations here */ + switch(psDeviceNode->sDevId.eDeviceType) + { + case PVRSRV_DEVICE_TYPE_SGX: + { + DEVICE_MEMORY_INFO *psDevMemoryInfo; + DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap; + + /* + specify the backing store to use for the devices MMU PT/PDs + - the PT/PDs are always UMA in this system + */ + psDeviceNode->psLocalDevMemArena = 
IMG_NULL; + + /* useful pointers */ + psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo; + psDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap; + + /* specify the backing store for all SGX heaps */ + for(i=0; i<psDevMemoryInfo->ui32HeapCount; i++) + { + psDeviceMemoryHeap[i].ui32Attribs |= PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG; + } + + gpsSGXDevNode = psDeviceNode; + gsSysSpecificData.psSGXDevNode = psDeviceNode; + + break; + } + default: + PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to find SGX device node!")); + return PVRSRV_ERROR_INIT_FAILURE; + } + + /* advance to next device */ + psDeviceNode = psDeviceNode->psNext; + } + + eError = EnableSystemClocksWrap(gpsSysData); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to Enable system clocks (%d)", eError)); + (IMG_VOID)SysDeinitialise(gpsSysData); + gpsSysData = IMG_NULL; + return eError; + } + SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS); +#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT) + eError = EnableSGXClocksWrap(gpsSysData); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to Enable SGX clocks (%d)", eError)); + (IMG_VOID)SysDeinitialise(gpsSysData); + gpsSysData = IMG_NULL; + return eError; + } +#endif /* SUPPORT_ACTIVE_POWER_MANAGEMENT */ + + eError = PVRSRVInitialiseDevice(gui32SGXDeviceID); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to initialise device!")); + (IMG_VOID)SysDeinitialise(gpsSysData); + gpsSysData = IMG_NULL; + return eError; + } + SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_INITDEV); + +#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT) + /* SGX defaults to D3 power state */ + DisableSGXClocks(gpsSysData); +#endif /* SUPPORT_ACTIVE_POWER_MANAGEMENT */ + +#if !defined(PVR_NO_OMAP_TIMER) +#if defined(PVR_OMAP_TIMER_BASE_IN_SYS_SPEC_DATA) + TimerRegPhysBase = gsSysSpecificData.sTimerRegPhysBase; +#else + TimerRegPhysBase.uiAddr =
SYS_OMAP_GP11TIMER_REGS_SYS_PHYS_BASE; +#endif + gpsSysData->pvSOCTimerRegisterKM = IMG_NULL; + gpsSysData->hSOCTimerRegisterOSMemHandle = 0; + if (TimerRegPhysBase.uiAddr != 0) + { + OSReservePhys(TimerRegPhysBase, + 4, + PVRSRV_HAP_MULTI_PROCESS|PVRSRV_HAP_UNCACHED, + IMG_NULL, + (IMG_VOID **)&gpsSysData->pvSOCTimerRegisterKM, + &gpsSysData->hSOCTimerRegisterOSMemHandle); + } +#endif /* !defined(PVR_NO_OMAP_TIMER) */ + + + return PVRSRV_OK; +} + + +/*! +****************************************************************************** + + @Function SysFinalise + + @Description Final part of initialisation at 'driver load' time + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR SysFinalise(IMG_VOID) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + +#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT) + eError = EnableSGXClocksWrap(gpsSysData); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SysFinalise: Failed to Enable SGX clocks (%d)", eError)); + return eError; + } +#endif /* SUPPORT_ACTIVE_POWER_MANAGEMENT */ + + eError = OSInstallMISR(gpsSysData); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SysFinalise: Failed to install MISR")); + return eError; + } + SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_MISR); + +#if defined(SYS_USING_INTERRUPTS) + /* install a Device ISR */ + eError = OSInstallDeviceLISR(gpsSysData, gsSGXDeviceMap.ui32IRQ, "SGX ISR", gpsSGXDevNode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SysFinalise: Failed to install ISR")); + return eError; + } + SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LISR); +#if !defined(SUPPORT_ACTIVE_POWER_MANAGEMENT) + SysEnableSGXInterrupts(gpsSysData); +#endif +#endif /* defined(SYS_USING_INTERRUPTS) */ +#if defined(__linux__) || defined(__QNXNTO__) + /* Create a human readable version string for this system */ + gpsSysData->pszVersionString = SysCreateVersionString(); + if 
(!gpsSysData->pszVersionString) + { + PVR_DPF((PVR_DBG_ERROR,"SysFinalise: Failed to create a system version string")); + } + else + { + PVR_TRACE(("SysFinalise: Version string: %s", gpsSysData->pszVersionString)); + } +#endif + +#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT) + /* SGX defaults to D3 power state */ + DisableSGXClocks(gpsSysData); +#endif /* SUPPORT_ACTIVE_POWER_MANAGEMENT */ + + gpsSysSpecificData->bSGXInitComplete = IMG_TRUE; + + return eError; +} + + +/*! +****************************************************************************** + + @Function SysDeinitialise + + @Description De-initialises kernel services at 'driver unload' time + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR SysDeinitialise (SYS_DATA *psSysData) +{ + PVRSRV_ERROR eError; + + PVR_UNREFERENCED_PARAMETER(psSysData); + + if(gpsSysData->pvSOCTimerRegisterKM) + { + OSUnReservePhys(gpsSysData->pvSOCTimerRegisterKM, + 4, + PVRSRV_HAP_MULTI_PROCESS|PVRSRV_HAP_UNCACHED, + gpsSysData->hSOCTimerRegisterOSMemHandle); + } + + +#if defined(SYS_USING_INTERRUPTS) + if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LISR)) + { + eError = OSUninstallDeviceLISR(gpsSysData); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: OSUninstallDeviceLISR failed")); + return eError; + } + } +#endif + + if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_MISR)) + { + eError = OSUninstallMISR(gpsSysData); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: OSUninstallMISR failed")); + return eError; + } + } + + if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_INITDEV)) + { +#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT) + PVR_ASSERT(SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS)); + /* Reenable SGX clocks whilst SGX is being deinitialised. 
*/ + eError = EnableSGXClocksWrap(gpsSysData); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: EnableSGXClocks failed")); + return eError; + } +#endif /* SUPPORT_ACTIVE_POWER_MANAGEMENT */ + + /* Deinitialise SGX */ + eError = PVRSRVDeinitialiseDevice(gui32SGXDeviceID); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: failed to de-init the device")); + return eError; + } + } + + /* Disable system clocks. Must happen after last access to hardware */ + if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS)) + { + DisableSystemClocks(gpsSysData); + } + + if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_DVFS_INIT)) + { + eError = SysDvfsDeinitialize(gpsSysSpecificData); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: Failed to de-init DVFS")); + gpsSysData = IMG_NULL; + return eError; + } + } + + if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_PM_RUNTIME)) + { + eError = SysPMRuntimeUnregister(); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: Failed to unregister with OSPM!")); + gpsSysData = IMG_NULL; + return eError; + } + } + + if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_ENVDATA)) + { + eError = OSDeInitEnvData(gpsSysData->pvEnvSpecificData); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: failed to de-init env structure")); + return eError; + } + } + + SysDeinitialiseCommon(gpsSysData); + +#if defined(NO_HARDWARE) || defined(SGX_OCP_REGS_ENABLED) + if(gsSGXRegsCPUVAddr != IMG_NULL) + { +#if defined(NO_HARDWARE) + /* Free hardware resources. 
*/ + OSBaseFreeContigMemory(SYS_OMAP_SGX_REGS_SIZE, gsSGXRegsCPUVAddr, gsSGXDeviceMap.sRegsCpuPBase); +#else +#if defined(SGX_OCP_REGS_ENABLED) + OSUnMapPhysToLin(gsSGXRegsCPUVAddr, + gsSGXDeviceMap.ui32RegsSize, + PVRSRV_HAP_UNCACHED|PVRSRV_HAP_KERNEL_ONLY, + IMG_NULL); + + gpvOCPRegsLinAddr = IMG_NULL; +#endif +#endif /* defined(NO_HARDWARE) */ + gsSGXRegsCPUVAddr = IMG_NULL; + gsSGXDeviceMap.pvRegsCpuVBase = gsSGXRegsCPUVAddr; + } +#endif /* defined(NO_HARDWARE) || defined(SGX_OCP_REGS_ENABLED) */ + + + gpsSysSpecificData->ui32SysSpecificData = 0; + gpsSysSpecificData->bSGXInitComplete = IMG_FALSE; + + gpsSysData = IMG_NULL; + + return PVRSRV_OK; +} + + +/*! +****************************************************************************** + + @Function SysGetDeviceMemoryMap + + @Description returns a device address map for the specified device + + @Input eDeviceType - device type + @Input ppvDeviceMap - void ptr to receive device specific info. + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE eDeviceType, + IMG_VOID **ppvDeviceMap) +{ + + switch(eDeviceType) + { + case PVRSRV_DEVICE_TYPE_SGX: + { + /* just return a pointer to the structure */ + *ppvDeviceMap = (IMG_VOID*)&gsSGXDeviceMap; + + break; + } + default: + { + PVR_DPF((PVR_DBG_ERROR,"SysGetDeviceMemoryMap: unsupported device type")); + } + } + return PVRSRV_OK; +} + + +/*! +****************************************************************************** + @Function SysCpuPAddrToDevPAddr + + @Description Compute a device physical address from a cpu physical + address. Relevant when + + @Input cpu_paddr - cpu physical address. + @Input eDeviceType - device type required if DevPAddr + address spaces vary across devices + in the same system + @Return device physical address. 
+ +******************************************************************************/ +IMG_DEV_PHYADDR SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE eDeviceType, + IMG_CPU_PHYADDR CpuPAddr) +{ + IMG_DEV_PHYADDR DevPAddr; + + PVR_UNREFERENCED_PARAMETER(eDeviceType); + + /* Note: for UMA system we assume DevP == CpuP */ + DevPAddr.uiAddr = CpuPAddr.uiAddr; + + return DevPAddr; +} + +/*! +****************************************************************************** + @Function SysSysPAddrToCpuPAddr + + @Description Compute a cpu physical address from a system physical + address. + + @Input sys_paddr - system physical address. + @Return cpu physical address. + +******************************************************************************/ +IMG_CPU_PHYADDR SysSysPAddrToCpuPAddr (IMG_SYS_PHYADDR sys_paddr) +{ + IMG_CPU_PHYADDR cpu_paddr; + + /* This would only be an inequality if the CPU's MMU did not point to + sys address 0, ie. multi CPU system */ + cpu_paddr.uiAddr = sys_paddr.uiAddr; + return cpu_paddr; +} + +/*! +****************************************************************************** + @Function SysCpuPAddrToSysPAddr + + @Description Compute a system physical address from a cpu physical + address. + + @Input cpu_paddr - cpu physical address. + @Return device physical address. + +******************************************************************************/ +IMG_SYS_PHYADDR SysCpuPAddrToSysPAddr (IMG_CPU_PHYADDR cpu_paddr) +{ + IMG_SYS_PHYADDR sys_paddr; + + /* This would only be an inequality if the CPU's MMU did not point to + sys address 0, ie. multi CPU system */ + sys_paddr.uiAddr = cpu_paddr.uiAddr; + return sys_paddr; +} + + +/*! +****************************************************************************** + @Function SysSysPAddrToDevPAddr + + @Description Compute a device physical address from a system physical + address. + + @Input SysPAddr - system physical address. 
+ @Input eDeviceType - device type required if DevPAddr + address spaces vary across devices + in the same system + + @Return Device physical address. + +******************************************************************************/ +IMG_DEV_PHYADDR SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE eDeviceType, IMG_SYS_PHYADDR SysPAddr) +{ + IMG_DEV_PHYADDR DevPAddr; + + PVR_UNREFERENCED_PARAMETER(eDeviceType); + + /* Note: for UMA system we assume DevP == CpuP */ + DevPAddr.uiAddr = SysPAddr.uiAddr; + + return DevPAddr; +} + + +/*! +****************************************************************************** + @Function SysDevPAddrToSysPAddr + + @Description Compute a device physical address from a system physical + address. + + @Input DevPAddr - device physical address. + @Input eDeviceType - device type required if DevPAddr + address spaces vary across devices + in the same system + + @Return System physical address. + +******************************************************************************/ +IMG_SYS_PHYADDR SysDevPAddrToSysPAddr(PVRSRV_DEVICE_TYPE eDeviceType, IMG_DEV_PHYADDR DevPAddr) +{ + IMG_SYS_PHYADDR SysPAddr; + + PVR_UNREFERENCED_PARAMETER(eDeviceType); + + /* Note: for UMA system we assume DevP == SysP */ + SysPAddr.uiAddr = DevPAddr.uiAddr; + + return SysPAddr; +} + + +/***************************************************************************** + @Function SysRegisterExternalDevice + + @Description Called when a 3rd party device registers with services + + @Input psDeviceNode - the new device node. 
+ + @Return IMG_VOID +*****************************************************************************/ +IMG_VOID SysRegisterExternalDevice(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVR_UNREFERENCED_PARAMETER(psDeviceNode); +} + + +/***************************************************************************** + @Function SysRemoveExternalDevice + + @Description Called when a 3rd party device unregisters from services + + @Input psDeviceNode - the device node being removed. + + @Return IMG_VOID +*****************************************************************************/ +IMG_VOID SysRemoveExternalDevice(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVR_UNREFERENCED_PARAMETER(psDeviceNode); +} + +/*! +****************************************************************************** + @Function SysGetInterruptSource + + @Description Returns System specific information about the device(s) that + generated the interrupt in the system + + @Input psSysData + @Input psDeviceNode + + @Return System specific information indicating which device(s) + generated the interrupt + +******************************************************************************/ +IMG_UINT32 SysGetInterruptSource(SYS_DATA *psSysData, + PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVR_UNREFERENCED_PARAMETER(psSysData); +#if defined(NO_HARDWARE) + /* no interrupts in no_hw system just return all bits */ + return 0xFFFFFFFF; +#else + /* Not a shared irq, so we know this is an interrupt for this device */ + return psDeviceNode->ui32SOCInterruptBit; +#endif +} + + +/*! 
+****************************************************************************** + @Function SysClearInterrupts + + @Description Clears specified system interrupts + + @Input psSysData + @Input ui32ClearBits + + @Return IMG_VOID + +******************************************************************************/ +IMG_VOID SysClearInterrupts(SYS_DATA* psSysData, IMG_UINT32 ui32ClearBits) +{ + PVR_UNREFERENCED_PARAMETER(ui32ClearBits); + PVR_UNREFERENCED_PARAMETER(psSysData); +#if !defined(NO_HARDWARE) +#if defined(SGX_OCP_NO_INT_BYPASS) + OSWriteHWReg(gpvOCPRegsLinAddr, EUR_CR_OCP_IRQSTATUS_2, 0x1); +#endif + /* Flush posted writes */ + OSReadHWReg(((PVRSRV_SGXDEV_INFO *)gpsSGXDevNode->pvDevice)->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR); +#endif /* defined(NO_HARDWARE) */ +} + +#if defined(SGX_OCP_NO_INT_BYPASS) +/*! +****************************************************************************** + @Function SysEnableSGXInterrupts + + @Description Enables SGX interrupts + + @Input psSysData + + @Return IMG_VOID + +******************************************************************************/ +IMG_VOID SysEnableSGXInterrupts(SYS_DATA *psSysData) +{ + SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *)psSysData->pvSysSpecificData; + if (SYS_SPECIFIC_DATA_TEST(psSysSpecData, SYS_SPECIFIC_DATA_ENABLE_LISR) && !SYS_SPECIFIC_DATA_TEST(psSysSpecData, SYS_SPECIFIC_DATA_IRQ_ENABLED)) + { + OSWriteHWReg(gpvOCPRegsLinAddr, EUR_CR_OCP_IRQSTATUS_2, 0x1); + OSWriteHWReg(gpvOCPRegsLinAddr, EUR_CR_OCP_IRQENABLE_SET_2, 0x1); + SYS_SPECIFIC_DATA_SET(psSysSpecData, SYS_SPECIFIC_DATA_IRQ_ENABLED); + } +} + +/*! 
+****************************************************************************** + @Function SysDisableSGXInterrupts + + @Description Disables SGX interrupts + + @Input psSysData + + @Return IMG_VOID + +******************************************************************************/ +IMG_VOID SysDisableSGXInterrupts(SYS_DATA *psSysData) +{ + SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *)psSysData->pvSysSpecificData; + + if (SYS_SPECIFIC_DATA_TEST(psSysSpecData, SYS_SPECIFIC_DATA_IRQ_ENABLED)) + { + OSWriteHWReg(gpvOCPRegsLinAddr, EUR_CR_OCP_IRQENABLE_CLR_2, 0x1); + SYS_SPECIFIC_DATA_CLEAR(psSysSpecData, SYS_SPECIFIC_DATA_IRQ_ENABLED); + } +} +#endif /* defined(SGX_OCP_NO_INT_BYPASS) */ + +/*! +****************************************************************************** + + @Function SysSystemPrePowerState + + @Description Perform system-level processing required before a power transition + + @Input eNewPowerState : + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR SysSystemPrePowerState(PVRSRV_SYS_POWER_STATE eNewPowerState) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + if (eNewPowerState == PVRSRV_SYS_POWER_STATE_D3) + { + PVR_TRACE(("SysSystemPrePowerState: Entering state D3")); + +#if defined(SYS_USING_INTERRUPTS) + if (SYS_SPECIFIC_DATA_TEST(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LISR)) + { +#if defined(SYS_CUSTOM_POWERLOCK_WRAP) + IMG_BOOL bWrapped = WrapSystemPowerChange(&gsSysSpecificData); +#endif + eError = OSUninstallDeviceLISR(gpsSysData); +#if defined(SYS_CUSTOM_POWERLOCK_WRAP) + if (bWrapped) + { + UnwrapSystemPowerChange(&gsSysSpecificData); + } +#endif + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SysSystemPrePowerState: OSUninstallDeviceLISR failed (%d)", eError)); + return eError; + } + SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_UNINSTALL_LISR); + SYS_SPECIFIC_DATA_CLEAR(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LISR); + } +#endif + 
+ if (SYS_SPECIFIC_DATA_TEST(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS)) + { + DisableSystemClocks(gpsSysData); + + SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_DISABLE_SYSCLOCKS); + SYS_SPECIFIC_DATA_CLEAR(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS); + } + } + + return eError; +} + + +/*! +****************************************************************************** + + @Function SysSystemPostPowerState + + @Description Perform system-level processing required after a power transition + + @Input eNewPowerState : + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR SysSystemPostPowerState(PVRSRV_SYS_POWER_STATE eNewPowerState) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + if (eNewPowerState == PVRSRV_SYS_POWER_STATE_D0) + { + PVR_TRACE(("SysSystemPostPowerState: Entering state D0")); + + if (SYS_SPECIFIC_DATA_TEST(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_DISABLE_SYSCLOCKS)) + { + eError = EnableSystemClocksWrap(gpsSysData); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SysSystemPostPowerState: EnableSystemClocksWrap failed (%d)", eError)); + return eError; + } + SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS); + SYS_SPECIFIC_DATA_CLEAR(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_DISABLE_SYSCLOCKS); + } + +#if defined(SYS_USING_INTERRUPTS) + if (SYS_SPECIFIC_DATA_TEST(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_UNINSTALL_LISR)) + { +#if defined(SYS_CUSTOM_POWERLOCK_WRAP) + IMG_BOOL bWrapped = WrapSystemPowerChange(&gsSysSpecificData); +#endif + + eError = OSInstallDeviceLISR(gpsSysData, gsSGXDeviceMap.ui32IRQ, "SGX ISR", gpsSGXDevNode); +#if defined(SYS_CUSTOM_POWERLOCK_WRAP) + if (bWrapped) + { + UnwrapSystemPowerChange(&gsSysSpecificData); + } +#endif + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SysSystemPostPowerState: OSInstallDeviceLISR failed to install ISR (%d)", eError)); + return eError; + } + 
SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LISR); + SYS_SPECIFIC_DATA_CLEAR(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_UNINSTALL_LISR); + } +#endif + } + return eError; +} + + +/*! +****************************************************************************** + + @Function SysDevicePrePowerState + + @Description Perform system level processing required before a device power + transition + + @Input ui32DeviceIndex : + @Input eNewPowerState : + @Input eCurrentPowerState : + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR SysDevicePrePowerState(IMG_UINT32 ui32DeviceIndex, + PVRSRV_DEV_POWER_STATE eNewPowerState, + PVRSRV_DEV_POWER_STATE eCurrentPowerState) +{ + PVR_UNREFERENCED_PARAMETER(eCurrentPowerState); + + if (ui32DeviceIndex != gui32SGXDeviceID) + { + return PVRSRV_OK; + } + +#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT) + if (eNewPowerState == PVRSRV_DEV_POWER_STATE_OFF) + { + PVR_DPF((PVR_DBG_MESSAGE, "SysDevicePrePowerState: SGX Entering state D3")); + DisableSGXClocks(gpsSysData); + } +#else /* SUPPORT_ACTIVE_POWER_MANAGEMENT */ + PVR_UNREFERENCED_PARAMETER(eNewPowerState ); +#endif /* SUPPORT_ACTIVE_POWER_MANAGEMENT */ + return PVRSRV_OK; +} + + +/*! 
+****************************************************************************** + + @Function SysDevicePostPowerState + + @Description Perform system level processing required after a device power + transition + + @Input ui32DeviceIndex : + @Input eNewPowerState : + @Input eCurrentPowerState : + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR SysDevicePostPowerState(IMG_UINT32 ui32DeviceIndex, + PVRSRV_DEV_POWER_STATE eNewPowerState, + PVRSRV_DEV_POWER_STATE eCurrentPowerState) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + PVR_UNREFERENCED_PARAMETER(eNewPowerState); + + if (ui32DeviceIndex != gui32SGXDeviceID) + { + return eError; + } + +#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT) + if (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_OFF) + { + PVR_DPF((PVR_DBG_MESSAGE, "SysDevicePostPowerState: SGX Leaving state D3")); + eError = EnableSGXClocksWrap(gpsSysData); + } +#else /* SUPPORT_ACTIVE_POWER_MANAGEMENT */ + PVR_UNREFERENCED_PARAMETER(eCurrentPowerState); +#endif /* SUPPORT_ACTIVE_POWER_MANAGEMENT */ + + return eError; +} + +/***************************************************************************** + @Function SysOEMFunction + + @Description marshalling function for custom OEM functions + + @Input ui32ID - function ID + @Input pvIn - in data + @Output pvOut - out data + + @Return PVRSRV_ERROR +*****************************************************************************/ +PVRSRV_ERROR SysOEMFunction ( IMG_UINT32 ui32ID, + IMG_VOID *pvIn, + IMG_UINT32 ulInSize, + IMG_VOID *pvOut, + IMG_UINT32 ulOutSize) +{ + PVR_UNREFERENCED_PARAMETER(ui32ID); + PVR_UNREFERENCED_PARAMETER(pvIn); + PVR_UNREFERENCED_PARAMETER(ulInSize); + PVR_UNREFERENCED_PARAMETER(pvOut); + PVR_UNREFERENCED_PARAMETER(ulOutSize); + +#if !defined(__QNXNTO__) + if ((ui32ID == OEM_GET_EXT_FUNCS) && + (ulOutSize == sizeof(PVRSRV_DC_OEM_JTABLE))) + { + PVRSRV_DC_OEM_JTABLE *psOEMJTable = (PVRSRV_DC_OEM_JTABLE*) pvOut; + 
psOEMJTable->pfnOEMBridgeDispatch = &PVRSRV_BridgeDispatchKM; + return PVRSRV_OK; + } +#endif + + return PVRSRV_ERROR_INVALID_PARAMS; +} +/****************************************************************************** + End of file (sysconfig.c) +******************************************************************************/ diff --git a/sgx_km/eurasia_km/services4/system/omap/sysconfig.h b/sgx_km/eurasia_km/services4/system/omap/sysconfig.h new file mode 100644 index 0000000..54be563 --- /dev/null +++ b/sgx_km/eurasia_km/services4/system/omap/sysconfig.h @@ -0,0 +1,113 @@ +/*************************************************************************/ /*! +@Title System Description Header +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description This header provides system-specific declarations and macros +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(__SOCCONFIG_H__) +#define __SOCCONFIG_H__ + +#if defined(VS_PRODUCT_VERSION) && VS_PRODUCT_VERSION == 5 + #define VS_PRODUCT_NAME "OMAP5" + #define SYS_SGX_HWRECOVERY_TIMEOUT_FREQ (100) // 10ms (100hz) +#else + #define VS_PRODUCT_NAME "OMAP4" + #define SYS_SGX_HWRECOVERY_TIMEOUT_FREQ (50) // 20ms (50hz) +#endif + +#define SYS_SGX_PDS_TIMER_FREQ (1000) // 1ms (1000hz) + +/* Allow the AP latency to be overridden in the build config */ +#if !defined(SYS_SGX_ACTIVE_POWER_LATENCY_MS) +#define SYS_SGX_ACTIVE_POWER_LATENCY_MS (2) +#endif + + +#define SYS_OMAP_SGX_REGS_SYS_PHYS_BASE 0x56000000 +#define SYS_OMAP_SGX_REGS_SIZE 0xFFFF + +#define SYS_OMAP_SGX_IRQ 53 /* OMAP4 IRQ's are offset by 32 */ + +#define SYS_OMAP_DSS_REGS_SYS_PHYS_BASE 0x58000000 +#define SYS_OMAP_DSS_REGS_SIZE 0x7000 + +#define SYS_OMAP_DSS_HDMI_INTERRUPT_STATUS_REG 0x6028 +#define SYS_OMAP_DSS_HDMI_INTERRUPT_ENABLE_REG 0x602c + +#define SYS_OMAP_DSS_HDMI_INTERRUPT_VSYNC_ENABLE_MASK 0x10000 +#define SYS_OMAP_DSS_HDMI_INTERRUPT_VSYNC_STATUS_MASK 0x10000 + +#define SYS_OMAP_DSS_LCD_INTERRUPT_STATUS_REG 0x1018 +#define SYS_OMAP_DSS_LCD_INTERRUPT_ENABLE_REG 0x101c + +#define SYS_OMAP_DSS_LCD_INTERRUPT_VSYNC_ENABLE_MASK 0x40002 +#define SYS_OMAP_DSS_LCD_INTERRUPT_VSYNC_STATUS_MASK 0x40002 + + +#define SYS_OMAP_GP11TIMER_ENABLE_SYS_PHYS_BASE 0x48088038 +#define SYS_OMAP_GP11TIMER_REGS_SYS_PHYS_BASE 0x4808803C +#define SYS_OMAP_GP11TIMER_TSICR_SYS_PHYS_BASE 0x48088054 + +/* Interrupt bits */ +#define DEVICE_SGX_INTERRUPT (1<<0) +#define DEVICE_MSVDX_INTERRUPT (1<<1) +#define DEVICE_DISP_INTERRUPT (1<<2) + +#if defined(__linux__) +#if defined(PVR_LDM_DEVICE_TREE) +#define SYS_SGX_DEV_NAME "ti,dra7-sgx544" +#else +/* + * Recent OMAP4 kernels register SGX as platform device "omap_gpu". + * This device must be used with the Linux power management calls + * in sysutils_linux.c, in order for SGX to be powered on. 
+ */ +#if defined(PVR_LDM_PLATFORM_PRE_REGISTERED_DEV) +#define SYS_SGX_DEV_NAME PVR_LDM_PLATFORM_PRE_REGISTERED_DEV +#else +#define SYS_SGX_DEV_NAME "omap_gpu" +#endif /* defined(PVR_LDM_PLATFORM_PRE_REGISTERED_DEV) */ +#endif /* defined(PVR_LDM_DEVICE_TREE) */ +#endif /* defined(__linux__) */ + +/***************************************************************************** + * system specific data structures + *****************************************************************************/ + +#endif /* __SOCCONFIG_H__ */ diff --git a/sgx_km/eurasia_km/services4/system/omap/sysinfo.h b/sgx_km/eurasia_km/services4/system/omap/sysinfo.h new file mode 100644 index 0000000..8ba5943 --- /dev/null +++ b/sgx_km/eurasia_km/services4/system/omap/sysinfo.h @@ -0,0 +1,70 @@ +/*************************************************************************/ /*! +@Title System Description Header +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description This header provides system-specific declarations and macros +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above.
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(__SYSINFO_H__) +#define __SYSINFO_H__ + +#if defined(SGX540) && (SGX_CORE_REV == 120) +#define SYS_SGX_CLOCK_SPEED 307200000 +#else +#define SYS_SGX_CLOCK_SPEED 304742400 +#endif + +/*!< System specific poll/timeout details */ +#if defined(PVR_LINUX_USING_WORKQUEUES) +/* + * The workqueue based 3rd party display driver may be blocked for up + * to 500ms waiting for a vsync when the screen goes blank, so we + * need to wait longer for the hardware if a flush of the swap chain is + * required. 
+ */ +#define MAX_HW_TIME_US (1000000) +#define WAIT_TRY_COUNT (20000) +#else +#define MAX_HW_TIME_US (2000000) +#define WAIT_TRY_COUNT (40000) +#endif + + +#define SYS_DEVICE_COUNT 15 /* SGX, DISPLAYCLASS (external), BUFFERCLASS (external) */ + +#endif /* __SYSINFO_H__ */ diff --git a/sgx_km/eurasia_km/services4/system/omap/syslocal.h b/sgx_km/eurasia_km/services4/system/omap/syslocal.h new file mode 100644 index 0000000..bddbf6f --- /dev/null +++ b/sgx_km/eurasia_km/services4/system/omap/syslocal.h @@ -0,0 +1,262 @@ +/*************************************************************************/ /*! +@Title Local system definitions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description This header provides local system declarations and macros +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(__SYSLOCAL_H__) +#define __SYSLOCAL_H__ + +#if defined(__linux__) + +#include +#include +#if defined(PVR_LINUX_USING_WORKQUEUES) +#include +#else +#include +#endif +#include + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)) +#include +#include +#else /* (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)) */ +#include +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22)) +#include +#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22)) */ +#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)) */ + + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) +#if !defined(LDM_PLATFORM) +#error "LDM_PLATFORM must be set" +#endif +#define PVR_LINUX_DYNAMIC_SGX_RESOURCE_INFO +#include +#endif + +#if ((defined(DEBUG) || defined(TIMING)) && \ + (LINUX_VERSION_CODE == KERNEL_VERSION(2,6,34))) && \ + !defined(PVR_NO_OMAP_TIMER) +/* + * We need to explicitly enable the GPTIMER11 clocks, or we'll get an + * abort when we try to access the timer registers. 
+ */ +#define PVR_OMAP4_TIMING_PRCM +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,8,13)) +#include +#endif +#if !defined(PVR_NO_OMAP_TIMER) +#define PVR_OMAP_USE_DM_TIMER_API +#include +#endif +#endif + +#if !defined(PVR_NO_OMAP_TIMER) +#define PVR_OMAP_TIMER_BASE_IN_SYS_SPEC_DATA +#endif +#endif /* defined(__linux__) */ + +#if !defined(NO_HARDWARE) && \ + defined(SYS_USING_INTERRUPTS) +#define SGX_OCP_REGS_ENABLED +#endif + +#if defined(__linux__) +#if defined(SGX_OCP_REGS_ENABLED) +/* FIXME: Temporary workaround for OMAP4470 and active power off in 4430 */ +#if !defined(SGX544) && defined(SUPPORT_ACTIVE_POWER_MANAGEMENT) +#define SGX_OCP_NO_INT_BYPASS +#endif +#endif +#endif + +#if defined (__cplusplus) +extern "C" { +#endif + +/***************************************************************************** + * system specific data structures + *****************************************************************************/ + +/***************************************************************************** + * system specific function prototypes + *****************************************************************************/ + +IMG_VOID DisableSystemClocks(SYS_DATA *psSysData); +PVRSRV_ERROR EnableSystemClocks(SYS_DATA *psSysData); + +IMG_VOID DisableSGXClocks(SYS_DATA *psSysData); +PVRSRV_ERROR EnableSGXClocks(SYS_DATA *psSysData); + +/* + * Various flags to indicate what has been initialised, and what + * has been temporarily deinitialised for power management purposes. 
+ */ +#define SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS 0x00000001 +#define SYS_SPECIFIC_DATA_ENABLE_LISR 0x00000002 +#define SYS_SPECIFIC_DATA_ENABLE_MISR 0x00000004 +#define SYS_SPECIFIC_DATA_ENABLE_ENVDATA 0x00000008 +#define SYS_SPECIFIC_DATA_ENABLE_LOCDEV 0x00000010 +#define SYS_SPECIFIC_DATA_ENABLE_REGDEV 0x00000020 +#define SYS_SPECIFIC_DATA_ENABLE_PDUMPINIT 0x00000040 +#define SYS_SPECIFIC_DATA_ENABLE_INITDEV 0x00000080 +#define SYS_SPECIFIC_DATA_ENABLE_LOCATEDEV 0x00000100 + +#define SYS_SPECIFIC_DATA_PM_UNINSTALL_LISR 0x00000200 +#define SYS_SPECIFIC_DATA_PM_DISABLE_SYSCLOCKS 0x00000400 +#define SYS_SPECIFIC_DATA_ENABLE_OCPREGS 0x00000800 +#define SYS_SPECIFIC_DATA_ENABLE_PM_RUNTIME 0x00001000 +#define SYS_SPECIFIC_DATA_IRQ_ENABLED 0x00002000 +#define SYS_SPECIFIC_DATA_DVFS_INIT 0x00004000 + +#define SYS_SPECIFIC_DATA_SET(psSysSpecData, flag) ((IMG_VOID)((psSysSpecData)->ui32SysSpecificData |= (flag))) + +#define SYS_SPECIFIC_DATA_CLEAR(psSysSpecData, flag) ((IMG_VOID)((psSysSpecData)->ui32SysSpecificData &= ~(flag))) + +#define SYS_SPECIFIC_DATA_TEST(psSysSpecData, flag) (((psSysSpecData)->ui32SysSpecificData & (flag)) != 0) + +typedef struct _SYS_SPECIFIC_DATA_TAG_ +{ + IMG_UINT32 ui32SysSpecificData; + PVRSRV_DEVICE_NODE *psSGXDevNode; + IMG_BOOL bSGXInitComplete; +#if defined(PVR_OMAP_TIMER_BASE_IN_SYS_SPEC_DATA) + IMG_CPU_PHYADDR sTimerRegPhysBase; +#endif +#if !defined(__linux__) + IMG_BOOL bSGXClocksEnabled; +#endif + IMG_UINT32 ui32SrcClockDiv; +#if defined(__linux__) + IMG_BOOL bSysClocksOneTimeInit; + atomic_t sSGXClocksEnabled; +#if defined(PVR_LINUX_USING_WORKQUEUES) + struct mutex sPowerLock; +#else + IMG_BOOL bConstraintNotificationsEnabled; + spinlock_t sPowerLock; + atomic_t sPowerLockCPU; + spinlock_t sNotifyLock; + atomic_t sNotifyLockCPU; + IMG_BOOL bCallVDD2PostFunc; +#endif +#if defined(DEBUG) || defined(TIMING) + struct clk *psGPT11_FCK; + struct clk *psGPT11_ICK; +#endif +#if defined(PVR_OMAP_USE_DM_TIMER_API) + struct omap_dm_timer 
*psGPTimer; +#endif +#endif /* defined(__linux__) */ +} SYS_SPECIFIC_DATA; + +extern SYS_SPECIFIC_DATA *gpsSysSpecificData; + +#if defined(SGX_OCP_REGS_ENABLED) && defined(SGX_OCP_NO_INT_BYPASS) +IMG_VOID SysEnableSGXInterrupts(SYS_DATA* psSysData); +IMG_VOID SysDisableSGXInterrupts(SYS_DATA* psSysData); +#else +#define SysEnableSGXInterrupts(psSysData) +#define SysDisableSGXInterrupts(psSysData) +#endif + +#if defined(SYS_CUSTOM_POWERLOCK_WRAP) +IMG_BOOL WrapSystemPowerChange(SYS_SPECIFIC_DATA *psSysSpecData); +IMG_VOID UnwrapSystemPowerChange(SYS_SPECIFIC_DATA *psSysSpecData); +#endif + +#if defined(__linux__) + +PVRSRV_ERROR SysPMRuntimeRegister(void); +PVRSRV_ERROR SysPMRuntimeUnregister(void); + +PVRSRV_ERROR SysDvfsInitialize(SYS_SPECIFIC_DATA *psSysSpecificData); +PVRSRV_ERROR SysDvfsDeinitialize(SYS_SPECIFIC_DATA *psSysSpecificData); + +#else /* defined(__linux__) */ + +#ifdef INLINE_IS_PRAGMA +#pragma inline(SysPMRuntimeRegister) +#endif +static INLINE PVRSRV_ERROR SysPMRuntimeRegister(void) +{ + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(SysPMRuntimeUnregister) +#endif +static INLINE PVRSRV_ERROR SysPMRuntimeUnregister(void) +{ + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(SysDvfsInitialize) +#endif +static INLINE PVRSRV_ERROR SysDvfsInitialize(SYS_SPECIFIC_DATA *psSysSpecificData) +{ + PVR_UNREFERENCED_PARAMETER(psSysSpecificData); + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(SysDvfsDeinitialize) +#endif +static INLINE PVRSRV_ERROR SysDvfsDeinitialize(SYS_SPECIFIC_DATA *psSysSpecificData) +{ + PVR_UNREFERENCED_PARAMETER(psSysSpecificData); + return PVRSRV_OK; +} + +#endif /* defined(__linux__) */ + +#if defined(__cplusplus) +} +#endif + +#endif /* __SYSLOCAL_H__ */ + + diff --git a/sgx_km/eurasia_km/services4/system/omap/sysutils.c b/sgx_km/eurasia_km/services4/system/omap/sysutils.c new file mode 100644 index 0000000..b1ea055 --- /dev/null +++ 
b/sgx_km/eurasia_km/services4/system/omap/sysutils.c @@ -0,0 +1,63 @@ +/*************************************************************************/ /*! +@Title Shared (User/kernel) and System dependent utilities +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Provides system-specific functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +/* Pull in the correct system dependent sysutils source */ + +#if defined(__linux__) +#include "sysutils_linux.c" +#if defined(SYS_OMAP_HAS_DVFS_FRAMEWORK) +#include "sgxfreq.c" +#include "sgxfreq_onoff.c" +#include "sgxfreq_activeidle.c" +#include "sgxfreq_on3demand.c" +#include "sgxfreq_userspace.c" +#if (defined(CONFIG_THERMAL) || defined(CONFIG_THERMAL_FRAMEWORK)) +#include "sgxfreq_cool.c" +#endif +#endif +#endif + + + +#if defined(__QNXNTO__) +#include "sysutils_nto.c" +#endif diff --git a/sgx_km/eurasia_km/services4/system/omap/sysutils_linux.c b/sgx_km/eurasia_km/services4/system/omap/sysutils_linux.c new file mode 100644 index 0000000..ad02673 --- /dev/null +++ b/sgx_km/eurasia_km/services4/system/omap/sysutils_linux.c @@ -0,0 +1,721 @@ +/*************************************************************************/ /*! +@Title System dependent utilities +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Provides system-specific functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#include +#include +#include +#include +#include +#include + +#include "sgxdefs.h" +#include "services_headers.h" +#include "sysinfo.h" +#include "sgxapi_km.h" +#include "sysconfig.h" +#include "sgxinfokm.h" +#include "syslocal.h" + +#include +#include + +#if defined(SYS_OMAP_HAS_DVFS_FRAMEWORK) +#include "sgxfreq.h" +#endif + +#if defined(SUPPORT_DRI_DRM_PLUGIN) +#include +#include + +#include + +#include "pvr_drm.h" +#endif + +#define ONE_MHZ 1000000 +#define HZ_TO_MHZ(m) ((m) / ONE_MHZ) + +#if defined(SUPPORT_OMAP3430_SGXFCLK_96M) +#define SGX_PARENT_CLOCK "cm_96m_fck" +#else +#define SGX_PARENT_CLOCK "core_ck" +#endif + +#if defined(LDM_PLATFORM) && !defined(PVR_DRI_DRM_NOT_PCI) +extern struct platform_device *gpsPVRLDMDev; +#endif + +static PVRSRV_ERROR PowerLockWrap(SYS_SPECIFIC_DATA *psSysSpecData, IMG_BOOL bTryLock) +{ + if (!in_interrupt()) + { + if (bTryLock) + { + int locked = mutex_trylock(&psSysSpecData->sPowerLock); + if (locked == 0) + { + return PVRSRV_ERROR_RETRY; + } + } + else + { + mutex_lock(&psSysSpecData->sPowerLock); + } + } + + return PVRSRV_OK; +} + +static IMG_VOID PowerLockUnwrap(SYS_SPECIFIC_DATA *psSysSpecData) +{ + if (!in_interrupt()) + { + mutex_unlock(&psSysSpecData->sPowerLock); + } +} + +PVRSRV_ERROR SysPowerLockWrap(IMG_BOOL bTryLock) +{ + SYS_DATA *psSysData; + + SysAcquireData(&psSysData); + + return PowerLockWrap(psSysData->pvSysSpecificData, bTryLock); +} + +IMG_VOID SysPowerLockUnwrap(IMG_VOID) +{ + SYS_DATA *psSysData; + + SysAcquireData(&psSysData); + + PowerLockUnwrap(psSysData->pvSysSpecificData); +} + +/* + * This function should be called to unwrap the Services power lock, prior + * to calling any function that might sleep. + * This function shouldn't be called prior to calling EnableSystemClocks + * or DisableSystemClocks, as those functions perform their own power lock + * unwrapping. 
+ * If the function returns IMG_TRUE, UnwrapSystemPowerChange must be + * called to rewrap the power lock, prior to returning to Services. + */ +IMG_BOOL WrapSystemPowerChange(SYS_SPECIFIC_DATA *psSysSpecData) +{ + return IMG_TRUE; +} + +IMG_VOID UnwrapSystemPowerChange(SYS_SPECIFIC_DATA *psSysSpecData) +{ +} + +/* + * Return SGX timining information to caller. + */ +IMG_VOID SysGetSGXTimingInformation(SGX_TIMING_INFORMATION *psTimingInfo) +{ +#if !defined(NO_HARDWARE) + PVR_ASSERT(atomic_read(&gpsSysSpecificData->sSGXClocksEnabled) != 0); +#endif +#if defined(SYS_OMAP_HAS_DVFS_FRAMEWORK) + /* + * The core SGX driver and ukernel code expects SGX frequency + * changes to occur only just prior to SGX initialization. We + * don't wish to constrain the DVFS implementation as such. So + * we let these components believe that frequency setting is + * always at maximum. This produces safe values for derived + * parameters such as APM and HWR timeouts. + */ + psTimingInfo->ui32CoreClockSpeed = (IMG_UINT32)sgxfreq_get_freq_max(); +#else /* defined(SYS_OMAP_HAS_DVFS_FRAMEWORK) */ + psTimingInfo->ui32CoreClockSpeed = SYS_SGX_CLOCK_SPEED; +#endif + psTimingInfo->ui32HWRecoveryFreq = SYS_SGX_HWRECOVERY_TIMEOUT_FREQ; + psTimingInfo->ui32uKernelFreq = SYS_SGX_PDS_TIMER_FREQ; +#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT) + psTimingInfo->bEnableActivePM = IMG_TRUE; +#else + psTimingInfo->bEnableActivePM = IMG_FALSE; +#endif /* SUPPORT_ACTIVE_POWER_MANAGEMENT */ + psTimingInfo->ui32ActivePowManLatencyms = SYS_SGX_ACTIVE_POWER_LATENCY_MS; +} + +/*! 
+****************************************************************************** + + @Function EnableSGXClocks + + @Description Enable SGX clocks + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR EnableSGXClocks(SYS_DATA *psSysData) +{ +#if !defined(NO_HARDWARE) + SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData; + + /* SGX clocks already enabled? */ + if (atomic_read(&psSysSpecData->sSGXClocksEnabled) != 0) + { + return PVRSRV_OK; + } + + PVR_DPF((PVR_DBG_MESSAGE, "EnableSGXClocks: Enabling SGX Clocks")); + +#if defined(LDM_PLATFORM) && !defined(PVR_DRI_DRM_NOT_PCI) + { + /* + * pm_runtime_get_sync returns 1 after the module has + * been reloaded. + */ + int res = pm_runtime_get_sync(&gpsPVRLDMDev->dev); + if (res < 0) + { + PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: pm_runtime_get_sync failed (%d)", -res)); + return PVRSRV_ERROR_UNABLE_TO_ENABLE_CLOCK; + } + } +#if defined(SYS_OMAP_HAS_DVFS_FRAMEWORK) + sgxfreq_notif_sgx_clk_on(); +#endif /* defined(SYS_OMAP_HAS_DVFS_FRAMEWORK) */ +#endif /* defined(LDM_PLATFORM) && !defined(PVR_DRI_DRM_NOT_PCI) */ + + SysEnableSGXInterrupts(psSysData); + + /* Indicate that the SGX clocks are enabled */ + atomic_set(&psSysSpecData->sSGXClocksEnabled, 1); + +#else /* !defined(NO_HARDWARE) */ + PVR_UNREFERENCED_PARAMETER(psSysData); +#endif /* !defined(NO_HARDWARE) */ + return PVRSRV_OK; +} + + +/*! +****************************************************************************** + + @Function DisableSGXClocks + + @Description Disable SGX clocks. + + @Return none + +******************************************************************************/ +IMG_VOID DisableSGXClocks(SYS_DATA *psSysData) +{ +#if !defined(NO_HARDWARE) + SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData; + + /* SGX clocks already disabled? 
*/ + if (atomic_read(&psSysSpecData->sSGXClocksEnabled) == 0) + { + return; + } + + PVR_DPF((PVR_DBG_MESSAGE, "DisableSGXClocks: Disabling SGX Clocks")); + + SysDisableSGXInterrupts(psSysData); + +#if defined(LDM_PLATFORM) && !defined(PVR_DRI_DRM_NOT_PCI) + { + int res = pm_runtime_put_sync(&gpsPVRLDMDev->dev); + if (res < 0) + { + PVR_DPF((PVR_DBG_ERROR, "DisableSGXClocks: pm_runtime_put_sync failed (%d)", -res)); + } + } +#if defined(SYS_OMAP_HAS_DVFS_FRAMEWORK) + sgxfreq_notif_sgx_clk_off(); +#endif /* defined(SYS_OMAP_HAS_DVFS_FRAMEWORK) */ +#endif /* defined(LDM_PLATFORM) && !defined(PVR_DRI_DRM_NOT_PCI) */ + + /* Indicate that the SGX clocks are disabled */ + atomic_set(&psSysSpecData->sSGXClocksEnabled, 0); + +#else /* !defined(NO_HARDWARE) */ + PVR_UNREFERENCED_PARAMETER(psSysData); +#endif /* !defined(NO_HARDWARE) */ +} + +#if (defined(DEBUG) || defined(TIMING)) && !defined(PVR_NO_OMAP_TIMER) +#if defined(PVR_OMAP_USE_DM_TIMER_API) +#define GPTIMER_TO_USE 11 +/*! +****************************************************************************** + + @Function AcquireGPTimer + + @Description Acquire a GP timer + + @Return PVRSRV_ERROR + +******************************************************************************/ +static PVRSRV_ERROR AcquireGPTimer(SYS_SPECIFIC_DATA *psSysSpecData) +{ + PVR_ASSERT(psSysSpecData->psGPTimer == NULL); + + /* + * This code has problems on module reload for OMAP5 running Linux + * 3.4.10, due to omap2_dm_timer_set_src (called by + * omap_dm_timer_request_specific), being unable to set the parent + * clock to OMAP_TIMER_SRC_32_KHZ. + * Not calling omap_dm_timer_set_source doesn't help. + */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0)) || !defined(MODULE) + /* + * This code could try requesting registers 9, 10, and 11, + * stopping at the first succesful request. We'll stick with + * 11 for now, as it avoids having to hard code yet more + * physical addresses into the code. 
+ */ + psSysSpecData->psGPTimer = omap_dm_timer_request_specific(GPTIMER_TO_USE); + if (psSysSpecData->psGPTimer == NULL) + { + + PVR_DPF((PVR_DBG_WARNING, "%s: omap_dm_timer_request_specific failed", __FUNCTION__)); + return PVRSRV_ERROR_CLOCK_REQUEST_FAILED; + } + + omap_dm_timer_set_source(psSysSpecData->psGPTimer, OMAP_TIMER_SRC_SYS_CLK); + omap_dm_timer_enable(psSysSpecData->psGPTimer); + + /* Set autoreload, and start value of 0 */ + omap_dm_timer_set_load_start(psSysSpecData->psGPTimer, 1, 0); + + omap_dm_timer_start(psSysSpecData->psGPTimer); + + /* + * The DM timer API doesn't have a mechanism for obtaining the + * physical address of the counter register. + */ + psSysSpecData->sTimerRegPhysBase.uiAddr = SYS_OMAP_GP11TIMER_REGS_SYS_PHYS_BASE; +#else /* (LINUX_VERSION_CODE <= KERNEL_VERSION(3,4,0)) || !defined(MODULE) */ + (void)psSysSpecData; +#endif /* (LINUX_VERSION_CODE <= KERNEL_VERSION(3,4,0)) || !defined(MODULE) */ + + return PVRSRV_OK; +} + +/*! +****************************************************************************** + + @Function ReleaseGPTimer + + @Description Release a GP timer + + @Return PVRSRV_ERROR + +******************************************************************************/ +static void ReleaseGPTimer(SYS_SPECIFIC_DATA *psSysSpecData) +{ + if (psSysSpecData->psGPTimer != NULL) + { + /* Always returns 0 */ + (void) omap_dm_timer_stop(psSysSpecData->psGPTimer); + + omap_dm_timer_disable(psSysSpecData->psGPTimer); + + omap_dm_timer_free(psSysSpecData->psGPTimer); + + psSysSpecData->sTimerRegPhysBase.uiAddr = 0; + + psSysSpecData->psGPTimer = NULL; + } + +} +#else /* PVR_OMAP_USE_DM_TIMER_API */ +/*! 
+****************************************************************************** + + @Function AcquireGPTimer + + @Description Acquire a GP timer + + @Return PVRSRV_ERROR + +******************************************************************************/ +static PVRSRV_ERROR AcquireGPTimer(SYS_SPECIFIC_DATA *psSysSpecData) +{ +#if defined(PVR_OMAP4_TIMING_PRCM) + struct clk *psCLK; + IMG_INT res; + struct clk *sys_ck; + IMG_INT rate; +#endif + PVRSRV_ERROR eError; + + IMG_CPU_PHYADDR sTimerRegPhysBase; + IMG_HANDLE hTimerEnable; + IMG_UINT32 *pui32TimerEnable; + + PVR_ASSERT(psSysSpecData->sTimerRegPhysBase.uiAddr == 0); + +#if defined(PVR_OMAP4_TIMING_PRCM) + /* assert our dependence on the GPTIMER11 module */ + psCLK = clk_get(NULL, "gpt11_fck"); + if (IS_ERR(psCLK)) + { + PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get GPTIMER11 functional clock")); + goto ExitError; + } + psSysSpecData->psGPT11_FCK = psCLK; + + psCLK = clk_get(NULL, "gpt11_ick"); + if (IS_ERR(psCLK)) + { + PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get GPTIMER11 interface clock")); + goto ExitError; + } + psSysSpecData->psGPT11_ICK = psCLK; + + sys_ck = clk_get(NULL, "sys_clkin_ck"); + if (IS_ERR(sys_ck)) + { + PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get System clock")); + goto ExitError; + } + + if(clk_get_parent(psSysSpecData->psGPT11_FCK) != sys_ck) + { + PVR_TRACE(("Setting GPTIMER11 parent to System Clock")); + res = clk_set_parent(psSysSpecData->psGPT11_FCK, sys_ck); + if (res < 0) + { + PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't set GPTIMER11 parent clock (%d)", res)); + goto ExitError; + } + } + + rate = clk_get_rate(psSysSpecData->psGPT11_FCK); + PVR_TRACE(("GPTIMER11 clock is %dMHz", HZ_TO_MHZ(rate))); + + res = clk_enable(psSysSpecData->psGPT11_FCK); + if (res < 0) + { + PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't enable GPTIMER11 functional clock (%d)", res)); + goto ExitError; + } + + res = 
clk_enable(psSysSpecData->psGPT11_ICK); + if (res < 0) + { + PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't enable GPTIMER11 interface clock (%d)", res)); + goto ExitDisableGPT11FCK; + } +#endif /* defined(PVR_OMAP4_TIMING_PRCM) */ + + /* Set the timer to non-posted mode */ + sTimerRegPhysBase.uiAddr = SYS_OMAP_GP11TIMER_TSICR_SYS_PHYS_BASE; + pui32TimerEnable = OSMapPhysToLin(sTimerRegPhysBase, + 4, + PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED, + &hTimerEnable); + + if (pui32TimerEnable == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: OSMapPhysToLin failed")); + goto ExitDisableGPT11ICK; + } + + if(!(*pui32TimerEnable & 4)) + { + PVR_TRACE(("Setting GPTIMER11 mode to posted (currently is non-posted)")); + + /* Set posted mode */ + *pui32TimerEnable |= 4; + } + + OSUnMapPhysToLin(pui32TimerEnable, + 4, + PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED, + hTimerEnable); + + /* Enable the timer */ + sTimerRegPhysBase.uiAddr = SYS_OMAP_GP11TIMER_ENABLE_SYS_PHYS_BASE; + pui32TimerEnable = OSMapPhysToLin(sTimerRegPhysBase, + 4, + PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED, + &hTimerEnable); + + if (pui32TimerEnable == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: OSMapPhysToLin failed")); + goto ExitDisableGPT11ICK; + } + + /* Enable and set autoreload on overflow */ + *pui32TimerEnable = 3; + + OSUnMapPhysToLin(pui32TimerEnable, + 4, + PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED, + hTimerEnable); + + psSysSpecData->sTimerRegPhysBase = sTimerRegPhysBase; + + eError = PVRSRV_OK; + + goto Exit; + +ExitDisableGPT11ICK: +#if defined(PVR_OMAP4_TIMING_PRCM) + clk_disable(psSysSpecData->psGPT11_ICK); +ExitDisableGPT11FCK: + clk_disable(psSysSpecData->psGPT11_FCK); +ExitError: +#endif /* defined(PVR_OMAP4_TIMING_PRCM) */ + eError = PVRSRV_ERROR_CLOCK_REQUEST_FAILED; +Exit: + return eError; +} + +/*! 
+****************************************************************************** + + @Function ReleaseGPTimer + + @Description Release a GP timer + + @Return PVRSRV_ERROR + +******************************************************************************/ +static void ReleaseGPTimer(SYS_SPECIFIC_DATA *psSysSpecData) +{ + IMG_HANDLE hTimerDisable; + IMG_UINT32 *pui32TimerDisable; + + if (psSysSpecData->sTimerRegPhysBase.uiAddr == 0) + { + return; + } + + /* Disable the timer */ + pui32TimerDisable = OSMapPhysToLin(psSysSpecData->sTimerRegPhysBase, + 4, + PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED, + &hTimerDisable); + + if (pui32TimerDisable == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "DisableSystemClocks: OSMapPhysToLin failed")); + } + else + { + *pui32TimerDisable = 0; + + OSUnMapPhysToLin(pui32TimerDisable, + 4, + PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED, + hTimerDisable); + } + + psSysSpecData->sTimerRegPhysBase.uiAddr = 0; + +#if defined(PVR_OMAP4_TIMING_PRCM) + clk_disable(psSysSpecData->psGPT11_ICK); + + clk_disable(psSysSpecData->psGPT11_FCK); +#endif /* defined(PVR_OMAP4_TIMING_PRCM) */ +} +#endif /* PVR_OMAP_USE_DM_TIMER_API */ +#else /* (DEBUG || TIMING) && !PVR_NO_OMAP_TIMER */ +static PVRSRV_ERROR AcquireGPTimer(SYS_SPECIFIC_DATA *psSysSpecData) +{ + PVR_UNREFERENCED_PARAMETER(psSysSpecData); + + return PVRSRV_OK; +} +static void ReleaseGPTimer(SYS_SPECIFIC_DATA *psSysSpecData) +{ + PVR_UNREFERENCED_PARAMETER(psSysSpecData); +} +#endif /* (DEBUG || TIMING) && !PVR_NO_OMAP_TIMER */ + +/*! +****************************************************************************** + + @Function EnableSystemClocks + + @Description Setup up the clocks for the graphics device to work. 
+ + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR EnableSystemClocks(SYS_DATA *psSysData) +{ + SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData; + + PVR_TRACE(("EnableSystemClocks: Enabling System Clocks")); + + if (!psSysSpecData->bSysClocksOneTimeInit) + { + mutex_init(&psSysSpecData->sPowerLock); + + atomic_set(&psSysSpecData->sSGXClocksEnabled, 0); + + psSysSpecData->bSysClocksOneTimeInit = IMG_TRUE; + } + + return AcquireGPTimer(psSysSpecData); +} + +/*! +****************************************************************************** + + @Function DisableSystemClocks + + @Description Disable the graphics clocks. + + @Return none + +******************************************************************************/ +IMG_VOID DisableSystemClocks(SYS_DATA *psSysData) +{ + SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData; + + PVR_TRACE(("DisableSystemClocks: Disabling System Clocks")); + + /* + * Always disable the SGX clocks when the system clocks are disabled. + * This saves having to make an explicit call to DisableSGXClocks if + * active power management is enabled. 
+ */ + DisableSGXClocks(psSysData); + + ReleaseGPTimer(psSysSpecData); +} + +PVRSRV_ERROR SysPMRuntimeRegister(void) +{ +#if defined(LDM_PLATFORM) && !defined(PVR_DRI_DRM_NOT_PCI) + pm_runtime_enable(&gpsPVRLDMDev->dev); +#endif + return PVRSRV_OK; +} + +PVRSRV_ERROR SysPMRuntimeUnregister(void) +{ +#if defined(LDM_PLATFORM) && !defined(PVR_DRI_DRM_NOT_PCI) + pm_runtime_disable(&gpsPVRLDMDev->dev); +#endif + return PVRSRV_OK; +} + +PVRSRV_ERROR SysDvfsInitialize(SYS_SPECIFIC_DATA *psSysSpecificData) +{ + PVR_UNREFERENCED_PARAMETER(psSysSpecificData); +#if defined(SYS_OMAP_HAS_DVFS_FRAMEWORK) + if (sgxfreq_init(&gpsPVRLDMDev->dev)) + return PVRSRV_ERROR_NOT_SUPPORTED; +#endif /* defined(SYS_OMAP_HAS_DVFS_FRAMEWORK) */ + + return PVRSRV_OK; +} + +PVRSRV_ERROR SysDvfsDeinitialize(SYS_SPECIFIC_DATA *psSysSpecificData) +{ + PVR_UNREFERENCED_PARAMETER(psSysSpecificData); +#if defined(SYS_OMAP_HAS_DVFS_FRAMEWORK) + if (sgxfreq_deinit()) + return PVRSRV_ERROR_NOT_SUPPORTED; +#endif /* defined(SYS_OMAP_HAS_DVFS_FRAMEWORK) */ + + return PVRSRV_OK; +} + +#if defined(SUPPORT_DRI_DRM_PLUGIN) +static struct omap_gpu_plugin sOMAPGPUPlugin; + +#define SYS_DRM_SET_PLUGIN_FIELD(d, s, f) (d)->f = (s)->f +int +SysDRMRegisterPlugin(PVRSRV_DRM_PLUGIN *psDRMPlugin) +{ + int iRes; + + SYS_DRM_SET_PLUGIN_FIELD(&sOMAPGPUPlugin, psDRMPlugin, name); + SYS_DRM_SET_PLUGIN_FIELD(&sOMAPGPUPlugin, psDRMPlugin, open); + SYS_DRM_SET_PLUGIN_FIELD(&sOMAPGPUPlugin, psDRMPlugin, load); + SYS_DRM_SET_PLUGIN_FIELD(&sOMAPGPUPlugin, psDRMPlugin, unload); + SYS_DRM_SET_PLUGIN_FIELD(&sOMAPGPUPlugin, psDRMPlugin, release); + SYS_DRM_SET_PLUGIN_FIELD(&sOMAPGPUPlugin, psDRMPlugin, mmap); + SYS_DRM_SET_PLUGIN_FIELD(&sOMAPGPUPlugin, psDRMPlugin, ioctls); + SYS_DRM_SET_PLUGIN_FIELD(&sOMAPGPUPlugin, psDRMPlugin, num_ioctls); + SYS_DRM_SET_PLUGIN_FIELD(&sOMAPGPUPlugin, psDRMPlugin, ioctl_start); + + iRes = omap_gpu_register_plugin(&sOMAPGPUPlugin); + if (iRes != 0) + { + PVR_DPF((PVR_DBG_ERROR, "%s: 
omap_gpu_register_plugin failed (%d)", __FUNCTION__, iRes)); + } + + return iRes; +} + +void +SysDRMUnregisterPlugin(PVRSRV_DRM_PLUGIN *psDRMPlugin) +{ + int iRes = omap_gpu_unregister_plugin(&sOMAPGPUPlugin); + if (iRes != 0) + { + PVR_DPF((PVR_DBG_ERROR, "%s: omap_gpu_unregister_plugin failed (%d)", __FUNCTION__, iRes)); + } +} +#endif + +IMG_VOID SysSGXIdleEntered(IMG_VOID) +{ +#if defined(SYS_OMAP_HAS_DVFS_FRAMEWORK) + sgxfreq_notif_sgx_idle(); +#endif +} + +IMG_VOID SysSGXCommandPending(IMG_BOOL bSGXIdle) +{ +#if defined(SYS_OMAP_HAS_DVFS_FRAMEWORK) + if (bSGXIdle) + sgxfreq_notif_sgx_active(); +#else + PVR_UNREFERENCED_PARAMETER(bSGXIdle); +#endif +} diff --git a/sgx_km/eurasia_km/tools/intern/debug/client/linuxsrv.h b/sgx_km/eurasia_km/tools/intern/debug/client/linuxsrv.h new file mode 100644 index 0000000..d9fd825 --- /dev/null +++ b/sgx_km/eurasia_km/tools/intern/debug/client/linuxsrv.h @@ -0,0 +1,64 @@ +/*************************************************************************/ /*! +@File linuxsrv.h +@Title Module defs for pvr core drivers. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef _LINUXSRV_H__ +#define _LINUXSRV_H__ + +typedef struct tagIOCTL_PACKAGE +{ + IMG_UINT32 ui32Cmd; // ioctl command + IMG_UINT32 ui32Size; // needs to be correctly set + IMG_VOID *pInBuffer; // input data buffer + IMG_UINT32 ui32InBufferSize; // size of input data buffer + IMG_VOID *pOutBuffer; // output data buffer + IMG_UINT32 ui32OutBufferSize; // size of output data buffer +} IOCTL_PACKAGE; + +IMG_UINT32 DeviceIoControl(IMG_UINT32 hDevice, + IMG_UINT32 ui32ControlCode, + IMG_VOID *pInBuffer, + IMG_UINT32 ui32InBufferSize, + IMG_VOID *pOutBuffer, + IMG_UINT32 ui32OutBufferSize, + IMG_UINT32 *pui32BytesReturned); + +#endif /* _LINUXSRV_H__*/ diff --git a/sgx_km/eurasia_km/tools/intern/debug/dbgdriv/Kbuild.mk b/sgx_km/eurasia_km/tools/intern/debug/dbgdriv/Kbuild.mk new file mode 100644 index 0000000..7a827ce --- /dev/null +++ b/sgx_km/eurasia_km/tools/intern/debug/dbgdriv/Kbuild.mk @@ -0,0 +1,51 @@ +########################################################################### ### +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. 
+# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ########################################################################### + +ccflags-y += \ + -I$(TOP)/tools/intern/debug/dbgdriv/common \ + -I$(TOP)/tools/intern/debug/client + +dbgdrv-y += \ + tools/intern/debug/dbgdriv/common/dbgdriv_handle.o \ + tools/intern/debug/dbgdriv/common/dbgdriv.o \ + tools/intern/debug/dbgdriv/common/ioctl.o \ + tools/intern/debug/dbgdriv/common/hotkey.o \ + tools/intern/debug/dbgdriv/linux/main.o \ + tools/intern/debug/dbgdriv/linux/hostfunc.o diff --git a/sgx_km/eurasia_km/tools/intern/debug/dbgdriv/Linux.mk b/sgx_km/eurasia_km/tools/intern/debug/dbgdriv/Linux.mk new file mode 100644 index 0000000..e050879 --- /dev/null +++ b/sgx_km/eurasia_km/tools/intern/debug/dbgdriv/Linux.mk @@ -0,0 +1,45 @@ +########################################################################### ### +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. 
+# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +### ########################################################################### + +modules := dbgdrv + +dbgdrv_type := kernel_module +dbgdrv_target := dbgdrv.ko +dbgdrv_makefile := $(THIS_DIR)/Kbuild.mk diff --git a/sgx_km/eurasia_km/tools/intern/debug/dbgdriv/common/dbgdriv.c b/sgx_km/eurasia_km/tools/intern/debug/dbgdriv/common/dbgdriv.c new file mode 100644 index 0000000..2696cb9 --- /dev/null +++ b/sgx_km/eurasia_km/tools/intern/debug/dbgdriv/common/dbgdriv.c @@ -0,0 +1,2936 @@ +/*************************************************************************/ /*! +@Title Debug Driver +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description 32 Bit kernel mode debug driver +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifdef LINUX +#include +#endif +#ifdef __QNXNTO__ +#include +#endif + +#include "img_types.h" +#include "pvr_debug.h" +#include "dbgdrvif.h" +#include "dbgdriv.h" +#include "hotkey.h" +#include "hostfunc.h" +#include "pvr_debug.h" + + + + +#define LAST_FRAME_BUF_SIZE 1024 + +typedef struct _DBG_LASTFRAME_BUFFER_ +{ + PDBG_STREAM psStream; + IMG_UINT8 ui8Buffer[LAST_FRAME_BUF_SIZE]; + IMG_UINT32 ui32BufLen; + struct _DBG_LASTFRAME_BUFFER_ *psNext; +} *PDBG_LASTFRAME_BUFFER; + +/****************************************************************************** + Global vars +******************************************************************************/ + +static PDBG_STREAM g_psStreamList = 0; +static PDBG_LASTFRAME_BUFFER g_psLFBufferList; + +static IMG_UINT32 g_ui32LOff = 0; +static IMG_UINT32 g_ui32Line = 0; +static IMG_UINT32 g_ui32MonoLines = 25; + +static IMG_BOOL g_bHotkeyMiddump = IMG_FALSE; +static IMG_UINT32 g_ui32HotkeyMiddumpStart = 0xffffffff; +static IMG_UINT32 g_ui32HotkeyMiddumpEnd = 0xffffffff; + +IMG_VOID * g_pvAPIMutex=IMG_NULL; + +extern IMG_UINT32 g_ui32HotKeyFrame; +extern IMG_BOOL g_bHotKeyPressed; +extern IMG_BOOL g_bHotKeyRegistered; + +IMG_BOOL gbDumpThisFrame = IMG_FALSE; + + +IMG_UINT32 SpaceInStream(PDBG_STREAM psStream); +IMG_BOOL ExpandStreamBuffer(PDBG_STREAM psStream, IMG_UINT32 ui32NewSize); +PDBG_LASTFRAME_BUFFER FindLFBuf(PDBG_STREAM psStream); + +/*************************************************************************** + Declare kernel mode service table. 
+***************************************************************************/ +DBGKM_SERVICE_TABLE g_sDBGKMServices = +{ + sizeof (DBGKM_SERVICE_TABLE), + ExtDBGDrivCreateStream, + ExtDBGDrivDestroyStream, + ExtDBGDrivFindStream, + ExtDBGDrivWriteString, + ExtDBGDrivReadString, + ExtDBGDrivWrite, + ExtDBGDrivRead, + ExtDBGDrivSetCaptureMode, + ExtDBGDrivSetOutputMode, + ExtDBGDrivSetDebugLevel, + ExtDBGDrivSetFrame, + ExtDBGDrivGetFrame, + ExtDBGDrivOverrideMode, + ExtDBGDrivDefaultMode, + ExtDBGDrivWrite2, + ExtDBGDrivWriteStringCM, + ExtDBGDrivWriteCM, + ExtDBGDrivSetMarker, + ExtDBGDrivGetMarker, + ExtDBGDrivStartInitPhase, + ExtDBGDrivStopInitPhase, + ExtDBGDrivIsCaptureFrame, + ExtDBGDrivWriteLF, + ExtDBGDrivReadLF, + ExtDBGDrivGetStreamOffset, + ExtDBGDrivSetStreamOffset, + ExtDBGDrivIsLastCaptureFrame, + ExtDBGDrivWaitForEvent, + ExtDBGDrivSetConnectNotifier, + ExtDBGDrivWritePersist +}; + + +/* Static function declarations */ +static IMG_UINT32 DBGDrivWritePersist(PDBG_STREAM psMainStream,IMG_UINT8 * pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level); +static IMG_VOID InvalidateAllStreams(IMG_VOID); + +/***************************************************************************** + Code +*****************************************************************************/ + + + +DBGKM_CONNECT_NOTIFIER g_fnDBGKMNotifier; + +/*! + @name ExtDBGDrivSetConnectNotifier + @brief Registers one or more services callback functions which are called on events in the dbg driver + @param fn_notifier - services callbacks + @return none + */ +IMG_VOID IMG_CALLCONV ExtDBGDrivSetConnectNotifier(DBGKM_CONNECT_NOTIFIER fn_notifier) +{ + /* Set the callback function which enables the debug driver to + * communicate to services KM when pdump is connected. + */ + g_fnDBGKMNotifier = fn_notifier; +} + +/*! 
+ @name ExtDBGDrivCreateStream + */ +IMG_VOID * IMG_CALLCONV ExtDBGDrivCreateStream(IMG_CHAR * pszName, IMG_UINT32 ui32CapMode, IMG_UINT32 ui32OutMode, IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size) +{ + IMG_VOID * pvRet; + + /* Aquire API Mutex */ + HostAquireMutex(g_pvAPIMutex); + + pvRet=DBGDrivCreateStream(pszName, ui32CapMode, ui32OutMode, ui32Flags, ui32Size); + + /* Release API Mutex */ + HostReleaseMutex(g_pvAPIMutex); + + return pvRet; +} + +/*! + @name ExtDBGDrivDestroyStream + */ +void IMG_CALLCONV ExtDBGDrivDestroyStream(PDBG_STREAM psStream) +{ + /* Aquire API Mutex */ + HostAquireMutex(g_pvAPIMutex); + + DBGDrivDestroyStream(psStream); + + /* Release API Mutex */ + HostReleaseMutex(g_pvAPIMutex); + + return; +} + +/*! + @name ExtDBGDrivFindStream + */ +IMG_VOID * IMG_CALLCONV ExtDBGDrivFindStream(IMG_CHAR * pszName, IMG_BOOL bResetStream) +{ + IMG_VOID * pvRet; + + /* Aquire API Mutex */ + HostAquireMutex(g_pvAPIMutex); + + pvRet=DBGDrivFindStream(pszName, bResetStream); + if(g_fnDBGKMNotifier.pfnConnectNotifier) + { + g_fnDBGKMNotifier.pfnConnectNotifier(); + } + else + { + PVR_DPF((PVR_DBG_ERROR, "pfnConnectNotifier not initialised.\n")); + } + + /* Release API Mutex */ + HostReleaseMutex(g_pvAPIMutex); + + return pvRet; +} + +/*! + @name ExtDBGDrivWriteString + */ +IMG_UINT32 IMG_CALLCONV ExtDBGDrivWriteString(PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Level) +{ + IMG_UINT32 ui32Ret; + + /* Aquire API Mutex */ + HostAquireMutex(g_pvAPIMutex); + + ui32Ret=DBGDrivWriteString(psStream, pszString, ui32Level); + + /* Release API Mutex */ + HostReleaseMutex(g_pvAPIMutex); + + return ui32Ret; +} + +/*! 
+ @name ExtDBGDrivReadString + */ +IMG_UINT32 IMG_CALLCONV ExtDBGDrivReadString(PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Limit) +{ + IMG_UINT32 ui32Ret; + + /* Aquire API Mutex */ + HostAquireMutex(g_pvAPIMutex); + + ui32Ret=DBGDrivReadString(psStream, pszString, ui32Limit); + + /* Release API Mutex */ + HostReleaseMutex(g_pvAPIMutex); + + return ui32Ret; +} + +/*! + @name ExtDBGDrivWrite + */ +IMG_UINT32 IMG_CALLCONV ExtDBGDrivWrite(PDBG_STREAM psStream,IMG_UINT8 * pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level) +{ + IMG_UINT32 ui32Ret; + + /* Aquire API Mutex */ + HostAquireMutex(g_pvAPIMutex); + + ui32Ret=DBGDrivWrite(psStream, pui8InBuf, ui32InBuffSize, ui32Level); + + /* Release API Mutex */ + HostReleaseMutex(g_pvAPIMutex); + + return ui32Ret; +} + +/*! + @name ExtDBGDrivRead + */ +IMG_UINT32 IMG_CALLCONV ExtDBGDrivRead(PDBG_STREAM psStream, IMG_BOOL bReadInitBuffer, IMG_UINT32 ui32OutBuffSize,IMG_UINT8 * pui8OutBuf) +{ + IMG_UINT32 ui32Ret; + + /* Aquire API Mutex */ + HostAquireMutex(g_pvAPIMutex); + + ui32Ret=DBGDrivRead(psStream, bReadInitBuffer, ui32OutBuffSize, pui8OutBuf); + + /* Release API Mutex */ + HostReleaseMutex(g_pvAPIMutex); + + return ui32Ret; +} + +/*! + @name ExtDBGDrivSetCaptureMode + */ +void IMG_CALLCONV ExtDBGDrivSetCaptureMode(PDBG_STREAM psStream,IMG_UINT32 ui32Mode,IMG_UINT32 ui32Start,IMG_UINT32 ui32End,IMG_UINT32 ui32SampleRate) +{ + /* Aquire API Mutex */ + HostAquireMutex(g_pvAPIMutex); + + DBGDrivSetCaptureMode(psStream, ui32Mode, ui32Start, ui32End, ui32SampleRate); + + /* Release API Mutex */ + HostReleaseMutex(g_pvAPIMutex); + + return; +} + +/*! + @name ExtDBGDrivSetOutputMode + */ +void IMG_CALLCONV ExtDBGDrivSetOutputMode(PDBG_STREAM psStream,IMG_UINT32 ui32OutMode) +{ + /* Aquire API Mutex */ + HostAquireMutex(g_pvAPIMutex); + + DBGDrivSetOutputMode(psStream, ui32OutMode); + + /* Release API Mutex */ + HostReleaseMutex(g_pvAPIMutex); + + return; +} + +/*! 
+ @name ExtDBGDrivSetDebugLevel + */ +void IMG_CALLCONV ExtDBGDrivSetDebugLevel(PDBG_STREAM psStream,IMG_UINT32 ui32DebugLevel) +{ + /* Aquire API Mutex */ + HostAquireMutex(g_pvAPIMutex); + + DBGDrivSetDebugLevel(psStream, ui32DebugLevel); + + /* Release API Mutex */ + HostReleaseMutex(g_pvAPIMutex); + + return; +} + +/*! + @name ExtDBGDrivSetFrame + */ +void IMG_CALLCONV ExtDBGDrivSetFrame(PDBG_STREAM psStream,IMG_UINT32 ui32Frame) +{ + /* Aquire API Mutex */ + HostAquireMutex(g_pvAPIMutex); + + DBGDrivSetFrame(psStream, ui32Frame); + + /* Release API Mutex */ + HostReleaseMutex(g_pvAPIMutex); + + return; +} + +/*! + @name ExtDBGDrivGetFrame + */ +IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetFrame(PDBG_STREAM psStream) +{ + IMG_UINT32 ui32Ret; + + /* Aquire API Mutex */ + HostAquireMutex(g_pvAPIMutex); + + ui32Ret=DBGDrivGetFrame(psStream); + + /* Release API Mutex */ + HostReleaseMutex(g_pvAPIMutex); + + return ui32Ret; +} + +/*! + @name ExtDBGDrivIsLastCaptureFrame + */ +IMG_BOOL IMG_CALLCONV ExtDBGDrivIsLastCaptureFrame(PDBG_STREAM psStream) +{ + IMG_BOOL bRet; + + /* Aquire API Mutex */ + HostAquireMutex(g_pvAPIMutex); + + bRet = DBGDrivIsLastCaptureFrame(psStream); + + /* Release API Mutex */ + HostReleaseMutex(g_pvAPIMutex); + + return bRet; +} + +/*! + @name ExtDBGDrivIsCaptureFrame + */ +IMG_BOOL IMG_CALLCONV ExtDBGDrivIsCaptureFrame(PDBG_STREAM psStream, IMG_BOOL bCheckPreviousFrame) +{ + IMG_BOOL bRet; + + /* Aquire API Mutex */ + HostAquireMutex(g_pvAPIMutex); + + bRet = DBGDrivIsCaptureFrame(psStream, bCheckPreviousFrame); + + /* Release API Mutex */ + HostReleaseMutex(g_pvAPIMutex); + + return bRet; +} + +/*! + @name ExtDBGDrivOverrideMode + */ +void IMG_CALLCONV ExtDBGDrivOverrideMode(PDBG_STREAM psStream,IMG_UINT32 ui32Mode) +{ + /* Aquire API Mutex */ + HostAquireMutex(g_pvAPIMutex); + + DBGDrivOverrideMode(psStream, ui32Mode); + + /* Release API Mutex */ + HostReleaseMutex(g_pvAPIMutex); + + return; +} + +/*! 
+ @name ExtDBGDrivDefaultMode + */ +void IMG_CALLCONV ExtDBGDrivDefaultMode(PDBG_STREAM psStream) +{ + /* Aquire API Mutex */ + HostAquireMutex(g_pvAPIMutex); + + DBGDrivDefaultMode(psStream); + + /* Release API Mutex */ + HostReleaseMutex(g_pvAPIMutex); + + return; +} + +/*! + @name ExtDBGDrivWrite2 + */ +IMG_UINT32 IMG_CALLCONV ExtDBGDrivWrite2(PDBG_STREAM psStream,IMG_UINT8 * pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level) +{ + IMG_UINT32 ui32Ret; + + /* Aquire API Mutex */ + HostAquireMutex(g_pvAPIMutex); + + ui32Ret=DBGDrivWrite2(psStream, pui8InBuf, ui32InBuffSize, ui32Level); + + /* Release API Mutex */ + HostReleaseMutex(g_pvAPIMutex); + + return ui32Ret; +} + +/*! + @name ExtDBGDrivWritePersist + */ +IMG_UINT32 IMG_CALLCONV ExtDBGDrivWritePersist(PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level) +{ + IMG_UINT32 ui32Ret; + + /* Aquire API Mutex */ + HostAquireMutex(g_pvAPIMutex); + + ui32Ret=DBGDrivWritePersist(psStream, pui8InBuf, ui32InBuffSize, ui32Level); + if(ui32Ret==0xFFFFFFFFU) + { + PVR_DPF((PVR_DBG_ERROR, "An error occurred in DBGDrivWritePersist.")); + } + + /* Release API Mutex */ + HostReleaseMutex(g_pvAPIMutex); + + return ui32Ret; +} + +/*! + @name ExtDBGDrivWriteStringCM + */ +IMG_UINT32 IMG_CALLCONV ExtDBGDrivWriteStringCM(PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Level) +{ + IMG_UINT32 ui32Ret; + + /* Aquire API Mutex */ + HostAquireMutex(g_pvAPIMutex); + + ui32Ret=DBGDrivWriteStringCM(psStream, pszString, ui32Level); + + /* Release API Mutex */ + HostReleaseMutex(g_pvAPIMutex); + + return ui32Ret; +} + +/*! 
+ @name ExtDBGDrivWriteCM + */ +IMG_UINT32 IMG_CALLCONV ExtDBGDrivWriteCM(PDBG_STREAM psStream,IMG_UINT8 * pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level) +{ + IMG_UINT32 ui32Ret; + + /* Aquire API Mutex */ + HostAquireMutex(g_pvAPIMutex); + + ui32Ret=DBGDrivWriteCM(psStream, pui8InBuf, ui32InBuffSize, ui32Level); + + /* Release API Mutex */ + HostReleaseMutex(g_pvAPIMutex); + + return ui32Ret; +} + +/*! + @name ExtDBGDrivSetMarker + */ +void IMG_CALLCONV ExtDBGDrivSetMarker(PDBG_STREAM psStream, IMG_UINT32 ui32Marker) +{ + /* Aquire API Mutex */ + HostAquireMutex(g_pvAPIMutex); + + DBGDrivSetMarker(psStream, ui32Marker); + + /* Release API Mutex */ + HostReleaseMutex(g_pvAPIMutex); + + return; +} + +/*! + @name ExtDBGDrivGetMarker + */ +IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetMarker(PDBG_STREAM psStream) +{ + IMG_UINT32 ui32Marker; + + /* Aquire API Mutex */ + HostAquireMutex(g_pvAPIMutex); + + ui32Marker = DBGDrivGetMarker(psStream); + + /* Release API Mutex */ + HostReleaseMutex(g_pvAPIMutex); + + return ui32Marker; +} + +/*! + @name ExtDBGDrivWriteLF + */ +IMG_UINT32 IMG_CALLCONV ExtDBGDrivWriteLF(PDBG_STREAM psStream, IMG_UINT8 * pui8InBuf, IMG_UINT32 ui32InBuffSize, IMG_UINT32 ui32Level, IMG_UINT32 ui32Flags) +{ + IMG_UINT32 ui32Ret; + + /* Aquire API Mutex */ + HostAquireMutex(g_pvAPIMutex); + + ui32Ret = DBGDrivWriteLF(psStream, pui8InBuf, ui32InBuffSize, ui32Level, ui32Flags); + + /* Release API Mutex */ + HostReleaseMutex(g_pvAPIMutex); + + return ui32Ret; +} + +/*! + @name ExtDBGDrivReadLF + */ +IMG_UINT32 IMG_CALLCONV ExtDBGDrivReadLF(PDBG_STREAM psStream, IMG_UINT32 ui32OutBuffSize, IMG_UINT8 * pui8OutBuf) +{ + IMG_UINT32 ui32Ret; + + /* Aquire API Mutex */ + HostAquireMutex(g_pvAPIMutex); + + ui32Ret = DBGDrivReadLF(psStream, ui32OutBuffSize, pui8OutBuf); + + /* Release API Mutex */ + HostReleaseMutex(g_pvAPIMutex); + + return ui32Ret; +} + + +/*! 
+ @name ExtDBGDrivStartInitPhase + */ +IMG_VOID IMG_CALLCONV ExtDBGDrivStartInitPhase(PDBG_STREAM psStream) +{ + /* Aquire API Mutex */ + HostAquireMutex(g_pvAPIMutex); + + DBGDrivStartInitPhase(psStream); + + /* Release API Mutex */ + HostReleaseMutex(g_pvAPIMutex); + + return; +} + +/*! + @name ExtDBGDrivStopInitPhase + */ +IMG_VOID IMG_CALLCONV ExtDBGDrivStopInitPhase(PDBG_STREAM psStream) +{ + /* Aquire API Mutex */ + HostAquireMutex(g_pvAPIMutex); + + DBGDrivStopInitPhase(psStream); + + /* Release API Mutex */ + HostReleaseMutex(g_pvAPIMutex); + + return; +} + +/*! + @name ExtDBGDrivGetStreamOffset + */ +IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetStreamOffset(PDBG_STREAM psStream) +{ + IMG_UINT32 ui32Ret; + + /* Aquire API Mutex */ + HostAquireMutex(g_pvAPIMutex); + + ui32Ret = DBGDrivGetStreamOffset(psStream); + + /* Release API Mutex */ + HostReleaseMutex(g_pvAPIMutex); + + return ui32Ret; +} + +/*! + @name ExtDBGDrivSetStreamOffset + */ +IMG_VOID IMG_CALLCONV ExtDBGDrivSetStreamOffset(PDBG_STREAM psStream, IMG_UINT32 ui32StreamOffset) +{ + /* Aquire API Mutex */ + HostAquireMutex(g_pvAPIMutex); + + DBGDrivSetStreamOffset(psStream, ui32StreamOffset); + + /* Release API Mutex */ + HostReleaseMutex(g_pvAPIMutex); +} + +/*! + @name ExtDBGDrivWaitForEvent + */ +IMG_VOID IMG_CALLCONV ExtDBGDrivWaitForEvent(DBG_EVENT eEvent) +{ +#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS) + DBGDrivWaitForEvent(eEvent); +#else /* defined(SUPPORT_DBGDRV_EVENT_OBJECTS) */ + PVR_UNREFERENCED_PARAMETER(eEvent); /* PRQA S 3358 */ +#endif /* defined(SUPPORT_DBGDRV_EVENT_OBJECTS) */ +} + +/*!**************************************************************************** + @name AtoI + @brief Returns the integer value of a decimal string + @param szIn - String with hexadecimal value + @return IMG_UINT32 integer value, 0 if string is null or not valid + Based on Max`s one, now copes with (only) hex ui32ords, upper or lower case a-f. 
+*****************************************************************************/ +IMG_UINT32 AtoI(IMG_CHAR *szIn) +{ + IMG_INT iLen = 0; + IMG_UINT32 ui32Value = 0; + IMG_UINT32 ui32Digit=1; + IMG_UINT32 ui32Base=10; + IMG_INT iPos; + IMG_CHAR bc; + + //get len of string + while (szIn[iLen] > 0) + { + iLen ++; + } + + //nothing to do + if (iLen == 0) + { + return (0); + } + + /* See if we have an 'x' or 'X' before the number to make it a hex number */ + iPos=0; + while (szIn[iPos] == '0') + { + iPos++; + } + if (szIn[iPos] == '\0') + { + return 0; + } + if (szIn[iPos] == 'x' || szIn[iPos] == 'X') + { + ui32Base=16; + szIn[iPos]='0'; + } + + //go through string from right (least significant) to left + for (iPos = iLen - 1; iPos >= 0; iPos --) + { + bc = szIn[iPos]; + + if ( (bc >= 'a') && (bc <= 'f') && ui32Base == 16) //handle lower case a-f + { + bc -= 'a' - 0xa; + } + else + if ( (bc >= 'A') && (bc <= 'F') && ui32Base == 16) //handle upper case A-F + { + bc -= 'A' - 0xa; + } + else + if ((bc >= '0') && (bc <= '9')) //if char out of range, return 0 + { + bc -= '0'; + } + else + return (0); + + ui32Value += (IMG_UINT32)bc * ui32Digit; + + ui32Digit = ui32Digit * ui32Base; + } + return (ui32Value); +} + + +/*!**************************************************************************** + @name StreamValid + @brief Validates supplied debug buffer. + @param psStream - debug stream + @return true if valid +*****************************************************************************/ +static IMG_BOOL StreamValid(PDBG_STREAM psStream) +{ + PDBG_STREAM psThis; + + psThis = g_psStreamList; + + while (psThis) + { + if (psStream && (psThis == psStream) ) + { + return(IMG_TRUE); + } + else + { + psThis = psThis->psNext; + } + } + + return(IMG_FALSE); +} + + +/*!**************************************************************************** + @name StreamValidForRead + @brief Validates supplied debug buffer for read op. 
+ @param psStream - debug stream + @return true if readable +*****************************************************************************/ +static IMG_BOOL StreamValidForRead(PDBG_STREAM psStream) +{ + if( StreamValid(psStream) && + ((psStream->psCtrl->ui32Flags & DEBUG_FLAGS_WRITEONLY) == 0) ) + { + return(IMG_TRUE); + } + + return(IMG_FALSE); +} + +/*!**************************************************************************** + @name StreamValidForWrite + @brief Validates supplied debug buffer for write op. + @param psStream - debug stream + @return true if writable +*****************************************************************************/ +static IMG_BOOL StreamValidForWrite(PDBG_STREAM psStream) +{ + if( StreamValid(psStream) && + ((psStream->psCtrl->ui32Flags & DEBUG_FLAGS_READONLY) == 0) ) + { + return(IMG_TRUE); + } + + return(IMG_FALSE); +} + + +/*!**************************************************************************** + @name Write + @brief Copies data from a buffer into selected stream. Stream size is fixed. + @param psStream - stream for output + @param pui8Data - input buffer + @param ui32InBuffSize - size of input + @return none +*****************************************************************************/ +static void Write(PDBG_STREAM psStream,IMG_PUINT8 pui8Data,IMG_UINT32 ui32InBuffSize) +{ + /* + Split copy into two bits as necessary (if we're allowed to wrap). 
+ */ + if (!psStream->bCircularAllowed) + { + //PVR_ASSERT( (psStream->ui32WPtr + ui32InBuffSize) < psStream->ui32Size ); + } + + if ((psStream->ui32WPtr + ui32InBuffSize) > psStream->ui32Size) + { + /* Yes we need two bits, calculate their sizes */ + IMG_UINT32 ui32B1 = psStream->ui32Size - psStream->ui32WPtr; + IMG_UINT32 ui32B2 = ui32InBuffSize - ui32B1; + + /* Copy first block to current location */ + HostMemCopy((IMG_PVOID)((IMG_UINTPTR_T)psStream->pvBase + psStream->ui32WPtr), + (IMG_PVOID) pui8Data, + ui32B1); + + /* Copy second block to start of buffer */ + HostMemCopy(psStream->pvBase, + (IMG_PVOID)(pui8Data + ui32B1), + ui32B2); + + /* Set pointer to be the new end point */ + psStream->ui32WPtr = ui32B2; + } + else + { /* Can fit block in single chunk */ + HostMemCopy((IMG_PVOID)((IMG_UINTPTR_T)psStream->pvBase + psStream->ui32WPtr), + (IMG_PVOID) pui8Data, + ui32InBuffSize); + + psStream->ui32WPtr += ui32InBuffSize; + + if (psStream->ui32WPtr == psStream->ui32Size) + { + psStream->ui32WPtr = 0; + } + } + psStream->ui32DataWritten += ui32InBuffSize; +} + + +/*!**************************************************************************** + @name MonoOut + @brief Output data to mono display. [Possibly deprecated] + @param pszString - input + @param bNewLine - line wrapping + @return none +*****************************************************************************/ +void MonoOut(const IMG_CHAR * pszString,IMG_BOOL bNewLine) +{ +#if defined (_WIN64) + PVR_UNREFERENCED_PARAMETER(pszString); + PVR_UNREFERENCED_PARAMETER(bNewLine); + +#else + IMG_UINT32 i; + IMG_CHAR * pScreen; + + pScreen = (IMG_CHAR *) DBGDRIV_MONOBASE; + + pScreen += g_ui32Line * 160; + + /* + Write the string. 
+ */ + i=0; + do + { + pScreen[g_ui32LOff + (i*2)] = pszString[i]; + pScreen[g_ui32LOff + (i*2)+1] = 127; + i++; + } + while (i < MAX_STREAM_NAME_LENGTH && (pszString[i] != 0)); + + g_ui32LOff += i * 2; + + if (bNewLine) + { + g_ui32LOff = 0; + g_ui32Line++; + } + + /* + Scroll if necssary. + */ + if (g_ui32Line == g_ui32MonoLines) + { + g_ui32Line = g_ui32MonoLines - 1; + + HostMemCopy((IMG_VOID *)DBGDRIV_MONOBASE,(IMG_VOID *)(DBGDRIV_MONOBASE + 160),160 * (g_ui32MonoLines - 1)); + + HostMemSet((IMG_VOID *)(DBGDRIV_MONOBASE + (160 * (g_ui32MonoLines - 1))),0,160); + } +#endif +} + +/*!**************************************************************************** + @name WriteExpandingBuffer + @brief Copies data from a buffer into selected stream. Stream size may be expandable. + @param psStream - stream for output + @param pui8InBuf - input buffer + @param ui32InBuffSize - size of input + @return bytes copied +*****************************************************************************/ +static IMG_UINT32 WriteExpandingBuffer(PDBG_STREAM psStream,IMG_UINT8 * pui8InBuf,IMG_UINT32 ui32InBuffSize) +{ + IMG_UINT ui32Space; + + /* + How much space have we got in the buffer ? + */ + ui32Space = SpaceInStream(psStream); + + /* + Don't copy anything if we don't have space or buffers not enabled. + */ + if ((psStream->psCtrl->ui32OutMode & DEBUG_OUTMODE_STREAMENABLE) == 0) + { + PVR_DPF((PVR_DBG_ERROR, "WriteExpandingBuffer: buffer %p is disabled", psStream)); + return(0); + } + + /* + Check if we can expand the buffer + */ + if (psStream->psCtrl->ui32Flags & DEBUG_FLAGS_NO_BUF_EXPANDSION) + { + /* + Don't do anything if we've got less that 32 ui8tes of space and + we're not allowing expansion of buffer space... 
+ */ + if (ui32Space < 32) + { + PVR_DPF((PVR_DBG_ERROR, "WriteExpandingBuffer: buffer %p is full and isn't expandable", psStream)); + return(0); + } + } + else + { + if ((ui32Space < 32) || (ui32Space <= (ui32InBuffSize + 4))) + { + IMG_UINT32 ui32NewBufSize; + + /* + Find new buffer size + */ + ui32NewBufSize = 2 * psStream->ui32Size; + + PVR_DPF((PVR_DBGDRIV_MESSAGE, "Expanding buffer size = %x, new size = %x", + psStream->ui32Size, ui32NewBufSize)); + + if (ui32InBuffSize > psStream->ui32Size) + { + ui32NewBufSize += ui32InBuffSize; + } + + /* + Attempt to expand the buffer + */ + if (!ExpandStreamBuffer(psStream,ui32NewBufSize)) + { + if (ui32Space < 32) + { + if(psStream->bCircularAllowed) + { + return(0); + } + else + { + /* out of memory */ + PVR_DPF((PVR_DBG_ERROR, "WriteExpandingBuffer: Unable to expand %p. Out of memory.", psStream)); + InvalidateAllStreams(); + return (0xFFFFFFFFUL); + } + } + } + + /* + Recalc the space in the buffer + */ + ui32Space = SpaceInStream(psStream); + PVR_DPF((PVR_DBGDRIV_MESSAGE, "Expanded buffer, free space = %x", + ui32Space)); + } + } + + /* + Only copy what we can.. + */ + if (ui32Space <= (ui32InBuffSize + 4)) + { + ui32InBuffSize = ui32Space - 4; + } + + /* + Write the stuff... 
+ */ + Write(psStream,pui8InBuf,ui32InBuffSize); + +#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS) + if (ui32InBuffSize) + { + HostSignalEvent(DBG_EVENT_STREAM_DATA); + } +#endif + return(ui32InBuffSize); +} + +/***************************************************************************** +****************************************************************************** +****************************************************************************** + THE ACTUAL FUNCTIONS +****************************************************************************** +****************************************************************************** +*****************************************************************************/ + +/*!**************************************************************************** + @name DBGDrivCreateStream + @brief Creates a pdump/debug stream + @param pszName - stream name + @param ui32CapMode - capture mode (framed, continuous, hotkey) + @param ui32OutMode - output mode (see dbgdrvif.h) + @param ui32Flags - output flags, text stream bit is set for pdumping + @param ui32Size - size of stream buffer in pages + @return none +*****************************************************************************/ +IMG_VOID * IMG_CALLCONV DBGDrivCreateStream(IMG_CHAR * pszName, + IMG_UINT32 ui32CapMode, + IMG_UINT32 ui32OutMode, + IMG_UINT32 ui32Flags, + IMG_UINT32 ui32Size) +{ + PDBG_STREAM psStream; + PDBG_STREAM psInitStream; + PDBG_LASTFRAME_BUFFER psLFBuffer; + PDBG_STREAM_CONTROL psCtrl; + IMG_UINT32 ui32Off; + IMG_VOID * pvBase; + static const IMG_CHAR pszNameInitSuffix[] = "_Init"; + IMG_UINT32 ui32OffSuffix; + + /* + If we already have a buffer using this name just return + its handle. 
+ */ + psStream = (PDBG_STREAM) DBGDrivFindStream(pszName, IMG_FALSE); + + if (psStream) + { + return ((IMG_VOID *) psStream); + } + + /* + Allocate memory for control structures + */ + psStream = HostNonPageablePageAlloc(1); + if (!psStream) + { + PVR_DPF((PVR_DBG_ERROR,"DBGDriv: Couldn't alloc Stream\n\r")); + goto exit_stream_alloc_failed; + } + + psInitStream = HostNonPageablePageAlloc(1); + if (!psInitStream) + { + PVR_DPF((PVR_DBG_ERROR,"DBGDriv: Couldn't alloc InitStream\n\r")); + goto exit_stream_init_alloc_failed; + } + + psLFBuffer = HostNonPageablePageAlloc(1); + if (!psLFBuffer) + { + PVR_DPF((PVR_DBG_ERROR,"DBGDriv: Couldn't alloc LFBuffer\n\r")); + goto exit_lfbuffer_alloc_failed; + } + + psCtrl = HostNonPageablePageAlloc(1); + if (!psCtrl) + { + PVR_DPF((PVR_DBG_ERROR,"DBGDriv: Couldn't alloc Ctrl struct\n\r")); + goto exit_ctrl_alloc_failed; + } + + /* Allocate memory for buffer */ + if ((ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0) + { + pvBase = HostNonPageablePageAlloc(ui32Size); + } + else + { + pvBase = HostPageablePageAlloc(ui32Size); + } + + if (!pvBase) + { + PVR_DPF((PVR_DBG_ERROR,"DBGDriv: Couldn't alloc Stream buffer\n\r")); + goto exit_stream_buffer_failed; + } + + /* Setup control state */ + psCtrl->ui32Flags = ui32Flags; + psCtrl->ui32CapMode = ui32CapMode; + psCtrl->ui32OutMode = ui32OutMode; + psCtrl->ui32DebugLevel = DEBUG_LEVEL_0; + psCtrl->ui32DefaultMode = ui32CapMode; + psCtrl->ui32Start = 0; + psCtrl->ui32End = 0; + psCtrl->ui32Current = 0; + psCtrl->ui32SampleRate = 1; + psCtrl->bInitPhaseComplete = IMG_FALSE; + + /* + Setup internal debug buffer state. 
+ */ + psStream->psNext = 0; + psStream->pvBase = pvBase; + psStream->psCtrl = psCtrl; + psStream->ui32Size = ui32Size * 4096UL; + psStream->ui32RPtr = 0; + psStream->ui32WPtr = 0; + psStream->ui32DataWritten = 0; + psStream->ui32Marker = 0; + psStream->bCircularAllowed = IMG_TRUE; + psStream->ui32InitPhaseWOff = 0; + + /* Allocate memory for buffer */ + if ((ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0) + { + pvBase = HostNonPageablePageAlloc(ui32Size); + } + else + { + pvBase = HostPageablePageAlloc(ui32Size); + } + + if (!pvBase) + { + PVR_DPF((PVR_DBG_ERROR,"DBGDriv: Couldn't alloc InitStream buffer\n\r")); + + if ((psStream->psCtrl->ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0) + { + HostNonPageablePageFree(psStream->pvBase); + } + else + { + HostPageablePageFree(psStream->pvBase); + } + goto exit_stream_buffer_failed; + } + + /* Initialise the stream for the init phase */ + psInitStream->psNext = 0; + psInitStream->pvBase = pvBase; + psInitStream->psCtrl = psCtrl; + psInitStream->ui32Size = ui32Size * 4096UL; + psInitStream->ui32RPtr = 0; + psInitStream->ui32WPtr = 0; + psInitStream->ui32DataWritten = 0; + psInitStream->ui32Marker = 0; + psInitStream->bCircularAllowed = IMG_FALSE; + psInitStream->ui32InitPhaseWOff = 0; + psStream->psInitStream = psInitStream; + + /* + Copy buffer name. 
+ */ + ui32Off = 0; + + do + { + IMG_CHAR c = pszName[ui32Off]; + psStream->szName[ui32Off] = c; + psInitStream->szName[ui32Off] = c; + ui32Off++; + } + while ((ui32Off < MAX_STREAM_NAME_LENGTH) && (pszName[ui32Off] != 0)); + + if (ui32Off == MAX_STREAM_NAME_LENGTH) + { + PVR_DPF((PVR_DBG_ERROR,"DBGDrivCreateStream: Stream name too long!\n\r")); + goto exit_buffer_name_too_long; + } + psStream->szName[ui32Off] = '\0'; + + /* + Append suffix to init phase name + */ + ui32OffSuffix = 0; + do + { + psInitStream->szName[ui32Off] = pszNameInitSuffix[ui32OffSuffix]; + ui32Off++; + ui32OffSuffix++; + } + while ((ui32Off < MAX_STREAM_NAME_LENGTH) && (ui32OffSuffix < (sizeof(pszNameInitSuffix)/sizeof(IMG_CHAR) - 1))); + + if (ui32Off == MAX_STREAM_NAME_LENGTH) + { + PVR_DPF((PVR_DBG_ERROR,"DBGDrivCreateStream: Init stream name too long!\n\r")); + goto exit_buffer_name_too_long; + } + psInitStream->szName[ui32Off] = '\0'; + + /* Setup last frame buffer */ + psLFBuffer->psStream = psStream; + psLFBuffer->ui32BufLen = 0UL; + + g_bHotkeyMiddump = IMG_FALSE; + g_ui32HotkeyMiddumpStart = 0xffffffffUL; + g_ui32HotkeyMiddumpEnd = 0xffffffffUL; + + /* + Insert into list. 
+ */ + psStream->psNext = g_psStreamList; + g_psStreamList = psStream; + + psLFBuffer->psNext = g_psLFBufferList; + g_psLFBufferList = psLFBuffer; + + AddSIDEntry(psStream); + + return((IMG_VOID *) psStream); + +exit_buffer_name_too_long: + if ((psStream->psCtrl->ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0) + { + HostNonPageablePageFree(psStream->pvBase); + } + else + { + HostPageablePageFree(psStream->pvBase); + } + if ((psInitStream->psCtrl->ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0) + { + HostNonPageablePageFree(psInitStream->pvBase); + } + else + { + HostPageablePageFree(psInitStream->pvBase); + } +exit_stream_buffer_failed: + HostNonPageablePageFree(psCtrl); +exit_ctrl_alloc_failed: + HostNonPageablePageFree(psLFBuffer); +exit_lfbuffer_alloc_failed: + HostNonPageablePageFree(psInitStream); +exit_stream_init_alloc_failed: + HostNonPageablePageFree(psStream); +exit_stream_alloc_failed: + return((IMG_VOID *) 0); +} + +/*!**************************************************************************** + @name DBGDrivDestroyStream + @brief Delete a stream and free its memory + @param psStream - stream to be removed + @return none +*****************************************************************************/ +void IMG_CALLCONV DBGDrivDestroyStream(PDBG_STREAM psStream) +{ + PDBG_STREAM psStreamThis; + PDBG_STREAM psStreamPrev; + PDBG_LASTFRAME_BUFFER psLFBuffer; + PDBG_LASTFRAME_BUFFER psLFThis; + PDBG_LASTFRAME_BUFFER psLFPrev; + + PVR_DPF((PVR_DBG_MESSAGE, "DBGDriv: Destroying stream %s\r\n", psStream->szName )); + + /* + Validate buffer. + */ + if (!StreamValid(psStream)) + { + return; + } + + RemoveSIDEntry(psStream); + + psLFBuffer = FindLFBuf(psStream); + + /* + Remove from linked list. 
+ */ + psStreamThis = g_psStreamList; + psStreamPrev = 0; + + while (psStreamThis) + { + if (psStreamThis == psStream) + { + if (psStreamPrev) + { + psStreamPrev->psNext = psStreamThis->psNext; + } + else + { + g_psStreamList = psStreamThis->psNext; + } + + psStreamThis = 0; + } + else + { + psStreamPrev = psStreamThis; + psStreamThis = psStreamThis->psNext; + } + } + + psLFThis = g_psLFBufferList; + psLFPrev = 0; + + while (psLFThis) + { + if (psLFThis == psLFBuffer) + { + if (psLFPrev) + { + psLFPrev->psNext = psLFThis->psNext; + } + else + { + g_psLFBufferList = psLFThis->psNext; + } + + psLFThis = 0; + } + else + { + psLFPrev = psLFThis; + psLFThis = psLFThis->psNext; + } + } + /* + Dectivate hotkey it the stream is of this type. + */ + if (psStream->psCtrl->ui32CapMode & DEBUG_CAPMODE_HOTKEY) + { + DeactivateHotKeys(); + } + + /* + And free its memory. + */ + if ((psStream->psCtrl->ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0) + { + HostNonPageablePageFree(psStream->psCtrl); + HostNonPageablePageFree(psStream->pvBase); + HostNonPageablePageFree(psStream->psInitStream->pvBase); + } + else + { + HostNonPageablePageFree(psStream->psCtrl); + HostPageablePageFree(psStream->pvBase); + HostPageablePageFree(psStream->psInitStream->pvBase); + } + + HostNonPageablePageFree(psStream->psInitStream); + HostNonPageablePageFree(psStream); + HostNonPageablePageFree(psLFBuffer); + + if (g_psStreamList == 0) + { + PVR_DPF((PVR_DBG_MESSAGE,"DBGDriv: Stream list now empty" )); + } + + return; +} + +/*!**************************************************************************** + @name DBGDrivFindStream + @brief Finds/resets a named stream + @param pszName - stream name + @param bResetStream - whether to reset the stream, e.g. 
to end pdump init phase + @return none +*****************************************************************************/ +IMG_VOID * IMG_CALLCONV DBGDrivFindStream(IMG_CHAR * pszName, IMG_BOOL bResetStream) +{ + PDBG_STREAM psStream; + PDBG_STREAM psThis; + IMG_BOOL bAreSame; + IMG_UINT32 ui32NameLength; + + psStream = 0; + ui32NameLength = strlen(pszName); + + PVR_DPF((PVR_DBGDRIV_MESSAGE, "PDump client connecting to %s %s", + pszName, + (bResetStream == IMG_TRUE) ? "with reset" : "no reset")); + + /* + Scan buffer names for supplied one. + */ + for (psThis = g_psStreamList; psThis != IMG_NULL; psThis = psThis->psNext) + { + bAreSame = IMG_TRUE; + + if (strlen(psThis->szName) == ui32NameLength) + { + IMG_UINT32 ui32Off = 0; + + while ((ui32Off < ui32NameLength) && (ui32Off < MAX_STREAM_NAME_LENGTH) && bAreSame) + { + if (psThis->szName[ui32Off] != pszName[ui32Off]) + { + bAreSame = IMG_FALSE; + } + + ui32Off++; + } + } + else + { + bAreSame = IMG_FALSE; + } + + if (bAreSame) + { + psStream = psThis; + break; + } + } + + if(bResetStream && psStream) + { + static const IMG_CHAR szComment[] = "-- Init phase terminated\r\n"; + psStream->psInitStream->ui32RPtr = 0; + psStream->ui32RPtr = 0; + psStream->ui32WPtr = 0; + psStream->ui32DataWritten = psStream->psInitStream->ui32DataWritten; + if (psStream->psCtrl->bInitPhaseComplete == IMG_FALSE) + { + if (psStream->psCtrl->ui32Flags & DEBUG_FLAGS_TEXTSTREAM) + { + DBGDrivWrite2(psStream, (IMG_UINT8 *)szComment, sizeof(szComment) - 1, 0x01); + } + psStream->psCtrl->bInitPhaseComplete = IMG_TRUE; + } + + { + /* mark init stream to prevent further reading by pdump client */ + psStream->psInitStream->ui32InitPhaseWOff = psStream->psInitStream->ui32WPtr; + PVR_DPF((PVR_DBGDRIV_MESSAGE, "Set %s client marker bo %x, total bw %x", + psStream->szName, + psStream->psInitStream->ui32InitPhaseWOff, + psStream->psInitStream->ui32DataWritten )); + } + } + + return((IMG_VOID *) psStream); +} + +static void IMG_CALLCONV 
DBGDrivInvalidateStream(PDBG_STREAM psStream) +{ + IMG_CHAR pszErrorMsg[] = "**OUTOFMEM\n"; + IMG_UINT32 ui32Space; + IMG_UINT32 ui32Off = 0; + IMG_UINT32 ui32WPtr = psStream->ui32WPtr; + IMG_PUINT8 pui8Buffer = (IMG_UINT8 *) psStream->pvBase; + + PVR_DPF((PVR_DBG_ERROR, "DBGDrivInvalidateStream: An error occurred for stream %s\r\n", psStream->szName )); + + /* + Validate buffer. + */ + /* + if (!StreamValid(psStream)) + { + return; + } +*/ + /* Write what we can of the error message */ + ui32Space = SpaceInStream(psStream); + + /* Make sure there's space for termination character */ + if(ui32Space > 0) + { + ui32Space--; + } + else + { + PVR_DPF((PVR_DBG_ERROR, "DBGDrivInvalidateStream: Buffer full.")); + } + + while((pszErrorMsg[ui32Off] != 0) && (ui32Off < ui32Space)) + { + pui8Buffer[ui32WPtr] = (IMG_UINT8)pszErrorMsg[ui32Off]; + ui32Off++; + ui32WPtr++; + } + pui8Buffer[ui32WPtr++] = '\0'; + psStream->ui32WPtr = ui32WPtr; + + /* Buffer will accept no more params from Services/client driver */ + psStream->psCtrl->ui32Flags |= DEBUG_FLAGS_READONLY; +} + +/*!**************************************************************************** + @name InvalidateAllStreams + @brief invalidate all streams in list + @return none +*****************************************************************************/ +static IMG_VOID InvalidateAllStreams(IMG_VOID) +{ + PDBG_STREAM psStream = g_psStreamList; + while (psStream != IMG_NULL) + { + DBGDrivInvalidateStream(psStream); + psStream = psStream->psNext; + } + return; +} + + + +/*!**************************************************************************** + @name DBGDrivWriteStringCM + @brief Write capture mode data, wraps DBGDrivWriteString + @param psStream - stream + @param pszString - input buffer + @param ui32Level - debug level +*****************************************************************************/ +IMG_UINT32 IMG_CALLCONV DBGDrivWriteStringCM(PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Level) +{ + /* + 
Validate buffer. + */ + if (!StreamValidForWrite(psStream)) + { + return(0xFFFFFFFFUL); + } + + /* + Only write string if debug capture mode adds up... + */ + if (psStream->psCtrl->ui32CapMode & DEBUG_CAPMODE_FRAMED) + { + if ((psStream->psCtrl->ui32Flags & DEBUG_FLAGS_ENABLESAMPLE) == 0) + { + return(0); + } + } + else + { + if (psStream->psCtrl->ui32CapMode == DEBUG_CAPMODE_HOTKEY) + { + if ((psStream->psCtrl->ui32Current != g_ui32HotKeyFrame) || (g_bHotKeyPressed == IMG_FALSE)) + { + return(0); + } + } + } + + return(DBGDrivWriteString(psStream,pszString,ui32Level)); + +} + +/*!**************************************************************************** + @name DBGDrivWriteString + @brief Write string to stream (note stream buffer size is assumed fixed) + @param psStream - stream + @param pszString - string to write + @param ui32Level - verbosity level + @return -1; invalid stream + 0; other error (e.g. stream not enabled) + else number of characters written +*****************************************************************************/ +IMG_UINT32 IMG_CALLCONV DBGDrivWriteString(PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Level) +{ + IMG_UINT32 ui32Len; + IMG_UINT32 ui32Space; + IMG_UINT32 ui32WPtr; + IMG_UINT8 * pui8Buffer; + + /* + Validate buffer. + */ + if (!StreamValidForWrite(psStream)) + { + return(0xFFFFFFFFUL); + } + + /* + Check debug level. + */ + if ((psStream->psCtrl->ui32DebugLevel & ui32Level) == 0) + { + return(0xFFFFFFFFUL); + } + + /* + Output to standard debug out ? (don't if async out + flag is set). + */ + if ((psStream->psCtrl->ui32OutMode & DEBUG_OUTMODE_ASYNC) == 0) + { + if (psStream->psCtrl->ui32OutMode & DEBUG_OUTMODE_STANDARDDBG) + { + PVR_DPF((PVR_DBG_MESSAGE,"%s: %s\r\n",psStream->szName, pszString)); + } + + /* + Output to mono monitor ? 
+ */ + if (psStream->psCtrl->ui32OutMode & DEBUG_OUTMODE_MONO) + { + MonoOut(psStream->szName,IMG_FALSE); + MonoOut(": ",IMG_FALSE); + MonoOut(pszString,IMG_TRUE); + } + } + + /* + Don't bother writing the string if it's not flagged + */ + if ( + !( + ((psStream->psCtrl->ui32OutMode & DEBUG_OUTMODE_STREAMENABLE) != 0) || + ((psStream->psCtrl->ui32OutMode & DEBUG_OUTMODE_ASYNC) != 0) + ) + ) + { + return(0xFFFFFFFFUL); + } + + /* + How much space have we got in the buffer ? + */ + ui32Space=SpaceInStream(psStream); + + /* Make sure there's space for termination character */ + if(ui32Space > 0) + { + ui32Space--; + } + + ui32Len = 0; + ui32WPtr = psStream->ui32WPtr; + pui8Buffer = (IMG_UINT8 *) psStream->pvBase; + + while((pszString[ui32Len] != 0) && (ui32Len < ui32Space)) + { + pui8Buffer[ui32WPtr] = (IMG_UINT8)pszString[ui32Len]; + ui32Len++; + ui32WPtr++; + if (ui32WPtr == psStream->ui32Size) + { + ui32WPtr = 0; + } + } + + if (ui32Len < ui32Space) + { + /* copy terminator */ + pui8Buffer[ui32WPtr] = (IMG_UINT8)pszString[ui32Len]; + ui32Len++; + ui32WPtr++; + if (ui32WPtr == psStream->ui32Size) + { + ui32WPtr = 0; + } + + /* Write pointer, and length */ + psStream->ui32WPtr = ui32WPtr; + psStream->ui32DataWritten+= ui32Len; + } else + { + ui32Len = 0; + } + +#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS) + if (ui32Len) + { + HostSignalEvent(DBG_EVENT_STREAM_DATA); + } +#endif + + return(ui32Len); +} + +/*!**************************************************************************** + @name DBGDrivReadString + @brief Reads string from debug stream + @param psStream - stream + @param pszString - string to read + @param ui32Limit - max size to read + @return -1; invalid stream + 0; other error (e.g. 
stream not enabled) + else number of characters read +*****************************************************************************/ +IMG_UINT32 IMG_CALLCONV DBGDrivReadString(PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Limit) +{ + IMG_UINT32 ui32OutLen; + IMG_UINT32 ui32Len; + IMG_UINT32 ui32Offset; + IMG_UINT8 *pui8Buff; + + /* + Validate buffer. + */ + if (!StreamValidForRead(psStream)) + { + return(0); + } + + /* + Stream appears to be in list so carry on. + */ + pui8Buff = (IMG_UINT8 *)psStream->pvBase; + ui32Offset = psStream->ui32RPtr; + + if (psStream->ui32RPtr == psStream->ui32WPtr) + { + return(0); + } + + /* + Find length of string. + */ + ui32Len = 0; + while((pui8Buff[ui32Offset] != 0) && (ui32Offset != psStream->ui32WPtr)) + { + ui32Offset++; + ui32Len++; + + /* + Reset offset if buffer wrapped. + */ + if (ui32Offset == psStream->ui32Size) + { + ui32Offset = 0; + } + } + + ui32OutLen = ui32Len + 1; + + /* + Only copy string if target has enough space. + */ + if (ui32Len > ui32Limit) + { + return(0); + } + + /* + Copy it. 
+ */ + ui32Offset = psStream->ui32RPtr; + ui32Len = 0; + + while ((pui8Buff[ui32Offset] != 0) && (ui32Len < ui32Limit)) + { + pszString[ui32Len] = (IMG_CHAR)pui8Buff[ui32Offset]; + ui32Offset++; + ui32Len++; + + /* + If wrap as necessary + */ + if (ui32Offset == psStream->ui32Size) + { + ui32Offset = 0; + } + } + + pszString[ui32Len] = (IMG_CHAR)pui8Buff[ui32Offset]; + + psStream->ui32RPtr = ui32Offset + 1; + + if (psStream->ui32RPtr == psStream->ui32Size) + { + psStream->ui32RPtr = 0; + } + + return(ui32OutLen); +} + +/*!**************************************************************************** + @name DBGDrivWrite + @brief Write binary buffer to stream (fixed size) + @param psStream - stream + @param pui8InBuf - buffer to write + @param ui32InBuffSize - size + @param ui32Level - verbosity level + @return bytes written, 0 if recoverable error, -1 if unrecoverable error +*****************************************************************************/ +IMG_UINT32 IMG_CALLCONV DBGDrivWrite(PDBG_STREAM psMainStream,IMG_UINT8 * pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level) +{ + IMG_UINT32 ui32Space; + DBG_STREAM *psStream; + + /* + Validate buffer. + */ + if (!StreamValidForWrite(psMainStream)) + { + return(0xFFFFFFFFUL); + } + + /* + Check debug level. + */ + if ((psMainStream->psCtrl->ui32DebugLevel & ui32Level) == 0) + { + return(0xFFFFFFFFUL); + } + + /* + Only write data if debug mode adds up... 
+ */ + if (psMainStream->psCtrl->ui32CapMode & DEBUG_CAPMODE_FRAMED) + { + if ((psMainStream->psCtrl->ui32Flags & DEBUG_FLAGS_ENABLESAMPLE) == 0) + { + /* throw away non-capturing data */ + return(ui32InBuffSize); + } + } + else if (psMainStream->psCtrl->ui32CapMode == DEBUG_CAPMODE_HOTKEY) + { + if ((psMainStream->psCtrl->ui32Current != g_ui32HotKeyFrame) || (g_bHotKeyPressed == IMG_FALSE)) + { + /* throw away non-capturing data */ + return(ui32InBuffSize); + } + } + + if(psMainStream->psCtrl->bInitPhaseComplete) + { + psStream = psMainStream; + } + else + { + psStream = psMainStream->psInitStream; + } + + /* + How much space have we got in the buffer ? + */ + ui32Space=SpaceInStream(psStream); + + PVR_DPF((PVR_DBGDRIV_MESSAGE, "Recv %d b for %s: Roff = %x, WOff = %x", + ui32InBuffSize, + psStream->szName, + psStream->ui32RPtr, + psStream->ui32WPtr)); + + /* + Don't copy anything if we don't have space or buffers not enabled. + */ + if ((psStream->psCtrl->ui32OutMode & DEBUG_OUTMODE_STREAMENABLE) == 0) + { + PVR_DPF((PVR_DBG_ERROR, "DBGDrivWrite: buffer %p is disabled", psStream)); + return(0); + } + + if (ui32Space < 8) + { + PVR_DPF((PVR_DBG_ERROR, "DBGDrivWrite: buffer %p is full", psStream)); + return(0); + } + + /* + Only copy what we can.. + */ + if (ui32Space <= (ui32InBuffSize + 4)) + { + ui32InBuffSize = ui32Space - 8; + } + + /* + Write the stuff... 
+ */ + Write(psStream,(IMG_UINT8 *) &ui32InBuffSize,4); + Write(psStream,pui8InBuf,ui32InBuffSize); + +#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS) + if (ui32InBuffSize) + { + HostSignalEvent(DBG_EVENT_STREAM_DATA); + } +#endif + return(ui32InBuffSize); +} + +/*!**************************************************************************** + @name DBGDrivWriteCM + @brief Write capture mode data, wraps DBGDrivWrite + @param psStream - stream + @param pui8InBuf - input buffer + @param ui32InBuffSize - buffer size + @param ui32Level - verbosity level + @return bytes written, 0 if recoverable error, -1 if unrecoverable error +*****************************************************************************/ +IMG_UINT32 IMG_CALLCONV DBGDrivWriteCM(PDBG_STREAM psStream,IMG_UINT8 * pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level) +{ + /* + Validate buffer. + */ + if (!StreamValidForWrite(psStream)) + { + return(0xFFFFFFFFUL); + } + + /* + Only write data if debug mode adds up... + */ + if (psStream->psCtrl->ui32CapMode & DEBUG_CAPMODE_FRAMED) + { + if ((psStream->psCtrl->ui32Flags & DEBUG_FLAGS_ENABLESAMPLE) == 0) + { + /* throw away non-capturing data */ + return(ui32InBuffSize); + } + } + else + { + if (psStream->psCtrl->ui32CapMode == DEBUG_CAPMODE_HOTKEY) + { + if ((psStream->psCtrl->ui32Current != g_ui32HotKeyFrame) || (g_bHotKeyPressed == IMG_FALSE)) + { + /* throw away non-capturing data */ + return(ui32InBuffSize); + } + } + } + + return(DBGDrivWrite2(psStream,pui8InBuf,ui32InBuffSize,ui32Level)); +} + + +/*!**************************************************************************** + @name DBGDrivWritePersist + @brief Copies data from a buffer into selected stream's init phase. Stream size should be expandable. 
+ @param psStream - stream for output + @param pui8InBuf - input buffer + @param ui32InBuffSize - size of input + @param ui32Level - not used + @return bytes copied, 0 if recoverable error, -1 if unrecoverable error +*****************************************************************************/ +static IMG_UINT32 DBGDrivWritePersist(PDBG_STREAM psMainStream,IMG_UINT8 * pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level) +{ + DBG_STREAM *psStream; + PVR_UNREFERENCED_PARAMETER(ui32Level); + + /* + Validate buffer. + */ + if (!StreamValidForWrite(psMainStream)) + { + return(0xFFFFFFFFUL); + } + + /* Always append persistent data to init phase so it's available on + * subsequent app runs. + */ + psStream = psMainStream->psInitStream; + if(psStream->bCircularAllowed == IMG_TRUE) + { + PVR_DPF((PVR_DBG_WARNING, "DBGDrivWritePersist: Init phase is a circular buffer, some data may be lost")); + } + + PVR_DPF((PVR_DBGDRIV_MESSAGE, "Append %x b to %s: Roff = %x, WOff = %x [bw = %x]", + ui32InBuffSize, + psStream->szName, + psStream->ui32RPtr, + psStream->ui32WPtr, + psStream->ui32DataWritten)); + + return( WriteExpandingBuffer(psStream, pui8InBuf, ui32InBuffSize) ); +} + +/*!**************************************************************************** + @name DBGDrivWrite2 + @brief Copies data from a buffer into selected (expandable) stream. + @param psMainStream - stream for output + @param pui8InBuf - input buffer + @param ui32InBuffSize - size of input + @param ui32Level - debug level of input + @return bytes copied, 0 if recoverable error, -1 if unrecoverable error +*****************************************************************************/ +IMG_UINT32 IMG_CALLCONV DBGDrivWrite2(PDBG_STREAM psMainStream,IMG_UINT8 * pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level) +{ + DBG_STREAM *psStream; + + /* + Validate buffer. 
+ */ + if (!StreamValidForWrite(psMainStream)) + { + PVR_DPF((PVR_DBG_ERROR, "DBGDrivWrite2: stream not valid")); + return(0xFFFFFFFFUL); + } + + /* + Check debug level. + */ + if ((psMainStream->psCtrl->ui32DebugLevel & ui32Level) == 0) + { + return(0); + } + + if(psMainStream->psCtrl->bInitPhaseComplete) + { + psStream = psMainStream; + } + else + { + psStream = psMainStream->psInitStream; + } + + PVR_DPF((PVR_DBGDRIV_MESSAGE, "Recv(exp) %d b for %s: Roff = %x, WOff = %x", + ui32InBuffSize, + psStream->szName, + psStream->ui32RPtr, + psStream->ui32WPtr)); + + return( WriteExpandingBuffer(psStream, pui8InBuf, ui32InBuffSize) ); +} + +/*!**************************************************************************** + @name DBGDrivRead + @brief Read from debug driver buffers + @param psMainStream - stream + @param bReadInitBuffer - whether to read from the init stream or the main stream + @param ui32OutBuffSize - available space in client buffer + @param pui8OutBuf - output buffer + @return bytes read, 0 if failure occurred +*****************************************************************************/ +IMG_UINT32 IMG_CALLCONV DBGDrivRead(PDBG_STREAM psMainStream, IMG_BOOL bReadInitBuffer, IMG_UINT32 ui32OutBuffSize,IMG_UINT8 * pui8OutBuf) +{ + IMG_UINT32 ui32Data; + DBG_STREAM *psStream; + + /* + Validate buffer. + */ + if (!StreamValidForRead(psMainStream)) + { + PVR_DPF((PVR_DBG_ERROR, "DBGDrivRead: buffer %p is invalid", psMainStream)); + return(0); + } + + if(bReadInitBuffer) + { + psStream = psMainStream->psInitStream; + } + else + { + psStream = psMainStream; + } + + /* Don't read beyond the init phase marker point */ + if (psStream->ui32RPtr == psStream->ui32WPtr || + ((psStream->ui32InitPhaseWOff > 0) && + (psStream->ui32RPtr >= psStream->ui32InitPhaseWOff)) ) + { + return(0); + } + + /* + Get amount of data in buffer. 
+ */ + if (psStream->ui32RPtr <= psStream->ui32WPtr) + { + ui32Data = psStream->ui32WPtr - psStream->ui32RPtr; + } + else + { + ui32Data = psStream->ui32WPtr + (psStream->ui32Size - psStream->ui32RPtr); + } + + /* + Don't read beyond the init phase marker point + */ + if ((psStream->ui32InitPhaseWOff > 0) && + (psStream->ui32InitPhaseWOff < psStream->ui32WPtr)) + { + ui32Data = psStream->ui32InitPhaseWOff - psStream->ui32RPtr; + } + + /* + Only transfer what target buffer can handle. + */ + if (ui32Data > ui32OutBuffSize) + { + ui32Data = ui32OutBuffSize; + } + + PVR_DPF((PVR_DBGDRIV_MESSAGE, "Send %x b from %s: Roff = %x, WOff = %x", + ui32Data, + psStream->szName, + psStream->ui32RPtr, + psStream->ui32WPtr)); + + /* + Split copy into two bits as necessay. + */ + if ((psStream->ui32RPtr + ui32Data) > psStream->ui32Size) + { /* Calc block 1 and block 2 sizes */ + IMG_UINT32 ui32B1 = psStream->ui32Size - psStream->ui32RPtr; + IMG_UINT32 ui32B2 = ui32Data - ui32B1; + + /* Copy up to end of circular buffer */ + HostMemCopy((IMG_VOID *) pui8OutBuf, + (IMG_VOID *)((IMG_UINTPTR_T)psStream->pvBase + psStream->ui32RPtr), + ui32B1); + + /* Copy from start of circular buffer */ + HostMemCopy((IMG_VOID *)(pui8OutBuf + ui32B1), + psStream->pvBase, + ui32B2); + + /* Update read pointer now that we've copied the data out */ + psStream->ui32RPtr = ui32B2; + } + else + { /* Copy data from wherever */ + HostMemCopy((IMG_VOID *) pui8OutBuf, + (IMG_VOID *)((IMG_UINTPTR_T)psStream->pvBase + psStream->ui32RPtr), + ui32Data); + + /* Update read pointer now that we've copied the data out */ + psStream->ui32RPtr += ui32Data; + + /* Check for wrapping */ + if (psStream->ui32RPtr == psStream->ui32Size) + { + psStream->ui32RPtr = 0; + } + } + + return(ui32Data); +} + +/*!**************************************************************************** + @name DBGDrivSetCaptureMode + @brief Set capture mode + @param psStream - stream + @param ui32Mode - capturing mode + @param ui32Start - start 
frame (frame mode only) + @param ui32End - end frame (frame mode) + @param ui32SampleRate - sampling frequency (frame mode) +*****************************************************************************/ +void IMG_CALLCONV DBGDrivSetCaptureMode(PDBG_STREAM psStream,IMG_UINT32 ui32Mode,IMG_UINT32 ui32Start,IMG_UINT32 ui32End,IMG_UINT32 ui32SampleRate) +{ + /* + Validate buffer. + */ + if (!StreamValid(psStream)) + { + return; + } + + psStream->psCtrl->ui32CapMode = ui32Mode; + psStream->psCtrl->ui32DefaultMode = ui32Mode; + psStream->psCtrl->ui32Start = ui32Start; + psStream->psCtrl->ui32End = ui32End; + psStream->psCtrl->ui32SampleRate = ui32SampleRate; + + /* + Activate hotkey it the stream is of this type. + */ + if (psStream->psCtrl->ui32CapMode & DEBUG_CAPMODE_HOTKEY) + { + ActivateHotKeys(psStream); + } +} + +/*!**************************************************************************** + @name DBGDrivSetOutputMode + @brief Change output mode + @param psStream - stream + @param ui32OutMode - output mode + @return none +*****************************************************************************/ +void IMG_CALLCONV DBGDrivSetOutputMode(PDBG_STREAM psStream,IMG_UINT32 ui32OutMode) +{ + /* + Validate buffer. + */ + if (!StreamValid(psStream)) + { + return; + } + + psStream->psCtrl->ui32OutMode = ui32OutMode; +} + +/*!**************************************************************************** + @name DBGDrivSetDebugLevel + @brief Change debug level + @param psStream - stream + @param ui32DebugLevel - verbosity level + @return none +*****************************************************************************/ +void IMG_CALLCONV DBGDrivSetDebugLevel(PDBG_STREAM psStream,IMG_UINT32 ui32DebugLevel) +{ + /* + Validate buffer. 
+ */ + if (!StreamValid(psStream)) + { + return; + } + + psStream->psCtrl->ui32DebugLevel = ui32DebugLevel; +} + +/*!**************************************************************************** + @name DBGDrivSetFrame + @brief Advance frame counter + @param psStream - stream + @param ui32Frame - frame number + @return none +*****************************************************************************/ +void IMG_CALLCONV DBGDrivSetFrame(PDBG_STREAM psStream,IMG_UINT32 ui32Frame) +{ + /* + Validate buffer. + */ + if (!StreamValid(psStream)) + { + return; + } + + psStream->psCtrl->ui32Current = ui32Frame; + + if ((ui32Frame >= psStream->psCtrl->ui32Start) && + (ui32Frame <= psStream->psCtrl->ui32End) && + (((ui32Frame - psStream->psCtrl->ui32Start) % psStream->psCtrl->ui32SampleRate) == 0)) + { + psStream->psCtrl->ui32Flags |= DEBUG_FLAGS_ENABLESAMPLE; + } + else + { + psStream->psCtrl->ui32Flags &= ~DEBUG_FLAGS_ENABLESAMPLE; + } + + if (g_bHotkeyMiddump) + { + if ((ui32Frame >= g_ui32HotkeyMiddumpStart) && + (ui32Frame <= g_ui32HotkeyMiddumpEnd) && + (((ui32Frame - g_ui32HotkeyMiddumpStart) % psStream->psCtrl->ui32SampleRate) == 0)) + { + psStream->psCtrl->ui32Flags |= DEBUG_FLAGS_ENABLESAMPLE; + } + else + { + psStream->psCtrl->ui32Flags &= ~DEBUG_FLAGS_ENABLESAMPLE; + if (psStream->psCtrl->ui32Current > g_ui32HotkeyMiddumpEnd) + { + g_bHotkeyMiddump = IMG_FALSE; + } + } + } + + /* Check to see if hotkey press has been registered (from keyboard filter) */ + if (g_bHotKeyRegistered) + { + g_bHotKeyRegistered = IMG_FALSE; + + PVR_DPF((PVR_DBG_MESSAGE,"Hotkey pressed (%p)!\n",psStream)); + + if (!g_bHotKeyPressed) + { + /* + Capture the next frame. + */ + g_ui32HotKeyFrame = psStream->psCtrl->ui32Current + 2; + + /* + Do the flag. + */ + g_bHotKeyPressed = IMG_TRUE; + } + + /* + If in framed hotkey mode, then set start frame. 
+ */ + if (((psStream->psCtrl->ui32CapMode & DEBUG_CAPMODE_FRAMED) != 0) && + ((psStream->psCtrl->ui32CapMode & DEBUG_CAPMODE_HOTKEY) != 0)) + { + if (!g_bHotkeyMiddump) + { + /* Turn on */ + g_ui32HotkeyMiddumpStart = g_ui32HotKeyFrame + 1; + g_ui32HotkeyMiddumpEnd = 0xffffffff; + g_bHotkeyMiddump = IMG_TRUE; + PVR_DPF((PVR_DBG_MESSAGE,"Sampling every %d frame(s)\n", psStream->psCtrl->ui32SampleRate)); + } + else + { + /* Turn off */ + g_ui32HotkeyMiddumpEnd = g_ui32HotKeyFrame; + PVR_DPF((PVR_DBG_MESSAGE,"Turning off sampling\n")); + } + } + + } + + /* + Clear the hotkey frame indicator when over that frame. + */ + if (psStream->psCtrl->ui32Current > g_ui32HotKeyFrame) + { + g_bHotKeyPressed = IMG_FALSE; + } +} + +/*!**************************************************************************** + @name DBGDrivGetFrame + @brief Retrieve current frame number + @param psStream - stream + @return frame number +*****************************************************************************/ +IMG_UINT32 IMG_CALLCONV DBGDrivGetFrame(PDBG_STREAM psStream) +{ + /* + Validate buffer. + */ + if (!StreamValid(psStream)) + { + return(0); + } + + return(psStream->psCtrl->ui32Current); +} + +/*!**************************************************************************** + @name DBGDrivIsLastCaptureFrame + @brief Is this the last frame to be captured? + @param psStream - stream + @return true if last capture frame, false otherwise +*****************************************************************************/ +IMG_BOOL IMG_CALLCONV DBGDrivIsLastCaptureFrame(PDBG_STREAM psStream) +{ + IMG_UINT32 ui32NextFrame; + + /* + Validate buffer. 
+ */ + if (!StreamValid(psStream)) + { + return IMG_FALSE; + } + + if (psStream->psCtrl->ui32CapMode & DEBUG_CAPMODE_FRAMED) + { + ui32NextFrame = psStream->psCtrl->ui32Current + psStream->psCtrl->ui32SampleRate; + if (ui32NextFrame > psStream->psCtrl->ui32End) + { + return IMG_TRUE; + } + } + return IMG_FALSE; +} + +/*!**************************************************************************** + @name DBGDrivIsCaptureFrame + @brief Is this a capture frame? + @param psStream - stream + @param bCheckPreviousFrame - set if it needs to be 1 frame ahead + @return true if capturing this frame, false otherwise +*****************************************************************************/ +IMG_BOOL IMG_CALLCONV DBGDrivIsCaptureFrame(PDBG_STREAM psStream, IMG_BOOL bCheckPreviousFrame) +{ + IMG_UINT32 ui32FrameShift = bCheckPreviousFrame ? 1UL : 0UL; + + /* + Validate buffer. + */ + if (!StreamValid(psStream)) + { + return IMG_FALSE; + } + + if (psStream->psCtrl->ui32CapMode & DEBUG_CAPMODE_FRAMED) + { + /* Needs to be one frame ahead, so disppatch can turn everything on */ + if (g_bHotkeyMiddump) + { + if ((psStream->psCtrl->ui32Current >= (g_ui32HotkeyMiddumpStart - ui32FrameShift)) && + (psStream->psCtrl->ui32Current <= (g_ui32HotkeyMiddumpEnd - ui32FrameShift)) && + ((((psStream->psCtrl->ui32Current + ui32FrameShift) - g_ui32HotkeyMiddumpStart) % psStream->psCtrl->ui32SampleRate) == 0)) + { + return IMG_TRUE; + } + } + else + { + if ((psStream->psCtrl->ui32Current >= (psStream->psCtrl->ui32Start - ui32FrameShift)) && + (psStream->psCtrl->ui32Current <= (psStream->psCtrl->ui32End - ui32FrameShift)) && + ((((psStream->psCtrl->ui32Current + ui32FrameShift) - psStream->psCtrl->ui32Start) % psStream->psCtrl->ui32SampleRate) == 0)) + { + return IMG_TRUE; + } + } + } + else if (psStream->psCtrl->ui32CapMode == DEBUG_CAPMODE_HOTKEY) + { + if ((psStream->psCtrl->ui32Current == (g_ui32HotKeyFrame-ui32FrameShift)) && (g_bHotKeyPressed)) + { + return IMG_TRUE; + } + } + return 
IMG_FALSE; +} + +/*!**************************************************************************** + @name DBGDrivOverrideMode + @brief Override capture mode + @param psStream - stream + @param ui32Mode - capture mode + @return none +*****************************************************************************/ +void IMG_CALLCONV DBGDrivOverrideMode(PDBG_STREAM psStream,IMG_UINT32 ui32Mode) +{ + /* + Validate buffer. + */ + if (!StreamValid(psStream)) + { + return; + } + + psStream->psCtrl->ui32CapMode = ui32Mode; +} + +/*!**************************************************************************** + @name DBGDrivDefaultMode + @param psStream - stream + @return none +*****************************************************************************/ +void IMG_CALLCONV DBGDrivDefaultMode(PDBG_STREAM psStream) +{ + /* + Validate buffer. + */ + if (!StreamValid(psStream)) + { + return; + } + + psStream->psCtrl->ui32CapMode = psStream->psCtrl->ui32DefaultMode; +} + +/*!**************************************************************************** + @name DBGDrivSetClientMarker + @brief Sets the marker to prevent reading initphase beyond data on behalf of previous app + @param psStream - stream + @param ui32Marker - byte offset in init buffer + @return nothing +*****************************************************************************/ +IMG_VOID IMG_CALLCONV DBGDrivSetClientMarker(PDBG_STREAM psStream, IMG_UINT32 ui32Marker) +{ + /* + Validate buffer + */ + if (!StreamValid(psStream)) + { + return; + } + + psStream->ui32InitPhaseWOff = ui32Marker; +} + +/*!**************************************************************************** + @name DBGDrivSetMarker + @brief Sets the marker in the stream to split output files + @param psStream, ui32Marker + @return nothing +*****************************************************************************/ +void IMG_CALLCONV DBGDrivSetMarker(PDBG_STREAM psStream, IMG_UINT32 ui32Marker) +{ + /* + Validate buffer + */ + if 
(!StreamValid(psStream)) + { + return; + } + + psStream->ui32Marker = ui32Marker; +} + +/*!**************************************************************************** + @name DBGDrivGetMarker + @brief Gets the marker in the stream to split output files + @param psStream - stream + @return marker offset +*****************************************************************************/ +IMG_UINT32 IMG_CALLCONV DBGDrivGetMarker(PDBG_STREAM psStream) +{ + /* + Validate buffer + */ + if (!StreamValid(psStream)) + { + return 0; + } + + return psStream->ui32Marker; +} + + +/*!**************************************************************************** + @name DBGDrivGetStreamOffset + @brief Gets the amount of data written to the stream + @param psMainStream - stream + @return bytes written +*****************************************************************************/ +IMG_UINT32 IMG_CALLCONV DBGDrivGetStreamOffset(PDBG_STREAM psMainStream) +{ + PDBG_STREAM psStream; + + /* + Validate buffer + */ + if (!StreamValid(psMainStream)) + { + return 0; + } + + if(psMainStream->psCtrl->bInitPhaseComplete) + { + psStream = psMainStream; + } + else + { + psStream = psMainStream->psInitStream; + } + + return psStream->ui32DataWritten; +} + +/*!**************************************************************************** + @name DBGDrivSetStreamOffset + @brief Sets the amount of data written to the stream + @param psMainStream - stream + @param ui32StreamOffset - stream offset + @return Nothing +*****************************************************************************/ +IMG_VOID IMG_CALLCONV DBGDrivSetStreamOffset(PDBG_STREAM psMainStream, IMG_UINT32 ui32StreamOffset) +{ + PDBG_STREAM psStream; + + /* + Validate buffer + */ + if (!StreamValid(psMainStream)) + { + return; + } + + if(psMainStream->psCtrl->bInitPhaseComplete) + { + psStream = psMainStream; + } + else + { + psStream = psMainStream->psInitStream; + } + + PVR_DPF((PVR_DBGDRIV_MESSAGE, "DBGDrivSetStreamOffset: %s set to %x 
b", + psStream->szName, + ui32StreamOffset)); + psStream->ui32DataWritten = ui32StreamOffset; +} + +/*!**************************************************************************** + @name DBGDrivGetServiceTable + @brief get jump table for Services driver + @return pointer to jump table +*****************************************************************************/ +IMG_PVOID IMG_CALLCONV DBGDrivGetServiceTable(IMG_VOID) +{ + return((IMG_PVOID)&g_sDBGKMServices); +} + +/*!**************************************************************************** + @name DBGDrivWriteLF + @brief Store data that should only be kept from the last frame dumped + @param psStream - stream + @param pui8InBuf - input buffer + @param ui32InBuffSize - size + @param ui32Level - verbosity level + @param ui32Flags - flags + @return bytes written +*****************************************************************************/ +IMG_UINT32 IMG_CALLCONV DBGDrivWriteLF(PDBG_STREAM psStream, IMG_UINT8 * pui8InBuf, IMG_UINT32 ui32InBuffSize, IMG_UINT32 ui32Level, IMG_UINT32 ui32Flags) +{ + PDBG_LASTFRAME_BUFFER psLFBuffer; + + /* + Validate buffer. + */ + if (!StreamValidForWrite(psStream)) + { + return(0xFFFFFFFFUL); + } + + /* + Check debug level. + */ + if ((psStream->psCtrl->ui32DebugLevel & ui32Level) == 0) + { + return(0xFFFFFFFFUL); + } + + /* + Only write data if debug mode adds up... 
+ */ + if ((psStream->psCtrl->ui32CapMode & DEBUG_CAPMODE_FRAMED) != 0) + { + if ((psStream->psCtrl->ui32Flags & DEBUG_FLAGS_ENABLESAMPLE) == 0) + { + /* throw away non-capturing data */ + return(ui32InBuffSize); + } + } + else if (psStream->psCtrl->ui32CapMode == DEBUG_CAPMODE_HOTKEY) + { + if ((psStream->psCtrl->ui32Current != g_ui32HotKeyFrame) || (g_bHotKeyPressed == IMG_FALSE)) + { + /* throw away non-capturing data */ + return(ui32InBuffSize); + } + } + + psLFBuffer = FindLFBuf(psStream); + + if (ui32Flags & WRITELF_FLAGS_RESETBUF) + { + /* + Copy the data into the buffer + */ + ui32InBuffSize = (ui32InBuffSize > LAST_FRAME_BUF_SIZE) ? LAST_FRAME_BUF_SIZE : ui32InBuffSize; + HostMemCopy((IMG_VOID *)psLFBuffer->ui8Buffer, (IMG_VOID *)pui8InBuf, ui32InBuffSize); + psLFBuffer->ui32BufLen = ui32InBuffSize; + } + else + { + /* + Append the data to the end of the buffer + */ + ui32InBuffSize = ((psLFBuffer->ui32BufLen + ui32InBuffSize) > LAST_FRAME_BUF_SIZE) ? (LAST_FRAME_BUF_SIZE - psLFBuffer->ui32BufLen) : ui32InBuffSize; + HostMemCopy((IMG_VOID *)(&psLFBuffer->ui8Buffer[psLFBuffer->ui32BufLen]), (IMG_VOID *)pui8InBuf, ui32InBuffSize); + psLFBuffer->ui32BufLen += ui32InBuffSize; + } + + return(ui32InBuffSize); +} + +/*!**************************************************************************** + @name DBGDrivReadLF + @brief Read data that should only be kept from the last frame dumped + @param psStream - stream + @param ui32OutBuffSize - buffer size + @param pui8OutBuf - output buffer + @return bytes read +*****************************************************************************/ +IMG_UINT32 IMG_CALLCONV DBGDrivReadLF(PDBG_STREAM psStream, IMG_UINT32 ui32OutBuffSize, IMG_UINT8 * pui8OutBuf) +{ + PDBG_LASTFRAME_BUFFER psLFBuffer; + IMG_UINT32 ui32Data; + + /* + Validate buffer. 
+ */ + if (!StreamValidForRead(psStream)) + { + return(0); + } + + psLFBuffer = FindLFBuf(psStream); + + /* + Get amount of data to copy + */ + ui32Data = (ui32OutBuffSize < psLFBuffer->ui32BufLen) ? ui32OutBuffSize : psLFBuffer->ui32BufLen; + + /* + Copy the data into the buffer + */ + HostMemCopy((IMG_VOID *)pui8OutBuf, (IMG_VOID *)psLFBuffer->ui8Buffer, ui32Data); + + return ui32Data; +} + +/*!**************************************************************************** + @name DBGDrivStartInitPhase + @brief Marks start of init phase + @param psStream - stream + @return void +*****************************************************************************/ +IMG_VOID IMG_CALLCONV DBGDrivStartInitPhase(PDBG_STREAM psStream) +{ + psStream->psCtrl->bInitPhaseComplete = IMG_FALSE; +} + +/*!**************************************************************************** + @name DBGDrivStopInitPhase + @brief Marks end of init phase + @param psStream - stream + @return void +*****************************************************************************/ +IMG_VOID IMG_CALLCONV DBGDrivStopInitPhase(PDBG_STREAM psStream) +{ + psStream->psCtrl->bInitPhaseComplete = IMG_TRUE; +} + +#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS) +/*!**************************************************************************** + @name DBGDrivWaitForEvent + @brief waits for an event + @param eEvent - debug driver event + @return void +*****************************************************************************/ +IMG_VOID IMG_CALLCONV DBGDrivWaitForEvent(DBG_EVENT eEvent) +{ + HostWaitForEvent(eEvent); +} +#endif + +/*!**************************************************************************** + @name ExpandStreamBuffer + @brief allocates a new buffer when the current one is full + @param psStream - stream + @param ui32NewSize - new size + @return IMG_TRUE - if allocation succeeded, IMG_FALSE - if not +*****************************************************************************/ +IMG_BOOL 
ExpandStreamBuffer(PDBG_STREAM psStream, IMG_UINT32 ui32NewSize) +{ + IMG_VOID * pvNewBuf; + IMG_UINT32 ui32NewSizeInPages; + IMG_UINT32 ui32NewWOffset; + IMG_UINT32 ui32NewROffset; + IMG_UINT32 ui32SpaceInOldBuf; + + /* + First check new size is bigger than existing size + */ + if (psStream->ui32Size >= ui32NewSize) + { + return IMG_FALSE; + } + + /* + Calc space in old buffer + */ + ui32SpaceInOldBuf = SpaceInStream(psStream); + + /* + Allocate new buffer + */ + ui32NewSizeInPages = ((ui32NewSize + 0xfffUL) & ~0xfffUL) / 4096UL; + + if ((psStream->psCtrl->ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0) + { + pvNewBuf = HostNonPageablePageAlloc(ui32NewSizeInPages); + } + else + { + pvNewBuf = HostPageablePageAlloc(ui32NewSizeInPages); + } + + if (pvNewBuf == IMG_NULL) + { + return IMG_FALSE; + } + + if(psStream->bCircularAllowed) + { + /* + Copy over old buffer to new one, we place data at start of buffer + even if Read offset is not at start of buffer + */ + if (psStream->ui32RPtr <= psStream->ui32WPtr) + { + /* + No wrapping of data so copy data to start of new buffer + */ + HostMemCopy(pvNewBuf, + (IMG_VOID *)((IMG_UINTPTR_T)psStream->pvBase + psStream->ui32RPtr), + psStream->ui32WPtr - psStream->ui32RPtr); + } + else + { + IMG_UINT32 ui32FirstCopySize; + + /* + The data has wrapped around the buffer, copy beginning of buffer first + */ + ui32FirstCopySize = psStream->ui32Size - psStream->ui32RPtr; + + HostMemCopy(pvNewBuf, + (IMG_VOID *)((IMG_UINTPTR_T)psStream->pvBase + psStream->ui32RPtr), + ui32FirstCopySize); + + /* + Now second half + */ + HostMemCopy((IMG_VOID *)((IMG_UINTPTR_T)pvNewBuf + ui32FirstCopySize), + (IMG_VOID *)(IMG_PBYTE)psStream->pvBase, + psStream->ui32WPtr); + } + ui32NewROffset = 0; + } + else + { + /* Copy everything in the old buffer to the new one */ + HostMemCopy(pvNewBuf, psStream->pvBase, psStream->ui32WPtr); + ui32NewROffset = psStream->ui32RPtr; + } + + /* + New Write offset is at end of data + */ + ui32NewWOffset = 
psStream->ui32Size - ui32SpaceInOldBuf; + + /* + Free old buffer + */ + if ((psStream->psCtrl->ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0) + { + HostNonPageablePageFree(psStream->pvBase); + } + else + { + HostPageablePageFree(psStream->pvBase); + } + + /* + Now set new params up + */ + psStream->pvBase = pvNewBuf; + psStream->ui32RPtr = ui32NewROffset; + psStream->ui32WPtr = ui32NewWOffset; + psStream->ui32Size = ui32NewSizeInPages * 4096; + + return IMG_TRUE; +} + +/*!**************************************************************************** + @name SpaceInStream + @brief remaining space in stream + @param psStream - stream + @return bytes remaining +*****************************************************************************/ +IMG_UINT32 SpaceInStream(PDBG_STREAM psStream) +{ + IMG_UINT32 ui32Space; + + if (psStream->bCircularAllowed) + { + /* Allow overwriting the buffer which was already read */ + if (psStream->ui32RPtr > psStream->ui32WPtr) + { + ui32Space = psStream->ui32RPtr - psStream->ui32WPtr; + } + else + { + ui32Space = psStream->ui32RPtr + (psStream->ui32Size - psStream->ui32WPtr); + } + } + else + { + /* Don't overwrite anything */ + ui32Space = psStream->ui32Size - psStream->ui32WPtr; + } + + return ui32Space; +} + + +/*!**************************************************************************** + @name DestroyAllStreams + @brief delete all streams in list + @return none +*****************************************************************************/ +void DestroyAllStreams(void) +{ + while (g_psStreamList != IMG_NULL) + { + DBGDrivDestroyStream(g_psStreamList); + } + return; +} + +/*!**************************************************************************** + @name FindLFBuf + @brief finds last frame stream + @param psStream - stream to find + @return stream if found, NULL otherwise +*****************************************************************************/ +PDBG_LASTFRAME_BUFFER FindLFBuf(PDBG_STREAM psStream) +{ + 
PDBG_LASTFRAME_BUFFER psLFBuffer; + + psLFBuffer = g_psLFBufferList; + + while (psLFBuffer) + { + if (psLFBuffer->psStream == psStream) + { + break; + } + + psLFBuffer = psLFBuffer->psNext; + } + + return psLFBuffer; +} + +/****************************************************************************** + End of file (DBGDRIV.C) +******************************************************************************/ diff --git a/sgx_km/eurasia_km/tools/intern/debug/dbgdriv/common/dbgdriv.h b/sgx_km/eurasia_km/tools/intern/debug/dbgdriv/common/dbgdriv.h new file mode 100644 index 0000000..8aaf6d6 --- /dev/null +++ b/sgx_km/eurasia_km/tools/intern/debug/dbgdriv/common/dbgdriv.h @@ -0,0 +1,155 @@ +/*************************************************************************/ /*! +@Title Debug Driver interface definition. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef _DBGDRIV_ +#define _DBGDRIV_ + +/***************************************************************************** + The odd constant or two +*****************************************************************************/ +#define BUFFER_SIZE 64*PAGESIZE + +#define DBGDRIV_VERSION 0x100 +#define MAX_PROCESSES 2 +#define BLOCK_USED 0x01 +#define BLOCK_LOCKED 0x02 +#define DBGDRIV_MONOBASE 0x000B0000UL + + +extern IMG_VOID * g_pvAPIMutex; + +/***************************************************************************** + KM mode functions +*****************************************************************************/ +IMG_VOID * IMG_CALLCONV DBGDrivCreateStream(IMG_CHAR * pszName, + IMG_UINT32 ui32CapMode, + IMG_UINT32 ui32OutMode, + IMG_UINT32 ui32Flags, + IMG_UINT32 ui32Pages); +IMG_VOID IMG_CALLCONV DBGDrivDestroyStream(PDBG_STREAM psStream); +IMG_VOID * IMG_CALLCONV DBGDrivFindStream(IMG_CHAR * pszName, IMG_BOOL bResetStream); +IMG_UINT32 IMG_CALLCONV DBGDrivWriteString(PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Level); +IMG_UINT32 IMG_CALLCONV DBGDrivReadString(PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Limit); +IMG_UINT32 IMG_CALLCONV DBGDrivWrite(PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level); +IMG_UINT32 IMG_CALLCONV DBGDrivWrite2(PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level); +IMG_UINT32 IMG_CALLCONV DBGDrivRead(PDBG_STREAM psStream, IMG_BOOL bReadInitBuffer, IMG_UINT32 ui32OutBufferSize,IMG_UINT8 *pui8OutBuf); +IMG_VOID IMG_CALLCONV DBGDrivSetCaptureMode(PDBG_STREAM psStream,IMG_UINT32 ui32Mode,IMG_UINT32 ui32Start,IMG_UINT32 ui32Stop,IMG_UINT32 ui32SampleRate); +IMG_VOID IMG_CALLCONV DBGDrivSetOutputMode(PDBG_STREAM psStream,IMG_UINT32 ui32OutMode); +IMG_VOID IMG_CALLCONV DBGDrivSetDebugLevel(PDBG_STREAM psStream,IMG_UINT32 ui32DebugLevel); +IMG_VOID 
IMG_CALLCONV DBGDrivSetFrame(PDBG_STREAM psStream,IMG_UINT32 ui32Frame); +IMG_UINT32 IMG_CALLCONV DBGDrivGetFrame(PDBG_STREAM psStream); +IMG_VOID IMG_CALLCONV DBGDrivOverrideMode(PDBG_STREAM psStream,IMG_UINT32 ui32Mode); +IMG_VOID IMG_CALLCONV DBGDrivDefaultMode(PDBG_STREAM psStream); +IMG_PVOID IMG_CALLCONV DBGDrivGetServiceTable(IMG_VOID); +IMG_UINT32 IMG_CALLCONV DBGDrivWriteStringCM(PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Level); +IMG_UINT32 IMG_CALLCONV DBGDrivWriteCM(PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level); +IMG_VOID IMG_CALLCONV DBGDrivSetClientMarker(PDBG_STREAM psStream, IMG_UINT32 ui32Marker); +IMG_VOID IMG_CALLCONV DBGDrivSetMarker(PDBG_STREAM psStream, IMG_UINT32 ui32Marker); +IMG_UINT32 IMG_CALLCONV DBGDrivGetMarker(PDBG_STREAM psStream); +IMG_BOOL IMG_CALLCONV DBGDrivIsLastCaptureFrame(PDBG_STREAM psStream); +IMG_BOOL IMG_CALLCONV DBGDrivIsCaptureFrame(PDBG_STREAM psStream, IMG_BOOL bCheckPreviousFrame); +IMG_UINT32 IMG_CALLCONV DBGDrivWriteLF(PDBG_STREAM psStream, IMG_UINT8 *pui8InBuf, IMG_UINT32 ui32InBuffSize, IMG_UINT32 ui32Level, IMG_UINT32 ui32Flags); +IMG_UINT32 IMG_CALLCONV DBGDrivReadLF(PDBG_STREAM psStream, IMG_UINT32 ui32OutBuffSize, IMG_UINT8 *pui8OutBuf); +IMG_VOID IMG_CALLCONV DBGDrivStartInitPhase(PDBG_STREAM psStream); +IMG_VOID IMG_CALLCONV DBGDrivStopInitPhase(PDBG_STREAM psStream); +IMG_UINT32 IMG_CALLCONV DBGDrivGetStreamOffset(PDBG_STREAM psStream); +IMG_VOID IMG_CALLCONV DBGDrivSetStreamOffset(PDBG_STREAM psStream, IMG_UINT32 ui32StreamOffset); +IMG_VOID IMG_CALLCONV DBGDrivWaitForEvent(DBG_EVENT eEvent); + +IMG_VOID DestroyAllStreams(IMG_VOID); + +/***************************************************************************** + Function prototypes +*****************************************************************************/ +IMG_UINT32 AtoI(IMG_CHAR *szIn); + +IMG_VOID HostMemSet(IMG_VOID *pvDest,IMG_UINT8 ui8Value,IMG_UINT32 ui32Size); +IMG_VOID 
HostMemCopy(IMG_VOID *pvDest,IMG_VOID *pvSrc,IMG_UINT32 ui32Size); +IMG_VOID MonoOut(const IMG_CHAR * pszString,IMG_BOOL bNewLine); + +/***************************************************************************** + Secure handle Function prototypes +*****************************************************************************/ +IMG_SID PStream2SID(PDBG_STREAM psStream); +PDBG_STREAM SID2PStream(IMG_SID hStream); +IMG_BOOL AddSIDEntry(PDBG_STREAM psStream); +IMG_BOOL RemoveSIDEntry(PDBG_STREAM psStream); + +/***************************************************************************** + Declarations for Service table entry points +*****************************************************************************/ +IMG_VOID * IMG_CALLCONV ExtDBGDrivCreateStream(IMG_CHAR * pszName, IMG_UINT32 ui32CapMode, IMG_UINT32 ui32OutMode, IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size); +IMG_VOID IMG_CALLCONV ExtDBGDrivDestroyStream(PDBG_STREAM psStream); +IMG_VOID * IMG_CALLCONV ExtDBGDrivFindStream(IMG_CHAR * pszName, IMG_BOOL bResetStream); +IMG_UINT32 IMG_CALLCONV ExtDBGDrivWriteString(PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Level); +IMG_UINT32 IMG_CALLCONV ExtDBGDrivReadString(PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Limit); +IMG_UINT32 IMG_CALLCONV ExtDBGDrivWrite(PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level); +IMG_UINT32 IMG_CALLCONV ExtDBGDrivRead(PDBG_STREAM psStream, IMG_BOOL bReadInitBuffer, IMG_UINT32 ui32OutBuffSize,IMG_UINT8 *pui8OutBuf); +IMG_VOID IMG_CALLCONV ExtDBGDrivSetCaptureMode(PDBG_STREAM psStream,IMG_UINT32 ui32Mode,IMG_UINT32 ui32Start,IMG_UINT32 ui32End,IMG_UINT32 ui32SampleRate); +IMG_VOID IMG_CALLCONV ExtDBGDrivSetOutputMode(PDBG_STREAM psStream,IMG_UINT32 ui32OutMode); +IMG_VOID IMG_CALLCONV ExtDBGDrivSetDebugLevel(PDBG_STREAM psStream,IMG_UINT32 ui32DebugLevel); +IMG_VOID IMG_CALLCONV ExtDBGDrivSetFrame(PDBG_STREAM psStream,IMG_UINT32 ui32Frame); +IMG_UINT32 IMG_CALLCONV 
ExtDBGDrivGetFrame(PDBG_STREAM psStream); +IMG_VOID IMG_CALLCONV ExtDBGDrivOverrideMode(PDBG_STREAM psStream,IMG_UINT32 ui32Mode); +IMG_VOID IMG_CALLCONV ExtDBGDrivDefaultMode(PDBG_STREAM psStream); +IMG_UINT32 IMG_CALLCONV ExtDBGDrivWrite2(PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level); +IMG_UINT32 IMG_CALLCONV ExtDBGDrivWriteStringCM(PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Level); +IMG_UINT32 IMG_CALLCONV ExtDBGDrivWriteCM(PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level); +IMG_VOID IMG_CALLCONV ExtDBGDrivSetMarker(PDBG_STREAM psStream, IMG_UINT32 ui32Marker); +IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetMarker(PDBG_STREAM psStream); +IMG_VOID IMG_CALLCONV ExtDBGDrivStartInitPhase(PDBG_STREAM psStream); +IMG_VOID IMG_CALLCONV ExtDBGDrivStopInitPhase(PDBG_STREAM psStream); +IMG_BOOL IMG_CALLCONV ExtDBGDrivIsLastCaptureFrame(PDBG_STREAM psStream); +IMG_BOOL IMG_CALLCONV ExtDBGDrivIsCaptureFrame(PDBG_STREAM psStream, IMG_BOOL bCheckPreviousFrame); +IMG_UINT32 IMG_CALLCONV ExtDBGDrivWriteLF(PDBG_STREAM psStream, IMG_UINT8 *pui8InBuf, IMG_UINT32 ui32InBuffSize, IMG_UINT32 ui32Level, IMG_UINT32 ui32Flags); +IMG_UINT32 IMG_CALLCONV ExtDBGDrivReadLF(PDBG_STREAM psStream, IMG_UINT32 ui32OutBuffSize, IMG_UINT8 *pui8OutBuf); +IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetStreamOffset(PDBG_STREAM psStream); +IMG_VOID IMG_CALLCONV ExtDBGDrivSetStreamOffset(PDBG_STREAM psStream, IMG_UINT32 ui32StreamOffset); +IMG_VOID IMG_CALLCONV ExtDBGDrivWaitForEvent(DBG_EVENT eEvent); +IMG_VOID IMG_CALLCONV ExtDBGDrivSetConnectNotifier(DBGKM_CONNECT_NOTIFIER fn_notifier); + +IMG_UINT32 IMG_CALLCONV ExtDBGDrivWritePersist(PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level); + +#endif + +/***************************************************************************** + End of file (DBGDRIV.H) +*****************************************************************************/ 
diff --git a/sgx_km/eurasia_km/tools/intern/debug/dbgdriv/common/dbgdriv_handle.c b/sgx_km/eurasia_km/tools/intern/debug/dbgdriv/common/dbgdriv_handle.c new file mode 100644 index 0000000..a9d37a6 --- /dev/null +++ b/sgx_km/eurasia_km/tools/intern/debug/dbgdriv/common/dbgdriv_handle.c @@ -0,0 +1,141 @@ +/*************************************************************************/ /*! +@Title Resource Handle Manager +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Provide resource handle management +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. 
+ +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "img_defs.h" +#include "dbgdrvif.h" +#include "dbgdriv.h" + +/* max number of streams held in SID info table */ +#define MAX_SID_ENTRIES 8 + +typedef struct _SID_INFO +{ + PDBG_STREAM psStream; +} SID_INFO, *PSID_INFO; + +static SID_INFO gaSID_Xlat_Table[MAX_SID_ENTRIES]; + +IMG_SID PStream2SID(PDBG_STREAM psStream) +{ + if (psStream != (PDBG_STREAM)IMG_NULL) + { + IMG_INT32 iIdx; + + for (iIdx = 0; iIdx < MAX_SID_ENTRIES; iIdx++) + { + if (psStream == gaSID_Xlat_Table[iIdx].psStream) + { + /* idx is one based */ + return (IMG_SID)iIdx+1; + } + } + } + + return (IMG_SID)0; +} + + +PDBG_STREAM SID2PStream(IMG_SID hStream) +{ + /* changed to zero based */ + IMG_INT32 iIdx = (IMG_INT32)hStream-1; + + if (iIdx >= 0 && iIdx < MAX_SID_ENTRIES) + { + return gaSID_Xlat_Table[iIdx].psStream; + } + else + { + return (PDBG_STREAM)IMG_NULL; + } +} + + +IMG_BOOL AddSIDEntry(PDBG_STREAM psStream) +{ + if (psStream != (PDBG_STREAM)IMG_NULL) + { + IMG_INT32 iIdx; + + for (iIdx = 0; iIdx < MAX_SID_ENTRIES; iIdx++) + { + if (psStream == gaSID_Xlat_Table[iIdx].psStream) + { + /* already created */ + return IMG_TRUE; + } + + if (gaSID_Xlat_Table[iIdx].psStream == (PDBG_STREAM)IMG_NULL) + { + /* free entry */ + gaSID_Xlat_Table[iIdx].psStream = psStream; + return IMG_TRUE; + } + } 
+ } + + return IMG_FALSE; +} + +IMG_BOOL RemoveSIDEntry(PDBG_STREAM psStream) +{ + if (psStream != (PDBG_STREAM)IMG_NULL) + { + IMG_INT32 iIdx; + + for (iIdx = 0; iIdx < MAX_SID_ENTRIES; iIdx++) + { + if (psStream == gaSID_Xlat_Table[iIdx].psStream) + { + gaSID_Xlat_Table[iIdx].psStream = (PDBG_STREAM)IMG_NULL; + return IMG_TRUE; + } + } + } + + return IMG_FALSE; +} + + +/****************************************************************************** + End of file (handle.c) +******************************************************************************/ diff --git a/sgx_km/eurasia_km/tools/intern/debug/dbgdriv/common/dbgdriv_ioctl.h b/sgx_km/eurasia_km/tools/intern/debug/dbgdriv/common/dbgdriv_ioctl.h new file mode 100644 index 0000000..0909e6d --- /dev/null +++ b/sgx_km/eurasia_km/tools/intern/debug/dbgdriv/common/dbgdriv_ioctl.h @@ -0,0 +1,57 @@ +/*************************************************************************/ /*! +@Title IOCTL implementations for debug device. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef _IOCTL_ +#define _IOCTL_ + +/***************************************************************************** + Global vars +*****************************************************************************/ + +#define MAX_DBGVXD_W32_API 25 + +extern IMG_UINT32 (*g_DBGDrivProc[MAX_DBGVXD_W32_API])(IMG_VOID *, IMG_VOID *); + +#endif + +/***************************************************************************** + End of file (IOCTL.H) +*****************************************************************************/ diff --git a/sgx_km/eurasia_km/tools/intern/debug/dbgdriv/common/hostfunc.h b/sgx_km/eurasia_km/tools/intern/debug/dbgdriv/common/hostfunc.h new file mode 100644 index 0000000..e92ad9a --- /dev/null +++ b/sgx_km/eurasia_km/tools/intern/debug/dbgdriv/common/hostfunc.h @@ -0,0 +1,82 @@ +/*************************************************************************/ /*! +@Title Host function definitions for debug device. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef _HOSTFUNC_ +#define _HOSTFUNC_ + +/***************************************************************************** + Defines +*****************************************************************************/ +#define HOST_PAGESIZE (4096) +#define DBG_MEMORY_INITIALIZER (0xe2) + +/***************************************************************************** + Function prototypes +*****************************************************************************/ +IMG_UINT32 HostReadRegistryDWORDFromString(IMG_CHAR *pcKey, IMG_CHAR *pcValueName, IMG_UINT32 *pui32Data); + +IMG_VOID * HostPageablePageAlloc(IMG_UINT32 ui32Pages); +IMG_VOID HostPageablePageFree(IMG_VOID * pvBase); +IMG_VOID * HostNonPageablePageAlloc(IMG_UINT32 ui32Pages); +IMG_VOID HostNonPageablePageFree(IMG_VOID * pvBase); + +IMG_VOID * HostMapKrnBufIntoUser(IMG_VOID * pvKrnAddr, IMG_UINT32 ui32Size, IMG_VOID * *ppvMdl); +IMG_VOID HostUnMapKrnBufFromUser(IMG_VOID * pvUserAddr, IMG_VOID * pvMdl, IMG_VOID * pvProcess); + +IMG_VOID HostCreateRegDeclStreams(IMG_VOID); + +IMG_VOID * HostCreateMutex(IMG_VOID); +IMG_VOID HostAquireMutex(IMG_VOID * pvMutex); +IMG_VOID HostReleaseMutex(IMG_VOID * pvMutex); +IMG_VOID HostDestroyMutex(IMG_VOID * pvMutex); + +#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS) +IMG_INT32 HostCreateEventObjects(IMG_VOID); +IMG_VOID HostWaitForEvent(DBG_EVENT eEvent); +IMG_VOID HostSignalEvent(DBG_EVENT eEvent); +IMG_VOID HostDestroyEventObjects(IMG_VOID); +#endif /*defined(SUPPORT_DBGDRV_EVENT_OBJECTS) */ + +#endif + +/***************************************************************************** + End of file (HOSTFUNC.H) +*****************************************************************************/ diff --git a/sgx_km/eurasia_km/tools/intern/debug/dbgdriv/common/hotkey.c b/sgx_km/eurasia_km/tools/intern/debug/dbgdriv/common/hotkey.c new file mode 100644 index 0000000..6bf20a6 --- /dev/null +++ 
b/sgx_km/eurasia_km/tools/intern/debug/dbgdriv/common/hotkey.c @@ -0,0 +1,199 @@ +/*************************************************************************/ /*! +@Title Debug driver utilities implementations. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Hotkey stuff +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + + +#if !defined(LINUX) && !defined(__QNXNTO__) +#include +#include +#endif + +#include "img_types.h" +#include "pvr_debug.h" +#include "dbgdrvif.h" +#include "dbgdriv.h" +#include "hotkey.h" +#include "hostfunc.h" + + + + +/***************************************************************************** + Global vars +*****************************************************************************/ + +IMG_UINT32 g_ui32HotKeyFrame = 0xFFFFFFFF; +IMG_BOOL g_bHotKeyPressed = IMG_FALSE; +IMG_BOOL g_bHotKeyRegistered = IMG_FALSE; + +/* Hotkey stuff */ +PRIVATEHOTKEYDATA g_PrivateHotKeyData; + + +/***************************************************************************** + Code +*****************************************************************************/ + + +/****************************************************************************** + * Function Name: ReadInHotKeys + * + * Inputs : none + * Outputs : - + * Returns : nothing + * Globals Used : - + * + * Description : Gets Hot key entries from system.ini + *****************************************************************************/ +IMG_VOID ReadInHotKeys(IMG_VOID) +{ + g_PrivateHotKeyData.ui32ScanCode = 0x58; /* F12 */ + g_PrivateHotKeyData.ui32ShiftState = 0x0; + + /* + Find buffer names etc.. 
+ */ + HostReadRegistryDWORDFromString("DEBUG\\Streams", "ui32ScanCode" , &g_PrivateHotKeyData.ui32ScanCode); + HostReadRegistryDWORDFromString("DEBUG\\Streams", "ui32ShiftState", &g_PrivateHotKeyData.ui32ShiftState); +} + +/****************************************************************************** + * Function Name: RegisterKeyPressed + * + * Inputs : IMG_UINT32 dwui32ScanCode, PHOTKEYINFO pInfo + * Outputs : - + * Returns : nothing + * Globals Used : - + * + * Description : Called when hotkey pressed. + *****************************************************************************/ +IMG_VOID RegisterKeyPressed(IMG_UINT32 dwui32ScanCode, PHOTKEYINFO pInfo) +{ + PDBG_STREAM psStream; + + PVR_UNREFERENCED_PARAMETER(pInfo); + + if (dwui32ScanCode == g_PrivateHotKeyData.ui32ScanCode) + { + PVR_DPF((PVR_DBG_MESSAGE,"PDUMP Hotkey pressed !\n")); + + psStream = (PDBG_STREAM) g_PrivateHotKeyData.sHotKeyInfo.pvStream; + + if (!g_bHotKeyPressed) + { + /* + Capture the next frame. + */ + g_ui32HotKeyFrame = psStream->psCtrl->ui32Current + 2; + + /* + Do the flag. + */ + g_bHotKeyPressed = IMG_TRUE; + } + } +} + +/****************************************************************************** + * Function Name: ActivateHotKeys + * + * Inputs : - + * Outputs : - + * Returns : - + * Globals Used : - + * + * Description : Installs HotKey callbacks + *****************************************************************************/ +IMG_VOID ActivateHotKeys(PDBG_STREAM psStream) +{ + /* + Setup hotkeys. + */ + ReadInHotKeys(); + + /* + Has it already been allocated. + */ + if (!g_PrivateHotKeyData.sHotKeyInfo.hHotKey) + { + if (g_PrivateHotKeyData.ui32ScanCode != 0) + { + PVR_DPF((PVR_DBG_MESSAGE,"Activate HotKey for PDUMP.\n")); + + /* + Add in stream data. 
+ */ + g_PrivateHotKeyData.sHotKeyInfo.pvStream = psStream; + + DefineHotKey(g_PrivateHotKeyData.ui32ScanCode, g_PrivateHotKeyData.ui32ShiftState, &g_PrivateHotKeyData.sHotKeyInfo); + } + else + { + g_PrivateHotKeyData.sHotKeyInfo.hHotKey = 0; + } + } +} + +/****************************************************************************** + * Function Name: DeactivateHotKeys + * + * Inputs : - + * Outputs : - + * Returns : - + * Globals Used : - + * + * Description : Removes HotKey callbacks + *****************************************************************************/ +IMG_VOID DeactivateHotKeys(IMG_VOID) +{ + if (g_PrivateHotKeyData.sHotKeyInfo.hHotKey != 0) + { + PVR_DPF((PVR_DBG_MESSAGE,"Deactivate HotKey.\n")); + + RemoveHotKey(g_PrivateHotKeyData.sHotKeyInfo.hHotKey); + g_PrivateHotKeyData.sHotKeyInfo.hHotKey = 0; + } +} + + +/***************************************************************************** + End of file (HOTKEY.C) +*****************************************************************************/ diff --git a/sgx_km/eurasia_km/tools/intern/debug/dbgdriv/common/hotkey.h b/sgx_km/eurasia_km/tools/intern/debug/dbgdriv/common/hotkey.h new file mode 100644 index 0000000..7aa2952 --- /dev/null +++ b/sgx_km/eurasia_km/tools/intern/debug/dbgdriv/common/hotkey.h @@ -0,0 +1,82 @@ +/*************************************************************************/ /*! +@Title Debug driver utilities header file. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Hotkey stuff +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef _HOTKEY_ +#define _HOTKEY_ + + +typedef struct _hotkeyinfo +{ + IMG_UINT8 ui8ScanCode; + IMG_UINT8 ui8Type; + IMG_UINT8 ui8Flag; + IMG_UINT8 ui8Filler1; + IMG_UINT32 ui32ShiftState; + IMG_UINT32 ui32HotKeyProc; + IMG_VOID *pvStream; + IMG_UINT32 hHotKey; /* handle. */ +} HOTKEYINFO, *PHOTKEYINFO; + +typedef struct _privatehotkeydata +{ + IMG_UINT32 ui32ScanCode; + IMG_UINT32 ui32ShiftState; + HOTKEYINFO sHotKeyInfo; +} PRIVATEHOTKEYDATA, *PPRIVATEHOTKEYDATA; + + +/***************************************************************************** + Hotkey stuff +*****************************************************************************/ +IMG_VOID ReadInHotKeys (IMG_VOID); +IMG_VOID ActivateHotKeys(PDBG_STREAM psStream); +IMG_VOID DeactivateHotKeys(IMG_VOID); + +IMG_VOID RemoveHotKey (IMG_UINT32 hHotKey); +IMG_VOID DefineHotKey (IMG_UINT32 ui32ScanCode, IMG_UINT32 ui32ShiftState, PHOTKEYINFO psInfo); +IMG_VOID RegisterKeyPressed (IMG_UINT32 ui32ScanCode, PHOTKEYINFO psInfo); + +#endif + +/***************************************************************************** + End of file (HOTKEY.H) +*****************************************************************************/ diff --git a/sgx_km/eurasia_km/tools/intern/debug/dbgdriv/common/ioctl.c b/sgx_km/eurasia_km/tools/intern/debug/dbgdriv/common/ioctl.c new file mode 100644 index 0000000..1767a9b --- /dev/null +++ b/sgx_km/eurasia_km/tools/intern/debug/dbgdriv/common/ioctl.c @@ -0,0 +1,827 @@ +/*************************************************************************/ /*! +@Title IOCTL implementations for debug device. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + + +#ifdef LINUX +#include +#include "pvr_uaccess.h" +#endif /* LINUX */ + +#include "img_types.h" +#include "dbgdrvif.h" +#include "dbgdriv.h" +#include "hotkey.h" +#include "dbgdriv_ioctl.h" + + +/***************************************************************************** + Code +*****************************************************************************/ + +/***************************************************************************** + FUNCTION : DBGDrivCreateStream + + PURPOSE : + + PARAMETERS : + + RETURNS : +*****************************************************************************/ +static IMG_UINT32 DBGDIOCDrivCreateStream(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer) +{ + PDBG_IN_CREATESTREAM psIn; + IMG_VOID * *ppvOut; + #ifdef LINUX + static IMG_CHAR name[32]; + #endif + + psIn = (PDBG_IN_CREATESTREAM) pvInBuffer; + ppvOut = (IMG_VOID * *) pvOutBuffer; + + #ifdef LINUX + + if(pvr_copy_from_user(name, psIn->u.pszName, 32) != 0) + { + return IMG_FALSE; + } + + *ppvOut = ExtDBGDrivCreateStream(name, psIn->ui32CapMode, psIn->ui32OutMode, 0, psIn->ui32Pages); + + #else + *ppvOut = ExtDBGDrivCreateStream(psIn->u.pszName, psIn->ui32CapMode, psIn->ui32OutMode, DEBUG_FLAGS_NO_BUF_EXPANDSION, psIn->ui32Pages); + #endif + + + return(IMG_TRUE); +} + +/***************************************************************************** + FUNCTION : DBGDrivDestroyStream + + PURPOSE : + + PARAMETERS : + + RETURNS : +*****************************************************************************/ +static IMG_UINT32 DBGDIOCDrivDestroyStream(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer) +{ + PDBG_STREAM *ppsStream; + PDBG_STREAM psStream; + + ppsStream = (PDBG_STREAM *) pvInBuffer; + psStream = (PDBG_STREAM) *ppsStream; + + PVR_UNREFERENCED_PARAMETER( pvOutBuffer); + + ExtDBGDrivDestroyStream(psStream); + + return(IMG_TRUE); +} + 
+/***************************************************************************** + FUNCTION : DBGDrivGetStream + + PURPOSE : + + PARAMETERS : + + RETURNS : +*****************************************************************************/ +static IMG_UINT32 DBGDIOCDrivGetStream(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer) +{ + PDBG_IN_FINDSTREAM psParams; + IMG_SID * phStream; + + psParams = (PDBG_IN_FINDSTREAM)pvInBuffer; + phStream = (IMG_SID *)pvOutBuffer; + + *phStream = PStream2SID(ExtDBGDrivFindStream(psParams->u.pszName, psParams->bResetStream)); + + return(IMG_TRUE); +} + +/***************************************************************************** + FUNCTION : DBGDrivWriteString + + PURPOSE : + + PARAMETERS : + + RETURNS : +*****************************************************************************/ +static IMG_UINT32 DBGDIOCDrivWriteString(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer) +{ + PDBG_IN_WRITESTRING psParams; + IMG_UINT32 *pui32OutLen; + PDBG_STREAM psStream; + + psParams = (PDBG_IN_WRITESTRING) pvInBuffer; + pui32OutLen = (IMG_UINT32 *) pvOutBuffer; + + psStream = SID2PStream(psParams->hStream); + if (psStream != (PDBG_STREAM)IMG_NULL) + { + *pui32OutLen = ExtDBGDrivWriteString(psStream,psParams->u.pszString,psParams->ui32Level); + return(IMG_TRUE); + } + else + { + /* invalid SID */ + *pui32OutLen = 0; + return(IMG_FALSE); + } +} + +/***************************************************************************** + FUNCTION : DBGDrivWriteStringCM + + PURPOSE : Same as DBGDrivWriteString, but takes notice of capture mode. 
+ + PARAMETERS : + + RETURNS : +*****************************************************************************/ +static IMG_UINT32 DBGDIOCDrivWriteStringCM(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer) +{ + PDBG_IN_WRITESTRING psParams; + IMG_UINT32 *pui32OutLen; + PDBG_STREAM psStream; + + psParams = (PDBG_IN_WRITESTRING) pvInBuffer; + pui32OutLen = (IMG_UINT32 *) pvOutBuffer; + + psStream = SID2PStream(psParams->hStream); + if (psStream != (PDBG_STREAM)IMG_NULL) + { + *pui32OutLen = ExtDBGDrivWriteStringCM(psStream,psParams->u.pszString,psParams->ui32Level); + return(IMG_TRUE); + } + else + { + /* invalid SID */ + *pui32OutLen = 0; + return(IMG_FALSE); + } +} + +/***************************************************************************** + FUNCTION : DBGDrivReadString + + PURPOSE : + + PARAMETERS : + + RETURNS : +*****************************************************************************/ +static IMG_UINT32 DBGDIOCDrivReadString(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer) +{ + IMG_UINT32 * pui32OutLen; + PDBG_IN_READSTRING psParams; + PDBG_STREAM psStream; + + psParams = (PDBG_IN_READSTRING) pvInBuffer; + pui32OutLen = (IMG_UINT32 *) pvOutBuffer; + + psStream = SID2PStream(psParams->hStream); + if (psStream != (PDBG_STREAM)IMG_NULL) + { + *pui32OutLen = ExtDBGDrivReadString(psStream, + psParams->u.pszString,psParams->ui32StringLen); + return(IMG_TRUE); + } + else + { + /* invalid SID */ + *pui32OutLen = 0; + return(IMG_FALSE); + } +} + +/***************************************************************************** + FUNCTION : DBGDrivWrite + + PURPOSE : + + PARAMETERS : + + RETURNS : +*****************************************************************************/ +static IMG_UINT32 DBGDIOCDrivWrite(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer) +{ + IMG_UINT32 * pui32BytesCopied; + PDBG_IN_WRITE psInParams; + PDBG_STREAM psStream; + + psInParams = (PDBG_IN_WRITE) pvInBuffer; + pui32BytesCopied = (IMG_UINT32 *) pvOutBuffer; + + psStream = 
SID2PStream(psInParams->hStream); + if (psStream != (PDBG_STREAM)IMG_NULL) + { + *pui32BytesCopied = ExtDBGDrivWrite(psStream, + psInParams->u.pui8InBuffer, + psInParams->ui32TransferSize, + psInParams->ui32Level); + return(IMG_TRUE); + } + else + { + /* invalid SID */ + *pui32BytesCopied = 0; + return(IMG_FALSE); + } +} + +/***************************************************************************** + FUNCTION : DBGDrivWrite2 + + PURPOSE : + + PARAMETERS : + + RETURNS : +*****************************************************************************/ +static IMG_UINT32 DBGDIOCDrivWrite2(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer) +{ + IMG_UINT32 * pui32BytesCopied; + PDBG_IN_WRITE psInParams; + PDBG_STREAM psStream; + + psInParams = (PDBG_IN_WRITE) pvInBuffer; + pui32BytesCopied = (IMG_UINT32 *) pvOutBuffer; + + psStream = SID2PStream(psInParams->hStream); + if (psStream != (PDBG_STREAM)IMG_NULL) + { + *pui32BytesCopied = ExtDBGDrivWrite2(psStream, + psInParams->u.pui8InBuffer, + psInParams->ui32TransferSize, + psInParams->ui32Level); + return(IMG_TRUE); + } + else + { + /* invalid SID */ + *pui32BytesCopied = 0; + return(IMG_FALSE); + } +} + +/***************************************************************************** + FUNCTION : DBGDrivWriteCM + + PURPOSE : Same as DBGDIOCDrivWrite2, but takes notice of capture mode. 
+ + PARAMETERS : + + RETURNS : +*****************************************************************************/ +static IMG_UINT32 DBGDIOCDrivWriteCM(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer) +{ + IMG_UINT32 * pui32BytesCopied; + PDBG_IN_WRITE psInParams; + PDBG_STREAM psStream; + + psInParams = (PDBG_IN_WRITE) pvInBuffer; + pui32BytesCopied = (IMG_UINT32 *) pvOutBuffer; + + psStream = SID2PStream(psInParams->hStream); + if (psStream != (PDBG_STREAM)IMG_NULL) + { + *pui32BytesCopied = ExtDBGDrivWriteCM(psStream, + psInParams->u.pui8InBuffer, + psInParams->ui32TransferSize, + psInParams->ui32Level); + return(IMG_TRUE); + } + else + { + /* invalid SID */ + *pui32BytesCopied = 0; + return(IMG_FALSE); + } +} + +/***************************************************************************** + FUNCTION : DBGDrivRead + + PURPOSE : + + PARAMETERS : + + RETURNS : +*****************************************************************************/ +static IMG_UINT32 DBGDIOCDrivRead(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer) +{ + IMG_UINT32 * pui32BytesCopied; + PDBG_IN_READ psInParams; + PDBG_STREAM psStream; + + psInParams = (PDBG_IN_READ) pvInBuffer; + pui32BytesCopied = (IMG_UINT32 *) pvOutBuffer; + + psStream = SID2PStream(psInParams->hStream); + if (psStream != (PDBG_STREAM)IMG_NULL) + { + *pui32BytesCopied = ExtDBGDrivRead(psStream, + psInParams->bReadInitBuffer, + psInParams->ui32OutBufferSize, + psInParams->u.pui8OutBuffer); + return(IMG_TRUE); + } + else + { + /* invalid SID */ + *pui32BytesCopied = 0; + return(IMG_FALSE); + } +} + +/***************************************************************************** + FUNCTION : DBGDIOCDrivSetCaptureMode + + PURPOSE : + + PARAMETERS : + + RETURNS : +*****************************************************************************/ +static IMG_UINT32 DBGDIOCDrivSetCaptureMode(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer) +{ + PDBG_IN_SETDEBUGMODE psParams; + PDBG_STREAM psStream; + + psParams = (PDBG_IN_SETDEBUGMODE) 
pvInBuffer; + PVR_UNREFERENCED_PARAMETER(pvOutBuffer); + + psStream = SID2PStream(psParams->hStream); + if (psStream != (PDBG_STREAM)IMG_NULL) + { + ExtDBGDrivSetCaptureMode(psStream, + psParams->ui32Mode, + psParams->ui32Start, + psParams->ui32End, + psParams->ui32SampleRate); + return(IMG_TRUE); + } + else + { + /* invalid SID */ + return(IMG_FALSE); + } +} + +/***************************************************************************** + FUNCTION : DBGDIOCDrivSetOutMode + + PURPOSE : + + PARAMETERS : + + RETURNS : +*****************************************************************************/ +static IMG_UINT32 DBGDIOCDrivSetOutMode(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer) +{ + PDBG_IN_SETDEBUGOUTMODE psParams; + PDBG_STREAM psStream; + + psParams = (PDBG_IN_SETDEBUGOUTMODE) pvInBuffer; + PVR_UNREFERENCED_PARAMETER(pvOutBuffer); + + psStream = SID2PStream(psParams->hStream); + if (psStream != (PDBG_STREAM)IMG_NULL) + { + ExtDBGDrivSetOutputMode(psStream,psParams->ui32Mode); + return(IMG_TRUE); + } + else + { + /* invalid SID */ + return(IMG_FALSE); + } +} + +/***************************************************************************** + FUNCTION : DBGDIOCDrivSetDebugLevel + + PURPOSE : + + PARAMETERS : + + RETURNS : +*****************************************************************************/ +static IMG_UINT32 DBGDIOCDrivSetDebugLevel(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer) +{ + PDBG_IN_SETDEBUGLEVEL psParams; + PDBG_STREAM psStream; + + psParams = (PDBG_IN_SETDEBUGLEVEL) pvInBuffer; + PVR_UNREFERENCED_PARAMETER(pvOutBuffer); + + psStream = SID2PStream(psParams->hStream); + if (psStream != (PDBG_STREAM)IMG_NULL) + { + ExtDBGDrivSetDebugLevel(psStream,psParams->ui32Level); + return(IMG_TRUE); + } + else + { + /* invalid SID */ + return(IMG_FALSE); + } +} + +/***************************************************************************** + FUNCTION : DBGDrivSetFrame + + PURPOSE : + + PARAMETERS : + + RETURNS : 
+*****************************************************************************/ +static IMG_UINT32 DBGDIOCDrivSetFrame(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer) +{ + PDBG_IN_SETFRAME psParams; + PDBG_STREAM psStream; + + psParams = (PDBG_IN_SETFRAME) pvInBuffer; + PVR_UNREFERENCED_PARAMETER(pvOutBuffer); + + psStream = SID2PStream(psParams->hStream); + if (psStream != (PDBG_STREAM)IMG_NULL) + { + ExtDBGDrivSetFrame(psStream,psParams->ui32Frame); + return(IMG_TRUE); + } + else + { + /* invalid SID */ + return(IMG_FALSE); + } +} + +/***************************************************************************** + FUNCTION : DBGDrivGetFrame + + PURPOSE : + + PARAMETERS : + + RETURNS : +*****************************************************************************/ +static IMG_UINT32 DBGDIOCDrivGetFrame(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer) +{ + PDBG_STREAM psStream; + IMG_UINT32 *pui32Current; + + pui32Current = (IMG_UINT32 *) pvOutBuffer; + psStream = SID2PStream(*(IMG_SID *)pvInBuffer); + + if (psStream != (PDBG_STREAM)IMG_NULL) + { + *pui32Current = ExtDBGDrivGetFrame(psStream); + return(IMG_TRUE); + } + else + { + /* invalid SID */ + *pui32Current = 0; + return(IMG_FALSE); + } +} + +/***************************************************************************** + FUNCTION : DBGDIOCDrivIsCaptureFrame + + PURPOSE : Determines if this frame is a capture frame + + PARAMETERS : + + RETURNS : IMG_TRUE if current frame is to be captured +*****************************************************************************/ +static IMG_UINT32 DBGDIOCDrivIsCaptureFrame(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer) +{ + PDBG_IN_ISCAPTUREFRAME psParams; + IMG_UINT32 * pui32Current; + PDBG_STREAM psStream; + + psParams = (PDBG_IN_ISCAPTUREFRAME) pvInBuffer; + pui32Current = (IMG_UINT32 *) pvOutBuffer; + + psStream = SID2PStream(psParams->hStream); + if (psStream != (PDBG_STREAM)IMG_NULL) + { + *pui32Current = ExtDBGDrivIsCaptureFrame(psStream, + 
psParams->bCheckPreviousFrame); + return(IMG_TRUE); + } + else + { + /* invalid SID */ + *pui32Current = 0; + return(IMG_FALSE); + } +} + +/***************************************************************************** + FUNCTION : DBGDrivOverrideMode + + PURPOSE : + + PARAMETERS : + + RETURNS : +*****************************************************************************/ +static IMG_UINT32 DBGDIOCDrivOverrideMode(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer) +{ + PDBG_IN_OVERRIDEMODE psParams; + PDBG_STREAM psStream; + + psParams = (PDBG_IN_OVERRIDEMODE) pvInBuffer; + PVR_UNREFERENCED_PARAMETER( pvOutBuffer); + + psStream = SID2PStream(psParams->hStream); + if (psStream != (PDBG_STREAM)IMG_NULL) + { + ExtDBGDrivOverrideMode(psStream,psParams->ui32Mode); + return(IMG_TRUE); + } + else + { + /* invalid SID */ + return(IMG_FALSE); + } +} + +/***************************************************************************** + FUNCTION : DBGDrivDefaultMode + + PURPOSE : + + PARAMETERS : + + RETURNS : +*****************************************************************************/ +static IMG_UINT32 DBGDIOCDrivDefaultMode(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer) +{ + PDBG_STREAM psStream; + + PVR_UNREFERENCED_PARAMETER(pvOutBuffer); + + psStream = SID2PStream(*(IMG_SID *)pvInBuffer); + if (psStream != (PDBG_STREAM)IMG_NULL) + { + ExtDBGDrivDefaultMode(psStream); + return(IMG_TRUE); + } + else + { + /* invalid SID */ + return(IMG_FALSE); + } +} + +/***************************************************************************** + FUNCTION : DBGDIOCDrivSetMarker + + PURPOSE : Sets the marker in the stream to split output files + + PARAMETERS : pvInBuffer, pvOutBuffer + + RETURNS : success +*****************************************************************************/ +static IMG_UINT32 DBGDIOCDrivSetMarker(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer) +{ + PDBG_IN_SETMARKER psParams; + PDBG_STREAM psStream; + + psParams = (PDBG_IN_SETMARKER) pvInBuffer; + 
PVR_UNREFERENCED_PARAMETER(pvOutBuffer); + + psStream = SID2PStream(psParams->hStream); + if (psStream != (PDBG_STREAM)IMG_NULL) + { + ExtDBGDrivSetMarker(psStream, psParams->ui32Marker); + return(IMG_TRUE); + } + else + { + /* invalid SID */ + return(IMG_FALSE); + } +} + +/***************************************************************************** + FUNCTION : DBGDIOCDrivGetMarker + + PURPOSE : Gets the marker in the stream to split output files + + PARAMETERS : pvInBuffer, pvOutBuffer + + RETURNS : success +*****************************************************************************/ +static IMG_UINT32 DBGDIOCDrivGetMarker(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer) +{ + PDBG_STREAM psStream; + IMG_UINT32 *pui32Current; + + pui32Current = (IMG_UINT32 *) pvOutBuffer; + + psStream = SID2PStream(*(IMG_SID *)pvInBuffer); + if (psStream != (PDBG_STREAM)IMG_NULL) + { + *pui32Current = ExtDBGDrivGetMarker(psStream); + return(IMG_TRUE); + } + else + { + /* invalid SID */ + *pui32Current = 0; + return(IMG_FALSE); + } +} + +/***************************************************************************** + FUNCTION : DBGDrivGetServiceTable + + PURPOSE : + + PARAMETERS : + + RETURNS : +*****************************************************************************/ +static IMG_UINT32 DBGDIOCDrivGetServiceTable(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer) +{ + IMG_PVOID * ppvOut; + + PVR_UNREFERENCED_PARAMETER(pvInBuffer); + ppvOut = (IMG_PVOID *) pvOutBuffer; + + *ppvOut = DBGDrivGetServiceTable(); + + return(IMG_TRUE); +} + +/***************************************************************************** + FUNCTION : DBGDIOCDrivWriteLF + + PURPOSE : + + PARAMETERS : + + RETURNS : +*****************************************************************************/ +static IMG_UINT32 DBGDIOCDrivWriteLF(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer) +{ + PDBG_IN_WRITE_LF psInParams; + IMG_UINT32 *pui32BytesCopied; + PDBG_STREAM psStream; + + psInParams = (PDBG_IN_WRITE_LF) 
pvInBuffer; + pui32BytesCopied = (IMG_UINT32 *) pvOutBuffer; + + psStream = SID2PStream(psInParams->hStream); + if (psStream != (PDBG_STREAM)IMG_NULL) + { + *pui32BytesCopied = ExtDBGDrivWriteLF(psStream, + psInParams->u.pui8InBuffer, + psInParams->ui32BufferSize, + psInParams->ui32Level, + psInParams->ui32Flags); + return(IMG_TRUE); + } + else + { + /* invalid SID */ + return(IMG_FALSE); + } +} + +/***************************************************************************** + FUNCTION : DBGDIOCDrivReadLF + + PURPOSE : + + PARAMETERS : + + RETURNS : +*****************************************************************************/ +static IMG_UINT32 DBGDIOCDrivReadLF(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer) +{ + IMG_UINT32 * pui32BytesCopied; + PDBG_IN_READ psInParams; + PDBG_STREAM psStream; + + psInParams = (PDBG_IN_READ) pvInBuffer; + pui32BytesCopied = (IMG_UINT32 *) pvOutBuffer; + + psStream = SID2PStream(psInParams->hStream); + if (psStream != (PDBG_STREAM)IMG_NULL) + { + *pui32BytesCopied = ExtDBGDrivReadLF(psStream, + psInParams->ui32OutBufferSize, + psInParams->u.pui8OutBuffer); + return(IMG_TRUE); + } + else + { + /* invalid SID */ + *pui32BytesCopied = 0; + return(IMG_FALSE); + } +} + +/***************************************************************************** + FUNCTION : DBGDIOCDrivWaitForEvent + + PURPOSE : + + PARAMETERS : + + RETURNS : +*****************************************************************************/ +static IMG_UINT32 DBGDIOCDrivWaitForEvent(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer) +{ + DBG_EVENT eEvent = (DBG_EVENT)(*(IMG_UINT32 *)pvInBuffer); + + PVR_UNREFERENCED_PARAMETER(pvOutBuffer); + + ExtDBGDrivWaitForEvent(eEvent); + + return(IMG_TRUE); +} + +/* + VxD DIOC interface jump table. 
+*/ +IMG_UINT32 (*g_DBGDrivProc[25])(IMG_VOID *, IMG_VOID *) = +{ + DBGDIOCDrivCreateStream, + DBGDIOCDrivDestroyStream, + DBGDIOCDrivGetStream, + DBGDIOCDrivWriteString, + DBGDIOCDrivReadString, + DBGDIOCDrivWrite, + DBGDIOCDrivRead, + DBGDIOCDrivSetCaptureMode, + DBGDIOCDrivSetOutMode, + DBGDIOCDrivSetDebugLevel, + DBGDIOCDrivSetFrame, + DBGDIOCDrivGetFrame, + DBGDIOCDrivOverrideMode, + DBGDIOCDrivDefaultMode, + DBGDIOCDrivGetServiceTable, + DBGDIOCDrivWrite2, + DBGDIOCDrivWriteStringCM, + DBGDIOCDrivWriteCM, + DBGDIOCDrivSetMarker, + DBGDIOCDrivGetMarker, + DBGDIOCDrivIsCaptureFrame, + DBGDIOCDrivWriteLF, + DBGDIOCDrivReadLF, + DBGDIOCDrivWaitForEvent +}; + +/***************************************************************************** + End of file (IOCTL.C) +*****************************************************************************/ diff --git a/sgx_km/eurasia_km/tools/intern/debug/dbgdriv/linux/hostfunc.c b/sgx_km/eurasia_km/tools/intern/debug/dbgdriv/linux/hostfunc.c new file mode 100644 index 0000000..5d5e9ef --- /dev/null +++ b/sgx_km/eurasia_km/tools/intern/debug/dbgdriv/linux/hostfunc.c @@ -0,0 +1,395 @@ +/*************************************************************************/ /*! +@Title Debug driver file +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)) +#include +#else +#include +#endif +#include + +#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS) +#include +#include +#include +#include +#endif /* defined(SUPPORT_DBGDRV_EVENT_OBJECTS) */ + +#include "img_types.h" +#include "pvr_debug.h" + +#include "dbgdrvif.h" +#include "hostfunc.h" +#include "dbgdriv.h" + +#if defined(MODULE) && defined(DEBUG) && !defined(SUPPORT_DRI_DRM) +IMG_UINT32 gPVRDebugLevel = (DBGPRIV_FATAL | DBGPRIV_ERROR | DBGPRIV_WARNING); + +#define PVR_STRING_TERMINATOR '\0' +#define PVR_IS_FILE_SEPARATOR(character) ( ((character) == '\\') || ((character) == '/') ) + +/******************************************************************************/ + + +/*! +****************************************************************************** + + @Function PVRSRVDebugPrintf + + @Description To output a debug message to the user + + @Input uDebugLevel: The current debug level + @Input pszFile: The source file generating the message + @Input uLine: The line of the source file + @Input pszFormat: The message format string + @Input ...: Zero or more arguments for use by the format string + + @Return none +******************************************************************************/ +void PVRSRVDebugPrintf ( + IMG_UINT32 ui32DebugLevel, + const IMG_CHAR* pszFileName, + IMG_UINT32 ui32Line, + const IMG_CHAR* pszFormat, + ... + ) +{ + IMG_BOOL bTrace; +#if !defined(__sh__) + IMG_CHAR *pszLeafName; + + pszLeafName = (char *)strrchr (pszFileName, '\\'); + + if (pszLeafName) + { + pszFileName = pszLeafName; + } +#endif /* __sh__ */ + + bTrace = (IMG_BOOL)(ui32DebugLevel & DBGPRIV_CALLTRACE) ? 
IMG_TRUE : IMG_FALSE; + + if (gPVRDebugLevel & ui32DebugLevel) + { + va_list vaArgs; + char szBuffer[256]; + char *szBufferEnd = szBuffer; + char *szBufferLimit = szBuffer + sizeof(szBuffer) - 1; + + /* The Limit - End pointer arithmetic we're doing in snprintf + ensures that our buffer remains null terminated from this */ + *szBufferLimit = '\0'; + + snprintf(szBufferEnd, szBufferLimit - szBufferEnd, "PVR_K:"); + szBufferEnd += strlen(szBufferEnd); + + /* Add in the level of warning */ + if (bTrace == IMG_FALSE) + { + switch(ui32DebugLevel) + { + case DBGPRIV_FATAL: + { + snprintf(szBufferEnd, szBufferLimit - szBufferEnd, "(Fatal):"); + break; + } + case DBGPRIV_ERROR: + { + snprintf(szBufferEnd, szBufferLimit - szBufferEnd, "(Error):"); + break; + } + case DBGPRIV_WARNING: + { + snprintf(szBufferEnd, szBufferLimit - szBufferEnd, "(Warning):"); + break; + } + case DBGPRIV_MESSAGE: + { + snprintf(szBufferEnd, szBufferLimit - szBufferEnd, "(Message):"); + break; + } + case DBGPRIV_VERBOSE: + { + snprintf(szBufferEnd, szBufferLimit - szBufferEnd, "(Verbose):"); + break; + } + default: + { + snprintf(szBufferEnd, szBufferLimit - szBufferEnd, "(Unknown message level)"); + break; + } + } + szBufferEnd += strlen(szBufferEnd); + } + snprintf(szBufferEnd, szBufferLimit - szBufferEnd, " "); + szBufferEnd += strlen(szBufferEnd); + + va_start (vaArgs, pszFormat); + vsnprintf(szBufferEnd, szBufferLimit - szBufferEnd, pszFormat, vaArgs); + va_end (vaArgs); + szBufferEnd += strlen(szBufferEnd); + + /* + * Metrics and Traces don't need a location + */ + if (bTrace == IMG_FALSE) + { + snprintf(szBufferEnd, szBufferLimit - szBufferEnd, + " [%d, %s]", (int)ui32Line, pszFileName); + szBufferEnd += strlen(szBufferEnd); + } + + printk(KERN_INFO "%s\r\n", szBuffer); + } +} +#endif /* defined(DEBUG) && !defined(SUPPORT_DRI_DRM) */ + +/*! 
+****************************************************************************** + + @Function HostMemSet + + @Description Function that does the same as the C memset() function + + @Modified *pvDest : pointer to start of buffer to be set + + @Input ui8Value: value to set each byte to + + @Input ui32Size : number of bytes to set + + @Return IMG_VOID + +******************************************************************************/ +IMG_VOID HostMemSet(IMG_VOID *pvDest, IMG_UINT8 ui8Value, IMG_UINT32 ui32Size) +{ + memset(pvDest, (int) ui8Value, (size_t) ui32Size); +} + +/*! +****************************************************************************** + + @Function HostMemCopy + + @Description Function that does the same as the C memscpy() function + + @Input pvDst - pointer to dst + @Output pvSrc - pointer to src + @Input ui32Size - bytes to copy + + @Return none + +******************************************************************************/ +IMG_VOID HostMemCopy(IMG_VOID *pvDst, IMG_VOID *pvSrc, IMG_UINT32 ui32Size) +{ +#if defined(USE_UNOPTIMISED_MEMCPY) + unsigned char *src,*dst; + int i; + + src=(unsigned char *)pvSrc; + dst=(unsigned char *)pvDst; + for(i=0;i= KERNEL_VERSION(2,6,37)) +typedef struct mutex MUTEX; +#define INIT_MUTEX(m) mutex_init(m) +#define DOWN_TRYLOCK(m) (!mutex_trylock(m)) +#define DOWN(m) mutex_lock(m) +#define UP(m) mutex_unlock(m) +#else +typedef struct semaphore MUTEX; +#define INIT_MUTEX(m) init_MUTEX(m) +#define DOWN_TRYLOCK(m) down_trylock(m) +#define DOWN(m) down(m) +#define UP(m) up(m) +#endif + +IMG_VOID *HostCreateMutex(IMG_VOID) +{ + MUTEX *psMutex; + + psMutex = kmalloc(sizeof(*psMutex), GFP_KERNEL); + if (psMutex) + { + INIT_MUTEX(psMutex); + } + + return psMutex; +} + +IMG_VOID HostAquireMutex(IMG_VOID * pvMutex) +{ + BUG_ON(in_interrupt()); + +#if defined(PVR_DEBUG_DBGDRV_DETECT_HOST_MUTEX_COLLISIONS) + if (DOWN_TRYLOCK((MUTEX *)pvMutex)) + { + printk(KERN_INFO "HostAquireMutex: Waiting for mutex\n"); + DOWN((MUTEX 
*)pvMutex); + } +#else + DOWN((MUTEX *)pvMutex); +#endif +} + +IMG_VOID HostReleaseMutex(IMG_VOID * pvMutex) +{ + UP((MUTEX *)pvMutex); +} + +IMG_VOID HostDestroyMutex(IMG_VOID * pvMutex) +{ + if (pvMutex) + { + kfree(pvMutex); + } +} + +#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS) + +#define EVENT_WAIT_TIMEOUT_MS 500 +#define EVENT_WAIT_TIMEOUT_JIFFIES (EVENT_WAIT_TIMEOUT_MS * HZ / 1000) + +static int iStreamData; +static wait_queue_head_t sStreamDataEvent; + +IMG_INT32 HostCreateEventObjects(IMG_VOID) +{ + init_waitqueue_head(&sStreamDataEvent); + + return 0; +} + +IMG_VOID HostWaitForEvent(DBG_EVENT eEvent) +{ + switch(eEvent) + { + case DBG_EVENT_STREAM_DATA: + /* + * More than one process may be woken up. + * Any process that wakes up should consume + * all the data from the streams. + */ + wait_event_interruptible_timeout(sStreamDataEvent, iStreamData != 0, EVENT_WAIT_TIMEOUT_JIFFIES); + iStreamData = 0; + break; + default: + /* + * For unknown events, enter an interruptible sleep. + */ + msleep_interruptible(EVENT_WAIT_TIMEOUT_MS); + break; + } +} + +IMG_VOID HostSignalEvent(DBG_EVENT eEvent) +{ + switch(eEvent) + { + case DBG_EVENT_STREAM_DATA: + iStreamData = 1; + wake_up_interruptible(&sStreamDataEvent); + break; + default: + break; + } +} + +IMG_VOID HostDestroyEventObjects(IMG_VOID) +{ +} +#endif /* defined(SUPPORT_DBGDRV_EVENT_OBJECTS) */ diff --git a/sgx_km/eurasia_km/tools/intern/debug/dbgdriv/linux/main.c b/sgx_km/eurasia_km/tools/intern/debug/dbgdriv/linux/main.c new file mode 100644 index 0000000..2cf310c --- /dev/null +++ b/sgx_km/eurasia_km/tools/intern/debug/dbgdriv/linux/main.c @@ -0,0 +1,355 @@ +/*************************************************************************/ /*! +@Title Debug driver main file +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if defined(LDM_PLATFORM) && !defined(SUPPORT_DRI_DRM) +#include +#endif + +#if defined(LDM_PCI) && !defined(SUPPORT_DRI_DRM) +#include +#endif + +#include + +#if defined(SUPPORT_DRI_DRM) +#include "drmP.h" +#endif + +#include "img_types.h" +#include "linuxsrv.h" +#include "dbgdriv_ioctl.h" +#include "dbgdrvif.h" +#include "dbgdriv.h" +#include "hostfunc.h" +#include "hotkey.h" +#include "pvr_debug.h" +#include "pvrmodule.h" +#include "pvr_uaccess.h" + +#if defined(SUPPORT_DRI_DRM) + +#include "pvr_drm_shared.h" +#include "pvr_drm.h" + +#else /* defined(SUPPORT_DRI_DRM) */ + +#define DRVNAME "dbgdrv" +MODULE_SUPPORTED_DEVICE(DRVNAME); + +#if (defined(LDM_PLATFORM) || defined(LDM_PCI)) && !defined(SUPPORT_DRI_DRM) +static struct class *psDbgDrvClass; +#endif + +static int AssignedMajorNumber = 0; + +long dbgdrv_ioctl(struct file *, unsigned int, unsigned long); + +static int dbgdrv_open(struct inode unref__ * pInode, struct file unref__ * pFile) +{ + return 0; +} + +static int dbgdrv_release(struct inode unref__ * pInode, struct file unref__ * pFile) +{ + return 0; +} + +static int dbgdrv_mmap(struct file* pFile, struct vm_area_struct* ps_vma) +{ + return 0; +} + +static struct file_operations dbgdrv_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = dbgdrv_ioctl, + .open = dbgdrv_open, + .release = dbgdrv_release, + .mmap = dbgdrv_mmap, +}; + +#endif /* defined(SUPPORT_DRI_DRM) */ + +IMG_VOID DBGDrvGetServiceTable(DBGKM_SERVICE_TABLE **fn_table) +{ + extern DBGKM_SERVICE_TABLE g_sDBGKMServices; + + *fn_table = &g_sDBGKMServices; +} + +#if defined(SUPPORT_DRI_DRM) +void dbgdrv_cleanup(void) +#else +static void __exit dbgdrv_cleanup(void) +#endif +{ +#if !defined(SUPPORT_DRI_DRM) +#if defined(LDM_PLATFORM) || defined(LDM_PCI) + device_destroy(psDbgDrvClass, MKDEV(AssignedMajorNumber, 
0)); + class_destroy(psDbgDrvClass); +#endif + unregister_chrdev(AssignedMajorNumber, DRVNAME); +#endif /* !defined(SUPPORT_DRI_DRM) */ +#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS) + HostDestroyEventObjects(); +#endif + HostDestroyMutex(g_pvAPIMutex); + return; +} + +#if defined(SUPPORT_DRI_DRM) +IMG_INT dbgdrv_init(void) +#else +static int __init dbgdrv_init(void) +#endif +{ +#if (defined(LDM_PLATFORM) || defined(LDM_PCI)) && !defined(SUPPORT_DRI_DRM) + struct device *psDev; +#endif + +#if !defined(SUPPORT_DRI_DRM) + int err = -EBUSY; +#endif + + /* Init API mutex */ + if ((g_pvAPIMutex=HostCreateMutex()) == IMG_NULL) + { + return -ENOMEM; + } + +#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS) + /* + * The current implementation of HostCreateEventObjects on Linux + * can never fail, so there is no need to check for error. + */ + (void) HostCreateEventObjects(); +#endif + +#if !defined(SUPPORT_DRI_DRM) + AssignedMajorNumber = + register_chrdev(AssignedMajorNumber, DRVNAME, &dbgdrv_fops); + + if (AssignedMajorNumber <= 0) + { + PVR_DPF((PVR_DBG_ERROR," unable to get major\n")); + goto ErrDestroyEventObjects; + } + +#if defined(LDM_PLATFORM) || defined(LDM_PCI) + /* + * This code (using GPL symbols) facilitates automatic device + * node creation on platforms with udev (or similar). 
+ */ + psDbgDrvClass = class_create(THIS_MODULE, DRVNAME); + if (IS_ERR(psDbgDrvClass)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: unable to create class (%ld)", + __func__, PTR_ERR(psDbgDrvClass))); + goto ErrUnregisterCharDev; + } + + psDev = device_create(psDbgDrvClass, NULL, MKDEV(AssignedMajorNumber, 0), +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)) + NULL, +#endif + DRVNAME); + if (IS_ERR(psDev)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: unable to create device (%ld)", + __func__, PTR_ERR(psDev))); + goto ErrDestroyClass; + } +#endif /* defined(LDM_PLATFORM) || defined(LDM_PCI) */ +#endif /* !defined(SUPPORT_DRI_DRM) */ + + return 0; + +#if !defined(SUPPORT_DRI_DRM) +ErrDestroyEventObjects: +#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS) + HostDestroyEventObjects(); +#endif +#if defined(LDM_PLATFORM) || defined(LDM_PCI) +ErrUnregisterCharDev: + unregister_chrdev(AssignedMajorNumber, DRVNAME); +ErrDestroyClass: + class_destroy(psDbgDrvClass); +#endif + return err; +#endif /* !defined(SUPPORT_DRI_DRM) */ +} + +#if defined(SUPPORT_DRI_DRM) +int dbgdrv_ioctl(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile) +#else +long dbgdrv_ioctl(struct file *file, unsigned int ioctlCmd, unsigned long arg) +#endif +{ + IOCTL_PACKAGE *pIP = (IOCTL_PACKAGE *) arg; + char *buffer, *in, *out; + unsigned int cmd; + + if((pIP->ui32InBufferSize > (PAGE_SIZE >> 1) ) || (pIP->ui32OutBufferSize > (PAGE_SIZE >> 1))) + { + PVR_DPF((PVR_DBG_ERROR,"Sizes of the buffers are too large, cannot do ioctl\n")); + return -1; + } + + buffer = (char *) HostPageablePageAlloc(1); + if(!buffer) + { + PVR_DPF((PVR_DBG_ERROR,"Failed to allocate buffer, cannot do ioctl\n")); + return -EFAULT; + } + + in = buffer; + out = buffer + (PAGE_SIZE >>1); + + if(pvr_copy_from_user(in, pIP->pInBuffer, pIP->ui32InBufferSize) != 0) + { + goto init_failed; + } + + /* Extra -1 because ioctls start at DEBUG_SERVICE_IOCTL_BASE + 1 */ + cmd = MAKEIOCTLINDEX(pIP->ui32Cmd) - DEBUG_SERVICE_IOCTL_BASE - 1; + + if(pIP->ui32Cmd 
== DEBUG_SERVICE_READ) + { + IMG_UINT32 *pui32BytesCopied = (IMG_UINT32 *)out; + DBG_IN_READ *psReadInParams = (DBG_IN_READ *)in; + DBG_STREAM *psStream; + IMG_CHAR *ui8Tmp; + + ui8Tmp = vmalloc(psReadInParams->ui32OutBufferSize); + + if(!ui8Tmp) + { + goto init_failed; + } + + psStream = SID2PStream(psReadInParams->hStream); + if(!psStream) + { + vfree(ui8Tmp); + goto init_failed; + } + + *pui32BytesCopied = ExtDBGDrivRead(psStream, + psReadInParams->bReadInitBuffer, + psReadInParams->ui32OutBufferSize, + ui8Tmp); + + if(pvr_copy_to_user(psReadInParams->u.pui8OutBuffer, + ui8Tmp, + *pui32BytesCopied) != 0) + { + vfree(ui8Tmp); + goto init_failed; + } + + vfree(ui8Tmp); + } + else + { + (g_DBGDrivProc[cmd])(in, out); + } + + if(copy_to_user(pIP->pOutBuffer, out, pIP->ui32OutBufferSize) != 0) + { + goto init_failed; + } + + HostPageablePageFree((IMG_VOID *)buffer); + return 0; + +init_failed: + HostPageablePageFree((IMG_VOID *)buffer); + return -EFAULT; +} + + +/****************************************************************************** + * Function Name: RemoveHotKey + * + * Inputs : - + * Outputs : - + * Returns : - + * Globals Used : - + * + * Description : Removes HotKey callbacks + *****************************************************************************/ +IMG_VOID RemoveHotKey (IMG_UINT32 hHotKey) +{ + PVR_UNREFERENCED_PARAMETER(hHotKey); +} + +/****************************************************************************** + * Function Name: DefineHotKey + * + * Inputs : - + * Outputs : - + * Returns : - + * Globals Used : - + * + * Description : Removes HotKey callbacks + *****************************************************************************/ +IMG_VOID DefineHotKey (IMG_UINT32 ui32ScanCode, IMG_UINT32 ui32ShiftState, PHOTKEYINFO psInfo) +{ + PVR_UNREFERENCED_PARAMETER(ui32ScanCode); + PVR_UNREFERENCED_PARAMETER(ui32ShiftState); + PVR_UNREFERENCED_PARAMETER(psInfo); +} + +EXPORT_SYMBOL(DBGDrvGetServiceTable); + +#if !defined(SUPPORT_DRI_DRM) 
+subsys_initcall(dbgdrv_init); +module_exit(dbgdrv_cleanup); +#endif diff --git a/sgx_km/lib/modules/pvrsrvkm.ko b/sgx_km/lib/modules/pvrsrvkm.ko deleted file mode 100644 index 5755059e7c9514fceaaf7bac53c3035d8311557a..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 417820 zcmdSC3zQvKdFQ+9Ji3os$975GO*-u+F4-OEm?j(<0m4!2lC6NaR^aGiBaD(c28Tz6 z312AHOSWfF@8_U5(AUA{|iB|#)VRAFaIN>!U@d`XrQxviV37Q$m zIGMTW`}^;zeY*O%Wz5W4_pU3gUR9@RKfe9F_qV^j>%Bj==T)BP8TZdKb7u9IG55Y< zM6asbY=Jp*z4;+?MK|g-zWm~+Dv$2zo5pflHqG8#F5NaZ>5lV`B~__3pXqSCfaUe)mjzr8(5Qw6Zci!@bA- z->$5jsKsZ}lKIKr#g&yC>xTY6{3}ly6D7K;Pm{w_F~HMUj7`@h#^+D{`1}PAr{%57 z7d+~i?kUQ=DX#eE(_6Pjy`?R0j!V}q#*15);>8!fDPG*V6xSO|arx!6+gM0V-p9M$ zm_w74TCLcGwA+k&^WC$l@uS{IKzZ8t8d0)ix)0;kjW#6kEN;W~y64$GA-Eb)RKCTw$8c%!@oPZzKW)LOnzZRTN4S0CI`e`ht z{u8(LebJKgdT+vJe*^89>F7$x7oa{JlTJU?c5U*`R7w!i(!8*G33lb=$5?I;;t7LS?a_Qp3ZXdEH1ul&~0 zzF-LH|ElOjf246FE=4Ee{gk=>Esyrf>O=LTDB0E7n2vR#UMY%tUgK5$9n{~}xUF9? z;>(k%U>c=-tr7KJ32zH7jd@3dv6(xQZH6njt2N#nZ|`26j^F&w-Un|`-3T?XKk=^b zluU5KH|8L)U0pS1$si}aZDwOtvf_1)_eadZUXStb>Rb(8qU6z)m5Y|w;UR+_DZOl| z|G^(YUimAV+krE>j`m+#s{TChxu>5|U=m(O+e`5%WzM57%rf)gXOd7%D>6qw?CVxZ3C@MK)d4gAmkb* zFZ>1Yw;Xxp+Tydk_5Byhjd#SC(|O)N=N(Q1 zc*Zo&S)TRa*(rFojXXJ+r*fEUex`2)SJ^vtA%p9%?QbfVKUcOqvJk7BEdy*;7dH;4 z!{sUs-^!)v$-CMdzh{k~zEpfCe1q#+Faf`vNdvR1U-B1XwdvY%wyB1^dt-YOFN7}5 zsE1%aWCHZF;WxbJO=mhuyll)7?2BYKN+uYqwJ((2_>*qQ)Q8+3$_%q_4 zh19pt7E(7x;jiN7@HLzLDYa`30KfFZ`Hu&UBOe#+^<38*vj`u7Fa2&VUbY=P4XgLR zfou%-@icnD%VBbGXzZ8%D%0%~((Pf?yJ-XC?|`RTGby!^omSM#>{K(iQ<>f}l3icC zgxrMS!pcdAEOQTkfp6|J{s8tQkk0;%UwLxGtd6^aTnv5}%D8rI8*1ARlS(IRx7I$H z)?4Z?Yg;(O*SToNlFL*%1i$TEPFl_Wm7?EkCi zR>s`3tX`C;j{0%(SlY13@(FxDo$*=|nwx3yy5l{`l#?sxf7o|iC=TK}-nULUcg)m-;3?n^xefI)6*}cegRIW5SRinS5aqv!jfzOcfw0xLnmRZIo(q!xvUWy($8y=k&!n-j?`yobc4?@M_1YC4 zFV~JSYklqke-5>K1><|~iyu#J#!p$-LZ+^T;U^`*~D$h-lZ*O*&OV4w9){Ni-oFn%=8^Ms$4@N>ui8h+p}!B;VvO~}T0 zG99IV`TgtS3Or3TFM}&=it7D;g{z`Ii<;>_4 zcRI`SI?o3EwS% 
z0@`fguOqwI-mxiUS#Zn*i+1j2xOXz^;Q0OUR2nhIk?v7F**CSXw$U*IozCx?cv|D( zx#jz{GIJ{RLUBMs*yI4DeULbFp3VS=q;4e)4~^Oo=ku8zps2_ZpfE&zPMm6(n&b2beUs-CnMT+Po}S> zzT=A`d^422>GLHU$0~*N2VBzcg3f++~G_LD49>%pLAJ>}s;AO_8 z7^&cRey+nbAFSo`L8nJ;|M;lF6Z1dhH=7Fvts@^d&85t?4$TFJ>%#lf#Fng{9Dc{# z&&~@km!G)7iN)BtjMrW6{iEi48q;Oq z8jK?^Q`EKUm-xpCYa4C9OR--%W#_`{jq!VJYwCM!%i&*gB-p%A{2BF_c!L{*-vAzo z@i;y#g@;SnoN{IBkx>l!^uWdr(R|p?m;)EjC|lqoH_mV8eBpdH!L$)ryq%r62A?{= zP575R6?_&pWI)d(2llyxXPrI-i^_;DuAiFtMPtXG_!o@Jtt;Hky`T5{MvXVW-^cw_ ze*YToXY%`y`yju61^2W0{SR|Lm)|4f)ARW~`fR$9-&=X+Ub2X-n)bMY6^#$=g9A%9qFP^=Ff?N4xUR-{dWEw-)Qh$I_!VXpO*^HrQd!ff9@5YOZT0% z&!J=Cx%lCicrIEOdA&&Qp8?m7-f!jJ(YwXHqc`hD1A2@0j^3;*4d}g#dq?jd<=)Zz z<=i`Zzl3{7?>X)V`W8O`La+Ku*od{ZEW^zjIdZx>gl8DDY=NaSdehxkO*^^O@=!C` z$@O{smY#W;o;4)jYx^nc=z+Wzc<9e^+&kX+kNI!MGk=)>cD(X`x$L3B{YCod>D^N-K*W zUB~+{*Ov#le@}kDpZibd_sHkixAXhG+&8vncB9GtJMw$^@E^|aM_W;A{9ZcnrZtoFCDHu?LVN7Bmjy{yR|kL%GQYw5(!yV2Gi z{?#e)U+*o`A99a9?s-Af+d!LD{LreQyy^7ky1DM8!~F{aOREph!U*D}{X;#%SQIM*sytK)O}7jd)!IE23qChBR;7++`2 zoXayIa0s{17TC?Tt=OY%iQJKjzx0eX0b4$xJU)o6S86e4;y%>(y43f;aexnH7W;F= zgXX*7sId@Ffn!TYLz&#xU&>|Nw%Mf3CT+HPuQqog->mb`x0h^OgE1oS_kYduo;hmq znYDM$wmS;Umxg=@6YlNXmrG39`9mXcQ-7T@RSD{uwjnU7Kc| zHp@BQ24(!v(oD2g8}7Yi+40hrgov0j;H zgY*BbejrJP^Y0NCevW6yyKe4z1w8WJE4s6=SuzVpaS&Vb8X`lqj1>KU^&(R?tA)9$l2QsY+4MeXPFl|ih~ zH+lwN=X@brWHc!F2Dx6fF|C5!n4EsEfj68DuH}6sD=S~YUr{XlqMmuJqvQqc*F5_x z75^OZcj0D#Z)E4`3$lF^_;jA}8C$p+KJlLG_!yU6^R{ycY;0+xjT=}$nRaLoeEGL- zhW?($rMQ8{=-@5d{$0#DO-KBDJYM;&&m}AGKhnP{`a*Km@;%azy_NTWo*b}^=~c_0 z#~yA>H<&wFM~ZsP+i!I8Zv7SEsDuB~AZ7{-4o}Dh{==&N6&%j)$(D4{Q>vqN-6{BO zD#-N~ahSv4Xe!rZ8_Z9~DtnOpuV5syF`J6X;w-X18U3JXK&}SQzf9eV4}Ld2+G8Bv zzlH}@U;Pxz)+zhPg|hFWTu}zRHt=Du&9c^}&!p}0nREwsU2Q04Ge&<`#L@?UV)a3t zxp4b&d{<(ib^5O@bLFq%>vsm{269eo^vhiKQ);7Zf0KXOpdBZB&1d%ao6u2pQ7m7v zYdKw$*c?&Wa{pnAdlO)1n#gT%SA5HssJFQ3O|dtg#a0%+@2+@Z%VJz@@&^usk=uKZ zFkY>d);rA2@M+2sdZN9vUuszzBR}|U!Gmd){3_FYh;fkD>c1)8xruzBt#`%cmoCLi zTNmP`SG+l1B9>BZ@|U-LoVJB0`9vGMqZfSq#|nLI*n0m35B}PKyjL2(uDS3Rien$f 
zv-~jh4}7$gzO!J5i#E6tWVjW-y_qtWKFF7Nqm%zW&Q&~NmJdT0^1R@UI%VEEM04`s z5?cm(Ian;uG@5dv7PP68&z#&)yID{bkCR8>X)u z$k)?Xl{%kZS-F0@e>L&Kv*g5)(+m8{Tb#QY{o0}b&LVki52s68qu!UyB5MrGy{l=b z6c~IrbWG!PPxGHfEI6P){!6tD?V{IOA5*^C?hd{!e+OvyATs=y7oWUv)%8)b@{-q4 zz7*d_+Y6Ojh_x;C%KmBMZM1JnedD7;{N*0=({C%LH?SNwASAT*+%-nT2aF%P)6 z-|TLL&(44^a0;LA;(FZ+q8>S)Y5DraUdgoLj@OBem*wm=nJ3)w@+U9sqTUibzQ=5| z>py~Z4|9oKyss`V#(xnkf?w=5G*f@0;)enqg?I0zUSII3jOLD7M^Zb2UDt=7Wn6my zt$$kePo;-dZ#ytJe5%aXDYN2x{XqGPcP{m$i}hS^${s4mT{tm8hJ$MGrr5-1h+Rj? znf?#QHeWU-=OB1Lc58Fu#kXwsgZr?@r@!;aHK)mUS?Hg-Y7u+fu=aR(4ANP(;PeIg z0oT!2jo%(^H{j25uo%~Z!>Q99KLYRh%`A@Q{j}`p>(QA^c-qth_UYZiE#r9NpH{}a7mfFSknud54s*8B(WA)UwPuvOyhuM}Wd|@k zTYI}jC-lja!2XG1-CVxZzG(aQXLEIkLx9vHWyMCygi#r$n5ECNrxi5lE z$#Kv`ryhpgBHYuZNe>_0#jYJp7FW67?w%>1K)D27W2t}UB^~7A1}hiNe|F`p-#RFJ z>zcB9H$LQdyLvX}Hr%IZ2>sTDhj)tE4ZnBu4B^3R!4J^8t8&eCaDiW@7^&tIMgEZP zl07t`^7qh@_&4xiPjeaZlDl8SZ-Jo>zLMe!%s0#E*XSzx9~#qoV{x+Kvo@i*3Vm#? z`)5)=h_3tbA6V$u(M832#keyxreFF7ABvf3yvkn`UX-s>L53=g)w$Y02JpS0fBIs^ zFMGSKyM)dTbxqsE3O?#D^|_`M#?5{Dnt!&`f8J}C`rCQ`E}lP0`L}*+sn0cil=s&l zL$iCA(r@zn7yfjq-`cs9&hh)<-couu_d9~6^xgb@jK53#&2L#s-^SmE_>1{tH$mE< zopdB1;DH=u z(;SEQ$C1DCzmmU(eXOFV-vfM=#sjJ8Jdl<$n;be`jH`ci)R+-f}T{B@l)WYsy&GtmG&PjRkSAdTw*uM_U@*DKr z?kSQ!jF(frE3vm|Y0DD0yU*_TP#^diV~tS$mUxFhc*xGBfnTy%j6GC>3^#%SIRGcC z@o((WAQoOVXP+tsM`Gql-x$oZpF{i7-x+U5@+mF~zrT_5;J{9Xwr?wYPF4>2(6>P@ z9(yS58b7>O6@4U64aR`19%^JUaM{dxm1Pg*TyQEa)zJgWK}WYu@O-Yj0I%#%=FI{; zti3^}@Y_z=SiQ;wyr1Qrl{MfMttZe!rACIE-g+|A1de8ylm&a2xK9V)(B}E%whs8D zOT;7EH>WvRWjWiw;F|~2kwELbw`I1Z-R!fLF}*FiIt`Eu(eNN}W_Y`YIsf5wjDF@D zA4BgP=*=@aQ$#wlA| zy6Y&mrepKpHD>XdX)W2Yo8|XqvD!|vzmxXDk>gKUnDRE};^Y0mM=o2+8$Ql@2QnR> zNaw=i>3nb;9$ic;0sD~}$FY4&>8zgli@^L~I?+0wjxhJ1;+>@__)^=M44kvPEg2x6 zr?5BhbgH?n_(AI=GmKC7Q(UFvwJxB2GMz%sP^E?a&GnJ^KI8w4C(#8L#|$!tT&eG- z(LTY$uwuH3ZLzslN;rFiA0$CIn@1x(|fv^Za?K||NJ+Ep9q&qQnIx^FSCXX;w; zABG8YS?Vfgki~h+ZFE-WJa#wB0Tl0;D3c2(6TW$0TBXkc-z+!#4e?Ev`99C*8@~g+ 
z_x0w?t;9T8--aLN@Pphl^yZlOV+{TfKTHu(Gr<#Y>eey)Juz(4tv!$A?hH!eH5XS{1@OPt1J`i(g5fVS@{w5c4&_1Y9pt=`Vz zG2$I;vEXFQpFJhV(muGKi&uRSuLxhf>WjP*`2JH)r;ejjao-erCbNZ^9IJ0@_gW96 z2b$Qv)>|@tD;*ew&=mhbIy>BDasN8-6oka3@@M1{U^f+$yWQem`7UK_1oq12B$V+1 zS+ivg_COL+R=Eb!V*xxOy(W3BDA!4H1dj$?Vw8b$z8-*{<$ejB@ynvsOP7*E>~;G7 zS7!E0ai9t8#psQc8QY&=%f~$Sy(y0F@tgVZ*j#B9Uk>l`6_h{V_*yv*9`MXDw=T^T z_3PPm4tT77^_geWo?s6s=X7wu`&r(Zws6H*+95g5><=^#40Nn)nQ&a?>T+2R@Lu-o z_!i)k&dKFiwk(rl_=NUU4-#5F6Wqh?eEHhX_GYw)HCRlZ=6=W4M^eeW>NepS#p-_V z|5h8#WC!pYa9VGjwK;Y(P4U<5Rt^GR_j|D;;0`!VDEPohF0*r1H}Sklt~z^+4h0W@ zkNrJ-7Pi~5pPX`%1+Rl&)kb%9qVgPe!D}Dun|9`(Duy?Q4xXni=I`DYvE}mND#0`H zqxRjnhyC0?fVbVaHD;~psvV0f;Fb^D^uKYT5-!AAw;uyj5ae|!~5VplUe(YbuH<} z0NZ6k{M(ip_udnYOP`unIIgwFACOKB4)>LVSE8)O5QMkdy|zBKsfMiS{DXOLDZI~t z&(gyM{O7o}_;2UGfGr0m!Av&`~?6vU;<{bNb;>l08pGh;T7u&0PaXYwD`PpWM*IDq%d~uq2 z=EW-gR+^bkAm@)*DCNSl@ZGMya%S}2(gwZwU(rKuE}=Cj>0&#lXh2WV8+R`?TpDOpy0Y$AL=fZf?T%cFt` z*r)QmANfb}fyC>LobU0$$K(slqg&ON`cVAIj;Yz~t1t2Sd^4NJDnDBOi|{o?L&Ddy z_L=uXALDAv<(~3;6Tip8p?=kuaI5~y;LpMV&ODt*g0DfIaP>bU=PKv?DA`X3%j$d` z{0)uY^$pmAd{~Wt78q!sSjy^LeLXldcGu6qj))K8y}>?U%a`Cif~_22k=%hhq?lHTz1G0_KkQ&~L+m*bNs z!cX^9)(g+}P4n0Km-t=j3KxguqoA*{y(v0hM!cxlwtJ?2U3|3I?_fW7VO;LLA&!#y zo{G~fTAVvP6vyu|cB@+$e`Y%}c|IE=H^{2ql47@z0U#Zm!E5&&a2d{Cs=MZ9QbA zJjkuiWnFSmQ2x}SJ>Zz9kss7$Iv~ht9_OhE_Afc z;~?%Z1`f#~W8JZ9%Fw@ze`)KK8Heh;C)b^tv$wmCr1f@YBOYzRufX-OrgBcA-X~-I z4wK`CI7l};$HZ@*Oef-#oE4SneP>VT%kr9HRPNr!L}^cL2u4@73C?y6*#O~9;}kDE zPw+|~HW;7$Wq9?q;M!oHoKYSO`Y;sk zm}AiP^JsHTK3UOUD7M?mWYx9{KI&ciqV3VV?Gf6>|FUhXT~olV<(>TeGv%pC*6n#` zO~x7W*JOMlf30(=*6pPmve@n&;1M3`pd;GkW$;e2+aM31$^A{>UpP6yZ(YU5#DT^F z=_vHm9_}l^EnR}2GN!e5!D%A$Du7+_M6DC<;dzZ~lWU#pDB~a>AdihIZnUp=3wE}s z1AKIn=n|US`pPpoNX%I<93(dAk(YfXuw2VJk>2Y}lH&VJ-tNWkj5hrh^QQv=F;?MP z^e@o95`ahOZh%vKAia@|GmE8JUU7ARMenWuD88Yd=&te_v-XzQeq8yyjH~0yD%aA% zVc+HDfML-0Cn=+Li#oyjmo4CL(KooUcy5}kPLpTg!;ZrofquwR81b9(8n0ji{%ov_ znLaByZE7L96?$k7@;}#EX>DwIOnHZ|n0j5xQ$}@EcZNPj8MErDp3e2tKI}JAe+FNm 
z!n~_unwtX3dAT3q0h7}OYsTNAZr-lo)3`Tc+vKMXm$UL(D5Kb^aJ1^j0bf2BkZn*~ zQ`pMkGI}n2jf#I|htZpqV;Y(3)k0mBb>&fCV_H{KUF^=Y=6tz+osHnkq)di#c$|M@ zDxwFJPT`ta_@Vf?12QamoebzG9B_q%=O!}yO{yOVv=8qPd1?$fG^JXM(6N|tO+#@{}kJP~}ccXRwF z+N0|G;|} za~2-7PNsD$!StKp%H@RE+;zq~7%SswQg;|v_)BYWb(T2ocG?uK8q6iW9DLfs>0rEB z@U^1kFOW~cJtVNpkb8NtyjF zlgMfLKR@!of1q9e4mj=h_5bwlYX{bUcey%x{xtPOC*enQQjYNxEoj7Ana1n2?(b{N zrv2-zQ+4{%>6dsfdsf6D@Qq#3ij#8%9cFbix>LL^y{|ESU$c^d0S5K43+)JI%S8)=-_w-Wi5WY&@uP7oZcJ!7S6zv z%DHz}aR#0A;)FYcu0@$vCa;FIl4qRB)#i*7^iT0@t`dA)(K;@?`dWDW?wyaMR^IaS zylVb0^|fxIbG=kgXM4GGy^3{TLEU*T^S4chabUws^O2>ivu*A=-ylD?Zr6HFln%A} zqwuBIJc2%De;vBoj#2vRhCfWJ%FSv#Y#)zor?23q#z%lq-IOD;JJ) z^|+QT9^r47Z!bMkYF1;dAyw^IrB`g6h*;lMy^a}Ac57cJilmMv>q=&Lr#J5k?In zW%DbI7pIyV*Py43JIfaW-zMZLeHd~td)LDMHGWU))$`cBcLINrX6v27_72K?>}Q`z zTdjAmN81cn8D9Y2v{`2C%irHWLOX5ND=m$dqh4s9r?t6hozHtsuJ0CzZNK9um=W>! zAbW?i|Hj}K@n0tM z$A)Se?85b zL&jgn`sH@C=>+_wxyx5`-u{@UxA5L!;pZyx;hh=4;7oCjYi#fgM;u7+6&>KB?nfV2{miz=-7tioqW}l9H zSA!gjKQ7^WcKsUu&Dqs6b;%9-j@DjUtW`FTA$!^Rfhs@68tz1Jv_FNPR5!<-H0`C- zTsli`@Y$YxsWUG*+;h3X&aY4(cAk{af49k>?E+gjOmsf>Zsv{X4eYtjhL3s&kWchq z?-1|G&{_LE@D!5m#A9?8wsMRGke6;pVs>79Q9OwkW_1NasaXY-oZ`7vLcH?95T zLZA;j*qCK(Qgi30nVUaeg@3e{Q#!Qbe>|O~?g+VX{{Z|Wo1hcCr8RQFu6Et}?9(}) zn-2D}#XO^fZ$%r~IFV1wt8wx)XV2H@S8_VB{O_2rzX7`sUwjna2}8TS@_cww=ck+I zIo5?W7Y3j9JX!MP@J~#155LLR8^|E^yhbvZ?IRWc)}Vp?tVJ!|t2nQw@1ck%nWJEETE)zwDS`z6Jo0(;*0^bY*r zVLBIStND*~2qHJ8x}jk&894a?#xGd#Cv&-Az0Tt5AuIbue_#i=man?IeRcZA9N)N* zHt+qbZ@#VfY4}M#bhsr1j3s!|u6YC*{BE4eovIYZ$$<~w_$+O}^Hx{5 z`GBtSpN0Dtj3Hhq#>iYOO8E?0a~~P=!2#btAV*aD!)DCcNzsUX?3?s`0rs+Q`n>3p z^Sr2S*KKsoAZ^J$-ugdL5caIIzG8<>6-=ycnce&hKo%dXRUyje8 z&T}7iezn(zSDQJncA%x@RnD<)<@^DED&JxB#u2M~evo_V9OaBi=a3urtgzRK+ZB23 zY0i)rZ%aRFE@ou_IaYqW_(gk~rKc+Jir=CIq&dy|74xqq6)cPHO@ev=iGr> zt5fxx)pVmeoMk> ztCQNkfyWg3r;POzpG*HPHfr~hz(02Vq|KGswg(IYz1MNLdVsu)Rb4j=zpUJJygy&i zb@Er0=PY?@@O*EcpQ*X>$c@h{}jJVGCURuc!x$d4^W@{LtD?8~Xuw8J|WvThZ!{cdw&6l$SOE ze@RzpkGOP)bcOb)N^fX<(i_qlzppWh&d~4KbVfEeCsz=EZwvU)z8o9}&Y{?^b0h8V 
z4)ZaqKLT^c1ZIi2)tU0v#bvHbnv?Dh&YW|7IU8Ypg2K3*jhFuO*n6k(x@Q~A@>6B@ zITfF2ZD7mu@ZC6P!lJl%4+1sWUeL2ta z%^~)J(&oaT&7!QiewxpbM?@P}N9Q7LVNaRxQ9?#FX5qxbWftP!0x$7;@>-xn6*_EZ zg1?JDUFY+^CfCE?158dHJm7QpqT}FRG$iKe!Ny?VPX>m%*lExAJvNbcSL-Be~uhB@f-S z?oS!lnpmCoD`c^jEZ;zN^qYC2-7~BCVg*`=zE6OQgXOikgjSb@5xl8!X6woFlf|n# zd(_H8ZpT@d#&rZ-mwDF~cF|Nr$bb35!A_JlHc*b+uOED3Uf1-HO7s0>s z?Z|i=qXxG{$aN)+J@nmDk-WBPV$<*F)#Sdm!Gn8z)aidPg z)ShVQ=(r|tJl7d^?j*g@TFnWtZLnAGM`=TFh%WMV2e6O}{9_Zg2WSM>;XAR6C*Udq-G-)Ctx3U4`Dxi(d&(mta70c&#H+eQB? z=V``f^@4bs*pJmG%DEqWa}oZ%iaunks_+iHmpJ~>cNlfA{iEtr`SwLVB0f3zj-thZ zZ)B&@(df4(YwY051V`aXu4Z-3U2VB}mt@$_;m*$ZUHx9F!?So&uvat|csRqE-J61K zUGbD^nGP@+Wvbw8vb`04X`+mrd*fFUc{1fRJ=@_=UyPLrMwN48!)8^X0Yi+&S(7Sp zCU|w~8Ms+aM=Iksek6Zg_`!B$_#w6_{#{SUsE-fi_$uPfjjJyEDSl#XqNDuFD)e`- z0Y72#%y+Uh$j9sF&)`Q3!-sjUee~#)@`oEo7!UMcEJl@ z-LFJh48oXJ-$Y~aSoGGo%DIumA|F!Qx$YUXE&oArO2OjjE?Nzj6Hetbt54!`*}iV| z5mw?#_|}-bs)7A-`k+J|6Rgrg_3a*O##egH5Zw1;p4-b{*KynMdOL*QxyGzfhODn*gA~(Mp0bV0MN#j*(pl!AS!_<{ zQb^vk??CHqc0HF|YOO^GclX_JIKJuak0svQj-oq>sW2zG+v1cyte(#GqU5WU>1T(R zGC2An**%p*_dWdrolUVXvHAY~YJH1S#-=jDtIoaCIJ5KP?zDLh_FRe%I-GKf3o563 z%z!)cqm`RI;b%G2rgM@Qy`I5m+*{nEL#`<5kmXgquKrX;cz0({$iMkT%DepP?93ID zkcXGG*L@(>`kmH@MH8*@-23Unv2Z^V-)sGXDbD>= z7jHOx;ZNaFed|wWU%sF~AIqoECbMDLSVfn|#nZt9=}m9{46&K>spzXQEnaGF*U zi~U;VWOYV=b()jI!r@*^8Q2E7FTBIR@~%852X)nY^b`STL8qubi#=>NP9WH^JhGZSwD5tvtZ)nev zzCkM+rhE^taZBID*Itc0e6mB_aKRXNeFwjFzaGp^Z&eNRi6_{vjofShfO1IWH$N4D$Nq&+g-zBZ+xgxq?700FMgR}f zj&y7#Jej_X@#)(c>R;;`YENtY4n7Cl6mVod!@)0%Y_u`2 z*ZgWsGV5V)bOwrB%W!zC0C&-DslLWATn-+8Mmgz<;c`|^g@;0UwJ8`R|3#Uy`+=J1 z`X)N4Mf;4w=6Wwn?@4wtJ5%dqxqcbU;LGZbW0UIRqxl|u;qS-6)#daw@|nmd`(%f6 zI)bIiS?sLz`nKX++i&)718=hd&rOz#w=L4SOUxTvCoJuyn|*VFGyk%@^d^1IkZYy3 z<;!dUCXLnl+rV_arNaT6$GI)MI{k3S_l4m*XZM6_4}Gebiq5sL^uR_rA4s_}+RHv+ zb=*FCCX46_?nP6}J37DP!#NDIz+m@u!Y4M*23*Mw)hS=YW{4Ia<(m*1hwcTF&V4gs z)FXfB*?k#c6ka^qdy!ppi;`b0)LBnQi+A5wD6<~F4feUoZdlr8wqEe*d@aYT!|w#Q z%2=MtbA$rrSF~uS1y7-8bp{Z$w|g)a!OaxDbG?)8v3)JH*_HFbnwSfGaDaE(1HTJi 
zP#@COig!!|#6h@MOvJA5bJbba0awv=mg}s_hMcR&y<&2T!_Bwho8TCEamR?y97FdV z>(8+lLp(2=ptc+47;BKSW&0BGRSmtRzEyV)T~x>ag`aLTz-$7>D0~Hsvk?-LLzlD9 za07G}ec%n|<@h`8%+IlB#?XE(=iA-1Z_iLGkHbsMSp+Bj*0G=Rxhw3WuziDrV$ShS z$%kx>&IW2T7j0A;}yG~ly>ddhIm1FsG4I+ z)}A7TCv~+SKzAN@O%sYw?IKC>vQnabvLYd+EZh*JG{%E%x=ao;2WzAoGRRJ97m)F<3(zlwS|lVmaxS zF0i6oWH*ezls^5L(qR3q$WMl{U%go{f}2-_j8EqZ?q%<`{Z4Kc6Yuh#@hS$L<+*5| zEAbI)zdDn|2gjB+KYT&?T&y9Ci5FGwbCl6pbk7F28Z6JKWmuFn18ViV1c0F)wEx=I44^^w2jXB-hfTcF!B{ zH2)C4s;v0Ir%X9i94&h;xz+g!n!9LS(v{b9;dcOwa2GNjaMy0Gd|hYxRbu|o@AP+3 zxC$7Da7DayfJg8)HUlj@YzFJ+_<}!~crpGOWy_rBflXA*txsE~B3M}p$daMuHZ{qo8oOPqKlfN1;M+y#42K>gBPLgv` zo%2smdZqD1dlF<5OLzNy?9jA#o!_^-&bVjs)ul&-SItRum2ZPR#J|bbNu`T+_}~j3 z5O1i=yMb4_>2v?M@(rDJJ4;>i_PCv|^0{eo$`Kacd)p&w?M>F0Vdm>E_Nty9AF}p8;E}`_iGw{*9Dfj~;@Hv-()q z7M$7>)a+?(z_rDi(%?IWlB*JZsZH@RXZx{d1K)_4v2ufy3&M+wmwwrp5Aj!9&aPE( ztA12h?PmH1ei58a`3$$)d##7h!OQX?wlY`LcYI_^^*s{RN5(Iz-)l7%{8#d&2j5j3 zZc8*3?L^0FBkSL9>f5&JFYmi>FPhI#*3p~xo?IR4>yxh`3+N;If4yL>jhLwSWAMV? zKx6T!=4TdG<`Ogh{T7#kS?5zUTF5Q!Z*OOEIzrC8#Q_6*W5b_*>-!ASh0--P_S&h%HT3__QeNM%P`<5npREf` zMW-FbIT!QSpNAKeH>^F-+F$G1-QM9FBEX{GZvzHhTlrP7Gx3M!Z+G*|;a}f^*M5|f zwAqqh$exC~fuq_v8kabaI*Zr6nRU1*(cXq~Ys8L2=OS#Qe7W;^WRpDA)?ym)SBVzW zK4WQ8R(nchqYlzG`WGFuX!8sO%dVKSq=@TH`7`*~GwGG^SUs-e_vUd%@c2#D;|yQz zk9iyYDdwd4qUK_6=DX{LSmeKW1|W6{~3KDle<=8?GGB$u-Rjhf1R`y*uK_T-K9wMTHkM_z|qiVovr zt|D6B^oGwTrR_%;x_TK#%5;W4UJcANWV#ziZ=5zz@q&*(*LvcQ32O8v@g61j#BVb z`C3R*afa%h;QHBUeUN_S=LlBjWp+=i`d3V7J(!fwzuxmN@my!0NJgR6nDzCL!)Ne_ z505ae1C*_I7y7gDLe49v&{f243UT-EFTkGhOPJX6NAMHhMSn$_G{r}`|2#|=#iAvj zl1XSe{b7E~&mYzk%@*vB(xG2bd}fG`R9`qz`vK*3mF;o!4$T84oAL=X7yP{1$bFlEKN>P0 zY)1AwP&Up>S)JP`dNl9}Y+f9^#rNaU75j+~WNV<}BjOUt$7qvsd9QIefkv(66 zGR{^gSIfa)0cMADjX`j$4Z-f#xa>FOa=+c~6QkWygzptFre*qWN4>y1|CDksWEaCA zvzI;KQ+r$G^TaWn`DtfR656yBvI=!i{ zHLuZ+d|_Pr&5KTKlFy>G8hv-*J5NJj_{{obj#tD}#EgV*;o9kHt+5f4x@aQ7Ijy`$ z{J8x|9l0uf;Kc8aUO(PfKPCF`wvP9`4}4*Kjxm+_t@TBHi%xKjF&4?9&Un&3TImzP zBN_;nztouM3qGdrC5vNze;ohyQ^L9UTYHm4n+df;-LwBdU2Nz5z@)m`OJwa+0N%+h 
z5&fjsz3Zm?pAb%coy9BIS=SWoH;%K<6?+Bz!lQ**cmg(`_67U47=GyHFmtVUvU9xl zbThwXPZyYg;hzD&<6A4A1zc*3g0D>5g30OJZ0y*=rpBcls@r?NM?20Au4yl)i)-t0 zaV$qS&4V>}5g#?-wIUB$T|%zhDb~n^6Y<#|%BXE?&+&EO4a@6wbDn(}r#LGdI$!_k z@ls=H6S2a+_p3|evwZhP?c4YR{b@bW@)>v&pNR+W%z5o@^rCR!m8bDrG8!L2|L!Wt zyX8B^Dg2FqE5S1YY@aBUvAA7_M-_9Fz7}1!cQRjcd(5{)fMbldX507m>(PD8bERvJ zBcJb2O$3dhlO4C{2p_WEW#OcM%ZG<(SLc=tXpzG=6X&)$&gyGy^;kCGCzD;`HSB@b z4Y6gDAK*NQ8Gi{KusPWs5Lfll1OAz+?8VE(H^_^00K97F>t*W8PRm9(dOQ6n`eyUq zY|p6recNU5q(#@7mwZ^ELbf&fm^6`46AR zzg62e>vwJ=*^@c_Bm7qV%j)uoJ-^6=X(KbsQBQhX=PpW*Yn~&1p5dzbPQcZ!t9x1l z(LQYDu{d8W*XNx1qkZi9uB7@k-Ddw?;G6ZA!TdwtRa{HKeB$&`{raH-e+8INbcck_j;QLQuST$ z(x>(^GZvly-M|O2>)FVDo&JSu=`?3MEq>|qEta1-gSg8)nszG4U5!7%Av-A>Stpi1 z#dQXqJwg70Y-p{!VB4%-?EC(4zLk>ecVeja+b@iZ`AyF>PgFs`GQbpguMJ zkpdj+$%h0GJrR^&5ke7N2YQ3EblsLT7_6<*xvn!^7Xu zmV2)`@!B%@0X!2-iiNmx#WH$V%ezt3eHnjiy{9q95Z z-U}}ij8$?Wn?Hh#~y7RZ-CkMMgtzZ?9P zKh*qh*l(VjmcAW(^&~Op$+~}h5=A@UQ`yxbyhU7zpB%1^fh*3080cuXuA}e0ltOH1 z?z=1ht6_+75@R^XvuypFm|gCtyE#U-MutusdTxke4L`HFGehOHzihaiU=bXWiQ#wt zI^`RbpXd2<*~G5A8<$~B$YF2Z&*mrFWqa5U<;F3HbGPzx;r*%NaTRbTe%Ma@5Lxb- zEem|J_0$FFT;jt_#rG%Z0@hrkq|!b0wTfAab?zs>H)$>DAJ8H6%kLxUAfBAZ1dHbi z25sc03rx)@5np%_F=k*ftSKu$kNHP$1|CscZ$r087uS}ti@7W)-tZc9oqQR^+}@VU zx6bUVP_OjOqvY4(m-9|-ZHuTMe=xN%!Y*=28R`C+2z$nFpWn)rnc(Vk6@N}}Res(S zSK&lBAeKKSzf8Y<_5iti$@2DPpVZ~sK=En&jiy0NA}^u{P=NzSBq*(W*8pN(A?WoHJt65PUxz6WIElR4iD$8J49a-i?IRKTnFRKN8; zqdmDWx!e!p4(NXT;<2;g>GVT^V$JMnU>%FtSu5bY$Y7E0 zf0X5a!&~JPznl9qd#g9&ca-rrHp4Gv{E^N4m3g*b-#VDShPH8lPE?4S9@)$H8V&7p zzKr}O_pNx9F^!ltkt1LG`xT<@z^<5trK-pKARpfN8-+=V{xZTTiKH?vVZH{AJexY}awShZ&3tNs| zsF?fGz@LCu?js&I!5JUyGvYeYxR03ieQ9-T)|c9pP2L^h_n`xXXVWC-Rrkn#=EG%Y zk>I}$sJXFTf z+qLoKf?ja)M%%p-{i%$zJB}A@E*x-`+oEx{Z%Au9yTR|-&NB4I?m=U6mMncWzZQLU zF44#`G>)Kghi@}LW8qyf;Ds&6W7g}KOP);^yyF+7iz@yixQ51DeXez`%ER=z&T*aK zTK&M%8aivQDY|sb=B?2#`>_Ue)_Sd_b4<>sqci=g&O+}vbY9HqyqMFO?+!s{^;Z$S 
z)!(VCKea0wIU2k8hirvtsX3o;r*DIbhS0#;Oz9y<}EvzG|*nYJ*qWm`Cnie%PF|qvVz$cQ!|Cg7Zr)O4{{Ez&l=H(V!ma*un-^Tj0A7feQ_R-&gO_MY%G61e>NwT>rD4#`j3jm(kJ|o z?F9rb=_AuJ{Dq0lN9ZcIku`r}$tFH(2O!kf15rFsCH0BYx4nzHKa?(fv$UJfrU&FQ%dDqi;3-3VDUfGt<5LmEM$H&hp2y zy@9j9U(6q~^O8nr=N+P#aHaayMiKW}o1(>ci+8C^zsL`TIgfl^2b;cUse9>1)`tf+ z+{x+$c1-V`oDS?zC_S3Lg9p+1wF=`@S*QE3@nhR{#t*c&=K`^IZTM3ejY~F9JSAB5 zY&e!}_iH$QY>`h9fV(ER?0}QD;*!{ffH^^Qp6^hd#~u^!2+kuDwPtf|*!$3Et$n z)-Odb#W57;(A;h+%J^0LETq5oh;PBuUn8qEViU>>+16prnfbr>Ba_6Z?@4drnL$4E zEl}n3VdsD=O7;ZDSw~ok=Xw4gq0x*T96Wu&6D?Pd>)I*5;j!kUgl-hUg;ve3bY0p69jvd8VWG zCcnlz>R+_$k>IF;?;qlGsJzCg`4@BFvCt^b)1D_cHh70FA2E@4j`zavN5K=nd#7m6 z1cp6A#A^cNS~3ov2kSQAO>GC@Npp(d0S2!XCDLhX+v>G^-moUWIzmsx8lHK7wl6A%5RGr0`kk<9ojJ16HA1^yCW z>pN7M+gBR7y<_}*yU4EdZ(y9lnZ;|T+211mi^*3u;4BtwSEo+|f5Q2Soz$J>eTm-& z8+d6;x2JQ7oXn}6fc$9rd;$KfaI#VG=3}#c&^LL`lf)^eHQ$gv%GXo;fmQleeV_6wE8Vh1G6l`mhOd3S{Hcs|ndGskTNL{*+B%zC*fPm{ z6Zo(j_8X}h17|?We~>KAlPjPzm^6RV7)u++6P+o(2miop?CiV!DsRQtsQfNxZ1DHz z{JrO6*;V&y#{*Ue|8A4{h0--)ey_1u+~7->*_V7Zzvbh|7Y(>qt^vG344vH3o8Jj+ z`x4G6g0y&-%J&nxEJ>hJZ^BjXOgDyO-|XwH+P;I^3Cd>gPi+a$Q;!@0SP zzJ=!Q@kuW#$9oDo3MS3#@lV#!PPh^+`F36^pPL+`u@TOn7hX19^UmG}HZu?O zh2zlzt%Ym#tFwvJpK|U^cn+JQva5Pl&oob*p{{t!(eJs=OGQ8Ni}*(UWx4MBrceDA z&WV#)x|=ZbSF&>G(!yL&G<3KZtyBhnAIt{_I5K%@MRSrR4lc;jBS>kW*r0^a#pEH^!*tVhKU!!dlCCbED!JH_ldG{t+bY+ z^=#>aL)eH$6Fq=UXz`A9YU^(cCwtKCI-6Q!kzcw^`T_l2^|P4LZsJCIw<}hB_cr2? 
zxAjNO`KPqbwx^A5#ZS<>Z;juI8`*Z?Ya7os@V<+=nlR&Sb21&qq3DvqvZ|{`;Te>O zwV{JHg3$VxvZK<;qJj9P+|2MQJ608*8<~&#uNjN;mC#XZU>O7N77oFaw?#gLrH^t( zH4g37Y$C%u7`x;~`sQIX7AvM(^Ru-^#gJuJ+*-UHKfFusq|M1v4#(Z#T+fxi<{S2V z^346#Jho=OKM^iR7|ZUM7+QD9u6=90YBt8F_vM@8=XZ}${|vqrdq|>V=y39W*$Xm{ zZ@wouh7W&iQhN3P{RrO8&{uYA;G5&iNfy+%;`0G@wf-czoTZ-n-xDll=lPN^r#1KA zuzF=J?yL?Oq9gvSZFg5bwtJ{o!MBm`so1;C+v=|J2f1n1hHKm{pLHw7p!ikDIG)(@ zcu(ig62rO}=J3&exde=y>6E&@gh%D`X*`<8>$*mVK?f8w`VXJf`T7OfTU!<%8v3ey zz@E@gGCxv#l1nG6YsiM7_6Yoal!;xO(l-%B`SArbRGtWd9W_vAI*Ie25IP#l+qn_XL171xVTJzNXqudYV-VdS8 zs=TYuy{&8yoAaggOmJB{PW+5`$(YSqfZsKqk!Cg??9afKO7A~I7eRwzJwjeZL66{z zpwpy#q<4@l_U@T7va^Y{xDUsX50CpY_hrFVj6ZK!r%$z`__G_Ym6gJn)rQA-mCMII zWxG}`TnkSh$G$uLsdI##d`p+t#1l^c7TS}}bZ!2oWX!DUdp!?3tVO|pPVdPc)Vjz< zV<}M{aINuC`7u4MqXx~BPpOXPNH#{5_Xj?nfm`{t((_t_5X?2kq`LP4gVTx1<#6{} zBUVnW=sLsPaf)^0S>}?gC7=(F6FxSCefQ_hoghUqL}Tf#njI_x(QOPran zybR^m$rn-E8`{S>3n9z1)7(?~1lh`R>$+!Confu@7=44zaCW=R`+(*dTutRHGk2Gt zNA5f2WwT7g_cn=p)w^$wE4nUouB)yc{-BrEek%q#)Aq>nJ$W7<^!76wpt$mwM+`DL z0W5d+G*9*D6MdJg*&mF5!uLK0K9KU*-CRO^tTViv+*n&zkR8DZ3v* zPWEgbK;fNu-o4X2SM}`NHh-@AY8RhzpaY6}sZ4)*FTFIv_~kEa?dI)8z4Tq={LkP$ zHzvg(itW2G5x2E+D0;pv$okyTCSKn`24m6lYAJsItNeZ z9VmC-%@0Ci(jM)qO_iJXSx;(5=Jgp3i=hl<^M$^s_v@YkFO~~5f zcqRBW>=cdLbM`LdcTD8GGVtl3C1(p0*IP=X5_BE*Ng4*;E5t8`-=qIFz(>|U#zz-^ z25rMrHrE^ex6i5jT+2zvIbTO{P#a^3SVwK4FC*~W0oH)H6+VGo|867O#3&RGRqmJM zMX@}cw(JBU+NYaX&75c?q2` z(p>1RG$;Fi%l%4t4F3dr}5)4cQc z9LAr&qsLkJ56y1N=Aq^3C|M$QT7HbRpe^SvR2unuA#_8(4fK>fH?5$jOpwX1*1)ue zv(K38h0~E`>^QJ!OnX|`j1Kd`yYc@zd@l>w)ZbBZ&PMhgkNNIJzZ&Fth@1VF0;h7O zq=&tE^lESfU4||SIP{D3# z25xj-#y}b)!c*4K})#(g;C+h$u&U#Q%&M9$!0ax=1c z^W7yD}E7ei+B;Njt&Uim|*wx!Y0ZhJX+IkS z=UNot-%0-y^gn9&hSU%Y>R)^(9VL7V2DLT1PC1paW%F_$p^VB)*R%^~8v4jSb9vz1 zr}^BCCd59|xBKR>*LqVtLz$HZ-&N}B%3K6oD{lb1dW!hvjrHJ7vChFlF`iIP=@+yP z6!QJeB435;^OfT%`NtZcJ>wAA^=;T9{6(0`j6v&HgSy>kw(T(vjaPhI)UWniB+Kae zu5E46qQ5zFdk-yiuM+R*&q2z1+~eTne#F6cesKSb=|p>)N6 zjQ!htdne&b#ot%1n# 
z3Kx}g;o)=Q^ISiRetcKsTVI!slq{FFEMF+^Ta3#$WZ!IJFYk0+@j%*?uD+6QoT_i; z_b0sWVS7%M_+%nDk!r21L=N_~;y?76*7qfsI#06>+{%@b{n*iB9lo8(u=1I#kBd(- zK|P0WOBZM*9utlqG?C8nx;PO&M68EzyU@3Vz4@)LYd?YDm&}zmow{JpH`=!Vzidw6 z@3Bl?9%3%?P%4?!cQAb)cy}(w`X+$(cGd7jwgtldK6sOPZ+;&HC#sCCI>)n#06aiH zo%>Yh++EKsvbW_(8jxQlJEF5d{rCvEwmOsd2bv}5AGj@(l z&Ua5t)`N4C_(J1#ej9X4w)T^qA*vjJLA)&YCoNu@tGwuBv}z*d*l-@1XvKG3gIhL( ze@}Fgee-Ul3n(|w77vc++XH*6|IyTy3J%DoZE!dw-?)|xR3p$K;(tl~b z_4QixK1-MDz83Xj>pE19=LO3jkQ`((5hccuTNjj(r+uum$u4AjV0^B`)W%-; zF8TEz0`f5B`|@`P|R>^*-@s*qH{b7n;ynI*8tx4&2jIpH^@SydY zd_g5M&<)4e;LEA~0(@vn8R7^R<;%%_jmFHwzliqdWYGSzIrSmJCyp^td?lUBl?_Qe zZT+mMFT=f7Bj665iZl1x6WR^_pevrM_?7wjIY#A@{Dm7dp6De1Ej@RbIhoX#U$0$10=lAa$e{)8*>8TgW3GSOoH5gyU}C;uS% zv%Wyj2)Pg4jv%|yNmoyfihd9GB)=uor6-GqGnoTtbIa?<@WxryDZNzkA+LT{D>nW^ zx()1M<4Tuo``iCR9_H0G;$b6MG8b1TLm$?Yrx21C_|z}u#m+E?n`nMK=BeNJzmLc2 z`FR&bi5`>4yEGn!Yt7+mcd))@_fEc@PUHW1z>%J_`}gS3ex^qMD$G6fSMn{})Y$Cl zzoMRoaqWyttGlzc+Rqa7e%l>qGmvvxC`|EwUL)S0a zYHc6U3DgJm?=Wr44m+IJ`W7bnj*v%rA0wZh>2bF{K{ap#Y?(SZs72rjJm4oH+nNcm zaU!pzyGagA;0vbsc^N~0gCi?hV=XTSySWSLIQENW-5fSE(`@DxQ|#T0ZOA+)c~?VE z(cB^(C4W8s1jM@`ru5lDGPdMpuN(cTwXE|Rz0;m#VuUPKOwc8a&pQ&IALij@F9okf zY@k}_uuealiHLVunT|E5X+9f6E=U%H$PzoJwHs$eXQHu8x;&uunFdD{`;i8Bd}Um73GBbhxn?eWrn5alDtgbfza=WwIeTerLDL{aV08+?yNW zgWW4d1Da2DhDP)EJ5w|;0?e9UWb4!!U7ZQjS+n)<$hxNE6WMYkcOr91k1;DvFn?s? 
zsAFx$=B;h*fk6MOUnRaF?dLd=lpBj4#~&VeB1@LDzB1z4t>L+$42cUm5J_}b+2 zJmTkg>+ckqu8GhGtCT}lt+6`2owxAeyu|voC^r3M#f@Q@jG-~R(QCQ;$uyT8GtHdn zL2&xHz_%5oeEfC(KNH_bpWbTcxmP`|ac=Z}9@^b!_SrekWDYs`7%|VaG|z3f{s^*N zYo0W%oxL1-D0KDyBAuR(-#jr{8Z=g!@L7J?f^e`EMx6< zK3;0)_e*%OHbFo3EeCJ@Ux0%N@cL{DuiBfs5DpeWA8W8DYHm^N`;flY=;x&E%Uj#N zzvzO;ve+JTpQFE5IeO5V#E;#u?NRjCPYWWurq=xFY56$)79E@itGRIhMbC=AWqFNr z4D#dqR-^AV-(vJV!28g)cpu(H#}of=xBS0Ga~j`8-+q41c>WLO=-cuCTcPi_E}NI{ z=b>5AcVPK`KRnGGZgqX;KgNETSNr@gdi`ec|3zqDv@$eSA7CelA4V>|wfl0*E4tJ5)&aa&A0=piK3?|n@G-@TVmyS8^JIGh`+Vh}uRNTu zPMwFIPagRB;@Ol;oQ&K$F;k@sdJQ%V%%NMfuI}@`90^fQx#2F60gk6`^=uD@o4_Wd1q7n6g+ z(Qv-(XJh2#AfBW%c$^OBPmkqr9?`h(|Cj;pp}r@3WQ?urqt5(E50*VG6JqPf)}b=< z^})Ag)9J^&byKgizp37>-DMh6@V934G;tnHX=k_-`!RT1I~`;1>t#mVeme8+IsdJUt1o7s?~JS0w2fWZx-lgxGVAnN%Ora^d5aN1wmV!Sh^N zztEjiCede2l`%nQ6z$5ku}XW6<-vQFL|dzj^T}BsI(^QB#;vKt$j+xB<=K~DuiNdZ zuVT$K!kSigD~%_%+|p!ldG9&)>ClDS&s}p9IwrBKwYJi@Vym(`GrF>LWZ`*U{*peI z%;6rts!z7x3(SW;Vlu9z_D8-BrI zGY4%JILq4CvkKv$T(fm1dR4BK*pO1X$Ot;H_uE;VUU#MXYvqf_zmcBL7_cT04EU6n zR*5!v=T^)DaAy7>1<$Zfl*XO!&#?YtD%4+5{fQkcawOUzIEU)wASL=xa`!( z274|!100rblc(jOpplQrQ^#x7=m16>_EfIZ``vfjr>=U+7b>#ADU-`;(d z;6gXYzs8<3$U!T)D|eq&+ly&i`E-^jFIzhHdC4mIBwR=SJokFYrnaO`TOoVQv?p6a zKKeBIrn=ZIZQZnEeX#LS@6U~PFEvGTo(B43HcuM9N~~Aa7ho?ZywCs6UhjMzuBHud z<0?eJRbfwr{o!IT(^*X40``3sw?TF_%Lh8cvG&MW>0p`bcb~=nH7*|(*>Z#f-4CR> z>x&i#)I%Rlh+h@oWs4C!Ir(rU@MsS>U%TIaH|#NR2DiV4_h&Kv`4m%yo-34XVN;>r zEO^u!OLZz{%6aE8vF{>_R|t)jEa#b{qIXzc5a*7xDpp4^jY zz!_^b+4`|FGInm>(H^}m9{`7Vojkc^vv&N~P$tjZEg6VB*_%E`B7YNqj>OQe;1w(d zU=d#7dHI<2`|ju3VOgE0oE?W`{+&B#U+Hi@o;#FfhumuQ3>#AbJlpfuwAJ5d58211 zU4xBGaO8nUu&7?0vA-BiT`Wy!EG?fU_b%9R^KuTEEP0%%Gj28Zrl5y6fXe_Iwc7S$ zSA^ef|FHFI>sBC8K zQTPL&`uE&-mVJn4#1}<$&S~fZxv_c|`4WEuJ16~D`~idm?ZN2zIi4QRcHT(f)4Zhm zMJt~skKToEPbHe*qzsLvusZXZG9fYLk ztE}I!!3w{AtR0SYUXK09%&pqnKc+oO@ezD;>_gy7@#EP~Tbst){oqaGp)+y*3C$?B z%Dgn3#-H;1OK_)iY2OI$(EA5)CpmIa+`%jIm$QBOf7B|ybT`2(9b%IC$J^}r*w;T*53u!l#p{h~SN)a!L3z}U?q#}A8}d7ojZt~^ 
z{8sAI1#zqSPyVjLE%FxK)Z$ipoN(*$x?VgDZke+N@R&*Aaah)DWIkLkxg%se3NP9+i+ZfXP_tX z4D@f~8;D;f655f@>-oaZM>=~gJE3Sq@wg;MLTDtMhepDLMz|->(umHcDE|aB5~gV6 zLj3#0zasytEzdg_k{92IO#0{2+cz7IK5pk3=O^YF=L^F9JmrqB^Ba*nPbPHoL_#M| zBy{owbn;}1PCN}h3Ee%B(BKmZ4L%7CKAEDy3-)6Hxf6bi(7`tw4v#y<+mzf66I_Q0 zu0wD=IS;NUl{dk)kGn7lt~ra{m+^fX8=8M7{~7%2;aru#IhnvYnZQX5;^nL6fpZmb z0+*Fbf?0BA6>t(at`DcLd&KO2TItd$is-P4;gG=&VgIMB+oxu2k?00~F7xF_)ei7T zm(_a4r_Zh1w0>#elgS($5(5IgPjhWP0nDHtLSremJ|nU+=07cec&N09S)ZGQtl12OtMO@)OXWCc*7wgA-?btOpZLOy*=Ql-rjDf!=!-SR2>qzslL$Cps1hZhc2^ef2S=%}L zmg%x)vo_*>dGftlu+k4-PGc@SO^=JkBRKQ6OrO_| zS;`g(&Gqp+oLB7+*C`wh=Nrx=zbfCJNwt-x7vcH5dY(t#^!n0`1u&+^!t-)+2ke|5 zpK-QZ*@wZYJ-=3?td&#P3yt%a&{|~9E{E;73f&p(i;U@*_P6Q3Y!74dDGkMwj=z3J znc!V9)+7hiraP;{n77>hp`xp_4}2QW zAFL%jM15PKwZ*_K8;Zxf#dZDseZM04n*xCwlWw|UCVM~}ai^rauKeV%eU8#4^Q zd_&z>Br)YESFT=Om`&{w%-7zBD5rjj4!s<*@&Vaw@t|>xG?vL(=8B7@JQiX5vG)zn zLPJwsXA4Nm_`Jw4ttb5$UTBZP^Gy+Xf$g$yzxuMyz)%OilKnd7lM5QVejD)IeEpQo zIL&JzG;PPz_mg&v;T*EyB~5d1zs`B^t-Yr6=V|(H_iMCPc6W`sHpnY~1j_ii+2dNc zU4s5ltY|Ic_Cj@!o8~xV{8+~PWLM{wi5XPD7s~Eqibuug1z=U|IIYvYO-*H#U-9Gg zUF~bGP}_Q@$N3v=8{djjy53}t8q?j}!|(_O50mDtiHhrg8Ws6O#N{&Zm%$_u@KLOZcIPK|K6r)WonYHGXwK|*SUTFJxwDX9&`;0 z5Btf`Mny4R&x1kjTR0LJgl}J`g&~pY!{1~t8~!Gnp8qCz{Mi-R#B!#_xhv?b3QyMX zNhMB+WTnnx+4CyRB-RsfVt*WuZpe3M>U%qvL6?lfIqgeHZi_B5(Nu?hSZsFgJ~~UQ z@V9gnY_sE%r8#rBUBI^yypQkW?3V84f$qMd*iX{+W)k_PvSXC<`$S`vM4qSWko^ze z=K($(p7YZ6dH#Afu4@*3;2I9lc9_-o8KBis#0Q@5<-T$fd`O ze$M%bQMc-qEb(;@*Q;|G=`wb#l6kuCQ(6HoD-+^w2A#*$obIl33i$XE6X3S)KJ0JM z?&mXM^Y?{cbiU7%m-pn}R?A=5T!Du?*vaD+*;X@GhfdTN?K}9rWy8FTUdC9-H~F?U z^R3-Ku(%*iJiHHEYM+OMv$CgMa6|oJ=7DF5;MJC~c!;}=%AQ_&Y!-nluOFoQZSA&x zjMVq)q24~LXKZc1l(uc1%uUkY-FL=NeeU<4b2DZ7UvB=s)=hToNLPNrtTCYP;37(J zF&E>9`th4#+|VWVRdyqjMDH5MxSNJuIA^2hE8-j92la=ubao!G<1D@svpZhb4oSU@$wdXA7f#c5-UZ^GXZE2=#?0DEUMm3ppBj=waTC*_l zKQW|pjq>h-@Ut&Oj{`SqUv#Z=pN#r{#;)E&bTl7(2m?o{#-J zm#ed0Y_NW*Vo%5RFC8+>AAbfKE}7WaxR8p$=xFg_tK+YN-u*odf?M&eWK-AvFFu|& z9+UQj4fyGz|=yHlX8#IYaRnvXxPPf~Le&uvFH($!1 
z!^G0D~so6s<3?GJ5A20K3Dykh9^V$+x}CZfj5(eHcrvXeIu2PG*#FK^ekbgWFPQXYB6 zyaGP}4|uV4F^9ao&##g-&;@PTG{#?R5AQdmHDs|xym)v?I`M>`e?@|O+)lo zI?D)slpkM#clA@cS--E~Uh}K`2plfYhuib(dHpx%^Y~vSa>eUavd@`#KWjej)sx)| zyTW4Ac-jEdVzcjQ#X%dvcSbgNW6v((BSSoOKmX3vgKaBxHh0FRWJ2f1U=o{^^{qo@ z`~NNN__Pta;8MQg@{>^eo_A`(L8S3<@K2ik&%htpR}S>R#(M6a4Ud<5Rk!+)E}!VyK2JTB$JRfji%ZwE_b3WS)Rn^*OS0YOg>OYe z_UyBjDbN};HYeMmfza`u_|~tdhIzLkJ7pDHB5RG2I(VR8Z>Oy+F$Z$UF;D9zq<-Nv zjJiAEX~}f)ZbrH%K7Z_6luV0sU@3&M=?Lb1vCaycmg9Ffcihc4&QuxZC+1KmXPUZK z+4+DZx{~;*BG_u*IBeRxoP%w*gH84jGwtBf+(6%#NH%7J^X9;`ZfpY z*|qm!+La8Fe9E+V=YQFf6f5SyW(n?4YYO6wNHg`}#qNvB>Q_YBoxPhW>wRn9a2@w#Ft=~!-VE961I|B>816ripAh#S+HuaV>nwjB{k(5h4@ABdnJ4nC zLVkbaQyJz5=AFf)t1aO@Lp__|YhhMat zU*_{MWR9`>UsboB$<=uR=TpFV-u@RlC}VE#>X5$nROsY@_Q9O5QAO*>Qi1-u@-C*8 z#ZTb+PzY%+h5Ka{SBGrv!mFK|kO?oM9q;?8eXk5{EhZhCRp&9*Jom6BQY;A7eXrRw za9@U9Z*pE4dqjWC(`|a)0A1DIKchMZ#@^yu@Cx6;k6Q z!*8~qterBz=XE99ANoiKEe+ml=fi|HDI?kzTch_2fXUPm!W;R(_&7EH71OWXo|ZtkuU9R#5da0`KtaHg^pikkJp@r@9xSA$%zD) zeS7q?DP643Q);gUEzpLY=V;H-gwwfX!}?B36EUzkx)xuq0*6`t*Em?b)+yiDRkn4t zr~D{7VTL@?lLBa6@frLWQ0Mrlvl&S?68|otsd}aJU8%AzKl&-@k4C#KZ-`IDH%eO+ zI^9&fnbu+Ydan9cHa5+VU3%Px^GSbGoj$Lx^OwN&aNhhp`DAAu&Zj!k`#FMJG^6um zo)45KU7zx(uK&-=x%8O@bTLzhKC1khBXOoCo5fq&xkj`73UF!b$EISUX8ZRq&3E5s zu4hbVItR?o*i?3OBzF`O@ye@p*TyA1=}@{~L*-2CXm1g9D%PpuwYa)s#&!xhMtha! 
zPU@ZJ?#ftqp|IZ6nTri}Zw^}587$3>{tjNPbC{>xT$8FB9fkQ}&X(-);O`fAHXn4Q zrqfM}RkyZSk}4x!^LBH4Us{#1u0~zar|zy03||*K6|L=r-cwUfDRbN1`}G%CGNZ zJX<^`c;?CfX7YsA-d5?}#k*pXWVY|=sGMR?FAt z=nYmL*gJ&$KC^TIU=U1#Ck+esT04Fb_>-@-$Dh`AljzKv&+Xn6?FffzW2AKzXH2HM z8(T`7;azha_-wH@D|2_z^Jw?Cl6G}3k;Zu{84o|stISMi6Ewxz!;bgVvP51omZQXl z3BWVs1-vU|_bJt(LAQr?TQ^L~yoK*GAcYPsVs=`^wI*(7y0s%Yg69 zM%T^>*4jV!L4uSzTtL)N=Lf8#s)dT zT{?=#yw#pNRlA2$?WOs`&ns&0J5zZI3GGWK@i4uN^hN>?I-R}$bRTe}?++uc>G*F@ z*JLX1pIsoY^kmkB=hP>`Smj;v(c8u(6IsWU9;-r!$PCS=%6m_sy_tY{p_vh#)+j3* zrRL=KnnTtXUU^9Gml4lnN1JzTBK+sctGazY!Qt1^1Ac1gmd>s!Um@-n;Wq~DX}{9; z1Dz~{r_t9+PL9Za5p^fv-%0r1z{_F6%W7*PaC6uga!K`0B2Ofj;NQLu=;hgbW1{^6 zwJUi0x*2$5eoWd2545j5Q)?Y9p?ei0BfCdMdR9?sX?;xiNYjWCa=rjfwUa=wF8Q zL0`^@kJEah+RyM!abWbG9^bcsXRO~P=z8xO-v2hsSvKBRChtGXd>r!r=Y0P--`No9 zZp@g{J2K|ji+TS^%DyaX_Fm5W4)RR!Jek#A0<>TvYd^c6Z|6vRDdpdjyx&H>_wv4; z@}J=QPx1a@>gDj%v7h1nmC8@OKh67p;{6_;4-VCPBj3J4+EtYQPToJqJ2CzDuI2p& z)b%dvTF3j>`F=cO_P(0;H}ifi&z~KttI4?*iaJXHQ2e0wKp@1*<>@cvoKUrPD6@y?`C zdK2ZVy#Et8`az!ShsvYWbbpV0KTP>N?`J5F{&Z|H?>qT^E#(Wme~I#oc&-{M|J{7M zoqYQ!zk&Bx^IoCMdfwOY{#`sTA1X7&x4$RtCn>Xq_y58B&r;_5c>hb4=ecyK%&#jS zFx*R-kMaJuyx&ZjQ@sB*@4v<~GgRhfeETEPwovA3-v5~Qt0;3N?|-Owp42lyzsLCY z+rY4aG7s_oRq|a;nZvvj9JO?W=h>k$yZE+$K`98$Ae^1)S$oFC1Kf?P-p5aiwH}LHQY1Fyl zdw4&{`%OHN*#mU-D!zS`H0Wf*HN5{K?`57NL;2p%x5watM_KFdd8)+9cneChes>zdV%oI=+38w2eH!G?aFfZ!u{f;Q9HXv~7H2 z{x03g6I>0z@^QWaL+LcnPY$Kk`SxkjcJln#P}-w>J3{{!Woq4D8A@AC+K-a<9X!GL zKzk4K?OxJ8$nyh3X}9w20n)beJTR1YJ>SszN;mL)|4`ZizF{*d9ppJPl=c$7-9p+G zJb!K|?IV0+E+~DJ=T8l#-NHAG`bdnoPS@(o)>>1TP~HI$Yf=Wo9|9q`+D_%1kyhvPHc^H_|z z6QT94_;kzdWn%AYE3fjY4vnj9lf&OU4!!*{ZBx|PsGQGZ=_TZBKtTL(;^oM;m!_#7 z1?Tr+ry4Pbo)Nv~h@H+J!no|hFQp#gSFl(Z?6^2Q+cB~DN$_lOe5j8%^(9{;jlQU# z`PwXg>e!pP1Em-{pN>=DFhkr$;pp|ip!-yXYn53Kp0$^ehV?q&+Riue=@0Uh?NBuT z51bnl-3-F%=w}YjgnlgTJk*EN!>6$We;8WXz(es~&^)=YtCum>t?pg2eich17 z4EDi@J@jVV`<5DXPBnV5tI26OtabjMc#FE_exGK3ifs~8O1xQ$d_?E zvrM*d{2R@$g9B_%`bIgG!6)vRiP+ns9nXJ-^4^YO?d8_)ie-1#o^3w*1ZAh}{ev%{ 
z{N<{zB0hTqe!q40*qofkE)|>VkHkgxUKR(tV^f($j^D=Ki`g&{XbiR}{&8s)^O-xl zwZ+=SN8vNoUr73{K4S~to39?~Evhuz@;CD^bLRHgm-&BP9k|G+{Tl7ly$qgugnN=s z5vT7AvHvo}^wK>>k8pp)DejfiJyY_rJ%b;T?r~Lor}e<4c`nC2b|F5Y;wkwOh_-JR z&9i^<4qy^}X#S`Q_s~vyF0uQ0&}Q~Z;*22&ENt;@iY|0dmZuTL96;x=^fB9e zk=l#!f8i-P>G$)F%6EpgbWY=?w2_ZEt3bSZ#j1zbSC6!I+PO&ia)$FN!M@(nU>AGm zJA2yK%f|iGON+qP}h!%yX`%uo9~dYKPh%MU{`E?aSN6*mD(hWaG;z2BqSFm>{B zu5>kWC=6Z94=*FVU!vr?kDZur?CBLb8%PY{lITtI??+S*vhXVEC^8R=K3+_^r4!DJ zYRpWAd+IDN?h($>5gUvbck!0@Fm^6x%2*A4uGM>|k!@$-;gir&fjgiIm0j`afY_qw zS7$0Se4nwg3$(B3{Y_TVw)dyhSmFCHM_YFN;po}#4;E|9w$7&Z=`WSX(|n#h_^8f5 z-z0Or?ugS|ZyM}xBHx8?;rR*h+>aN_oL_8m$6oz-FQ^^wjaH949RQqtH*i}Xl8zr5 zeo?p7;?5oXKeMf7W?kd*-l2fIP0P?%{eH^d&t2*FQ^$vUdGlfBjIC|q^B=&m?%0WF z`)GK7?*zX*u~Sd-e1zXAerNcd=JzPS$N07QJnfwmmNkQ*05b*no2fSKx!^o_$e2yU zGDWT>^d`T8L(p2mG~1uC@go}0Vsphk7w}0D9yEr^uRK=KAA|2WPZvyH@6@OLDN-c!2rB#TywRUH)a_hpovySNDNK zm9u&ne9Ktw;tt;W;&O9;#(YmleaR*L(*0GE)AC!3@M}3@GSjRx<#@tzwenls8e`S zA79Kk-9jGC8R$*iJ$a-TREfP@)=%rLt=$u?nL^NFj%00IWM0)=GM~Pfz0%Js`)psN zE2^C_=GG{_ocV=0HnBlyjAR2EYwfc4t7|@9Un6D<|Lfj&KR<5{FJFlcm^o1fhH#p@ zO`F_jjn4{oovzAv?|83N-5sB<5-U00*K5J=XBe+cYhS0;nqeG{_ZHV4j8BpGWSz7c zbC>Q@uHV-=MVXWJ8RiU)BVz~6NLGAF_y-@-bsiPZG1ooHJm-8-T-;mDuU-e$*eF(Y z1-LXuqG{lUKL*Fl%hL2XDyE0zj(mJ{9^3lwQw|@q{(LCe=y|$7AA(oK=*iKp^hLEP z{_(O{yppZB_^D0WP`Y7FiEn=KG1?RFX>8QD$LQN6ap&c??`4RUc`13T^4_oC`$Eaq zC;E_lx)k{|QkK7!jcv_Z;$SZePwH%vyZ2u7xU;9TUwNL&Ta-RVn@7<(R8FzVMc;<= zE8-KK%fjc0HH6ETmJcq*!vQzPSDWp(BM(iTGBw>z=FU|5@zGpv^%Lgv9hQfVT71ag z5#J$n&1$Ujw0}%@bDP?pcqHaYxjeqnDqCdTC4C?tJYaocb>CDGS`l7UkNi7*J@}6w zQypH1P@AgbBz5@w_>cGVqYKZQ|B+PwFQxMT_672vO6C78M`JKQNU4@I3ij zsr(NN<-dQP{Ew&d?;OhibMxf)`2Q*L9;K{l%*3`23Ec(>4Pytvzx$5v-ANs?S9l!w zI^IDYrg6V5e?oO7b$CATbznQQc$}|}sZ1+Al^4EY3ZHT{8 zyyfj(XY9HSn8m}uAUd~n1#fu0OZ)*}%U)EE3wVk@|PSzu-vP7|?Yhben3kt@V{L^wC64|7C0>|1YIa?|bFvyVr47@N*lR-7Ct- zhY*~VbFpaQb>vBx(^*u-^S80v)h6?^wP%o*Gwv(-e`O)0zvV=?1(#{v$)*j-WaOlc zryg(?a1muMA+~_#kTLWM6CvNL^q08|Ul_#yiZ7Ig#m`4}ZVDZ| 
z$e45K{{GL*vu8z9ir=cS>&F(4!DSqQ2Ve{z?v0|yECa{I;QbwA6z4$xp4C|ARz(A* zb)X|VkZ2*(Qh&C$k=uJmnJ*PHLF*vRK|XF1I2nj3uJ&t5`*w{|rTr@HgBPt?C(y@i z`(@gvZP{fr-07@XqQv=lW+c*FhW((@=oM&Nc+{QG`W8U{y5B@Jm*<;kZo0eAY)el{$LT&Oeuz@JUs znKVGhf^QY{pr?Mack1q;E8u^{pXbg{=>Jx`UQN@3WRB*N{=A&xDNifj9%LfMC}NDz zRm4jf;1i#Y!KdJjcKC&E{gCKt=nl$DjG82mp2FFAb%x4E90KHmc+J zubfL6mpqrPN3ztGL7qtN*jTUiX}414vXQC}wmfa#q_*1tg;(e+sx2dsU6`~e3eh`4Dj+$vQRQn@=)}Xseq4^T*PN=fS%;bE!(+l z*TRci=km1j(TzVbHm!Wrn0xt{hST$y==YM4^N$*%3Ud(fk5p$mtAST(i<2~cS2|}n z{!)4MztUHbuJ145ySE2GcqQ4*b9S$RGxxlU&LoqpKC7{So@{)dD(@B61L)MM)5D>< zS5kK|skgwI^P5#~j(Yzm>$r43z?1FAUk!aPB;UU}^qslG=4VrSpq^~<{VRM|AJhgi z%i>FT`h%hTFG%wL?$Gy3lkZOqeZMUE{<)#=oUOI+JT~^7_wjx61?EM+XJ-8xyse`FZOPWr zWc^l$=4#-&>TDgH%`I6T8o*}X=Ql2RrQ$}~cX>QhiRcRZ7A3u$y#wf=#jXTMa zE7cF#73p_@zNO2HPKN97d4$(~f260FEi2R~WUu0ryK#Rp;~soAHSVkvl5r=`0G>ss zzCG?c>Qxdup~oY1uwrr4%&8y!v8Rmlmx_40Jq;5M_SKhHx#4jLQho}WORS#)@K3<2 zVQ_P2411l*NxzY9Fo9lBz~-zothxX3!G|W9kNyfr`N^#rv+;TSlvlrW-cibh(l;2J zQLTNkXZv${(yOuS^v~$|e99wxwf|phW?xou8Q8P#ikAhu#;Lz<z zdywHBdBqvDUC`k!>_a>gJaK1s=-he|isI#Kr@Gk2Fhm9#H9 zRJ|jljgUv_n!6P@$?E;#e*7qQEyqrUy|Ka3h2`+VLH3{B_mSi~`o7i+h?(Y!(eifl#~&XV=IrS0$ma6U*oW5jFlYZ>8hKnIPeBSL;aC!&{J6??d0 zeNy9oZ)iq(=^}8{zek{MV@4A{!^Rv$z8rBh1&MU*BV3XQp3Ka)jfr4g4YRD#Twn8b&0pKBzL~SM#C^kBuchEz^*rH}|BDG|FGQc`9J}XLBiPJr z&*K-m=Klo02|R7t9TIxbx;>2t)$`wM-Hu-7!>{~AJbpZHxc0&K(Aveu7OhcNE$Z62 zM*dmAsQA^I>x4ty_t%Ht%&rs<&EZAyY;SG9DutH}xD&m}4+PnT{GCNtVDErBZ9ULY zL3C8*JL&qayh_8*@SNiKD5kyqHhup81rIXU*}S=6HrD=)-S6a^&m$Tt*R}39?CG;M zfOlRH&^~#ECura53S;KJp5k~YF1yO6_bdJUoR+`B$1pujT7Gi$)b|xj#_uNJ(3r0x zPo92@=PZm#`gNqscTBpO=F%}6&v15S#CP49E13@c<&dQX>_jD%)qTCXCrxu~5xOb{ zl7ouRDI1r2H@pYQd%*i>Do;KD*W_28V)Cv$BfP7=j9J#TV-iZv;|q%IU$RtfDxElt zHeGPocFWk<$Rj(w((|N~hE8x0KTc#J@(%wCg z6YZe4Xe~ZBelxwlh8EXoELzR3U!n$tJ!vuItP59aEV%u1068 zfh)zQbU36R!4R#0i*2+eIi{Ev!kOgJl=@Vb9Gk5#hb+uFA3g7rutTScapFDa;BT2lqo zsrx*_0R20beVu&Q&*1CSV4r!0AGSxHBRofWDn`Ww?Wp}Ic)-SrlfPpTUN4#zUG&BA zC_d0zPmGk1@wkP0CAUocaBn^3m6qpyWAa|)Jxbn3c;A$~2fUB+8#6P#&H5G|7--W_ 
zr(h6H)fe@BcrF2leSVaWlVBPHAIcB!_4#2yR{J>&TD4=#7)zh={2{xo%4=`j%YivL z@j9WGz9GEvg1YIlb+=7XqRv8D)6^V_-nbb zDOMPZ7URPIqdcWsPVm&YOj3_vmCZyjj#hTt|1G?v{Xx3Vi4Vj&tq**k1x3P8}Oa_0UT})pQC+!LpIx(722nf z&ZMzbtP0J04u09u9j>s;kB`;mH?U1{7E49$?_NUtvOj2?`}vzLKe~Wj_sNHJg8Js< zWTi_Cf$^Yq#OkN=MJP7#|8E}Yg)#LbgXP;W(s-a3F&5hETHkWZ0 zhy+J z<5J>XcovSu^Gd(89Cv@7G=0;Yr|;rTI|hwIz5kG=xl|0gd~H`p^J)`*l)XVRG*?f? z4jfJqM`=oH&`|mdzi_4?-(BH$BTu!pDj?2Fh}`n+rTFO@@UMCWhhlB5Vtt@~$k#ax z98EDEZC3|rH3z@)hjt9_=uRSQUuwruWh-3<6#G9kf}FE!R_c|VN;VGNkuF{zWlS_5 zuk66_swqUS0$bA@_6z%@@rwWC*OFBuh!fe*!qPfGEusbVqS|rHNWR7JR|x} z|I{~~zmjZZPVK}01EQ0b>53i{uSau~=6{Ww!;54@5_dBh?Wea@bk5q{t?3jA98&zkBWA)h6mzj?}t@$KpP7r_cLH6~DI~+JXXyI+Fnm?o`DNXCXK8-P+%7Z`2xaI{seMqzgWEy;&X9SbSa-e&BRm6sqx{DBP4E*8&Ifp=w+;HTFj@Ju zwJW+;JCW*yHqiU*y3K#nSV!i;-gJtt7lptG@2BaSb6$PApKopd_7vR*sr5iwKIqN> zV$2bj`hlm3pU4{R!ZO0|f7nbx!fzB|)f3H;y z@$H>mt=qgz5zf8bWuL~%AL%dJ-&IU2wdM7g0s5mY_8G=?=IqyBH`0AC_2|6Nwg=<0 z(reyJKeQf}?K}@mBfz3L&%%oC`7yq!Pm-VSrcWcW(_uziM|xvTr^jrLiL1tU=`kVS zrL#=hG^e+0ZaBFi-9~3(Cdju5{YLtZbel2qhkQ%p=O}Oqz8~YcsSMxLr&-6d=ZIX9 zoXNm1%D;&^zZtSbeJr+C*f`CbfKhV8%aDJG?lYkGBr+s|zC*@M<8NgNdXVe;Om9bu zpVDJqO2)j@Kz~Wb{Z8^C+pL`RvNf#F^h8tZ&7k~7uPVKb?_uc1cNJsq$5{G~bQO)e zKU$*wXK^1=5$K z(+#%kR2>EyyCkXSxqnK{-RT&)b<;IISmUdS#nw}v4WEOn;pv1_YHP89G&#Vu2ZBJ*caX)f)3G*B029a_2 z107qu_SjkJ9D-4GjD#Ps^B*>E7mqj5J6Ly?{-f{>td{1Hmk#Ec-lzGlcdG*?bHqaS zOmCAAo`&t2S}V@2m7Trb?l}q1*pt(?&OzMG>OLnMr(Ne)S3#q5c%9YR5^D!lKk@%T z7iS#`o#dfUmv6R*@6%jg5?Wbvk-6u}z}a#xPAhJ``xeF}J?GfsGpMPjV^Q>v0%LnLX$l#6il^C`IHbd;q<2qBSzFw<2phc_!=2@K&5Y8mW zb#_es_VkmlbH1p`U4qT#3fZ>VmpoTQ55rF2>^IaWfAul^)r;k#rK3gA5qk^A7Jc>u z%tza;UZQr>G!=wLElsgzAD}68>e44KKvN&*yXef;lav?z3trjJm8SlvAA>Y7)B6(j zelRtT-j3<~wBh-P^^bU7>k@D$yI`&=8{~fGu{6)mYHx}41+o>KS$lXeYu7;b~`W@2nM9%TH8x+~Iz)N$|Y;H9|W}t3MPk4So=u-7f#b4Wqze%5RU2 zc^f~@lbR+h*1THhBs?C_VVGZeHRXjdZ(}2 z>mEUV*gbOf6}hfC$;yA|FYUiDJU0%H+p`yg7y7&CxQUloM_-|_Lq7SvZ#Qaz|C*Qv~S{MdPelX|EkX* zzx2%OJ|g&me9KrbvNlotk}Yv2Xs*bT-{p06N@jT&veDpvPmZ!Xp;@h6`Y`m%SFG=$ 
z+PC#pD;@2n(uVzCa&!WkHPsorub$Q!`uj8*3;LawdAXXW!6QA{xd+F>?ePuMjcCl% zjp)+#PkPg=y&p;Avn&*z=H~yjugJVKauK++j`?Q61$}>0;F4YAn+;c5UmCzy`n+V- z*gaCmQ>>q%Ap}%BX zZmp_$;9qL1*LqdBldazCZGOE<+<|i^%?Ft~8Jn`{$`0Y>r1U~B1LoE^&Za>elD^)$ zBb+Pm%c`6SS$`$C(w>>twZdP2T?;O(tt8cc+TN6I|IZRRJ%f<4@*&v|uzRYSOC=wW z2Lti}x+o)SWlI)a4cL>b8rRw0RiYWvot+PxG5Pv3LG_VGbB%#^^L6yRYJ^|BbZBe~ z^kpDFc@?8&&KB$D-k0;u+nGI{=d-&xe;qdtgLrf>30J;dYexiD-Fwo9m-eyuSib=6 zsb*g{&0P<>2JHQdQ{(H$_P@+C2QIW$lV6FqfjuAjCjH9GT|WnUIh2N9bD;cB=H@^r zn^_lF{+A3)%Np!pbMtp1Yn+~3>0sL``E`uW^Q)cfemvj3tY+tR*7@w)=x)GjwZpt^ z>rtL!7&GqMWT$U4@9!kvbgoR?GH6uW7z^{!4kr(fWo!1Abn0c!bWMa<&IgYv8QU(|*kDSq9+rc`UuP zq{F)XAmz(rkCwdj=j3cZ7br6_Tn2cB58sZ3rNw@9N>3XdDuYe-ygsPE=`!kTE|pI* zp_b@J1M8#;{pLgEcBG)24g#*)aaoyxs=y5fW9y%GI7#PH& z>n(+5md0<3mqIhxCs=>Tr|10jhxYn}tF+AQuX)+~mfXaC6hp_Nb?w8p>(HNU1ln6` zreyn-=7DECOp@(R#wYSJnLm8`Kw1r%2G3dl0P_>}E;!vkUkK&j)tpBEJDBBv{I#xD zyV9XAhJzyKf91bfug>(mTs@l13*OhzmJex{InBZps(tt#nz4K$c;`v?{Bfc3qPdHe zhfgn>URWTVeYK00M`yWcI(yU?O&4!n4F1vr>6a{!j{WGO^EE$H`kDpOU%5c~ zs}@MVW`Xqb0_kfPNPqPL>DMlhzHWi^*DR3!+6B_TbAj~r3#5P70_htTNPpb|=^GbF z|Lz6SU%x>5bql0N3#4DaK>7^}q~Ewe`lbca->^XXO$(%NULgIA3#7kkf%KafNWW!) z^fxb%{=EyNf8PS>l?BqbERcTd0_j^9NT0e;I=ba;r0HC|=3o6*m9dK$)BU+C&~pv? 
zr`Ccx)4zE1Rf=D;dMCj`1#-_TzXJgZ$*AfHH zuGeJu&y;TMHl?@Mc;CCH^JaWbMp=JkN?W^c;$3ax`#63t@iKPU|8KYVVoEWAFi*Q=}-0LQ`D_;>=&NvuLXeRLyXgzR;tYqoCS<2 zc$1He&#SV;u{d{M%BI!NXXf(#8F_9he@{p8^4?F}9JM!9v;XS3iK~ z++?ba#c6m_?I{04ks`L+z__86q`yIFJF^%eY}w8y}IzO}F00@ru9lDqDqE7{A^ zdEX1||6P^2{C81Sv3l@(Q0&HBz@6{GmC!u2O+BT@m2Tn3rwBh2yAPsu z9yDG^;m41a=rWzhkCW1=-{M(xa-lrxZyVf-XV1e2usmu68|SEi?F_t`r%zXLAEL@V z4o*xJ*s2lm>5S)!uJERKKxLkFp6hukhESyEk`>)edX`pn-^f$8;ym>VCq5pg&M9vu z?XD_yGo~6(^-=B3nBCSF;7PR$OzdG>K79lmhwf9fV+ZbacJ@hriW~WONM63>s6*$l z^#5J#@5rVooZOY{A*o$9JI5dAfAvLu_+9u&Fhe zIQXrkKGkJ$M?1m^F@3CEPj%z-G%gx&@^D2LKZ1UmtuINFAHl`!3l|&n=R+6Y>^vF+ z!6e+zM~m#Gx!72V{j+c1iNHBD_Z0l}mTQ{uq2s%|@e9%SHPHCu)T!|mUPVh9yFfv4(XmB??E`k<+uy;74lNMAy}OJbc#Yj)v{Zl-~s zM18uKt+GW_}fZ+xS)a-NmoQZ+kqAj3FLG!^Mf12#7OTpYCm{PWRp$A{WSWFTbPwY#*DI zj{L4I?Ie6t3fbQT_Ur^afgBv6zN73{*!;{P_%fFCUHcij6J6ik9trpIF;@~3s`uRA zT-FtDJqF&Fl22zKu#dEx$UkCM_8y6o`rt=sru`)ORIhM+l6T$Rti3Pc*N?@mz^Qcc zKxTu(y-h=w5~tJ2Qe>Xr#}N-c!FXrBxUbunyY))vq{g(0Y^APyYw}g~<9J864h)jh z*pk}XJ6Vi;eSrLjIHS`tdl>7j-IKgSLn|alM$u~&^L-7vO?GlucPz~H4pC12B%E8{ z`vmWjN3|Msf=rUUR=lSSbbu~Wf-YR#4B(cmmX6?MvE;V;SE@t*^hNJ?Roq#OVLP`k zBmVO;-j^v*UVNlJiI0ZMrrXrm+jf%nRMwVjrpmQ|HG$WJ)sB4i^P$^QRCxbg48_<8g%?IPW@TJ?KjXs>Ix@8mgu=LYu!{g550KRJrcWL>T&aZUAs`H+$n)c7# z|B|g&Gfut@3qSo3j@1vH8554uI8$GQ=R<-GpI?=2m1l^PZ=5|A_ zL+QkH!d;jx7F_V#{&yqC5Z<3yKQ?B;mRPD$_)|A!v&c=ARXjIeR`Edk;}Om2*6A_PKB)Sh?vHdO z+eQT4GM|dZGMB9A7HBU!vZA{}?N{9V?E5OYn&usRpyqf7UP;rJ30=PnDO{aFuebdT zX)}zv(w!xbjqSf*chUb!vu)dFFH$~!f=%^gH-dwN_B~7%9>ooYmzO7bGp_^BNj~+3 zIN>FHM7b-%wQu*Ez1+G0+%59^zJ7uC(?j&^>3o2u#3Ms=mJg3Z7u?%yZq2c7$We}G z#BaKgBYug`KbG*EjWdg$dYtn`2X*((;A2S!uVgl4I}!V)bH1L@|`+a;k;&~wK=gA>0^d|`}67IZLLYHJcp+P z4<|TbPwvCu;TJgr^GlqA`)HqcOtq5_kx|CcvE?6RgZlSz`lGo>_zF^cu_q&75dESz z(EhkTA9%6$8IaFt4pB^wetu7jz+ibuFbv66&a~O~Jr2}%P*K~rG50h6szZDgM3!DS zBW}lr{&q*18#%K?-lmNM^{vp4IXVDl`l>iBbK}%JpMDtA%)DbdPhR@zX{O)KeBXxX z)|c_TSPZaH!E64!QKs}B<~rR^&iByCw|t;6*kaehN{0^innb_SQ*xFvrA(C0GHzK 
zBU8KOn6sGGOM06q7w~N}&rzO=gKzn^S?T<~mlusn=TCW;KBzPY@7~^7+aGu=Y7|Vp zv2t;hcpUX+W)bmJi{|$I8GJ~9gDi-Vrt&rHr%u9b=@UF3g%FLp?iDuWv~30d|!%u`zqg_ zTe`dR+{3@k8ToqWvc?y?&%M6Y)fw{3m|qvyek9Iu*VGjF;Z6he{ATxMKcurfoS`o> zC;M|oHG9^Kdkl{CrYM_-cC|LwShEH|7CE{5hxEk&PsLE5_|xzcf10SfJ-Vc~q>j%W z_ZSTswEjGD& zT+i%^>x--QIb!OuS;RecJfB}svm47mPfbc5{?XXxr0gUeZKLu1S-I+vugb(%AEeoXKi?%C2A3{&6LlP{xUrES); zxvO`Jp4g9WLryQm2c?iTb2Y&m{P6ncQDzBcmLz3XQ05xSERC9N#S%!jccB<(izqK% z;Le|JNEy9b*&@0Wzan2`2eD|hA>I{(uNxf>z!c(PAVB-G5fqqz>hrDSSB0s{V^``uS&MoGtHJIwq zeCB1g`eJ3H+KifQ$wkFj%ZG{F5s#J+t=v>5eq6b}(#v2!PguMR&kO1YJnQENpV#v9 zJm2B}e%`MqwkuQJ(y{Uy8WoR!{o}+yj<6xocipY1IU>*bH0cuf6r$4ICLNq_hwV4{ zEQJlpm^-^VAA&8);!n>t;c3RI(k}2_XE=)ZZLO<)uAuW1`mVhgpFb0E&Kw@VzB>*K z13IAXyJ$hUd-mYhK6uSRc;$x9fh(rl#;mpX-gq2*J@Hc&j?0=4!7KJ#t9zCcy(Gu~ zg#=EezX3gEmGmXXq8t*x4B3PIvNRcTS6UTZRIsDfXFAy$v0;M$N}XHm`bR!hS0YOm z)po@h?mrttzW8+PFeUAG^}J^jJ<|v$)p|#GE`XOcw7JNhZK$je z9jw7#EO@jADDob(o??y{op7fUPu6&)QS=!3V`jr2j7ymxmyU8w?dg1>Y+Pfc`!w!t z+pAc!6S2PGt4V(BuzSN$F&b~}a*mQ1Ot;F{Z9GFO-#B%g8*Ooq9X^KXHjwoLy7fd9 zv5s=Iq31ElO8C})6K`_fsHm~mxg*IRua{?{W?S-SChlgec#uFgAkjk-pAsH_|ADq=7S7)U&DBc7Wh-r8EWx!F*qn+a$w!!V#H|QIBs@< z_1u~W{)zIPOf=aMZD~(g^_W`oYUJd#dG6GNMz~*8eVFQE`zYyt-7Mc3>v7>pyfoqA zu`s(k0tAQF#)^|Kzt%O;!IiYNoVJ#y?c9%1KK;M!081EC>1vNAbmDo_`jW$w>br?d zS9TPgO|`P~_mG{$9yxK1WT%roHlLlJ7*AxZXk)@0M2C>MfH$Ab@j%V9hmdoJtPXVWnwfrEix@xemoEDBFt39Ccb0d?k|&c! 
zcPMOWPfP!*GB4HYF*@GrTv$K5rZUqBBGrGGdEjtc_lh34VwU=oc>sFMna0_Cu%{z> z6dm|`Lq^SSGd_xWr*Xc$!yh{@7{mC$FSgF>sME@3)#>m9PuO(j@iBY-TsuNs9O-nz z;hF%Nd3OKTK6u%F)(KIk@q0U|W22St7RR*BJT)d=U;?;>>nwGTF!oknCp_!v2gtRDZG@C&_sg;=ZD z21P4t^yL5JYG3*``mc2JV)ejE#yO+%Z-O_)CwhOaU;{Tg_XA(H*~?^J(fQI9-D||d zRdmMcEdOJhBNjlBG{q441q=I8;1EyUPOMGZC9esO7RZNwb$};)7EEW+l?yrxw+8+w zhlkkjngLeMb%cVI`xFw~JdtxYI{*JV@~)$dbOrUte{UxFGA58uMoIJ5BJjcdXXhT>3-?dN zmDl|#T2t#B#%s!d=+*$>vC02x>wP@cPSm>GuDNscP2-Xak>9~-;TU=p&WRC9{j6V> zi@$V#Fn5$#nR$rx9D0rBydP4V!1{fByQKmBQP;K_wg6~kbB!1k4aKKm{aA0dry|xt z5ju7A{k>e1_QKzwJ$DbQ;*2k$y@GAq)&0MzTRNfYH}z&tcOow`Z_%AW9vv8`765b>bLBe1|tg%bD-pdE1TuKt zXW0Wm2J6nFC~WlBq0dFKQ9@_y(Pg#vn+ToFO>008ANK+bz<2Ese1F3}kB5)gI3*iz z9vJomx8N{U7kl2pe0!HYtj~-yHsH0Jh1VXTE%izLP@eys>Vxb(`lfsPa@2F_1=2$J zQ0W>Aov#va7s+eDp>XrA3&3(3SpGOwr-c!i{ugUG!Lx+4$6(H<_@#9ujhoe{;^RHe z+rcx)j27*iw_3g!x&C3~=M**vrMY|UMy{7kosyo5Ohtb3zL@thy(jQJ_6IyY=q&Wc;IQ=Y8_>~&cBX4>&Y`ky#??q{H!sbLMP{JawIvU*!$*OF5>=R z_Av6Yk1#h##%ayw;h0GFiN=iEBl6$xp+CZJdVbejf(}kSky{6&pLjiEQ<$uQqe^EL zxbpQ_-Y>I;HZ#Ol1c%W3M&^IXYSBDT3sae=WHD*bB|O3ZrYgJROz*DngT09FmIgFm z(*7!ZbF?nHB8|Hq zI&1wh%I@6ir^(Zw?`q_!Mck*0&R#y)kzMBPjH~Q51=b<~_Y<|?ZPA4ruL?M4-l%bB z2Kg$I)!;((aSLVSrzaVO{P@dlE#}Zr@bP`wo>+J)m}5(aW0(j2_Q*o}Y^T*6-_+BV}U^Td;cUSe2UKG(zquH%ic^dJ3#jZh% z@X)QIRpwj8AwrfXI;QhW`*Hl~UI|V#-UfP-Zjfi5E1)0z6})Bp1rD~G%ZcGc-Q@>} zcZdCtc64X0Wadw551zSx2k^X!`Xlu@LI%`sWPPZ4RrX%juIA7Jb`{po<2#fOxwV=0 zH}S5q+fEyHF2NQcobQ8%MROVtjgQ7ewlk&u4o}&64CAo_y3jh#woN_xs@ne4VB5^u zOPI6OwtNzx$G#o^YV*_FONKuk{Zjj?U+|4Ir-@OXsF=N_i4#x_%Lu2=*Ng~-XB#mp_Ha=0hGW?gj?dXoE2 z3_EP}*CN-EGt32wg*U=|k$xWFUe6x1XJcMWBy)_$2|JLrA;=yy%AL;Ym)^Zh_cjI3 z^VB&o?y@s!E|q-nc~%*xgKVNtdfx3mn_uq{vgZL?&fe=BM67qwwZddxsgSqAH+XY|d9MJTL|baV$p0EozejsX6l=X^ z?JjC}vzmTlI>2W;+NO11CxRN zP8(zc{3RGxJgE3bhw$~dx%1|a-`pt#&2~t>e4Rcu=gylSLl4GAF@C0Ub7x&mcR)+8 zy{!$NxjXig^xx?o>W}Ibt)RE}*Cxt~J<8r0Z|x|FjAqK}%jk9Qk&L**-T}0D?MivVAM;ND;zsJz&80fD zPgx>YMH4=cm#bDzSKuvlxIUi5OYlySw@V64GH(bz6%bk6oHe}iHudx+yP 
zzm9pblxwq{sp>3tUpG!utwX&^%$^4{RH5#C>%pGkPWMf%PxSKP zK6E|e3%9sqLGS>JFX!h!$v<@TqW=FCXhQnBrRi@Gn0BRMO7!x{6rTk(jXysAjb?i@ zI*Ixh!Ans+u?OfZQm&Dq}e#w;=Ooh;vs{-ga5bcRwPp&yMudn^6_ z*+a0ftVd3*kFcfk6aF^WiGNal)V610c$Yi8D)IMP99GPY)?PFv{D|hEJNXW14@7#@ zQgBgEkI;VQhv|5rK z-^JtI#ai9#oKv5>cJ11=YuBz_%suEcchUAcvB}5gAdeg_oy|n^AiA@i(fZm>+I3&V zJ~uMI_9G-u?67caUKadA9`%-gBN}RHJg)0hD(~si?lG+uR#qC_G(IGQ>zj!%wx3UA z0K$7RfXYnIyW`NLyJu2-LU-JXPgpsxii}AcJdbr6aCl}nJm8r!{2rCkI-PkN^SIkM z=lw|;_zAOqE(mLdzDfM+Rw`ZDQx>$wE^*f#aZ4-L*)|GR4}80IEo+XRZx7}7D$oqH zZecco_5_cg$5VD`3iHg5u!fX;)d=AEv-U9CXK7akx=Xj~a9_K0R_JhFyRNiht?-Cz zSE7MG%y%oF0bbY4&%7D{n=p*e_h#wp-?dRUwKkoHO zUa*(DQXmy0N zh4WX`BmMv%4dVO1C0`Wzws-QdS17%;_IP!lN0I*4T3>R4Ew8O-%tLh#n(iksRpweF zUSRD_ZJn|)vN9ZP5u9Zo`=Df<2oG5V4_PEVv-bNnPfJ&2>XJc6qI@mZYpK2BBl)st z+d2msXvilF{wWyKe*}`A)|bUaiKO=OEDi;JAKBy@UkK4JLxcEy(jn4f#11|7pM1h zcv+I*1zB`VJW=_@$9ii7wZ|L0u9#q5VdnwO1^gwCOOBAu0AmwYO}{|`!iSywV1MeiDWZXM9uZ#WYSDLGxXuAlQ>`rajlb?nt6N2(7KxcXt! z*p^;GFgjW+BjYa3=vW9Wp)sBt{9HKG}q%+owAk zgoiuCGvE)ux)7R6cqM&k=YH4DeO1=!73c+-QG0lH?tt$*7#+X(LDoLvuhPXk*d0vj z>t6j8*80etz4QXt@HNhB>{g%$UJ00ExxT=(5-abL5SX{ zTP;m>r9U#BAM(yP^JzExUA3vMY!!G)pBia==$^h?NwbDL;~slo<*7*b^?B_n`CDaf zvvKIOll`!^33W%E*4OzeWw-q>18gTVPjxhYI71tH+#9GdlrP0Hhv-$hH|e&pdBRU0 z@|Ebl;@kTUU4M9t>j`hFt35)GajpBz6juQzjcwsj^yO@U^p1Y#_)2d^PINM&#Xo#| z$cuTw5)<2>IQkTwAWt2CDxt%ZiO$N?ybpYmg9YCguVUTO-XB?cxX!)~@@t7Uil>;G zvHNeP7PnJF2RZrmhExl)21wV{oQ%%MXG?%T5?(VUDk{&~E@8LU+L)PixTlf?0T}B#Y zrLJ@fl9@~$eFe0?rpBF>J~9+x#M${rwp!Q>oz-6MnWewik-kc}$+RbU!XIeoN$*>h zZ#r6?Yhs-GjEB|Umabpj5~0hox+}@};uHC+^-Zd`9Q~l`rz>SMJ-t2x8 zEy(}x35VB2w`6_y3a|A(yfz2p_2Xy3t8hBi7vH1vTBW;U#H&)`fx?lD|%4>?_u{wsf(ji2JKe-Kw)X zY5U8i8>UR<7ymBsE?zA@ZNq4z)?!)qd$AW8)ABobs>2~`zFBL= zv|Me|lq>8J%clR!?C134=COV7TFJn1IpBu(9Bt8aNm8eqPkiQ+eO2UeADIAtWPEfA zT653`+B1`W;bYX1dFK%Qum&0X`p$d!zG*f1EKk^P?5?bgYU^X1u*v;9X+4c}PRl)X zGbw%9TRBrE9oKUDuELz<+OsqZ?}s014QIp9FXgbuwC{E1A>PwvkF$;)RDPCp8 zp9e>3Z=UvUNVWHQovow2IdH!rX|v{%9QgJU{QfmCv3}i|CrtSN6~42)7m|2v*?Kxz 
z`6zgD0RD5Hcm=vJ>Bka1<_Tab^uhV2q+beYI7^l8ZxSwfqOJM8=39Ff$pSt8*u>5W zoGX5ij9|=;Xch5ik`WB|L(Sr+HSlaqI3pw66n=|#Z-3KFpXON$yLlt+-^$Vox&uAM zC+5?4dKW*_Q}!Tf`qdZ`eIHKn*#MuSfh_&?ON@WXhF>I4j~<}OkDmd@%lmM=8yxr3 z{?~0-tmEy`zGursR@c5^C1kGo9or7Z)9Zz|Iy%!PxNmNY=5g-tSzsneL`A| z&wN?7QrK}Ow4=M;)K|;g&af|nO3_4wi)zQG;bOv$JHJ|Bx<#OzxnIlwIKMx$lbC`Zk<2g4|zs8n> zcLaDXuIVd{K^Oih!qqp}4%$9cxa?kQ7th9{>xVQ?aAn*D9@nJOyR>niK|2Y3h<5&l zv|e_m>|XysU+J}i8;7dH2cDzWei^(WD^lLTHN&%%{ZwYA>@%%H*s}@WN|}S`_hRXW zI3pn1Vq91m$o3Os#V_AC#JV@f`hLjWU;H8JR5=@BJ`sp-l0Dq`8(aS4k3q~2@t2-tA5SM+Ih~Q(Cfhy9%xh9In)dtlRglrp z)A2q$qa%}kD%O`tPL__ymI2Q6uD;XrS?ZE3==K7%2GTmi+PyNKO_g!egeIzuNH*~5 zH}GxOEVB31_ndnCt+NH%_r=iTxODZ)lDR|Y{N+zY{>C&PO_=6ls)bJ=EAo%ws#yP@ zI)fgcZQEe)S|0MrTVV}@46eMK8?|#;yiS0ISWhTia{Qc3V_N$Rd33?LWB9h<%j`Md zfS&w@h5r@0q~0hE%XjIA4ES)ee7+@HR_KR1F-7muQIAd&niDU`dof+7*C3L^D?@{YEa+`2_Jf z#usO$Vp~DYy-Fwjk8tvC(tPup>$>l{3_2^L!>7#Sd~3WF`1Z*6eZE~CGoijMsSo%kyr;js?~+bypmU`=LORu7OZvN(vcc=`Z>@*4 z77{LP-_!42GSBI5N3SHER;;sA`@Jf5_WV`8Xl9gm*AMYtvwUg9dydX_O=&fb({R}Q zl}dMlzOu9@IDkR0#AR2cLyck^KgGp5%~*%Z{6$)4FDkvK^q?+Zo_wBY1YDjH{^@U> z7Z^cDR78h_t&^VYtt9P0&dmbH9B9_T{EbwRdpl_C0T&&&}9eVW$c>t9CN4E>}2UGAmI?V}gw;uwdN`v;`W!_cM@T5x+9md-Tx z1xtm0!VjyoTWwgzKE{UReb7F}YT}%};#~U}@BDk=DdzVoTYEf$N%M*5!p$SnyJ!5Q zAE6DcSH#OqIcX1Znj2!g&)EGMk9N4RGthJJH7JhQ@5w) z;+0Q26&Ie?VL&tSdL4e2v2@IuP;EXV>kJNaSI8M1bYrdX z#q3j^WsIHV%ucKmOWK?BV;t)ojoRzxZ29G0X77!X{*3*5>Q$ZPmd-4F6g>=cSshHadl_8JnOWb_&RRA9~2DPt?9{dUNB z%u9HH>v#2~_|;9|&hbX}Z!KON9T@rrni*bYoGm>xlF&-ezehXi+qMs1jCJVFFViUf zk!!{{Fz_#(UvzNDZ=LpcxIeEurxvFva29G9Jf5n5Jy2U$GM8%qsqAx?pT8AexD~x@ zL;a0SdYngimp>2jGxt3eQtVevX71b0sIuPe<}S-(Y(K#ujwA|Nvm%)tO@d^9kw2| z&(+&SE*nH2T0bb=^L@NT_QOT^sN)rmhbX)-ARHX^_=+3PN-G?WuwUtPp(P zRUZfj$x@oT9PLSNcK+oR?$U9Vz^;wopQ1_RRZElUIl;-#ZKbQWdi+H6X6u7)CCdwT z`(38Lh|^rk-Wuo2m|H9G4furSVZn7rLf132KLfkQlk<0%ZigE~gWx@x$TQY<4F0D% z%)$%awCCsiA6WPp3*0G&EYJ#le+z!gw%EM~;03?EGCoq>Z{;S8ZEnCb74?J3_u+xVm+BG=BK*Xyr3^M}_)iw8o3au=QV6 zV*T8{Kg#&6@VTk$uwRrvO4^9+L6PuPRd_1pdwlD>`pL~tbWEMOkntut4IkEa9bW{e 
zC;e}A3RP@<>ah+fw!?}23uyi5cyiX;JuUmOuQMjaOP5g|I0@(0@%hQOaFfQZ^}o== z?G3CkW^kK<(&h+l0AiAi`N9l!tY25zdFO0#&HQ8bG|<+ zG*8F1@F~2ltaJVioC~MIx7CFNtx)=!S-1>d9xk6wa5<8~<>LczY3015@M%xsIlkTa5?3j_Rk+DnP zs(!)5os_51;z0b>Q}_dScAiY(Pk0%GKlB{4@CWiN{_vG}8vev97K1~loywx?Tr-CE{@uXIhj6*eb#1jgLa$uxgEF!hJKyZ1a4H1|Fnm?mSUeu(EUcXu~_ zIXE?WUFV^{YIW}Y(wy*JKYw2M;k(v`7wn}R%DDD}Q{4mE%|XG`I`c}uPI(V^mp3nr zT)I)@ryZTUZC>TM?}AovU=ekcXt$w`(gWT5at{Q{8SkxITb;j{%7v3(Ixl?G`%Y)` zk8kbhEcpYsFMw)KI-HV zt;x>T7w+t(@O1d`?{tI@!NY!=?O)vqD!7W3ErEQWr|WZhRiBfU403afyJT%r=G%@j z(h!&NXm_b`RC4u$3kxwlC-OzF43{Ta)Aq|_cI``@Tw-U>(@L4-11@CQ_ z$<}Y3JzFbY^&!UGrmGcp1ABn%PkGvtu5?zjxAa`{{x04RFO9T~%#5lk1g z8{Lacu3Id(e*H=IEpjV%PhU}n-{5;k=Y!?jME)(U{F~rv#Fw6SeOUCTE|7dD92MIm zR@YI$27QRJI0xM8j-z6EA5FSJzOu&Bdm-90k#9e1;|#vOD&E8%ihP)>Ug1)(JKUk0 z=;7|aQ>Vk--}CNp_jkNI+`YiN!`+BT)VYUbdTCu{jtCxPt@RmQlZfj*PsdEppz;>chOrEa|cZZ}ZS+#7E%|f@#l+ zivy1{3E;kSi(pT}#s`G08W6T-Kv*RS`_WW+8hyDwjAX~O!7uG3DX|b5MkdTY<&{8pRhb(vZ>pY$}713kgz*aVRH%N9-!k> zsW9X}`Ib7~Ooh!OOmgv|RM;?K|Cz9dQepE6`whY#NrlPQ;eRKroeDdTFzv}coeI-k zG+!Z1{Q5ec8J_RwndA8%c;-x_C!5GhROQ{yCR7f{Uu>cbgf_ z>l>iSKL}2lMn~yXPWFPvwBS{LYd&;3pkBLw65Ib)tJ755H^g=V53jl>dM)Ew_o7LD z7Oe8Qcu{4jD_jhHVr%fXX6p<-e$wG%OM;KBF+Mh&jt}AI;Z>87^gb$oj_N5>b~)F5 zXw}we)78!Bp;ddMUEpq$V58533|#&=^$%UFah=BDWJj>Y0eR_}x?rM*GuHqoiv82%U9tM(XAb1Qbp2Dp%6zj$8T&b9K@w%U-V8rucyeXC~uzk^)th3;^4>lmtUe%Y*MP7C1Hv905O#P# z*kc32zHwUEcyNSs+`?58TxZ&o-R7MhvcG8N`pyF6#hru;hnumfZDzAUX5nOz^|rxt zwgvK?v>sYr;_Z&41NkETkfUF(Px|e{e7p6z`cZscPx1BD(DgOmlUB~Wh&7y*HA^}1 zl3$k}pjg&yPG5oVPsy6A8TV;fQ)j8hYsg^GHg>hcr_dc0Yumc((QB+}qK{&n70hd+ z=g@swnyIsw&6=Um@7uWk6l0rpnQ@(IB&zZvrVBG}cAf=?Sf>1bxRf&JmJ3G$+cdBwP8 zA$3OB9tE~ni(AgQy<(W!XMt(&|3G}zflYp9Meo?wV2j=g-%VKTGdvE<5O$T?kV+?c zZxvx{Q(=--t{|+MguO_4l2~Qh6oAFC*-ZR2b{E@FK#dQem>= z8Y8Tk3d7bTTuj)ZR2Xw#SR(A9RM>pN<`ecvDy&EtzR`}iQ(@;3mLcrvRG7{Hox*;{ z%3VCI+{M$%T|BMa#nZ}NJgwZtQ*&o}{coiEGuZZ{|C8=dbLbNE%9PtF8P<1Ybi3_X zKP>&3FRsFdd_SF_1-D!pW!Yo*HmJgk6UtP!>y|eHo=GA57$Cf!_Jsay3Y&*8v 
zbJfcY_Ch7*L*k{+5Nrk;Ppt|5zk-vs{SO9bB2SN|_8uhfnl>^hc(wSf>wXILFMDRx zBj1K`o8!Jk*T{Id57~viVfGou%h)Pb?~6A3n}exqZ{aT;jgzI~Ji@o~6oM_?B4;xs z=SVNiS$EE3!zW}>&JMH02_gxZMyd5q_2(3b@gwFPNe$j z)zWoVbOY=$K3*mbC-zqM3{M>zuE+aGvT4lJgmY|!ORP_P=oq=u;rfl#l>z_b^u-AJ zW%UI%1Ch?7D342LD)b-WqH}8tWyjP3t9M|{(zAs+-@o_ye4F!yz%QMka4y{2a@ZI8 z(Q+$?U&A5JHxH|P{AG2evvXr&q{2RNuimLh=RlO#j?*8ojc+b0?e@9;v>#`FE%$IjMH=)$%$pR^NyFm_D!w{w<`IysLey9Ub_&%lo~e#B4IXWAF( z^%Zzt&UJQOrnJjToXbw&kH^r~ z2>XVy@xFB91JeBk>9l8@Lw3SX#!kslqxCHpAk!?8U4?A(tWRa?adqVJ@$c$k&1&<@ z&+{;A2;`cb%lhhFHlSYC2xDrSctPC0tr4=z@f_{Y8EmZ)9DUW&Hf(Mlfd9MsNPJ5? z+>Q~CJ6dPqTz&%4MS9{(_pndn>f3^y;puabvjKB;pZo!7``^m=b#p~OUC$c730x_9%J%sY`X(@>&(nBQqyd&_UZi<9-jm zmn8F$WQxqfc$~<`lg7rG;-%*nOvXpvqWlr=Bv2jlouzqQ_9CXWH3IgIbaF~F0el7E zSx7w&9*rBtttGBVoW`n)%O&Nrmv}r+IbTc4Swq@VUpaq}lq3I1BYow-i^t@jLUY?F zZOvWJeyBIY4`eT8Jo<~ZlhIn+^W*il2XAMOBMXliCjGD<=aH@WuO;J3HqR;-c^8}} zwzlB;tJFzb?Ag}U$UURLWZQ*IL^%s7V-b4Ov2^}bsr>B(X7Q2b2_DpE!jJR{!dZUl zp6UF;&DIu4eB?UXp?1pWt$b~1ZM4v1pN=-0mgdNq4vKtBPIccUzLl=`O}ddJjbPRq z(WM_=xOZASO5v)Db+7dy$XfKctIL)1O6!ro*_CbVWn1@IJ7g2w7p9n>H zUVen9oqrR#Zmq^|6*{Q;;Dohy8`+|U%|!`&G4e{r&{&a7CmVU$Z!+SBu7> zkym1WkRK(D+lQ2#BHH*n=4q89|6q~}#skJ8y23T!TYkhO#}v$6(JH=I^IhV*$ak6V zm3+$&@Ce^S(7NQEQQ}>D4Y&~;-va03v_tT#E!YST`K77oa@r(#B_r5=sKt5nS_d^n zcl)iJB)gar?KKy~eliuWxFY%dGsNL{w5xG3C?9@H@u@O9E{5%Tdso+J9ltBYYi`!P zE-J&qsk?U=TN(5c4i@E`%UW6Uvg@n;wD&;52h#MjAuZQbXV0%%XWa>180J^eMgO{B z5q)=OUHID|Ibyg%nz`USPx?E^cet6HKci2f4S4OA2)jjl9$NYlrRntxah69zN3nf> zyhdbQ(4!AqHuL!%-gTU3NqDcyCNFlr*8dr@Qcs3(`rwx%yBY80%Wm2aekHQo2xX^b zw~+y|kz}{gzI3Al(rFITo<4Vrg>Oz|H}4h7ZpxpQ-Bu>$(Kl8WT=fddV}EH(`P1d~ z%Xpf9oQ(IY>HZnC)*nEF@mfEo!G1YUG}ABB4PNWt)01UmId)6D)-P+FkBrEeISl`d z>2b^KwSL+sgOf!o$yfniyM|`0$o4KxN7{Q{IZG~$*8Qv~ zSG?pQ?L+E5B--7{wa`Im?nJH-ECp&tD{5JocC?(4gImU&mP<_WZ1#FZMq*qlb$1 zsVC&8;~CCQvVUJP`=az5HUr~!J6B4-rEw}1;5e1@0Bu&i6G<8e%lS{ErzX6Tg!|0h z#TI_;euDD=?C%ePVf`8EQ8^pV61VXzan-ZL-FlX|+s+cV=`3-zGsLOA>WjShJ63*W zDqwBIIU?F7ygT|(8lCNW0Xo(htSi8;^!%b1*-hl&tIFr{6quLJNHgdlFJ-^JNSc0H 
zDG^pm`rL17-I34>c?65(>eJ{YiIXgS8oeZOZ%2j_y~rML7CoinMJvTK#DC`vTY5Cp z50m0VBl)xB6`h=^ZsixPoT=_VBmOjcN#ItT=;cgxD_%5nrn(g`x;azbe@^^qw3F0r zX@~c`_uI2*NAi}V1D9qF@={DY9}S;`U*wV1oZNzMpUA*h+_%l+wHEynv{+2i$rrBZ zK{T+|V-2l5(1!9Lk1SIB1b-Jla*p`XbHuNG{n^W(I7fWt9P#V>;w!+eHVFO=ii0OP zd~D=!ktb_MUZ_3rTw{jFyn&l{aVLO)n??b;q)s5E_bNq=jQ1JsxB)o}fQ zG}xld$OY;B-HKt~%?2VMzYpl)QYedgAmZ`VGk@{Bfu#Iy9v5jFYD{LD= zF15ZYjo>NAXZ(MNFsG~2eFA0X0hKR#U+}6=Rn|#xsB@IM*Eb!WJG3o3*KcLCHYuPZ z%$U!GvOOZa%)&;f<`o*r7s0;mchc;T9 zr;nw|NQZAsg%5(QdXD&8&k_G4=ZOE&bHxAHIpS|SNBpPG5&!9P#Q*p?;y0Zm{wK~6 z|C8s4|I9h!Yv+jn>^b6p>KyT(J4gKO=ZODb&k_IANxaS%OJD5H+8p+p(IM=6xK}+q zB>TH0e%5-kwvIgs?BD#&7nn+G(TE-6BT=UOZuWX^2y_4K%lE=xj{9%QnsNRwyfJGY zEfm@W3RLs^D@~g&uyb~G2E*Ff0Z8Iv~_j` z9abBjUEdXrH+Mz*g8k9q_LgY1&mB6!u9(NxVqZThS3G$o_g(Bm9vAOs@0B};(0`%V z8wba_JBWR@gVqL`JKFJ`Ab&|aOUd34@}9N#k#0?GcKJ&5fyz{V`20@QWpUEfIrLU$ z$jwX4S2}0I9`VeaBtBj?F^!03pcm-C_C0xZuSajbwRF^%CL^5orI{=IBm7YuhrU&9Z@jEJ~*QfHS|X~OAma?CtIcFqAt27w(*?4 zI@TEo=T2wf#;NeB`74d9JTUj;ywH~>N4|cX&*@7;+r#gsah~2cz)oQ7#(T~O z_GG-Pe#cJ^bB=j*SvGu!cWu~0o{=vP?OBbkA}CQUM~hRj{ho32uOk0r zuOk0y^0!|_exLkLy^8#6$p4L3k$)}upMDkjC&>RylArzYVKZR-xG`gSC1XhQl*Uhm zyhoC}vU3%`Nb{I)vKCCIIpg?B#%8~~q_)T83`UET|Zn8YK>}nn|M>)7V#4%n^`>YVBfdk(H!bzE6rgv zdiZcPEnY@UjRw$M`y<7k?vb1etlIF0xf2^X536@z#bPB#+W7-FtZ#Pu2G+ zbXbxLGKMg4y`3{L^1CkC%<(oiZ>ewc%s;~pgrou#LlAD-Y>^|fObGzg?#Yz5IU611n*k7Ul zvUZtiSqdENTw=6tj(sG!eA<7M{56kxfVq38<%N{(;Gpxy3JD$s7jx^v^MN7WkB;Z} z_mO7M{O)k%@G6-^^E)z}-4n2Nv?|>B!KPLE-BCKxZ7F4=I)WTx=W*JLOq0=E{Mk2bMqdp*v0nHs zd$&)e3r(T(GGnD@(4C~|&xa2rZL;knpXw0}y0rau9;VKf;5c92Y~@CcAI@P6J3QHY zwqHh>j<&76dewwlXB_r*x5GW0-@?Wg`j$-l-UUPoSpGz& zI*v5jT;1S9liq)_t&w*F7FOWAWlu@%Jg7CBDto@-4OB z-tn>Ks{Nch0&yzgw6I?oq*PVqd&b30G7 zVk+9jGtaZhv%qs7&mzwQJWD)P?_AoV@o3u!{jN|OS=SYjDK!>90)61aihXvz1NvIB zoyQ;kXaYa>b^~nkSc??h zdIbDR$GFDqv-r&>iW2vw zyFM5Jcl5#H2c)-5>LH&k!`26Wl@=bT^Rzww;_kArd-!$IGsn95_49Ao4o$T>z8CYC zU-&0zn*J8={1eKRVB^O*N0CNZ~KmM<>F++X~UpNtsAZX zIm*^q&PlachoXxbGgrs;s2t-b_G*M_uDxW%;g0leI&167SGhyj6WRNJ 
zlk(-my1#stYs<*oD80*-6?ngek34hCUh69%W8(gHxEzrTH*<*tTy34>E4XKdh>+;-GJ}{;eS0K{2{`hSGaAD*5v4R)IQqAWk|j9Gf8^||6dLW z7yN%dAYAbO@qlo_|Azy@1^@322p9anJ0M)}|Mq}z$#uU;xMZjNlDr)sa|@;LM@9b? z`rK!KUUB~co4E_2ZR8^LuhOs&gnySC!*S#-bYb>x=vg#C+44_I{Bh-t^|o>Tmp;@E z7x+&>L+e9qW`^%rVpu!Gy5I%Pl6Ya~$%}SepfG#^T`;x8GD*T%29b1 zp1!hPUKux0#gF|09jN$Fap~@4E)n9YL znd5PSVTf;s!}UKU9Nv`RaNB7(RQmNl9Zs)bOS*boUVopY%TrmewEcp0b{Xp_V`?A{ z^Q0ZrF15$CO*oYA8#`yDZ0=sXit|&|mYtv9dr{ru&(!utziIDDsdVvkKK8;@`y!oR z%~M`xDLe-sT`wU2s!aK#SyGC1s`}mlAHVhuj2Y5u-1wSnD~;&ODz9qS%~tnos@w1h zx-I$;_u{#7+GB+YOJ98-N$z`TFFSi+}UtUJI*mHfL+vTwltj@MZHUF45b zl&7*(##9Zza^@z+H+Es<)0vY}t+5#Y#o1D+F*H@0970~}$<69Z%O6BzDZHGb?V^FH zS|d{V@@pqtl-OU~mcp;aU9#U|?U3uxrTV$Xv&ui<-{x=lOD^&Fi*_sgTkxi4E7VzR z?&OS0^(pphc(3~5NDbS9D&GO$ZPq%$l)Z=5%2>d>4EcR|iFl8A;MRSuf6{$gtoG^R=8j%JPDjhK7DDIZ)yHP_|UuMS6l5-TMY1c zez@S9+P`Q#FFrB~U&;{n(&G~$ey#DR*NEmGMMvb}hv^CO2E?--)Y@72SwLzHXh)a`4#G`0w;Z1YZKn|!+oCO{qU)3gL}^zySJcc;Gg+4 zKHB&$lm1Y<5zV2@wntj}XulwN(dU}57k$!@|shaR`(1`wl{&>!C4_O|+aeU7W zT47+@(F%_dzF-(1c`c;}w-4}~23I`&bsr8l@O%Uu2=+&tdivo}+L$ZBV;(0jeOz2h zIB<;cZQAItZefqPO4~Yo2d$2OeIxqa3}u72H5tN;30Hl#5#7ES+&=DycPvzXKRnUq z`D66iQo;j29DSVciXT>QX+(eh6gYlBdHrw`;jg)@(XDm(2A8)|zHoM1rV$NSVwyHL z`J6|cAI$@ftXB;z4U<QmLRMyS@<} z%`~F+1JK7Ujp&8Vjp*3IMs#9nBRbG&L{IV8xYb^IvHi(l;_+5z*X5gn#^t+$20mjO z@BCztdwdsTX;XI}e4=0XJH(h)JJUQnQ;v0jD$}JGe|EZj&A}PJ)j5EUPQ3g8=VQcg z^6;?pmteC>TV;33ymmBa;yK+I%1mq@(!S$eKiT}G)%>G({oNf3ZoR%LeV&ndRcA3w zRe5%cr*-I~&c=Gv+DZ(wHqT&-vH`uvBWttfDgMv$FTFo&ZsXtL-{yaSf0gh$|9$+S z=TK)9p%3;+_eHWp*umWE*2Slg{gh^$H92CR3ZzIcmLi2s~0DOH*cP?r3!Pf2q^ELFKUq$En zCH`BE9nt(t8sTiCNBN|k1Kbvuq+bXx|I16bKbyUv41IhZG*>KbjY{6uNP9!_r7gJn z%l#VV%D&qG=UQ~Z3+jIo%GO&x=~wfYf4%g1W7z3opK8_)oGHt_`_>>YKRmUqL9u-=^2-qO?!D0xo+CU*d5&T)Fj5*~U4gF^o}QwG`# z#dj>JyjJPUQ5{&Oz>V@s7y7-+wsciy5m{U1st((Zy1mzBY}5I&(QkEr4m&P|3r6X$ zRi4gJ=L6sd&O3tn(d`MGf(@DpmFBjq@rRP&UcNFNozA74G@YvKM~K(+$g=p{pX0;Y zYcf9fR9v+ey3rcpf>5|$1`m_(KlT-er|2K;F=TkpQ?JU=Gg}HQ+G3dEZFRIx&6s+iwQAnSj}f@gTGg#n^Vlz`e%CIwDGNSLvd+kR 
zF)YG`@(M0@hR}|~w7-`c{HEr!{{;-txv67^WjG&NfxlGdTiSDePb8;~^H+H%>>B2V z&O3w?)WXr4J-dh~H%IBOe#`NVls(FL?aFsIjlqT7CYQ<`t2 zXmV&kny;neO%iYW7a9;QGaX(>A2#4( zi0>HJu?_uYd^G%NXv_SrUxH%edd5!xrZIFEdD&5Zs-d-vFn^=Mx6KpMZo z%RIuY9a=IkDNWv+=>w$^J)O3<&bS$rF1`2d_KeqvM^}+o;mf9~HlXisQU`d>)LNYr z=k1Rw!d00wI6TvMT+O$`rC>4eyK8A@7Je+)bp}9d3f*feyGPB(hQ7#ht|ilMh*dz zv2Kj~mR^umwj)1Uozt2 zzsgkG)%WQ%{r#c7OxsOb+Mx{5(mZd5-%0PRGF=;gA03Y7h4Dlle}Z~sFOzv)On2G$ z)8?`XFK_CJK7oGA8Pm)paO{8X1{XRBpxb1YvjO$DbIPvX_4iO(ANbiQ?$-SZW` zR5yp*rJEYw;K2(MTI}&?(&+BK;S|i;k5|6T`(;PGzSGgZ7cdRcX0wp`$&+8YKl;qS zKr=~rAqg)M{wcyG8>Z`09^poKRM{%a%>_SCT74^AzH#Cgt_g|!bNO?)@u*>Ixy*{!E~_hZzt<*&g1 ztJDWCr9GCeCx~kO5!1EVo zYq9)6|8?r6cn?eP-AXyDCX6jh<7Lr>j1Mm^Ti+jK9kmEsG{Km;Bpx66*U!$sep8%Z z`IM)>eC1VL+FNk%s#otypX;mhcJ{f_FlgT^{ms5o?_0F4G8CUjURRm!yqm(6cW$aZ zE4}^E?;(pS-N}Sz9nY8URqGY4RpeX!nol5~)zDpBbs$>69I)0uVAp=iYg&@0>*(>S z$i=b!xx$A_0Wur@*UI#LrL}@|SLDiDf{a0yWeveNI4HSXGWku=ToHN7o*igomsCRL zEbr=muKd^E!=AB&U)JG5ZC|vjdv7p7TFY~*vb8x7u@(ul6~;O;&!OseY=wR+u@&0S z^X_ABH|Cp}^81m$tqu9XBI}jcasRu<*jKnq1^8fpH`sG(b{#5yRl#lvey=kPS5lwy zVgq3P$8T7H?+9%0=B+p|z3cLA)AnpzqR$q*S_5tHv=)kW+%8P|qGg25vHK8{sr{P+ z(KiO{-}GPsU(fhYb~64x#_3nPCwcFmW5mBS<{S@mW9d`mJuPjC_9eX1(Gt9_m(KaN z)L9YD9qI3{@vU*0rW4CMpn;r;$7rs~dH}jPT#MIEqs+m^Z*@H3k9YAB*(Zu#E&U)9 zT%x#S-pG5i+aVcW@`2KdpXjN%@w9f4PVLVdo_*~TPjTsU=BGpULF{}353snH^0qLy zJAN9USD31AiN;MV=jFKrZ@j+E&Uu=9=RngM$6BB0-aOfG=$<^?DQMdSJvvyG&ebj4 zxwVkaljy9K)qj_PqvD6st-N?CwyVs+(m6Rji&vVhYtj068GqKp!vkHA5lV=m9! 
zRGvI}bT&F>*LZQdbn(ZWX&%eMSF-RVXE&qy z(DG0C3Cxq<4VJERW?#=(|#e@Mri7 z^r3wa$!pmfv{Z-hLTjQ8&EN6`t2xX^S6QsyAFU-!^VS4U^{;7Fy0}`8S8~2#uD3bRIlXrp^Om_~#+ElTZ)*PoS#NB$kzNd6KFA|E>RyW+m4~$aQyqYe|6*D%F_Qn zzgZpkzsk5ru8!xoSTCt@m!8Mmy^p5M9kW&Bjmlp3o{)!X+E=|%da#|6k8;>qUTk$@ z=zfu77BD__=c4B9Ykm=b4=vVTP3A`**^Ya_O?|e$E$;>BwANdFTegnh2fx+GmA6}c zo9^(So`dq;Cq2E^VOFMKJtR4!$XYN9EZC{^VBrk$PVo}K;P{B-v;H|-YZ|5L=OM+m zy}P5sT8;J0xZQ74or8D|vW(TAr_0!U@ht)C4t%TPhvkw_1e2LYU~;kkhdl_Z-|Uwa zbiOeIexzUcu@PkB3VNtI_Yg`K2M*939DLu^@9CcWEa$}VSt6WcJA2Tz4>&^CzBHe7 z`kOm1XaBp2?8w?`Zfi%NHJNN^S9v=l!&5TlO5X3dSo`Gb(9Q1UZmd0v*;~B;z2M@y z@{?D6Ap6}>(kg#R`Jssd{2hO3?1^lSMid5J=wB@FjHdeXE>H4$1M)6QKo#ptW$g&ADUtFl0Nts&sevPp;Y3m7bv(+znp1XNU8SIny|%ypqxJn}c6y;cX|n z-?$xG*Z<_(rn(ES+KK=E2I==e`;B9Ym#zR`8wWp2{Cls)ekBzry(+jn`1;G@xcO>KG#L>3#Jc+gE3ziL^dc_F+GRJp956?v@2F$9M|AZ=+o4bM!9V)N<(2w4g`E zaJh~yuZi9ST9M9ryu@1J@y!AEIvtk|@|pTKJ0~aG(CS|HI<(fH|9KX9F5#&(((ULT zYZE{p=1WnAy*pFCo3#*O(2MNOMk%|ir4+xuY-oMTiG#v;&EsR{OkS-?bVu*yR082r>CIL>Dmf9e(7Rb zoqZSVi<*pw1N4#lY>2S8q}C7`bHgexSq@8}5q;Sc>NsfuKzwav!Bi#_25+Xz&omwL2>E)=%2DNeiSagY2cgRz^>8ap7Hez z@8Yjd^DHnXj-dCGZbxN2KOpb3sk}jw_qinRQSy!?c}ukK&&jVl8cZEs8FzcU7+lxQ zwOcEO+O11uyQMnv-mgb*qz_*-vJtZ9J%&dOv|D5BGlu7~Kjz`*N4yf< zPWKwpiEnCrolK?k`qCBUH;8o7nV(3dlg*O!9;!D>y;;)bmVRm4?lsr9U%>cgt#O0S zIA1xQv~z@Z-tXF3Lsv~ZkEYrwolW6-`a9Luqp3Q}sbGYzD+vU)@M`cR`<2_Dbn@YTH7|Ot%d8ktCsd^KcEfHMpkgIPkX9cM*mq_ zhz&5jsq{E)ME7+RndU`wXd@Zj3ymJS$R0|f2P!>IAEWmwfHTVjm#WX#h38YZ=;lRq zSmOa_ycVE~Lx(*=*>?XZkS!8r6JO$4Al%Mv(963lua4>USLufry|~PKIm_YZ*B7wY z691bdOgMv=9Xy$Y{RCl}OQD5>Cz7xYgb8O^!j6@d_lEGjgo&pL-n*#hXcGQIgmYHN z&f`InpZlDtXPvF*xg_j)@TN0Fs^{4x?3;w;2~#~slCWPDeiPafy}S0@Nx0g>{Uirp zNZMEXM63d+j^7s zm1A7oZtG3jmxM_-+G}4Dw$av`v@Z#}-j;V$i1PJd5jsLP?r87g8SlF8aJ@0D`Ep+k z{!~Se2`%Uhw>^&s4QMa!sdlW7wR35!)p;vpRD9zpa9*H1^%Z*+!zQ6EeEN)y8}JE+x^I_36WWkXw59^p%>AlXrW+EOUhDSo=|h z%!Sc?%qw>O?agToJEy76FOx?5X*2WPOdRJ-k1shxoNR04?`BZG|45vl#JPQR?W_M1 z;j0IPt8ecoZq*s$WdF1B3~>$OB;yK>bX_V_vYpS<%7VxzjdCM+#ys9J!JeqGVru}u 
z@0`qj8`(vBw!5PiXJqtV;Qc`IUgG_s(!=cM?TH>FtibaSPpwJv_rmu?N>dgQZ1^-S_Sl++Wy$MsB7&mrnTuH$)#XOZW_JXMeGEME1mP|rU> zFDw5Q>UoZO%CyV%kM^l-|Fri+C$mqd`h_vq>zDXF#uxH|?HA+&+b>(;_j{s;ne&CO z-=%!n`A&MAH>1C;e98P-w6^WYiM(^Zg+F>Zo|4fD?9=|L#*6ZmCj-fpq80W#;rHD0 z-iAhaFYqp3GkM;1eoiz~fG#wq?jgVTD)Mh8|B_dce-rtwFDiI~5 zt*Rk^cyk=8Ns*ws9&wgt&tqqMVm+Dux_ z&uZIqskF0s<4ju3YiirGskF0s>`YqC=bGb>Bx%*Q*?c-qYh`tCra3s)f5dBQCwsG2 z$>slpM$<6f4_nEr+G?_FY=yVqe~}{ zUPBHu_kN$Uo_XU_OvmVAN`KmY-M_r_JKIXzf^p_V`6@o;UDw^baBDDC_Tbl>3B$&X zb&uqG&5`P(lIRva!-t_~_16*gS4ng6_2D1c`98*;IEOf~!v@#hnt>x(~3y!IL%Bz^($`M&svi5H#een$9PH@D*M zphen;O1Vh&aJN8EtbtF?Lq0)&xbeFEpI;h#jBiKdpXU2f^%pq!pOM~OyZ>k8Q*O-8 zjOB*akJ&NK{#s9G%6Knmozyqxdv=V>B^pzO$K!LAdB&!`x%;W}ZsZc#zW6+~$A5MF z9j@kS^%;p>nLYc-9NTX*Cp)=}&z%`O(`OQ$ot$JX+0^}KV3aK#2A>D>%sr2yn~^=j z%h5}uZR)f|kwH{;zM+o3YP-b5KC zm4`eN&)ekZUGjx|I!NBZcZ1C@S;gij-zYd4N#$Ec8kcWLl5csE&r9-Yo?(GP=B%hz;)A@ScUnM^9NwqbBaeXS!MDi~AY-5stgYuKM!n4S;sx-7eEw8&dXGHCv zHQzSuT1D}a541kE#%xTuAx#p%=e;4PsI$UT|Yql)Bw}3uCX8>hKsdb&Z z2U2Ud&#`VlGd%J9e`a`Q;s?VM$6L7uc>Bkjm0<>iTX`i3XU<)uIXFExzmKuDf&P(i zgew1E;d?tz(HK6ekIPqq{2tuEyT)x=&$x_zms~p+&oA}ZW<10EYijX%2b~|x)^gc= zt+i+sw$ZW!HO&K2p|)*N`C`UB-^;P(F7ch^+`U(k{d6uU`S(Rv+=%^L3;!Xv9GuRW z*NEQi-6GNxW>_B;ut{4&-dy#GPNupwoLk-68M})mE%3^Zy*iDP4@@CT zG^Qtfjn(zxKL7`Moa4HyS!r^a$!TmyyV=if3+8OTcUr!#t|EWFM86>yUBi3P$4->I z($}v{-X&`oU;BaAcJm&#YLuz_LV;6zb8pFe4@5d^EPtU2!(Q=9m9KKw5;h9F9)GO~ z-m0=peE+cUW&PQY0UKvDq|?qPampu~P4%VCZ(dZQnY3O6@(^ zKGj(DbuW_e8rz$Hp8DilRXAdup}UNB>h7M*m5px6+iUOb(D?%0+u`gq-Tt!|p!fA| z2`E$NF*=uj0bhcxW~99;@DZw z&X(p7Wykfh7e$&h&s+)5G`^vaDYH)AHe;GhMM>dY2F8}Zw@C&YyHn?ja|Phl74c8 zc})9zI(u30IM=Qxb?Xj$o%2&&8Txk_c?aP^d6eez7Ivde>?PsLtAHV&+3|el;EwJK z*UXFICCs#2;pL1m!8T%cu=bnmj$*rH+e7>?@F>n;uWjmEqkwTTf$l0`OvU>`v0bHf zua3X3tP2k@4ED@*lk-U6O=YTG;s>j2d!g?no#@p!^P_8N(@U3eX0+^c7vW^r09(HF z70CgeQ=%{0^ar9n_=@hOS2@zD7QvYv%T3O&GLCfybsjiVohn;?3j7jyppV24g*zJu zY~n=@KKh-xPiZP4d|<{GhS%z?WwS|Jt^=0;4On`8kfc$(Xj0{iUUa^{mnKE8vfWS` zjq#O~CEcupDee2#45)iG>7}n#deN}sReA9$A6PYZIA4cu0DQ8JM6Mo$Ps!YdG2BkZ 
zJNgXlLgRg53xC4Dt?X104^Pg?lV=FL0bAHk_;+TE{h9J^_9ewTtQbG5Pl7 z(E|Bdqj#8ZclP*2GUt0~eo?)iF#;~@@Cxvxb5mK z@qE6{Jxw*vXEbAbZut!uDa%K6rGuTxm4e9xhW@f=n5?b134Heb;Z;?5Ra@hdIs*6? z@9IyreT~@~t+8^8>~Xd%elvK|*$>7bb8*a1E(Hhf%)ZS>-SmylB@Y}+qC=&d!M9Cs z!;pEy@0}sv9B`m~b84*)zVHW@JvT}7oioH+_~04;?Vn%z{&z{EZ_#1z?66ulX$ z^BD8dqwr9j`)^Q(Damik!6<_r+LHFcXjSu|blpL*Jr%6@Vfjm0*U8-9x?-p7SStsk zTVGV-?6Ap{jCy#vEhW$~;yUS>L%?;T8PMZUz>7%C6Nn zdBukquVeY9-fy=4l-B~2mHXP_6C1`?SK3 z=D+`_I+8Vl(mGvZ*2J{n;?m)cPRABg?eXkC4%$+@xCj_^mJq=5poJ{;1BkXfIazhRFYQ>UxB6rf<=U zh}(ZUpXGb2{~pBa13s9-9~kM_R^QLSK0Mv$!eAnj`z-zX4V&$1_I- z39Vj>Z2XN9vJ`rv%s07zKxw?mAXnbeVa`AYF*!W}J&3Nc&l`KnL;nTbO81oTm`ZmU z;r%$U^p^DZjl_?apqug*yZ>V48u~|m6Roa;w#)bb(~Nty>FKs;uM)DRL$|&EnB*Mo zOpv9uGC`{& zTZ6T%O$zWWbU^fr*;({?2bY{}3PihB2N*yH(3ZxU9e*j9H4gj7pS!nX4Rbs4pdB|Z ztcU9Xuw#EVOLlN`y^|%}x3lAt|1eWUmi3wMTgY$lMy=uU_4{pKjFgfyP+ITPPx2ziD~4>{3jNHkD@NQQ6Xo2T1-G&gU3>w&EvU^YC2k zx3uoc`BQ=X)C>3WLu8sef_dPSIghm{GDT`F>egb8C#ZkidUT{3`xnq!H=DqatHon0 zkN+^wEDGneS{Y!3b>T0@$6mGGTB!X)!K`(M(hULQ9ANq=aOTUO3Wn?3>Cc<2y=wp3 z*umr0fNA`WRPfKne5UmmvOy@Aa#iZ^?~jTJo=eoXZ%#fCY(r&WW4}f^4)uqHvq@VR z57_@4A4c|MFX$jhw&&=Cueqh2!BA^wVDKFvzRj2&)?A`;_Q`f_ z3!JJ~e(Bh|4(8Ri(+7K}OTeSN^EDQLSL4LNr#27KPJMG`zB5W*wORMc$Y;LB(-5>) zfcDYt>|FT@`bl_H-L5X3$RBduoi0bsM^4f!(H+$kI%mkoKw z+JqvH*S4`HU@n99^DDMb4?{oF$;~sL4ps%oL)HCJ`L9?jpr^{0@9ug&VO}=Kt+;3U z@->Wi@W8w@oUb##-*R9&Q+-XBF+C=~6+K>N#2;fmEpgz)OMov*h0uIs52* z2O3Z2X~nyIcFrM>+Af_c_J+qNfJtjOoyU*$v?dgu#e-F@%WHL{Caza?sm-oFm8CYT z{g100}`0$`9`e6nB&~)ZCw{#EVbe;7LvYdFYq_YIMKz5W%;Q3b| zyXICX&66E>u3>^Qlm|W9%zC~((+0KQ_B&6PM}6Oeg>oJLuO)Mt_k!npGOrBYAOLYYcxzUR(~Z z(wNj5Ts#1{!XsD3_nR+=eg!kUG0c{C1;+?K`mL?h)9O5X9`+8jABDfo^;YPg=L8@stN%>7rbE#$+$O_^8q-1F zPkPenY=gO8>$n@?bLe%z@e%wostrf+6RUEbeZ6$V$r$r2K08CeqB!YyY+8IP+I)#F z$LY#loq><8rb3;Pr@(>YwI|~CLoTLn`CE`IDP0n4>`?QYVV_6!WUid*Ds0q4&&e9h zg}?D#F<-gyBhOFgR_wDffc8i1`FQxR?1qOH?wxjPZ?!$ceErg8_+Fzt;b4LHM2Mb) zq;Xw>O@AAo3hV!#H7+%#gd57E50Le=C*A1r9XI~$nbfN4g(p>g@<40!h0ZjL*2L#n 
zdvZp?^X5mFfv07qdiQ+J4lQHOQkmjIN;B>?7o$&FJOcj|9Z7~m&Iq-hls&xKth>8~ zJK2QkT%7iPGVC$fz3eh$f<9FqrIBrXsS^9bl%1T)P`G?Zx%Wa6rtyoc)+sY4{$KXq zK1{N!suw;(CeR5%CJ?v*=e?XjhV(E!GyOG_VKS3UcUARtGyPHNuAZ4h;7mw1`HZ7ULr)r%cxO4#K#DzQR8F0M&){QAz;v;QA0jnB5&}`#YCe<^ZtHo z?|r^cb@jj>_&z;PI;+mvYpuQZ+Fxt0z4kujK|GEJe0TYlBafXAJ@B3MY32bNmi>B@ zuU|8r`ZM!$AIBx?1vY6-6>lEd5=m4x}gpz>oEE&{Sg7J6j5j9<{J9fqqiAux9(YC+W+y>iD}4=eZ71mXAq0!P*|{F{GbB`#lT$^8|cuTVDR; z)QPjNX8YN4J8a>bf9}akvp@GF*Wc8|%K$guKK>ZmG4eld>rY4T;B z)(Q1(Su-c`o)CY1vMigRL$RSTCSh(uoD9cie52J?-}5AYw!_z;OvG3HhU3)xpmUW* z9H!lc`er(xDL-i_&z2LvD0&l_(AGG<8<}1TJ;cfMb^qMB1#N(B7kp92lGg{3&iXRt zrR{|A=|ZOe8Pd$1Cwz~(l{-@?BcJIP;vN&9jh^6tkngu{vQZ|q7xr_Oh2zyF+4nu! zv-y30&pkYz5#)X%&j}s^v;n!#JAONCt=l0_{pC--^bl;QI>u@2=Wf32&TnqM>_@-3 zdDES04;}fx3GIcx+w(CFVr_gO^FjZWzVfpBupb8cbR9m>+toP5agbx$<;Y9*Wz|+V z#<6asTxcis>98&*QMOADVZQ-$PapRkFvoCzq95~8ugtyIBDUm5U)9(DBd=jQdYfO@ z(6eiYKo^vQ{K=PcQNNtOTF_U(9~DzaOXTbO2m8e0AL5bZ&zIEI&q-kMFYezLQTcKI zdK2Eq;r?fS*DCiF8~H1DHWyd-3=jWjF3E=M)ByZsfq&!YSr-4Y`p+a{{D}Sc$SeN= zQ=az^2gWV_y@-F_+wOmT5w!e!Q2#v)$Uit1<)(s4tEJVJ;}b=m>y2^^NJW+ENe8Js z?uO^~usBNI-gn#Wy~B3PK4^>Rt@3iI*~%~1?HzXSK!1P#q`li7+nNqO&iE0g!UHGz81K1tq}lzgQmUzwD=SCaQ8CGU~sJxR&Kl02N0ELz)> zq_sVwwLM8%+ap@rlccphqP0CqS{oLv4JT=BShO~rq_ttu+HjKAhDB?`Nm?5gtqmt> zZCJE6oTRm3(b{m5)`mrE!%12j7Of2@X>C}vHk_ojA<^1UlGcVqYePv|8xpMzC24I) zv^JEawIR{kP?FY$L~BDyS{o9r4JB!9NVGPTq_rW@+E9|#hD2*aNm?5etqmq=ZBVo} zn54Bq(b`~=)&@mugGpK&6s-*=X>CxnHkhQfLDAY^lGX-AYlBHz8x*Y#CTVR@v^JQe zwE@xEK$6x5L~8>{S{o3p4J2u8K(scHq_qLj+CY-l21IKENm?5atqmk;Z9ud(kfgN% z(b_gUo~_-i^-`= z(#dkZc@?SZz!{*ev0=3wu=hw~-ILT>_egWylh|HWgH39&d!$+IN$TBuq|t6zEn{qJia0>tB8GvO2g~*j z`cQfm$~KJxuMMn8g|;0Jics7jE*l|Vg>@0+E>z;zfzw2AV875#fjI?f-fZ`;F6e*E zfp1rXZ|P*QhX#d~4kcia1?B9(LTF&A1+hb{b%x6-2ZhZn$e4K?AbE?<2iHZz72bZk z-(~C}s~kl;PRmVq#nj2Bh#~oQOS$UGTV<3FF+_8_O(%z9cIrwKXO~!!5V2~5-e7w; zA_QyPREa@}WhmHU9?CCTj37mw`HWqy7Z|Ykwd&Tr@{~Cq2~xXkX{M~YsDXxNTBs{R zz+^^tElRkPwZ)bM)i}$l5+&&-ky2N#E|XC7{xC3!vYm!7N=Q5YGWXrPS-84ZX_)hJ z+x* 
z&cc{<(Vy`aM$?1t!kBc?pYa#Qq>KKH!!RaY^k+PVG3lZ|<1&m%7yTKZVNAN{&o~WZ z(nWv9YZ#L*`ZI3Bm~_#f@f$|dgO0x*4#b#r(Vy`kM$>~X#F%u^pYb8aq>KKH6EP-T^k=+? zG3lZ|<3@~07yTJOVobW|&o~le(nWv9lNggO`ZKP?m~_#f@g+vn{m#T_swgjU?hw^! z2Thb#6sP?&(Ns~JcF#mp)u2a@pgZ&X=3LcflTuZKLu%NksI!T8D(b|rS5d!KmJ7`l z*m5vIVz(4E?uPl`tE!-zPHV`YuG&FjLkK$RV(6tU7*AbHy0nH3^C79T1a}&v`U`F~ z#-wkU4_JY2m=8(qE4cX>Z9l=?$C&gD^8xK^!+a2jfPdbQsFS2t_kCzSyoZhp?Sz4c zN*5jGro!9CTAflVzb{yj0O-S!rzS1&8-zMQg@4717#81e@`@ zOMIva%y?&$N(=j%)LWP&zD>Q22)5t`TSWhRr2@?US~PjXYSA0Z zfjMv^?ml*03VOpLjVb61izWwrVbNr`0 zv=PC!5y7?*!N&1zBZ3VFJ!Ca#4!1_PfP$x6W6}f8!I*TheauObnDmV;XlyZUM6e}o zCI$V4(e@KO@*0yKu)Sl_1IBkudcgXQNe`IcG3l~G-iTmJs^6d?9@Ssa5|2rjW#2{w zTatc*XJMoI3m%4zN#BTIGpAy;a^jteIx*~3)URc`80N-C1RIYQ_!d#YbEeVew-Ld{ zPz$rk7}Za3moX+?roW8<_@v=+!+c=-GB;;L+fQ)gF(zHi|BV3nr1lluf{f}fcpfz- zUHb1v0DN2^x_>t^syR{I|9=ADHzL?@J;=Hu*!1;eiQ#7(cc}4(H(w0d#$ImhX00sn z%^8QtPSM6*ZtPCc#$Il$S~l)bi&e|U9cr;^*|;Bh*RBM>k#cqqBV1?LnJwPsY5h*BW^xU=Nl1dv2)9VHYh(Jr)!rO>I zWBb~;LoKO&1Woa1{|bJEA(|dM#2S+>>*b9*)ROe?H_M~i^PgvpruyB9(Nw=bF`DXk zC`MELr$eHtDrhvRM2V+bHKUCPG~c%6MgTj9XXC4jHnreqnxg4JZ(U4!z+D%UzF|Ia zdQ3FGHq3_&^8pcdiC0i>m=A0p8&|LfY1OiBI%8S6(05z${qBzcF7J?f8~EMVB|r1&=B=*+!)G3R z?*1zX|NObTdKJv=Cj9Dil|hw$r-rw_lg|%5ci;r!Up{xyN%d^k6|A=j|NOaIe+fAN zj^9Q2@N@TnNbvo2^n~s7%#_aHLYH6;ico ztGo!LTH}nBJvs|E?P_77T5h2T>DpAjo~zA+l4(&r8R zEURtkUgss!(&-yQh78-ISZ znT3UBsfBz{b1aiTEnO5`SboF8p3Ktm|g0(6MKRs5yf%gbj*k5T{7r;CR-mgUTqT1K_G{K-lw zQ*EKCSxV_elO#On&NL@$1xi+0DWk?&29@Q+a=q4QjZ}*xg&W)DhBRYUe)I&Z)8D0` zbn=9pqUNzuwSc_b_Hw3L$g&CK8u@B-q12eei;3z&O#~s~_wXNeMfNkP+@!Qi@J5?- z7AOTK>lo$G;?kvbyIv`?S&B@oH8cn#Ge*ie46M>z^*hyib>AWS+_4j+v!m%!vmo7u@6)v-<-#fTh%ws?kQjjBv?*6t z@|AMYUf?i=&opgYy%0FwP!x|5m?!Wp?x8`Ukg@I{nDp;E-iP~8hXzN_v`Y9G`McVE zc6|k-%~AHYgWK@A$|c_vfEU~Cv5|?%O!_ite_Jy?*O|Eqe%8BEQJR^Yn48^ik5p|y zdkNn$%2&`hif8PTrBW3I6Vy_1zdbQ~t-Y;i7izUr<&wRPvn$FN!z)#5SBljuT87vz z$^c?BC%cA>gb`@xtD3!ElIfa4q$oiV{P|&UYL%8<*@g8wg-0K4Hdan82g>2+8!1Oq zGVM?fKXrHIm@?Yuj}GdGklWDCIj)?pHBQ~wF10xjx%yXp3Tm)vmaB`EQfrCzSSor1 
z_Cmf~DHVmz)xDyGw5|jpt+Spm(Vm(uHEZogp|syNzt9NqHF&#PYvD`wg;S-XeY)IQ zveoUyctW3Omv$-rcFhB2qMd=sGgrgBU^kc8oX~D6Y?5}nX|iGehP^O8l68(`W~Vcg z&dl`Wb++9L3&~~&bxkb0)>XWi3rZ{dZMV&rs!TRrw|85Sszcqx;bJTi zL%~Ls*g&wGtwy`hYByxA6dG+Q1oOx%^Fe3g^5@_-o3AdGX#8T_sTNDCcCFpAYYR4P z0a|8?XArMmC?i+^J)_wIT!dNbsP=9v>6Ps5CgyBRBPc5gn)#KIgCVX}3Lr;t3waJ` zCD;KCTw*PmpdHq0r|~T0MmJ&prIsUDDihrUwM3c*t*E_BD{H^qMK-GJJ{l=This%Q zKKxEq9LDXSQ>9|e2z_>`(Wo`>Ydd5^<#Bp#K)v}U`>L553uq-L^Mz9Z9K5qzM8~w# zz&E0+?B8ezx^HKWW~Os9qi@Tka?Un2Ze>YVYCEa^ZehOv(n+TCwP z@o-uii@g>`XEVqLWS%Q8R`Zpva^=t|%SMjDTnMp0?ziWwr>eEnRr_=!U$Lt^N zQRm(=z{8)=CQ%;LST3rbi!X||@{N`a)20I3rc$adV#3(=u2qaK34Gk<>W(sHECL$K zc`iKtBTd)zb(9Ist&u;IlqXAJFEsMYCA->QK8Zf=AAV5qZsc7kSBv(ttmQFea6E$T zRBh$UuzD6bXh07}Iie3~C$YMmiz2hFLa1iBSmL;d3C?8FC}HHSqUW7FV^aoaWm$Xz z3Hr%E%77T_<($A*c`=Mo#Tvr6XecyFbu5K%a!&su{Lg<1s7~Up$@bYNF;|tFoG_O24VpzTWt;87Qg?i^W7BJJS&NTuW=#yia6s9om&%1D zJ+;7s(xzIGow>JK6C|%6HAUyNGi6+c{%JE5j7hI7Of_cPo+V10jLZT%?UiK*W__vA za+b@hD7=3X_Txci6Vr2<*_@LdIh+ZgNuHBfXte8W+-f+awQ0sS%n$TNQU7$S(fQ-E zU1X%N^Js1``%nk0Q~4@$6RK*hy0giZtr&LfHu8fn#HGT# z+Io-|{m3Y;TqN zsdyD8b3g6QtGNKC{!rF!bmdY`-KL-r>R+p{QeG~%)T|^rS3|FqPpk`-jVG(BT(%?O z>9(n;|I_3rhGcnpaVHfDQ9`n~Gt?=R)A6^lF+mYm^+iGFVemd(voCTd?bue2X)qU4%yw4PP7WC|4}qA~d(0 zzX7$P<~WuI{hqd0EWYmgg$nFC@UC!0um)~XE&LU-GS}!m7N_GxHC=AZm2MQHR~l*l zRLQO*nPzt5jJ=z`cW-Orzx|jxFixTS!T0;!@=cZ@6gN+XmbCUmbr#^5a5lx>l9-*HKv4G(NVm*086z4P4;Qa?z*quJs4Uwv~>ptDG}L5=BI4qW3x5y{lXM! 
z$^l|t+vxrnd3CC;H+KWUZ3)r{Ol{k=6lATniV=?ab*5v(KwD>h&;VdvfQAD_5?#53 z_T)}zi=}F*f#qL-3%S`vc(w~I?8Mkr8iH@!)_!A`JXO2s#L<*?Hw4vkB1`rQC7m9& z*&__9ClAVp?#(*2q%nCq3h*#f_HuyB0z5!XlV_wZOztpUa9IZSfqWgZPdZ_YuA$D^ zQlW$~$6M{`MHffB1+3!5R?jyUvCgfwFx#+88-wdq3D#-_1G)0sn$cWFFXFMCuxhtq z0)%>G8r0^V^t_WL?w4|gM`orSV6}*`DQ-W>XD4{|wA0WDM+`J*M9drWr}8}M;Tm1a z#u&|Q2&rCb6Yh&h0; zS4=qH^Raw`ixs|cX*Bo1I}8ISYw)i5@yvR0SxZwXVW*E6yO(OqrQP}d!NI*lL;ds| zRdz4tmyyqIPOTV+c2>CExKocgyG!jxzFE#Yr-0`ah zM~(KESrap}B@BHHrsX9GpbU%lf)QQjx%MFi=i4Y zyTrT7rT4*Yx(^vy(PF(dIVxD?p%&}`7Vg6RLQB~(3RlU-4&>S}m?o8N={IH+&CYtQ zR*^hn=wx+G1lvbAIWawd+?mQuIU}=~5eI6^V2uO+M(};+cn+T2kxA#s(Ma0F4D=Yn z$9i#cawZjdFK=a8F&9ajn;&&F73`g+8YQOM*x)07HawrdMUBmVd(*EU&JDC`(Xz21 zsOb4B%wrPowYv$6BthjX+Rrq5xc0yfY&36}2)?mOya{MyfsraAuh7I)in4iZ>&@Db z_Z4yH)lE92R6D@hn7&*LaRHN8>RQD#M3Su0DMuGCrT0lks%KIsF)55VIk?%BU&=FN zBl!Dm`UN?FH!+6lhTQgg2y}BjR$$oc-fxFC2dI6ncFT+HS{wUCVs3d^CDLI@jDef$ zI%dn2k&la3mfA^!9YNRcWwtHbaxhQSx*i+Tm)X4&7~P9p-t_NV?X%C++V&zG%K8QD zV?!(Uo3U)*x&pDACeEff1eE)Dc6KHu>+F0(R*dkFozSC)8uuL{coJ!ez&k3ks<IzLT_ z{9yZ{v7t1AV`pY(XJ+lGsd-19Eo-8JsSr*Y*=3@2z{XIYQX}uqt4r7?RrE8BGR%W$ zJM~NucOs5yo9wF&-j8Rn(WFKTM`VHVjP9>?)MDshtxx5vT-4#$=$UM*VY(D-6X2_% zMRyr^A=vK>yO{Lxk-2eA<6!VXg#1hjY z8OME09;#?&O@pUsKI5f)eZS3YXQp!)*0IK{AkN2?L!}1_9ZEw%l!^@VP&!7Jj_H#$ zEL0gsuyD+42}A}xUMV5g8hns%0N-qAETM>I{SKUQ@!3l-&{6tRy&X6#T(N-Q76i+0 z@E}@Ocvw%l&|+`7W^+Q4F6Dh3>`MBpPVVw#$AC!l34&f!{djWe)g z-n8$eHG{9RkaU~{)0wCS8ct8rr18k%4BHxsc&7IN)#gTv8}GrHfT14SO}h{BH*^Tq zQd1Tg2n{pzsKRogmzs#fb>=5@Zg!Ma<>$}MFQYrIh$@U8%t#cL>u{+x*FK3tsD5Dz z?bo{ueTXxGOWCN?trIU}lVsXTY)b;23SL8oNv$n{qeQpy9yV)sV*0RZ0<3$?@3i5# zIBPAzpj%cxN49qw7Icv7iCb*6O62Zs?&y03{QWfaw|B3*P*vYaMx%&>(sZ@i7a{0{{w@C zYF|>!xP}6`bQ*rebb3iNZRmS6&IEV{gjGwsfca>1_OVoKtdS$6OjExBzGJQa!Mx4H>&~b4PHYyKVLgZo z>p}7`N(jr-3?ni5-pS1A=As4EL{DSTzCxg^@iOd?u~M(Z4E44Bb6udr-1aFcQ}@=0BrS|Dw@f3Z@-%%F`W*T;~+ zPpM6ZZv-Y(<);wWqL+uVmC%U8av8T0Wa0h5R`JReA>BShS`Tj#yH&;I{7%@AcAYi|ZDQf?;hbh@ibgW^N&_LY+Cm{A zHS0Apia<==ZX&oIYg8PTXV3?@_{3BSkI+T(K-?~5M%c0&Qve@dlX05H4dj`e-SebZ 
z8Csxo98qnB@&df$iDmIN3{_x?_E4&MBq2}qFP6=WQieCy*#5QTgQ*p+~vC1Jt*x_`n4pr7AbbGP z9~h%l`Jwd$5lnU}n%NC*#5ghHwF>xh!-5z(RlWeOCt=amKm2ys8(Oj@4nvK_szfH1 zhF(fIiYc>rN82Y|$#r{C{Z>(p(yhp-fpTl6q33}8b`Bvh2&|MXC`AoJ7>K5ib)wag zG0YpX!pvoSg;1-|3C&L{oqNm+FfZAl8T7rjY${^N6i;&#<*|05KP4RJ0k^h;I`-c4 z44tnbkf~SCqOERlRal^EXIs=6J-h}m&LB5x`|b#fElrNXiiyUQpPX?d!C!hDRSr>F%R&72K99zS^iE%^prMT-Lp zm{XM4Xn7Lq1$QNT2i%p60@I_V?2xd@)0c;=b>4CG%WaIVbas|nr+L^3U6KyLZ8&CC zt`;ioCaSp;eK7x%r9z&LAXQ0CKv)+>g>t6G?Z#TBjKpxMGG*Hh#xR&6qcgdtCyh4= zY7S~sQ8=qD5pjYjp)hz$c>S96l=L))o6b57kC4IV;7D!s=o@!JL$=Y5*9P68Rm+=9 zODhBhnM7}x&=YTNy-?FDO{@4h)v7`q10pMoC{0YQCDN(mtp53ng$|%wPHG7;;mW@m zw>)uXODqaki##n+v;72`72CPENzEYugh8A_U`h)SOsGcm2~QjJ`A?Qlm2jRa#Rdz; zl`@^-Dr`{gN-t=eLsyD;EKio;(~xi%9-ZK*T`a9&Nkl(1j>oz(sG>Mnb>XB9PYyog~LZh!MUX162OehZutXpB`AjDSx7CN z`M3&l%co}t*+374n5ddkZF{Y9hWUwcEKL0kr#oJp(Y07Lb;pEhGu_cQbcDQGsi@;I z>inwS|D)L!tEaSD`}^7fJOP|t>S5KeXxWK042DErvP-9$RdBTvW|km}k6vbb9)}3J z1SQl;nd!-OrRF@h;`MYHmLWJmC_Te&BJmtI4;wg#1U-0X!6XdDMm(~@h5g$#F8C@R zQkG{Zg)t7`!4~Ch4~SpDViKJLs}{8vz#V1;Xla?`9Cj8LPxxp9q!K#^;usc&2W<8A zQ>*;V9;`4>rSQ8dr5o4^)yd8r%gj3I%u$`ME_9=m;@-eqy;Le<{>PzKlwH>%gf=H* ziKEX~?pGQxI_ShlrO}*K9D^K`V4A+xY{dxDk%6Kg!_GQGOjN|TTYQ%}8v>jWxWpy3 zR4SZO#!GKe?G>3ttT??c9I2}t9a!gjU%>8Nf()2J^)sT@>-!Pcc;SUwG_E1JSHgEH zY{iiBmDY7f8Kv?jq2CUob;BpVA&adRTU_NsLO)U^nMm_Qw(?7H@uAMFYCTP);pa9- zwQ09;&h_Zv|CQCfHpOfVMY5|5Z;x8Ck;Lj{587_ePwhkpL(soHWt;+d#o5q4q9B|$ zi0uc4dP`{(g?g5(cG*8<&m6Je;p#Yp!y^(vuf&pVAhQlM9)D9G#u`?QL|N9sX?*}g1yu8@kP?KX}CPs)CNX)9Yt#t;_5PvKA@Fg|L>kIxxaVA zL#+YWjqz*rOp3d_jdEVqiqUfi*;O+R)=jNa8LKsp^0Eyon8)uC7P`yENdmR}7#rl0rH*?#33$pHaKqVo6%JHlaXh{d11-%;bpcFpyz$0=H8*v}QP-b@IjB75;K4yP zQP@eig62w#a(u-<7I5PUlQhZ(`M5Voiyo7z+ZGaeXl}N+$;96U8q5S@CQ@-q1|4fU zGoN#2#u!0AGkYE49_HQSf3DF@#pB45IddMQ&&|v@W0_;l==|7NW_AwjgXt5~M@J?n z(ukQyINV$~Z6fDz5}BXPgy~Wv(>Z50b9iD7k^4>xf%T&!sUu-}#T!P@L@E7RenKw&`ym^UuUW~z?=Eb&1bVSv$;$f?PsAZXITO`S}p{*IiomL zaRdSe(oxrBGd4MoP~>apGxMRk)ai#aIhE?nWkkD~ET%uM63{oK?@E;Sye8=aq+M9VR$0iG$GI)Mg{P6RmwaOSgj-a4{d^8UAI5t0>az@f=bdNBO 
zpI&-rq&-XPw{ zBzjW%I`o9h9Bz0C;o=6NIEDe)iNn#x6fCSCrgKwAtaZFhuBq>H3s?7bY>2xrV?@4^s?jE&6%bb)QQJa$#`@EcQ~k-sRPf{ zO!hj*j~b}YFi&0yk=)0|W-}RcyemmI$xpKh;`sYu>PS2v4`+UwW1+?h$UK*uKm%k< zFuPtb4;?A&?-2o7SG$-9Fk7WEne?24i~c5ZK|Y>dSbr>zlap2&%Fm2VbD6_a=uv6_ zPr|`vhMA-3k=#fyFT;bKd3ub;?c&o>)t%EP=299VQ0DpRBh#4carzIpY$ov5<4q)Q zuRhRpbF(wonUN`h4^QhP$**pU6A5GGQ!bmi+(?e*PP`6O-*s}=Ws`A^&7#T_Mj#)f zXSWR`l8(q&0x}>i`rc#zj@wU z<6#A4_UdnJV%jzBB;Sa9rthf!1#1q2V7MMsm&v4^@ng>9#MA_gy+D4`81r)@hsBl= zlUJSjM3u6u25~*vQka%<7+I9!l1lBxIf-DAR=d+=)*TZ%DNo*hS@X|2evHUqUGKg{WO&t#ppSOwL+)%A+z3^yaa0^ z%Eu5&W$493SRPrgslg~B>-_ZSvbSp?2|Ybe%KkCnRT#k z!?GmU26c~6>kTt+7>*%aGe>1-C#GPSJ7cpWT#ND*hc$);Bw8i38sNtUg&cG9Sy%W#dNw^jl@*11#%x%2g`=(Uv3VNBF+7m& z%wVZCHaTPN5!ZeQUmmI4#L>vOJTZ-ym7UCB6h)n6gv;beb}r6-nwrPSP+0nEK6e{J zKputTcF+kfp5n$NXb!{t1lHX!e&$lM6IlRReg?}I#3O<<8`B*EdFut79#70@5pC;! zytp+IZy`Z5ndU8)x|D%@n2vTHLJ7*9&OisS$+^0SX! z-WyA1;KlIp!)?gJ!#W1dM6-?bVdg38LYkThdAc~$Gig>uoLs<3O|oJ;;3%DNU2SSZ zIVh54r^aAQ&mD=#?UipXHJ(Y&bG_af?+6zHYv5>GfP+@z9Enz-h5|uKmR;*TmY1@z zv$%{9i{qJDcuAss6prpDsO;I199GIBHgiRpW8ml?9?>Za85*Qoq3FT+M_3C2j{@K9GqOC@_6~1u@;^P zwLl8!#ms%uGiaE=^`KWl0X+0aS%9m~Jpoe(^UdUhu8MGZ#FMHA0x=W<;J%G308h0D=Poa+hsVbWK5Iq0d8IoBr_$4@_I0zW+#De)6<=mUx`pO>Hb ziQvt`)U+nAx$Ce+fb)8yvQVCSJUWc6Sl;px(v>*Pu7e{~HO2sKY6L^H@z-i@-(X4G=aPweRN`k z=S4;;xO_P_4$@DOK!XrMgl6J4K{v>Y?Jb@^x7UKGMrK}OqQWvq7O>$MBYo4t#gaT? 
zj}0$<&3}gyMsOwG0efH34E4@1?m!&DJsRqY>JFR(czSP3qEFTBopL^b(G85~Vyu1M z4S6^zhh&QCVsD&T@Z~3gX!@3l%5m9p<9!S7l06XMdDj%}_kv~{S`oOVI)4h06d}B| zc;c#w7&ZH99Q>qtbjFf=xnJB7?S^($&KX}Q4bZ+~Tk{HugR#}ok<=)@4I(in>X!g7-#ltO(kkQw}BaUoEcWsf)Rzt#mV3sQ#=%x|#?5DecErQLe?rUIjbRXBXA?#rR z`ep|5F&C)}-1;Lec-zjQejIWNDT#GKT~Q;1tOr6i2GdF9UZI$lk$@+otHc^GWXR9R zy`>TND&a#U`eZ|kS4Uzg4_AS`%q!9;zZy3offrPQqbhPclj*Nu&hvtrvVai`y3*$z zEk$YKWAGFsKf6iO%|b<&$I!**+A#eVp%whNi=qpJU3bWFLWp8%R4^TP_*o^WQ5{c; zd)Cc21{58FVsMm4r6$)9&o`?rI;A~4$*0dGpp++CdE+rMlka)iJco;TO}iI2Ri4C6 zUAUDLM_*3!5)ocIb{gIIGIO?z6>E;Nj~>Il0dfqap)|Zo7Wn&34*G!CDJAf?B)w0%L`|+IK_bOLdj?9&CVjL zJbke&qsR2U=!IHcfjCu&>kv^+JUl1HAO%a2X z;TJk8(%O=s^%Z`Zk6~-)K$|~l1W3$RwUuBQ5pSs_ zP-l--%s9_&8X0sm9K-3frCbdLRV&}QN$UKAblr46C7-6tVJ67cPL-;iGgCCG-!S@@ z=3ZAF$E}}4T73Rex?_l~H!ub7i{ODz94alUfMed1tdA3f?u^iQy7ii=N*PG%{)#a< z0?WRJ#%8XUFw9)J5p0L+Tmy|Alc!$j+3OqwS3t zCP5x_ohW+$=DWgy{ovB`ZYADd6stF0eyp{I(zs(wa88NJ@AKX%GF?8}bZ*8mdWOs) zQdiz6X-H>U?|P%fIOl^pF?tOm1#zad=M|oQ>|kcaJu3CX?|As`!1TU{94)`uY4J8+ zRUSG-9t6QWeVa2LiN?XY0+R+$ct!J(01(wEgE3sXx9K*3{a;jrKORM=WBo)gENfUI zr~Gwn+Ow$;4qGW6lLC9olxA3gVt4I#Yc8l!`9K z=5=|RGErva2e;nX47YR`1ZVO!IEufEH8wjl6^n9w+BLkiK z=8#-5&!4mnS?gihQsalE&M4hT4k0$9-u}Ki?B!P-T!oD?Ojn!-h~Xt|+||DW|D}@O z=pkG_VuW#o$J7XRb-gr-FW--rOoKdi-DwJyrs~a51o6$5+Z+OM5PzW~RDw8e7z2>Z z9$i?@`RRD9FdoXo*s0tM8;0KH4dX}|B3MD0(Emah;2hE~`{>xHxSriP&FG$KC~-;} zs-_F9{X)FlHWi}r^P9;`y-jTm1!dLk0AodJas)Ah;r?iNxcx4I8RRE(wXpKqgyEsK ze_;oxtkeh=DhvGh*Z`ri5I~BwJ!KQPXlh52-66y;gqNPcDTFYNjF&DeY$&KWhBfd) zF*|zPGj@Q-4yOX-@IX{HGcS3ic>(op)-ytX;?Oy%l*@&E@R;Czu2)fOsVppub%ebObE)LTq?9$_P7yd zgivgBcq5b8ccFVO;^q#q4Zglk06xptj0kD&g{ zI2C22@DhBX5k6Bh>4`C%&cFd3i7B95Gg)suRvv_~Uoxqcb zkD)_|%@LlRIfB|TWsmx&qH?(O>gW>2vsfP5r>58RA>BrKpmb76sw`R#%>yM9_jWh3 zq%)I<#1EAx=-&&;qQ?f)TwGc;F6d?wqenF!_{cQhpf8^?)9>J(MpifID^Jrd+fJ*m z3%0B6CchZMBb;&g>JU11D;68Lpj6{%TK8zLV$Gcl<83M0XG$#}PrXUEf{WGi7;@a5 z7L_NzHsJ1>&~L=^!1P3w^{2wIZ|ggy?DuKoQWoq%R&1KA{2rby&^yr_hf5T}#g<`N 
zKB|X2dHF{|9>#BudqwfJ&ShJFwv9VsaJ?ux|6yN#KwutU+!D#u8iA)*jW*0AHB~@3h;I^76Xg@~gs4Vyp zmv%kUPnU76$l>;nOD{Ky6=syk17KR&CFR|VigKB<_lc&qAW zr6kHBC$1^A!o?)jN zA;NdTC?>WT>C?a`!Q9&sR9~6Fkcyj!)Gmqc8!2nDU(O2Nx>nBJ7|pHflo5VYmnZuS@xB##Ll zL}I=Yzkt?VEvQ!A2ZwxqS#~GUECvMM8y%<}NCMLy#uZ~c)rJk*F>d3-1^{;aEDIZ$ zReY!W`|_KKt_tz#L~1NpHc_4`6N=J{8ZIFUj(34F7R_ASAs@fvcCO3J)Q3}4fn3fh zM9x?XXhD_se*4%=>gWh#Yt?`u zbG#3lLr#`S_eIp7p2}6RTs+3ZxIEBkK;k~SB{8J3k9eW*^>l?{L~Yx-jX@nxR1L%U zE`s`XX*dpG6GvTQZtgKmz;*evu|e!4Ij9I>733om18qKOn`Vq8&MfYoKwaUzr>4B> zJgWY=A7!GgMD#1zybX)6{yf#xu-vk}4#6+>X)>x^xA5GYfr@v;PCV1hc3 z*@EM$88Z$t=0Oh|Qg$d3P6d9v5bFu;LeL%5m(JL$;LT4_oXDP{8+KvZ6>lFiA2Mm$ z({fIV@+uipvHOI6Cd3WiV8({&@$p)cG2~!CAu@;`mEjzv%PmtiWs29Q*WWb0stG-# znXxH6vIX_7+YvL6PUH@4q4wq)s4gb%7RN`jReVJ+LM>5Ya+kS5FuLTzpio<^;<7jl z5qiYZWon*QMf`sJCNRq(L8e%$IYw^Gb>FYDLYFc-K}4&UpIbz2EY{l&zh2G=)K>E# zC(&Ko`uA4ah|gSYIfd4$Q!e7Qe#Qr;Yk0y(>X~{&?W9hX@z#gzrI`3uU^q?@wxfdq z*)G7b_(QxS&g9Rw(B3<2QH?O2ml-40Mlill3ae?JDFxDfGFU2a%TgI5epsw~#lO|a zW5QZ=__Zp|Y&59x3d_cHqo$(63OWd7{1`sFQaGi)d+7CN*N=YqI1k(_kEyyO!}s}` zC3VlB_Ix641>cV4M;*xr#&)`Pq+B?~@F-&x;tMb7W@`@fsk-XUBcTrgBMg^R@N2XN zN#zX*3`ydJ_~XYgkcrW)rUiFGE4J~uxtgs{|2SZVWAHe|HHLT^xjEBZ`!D@5Ca)14 zzJ#l*BuZ*QOcUt56Rx?^*C5B@B0fc5e{6Bw9P zCs{>lh|8mf4yI?i@Ks)@Q`Y8KtpSfeXE{0x9Cog$-$1_ruP6<#knnd> zq2t7__C(NgOU*)pUbx3R79d8R^O&L*!o<;hLt}#~Cjzs=lyNeZj*4GMH9n3P^s;6$hqtS*5Ff`~t#Anw!nQd3+3XO6dNA|Mx5nz7S9}dTs z1HK)G7hGw;X38Qw(%#n8r2PIkCulI|96iwa3RpdTkWYxto!N2dRxKQKn+f7}WBgOvT0yKS zd>h0UlC=gv<&-HUkNY!M48oABX8c=`XX5Rl>AH-^?o^owz_?nos}Ri7^)2VZ!WGnhU1cz z$d7lT^0S+3(1*+KUDYXjS-e3V=5g_K9!DVxJP%zv6eSEC z!cHBT7;~@==~eZzUBbETlV=e4)@S#&RXg^-#hPM#2Is!zwlI8aSzlR$L3|3+izQHB zX~({Vx8m3m=zDKQ7E<|syBI^-9^6MDxwdPL{G z`d!N5lfA3%#qo)kNU1dyjxu}8E$k3CFXZ^EZb=3r?pCKWP}(WK@)At*mRt?_6peT% z8f;5`yuiq;z0&4V;({{L0{HHLk0lxmFXJLndnY_Y!u>SbxipH^b>FgpbSEm~b=H1B z;6`j|ge8q`L3 zJ{10XV1R;*?PIhg$DZo^FatDI%`ako-7&994LeGi^c2Q@1RCUd-m`c7x-=KQ)TbB* zX`WnAHh@~H!&s1adh)|TR+_h*m6GZ}YyWBF`MI4_t7@~v8gqdS@(#z1JW7;UL7wdNugK4~Li8+SzBi-1@ 
z{G?7mNrH3;l7nG1^A_u{h_ zEqh>lO#fViW^gSU1}BQS)YgS?rwD%x7TN&p4fxa%PIP9Ls{)8CQwY8{strq%Y$MWW z5u<3Sm_*~r94Zi)!uo>M-{{?rfk%Hzjv9uGmuD-*7REue<|z(G981iwgCQGpQefir zPdQRV6;-a|*yRkA3ZuujP^w3%*n0St&$4 zlu|W*x3|jmUf@~dD36mpl$NkK)J+4iF%r!icqiC!BAa0)fS>SQXz0HgOsmXT{BLtvUpc&fV~-G-Qnd31Tq@n(aVtke|7wIa%fK$`Z#Lb)K{ zAB5k31$(KBB{?Zt#Oy)VJ^~dSWlRR{lOf%>`(1m`GhW<1*rwhRaK8}_x~%$ahQ!Dm zM)%^kH?cZVrU@5Vz`?{R9U#z>P5#PSl-o2;^5UI^b`@U~=PrcM&ew}r>VX`b=?L{( zRRMETWC=zX;Z>Mi3#NEyLlH^9k91?oAH*~L8)jD%O^i+=86~mw(|!>7jv46YS{UM@ zB`>LuPAt*xXmMbWJp7;G@)r_Y0gOX(jcUr1gp2Ru^4rqN@GBzX1!KAhrOO+nbB9T= zwDrPK(_*n>J|AXkGMcYTZ_W?PMoyz$Z~5!r#bPc6j(PT?v=hh_NLyCsV+dtU9J_ar z!)A;Qk)eNepz96I)2$y}eVzjcp)qFe;KV1lD2V3aD`~fwN7U)#eaB5JTbwiE4vWQi zokZMx@_(TW!4?d}mCG3#F!2zGQ<9qt_EX}ud8ummPV0AM=jRvjaR$We(7+)N?|T|S zXI)WWkdqL7U+-8~*EB9Qo=_XuxasEN7U`tz%;! zpdy5n+lBnpvKb{nYvy8_3O-jXmv?UDo;yHlI}%|8Hb{A?cF4b*!i9I!^I6YHA>~?E z9z|so__BhdNy%ou-NrI^;5oCf0Aq(<#5z`nb`f8@DYW>}9nIRkSR}6(JQ9O0UN_ax zI`DRNB0(yQec8E;NgWk}ds~p>__35)>tksPtp^ITrY__n2))+|+h_`*;F{Z-a>2fv z%2$oxcrsN?UHSJ8BDHKnu3)Yr0stmLIIoaPz1x;yyjx~x1XucXZW~?Y6Hurah|$&& z(sK;o@1}(W+FlIr@2-WcExY>UReUR1OSa@zBKi8;NleM2Ed}{u4=03b_FULd42iKU zVY}7JjTXBK`zV?k+b4MFb{_paNP>g@!s*+$pk-A(HZ)KNUY z?|y6v^6h?H3D70C7L*|_#()Z^IVi@&F1gD^n@%v4L8D?Kh-I9yG(vP!2&8k96ag{W ze5U8`$c!v+0TADhv)gn^QmH z__f3yS2x0dVH^1<@=)I9elD41G1tZ6#mHDmxKMS{gBFF@u2OckDc_e#tRiGI_ za5-aL>`swkqDe2dTFEA3sLz$HzhMn*mVGK6H!1Z;eG?tD|jlg+8;`ItZ$Y7X7e zx>|6H-kDC9U3IcR^H{T-(H_*jZesP|RhPeBlho~E`|5T~>RXnF)U%XFdVOybOA`!U zZk=P?*|%%ostWe@SGsC{95UisJM&U$t~Is*RFAKWOOcjj_cHo`ds6bHZ8Sb+xqZ8o zQ*Jhq$~FzbOk&xb2%VR*gh$-oqg-WRfG|U_+q5tYZ;u&}DZ4)}X6YZ|O?@U0^1aeu z){$kTe3EDKG$8o!<#F$as4~Og^VBU)0lbbBDufF5_;k*u5+@OYFYlNull57;g>+DhiMD zCA?!2-`&Q&^ET^W9c|`!Tgg_wugft?(LiF^o4_8tG^9Ffd?DYixGFphHhMv9ENM*o zYysJcGy>}NRv+z~aC{@}h`1px2{+N`18pTB`^WBq=UG(78V^-FB{6AiN@&B>1haSJwdjDhR zpE&ysKA&$XC}~#q@t4;heRbKgUT>Z^;nT`5KlJG1h_(L~-HiX#FN&| zz`6O*7y>JNvkG--n(wdGE8{|K!Uo>(30o%`v=of)`X9kmZ2={>R8; 
z(}nfN4_F^~5;Fa);bB=HdNPjBYrtop{nJlU7VBo2M3>gD&=2AAOQKu(q6ioaHn+d?j>F9rBrfo9kXZnRkRQL;cwg9(nAUPyhT=ccZ|p z_xi)C-Y;RjQ|C7RPMnqcu^$r2#TWSKh{yCE;)71;k)N&exfJ@Sf9mI-qAvQiF1Gk}fwGx$Z9a5X^uac1WzT+N z^L;2I{*1h@_2&zDFG3ztKITgr(HUc z_ief3$ph9MPi}g>(g%Mly;#zBOuN}<-m}eHUb`V~+~1|h6E^1FEjiSL6I#v2%Bf`wc~a!U!Qov4!@KtM z@9ZBO8W`BQ*nefIf6tZq(v|ta0efyL%ji%ZIp%fIPP;0>L-?shZrUTWQ!7{6^}&^W zHsWhAxSD@=ZFy1*ChH;mZN{7WziqPiy{LZo{`)5DuK!TK_u+Ss{(T6)$MHve zo`>QYrcxDKqV1AA%T|TcMC4OpiJ5%ieEvWB?^3+tAO9uc96%cXI{m}g*74uR&sf&e z_&fB>%C%4qb_tzI9(T-(Q?DasF!8o`f980L2eBNq)he#o$e(t~Sc3<#lApeM-Df@@ zpfB;pl~&Lr2VyNYuUX1nZ0OY~Pv>Nc0;z;yJE8K zTOT=~=$oY*9XOY(t%a@#HB#Zwzr04#v*oH07Hmogo9pV|oB#cazfkn5{p0!r#0xsS zLD_+0V@)0p-T3PB6{U{hX*yg}tx0DPZauU6@BI1WP7e6vaHMq^TkQs^1H5DOeJe5a zu<~(Exr-if_h_!ajsuL2R${yAoUAW6YR9ufz9#7%|NQi4eDu6hL`;8^z83t2XB)RI zBPazoNf;A-Es{Wb>J|U-E}=gsYRlUE&;viudr*I)U6ji*2GEV zN}-O)^c?=3*j06S7mnoW(9N|zCV&3^p`8qTDlI#wZ(JX#U|;`#zx^eRV+eTSSMR&`^l3+~_2H@X^~}ALuyN$8=ezVY z7T3w)9e?+MH@ftsMpwozJj0B5%w|m`{KTt&{i|xQ6-(R!FI*@jE{Qd1i7?*y&HuK* z&n?KcHeHbfk+y!|7yS9xPTqiU3kO~=M|nEd=nubo$?|W<(Chg_^9be`c-pt^*zd>C zGyl?Rp|VEqmbK+iM-JgwZ-HMkgX#+b=3P9TzWv&ldhL(da{6VcUl+tcDenCrFL?Cp zv3wQ(SN=#D_G6>ahT)S}%av}k76zAGG5aAO{kq%l*Z=WN-|*zur~kVvx?tUwzH+Od zUM$#ksPOfl_^XTE`YVJ2$ufb2=@WOeHRZm0_71l(DlDJY;OJ<9C}!D%E)^fU+{ALG3skE+WzU+j5;Vb$lBemdHp=!_Wr;9 zh=Yl@+!Ec1!^^?GYqynn^S}PjBjPkVx9QJ1{S&YIO^<%v)1USJ_kR0JLN8yU5_chO zxW2h^&+tZ~0z{KFr{D$v*a-i*AZn1t2pSXSG-KVfQ3bPC0?&dy7 zm;IVQ?)dXpz0F6z?gi{?uD<5&E6d_?VE^Oh?V5S|5$4`{-?z&!J~4CoUw8d`?762$ zacxHFT^bmxLoZd(wd4x%M_={PJ66uU_|~_#zWR00|G6m- zB7E$9|NUoH*1v3aOS$P4?{7PfZUOGw?tlF6-#&J{zKDI~mV=Iiqxs#F3g+j3t_wYX%W>NG30)3~2u$>F7N-3##(x7>A^um7xDlf7^4{W9in2G2<6 zX~KD4L~HKW34Y_r7tijH~YbhhI_jU7IWc`HyD* z?jxkHG)tv(X#%%h_3%$K3IzqA0TLu7BIbWN&@#(a+s3^6Skj#;LlAUHtf`i@!RIpe+4`z%B?vDBt|Z;ulzcUPBy8 z>gw_CN&D9&P8S@SSOLjl-mWMWQsz&-@`LK!!TN0(?`fa_Xx2)g)FR%MsjDMAU3-Nr*j;;a2Z*4W6xNqkv9)fnT|FbT~pJx5Z z+c#govJBH7qjzil=BqXEx!=us^vgIk-|fmx3Gj)PU-+$4--Z46rpiM3)OTV3efVcT 
z`hU=V&w2ekQuy*auw>_!DHkAXS6B$+Bi|aC$L?PV#Eb$6rLO(>v75emZGEBYnE2JT zDS{aP{W~A|w&SQ8fb+42i=fkL{?l)V|EEY3J|I|^4|_jx{ZP9u&i5`Uf@niu{mswG{wJ*`t{znwN2}YG z^kbw3_#aF_AP+ERhslmG%*86YQJ#Twy*nU^B z0Nru=r;mQ_+gf_n1J~jEVh*po_tc_;xOC@aw>yqOxqkPecb#`?-K!pthx2ROPAmI& z-L3;+^4HrQ=tTq{Hm>Vuu#BlKiyiLqU+dRZxqp4urH2E9R`gml{`B!r{PnGvKY2@$ zqxb&Q0UG+o{p2S|Us$T)vyJN{M*gMWyXtw#9}eI3Q(M+eGn-#edcAFhod87s zujAkE+;DW>Pp@q0?iu{bEx-IxDL?L}XfSwDT_MmlgHONr52i`4*WZAP`gWY|SY%VD z#uYvKPLrQ#TOa+GKm6&fq~{#G-m+7|Z!Ny~HPWk%zjdqMDt`Un_4$S6=o$j{n$lSvlh@pX^TS9@a%)J?}Ps-DU8@NFGYZUwf|n<&^RNxnC7o z$1rr&+phgXPQTo8>6lSM;_OrR{Dr=H3PG3LL@FVgu6th1zrFmudee>me_2Q*WEK1R zU*31uzbrQ^rLF@;LtGBL;{zXN{b3HupP>&Sl)l#EwLf5OGW(Bf;l`!^*605Cf2@2L z6Yzh1;*!6|{Clp`uXX)&o7N|~Pn>jXzbkxf;pahr&iOz3ncIGe=LF7m{;lk<%&vRW z)>o{(Urt}?-+x+Irx)rM&Tsy;!-)s>TVd&x!$Xoj_G|te*!rFyUn(yyu@Z1fdQJQ= z!pnYQ@hAQHFScEK> zCphkjn(qm>4JQ8?G_V;$UV^{#@OOB6-WvGXFa7=tKl;hBPk%kP>C4amSo@#0U--|~ z;ZzE@b0c}XJ-q9!yH@SN{((LH1A{|$FJff0iMQRl3p{r8_gJ4I9O$t=3)qL}y9x1Z z;Y`(z!5*szun#b+pO5S3oAvXp`uPihTZekAdkBYnta}Of^jHr9?zq^p9tP|Ke1h-y z_E=kSh;zrQadwLkh3Wiwy zun+JdJ|F0@9_BOP(|}tK_Eeg#}tSyAI zs5d~;UrzX6Evp}pbo&VLJPt^@CBj^fbt@s{xQh_<4-taj6Z#par>(8WdaPl>>mffO zgvS-A*4B4IAAqc%TL4)mQ`Uop6i5oUIirm zT};ROPZ7S+vOddnJl{=-=X(H&_YmPqk9E;275)~&H=%z368};{Jof_<{}9tJ#~t*9 z+bnC6>3Gf(;<*S&Id5e;=x-x@vt`}MbUc5Y5YKl3lHY?&$NPr}uRwcaI-VaP#Pbt? 
z#KW0uk+UC=_``&~=(m8xKSYSeF2i+ZG=OXbq65%-ARb&vw-CH5a97aR@a1SOoTtq;An+fr} z1yIUEh;d?=5aY%;An7Iv_gdC*K+>Hc#B&RfbT#>2ax>kCB&uc_XCpO7YXtF5Fq(IMF{zy zCxrYLVU8l*7Q(%F2S~ch3GuuSkaTH4%Kvsi;-4Tqh;|J~{3Sv>uL2VPRzTuE%;%3` zKEhnb@o<>%XE6T|{w(YY!k@#u3`oBB6CQ%xfaLoCA)X%w6#1Es_fHd!VEke_o?jru z^JdJU#JimE=aC=bojuk8K++u|9L4wnNV;)CJm&yO_Yl+Z{$aut>VxTcev}Z;PXiLq ze!arGoDlDO0g2Z~i06HP#H%nJ@9Ttk-(otRR|)Za3n20CW;({Pd-xvkUcUdO9_tal zXa0m~v{%j z{sGLvTR(~VC;SxX2{C^y5q=tW1tIv|q2XsWykEmdG<;UWi!g^1|8flvXqeNmqT!{O zv$y^#@Bk@iFJT7r03hWYCdBh0K+2gTgnUK9&%z!f{57;gK+@k%IEL{Akn|rX#Pg>B zNq-+9Tv#Lkn{w)3<17YuK;hAq}${p3ra!aK~Yc`+&^1PKf85n2vn!A%wj5 z6GGlc04eVigyX1ZK+-)!i02moNp}(EwA;Kf*7fi?Vql9>V8jyI_Wq|0{ zy?{F=kw$ni>J6|D&%=ax-Uqn#H!zPd9q;eb>7U~J&%vJMd+@)Ta0+rT9nbd?;`x3+ z=Jy2CLH{h@16~T-g7huI&!gOcJ1}S74A=+wAfH(d!fBL)?^zB)JU<0UzRv=_W61AGQNq|+yLdL3}wv_&gYb{h zegKJo7vXg%4kXyfmd6)1j;M)uMRg5DVUaDa)AoHUQ zx#JzEcR=R1j}XskK<4*FLhyNp@XuinY*%zy!V|DR07-X(5YPN(H0f?4d=TvtaL3mm zC({A%;`_fq{S*Er*29EYbZv%>N;ppVb=WV2e}(ai@ULON0CFArARyPF5Ak^b;}@R+ z>#$X6U)@az{QCh#|AcwyA5ip9i04NDDd$s!kmGp`_x0iV5%33Oeh(9Z{}X`B_bEa= z_wQ8vhY3$YZ-C@~fDq5)faITLI^O393(yzS@%(l|JTCzfug-M5ZxI$TelQ)+w-Dm_ zHbCOt!F0U8ldyz-#dJL1MTqCS0f~1H;a{Tv0}}s!!UeQHK;l0{i04NDiT?!C@%|~o zMU4MU$Mf@qc(!&aym7+6!MsQKw-`4G{|@y@h(+_gfaG%@;S$%WQod?S}84z{`d@d$snH5-?jp>-}M8s-yJ8!c@?{7uB;d{W__#WlHix7IbSEt{v(;w35FA!p#bje=8>oK1K zlFwE^^0E0I^uvV5Fn<%GJx>C1T*(1)JUPy1j4vnn40sFR)_+BRCj{S*6aE{<$tx9I z4eF@iT?y4o?igm`tO)O_bK{AgrL8L z@ISy0aK}$z+#$RT^E%;&&>jK%@cwQ>y#E3r-rox-{RNP8&l4j31;P)b-rlO{wh-d| zC4_jt6_9j?0IA1uz6YG-d(_90eqYz`SNR_Ly@V`NCfW&`+5YL+rDEuvi@FQFT z$adWeDEUtxM|tanDDN{&2mfaYe;WA!{(mgpe_V9$oX7EV z*WA=C%rc0b-3{%EIS7Lg@)g2hFo-)0LI{Hp1|ft&7=%FxAq+da+Y&+uAq-;M-LXPw zZ3rRk>(yL;Jszp|={kXf`Uvx(#r_kf;#q;~xI@{^9&iRd6XZ)CQK6(4( zf|4iE`HS|cSM^P-gMIR{lGo62>&kg;py#!V?spH}Z_7T%f1;e0OPoY%O0<`wm=oCms{DJ7TD?abL{{=9XpSZ97T zg4{=aXnz3hk6NccsfRpwLcbjPmC#?+hw!<_KKtF(xAFdo9><=N576T{Lf7k9J9r-6 zrM=%Iy5AJKZvEC-C%<>$difvTdVJ$%oIsDSWS#xY=sP%H`{X$#SJC~}(ET>h?QGb` 
zE&J^6DAZ3vec}r0tn&&weia?RW}W?RS!X}n`Yx`Uee$l7kJ0^}=#{#oGj#ubcO`Q_ z^rPoDfSzC0%=U-OIE)@wKGY{decw9Abr9-@p?+kY-$+x>*RSQSJC+!`T(BWdVh`!op-3KtOGjlRLP0EwdeJq z=bJ>&x6gV{ca%ksXDsw{pd)yt9M5W*4a-**El}=W;STf1cyfE$%1%0{0Wf)SF6uU#TDB`Td2?J5~B;`o)MJ zaJJ_qlzNX+??vbJDbKGgdj3Q9aoEi9kD9T7p8vRV-%jeax}!P0JD;o3@pW{3!#d}+ zuWbJ)^iM+n%sSia8*7jAdpGH8cSj>3=h3gzi|BTz(CwD&GjB=R{#wWlWqvc%_tE)B z==>9O{+WH|_usQUKdakF=*0`sIy!G#sqZNDCOWT$&O1>0 zhkB(8(GfcDSgD^X^{)H0=OvZT*JJ7#R6P@A-uhh@%U%J45;*H(>?OeS+ zuNRD|_bByVr9NTb$qq=-yJPmXTK}fan(BSPse(Hw)jQY5q4Vp? zxQ5=I=RZ1sQ>kw&^(H!hAD!1y`UiRko;&EgBc*<#)Dz?Fc|GX7q|#66Ft0~+URtU5 zEA>Hie?#cJVWmH!cjP&Q&dVwFaiv~F=S`vWrj>q4@5DNx^U6wnUa42mc~x}YlG3l~ zoq3-`=dCF9HKo3e&f8R;Z!OH+oprH~ht{vg{rgbrKKIer-2$c`%=@wRd$2$2xNQBC z-2eKJ?r2Agybdrk#=7b~dEQ}6PCN|BU6>x{{?UhaM;XQ==%uzbLcL(U z!26wboDB78>ks1fU>(b$K5w1lsaVHF>)ba>)*sCMX&qOsb36^@zFD`={kUl#x2$tN z?pWvkY+A>?P;a64*+K9yc#4@x?r)5-?-A|XkLD3fKa9_}dYP{)>_3jr9rm$g{bRh3 z=*M|qw$J^!X&<9UQs?Wf0d!s#oi}HFqB}aU&VCN9yKnV z);Z37>*&v-T%Td(IL6R7H6=Hcaa-u~Zdw3udvy|iBM!zr8GCz*bLyu!V$J2`*S3kO+f_1hxuFvH@vQI85c^cjR zoIalCtMW^x%lZVq|DsRiK0vp>i*EnWI_I5ye0%#TWqZBo_R~rpK)0V&jwh#Ve^J@r zin5RbUJ3OT>x^%O`eCRaTW5UN z6W9*dw+Fq>Ni*9^n=y;d-&Mx%+2=g>^?AG>qvw67S&{?`GUV_jjOA<#nP@;XZp(d%sysmso#&8s8^T z&Sw@qo;mb*mO}jmeV;uIo&}@nc0Cd73ihDeNuk?GqvtiCui*OYlX-pWV|kxK&$pm2 z;l9N4`%=jz^nA+bx-Bcmvl3hlt_ADCMsO#%8*B#mg8RW%uKj`>vW_X|8ejncp5wlMo(>z>q7U_qii>Y?x$DD1L%Gx@qAr^)4@`3 zCO8|M!}Qa6otrtX74*JdMepZ^nf zBsy-&I`2QT%6+~Ntm+Kk*FoR+8kl(t?>89Zj(uJqyVlu%(>flQ-^%{Yn0Q)yT(3Tp zuNTqt7}B@#`2jtjQ6=Zm^(*L2`1%?hKdm*M_vrXpCC{VdE9h}9s`o!;{)P8X{a4lz zWAd7k>zIBP_n)#(d%+`p4xjg)j=Z1usqfEP=KG@he4h8p=YS17|2%^}2V2(pJk$3K z>KnYT=%@G`fH8Sc$wTOIk1Ef*34Ho4Z#v&woj zmGwA7k2iWIewOQw9&eYDQ|R&b*{9yGpJQF@lLwVNgy-v`tjoBvE=&3nzD`!wXBR!s zJ@h<}%=3I6F=Owu823^>*P-9X7(l;|5nJbTeO6z_^ELEGLx0RVpRbGha?W4*$0PON zrgB_OJl{XM|5NL%YwFo}nXe1<%6$J9GhgO&kA8*q)~~Y87*j7Q^(m!ZLeGCz*-urw zcpd5($Dv&B9dvuUy2IyqbUXV>K0>#1hQ1zpX35>$&zQM2>w+<6t#e-_pMw=X|LCiE 
zPGIH-JSX%&I6jQY(@LH}x4&fnKl!|79oMYChWC4YE%)Pdagonk%5mrPb$pHsc^2JI zS^tas9No{nk}K$bTFP_&P@m4%?a#xPaz82Soz(@NcS=7WoCr<@7nOM{%Di=D-d5=E z1Y78F9cYWsDd=$>DftvVu5`J*y+LJrqssR3==_5Ikn4@kpHT7?I)C2&kNDhb9V_Mo z-mlD9Gv9&ZHRG|F?VRY3dA&WKI=M^9NjyI;%>3j+)Mp(B&5X(4$pr{n@UZqYvVsvhxq z&OW)O1?T_g(`$xB*SMmh9{axkv>2fb>kI(C` zxNp$$MI}$8<9F2YFJ|2LIUkI19Me@^U&{MI)jso=^w+F2y1f-8uc6!9uupwcf5X=k z_Q^X+ZldE3>{CC~6J7`Q$!AKAUeX?yM(^7`^u8T4?{VH{EModi+-J&jb6qdMaPwt=YJU;Kc|1; zIggHCQ1T)=e#t)dn*Nph)joMu$#rzxhJEUr8g)cl_Q~5y-bKglDeIY9K;C}_^qqWP zMmergbUR~up(Dzn+sP|=9NkXQKJ`iE1Lw4Ta!JXv=(rvG)OWSBBWl_w?Ecl9$nOb^GjR-8$}@+0TI)qgRo6zv)8XcSg)?XH+kBL}S*;IVF#y+nuz} z_NJ|4#mxLgGuA_HgnWwW*YkC01wX)jqpj^EL%@j5`qHagj=aX_d*wMes5}QplyPIhd~iw`x2%kdUXQQla|*gX z(HroZ9Z>>3?=B^$(DTkIpKD6`D!%WB&Rft_M^r)QEh>2xomaO{y`k6Yh&Jt$x0Jk# zj%(VdzNgpby4WYTlzfDaOH`5d?9~r;YGE2GzKE;t`t2ri-5 zrLJs$Panv2LbrdU*XxLm(e0loIeKGz`@PC__jgsjj-$$TEh+1^q>FrhMYp%Af6M)f zZf{-5TcN+LY1Sw7n@T=F&u8#W$n_gRw>PSP*AeB>?TstBh>oANPra;v-x1B(C(kQ+ z5goUryg&558F~E;D)-qCI)7OIp(7ea=Z`5lkItX6&wV>>9ha?hKduG0mHRNagmpgm z;Q4xJUq`ftF*cO#Z=&1ne+%aLyrO(AKSt-B==D3IQ;d=SCH;I}7do#`xt=5F_))z9 z&q0har_>AR_@aICq~4JCwa_mqc@`bFsGL__Igf$2wdeV}u$lgjD2r})NXaAUyqtaN zdHu(Zs9+z*)%~L5%F2DSs@yj_=)7J1C*DWV@q0=>MCXmxkozvLT)(379GXVwm-I#* z(G12otJLSv`4#)*MIGpfs`jy_)K}4Q4g2JEy)m!r(BD$>4mz%-Jco~!_3C>&a(`vf z`(y~cPnN86Uo`YG=bDmFmHzBI+xso+O**1^^tcw3yom1iz&`au&G7uNPd--i zDLO7%ZjVc7+!6Jl)@8h=$SeBT61a$Cs4n#~eC-UT@YBEuiBo zN?t<8FWaZSqBrMtV4qx9@+LZN+dlOj9pw37pWIaPK05BeKJ`Pr1v!Y6vyO#OpRj&+K3`hLa;Tr^NBO?i z`;p`Gce{00IsYT{c#rj-tUG$Vr%H}i8OQmh(RJ)Y*KyqX8(E(Z;9L1VsJ?-p`=Q%g z*L(4PhHh_L$vf!w_Uu#N*L(B6W}kehc`uE{|5@S5T^e=c`=_dEXhuix}>wP<-K8&$n+20_# zpHXF9^XPFE^nM-D1Ujy$op{^ZXq0 zBgnY4avWJaf4-vUz4}q=JbxR?`_*R1J0TzIXSna`_&M%>%;fnzg)xp<=lc#*%5jwK zv)wuSIB#bC7R^{kKet97L)N=j*?vD}9@rhl7~{D0FY@_b`8>CzU*djJj$=bPpKWEo z$LM}e(fvdpr_Se-9%WvyvMyO=-iWe3lgj$chWa9U{7dNZ@1vg&Tfu|iVelw;9E|>% z?d`JO%5hF9`>O^Uy3IN(ufHAr2%lR*f1pABGT(Pbe;;uO{e8ro^{=ol*0~=h^nu(r 
z_Q}&qE}{2F*}nIU7I;qCCs&kQMaQk_SNXmNI(|bR#Oo3rzoX<`bo{=$-g*u0uYYOV z7c3~}JENR`MLEyaP~QqRmH9^@pQ8Ie(}(iB``7mVyOf+n_dlw0tb;Ot0li*}cz(UC zb6=h6Wj-JN8-AVp7Bdgy^Ov%pA-&A=5@YIPNGv)^XHX% zMXA@&`5WlGO{Kr34`+SRdAmw|PpKcE^Ny6)W&b*Uknh7N??XA|eP|ss59WGdjN8_E z9iCceyJyOFqkqRou%77tl1e?L)cer=4Jh~BsD6R#rrf7X%Jo}TuKT94{>S=F*7XzZ zoJX%i0lf|j);a#Q;8w7yybm8M*D3W$Z1TAXJ>D68BN*~4NH+?kckB;wG>T$?}N6)QUdQ@ap6BR!ohdo7$vn=hUpcRl;6!i| zoj;|IWBu`b{gqrs=hy5rZdo7C=T`gVRV6pjahvw3Zz=oVu}|Jr@*XGb2iu)4X-++>{=>CS4>#?BxJfMQ- z@3ZLpUnAu8kT=nvH||@10{gX&2iEyqd1#$^N7nJ!d{Fl7=Wu`8C(kLlf{v>y*Sl-GJ-$bu$9kjV zdzIXejvug3J=QY!jeYX4l5^;|N&D2N^!Yre?2~7dJdcj6+2?vKTgS~WQ~xjSV|4zW zzL0f5=N~Bf5S@SYmG-z}onw8`anVx_)wNHIXU&i~qee$xB zSJ83P-#}h(bISc(LFX;%0`t*%H6^d2^VaQC-_Td^+_6vIR`MP?Zr?ujmcEkpwog7( z@+mqlwc8%otFPkqh>q)5@*p}cZ=ZFZu#Riif7l&G-)zrMXodR?o!_hEK6L(oed@8k zn(f*rXO%pHj%(ScexR@6d1Ie^s^sWf?QuQmxTL<8{h{OflpLem%PQA-3_bpWeQbW4 zI>$fMY;R{+Io=$)oxGCA(d|syr#__|N69{UR>|||xJCQatIBaK+b6FmxsHxow@-aT znZIM7ysP9rbX?0m^#fhx`q?KRDftu~H~5|QxU90D5p>+BlJn@eab-QG(e)_V$0h5m z$5!ZPzuVs4ka9dD==R2xTtK%sVV`MR79F=>pL#_(o+bO_nvz%1ajEaM z$MvfB4?1o@$yszwr$x}+6LC2NtQ=ik<^E$Opt|)m49k*(q`kHb)4g2JE zC2yhQ;=T5`LFIUc&~YP5&Y|PR?Ngsnj%U(7d0NS{=(sui)aUgL9G89aqLORqxHbFK z>&o%0+b3@*c^e&<{C;~}N~x#OaeYc2K*z=QsSj$Eb+u0(Qt~J|Zrncg3H3belc&}5 zK*ycfr+%ueL-d38xGp89&~aIG+>kPV#6EdU$pv)WgnjBoeIwV^K6zTnv*@@5`_wDS z`Bm+cmz2DMj_di4_PC^SJiX|+J|)NKxPpD^J`{Y?A&!gii_Ng!G zn>il)Cod~`1s#|DulBeheH+_F$BigChmM=DPrax$?i2gul9J2lxQc!1 zi~4r%7yINTB`>4nPVG}a(|2$`|95*_LdiXNzCAqOp1zaq*>`(NUO~sz?Ne{)GS}Ze zc|*xt=(woW9+%K}u^)6?Qpss_+^~I~+auPoXr0fwvwE5P)BgYQ`9hERdJJRof|8do z{VP7#+h_Yn`fgso_Q_{TPW-UFza%;?rSIYOgN{opxgR~QSpS#zRposqhnW@Le=x>z z>%ZjdOY6*EQr2_XK6ypSYk0nH_Nh1Yy?jo$Pu@~;6CJm2pL$E*$NQvx@{y8H(Q(oL z(C0W3=<)QM88@Ks=XGbDoK^A&In|4K1VjR&evf-Zs)Xe9R|_k z$m03^Vx8lt1$%zd9^b2sA3*P)Sh-&Y(fcQ>sCRxv#8`6x}An{oO{ap z^c}Y6_3OvE4(Pl=B@dzVMwRuLRKA|6Df7F2hHS5h?tfDMnddUPooOY{qT5+kzV6sl z=4XH2o;Rcoo@?m55hdr)c{A$g>|ZdB^G~4bP(qKZ8R~nXK6=!yPoU$A==gf5_y2FZ 
zK7iJ%=yujZy&mf6U$*O6bUQ=n_?1wv{$IPkgw_+s?eS@Jd>>k`gnBd7_dp_PzXyPsPw;%e z7|*QpeU*XVQvW3Pfo|}6#F#v!ziv*+=#w{@8hG}&FeS${Edz;DtQVWKVzTkJZl{nt+#kiS!evRev9{0 z`{cTk*U|ag%KLE>9lxjF=J?U^he|$1$47s}SMhT*bbL~qoCi9-Pss!5_#y4O6pf+d zbNU_b4|M#vl8fm0DdqK2Mz719eO$E8b*NkSzP64V){po-H|uO~N59K;w@=iJW9d{V#1^+3m`mE4bxkL^<*)IFZl_Q@kk9z(|!mG84oqQ^aD-{UtkeqO)N z`;&F@qLP=;`E~n@Z&=4I>#ybe#AkTbOHoa)b}8!mGvj{9>l0(_HUAICtv}+pt&?LV zXVLAC+Gl*uI!=T<8FJ0cyk$M$^<|yBrsQ>WJKOe|zhfQuLvDq99P&xXy?m+?K&q=-JrD!vFfX+Wc=bu_<+*#GD` z{DLxn1|3&M$1PfC+^V|H%D6guJsart+_HYXOZ>l5?Q#3cxD)hvPSN8@bWp$cr6{Gn zmm>f6k>@$6Kjn3Tp695N^XPfb*yp)AYaLh3Y_D#{9rQVy?!@R)G>mR{L=U-t(e37y zTtK&5w9ouW?YtCC*(c8^xs2(*@II**E=9}A`{0`LyxBnKZz|)r(D}Pc-b3dfDDzL1 z`N>3kUN1U6#`H^g-w2M|Xa1ypoU+b#s@54_vyKhx+)ta<8Mkd6cdau%b&)#f-K#(A zi2BfV7*KK+U5A`_4KXHF!Ozu%ZkWWgT~|^Y2NDEe8I|pm z=agJQ#~s+GeyEAg=*T|#SjlJTxS6Z8$Ia?Rwuk52Q*s3zSGUh~-ms4Q<|}nZhh{uQ ze{MJ2L;cE~(J024N9zaHnRjR%kIcW}^Q0Na_*;7EKk<7Q=y6YIS7%g2k7rWJCG>cz z_PaZyCF|I*{%PKCl-F5Pf5Ycj?cqMsoA6xHAMo#P(EUXGt-j2q&M1NIr%TBxbU(ey zygqb%zh0#?8bHVUxBg=C5ITOuKJ`)U>5Ovr$$2G@qvJ{%^PJJET#71s<4e(!{+j2m zKANvXl;_(%X0F;99bk-y)<41bQLR6j&nfy8-siN$=Pv%{U`#!s)Vq{=FQ&iG&->B! z8?cT!Gtbw&8O!K?t9qHw&C0rNpy#)#SL=+n(ev9;@*aAADgMS``Ub3(VTUh zw|>g!ZR>yFbAolOh5Cwhw!f;^?BxFsqfc%qc@y1V;u`eXPY=4kr1^y7G-Hge%b0cM z=QPFZ&_21KHu7*3j+MmFL!$^4!{0o?FM7jEZ~Nq$l2_1i4g1vB^*XGNee#x) zchGVB_NlkDm*=2;^0AUn&~Z_!Juacw{{*Z z^yu|?j-cB~E7`xn>2`AVeZ5)7IWz0OV8)4SGmiJUGG_jk`v+rOuCR}? 
zK6y>a>*#)3_W#27HLYWG9qNDJeF!su*BNzTj4A7Ecg#BD^44+II^)aM88>GgE7lpm zqYvZt(u;iGWDPTa&+`~#+_28Lee3_g>(DwLhkEk5^!a-UDZPTvhBI&_KuSFF9gO}#;9w1sYOTgiLq_D<|`-A}D!@87n!Gk}@j@%aT~ z9JS8-VqV#P$v)eg(Hru9jc$KV$qVTA7nS2%#mw*dI^I4utpAbEsrp5}uVkP3``XX- zLAQ6Pl{7)EPDL2)^E=`DF5ESseHfnAlUVH>~1 z`zN~JVI_~D`z*)A~{u9>|9lxdIZFGE7Iq%`WXWWfC`Txu4W7Rys^TdoT^Nl%Q zGy1n$GdJms1~JBIbB50wW?VDJJeSRQWWFiuV@Cf5YvyLm#~6#|n|DT&W~`bA*}oaL z&9~q=WyUk}Ej#)Dy~sF-nOpHbW5!8ywlkVC z@=#~ghcS+tZ^t^DaoT))&ex1p^Bvfa88^(sth*Vv&39zKW^9`8#Qkl?mif-CuNjZb zBdnJhPtAAXyv>-nA^8fPM;K$$d{>@JW=xw$JEJ}`4w&!8x|=a;zB~7k87Iv5;5wLb z-aN+Z!;EX@dvZO^*figZ<2U1(`QBWQelqrBCdYowm^0sp_f<2_nD5K`m>Fy4`*rgF zfRS;_oM+w4*fQUr^)cg_`2pny0wm&6qbof%P-vg!zfwFJ_!FKZ)Zvubh2^OHF*Ggiz`>5LZ5xMY4R z_kkH#%q7;}jCJ$Vc%L-mhWY87zZtj9&*1fC#-@3ObunYh{7mj2Gai|r#p}zAr{-sK z{mhscAkXsp!Wfh0=dgZeOq-wE8TFZQ!2CSc)r?tlnRPYei23=fs~L0V7qG5o95=s^ zbv5Ipd5(28W6As?*42z<^NU$mGcK54!n&HVYMy6Z&A4oSDeG#+HS^0@S2M1gU(ULk zam&2Gx|(s<{0i39jQi$SvaV)4G{1^BUCmfCzm;`0tg9It<{Im2#!d6vSywaenBT#=nsLwkPS(|o2j*qg z)r`mHcd@Q!JTt$Wb-f80yD;+}*42zD^9t)~#y<0VSywa0=J&C#W*joVpLI3ksCkuj zHDliV0oK)w6Xp-Hu4bGve~5K8uSa$^S`pLW;`|j8|!MuM25W1x?+q; z^S`sMW=xwu!Md7p!2C(p)r?v52J33Z5%Z^5S2O0!pJrXnIBxz7>uSbH^Cs(R#*+E7 ztg9Kz=FhRNW?V3To^>^2)x5>JnsM3u1=iJ!YvwPqu4Y^}e~EQ9uSbc^EX&mGxnQzSywX-n!m}q znsM0tE!NeHW9DzOu4XKlo2;uDi{|gJu4bGzf0uPN02W|2Ohsodpi^QbFL@4ok=B^(Cy6Gr#`R0;QHCeic()f$DPd;1vOwmq%~ z9e0Y3JJVm%A8Pj#O720&jiBo=Y8^|~S%-?U4*A=)w^Pt#?o0G|CX_sdZfC|m^;!KD z_oIFCoRTZ(xFu!1*3kXd?cm`j$L5O!OWj{ep|o^|jGuHpYePJCJLVd;hUwIB%$MsO(vmPa)ed~A>>RorGejyPhF~&5y-y!QAiD<++ z=0bhSdS@akS;umyFIi6{qGjv27V6v9FD9a0>$n%{J)_jGl!#IoV;bFW-ujgj(YSRi zhWf1aF7{^~=gr-TXu*uD=1YmFZpLkNd-2_@3)SH zdM?!S*7>`ybLw;1{NsM3obRt%=e{ml$1UsJhx?)43ia|>yS|Q@YqA~~WAdJ4&l}Ir z8$Iu|{*vd9^8MifJ!U=7@i}yS-a6+oX`SPmvX0Z%IlejT*XR0J$5rdR?wX;#7wRY0 zSm28@a$YN;zZ&{$*4b|UzU}Q6 z(Cv<++bvn={AaA=hMDbdnsE!={+_Z9$Dw~3`UCfCZ+{Fu@0{L<>xG_oLCHn*yeI8@ z+}3g0I>$F_o$+NIV14bA7nHn+&ac_$_?NBYignIoO>q3Cs&kQMaLc3=XegS)84=xWBAp*8|$)deCvb=yCR2 
zXS)M>3(ngGL`TNbv@0msqYHv58cS}S)==PFI?nSrPZ=d>r-ko)}PtGcN7#&x% zPkmDF!Ry05d0NS{=(u_1{Fd% z|5JJ|-e=KyGfFPw`Tp%wU(kE=y0lNOD!GP^TTza)p`YXH9`tyd=<%Ld|0%DJhaks2 zu7`Zy*U#{CDdqPpmoby$d5$q|Sm*bNca-fmmHix|+do3LpBksmcC*2fewO@D{APDF zik`=q-jDknJ&(MSC(!dKE8Aa0w_ml7b?Y4eiSk_Rdl+)OLzuZguXl{GV4dTgRL-xY zUO#32u5z4*`Z zGV2~}D(5@)SY#c?)$63ptLgjrdP28&jyw+E&-GDWHzRnyF8Xj@FL?fXQSvx?+(rAG z&!j$r^R!PcDR~whH>aFmLnH2g{TBOuJo7lu9`t;(==m-v$GKsj?QQC#dB4RNx0QMm z-TsL(K0Afq;C)*;uRUd5`ksKSM@3nW6=gl@==K}>7(Ulwj2lXQ8{Ph)vYk_1phOH?}U9UTW5W@l=V6Yo?vE*{ZBIvlbHTA&ueAfX7wxl-oCPK zE6VoQ(d}=b+dr_*cKe>(c33~f^H6zRl$3e%=)47V-l}zu>sXl|J*9m-DfD;-FulRo zCCc&4E7x&HIj$q+xO$%2?)NM8A!UC#bbonteSMGU6d%H=U z;qwi;-L#VX(e1|esSoNi`J7{)Jf!4NbX-o^e-YjPq567FDu(`pySuk@h$6&PdvMAQNPCfy!@-({Mi}tBk^#y$Yz&^RA_;^gQG`_9*kR%I~=ql<#AVhkT~Ij*?~M`JYl=PyOg|4d}~xy`aaHRq`-;Tx0gB z=X8P3A@<4RN-m+}%J!+x=_~kp)jqkRMeZ@*U>)tP{}9gIR8&H z*LeheUm8Q-2d2%8FX?Ofd|{njR&oWMzi6L&RTsJc?UQRtUPZ^P*?%41&$Es#Gvf}F zafjB)M@l|L=Otg*zW@6$Q{{Q6(_CMDBk#u;Qy*07S*1RV>AUc`6Ekn(`KV9g{PfLS zcZ{j$m3l#`PoVRPm|5ceLPvNG>RVVBjH#ED`ixR9qx0r4^H%N?eIoB)`Zm@JW9o}a zy{gn}=)7gj)VQDY39OsGo#zL})ay#Uq0~3fd7GGd2ls(Kp8G)GnTU2UroOAxn@W8j zo!7$5GV8C8V*T}9tUt!okCpm~Qa?lIMRSaMH=mpIWqfYZ_waQ(#?+HaJ*Cvs=)6A6 ztZ@Cb$o13ra{Vx-KB&~QN_`leH-ee>as8C_9#hskhcWfMQZFd=33Of&GwcjEf#hq!(iQ?D!a zhEm@^=WSwUjq9g(=KAS}xqcW^-&N{OrM{2OYhmUiTtDSJ4wdsb!kGH8Qa@4ZXXw1> zMU49>*H7tpDg7RdsV9|sN~x#Od3~6vbN!V5fYOgKraq|Dvr2s!oi~D+k8%B!{+QCw zVN5-*)C)>|0-aaH%*VNYN`Fe}Ph(8Iq||4WdKsNJhnauo`YHVdrC-6A`l3>=D)kyV zZy7TUuAfeD{q$eBei&1)EA@s_-$3VWV&-4De)=%3pZ*)y4`b@PO1-Jn_tAMR%&c?$ zl;b&6j^_wt>c>j`M5&*l^P(3s?%%n7O213#_h3vtsnk)K78!Fs7bY>IJ1ffzB&p=F?n1y(6Eu^)p;QjH#ED z`ixR9qx0r4v&r>S)_Xx&?+V7$7nOQdsn^hX%b58r*H1Z*RpmU^Fs5Et>J6p7fzI2+ z%;&g%TITxc=ed3uQ{PqUO{Kn%&TC<2i|eP)ieUa;@ zUO(kJ^l6@=)4ikY;*mT{+QCwVN5-* z)C)>|0-aaH%$K=-N`Fe}Ph(8Iq||4WdKsNJhncT%{gnQK(yw4leNm}bm3j@Gw~U#u za{ZM4s?uM>n0j5QHPe-ZQtD}RULR(5xqeE2KIJ1ffzB&p=387pr9Y+gr!l5pQtC5Gy^PMA!_2q2eoB8q z=~pnOzNplzO1*~8TgFV2>!pFrmoG4lhipVFUF`qLOwFDdmI 
zrCvto&0*$0xPD51LFrd8roO1ut4h6w&RfRJKG#p_uPXgDjH%a^dPAvipz}5{^PgNl zrN6E8cQB^DtJIrHeIK3I!pwhh{gnQp(m%qO`ms_!QR-*tyy#_&`){tF((h9GJs49@ zD)p37PowktFw^4tDg6PZA7e~?P^o8?`Y<|g1T#P6`YHV}rJuu?dS0m)l==iZuZWrd z;rc24DWyM+G4+yCpHb>%blx0he#G@t`U^_Gf-&_)rCwF)HFVxGW)8T1N`F=9uVGBR zuGAY!eFL4hiJ2dB{gnQ;(%-?D`mR!MD)oJIUJEln;rc24L#2O&G4*4mexlUR(0S3z z8TV7JpVIGA`aKv^Pb&44Qct7v`Y?0I_0tFQ`9*)m^~0F@pi<8&^}oI(*mDg^jBOzjH%a^dPAvipz}5{^J}i3&hq(1f5Y{|nEI|# zZz}bDbY2THCtN>$Ht&D>Tdp6*)Q^?=iBdm9=S2&Q`yJO$AIR5#`g^V)#?+HaJ*Cvs z=)6A6oO1p20bD=*1J@5@>Vrx>tJH_lc_WzlBiB!##r4xaas4o+o>%Gxr9OeqD`Mu1 z>!*3HpZ=NahcWe%QlC-kWpv&gX8ywU(=o1}{*~*8G4(~IURCNfblx&%qKnar-k0mA z7cNF?7*nq+^@dX4K<8~@rsHC?rO)8{Y3D`$A3?@Z-&N{OrM{2OYhfmFk^fH+pT_mm zix;CKjHw?h^%JFjhR%y#!MH13j1pSm`stM~^8XPsj(Sq5r<8gco!5t%u8aJ?g!ojh zpLSp5dti*CKB&~QN_`leH-ec<7o$;qGS^S9axuzbOg*pE3rc+goma$6&&6m`@5A-e zt6q$zF{WNp>N85cjLw_G%+)SN^E%4))2m;MDi~8=RPvJ2uc7mn^$MPw=yR!VADh_0 zuct<@q|WaTccH&uJZ7Ha{xjn|`g71#{TV+;)qehcvhw}%{b1r%%)5eh*K4ru==Z&n zO726y@0C@4?v+=5-ZiCcXBM4b)@yQ}===pGSJC+^_NlLGiu=$$xvu05bljeO>ic>v z?kD@?LnWV~sSj{J>Wwc(S&Yd;N*+Ov zFK3^6UT?y2*(Z-Hc@iC0vQK?RGZ&*-`{c5c=h1OX_BpSbb=$&*r#66oAJ7`Pp&F? 
z86CG~pL$(yelcp;C$B4c3mvy>pL$aVFGhRz$@@w^M8}=kr_LV+;`gk0j-lhal-y^X z_31a`5c+eMd~i}H`TqawFmovy$IPwxISR%&YkiErJD~UE^N{lIh>!JNtn2IX-u!+B zW^(+z0b?Apep8+s*4gj6b!?hBzCFF=#i(VSe4ykb^t?{Bm!EsQp*=pSx8ik%j_*}+ zKRP~E#*d-f&uNzX867{a+H>*)A(9p-$|ahposLC5de zr@pUuVBk~h%t+vxk*)SDQ`{!8fo=P><7u5+k2LcM97<2lv`UW(3?=TiJ; zEb!by_dBFl@SH{WJF4V7y5Ew0_A_H0SFCeA>edHx|?+b_a z@yt5!3&kbmc&5~GTgbf6r_lHNQA|IQ z-|ta=FLXZiE1_Sv&hLSazLj}AM{?-%WE|5EroYSmuGi!DL-a8mFJ>Oj@#-UZKhZ~W+!#~eQR+>l zet_r4ul&DIsdpehKOWLa{!WZOmd}IA?;Gu5W{{t!>MghrF(w}=LXYPZop+}6qj%wBcs`=@x|DiSsrRAtvgo`ar9Z5b zTyJ#Vm{QLx^$B#|l-}VI|Nmusen}ZOqmSkFg3g~)>hnsyiq79c$8YQ7I8SB#E;_!c z)c2J75jyV-9T&a3JwKt3=lsxdNo9VoQXjy?tCW zmw5h}F@6u@c->AYuhX(JZw@mvoFB%xY5fU&?$;;s`!Fl`BwoKtp3`ZbTgv=a@GR6T z@1xFsmX-aisehMH_qd;rl(ob@aG4(Bo>EIqyR=o}uTT z{6Kqqz3BD^F#S}%j6n3g`Riq|M@x} zxNi6P{$Jg9=Pu`j&RK^La)hR@iG?J8WCQ{&JoR z%2#lGm6V@+5WbS{*OAz}a`sF73zBr}lDLMyg4C-Idfxre^B&aB`OQksZ#mZoxjvT6 zkF#8te<*A38fg7%q4jTsbz`j4+R3*~JM7R-{ymcX2lW3b*MI$RSbHz~dgy)WwElOo zj>&g3&R_w(AknL^?iI9?e#%|f4tKP_iuPE=PCv9j`=b}u^|7AF*Rn21#`U^noR0i8 zq+RA=(_d4L{4nD~euQ-j7O<~K>}wMHCUm{rmcL2=d>BqJf1vqnO5(TV8tX1JpB;&P zPhvlS=2QL=;;3&WwB7|+_Z8MtNxhdO_1%=z|3Fgj6WH_#_8qVQtJct;WWNCmuwDHr z`aunQbKRHgVfE{o=V~|sozJt{KXXMguN^LEe>$HJ&FvexeKWW3YyT|!KJD;W`xDuJ zX#X7b(+=zY2K$rQcSy=yX3^LECW{ z+K#i@$*=uyv$#3Q^8C|#eU>D8Rr~i?$JB5W););dUy;AVzED5? 
zc&Mhmj-dTIxsl~J1a{}D9*W9`4h`ua&oJ?o(D&;V_R0qwNI zh~#|6^nZy>O}^{RsUYbE}An3fCOXS7218QuE7%ym`!7npx?kMXG=Jt@)K(CgEV{_oPx+TpSK z-xzOdSpE;haeb?W*1sNF|AKbv-z}-nkbcJ1usq_v06NY_C3+G%j;1B!XI6fJc@JF& zR-yA_9Xdbu)tt|v8Xm#A-{XD6XCdcTCt24T-or+^;~> zS4ng&G<~Cf>;?IKox2x)yrg&HAyoNZO-KKe}C_yI|T+`my&(`for# zdQhTApmAf+_8ZSRtDW;$l$^(!`~%uWvX47~##il-FY}`YI#0S`-3;HuEVV=r$4q z{ZOZ!erw3>jk&!`JN+{wX~!kWd9CH#g4SbOR?<(?{l72jr^kcG@={jk@Li1^sRn!lr^^@o>Xg+=ViR+iQyujZ%%;JV5dK9MhlJ1wG z@ze5F7x+7eS^S(tFGAy2BJWMzI)+|{PSwo!t%zQ@!YJ2ZEEV#0Uh6+@RB#pFYtE`@uRCHx(-_J zM*Y|e@^*|H{pePS?tsR1$)Djp0W`i(-u^<;4~-v?=n-iAxPI&t@(vf0DgEeaiJpVT zE$YXYeA}pYnBzhUT zZ(P@Z7tT+z-`&!W-jV1-=>GRq|35Nrzl9yv!Mb_g<3ih`OC}(zmgrt+d-O~8 zy@Qf{;*k6k?hjzvKk}~h4@~<`s2_5t}9^t&XV5!m#Y3&|KPz;W%|_s&c9yQ})C-@1Od zr=5Bp$tA8k|B}U3OX|}iDX&YC|3q#-kks!~Qoq7s=I@Zi_ekoyEAb!Z{^Vb?xJuac z&U!*xT*a~+@Jp&d@uW0*!1fck_K3So!XzyIw${|4rHx z7GU!Kuv7m*=y{Dn&#U|=nXZQFSvB!XYS>eC}IM~enTe<%v_cybM z8`mxWj&)WNKMWn0BhdWjv=g_R^HhF{_3?ZbUki<|gT{AgCw^4&{(Mnx@?Jsmp8Ztv zImz;JNO`r;^!3p6?b=D-BY%hcGfCVOG;SIix2&D-vDlTw*Ixnu{>r37^84xClJBYL zlb#n$&kLGCEQ{kB8lsQ#&tvEhP6}gSxNl9{0I8u7La;ZOX^)GN#6`j z-vZP0+DX4INq;2im#Ql>Hb~NUOZ-DQXLGLP+>xJSp2DWT;5sjtxX#P>a-D|-?8g%O zsl;A+73moNHIn)@O2$ct^mDGco|BZnDfvEt-JFLxPjgmRW&Y-z?K!)1_UBxbpJjg{ zX{TdoyPQDVrQw#?X_szExnq)cnUS=|hWs4s{H@?N^InqAu>2OEr^&gPb0g+#u-xUoTM|DjiC==|zYLAv)lPg>E&NCJ8It%GNqi?Xz6%;ZqMi5!$+%sY z#BWLB_o4A8(D=^VW$}YK$8#>`+{n3~^DJlW?Xz?(IR|o%=bX#AnsX=Tan7ncWa%4o zcH|t)IVbtPnw6Y8IZtv{*Jb`f&aRw8IVW>2=G@GAn6vzjS^9>Y9XSVbj^|v+xhc0< zk8+;oti4laZ_U}8b2R5{&efc|IS+H5=B&PRmQPF0?wmt8Cv$G*Jd%8G%4yE>`b<~n ztjpPwvmH8rI^^H5kAcpgE{X2R{k`&|7m~i*-!IWa(D^klzskN4n%}bgTlO>1{8lA; z0~)`rAN!76=Q)FZ^qxc?K*!~&e#YaOc35{8?5xWplJPtxzsBF$ldj{E_4!PazUn&C zf1LXp`62Fapt}NK$_JXtS55*LeeTf$vzVnu(wO>9TIyN zG+j4r`a9ZB{uTE(@>8@QEMV`K*asx`A!xc`*tE&>9r=Ft-}3L-|G@(Gafy9GVxQ9g zY4(xO^fTJ2|E$D5r~flNKUBkIXxxhSE%xc!;bv~%(*6%TPn7SYf8=NBA6US?C$aBK z?1#|&j$qU0xW3C}_Vsd`>pU!AKa<##yF%=h&~#O>>GSlDypjHqU!Z?r0ehXqUN5mX zLemvs(-&Fq<;UnBxkLZJ0`@kEy6T#A 
z*XS>K1M^7kF^^yY`~7d+0y;b@~q$uLiq`C1?+7Sd%MKm2~F1po4!N;$#>I#@}KEHSis&VvG+^tgV1zCu<2juKl#`6pFE`h zU;+D>#6B*uPeRj8!KQzu|Kz*qKlxqy4;HY`N$m3y`yw>m5^VZ6`cJOXfAWa_g9Yqs z68pNuz6njY1)Khz{*w#zpZpK{4;HZRN$mR)`yn*l5p4P%{U`s7{*%Y_A1q)$lh~8H zLF|>#bXBnFCi+jllm3(6r~hC9d!58yFR?d5(-mOT59mMnr}UpZq5oh3dz-}GF0pq) z(=Ebtet&V!<(w-yH(=9GXb+iGB-^ln-j(PB==I}Fa{pd=cjC$_l4@9hHPFB7+amFI z%C~Z#lYc1%KCy^yo^7u7=jT4jNZ4D=Lyk{pf;3w?O0Cp>Z9uvVy-~mBsZ)bT2e+ zKysZMgVuXOKU~vJeYbKR%YWiIh~)dz>V7uk5VV|Od1VEEmnkb}T%sqSCZVnc(&r9@z#J>Vfw*{MS z!}yojcO-Fpuz-DEq7Nkg6KJ}s0&%zHd?fZ7Nn9N)V6T_x28q7~nyw2r)pC9kdygcp z4;HZZOZ0%mKLXS9gH5;N{3P}nN!%PPV4s)h1&MzJrsoHnZqNBi>^qXUJy^iLFVP1Q z{|QXb?_R{+f%B8tYb0@Xuzoeap5M+qWdwgT0(*lIucMbH*Cj)JQwS0xYPh zf3y5q)@AMJR*CL_X}$Hc-VSJoQ`%R!ACmh#-;+GQIFaw*dER~CAMkh1WgYt&XgS@o ziFF-XPM<^%K+74GkLLZmB)_ekyU_G|vcPzPrazSEV`%y_{n(TH!h2ODmC(2yIoF@h;>%?- z>o7FFTB7Tq@ooBP*LLl2SUZ2$xxNK*U2c_6<^8ba`^ZmomfsIedo{wQZ?RvI-{yW! z9`Jb~`5m6C!UFzIiN8zY@0R$-Vci|LpOTcbm~%DfE==2B-lrltgr3KdL?1)X<4iyH z4*E;nU|Fh!2ST%DOiBDPA8u)97p!pUgx)qvlul@(Ie#&2B{nU>hkmx~Z{-gRI%(|ou}IjtZ2jC^c`R#YPh6+9Q9Yjbv5N~so}1gau3w-SWUTSYFPO|H09Pn%db~cZb1!O z)s)+zhTUq)?Nh@+HRX<|;UskYF6G?R|9I-7A8u=>osV*VWm^{Cn6m{o_0T@B0Nb?_ z-z^V$o+QbC0yaH?{V^=SY3;<#OXAifahtH|iIfWqa7R0F2a>qtL0McCYGiT{(wxjza4@4z2HmcGAsA(yd7A0j=*gw7xsqt%tN8zm&yQLF-!st#7S% z>miBjkmT15t#2>1zJ1z>8O74>?9}M5d{h!2MpX&~JKduw7sh9Bx z3vfWqd>WKbrk}Mt?<9I0rt?m}^G-g6@u?p@FVRcTxHbLQ*X2{WpVAMvB=#L>+`jx5 z-bb~=tGV96rl&D~U;(yi=X^UQaXs>1`CPTE;rWR4{VKG)G5K`HC$yX?iJpO$w=R!( zUIE=VZR>}J+G+2z+@3rnvo}NS(=a`6`3%+@{p7zO(M!<$5A|Qc{FC=&y^vq$K3o1f z_p$On7zYo9-{XC!Jm&ku&?Udx( zCCRr}l5d|R-$6;fLy~;QVABuTFUbGobL#TH_`H-n`*_K`68c3duWGC>gQ9g9O68+qqijSI}&^H%lKcwegqa^o%%PpKC0oM z`h}d28cwQ*$zQ&R{N;p>?8(FNzl8N47GR@#gz=+>&Fbr!XY!?#D__QX zqaS;p#6Bdkuj?PBJT>fm1e$r!C11{Z0-YDV65S7-7qgPj!>ma5yW6no6|@5^z+LUG zlahIUD93qk+({h!=W^&ix&^vVYt7k~vpr`=&d!`&IlFWAiLB2B@^<4 z-1q4x{gQku>jgBwRf%4Q=C?1ot{y?-kLBBFe`tL2Scoo%##ck*YUC{YN&V;siEf0( zwdlv*D*ur6LO;4oqPwASeUkGThMv!;ez>CjL)?E$&a2^ZknzzDn|{JR1{UC8ZeP$& 
z{)_VMoUeZLszk3r%iGkCeM`>K-ulsd61@+NJJOH+SpG5BbN%pCVlRJu7FR79hmG>- zT$d&LhIZKW4$d1E;FNaiKc}7a^OEtrpdY;?(W}t%HzfJ)z@|T;Ui#sYdY*cyVP#L2 zUzPk*t|!p^Y9zWInqNVZU#t8k`%Gv(I^{cAzoF@RCAuG)Zb;HUl}~^l;yFAtUA<%+ zH9*rfN^}b}-MC~NPC?_RCH@&`{H#PTK;zf-W8aXBqb>dDZHeB6#wAaL>}#uYcIE7o z=dMbIay^!FD(778U&{4rt~X@aRs0>oEd74&KhBvvDK9T)L(aCGT{$Nu>+PK6@4U?C zy0@3}xh;w-Tpz%uMdmv!z{)41-^02mZ{R)w7SMGP-2iQm7X8>; zEKuZuUd#Hz5C# zeHXO6QHdUdmNzXKuRD_SEA&C~?SZE6l{YfJq3H)DdIXw&O41&4l63RXbPIBsc?wOp zEYWMwblZ~oU;Q-5b*)j}k?Sh-d`6(>bD*8`seU?SJ?Mkx-!I?C`N6ba5k*&EPud#uRLLYopVUu#5k6u zTbHEUk)%74q^s=Nw=NzK$5QVnVc8xR0VCXF6jPa zPV#s9_9dUUt9};b^ECBxg}>(nn?ArifCV_I{dMf$w9`JT@~`MG{pfXx-h#H*o}`|~ zl6sy&(=%Ip>gf{v3JP7W?t$?cT03HG_GGi z_5t}}_TT!^!xB9TjT_gGeL{YO^VN@@lIR&|+?=HUSLHa*zo6+h4#d_w8A<73vgIF{V=DUa^~gVFrW0J7bSWH zTJDCVA9f}EZ~#qrC_l>e7@F=xqR*h|DxaOjRms1lf1q)-5?v3CYt)auAlJEG=|{Ip zbUQSzQ$O}D`7y4K`q8}--4Bf$)Q^2gew_2wj~=FNINVHVy9onB>lQB z>DOaPzb4Ox^lLS2`XKEI3vf_7{W_zaa%Ls{IHw;yFVRcTa@QpNx-IF~J!rap`FHd$ zG~JOzpFq&M<8H|byf=w^v-g~qk($KE0Tp8nO3?w06Y zXk5R3>;v-CoUeZLkVKC_iLj;K($17i;{%%SxI|Av z^Ies!pJ$T$lJX(=T&{<(X`AsVKhJ$6ETH=(dJvkv{5Rk@&�kgYpZEPiXvzM2|z` zSM--(m8@!q(=Wu%b!t_9g!@2fKAZB3tP{|Db|rcbnorA%@H4L3q2p^!{T0?*xl4NO z=q-uffu@@tfiJx>*_SWl_u^g(DZl+?nI0Y`J@p!a)^9i0d%13ZdFJnc`WJJ(lRq`JhPtf|+NpwB5egpD#+*iIfi=T$ZEvdiC{8htMX#A#R zTqLi1QV(=Rf{iSw4l=6Ti=RmfvJN!veZpqPwB# zdh}!OmEU4~=tuWU^e{ATR6q7H`EAC5emEhq&p_iA^&fD4+Tp%-o>#Q|cK$pjXIswp zoK=&=O)&l?-!IuHCm9#=J$&DXWE~iW)?-8-a(#oAKPJ(W(0XjjZsz6hWaSRz9LzbC za~P)i$$uw5nC2(ZlQ7LsKmD<#9VTzY&N^5tpUVDJ{yy)^VA@XdKWHateiITs4b5*^ z@|6)ZlLkK^81VzX#9Xg4?*KcCF6Qq zQvV%E{~k&DH+d7Je|seTJ1*(BQ)qjiLEF3W_psC6`|`#3VbgaxpK1KC0oJ{p`zDEh z2wJ~k`2)raw0@%!JprxXtfbrpX#Ao)VV*+cmnC`)8o#L@`1^`qw{dI=i0tRMS|{BPDH{pdA`-h{^O=*Pa7`w!GVV!Wwg`5$C)1xbH) zO8To`K9zYRsedw)(F$Y`mt}xGTK!?dRLc=$u^-8E?5p*oPbE5eYZg~2nU6J+anuJ*Hz3b5 zexT`wBzgjx|E#2+7NGHqvYhb%jbD-IHE4X*+aU9*MzX)?k*re_xqUC!BeR)(MG`mo zhmiIek@Ulwq`obGl==H4{vnBfQ{o?fdseRzc?J6>XuZZIdLCM@Mg7>9yoVZ)3B+EdcXpl)6TwOLHjK^Pwj9ex9@Ae7597E;i3B0%oAD7 
z^+7wj{7)ge5}IEPY`U8E(GKf!dxQ3CxXx&Yh1}k*{kB~9w8J4a{WYS7yU_cTGs(JL z`A&ER?_(wQ`kalqy-gC=E!nRROWJD!n(rhu-xKYupUIzLuO(ktfORl^PNDsF^ozVb z<5E9*L!!5!{ZYPvpL$n9>tCn74gH{ot92Fp!KPjb@U^&{soC{f!2ROQvdOs^OE$-(DWIAGEwt z{pc}S&-vy435lMD##QmBea@Y--sP;3e7{L;&N}(qNm4I=FG6M>Pl0Nxae*aW{CP@b5CVzK9^0hPLlJv{6p67z{{(OIiyy6zgzWfy5waf$rtiHW%5OQACG)B`wICIzNbu5&Q{LtoI5#pbMEEb&v}sZ zBLY=C8>~_oVwLR;HBcIATAQjvrW`+Z%H>=WNZ{ zp0hJ&I-W?Ejw9Hg+Xr(F=N!#Bo^vwibk5nF^EnrDF6Ug$2`<*V>EnEF5W4OTT%TY8 z-Jre|--D5}S=KQR<$rRWmjBKADDTMcamYLId(HCBe4bn0l6^XC`cjgNNyhs)ETAVO z+WObsh38?gX@l<&6)_PRRe`Fj%>rpMywXp84e6CDC=?n6!e11Ydx=o@xp!xO720n)Xjc4j4O<&`D zq4DDqJqeAUmUrX#z2)6`zANv+^&NU%OVIOL)!xW@ubuYV&<;1X|19%dUd8$$@5yst z*~B^`3#=pZzt|VZd$Iq3>HJ}gH@%S0Bg;8)Oo%&zI?{~=;^Lt(T z(OnYV4~-kr{}MibDo6PIn|}1TM00zW#<5-Gb2EJIO}>=RA?ru4O0?^k`R(W@{jPi& zpKsTXc0DVgPoZ%&tP94~%9rywUuax|M7KlZTP8_t(%$J4#pM>)RG4eI86zR!+j)&>mdInUQa6J0~pV;m3d{qvtByhq~lG_ z=XBhu7jxQ=>XDrGr`mQq_i?_LPHyo1i}IU%Pq5tQ`%oq8?Wo-5`xoWk@%^)MlkcgM zTlsb4(|ixD`ty7*CKW&TExr#>Qh)wLBkK~MpQlV&DZj_}?8$HQkLN{Q)ANPt`Lcf7 zFWS@dhW3l=c5QC2%UPe>8*<&4vyj{EH}kRIWNU5*rFI^qUg~EvkK_i|2RX`klw4QR z@?gFClYH-zq|#}5&*5|E>H)4>lJseLzrlP{lde5yha|nnIhrNSAEuh~Cd>3ZQ%zr` z8t!WUIo=y^4(INBi{v2|^vqv>=Chugl-->luXw<{9+F(Rvf?3?Ww$tY&AFPgYs+qX zzP9XkWw$?fr*qep-IHYh=W#x{G~#?hq2V~6(81w2pX8O!ZRZnCG92fVTLqPn`qr0} zP@dexr&i)omfSLz)9@V8cZU8vkZarYg#Wv9ZCiFwZJWaQ9yk*8bJ5O+`rLm#+(4&& zM*I}IbevD#ia5f*1kLY;l9Qh*k{kKg{>ToVCGf*6%~S)8--Ju(dHz@Lp$jxqvAq94 zcQNEmr~ej|WizH@2Yn|rTb^`H(f@$Fu0(P0Y{>YF$A#qK=*5e=B6$?LE9h?YdeBcm zk6c~K|Jmp&{?Cqzvl=zm5!<=`+=>DKTjb0Dhx#)hhn4YgRP8x!?{PAmx{;YlzL3?c<3i01U&jfAy zj@w?EzBA}U{A-uRPX@p3F%>jlO_#36#rnF3*beFW3bdp-TapQ3KGQaRL0>>m1YLz* z4f+~%E#tSCe=YhT`0s=s5B>&pSJ3xF^OXn}(>J4+gT6nybev1t(d`$PoeDqdo74qu z`<+}@Op&$!Y0zE74}|!~p~r$YeSgEH=?8-DC4M!;KOM~@_59#j7rLdgq@RZ#3fgNk zkB%JNp*{D*RJF>2)g$$>Lp`iu!fLS7%!cOgvjh^Cxxk zqrz7f$@*0#J%gSNejZ^at5zaCc;=4gQO8C7ZggMpyA}?I{N9hQ4tf<`8RD(K*Td|{ z>hE>8s6R^lL{(8UKhY;4zfYsvg5Uh=i{W|uUF<%z{>IDvuEp8G-`pm@gOJ~s(Z@lX 
zzWIK|_-uT&1g*bx@EL#7dzeL8av}L1+Mla)?dO3#-=e-2T|nC=#y6rn_}MF~`qKQ* zU8b|~KxlrPPac@BUFBHHlJn_a+MoBSr@9CK0Y6(!wR^(iQM_LFh4XvS&G?6c-@T){ zolLwwOhMPI>^Psi4*!zzX_;loo7DHGkEww1UhjH};hgUc7k~DLiuC%}R@^k8y*}3j z{YT{2{0sTB8S-=eD9wj+>Gg6T#CtuR2-^0oaRZkgS^w4s?e(+y;x>nODCn)r(yu+} z()8;=Ur+pQNbmJ`C}^+0i$PnTqxMVlI|B!FilKDjZ88LwL=ab*}hx{nc-__7wZ^y45B296A@9t#WH_-{@qAqbLbEE zt35%#hx{8J!5)P;^It*lqepV>`E+;Y#o_l-&=d5hh*!IAnBMrk;5U9h=&zYSdN9Oy z{0bsHviv)PJ|g5UhlU%~YsZIp*WpXWkk5lS#j$Uy+>B&s|o2@6K;rv}s z;(ponVLA9+KlXxl{_h9v{I6nvogGtZ~Q2QD7YCvNXA9>w`}5XKje!xD8fQI4m# zqH7+RB<;C=54s24nCp+A$I#um=4;!MEp&aZzkxmq@jpP3D9w*ats7-<{v%VLUqjTZ4WS{>`B6pSoWqC_S?IR3CIB z^-&kk_hIN^^da%q&+$Kg@qVr%alRCSz8*ahwDYY#=x%gF(9W-xpsio`qx0%f-j)2= z8}yT?k9r}mPkKH2nPPlJdcV6E^7Hys7yMrDj)ULp?Q+mwPd9@0dU+VM_n!+PKkqNk zf?h?p2JQXnV$j~t4hQZ1=7f_YJu3W@3CUE@UQ1?z-bQx^?b@>uwCxwi%kyY2bv9qJ z{b?MJFUPO0rhM020l|JVGFQQBhB#WB~;&lYUi zaUtEWRtLZHXDDdLYtL&t1VPdE7;1Z{ur1Z{t91a14*hy1MX^3zJ? zS>KhQ_3vM%f2#f=f8%5O@^O+>J0%_OygpCwoyzC~^}W%POqQZ%zN(yz>5z@u<|4h*OJB!)n zstjFQKi1nvb)j_cLB{)V1jSkRPu%(|e<0{4b$v zm@`#*y!DOi!(;KQv-Rs-@+`FPbt#tox&KA?qU&>Q`pf5kT#sH&yt=r)&7fz3z5zWO z^vBS1LG$swWIpJBM;DLc`ZyEX`v#m_p?{{(e8p>iWc!8a{~`Gm{jSet4ygPn=HL0^ zqJ{TE_|*fLTz@RDbY#!F_ConyPwVZ>^vKqawV*GrpX<$3@Vj3125ozH7qiR9cTdpz zO9!8=r@rz0T5jh2HeOmxkzG&f86Vm4)At)U;@A!K@%&DLw*0e~71L+)E8R?{rffdM z{M)II+UuF)_wi`&_wJPI%g^W8H&g%a(NcN*dQPJD8fE|d33>|Mkn0bkm(iWM=Jj=Q zif+v{Utg3ozr1vPx&eFX$ojYX4n+%_wX0Z7Qg#` z{r2}#@Y~-bQ4>Eh#x;;RXT0NOl%KQXTzbD)>|fvLx<5J&>AgPA-MtupKD~b%va!>n zBDtRU^`O086@q>v`ZQ>-Kb=9lJ{Im+%FlNTtFK{=qg?B2{}slI{%kyS1nqh{7}9&c zH5s(`cY8s5|F#;m_jff8mh{Np4^9Q`{a8oP-jA&W?fuw5(B97-2mMCQr}u5FDLm)! 
zeBO@UrxSdJsJ0*5d?}un@BP?p(B9AW2EC203)=g;(V%@FxE{3c|Bi#E(4?k5FD|J_ zeE-)LbQO9aXrH}I1?~I5)u4Ux%LGju5&!Xy2zz z+_mI?DY_Ni1&TtY0zIr zAB6bN(toq}DAi{NU4C(WsYt$x-VgqLbVtztj9v};2;Cdfe;+**{HN&U;NPY`o1s4c z4}X1#FRx;}hxjYe$3ffQas8~uuP*15iqB()x-ZY)^*#C7QhkQe%|Ty}9thg&!(7l)Bwf6?UY$=|-;;Zm z^7DGJ9pc|Y`THx`{?_`~-h1ya#%J^KBxtV>Eg?Uz4_iTdeV7RPFUbEiJOI6 z`wF@deL|6z@Ac#)r1$xS&#SV7&rqTVn~Iv>{XrLk_IlDDwAY)dpk43h3#Ig44|amz z>q%Acd;RDS+UwI|i1&K46Z~E;8t+xgKmB}3@N<%UFL%lB^MuOUnKZ~99|HhxQB$ zW%mBL{^yFu?DJQBLA!rFxL3*VeAx+p=hIx!52n1?PnE_;y8pR%DZTS!JZR@zRnU(m z{nRFFH0@}8oiB~erS#6Hvk?C@;yXXXoXg{#FTEjt5dY{uFvhT(pX0spKBfGeADbcG z`8O5x8%V$US@u|YddKTwNbh_a2-^8D6}01hHE74<)O}0kJD%5rcD~dG?R@A7+VOr8 zwBx<-^QHWpF9SiR`>UWG-`hbu{%cxF>1m{-D`?02WYCWH^81zI9q((w?|!ND`4N8i z7d;%}-9Ie{ZND7{o$inBU&_B7|3=X6k0ye4KUDsJQoQ?})}Yw{%{;i^g`z!pz-~QjPZ#NR!9m?~1Hyre% z(6d21|2Bele%Ad$vApbga7WP2$DyDvUw{7cX6k?R-C}-a$zR=!|Lg}P{~CUEJEs3_ zz1a=rd%iP4dw#XQSj_LI+xttGmH*Al^uJxEe|(v~@>a#X-IHXW-|hO(qOFX-smS<8 z&r%QT?|4}aN^m+wUH< zuex|WyU)$^$L^1d<(DPDfL}d^$^Lj6y622%WVyC~*8Y$FvHcH2{cQi52V+c+?0VN8 zwEZy_wDV~_X#4vtX!~cpJ^+wEeRl^v$(TtpDpcKXtKv-ir4Awy58Y-U#|b z=;Bcv&utedG(F0ab^Pj1wC(pD^aR==p}ys<$sawQ>t8^(SMWP`xqc#g4&9LJ=Npf% z&-Dy?AKjnp%jd_9*pr9&k4HuF5#rk`%aS>i<=Y?K!T(MCd-$uc>vukk1^>g*(?OS4 z)4#Xib2)i__SZu2KO21%wEffmQ0wBscTk|mg0{bwgLXdc1?_yQd{~y+^p3wx`Xf8e zr~8YU;CFrv1?~K63EKI!5w!EEzN465_Ia`W;CKJg6a4NEHiCBlFcq}>jgz3a(G9;` z%HRFNe9-PM`h#{qu^Y7eiMofE(z~DN3)=m}bkOb>S{_k~cRz6!{O%u`gWvr^SJ3V! 
z;^zk#6v<9V?|z~?#Jisu58D02LeTCXHiLHm(CFkykBao&{6X-0zr6jZlK)SrlfyS?c_<1isT{amdBNJCwlxDCH)xm^b<}b|#^57M`nT{;KBc6m&^sZ&KR}QEYRUhH z=;KhodGuCy$^Yl*j%Swi`zfyw%KJx1Xzt2Oa zL;rmm|JI{R{%!Q}V@vug=+fsQDw40GheCNif2s`ib^o{->gV&6p-|p;NWcBWQvOHi z`fz?fP>1^e2)z~RQ+74;Gt{>Ny&TT(qm=LGtFj~eJp4!)Ker;jJd}SedLi_89eO&9 zuj|k!p}&6?orM0qH@d&ORNnp3!(lu;NPkbs-+|ulFX>028$$k%LobH$@MQFUDDN5Q z?x&a1KN~$7#@7qbg~yis{%+88n14QhYYy|_mH3ZBd%q4n6Xxd|&~0IUOrwuO|Gy1g z9sI+ze-hgJ)9A&}AMYUkEcDmA&{dBu)pv>bt`NV3e=@{>0Noz+htUH;e+*q4`qR&@ z?S=8>^Sq@n9zTbFD74R))gj*Juf5@X{t177XfK~PR)+RGz<(0P=XcR#p*}aEYeM^; zqBlZ*KS4K!{`Pt3XvqJHYuFD3eHD5ujGt@JC;g@NydAnVXg^Cg5%fLuhw=Gy=)sWR z{m^@%ecI5CVSGQ#^kMuz61^VA?_<$*LHD8uf_?_NJe2or^hO-d=-n_MUxKa;<-Hue z7uw@B=$ddouSa)<`u?u*VZ6?umqL5I9o-S?^G%rI2li~cogRTqn z;|RSS#^VprosTW$m%gtEZv~N#7aT@2=>jaDMkh4+ec7^mNdE_Oj=3rS^C*`XsdP z!_f;t|0;Sb=pJ-Om@iL7kB0txCb|&xbI_&FtMc81oZnPv&llkD594(NUDIEx-z(88 zp}g0jm&5w?2J})Gztia6px=fb4|>D$!}xj!{*EwS--SL2{j-Fw3;qwFS3>)JSRLly z$Iz!iZ=%=2dguF|qtKsEr#_2eKjHJNzMy@cH5l|~$!{j)|0VQ(sLvj{@$seh`!>2E z^xt>Uv%!B8x+m1{zw`(FKj`tWo|V@y|3ZIVgYPgz|3gemLGw)e~Pl ziubp*_bSUxWy#&}s|`!pg7*HkH|XCa{Qx(?TX}uGUtJ3R zx8iSZExY*pE#9w||5_=3?`L;Iy!Wg9!SDTQd_L&?=VI`CzuOwL_p94M`@H8gXzy1W z21@1oyr?s1?_Y<5_I`9OXzy2-g7*G&FKF*ikAwDpwDQ+W<$1qaAGG(U9YK44+8ea@ zr$a$|e>xsCgCv;=+WXVRpuIoc4|*x;-}G8QGNbe)-!YuNz|Ut z*m?d*k8?lue&|N*M318_&-$vfqbzwS{!R31 z?teCVytC}$`^B`qex(#|`jMd9aBbZ=&n`Kiem*F9PElvikL#>tdSvC-2W|d+kG|C3 zAGGsvH00NfZV%e@i;uZ9{Zi2STTB$nZw*@iR?wc`cF@nqzZ}XNMjr%yJ-Ti%&pye{ zx94#f(<6I79Q*eTw6EG>^uN8okL~#;;)_Rd{q4Oh|HYuK|54EMHO8-fpm(2CmVNgP z{}K^zt^Xk;P?8p9rT|OKS3jQuq^#nH_ec9ZR@ssNe(Y;Fsr1O&rzU8h ze^ALoLeN3R6Eirxu2y}u9I=ebocDV6_zbYsxz^SPjXK0X|@_m49{ z`}}VuXrIsS1ig(u2-^Gm%8^p}-rv^+?el=PpnbmF7qri}$Ak8H-+a)%zgQ32`_28J zeV&(GUn<|{rL{r(e5W;NpU?FK?fZt&pnYC9AGFV_)`RwW@@~*R|2qxZ`_Gz}mg?jC zh1Q_GAMFp?`|0tZeSW+Uw9k{)gZ6pEPS8G&I1bw97v(Q2mGATS#-M$F&=s`LKSzT0 zeb8*sKA%|&+UIBcLHqo@^!fja#OGnPqowj+fGz~>`=G9%eLgrCw9hA}gZ6p-YS2EP z+X>qHbwAIa9ef`S`PaX^q`iOd4BF?{LqYreeI{t1udN2{^QVKLeg5S4mKMwNc~wi$ 
zzW?YA+V?XPLHj&;IcVRn?F8-fth1nf-q`TUQh7e_?+V)IHzPs&{A4a@-?yy??emnw zpnaZEIabQw_Y?I&`+TK6XrK4?2JQQ@v7mjPI~TOitJj0}{mNd@KL0!m+UJcmuPW8Y z=Z!5v`+Tx1Xx~Q-2krZ}si1v7uoSfKD>sAo`P*U8zJDlxb*X&cKQ#vJ^W~19eLgo7 z^xM!=LHj&!C1~HL?F8-ny0f5tA5!<4Qu)5$>(zG5=y^!-@SK0n?H+UJ3X zLHj(gdc0Jg&kvh}_WfaR(7wMM3EJo5GeP@)c`0b$KWzo={%7Y6mtNm@gZBAp>uZzz zs7QQYbr90~ylyb~eIGm-wC}%ag0{ToDgH^1{C<*aR(4!SzHvLouh%dPKEL=rdIHU9 zrTU87vmT;vo9kNia`2mf+`ly8SF@cb{mp+LRNQ|*hRFe^Xp6XcmIDF{N^`1%ReE%xuEsO_IU~8N6j=z$G85{kv)Hj+x--h{8`YUlY2JQSi4)yhX%0qh3r!Hvc+d|OJ*S?^gpJPGW-?6@T z;r!GrK{q=dns4U(C*MgX>A~|){OVe?>yiBvzfb9L_~(P)^IH$~8Nk1apJhEwe?58& zy^!k(^e~G;ORjIO|9bIdNA~>zeMQNr{e99ekiMTPTR+dgKlp8b$8vU*C+{Y{{9Qah z%H!XM9({M|^M-$mUJ3de=wtM99^VFM&KL7Lm;5*W`Nh)b`z(L3=%#*_KNPh7;h^=8 z1YKRndd5SaRcyucxw!q!^Sij6Lf@PCsvAo0lgxiS~2-^Ib z-*>6MC1~f=mO)lCfAl1$L|z=b+(?L zOWulJyBL2ic{h3+U74mom;P??ZqViz*8_ENef9U3zesv@d7A!w^3Uk;pudlv2-+_` zn+*C&^c32|_4+osF39Jvk2T=h$thzS;59>$mGw<;4M?{k~Ca(4J3B-%5Gv zJCW|@-iJNnX6keJ5i&`SvgGmj)p0%DEV`45u~Ec@ryeK7^8OxO{mIhv<9|Vyj;iD)^fLa%ynfFnU3)0c`PdS)*XNO-tGUqj z(xDS%YI%1@@1YNK{SfrV-Fj&Vjd#4&eTIM1BRik^p!JUj?Rqg0wEp_7 zOVc+5eLb$NP~Ty6Tu_NDo)25o-xA^ny7ybv^(-emK#{Kmd` zX?_zyo8L*$=64$O^|X(=?-p76&`A89%wmE3O+?anT)e ze+xYr{N6vz2JNZW2JQXKWJu4@OE!Y`eyKa?^nNGkL;Ry5KQHBnm$g^>KVeLdZ2a~F zZF{W-ZF{W+?fZ+N&_2H3=nUHQ<=?n8|Ei#Ci605^4d~UNeg82NbTj%iXy1422krZe zhBxJ>!+5s*)BQ`!KMT5@_<<1b`-#%`A5&tL0f+E?MutA z3|jxpWjbCzUdMP`X1y*x|9CTc1I=R*W-oth2JL!YmFpYPN9f}H5X%pxtX0_b(4b z`~CRE{2qz+utoT-oXdV+In(j_^yK>N{3_G$k%+lg0`g!-IM-iMwE`lIMsG>^4X|0mIlL4O@xJc|37xzL|Zy;;sbJ8phI zQ~152KO3(x{(tJT^u_*5$5Sz!@m+P7i-L2=e*bX$z}EH1?*BSN{_a0|f;PYCZ@8KK z`dmcPqb&J({OVe?*N<-WNYF1tPoeD}{lANzM~~&&@)py){3p9#Sql0Ff4;b(&)Q=- z_+5Wig0_AA`)JvbJ>Od`$~^zIpe;Yfzmxi@<9LbXe*k~+D7Np^cgZ9@%94-bSNCD^ ze69aNsQ*{-AO0KPvy%INh;I3JKHre*y6b2^E*j0bekgk3dpyU;wf(ge@*BY4d=vL` z*e%cf;A!yNKBw5SqcZs|;?-4YewFF(ztz6E=+E|Ng`m5MYYp1{R9n!`M|TE2jP45h zdUS8l?oaxHc7HJ#w9l%_nZJ$!zdzdj#z^r08G0<}KSoak?e#z2&o~}dnV;ElKK(w4 zsSxjexO7yf`=Ght_j+17vhOpR4}Pz|^)6=Vk&VxlFu%Xb`KTR|KYhP(tK>iNdsOp( 
z8Q(H}^`+5y= zyvF}#qrNo1tCRcT^T^__PM(PN=c2we{+eVIzenc3=Hgmj)bGjtSxmM*Hv8qD>B0QP zubxCZpT2}%MY~3+e~9isSJFRoH?B43NzV!Bzni-JT>4B%-HskZALQwuf$qQJ+{OEe zQFJGpq|rS6>(T2K=Ps^~e}ZnSETvyV*WZHs`14tL>G-}PtvvtS{PkorjMvYTexFTR zJ5{!R|6+c(F6DoUF0Ve9{jD_oRd=VquP&YM-O*ia!knX~cfG0p{bKpq`0u*5)Sj+C z-9g`%^y)e6rhg>*2<`qs{WSEzZA$)Eqm$c~^dF+cdZ<60(J^5j`ey(@TAwSoF?`gRty`}M^amCncYu5zZNSJ8!_)BAy-y?+=D+VyKDXzv$RgWg7O1?~OL zQPAGM{Qt(z2R^Q=y7MoNlNfNp0YeQn#RWs0))@V>ErS8cl5B}cmLkcHDW>D`%t)F@ znwiWW|A8#4Y~5lYw4n`PAW)YEFw~_Eq^tqc^-}ZMEVx^5*A%x6t+VT}2GU?!lg)Mm z?Du#7ym#kG&hGmp^8D^O_uT*Io_p@S??oR>(r0RqRtH$?nau&#dT4imwO*L0eS|2R^P?8_5A65rg>x1zCYyD6Uu+|Gl11$UfRDip{PXt)@ z{?ZR8>1n;NCcs)BbOu=KhkStZ;JECdS3u{1^76)5@6})u^gZ#_hx-Y=}O7Au3Pr{X+{J9qb`Y!1ALGN(%Y492FgYbF%G5Ar4=CSuTE(h_Y zuT};0cM^YU4fBbMFa2^XpiBSE1z7s#Vt}P@R(&iUx1}Q&PZ9C z$G%_daDb&hW->$|HTUJiyW)9RZepSruUEkE;QezTO?wm-O|j089VO z1pJ!MPh(@)hx-Q5=l;NB{Bi1kL|RU``jhm{rGPGdbSl7I;G+SSK3aY>ktcoB8(`_9 zN`R$*&IQ=$rvOXeRQ@n>&rRm6MoWODuNDVb`siqYkAklTSmSl`Mgq-a{ZE4dmVSCB z;Qusu?I#_3WbgYO3+U2M;rAn?zYYZa(pQHAtn-9}pGxHEJmH=I>pbD%0PFnVsQ~MI z;R6BIdCsGO{4?OQ0sbcVnE?Mec;z1@>7N6)1o&^jg8|lg&Zz+FJm;YR>pbVt0P8&G zsQ~Ld=Y;_4Jm=K_>pbVO2b28hJm=Z~>pbR~pu9TY*&on#zH=_XI^TIRz&hV~CBRyb zZGMMOZ&9T4mn%N)@lBD=XX^cn_PHt2dCb9p{t3p%l{YVnzfY?3nTG* z4)E8A-#@e{zR#rdnR`Byq^I+lQvud_%)0}u^O%PNtn--n2iTnF46x2;J{Vw~&pZ)e zoyR;EV4cUj6kwg-JRM-2&wM(-I-j{zix>0U6zP2CssQVJW=DXJl3ss+bv|=XfOS5z z5@4ObJP}}>zkDRXI=^`+z&gKqB)~eKc`3j;pLsdJIxo8-D35ucMnIRo=;&uqn8)gi zJptDF&9$F(Wb}j1Zw>}n=RKbZjE~zE#r_uQj(Y=srPoXU2-8!4@!06Ep#SRK ztLo2-Vese8LVH5|p}zky{AX5s{_CUXtf4*wyb^piz%Af&0UiRM2hTCiw4Tm@{p0PY z<3WD!gf2V>ljalotCGi_-#HVMe-8e&+ZWkyEW2g5VdTMnn1bqzuGsBE0!oB@- zF`$nSJ{MrwQzHSEJ#;LaXQbd=9KVdnurs`VQ!--_rq}1~2`55?}UZOMq41Qvv>a%5w%I;uKNT zzCQ)eWfJ@=x6=RjBv|#oTbVYG{a(X^fjqUx$$+l*SbiemS9>f8=%zgay6mmP0hT>_ z_d`j1t&dLyblI;L0=n$g&MzeVvVT_xblI=M1W<#UJ9`6*EL^E!Sfq_ebHdKfRgu@Q?4GOJ|b)7Y{YVWXHeW&ubrh zzM>_-8t?f4r_&!;Aib4$J$rih2l6EUnEsV}bM!{Z^9x+o21q zpI=M=?F!^8{bK=E{>}$j`HB90lAj{-um1L;_&%ia*YWNo{pZ5k8(@vkH9`D)iGL2G 
zc^7=j--F;a?@9bolDFr*&GIS%*7)8P$ZLQv2Uz2&|74P1jsHUdUG?&CfZqpR5%6n# zo(izWcO}4PJsDt)*NXwx_d%_*_Ow{4&LcezeK&OPZ(T?q)r0(K zJRS|O#^2ii;N@BW%ANpgyq*jAH9n37boKu$0Zx}sILzdM2XF5vU z7cOGo_t&tOzGTnCivFu$|44o(Sl_498As9I0)BLoJ`GJc55C%E_XC9wq~hO~!k+|7 zM=U4)&9uj#g3sP%vErWrFRv!}+o|}ErSLz4A7MQ_w8Lb@G3vIgLmiXGY)6L9bn~O^t-?-Z$hU!JO@4<(0>QqAK(YT_rm{-T`W^+JUYe9=3hW=1=PvMh=wALi!B2x%K~wn-rTBjjyy6!UdC9x-?fCs8=z2%L zufJ2^F0hxEylda`KMs8lbmc|q{Ty5YZ+7?>Uq{;q@?H%-4SvMYH-Y^l8QZ~!pHCa` zTk`TL`YiY&^z)8>5BT91B>W!+>wD%G9sN^a|49B2p%A~)(7rf?G$@n~y;{PQ0Zs=Y=Jd~pU#}qyXJ`ewW z6jS;CUW)!C_;{5$*3qBS(Oe%d1m{t%6-U1n++Ry@A6WjgE=PYW_+$ee;Oe6UUjFK& zeg7T!3h{S2`iH>!J`_X7D_oVpmP2t~5;g5kA z-%5QgvFSaOqMuIT@1*c!Dg46}{z(cyZ-Xzrt)CaA@X8cklfoNQxIcw=rSM+xGIWa9 zcl8u~E`{#}kHhcV`xD^%z&dHQAW62`bs?o=m$z@GmF z;KN|w-(CsU_g_5!>%iy1ULW^>BXo)AvX_U!SD>$Q@(bW)j49vW-X(tbsK0#xJO|yU z|0(b>uut!c;8S3qpT7iO1$Vjr_4iz~^&Geskc9o5-)zuXA9%kL(I*DSdq#Mf+>M{yqmD$KGAx=>HMC zg2q1P%69>L`i+$FdK>>D*gwku4~2V@^lv2X<=~41E4|g=tKeM@_kk~dmo==zyTF5V zjwc+R1b2}iU;qCW+}W3`H{J)n|E|Qo{3ux8J4WT1^d181dozo{>JQ%lpZFVie0jig zU!c#q`gsz(6rxY>#@Dl-^*(F>CvPSAY1W6H-U_~S-F4BNqi+Kre`S*1DEQv@W9K^g zd%?@@v-cHM9~JPKfhg{ubKvt-j&G0mfe+nId0c)!4nA6;4qbYm1?zrZ-Qll-^*wu^ zzi)xfL;5!p`nMhvsP?*=!Z*Ibvg@ztFHYfCrEn|w=#TCD##Ejy;6sm5Ugekd@_34W zDusU&e7c{1*Lr^ld#>{&S+2fjg)x-(PM4%XZUx?q(FrFj!|5JUs(`l>A&KK=Ss3 z{iF1L8{ElQvHXjo52om!Na4?<@JaC8k4PU`lK-6){U1~K2jFM6C*$in#_ITRf|rAj zgD;r+x-M!3U;1Ckc-ac}kMe&z_}FMd&!^}W@LA{w;8p(K4_*#XwuDcBd#Rrl4nG2Z z5bXQscfkHpdXJ^>4^sFku-?yghBd3?-}ENR@*d8oyYcl>uzw`)H7VSl!d>7w#`kJ? 
zC2uEq3Rz3Q(iaopGmJ;c68+zT^?v(vuKs=>>>u%bE`=XT;gczRK7}7k;eSry=e*gc zX7lvY6keOcuTSCr6dq0CUEs^Pq`hWS^eR~2uiry9R3Gn6(La#Fe~`kTOW`l1@K?a& zQ%QS#8+<;%m%&T!O#1WnUE~FP%H?Mz*gtBIwOtD#u*$a%eD{9l42K8Be;}!kBKYX-&fm&>au2w^InnRG3qJkBL_eMY_kNEybn;Gt zFFcvF=YIpQd4s*LqV&HDuD_9U?dWLX?|~QpJNnpl7C$XI@&}!~m-JvC{3d;Lk)_`P zUiME(`fmcSB>gpx|1IEtjF}mS_ktIr&s!Wm2;TEs)Pa-tZtw$}?EM(!=L6u2#P|H4 z0_*#7hh6+HgCBi2I@RGvzz@T}*5OCNb1?h<_yqWV@JdI23VZ_Gg$!K>fP9OLl&z~{gx9R8~KyAuEUUw~KLNBM8G_4zIEF3PvY z@&7IO(DxJkIJp0Ef`1Ht;6sVN_&NAA_=t;t6Sks%mhoc+c=gAruN!Q7zXE>blZidw zA^tx~^j{x%1=!bL9=tZdRd82;e-k_o_VxQ=a3!FB0qh^;=QP-S|M#VKzWF-y6Kxa(h%{_|e&u|G-5_erqM zOXOX8UjbL(_x~efcxsF7T4YR(=EA|K|z)cfs1fJL~fE8O8rl zvOf5l;(tBqU;iC^0POSg6!;i;mCMi40s14@x5rBG)qhU(&s)GJUg7P@>!Y`WFMN+S zbM;jPS1u>@_ik_(g|+tab3{zSKKmVXwv+!8 z@O^(vn>hTuw;=29$iKsDz!x8-j$QmWf=_%m>2Ev0j|8|R`adM~e-HQ!c(;rHG4QH? zB+TK@gD-)pHj}@<0I$7>4tDAPE%@|*PyD4n1TXs^^cly0-EGKwoH69^OTe2SOX}+u z@CflMj{bV^+z*oexea_I(Dy~~D(08Nj{p7OV_@IDp8|KF?*|?IzkruqN%;R2d>`2J zzYNCH;2FpNM)2AnC-mLm-Qcy3{!Z{=uuuO8cp3f0G1U~{^>*7BHet_}b z3^AYR=hfjhp!0+qpn~MMQWc+*&d;;wC-y+slhZ+Cp zT>O`Uk42Qvm45?x7l%rE9o`N;y_WAzJAQpDD36`v>-)FCC$485?dYEXFJ7GZ8}u!p zLtt;OoCn`~L$d$#6Yy0MyW7QIGQ#}O&HGbaeJ=+;@Lc-#;S! 
zDX_j@;LCpky!i!5fBFV^H-q2zr$@nEH(wXG|Bu0U-^>2CtM8wJ&p}_~(tpYAv>mwO za0__qg`Def`~zTpf8dCtXThtPoK`q`1-$vid}B;`+m#;E`4I8S*FjbKn&pP5Q@u;QYsu{`Vkw@PVX1 zoCGfg`||!3*gwno@puY91-^tl-yY8!qrL-qF9%=f=KiLu-`9ax!rzZ9m1hfhj{U(w zhj)SXena0sOW-JIk3--!VC6;eKLPFx@Rz_R8UH@Md{Jk?p8pc~4EmzQ#s3%Zl2;}5 zeIp%pIe5y|-!kx{#NX`re+}#($$T^T0qS>--^$NOioQ36>nZ#m@E+>R=kH_SN`OzK z`2TYXUr6CgDSRb`f1biG+2KpWyu;6526udt^^_}5C%6t??(kTOKcB+yNa6ca_|qx; zr4)W7g};-+Pp0tyN#U38B%|&z?UTZ}6fS}F{x+{4-VHtj&b#sSK#KpN6#mN;{%#8Y z5WMC~$@qxw@VT?~oqWsC;@3gf`^H*eGvfoi3T3lP`i6NW1J?U(P-%w#F7SabqkkOz z0q_al59G(!=fVC_eS8so^(*%K7m{~2MgN-=emsT$DTS}w#d!O5&XXcr`PH|=Dvu}g z%{Fk$_sGAaPx3?WpUOLY4_NP$I^@dpQ8Btl`TZPN?{6~YNB&pAdjHZ{XPz!fycY`k&WDOwoq_ZD75>mJ$uvJ!llzPg6so1+_} zHt?PRcY*c3mpXLicQ<$$`CsL59ozz5;qblSesHJ5p9Rl>C13IX9DFRm=fEq!pOo(j z@F8%&i1?e>~E#we>jCtfKUFfWIg_^6#YBk6Hh1h z=ab;8|C{;gCYzqVeXO*6danfg$JBQUZv#IvVc##I{7!;LCKLQy;0NDg&%cWPVerKq z?_Y4^?@Qnd@O%7M;3w|k{Rd8eeiwZI^rBdwJq3PxPs0DA-@v|nkoP?}{x#qSz{o=y#IJVSoaB*x%M~;Zh5;s?<@Y3 z-~-6>{q z+2ZpLJ__z|_-Ekr;2DQsyodR21Ml~6`sdfcU2HaY+-U1_GkE-{y}z#Xhr#tXCH!v( zuh^NCX9D~{SAy>no%;6ec?f*+y^G8{r6uq8!FRuZQGA}|DERR2EQByN$?rQgO~SC@cnOF z6wiOZFMcKmiev2e<6zy#^5y?JSogPld!7SdK%U3{0NzD?&s=BAdqtSUmpFMp19wsX zQY)&T7wo0Iz$+bI3qC`AEOU4x_#i2Ea5gSDu#ubP>dY1F1N)of|9U~q1x zkSztcSlXAJDdw~Ff(slP7#qE#nypj{)o6HZyf|B^_GD*f_GWX_K6u?=_x7It;n9|8 zv@lt$)eF_W1NJM^br zf7bKC$9=_IAt*4R8${ZuKb`usDH=OS31^#`T5So9z*Z&D(`w|l8o{mWO~?j);8G+b zt<{KY-K3B<6TQtuY%@aIOvE-Lq|HQcGqKuCcG^tbHWRnq#BDdR+D)u>Bd6WOYPZER zaoZKwmfUjK_(oQTkJajyp$WaV=^TTQ_Xa&K3qr`NDy9?VCEN+u26FI8$So&CX^k z{G(yEbGT9{*7e$O zbV1tM>1HWe?d_fOf;P?%+B9EQXInIxsVU>b6BD&UeXL$>b)yQ=p1Qc8-pX?i?+kS*QbxOpV!}z18BrLUpuIYs`A84H|3I zsMoxU;lidG^?Z4MsVQ)@Fk9Z&9CxHz-fKd7%GE+r)w_BU5Z+#2nGLu2QWM?zAdNnhNfZoF1#^hjOUttE)j_m`t@94`6^rlD*?`sq5 z2P*}1LC7$Jjw#g&=!P&f+o(^KtF)@mXWrD4lyt4SFEdf534HcaS(~hl3=N?_4@7%y ztqs&%T^8~|kgv+_TCF%)D&&WivamzEGvhw7CE?qlh7Kl{Nfgw7*)c7or}zWxb#b_Lb_@g9D}BVwJ4= zEC-oHe{V163pGy~BfW*fb{49&V%hXOiACOMxtz)?IjLsraM{V#lqv|z7poD?QeW5J 
z+DXOwpQwHYkA(U(9E`J}!tJdV^OJ?lH1*!xQ(HDLBZwof-U)e)Kl*=>G)@q?35EvA3SS=dM zWlQtpn8|wUOm?z{-LZ{g)|(5^wrM?zwW&;Pn>I4{q=GtIHby;TcZ~P+uWK`dC72c3 zHgz&NrXy_72yW&|E7P`3n_5yTmmnGk_2x@$$5IgB^j&Ris#+{fGh70i>29X>2rb4U zvIj&c(#<9_BelU|sd2#5th$VY(V^0*K6I~7#M@GgVJ0CaB=rN0($@8zOxGx-Qk^g( zx=+BiL@#<*rj#|&T$J|pof|e`GSXeuM~$eadfJ$tC}@$+j%csxLz&rZi9S#z+Lob= z9cew~lDadhmN`$Fppw~IuJ&QUnXwx48HyjljAlw(t{zmkT1ZxIv@l`nG9L5@>Mfv- z%Ljdm2tygn3y)2_Yf@sFzh0=886E4;^$g8umqwzjfozpQkT>0TY${8eFiTjb4w;5F z>xR58$B?g6m6dfHm6d_nO1WCM48kg(Ln-N^#KO%`A}6wbHd^b)SW6r2Ut<+*wAPZb zs5Tm#iC!*i$y!dvqS}B$Nu^`bFow*0QXNeh<&CX7ThQP)Wuk;kOyt_nwW$8FgC#$o z_X4|fb>?mpsB*+HY$dgr;>3Y*gY?d3DdUcku@qdl+)>)kngZM1)>6!CxMC6Xl$eql z*ezJ5lbJ@by-nHK!SNc|BlAq@wq)zMsh;xeY_UF!0b}i(M21#h(NwlJrQwBrQ+$W(J*|AHxmGtu zq^a(>59Cq!R6txtS|e8Kvjx;gPLb?NFOwtTatUlEa}nbm#7B6swL!rGjfnO_9p)wnBZVSTpSp z$7pwk0F|t^fJ`c$Mcg2(xCMw>^7>#@jm?GHoeso5l~BRP!ed4j5UbF=5i^bE?VsxYm?D3t=#@nm|d8*F(C@E zeBhr==5MQ8hT6sKMx%XNJB`ZGl11zt7FoU3$~;S8r%_O?cCFglV$@ihTj`qRXAhH? z7~3qZ)ka`RB8g1$MzGOut#--SYFC}Db~)SD;xcZE;qudIW81}Rt6g>4?6lg9Gq*`@ zi_M&?0=sT*^J{Zcm^NE#CO+}|ny?6CM1h^A@LGQgxuUUV6|zg(R=bYH;xmD+ayPo# zvQk`*>?*p|F1}kkZ7sJOmbMmyHrYJeWp%4{Dzvqj@>q%7V^Y#~#$Y)q)YfWZ$!HSI z`UzU?Ld zR_p(0wJwoXSGBGxT(NCdU5>3UsMWSetF87{>jJVJ-S(?C$G5?D8aFO%r)kA|RL!Df znT76ZU_F`neQUPJDpqrZW=FS%Hh9Z;l-bfK&g5~fm4>oaS-6?8!lc%CW0e`!(wXi$ zx~yUabL>KVG!BZg*huL__*52WqS~!u>5bN4uT#r8rb9k&$$Q37wIkPe$WZN#60fbMocr#eO zxoFZX>^xnIJ5iNR&&UqD!$oerYu-68+dG&v^Rro;7L*{9(f*h=5ZXJiY^8^19c*6QV|+-+vA%v1|FSN0hPW2S6~a?s@kbyu!xAN=W=X}xNcbz zV4vAi%0aDKw#c}w9EDzGd_|>lok@#kU|+#Grt8E(h)-fZ#plhA z2;0)u>X6*wp7Fs^GT^p#e30=q8{bS+9Onz9>!hpQu;J~7CA)TU*YdgW zHDm@fn7P(fkpViy&jusgG#7l0h9!nvC_-4+ss)+-(X;(#{+EXTAg*r&5a>p91T* zE4N%ocphs#c7|4`K#O~Ys6v%cf%^(q}$szvAR{^jlz^3_2bG7=-PfOCA3lo|+CyIrc zeC=AYuEiP(rIIoxzN;Wr8-6sSS2CG;1v>k;h9iuYA~jbjmPkD_F>A{h#;P6d zFK;k)qz+gqt2!#$6&9+BfEYfT$?=Lwlo=v#+Wny^PZU^z1gi| z2*!Om&grBYTbJkqnGGY=Vwt7CmT{KO9K*Krf>b>H`Tjfbflu^oar_!uoJ(*{aWnU5 zm2J&Q6k4I$e@80CG`X$UCFVpn_Ob)e)@zYj`t;vnGc}&AVm_tgH)Yrov6Za}KC-2^ 
zP|G<{ctYlfZcE(SmTkQpDQu)LE> zCp_H_J4#h|mg^eT6EdVKC?qXVc9AFMm(%2^SRY|it}Vze6H&Q3#tgw`1Hny8p4Rpj zoPNzgZ5#2b(b>drdz#r<9f;4hnWk@)#&XqSr7obIEn}pxz7VGVLQlW8{>&P3+b-p; zRV+c9?7&-A;(Q2Py__6kM}(vJ>JK^2s8kQ)Jz^JP+pZ>`5jdkw2J025N+Qc6Gd6wD z-mS%Itv=3rWL?{)4Y*phBGN?KsF)2>=0Ro7^%FdJPq zOwS;T^emnud~r_3#we@Rl^r({flHqX^czNyM#?iYw1Qa+ZeuQHDMaXw3QDCw7Qx1- zG@RjXk>5NOA>3;6JFldom4@1hpME(9w>z`rhm{+Cv41M$@Z;dwc;fyUOCY&vhJ{U- ziZ==eG&|ei@6gZ=c9<&W-~-#w=pq)&vWFDHW}0>tV^1PS85U%z88J<3DCe3uX*?gOxT{m0Kla287QLUPz zx9Yr$Iaih(bzfND4(TN!BruzuEb^wU43A5$ z7HQLS*0>ic`!W^Isb(q@rA*lzM2oUjc0@4-Wj|SWj>e5Q#BC46dYAcZ7;r;04p@6t z2TLJE#&j}k@Qgjp!{IbD+?iNP^@(VEVLwhmqZDu3Y2xFr>t>a$RID%19%?mV`!>2G zv>NvABrEMfkyy0i8puseXC@ojYM%XQbE3{3CNXl=wgzs;Rlw@R1nwxs!O>K4awgx5=9k5b}^|Kb}l$+t$%xKvcJx%L{qaN@1klLZPBqL`H2hc~S8wjH`g*r~7rZO2j2<4%$ zY6w?V7}Id&o#Qa(f@92?t+BR~&Wg)mHDOb-BiT|hM@037puu;P)M!D#faZp{*Pw*x zgxLx`nGQU%!(0- z`oW86X)`~*#>)V!%`__Q0Jjc)hLf0;rYEFvijh*wdteG^N)!%WCn(`yfN_||C?Pv| zkuIfd!&S0Wuw8hN7g+AlMx{)bNi#yG3fX-JGqZc4jPwjJ+-blc zYo-qD?zxRLJl1FS2`tLylL?kCsM1-2y?B$1@qA&%i8PK*q$FoJt)4`kp}$L-1x#lR z803j_$~2f10tTli5wTFzM2iMdq|_JSS1=_G7`2R8fRS2k<}({~3ozOfs?EG(#$&qx zvn9YREzemt=E8lf1~qZX!=d_xs2S5;zsS%fu>9P939ew~0ZmjnSlrAa<3};JR?D;I z%0^6|AVhPIF^|llwc*=fb`ztNhvpXbd+am$%qqZWS`$TNM(<@lQL0ET>CO zwP<=4!^`}3bA(y_A(ilVa6Xk^E(SYC`?;aTlx((Ulsl7bjj-7=CpjAxOU3_|&tUf< zo71@2dU=)^!T1Slm14jb*4O)?Fg%=G-Sz0D0U)e*zB#H?Tz(6mjtlqryV-QkMx z8R|Bd3InHcf&`qY5TlF`kP?{$C#!QWB%yVif*4(l0E?u7V=z$C#AgZI8|W_O#~I*p zkW@3kwc!?Xs$R2Xq+1lM1F|ziVyPkw;s5?2zI1G07+M&BiCXSjsvMs5F zf+k{hRJU4SHITu);h=VAws?SyCD?ReY<-Yo1>)D=^hv8Ha?iW1UG& z;*?Sgl}2{P7-5NU(}pSOu37xK2*2ivbn!$An&R5Bsz~wKR%Tyq<}XCD^ZvD{HkZQ{ zduM|Z(gB!~hq4D;{#~K{rJ=ZDTqCA(Z|dIUPrvL^FOK5+4O{0pV70-0oADIcgv?=K zo4&0?m%bXnG={yfC|^{(YITNgP{It=Z4Q2$-F;V=u5BE*ud2AL;bC60?S9Z66~@@= zEfgt6G0S#rL6(>bL zaXL@2X=i;2(yKGsI(v{T{LKw0V^A2IC=>rg$5->1Q7C)kI?tgV<1_6Uhb$>?t!ov> zs92lZtz)K^a2we2xQsMPvR;&!Pa&O|gu{0}-&KNQxq;hMbQfkVhp@7WRI)ETG33gy zd2%r*Cl!u~WEOLAIb4UNAhN2s;O1q}(UfrS5K^cht@z6m)N1Pbo3pvxT{v^#!cTLY 
zw@^k#cV=uW(cw74oSQ;Z+3Rt5V>o(WVrvjSdzYx7J94`9RW=)?y?vS9?(uGOyuh_b z#%w{za>+R9y?r|edipZkhvBw|@1mMp;YyPGnRG8{a&@tTV_fV?g402@%kK^9r=(oJ1N|-|xvQ>1pU0W{;#pgDl7?hwm`fk(SsNvY@qamaQ%uc1HL0#55h)Bh~;B zo3qEspbRN%+{`nOpxVW>W~^GK#4#b3O4A{qVn^D{&eUeg`{}Y%nNkDaoH>ia*4IJh zE2>P=u5}D~~UX$`YzpNSr8Z%xf!+rv^ZCnXnhV(l~bQ^Y2f*2vBz@@Rj~6ZqFM zO2c2n;V9C;u<1>-fmvoN$jqCX&+R=jPYU^Bub}MF(Dx@C*qTl?1|eRrgwkZV4tTd! zNEl}ylx4Z`QgIq8R+cq?F>Zbc8c4U-us<-3jV#$PcB*WKW^1b2U~ZOCZ)4e>q^*jW zU?PoI52nNn)D*z2R;6aUnYkHi?B2LDnm%g77wCW5>eiSGY-IzTt&9&r=ap#@ewm}n zbV{SXbsEPycv$V{8SG*lbyEolzWtnZ%y12vd%o%^=oA~IlN#nA3>77(S7=ju=bKq5 zVsSHw3TWVhc$8#>$zSAk4(2yYq(D2xnrA@uehr$NVL~d#P_vEX3eDPrU&e7{OV0fo znb~eJXVf|>rN6Ix#H;td(b3^iuh{#$$NJeP(bXAx0doM%J;U3_`*w|UO5Lj1ZqI;h zYwyG4<~jLW*bI*2E|`!^Upp_P*?m%OnlZVtoXry1<OzMv+vF6pF5MnDsbs|Z*|LrV;7&y*^0+d9@;jcK zc)2D$Em$*s9NAQzEKnC}dZ)A2ZtEM}-Zz*Td+YX|%)s`o!|tw!%ayYrg19!CGZBLD zXO-zjfKwqRJ8?A>F5 zVO})OA6qtcbJy-og>4E`H=SPuI*Cn$LfIeo3NdN9GO6bVX&IM!9K|Mw)mJO$iZU^M zS3#Ur*a=P3kL9Q77tT%Uyn9^H8lIRcbU~coaj4CTPcfvZRL$XLQMu1;PD&L8Me3Ql z_C1(5F->2wOy6K;-)decrm6Zw&1!C2%!y7$=n;Z6|2~~E^Bp=)jMe3y3QNr zL&cN?T4|=Ax>j$}W(zX1v)I%_!u3CsglsHVP)kYk(~KNMVdHB7DM?>#BBG2fHdmOP zbS)-|PDmK;I{W+xB;4dEVTEX2-dyE?O#Nx& zP^T)gjhVWw_}dBx%~jgwP&>xcwQSQchL26y1~_ZSPAXWG@)-E(H=g9VFxKC_y>~EP zk)}DU#7x|O7BUr_717iY!^Lh|#{V~CoS2gk3jKd2nl^0~5D}U!3y__14i)&w@Ev`l z8M5K+yoHJB0;kK7h*=;D3DIT1T>iGhKe)7RLXC6Fl8pJHYz5b;?fhoW$)Jo|$bOVlZmG^wLiIV2XVsl@cw9L}6<9FVryRv|ZsHS;4qw?LG6!OgLP4&F>~9o_0) zd`C@ZujD|o6*SUxv)>I{Of|_JGe@o2Eo6+?QZV=zCYUPM+DnpSU@W^Zz|JvVImj8c z!)~rw@ucf>mAsI^=gI84Hzkn9p6M~j?z@ZrZ`O$9|;Gz&NJDJ_)}PZ_VmlS zpo_V9wp-2*ABZcOA~A#5lDcA63+6lpccKb;I}2N&EN7ZQtx{8Gr7K23kbQ+k zYz^SPNoM=lmxI#xuOpy74K4o%^%@m>!Mf&}a67Bp zAlMZJ`F+(Y2VjlMB`$|Q9%75AwSj#Xit59>c}YSswdCI?Nh#$B*WIiRQFF7qj%G0) zU2~Sv_)jCP(Krh`pD>y70-HGL3xW|5wHIqgr0KZ49tusfZz9-LU@Mleks)3eQfFgK z2Z61(J&N7zlQ=J{{`z=RsLttxv3+0i$*AmVStVlb3K~DY>r&QEN#~B*wRAfZV7^LN za{dIOHCPE%(lTCk%QXr0nUIy}j#X=S1ZJxgQsL5y(4@4a0o}N_s&RfeA;Wz`vW1L; 
zNLM81U!qKQZzv4~XUEZ<$K|x8b1YS6GgVvVs#{J%#LeX|c(OVTn2NkNCkn8=@Y2ka zw+*hMlIj34=toA)*$`Qm*;0N4<5@;wo`m$Y0x=<9P*4nURmZ>S z6Y*~wGlsKf=Q!4a-TMLAyH;R3I0Uw0yA6QAVyhf!rG7W%~*! z&Bg?cn3p>BY|$+V&r*wUGtqfjP4yOf8@vKTcXf?(K`d6G`?&r9dJ@@5Hpb@GUV33P zq3-O0jTp|>CHys`5iemSI{s55EpAqEP>P}8T8BaBX8~95x9d8r=7OgC zaJDx|-OiZ%S!LeKq3f{bEk@7Esys`SmEfy6@Ae_*Q*!vqF&FIyRGqyUbL%z~VHtv$ zCV*WZPTiPP0%o7ighs!L-6*k-mCsD@pWZFFP}W@zlIn5+X5Y(4UnpW+JsMxnR((z& znJC)hqEWWQv6!N^qtrNRBqPA&LD}XCpqKuvl2uICLo}1g0B0tNyP7#n6$j2@grHX3 z&9@NE_mAd?u7@R8C^hGxpUX6B63Aqj*;|?5buGHd z6`6AZ?p_=G_^dTeY6Y|qiLs+d_^P=?hgmksn-Ob9RX1@{?Nq)!+E6Lx9WA&_sl6N* z8tiu4yiB2d80}o+Yg9{)34In{u;gU}yzp~^WbFvj26ifFuU_Bj!`w_&eh&i)_TS6NC_->J9@tLxBltwreRH$oHEtNO3U*_-88k%cWhDKB4 zs2R+BD=m7>8;9!JhTe3Eja_|nAJI}r%?gV3H#ZJ_h#i*m?#-pi@vS76j=NT9(E`^B z)A5^fWr>>R%mi<*71}_V*-iKDTnc!v=QA@V!xaw2Z@O)0qqZ$@+t5<%ZA1GTQDzO4 z+%}BEliP+iC|Lqt=d2sfu72)I(<`>iA&IDKbngz<<6i4#`D_yKxV3iMFvr9t(&Hf6@GT%SI zwMLdcecQ~54ST~8RUce&RKZPQdvr*4s|+OijJbiRigJg&Bt$i#sP0wz`kD4;sEk1@ z!FY*oX(&b#3CRlJtXhm^tlWfZ1(GAT-`O%-hKHqV*kehcs>R z`LN!2kroQH9}n3aAsDgB`!`A5-RVS4Veg3HbHxN0lDHfaq8N- zG=F@3u$juN##BbU?$M!jbQAYlwMbVUi|YGUpgG1Ctz&X3tlPGI$7}6DQqWN2?`G;8Y)*5dHDcsYUSLiQ>re&2vh96dX?U5nex}wrpOOue))e7&Kj&btd>== zMv|#w`wLy|k)bPU>pFdQ|SGj+Ey0G&pU+<@C2!f4>&0u!I{ib->;7cW>Zo$yGW zFXMyot#LB+k7)XT9sfUv|GW5K&sq4Me#v#wE&LW;`BVA#07AYqZ@3}qB7fpjHa%ZA zzeB#oZ@R&ZBk_sH^XXf*A>SGJl#ci`Og!H!_#N_{g|CH9CO+|dK7ETQ78_aixCGQskzB^NV zSK*t3F1{B8e0*xONnXA82J@XI@u`eH-G7tfI~4HU9PkxVeD?=@FAVtj{B={h4+ebF zDL&o5<2OZ#pNHXF==n1OX{L&&i7)8(v5Y~4ssH%rGJGda*l!iK(7;MplX1I d`N<8@oqP4mK0Z&cPw_3