author    Chris Wilson <chris@chris-wilson.co.uk>
          Thu, 9 Feb 2012 10:29:22 +0000 (10:29 +0000)
committer Chris Wilson <chris@chris-wilson.co.uk>
          Wed, 15 Feb 2012 11:16:59 +0000 (11:16 +0000)
Every access to either the GTT or CPU pointer is supposed to be
preceded by a set_domain ioctl, so that GEM can manage the cache
domains correctly and the following access is coherent. Of course,
some people explicitly want incoherent, non-blocking access; this
patch will trigger warnings for them, but they are probably better
served by explicit suppression.
v2: Also mark the pointers as inaccessible after an explicit unmap,
and after the implicit unmap when the buffer is returned to the cache.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
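
The pattern the message describes, sketched for illustration (this sketch is
not part of the patch): before dereferencing a CPU mmap of a GEM object, the
caller issues DRM_IOCTL_I915_GEM_SET_DOMAIN so the kernel can perform the
necessary cache maintenance. `fd` (an open DRM fd) and `handle` (a GEM
handle) are assumed to come from surrounding code:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Sketch only: move a GEM object into the CPU read/write domain
 * before touching its CPU mmap. */
static int set_cpu_domain(int fd, uint32_t handle)
{
	struct drm_i915_gem_set_domain sd;

	memset(&sd, 0, sizeof(sd));
	sd.handle = handle;
	sd.read_domains = I915_GEM_DOMAIN_CPU;
	sd.write_domain = I915_GEM_DOMAIN_CPU;

	/* GEM resolves the cache-domain transition here; skipping this
	 * step is exactly the incoherent access the new annotations
	 * are meant to flag. */
	return ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
}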
intel/intel_bufmgr_gem.c
index 2e65580442c59e3b1f133d38a952066a613ef786..0f33b71d7f2e9a865059859c059bba2b082e47f6 100644
--- a/intel/intel_bufmgr_gem.c
+++ b/intel/intel_bufmgr_gem.c
	free(bo);
}

+static void
+drm_intel_gem_bo_mark_mmaps_incoherent(drm_intel_bo *bo)
+{
+#if HAVE_VALGRIND
+	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+
+	if (bo_gem->mem_virtual)
+		VALGRIND_MAKE_MEM_NOACCESS(bo_gem->mem_virtual, bo->size);
+
+	if (bo_gem->gtt_virtual)
+		VALGRIND_MAKE_MEM_NOACCESS(bo_gem->gtt_virtual, bo->size);
+#endif
+}
+
/** Frees all cached buffers significantly older than @time. */
static void
drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
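
For context, VALGRIND_MAKE_MEM_NOACCESS and VALGRIND_MAKE_MEM_DEFINED are
memcheck client requests from valgrind/memcheck.h; they only change how
memcheck treats an address range and compile to no-ops outside Valgrind. A
minimal standalone sketch (not libdrm code) of the pairing the patch applies
around map and unmap:

#include <stdlib.h>
#include <valgrind/memcheck.h>

int main(void)
{
	size_t size = 4096;
	char *buf = malloc(size);

	/* "In the cache"/unmapped: any access is reported as an error. */
	VALGRIND_MAKE_MEM_NOACCESS(buf, size);

	/* "Mapped": the contents become valid to read and write. */
	VALGRIND_MAKE_MEM_DEFINED(buf, size);
	buf[0] = 1;

	/* "Unmapped" again: a stale-pointer access would be flagged. */
	VALGRIND_MAKE_MEM_NOACCESS(buf, size);

	free(buf);
	return 0;
}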
DBG("bo freed with non-zero map-count %d\n", bo_gem->map_count);
bo_gem->map_count = 0;
drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
+ drm_intel_gem_bo_mark_mmaps_incoherent(bo);
}
DRMLISTDEL(&bo_gem->name_list);
	if (write_enable)
		bo_gem->mapped_cpu_write = true;
+	drm_intel_gem_bo_mark_mmaps_incoherent(bo);
+	VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->mem_virtual, bo->size));
	pthread_mutex_unlock(&bufmgr_gem->lock);
	return 0;
		    strerror(errno));
	}
+	drm_intel_gem_bo_mark_mmaps_incoherent(bo);
+	VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size));
	pthread_mutex_unlock(&bufmgr_gem->lock);
	return 0;
	 */
	if (--bo_gem->map_count == 0) {
		drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
+		drm_intel_gem_bo_mark_mmaps_incoherent(bo);
		bo->virtual = NULL;
	}
	pthread_mutex_unlock(&bufmgr_gem->lock);
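
Together with the v2 note above, the user-visible effect is that a CPU
pointer kept across unmap is poisoned once the map count drops to zero. A
hypothetical sketch against the public API (the bufmgr is assumed to come
from drm_intel_bufmgr_gem_init() elsewhere; the name and sizes are
arbitrary):

#include <intel_bufmgr.h>

static void demo(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bo *bo = drm_intel_bo_alloc(bufmgr, "demo", 4096, 4096);
	char *ptr;

	drm_intel_bo_map(bo, 1);   /* set_domain + MAKE_MEM_DEFINED */
	ptr = bo->virtual;
	ptr[0] = 0;                /* coherent access: fine */
	drm_intel_bo_unmap(bo);    /* last unmap: MAKE_MEM_NOACCESS */

	/* ptr[0] = 1; here would now be reported by memcheck instead
	 * of silently racing with the GPU. */

	drm_intel_bo_unreference(bo);
}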
		if (target_bo == bo)
			continue;
+		drm_intel_gem_bo_mark_mmaps_incoherent(bo);
+
		/* Continue walking the tree depth-first. */
		drm_intel_gem_bo_process_reloc(target_bo);
		if (target_bo == bo)
			continue;
+		drm_intel_gem_bo_mark_mmaps_incoherent(bo);
+
		/* Continue walking the tree depth-first. */
		drm_intel_gem_bo_process_reloc2(target_bo);
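
These two hunks poison the mappings of each buffer in the relocation tree
as it is walked before submission, since the kernel may move or write the
objects while the batch executes. A hypothetical illustration (batch
construction and its relocation to `bo` are assumed to happen elsewhere):

#include <intel_bufmgr.h>

static void submit_and_read_back(drm_intel_bo *batch, drm_intel_bo *bo,
				 int used)
{
	drm_intel_bo_exec(batch, used, NULL, 0, 0); /* mmaps -> NOACCESS */

	/* Re-mapping performs set_domain and re-marks the range as
	 * DEFINED; dereferencing a pointer saved before exec would be
	 * flagged instead. */
	drm_intel_bo_map(bo, 0);
	/* ... inspect bo->virtual ... */
	drm_intel_bo_unmap(bo);
}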