author    Chris Wilson <chris@chris-wilson.co.uk>    Mon, 5 Dec 2011 21:29:05 +0000 (21:29 +0000)
committer Chris Wilson <chris@chris-wilson.co.uk>    Mon, 5 Dec 2011 22:22:10 +0000 (22:22 +0000)
There is a per-process limit on the number of VMAs that a process can
keep open, so we cannot keep an unlimited cache of unused VMAs (besides,
keeping track of all those VMAs in the kernel adds considerable overhead).
However, in order to work around inefficiencies in the kernel, it is
beneficial to reuse VMAs, so keep an MRU cache of them.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
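
To make the effect of the new knob concrete, here is a minimal usage sketch. It is illustrative only: the device path, the batch size of 4096, and the limit of 256 are assumptions for the example, not values taken from the patch; only drm_intel_bufmgr_gem_set_vma_cache_size() itself comes from this change.

#include <fcntl.h>
#include <unistd.h>
#include <intel_bufmgr.h>

int main(void)
{
	/* Open a DRM device node; the exact path is an assumption for this sketch. */
	int fd = open("/dev/dri/card0", O_RDWR);
	if (fd < 0)
		return 1;

	/* Usual bufmgr setup. */
	drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
	if (bufmgr == NULL)
		return 1;

	/* New with this patch: keep at most 256 cached mappings around.
	 * 256 is an arbitrary example value. */
	drm_intel_bufmgr_gem_set_vma_cache_size(bufmgr, 256);

	/* ... create, map and unmap buffer objects as usual ... */

	drm_intel_bufmgr_destroy(bufmgr);
	close(fd);
	return 0;
}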
intel/intel_bufmgr.h
intel/intel_bufmgr_gem.c
diff --git a/intel/intel_bufmgr.h b/intel/intel_bufmgr.h
index abe9711bcc88dd1f67618a4c9f47499999ad16cb..808e5dfa4a6df397987ee4004c6b363268b0e18e 100644
--- a/intel/intel_bufmgr.h
+++ b/intel/intel_bufmgr.h
unsigned int handle);
void drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr);
void drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr);
+void drm_intel_bufmgr_gem_set_vma_cache_size(drm_intel_bufmgr *bufmgr,
+ int limit);
int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo);
int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo);
int drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo);
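
The new prototype takes a signed limit. As the initialisation hunk in intel_bufmgr_gem.c below shows, vma_max defaults to -1 and the purge loop treats any negative value as "no limit", so a caller could restore the unlimited behaviour with a hypothetical helper like this (not part of the patch):

#include <intel_bufmgr.h>

/* Illustrative only: a negative limit disables the cap again,
 * matching the vma_max = -1 default set at bufmgr initialisation. */
static void example_disable_vma_cache_limit(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_gem_set_vma_cache_size(bufmgr, -1);
}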
diff --git a/intel/intel_bufmgr_gem.c b/intel/intel_bufmgr_gem.c
index db1416a66c5e373a10fb105736f859167891941c..c535dee69787c69fe360e7b5ffe6ce6336431e32 100644
--- a/intel/intel_bufmgr_gem.c
+++ b/intel/intel_bufmgr_gem.c
time_t time;
drmMMListHead named;
+ drmMMListHead vma_cache;
+ int vma_count, vma_max;
uint64_t gtt_size;
int available_fences;
/** GTT virtual address for the buffer, saved across map/unmap cycles */
void *gtt_virtual;
int map_count;
+ drmMMListHead vma_list;
/** BO cache list */
drmMMListHead head;
}
DRMINITLISTHEAD(&bo_gem->name_list);
+ DRMINITLISTHEAD(&bo_gem->vma_list);
}
bo_gem->name = name;
/* XXX stride is unknown */
drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
+ DRMINITLISTHEAD(&bo_gem->vma_list);
DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);
struct drm_gem_close close;
int ret;
+ DRMLISTDEL(&bo_gem->vma_list);
+ if (bo_gem->mem_virtual) {
+ munmap(bo_gem->mem_virtual, bo_gem->bo.size);
+ bufmgr_gem->vma_count--;
+ }
+ if (bo_gem->gtt_virtual) {
+ munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
+ bufmgr_gem->vma_count--;
+ }
+
/* Close this object */
memset(&close, 0, sizeof(close));
close.handle = bo_gem->gem_handle;
bufmgr_gem->time = time;
}
+static void drm_intel_gem_bo_purge_vma_cache(drm_intel_bufmgr_gem *bufmgr_gem)
+{
+ DBG("%s: count=%d, limit=%d\n", __FUNCTION__,
+ bufmgr_gem->vma_count, bufmgr_gem->vma_max);
+
+ if (bufmgr_gem->vma_max < 0)
+ return;
+
+ while (bufmgr_gem->vma_count > bufmgr_gem->vma_max) {
+ drm_intel_bo_gem *bo_gem;
+
+ bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
+ bufmgr_gem->vma_cache.next,
+ vma_list);
+ assert(bo_gem->map_count == 0);
+ DRMLISTDEL(&bo_gem->vma_list);
+
+ if (bo_gem->mem_virtual) {
+ munmap(bo_gem->mem_virtual, bo_gem->bo.size);
+ bo_gem->mem_virtual = NULL;
+ bufmgr_gem->vma_count--;
+ }
+ if (bo_gem->gtt_virtual) {
+ munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
+ bo_gem->gtt_virtual = NULL;
+ bufmgr_gem->vma_count--;
+ }
+ }
+}
+
+static void drm_intel_gem_bo_add_to_vma_cache(drm_intel_bufmgr_gem *bufmgr_gem,
+ drm_intel_bo_gem *bo_gem)
+{
+ DRMLISTADDTAIL(&bo_gem->vma_list, &bufmgr_gem->vma_cache);
+ if (bo_gem->mem_virtual)
+ bufmgr_gem->vma_count++;
+ if (bo_gem->gtt_virtual)
+ bufmgr_gem->vma_count++;
+ drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
+}
+
+static void drm_intel_gem_bo_remove_from_vma_cache(drm_intel_bufmgr_gem *bufmgr_gem,
+ drm_intel_bo_gem *bo_gem)
+{
+ DRMLISTDEL(&bo_gem->vma_list);
+ if (bo_gem->mem_virtual)
+ bufmgr_gem->vma_count--;
+ if (bo_gem->gtt_virtual)
+ bufmgr_gem->vma_count--;
+}
+
static void
drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
{
DBG("bo freed with non-zero map-count %d\n", bo_gem->map_count);
bo_gem->map_count = 0;
}
- if (bo_gem->mem_virtual) {
- munmap(bo_gem->mem_virtual, bo_gem->bo.size);
- bo_gem->mem_virtual = 0;
- }
- if (bo_gem->gtt_virtual) {
- munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
- bo_gem->gtt_virtual = 0;
- }
DRMLISTDEL(&bo_gem->name_list);
bo_gem->name = NULL;
bo_gem->validate_index = -1;
+ if (bo_gem->mem_virtual || bo_gem->gtt_virtual)
+ drm_intel_gem_bo_add_to_vma_cache(bufmgr_gem, bo_gem);
+
DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
} else {
drm_intel_gem_bo_free(bo);
pthread_mutex_lock(&bufmgr_gem->lock);
+ if (bo_gem->map_count++ == 0)
+ drm_intel_gem_bo_remove_from_vma_cache(bufmgr_gem, bo_gem);
+
if (!bo_gem->mem_virtual) {
struct drm_i915_gem_mmap mmap_arg;
DBG("bo_map: %d (%s)\n", bo_gem->gem_handle, bo_gem->name);
- assert(bo_gem->map_count == 0);
+ assert(bo_gem->map_count == 1);
memset(&mmap_arg, 0, sizeof(mmap_arg));
mmap_arg.handle = bo_gem->gem_handle;
DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
__FILE__, __LINE__, bo_gem->gem_handle,
bo_gem->name, strerror(errno));
+ if (--bo_gem->map_count == 0)
+ drm_intel_gem_bo_add_to_vma_cache(bufmgr_gem, bo_gem);
pthread_mutex_unlock(&bufmgr_gem->lock);
return ret;
}
DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
bo_gem->mem_virtual);
bo->virtual = bo_gem->mem_virtual;
- bo_gem->map_count++;
set_domain.handle = bo_gem->gem_handle;
set_domain.read_domains = I915_GEM_DOMAIN_CPU;
pthread_mutex_lock(&bufmgr_gem->lock);
+ if (bo_gem->map_count++ == 0)
+ drm_intel_gem_bo_remove_from_vma_cache(bufmgr_gem, bo_gem);
+
/* Get a mapping of the buffer if we haven't before. */
if (bo_gem->gtt_virtual == NULL) {
struct drm_i915_gem_mmap_gtt mmap_arg;
DBG("bo_map_gtt: mmap %d (%s)\n", bo_gem->gem_handle,
bo_gem->name);
- assert(bo_gem->map_count == 0);
+ assert(bo_gem->map_count == 1);
memset(&mmap_arg, 0, sizeof(mmap_arg));
mmap_arg.handle = bo_gem->gem_handle;
__FILE__, __LINE__,
bo_gem->gem_handle, bo_gem->name,
strerror(errno));
+ if (--bo_gem->map_count == 0)
+ drm_intel_gem_bo_add_to_vma_cache(bufmgr_gem, bo_gem);
pthread_mutex_unlock(&bufmgr_gem->lock);
return ret;
}
}
bo->virtual = bo_gem->gtt_virtual;
- bo_gem->map_count++;
DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
bo_gem->gtt_virtual);
pthread_mutex_lock(&bufmgr_gem->lock);
+ assert(bo_gem->map_count > 0);
+
if (bo_gem->mapped_cpu_write) {
/* Cause a flush to happen if the buffer's pinned for
* scanout, so the results show up in a timely manner.
* limits and cause later failures.
*/
if (--bo_gem->map_count == 0) {
- if (bo_gem->mem_virtual) {
- munmap(bo_gem->mem_virtual, bo_gem->bo.size);
- bo_gem->mem_virtual = NULL;
- }
- if (bo_gem->gtt_virtual) {
- munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
- bo_gem->gtt_virtual = NULL;
- }
-
+ drm_intel_gem_bo_add_to_vma_cache(bufmgr_gem, bo_gem);
bo->virtual = NULL;
}
pthread_mutex_unlock(&bufmgr_gem->lock);
}
}
+void
+drm_intel_bufmgr_gem_set_vma_cache_size(drm_intel_bufmgr *bufmgr, int limit)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
+
+ bufmgr_gem->vma_max = limit;
+
+ drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
+}
+
/**
* Initializes the GEM buffer manager, which uses the kernel to allocate, map,
* and manage buffer objects.
DRMINITLISTHEAD(&bufmgr_gem->named);
init_cache_buckets(bufmgr_gem);
+ DRMINITLISTHEAD(&bufmgr_gem->vma_cache);
+ bufmgr_gem->vma_max = -1; /* unlimited by default */
+
return &bufmgr_gem->bufmgr;
}
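
Taken together, the hunks above implement a simple discipline: a buffer's cached mmap sits on the MRU list only while its map_count is zero, so the purge loop can safely munmap whatever it evicts, and the first-map/last-unmap transitions move the buffer off and back onto the list. The following standalone sketch condenses that pattern; the names are illustrative and it tracks a single mapping per buffer, so it is not the libdrm code or API.

#include <assert.h>
#include <stddef.h>
#include <sys/mman.h>

struct vma_node {
	struct vma_node *prev, *next;	/* linked into the MRU list while idle */
	void *mapping;			/* cached mmap of the buffer, or NULL */
	size_t size;
	int map_count;
};

struct vma_cache {
	struct vma_node lru;		/* list head: lru.next is the oldest entry */
	int count;			/* number of cached mappings */
	int max;			/* max < 0 means "no limit" */
};

static void mru_del(struct vma_node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	n->prev = n->next = n;
}

static void mru_add_tail(struct vma_node *n, struct vma_node *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

/* Evict the least recently used mappings until we are under the limit. */
static void vma_cache_purge(struct vma_cache *c)
{
	while (c->max >= 0 && c->count > c->max) {
		struct vma_node *n = c->lru.next;	/* oldest idle mapping */
		assert(n->map_count == 0);
		mru_del(n);
		munmap(n->mapping, n->size);
		n->mapping = NULL;
		c->count--;
	}
}

/* First map of a buffer: its mapping must not be evicted while in use. */
static void vma_cache_on_map(struct vma_cache *c, struct vma_node *n)
{
	if (n->map_count++ == 0 && n->mapping) {
		mru_del(n);
		c->count--;
	}
}

/* Last unmap: keep the mapping, but let it become an eviction candidate. */
static void vma_cache_on_unmap(struct vma_cache *c, struct vma_node *n)
{
	if (--n->map_count == 0 && n->mapping) {
		mru_add_tail(n, &c->lru);
		c->count++;
		vma_cache_purge(c);
	}
}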