intel/context: create/destroy implementation
diff --git a/intel/intel_bufmgr_gem.c b/intel/intel_bufmgr_gem.c
index 21fe099e500865d0d6c424e2d0e9fcecb0dad3e4..a957c2855dabd2bbbf906f78b030b6c5549a963b 100644
@@ -1,7 +1,7 @@
 /**************************************************************************
  *
  * Copyright © 2007 Red Hat Inc.
- * Copyright © 2007 Intel Corporation
+ * Copyright © 2007-2012 Intel Corporation
  * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
  * All Rights Reserved.
  *
@@ -39,6 +39,7 @@
 #endif
 
 #include <xf86drm.h>
+#include <xf86atomic.h>
 #include <fcntl.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <sys/mman.h>
 #include <sys/stat.h>
 #include <sys/types.h>
+#include <stdbool.h>
 
 #include "errno.h"
 #include "libdrm_lists.h"
-#include "intel_atomic.h"
 #include "intel_bufmgr.h"
 #include "intel_bufmgr_priv.h"
 #include "intel_chipset.h"
+#include "intel_aub.h"
 #include "string.h"
 
 #include "i915_drm.h"
 
+#ifdef HAVE_VALGRIND
+#include <valgrind.h>
+#include <memcheck.h>
+#define VG(x) x
+#else
+#define VG(x)
+#endif
+
+#define VG_CLEAR(s) VG(memset(&s, 0, sizeof(s)))
+
 #define DBG(...) do {                                  \
        if (bufmgr_gem->bufmgr.debug)                   \
                fprintf(stderr, __VA_ARGS__);           \
 } while (0)
 
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+
 typedef struct _drm_intel_bo_gem drm_intel_bo_gem;
 
 struct drm_intel_gem_bo_bucket {
@@ -73,10 +87,6 @@ struct drm_intel_gem_bo_bucket {
        unsigned long size;
 };
 
-/* Only cache objects up to 64MB.  Bigger than that, and the rounding of the
- * size makes many operations fail that wouldn't otherwise.
- */
-#define DRM_INTEL_GEM_BO_BUCKETS       14
 typedef struct _drm_intel_bufmgr_gem {
        drm_intel_bufmgr bufmgr;
 
@@ -93,14 +103,29 @@ typedef struct _drm_intel_bufmgr_gem {
        int exec_count;
 
        /** Array of lists of cached gem objects of power-of-two sizes */
-       struct drm_intel_gem_bo_bucket cache_bucket[DRM_INTEL_GEM_BO_BUCKETS];
+       struct drm_intel_gem_bo_bucket cache_bucket[14 * 4];
+       int num_buckets;
+       time_t time;
+
+       drmMMListHead named;
+       drmMMListHead vma_cache;
+       int vma_count, vma_open, vma_max;
 
        uint64_t gtt_size;
        int available_fences;
        int pci_device;
        int gen;
-       char bo_reuse;
-       char fenced_relocs;
+       unsigned int has_bsd : 1;
+       unsigned int has_blt : 1;
+       unsigned int has_relaxed_fencing : 1;
+       unsigned int has_llc : 1;
+       unsigned int has_wait_timeout : 1;
+       unsigned int bo_reuse : 1;
+       unsigned int no_exec : 1;
+       bool fenced_relocs;
+
+       FILE *aub_file;
+       uint32_t aub_offset;
 } drm_intel_bufmgr_gem;
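
The cache_bucket array grows from the old 14 power-of-two buckets to 14 * 4 entries, tracked at runtime through num_buckets. The initialisation that fills it lives outside the hunks shown here; a plausible sketch, assuming the common libdrm scheme of three intermediate sizes between each power of two up to the former 64MB cap:

	/* Sketch only; the real bucket setup is not part of this diff. */
	static void add_bucket(drm_intel_bufmgr_gem *bufmgr_gem, int size)
	{
		int i = bufmgr_gem->num_buckets;

		assert(i < ARRAY_SIZE(bufmgr_gem->cache_bucket));
		DRMINITLISTHEAD(&bufmgr_gem->cache_bucket[i].head);
		bufmgr_gem->cache_bucket[i].size = size;
		bufmgr_gem->num_buckets++;
	}

	static void init_cache_buckets(drm_intel_bufmgr_gem *bufmgr_gem)
	{
		unsigned long size, cache_max_size = 64 * 1024 * 1024;

		add_bucket(bufmgr_gem, 4096);
		add_bucket(bufmgr_gem, 4096 * 2);
		add_bucket(bufmgr_gem, 4096 * 3);

		/* Four buckets per power-of-two step: x, 1.25x, 1.5x, 1.75x */
		for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
			add_bucket(bufmgr_gem, size);
			add_bucket(bufmgr_gem, size + size * 1 / 4);
			add_bucket(bufmgr_gem, size + size * 2 / 4);
			add_bucket(bufmgr_gem, size + size * 3 / 4);
		}
	}
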
 
 #define DRM_INTEL_RELOC_FENCE (1<<0)
@@ -121,6 +146,7 @@ struct _drm_intel_bo_gem {
         * Kernel-assigned global name for this object
         */
        unsigned int global_name;
+       drmMMListHead name_list;
 
        /**
         * Index of the buffer within the validation list while preparing a
@@ -133,6 +159,7 @@ struct _drm_intel_bo_gem {
         */
        uint32_t tiling_mode;
        uint32_t swizzle_mode;
+       unsigned long stride;
 
        time_t free_time;
 
@@ -148,6 +175,8 @@ struct _drm_intel_bo_gem {
        void *mem_virtual;
        /** GTT virtual address for the buffer, saved across map/unmap cycles */
        void *gtt_virtual;
+       int map_count;
+       drmMMListHead vma_list;
 
        /** BO cache list */
        drmMMListHead head;
@@ -156,24 +185,24 @@ struct _drm_intel_bo_gem {
         * Boolean of whether this BO and its children have been included in
         * the current drm_intel_bufmgr_check_aperture_space() total.
         */
-       char included_in_check_aperture;
+       bool included_in_check_aperture;
 
        /**
         * Boolean of whether this buffer has been used as a relocation
         * target and had its size accounted for, and thus can't have any
         * further relocations added to it.
         */
-       char used_as_reloc_target;
+       bool used_as_reloc_target;
 
        /**
         * Boolean of whether we have encountered an error whilst building the relocation tree.
         */
-       char has_error;
+       bool has_error;
 
        /**
         * Boolean of whether this buffer can be re-used
         */
-       char reusable;
+       bool reusable;
 
        /**
         * Size in bytes of this buffer and its relocation descendents.
@@ -188,6 +217,14 @@ struct _drm_intel_bo_gem {
         * relocations.
         */
        int reloc_tree_fences;
+
+       /** Flags that we may need to do the SW_FINISH ioctl on unmap. */
+       bool mapped_cpu_write;
+
+       uint32_t aub_offset;
+
+       drm_intel_aub_annotation *aub_annotations;
+       unsigned aub_annotation_count;
 };
 
 static unsigned int
@@ -201,8 +238,9 @@ drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
                            uint32_t * swizzle_mode);
 
 static int
-drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
-                           uint32_t stride);
+drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
+                                    uint32_t tiling_mode,
+                                    uint32_t stride);
 
 static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
                                                      time_t time);
@@ -239,6 +277,10 @@ drm_intel_gem_bo_tile_size(drm_intel_bufmgr_gem *bufmgr_gem, unsigned long size,
                return size;
        }
 
+       /* Do we need to allocate every page for the fence? */
+       if (bufmgr_gem->has_relaxed_fencing)
+               return ROUND_UP_TO(size, 4096);
+
        for (i = min_size; i < size; i <<= 1)
                ;
 
@@ -252,15 +294,20 @@ drm_intel_gem_bo_tile_size(drm_intel_bufmgr_gem *bufmgr_gem, unsigned long size,
  */
 static unsigned long
 drm_intel_gem_bo_tile_pitch(drm_intel_bufmgr_gem *bufmgr_gem,
-                           unsigned long pitch, uint32_t tiling_mode)
+                           unsigned long pitch, uint32_t *tiling_mode)
 {
        unsigned long tile_width;
        unsigned long i;
 
-       if (tiling_mode == I915_TILING_NONE)
-               return pitch;
+       /* If untiled, then just align it so that we can do rendering
+        * to it with the 3D engine.
+        */
+       if (*tiling_mode == I915_TILING_NONE)
+               return ALIGN(pitch, 64);
 
-       if (tiling_mode == I915_TILING_X)
+       if (*tiling_mode == I915_TILING_X
+                       || (IS_915(bufmgr_gem->pci_device)
+                           && *tiling_mode == I915_TILING_Y))
                tile_width = 512;
        else
                tile_width = 128;
@@ -269,6 +316,14 @@ drm_intel_gem_bo_tile_pitch(drm_intel_bufmgr_gem *bufmgr_gem,
        if (bufmgr_gem->gen >= 4)
                return ROUND_UP_TO(pitch, tile_width);
 
+       /* The older hardware has a maximum pitch of 8192 with tiled
+        * surfaces, so fall back to untiled if it's too large.
+        */
+       if (pitch > 8192) {
+               *tiling_mode = I915_TILING_NONE;
+               return ALIGN(pitch, 64);
+       }
+
        /* Pre-965 needs power of two tile width */
        for (i = tile_width; i < pitch; i <<= 1)
                ;
@@ -282,7 +337,7 @@ drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
 {
        int i;
 
-       for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) {
+       for (i = 0; i < bufmgr_gem->num_buckets; i++) {
                struct drm_intel_gem_bo_bucket *bucket =
                    &bufmgr_gem->cache_bucket[i];
                if (bucket->size >= size) {
@@ -331,7 +386,6 @@ drm_intel_gem_bo_reference(drm_intel_bo *bo)
 {
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
 
-       assert(atomic_read(&bo_gem->refcount) > 0);
        atomic_inc(&bo_gem->refcount);
 }
 
@@ -448,8 +502,23 @@ drm_intel_bo_gem_set_in_aperture_size(drm_intel_bufmgr_gem *bufmgr_gem,
         * aperture. Optimal packing is for wimps.
         */
        size = bo_gem->bo.size;
-       if (bufmgr_gem->gen < 4 && bo_gem->tiling_mode != I915_TILING_NONE)
-               size *= 2;
+       if (bufmgr_gem->gen < 4 && bo_gem->tiling_mode != I915_TILING_NONE) {
+               int min_size;
+
+               if (bufmgr_gem->has_relaxed_fencing) {
+                       if (bufmgr_gem->gen == 3)
+                               min_size = 1024*1024;
+                       else
+                               min_size = 512*1024;
+
+                       while (min_size < size)
+                               min_size *= 2;
+               } else
+                       min_size = size;
+
+               /* Account for worst-case alignment. */
+               size = 2 * min_size;
+       }
 
        bo_gem->reloc_tree_size = size;
 }
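
To make the relaxed-fencing accounting concrete (illustrative numbers, not from the patch): a 700KB X-tiled buffer on gen3 gets min_size = 1MB, since the fence granularity already covers it, and is therefore charged 2MB of aperture; on gen2 min_size starts at 512KB, doubles once to 1MB, and the charge is likewise 2MB. Without relaxed fencing the buffer is simply charged twice its own size, 1400KB.
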
@@ -467,9 +536,9 @@ drm_intel_setup_reloc_list(drm_intel_bo *bo)
        bo_gem->relocs = malloc(max_relocs *
                                sizeof(struct drm_i915_gem_relocation_entry));
        bo_gem->reloc_target_info = malloc(max_relocs *
-                                          sizeof(drm_intel_reloc_target *));
+                                          sizeof(drm_intel_reloc_target));
        if (bo_gem->relocs == NULL || bo_gem->reloc_target_info == NULL) {
-               bo_gem->has_error = 1;
+               bo_gem->has_error = true;
 
                free (bo_gem->relocs);
                bo_gem->relocs = NULL;
@@ -491,12 +560,10 @@ drm_intel_gem_bo_busy(drm_intel_bo *bo)
        struct drm_i915_gem_busy busy;
        int ret;
 
-       memset(&busy, 0, sizeof(busy));
+       VG_CLEAR(busy);
        busy.handle = bo_gem->gem_handle;
 
-       do {
-               ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
-       } while (ret == -1 && errno == EINTR);
+       ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
 
        return (ret == 0 && busy.busy);
 }
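
The hand-rolled do { ioctl() } while (errno == EINTR) loops throughout this file are replaced by drmIoctl(). For reference, drmIoctl() in libdrm is essentially the same retry loop, additionally restarting on EAGAIN; a sketch, not part of this diff:

	int drmIoctl(int fd, unsigned long request, void *arg)
	{
		int ret;

		do {
			ret = ioctl(fd, request, arg);
		} while (ret == -1 && (errno == EINTR || errno == EAGAIN));

		return ret;
	}
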
@@ -507,10 +574,11 @@ drm_intel_gem_bo_madvise_internal(drm_intel_bufmgr_gem *bufmgr_gem,
 {
        struct drm_i915_gem_madvise madv;
 
+       VG_CLEAR(madv);
        madv.handle = bo_gem->gem_handle;
        madv.madv = state;
        madv.retained = 1;
-       ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
+       drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 
        return madv.retained;
 }
@@ -547,19 +615,21 @@ static drm_intel_bo *
 drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr,
                                const char *name,
                                unsigned long size,
-                               unsigned long flags)
+                               unsigned long flags,
+                               uint32_t tiling_mode,
+                               unsigned long stride)
 {
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
        drm_intel_bo_gem *bo_gem;
        unsigned int page_size = getpagesize();
        int ret;
        struct drm_intel_gem_bo_bucket *bucket;
-       int alloc_from_cache;
+       bool alloc_from_cache;
        unsigned long bo_size;
-       int for_render = 0;
+       bool for_render = false;
 
        if (flags & BO_ALLOC_FOR_RENDER)
-               for_render = 1;
+               for_render = true;
 
        /* Round the allocated size up to a power of two number of pages. */
        bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, size);
@@ -578,7 +648,7 @@ drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr,
        pthread_mutex_lock(&bufmgr_gem->lock);
        /* Get a buffer out of the cache if available */
 retry:
-       alloc_from_cache = 0;
+       alloc_from_cache = false;
        if (bucket != NULL && !DRMLISTEMPTY(&bucket->head)) {
                if (for_render) {
                        /* Allocate new render-target BOs from the tail (MRU)
@@ -588,7 +658,7 @@ retry:
                        bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
                                              bucket->head.prev, head);
                        DRMLISTDEL(&bo_gem->head);
-                       alloc_from_cache = 1;
+                       alloc_from_cache = true;
                } else {
                        /* For non-render-target BOs (where we're probably
                         * going to map it first thing in order to fill it
@@ -600,7 +670,7 @@ retry:
                        bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
                                              bucket->head.next, head);
                        if (!drm_intel_gem_bo_busy(&bo_gem->bo)) {
-                               alloc_from_cache = 1;
+                               alloc_from_cache = true;
                                DRMLISTDEL(&bo_gem->head);
                        }
                }
@@ -613,6 +683,13 @@ retry:
                                                                    bucket);
                                goto retry;
                        }
+
+                       if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
+                                                                tiling_mode,
+                                                                stride)) {
+                               drm_intel_gem_bo_free(&bo_gem->bo);
+                               goto retry;
+                       }
                }
        }
        pthread_mutex_unlock(&bufmgr_gem->lock);
@@ -625,14 +702,13 @@ retry:
                        return NULL;
 
                bo_gem->bo.size = bo_size;
-               memset(&create, 0, sizeof(create));
+
+               VG_CLEAR(create);
                create.size = bo_size;
 
-               do {
-                       ret = ioctl(bufmgr_gem->fd,
-                                   DRM_IOCTL_I915_GEM_CREATE,
-                                   &create);
-               } while (ret == -1 && errno == EINTR);
+               ret = drmIoctl(bufmgr_gem->fd,
+                              DRM_IOCTL_I915_GEM_CREATE,
+                              &create);
                bo_gem->gem_handle = create.handle;
                bo_gem->bo.handle = bo_gem->gem_handle;
                if (ret != 0) {
@@ -640,17 +716,31 @@ retry:
                        return NULL;
                }
                bo_gem->bo.bufmgr = bufmgr;
+
+               bo_gem->tiling_mode = I915_TILING_NONE;
+               bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+               bo_gem->stride = 0;
+
+               if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
+                                                        tiling_mode,
+                                                        stride)) {
+                   drm_intel_gem_bo_free(&bo_gem->bo);
+                   return NULL;
+               }
+
+               DRMINITLISTHEAD(&bo_gem->name_list);
+               DRMINITLISTHEAD(&bo_gem->vma_list);
        }
 
        bo_gem->name = name;
        atomic_set(&bo_gem->refcount, 1);
        bo_gem->validate_index = -1;
        bo_gem->reloc_tree_fences = 0;
-       bo_gem->used_as_reloc_target = 0;
-       bo_gem->has_error = 0;
-       bo_gem->tiling_mode = I915_TILING_NONE;
-       bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
-       bo_gem->reusable = 1;
+       bo_gem->used_as_reloc_target = false;
+       bo_gem->has_error = false;
+       bo_gem->reusable = true;
+       bo_gem->aub_annotations = NULL;
+       bo_gem->aub_annotation_count = 0;
 
        drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
 
@@ -667,7 +757,8 @@ drm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
                                  unsigned int alignment)
 {
        return drm_intel_gem_bo_alloc_internal(bufmgr, name, size,
-                                              BO_ALLOC_FOR_RENDER);
+                                              BO_ALLOC_FOR_RENDER,
+                                              I915_TILING_NONE, 0);
 }
 
 static drm_intel_bo *
@@ -676,7 +767,8 @@ drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr,
                       unsigned long size,
                       unsigned int alignment)
 {
-       return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, 0);
+       return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, 0,
+                                              I915_TILING_NONE, 0);
 }
 
 static drm_intel_bo *
@@ -685,46 +777,50 @@ drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
                             unsigned long *pitch, unsigned long flags)
 {
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
-       drm_intel_bo *bo;
-       unsigned long size, stride, aligned_y = y;
-       int ret;
-
-       /* If we're tiled, our allocations are in 8 or 32-row blocks,
-        * so failure to align our height means that we won't allocate
-        * enough pages.
-        *
-        * If we're untiled, we still have to align to 2 rows high
-        * because the data port accesses 2x2 blocks even if the
-        * bottom row isn't to be rendered, so failure to align means
-        * we could walk off the end of the GTT and fault.  This is
-        * documented on 965, and may be the case on older chipsets
-        * too so we try to be careful.
-        */
-       if (*tiling_mode == I915_TILING_NONE)
-               aligned_y = ALIGN(y, 2);
-       else if (*tiling_mode == I915_TILING_X)
-               aligned_y = ALIGN(y, 8);
-       else if (*tiling_mode == I915_TILING_Y)
-               aligned_y = ALIGN(y, 32);
-
-       stride = x * cpp;
-       stride = drm_intel_gem_bo_tile_pitch(bufmgr_gem, stride, *tiling_mode);
-       size = stride * aligned_y;
-       size = drm_intel_gem_bo_tile_size(bufmgr_gem, size, tiling_mode);
-
-       bo = drm_intel_gem_bo_alloc_internal(bufmgr, name, size, flags);
-       if (!bo)
-               return NULL;
-
-       ret = drm_intel_gem_bo_set_tiling(bo, tiling_mode, stride);
-       if (ret != 0) {
-               drm_intel_gem_bo_unreference(bo);
-               return NULL;
-       }
+       unsigned long size, stride;
+       uint32_t tiling;
 
+       do {
+               unsigned long aligned_y, height_alignment;
+
+               tiling = *tiling_mode;
+
+               /* If we're tiled, our allocations are in 8 or 32-row blocks,
+                * so failure to align our height means that we won't allocate
+                * enough pages.
+                *
+                * If we're untiled, we still have to align to 2 rows high
+                * because the data port accesses 2x2 blocks even if the
+                * bottom row isn't to be rendered, so failure to align means
+                * we could walk off the end of the GTT and fault.  This is
+                * documented on 965, and may be the case on older chipsets
+                * too so we try to be careful.
+                */
+               aligned_y = y;
+               height_alignment = 2;
+
+               if ((bufmgr_gem->gen == 2) && tiling != I915_TILING_NONE)
+                       height_alignment = 16;
+               else if (tiling == I915_TILING_X
+                       || (IS_915(bufmgr_gem->pci_device)
+                           && tiling == I915_TILING_Y))
+                       height_alignment = 8;
+               else if (tiling == I915_TILING_Y)
+                       height_alignment = 32;
+               aligned_y = ALIGN(y, height_alignment);
+
+               stride = x * cpp;
+               stride = drm_intel_gem_bo_tile_pitch(bufmgr_gem, stride, tiling_mode);
+               size = stride * aligned_y;
+               size = drm_intel_gem_bo_tile_size(bufmgr_gem, size, tiling_mode);
+       } while (*tiling_mode != tiling);
        *pitch = stride;
 
-       return bo;
+       if (tiling == I915_TILING_NONE)
+               stride = 0;
+
+       return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, flags,
+                                              tiling, stride);
 }
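
Since the loop above may demote the requested tiling (for example when the pitch exceeds the pre-965 limit), callers have to read back both the tiling mode and the pitch. A minimal usage sketch through the public entry point, with illustrative name and dimensions:

	uint32_t tiling = I915_TILING_X;
	unsigned long pitch;
	drm_intel_bo *bo;

	/* 1024x768 at 4 bytes per pixel; tiling may come back as
	 * I915_TILING_NONE if the hardware cannot honour the request.
	 */
	bo = drm_intel_bo_alloc_tiled(bufmgr, "example surface",
				      1024, 768, 4, &tiling, &pitch, 0);
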
 
 /**
@@ -743,21 +839,36 @@ drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
        int ret;
        struct drm_gem_open open_arg;
        struct drm_i915_gem_get_tiling get_tiling;
+       drmMMListHead *list;
+
+       /* At the moment most applications only have a few named bo.
+        * For instance, in a DRI client only the render buffers passed
+        * between X and the client are named. And since X returns the
+        * alternating names for the front/back buffer a linear search
+        * provides a sufficiently fast match.
+        */
+       for (list = bufmgr_gem->named.next;
+            list != &bufmgr_gem->named;
+            list = list->next) {
+               bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
+               if (bo_gem->global_name == handle) {
+                       drm_intel_gem_bo_reference(&bo_gem->bo);
+                       return &bo_gem->bo;
+               }
+       }
 
        bo_gem = calloc(1, sizeof(*bo_gem));
        if (!bo_gem)
                return NULL;
 
-       memset(&open_arg, 0, sizeof(open_arg));
+       VG_CLEAR(open_arg);
        open_arg.name = handle;
-       do {
-               ret = ioctl(bufmgr_gem->fd,
-                           DRM_IOCTL_GEM_OPEN,
-                           &open_arg);
-       } while (ret == -1 && errno == EINTR);
+       ret = drmIoctl(bufmgr_gem->fd,
+                      DRM_IOCTL_GEM_OPEN,
+                      &open_arg);
        if (ret != 0) {
-               fprintf(stderr, "Couldn't reference %s handle 0x%08x: %s\n",
-                       name, handle, strerror(errno));
+               DBG("Couldn't reference %s handle 0x%08x: %s\n",
+                   name, handle, strerror(errno));
                free(bo_gem);
                return NULL;
        }
@@ -769,20 +880,26 @@ drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
        atomic_set(&bo_gem->refcount, 1);
        bo_gem->validate_index = -1;
        bo_gem->gem_handle = open_arg.handle;
+       bo_gem->bo.handle = open_arg.handle;
        bo_gem->global_name = handle;
-       bo_gem->reusable = 0;
+       bo_gem->reusable = false;
 
-       memset(&get_tiling, 0, sizeof(get_tiling));
+       VG_CLEAR(get_tiling);
        get_tiling.handle = bo_gem->gem_handle;
-       ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling);
+       ret = drmIoctl(bufmgr_gem->fd,
+                      DRM_IOCTL_I915_GEM_GET_TILING,
+                      &get_tiling);
        if (ret != 0) {
                drm_intel_gem_bo_unreference(&bo_gem->bo);
                return NULL;
        }
        bo_gem->tiling_mode = get_tiling.tiling_mode;
        bo_gem->swizzle_mode = get_tiling.swizzle_mode;
+       /* XXX stride is unknown */
        drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
 
+       DRMINITLISTHEAD(&bo_gem->vma_list);
+       DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
        DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);
 
        return &bo_gem->bo;
@@ -796,30 +913,53 @@ drm_intel_gem_bo_free(drm_intel_bo *bo)
        struct drm_gem_close close;
        int ret;
 
-       if (bo_gem->mem_virtual)
+       DRMLISTDEL(&bo_gem->vma_list);
+       if (bo_gem->mem_virtual) {
+               VG(VALGRIND_FREELIKE_BLOCK(bo_gem->mem_virtual, 0));
                munmap(bo_gem->mem_virtual, bo_gem->bo.size);
-       if (bo_gem->gtt_virtual)
+               bufmgr_gem->vma_count--;
+       }
+       if (bo_gem->gtt_virtual) {
                munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
+               bufmgr_gem->vma_count--;
+       }
 
        /* Close this object */
-       memset(&close, 0, sizeof(close));
+       VG_CLEAR(close);
        close.handle = bo_gem->gem_handle;
-       ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
+       ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
        if (ret != 0) {
-               fprintf(stderr,
-                       "DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
-                       bo_gem->gem_handle, bo_gem->name, strerror(errno));
+               DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
+                   bo_gem->gem_handle, bo_gem->name, strerror(errno));
        }
+       free(bo_gem->aub_annotations);
        free(bo);
 }
 
+static void
+drm_intel_gem_bo_mark_mmaps_incoherent(drm_intel_bo *bo)
+{
+#ifdef HAVE_VALGRIND
+       drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+
+       if (bo_gem->mem_virtual)
+               VALGRIND_MAKE_MEM_NOACCESS(bo_gem->mem_virtual, bo->size);
+
+       if (bo_gem->gtt_virtual)
+               VALGRIND_MAKE_MEM_NOACCESS(bo_gem->gtt_virtual, bo->size);
+#endif
+}
+
 /** Frees all cached buffers significantly older than @time. */
 static void
 drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
 {
        int i;
 
-       for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) {
+       if (bufmgr_gem->time == time)
+               return;
+
+       for (i = 0; i < bufmgr_gem->num_buckets; i++) {
                struct drm_intel_gem_bo_bucket *bucket =
                    &bufmgr_gem->cache_bucket[i];
 
@@ -836,6 +976,69 @@ drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
                        drm_intel_gem_bo_free(&bo_gem->bo);
                }
        }
+
+       bufmgr_gem->time = time;
+}
+
+static void drm_intel_gem_bo_purge_vma_cache(drm_intel_bufmgr_gem *bufmgr_gem)
+{
+       int limit;
+
+       DBG("%s: cached=%d, open=%d, limit=%d\n", __FUNCTION__,
+           bufmgr_gem->vma_count, bufmgr_gem->vma_open, bufmgr_gem->vma_max);
+
+       if (bufmgr_gem->vma_max < 0)
+               return;
+
+       /* We may need to evict a few entries in order to create new mmaps */
+       limit = bufmgr_gem->vma_max - 2*bufmgr_gem->vma_open;
+       if (limit < 0)
+               limit = 0;
+
+       while (bufmgr_gem->vma_count > limit) {
+               drm_intel_bo_gem *bo_gem;
+
+               bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
+                                     bufmgr_gem->vma_cache.next,
+                                     vma_list);
+               assert(bo_gem->map_count == 0);
+               DRMLISTDELINIT(&bo_gem->vma_list);
+
+               if (bo_gem->mem_virtual) {
+                       munmap(bo_gem->mem_virtual, bo_gem->bo.size);
+                       bo_gem->mem_virtual = NULL;
+                       bufmgr_gem->vma_count--;
+               }
+               if (bo_gem->gtt_virtual) {
+                       munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
+                       bo_gem->gtt_virtual = NULL;
+                       bufmgr_gem->vma_count--;
+               }
+       }
+}
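
For concreteness (illustrative numbers): with vma_max = 1024 and vma_open = 100 buffers currently mapped, the purge trims the cache to limit = 1024 - 2 * 100 = 824 entries, leaving headroom for every open buffer to need both a CPU and a GTT mapping; a negative vma_max disables the cap entirely.
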
+
+static void drm_intel_gem_bo_close_vma(drm_intel_bufmgr_gem *bufmgr_gem,
+                                      drm_intel_bo_gem *bo_gem)
+{
+       bufmgr_gem->vma_open--;
+       DRMLISTADDTAIL(&bo_gem->vma_list, &bufmgr_gem->vma_cache);
+       if (bo_gem->mem_virtual)
+               bufmgr_gem->vma_count++;
+       if (bo_gem->gtt_virtual)
+               bufmgr_gem->vma_count++;
+       drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
+}
+
+static void drm_intel_gem_bo_open_vma(drm_intel_bufmgr_gem *bufmgr_gem,
+                                     drm_intel_bo_gem *bo_gem)
+{
+       bufmgr_gem->vma_open++;
+       DRMLISTDEL(&bo_gem->vma_list);
+       if (bo_gem->mem_virtual)
+               bufmgr_gem->vma_count--;
+       if (bo_gem->gtt_virtual)
+               bufmgr_gem->vma_count--;
+       drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
 }
 
 static void
@@ -844,17 +1047,18 @@ drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
        struct drm_intel_gem_bo_bucket *bucket;
-       uint32_t tiling_mode;
        int i;
 
        /* Unreference all the target buffers */
        for (i = 0; i < bo_gem->reloc_count; i++) {
-               drm_intel_gem_bo_unreference_locked_timed(bo_gem->
-                                                         reloc_target_info[i].bo,
-                                                         time);
+               if (bo_gem->reloc_target_info[i].bo != bo) {
+                       drm_intel_gem_bo_unreference_locked_timed(bo_gem->
+                                                                 reloc_target_info[i].bo,
+                                                                 time);
+               }
        }
        bo_gem->reloc_count = 0;
-       bo_gem->used_as_reloc_target = 0;
+       bo_gem->used_as_reloc_target = false;
 
        DBG("bo_unreference final: %d (%s)\n",
            bo_gem->gem_handle, bo_gem->name);
@@ -869,11 +1073,19 @@ drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
                bo_gem->relocs = NULL;
        }
 
+       /* Clear any left-over mappings */
+       if (bo_gem->map_count) {
+               DBG("bo freed with non-zero map-count %d\n", bo_gem->map_count);
+               bo_gem->map_count = 0;
+               drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
+               drm_intel_gem_bo_mark_mmaps_incoherent(bo);
+       }
+
+       DRMLISTDEL(&bo_gem->name_list);
+
        bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
        /* Put the buffer into our internal cache for reuse if we can. */
-       tiling_mode = I915_TILING_NONE;
        if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
-           drm_intel_gem_bo_set_tiling(bo, &tiling_mode, 0) == 0 &&
            drm_intel_gem_bo_madvise_internal(bufmgr_gem, bo_gem,
                                              I915_MADV_DONTNEED)) {
                bo_gem->free_time = time;
@@ -882,8 +1094,6 @@ drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
                bo_gem->validate_index = -1;
 
                DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
-
-               drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time);
        } else {
                drm_intel_gem_bo_free(bo);
        }
@@ -913,6 +1123,7 @@ static void drm_intel_gem_bo_unreference(drm_intel_bo *bo)
 
                pthread_mutex_lock(&bufmgr_gem->lock);
                drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
+               drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
                pthread_mutex_unlock(&bufmgr_gem->lock);
        }
 }
@@ -926,96 +1137,97 @@ static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
 
        pthread_mutex_lock(&bufmgr_gem->lock);
 
-       /* Allow recursive mapping. Mesa may recursively map buffers with
-        * nested display loops.
-        */
+       if (bo_gem->map_count++ == 0)
+               drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
+
        if (!bo_gem->mem_virtual) {
                struct drm_i915_gem_mmap mmap_arg;
 
-               DBG("bo_map: %d (%s)\n", bo_gem->gem_handle, bo_gem->name);
+               DBG("bo_map: %d (%s), map_count=%d\n",
+                   bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
 
-               memset(&mmap_arg, 0, sizeof(mmap_arg));
+               VG_CLEAR(mmap_arg);
                mmap_arg.handle = bo_gem->gem_handle;
                mmap_arg.offset = 0;
                mmap_arg.size = bo->size;
-               do {
-                       ret = ioctl(bufmgr_gem->fd,
-                                   DRM_IOCTL_I915_GEM_MMAP,
-                                   &mmap_arg);
-               } while (ret == -1 && errno == EINTR);
+               ret = drmIoctl(bufmgr_gem->fd,
+                              DRM_IOCTL_I915_GEM_MMAP,
+                              &mmap_arg);
                if (ret != 0) {
                        ret = -errno;
-                       fprintf(stderr,
-                               "%s:%d: Error mapping buffer %d (%s): %s .\n",
-                               __FILE__, __LINE__, bo_gem->gem_handle,
-                               bo_gem->name, strerror(errno));
+                       DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
+                           __FILE__, __LINE__, bo_gem->gem_handle,
+                           bo_gem->name, strerror(errno));
+                       if (--bo_gem->map_count == 0)
+                               drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
                        pthread_mutex_unlock(&bufmgr_gem->lock);
                        return ret;
                }
+               VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
                bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
        }
        DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
            bo_gem->mem_virtual);
        bo->virtual = bo_gem->mem_virtual;
 
+       VG_CLEAR(set_domain);
        set_domain.handle = bo_gem->gem_handle;
        set_domain.read_domains = I915_GEM_DOMAIN_CPU;
        if (write_enable)
                set_domain.write_domain = I915_GEM_DOMAIN_CPU;
        else
                set_domain.write_domain = 0;
-       do {
-               ret = ioctl(bufmgr_gem->fd,
-                           DRM_IOCTL_I915_GEM_SET_DOMAIN,
-                           &set_domain);
-       } while (ret == -1 && errno == EINTR);
+       ret = drmIoctl(bufmgr_gem->fd,
+                      DRM_IOCTL_I915_GEM_SET_DOMAIN,
+                      &set_domain);
        if (ret != 0) {
-               ret = -errno;
-               fprintf(stderr, "%s:%d: Error setting to CPU domain %d: %s\n",
-                       __FILE__, __LINE__, bo_gem->gem_handle,
-                       strerror(errno));
-               pthread_mutex_unlock(&bufmgr_gem->lock);
-               return ret;
+               DBG("%s:%d: Error setting to CPU domain %d: %s\n",
+                   __FILE__, __LINE__, bo_gem->gem_handle,
+                   strerror(errno));
        }
 
+       if (write_enable)
+               bo_gem->mapped_cpu_write = true;
+
+       drm_intel_gem_bo_mark_mmaps_incoherent(bo);
+       VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->mem_virtual, bo->size));
        pthread_mutex_unlock(&bufmgr_gem->lock);
 
        return 0;
 }
 
-int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
+static int
+map_gtt(drm_intel_bo *bo)
 {
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
-       struct drm_i915_gem_set_domain set_domain;
        int ret;
 
-       pthread_mutex_lock(&bufmgr_gem->lock);
+       if (bo_gem->map_count++ == 0)
+               drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
 
        /* Get a mapping of the buffer if we haven't before. */
        if (bo_gem->gtt_virtual == NULL) {
                struct drm_i915_gem_mmap_gtt mmap_arg;
 
-               DBG("bo_map_gtt: mmap %d (%s)\n", bo_gem->gem_handle,
-                   bo_gem->name);
+               DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
+                   bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
 
-               memset(&mmap_arg, 0, sizeof(mmap_arg));
+               VG_CLEAR(mmap_arg);
                mmap_arg.handle = bo_gem->gem_handle;
 
                /* Get the fake offset back... */
-               do {
-                       ret = ioctl(bufmgr_gem->fd,
-                                   DRM_IOCTL_I915_GEM_MMAP_GTT,
-                                   &mmap_arg);
-               } while (ret == -1 && errno == EINTR);
+               ret = drmIoctl(bufmgr_gem->fd,
+                              DRM_IOCTL_I915_GEM_MMAP_GTT,
+                              &mmap_arg);
                if (ret != 0) {
                        ret = -errno;
-                       fprintf(stderr,
-                               "%s:%d: Error preparing buffer map %d (%s): %s .\n",
-                               __FILE__, __LINE__,
-                               bo_gem->gem_handle, bo_gem->name,
-                               strerror(errno));
-                       pthread_mutex_unlock(&bufmgr_gem->lock);
+                       DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
+                           __FILE__, __LINE__,
+                           bo_gem->gem_handle, bo_gem->name,
+                           strerror(errno));
+                       if (--bo_gem->map_count == 0)
+                               drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
                        return ret;
                }
 
@@ -1026,12 +1238,12 @@ int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
                if (bo_gem->gtt_virtual == MAP_FAILED) {
                        bo_gem->gtt_virtual = NULL;
                        ret = -errno;
-                       fprintf(stderr,
-                               "%s:%d: Error mapping buffer %d (%s): %s .\n",
-                               __FILE__, __LINE__,
-                               bo_gem->gem_handle, bo_gem->name,
-                               strerror(errno));
-                       pthread_mutex_unlock(&bufmgr_gem->lock);
+                       DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
+                           __FILE__, __LINE__,
+                           bo_gem->gem_handle, bo_gem->name,
+                           strerror(errno));
+                       if (--bo_gem->map_count == 0)
+                               drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
                        return ret;
                }
        }
@@ -1041,41 +1253,84 @@ int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
        DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
            bo_gem->gtt_virtual);
 
-       /* Now move it to the GTT domain so that the CPU caches are flushed */
+       return 0;
+}
+
+int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
+{
+       drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+       drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+       struct drm_i915_gem_set_domain set_domain;
+       int ret;
+
+       pthread_mutex_lock(&bufmgr_gem->lock);
+
+       ret = map_gtt(bo);
+       if (ret) {
+               pthread_mutex_unlock(&bufmgr_gem->lock);
+               return ret;
+       }
+
+       /* Now move it to the GTT domain so that the GPU and CPU
+        * caches are flushed and the GPU isn't actively using the
+        * buffer.
+        *
+        * The pagefault handler does this domain change for us when
+        * it has unbound the BO from the GTT, but it's up to us to
+        * tell it when we're about to use things if we had done
+        * rendering and it still happens to be bound to the GTT.
+        */
+       VG_CLEAR(set_domain);
        set_domain.handle = bo_gem->gem_handle;
        set_domain.read_domains = I915_GEM_DOMAIN_GTT;
        set_domain.write_domain = I915_GEM_DOMAIN_GTT;
-       do {
-               ret = ioctl(bufmgr_gem->fd,
-                           DRM_IOCTL_I915_GEM_SET_DOMAIN,
-                           &set_domain);
-       } while (ret == -1 && errno == EINTR);
-
+       ret = drmIoctl(bufmgr_gem->fd,
+                      DRM_IOCTL_I915_GEM_SET_DOMAIN,
+                      &set_domain);
        if (ret != 0) {
-               ret = -errno;
-               fprintf(stderr, "%s:%d: Error setting domain %d: %s\n",
-                       __FILE__, __LINE__, bo_gem->gem_handle,
-                       strerror(errno));
+               DBG("%s:%d: Error setting domain %d: %s\n",
+                   __FILE__, __LINE__, bo_gem->gem_handle,
+                   strerror(errno));
        }
 
+       drm_intel_gem_bo_mark_mmaps_incoherent(bo);
+       VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size));
        pthread_mutex_unlock(&bufmgr_gem->lock);
 
-       return ret;
+       return 0;
 }
 
-int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
+/**
+ * Performs a mapping of the buffer object like the normal GTT
+ * mapping, but avoids waiting for the GPU to be done reading from or
+ * rendering to the buffer.
+ *
+ * This is used in the implementation of GL_ARB_map_buffer_range: The
+ * user asks to create a buffer, then does a mapping, fills some
+ * space, runs a drawing command, then asks to map it again without
+ * synchronizing because it guarantees that it won't write over the
+ * data that the GPU is busy using (or, more specifically, that if it
+ * does write over the data, it acknowledges that rendering is
+ * undefined).
+ */
+
+int drm_intel_gem_bo_map_unsynchronized(drm_intel_bo *bo)
 {
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
-       drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
-       int ret = 0;
-
-       if (bo == NULL)
-               return 0;
+       int ret;
 
-       assert(bo_gem->gtt_virtual != NULL);
+       /* If the CPU cache isn't coherent with the GTT, then use a
+        * regular synchronized mapping.  The problem is that we don't
+        * track where the buffer was last used on the CPU side in
+        * terms of drm_intel_bo_map vs drm_intel_gem_bo_map_gtt, so
+        * we would potentially corrupt the buffer even when the user
+        * does reasonable things.
+        */
+       if (!bufmgr_gem->has_llc)
+               return drm_intel_gem_bo_map_gtt(bo);
 
        pthread_mutex_lock(&bufmgr_gem->lock);
-       bo->virtual = NULL;
+       ret = map_gtt(bo);
        pthread_mutex_unlock(&bufmgr_gem->lock);
 
        return ret;
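
A sketch of the GL_ARB_map_buffer_range-style pattern described above; the buffer contents, sizes and the second write range are illustrative and assume the caller never overwrites data the GPU may still be reading:

	/* Initial fill through a normal, synchronized GTT map. */
	drm_intel_gem_bo_map_gtt(bo);
	memcpy(bo->virtual, vertices, first_size);
	drm_intel_gem_bo_unmap_gtt(bo);

	/* ... submit a batch that reads the buffer ... */

	/* Append more data without waiting for the GPU. */
	drm_intel_gem_bo_map_unsynchronized(bo);
	memcpy((char *)bo->virtual + first_size, more_vertices, second_size);
	drm_intel_gem_bo_unmap_gtt(bo);
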
@@ -1085,33 +1340,59 @@ static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
 {
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
-       struct drm_i915_gem_sw_finish sw_finish;
-       int ret;
+       int ret = 0;
 
        if (bo == NULL)
                return 0;
 
-       assert(bo_gem->mem_virtual != NULL);
-
        pthread_mutex_lock(&bufmgr_gem->lock);
 
-       /* Cause a flush to happen if the buffer's pinned for scanout, so the
-        * results show up in a timely manner.
-        */
-       sw_finish.handle = bo_gem->gem_handle;
-       do {
-               ret = ioctl(bufmgr_gem->fd,
-                           DRM_IOCTL_I915_GEM_SW_FINISH,
-                           &sw_finish);
-       } while (ret == -1 && errno == EINTR);
-       ret = ret == -1 ? -errno : 0;
+       if (bo_gem->map_count <= 0) {
+               DBG("attempted to unmap an unmapped bo\n");
+               pthread_mutex_unlock(&bufmgr_gem->lock);
+               /* Preserve the old behaviour of just treating this as a
+                * no-op rather than reporting the error.
+                */
+               return 0;
+       }
+
+       if (bo_gem->mapped_cpu_write) {
+               struct drm_i915_gem_sw_finish sw_finish;
+
+               /* Cause a flush to happen if the buffer's pinned for
+                * scanout, so the results show up in a timely manner.
+                * Unlike GTT set domains, this only does work if the
+                * buffer should be scanout-related.
+                */
+               VG_CLEAR(sw_finish);
+               sw_finish.handle = bo_gem->gem_handle;
+               ret = drmIoctl(bufmgr_gem->fd,
+                              DRM_IOCTL_I915_GEM_SW_FINISH,
+                              &sw_finish);
+               ret = ret == -1 ? -errno : 0;
+
+               bo_gem->mapped_cpu_write = false;
+       }
 
-       bo->virtual = NULL;
+       /* We need to unmap after every invocation as we cannot track
+        * an open vma for every bo as that will exhaust the system
+        * limits and cause later failures.
+        */
+       if (--bo_gem->map_count == 0) {
+               drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
+               drm_intel_gem_bo_mark_mmaps_incoherent(bo);
+               bo->virtual = NULL;
+       }
        pthread_mutex_unlock(&bufmgr_gem->lock);
 
        return ret;
 }
 
+int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
+{
+       return drm_intel_gem_bo_unmap(bo);
+}
+
 static int
 drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
                         unsigned long size, const void *data)
@@ -1121,22 +1402,19 @@ drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
        struct drm_i915_gem_pwrite pwrite;
        int ret;
 
-       memset(&pwrite, 0, sizeof(pwrite));
+       VG_CLEAR(pwrite);
        pwrite.handle = bo_gem->gem_handle;
        pwrite.offset = offset;
        pwrite.size = size;
        pwrite.data_ptr = (uint64_t) (uintptr_t) data;
-       do {
-               ret = ioctl(bufmgr_gem->fd,
-                           DRM_IOCTL_I915_GEM_PWRITE,
-                           &pwrite);
-       } while (ret == -1 && errno == EINTR);
+       ret = drmIoctl(bufmgr_gem->fd,
+                      DRM_IOCTL_I915_GEM_PWRITE,
+                      &pwrite);
        if (ret != 0) {
                ret = -errno;
-               fprintf(stderr,
-                       "%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
-                       __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
-                       (int)size, strerror(errno));
+               DBG("%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
+                   __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
+                   (int)size, strerror(errno));
        }
 
        return ret;
@@ -1149,9 +1427,11 @@ drm_intel_gem_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
        struct drm_i915_get_pipe_from_crtc_id get_pipe_from_crtc_id;
        int ret;
 
+       VG_CLEAR(get_pipe_from_crtc_id);
        get_pipe_from_crtc_id.crtc_id = crtc_id;
-       ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID,
-                   &get_pipe_from_crtc_id);
+       ret = drmIoctl(bufmgr_gem->fd,
+                      DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID,
+                      &get_pipe_from_crtc_id);
        if (ret != 0) {
                /* We return -1 here to signal that we don't
                 * know which pipe is associated with this crtc.
@@ -1174,32 +1454,81 @@ drm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
        struct drm_i915_gem_pread pread;
        int ret;
 
-       memset(&pread, 0, sizeof(pread));
+       VG_CLEAR(pread);
        pread.handle = bo_gem->gem_handle;
        pread.offset = offset;
        pread.size = size;
        pread.data_ptr = (uint64_t) (uintptr_t) data;
-       do {
-               ret = ioctl(bufmgr_gem->fd,
-                           DRM_IOCTL_I915_GEM_PREAD,
-                           &pread);
-       } while (ret == -1 && errno == EINTR);
+       ret = drmIoctl(bufmgr_gem->fd,
+                      DRM_IOCTL_I915_GEM_PREAD,
+                      &pread);
        if (ret != 0) {
                ret = -errno;
-               fprintf(stderr,
-                       "%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
-                       __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
-                       (int)size, strerror(errno));
+               DBG("%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
+                   __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
+                   (int)size, strerror(errno));
        }
 
        return ret;
 }
 
-/** Waits for all GPU rendering to the object to have completed. */
+/** Waits for all GPU rendering with the object to have completed. */
 static void
 drm_intel_gem_bo_wait_rendering(drm_intel_bo *bo)
 {
-       drm_intel_gem_bo_start_gtt_access(bo, 0);
+       drm_intel_gem_bo_start_gtt_access(bo, 1);
+}
+
+/**
+ * Waits on a BO for the given amount of time.
+ *
+ * @bo: buffer object to wait for
+ * @timeout_ns: amount of time to wait in nanoseconds.
+ *   If value is less than 0, an infinite wait will occur.
+ *
+ * Returns 0 if the wait was successful, i.e. the last batch referencing the
+ * object has completed within the allotted time. Otherwise some negative return
+ * value describes the error. Of particular interest is -ETIME when the wait has
+ * failed to yield the desired result.
+ *
+ * Similar to drm_intel_gem_bo_wait_rendering except a timeout parameter allows
+ * the operation to give up after a certain amount of time. Another subtle
+ * difference is the internal locking semantics are different (this variant does
+ * not hold the lock for the duration of the wait). This makes the wait subject
+ * to a larger userspace race window.
+ *
+ * The implementation shall wait until the object is no longer actively
+ * referenced within a batch buffer at the time of the call. The wait does
+ * not guarantee that the buffer is not re-issued via another thread or a
+ * flinked handle. Userspace must make sure this race does not occur if such precision
+ * is important.
+ */
+int drm_intel_gem_bo_wait(drm_intel_bo *bo, int64_t timeout_ns)
+{
+       drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+       drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+       struct drm_i915_gem_wait wait;
+       int ret;
+
+       if (!bufmgr_gem->has_wait_timeout) {
+               DBG("%s:%d: Timed wait is not supported. Falling back to "
+                   "infinite wait\n", __FILE__, __LINE__);
+               if (timeout_ns) {
+                       drm_intel_gem_bo_wait_rendering(bo);
+                       return 0;
+               } else {
+                       return drm_intel_gem_bo_busy(bo) ? -ETIME : 0;
+               }
+       }
+
+       wait.bo_handle = bo_gem->gem_handle;
+       wait.timeout_ns = timeout_ns;
+       wait.flags = 0;
+       ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
+       if (ret == -1)
+               return -errno;
+
+       return ret;
 }
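
A minimal usage sketch; the one-second timeout is illustrative:

	/* Wait up to one second for the last batch referencing the bo. */
	int ret = drm_intel_gem_bo_wait(bo, 1000000000ll);
	if (ret == -ETIME) {
		/* Still busy: postpone the readback rather than stall. */
	} else if (ret < 0) {
		/* Some other error from the wait ioctl. */
	}
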
 
 /**
@@ -1217,20 +1546,18 @@ drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)
        struct drm_i915_gem_set_domain set_domain;
        int ret;
 
+       VG_CLEAR(set_domain);
        set_domain.handle = bo_gem->gem_handle;
        set_domain.read_domains = I915_GEM_DOMAIN_GTT;
        set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0;
-       do {
-               ret = ioctl(bufmgr_gem->fd,
-                           DRM_IOCTL_I915_GEM_SET_DOMAIN,
-                           &set_domain);
-       } while (ret == -1 && errno == EINTR);
+       ret = drmIoctl(bufmgr_gem->fd,
+                      DRM_IOCTL_I915_GEM_SET_DOMAIN,
+                      &set_domain);
        if (ret != 0) {
-               fprintf(stderr,
-                       "%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
-                       __FILE__, __LINE__, bo_gem->gem_handle,
-                       set_domain.read_domains, set_domain.write_domain,
-                       strerror(errno));
+               DBG("%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
+                   __FILE__, __LINE__, bo_gem->gem_handle,
+                   set_domain.read_domains, set_domain.write_domain,
+                   strerror(errno));
        }
 }
 
@@ -1247,7 +1574,7 @@ drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
        pthread_mutex_destroy(&bufmgr_gem->lock);
 
        /* Free any cached buffer objects we were going to reuse */
-       for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) {
+       for (i = 0; i < bufmgr_gem->num_buckets; i++) {
                struct drm_intel_gem_bo_bucket *bucket =
                    &bufmgr_gem->cache_bucket[i];
                drm_intel_bo_gem *bo_gem;
@@ -1277,26 +1604,28 @@ static int
 do_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
                 drm_intel_bo *target_bo, uint32_t target_offset,
                 uint32_t read_domains, uint32_t write_domain,
-                int need_fence)
+                bool need_fence)
 {
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
        drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
+       bool fenced_command;
 
        if (bo_gem->has_error)
                return -ENOMEM;
 
        if (target_bo_gem->has_error) {
-               bo_gem->has_error = 1;
+               bo_gem->has_error = true;
                return -ENOMEM;
        }
 
-       if (target_bo_gem->tiling_mode == I915_TILING_NONE)
-               need_fence = 0;
-
        /* We never use HW fences for rendering on 965+ */
        if (bufmgr_gem->gen >= 4)
-               need_fence = 0;
+               need_fence = false;
+
+       fenced_command = need_fence;
+       if (target_bo_gem->tiling_mode == I915_TILING_NONE)
+               need_fence = false;
 
        /* Create a new relocation list if needed */
        if (bo_gem->relocs == NULL && drm_intel_setup_reloc_list(bo))
@@ -1313,7 +1642,10 @@ do_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
         * already been accounted for.
         */
        assert(!bo_gem->used_as_reloc_target);
-       bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
+       if (target_bo_gem != bo_gem) {
+               target_bo_gem->used_as_reloc_target = true;
+               bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
+       }
        /* An object needing a fence is a tiled buffer, so it won't have
         * relocs to other buffers.
         */
@@ -1321,9 +1653,6 @@ do_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
                target_bo_gem->reloc_tree_fences = 1;
        bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences;
 
-       /* Flag the target to disallow further relocations in it. */
-       target_bo_gem->used_as_reloc_target = 1;
-
        bo_gem->relocs[bo_gem->reloc_count].offset = offset;
        bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
        bo_gem->relocs[bo_gem->reloc_count].target_handle =
@@ -1333,8 +1662,9 @@ do_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
        bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset;
 
        bo_gem->reloc_target_info[bo_gem->reloc_count].bo = target_bo;
-       drm_intel_gem_bo_reference(target_bo);
-       if (need_fence)
+       if (target_bo != bo)
+               drm_intel_gem_bo_reference(target_bo);
+       if (fenced_command)
                bo_gem->reloc_target_info[bo_gem->reloc_count].flags =
                        DRM_INTEL_RELOC_FENCE;
        else
@@ -1364,7 +1694,49 @@ drm_intel_gem_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
                                  uint32_t read_domains, uint32_t write_domain)
 {
        return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
-                               read_domains, write_domain, 1);
+                               read_domains, write_domain, true);
+}
+
+int
+drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo)
+{
+       drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+
+       return bo_gem->reloc_count;
+}
+
+/**
+ * Removes existing relocation entries in the BO after "start".
+ *
+ * This allows a user to avoid a two-step process for state setup with
+ * counting up all the buffer objects and doing a
+ * drm_intel_bufmgr_check_aperture_space() before emitting any of the
+ * relocations for the state setup.  Instead, save the state of the
+ * batchbuffer including drm_intel_gem_bo_get_reloc_count(), emit all the
+ * state, and then check if it still fits in the aperture.
+ *
+ * Any further drm_intel_bufmgr_check_aperture_space() queries
+ * involving this buffer in the tree are undefined after this call.
+ */
+void
+drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start)
+{
+       drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+       int i;
+       struct timespec time;
+
+       clock_gettime(CLOCK_MONOTONIC, &time);
+
+       assert(bo_gem->reloc_count >= start);
+       /* Unreference the cleared target buffers */
+       for (i = start; i < bo_gem->reloc_count; i++) {
+               if (bo_gem->reloc_target_info[i].bo != bo) {
+                       drm_intel_gem_bo_unreference_locked_timed(bo_gem->
+                                                                 reloc_target_info[i].bo,
+                                                                 time.tv_sec);
+               }
+       }
+       bo_gem->reloc_count = start;
 }
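
A sketch of the pattern the comment describes; emit_state() and batch_bo are hypothetical caller-side names:

	int saved = drm_intel_gem_bo_get_reloc_count(batch_bo);

	emit_state(batch_bo);	/* adds relocations to batch_bo */

	if (drm_intel_bufmgr_check_aperture_space(&batch_bo, 1) != 0) {
		/* Didn't fit: drop the just-added relocations, flush the
		 * batch, and re-emit the state into a fresh batch.
		 */
		drm_intel_gem_bo_clear_relocs(batch_bo, saved);
	}
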
 
 /**
@@ -1384,6 +1756,11 @@ drm_intel_gem_bo_process_reloc(drm_intel_bo *bo)
        for (i = 0; i < bo_gem->reloc_count; i++) {
                drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
 
+               if (target_bo == bo)
+                       continue;
+
+               drm_intel_gem_bo_mark_mmaps_incoherent(bo);
+
                /* Continue walking the tree depth-first. */
                drm_intel_gem_bo_process_reloc(target_bo);
 
@@ -1405,6 +1782,11 @@ drm_intel_gem_bo_process_reloc2(drm_intel_bo *bo)
                drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
                int need_fence;
 
+               if (target_bo == bo)
+                       continue;
+
+               drm_intel_gem_bo_mark_mmaps_incoherent(bo);
+
                /* Continue walking the tree depth-first. */
                drm_intel_gem_bo_process_reloc2(target_bo);
 
@@ -1456,27 +1838,309 @@ drm_intel_update_buffer_offsets2 (drm_intel_bufmgr_gem *bufmgr_gem)
        }
 }
 
-static int
-drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
-                     drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
+static void
+aub_out(drm_intel_bufmgr_gem *bufmgr_gem, uint32_t data)
+{
+       fwrite(&data, 1, 4, bufmgr_gem->aub_file);
+}
+
+static void
+aub_out_data(drm_intel_bufmgr_gem *bufmgr_gem, void *data, size_t size)
+{
+       fwrite(data, 1, size, bufmgr_gem->aub_file);
+}
+
+static void
+aub_write_bo_data(drm_intel_bo *bo, uint32_t offset, uint32_t size)
 {
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
-       struct drm_i915_gem_execbuffer execbuf;
-       int ret, i;
+       uint32_t *data;
+       unsigned int i;
 
-       if (bo_gem->has_error)
-               return -ENOMEM;
+       data = malloc(bo->size);
+       drm_intel_bo_get_subdata(bo, offset, size, data);
 
-       pthread_mutex_lock(&bufmgr_gem->lock);
-       /* Update indices and set up the validate list. */
-       drm_intel_gem_bo_process_reloc(bo);
+       /* Easy mode: write out bo with no relocations */
+       if (!bo_gem->reloc_count) {
+               aub_out_data(bufmgr_gem, data, size);
+               free(data);
+               return;
+       }
 
-       /* Add the batch buffer to the validation list.  There are no
+       /* Otherwise, handle the relocations while writing. */
+       for (i = 0; i < size / 4; i++) {
+               int r;
+               for (r = 0; r < bo_gem->reloc_count; r++) {
+                       struct drm_i915_gem_relocation_entry *reloc;
+                       drm_intel_reloc_target *info;
+
+                       reloc = &bo_gem->relocs[r];
+                       info = &bo_gem->reloc_target_info[r];
+
+                       if (reloc->offset == offset + i * 4) {
+                               drm_intel_bo_gem *target_gem;
+                               uint32_t val;
+
+                               target_gem = (drm_intel_bo_gem *)info->bo;
+
+                               val = reloc->delta;
+                               val += target_gem->aub_offset;
+
+                               aub_out(bufmgr_gem, val);
+                               data[i] = val;
+                               break;
+                       }
+               }
+               if (r == bo_gem->reloc_count) {
+                       /* no relocation, just the data */
+                       aub_out(bufmgr_gem, data[i]);
+               }
+       }
+
+       free(data);
+}
+
+static void
+aub_bo_get_address(drm_intel_bo *bo)
+{
+       drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+       drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+
+       /* Give the object a graphics address in the AUB file.  We
+        * don't just use the GEM object address because we do AUB
+        * dumping before execution -- we want to log successfully
+        * even when the hardware might hang, and we might even want
+        * to capture an AUB trace for a driver targeting a different
+        * generation of hardware by disabling the actual kernel exec
+        * call.
+        */
+       bo_gem->aub_offset = bufmgr_gem->aub_offset;
+       bufmgr_gem->aub_offset += bo->size;
+       /* XXX: Handle aperture overflow. */
+       assert(bufmgr_gem->aub_offset < 256 * 1024 * 1024);
+}
+
+static void
+aub_write_trace_block(drm_intel_bo *bo, uint32_t type, uint32_t subtype,
+                     uint32_t offset, uint32_t size)
+{
+       drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+       drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+
+       aub_out(bufmgr_gem,
+               CMD_AUB_TRACE_HEADER_BLOCK |
+               (5 - 2));
+       aub_out(bufmgr_gem,
+               AUB_TRACE_MEMTYPE_GTT | type | AUB_TRACE_OP_DATA_WRITE);
+       aub_out(bufmgr_gem, subtype);
+       aub_out(bufmgr_gem, bo_gem->aub_offset + offset);
+       aub_out(bufmgr_gem, size);
+       aub_write_bo_data(bo, offset, size);
+}
+
+/**
+ * Break up large objects into multiple writes.  Otherwise a 128kB VBO
+ * would overflow the 16-bit size field in the packet header and
+ * everything would go badly after that.
+ */
+static void
+aub_write_large_trace_block(drm_intel_bo *bo, uint32_t type, uint32_t subtype,
+                           uint32_t offset, uint32_t size)
+{
+       uint32_t block_size;
+       uint32_t sub_offset;
+
+       for (sub_offset = 0; sub_offset < size; sub_offset += block_size) {
+               block_size = size - sub_offset;
+
+               if (block_size > 8 * 4096)
+                       block_size = 8 * 4096;
+
+               aub_write_trace_block(bo, type, subtype, offset + sub_offset,
+                                     block_size);
+       }
+}
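+
+/* Worked example of the split above: a 128 kB buffer is 131072 bytes, which
+ * exceeds the 65535-byte limit of a 16-bit size field, so it is emitted as
+ * four blocks of 8 * 4096 = 32768 bytes each.
+ */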
+
+static void
+aub_write_bo(drm_intel_bo *bo)
+{
+       drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+       uint32_t offset = 0;
+       unsigned i;
+
+       aub_bo_get_address(bo);
+
+       /* Write out each annotated section separately. */
+       for (i = 0; i < bo_gem->aub_annotation_count; ++i) {
+               drm_intel_aub_annotation *annotation =
+                       &bo_gem->aub_annotations[i];
+               uint32_t ending_offset = annotation->ending_offset;
+               if (ending_offset > bo->size)
+                       ending_offset = bo->size;
+               if (ending_offset > offset) {
+                       aub_write_large_trace_block(bo, annotation->type,
+                                                   annotation->subtype,
+                                                   offset,
+                                                   ending_offset - offset);
+                       offset = ending_offset;
+               }
+       }
+
+       /* Write out any remaining unannotated data */
+       if (offset < bo->size) {
+               aub_write_large_trace_block(bo, AUB_TRACE_TYPE_NOTYPE, 0,
+                                           offset, bo->size - offset);
+       }
+}
+
+/*
+ * Make a ring buffer on the fly and dump it
+ */
+static void
+aub_build_dump_ringbuffer(drm_intel_bufmgr_gem *bufmgr_gem,
+                         uint32_t batch_buffer, int ring_flag)
+{
+       uint32_t ringbuffer[4096];
+       int ring = AUB_TRACE_TYPE_RING_PRB0; /* The default ring */
+       int ring_count = 0;
+
+       if (ring_flag == I915_EXEC_BSD)
+               ring = AUB_TRACE_TYPE_RING_PRB1;
+
+       /* Make a ring buffer to execute our batchbuffer. */
+       memset(ringbuffer, 0, sizeof(ringbuffer));
+       ringbuffer[ring_count++] = AUB_MI_BATCH_BUFFER_START;
+       ringbuffer[ring_count++] = batch_buffer;
+
+       /* Write out the ring.  This appears to trigger execution of
+        * the ring in the simulator.
+        */
+       aub_out(bufmgr_gem,
+               CMD_AUB_TRACE_HEADER_BLOCK |
+               (5 - 2));
+       aub_out(bufmgr_gem,
+               AUB_TRACE_MEMTYPE_GTT | ring | AUB_TRACE_OP_COMMAND_WRITE);
+       aub_out(bufmgr_gem, 0); /* general/surface subtype */
+       aub_out(bufmgr_gem, bufmgr_gem->aub_offset);
+       aub_out(bufmgr_gem, ring_count * 4);
+
+       /* FIXME: Need some flush operations here? */
+       aub_out_data(bufmgr_gem, ringbuffer, ring_count * 4);
+
+       /* Update offset pointer */
+       bufmgr_gem->aub_offset += 4096;
+}
+
+void
+drm_intel_gem_bo_aub_dump_bmp(drm_intel_bo *bo,
+                             int x1, int y1, int width, int height,
+                             enum aub_dump_bmp_format format,
+                             int pitch, int offset)
+{
+       drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+       drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
+       uint32_t cpp;
+
+       switch (format) {
+       case AUB_DUMP_BMP_FORMAT_8BIT:
+               cpp = 1;
+               break;
+       case AUB_DUMP_BMP_FORMAT_ARGB_4444:
+               cpp = 2;
+               break;
+       case AUB_DUMP_BMP_FORMAT_ARGB_0888:
+       case AUB_DUMP_BMP_FORMAT_ARGB_8888:
+               cpp = 4;
+               break;
+       default:
+               fprintf(stderr, "Unknown AUB dump format %d\n", format);
+               return;
+       }
+
+       if (!bufmgr_gem->aub_file)
+               return;
+
+       aub_out(bufmgr_gem, CMD_AUB_DUMP_BMP | 4);
+       aub_out(bufmgr_gem, (y1 << 16) | x1);
+       aub_out(bufmgr_gem,
+               (format << 24) |
+               (cpp << 19) |
+               pitch / 4);
+       aub_out(bufmgr_gem, (height << 16) | width);
+       aub_out(bufmgr_gem, bo_gem->aub_offset + offset);
+       aub_out(bufmgr_gem,
+               ((bo_gem->tiling_mode != I915_TILING_NONE) ? (1 << 2) : 0) |
+               ((bo_gem->tiling_mode == I915_TILING_Y) ? (1 << 3) : 0));
+}
+
+static void
+aub_exec(drm_intel_bo *bo, int ring_flag, int used)
+{
+       drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+       drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+       int i;
+       bool batch_buffer_needs_annotations;
+
+       if (!bufmgr_gem->aub_file)
+               return;
+
+       /* If the batch buffer is not annotated, annotate it as best we
+        * can.
+        */
+       batch_buffer_needs_annotations = bo_gem->aub_annotation_count == 0;
+       if (batch_buffer_needs_annotations) {
+               drm_intel_aub_annotation annotations[2] = {
+                       { AUB_TRACE_TYPE_BATCH, 0, used },
+                       { AUB_TRACE_TYPE_NOTYPE, 0, bo->size }
+               };
+               drm_intel_bufmgr_gem_set_aub_annotations(bo, annotations, 2);
+       }
+
+       /* Write out all buffers to AUB memory */
+       for (i = 0; i < bufmgr_gem->exec_count; i++) {
+               aub_write_bo(bufmgr_gem->exec_bos[i]);
+       }
+
+       /* Remove any annotations we added */
+       if (batch_buffer_needs_annotations)
+               drm_intel_bufmgr_gem_set_aub_annotations(bo, NULL, 0);
+
+       /* Dump ring buffer */
+       aub_build_dump_ringbuffer(bufmgr_gem, bo_gem->aub_offset, ring_flag);
+
+       fflush(bufmgr_gem->aub_file);
+
+       /*
+        * One frame has been dumped. So reset the aub_offset for the next frame.
+        *
+        * FIXME: Can we do this?
+        */
+       bufmgr_gem->aub_offset = 0x10000;
+}
+
+static int
+drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
+                     drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
+{
+       drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+       drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+       struct drm_i915_gem_execbuffer execbuf;
+       int ret, i;
+
+       if (bo_gem->has_error)
+               return -ENOMEM;
+
+       pthread_mutex_lock(&bufmgr_gem->lock);
+       /* Update indices and set up the validate list. */
+       drm_intel_gem_bo_process_reloc(bo);
+
+       /* Add the batch buffer to the validation list.  There are no
         * relocations pointing to it.
         */
        drm_intel_add_validate_buffer(bo);
 
+       VG_CLEAR(execbuf);
        execbuf.buffers_ptr = (uintptr_t) bufmgr_gem->exec_objects;
        execbuf.buffer_count = bufmgr_gem->exec_count;
        execbuf.batch_start_offset = 0;
@@ -1486,25 +2150,21 @@ drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
        execbuf.DR1 = 0;
        execbuf.DR4 = DR4;
 
-       do {
-               ret = ioctl(bufmgr_gem->fd,
-                           DRM_IOCTL_I915_GEM_EXECBUFFER,
-                           &execbuf);
-       } while (ret != 0 && errno == EINTR);
-
+       ret = drmIoctl(bufmgr_gem->fd,
+                      DRM_IOCTL_I915_GEM_EXECBUFFER,
+                      &execbuf);
        if (ret != 0) {
                ret = -errno;
                if (errno == ENOSPC) {
-                       fprintf(stderr,
-                               "Execbuffer fails to pin. "
-                               "Estimate: %u. Actual: %u. Available: %u\n",
-                               drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
-                                                                  bufmgr_gem->
-                                                                  exec_count),
-                               drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
-                                                                 bufmgr_gem->
-                                                                 exec_count),
-                               (unsigned int)bufmgr_gem->gtt_size);
+                       DBG("Execbuffer fails to pin. "
+                           "Estimate: %u. Actual: %u. Available: %u\n",
+                           drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
+                                                              bufmgr_gem->
+                                                              exec_count),
+                           drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
+                                                             bufmgr_gem->
+                                                             exec_count),
+                           (unsigned int)bufmgr_gem->gtt_size);
                }
        }
        drm_intel_update_buffer_offsets(bufmgr_gem);
@@ -1527,13 +2187,30 @@ drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
 }
 
 static int
-drm_intel_gem_bo_exec2(drm_intel_bo *bo, int used,
-                      drm_clip_rect_t *cliprects, int num_cliprects,
-                      int DR4)
+do_exec2(drm_intel_bo *bo, int used, drm_intel_context *ctx,
+        drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
+        unsigned int flags)
 {
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
        struct drm_i915_gem_execbuffer2 execbuf;
-       int ret, i;
+       int ret = 0;
+       int i;
+
+       switch (flags & 0x7) {
+       default:
+               return -EINVAL;
+       case I915_EXEC_BLT:
+               if (!bufmgr_gem->has_blt)
+                       return -EINVAL;
+               break;
+       case I915_EXEC_BSD:
+               if (!bufmgr_gem->has_bsd)
+                       return -EINVAL;
+               break;
+       case I915_EXEC_RENDER:
+       case I915_EXEC_DEFAULT:
+               break;
+       }
 
        pthread_mutex_lock(&bufmgr_gem->lock);
        /* Update indices and set up the validate list. */
@@ -1544,6 +2221,7 @@ drm_intel_gem_bo_exec2(drm_intel_bo *bo, int used,
         */
        drm_intel_add_validate_buffer2(bo, 0);
 
+       VG_CLEAR(execbuf);
        execbuf.buffers_ptr = (uintptr_t)bufmgr_gem->exec2_objects;
        execbuf.buffer_count = bufmgr_gem->exec_count;
        execbuf.batch_start_offset = 0;
@@ -1552,30 +2230,36 @@ drm_intel_gem_bo_exec2(drm_intel_bo *bo, int used,
        execbuf.num_cliprects = num_cliprects;
        execbuf.DR1 = 0;
        execbuf.DR4 = DR4;
-       execbuf.flags = 0;
-       execbuf.rsvd1 = 0;
+       execbuf.flags = flags;
+       if (ctx == NULL)
+               i915_execbuffer2_set_context_id(execbuf, 0);
+       else
+               i915_execbuffer2_set_context_id(execbuf, ctx->ctx_id);
        execbuf.rsvd2 = 0;
 
-       do {
-               ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2,
-                           &execbuf);
-       } while (ret != 0 && errno == EAGAIN);
+       aub_exec(bo, flags, used);
+
+       if (bufmgr_gem->no_exec)
+               goto skip_execution;
 
+       ret = drmIoctl(bufmgr_gem->fd,
+                      DRM_IOCTL_I915_GEM_EXECBUFFER2,
+                      &execbuf);
        if (ret != 0) {
                ret = -errno;
-               if (ret == -ENOMEM) {
-                       fprintf(stderr,
-                               "Execbuffer fails to pin. "
-                               "Estimate: %u. Actual: %u. Available: %u\n",
-                               drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
-                                                                  bufmgr_gem->exec_count),
-                               drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
-                                                                 bufmgr_gem->exec_count),
-                               (unsigned int) bufmgr_gem->gtt_size);
+               if (ret == -ENOSPC) {
+                       DBG("Execbuffer fails to pin. "
+                           "Estimate: %u. Actual: %u. Available: %u\n",
+                           drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
+                                                              bufmgr_gem->exec_count),
+                           drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
+                                                             bufmgr_gem->exec_count),
+                           (unsigned int) bufmgr_gem->gtt_size);
                }
        }
        drm_intel_update_buffer_offsets2(bufmgr_gem);
 
+skip_execution:
        if (bufmgr_gem->bufmgr.debug)
                drm_intel_gem_dump_validation_list(bufmgr_gem);
 
@@ -1593,6 +2277,31 @@ drm_intel_gem_bo_exec2(drm_intel_bo *bo, int used,
        return ret;
 }
 
+static int
+drm_intel_gem_bo_exec2(drm_intel_bo *bo, int used,
+                      drm_clip_rect_t *cliprects, int num_cliprects,
+                      int DR4)
+{
+       return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4,
+                       I915_EXEC_RENDER);
+}
+
+static int
+drm_intel_gem_bo_mrb_exec2(drm_intel_bo *bo, int used,
+                       drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
+                       unsigned int flags)
+{
+       return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4,
+                       flags);
+}
+
+int
+drm_intel_gem_bo_context_exec(drm_intel_bo *bo, drm_intel_context *ctx,
+                             int used, unsigned int flags)
+{
+       return do_exec2(bo, used, ctx, NULL, 0, 0, flags);
+}
+
 static int
 drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)
 {
@@ -1601,16 +2310,13 @@ drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)
        struct drm_i915_gem_pin pin;
        int ret;
 
-       memset(&pin, 0, sizeof(pin));
+       VG_CLEAR(pin);
        pin.handle = bo_gem->gem_handle;
        pin.alignment = alignment;
 
-       do {
-               ret = ioctl(bufmgr_gem->fd,
-                           DRM_IOCTL_I915_GEM_PIN,
-                           &pin);
-       } while (ret == -1 && errno == EINTR);
-
+       ret = drmIoctl(bufmgr_gem->fd,
+                      DRM_IOCTL_I915_GEM_PIN,
+                      &pin);
        if (ret != 0)
                return -errno;
 
@@ -1626,10 +2332,10 @@ drm_intel_gem_bo_unpin(drm_intel_bo *bo)
        struct drm_i915_gem_unpin unpin;
        int ret;
 
-       memset(&unpin, 0, sizeof(unpin));
+       VG_CLEAR(unpin);
        unpin.handle = bo_gem->gem_handle;
 
-       ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
+       ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
        if (ret != 0)
                return -errno;
 
@@ -1637,35 +2343,63 @@ drm_intel_gem_bo_unpin(drm_intel_bo *bo)
 }
 
 static int
-drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
-                           uint32_t stride)
+drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
+                                    uint32_t tiling_mode,
+                                    uint32_t stride)
 {
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
        struct drm_i915_gem_set_tiling set_tiling;
        int ret;
 
-       if (bo_gem->global_name == 0 && *tiling_mode == bo_gem->tiling_mode)
+       if (bo_gem->global_name == 0 &&
+           tiling_mode == bo_gem->tiling_mode &&
+           stride == bo_gem->stride)
                return 0;
 
        memset(&set_tiling, 0, sizeof(set_tiling));
-       set_tiling.handle = bo_gem->gem_handle;
-
        do {
-               set_tiling.tiling_mode = *tiling_mode;
+               /* set_tiling is slightly broken and overwrites the
+                * input on the error path, so we have to open code
+                * drmIoctl.
+                */
+               set_tiling.handle = bo_gem->gem_handle;
+               set_tiling.tiling_mode = tiling_mode;
                set_tiling.stride = stride;
 
                ret = ioctl(bufmgr_gem->fd,
                            DRM_IOCTL_I915_GEM_SET_TILING,
                            &set_tiling);
-       } while (ret == -1 && errno == EINTR);
+       } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
+       if (ret == -1)
+               return -errno;
+
        bo_gem->tiling_mode = set_tiling.tiling_mode;
        bo_gem->swizzle_mode = set_tiling.swizzle_mode;
+       bo_gem->stride = set_tiling.stride;
+       return 0;
+}
 
-       drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
+static int
+drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
+                           uint32_t stride)
+{
+       drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+       drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+       int ret;
+
+       /* Linear buffers have no stride. By ensuring that we only ever use
+        * stride 0 with linear buffers, we simplify our code.
+        */
+       if (*tiling_mode == I915_TILING_NONE)
+               stride = 0;
+
+       ret = drm_intel_gem_bo_set_tiling_internal(bo, *tiling_mode, stride);
+       if (ret == 0)
+               drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
 
        *tiling_mode = bo_gem->tiling_mode;
-       return ret == 0 ? 0 : -errno;
+       return ret;
 }
 
 static int
@@ -1684,18 +2418,22 @@ drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t * name)
 {
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
-       struct drm_gem_flink flink;
        int ret;
 
        if (!bo_gem->global_name) {
-               memset(&flink, 0, sizeof(flink));
+               struct drm_gem_flink flink;
+
+               VG_CLEAR(flink);
                flink.handle = bo_gem->gem_handle;
 
-               ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink);
+               ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink);
                if (ret != 0)
                        return -errno;
+
                bo_gem->global_name = flink.name;
-               bo_gem->reusable = 0;
+               bo_gem->reusable = false;
+
+               DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
        }
 
        *name = bo_gem->global_name;
@@ -1714,7 +2452,7 @@ drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
 {
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
 
-       bufmgr_gem->bo_reuse = 1;
+       bufmgr_gem->bo_reuse = true;
 }
 
 /**
@@ -1730,7 +2468,7 @@ drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr)
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
 
        if (bufmgr_gem->bufmgr.bo_exec == drm_intel_gem_bo_exec2)
-               bufmgr_gem->fenced_relocs = 1;
+               bufmgr_gem->fenced_relocs = true;
 }
 
 /**
@@ -1748,7 +2486,7 @@ drm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo)
                return 0;
 
        total += bo->size;
-       bo_gem->included_in_check_aperture = 1;
+       bo_gem->included_in_check_aperture = true;
 
        for (i = 0; i < bo_gem->reloc_count; i++)
                total +=
@@ -1796,7 +2534,7 @@ drm_intel_gem_bo_clear_aperture_space_flag(drm_intel_bo *bo)
        if (bo == NULL || !bo_gem->included_in_check_aperture)
                return;
 
-       bo_gem->included_in_check_aperture = 0;
+       bo_gem->included_in_check_aperture = false;
 
        for (i = 0; i < bo_gem->reloc_count; i++)
                drm_intel_gem_bo_clear_aperture_space_flag(bo_gem->
@@ -1913,10 +2651,18 @@ drm_intel_gem_bo_disable_reuse(drm_intel_bo *bo)
 {
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
 
-       bo_gem->reusable = 0;
+       bo_gem->reusable = false;
        return 0;
 }
 
+static int
+drm_intel_gem_bo_is_reusable(drm_intel_bo *bo)
+{
+       drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+
+       return bo_gem->reusable;
+}
+
 static int
 _drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
 {
@@ -1926,6 +2672,8 @@ _drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
        for (i = 0; i < bo_gem->reloc_count; i++) {
                if (bo_gem->reloc_target_info[i].bo == target_bo)
                        return 1;
+               if (bo == bo_gem->reloc_target_info[i].bo)
+                       continue;
                if (_drm_intel_gem_bo_references(bo_gem->reloc_target_info[i].bo,
                                                target_bo))
                        return 1;
@@ -1947,6 +2695,237 @@ drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
        return 0;
 }
 
+static void
+add_bucket(drm_intel_bufmgr_gem *bufmgr_gem, int size)
+{
+       unsigned int i = bufmgr_gem->num_buckets;
+
+       assert(i < ARRAY_SIZE(bufmgr_gem->cache_bucket));
+
+       DRMINITLISTHEAD(&bufmgr_gem->cache_bucket[i].head);
+       bufmgr_gem->cache_bucket[i].size = size;
+       bufmgr_gem->num_buckets++;
+}
+
+static void
+init_cache_buckets(drm_intel_bufmgr_gem *bufmgr_gem)
+{
+       unsigned long size, cache_max_size = 64 * 1024 * 1024;
+
+       /* OK, so power-of-two buckets were too wasteful of memory.
+        * Give 3 other sizes between each power of two, to hopefully
+        * cover things accurately enough.  (The alternative is
+        * probably to just go for exact matching of sizes, and assume
+        * that for things like composited window resize the tiled
+        * width/height alignment and rounding of sizes to pages will
+        * get us useful cache hit rates anyway)
+        */
+       add_bucket(bufmgr_gem, 4096);
+       add_bucket(bufmgr_gem, 4096 * 2);
+       add_bucket(bufmgr_gem, 4096 * 3);
+
+       /* Initialize the linked lists for BO reuse cache. */
+       for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
+               add_bucket(bufmgr_gem, size);
+
+               add_bucket(bufmgr_gem, size + size * 1 / 4);
+               add_bucket(bufmgr_gem, size + size * 2 / 4);
+               add_bucket(bufmgr_gem, size + size * 3 / 4);
+       }
+}
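+
+/* For reference, the ladder built by the loop above is 4, 8 and 12 kB, then
+ * for each power of two n from 16 kB to 64 MB the sizes n, 1.25n, 1.5n and
+ * 1.75n -- e.g. 16, 20, 24, 28, 32, 40, 48, 56, 64, 80 kB and so on, for a
+ * total of 55 buckets.
+ */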
+
+void
+drm_intel_bufmgr_gem_set_vma_cache_size(drm_intel_bufmgr *bufmgr, int limit)
+{
+       drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
+
+       bufmgr_gem->vma_max = limit;
+
+       drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
+}
+
+/**
+ * Get the PCI ID for the device.  This can be overridden by setting the
+ * INTEL_DEVID_OVERRIDE environment variable to the desired ID.
+ */
+static int
+get_pci_device_id(drm_intel_bufmgr_gem *bufmgr_gem)
+{
+       char *devid_override;
+       int devid;
+       int ret;
+       drm_i915_getparam_t gp;
+
+       if (geteuid() == getuid()) {
+               devid_override = getenv("INTEL_DEVID_OVERRIDE");
+               if (devid_override) {
+                       bufmgr_gem->no_exec = true;
+                       return strtod(devid_override, NULL);
+               }
+       }
+
+       VG_CLEAR(devid);
+       VG_CLEAR(gp);
+       gp.param = I915_PARAM_CHIPSET_ID;
+       gp.value = &devid;
+       ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
+       if (ret) {
+               fprintf(stderr, "get chip id failed: %d [%d]\n", ret, errno);
+               fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
+       }
+       return devid;
+}
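+
+/* Illustrative use (a sketch; the device id below is only an example value):
+ * force a Sandy Bridge id before creating the buffer manager, which also
+ * sets no_exec above so batches are built but never submitted to the kernel.
+ *
+ *        setenv("INTEL_DEVID_OVERRIDE", "0x0126", 1);
+ *        bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
+ */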
+
+int
+drm_intel_bufmgr_gem_get_devid(drm_intel_bufmgr *bufmgr)
+{
+       drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
+
+       return bufmgr_gem->pci_device;
+}
+
+/**
+ * Sets up AUB dumping.
+ *
+ * This is a trace file format that can be used with the simulator.
+ * Packets are emitted in a format somewhat like GPU command packets.
+ * You can set up a GTT and upload your objects into the referenced
+ * space, then send off batchbuffers and get BMPs out the other end.
+ */
+void
+drm_intel_bufmgr_gem_set_aub_dump(drm_intel_bufmgr *bufmgr, int enable)
+{
+       drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
+       int entry = 0x200003;
+       int i;
+       int gtt_size = 0x10000;
+
+       if (!enable) {
+               if (bufmgr_gem->aub_file) {
+                       fclose(bufmgr_gem->aub_file);
+                       bufmgr_gem->aub_file = NULL;
+               }
+               return;
+       }
+
+       if (geteuid() != getuid())
+               return;
+
+       bufmgr_gem->aub_file = fopen("intel.aub", "w+");
+       if (!bufmgr_gem->aub_file)
+               return;
+
+       /* Start allocating objects from just after the GTT. */
+       bufmgr_gem->aub_offset = gtt_size;
+
+       /* Start with a (required) version packet. */
+       aub_out(bufmgr_gem, CMD_AUB_HEADER | (13 - 2));
+       aub_out(bufmgr_gem,
+               (4 << AUB_HEADER_MAJOR_SHIFT) |
+               (0 << AUB_HEADER_MINOR_SHIFT));
+       for (i = 0; i < 8; i++) {
+               aub_out(bufmgr_gem, 0); /* app name */
+       }
+       aub_out(bufmgr_gem, 0); /* timestamp */
+       aub_out(bufmgr_gem, 0); /* timestamp */
+       aub_out(bufmgr_gem, 0); /* comment len */
+
+       /* Set up the GTT. The max we can handle is 256M */
+       aub_out(bufmgr_gem, CMD_AUB_TRACE_HEADER_BLOCK | (5 - 2));
+       aub_out(bufmgr_gem, AUB_TRACE_MEMTYPE_NONLOCAL | 0 | AUB_TRACE_OP_DATA_WRITE);
+       aub_out(bufmgr_gem, 0); /* subtype */
+       aub_out(bufmgr_gem, 0); /* offset */
+       aub_out(bufmgr_gem, gtt_size); /* size */
+       for (i = 0x000; i < gtt_size; i += 4, entry += 0x1000) {
+               aub_out(bufmgr_gem, entry);
+       }
+}
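+
+/* A minimal usage sketch (render_frame() stands in for the caller's own
+ * batch submission): enable dumping around the frame of interest; every
+ * exec in between is appended to "intel.aub" in the current directory.
+ *
+ *        drm_intel_bufmgr_gem_set_aub_dump(bufmgr, 1);
+ *        render_frame();
+ *        drm_intel_bufmgr_gem_set_aub_dump(bufmgr, 0);
+ */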
+
+drm_intel_context *
+drm_intel_gem_context_create(drm_intel_bufmgr *bufmgr)
+{
+       drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
+       struct drm_i915_gem_context_create create;
+       drm_intel_context *context = NULL;
+       int ret;
+
+       VG_CLEAR(create);
+       ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
+       if (ret != 0) {
+               fprintf(stderr, "DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n",
+                       strerror(errno));
+               return NULL;
+       }
+
+       context = calloc(1, sizeof(*context));
+       context->ctx_id = create.ctx_id;
+       context->bufmgr = bufmgr;
+
+       return context;
+}
+
+void
+drm_intel_gem_context_destroy(drm_intel_context *ctx)
+{
+       drm_intel_bufmgr_gem *bufmgr_gem;
+       struct drm_i915_gem_context_destroy destroy;
+       int ret;
+
+       if (ctx == NULL)
+               return;
+
+       bufmgr_gem = (drm_intel_bufmgr_gem *)ctx->bufmgr;
+       VG_CLEAR(destroy);
+       destroy.ctx_id = ctx->ctx_id;
+       ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY,
+                      &destroy);
+       if (ret != 0)
+               fprintf(stderr, "DRM_IOCTL_I915_GEM_CONTEXT_DESTROY failed: %s\n",
+                       strerror(errno));
+
+       free(ctx);
+}
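+
+/* A minimal usage sketch for the context API added here (batch_bo and used
+ * come from the caller's batchbuffer handling): create a hardware context,
+ * submit batches against it, and destroy it when finished.
+ *
+ *        drm_intel_context *ctx = drm_intel_gem_context_create(bufmgr);
+ *        if (ctx != NULL)
+ *                ret = drm_intel_gem_bo_context_exec(batch_bo, ctx, used,
+ *                                                    I915_EXEC_RENDER);
+ *        drm_intel_gem_context_destroy(ctx);
+ */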
+
+
+/**
+ * Annotate the given bo for use in aub dumping.
+ *
+ * \param annotations is an array of drm_intel_aub_annotation objects
+ * describing the type of data in various sections of the bo.  Each
+ * element of the array specifies the type and subtype of a section of
+ * the bo, and the past-the-end offset of that section.  The elements
+ * of \c annotations must be sorted so that ending_offset is
+ * increasing.
+ *
+ * \param count is the number of elements in the \c annotations array.
+ * If \c count is zero, then \c annotations will not be dereferenced.
+ *
+ * Annotations are copied into a private data structure, so caller may
+ * re-use the memory pointed to by \c annotations after the call
+ * returns.
+ *
+ * Annotations are stored for the lifetime of the bo; to reset to the
+ * default state (no annotations), call this function with a \c count
+ * of zero.
+ */
+void
+drm_intel_bufmgr_gem_set_aub_annotations(drm_intel_bo *bo,
+                                        drm_intel_aub_annotation *annotations,
+                                        unsigned count)
+{
+       drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+       unsigned size = sizeof(*annotations) * count;
+       drm_intel_aub_annotation *new_annotations =
+               count > 0 ? realloc(bo_gem->aub_annotations, size) : NULL;
+       if (new_annotations == NULL) {
+               free(bo_gem->aub_annotations);
+               bo_gem->aub_annotations = NULL;
+               bo_gem->aub_annotation_count = 0;
+               return;
+       }
+       memcpy(new_annotations, annotations, size);
+       bo_gem->aub_annotations = new_annotations;
+       bo_gem->aub_annotation_count = count;
+}
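+
+/* A minimal usage sketch (batch_bo and used are the caller's): mark the
+ * first 'used' bytes of a batch as command data and leave the rest untyped,
+ * mirroring the default that aub_exec() applies when no annotations exist.
+ *
+ *        drm_intel_aub_annotation notes[2] = {
+ *                { AUB_TRACE_TYPE_BATCH,  0, used },
+ *                { AUB_TRACE_TYPE_NOTYPE, 0, batch_bo->size },
+ *        };
+ *        drm_intel_bufmgr_gem_set_aub_annotations(batch_bo, notes, 2);
+ */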
+
 /**
  * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
  * and manage buffer objects.
@@ -1959,9 +2938,8 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
        drm_intel_bufmgr_gem *bufmgr_gem;
        struct drm_i915_gem_get_aperture aperture;
        drm_i915_getparam_t gp;
-       int ret, i;
-       unsigned long size;
-       int exec2 = 0;
+       int ret, tmp;
+       bool exec2 = false;
 
        bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
        if (bufmgr_gem == NULL)
@@ -1974,7 +2952,9 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
                return NULL;
        }
 
-       ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
+       ret = drmIoctl(bufmgr_gem->fd,
+                      DRM_IOCTL_I915_GEM_GET_APERTURE,
+                      &aperture);
 
        if (ret == 0)
                bufmgr_gem->gtt_size = aperture.aper_available_size;
@@ -1988,32 +2968,71 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
                        (int)bufmgr_gem->gtt_size / 1024);
        }
 
-       gp.param = I915_PARAM_CHIPSET_ID;
-       gp.value = &bufmgr_gem->pci_device;
-       ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
-       if (ret) {
-               fprintf(stderr, "get chip id failed: %d [%d]\n", ret, errno);
-               fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
-       }
+       bufmgr_gem->pci_device = get_pci_device_id(bufmgr_gem);
 
-       if (IS_GEN2(bufmgr_gem))
+       if (IS_GEN2(bufmgr_gem->pci_device))
                bufmgr_gem->gen = 2;
-       else if (IS_GEN3(bufmgr_gem))
+       else if (IS_GEN3(bufmgr_gem->pci_device))
                bufmgr_gem->gen = 3;
-       else if (IS_GEN4(bufmgr_gem))
+       else if (IS_GEN4(bufmgr_gem->pci_device))
                bufmgr_gem->gen = 4;
-       else
+       else if (IS_GEN5(bufmgr_gem->pci_device))
+               bufmgr_gem->gen = 5;
+       else if (IS_GEN6(bufmgr_gem->pci_device))
                bufmgr_gem->gen = 6;
+       else if (IS_GEN7(bufmgr_gem->pci_device))
+               bufmgr_gem->gen = 7;
+       else
+               assert(0);
+
+       if (IS_GEN3(bufmgr_gem->pci_device) &&
+           bufmgr_gem->gtt_size > 256*1024*1024) {
+               /* The unmappable part of gtt on gen 3 (i.e. above 256MB) can't
+                * be used for tiled blits. To simplify the accounting, just
+                * subtract the unmappable part (fixed to 256MB on all known
+                * gen3 devices) if the kernel advertises it. */
+               bufmgr_gem->gtt_size -= 256*1024*1024;
+       }
+
+       VG_CLEAR(gp);
+       gp.value = &tmp;
 
        gp.param = I915_PARAM_HAS_EXECBUF2;
-       ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
+       ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
        if (!ret)
-               exec2 = 1;
+               exec2 = true;
+
+       gp.param = I915_PARAM_HAS_BSD;
+       ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
+       bufmgr_gem->has_bsd = ret == 0;
+
+       gp.param = I915_PARAM_HAS_BLT;
+       ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
+       bufmgr_gem->has_blt = ret == 0;
+
+       gp.param = I915_PARAM_HAS_RELAXED_FENCING;
+       ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
+       bufmgr_gem->has_relaxed_fencing = ret == 0;
+
+       gp.param = I915_PARAM_HAS_WAIT_TIMEOUT;
+       ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
+       bufmgr_gem->has_wait_timeout = ret == 0;
+
+       gp.param = I915_PARAM_HAS_LLC;
+       ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
+       if (ret != 0) {
+               /* Kernel does not support the HAS_LLC query; fall back to GPU
+                * generation detection and assume that we have LLC on GEN6/7
+                */
+               bufmgr_gem->has_llc = (IS_GEN6(bufmgr_gem->pci_device) |
+                               IS_GEN7(bufmgr_gem->pci_device));
+       } else
+               bufmgr_gem->has_llc = *gp.value;
 
        if (bufmgr_gem->gen < 4) {
                gp.param = I915_PARAM_NUM_FENCES_AVAIL;
                gp.value = &bufmgr_gem->available_fences;
-               ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
+               ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
                if (ret) {
                        fprintf(stderr, "get fences failed: %d [%d]\n", ret,
                                errno);
@@ -2063,9 +3082,10 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
        bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
        bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
        /* Use the new one if available */
-       if (exec2)
+       if (exec2) {
                bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec2;
-       else
+               bufmgr_gem->bufmgr.bo_mrb_exec = drm_intel_gem_bo_mrb_exec2;
+       } else
                bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
        bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
        bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise;
@@ -2074,15 +3094,16 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
        bufmgr_gem->bufmgr.check_aperture_space =
            drm_intel_gem_check_aperture_space;
        bufmgr_gem->bufmgr.bo_disable_reuse = drm_intel_gem_bo_disable_reuse;
+       bufmgr_gem->bufmgr.bo_is_reusable = drm_intel_gem_bo_is_reusable;
        bufmgr_gem->bufmgr.get_pipe_from_crtc_id =
            drm_intel_gem_get_pipe_from_crtc_id;
        bufmgr_gem->bufmgr.bo_references = drm_intel_gem_bo_references;
 
-       /* Initialize the linked lists for BO reuse cache. */
-       for (i = 0, size = 4096; i < DRM_INTEL_GEM_BO_BUCKETS; i++, size *= 2) {
-               DRMINITLISTHEAD(&bufmgr_gem->cache_bucket[i].head);
-               bufmgr_gem->cache_bucket[i].size = size;
-       }
+       DRMINITLISTHEAD(&bufmgr_gem->named);
+       init_cache_buckets(bufmgr_gem);
+
+       DRMINITLISTHEAD(&bufmgr_gem->vma_cache);
+       bufmgr_gem->vma_max = -1; /* unlimited by default */
 
        return &bufmgr_gem->bufmgr;
 }