index f3af3a96d8e1b9a9d74d9348f644be247d277c30..c3b5d6aeb65b54a06a15df818e4f47e1cf57a83d 100644
--- a/intel/intel_bufmgr_gem.c
+++ b/intel/intel_bufmgr_gem.c
#endif
#include <xf86drm.h>
+#include <xf86atomic.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include "errno.h"
#include "libdrm_lists.h"
-#include "intel_atomic.h"
#include "intel_bufmgr.h"
#include "intel_bufmgr_priv.h"
#include "intel_chipset.h"
pthread_mutex_t lock;
struct drm_i915_gem_exec_object *exec_objects;
+ struct drm_i915_gem_exec_object2 *exec2_objects;
drm_intel_bo **exec_bos;
int exec_size;
int exec_count;
uint64_t gtt_size;
int available_fences;
int pci_device;
+ int gen;
char bo_reuse;
+ char fenced_relocs;
} drm_intel_bufmgr_gem;
+#define DRM_INTEL_RELOC_FENCE (1<<0)
+
+typedef struct _drm_intel_reloc_target_info {
+ drm_intel_bo *bo;
+ int flags;
+} drm_intel_reloc_target;
+
struct _drm_intel_bo_gem {
drm_intel_bo bo;
/** Array passed to the DRM containing relocation information. */
struct drm_i915_gem_relocation_entry *relocs;
- /** Array of bos corresponding to relocs[i].target_handle */
- drm_intel_bo **reloc_target_bo;
+ /**
+ * Array of info structs corresponding to relocs[i].target_handle etc
+ */
+ drm_intel_reloc_target *reloc_target_info;
/** Number of entries in relocs */
int reloc_count;
/** Mapped address for the buffer, saved across map/unmap cycles */
@@ -210,11 +222,11 @@ drm_intel_gem_bo_tile_size(drm_intel_bufmgr_gem *bufmgr_gem, unsigned long size,
return size;
/* 965+ just need multiples of page size for tiling */
- if (IS_I965G(bufmgr_gem))
+ if (bufmgr_gem->gen >= 4)
return ROUND_UP_TO(size, 4096);
/* Older chips need powers of two, of at least 512k or 1M */
- if (IS_I9XX(bufmgr_gem)) {
+ if (bufmgr_gem->gen == 3) {
min_size = 1024*1024;
max_size = 128*1024*1024;
} else {
drm_intel_gem_bo_tile_pitch(drm_intel_bufmgr_gem *bufmgr_gem,
unsigned long pitch, uint32_t tiling_mode)
{
- unsigned long tile_width = 512;
+ unsigned long tile_width;
unsigned long i;
if (tiling_mode == I915_TILING_NONE)
- return ROUND_UP_TO(pitch, tile_width);
+ return pitch;
+
+ if (tiling_mode == I915_TILING_X)
+ tile_width = 512;
+ else
+ tile_width = 128;
/* 965 is flexible */
- if (IS_I965G(bufmgr_gem))
+ if (bufmgr_gem->gen >= 4)
return ROUND_UP_TO(pitch, tile_width);
/* Pre-965 needs power of two tile width */
}
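To make the two rounding regimes concrete, here is a standalone, hedged sketch: round_up_to() mirrors the ROUND_UP_TO() macro used above, while the power-of-two loop for pre-965 parts is an assumption based on the "power of two tile width" comment, since that branch is elided from this hunk.

    #include <assert.h>

    /* Round v up to a multiple of a (a must be a power of two),
     * mirroring the ROUND_UP_TO() use above. */
    static unsigned long round_up_to(unsigned long v, unsigned long a)
    {
        return (v + a - 1) & ~(a - 1);
    }

    /* Hypothetical pre-965 helper: next power of two >= pitch,
     * starting from the tile width. */
    static unsigned long pow2_pitch(unsigned long pitch,
                                    unsigned long tile_width)
    {
        unsigned long i;

        for (i = tile_width; i < pitch; i <<= 1)
            ;
        return i;
    }

    int main(void)
    {
        assert(round_up_to(300, 512) == 512);  /* gen4+, X tiling */
        assert(round_up_to(300, 128) == 384);  /* gen4+, Y tiling */
        assert(pow2_pitch(700, 512) == 1024);  /* pre-965 */
        return 0;
    }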
for (j = 0; j < bo_gem->reloc_count; j++) {
- drm_intel_bo *target_bo = bo_gem->reloc_target_bo[j];
+ drm_intel_bo *target_bo = bo_gem->reloc_target_info[j].bo;
drm_intel_bo_gem *target_gem =
(drm_intel_bo_gem *) target_bo;
bufmgr_gem->exec_count++;
}
+static void
+drm_intel_add_validate_buffer2(drm_intel_bo *bo, int need_fence)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
+ int index;
+
+ if (bo_gem->validate_index != -1) {
+ if (need_fence)
+ bufmgr_gem->exec2_objects[bo_gem->validate_index].flags |=
+ EXEC_OBJECT_NEEDS_FENCE;
+ return;
+ }
+
+ /* Extend the array of validation entries as necessary. */
+ if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
+ int new_size = bufmgr_gem->exec_size * 2;
+
+ if (new_size == 0)
+ new_size = 5;
+
+ bufmgr_gem->exec2_objects =
+ realloc(bufmgr_gem->exec2_objects,
+ sizeof(*bufmgr_gem->exec2_objects) * new_size);
+ bufmgr_gem->exec_bos =
+ realloc(bufmgr_gem->exec_bos,
+ sizeof(*bufmgr_gem->exec_bos) * new_size);
+ bufmgr_gem->exec_size = new_size;
+ }
+
+ index = bufmgr_gem->exec_count;
+ bo_gem->validate_index = index;
+ /* Fill in array entry */
+ bufmgr_gem->exec2_objects[index].handle = bo_gem->gem_handle;
+ bufmgr_gem->exec2_objects[index].relocation_count = bo_gem->reloc_count;
+ bufmgr_gem->exec2_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
+ bufmgr_gem->exec2_objects[index].alignment = 0;
+ bufmgr_gem->exec2_objects[index].offset = 0;
+ bufmgr_gem->exec_bos[index] = bo;
+ bufmgr_gem->exec2_objects[index].flags = 0;
+ bufmgr_gem->exec2_objects[index].rsvd1 = 0;
+ bufmgr_gem->exec2_objects[index].rsvd2 = 0;
+ if (need_fence) {
+ bufmgr_gem->exec2_objects[index].flags |=
+ EXEC_OBJECT_NEEDS_FENCE;
+ }
+ bufmgr_gem->exec_count++;
+}
+
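A detail worth noting in the hunk above: user-space pointers such as relocs are handed to the kernel through fixed-width 64-bit fields (relocs_ptr here, buffers_ptr in the exec2 hunk below), so they are cast through uintptr_t first. A minimal sketch of the pattern, with illustrative names that are not taken from the patch:

    #include <stdint.h>

    struct example_ioctl_arg {
        /* Fixed 64-bit width regardless of user-space pointer size. */
        uint64_t ptr_field;
    };

    static void pack_pointer(struct example_ioctl_arg *arg, void *p)
    {
        /* Casting through uintptr_t makes the pointer-to-integer
         * conversion well defined on both 32-bit and 64-bit user
         * space against the same 64-bit kernel ABI. */
        arg->ptr_field = (uintptr_t)p;
    }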
#define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
sizeof(uint32_t))
* aperture. Optimal packing is for wimps.
*/
size = bo_gem->bo.size;
- if (!IS_I965G(bufmgr_gem) && bo_gem->tiling_mode != I915_TILING_NONE)
+ if (bufmgr_gem->gen < 4 && bo_gem->tiling_mode != I915_TILING_NONE)
size *= 2;
bo_gem->reloc_tree_size = size;
bo_gem->relocs = malloc(max_relocs *
sizeof(struct drm_i915_gem_relocation_entry));
- bo_gem->reloc_target_bo = malloc(max_relocs * sizeof(drm_intel_bo *));
- if (bo_gem->relocs == NULL || bo_gem->reloc_target_bo == NULL) {
+ bo_gem->reloc_target_info = malloc(max_relocs *
+ sizeof(drm_intel_reloc_target));
+ if (bo_gem->relocs == NULL || bo_gem->reloc_target_info == NULL) {
bo_gem->has_error = 1;
free (bo_gem->relocs);
bo_gem->relocs = NULL;
- free (bo_gem->reloc_target_bo);
- bo_gem->reloc_target_bo = NULL;
+ free (bo_gem->reloc_target_info);
+ bo_gem->reloc_target_info = NULL;
return 1;
}
unsigned long size, stride, aligned_y = y;
int ret;
+ /* If we're tiled, our allocations are in 8 or 32-row blocks,
+ * so failure to align our height means that we won't allocate
+ * enough pages.
+ *
+ * If we're untiled, we still have to align to 2 rows high
+ * because the data port accesses 2x2 blocks even if the
+ * bottom row isn't to be rendered, so failure to align means
+ * we could walk off the end of the GTT and fault. This is
+ * documented on 965, and may be the case on older chipsets
+ * too, so we try to be careful.
+ */
if (*tiling_mode == I915_TILING_NONE)
aligned_y = ALIGN(y, 2);
else if (*tiling_mode == I915_TILING_X)
}
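As a concrete illustration of the height alignment the comment above asks for (a hypothetical check, assuming the usual power-of-two ALIGN macro):

    #include <assert.h>

    #define ALIGN(v, a) (((v) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        assert(ALIGN(7, 2) == 8);    /* untiled: 2x2 data-port blocks */
        assert(ALIGN(7, 8) == 8);    /* X tiling: 8-row blocks */
        assert(ALIGN(7, 32) == 32);  /* Y tiling: 32-row blocks */
        return 0;
    }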
bo_gem->tiling_mode = get_tiling.tiling_mode;
bo_gem->swizzle_mode = get_tiling.swizzle_mode;
- if (bo_gem->tiling_mode == I915_TILING_NONE)
- bo_gem->reloc_tree_fences = 0;
- else
- bo_gem->reloc_tree_fences = 1;
drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);
/* Unreference all the target buffers */
for (i = 0; i < bo_gem->reloc_count; i++) {
drm_intel_gem_bo_unreference_locked_timed(bo_gem->
- reloc_target_bo[i],
+ reloc_target_info[i].bo,
time);
}
bo_gem->reloc_count = 0;
bo_gem->gem_handle, bo_gem->name);
/* release memory associated with this object */
- if (bo_gem->reloc_target_bo) {
- free(bo_gem->reloc_target_bo);
- bo_gem->reloc_target_bo = NULL;
+ if (bo_gem->reloc_target_info) {
+ free(bo_gem->reloc_target_info);
+ bo_gem->reloc_target_info = NULL;
}
if (bo_gem->relocs) {
free(bo_gem->relocs);
DRM_IOCTL_I915_GEM_SW_FINISH,
&sw_finish);
} while (ret == -1 && errno == EINTR);
+ ret = ret == -1 ? -errno : 0;
bo->virtual = NULL;
pthread_mutex_unlock(&bufmgr_gem->lock);
- return 0;
+
+ return ret;
}
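The change here (and in the pwrite hunk below) converts the traditional -1/errno ioctl result into libdrm's negative-errno return convention instead of unconditionally returning 0. The idiom in isolation, as a hedged standalone sketch:

    #include <errno.h>
    #include <sys/ioctl.h>

    /* Hypothetical wrapper: retry on EINTR, return 0 or -errno. */
    static int ioctl_retry(int fd, unsigned long request, void *arg)
    {
        int ret;

        do {
            ret = ioctl(fd, request, arg);
        } while (ret == -1 && errno == EINTR);

        return ret == -1 ? -errno : 0;
    }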
static int
&pwrite);
} while (ret == -1 && errno == EINTR);
if (ret != 0) {
+ ret = -errno;
fprintf(stderr,
"%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
__FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
(int)size, strerror(errno));
}
- return 0;
+
+ return ret;
}
static int
__FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
(int)size, strerror(errno));
}
+
return ret;
}
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
int i;
+ free(bufmgr_gem->exec2_objects);
free(bufmgr_gem->exec_objects);
free(bufmgr_gem->exec_bos);
* last known offset in target_bo.
*/
static int
-drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
- drm_intel_bo *target_bo, uint32_t target_offset,
- uint32_t read_domains, uint32_t write_domain)
+do_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
+ drm_intel_bo *target_bo, uint32_t target_offset,
+ uint32_t read_domains, uint32_t write_domain,
+ int need_fence)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
return -ENOMEM;
}
+ if (target_bo_gem->tiling_mode == I915_TILING_NONE)
+ need_fence = 0;
+
+ /* We never use HW fences for rendering on 965+ */
+ if (bufmgr_gem->gen >= 4)
+ need_fence = 0;
+
/* Create a new relocation list if needed */
if (bo_gem->relocs == NULL && drm_intel_setup_reloc_list(bo))
return -ENOMEM;
*/
assert(!bo_gem->used_as_reloc_target);
bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
+ /* An object needing a fence is a tiled buffer, so it won't have
+ * relocs to other buffers.
+ */
+ if (need_fence)
+ target_bo_gem->reloc_tree_fences = 1;
bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences;
/* Flag the target to disallow further relocations in it. */
bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset;
- bo_gem->reloc_target_bo[bo_gem->reloc_count] = target_bo;
+ bo_gem->reloc_target_info[bo_gem->reloc_count].bo = target_bo;
drm_intel_gem_bo_reference(target_bo);
+ if (need_fence)
+ bo_gem->reloc_target_info[bo_gem->reloc_count].flags =
+ DRM_INTEL_RELOC_FENCE;
+ else
+ bo_gem->reloc_target_info[bo_gem->reloc_count].flags = 0;
bo_gem->reloc_count++;
return 0;
}
+static int
+drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
+ drm_intel_bo *target_bo, uint32_t target_offset,
+ uint32_t read_domains, uint32_t write_domain)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
+
+ return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
+ read_domains, write_domain,
+ !bufmgr_gem->fenced_relocs);
+}
+
+static int
+drm_intel_gem_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
+ drm_intel_bo *target_bo,
+ uint32_t target_offset,
+ uint32_t read_domains, uint32_t write_domain)
+{
+ return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
+ read_domains, write_domain, 1);
+}
+
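Callers now pick the relocation flavour per target. A hedged usage sketch, assuming the public drm_intel_bo_emit_reloc_fence() wrapper that is wired to this vtable hook at the bottom of the patch; buffer names, offsets and domains are illustrative:

    #include "intel_bufmgr.h"

    /* Hypothetical helper: one fenced and one plain relocation. */
    static int emit_example_relocs(drm_intel_bo *batch_bo,
                                   drm_intel_bo *tiled_target_bo,
                                   drm_intel_bo *state_bo)
    {
        int ret;

        /* Tiled render target: may need a fence register pre-965. */
        ret = drm_intel_bo_emit_reloc_fence(batch_bo, 4 * 7,
                                            tiled_target_bo, 0,
                                            I915_GEM_DOMAIN_RENDER,
                                            I915_GEM_DOMAIN_RENDER);
        if (ret)
            return ret;

        /* Linear state buffer: a plain relocation is enough. */
        return drm_intel_bo_emit_reloc(batch_bo, 4 * 9, state_bo, 0,
                                       I915_GEM_DOMAIN_INSTRUCTION, 0);
    }

Note that do_bo_emit_reloc() above clears need_fence itself for untiled targets and on gen4+, so the fenced variant is safe to use unconditionally for buffers that might be tiled.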
/**
* Walk the tree of relocations rooted at BO and accumulate the list of
* validations to be performed and update the relocation buffers with
return;
for (i = 0; i < bo_gem->reloc_count; i++) {
- drm_intel_bo *target_bo = bo_gem->reloc_target_bo[i];
+ drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
/* Continue walking the tree depth-first. */
drm_intel_gem_bo_process_reloc(target_bo);
}
}
+static void
+drm_intel_gem_bo_process_reloc2(drm_intel_bo *bo)
+{
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
+ int i;
+
+ if (bo_gem->relocs == NULL)
+ return;
+
+ for (i = 0; i < bo_gem->reloc_count; i++) {
+ drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
+ int need_fence;
+
+ /* Continue walking the tree depth-first. */
+ drm_intel_gem_bo_process_reloc2(target_bo);
+
+ need_fence = (bo_gem->reloc_target_info[i].flags &
+ DRM_INTEL_RELOC_FENCE);
+
+ /* Add the target to the validate list */
+ drm_intel_add_validate_buffer2(target_bo, need_fence);
+ }
+}
+
static void
drm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem)
{
}
}
+static void
+drm_intel_update_buffer_offsets2(drm_intel_bufmgr_gem *bufmgr_gem)
+{
+ int i;
+
+ for (i = 0; i < bufmgr_gem->exec_count; i++) {
+ drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
+
+ /* Update the buffer offset */
+ if (bufmgr_gem->exec2_objects[i].offset != bo->offset) {
+ DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
+ bo_gem->gem_handle, bo_gem->name, bo->offset,
+ (unsigned long long)bufmgr_gem->exec2_objects[i].offset);
+ bo->offset = bufmgr_gem->exec2_objects[i].offset;
+ }
+ }
+}
+
static int
drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
return ret;
}
+static int
+drm_intel_gem_bo_exec2(drm_intel_bo *bo, int used,
+ drm_clip_rect_t *cliprects, int num_cliprects,
+ int DR4)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
+ struct drm_i915_gem_execbuffer2 execbuf;
+ int ret, i;
+
+ pthread_mutex_lock(&bufmgr_gem->lock);
+ /* Update indices and set up the validate list. */
+ drm_intel_gem_bo_process_reloc2(bo);
+
+ /* Add the batch buffer to the validation list. There are no relocations
+ * pointing to it.
+ */
+ drm_intel_add_validate_buffer2(bo, 0);
+
+ execbuf.buffers_ptr = (uintptr_t)bufmgr_gem->exec2_objects;
+ execbuf.buffer_count = bufmgr_gem->exec_count;
+ execbuf.batch_start_offset = 0;
+ execbuf.batch_len = used;
+ execbuf.cliprects_ptr = (uintptr_t)cliprects;
+ execbuf.num_cliprects = num_cliprects;
+ execbuf.DR1 = 0;
+ execbuf.DR4 = DR4;
+ execbuf.flags = 0;
+ execbuf.rsvd1 = 0;
+ execbuf.rsvd2 = 0;
+
+ do {
+ ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2,
+ &execbuf);
+ } while (ret == -1 && errno == EINTR);
+
+ if (ret != 0) {
+ ret = -errno;
+ if (ret == -ENOMEM) {
+ fprintf(stderr,
+ "Execbuffer fails to pin. "
+ "Estimate: %u. Actual: %u. Available: %u\n",
+ drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
+ bufmgr_gem->exec_count),
+ drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
+ bufmgr_gem->exec_count),
+ (unsigned int) bufmgr_gem->gtt_size);
+ }
+ }
+ drm_intel_update_buffer_offsets2(bufmgr_gem);
+
+ if (bufmgr_gem->bufmgr.debug)
+ drm_intel_gem_dump_validation_list(bufmgr_gem);
+
+ for (i = 0; i < bufmgr_gem->exec_count; i++) {
+ drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
+
+ /* Disconnect the buffer from the validate list */
+ bo_gem->validate_index = -1;
+ bufmgr_gem->exec_bos[i] = NULL;
+ }
+ bufmgr_gem->exec_count = 0;
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+
+ return ret;
+}
+
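From the client's point of view nothing changes: submission still goes through drm_intel_bo_exec(), and the execbuf2 path is selected at init time (see the bottom of this patch). A hedged sketch of the usual flow; offsets, sizes and error handling are illustrative only:

    #include <stdint.h>
    #include "intel_bufmgr.h"

    static void submit_batch(drm_intel_bo *batch_bo,
                             drm_intel_bo *target_bo,
                             const uint32_t *batch, int used_bytes)
    {
        /* Relocations are emitted while building the batch... */
        drm_intel_bo_emit_reloc(batch_bo, 4, target_bo, 0,
                                I915_GEM_DOMAIN_RENDER,
                                I915_GEM_DOMAIN_RENDER);

        drm_intel_bo_subdata(batch_bo, 0, used_bytes, batch);

        /* ...and bo_exec lands in drm_intel_gem_bo_exec2() when the
         * kernel advertised I915_PARAM_HAS_EXECBUF2. */
        drm_intel_bo_exec(batch_bo, used_bytes, NULL, 0, 0);
    }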
static int
drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)
{
if (bo_gem->global_name == 0 && *tiling_mode == bo_gem->tiling_mode)
return 0;
- /* If we're going from non-tiling to tiling, bump fence count */
- if (bo_gem->tiling_mode == I915_TILING_NONE)
- bo_gem->reloc_tree_fences++;
-
memset(&set_tiling, 0, sizeof(set_tiling));
set_tiling.handle = bo_gem->gem_handle;
- set_tiling.tiling_mode = *tiling_mode;
- set_tiling.stride = stride;
do {
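+ /* set_tiling is an in/out argument and the ioctl may modify
+ * it, so reset the inputs on every EINTR retry.
+ */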
+ set_tiling.tiling_mode = *tiling_mode;
+ set_tiling.stride = stride;
+
ret = ioctl(bufmgr_gem->fd,
DRM_IOCTL_I915_GEM_SET_TILING,
&set_tiling);
} while (ret == -1 && errno == EINTR);
- if (ret != 0) {
- *tiling_mode = bo_gem->tiling_mode;
- return -errno;
- }
bo_gem->tiling_mode = set_tiling.tiling_mode;
bo_gem->swizzle_mode = set_tiling.swizzle_mode;
- /* If we're going from tiling to non-tiling, drop fence count */
- if (bo_gem->tiling_mode == I915_TILING_NONE)
- bo_gem->reloc_tree_fences--;
-
drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
*tiling_mode = bo_gem->tiling_mode;
- return 0;
+ return ret == 0 ? 0 : -errno;
}
static int
bufmgr_gem->bo_reuse = 1;
}
+/**
+ * Enable the use of the fenced relocation type.
+ *
+ * New code should enable this to avoid unnecessary fence register
+ * allocation. If this option is not enabled, all relocs will have a
+ * fence register allocated.
+ */
+void
+drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
+
+ if (bufmgr_gem->bufmgr.bo_exec == drm_intel_gem_bo_exec2)
+ bufmgr_gem->fenced_relocs = 1;
+}
+
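Typical setup, as a hedged sketch; because of the guard above, the call is a no-op unless bo_exec was pointed at the execbuf2 path during init:

    #include "intel_bufmgr.h"

    /* Hypothetical init helper showing the intended call order. */
    static drm_intel_bufmgr *init_bufmgr(int fd)
    {
        drm_intel_bufmgr *bufmgr;

        bufmgr = drm_intel_bufmgr_gem_init(fd, 16 * 1024);
        if (bufmgr == NULL)
            return NULL;

        drm_intel_bufmgr_gem_enable_reuse(bufmgr);
        /* No-op on kernels without execbuffer2 support. */
        drm_intel_bufmgr_gem_enable_fenced_relocs(bufmgr);
        return bufmgr;
    }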
/**
* Return the additional aperture space required by the tree of buffer objects
* rooted at bo.
for (i = 0; i < bo_gem->reloc_count; i++)
total +=
drm_intel_gem_bo_get_aperture_space(bo_gem->
- reloc_target_bo[i]);
+ reloc_target_info[i].bo);
return total;
}
for (i = 0; i < bo_gem->reloc_count; i++)
drm_intel_gem_bo_clear_aperture_space_flag(bo_gem->
- reloc_target_bo[i]);
+ reloc_target_info[i].bo);
}
/**
int i;
for (i = 0; i < bo_gem->reloc_count; i++) {
- if (bo_gem->reloc_target_bo[i] == target_bo)
+ if (bo_gem->reloc_target_info[i].bo == target_bo)
return 1;
- if (_drm_intel_gem_bo_references(bo_gem->reloc_target_bo[i],
+ if (_drm_intel_gem_bo_references(bo_gem->reloc_target_info[i].bo,
target_bo))
return 1;
}
drm_i915_getparam_t gp;
int ret, i;
unsigned long size;
+ int exec2 = 0;
bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
+ if (bufmgr_gem == NULL)
+ return NULL;
+
bufmgr_gem->fd = fd;
if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
}
- if (!IS_I965G(bufmgr_gem)) {
+ if (IS_GEN2(bufmgr_gem))
+ bufmgr_gem->gen = 2;
+ else if (IS_GEN3(bufmgr_gem))
+ bufmgr_gem->gen = 3;
+ else if (IS_GEN4(bufmgr_gem))
+ bufmgr_gem->gen = 4;
+ else
+ bufmgr_gem->gen = 6;
+
+ gp.param = I915_PARAM_HAS_EXECBUF2;
+ ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
+ if (!ret)
+ exec2 = 1;
+
+ if (bufmgr_gem->gen < 4) {
gp.param = I915_PARAM_NUM_FENCES_AVAIL;
gp.value = &bufmgr_gem->available_fences;
ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
fprintf(stderr, "param: %d, val: %d\n", gp.param,
*gp.value);
bufmgr_gem->available_fences = 0;
+ } else {
+ /* XXX The kernel reports the total number of fences,
+ * including any that may be pinned.
+ *
+ * We presume that there will be at least one pinned
+ * fence for the scanout buffer, but there may be more
+ * than one scanout and the user may be manually
+ * pinning buffers. Let's move to execbuffer2 and
+ * thereby forget the insanity of using fences...
+ */
+ bufmgr_gem->available_fences -= 2;
+ if (bufmgr_gem->available_fences < 0)
+ bufmgr_gem->available_fences = 0;
}
}
bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata;
bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering;
bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc;
+ bufmgr_gem->bufmgr.bo_emit_reloc_fence = drm_intel_gem_bo_emit_reloc_fence;
bufmgr_gem->bufmgr.bo_pin = drm_intel_gem_bo_pin;
bufmgr_gem->bufmgr.bo_unpin = drm_intel_gem_bo_unpin;
bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling;
bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
- bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
+ /* Use the new one if available */
+ if (exec2)
+ bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec2;
+ else
+ bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise;
bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_destroy;