1 /**************************************************************************
2 *
3 * Copyright © 2007 Red Hat Inc.
4 * Copyright © 2007-2012 Intel Corporation
5 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * The above copyright notice and this permission notice (including the
25 * next paragraph) shall be included in all copies or substantial portions
26 * of the Software.
27 *
28 *
29 **************************************************************************/
30 /*
31 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
32 * Keith Whitwell <keithw-at-tungstengraphics-dot-com>
33 * Eric Anholt <eric@anholt.net>
34 * Dave Airlie <airlied@linux.ie>
35 */
37 #ifdef HAVE_CONFIG_H
38 #include "config.h"
39 #endif
41 #include <xf86drm.h>
42 #include <xf86atomic.h>
43 #include <fcntl.h>
44 #include <stdio.h>
45 #include <stdlib.h>
46 #include <string.h>
47 #include <unistd.h>
48 #include <assert.h>
49 #include <pthread.h>
50 #include <sys/ioctl.h>
51 #include <sys/mman.h>
52 #include <sys/stat.h>
53 #include <sys/types.h>
54 #include <stdbool.h>
56 #include <errno.h>
57 #ifndef ETIME
58 #define ETIME ETIMEDOUT
59 #endif
60 #include "libdrm_lists.h"
61 #include "intel_bufmgr.h"
62 #include "intel_bufmgr_priv.h"
63 #include "intel_chipset.h"
64 #include "intel_aub.h"
65 #include "string.h"
67 #include "i915_drm.h"
69 #ifdef HAVE_VALGRIND
70 #include <valgrind.h>
71 #include <memcheck.h>
72 #define VG(x) x
73 #else
74 #define VG(x)
75 #endif
77 #define VG_CLEAR(s) VG(memset(&s, 0, sizeof(s)))
79 #define DBG(...) do { \
80 if (bufmgr_gem->bufmgr.debug) \
81 fprintf(stderr, __VA_ARGS__); \
82 } while (0)
84 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
86 typedef struct _drm_intel_bo_gem drm_intel_bo_gem;
88 struct drm_intel_gem_bo_bucket {
89 drmMMListHead head;
90 unsigned long size;
91 };
93 typedef struct _drm_intel_bufmgr_gem {
94 drm_intel_bufmgr bufmgr;
96 int fd;
98 int max_relocs;
100 pthread_mutex_t lock;
102 struct drm_i915_gem_exec_object *exec_objects;
103 struct drm_i915_gem_exec_object2 *exec2_objects;
104 drm_intel_bo **exec_bos;
105 int exec_size;
106 int exec_count;
108 /** Array of lists of cached gem objects of power-of-two sizes */
109 struct drm_intel_gem_bo_bucket cache_bucket[14 * 4];
110 int num_buckets;
111 time_t time;
113 drmMMListHead named;
114 drmMMListHead vma_cache;
115 int vma_count, vma_open, vma_max;
117 uint64_t gtt_size;
118 int available_fences;
119 int pci_device;
120 int gen;
121 unsigned int has_bsd : 1;
122 unsigned int has_blt : 1;
123 unsigned int has_relaxed_fencing : 1;
124 unsigned int has_llc : 1;
125 unsigned int has_wait_timeout : 1;
126 unsigned int bo_reuse : 1;
127 unsigned int no_exec : 1;
128 bool fenced_relocs;
130 FILE *aub_file;
131 uint32_t aub_offset;
132 } drm_intel_bufmgr_gem;
134 #define DRM_INTEL_RELOC_FENCE (1<<0)
136 typedef struct _drm_intel_reloc_target_info {
137 drm_intel_bo *bo;
138 int flags;
139 } drm_intel_reloc_target;
141 struct _drm_intel_bo_gem {
142 drm_intel_bo bo;
144 atomic_t refcount;
145 uint32_t gem_handle;
146 const char *name;
148 /**
149 * Kernel-assigned global name for this object
150 */
151 unsigned int global_name;
152 drmMMListHead name_list;
154 /**
155 * Index of the buffer within the validation list while preparing a
156 * batchbuffer execution.
157 */
158 int validate_index;
160 /**
161 * Current tiling mode
162 */
163 uint32_t tiling_mode;
164 uint32_t swizzle_mode;
165 unsigned long stride;
167 time_t free_time;
169 /** Array passed to the DRM containing relocation information. */
170 struct drm_i915_gem_relocation_entry *relocs;
171 /**
172 * Array of info structs corresponding to relocs[i].target_handle, etc.
173 */
174 drm_intel_reloc_target *reloc_target_info;
175 /** Number of entries in relocs */
176 int reloc_count;
177 /** Mapped address for the buffer, saved across map/unmap cycles */
178 void *mem_virtual;
179 /** GTT virtual address for the buffer, saved across map/unmap cycles */
180 void *gtt_virtual;
181 int map_count;
182 drmMMListHead vma_list;
184 /** BO cache list */
185 drmMMListHead head;
187 /**
188 * Boolean of whether this BO and its children have been included in
189 * the current drm_intel_bufmgr_check_aperture_space() total.
190 */
191 bool included_in_check_aperture;
193 /**
194 * Boolean of whether this buffer has been used as a relocation
195 * target and had its size accounted for, and thus can't have any
196 * further relocations added to it.
197 */
198 bool used_as_reloc_target;
200 /**
201 * Boolean of whether we have encountered an error whilst building the relocation tree.
202 */
203 bool has_error;
205 /**
206 * Boolean of whether this buffer can be re-used
207 */
208 bool reusable;
210 /**
211 * Size in bytes of this buffer and its relocation descendants.
212 *
213 * Used to avoid costly tree walking in
214 * drm_intel_bufmgr_check_aperture in the common case.
215 */
216 int reloc_tree_size;
218 /**
219 * Number of potential fence registers required by this buffer and its
220 * relocations.
221 */
222 int reloc_tree_fences;
224 /** Whether we may need to do the SW_FINISH ioctl on unmap. */
225 bool mapped_cpu_write;
227 uint32_t aub_offset;
229 drm_intel_aub_annotation *aub_annotations;
230 unsigned aub_annotation_count;
231 };
233 static unsigned int
234 drm_intel_gem_estimate_batch_space(drm_intel_bo ** bo_array, int count);
236 static unsigned int
237 drm_intel_gem_compute_batch_space(drm_intel_bo ** bo_array, int count);
239 static int
240 drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
241 uint32_t * swizzle_mode);
243 static int
244 drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
245 uint32_t tiling_mode,
246 uint32_t stride);
248 static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
249 time_t time);
251 static void drm_intel_gem_bo_unreference(drm_intel_bo *bo);
253 static void drm_intel_gem_bo_free(drm_intel_bo *bo);
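/**
 * Rounds a buffer size up to something the fence registers can handle:
 * page-aligned on gen4+, a power of two (with a per-generation minimum)
 * on older chips unless relaxed fencing is available. Objects too large
 * to fence are demoted to I915_TILING_NONE.
 */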
255 static unsigned long
256 drm_intel_gem_bo_tile_size(drm_intel_bufmgr_gem *bufmgr_gem, unsigned long size,
257 uint32_t *tiling_mode)
258 {
259 unsigned long min_size, max_size;
260 unsigned long i;
262 if (*tiling_mode == I915_TILING_NONE)
263 return size;
265 /* 965+ just need multiples of page size for tiling */
266 if (bufmgr_gem->gen >= 4)
267 return ROUND_UP_TO(size, 4096);
269 /* Older chips need powers of two, of at least 512k or 1M */
270 if (bufmgr_gem->gen == 3) {
271 min_size = 1024*1024;
272 max_size = 128*1024*1024;
273 } else {
274 min_size = 512*1024;
275 max_size = 64*1024*1024;
276 }
278 if (size > max_size) {
279 *tiling_mode = I915_TILING_NONE;
280 return size;
281 }
283 /* Do we need to allocate every page for the fence? */
284 if (bufmgr_gem->has_relaxed_fencing)
285 return ROUND_UP_TO(size, 4096);
287 for (i = min_size; i < size; i <<= 1)
288 ;
290 return i;
291 }
293 /*
294 * Round a given pitch up to the minimum required for X tiling on a
295 * given chip. We use 512 as the minimum to allow for a later tiling
296 * change.
297 */
298 static unsigned long
299 drm_intel_gem_bo_tile_pitch(drm_intel_bufmgr_gem *bufmgr_gem,
300 unsigned long pitch, uint32_t *tiling_mode)
301 {
302 unsigned long tile_width;
303 unsigned long i;
305 /* If untiled, then just align it so that we can do rendering
306 * to it with the 3D engine.
307 */
308 if (*tiling_mode == I915_TILING_NONE)
309 return ALIGN(pitch, 64);
311 if (*tiling_mode == I915_TILING_X
312 || (IS_915(bufmgr_gem->pci_device)
313 && *tiling_mode == I915_TILING_Y))
314 tile_width = 512;
315 else
316 tile_width = 128;
318 /* 965 is flexible */
319 if (bufmgr_gem->gen >= 4)
320 return ROUND_UP_TO(pitch, tile_width);
322 /* The older hardware has a maximum pitch of 8192 with tiled
323 * surfaces, so fall back to untiled if it's too large.
324 */
325 if (pitch > 8192) {
326 *tiling_mode = I915_TILING_NONE;
327 return ALIGN(pitch, 64);
328 }
330 /* Pre-965 needs power of two tile width */
331 for (i = tile_width; i < pitch; i <<= 1)
332 ;
334 return i;
335 }
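/**
 * Returns the smallest cache bucket that can hold @size bytes, or NULL if
 * the request is larger than the biggest bucket we maintain.
 */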
337 static struct drm_intel_gem_bo_bucket *
338 drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
339 unsigned long size)
340 {
341 int i;
343 for (i = 0; i < bufmgr_gem->num_buckets; i++) {
344 struct drm_intel_gem_bo_bucket *bucket =
345 &bufmgr_gem->cache_bucket[i];
346 if (bucket->size >= size) {
347 return bucket;
348 }
349 }
351 return NULL;
352 }
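/**
 * Dumps the current validation (exec) list, including each buffer's
 * relocations, when bufmgr debugging is enabled.
 */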
354 static void
355 drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
356 {
357 int i, j;
359 for (i = 0; i < bufmgr_gem->exec_count; i++) {
360 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
361 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
363 if (bo_gem->relocs == NULL) {
364 DBG("%2d: %d (%s)\n", i, bo_gem->gem_handle,
365 bo_gem->name);
366 continue;
367 }
369 for (j = 0; j < bo_gem->reloc_count; j++) {
370 drm_intel_bo *target_bo = bo_gem->reloc_target_info[j].bo;
371 drm_intel_bo_gem *target_gem =
372 (drm_intel_bo_gem *) target_bo;
374 DBG("%2d: %d (%s)@0x%08llx -> "
375 "%d (%s)@0x%08lx + 0x%08x\n",
376 i,
377 bo_gem->gem_handle, bo_gem->name,
378 (unsigned long long)bo_gem->relocs[j].offset,
379 target_gem->gem_handle,
380 target_gem->name,
381 target_bo->offset,
382 bo_gem->relocs[j].delta);
383 }
384 }
385 }
387 static inline void
388 drm_intel_gem_bo_reference(drm_intel_bo *bo)
389 {
390 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
392 atomic_inc(&bo_gem->refcount);
393 }
395 /**
396 * Adds the given buffer to the list of buffers to be validated (moved into the
397 * appropriate memory type) with the next batch submission.
398 *
399 * If a buffer is validated multiple times in a batch submission, it ends up
400 * with the intersection of the memory type flags and the union of the
401 * access flags.
402 */
403 static void
404 drm_intel_add_validate_buffer(drm_intel_bo *bo)
405 {
406 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
407 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
408 int index;
410 if (bo_gem->validate_index != -1)
411 return;
413 /* Extend the array of validation entries as necessary. */
414 if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
415 int new_size = bufmgr_gem->exec_size * 2;
417 if (new_size == 0)
418 new_size = 5;
420 bufmgr_gem->exec_objects =
421 realloc(bufmgr_gem->exec_objects,
422 sizeof(*bufmgr_gem->exec_objects) * new_size);
423 bufmgr_gem->exec_bos =
424 realloc(bufmgr_gem->exec_bos,
425 sizeof(*bufmgr_gem->exec_bos) * new_size);
426 bufmgr_gem->exec_size = new_size;
427 }
429 index = bufmgr_gem->exec_count;
430 bo_gem->validate_index = index;
431 /* Fill in array entry */
432 bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
433 bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
434 bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t) bo_gem->relocs;
435 bufmgr_gem->exec_objects[index].alignment = 0;
436 bufmgr_gem->exec_objects[index].offset = 0;
437 bufmgr_gem->exec_bos[index] = bo;
438 bufmgr_gem->exec_count++;
439 }
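/**
 * execbuffer2 counterpart of drm_intel_add_validate_buffer(): adds the
 * buffer to the exec2 object array (growing it as needed) and optionally
 * marks the entry as requiring a fence register.
 */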
441 static void
442 drm_intel_add_validate_buffer2(drm_intel_bo *bo, int need_fence)
443 {
444 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
445 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
446 int index;
448 if (bo_gem->validate_index != -1) {
449 if (need_fence)
450 bufmgr_gem->exec2_objects[bo_gem->validate_index].flags |=
451 EXEC_OBJECT_NEEDS_FENCE;
452 return;
453 }
455 /* Extend the array of validation entries as necessary. */
456 if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
457 int new_size = bufmgr_gem->exec_size * 2;
459 if (new_size == 0)
460 new_size = 5;
462 bufmgr_gem->exec2_objects =
463 realloc(bufmgr_gem->exec2_objects,
464 sizeof(*bufmgr_gem->exec2_objects) * new_size);
465 bufmgr_gem->exec_bos =
466 realloc(bufmgr_gem->exec_bos,
467 sizeof(*bufmgr_gem->exec_bos) * new_size);
468 bufmgr_gem->exec_size = new_size;
469 }
471 index = bufmgr_gem->exec_count;
472 bo_gem->validate_index = index;
473 /* Fill in array entry */
474 bufmgr_gem->exec2_objects[index].handle = bo_gem->gem_handle;
475 bufmgr_gem->exec2_objects[index].relocation_count = bo_gem->reloc_count;
476 bufmgr_gem->exec2_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
477 bufmgr_gem->exec2_objects[index].alignment = 0;
478 bufmgr_gem->exec2_objects[index].offset = 0;
479 bufmgr_gem->exec_bos[index] = bo;
480 bufmgr_gem->exec2_objects[index].flags = 0;
481 bufmgr_gem->exec2_objects[index].rsvd1 = 0;
482 bufmgr_gem->exec2_objects[index].rsvd2 = 0;
483 if (need_fence) {
484 bufmgr_gem->exec2_objects[index].flags |=
485 EXEC_OBJECT_NEEDS_FENCE;
486 }
487 bufmgr_gem->exec_count++;
488 }
490 #define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
491 sizeof(uint32_t))
493 static void
494 drm_intel_bo_gem_set_in_aperture_size(drm_intel_bufmgr_gem *bufmgr_gem,
495 drm_intel_bo_gem *bo_gem)
496 {
497 int size;
499 assert(!bo_gem->used_as_reloc_target);
501 /* The older chipsets are far less flexible in terms of tiling,
502 * and require tiled buffers to be size-aligned in the aperture.
503 * This means that in the worst possible case we will need a hole
504 * twice as large as the object in order for it to fit into the
505 * aperture. Optimal packing is for wimps.
506 */
507 size = bo_gem->bo.size;
508 if (bufmgr_gem->gen < 4 && bo_gem->tiling_mode != I915_TILING_NONE) {
509 int min_size;
511 if (bufmgr_gem->has_relaxed_fencing) {
512 if (bufmgr_gem->gen == 3)
513 min_size = 1024*1024;
514 else
515 min_size = 512*1024;
517 while (min_size < size)
518 min_size *= 2;
519 } else
520 min_size = size;
522 /* Account for worst-case alignment. */
523 size = 2 * min_size;
524 }
526 bo_gem->reloc_tree_size = size;
527 }
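/**
 * Allocates the relocation array and matching target-info array for a BO.
 *
 * The entry count is capped at bo->size / 4, since each relocated dword in
 * the buffer needs at least four bytes. Returns non-zero and flags the BO
 * as having an error if either allocation fails.
 */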
529 static int
530 drm_intel_setup_reloc_list(drm_intel_bo *bo)
531 {
532 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
533 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
534 unsigned int max_relocs = bufmgr_gem->max_relocs;
536 if (bo->size / 4 < max_relocs)
537 max_relocs = bo->size / 4;
539 bo_gem->relocs = malloc(max_relocs *
540 sizeof(struct drm_i915_gem_relocation_entry));
541 bo_gem->reloc_target_info = malloc(max_relocs *
542 sizeof(drm_intel_reloc_target));
543 if (bo_gem->relocs == NULL || bo_gem->reloc_target_info == NULL) {
544 bo_gem->has_error = true;
546 free (bo_gem->relocs);
547 bo_gem->relocs = NULL;
549 free (bo_gem->reloc_target_info);
550 bo_gem->reloc_target_info = NULL;
552 return 1;
553 }
555 return 0;
556 }
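/**
 * Returns whether the kernel still considers this BO busy on the GPU
 * (DRM_IOCTL_I915_GEM_BUSY).
 */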
558 static int
559 drm_intel_gem_bo_busy(drm_intel_bo *bo)
560 {
561 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
562 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
563 struct drm_i915_gem_busy busy;
564 int ret;
566 VG_CLEAR(busy);
567 busy.handle = bo_gem->gem_handle;
569 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
571 return (ret == 0 && busy.busy);
572 }
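/**
 * Tells the kernel whether the BO's backing pages may be discarded under
 * memory pressure (I915_MADV_DONTNEED) or are needed again
 * (I915_MADV_WILLNEED). Returns whether the pages are still retained.
 */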
574 static int
575 drm_intel_gem_bo_madvise_internal(drm_intel_bufmgr_gem *bufmgr_gem,
576 drm_intel_bo_gem *bo_gem, int state)
577 {
578 struct drm_i915_gem_madvise madv;
580 VG_CLEAR(madv);
581 madv.handle = bo_gem->gem_handle;
582 madv.madv = state;
583 madv.retained = 1;
584 drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
586 return madv.retained;
587 }
589 static int
590 drm_intel_gem_bo_madvise(drm_intel_bo *bo, int madv)
591 {
592 return drm_intel_gem_bo_madvise_internal
593 ((drm_intel_bufmgr_gem *) bo->bufmgr,
594 (drm_intel_bo_gem *) bo,
595 madv);
596 }
598 /* drop the oldest entries that have been purged by the kernel */
599 static void
600 drm_intel_gem_bo_cache_purge_bucket(drm_intel_bufmgr_gem *bufmgr_gem,
601 struct drm_intel_gem_bo_bucket *bucket)
602 {
603 while (!DRMLISTEMPTY(&bucket->head)) {
604 drm_intel_bo_gem *bo_gem;
606 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
607 bucket->head.next, head);
608 if (drm_intel_gem_bo_madvise_internal
609 (bufmgr_gem, bo_gem, I915_MADV_DONTNEED))
610 break;
612 DRMLISTDEL(&bo_gem->head);
613 drm_intel_gem_bo_free(&bo_gem->bo);
614 }
615 }
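/**
 * Common allocation path: reuses a BO from the matching cache bucket when
 * possible (preferring the MRU entry for render targets, and an idle LRU
 * entry otherwise), falling back to DRM_IOCTL_I915_GEM_CREATE.
 */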
617 static drm_intel_bo *
618 drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr,
619 const char *name,
620 unsigned long size,
621 unsigned long flags,
622 uint32_t tiling_mode,
623 unsigned long stride)
624 {
625 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
626 drm_intel_bo_gem *bo_gem;
627 unsigned int page_size = getpagesize();
628 int ret;
629 struct drm_intel_gem_bo_bucket *bucket;
630 bool alloc_from_cache;
631 unsigned long bo_size;
632 bool for_render = false;
634 if (flags & BO_ALLOC_FOR_RENDER)
635 for_render = true;
637 /* Round the allocated size up to a power of two number of pages. */
638 bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, size);
640 /* If we don't have caching at this size, don't actually round the
641 * allocation up.
642 */
643 if (bucket == NULL) {
644 bo_size = size;
645 if (bo_size < page_size)
646 bo_size = page_size;
647 } else {
648 bo_size = bucket->size;
649 }
651 pthread_mutex_lock(&bufmgr_gem->lock);
652 /* Get a buffer out of the cache if available */
653 retry:
654 alloc_from_cache = false;
655 if (bucket != NULL && !DRMLISTEMPTY(&bucket->head)) {
656 if (for_render) {
657 /* Allocate new render-target BOs from the tail (MRU)
658 * of the list, as it will likely be hot in the GPU
659 * cache and in the aperture for us.
660 */
661 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
662 bucket->head.prev, head);
663 DRMLISTDEL(&bo_gem->head);
664 alloc_from_cache = true;
665 } else {
666 /* For non-render-target BOs (where we're probably
667 * going to map it first thing in order to fill it
668 * with data), check if the last BO in the cache is
669 * unbusy, and only reuse in that case. Otherwise,
670 * allocating a new buffer is probably faster than
671 * waiting for the GPU to finish.
672 */
673 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
674 bucket->head.next, head);
675 if (!drm_intel_gem_bo_busy(&bo_gem->bo)) {
676 alloc_from_cache = true;
677 DRMLISTDEL(&bo_gem->head);
678 }
679 }
681 if (alloc_from_cache) {
682 if (!drm_intel_gem_bo_madvise_internal
683 (bufmgr_gem, bo_gem, I915_MADV_WILLNEED)) {
684 drm_intel_gem_bo_free(&bo_gem->bo);
685 drm_intel_gem_bo_cache_purge_bucket(bufmgr_gem,
686 bucket);
687 goto retry;
688 }
690 if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
691 tiling_mode,
692 stride)) {
693 drm_intel_gem_bo_free(&bo_gem->bo);
694 goto retry;
695 }
696 }
697 }
698 pthread_mutex_unlock(&bufmgr_gem->lock);
700 if (!alloc_from_cache) {
701 struct drm_i915_gem_create create;
703 bo_gem = calloc(1, sizeof(*bo_gem));
704 if (!bo_gem)
705 return NULL;
707 bo_gem->bo.size = bo_size;
709 VG_CLEAR(create);
710 create.size = bo_size;
712 ret = drmIoctl(bufmgr_gem->fd,
713 DRM_IOCTL_I915_GEM_CREATE,
714 &create);
715 bo_gem->gem_handle = create.handle;
716 bo_gem->bo.handle = bo_gem->gem_handle;
717 if (ret != 0) {
718 free(bo_gem);
719 return NULL;
720 }
721 bo_gem->bo.bufmgr = bufmgr;
723 bo_gem->tiling_mode = I915_TILING_NONE;
724 bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
725 bo_gem->stride = 0;
727 if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
728 tiling_mode,
729 stride)) {
730 drm_intel_gem_bo_free(&bo_gem->bo);
731 return NULL;
732 }
734 DRMINITLISTHEAD(&bo_gem->name_list);
735 DRMINITLISTHEAD(&bo_gem->vma_list);
736 }
738 bo_gem->name = name;
739 atomic_set(&bo_gem->refcount, 1);
740 bo_gem->validate_index = -1;
741 bo_gem->reloc_tree_fences = 0;
742 bo_gem->used_as_reloc_target = false;
743 bo_gem->has_error = false;
744 bo_gem->reusable = true;
745 bo_gem->aub_annotations = NULL;
746 bo_gem->aub_annotation_count = 0;
748 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
750 DBG("bo_create: buf %d (%s) %ldb\n",
751 bo_gem->gem_handle, bo_gem->name, size);
753 return &bo_gem->bo;
754 }
756 static drm_intel_bo *
757 drm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
758 const char *name,
759 unsigned long size,
760 unsigned int alignment)
761 {
762 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size,
763 BO_ALLOC_FOR_RENDER,
764 I915_TILING_NONE, 0);
765 }
767 static drm_intel_bo *
768 drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr,
769 const char *name,
770 unsigned long size,
771 unsigned int alignment)
772 {
773 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, 0,
774 I915_TILING_NONE, 0);
775 }
777 static drm_intel_bo *
778 drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
779 int x, int y, int cpp, uint32_t *tiling_mode,
780 unsigned long *pitch, unsigned long flags)
781 {
782 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
783 unsigned long size, stride;
784 uint32_t tiling;
786 do {
787 unsigned long aligned_y, height_alignment;
789 tiling = *tiling_mode;
791 /* If we're tiled, our allocations are in 8 or 32-row blocks,
792 * so failure to align our height means that we won't allocate
793 * enough pages.
794 *
795 * If we're untiled, we still have to align to 2 rows high
796 * because the data port accesses 2x2 blocks even if the
797 * bottom row isn't to be rendered, so failure to align means
798 * we could walk off the end of the GTT and fault. This is
799 * documented on 965, and may be the case on older chipsets
800 * too so we try to be careful.
801 */
802 aligned_y = y;
803 height_alignment = 2;
805 if ((bufmgr_gem->gen == 2) && tiling != I915_TILING_NONE)
806 height_alignment = 16;
807 else if (tiling == I915_TILING_X
808 || (IS_915(bufmgr_gem->pci_device)
809 && tiling == I915_TILING_Y))
810 height_alignment = 8;
811 else if (tiling == I915_TILING_Y)
812 height_alignment = 32;
813 aligned_y = ALIGN(y, height_alignment);
815 stride = x * cpp;
816 stride = drm_intel_gem_bo_tile_pitch(bufmgr_gem, stride, tiling_mode);
817 size = stride * aligned_y;
818 size = drm_intel_gem_bo_tile_size(bufmgr_gem, size, tiling_mode);
819 } while (*tiling_mode != tiling);
820 *pitch = stride;
822 if (tiling == I915_TILING_NONE)
823 stride = 0;
825 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, flags,
826 tiling, stride);
827 }
829 /**
830 * Returns a drm_intel_bo wrapping the given buffer object handle.
831 *
832 * This can be used when one application needs to pass a buffer object
833 * to another.
834 */
835 drm_intel_bo *
836 drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
837 const char *name,
838 unsigned int handle)
839 {
840 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
841 drm_intel_bo_gem *bo_gem;
842 int ret;
843 struct drm_gem_open open_arg;
844 struct drm_i915_gem_get_tiling get_tiling;
845 drmMMListHead *list;
847 /* At the moment most applications only have a few named bos.
848 * For instance, in a DRI client only the render buffers passed
849 * between X and the client are named. And since X returns the
850 * alternating names for the front/back buffer a linear search
851 * provides a sufficiently fast match.
852 */
853 for (list = bufmgr_gem->named.next;
854 list != &bufmgr_gem->named;
855 list = list->next) {
856 bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
857 if (bo_gem->global_name == handle) {
858 drm_intel_gem_bo_reference(&bo_gem->bo);
859 return &bo_gem->bo;
860 }
861 }
863 bo_gem = calloc(1, sizeof(*bo_gem));
864 if (!bo_gem)
865 return NULL;
867 VG_CLEAR(open_arg);
868 open_arg.name = handle;
869 ret = drmIoctl(bufmgr_gem->fd,
870 DRM_IOCTL_GEM_OPEN,
871 &open_arg);
872 if (ret != 0) {
873 DBG("Couldn't reference %s handle 0x%08x: %s\n",
874 name, handle, strerror(errno));
875 free(bo_gem);
876 return NULL;
877 }
878 bo_gem->bo.size = open_arg.size;
879 bo_gem->bo.offset = 0;
880 bo_gem->bo.virtual = NULL;
881 bo_gem->bo.bufmgr = bufmgr;
882 bo_gem->name = name;
883 atomic_set(&bo_gem->refcount, 1);
884 bo_gem->validate_index = -1;
885 bo_gem->gem_handle = open_arg.handle;
886 bo_gem->bo.handle = open_arg.handle;
887 bo_gem->global_name = handle;
888 bo_gem->reusable = false;
890 VG_CLEAR(get_tiling);
891 get_tiling.handle = bo_gem->gem_handle;
892 ret = drmIoctl(bufmgr_gem->fd,
893 DRM_IOCTL_I915_GEM_GET_TILING,
894 &get_tiling);
895 if (ret != 0) {
896 drm_intel_gem_bo_unreference(&bo_gem->bo);
897 return NULL;
898 }
899 bo_gem->tiling_mode = get_tiling.tiling_mode;
900 bo_gem->swizzle_mode = get_tiling.swizzle_mode;
901 /* XXX stride is unknown */
902 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
904 DRMINITLISTHEAD(&bo_gem->vma_list);
905 DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
906 DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);
908 return &bo_gem->bo;
909 }
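/**
 * Releases the kernel object backing a BO: tears down any cached CPU/GTT
 * mappings and closes the GEM handle.
 */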
911 static void
912 drm_intel_gem_bo_free(drm_intel_bo *bo)
913 {
914 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
915 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
916 struct drm_gem_close close;
917 int ret;
919 DRMLISTDEL(&bo_gem->vma_list);
920 if (bo_gem->mem_virtual) {
921 VG(VALGRIND_FREELIKE_BLOCK(bo_gem->mem_virtual, 0));
922 munmap(bo_gem->mem_virtual, bo_gem->bo.size);
923 bufmgr_gem->vma_count--;
924 }
925 if (bo_gem->gtt_virtual) {
926 munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
927 bufmgr_gem->vma_count--;
928 }
930 /* Close this object */
931 VG_CLEAR(close);
932 close.handle = bo_gem->gem_handle;
933 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
934 if (ret != 0) {
935 DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
936 bo_gem->gem_handle, bo_gem->name, strerror(errno));
937 }
938 free(bo_gem->aub_annotations);
939 free(bo);
940 }
942 static void
943 drm_intel_gem_bo_mark_mmaps_incoherent(drm_intel_bo *bo)
944 {
945 #if HAVE_VALGRIND
946 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
948 if (bo_gem->mem_virtual)
949 VALGRIND_MAKE_MEM_NOACCESS(bo_gem->mem_virtual, bo->size);
951 if (bo_gem->gtt_virtual)
952 VALGRIND_MAKE_MEM_NOACCESS(bo_gem->gtt_virtual, bo->size);
953 #endif
954 }
956 /** Frees all cached buffers significantly older than @time. */
957 static void
958 drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
959 {
960 int i;
962 if (bufmgr_gem->time == time)
963 return;
965 for (i = 0; i < bufmgr_gem->num_buckets; i++) {
966 struct drm_intel_gem_bo_bucket *bucket =
967 &bufmgr_gem->cache_bucket[i];
969 while (!DRMLISTEMPTY(&bucket->head)) {
970 drm_intel_bo_gem *bo_gem;
972 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
973 bucket->head.next, head);
974 if (time - bo_gem->free_time <= 1)
975 break;
977 DRMLISTDEL(&bo_gem->head);
979 drm_intel_gem_bo_free(&bo_gem->bo);
980 }
981 }
983 bufmgr_gem->time = time;
984 }
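/**
 * Evicts cached mmaps (of BOs with no active mapping) until the total
 * stays under vma_max, keeping headroom for mappings currently open.
 */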
986 static void drm_intel_gem_bo_purge_vma_cache(drm_intel_bufmgr_gem *bufmgr_gem)
987 {
988 int limit;
990 DBG("%s: cached=%d, open=%d, limit=%d\n", __FUNCTION__,
991 bufmgr_gem->vma_count, bufmgr_gem->vma_open, bufmgr_gem->vma_max);
993 if (bufmgr_gem->vma_max < 0)
994 return;
996 /* We may need to evict a few entries in order to create new mmaps */
997 limit = bufmgr_gem->vma_max - 2*bufmgr_gem->vma_open;
998 if (limit < 0)
999 limit = 0;
1001 while (bufmgr_gem->vma_count > limit) {
1002 drm_intel_bo_gem *bo_gem;
1004 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
1005 bufmgr_gem->vma_cache.next,
1006 vma_list);
1007 assert(bo_gem->map_count == 0);
1008 DRMLISTDELINIT(&bo_gem->vma_list);
1010 if (bo_gem->mem_virtual) {
1011 munmap(bo_gem->mem_virtual, bo_gem->bo.size);
1012 bo_gem->mem_virtual = NULL;
1013 bufmgr_gem->vma_count--;
1014 }
1015 if (bo_gem->gtt_virtual) {
1016 munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
1017 bo_gem->gtt_virtual = NULL;
1018 bufmgr_gem->vma_count--;
1019 }
1020 }
1021 }
1023 static void drm_intel_gem_bo_close_vma(drm_intel_bufmgr_gem *bufmgr_gem,
1024 drm_intel_bo_gem *bo_gem)
1025 {
1026 bufmgr_gem->vma_open--;
1027 DRMLISTADDTAIL(&bo_gem->vma_list, &bufmgr_gem->vma_cache);
1028 if (bo_gem->mem_virtual)
1029 bufmgr_gem->vma_count++;
1030 if (bo_gem->gtt_virtual)
1031 bufmgr_gem->vma_count++;
1032 drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
1033 }
1035 static void drm_intel_gem_bo_open_vma(drm_intel_bufmgr_gem *bufmgr_gem,
1036 drm_intel_bo_gem *bo_gem)
1037 {
1038 bufmgr_gem->vma_open++;
1039 DRMLISTDEL(&bo_gem->vma_list);
1040 if (bo_gem->mem_virtual)
1041 bufmgr_gem->vma_count--;
1042 if (bo_gem->gtt_virtual)
1043 bufmgr_gem->vma_count--;
1044 drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
1045 }
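/**
 * Final teardown once the last reference is dropped (called with the
 * bufmgr lock held): unreferences relocation targets, frees the relocation
 * arrays, clears stale mappings, and either returns the BO to its cache
 * bucket (marked purgeable) or frees it outright.
 */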
1047 static void
1048 drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
1049 {
1050 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1051 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1052 struct drm_intel_gem_bo_bucket *bucket;
1053 int i;
1055 /* Unreference all the target buffers */
1056 for (i = 0; i < bo_gem->reloc_count; i++) {
1057 if (bo_gem->reloc_target_info[i].bo != bo) {
1058 drm_intel_gem_bo_unreference_locked_timed(bo_gem->
1059 reloc_target_info[i].bo,
1060 time);
1061 }
1062 }
1063 bo_gem->reloc_count = 0;
1064 bo_gem->used_as_reloc_target = false;
1066 DBG("bo_unreference final: %d (%s)\n",
1067 bo_gem->gem_handle, bo_gem->name);
1069 /* release memory associated with this object */
1070 if (bo_gem->reloc_target_info) {
1071 free(bo_gem->reloc_target_info);
1072 bo_gem->reloc_target_info = NULL;
1073 }
1074 if (bo_gem->relocs) {
1075 free(bo_gem->relocs);
1076 bo_gem->relocs = NULL;
1077 }
1079 /* Clear any left-over mappings */
1080 if (bo_gem->map_count) {
1081 DBG("bo freed with non-zero map-count %d\n", bo_gem->map_count);
1082 bo_gem->map_count = 0;
1083 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1084 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1085 }
1087 DRMLISTDEL(&bo_gem->name_list);
1089 bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
1090 /* Put the buffer into our internal cache for reuse if we can. */
1091 if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
1092 drm_intel_gem_bo_madvise_internal(bufmgr_gem, bo_gem,
1093 I915_MADV_DONTNEED)) {
1094 bo_gem->free_time = time;
1096 bo_gem->name = NULL;
1097 bo_gem->validate_index = -1;
1099 DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
1100 } else {
1101 drm_intel_gem_bo_free(bo);
1102 }
1103 }
1105 static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
1106 time_t time)
1107 {
1108 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1110 assert(atomic_read(&bo_gem->refcount) > 0);
1111 if (atomic_dec_and_test(&bo_gem->refcount))
1112 drm_intel_gem_bo_unreference_final(bo, time);
1113 }
1115 static void drm_intel_gem_bo_unreference(drm_intel_bo *bo)
1116 {
1117 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1119 assert(atomic_read(&bo_gem->refcount) > 0);
1120 if (atomic_dec_and_test(&bo_gem->refcount)) {
1121 drm_intel_bufmgr_gem *bufmgr_gem =
1122 (drm_intel_bufmgr_gem *) bo->bufmgr;
1123 struct timespec time;
1125 clock_gettime(CLOCK_MONOTONIC, &time);
1127 pthread_mutex_lock(&bufmgr_gem->lock);
1128 drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
1129 drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
1130 pthread_mutex_unlock(&bufmgr_gem->lock);
1131 }
1132 }
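/**
 * Maps the BO through the CPU (DRM_IOCTL_I915_GEM_MMAP), caching the
 * mapping across map/unmap cycles, and moves the object to the CPU domain
 * so that the CPU sees coherent data.
 */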
1134 static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
1135 {
1136 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1137 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1138 struct drm_i915_gem_set_domain set_domain;
1139 int ret;
1141 pthread_mutex_lock(&bufmgr_gem->lock);
1143 if (bo_gem->map_count++ == 0)
1144 drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
1146 if (!bo_gem->mem_virtual) {
1147 struct drm_i915_gem_mmap mmap_arg;
1149 DBG("bo_map: %d (%s), map_count=%d\n",
1150 bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
1152 VG_CLEAR(mmap_arg);
1153 mmap_arg.handle = bo_gem->gem_handle;
1154 mmap_arg.offset = 0;
1155 mmap_arg.size = bo->size;
1156 ret = drmIoctl(bufmgr_gem->fd,
1157 DRM_IOCTL_I915_GEM_MMAP,
1158 &mmap_arg);
1159 if (ret != 0) {
1160 ret = -errno;
1161 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
1162 __FILE__, __LINE__, bo_gem->gem_handle,
1163 bo_gem->name, strerror(errno));
1164 if (--bo_gem->map_count == 0)
1165 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1166 pthread_mutex_unlock(&bufmgr_gem->lock);
1167 return ret;
1168 }
1169 VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
1170 bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
1171 }
1172 DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
1173 bo_gem->mem_virtual);
1174 bo->virtual = bo_gem->mem_virtual;
1176 VG_CLEAR(set_domain);
1177 set_domain.handle = bo_gem->gem_handle;
1178 set_domain.read_domains = I915_GEM_DOMAIN_CPU;
1179 if (write_enable)
1180 set_domain.write_domain = I915_GEM_DOMAIN_CPU;
1181 else
1182 set_domain.write_domain = 0;
1183 ret = drmIoctl(bufmgr_gem->fd,
1184 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1185 &set_domain);
1186 if (ret != 0) {
1187 DBG("%s:%d: Error setting to CPU domain %d: %s\n",
1188 __FILE__, __LINE__, bo_gem->gem_handle,
1189 strerror(errno));
1190 }
1192 if (write_enable)
1193 bo_gem->mapped_cpu_write = true;
1195 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1196 VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->mem_virtual, bo->size));
1197 pthread_mutex_unlock(&bufmgr_gem->lock);
1199 return 0;
1200 }
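/**
 * Common helper for the GTT mapping paths: fetches the fake mmap offset
 * (DRM_IOCTL_I915_GEM_MMAP_GTT) and mmaps it, caching the result in
 * gtt_virtual. Callers handle any required domain change themselves.
 */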
1202 static int
1203 map_gtt(drm_intel_bo *bo)
1204 {
1205 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1206 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1207 int ret;
1209 if (bo_gem->map_count++ == 0)
1210 drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
1212 /* Get a mapping of the buffer if we haven't before. */
1213 if (bo_gem->gtt_virtual == NULL) {
1214 struct drm_i915_gem_mmap_gtt mmap_arg;
1216 DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
1217 bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
1219 VG_CLEAR(mmap_arg);
1220 mmap_arg.handle = bo_gem->gem_handle;
1222 /* Get the fake offset back... */
1223 ret = drmIoctl(bufmgr_gem->fd,
1224 DRM_IOCTL_I915_GEM_MMAP_GTT,
1225 &mmap_arg);
1226 if (ret != 0) {
1227 ret = -errno;
1228 DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
1229 __FILE__, __LINE__,
1230 bo_gem->gem_handle, bo_gem->name,
1231 strerror(errno));
1232 if (--bo_gem->map_count == 0)
1233 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1234 return ret;
1235 }
1237 /* and mmap it */
1238 bo_gem->gtt_virtual = mmap(0, bo->size, PROT_READ | PROT_WRITE,
1239 MAP_SHARED, bufmgr_gem->fd,
1240 mmap_arg.offset);
1241 if (bo_gem->gtt_virtual == MAP_FAILED) {
1242 bo_gem->gtt_virtual = NULL;
1243 ret = -errno;
1244 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
1245 __FILE__, __LINE__,
1246 bo_gem->gem_handle, bo_gem->name,
1247 strerror(errno));
1248 if (--bo_gem->map_count == 0)
1249 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1250 return ret;
1251 }
1252 }
1254 bo->virtual = bo_gem->gtt_virtual;
1256 DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
1257 bo_gem->gtt_virtual);
1259 return 0;
1260 }
1262 int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
1263 {
1264 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1265 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1266 struct drm_i915_gem_set_domain set_domain;
1267 int ret;
1269 pthread_mutex_lock(&bufmgr_gem->lock);
1271 ret = map_gtt(bo);
1272 if (ret) {
1273 pthread_mutex_unlock(&bufmgr_gem->lock);
1274 return ret;
1275 }
1277 /* Now move it to the GTT domain so that the GPU and CPU
1278 * caches are flushed and the GPU isn't actively using the
1279 * buffer.
1280 *
1281 * The pagefault handler does this domain change for us when
1282 * it has unbound the BO from the GTT, but it's up to us to
1283 * tell it when we're about to use things if we had done
1284 * rendering and it still happens to be bound to the GTT.
1285 */
1286 VG_CLEAR(set_domain);
1287 set_domain.handle = bo_gem->gem_handle;
1288 set_domain.read_domains = I915_GEM_DOMAIN_GTT;
1289 set_domain.write_domain = I915_GEM_DOMAIN_GTT;
1290 ret = drmIoctl(bufmgr_gem->fd,
1291 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1292 &set_domain);
1293 if (ret != 0) {
1294 DBG("%s:%d: Error setting domain %d: %s\n",
1295 __FILE__, __LINE__, bo_gem->gem_handle,
1296 strerror(errno));
1297 }
1299 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1300 VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size));
1301 pthread_mutex_unlock(&bufmgr_gem->lock);
1303 return 0;
1304 }
1306 /**
1307 * Performs a mapping of the buffer object like the normal GTT
1308 * mapping, but avoids waiting for the GPU to be done reading from or
1309 * rendering to the buffer.
1310 *
1311 * This is used in the implementation of GL_ARB_map_buffer_range: The
1312 * user asks to create a buffer, then does a mapping, fills some
1313 * space, runs a drawing command, then asks to map it again without
1314 * synchronizing because it guarantees that it won't write over the
1315 * data that the GPU is busy using (or, more specifically, that if it
1316 * does write over the data, it acknowledges that rendering is
1317 * undefined).
1318 */
1320 int drm_intel_gem_bo_map_unsynchronized(drm_intel_bo *bo)
1321 {
1322 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1323 int ret;
1325 /* If the CPU cache isn't coherent with the GTT, then use a
1326 * regular synchronized mapping. The problem is that we don't
1327 * track where the buffer was last used on the CPU side in
1328 * terms of drm_intel_bo_map vs drm_intel_gem_bo_map_gtt, so
1329 * we would potentially corrupt the buffer even when the user
1330 * does reasonable things.
1331 */
1332 if (!bufmgr_gem->has_llc)
1333 return drm_intel_gem_bo_map_gtt(bo);
1335 pthread_mutex_lock(&bufmgr_gem->lock);
1336 ret = map_gtt(bo);
1337 pthread_mutex_unlock(&bufmgr_gem->lock);
1339 return ret;
1340 }
1342 static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
1343 {
1344 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1345 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1346 int ret = 0;
1348 if (bo == NULL)
1349 return 0;
1351 pthread_mutex_lock(&bufmgr_gem->lock);
1353 if (bo_gem->map_count <= 0) {
1354 DBG("attempted to unmap an unmapped bo\n");
1355 pthread_mutex_unlock(&bufmgr_gem->lock);
1356 /* Preserve the old behaviour of just treating this as a
1357 * no-op rather than reporting the error.
1358 */
1359 return 0;
1360 }
1362 if (bo_gem->mapped_cpu_write) {
1363 struct drm_i915_gem_sw_finish sw_finish;
1365 /* Cause a flush to happen if the buffer's pinned for
1366 * scanout, so the results show up in a timely manner.
1367 * Unlike GTT set domains, this only does work if the
1368 * buffer should be scanout-related.
1369 */
1370 VG_CLEAR(sw_finish);
1371 sw_finish.handle = bo_gem->gem_handle;
1372 ret = drmIoctl(bufmgr_gem->fd,
1373 DRM_IOCTL_I915_GEM_SW_FINISH,
1374 &sw_finish);
1375 ret = ret == -1 ? -errno : 0;
1377 bo_gem->mapped_cpu_write = false;
1378 }
1380 /* We need to unmap after every invocation as we cannot track
1381 * an open vma for every bo as that will exhaust the system
1382 * limits and cause later failures.
1383 */
1384 if (--bo_gem->map_count == 0) {
1385 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1386 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1387 bo->virtual = NULL;
1388 }
1389 pthread_mutex_unlock(&bufmgr_gem->lock);
1391 return ret;
1392 }
1394 int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
1395 {
1396 return drm_intel_gem_bo_unmap(bo);
1397 }
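/** Uploads @size bytes at @offset into the BO via DRM_IOCTL_I915_GEM_PWRITE. */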
1399 static int
1400 drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
1401 unsigned long size, const void *data)
1402 {
1403 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1404 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1405 struct drm_i915_gem_pwrite pwrite;
1406 int ret;
1408 VG_CLEAR(pwrite);
1409 pwrite.handle = bo_gem->gem_handle;
1410 pwrite.offset = offset;
1411 pwrite.size = size;
1412 pwrite.data_ptr = (uint64_t) (uintptr_t) data;
1413 ret = drmIoctl(bufmgr_gem->fd,
1414 DRM_IOCTL_I915_GEM_PWRITE,
1415 &pwrite);
1416 if (ret != 0) {
1417 ret = -errno;
1418 DBG("%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
1419 __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
1420 (int)size, strerror(errno));
1421 }
1423 return ret;
1424 }
1426 static int
1427 drm_intel_gem_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
1428 {
1429 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1430 struct drm_i915_get_pipe_from_crtc_id get_pipe_from_crtc_id;
1431 int ret;
1433 VG_CLEAR(get_pipe_from_crtc_id);
1434 get_pipe_from_crtc_id.crtc_id = crtc_id;
1435 ret = drmIoctl(bufmgr_gem->fd,
1436 DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID,
1437 &get_pipe_from_crtc_id);
1438 if (ret != 0) {
1439 /* We return -1 here to signal that we don't
1440 * know which pipe is associated with this crtc.
1441 * This lets the caller know that this information
1442 * isn't available; using the wrong pipe for
1443 * vblank waiting can cause the chipset to lock up
1444 */
1445 return -1;
1446 }
1448 return get_pipe_from_crtc_id.pipe;
1449 }
1451 static int
1452 drm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
1453 unsigned long size, void *data)
1454 {
1455 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1456 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1457 struct drm_i915_gem_pread pread;
1458 int ret;
1460 VG_CLEAR(pread);
1461 pread.handle = bo_gem->gem_handle;
1462 pread.offset = offset;
1463 pread.size = size;
1464 pread.data_ptr = (uint64_t) (uintptr_t) data;
1465 ret = drmIoctl(bufmgr_gem->fd,
1466 DRM_IOCTL_I915_GEM_PREAD,
1467 &pread);
1468 if (ret != 0) {
1469 ret = -errno;
1470 DBG("%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
1471 __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
1472 (int)size, strerror(errno));
1473 }
1475 return ret;
1476 }
1478 /** Waits for all GPU rendering with the object to have completed. */
1479 static void
1480 drm_intel_gem_bo_wait_rendering(drm_intel_bo *bo)
1481 {
1482 drm_intel_gem_bo_start_gtt_access(bo, 1);
1483 }
1485 /**
1486 * Waits on a BO for the given amount of time.
1487 *
1488 * @bo: buffer object to wait for
1489 * @timeout_ns: amount of time to wait in nanoseconds.
1490 * If value is less than 0, an infinite wait will occur.
1491 *
1492 * Returns 0 if the wait was successful, i.e. the last batch referencing the
1493 * object has completed within the allotted time. Otherwise some negative return
1494 * value describes the error. Of particular interest is -ETIME when the wait has
1495 * failed to yield the desired result.
1496 *
1497 * Similar to drm_intel_gem_bo_wait_rendering except a timeout parameter allows
1498 * the operation to give up after a certain amount of time. Another subtle
1499 * difference is the internal locking semantics are different (this variant does
1500 * not hold the lock for the duration of the wait). This makes the wait subject
1501 * to a larger userspace race window.
1502 *
1503 * The implementation shall wait until the object is no longer actively
1504 * referenced within a batch buffer at the time of the call. The wait does
1505 * not guarantee that the buffer will not be re-issued by another thread or
1506 * via a flinked handle. Userspace must make sure this race does not occur
1507 * if such precision is important.
1508 */
1509 int drm_intel_gem_bo_wait(drm_intel_bo *bo, int64_t timeout_ns)
1510 {
1511 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1512 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1513 struct drm_i915_gem_wait wait;
1514 int ret;
1516 if (!bufmgr_gem->has_wait_timeout) {
1517 DBG("%s:%d: Timed wait is not supported. Falling back to "
1518 "infinite wait\n", __FILE__, __LINE__);
1519 if (timeout_ns) {
1520 drm_intel_gem_bo_wait_rendering(bo);
1521 return 0;
1522 } else {
1523 return drm_intel_gem_bo_busy(bo) ? -ETIME : 0;
1524 }
1525 }
1527 wait.bo_handle = bo_gem->gem_handle;
1528 wait.timeout_ns = timeout_ns;
1529 wait.flags = 0;
1530 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
1531 if (ret == -1)
1532 return -errno;
1534 return ret;
1535 }
1537 /**
1538 * Sets the object to the GTT read and possibly write domain, used by the X
1539 * 2D driver in the absence of kernel support to do drm_intel_gem_bo_map_gtt().
1540 *
1541 * In combination with drm_intel_gem_bo_pin() and manual fence management, we
1542 * can do tiled pixmaps this way.
1543 */
1544 void
1545 drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)
1546 {
1547 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1548 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1549 struct drm_i915_gem_set_domain set_domain;
1550 int ret;
1552 VG_CLEAR(set_domain);
1553 set_domain.handle = bo_gem->gem_handle;
1554 set_domain.read_domains = I915_GEM_DOMAIN_GTT;
1555 set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0;
1556 ret = drmIoctl(bufmgr_gem->fd,
1557 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1558 &set_domain);
1559 if (ret != 0) {
1560 DBG("%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
1561 __FILE__, __LINE__, bo_gem->gem_handle,
1562 set_domain.read_domains, set_domain.write_domain,
1563 strerror(errno));
1564 }
1565 }
1567 static void
1568 drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
1569 {
1570 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1571 int i;
1573 free(bufmgr_gem->exec2_objects);
1574 free(bufmgr_gem->exec_objects);
1575 free(bufmgr_gem->exec_bos);
1577 pthread_mutex_destroy(&bufmgr_gem->lock);
1579 /* Free any cached buffer objects we were going to reuse */
1580 for (i = 0; i < bufmgr_gem->num_buckets; i++) {
1581 struct drm_intel_gem_bo_bucket *bucket =
1582 &bufmgr_gem->cache_bucket[i];
1583 drm_intel_bo_gem *bo_gem;
1585 while (!DRMLISTEMPTY(&bucket->head)) {
1586 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
1587 bucket->head.next, head);
1588 DRMLISTDEL(&bo_gem->head);
1590 drm_intel_gem_bo_free(&bo_gem->bo);
1591 }
1592 }
1594 free(bufmgr);
1595 }
1597 /**
1598 * Adds the target buffer to the validation list and adds the relocation
1599 * to the reloc_buffer's relocation list.
1600 *
1601 * The relocation entry at the given offset must already contain the
1602 * precomputed relocation value, because the kernel will optimize out
1603 * the relocation entry write when the buffer hasn't moved from the
1604 * last known offset in target_bo.
1605 */
1606 static int
1607 do_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
1608 drm_intel_bo *target_bo, uint32_t target_offset,
1609 uint32_t read_domains, uint32_t write_domain,
1610 bool need_fence)
1611 {
1612 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1613 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1614 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
1615 bool fenced_command;
1617 if (bo_gem->has_error)
1618 return -ENOMEM;
1620 if (target_bo_gem->has_error) {
1621 bo_gem->has_error = true;
1622 return -ENOMEM;
1623 }
1625 /* We never use HW fences for rendering on 965+ */
1626 if (bufmgr_gem->gen >= 4)
1627 need_fence = false;
1629 fenced_command = need_fence;
1630 if (target_bo_gem->tiling_mode == I915_TILING_NONE)
1631 need_fence = false;
1633 /* Create a new relocation list if needed */
1634 if (bo_gem->relocs == NULL && drm_intel_setup_reloc_list(bo))
1635 return -ENOMEM;
1637 /* Check overflow */
1638 assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);
1640 /* Check args */
1641 assert(offset <= bo->size - 4);
1642 assert((write_domain & (write_domain - 1)) == 0);
1644 /* Make sure that we're not adding a reloc to something whose size has
1645 * already been accounted for.
1646 */
1647 assert(!bo_gem->used_as_reloc_target);
1648 if (target_bo_gem != bo_gem) {
1649 target_bo_gem->used_as_reloc_target = true;
1650 bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
1651 }
1652 /* An object needing a fence is a tiled buffer, so it won't have
1653 * relocs to other buffers.
1654 */
1655 if (need_fence)
1656 target_bo_gem->reloc_tree_fences = 1;
1657 bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences;
1659 bo_gem->relocs[bo_gem->reloc_count].offset = offset;
1660 bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
1661 bo_gem->relocs[bo_gem->reloc_count].target_handle =
1662 target_bo_gem->gem_handle;
1663 bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
1664 bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
1665 bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset;
1667 bo_gem->reloc_target_info[bo_gem->reloc_count].bo = target_bo;
1668 if (target_bo != bo)
1669 drm_intel_gem_bo_reference(target_bo);
1670 if (fenced_command)
1671 bo_gem->reloc_target_info[bo_gem->reloc_count].flags =
1672 DRM_INTEL_RELOC_FENCE;
1673 else
1674 bo_gem->reloc_target_info[bo_gem->reloc_count].flags = 0;
1676 bo_gem->reloc_count++;
1678 return 0;
1679 }
1681 static int
1682 drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
1683 drm_intel_bo *target_bo, uint32_t target_offset,
1684 uint32_t read_domains, uint32_t write_domain)
1685 {
1686 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
1688 return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
1689 read_domains, write_domain,
1690 !bufmgr_gem->fenced_relocs);
1691 }
1693 static int
1694 drm_intel_gem_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
1695 drm_intel_bo *target_bo,
1696 uint32_t target_offset,
1697 uint32_t read_domains, uint32_t write_domain)
1698 {
1699 return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
1700 read_domains, write_domain, true);
1701 }
1703 int
1704 drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo)
1705 {
1706 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1708 return bo_gem->reloc_count;
1709 }
1711 /**
1712 * Removes existing relocation entries in the BO after "start".
1713 *
1714 * This allows a user to avoid a two-step process for state setup with
1715 * counting up all the buffer objects and doing a
1716 * drm_intel_bufmgr_check_aperture_space() before emitting any of the
1717 * relocations for the state setup. Instead, save the state of the
1718 * batchbuffer including drm_intel_gem_get_reloc_count(), emit all the
1719 * state, and then check if it still fits in the aperture.
1720 *
1721 * Any further drm_intel_bufmgr_check_aperture_space() queries
1722 * involving this buffer in the tree are undefined after this call.
1723 */
1724 void
1725 drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start)
1726 {
1727 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1728 int i;
1729 struct timespec time;
1731 clock_gettime(CLOCK_MONOTONIC, &time);
1733 assert(bo_gem->reloc_count >= start);
1734 /* Unreference the cleared target buffers */
1735 for (i = start; i < bo_gem->reloc_count; i++) {
1736 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) bo_gem->reloc_target_info[i].bo;
1737 if (&target_bo_gem->bo != bo) {
1738 bo_gem->reloc_tree_fences -= target_bo_gem->reloc_tree_fences;
1739 drm_intel_gem_bo_unreference_locked_timed(&target_bo_gem->bo,
1740 time.tv_sec);
1741 }
1742 }
1743 bo_gem->reloc_count = start;
1744 }
1746 /**
1747 * Walk the tree of relocations rooted at BO and accumulate the list of
1748 * validations to be performed and update the relocation buffers with
1749 * index values into the validation list.
1750 */
1751 static void
1752 drm_intel_gem_bo_process_reloc(drm_intel_bo *bo)
1753 {
1754 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1755 int i;
1757 if (bo_gem->relocs == NULL)
1758 return;
1760 for (i = 0; i < bo_gem->reloc_count; i++) {
1761 drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
1763 if (target_bo == bo)
1764 continue;
1766 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1768 /* Continue walking the tree depth-first. */
1769 drm_intel_gem_bo_process_reloc(target_bo);
1771 /* Add the target to the validate list */
1772 drm_intel_add_validate_buffer(target_bo);
1773 }
1774 }
1776 static void
1777 drm_intel_gem_bo_process_reloc2(drm_intel_bo *bo)
1778 {
1779 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
1780 int i;
1782 if (bo_gem->relocs == NULL)
1783 return;
1785 for (i = 0; i < bo_gem->reloc_count; i++) {
1786 drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
1787 int need_fence;
1789 if (target_bo == bo)
1790 continue;
1792 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1794 /* Continue walking the tree depth-first. */
1795 drm_intel_gem_bo_process_reloc2(target_bo);
1797 need_fence = (bo_gem->reloc_target_info[i].flags &
1798 DRM_INTEL_RELOC_FENCE);
1800 /* Add the target to the validate list */
1801 drm_intel_add_validate_buffer2(target_bo, need_fence);
1802 }
1803 }
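/**
 * Copies the kernel-reported offsets back into each BO after execution,
 * so later relocations are emitted against the new presumed offsets.
 */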
1806 static void
1807 drm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem)
1808 {
1809 int i;
1811 for (i = 0; i < bufmgr_gem->exec_count; i++) {
1812 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
1813 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1815 /* Update the buffer offset */
1816 if (bufmgr_gem->exec_objects[i].offset != bo->offset) {
1817 DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
1818 bo_gem->gem_handle, bo_gem->name, bo->offset,
1819 (unsigned long long)bufmgr_gem->exec_objects[i].
1820 offset);
1821 bo->offset = bufmgr_gem->exec_objects[i].offset;
1822 }
1823 }
1824 }
1826 static void
1827 drm_intel_update_buffer_offsets2 (drm_intel_bufmgr_gem *bufmgr_gem)
1828 {
1829 int i;
1831 for (i = 0; i < bufmgr_gem->exec_count; i++) {
1832 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
1833 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
1835 /* Update the buffer offset */
1836 if (bufmgr_gem->exec2_objects[i].offset != bo->offset) {
1837 DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
1838 bo_gem->gem_handle, bo_gem->name, bo->offset,
1839 (unsigned long long)bufmgr_gem->exec2_objects[i].offset);
1840 bo->offset = bufmgr_gem->exec2_objects[i].offset;
1841 }
1842 }
1843 }
1845 static void
1846 aub_out(drm_intel_bufmgr_gem *bufmgr_gem, uint32_t data)
1847 {
1848 fwrite(&data, 1, 4, bufmgr_gem->aub_file);
1849 }
1851 static void
1852 aub_out_data(drm_intel_bufmgr_gem *bufmgr_gem, void *data, size_t size)
1853 {
1854 fwrite(data, 1, size, bufmgr_gem->aub_file);
1855 }
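/**
 * Writes a range of the BO's contents into the AUB file, substituting the
 * AUB-space address of the target for any dword covered by a relocation.
 */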
1857 static void
1858 aub_write_bo_data(drm_intel_bo *bo, uint32_t offset, uint32_t size)
1859 {
1860 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1861 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1862 uint32_t *data;
1863 unsigned int i;
1865 data = malloc(bo->size);
1866 drm_intel_bo_get_subdata(bo, offset, size, data);
1868 /* Easy mode: write out bo with no relocations */
1869 if (!bo_gem->reloc_count) {
1870 aub_out_data(bufmgr_gem, data, size);
1871 free(data);
1872 return;
1873 }
1875 /* Otherwise, handle the relocations while writing. */
1876 for (i = 0; i < size / 4; i++) {
1877 int r;
1878 for (r = 0; r < bo_gem->reloc_count; r++) {
1879 struct drm_i915_gem_relocation_entry *reloc;
1880 drm_intel_reloc_target *info;
1882 reloc = &bo_gem->relocs[r];
1883 info = &bo_gem->reloc_target_info[r];
1885 if (reloc->offset == offset + i * 4) {
1886 drm_intel_bo_gem *target_gem;
1887 uint32_t val;
1889 target_gem = (drm_intel_bo_gem *)info->bo;
1891 val = reloc->delta;
1892 val += target_gem->aub_offset;
1894 aub_out(bufmgr_gem, val);
1895 data[i] = val;
1896 break;
1897 }
1898 }
1899 if (r == bo_gem->reloc_count) {
1900 /* no relocation, just the data */
1901 aub_out(bufmgr_gem, data[i]);
1902 }
1903 }
1905 free(data);
1906 }
1908 static void
1909 aub_bo_get_address(drm_intel_bo *bo)
1910 {
1911 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1912 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1914 /* Give the object a graphics address in the AUB file. We
1915 * don't just use the GEM object address because we do AUB
1916 * dumping before execution -- we want to successfully log
1917 * when the hardware might hang, and we might even want to aub
1918 * capture for a driver trying to execute on a different
1919 * generation of hardware by disabling the actual kernel exec
1920 * call.
1921 */
1922 bo_gem->aub_offset = bufmgr_gem->aub_offset;
1923 bufmgr_gem->aub_offset += bo->size;
1924 /* XXX: Handle aperture overflow. */
1925 assert(bufmgr_gem->aub_offset < 256 * 1024 * 1024);
1926 }
1928 static void
1929 aub_write_trace_block(drm_intel_bo *bo, uint32_t type, uint32_t subtype,
1930 uint32_t offset, uint32_t size)
1931 {
1932 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1933 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1935 aub_out(bufmgr_gem,
1936 CMD_AUB_TRACE_HEADER_BLOCK |
1937 (5 - 2));
1938 aub_out(bufmgr_gem,
1939 AUB_TRACE_MEMTYPE_GTT | type | AUB_TRACE_OP_DATA_WRITE);
1940 aub_out(bufmgr_gem, subtype);
1941 aub_out(bufmgr_gem, bo_gem->aub_offset + offset);
1942 aub_out(bufmgr_gem, size);
1943 aub_write_bo_data(bo, offset, size);
1944 }
1946 /**
1947 * Break up large objects into multiple writes. Otherwise a 128kb VBO
1948 * would overflow the 16-bit size field in the packet header and
1949 * everything goes badly after that.
1950 */
1951 static void
1952 aub_write_large_trace_block(drm_intel_bo *bo, uint32_t type, uint32_t subtype,
1953 uint32_t offset, uint32_t size)
1954 {
1955 uint32_t block_size;
1956 uint32_t sub_offset;
1958 for (sub_offset = 0; sub_offset < size; sub_offset += block_size) {
1959 block_size = size - sub_offset;
1961 if (block_size > 8 * 4096)
1962 block_size = 8 * 4096;
1964 aub_write_trace_block(bo, type, subtype, offset + sub_offset,
1965 block_size);
1966 }
1967 }
1969 static void
1970 aub_write_bo(drm_intel_bo *bo)
1971 {
1972 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1973 uint32_t offset = 0;
1974 unsigned i;
1976 aub_bo_get_address(bo);
1978 /* Write out each annotated section separately. */
1979 for (i = 0; i < bo_gem->aub_annotation_count; ++i) {
1980 drm_intel_aub_annotation *annotation =
1981 &bo_gem->aub_annotations[i];
1982 uint32_t ending_offset = annotation->ending_offset;
1983 if (ending_offset > bo->size)
1984 ending_offset = bo->size;
1985 if (ending_offset > offset) {
1986 aub_write_large_trace_block(bo, annotation->type,
1987 annotation->subtype,
1988 offset,
1989 ending_offset - offset);
1990 offset = ending_offset;
1991 }
1992 }
1994 /* Write out any remaining unannotated data */
1995 if (offset < bo->size) {
1996 aub_write_large_trace_block(bo, AUB_TRACE_TYPE_NOTYPE, 0,
1997 offset, bo->size - offset);
1998 }
1999 }
2001 /*
2002 * Make a ring buffer on the fly and dump it
2003 */
2004 static void
2005 aub_build_dump_ringbuffer(drm_intel_bufmgr_gem *bufmgr_gem,
2006 uint32_t batch_buffer, int ring_flag)
2007 {
2008 uint32_t ringbuffer[4096];
2009 int ring = AUB_TRACE_TYPE_RING_PRB0; /* The default ring */
2010 int ring_count = 0;
2012 if (ring_flag == I915_EXEC_BSD)
2013 ring = AUB_TRACE_TYPE_RING_PRB1;
2015 /* Make a ring buffer to execute our batchbuffer. */
2016 memset(ringbuffer, 0, sizeof(ringbuffer));
2017 ringbuffer[ring_count++] = AUB_MI_BATCH_BUFFER_START;
2018 ringbuffer[ring_count++] = batch_buffer;
2020 /* Write out the ring. This appears to trigger execution of
2021 * the ring in the simulator.
2022 */
2023 aub_out(bufmgr_gem,
2024 CMD_AUB_TRACE_HEADER_BLOCK |
2025 (5 - 2));
2026 aub_out(bufmgr_gem,
2027 AUB_TRACE_MEMTYPE_GTT | ring | AUB_TRACE_OP_COMMAND_WRITE);
2028 aub_out(bufmgr_gem, 0); /* general/surface subtype */
2029 aub_out(bufmgr_gem, bufmgr_gem->aub_offset);
2030 aub_out(bufmgr_gem, ring_count * 4);
2032 /* FIXME: Need some flush operations here? */
2033 aub_out_data(bufmgr_gem, ringbuffer, ring_count * 4);
2035 /* Update offset pointer */
2036 bufmgr_gem->aub_offset += 4096;
2037 }
2039 void
2040 drm_intel_gem_bo_aub_dump_bmp(drm_intel_bo *bo,
2041 int x1, int y1, int width, int height,
2042 enum aub_dump_bmp_format format,
2043 int pitch, int offset)
2044 {
2045 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2046 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
2047 uint32_t cpp;
2049 switch (format) {
2050 case AUB_DUMP_BMP_FORMAT_8BIT:
2051 cpp = 1;
2052 break;
2053 case AUB_DUMP_BMP_FORMAT_ARGB_4444:
2054 cpp = 2;
2055 break;
2056 case AUB_DUMP_BMP_FORMAT_ARGB_0888:
2057 case AUB_DUMP_BMP_FORMAT_ARGB_8888:
2058 cpp = 4;
2059 break;
2060 default:
2061 printf("Unknown AUB dump format %d\n", format);
2062 return;
2063 }
2065 if (!bufmgr_gem->aub_file)
2066 return;
2068 aub_out(bufmgr_gem, CMD_AUB_DUMP_BMP | 4);
2069 aub_out(bufmgr_gem, (y1 << 16) | x1);
2070 aub_out(bufmgr_gem,
2071 (format << 24) |
2072 (cpp << 19) |
2073 pitch / 4);
2074 aub_out(bufmgr_gem, (height << 16) | width);
2075 aub_out(bufmgr_gem, bo_gem->aub_offset + offset);
2076 aub_out(bufmgr_gem,
2077 ((bo_gem->tiling_mode != I915_TILING_NONE) ? (1 << 2) : 0) |
2078 ((bo_gem->tiling_mode == I915_TILING_Y) ? (1 << 3) : 0));
2079 }
2081 static void
2082 aub_exec(drm_intel_bo *bo, int ring_flag, int used)
2083 {
2084 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2085 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2086 int i;
2087 bool batch_buffer_needs_annotations;
2089 if (!bufmgr_gem->aub_file)
2090 return;
2092 /* If batch buffer is not annotated, annotate it the best we
2093 * can.
2094 */
2095 batch_buffer_needs_annotations = bo_gem->aub_annotation_count == 0;
2096 if (batch_buffer_needs_annotations) {
2097 drm_intel_aub_annotation annotations[2] = {
2098 { AUB_TRACE_TYPE_BATCH, 0, used },
2099 { AUB_TRACE_TYPE_NOTYPE, 0, bo->size }
2100 };
2101 drm_intel_bufmgr_gem_set_aub_annotations(bo, annotations, 2);
2102 }
2104 /* Write out all buffers to AUB memory */
2105 for (i = 0; i < bufmgr_gem->exec_count; i++) {
2106 aub_write_bo(bufmgr_gem->exec_bos[i]);
2107 }
2109 /* Remove any annotations we added */
2110 if (batch_buffer_needs_annotations)
2111 drm_intel_bufmgr_gem_set_aub_annotations(bo, NULL, 0);
2113 /* Dump ring buffer */
2114 aub_build_dump_ringbuffer(bufmgr_gem, bo_gem->aub_offset, ring_flag);
2116 fflush(bufmgr_gem->aub_file);
2118 /*
2119 * One frame has been dumped. So reset the aub_offset for the next frame.
2120 *
2121 * FIXME: Can we do this?
2122 */
2123 bufmgr_gem->aub_offset = 0x10000;
2124 }
2126 static int
2127 drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
2128 drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
2129 {
2130 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2131 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2132 struct drm_i915_gem_execbuffer execbuf;
2133 int ret, i;
2135 if (bo_gem->has_error)
2136 return -ENOMEM;
2138 pthread_mutex_lock(&bufmgr_gem->lock);
2139 /* Update indices and set up the validate list. */
2140 drm_intel_gem_bo_process_reloc(bo);
2142 /* Add the batch buffer to the validation list. There are no
2143 * relocations pointing to it.
2144 */
2145 drm_intel_add_validate_buffer(bo);
2147 VG_CLEAR(execbuf);
2148 execbuf.buffers_ptr = (uintptr_t) bufmgr_gem->exec_objects;
2149 execbuf.buffer_count = bufmgr_gem->exec_count;
2150 execbuf.batch_start_offset = 0;
2151 execbuf.batch_len = used;
2152 execbuf.cliprects_ptr = (uintptr_t) cliprects;
2153 execbuf.num_cliprects = num_cliprects;
2154 execbuf.DR1 = 0;
2155 execbuf.DR4 = DR4;
2157 ret = drmIoctl(bufmgr_gem->fd,
2158 DRM_IOCTL_I915_GEM_EXECBUFFER,
2159 &execbuf);
2160 if (ret != 0) {
2161 ret = -errno;
2162 if (errno == ENOSPC) {
2163 DBG("Execbuffer fails to pin. "
2164 "Estimate: %u. Actual: %u. Available: %u\n",
2165 drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
2166 bufmgr_gem->
2167 exec_count),
2168 drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
2169 bufmgr_gem->
2170 exec_count),
2171 (unsigned int)bufmgr_gem->gtt_size);
2172 }
2173 }
2174 drm_intel_update_buffer_offsets(bufmgr_gem);
2176 if (bufmgr_gem->bufmgr.debug)
2177 drm_intel_gem_dump_validation_list(bufmgr_gem);
2179 for (i = 0; i < bufmgr_gem->exec_count; i++) {
2180 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
2181 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2183 /* Disconnect the buffer from the validate list */
2184 bo_gem->validate_index = -1;
2185 bufmgr_gem->exec_bos[i] = NULL;
2186 }
2187 bufmgr_gem->exec_count = 0;
2188 pthread_mutex_unlock(&bufmgr_gem->lock);
2190 return ret;
2191 }
2193 static int
2194 do_exec2(drm_intel_bo *bo, int used, drm_intel_context *ctx,
2195 drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
2196 unsigned int flags)
2197 {
2198 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
2199 struct drm_i915_gem_execbuffer2 execbuf;
2200 int ret = 0;
2201 int i;
2203 switch (flags & 0x7) {
2204 default:
2205 return -EINVAL;
2206 case I915_EXEC_BLT:
2207 if (!bufmgr_gem->has_blt)
2208 return -EINVAL;
2209 break;
2210 case I915_EXEC_BSD:
2211 if (!bufmgr_gem->has_bsd)
2212 return -EINVAL;
2213 break;
2214 case I915_EXEC_RENDER:
2215 case I915_EXEC_DEFAULT:
2216 break;
2217 }
2219 pthread_mutex_lock(&bufmgr_gem->lock);
2220 /* Update indices and set up the validate list. */
2221 drm_intel_gem_bo_process_reloc2(bo);
2223 /* Add the batch buffer to the validation list. There are no relocations
2224 * pointing to it.
2225 */
2226 drm_intel_add_validate_buffer2(bo, 0);
2228 VG_CLEAR(execbuf);
2229 execbuf.buffers_ptr = (uintptr_t)bufmgr_gem->exec2_objects;
2230 execbuf.buffer_count = bufmgr_gem->exec_count;
2231 execbuf.batch_start_offset = 0;
2232 execbuf.batch_len = used;
2233 execbuf.cliprects_ptr = (uintptr_t)cliprects;
2234 execbuf.num_cliprects = num_cliprects;
2235 execbuf.DR1 = 0;
2236 execbuf.DR4 = DR4;
2237 execbuf.flags = flags;
2238 if (ctx == NULL)
2239 i915_execbuffer2_set_context_id(execbuf, 0);
2240 else
2241 i915_execbuffer2_set_context_id(execbuf, ctx->ctx_id);
2242 execbuf.rsvd2 = 0;
2244 aub_exec(bo, flags, used);
2246 if (bufmgr_gem->no_exec)
2247 goto skip_execution;
2249 ret = drmIoctl(bufmgr_gem->fd,
2250 DRM_IOCTL_I915_GEM_EXECBUFFER2,
2251 &execbuf);
2252 if (ret != 0) {
2253 ret = -errno;
2254 if (ret == -ENOSPC) {
2255 DBG("Execbuffer fails to pin. "
2256 "Estimate: %u. Actual: %u. Available: %u\n",
2257 drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
2258 bufmgr_gem->exec_count),
2259 drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
2260 bufmgr_gem->exec_count),
2261 (unsigned int) bufmgr_gem->gtt_size);
2262 }
2263 }
2264 drm_intel_update_buffer_offsets2(bufmgr_gem);
2266 skip_execution:
2267 if (bufmgr_gem->bufmgr.debug)
2268 drm_intel_gem_dump_validation_list(bufmgr_gem);
2270 for (i = 0; i < bufmgr_gem->exec_count; i++) {
2271 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
2272 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
2274 /* Disconnect the buffer from the validate list */
2275 bo_gem->validate_index = -1;
2276 bufmgr_gem->exec_bos[i] = NULL;
2277 }
2278 bufmgr_gem->exec_count = 0;
2279 pthread_mutex_unlock(&bufmgr_gem->lock);
2281 return ret;
2282 }
2284 static int
2285 drm_intel_gem_bo_exec2(drm_intel_bo *bo, int used,
2286 drm_clip_rect_t *cliprects, int num_cliprects,
2287 int DR4)
2288 {
2289 return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4,
2290 I915_EXEC_RENDER);
2291 }
2293 static int
2294 drm_intel_gem_bo_mrb_exec2(drm_intel_bo *bo, int used,
2295 drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
2296 unsigned int flags)
2297 {
2298 return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4,
2299 flags);
2300 }
2302 int
2303 drm_intel_gem_bo_context_exec(drm_intel_bo *bo, drm_intel_context *ctx,
2304 int used, unsigned int flags)
2305 {
2306 return do_exec2(bo, used, ctx, NULL, 0, 0, flags);
2307 }
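/*
 * Usage sketch (illustrative only, not part of this file's code paths):
 * submitting a filled batch buffer through the public entry points that
 * wrap the exec functions above.  "batch_bo", "used_bytes" and "ctx" are
 * hypothetical names for the caller's batch object, the number of bytes
 * written into it, and a previously created hardware context.
 *
 *	// Default/render ring, no cliprects:
 *	ret = drm_intel_bo_exec(batch_bo, used_bytes, NULL, 0, 0);
 *
 *	// Blit ring; only valid when the kernel reports BLT support:
 *	ret = drm_intel_bo_mrb_exec(batch_bo, used_bytes, NULL, 0, 0,
 *				    I915_EXEC_BLT);
 *
 *	// With a hardware context (see drm_intel_gem_context_create below):
 *	ret = drm_intel_gem_bo_context_exec(batch_bo, ctx, used_bytes,
 *					    I915_EXEC_RENDER);
 */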
2309 static int
2310 drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)
2311 {
2312 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2313 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2314 struct drm_i915_gem_pin pin;
2315 int ret;
2317 VG_CLEAR(pin);
2318 pin.handle = bo_gem->gem_handle;
2319 pin.alignment = alignment;
2321 ret = drmIoctl(bufmgr_gem->fd,
2322 DRM_IOCTL_I915_GEM_PIN,
2323 &pin);
2324 if (ret != 0)
2325 return -errno;
2327 bo->offset = pin.offset;
2328 return 0;
2329 }
2331 static int
2332 drm_intel_gem_bo_unpin(drm_intel_bo *bo)
2333 {
2334 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2335 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2336 struct drm_i915_gem_unpin unpin;
2337 int ret;
2339 VG_CLEAR(unpin);
2340 unpin.handle = bo_gem->gem_handle;
2342 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
2343 if (ret != 0)
2344 return -errno;
2346 return 0;
2347 }
2349 static int
2350 drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
2351 uint32_t tiling_mode,
2352 uint32_t stride)
2353 {
2354 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2355 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2356 struct drm_i915_gem_set_tiling set_tiling;
2357 int ret;
2359 if (bo_gem->global_name == 0 &&
2360 tiling_mode == bo_gem->tiling_mode &&
2361 stride == bo_gem->stride)
2362 return 0;
2364 memset(&set_tiling, 0, sizeof(set_tiling));
2365 do {
2366 /* set_tiling is slightly broken and overwrites the
2367 * input on the error path, so we have to open code
2368 * drmIoctl.
2369 */
2370 set_tiling.handle = bo_gem->gem_handle;
2371 set_tiling.tiling_mode = tiling_mode;
2372 set_tiling.stride = stride;
2374 ret = ioctl(bufmgr_gem->fd,
2375 DRM_IOCTL_I915_GEM_SET_TILING,
2376 &set_tiling);
2377 } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
2378 if (ret == -1)
2379 return -errno;
2381 bo_gem->tiling_mode = set_tiling.tiling_mode;
2382 bo_gem->swizzle_mode = set_tiling.swizzle_mode;
2383 bo_gem->stride = set_tiling.stride;
2384 return 0;
2385 }
2387 static int
2388 drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
2389 uint32_t stride)
2390 {
2391 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2392 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2393 int ret;
2395 /* Linear buffers have no stride. By ensuring that we only ever use
2396 * stride 0 with linear buffers, we simplify our code.
2397 */
2398 if (*tiling_mode == I915_TILING_NONE)
2399 stride = 0;
2401 ret = drm_intel_gem_bo_set_tiling_internal(bo, *tiling_mode, stride);
2402 if (ret == 0)
2403 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
2405 *tiling_mode = bo_gem->tiling_mode;
2406 return ret;
2407 }
2409 static int
2410 drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
2411 uint32_t * swizzle_mode)
2412 {
2413 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2415 *tiling_mode = bo_gem->tiling_mode;
2416 *swizzle_mode = bo_gem->swizzle_mode;
2417 return 0;
2418 }
2420 drm_intel_bo *
2421 drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, int prime_fd, int size)
2422 {
2423 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
2424 int ret;
2425 uint32_t handle;
2426 drm_intel_bo_gem *bo_gem;
2427 struct drm_i915_gem_get_tiling get_tiling;
2429 ret = drmPrimeFDToHandle(bufmgr_gem->fd, prime_fd, &handle);
2430 if (ret) {
2431 fprintf(stderr, "drmPrimeFDToHandle failed: %d (errno %d)\n", ret, errno);
2432 return NULL;
2433 }
2435 bo_gem = calloc(1, sizeof(*bo_gem));
2436 if (!bo_gem)
2437 return NULL;
2439 bo_gem->bo.size = size;
2440 bo_gem->bo.handle = handle;
2441 bo_gem->bo.bufmgr = bufmgr;
2443 bo_gem->gem_handle = handle;
2445 atomic_set(&bo_gem->refcount, 1);
2447 bo_gem->name = "prime";
2448 bo_gem->validate_index = -1;
2449 bo_gem->reloc_tree_fences = 0;
2450 bo_gem->used_as_reloc_target = false;
2451 bo_gem->has_error = false;
2452 bo_gem->reusable = false;
2454 DRMINITLISTHEAD(&bo_gem->name_list);
2455 DRMINITLISTHEAD(&bo_gem->vma_list);
2457 VG_CLEAR(get_tiling);
2458 get_tiling.handle = bo_gem->gem_handle;
2459 ret = drmIoctl(bufmgr_gem->fd,
2460 DRM_IOCTL_I915_GEM_GET_TILING,
2461 &get_tiling);
2462 if (ret != 0) {
2463 drm_intel_gem_bo_unreference(&bo_gem->bo);
2464 return NULL;
2465 }
2466 bo_gem->tiling_mode = get_tiling.tiling_mode;
2467 bo_gem->swizzle_mode = get_tiling.swizzle_mode;
2468 /* XXX stride is unknown */
2469 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
2471 return &bo_gem->bo;
2472 }
2474 int
2475 drm_intel_bo_gem_export_to_prime(drm_intel_bo *bo, int *prime_fd)
2476 {
2477 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2478 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2480 if (drmPrimeHandleToFD(bufmgr_gem->fd, bo_gem->gem_handle,
2481 DRM_CLOEXEC, prime_fd) != 0)
2482 return -errno;
2484 bo_gem->reusable = false;
2486 return 0;
2487 }
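/*
 * Usage sketch (illustrative only): sharing a buffer between two bufmgr
 * instances (e.g. two processes) through dma-buf/PRIME.  Names such as
 * "bo", "other_bufmgr" and "size" are hypothetical.
 *
 *	int prime_fd;
 *
 *	if (drm_intel_bo_gem_export_to_prime(bo, &prime_fd) == 0) {
 *		// prime_fd can be passed over a unix socket; the importer
 *		// turns it back into a bo on its own bufmgr:
 *		drm_intel_bo *imported =
 *			drm_intel_bo_gem_create_from_prime(other_bufmgr,
 *							   prime_fd, size);
 *	}
 *
 * Note that both paths mark the bo as non-reusable, since its lifetime is
 * no longer private to this bufmgr's cache.
 */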
2489 static int
2490 drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t * name)
2491 {
2492 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2493 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2494 int ret;
2496 if (!bo_gem->global_name) {
2497 struct drm_gem_flink flink;
2499 VG_CLEAR(flink);
2500 flink.handle = bo_gem->gem_handle;
2502 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink);
2503 if (ret != 0)
2504 return -errno;
2506 bo_gem->global_name = flink.name;
2507 bo_gem->reusable = false;
2509 DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
2510 }
2512 *name = bo_gem->global_name;
2513 return 0;
2514 }
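/*
 * Usage sketch (illustrative only): legacy global-name (flink) sharing.
 * The exporter publishes a 32-bit name; another process opens it with
 * drm_intel_bo_gem_create_from_name().  Identifiers are hypothetical.
 *
 *	uint32_t name;
 *
 *	if (drm_intel_bo_flink(bo, &name) == 0) {
 *		// In the consumer process:
 *		drm_intel_bo *shared =
 *			drm_intel_bo_gem_create_from_name(bufmgr, "shared",
 *							  name);
 *	}
 */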
2516 /**
2517 * Enables unlimited caching of buffer objects for reuse.
2518 *
2519 * This is potentially very memory expensive, as the cache at each bucket
2520 * size is only bounded by how many buffers of that size we've managed to have
2521 * in flight at once.
2522 */
2523 void
2524 drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
2525 {
2526 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
2528 bufmgr_gem->bo_reuse = true;
2529 }
2531 /**
2532 * Enable use of fenced reloc type.
2533 *
2534 * New code should enable this to avoid unnecessary fence register
2535 * allocation. If this option is not enabled, all relocs will have a fence
2536 * register allocated.
2537 */
2538 void
2539 drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr)
2540 {
2541 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
2543 if (bufmgr_gem->bufmgr.bo_exec == drm_intel_gem_bo_exec2)
2544 bufmgr_gem->fenced_relocs = true;
2545 }
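/*
 * Usage sketch (illustrative only): typical bufmgr setup in a driver right
 * after initialization, enabling the BO cache and fenced relocations.  The
 * batch size of 4096 bytes is an assumption for the example.
 *
 *	drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
 *
 *	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
 *	drm_intel_bufmgr_gem_enable_fenced_relocs(bufmgr);
 */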
2547 /**
2548 * Return the additional aperture space required by the tree of buffer objects
2549 * rooted at bo.
2550 */
2551 static int
2552 drm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo)
2553 {
2554 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2555 int i;
2556 int total = 0;
2558 if (bo == NULL || bo_gem->included_in_check_aperture)
2559 return 0;
2561 total += bo->size;
2562 bo_gem->included_in_check_aperture = true;
2564 for (i = 0; i < bo_gem->reloc_count; i++)
2565 total +=
2566 drm_intel_gem_bo_get_aperture_space(bo_gem->
2567 reloc_target_info[i].bo);
2569 return total;
2570 }
2572 /**
2573 * Count the number of buffers in this list that need a fence reg
2574 *
2575 * If the count is greater than the number of available regs, we'll have
2576 * to ask the caller to resubmit a batch with fewer tiled buffers.
2577 *
2578 * This function over-counts if the same buffer is used multiple times.
2579 */
2580 static unsigned int
2581 drm_intel_gem_total_fences(drm_intel_bo ** bo_array, int count)
2582 {
2583 int i;
2584 unsigned int total = 0;
2586 for (i = 0; i < count; i++) {
2587 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
2589 if (bo_gem == NULL)
2590 continue;
2592 total += bo_gem->reloc_tree_fences;
2593 }
2594 return total;
2595 }
2597 /**
2598 * Clear the flag set by drm_intel_gem_bo_get_aperture_space() so we're ready
2599 * for the next drm_intel_bufmgr_check_aperture_space() call.
2600 */
2601 static void
2602 drm_intel_gem_bo_clear_aperture_space_flag(drm_intel_bo *bo)
2603 {
2604 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2605 int i;
2607 if (bo == NULL || !bo_gem->included_in_check_aperture)
2608 return;
2610 bo_gem->included_in_check_aperture = false;
2612 for (i = 0; i < bo_gem->reloc_count; i++)
2613 drm_intel_gem_bo_clear_aperture_space_flag(bo_gem->
2614 reloc_target_info[i].bo);
2615 }
2617 /**
2618 * Return a conservative estimate for the amount of aperture required
2619 * for a collection of buffers. This may double-count some buffers.
2620 */
2621 static unsigned int
2622 drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count)
2623 {
2624 int i;
2625 unsigned int total = 0;
2627 for (i = 0; i < count; i++) {
2628 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
2629 if (bo_gem != NULL)
2630 total += bo_gem->reloc_tree_size;
2631 }
2632 return total;
2633 }
2635 /**
2636 * Return the amount of aperture needed for a collection of buffers.
2637 * This avoids double counting any buffers, at the cost of looking
2638 * at every buffer in the set.
2639 */
2640 static unsigned int
2641 drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count)
2642 {
2643 int i;
2644 unsigned int total = 0;
2646 for (i = 0; i < count; i++) {
2647 total += drm_intel_gem_bo_get_aperture_space(bo_array[i]);
2648 /* For the first buffer object in the array, we get an
2649 * accurate count back for its reloc_tree size (since nothing
2650 * had been flagged as being counted yet). We can save that
2651 * value out as a more conservative reloc_tree_size that
2652 * avoids double-counting target buffers. Since the first
2653 * buffer happens to usually be the batch buffer in our
2654 * callers, this can pull us back from doing the tree
2655 * walk on every new batch emit.
2656 */
2657 if (i == 0) {
2658 drm_intel_bo_gem *bo_gem =
2659 (drm_intel_bo_gem *) bo_array[i];
2660 bo_gem->reloc_tree_size = total;
2661 }
2662 }
2664 for (i = 0; i < count; i++)
2665 drm_intel_gem_bo_clear_aperture_space_flag(bo_array[i]);
2666 return total;
2667 }
2669 /**
2670 * Return -1 if the batchbuffer should be flushed before attempting to
2671 * emit rendering referencing the buffers pointed to by bo_array.
2672 *
2673 * This is required because if we try to emit a batchbuffer with relocations
2674 * to a tree of buffers that won't simultaneously fit in the aperture,
2675 * the rendering will return an error at a point where the software is not
2676 * prepared to recover from it.
2677 *
2678 * However, we also want to emit the batchbuffer significantly before we reach
2679 * the limit, as a series of batchbuffers each of which references buffers
2680 * covering almost all of the aperture means that at each emit we end up
2681 * waiting to evict a buffer from the last rendering, so rendering becomes
2682 * effectively synchronous. By emitting smaller batchbuffers, we eat some CPU overhead to
2683 * get better parallelism.
2684 */
2685 static int
2686 drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count)
2687 {
2688 drm_intel_bufmgr_gem *bufmgr_gem =
2689 (drm_intel_bufmgr_gem *) bo_array[0]->bufmgr;
2690 unsigned int total = 0;
2691 unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4;
2692 int total_fences;
2694 /* Check for fence reg constraints if necessary */
2695 if (bufmgr_gem->available_fences) {
2696 total_fences = drm_intel_gem_total_fences(bo_array, count);
2697 if (total_fences > bufmgr_gem->available_fences)
2698 return -ENOSPC;
2699 }
2701 total = drm_intel_gem_estimate_batch_space(bo_array, count);
2703 if (total > threshold)
2704 total = drm_intel_gem_compute_batch_space(bo_array, count);
2706 if (total > threshold) {
2707 DBG("check_space: overflowed available aperture, "
2708 "%dkb vs %dkb\n",
2709 total / 1024, (int)bufmgr_gem->gtt_size / 1024);
2710 return -ENOSPC;
2711 } else {
2712 DBG("drm_check_space: total %dkb vs bufgr %dkb\n", total / 1024,
2713 (int)bufmgr_gem->gtt_size / 1024);
2714 return 0;
2715 }
2716 }
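/*
 * Usage sketch (illustrative only): a caller building a batch checks the
 * aperture footprint of the batch plus the buffers it is about to
 * reference, and flushes first if they will not fit.  "batch_bo",
 * "target_bo" and "flush_batch" are hypothetical.
 *
 *	drm_intel_bo *check[] = { batch_bo, target_bo };
 *
 *	if (drm_intel_bufmgr_check_aperture_space(check, 2) != 0) {
 *		// Submit what we have so far, then retry with an
 *		// (almost) empty batch.
 *		flush_batch();
 *	}
 */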
2718 /*
2719 * Disable buffer reuse for objects which are shared with the kernel
2720 * as scanout buffers
2721 */
2722 static int
2723 drm_intel_gem_bo_disable_reuse(drm_intel_bo *bo)
2724 {
2725 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2727 bo_gem->reusable = false;
2728 return 0;
2729 }
2731 static int
2732 drm_intel_gem_bo_is_reusable(drm_intel_bo *bo)
2733 {
2734 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2736 return bo_gem->reusable;
2737 }
2739 static int
2740 _drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
2741 {
2742 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2743 int i;
2745 for (i = 0; i < bo_gem->reloc_count; i++) {
2746 if (bo_gem->reloc_target_info[i].bo == target_bo)
2747 return 1;
2748 if (bo == bo_gem->reloc_target_info[i].bo)
2749 continue;
2750 if (_drm_intel_gem_bo_references(bo_gem->reloc_target_info[i].bo,
2751 target_bo))
2752 return 1;
2753 }
2755 return 0;
2756 }
2758 /** Return true if target_bo is referenced by bo's relocation tree. */
2759 static int
2760 drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
2761 {
2762 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
2764 if (bo == NULL || target_bo == NULL)
2765 return 0;
2766 if (target_bo_gem->used_as_reloc_target)
2767 return _drm_intel_gem_bo_references(bo, target_bo);
2768 return 0;
2769 }
2771 static void
2772 add_bucket(drm_intel_bufmgr_gem *bufmgr_gem, int size)
2773 {
2774 unsigned int i = bufmgr_gem->num_buckets;
2776 assert(i < ARRAY_SIZE(bufmgr_gem->cache_bucket));
2778 DRMINITLISTHEAD(&bufmgr_gem->cache_bucket[i].head);
2779 bufmgr_gem->cache_bucket[i].size = size;
2780 bufmgr_gem->num_buckets++;
2781 }
2783 static void
2784 init_cache_buckets(drm_intel_bufmgr_gem *bufmgr_gem)
2785 {
2786 unsigned long size, cache_max_size = 64 * 1024 * 1024;
2788 /* OK, so power of two buckets was too wasteful of memory.
2789 * Give 3 other sizes between each power of two, to hopefully
2790 * cover things accurately enough. (The alternative is
2791 * probably to just go for exact matching of sizes, and assume
2792 * that for things like composited window resize the tiled
2793 * width/height alignment and rounding of sizes to pages will
2794 * get us useful cache hit rates anyway)
2795 */
2796 add_bucket(bufmgr_gem, 4096);
2797 add_bucket(bufmgr_gem, 4096 * 2);
2798 add_bucket(bufmgr_gem, 4096 * 3);
2800 /* Initialize the linked lists for BO reuse cache. */
2801 for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
2802 add_bucket(bufmgr_gem, size);
2804 add_bucket(bufmgr_gem, size + size * 1 / 4);
2805 add_bucket(bufmgr_gem, size + size * 2 / 4);
2806 add_bucket(bufmgr_gem, size + size * 3 / 4);
2807 }
2808 }
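/*
 * For reference, the progression above yields bucket sizes (in 4kB pages)
 * of 1, 2, 3, then 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, 24, 28, 32, ... up
 * to the 64MB cap: each power of two plus three evenly spaced steps before
 * the next one.
 */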
2810 void
2811 drm_intel_bufmgr_gem_set_vma_cache_size(drm_intel_bufmgr *bufmgr, int limit)
2812 {
2813 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
2815 bufmgr_gem->vma_max = limit;
2817 drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
2818 }
2820 /**
2821 * Get the PCI ID for the device. This can be overridden by setting the
2822 * INTEL_DEVID_OVERRIDE environment variable to the desired ID.
2823 */
2824 static int
2825 get_pci_device_id(drm_intel_bufmgr_gem *bufmgr_gem)
2826 {
2827 char *devid_override;
2828 int devid;
2829 int ret;
2830 drm_i915_getparam_t gp;
2832 if (geteuid() == getuid()) {
2833 devid_override = getenv("INTEL_DEVID_OVERRIDE");
2834 if (devid_override) {
2835 bufmgr_gem->no_exec = true;
2836 return strtod(devid_override, NULL);
2837 }
2838 }
2840 VG_CLEAR(devid);
2841 VG_CLEAR(gp);
2842 gp.param = I915_PARAM_CHIPSET_ID;
2843 gp.value = &devid;
2844 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
2845 if (ret) {
2846 fprintf(stderr, "get chip id failed: %d [%d]\n", ret, errno);
2847 fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
2848 }
2849 return devid;
2850 }
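/*
 * Usage sketch (illustrative only): overriding the chipset ID from the
 * environment, e.g. to generate AUB traces for hardware you don't have.
 * The 0x0126 value is just an example ID; note that the override also
 * sets no_exec, so batches are never actually submitted to the kernel.
 *
 *	INTEL_DEVID_OVERRIDE=0x0126 ./my_app
 */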
2852 int
2853 drm_intel_bufmgr_gem_get_devid(drm_intel_bufmgr *bufmgr)
2854 {
2855 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
2857 return bufmgr_gem->pci_device;
2858 }
2860 /**
2861 * Sets up AUB dumping.
2862 *
2863 * This is a trace file format that can be used with the simulator.
2864 * Packets are emitted in a format somewhat like GPU command packets.
2865 * You can set up a GTT and upload your objects into the referenced
2866 * space, then send off batchbuffers and get BMPs out the other end.
2867 */
2868 void
2869 drm_intel_bufmgr_gem_set_aub_dump(drm_intel_bufmgr *bufmgr, int enable)
2870 {
2871 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
2872 int entry = 0x200003;
2873 int i;
2874 int gtt_size = 0x10000;
2876 if (!enable) {
2877 if (bufmgr_gem->aub_file) {
2878 fclose(bufmgr_gem->aub_file);
2879 bufmgr_gem->aub_file = NULL;
2880 }
return;
2881 }
2883 if (geteuid() != getuid())
2884 return;
2886 bufmgr_gem->aub_file = fopen("intel.aub", "w+");
2887 if (!bufmgr_gem->aub_file)
2888 return;
2890 /* Start allocating objects from just after the GTT. */
2891 bufmgr_gem->aub_offset = gtt_size;
2893 /* Start with a (required) version packet. */
2894 aub_out(bufmgr_gem, CMD_AUB_HEADER | (13 - 2));
2895 aub_out(bufmgr_gem,
2896 (4 << AUB_HEADER_MAJOR_SHIFT) |
2897 (0 << AUB_HEADER_MINOR_SHIFT));
2898 for (i = 0; i < 8; i++) {
2899 aub_out(bufmgr_gem, 0); /* app name */
2900 }
2901 aub_out(bufmgr_gem, 0); /* timestamp */
2902 aub_out(bufmgr_gem, 0); /* timestamp */
2903 aub_out(bufmgr_gem, 0); /* comment len */
2905 /* Set up the GTT. The max we can handle is 256M */
2906 aub_out(bufmgr_gem, CMD_AUB_TRACE_HEADER_BLOCK | (5 - 2));
2907 aub_out(bufmgr_gem, AUB_TRACE_MEMTYPE_NONLOCAL | 0 | AUB_TRACE_OP_DATA_WRITE);
2908 aub_out(bufmgr_gem, 0); /* subtype */
2909 aub_out(bufmgr_gem, 0); /* offset */
2910 aub_out(bufmgr_gem, gtt_size); /* size */
2911 for (i = 0x000; i < gtt_size; i += 4, entry += 0x1000) {
2912 aub_out(bufmgr_gem, entry);
2913 }
2914 }
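/*
 * Usage sketch (illustrative only): capturing an AUB trace of a frame.
 * "fb_bo", "width", "height" and "pitch" are hypothetical and would come
 * from the real framebuffer layout.
 *
 *	drm_intel_bufmgr_gem_set_aub_dump(bufmgr, 1);
 *
 *	// ... build and submit batches as usual; each exec is written to
 *	// "intel.aub" before (or instead of) hitting the kernel ...
 *
 *	drm_intel_gem_bo_aub_dump_bmp(fb_bo, 0, 0, width, height,
 *				      AUB_DUMP_BMP_FORMAT_ARGB_8888,
 *				      pitch, 0);
 *
 *	drm_intel_bufmgr_gem_set_aub_dump(bufmgr, 0);
 */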
2916 drm_intel_context *
2917 drm_intel_gem_context_create(drm_intel_bufmgr *bufmgr)
2918 {
2919 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
2920 struct drm_i915_gem_context_create create;
2921 drm_intel_context *context = NULL;
2922 int ret;
2924 VG_CLEAR(create);
2925 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
2926 if (ret != 0) {
2927 DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n",
2928 strerror(errno));
2929 return NULL;
2930 }
2932 context = calloc(1, sizeof(*context));
if (context == NULL)
return NULL;
2933 context->ctx_id = create.ctx_id;
2934 context->bufmgr = bufmgr;
2936 return context;
2937 }
2939 void
2940 drm_intel_gem_context_destroy(drm_intel_context *ctx)
2941 {
2942 drm_intel_bufmgr_gem *bufmgr_gem;
2943 struct drm_i915_gem_context_destroy destroy;
2944 int ret;
2946 if (ctx == NULL)
2947 return;
2949 VG_CLEAR(destroy);
2951 bufmgr_gem = (drm_intel_bufmgr_gem *)ctx->bufmgr;
2952 destroy.ctx_id = ctx->ctx_id;
2953 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY,
2954 &destroy);
2955 if (ret != 0)
2956 fprintf(stderr, "DRM_IOCTL_I915_GEM_CONTEXT_DESTROY failed: %s\n",
2957 strerror(errno));
2959 free(ctx);
2960 }
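/*
 * Usage sketch (illustrative only): per-context submission so that GPU
 * state is isolated between users of the same fd.  "batch_bo" and
 * "used_bytes" are hypothetical.
 *
 *	drm_intel_context *ctx = drm_intel_gem_context_create(bufmgr);
 *
 *	if (ctx != NULL) {
 *		drm_intel_gem_bo_context_exec(batch_bo, ctx, used_bytes,
 *					      I915_EXEC_RENDER);
 *		drm_intel_gem_context_destroy(ctx);
 *	}
 */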
2962 int
2963 drm_intel_reg_read(drm_intel_bufmgr *bufmgr,
2964 uint32_t offset,
2965 uint64_t *result)
2966 {
2967 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
2968 struct drm_i915_reg_read reg_read;
2969 int ret;
2971 VG_CLEAR(reg_read);
2972 reg_read.offset = offset;
2974 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_REG_READ, &reg_read);
2976 *result = reg_read.val;
2977 return ret;
2978 }
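/*
 * Usage sketch (illustrative only): reading a whitelisted register through
 * the kernel.  The 0x2358 offset is assumed here to be the render ring
 * TIMESTAMP register used by some drivers; only registers the kernel
 * whitelists will succeed.
 *
 *	uint64_t timestamp;
 *
 *	if (drm_intel_reg_read(bufmgr, 0x2358, &timestamp) == 0) {
 *		// timestamp now holds the raw register value
 *	}
 */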
2981 /**
2982 * Annotate the given bo for use in aub dumping.
2983 *
2984 * \param annotations is an array of drm_intel_aub_annotation objects
2985 * describing the type of data in various sections of the bo. Each
2986 * element of the array specifies the type and subtype of a section of
2987 * the bo, and the past-the-end offset of that section. The elements
2988 * of \c annotations must be sorted so that ending_offset is
2989 * increasing.
2990 *
2991 * \param count is the number of elements in the \c annotations array.
2992 * If \c count is zero, then \c annotations will not be dereferenced.
2993 *
2994 * Annotations are copied into a private data structure, so caller may
2995 * re-use the memory pointed to by \c annotations after the call
2996 * returns.
2997 *
2998 * Annotations are stored for the lifetime of the bo; to reset to the
2999 * default state (no annotations), call this function with a \c count
3000 * of zero.
3001 */
3002 void
3003 drm_intel_bufmgr_gem_set_aub_annotations(drm_intel_bo *bo,
3004 drm_intel_aub_annotation *annotations,
3005 unsigned count)
3006 {
3007 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
3008 unsigned size = sizeof(*annotations) * count;
3009 drm_intel_aub_annotation *new_annotations =
3010 count > 0 ? realloc(bo_gem->aub_annotations, size) : NULL;
3011 if (new_annotations == NULL) {
3012 free(bo_gem->aub_annotations);
3013 bo_gem->aub_annotations = NULL;
3014 bo_gem->aub_annotation_count = 0;
3015 return;
3016 }
3017 memcpy(new_annotations, annotations, size);
3018 bo_gem->aub_annotations = new_annotations;
3019 bo_gem->aub_annotation_count = count;
3020 }
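/*
 * Usage sketch (illustrative only): annotating a batch buffer before
 * submission so the AUB dump labels its sections.  "batch_bo",
 * "cmd_bytes" and "state_end" are hypothetical offsets within the bo;
 * ending offsets must be increasing, as documented above.
 *
 *	drm_intel_aub_annotation notes[] = {
 *		{ AUB_TRACE_TYPE_BATCH,  0, cmd_bytes },
 *		{ AUB_TRACE_TYPE_NOTYPE, 0, state_end },
 *	};
 *
 *	drm_intel_bufmgr_gem_set_aub_annotations(batch_bo, notes, 2);
 */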
3022 /**
3023 * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
3024 * and manage buffer objects.
3025 *
3026 * \param fd File descriptor of the opened DRM device.
3027 */
3028 drm_intel_bufmgr *
3029 drm_intel_bufmgr_gem_init(int fd, int batch_size)
3030 {
3031 drm_intel_bufmgr_gem *bufmgr_gem;
3032 struct drm_i915_gem_get_aperture aperture;
3033 drm_i915_getparam_t gp;
3034 int ret, tmp;
3035 bool exec2 = false;
3037 bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
3038 if (bufmgr_gem == NULL)
3039 return NULL;
3041 bufmgr_gem->fd = fd;
3043 if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
3044 free(bufmgr_gem);
3045 return NULL;
3046 }
3048 ret = drmIoctl(bufmgr_gem->fd,
3049 DRM_IOCTL_I915_GEM_GET_APERTURE,
3050 &aperture);
3052 if (ret == 0)
3053 bufmgr_gem->gtt_size = aperture.aper_available_size;
3054 else {
3055 fprintf(stderr, "DRM_IOCTL_I915_GEM_GET_APERTURE failed: %s\n",
3056 strerror(errno));
3057 bufmgr_gem->gtt_size = 128 * 1024 * 1024;
3058 fprintf(stderr, "Assuming %dkB available aperture size.\n"
3059 "May lead to reduced performance or incorrect "
3060 "rendering.\n",
3061 (int)bufmgr_gem->gtt_size / 1024);
3062 }
3064 bufmgr_gem->pci_device = get_pci_device_id(bufmgr_gem);
3066 if (IS_GEN2(bufmgr_gem->pci_device))
3067 bufmgr_gem->gen = 2;
3068 else if (IS_GEN3(bufmgr_gem->pci_device))
3069 bufmgr_gem->gen = 3;
3070 else if (IS_GEN4(bufmgr_gem->pci_device))
3071 bufmgr_gem->gen = 4;
3072 else if (IS_GEN5(bufmgr_gem->pci_device))
3073 bufmgr_gem->gen = 5;
3074 else if (IS_GEN6(bufmgr_gem->pci_device))
3075 bufmgr_gem->gen = 6;
3076 else if (IS_GEN7(bufmgr_gem->pci_device))
3077 bufmgr_gem->gen = 7;
3078 else {
3079 free(bufmgr_gem);
3080 return NULL;
3081 }
3083 if (IS_GEN3(bufmgr_gem->pci_device) &&
3084 bufmgr_gem->gtt_size > 256*1024*1024) {
3085 /* The unmappable part of gtt on gen 3 (i.e. above 256MB) can't
3086 * be used for tiled blits. To simplify the accounting, just
3087 * subtract the unmappable part (fixed to 256MB on all known
3088 * gen3 devices) if the kernel advertises it. */
3089 bufmgr_gem->gtt_size -= 256*1024*1024;
3090 }
3092 VG_CLEAR(gp);
3093 gp.value = &tmp;
3095 gp.param = I915_PARAM_HAS_EXECBUF2;
3096 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3097 if (!ret)
3098 exec2 = true;
3100 gp.param = I915_PARAM_HAS_BSD;
3101 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3102 bufmgr_gem->has_bsd = ret == 0;
3104 gp.param = I915_PARAM_HAS_BLT;
3105 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3106 bufmgr_gem->has_blt = ret == 0;
3108 gp.param = I915_PARAM_HAS_RELAXED_FENCING;
3109 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3110 bufmgr_gem->has_relaxed_fencing = ret == 0;
3112 gp.param = I915_PARAM_HAS_WAIT_TIMEOUT;
3113 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3114 bufmgr_gem->has_wait_timeout = ret == 0;
3116 gp.param = I915_PARAM_HAS_LLC;
3117 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3118 if (ret != 0) {
3119 /* Kernel does not support the HAS_LLC query; fall back to GPU
3120 * generation detection and assume that we have LLC on GEN6/7
3121 */
3122 bufmgr_gem->has_llc = (IS_GEN6(bufmgr_gem->pci_device) |
3123 IS_GEN7(bufmgr_gem->pci_device));
3124 } else
3125 bufmgr_gem->has_llc = *gp.value;
3127 if (bufmgr_gem->gen < 4) {
3128 gp.param = I915_PARAM_NUM_FENCES_AVAIL;
3129 gp.value = &bufmgr_gem->available_fences;
3130 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3131 if (ret) {
3132 fprintf(stderr, "get fences failed: %d [%d]\n", ret,
3133 errno);
3134 fprintf(stderr, "param: %d, val: %d\n", gp.param,
3135 *gp.value);
3136 bufmgr_gem->available_fences = 0;
3137 } else {
3138 /* XXX The kernel reports the total number of fences,
3139 * including any that may be pinned.
3140 *
3141 * We presume that there will be at least one pinned
3142 * fence for the scanout buffer, but there may be more
3143 * than one scanout and the user may be manually
3144 * pinning buffers. Let's move to execbuffer2 and
3145 * thereby forget the insanity of using fences...
3146 */
3147 bufmgr_gem->available_fences -= 2;
3148 if (bufmgr_gem->available_fences < 0)
3149 bufmgr_gem->available_fences = 0;
3150 }
3151 }
3153 /* Let's go with one relocation per every 2 dwords (but round down a bit
3154 * since a power of two will mean an extra page allocation for the reloc
3155 * buffer).
3156 *
3157 * Every 4 was too few for the blender benchmark.
3158 */
3159 bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;
3161 bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc;
3162 bufmgr_gem->bufmgr.bo_alloc_for_render =
3163 drm_intel_gem_bo_alloc_for_render;
3164 bufmgr_gem->bufmgr.bo_alloc_tiled = drm_intel_gem_bo_alloc_tiled;
3165 bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference;
3166 bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference;
3167 bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map;
3168 bufmgr_gem->bufmgr.bo_unmap = drm_intel_gem_bo_unmap;
3169 bufmgr_gem->bufmgr.bo_subdata = drm_intel_gem_bo_subdata;
3170 bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata;
3171 bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering;
3172 bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc;
3173 bufmgr_gem->bufmgr.bo_emit_reloc_fence = drm_intel_gem_bo_emit_reloc_fence;
3174 bufmgr_gem->bufmgr.bo_pin = drm_intel_gem_bo_pin;
3175 bufmgr_gem->bufmgr.bo_unpin = drm_intel_gem_bo_unpin;
3176 bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling;
3177 bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
3178 bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
3179 /* Use the new one if available */
3180 if (exec2) {
3181 bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec2;
3182 bufmgr_gem->bufmgr.bo_mrb_exec = drm_intel_gem_bo_mrb_exec2;
3183 } else
3184 bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
3185 bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
3186 bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise;
3187 bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_destroy;
3188 bufmgr_gem->bufmgr.debug = 0;
3189 bufmgr_gem->bufmgr.check_aperture_space =
3190 drm_intel_gem_check_aperture_space;
3191 bufmgr_gem->bufmgr.bo_disable_reuse = drm_intel_gem_bo_disable_reuse;
3192 bufmgr_gem->bufmgr.bo_is_reusable = drm_intel_gem_bo_is_reusable;
3193 bufmgr_gem->bufmgr.get_pipe_from_crtc_id =
3194 drm_intel_gem_get_pipe_from_crtc_id;
3195 bufmgr_gem->bufmgr.bo_references = drm_intel_gem_bo_references;
3197 DRMINITLISTHEAD(&bufmgr_gem->named);
3198 init_cache_buckets(bufmgr_gem);
3200 DRMINITLISTHEAD(&bufmgr_gem->vma_cache);
3201 bufmgr_gem->vma_max = -1; /* unlimited by default */
3203 return &bufmgr_gem->bufmgr;
3204 }
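/*
 * Usage sketch (illustrative only): minimal lifecycle of a GEM bufmgr.
 * The 4096-byte batch size and the buffer parameters are assumptions for
 * the example.
 *
 *	int fd = drmOpen("i915", NULL);
 *	drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
 *
 *	drm_intel_bo *bo = drm_intel_bo_alloc(bufmgr, "scratch", 4096, 4096);
 *	if (drm_intel_bo_map(bo, 1) == 0) {
 *		memset(bo->virtual, 0, 4096);
 *		drm_intel_bo_unmap(bo);
 *	}
 *	drm_intel_bo_unreference(bo);
 *
 *	drm_intel_bufmgr_destroy(bufmgr);
 *	drmClose(fd);
 */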