1 /**************************************************************************
2 *
3 * Copyright © 2007 Red Hat Inc.
4 * Copyright © 2007 Intel Corporation
5 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * The above copyright notice and this permission notice (including the
25 * next paragraph) shall be included in all copies or substantial portions
26 * of the Software.
27 *
28 *
29 **************************************************************************/
30 /*
31 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
32 * Keith Whitwell <keithw-at-tungstengraphics-dot-com>
33 * Eric Anholt <eric@anholt.net>
34 * Dave Airlie <airlied@linux.ie>
35 */
37 #ifdef HAVE_CONFIG_H
38 #include "config.h"
39 #endif
41 #include <xf86drm.h>
42 #include <xf86atomic.h>
43 #include <fcntl.h>
44 #include <stdio.h>
45 #include <stdlib.h>
46 #include <string.h>
47 #include <unistd.h>
48 #include <assert.h>
49 #include <pthread.h>
50 #include <sys/ioctl.h>
51 #include <sys/mman.h>
52 #include <sys/stat.h>
53 #include <sys/types.h>
54 #include <stdbool.h>
56 #include "errno.h"
57 #include "libdrm_lists.h"
58 #include "intel_bufmgr.h"
59 #include "intel_bufmgr_priv.h"
60 #include "intel_chipset.h"
61 #include "intel_aub.h"
62 #include "string.h"
64 #include "i915_drm.h"
66 #ifdef HAVE_VALGRIND
67 #include <valgrind.h>
68 #include <memcheck.h>
69 #define VG(x) x
70 #else
71 #define VG(x)
72 #endif
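/* Zero out ioctl argument structs when running under Valgrind, so that
 * padding bytes the kernel reads are not flagged as uninitialized. */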
74 #define VG_CLEAR(s) VG(memset(&s, 0, sizeof(s)))
76 #define DBG(...) do { \
77 if (bufmgr_gem->bufmgr.debug) \
78 fprintf(stderr, __VA_ARGS__); \
79 } while (0)
81 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
83 typedef struct _drm_intel_bo_gem drm_intel_bo_gem;
85 struct drm_intel_gem_bo_bucket {
86 drmMMListHead head;
87 unsigned long size;
88 };
90 typedef struct _drm_intel_bufmgr_gem {
91 drm_intel_bufmgr bufmgr;
93 int fd;
95 int max_relocs;
97 pthread_mutex_t lock;
99 struct drm_i915_gem_exec_object *exec_objects;
100 struct drm_i915_gem_exec_object2 *exec2_objects;
101 drm_intel_bo **exec_bos;
102 int exec_size;
103 int exec_count;
105 /** Array of lists of cached gem objects; one list per bucket size (powers of two plus a few intermediate sizes) */
106 struct drm_intel_gem_bo_bucket cache_bucket[14 * 4];
107 int num_buckets;
108 time_t time;
110 drmMMListHead named;
111 drmMMListHead vma_cache;
112 int vma_count, vma_open, vma_max;
114 uint64_t gtt_size;
115 int available_fences;
116 int pci_device;
117 int gen;
118 unsigned int has_bsd : 1;
119 unsigned int has_blt : 1;
120 unsigned int has_relaxed_fencing : 1;
121 unsigned int has_llc : 1;
122 unsigned int bo_reuse : 1;
123 unsigned int no_exec : 1;
124 bool fenced_relocs;
126 FILE *aub_file;
127 uint32_t aub_offset;
128 } drm_intel_bufmgr_gem;
130 #define DRM_INTEL_RELOC_FENCE (1<<0)
132 typedef struct _drm_intel_reloc_target_info {
133 drm_intel_bo *bo;
134 int flags;
135 } drm_intel_reloc_target;
137 struct _drm_intel_bo_gem {
138 drm_intel_bo bo;
140 atomic_t refcount;
141 uint32_t gem_handle;
142 const char *name;
144 /**
145 * Kernel-assigned global name for this object
146 */
147 unsigned int global_name;
148 drmMMListHead name_list;
150 /**
151 * Index of the buffer within the validation list while preparing a
152 * batchbuffer execution.
153 */
154 int validate_index;
156 /**
157 * Current tiling mode
158 */
159 uint32_t tiling_mode;
160 uint32_t swizzle_mode;
161 unsigned long stride;
163 time_t free_time;
165 /** Array passed to the DRM containing relocation information. */
166 struct drm_i915_gem_relocation_entry *relocs;
167 /**
168 * Array of info structs corresponding to relocs[i].target_handle, etc.
169 */
170 drm_intel_reloc_target *reloc_target_info;
171 /** Number of entries in relocs */
172 int reloc_count;
173 /** Mapped address for the buffer, saved across map/unmap cycles */
174 void *mem_virtual;
175 /** GTT virtual address for the buffer, saved across map/unmap cycles */
176 void *gtt_virtual;
177 int map_count;
178 drmMMListHead vma_list;
180 /** BO cache list */
181 drmMMListHead head;
183 /**
184 * Boolean of whether this BO and its children have been included in
185 * the current drm_intel_bufmgr_check_aperture_space() total.
186 */
187 bool included_in_check_aperture;
189 /**
190 * Boolean of whether this buffer has been used as a relocation
191 * target and had its size accounted for, and thus can't have any
192 * further relocations added to it.
193 */
194 bool used_as_reloc_target;
196 /**
197 * Boolean of whether we have encountered an error whilst building the relocation tree.
198 */
199 bool has_error;
201 /**
202 * Boolean of whether this buffer can be re-used
203 */
204 bool reusable;
206 /**
207 * Size in bytes of this buffer and its relocation descendants.
208 *
209 * Used to avoid costly tree walking in
210 * drm_intel_bufmgr_check_aperture in the common case.
211 */
212 int reloc_tree_size;
214 /**
215 * Number of potential fence registers required by this buffer and its
216 * relocations.
217 */
218 int reloc_tree_fences;
220 /** Whether we may need to perform the SW_FINISH ioctl on unmap. */
221 bool mapped_cpu_write;
223 uint32_t aub_offset;
224 };
226 static unsigned int
227 drm_intel_gem_estimate_batch_space(drm_intel_bo ** bo_array, int count);
229 static unsigned int
230 drm_intel_gem_compute_batch_space(drm_intel_bo ** bo_array, int count);
232 static int
233 drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
234 uint32_t * swizzle_mode);
236 static int
237 drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
238 uint32_t tiling_mode,
239 uint32_t stride);
241 static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
242 time_t time);
244 static void drm_intel_gem_bo_unreference(drm_intel_bo *bo);
246 static void drm_intel_gem_bo_free(drm_intel_bo *bo);
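/*
 * Compute the allocation size needed to fence a tiled object of the
 * requested size, demoting to untiled when the object is too large to
 * fence at all.  Worked example: on gen3 without relaxed fencing, a
 * 1.5 MiB request rounds up to the next power of two, 2 MiB.
 */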
248 static unsigned long
249 drm_intel_gem_bo_tile_size(drm_intel_bufmgr_gem *bufmgr_gem, unsigned long size,
250 uint32_t *tiling_mode)
251 {
252 unsigned long min_size, max_size;
253 unsigned long i;
255 if (*tiling_mode == I915_TILING_NONE)
256 return size;
258 /* 965+ just need multiples of page size for tiling */
259 if (bufmgr_gem->gen >= 4)
260 return ROUND_UP_TO(size, 4096);
262 /* Older chips need powers of two, of at least 512k or 1M */
263 if (bufmgr_gem->gen == 3) {
264 min_size = 1024*1024;
265 max_size = 128*1024*1024;
266 } else {
267 min_size = 512*1024;
268 max_size = 64*1024*1024;
269 }
271 if (size > max_size) {
272 *tiling_mode = I915_TILING_NONE;
273 return size;
274 }
276 /* Do we need to allocate every page for the fence? */
277 if (bufmgr_gem->has_relaxed_fencing)
278 return ROUND_UP_TO(size, 4096);
280 for (i = min_size; i < size; i <<= 1)
281 ;
283 return i;
284 }
286 /*
287 * Round a given pitch up to the minimum required for X tiling on a
288 * given chip. We use 512 as the minimum to allow for a later tiling
289 * change.
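* Worked example: pre-965 X tiling uses a 512-byte tile width, so a
* 700-byte pitch rounds up to the next power of two, 1024 bytes.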
290 */
291 static unsigned long
292 drm_intel_gem_bo_tile_pitch(drm_intel_bufmgr_gem *bufmgr_gem,
293 unsigned long pitch, uint32_t *tiling_mode)
294 {
295 unsigned long tile_width;
296 unsigned long i;
298 /* If untiled, then just align it so that we can do rendering
299 * to it with the 3D engine.
300 */
301 if (*tiling_mode == I915_TILING_NONE)
302 return ALIGN(pitch, 64);
304 if (*tiling_mode == I915_TILING_X
305 || (IS_915(bufmgr_gem->pci_device)
306 && *tiling_mode == I915_TILING_Y))
307 tile_width = 512;
308 else
309 tile_width = 128;
311 /* 965 is flexible */
312 if (bufmgr_gem->gen >= 4)
313 return ROUND_UP_TO(pitch, tile_width);
315 /* The older hardware has a maximum pitch of 8192 with tiled
316 * surfaces, so fall back to untiled if it's too large.
317 */
318 if (pitch > 8192) {
319 *tiling_mode = I915_TILING_NONE;
320 return ALIGN(pitch, 64);
321 }
323 /* Pre-965 needs power of two tile width */
324 for (i = tile_width; i < pitch; i <<= 1)
325 ;
327 return i;
328 }
330 static struct drm_intel_gem_bo_bucket *
331 drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
332 unsigned long size)
333 {
334 int i;
336 for (i = 0; i < bufmgr_gem->num_buckets; i++) {
337 struct drm_intel_gem_bo_bucket *bucket =
338 &bufmgr_gem->cache_bucket[i];
339 if (bucket->size >= size) {
340 return bucket;
341 }
342 }
344 return NULL;
345 }
347 static void
348 drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
349 {
350 int i, j;
352 for (i = 0; i < bufmgr_gem->exec_count; i++) {
353 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
354 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
356 if (bo_gem->relocs == NULL) {
357 DBG("%2d: %d (%s)\n", i, bo_gem->gem_handle,
358 bo_gem->name);
359 continue;
360 }
362 for (j = 0; j < bo_gem->reloc_count; j++) {
363 drm_intel_bo *target_bo = bo_gem->reloc_target_info[j].bo;
364 drm_intel_bo_gem *target_gem =
365 (drm_intel_bo_gem *) target_bo;
367 DBG("%2d: %d (%s)@0x%08llx -> "
368 "%d (%s)@0x%08lx + 0x%08x\n",
369 i,
370 bo_gem->gem_handle, bo_gem->name,
371 (unsigned long long)bo_gem->relocs[j].offset,
372 target_gem->gem_handle,
373 target_gem->name,
374 target_bo->offset,
375 bo_gem->relocs[j].delta);
376 }
377 }
378 }
380 static inline void
381 drm_intel_gem_bo_reference(drm_intel_bo *bo)
382 {
383 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
385 atomic_inc(&bo_gem->refcount);
386 }
388 /**
389 * Adds the given buffer to the list of buffers to be validated (moved into the
390 * appropriate memory type) with the next batch submission.
391 *
392 * If a buffer is validated multiple times in a batch submission, it ends up
393 * with the intersection of the memory type flags and the union of the
394 * access flags.
395 */
396 static void
397 drm_intel_add_validate_buffer(drm_intel_bo *bo)
398 {
399 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
400 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
401 int index;
403 if (bo_gem->validate_index != -1)
404 return;
406 /* Extend the array of validation entries as necessary. */
407 if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
408 int new_size = bufmgr_gem->exec_size * 2;
410 if (new_size == 0)
411 new_size = 5;
413 bufmgr_gem->exec_objects =
414 realloc(bufmgr_gem->exec_objects,
415 sizeof(*bufmgr_gem->exec_objects) * new_size);
416 bufmgr_gem->exec_bos =
417 realloc(bufmgr_gem->exec_bos,
418 sizeof(*bufmgr_gem->exec_bos) * new_size);
419 bufmgr_gem->exec_size = new_size;
420 }
422 index = bufmgr_gem->exec_count;
423 bo_gem->validate_index = index;
424 /* Fill in array entry */
425 bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
426 bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
427 bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t) bo_gem->relocs;
428 bufmgr_gem->exec_objects[index].alignment = 0;
429 bufmgr_gem->exec_objects[index].offset = 0;
430 bufmgr_gem->exec_bos[index] = bo;
431 bufmgr_gem->exec_count++;
432 }
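/*
 * Execbuffer2 variant of the above: additionally records whether the
 * object needs a fence register, so the kernel can honour
 * EXEC_OBJECT_NEEDS_FENCE.
 */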
434 static void
435 drm_intel_add_validate_buffer2(drm_intel_bo *bo, int need_fence)
436 {
437 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
438 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
439 int index;
441 if (bo_gem->validate_index != -1) {
442 if (need_fence)
443 bufmgr_gem->exec2_objects[bo_gem->validate_index].flags |=
444 EXEC_OBJECT_NEEDS_FENCE;
445 return;
446 }
448 /* Extend the array of validation entries as necessary. */
449 if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
450 int new_size = bufmgr_gem->exec_size * 2;
452 if (new_size == 0)
453 new_size = 5;
455 bufmgr_gem->exec2_objects =
456 realloc(bufmgr_gem->exec2_objects,
457 sizeof(*bufmgr_gem->exec2_objects) * new_size);
458 bufmgr_gem->exec_bos =
459 realloc(bufmgr_gem->exec_bos,
460 sizeof(*bufmgr_gem->exec_bos) * new_size);
461 bufmgr_gem->exec_size = new_size;
462 }
464 index = bufmgr_gem->exec_count;
465 bo_gem->validate_index = index;
466 /* Fill in array entry */
467 bufmgr_gem->exec2_objects[index].handle = bo_gem->gem_handle;
468 bufmgr_gem->exec2_objects[index].relocation_count = bo_gem->reloc_count;
469 bufmgr_gem->exec2_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
470 bufmgr_gem->exec2_objects[index].alignment = 0;
471 bufmgr_gem->exec2_objects[index].offset = 0;
472 bufmgr_gem->exec_bos[index] = bo;
473 bufmgr_gem->exec2_objects[index].flags = 0;
474 bufmgr_gem->exec2_objects[index].rsvd1 = 0;
475 bufmgr_gem->exec2_objects[index].rsvd2 = 0;
476 if (need_fence) {
477 bufmgr_gem->exec2_objects[index].flags |=
478 EXEC_OBJECT_NEEDS_FENCE;
479 }
480 bufmgr_gem->exec_count++;
481 }
483 #define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
484 sizeof(uint32_t))
486 static void
487 drm_intel_bo_gem_set_in_aperture_size(drm_intel_bufmgr_gem *bufmgr_gem,
488 drm_intel_bo_gem *bo_gem)
489 {
490 int size;
492 assert(!bo_gem->used_as_reloc_target);
494 /* The older chipsets are far less flexible in terms of tiling,
495 * and require tiled buffers to be size-aligned in the aperture.
496 * This means that in the worst possible case we will need a hole
497 * twice as large as the object in order for it to fit into the
498 * aperture. Optimal packing is for wimps.
499 */
500 size = bo_gem->bo.size;
501 if (bufmgr_gem->gen < 4 && bo_gem->tiling_mode != I915_TILING_NONE) {
502 int min_size;
504 if (bufmgr_gem->has_relaxed_fencing) {
505 if (bufmgr_gem->gen == 3)
506 min_size = 1024*1024;
507 else
508 min_size = 512*1024;
510 while (min_size < size)
511 min_size *= 2;
512 } else
513 min_size = size;
515 /* Account for worst-case alignment. */
516 size = 2 * min_size;
517 }
519 bo_gem->reloc_tree_size = size;
520 }
522 static int
523 drm_intel_setup_reloc_list(drm_intel_bo *bo)
524 {
525 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
526 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
527 unsigned int max_relocs = bufmgr_gem->max_relocs;
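/* A relocation consumes at least one dword in the batch stream, so a
 * BO can never need more than size / 4 relocation entries. */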
529 if (bo->size / 4 < max_relocs)
530 max_relocs = bo->size / 4;
532 bo_gem->relocs = malloc(max_relocs *
533 sizeof(struct drm_i915_gem_relocation_entry));
534 bo_gem->reloc_target_info = malloc(max_relocs *
535 sizeof(drm_intel_reloc_target));
536 if (bo_gem->relocs == NULL || bo_gem->reloc_target_info == NULL) {
537 bo_gem->has_error = true;
539 free (bo_gem->relocs);
540 bo_gem->relocs = NULL;
542 free (bo_gem->reloc_target_info);
543 bo_gem->reloc_target_info = NULL;
545 return 1;
546 }
548 return 0;
549 }
551 static int
552 drm_intel_gem_bo_busy(drm_intel_bo *bo)
553 {
554 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
555 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
556 struct drm_i915_gem_busy busy;
557 int ret;
559 VG_CLEAR(busy);
560 busy.handle = bo_gem->gem_handle;
562 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
564 return (ret == 0 && busy.busy);
565 }
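/*
 * Hint to the kernel whether the object's backing pages may be
 * discarded under memory pressure (I915_MADV_DONTNEED) or will be
 * needed again (I915_MADV_WILLNEED).  Returns nonzero while the pages
 * are still resident; a zero return after WILLNEED means the kernel
 * already purged the object and its contents are gone.
 */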
567 static int
568 drm_intel_gem_bo_madvise_internal(drm_intel_bufmgr_gem *bufmgr_gem,
569 drm_intel_bo_gem *bo_gem, int state)
570 {
571 struct drm_i915_gem_madvise madv;
573 VG_CLEAR(madv);
574 madv.handle = bo_gem->gem_handle;
575 madv.madv = state;
576 madv.retained = 1;
577 drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
579 return madv.retained;
580 }
582 static int
583 drm_intel_gem_bo_madvise(drm_intel_bo *bo, int madv)
584 {
585 return drm_intel_gem_bo_madvise_internal
586 ((drm_intel_bufmgr_gem *) bo->bufmgr,
587 (drm_intel_bo_gem *) bo,
588 madv);
589 }
591 /* drop the oldest entries that have been purged by the kernel */
592 static void
593 drm_intel_gem_bo_cache_purge_bucket(drm_intel_bufmgr_gem *bufmgr_gem,
594 struct drm_intel_gem_bo_bucket *bucket)
595 {
596 while (!DRMLISTEMPTY(&bucket->head)) {
597 drm_intel_bo_gem *bo_gem;
599 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
600 bucket->head.next, head);
601 if (drm_intel_gem_bo_madvise_internal
602 (bufmgr_gem, bo_gem, I915_MADV_DONTNEED))
603 break;
605 DRMLISTDEL(&bo_gem->head);
606 drm_intel_gem_bo_free(&bo_gem->bo);
607 }
608 }
610 static drm_intel_bo *
611 drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr,
612 const char *name,
613 unsigned long size,
614 unsigned long flags,
615 uint32_t tiling_mode,
616 unsigned long stride)
617 {
618 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
619 drm_intel_bo_gem *bo_gem;
620 unsigned int page_size = getpagesize();
621 int ret;
622 struct drm_intel_gem_bo_bucket *bucket;
623 bool alloc_from_cache;
624 unsigned long bo_size;
625 bool for_render = false;
627 if (flags & BO_ALLOC_FOR_RENDER)
628 for_render = true;
630 /* Round the allocated size up to a power of two number of pages. */
631 bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, size);
633 /* If we don't have caching at this size, don't actually round the
634 * allocation up.
635 */
636 if (bucket == NULL) {
637 bo_size = size;
638 if (bo_size < page_size)
639 bo_size = page_size;
640 } else {
641 bo_size = bucket->size;
642 }
644 pthread_mutex_lock(&bufmgr_gem->lock);
645 /* Get a buffer out of the cache if available */
646 retry:
647 alloc_from_cache = false;
648 if (bucket != NULL && !DRMLISTEMPTY(&bucket->head)) {
649 if (for_render) {
650 /* Allocate new render-target BOs from the tail (MRU)
651 * of the list, as it will likely be hot in the GPU
652 * cache and in the aperture for us.
653 */
654 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
655 bucket->head.prev, head);
656 DRMLISTDEL(&bo_gem->head);
657 alloc_from_cache = true;
658 } else {
659 /* For non-render-target BOs (where we're probably
660 * going to map it first thing in order to fill it
661 * with data), check if the last BO in the cache is
662 * unbusy, and only reuse in that case. Otherwise,
663 * allocating a new buffer is probably faster than
664 * waiting for the GPU to finish.
665 */
666 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
667 bucket->head.next, head);
668 if (!drm_intel_gem_bo_busy(&bo_gem->bo)) {
669 alloc_from_cache = true;
670 DRMLISTDEL(&bo_gem->head);
671 }
672 }
674 if (alloc_from_cache) {
675 if (!drm_intel_gem_bo_madvise_internal
676 (bufmgr_gem, bo_gem, I915_MADV_WILLNEED)) {
677 drm_intel_gem_bo_free(&bo_gem->bo);
678 drm_intel_gem_bo_cache_purge_bucket(bufmgr_gem,
679 bucket);
680 goto retry;
681 }
683 if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
684 tiling_mode,
685 stride)) {
686 drm_intel_gem_bo_free(&bo_gem->bo);
687 goto retry;
688 }
689 }
690 }
691 pthread_mutex_unlock(&bufmgr_gem->lock);
693 if (!alloc_from_cache) {
694 struct drm_i915_gem_create create;
696 bo_gem = calloc(1, sizeof(*bo_gem));
697 if (!bo_gem)
698 return NULL;
700 bo_gem->bo.size = bo_size;
702 VG_CLEAR(create);
703 create.size = bo_size;
705 ret = drmIoctl(bufmgr_gem->fd,
706 DRM_IOCTL_I915_GEM_CREATE,
707 &create);
708 bo_gem->gem_handle = create.handle;
709 bo_gem->bo.handle = bo_gem->gem_handle;
710 if (ret != 0) {
711 free(bo_gem);
712 return NULL;
713 }
714 bo_gem->bo.bufmgr = bufmgr;
716 bo_gem->tiling_mode = I915_TILING_NONE;
717 bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
718 bo_gem->stride = 0;
720 if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
721 tiling_mode,
722 stride)) {
723 drm_intel_gem_bo_free(&bo_gem->bo);
724 return NULL;
725 }
727 DRMINITLISTHEAD(&bo_gem->name_list);
728 DRMINITLISTHEAD(&bo_gem->vma_list);
729 }
731 bo_gem->name = name;
732 atomic_set(&bo_gem->refcount, 1);
733 bo_gem->validate_index = -1;
734 bo_gem->reloc_tree_fences = 0;
735 bo_gem->used_as_reloc_target = false;
736 bo_gem->has_error = false;
737 bo_gem->reusable = true;
739 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
741 DBG("bo_create: buf %d (%s) %ldb\n",
742 bo_gem->gem_handle, bo_gem->name, size);
744 return &bo_gem->bo;
745 }
747 static drm_intel_bo *
748 drm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
749 const char *name,
750 unsigned long size,
751 unsigned int alignment)
752 {
753 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size,
754 BO_ALLOC_FOR_RENDER,
755 I915_TILING_NONE, 0);
756 }
758 static drm_intel_bo *
759 drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr,
760 const char *name,
761 unsigned long size,
762 unsigned int alignment)
763 {
764 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, 0,
765 I915_TILING_NONE, 0);
766 }
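/*
 * Illustrative sketch: allocating an X-tiled 1024x768, 32bpp surface.
 * On return, *tiling_mode may have been demoted to I915_TILING_NONE if
 * the hardware cannot tile at this size or pitch.
 *
 *	uint32_t tiling = I915_TILING_X;
 *	unsigned long pitch;
 *	drm_intel_bo *bo = drm_intel_bo_alloc_tiled(bufmgr, "surface",
 *						    1024, 768, 4,
 *						    &tiling, &pitch, 0);
 */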
768 static drm_intel_bo *
769 drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
770 int x, int y, int cpp, uint32_t *tiling_mode,
771 unsigned long *pitch, unsigned long flags)
772 {
773 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
774 unsigned long size, stride;
775 uint32_t tiling;
777 do {
778 unsigned long aligned_y, height_alignment;
780 tiling = *tiling_mode;
782 /* If we're tiled, our allocations are in 8 or 32-row blocks,
783 * so failure to align our height means that we won't allocate
784 * enough pages.
785 *
786 * If we're untiled, we still have to align to 2 rows high
787 * because the data port accesses 2x2 blocks even if the
788 * bottom row isn't to be rendered, so failure to align means
789 * we could walk off the end of the GTT and fault. This is
790 * documented on 965, and may be the case on older chipsets
791 * too so we try to be careful.
792 */
793 aligned_y = y;
794 height_alignment = 2;
796 if ((bufmgr_gem->gen == 2) && tiling != I915_TILING_NONE)
797 height_alignment = 16;
798 else if (tiling == I915_TILING_X
799 || (IS_915(bufmgr_gem->pci_device)
800 && tiling == I915_TILING_Y))
801 height_alignment = 8;
802 else if (tiling == I915_TILING_Y)
803 height_alignment = 32;
804 aligned_y = ALIGN(y, height_alignment);
806 stride = x * cpp;
807 stride = drm_intel_gem_bo_tile_pitch(bufmgr_gem, stride, tiling_mode);
808 size = stride * aligned_y;
809 size = drm_intel_gem_bo_tile_size(bufmgr_gem, size, tiling_mode);
810 } while (*tiling_mode != tiling);
811 *pitch = stride;
813 if (tiling == I915_TILING_NONE)
814 stride = 0;
816 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, flags,
817 tiling, stride);
818 }
820 /**
821 * Returns a drm_intel_bo wrapping the given buffer object handle.
822 *
823 * This can be used when one application needs to pass a buffer object
824 * to another.
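*
* Illustrative sketch: process A publishes a flink name with
* drm_intel_bo_flink(bo, &name) and hands it to process B, which calls
* drm_intel_bo_gem_create_from_name(bufmgr, "shared", name) to obtain
* its own reference to the same object.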
825 */
826 drm_intel_bo *
827 drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
828 const char *name,
829 unsigned int handle)
830 {
831 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
832 drm_intel_bo_gem *bo_gem;
833 int ret;
834 struct drm_gem_open open_arg;
835 struct drm_i915_gem_get_tiling get_tiling;
836 drmMMListHead *list;
838 /* At the moment most applications only have a few named BOs.
839 * For instance, in a DRI client only the render buffers passed
840 * between X and the client are named. And since X returns the
841 * alternating names for the front/back buffer a linear search
842 * provides a sufficiently fast match.
843 */
844 for (list = bufmgr_gem->named.next;
845 list != &bufmgr_gem->named;
846 list = list->next) {
847 bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
848 if (bo_gem->global_name == handle) {
849 drm_intel_gem_bo_reference(&bo_gem->bo);
850 return &bo_gem->bo;
851 }
852 }
854 bo_gem = calloc(1, sizeof(*bo_gem));
855 if (!bo_gem)
856 return NULL;
858 VG_CLEAR(open_arg);
859 open_arg.name = handle;
860 ret = drmIoctl(bufmgr_gem->fd,
861 DRM_IOCTL_GEM_OPEN,
862 &open_arg);
863 if (ret != 0) {
864 DBG("Couldn't reference %s handle 0x%08x: %s\n",
865 name, handle, strerror(errno));
866 free(bo_gem);
867 return NULL;
868 }
869 bo_gem->bo.size = open_arg.size;
870 bo_gem->bo.offset = 0;
871 bo_gem->bo.virtual = NULL;
872 bo_gem->bo.bufmgr = bufmgr;
873 bo_gem->name = name;
874 atomic_set(&bo_gem->refcount, 1);
875 bo_gem->validate_index = -1;
876 bo_gem->gem_handle = open_arg.handle;
877 bo_gem->bo.handle = open_arg.handle;
878 bo_gem->global_name = handle;
879 bo_gem->reusable = false;
881 VG_CLEAR(get_tiling);
882 get_tiling.handle = bo_gem->gem_handle;
883 ret = drmIoctl(bufmgr_gem->fd,
884 DRM_IOCTL_I915_GEM_GET_TILING,
885 &get_tiling);
886 if (ret != 0) {
887 drm_intel_gem_bo_unreference(&bo_gem->bo);
888 return NULL;
889 }
890 bo_gem->tiling_mode = get_tiling.tiling_mode;
891 bo_gem->swizzle_mode = get_tiling.swizzle_mode;
892 /* XXX stride is unknown */
893 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
895 DRMINITLISTHEAD(&bo_gem->vma_list);
896 DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
897 DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);
899 return &bo_gem->bo;
900 }
902 static void
903 drm_intel_gem_bo_free(drm_intel_bo *bo)
904 {
905 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
906 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
907 struct drm_gem_close close;
908 int ret;
910 DRMLISTDEL(&bo_gem->vma_list);
911 if (bo_gem->mem_virtual) {
912 VG(VALGRIND_FREELIKE_BLOCK(bo_gem->mem_virtual, 0));
913 munmap(bo_gem->mem_virtual, bo_gem->bo.size);
914 bufmgr_gem->vma_count--;
915 }
916 if (bo_gem->gtt_virtual) {
917 munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
918 bufmgr_gem->vma_count--;
919 }
921 /* Close this object */
922 VG_CLEAR(close);
923 close.handle = bo_gem->gem_handle;
924 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
925 if (ret != 0) {
926 DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
927 bo_gem->gem_handle, bo_gem->name, strerror(errno));
928 }
929 free(bo);
930 }
932 static void
933 drm_intel_gem_bo_mark_mmaps_incoherent(drm_intel_bo *bo)
934 {
935 #ifdef HAVE_VALGRIND
936 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
938 if (bo_gem->mem_virtual)
939 VALGRIND_MAKE_MEM_NOACCESS(bo_gem->mem_virtual, bo->size);
941 if (bo_gem->gtt_virtual)
942 VALGRIND_MAKE_MEM_NOACCESS(bo_gem->gtt_virtual, bo->size);
943 #endif
944 }
946 /** Frees all cached buffers significantly older than @time. */
947 static void
948 drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
949 {
950 int i;
952 if (bufmgr_gem->time == time)
953 return;
955 for (i = 0; i < bufmgr_gem->num_buckets; i++) {
956 struct drm_intel_gem_bo_bucket *bucket =
957 &bufmgr_gem->cache_bucket[i];
959 while (!DRMLISTEMPTY(&bucket->head)) {
960 drm_intel_bo_gem *bo_gem;
962 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
963 bucket->head.next, head);
964 if (time - bo_gem->free_time <= 1)
965 break;
967 DRMLISTDEL(&bo_gem->head);
969 drm_intel_gem_bo_free(&bo_gem->bo);
970 }
971 }
973 bufmgr_gem->time = time;
974 }
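/*
 * Trim cached mappings so that the number of cached VMAs stays below
 * vma_max, keeping headroom of two mappings (CPU and GTT) for every
 * currently open buffer.
 */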
976 static void drm_intel_gem_bo_purge_vma_cache(drm_intel_bufmgr_gem *bufmgr_gem)
977 {
978 int limit;
980 DBG("%s: cached=%d, open=%d, limit=%d\n", __FUNCTION__,
981 bufmgr_gem->vma_count, bufmgr_gem->vma_open, bufmgr_gem->vma_max);
983 if (bufmgr_gem->vma_max < 0)
984 return;
986 /* We may need to evict a few entries in order to create new mmaps */
987 limit = bufmgr_gem->vma_max - 2*bufmgr_gem->vma_open;
988 if (limit < 0)
989 limit = 0;
991 while (bufmgr_gem->vma_count > limit) {
992 drm_intel_bo_gem *bo_gem;
994 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
995 bufmgr_gem->vma_cache.next,
996 vma_list);
997 assert(bo_gem->map_count == 0);
998 DRMLISTDELINIT(&bo_gem->vma_list);
1000 if (bo_gem->mem_virtual) {
1001 munmap(bo_gem->mem_virtual, bo_gem->bo.size);
1002 bo_gem->mem_virtual = NULL;
1003 bufmgr_gem->vma_count--;
1004 }
1005 if (bo_gem->gtt_virtual) {
1006 munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
1007 bo_gem->gtt_virtual = NULL;
1008 bufmgr_gem->vma_count--;
1009 }
1010 }
1011 }
1013 static void drm_intel_gem_bo_close_vma(drm_intel_bufmgr_gem *bufmgr_gem,
1014 drm_intel_bo_gem *bo_gem)
1015 {
1016 bufmgr_gem->vma_open--;
1017 DRMLISTADDTAIL(&bo_gem->vma_list, &bufmgr_gem->vma_cache);
1018 if (bo_gem->mem_virtual)
1019 bufmgr_gem->vma_count++;
1020 if (bo_gem->gtt_virtual)
1021 bufmgr_gem->vma_count++;
1022 drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
1023 }
1025 static void drm_intel_gem_bo_open_vma(drm_intel_bufmgr_gem *bufmgr_gem,
1026 drm_intel_bo_gem *bo_gem)
1027 {
1028 bufmgr_gem->vma_open++;
1029 DRMLISTDEL(&bo_gem->vma_list);
1030 if (bo_gem->mem_virtual)
1031 bufmgr_gem->vma_count--;
1032 if (bo_gem->gtt_virtual)
1033 bufmgr_gem->vma_count--;
1034 drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
1035 }
1037 static void
1038 drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
1039 {
1040 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1041 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1042 struct drm_intel_gem_bo_bucket *bucket;
1043 int i;
1045 /* Unreference all the target buffers */
1046 for (i = 0; i < bo_gem->reloc_count; i++) {
1047 if (bo_gem->reloc_target_info[i].bo != bo) {
1048 drm_intel_gem_bo_unreference_locked_timed(bo_gem->
1049 reloc_target_info[i].bo,
1050 time);
1051 }
1052 }
1053 bo_gem->reloc_count = 0;
1054 bo_gem->used_as_reloc_target = false;
1056 DBG("bo_unreference final: %d (%s)\n",
1057 bo_gem->gem_handle, bo_gem->name);
1059 /* release memory associated with this object */
1060 if (bo_gem->reloc_target_info) {
1061 free(bo_gem->reloc_target_info);
1062 bo_gem->reloc_target_info = NULL;
1063 }
1064 if (bo_gem->relocs) {
1065 free(bo_gem->relocs);
1066 bo_gem->relocs = NULL;
1067 }
1069 /* Clear any left-over mappings */
1070 if (bo_gem->map_count) {
1071 DBG("bo freed with non-zero map-count %d\n", bo_gem->map_count);
1072 bo_gem->map_count = 0;
1073 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1074 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1075 }
1077 DRMLISTDEL(&bo_gem->name_list);
1079 bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
1080 /* Put the buffer into our internal cache for reuse if we can. */
1081 if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
1082 drm_intel_gem_bo_madvise_internal(bufmgr_gem, bo_gem,
1083 I915_MADV_DONTNEED)) {
1084 bo_gem->free_time = time;
1086 bo_gem->name = NULL;
1087 bo_gem->validate_index = -1;
1089 DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
1090 } else {
1091 drm_intel_gem_bo_free(bo);
1092 }
1093 }
1095 static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
1096 time_t time)
1097 {
1098 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1100 assert(atomic_read(&bo_gem->refcount) > 0);
1101 if (atomic_dec_and_test(&bo_gem->refcount))
1102 drm_intel_gem_bo_unreference_final(bo, time);
1103 }
1105 static void drm_intel_gem_bo_unreference(drm_intel_bo *bo)
1106 {
1107 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1109 assert(atomic_read(&bo_gem->refcount) > 0);
1110 if (atomic_dec_and_test(&bo_gem->refcount)) {
1111 drm_intel_bufmgr_gem *bufmgr_gem =
1112 (drm_intel_bufmgr_gem *) bo->bufmgr;
1113 struct timespec time;
1115 clock_gettime(CLOCK_MONOTONIC, &time);
1117 pthread_mutex_lock(&bufmgr_gem->lock);
1118 drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
1119 drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
1120 pthread_mutex_unlock(&bufmgr_gem->lock);
1121 }
1122 }
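/*
 * Map the buffer into the CPU's address space via DRM_IOCTL_I915_GEM_MMAP
 * and move it to the CPU domain so reads and writes are coherent.  The
 * mapping is cached across map/unmap cycles in mem_virtual.
 *
 * Illustrative sketch of a one-shot upload through this path:
 *
 *	if (drm_intel_bo_map(bo, 1) == 0) {
 *		memcpy(bo->virtual, data, size);
 *		drm_intel_bo_unmap(bo);
 *	}
 */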
1124 static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
1125 {
1126 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1127 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1128 struct drm_i915_gem_set_domain set_domain;
1129 int ret;
1131 pthread_mutex_lock(&bufmgr_gem->lock);
1133 if (bo_gem->map_count++ == 0)
1134 drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
1136 if (!bo_gem->mem_virtual) {
1137 struct drm_i915_gem_mmap mmap_arg;
1139 DBG("bo_map: %d (%s), map_count=%d\n",
1140 bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
1142 VG_CLEAR(mmap_arg);
1143 mmap_arg.handle = bo_gem->gem_handle;
1144 mmap_arg.offset = 0;
1145 mmap_arg.size = bo->size;
1146 ret = drmIoctl(bufmgr_gem->fd,
1147 DRM_IOCTL_I915_GEM_MMAP,
1148 &mmap_arg);
1149 if (ret != 0) {
1150 ret = -errno;
1151 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
1152 __FILE__, __LINE__, bo_gem->gem_handle,
1153 bo_gem->name, strerror(errno));
1154 if (--bo_gem->map_count == 0)
1155 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1156 pthread_mutex_unlock(&bufmgr_gem->lock);
1157 return ret;
1158 }
1159 VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
1160 bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
1161 }
1162 DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
1163 bo_gem->mem_virtual);
1164 bo->virtual = bo_gem->mem_virtual;
1166 VG_CLEAR(set_domain);
1167 set_domain.handle = bo_gem->gem_handle;
1168 set_domain.read_domains = I915_GEM_DOMAIN_CPU;
1169 if (write_enable)
1170 set_domain.write_domain = I915_GEM_DOMAIN_CPU;
1171 else
1172 set_domain.write_domain = 0;
1173 ret = drmIoctl(bufmgr_gem->fd,
1174 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1175 &set_domain);
1176 if (ret != 0) {
1177 DBG("%s:%d: Error setting to CPU domain %d: %s\n",
1178 __FILE__, __LINE__, bo_gem->gem_handle,
1179 strerror(errno));
1180 }
1182 if (write_enable)
1183 bo_gem->mapped_cpu_write = true;
1185 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1186 VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->mem_virtual, bo->size));
1187 pthread_mutex_unlock(&bufmgr_gem->lock);
1189 return 0;
1190 }
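/*
 * Map the buffer through the GTT aperture instead of the CPU path.
 * Accesses go through the fence registers, so tiled buffers appear
 * linear to the CPU, at the cost of uncached (write-combining) access.
 */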
1192 int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
1193 {
1194 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1195 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1196 struct drm_i915_gem_set_domain set_domain;
1197 int ret;
1199 pthread_mutex_lock(&bufmgr_gem->lock);
1201 if (bo_gem->map_count++ == 0)
1202 drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
1204 /* Get a mapping of the buffer if we haven't before. */
1205 if (bo_gem->gtt_virtual == NULL) {
1206 struct drm_i915_gem_mmap_gtt mmap_arg;
1208 DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
1209 bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
1211 VG_CLEAR(mmap_arg);
1212 mmap_arg.handle = bo_gem->gem_handle;
1214 /* Get the fake offset back... */
1215 ret = drmIoctl(bufmgr_gem->fd,
1216 DRM_IOCTL_I915_GEM_MMAP_GTT,
1217 &mmap_arg);
1218 if (ret != 0) {
1219 ret = -errno;
1220 DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
1221 __FILE__, __LINE__,
1222 bo_gem->gem_handle, bo_gem->name,
1223 strerror(errno));
1224 if (--bo_gem->map_count == 0)
1225 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1226 pthread_mutex_unlock(&bufmgr_gem->lock);
1227 return ret;
1228 }
1230 /* and mmap it */
1231 bo_gem->gtt_virtual = mmap(0, bo->size, PROT_READ | PROT_WRITE,
1232 MAP_SHARED, bufmgr_gem->fd,
1233 mmap_arg.offset);
1234 if (bo_gem->gtt_virtual == MAP_FAILED) {
1235 bo_gem->gtt_virtual = NULL;
1236 ret = -errno;
1237 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
1238 __FILE__, __LINE__,
1239 bo_gem->gem_handle, bo_gem->name,
1240 strerror(errno));
1241 if (--bo_gem->map_count == 0)
1242 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1243 pthread_mutex_unlock(&bufmgr_gem->lock);
1244 return ret;
1245 }
1246 }
1248 bo->virtual = bo_gem->gtt_virtual;
1250 DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
1251 bo_gem->gtt_virtual);
1253 /* Now move it to the GTT domain so that the CPU caches are flushed */
1254 VG_CLEAR(set_domain);
1255 set_domain.handle = bo_gem->gem_handle;
1256 set_domain.read_domains = I915_GEM_DOMAIN_GTT;
1257 set_domain.write_domain = I915_GEM_DOMAIN_GTT;
1258 ret = drmIoctl(bufmgr_gem->fd,
1259 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1260 &set_domain);
1261 if (ret != 0) {
1262 DBG("%s:%d: Error setting domain %d: %s\n",
1263 __FILE__, __LINE__, bo_gem->gem_handle,
1264 strerror(errno));
1265 }
1267 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1268 VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size));
1269 pthread_mutex_unlock(&bufmgr_gem->lock);
1271 return 0;
1272 }
1274 static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
1275 {
1276 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1277 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1278 int ret = 0;
1280 if (bo == NULL)
1281 return 0;
1283 pthread_mutex_lock(&bufmgr_gem->lock);
1285 if (bo_gem->map_count <= 0) {
1286 DBG("attempted to unmap an unmapped bo\n");
1287 pthread_mutex_unlock(&bufmgr_gem->lock);
1288 /* Preserve the old behaviour of just treating this as a
1289 * no-op rather than reporting the error.
1290 */
1291 return 0;
1292 }
1294 if (bo_gem->mapped_cpu_write) {
1295 struct drm_i915_gem_sw_finish sw_finish;
1297 /* Cause a flush to happen if the buffer's pinned for
1298 * scanout, so the results show up in a timely manner.
1299 * Unlike GTT set domains, this only does work if the
1300 * buffer should be scanout-related.
1301 */
1302 VG_CLEAR(sw_finish);
1303 sw_finish.handle = bo_gem->gem_handle;
1304 ret = drmIoctl(bufmgr_gem->fd,
1305 DRM_IOCTL_I915_GEM_SW_FINISH,
1306 &sw_finish);
1307 ret = ret == -1 ? -errno : 0;
1309 bo_gem->mapped_cpu_write = false;
1310 }
1312 /* We need to unmap after every invocation, as we cannot keep
1313 * an open VMA for every BO: that would exhaust the system
1314 * limits and cause later failures.
1315 */
1316 if (--bo_gem->map_count == 0) {
1317 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1318 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1319 bo->virtual = NULL;
1320 }
1321 pthread_mutex_unlock(&bufmgr_gem->lock);
1323 return ret;
1324 }
1326 int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
1327 {
1328 return drm_intel_gem_bo_unmap(bo);
1329 }
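/*
 * Upload a linear range of data into the BO with pwrite; the kernel
 * moves the object to the CPU write domain itself, so no explicit
 * map/flush is needed on our side.
 */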
1331 static int
1332 drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
1333 unsigned long size, const void *data)
1334 {
1335 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1336 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1337 struct drm_i915_gem_pwrite pwrite;
1338 int ret;
1340 VG_CLEAR(pwrite);
1341 pwrite.handle = bo_gem->gem_handle;
1342 pwrite.offset = offset;
1343 pwrite.size = size;
1344 pwrite.data_ptr = (uint64_t) (uintptr_t) data;
1345 ret = drmIoctl(bufmgr_gem->fd,
1346 DRM_IOCTL_I915_GEM_PWRITE,
1347 &pwrite);
1348 if (ret != 0) {
1349 ret = -errno;
1350 DBG("%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
1351 __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
1352 (int)size, strerror(errno));
1353 }
1355 return ret;
1356 }
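/*
 * Map a KMS CRTC id to its hardware pipe index, so callers can wait
 * for vblank on the correct pipe.
 */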
1358 static int
1359 drm_intel_gem_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
1360 {
1361 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1362 struct drm_i915_get_pipe_from_crtc_id get_pipe_from_crtc_id;
1363 int ret;
1365 VG_CLEAR(get_pipe_from_crtc_id);
1366 get_pipe_from_crtc_id.crtc_id = crtc_id;
1367 ret = drmIoctl(bufmgr_gem->fd,
1368 DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID,
1369 &get_pipe_from_crtc_id);
1370 if (ret != 0) {
1371 /* We return -1 here to signal that we don't
1372 * know which pipe is associated with this crtc.
1373 * This lets the caller know that this information
1374 * isn't available; using the wrong pipe for
1375 * vblank waiting can cause the chipset to lock up
1376 */
1377 return -1;
1378 }
1380 return get_pipe_from_crtc_id.pipe;
1381 }
1383 static int
1384 drm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
1385 unsigned long size, void *data)
1386 {
1387 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1388 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1389 struct drm_i915_gem_pread pread;
1390 int ret;
1392 VG_CLEAR(pread);
1393 pread.handle = bo_gem->gem_handle;
1394 pread.offset = offset;
1395 pread.size = size;
1396 pread.data_ptr = (uint64_t) (uintptr_t) data;
1397 ret = drmIoctl(bufmgr_gem->fd,
1398 DRM_IOCTL_I915_GEM_PREAD,
1399 &pread);
1400 if (ret != 0) {
1401 ret = -errno;
1402 DBG("%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
1403 __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
1404 (int)size, strerror(errno));
1405 }
1407 return ret;
1408 }
1410 /** Waits for all GPU rendering with the object to have completed. */
1411 static void
1412 drm_intel_gem_bo_wait_rendering(drm_intel_bo *bo)
1413 {
1414 drm_intel_gem_bo_start_gtt_access(bo, 1);
1415 }
1417 /**
1418 * Sets the object to the GTT read and possibly write domain, used by the X
1419 * 2D driver in the absence of kernel support to do drm_intel_gem_bo_map_gtt().
1420 *
1421 * In combination with drm_intel_gem_bo_pin() and manual fence management, we
1422 * can do tiled pixmaps this way.
1423 */
1424 void
1425 drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)
1426 {
1427 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1428 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1429 struct drm_i915_gem_set_domain set_domain;
1430 int ret;
1432 VG_CLEAR(set_domain);
1433 set_domain.handle = bo_gem->gem_handle;
1434 set_domain.read_domains = I915_GEM_DOMAIN_GTT;
1435 set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0;
1436 ret = drmIoctl(bufmgr_gem->fd,
1437 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1438 &set_domain);
1439 if (ret != 0) {
1440 DBG("%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
1441 __FILE__, __LINE__, bo_gem->gem_handle,
1442 set_domain.read_domains, set_domain.write_domain,
1443 strerror(errno));
1444 }
1445 }
1447 static void
1448 drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
1449 {
1450 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1451 int i;
1453 free(bufmgr_gem->exec2_objects);
1454 free(bufmgr_gem->exec_objects);
1455 free(bufmgr_gem->exec_bos);
1457 pthread_mutex_destroy(&bufmgr_gem->lock);
1459 /* Free any cached buffer objects we were going to reuse */
1460 for (i = 0; i < bufmgr_gem->num_buckets; i++) {
1461 struct drm_intel_gem_bo_bucket *bucket =
1462 &bufmgr_gem->cache_bucket[i];
1463 drm_intel_bo_gem *bo_gem;
1465 while (!DRMLISTEMPTY(&bucket->head)) {
1466 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
1467 bucket->head.next, head);
1468 DRMLISTDEL(&bo_gem->head);
1470 drm_intel_gem_bo_free(&bo_gem->bo);
1471 }
1472 }
1474 free(bufmgr);
1475 }
1477 /**
1478 * Adds the target buffer to the validation list and adds the relocation
1479 * to the reloc_buffer's relocation list.
1480 *
1481 * The relocation entry at the given offset must already contain the
1482 * precomputed relocation value, because the kernel will optimize out
1483 * the relocation entry write when the buffer hasn't moved from the
1484 * last known offset in target_bo.
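*
* Illustrative sketch of the pattern (batch is a uint32_t map of bo):
*
*	batch[n] = target_bo->offset + delta;
*	drm_intel_bo_emit_reloc(bo, n * 4, target_bo, delta,
*				I915_GEM_DOMAIN_RENDER, 0);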
1485 */
1486 static int
1487 do_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
1488 drm_intel_bo *target_bo, uint32_t target_offset,
1489 uint32_t read_domains, uint32_t write_domain,
1490 bool need_fence)
1491 {
1492 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1493 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1494 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
1495 bool fenced_command;
1497 if (bo_gem->has_error)
1498 return -ENOMEM;
1500 if (target_bo_gem->has_error) {
1501 bo_gem->has_error = true;
1502 return -ENOMEM;
1503 }
1505 /* We never use HW fences for rendering on 965+ */
1506 if (bufmgr_gem->gen >= 4)
1507 need_fence = false;
1509 fenced_command = need_fence;
1510 if (target_bo_gem->tiling_mode == I915_TILING_NONE)
1511 need_fence = false;
1513 /* Create a new relocation list if needed */
1514 if (bo_gem->relocs == NULL && drm_intel_setup_reloc_list(bo))
1515 return -ENOMEM;
1517 /* Check overflow */
1518 assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);
1520 /* Check args */
1521 assert(offset <= bo->size - 4);
1522 assert((write_domain & (write_domain - 1)) == 0);
1524 /* Make sure that we're not adding a reloc to something whose size has
1525 * already been accounted for.
1526 */
1527 assert(!bo_gem->used_as_reloc_target);
1528 if (target_bo_gem != bo_gem) {
1529 target_bo_gem->used_as_reloc_target = true;
1530 bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
1531 }
1532 /* An object needing a fence is a tiled buffer, so it won't have
1533 * relocs to other buffers.
1534 */
1535 if (need_fence)
1536 target_bo_gem->reloc_tree_fences = 1;
1537 bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences;
1539 bo_gem->relocs[bo_gem->reloc_count].offset = offset;
1540 bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
1541 bo_gem->relocs[bo_gem->reloc_count].target_handle =
1542 target_bo_gem->gem_handle;
1543 bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
1544 bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
1545 bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset;
1547 bo_gem->reloc_target_info[bo_gem->reloc_count].bo = target_bo;
1548 if (target_bo != bo)
1549 drm_intel_gem_bo_reference(target_bo);
1550 if (fenced_command)
1551 bo_gem->reloc_target_info[bo_gem->reloc_count].flags =
1552 DRM_INTEL_RELOC_FENCE;
1553 else
1554 bo_gem->reloc_target_info[bo_gem->reloc_count].flags = 0;
1556 bo_gem->reloc_count++;
1558 return 0;
1559 }
1561 static int
1562 drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
1563 drm_intel_bo *target_bo, uint32_t target_offset,
1564 uint32_t read_domains, uint32_t write_domain)
1565 {
1566 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
1568 return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
1569 read_domains, write_domain,
1570 !bufmgr_gem->fenced_relocs);
1571 }
1573 static int
1574 drm_intel_gem_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
1575 drm_intel_bo *target_bo,
1576 uint32_t target_offset,
1577 uint32_t read_domains, uint32_t write_domain)
1578 {
1579 return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
1580 read_domains, write_domain, true);
1581 }
1583 int
1584 drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo)
1585 {
1586 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1588 return bo_gem->reloc_count;
1589 }
1591 /**
1592 * Removes existing relocation entries in the BO after "start".
1593 *
1594 * This allows a user to avoid a two-step process for state setup with
1595 * counting up all the buffer objects and doing a
1596 * drm_intel_bufmgr_check_aperture_space() before emitting any of the
1597 * relocations for the state setup. Instead, save the state of the
1598 * batchbuffer including drm_intel_gem_get_reloc_count(), emit all the
1599 * state, and then check if it still fits in the aperture.
1600 *
1601 * Any further drm_intel_bufmgr_check_aperture_space() queries
1602 * involving this buffer in the tree are undefined after this call.
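*
* Illustrative sketch, assuming batch is the batchbuffer BO:
*
*	int start = drm_intel_gem_bo_get_reloc_count(batch);
*	... emit state and relocations ...
*	if (drm_intel_bufmgr_check_aperture_space(&batch, 1) != 0)
*		drm_intel_gem_bo_clear_relocs(batch, start);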
1603 */
1604 void
1605 drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start)
1606 {
1607 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1608 int i;
1609 struct timespec time;
1611 clock_gettime(CLOCK_MONOTONIC, &time);
1613 assert(bo_gem->reloc_count >= start);
1614 /* Unreference the cleared target buffers */
1615 for (i = start; i < bo_gem->reloc_count; i++) {
1616 if (bo_gem->reloc_target_info[i].bo != bo) {
1617 drm_intel_gem_bo_unreference_locked_timed(bo_gem->
1618 reloc_target_info[i].bo,
1619 time.tv_sec);
1620 }
1621 }
1622 bo_gem->reloc_count = start;
1623 }
1625 /**
1626 * Walk the tree of relocations rooted at BO and accumulate the list of
1627 * validations to be performed and update the relocation buffers with
1628 * index values into the validation list.
1629 */
1630 static void
1631 drm_intel_gem_bo_process_reloc(drm_intel_bo *bo)
1632 {
1633 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1634 int i;
1636 if (bo_gem->relocs == NULL)
1637 return;
1639 for (i = 0; i < bo_gem->reloc_count; i++) {
1640 drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
1642 if (target_bo == bo)
1643 continue;
1645 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1647 /* Continue walking the tree depth-first. */
1648 drm_intel_gem_bo_process_reloc(target_bo);
1650 /* Add the target to the validate list */
1651 drm_intel_add_validate_buffer(target_bo);
1652 }
1653 }
1655 static void
1656 drm_intel_gem_bo_process_reloc2(drm_intel_bo *bo)
1657 {
1658 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
1659 int i;
1661 if (bo_gem->relocs == NULL)
1662 return;
1664 for (i = 0; i < bo_gem->reloc_count; i++) {
1665 drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
1666 int need_fence;
1668 if (target_bo == bo)
1669 continue;
1671 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1673 /* Continue walking the tree depth-first. */
1674 drm_intel_gem_bo_process_reloc2(target_bo);
1676 need_fence = (bo_gem->reloc_target_info[i].flags &
1677 DRM_INTEL_RELOC_FENCE);
1679 /* Add the target to the validate list */
1680 drm_intel_add_validate_buffer2(target_bo, need_fence);
1681 }
1682 }
1685 static void
1686 drm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem)
1687 {
1688 int i;
1690 for (i = 0; i < bufmgr_gem->exec_count; i++) {
1691 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
1692 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1694 /* Update the buffer offset */
1695 if (bufmgr_gem->exec_objects[i].offset != bo->offset) {
1696 DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
1697 bo_gem->gem_handle, bo_gem->name, bo->offset,
1698 (unsigned long long)bufmgr_gem->exec_objects[i].
1699 offset);
1700 bo->offset = bufmgr_gem->exec_objects[i].offset;
1701 }
1702 }
1703 }
1705 static void
1706 drm_intel_update_buffer_offsets2 (drm_intel_bufmgr_gem *bufmgr_gem)
1707 {
1708 int i;
1710 for (i = 0; i < bufmgr_gem->exec_count; i++) {
1711 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
1712 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
1714 /* Update the buffer offset */
1715 if (bufmgr_gem->exec2_objects[i].offset != bo->offset) {
1716 DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
1717 bo_gem->gem_handle, bo_gem->name, bo->offset,
1718 (unsigned long long)bufmgr_gem->exec2_objects[i].offset);
1719 bo->offset = bufmgr_gem->exec2_objects[i].offset;
1720 }
1721 }
1722 }
1724 static void
1725 aub_out(drm_intel_bufmgr_gem *bufmgr_gem, uint32_t data)
1726 {
1727 fwrite(&data, 1, 4, bufmgr_gem->aub_file);
1728 }
1730 static void
1731 aub_out_data(drm_intel_bufmgr_gem *bufmgr_gem, void *data, size_t size)
1732 {
1733 fwrite(data, 1, size, bufmgr_gem->aub_file);
1734 }
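/*
 * Write a range of the BO into the AUB file, substituting the AUB
 * address of the relocation target at every dword that is a relocation
 * site, so the trace sees consistent addresses.
 */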
1736 static void
1737 aub_write_bo_data(drm_intel_bo *bo, uint32_t offset, uint32_t size)
1738 {
1739 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1740 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1741 uint32_t *data;
1742 unsigned int i;
1744 data = malloc(bo->size);
1745 drm_intel_bo_get_subdata(bo, offset, size, data);
1747 /* Easy mode: write out bo with no relocations */
1748 if (!bo_gem->reloc_count) {
1749 aub_out_data(bufmgr_gem, data, size);
1750 free(data);
1751 return;
1752 }
1754 /* Otherwise, handle the relocations while writing. */
1755 for (i = 0; i < size / 4; i++) {
1756 int r;
1757 for (r = 0; r < bo_gem->reloc_count; r++) {
1758 struct drm_i915_gem_relocation_entry *reloc;
1759 drm_intel_reloc_target *info;
1761 reloc = &bo_gem->relocs[r];
1762 info = &bo_gem->reloc_target_info[r];
1764 if (reloc->offset == offset + i * 4) {
1765 drm_intel_bo_gem *target_gem;
1766 uint32_t val;
1768 target_gem = (drm_intel_bo_gem *)info->bo;
1770 val = reloc->delta;
1771 val += target_gem->aub_offset;
1773 aub_out(bufmgr_gem, val);
1774 data[i] = val;
1775 break;
1776 }
1777 }
1778 if (r == bo_gem->reloc_count) {
1779 /* no relocation, just the data */
1780 aub_out(bufmgr_gem, data[i]);
1781 }
1782 }
1784 free(data);
1785 }
1787 static void
1788 aub_bo_get_address(drm_intel_bo *bo)
1789 {
1790 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1791 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1793 /* Give the object a graphics address in the AUB file. We
1794 * don't just use the GEM object address because we do AUB
1795 * dumping before execution -- we want to log successfully
1796 * even when the hardware might hang, and we might even want to
1797 * AUB-capture for a driver trying to execute on a different
1798 * generation of hardware by disabling the actual kernel exec
1799 * call.
1800 */
1801 bo_gem->aub_offset = bufmgr_gem->aub_offset;
1802 bufmgr_gem->aub_offset += bo->size;
1803 /* XXX: Handle aperture overflow. */
1804 assert(bufmgr_gem->aub_offset < 256 * 1024 * 1024);
1805 }
1807 static void
1808 aub_write_trace_block(drm_intel_bo *bo, uint32_t type, uint32_t subtype,
1809 uint32_t offset, uint32_t size)
1810 {
1811 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1812 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1814 aub_out(bufmgr_gem,
1815 CMD_AUB_TRACE_HEADER_BLOCK |
1816 (5 - 2));
1817 aub_out(bufmgr_gem,
1818 AUB_TRACE_MEMTYPE_GTT | type | AUB_TRACE_OP_DATA_WRITE);
1819 aub_out(bufmgr_gem, subtype);
1820 aub_out(bufmgr_gem, bo_gem->aub_offset + offset);
1821 aub_out(bufmgr_gem, size);
1822 aub_write_bo_data(bo, offset, size);
1823 }
1825 static void
1826 aub_write_bo(drm_intel_bo *bo)
1827 {
1828 uint32_t block_size;
1829 uint32_t offset;
1831 aub_bo_get_address(bo);
1833 /* Break up large objects into multiple writes. Otherwise a
1834 * 128 KiB VBO would overflow the 16-bit size field in the
1835 * packet header and everything goes badly after that.
1836 */
1837 for (offset = 0; offset < bo->size; offset += block_size) {
1838 block_size = bo->size - offset;
1840 if (block_size > 8 * 4096)
1841 block_size = 8 * 4096;
1843 aub_write_trace_block(bo, AUB_TRACE_TYPE_NOTYPE, 0,
1844 offset, block_size);
1845 }
1846 }
1848 /*
1849 * Make a ring buffer on the fly and dump it.
1850 */
1851 static void
1852 aub_build_dump_ringbuffer(drm_intel_bufmgr_gem *bufmgr_gem,
1853 uint32_t batch_buffer, int ring_flag)
1854 {
1855 uint32_t ringbuffer[4096];
1856 int ring = AUB_TRACE_TYPE_RING_PRB0; /* The default ring */
1857 int ring_count = 0;
1859 if (ring_flag == I915_EXEC_BSD)
1860 ring = AUB_TRACE_TYPE_RING_PRB1;
1862 /* Make a ring buffer to execute our batchbuffer. */
1863 memset(ringbuffer, 0, sizeof(ringbuffer));
1864 ringbuffer[ring_count++] = AUB_MI_BATCH_BUFFER_START;
1865 ringbuffer[ring_count++] = batch_buffer;
1867 /* Write out the ring. This appears to trigger execution of
1868 * the ring in the simulator.
1869 */
1870 aub_out(bufmgr_gem,
1871 CMD_AUB_TRACE_HEADER_BLOCK |
1872 (5 - 2));
1873 aub_out(bufmgr_gem,
1874 AUB_TRACE_MEMTYPE_GTT | ring | AUB_TRACE_OP_COMMAND_WRITE);
1875 aub_out(bufmgr_gem, 0); /* general/surface subtype */
1876 aub_out(bufmgr_gem, bufmgr_gem->aub_offset);
1877 aub_out(bufmgr_gem, ring_count * 4);
1879 /* FIXME: Need some flush operations here? */
1880 aub_out_data(bufmgr_gem, ringbuffer, ring_count * 4);
1882 /* Update offset pointer */
1883 bufmgr_gem->aub_offset += 4096;
1884 }
1886 void
1887 drm_intel_gem_bo_aub_dump_bmp(drm_intel_bo *bo,
1888 int x1, int y1, int width, int height,
1889 enum aub_dump_bmp_format format,
1890 int pitch, int offset)
1891 {
1892 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1893 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
1894 uint32_t cpp;
1896 switch (format) {
1897 case AUB_DUMP_BMP_FORMAT_8BIT:
1898 cpp = 1;
1899 break;
1900 case AUB_DUMP_BMP_FORMAT_ARGB_4444:
1901 cpp = 2;
1902 break;
1903 case AUB_DUMP_BMP_FORMAT_ARGB_0888:
1904 case AUB_DUMP_BMP_FORMAT_ARGB_8888:
1905 cpp = 4;
1906 break;
1907 default:
1908 printf("Unknown AUB dump format %d\n", format);
1909 return;
1910 }
1912 if (!bufmgr_gem->aub_file)
1913 return;
1915 aub_out(bufmgr_gem, CMD_AUB_DUMP_BMP | 4);
1916 aub_out(bufmgr_gem, (y1 << 16) | x1);
1917 aub_out(bufmgr_gem,
1918 (format << 24) |
1919 (cpp << 19) |
1920 pitch / 4);
1921 aub_out(bufmgr_gem, (height << 16) | width);
1922 aub_out(bufmgr_gem, bo_gem->aub_offset + offset);
1923 aub_out(bufmgr_gem,
1924 ((bo_gem->tiling_mode != I915_TILING_NONE) ? (1 << 2) : 0) |
1925 ((bo_gem->tiling_mode == I915_TILING_Y) ? (1 << 3) : 0));
1926 }
static void
aub_exec(drm_intel_bo *bo, int ring_flag, int used)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int i;

	if (!bufmgr_gem->aub_file)
		return;

	/* Write out all but the batchbuffer to AUB memory */
	for (i = 0; i < bufmgr_gem->exec_count - 1; i++) {
		if (bufmgr_gem->exec_bos[i] != bo)
			aub_write_bo(bufmgr_gem->exec_bos[i]);
	}

	aub_bo_get_address(bo);

	/* Dump the batchbuffer. */
	aub_write_trace_block(bo, AUB_TRACE_TYPE_BATCH, 0,
			      0, used);
	aub_write_trace_block(bo, AUB_TRACE_TYPE_NOTYPE, 0,
			      used, bo->size - used);

	/* Dump ring buffer */
	aub_build_dump_ringbuffer(bufmgr_gem, bo_gem->aub_offset, ring_flag);

	fflush(bufmgr_gem->aub_file);

	/*
	 * One frame has been dumped, so reset the aub_offset for the next
	 * frame.
	 *
	 * FIXME: Can we do this?
	 */
	bufmgr_gem->aub_offset = 0x10000;
}
static int
drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
		      drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_execbuffer execbuf;
	int ret, i;

	if (bo_gem->has_error)
		return -ENOMEM;

	pthread_mutex_lock(&bufmgr_gem->lock);
	/* Update indices and set up the validate list. */
	drm_intel_gem_bo_process_reloc(bo);

	/* Add the batch buffer to the validation list.  There are no
	 * relocations pointing to it.
	 */
	drm_intel_add_validate_buffer(bo);

	VG_CLEAR(execbuf);
	execbuf.buffers_ptr = (uintptr_t) bufmgr_gem->exec_objects;
	execbuf.buffer_count = bufmgr_gem->exec_count;
	execbuf.batch_start_offset = 0;
	execbuf.batch_len = used;
	execbuf.cliprects_ptr = (uintptr_t) cliprects;
	execbuf.num_cliprects = num_cliprects;
	execbuf.DR1 = 0;
	execbuf.DR4 = DR4;

	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_EXECBUFFER,
		       &execbuf);
	if (ret != 0) {
		ret = -errno;
		if (errno == ENOSPC) {
			DBG("Execbuffer fails to pin. "
			    "Estimate: %u. Actual: %u. Available: %u\n",
			    drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
							       bufmgr_gem->exec_count),
			    drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
							      bufmgr_gem->exec_count),
			    (unsigned int)bufmgr_gem->gtt_size);
		}
	}
	drm_intel_update_buffer_offsets(bufmgr_gem);

	if (bufmgr_gem->bufmgr.debug)
		drm_intel_gem_dump_validation_list(bufmgr_gem);

	for (i = 0; i < bufmgr_gem->exec_count; i++) {
		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

		/* Disconnect the buffer from the validate list */
		bo_gem->validate_index = -1;
		bufmgr_gem->exec_bos[i] = NULL;
	}
	bufmgr_gem->exec_count = 0;
	pthread_mutex_unlock(&bufmgr_gem->lock);

	return ret;
}
static int
drm_intel_gem_bo_mrb_exec2(drm_intel_bo *bo, int used,
			   drm_clip_rect_t *cliprects, int num_cliprects,
			   int DR4, unsigned int flags)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
	struct drm_i915_gem_execbuffer2 execbuf;
	int ret = 0;
	int i;

	switch (flags & 0x7) {
	default:
		return -EINVAL;
	case I915_EXEC_BLT:
		if (!bufmgr_gem->has_blt)
			return -EINVAL;
		break;
	case I915_EXEC_BSD:
		if (!bufmgr_gem->has_bsd)
			return -EINVAL;
		break;
	case I915_EXEC_RENDER:
	case I915_EXEC_DEFAULT:
		break;
	}

	pthread_mutex_lock(&bufmgr_gem->lock);
	/* Update indices and set up the validate list. */
	drm_intel_gem_bo_process_reloc2(bo);

	/* Add the batch buffer to the validation list.  There are no
	 * relocations pointing to it.
	 */
	drm_intel_add_validate_buffer2(bo, 0);

	VG_CLEAR(execbuf);
	execbuf.buffers_ptr = (uintptr_t)bufmgr_gem->exec2_objects;
	execbuf.buffer_count = bufmgr_gem->exec_count;
	execbuf.batch_start_offset = 0;
	execbuf.batch_len = used;
	execbuf.cliprects_ptr = (uintptr_t)cliprects;
	execbuf.num_cliprects = num_cliprects;
	execbuf.DR1 = 0;
	execbuf.DR4 = DR4;
	execbuf.flags = flags;
	execbuf.rsvd1 = 0;
	execbuf.rsvd2 = 0;

	aub_exec(bo, flags, used);

	if (bufmgr_gem->no_exec)
		goto skip_execution;

	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_EXECBUFFER2,
		       &execbuf);
	if (ret != 0) {
		ret = -errno;
		if (ret == -ENOSPC) {
			DBG("Execbuffer fails to pin. "
			    "Estimate: %u. Actual: %u. Available: %u\n",
			    drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
							       bufmgr_gem->exec_count),
			    drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
							      bufmgr_gem->exec_count),
			    (unsigned int) bufmgr_gem->gtt_size);
		}
	}
	drm_intel_update_buffer_offsets2(bufmgr_gem);

skip_execution:
	if (bufmgr_gem->bufmgr.debug)
		drm_intel_gem_dump_validation_list(bufmgr_gem);

	for (i = 0; i < bufmgr_gem->exec_count; i++) {
		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;

		/* Disconnect the buffer from the validate list */
		bo_gem->validate_index = -1;
		bufmgr_gem->exec_bos[i] = NULL;
	}
	bufmgr_gem->exec_count = 0;
	pthread_mutex_unlock(&bufmgr_gem->lock);

	return ret;
}
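
/*
 * Usage sketch (hypothetical caller code; batch_bo and used_bytes are
 * assumptions): blitter work is submitted through the public
 * drm_intel_bo_mrb_exec() entry point with I915_EXEC_BLT, and, as the
 * switch above shows, is rejected with -EINVAL when the kernel never
 * reported a BLT ring, so the caller can fall back to the render ring:
 *
 *	int ret = drm_intel_bo_mrb_exec(batch_bo, used_bytes,
 *					NULL, 0, 0, I915_EXEC_BLT);
 *	if (ret == -EINVAL)
 *		ret = drm_intel_bo_mrb_exec(batch_bo, used_bytes,
 *					    NULL, 0, 0, I915_EXEC_RENDER);
 */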
static int
drm_intel_gem_bo_exec2(drm_intel_bo *bo, int used,
		       drm_clip_rect_t *cliprects, int num_cliprects,
		       int DR4)
{
	return drm_intel_gem_bo_mrb_exec2(bo, used,
					  cliprects, num_cliprects, DR4,
					  I915_EXEC_RENDER);
}
static int
drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_pin pin;
	int ret;

	VG_CLEAR(pin);
	pin.handle = bo_gem->gem_handle;
	pin.alignment = alignment;

	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_PIN,
		       &pin);
	if (ret != 0)
		return -errno;

	bo->offset = pin.offset;
	return 0;
}
static int
drm_intel_gem_bo_unpin(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_unpin unpin;
	int ret;

	VG_CLEAR(unpin);
	unpin.handle = bo_gem->gem_handle;

	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
	if (ret != 0)
		return -errno;

	return 0;
}
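
/*
 * Usage sketch (hypothetical; the helper name is an assumption): pinning
 * fixes a buffer at a stable GTT offset, e.g. for a scanout surface; the
 * offset is reported back in bo->offset, and the matching unpin releases
 * it again:
 *
 *	if (drm_intel_bo_pin(scanout_bo, 4096) == 0) {
 *		unsigned long gtt_offset = scanout_bo->offset;
 *		program_display_base(gtt_offset);  (hypothetical helper)
 *		drm_intel_bo_unpin(scanout_bo);
 *	}
 */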
static int
drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
				     uint32_t tiling_mode,
				     uint32_t stride)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_set_tiling set_tiling;
	int ret;

	if (bo_gem->global_name == 0 &&
	    tiling_mode == bo_gem->tiling_mode &&
	    stride == bo_gem->stride)
		return 0;

	memset(&set_tiling, 0, sizeof(set_tiling));
	do {
		/* set_tiling is slightly broken and overwrites the
		 * input on the error path, so we have to open code
		 * drmIoctl.
		 */
		set_tiling.handle = bo_gem->gem_handle;
		set_tiling.tiling_mode = tiling_mode;
		set_tiling.stride = stride;

		ret = ioctl(bufmgr_gem->fd,
			    DRM_IOCTL_I915_GEM_SET_TILING,
			    &set_tiling);
	} while (ret == -1 && (errno == EINTR || errno == EAGAIN));
	if (ret == -1)
		return -errno;

	bo_gem->tiling_mode = set_tiling.tiling_mode;
	bo_gem->swizzle_mode = set_tiling.swizzle_mode;
	bo_gem->stride = set_tiling.stride;
	return 0;
}
static int
drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
			    uint32_t stride)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int ret;

	/* Linear buffers have no stride. By ensuring that we only ever use
	 * stride 0 with linear buffers, we simplify our code.
	 */
	if (*tiling_mode == I915_TILING_NONE)
		stride = 0;

	ret = drm_intel_gem_bo_set_tiling_internal(bo, *tiling_mode, stride);
	if (ret == 0)
		drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);

	*tiling_mode = bo_gem->tiling_mode;
	return ret;
}
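
/*
 * Usage sketch (hypothetical; pitch and the fallback are assumptions):
 * tiling_mode is an in/out parameter, so a caller must re-check it after
 * the call; the kernel is free to refuse the requested mode and keep the
 * buffer linear:
 *
 *	uint32_t tiling = I915_TILING_X;
 *	int ret = drm_intel_bo_set_tiling(bo, &tiling, pitch);
 *	if (ret == 0 && tiling == I915_TILING_NONE)
 *		use_linear_layout();               (hypothetical fallback)
 */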
static int
drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
			    uint32_t * swizzle_mode)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	*tiling_mode = bo_gem->tiling_mode;
	*swizzle_mode = bo_gem->swizzle_mode;
	return 0;
}
static int
drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t * name)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int ret;

	if (!bo_gem->global_name) {
		struct drm_gem_flink flink;

		VG_CLEAR(flink);
		flink.handle = bo_gem->gem_handle;

		ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink);
		if (ret != 0)
			return -errno;

		bo_gem->global_name = flink.name;
		bo_gem->reusable = false;

		DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
	}

	*name = bo_gem->global_name;
	return 0;
}
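
/*
 * Usage sketch (hypothetical, two cooperating processes; the IPC helper
 * is an assumption): the exporter flinks the buffer to obtain a global
 * name and hands it to a peer, which opens it with
 * drm_intel_bo_gem_create_from_name(); flinked buffers are marked
 * non-reusable above, since another process may still hold them.
 *
 *	uint32_t name;
 *	if (drm_intel_bo_flink(bo, &name) == 0)
 *		send_to_peer(name);                (hypothetical IPC)
 *
 *	drm_intel_bo *shared =
 *		drm_intel_bo_gem_create_from_name(bufmgr, "shared", name);
 */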
/**
 * Enables unlimited caching of buffer objects for reuse.
 *
 * This is potentially very memory expensive, as the cache at each bucket
 * size is only bounded by how many buffers of that size we've managed to have
 * in flight at once.
 */
void
drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;

	bufmgr_gem->bo_reuse = true;
}
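
/*
 * Usage sketch (hypothetical; fd and the batch size are assumptions):
 * reuse is off by default, so a client that churns through buffers would
 * typically enable it right after creating the buffer manager:
 *
 *	drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 16 * 1024);
 *	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
 */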
/**
 * Enable use of the fenced reloc type.
 *
 * New code should enable this to avoid unnecessary fence register
 * allocation.  If this option is not enabled, all relocs will have a
 * fence register allocated.
 */
void
drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;

	if (bufmgr_gem->bufmgr.bo_exec == drm_intel_gem_bo_exec2)
		bufmgr_gem->fenced_relocs = true;
}
/**
 * Return the additional aperture space required by the tree of buffer objects
 * rooted at bo.
 */
static int
drm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int i;
	int total = 0;

	if (bo == NULL || bo_gem->included_in_check_aperture)
		return 0;

	total += bo->size;
	bo_gem->included_in_check_aperture = true;

	for (i = 0; i < bo_gem->reloc_count; i++)
		total += drm_intel_gem_bo_get_aperture_space(
			bo_gem->reloc_target_info[i].bo);

	return total;
}
/**
 * Count the number of buffers in this list that need a fence reg.
 *
 * If the count is greater than the number of available regs, we'll have
 * to ask the caller to resubmit a batch with fewer tiled buffers.
 *
 * This function over-counts if the same buffer is used multiple times.
 */
static unsigned int
drm_intel_gem_total_fences(drm_intel_bo ** bo_array, int count)
{
	int i;
	unsigned int total = 0;

	for (i = 0; i < count; i++) {
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];

		if (bo_gem == NULL)
			continue;

		total += bo_gem->reloc_tree_fences;
	}
	return total;
}
/**
 * Clear the flag set by drm_intel_gem_bo_get_aperture_space() so we're ready
 * for the next drm_intel_bufmgr_check_aperture_space() call.
 */
static void
drm_intel_gem_bo_clear_aperture_space_flag(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int i;

	if (bo == NULL || !bo_gem->included_in_check_aperture)
		return;

	bo_gem->included_in_check_aperture = false;

	for (i = 0; i < bo_gem->reloc_count; i++)
		drm_intel_gem_bo_clear_aperture_space_flag(
			bo_gem->reloc_target_info[i].bo);
}
/**
 * Return a conservative estimate for the amount of aperture required
 * for a collection of buffers.  This may double-count some buffers.
 */
static unsigned int
drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count)
{
	int i;
	unsigned int total = 0;

	for (i = 0; i < count; i++) {
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];

		if (bo_gem != NULL)
			total += bo_gem->reloc_tree_size;
	}
	return total;
}
/**
 * Return the amount of aperture needed for a collection of buffers.
 * This avoids double counting any buffers, at the cost of looking
 * at every buffer in the set.
 */
static unsigned int
drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count)
{
	int i;
	unsigned int total = 0;

	for (i = 0; i < count; i++) {
		total += drm_intel_gem_bo_get_aperture_space(bo_array[i]);
		/* For the first buffer object in the array, we get an
		 * accurate count back for its reloc_tree size (since nothing
		 * had been flagged as being counted yet).  We can save that
		 * value out as a more conservative reloc_tree_size that
		 * avoids double-counting target buffers.  Since the first
		 * buffer happens to usually be the batch buffer in our
		 * callers, this can pull us back from doing the tree
		 * walk on every new batch emit.
		 */
		if (i == 0) {
			drm_intel_bo_gem *bo_gem =
			    (drm_intel_bo_gem *) bo_array[i];
			bo_gem->reloc_tree_size = total;
		}
	}

	for (i = 0; i < count; i++)
		drm_intel_gem_bo_clear_aperture_space_flag(bo_array[i]);
	return total;
}
/**
 * Return -ENOSPC if the batchbuffer should be flushed before attempting to
 * emit rendering referencing the buffers pointed to by bo_array.
 *
 * This is required because if we try to emit a batchbuffer with relocations
 * to a tree of buffers that won't simultaneously fit in the aperture,
 * the rendering will return an error at a point where the software is not
 * prepared to recover from it.
 *
 * However, we also want to emit the batchbuffer significantly before we reach
 * the limit, as a series of batchbuffers each of which references buffers
 * covering almost all of the aperture means that at each emit we end up
 * waiting to evict a buffer from the last rendering, and we get synchronous
 * performance.  By emitting smaller batchbuffers, we eat some CPU overhead to
 * get better parallelism.
 */
static int
drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count)
{
	drm_intel_bufmgr_gem *bufmgr_gem =
	    (drm_intel_bufmgr_gem *) bo_array[0]->bufmgr;
	unsigned int total = 0;
	unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4;
	int total_fences;

	/* Check for fence reg constraints if necessary */
	if (bufmgr_gem->available_fences) {
		total_fences = drm_intel_gem_total_fences(bo_array, count);
		if (total_fences > bufmgr_gem->available_fences)
			return -ENOSPC;
	}

	total = drm_intel_gem_estimate_batch_space(bo_array, count);

	if (total > threshold)
		total = drm_intel_gem_compute_batch_space(bo_array, count);

	if (total > threshold) {
		DBG("check_space: overflowed available aperture, "
		    "%dkb vs %dkb\n",
		    total / 1024, (int)bufmgr_gem->gtt_size / 1024);
		return -ENOSPC;
	} else {
		DBG("drm_check_space: total %dkb vs bufgr %dkb\n", total / 1024,
		    (int)bufmgr_gem->gtt_size / 1024);
		return 0;
	}
}
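
/*
 * Usage sketch (hypothetical caller code; the buffer names and helper are
 * assumptions): before emitting rendering that references a set of
 * buffers, a client checks the whole set and flushes the current batch
 * when the aperture would overflow:
 *
 *	drm_intel_bo *bos[] = { batch_bo, texture_bo, target_bo };
 *
 *	if (drm_intel_bufmgr_check_aperture_space(bos, 3) != 0)
 *		flush_batch();                     (hypothetical helper)
 */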
/*
 * Disable buffer reuse for objects which are shared with the kernel
 * as scanout buffers.
 */
static int
drm_intel_gem_bo_disable_reuse(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	bo_gem->reusable = false;
	return 0;
}
static int
drm_intel_gem_bo_is_reusable(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	return bo_gem->reusable;
}

static int
_drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int i;

	for (i = 0; i < bo_gem->reloc_count; i++) {
		if (bo_gem->reloc_target_info[i].bo == target_bo)
			return 1;
		if (bo == bo_gem->reloc_target_info[i].bo)
			continue;
		if (_drm_intel_gem_bo_references(bo_gem->reloc_target_info[i].bo,
						 target_bo))
			return 1;
	}

	return 0;
}

/** Return true if target_bo is referenced by bo's relocation tree. */
static int
drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
{
	drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;

	if (bo == NULL || target_bo == NULL)
		return 0;
	if (target_bo_gem->used_as_reloc_target)
		return _drm_intel_gem_bo_references(bo, target_bo);
	return 0;
}
static void
add_bucket(drm_intel_bufmgr_gem *bufmgr_gem, int size)
{
	unsigned int i = bufmgr_gem->num_buckets;

	assert(i < ARRAY_SIZE(bufmgr_gem->cache_bucket));

	DRMINITLISTHEAD(&bufmgr_gem->cache_bucket[i].head);
	bufmgr_gem->cache_bucket[i].size = size;
	bufmgr_gem->num_buckets++;
}

static void
init_cache_buckets(drm_intel_bufmgr_gem *bufmgr_gem)
{
	unsigned long size, cache_max_size = 64 * 1024 * 1024;

	/* OK, so power of two buckets was too wasteful of memory.
	 * Give 3 other sizes between each power of two, to hopefully
	 * cover things accurately enough.  (The alternative is
	 * probably to just go for exact matching of sizes, and assume
	 * that for things like composited window resize the tiled
	 * width/height alignment and rounding of sizes to pages will
	 * get us useful cache hit rates anyway)
	 */
	add_bucket(bufmgr_gem, 4096);
	add_bucket(bufmgr_gem, 4096 * 2);
	add_bucket(bufmgr_gem, 4096 * 3);

	/* Initialize the linked lists for BO reuse cache. */
	for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
		add_bucket(bufmgr_gem, size);

		add_bucket(bufmgr_gem, size + size * 1 / 4);
		add_bucket(bufmgr_gem, size + size * 2 / 4);
		add_bucket(bufmgr_gem, size + size * 3 / 4);
	}
}
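
/*
 * Worked example (illustrative only): the code above produces bucket
 * sizes of 4kB, 8kB and 12kB, then each power of two plus its 1.25x,
 * 1.5x and 1.75x variants:
 *
 *	16kB  20kB  24kB  28kB
 *	32kB  40kB  48kB  56kB
 *	64kB  80kB  96kB  112kB
 *	... up to the 64MB cache_max_size, whose row ends at 112MB.
 */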
void
drm_intel_bufmgr_gem_set_vma_cache_size(drm_intel_bufmgr *bufmgr, int limit)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;

	bufmgr_gem->vma_max = limit;

	drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
}
/**
 * Get the PCI ID for the device.  This can be overridden by setting the
 * INTEL_DEVID_OVERRIDE environment variable to the desired ID.
 */
static int
get_pci_device_id(drm_intel_bufmgr_gem *bufmgr_gem)
{
	char *devid_override;
	int devid = 0;
	int ret;
	drm_i915_getparam_t gp;

	if (geteuid() == getuid()) {
		devid_override = getenv("INTEL_DEVID_OVERRIDE");
		if (devid_override) {
			bufmgr_gem->no_exec = true;
			/* Parse with base 0 so that both decimal and
			 * 0x-prefixed hexadecimal IDs are accepted.
			 */
			return strtol(devid_override, NULL, 0);
		}
	}

	VG_CLEAR(gp);
	gp.param = I915_PARAM_CHIPSET_ID;
	gp.value = &devid;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	if (ret) {
		fprintf(stderr, "get chip id failed: %d [%d]\n", ret, errno);
		fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
	}
	return devid;
}
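
/*
 * Usage sketch (hypothetical; the device ID value is an assumption): the
 * override lets a client pretend to run on another chipset, e.g. from a
 * test harness; note that it also sets no_exec, so batchbuffers are not
 * actually submitted to the kernel:
 *
 *	setenv("INTEL_DEVID_OVERRIDE", "0x0166", 1);
 *	bufmgr = drm_intel_bufmgr_gem_init(fd, 16 * 1024);
 */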
int
drm_intel_bufmgr_gem_get_devid(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;

	return bufmgr_gem->pci_device;
}
/**
 * Sets up AUB dumping.
 *
 * This is a trace file format that can be used with the simulator.
 * Packets are emitted in a format somewhat like GPU command packets.
 * You can set up a GTT and upload your objects into the referenced
 * space, then send off batchbuffers and get BMPs out the other end.
 */
void
drm_intel_bufmgr_gem_set_aub_dump(drm_intel_bufmgr *bufmgr, int enable)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
	int entry = 0x200003;
	int i;
	int gtt_size = 0x10000;

	if (!enable) {
		if (bufmgr_gem->aub_file) {
			fclose(bufmgr_gem->aub_file);
			bufmgr_gem->aub_file = NULL;
		}
		/* Without this return we would fall through and
		 * immediately reopen the file below.
		 */
		return;
	}

	if (geteuid() != getuid())
		return;

	bufmgr_gem->aub_file = fopen("intel.aub", "w+");
	if (!bufmgr_gem->aub_file)
		return;

	/* Start allocating objects from just after the GTT. */
	bufmgr_gem->aub_offset = gtt_size;

	/* Start with a (required) version packet. */
	aub_out(bufmgr_gem, CMD_AUB_HEADER | (13 - 2));
	aub_out(bufmgr_gem,
		(4 << AUB_HEADER_MAJOR_SHIFT) |
		(0 << AUB_HEADER_MINOR_SHIFT));
	for (i = 0; i < 8; i++) {
		aub_out(bufmgr_gem, 0); /* app name */
	}
	aub_out(bufmgr_gem, 0); /* timestamp */
	aub_out(bufmgr_gem, 0); /* timestamp */
	aub_out(bufmgr_gem, 0); /* comment len */

	/* Set up the GTT. The max we can handle is 256M */
	aub_out(bufmgr_gem, CMD_AUB_TRACE_HEADER_BLOCK | (5 - 2));
	aub_out(bufmgr_gem,
		AUB_TRACE_MEMTYPE_NONLOCAL | 0 | AUB_TRACE_OP_DATA_WRITE);
	aub_out(bufmgr_gem, 0); /* subtype */
	aub_out(bufmgr_gem, 0); /* offset */
	aub_out(bufmgr_gem, gtt_size); /* size */
	for (i = 0x000; i < gtt_size; i += 4, entry += 0x1000) {
		aub_out(bufmgr_gem, entry);
	}
}
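
/*
 * Usage sketch (hypothetical; the workload helper is an assumption):
 * dumping is toggled around the workload of interest, and everything
 * executed in between lands in "intel.aub" in the current directory:
 *
 *	drm_intel_bufmgr_gem_set_aub_dump(bufmgr, 1);
 *	render_frame();                    (hypothetical workload)
 *	drm_intel_bufmgr_gem_set_aub_dump(bufmgr, 0);
 */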
/**
 * Initializes the GEM buffer manager, which uses the kernel to allocate,
 * map, and manage buffer objects.
 *
 * \param fd File descriptor of the opened DRM device.
 * \param batch_size Size in bytes of the batchbuffers the client will
 *	submit; used to size the relocation lists.
 */
drm_intel_bufmgr *
drm_intel_bufmgr_gem_init(int fd, int batch_size)
{
	drm_intel_bufmgr_gem *bufmgr_gem;
	struct drm_i915_gem_get_aperture aperture;
	drm_i915_getparam_t gp;
	int ret, tmp;
	bool exec2 = false;

	bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
	if (bufmgr_gem == NULL)
		return NULL;

	bufmgr_gem->fd = fd;

	if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
		free(bufmgr_gem);
		return NULL;
	}

	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_GET_APERTURE,
		       &aperture);

	if (ret == 0)
		bufmgr_gem->gtt_size = aperture.aper_available_size;
	else {
		fprintf(stderr, "DRM_IOCTL_I915_GEM_GET_APERTURE failed: %s\n",
			strerror(errno));
		bufmgr_gem->gtt_size = 128 * 1024 * 1024;
		fprintf(stderr, "Assuming %dkB available aperture size.\n"
			"May lead to reduced performance or incorrect "
			"rendering.\n",
			(int)bufmgr_gem->gtt_size / 1024);
	}

	bufmgr_gem->pci_device = get_pci_device_id(bufmgr_gem);

	if (IS_GEN2(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 2;
	else if (IS_GEN3(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 3;
	else if (IS_GEN4(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 4;
	else if (IS_GEN5(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 5;
	else if (IS_GEN6(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 6;
	else if (IS_GEN7(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 7;
	else
		assert(0);

	if (IS_GEN3(bufmgr_gem->pci_device) &&
	    bufmgr_gem->gtt_size > 256*1024*1024) {
		/* The unmappable part of gtt on gen 3 (i.e. above 256MB) can't
		 * be used for tiled blits. To simplify the accounting, just
		 * subtract the unmappable part (fixed to 256MB on all known
		 * gen3 devices) if the kernel advertises it. */
		bufmgr_gem->gtt_size -= 256*1024*1024;
	}
	gp.value = &tmp;

	gp.param = I915_PARAM_HAS_EXECBUF2;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	if (!ret)
		exec2 = true;

	gp.param = I915_PARAM_HAS_BSD;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	bufmgr_gem->has_bsd = ret == 0;

	gp.param = I915_PARAM_HAS_BLT;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	bufmgr_gem->has_blt = ret == 0;

	gp.param = I915_PARAM_HAS_RELAXED_FENCING;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	bufmgr_gem->has_relaxed_fencing = ret == 0;

	gp.param = I915_PARAM_HAS_LLC;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	if (ret != 0) {
		/* Kernel does not support the HAS_LLC query (note that
		 * drmIoctl returns -1 with errno set, never -EINVAL, so
		 * we check for any failure here); fall back to GPU
		 * generation detection and assume that GEN6/7 have LLC.
		 */
		bufmgr_gem->has_llc = (IS_GEN6(bufmgr_gem->pci_device) ||
				       IS_GEN7(bufmgr_gem->pci_device));
	} else
		bufmgr_gem->has_llc = *gp.value;
	if (bufmgr_gem->gen < 4) {
		gp.param = I915_PARAM_NUM_FENCES_AVAIL;
		gp.value = &bufmgr_gem->available_fences;
		ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
		if (ret) {
			fprintf(stderr, "get fences failed: %d [%d]\n", ret,
				errno);
			fprintf(stderr, "param: %d, val: %d\n", gp.param,
				*gp.value);
			bufmgr_gem->available_fences = 0;
		} else {
			/* XXX The kernel reports the total number of fences,
			 * including any that may be pinned.
			 *
			 * We presume that there will be at least one pinned
			 * fence for the scanout buffer, but there may be more
			 * than one scanout and the user may be manually
			 * pinning buffers. Let's move to execbuffer2 and
			 * thereby forget the insanity of using fences...
			 */
			bufmgr_gem->available_fences -= 2;
			if (bufmgr_gem->available_fences < 0)
				bufmgr_gem->available_fences = 0;
		}
	}
	/* Let's go with one relocation per every 2 dwords (but round down a bit
	 * since a power of two will mean an extra page allocation for the reloc
	 * buffer).
	 *
	 * Every 4 was too few for the blender benchmark.
	 */
	bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;

	bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc;
	bufmgr_gem->bufmgr.bo_alloc_for_render =
	    drm_intel_gem_bo_alloc_for_render;
	bufmgr_gem->bufmgr.bo_alloc_tiled = drm_intel_gem_bo_alloc_tiled;
	bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference;
	bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference;
	bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map;
	bufmgr_gem->bufmgr.bo_unmap = drm_intel_gem_bo_unmap;
	bufmgr_gem->bufmgr.bo_subdata = drm_intel_gem_bo_subdata;
	bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata;
	bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering;
	bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc;
	bufmgr_gem->bufmgr.bo_emit_reloc_fence = drm_intel_gem_bo_emit_reloc_fence;
	bufmgr_gem->bufmgr.bo_pin = drm_intel_gem_bo_pin;
	bufmgr_gem->bufmgr.bo_unpin = drm_intel_gem_bo_unpin;
	bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling;
	bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
	bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
	/* Use the new one if available */
	if (exec2) {
		bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec2;
		bufmgr_gem->bufmgr.bo_mrb_exec = drm_intel_gem_bo_mrb_exec2;
	} else
		bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
	bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
	bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise;
	bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_destroy;
	bufmgr_gem->bufmgr.debug = 0;
	bufmgr_gem->bufmgr.check_aperture_space =
	    drm_intel_gem_check_aperture_space;
	bufmgr_gem->bufmgr.bo_disable_reuse = drm_intel_gem_bo_disable_reuse;
	bufmgr_gem->bufmgr.bo_is_reusable = drm_intel_gem_bo_is_reusable;
	bufmgr_gem->bufmgr.get_pipe_from_crtc_id =
	    drm_intel_gem_get_pipe_from_crtc_id;
	bufmgr_gem->bufmgr.bo_references = drm_intel_gem_bo_references;

	DRMINITLISTHEAD(&bufmgr_gem->named);
	init_cache_buckets(bufmgr_gem);

	DRMINITLISTHEAD(&bufmgr_gem->vma_cache);
	bufmgr_gem->vma_max = -1; /* unlimited by default */

	return &bufmgr_gem->bufmgr;
}
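
/*
 * End-to-end usage sketch (hypothetical client, error handling elided;
 * the batch size and buffer name are assumptions): open a DRM device,
 * create the buffer manager, allocate and map a buffer, then tear
 * everything down again:
 *
 *	int fd = drmOpen("i915", NULL);
 *	drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 16 * 1024);
 *	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
 *
 *	drm_intel_bo *bo = drm_intel_bo_alloc(bufmgr, "scratch", 4096, 4096);
 *	drm_intel_bo_map(bo, 1);
 *	memset(bo->virtual, 0, 4096);
 *	drm_intel_bo_unmap(bo);
 *
 *	drm_intel_bo_unreference(bo);
 *	drm_intel_bufmgr_destroy(bufmgr);
 */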