1 /**************************************************************************
2 *
3 * Copyright © 2007 Red Hat Inc.
4 * Copyright © 2007 Intel Corporation
5 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * The above copyright notice and this permission notice (including the
25 * next paragraph) shall be included in all copies or substantial portions
26 * of the Software.
27 *
28 *
29 **************************************************************************/
30 /*
31 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
32 * Keith Whitwell <keithw-at-tungstengraphics-dot-com>
33 * Eric Anholt <eric@anholt.net>
34 * Dave Airlie <airlied@linux.ie>
35 */
37 #ifdef HAVE_CONFIG_H
38 #include "config.h"
39 #endif
41 #include <xf86drm.h>
42 #include <xf86atomic.h>
43 #include <fcntl.h>
44 #include <stdio.h>
45 #include <stdlib.h>
46 #include <string.h>
47 #include <unistd.h>
48 #include <assert.h>
49 #include <pthread.h>
50 #include <sys/ioctl.h>
51 #include <sys/mman.h>
52 #include <sys/stat.h>
53 #include <sys/types.h>
55 #include <errno.h>
56 #include "libdrm_lists.h"
57 #include "intel_bufmgr.h"
58 #include "intel_bufmgr_priv.h"
59 #include "intel_chipset.h"
62 #include "i915_drm.h"
64 #define DBG(...) do { \
65 if (bufmgr_gem->bufmgr.debug) \
66 fprintf(stderr, __VA_ARGS__); \
67 } while (0)
69 typedef struct _drm_intel_bo_gem drm_intel_bo_gem;
71 struct drm_intel_gem_bo_bucket {
72 drmMMListHead head;
73 unsigned long size;
74 };
76 /* Only cache objects up to 32MB. Bigger than that, and the rounding of the
77 * size makes many operations fail that wouldn't otherwise.
78 */
79 #define DRM_INTEL_GEM_BO_BUCKETS 14
80 typedef struct _drm_intel_bufmgr_gem {
81 drm_intel_bufmgr bufmgr;
83 int fd;
85 int max_relocs;
87 pthread_mutex_t lock;
89 struct drm_i915_gem_exec_object *exec_objects;
90 struct drm_i915_gem_exec_object2 *exec2_objects;
91 drm_intel_bo **exec_bos;
92 int exec_size;
93 int exec_count;
95 /** Array of lists of cached gem objects of power-of-two sizes */
96 struct drm_intel_gem_bo_bucket cache_bucket[DRM_INTEL_GEM_BO_BUCKETS];
98 uint64_t gtt_size;
99 int available_fences;
100 int pci_device;
101 int gen;
102 char bo_reuse;
103 char fenced_relocs;
104 } drm_intel_bufmgr_gem;
106 #define DRM_INTEL_RELOC_FENCE (1<<0)
108 typedef struct _drm_intel_reloc_target_info {
109 drm_intel_bo *bo;
110 int flags;
111 } drm_intel_reloc_target;
113 struct _drm_intel_bo_gem {
114 drm_intel_bo bo;
116 atomic_t refcount;
117 uint32_t gem_handle;
118 const char *name;
120 /**
121 * Kernel-assigned global name for this object
122 */
123 unsigned int global_name;
125 /**
126 * Index of the buffer within the validation list while preparing a
127 * batchbuffer execution.
128 */
129 int validate_index;
131 /**
132 * Current tiling mode
133 */
134 uint32_t tiling_mode;
135 uint32_t swizzle_mode;
137 time_t free_time;
139 /** Array passed to the DRM containing relocation information. */
140 struct drm_i915_gem_relocation_entry *relocs;
141 /**
142 * Array of info structs corresponding to relocs[i].target_handle etc
143 */
144 drm_intel_reloc_target *reloc_target_info;
145 /** Number of entries in relocs */
146 int reloc_count;
147 /** Mapped address for the buffer, saved across map/unmap cycles */
148 void *mem_virtual;
149 /** GTT virtual address for the buffer, saved across map/unmap cycles */
150 void *gtt_virtual;
152 /** BO cache list */
153 drmMMListHead head;
155 /**
156 * Boolean of whether this BO and its children have been included in
157 * the current drm_intel_bufmgr_check_aperture_space() total.
158 */
159 char included_in_check_aperture;
161 /**
162 * Boolean of whether this buffer has been used as a relocation
163 * target and had its size accounted for, and thus can't have any
164 * further relocations added to it.
165 */
166 char used_as_reloc_target;
168 /**
169 * Boolean of whether we have encountered an error whilst building the relocation tree.
170 */
171 char has_error;
173 /**
174 * Boolean of whether this buffer can be re-used
175 */
176 char reusable;
178 /**
179 * Size in bytes of this buffer and its relocation descendants.
180 *
181 * Used to avoid costly tree walking in
182 * drm_intel_bufmgr_check_aperture in the common case.
183 */
184 int reloc_tree_size;
186 /**
187 * Number of potential fence registers required by this buffer and its
188 * relocations.
189 */
190 int reloc_tree_fences;
191 };
193 static unsigned int
194 drm_intel_gem_estimate_batch_space(drm_intel_bo ** bo_array, int count);
196 static unsigned int
197 drm_intel_gem_compute_batch_space(drm_intel_bo ** bo_array, int count);
199 static int
200 drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
201 uint32_t * swizzle_mode);
203 static int
204 drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
205 uint32_t stride);
207 static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
208 time_t time);
210 static void drm_intel_gem_bo_unreference(drm_intel_bo *bo);
212 static void drm_intel_gem_bo_free(drm_intel_bo *bo);
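/**
 * Round a requested allocation size up to what the tiling hardware
 * requires: untiled sizes pass through unchanged, gen4+ only needs
 * page alignment, and older chips get the next power of two (with a
 * per-generation minimum). Oversized requests fall back to untiled.
 */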
214 static unsigned long
215 drm_intel_gem_bo_tile_size(drm_intel_bufmgr_gem *bufmgr_gem, unsigned long size,
216 uint32_t *tiling_mode)
217 {
218 unsigned long min_size, max_size;
219 unsigned long i;
221 if (*tiling_mode == I915_TILING_NONE)
222 return size;
224 /* 965+ just need multiples of page size for tiling */
225 if (bufmgr_gem->gen >= 4)
226 return ROUND_UP_TO(size, 4096);
228 /* Older chips need powers of two, of at least 512k or 1M */
229 if (bufmgr_gem->gen == 3) {
230 min_size = 1024*1024;
231 max_size = 128*1024*1024;
232 } else {
233 min_size = 512*1024;
234 max_size = 64*1024*1024;
235 }
237 if (size > max_size) {
238 *tiling_mode = I915_TILING_NONE;
239 return size;
240 }
242 for (i = min_size; i < size; i <<= 1)
243 ;
245 return i;
246 }
248 /*
249 * Round a given pitch up to the minimum required for X tiling on a
250 * given chip. We use 512 as the minimum to allow for a later tiling
251 * change.
252 */
253 static unsigned long
254 drm_intel_gem_bo_tile_pitch(drm_intel_bufmgr_gem *bufmgr_gem,
255 unsigned long pitch, uint32_t tiling_mode)
256 {
257 unsigned long tile_width;
258 unsigned long i;
260 /* If untiled, then just align it so that we can do rendering
261 * to it with the 3D engine.
262 */
263 if (tiling_mode == I915_TILING_NONE)
264 return ALIGN(pitch, 64);
266 if (tiling_mode == I915_TILING_X)
267 tile_width = 512;
268 else
269 tile_width = 128;
271 /* 965 is flexible */
272 if (bufmgr_gem->gen >= 4)
273 return ROUND_UP_TO(pitch, tile_width);
275 /* Pre-965 needs power of two tile width */
276 for (i = tile_width; i < pitch; i <<= 1)
277 ;
279 return i;
280 }
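/**
 * Return the smallest cache bucket that can hold a request of the given
 * size, or NULL if the request is larger than the biggest bucket.
 */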
282 static struct drm_intel_gem_bo_bucket *
283 drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
284 unsigned long size)
285 {
286 int i;
288 for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) {
289 struct drm_intel_gem_bo_bucket *bucket =
290 &bufmgr_gem->cache_bucket[i];
291 if (bucket->size >= size) {
292 return bucket;
293 }
294 }
296 return NULL;
297 }
299 static void
300 drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
301 {
302 int i, j;
304 for (i = 0; i < bufmgr_gem->exec_count; i++) {
305 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
306 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
308 if (bo_gem->relocs == NULL) {
309 DBG("%2d: %d (%s)\n", i, bo_gem->gem_handle,
310 bo_gem->name);
311 continue;
312 }
314 for (j = 0; j < bo_gem->reloc_count; j++) {
315 drm_intel_bo *target_bo = bo_gem->reloc_target_info[j].bo;
316 drm_intel_bo_gem *target_gem =
317 (drm_intel_bo_gem *) target_bo;
319 DBG("%2d: %d (%s)@0x%08llx -> "
320 "%d (%s)@0x%08lx + 0x%08x\n",
321 i,
322 bo_gem->gem_handle, bo_gem->name,
323 (unsigned long long)bo_gem->relocs[j].offset,
324 target_gem->gem_handle,
325 target_gem->name,
326 target_bo->offset,
327 bo_gem->relocs[j].delta);
328 }
329 }
330 }
332 static inline void
333 drm_intel_gem_bo_reference(drm_intel_bo *bo)
334 {
335 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
337 assert(atomic_read(&bo_gem->refcount) > 0);
338 atomic_inc(&bo_gem->refcount);
339 }
341 /**
342 * Adds the given buffer to the list of buffers to be validated (moved into the
343 * appropriate memory type) with the next batch submission.
344 *
345 * If a buffer is validated multiple times in a batch submission, it ends up
346 * with the intersection of the memory type flags and the union of the
347 * access flags.
348 */
349 static void
350 drm_intel_add_validate_buffer(drm_intel_bo *bo)
351 {
352 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
353 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
354 int index;
356 if (bo_gem->validate_index != -1)
357 return;
359 /* Extend the array of validation entries as necessary. */
360 if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
361 int new_size = bufmgr_gem->exec_size * 2;
363 if (new_size == 0)
364 new_size = 5;
366 bufmgr_gem->exec_objects =
367 realloc(bufmgr_gem->exec_objects,
368 sizeof(*bufmgr_gem->exec_objects) * new_size);
369 bufmgr_gem->exec_bos =
370 realloc(bufmgr_gem->exec_bos,
371 sizeof(*bufmgr_gem->exec_bos) * new_size);
372 bufmgr_gem->exec_size = new_size;
373 }
375 index = bufmgr_gem->exec_count;
376 bo_gem->validate_index = index;
377 /* Fill in array entry */
378 bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
379 bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
380 bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t) bo_gem->relocs;
381 bufmgr_gem->exec_objects[index].alignment = 0;
382 bufmgr_gem->exec_objects[index].offset = 0;
383 bufmgr_gem->exec_bos[index] = bo;
384 bufmgr_gem->exec_count++;
385 }
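/**
 * Execbuffer2 variant of drm_intel_add_validate_buffer(); additionally
 * records whether the object needs a fence register by setting
 * EXEC_OBJECT_NEEDS_FENCE on its validation entry.
 */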
387 static void
388 drm_intel_add_validate_buffer2(drm_intel_bo *bo, int need_fence)
389 {
390 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
391 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
392 int index;
394 if (bo_gem->validate_index != -1) {
395 if (need_fence)
396 bufmgr_gem->exec2_objects[bo_gem->validate_index].flags |=
397 EXEC_OBJECT_NEEDS_FENCE;
398 return;
399 }
401 /* Extend the array of validation entries as necessary. */
402 if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
403 int new_size = bufmgr_gem->exec_size * 2;
405 if (new_size == 0)
406 new_size = 5;
408 bufmgr_gem->exec2_objects =
409 realloc(bufmgr_gem->exec2_objects,
410 sizeof(*bufmgr_gem->exec2_objects) * new_size);
411 bufmgr_gem->exec_bos =
412 realloc(bufmgr_gem->exec_bos,
413 sizeof(*bufmgr_gem->exec_bos) * new_size);
414 bufmgr_gem->exec_size = new_size;
415 }
417 index = bufmgr_gem->exec_count;
418 bo_gem->validate_index = index;
419 /* Fill in array entry */
420 bufmgr_gem->exec2_objects[index].handle = bo_gem->gem_handle;
421 bufmgr_gem->exec2_objects[index].relocation_count = bo_gem->reloc_count;
422 bufmgr_gem->exec2_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
423 bufmgr_gem->exec2_objects[index].alignment = 0;
424 bufmgr_gem->exec2_objects[index].offset = 0;
425 bufmgr_gem->exec_bos[index] = bo;
426 bufmgr_gem->exec2_objects[index].flags = 0;
427 bufmgr_gem->exec2_objects[index].rsvd1 = 0;
428 bufmgr_gem->exec2_objects[index].rsvd2 = 0;
429 if (need_fence) {
430 bufmgr_gem->exec2_objects[index].flags |=
431 EXEC_OBJECT_NEEDS_FENCE;
432 }
433 bufmgr_gem->exec_count++;
434 }
436 #define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
437 sizeof(uint32_t))
439 static void
440 drm_intel_bo_gem_set_in_aperture_size(drm_intel_bufmgr_gem *bufmgr_gem,
441 drm_intel_bo_gem *bo_gem)
442 {
443 int size;
445 assert(!bo_gem->used_as_reloc_target);
447 /* The older chipsets are far less flexible in terms of tiling,
448 * and require tiled buffers to be size-aligned in the aperture.
449 * This means that in the worst possible case we will need a hole
450 * twice as large as the object in order for it to fit into the
451 * aperture. Optimal packing is for wimps.
452 */
453 size = bo_gem->bo.size;
454 if (bufmgr_gem->gen < 4 && bo_gem->tiling_mode != I915_TILING_NONE)
455 size *= 2;
457 bo_gem->reloc_tree_size = size;
458 }
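/**
 * Allocate the relocation and reloc-target arrays for a BO, capping the
 * entry count at one relocation per four bytes of the buffer. On
 * allocation failure the BO is flagged with has_error and 1 is returned.
 */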
460 static int
461 drm_intel_setup_reloc_list(drm_intel_bo *bo)
462 {
463 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
464 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
465 unsigned int max_relocs = bufmgr_gem->max_relocs;
467 if (bo->size / 4 < max_relocs)
468 max_relocs = bo->size / 4;
470 bo_gem->relocs = malloc(max_relocs *
471 sizeof(struct drm_i915_gem_relocation_entry));
472 bo_gem->reloc_target_info = malloc(max_relocs *
473 sizeof(drm_intel_reloc_target));
474 if (bo_gem->relocs == NULL || bo_gem->reloc_target_info == NULL) {
475 bo_gem->has_error = 1;
477 free (bo_gem->relocs);
478 bo_gem->relocs = NULL;
480 free (bo_gem->reloc_target_info);
481 bo_gem->reloc_target_info = NULL;
483 return 1;
484 }
486 return 0;
487 }
489 static int
490 drm_intel_gem_bo_busy(drm_intel_bo *bo)
491 {
492 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
493 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
494 struct drm_i915_gem_busy busy;
495 int ret;
497 memset(&busy, 0, sizeof(busy));
498 busy.handle = bo_gem->gem_handle;
500 do {
501 ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
502 } while (ret == -1 && errno == EINTR);
504 return (ret == 0 && busy.busy);
505 }
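/**
 * Tell the kernel whether a cached BO's backing pages may be discarded
 * (I915_MADV_DONTNEED) or are needed again (I915_MADV_WILLNEED).
 * Returns whether the pages are still retained.
 */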
507 static int
508 drm_intel_gem_bo_madvise_internal(drm_intel_bufmgr_gem *bufmgr_gem,
509 drm_intel_bo_gem *bo_gem, int state)
510 {
511 struct drm_i915_gem_madvise madv;
513 madv.handle = bo_gem->gem_handle;
514 madv.madv = state;
515 madv.retained = 1;
516 ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
518 return madv.retained;
519 }
521 static int
522 drm_intel_gem_bo_madvise(drm_intel_bo *bo, int madv)
523 {
524 return drm_intel_gem_bo_madvise_internal
525 ((drm_intel_bufmgr_gem *) bo->bufmgr,
526 (drm_intel_bo_gem *) bo,
527 madv);
528 }
530 /* drop the oldest entries that have been purged by the kernel */
531 static void
532 drm_intel_gem_bo_cache_purge_bucket(drm_intel_bufmgr_gem *bufmgr_gem,
533 struct drm_intel_gem_bo_bucket *bucket)
534 {
535 while (!DRMLISTEMPTY(&bucket->head)) {
536 drm_intel_bo_gem *bo_gem;
538 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
539 bucket->head.next, head);
540 if (drm_intel_gem_bo_madvise_internal
541 (bufmgr_gem, bo_gem, I915_MADV_DONTNEED))
542 break;
544 DRMLISTDEL(&bo_gem->head);
545 drm_intel_gem_bo_free(&bo_gem->bo);
546 }
547 }
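/**
 * Common allocation path: try to reuse a BO from the matching cache
 * bucket (most-recently-used for render targets, least-recently-used
 * and idle otherwise), and fall back to DRM_IOCTL_I915_GEM_CREATE.
 */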
549 static drm_intel_bo *
550 drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr,
551 const char *name,
552 unsigned long size,
553 unsigned long flags)
554 {
555 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
556 drm_intel_bo_gem *bo_gem;
557 unsigned int page_size = getpagesize();
558 int ret;
559 struct drm_intel_gem_bo_bucket *bucket;
560 int alloc_from_cache;
561 unsigned long bo_size;
562 int for_render = 0;
564 if (flags & BO_ALLOC_FOR_RENDER)
565 for_render = 1;
567 /* Round the allocated size up to a power of two number of pages. */
568 bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, size);
570 /* If we don't have caching at this size, don't actually round the
571 * allocation up.
572 */
573 if (bucket == NULL) {
574 bo_size = size;
575 if (bo_size < page_size)
576 bo_size = page_size;
577 } else {
578 bo_size = bucket->size;
579 }
581 pthread_mutex_lock(&bufmgr_gem->lock);
582 /* Get a buffer out of the cache if available */
583 retry:
584 alloc_from_cache = 0;
585 if (bucket != NULL && !DRMLISTEMPTY(&bucket->head)) {
586 if (for_render) {
587 /* Allocate new render-target BOs from the tail (MRU)
588 * of the list, as it will likely be hot in the GPU
589 * cache and in the aperture for us.
590 */
591 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
592 bucket->head.prev, head);
593 DRMLISTDEL(&bo_gem->head);
594 alloc_from_cache = 1;
595 } else {
596 /* For non-render-target BOs (where we're probably
597 * going to map it first thing in order to fill it
598 * with data), check if the last BO in the cache is
599 * unbusy, and only reuse in that case. Otherwise,
600 * allocating a new buffer is probably faster than
601 * waiting for the GPU to finish.
602 */
603 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
604 bucket->head.next, head);
605 if (!drm_intel_gem_bo_busy(&bo_gem->bo)) {
606 alloc_from_cache = 1;
607 DRMLISTDEL(&bo_gem->head);
608 }
609 }
611 if (alloc_from_cache) {
612 if (!drm_intel_gem_bo_madvise_internal
613 (bufmgr_gem, bo_gem, I915_MADV_WILLNEED)) {
614 drm_intel_gem_bo_free(&bo_gem->bo);
615 drm_intel_gem_bo_cache_purge_bucket(bufmgr_gem,
616 bucket);
617 goto retry;
618 }
619 }
620 }
621 pthread_mutex_unlock(&bufmgr_gem->lock);
623 if (!alloc_from_cache) {
624 struct drm_i915_gem_create create;
626 bo_gem = calloc(1, sizeof(*bo_gem));
627 if (!bo_gem)
628 return NULL;
630 bo_gem->bo.size = bo_size;
631 memset(&create, 0, sizeof(create));
632 create.size = bo_size;
634 do {
635 ret = ioctl(bufmgr_gem->fd,
636 DRM_IOCTL_I915_GEM_CREATE,
637 &create);
638 } while (ret == -1 && errno == EINTR);
639 bo_gem->gem_handle = create.handle;
640 bo_gem->bo.handle = bo_gem->gem_handle;
641 if (ret != 0) {
642 free(bo_gem);
643 return NULL;
644 }
645 bo_gem->bo.bufmgr = bufmgr;
646 }
648 bo_gem->name = name;
649 atomic_set(&bo_gem->refcount, 1);
650 bo_gem->validate_index = -1;
651 bo_gem->reloc_tree_fences = 0;
652 bo_gem->used_as_reloc_target = 0;
653 bo_gem->has_error = 0;
654 bo_gem->tiling_mode = I915_TILING_NONE;
655 bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
656 bo_gem->reusable = 1;
658 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
660 DBG("bo_create: buf %d (%s) %ldb\n",
661 bo_gem->gem_handle, bo_gem->name, size);
663 return &bo_gem->bo;
664 }
666 static drm_intel_bo *
667 drm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
668 const char *name,
669 unsigned long size,
670 unsigned int alignment)
671 {
672 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size,
673 BO_ALLOC_FOR_RENDER);
674 }
676 static drm_intel_bo *
677 drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr,
678 const char *name,
679 unsigned long size,
680 unsigned int alignment)
681 {
682 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, 0);
683 }
685 static drm_intel_bo *
686 drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
687 int x, int y, int cpp, uint32_t *tiling_mode,
688 unsigned long *pitch, unsigned long flags)
689 {
690 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
691 drm_intel_bo *bo;
692 unsigned long size, stride, aligned_y = y;
693 int ret;
695 /* If we're tiled, our allocations are in 8 or 32-row blocks,
696 * so failure to align our height means that we won't allocate
697 * enough pages.
698 *
699 * If we're untiled, we still have to align to 2 rows high
700 * because the data port accesses 2x2 blocks even if the
701 * bottom row isn't to be rendered, so failure to align means
702 * we could walk off the end of the GTT and fault. This is
703 * documented on 965, and may be the case on older chipsets
704 * too so we try to be careful.
705 */
706 if (*tiling_mode == I915_TILING_NONE)
707 aligned_y = ALIGN(y, 2);
708 else if (*tiling_mode == I915_TILING_X)
709 aligned_y = ALIGN(y, 8);
710 else if (*tiling_mode == I915_TILING_Y)
711 aligned_y = ALIGN(y, 32);
713 stride = x * cpp;
714 stride = drm_intel_gem_bo_tile_pitch(bufmgr_gem, stride, *tiling_mode);
715 size = stride * aligned_y;
716 size = drm_intel_gem_bo_tile_size(bufmgr_gem, size, tiling_mode);
718 bo = drm_intel_gem_bo_alloc_internal(bufmgr, name, size, flags);
719 if (!bo)
720 return NULL;
722 ret = drm_intel_gem_bo_set_tiling(bo, tiling_mode, stride);
723 if (ret != 0) {
724 drm_intel_gem_bo_unreference(bo);
725 return NULL;
726 }
728 *pitch = stride;
730 return bo;
731 }
733 /**
734 * Returns a drm_intel_bo wrapping the given buffer object handle.
735 *
736 * This can be used when one application needs to pass a buffer object
737 * to another.
738 */
739 drm_intel_bo *
740 drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
741 const char *name,
742 unsigned int handle)
743 {
744 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
745 drm_intel_bo_gem *bo_gem;
746 int ret;
747 struct drm_gem_open open_arg;
748 struct drm_i915_gem_get_tiling get_tiling;
750 bo_gem = calloc(1, sizeof(*bo_gem));
751 if (!bo_gem)
752 return NULL;
754 memset(&open_arg, 0, sizeof(open_arg));
755 open_arg.name = handle;
756 do {
757 ret = ioctl(bufmgr_gem->fd,
758 DRM_IOCTL_GEM_OPEN,
759 &open_arg);
760 } while (ret == -1 && errno == EINTR);
761 if (ret != 0) {
762 fprintf(stderr, "Couldn't reference %s handle 0x%08x: %s\n",
763 name, handle, strerror(errno));
764 free(bo_gem);
765 return NULL;
766 }
767 bo_gem->bo.size = open_arg.size;
768 bo_gem->bo.offset = 0;
769 bo_gem->bo.virtual = NULL;
770 bo_gem->bo.bufmgr = bufmgr;
771 bo_gem->name = name;
772 atomic_set(&bo_gem->refcount, 1);
773 bo_gem->validate_index = -1;
774 bo_gem->gem_handle = open_arg.handle;
775 bo_gem->global_name = handle;
776 bo_gem->reusable = 0;
778 memset(&get_tiling, 0, sizeof(get_tiling));
779 get_tiling.handle = bo_gem->gem_handle;
780 ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling);
781 if (ret != 0) {
782 drm_intel_gem_bo_unreference(&bo_gem->bo);
783 return NULL;
784 }
785 bo_gem->tiling_mode = get_tiling.tiling_mode;
786 bo_gem->swizzle_mode = get_tiling.swizzle_mode;
787 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
789 DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);
791 return &bo_gem->bo;
792 }
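/** Unmap any CPU/GTT mappings and close the GEM handle. */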
794 static void
795 drm_intel_gem_bo_free(drm_intel_bo *bo)
796 {
797 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
798 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
799 struct drm_gem_close close;
800 int ret;
802 if (bo_gem->mem_virtual)
803 munmap(bo_gem->mem_virtual, bo_gem->bo.size);
804 if (bo_gem->gtt_virtual)
805 munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
807 /* Close this object */
808 memset(&close, 0, sizeof(close));
809 close.handle = bo_gem->gem_handle;
810 ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
811 if (ret != 0) {
812 fprintf(stderr,
813 "DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
814 bo_gem->gem_handle, bo_gem->name, strerror(errno));
815 }
816 free(bo);
817 }
819 /** Frees all cached buffers significantly older than @time. */
820 static void
821 drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
822 {
823 int i;
825 for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) {
826 struct drm_intel_gem_bo_bucket *bucket =
827 &bufmgr_gem->cache_bucket[i];
829 while (!DRMLISTEMPTY(&bucket->head)) {
830 drm_intel_bo_gem *bo_gem;
832 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
833 bucket->head.next, head);
834 if (time - bo_gem->free_time <= 1)
835 break;
837 DRMLISTDEL(&bo_gem->head);
839 drm_intel_gem_bo_free(&bo_gem->bo);
840 }
841 }
842 }
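/**
 * Final-unreference path, called with the bufmgr lock held: drops the
 * references held on relocation targets, frees the relocation arrays,
 * and either returns the BO to its cache bucket (tiling cleared,
 * madvised DONTNEED) or frees it outright.
 */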
844 static void
845 drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
846 {
847 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
848 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
849 struct drm_intel_gem_bo_bucket *bucket;
850 uint32_t tiling_mode;
851 int i;
853 /* Unreference all the target buffers */
854 for (i = 0; i < bo_gem->reloc_count; i++) {
855 drm_intel_gem_bo_unreference_locked_timed(bo_gem->
856 reloc_target_info[i].bo,
857 time);
858 }
859 bo_gem->reloc_count = 0;
860 bo_gem->used_as_reloc_target = 0;
862 DBG("bo_unreference final: %d (%s)\n",
863 bo_gem->gem_handle, bo_gem->name);
865 /* release memory associated with this object */
866 if (bo_gem->reloc_target_info) {
867 free(bo_gem->reloc_target_info);
868 bo_gem->reloc_target_info = NULL;
869 }
870 if (bo_gem->relocs) {
871 free(bo_gem->relocs);
872 bo_gem->relocs = NULL;
873 }
875 bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
876 /* Put the buffer into our internal cache for reuse if we can. */
877 tiling_mode = I915_TILING_NONE;
878 if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
879 drm_intel_gem_bo_set_tiling(bo, &tiling_mode, 0) == 0 &&
880 drm_intel_gem_bo_madvise_internal(bufmgr_gem, bo_gem,
881 I915_MADV_DONTNEED)) {
882 bo_gem->free_time = time;
884 bo_gem->name = NULL;
885 bo_gem->validate_index = -1;
887 DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
889 drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time);
890 } else {
891 drm_intel_gem_bo_free(bo);
892 }
893 }
895 static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
896 time_t time)
897 {
898 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
900 assert(atomic_read(&bo_gem->refcount) > 0);
901 if (atomic_dec_and_test(&bo_gem->refcount))
902 drm_intel_gem_bo_unreference_final(bo, time);
903 }
905 static void drm_intel_gem_bo_unreference(drm_intel_bo *bo)
906 {
907 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
909 assert(atomic_read(&bo_gem->refcount) > 0);
910 if (atomic_dec_and_test(&bo_gem->refcount)) {
911 drm_intel_bufmgr_gem *bufmgr_gem =
912 (drm_intel_bufmgr_gem *) bo->bufmgr;
913 struct timespec time;
915 clock_gettime(CLOCK_MONOTONIC, &time);
917 pthread_mutex_lock(&bufmgr_gem->lock);
918 drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
919 pthread_mutex_unlock(&bufmgr_gem->lock);
920 }
921 }
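/**
 * Map the BO for CPU access: lazily mmap it via DRM_IOCTL_I915_GEM_MMAP,
 * then move it to the CPU read (and optionally write) domain.
 */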
923 static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
924 {
925 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
926 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
927 struct drm_i915_gem_set_domain set_domain;
928 int ret;
930 pthread_mutex_lock(&bufmgr_gem->lock);
932 /* Allow recursive mapping. Mesa may recursively map buffers with
933 * nested display loops.
934 */
935 if (!bo_gem->mem_virtual) {
936 struct drm_i915_gem_mmap mmap_arg;
938 DBG("bo_map: %d (%s)\n", bo_gem->gem_handle, bo_gem->name);
940 memset(&mmap_arg, 0, sizeof(mmap_arg));
941 mmap_arg.handle = bo_gem->gem_handle;
942 mmap_arg.offset = 0;
943 mmap_arg.size = bo->size;
944 do {
945 ret = ioctl(bufmgr_gem->fd,
946 DRM_IOCTL_I915_GEM_MMAP,
947 &mmap_arg);
948 } while (ret == -1 && errno == EINTR);
949 if (ret != 0) {
950 ret = -errno;
951 fprintf(stderr,
952 "%s:%d: Error mapping buffer %d (%s): %s .\n",
953 __FILE__, __LINE__, bo_gem->gem_handle,
954 bo_gem->name, strerror(errno));
955 pthread_mutex_unlock(&bufmgr_gem->lock);
956 return ret;
957 }
958 bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
959 }
960 DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
961 bo_gem->mem_virtual);
962 bo->virtual = bo_gem->mem_virtual;
964 set_domain.handle = bo_gem->gem_handle;
965 set_domain.read_domains = I915_GEM_DOMAIN_CPU;
966 if (write_enable)
967 set_domain.write_domain = I915_GEM_DOMAIN_CPU;
968 else
969 set_domain.write_domain = 0;
970 do {
971 ret = ioctl(bufmgr_gem->fd,
972 DRM_IOCTL_I915_GEM_SET_DOMAIN,
973 &set_domain);
974 } while (ret == -1 && errno == EINTR);
975 if (ret != 0) {
976 ret = -errno;
977 fprintf(stderr, "%s:%d: Error setting to CPU domain %d: %s\n",
978 __FILE__, __LINE__, bo_gem->gem_handle,
979 strerror(errno));
980 pthread_mutex_unlock(&bufmgr_gem->lock);
981 return ret;
982 }
984 pthread_mutex_unlock(&bufmgr_gem->lock);
986 return 0;
987 }
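/**
 * Map the BO through the GTT aperture using the fake offset returned by
 * DRM_IOCTL_I915_GEM_MMAP_GTT, then move it to the GTT domain so CPU
 * caches are flushed.
 */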
989 int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
990 {
991 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
992 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
993 struct drm_i915_gem_set_domain set_domain;
994 int ret;
996 pthread_mutex_lock(&bufmgr_gem->lock);
998 /* Get a mapping of the buffer if we haven't before. */
999 if (bo_gem->gtt_virtual == NULL) {
1000 struct drm_i915_gem_mmap_gtt mmap_arg;
1002 DBG("bo_map_gtt: mmap %d (%s)\n", bo_gem->gem_handle,
1003 bo_gem->name);
1005 memset(&mmap_arg, 0, sizeof(mmap_arg));
1006 mmap_arg.handle = bo_gem->gem_handle;
1008 /* Get the fake offset back... */
1009 do {
1010 ret = ioctl(bufmgr_gem->fd,
1011 DRM_IOCTL_I915_GEM_MMAP_GTT,
1012 &mmap_arg);
1013 } while (ret == -1 && errno == EINTR);
1014 if (ret != 0) {
1015 ret = -errno;
1016 fprintf(stderr,
1017 "%s:%d: Error preparing buffer map %d (%s): %s .\n",
1018 __FILE__, __LINE__,
1019 bo_gem->gem_handle, bo_gem->name,
1020 strerror(errno));
1021 pthread_mutex_unlock(&bufmgr_gem->lock);
1022 return ret;
1023 }
1025 /* and mmap it */
1026 bo_gem->gtt_virtual = mmap(0, bo->size, PROT_READ | PROT_WRITE,
1027 MAP_SHARED, bufmgr_gem->fd,
1028 mmap_arg.offset);
1029 if (bo_gem->gtt_virtual == MAP_FAILED) {
1030 bo_gem->gtt_virtual = NULL;
1031 ret = -errno;
1032 fprintf(stderr,
1033 "%s:%d: Error mapping buffer %d (%s): %s .\n",
1034 __FILE__, __LINE__,
1035 bo_gem->gem_handle, bo_gem->name,
1036 strerror(errno));
1037 pthread_mutex_unlock(&bufmgr_gem->lock);
1038 return ret;
1039 }
1040 }
1042 bo->virtual = bo_gem->gtt_virtual;
1044 DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
1045 bo_gem->gtt_virtual);
1047 /* Now move it to the GTT domain so that the CPU caches are flushed */
1048 set_domain.handle = bo_gem->gem_handle;
1049 set_domain.read_domains = I915_GEM_DOMAIN_GTT;
1050 set_domain.write_domain = I915_GEM_DOMAIN_GTT;
1051 do {
1052 ret = ioctl(bufmgr_gem->fd,
1053 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1054 &set_domain);
1055 } while (ret == -1 && errno == EINTR);
1057 if (ret != 0) {
1058 ret = -errno;
1059 fprintf(stderr, "%s:%d: Error setting domain %d: %s\n",
1060 __FILE__, __LINE__, bo_gem->gem_handle,
1061 strerror(errno));
1062 }
1064 pthread_mutex_unlock(&bufmgr_gem->lock);
1066 return ret;
1067 }
1069 int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
1070 {
1071 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1072 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1073 int ret = 0;
1075 if (bo == NULL)
1076 return 0;
1078 assert(bo_gem->gtt_virtual != NULL);
1080 pthread_mutex_lock(&bufmgr_gem->lock);
1081 bo->virtual = NULL;
1082 pthread_mutex_unlock(&bufmgr_gem->lock);
1084 return ret;
1085 }
1087 static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
1088 {
1089 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1090 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1091 struct drm_i915_gem_sw_finish sw_finish;
1092 int ret;
1094 if (bo == NULL)
1095 return 0;
1097 assert(bo_gem->mem_virtual != NULL);
1099 pthread_mutex_lock(&bufmgr_gem->lock);
1101 /* Cause a flush to happen if the buffer's pinned for scanout, so the
1102 * results show up in a timely manner.
1103 */
1104 sw_finish.handle = bo_gem->gem_handle;
1105 do {
1106 ret = ioctl(bufmgr_gem->fd,
1107 DRM_IOCTL_I915_GEM_SW_FINISH,
1108 &sw_finish);
1109 } while (ret == -1 && errno == EINTR);
1110 ret = ret == -1 ? -errno : 0;
1112 bo->virtual = NULL;
1113 pthread_mutex_unlock(&bufmgr_gem->lock);
1115 return ret;
1116 }
1118 static int
1119 drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
1120 unsigned long size, const void *data)
1121 {
1122 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1123 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1124 struct drm_i915_gem_pwrite pwrite;
1125 int ret;
1127 memset(&pwrite, 0, sizeof(pwrite));
1128 pwrite.handle = bo_gem->gem_handle;
1129 pwrite.offset = offset;
1130 pwrite.size = size;
1131 pwrite.data_ptr = (uint64_t) (uintptr_t) data;
1132 do {
1133 ret = ioctl(bufmgr_gem->fd,
1134 DRM_IOCTL_I915_GEM_PWRITE,
1135 &pwrite);
1136 } while (ret == -1 && errno == EINTR);
1137 if (ret != 0) {
1138 ret = -errno;
1139 fprintf(stderr,
1140 "%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
1141 __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
1142 (int)size, strerror(errno));
1143 }
1145 return ret;
1146 }
1148 static int
1149 drm_intel_gem_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
1150 {
1151 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1152 struct drm_i915_get_pipe_from_crtc_id get_pipe_from_crtc_id;
1153 int ret;
1155 get_pipe_from_crtc_id.crtc_id = crtc_id;
1156 ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID,
1157 &get_pipe_from_crtc_id);
1158 if (ret != 0) {
1159 /* We return -1 here to signal that we don't
1160 * know which pipe is associated with this crtc.
1161 * This lets the caller know that this information
1162 * isn't available; using the wrong pipe for
1163 * vblank waiting can cause the chipset to lock up
1164 */
1165 return -1;
1166 }
1168 return get_pipe_from_crtc_id.pipe;
1169 }
1171 static int
1172 drm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
1173 unsigned long size, void *data)
1174 {
1175 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1176 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1177 struct drm_i915_gem_pread pread;
1178 int ret;
1180 memset(&pread, 0, sizeof(pread));
1181 pread.handle = bo_gem->gem_handle;
1182 pread.offset = offset;
1183 pread.size = size;
1184 pread.data_ptr = (uint64_t) (uintptr_t) data;
1185 do {
1186 ret = ioctl(bufmgr_gem->fd,
1187 DRM_IOCTL_I915_GEM_PREAD,
1188 &pread);
1189 } while (ret == -1 && errno == EINTR);
1190 if (ret != 0) {
1191 ret = -errno;
1192 fprintf(stderr,
1193 "%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
1194 __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
1195 (int)size, strerror(errno));
1196 }
1198 return ret;
1199 }
1201 /** Waits for all GPU rendering to the object to have completed. */
1202 static void
1203 drm_intel_gem_bo_wait_rendering(drm_intel_bo *bo)
1204 {
1205 drm_intel_gem_bo_start_gtt_access(bo, 0);
1206 }
1208 /**
1209 * Sets the object to the GTT read and possibly write domain, used by the X
1210 * 2D driver in the absence of kernel support to do drm_intel_gem_bo_map_gtt().
1211 *
1212 * In combination with drm_intel_gem_bo_pin() and manual fence management, we
1213 * can do tiled pixmaps this way.
1214 */
1215 void
1216 drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)
1217 {
1218 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1219 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1220 struct drm_i915_gem_set_domain set_domain;
1221 int ret;
1223 set_domain.handle = bo_gem->gem_handle;
1224 set_domain.read_domains = I915_GEM_DOMAIN_GTT;
1225 set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0;
1226 do {
1227 ret = ioctl(bufmgr_gem->fd,
1228 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1229 &set_domain);
1230 } while (ret == -1 && errno == EINTR);
1231 if (ret != 0) {
1232 fprintf(stderr,
1233 "%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
1234 __FILE__, __LINE__, bo_gem->gem_handle,
1235 set_domain.read_domains, set_domain.write_domain,
1236 strerror(errno));
1237 }
1238 }
1240 static void
1241 drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
1242 {
1243 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1244 int i;
1246 free(bufmgr_gem->exec2_objects);
1247 free(bufmgr_gem->exec_objects);
1248 free(bufmgr_gem->exec_bos);
1250 pthread_mutex_destroy(&bufmgr_gem->lock);
1252 /* Free any cached buffer objects we were going to reuse */
1253 for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) {
1254 struct drm_intel_gem_bo_bucket *bucket =
1255 &bufmgr_gem->cache_bucket[i];
1256 drm_intel_bo_gem *bo_gem;
1258 while (!DRMLISTEMPTY(&bucket->head)) {
1259 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
1260 bucket->head.next, head);
1261 DRMLISTDEL(&bo_gem->head);
1263 drm_intel_gem_bo_free(&bo_gem->bo);
1264 }
1265 }
1267 free(bufmgr);
1268 }
1270 /**
1271 * Adds the target buffer to the validation list and adds the relocation
1272 * to the reloc_buffer's relocation list.
1273 *
1274 * The relocation entry at the given offset must already contain the
1275 * precomputed relocation value, because the kernel will optimize out
1276 * the relocation entry write when the buffer hasn't moved from the
1277 * last known offset in target_bo.
1278 */
1279 static int
1280 do_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
1281 drm_intel_bo *target_bo, uint32_t target_offset,
1282 uint32_t read_domains, uint32_t write_domain,
1283 int need_fence)
1284 {
1285 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1286 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1287 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
1289 if (bo_gem->has_error)
1290 return -ENOMEM;
1292 if (target_bo_gem->has_error) {
1293 bo_gem->has_error = 1;
1294 return -ENOMEM;
1295 }
1297 if (target_bo_gem->tiling_mode == I915_TILING_NONE)
1298 need_fence = 0;
1300 /* We never use HW fences for rendering on 965+ */
1301 if (bufmgr_gem->gen >= 4)
1302 need_fence = 0;
1304 /* Create a new relocation list if needed */
1305 if (bo_gem->relocs == NULL && drm_intel_setup_reloc_list(bo))
1306 return -ENOMEM;
1308 /* Check overflow */
1309 assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);
1311 /* Check args */
1312 assert(offset <= bo->size - 4);
1313 assert((write_domain & (write_domain - 1)) == 0);
1315 /* Make sure that we're not adding a reloc to something whose size has
1316 * already been accounted for.
1317 */
1318 assert(!bo_gem->used_as_reloc_target);
1319 bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
1320 /* An object needing a fence is a tiled buffer, so it won't have
1321 * relocs to other buffers.
1322 */
1323 if (need_fence)
1324 target_bo_gem->reloc_tree_fences = 1;
1325 bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences;
1327 /* Flag the target to disallow further relocations in it. */
1328 target_bo_gem->used_as_reloc_target = 1;
1330 bo_gem->relocs[bo_gem->reloc_count].offset = offset;
1331 bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
1332 bo_gem->relocs[bo_gem->reloc_count].target_handle =
1333 target_bo_gem->gem_handle;
1334 bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
1335 bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
1336 bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset;
1338 bo_gem->reloc_target_info[bo_gem->reloc_count].bo = target_bo;
1339 drm_intel_gem_bo_reference(target_bo);
1340 if (need_fence)
1341 bo_gem->reloc_target_info[bo_gem->reloc_count].flags =
1342 DRM_INTEL_RELOC_FENCE;
1343 else
1344 bo_gem->reloc_target_info[bo_gem->reloc_count].flags = 0;
1346 bo_gem->reloc_count++;
1348 return 0;
1349 }
1351 static int
1352 drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
1353 drm_intel_bo *target_bo, uint32_t target_offset,
1354 uint32_t read_domains, uint32_t write_domain)
1355 {
1356 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
1358 return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
1359 read_domains, write_domain,
1360 !bufmgr_gem->fenced_relocs);
1361 }
1363 static int
1364 drm_intel_gem_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
1365 drm_intel_bo *target_bo,
1366 uint32_t target_offset,
1367 uint32_t read_domains, uint32_t write_domain)
1368 {
1369 return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
1370 read_domains, write_domain, 1);
1371 }
1373 /**
1374 * Walk the tree of relocations rooted at BO and accumulate the list of
1375 * validations to be performed and update the relocation buffers with
1376 * index values into the validation list.
1377 */
1378 static void
1379 drm_intel_gem_bo_process_reloc(drm_intel_bo *bo)
1380 {
1381 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1382 int i;
1384 if (bo_gem->relocs == NULL)
1385 return;
1387 for (i = 0; i < bo_gem->reloc_count; i++) {
1388 drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
1390 /* Continue walking the tree depth-first. */
1391 drm_intel_gem_bo_process_reloc(target_bo);
1393 /* Add the target to the validate list */
1394 drm_intel_add_validate_buffer(target_bo);
1395 }
1396 }
1398 static void
1399 drm_intel_gem_bo_process_reloc2(drm_intel_bo *bo)
1400 {
1401 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
1402 int i;
1404 if (bo_gem->relocs == NULL)
1405 return;
1407 for (i = 0; i < bo_gem->reloc_count; i++) {
1408 drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
1409 int need_fence;
1411 /* Continue walking the tree depth-first. */
1412 drm_intel_gem_bo_process_reloc2(target_bo);
1414 need_fence = (bo_gem->reloc_target_info[i].flags &
1415 DRM_INTEL_RELOC_FENCE);
1417 /* Add the target to the validate list */
1418 drm_intel_add_validate_buffer2(target_bo, need_fence);
1419 }
1420 }
1423 static void
1424 drm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem)
1425 {
1426 int i;
1428 for (i = 0; i < bufmgr_gem->exec_count; i++) {
1429 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
1430 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1432 /* Update the buffer offset */
1433 if (bufmgr_gem->exec_objects[i].offset != bo->offset) {
1434 DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
1435 bo_gem->gem_handle, bo_gem->name, bo->offset,
1436 (unsigned long long)bufmgr_gem->exec_objects[i].
1437 offset);
1438 bo->offset = bufmgr_gem->exec_objects[i].offset;
1439 }
1440 }
1441 }
1443 static void
1444 drm_intel_update_buffer_offsets2 (drm_intel_bufmgr_gem *bufmgr_gem)
1445 {
1446 int i;
1448 for (i = 0; i < bufmgr_gem->exec_count; i++) {
1449 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
1450 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
1452 /* Update the buffer offset */
1453 if (bufmgr_gem->exec2_objects[i].offset != bo->offset) {
1454 DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
1455 bo_gem->gem_handle, bo_gem->name, bo->offset,
1456 (unsigned long long)bufmgr_gem->exec2_objects[i].offset);
1457 bo->offset = bufmgr_gem->exec2_objects[i].offset;
1458 }
1459 }
1460 }
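/**
 * Submit a batchbuffer through the original execbuffer ioctl: flatten
 * the relocation tree into the validation list, append the batch itself,
 * execute, and write back the resulting buffer offsets.
 */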
1462 static int
1463 drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
1464 drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
1465 {
1466 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1467 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1468 struct drm_i915_gem_execbuffer execbuf;
1469 int ret, i;
1471 if (bo_gem->has_error)
1472 return -ENOMEM;
1474 pthread_mutex_lock(&bufmgr_gem->lock);
1475 /* Update indices and set up the validate list. */
1476 drm_intel_gem_bo_process_reloc(bo);
1478 /* Add the batch buffer to the validation list. There are no
1479 * relocations pointing to it.
1480 */
1481 drm_intel_add_validate_buffer(bo);
1483 execbuf.buffers_ptr = (uintptr_t) bufmgr_gem->exec_objects;
1484 execbuf.buffer_count = bufmgr_gem->exec_count;
1485 execbuf.batch_start_offset = 0;
1486 execbuf.batch_len = used;
1487 execbuf.cliprects_ptr = (uintptr_t) cliprects;
1488 execbuf.num_cliprects = num_cliprects;
1489 execbuf.DR1 = 0;
1490 execbuf.DR4 = DR4;
1492 do {
1493 ret = ioctl(bufmgr_gem->fd,
1494 DRM_IOCTL_I915_GEM_EXECBUFFER,
1495 &execbuf);
1496 } while (ret != 0 && errno == EINTR);
1498 if (ret != 0) {
1499 ret = -errno;
1500 if (errno == ENOSPC) {
1501 fprintf(stderr,
1502 "Execbuffer fails to pin. "
1503 "Estimate: %u. Actual: %u. Available: %u\n",
1504 drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
1505 bufmgr_gem->
1506 exec_count),
1507 drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
1508 bufmgr_gem->
1509 exec_count),
1510 (unsigned int)bufmgr_gem->gtt_size);
1511 }
1512 }
1513 drm_intel_update_buffer_offsets(bufmgr_gem);
1515 if (bufmgr_gem->bufmgr.debug)
1516 drm_intel_gem_dump_validation_list(bufmgr_gem);
1518 for (i = 0; i < bufmgr_gem->exec_count; i++) {
1519 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
1520 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1522 /* Disconnect the buffer from the validate list */
1523 bo_gem->validate_index = -1;
1524 bufmgr_gem->exec_bos[i] = NULL;
1525 }
1526 bufmgr_gem->exec_count = 0;
1527 pthread_mutex_unlock(&bufmgr_gem->lock);
1529 return ret;
1530 }
1532 static int
1533 drm_intel_gem_bo_mrb_exec2(drm_intel_bo *bo, int used,
1534 drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
1535 int ring_flag)
1536 {
1537 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
1538 struct drm_i915_gem_execbuffer2 execbuf;
1539 int ret, i;
1541 if ((ring_flag != I915_EXEC_RENDER) && (ring_flag != I915_EXEC_BSD))
1542 return -EINVAL;
1544 pthread_mutex_lock(&bufmgr_gem->lock);
1545 /* Update indices and set up the validate list. */
1546 drm_intel_gem_bo_process_reloc2(bo);
1548 /* Add the batch buffer to the validation list. There are no relocations
1549 * pointing to it.
1550 */
1551 drm_intel_add_validate_buffer2(bo, 0);
1553 execbuf.buffers_ptr = (uintptr_t)bufmgr_gem->exec2_objects;
1554 execbuf.buffer_count = bufmgr_gem->exec_count;
1555 execbuf.batch_start_offset = 0;
1556 execbuf.batch_len = used;
1557 execbuf.cliprects_ptr = (uintptr_t)cliprects;
1558 execbuf.num_cliprects = num_cliprects;
1559 execbuf.DR1 = 0;
1560 execbuf.DR4 = DR4;
1561 execbuf.flags = ring_flag;
1562 execbuf.rsvd1 = 0;
1563 execbuf.rsvd2 = 0;
1565 do {
1566 ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2,
1567 &execbuf);
1568 } while (ret != 0 && errno == EINTR);
1570 if (ret != 0) {
1571 ret = -errno;
1572 if (ret == -ENOMEM) {
1573 fprintf(stderr,
1574 "Execbuffer fails to pin. "
1575 "Estimate: %u. Actual: %u. Available: %u\n",
1576 drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
1577 bufmgr_gem->exec_count),
1578 drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
1579 bufmgr_gem->exec_count),
1580 (unsigned int) bufmgr_gem->gtt_size);
1581 }
1582 }
1583 drm_intel_update_buffer_offsets2(bufmgr_gem);
1585 if (bufmgr_gem->bufmgr.debug)
1586 drm_intel_gem_dump_validation_list(bufmgr_gem);
1588 for (i = 0; i < bufmgr_gem->exec_count; i++) {
1589 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
1590 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
1592 /* Disconnect the buffer from the validate list */
1593 bo_gem->validate_index = -1;
1594 bufmgr_gem->exec_bos[i] = NULL;
1595 }
1596 bufmgr_gem->exec_count = 0;
1597 pthread_mutex_unlock(&bufmgr_gem->lock);
1599 return ret;
1600 }
1602 static int
1603 drm_intel_gem_bo_exec2(drm_intel_bo *bo, int used,
1604 drm_clip_rect_t *cliprects, int num_cliprects,
1605 int DR4)
1606 {
1607 return drm_intel_gem_bo_mrb_exec2(bo, used,
1608 cliprects, num_cliprects, DR4,
1609 I915_EXEC_RENDER);
1610 }
1612 static int
1613 drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)
1614 {
1615 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1616 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1617 struct drm_i915_gem_pin pin;
1618 int ret;
1620 memset(&pin, 0, sizeof(pin));
1621 pin.handle = bo_gem->gem_handle;
1622 pin.alignment = alignment;
1624 do {
1625 ret = ioctl(bufmgr_gem->fd,
1626 DRM_IOCTL_I915_GEM_PIN,
1627 &pin);
1628 } while (ret == -1 && errno == EINTR);
1630 if (ret != 0)
1631 return -errno;
1633 bo->offset = pin.offset;
1634 return 0;
1635 }
1637 static int
1638 drm_intel_gem_bo_unpin(drm_intel_bo *bo)
1639 {
1640 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1641 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1642 struct drm_i915_gem_unpin unpin;
1643 int ret;
1645 memset(&unpin, 0, sizeof(unpin));
1646 unpin.handle = bo_gem->gem_handle;
1648 ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
1649 if (ret != 0)
1650 return -errno;
1652 return 0;
1653 }
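/**
 * Change the tiling mode and stride of a BO via
 * DRM_IOCTL_I915_GEM_SET_TILING (retried on EINTR), updating the cached
 * tiling state and aperture-size estimate on success. The resulting
 * tiling mode is returned through *tiling_mode.
 */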
1655 static int
1656 drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
1657 uint32_t stride)
1658 {
1659 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1660 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1661 struct drm_i915_gem_set_tiling set_tiling;
1662 int ret;
1664 if (bo_gem->global_name == 0 && *tiling_mode == bo_gem->tiling_mode)
1665 return 0;
1667 memset(&set_tiling, 0, sizeof(set_tiling));
1668 set_tiling.handle = bo_gem->gem_handle;
1670 do {
1671 set_tiling.tiling_mode = *tiling_mode;
1672 set_tiling.stride = stride;
1674 ret = ioctl(bufmgr_gem->fd,
1675 DRM_IOCTL_I915_GEM_SET_TILING,
1676 &set_tiling);
1677 } while (ret == -1 && errno == EINTR);
1678 if (ret == 0) {
1679 bo_gem->tiling_mode = set_tiling.tiling_mode;
1680 bo_gem->swizzle_mode = set_tiling.swizzle_mode;
1681 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
1682 } else
1683 ret = -errno;
1685 *tiling_mode = bo_gem->tiling_mode;
1686 return ret;
1687 }
1689 static int
1690 drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
1691 uint32_t * swizzle_mode)
1692 {
1693 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1695 *tiling_mode = bo_gem->tiling_mode;
1696 *swizzle_mode = bo_gem->swizzle_mode;
1697 return 0;
1698 }
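/**
 * Return (creating if necessary) a global "flink" name for the BO so
 * that another process can open it; named buffers are no longer
 * eligible for reuse.
 */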
1700 static int
1701 drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t * name)
1702 {
1703 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1704 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1705 struct drm_gem_flink flink;
1706 int ret;
1708 if (!bo_gem->global_name) {
1709 memset(&flink, 0, sizeof(flink));
1710 flink.handle = bo_gem->gem_handle;
1712 ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink);
1713 if (ret != 0)
1714 return -errno;
1715 bo_gem->global_name = flink.name;
1716 bo_gem->reusable = 0;
1717 }
1719 *name = bo_gem->global_name;
1720 return 0;
1721 }
1723 /**
1724 * Enables unlimited caching of buffer objects for reuse.
1725 *
1726 * This is potentially very memory expensive, as the cache at each bucket
1727 * size is only bounded by how many buffers of that size we've managed to have
1728 * in flight at once.
1729 */
1730 void
1731 drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
1732 {
1733 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1735 bufmgr_gem->bo_reuse = 1;
1736 }
1738 /**
1739 * Enable use of fenced reloc type.
1740 *
1741 * New code should enable this to avoid unnecessary fence register
1742 * allocation. If this option is not enabled, all relocs will have a fence
1743 * register allocated.
1744 */
1745 void
1746 drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr)
1747 {
1748 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
1750 if (bufmgr_gem->bufmgr.bo_exec == drm_intel_gem_bo_exec2)
1751 bufmgr_gem->fenced_relocs = 1;
1752 }
1754 /**
1755 * Return the additional aperture space required by the tree of buffer objects
1756 * rooted at bo.
1757 */
1758 static int
1759 drm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo)
1760 {
1761 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1762 int i;
1763 int total = 0;
1765 if (bo == NULL || bo_gem->included_in_check_aperture)
1766 return 0;
1768 total += bo->size;
1769 bo_gem->included_in_check_aperture = 1;
1771 for (i = 0; i < bo_gem->reloc_count; i++)
1772 total +=
1773 drm_intel_gem_bo_get_aperture_space(bo_gem->
1774 reloc_target_info[i].bo);
1776 return total;
1777 }
1779 /**
1780 * Count the number of buffers in this list that need a fence reg
1781 *
1782 * If the count is greater than the number of available regs, we'll have
1783 * to ask the caller to resubmit a batch with fewer tiled buffers.
1784 *
1785 * This function over-counts if the same buffer is used multiple times.
1786 */
1787 static unsigned int
1788 drm_intel_gem_total_fences(drm_intel_bo ** bo_array, int count)
1789 {
1790 int i;
1791 unsigned int total = 0;
1793 for (i = 0; i < count; i++) {
1794 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
1796 if (bo_gem == NULL)
1797 continue;
1799 total += bo_gem->reloc_tree_fences;
1800 }
1801 return total;
1802 }
1804 /**
1805 * Clear the flag set by drm_intel_gem_bo_get_aperture_space() so we're ready
1806 * for the next drm_intel_bufmgr_check_aperture_space() call.
1807 */
1808 static void
1809 drm_intel_gem_bo_clear_aperture_space_flag(drm_intel_bo *bo)
1810 {
1811 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1812 int i;
1814 if (bo == NULL || !bo_gem->included_in_check_aperture)
1815 return;
1817 bo_gem->included_in_check_aperture = 0;
1819 for (i = 0; i < bo_gem->reloc_count; i++)
1820 drm_intel_gem_bo_clear_aperture_space_flag(bo_gem->
1821 reloc_target_info[i].bo);
1822 }
1824 /**
1825 * Return a conservative estimate for the amount of aperture required
1826 * for a collection of buffers. This may double-count some buffers.
1827 */
1828 static unsigned int
1829 drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count)
1830 {
1831 int i;
1832 unsigned int total = 0;
1834 for (i = 0; i < count; i++) {
1835 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
1836 if (bo_gem != NULL)
1837 total += bo_gem->reloc_tree_size;
1838 }
1839 return total;
1840 }
1842 /**
1843 * Return the amount of aperture needed for a collection of buffers.
1844 * This avoids double counting any buffers, at the cost of looking
1845 * at every buffer in the set.
1846 */
1847 static unsigned int
1848 drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count)
1849 {
1850 int i;
1851 unsigned int total = 0;
1853 for (i = 0; i < count; i++) {
1854 total += drm_intel_gem_bo_get_aperture_space(bo_array[i]);
1855 /* For the first buffer object in the array, we get an
1856 * accurate count back for its reloc_tree size (since nothing
1857 * had been flagged as being counted yet). We can save that
1858 * value out as a more conservative reloc_tree_size that
1859 * avoids double-counting target buffers. Since the first
1860 * buffer happens to usually be the batch buffer in our
1861 * callers, this can pull us back from doing the tree
1862 * walk on every new batch emit.
1863 */
1864 if (i == 0) {
1865 drm_intel_bo_gem *bo_gem =
1866 (drm_intel_bo_gem *) bo_array[i];
1867 bo_gem->reloc_tree_size = total;
1868 }
1869 }
1871 for (i = 0; i < count; i++)
1872 drm_intel_gem_bo_clear_aperture_space_flag(bo_array[i]);
1873 return total;
1874 }
1876 /**
1877 * Return -1 if the batchbuffer should be flushed before attempting to
1878 * emit rendering referencing the buffers pointed to by bo_array.
1879 *
1880 * This is required because if we try to emit a batchbuffer with relocations
1881 * to a tree of buffers that won't simultaneously fit in the aperture,
1882 * the rendering will return an error at a point where the software is not
1883 * prepared to recover from it.
1884 *
1885 * However, we also want to emit the batchbuffer significantly before we reach
1886 * the limit, as a series of batchbuffers each of which references buffers
1887 * covering almost all of the aperture means that at each emit we end up
1888 * waiting to evict a buffer from the last rendering, and we get synchronous
1889 * performance. By emitting smaller batchbuffers, we eat some CPU overhead to
1890 * get better parallelism.
1891 */
1892 static int
1893 drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count)
1894 {
1895 drm_intel_bufmgr_gem *bufmgr_gem =
1896 (drm_intel_bufmgr_gem *) bo_array[0]->bufmgr;
1897 unsigned int total = 0;
1898 unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4;
1899 int total_fences;
1901 /* Check for fence reg constraints if necessary */
1902 if (bufmgr_gem->available_fences) {
1903 total_fences = drm_intel_gem_total_fences(bo_array, count);
1904 if (total_fences > bufmgr_gem->available_fences)
1905 return -ENOSPC;
1906 }
1908 total = drm_intel_gem_estimate_batch_space(bo_array, count);
1910 if (total > threshold)
1911 total = drm_intel_gem_compute_batch_space(bo_array, count);
1913 if (total > threshold) {
1914 DBG("check_space: overflowed available aperture, "
1915 "%dkb vs %dkb\n",
1916 total / 1024, (int)bufmgr_gem->gtt_size / 1024);
1917 return -ENOSPC;
1918 } else {
1919 DBG("drm_check_space: total %dkb vs bufgr %dkb\n", total / 1024,
1920 (int)bufmgr_gem->gtt_size / 1024);
1921 return 0;
1922 }
1923 }
1925 /*
1926 * Disable buffer reuse for objects which are shared with the kernel
1927 * as scanout buffers
1928 */
1929 static int
1930 drm_intel_gem_bo_disable_reuse(drm_intel_bo *bo)
1931 {
1932 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1934 bo_gem->reusable = 0;
1935 return 0;
1936 }
1938 static int
1939 drm_intel_gem_bo_is_reusable(drm_intel_bo *bo)
1940 {
1941 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1943 return bo_gem->reusable;
1944 }
1946 static int
1947 _drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
1948 {
1949 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1950 int i;
1952 for (i = 0; i < bo_gem->reloc_count; i++) {
1953 if (bo_gem->reloc_target_info[i].bo == target_bo)
1954 return 1;
1955 if (_drm_intel_gem_bo_references(bo_gem->reloc_target_info[i].bo,
1956 target_bo))
1957 return 1;
1958 }
1960 return 0;
1961 }
1963 /** Return true if target_bo is referenced by bo's relocation tree. */
1964 static int
1965 drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
1966 {
1967 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
1969 if (bo == NULL || target_bo == NULL)
1970 return 0;
1971 if (target_bo_gem->used_as_reloc_target)
1972 return _drm_intel_gem_bo_references(bo, target_bo);
1973 return 0;
1974 }
1976 /**
1977 * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
1978 * and manage buffer objects.
1979 *
1980 * \param fd File descriptor of the opened DRM device.
1981 */
1982 drm_intel_bufmgr *
1983 drm_intel_bufmgr_gem_init(int fd, int batch_size)
1984 {
1985 drm_intel_bufmgr_gem *bufmgr_gem;
1986 struct drm_i915_gem_get_aperture aperture;
1987 drm_i915_getparam_t gp;
1988 int ret, i;
1989 unsigned long size;
1990 int exec2 = 0, has_bsd = 0;
1992 bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
1993 if (bufmgr_gem == NULL)
1994 return NULL;
1996 bufmgr_gem->fd = fd;
1998 if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
1999 free(bufmgr_gem);
2000 return NULL;
2001 }
2003 ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
2005 if (ret == 0)
2006 bufmgr_gem->gtt_size = aperture.aper_available_size;
2007 else {
2008 fprintf(stderr, "DRM_IOCTL_I915_GEM_GET_APERTURE failed: %s\n",
2009 strerror(errno));
2010 bufmgr_gem->gtt_size = 128 * 1024 * 1024;
2011 fprintf(stderr, "Assuming %dkB available aperture size.\n"
2012 "May lead to reduced performance or incorrect "
2013 "rendering.\n",
2014 (int)bufmgr_gem->gtt_size / 1024);
2015 }
2017 gp.param = I915_PARAM_CHIPSET_ID;
2018 gp.value = &bufmgr_gem->pci_device;
2019 ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
2020 if (ret) {
2021 fprintf(stderr, "get chip id failed: %d [%d]\n", ret, errno);
2022 fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
2023 }
2025 if (IS_GEN2(bufmgr_gem))
2026 bufmgr_gem->gen = 2;
2027 else if (IS_GEN3(bufmgr_gem))
2028 bufmgr_gem->gen = 3;
2029 else if (IS_GEN4(bufmgr_gem))
2030 bufmgr_gem->gen = 4;
2031 else
2032 bufmgr_gem->gen = 6;
2034 gp.param = I915_PARAM_HAS_EXECBUF2;
2035 ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
2036 if (!ret)
2037 exec2 = 1;
2039 gp.param = I915_PARAM_HAS_BSD;
2040 ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
2041 if (!ret)
2042 has_bsd = 1;
2044 if (bufmgr_gem->gen < 4) {
2045 gp.param = I915_PARAM_NUM_FENCES_AVAIL;
2046 gp.value = &bufmgr_gem->available_fences;
2047 ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
2048 if (ret) {
2049 fprintf(stderr, "get fences failed: %d [%d]\n", ret,
2050 errno);
2051 fprintf(stderr, "param: %d, val: %d\n", gp.param,
2052 *gp.value);
2053 bufmgr_gem->available_fences = 0;
2054 } else {
2055 /* XXX The kernel reports the total number of fences,
2056 * including any that may be pinned.
2057 *
2058 * We presume that there will be at least one pinned
2059 * fence for the scanout buffer, but there may be more
2060 * than one scanout and the user may be manually
2061 * pinning buffers. Let's move to execbuffer2 and
2062 * thereby forget the insanity of using fences...
2063 */
2064 bufmgr_gem->available_fences -= 2;
2065 if (bufmgr_gem->available_fences < 0)
2066 bufmgr_gem->available_fences = 0;
2067 }
2068 }
2070 /* Let's go with one relocation per every 2 dwords (but round down a bit
2071 * since a power of two will mean an extra page allocation for the reloc
2072 * buffer).
2073 *
2074 * Every 4 was too few for the blender benchmark.
2075 */
2076 bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;
2078 bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc;
2079 bufmgr_gem->bufmgr.bo_alloc_for_render =
2080 drm_intel_gem_bo_alloc_for_render;
2081 bufmgr_gem->bufmgr.bo_alloc_tiled = drm_intel_gem_bo_alloc_tiled;
2082 bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference;
2083 bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference;
2084 bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map;
2085 bufmgr_gem->bufmgr.bo_unmap = drm_intel_gem_bo_unmap;
2086 bufmgr_gem->bufmgr.bo_subdata = drm_intel_gem_bo_subdata;
2087 bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata;
2088 bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering;
2089 bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc;
2090 bufmgr_gem->bufmgr.bo_emit_reloc_fence = drm_intel_gem_bo_emit_reloc_fence;
2091 bufmgr_gem->bufmgr.bo_pin = drm_intel_gem_bo_pin;
2092 bufmgr_gem->bufmgr.bo_unpin = drm_intel_gem_bo_unpin;
2093 bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling;
2094 bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
2095 bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
2096 /* Use the new one if available */
2097 if (exec2) {
2098 bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec2;
2099 if (has_bsd)
2100 bufmgr_gem->bufmgr.bo_mrb_exec = drm_intel_gem_bo_mrb_exec2;
2101 } else
2102 bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
2103 bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
2104 bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise;
2105 bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_destroy;
2106 bufmgr_gem->bufmgr.debug = 0;
2107 bufmgr_gem->bufmgr.check_aperture_space =
2108 drm_intel_gem_check_aperture_space;
2109 bufmgr_gem->bufmgr.bo_disable_reuse = drm_intel_gem_bo_disable_reuse;
2110 bufmgr_gem->bufmgr.bo_is_reusable = drm_intel_gem_bo_is_reusable;
2111 bufmgr_gem->bufmgr.get_pipe_from_crtc_id =
2112 drm_intel_gem_get_pipe_from_crtc_id;
2113 bufmgr_gem->bufmgr.bo_references = drm_intel_gem_bo_references;
2115 /* Initialize the linked lists for BO reuse cache. */
2116 for (i = 0, size = 4096; i < DRM_INTEL_GEM_BO_BUCKETS; i++, size *= 2) {
2117 DRMINITLISTHEAD(&bufmgr_gem->cache_bucket[i].head);
2118 bufmgr_gem->cache_bucket[i].size = size;
2119 }
2121 return &bufmgr_gem->bufmgr;
2122 }