/**************************************************************************
 *
 * Copyright © 2007 Red Hat Inc.
 * Copyright © 2007 Intel Corporation
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 *          Keith Whitwell <keithw-at-tungstengraphics-dot-com>
 *          Eric Anholt <eric@anholt.net>
 *          Dave Airlie <airlied@linux.ie>
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <xf86drm.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <time.h>		/* clock_gettime() */
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>

#include "libdrm_lists.h"
#include "intel_atomic.h"
#include "intel_bufmgr.h"
#include "intel_bufmgr_priv.h"
#include "intel_chipset.h"

#include "i915_drm.h"

#define DBG(...) do {					\
	if (bufmgr_gem->bufmgr.debug)			\
		fprintf(stderr, __VA_ARGS__);		\
} while (0)

typedef struct _drm_intel_bo_gem drm_intel_bo_gem;

struct drm_intel_gem_bo_bucket {
	drmMMListHead head;
	unsigned long size;
};

/* Only cache objects up to 32MB (the largest bucket size, 4kB << 13).
 * Bigger than that, and the rounding of the size makes many operations
 * fail that wouldn't otherwise.
 */
#define DRM_INTEL_GEM_BO_BUCKETS	14
typedef struct _drm_intel_bufmgr_gem {
	drm_intel_bufmgr bufmgr;

	int fd;

	int max_relocs;

	pthread_mutex_t lock;

	struct drm_i915_gem_exec_object *exec_objects;
	drm_intel_bo **exec_bos;
	int exec_size;
	int exec_count;

	/** Array of lists of cached gem objects of power-of-two sizes */
	struct drm_intel_gem_bo_bucket cache_bucket[DRM_INTEL_GEM_BO_BUCKETS];

	uint64_t gtt_size;
	int available_fences;
	int pci_device;
	char bo_reuse;
} drm_intel_bufmgr_gem;

struct _drm_intel_bo_gem {
	drm_intel_bo bo;

	atomic_t refcount;
	uint32_t gem_handle;
	const char *name;

	/**
	 * Kernel-assigned global name for this object
	 */
	unsigned int global_name;

	/**
	 * Index of the buffer within the validation list while preparing a
	 * batchbuffer execution.
	 */
	int validate_index;

	/**
	 * Current tiling mode
	 */
	uint32_t tiling_mode;
	uint32_t swizzle_mode;

	time_t free_time;

	/** Array passed to the DRM containing relocation information. */
	struct drm_i915_gem_relocation_entry *relocs;
	/** Array of bos corresponding to relocs[i].target_handle */
	drm_intel_bo **reloc_target_bo;
	/** Number of entries in relocs */
	int reloc_count;
	/** Mapped address for the buffer, saved across map/unmap cycles */
	void *mem_virtual;
	/** GTT virtual address for the buffer, saved across map/unmap cycles */
	void *gtt_virtual;

	/** BO cache list */
	drmMMListHead head;

	/**
	 * Boolean of whether this BO and its children have been included in
	 * the current drm_intel_bufmgr_check_aperture_space() total.
	 */
	char included_in_check_aperture;

	/**
	 * Boolean of whether this buffer has been used as a relocation
	 * target and had its size accounted for, and thus can't have any
	 * further relocations added to it.
	 */
	char used_as_reloc_target;

	/**
	 * Boolean of whether this buffer can be re-used
	 */
	char reusable;

	/**
	 * Size in bytes of this buffer and its relocation descendants.
	 *
	 * Used to avoid costly tree walking in
	 * drm_intel_bufmgr_check_aperture in the common case.
	 */
	int reloc_tree_size;

	/**
	 * Number of potential fence registers required by this buffer and its
	 * relocations.
	 */
	int reloc_tree_fences;
};

static unsigned int
drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count);

static unsigned int
drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count);

static int
drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
			    uint32_t *swizzle_mode);

static int
drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
			    uint32_t stride);

static void drm_intel_gem_bo_unreference_locked(drm_intel_bo *bo);
static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
						      time_t time);

static void drm_intel_gem_bo_unreference(drm_intel_bo *bo);

static void drm_intel_gem_bo_free(drm_intel_bo *bo);

static unsigned long
drm_intel_gem_bo_tile_size(drm_intel_bufmgr_gem *bufmgr_gem, unsigned long size,
			   uint32_t *tiling_mode)
{
	unsigned long min_size, max_size;
	unsigned long i;

	if (*tiling_mode == I915_TILING_NONE)
		return size;

	/* 965+ just need multiples of page size for tiling */
	if (IS_I965G(bufmgr_gem))
		return ROUND_UP_TO(size, 4096);

	/* Older chips need powers of two, of at least 512k or 1M */
	if (IS_I9XX(bufmgr_gem)) {
		min_size = 1024*1024;
		max_size = 128*1024*1024;
	} else {
		min_size = 512*1024;
		max_size = 64*1024*1024;
	}

	if (size > max_size) {
		*tiling_mode = I915_TILING_NONE;
		return size;
	}

	for (i = min_size; i < size; i <<= 1)
		;

	return i;
}

/*
 * Round a given pitch up to the minimum required for X tiling on a
 * given chip. We use 512 as the minimum to allow for a later tiling
 * change.
 */
static unsigned long
drm_intel_gem_bo_tile_pitch(drm_intel_bufmgr_gem *bufmgr_gem,
			    unsigned long pitch, uint32_t tiling_mode)
{
	unsigned long tile_width = 512;
	unsigned long i;

	if (tiling_mode == I915_TILING_NONE)
		return ROUND_UP_TO(pitch, tile_width);

	/* 965 is flexible */
	if (IS_I965G(bufmgr_gem))
		return ROUND_UP_TO(pitch, tile_width);

	/* Pre-965 needs power of two tile width */
	for (i = tile_width; i < pitch; i <<= 1)
		;

	return i;
}

static struct drm_intel_gem_bo_bucket *
drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
				 unsigned long size)
{
	int i;

	for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) {
		struct drm_intel_gem_bo_bucket *bucket =
		    &bufmgr_gem->cache_bucket[i];
		if (bucket->size >= size) {
			return bucket;
		}
	}

	return NULL;
}

static void
drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
{
	int i, j;

	for (i = 0; i < bufmgr_gem->exec_count; i++) {
		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

		if (bo_gem->relocs == NULL) {
			DBG("%2d: %d (%s)\n", i, bo_gem->gem_handle,
			    bo_gem->name);
			continue;
		}

		for (j = 0; j < bo_gem->reloc_count; j++) {
			drm_intel_bo *target_bo = bo_gem->reloc_target_bo[j];
			drm_intel_bo_gem *target_gem =
			    (drm_intel_bo_gem *) target_bo;

			DBG("%2d: %d (%s)@0x%08llx -> "
			    "%d (%s)@0x%08lx + 0x%08x\n",
			    i,
			    bo_gem->gem_handle, bo_gem->name,
			    (unsigned long long)bo_gem->relocs[j].offset,
			    target_gem->gem_handle,
			    target_gem->name,
			    target_bo->offset,
			    bo_gem->relocs[j].delta);
		}
	}
}

static void
drm_intel_gem_bo_reference(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	assert(atomic_read(&bo_gem->refcount) > 0);
	atomic_inc(&bo_gem->refcount);
}

/**
 * Adds the given buffer to the list of buffers to be validated (moved into the
 * appropriate memory type) with the next batch submission.
 *
 * If a buffer is validated multiple times in a batch submission, it ends up
 * with the intersection of the memory type flags and the union of the
 * access flags.
 */
static void
drm_intel_add_validate_buffer(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int index;

	if (bo_gem->validate_index != -1)
		return;

	/* Extend the array of validation entries as necessary. */
	if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
		int new_size = bufmgr_gem->exec_size * 2;

		if (new_size == 0)
			new_size = 5;

		bufmgr_gem->exec_objects =
		    realloc(bufmgr_gem->exec_objects,
			    sizeof(*bufmgr_gem->exec_objects) * new_size);
		bufmgr_gem->exec_bos =
		    realloc(bufmgr_gem->exec_bos,
			    sizeof(*bufmgr_gem->exec_bos) * new_size);
		bufmgr_gem->exec_size = new_size;
	}

	index = bufmgr_gem->exec_count;
	bo_gem->validate_index = index;
	/* Fill in array entry */
	bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
	bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
	bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t) bo_gem->relocs;
	bufmgr_gem->exec_objects[index].alignment = 0;
	bufmgr_gem->exec_objects[index].offset = 0;
	bufmgr_gem->exec_bos[index] = bo;
	drm_intel_gem_bo_reference(bo);
	bufmgr_gem->exec_count++;
}

#define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
	sizeof(uint32_t))

static int
drm_intel_setup_reloc_list(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	unsigned int max_relocs = bufmgr_gem->max_relocs;

	if (bo->size / 4 < max_relocs)
		max_relocs = bo->size / 4;

	bo_gem->relocs = malloc(max_relocs *
				sizeof(struct drm_i915_gem_relocation_entry));
	bo_gem->reloc_target_bo = malloc(max_relocs * sizeof(drm_intel_bo *));

	/* Don't hand out a half-initialized relocation list on malloc
	 * failure.
	 */
	if (bo_gem->relocs == NULL || bo_gem->reloc_target_bo == NULL) {
		free(bo_gem->relocs);
		bo_gem->relocs = NULL;
		free(bo_gem->reloc_target_bo);
		bo_gem->reloc_target_bo = NULL;
		return -ENOMEM;
	}

	return 0;
}

static int
drm_intel_gem_bo_busy(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_busy busy;
	int ret;

	memset(&busy, 0, sizeof(busy));
	busy.handle = bo_gem->gem_handle;

	ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);

	return (ret == 0 && busy.busy);
}

static int
drm_intel_gem_bo_madvise(drm_intel_bufmgr_gem *bufmgr_gem,
			 drm_intel_bo_gem *bo_gem, int state)
{
	struct drm_i915_gem_madvise madv;

	madv.handle = bo_gem->gem_handle;
	madv.madv = state;
	madv.retained = 1;
	ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);

	return madv.retained;
}

/* drop the oldest entries that have been purged by the kernel */
static void
drm_intel_gem_bo_cache_purge_bucket(drm_intel_bufmgr_gem *bufmgr_gem,
				    struct drm_intel_gem_bo_bucket *bucket)
{
	while (!DRMLISTEMPTY(&bucket->head)) {
		drm_intel_bo_gem *bo_gem;

		bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
				      bucket->head.next, head);
		if (drm_intel_gem_bo_madvise
		    (bufmgr_gem, bo_gem, I915_MADV_DONTNEED))
			break;

		DRMLISTDEL(&bo_gem->head);
		drm_intel_gem_bo_free(&bo_gem->bo);
	}
}

static drm_intel_bo *
drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr,
				const char *name,
				unsigned long size,
				unsigned long flags)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	drm_intel_bo_gem *bo_gem;
	unsigned int page_size = getpagesize();
	int ret;
	struct drm_intel_gem_bo_bucket *bucket;
	int alloc_from_cache;
	unsigned long bo_size;
	int for_render = 0;

	if (flags & BO_ALLOC_FOR_RENDER)
		for_render = 1;

	/* Round the allocated size up to a power of two number of pages. */
	bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, size);

	/* If we don't have caching at this size, don't actually round the
	 * allocation up.
	 */
	if (bucket == NULL) {
		bo_size = size;
		if (bo_size < page_size)
			bo_size = page_size;
	} else {
		bo_size = bucket->size;
	}

	pthread_mutex_lock(&bufmgr_gem->lock);
	/* Get a buffer out of the cache if available */
retry:
	alloc_from_cache = 0;
	if (bucket != NULL && !DRMLISTEMPTY(&bucket->head)) {
		if (for_render) {
			/* Allocate new render-target BOs from the tail (MRU)
			 * of the list, as it will likely be hot in the GPU
			 * cache and in the aperture for us.
			 */
			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
					      bucket->head.prev, head);
			DRMLISTDEL(&bo_gem->head);
			alloc_from_cache = 1;
		} else {
			/* For non-render-target BOs (where we're probably
			 * going to map it first thing in order to fill it
			 * with data), check if the last BO in the cache is
			 * unbusy, and only reuse in that case. Otherwise,
			 * allocating a new buffer is probably faster than
			 * waiting for the GPU to finish.
			 */
			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
					      bucket->head.next, head);
			if (!drm_intel_gem_bo_busy(&bo_gem->bo)) {
				alloc_from_cache = 1;
				DRMLISTDEL(&bo_gem->head);
			}
		}

		if (alloc_from_cache) {
			if (!drm_intel_gem_bo_madvise
			    (bufmgr_gem, bo_gem, I915_MADV_WILLNEED)) {
				drm_intel_gem_bo_free(&bo_gem->bo);
				drm_intel_gem_bo_cache_purge_bucket(bufmgr_gem,
								    bucket);
				goto retry;
			}
		}
	}
	pthread_mutex_unlock(&bufmgr_gem->lock);

	if (!alloc_from_cache) {
		struct drm_i915_gem_create create;

		bo_gem = calloc(1, sizeof(*bo_gem));
		if (!bo_gem)
			return NULL;

		bo_gem->bo.size = bo_size;
		memset(&create, 0, sizeof(create));
		create.size = bo_size;

		ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CREATE, &create);
		bo_gem->gem_handle = create.handle;
		bo_gem->bo.handle = bo_gem->gem_handle;
		if (ret != 0) {
			free(bo_gem);
			return NULL;
		}
		bo_gem->bo.bufmgr = bufmgr;
	}

	bo_gem->name = name;
	atomic_set(&bo_gem->refcount, 1);
	bo_gem->validate_index = -1;
	bo_gem->reloc_tree_size = bo_gem->bo.size;
	bo_gem->reloc_tree_fences = 0;
	bo_gem->used_as_reloc_target = 0;
	bo_gem->tiling_mode = I915_TILING_NONE;
	bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
	bo_gem->reusable = 1;

	DBG("bo_create: buf %d (%s) %ldb\n",
	    bo_gem->gem_handle, bo_gem->name, size);

	return &bo_gem->bo;
}

static drm_intel_bo *
drm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
				  const char *name,
				  unsigned long size,
				  unsigned int alignment)
{
	return drm_intel_gem_bo_alloc_internal(bufmgr, name, size,
					       BO_ALLOC_FOR_RENDER);
}

static drm_intel_bo *
drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr,
		       const char *name,
		       unsigned long size,
		       unsigned int alignment)
{
	return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, 0);
}

static drm_intel_bo *
drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
			     int x, int y, int cpp, uint32_t *tiling_mode,
			     unsigned long *pitch, unsigned long flags)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
	drm_intel_bo *bo;
	unsigned long size, stride, aligned_y = y;
	int ret;

	if (*tiling_mode == I915_TILING_NONE)
		aligned_y = ALIGN(y, 2);
	else if (*tiling_mode == I915_TILING_X)
		aligned_y = ALIGN(y, 8);
	else if (*tiling_mode == I915_TILING_Y)
		aligned_y = ALIGN(y, 32);

	stride = x * cpp;
	stride = drm_intel_gem_bo_tile_pitch(bufmgr_gem, stride, *tiling_mode);
	size = stride * aligned_y;
	size = drm_intel_gem_bo_tile_size(bufmgr_gem, size, tiling_mode);

	bo = drm_intel_gem_bo_alloc_internal(bufmgr, name, size, flags);
	if (!bo)
		return NULL;

	ret = drm_intel_gem_bo_set_tiling(bo, tiling_mode, stride);
	if (ret != 0) {
		drm_intel_gem_bo_unreference(bo);
		return NULL;
	}

	*pitch = stride;

	return bo;
}
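
/* Usage sketch (illustrative): allocating a tiled surface through the
 * public drm_intel_bo_alloc_tiled() wrapper, which dispatches here.  The
 * 1024x768x4 geometry is a made-up example.  The kernel may refuse or
 * downgrade the tiling request, so check the mode returned in tiling_mode
 * instead of assuming the request was honored:
 *
 *	uint32_t tiling = I915_TILING_X;
 *	unsigned long pitch;
 *	drm_intel_bo *surf = drm_intel_bo_alloc_tiled(bufmgr, "surf",
 *						      1024, 768, 4,
 *						      &tiling, &pitch, 0);
 *	if (surf != NULL && tiling != I915_TILING_X)
 *		...fall back to a linear rendering path...
 */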

/**
 * Returns a drm_intel_bo wrapping the given buffer object handle.
 *
 * This can be used when one application needs to pass a buffer object
 * to another.
 */
drm_intel_bo *
drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
				  const char *name,
				  unsigned int handle)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	drm_intel_bo_gem *bo_gem;
	int ret;
	struct drm_gem_open open_arg;
	struct drm_i915_gem_get_tiling get_tiling;

	bo_gem = calloc(1, sizeof(*bo_gem));
	if (!bo_gem)
		return NULL;

	memset(&open_arg, 0, sizeof(open_arg));
	open_arg.name = handle;
	ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_OPEN, &open_arg);
	if (ret != 0) {
		fprintf(stderr, "Couldn't reference %s handle 0x%08x: %s\n",
			name, handle, strerror(errno));
		free(bo_gem);
		return NULL;
	}
	bo_gem->bo.size = open_arg.size;
	bo_gem->bo.offset = 0;
	bo_gem->bo.virtual = NULL;
	bo_gem->bo.bufmgr = bufmgr;
	bo_gem->name = name;
	atomic_set(&bo_gem->refcount, 1);
	bo_gem->validate_index = -1;
	bo_gem->gem_handle = open_arg.handle;
	bo_gem->global_name = handle;
	bo_gem->reusable = 0;

	memset(&get_tiling, 0, sizeof(get_tiling));
	get_tiling.handle = bo_gem->gem_handle;
	ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling);
	if (ret != 0) {
		drm_intel_gem_bo_unreference(&bo_gem->bo);
		return NULL;
	}
	bo_gem->tiling_mode = get_tiling.tiling_mode;
	bo_gem->swizzle_mode = get_tiling.swizzle_mode;
	if (bo_gem->tiling_mode == I915_TILING_NONE)
		bo_gem->reloc_tree_fences = 0;
	else
		bo_gem->reloc_tree_fences = 1;

	DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);

	return &bo_gem->bo;
}
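
/* Usage sketch (illustrative): the flink/open handshake this function is
 * meant for.  One process publishes a global name with drm_intel_bo_flink()
 * and hands it to a peer; the peer wraps it here.  send_name_to_peer() is a
 * hypothetical IPC helper, not part of libdrm:
 *
 *	uint32_t name;				// process A
 *	if (drm_intel_bo_flink(bo, &name) == 0)
 *		send_name_to_peer(name);
 *
 *	drm_intel_bo *shared =			// process B
 *	    drm_intel_bo_gem_create_from_name(bufmgr, "shared", name);
 */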

static void
drm_intel_gem_bo_free(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_gem_close close;
	int ret;

	if (bo_gem->mem_virtual)
		munmap(bo_gem->mem_virtual, bo_gem->bo.size);
	if (bo_gem->gtt_virtual)
		munmap(bo_gem->gtt_virtual, bo_gem->bo.size);

	free(bo_gem->reloc_target_bo);
	free(bo_gem->relocs);

	/* Close this object */
	memset(&close, 0, sizeof(close));
	close.handle = bo_gem->gem_handle;
	ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
	if (ret != 0) {
		fprintf(stderr,
			"DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
			bo_gem->gem_handle, bo_gem->name, strerror(errno));
	}
	free(bo);
}

/** Frees all cached buffers significantly older than @time. */
static void
drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
{
	int i;

	for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) {
		struct drm_intel_gem_bo_bucket *bucket =
		    &bufmgr_gem->cache_bucket[i];

		while (!DRMLISTEMPTY(&bucket->head)) {
			drm_intel_bo_gem *bo_gem;

			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
					      bucket->head.next, head);
			if (time - bo_gem->free_time <= 1)
				break;

			DRMLISTDEL(&bo_gem->head);

			drm_intel_gem_bo_free(&bo_gem->bo);
		}
	}
}

static void
drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_intel_gem_bo_bucket *bucket;
	uint32_t tiling_mode;
	int i;

	/* Unreference all the target buffers */
	for (i = 0; i < bo_gem->reloc_count; i++) {
		drm_intel_gem_bo_unreference_locked_timed(bo_gem->
							  reloc_target_bo[i],
							  time);
	}

	DBG("bo_unreference final: %d (%s)\n",
	    bo_gem->gem_handle, bo_gem->name);

	bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
	/* Put the buffer into our internal cache for reuse if we can. */
	tiling_mode = I915_TILING_NONE;
	if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
	    drm_intel_gem_bo_set_tiling(bo, &tiling_mode, 0) == 0) {
		bo_gem->free_time = time;

		bo_gem->name = NULL;
		bo_gem->validate_index = -1;
		bo_gem->reloc_count = 0;

		DRMLISTADDTAIL(&bo_gem->head, &bucket->head);

		drm_intel_gem_bo_madvise(bufmgr_gem, bo_gem,
					 I915_MADV_DONTNEED);
		drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time);
	} else {
		drm_intel_gem_bo_free(bo);
	}
}

static void drm_intel_gem_bo_unreference_locked(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	assert(atomic_read(&bo_gem->refcount) > 0);
	if (atomic_dec_and_test(&bo_gem->refcount)) {
		struct timespec time;

		clock_gettime(CLOCK_MONOTONIC, &time);
		drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
	}
}

static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
						      time_t time)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	assert(atomic_read(&bo_gem->refcount) > 0);
	if (atomic_dec_and_test(&bo_gem->refcount))
		drm_intel_gem_bo_unreference_final(bo, time);
}

static void drm_intel_gem_bo_unreference(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	assert(atomic_read(&bo_gem->refcount) > 0);
	if (atomic_dec_and_test(&bo_gem->refcount)) {
		drm_intel_bufmgr_gem *bufmgr_gem =
		    (drm_intel_bufmgr_gem *) bo->bufmgr;
		struct timespec time;

		clock_gettime(CLOCK_MONOTONIC, &time);

		pthread_mutex_lock(&bufmgr_gem->lock);
		drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
		pthread_mutex_unlock(&bufmgr_gem->lock);
	}
}

static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_set_domain set_domain;
	int ret;

	pthread_mutex_lock(&bufmgr_gem->lock);

	/* Allow recursive mapping. Mesa may recursively map buffers with
	 * nested display loops.
	 */
	if (!bo_gem->mem_virtual) {
		struct drm_i915_gem_mmap mmap_arg;

		DBG("bo_map: %d (%s)\n", bo_gem->gem_handle, bo_gem->name);

		memset(&mmap_arg, 0, sizeof(mmap_arg));
		mmap_arg.handle = bo_gem->gem_handle;
		mmap_arg.offset = 0;
		mmap_arg.size = bo->size;
		ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
		if (ret != 0) {
			fprintf(stderr,
				"%s:%d: Error mapping buffer %d (%s): %s .\n",
				__FILE__, __LINE__, bo_gem->gem_handle,
				bo_gem->name, strerror(errno));
			pthread_mutex_unlock(&bufmgr_gem->lock);
			return ret;
		}
		bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
	}
	DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
	    bo_gem->mem_virtual);
	bo->virtual = bo_gem->mem_virtual;

	set_domain.handle = bo_gem->gem_handle;
	set_domain.read_domains = I915_GEM_DOMAIN_CPU;
	if (write_enable)
		set_domain.write_domain = I915_GEM_DOMAIN_CPU;
	else
		set_domain.write_domain = 0;
	do {
		ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN,
			    &set_domain);
	} while (ret == -1 && errno == EINTR);
	if (ret != 0) {
		fprintf(stderr, "%s:%d: Error setting to CPU domain %d: %s\n",
			__FILE__, __LINE__, bo_gem->gem_handle,
			strerror(errno));
		pthread_mutex_unlock(&bufmgr_gem->lock);
		return ret;
	}

	pthread_mutex_unlock(&bufmgr_gem->lock);

	return 0;
}
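
/* Usage sketch (illustrative): the typical CPU map/fill/unmap cycle through
 * the public drm_intel_bo_map()/drm_intel_bo_unmap() entry points, which
 * land on the functions in this file.  Mapping with write_enable moves the
 * BO to the CPU write domain, so the kernel knows to flush CPU caches
 * before the GPU touches the buffer again:
 *
 *	if (drm_intel_bo_map(bo, 1) == 0) {
 *		memset(bo->virtual, 0, bo->size);
 *		drm_intel_bo_unmap(bo);
 *	}
 */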

int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_set_domain set_domain;
	int ret;

	pthread_mutex_lock(&bufmgr_gem->lock);

	/* Get a mapping of the buffer if we haven't before. */
	if (bo_gem->gtt_virtual == NULL) {
		struct drm_i915_gem_mmap_gtt mmap_arg;

		DBG("bo_map_gtt: mmap %d (%s)\n", bo_gem->gem_handle,
		    bo_gem->name);

		memset(&mmap_arg, 0, sizeof(mmap_arg));
		mmap_arg.handle = bo_gem->gem_handle;

		/* Get the fake offset back... */
		ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MMAP_GTT,
			    &mmap_arg);
		if (ret != 0) {
			fprintf(stderr,
				"%s:%d: Error preparing buffer map %d (%s): %s .\n",
				__FILE__, __LINE__,
				bo_gem->gem_handle, bo_gem->name,
				strerror(errno));
			pthread_mutex_unlock(&bufmgr_gem->lock);
			return ret;
		}

		/* and mmap it */
		bo_gem->gtt_virtual = mmap(0, bo->size, PROT_READ | PROT_WRITE,
					   MAP_SHARED, bufmgr_gem->fd,
					   mmap_arg.offset);
		if (bo_gem->gtt_virtual == MAP_FAILED) {
			/* Reset to NULL so a later map attempt doesn't hand
			 * out MAP_FAILED as a valid mapping.
			 */
			ret = -errno;
			bo_gem->gtt_virtual = NULL;
			fprintf(stderr,
				"%s:%d: Error mapping buffer %d (%s): %s .\n",
				__FILE__, __LINE__,
				bo_gem->gem_handle, bo_gem->name,
				strerror(-ret));
			pthread_mutex_unlock(&bufmgr_gem->lock);
			return ret;
		}
	}

	bo->virtual = bo_gem->gtt_virtual;

	DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
	    bo_gem->gtt_virtual);

	/* Now move it to the GTT domain so that the CPU caches are flushed */
	set_domain.handle = bo_gem->gem_handle;
	set_domain.read_domains = I915_GEM_DOMAIN_GTT;
	set_domain.write_domain = I915_GEM_DOMAIN_GTT;
	do {
		ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN,
			    &set_domain);
	} while (ret == -1 && errno == EINTR);

	if (ret != 0) {
		fprintf(stderr, "%s:%d: Error setting domain %d: %s\n",
			__FILE__, __LINE__, bo_gem->gem_handle,
			strerror(errno));
	}

	pthread_mutex_unlock(&bufmgr_gem->lock);

	return 0;
}

int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int ret = 0;

	/* Check for NULL before touching bo->bufmgr. */
	if (bo == NULL)
		return 0;

	assert(bo_gem->gtt_virtual != NULL);

	bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	pthread_mutex_lock(&bufmgr_gem->lock);
	bo->virtual = NULL;
	pthread_mutex_unlock(&bufmgr_gem->lock);

	return ret;
}

static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_sw_finish sw_finish;
	int ret;

	/* Check for NULL before touching bo->bufmgr. */
	if (bo == NULL)
		return 0;

	assert(bo_gem->mem_virtual != NULL);

	bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	pthread_mutex_lock(&bufmgr_gem->lock);

	/* Cause a flush to happen if the buffer's pinned for scanout, so the
	 * results show up in a timely manner.
	 */
	sw_finish.handle = bo_gem->gem_handle;
	do {
		ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SW_FINISH,
			    &sw_finish);
	} while (ret == -1 && errno == EINTR);

	bo->virtual = NULL;
	pthread_mutex_unlock(&bufmgr_gem->lock);
	return 0;
}

static int
drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
			 unsigned long size, const void *data)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_pwrite pwrite;
	int ret;

	memset(&pwrite, 0, sizeof(pwrite));
	pwrite.handle = bo_gem->gem_handle;
	pwrite.offset = offset;
	pwrite.size = size;
	pwrite.data_ptr = (uint64_t) (uintptr_t) data;
	do {
		ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
	} while (ret == -1 && errno == EINTR);
	if (ret != 0) {
		/* Report the failure to the caller instead of returning 0. */
		ret = -errno;
		fprintf(stderr,
			"%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
			__FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
			(int)size, strerror(errno));
	}
	return ret;
}

static int
drm_intel_gem_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	struct drm_i915_get_pipe_from_crtc_id get_pipe_from_crtc_id;
	int ret;

	get_pipe_from_crtc_id.crtc_id = crtc_id;
	ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID,
		    &get_pipe_from_crtc_id);
	if (ret != 0) {
		/* We return -1 here to signal that we don't
		 * know which pipe is associated with this crtc.
		 * This lets the caller know that this information
		 * isn't available; using the wrong pipe for
		 * vblank waiting can cause the chipset to lock up
		 */
		return -1;
	}

	return get_pipe_from_crtc_id.pipe;
}

static int
drm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
			     unsigned long size, void *data)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_pread pread;
	int ret;

	memset(&pread, 0, sizeof(pread));
	pread.handle = bo_gem->gem_handle;
	pread.offset = offset;
	pread.size = size;
	pread.data_ptr = (uint64_t) (uintptr_t) data;
	do {
		ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
	} while (ret == -1 && errno == EINTR);
	if (ret != 0) {
		/* Report the failure to the caller instead of returning 0. */
		ret = -errno;
		fprintf(stderr,
			"%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
			__FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
			(int)size, strerror(errno));
	}
	return ret;
}

/** Waits for all GPU rendering to the object to have completed. */
static void
drm_intel_gem_bo_wait_rendering(drm_intel_bo *bo)
{
	drm_intel_gem_bo_start_gtt_access(bo, 0);
}

/**
 * Sets the object to the GTT read and possibly write domain, used by the X
 * 2D driver in the absence of kernel support to do drm_intel_gem_bo_map_gtt().
 *
 * In combination with drm_intel_gem_bo_pin() and manual fence management, we
 * can do tiled pixmaps this way.
 */
void
drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_set_domain set_domain;
	int ret;

	set_domain.handle = bo_gem->gem_handle;
	set_domain.read_domains = I915_GEM_DOMAIN_GTT;
	set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0;
	do {
		ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN,
			    &set_domain);
	} while (ret == -1 && errno == EINTR);
	if (ret != 0) {
		fprintf(stderr,
			"%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
			__FILE__, __LINE__, bo_gem->gem_handle,
			set_domain.read_domains, set_domain.write_domain,
			strerror(errno));
	}
}

static void
drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	int i;

	free(bufmgr_gem->exec_objects);
	free(bufmgr_gem->exec_bos);

	pthread_mutex_destroy(&bufmgr_gem->lock);

	/* Free any cached buffer objects we were going to reuse */
	for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) {
		struct drm_intel_gem_bo_bucket *bucket =
		    &bufmgr_gem->cache_bucket[i];
		drm_intel_bo_gem *bo_gem;

		while (!DRMLISTEMPTY(&bucket->head)) {
			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
					      bucket->head.next, head);
			DRMLISTDEL(&bo_gem->head);

			drm_intel_gem_bo_free(&bo_gem->bo);
		}
	}

	free(bufmgr);
}

/**
 * Adds the target buffer to the validation list and adds the relocation
 * to the reloc_buffer's relocation list.
 *
 * The relocation entry at the given offset must already contain the
 * precomputed relocation value, because the kernel will optimize out
 * the relocation entry write when the buffer hasn't moved from the
 * last known offset in target_bo.
 */
static int
drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
			    drm_intel_bo *target_bo, uint32_t target_offset,
			    uint32_t read_domains, uint32_t write_domain)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;

	pthread_mutex_lock(&bufmgr_gem->lock);

	/* Create a new relocation list if needed */
	if (bo_gem->relocs == NULL)
		drm_intel_setup_reloc_list(bo);

	/* Check overflow */
	assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);

	/* Check args */
	assert(offset <= bo->size - 4);
	assert((write_domain & (write_domain - 1)) == 0);

	/* Make sure that we're not adding a reloc to something whose size has
	 * already been accounted for.
	 */
	assert(!bo_gem->used_as_reloc_target);
	bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
	bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences;

	/* Flag the target to disallow further relocations in it. */
	target_bo_gem->used_as_reloc_target = 1;

	bo_gem->relocs[bo_gem->reloc_count].offset = offset;
	bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
	bo_gem->relocs[bo_gem->reloc_count].target_handle =
	    target_bo_gem->gem_handle;
	bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
	bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
	bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset;

	bo_gem->reloc_target_bo[bo_gem->reloc_count] = target_bo;
	drm_intel_gem_bo_reference(target_bo);

	bo_gem->reloc_count++;

	pthread_mutex_unlock(&bufmgr_gem->lock);

	return 0;
}
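
/* Usage sketch (illustrative): how a batchbuffer writer drives this through
 * the public drm_intel_bo_emit_reloc().  The presumed address is written
 * into the batch at the same dword the relocation describes, so the kernel
 * can skip the rewrite when the target hasn't moved.  "batch", "dest" and
 * the dword index i are placeholders:
 *
 *	batch_map[i] = dest->offset + dest_offset;
 *	drm_intel_bo_emit_reloc(batch, i * 4,
 *				dest, dest_offset,
 *				I915_GEM_DOMAIN_RENDER,
 *				I915_GEM_DOMAIN_RENDER);
 */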

/**
 * Walk the tree of relocations rooted at BO and accumulate the list of
 * validations to be performed and update the relocation buffers with
 * index values into the validation list.
 */
static void
drm_intel_gem_bo_process_reloc(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int i;

	if (bo_gem->relocs == NULL)
		return;

	for (i = 0; i < bo_gem->reloc_count; i++) {
		drm_intel_bo *target_bo = bo_gem->reloc_target_bo[i];

		/* Continue walking the tree depth-first. */
		drm_intel_gem_bo_process_reloc(target_bo);

		/* Add the target to the validate list */
		drm_intel_add_validate_buffer(target_bo);
	}
}

static void
drm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem)
{
	int i;

	for (i = 0; i < bufmgr_gem->exec_count; i++) {
		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

		/* Update the buffer offset */
		if (bufmgr_gem->exec_objects[i].offset != bo->offset) {
			DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
			    bo_gem->gem_handle, bo_gem->name, bo->offset,
			    (unsigned long long)bufmgr_gem->exec_objects[i].
			    offset);
			bo->offset = bufmgr_gem->exec_objects[i].offset;
		}
	}
}

static int
drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
		      drm_clip_rect_t *cliprects, int num_cliprects, int DR4)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	struct drm_i915_gem_execbuffer execbuf;
	int ret, i;

	pthread_mutex_lock(&bufmgr_gem->lock);
	/* Update indices and set up the validate list. */
	drm_intel_gem_bo_process_reloc(bo);

	/* Add the batch buffer to the validation list. There are no
	 * relocations pointing to it.
	 */
	drm_intel_add_validate_buffer(bo);

	execbuf.buffers_ptr = (uintptr_t) bufmgr_gem->exec_objects;
	execbuf.buffer_count = bufmgr_gem->exec_count;
	execbuf.batch_start_offset = 0;
	execbuf.batch_len = used;
	execbuf.cliprects_ptr = (uintptr_t) cliprects;
	execbuf.num_cliprects = num_cliprects;
	execbuf.DR1 = 0;
	execbuf.DR4 = DR4;

	do {
		ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_EXECBUFFER,
			    &execbuf);
	} while (ret != 0 && errno == EAGAIN);

	if (ret != 0 && errno == ENOMEM) {
		fprintf(stderr,
			"Execbuffer fails to pin. "
			"Estimate: %u. Actual: %u. Available: %u\n",
			drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
							   bufmgr_gem->
							   exec_count),
			drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
							  bufmgr_gem->
							  exec_count),
			(unsigned int)bufmgr_gem->gtt_size);
	}
	drm_intel_update_buffer_offsets(bufmgr_gem);

	if (bufmgr_gem->bufmgr.debug)
		drm_intel_gem_dump_validation_list(bufmgr_gem);

	for (i = 0; i < bufmgr_gem->exec_count; i++) {
		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

		/* Disconnect the buffer from the validate list */
		bo_gem->validate_index = -1;
		drm_intel_gem_bo_unreference_locked(bo);
		bufmgr_gem->exec_bos[i] = NULL;
	}
	bufmgr_gem->exec_count = 0;
	pthread_mutex_unlock(&bufmgr_gem->lock);

	return 0;
}
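
/* Usage sketch (illustrative): submitting a finished batch through the
 * public drm_intel_bo_exec(), which lands here.  "used" is the number of
 * bytes actually emitted into the batch; cliprects matter only to the DRI1
 * paths in the X server, so most callers pass none:
 *
 *	drm_intel_bo_unmap(batch);
 *	drm_intel_bo_exec(batch, used, NULL, 0, 0);
 */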

static int
drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_pin pin;
	int ret;

	memset(&pin, 0, sizeof(pin));
	pin.handle = bo_gem->gem_handle;
	pin.alignment = alignment;

	do {
		ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PIN, &pin);
	} while (ret == -1 && errno == EINTR);

	if (ret != 0)
		return -errno;

	bo->offset = pin.offset;
	return 0;
}

static int
drm_intel_gem_bo_unpin(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_unpin unpin;
	int ret;

	memset(&unpin, 0, sizeof(unpin));
	unpin.handle = bo_gem->gem_handle;

	ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
	if (ret != 0)
		return -errno;

	return 0;
}

static int
drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
			    uint32_t stride)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_set_tiling set_tiling;
	int ret;

	if (bo_gem->global_name == 0 && *tiling_mode == bo_gem->tiling_mode)
		return 0;

	/* If we're going from non-tiling to tiling, bump fence count */
	if (bo_gem->tiling_mode == I915_TILING_NONE)
		bo_gem->reloc_tree_fences++;

	memset(&set_tiling, 0, sizeof(set_tiling));
	set_tiling.handle = bo_gem->gem_handle;
	set_tiling.tiling_mode = *tiling_mode;
	set_tiling.stride = stride;

	ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
	if (ret != 0) {
		/* Undo the speculative fence-count bump; the tiling mode
		 * didn't actually change.
		 */
		if (bo_gem->tiling_mode == I915_TILING_NONE)
			bo_gem->reloc_tree_fences--;
		*tiling_mode = bo_gem->tiling_mode;
		return -errno;
	}
	bo_gem->tiling_mode = set_tiling.tiling_mode;
	bo_gem->swizzle_mode = set_tiling.swizzle_mode;

	/* If we're going from tiling to non-tiling, drop fence count */
	if (bo_gem->tiling_mode == I915_TILING_NONE)
		bo_gem->reloc_tree_fences--;

	*tiling_mode = bo_gem->tiling_mode;
	return 0;
}

static int
drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
			    uint32_t *swizzle_mode)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	*tiling_mode = bo_gem->tiling_mode;
	*swizzle_mode = bo_gem->swizzle_mode;
	return 0;
}

static int
drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t *name)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_gem_flink flink;
	int ret;

	if (!bo_gem->global_name) {
		memset(&flink, 0, sizeof(flink));
		flink.handle = bo_gem->gem_handle;

		ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink);
		if (ret != 0)
			return -errno;
		bo_gem->global_name = flink.name;
		bo_gem->reusable = 0;
	}

	*name = bo_gem->global_name;
	return 0;
}

/**
 * Enables unlimited caching of buffer objects for reuse.
 *
 * This is potentially very memory expensive, as the cache at each bucket
 * size is only bounded by how many buffers of that size we've managed to have
 * in flight at once.
 */
void
drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;

	bufmgr_gem->bo_reuse = 1;
}

/**
 * Return the additional aperture space required by the tree of buffer objects
 * rooted at bo.
 */
static int
drm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int i;
	int total = 0;

	if (bo == NULL || bo_gem->included_in_check_aperture)
		return 0;

	total += bo->size;
	bo_gem->included_in_check_aperture = 1;

	for (i = 0; i < bo_gem->reloc_count; i++)
		total +=
		    drm_intel_gem_bo_get_aperture_space(bo_gem->
							reloc_target_bo[i]);

	return total;
}

/**
 * Count the number of buffers in this list that need a fence reg
 *
 * If the count is greater than the number of available regs, we'll have
 * to ask the caller to resubmit a batch with fewer tiled buffers.
 *
 * This function over-counts if the same buffer is used multiple times.
 */
static unsigned int
drm_intel_gem_total_fences(drm_intel_bo **bo_array, int count)
{
	int i;
	unsigned int total = 0;

	for (i = 0; i < count; i++) {
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];

		if (bo_gem == NULL)
			continue;

		total += bo_gem->reloc_tree_fences;
	}
	return total;
}

/**
 * Clear the flag set by drm_intel_gem_bo_get_aperture_space() so we're ready
 * for the next drm_intel_bufmgr_check_aperture_space() call.
 */
static void
drm_intel_gem_bo_clear_aperture_space_flag(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int i;

	if (bo == NULL || !bo_gem->included_in_check_aperture)
		return;

	bo_gem->included_in_check_aperture = 0;

	for (i = 0; i < bo_gem->reloc_count; i++)
		drm_intel_gem_bo_clear_aperture_space_flag(bo_gem->
							   reloc_target_bo[i]);
}

/**
 * Return a conservative estimate for the amount of aperture required
 * for a collection of buffers. This may double-count some buffers.
 */
static unsigned int
drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count)
{
	int i;
	unsigned int total = 0;

	for (i = 0; i < count; i++) {
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
		if (bo_gem != NULL)
			total += bo_gem->reloc_tree_size;
	}
	return total;
}

/**
 * Return the amount of aperture needed for a collection of buffers.
 * This avoids double counting any buffers, at the cost of looking
 * at every buffer in the set.
 */
static unsigned int
drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count)
{
	int i;
	unsigned int total = 0;

	for (i = 0; i < count; i++) {
		total += drm_intel_gem_bo_get_aperture_space(bo_array[i]);
		/* For the first buffer object in the array, we get an
		 * accurate count back for its reloc_tree size (since nothing
		 * had been flagged as being counted yet). We can save that
		 * value out as a more conservative reloc_tree_size that
		 * avoids double-counting target buffers. Since the first
		 * buffer happens to usually be the batch buffer in our
		 * callers, this can pull us back from doing the tree
		 * walk on every new batch emit.
		 */
		if (i == 0) {
			drm_intel_bo_gem *bo_gem =
			    (drm_intel_bo_gem *) bo_array[i];
			bo_gem->reloc_tree_size = total;
		}
	}

	for (i = 0; i < count; i++)
		drm_intel_gem_bo_clear_aperture_space_flag(bo_array[i]);
	return total;
}

/**
 * Return -1 if the batchbuffer should be flushed before attempting to
 * emit rendering referencing the buffers pointed to by bo_array.
 *
 * This is required because if we try to emit a batchbuffer with relocations
 * to a tree of buffers that won't simultaneously fit in the aperture,
 * the rendering will return an error at a point where the software is not
 * prepared to recover from it.
 *
 * However, we also want to emit the batchbuffer significantly before we reach
 * the limit, as a series of batchbuffers each of which references buffers
 * covering almost all of the aperture means that at each emit we end up
 * waiting to evict a buffer from the last rendering, and we get synchronous
 * performance. By emitting smaller batchbuffers, we eat some CPU overhead to
 * get better parallelism.
 */
static int
drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count)
{
	drm_intel_bufmgr_gem *bufmgr_gem =
	    (drm_intel_bufmgr_gem *) bo_array[0]->bufmgr;
	unsigned int total = 0;
	unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4;
	int total_fences;

	/* Check for fence reg constraints if necessary */
	if (bufmgr_gem->available_fences) {
		total_fences = drm_intel_gem_total_fences(bo_array, count);
		if (total_fences > bufmgr_gem->available_fences)
			return -1;
	}

	total = drm_intel_gem_estimate_batch_space(bo_array, count);

	if (total > threshold)
		total = drm_intel_gem_compute_batch_space(bo_array, count);

	if (total > threshold) {
		DBG("check_space: overflowed available aperture, "
		    "%dkb vs %dkb\n",
		    total / 1024, (int)bufmgr_gem->gtt_size / 1024);
		return -1;
	} else {
		DBG("drm_check_space: total %dkb vs bufmgr %dkb\n", total / 1024,
		    (int)bufmgr_gem->gtt_size / 1024);
		return 0;
	}
}
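
/* Usage sketch (illustrative): the flush-and-retry pattern this check
 * enables, via the public drm_intel_bufmgr_check_aperture_space() wrapper.
 * "batch" and "new_target" are placeholders for the batchbuffer being built
 * and a BO about to be referenced from it; flush_batch() is a hypothetical
 * caller-side helper:
 *
 *	drm_intel_bo *check[] = { batch, new_target };
 *
 *	if (drm_intel_bufmgr_check_aperture_space(check, 2) != 0) {
 *		flush_batch();
 *		...re-emit the rendering into a fresh, smaller batch...
 *	}
 */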

/*
 * Disable buffer reuse for objects which are shared with the kernel
 * as scanout buffers
 */
static int
drm_intel_gem_bo_disable_reuse(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	bo_gem->reusable = 0;
	return 0;
}

static int
_drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int i;

	for (i = 0; i < bo_gem->reloc_count; i++) {
		if (bo_gem->reloc_target_bo[i] == target_bo)
			return 1;
		if (_drm_intel_gem_bo_references(bo_gem->reloc_target_bo[i],
						 target_bo))
			return 1;
	}

	return 0;
}

/** Return true if target_bo is referenced by bo's relocation tree. */
static int
drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
{
	drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;

	if (bo == NULL || target_bo == NULL)
		return 0;
	if (target_bo_gem->used_as_reloc_target)
		return _drm_intel_gem_bo_references(bo, target_bo);
	return 0;
}

/**
 * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
 * and manage buffer objects.
 *
 * \param fd File descriptor of the opened DRM device.
 */
drm_intel_bufmgr *
drm_intel_bufmgr_gem_init(int fd, int batch_size)
{
	drm_intel_bufmgr_gem *bufmgr_gem;
	struct drm_i915_gem_get_aperture aperture;
	drm_i915_getparam_t gp;
	int ret, i;
	unsigned long size;

	bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
	if (bufmgr_gem == NULL)
		return NULL;

	bufmgr_gem->fd = fd;

	if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
		free(bufmgr_gem);
		return NULL;
	}

	ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);

	if (ret == 0)
		bufmgr_gem->gtt_size = aperture.aper_available_size;
	else {
		fprintf(stderr, "DRM_IOCTL_I915_GEM_GET_APERTURE failed: %s\n",
			strerror(errno));
		bufmgr_gem->gtt_size = 128 * 1024 * 1024;
		fprintf(stderr, "Assuming %dkB available aperture size.\n"
			"May lead to reduced performance or incorrect "
			"rendering.\n",
			(int)bufmgr_gem->gtt_size / 1024);
	}

	gp.param = I915_PARAM_CHIPSET_ID;
	gp.value = &bufmgr_gem->pci_device;
	ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	if (ret) {
		fprintf(stderr, "get chip id failed: %d [%d]\n", ret, errno);
		fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
	}

	if (!IS_I965G(bufmgr_gem)) {
		gp.param = I915_PARAM_NUM_FENCES_AVAIL;
		gp.value = &bufmgr_gem->available_fences;
		ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
		if (ret) {
			fprintf(stderr, "get fences failed: %d [%d]\n", ret,
				errno);
			fprintf(stderr, "param: %d, val: %d\n", gp.param,
				*gp.value);
			bufmgr_gem->available_fences = 0;
		}
	}

	/* Let's go with one relocation per every 2 dwords (but round down a bit
	 * since a power of two will mean an extra page allocation for the reloc
	 * buffer).
	 *
	 * Every 4 was too few for the blender benchmark.
	 */
	bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;

	bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc;
	bufmgr_gem->bufmgr.bo_alloc_for_render =
	    drm_intel_gem_bo_alloc_for_render;
	bufmgr_gem->bufmgr.bo_alloc_tiled = drm_intel_gem_bo_alloc_tiled;
	bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference;
	bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference;
	bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map;
	bufmgr_gem->bufmgr.bo_unmap = drm_intel_gem_bo_unmap;
	bufmgr_gem->bufmgr.bo_subdata = drm_intel_gem_bo_subdata;
	bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata;
	bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering;
	bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc;
	bufmgr_gem->bufmgr.bo_pin = drm_intel_gem_bo_pin;
	bufmgr_gem->bufmgr.bo_unpin = drm_intel_gem_bo_unpin;
	bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling;
	bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
	bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
	bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
	bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
	bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_destroy;
	bufmgr_gem->bufmgr.debug = 0;
	bufmgr_gem->bufmgr.check_aperture_space =
	    drm_intel_gem_check_aperture_space;
	bufmgr_gem->bufmgr.bo_disable_reuse = drm_intel_gem_bo_disable_reuse;
	bufmgr_gem->bufmgr.get_pipe_from_crtc_id =
	    drm_intel_gem_get_pipe_from_crtc_id;
	bufmgr_gem->bufmgr.bo_references = drm_intel_gem_bo_references;

	/* Initialize the linked lists for BO reuse cache. */
	for (i = 0, size = 4096; i < DRM_INTEL_GEM_BO_BUCKETS; i++, size *= 2) {
		DRMINITLISTHEAD(&bufmgr_gem->cache_bucket[i].head);
		bufmgr_gem->cache_bucket[i].size = size;
	}

	return &bufmgr_gem->bufmgr;
}
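
/* Usage sketch (illustrative): typical bring-up of this buffer manager.
 * The device path and 16kB batch size are conventional example values;
 * BO reuse is opt-in via drm_intel_bufmgr_gem_enable_reuse() because of
 * the memory cost described above:
 *
 *	int fd = open("/dev/dri/card0", O_RDWR);
 *	drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 16 * 1024);
 *
 *	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
 *	drm_intel_bo *bo = drm_intel_bo_alloc(bufmgr, "test", 4096, 4096);
 *	...
 *	drm_intel_bo_unreference(bo);
 *	drm_intel_bufmgr_destroy(bufmgr);
 */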