/**************************************************************************
 *
 * Copyright © 2007 Red Hat Inc.
 * Copyright © 2007 Intel Corporation
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 *          Keith Whitwell <keithw-at-tungstengraphics-dot-com>
 *          Eric Anholt <eric@anholt.net>
 *          Dave Airlie <airlied@linux.ie>
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <xf86drm.h>
#include <xf86atomic.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <assert.h>
#include <pthread.h>
#include <errno.h>
#include <time.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <stdbool.h>

#include "libdrm_lists.h"
#include "intel_bufmgr.h"
#include "intel_bufmgr_priv.h"
#include "intel_chipset.h"

#include "i915_drm.h"

#define DBG(...) do {					\
	if (bufmgr_gem->bufmgr.debug)			\
		fprintf(stderr, __VA_ARGS__);		\
} while (0)

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

typedef struct _drm_intel_bo_gem drm_intel_bo_gem;

struct drm_intel_gem_bo_bucket {
	drmMMListHead head;
	unsigned long size;
};

typedef struct _drm_intel_bufmgr_gem {
	drm_intel_bufmgr bufmgr;

	int fd;

	int max_relocs;

	pthread_mutex_t lock;

	struct drm_i915_gem_exec_object *exec_objects;
	struct drm_i915_gem_exec_object2 *exec2_objects;
	drm_intel_bo **exec_bos;
	int exec_size;
	int exec_count;

	/** Array of lists of cached gem objects of power-of-two sizes */
	struct drm_intel_gem_bo_bucket cache_bucket[14 * 4];
	int num_buckets;
	time_t time;

	drmMMListHead named;
	drmMMListHead vma_cache;
	int vma_count, vma_open, vma_max;

	uint64_t gtt_size;
	int available_fences;
	int pci_device;
	int gen;
	unsigned int has_bsd : 1;
	unsigned int has_blt : 1;
	unsigned int has_relaxed_fencing : 1;
	unsigned int has_llc : 1;
	unsigned int bo_reuse : 1;
	bool fenced_relocs;
} drm_intel_bufmgr_gem;

#define DRM_INTEL_RELOC_FENCE (1<<0)

typedef struct _drm_intel_reloc_target_info {
	drm_intel_bo *bo;
	int flags;
} drm_intel_reloc_target;

struct _drm_intel_bo_gem {
	drm_intel_bo bo;

	atomic_t refcount;
	uint32_t gem_handle;
	const char *name;

	/**
	 * Kernel-assigned global name for this object
	 */
	unsigned int global_name;
	drmMMListHead name_list;

	/**
	 * Index of the buffer within the validation list while preparing a
	 * batchbuffer execution.
	 */
	int validate_index;

	/**
	 * Current tiling mode
	 */
	uint32_t tiling_mode;
	uint32_t swizzle_mode;
	unsigned long stride;

	time_t free_time;

	/** Array passed to the DRM containing relocation information. */
	struct drm_i915_gem_relocation_entry *relocs;
	/**
	 * Array of info structs corresponding to relocs[i].target_handle etc
	 */
	drm_intel_reloc_target *reloc_target_info;
	/** Number of entries in relocs */
	int reloc_count;
	/** Mapped address for the buffer, saved across map/unmap cycles */
	void *mem_virtual;
	/** GTT virtual address for the buffer, saved across map/unmap cycles */
	void *gtt_virtual;
	int map_count;
	drmMMListHead vma_list;

	/** BO cache list */
	drmMMListHead head;

	/**
	 * Boolean of whether this BO and its children have been included in
	 * the current drm_intel_bufmgr_check_aperture_space() total.
	 */
	bool included_in_check_aperture;

	/**
	 * Boolean of whether this buffer has been used as a relocation
	 * target and had its size accounted for, and thus can't have any
	 * further relocations added to it.
	 */
	bool used_as_reloc_target;

	/**
	 * Boolean of whether we have encountered an error whilst building
	 * the relocation tree.
	 */
	bool has_error;

	/**
	 * Boolean of whether this buffer can be re-used
	 */
	bool reusable;

	/**
	 * Size in bytes of this buffer and its relocation descendants.
	 *
	 * Used to avoid costly tree walking in
	 * drm_intel_bufmgr_check_aperture in the common case.
	 */
	int reloc_tree_size;

	/**
	 * Number of potential fence registers required by this buffer and its
	 * relocations.
	 */
	int reloc_tree_fences;

	/** Whether we may need to do the SW_FINISH ioctl on unmap. */
	bool mapped_cpu_write;
};

static unsigned int
drm_intel_gem_estimate_batch_space(drm_intel_bo ** bo_array, int count);

static unsigned int
drm_intel_gem_compute_batch_space(drm_intel_bo ** bo_array, int count);

static int
drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
			    uint32_t * swizzle_mode);

static int
drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
				     uint32_t tiling_mode,
				     uint32_t stride);

static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
						      time_t time);

static void drm_intel_gem_bo_unreference(drm_intel_bo *bo);

static void drm_intel_gem_bo_free(drm_intel_bo *bo);

static unsigned long
drm_intel_gem_bo_tile_size(drm_intel_bufmgr_gem *bufmgr_gem, unsigned long size,
			   uint32_t *tiling_mode)
{
	unsigned long min_size, max_size;
	unsigned long i;

	if (*tiling_mode == I915_TILING_NONE)
		return size;

	/* 965+ just need multiples of page size for tiling */
	if (bufmgr_gem->gen >= 4)
		return ROUND_UP_TO(size, 4096);

	/* Older chips need powers of two, of at least 512k or 1M */
	if (bufmgr_gem->gen == 3) {
		min_size = 1024*1024;
		max_size = 128*1024*1024;
	} else {
		min_size = 512*1024;
		max_size = 64*1024*1024;
	}

	if (size > max_size) {
		*tiling_mode = I915_TILING_NONE;
		return size;
	}

	/* Do we need to allocate every page for the fence? */
	if (bufmgr_gem->has_relaxed_fencing)
		return ROUND_UP_TO(size, 4096);

	for (i = min_size; i < size; i <<= 1)
		;

	return i;
}

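/*
 * Worked example of the rounding above (illustrative, not exercised by
 * this file): on gen3 a 1.5 MiB tiled request rounds up to the next
 * power of two, 2 MiB, while on gen4+ (or with relaxed fencing) the same
 * request only rounds up to the next 4 KiB page boundary.
 */
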
/*
 * Round a given pitch up to the minimum required for X tiling on a
 * given chip.  We use 512 as the minimum to allow for a later tiling
 * change.
 */
static unsigned long
drm_intel_gem_bo_tile_pitch(drm_intel_bufmgr_gem *bufmgr_gem,
			    unsigned long pitch, uint32_t *tiling_mode)
{
	unsigned long tile_width;
	unsigned long i;

	/* If untiled, then just align it so that we can do rendering
	 * to it with the 3D engine.
	 */
	if (*tiling_mode == I915_TILING_NONE)
		return ALIGN(pitch, 64);

	if (*tiling_mode == I915_TILING_X
	    || (IS_915(bufmgr_gem->pci_device)
		&& *tiling_mode == I915_TILING_Y))
		tile_width = 512;
	else
		tile_width = 128;

	/* 965 is flexible */
	if (bufmgr_gem->gen >= 4)
		return ROUND_UP_TO(pitch, tile_width);

	/* The older hardware has a maximum pitch of 8192 with tiled
	 * surfaces, so fallback to untiled if it's too large.
	 */
	if (pitch > 8192) {
		*tiling_mode = I915_TILING_NONE;
		return ALIGN(pitch, 64);
	}

	/* Pre-965 needs power of two tile width */
	for (i = tile_width; i < pitch; i <<= 1)
		;

	return i;
}

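/*
 * Illustrative pitch numbers (assumptions, not exercised by this file):
 * a 1366-byte X-tiled pitch becomes 1536 bytes (the next multiple of the
 * 512-byte tile width) on gen4+, but 2048 bytes (the next power of two)
 * on pre-965 parts; untiled pitches only need 64-byte alignment.
 */
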
static struct drm_intel_gem_bo_bucket *
drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
				 unsigned long size)
{
	int i;

	for (i = 0; i < bufmgr_gem->num_buckets; i++) {
		struct drm_intel_gem_bo_bucket *bucket =
		    &bufmgr_gem->cache_bucket[i];
		if (bucket->size >= size) {
			return bucket;
		}
	}

	return NULL;
}

static void
drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
{
	int i, j;

	for (i = 0; i < bufmgr_gem->exec_count; i++) {
		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

		if (bo_gem->relocs == NULL) {
			DBG("%2d: %d (%s)\n", i, bo_gem->gem_handle,
			    bo_gem->name);
			continue;
		}

		for (j = 0; j < bo_gem->reloc_count; j++) {
			drm_intel_bo *target_bo = bo_gem->reloc_target_info[j].bo;
			drm_intel_bo_gem *target_gem =
			    (drm_intel_bo_gem *) target_bo;

			DBG("%2d: %d (%s)@0x%08llx -> "
			    "%d (%s)@0x%08lx + 0x%08x\n",
			    i,
			    bo_gem->gem_handle, bo_gem->name,
			    (unsigned long long)bo_gem->relocs[j].offset,
			    target_gem->gem_handle,
			    target_gem->name,
			    target_bo->offset,
			    bo_gem->relocs[j].delta);
		}
	}
}

static inline void
drm_intel_gem_bo_reference(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	atomic_inc(&bo_gem->refcount);
}

/**
 * Adds the given buffer to the list of buffers to be validated (moved into the
 * appropriate memory type) with the next batch submission.
 *
 * If a buffer is validated multiple times in a batch submission, it ends up
 * with the intersection of the memory type flags and the union of the
 * access flags.
 */
static void
drm_intel_add_validate_buffer(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int index;

	if (bo_gem->validate_index != -1)
		return;

	/* Extend the array of validation entries as necessary. */
	if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
		int new_size = bufmgr_gem->exec_size * 2;

		if (new_size == 0)
			new_size = 5;

		bufmgr_gem->exec_objects =
		    realloc(bufmgr_gem->exec_objects,
			    sizeof(*bufmgr_gem->exec_objects) * new_size);
		bufmgr_gem->exec_bos =
		    realloc(bufmgr_gem->exec_bos,
			    sizeof(*bufmgr_gem->exec_bos) * new_size);
		bufmgr_gem->exec_size = new_size;
	}

	index = bufmgr_gem->exec_count;
	bo_gem->validate_index = index;
	/* Fill in array entry */
	bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
	bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
	bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t) bo_gem->relocs;
	bufmgr_gem->exec_objects[index].alignment = 0;
	bufmgr_gem->exec_objects[index].offset = 0;
	bufmgr_gem->exec_bos[index] = bo;
	bufmgr_gem->exec_count++;
}

static void
drm_intel_add_validate_buffer2(drm_intel_bo *bo, int need_fence)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
	int index;

	if (bo_gem->validate_index != -1) {
		if (need_fence)
			bufmgr_gem->exec2_objects[bo_gem->validate_index].flags |=
				EXEC_OBJECT_NEEDS_FENCE;
		return;
	}

	/* Extend the array of validation entries as necessary. */
	if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
		int new_size = bufmgr_gem->exec_size * 2;

		if (new_size == 0)
			new_size = 5;

		bufmgr_gem->exec2_objects =
			realloc(bufmgr_gem->exec2_objects,
				sizeof(*bufmgr_gem->exec2_objects) * new_size);
		bufmgr_gem->exec_bos =
			realloc(bufmgr_gem->exec_bos,
				sizeof(*bufmgr_gem->exec_bos) * new_size);
		bufmgr_gem->exec_size = new_size;
	}

	index = bufmgr_gem->exec_count;
	bo_gem->validate_index = index;
	/* Fill in array entry */
	bufmgr_gem->exec2_objects[index].handle = bo_gem->gem_handle;
	bufmgr_gem->exec2_objects[index].relocation_count = bo_gem->reloc_count;
	bufmgr_gem->exec2_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
	bufmgr_gem->exec2_objects[index].alignment = 0;
	bufmgr_gem->exec2_objects[index].offset = 0;
	bufmgr_gem->exec_bos[index] = bo;
	bufmgr_gem->exec2_objects[index].flags = 0;
	bufmgr_gem->exec2_objects[index].rsvd1 = 0;
	bufmgr_gem->exec2_objects[index].rsvd2 = 0;
	if (need_fence) {
		bufmgr_gem->exec2_objects[index].flags |=
			EXEC_OBJECT_NEEDS_FENCE;
	}
	bufmgr_gem->exec_count++;
}

#define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
	sizeof(uint32_t))

static void
drm_intel_bo_gem_set_in_aperture_size(drm_intel_bufmgr_gem *bufmgr_gem,
				      drm_intel_bo_gem *bo_gem)
{
	int size;

	assert(!bo_gem->used_as_reloc_target);

	/* The older chipsets are far less flexible in terms of tiling,
	 * and require tiled buffers to be size-aligned in the aperture.
	 * This means that in the worst possible case we will need a hole
	 * twice as large as the object in order for it to fit into the
	 * aperture.  Optimal packing is for wimps.
	 */
	size = bo_gem->bo.size;
	if (bufmgr_gem->gen < 4 && bo_gem->tiling_mode != I915_TILING_NONE) {
		int min_size;

		if (bufmgr_gem->has_relaxed_fencing) {
			if (bufmgr_gem->gen == 3)
				min_size = 1024*1024;
			else
				min_size = 512*1024;

			while (min_size < size)
				min_size *= 2;
		} else
			min_size = size;

		/* Account for worst-case alignment. */
		size = 2 * min_size;
	}

	bo_gem->reloc_tree_size = size;
}

static int
drm_intel_setup_reloc_list(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	unsigned int max_relocs = bufmgr_gem->max_relocs;

	if (bo->size / 4 < max_relocs)
		max_relocs = bo->size / 4;

	bo_gem->relocs = malloc(max_relocs *
				sizeof(struct drm_i915_gem_relocation_entry));
	bo_gem->reloc_target_info = malloc(max_relocs *
					   sizeof(drm_intel_reloc_target));
	if (bo_gem->relocs == NULL || bo_gem->reloc_target_info == NULL) {
		bo_gem->has_error = true;

		free (bo_gem->relocs);
		bo_gem->relocs = NULL;

		free (bo_gem->reloc_target_info);
		bo_gem->reloc_target_info = NULL;

		return 1;
	}

	return 0;
}

static int
drm_intel_gem_bo_busy(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_busy busy;
	int ret;

	memset(&busy, 0, sizeof(busy));
	busy.handle = bo_gem->gem_handle;

	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);

	return (ret == 0 && busy.busy);
}

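/*
 * Illustrative caller-side sketch (not part of this file; bufmgr/bo are
 * assumed to exist): the public drm_intel_bo_busy() wrapper is wired to
 * this function when the GEM bufmgr is initialized (outside this
 * excerpt), and lets clients avoid stalling on a map while the GPU
 * still owns the buffer.
 *
 *	if (!drm_intel_bo_busy(bo))
 *		drm_intel_bo_map(bo, 1);	// safe to write immediately
 *	else
 *		bo = drm_intel_bo_alloc(bufmgr, "spare", bo->size, 4096);
 */
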
static int
drm_intel_gem_bo_madvise_internal(drm_intel_bufmgr_gem *bufmgr_gem,
				  drm_intel_bo_gem *bo_gem, int state)
{
	struct drm_i915_gem_madvise madv;

	madv.handle = bo_gem->gem_handle;
	madv.madv = state;
	madv.retained = 1;
	drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);

	return madv.retained;
}

static int
drm_intel_gem_bo_madvise(drm_intel_bo *bo, int madv)
{
	return drm_intel_gem_bo_madvise_internal
		((drm_intel_bufmgr_gem *) bo->bufmgr,
		 (drm_intel_bo_gem *) bo,
		 madv);
}

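/*
 * Illustrative usage sketch (not part of this file), via the public
 * drm_intel_bo_madvise() wrapper: mark an idle buffer purgeable, then
 * reclaim it later.  A zero return when asking for I915_MADV_WILLNEED
 * means the kernel discarded the pages and the contents must be
 * regenerated.
 *
 *	drm_intel_bo_madvise(bo, I915_MADV_DONTNEED);
 *	...
 *	if (!drm_intel_bo_madvise(bo, I915_MADV_WILLNEED))
 *		regenerate_contents(bo);	// hypothetical helper
 */
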
/* drop the oldest entries that have been purged by the kernel */
static void
drm_intel_gem_bo_cache_purge_bucket(drm_intel_bufmgr_gem *bufmgr_gem,
				    struct drm_intel_gem_bo_bucket *bucket)
{
	while (!DRMLISTEMPTY(&bucket->head)) {
		drm_intel_bo_gem *bo_gem;

		bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
				      bucket->head.next, head);
		if (drm_intel_gem_bo_madvise_internal
		    (bufmgr_gem, bo_gem, I915_MADV_DONTNEED))
			break;

		DRMLISTDEL(&bo_gem->head);
		drm_intel_gem_bo_free(&bo_gem->bo);
	}
}

static drm_intel_bo *
drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr,
				const char *name,
				unsigned long size,
				unsigned long flags,
				uint32_t tiling_mode,
				unsigned long stride)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	drm_intel_bo_gem *bo_gem;
	unsigned int page_size = getpagesize();
	int ret;
	struct drm_intel_gem_bo_bucket *bucket;
	bool alloc_from_cache;
	unsigned long bo_size;
	bool for_render = false;

	if (flags & BO_ALLOC_FOR_RENDER)
		for_render = true;

	/* Round the allocated size up to a power of two number of pages. */
	bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, size);

	/* If we don't have caching at this size, don't actually round the
	 * allocation up.
	 */
	if (bucket == NULL) {
		bo_size = size;
		if (bo_size < page_size)
			bo_size = page_size;
	} else {
		bo_size = bucket->size;
	}

	pthread_mutex_lock(&bufmgr_gem->lock);
	/* Get a buffer out of the cache if available */
retry:
	alloc_from_cache = false;
	if (bucket != NULL && !DRMLISTEMPTY(&bucket->head)) {
		if (for_render) {
			/* Allocate new render-target BOs from the tail (MRU)
			 * of the list, as it will likely be hot in the GPU
			 * cache and in the aperture for us.
			 */
			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
					      bucket->head.prev, head);
			DRMLISTDEL(&bo_gem->head);
			alloc_from_cache = true;
		} else {
			/* For non-render-target BOs (where we're probably
			 * going to map it first thing in order to fill it
			 * with data), check if the last BO in the cache is
			 * unbusy, and only reuse in that case.  Otherwise,
			 * allocating a new buffer is probably faster than
			 * waiting for the GPU to finish.
			 */
			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
					      bucket->head.next, head);
			if (!drm_intel_gem_bo_busy(&bo_gem->bo)) {
				alloc_from_cache = true;
				DRMLISTDEL(&bo_gem->head);
			}
		}

		if (alloc_from_cache) {
			if (!drm_intel_gem_bo_madvise_internal
			    (bufmgr_gem, bo_gem, I915_MADV_WILLNEED)) {
				drm_intel_gem_bo_free(&bo_gem->bo);
				drm_intel_gem_bo_cache_purge_bucket(bufmgr_gem,
								    bucket);
				goto retry;
			}

			if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
								 tiling_mode,
								 stride)) {
				drm_intel_gem_bo_free(&bo_gem->bo);
				goto retry;
			}
		}
	}
	pthread_mutex_unlock(&bufmgr_gem->lock);

	if (!alloc_from_cache) {
		struct drm_i915_gem_create create;

		bo_gem = calloc(1, sizeof(*bo_gem));
		if (!bo_gem)
			return NULL;

		bo_gem->bo.size = bo_size;
		memset(&create, 0, sizeof(create));
		create.size = bo_size;

		ret = drmIoctl(bufmgr_gem->fd,
			       DRM_IOCTL_I915_GEM_CREATE,
			       &create);
		bo_gem->gem_handle = create.handle;
		bo_gem->bo.handle = bo_gem->gem_handle;
		if (ret != 0) {
			free(bo_gem);
			return NULL;
		}
		bo_gem->bo.bufmgr = bufmgr;

		bo_gem->tiling_mode = I915_TILING_NONE;
		bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		bo_gem->stride = 0;

		if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
							 tiling_mode,
							 stride)) {
			drm_intel_gem_bo_free(&bo_gem->bo);
			return NULL;
		}

		DRMINITLISTHEAD(&bo_gem->name_list);
		DRMINITLISTHEAD(&bo_gem->vma_list);
	}

	bo_gem->name = name;
	atomic_set(&bo_gem->refcount, 1);
	bo_gem->validate_index = -1;
	bo_gem->reloc_tree_fences = 0;
	bo_gem->used_as_reloc_target = false;
	bo_gem->has_error = false;
	bo_gem->reusable = true;

	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);

	DBG("bo_create: buf %d (%s) %ldb\n",
	    bo_gem->gem_handle, bo_gem->name, size);

	return &bo_gem->bo;
}

static drm_intel_bo *
drm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
				  const char *name,
				  unsigned long size,
				  unsigned int alignment)
{
	return drm_intel_gem_bo_alloc_internal(bufmgr, name, size,
					       BO_ALLOC_FOR_RENDER,
					       I915_TILING_NONE, 0);
}

static drm_intel_bo *
drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr,
		       const char *name,
		       unsigned long size,
		       unsigned int alignment)
{
	return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, 0,
					       I915_TILING_NONE, 0);
}

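/*
 * Illustrative setup sketch (not part of this file; fd is an assumed
 * open DRM file descriptor): create the GEM bufmgr, enable BO reuse so
 * freed buffers land in the cache buckets above, and allocate through
 * the public wrappers.
 *
 *	drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 16 * 1024);
 *	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
 *	drm_intel_bo *bo = drm_intel_bo_alloc(bufmgr, "vbo", 8192, 4096);
 */
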
static drm_intel_bo *
drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
			     int x, int y, int cpp, uint32_t *tiling_mode,
			     unsigned long *pitch, unsigned long flags)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
	unsigned long size, stride;
	uint32_t tiling;

	do {
		unsigned long aligned_y, height_alignment;

		tiling = *tiling_mode;

		/* If we're tiled, our allocations are in 8 or 32-row blocks,
		 * so failure to align our height means that we won't allocate
		 * enough pages.
		 *
		 * If we're untiled, we still have to align to 2 rows high
		 * because the data port accesses 2x2 blocks even if the
		 * bottom row isn't to be rendered, so failure to align means
		 * we could walk off the end of the GTT and fault.  This is
		 * documented on 965, and may be the case on older chipsets
		 * too so we try to be careful.
		 */
		aligned_y = y;
		height_alignment = 2;

		if ((bufmgr_gem->gen == 2) && tiling != I915_TILING_NONE)
			height_alignment = 16;
		else if (tiling == I915_TILING_X
			 || (IS_915(bufmgr_gem->pci_device)
			     && tiling == I915_TILING_Y))
			height_alignment = 8;
		else if (tiling == I915_TILING_Y)
			height_alignment = 32;
		aligned_y = ALIGN(y, height_alignment);

		stride = x * cpp;
		stride = drm_intel_gem_bo_tile_pitch(bufmgr_gem, stride, tiling_mode);
		size = stride * aligned_y;
		size = drm_intel_gem_bo_tile_size(bufmgr_gem, size, tiling_mode);
	} while (*tiling_mode != tiling);
	*pitch = stride;

	if (tiling == I915_TILING_NONE)
		stride = 0;

	return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, flags,
					       tiling, stride);
}

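/*
 * Illustrative usage sketch (not part of this file): requesting an
 * X-tiled surface through the public wrapper.  tiling_mode is in/out
 * (the loop above may demote the request to I915_TILING_NONE) and the
 * actual pitch is returned, so callers must use both results rather
 * than their requested values.
 *
 *	uint32_t tiling = I915_TILING_X;
 *	unsigned long pitch;
 *	drm_intel_bo *tex = drm_intel_bo_alloc_tiled(bufmgr, "tex",
 *						     1024, 768, 4,
 *						     &tiling, &pitch, 0);
 */
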
/**
 * Returns a drm_intel_bo wrapping the given buffer object handle.
 *
 * This can be used when one application needs to pass a buffer object
 * to another.
 */
drm_intel_bo *
drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
				  const char *name,
				  unsigned int handle)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	drm_intel_bo_gem *bo_gem;
	int ret;
	struct drm_gem_open open_arg;
	struct drm_i915_gem_get_tiling get_tiling;
	drmMMListHead *list;

	/* At the moment most applications only have a few named bos.
	 * For instance, in a DRI client only the render buffers passed
	 * between X and the client are named.  And since X returns the
	 * alternating names for the front/back buffer a linear search
	 * provides a sufficiently fast match.
	 */
	for (list = bufmgr_gem->named.next;
	     list != &bufmgr_gem->named;
	     list = list->next) {
		bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
		if (bo_gem->global_name == handle) {
			drm_intel_gem_bo_reference(&bo_gem->bo);
			return &bo_gem->bo;
		}
	}

	bo_gem = calloc(1, sizeof(*bo_gem));
	if (!bo_gem)
		return NULL;

	memset(&open_arg, 0, sizeof(open_arg));
	open_arg.name = handle;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_GEM_OPEN,
		       &open_arg);
	if (ret != 0) {
		DBG("Couldn't reference %s handle 0x%08x: %s\n",
		    name, handle, strerror(errno));
		free(bo_gem);
		return NULL;
	}
	bo_gem->bo.size = open_arg.size;
	bo_gem->bo.offset = 0;
	bo_gem->bo.virtual = NULL;
	bo_gem->bo.bufmgr = bufmgr;
	bo_gem->name = name;
	atomic_set(&bo_gem->refcount, 1);
	bo_gem->validate_index = -1;
	bo_gem->gem_handle = open_arg.handle;
	bo_gem->bo.handle = open_arg.handle;
	bo_gem->global_name = handle;
	bo_gem->reusable = false;

	memset(&get_tiling, 0, sizeof(get_tiling));
	get_tiling.handle = bo_gem->gem_handle;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_GET_TILING,
		       &get_tiling);
	if (ret != 0) {
		drm_intel_gem_bo_unreference(&bo_gem->bo);
		return NULL;
	}
	bo_gem->tiling_mode = get_tiling.tiling_mode;
	bo_gem->swizzle_mode = get_tiling.swizzle_mode;
	/* XXX stride is unknown */
	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);

	DRMINITLISTHEAD(&bo_gem->vma_list);
	DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
	DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);

	return &bo_gem->bo;
}

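/*
 * Illustrative sharing sketch (not part of this file): the exporting
 * process flinks a BO to get a global name and the importing process
 * opens it by that name; how the name travels between processes (e.g.
 * over a socket or the X protocol) is up to the applications.
 *
 *	uint32_t global_name;
 *	drm_intel_bo_flink(shared_bo, &global_name);		// exporter
 *	...
 *	drm_intel_bo *imported =				// importer
 *	    drm_intel_bo_gem_create_from_name(bufmgr, "shared", global_name);
 */
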
static void
drm_intel_gem_bo_free(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_gem_close close;
	int ret;

	DRMLISTDEL(&bo_gem->vma_list);
	if (bo_gem->mem_virtual) {
		munmap(bo_gem->mem_virtual, bo_gem->bo.size);
		bufmgr_gem->vma_count--;
	}
	if (bo_gem->gtt_virtual) {
		munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
		bufmgr_gem->vma_count--;
	}

	/* Close this object */
	memset(&close, 0, sizeof(close));
	close.handle = bo_gem->gem_handle;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
	if (ret != 0) {
		DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
		    bo_gem->gem_handle, bo_gem->name, strerror(errno));
	}
	free(bo);
}

/** Frees all cached buffers significantly older than @time. */
static void
drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
{
	int i;

	if (bufmgr_gem->time == time)
		return;

	for (i = 0; i < bufmgr_gem->num_buckets; i++) {
		struct drm_intel_gem_bo_bucket *bucket =
		    &bufmgr_gem->cache_bucket[i];

		while (!DRMLISTEMPTY(&bucket->head)) {
			drm_intel_bo_gem *bo_gem;

			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
					      bucket->head.next, head);
			if (time - bo_gem->free_time <= 1)
				break;

			DRMLISTDEL(&bo_gem->head);

			drm_intel_gem_bo_free(&bo_gem->bo);
		}
	}

	bufmgr_gem->time = time;
}

static void drm_intel_gem_bo_purge_vma_cache(drm_intel_bufmgr_gem *bufmgr_gem)
{
	int limit;

	DBG("%s: cached=%d, open=%d, limit=%d\n", __FUNCTION__,
	    bufmgr_gem->vma_count, bufmgr_gem->vma_open, bufmgr_gem->vma_max);

	if (bufmgr_gem->vma_max < 0)
		return;

	/* We may need to evict a few entries in order to create new mmaps */
	limit = bufmgr_gem->vma_max - 2*bufmgr_gem->vma_open;
	if (limit < 0)
		limit = 0;

	while (bufmgr_gem->vma_count > limit) {
		drm_intel_bo_gem *bo_gem;

		bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
				      bufmgr_gem->vma_cache.next,
				      vma_list);
		assert(bo_gem->map_count == 0);
		DRMLISTDELINIT(&bo_gem->vma_list);

		if (bo_gem->mem_virtual) {
			munmap(bo_gem->mem_virtual, bo_gem->bo.size);
			bo_gem->mem_virtual = NULL;
			bufmgr_gem->vma_count--;
		}
		if (bo_gem->gtt_virtual) {
			munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
			bo_gem->gtt_virtual = NULL;
			bufmgr_gem->vma_count--;
		}
	}
}

static void drm_intel_gem_bo_close_vma(drm_intel_bufmgr_gem *bufmgr_gem,
				       drm_intel_bo_gem *bo_gem)
{
	bufmgr_gem->vma_open--;
	DRMLISTADDTAIL(&bo_gem->vma_list, &bufmgr_gem->vma_cache);
	if (bo_gem->mem_virtual)
		bufmgr_gem->vma_count++;
	if (bo_gem->gtt_virtual)
		bufmgr_gem->vma_count++;
	drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
}

static void drm_intel_gem_bo_open_vma(drm_intel_bufmgr_gem *bufmgr_gem,
				      drm_intel_bo_gem *bo_gem)
{
	bufmgr_gem->vma_open++;
	DRMLISTDEL(&bo_gem->vma_list);
	if (bo_gem->mem_virtual)
		bufmgr_gem->vma_count--;
	if (bo_gem->gtt_virtual)
		bufmgr_gem->vma_count--;
	drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
}

static void
drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_intel_gem_bo_bucket *bucket;
	int i;

	/* Unreference all the target buffers */
	for (i = 0; i < bo_gem->reloc_count; i++) {
		if (bo_gem->reloc_target_info[i].bo != bo) {
			drm_intel_gem_bo_unreference_locked_timed(bo_gem->
								  reloc_target_info[i].bo,
								  time);
		}
	}
	bo_gem->reloc_count = 0;
	bo_gem->used_as_reloc_target = false;

	DBG("bo_unreference final: %d (%s)\n",
	    bo_gem->gem_handle, bo_gem->name);

	/* release memory associated with this object */
	if (bo_gem->reloc_target_info) {
		free(bo_gem->reloc_target_info);
		bo_gem->reloc_target_info = NULL;
	}
	if (bo_gem->relocs) {
		free(bo_gem->relocs);
		bo_gem->relocs = NULL;
	}

	/* Clear any left-over mappings */
	if (bo_gem->map_count) {
		DBG("bo freed with non-zero map-count %d\n", bo_gem->map_count);
		bo_gem->map_count = 0;
		drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
	}

	DRMLISTDEL(&bo_gem->name_list);

	bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
	/* Put the buffer into our internal cache for reuse if we can. */
	if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
	    drm_intel_gem_bo_madvise_internal(bufmgr_gem, bo_gem,
					      I915_MADV_DONTNEED)) {
		bo_gem->free_time = time;

		bo_gem->name = NULL;
		bo_gem->validate_index = -1;

		DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
	} else {
		drm_intel_gem_bo_free(bo);
	}
}

static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
						      time_t time)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	assert(atomic_read(&bo_gem->refcount) > 0);
	if (atomic_dec_and_test(&bo_gem->refcount))
		drm_intel_gem_bo_unreference_final(bo, time);
}

static void drm_intel_gem_bo_unreference(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	assert(atomic_read(&bo_gem->refcount) > 0);
	if (atomic_dec_and_test(&bo_gem->refcount)) {
		drm_intel_bufmgr_gem *bufmgr_gem =
		    (drm_intel_bufmgr_gem *) bo->bufmgr;
		struct timespec time;

		clock_gettime(CLOCK_MONOTONIC, &time);

		pthread_mutex_lock(&bufmgr_gem->lock);
		drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
		drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
		pthread_mutex_unlock(&bufmgr_gem->lock);
	}
}

static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_set_domain set_domain;
	int ret;

	pthread_mutex_lock(&bufmgr_gem->lock);

	if (bo_gem->map_count++ == 0)
		drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);

	if (!bo_gem->mem_virtual) {
		struct drm_i915_gem_mmap mmap_arg;

		DBG("bo_map: %d (%s), map_count=%d\n",
		    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);

		memset(&mmap_arg, 0, sizeof(mmap_arg));
		mmap_arg.handle = bo_gem->gem_handle;
		mmap_arg.offset = 0;
		mmap_arg.size = bo->size;
		ret = drmIoctl(bufmgr_gem->fd,
			       DRM_IOCTL_I915_GEM_MMAP,
			       &mmap_arg);
		if (ret != 0) {
			ret = -errno;
			DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
			    __FILE__, __LINE__, bo_gem->gem_handle,
			    bo_gem->name, strerror(errno));
			if (--bo_gem->map_count == 0)
				drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
			pthread_mutex_unlock(&bufmgr_gem->lock);
			return ret;
		}
		bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
	}
	DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
	    bo_gem->mem_virtual);
	bo->virtual = bo_gem->mem_virtual;

	set_domain.handle = bo_gem->gem_handle;
	set_domain.read_domains = I915_GEM_DOMAIN_CPU;
	if (write_enable)
		set_domain.write_domain = I915_GEM_DOMAIN_CPU;
	else
		set_domain.write_domain = 0;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_SET_DOMAIN,
		       &set_domain);
	if (ret != 0) {
		DBG("%s:%d: Error setting to CPU domain %d: %s\n",
		    __FILE__, __LINE__, bo_gem->gem_handle,
		    strerror(errno));
	}

	if (write_enable)
		bo_gem->mapped_cpu_write = true;

	pthread_mutex_unlock(&bufmgr_gem->lock);

	return 0;
}

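/*
 * Illustrative usage sketch (not part of this file), via the public
 * wrappers: map for CPU writes, fill the buffer, then unmap, which also
 * performs the SW_FINISH flush for scanout buffers (see unmap below).
 *
 *	if (drm_intel_bo_map(bo, 1) == 0) {	// write_enable = 1
 *		memset(bo->virtual, 0, bo->size);
 *		drm_intel_bo_unmap(bo);
 *	}
 */
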
int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_set_domain set_domain;
	int ret;

	pthread_mutex_lock(&bufmgr_gem->lock);

	if (bo_gem->map_count++ == 0)
		drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);

	/* Get a mapping of the buffer if we haven't before. */
	if (bo_gem->gtt_virtual == NULL) {
		struct drm_i915_gem_mmap_gtt mmap_arg;

		DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
		    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);

		memset(&mmap_arg, 0, sizeof(mmap_arg));
		mmap_arg.handle = bo_gem->gem_handle;

		/* Get the fake offset back... */
		ret = drmIoctl(bufmgr_gem->fd,
			       DRM_IOCTL_I915_GEM_MMAP_GTT,
			       &mmap_arg);
		if (ret != 0) {
			ret = -errno;
			DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
			    __FILE__, __LINE__,
			    bo_gem->gem_handle, bo_gem->name,
			    strerror(errno));
			if (--bo_gem->map_count == 0)
				drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
			pthread_mutex_unlock(&bufmgr_gem->lock);
			return ret;
		}

		/* and mmap it */
		bo_gem->gtt_virtual = mmap(0, bo->size, PROT_READ | PROT_WRITE,
					   MAP_SHARED, bufmgr_gem->fd,
					   mmap_arg.offset);
		if (bo_gem->gtt_virtual == MAP_FAILED) {
			bo_gem->gtt_virtual = NULL;
			ret = -errno;
			DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
			    __FILE__, __LINE__,
			    bo_gem->gem_handle, bo_gem->name,
			    strerror(errno));
			if (--bo_gem->map_count == 0)
				drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
			pthread_mutex_unlock(&bufmgr_gem->lock);
			return ret;
		}
	}

	bo->virtual = bo_gem->gtt_virtual;

	DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
	    bo_gem->gtt_virtual);

	/* Now move it to the GTT domain so that the CPU caches are flushed */
	set_domain.handle = bo_gem->gem_handle;
	set_domain.read_domains = I915_GEM_DOMAIN_GTT;
	set_domain.write_domain = I915_GEM_DOMAIN_GTT;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_SET_DOMAIN,
		       &set_domain);
	if (ret != 0) {
		DBG("%s:%d: Error setting domain %d: %s\n",
		    __FILE__, __LINE__, bo_gem->gem_handle,
		    strerror(errno));
	}

	pthread_mutex_unlock(&bufmgr_gem->lock);

	return 0;
}

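/*
 * Illustrative usage sketch (not part of this file; scanout_bo, x, y and
 * pitch are assumed): a GTT mapping goes through the aperture, so
 * accesses to a tiled buffer are detiled by the hardware and appear in
 * linear order, which the CPU mapping above would not provide.
 *
 *	if (drm_intel_gem_bo_map_gtt(scanout_bo) == 0) {
 *		uint32_t *pixels = scanout_bo->virtual;
 *		pixels[y * (pitch / 4) + x] = 0xffffffff;
 *		drm_intel_gem_bo_unmap_gtt(scanout_bo);
 *	}
 */
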
static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_sw_finish sw_finish;
	int ret = 0;

	if (bo == NULL)
		return 0;

	pthread_mutex_lock(&bufmgr_gem->lock);

	if (bo_gem->map_count <= 0) {
		DBG("attempted to unmap an unmapped bo\n");
		pthread_mutex_unlock(&bufmgr_gem->lock);
		/* Preserve the old behaviour of just treating this as a
		 * no-op rather than reporting the error.
		 */
		return 0;
	}

	if (bo_gem->mapped_cpu_write) {
		/* Cause a flush to happen if the buffer's pinned for
		 * scanout, so the results show up in a timely manner.
		 * Unlike GTT set domains, this only does work if the
		 * buffer should be scanout-related.
		 */
		sw_finish.handle = bo_gem->gem_handle;
		ret = drmIoctl(bufmgr_gem->fd,
			       DRM_IOCTL_I915_GEM_SW_FINISH,
			       &sw_finish);
		ret = ret == -1 ? -errno : 0;

		bo_gem->mapped_cpu_write = false;
	}

	/* We need to unmap after every invocation as we cannot track
	 * an open vma for every bo; that would exhaust the system
	 * limits and cause later failures.
	 */
	if (--bo_gem->map_count == 0) {
		drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
		bo->virtual = NULL;
	}
	pthread_mutex_unlock(&bufmgr_gem->lock);

	return ret;
}

int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
{
	return drm_intel_gem_bo_unmap(bo);
}

static int
drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
			 unsigned long size, const void *data)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_pwrite pwrite;
	int ret;

	memset(&pwrite, 0, sizeof(pwrite));
	pwrite.handle = bo_gem->gem_handle;
	pwrite.offset = offset;
	pwrite.size = size;
	pwrite.data_ptr = (uint64_t) (uintptr_t) data;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_PWRITE,
		       &pwrite);
	if (ret != 0) {
		ret = -errno;
		DBG("%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
		    __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
		    (int)size, strerror(errno));
	}

	return ret;
}

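/*
 * Illustrative upload sketch (not part of this file), via the public
 * drm_intel_bo_subdata() wrapper: a pwrite-based upload avoids creating
 * a CPU mapping at all, which is often the cheapest way to move small
 * amounts of data into a buffer.
 *
 *	static const uint32_t indices[] = { 0, 1, 2, 2, 1, 3 };
 *	drm_intel_bo_subdata(ibo, 0, sizeof(indices), indices);
 */
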
static int
drm_intel_gem_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	struct drm_i915_get_pipe_from_crtc_id get_pipe_from_crtc_id;
	int ret;

	get_pipe_from_crtc_id.crtc_id = crtc_id;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID,
		       &get_pipe_from_crtc_id);
	if (ret != 0) {
		/* We return -1 here to signal that we don't
		 * know which pipe is associated with this crtc.
		 * This lets the caller know that this information
		 * isn't available; using the wrong pipe for
		 * vblank waiting can cause the chipset to lock up
		 */
		return -1;
	}

	return get_pipe_from_crtc_id.pipe;
}

static int
drm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
			     unsigned long size, void *data)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_pread pread;
	int ret;

	memset(&pread, 0, sizeof(pread));
	pread.handle = bo_gem->gem_handle;
	pread.offset = offset;
	pread.size = size;
	pread.data_ptr = (uint64_t) (uintptr_t) data;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_PREAD,
		       &pread);
	if (ret != 0) {
		ret = -errno;
		DBG("%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
		    __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
		    (int)size, strerror(errno));
	}

	return ret;
}

/** Waits for all GPU rendering with the object to have completed. */
static void
drm_intel_gem_bo_wait_rendering(drm_intel_bo *bo)
{
	drm_intel_gem_bo_start_gtt_access(bo, 1);
}

/**
 * Sets the object to the GTT read and possibly write domain, used by the X
 * 2D driver in the absence of kernel support to do drm_intel_gem_bo_map_gtt().
 *
 * In combination with drm_intel_gem_bo_pin() and manual fence management, we
 * can do tiled pixmaps this way.
 */
void
drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_set_domain set_domain;
	int ret;

	set_domain.handle = bo_gem->gem_handle;
	set_domain.read_domains = I915_GEM_DOMAIN_GTT;
	set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_SET_DOMAIN,
		       &set_domain);
	if (ret != 0) {
		DBG("%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
		    __FILE__, __LINE__, bo_gem->gem_handle,
		    set_domain.read_domains, set_domain.write_domain,
		    strerror(errno));
	}
}

static void
drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	int i;

	free(bufmgr_gem->exec2_objects);
	free(bufmgr_gem->exec_objects);
	free(bufmgr_gem->exec_bos);

	pthread_mutex_destroy(&bufmgr_gem->lock);

	/* Free any cached buffer objects we were going to reuse */
	for (i = 0; i < bufmgr_gem->num_buckets; i++) {
		struct drm_intel_gem_bo_bucket *bucket =
		    &bufmgr_gem->cache_bucket[i];
		drm_intel_bo_gem *bo_gem;

		while (!DRMLISTEMPTY(&bucket->head)) {
			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
					      bucket->head.next, head);
			DRMLISTDEL(&bo_gem->head);

			drm_intel_gem_bo_free(&bo_gem->bo);
		}
	}

	free(bufmgr);
}

/**
 * Adds the target buffer to the validation list and adds the relocation
 * to the reloc_buffer's relocation list.
 *
 * The relocation entry at the given offset must already contain the
 * precomputed relocation value, because the kernel will optimize out
 * the relocation entry write when the buffer hasn't moved from the
 * last known offset in target_bo.
 */
static int
do_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
		 drm_intel_bo *target_bo, uint32_t target_offset,
		 uint32_t read_domains, uint32_t write_domain,
		 bool need_fence)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
	bool fenced_command;

	if (bo_gem->has_error)
		return -ENOMEM;

	if (target_bo_gem->has_error) {
		bo_gem->has_error = true;
		return -ENOMEM;
	}

	/* We never use HW fences for rendering on 965+ */
	if (bufmgr_gem->gen >= 4)
		need_fence = false;

	fenced_command = need_fence;
	if (target_bo_gem->tiling_mode == I915_TILING_NONE)
		need_fence = false;

	/* Create a new relocation list if needed */
	if (bo_gem->relocs == NULL && drm_intel_setup_reloc_list(bo))
		return -ENOMEM;

	/* Check overflow */
	assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);

	/* Check args */
	assert(offset <= bo->size - 4);
	assert((write_domain & (write_domain - 1)) == 0);

	/* Make sure that we're not adding a reloc to something whose size has
	 * already been accounted for.
	 */
	assert(!bo_gem->used_as_reloc_target);
	if (target_bo_gem != bo_gem) {
		target_bo_gem->used_as_reloc_target = true;
		bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
	}
	/* An object needing a fence is a tiled buffer, so it won't have
	 * relocs to other buffers.
	 */
	if (need_fence)
		target_bo_gem->reloc_tree_fences = 1;
	bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences;

	bo_gem->relocs[bo_gem->reloc_count].offset = offset;
	bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
	bo_gem->relocs[bo_gem->reloc_count].target_handle =
	    target_bo_gem->gem_handle;
	bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
	bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
	bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset;

	bo_gem->reloc_target_info[bo_gem->reloc_count].bo = target_bo;
	if (target_bo != bo)
		drm_intel_gem_bo_reference(target_bo);
	if (fenced_command)
		bo_gem->reloc_target_info[bo_gem->reloc_count].flags =
			DRM_INTEL_RELOC_FENCE;
	else
		bo_gem->reloc_target_info[bo_gem->reloc_count].flags = 0;

	bo_gem->reloc_count++;

	return 0;
}

static int
drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
			    drm_intel_bo *target_bo, uint32_t target_offset,
			    uint32_t read_domains, uint32_t write_domain)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;

	return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
				read_domains, write_domain,
				!bufmgr_gem->fenced_relocs);
}

static int
drm_intel_gem_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
				  drm_intel_bo *target_bo,
				  uint32_t target_offset,
				  uint32_t read_domains, uint32_t write_domain)
{
	return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
				read_domains, write_domain, true);
}

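/*
 * Illustrative batch-building sketch (not part of this file; batch, n,
 * state_offset and the BOs are assumed): write the presumed address into
 * the batch, then record the relocation so the kernel can patch it if
 * target_bo moves.
 *
 *	batch[n] = target_bo->offset + state_offset;	// precomputed value
 *	drm_intel_bo_emit_reloc(batch_bo, n * 4,
 *				target_bo, state_offset,
 *				I915_GEM_DOMAIN_INSTRUCTION, 0);
 */
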
int
drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	return bo_gem->reloc_count;
}

/**
 * Removes existing relocation entries in the BO after "start".
 *
 * This allows a user to avoid a two-step process for state setup with
 * counting up all the buffer objects and doing a
 * drm_intel_bufmgr_check_aperture_space() before emitting any of the
 * relocations for the state setup.  Instead, save the state of the
 * batchbuffer including drm_intel_gem_bo_get_reloc_count(), emit all the
 * state, and then check if it still fits in the aperture.
 *
 * Any further drm_intel_bufmgr_check_aperture_space() queries
 * involving this buffer in the tree are undefined after this call.
 */
void
drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int i;
	struct timespec time;

	clock_gettime(CLOCK_MONOTONIC, &time);

	assert(bo_gem->reloc_count >= start);
	/* Unreference the cleared target buffers */
	for (i = start; i < bo_gem->reloc_count; i++) {
		if (bo_gem->reloc_target_info[i].bo != bo) {
			drm_intel_gem_bo_unreference_locked_timed(bo_gem->
								  reloc_target_info[i].bo,
								  time.tv_sec);
		}
	}
	bo_gem->reloc_count = start;
}

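/*
 * Illustrative rollback sketch (not part of this file; emit_state() is a
 * hypothetical helper) of the pattern described above: emit
 * speculatively, then undo if the batch would no longer fit in the
 * aperture.
 *
 *	int saved = drm_intel_gem_bo_get_reloc_count(batch_bo);
 *	emit_state(batch_bo);
 *	if (drm_intel_bufmgr_check_aperture_space(&batch_bo, 1) != 0)
 *		drm_intel_gem_bo_clear_relocs(batch_bo, saved);
 */
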
/**
 * Walk the tree of relocations rooted at BO and accumulate the list of
 * validations to be performed and update the relocation buffers with
 * index values into the validation list.
 */
static void
drm_intel_gem_bo_process_reloc(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int i;

	if (bo_gem->relocs == NULL)
		return;

	for (i = 0; i < bo_gem->reloc_count; i++) {
		drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;

		if (target_bo == bo)
			continue;

		/* Continue walking the tree depth-first. */
		drm_intel_gem_bo_process_reloc(target_bo);

		/* Add the target to the validate list */
		drm_intel_add_validate_buffer(target_bo);
	}
}

static void
drm_intel_gem_bo_process_reloc2(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
	int i;

	if (bo_gem->relocs == NULL)
		return;

	for (i = 0; i < bo_gem->reloc_count; i++) {
		drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
		int need_fence;

		if (target_bo == bo)
			continue;

		/* Continue walking the tree depth-first. */
		drm_intel_gem_bo_process_reloc2(target_bo);

		need_fence = (bo_gem->reloc_target_info[i].flags &
			      DRM_INTEL_RELOC_FENCE);

		/* Add the target to the validate list */
		drm_intel_add_validate_buffer2(target_bo, need_fence);
	}
}

static void
drm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem)
{
	int i;

	for (i = 0; i < bufmgr_gem->exec_count; i++) {
		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

		/* Update the buffer offset */
		if (bufmgr_gem->exec_objects[i].offset != bo->offset) {
			DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
			    bo_gem->gem_handle, bo_gem->name, bo->offset,
			    (unsigned long long)bufmgr_gem->exec_objects[i].
			    offset);
			bo->offset = bufmgr_gem->exec_objects[i].offset;
		}
	}
}

static void
drm_intel_update_buffer_offsets2(drm_intel_bufmgr_gem *bufmgr_gem)
{
	int i;

	for (i = 0; i < bufmgr_gem->exec_count; i++) {
		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;

		/* Update the buffer offset */
		if (bufmgr_gem->exec2_objects[i].offset != bo->offset) {
			DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
			    bo_gem->gem_handle, bo_gem->name, bo->offset,
			    (unsigned long long)bufmgr_gem->exec2_objects[i].offset);
			bo->offset = bufmgr_gem->exec2_objects[i].offset;
		}
	}
}

static int
drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
		      drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_execbuffer execbuf;
	int ret, i;

	if (bo_gem->has_error)
		return -ENOMEM;

	pthread_mutex_lock(&bufmgr_gem->lock);
	/* Update indices and set up the validate list. */
	drm_intel_gem_bo_process_reloc(bo);

	/* Add the batch buffer to the validation list.  There are no
	 * relocations pointing to it.
	 */
	drm_intel_add_validate_buffer(bo);

	execbuf.buffers_ptr = (uintptr_t) bufmgr_gem->exec_objects;
	execbuf.buffer_count = bufmgr_gem->exec_count;
	execbuf.batch_start_offset = 0;
	execbuf.batch_len = used;
	execbuf.cliprects_ptr = (uintptr_t) cliprects;
	execbuf.num_cliprects = num_cliprects;
	execbuf.DR1 = 0;
	execbuf.DR4 = DR4;

	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_EXECBUFFER,
		       &execbuf);
	if (ret != 0) {
		ret = -errno;
		if (errno == ENOSPC) {
			DBG("Execbuffer fails to pin. "
			    "Estimate: %u. Actual: %u. Available: %u\n",
			    drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
							       bufmgr_gem->
							       exec_count),
			    drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
							      bufmgr_gem->
							      exec_count),
			    (unsigned int)bufmgr_gem->gtt_size);
		}
	}
	drm_intel_update_buffer_offsets(bufmgr_gem);

	if (bufmgr_gem->bufmgr.debug)
		drm_intel_gem_dump_validation_list(bufmgr_gem);

	for (i = 0; i < bufmgr_gem->exec_count; i++) {
		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

		/* Disconnect the buffer from the validate list */
		bo_gem->validate_index = -1;
		bufmgr_gem->exec_bos[i] = NULL;
	}
	bufmgr_gem->exec_count = 0;
	pthread_mutex_unlock(&bufmgr_gem->lock);

	return ret;
}

static int
drm_intel_gem_bo_mrb_exec2(drm_intel_bo *bo, int used,
			   drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
			   unsigned int flags)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
	struct drm_i915_gem_execbuffer2 execbuf;
	int ret, i;

	switch (flags & 0x7) {
	default:
		return -EINVAL;
	case I915_EXEC_BLT:
		if (!bufmgr_gem->has_blt)
			return -EINVAL;
		break;
	case I915_EXEC_BSD:
		if (!bufmgr_gem->has_bsd)
			return -EINVAL;
		break;
	case I915_EXEC_RENDER:
	case I915_EXEC_DEFAULT:
		break;
	}

	pthread_mutex_lock(&bufmgr_gem->lock);
	/* Update indices and set up the validate list. */
	drm_intel_gem_bo_process_reloc2(bo);

	/* Add the batch buffer to the validation list.  There are no relocations
	 * pointing to it.
	 */
	drm_intel_add_validate_buffer2(bo, 0);

	execbuf.buffers_ptr = (uintptr_t)bufmgr_gem->exec2_objects;
	execbuf.buffer_count = bufmgr_gem->exec_count;
	execbuf.batch_start_offset = 0;
	execbuf.batch_len = used;
	execbuf.cliprects_ptr = (uintptr_t)cliprects;
	execbuf.num_cliprects = num_cliprects;
	execbuf.DR1 = 0;
	execbuf.DR4 = DR4;
	execbuf.flags = flags;
	execbuf.rsvd1 = 0;
	execbuf.rsvd2 = 0;

	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_EXECBUFFER2,
		       &execbuf);
	if (ret != 0) {
		ret = -errno;
		if (ret == -ENOSPC) {
			DBG("Execbuffer fails to pin. "
			    "Estimate: %u. Actual: %u. Available: %u\n",
			    drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
							       bufmgr_gem->exec_count),
			    drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
							      bufmgr_gem->exec_count),
			    (unsigned int) bufmgr_gem->gtt_size);
		}
	}
	drm_intel_update_buffer_offsets2(bufmgr_gem);

	if (bufmgr_gem->bufmgr.debug)
		drm_intel_gem_dump_validation_list(bufmgr_gem);

	for (i = 0; i < bufmgr_gem->exec_count; i++) {
		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;

		/* Disconnect the buffer from the validate list */
		bo_gem->validate_index = -1;
		bufmgr_gem->exec_bos[i] = NULL;
	}
	bufmgr_gem->exec_count = 0;
	pthread_mutex_unlock(&bufmgr_gem->lock);

	return ret;
}

static int
drm_intel_gem_bo_exec2(drm_intel_bo *bo, int used,
		       drm_clip_rect_t *cliprects, int num_cliprects,
		       int DR4)
{
	return drm_intel_gem_bo_mrb_exec2(bo, used,
					  cliprects, num_cliprects, DR4,
					  I915_EXEC_RENDER);
}

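/*
 * Illustrative submission sketch (not part of this file): once a batch
 * buffer is filled and its relocations recorded, the public wrappers
 * submit it; drm_intel_bo_mrb_exec() additionally selects the ring,
 * subject to the has_blt/has_bsd checks above.
 *
 *	drm_intel_bo_exec(batch_bo, used_bytes, NULL, 0, 0);
 *	drm_intel_bo_mrb_exec(batch_bo, used_bytes, NULL, 0, 0,
 *			      I915_EXEC_BLT);	// blit-ring variant
 */
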
static int
drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_pin pin;
	int ret;

	memset(&pin, 0, sizeof(pin));
	pin.handle = bo_gem->gem_handle;
	pin.alignment = alignment;

	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_PIN,
		       &pin);
	if (ret != 0)
		return -errno;

	bo->offset = pin.offset;
	return 0;
}

static int
drm_intel_gem_bo_unpin(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_unpin unpin;
	int ret;

	memset(&unpin, 0, sizeof(unpin));
	unpin.handle = bo_gem->gem_handle;

	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
	if (ret != 0)
		return -errno;

	return 0;
}

static int
drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
				     uint32_t tiling_mode,
				     uint32_t stride)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_set_tiling set_tiling;
	int ret;

	if (bo_gem->global_name == 0 &&
	    tiling_mode == bo_gem->tiling_mode &&
	    stride == bo_gem->stride)
		return 0;

	memset(&set_tiling, 0, sizeof(set_tiling));
	do {
		/* set_tiling is slightly broken and overwrites the
		 * input on the error path, so we have to open code
		 * drmIoctl.
		 */
		set_tiling.handle = bo_gem->gem_handle;
		set_tiling.tiling_mode = tiling_mode;
		set_tiling.stride = stride;

		ret = ioctl(bufmgr_gem->fd,
			    DRM_IOCTL_I915_GEM_SET_TILING,
			    &set_tiling);
	} while (ret == -1 && (errno == EINTR || errno == EAGAIN));
	if (ret == -1)
		return -errno;

	bo_gem->tiling_mode = set_tiling.tiling_mode;
	bo_gem->swizzle_mode = set_tiling.swizzle_mode;
	bo_gem->stride = set_tiling.stride;
	return 0;
}

static int
drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
			    uint32_t stride)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int ret;

	/* Linear buffers have no stride. By ensuring that we only ever use
	 * stride 0 with linear buffers, we simplify our code.
	 */
	if (*tiling_mode == I915_TILING_NONE)
		stride = 0;

	ret = drm_intel_gem_bo_set_tiling_internal(bo, *tiling_mode, stride);
	if (ret == 0)
		drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);

	*tiling_mode = bo_gem->tiling_mode;
	return ret;
}

static int
drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
			    uint32_t * swizzle_mode)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	*tiling_mode = bo_gem->tiling_mode;
	*swizzle_mode = bo_gem->swizzle_mode;
	return 0;
}

static int
drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t *name)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_gem_flink flink;
	int ret;

	if (!bo_gem->global_name) {
		memset(&flink, 0, sizeof(flink));
		flink.handle = bo_gem->gem_handle;

		ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink);
		if (ret != 0)
			return -errno;
		bo_gem->global_name = flink.name;
		bo_gem->reusable = false;

		DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
	}

	*name = bo_gem->global_name;
	return 0;
}

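/*
 * A sketch of cross-process sharing built on flink (illustrative client
 * code, not part of this file; send_name_to_peer() is a hypothetical IPC
 * helper).  The exporter publishes the global name and the importer opens
 * the same object with it:
 *
 *	uint32_t name;
 *
 *	if (drm_intel_bo_flink(bo, &name) == 0)
 *		send_name_to_peer(name);
 *
 * and in the importing process:
 *
 *	drm_intel_bo *shared =
 *		drm_intel_bo_gem_create_from_name(bufmgr, "shared", name);
 */
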
/**
 * Enables unlimited caching of buffer objects for reuse.
 *
 * This is potentially very memory expensive, as the cache at each bucket
 * size is only bounded by how many buffers of that size we've managed to
 * have in flight at once.
 */
void
drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;

	bufmgr_gem->bo_reuse = true;
}

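/*
 * Typical opt-in (illustrative client code, not part of this file):
 * reuse is off by default, so callers enable it right after creating
 * the buffer manager.
 *
 *	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
 */
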
/**
 * Enables use of the fenced reloc type.
 *
 * New code should enable this to avoid unnecessary fence register
 * allocation. If this option is not enabled, all relocs will have a
 * fence register allocated.
 */
void
drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;

	if (bufmgr_gem->bufmgr.bo_exec == drm_intel_gem_bo_exec2)
		bufmgr_gem->fenced_relocs = true;
}

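/*
 * With fenced relocs enabled, only relocations emitted through the
 * _fence variant request a fence register.  A sketch (illustrative
 * client code, not part of this file; the offset and domain arguments
 * are example values):
 *
 *	drm_intel_bufmgr_gem_enable_fenced_relocs(bufmgr);
 *	...
 *	drm_intel_bo_emit_reloc_fence(batch_bo, reloc_offset,
 *				      tiled_bo, 0,
 *				      I915_GEM_DOMAIN_RENDER,
 *				      I915_GEM_DOMAIN_RENDER);
 */
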
/**
 * Return the additional aperture space required by the tree of buffer objects
 * rooted at bo.
 */
static int
drm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int i;
	int total = 0;

	if (bo == NULL || bo_gem->included_in_check_aperture)
		return 0;

	total += bo->size;
	bo_gem->included_in_check_aperture = true;

	for (i = 0; i < bo_gem->reloc_count; i++)
		total +=
		    drm_intel_gem_bo_get_aperture_space(bo_gem->
							reloc_target_info[i].bo);

	return total;
}

/**
 * Count the number of buffers in this list that need a fence reg.
 *
 * If the count is greater than the number of available regs, we'll have
 * to ask the caller to resubmit a batch with fewer tiled buffers.
 *
 * This function over-counts if the same buffer is used multiple times.
 */
static unsigned int
drm_intel_gem_total_fences(drm_intel_bo **bo_array, int count)
{
	int i;
	unsigned int total = 0;

	for (i = 0; i < count; i++) {
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];

		if (bo_gem == NULL)
			continue;

		total += bo_gem->reloc_tree_fences;
	}
	return total;
}

/**
 * Clear the flag set by drm_intel_gem_bo_get_aperture_space() so we're ready
 * for the next drm_intel_bufmgr_check_aperture_space() call.
 */
static void
drm_intel_gem_bo_clear_aperture_space_flag(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int i;

	if (bo == NULL || !bo_gem->included_in_check_aperture)
		return;

	bo_gem->included_in_check_aperture = false;

	for (i = 0; i < bo_gem->reloc_count; i++)
		drm_intel_gem_bo_clear_aperture_space_flag(bo_gem->
							   reloc_target_info[i].bo);
}

/**
 * Return a conservative estimate for the amount of aperture required
 * for a collection of buffers. This may double-count some buffers.
 */
static unsigned int
drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count)
{
	int i;
	unsigned int total = 0;

	for (i = 0; i < count; i++) {
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
		if (bo_gem != NULL)
			total += bo_gem->reloc_tree_size;
	}
	return total;
}

/**
 * Return the amount of aperture needed for a collection of buffers.
 * This avoids double counting any buffers, at the cost of looking
 * at every buffer in the set.
 */
static unsigned int
drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count)
{
	int i;
	unsigned int total = 0;

	for (i = 0; i < count; i++) {
		total += drm_intel_gem_bo_get_aperture_space(bo_array[i]);
		/* For the first buffer object in the array, we get an
		 * accurate count back for its reloc_tree size (since nothing
		 * had been flagged as being counted yet). We can save that
		 * value out as a more conservative reloc_tree_size that
		 * avoids double-counting target buffers. Since the first
		 * buffer happens to usually be the batch buffer in our
		 * callers, this can pull us back from doing the tree
		 * walk on every new batch emit.
		 */
		if (i == 0) {
			drm_intel_bo_gem *bo_gem =
			    (drm_intel_bo_gem *) bo_array[i];
			bo_gem->reloc_tree_size = total;
		}
	}

	for (i = 0; i < count; i++)
		drm_intel_gem_bo_clear_aperture_space_flag(bo_array[i]);
	return total;
}

/**
 * Return -ENOSPC if the batchbuffer should be flushed before attempting to
 * emit rendering referencing the buffers pointed to by bo_array.
 *
 * This is required because if we try to emit a batchbuffer with relocations
 * to a tree of buffers that won't simultaneously fit in the aperture,
 * the rendering will return an error at a point where the software is not
 * prepared to recover from it.
 *
 * However, we also want to emit the batchbuffer significantly before we reach
 * the limit, as a series of batchbuffers each of which references buffers
 * covering almost all of the aperture means that at each emit we end up
 * waiting to evict a buffer from the last rendering, and performance becomes
 * effectively synchronous. By emitting smaller batchbuffers, we eat some CPU
 * overhead in exchange for better parallelism.
 */
static int
drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count)
{
	drm_intel_bufmgr_gem *bufmgr_gem =
	    (drm_intel_bufmgr_gem *) bo_array[0]->bufmgr;
	unsigned int total = 0;
	unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4;
	int total_fences;

	/* Check for fence reg constraints if necessary */
	if (bufmgr_gem->available_fences) {
		total_fences = drm_intel_gem_total_fences(bo_array, count);
		if (total_fences > bufmgr_gem->available_fences)
			return -ENOSPC;
	}

	total = drm_intel_gem_estimate_batch_space(bo_array, count);

	if (total > threshold)
		total = drm_intel_gem_compute_batch_space(bo_array, count);

	if (total > threshold) {
		DBG("check_space: overflowed available aperture, "
		    "%dkb vs %dkb\n",
		    total / 1024, (int)bufmgr_gem->gtt_size / 1024);
		return -ENOSPC;
	} else {
		DBG("drm_check_space: total %dkb vs bufmgr %dkb\n",
		    total / 1024, (int)bufmgr_gem->gtt_size / 1024);
		return 0;
	}
}

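/*
 * A sketch of the flush protocol this enables (illustrative client code,
 * not part of this file; flush_batch() is a hypothetical caller function):
 * test the buffer set before emitting more state, and flush when -ENOSPC
 * comes back.
 *
 *	drm_intel_bo *bos[2] = { batch_bo, target_bo };
 *
 *	if (drm_intel_bufmgr_check_aperture_space(bos, 2) != 0) {
 *		flush_batch();
 *		... then emit into a fresh batchbuffer ...
 *	}
 */
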
/*
 * Disable buffer reuse for objects which are shared with the kernel
 * as scanout buffers.
 */
static int
drm_intel_gem_bo_disable_reuse(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	bo_gem->reusable = false;
	return 0;
}

static int
drm_intel_gem_bo_is_reusable(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	return bo_gem->reusable;
}

static int
_drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int i;

	for (i = 0; i < bo_gem->reloc_count; i++) {
		if (bo_gem->reloc_target_info[i].bo == target_bo)
			return 1;
		if (bo == bo_gem->reloc_target_info[i].bo)
			continue;
		if (_drm_intel_gem_bo_references(bo_gem->reloc_target_info[i].bo,
						 target_bo))
			return 1;
	}

	return 0;
}

/** Return true if target_bo is referenced by bo's relocation tree. */
static int
drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
{
	drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;

	if (bo == NULL || target_bo == NULL)
		return 0;
	if (target_bo_gem->used_as_reloc_target)
		return _drm_intel_gem_bo_references(bo, target_bo);
	return 0;
}

static void
add_bucket(drm_intel_bufmgr_gem *bufmgr_gem, int size)
{
	unsigned int i = bufmgr_gem->num_buckets;

	assert(i < ARRAY_SIZE(bufmgr_gem->cache_bucket));

	DRMINITLISTHEAD(&bufmgr_gem->cache_bucket[i].head);
	bufmgr_gem->cache_bucket[i].size = size;
	bufmgr_gem->num_buckets++;
}

static void
init_cache_buckets(drm_intel_bufmgr_gem *bufmgr_gem)
{
	unsigned long size, cache_max_size = 64 * 1024 * 1024;

	/* OK, so power of two buckets was too wasteful of memory.
	 * Give 3 other sizes between each power of two, to hopefully
	 * cover things accurately enough. (The alternative is
	 * probably to just go for exact matching of sizes, and assume
	 * that for things like composited window resize the tiled
	 * width/height alignment and rounding of sizes to pages will
	 * get us useful cache hit rates anyway)
	 */
	add_bucket(bufmgr_gem, 4096);
	add_bucket(bufmgr_gem, 4096 * 2);
	add_bucket(bufmgr_gem, 4096 * 3);

	/* Initialize the linked lists for BO reuse cache. */
	for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
		add_bucket(bufmgr_gem, size);

		add_bucket(bufmgr_gem, size + size * 1 / 4);
		add_bucket(bufmgr_gem, size + size * 2 / 4);
		add_bucket(bufmgr_gem, size + size * 3 / 4);
	}
}

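/*
 * For reference, the buckets this produces are 4KB, 8KB and 12KB, then for
 * each power of two from 16KB through 64MB the size itself plus 1.25x,
 * 1.5x and 1.75x of it: 16KB, 20KB, 24KB, 28KB, 32KB, 40KB, 48KB, 56KB,
 * 64KB, 80KB, ... up to 112MB.
 */
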
void
drm_intel_bufmgr_gem_set_vma_cache_size(drm_intel_bufmgr *bufmgr, int limit)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;

	bufmgr_gem->vma_max = limit;

	drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
}

/**
 * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
 * and manage buffer objects.
 *
 * \param fd File descriptor of the opened DRM device.
 * \param batch_size Size in bytes of the batchbuffers the client will submit,
 *	used to size the relocation tracking arrays.
 */
drm_intel_bufmgr *
drm_intel_bufmgr_gem_init(int fd, int batch_size)
{
	drm_intel_bufmgr_gem *bufmgr_gem;
	struct drm_i915_gem_get_aperture aperture;
	drm_i915_getparam_t gp;
	int ret, tmp;
	bool exec2 = false;

	bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
	if (bufmgr_gem == NULL)
		return NULL;

	bufmgr_gem->fd = fd;

	if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
		free(bufmgr_gem);
		return NULL;
	}

	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_GET_APERTURE,
		       &aperture);

	if (ret == 0)
		bufmgr_gem->gtt_size = aperture.aper_available_size;
	else {
		fprintf(stderr, "DRM_IOCTL_I915_GEM_GET_APERTURE failed: %s\n",
			strerror(errno));
		bufmgr_gem->gtt_size = 128 * 1024 * 1024;
		fprintf(stderr, "Assuming %dkB available aperture size.\n"
			"May lead to reduced performance or incorrect "
			"rendering.\n",
			(int)bufmgr_gem->gtt_size / 1024);
	}

	gp.param = I915_PARAM_CHIPSET_ID;
	gp.value = &bufmgr_gem->pci_device;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	if (ret) {
		fprintf(stderr, "get chip id failed: %d [%d]\n", ret, errno);
		fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
	}

	if (IS_GEN2(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 2;
	else if (IS_GEN3(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 3;
	else if (IS_GEN4(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 4;
	else if (IS_GEN5(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 5;
	else if (IS_GEN6(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 6;
	else if (IS_GEN7(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 7;
	else
		assert(0);

	if (IS_GEN3(bufmgr_gem->pci_device) &&
	    bufmgr_gem->gtt_size > 256*1024*1024) {
		/* The unmappable part of gtt on gen 3 (i.e. above 256MB) can't
		 * be used for tiled blits. To simplify the accounting, just
		 * subtract the unmappable part (fixed to 256MB on all known
		 * gen3 devices) if the kernel advertises it. */
		bufmgr_gem->gtt_size -= 256*1024*1024;
	}

	gp.value = &tmp;

	gp.param = I915_PARAM_HAS_EXECBUF2;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	if (!ret)
		exec2 = true;

	gp.param = I915_PARAM_HAS_BSD;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	bufmgr_gem->has_bsd = ret == 0;

	gp.param = I915_PARAM_HAS_BLT;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	bufmgr_gem->has_blt = ret == 0;

	gp.param = I915_PARAM_HAS_RELAXED_FENCING;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	bufmgr_gem->has_relaxed_fencing = ret == 0;

	gp.param = I915_PARAM_HAS_LLC;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	if (ret == -EINVAL) {
		/* Kernel does not support the HAS_LLC query; fall back to
		 * GPU generation detection and assume that we have LLC on
		 * GEN6/7.
		 */
		bufmgr_gem->has_llc = (IS_GEN6(bufmgr_gem->pci_device) |
				       IS_GEN7(bufmgr_gem->pci_device));
	} else
		bufmgr_gem->has_llc = ret == 0;

	if (bufmgr_gem->gen < 4) {
		gp.param = I915_PARAM_NUM_FENCES_AVAIL;
		gp.value = &bufmgr_gem->available_fences;
		ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
		if (ret) {
			fprintf(stderr, "get fences failed: %d [%d]\n", ret,
				errno);
			fprintf(stderr, "param: %d, val: %d\n", gp.param,
				*gp.value);
			bufmgr_gem->available_fences = 0;
		} else {
			/* XXX The kernel reports the total number of fences,
			 * including any that may be pinned.
			 *
			 * We presume that there will be at least one pinned
			 * fence for the scanout buffer, but there may be more
			 * than one scanout and the user may be manually
			 * pinning buffers. Let's move to execbuffer2 and
			 * thereby forget the insanity of using fences...
			 */
			bufmgr_gem->available_fences -= 2;
			if (bufmgr_gem->available_fences < 0)
				bufmgr_gem->available_fences = 0;
		}
	}

	/* Let's go with one relocation per every 2 dwords (but round down a
	 * bit since a power of two will mean an extra page allocation for
	 * the reloc buffer).
	 *
	 * Every 4 was too few for the blender benchmark.
	 */
	bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;

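	/* Worked example: a common 16KB batch gives
	 * 16384 / 4 / 2 - 2 = 2046 relocations before the reloc buffer
	 * would spill into an extra page.
	 */
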
	bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc;
	bufmgr_gem->bufmgr.bo_alloc_for_render =
	    drm_intel_gem_bo_alloc_for_render;
	bufmgr_gem->bufmgr.bo_alloc_tiled = drm_intel_gem_bo_alloc_tiled;
	bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference;
	bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference;
	bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map;
	bufmgr_gem->bufmgr.bo_unmap = drm_intel_gem_bo_unmap;
	bufmgr_gem->bufmgr.bo_subdata = drm_intel_gem_bo_subdata;
	bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata;
	bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering;
	bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc;
	bufmgr_gem->bufmgr.bo_emit_reloc_fence =
	    drm_intel_gem_bo_emit_reloc_fence;
	bufmgr_gem->bufmgr.bo_pin = drm_intel_gem_bo_pin;
	bufmgr_gem->bufmgr.bo_unpin = drm_intel_gem_bo_unpin;
	bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling;
	bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
	bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
	/* Use the new one if available */
	if (exec2) {
		bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec2;
		bufmgr_gem->bufmgr.bo_mrb_exec = drm_intel_gem_bo_mrb_exec2;
	} else
		bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
	bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
	bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise;
	bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_destroy;
	bufmgr_gem->bufmgr.debug = 0;
	bufmgr_gem->bufmgr.check_aperture_space =
	    drm_intel_gem_check_aperture_space;
	bufmgr_gem->bufmgr.bo_disable_reuse = drm_intel_gem_bo_disable_reuse;
	bufmgr_gem->bufmgr.bo_is_reusable = drm_intel_gem_bo_is_reusable;
	bufmgr_gem->bufmgr.get_pipe_from_crtc_id =
	    drm_intel_gem_get_pipe_from_crtc_id;
	bufmgr_gem->bufmgr.bo_references = drm_intel_gem_bo_references;

	DRMINITLISTHEAD(&bufmgr_gem->named);
	init_cache_buckets(bufmgr_gem);

	DRMINITLISTHEAD(&bufmgr_gem->vma_cache);
	bufmgr_gem->vma_max = -1; /* unlimited by default */

	return &bufmgr_gem->bufmgr;
}
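
/*
 * A minimal end-to-end setup sketch (illustrative client code, not part
 * of this file; the 4096-byte batch size and buffer parameters are
 * arbitrary example values):
 *
 *	int fd = drmOpen("i915", NULL);
 *	drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
 *	drm_intel_bo *bo;
 *
 *	if (bufmgr != NULL) {
 *		bo = drm_intel_bo_alloc(bufmgr, "example", 4096, 4096);
 *		...
 *		drm_intel_bo_unreference(bo);
 *		drm_intel_bufmgr_destroy(bufmgr);
 *	}
 */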