/**************************************************************************
 *
 * Copyright © 2007 Red Hat Inc.
 * Copyright © 2007 Intel Corporation
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 *          Keith Whitwell <keithw-at-tungstengraphics-dot-com>
 *          Eric Anholt <eric@anholt.net>
 *          Dave Airlie <airlied@linux.ie>
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <xf86drm.h>
#include <xf86atomic.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <assert.h>
#include <pthread.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <stdbool.h>
#include <errno.h>
#include <time.h>

#include "libdrm_lists.h"
#include "intel_bufmgr.h"
#include "intel_bufmgr_priv.h"
#include "intel_chipset.h"
#include "i915_drm.h"

#define DBG(...) do {					\
	if (bufmgr_gem->bufmgr.debug)			\
		fprintf(stderr, __VA_ARGS__);		\
} while (0)

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

typedef struct _drm_intel_bo_gem drm_intel_bo_gem;

struct drm_intel_gem_bo_bucket {
	drmMMListHead head;
	unsigned long size;
};

typedef struct _drm_intel_bufmgr_gem {
	drm_intel_bufmgr bufmgr;

	int fd;

	int max_relocs;

	pthread_mutex_t lock;

	struct drm_i915_gem_exec_object *exec_objects;
	struct drm_i915_gem_exec_object2 *exec2_objects;
	drm_intel_bo **exec_bos;
	int exec_size;
	int exec_count;

	/** Array of lists of cached gem objects of power-of-two sizes */
	struct drm_intel_gem_bo_bucket cache_bucket[14 * 4];
	int num_buckets;
	time_t time;

	drmMMListHead named;
	drmMMListHead vma_cache;
	int vma_count, vma_open, vma_max;

	uint64_t gtt_size;
	int available_fences;
	int pci_device;
	int gen;
	unsigned int has_bsd : 1;
	unsigned int has_blt : 1;
	unsigned int has_relaxed_fencing : 1;
	unsigned int bo_reuse : 1;
	bool fenced_relocs;
} drm_intel_bufmgr_gem;

#define DRM_INTEL_RELOC_FENCE (1<<0)

typedef struct _drm_intel_reloc_target_info {
	drm_intel_bo *bo;
	int flags;
} drm_intel_reloc_target;

struct _drm_intel_bo_gem {
	drm_intel_bo bo;

	atomic_t refcount;
	uint32_t gem_handle;
	const char *name;

	/**
	 * Kernel-assigned global name for this object
	 */
	unsigned int global_name;
	drmMMListHead name_list;

	/**
	 * Index of the buffer within the validation list while preparing a
	 * batchbuffer execution.
	 */
	int validate_index;

	/**
	 * Current tiling mode
	 */
	uint32_t tiling_mode;
	uint32_t swizzle_mode;
	unsigned long stride;

	time_t free_time;

	/** Array passed to the DRM containing relocation information. */
	struct drm_i915_gem_relocation_entry *relocs;
	/**
	 * Array of info structs corresponding to relocs[i].target_handle etc
	 */
	drm_intel_reloc_target *reloc_target_info;
	/** Number of entries in relocs */
	int reloc_count;
	/** Mapped address for the buffer, saved across map/unmap cycles */
	void *mem_virtual;
	/** GTT virtual address for the buffer, saved across map/unmap cycles */
	void *gtt_virtual;
	int map_count;
	drmMMListHead vma_list;

	/** BO cache list */
	drmMMListHead head;

	/**
	 * Boolean of whether this BO and its children have been included in
	 * the current drm_intel_bufmgr_check_aperture_space() total.
	 */
	bool included_in_check_aperture;

	/**
	 * Boolean of whether this buffer has been used as a relocation
	 * target and had its size accounted for, and thus can't have any
	 * further relocations added to it.
	 */
	bool used_as_reloc_target;

	/**
	 * Boolean of whether we have encountered an error whilst building
	 * the relocation tree.
	 */
	bool has_error;

	/**
	 * Boolean of whether this buffer can be re-used
	 */
	bool reusable;

	/**
	 * Size in bytes of this buffer and its relocation descendants.
	 *
	 * Used to avoid costly tree walking in
	 * drm_intel_bufmgr_check_aperture in the common case.
	 */
	int reloc_tree_size;

	/**
	 * Number of potential fence registers required by this buffer and its
	 * relocations.
	 */
	int reloc_tree_fences;

	/** Whether we need to perform the SW_FINISH ioctl on unmap. */
	bool mapped_cpu_write;
};

static unsigned int
drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count);

static unsigned int
drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count);

static int
drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
			    uint32_t *swizzle_mode);

static int
drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
				     uint32_t tiling_mode,
				     uint32_t stride);

static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
						      time_t time);

static void drm_intel_gem_bo_unreference(drm_intel_bo *bo);

static void drm_intel_gem_bo_free(drm_intel_bo *bo);

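/**
 * Round @size up to the smallest allocation the hardware can fence with the
 * requested tiling: page-aligned on gen4+ (or with relaxed fencing), a power
 * of two of at least 512KB/1MB otherwise.  Objects too large to be fenced
 * are demoted to I915_TILING_NONE.
 */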
static unsigned long
drm_intel_gem_bo_tile_size(drm_intel_bufmgr_gem *bufmgr_gem, unsigned long size,
			   uint32_t *tiling_mode)
{
	unsigned long min_size, max_size;
	unsigned long i;

	if (*tiling_mode == I915_TILING_NONE)
		return size;

	/* 965+ just need multiples of page size for tiling */
	if (bufmgr_gem->gen >= 4)
		return ROUND_UP_TO(size, 4096);

	/* Older chips need powers of two, of at least 512k or 1M */
	if (bufmgr_gem->gen == 3) {
		min_size = 1024*1024;
		max_size = 128*1024*1024;
	} else {
		min_size = 512*1024;
		max_size = 64*1024*1024;
	}

	if (size > max_size) {
		*tiling_mode = I915_TILING_NONE;
		return size;
	}

	/* Do we need to allocate every page for the fence? */
	if (bufmgr_gem->has_relaxed_fencing)
		return ROUND_UP_TO(size, 4096);

	for (i = min_size; i < size; i <<= 1)
		;

	return i;
}

/*
 * Round a given pitch up to the minimum required for X tiling on a
 * given chip.  We use 512 as the minimum to allow for a later tiling
 * change.
 */
static unsigned long
drm_intel_gem_bo_tile_pitch(drm_intel_bufmgr_gem *bufmgr_gem,
			    unsigned long pitch, uint32_t *tiling_mode)
{
	unsigned long tile_width;
	unsigned long i;

	/* If untiled, then just align it so that we can do rendering
	 * to it with the 3D engine.
	 */
	if (*tiling_mode == I915_TILING_NONE)
		return ALIGN(pitch, 64);

	if (*tiling_mode == I915_TILING_X
	    || (IS_915(bufmgr_gem->pci_device)
		&& *tiling_mode == I915_TILING_Y))
		tile_width = 512;
	else
		tile_width = 128;

	/* 965 is flexible */
	if (bufmgr_gem->gen >= 4)
		return ROUND_UP_TO(pitch, tile_width);

	/* The older hardware has a maximum pitch of 8192 with tiled
	 * surfaces, so fall back to untiled if it's too large.
	 */
	if (pitch > 8192) {
		*tiling_mode = I915_TILING_NONE;
		return ALIGN(pitch, 64);
	}

	/* Pre-965 needs power of two tile width */
	for (i = tile_width; i < pitch; i <<= 1)
		;

	return i;
}

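/** Find the smallest cache bucket that can hold an allocation of @size bytes. */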
static struct drm_intel_gem_bo_bucket *
drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
				 unsigned long size)
{
	int i;

	for (i = 0; i < bufmgr_gem->num_buckets; i++) {
		struct drm_intel_gem_bo_bucket *bucket =
		    &bufmgr_gem->cache_bucket[i];
		if (bucket->size >= size) {
			return bucket;
		}
	}

	return NULL;
}

static void
drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
{
	int i, j;

	for (i = 0; i < bufmgr_gem->exec_count; i++) {
		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

		if (bo_gem->relocs == NULL) {
			DBG("%2d: %d (%s)\n", i, bo_gem->gem_handle,
			    bo_gem->name);
			continue;
		}

		for (j = 0; j < bo_gem->reloc_count; j++) {
			drm_intel_bo *target_bo = bo_gem->reloc_target_info[j].bo;
			drm_intel_bo_gem *target_gem =
			    (drm_intel_bo_gem *) target_bo;

			DBG("%2d: %d (%s)@0x%08llx -> "
			    "%d (%s)@0x%08lx + 0x%08x\n",
			    i,
			    bo_gem->gem_handle, bo_gem->name,
			    (unsigned long long)bo_gem->relocs[j].offset,
			    target_gem->gem_handle,
			    target_gem->name,
			    target_bo->offset,
			    bo_gem->relocs[j].delta);
		}
	}
}

static inline void
drm_intel_gem_bo_reference(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	atomic_inc(&bo_gem->refcount);
}

/**
 * Adds the given buffer to the list of buffers to be validated (moved into the
 * appropriate memory type) with the next batch submission.
 *
 * If a buffer is validated multiple times in a batch submission, it ends up
 * with the intersection of the memory type flags and the union of the
 * access flags.
 */
static void
drm_intel_add_validate_buffer(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int index;

	if (bo_gem->validate_index != -1)
		return;

	/* Extend the array of validation entries as necessary. */
	if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
		int new_size = bufmgr_gem->exec_size * 2;

		if (new_size == 0)
			new_size = 5;

		bufmgr_gem->exec_objects =
		    realloc(bufmgr_gem->exec_objects,
			    sizeof(*bufmgr_gem->exec_objects) * new_size);
		bufmgr_gem->exec_bos =
		    realloc(bufmgr_gem->exec_bos,
			    sizeof(*bufmgr_gem->exec_bos) * new_size);
		bufmgr_gem->exec_size = new_size;
	}

	index = bufmgr_gem->exec_count;
	bo_gem->validate_index = index;
	/* Fill in array entry */
	bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
	bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
	bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t) bo_gem->relocs;
	bufmgr_gem->exec_objects[index].alignment = 0;
	bufmgr_gem->exec_objects[index].offset = 0;
	bufmgr_gem->exec_bos[index] = bo;
	bufmgr_gem->exec_count++;
}

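/**
 * Like drm_intel_add_validate_buffer(), but for the execbuffer2 path; it
 * additionally records whether the object needs a fence register by setting
 * EXEC_OBJECT_NEEDS_FENCE on the validation entry.
 */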
static void
drm_intel_add_validate_buffer2(drm_intel_bo *bo, int need_fence)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
	int index;

	if (bo_gem->validate_index != -1) {
		if (need_fence)
			bufmgr_gem->exec2_objects[bo_gem->validate_index].flags |=
				EXEC_OBJECT_NEEDS_FENCE;
		return;
	}

	/* Extend the array of validation entries as necessary. */
	if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
		int new_size = bufmgr_gem->exec_size * 2;

		if (new_size == 0)
			new_size = 5;

		bufmgr_gem->exec2_objects =
			realloc(bufmgr_gem->exec2_objects,
				sizeof(*bufmgr_gem->exec2_objects) * new_size);
		bufmgr_gem->exec_bos =
			realloc(bufmgr_gem->exec_bos,
				sizeof(*bufmgr_gem->exec_bos) * new_size);
		bufmgr_gem->exec_size = new_size;
	}

	index = bufmgr_gem->exec_count;
	bo_gem->validate_index = index;
	/* Fill in array entry */
	bufmgr_gem->exec2_objects[index].handle = bo_gem->gem_handle;
	bufmgr_gem->exec2_objects[index].relocation_count = bo_gem->reloc_count;
	bufmgr_gem->exec2_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
	bufmgr_gem->exec2_objects[index].alignment = 0;
	bufmgr_gem->exec2_objects[index].offset = 0;
	bufmgr_gem->exec_bos[index] = bo;
	bufmgr_gem->exec2_objects[index].flags = 0;
	bufmgr_gem->exec2_objects[index].rsvd1 = 0;
	bufmgr_gem->exec2_objects[index].rsvd2 = 0;
	if (need_fence) {
		bufmgr_gem->exec2_objects[index].flags |=
			EXEC_OBJECT_NEEDS_FENCE;
	}
	bufmgr_gem->exec_count++;
}

#define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
	sizeof(uint32_t))

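/**
 * Record the buffer's worst-case aperture footprint in reloc_tree_size:
 * the object size itself, doubled for pre-gen4 tiled buffers to account
 * for size alignment of the fenced region.
 */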
static void
drm_intel_bo_gem_set_in_aperture_size(drm_intel_bufmgr_gem *bufmgr_gem,
				      drm_intel_bo_gem *bo_gem)
{
	int size;

	assert(!bo_gem->used_as_reloc_target);

	/* The older chipsets are far less flexible in terms of tiling,
	 * and require tiled buffers to be size-aligned in the aperture.
	 * This means that in the worst possible case we will need a hole
	 * twice as large as the object in order for it to fit into the
	 * aperture.  Optimal packing is for wimps.
	 */
	size = bo_gem->bo.size;
	if (bufmgr_gem->gen < 4 && bo_gem->tiling_mode != I915_TILING_NONE) {
		int min_size;

		if (bufmgr_gem->has_relaxed_fencing) {
			if (bufmgr_gem->gen == 3)
				min_size = 1024*1024;
			else
				min_size = 512*1024;

			while (min_size < size)
				min_size *= 2;
		} else
			min_size = size;

		/* Account for worst-case alignment. */
		size = 2 * min_size;
	}

	bo_gem->reloc_tree_size = size;
}

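/**
 * Allocate the relocation and target-info arrays for @bo, capped at one
 * relocation per 4 bytes of buffer.  Returns non-zero and marks the buffer
 * with has_error if either allocation fails.
 */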
static int
drm_intel_setup_reloc_list(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	unsigned int max_relocs = bufmgr_gem->max_relocs;

	if (bo->size / 4 < max_relocs)
		max_relocs = bo->size / 4;

	bo_gem->relocs = malloc(max_relocs *
				sizeof(struct drm_i915_gem_relocation_entry));
	bo_gem->reloc_target_info = malloc(max_relocs *
					   sizeof(drm_intel_reloc_target));
	if (bo_gem->relocs == NULL || bo_gem->reloc_target_info == NULL) {
		bo_gem->has_error = true;

		free(bo_gem->relocs);
		bo_gem->relocs = NULL;

		free(bo_gem->reloc_target_info);
		bo_gem->reloc_target_info = NULL;

		return 1;
	}

	return 0;
}

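/** Ask the kernel whether the GPU is still using @bo. */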
static int
drm_intel_gem_bo_busy(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_busy busy;
	int ret;

	memset(&busy, 0, sizeof(busy));
	busy.handle = bo_gem->gem_handle;

	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);

	return (ret == 0 && busy.busy);
}

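/**
 * Tell the kernel that the buffer's backing pages may be discarded
 * (I915_MADV_DONTNEED) or are needed again (I915_MADV_WILLNEED), and
 * return whether the pages are still resident.
 */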
static int
drm_intel_gem_bo_madvise_internal(drm_intel_bufmgr_gem *bufmgr_gem,
				  drm_intel_bo_gem *bo_gem, int state)
{
	struct drm_i915_gem_madvise madv;

	madv.handle = bo_gem->gem_handle;
	madv.madv = state;
	madv.retained = 1;
	drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);

	return madv.retained;
}

static int
drm_intel_gem_bo_madvise(drm_intel_bo *bo, int madv)
{
	return drm_intel_gem_bo_madvise_internal
		((drm_intel_bufmgr_gem *) bo->bufmgr,
		 (drm_intel_bo_gem *) bo,
		 madv);
}

/* drop the oldest entries that have been purged by the kernel */
static void
drm_intel_gem_bo_cache_purge_bucket(drm_intel_bufmgr_gem *bufmgr_gem,
				    struct drm_intel_gem_bo_bucket *bucket)
{
	while (!DRMLISTEMPTY(&bucket->head)) {
		drm_intel_bo_gem *bo_gem;

		bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
				      bucket->head.next, head);
		if (drm_intel_gem_bo_madvise_internal
		    (bufmgr_gem, bo_gem, I915_MADV_DONTNEED))
			break;

		DRMLISTDEL(&bo_gem->head);
		drm_intel_gem_bo_free(&bo_gem->bo);
	}
}

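/**
 * Common allocation path: satisfy the request from the matching cache
 * bucket when possible, otherwise create a fresh object with
 * DRM_IOCTL_I915_GEM_CREATE and apply the requested tiling.
 */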
static drm_intel_bo *
drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr,
				const char *name,
				unsigned long size,
				unsigned long flags,
				uint32_t tiling_mode,
				unsigned long stride)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	drm_intel_bo_gem *bo_gem;
	unsigned int page_size = getpagesize();
	int ret;
	struct drm_intel_gem_bo_bucket *bucket;
	bool alloc_from_cache;
	unsigned long bo_size;
	bool for_render = false;

	if (flags & BO_ALLOC_FOR_RENDER)
		for_render = true;

	/* Round the allocated size up to a power of two number of pages. */
	bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, size);

	/* If we don't have caching at this size, don't actually round the
	 * allocation up.
	 */
	if (bucket == NULL) {
		bo_size = size;
		if (bo_size < page_size)
			bo_size = page_size;
	} else {
		bo_size = bucket->size;
	}

	pthread_mutex_lock(&bufmgr_gem->lock);
	/* Get a buffer out of the cache if available */
retry:
	alloc_from_cache = false;
	if (bucket != NULL && !DRMLISTEMPTY(&bucket->head)) {
		if (for_render) {
			/* Allocate new render-target BOs from the tail (MRU)
			 * of the list, as it will likely be hot in the GPU
			 * cache and in the aperture for us.
			 */
			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
					      bucket->head.prev, head);
			DRMLISTDEL(&bo_gem->head);
			alloc_from_cache = true;
		} else {
			/* For non-render-target BOs (where we're probably
			 * going to map it first thing in order to fill it
			 * with data), check if the last BO in the cache is
			 * unbusy, and only reuse in that case.  Otherwise,
			 * allocating a new buffer is probably faster than
			 * waiting for the GPU to finish.
			 */
			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
					      bucket->head.next, head);
			if (!drm_intel_gem_bo_busy(&bo_gem->bo)) {
				alloc_from_cache = true;
				DRMLISTDEL(&bo_gem->head);
			}
		}

		if (alloc_from_cache) {
			if (!drm_intel_gem_bo_madvise_internal
			    (bufmgr_gem, bo_gem, I915_MADV_WILLNEED)) {
				drm_intel_gem_bo_free(&bo_gem->bo);
				drm_intel_gem_bo_cache_purge_bucket(bufmgr_gem,
								    bucket);
				goto retry;
			}

			if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
								 tiling_mode,
								 stride)) {
				drm_intel_gem_bo_free(&bo_gem->bo);
				goto retry;
			}
		}
	}
	pthread_mutex_unlock(&bufmgr_gem->lock);

	if (!alloc_from_cache) {
		struct drm_i915_gem_create create;

		bo_gem = calloc(1, sizeof(*bo_gem));
		if (!bo_gem)
			return NULL;

		bo_gem->bo.size = bo_size;
		memset(&create, 0, sizeof(create));
		create.size = bo_size;

		ret = drmIoctl(bufmgr_gem->fd,
			       DRM_IOCTL_I915_GEM_CREATE,
			       &create);
		bo_gem->gem_handle = create.handle;
		bo_gem->bo.handle = bo_gem->gem_handle;
		if (ret != 0) {
			free(bo_gem);
			return NULL;
		}
		bo_gem->bo.bufmgr = bufmgr;

		bo_gem->tiling_mode = I915_TILING_NONE;
		bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		bo_gem->stride = 0;

		if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
							 tiling_mode,
							 stride)) {
			drm_intel_gem_bo_free(&bo_gem->bo);
			return NULL;
		}

		DRMINITLISTHEAD(&bo_gem->name_list);
		DRMINITLISTHEAD(&bo_gem->vma_list);
	}

	bo_gem->name = name;
	atomic_set(&bo_gem->refcount, 1);
	bo_gem->validate_index = -1;
	bo_gem->reloc_tree_fences = 0;
	bo_gem->used_as_reloc_target = false;
	bo_gem->has_error = false;
	bo_gem->reusable = true;

	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);

	DBG("bo_create: buf %d (%s) %ldb\n",
	    bo_gem->gem_handle, bo_gem->name, size);

	return &bo_gem->bo;
}

static drm_intel_bo *
drm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
				  const char *name,
				  unsigned long size,
				  unsigned int alignment)
{
	return drm_intel_gem_bo_alloc_internal(bufmgr, name, size,
					       BO_ALLOC_FOR_RENDER,
					       I915_TILING_NONE, 0);
}

static drm_intel_bo *
drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr,
		       const char *name,
		       unsigned long size,
		       unsigned int alignment)
{
	return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, 0,
					       I915_TILING_NONE, 0);
}

static drm_intel_bo *
drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
			     int x, int y, int cpp, uint32_t *tiling_mode,
			     unsigned long *pitch, unsigned long flags)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
	unsigned long size, stride;
	uint32_t tiling;

	do {
		unsigned long aligned_y, height_alignment;

		tiling = *tiling_mode;

		/* If we're tiled, our allocations are in 8 or 32-row blocks,
		 * so failure to align our height means that we won't allocate
		 * enough pages.
		 *
		 * If we're untiled, we still have to align to 2 rows high
		 * because the data port accesses 2x2 blocks even if the
		 * bottom row isn't to be rendered, so failure to align means
		 * we could walk off the end of the GTT and fault.  This is
		 * documented on 965, and may be the case on older chipsets
		 * too so we try to be careful.
		 */
		aligned_y = y;
		height_alignment = 2;

		if ((bufmgr_gem->gen == 2) && tiling != I915_TILING_NONE)
			height_alignment = 16;
		else if (tiling == I915_TILING_X
			 || (IS_915(bufmgr_gem->pci_device)
			     && tiling == I915_TILING_Y))
			height_alignment = 8;
		else if (tiling == I915_TILING_Y)
			height_alignment = 32;
		aligned_y = ALIGN(y, height_alignment);

		stride = x * cpp;
		stride = drm_intel_gem_bo_tile_pitch(bufmgr_gem, stride, tiling_mode);
		size = stride * aligned_y;
		size = drm_intel_gem_bo_tile_size(bufmgr_gem, size, tiling_mode);
	} while (*tiling_mode != tiling);
	*pitch = stride;

	if (tiling == I915_TILING_NONE)
		stride = 0;

	return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, flags,
					       tiling, stride);
}

/**
 * Returns a drm_intel_bo wrapping the given buffer object handle.
 *
 * This can be used when one application needs to pass a buffer object
 * to another.
 */
drm_intel_bo *
drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
				  const char *name,
				  unsigned int handle)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	drm_intel_bo_gem *bo_gem;
	int ret;
	struct drm_gem_open open_arg;
	struct drm_i915_gem_get_tiling get_tiling;
	drmMMListHead *list;

	/* At the moment most applications only have a few named BOs.
	 * For instance, in a DRI client only the render buffers passed
	 * between X and the client are named.  And since X returns the
	 * alternating names for the front/back buffer a linear search
	 * provides a sufficiently fast match.
	 */
	for (list = bufmgr_gem->named.next;
	     list != &bufmgr_gem->named;
	     list = list->next) {
		bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
		if (bo_gem->global_name == handle) {
			drm_intel_gem_bo_reference(&bo_gem->bo);
			return &bo_gem->bo;
		}
	}

	bo_gem = calloc(1, sizeof(*bo_gem));
	if (!bo_gem)
		return NULL;

	memset(&open_arg, 0, sizeof(open_arg));
	open_arg.name = handle;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_GEM_OPEN,
		       &open_arg);
	if (ret != 0) {
		DBG("Couldn't reference %s handle 0x%08x: %s\n",
		    name, handle, strerror(errno));
		free(bo_gem);
		return NULL;
	}
	bo_gem->bo.size = open_arg.size;
	bo_gem->bo.offset = 0;
	bo_gem->bo.virtual = NULL;
	bo_gem->bo.bufmgr = bufmgr;
	bo_gem->name = name;
	atomic_set(&bo_gem->refcount, 1);
	bo_gem->validate_index = -1;
	bo_gem->gem_handle = open_arg.handle;
	bo_gem->bo.handle = open_arg.handle;
	bo_gem->global_name = handle;
	bo_gem->reusable = false;

	memset(&get_tiling, 0, sizeof(get_tiling));
	get_tiling.handle = bo_gem->gem_handle;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_GET_TILING,
		       &get_tiling);
	if (ret != 0) {
		drm_intel_gem_bo_unreference(&bo_gem->bo);
		return NULL;
	}
	bo_gem->tiling_mode = get_tiling.tiling_mode;
	bo_gem->swizzle_mode = get_tiling.swizzle_mode;
	/* XXX stride is unknown */
	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);

	DRMINITLISTHEAD(&bo_gem->vma_list);
	DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
	DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);

	return &bo_gem->bo;
}

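/** Release the object's cached mappings and close its GEM handle. */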
static void
drm_intel_gem_bo_free(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_gem_close close;
	int ret;

	DRMLISTDEL(&bo_gem->vma_list);
	if (bo_gem->mem_virtual) {
		munmap(bo_gem->mem_virtual, bo_gem->bo.size);
		bufmgr_gem->vma_count--;
	}
	if (bo_gem->gtt_virtual) {
		munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
		bufmgr_gem->vma_count--;
	}

	/* Close this object */
	memset(&close, 0, sizeof(close));
	close.handle = bo_gem->gem_handle;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
	if (ret != 0) {
		DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
		    bo_gem->gem_handle, bo_gem->name, strerror(errno));
	}
	free(bo);
}

/** Frees all cached buffers significantly older than @time. */
static void
drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
{
	int i;

	if (bufmgr_gem->time == time)
		return;

	for (i = 0; i < bufmgr_gem->num_buckets; i++) {
		struct drm_intel_gem_bo_bucket *bucket =
		    &bufmgr_gem->cache_bucket[i];

		while (!DRMLISTEMPTY(&bucket->head)) {
			drm_intel_bo_gem *bo_gem;

			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
					      bucket->head.next, head);
			if (time - bo_gem->free_time <= 1)
				break;

			DRMLISTDEL(&bo_gem->head);

			drm_intel_gem_bo_free(&bo_gem->bo);
		}
	}

	bufmgr_gem->time = time;
}

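/**
 * Evict cached CPU/GTT mappings until the total count drops below the
 * configured limit, leaving headroom for mappings about to be opened.
 */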
static void drm_intel_gem_bo_purge_vma_cache(drm_intel_bufmgr_gem *bufmgr_gem)
{
	int limit;

	DBG("%s: cached=%d, open=%d, limit=%d\n", __FUNCTION__,
	    bufmgr_gem->vma_count, bufmgr_gem->vma_open, bufmgr_gem->vma_max);

	if (bufmgr_gem->vma_max < 0)
		return;

	/* We may need to evict a few entries in order to create new mmaps */
	limit = bufmgr_gem->vma_max - 2*bufmgr_gem->vma_open;
	if (limit < 0)
		limit = 0;

	while (bufmgr_gem->vma_count > limit) {
		drm_intel_bo_gem *bo_gem;

		bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
				      bufmgr_gem->vma_cache.next,
				      vma_list);
		assert(bo_gem->map_count == 0);
		DRMLISTDELINIT(&bo_gem->vma_list);

		if (bo_gem->mem_virtual) {
			munmap(bo_gem->mem_virtual, bo_gem->bo.size);
			bo_gem->mem_virtual = NULL;
			bufmgr_gem->vma_count--;
		}
		if (bo_gem->gtt_virtual) {
			munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
			bo_gem->gtt_virtual = NULL;
			bufmgr_gem->vma_count--;
		}
	}
}

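/*
 * close_vma/open_vma track a buffer's mappings moving into and out of the
 * VMA cache, keeping vma_open and vma_count balanced so the purge
 * heuristic above stays accurate.
 */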
static void drm_intel_gem_bo_close_vma(drm_intel_bufmgr_gem *bufmgr_gem,
				       drm_intel_bo_gem *bo_gem)
{
	bufmgr_gem->vma_open--;
	DRMLISTADDTAIL(&bo_gem->vma_list, &bufmgr_gem->vma_cache);
	if (bo_gem->mem_virtual)
		bufmgr_gem->vma_count++;
	if (bo_gem->gtt_virtual)
		bufmgr_gem->vma_count++;
	drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
}

static void drm_intel_gem_bo_open_vma(drm_intel_bufmgr_gem *bufmgr_gem,
				      drm_intel_bo_gem *bo_gem)
{
	bufmgr_gem->vma_open++;
	DRMLISTDEL(&bo_gem->vma_list);
	if (bo_gem->mem_virtual)
		bufmgr_gem->vma_count--;
	if (bo_gem->gtt_virtual)
		bufmgr_gem->vma_count--;
	drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
}

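/**
 * Final-unreference path (called with the bufmgr lock held): drop the
 * relocation tree, tear down any stale mappings, and either return the
 * buffer to its cache bucket or free it outright.
 */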
static void
drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_intel_gem_bo_bucket *bucket;
	int i;

	/* Unreference all the target buffers */
	for (i = 0; i < bo_gem->reloc_count; i++) {
		if (bo_gem->reloc_target_info[i].bo != bo) {
			drm_intel_gem_bo_unreference_locked_timed(bo_gem->
								  reloc_target_info[i].bo,
								  time);
		}
	}
	bo_gem->reloc_count = 0;
	bo_gem->used_as_reloc_target = false;

	DBG("bo_unreference final: %d (%s)\n",
	    bo_gem->gem_handle, bo_gem->name);

	/* release memory associated with this object */
	if (bo_gem->reloc_target_info) {
		free(bo_gem->reloc_target_info);
		bo_gem->reloc_target_info = NULL;
	}
	if (bo_gem->relocs) {
		free(bo_gem->relocs);
		bo_gem->relocs = NULL;
	}

	/* Clear any left-over mappings */
	if (bo_gem->map_count) {
		DBG("bo freed with non-zero map-count %d\n", bo_gem->map_count);
		bo_gem->map_count = 0;
		drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
	}

	DRMLISTDEL(&bo_gem->name_list);

	bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
	/* Put the buffer into our internal cache for reuse if we can. */
	if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
	    drm_intel_gem_bo_madvise_internal(bufmgr_gem, bo_gem,
					      I915_MADV_DONTNEED)) {
		bo_gem->free_time = time;

		bo_gem->name = NULL;
		bo_gem->validate_index = -1;

		DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
	} else {
		drm_intel_gem_bo_free(bo);
	}
}

static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
						      time_t time)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	assert(atomic_read(&bo_gem->refcount) > 0);
	if (atomic_dec_and_test(&bo_gem->refcount))
		drm_intel_gem_bo_unreference_final(bo, time);
}

static void drm_intel_gem_bo_unreference(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	assert(atomic_read(&bo_gem->refcount) > 0);
	if (atomic_dec_and_test(&bo_gem->refcount)) {
		drm_intel_bufmgr_gem *bufmgr_gem =
		    (drm_intel_bufmgr_gem *) bo->bufmgr;
		struct timespec time;

		clock_gettime(CLOCK_MONOTONIC, &time);

		pthread_mutex_lock(&bufmgr_gem->lock);
		drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
		drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
		pthread_mutex_unlock(&bufmgr_gem->lock);
	}
}

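/**
 * Map the buffer for CPU access via DRM_IOCTL_I915_GEM_MMAP and move it to
 * the CPU domain.  The mapping is cached in mem_virtual across map/unmap
 * cycles.
 */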
static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_set_domain set_domain;
	int ret;

	pthread_mutex_lock(&bufmgr_gem->lock);

	if (bo_gem->map_count++ == 0)
		drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);

	if (!bo_gem->mem_virtual) {
		struct drm_i915_gem_mmap mmap_arg;

		DBG("bo_map: %d (%s), map_count=%d\n",
		    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);

		memset(&mmap_arg, 0, sizeof(mmap_arg));
		mmap_arg.handle = bo_gem->gem_handle;
		mmap_arg.offset = 0;
		mmap_arg.size = bo->size;
		ret = drmIoctl(bufmgr_gem->fd,
			       DRM_IOCTL_I915_GEM_MMAP,
			       &mmap_arg);
		if (ret != 0) {
			ret = -errno;
			DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
			    __FILE__, __LINE__, bo_gem->gem_handle,
			    bo_gem->name, strerror(errno));
			if (--bo_gem->map_count == 0)
				drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
			pthread_mutex_unlock(&bufmgr_gem->lock);
			return ret;
		}
		bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
	}
	DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
	    bo_gem->mem_virtual);
	bo->virtual = bo_gem->mem_virtual;

	set_domain.handle = bo_gem->gem_handle;
	set_domain.read_domains = I915_GEM_DOMAIN_CPU;
	if (write_enable)
		set_domain.write_domain = I915_GEM_DOMAIN_CPU;
	else
		set_domain.write_domain = 0;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_SET_DOMAIN,
		       &set_domain);
	if (ret != 0) {
		DBG("%s:%d: Error setting to CPU domain %d: %s\n",
		    __FILE__, __LINE__, bo_gem->gem_handle,
		    strerror(errno));
	}

	if (write_enable)
		bo_gem->mapped_cpu_write = true;

	pthread_mutex_unlock(&bufmgr_gem->lock);

	return 0;
}

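/**
 * Map the buffer through the GTT (write-combined and tiling-aware) rather
 * than through the CPU path above.  A minimal usage sketch, with error
 * handling omitted and "data"/"len" standing in for the caller's source
 * buffer:
 *
 *	if (drm_intel_gem_bo_map_gtt(bo) == 0) {
 *		memcpy(bo->virtual, data, len);
 *		drm_intel_gem_bo_unmap_gtt(bo);
 *	}
 */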
int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_set_domain set_domain;
	int ret;

	pthread_mutex_lock(&bufmgr_gem->lock);

	if (bo_gem->map_count++ == 0)
		drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);

	/* Get a mapping of the buffer if we haven't before. */
	if (bo_gem->gtt_virtual == NULL) {
		struct drm_i915_gem_mmap_gtt mmap_arg;

		DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
		    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);

		memset(&mmap_arg, 0, sizeof(mmap_arg));
		mmap_arg.handle = bo_gem->gem_handle;

		/* Get the fake offset back... */
		ret = drmIoctl(bufmgr_gem->fd,
			       DRM_IOCTL_I915_GEM_MMAP_GTT,
			       &mmap_arg);
		if (ret != 0) {
			ret = -errno;
			DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
			    __FILE__, __LINE__,
			    bo_gem->gem_handle, bo_gem->name,
			    strerror(errno));
			if (--bo_gem->map_count == 0)
				drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
			pthread_mutex_unlock(&bufmgr_gem->lock);
			return ret;
		}

		/* and mmap it */
		bo_gem->gtt_virtual = mmap(0, bo->size, PROT_READ | PROT_WRITE,
					   MAP_SHARED, bufmgr_gem->fd,
					   mmap_arg.offset);
		if (bo_gem->gtt_virtual == MAP_FAILED) {
			bo_gem->gtt_virtual = NULL;
			ret = -errno;
			DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
			    __FILE__, __LINE__,
			    bo_gem->gem_handle, bo_gem->name,
			    strerror(errno));
			if (--bo_gem->map_count == 0)
				drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
			pthread_mutex_unlock(&bufmgr_gem->lock);
			return ret;
		}
	}

	bo->virtual = bo_gem->gtt_virtual;

	DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
	    bo_gem->gtt_virtual);

	/* Now move it to the GTT domain so that the CPU caches are flushed */
	set_domain.handle = bo_gem->gem_handle;
	set_domain.read_domains = I915_GEM_DOMAIN_GTT;
	set_domain.write_domain = I915_GEM_DOMAIN_GTT;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_SET_DOMAIN,
		       &set_domain);
	if (ret != 0) {
		DBG("%s:%d: Error setting domain %d: %s\n",
		    __FILE__, __LINE__, bo_gem->gem_handle,
		    strerror(errno));
	}

	pthread_mutex_unlock(&bufmgr_gem->lock);

	return 0;
}

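/**
 * Drop one map reference; on the last unmap, issue SW_FINISH for
 * CPU-written buffers and return the VMA to the cache.
 */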
static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_sw_finish sw_finish;
	int ret = 0;

	if (bo == NULL)
		return 0;

	pthread_mutex_lock(&bufmgr_gem->lock);

	if (bo_gem->map_count <= 0) {
		DBG("attempted to unmap an unmapped bo\n");
		pthread_mutex_unlock(&bufmgr_gem->lock);
		/* Preserve the old behaviour of just treating this as a
		 * no-op rather than reporting the error.
		 */
		return 0;
	}

	if (bo_gem->mapped_cpu_write) {
		/* Cause a flush to happen if the buffer's pinned for
		 * scanout, so the results show up in a timely manner.
		 * Unlike GTT set domains, this only does work if the
		 * buffer should be scanout-related.
		 */
		sw_finish.handle = bo_gem->gem_handle;
		ret = drmIoctl(bufmgr_gem->fd,
			       DRM_IOCTL_I915_GEM_SW_FINISH,
			       &sw_finish);
		ret = ret == -1 ? -errno : 0;

		bo_gem->mapped_cpu_write = false;
	}

	/* We need to unmap after every invocation as we cannot track
	 * an open vma for every bo as that will exhaust the system
	 * limits and cause later failures.
	 */
	if (--bo_gem->map_count == 0) {
		drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
		bo->virtual = NULL;
	}
	pthread_mutex_unlock(&bufmgr_gem->lock);

	return ret;
}

int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
{
	return drm_intel_gem_bo_unmap(bo);
}

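/** Write @size bytes from @data into the buffer at @offset using pwrite. */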
static int
drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
			 unsigned long size, const void *data)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_pwrite pwrite;
	int ret;

	memset(&pwrite, 0, sizeof(pwrite));
	pwrite.handle = bo_gem->gem_handle;
	pwrite.offset = offset;
	pwrite.size = size;
	pwrite.data_ptr = (uint64_t) (uintptr_t) data;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_PWRITE,
		       &pwrite);
	if (ret != 0) {
		ret = -errno;
		DBG("%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
		    __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
		    (int)size, strerror(errno));
	}

	return ret;
}

static int
drm_intel_gem_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	struct drm_i915_get_pipe_from_crtc_id get_pipe_from_crtc_id;
	int ret;

	get_pipe_from_crtc_id.crtc_id = crtc_id;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID,
		       &get_pipe_from_crtc_id);
	if (ret != 0) {
		/* We return -1 here to signal that we don't
		 * know which pipe is associated with this crtc.
		 * This lets the caller know that this information
		 * isn't available; using the wrong pipe for
		 * vblank waiting can cause the chipset to lock up
		 */
		return -1;
	}

	return get_pipe_from_crtc_id.pipe;
}

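/** Read @size bytes from the buffer at @offset into @data using pread. */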
static int
drm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
			     unsigned long size, void *data)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_pread pread;
	int ret;

	memset(&pread, 0, sizeof(pread));
	pread.handle = bo_gem->gem_handle;
	pread.offset = offset;
	pread.size = size;
	pread.data_ptr = (uint64_t) (uintptr_t) data;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_PREAD,
		       &pread);
	if (ret != 0) {
		ret = -errno;
		DBG("%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
		    __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
		    (int)size, strerror(errno));
	}

	return ret;
}

/** Waits for all GPU rendering with the object to have completed. */
static void
drm_intel_gem_bo_wait_rendering(drm_intel_bo *bo)
{
	drm_intel_gem_bo_start_gtt_access(bo, 1);
}

/**
 * Sets the object to the GTT read and possibly write domain, used by the X
 * 2D driver in the absence of kernel support to do drm_intel_gem_bo_map_gtt().
 *
 * In combination with drm_intel_gem_bo_pin() and manual fence management, we
 * can do tiled pixmaps this way.
 */
void
drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_set_domain set_domain;
	int ret;

	set_domain.handle = bo_gem->gem_handle;
	set_domain.read_domains = I915_GEM_DOMAIN_GTT;
	set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_SET_DOMAIN,
		       &set_domain);
	if (ret != 0) {
		DBG("%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
		    __FILE__, __LINE__, bo_gem->gem_handle,
		    set_domain.read_domains, set_domain.write_domain,
		    strerror(errno));
	}
}

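/** Tear down the buffer manager, freeing the exec arrays and every cached BO. */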
static void
drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	int i;

	free(bufmgr_gem->exec2_objects);
	free(bufmgr_gem->exec_objects);
	free(bufmgr_gem->exec_bos);

	pthread_mutex_destroy(&bufmgr_gem->lock);

	/* Free any cached buffer objects we were going to reuse */
	for (i = 0; i < bufmgr_gem->num_buckets; i++) {
		struct drm_intel_gem_bo_bucket *bucket =
		    &bufmgr_gem->cache_bucket[i];
		drm_intel_bo_gem *bo_gem;

		while (!DRMLISTEMPTY(&bucket->head)) {
			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
					      bucket->head.next, head);
			DRMLISTDEL(&bo_gem->head);

			drm_intel_gem_bo_free(&bo_gem->bo);
		}
	}

	free(bufmgr);
}

/**
 * Adds the target buffer to the validation list and adds the relocation
 * to the reloc_buffer's relocation list.
 *
 * The relocation entry at the given offset must already contain the
 * precomputed relocation value, because the kernel will optimize out
 * the relocation entry write when the buffer hasn't moved from the
 * last known offset in target_bo.
 */
static int
do_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
		 drm_intel_bo *target_bo, uint32_t target_offset,
		 uint32_t read_domains, uint32_t write_domain,
		 bool need_fence)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
	bool fenced_command;

	if (bo_gem->has_error)
		return -ENOMEM;

	if (target_bo_gem->has_error) {
		bo_gem->has_error = true;
		return -ENOMEM;
	}

	/* We never use HW fences for rendering on 965+ */
	if (bufmgr_gem->gen >= 4)
		need_fence = false;

	fenced_command = need_fence;
	if (target_bo_gem->tiling_mode == I915_TILING_NONE)
		need_fence = false;

	/* Create a new relocation list if needed */
	if (bo_gem->relocs == NULL && drm_intel_setup_reloc_list(bo))
		return -ENOMEM;

	/* Check overflow */
	assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);

	/* Check args */
	assert(offset <= bo->size - 4);
	assert((write_domain & (write_domain - 1)) == 0);

	/* Make sure that we're not adding a reloc to something whose size has
	 * already been accounted for.
	 */
	assert(!bo_gem->used_as_reloc_target);
	if (target_bo_gem != bo_gem) {
		target_bo_gem->used_as_reloc_target = true;
		bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
	}
	/* An object needing a fence is a tiled buffer, so it won't have
	 * relocs to other buffers.
	 */
	if (need_fence)
		target_bo_gem->reloc_tree_fences = 1;
	bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences;

	bo_gem->relocs[bo_gem->reloc_count].offset = offset;
	bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
	bo_gem->relocs[bo_gem->reloc_count].target_handle =
	    target_bo_gem->gem_handle;
	bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
	bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
	bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset;

	bo_gem->reloc_target_info[bo_gem->reloc_count].bo = target_bo;
	if (target_bo != bo)
		drm_intel_gem_bo_reference(target_bo);
	if (fenced_command)
		bo_gem->reloc_target_info[bo_gem->reloc_count].flags =
			DRM_INTEL_RELOC_FENCE;
	else
		bo_gem->reloc_target_info[bo_gem->reloc_count].flags = 0;

	bo_gem->reloc_count++;

	return 0;
}

static int
drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
			    drm_intel_bo *target_bo, uint32_t target_offset,
			    uint32_t read_domains, uint32_t write_domain)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;

	return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
				read_domains, write_domain,
				!bufmgr_gem->fenced_relocs);
}

static int
drm_intel_gem_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
				  drm_intel_bo *target_bo,
				  uint32_t target_offset,
				  uint32_t read_domains, uint32_t write_domain)
{
	return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
				read_domains, write_domain, true);
}

int
drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	return bo_gem->reloc_count;
}

/**
 * Removes existing relocation entries in the BO after "start".
 *
 * This allows a user to avoid a two-step process for state setup with
 * counting up all the buffer objects and doing a
 * drm_intel_bufmgr_check_aperture_space() before emitting any of the
 * relocations for the state setup.  Instead, save the state of the
 * batchbuffer including drm_intel_gem_bo_get_reloc_count(), emit all the
 * state, and then check if it still fits in the aperture.
 *
 * Any further drm_intel_bufmgr_check_aperture_space() queries
 * involving this buffer in the tree are undefined after this call.
 */
void
drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int i;
	struct timespec time;

	clock_gettime(CLOCK_MONOTONIC, &time);

	assert(bo_gem->reloc_count >= start);
	/* Unreference the cleared target buffers */
	for (i = start; i < bo_gem->reloc_count; i++) {
		if (bo_gem->reloc_target_info[i].bo != bo) {
			drm_intel_gem_bo_unreference_locked_timed(bo_gem->
								  reloc_target_info[i].bo,
								  time.tv_sec);
		}
	}
	bo_gem->reloc_count = start;
}

/**
 * Walk the tree of relocations rooted at BO and accumulate the list of
 * validations to be performed and update the relocation buffers with
 * index values into the validation list.
 */
static void
drm_intel_gem_bo_process_reloc(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int i;

	if (bo_gem->relocs == NULL)
		return;

	for (i = 0; i < bo_gem->reloc_count; i++) {
		drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;

		if (target_bo == bo)
			continue;

		/* Continue walking the tree depth-first. */
		drm_intel_gem_bo_process_reloc(target_bo);

		/* Add the target to the validate list */
		drm_intel_add_validate_buffer(target_bo);
	}
}

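/**
 * execbuffer2 variant of the relocation walk; forwards each target's fence
 * requirement to drm_intel_add_validate_buffer2().
 */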
static void
drm_intel_gem_bo_process_reloc2(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
	int i;

	if (bo_gem->relocs == NULL)
		return;

	for (i = 0; i < bo_gem->reloc_count; i++) {
		drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
		int need_fence;

		if (target_bo == bo)
			continue;

		/* Continue walking the tree depth-first. */
		drm_intel_gem_bo_process_reloc2(target_bo);

		need_fence = (bo_gem->reloc_target_info[i].flags &
			      DRM_INTEL_RELOC_FENCE);

		/* Add the target to the validate list */
		drm_intel_add_validate_buffer2(target_bo, need_fence);
	}
}

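/**
 * After execution, copy the offsets the kernel chose back into each bo so
 * that future relocations are emitted with the correct presumed offset.
 */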
static void
drm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem)
{
	int i;

	for (i = 0; i < bufmgr_gem->exec_count; i++) {
		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

		/* Update the buffer offset */
		if (bufmgr_gem->exec_objects[i].offset != bo->offset) {
			DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
			    bo_gem->gem_handle, bo_gem->name, bo->offset,
			    (unsigned long long)bufmgr_gem->exec_objects[i].
			    offset);
			bo->offset = bufmgr_gem->exec_objects[i].offset;
		}
	}
}

static void
drm_intel_update_buffer_offsets2(drm_intel_bufmgr_gem *bufmgr_gem)
{
	int i;

	for (i = 0; i < bufmgr_gem->exec_count; i++) {
		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;

		/* Update the buffer offset */
		if (bufmgr_gem->exec2_objects[i].offset != bo->offset) {
			DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
			    bo_gem->gem_handle, bo_gem->name, bo->offset,
			    (unsigned long long)bufmgr_gem->exec2_objects[i].offset);
			bo->offset = bufmgr_gem->exec2_objects[i].offset;
		}
	}
}

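/**
 * Flush the batchbuffer via the original DRM_IOCTL_I915_GEM_EXECBUFFER
 * interface, then write back the kernel-assigned offsets and reset the
 * validation list.
 */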
static int
drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
		      drm_clip_rect_t *cliprects, int num_cliprects, int DR4)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_execbuffer execbuf;
	int ret, i;

	if (bo_gem->has_error)
		return -ENOMEM;

	pthread_mutex_lock(&bufmgr_gem->lock);
	/* Update indices and set up the validate list. */
	drm_intel_gem_bo_process_reloc(bo);

	/* Add the batch buffer to the validation list.  There are no
	 * relocations pointing to it.
	 */
	drm_intel_add_validate_buffer(bo);

	execbuf.buffers_ptr = (uintptr_t) bufmgr_gem->exec_objects;
	execbuf.buffer_count = bufmgr_gem->exec_count;
	execbuf.batch_start_offset = 0;
	execbuf.batch_len = used;
	execbuf.cliprects_ptr = (uintptr_t) cliprects;
	execbuf.num_cliprects = num_cliprects;
	execbuf.DR1 = 0;
	execbuf.DR4 = DR4;

	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_EXECBUFFER,
		       &execbuf);
	if (ret != 0) {
		ret = -errno;
		if (errno == ENOSPC) {
			DBG("Execbuffer fails to pin. "
			    "Estimate: %u. Actual: %u. Available: %u\n",
			    drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
							       bufmgr_gem->exec_count),
			    drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
							      bufmgr_gem->exec_count),
			    (unsigned int)bufmgr_gem->gtt_size);
		}
	}
	drm_intel_update_buffer_offsets(bufmgr_gem);

	if (bufmgr_gem->bufmgr.debug)
		drm_intel_gem_dump_validation_list(bufmgr_gem);

	for (i = 0; i < bufmgr_gem->exec_count; i++) {
		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

		/* Disconnect the buffer from the validate list */
		bo_gem->validate_index = -1;
		bufmgr_gem->exec_bos[i] = NULL;
	}
	bufmgr_gem->exec_count = 0;
	pthread_mutex_unlock(&bufmgr_gem->lock);

	return ret;
}

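/**
 * execbuffer2 path with ring selection: validates the requested ring
 * (render/BSD/BLT) against the device's capabilities before submitting.
 */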
static int
drm_intel_gem_bo_mrb_exec2(drm_intel_bo *bo, int used,
			   drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
			   unsigned int flags)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
	struct drm_i915_gem_execbuffer2 execbuf;
	int ret, i;

	switch (flags & 0x7) {
	default:
		return -EINVAL;
	case I915_EXEC_BLT:
		if (!bufmgr_gem->has_blt)
			return -EINVAL;
		break;
	case I915_EXEC_BSD:
		if (!bufmgr_gem->has_bsd)
			return -EINVAL;
		break;
	case I915_EXEC_RENDER:
	case I915_EXEC_DEFAULT:
		break;
	}

	pthread_mutex_lock(&bufmgr_gem->lock);
	/* Update indices and set up the validate list. */
	drm_intel_gem_bo_process_reloc2(bo);

	/* Add the batch buffer to the validation list.  There are no
	 * relocations pointing to it.
	 */
	drm_intel_add_validate_buffer2(bo, 0);

	execbuf.buffers_ptr = (uintptr_t)bufmgr_gem->exec2_objects;
	execbuf.buffer_count = bufmgr_gem->exec_count;
	execbuf.batch_start_offset = 0;
	execbuf.batch_len = used;
	execbuf.cliprects_ptr = (uintptr_t)cliprects;
	execbuf.num_cliprects = num_cliprects;
	execbuf.DR1 = 0;
	execbuf.DR4 = DR4;
	execbuf.flags = flags;
	execbuf.rsvd1 = 0;
	execbuf.rsvd2 = 0;

	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_EXECBUFFER2,
		       &execbuf);
	if (ret != 0) {
		ret = -errno;
		if (ret == -ENOSPC) {
			DBG("Execbuffer fails to pin. "
			    "Estimate: %u. Actual: %u. Available: %u\n",
			    drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
							       bufmgr_gem->exec_count),
			    drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
							      bufmgr_gem->exec_count),
			    (unsigned int) bufmgr_gem->gtt_size);
		}
	}
	drm_intel_update_buffer_offsets2(bufmgr_gem);

	if (bufmgr_gem->bufmgr.debug)
		drm_intel_gem_dump_validation_list(bufmgr_gem);

	for (i = 0; i < bufmgr_gem->exec_count; i++) {
		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;

		/* Disconnect the buffer from the validate list */
		bo_gem->validate_index = -1;
		bufmgr_gem->exec_bos[i] = NULL;
	}
	bufmgr_gem->exec_count = 0;
	pthread_mutex_unlock(&bufmgr_gem->lock);

	return ret;
}

static int
drm_intel_gem_bo_exec2(drm_intel_bo *bo, int used,
		       drm_clip_rect_t *cliprects, int num_cliprects,
		       int DR4)
{
	return drm_intel_gem_bo_mrb_exec2(bo, used,
					  cliprects, num_cliprects, DR4,
					  I915_EXEC_RENDER);
}

static int
drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_pin pin;
	int ret;

	memset(&pin, 0, sizeof(pin));
	pin.handle = bo_gem->gem_handle;
	pin.alignment = alignment;

	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_PIN,
		       &pin);
	if (ret != 0)
		return -errno;

	bo->offset = pin.offset;
	return 0;
}

static int
drm_intel_gem_bo_unpin(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_unpin unpin;
	int ret;

	memset(&unpin, 0, sizeof(unpin));
	unpin.handle = bo_gem->gem_handle;

	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
	if (ret != 0)
		return -errno;

	return 0;
}

static int
drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
				     uint32_t tiling_mode,
				     uint32_t stride)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_set_tiling set_tiling;
	int ret;

	if (bo_gem->global_name == 0 &&
	    tiling_mode == bo_gem->tiling_mode &&
	    stride == bo_gem->stride)
		return 0;

	memset(&set_tiling, 0, sizeof(set_tiling));
	do {
		/* set_tiling is slightly broken and overwrites the
		 * input on the error path, so we have to open code
		 * drmIoctl.
		 */
		set_tiling.handle = bo_gem->gem_handle;
		set_tiling.tiling_mode = tiling_mode;
		set_tiling.stride = stride;

		ret = ioctl(bufmgr_gem->fd,
			    DRM_IOCTL_I915_GEM_SET_TILING,
			    &set_tiling);
	} while (ret == -1 && (errno == EINTR || errno == EAGAIN));
	if (ret == -1)
		return -errno;

	bo_gem->tiling_mode = set_tiling.tiling_mode;
	bo_gem->swizzle_mode = set_tiling.swizzle_mode;
	bo_gem->stride = set_tiling.stride;
	return 0;
}

static int
drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
			    uint32_t stride)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int ret;

	/* Linear buffers have no stride.  By ensuring that we only ever use
	 * stride 0 with linear buffers, we simplify our code.
	 */
	if (*tiling_mode == I915_TILING_NONE)
		stride = 0;

	ret = drm_intel_gem_bo_set_tiling_internal(bo, *tiling_mode, stride);
	if (ret == 0)
		drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);

	*tiling_mode = bo_gem->tiling_mode;
	return ret;
}

static int
drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
			    uint32_t *swizzle_mode)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	*tiling_mode = bo_gem->tiling_mode;
	*swizzle_mode = bo_gem->swizzle_mode;
	return 0;
}

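/**
 * Create (or return the cached) global "flink" name for the buffer so it
 * can be shared with another process; named buffers are excluded from cache
 * reuse.  Illustrative sketch of the sharing handshake (error checks
 * omitted, IPC mechanism left to the caller):
 *
 *	uint32_t name;
 *	drm_intel_bo_flink(bo, &name);		// exporting process
 *	// ... pass name to the other process ...
 *	bo = drm_intel_bo_gem_create_from_name(bufmgr, "shared", name);
 */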
static int
drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t * name)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_gem_flink flink;
	int ret;

	if (!bo_gem->global_name) {
		memset(&flink, 0, sizeof(flink));
		flink.handle = bo_gem->gem_handle;

		ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink);
		if (ret != 0)
			return -errno;

		bo_gem->global_name = flink.name;
		bo_gem->reusable = false;

		DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
	}

	*name = bo_gem->global_name;
	return 0;
}
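
/*
 * Illustrative cross-process sharing (a sketch; how the name travels between
 * processes is an assumption):
 *
 *     uint32_t name;
 *
 *     drm_intel_bo_flink(bo, &name);
 *     // ... pass "name" to the importing process ...
 *     drm_intel_bo *shared =
 *         drm_intel_bo_gem_create_from_name(bufmgr, "shared", name);
 */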

/**
 * Enables unlimited caching of buffer objects for reuse.
 *
 * This is potentially very memory expensive, as the cache at each bucket
 * size is only bounded by how many buffers of that size we've managed to have
 * in flight at once.
 */
void
drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;

	bufmgr_gem->bo_reuse = true;
}
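
/*
 * Typical callers enable reuse immediately after creating the manager
 * (illustrative; "bufmgr" comes from drm_intel_bufmgr_gem_init()):
 *
 *     drm_intel_bufmgr_gem_enable_reuse(bufmgr);
 */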

/**
 * Enable use of the fenced reloc type.
 *
 * New code should enable this to avoid unnecessary fence register
 * allocation. If this option is not enabled, all relocs will have a fence
 * register allocated.
 */
void
drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;

	if (bufmgr_gem->bufmgr.bo_exec == drm_intel_gem_bo_exec2)
		bufmgr_gem->fenced_relocs = true;
}

/**
 * Return the additional aperture space required by the tree of buffer objects
 * rooted at bo.
 */
static int
drm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int i;
	int total = 0;

	if (bo == NULL || bo_gem->included_in_check_aperture)
		return 0;

	total += bo->size;
	bo_gem->included_in_check_aperture = true;

	for (i = 0; i < bo_gem->reloc_count; i++)
		total +=
		    drm_intel_gem_bo_get_aperture_space(bo_gem->
							reloc_target_info[i].bo);

	return total;
}

/**
 * Count the number of buffers in this list that need a fence reg
 *
 * If the count is greater than the number of available regs, we'll have
 * to ask the caller to resubmit a batch with fewer tiled buffers.
 *
 * This function over-counts if the same buffer is used multiple times.
 */
static unsigned int
drm_intel_gem_total_fences(drm_intel_bo ** bo_array, int count)
{
	int i;
	unsigned int total = 0;

	for (i = 0; i < count; i++) {
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];

		if (bo_gem == NULL)
			continue;

		total += bo_gem->reloc_tree_fences;
	}
	return total;
}

/**
 * Clear the flag set by drm_intel_gem_bo_get_aperture_space() so we're ready
 * for the next drm_intel_bufmgr_check_aperture_space() call.
 */
static void
drm_intel_gem_bo_clear_aperture_space_flag(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int i;

	if (bo == NULL || !bo_gem->included_in_check_aperture)
		return;

	bo_gem->included_in_check_aperture = false;

	for (i = 0; i < bo_gem->reloc_count; i++)
		drm_intel_gem_bo_clear_aperture_space_flag(bo_gem->
							   reloc_target_info[i].bo);
}

/**
 * Return a conservative estimate for the amount of aperture required
 * for a collection of buffers. This may double-count some buffers.
 */
static unsigned int
drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count)
{
	int i;
	unsigned int total = 0;

	for (i = 0; i < count; i++) {
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
		if (bo_gem != NULL)
			total += bo_gem->reloc_tree_size;
	}
	return total;
}

/**
 * Return the amount of aperture needed for a collection of buffers.
 * This avoids double counting any buffers, at the cost of looking
 * at every buffer in the set.
 */
static unsigned int
drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count)
{
	int i;
	unsigned int total = 0;

	for (i = 0; i < count; i++) {
		total += drm_intel_gem_bo_get_aperture_space(bo_array[i]);
		/* For the first buffer object in the array, we get an
		 * accurate count back for its reloc_tree size (since nothing
		 * had been flagged as being counted yet). We can save that
		 * value out as a more conservative reloc_tree_size that
		 * avoids double-counting target buffers. Since the first
		 * buffer happens to usually be the batch buffer in our
		 * callers, this can pull us back from doing the tree
		 * walk on every new batch emit.
		 */
		if (i == 0) {
			drm_intel_bo_gem *bo_gem =
			    (drm_intel_bo_gem *) bo_array[i];
			bo_gem->reloc_tree_size = total;
		}
	}

	for (i = 0; i < count; i++)
		drm_intel_gem_bo_clear_aperture_space_flag(bo_array[i]);
	return total;
}

/**
 * Return -ENOSPC if the batchbuffer should be flushed before attempting to
 * emit rendering referencing the buffers pointed to by bo_array.
 *
 * This is required because if we try to emit a batchbuffer with relocations
 * to a tree of buffers that won't simultaneously fit in the aperture,
 * the rendering will return an error at a point where the software is not
 * prepared to recover from it.
 *
 * However, we also want to emit the batchbuffer significantly before we reach
 * the limit, as a series of batchbuffers each of which references buffers
 * covering almost all of the aperture means that at each emit we end up
 * waiting to evict a buffer from the last rendering, and rendering becomes
 * effectively synchronous. By emitting smaller batchbuffers, we eat some CPU
 * overhead to get better parallelism.
 */
static int
drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count)
{
	drm_intel_bufmgr_gem *bufmgr_gem =
	    (drm_intel_bufmgr_gem *) bo_array[0]->bufmgr;
	unsigned int total = 0;
	unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4;
	int total_fences;

	/* Check for fence reg constraints if necessary */
	if (bufmgr_gem->available_fences) {
		total_fences = drm_intel_gem_total_fences(bo_array, count);
		if (total_fences > bufmgr_gem->available_fences)
			return -ENOSPC;
	}

	total = drm_intel_gem_estimate_batch_space(bo_array, count);

	if (total > threshold)
		total = drm_intel_gem_compute_batch_space(bo_array, count);

	if (total > threshold) {
		DBG("check_space: overflowed available aperture, "
		    "%dkb vs %dkb\n",
		    total / 1024, (int)bufmgr_gem->gtt_size / 1024);
		return -ENOSPC;
	} else {
		DBG("drm_check_space: total %dkb vs bufmgr %dkb\n", total / 1024,
		    (int)bufmgr_gem->gtt_size / 1024);
		return 0;
	}
}
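
/*
 * Illustrative caller pattern (a sketch; batch_bo, new_target_bo and
 * flush_batch() are assumptions, not part of this file):
 *
 *     drm_intel_bo *check[2] = { batch_bo, new_target_bo };
 *
 *     if (drm_intel_bufmgr_check_aperture_space(check, 2) != 0)
 *             flush_batch();   // submit now, retry in a fresh batch
 */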

/*
 * Disable buffer reuse for objects which are shared with the kernel
 * as scanout buffers
 */
static int
drm_intel_gem_bo_disable_reuse(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	bo_gem->reusable = false;
	return 0;
}

static int
drm_intel_gem_bo_is_reusable(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	return bo_gem->reusable;
}

static int
_drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int i;

	for (i = 0; i < bo_gem->reloc_count; i++) {
		if (bo_gem->reloc_target_info[i].bo == target_bo)
			return 1;
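		/* Skip direct self-references to avoid infinite recursion. */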
		if (bo == bo_gem->reloc_target_info[i].bo)
			continue;
		if (_drm_intel_gem_bo_references(bo_gem->reloc_target_info[i].bo,
						 target_bo))
			return 1;
	}

	return 0;
}

/** Return true if target_bo is referenced by bo's relocation tree. */
static int
drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
{
	drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;

	if (bo == NULL || target_bo == NULL)
		return 0;
	if (target_bo_gem->used_as_reloc_target)
		return _drm_intel_gem_bo_references(bo, target_bo);
	return 0;
}

static void
add_bucket(drm_intel_bufmgr_gem *bufmgr_gem, int size)
{
	unsigned int i = bufmgr_gem->num_buckets;

	assert(i < ARRAY_SIZE(bufmgr_gem->cache_bucket));

	DRMINITLISTHEAD(&bufmgr_gem->cache_bucket[i].head);
	bufmgr_gem->cache_bucket[i].size = size;
	bufmgr_gem->num_buckets++;
}

static void
init_cache_buckets(drm_intel_bufmgr_gem *bufmgr_gem)
{
	unsigned long size, cache_max_size = 64 * 1024 * 1024;

	/* OK, so power of two buckets was too wasteful of memory.
	 * Give 3 other sizes between each power of two, to hopefully
	 * cover things accurately enough. (The alternative is
	 * probably to just go for exact matching of sizes, and assume
	 * that for things like composited window resize the tiled
	 * width/height alignment and rounding of sizes to pages will
	 * get us useful cache hit rates anyway)
	 */
	add_bucket(bufmgr_gem, 4096);
	add_bucket(bufmgr_gem, 4096 * 2);
	add_bucket(bufmgr_gem, 4096 * 3);

	/* Initialize the linked lists for BO reuse cache. */
	for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
		add_bucket(bufmgr_gem, size);

		add_bucket(bufmgr_gem, size + size * 1 / 4);
		add_bucket(bufmgr_gem, size + size * 2 / 4);
		add_bucket(bufmgr_gem, size + size * 3 / 4);
	}
}
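
/*
 * The buckets above therefore run 4KB, 8KB, 12KB, 16KB, 20KB, 24KB, 28KB,
 * 32KB, 40KB, ... up to 112MB (64MB + 3/4 * 64MB). A request is expected to
 * be served from the smallest bucket that fits it, so e.g. a 10KB allocation
 * would come out of the 12KB bucket.
 */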

void
drm_intel_bufmgr_gem_set_vma_cache_size(drm_intel_bufmgr *bufmgr, int limit)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;

	bufmgr_gem->vma_max = limit;

	drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
}

/**
 * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
 * and manage buffer objects.
 *
 * \param fd File descriptor of the opened DRM device.
 * \param batch_size Size in bytes of the batchbuffers to be executed, used
 *	to size the relocation lists.
 */
drm_intel_bufmgr *
drm_intel_bufmgr_gem_init(int fd, int batch_size)
{
	drm_intel_bufmgr_gem *bufmgr_gem;
	struct drm_i915_gem_get_aperture aperture;
	drm_i915_getparam_t gp;
	int ret, tmp;
	bool exec2 = false;

	bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
	if (bufmgr_gem == NULL)
		return NULL;

	bufmgr_gem->fd = fd;

	if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
		free(bufmgr_gem);
		return NULL;
	}

	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_GET_APERTURE,
		       &aperture);

	if (ret == 0)
		bufmgr_gem->gtt_size = aperture.aper_available_size;
	else {
		fprintf(stderr, "DRM_IOCTL_I915_GEM_GET_APERTURE failed: %s\n",
			strerror(errno));
		bufmgr_gem->gtt_size = 128 * 1024 * 1024;
		fprintf(stderr, "Assuming %dkB available aperture size.\n"
			"May lead to reduced performance or incorrect "
			"rendering.\n",
			(int)bufmgr_gem->gtt_size / 1024);
	}

	gp.param = I915_PARAM_CHIPSET_ID;
	gp.value = &bufmgr_gem->pci_device;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	if (ret) {
		fprintf(stderr, "get chip id failed: %d [%d]\n", ret, errno);
		fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
	}

	if (IS_GEN2(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 2;
	else if (IS_GEN3(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 3;
	else if (IS_GEN4(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 4;
	else if (IS_GEN5(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 5;
	else if (IS_GEN6(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 6;
	else if (IS_GEN7(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 7;
	else
		assert(0);

	if (IS_GEN3(bufmgr_gem->pci_device) &&
	    bufmgr_gem->gtt_size > 256*1024*1024) {
		/* The unmappable part of gtt on gen 3 (i.e. above 256MB) can't
		 * be used for tiled blits. To simplify the accounting, just
		 * subtract the unmappable part (fixed to 256MB on all known
		 * gen3 devices) if the kernel advertises it. */
		bufmgr_gem->gtt_size -= 256*1024*1024;
	}

	gp.value = &tmp;

	gp.param = I915_PARAM_HAS_EXECBUF2;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	if (!ret)
		exec2 = true;

	gp.param = I915_PARAM_HAS_BSD;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	bufmgr_gem->has_bsd = ret == 0;

	gp.param = I915_PARAM_HAS_BLT;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	bufmgr_gem->has_blt = ret == 0;

	gp.param = I915_PARAM_HAS_RELAXED_FENCING;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	bufmgr_gem->has_relaxed_fencing = ret == 0;

	if (bufmgr_gem->gen < 4) {
		gp.param = I915_PARAM_NUM_FENCES_AVAIL;
		gp.value = &bufmgr_gem->available_fences;
		ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
		if (ret) {
			fprintf(stderr, "get fences failed: %d [%d]\n", ret,
				errno);
			fprintf(stderr, "param: %d, val: %d\n", gp.param,
				*gp.value);
			bufmgr_gem->available_fences = 0;
		} else {
			/* XXX The kernel reports the total number of fences,
			 * including any that may be pinned.
			 *
			 * We presume that there will be at least one pinned
			 * fence for the scanout buffer, but there may be more
			 * than one scanout and the user may be manually
			 * pinning buffers. Let's move to execbuffer2 and
			 * thereby forget the insanity of using fences...
			 */
			bufmgr_gem->available_fences -= 2;
			if (bufmgr_gem->available_fences < 0)
				bufmgr_gem->available_fences = 0;
		}
	}

	/* Let's go with one relocation per every 2 dwords (but round down a bit
	 * since a power of two will mean an extra page allocation for the reloc
	 * buffer).
	 *
	 * Every 4 was too few for the blender benchmark.
	 */
	bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;
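
	/* Worked example (illustrative): a 16KB batch holds 16384 / 4 = 4096
	 * dwords, so max_relocs = 4096 / 2 - 2 = 2046.
	 */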

	bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc;
	bufmgr_gem->bufmgr.bo_alloc_for_render =
	    drm_intel_gem_bo_alloc_for_render;
	bufmgr_gem->bufmgr.bo_alloc_tiled = drm_intel_gem_bo_alloc_tiled;
	bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference;
	bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference;
	bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map;
	bufmgr_gem->bufmgr.bo_unmap = drm_intel_gem_bo_unmap;
	bufmgr_gem->bufmgr.bo_subdata = drm_intel_gem_bo_subdata;
	bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata;
	bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering;
	bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc;
	bufmgr_gem->bufmgr.bo_emit_reloc_fence = drm_intel_gem_bo_emit_reloc_fence;
	bufmgr_gem->bufmgr.bo_pin = drm_intel_gem_bo_pin;
	bufmgr_gem->bufmgr.bo_unpin = drm_intel_gem_bo_unpin;
	bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling;
	bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
	bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
	/* Use the new one if available */
	if (exec2) {
		bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec2;
		bufmgr_gem->bufmgr.bo_mrb_exec = drm_intel_gem_bo_mrb_exec2;
	} else
		bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
	bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
	bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise;
	bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_destroy;
	bufmgr_gem->bufmgr.debug = 0;
	bufmgr_gem->bufmgr.check_aperture_space =
	    drm_intel_gem_check_aperture_space;
	bufmgr_gem->bufmgr.bo_disable_reuse = drm_intel_gem_bo_disable_reuse;
	bufmgr_gem->bufmgr.bo_is_reusable = drm_intel_gem_bo_is_reusable;
	bufmgr_gem->bufmgr.get_pipe_from_crtc_id =
	    drm_intel_gem_get_pipe_from_crtc_id;
	bufmgr_gem->bufmgr.bo_references = drm_intel_gem_bo_references;

	DRMINITLISTHEAD(&bufmgr_gem->named);
	init_cache_buckets(bufmgr_gem);

	DRMINITLISTHEAD(&bufmgr_gem->vma_cache);
	bufmgr_gem->vma_max = -1; /* unlimited by default */

	return &bufmgr_gem->bufmgr;
}
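
/*
 * End-to-end usage sketch (illustrative only; the device path, batch size
 * and error handling are assumptions):
 *
 *     int fd = open("/dev/dri/card0", O_RDWR);
 *     drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 16384);
 *     drm_intel_bufmgr_gem_enable_reuse(bufmgr);
 *
 *     drm_intel_bo *bo = drm_intel_bo_alloc(bufmgr, "scratch", 4096, 4096);
 *     ...
 *     drm_intel_bo_unreference(bo);
 *     drm_intel_bufmgr_destroy(bufmgr);
 */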