/*
 * drivers/gpu/drm/omapdrm/omap_gem.c
 *
 * Copyright (C) 2011 Texas Instruments
 * Author: Rob Clark <rob.clark@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>

#include <drm/drm_vma_manager.h>

#include "omap_drv.h"
#include "omap_dmm_tiler.h"
28 /* remove these once drm core helpers are merged */
29 struct page **_drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
30 void _drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
31 bool dirty, bool accessed);
32 int _drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);
34 /*
35 * GEM buffer object implementation.
36 */
38 #define to_omap_bo(x) container_of(x, struct omap_gem_object, base)
40 /* note: we use upper 8 bits of flags for driver-internal flags: */
41 #define OMAP_BO_DMA 0x01000000 /* actually is physically contiguous */
42 #define OMAP_BO_EXT_SYNC 0x02000000 /* externally allocated sync object */
43 #define OMAP_BO_EXT_MEM 0x04000000 /* externally allocated memory */
46 struct omap_gem_object {
47 struct drm_gem_object base;
49 struct list_head mm_list;
51 uint32_t flags;
53 /** width/height for tiled formats (rounded up to slot boundaries) */
54 uint16_t width, height;
56 /** roll applied when mapping to DMM */
57 uint32_t roll;
59 /**
60 * If buffer is allocated physically contiguous, the OMAP_BO_DMA flag
61 * is set and the paddr is valid. Also if the buffer is remapped in
62 * TILER and paddr_cnt > 0, then paddr is valid. But if you are using
63 * the physical address and OMAP_BO_DMA is not set, then you should
64 * be going thru omap_gem_{get,put}_paddr() to ensure the mapping is
65 * not removed from under your feet.
66 *
67 * Note that OMAP_BO_SCANOUT is a hint from userspace that DMA capable
68 * buffer is requested, but doesn't mean that it is. Use the
69 * OMAP_BO_DMA flag to determine if the buffer has a DMA capable
70 * physical address.
71 */
72 dma_addr_t paddr;
74 /**
75 * # of users of paddr
76 */
77 uint32_t paddr_cnt;
79 /**
80 * tiler block used when buffer is remapped in DMM/TILER.
81 */
82 struct tiler_block *block;
84 /**
85 * Array of backing pages, if allocated. Note that pages are never
86 * allocated for buffers originally allocated from contiguous memory
87 */
88 struct page **pages;
90 /** addresses corresponding to pages in above array */
91 dma_addr_t *addrs;
93 /**
94 * Virtual address, if mapped.
95 */
96 void *vaddr;
98 /**
99 * sync-object allocated on demand (if needed)
100 *
101 * Per-buffer sync-object for tracking pending and completed hw/dma
102 * read and write operations. The layout in memory is dictated by
103 * the SGX firmware, which uses this information to stall the command
104 * stream if a surface is not ready yet.
105 *
106 * Note that when buffer is used by SGX, the sync-object needs to be
107 * allocated from a special heap of sync-objects. This way many sync
108 * objects can be packed in a page, and not waste GPU virtual address
109 * space. Because of this we have to have a omap_gem_set_sync_object()
110 * API to allow replacement of the syncobj after it has (potentially)
111 * already been allocated. A bit ugly but I haven't thought of a
112 * better alternative.
113 */
114 struct {
115 uint32_t write_pending;
116 uint32_t write_complete;
117 uint32_t read_pending;
118 uint32_t read_complete;
119 } *sync;
120 };
static int get_pages(struct drm_gem_object *obj, struct page ***pages);
static uint64_t mmap_offset(struct drm_gem_object *obj);

/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
 * not necessarily pinned in TILER all the time, and (b) when they are
 * they are not necessarily page aligned, we reserve one or more small
 * regions in each of the 2d containers to use as a user-GART where we
 * can create a second page-aligned mapping of parts of the buffer
 * being accessed from userspace.
 *
 * Note that we could optimize slightly when we know that multiple
 * tiler containers are backed by the same PAT.. but I'll leave that
 * for later..
 */
#define NUM_USERGART_ENTRIES 2
struct usergart_entry {
	struct tiler_block *block;	/* the reserved tiler block */
	dma_addr_t paddr;
	struct drm_gem_object *obj;	/* the current pinned obj */
	pgoff_t obj_pgoff;		/* page offset of obj currently mapped in */
};
static struct {
	struct usergart_entry entry[NUM_USERGART_ENTRIES];
	int height;		/* height in rows */
	int height_shift;	/* ilog2(height in rows) */
	int slot_shift;		/* ilog2(width per slot) */
	int stride_pfn;		/* stride in pages */
	int last;		/* index of last used entry */
} *usergart;
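
/*
 * Editorial note (not from the original source): with NUM_USERGART_ENTRIES
 * set to 2 per tiler format, at most two distinct 4KB-aligned windows of 2D
 * tiled buffers can be faulted in per container at any time.  Older mappings
 * are torn down by evict_entry() below and are simply re-faulted on the next
 * userspace access.
 */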
static void evict_entry(struct drm_gem_object *obj,
		enum tiler_fmt fmt, struct usergart_entry *entry)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int n = usergart[fmt].height;
	size_t size = PAGE_SIZE * n;
	loff_t off = mmap_offset(obj) +
			(entry->obj_pgoff << PAGE_SHIFT);
	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);

	if (m > 1) {
		int i;
		/* if stride is larger than PAGE_SIZE then sparse mapping: */
		for (i = n; i > 0; i--) {
			unmap_mapping_range(obj->dev->anon_inode->i_mapping,
					off, PAGE_SIZE, 1);
			off += PAGE_SIZE * m;
		}
	} else {
		unmap_mapping_range(obj->dev->anon_inode->i_mapping,
				off, size, 1);
	}

	entry->obj = NULL;
}

/* Evict a buffer from usergart, if it is mapped there */
static void evict(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (omap_obj->flags & OMAP_BO_TILED) {
		enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
		int i;

		if (!usergart)
			return;

		for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
			struct usergart_entry *entry = &usergart[fmt].entry[i];
			if (entry->obj == obj)
				evict_entry(obj, fmt, entry);
		}
	}
}
/* GEM objects can either be allocated from contiguous memory (in which
 * case obj->filp==NULL), or w/ shmem backing (obj->filp!=NULL).  But non
 * contiguous buffers can be remapped in TILER/DMM if they need to be
 * contiguous... but we don't do this all the time, to reduce pressure
 * on TILER/DMM space: when we know at allocation time that the buffer
 * will need to be scanned out (and DMM is not available), we allocate
 * it from contiguous memory instead.
 */
static inline bool is_shmem(struct drm_gem_object *obj)
{
	return obj->filp != NULL;
}

/**
 * shmem buffers that are mapped cached can simulate coherency by using
 * page faulting to keep track of dirty pages
 */
static inline bool is_cached_coherent(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	return is_shmem(obj) &&
		((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED);
}
static DEFINE_SPINLOCK(sync_lock);

/** ensure backing pages are allocated */
static int omap_gem_attach_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct page **pages;
	int npages = obj->size >> PAGE_SHIFT;
	int i, ret;
	dma_addr_t *addrs;

	WARN_ON(omap_obj->pages);

	/* TODO: __GFP_DMA32 .. but somehow GFP_HIGHMEM is coming from the
	 * mapping_gfp_mask(mapping) which conflicts w/ GFP_DMA32.. probably
	 * we actually want CMA memory for it all anyways..
	 */
	pages = drm_gem_get_pages(obj, GFP_KERNEL);
	if (IS_ERR(pages)) {
		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}

		for (i = 0; i < npages; i++) {
			addrs[i] = dma_map_page(dev->dev, pages[i],
					0, PAGE_SIZE, DMA_BIDIRECTIONAL);
		}
	} else {
		addrs = kzalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}
	}

	omap_obj->addrs = addrs;
	omap_obj->pages = pages;

	return 0;

free_pages:
	drm_gem_put_pages(obj, pages, true, false);

	return ret;
}
/** release backing pages */
static void omap_gem_detach_pages(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	/* for non-cached buffers, the pages were dma_map'd at attach time
	 * (because DSS, GPU, etc. are not cache coherent); unmap them now:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			dma_unmap_page(obj->dev->dev, omap_obj->addrs[i],
					PAGE_SIZE, DMA_BIDIRECTIONAL);
		}
	}

	kfree(omap_obj->addrs);
	omap_obj->addrs = NULL;

	drm_gem_put_pages(obj, omap_obj->pages, true, false);
	omap_obj->pages = NULL;
}

/* get buffer flags */
uint32_t omap_gem_flags(struct drm_gem_object *obj)
{
	return to_omap_bo(obj)->flags;
}
308 /** get mmap offset */
309 static uint64_t mmap_offset(struct drm_gem_object *obj)
310 {
311 struct drm_device *dev = obj->dev;
312 int ret;
313 size_t size;
315 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
317 /* Make it mmapable */
318 size = omap_gem_mmap_size(obj);
319 ret = drm_gem_create_mmap_offset_size(obj, size);
320 if (ret) {
321 dev_err(dev->dev, "could not allocate mmap offset\n");
322 return 0;
323 }
325 return drm_vma_node_offset_addr(&obj->vma_node);
326 }
328 uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
329 {
330 uint64_t offset;
331 mutex_lock(&obj->dev->struct_mutex);
332 offset = mmap_offset(obj);
333 mutex_unlock(&obj->dev->struct_mutex);
334 return offset;
335 }
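
/*
 * Illustrative userspace usage (a sketch, not part of the original driver):
 * the offset returned above is what a client passes to mmap() on the DRM fd,
 * typically obtained via the OMAP_GEM_INFO ioctl:
 *
 *	struct drm_omap_gem_info info = { .handle = handle };
 *	ioctl(fd, DRM_IOCTL_OMAP_GEM_INFO, &info);
 *	buf = mmap(NULL, info.size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			fd, info.offset);
 */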
337 /** get mmap size */
338 size_t omap_gem_mmap_size(struct drm_gem_object *obj)
339 {
340 struct omap_gem_object *omap_obj = to_omap_bo(obj);
341 size_t size = obj->size;
343 if (omap_obj->flags & OMAP_BO_TILED) {
344 /* for tiled buffers, the virtual size has stride rounded up
345 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
346 * 32kb later!). But we don't back the entire buffer with
347 * pages, only the valid picture part.. so need to adjust for
348 * this in the size used to mmap and generate mmap offset
349 */
350 size = tiler_vsize(gem2fmt(omap_obj->flags),
351 omap_obj->width, omap_obj->height);
352 }
354 return size;
355 }
357 /* get tiled size, returns -EINVAL if not tiled buffer */
358 int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h)
359 {
360 struct omap_gem_object *omap_obj = to_omap_bo(obj);
361 if (omap_obj->flags & OMAP_BO_TILED) {
362 *w = omap_obj->width;
363 *h = omap_obj->height;
364 return 0;
365 }
366 return -EINVAL;
367 }
/* Normal handling for the case of faulting in non-tiled buffers */
static int fault_1d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned long pfn;
	pgoff_t pgoff;

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	if (omap_obj->pages) {
		omap_gem_cpu_sync(obj, pgoff);
		pfn = page_to_pfn(omap_obj->pages[pgoff]);
	} else {
		BUG_ON(!(omap_obj->flags & OMAP_BO_DMA));
		pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff;
	}

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
}
/* Special handling for the case of faulting in 2d tiled buffers */
static int fault_2d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct usergart_entry *entry;
	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
	struct page *pages[64];  /* XXX is this too much to have on stack? */
	unsigned long pfn;
	pgoff_t pgoff, base_pgoff;
	void __user *vaddr;
	int i, ret, slots;

	/*
	 * Note the height of the slot is also equal to the number of pages
	 * that need to be mapped in to fill a 4kb wide CPU page.  If the slot
	 * height is 64, then 64 pages fill a 4kb wide by 64 row region.
	 */
	const int n = usergart[fmt].height;
	const int n_shift = usergart[fmt].height_shift;

	/*
	 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
	 * rounded up to the next multiple of PAGE_SIZE.. this needs to be
	 * taken into account in some of the math, so figure out virtual
	 * stride in pages
	 */
	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	/*
	 * Actual address we start mapping at is rounded down to previous slot
	 * boundary in the y direction:
	 */
	base_pgoff = round_down(pgoff, m << n_shift);

	/* figure out buffer width in slots */
	slots = omap_obj->width >> usergart[fmt].slot_shift;

	vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);

	entry = &usergart[fmt].entry[usergart[fmt].last];

	/* evict previous buffer using this usergart entry, if any: */
	if (entry->obj)
		evict_entry(entry->obj, fmt, entry);

	entry->obj = obj;
	entry->obj_pgoff = base_pgoff;

	/* now convert base_pgoff to phys offset from virt offset: */
	base_pgoff = (base_pgoff >> n_shift) * slots;

	/* for wider-than 4k.. figure out which part of the slot-row we want: */
	if (m > 1) {
		int off = pgoff % m;
		entry->obj_pgoff += off;
		base_pgoff /= m;
		slots = min(slots - (off << n_shift), n);
		base_pgoff += off << n_shift;
		vaddr += off << PAGE_SHIFT;
	}

	/*
	 * Map in pages.  Beyond the valid pixel part of the buffer, we set
	 * pages[i] to NULL to get a dummy page mapped in.. if someone
	 * reads/writes it they will get random/undefined content, but at
	 * least it won't be corrupting whatever other random page used to
	 * be mapped in, or other undefined behavior.
	 */
	memcpy(pages, &omap_obj->pages[base_pgoff],
			sizeof(struct page *) * slots);
	memset(pages + slots, 0,
			sizeof(struct page *) * (n - slots));

	ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
	if (ret) {
		dev_err(obj->dev->dev, "failed to pin: %d\n", ret);
		return ret;
	}

	pfn = entry->paddr >> PAGE_SHIFT;

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	for (i = n; i > 0; i--) {
		vm_insert_mixed(vma, (unsigned long)vaddr, pfn);
		pfn += usergart[fmt].stride_pfn;
		vaddr += PAGE_SIZE * m;
	}

	/* simple round-robin: */
	usergart[fmt].last = (usergart[fmt].last + 1) % NUM_USERGART_ENTRIES;

	return 0;
}
/**
 * omap_gem_fault - pagefault handler for GEM objects
 * @vma: the VMA of the GEM object
 * @vmf: fault detail
 *
 * Invoked when a fault occurs on an mmap of a GEM managed area.  GEM
 * does most of the work for us including the actual map/unmap calls
 * but we need to do the actual page work.
 *
 * The VMA was set up by GEM.  In doing so it also ensured that the
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 */
int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct drm_device *dev = obj->dev;
	struct page **pages;
	int ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	mutex_lock(&dev->struct_mutex);

	/* if a shmem backed object, make sure we have pages attached now */
	ret = get_pages(obj, &pages);
	if (ret)
		goto fail;

	/* where should we do the corresponding put_pages().. we are mapping
	 * the original page, rather than thru a GART, so we can't rely
	 * on eviction to trigger this.  But munmap() of all mappings should
	 * probably trigger put_pages()?
	 */

	if (omap_obj->flags & OMAP_BO_TILED)
		ret = fault_2d(obj, vma, vmf);
	else
		ret = fault_1d(obj, vma, vmf);

fail:
	mutex_unlock(&dev->struct_mutex);
	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}
/** We override mainly to fix up some of the vm mapping flags.. */
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return omap_gem_mmap_obj(vma->vm_private_data, vma);
}
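
/* Shared tail of the mmap path.  Called from omap_gem_mmap() above and, it
 * appears, also directly for dmabuf mmap, where drm_gem_mmap() is not in the
 * call path.  (Editorial comment, not from the original source.)
 */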
int omap_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (omap_obj->flags & OMAP_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * We do have some private objects, at least for scanout buffers
		 * on hardware without DMM/TILER.  But these are allocated write-
		 * combine
		 */
		if (WARN_ON(!obj->filp))
			return -EINVAL;

		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		vma->vm_pgoff = 0;
		vma->vm_file = get_file(obj->filp);

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}
/**
 * omap_gem_dumb_create - create a dumb buffer
 * @file: our client file
 * @dev: our device
 * @args: the requested arguments copied from userspace
 *
 * Allocate a buffer suitable for use for a frame buffer of the
 * form described by user space.  Give userspace a handle by which
 * to reference it.
 */
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	union omap_gem_size gsize;

	/* in case someone tries to feed us a completely bogus stride: */
	args->pitch = align_pitch(args->pitch, args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);

	gsize = (union omap_gem_size){
		.bytes = args->size,
	};

	return omap_gem_new_handle(dev, file, gsize,
			OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
}
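
/*
 * Illustrative dumb-buffer flow from a KMS client (a sketch, not part of the
 * original driver):
 *
 *	struct drm_mode_create_dumb create = {
 *		.width = 1920, .height = 1080, .bpp = 32,
 *	};
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *
 *	struct drm_mode_map_dumb map = { .handle = create.handle };
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	fb = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			fd, map.offset);
 *
 * The create ioctl lands in omap_gem_dumb_create() above, and the map ioctl
 * in omap_gem_dumb_map_offset() below.
 */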
/**
 * omap_gem_dumb_map - buffer mapping for dumb interface
 * @file: our drm client file
 * @dev: drm device
 * @handle: GEM handle to the object (from dumb_create)
 * @offset: returned mmap offset
 *
 * Do the necessary setup to allow the mapping of the frame buffer
 * into user memory.  We don't have to do much here at the moment.
 */
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = omap_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}
/* Set scrolling position.  This allows us to implement fast scrolling
 * for console.
 *
 * Call only from non-atomic contexts.
 */
int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint32_t npages = obj->size >> PAGE_SHIFT;
	int ret = 0;

	if (roll > npages) {
		dev_err(obj->dev->dev, "invalid roll: %d\n", roll);
		return -EINVAL;
	}

	omap_obj->roll = roll;

	mutex_lock(&obj->dev->struct_mutex);

	/* if we aren't mapped yet, we don't need to do anything */
	if (omap_obj->block) {
		struct page **pages;
		ret = get_pages(obj, &pages);
		if (ret)
			goto fail;
		ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
		if (ret)
			dev_err(obj->dev->dev, "could not repin: %d\n", ret);
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}
/* Sync the buffer for CPU access.. note pages should already be
 * attached, ie. omap_gem_get_pages()
 */
void omap_gem_cpu_sync(struct drm_gem_object *obj, int pgoff)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj) && omap_obj->addrs[pgoff]) {
		dma_unmap_page(dev->dev, omap_obj->addrs[pgoff],
				PAGE_SIZE, DMA_BIDIRECTIONAL);
		omap_obj->addrs[pgoff] = 0;
	}
}

/* sync the buffer for DMA access */
void omap_gem_dma_sync(struct drm_gem_object *obj,
		enum dma_data_direction dir)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		struct page **pages = omap_obj->pages;
		bool dirty = false;

		for (i = 0; i < npages; i++) {
			if (!omap_obj->addrs[i]) {
				omap_obj->addrs[i] = dma_map_page(dev->dev, pages[i], 0,
						PAGE_SIZE, DMA_BIDIRECTIONAL);
				dirty = true;
			}
		}

		if (dirty) {
			unmap_mapping_range(obj->filp->f_mapping, 0,
					omap_gem_mmap_size(obj), 1);
		}
	}
}
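
/*
 * Editorial note with an illustrative sequence (not from the original
 * source): for a cached buffer the two helpers above pair up roughly as
 *
 *	omap_gem_cpu_sync(obj, pgoff);		// per page, from fault_1d()
 *	... CPU touches the page through its mapping ...
 *	omap_gem_dma_sync(obj, DMA_TO_DEVICE);	// re-map dirty pages before DMA
 *
 * dma_sync() also zaps the userspace mapping so the next CPU access faults
 * in again and is tracked as dirty.
 */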
/* Get physical address for DMA.. if 'remap' is true, and the buffer is not
 * already contiguous, remap it to pin in physically contiguous memory.. (ie.
 * map in TILER)
 */
int omap_gem_get_paddr(struct drm_gem_object *obj,
		dma_addr_t *paddr, bool remap)
{
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&obj->dev->struct_mutex);

	if (remap && is_shmem(obj) && priv->has_dmm) {
		if (omap_obj->paddr_cnt == 0) {
			struct page **pages;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
			struct tiler_block *block;

			BUG_ON(omap_obj->block);

			ret = get_pages(obj, &pages);
			if (ret)
				goto fail;

			if (omap_obj->flags & OMAP_BO_TILED) {
				block = tiler_reserve_2d(fmt,
						omap_obj->width,
						omap_obj->height, 0);
			} else {
				block = tiler_reserve_1d(obj->size);
			}

			if (IS_ERR(block)) {
				ret = PTR_ERR(block);
				dev_err(obj->dev->dev,
					"could not remap: %d (%d)\n", ret, fmt);
				goto fail;
			}

			/* TODO: enable async refill.. */
			ret = tiler_pin(block, pages, npages,
					omap_obj->roll, true);
			if (ret) {
				tiler_release(block);
				dev_err(obj->dev->dev,
						"could not pin: %d\n", ret);
				goto fail;
			}

			omap_obj->paddr = tiler_ssptr(block);
			omap_obj->block = block;

			DBG("got paddr: %pad", &omap_obj->paddr);
		}

		omap_obj->paddr_cnt++;

		*paddr = omap_obj->paddr;
	} else if (omap_obj->flags & OMAP_BO_DMA) {
		*paddr = omap_obj->paddr;
	} else {
		ret = -EINVAL;
		goto fail;
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}
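
/*
 * Illustrative pinning pattern (a sketch, not from the original source):
 *
 *	dma_addr_t paddr;
 *
 *	ret = omap_gem_get_paddr(obj, &paddr, true);
 *	if (ret)
 *		return ret;
 *	... program the DSS overlay / DMA engine with paddr ...
 *	omap_gem_put_paddr(obj);	// once the hw is done with the buffer
 *
 * get/put calls nest via paddr_cnt, so the TILER mapping is only torn down
 * when the last user drops its reference.
 */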
/* Release physical address, when DMA is no longer being performed.. this
 * could potentially unpin and unmap buffers from TILER
 */
void omap_gem_put_paddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret;

	mutex_lock(&obj->dev->struct_mutex);
	if (omap_obj->paddr_cnt > 0) {
		omap_obj->paddr_cnt--;
		if (omap_obj->paddr_cnt == 0) {
			ret = tiler_unpin(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not unpin pages: %d\n", ret);
			}
			ret = tiler_release(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not release unmap: %d\n", ret);
			}
			omap_obj->paddr = 0;
			omap_obj->block = NULL;
		}
	}

	mutex_unlock(&obj->dev->struct_mutex);
}
/* Get rotated scanout address (only valid if already pinned), at the
 * specified orientation and x,y offset from top-left corner of buffer
 * (only valid for tiled 2d buffers)
 */
int omap_gem_rotated_paddr(struct drm_gem_object *obj, uint32_t orient,
		int x, int y, dma_addr_t *paddr)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;

	mutex_lock(&obj->dev->struct_mutex);
	if ((omap_obj->paddr_cnt > 0) && omap_obj->block &&
			(omap_obj->flags & OMAP_BO_TILED)) {
		*paddr = tiler_tsptr(omap_obj->block, orient, x, y);
		ret = 0;
	}
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;
	if (omap_obj->flags & OMAP_BO_TILED)
		ret = tiler_stride(gem2fmt(omap_obj->flags), orient);
	return ret;
}
/* acquire pages when needed (for example, for DMA where a physically
 * contiguous buffer is not required)
 */
static int get_pages(struct drm_gem_object *obj, struct page ***pages)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	if (is_shmem(obj) && !omap_obj->pages) {
		ret = omap_gem_attach_pages(obj);
		if (ret) {
			dev_err(obj->dev->dev, "could not attach pages\n");
			return ret;
		}
	}

	/* TODO: even phys-contig.. we should have a list of pages? */
	*pages = omap_obj->pages;

	return 0;
}

/* if !remap, and we don't have pages backing, then fail, rather than
 * increasing the pin count (which we don't really do yet anyways,
 * because we don't support swapping pages back out).  And 'remap'
 * might not be quite the right name, but I wanted to keep it working
 * similarly to omap_gem_get_paddr().  Note though that the mutex is not
 * acquired if !remap (because this can be called in atomic ctxt),
 * but probably omap_gem_get_paddr() should be changed to work in the
 * same way.  If !remap, a matching omap_gem_put_pages() call is not
 * required (and should not be made).
 */
int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
		bool remap)
{
	int ret;
	if (!remap) {
		struct omap_gem_object *omap_obj = to_omap_bo(obj);
		if (!omap_obj->pages)
			return -ENOMEM;
		*pages = omap_obj->pages;
		return 0;
	}
	mutex_lock(&obj->dev->struct_mutex);
	ret = get_pages(obj, pages);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}
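
/*
 * Illustrative usage (a sketch, not from the original source): a caller that
 * wants to scatter-gather over a non-contiguous buffer would do
 *
 *	struct page **pages;
 *
 *	ret = omap_gem_get_pages(obj, &pages, true);
 *	if (ret)
 *		return ret;
 *	... walk pages[0 .. obj->size / PAGE_SIZE) ...
 *	omap_gem_put_pages(obj);
 */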
/* release pages when DMA no longer being performed */
int omap_gem_put_pages(struct drm_gem_object *obj)
{
	/* do something here if we dynamically attach/detach pages.. at
	 * least they would no longer need to be pinned if everyone has
	 * released the pages..
	 */
	return 0;
}

/* Get kernel virtual address for CPU access.. this more or less only
 * exists for omap_fbdev.  This should be called with struct_mutex
 * held.
 */
void *omap_gem_vaddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!omap_obj->vaddr) {
		struct page **pages;
		int ret = get_pages(obj, &pages);
		if (ret)
			return ERR_PTR(ret);
		omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}
	return omap_obj->vaddr;
}
#ifdef CONFIG_PM
/* re-pin objects in DMM in resume path: */
int omap_gem_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	struct omap_drm_private *priv = drm_dev->dev_private;
	struct omap_gem_object *omap_obj;
	int ret = 0;

	list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
		if (omap_obj->block) {
			struct drm_gem_object *obj = &omap_obj->base;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			WARN_ON(!omap_obj->pages);  /* this can't happen */
			ret = tiler_pin(omap_obj->block,
					omap_obj->pages, npages,
					omap_obj->roll, true);
			if (ret) {
				dev_err(dev, "could not repin: %d\n", ret);
				return ret;
			}
		}
	}

	return 0;
}
#endif
#ifdef CONFIG_DEBUG_FS
void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint64_t off;

	off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
			omap_obj->flags, obj->name, obj->refcount.refcount.counter,
			off, &omap_obj->paddr, omap_obj->paddr_cnt,
			omap_obj->vaddr, omap_obj->roll);

	if (omap_obj->flags & OMAP_BO_TILED) {
		seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
		if (omap_obj->block) {
			struct tcm_area *area = &omap_obj->block->area;
			seq_printf(m, " (%dx%d, %dx%d)",
					area->p0.x, area->p0.y,
					area->p1.x, area->p1.y);
		}
	} else {
		seq_printf(m, " %zu", obj->size);
	}

	seq_printf(m, "\n");
}

void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct omap_gem_object *omap_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(omap_obj, list, mm_list) {
		struct drm_gem_object *obj = &omap_obj->base;
		seq_printf(m, " ");
		omap_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif
/* Buffer Synchronization:
 */

struct omap_gem_sync_waiter {
	struct list_head list;
	struct omap_gem_object *omap_obj;
	enum omap_gem_op op;
	uint32_t read_target, write_target;
	/* notify called w/ sync_lock held */
	void (*notify)(void *arg);
	void *arg;
};

/* list of omap_gem_sync_waiter.. the notify fxn gets called back when
 * the read and/or write target count is achieved, which can call a user
 * callback (ex. to kick 3d and/or 2d), wakeup blocked task (prep for
 * cpu access), etc.
 */
static LIST_HEAD(waiters);

static inline bool is_waiting(struct omap_gem_sync_waiter *waiter)
{
	struct omap_gem_object *omap_obj = waiter->omap_obj;
	if ((waiter->op & OMAP_GEM_READ) &&
			(omap_obj->sync->write_complete < waiter->write_target))
		return true;
	if ((waiter->op & OMAP_GEM_WRITE) &&
			(omap_obj->sync->read_complete < waiter->read_target))
		return true;
	return false;
}

/* macro for sync debug.. */
#define SYNCDBG 0
#define SYNC(fmt, ...) do { if (SYNCDBG) \
		printk(KERN_ERR "%s:%d: "fmt"\n", \
				__func__, __LINE__, ##__VA_ARGS__); \
	} while (0)

static void sync_op_update(void)
{
	struct omap_gem_sync_waiter *waiter, *n;
	list_for_each_entry_safe(waiter, n, &waiters, list) {
		if (!is_waiting(waiter)) {
			list_del(&waiter->list);
			SYNC("notify: %p", waiter);
			waiter->notify(waiter->arg);
			kfree(waiter);
		}
	}
}
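
/* Bump the pending counters at the start of an operation, or the completed
 * counters when it finishes, allocating the per-buffer sync object on first
 * use.  (Descriptive comment added editorially.)
 */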
static inline int sync_op(struct drm_gem_object *obj,
		enum omap_gem_op op, bool start)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	spin_lock(&sync_lock);

	if (!omap_obj->sync) {
		omap_obj->sync = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
		if (!omap_obj->sync) {
			ret = -ENOMEM;
			goto unlock;
		}
	}

	if (start) {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_pending++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_pending++;
	} else {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_complete++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_complete++;
		sync_op_update();
	}

unlock:
	spin_unlock(&sync_lock);

	return ret;
}
/* it is a bit lame to handle updates in this sort of polling way, but
 * in case of PVR, the GPU can directly update read/write complete
 * values, and not really tell us which ones it updated.. this also
 * means that sync_lock is not quite sufficient.  So we'll need to
 * do something a bit better when it comes time to add support for
 * separate 2d hw..
 */
void omap_gem_op_update(void)
{
	spin_lock(&sync_lock);
	sync_op_update();
	spin_unlock(&sync_lock);
}

/* mark the start of read and/or write operation */
int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, true);
}

int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, false);
}

static DECLARE_WAIT_QUEUE_HEAD(sync_event);

static void sync_notify(void *arg)
{
	struct task_struct **waiter_task = arg;
	*waiter_task = NULL;
	wake_up_all(&sync_event);
}

int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;
	if (omap_obj->sync) {
		struct task_struct *waiter_task = current;
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_KERNEL);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = sync_notify;
		waiter->arg = &waiter_task;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			ret = wait_event_interruptible(sync_event,
					(waiter_task == NULL));
			spin_lock(&sync_lock);
			if (waiter_task) {
				SYNC("interrupted: %p", waiter);
				/* we were interrupted */
				list_del(&waiter->list);
				waiter_task = NULL;
			} else {
				/* freed in sync_op_update() */
				waiter = NULL;
			}
		}
		spin_unlock(&sync_lock);

		kfree(waiter);
	}
	return ret;
}
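
/*
 * Illustrative CPU-access pattern built on the ops above (a sketch, not from
 * the original source), roughly what the GEM_CPU_PREP/FINI ioctls do:
 *
 *	omap_gem_op_sync(obj, OMAP_GEM_READ);	// wait until safe to read
 *	omap_gem_op_start(obj, OMAP_GEM_READ);
 *	... CPU reads the buffer ...
 *	omap_gem_op_finish(obj, OMAP_GEM_READ);
 */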
/* call fxn(arg), either synchronously or asynchronously if the op
 * is currently blocked.. fxn() can be called from any context
 *
 * (TODO for now fxn is called back from whichever context calls
 * omap_gem_op_update().. but this could be better defined later
 * if needed)
 *
 * TODO more code in common w/ _sync()..
 */
int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
		void (*fxn)(void *arg), void *arg)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	if (omap_obj->sync) {
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_ATOMIC);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = fxn;
		waiter->arg = arg;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			return 0;
		}

		spin_unlock(&sync_lock);

		kfree(waiter);
	}

	/* no waiting.. */
	fxn(arg);

	return 0;
}
/* special API so PVR can update the buffer to use a sync-object allocated
 * from its sync-obj heap.  Only used for a newly allocated (from PVR's
 * perspective) sync-object, so we overwrite the new syncobj w/ values
 * from the already allocated syncobj (if there is one)
 */
int omap_gem_set_sync_object(struct drm_gem_object *obj, void *syncobj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	spin_lock(&sync_lock);

	if ((omap_obj->flags & OMAP_BO_EXT_SYNC) && !syncobj) {
		/* clearing a previously set syncobj */
		syncobj = kmemdup(omap_obj->sync, sizeof(*omap_obj->sync),
				  GFP_ATOMIC);
		if (!syncobj) {
			ret = -ENOMEM;
			goto unlock;
		}
		omap_obj->flags &= ~OMAP_BO_EXT_SYNC;
		omap_obj->sync = syncobj;
	} else if (syncobj && !(omap_obj->flags & OMAP_BO_EXT_SYNC)) {
		/* replacing an existing syncobj */
		if (omap_obj->sync) {
			memcpy(syncobj, omap_obj->sync, sizeof(*omap_obj->sync));
			kfree(omap_obj->sync);
		}
		omap_obj->flags |= OMAP_BO_EXT_SYNC;
		omap_obj->sync = syncobj;
	}

unlock:
	spin_unlock(&sync_lock);
	return ret;
}
/* don't call directly.. called from GEM core when it is time to actually
 * free the object..
 */
void omap_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	evict(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	spin_lock(&priv->list_lock);
	list_del(&omap_obj->mm_list);
	spin_unlock(&priv->list_lock);

	drm_gem_free_mmap_offset(obj);

	/* this means the object is still pinned.. which really should
	 * not happen.  I think..
	 */
	WARN_ON(omap_obj->paddr_cnt > 0);

	/* don't free externally allocated backing memory */
	if (!(omap_obj->flags & OMAP_BO_EXT_MEM)) {
		if (omap_obj->pages)
			omap_gem_detach_pages(obj);

		if (!is_shmem(obj)) {
			dma_free_writecombine(dev->dev, obj->size,
					omap_obj->vaddr, omap_obj->paddr);
		} else if (omap_obj->vaddr) {
			vunmap(omap_obj->vaddr);
		}
	}

	/* don't free externally allocated syncobj */
	if (!(omap_obj->flags & OMAP_BO_EXT_SYNC))
		kfree(omap_obj->sync);

	drm_gem_object_release(obj);

	kfree(obj);
}
/* convenience method to construct a GEM buffer object, and userspace handle */
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = omap_gem_new(dev, gsize, flags);
	if (!obj)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, obj, handle);
	if (ret) {
		drm_gem_object_release(obj);
		kfree(obj);  /* TODO isn't there a dtor to call? just copying i915 */
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}
/* GEM buffer object constructor */
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
		union omap_gem_size gsize, uint32_t flags)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj = NULL;
	size_t size;
	int ret;

	if (flags & OMAP_BO_TILED) {
		if (!usergart) {
			dev_err(dev->dev, "Tiled buffers require DMM\n");
			goto fail;
		}

		/* tiled buffers are always shmem paged backed.. when they are
		 * scanned out, they are remapped into DMM/TILER
		 */
		flags &= ~OMAP_BO_SCANOUT;

		/* currently don't allow cached buffers.. there is some caching
		 * stuff that needs to be handled better
		 */
		flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED);
		flags |= tiler_get_cpu_cache_flags();

		/* align dimensions to slot boundaries... */
		tiler_align(gem2fmt(flags),
				&gsize.tiled.width, &gsize.tiled.height);

		/* ...and calculate size based on aligned dimensions */
		size = tiler_size(gem2fmt(flags),
				gsize.tiled.width, gsize.tiled.height);
	} else {
		size = PAGE_ALIGN(gsize.bytes);
	}

	omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
	if (!omap_obj)
		goto fail;

	spin_lock(&priv->list_lock);
	list_add(&omap_obj->mm_list, &priv->obj_list);
	spin_unlock(&priv->list_lock);

	obj = &omap_obj->base;

	if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
		/* attempt to allocate contiguous memory if we don't
		 * have DMM for remapping discontiguous buffers
		 */
		omap_obj->vaddr = dma_alloc_writecombine(dev->dev, size,
				&omap_obj->paddr, GFP_KERNEL);
		if (!omap_obj->vaddr) {
			spin_lock(&priv->list_lock);
			list_del(&omap_obj->mm_list);
			spin_unlock(&priv->list_lock);

			kfree(omap_obj);

			return NULL;
		}

		flags |= OMAP_BO_DMA;
	}

	omap_obj->flags = flags;

	if (flags & OMAP_BO_TILED) {
		omap_obj->width = gsize.tiled.width;
		omap_obj->height = gsize.tiled.height;
	}

	ret = 0;
	if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM))
		drm_gem_private_object_init(dev, obj, size);
	else
		ret = drm_gem_object_init(dev, obj, size);

	if (ret)
		goto fail;

	return obj;

fail:
	if (obj)
		omap_gem_free_object(obj);

	return NULL;
}
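
/*
 * Illustrative allocation of a 2D tiled buffer (a sketch, not from the
 * original source): tiled buffers are described by width/height in the size
 * union rather than by a byte count:
 *
 *	union omap_gem_size gsize = {
 *		.tiled = { .width = 1920, .height = 1080 },
 *	};
 *	obj = omap_gem_new(dev, gsize, OMAP_BO_TILED_16);
 */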
/* init/cleanup.. if DMM is used, we need to set some stuff up.. */
void omap_gem_init(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	const enum tiler_fmt fmts[] = {
			TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
	};
	int i, j;

	if (!dmm_is_available()) {
		/* DMM only supported on OMAP4 and later, so this isn't fatal */
		dev_warn(dev->dev, "DMM not available, disable DMM support\n");
		return;
	}

	usergart = kcalloc(3, sizeof(*usergart), GFP_KERNEL);
	if (!usergart)
		return;

	/* reserve 4k aligned/wide regions for userspace mappings: */
	for (i = 0; i < ARRAY_SIZE(fmts); i++) {
		uint16_t h = 1, w = PAGE_SIZE >> i;
		tiler_align(fmts[i], &w, &h);
		/* note: since each region is one 4kb page wide and the
		 * minimum number of rows high, the height ends up being
		 * the same as the # of pages in the region
		 */
		usergart[i].height = h;
		usergart[i].height_shift = ilog2(h);
		usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
		usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
		for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
			struct usergart_entry *entry = &usergart[i].entry[j];
			struct tiler_block *block =
					tiler_reserve_2d(fmts[i], w, h,
							PAGE_SIZE);
			if (IS_ERR(block)) {
				dev_err(dev->dev,
						"reserve failed: %d, %d, %ld\n",
						i, j, PTR_ERR(block));
				return;
			}
			entry->paddr = tiler_ssptr(block);
			entry->block = block;

			DBG("%d:%d: %dx%d: paddr=%pad stride=%d", i, j, w, h,
					&entry->paddr,
					usergart[i].stride_pfn << PAGE_SHIFT);
		}
	}

	priv->has_dmm = true;
}
void omap_gem_deinit(struct drm_device *dev)
{
	/* I believe we can rely on there being no more outstanding GEM
	 * objects which could depend on usergart/dmm at this point.
	 */
	kfree(usergart);
}