/*
 * drivers/gpu/drm/omapdrm/omap_gem.c
 *
 * Copyright (C) 2011 Texas Instruments
 * Author: Rob Clark <rob.clark@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <drm/drm_vma_manager.h>

#include "omap_drv.h"
#include "omap_dmm_tiler.h"

/*
 * GEM buffer object implementation.
 */

#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)

/* note: we use upper 8 bits of flags for driver-internal flags: */
#define OMAP_BO_DMA		0x01000000	/* actually is physically contiguous */
#define OMAP_BO_EXT_SYNC	0x02000000	/* externally allocated sync object */
#define OMAP_BO_EXT_MEM		0x04000000	/* externally allocated memory */

struct omap_gem_object {
	struct drm_gem_object base;

	struct list_head mm_list;

	uint32_t flags;

	/** width/height for tiled formats (rounded up to slot boundaries) */
	uint16_t width, height;

	/** roll applied when mapping to DMM */
	uint32_t roll;

	/**
	 * If the buffer is allocated physically contiguous, the OMAP_BO_DMA
	 * flag is set and the paddr is valid.  Also if the buffer is remapped
	 * in TILER and paddr_cnt > 0, then paddr is valid.  But if you are
	 * using the physical address and OMAP_BO_DMA is not set, then you
	 * should be going through omap_gem_{get,put}_paddr() to ensure the
	 * mapping is not removed from under your feet.
	 *
	 * Note that OMAP_BO_SCANOUT is a hint from userspace that a DMA
	 * capable buffer is requested, but it does not guarantee one.  Use
	 * the OMAP_BO_DMA flag to determine if the buffer has a DMA capable
	 * physical address.
	 */
	dma_addr_t paddr;

	/**
	 * # of users of paddr
	 */
	uint32_t paddr_cnt;

	/**
	 * tiler block used when buffer is remapped in DMM/TILER.
	 */
	struct tiler_block *block;

	/**
	 * Array of backing pages, if allocated.  Note that pages are never
	 * allocated for buffers originally allocated from contiguous memory.
	 */
	struct page **pages;

	/** addresses corresponding to pages in above array */
	dma_addr_t *addrs;

	/**
	 * Virtual address, if mapped.
	 */
	void *vaddr;

	/**
	 * sync-object allocated on demand (if needed)
	 *
	 * Per-buffer sync-object for tracking pending and completed hw/dma
	 * read and write operations.  The layout in memory is dictated by
	 * the SGX firmware, which uses this information to stall the command
	 * stream if a surface is not ready yet.
	 *
	 * Note that when the buffer is used by SGX, the sync-object needs to
	 * be allocated from a special heap of sync-objects.  This way many
	 * sync objects can be packed in a page, and not waste GPU virtual
	 * address space.  Because of this we have to have an
	 * omap_gem_set_sync_object() API to allow replacement of the syncobj
	 * after it has (potentially) already been allocated.  A bit ugly but
	 * I haven't thought of a better alternative.
	 */
	struct {
		uint32_t write_pending;
		uint32_t write_complete;
		uint32_t read_pending;
		uint32_t read_complete;
	} *sync;

	struct omap_gem_vm_ops *ops;

	/**
	 * per-mapper private data..
	 *
	 * TODO maybe there can be a more flexible way to store per-mapper
	 * data.. for now I just keep it simple, since this is only
	 * accessible externally via omap_gem_priv()/omap_gem_set_priv()
	 */
	void *priv[MAX_MAPPERS];
};

static int get_pages(struct drm_gem_object *obj, struct page ***pages);
static uint64_t mmap_offset(struct drm_gem_object *obj);

/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
 * not necessarily pinned in TILER all the time, and (b) when they are
 * they are not necessarily page aligned, we reserve one or more small
 * regions in each of the 2d containers to use as a user-GART where we
 * can create a second page-aligned mapping of parts of the buffer
 * being accessed from userspace.
 *
 * Note that we could optimize slightly when we know that multiple
 * tiler containers are backed by the same PAT.. but I'll leave that
 * for later..
 */
#define NUM_USERGART_ENTRIES 2
struct usergart_entry {
	struct tiler_block *block;	/* the reserved tiler block */
	dma_addr_t paddr;
	struct drm_gem_object *obj;	/* the current pinned obj */
	pgoff_t obj_pgoff;		/* page offset of obj currently
					   mapped in */
};
static struct {
	struct usergart_entry entry[NUM_USERGART_ENTRIES];
	int height;		/* height in rows */
	int height_shift;	/* ilog2(height in rows) */
	int slot_shift;		/* ilog2(width per slot) */
	int stride_pfn;		/* stride in pages */
	int last;		/* index of last used entry */
} *usergart;

static void evict_entry(struct drm_gem_object *obj,
		enum tiler_fmt fmt, struct usergart_entry *entry)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int n = usergart[fmt].height;
	size_t size = PAGE_SIZE * n;
	loff_t off = mmap_offset(obj) +
			(entry->obj_pgoff << PAGE_SHIFT);
	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);

	if (m > 1) {
		int i;
		/* if stride is greater than PAGE_SIZE then sparse mapping: */
		for (i = n; i > 0; i--) {
			unmap_mapping_range(obj->dev->anon_inode->i_mapping,
					off, PAGE_SIZE, 1);
			off += PAGE_SIZE * m;
		}
	} else {
		unmap_mapping_range(obj->dev->anon_inode->i_mapping,
				off, size, 1);
	}

	entry->obj = NULL;
}

/* Evict a buffer from usergart, if it is mapped there */
static void evict(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (omap_obj->flags & OMAP_BO_TILED) {
		enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
		int i;

		if (!usergart)
			return;

		for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
			struct usergart_entry *entry = &usergart[fmt].entry[i];
			if (entry->obj == obj)
				evict_entry(obj, fmt, entry);
		}
	}
}

/* GEM objects can either be allocated from contiguous memory (in which
 * case obj->filp == NULL), or with shmem backing (obj->filp != NULL).
 * Non-contiguous buffers can be remapped in TILER/DMM when they need to
 * be contiguous... but we don't keep them mapped all the time, to
 * reduce pressure on TILER/DMM space, even when we know at allocation
 * time that the buffer will need to be scanned out.
 */
static inline bool is_shmem(struct drm_gem_object *obj)
{
	return obj->filp != NULL;
}

/**
 * shmem buffers that are mapped cached can simulate coherency by using
 * page faulting to keep track of dirty pages
 */
static inline bool is_cached_coherent(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	return is_shmem(obj) &&
		((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED);
}
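
/*
 * For OMAP_BO_CACHED shmem buffers, the fault-based coherency protocol
 * used below works roughly like this: addrs[pgoff] != 0 means the page
 * is currently mapped for device DMA (clean from the device's point of
 * view).  omap_gem_cpu_sync() unmaps a single page from the device
 * before the CPU touches it (fault path) and marks it dirty by zeroing
 * addrs[pgoff].  omap_gem_dma_sync() re-maps any dirty pages for the
 * device and, if anything was dirty, shoots down the userspace mappings
 * so the next CPU access faults and repeats the cycle.
 */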

static DEFINE_SPINLOCK(sync_lock);

/** ensure backing pages are allocated */
static int omap_gem_attach_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct page **pages;
	int npages = obj->size >> PAGE_SHIFT;
	int i, ret;
	dma_addr_t *addrs;

	WARN_ON(omap_obj->pages);

	/* TODO: __GFP_DMA32 .. but somehow GFP_HIGHMEM is coming from the
	 * mapping_gfp_mask(mapping) which conflicts w/ GFP_DMA32.. probably
	 * we actually want CMA memory for it all anyways..
	 */
	pages = drm_gem_get_pages(obj, GFP_KERNEL);
	if (IS_ERR(pages)) {
		dev_err(obj->dev->dev, "could not get pages: %ld\n",
				PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}

		for (i = 0; i < npages; i++) {
			addrs[i] = dma_map_page(dev->dev, pages[i],
					0, PAGE_SIZE, DMA_BIDIRECTIONAL);
		}
	} else {
		addrs = kzalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}
	}

	omap_obj->addrs = addrs;
	omap_obj->pages = pages;

	return 0;

free_pages:
	drm_gem_put_pages(obj, pages, true, false);

	return ret;
}

/** release backing pages */
static void omap_gem_detach_pages(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	/* for non-cached buffers, the pages were mapped for DMA at attach
	 * time (DSS, GPU, etc. are not cache coherent), so unmap them now:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			dma_unmap_page(obj->dev->dev, omap_obj->addrs[i],
					PAGE_SIZE, DMA_BIDIRECTIONAL);
		}
	}

	kfree(omap_obj->addrs);
	omap_obj->addrs = NULL;

	drm_gem_put_pages(obj, omap_obj->pages, true, false);
	omap_obj->pages = NULL;
}

/* get buffer flags */
uint32_t omap_gem_flags(struct drm_gem_object *obj)
{
	return to_omap_bo(obj)->flags;
}
EXPORT_SYMBOL(omap_gem_flags);

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;
	size_t size;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	size = omap_gem_mmap_size(obj);
	ret = drm_gem_create_mmap_offset_size(obj, size);
	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return offset;
}
EXPORT_SYMBOL(omap_gem_mmap_offset);

/** get mmap size */
size_t omap_gem_mmap_size(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	size_t size = obj->size;

	if (omap_obj->flags & OMAP_BO_TILED) {
		/* for tiled buffers, the virtual size has stride rounded up
		 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
		 * 32kb later!).  But we don't back the entire buffer with
		 * pages, only the valid picture part.. so we need to adjust
		 * for this in the size used to mmap and generate the mmap
		 * offset.
		 */
		size = tiler_vsize(gem2fmt(omap_obj->flags),
				omap_obj->width, omap_obj->height);
	}

	return size;
}

/* get tiled size, returns -EINVAL if not a tiled buffer */
int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	if (omap_obj->flags & OMAP_BO_TILED) {
		*w = omap_obj->width;
		*h = omap_obj->height;
		return 0;
	}
	return -EINVAL;
}
EXPORT_SYMBOL(omap_gem_tiled_size);

/* Normal handling for the case of faulting in non-tiled buffers */
static int fault_1d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned long pfn;
	pgoff_t pgoff;

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	if (omap_obj->pages) {
		omap_gem_cpu_sync(obj, pgoff);
		pfn = page_to_pfn(omap_obj->pages[pgoff]);
	} else {
		BUG_ON(!(omap_obj->flags & OMAP_BO_DMA));
		pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff;
	}

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
}

/* Special handling for the case of faulting in 2d tiled buffers */
static int fault_2d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct usergart_entry *entry;
	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
	struct page *pages[64];  /* XXX is this too much to have on stack? */
	unsigned long pfn;
	pgoff_t pgoff, base_pgoff;
	void __user *vaddr;
	int i, ret, slots;

	/*
	 * Note the height of the slot is also equal to the number of pages
	 * that need to be mapped in to fill a 4kb wide CPU page.  If the
	 * slot height is 64, then 64 pages fill a 4kb wide by 64 row region.
	 */
	const int n = usergart[fmt].height;
	const int n_shift = usergart[fmt].height_shift;

	/*
	 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
	 * rounded up to the next multiple of PAGE_SIZE.. this needs to be
	 * taken into account in some of the math, so figure out the virtual
	 * stride in pages.
	 */
	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	/*
	 * Actual address we start mapping at is rounded down to previous slot
	 * boundary in the y direction:
	 */
	base_pgoff = round_down(pgoff, m << n_shift);

	/* figure out buffer width in slots */
	slots = omap_obj->width >> usergart[fmt].slot_shift;

	vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);

	entry = &usergart[fmt].entry[usergart[fmt].last];

	/* evict previous buffer using this usergart entry, if any: */
	if (entry->obj)
		evict_entry(entry->obj, fmt, entry);

	entry->obj = obj;
	entry->obj_pgoff = base_pgoff;

	/* now convert base_pgoff to phys offset from virt offset: */
	base_pgoff = (base_pgoff >> n_shift) * slots;

	/* for wider-than-4k.. figure out which part of the slot-row we want: */
	if (m > 1) {
		int off = pgoff % m;
		entry->obj_pgoff += off;
		base_pgoff /= m;
		slots = min(slots - (off << n_shift), n);
		base_pgoff += off << n_shift;
		vaddr += off << PAGE_SHIFT;
	}

	/*
	 * Map in pages.  Beyond the valid pixel part of the buffer, we set
	 * pages[i] to NULL to get a dummy page mapped in.. if someone
	 * reads/writes it they will get random/undefined content, but at
	 * least it won't be corrupting whatever other random page used to
	 * be mapped in, or other undefined behavior.
	 */
	memcpy(pages, &omap_obj->pages[base_pgoff],
			sizeof(struct page *) * slots);
	memset(pages + slots, 0,
			sizeof(struct page *) * (n - slots));

	ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
	if (ret) {
		dev_err(obj->dev->dev, "failed to pin: %d\n", ret);
		return ret;
	}

	pfn = entry->paddr >> PAGE_SHIFT;

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	for (i = n; i > 0; i--) {
		vm_insert_mixed(vma, (unsigned long)vaddr, pfn);
		pfn += usergart[fmt].stride_pfn;
		vaddr += PAGE_SIZE * m;
	}

	/* simple round-robin: */
	usergart[fmt].last = (usergart[fmt].last + 1) % NUM_USERGART_ENTRIES;

	return 0;
}

/**
 * omap_gem_fault - pagefault handler for GEM objects
 * @vma: the VMA of the GEM object
 * @vmf: fault detail
 *
 * Invoked when a fault occurs on an mmap of a GEM managed area.  GEM
 * does most of the work for us including the actual map/unmap calls
 * but we need to do the actual page work.
 *
 * The VMA was set up by GEM.  In doing so it also ensured that the
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 */
int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct drm_device *dev = obj->dev;
	struct page **pages;
	int ret;

	/* Make sure we don't get a parallel update on a fault, nor have
	 * something moved or removed from beneath our feet.
	 */
	mutex_lock(&dev->struct_mutex);

	/* if a shmem backed object, make sure we have pages attached now */
	ret = get_pages(obj, &pages);
	if (ret)
		goto fail;

	/* where should we do the corresponding put_pages().. we are mapping
	 * the original page, rather than through a GART, so we can't rely
	 * on eviction to trigger this.  But munmap() or all mappings should
	 * probably trigger put_pages()?
	 */

	if (omap_obj->flags & OMAP_BO_TILED)
		ret = fault_2d(obj, vma, vmf);
	else
		ret = fault_1d(obj, vma, vmf);

fail:
	mutex_unlock(&dev->struct_mutex);
	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** We override mainly to fix up some of the vm mapping flags.. */
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return omap_gem_mmap_obj(vma->vm_private_data, vma);
}

int omap_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (omap_obj->flags & OMAP_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * We do have some private objects, at least for scanout
		 * buffers on hardware without DMM/TILER.  But these are
		 * allocated write-combine.
		 */
		if (WARN_ON(!obj->filp))
			return -EINVAL;

		/*
		 * Shunt off cached objs to the shmem file so they have their
		 * own address_space (so unmap_mapping_range does what we
		 * want, in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		vma->vm_pgoff = 0;
		vma->vm_file = get_file(obj->filp);

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	if (omap_obj->ops && omap_obj->ops->mmap)
		omap_obj->ops->mmap(obj->filp, vma);

	return 0;
}

/**
 * omap_gem_dumb_create - create a dumb buffer
 * @file: our client file
 * @dev: our device
 * @args: the requested arguments copied from userspace
 *
 * Allocate a buffer suitable for use for a frame buffer of the
 * form described by user space.  Give userspace a handle by which
 * to reference it.
 */
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	union omap_gem_size gsize;

	/* in case someone tries to feed us a completely bogus stride: */
	args->pitch = align_pitch(args->pitch, args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);

	gsize = (union omap_gem_size){
		.bytes = args->size,
	};

	return omap_gem_new_handle(dev, file, gsize,
			OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
}
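
/*
 * Worked example for the size math above (assuming align_pitch() rounds
 * the pitch up to at least width * bytes-per-pixel): a 640x480 dumb
 * buffer at 32 bpp gets pitch >= 640 * 4 = 2560 bytes, so size =
 * PAGE_ALIGN(2560 * 480) = 1228800 bytes, i.e. exactly 300 4kb pages.
 */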

/**
 * omap_gem_dumb_map_offset - buffer mapping for dumb interface
 * @file: our drm client file
 * @dev: drm device
 * @handle: GEM handle to the object (from dumb_create)
 * @offset: returned fake mmap offset
 *
 * Do the necessary setup to allow the mapping of the frame buffer
 * into user memory.  We don't have to do much here at the moment.
 */
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = omap_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

/* Set scrolling position.  This allows us to implement fast scrolling
 * for the console.
 *
 * Call only from non-atomic contexts.
 */
int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint32_t npages = obj->size >> PAGE_SHIFT;
	int ret = 0;

	if (roll > npages) {
		dev_err(obj->dev->dev, "invalid roll: %u\n", roll);
		return -EINVAL;
	}

	omap_obj->roll = roll;

	mutex_lock(&obj->dev->struct_mutex);

	/* if we aren't mapped yet, we don't need to do anything */
	if (omap_obj->block) {
		struct page **pages;
		ret = get_pages(obj, &pages);
		if (ret)
			goto fail;
		ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
		if (ret)
			dev_err(obj->dev->dev, "could not repin: %d\n", ret);
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}

/* Sync the buffer for CPU access.. note pages should already be
 * attached, i.e. via omap_gem_get_pages()
 */
void omap_gem_cpu_sync(struct drm_gem_object *obj, int pgoff)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj) && omap_obj->addrs[pgoff]) {
		dma_unmap_page(dev->dev, omap_obj->addrs[pgoff],
				PAGE_SIZE, DMA_BIDIRECTIONAL);
		omap_obj->addrs[pgoff] = 0;
	}
}

/* sync the buffer for DMA access */
void omap_gem_dma_sync(struct drm_gem_object *obj,
		enum dma_data_direction dir)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		struct page **pages = omap_obj->pages;
		bool dirty = false;

		for (i = 0; i < npages; i++) {
			if (!omap_obj->addrs[i]) {
				omap_obj->addrs[i] = dma_map_page(dev->dev,
						pages[i], 0,
						PAGE_SIZE, DMA_BIDIRECTIONAL);
				dirty = true;
			}
		}

		if (dirty) {
			unmap_mapping_range(obj->filp->f_mapping, 0,
					omap_gem_mmap_size(obj), 1);
		}
	}
}

/* Get physical address for DMA.. if 'remap' is true, and the buffer is not
 * already contiguous, remap it to pin in physically contiguous memory (i.e.
 * map it in TILER)
 */
int omap_gem_get_paddr(struct drm_gem_object *obj,
		dma_addr_t *paddr, bool remap)
{
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&obj->dev->struct_mutex);

	if (remap && is_shmem(obj) && priv->has_dmm) {
		if (omap_obj->paddr_cnt == 0) {
			struct page **pages;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
			struct tiler_block *block;

			BUG_ON(omap_obj->block);

			ret = get_pages(obj, &pages);
			if (ret)
				goto fail;

			if (omap_obj->flags & OMAP_BO_TILED) {
				block = tiler_reserve_2d(fmt,
						omap_obj->width,
						omap_obj->height, 0);
			} else {
				block = tiler_reserve_1d(obj->size);
			}

			if (IS_ERR(block)) {
				ret = PTR_ERR(block);
				dev_err(obj->dev->dev,
					"could not remap: %d (%d)\n", ret, fmt);
				goto fail;
			}

			/* TODO: enable async refill.. */
			ret = tiler_pin(block, pages, npages,
					omap_obj->roll, true);
			if (ret) {
				tiler_release(block);
				dev_err(obj->dev->dev,
						"could not pin: %d\n", ret);
				goto fail;
			}

			omap_obj->paddr = tiler_ssptr(block);
			omap_obj->block = block;

			DBG("got paddr: %pad", &omap_obj->paddr);
		}

		omap_obj->paddr_cnt++;

		*paddr = omap_obj->paddr;
	} else if (omap_obj->flags & OMAP_BO_DMA) {
		*paddr = omap_obj->paddr;
	} else {
		ret = -EINVAL;
		goto fail;
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(omap_gem_get_paddr);
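
/*
 * A minimal usage sketch for the pin/unpin API above (illustrative
 * only; "program the hardware" stands in for whatever consumes the
 * address):
 *
 *	dma_addr_t paddr;
 *	int ret = omap_gem_get_paddr(obj, &paddr, true);
 *	if (!ret) {
 *		// ... program the hardware with paddr ...
 *		omap_gem_put_paddr(obj);  // drop the pin when DMA is done
 *	}
 *
 * Each successful get increments paddr_cnt; put decrements it, and the
 * TILER block is only unpinned/released when the count drops to zero.
 */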

/* Release physical address when DMA is no longer being performed.. this
 * could potentially unpin and unmap buffers from TILER
 */
void omap_gem_put_paddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret;

	mutex_lock(&obj->dev->struct_mutex);
	if (omap_obj->paddr_cnt > 0) {
		omap_obj->paddr_cnt--;
		if (omap_obj->paddr_cnt == 0) {
			ret = tiler_unpin(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not unpin pages: %d\n", ret);
			}
			ret = tiler_release(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not release tiler block: %d\n",
					ret);
			}
			omap_obj->paddr = 0;
			omap_obj->block = NULL;
		}
	}

	mutex_unlock(&obj->dev->struct_mutex);
}
EXPORT_SYMBOL(omap_gem_put_paddr);

/* Get rotated scanout address (only valid if already pinned), at the
 * specified orientation and x,y offset from top-left corner of buffer
 * (only valid for tiled 2d buffers)
 */
int omap_gem_rotated_paddr(struct drm_gem_object *obj, uint32_t orient,
		int x, int y, dma_addr_t *paddr)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;

	mutex_lock(&obj->dev->struct_mutex);
	if ((omap_obj->paddr_cnt > 0) && omap_obj->block &&
			(omap_obj->flags & OMAP_BO_TILED)) {
		*paddr = tiler_tsptr(omap_obj->block, orient, x, y);
		ret = 0;
	}
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;
	if (omap_obj->flags & OMAP_BO_TILED)
		ret = tiler_stride(gem2fmt(omap_obj->flags), orient);
	return ret;
}
EXPORT_SYMBOL(omap_gem_tiled_stride);

/* acquire pages when needed (for example, for DMA where a physically
 * contiguous buffer is not required)
 */
static int get_pages(struct drm_gem_object *obj, struct page ***pages)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	if (is_shmem(obj) && !omap_obj->pages) {
		ret = omap_gem_attach_pages(obj);
		if (ret) {
			dev_err(obj->dev->dev, "could not attach pages\n");
			return ret;
		}
	}

	/* TODO: even phys-contig.. we should have a list of pages? */
	*pages = omap_obj->pages;

	return 0;
}

/* if !remap, and we don't have pages backing, then fail, rather than
 * increasing the pin count (which we don't really do yet anyways,
 * because we don't support swapping pages back out).  And 'remap'
 * might not be quite the right name, but I wanted to keep it working
 * similarly to omap_gem_get_paddr().  Note though that the mutex is not
 * acquired if !remap (because this can be called in atomic ctxt),
 * but probably omap_gem_get_paddr() should be changed to work in the
 * same way.  If !remap, a matching omap_gem_put_pages() call is not
 * required (and should not be made).
 */
int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
		bool remap)
{
	int ret;
	if (!remap) {
		struct omap_gem_object *omap_obj = to_omap_bo(obj);
		if (!omap_obj->pages)
			return -ENOMEM;
		*pages = omap_obj->pages;
		return 0;
	}
	mutex_lock(&obj->dev->struct_mutex);
	ret = get_pages(obj, pages);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}
EXPORT_SYMBOL(omap_gem_get_pages);
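
/*
 * Sketch of the two omap_gem_get_pages() modes described above
 * (illustrative only):
 *
 *	struct page **pages;
 *
 *	// process context: attach shmem pages on demand (takes the mutex)
 *	ret = omap_gem_get_pages(obj, &pages, true);
 *
 *	// atomic context: only succeeds if pages are already attached,
 *	// and no matching omap_gem_put_pages() should be made
 *	ret = omap_gem_get_pages(obj, &pages, false);
 */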

/* release pages when DMA no longer being performed */
int omap_gem_put_pages(struct drm_gem_object *obj)
{
	/* do something here if we dynamically attach/detach pages.. at
	 * least they would no longer need to be pinned if everyone has
	 * released the pages..
	 */
	return 0;
}
EXPORT_SYMBOL(omap_gem_put_pages);

/* Get kernel virtual address for CPU access.. this more or less only
 * exists for omap_fbdev.  This should be called with struct_mutex
 * held.
 */
void *omap_gem_vaddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!omap_obj->vaddr) {
		struct page **pages;
		int ret = get_pages(obj, &pages);
		if (ret)
			return ERR_PTR(ret);
		omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}
	return omap_obj->vaddr;
}

#ifdef CONFIG_PM
/* re-pin objects in DMM in resume path: */
int omap_gem_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	struct omap_drm_private *priv = drm_dev->dev_private;
	struct omap_gem_object *omap_obj;
	int ret = 0;

	list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
		if (omap_obj->block) {
			struct drm_gem_object *obj = &omap_obj->base;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			WARN_ON(!omap_obj->pages);  /* this can't happen */
			ret = tiler_pin(omap_obj->block,
					omap_obj->pages, npages,
					omap_obj->roll, true);
			if (ret) {
				dev_err(dev, "could not repin: %d\n", ret);
				return ret;
			}
		}
	}

	return 0;
}
#endif

#ifdef CONFIG_DEBUG_FS
void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint64_t off;

	off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
			omap_obj->flags, obj->name, obj->refcount.refcount.counter,
			off, &omap_obj->paddr, omap_obj->paddr_cnt,
			omap_obj->vaddr, omap_obj->roll);

	if (omap_obj->flags & OMAP_BO_TILED) {
		seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
		if (omap_obj->block) {
			struct tcm_area *area = &omap_obj->block->area;
			seq_printf(m, " (%dx%d, %dx%d)",
					area->p0.x, area->p0.y,
					area->p1.x, area->p1.y);
		}
	} else {
		seq_printf(m, " %zu", obj->size);
	}

	seq_printf(m, "\n");
}

void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct omap_gem_object *omap_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(omap_obj, list, mm_list) {
		struct drm_gem_object *obj = &omap_obj->base;
		seq_printf(m, " ");
		omap_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* Buffer Synchronization:
 */

struct omap_gem_sync_waiter {
	struct list_head list;
	struct omap_gem_object *omap_obj;
	enum omap_gem_op op;
	uint32_t read_target, write_target;
	/* notify called w/ sync_lock held */
	void (*notify)(void *arg);
	void *arg;
};

/* list of omap_gem_sync_waiter.. the notify fxn gets called back when
 * the read and/or write target count is achieved, which can call a user
 * callback (ex. to kick 3d and/or 2d), wake up a blocked task (prep for
 * cpu access), etc.
 */
static LIST_HEAD(waiters);

static inline bool is_waiting(struct omap_gem_sync_waiter *waiter)
{
	struct omap_gem_object *omap_obj = waiter->omap_obj;
	if ((waiter->op & OMAP_GEM_READ) &&
			(omap_obj->sync->write_complete < waiter->write_target))
		return true;
	if ((waiter->op & OMAP_GEM_WRITE) &&
			(omap_obj->sync->read_complete < waiter->read_target))
		return true;
	return false;
}

/* macro for sync debug.. */
#define SYNCDBG 0
#define SYNC(fmt, ...) do { if (SYNCDBG) \
		printk(KERN_ERR "%s:%d: " fmt "\n", \
				__func__, __LINE__, ##__VA_ARGS__); \
	} while (0)

static void sync_op_update(void)
{
	struct omap_gem_sync_waiter *waiter, *n;
	list_for_each_entry_safe(waiter, n, &waiters, list) {
		if (!is_waiting(waiter)) {
			list_del(&waiter->list);
			SYNC("notify: %p", waiter);
			waiter->notify(waiter->arg);
			kfree(waiter);
		}
	}
}

static inline int sync_op(struct drm_gem_object *obj,
		enum omap_gem_op op, bool start)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	spin_lock(&sync_lock);

	if (!omap_obj->sync) {
		omap_obj->sync = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
		if (!omap_obj->sync) {
			ret = -ENOMEM;
			goto unlock;
		}
	}

	if (start) {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_pending++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_pending++;
	} else {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_complete++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_complete++;
		sync_op_update();
	}

unlock:
	spin_unlock(&sync_lock);

	return ret;
}

/* it is a bit lame to handle updates in this sort of polling way, but
 * in case of PVR, the GPU can directly update read/write complete
 * values, and not really tell us which ones it updated.. this also
 * means that sync_lock is not quite sufficient.  So we'll need to
 * do something a bit better when it comes time to add support for
 * separate 2d hw..
 */
void omap_gem_op_update(void)
{
	spin_lock(&sync_lock);
	sync_op_update();
	spin_unlock(&sync_lock);
}
EXPORT_SYMBOL(omap_gem_op_update);

/* mark the start of read and/or write operation */
int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, true);
}
EXPORT_SYMBOL(omap_gem_op_start);

int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, false);
}
EXPORT_SYMBOL(omap_gem_op_finish);

static DECLARE_WAIT_QUEUE_HEAD(sync_event);

static void sync_notify(void *arg)
{
	struct task_struct **waiter_task = arg;
	*waiter_task = NULL;
	wake_up_all(&sync_event);
}

int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;
	if (omap_obj->sync) {
		struct task_struct *waiter_task = current;
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_KERNEL);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = sync_notify;
		waiter->arg = &waiter_task;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			ret = wait_event_interruptible(sync_event,
					(waiter_task == NULL));
			spin_lock(&sync_lock);
			if (waiter_task) {
				SYNC("interrupted: %p", waiter);
				/* we were interrupted */
				list_del(&waiter->list);
				waiter_task = NULL;
			} else {
				/* freed in sync_op_update() */
				waiter = NULL;
			}
		}
		spin_unlock(&sync_lock);

		kfree(waiter);	/* kfree(NULL) is a no-op */
	}
	return ret;
}
EXPORT_SYMBOL(omap_gem_op_sync);
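
/*
 * Sketch of the intended flow for the op tracking API above
 * (illustrative only; "hw" stands in for SGX/DSS/etc.):
 *
 *	omap_gem_op_start(obj, OMAP_GEM_WRITE);  // hw will write buffer
 *	// ... kick hw ...
 *	// later, when the hw op completes:
 *	omap_gem_op_finish(obj, OMAP_GEM_WRITE);
 *
 *	// elsewhere, before the CPU reads the buffer:
 *	omap_gem_op_sync(obj, OMAP_GEM_READ);    // blocks until writes done
 *
 * A reader waits for write_complete to catch up to write_pending, and a
 * writer waits for read_complete, as encoded in is_waiting().
 */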

/* call fxn(arg), either synchronously or asynchronously if the op
 * is currently blocked.. fxn() can be called from any context
 *
 * (TODO for now fxn is called back from whichever context calls
 * omap_gem_op_update().. but this could be better defined later
 * if needed)
 *
 * TODO more code in common w/ _sync()..
 */
int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
		void (*fxn)(void *arg), void *arg)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	if (omap_obj->sync) {
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_ATOMIC);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = fxn;
		waiter->arg = arg;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			return 0;
		}

		spin_unlock(&sync_lock);

		kfree(waiter);
	}

	/* no waiting.. */
	fxn(arg);

	return 0;
}
EXPORT_SYMBOL(omap_gem_op_async);

/* special API so PVR can update the buffer to use a sync-object allocated
 * from its sync-obj heap.  Only used for a newly allocated (from PVR's
 * perspective) sync-object, so we overwrite the new syncobj w/ values
 * from the already allocated syncobj (if there is one)
 */
int omap_gem_set_sync_object(struct drm_gem_object *obj, void *syncobj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	spin_lock(&sync_lock);

	if ((omap_obj->flags & OMAP_BO_EXT_SYNC) && !syncobj) {
		/* clearing a previously set syncobj */
		syncobj = kmemdup(omap_obj->sync, sizeof(*omap_obj->sync),
				GFP_ATOMIC);
		if (!syncobj) {
			ret = -ENOMEM;
			goto unlock;
		}
		omap_obj->flags &= ~OMAP_BO_EXT_SYNC;
		omap_obj->sync = syncobj;
	} else if (syncobj && !(omap_obj->flags & OMAP_BO_EXT_SYNC)) {
		/* replacing an existing syncobj */
		if (omap_obj->sync) {
			memcpy(syncobj, omap_obj->sync,
					sizeof(*omap_obj->sync));
			kfree(omap_obj->sync);
		}
		omap_obj->flags |= OMAP_BO_EXT_SYNC;
		omap_obj->sync = syncobj;
	}

unlock:
	spin_unlock(&sync_lock);
	return ret;
}
EXPORT_SYMBOL(omap_gem_set_sync_object);

/* don't call directly.. called from GEM core when it is time to actually
 * free the object..
 */
void omap_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	evict(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	spin_lock(&priv->list_lock);
	list_del(&omap_obj->mm_list);
	spin_unlock(&priv->list_lock);

	drm_gem_free_mmap_offset(obj);

	/* this means the object is still pinned.. which really should
	 * not happen.  I think..
	 */
	WARN_ON(omap_obj->paddr_cnt > 0);

	/* don't free externally allocated backing memory */
	if (!(omap_obj->flags & OMAP_BO_EXT_MEM)) {
		if (omap_obj->pages)
			omap_gem_detach_pages(obj);

		if (!is_shmem(obj)) {
			dma_free_writecombine(dev->dev, obj->size,
					omap_obj->vaddr, omap_obj->paddr);
		} else if (omap_obj->vaddr) {
			vunmap(omap_obj->vaddr);
		}
	}

	/* don't free externally allocated syncobj */
	if (!(omap_obj->flags & OMAP_BO_EXT_SYNC))
		kfree(omap_obj->sync);

	drm_gem_object_release(obj);

	kfree(obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = omap_gem_new(dev, gsize, flags);
	if (!obj)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, obj, handle);
	if (ret) {
		drm_gem_object_release(obj);
		kfree(obj);	/* TODO isn't there a dtor to call? just copying i915 */
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}

/* GEM buffer object constructor */
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
		union omap_gem_size gsize, uint32_t flags)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj = NULL;
	size_t size;
	int ret;

	if (flags & OMAP_BO_TILED) {
		if (!usergart) {
			dev_err(dev->dev, "Tiled buffers require DMM\n");
			goto fail;
		}

		/* tiled buffers are always shmem paged backed.. when they are
		 * scanned out, they are remapped into DMM/TILER
		 */
		flags &= ~OMAP_BO_SCANOUT;

		/* currently don't allow cached buffers.. there is some caching
		 * stuff that needs to be handled better
		 */
		flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED);
		flags |= tiler_get_cpu_cache_flags();

		/* align dimensions to slot boundaries... */
		tiler_align(gem2fmt(flags),
				&gsize.tiled.width, &gsize.tiled.height);

		/* ...and calculate size based on aligned dimensions */
		size = tiler_size(gem2fmt(flags),
				gsize.tiled.width, gsize.tiled.height);
	} else {
		size = PAGE_ALIGN(gsize.bytes);
	}

	omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
	if (!omap_obj)
		goto fail;

	spin_lock(&priv->list_lock);
	list_add(&omap_obj->mm_list, &priv->obj_list);
	spin_unlock(&priv->list_lock);

	obj = &omap_obj->base;

	if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
		/* attempt to allocate contiguous memory if we don't
		 * have DMM for remapping discontiguous buffers
		 */
		omap_obj->vaddr = dma_alloc_writecombine(dev->dev, size,
				&omap_obj->paddr, GFP_KERNEL);
		if (!omap_obj->vaddr) {
			spin_lock(&priv->list_lock);
			list_del(&omap_obj->mm_list);
			spin_unlock(&priv->list_lock);

			kfree(omap_obj);

			return NULL;
		}

		flags |= OMAP_BO_DMA;
	}

	omap_obj->flags = flags;

	if (flags & OMAP_BO_TILED) {
		omap_obj->width = gsize.tiled.width;
		omap_obj->height = gsize.tiled.height;
	}

	ret = 0;
	if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM))
		drm_gem_private_object_init(dev, obj, size);
	else
		ret = drm_gem_object_init(dev, obj, size);

	if (ret)
		goto fail;

	return obj;

fail:
	if (obj)
		omap_gem_free_object(obj);

	return NULL;
}
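
/*
 * Sketch of the two allocation flavors accepted above (illustrative
 * only; the sizes and dimensions are made up):
 *
 *	union omap_gem_size gsize;
 *
 *	// plain buffer, sized in bytes:
 *	gsize.bytes = SZ_1M;
 *	obj = omap_gem_new(dev, gsize, OMAP_BO_WC);
 *
 *	// 2d tiled buffer, sized in pixels (dimensions get aligned to
 *	// slot boundaries internally):
 *	gsize.tiled.width = 1920;
 *	gsize.tiled.height = 1080;
 *	obj = omap_gem_new(dev, gsize, OMAP_BO_TILED_32);
 */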

/* init/cleanup.. if DMM is used, we need to set some stuff up.. */
void omap_gem_init(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	const enum tiler_fmt fmts[] = {
			TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
	};
	int i, j;

	if (!dmm_is_available()) {
		/* DMM only supported on OMAP4 and later, so this isn't fatal */
		dev_warn(dev->dev, "DMM not available, disabling DMM support\n");
		return;
	}

	usergart = kcalloc(3, sizeof(*usergart), GFP_KERNEL);
	if (!usergart)
		return;

	/* reserve 4k aligned/wide regions for userspace mappings: */
	for (i = 0; i < ARRAY_SIZE(fmts); i++) {
		uint16_t h = 1, w = PAGE_SIZE >> i;
		tiler_align(fmts[i], &w, &h);
		/* note: since each region is one 4kb page wide, and the
		 * minimum number of rows, the height ends up being the
		 * same as the # of pages in the region
		 */
		usergart[i].height = h;
		usergart[i].height_shift = ilog2(h);
		usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
		usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
		for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
			struct usergart_entry *entry = &usergart[i].entry[j];
			struct tiler_block *block =
					tiler_reserve_2d(fmts[i], w, h,
							PAGE_SIZE);
			if (IS_ERR(block)) {
				dev_err(dev->dev,
						"reserve failed: %d, %d, %ld\n",
						i, j, PTR_ERR(block));
				return;
			}
			entry->paddr = tiler_ssptr(block);
			entry->block = block;

			DBG("%d:%d: %dx%d: paddr=%pad stride=%d", i, j, w, h,
					&entry->paddr,
					usergart[i].stride_pfn << PAGE_SHIFT);
		}
	}

	priv->has_dmm = true;
}

void omap_gem_deinit(struct drm_device *dev)
{
	/* I believe we can rely on there being no more outstanding GEM
	 * objects which could depend on usergart/dmm at this point.
	 */
	kfree(usergart);
}

/****** PLUGIN API specific ******/

/* This constructor is mainly to give plugins a way to wrap their
 * own allocations
 */
struct drm_gem_object *omap_gem_new_ext(struct drm_device *dev,
		union omap_gem_size gsize, uint32_t flags,
		dma_addr_t paddr, struct page **pages,
		struct omap_gem_vm_ops *ops)
{
	struct drm_gem_object *obj;

	BUG_ON((flags & OMAP_BO_TILED) && !pages);

	if (paddr)
		flags |= OMAP_BO_DMA;

	obj = omap_gem_new(dev, gsize, flags | OMAP_BO_EXT_MEM);
	if (obj) {
		struct omap_gem_object *omap_obj = to_omap_bo(obj);
		omap_obj->paddr = paddr;
		omap_obj->pages = pages;
		omap_obj->ops = ops;
	}
	return obj;
}
EXPORT_SYMBOL(omap_gem_new_ext);
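
/*
 * Sketch of how a plugin might wrap an externally allocated, physically
 * contiguous buffer with the constructor above (illustrative only; the
 * paddr/size values and my_vm_ops are made up):
 *
 *	union omap_gem_size gsize = { .bytes = size };
 *	struct drm_gem_object *obj =
 *		omap_gem_new_ext(dev, gsize, OMAP_BO_WC,
 *				paddr, NULL, &my_vm_ops);
 *
 * Since paddr != 0, OMAP_BO_DMA is set and the buffer is treated as
 * contiguous; OMAP_BO_EXT_MEM prevents the externally owned backing
 * memory (and an externally provided page array) from being freed
 * along with the object.
 */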

void omap_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (omap_obj->ops && omap_obj->ops->open)
		omap_obj->ops->open(vma);
	else
		drm_gem_vm_open(vma);
}

void omap_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (omap_obj->ops && omap_obj->ops->close)
		omap_obj->ops->close(vma);
	else
		drm_gem_vm_close(vma);
}

void *omap_gem_priv(struct drm_gem_object *obj, int mapper_id)
{
	BUG_ON((mapper_id >= MAX_MAPPERS) || (mapper_id < 0));
	return to_omap_bo(obj)->priv[mapper_id];
}
EXPORT_SYMBOL(omap_gem_priv);

void omap_gem_set_priv(struct drm_gem_object *obj, int mapper_id, void *priv)
{
	BUG_ON((mapper_id >= MAX_MAPPERS) || (mapper_id < 0));
	to_omap_bo(obj)->priv[mapper_id] = priv;
}
EXPORT_SYMBOL(omap_gem_set_priv);