/*
 * drivers/gpu/ion/omap_tiler_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/ion.h>
#include <linux/mm.h>
#include <linux/omap_ion.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "../../../drivers/gpu/drm/omapdrm/omap_dmm_tiler.h"
#include <asm/mach/map.h>
#include <asm/page.h>
#include "../../../arch/arm/mach-omap2/soc.h"
#include "../ion_priv.h"
#include "omap_ion_priv.h"
#include <asm/cacheflush.h>
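
/*
 * The tiler heap wraps a generic ion_heap.  Since the embedded heap is
 * the first member, the heap ops below simply cast the ion_heap pointer
 * back to struct omap_ion_heap (equivalent to container_of).
 */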
struct omap_ion_heap {
	struct ion_heap heap;
	struct gen_pool *pool;
	ion_phys_addr_t base;
};

struct omap_tiler_info {
	struct tiler_block *tiler_handle;	/* handle of the allocation
						   in tiler */
	bool lump;			/* true for a single lump allocation */
	u32 n_phys_pages;		/* number of physical pages */
	u32 *phys_addrs;		/* array of addrs of physical pages */
	u32 n_tiler_pages;		/* number of tiler pages */
	u32 *tiler_addrs;		/* array of addrs of tiler pages */
	int fmt;			/* tiler buffer format */
	u32 tiler_start;		/* start addr in tiler -- if not page
					   aligned this may not equal the
					   first entry of tiler_addrs */
	u32 vsize;			/* virtual size of buffer */
	u32 vstride;			/* virtual stride of buffer */
	u32 phys_stride;		/* physical stride of the buffer */
	u32 flags;			/* flags specifying cached or not */
};
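
/*
 * The info struct and both address arrays are carved out of a single
 * kzalloc in omap_tiler_alloc(): phys_addrs[] (n_phys_pages entries)
 * immediately follows the struct, and tiler_addrs[] (n_tiler_pages
 * entries) follows phys_addrs[].
 */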

static int omap_tiler_heap_allocate(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    unsigned long size, unsigned long align,
				    unsigned long flags)
{
	struct omap_tiler_info *info;

	/*
	 * A size of zero means the buffer is already allocated and
	 * populated; we get here via dummy handle creation, so simply
	 * return.
	 */
	if (size == 0) {
		/*
		 * Store the pointer to struct omap_tiler_info in the
		 * buffer here.  It is used later, inside the map_dma
		 * function, to create the sg list for the tiler buffer.
		 */
		info = (struct omap_tiler_info *)flags;
		if (!info) {
			pr_err("%s: flags argument is not set up\n",
			       __func__);
			return -EINVAL;
		}
		buffer->priv_virt = info;
		/* Re-update the correct flags inside the buffer */
		buffer->flags = info->flags;
		return 0;
	}

	pr_err("%s: this should never be called directly -- use the "
	       "OMAP_ION_TILER_ALLOC command of the ION_IOC_CUSTOM ioctl "
	       "instead\n", __func__);
	return -EINVAL;
}
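
/*
 * Tiler buffers are allocated through the ION custom ioctl rather than
 * ION_IOC_ALLOC.  An illustrative userspace sketch (assuming the usual
 * OMAP_ION_TILER_ALLOC plumbing from <linux/omap_ion.h>; treat the
 * format constant below as an example, not a requirement):
 *
 *	struct omap_ion_tiler_alloc_data alloc_data = {
 *		.w = 1920,
 *		.h = 1080,
 *		.fmt = TILER_PIXEL_FMT_32BIT,
 *	};
 *	struct ion_custom_data custom_data = {
 *		.cmd = OMAP_ION_TILER_ALLOC,
 *		.arg = (unsigned long)&alloc_data,
 *	};
 *	ioctl(ion_fd, ION_IOC_CUSTOM, &custom_data);
 *
 * On success, alloc_data.handle, .stride and .offset are filled in by
 * omap_tiler_alloc() below.
 */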

static int omap_tiler_alloc_carveout(struct ion_heap *heap,
				     struct omap_tiler_info *info)
{
	struct omap_ion_heap *omap_heap = (struct omap_ion_heap *)heap;
	int i;
	int ret;
	ion_phys_addr_t addr;

	/* first try to satisfy the allocation as one contiguous lump */
	addr = gen_pool_alloc(omap_heap->pool, info->n_phys_pages * PAGE_SIZE);
	if (addr) {
		info->lump = true;
		for (i = 0; i < info->n_phys_pages; i++)
			info->phys_addrs[i] = addr + i * PAGE_SIZE;
		return 0;
	}

	/* fall back to allocating the backing pages one at a time */
	for (i = 0; i < info->n_phys_pages; i++) {
		addr = gen_pool_alloc(omap_heap->pool, PAGE_SIZE);
		if (addr == 0) {
			ret = -ENOMEM;
			pr_err("%s: failed to allocate pages to back "
			       "tiler address space\n", __func__);
			goto err;
		}
		info->phys_addrs[i] = addr;
	}
	return 0;

err:
	for (i -= 1; i >= 0; i--)
		gen_pool_free(omap_heap->pool, info->phys_addrs[i], PAGE_SIZE);
	return ret;
}

static void omap_tiler_free_carveout(struct ion_heap *heap,
				     struct omap_tiler_info *info)
{
	struct omap_ion_heap *omap_heap = (struct omap_ion_heap *)heap;
	int i;

	if (info->lump) {
		gen_pool_free(omap_heap->pool,
			      info->phys_addrs[0],
			      info->n_phys_pages * PAGE_SIZE);
		return;
	}

	for (i = 0; i < info->n_phys_pages; i++)
		gen_pool_free(omap_heap->pool, info->phys_addrs[i], PAGE_SIZE);
}

int omap_tiler_alloc(struct ion_heap *heap,
		     struct ion_client *client,
		     struct omap_ion_tiler_alloc_data *data)
{
	struct ion_handle *handle;
	struct ion_buffer *buffer;
	struct omap_tiler_info *info = NULL;
	u32 n_phys_pages;
	u32 n_tiler_pages;
	int i = 0, ret;
	uint32_t remainder;
	dma_addr_t ssptr;

	if (data->fmt == TILFMT_PAGE && data->h != 1) {
		pr_err("%s: page mode (1D) allocations must have a height "
		       "of one\n", __func__);
		return -EINVAL;
	}

	if (data->fmt == TILFMT_PAGE) {
		/* calculate required pages the usual way */
		n_phys_pages = round_up(data->w, PAGE_SIZE) >> PAGE_SHIFT;
		n_tiler_pages = n_phys_pages;
	} else {
		/* call APIs to calculate 2D buffer page requirements */
		n_phys_pages = tiler_size(data->fmt, data->w, data->h) >>
			       PAGE_SHIFT;
		n_tiler_pages = tiler_vsize(data->fmt, data->w, data->h) >>
				PAGE_SHIFT;
	}

	info = kzalloc(sizeof(struct omap_tiler_info) +
		       sizeof(u32) * n_phys_pages +
		       sizeof(u32) * n_tiler_pages, GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->n_phys_pages = n_phys_pages;
	info->n_tiler_pages = n_tiler_pages;
	info->phys_addrs = (u32 *)(info + 1);
	info->tiler_addrs = info->phys_addrs + n_phys_pages;
	info->fmt = data->fmt;
	info->flags = data->flags;

	/* Allocate tiler space
	   FIXME: we only support PAGE_SIZE alignment right now. */
	if (data->fmt == TILFMT_PAGE)
		info->tiler_handle = tiler_reserve_1d(data->w);
	else
		info->tiler_handle = tiler_reserve_2d(data->fmt, data->w,
						      data->h, PAGE_SIZE);

	/* check the reservation before dereferencing the handle */
	if (IS_ERR_OR_NULL(info->tiler_handle)) {
		ret = PTR_ERR(info->tiler_handle);
		pr_err("%s: failure to allocate address space from tiler\n",
		       __func__);
		goto err_got_mem;
	}

	info->tiler_handle->width = data->w;
	info->tiler_handle->height = data->h;

	/* get physical address of tiler buffer */
	info->tiler_start = tiler_ssptr(info->tiler_handle);

	/* fill in tiler pages by using ssptr and stride */
	info->vstride = info->tiler_handle->stride;
	info->vsize = n_tiler_pages << PAGE_SHIFT;
	info->phys_stride = (data->fmt == TILFMT_PAGE) ? info->vstride :
			    tiler_stride(data->fmt, 0);
	ssptr = info->tiler_start;
	remainder = info->vstride;
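
	/*
	 * Walk the tiler container and record the address backing each
	 * tiler page.  A row of the buffer spans vstride bytes of the
	 * virtual view but phys_stride bytes of the container, so once a
	 * full row has been consumed we skip ahead by the difference.
	 * For example (illustrative numbers only), with vstride = 8 KiB
	 * and phys_stride = 16 KiB, pages 0-1 cover the first row and
	 * the second row starts 16 KiB past tiler_start.
	 */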
	for (i = 0; i < n_tiler_pages; i++) {
		info->tiler_addrs[i] = PAGE_ALIGN(ssptr);
		ssptr += PAGE_SIZE;
		remainder -= PAGE_SIZE;

		/* see if we are done with this line.  If so, go to the
		   next line */
		if (!remainder) {
			remainder = info->vstride;
			ssptr += info->phys_stride - info->vstride;
		}
	}

	if ((heap->id == OMAP_ION_HEAP_TILER) ||
	    (heap->id == OMAP_ION_HEAP_NONSECURE_TILER)) {
		ret = omap_tiler_alloc_carveout(heap, info);
		if (ret)
			goto err_got_tiler;

		ret = tiler_pin_phys(info->tiler_handle, info->phys_addrs,
				     info->n_phys_pages);
		if (ret) {
			pr_err("%s: failure to pin pages to tiler\n",
			       __func__);
			goto err_got_carveout;
		}
	}

	data->stride = info->vstride;
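
	/*
	 * Create a dummy ion handle to manage the tiler allocation.  The
	 * size of -1 is apparently relied upon to page-align to zero in
	 * the ion core, so that omap_tiler_heap_allocate() sees
	 * size == 0 and takes its "already allocated" path; the flags
	 * argument carries the struct omap_tiler_info pointer into
	 * buffer->priv_virt.
	 */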
	handle = ion_alloc(client, -1, 0, 1 << OMAP_ION_HEAP_TILER,
			   (unsigned int)info);
	if (IS_ERR_OR_NULL(handle)) {
		ret = PTR_ERR(handle);
		pr_err("%s: failure to allocate handle to manage "
		       "tiler allocation\n", __func__);
		goto err;
	}

	buffer = ion_handle_buffer(handle);
	buffer->size = n_tiler_pages * PAGE_SIZE;
	data->handle = handle;
	data->offset = (size_t)(info->tiler_start & ~PAGE_MASK);

	return 0;

err:
	tiler_unpin(info->tiler_handle);
err_got_carveout:
	if ((heap->id == OMAP_ION_HEAP_TILER) ||
	    (heap->id == OMAP_ION_HEAP_NONSECURE_TILER))
		omap_tiler_free_carveout(heap, info);
err_got_tiler:
	tiler_release(info->tiler_handle);
err_got_mem:
	kfree(info);
	return ret;
}

static void omap_tiler_heap_free(struct ion_buffer *buffer)
{
	struct omap_tiler_info *info = buffer->priv_virt;

	tiler_unpin(info->tiler_handle);
	tiler_release(info->tiler_handle);

	if ((buffer->heap->id == OMAP_ION_HEAP_TILER) ||
	    (buffer->heap->id == OMAP_ION_HEAP_NONSECURE_TILER))
		omap_tiler_free_carveout(buffer->heap, info);

	kfree(info);
}

static int omap_tiler_phys(struct ion_heap *heap,
			   struct ion_buffer *buffer,
			   ion_phys_addr_t *addr, size_t *len)
{
	struct omap_tiler_info *info = buffer->priv_virt;

	*addr = info->tiler_start;
	*len = buffer->size;
	return 0;
}

int omap_tiler_pages(struct ion_client *client, struct ion_handle *handle,
		     int *n, u32 **tiler_addrs)
{
	ion_phys_addr_t addr;
	size_t len;
	int ret;
	struct omap_tiler_info *info = ion_handle_buffer(handle)->priv_virt;

	/* validate that the handle exists in this client */
	ret = ion_phys(client, handle, &addr, &len);
	if (ret)
		return ret;

	*n = info->n_tiler_pages;
	*tiler_addrs = info->tiler_addrs;
	return 0;
}
EXPORT_SYMBOL(omap_tiler_pages);
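
/*
 * Illustrative only: a kernel client already holding a valid
 * (client, handle) pair can look up the per-page tiler addresses of a
 * buffer, e.g.
 *
 *	int n;
 *	u32 *addrs;
 *	int ret = omap_tiler_pages(client, handle, &n, &addrs);
 *
 * On success addrs points at the n tiler page addresses recorded at
 * allocation time; the array is owned by the buffer and must not be
 * freed by the caller.
 */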

int omap_tiler_vinfo(struct ion_client *client, struct ion_handle *handle,
		     unsigned int *vstride, unsigned int *vsize)
{
	struct omap_tiler_info *info = ion_handle_buffer(handle)->priv_virt;

	*vstride = info->vstride;
	*vsize = info->vsize;

	return 0;
}

static int omap_tiler_heap_map_user(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    struct vm_area_struct *vma)
{
	struct omap_tiler_info *info = buffer->priv_virt;
	unsigned long addr = vma->vm_start;
	u32 vma_pages = (vma->vm_end - vma->vm_start) / PAGE_SIZE;
	int n_pages = min(vma_pages, info->n_tiler_pages);
	int i, ret = 0;
	pgprot_t vm_page_prot;

	/* Use writecombined mappings unless on OMAP5 or DRA7, which use
	   shared device mappings instead due to a h/w issue. */
	if (soc_is_omap54xx() || soc_is_dra7xx())
		vm_page_prot = __pgprot_modify(vma->vm_page_prot, L_PTE_MT_MASK,
					       L_PTE_MT_DEV_SHARED);
	else
		vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	if (info->fmt == TILER_PIXEL_FMT_PAGE) {
		/* since a 1D buffer is linear, map the whole buffer in
		   one shot */
		ret = remap_pfn_range(vma, addr,
				      __phys_to_pfn(info->tiler_addrs[0]),
				      (vma->vm_end - vma->vm_start),
				      vm_page_prot);
	} else {
		/* a 2D buffer is mapped one tiler page at a time */
		for (i = vma->vm_pgoff; i < n_pages; i++, addr += PAGE_SIZE) {
			ret = remap_pfn_range(vma, addr,
					      __phys_to_pfn(info->tiler_addrs[i]),
					      PAGE_SIZE,
					      vm_page_prot);
			if (ret)
				return ret;
		}
	}
	return ret;
}
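
/*
 * Single-shot allocator/free hooks handed to __sg_alloc_table() and
 * __sg_free_table() below: the entire scatterlist is kmalloc'd as one
 * array, sidestepping scatter-gather list chaining (see the comment in
 * omap_tiler_heap_map_dma()).
 */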
static struct scatterlist *sg_alloc(unsigned int nents, gfp_t gfp_mask)
{
	return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
}

static void sg_free(struct scatterlist *sg, unsigned int nents)
{
	kfree(sg);
}

struct sg_table *omap_tiler_heap_map_dma(struct ion_heap *heap,
					 struct ion_buffer *buffer)
{
	int ret, i;
	struct sg_table *table = NULL;
	struct scatterlist *sg;
	struct omap_tiler_info *info = buffer->priv_virt;
	phys_addr_t paddr;

	if (!info)
		return table;

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);

	/*
	 * sg_alloc_table() can only allocate multi-page scatter-gather
	 * list tables if the architecture supports scatter-gather list
	 * chaining, and ARM does not fall into that category.  Use
	 * __sg_alloc_table() instead and allocate all entries in one go.
	 * Otherwise, allocating beyond SG_MAX_SINGLE_ALLOC (i.e. when
	 * height > SG_MAX_SINGLE_ALLOC) would hit a BUG_ON in
	 * __sg_alloc_table().
	 */
	ret = __sg_alloc_table(table, info->tiler_handle->height, -1,
			       GFP_KERNEL, sg_alloc);
	if (ret) {
		kfree(table);
		return ERR_PTR(ret);
	}
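
	/*
	 * Build one scatterlist entry per tiler container row: row i
	 * starts i * phys_stride bytes past tiler_start, and the first
	 * vstride bytes of it belong to the buffer's virtual view.
	 */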
	sg = table->sgl;
	for (i = 0; i < info->tiler_handle->height; i++) {
		paddr = info->tiler_start + (i * info->phys_stride);
		sg_set_page(sg, phys_to_page(paddr), info->vstride, 0);
		sg = sg_next(sg);
	}

	return table;
}

void omap_tiler_heap_unmap_dma(struct ion_heap *heap,
			       struct ion_buffer *buffer)
{
	__sg_free_table(buffer->sg_table, -1, sg_free);
}

void *ion_tiler_heap_map_kernel(struct ion_heap *heap,
				struct ion_buffer *buffer)
{
	/* TODO: determine how to implement this op; it appears to be
	 * mandatory in the new ION
	 */
	return NULL;
}

void ion_tiler_heap_unmap_kernel(struct ion_heap *heap,
				 struct ion_buffer *buffer)
{
	/* TODO: determine how to implement this op; it appears to be
	 * mandatory in the new ION
	 */
}

static struct ion_heap_ops omap_tiler_ops = {
	.allocate = omap_tiler_heap_allocate,
	.free = omap_tiler_heap_free,
	.phys = omap_tiler_phys,
	.map_user = omap_tiler_heap_map_user,
	.map_dma = omap_tiler_heap_map_dma,
	.unmap_dma = omap_tiler_heap_unmap_dma,
	.map_kernel = ion_tiler_heap_map_kernel,
	.unmap_kernel = ion_tiler_heap_unmap_kernel,
};

struct ion_heap *omap_tiler_heap_create(struct ion_platform_heap *data)
{
	struct omap_ion_heap *heap;

	heap = kzalloc(sizeof(struct omap_ion_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
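
	/*
	 * Carveout-backed tiler heaps hand out physical pages from a
	 * gen_pool covering the heap's base/size.  A min_alloc_order of
	 * 12 gives the pool PAGE_SIZE (4 KiB) allocation granularity,
	 * and a nid of -1 means any NUMA node.
	 */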
	if ((data->id == OMAP_ION_HEAP_TILER) ||
	    (data->id == OMAP_ION_HEAP_NONSECURE_TILER)) {
		heap->pool = gen_pool_create(12, -1);
		if (!heap->pool) {
			kfree(heap);
			return ERR_PTR(-ENOMEM);
		}
		heap->base = data->base;
		gen_pool_add(heap->pool, heap->base, data->size, -1);
	}
	heap->heap.ops = &omap_tiler_ops;
	heap->heap.type = OMAP_ION_HEAP_TYPE_TILER;
	heap->heap.name = data->name;
	heap->heap.id = data->id;
	return &heap->heap;
}

void omap_tiler_heap_destroy(struct ion_heap *heap)
{
	struct omap_ion_heap *omap_ion_heap = (struct omap_ion_heap *)heap;

	if (omap_ion_heap->pool)
		gen_pool_destroy(omap_ion_heap->pool);
	kfree(heap);
}