/*
 * drivers/gpu/ion/omap/omap_tiler_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/ion.h>
#include <linux/mm.h>
#include <linux/omap_ion.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "../../../drivers/gpu/drm/omapdrm/omap_dmm_tiler.h"
#include <asm/mach/map.h>
#include <asm/page.h>
#include "../../../arch/arm/mach-omap2/soc.h"

#include "../ion_priv.h"
#include "omap_ion_priv.h"
#include <asm/cacheflush.h>
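/*
 * An omap_ion_heap embeds the generic ion_heap and, for the carveout-backed
 * heap ids, a gen_pool that hands out the physical pages used to back the
 * tiler address space.
 */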
struct omap_ion_heap {
        struct ion_heap heap;
        struct gen_pool *pool;
        ion_phys_addr_t base;
};
struct omap_tiler_info {
        struct tiler_block *tiler_handle;       /* handle of the allocation
                                                   in tiler */
        bool lump;                      /* true for a single lump allocation */
        u32 n_phys_pages;               /* number of physical pages */
        u32 *phys_addrs;                /* array of addrs of physical pages */
        u32 n_tiler_pages;              /* number of tiler pages */
        u32 *tiler_addrs;               /* array of addrs of tiler pages */
        int fmt;                        /* tiler buffer format */
        u32 tiler_start;                /* start addr in tiler -- if not page
                                           aligned this may not equal the
                                           first entry of tiler_addrs */
        u32 vsize;                      /* virtual size of buffer */
        u32 vstride;                    /* virtual stride of buffer */
        u32 phys_stride;                /* physical stride of buffer */
};
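/*
 * Note that phys_addrs and tiler_addrs are not separate allocations:
 * omap_tiler_alloc() allocates the struct plus both arrays in a single
 * kzalloc() and points phys_addrs at (info + 1).
 */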
static int omap_tiler_heap_allocate(struct ion_heap *heap,
                                    struct ion_buffer *buffer,
                                    unsigned long size, unsigned long align,
                                    unsigned long flags)
{
        /*
         * A zero-sized request means the buffer is already allocated and
         * populated; we only get here through dummy handle creation, so
         * simply stash the allocation info and return.
         */
        if (size == 0) {
                /*
                 * Store the struct omap_tiler_info pointer in the buffer.
                 * It is used later in the map_dma function to build the
                 * sg list for the tiler buffer.
                 */
                buffer->priv_virt = (void *)flags;
                return 0;
        }

        pr_err("%s: this should never be called directly -- use the "
                        "OMAP_ION_TILER_ALLOC command of ION_IOC_CUSTOM "
                        "instead\n", __func__);
        return -EINVAL;
}
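/*
 * For reference, a minimal sketch of how a kernel-side caller would reach
 * this heap (hypothetical values and variable names; the real entry point
 * is the OMAP_ION_TILER_ALLOC case of the ION_IOC_CUSTOM ioctl handler):
 *
 *      struct omap_ion_tiler_alloc_data alloc_data = {
 *              .w = 1920,
 *              .h = 1080,
 *              .fmt = TILFMT_8BIT,
 *      };
 *      int ret = omap_tiler_alloc(tiler_heap, client, &alloc_data);
 *      // on success, alloc_data.handle, .stride and .offset are filled in
 */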
static int omap_tiler_alloc_carveout(struct ion_heap *heap,
                                     struct omap_tiler_info *info)
{
        struct omap_ion_heap *omap_heap = (struct omap_ion_heap *)heap;
        int i;
        int ret;
        ion_phys_addr_t addr;

        /* first try to grab the whole buffer as one contiguous lump */
        addr = gen_pool_alloc(omap_heap->pool, info->n_phys_pages * PAGE_SIZE);
        if (addr) {
                info->lump = true;
                for (i = 0; i < info->n_phys_pages; i++)
                        info->phys_addrs[i] = addr + i * PAGE_SIZE;
                return 0;
        }

        /* fall back to allocating the backing pages one at a time */
        for (i = 0; i < info->n_phys_pages; i++) {
                addr = gen_pool_alloc(omap_heap->pool, PAGE_SIZE);

                if (addr == 0) {
                        ret = -ENOMEM;
                        pr_err("%s: failed to allocate pages to back "
                               "tiler address space\n", __func__);
                        goto err;
                }
                info->phys_addrs[i] = addr;
        }
        return 0;

err:
        for (i -= 1; i >= 0; i--)
                gen_pool_free(omap_heap->pool, info->phys_addrs[i], PAGE_SIZE);
        return ret;
}
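/* Return carveout pages to the pool -- either as the single lump or page
 * by page, matching how omap_tiler_alloc_carveout() obtained them. */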
static void omap_tiler_free_carveout(struct ion_heap *heap,
                                     struct omap_tiler_info *info)
{
        struct omap_ion_heap *omap_heap = (struct omap_ion_heap *)heap;
        int i;

        if (info->lump) {
                gen_pool_free(omap_heap->pool,
                                info->phys_addrs[0],
                                info->n_phys_pages * PAGE_SIZE);
                return;
        }

        for (i = 0; i < info->n_phys_pages; i++)
                gen_pool_free(omap_heap->pool, info->phys_addrs[i], PAGE_SIZE);
}
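/*
 * Main allocation path, reached through the OMAP_ION_TILER_ALLOC custom
 * ioctl: compute the page counts, reserve tiler address space, back it
 * with carveout pages, pin them, and wrap everything in an ion handle.
 */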
int omap_tiler_alloc(struct ion_heap *heap,
                     struct ion_client *client,
                     struct omap_ion_tiler_alloc_data *data)
{
        struct ion_handle *handle;
        struct ion_buffer *buffer;
        struct omap_tiler_info *info = NULL;
        u32 n_phys_pages;
        u32 n_tiler_pages;
        int i = 0, ret;
        uint32_t remainder;
        dma_addr_t ssptr;

        if (data->fmt == TILFMT_PAGE && data->h != 1) {
                pr_err("%s: page mode (1D) allocations must have a height of "
                                "one\n", __func__);
                return -EINVAL;
        }

        if (data->fmt == TILFMT_PAGE) {
                /* calculate required pages the usual way */
                n_phys_pages = round_up(data->w, PAGE_SIZE) >> PAGE_SHIFT;
                n_tiler_pages = n_phys_pages;
        } else {
                /* call APIs to calculate 2D buffer page requirements */
                n_phys_pages = tiler_size(data->fmt, data->w, data->h) >>
                                PAGE_SHIFT;
                n_tiler_pages = tiler_vsize(data->fmt, data->w, data->h) >>
                                        PAGE_SHIFT;
        }

        /* allocate the info struct plus both address arrays in one block */
        info = kzalloc(sizeof(struct omap_tiler_info) +
                       sizeof(u32) * n_phys_pages +
                       sizeof(u32) * n_tiler_pages, GFP_KERNEL);
        if (!info)
                return -ENOMEM;

        info->n_phys_pages = n_phys_pages;
        info->n_tiler_pages = n_tiler_pages;
        info->phys_addrs = (u32 *)(info + 1);
        info->tiler_addrs = info->phys_addrs + n_phys_pages;
        info->fmt = data->fmt;

        /* Allocate tiler space.
           FIXME: we only support PAGE_SIZE alignment right now. */
        if (data->fmt == TILFMT_PAGE)
                info->tiler_handle = tiler_reserve_1d(data->w);
        else
                info->tiler_handle = tiler_reserve_2d(data->fmt, data->w,
                                data->h, PAGE_SIZE);

        /* check the handle before dereferencing it */
        if (IS_ERR_OR_NULL(info->tiler_handle)) {
                ret = PTR_ERR(info->tiler_handle);
                pr_err("%s: failure to allocate address space from tiler\n",
                       __func__);
                goto err_got_mem;
        }

        info->tiler_handle->width = data->w;
        info->tiler_handle->height = data->h;

        /* get physical address of tiler buffer */
        info->tiler_start = tiler_ssptr(info->tiler_handle);

        /* fill in tiler pages by using ssptr and stride */
        info->vstride = info->tiler_handle->stride;
        info->vsize = n_tiler_pages << PAGE_SHIFT;
        info->phys_stride = (data->fmt == TILFMT_PAGE) ? info->vstride :
                                tiler_stride(data->fmt, 0);
        ssptr = info->tiler_start;
        remainder = info->vstride;

        for (i = 0; i < n_tiler_pages; i++) {
                info->tiler_addrs[i] = PAGE_ALIGN(ssptr);
                ssptr += PAGE_SIZE;
                remainder -= PAGE_SIZE;

                /* see if we are done with this line.  If so, go to the next
                   line */
                if (!remainder) {
                        remainder = info->vstride;
                        ssptr += info->phys_stride - info->vstride;
                }
        }

        if ((heap->id == OMAP_ION_HEAP_TILER) ||
            (heap->id == OMAP_ION_HEAP_NONSECURE_TILER)) {
                ret = omap_tiler_alloc_carveout(heap, info);
                if (ret)
                        goto err_got_tiler;

                ret = tiler_pin_phys(info->tiler_handle, info->phys_addrs,
                                        info->n_phys_pages);

                if (ret) {
                        pr_err("%s: failure to pin pages to tiler\n",
                                __func__);
                        goto err_got_carveout;
                }
        }

        data->stride = info->vstride;

        /* create an ion handle for the allocation */
        handle = ion_alloc(client, -1, 0, 1 << OMAP_ION_HEAP_TILER,
                           (unsigned int)info);
        if (IS_ERR_OR_NULL(handle)) {
                ret = PTR_ERR(handle);
                pr_err("%s: failure to allocate handle to manage "
                                "tiler allocation\n", __func__);
                goto err;
        }

        buffer = ion_handle_buffer(handle);
        buffer->size = n_tiler_pages * PAGE_SIZE;
        data->handle = handle;
        data->offset = (size_t)(info->tiler_start & ~PAGE_MASK);

        return 0;

err:
        tiler_unpin(info->tiler_handle);
err_got_carveout:
        if ((heap->id == OMAP_ION_HEAP_TILER) ||
            (heap->id == OMAP_ION_HEAP_NONSECURE_TILER)) {
                omap_tiler_free_carveout(heap, info);
        }
err_got_tiler:
        tiler_release(info->tiler_handle);
err_got_mem:
        kfree(info);
        return ret;
}
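/* Teardown mirrors allocation: unpin from tiler, release the tiler address
 * space, return carveout pages, then free the bookkeeping struct. */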
static void omap_tiler_heap_free(struct ion_buffer *buffer)
{
        struct omap_tiler_info *info = buffer->priv_virt;

        tiler_unpin(info->tiler_handle);
        tiler_release(info->tiler_handle);

        if ((buffer->heap->id == OMAP_ION_HEAP_TILER) ||
            (buffer->heap->id == OMAP_ION_HEAP_NONSECURE_TILER))
                omap_tiler_free_carveout(buffer->heap, info);

        kfree(info);
}
static int omap_tiler_phys(struct ion_heap *heap,
                           struct ion_buffer *buffer,
                           ion_phys_addr_t *addr, size_t *len)
{
        struct omap_tiler_info *info = buffer->priv_virt;

        *addr = info->tiler_start;
        *len = buffer->size;
        return 0;
}
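/*
 * Export the per-page tiler addresses of a buffer to other kernel code.
 * The ion_phys() call below doubles as a check that the handle is valid
 * in this client before internal state is handed out.
 */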
int omap_tiler_pages(struct ion_client *client, struct ion_handle *handle,
                     int *n, u32 **tiler_addrs)
{
        ion_phys_addr_t addr;
        size_t len;
        int ret;
        struct omap_tiler_info *info = ion_handle_buffer(handle)->priv_virt;

        /* validate that the handle exists in this client */
        ret = ion_phys(client, handle, &addr, &len);
        if (ret)
                return ret;

        *n = info->n_tiler_pages;
        *tiler_addrs = info->tiler_addrs;
        return 0;
}
EXPORT_SYMBOL(omap_tiler_pages);
int omap_tiler_vinfo(struct ion_client *client, struct ion_handle *handle,
                        unsigned int *vstride, unsigned int *vsize)
{
        struct omap_tiler_info *info = ion_handle_buffer(handle)->priv_virt;

        *vstride = info->vstride;
        *vsize = info->vsize;

        return 0;
}
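/*
 * Userspace mapping: a 1D (page mode) buffer is linear in the tiler
 * aperture and can be remapped in one shot, while a 2D buffer has to be
 * mapped page by page because consecutive tiler pages are not physically
 * contiguous.
 */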
static int omap_tiler_heap_map_user(struct ion_heap *heap,
                struct ion_buffer *buffer, struct vm_area_struct *vma)
{
        struct omap_tiler_info *info = buffer->priv_virt;
        unsigned long addr = vma->vm_start;
        u32 vma_pages = (vma->vm_end - vma->vm_start) / PAGE_SIZE;
        int n_pages = min(vma_pages, info->n_tiler_pages);
        int i, ret = 0;
        pgprot_t vm_page_prot;

        /* Use writecombined mappings unless on OMAP5.  On OMAP5, use
           shared device mappings due to a h/w issue. */
        if (soc_is_omap54xx())
                vm_page_prot = __pgprot_modify(vma->vm_page_prot, L_PTE_MT_MASK,
                                                L_PTE_MT_DEV_SHARED);
        else
                vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

        if (TILER_PIXEL_FMT_PAGE == info->fmt) {
                /* since a 1D buffer is linear, map the whole buffer in one shot */
                ret = remap_pfn_range(vma, addr,
                                 __phys_to_pfn(info->tiler_addrs[0]),
                                (vma->vm_end - vma->vm_start),
                                vm_page_prot);
        } else {
                for (i = vma->vm_pgoff; i < n_pages; i++, addr += PAGE_SIZE) {
                        ret = remap_pfn_range(vma, addr,
                                 __phys_to_pfn(info->tiler_addrs[i]),
                                PAGE_SIZE,
                                vm_page_prot);
                        if (ret)
                                return ret;
                }
        }
        return ret;
}
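/*
 * Allocator callbacks for __sg_alloc_table()/__sg_free_table(), so that
 * the whole scatterlist is kmalloc'd in one piece (see the chaining note
 * in omap_tiler_heap_map_dma() below).
 */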
static struct scatterlist *sg_alloc(unsigned int nents, gfp_t gfp_mask)
{
        return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
}

static void sg_free(struct scatterlist *sg, unsigned int nents)
{
        kfree(sg);
}
struct sg_table *omap_tiler_heap_map_dma(struct ion_heap *heap,
                                              struct ion_buffer *buffer)
{
        int ret, i;
        struct sg_table *table = NULL;
        struct scatterlist *sg;
        struct omap_tiler_info *info = NULL;
        phys_addr_t paddr;

        info = buffer->priv_virt;

        if (!info)
                return NULL;

        table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (!table)
                return ERR_PTR(-ENOMEM);
        /* sg_alloc_table can only allocate multi-page scatter-gather list
         * tables if the architecture supports scatter-gather list chaining,
         * and ARM doesn't fit in that category.
         * Use __sg_alloc_table instead of sg_alloc_table and allocate all
         * entries in one go.  Otherwise, trying to allocate beyond
         * SG_MAX_SINGLE_ALLOC entries (when height > SG_MAX_SINGLE_ALLOC)
         * will hit a BUG_ON in __sg_alloc_table.
         */
        ret = __sg_alloc_table(table, info->tiler_handle->height, -1,
                               GFP_KERNEL, sg_alloc);
        if (ret) {
                kfree(table);
                return ERR_PTR(ret);
        }

        /* one sg entry per tiler row, each a physically contiguous line */
        sg = table->sgl;
        for (i = 0; i < info->tiler_handle->height; i++) {
                paddr = info->tiler_start + (i * info->phys_stride);
                sg_set_page(sg, phys_to_page(paddr), info->vstride, 0);
                sg = sg_next(sg);
        }

        return table;
}
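/* Mirror of map_dma: release the table built with __sg_alloc_table(). */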
void omap_tiler_heap_unmap_dma(struct ion_heap *heap,
                                 struct ion_buffer *buffer)
{
        __sg_free_table(buffer->sg_table, -1, sg_free);
}
void *ion_tiler_heap_map_kernel(struct ion_heap *heap,
                                   struct ion_buffer *buffer)
{
        /* TODO: work out how to implement this; the new ION core appears
         * to require heaps to provide a map_kernel op, so stub it for now.
         */
        return NULL;
}

void ion_tiler_heap_unmap_kernel(struct ion_heap *heap,
                                    struct ion_buffer *buffer)
{
        /* TODO: stubbed for the same reason as ion_tiler_heap_map_kernel */
        return;
}
static struct ion_heap_ops omap_tiler_ops = {
        .allocate = omap_tiler_heap_allocate,
        .free = omap_tiler_heap_free,
        .phys = omap_tiler_phys,
        .map_user = omap_tiler_heap_map_user,
        .map_dma = omap_tiler_heap_map_dma,
        .unmap_dma = omap_tiler_heap_unmap_dma,
        .map_kernel = ion_tiler_heap_map_kernel,
        .unmap_kernel = ion_tiler_heap_unmap_kernel,
};
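/*
 * Create a tiler heap from platform data.  For the carveout-backed heap
 * ids, a gen_pool with a 2^12-byte (PAGE_SIZE) minimum allocation order
 * is built over the [base, base + size) carveout region.
 */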
struct ion_heap *omap_tiler_heap_create(struct ion_platform_heap *data)
{
        struct omap_ion_heap *heap;

        heap = kzalloc(sizeof(struct omap_ion_heap), GFP_KERNEL);
        if (!heap)
                return ERR_PTR(-ENOMEM);

        if ((data->id == OMAP_ION_HEAP_TILER) ||
            (data->id == OMAP_ION_HEAP_NONSECURE_TILER)) {
                heap->pool = gen_pool_create(12, -1);
                if (!heap->pool) {
                        kfree(heap);
                        return ERR_PTR(-ENOMEM);
                }
                heap->base = data->base;
                gen_pool_add(heap->pool, heap->base, data->size, -1);
        }
        heap->heap.ops = &omap_tiler_ops;
        heap->heap.type = OMAP_ION_HEAP_TYPE_TILER;
        heap->heap.name = data->name;
        heap->heap.id = data->id;
        return &heap->heap;
}
void omap_tiler_heap_destroy(struct ion_heap *heap)
{
        struct omap_ion_heap *omap_ion_heap = (struct omap_ion_heap *)heap;

        if (omap_ion_heap->pool)
                gen_pool_destroy(omap_ion_heap->pool);
        kfree(heap);
}