gpu: ion: DRA7: ensure TILER 2d mappings are shared device
diff --git a/drivers/gpu/ion/omap/omap_tiler_heap.c b/drivers/gpu/ion/omap/omap_tiler_heap.c
index d469a99a25536244c968c510fe726b0bdbbf8e8d..cbd16cf6c522b436f07c12f32ea1b572a14f373f 100644
--- a/drivers/gpu/ion/omap/omap_tiler_heap.c
+++ b/drivers/gpu/ion/omap/omap_tiler_heap.c
@@ -23,7 +23,7 @@
 #include <linux/scatterlist.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
-#include "../../../drivers/staging/omapdrm/omap_dmm_tiler.h"
+#include "../../../drivers/gpu/drm/omapdrm/omap_dmm_tiler.h"
 #include <asm/mach/map.h>
 #include <asm/page.h>
 #include "../../../arch/arm/mach-omap2/soc.h"
@@ -46,7 +46,6 @@ struct omap_tiler_info {
        bool lump;                      /* true for a single lump allocation */
        u32 n_phys_pages;               /* number of physical pages */
        u32 *phys_addrs;                /* array addrs of pages */
-       struct page **phys_pages;/* array page pointers */
        u32 n_tiler_pages;              /* number of tiler pages */
        u32 *tiler_addrs;               /* array of addrs of tiler pages */
        int fmt;                        /* tiler buffer format */
@@ -55,6 +54,8 @@ struct omap_tiler_info {
                                           first entry onf tiler_addrs */
        u32 vsize;                      /* virtual stride of buffer */
        u32 vstride;                    /* virtual size of buffer */
+       u32 phys_stride;                /* physical stride of the buffer */
+       u32 flags;                      /* flags specifying cached or not */
 };
 
 static int omap_tiler_heap_allocate(struct ion_heap *heap,
@@ -62,8 +63,24 @@ static int omap_tiler_heap_allocate(struct ion_heap *heap,
                                    unsigned long size, unsigned long align,
                                    unsigned long flags)
 {
-       if (size == 0)
+       struct omap_tiler_info *info;
+
+       /* The buffer is already allocated and populated; we only get here
+        * through dummy handle creation, so simply return. */
+       if (size == 0) {
+               /*
+                * Store the struct omap_tiler_info pointer in the buffer here.
+                * It is used later in the map_dma function to build the
+                * sg list for the tiler buffer.
+                */
+               info = (struct omap_tiler_info *) flags;
+               if (!info)
+                       pr_err("%s: flags argument is not set up\n", __func__);
+               buffer->priv_virt = info;
+               /* Restore the correct flags inside the buffer */
+               buffer->flags = info->flags;
                return 0;
+       }
 
        pr_err("%s: This should never be called directly -- use the "
                        "OMAP_ION_TILER_ALLOC flag to the ION_IOC_CUSTOM "
@@ -84,7 +101,6 @@ static int omap_tiler_alloc_carveout(struct ion_heap *heap,
                info->lump = true;
                for (i = 0; i < info->n_phys_pages; i++)
                        info->phys_addrs[i] = addr + i * PAGE_SIZE;
-                       info->phys_pages[i] = phys_to_page(info->phys_addrs[i]);
                return 0;
        }
 
@@ -98,7 +114,6 @@ static int omap_tiler_alloc_carveout(struct ion_heap *heap,
                        goto err;
                }
                info->phys_addrs[i] = addr;
-               info->phys_pages[i] = phys_to_page(addr);
        }
        return 0;
 
@@ -135,7 +150,7 @@ int omap_tiler_alloc(struct ion_heap *heap,
        u32 n_phys_pages;
        u32 n_tiler_pages;
        int i = 0, ret;
-       uint32_t phys_stride, remainder;
+       uint32_t remainder;
        dma_addr_t ssptr;
 
        if (data->fmt == TILFMT_PAGE && data->h != 1) {
@@ -157,7 +172,6 @@ int omap_tiler_alloc(struct ion_heap *heap,
        }
 
        info = kzalloc(sizeof(struct omap_tiler_info) +
-                      sizeof(u32) * n_phys_pages +
                       sizeof(u32) * n_phys_pages +
                       sizeof(u32) * n_tiler_pages, GFP_KERNEL);
        if (!info)
@@ -166,9 +180,9 @@ int omap_tiler_alloc(struct ion_heap *heap,
        info->n_phys_pages = n_phys_pages;
        info->n_tiler_pages = n_tiler_pages;
        info->phys_addrs = (u32 *)(info + 1);
-       info->phys_pages = (struct page **) (info->phys_addrs + n_phys_pages);
        info->tiler_addrs = info->phys_addrs + n_phys_pages;
        info->fmt = data->fmt;
+       info->flags = data->flags;
 
        /* Allocate tiler space
           FIXME: we only support PAGE_SIZE alignment right now. */
@@ -178,6 +192,9 @@ int omap_tiler_alloc(struct ion_heap *heap,
                info->tiler_handle = tiler_reserve_2d(data->fmt, data->w,
                                data->h, PAGE_SIZE);
 
+       info->tiler_handle->width = data->w;
+       info->tiler_handle->height = data->h;
+
        if (IS_ERR_OR_NULL(info->tiler_handle)) {
                ret = PTR_ERR(info->tiler_handle);
                pr_err("%s: failure to allocate address space from tiler\n",
@@ -188,13 +205,10 @@ int omap_tiler_alloc(struct ion_heap *heap,
        /* get physical address of tiler buffer */
        info->tiler_start = tiler_ssptr(info->tiler_handle);
 
-       /*todo: need to check if this will work when passed to phys to page fn */
-       buffer->priv_phys = info->tiler_start;
-
        /* fill in tiler pages by using ssptr and stride */
        info->vstride = info->tiler_handle->stride;
        info->vsize = n_tiler_pages << PAGE_SHIFT;
-       phys_stride = (data->fmt == TILFMT_PAGE) ? info->vstride :
+       info->phys_stride = (data->fmt == TILFMT_PAGE) ? info->vstride :
                                tiler_stride(data->fmt, 0);
        ssptr = info->tiler_start;
        remainder = info->vstride;
@@ -208,7 +222,7 @@ int omap_tiler_alloc(struct ion_heap *heap,
                   line */
                if (!remainder) {
                        remainder = info->vstride;
-                       ssptr += phys_stride - info->vstride;
+                       ssptr += info->phys_stride - info->vstride;
                }
        }
 
@@ -218,8 +232,9 @@ int omap_tiler_alloc(struct ion_heap *heap,
                if (ret)
                        goto err_got_tiler;
 
-               ret = tiler_pin(info->tiler_handle, info->phys_pages,
-                                     info->n_phys_pages,0, false);
+               ret = tiler_pin_phys(info->tiler_handle, info->phys_addrs,
+                                       info->n_phys_pages);
+
                if (ret) {
                        pr_err("%s: failure to pin pages to tiler\n",
                                __func__);
@@ -230,7 +245,7 @@ int omap_tiler_alloc(struct ion_heap *heap,
        data->stride = info->vstride;
 
        /* create an ion handle  for the allocation */
-       handle = ion_alloc(client, 0, 0, 1<<OMAP_ION_HEAP_TILER, 1 << OMAP_ION_HEAP_TILER);
+       handle = ion_alloc(client, -1, 0, 1 << OMAP_ION_HEAP_TILER, (unsigned int) info);
        if (IS_ERR_OR_NULL(handle)) {
                ret = PTR_ERR(handle);
                pr_err("%s: failure to allocate handle to manage "
@@ -240,7 +255,6 @@ int omap_tiler_alloc(struct ion_heap *heap,
 
        buffer = ion_handle_buffer(handle);
        buffer->size = n_tiler_pages * PAGE_SIZE;
-       buffer->priv_virt = info;
        data->handle = handle;
        data->offset = (size_t)(info->tiler_start & ~PAGE_MASK);
 
@@ -268,9 +282,8 @@ static void omap_tiler_heap_free(struct ion_buffer *buffer)
        tiler_release(info->tiler_handle);
 
        if ((buffer->heap->id == OMAP_ION_HEAP_TILER) ||
-           (buffer->heap->id == OMAP_ION_HEAP_NONSECURE_TILER)) {
+           (buffer->heap->id == OMAP_ION_HEAP_NONSECURE_TILER))
                omap_tiler_free_carveout(buffer->heap, info);
-       }
 
        kfree(info);
 }
@@ -303,6 +316,7 @@ int omap_tiler_pages(struct ion_client *client, struct ion_handle *handle,
        *tiler_addrs = info->tiler_addrs;
        return 0;
 }
+EXPORT_SYMBOL(omap_tiler_pages);
 
 int omap_tiler_vinfo(struct ion_client *client, struct ion_handle *handle,
                        unsigned int *vstride, unsigned int *vsize)
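
The new EXPORT_SYMBOL above lets other kernel modules (a display or camera driver, say) resolve a TILER-backed ION handle into its tiler page addresses. A rough sketch of such a caller, assuming the two remaining parameters of omap_tiler_pages() are an output count and the address array, as the visible body (*tiler_addrs = info->tiler_addrs) suggests:

/* Hypothetical in-kernel caller -- the trailing parameters of
 * omap_tiler_pages() are assumed; also assumes this driver's ION headers. */
static int dump_tiler_pages(struct ion_client *client, struct ion_handle *handle)
{
        u32 *tiler_addrs;
        int n, i, ret;

        ret = omap_tiler_pages(client, handle, &n, &tiler_addrs);
        if (ret)
                return ret;

        for (i = 0; i < n; i++)
                pr_info("tiler page %d at 0x%08x\n", i, tiler_addrs[i]);

        return 0;
}
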
@@ -325,9 +339,9 @@ static int omap_tiler_heap_map_user(struct ion_heap *heap,
        int i, ret = 0;
        pgprot_t vm_page_prot;
 
-       /* Use writecombined mappings unless on OMAP5.  If OMAP5, use
+       /* Use writecombined mappings unless on OMAP5 or DRA7, which need
        shared device due to h/w issue. */
-       if (soc_is_omap54xx())
+       if (soc_is_omap54xx() || soc_is_dra7xx())
                vm_page_prot = __pgprot_modify(vma->vm_page_prot, L_PTE_MT_MASK,
                                                L_PTE_MT_DEV_SHARED);
        else
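
The final hunk below replaces the placeholder single-entry sg_table with one scatterlist entry per TILER row: entry i spans vstride bytes starting at tiler_start + i * phys_stride, so the gap between the buffer row and the larger physical container stride is simply skipped. A standalone illustration of that address math, using purely hypothetical values, not taken from the patch:

/* Hypothetical illustration of the per-row layout built by map_dma below. */
#include <stdio.h>

int main(void)
{
        /* Example values only; the real ones come from tiler_reserve_2d()/tiler_stride(). */
        unsigned long tiler_start = 0x60000000UL;  /* assumed 2D container base */
        unsigned long phys_stride = 32768;         /* assumed container row stride */
        unsigned long vstride = 4096;              /* assumed buffer row stride */
        unsigned int height = 4, i;

        for (i = 0; i < height; i++)
                printf("row %u: sg entry of %lu bytes at 0x%08lx\n",
                       i, vstride, tiler_start + i * phys_stride);

        return 0;
}
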
@@ -352,34 +366,62 @@ static int omap_tiler_heap_map_user(struct ion_heap *heap,
        return ret;
 }
 
+static struct scatterlist *sg_alloc(unsigned int nents, gfp_t gfp_mask)
+{
+       return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
+}
+
+static void sg_free(struct scatterlist *sg, unsigned int nents)
+{
+       kfree(sg);
+}
+
 struct sg_table *omap_tiler_heap_map_dma(struct ion_heap *heap,
                                              struct ion_buffer *buffer)
 {
-       struct sg_table *table;
-       int ret;
+       int ret, i;
+       struct sg_table *table = NULL;
+       struct scatterlist *sg;
+       struct omap_tiler_info *info = NULL;
+       phys_addr_t paddr;
 
-       /*
-        * this function is currently a placeholder. this needs to be modified to pass the
-        * correct physical pointers
-        */
+
+       info = buffer->priv_virt;
+
+       if (!info)
+               return table;
 
        table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (!table)
                return ERR_PTR(-ENOMEM);
-       ret = sg_alloc_table(table, 1, GFP_KERNEL);
+       /*
+        * sg_alloc_table() can only allocate multi-page scatter-gather tables
+        * when the architecture supports scatterlist chaining, which ARM does
+        * not. Use __sg_alloc_table() with a custom allocator so all entries
+        * are allocated in one go; otherwise, requesting more than
+        * SG_MAX_SINGLE_ALLOC entries would hit a BUG_ON in __sg_alloc_table().
+        */
+
+       ret = __sg_alloc_table(table, info->tiler_handle->height, -1, GFP_KERNEL, sg_alloc);
        if (ret) {
                kfree(table);
                return ERR_PTR(ret);
        }
-       sg_set_page(table->sgl, phys_to_page(buffer->priv_phys), buffer->size,
-                   0);
+
+       sg = table->sgl;
+       for (i = 0; i < info->tiler_handle->height; i++) {
+               paddr = info->tiler_start + (i * info->phys_stride);
+               sg_set_page(sg, phys_to_page(paddr), info->vstride, 0);
+               sg = sg_next(sg);
+       }
+
        return table;
 }
 
 void omap_tiler_heap_unmap_dma(struct ion_heap *heap,
                                 struct ion_buffer *buffer)
 {
-       sg_free_table(buffer->sg_table);
+       __sg_free_table(buffer->sg_table, -1, sg_free);
 }
 
 void *ion_tiler_heap_map_kernel(struct ion_heap *heap,