Merge branch 'p-ti-android-3.8.y-video' master
authorSundar Raman <a0393242@ti.com>
Fri, 26 Jul 2013 21:32:39 +0000 (16:32 -0500)
committerSundar Raman <a0393242@ti.com>
Fri, 26 Jul 2013 21:32:39 +0000 (16:32 -0500)
arch/arm/boot/dts/dra7-evm.dts
arch/arm/boot/dts/omap5-uevm.dts
drivers/gpu/ion/ion.c
drivers/gpu/ion/omap/omap_ion.c
drivers/gpu/ion/omap/omap_tiler_heap.c
include/linux/ion.h

index 2c902e951943c6be47b69b14c0e0544eeedd8b3b..bc297a466512290f3c12d8f7f9727edb3e68f30c 100755 (executable)
@@ -7,6 +7,18 @@
  */
 /dts-v1/;
 
+/*
+ * Following are the carveout addresses and the sizes for ION. SMC is not reserved for now
+ * C0000000 - SDRAM+1G
+ * BFD00000 - SMC (3MB)
+ * BA300000 - ION (90MB)
+ * B4300000 - TILER SECURE (81 MB)
+ * B3400000 - TILER NONSECURE (15 MB)
+*/
+/memreserve/ 0xba300000 0x5a00000;
+/memreserve/ 0xb5200000 0x5100000;
+/memreserve/ 0xb4300000 0xf00000;
+
 /include/ "dra7.dtsi"
 
 / {
index a339f321c2a220e970dd6b6a8b5c8ac1caad6f43..5694a6fc052e7eaf951a5f65c50ad097fb130239 100644 (file)
@@ -8,16 +8,16 @@
 /dts-v1/;
 
 /*
- * Following are the carveout addresses and the sizes. SMC is not reserved for now
+ * Following are the carveout addresses and the sizes for ION. SMC is not reserved for now
  * C0000000 - SDRAM+1G
  * BFD00000 - SMC (3MB)
  * BA300000 - ION (90MB)
- * B4300000 - TILER SECURE (96 MB)
+ * B4300000 - TILER SECURE (81 MB)
  * B3400000 - TILER NONSECURE (15 MB)
 */
 /memreserve/ 0xba300000 0x5a00000;
-/memreserve/ 0xb4300000 0x6000000;
-/memreserve/ 0xb3400000 0xf00000;
+/memreserve/ 0xb5200000 0x5100000;
+/memreserve/ 0xb4300000 0xf00000;
 
 /include/ "omap5.dtsi"
 /include/ "samsung_k3pe0e000b.dtsi"
index 8f477880f040fe5985617acedd617bce7c988a7c..d7c780ff35b445d06a005f10472d268d5b2fd688 100644 (file)
@@ -951,7 +951,7 @@ end:
 }
 EXPORT_SYMBOL(ion_import_dma_buf);
 
-static int ion_sync_for_device(struct ion_client *client, int fd)
+static int ion_sync_for_device(struct ion_client *client, int fd, enum ion_data_direction dir)
 {
        struct dma_buf *dmabuf;
        struct ion_buffer *buffer;
@@ -969,8 +969,16 @@ static int ion_sync_for_device(struct ion_client *client, int fd)
        }
        buffer = dmabuf->priv;
 
-       dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
-                              buffer->sg_table->nents, DMA_BIDIRECTIONAL);
+       if(dir == ION_FROM_DEVICE)
+               dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
+                                      buffer->sg_table->nents, DMA_FROM_DEVICE);
+       else if(dir == ION_TO_DEVICE)
+               dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
+                                      buffer->sg_table->nents, DMA_TO_DEVICE);
+       else if(dir == ION_BIDIRECTIONAL)
+               dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
+                                      buffer->sg_table->nents, DMA_BIDIRECTIONAL);
+
        dma_buf_put(dmabuf);
        return 0;
 }
@@ -1053,7 +1061,7 @@ static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
                if (copy_from_user(&data, (void __user *)arg,
                                   sizeof(struct ion_fd_data)))
                        return -EFAULT;
-               ion_sync_for_device(client, data.fd);
+               ion_sync_for_device(client, data.fd, data.dir);
                break;
        }
        case ION_IOC_CUSTOM:
index e9b91f0787d3dee3155c41bedd78b0c4ce19a179..df8f3818653826ef95aa569574ef84408875e3dc 100755 (executable)
@@ -124,19 +124,26 @@ static int omap_ion_probe(struct platform_device *pdev)
        uint omap_ion_heap_tiler_size = 0;
        uint omap_ion_heap_nonsecure_tiler_size = 0;
        
-       omap_ion_device = ion_device_create(omap_ion_ioctl);
-       if (IS_ERR_OR_NULL(omap_ion_device)) {
-               kfree(heaps);
-               return PTR_ERR(omap_ion_device);
-       }
-
        if (node) {
                of_property_read_u32(node, "ti,omap_ion_heap_secure_input_base",
                                     &omap_ion_heap_secure_input_base);
                of_property_read_u32(node, "ti,omap_ion_heap_tiler_base",
-                                    &omap_ion_heap_tiler_size);
+                                    &omap_ion_heap_tiler_base);
                of_property_read_u32(node, "ti,omap_ion_heap_nonsecure_tiler_base",
                                     &omap_ion_heap_nonsecure_tiler_base);
+               if (omap_ion_heap_secure_input_base == 0
+                       || omap_ion_heap_tiler_base == 0
+                       || omap_ion_heap_nonsecure_tiler_base == 0) {
+                       pr_err("%s: carveout memory address is null. please check dts file\n"
+                               "omap_ion_heap_secure_input_base = 0x%x\n"
+                               "omap_ion_heap_tiler_base = 0x%x\n"
+                               "omap_ion_heap_nonsecure_tiler_base = 0x%x\n"
+                               , __func__
+                               , omap_ion_heap_secure_input_base
+                               , omap_ion_heap_tiler_base
+                               , omap_ion_heap_nonsecure_tiler_base);
+                       return -EFAULT;
+               }
 
                of_property_read_u32(node, "ti,omap_ion_heap_secure_input_size",
                                     &omap_ion_heap_secure_input_size);
@@ -144,8 +151,30 @@ static int omap_ion_probe(struct platform_device *pdev)
                                     &omap_ion_heap_tiler_size);
                of_property_read_u32(node, "ti,omap_ion_heap_nonsecure_tiler_size",
                                     &omap_ion_heap_nonsecure_tiler_size);
+               if (omap_ion_heap_secure_input_size == 0
+                       || omap_ion_heap_tiler_size == 0
+                       || omap_ion_heap_nonsecure_tiler_size == 0) {
+                       pr_err("%s: carveout memory size is zero. please check dts file\n"
+                               "omap_ion_heap_secure_input_size = 0x%x\n"
+                               "omap_ion_heap_tiler_size = 0x%x\n"
+                               "omap_ion_heap_nonsecure_tiler_size = 0x%x\n"
+                               , __func__
+                               , omap_ion_heap_secure_input_size
+                               , omap_ion_heap_tiler_size
+                               , omap_ion_heap_nonsecure_tiler_size);
+                       return -EINVAL;
+               }
+
+       } else {
+               pr_err("%s: no matching device tree node\n", __func__);
+               return -ENODEV;
        }
 
+       omap_ion_device = ion_device_create(omap_ion_ioctl);
+       if (IS_ERR_OR_NULL(omap_ion_device))
+               return omap_ion_device ? PTR_ERR(omap_ion_device) : -ENOMEM;
+
+
        num_heaps = omap_ion_data.nr;
 
        heaps = kzalloc(sizeof(struct ion_heap *)*num_heaps, GFP_KERNEL);
@@ -242,6 +271,7 @@ int omap_ion_share_fd_to_buffers(int fd, struct ion_buffer **buffers,
        struct ion_handle **handles;
        struct ion_client *client;
        int i = 0, ret = 0;
+       int share_fd;
 
        handles = kzalloc(*num_handles * sizeof(struct ion_handle *),
                          GFP_KERNEL);
@@ -262,7 +292,8 @@ int omap_ion_share_fd_to_buffers(int fd, struct ion_buffer **buffers,
 
        for (i = 0; i < *num_handles; i++) {
-               if (handles[i])
-                       buffers[i] = ion_share_dma_buf(client, handles[i]);
+               if (handles[i]) {
+                       share_fd = ion_share_dma_buf(client, handles[i]);
+                       buffers[i] = ion_handle_buffer(handles[i]);
+               }
        }
 
 exit:
index af4988ad2121cd5d4fd40e83e9c92c6248bcfa88..cbd16cf6c522b436f07c12f32ea1b572a14f373f 100644 (file)
@@ -55,6 +55,7 @@ struct omap_tiler_info {
        u32 vsize;                      /* virtual stride of buffer */
        u32 vstride;                    /* virtual size of buffer */
        u32 phys_stride;                        /* Physical stride of the buffer */
+       u32 flags;                      /* Flags specifying cached or not */
 };
 
 static int omap_tiler_heap_allocate(struct ion_heap *heap,
@@ -62,6 +63,8 @@ static int omap_tiler_heap_allocate(struct ion_heap *heap,
                                    unsigned long size, unsigned long align,
                                    unsigned long flags)
 {
+       struct omap_tiler_info *info;
+
        /* This means the buffer is already allocated and populated, we're getting here because
         * of dummy handle creation, so simply return*/ 
        if (size == 0) {
@@ -70,7 +73,12 @@ static int omap_tiler_heap_allocate(struct ion_heap *heap,
                  * This will be used later on inside map_dma function to create
                  * the sg list for tiler buffer
                  */
-               buffer->priv_virt = (void *)flags;
+               info = (struct omap_tiler_info *) flags;
+               if (!info) {
+                       pr_err("%s: flags argument is not set up\n", __func__);
+                       return -EINVAL;
+               }
+               buffer->priv_virt = info;
+               /* Re-update correct flags inside buffer */
+               buffer->flags = info->flags;
                return 0;
        }
 
@@ -174,6 +182,7 @@ int omap_tiler_alloc(struct ion_heap *heap,
        info->phys_addrs = (u32 *)(info + 1);
        info->tiler_addrs = info->phys_addrs + n_phys_pages;
        info->fmt = data->fmt;
+       info->flags = data->flags;
 
        /* Allocate tiler space
           FIXME: we only support PAGE_SIZE alignment right now. */
@@ -330,9 +339,9 @@ static int omap_tiler_heap_map_user(struct ion_heap *heap,
        int i, ret = 0;
        pgprot_t vm_page_prot;
 
-       /* Use writecombined mappings unless on OMAP5.  If OMAP5, use
+       /* Use writecombined mappings unless on OMAP5 or DRA7.  If OMAP5 or DRA7, use
        shared device due to h/w issue. */
-       if (soc_is_omap54xx())
+       if (soc_is_omap54xx() || soc_is_dra7xx())
                vm_page_prot = __pgprot_modify(vma->vm_page_prot, L_PTE_MT_MASK,
                                                L_PTE_MT_DEV_SHARED);
        else
index a55d11fbcbd598d5cf73562fc9b03ede2ee2ed74..31045a150dc5c8ab22a6898dc3f8043838c66be6 100644 (file)
@@ -41,6 +41,21 @@ enum ion_heap_type {
        ION_NUM_HEAPS = 16,
 };
 
+/**
+ * enum ion_data_direction - sync operation arguments
+ * @ION_BIDIRECTIONAL:  memory written to & read from device
+ * @ION_TO_DEVICE: memory going to be transferred to device
+ * @ION_FROM_DEVICE:    memory populated by device
+ * @ION_NONE:           None of the above
+ */
+enum ion_data_direction {
+       ION_BIDIRECTIONAL = 0,
+       ION_TO_DEVICE = 1,
+       ION_FROM_DEVICE = 2,
+       ION_NONE = 3,
+};
+
+
 #define ION_HEAP_SYSTEM_MASK           (1 << ION_HEAP_TYPE_SYSTEM)
 #define ION_HEAP_SYSTEM_CONTIG_MASK    (1 << ION_HEAP_TYPE_SYSTEM_CONTIG)
 #define ION_HEAP_CARVEOUT_MASK         (1 << ION_HEAP_TYPE_CARVEOUT)
@@ -273,6 +288,7 @@ struct ion_allocation_data {
 struct ion_fd_data {
        struct ion_handle *handle;
        int fd;
+       enum ion_data_direction dir;
 };
 
 /**