author | Sundar Raman <a0393242@ti.com> | |
Fri, 26 Jul 2013 21:32:39 +0000 (16:32 -0500) | ||
committer | Sundar Raman <a0393242@ti.com> | |
Fri, 26 Jul 2013 21:32:39 +0000 (16:32 -0500) |
index 2c902e951943c6be47b69b14c0e0544eeedd8b3b..bc297a466512290f3c12d8f7f9727edb3e68f30c 100755 (executable)
*/
/dts-v1/;
+/*
+ * Following are the carveout addresses and the sizes for ION. SMC is not reserved for now
+ * C0000000 - SDRAM+1G
+ * BFD00000 - SMC (3MB)
+ * BA300000 - ION (90MB)
+ * B4300000 - TILER SECURE (81 MB)
+ * B3400000 - TILER NONSECURE (15 MB)
+*/
+/memreserve/ 0xba300000 0x5a00000;
+/memreserve/ 0xb5200000 0x5100000;
+/memreserve/ 0xb4300000 0xf00000;
+
/include/ "dra7.dtsi"
/ {
index a339f321c2a220e970dd6b6a8b5c8ac1caad6f43..5694a6fc052e7eaf951a5f65c50ad097fb130239 100644 (file)
/dts-v1/;
/*
- * Following are the carveout addresses and the sizes. SMC is not reserved for now
+ * Following are the carveout addresses and the sizes for ION. SMC is not reserved for now
* C0000000 - SDRAM+1G
* BFD00000 - SMC (3MB)
* BA300000 - ION (90MB)
- * B4300000 - TILER SECURE (96 MB)
+ * B4300000 - TILER SECURE (81 MB)
* B3400000 - TILER NONSECURE (15 MB)
*/
/memreserve/ 0xba300000 0x5a00000;
-/memreserve/ 0xb4300000 0x6000000;
-/memreserve/ 0xb3400000 0xf00000;
+/memreserve/ 0xb5200000 0x5100000;
+/memreserve/ 0xb4300000 0xf00000;
/include/ "omap5.dtsi"
/include/ "samsung_k3pe0e000b.dtsi"
diff --git a/drivers/gpu/ion/ion.c b/drivers/gpu/ion/ion.c
index 8f477880f040fe5985617acedd617bce7c988a7c..d7c780ff35b445d06a005f10472d268d5b2fd688 100644 (file)
--- a/drivers/gpu/ion/ion.c
+++ b/drivers/gpu/ion/ion.c
}
EXPORT_SYMBOL(ion_import_dma_buf);
-static int ion_sync_for_device(struct ion_client *client, int fd)
+static int ion_sync_for_device(struct ion_client *client, int fd, enum ion_data_direction dir)
{
struct dma_buf *dmabuf;
struct ion_buffer *buffer;
}
buffer = dmabuf->priv;
- dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
- buffer->sg_table->nents, DMA_BIDIRECTIONAL);
+ if(dir == ION_FROM_DEVICE)
+ dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
+ buffer->sg_table->nents, DMA_FROM_DEVICE);
+ else if(dir == ION_TO_DEVICE)
+ dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
+ buffer->sg_table->nents, DMA_TO_DEVICE);
+ else if(dir == ION_BIDIRECTIONAL)
+ dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
+ buffer->sg_table->nents, DMA_BIDIRECTIONAL);
+
dma_buf_put(dmabuf);
return 0;
}
if (copy_from_user(&data, (void __user *)arg,
sizeof(struct ion_fd_data)))
return -EFAULT;
- ion_sync_for_device(client, data.fd);
+ ion_sync_for_device(client, data.fd, data.dir);
break;
}
case ION_IOC_CUSTOM:
index e9b91f0787d3dee3155c41bedd78b0c4ce19a179..df8f3818653826ef95aa569574ef84408875e3dc 100755 (executable)
uint omap_ion_heap_tiler_size = 0;
uint omap_ion_heap_nonsecure_tiler_size = 0;
- omap_ion_device = ion_device_create(omap_ion_ioctl);
- if (IS_ERR_OR_NULL(omap_ion_device)) {
- kfree(heaps);
- return PTR_ERR(omap_ion_device);
- }
-
if (node) {
of_property_read_u32(node, "ti,omap_ion_heap_secure_input_base",
&omap_ion_heap_secure_input_base);
of_property_read_u32(node, "ti,omap_ion_heap_tiler_base",
- &omap_ion_heap_tiler_size);
+ &omap_ion_heap_tiler_base);
of_property_read_u32(node, "ti,omap_ion_heap_nonsecure_tiler_base",
&omap_ion_heap_nonsecure_tiler_base);
+ if (omap_ion_heap_secure_input_base == 0
+ || omap_ion_heap_tiler_base == 0
+ || omap_ion_heap_nonsecure_tiler_base == 0) {
+ pr_err("%s: carveout memory address is null. please check dts file\n"
+ "omap_ion_heap_secure_input_base = 0x%x\n"
+ "omap_ion_heap_tiler_base = 0x%x\n"
+ "omap_ion_heap_nonsecure_tiler_base = 0x%x\n"
+ , __func__
+ , omap_ion_heap_secure_input_base
+ , omap_ion_heap_tiler_base
+		, omap_ion_heap_nonsecure_tiler_base);
+		return -EINVAL;
+ }
of_property_read_u32(node, "ti,omap_ion_heap_secure_input_size",
&omap_ion_heap_secure_input_size);
&omap_ion_heap_tiler_size);
of_property_read_u32(node, "ti,omap_ion_heap_nonsecure_tiler_size",
&omap_ion_heap_nonsecure_tiler_size);
+ if (omap_ion_heap_secure_input_size == 0
+ || omap_ion_heap_tiler_size == 0
+ || omap_ion_heap_nonsecure_tiler_size == 0) {
+		pr_err("%s: carveout memory size is zero. please check dts file\n"
+ "omap_ion_heap_secure_input_size = 0x%x\n"
+ "omap_ion_heap_tiler_size = 0x%x\n"
+ "omap_ion_heap_nonsecure_tiler_size = 0x%x\n"
+ , __func__
+ , omap_ion_heap_secure_input_size
+ , omap_ion_heap_tiler_size
+ , omap_ion_heap_nonsecure_tiler_size);
+ return -EINVAL;
+ }
+
+ } else {
+ pr_err("%s: no matching device tree node\n", __func__);
+ return -ENODEV;
}
+ omap_ion_device = ion_device_create(omap_ion_ioctl);
+ if (IS_ERR_OR_NULL(omap_ion_device))
+ return PTR_ERR(omap_ion_device);
+
+
num_heaps = omap_ion_data.nr;
heaps = kzalloc(sizeof(struct ion_heap *)*num_heaps, GFP_KERNEL);
struct ion_handle **handles;
struct ion_client *client;
int i = 0, ret = 0;
+ int share_fd;
handles = kzalloc(*num_handles * sizeof(struct ion_handle *),
GFP_KERNEL);
for (i = 0; i < *num_handles; i++) {
if (handles[i])
- buffers[i] = ion_share_dma_buf(client, handles[i]);
+ share_fd = ion_share_dma_buf(client, handles[i]);
+ buffers[i] = ion_handle_buffer(handles[i]);
}
exit:
index af4988ad2121cd5d4fd40e83e9c92c6248bcfa88..cbd16cf6c522b436f07c12f32ea1b572a14f373f 100644 (file)
u32 vsize; /* virtual stride of buffer */
u32 vstride; /* virtual size of buffer */
u32 phys_stride; /* Physical stride of the buffer */
+ u32 flags; /* Flags specifying cached or not */
};
static int omap_tiler_heap_allocate(struct ion_heap *heap,
unsigned long size, unsigned long align,
unsigned long flags)
{
+ struct omap_tiler_info *info;
+
/* This means the buffer is already allocated and populated, we're getting here because
* of dummy handle creation, so simply return*/
if (size == 0) {
* This will be used later on inside map_dma function to create
* the sg list for tiler buffer
*/
- buffer->priv_virt = (void *)flags;
+ info = (struct omap_tiler_info *) flags;
+ if (!info)
+		pr_err("%s: flags argument is not setup\n", __func__);
+ buffer->priv_virt = info;
+ /* Re-update correct flags inside buffer */
+ buffer->flags = info->flags;
return 0;
}
info->phys_addrs = (u32 *)(info + 1);
info->tiler_addrs = info->phys_addrs + n_phys_pages;
info->fmt = data->fmt;
+ info->flags = data->flags;
/* Allocate tiler space
FIXME: we only support PAGE_SIZE alignment right now. */
int i, ret = 0;
pgprot_t vm_page_prot;
- /* Use writecombined mappings unless on OMAP5. If OMAP5, use
+ /* Use writecombined mappings unless on OMAP5 or DRA7. If OMAP5 or DRA7, use
shared device due to h/w issue. */
- if (soc_is_omap54xx())
+ if (soc_is_omap54xx() || soc_is_dra7xx())
vm_page_prot = __pgprot_modify(vma->vm_page_prot, L_PTE_MT_MASK,
L_PTE_MT_DEV_SHARED);
else
diff --git a/include/linux/ion.h b/include/linux/ion.h
index a55d11fbcbd598d5cf73562fc9b03ede2ee2ed74..31045a150dc5c8ab22a6898dc3f8043838c66be6 100644 (file)
--- a/include/linux/ion.h
+++ b/include/linux/ion.h
ION_NUM_HEAPS = 16,
};
+/**
+ * enum ion_data_direction - sync operation arguments
+ * @ION_BIDIRECTIONAL: memory written to & read from device
+ * @ION_TO_DEVICE: memory going to be transferred to device
+ * @ION_FROM_DEVICE: memory populated by device
+ * @ION_NONE: None of the above
+ */
+enum ion_data_direction {
+ ION_BIDIRECTIONAL = 0,
+ ION_TO_DEVICE = 1,
+ ION_FROM_DEVICE = 2,
+ ION_NONE = 3,
+};
+
+
#define ION_HEAP_SYSTEM_MASK (1 << ION_HEAP_TYPE_SYSTEM)
#define ION_HEAP_SYSTEM_CONTIG_MASK (1 << ION_HEAP_TYPE_SYSTEM_CONTIG)
#define ION_HEAP_CARVEOUT_MASK (1 << ION_HEAP_TYPE_CARVEOUT)
struct ion_fd_data {
struct ion_handle *handle;
int fd;
+ enum ion_data_direction dir;
};
/**