diff options
author | Sundar Raman | 2013-07-26 16:31:54 -0500 |
---|---|---|
committer | Sundar Raman | 2013-07-26 16:31:54 -0500 |
commit | 699a8e2ac38624064ae62c301394f25c5f3d34dd (patch) | |
tree | bcd0fb0c817ecaff08586263209c0d343cca3fb3 /drivers | |
parent | 36e3dab9021df936535d20fc2a3ae8871434c7aa (diff) | |
parent | 2474b61d8c1d217982a6caa76bea45e95568d5b3 (diff) | |
download | kernel-video-699a8e2ac38624064ae62c301394f25c5f3d34dd.tar.gz kernel-video-699a8e2ac38624064ae62c301394f25c5f3d34dd.tar.xz kernel-video-699a8e2ac38624064ae62c301394f25c5f3d34dd.zip |
Merge branch 'ion' into p-ti-android-3.8.y-video
This contains the ION cleanup series and caching changes
* ion:
gpu: ion: add support for more cache operations
gpu: ion: DRA7: ensure TILER 2d mappings are shared device
ARM: dts: DRA7/OMAP5: reserve carveout buffers & fix carveout size
gpu: ion: fix omap_ion_share_fd_to_buffers api
gpu: ion: omap: add checks for carveout addresses and sizes
gpu: ion: omap: Fix TILER secure heap base address
gpu: ion: omap: re-populate flags parameters into buffer
Signed-off-by: Sundar Raman <a0393242@ti.com>
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/gpu/ion/ion.c | 16 | ||||
-rwxr-xr-x | drivers/gpu/ion/omap/omap_ion.c | 47 | ||||
-rw-r--r-- | drivers/gpu/ion/omap/omap_tiler_heap.c | 15 |
3 files changed, 63 insertions, 15 deletions
diff --git a/drivers/gpu/ion/ion.c b/drivers/gpu/ion/ion.c index 8f477880f04..d7c780ff35b 100644 --- a/drivers/gpu/ion/ion.c +++ b/drivers/gpu/ion/ion.c | |||
@@ -951,7 +951,7 @@ end: | |||
951 | } | 951 | } |
952 | EXPORT_SYMBOL(ion_import_dma_buf); | 952 | EXPORT_SYMBOL(ion_import_dma_buf); |
953 | 953 | ||
954 | static int ion_sync_for_device(struct ion_client *client, int fd) | 954 | static int ion_sync_for_device(struct ion_client *client, int fd, enum ion_data_direction dir) |
955 | { | 955 | { |
956 | struct dma_buf *dmabuf; | 956 | struct dma_buf *dmabuf; |
957 | struct ion_buffer *buffer; | 957 | struct ion_buffer *buffer; |
@@ -969,8 +969,16 @@ static int ion_sync_for_device(struct ion_client *client, int fd) | |||
969 | } | 969 | } |
970 | buffer = dmabuf->priv; | 970 | buffer = dmabuf->priv; |
971 | 971 | ||
972 | dma_sync_sg_for_device(NULL, buffer->sg_table->sgl, | 972 | if(dir == ION_FROM_DEVICE) |
973 | buffer->sg_table->nents, DMA_BIDIRECTIONAL); | 973 | dma_sync_sg_for_device(NULL, buffer->sg_table->sgl, |
974 | buffer->sg_table->nents, DMA_FROM_DEVICE); | ||
975 | else if(dir == ION_TO_DEVICE) | ||
976 | dma_sync_sg_for_device(NULL, buffer->sg_table->sgl, | ||
977 | buffer->sg_table->nents, DMA_TO_DEVICE); | ||
978 | else if(dir == ION_BIDIRECTIONAL) | ||
979 | dma_sync_sg_for_device(NULL, buffer->sg_table->sgl, | ||
980 | buffer->sg_table->nents, DMA_BIDIRECTIONAL); | ||
981 | |||
974 | dma_buf_put(dmabuf); | 982 | dma_buf_put(dmabuf); |
975 | return 0; | 983 | return 0; |
976 | } | 984 | } |
@@ -1053,7 +1061,7 @@ static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | |||
1053 | if (copy_from_user(&data, (void __user *)arg, | 1061 | if (copy_from_user(&data, (void __user *)arg, |
1054 | sizeof(struct ion_fd_data))) | 1062 | sizeof(struct ion_fd_data))) |
1055 | return -EFAULT; | 1063 | return -EFAULT; |
1056 | ion_sync_for_device(client, data.fd); | 1064 | ion_sync_for_device(client, data.fd, data.dir); |
1057 | break; | 1065 | break; |
1058 | } | 1066 | } |
1059 | case ION_IOC_CUSTOM: | 1067 | case ION_IOC_CUSTOM: |
diff --git a/drivers/gpu/ion/omap/omap_ion.c b/drivers/gpu/ion/omap/omap_ion.c index e9b91f0787d..df8f3818653 100755 --- a/drivers/gpu/ion/omap/omap_ion.c +++ b/drivers/gpu/ion/omap/omap_ion.c | |||
@@ -124,19 +124,26 @@ static int omap_ion_probe(struct platform_device *pdev) | |||
124 | uint omap_ion_heap_tiler_size = 0; | 124 | uint omap_ion_heap_tiler_size = 0; |
125 | uint omap_ion_heap_nonsecure_tiler_size = 0; | 125 | uint omap_ion_heap_nonsecure_tiler_size = 0; |
126 | 126 | ||
127 | omap_ion_device = ion_device_create(omap_ion_ioctl); | ||
128 | if (IS_ERR_OR_NULL(omap_ion_device)) { | ||
129 | kfree(heaps); | ||
130 | return PTR_ERR(omap_ion_device); | ||
131 | } | ||
132 | |||
133 | if (node) { | 127 | if (node) { |
134 | of_property_read_u32(node, "ti,omap_ion_heap_secure_input_base", | 128 | of_property_read_u32(node, "ti,omap_ion_heap_secure_input_base", |
135 | &omap_ion_heap_secure_input_base); | 129 | &omap_ion_heap_secure_input_base); |
136 | of_property_read_u32(node, "ti,omap_ion_heap_tiler_base", | 130 | of_property_read_u32(node, "ti,omap_ion_heap_tiler_base", |
137 | &omap_ion_heap_tiler_size); | 131 | &omap_ion_heap_tiler_base); |
138 | of_property_read_u32(node, "ti,omap_ion_heap_nonsecure_tiler_base", | 132 | of_property_read_u32(node, "ti,omap_ion_heap_nonsecure_tiler_base", |
139 | &omap_ion_heap_nonsecure_tiler_base); | 133 | &omap_ion_heap_nonsecure_tiler_base); |
134 | if (omap_ion_heap_secure_input_base == 0 | ||
135 | || omap_ion_heap_tiler_base == 0 | ||
136 | || omap_ion_heap_nonsecure_tiler_base == 0) { | ||
137 | pr_err("%s: carveout memory address is null. please check dts file\n" | ||
138 | "omap_ion_heap_secure_input_base = 0x%x\n" | ||
139 | "omap_ion_heap_tiler_base = 0x%x\n" | ||
140 | "omap_ion_heap_nonsecure_tiler_base = 0x%x\n" | ||
141 | , __func__ | ||
142 | , omap_ion_heap_secure_input_base | ||
143 | , omap_ion_heap_tiler_base | ||
144 | , omap_ion_heap_nonsecure_tiler_base); | ||
145 | return -EINVAL; | ||
146 | } | ||
140 | 147 | ||
141 | of_property_read_u32(node, "ti,omap_ion_heap_secure_input_size", | 148 | of_property_read_u32(node, "ti,omap_ion_heap_secure_input_size", |
142 | &omap_ion_heap_secure_input_size); | 149 | &omap_ion_heap_secure_input_size); |
@@ -144,8 +151,30 @@ static int omap_ion_probe(struct platform_device *pdev) | |||
144 | &omap_ion_heap_tiler_size); | 151 | &omap_ion_heap_tiler_size); |
145 | of_property_read_u32(node, "ti,omap_ion_heap_nonsecure_tiler_size", | 152 | of_property_read_u32(node, "ti,omap_ion_heap_nonsecure_tiler_size", |
146 | &omap_ion_heap_nonsecure_tiler_size); | 153 | &omap_ion_heap_nonsecure_tiler_size); |
154 | if (omap_ion_heap_secure_input_size == 0 | ||
155 | || omap_ion_heap_tiler_size == 0 | ||
156 | || omap_ion_heap_nonsecure_tiler_size == 0) { | ||
157 | pr_err("%s: carveout memory size is zero. please check dts file\n" | ||
158 | "omap_ion_heap_secure_input_size = 0x%x\n" | ||
159 | "omap_ion_heap_tiler_size = 0x%x\n" | ||
160 | "omap_ion_heap_nonsecure_tiler_size = 0x%x\n" | ||
161 | , __func__ | ||
162 | , omap_ion_heap_secure_input_size | ||
163 | , omap_ion_heap_tiler_size | ||
164 | , omap_ion_heap_nonsecure_tiler_size); | ||
165 | return -EINVAL; | ||
166 | } | ||
167 | |||
168 | } else { | ||
169 | pr_err("%s: no matching device tree node\n", __func__); | ||
170 | return -ENODEV; | ||
147 | } | 171 | } |
148 | 172 | ||
173 | omap_ion_device = ion_device_create(omap_ion_ioctl); | ||
174 | if (IS_ERR_OR_NULL(omap_ion_device)) | ||
175 | return PTR_ERR(omap_ion_device); | ||
176 | |||
177 | |||
149 | num_heaps = omap_ion_data.nr; | 178 | num_heaps = omap_ion_data.nr; |
150 | 179 | ||
151 | heaps = kzalloc(sizeof(struct ion_heap *)*num_heaps, GFP_KERNEL); | 180 | heaps = kzalloc(sizeof(struct ion_heap *)*num_heaps, GFP_KERNEL); |
@@ -242,6 +271,7 @@ int omap_ion_share_fd_to_buffers(int fd, struct ion_buffer **buffers, | |||
242 | struct ion_handle **handles; | 271 | struct ion_handle **handles; |
243 | struct ion_client *client; | 272 | struct ion_client *client; |
244 | int i = 0, ret = 0; | 273 | int i = 0, ret = 0; |
274 | int share_fd; | ||
245 | 275 | ||
246 | handles = kzalloc(*num_handles * sizeof(struct ion_handle *), | 276 | handles = kzalloc(*num_handles * sizeof(struct ion_handle *), |
247 | GFP_KERNEL); | 277 | GFP_KERNEL); |
@@ -262,7 +292,8 @@ int omap_ion_share_fd_to_buffers(int fd, struct ion_buffer **buffers, | |||
262 | 292 | ||
263 | for (i = 0; i < *num_handles; i++) { | 293 | for (i = 0; i < *num_handles; i++) { |
264 | if (handles[i]) | 294 | if (handles[i]) |
265 | buffers[i] = ion_share_dma_buf(client, handles[i]); | 295 | share_fd = ion_share_dma_buf(client, handles[i]); |
296 | buffers[i] = ion_handle_buffer(handles[i]); | ||
266 | } | 297 | } |
267 | 298 | ||
268 | exit: | 299 | exit: |
diff --git a/drivers/gpu/ion/omap/omap_tiler_heap.c b/drivers/gpu/ion/omap/omap_tiler_heap.c index af4988ad212..cbd16cf6c52 100644 --- a/drivers/gpu/ion/omap/omap_tiler_heap.c +++ b/drivers/gpu/ion/omap/omap_tiler_heap.c | |||
@@ -55,6 +55,7 @@ struct omap_tiler_info { | |||
55 | u32 vsize; /* virtual stride of buffer */ | 55 | u32 vsize; /* virtual stride of buffer */ |
56 | u32 vstride; /* virtual size of buffer */ | 56 | u32 vstride; /* virtual size of buffer */ |
57 | u32 phys_stride; /* Physical stride of the buffer */ | 57 | u32 phys_stride; /* Physical stride of the buffer */ |
58 | u32 flags; /* Flags specifying cached or not */ | ||
58 | }; | 59 | }; |
59 | 60 | ||
60 | static int omap_tiler_heap_allocate(struct ion_heap *heap, | 61 | static int omap_tiler_heap_allocate(struct ion_heap *heap, |
@@ -62,6 +63,8 @@ static int omap_tiler_heap_allocate(struct ion_heap *heap, | |||
62 | unsigned long size, unsigned long align, | 63 | unsigned long size, unsigned long align, |
63 | unsigned long flags) | 64 | unsigned long flags) |
64 | { | 65 | { |
66 | struct omap_tiler_info *info; | ||
67 | |||
65 | /* This means the buffer is already allocated and populated, we're getting here because | 68 | /* This means the buffer is already allocated and populated, we're getting here because |
66 | * of dummy handle creation, so simply return*/ | 69 | * of dummy handle creation, so simply return*/ |
67 | if (size == 0) { | 70 | if (size == 0) { |
@@ -70,7 +73,12 @@ static int omap_tiler_heap_allocate(struct ion_heap *heap, | |||
70 | * This will be used later on inside map_dma function to create | 73 | * This will be used later on inside map_dma function to create |
71 | * the sg list for tiler buffer | 74 | * the sg list for tiler buffer |
72 | */ | 75 | */ |
73 | buffer->priv_virt = (void *)flags; | 76 | info = (struct omap_tiler_info *) flags; |
77 | if (!info) | ||
78 | pr_err("%s: flags argument is not set up\n", __func__); | ||
79 | buffer->priv_virt = info; | ||
80 | /* Re-update correct flags inside buffer */ | ||
81 | buffer->flags = info->flags; | ||
74 | return 0; | 82 | return 0; |
75 | } | 83 | } |
76 | 84 | ||
@@ -174,6 +182,7 @@ int omap_tiler_alloc(struct ion_heap *heap, | |||
174 | info->phys_addrs = (u32 *)(info + 1); | 182 | info->phys_addrs = (u32 *)(info + 1); |
175 | info->tiler_addrs = info->phys_addrs + n_phys_pages; | 183 | info->tiler_addrs = info->phys_addrs + n_phys_pages; |
176 | info->fmt = data->fmt; | 184 | info->fmt = data->fmt; |
185 | info->flags = data->flags; | ||
177 | 186 | ||
178 | /* Allocate tiler space | 187 | /* Allocate tiler space |
179 | FIXME: we only support PAGE_SIZE alignment right now. */ | 188 | FIXME: we only support PAGE_SIZE alignment right now. */ |
@@ -330,9 +339,9 @@ static int omap_tiler_heap_map_user(struct ion_heap *heap, | |||
330 | int i, ret = 0; | 339 | int i, ret = 0; |
331 | pgprot_t vm_page_prot; | 340 | pgprot_t vm_page_prot; |
332 | 341 | ||
333 | /* Use writecombined mappings unless on OMAP5. If OMAP5, use | 342 | /* Use writecombined mappings unless on OMAP5 or DRA7. If OMAP5 or DRA7, use |
334 | shared device due to h/w issue. */ | 343 | shared device due to h/w issue. */ |
335 | if (soc_is_omap54xx()) | 344 | if (soc_is_omap54xx() || soc_is_dra7xx()) |
336 | vm_page_prot = __pgprot_modify(vma->vm_page_prot, L_PTE_MT_MASK, | 345 | vm_page_prot = __pgprot_modify(vma->vm_page_prot, L_PTE_MT_MASK, |
337 | L_PTE_MT_DEV_SHARED); | 346 | L_PTE_MT_DEV_SHARED); |
338 | else | 347 | else |