about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorRebecca Schultz Zavin2012-11-15 12:43:46 -0600
committerArve Hjønnevåg2013-02-19 19:56:02 -0600
commitcc7a12852b165a0c3e61dbe576c8df8c7abd1525 (patch)
tree44f201d2ea56adbac3ab2021ff79d9503f52279f
parent78be6b326cfaa8be5e5b8bede173e9d150ddab2e (diff)
downloadkernel-common-cc7a12852b165a0c3e61dbe576c8df8c7abd1525.tar.gz
kernel-common-cc7a12852b165a0c3e61dbe576c8df8c7abd1525.tar.xz
kernel-common-cc7a12852b165a0c3e61dbe576c8df8c7abd1525.zip
gpu: ion: Refactor common mapping functions out of system heap
The system heap contained several general purpose functions to map buffers to the kernel and userspace. This patch refactors those into ion_heap.c so they can be used by other heaps.

Change-Id: If64591798bdc2c248bf9064ace2c927909d7adb8
Signed-off-by: Rebecca Schultz Zavin <rebecca@android.com>
-rw-r--r-- drivers/gpu/ion/ion_heap.c        75
-rw-r--r-- drivers/gpu/ion/ion_priv.h        10
-rw-r--r-- drivers/gpu/ion/ion_system_heap.c 84
3 files changed, 91 insertions(+), 78 deletions(-)
diff --git a/drivers/gpu/ion/ion_heap.c b/drivers/gpu/ion/ion_heap.c
index 8ce3c1907ba..b000eb39294 100644
--- a/drivers/gpu/ion/ion_heap.c
+++ b/drivers/gpu/ion/ion_heap.c
@@ -16,8 +16,83 @@
16 16
17#include <linux/err.h> 17#include <linux/err.h>
18#include <linux/ion.h> 18#include <linux/ion.h>
19#include <linux/mm.h>
20#include <linux/scatterlist.h>
21#include <linux/vmalloc.h>
19#include "ion_priv.h" 22#include "ion_priv.h"
20 23
24void *ion_heap_map_kernel(struct ion_heap *heap,
25 struct ion_buffer *buffer)
26{
27 struct scatterlist *sg;
28 int i, j;
29 void *vaddr;
30 pgprot_t pgprot;
31 struct sg_table *table = buffer->sg_table;
32 int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
33 struct page **pages = vmalloc(sizeof(struct page *) * npages);
34 struct page **tmp = pages;
35
36 if (!pages)
37 return 0;
38
39 if (buffer->flags & ION_FLAG_CACHED)
40 pgprot = PAGE_KERNEL;
41 else
42 pgprot = pgprot_writecombine(PAGE_KERNEL);
43
44 for_each_sg(table->sgl, sg, table->nents, i) {
45 int npages_this_entry = PAGE_ALIGN(sg_dma_len(sg)) / PAGE_SIZE;
46 struct page *page = sg_page(sg);
47 BUG_ON(i >= npages);
48 for (j = 0; j < npages_this_entry; j++) {
49 *(tmp++) = page++;
50 }
51 }
52 vaddr = vmap(pages, npages, VM_MAP, pgprot);
53 vfree(pages);
54
55 return vaddr;
56}
57
58void ion_heap_unmap_kernel(struct ion_heap *heap,
59 struct ion_buffer *buffer)
60{
61 vunmap(buffer->vaddr);
62}
63
64int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
65 struct vm_area_struct *vma)
66{
67 struct sg_table *table = buffer->sg_table;
68 unsigned long addr = vma->vm_start;
69 unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
70 struct scatterlist *sg;
71 int i;
72
73 for_each_sg(table->sgl, sg, table->nents, i) {
74 struct page *page = sg_page(sg);
75 unsigned long remainder = vma->vm_end - addr;
76 unsigned long len = sg_dma_len(sg);
77
78 if (offset >= sg_dma_len(sg)) {
79 offset -= sg_dma_len(sg);
80 continue;
81 } else if (offset) {
82 page += offset / PAGE_SIZE;
83 len = sg_dma_len(sg) - offset;
84 offset = 0;
85 }
86 len = min(len, remainder);
87 remap_pfn_range(vma, addr, page_to_pfn(page), len,
88 vma->vm_page_prot);
89 addr += len;
90 if (addr >= vma->vm_end)
91 return 0;
92 }
93 return 0;
94}
95
21struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data) 96struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
22{ 97{
23 struct ion_heap *heap = NULL; 98 struct ion_heap *heap = NULL;
diff --git a/drivers/gpu/ion/ion_priv.h b/drivers/gpu/ion/ion_priv.h
index e719a625084..24bf3ebdf42 100644
--- a/drivers/gpu/ion/ion_priv.h
+++ b/drivers/gpu/ion/ion_priv.h
@@ -177,6 +177,16 @@ void ion_device_destroy(struct ion_device *dev);
177void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap); 177void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap);
178 178
179/** 179/**
180 * some helpers for common operations on buffers using the sg_table
181 * and vaddr fields
182 */
183void *ion_heap_map_kernel(struct ion_heap *, struct ion_buffer *);
184void ion_heap_unmap_kernel(struct ion_heap *, struct ion_buffer *);
185int ion_heap_map_user(struct ion_heap *, struct ion_buffer *,
186 struct vm_area_struct *);
187
188
189/**
180 * functions for creating and destroying the built in ion heaps. 190 * functions for creating and destroying the built in ion heaps.
181 * architectures can add their own custom architecture specific 191 * architectures can add their own custom architecture specific
182 * heaps as appropriate. 192 * heaps as appropriate.
diff --git a/drivers/gpu/ion/ion_system_heap.c b/drivers/gpu/ion/ion_system_heap.c
index 2a85df9ef89..a8cab06b106 100644
--- a/drivers/gpu/ion/ion_system_heap.c
+++ b/drivers/gpu/ion/ion_system_heap.c
@@ -224,7 +224,7 @@ void ion_system_heap_free(struct ion_buffer *buffer)
224 struct ion_system_heap *sys_heap = container_of(heap, 224 struct ion_system_heap *sys_heap = container_of(heap,
225 struct ion_system_heap, 225 struct ion_system_heap,
226 heap); 226 heap);
227 struct sg_table *table = buffer->priv_virt; 227 struct sg_table *table = buffer->sg_table;
228 struct scatterlist *sg; 228 struct scatterlist *sg;
229 LIST_HEAD(pages); 229 LIST_HEAD(pages);
230 int i; 230 int i;
@@ -247,86 +247,14 @@ void ion_system_heap_unmap_dma(struct ion_heap *heap,
247 return; 247 return;
248} 248}
249 249
250void *ion_system_heap_map_kernel(struct ion_heap *heap,
251 struct ion_buffer *buffer)
252{
253 struct scatterlist *sg;
254 int i, j;
255 void *vaddr;
256 pgprot_t pgprot;
257 struct sg_table *table = buffer->priv_virt;
258 int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
259 struct page **pages = vmalloc(sizeof(struct page *) * npages);
260 struct page **tmp = pages;
261
262 if (!pages)
263 return 0;
264
265 if (buffer->flags & ION_FLAG_CACHED)
266 pgprot = PAGE_KERNEL;
267 else
268 pgprot = pgprot_writecombine(PAGE_KERNEL);
269
270 for_each_sg(table->sgl, sg, table->nents, i) {
271 int npages_this_entry = PAGE_ALIGN(sg_dma_len(sg)) / PAGE_SIZE;
272 struct page *page = sg_page(sg);
273 BUG_ON(i >= npages);
274 for (j = 0; j < npages_this_entry; j++) {
275 *(tmp++) = page++;
276 }
277 }
278 vaddr = vmap(pages, npages, VM_MAP, pgprot);
279 vfree(pages);
280
281 return vaddr;
282}
283
284void ion_system_heap_unmap_kernel(struct ion_heap *heap,
285 struct ion_buffer *buffer)
286{
287 vunmap(buffer->vaddr);
288}
289
290int ion_system_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
291 struct vm_area_struct *vma)
292{
293 struct sg_table *table = buffer->priv_virt;
294 unsigned long addr = vma->vm_start;
295 unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
296 struct scatterlist *sg;
297 int i;
298
299 for_each_sg(table->sgl, sg, table->nents, i) {
300 struct page *page = sg_page(sg);
301 unsigned long remainder = vma->vm_end - addr;
302 unsigned long len = sg_dma_len(sg);
303
304 if (offset >= sg_dma_len(sg)) {
305 offset -= sg_dma_len(sg);
306 continue;
307 } else if (offset) {
308 page += offset / PAGE_SIZE;
309 len = sg_dma_len(sg) - offset;
310 offset = 0;
311 }
312 len = min(len, remainder);
313 remap_pfn_range(vma, addr, page_to_pfn(page), len,
314 vma->vm_page_prot);
315 addr += len;
316 if (addr >= vma->vm_end)
317 return 0;
318 }
319 return 0;
320}
321
322static struct ion_heap_ops system_heap_ops = { 250static struct ion_heap_ops system_heap_ops = {
323 .allocate = ion_system_heap_allocate, 251 .allocate = ion_system_heap_allocate,
324 .free = ion_system_heap_free, 252 .free = ion_system_heap_free,
325 .map_dma = ion_system_heap_map_dma, 253 .map_dma = ion_system_heap_map_dma,
326 .unmap_dma = ion_system_heap_unmap_dma, 254 .unmap_dma = ion_system_heap_unmap_dma,
327 .map_kernel = ion_system_heap_map_kernel, 255 .map_kernel = ion_heap_map_kernel,
328 .unmap_kernel = ion_system_heap_unmap_kernel, 256 .unmap_kernel = ion_heap_unmap_kernel,
329 .map_user = ion_system_heap_map_user, 257 .map_user = ion_heap_map_user,
330}; 258};
331 259
332static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s, 260static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
@@ -468,8 +396,8 @@ static struct ion_heap_ops kmalloc_ops = {
468 .phys = ion_system_contig_heap_phys, 396 .phys = ion_system_contig_heap_phys,
469 .map_dma = ion_system_contig_heap_map_dma, 397 .map_dma = ion_system_contig_heap_map_dma,
470 .unmap_dma = ion_system_contig_heap_unmap_dma, 398 .unmap_dma = ion_system_contig_heap_unmap_dma,
471 .map_kernel = ion_system_heap_map_kernel, 399 .map_kernel = ion_heap_map_kernel,
472 .unmap_kernel = ion_system_heap_unmap_kernel, 400 .unmap_kernel = ion_heap_unmap_kernel,
473 .map_user = ion_system_contig_heap_map_user, 401 .map_user = ion_system_contig_heap_map_user,
474}; 402};
475 403