author     Rebecca Schultz Zavin    2013-01-09 13:26:37 -0600
committer  Arve Hjønnevåg           2013-02-19 19:56:17 -0600
commit     949863b55e4af857698901f62b52df5fcb1e7089 (patch)
tree       4a7db13092fa89c3cc78c16f3776054fae133d8c
parent     0366d8bd917ee268c06accb2e082ac9d003e525e (diff)
gpu: ion: Refactor the code to zero buffers
Refactor the code in the system heap used to map and zero the buffers into a
separate utility so it can be called from other heaps.  Use it from the chunk
heap.

Change-Id: I706341ae42b80bc4aae8a8614b4f73435bbf05d9
Signed-off-by: Rebecca Schultz Zavin <rebecca@android.com>
-rw-r--r--  drivers/gpu/ion/ion_chunk_heap.c   |  2
-rw-r--r--  drivers/gpu/ion/ion_heap.c         | 37
-rw-r--r--  drivers/gpu/ion/ion_priv.h         |  1
-rw-r--r--  drivers/gpu/ion/ion_system_heap.c  | 34
4 files changed, 48 insertions(+), 26 deletions(-)
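The new helper, ion_heap_buffer_zero(), is declared in ion_priv.h so that any
heap can zero a buffer from its free path, as the chunk heap now does. As a
rough sketch of the intended call pattern (my_heap_free and the surrounding
heap are hypothetical, not part of this commit):

	/* Hypothetical heap free path (sketch only): zero the buffer via
	 * the shared helper before handing the memory back to the
	 * underlying allocator, mirroring the chunk heap change below. */
	static void my_heap_free(struct ion_buffer *buffer)
	{
		/* ion_heap_buffer_zero() maps each page with a pgprot
		 * chosen from the buffer's cache flags, memsets it to
		 * zero, and unmaps it again. */
		ion_heap_buffer_zero(buffer);

		/* ... return the pages to this heap's own allocator ... */
	}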
diff --git a/drivers/gpu/ion/ion_chunk_heap.c b/drivers/gpu/ion/ion_chunk_heap.c
index 01381827f58..f65274d3306 100644
--- a/drivers/gpu/ion/ion_chunk_heap.c
+++ b/drivers/gpu/ion/ion_chunk_heap.c
@@ -101,6 +101,8 @@ static void ion_chunk_heap_free(struct ion_buffer *buffer)
 	struct scatterlist *sg;
 	int i;
 
+	ion_heap_buffer_zero(buffer);
+
 	for_each_sg(table->sgl, sg, table->nents, i) {
 		__dma_page_cpu_to_dev(sg_page(sg), 0, sg_dma_len(sg),
 				      DMA_BIDIRECTIONAL);
diff --git a/drivers/gpu/ion/ion_heap.c b/drivers/gpu/ion/ion_heap.c
index fee9c2a8b15..225ef94655d 100644
--- a/drivers/gpu/ion/ion_heap.c
+++ b/drivers/gpu/ion/ion_heap.c
@@ -93,6 +93,43 @@ int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
 	return 0;
 }
 
+int ion_heap_buffer_zero(struct ion_buffer *buffer)
+{
+	struct sg_table *table = buffer->sg_table;
+	pgprot_t pgprot;
+	struct scatterlist *sg;
+	struct vm_struct *vm_struct;
+	int i, j, ret = 0;
+
+	if (buffer->flags & ION_FLAG_CACHED)
+		pgprot = PAGE_KERNEL;
+	else
+		pgprot = pgprot_writecombine(PAGE_KERNEL);
+
+	vm_struct = get_vm_area(PAGE_SIZE, VM_ALLOC);
+	if (!vm_struct)
+		return -ENOMEM;
+
+	for_each_sg(table->sgl, sg, table->nents, i) {
+		struct page *page = sg_page(sg);
+		unsigned long len = sg_dma_len(sg);
+
+		for (j = 0; j < len / PAGE_SIZE; j++) {
+			struct page *sub_page = page + j;
+			struct page **pages = &sub_page;
+			ret = map_vm_area(vm_struct, pgprot, &pages);
+			if (ret)
+				goto end;
+			memset(vm_struct->addr, 0, PAGE_SIZE);
+			unmap_kernel_range((unsigned long)vm_struct->addr,
+					   PAGE_SIZE);
+		}
+	}
+end:
+	free_vm_area(vm_struct);
+	return ret;
+}
+
 struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
 {
 	struct ion_heap *heap = NULL;
diff --git a/drivers/gpu/ion/ion_priv.h b/drivers/gpu/ion/ion_priv.h
index cdd65da515d..c1169216519 100644
--- a/drivers/gpu/ion/ion_priv.h
+++ b/drivers/gpu/ion/ion_priv.h
@@ -184,6 +184,7 @@ void *ion_heap_map_kernel(struct ion_heap *, struct ion_buffer *);
 void ion_heap_unmap_kernel(struct ion_heap *, struct ion_buffer *);
 int ion_heap_map_user(struct ion_heap *, struct ion_buffer *,
 		      struct vm_area_struct *);
+int ion_heap_buffer_zero(struct ion_buffer *buffer);
 
 
 /**
diff --git a/drivers/gpu/ion/ion_system_heap.c b/drivers/gpu/ion/ion_system_heap.c
index f1563b8fc33..c1061a801a4 100644
--- a/drivers/gpu/ion/ion_system_heap.c
+++ b/drivers/gpu/ion/ion_system_heap.c
@@ -91,7 +91,7 @@ static struct page *alloc_buffer_page(struct ion_system_heap *heap,
 
 static void free_buffer_page(struct ion_system_heap *heap,
 			     struct ion_buffer *buffer, struct page *page,
-			     unsigned int order, struct vm_struct *vm_struct)
+			     unsigned int order)
 {
 	bool cached = ion_buffer_cached(buffer);
 	bool split_pages = ion_buffer_fault_user_mappings(buffer);
@@ -99,20 +99,6 @@ static void free_buffer_page(struct ion_system_heap *heap,
 
 	if (!cached) {
 		struct ion_page_pool *pool = heap->pools[order_to_index(order)];
-		/* zero the pages before returning them to the pool for
-		   security.  This uses vmap as we want to set the pgprot so
-		   the writes to occur to noncached mappings, as the pool's
-		   purpose is to keep the pages out of the cache */
-		for (i = 0; i < (1 << order); i++) {
-			struct page *sub_page = page + i;
-			struct page **pages = &sub_page;
-			map_vm_area(vm_struct,
-				    pgprot_writecombine(PAGE_KERNEL),
-				    &pages);
-			memset(vm_struct->addr, 0, PAGE_SIZE);
-			unmap_kernel_range((unsigned long)vm_struct->addr,
-					   PAGE_SIZE);
-		}
 		ion_page_pool_free(pool, page);
 	} else if (split_pages) {
 		for (i = 0; i < (1 << order); i++)
@@ -167,8 +153,6 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
 	long size_remaining = PAGE_ALIGN(size);
 	unsigned int max_order = orders[0];
 	bool split_pages = ion_buffer_fault_user_mappings(buffer);
-	struct vm_struct *vm_struct;
-	pte_t *ptes;
 
 	INIT_LIST_HEAD(&pages);
 	while (size_remaining > 0) {
@@ -216,13 +200,10 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
 err1:
 	kfree(table);
 err:
-	vm_struct = get_vm_area(PAGE_SIZE, &ptes);
 	list_for_each_entry(info, &pages, list) {
-		free_buffer_page(sys_heap, buffer, info->page, info->order,
-				 vm_struct);
+		free_buffer_page(sys_heap, buffer, info->page, info->order);
 		kfree(info);
 	}
-	free_vm_area(vm_struct);
 	return -ENOMEM;
 }
 
@@ -233,18 +214,19 @@ void ion_system_heap_free(struct ion_buffer *buffer)
 					     struct ion_system_heap,
 					     heap);
 	struct sg_table *table = buffer->sg_table;
+	bool cached = ion_buffer_cached(buffer);
 	struct scatterlist *sg;
 	LIST_HEAD(pages);
-	struct vm_struct *vm_struct;
-	pte_t *ptes;
 	int i;
 
-	vm_struct = get_vm_area(PAGE_SIZE, &ptes);
+	/* uncached pages come from the page pools, zero them before returning
+	   for security purposes (other allocations are zerod at alloc time */
+	if (!cached)
+		ion_heap_buffer_zero(buffer);
 
 	for_each_sg(table->sgl, sg, table->nents, i)
 		free_buffer_page(sys_heap, buffer, sg_page(sg),
-				 get_order(sg_dma_len(sg)), vm_struct);
-	free_vm_area(vm_struct);
+				 get_order(sg_dma_len(sg)));
 	sg_free_table(table);
 	kfree(table);
 }
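Design note: ion_heap_buffer_zero() reserves a single page of kernel virtual
address space up front, then maps, zeroes, and unmaps each physical page
through it in turn. A condensed sketch of that inner step, using the same
3.x-era vmalloc API the patch uses (zero_one_page is a hypothetical name;
error handling elided):

	/* Zero one physical page through a preallocated one-page vm area.
	 * pgprot is PAGE_KERNEL for cached buffers, or write-combined so
	 * the stores bypass the CPU cache for uncached ones. */
	static void zero_one_page(struct vm_struct *vm_struct,
				  struct page *page, pgprot_t pgprot)
	{
		struct page **pages = &page;

		map_vm_area(vm_struct, pgprot, &pages);
		memset(vm_struct->addr, 0, PAGE_SIZE);
		unmap_kernel_range((unsigned long)vm_struct->addr, PAGE_SIZE);
	}

Mapping one page at a time keeps the kernel virtual-address footprint constant
regardless of buffer size, at the cost of one map/unmap (and TLB flush) per
page.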