author     Rebecca Schultz Zavin  2012-12-19 00:46:57 -0600
committer  Arve Hjønnevåg         2013-02-19 19:56:14 -0600
commit     e8018511357b8344d35a701c47667f52308d62ff (patch)
tree       a03b534880551824a38e6af2b626c447ac826bc2
parent     e09763675acc17ab96de05e9c905b4f2300fae34 (diff)
gpu: ion: Modify zeroing code so it only allocates address space once
vmap/vunmap spend a significant amount of time allocating the address space
to map into.  Rather than allocating address space for each page, allocate
it once for the entire allocation and then just map and unmap each page
into that address space.

Change-Id: I4a5c850717c80f75506a36b7ec2bcd55857b8dea
Signed-off-by: Rebecca Schultz Zavin <rschultz@google.com>
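For reference, here is a minimal, self-contained sketch of the pattern the patch adopts, in kernel-style C. It assumes the pte_t **-taking get_vm_area() helper that the patched code itself calls; the function name and error handling are illustrative and not part of the patch. The idea is to reserve one page of kernel address space once, then map, zero and unmap each page through that same window instead of paying for a fresh vmap()/vunmap() per page.

#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Illustrative helper: zero (1 << order) pages through a single reserved window. */
static void zero_pages_once_mapped(struct page *page, unsigned int order)
{
        struct vm_struct *vm_struct;
        pte_t *ptes;
        int i;

        /* Allocate the address space once, up front. */
        vm_struct = get_vm_area(PAGE_SIZE, &ptes);
        if (!vm_struct)
                return;

        for (i = 0; i < (1 << order); i++) {
                struct page *sub_page = page + i;
                struct page **pages = &sub_page;

                /* Map one page into the reserved window... */
                map_vm_area(vm_struct, pgprot_writecombine(PAGE_KERNEL), &pages);
                /* ...zero it through the mapping... */
                memset(vm_struct->addr, 0, PAGE_SIZE);
                /* ...then tear down only the page-table entries, keeping the window. */
                unmap_kernel_range((unsigned long)vm_struct->addr, PAGE_SIZE);
        }

        free_vm_area(vm_struct);
}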
-rw-r--r--  drivers/gpu/ion/ion_system_heap.c  28
1 file changed, 21 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/ion/ion_system_heap.c b/drivers/gpu/ion/ion_system_heap.c
index a8cab06b106..f1563b8fc33 100644
--- a/drivers/gpu/ion/ion_system_heap.c
+++ b/drivers/gpu/ion/ion_system_heap.c
@@ -91,7 +91,7 @@ static struct page *alloc_buffer_page(struct ion_system_heap *heap,
 
 static void free_buffer_page(struct ion_system_heap *heap,
                              struct ion_buffer *buffer, struct page *page,
-                             unsigned int order)
+                             unsigned int order, struct vm_struct *vm_struct)
 {
         bool cached = ion_buffer_cached(buffer);
         bool split_pages = ion_buffer_fault_user_mappings(buffer);
@@ -105,10 +105,13 @@ static void free_buffer_page(struct ion_system_heap *heap,
            purpose is to keep the pages out of the cache */
         for (i = 0; i < (1 << order); i++) {
                 struct page *sub_page = page + i;
-                void *addr = vmap(&sub_page, 1, VM_MAP,
-                                  pgprot_writecombine(PAGE_KERNEL));
-                memset(addr, 0, PAGE_SIZE);
-                vunmap(addr);
+                struct page **pages = &sub_page;
+                map_vm_area(vm_struct,
+                            pgprot_writecombine(PAGE_KERNEL),
+                            &pages);
+                memset(vm_struct->addr, 0, PAGE_SIZE);
+                unmap_kernel_range((unsigned long)vm_struct->addr,
+                                   PAGE_SIZE);
         }
         ion_page_pool_free(pool, page);
 } else if (split_pages) {
@@ -164,6 +167,8 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
         long size_remaining = PAGE_ALIGN(size);
         unsigned int max_order = orders[0];
         bool split_pages = ion_buffer_fault_user_mappings(buffer);
+        struct vm_struct *vm_struct;
+        pte_t *ptes;
 
         INIT_LIST_HEAD(&pages);
         while (size_remaining > 0) {
@@ -211,10 +216,13 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
 err1:
         kfree(table);
 err:
+        vm_struct = get_vm_area(PAGE_SIZE, &ptes);
         list_for_each_entry(info, &pages, list) {
-                free_buffer_page(sys_heap, buffer, info->page, info->order);
+                free_buffer_page(sys_heap, buffer, info->page, info->order,
+                                 vm_struct);
                 kfree(info);
         }
+        free_vm_area(vm_struct);
         return -ENOMEM;
 }
 
@@ -227,10 +235,16 @@ void ion_system_heap_free(struct ion_buffer *buffer)
         struct sg_table *table = buffer->sg_table;
         struct scatterlist *sg;
         LIST_HEAD(pages);
+        struct vm_struct *vm_struct;
+        pte_t *ptes;
         int i;
 
+        vm_struct = get_vm_area(PAGE_SIZE, &ptes);
+
         for_each_sg(table->sgl, sg, table->nents, i)
-                free_buffer_page(sys_heap, buffer, sg_page(sg), get_order(sg_dma_len(sg)));
+                free_buffer_page(sys_heap, buffer, sg_page(sg),
+                                 get_order(sg_dma_len(sg)), vm_struct);
+        free_vm_area(vm_struct);
         sg_free_table(table);
         kfree(table);
 }