author     Rebecca Schultz Zavin    2012-09-21 13:46:06 -0500
committer  Arve Hjønnevåg           2013-02-19 19:55:22 -0600
commit     b511542570c9cd14ca3b6d4d944709b63a9b8b13 (patch)
tree       c10745548ec013f9d45b7dd91cea8fc5bc3ff2f8
parent     128078854668e9fc11e53ac5bdeabfde3684df32 (diff)
gpu: ion: optimize system heap for non fault buffers
If a buffer's user mappings are not going to be faulted in, it need not be allocated pagewise. We can optimize this common case by allocating an sglist of larger chunks rather than creating an entry for each page in the allocation.

Change-Id: I47814990e55c7bdb7abeaa2af824744b0a97602d
Signed-off-by: Rebecca Schultz Zavin <rebecca@android.com>
-rw-r--r--  drivers/gpu/ion/ion.c              21
-rw-r--r--  drivers/gpu/ion/ion_priv.h          9
-rw-r--r--  drivers/gpu/ion/ion_system_heap.c  40
3 files changed, 51 insertions(+), 19 deletions(-)
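The commit message's claim is easy to quantify. The following standalone sketch (plain userspace C, not kernel code) mimics the greedy walk over orders {8, 4, 0} used by alloc_largest_available() and compares how many scatterlist entries a pagewise allocation needs with how many the chunked path added by this patch needs. chunk_count() and the sample sizes are illustrative inventions, and a 4 KiB page size is assumed.

#include <stdio.h>

#define PAGE_SHIFT 12UL                 /* assume 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Same order list the system heap tries, largest first. */
static const unsigned int orders[] = {8, 4, 0};

/*
 * Greedy chunking as in alloc_largest_available(): cover `size` bytes
 * with the largest order that still fits, and count the chunks used.
 */
static unsigned long chunk_count(unsigned long size)
{
        unsigned long chunks = 0;

        /* mirror PAGE_ALIGN(size) so the order-0 fallback always fits */
        size = (size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
        while (size > 0) {
                for (unsigned int i = 0; i < sizeof(orders) / sizeof(orders[0]); i++) {
                        unsigned long bytes = (1UL << orders[i]) * PAGE_SIZE;

                        if (size < bytes)
                                continue;       /* try the next smaller order */
                        size -= bytes;
                        chunks++;
                        break;
                }
        }
        return chunks;
}

int main(void)
{
        unsigned long sizes[] = {64UL << 10, 1UL << 20, 8UL << 20, 33UL << 20};

        for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
                unsigned long pages = (sizes[i] + PAGE_SIZE - 1) / PAGE_SIZE;

                printf("%6lu KiB: %6lu sg entries pagewise, %4lu chunked\n",
                       sizes[i] >> 10, pages, chunk_count(sizes[i]));
        }
        return 0;
}

For an 8 MiB buffer this works out to 2048 pagewise entries versus 8 chunked entries, which is the saving the patch targets for buffers whose user mappings are never faulted in.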
diff --git a/drivers/gpu/ion/ion.c b/drivers/gpu/ion/ion.c
index 47d08db8855..f87b9785809 100644
--- a/drivers/gpu/ion/ion.c
+++ b/drivers/gpu/ion/ion.c
@@ -100,6 +100,12 @@ struct ion_handle {
         unsigned int kmap_cnt;
 };

+bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
+{
+        return ((buffer->flags & ION_FLAG_CACHED) &&
+                !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
+}
+
 /* this function should only be called while dev->lock is held */
 static void ion_buffer_add(struct ion_device *dev,
                            struct ion_buffer *buffer)
@@ -145,6 +151,7 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
                 return ERR_PTR(-ENOMEM);

         buffer->heap = heap;
+        buffer->flags = flags;
         kref_init(&buffer->ref);

         ret = heap->ops->allocate(heap, buffer, len, align, flags);
@@ -155,7 +162,6 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,

         buffer->dev = dev;
         buffer->size = len;
-        buffer->flags = flags;

         table = heap->ops->map_dma(heap, buffer);
         if (IS_ERR_OR_NULL(table)) {
@@ -164,14 +170,13 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
                 return ERR_PTR(PTR_ERR(table));
         }
         buffer->sg_table = table;
-        if (buffer->flags & ION_FLAG_CACHED &&
-            !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC)) {
+        if (ion_buffer_fault_user_mappings(buffer)) {
                 for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents,
                             i) {
                         if (sg_dma_len(sg) == PAGE_SIZE)
                                 continue;
-                        pr_err("%s: cached mappings must have pagewise "
-                               "sg_lists\n", __func__);
+                        pr_err("%s: cached mappings that will be faulted in "
+                               "must have pagewise sg_lists\n", __func__);
                         ret = -EINVAL;
                         goto err;
                 }
@@ -764,8 +769,7 @@ static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
         pr_debug("%s: syncing for device %s\n", __func__,
                  dev ? dev_name(dev) : "null");

-        if (!(buffer->flags & ION_FLAG_CACHED) ||
-            (buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC))
+        if (!ion_buffer_fault_user_mappings(buffer))
                 return;

         mutex_lock(&buffer->lock);
@@ -855,8 +859,7 @@ static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
                 return -EINVAL;
         }

-        if (buffer->flags & ION_FLAG_CACHED &&
-            !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC)) {
+        if (ion_buffer_fault_user_mappings(buffer)) {
                 vma->vm_private_data = buffer;
                 vma->vm_ops = &ion_vma_ops;
                 ion_vm_open(vma);
diff --git a/drivers/gpu/ion/ion_priv.h b/drivers/gpu/ion/ion_priv.h
index b2397230885..449f514261b 100644
--- a/drivers/gpu/ion/ion_priv.h
+++ b/drivers/gpu/ion/ion_priv.h
@@ -131,6 +131,15 @@ struct ion_heap {
 };

 /**
+ * ion_buffer_fault_user_mappings - fault in user mappings of this buffer
+ * @buffer:             buffer
+ *
+ * indicates whether userspace mappings of this buffer will be faulted
+ * in, this can affect how buffers are allocated from the heap.
+ */
+bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer);
+
+/**
  * ion_device_create - allocates and returns an ion device
  * @custom_ioctl:       arch specific ioctl function if applicable
  *
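The kernel-doc above is the behavioural contract: a buffer's user mappings are faulted in only when it is cached and does not need explicit sync. As a quick illustration, the sketch below enumerates the four flag combinations through a copy of the predicate; the flag values (bits 0 and 1) are assumptions made for the demo rather than taken from the ION header, and fault_user_mappings() is a local stand-in for the new ion_buffer_fault_user_mappings().

#include <stdbool.h>
#include <stdio.h>

/* Flag values assumed for this demo; the real definitions live in the ION header. */
#define ION_FLAG_CACHED            (1 << 0)
#define ION_FLAG_CACHED_NEEDS_SYNC (1 << 1)

/* Local copy of the logic in the new ion_buffer_fault_user_mappings(). */
static bool fault_user_mappings(unsigned long flags)
{
        return (flags & ION_FLAG_CACHED) &&
               !(flags & ION_FLAG_CACHED_NEEDS_SYNC);
}

int main(void)
{
        static const char *const names[] = {
                "(no flags)",
                "ION_FLAG_CACHED",
                "ION_FLAG_CACHED_NEEDS_SYNC",
                "ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC",
        };

        for (unsigned long flags = 0; flags < 4; flags++)
                printf("%-45s -> %s\n", names[flags],
                       fault_user_mappings(flags) ?
                       "faulted in (needs pagewise sg_list)" :
                       "not faulted (large chunks allowed)");
        return 0;
}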
diff --git a/drivers/gpu/ion/ion_system_heap.c b/drivers/gpu/ion/ion_system_heap.c
index ca6de04f6c2..310c4f66cfa 100644
--- a/drivers/gpu/ion/ion_system_heap.c
+++ b/drivers/gpu/ion/ion_system_heap.c
@@ -31,7 +31,8 @@ struct page_info {
         struct list_head list;
 };

-static struct page_info *alloc_largest_available(unsigned long size)
+static struct page_info *alloc_largest_available(unsigned long size,
+                                                 bool split_pages)
 {
         static unsigned int orders[] = {8, 4, 0};
         struct page *page;
@@ -45,7 +46,8 @@ static struct page_info *alloc_largest_available(unsigned long size)
                                      __GFP_NOWARN | __GFP_NORETRY, orders[i]);
                 if (!page)
                         continue;
-                split_page(page, orders[i]);
+                if (split_pages)
+                        split_page(page, orders[i]);
                 info = kmalloc(sizeof(struct page_info *), GFP_KERNEL);
                 info->page = page;
                 info->order = orders[i];
@@ -64,35 +66,49 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
         int ret;
         struct list_head pages;
         struct page_info *info, *tmp_info;
-        int i;
+        int i = 0;
         long size_remaining = PAGE_ALIGN(size);
+        bool split_pages = ion_buffer_fault_user_mappings(buffer);
+

         INIT_LIST_HEAD(&pages);
         while (size_remaining > 0) {
-                info = alloc_largest_available(size_remaining);
+                info = alloc_largest_available(size_remaining, split_pages);
                 if (!info)
                         goto err;
                 list_add_tail(&info->list, &pages);
                 size_remaining -= (1 << info->order) * PAGE_SIZE;
+                i++;
         }

         table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
         if (!table)
                 goto err;

-        ret = sg_alloc_table(table, PAGE_ALIGN(size) / PAGE_SIZE, GFP_KERNEL);
+        if (split_pages)
+                ret = sg_alloc_table(table, PAGE_ALIGN(size) / PAGE_SIZE,
+                                     GFP_KERNEL);
+        else
+                ret = sg_alloc_table(table, i, GFP_KERNEL);
+
         if (ret)
                 goto err1;

         sg = table->sgl;
         list_for_each_entry_safe(info, tmp_info, &pages, list) {
                 struct page *page = info->page;
-                for (i = 0; i < (1 << info->order); i++) {
-                        sg_set_page(sg, page + i, PAGE_SIZE, 0);
+
+                if (split_pages) {
+                        for (i = 0; i < (1 << info->order); i++) {
+                                sg_set_page(sg, page + i, PAGE_SIZE, 0);
+                                sg = sg_next(sg);
+                        }
+                } else {
+                        sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE,
+                                    0);
                         sg = sg_next(sg);
                 }
                 list_del(&info->list);
-                memset(info, 0, sizeof(struct page_info));
                 kfree(info);
         }

@@ -105,8 +121,12 @@ err1:
         kfree(table);
 err:
         list_for_each_entry(info, &pages, list) {
-                for (i = 0; i < (1 << info->order); i++)
-                        __free_page(info->page + i);
+                if (split_pages)
+                        for (i = 0; i < (1 << info->order); i++)
+                                __free_page(info->page + i);
+                else
+                        __free_pages(info->page, info->order);
+
                 kfree(info);
         }
         return -ENOMEM;
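To see why the non-faulted path produces a shorter table, the sketch below replays the two population loops from ion_system_heap_allocate() against a plain array standing in for the scatterlist. struct sg_entry, populate() and the hard-coded chunk orders are made up for illustration; the point is simply that the split path emits one PAGE_SIZE entry per page while the non-split path emits one entry per chunk, as in the patched loop.

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL        /* assumed page size */

/* Made-up stand-in for struct scatterlist: just a page index and a length. */
struct sg_entry {
        unsigned long page_index;
        unsigned long length;
};

/* Orders of the chunks the heap happened to hand back (1 MiB, 64 KiB, 2 pages). */
static const unsigned int chunk_orders[] = {8, 4, 0, 0};
#define NCHUNKS (sizeof(chunk_orders) / sizeof(chunk_orders[0]))

static unsigned long populate(struct sg_entry *sg, int split_pages)
{
        unsigned long page = 0, nents = 0;

        for (unsigned int c = 0; c < NCHUNKS; c++) {
                unsigned long npages = 1UL << chunk_orders[c];

                if (split_pages) {
                        /* faulted buffers: one PAGE_SIZE entry per page */
                        for (unsigned long i = 0; i < npages; i++) {
                                sg[nents].page_index = page + i;
                                sg[nents].length = PAGE_SIZE;
                                nents++;
                        }
                } else {
                        /* non-faulted buffers: one entry covering the whole chunk */
                        sg[nents].page_index = page;
                        sg[nents].length = npages * PAGE_SIZE;
                        nents++;
                }
                page += npages;
        }
        return nents;
}

int main(void)
{
        unsigned long total_pages = 0;

        for (unsigned int c = 0; c < NCHUNKS; c++)
                total_pages += 1UL << chunk_orders[c];

        /* worst case: the split path needs one entry per page */
        struct sg_entry *sg = calloc(total_pages, sizeof(*sg));

        if (!sg)
                return 1;
        printf("pagewise (faulted):    %lu entries\n", populate(sg, 1));
        printf("chunked (non-faulted): %lu entries\n", populate(sg, 0));
        free(sg);
        return 0;
}

For this example the output is 274 entries on the pagewise path versus 4 on the chunked path.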