1 /*
2 * drivers/gpu/ion/ion_mem_pool.c
3 *
4 * Copyright (C) 2011 Google, Inc.
5 *
6 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and
8 * may be copied, distributed, and modified under those terms.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 */
17 #include <linux/debugfs.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/err.h>
20 #include <linux/fs.h>
21 #include <linux/list.h>
22 #include <linux/module.h>
23 #include <linux/slab.h>
24 #include <linux/shrinker.h>
25 #include "ion_priv.h"
/* #define DEBUG_PAGE_POOL_SHRINKER */
/* All pools in the system, kept on a plist keyed by page order (priority). */
static struct plist_head pools = PLIST_HEAD_INIT(pools);
/* Single shrinker instance that drains every registered pool under memory pressure. */
static struct shrinker shrinker;
/*
 * Bookkeeping node linking one cached page into a pool's
 * high_items or low_items list.
 */
struct ion_page_pool_item {
	struct page *page;	/* the cached page (block of 1 << pool->order pages) */
	struct list_head list;	/* link in pool->high_items or pool->low_items */
};
37 static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
38 {
39 struct page *page = alloc_pages(pool->gfp_mask, pool->order);
41 if (!page)
42 return NULL;
43 /* this is only being used to flush the page for dma,
44 this api is not really suitable for calling from a driver
45 but no better way to flush a page for dma exist at this time */
46 arm_dma_ops.sync_single_for_device(NULL,
47 pfn_to_dma(NULL, page_to_pfn(page)),
48 PAGE_SIZE << pool->order,
49 DMA_BIDIRECTIONAL);
50 return page;
51 }
/*
 * Release a page block that will not (or cannot) be cached back to the
 * system page allocator.
 */
static void ion_page_pool_free_pages(struct ion_page_pool *pool,
				     struct page *page)
{
	__free_pages(page, pool->order);
}
59 static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
60 {
61 struct ion_page_pool_item *item;
63 item = kmalloc(sizeof(struct ion_page_pool_item), GFP_KERNEL);
64 if (!item)
65 return -ENOMEM;
67 mutex_lock(&pool->mutex);
68 item->page = page;
69 if (PageHighMem(page)) {
70 list_add_tail(&item->list, &pool->high_items);
71 pool->high_count++;
72 } else {
73 list_add_tail(&item->list, &pool->low_items);
74 pool->low_count++;
75 }
76 mutex_unlock(&pool->mutex);
77 return 0;
78 }
80 static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high)
81 {
82 struct ion_page_pool_item *item;
83 struct page *page;
85 if (high) {
86 BUG_ON(!pool->high_count);
87 item = list_first_entry(&pool->high_items,
88 struct ion_page_pool_item, list);
89 pool->high_count--;
90 } else {
91 BUG_ON(!pool->low_count);
92 item = list_first_entry(&pool->low_items,
93 struct ion_page_pool_item, list);
94 pool->low_count--;
95 }
97 list_del(&item->list);
98 page = item->page;
99 kfree(item);
100 return page;
101 }
103 void *ion_page_pool_alloc(struct ion_page_pool *pool)
104 {
105 struct page *page = NULL;
107 BUG_ON(!pool);
109 mutex_lock(&pool->mutex);
110 if (pool->high_count)
111 page = ion_page_pool_remove(pool, true);
112 else if (pool->low_count)
113 page = ion_page_pool_remove(pool, false);
114 mutex_unlock(&pool->mutex);
116 if (!page)
117 page = ion_page_pool_alloc_pages(pool);
119 return page;
120 }
/*
 * Give @page back to @pool for reuse.  If the pool cannot track it
 * (tracking-item allocation failed), the page is released straight to
 * the system allocator instead.
 */
void ion_page_pool_free(struct ion_page_pool *pool, struct page *page)
{
	if (ion_page_pool_add(pool, page) != 0)
		ion_page_pool_free_pages(pool, page);
}
#ifdef DEBUG_PAGE_POOL_SHRINKER
/*
 * debugfs write handler: any non-zero write drains every pool by
 * driving the shrinker twice — the first call (nr_to_scan == 0) only
 * queries the reclaimable count, the second frees that many objects.
 */
static int debug_drop_pools_set(void *data, u64 val)
{
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = -1;	/* all bits set: allow highmem reclaim too */
	sc.nr_to_scan = 0;	/* query-only first pass */

	if (!val)
		return 0;

	objs = shrinker.shrink(&shrinker, &sc);
	sc.nr_to_scan = objs;

	shrinker.shrink(&shrinker, &sc);
	return 0;
}

/* debugfs read handler: report how many pages the pools currently hold. */
static int debug_drop_pools_get(void *data, u64 *val)
{
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = -1;
	sc.nr_to_scan = 0;	/* nr_to_scan == 0 means "count only" */

	objs = shrinker.shrink(&shrinker, &sc);
	*val = objs;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(debug_drop_pools_fops, debug_drop_pools_get,
			debug_drop_pools_set, "%llu\n");

/*
 * debugfs write handler: grow the pool whose plist priority (== page
 * order) matches @val by one freshly allocated page block.
 */
static int debug_grow_pools_set(void *data, u64 val)
{
	struct ion_page_pool *pool;
	struct page *page;

	plist_for_each_entry(pool, &pools, list) {
		if (val != pool->list.prio)
			continue;
		page = ion_page_pool_alloc_pages(pool);
		if (page)
			ion_page_pool_add(pool, page);
	}

	return 0;
}

/* Reads report the current pool total; writes grow the matching pool. */
DEFINE_SIMPLE_ATTRIBUTE(debug_grow_pools_fops, debug_drop_pools_get,
			debug_grow_pools_set, "%llu\n");
#endif
186 static int ion_page_pool_total(bool high)
187 {
188 struct ion_page_pool *pool;
189 int total = 0;
191 plist_for_each_entry(pool, &pools, list) {
192 total += high ? (pool->high_count + pool->low_count) *
193 (1 << pool->order) :
194 pool->low_count * (1 << pool->order);
195 }
196 return total;
197 }
199 static int ion_page_pool_shrink(struct shrinker *shrinker,
200 struct shrink_control *sc)
201 {
202 struct ion_page_pool *pool;
203 int nr_freed = 0;
204 int i;
205 bool high;
206 int nr_to_scan = sc->nr_to_scan;
208 if (sc->gfp_mask & __GFP_HIGHMEM)
209 high = true;
211 if (nr_to_scan == 0)
212 return ion_page_pool_total(high);
214 plist_for_each_entry(pool, &pools, list) {
215 for (i = 0; i < nr_to_scan; i++) {
216 struct page *page;
218 mutex_lock(&pool->mutex);
219 if (high && pool->high_count) {
220 page = ion_page_pool_remove(pool, true);
221 } else if (pool->low_count) {
222 page = ion_page_pool_remove(pool, false);
223 } else {
224 mutex_unlock(&pool->mutex);
225 break;
226 }
227 mutex_unlock(&pool->mutex);
228 ion_page_pool_free_pages(pool, page);
229 nr_freed += (1 << pool->order);
230 }
231 nr_to_scan -= i;
232 }
234 return ion_page_pool_total(high);
235 }
237 struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order)
238 {
239 struct ion_page_pool *pool = kmalloc(sizeof(struct ion_page_pool),
240 GFP_KERNEL);
241 if (!pool)
242 return NULL;
243 pool->high_count = 0;
244 pool->low_count = 0;
245 INIT_LIST_HEAD(&pool->low_items);
246 INIT_LIST_HEAD(&pool->high_items);
247 pool->gfp_mask = gfp_mask;
248 pool->order = order;
249 mutex_init(&pool->mutex);
250 plist_node_init(&pool->list, order);
251 plist_add(&pool->list, &pools);
253 return pool;
254 }
/*
 * Unregister @pool from the global list and free its descriptor.
 * NOTE(review): nothing here frees pages still cached on the item
 * lists — presumably callers drain the pool (e.g. via the shrinker)
 * before destroying it; verify against callers, otherwise those pages
 * and items leak.
 */
void ion_page_pool_destroy(struct ion_page_pool *pool)
{
	plist_del(&pool->list, &pools);
	kfree(pool);
}
/*
 * Module init: hook the pools up to the VM by registering the shared
 * shrinker, and (when DEBUG_PAGE_POOL_SHRINKER is defined) expose
 * debugfs files to shrink/grow pools by hand.
 */
static int __init ion_page_pool_init(void)
{
	shrinker.shrink = ion_page_pool_shrink;
	shrinker.seeks = DEFAULT_SEEKS;
	shrinker.batch = 0;	/* 0 = use the default scan batch size */
	register_shrinker(&shrinker);
#ifdef DEBUG_PAGE_POOL_SHRINKER
	debugfs_create_file("ion_pools_shrink", 0644, NULL, NULL,
			    &debug_drop_pools_fops);
	debugfs_create_file("ion_pools_grow", 0644, NULL, NULL,
			    &debug_grow_pools_fops);
#endif
	return 0;
}
/* Module exit: drop the shrinker registration. */
static void __exit ion_page_pool_exit(void)
{
	unregister_shrinker(&shrinker);
}

module_init(ion_page_pool_init);
module_exit(ion_page_pool_exit);