/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 * Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This software may be redistributed and/or modified under the terms of
 * the GNU General Public License ("GPL") version 2 as published by the
 * Free Software Foundation.
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list of free blocks within the page.  Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */
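
/*
 * Illustrative usage sketch (not part of this file; the device, pool name
 * and sizes are hypothetical).  A driver typically creates one pool per
 * fixed-size structure it needs to DMA, then allocates and frees blocks
 * from it:
 *
 *	struct dma_pool *pool;
 *	dma_addr_t dma;
 *	void *cpu_addr;
 *
 *	pool = dma_pool_create("my_descs", &pdev->dev, 64, 8, 0);
 *	if (!pool)
 *		return -ENOMEM;
 *	cpu_addr = dma_pool_alloc(pool, GFP_KERNEL, &dma);
 *	if (cpu_addr) {
 *		... program the device with 'dma', use 'cpu_addr' ...
 *		dma_pool_free(pool, cpu_addr, dma);
 *	}
 *	dma_pool_destroy(pool);
 */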
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
#define DMAPOOL_DEBUG 1
#endif
struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	size_t size;
	struct device *dev;
	size_t allocation;
	size_t boundary;
	char name[32];
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned int in_use;
	unsigned int offset;
};
static DEFINE_MUTEX(pools_lock);
static DEFINE_MUTEX(pools_reg_lock);
static ssize_t
show_pools(struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		spin_lock_irq(&pool->lock);
		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}
		spin_unlock_irq(&pool->lock);

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
				 pool->name, blocks,
				 pages * (pool->allocation / pool->size),
				 pool->size, pages);
		size -= temp;
		next += temp;
	}
	mutex_unlock(&pools_lock);

	return PAGE_SIZE - size;
}

static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL);
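
/*
 * Example of the resulting sysfs output (pool names and values are purely
 * illustrative):
 *
 *	$ cat /sys/devices/.../pools
 *	poolinfo - 0.1
 *	buffer-2048        16   32 2048  2
 *	buffer-512          1    8  512  1
 *
 * Columns are: pool name, blocks in use, total blocks, block size in
 * bytes, and number of pages backing the pool.
 */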
/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: !in_interrupt()
 *
 * Returns a dma allocation pool with the requested characteristics, or
 * null if one can't be created.  Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;

	if (align == 0) {
		align = 1;
	} else if (align & (align - 1)) {
		return NULL;
	}

	if (size == 0) {
		return NULL;
	} else if (size < 4) {
		size = 4;
	}

	if ((size % align) != 0)
		size = ALIGN(size, align);

	allocation = max_t(size_t, size, PAGE_SIZE);

	if (!boundary) {
		boundary = allocation;
	} else if ((boundary < size) || (boundary & (boundary - 1))) {
		return NULL;
	}

	retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
	if (!retval)
		return retval;

	strlcpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;

	if (dev) {
		int ret;
		bool empty = false;

		/*
		 * pools_lock ensures that the ->dma_pools list does not get
		 * corrupted.  pools_reg_lock ensures that there is not a race
		 * between dma_pool_create() and dma_pool_destroy() or within
		 * dma_pool_create() when the first invocation of
		 * dma_pool_create() failed on device_create_file() and the
		 * second assumes that it has been done (I know it is a short
		 * window).
		 */
		mutex_lock(&pools_reg_lock);
		mutex_lock(&pools_lock);
		if (list_empty(&dev->dma_pools))
			empty = true;
		/* note: not currently insisting "name" be unique */
		list_add(&retval->pools, &dev->dma_pools);
		mutex_unlock(&pools_lock);
		if (empty) {
			ret = device_create_file(dev, &dev_attr_pools);
			if (ret) {
				mutex_lock(&pools_lock);
				list_del(&retval->pools);
				mutex_unlock(&pools_lock);

				kfree(retval);
				retval = NULL;
			}
		}
		mutex_unlock(&pools_reg_lock);
	} else
		INIT_LIST_HEAD(&retval->pools);

	return retval;
}
EXPORT_SYMBOL(dma_pool_create);
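
/*
 * Worked example of the sizing rules above (pool name and numbers are
 * illustrative): dma_pool_create("foo", dev, 20, 16, 0) rounds the block
 * size up to ALIGN(20, 16) == 32 bytes, sets 'allocation' to
 * max(32, PAGE_SIZE), and, since @boundary is zero, only prevents blocks
 * from crossing an allocation.  Passing boundary == 1024 instead would
 * additionally keep every returned block inside a single 1KByte-aligned
 * window.
 */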
static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int offset = 0;
	unsigned int next_boundary = pool->boundary;

	do {
		unsigned int next = offset + pool->size;
		if (unlikely((next + pool->size) >= next_boundary)) {
			next = next_boundary;
			next_boundary += pool->boundary;
		}
		*(int *)(page->vaddr + offset) = next;
		offset = next;
	} while (offset < pool->allocation);
}
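
/*
 * The loop above threads the free list through the page itself: the first
 * bytes of each free block hold the offset of the next free block.  For
 * example (illustrative numbers), with size == 64 and
 * boundary == allocation == 4096, a fresh page is chained as
 * 0 -> 64 -> 128 -> ... -> 3968 -> 4096, where the final value
 * (>= pool->allocation) terminates the chain; dma_pool_alloc() then simply
 * pops blocks off the head via page->offset.
 */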
static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (page->vaddr) {
#ifdef DMAPOOL_DEBUG
		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		pool_initialise_page(pool, page);
		page->in_use = 0;
		page->offset = 0;
	} else {
		kfree(page);
		page = NULL;
	}
	return page;
}

static inline int is_page_busy(struct dma_page *page)
{
	return page->in_use != 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

#ifdef DMAPOOL_DEBUG
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
}
/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	bool empty = false;

	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (pool->dev && list_empty(&pool->dev->dma_pools))
		empty = true;
	mutex_unlock(&pools_lock);
	if (empty)
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_reg_lock);

	while (!list_empty(&pool->page_list)) {
		struct dma_page *page;
		page = list_entry(pool->page_list.next,
				  struct dma_page, page_list);
		if (is_page_busy(page)) {
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_destroy %s, %p busy\n",
					pool->name, page->vaddr);
			else
				printk(KERN_ERR
				       "dma_pool_destroy %s, %p busy\n",
				       pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);
/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	size_t offset;
	void *retval;

	might_sleep_if(mem_flags & __GFP_WAIT);

	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (page->offset < pool->allocation)
			goto ready;
	}

	/* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
	spin_unlock_irqrestore(&pool->lock, flags);

	page = pool_alloc_page(pool, mem_flags);
	if (!page)
		return NULL;

	spin_lock_irqsave(&pool->lock, flags);

	list_add(&page->page_list, &pool->page_list);
 ready:
	page->in_use++;
	offset = page->offset;
	page->offset = *(int *)(page->vaddr + offset);
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef DMAPOOL_DEBUG
	{
		int i;
		u8 *data = retval;
		/* page->offset is stored in first 4 bytes */
		for (i = sizeof(page->offset); i < pool->size; i++) {
			if (data[i] == POOL_POISON_FREED)
				continue;
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_alloc %s, %p (corrupted)\n",
					pool->name, retval);
			else
				pr_err("dma_pool_alloc %s, %p (corrupted)\n",
					pool->name, retval);

			/*
			 * Dump the first 4 bytes even if they are not
			 * POOL_POISON_FREED
			 */
			print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
					data, pool->size, 1);
			break;
		}
	}
	memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
	spin_unlock_irqrestore(&pool->lock, flags);
	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);
static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	struct dma_page *page;

	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if (dma < (page->dma + pool->allocation))
			return page;
	}
	return NULL;
}
/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	unsigned int offset;

	spin_lock_irqsave(&pool->lock, flags);
	page = pool_find_page(pool, dma);
	if (!page) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p/%lx (bad dma)\n",
				pool->name, vaddr, (unsigned long)dma);
		else
			printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
			       pool->name, vaddr, (unsigned long)dma);
		return;
	}

	offset = vaddr - page->vaddr;
#ifdef DMAPOOL_DEBUG
	if ((dma - page->dma) != offset) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p (bad vaddr)/%Lx\n",
				pool->name, vaddr, (unsigned long long)dma);
		else
			printk(KERN_ERR
			       "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
			       pool->name, vaddr, (unsigned long long)dma);
		return;
	}
	{
		unsigned int chain = page->offset;
		while (chain < pool->allocation) {
			if (chain != offset) {
				chain = *(int *)(page->vaddr + chain);
				continue;
			}
			spin_unlock_irqrestore(&pool->lock, flags);
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_free %s, dma %Lx already free\n",
					pool->name, (unsigned long long)dma);
			else
				printk(KERN_ERR
				       "dma_pool_free %s, dma %Lx already free\n",
				       pool->name, (unsigned long long)dma);
			return;
		}
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	page->in_use--;
	*(int *)vaddr = page->offset;
	page->offset = offset;
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);
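
/*
 * Illustrative allocation/free pairing from atomic context (variable names
 * are hypothetical).  Because the pool lock is an irq-safe spinlock, both
 * calls may be made from interrupt context provided GFP_ATOMIC is used for
 * the allocation:
 *
 *	desc = dma_pool_alloc(pool, GFP_ATOMIC, &desc_dma);
 *	if (desc) {
 *		... hand 'desc_dma' to the hardware ...
 *		dma_pool_free(pool, desc, desc_dma);
 *	}
 */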
/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  DMA pool created with this function is
 * automatically destroyed on driver detach.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
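
/*
 * Illustrative use from a driver's probe routine (function, struct and
 * pool names are hypothetical).  The pool is released automatically by
 * devres when the driver detaches, so no explicit dmam_pool_destroy()
 * call is needed on the error or removal paths:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct foo *priv = ...;
 *
 *		priv->desc_pool = dmam_pool_create("foo-desc", &pdev->dev,
 *						   sizeof(struct foo_desc),
 *						   8, 0);
 *		if (!priv->desc_pool)
 *			return -ENOMEM;
 *		...
 *	}
 */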
/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool));
	dma_pool_destroy(pool);
}
EXPORT_SYMBOL(dmam_pool_destroy);