/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast operation,
 * the code goes to great lengths to avoid additional overhead, such as
 * locking.
 *
 * LOCKING:
 *
 * The subsystem keeps a global list of dma_device structs, which is
 * protected by a mutex, dma_list_mutex.
 *
 * A subsystem can get access to a channel by calling dmaengine_get() followed
 * by dma_find_channel(), or if it has need for an exclusive channel it can
 * call dma_request_channel(). Once a channel is allocated a reference is
 * taken against its corresponding driver to disable removal.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered. It is only set up by the driver.
 *
 * See Documentation/driver-api/dmaengine for more details
 */
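
/*
 * Illustrative sketch (not part of this file) of the two acquisition models
 * described above, for a client that simply wants a channel:
 *
 *	// shared model: opportunistic use of public channels
 *	dmaengine_get();
 *	chan = dma_find_channel(DMA_MEMCPY);
 *	if (chan)
 *		... issue transactions on chan ...
 *	dmaengine_put();
 *
 *	// exclusive model: a dedicated channel matching a capability mask
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	if (chan)
 *		dma_release_channel(chan);
 */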
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/rculist.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/acpi_dma.h>
#include <linux/of_dma.h>
#include <linux/mempool.h>

static DEFINE_MUTEX(dma_list_mutex);
static DEFINE_IDA(dma_ida);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;
/* --- sysfs implementation --- */

/**
 * dev_to_dma_chan - convert a device pointer to its sysfs container object
 * @dev: device node
 *
 * Must be called under dma_list_mutex
 */
static struct dma_chan *dev_to_dma_chan(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	return chan_dev->chan;
}
static ssize_t memcpy_count_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->memcpy_count;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(memcpy_count);

static ssize_t bytes_transferred_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->bytes_transferred;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(bytes_transferred);

static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan)
		err = sprintf(buf, "%d\n", chan->client_count);
	else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(in_use);

static struct attribute *dma_dev_attrs[] = {
	&dev_attr_memcpy_count.attr,
	&dev_attr_bytes_transferred.attr,
	&dev_attr_in_use.attr,
	NULL,
};
ATTRIBUTE_GROUPS(dma_dev);
static void chan_dev_release(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	if (atomic_dec_and_test(chan_dev->idr_ref)) {
		mutex_lock(&dma_list_mutex);
		ida_remove(&dma_ida, chan_dev->dev_id);
		mutex_unlock(&dma_list_mutex);
		kfree(chan_dev->idr_ref);
	}
	kfree(chan_dev);
}

static struct class dma_devclass = {
	.name		= "dma",
	.dev_groups	= dma_dev_groups,
	.dev_release	= chan_dev_release,
};
/* --- client and device registration --- */

#define dma_device_satisfies_mask(device, mask) \
	__dma_device_satisfies_mask((device), &(mask))
static int
__dma_device_satisfies_mask(struct dma_device *device,
			    const dma_cap_mask_t *want)
{
	dma_cap_mask_t has;

	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
		   DMA_TX_TYPE_END);
	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}
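
/*
 * Illustrative sketch (not part of this file): the mask test above succeeds
 * only when every capability the caller asks for is present in the device:
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	dma_cap_set(DMA_XOR, mask);
 *	// true only if the device advertises both DMA_MEMCPY and DMA_XOR
 *	ok = dma_device_satisfies_mask(device, mask);
 */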
static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
	return chan->device->dev->driver->owner;
}

/**
 * balance_ref_count - catch up the channel reference count
 * @chan: channel to balance ->client_count versus dmaengine_ref_count
 *
 * balance_ref_count must be called under dma_list_mutex
 */
static void balance_ref_count(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);

	while (chan->client_count < dmaengine_ref_count) {
		__module_get(owner);
		chan->client_count++;
	}
}

/**
 * dma_chan_get - try to grab a dma channel's parent driver module
 * @chan: channel to grab
 *
 * Must be called under dma_list_mutex
 */
static int dma_chan_get(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);
	int ret;

	/* The channel is already in use, update client count */
	if (chan->client_count) {
		__module_get(owner);
		goto out;
	}

	if (!try_module_get(owner))
		return -ENODEV;

	/* allocate upon first client reference */
	if (chan->device->device_alloc_chan_resources) {
		ret = chan->device->device_alloc_chan_resources(chan);
		if (ret < 0)
			goto err_out;
	}

	if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
		balance_ref_count(chan);

out:
	chan->client_count++;
	return 0;

err_out:
	module_put(owner);
	return ret;
}

/**
 * dma_chan_put - drop a reference to a dma channel's parent driver module
 * @chan: channel to release
 *
 * Must be called under dma_list_mutex
 */
static void dma_chan_put(struct dma_chan *chan)
{
	/* This channel is not in use, bail out */
	if (!chan->client_count)
		return;

	chan->client_count--;
	module_put(dma_chan_to_owner(chan));

	/* This channel is not in use anymore, free it */
	if (!chan->client_count && chan->device->device_free_chan_resources) {
		/* Make sure all operations have completed */
		dmaengine_synchronize(chan);
		chan->device->device_free_chan_resources(chan);
	}

	/* If the channel is used via a DMA request router, free the mapping */
	if (chan->router && chan->router->route_free) {
		chan->router->route_free(chan->router->dev, chan->route_data);
		chan->router = NULL;
		chan->route_data = NULL;
	}
}
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	dma_async_issue_pending(chan);
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			dev_err(chan->device->dev, "%s: timeout!\n", __func__);
			return DMA_ERROR;
		}
		if (status != DMA_IN_PROGRESS)
			break;
		cpu_relax();
	} while (1);

	return status;
}
EXPORT_SYMBOL(dma_sync_wait);
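
/*
 * Illustrative sketch (not part of this file): dma_sync_wait() is the
 * simplest completion model; a client submits a descriptor and then busy
 * waits on its cookie (handle_error() is a placeholder):
 *
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 *	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
 *		// the five second poll above timed out, or the transfer failed
 *		handle_error();
 */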
/**
 * dma_cap_mask_all - enable iteration over all operation types
 */
static dma_cap_mask_t dma_cap_mask_all;

/**
 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan: associated channel for this entry
 */
struct dma_chan_tbl_ent {
	struct dma_chan *chan;
};

/**
 * channel_table - percpu lookup table for memory-to-memory offload providers
 */
static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];

static int __init dma_channel_table_init(void)
{
	enum dma_transaction_type cap;
	int err = 0;

	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

	/* 'interrupt', 'private', and 'slave' are channel capabilities,
	 * but are not associated with an operation so they do not need
	 * an entry in the channel_table
	 */
	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
	clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);

	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
		if (!channel_table[cap]) {
			err = -ENOMEM;
			break;
		}
	}

	if (err) {
		pr_err("initialization failure\n");
		for_each_dma_cap_mask(cap, dma_cap_mask_all)
			free_percpu(channel_table[cap]);
	}

	return err;
}
arch_initcall(dma_channel_table_init);

/**
 * dma_find_channel - find a channel to carry out the operation
 * @tx_type: transaction type
 */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	return this_cpu_read(channel_table[tx_type]->chan);
}
EXPORT_SYMBOL(dma_find_channel);
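
/*
 * Illustrative sketch (not part of this file): because the table is percpu,
 * the lookup is lock free; a client under dmaengine_get() can offload a copy
 * opportunistically and fall back to memcpy() when no channel is available
 * (offload_copy() is a placeholder for the caller's own submission helper):
 *
 *	struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
 *
 *	if (chan)
 *		offload_copy(chan, dst, src, len);
 *	else
 *		memcpy(dst, src, len);
 */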
/**
 * dma_issue_pending_all - flush all pending operations across all channels
 */
void dma_issue_pending_all(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			if (chan->client_count)
				device->device_issue_pending(chan);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(dma_issue_pending_all);

/**
 * dma_chan_is_local - returns true if the channel is in the same numa-node as the cpu
 */
static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
{
	int node = dev_to_node(chan->device->dev);
	return node == -1 || cpumask_test_cpu(cpu, cpumask_of_node(node));
}
/**
 * min_chan - returns the channel with min count and in the same numa-node as the cpu
 * @cap: capability to match
 * @cpu: cpu index which the channel should be close to
 *
 * If some channels are close to the given cpu, the one with the lowest
 * reference count is returned. Otherwise, cpu is ignored and only the
 * reference count is taken into account.
 * Must be called under dma_list_mutex.
 */
static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
{
	struct dma_device *device;
	struct dma_chan *chan;
	struct dma_chan *min = NULL;
	struct dma_chan *localmin = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (!dma_has_cap(cap, device->cap_mask) ||
		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			if (!chan->client_count)
				continue;
			if (!min || chan->table_count < min->table_count)
				min = chan;

			if (dma_chan_is_local(chan, cpu))
				if (!localmin ||
				    chan->table_count < localmin->table_count)
					localmin = chan;
		}
	}

	chan = localmin ? localmin : min;

	if (chan)
		chan->table_count++;

	return chan;
}
/**
 * dma_channel_rebalance - redistribute the available channels
 *
 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
 * operation type) in the SMP case, and operation isolation (avoid
 * multi-tasking channels) in the non-SMP case. Must be called under
 * dma_list_mutex.
 */
static void dma_channel_rebalance(void)
{
	struct dma_chan *chan;
	struct dma_device *device;
	int cpu;
	int cap;

	/* undo the last distribution */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_possible_cpu(cpu)
			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			chan->table_count = 0;
	}

	/* don't populate the channel_table if no clients are available */
	if (!dmaengine_ref_count)
		return;

	/* redistribute available channels */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_online_cpu(cpu) {
			chan = min_chan(cap, cpu);
			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
		}
}
int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
{
	struct dma_device *device;

	if (!chan || !caps)
		return -EINVAL;

	device = chan->device;

	/* check if the channel supports slave transactions */
	if (!(test_bit(DMA_SLAVE, device->cap_mask.bits) ||
	      test_bit(DMA_CYCLIC, device->cap_mask.bits)))
		return -ENXIO;

	/*
	 * Check whether it reports it uses the generic slave
	 * capabilities, if not, that means it doesn't support any
	 * kind of slave capabilities reporting.
	 */
	if (!device->directions)
		return -ENXIO;

	caps->src_addr_widths = device->src_addr_widths;
	caps->dst_addr_widths = device->dst_addr_widths;
	caps->directions = device->directions;
	caps->max_burst = device->max_burst;
	caps->residue_granularity = device->residue_granularity;
	caps->descriptor_reuse = device->descriptor_reuse;
	caps->cmd_pause = !!device->device_pause;
	caps->cmd_resume = !!device->device_resume;
	caps->cmd_terminate = !!device->device_terminate_all;

	return 0;
}
EXPORT_SYMBOL_GPL(dma_get_slave_caps);
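
/*
 * Illustrative sketch (not part of this file): a slave client can use the
 * snapshot above to validate a channel before configuring it, e.g.
 *
 *	struct dma_slave_caps caps;
 *
 *	if (!dma_get_slave_caps(chan, &caps) &&
 *	    (caps.directions & BIT(DMA_MEM_TO_DEV)) &&
 *	    (caps.dst_addr_widths & BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)))
 *		// channel can do 32-bit mem-to-device transfers
 *		ready = true;
 */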
static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
					  struct dma_device *dev,
					  dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan;

	if (mask && !__dma_device_satisfies_mask(dev, mask)) {
		dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__);
		return NULL;
	}
	/* devices with multiple channels need special handling as we need to
	 * ensure that all channels are either private or public.
	 */
	if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
		list_for_each_entry(chan, &dev->channels, device_node) {
			/* some channels are already publicly allocated */
			if (chan->client_count)
				return NULL;
		}

	list_for_each_entry(chan, &dev->channels, device_node) {
		if (chan->client_count) {
			dev_dbg(dev->dev, "%s: %s busy\n",
				__func__, dma_chan_name(chan));
			continue;
		}
		if (fn && !fn(chan, fn_param)) {
			dev_dbg(dev->dev, "%s: %s filter said false\n",
				__func__, dma_chan_name(chan));
			continue;
		}
		return chan;
	}

	return NULL;
}
static struct dma_chan *find_candidate(struct dma_device *device,
				       const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan = private_candidate(mask, device, fn, fn_param);
	int err;

	if (chan) {
		/* Found a suitable channel, try to grab, prep, and return it.
		 * We first set DMA_PRIVATE to disable balance_ref_count as this
		 * channel will not be published in the general-purpose
		 * allocator
		 */
		dma_cap_set(DMA_PRIVATE, device->cap_mask);
		device->privatecnt++;
		err = dma_chan_get(chan);

		if (err) {
			if (err == -ENODEV) {
				dev_dbg(device->dev, "%s: %s module removed\n",
					__func__, dma_chan_name(chan));
				list_del_rcu(&device->global_node);
			} else
				dev_dbg(device->dev,
					"%s: failed to get %s: (%d)\n",
					__func__, dma_chan_name(chan), err);

			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);

			chan = ERR_PTR(err);
		}
	}

	return chan ? chan : ERR_PTR(-EPROBE_DEFER);
}
/**
 * dma_get_slave_channel - try to get specific channel exclusively
 * @chan: target channel
 */
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
{
	int err = -EBUSY;

	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	if (chan->client_count == 0) {
		struct dma_device *device = chan->device;

		dma_cap_set(DMA_PRIVATE, device->cap_mask);
		device->privatecnt++;
		err = dma_chan_get(chan);
		if (err) {
			dev_dbg(chan->device->dev,
				"%s: failed to get %s: (%d)\n",
				__func__, dma_chan_name(chan), err);
			chan = NULL;
			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
		}
	} else
		chan = NULL;

	mutex_unlock(&dma_list_mutex);

	return chan;
}
EXPORT_SYMBOL_GPL(dma_get_slave_channel);
struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	chan = find_candidate(device, &mask, NULL, NULL);

	mutex_unlock(&dma_list_mutex);

	return IS_ERR(chan) ? NULL : chan;
}
EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);
/**
 * __dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback used to accept or reject candidate channels
 * @fn_param: opaque parameter to pass to dma_filter_fn
 *
 * Returns pointer to appropriate DMA channel on success or NULL.
 */
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan = NULL;

	/* Find a channel */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		chan = find_candidate(device, mask, fn, fn_param);
		if (!IS_ERR(chan))
			break;

		chan = NULL;
	}
	mutex_unlock(&dma_list_mutex);

	pr_debug("%s: %s (%s)\n",
		 __func__,
		 chan ? "success" : "fail",
		 chan ? dma_chan_name(chan) : NULL);

	return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);
static const struct dma_slave_map *dma_filter_match(struct dma_device *device,
						    const char *name,
						    struct device *dev)
{
	int i;

	if (!device->filter.mapcnt)
		return NULL;

	for (i = 0; i < device->filter.mapcnt; i++) {
		const struct dma_slave_map *map = &device->filter.map[i];

		if (!strcmp(map->devname, dev_name(dev)) &&
		    !strcmp(map->slave, name))
			return map;
	}

	return NULL;
}
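
/*
 * Illustrative sketch (not part of this file): on platforms without DT or
 * ACPI, a DMA driver can publish such a map so dma_request_chan() can match
 * client device / channel-name pairs; all "foo" names are hypothetical:
 *
 *	static const struct dma_slave_map foo_dma_map[] = {
 *		{ "foo-uart.0", "rx", FOO_FILTER_PARAM(0) },
 *		{ "foo-uart.0", "tx", FOO_FILTER_PARAM(1) },
 *	};
 *
 *	device->filter.map = foo_dma_map;
 *	device->filter.mapcnt = ARRAY_SIZE(foo_dma_map);
 *	device->filter.fn = foo_dma_filter_fn;
 */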
/**
 * dma_request_chan - try to allocate an exclusive slave channel
 * @dev: pointer to client device structure
 * @name: slave channel name
 *
 * Returns pointer to appropriate DMA channel on success or an error pointer.
 */
struct dma_chan *dma_request_chan(struct device *dev, const char *name)
{
	struct dma_device *d, *_d;
	struct dma_chan *chan = NULL;

	/* If device-tree is present get slave info from it */
	if (dev->of_node)
		chan = of_dma_request_slave_channel(dev->of_node, name);

	/* If device was enumerated by ACPI get slave info from ACPI */
	if (has_acpi_companion(dev) && !chan)
		chan = acpi_dma_request_slave_chan_by_name(dev, name);

	if (chan) {
		/* Valid channel found or requester needs to be deferred */
		if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
			return chan;
	}

	/* Try to find the channel via the DMA filter map(s) */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(d, _d, &dma_device_list, global_node) {
		dma_cap_mask_t mask;
		const struct dma_slave_map *map = dma_filter_match(d, name, dev);

		if (!map)
			continue;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		chan = find_candidate(d, &mask, d->filter.fn, map->param);
		if (!IS_ERR(chan))
			break;
	}
	mutex_unlock(&dma_list_mutex);

	return chan ? chan : ERR_PTR(-EPROBE_DEFER);
}
EXPORT_SYMBOL_GPL(dma_request_chan);
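
/*
 * Illustrative sketch (not part of this file): the usual slave client flow,
 * assuming a DT/ACPI/map entry named "rx" exists for the client device:
 *
 *	chan = dma_request_chan(dev, "rx");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);	// may be -EPROBE_DEFER
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_DEV_TO_MEM,
 *					   DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 *	...
 *	dma_release_channel(chan);
 */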
/**
 * dma_request_slave_channel - try to allocate an exclusive slave channel
 * @dev: pointer to client device structure
 * @name: slave channel name
 *
 * Returns pointer to appropriate DMA channel on success or NULL.
 */
struct dma_chan *dma_request_slave_channel(struct device *dev,
					   const char *name)
{
	struct dma_chan *ch = dma_request_chan(dev, name);
	if (IS_ERR(ch))
		return NULL;

	return ch;
}
EXPORT_SYMBOL_GPL(dma_request_slave_channel);
/**
 * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities
 * @mask: capabilities that the channel must satisfy
 *
 * Returns pointer to appropriate DMA channel on success or an error pointer.
 */
struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
{
	struct dma_chan *chan;

	if (!mask)
		return ERR_PTR(-ENODEV);

	chan = __dma_request_channel(mask, NULL, NULL);
	if (!chan) {
		mutex_lock(&dma_list_mutex);
		if (list_empty(&dma_device_list))
			chan = ERR_PTR(-EPROBE_DEFER);
		else
			chan = ERR_PTR(-ENODEV);
		mutex_unlock(&dma_list_mutex);
	}

	return chan;
}
EXPORT_SYMBOL_GPL(dma_request_chan_by_mask);
void dma_release_channel(struct dma_chan *chan)
{
	mutex_lock(&dma_list_mutex);
	WARN_ONCE(chan->client_count != 1,
		  "chan reference count %d != 1\n", chan->client_count);
	dma_chan_put(chan);
	/* drop PRIVATE cap enabled by __dma_request_channel() */
	if (--chan->device->privatecnt == 0)
		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);
/**
 * dmaengine_get - register interest in dma_channels
 */
void dmaengine_get(void)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count++;

	/* try to grab channels */
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			err = dma_chan_get(chan);
			if (err == -ENODEV) {
				/* module removed before we could use it */
				list_del_rcu(&device->global_node);
				break;
			} else if (err)
				dev_dbg(chan->device->dev,
					"%s: failed to get %s: (%d)\n",
					__func__, dma_chan_name(chan), err);
		}
	}

	/* if this is the first reference and there were channels
	 * waiting we need to rebalance to get those channels
	 * incorporated into the channel table
	 */
	if (dmaengine_ref_count == 1)
		dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_get);
/**
 * dmaengine_put - let dma drivers be removed when ref_count == 0
 */
void dmaengine_put(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count--;
	BUG_ON(dmaengine_ref_count < 0);
	/* drop channel references */
	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			dma_chan_put(chan);
	}
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_put);
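
/*
 * Illustrative sketch (not part of this file): dmaengine_get()/dmaengine_put()
 * bracket a subsystem's use of the shared channel table, typically at module
 * init/exit; the "foo" module is hypothetical:
 *
 *	static int __init foo_init(void)
 *	{
 *		dmaengine_get();
 *		return 0;
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		dmaengine_put();	// lets providers be unloaded again
 *	}
 */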
static bool device_has_all_tx_types(struct dma_device *device)
{
	/* A device that satisfies this test has channels that will never cause
	 * an async_tx channel switch event as all possible operation types can
	 * be handled.
	 */
#ifdef CONFIG_ASYNC_TX_DMA
	if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
		return false;
#endif

#if IS_ENABLED(CONFIG_ASYNC_MEMCPY)
	if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
		return false;
#endif

#if IS_ENABLED(CONFIG_ASYNC_XOR)
	if (!dma_has_cap(DMA_XOR, device->cap_mask))
		return false;

#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
	if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
		return false;
#endif
#endif

#if IS_ENABLED(CONFIG_ASYNC_PQ)
	if (!dma_has_cap(DMA_PQ, device->cap_mask))
		return false;

#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
		return false;
#endif
#endif

	return true;
}
static int get_dma_id(struct dma_device *device)
{
	int rc;

	do {
		if (!ida_pre_get(&dma_ida, GFP_KERNEL))
			return -ENOMEM;
		mutex_lock(&dma_list_mutex);
		rc = ida_get_new(&dma_ida, &device->dev_id);
		mutex_unlock(&dma_list_mutex);
	} while (rc == -EAGAIN);

	return rc;
}
/**
 * dma_async_device_register - registers DMA devices found
 * @device: &dma_device
 */
int dma_async_device_register(struct dma_device *device)
{
	int chancnt = 0, rc;
	struct dma_chan *chan;
	atomic_t *idr_ref;

	if (!device)
		return -ENODEV;

	/* validate device routines */
	if (!device->dev) {
		pr_err("DMA device must have dev\n");
		return -EIO;
	}
	if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_MEMCPY");
		return -EIO;
	}

	if (dma_has_cap(DMA_XOR, device->cap_mask) && !device->device_prep_dma_xor) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_XOR");
		return -EIO;
	}

	if (dma_has_cap(DMA_XOR_VAL, device->cap_mask) && !device->device_prep_dma_xor_val) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_XOR_VAL");
		return -EIO;
	}

	if (dma_has_cap(DMA_PQ, device->cap_mask) && !device->device_prep_dma_pq) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_PQ");
		return -EIO;
	}

	if (dma_has_cap(DMA_PQ_VAL, device->cap_mask) && !device->device_prep_dma_pq_val) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_PQ_VAL");
		return -EIO;
	}

	if (dma_has_cap(DMA_MEMSET, device->cap_mask) && !device->device_prep_dma_memset) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_MEMSET");
		return -EIO;
	}

	if (dma_has_cap(DMA_INTERRUPT, device->cap_mask) && !device->device_prep_dma_interrupt) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_INTERRUPT");
		return -EIO;
	}

	if (dma_has_cap(DMA_CYCLIC, device->cap_mask) && !device->device_prep_dma_cyclic) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_CYCLIC");
		return -EIO;
	}

	if (dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && !device->device_prep_interleaved_dma) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_INTERLEAVE");
		return -EIO;
	}

	if (!device->device_tx_status) {
		dev_err(device->dev, "Device tx_status is not defined\n");
		return -EIO;
	}

	if (!device->device_issue_pending) {
		dev_err(device->dev, "Device issue_pending is not defined\n");
		return -EIO;
	}
	/* note: this only matters in the
	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
	 */
	if (device_has_all_tx_types(device))
		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);

	idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
	if (!idr_ref)
		return -ENOMEM;
	rc = get_dma_id(device);
	if (rc != 0) {
		kfree(idr_ref);
		return rc;
	}

	atomic_set(idr_ref, 0);

	/* represent channels in sysfs. Probably want devs too */
	list_for_each_entry(chan, &device->channels, device_node) {
		rc = -ENOMEM;
		chan->local = alloc_percpu(typeof(*chan->local));
		if (chan->local == NULL)
			goto err_out;
		chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
		if (chan->dev == NULL) {
			free_percpu(chan->local);
			chan->local = NULL;
			goto err_out;
		}

		chan->chan_id = chancnt++;
		chan->dev->device.class = &dma_devclass;
		chan->dev->device.parent = device->dev;
		chan->dev->chan = chan;
		chan->dev->idr_ref = idr_ref;
		chan->dev->dev_id = device->dev_id;
		atomic_inc(idr_ref);
		dev_set_name(&chan->dev->device, "dma%dchan%d",
			     device->dev_id, chan->chan_id);

		rc = device_register(&chan->dev->device);
		if (rc) {
			free_percpu(chan->local);
			chan->local = NULL;
			kfree(chan->dev);
			atomic_dec(idr_ref);
			goto err_out;
		}
		chan->client_count = 0;
	}

	if (!chancnt) {
		dev_err(device->dev, "%s: device has no channels!\n", __func__);
		rc = -ENODEV;
		goto err_out;
	}

	device->chancnt = chancnt;

	mutex_lock(&dma_list_mutex);
	/* take references on public channels */
	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
		list_for_each_entry(chan, &device->channels, device_node) {
			/* if clients are already waiting for channels we need
			 * to take references on their behalf
			 */
			if (dma_chan_get(chan) == -ENODEV) {
				/* note we can only get here for the first
				 * channel as the remaining channels are
				 * guaranteed to get a reference
				 */
				rc = -ENODEV;
				mutex_unlock(&dma_list_mutex);
				goto err_out;
			}
		}
	list_add_tail_rcu(&device->global_node, &dma_device_list);
	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
		device->privatecnt++;	/* Always private */
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	return 0;

err_out:
	/* if we never registered a channel just release the idr */
	if (atomic_read(idr_ref) == 0) {
		mutex_lock(&dma_list_mutex);
		ida_remove(&dma_ida, device->dev_id);
		mutex_unlock(&dma_list_mutex);
		kfree(idr_ref);
		return rc;
	}

	list_for_each_entry(chan, &device->channels, device_node) {
		if (chan->local == NULL)
			continue;
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
	return rc;
}
EXPORT_SYMBOL(dma_async_device_register);
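
/*
 * Illustrative sketch (not part of this file): the setup a provider driver
 * performs before calling dma_async_device_register(); all "foo" names are
 * hypothetical, dma_cookie_status() is the common tx_status helper:
 *
 *	dma_cap_zero(fd->ddev.cap_mask);
 *	dma_cap_set(DMA_SLAVE, fd->ddev.cap_mask);
 *	fd->ddev.device_prep_slave_sg = foo_prep_slave_sg;
 *	fd->ddev.device_tx_status = dma_cookie_status;
 *	fd->ddev.device_issue_pending = foo_issue_pending;
 *	fd->ddev.dev = &pdev->dev;
 *	INIT_LIST_HEAD(&fd->ddev.channels);
 *	// add each foo channel to fd->ddev.channels here
 *	rc = dma_async_device_register(&fd->ddev);
 */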
/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: &dma_device
 *
 * This routine is called by dma driver exit routines; dmaengine holds module
 * references to prevent it being called while channels are in use.
 */
void dma_async_device_unregister(struct dma_device *device)
{
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	list_del_rcu(&device->global_node);
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	list_for_each_entry(chan, &device->channels, device_node) {
		WARN_ONCE(chan->client_count,
			  "%s called while %d clients hold a reference\n",
			  __func__, chan->client_count);
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
}
EXPORT_SYMBOL(dma_async_device_unregister);
static void dmam_device_release(struct device *dev, void *res)
{
	struct dma_device *device;

	device = *(struct dma_device **)res;
	dma_async_device_unregister(device);
}

/**
 * dmaenginem_async_device_register - registers DMA devices found
 * @device: &dma_device
 *
 * The operation is managed and will be undone on driver detach.
 */
int dmaenginem_async_device_register(struct dma_device *device)
{
	void *p;
	int ret;

	p = devres_alloc(dmam_device_release, sizeof(void *), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	ret = dma_async_device_register(device);
	if (!ret) {
		*(struct dma_device **)p = device;
		devres_add(device->dev, p);
	} else {
		devres_free(p);
	}

	return ret;
}
EXPORT_SYMBOL(dmaenginem_async_device_register);
struct dmaengine_unmap_pool {
	struct kmem_cache *cache;
	const char *name;
	mempool_t *pool;
	size_t size;
};

#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
static struct dmaengine_unmap_pool unmap_pool[] = {
	__UNMAP_POOL(2),
#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
	__UNMAP_POOL(16),
	__UNMAP_POOL(128),
	__UNMAP_POOL(256),
#endif
};

static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
{
	int order = get_count_order(nr);

	switch (order) {
	case 0 ... 1:
		return &unmap_pool[0];
#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
	case 2 ... 4:
		return &unmap_pool[1];
	case 5 ... 7:
		return &unmap_pool[2];
	case 8:
		return &unmap_pool[3];
#endif
	default:
		BUG();
		return NULL;
	}
}
static void dmaengine_unmap(struct kref *kref)
{
	struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref);
	struct device *dev = unmap->dev;
	int cnt, i;

	cnt = unmap->to_cnt;
	for (i = 0; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_TO_DEVICE);
	cnt += unmap->from_cnt;
	for (; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_FROM_DEVICE);
	cnt += unmap->bidi_cnt;
	for (; i < cnt; i++) {
		if (unmap->addr[i] == 0)
			continue;
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_BIDIRECTIONAL);
	}
	cnt = unmap->map_cnt;
	mempool_free(unmap, __get_unmap_pool(cnt)->pool);
}

void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
{
	if (unmap)
		kref_put(&unmap->kref, dmaengine_unmap);
}
EXPORT_SYMBOL_GPL(dmaengine_unmap_put);
static void dmaengine_destroy_unmap_pool(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
		struct dmaengine_unmap_pool *p = &unmap_pool[i];

		mempool_destroy(p->pool);
		p->pool = NULL;
		kmem_cache_destroy(p->cache);
		p->cache = NULL;
	}
}

static int __init dmaengine_init_unmap_pool(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
		struct dmaengine_unmap_pool *p = &unmap_pool[i];
		size_t size;

		size = sizeof(struct dmaengine_unmap_data) +
		       sizeof(dma_addr_t) * p->size;

		p->cache = kmem_cache_create(p->name, size, 0,
					     SLAB_HWCACHE_ALIGN, NULL);
		if (!p->cache)
			break;
		p->pool = mempool_create_slab_pool(1, p->cache);
		if (!p->pool)
			break;
	}

	if (i == ARRAY_SIZE(unmap_pool))
		return 0;

	dmaengine_destroy_unmap_pool();
	return -ENOMEM;
}
struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
{
	struct dmaengine_unmap_data *unmap;

	unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);
	if (!unmap)
		return NULL;

	memset(unmap, 0, sizeof(*unmap));
	kref_init(&unmap->kref);
	unmap->dev = dev;
	unmap->map_cnt = nr;

	return unmap;
}
EXPORT_SYMBOL(dmaengine_get_unmap_data);
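
/*
 * Illustrative sketch (not part of this file): a driver mapping one source
 * and one destination page for a copy would use the pool like this; note the
 * layout matches dmaengine_unmap() above (to_cnt entries first, then
 * from_cnt, then bidi_cnt):
 *
 *	unmap = dmaengine_get_unmap_data(dev, 2, GFP_NOWAIT);
 *	if (!unmap)
 *		return -ENOMEM;
 *	unmap->len = len;
 *	unmap->addr[0] = dma_map_page(dev, src_pg, 0, len, DMA_TO_DEVICE);
 *	unmap->to_cnt = 1;
 *	unmap->addr[1] = dma_map_page(dev, dst_pg, 0, len, DMA_FROM_DEVICE);
 *	unmap->from_cnt = 1;
 *	...
 *	dmaengine_unmap_put(unmap);	// unmaps both pages on the last put
 */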
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
				  struct dma_chan *chan)
{
	tx->chan = chan;
#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
	spin_lock_init(&tx->lock);
#endif
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);

/* dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	if (!tx)
		return DMA_COMPLETE;

	while (tx->cookie == -EBUSY) {
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			dev_err(tx->chan->device->dev,
				"%s timeout waiting for descriptor submission\n",
				__func__);
			return DMA_ERROR;
		}
		cpu_relax();
	}
	return dma_sync_wait(tx->chan, tx->cookie);
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
/* dma_run_dependencies - helper routine for dma drivers to process
 *	(start) dependent operations on their target channel
 * @tx: transaction with dependencies
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
	struct dma_async_tx_descriptor *dep = txd_next(tx);
	struct dma_async_tx_descriptor *dep_next;
	struct dma_chan *chan;

	if (!dep)
		return;

	/* we'll submit tx->next now, so clear the link */
	txd_clear_next(tx);
	chan = dep->chan;

	/* keep submitting up until a channel switch is detected
	 * in that case we will be called again as a result of
	 * processing the interrupt from async_tx_channel_switch
	 */
	for (; dep; dep = dep_next) {
		txd_lock(dep);
		txd_clear_parent(dep);
		dep_next = txd_next(dep);
		if (dep_next && dep_next->chan == chan)
			txd_clear_next(dep); /* ->next will be submitted */
		else
			dep_next = NULL; /* submit current dep and terminate */
		txd_unlock(dep);

		dep->tx_submit(dep);
	}

	chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);
static int __init dma_bus_init(void)
{
	int err = dmaengine_init_unmap_pool();

	if (err)
		return err;
	return class_register(&dma_devclass);
}
arch_initcall(dma_bus_init);