1 /*
2 * EDMA3 Driver
3 *
4 * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as
8 * published by the Free Software Foundation version 2.
9 *
10 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
11 * kind, whether express or implied; without even the implied warranty
12 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15 #include <linux/kernel.h>
16 #include <linux/init.h>
17 #include <linux/module.h>
18 #include <linux/interrupt.h>
19 #include <linux/platform_device.h>
20 #include <linux/slab.h>
21 #include <linux/io.h>
23 #include <mach/edma.h>
25 /* Offsets matching "struct edmacc_param" */
26 #define PARM_OPT 0x00
27 #define PARM_SRC 0x04
28 #define PARM_A_B_CNT 0x08
29 #define PARM_DST 0x0c
30 #define PARM_SRC_DST_BIDX 0x10
31 #define PARM_LINK_BCNTRLD 0x14
32 #define PARM_SRC_DST_CIDX 0x18
33 #define PARM_CCNT 0x1c
35 #define PARM_SIZE 0x20
37 /* Offsets for EDMA CC global channel registers and their shadows */
38 #define SH_ER 0x00 /* 64 bits */
39 #define SH_ECR 0x08 /* 64 bits */
40 #define SH_ESR 0x10 /* 64 bits */
41 #define SH_CER 0x18 /* 64 bits */
42 #define SH_EER 0x20 /* 64 bits */
43 #define SH_EECR 0x28 /* 64 bits */
44 #define SH_EESR 0x30 /* 64 bits */
45 #define SH_SER 0x38 /* 64 bits */
46 #define SH_SECR 0x40 /* 64 bits */
47 #define SH_IER 0x50 /* 64 bits */
48 #define SH_IECR 0x58 /* 64 bits */
49 #define SH_IESR 0x60 /* 64 bits */
50 #define SH_IPR 0x68 /* 64 bits */
51 #define SH_ICR 0x70 /* 64 bits */
52 #define SH_IEVAL 0x78
53 #define SH_QER 0x80
54 #define SH_QEER 0x84
55 #define SH_QEECR 0x88
56 #define SH_QEESR 0x8c
57 #define SH_QSER 0x90
58 #define SH_QSECR 0x94
59 #define SH_SIZE 0x200
61 /* Offsets for EDMA CC global registers */
62 #define EDMA_REV 0x0000
63 #define EDMA_CCCFG 0x0004
64 #define EDMA_QCHMAP 0x0200 /* 8 registers */
65 #define EDMA_DMAQNUM 0x0240 /* 8 registers (4 on OMAP-L1xx) */
66 #define EDMA_QDMAQNUM 0x0260
67 #define EDMA_QUETCMAP 0x0280
68 #define EDMA_QUEPRI 0x0284
69 #define EDMA_EMR 0x0300 /* 64 bits */
70 #define EDMA_EMCR 0x0308 /* 64 bits */
71 #define EDMA_QEMR 0x0310
72 #define EDMA_QEMCR 0x0314
73 #define EDMA_CCERR 0x0318
74 #define EDMA_CCERRCLR 0x031c
75 #define EDMA_EEVAL 0x0320
76 #define EDMA_DRAE 0x0340 /* 4 x 64 bits */
77 #define EDMA_QRAE 0x0380 /* 4 registers */
78 #define EDMA_QUEEVTENTRY 0x0400 /* 2 x 16 registers */
79 #define EDMA_QSTAT 0x0600 /* 2 registers */
80 #define EDMA_QWMTHRA 0x0620
81 #define EDMA_QWMTHRB 0x0624
82 #define EDMA_CCSTAT 0x0640
84 #define EDMA_M 0x1000 /* global channel registers */
85 #define EDMA_ECR 0x1008
86 #define EDMA_ECRH 0x100C
87 #define EDMA_SHADOW0 0x2000 /* 4 regions shadowing global channels */
88 #define EDMA_PARM 0x4000 /* 128 param entries */
90 #define PARM_OFFSET(param_no) (EDMA_PARM + ((param_no) << 5))
92 #define EDMA_DCHMAP 0x0100 /* 64 registers */
93 #define CHMAP_EXIST BIT(24)
96 /* function that maps crossbar events to channels */
97 int (*xbar_event_to_channel_map)(unsigned event, unsigned *channel,
98 struct event_to_channel_map *xbar_event_map) = NULL;
100 /*****************************************************************************/
102 static void __iomem *edmacc_regs_base[EDMA_MAX_CC];
104 static inline unsigned int edma_read(unsigned ctlr, int offset)
105 {
106 return (unsigned int)__raw_readl(edmacc_regs_base[ctlr] + offset);
107 }
109 static inline void edma_write(unsigned ctlr, int offset, int val)
110 {
111 __raw_writel(val, edmacc_regs_base[ctlr] + offset);
112 }
113 static inline void edma_modify(unsigned ctlr, int offset, unsigned and,
114 unsigned or)
115 {
116 unsigned val = edma_read(ctlr, offset);
117 val &= and;
118 val |= or;
119 edma_write(ctlr, offset, val);
120 }
121 static inline void edma_and(unsigned ctlr, int offset, unsigned and)
122 {
123 unsigned val = edma_read(ctlr, offset);
124 val &= and;
125 edma_write(ctlr, offset, val);
126 }
127 static inline void edma_or(unsigned ctlr, int offset, unsigned or)
128 {
129 unsigned val = edma_read(ctlr, offset);
130 val |= or;
131 edma_write(ctlr, offset, val);
132 }
133 static inline unsigned int edma_read_array(unsigned ctlr, int offset, int i)
134 {
135 return edma_read(ctlr, offset + (i << 2));
136 }
137 static inline void edma_write_array(unsigned ctlr, int offset, int i,
138 unsigned val)
139 {
140 edma_write(ctlr, offset + (i << 2), val);
141 }
142 static inline void edma_modify_array(unsigned ctlr, int offset, int i,
143 unsigned and, unsigned or)
144 {
145 edma_modify(ctlr, offset + (i << 2), and, or);
146 }
147 static inline void edma_or_array(unsigned ctlr, int offset, int i, unsigned or)
148 {
149 edma_or(ctlr, offset + (i << 2), or);
150 }
151 static inline void edma_or_array2(unsigned ctlr, int offset, int i, int j,
152 unsigned or)
153 {
154 edma_or(ctlr, offset + ((i*2 + j) << 2), or);
155 }
156 static inline void edma_write_array2(unsigned ctlr, int offset, int i, int j,
157 unsigned val)
158 {
159 edma_write(ctlr, offset + ((i*2 + j) << 2), val);
160 }
161 static inline unsigned int edma_shadow0_read(unsigned ctlr, int offset)
162 {
163 return edma_read(ctlr, EDMA_SHADOW0 + offset);
164 }
165 static inline unsigned int edma_shadow0_read_array(unsigned ctlr, int offset,
166 int i)
167 {
168 return edma_read(ctlr, EDMA_SHADOW0 + offset + (i << 2));
169 }
170 static inline void edma_shadow0_write(unsigned ctlr, int offset, unsigned val)
171 {
172 edma_write(ctlr, EDMA_SHADOW0 + offset, val);
173 }
174 static inline void edma_shadow0_write_array(unsigned ctlr, int offset, int i,
175 unsigned val)
176 {
177 edma_write(ctlr, EDMA_SHADOW0 + offset + (i << 2), val);
178 }
179 static inline unsigned int edma_parm_read(unsigned ctlr, int offset,
180 int param_no)
181 {
182 return edma_read(ctlr, EDMA_PARM + offset + (param_no << 5));
183 }
184 static inline void edma_parm_write(unsigned ctlr, int offset, int param_no,
185 unsigned val)
186 {
187 edma_write(ctlr, EDMA_PARM + offset + (param_no << 5), val);
188 }
189 static inline void edma_parm_modify(unsigned ctlr, int offset, int param_no,
190 unsigned and, unsigned or)
191 {
192 edma_modify(ctlr, EDMA_PARM + offset + (param_no << 5), and, or);
193 }
194 static inline void edma_parm_and(unsigned ctlr, int offset, int param_no,
195 unsigned and)
196 {
197 edma_and(ctlr, EDMA_PARM + offset + (param_no << 5), and);
198 }
199 static inline void edma_parm_or(unsigned ctlr, int offset, int param_no,
200 unsigned or)
201 {
202 edma_or(ctlr, EDMA_PARM + offset + (param_no << 5), or);
203 }
205 static inline void set_bits(int offset, int len, unsigned long *p)
206 {
207 for (; len > 0; len--)
208 set_bit(offset + (len - 1), p);
209 }
211 static inline void clear_bits(int offset, int len, unsigned long *p)
212 {
213 for (; len > 0; len--)
214 clear_bit(offset + (len - 1), p);
215 }
217 /*****************************************************************************/
219 struct edma *edma_info[EDMA_MAX_CC];
220 static int arch_num_cc;
222 /* dummy param set used to (re)initialize parameter RAM slots */
223 static const struct edmacc_param dummy_paramset = {
224 .link_bcntrld = 0xffff,
225 .ccnt = 1,
226 };
228 /*****************************************************************************/
230 static void map_dmach_queue(unsigned ctlr, unsigned ch_no,
231 enum dma_event_q queue_no)
232 {
233 int bit = (ch_no & 0x7) * 4;
235 /* default to low priority queue */
236 if (queue_no == EVENTQ_DEFAULT)
237 queue_no = edma_info[ctlr]->default_queue;
239 queue_no &= 7;
240 edma_modify_array(ctlr, EDMA_DMAQNUM, (ch_no >> 3),
241 ~(0x7 << bit), queue_no << bit);
242 }
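/*
 * Worked example (editorial note, not part of the driver): each DMA
 * channel owns a field in one of the EDMA_DMAQNUM registers, eight
 * channels per register, four bits apart.  For channel 13 on the
 * default queue the call above resolves to:
 *
 *	bit      = (13 & 0x7) * 4 = 20
 *	register = EDMA_DMAQNUM + ((13 >> 3) << 2) = DMAQNUM1
 *
 * so the 3-bit queue field at bits 22..20 of DMAQNUM1 is cleared and
 * rewritten with the queue number, leaving the other channels' fields
 * untouched.
 */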
244 static void __init map_queue_tc(unsigned ctlr, int queue_no, int tc_no)
245 {
246 int bit = queue_no * 4;
247 edma_modify(ctlr, EDMA_QUETCMAP, ~(0x7 << bit), ((tc_no & 0x7) << bit));
248 }
250 static void __init assign_priority_to_queue(unsigned ctlr, int queue_no,
251 int priority)
252 {
253 int bit = queue_no * 4;
254 edma_modify(ctlr, EDMA_QUEPRI, ~(0x7 << bit),
255 ((priority & 0x7) << bit));
256 }
258 /**
259 * map_dmach_param - Maps channel number to param entry number
260 *
261 * This maps the DMA channel number to a param entry number. In
262 * other words, using the DMA channel mapping registers, a param entry
263 * can be mapped to any channel.
264 *
265 * Callers are responsible for ensuring the channel mapping logic is
266 * included in that particular EDMA variant (e.g. dm646x).
267 *
268 */
269 static void __init map_dmach_param(unsigned ctlr)
270 {
271 int i;
272 for (i = 0; i < EDMA_MAX_DMACH; i++)
273 edma_write_array(ctlr, EDMA_DCHMAP, i, (i << 5));
274 }
276 static inline void
277 setup_dma_interrupt(unsigned lch,
278 void (*callback)(unsigned channel, u16 ch_status, void *data),
279 void *data)
280 {
281 unsigned ctlr;
283 ctlr = EDMA_CTLR(lch);
284 lch = EDMA_CHAN_SLOT(lch);
286 if (!callback) {
287 edma_shadow0_write_array(ctlr, SH_IECR, lch >> 5,
288 (1 << (lch & 0x1f)));
289 }
291 edma_info[ctlr]->intr_data[lch].callback = callback;
292 edma_info[ctlr]->intr_data[lch].data = data;
294 if (callback) {
295 edma_shadow0_write_array(ctlr, SH_ICR, lch >> 5,
296 (1 << (lch & 0x1f)));
297 edma_shadow0_write_array(ctlr, SH_IESR, lch >> 5,
298 (1 << (lch & 0x1f)));
299 }
300 }
302 static int irq2ctlr(int irq)
303 {
304 if (irq >= edma_info[0]->irq_res_start &&
305 irq <= edma_info[0]->irq_res_end)
306 return 0;
307 else if (irq >= edma_info[1]->irq_res_start &&
308 irq <= edma_info[1]->irq_res_end)
309 return 1;
311 return -1;
312 }
314 /******************************************************************************
315 *
316 * DMA interrupt handler
317 *
318 *****************************************************************************/
319 static irqreturn_t dma_irq_handler(int irq, void *data)
320 {
321 int i;
322 unsigned ctlr;
323 unsigned int cnt = 0;
325 ctlr = irq2ctlr(irq);
327 dev_dbg(data, "dma_irq_handler\n");
329 if ((edma_shadow0_read_array(ctlr, SH_IPR, 0) == 0)
330 && (edma_shadow0_read_array(ctlr, SH_IPR, 1) == 0))
331 return IRQ_NONE;
333 while (1) {
334 int j;
335 if (edma_shadow0_read_array(ctlr, SH_IPR, 0))
336 j = 0;
337 else if (edma_shadow0_read_array(ctlr, SH_IPR, 1))
338 j = 1;
339 else
340 break;
341 dev_dbg(data, "IPR%d %08x\n", j,
342 edma_shadow0_read_array(ctlr, SH_IPR, j));
343 for (i = 0; i < 32; i++) {
344 int k = (j << 5) + i;
345 if (edma_shadow0_read_array(ctlr, SH_IPR, j) &
346 (1 << i)) {
347 /* Clear the corresponding IPR bits */
348 edma_shadow0_write_array(ctlr, SH_ICR, j,
349 (1 << i));
350 if (edma_info[ctlr]->intr_data[k].callback) {
351 edma_info[ctlr]->intr_data[k].callback(
352 k, DMA_COMPLETE,
353 edma_info[ctlr]->intr_data[k].
354 data);
355 }
356 }
357 }
358 cnt++;
359 if (cnt > 10)
360 break;
361 }
362 edma_shadow0_write(ctlr, SH_IEVAL, 1);
363 return IRQ_HANDLED;
364 }
366 /******************************************************************************
367 *
368 * DMA error interrupt handler
369 *
370 *****************************************************************************/
371 static irqreturn_t dma_ccerr_handler(int irq, void *data)
372 {
373 int i;
374 unsigned ctlr;
375 unsigned int cnt = 0;
377 ctlr = irq2ctlr(irq);
379 dev_dbg(data, "dma_ccerr_handler\n");
381 if ((edma_read_array(ctlr, EDMA_EMR, 0) == 0) &&
382 (edma_read_array(ctlr, EDMA_EMR, 1) == 0) &&
383 (edma_read(ctlr, EDMA_QEMR) == 0) &&
384 (edma_read(ctlr, EDMA_CCERR) == 0))
385 return IRQ_NONE;
387 while (1) {
388 int j = -1;
389 if (edma_read_array(ctlr, EDMA_EMR, 0))
390 j = 0;
391 else if (edma_read_array(ctlr, EDMA_EMR, 1))
392 j = 1;
393 if (j >= 0) {
394 dev_dbg(data, "EMR%d %08x\n", j,
395 edma_read_array(ctlr, EDMA_EMR, j));
396 for (i = 0; i < 32; i++) {
397 int k = (j << 5) + i;
398 if (edma_read_array(ctlr, EDMA_EMR, j) &
399 (1 << i)) {
400 /* Clear the corresponding EMR bits */
401 edma_write_array(ctlr, EDMA_EMCR, j,
402 1 << i);
403 /* Clear any SER */
404 edma_shadow0_write_array(ctlr, SH_SECR,
405 j, (1 << i));
406 if (edma_info[ctlr]->intr_data[k].
407 callback) {
408 edma_info[ctlr]->intr_data[k].
409 callback(k,
410 DMA_CC_ERROR,
411 edma_info[ctlr]->intr_data
412 [k].data);
413 }
414 }
415 }
416 } else if (edma_read(ctlr, EDMA_QEMR)) {
417 dev_dbg(data, "QEMR %02x\n",
418 edma_read(ctlr, EDMA_QEMR));
419 for (i = 0; i < 8; i++) {
420 if (edma_read(ctlr, EDMA_QEMR) & (1 << i)) {
421 /* Clear the corresponding QEMR bits */
422 edma_write(ctlr, EDMA_QEMCR, 1 << i);
423 edma_shadow0_write(ctlr, SH_QSECR,
424 (1 << i));
426 /* NOTE: not reported!! */
427 }
428 }
429 } else if (edma_read(ctlr, EDMA_CCERR)) {
430 dev_dbg(data, "CCERR %08x\n",
431 edma_read(ctlr, EDMA_CCERR));
432 /* FIXME: CCERR.BIT(16) ignored! much better
433 * to just write CCERRCLR with CCERR value...
434 */
435 for (i = 0; i < 8; i++) {
436 if (edma_read(ctlr, EDMA_CCERR) & (1 << i)) {
437 /* Clear the corresponding CCERR bits */
438 edma_write(ctlr, EDMA_CCERRCLR, 1 << i);
440 /* NOTE: not reported!! */
441 }
442 }
443 }
444 if ((edma_read_array(ctlr, EDMA_EMR, 0) == 0)
445 && (edma_read_array(ctlr, EDMA_EMR, 1) == 0)
446 && (edma_read(ctlr, EDMA_QEMR) == 0)
447 && (edma_read(ctlr, EDMA_CCERR) == 0)) {
448 break;
449 }
450 cnt++;
451 if (cnt > 10)
452 break;
453 }
454 edma_write(ctlr, EDMA_EEVAL, 1);
455 return IRQ_HANDLED;
456 }
458 /*-----------------------------------------------------------------------*/
460 static int reserve_contiguous_slots(int ctlr, unsigned int id,
461 unsigned int num_slots,
462 unsigned int start_slot)
463 {
464 int i, j;
465 unsigned int count = num_slots;
466 int stop_slot = start_slot;
467 DECLARE_BITMAP(tmp_inuse, EDMA_MAX_PARAMENTRY);
469 for (i = start_slot; i < edma_info[ctlr]->num_slots; ++i) {
470 j = EDMA_CHAN_SLOT(i);
471 if (!test_and_set_bit(j, edma_info[ctlr]->edma_inuse)) {
472 /* Record our current beginning slot */
473 if (count == num_slots)
474 stop_slot = i;
476 count--;
477 set_bit(j, tmp_inuse);
479 if (count == 0)
480 break;
481 } else {
482 clear_bit(j, tmp_inuse);
484 if (id == EDMA_CONT_PARAMS_FIXED_EXACT) {
485 stop_slot = i;
486 break;
487 } else
488 count = num_slots;
489 }
490 }
492 /*
493 * We have to clear any bits that we set
494 * if we run out of parameter RAM slots, i.e. we find a set
495 * of contiguous parameter RAM slots but do not find the exact number
496 * requested, as we may reach the total number of parameter RAM slots
497 */
498 if (i == edma_info[ctlr]->num_slots)
499 stop_slot = i;
501 for (j = start_slot; j < stop_slot; j++)
502 if (test_bit(j, tmp_inuse))
503 clear_bit(j, edma_info[ctlr]->edma_inuse);
505 if (count)
506 return -EBUSY;
508 for (j = i - num_slots + 1; j <= i; ++j)
509 memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(j),
510 &dummy_paramset, PARM_SIZE);
512 return EDMA_CTLR_CHAN(ctlr, i - num_slots + 1);
513 }
515 static int prepare_unused_channel_list(struct device *dev, void *data)
516 {
517 struct platform_device *pdev = to_platform_device(dev);
518 int i, ctlr;
520 for (i = 0; i < pdev->num_resources; i++) {
521 if ((pdev->resource[i].flags & IORESOURCE_DMA) &&
522 (int)pdev->resource[i].start >= 0) {
523 ctlr = EDMA_CTLR(pdev->resource[i].start);
524 /* confirm the range */
525 if (EDMA_CHAN_SLOT(pdev->resource[i].start) <
526 EDMA_MAX_DMACH)
527 clear_bit(
528 EDMA_CHAN_SLOT(pdev->resource[i].start),
529 edma_info[ctlr]->edma_unused);
530 }
531 }
533 return 0;
534 }
536 /*-----------------------------------------------------------------------*/
538 static bool unused_chan_list_done;
540 /* Resource alloc/free: dma channels, parameter RAM slots */
542 /**
543 * edma_alloc_channel - allocate DMA channel and paired parameter RAM
544 * @channel: specific channel to allocate; negative for "any unmapped channel"
545 * @callback: optional; to be issued on DMA completion or errors
546 * @data: passed to callback
547 * @eventq_no: an EVENTQ_* constant, used to choose which Transfer
548 * Controller (TC) executes requests using this channel. Use
549 * EVENTQ_DEFAULT unless you really need a high priority queue.
550 *
551 * This allocates a DMA channel and its associated parameter RAM slot.
552 * The parameter RAM is initialized to hold a dummy transfer.
553 *
554 * Normal use is to pass a specific channel number as @channel, to make
555 * use of hardware events mapped to that channel. When the channel will
556 * be used only for software triggering or event chaining, channels not
557 * mapped to hardware events (or mapped to unused events) are preferable.
558 *
559 * DMA transfers start from a channel using edma_start(), or by
560 * chaining. When the transfer described in that channel's parameter RAM
561 * slot completes, that slot's data may be reloaded through a link.
562 *
563 * DMA errors are only reported to the @callback associated with the
564 * channel driving that transfer, but transfer completion callbacks can
565 * be sent to another channel under control of the TCC field in
566 * the option word of the transfer's parameter RAM set. Drivers must not
567 * use DMA transfer completion callbacks for channels they did not allocate.
568 * (The same applies to TCC codes used in transfer chaining.)
569 *
570 * Returns the number of the channel, else negative errno.
571 */
572 int edma_alloc_channel(int channel,
573 void (*callback)(unsigned channel, u16 ch_status, void *data),
574 void *data,
575 enum dma_event_q eventq_no)
576 {
577 unsigned i, done = 0, ctlr = 0;
578 int ret = 0;
580 if (!unused_chan_list_done) {
581 /*
582 * Scan all the platform devices to find out the EDMA channels
583 * used and clear them in the unused list, making the rest
584 * available for ARM usage.
585 */
586 ret = bus_for_each_dev(&platform_bus_type, NULL, NULL,
587 prepare_unused_channel_list);
588 if (ret < 0)
589 return ret;
591 unused_chan_list_done = true;
592 }
594 if (channel >= 0) {
595 ctlr = EDMA_CTLR(channel);
596 channel = EDMA_CHAN_SLOT(channel);
597 if (xbar_event_to_channel_map) {
598 ret = xbar_event_to_channel_map(channel,
599 &channel, edma_info[ctlr]->
600 xbar_event_mapping);
601 if (ret != 0)
602 return ret;
603 }
604 }
606 if (channel < 0) {
607 for (i = 0; i < arch_num_cc; i++) {
608 channel = 0;
609 for (;;) {
610 channel = find_next_bit(edma_info[i]->
611 edma_unused,
612 edma_info[i]->num_channels,
613 channel);
614 if (channel == edma_info[i]->num_channels)
615 break;
616 if (!test_and_set_bit(channel,
617 edma_info[i]->edma_inuse)) {
618 done = 1;
619 ctlr = i;
620 break;
621 }
622 channel++;
623 }
624 if (done)
625 break;
626 }
627 if (!done)
628 return -ENOMEM;
629 } else if (channel >= edma_info[ctlr]->num_channels) {
630 return -EINVAL;
631 } else if (test_and_set_bit(channel, edma_info[ctlr]->edma_inuse)) {
632 return -EBUSY;
633 }
635 /* ensure access through shadow region 0 */
636 edma_or_array2(ctlr, EDMA_DRAE, 0, channel >> 5, 1 << (channel & 0x1f));
638 /* ensure no events are pending */
639 edma_stop(EDMA_CTLR_CHAN(ctlr, channel));
640 memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(channel),
641 &dummy_paramset, PARM_SIZE);
643 if (callback)
644 setup_dma_interrupt(EDMA_CTLR_CHAN(ctlr, channel),
645 callback, data);
647 map_dmach_queue(ctlr, channel, eventq_no);
649 return EDMA_CTLR_CHAN(ctlr, channel);
650 }
651 EXPORT_SYMBOL(edma_alloc_channel);
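/*
 * Usage sketch (illustrative only; MCASP_TX_EVENT, xmit_done and dev are
 * hypothetical names, not part of this driver):
 *
 *	static void xmit_done(unsigned channel, u16 ch_status, void *data)
 *	{
 *		if (ch_status != DMA_COMPLETE)
 *			pr_err("edma: error 0x%x on channel %u\n",
 *			       ch_status, channel);
 *	}
 *
 *	chan = edma_alloc_channel(MCASP_TX_EVENT, xmit_done, dev,
 *				  EVENTQ_DEFAULT);
 *	if (chan < 0)
 *		return chan;
 *
 * The value returned encodes both the channel controller and the channel
 * number (see EDMA_CTLR_CHAN()) and must be passed unchanged to the other
 * edma_*() calls and eventually to edma_free_channel().
 */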
654 /**
655 * edma_free_channel - deallocate DMA channel
656 * @channel: dma channel returned from edma_alloc_channel()
657 *
658 * This deallocates the DMA channel and associated parameter RAM slot
659 * allocated by edma_alloc_channel().
660 *
661 * Callers are responsible for ensuring the channel is inactive, and
662 * will not be reactivated by linking, chaining, or software calls to
663 * edma_start().
664 */
665 void edma_free_channel(unsigned channel)
666 {
667 unsigned ctlr;
669 ctlr = EDMA_CTLR(channel);
670 channel = EDMA_CHAN_SLOT(channel);
672 if (channel >= edma_info[ctlr]->num_channels)
673 return;
675 setup_dma_interrupt(channel, NULL, NULL);
676 /* REVISIT should probably take out of shadow region 0 */
678 memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(channel),
679 &dummy_paramset, PARM_SIZE);
680 clear_bit(channel, edma_info[ctlr]->edma_inuse);
681 }
682 EXPORT_SYMBOL(edma_free_channel);
684 /**
685 * edma_alloc_slot - allocate DMA parameter RAM
686 * @slot: specific slot to allocate; negative for "any unused slot"
687 *
688 * This allocates a parameter RAM slot, initializing it to hold a
689 * dummy transfer. Slots allocated using this routine have not been
690 * mapped to a hardware DMA channel, and will normally be used by
691 * linking to them from a slot associated with a DMA channel.
692 *
693 * Normal use is to pass EDMA_SLOT_ANY as the @slot, but specific
694 * slots may be allocated on behalf of DSP firmware.
695 *
696 * Returns the number of the slot, else negative errno.
697 */
698 int edma_alloc_slot(unsigned ctlr, int slot)
699 {
700 if (slot >= 0)
701 slot = EDMA_CHAN_SLOT(slot);
703 if (slot < 0) {
704 slot = edma_info[ctlr]->num_channels;
705 for (;;) {
706 slot = find_next_zero_bit(edma_info[ctlr]->edma_inuse,
707 edma_info[ctlr]->num_slots, slot);
708 if (slot == edma_info[ctlr]->num_slots)
709 return -ENOMEM;
710 if (!test_and_set_bit(slot,
711 edma_info[ctlr]->edma_inuse))
712 break;
713 }
714 } else if (slot < edma_info[ctlr]->num_channels ||
715 slot >= edma_info[ctlr]->num_slots) {
716 return -EINVAL;
717 } else if (test_and_set_bit(slot, edma_info[ctlr]->edma_inuse)) {
718 return -EBUSY;
719 }
721 memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
722 &dummy_paramset, PARM_SIZE);
724 return EDMA_CTLR_CHAN(ctlr, slot);
725 }
726 EXPORT_SYMBOL(edma_alloc_slot);
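/*
 * Usage sketch (illustrative only): a driver usually allocates extra
 * "any" slots on the same channel controller as its channel so they can
 * later be linked to the channel's own slot; "chan" is assumed to be a
 * value returned by edma_alloc_channel():
 *
 *	slot = edma_alloc_slot(EDMA_CTLR(chan), EDMA_SLOT_ANY);
 *	if (slot < 0)
 *		return slot;
 */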
728 /**
729 * edma_free_slot - deallocate DMA parameter RAM
730 * @slot: parameter RAM slot returned from edma_alloc_slot()
731 *
732 * This deallocates the parameter RAM slot allocated by edma_alloc_slot().
733 * Callers are responsible for ensuring the slot is inactive, and will
734 * not be activated.
735 */
736 void edma_free_slot(unsigned slot)
737 {
738 unsigned ctlr;
740 ctlr = EDMA_CTLR(slot);
741 slot = EDMA_CHAN_SLOT(slot);
743 if (slot < edma_info[ctlr]->num_channels ||
744 slot >= edma_info[ctlr]->num_slots)
745 return;
747 memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
748 &dummy_paramset, PARM_SIZE);
749 clear_bit(slot, edma_info[ctlr]->edma_inuse);
750 }
751 EXPORT_SYMBOL(edma_free_slot);
754 /**
755 * edma_alloc_cont_slots - allocate contiguous parameter RAM slots
756 * The API will return the starting point of a set of
757 * contiguous parameter RAM slots that have been requested
758 *
759 * @id: can only be EDMA_CONT_PARAMS_ANY or EDMA_CONT_PARAMS_FIXED_EXACT
760 * or EDMA_CONT_PARAMS_FIXED_NOT_EXACT
761 * @count: number of contiguous Parameter RAM slots
762 * @slot: the start value of the Parameter RAM slot that should be passed if id
763 * is EDMA_CONT_PARAMS_FIXED_EXACT or EDMA_CONT_PARAMS_FIXED_NOT_EXACT
764 *
765 * If id is EDMA_CONT_PARAMS_ANY then the API starts looking for a set of
766 * contiguous Parameter RAM slots from parameter RAM 64 in the case of
767 * DaVinci SOCs and 32 in the case of DA8xx SOCs.
768 *
769 * If id is EDMA_CONT_PARAMS_FIXED_EXACT then the API starts looking for a
770 * set of contiguous parameter RAM slots from the "slot" that is passed as an
771 * argument to the API.
772 *
773 * If id is EDMA_CONT_PARAMS_FIXED_NOT_EXACT then the API initially
774 * starts looking for a set of contiguous parameter RAM slots from the "slot"
775 * that is passed as an argument to the API. On failure the API will try to
776 * find a set of contiguous Parameter RAM slots from the remaining Parameter
777 * RAM slots
778 */
779 int edma_alloc_cont_slots(unsigned ctlr, unsigned int id, int slot, int count)
780 {
781 /*
782 * The start slot requested should be greater than
783 * the number of channels and less than the total number
784 * of slots
785 */
786 if ((id != EDMA_CONT_PARAMS_ANY) &&
787 (slot < edma_info[ctlr]->num_channels ||
788 slot >= edma_info[ctlr]->num_slots))
789 return -EINVAL;
791 /*
792 * The number of parameter RAM slots requested cannot be less than 1
793 * and cannot be more than the number of slots minus the number of
794 * channels
795 */
796 if (count < 1 || count >
797 (edma_info[ctlr]->num_slots - edma_info[ctlr]->num_channels))
798 return -EINVAL;
800 switch (id) {
801 case EDMA_CONT_PARAMS_ANY:
802 return reserve_contiguous_slots(ctlr, id, count,
803 edma_info[ctlr]->num_channels);
804 case EDMA_CONT_PARAMS_FIXED_EXACT:
805 case EDMA_CONT_PARAMS_FIXED_NOT_EXACT:
806 return reserve_contiguous_slots(ctlr, id, count, slot);
807 default:
808 return -EINVAL;
809 }
811 }
812 EXPORT_SYMBOL(edma_alloc_cont_slots);
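/*
 * Usage sketch (illustrative only): request eight contiguous PaRAM slots
 * anywhere above the channel area, falling back to a fixed base if the
 * caller needs one; the numbers are made up:
 *
 *	base = edma_alloc_cont_slots(ctlr, EDMA_CONT_PARAMS_ANY, 0, 8);
 *	if (base < 0)
 *		base = edma_alloc_cont_slots(ctlr,
 *				EDMA_CONT_PARAMS_FIXED_NOT_EXACT, 64, 8);
 *
 * The return value is the first slot of the run, encoded with
 * EDMA_CTLR_CHAN(); release the whole run again with
 * edma_free_cont_slots(base, 8).
 */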
814 /**
815 * edma_free_cont_slots - deallocate DMA parameter RAM slots
816 * @slot: first parameter RAM of a set of parameter RAM slots to be freed
817 * @count: the number of contiguous parameter RAM slots to be freed
818 *
819 * This deallocates the parameter RAM slots allocated by
820 * edma_alloc_cont_slots.
821 * Callers/applications need to keep track of sets of contiguous
822 * parameter RAM slots that have been allocated using the edma_alloc_cont_slots
823 * API.
824 * Callers are responsible for ensuring the slots are inactive, and will
825 * not be activated.
826 */
827 int edma_free_cont_slots(unsigned slot, int count)
828 {
829 unsigned ctlr, slot_to_free;
830 int i;
832 ctlr = EDMA_CTLR(slot);
833 slot = EDMA_CHAN_SLOT(slot);
835 if (slot < edma_info[ctlr]->num_channels ||
836 slot >= edma_info[ctlr]->num_slots ||
837 count < 1)
838 return -EINVAL;
840 for (i = slot; i < slot + count; ++i) {
841 ctlr = EDMA_CTLR(i);
842 slot_to_free = EDMA_CHAN_SLOT(i);
844 memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot_to_free),
845 &dummy_paramset, PARM_SIZE);
846 clear_bit(slot_to_free, edma_info[ctlr]->edma_inuse);
847 }
849 return 0;
850 }
851 EXPORT_SYMBOL(edma_free_cont_slots);
853 /*-----------------------------------------------------------------------*/
855 /* Parameter RAM operations (i) -- read/write partial slots */
857 /**
858 * edma_set_src - set initial DMA source address in parameter RAM slot
859 * @slot: parameter RAM slot being configured
860 * @src_port: physical address of source (memory, controller FIFO, etc)
861 * @mode: INCR, except in very rare cases
862 * @width: ignored unless @mode is FIFO, else specifies the
863 * width to use when addressing the fifo (e.g. W8BIT, W32BIT)
864 *
865 * Note that the source address is modified during the DMA transfer
866 * according to edma_set_src_index().
867 */
868 void edma_set_src(unsigned slot, dma_addr_t src_port,
869 enum address_mode mode, enum fifo_width width)
870 {
871 unsigned ctlr;
873 ctlr = EDMA_CTLR(slot);
874 slot = EDMA_CHAN_SLOT(slot);
876 if (slot < edma_info[ctlr]->num_slots) {
877 unsigned int i = edma_parm_read(ctlr, PARM_OPT, slot);
879 if (mode) {
880 /* set SAM and program FWID */
881 i = (i & ~(EDMA_FWID)) | (SAM | ((width & 0x7) << 8));
882 } else {
883 /* clear SAM */
884 i &= ~SAM;
885 }
886 edma_parm_write(ctlr, PARM_OPT, slot, i);
888 /* set the source port address
889 in source register of param structure */
890 edma_parm_write(ctlr, PARM_SRC, slot, src_port);
891 }
892 }
893 EXPORT_SYMBOL(edma_set_src);
895 /**
896 * edma_set_dest - set initial DMA destination address in parameter RAM slot
897 * @slot: parameter RAM slot being configured
898 * @dest_port: physical address of destination (memory, controller FIFO, etc)
899 * @mode: INCR, except in very rare cases
900 * @width: ignored unless @mode is FIFO, else specifies the
901 * width to use when addressing the fifo (e.g. W8BIT, W32BIT)
902 *
903 * Note that the destination address is modified during the DMA transfer
904 * according to edma_set_dest_index().
905 */
906 void edma_set_dest(unsigned slot, dma_addr_t dest_port,
907 enum address_mode mode, enum fifo_width width)
908 {
909 unsigned ctlr;
911 ctlr = EDMA_CTLR(slot);
912 slot = EDMA_CHAN_SLOT(slot);
914 if (slot < edma_info[ctlr]->num_slots) {
915 unsigned int i = edma_parm_read(ctlr, PARM_OPT, slot);
917 if (mode) {
918 /* set DAM and program FWID */
919 i = (i & ~(EDMA_FWID)) | (DAM | ((width & 0x7) << 8));
920 } else {
921 /* clear DAM */
922 i &= ~DAM;
923 }
924 edma_parm_write(ctlr, PARM_OPT, slot, i);
925 /* set the destination port address
926 in dest register of param structure */
927 edma_parm_write(ctlr, PARM_DST, slot, dest_port);
928 }
929 }
930 EXPORT_SYMBOL(edma_set_dest);
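/*
 * Usage sketch (illustrative only): programming a memory-to-peripheral
 * slot.  The source is an incrementing memory buffer and the destination
 * a fixed peripheral FIFO; buf_dma and fifo_phys are assumed to be a
 * DMA-mapped buffer address and the controller's FIFO bus address:
 *
 *	edma_set_src(slot, buf_dma, INCR, W8BIT);
 *	edma_set_dest(slot, fifo_phys, FIFO, W32BIT);
 *
 * The width argument only matters for the FIFO-addressed side, as noted
 * in the comments above.
 */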
932 /**
933 * edma_get_position - returns the current transfer points
934 * @slot: parameter RAM slot being examined
935 * @src: pointer to source port position
936 * @dst: pointer to destination port position
937 *
938 * Returns current source and destination addresses for a particular
939 * parameter RAM slot. Its channel should not be active when this is called.
940 */
941 void edma_get_position(unsigned slot, dma_addr_t *src, dma_addr_t *dst)
942 {
943 struct edmacc_param temp;
944 unsigned ctlr;
946 ctlr = EDMA_CTLR(slot);
947 slot = EDMA_CHAN_SLOT(slot);
949 edma_read_slot(EDMA_CTLR_CHAN(ctlr, slot), &temp);
950 if (src != NULL)
951 *src = temp.src;
952 if (dst != NULL)
953 *dst = temp.dst;
954 }
955 EXPORT_SYMBOL(edma_get_position);
957 /**
958 * edma_set_src_index - configure DMA source address indexing
959 * @slot: parameter RAM slot being configured
960 * @src_bidx: byte offset between source arrays in a frame
961 * @src_cidx: byte offset between source frames in a block
962 *
963 * Offsets are specified to support either contiguous or discontiguous
964 * memory transfers, or repeated access to a hardware register, as needed.
965 * When accessing hardware registers, both offsets are normally zero.
966 */
967 void edma_set_src_index(unsigned slot, s16 src_bidx, s16 src_cidx)
968 {
969 unsigned ctlr;
971 ctlr = EDMA_CTLR(slot);
972 slot = EDMA_CHAN_SLOT(slot);
974 if (slot < edma_info[ctlr]->num_slots) {
975 edma_parm_modify(ctlr, PARM_SRC_DST_BIDX, slot,
976 0xffff0000, src_bidx);
977 edma_parm_modify(ctlr, PARM_SRC_DST_CIDX, slot,
978 0xffff0000, src_cidx);
979 }
980 }
981 EXPORT_SYMBOL(edma_set_src_index);
983 /**
984 * edma_set_dest_index - configure DMA destination address indexing
985 * @slot: parameter RAM slot being configured
986 * @dest_bidx: byte offset between destination arrays in a frame
987 * @dest_cidx: byte offset between destination frames in a block
988 *
989 * Offsets are specified to support either contiguous or discontiguous
990 * memory transfers, or repeated access to a hardware register, as needed.
991 * When accessing hardware registers, both offsets are normally zero.
992 */
993 void edma_set_dest_index(unsigned slot, s16 dest_bidx, s16 dest_cidx)
994 {
995 unsigned ctlr;
997 ctlr = EDMA_CTLR(slot);
998 slot = EDMA_CHAN_SLOT(slot);
1000 if (slot < edma_info[ctlr]->num_slots) {
1001 edma_parm_modify(ctlr, PARM_SRC_DST_BIDX, slot,
1002 0x0000ffff, dest_bidx << 16);
1003 edma_parm_modify(ctlr, PARM_SRC_DST_CIDX, slot,
1004 0x0000ffff, dest_cidx << 16);
1005 }
1006 }
1007 EXPORT_SYMBOL(edma_set_dest_index);
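/*
 * Worked example (editorial note): copying a 320-byte-wide window out of
 * a frame buffer whose lines are 2048 bytes apart, into a packed
 * destination.  With acnt = 320 (one array per line) the source must
 * skip the full stride between arrays while the destination lines stay
 * contiguous:
 *
 *	edma_set_src_index(slot, 2048, 0);
 *	edma_set_dest_index(slot, 320, 0);
 *
 * For the register side of a peripheral transfer both offsets would
 * simply be zero, as noted above.
 */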
1009 /**
1010 * edma_set_transfer_params - configure DMA transfer parameters
1011 * @slot: parameter RAM slot being configured
1012 * @acnt: how many bytes per array (at least one)
1013 * @bcnt: how many arrays per frame (at least one)
1014 * @ccnt: how many frames per block (at least one)
1015 * @bcnt_rld: used only for A-Synchronized transfers; this specifies
1016 * the value to reload into bcnt when it decrements to zero
1017 * @sync_mode: ASYNC or ABSYNC
1018 *
1019 * See the EDMA3 documentation to understand how to configure and link
1020 * transfers using the fields in PaRAM slots. If you are not doing it
1021 * all at once with edma_write_slot(), you will use this routine
1022 * plus two calls each for source and destination, setting the initial
1023 * address and saying how to index that address.
1024 *
1025 * An example of an A-Synchronized transfer is a serial link using a
1026 * single word shift register. In that case, @acnt would be equal to
1027 * that word size; the serial controller issues a DMA synchronization
1028 * event to transfer each word, and memory access by the DMA transfer
1029 * controller will be word-at-a-time.
1030 *
1031 * An example of an AB-Synchronized transfer is a device using a FIFO.
1032 * In that case, @acnt equals the FIFO width and @bcnt equals its depth.
1033 * The controller with the FIFO issues DMA synchronization events when
1034 * the FIFO threshold is reached, and the DMA transfer controller will
1035 * transfer one frame to (or from) the FIFO. It will probably use
1036 * efficient burst modes to access memory.
1037 */
1038 void edma_set_transfer_params(unsigned slot,
1039 u16 acnt, u16 bcnt, u16 ccnt,
1040 u16 bcnt_rld, enum sync_dimension sync_mode)
1041 {
1042 unsigned ctlr;
1044 ctlr = EDMA_CTLR(slot);
1045 slot = EDMA_CHAN_SLOT(slot);
1047 if (slot < edma_info[ctlr]->num_slots) {
1048 edma_parm_modify(ctlr, PARM_LINK_BCNTRLD, slot,
1049 0x0000ffff, bcnt_rld << 16);
1050 if (sync_mode == ASYNC)
1051 edma_parm_and(ctlr, PARM_OPT, slot, ~SYNCDIM);
1052 else
1053 edma_parm_or(ctlr, PARM_OPT, slot, SYNCDIM);
1054 /* Set the ACNT, BCNT, CCNT fields */
1055 edma_parm_write(ctlr, PARM_A_B_CNT, slot, (bcnt << 16) | acnt);
1056 edma_parm_write(ctlr, PARM_CCNT, slot, ccnt);
1057 }
1058 }
1059 EXPORT_SYMBOL(edma_set_transfer_params);
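/*
 * Worked example (editorial note): an audio FIFO that raises one event
 * per 32-sample threshold, with 4 bytes per sample and 256 frames in the
 * buffer, is an AB-synchronized transfer:
 *
 *	edma_set_transfer_params(slot, 4, 32, 256, 0, ABSYNC);
 *
 * i.e. acnt = 4 bytes per array, bcnt = 32 arrays per frame, ccnt = 256
 * frames per block (bcnt_rld is only used for ASYNC).  The single-word
 * serial port described above would instead use acnt = 4, bcnt = nwords,
 * ccnt = 1 with ASYNC.
 */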
1061 /**
1062 * edma_link - link one parameter RAM slot to another
1063 * @from: parameter RAM slot originating the link
1064 * @to: parameter RAM slot which is the link target
1065 *
1066 * The originating slot should not be part of any active DMA transfer.
1067 */
1068 void edma_link(unsigned from, unsigned to)
1069 {
1070 unsigned ctlr_from, ctlr_to;
1072 ctlr_from = EDMA_CTLR(from);
1073 from = EDMA_CHAN_SLOT(from);
1074 ctlr_to = EDMA_CTLR(to);
1075 to = EDMA_CHAN_SLOT(to);
1077 if (from >= edma_info[ctlr_from]->num_slots)
1078 return;
1079 if (to >= edma_info[ctlr_to]->num_slots)
1080 return;
1081 edma_parm_modify(ctlr_from, PARM_LINK_BCNTRLD, from, 0xffff0000,
1082 PARM_OFFSET(to));
1083 }
1084 EXPORT_SYMBOL(edma_link);
1086 /**
1087 * edma_unlink - cut link from one parameter RAM slot
1088 * @from: parameter RAM slot originating the link
1089 *
1090 * The originating slot should not be part of any active DMA transfer.
1091 * Its link is set to 0xffff.
1092 */
1093 void edma_unlink(unsigned from)
1094 {
1095 unsigned ctlr;
1097 ctlr = EDMA_CTLR(from);
1098 from = EDMA_CHAN_SLOT(from);
1100 if (from >= edma_info[ctlr]->num_slots)
1101 return;
1102 edma_parm_or(ctlr, PARM_LINK_BCNTRLD, from, 0xffff);
1103 }
1104 EXPORT_SYMBOL(edma_unlink);
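/*
 * Usage sketch (illustrative only): classic ping-pong buffering links the
 * channel's own slot and two extra slots from edma_alloc_slot() into a
 * ring, so the hardware reloads the next descriptor on every completion;
 * chan_slot/ping_slot/pong_slot are hypothetical:
 *
 *	edma_link(chan_slot, ping_slot);
 *	edma_link(ping_slot, pong_slot);
 *	edma_link(pong_slot, ping_slot);
 *
 * For a finite transfer, call edma_unlink() on the last descriptor so its
 * link field reads 0xffff and the chain terminates.
 */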
1106 /*-----------------------------------------------------------------------*/
1108 /* Parameter RAM operations (ii) -- read/write whole parameter sets */
1110 /**
1111 * edma_write_slot - write parameter RAM data for slot
1112 * @slot: number of parameter RAM slot being modified
1113 * @param: data to be written into parameter RAM slot
1114 *
1115 * Use this to assign all parameters of a transfer at once. This
1116 * allows more efficient setup of transfers than issuing multiple
1117 * calls to set up those parameters in small pieces, and provides
1118 * complete control over all transfer options.
1119 */
1120 void edma_write_slot(unsigned slot, const struct edmacc_param *param)
1121 {
1122 unsigned ctlr;
1124 ctlr = EDMA_CTLR(slot);
1125 slot = EDMA_CHAN_SLOT(slot);
1127 if (slot >= edma_info[ctlr]->num_slots)
1128 return;
1129 memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot), param,
1130 PARM_SIZE);
1131 }
1132 EXPORT_SYMBOL(edma_write_slot);
1134 /**
1135 * edma_read_slot - read parameter RAM data from slot
1136 * @slot: number of parameter RAM slot being copied
1137 * @param: where to store copy of parameter RAM data
1138 *
1139 * Use this to read data from a parameter RAM slot, perhaps to
1140 * save them as a template for later reuse.
1141 */
1142 void edma_read_slot(unsigned slot, struct edmacc_param *param)
1143 {
1144 unsigned ctlr;
1146 ctlr = EDMA_CTLR(slot);
1147 slot = EDMA_CHAN_SLOT(slot);
1149 if (slot >= edma_info[ctlr]->num_slots)
1150 return;
1151 memcpy_fromio(param, edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
1152 PARM_SIZE);
1153 }
1154 EXPORT_SYMBOL(edma_read_slot);
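/*
 * Usage sketch (illustrative only): building a complete PaRAM set and
 * writing it back in one shot.  TCINTEN and EDMA_TCC() are assumed to be
 * the usual "interrupt on completion" and "transfer complete code"
 * helpers from <mach/edma.h>:
 *
 *	struct edmacc_param p;
 *
 *	edma_read_slot(slot, &p);
 *	p.opt |= TCINTEN | EDMA_TCC(EDMA_CHAN_SLOT(chan));
 *	p.link_bcntrld = 0xffff;
 *	edma_write_slot(slot, &p);
 *
 * Reading first and modifying only the fields of interest keeps whatever
 * the partial-slot helpers above have already programmed.
 */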
1156 /*-----------------------------------------------------------------------*/
1158 /* Various EDMA channel control operations */
1160 /**
1161 * edma_pause - pause dma on a channel
1162 * @channel: on which edma_start() has been called
1163 *
1164 * This temporarily disables EDMA hardware events on the specified channel,
1165 * preventing them from triggering new transfers on its behalf
1166 */
1167 void edma_pause(unsigned channel)
1168 {
1169 unsigned ctlr;
1171 ctlr = EDMA_CTLR(channel);
1172 channel = EDMA_CHAN_SLOT(channel);
1174 if (channel < edma_info[ctlr]->num_channels) {
1175 unsigned int mask = (1 << (channel & 0x1f));
1177 edma_shadow0_write_array(ctlr, SH_EECR, channel >> 5, mask);
1178 }
1179 }
1180 EXPORT_SYMBOL(edma_pause);
1182 /**
1183 * edma_resume - resumes dma on a paused channel
1184 * @channel: on which edma_pause() has been called
1185 *
1186 * This re-enables EDMA hardware events on the specified channel.
1187 */
1188 void edma_resume(unsigned channel)
1189 {
1190 unsigned ctlr;
1192 ctlr = EDMA_CTLR(channel);
1193 channel = EDMA_CHAN_SLOT(channel);
1195 if (channel < edma_info[ctlr]->num_channels) {
1196 unsigned int mask = (1 << (channel & 0x1f));
1198 edma_shadow0_write_array(ctlr, SH_EESR, channel >> 5, mask);
1199 }
1200 }
1201 EXPORT_SYMBOL(edma_resume);
1203 /**
1204 * edma_start - start dma on a channel
1205 * @channel: channel being activated
1206 *
1207 * Channels with event associations will be triggered by their hardware
1208 * events, and channels without such associations will be triggered by
1209 * software. (At this writing there is no interface for using software
1210 * triggers except with channels that don't support hardware triggers.)
1211 *
1212 * Returns zero on success, else negative errno.
1213 */
1214 int edma_start(unsigned channel)
1215 {
1216 unsigned ctlr;
1218 ctlr = EDMA_CTLR(channel);
1219 channel = EDMA_CHAN_SLOT(channel);
1221 if (channel < edma_info[ctlr]->num_channels) {
1222 int j = channel >> 5;
1223 unsigned int mask = (1 << (channel & 0x1f));
1225 /* EDMA channels without event association */
1226 if (test_bit(channel, edma_info[ctlr]->edma_unused)) {
1227 pr_debug("EDMA: ESR%d %08x\n", j,
1228 edma_shadow0_read_array(ctlr, SH_ESR, j));
1229 edma_shadow0_write_array(ctlr, SH_ESR, j, mask);
1230 return 0;
1231 }
1233 /* EDMA channel with event association */
1234 pr_debug("EDMA: ER%d %08x\n", j,
1235 edma_shadow0_read_array(ctlr, SH_ER, j));
1236 /* Clear any pending error */
1237 edma_write_array(ctlr, EDMA_EMCR, j, mask);
1238 /* Clear any SER */
1239 edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
1240 edma_shadow0_write_array(ctlr, SH_EESR, j, mask);
1241 pr_debug("EDMA: EER%d %08x\n", j,
1242 edma_shadow0_read_array(ctlr, SH_EER, j));
1243 return 0;
1244 }
1246 return -EINVAL;
1247 }
1248 EXPORT_SYMBOL(edma_start);
1250 /**
1251 * edma_stop - stops dma on the channel passed
1252 * @channel: channel being deactivated
1253 *
1254 * Any active transfer on @channel is paused and
1255 * all pending hardware events are cleared. The current transfer
1256 * may not be resumed, and the channel's Parameter RAM should be
1257 * reinitialized before being reused.
1258 */
1259 void edma_stop(unsigned channel)
1260 {
1261 unsigned ctlr;
1263 ctlr = EDMA_CTLR(channel);
1264 channel = EDMA_CHAN_SLOT(channel);
1266 if (channel < edma_info[ctlr]->num_channels) {
1267 int j = channel >> 5;
1268 unsigned int mask = (1 << (channel & 0x1f));
1270 edma_shadow0_write_array(ctlr, SH_EECR, j, mask);
1271 edma_shadow0_write_array(ctlr, SH_ECR, j, mask);
1272 edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
1273 edma_write_array(ctlr, EDMA_EMCR, j, mask);
1275 pr_debug("EDMA: EER%d %08x\n", j,
1276 edma_shadow0_read_array(ctlr, SH_EER, j));
1278 /* REVISIT: consider guarding against inappropriate event
1279 * chaining by overwriting with dummy_paramset.
1280 */
1281 }
1282 }
1283 EXPORT_SYMBOL(edma_stop);
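/*
 * Putting it together (editorial sketch of a typical lifecycle; names
 * such as dev_event, buf_dma and fifo_phys are hypothetical and error
 * handling is trimmed):
 *
 *	chan = edma_alloc_channel(dev_event, dma_done, dev, EVENTQ_DEFAULT);
 *	edma_set_src(chan, buf_dma, INCR, W8BIT);
 *	edma_set_dest(chan, fifo_phys, FIFO, W32BIT);
 *	edma_set_src_index(chan, 4, 0);
 *	edma_set_dest_index(chan, 0, 0);
 *	edma_set_transfer_params(chan, 4, 8, nframes, 0, ABSYNC);
 *	edma_start(chan);
 *	...
 *	edma_stop(chan);
 *	edma_free_channel(chan);
 *
 * The PaRAM-oriented calls accept the channel handle directly because a
 * channel's paired PaRAM slot shares its number; slots from
 * edma_alloc_slot() take the same encoded handle.
 */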
1285 /******************************************************************************
1286 *
1287 * It cleans the ParamEntry and brings EDMA back to its initial state if the
1288 * media has been removed before EDMA has finished. It is useful for
1289 * removable media.
1290 * Arguments:
1291 * channel - channel number
1292 *
1293 *
1294 * FIXME this should not be needed ... edma_stop() should suffice.
1295 *
1296 *****************************************************************************/
1298 void edma_clean_channel(unsigned channel)
1299 {
1300 unsigned ctlr;
1302 ctlr = EDMA_CTLR(channel);
1303 channel = EDMA_CHAN_SLOT(channel);
1305 if (channel < edma_info[ctlr]->num_channels) {
1306 int j = (channel >> 5);
1307 unsigned int mask = 1 << (channel & 0x1f);
1309 pr_debug("EDMA: EMR%d %08x\n", j,
1310 edma_read_array(ctlr, EDMA_EMR, j));
1311 edma_shadow0_write_array(ctlr, SH_ECR, j, mask);
1312 /* Clear the corresponding EMR bits */
1313 edma_write_array(ctlr, EDMA_EMCR, j, mask);
1314 /* Clear any SER */
1315 edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
1316 edma_write(ctlr, EDMA_CCERRCLR, (1 << 16) | 0x3);
1317 }
1318 }
1319 EXPORT_SYMBOL(edma_clean_channel);
1321 /*
1322 * edma_clear_event - clear an outstanding event on the DMA channel
1323 * Arguments:
1324 * channel - channel number
1325 */
1326 void edma_clear_event(unsigned channel)
1327 {
1328 unsigned ctlr;
1330 ctlr = EDMA_CTLR(channel);
1331 channel = EDMA_CHAN_SLOT(channel);
1333 if (channel >= edma_info[ctlr]->num_channels)
1334 return;
1335 if (channel < 32)
1336 edma_write(ctlr, EDMA_ECR, 1 << channel);
1337 else
1338 edma_write(ctlr, EDMA_ECRH, 1 << (channel - 32));
1339 }
1340 EXPORT_SYMBOL(edma_clear_event);
1342 /*-----------------------------------------------------------------------*/
1344 static int __init edma_probe(struct platform_device *pdev)
1345 {
1346 struct edma_soc_info *info = pdev->dev.platform_data;
1347 const s8 (*queue_priority_mapping)[2];
1348 const s8 (*queue_tc_mapping)[2];
1349 int i, j, off, ln, found = 0;
1350 int status = -1;
1351 const s16 (*rsv_chans)[2];
1352 const s16 (*rsv_slots)[2];
1353 int irq[EDMA_MAX_CC] = {0, 0};
1354 int err_irq[EDMA_MAX_CC] = {0, 0};
1355 struct resource *r[EDMA_MAX_CC] = {NULL};
1356 resource_size_t len[EDMA_MAX_CC];
1357 char res_name[10];
1358 char irq_name[10];
1360 if (!info)
1361 return -ENODEV;
1363 for (j = 0; j < EDMA_MAX_CC; j++) {
1364 sprintf(res_name, "edma_cc%d", j);
1365 r[j] = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1366 res_name);
1367 if (!r[j]) {
1368 if (found)
1369 break;
1370 else
1371 return -ENODEV;
1372 } else
1373 found = 1;
1375 len[j] = resource_size(r[j]);
1377 r[j] = request_mem_region(r[j]->start, len[j],
1378 dev_name(&pdev->dev));
1379 if (!r[j]) {
1380 status = -EBUSY;
1381 goto fail1;
1382 }
1384 edmacc_regs_base[j] = ioremap(r[j]->start, len[j]);
1385 if (!edmacc_regs_base[j]) {
1386 status = -EBUSY;
1387 goto fail1;
1388 }
1390 edma_info[j] = kmalloc(sizeof(struct edma), GFP_KERNEL);
1391 if (!edma_info[j]) {
1392 status = -ENOMEM;
1393 goto fail1;
1394 }
1395 memset(edma_info[j], 0, sizeof(struct edma));
1397 edma_info[j]->num_channels = min_t(unsigned, info[j].n_channel,
1398 EDMA_MAX_DMACH);
1399 edma_info[j]->num_slots = min_t(unsigned, info[j].n_slot,
1400 EDMA_MAX_PARAMENTRY);
1401 edma_info[j]->num_cc = min_t(unsigned, info[j].n_cc,
1402 EDMA_MAX_CC);
1404 edma_info[j]->default_queue = info[j].default_queue;
1405 if (!edma_info[j]->default_queue)
1406 edma_info[j]->default_queue = EVENTQ_1;
1408 dev_dbg(&pdev->dev, "DMA REG BASE ADDR=%p\n",
1409 edmacc_regs_base[j]);
1411 for (i = 0; i < edma_info[j]->num_slots; i++)
1412 memcpy_toio(edmacc_regs_base[j] + PARM_OFFSET(i),
1413 &dummy_paramset, PARM_SIZE);
1415 /* Mark all channels as unused */
1416 memset(edma_info[j]->edma_unused, 0xff,
1417 sizeof(edma_info[j]->edma_unused));
1419 /* Clear the reserved channels in unused list */
1420 rsv_chans = info[j].rsv_chans;
1421 if (rsv_chans) {
1422 for (i = 0; rsv_chans[i][0] != -1; i++) {
1423 off = rsv_chans[i][0];
1424 ln = rsv_chans[i][1];
1425 /* confirm the range */
1426 if ((off+ln) < EDMA_MAX_DMACH)
1427 clear_bits(off, ln,
1428 edma_info[j]->edma_unused);
1429 }
1430 }
1432 /* Set the reserved channels/slots in inuse list */
1433 rsv_slots = info[j].rsv_slots;
1434 if (rsv_slots) {
1435 for (i = 0; rsv_slots[i][0] != -1; i++) {
1436 off = rsv_slots[i][0];
1437 ln = rsv_slots[i][1];
1438 set_bits(off, ln, edma_info[j]->edma_inuse);
1439 }
1440 }
1442 sprintf(irq_name, "edma%d", j);
1443 irq[j] = platform_get_irq_byname(pdev, irq_name);
1444 edma_info[j]->irq_res_start = irq[j];
1445 status = request_irq(irq[j], dma_irq_handler, 0, "edma",
1446 &pdev->dev);
1447 if (status < 0) {
1448 dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n",
1449 irq[j], status);
1450 goto fail;
1451 }
1453 sprintf(irq_name, "edma%d_err", j);
1454 err_irq[j] = platform_get_irq_byname(pdev, irq_name);
1455 edma_info[j]->irq_res_end = err_irq[j];
1456 status = request_irq(err_irq[j], dma_ccerr_handler, 0,
1457 "edma_error", &pdev->dev);
1458 if (status < 0) {
1459 dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n",
1460 err_irq[j], status);
1461 goto fail;
1462 }
1464 /* Everything lives on transfer controller 1 until otherwise
1465 * specified. This way, long transfers on the low priority queue
1466 * started by the codec engine will not cause audio defects.
1467 */
1468 for (i = 0; i < edma_info[j]->num_channels; i++)
1469 map_dmach_queue(j, i, EVENTQ_1);
1471 queue_tc_mapping = info[j].queue_tc_mapping;
1472 queue_priority_mapping = info[j].queue_priority_mapping;
1474 /* Event queue to TC mapping */
1475 for (i = 0; queue_tc_mapping[i][0] != -1; i++)
1476 map_queue_tc(j, queue_tc_mapping[i][0],
1477 queue_tc_mapping[i][1]);
1479 /* Event queue priority mapping */
1480 for (i = 0; queue_priority_mapping[i][0] != -1; i++)
1481 assign_priority_to_queue(j,
1482 queue_priority_mapping[i][0],
1483 queue_priority_mapping[i][1]);
1485 /* Map the channel to param entry if channel mapping logic
1486 * exists
1487 */
1488 if (edma_read(j, EDMA_CCCFG) & CHMAP_EXIST)
1489 map_dmach_param(j);
1491 for (i = 0; i < info[j].n_region; i++) {
1492 edma_write_array2(j, EDMA_DRAE, i, 0, 0x0);
1493 edma_write_array2(j, EDMA_DRAE, i, 1, 0x0);
1494 edma_write_array(j, EDMA_QRAE, i, 0x0);
1495 }
1497 edma_info[j]->is_xbar = info[j].is_xbar;
1499 if (edma_info[j]->is_xbar) {
1500 edma_info[j]->num_events = info[j].n_events;
1501 edma_info[j]->xbar_event_mapping =
1502 info[j].xbar_event_mapping;
1503 xbar_event_to_channel_map = info[j].map_xbar_channel;
1504 }
1506 arch_num_cc++;
1507 }
1509 return 0;
1511 fail:
1512 for (i = 0; i < EDMA_MAX_CC; i++) {
1513 if (err_irq[i])
1514 free_irq(err_irq[i], &pdev->dev);
1515 if (irq[i])
1516 free_irq(irq[i], &pdev->dev);
1517 }
1518 fail1:
1519 for (i = 0; i < EDMA_MAX_CC; i++) {
1520 if (r[i])
1521 release_mem_region(r[i]->start, len[i]);
1522 if (edmacc_regs_base[i])
1523 iounmap(edmacc_regs_base[i]);
1524 kfree(edma_info[i]);
1525 }
1526 return status;
1527 }
1529 static struct platform_driver edma_driver = {
1530 .driver.name = "edma",
1531 };
1533 static int __init edma_init(void)
1534 {
1535 return platform_driver_probe(&edma_driver, edma_probe);
1536 }
1537 subsys_initcall(edma_init);
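/*
 * Board-level wiring (editorial sketch): edma_probe() above expects the
 * platform device to carry an array of struct edma_soc_info as
 * platform_data, one entry per channel controller, plus memory and IRQ
 * resources named "edma_cc<n>", "edma<n>" and "edma<n>_err".  Field
 * names follow the accesses in the probe; the numbers are made up:
 *
 *	static struct edma_soc_info edma_cc_info[] = {
 *		{
 *			.n_channel		= 64,
 *			.n_region		= 4,
 *			.n_slot			= 256,
 *			.n_cc			= 1,
 *			.default_queue		= EVENTQ_1,
 *			.queue_tc_mapping	= queue_tc_mapping,
 *			.queue_priority_mapping	= queue_priority_mapping,
 *		},
 *	};
 *
 *	static struct platform_device edma_device = {
 *		.name			= "edma",
 *		.id			= -1,
 *		.dev.platform_data	= edma_cc_info,
 *		.num_resources		= ARRAY_SIZE(edma_resources),
 *		.resource		= edma_resources,
 *	};
 *
 * where queue_tc_mapping and queue_priority_mapping are the usual pairs
 * terminated by a -1 entry, and edma_resources supplies the "edma_cc0"
 * register window and the "edma0"/"edma0_err" interrupts requested above.
 */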