/*
 * Copyright (C) 2005-2006 by Texas Instruments
 *
 * This file implements a DMA interface using TI's CPPI DMA.
 * For now it's DaVinci-only, but CPPI isn't specific to DaVinci or USB.
 * The TUSB6020, using VLYNQ, has CPPI that looks much like DaVinci.
 */

#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/usb.h>

#include "musb_core.h"
#include "musb_debug.h"
#include "cppi_dma.h"


/* CPPI DMA status 7-mar-2006:
 *
 * - See musb_{host,gadget}.c for more info
 *
 * - Correct RX DMA generally forces the engine into irq-per-packet mode,
 *   which can easily saturate the CPU under non-mass-storage loads.
 *
 * NOTES 24-aug-2006 (2.6.18-rc4):
 *
 * - peripheral RXDMA wedged in a test with packets of length 512/512/1.
 *   evidently after the 1 byte packet was received and acked, the queue
 *   of BDs got garbaged so it wouldn't empty the fifo.  (rxcsr 0x2003,
 *   and RX DMA0: 4 left, 80000000 8feff880, 8feff860 8feff860; 8f321401
 *   004001ff 00000001 .. 8feff860)  Host was just getting NAKed on tx
 *   of its next (512 byte) packet.  IRQ issues?
 *
 * REVISIT:  the "transfer DMA" glue between CPPI and USB fifos will
 * evidently also directly update the RX and TX CSRs ... so audit all
 * host and peripheral side DMA code to avoid CSR access after DMA has
 * been started.
 */

/* REVISIT now we can avoid preallocating these descriptors; or
 * more simply, switch to a global freelist not per-channel ones.
 * Note: at full speed, 64 descriptors == 4K bulk data.
 */
#define NUM_TXCHAN_BD		64
#define NUM_RXCHAN_BD		64

static inline void cpu_drain_writebuffer(void)
{
	wmb();
#ifdef CONFIG_CPU_ARM926T
	/* REVISIT this "should not be needed",
	 * but lack of it sure seemed to hurt ...
	 */
	asm("mcr p15, 0, r0, c7, c10, 4 @ drain write buffer\n");
#endif
}

static inline struct cppi_descriptor *cppi_bd_alloc(struct cppi_channel *c)
{
	struct cppi_descriptor	*bd = c->freelist;

	if (bd)
		c->freelist = bd->next;
	return bd;
}

static inline void
cppi_bd_free(struct cppi_channel *c, struct cppi_descriptor *bd)
{
	if (!bd)
		return;
	bd->next = c->freelist;
	c->freelist = bd;
}
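
/* Example (illustrative only): the freelist is a simple LIFO.  A
 * typical alloc/fill/return cycle, assuming the caller already holds
 * the controller lock as the "Context:" notes below require:
 *
 *	struct cppi_descriptor *bd = cppi_bd_alloc(c);
 *
 *	if (bd) {
 *		bd->hw_bufp = buf_dma;	// buf_dma: hypothetical dma_addr_t
 *		cppi_bd_free(c, bd);	// push back onto c->freelist
 *	}
 *
 * Neither helper does any locking of its own.
 */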

/*
 *  Start DMA controller
 *
 *  Initialize the DMA controller as necessary.
 */

/* zero out entire rx state RAM entry for the channel */
static void cppi_reset_rx(struct cppi_rx_stateram __iomem *rx)
{
	musb_writel(&rx->rx_skipbytes, 0, 0);
	musb_writel(&rx->rx_head, 0, 0);
	musb_writel(&rx->rx_sop, 0, 0);
	musb_writel(&rx->rx_current, 0, 0);
	musb_writel(&rx->rx_buf_current, 0, 0);
	musb_writel(&rx->rx_len_len, 0, 0);
	musb_writel(&rx->rx_cnt_cnt, 0, 0);
}

/* zero out entire tx state RAM entry for the channel */
static void cppi_reset_tx(struct cppi_tx_stateram __iomem *tx, u32 ptr)
{
	musb_writel(&tx->tx_head, 0, 0);
	musb_writel(&tx->tx_buf, 0, 0);
	musb_writel(&tx->tx_current, 0, 0);
	musb_writel(&tx->tx_buf_current, 0, 0);
	musb_writel(&tx->tx_info, 0, 0);
	musb_writel(&tx->tx_rem_len, 0, 0);
	/* musb_writel(&tx->tx_dummy, 0, 0); */
	musb_writel(&tx->tx_complete, 0, ptr);
}

static void __devinit cppi_pool_init(struct cppi *cppi, struct cppi_channel *c)
{
	int	j;

	/* initialize channel fields */
	c->head = NULL;
	c->tail = NULL;
	c->last_processed = NULL;
	c->channel.status = MUSB_DMA_STATUS_UNKNOWN;
	c->controller = cppi;
	c->is_rndis = 0;
	c->freelist = NULL;

	/* build the BD Free list for the channel */
	for (j = 0; j < NUM_TXCHAN_BD + 1; j++) {
		struct cppi_descriptor	*bd;
		dma_addr_t		dma;

		bd = dma_pool_alloc(cppi->pool, GFP_KERNEL, &dma);
		if (!bd)	/* dma_pool_alloc can fail; skip this slot */
			continue;
		bd->dma = dma;
		cppi_bd_free(c, bd);
	}
}

static int cppi_channel_abort(struct dma_channel *);

static void cppi_pool_free(struct cppi_channel *c)
{
	struct cppi		*cppi = c->controller;
	struct cppi_descriptor	*bd;

	(void) cppi_channel_abort(&c->channel);
	c->channel.status = MUSB_DMA_STATUS_UNKNOWN;
	c->controller = NULL;

	/* free all its bds */
	bd = c->last_processed;
	do {
		if (bd)
			dma_pool_free(cppi->pool, bd, bd->dma);
		bd = cppi_bd_alloc(c);
	} while (bd);
	c->last_processed = NULL;
}

static int __devinit cppi_controller_start(struct dma_controller *c)
{
	struct cppi	*controller;
	void __iomem	*tibase;
	int		i;

	controller = container_of(c, struct cppi, controller);

	/* do whatever is necessary to start controller */
	for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
		controller->tx[i].transmit = true;
		controller->tx[i].index = i;
	}
	for (i = 0; i < ARRAY_SIZE(controller->rx); i++) {
		controller->rx[i].transmit = false;
		controller->rx[i].index = i;
	}

	/* setup BD list on a per channel basis */
	for (i = 0; i < ARRAY_SIZE(controller->tx); i++)
		cppi_pool_init(controller, controller->tx + i);
	for (i = 0; i < ARRAY_SIZE(controller->rx); i++)
		cppi_pool_init(controller, controller->rx + i);

	tibase = controller->tibase;
	INIT_LIST_HEAD(&controller->tx_complete);

	/* initialise tx/rx channel head pointers to zero */
	for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
		struct cppi_channel	*tx_ch = controller->tx + i;
		struct cppi_tx_stateram __iomem *tx;

		INIT_LIST_HEAD(&tx_ch->tx_complete);

		tx = tibase + DAVINCI_TXCPPI_STATERAM_OFFSET(i);
		tx_ch->state_ram = tx;
		cppi_reset_tx(tx, 0);
	}
	for (i = 0; i < ARRAY_SIZE(controller->rx); i++) {
		struct cppi_channel	*rx_ch = controller->rx + i;
		struct cppi_rx_stateram __iomem *rx;

		INIT_LIST_HEAD(&rx_ch->tx_complete);

		rx = tibase + DAVINCI_RXCPPI_STATERAM_OFFSET(i);
		rx_ch->state_ram = rx;
		cppi_reset_rx(rx);
	}

	/* enable individual cppi channels */
	musb_writel(tibase, DAVINCI_TXCPPI_INTENAB_REG,
			DAVINCI_DMA_ALL_CHANNELS_ENABLE);
	musb_writel(tibase, DAVINCI_RXCPPI_INTENAB_REG,
			DAVINCI_DMA_ALL_CHANNELS_ENABLE);

	/* enable tx/rx CPPI control */
	musb_writel(tibase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_ENABLE);
	musb_writel(tibase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_ENABLE);

	/* disable RNDIS mode, also host rx RNDIS autorequest */
	musb_writel(tibase, DAVINCI_RNDIS_REG, 0);
	musb_writel(tibase, DAVINCI_AUTOREQ_REG, 0);

	return 0;
}

/*
 *  Stop DMA controller
 *
 *  De-Init the DMA controller as necessary.
 */

static int cppi_controller_stop(struct dma_controller *c)
{
	struct cppi		*controller;
	void __iomem		*tibase;
	int			i;
	struct musb		*musb;

	controller = container_of(c, struct cppi, controller);
	musb = controller->musb;

	tibase = controller->tibase;
	/* DISABLE INDIVIDUAL CHANNEL Interrupts */
	musb_writel(tibase, DAVINCI_TXCPPI_INTCLR_REG,
			DAVINCI_DMA_ALL_CHANNELS_ENABLE);
	musb_writel(tibase, DAVINCI_RXCPPI_INTCLR_REG,
			DAVINCI_DMA_ALL_CHANNELS_ENABLE);

	dev_dbg(musb->controller, "Tearing down RX and TX Channels\n");
	for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
		/* FIXME restructure of txdma to use bds like rxdma */
		controller->tx[i].last_processed = NULL;
		cppi_pool_free(controller->tx + i);
	}
	for (i = 0; i < ARRAY_SIZE(controller->rx); i++)
		cppi_pool_free(controller->rx + i);

	/* Only TX has a proper teardown procedure, and TX CPPI must not
	 * be disabled before that teardown completes.  So disable TX/RX
	 * CPPI only after the TX channels have been cleaned up.
	 */
	/* disable tx/rx cppi */
	musb_writel(tibase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE);
	musb_writel(tibase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE);

	return 0;
}

/* While dma channel is allocated, we only want the core irqs active
 * for fault reports, otherwise we'd get irqs that we don't care about.
 * Except for TX irqs, where dma done != fifo empty and reusable ...
 *
 * NOTE: docs don't say either way, but irq masking **enables** irqs.
 *
 * REVISIT same issue applies to pure PIO usage too, and non-cppi dma...
 */
static inline void core_rxirq_disable(void __iomem *tibase, unsigned epnum)
{
	musb_writel(tibase, DAVINCI_USB_INT_MASK_CLR_REG, 1 << (epnum + 8));
}

static inline void core_rxirq_enable(void __iomem *tibase, unsigned epnum)
{
	musb_writel(tibase, DAVINCI_USB_INT_MASK_SET_REG, 1 << (epnum + 8));
}
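
/* Example (illustrative only): RX endpoint irqs occupy the upper bits
 * of the DaVinci USB interrupt mask, one bit per endpoint:
 *
 *	core_rxirq_enable(tibase, 1);	// writes bit 9  (1 << (1 + 8))
 *	core_rxirq_enable(tibase, 4);	// writes bit 12 (1 << (4 + 8))
 *
 * Per the NOTE above, setting a bit via the INT_MASK_SET register
 * *enables* that irq; writing it to INT_MASK_CLR disables it again.
 */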

/*
 * Allocate a CPPI Channel for DMA.  With CPPI, channels are bound to
 * each transfer direction of a non-control endpoint, so allocating
 * (and deallocating) is mostly a way to notice bad housekeeping on
 * the software side.  We assume the irqs are always active.
 */
static struct dma_channel *
cppi_channel_allocate(struct dma_controller *c,
		struct musb_hw_ep *ep, u8 transmit)
{
	struct cppi		*controller;
	u8			index;
	struct cppi_channel	*cppi_ch;
	void __iomem		*tibase;
	struct musb		*musb;

	controller = container_of(c, struct cppi, controller);
	tibase = controller->tibase;
	musb = controller->musb;

	/* ep0 doesn't use DMA; remember cppi indices are 0..N-1 */
	index = ep->epnum - 1;

	/* return the corresponding CPPI Channel Handle, and
	 * probably disable the non-CPPI irq until we need it.
	 */
	if (transmit) {
		if (index >= ARRAY_SIZE(controller->tx)) {
			dev_dbg(musb->controller, "no %cX%d CPPI channel\n", 'T', index);
			return NULL;
		}
		cppi_ch = controller->tx + index;
	} else {
		if (index >= ARRAY_SIZE(controller->rx)) {
			dev_dbg(musb->controller, "no %cX%d CPPI channel\n", 'R', index);
			return NULL;
		}
		cppi_ch = controller->rx + index;
		core_rxirq_disable(tibase, ep->epnum);
	}

	/* REVISIT make this an error later once the same driver code works
	 * with the other DMA engine too
	 */
	if (cppi_ch->hw_ep)
		dev_dbg(musb->controller, "re-allocating DMA%d %cX channel %p\n",
				index, transmit ? 'T' : 'R', cppi_ch);
	cppi_ch->hw_ep = ep;
	cppi_ch->channel.status = MUSB_DMA_STATUS_FREE;
	cppi_ch->channel.max_len = 0x7fffffff;

	dev_dbg(musb->controller, "Allocate CPPI%d %cX\n", index, transmit ? 'T' : 'R');
	return &cppi_ch->channel;
}

/* Release a CPPI Channel.  */
static void cppi_channel_release(struct dma_channel *channel)
{
	struct cppi_channel	*c;
	void __iomem		*tibase;

	/* REVISIT:  for paranoia, check state and abort if needed... */

	c = container_of(channel, struct cppi_channel, channel);
	tibase = c->controller->tibase;
	if (!c->hw_ep)
		dev_dbg(c->controller->musb->controller,
			"releasing idle DMA channel %p\n", c);
	else if (!c->transmit)
		core_rxirq_enable(tibase, c->index + 1);

	/* for now, leave its cppi IRQ enabled (we won't trigger it) */
	c->hw_ep = NULL;
	channel->status = MUSB_DMA_STATUS_UNKNOWN;
}

/* Context: controller irqlocked */
static void
cppi_dump_rx(int level, struct cppi_channel *c, const char *tag)
{
	void __iomem			*base = c->controller->mregs;
	struct cppi_rx_stateram __iomem	*rx = c->state_ram;
	struct musb			*musb = c->controller->musb;

	musb_ep_select(musb, base, c->index + 1);

	dev_dbg(c->controller->musb->controller,
		"RX DMA%d%s: %d left, csr %04x, "
		"%08x H%08x S%08x C%08x, "
		"B%08x L%08x %08x .. %08x"
		"\n",
		c->index, tag,
		musb_readl(c->controller->tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + 4 * c->index),
		musb_readw(c->hw_ep->regs, MUSB_RXCSR),

		musb_readl(&rx->rx_skipbytes, 0),
		musb_readl(&rx->rx_head, 0),
		musb_readl(&rx->rx_sop, 0),
		musb_readl(&rx->rx_current, 0),

		musb_readl(&rx->rx_buf_current, 0),
		musb_readl(&rx->rx_len_len, 0),
		musb_readl(&rx->rx_cnt_cnt, 0),
		musb_readl(&rx->rx_complete, 0)
		);
}

/* Context: controller irqlocked */
static void
cppi_dump_tx(int level, struct cppi_channel *c, const char *tag)
{
	void __iomem			*base = c->controller->mregs;
	struct cppi_tx_stateram __iomem	*tx = c->state_ram;
	struct musb			*musb = c->controller->musb;

	musb_ep_select(musb, base, c->index + 1);

	dev_dbg(c->controller->musb->controller,
		"TX DMA%d%s: csr %04x, "
		"H%08x S%08x C%08x %08x, "
		"F%08x L%08x .. %08x"
		"\n",
		c->index, tag,
		musb_readw(c->hw_ep->regs, MUSB_TXCSR),

		musb_readl(&tx->tx_head, 0),
		musb_readl(&tx->tx_buf, 0),
		musb_readl(&tx->tx_current, 0),
		musb_readl(&tx->tx_buf_current, 0),

		musb_readl(&tx->tx_info, 0),
		musb_readl(&tx->tx_rem_len, 0),
		/* dummy/unused word 6 */
		musb_readl(&tx->tx_complete, 0)
		);
}

/* Context: controller irqlocked */
static inline void
cppi_rndis_update(struct cppi_channel *c, int is_rx,
		void __iomem *tibase, int is_rndis)
{
	/* we may need to change the rndis flag for this cppi channel */
	if (c->is_rndis != is_rndis) {
		u32	value = musb_readl(tibase, DAVINCI_RNDIS_REG);
		u32	temp = 1 << (c->index);

		if (is_rx)
			temp <<= 16;
		if (is_rndis)
			value |= temp;
		else
			value &= ~temp;
		musb_writel(tibase, DAVINCI_RNDIS_REG, value);
		c->is_rndis = is_rndis;
	}
}
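
/* Example (illustrative only): per the read-modify-write above, the
 * DAVINCI_RNDIS_REG keeps one mode bit per channel, TX channels in the
 * low half and RX channels shifted up by 16.  For channel index 2:
 *
 *	cppi_rndis_update(tx_ch, 0, tibase, 1);	// sets bit 2
 *	cppi_rndis_update(rx_ch, 1, tibase, 1);	// sets bit 18
 *
 * The cached c->is_rndis flag avoids touching the register when the
 * mode is unchanged.
 */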

#ifdef CONFIG_USB_MUSB_DEBUG
static void cppi_dump_rxbd(const char *tag, struct cppi_descriptor *bd)
{
	pr_debug("RXBD/%s %08x: "
			"nxt %08x buf %08x off.blen %08x opt.plen %08x\n",
			tag, bd->dma,
			bd->hw_next, bd->hw_bufp, bd->hw_off_len,
			bd->hw_options);
}
#endif

static void cppi_dump_rxq(int level, const char *tag, struct cppi_channel *rx)
{
#ifdef CONFIG_USB_MUSB_DEBUG
	struct cppi_descriptor	*bd;

	if (!_dbg_level(level))
		return;
	cppi_dump_rx(level, rx, tag);
	if (rx->last_processed)
		cppi_dump_rxbd("last", rx->last_processed);
	for (bd = rx->head; bd; bd = bd->next)
		cppi_dump_rxbd("active", bd);
#endif
}


/* NOTE:  DaVinci autoreq is ignored except for host side "RNDIS" mode RX;
 * so we won't ever use it (see "CPPI RX Woes" below).
 */
static inline int cppi_autoreq_update(struct cppi_channel *rx,
		void __iomem *tibase, int onepacket, unsigned n_bds)
{
	u32	val;

#ifdef	RNDIS_RX_IS_USABLE
	u32	tmp;
	/* assert(is_host_active(musb)) */

	/* start from "AutoReq never" */
	tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
	val = tmp & ~((0x3) << (rx->index * 2));

	/* HCD arranged reqpkt for packet #1.  we arrange int
	 * for all but the last one, maybe in two segments.
	 */
	if (!onepacket) {
#if 0
		/* use two segments, autoreq "all" then the last "never" */
		val |= ((0x3) << (rx->index * 2));
		n_bds--;
#else
		/* one segment, autoreq "all-but-last" */
		val |= ((0x1) << (rx->index * 2));
#endif
	}

	if (val != tmp) {
		int n = 100;

		/* make sure that autoreq is updated before continuing */
		musb_writel(tibase, DAVINCI_AUTOREQ_REG, val);
		do {
			tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
			if (tmp == val)
				break;
			cpu_relax();
		} while (n-- > 0);
	}
#endif

	/* REQPKT is turned off after each segment */
	if (n_bds && rx->channel.actual_len) {
		void __iomem	*regs = rx->hw_ep->regs;

		val = musb_readw(regs, MUSB_RXCSR);
		if (!(val & MUSB_RXCSR_H_REQPKT)) {
			val |= MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_H_WZC_BITS;
			musb_writew(regs, MUSB_RXCSR, val);
			/* flush writebuffer */
			val = musb_readw(regs, MUSB_RXCSR);
		}
	}
	return n_bds;
}
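
/* Example (illustrative only): the AUTOREQ register holds a two-bit
 * field per RX channel at bit offset (index * 2).  From the (currently
 * compiled-out) code above, the values in use would be:
 *
 *	0x0	AutoReq never (the "start from" state)
 *	0x1	AutoReq all-but-last (one segment)
 *	0x3	AutoReq all, with the last segment "never" (two segments)
 *
 * Since RNDIS_RX_IS_USABLE is left undefined, only the REQPKT fixup at
 * the end of this function actually runs.
 */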

/* Buffer enqueuing Logic:
 *
 *  - RX builds new queues each time, to help handle routine "early
 *    termination" cases (faults, including errors and short reads)
 *    more correctly.
 *
 *  - for now, TX reuses the same queue of BDs every time
 *
 * REVISIT long term, we want a normal dynamic model: the goal will be
 * to append to the existing queue, processing completed "dma buffers"
 * (segments) on the fly.
 *
 * Otherwise we force an IRQ latency between requests, which slows us a lot
 * (especially in "transparent" dma).  Unfortunately that model seems to be
 * inherent in the DMA model from the Mentor code, except in the rare case
 * of transfers big enough (~128+ KB) that we could append "middle" segments
 * in the TX paths.  (RX can't do this, see below.)
 *
 * That's true even in the CPPI-friendly iso case, where most urbs have
 * several small segments provided in a group and where the "packet at a time"
 * "transparent" DMA model is always correct, even on the RX side.
 */

/*
 * CPPI TX:
 * ========
 * TX is a lot more reasonable than RX; it doesn't need to run in
 * irq-per-packet mode very often.  RNDIS mode seems to behave well too
 * (except for how it handles the exactly-N-packets case).  Building a
 * txdma queue with multiple requests (urb or usb_request) looks
 * like it would work ... but fault handling would need much testing.
 *
 * The main issue with TX mode RNDIS relates to transfer lengths that
 * are an exact multiple of the packet length.  It appears that there's
 * a hiccup in that case (maybe the DMA completes before the ZLP gets
 * written?) boiling down to not being able to rely on CPPI writing any
 * terminating zero length packet before the next transfer is written.
 * So that's punted to PIO; better yet, gadget drivers can avoid it.
 *
 * Plus, there's allegedly an undocumented constraint that rndis transfer
 * length be a multiple of 64 bytes ... but the chip doesn't act that
 * way, and we really don't _want_ that behavior anyway.
 *
 * On TX, "transparent" mode works ... although experiments have shown
 * problems trying to use the SOP/EOP bits in different USB packets.
 *
 * REVISIT try to handle terminating zero length packets using CPPI
 * instead of doing it by PIO after an IRQ.  (Meanwhile, make Ethernet
 * links avoid that issue by forcing them to avoid zlps.)
 */
static void
cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx)
{
	unsigned		maxpacket = tx->maxpacket;
	dma_addr_t		addr = tx->buf_dma + tx->offset;
	size_t			length = tx->buf_len - tx->offset;
	struct cppi_descriptor	*bd;
	unsigned		n_bds;
	unsigned		i;
	struct cppi_tx_stateram	__iomem *tx_ram = tx->state_ram;
	int			rndis;

	/* TX can use the CPPI "rndis" mode, where we can probably fit this
	 * transfer in one BD and one IRQ.  The only time we would NOT want
	 * to use it is when hardware constraints prevent it, or if we'd
	 * trigger the "send a ZLP?" confusion.
	 */
	rndis = (maxpacket & 0x3f) == 0
		&& length > maxpacket
		&& length < 0xffff
		&& (length % maxpacket) != 0;
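
	/* Worked example (illustrative only): with maxpacket = 512 and
	 * length = 1200, all four tests pass (512 is a multiple of 64,
	 * 1200 > 512, 1200 < 0xffff, 1200 % 512 == 176), so the whole
	 * transfer fits in one RNDIS-mode BD.  A 1024 byte transfer would
	 * fail the last test and use transparent mode instead, avoiding
	 * the "send a ZLP?" confusion noted above.
	 */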

	if (rndis) {
		maxpacket = length;
		n_bds = 1;
	} else {
		n_bds = length / maxpacket;
		if (!length || (length % maxpacket))
			n_bds++;
		n_bds = min(n_bds, (unsigned) NUM_TXCHAN_BD);
		length = min(n_bds * maxpacket, length);
	}

	dev_dbg(musb->controller, "TX DMA%d, pktSz %d %s bds %d dma 0x%llx len %u\n",
			tx->index,
			maxpacket,
			rndis ? "rndis" : "transparent",
			n_bds,
			(unsigned long long)addr, length);

	cppi_rndis_update(tx, 0, musb->ctrl_base, rndis);

	/* assuming here that channel_program is called during
	 * transfer initiation ... current code maintains state
	 * for one outstanding request only (no queues, not even
	 * the implicit ones of an iso urb).
	 */

	bd = tx->freelist;
	tx->head = bd;
	tx->last_processed = NULL;

	/* FIXME use BD pool like RX side does, and just queue
	 * the minimum number for this request.
	 */

	/* Prepare queue of BDs first, then hand it to hardware.
	 * All BDs except maybe the last should be of full packet
	 * size; for RNDIS there _is_ only that last packet.
	 */
	for (i = 0; i < n_bds; ) {
		if (++i < n_bds && bd->next)
			bd->hw_next = bd->next->dma;
		else
			bd->hw_next = 0;

		bd->hw_bufp = tx->buf_dma + tx->offset;

		/* FIXME set EOP only on the last packet,
		 * SOP only on the first ... avoid IRQs
		 */
		if ((tx->offset + maxpacket) <= tx->buf_len) {
			tx->offset += maxpacket;
			bd->hw_off_len = maxpacket;
			bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET
				| CPPI_OWN_SET | maxpacket;
		} else {
			/* only this one may be a partial USB Packet */
			u32		partial_len;

			partial_len = tx->buf_len - tx->offset;
			tx->offset = tx->buf_len;
			bd->hw_off_len = partial_len;

			bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET
				| CPPI_OWN_SET | partial_len;
			if (partial_len == 0)
				bd->hw_options |= CPPI_ZERO_SET;
		}

		dev_dbg(musb->controller, "TXBD %p: nxt %08x buf %08x len %04x opt %08x\n",
				bd, bd->hw_next, bd->hw_bufp,
				bd->hw_off_len, bd->hw_options);

		/* update the last BD enqueued to the list */
		tx->tail = bd;
		bd = bd->next;
	}

	/* BDs live in DMA-coherent memory, but writes might be pending */
	cpu_drain_writebuffer();

	/* Write to the HeadPtr in state RAM to trigger */
	musb_writel(&tx_ram->tx_head, 0, (u32)tx->freelist->dma);

	cppi_dump_tx(5, tx, "/S");
}

/*
 * CPPI RX Woes:
 * =============
 * Consider a 1KB bulk RX buffer in two scenarios:  (a) it's fed two 300 byte
 * packets back-to-back, and (b) it's fed two 512 byte packets back-to-back.
 * (Full speed transfers have similar scenarios.)
 *
 * The correct behavior for Linux is that (a) fills the buffer with 300 bytes,
 * and the next packet goes into a buffer that's queued later; while (b) fills
 * the buffer with 1024 bytes.  How to do that with CPPI?
 *
 * - RX queues in "rndis" mode -- one single BD -- handle (a) correctly, but
 *   (b) loses **BADLY** because nothing (!) happens when that second packet
 *   fills the buffer, much less when a third one arrives.  (Which makes this
 *   not a "true" RNDIS mode.  In the RNDIS protocol short-packet termination
 *   is optional, and it's fine if peripherals -- not hosts! -- pad messages
 *   out to end-of-buffer.  Standard PCI host controller DMA descriptors
 *   implement that mode by default ... which is no accident.)
 *
 * - RX queues in "transparent" mode -- two BDs with 512 bytes each -- have
 *   converse problems:  (b) is handled right, but (a) loses badly.  CPPI RX
 *   ignores SOP/EOP markings and processes both of those BDs; so both packets
 *   are loaded into the buffer (with a 212 byte gap between them), and the next
 *   buffer queued will NOT get its 300 bytes of data.  (It seems like SOP/EOP
 *   are intended as outputs for RX queues, not inputs...)
 *
 * - A variant of "transparent" mode -- one BD at a time -- is the only way to
 *   reliably make both cases work, with software handling both cases correctly
 *   and at the significant penalty of needing an IRQ per packet.  (The lack of
 *   I/O overlap can be slightly ameliorated by enabling double buffering.)
 *
 * So how to get rid of IRQ-per-packet?  The transparent multi-BD case could
 * be used in special cases like mass storage, which sets URB_SHORT_NOT_OK
 * (or maybe its peripheral side counterpart) to flag (a) scenarios as errors
 * with guaranteed driver level fault recovery and scrubbing out what's left
 * of that garbaged datastream.
 *
 * But there seems to be no way to identify the cases where CPPI RNDIS mode
 * is appropriate -- which do NOT include RNDIS host drivers, but do include
 * the CDC Ethernet driver! -- and the documentation is incomplete/wrong.
 * So we can't _ever_ use RX RNDIS mode ... except by using a heuristic
 * that applies best on the peripheral side (and which could fail rudely).
 *
 * Leaving only "transparent" mode; we avoid multi-bd modes in almost all
 * cases other than mass storage class.  Otherwise we're correct but slow,
 * since CPPI penalizes our need for a "true RNDIS" default mode.
 */


/* Heuristic, intended to kick in for ethernet/rndis peripheral ONLY
 *
 * IFF
 *  (a)	peripheral mode ... since rndis peripherals could pad their
 *	writes to hosts, causing i/o failure; or we'd have to cope with
 *	a largely unknowable variety of host side protocol variants
 *  (b)	and short reads are NOT errors ... since full reads would
 *	cause those same i/o failures
 *  (c)	and read length is
 *	- less than 64KB (max per cppi descriptor)
 *	- not a multiple of 4096 (g_zero default, full reads typical)
 *	- N (>1) packets long, ditto (full reads not EXPECTED)
 * THEN
 *   try rx rndis mode
 *
 * Cost of heuristic failing:  RXDMA wedges at the end of transfers that
 * fill out the whole buffer.  Buggy host side usb network drivers could
 * trigger that, but "in the field" such bugs seem to be all but unknown.
 *
 * So this module parameter lets the heuristic be disabled.  When using
 * gadgetfs, the heuristic will probably need to be disabled.
 */
static bool cppi_rx_rndis = 1;

module_param(cppi_rx_rndis, bool, 0);
MODULE_PARM_DESC(cppi_rx_rndis, "enable/disable RX RNDIS heuristic");
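
/* Example (illustrative only): with a maxpacket of 512 in peripheral
 * mode, a 4608 byte read satisfies the heuristic above (4608 > 512,
 * under 64KB, 4608 & 0x0fff == 512, and a whole number of packets), so
 * it would be tried in RX RNDIS mode; an 8192 byte read is a multiple
 * of 4096 and stays in transparent mode.  To disable the heuristic
 * entirely -- e.g. for gadgetfs -- pass cppi_rx_rndis=0 as a module
 * (or kernel command line) parameter.
 */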

/**
 * cppi_next_rx_segment - dma read for the next chunk of a buffer
 * @musb: the controller
 * @rx: dma channel
 * @onepacket: true unless caller treats short reads as errors, and
 *	performs fault recovery above usbcore.
 * Context: controller irqlocked
 *
 * See above notes about why we can't use multi-BD RX queues except in
 * rare cases (mass storage class), and can never use the hardware "rndis"
 * mode (since it's not a "true" RNDIS mode) with complete safety.
 *
 * It's ESSENTIAL that callers specify "onepacket" mode unless they kick in
 * code to recover from corrupted datastreams after each short transfer.
 */
static void
cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket)
{
	unsigned		maxpacket = rx->maxpacket;
	dma_addr_t		addr = rx->buf_dma + rx->offset;
	size_t			length = rx->buf_len - rx->offset;
	struct cppi_descriptor	*bd, *tail;
	unsigned		n_bds;
	unsigned		i;
	void __iomem		*tibase = musb->ctrl_base;
	int			is_rndis = 0;
	struct cppi_rx_stateram	__iomem *rx_ram = rx->state_ram;

	if (onepacket) {
		/* almost every USB driver, host or peripheral side */
		n_bds = 1;

		/* maybe apply the heuristic above */
		if (cppi_rx_rndis
				&& is_peripheral_active(musb)
				&& length > maxpacket
				&& (length & ~0xffff) == 0
				&& (length & 0x0fff) != 0
				&& (length & (maxpacket - 1)) == 0) {
			maxpacket = length;
			is_rndis = 1;
		}
	} else {
		/* virtually nothing except mass storage class */
		if (length > 0xffff) {
			n_bds = 0xffff / maxpacket;
			length = n_bds * maxpacket;
		} else {
			n_bds = length / maxpacket;
			if (length % maxpacket)
				n_bds++;
		}
		if (n_bds == 1)
			onepacket = 1;
		else
			n_bds = min(n_bds, (unsigned) NUM_RXCHAN_BD);
	}

	/* In host mode, autorequest logic can generate some IN tokens; it's
	 * tricky since we can't leave REQPKT set in RXCSR after the transfer
	 * finishes.  So:  multipacket transfers involve two or more segments.
	 * And always at least two IRQs ... RNDIS mode is not an option.
	 */
	if (is_host_active(musb))
		n_bds = cppi_autoreq_update(rx, tibase, onepacket, n_bds);

	cppi_rndis_update(rx, 1, musb->ctrl_base, is_rndis);

	length = min(n_bds * maxpacket, length);

	dev_dbg(musb->controller, "RX DMA%d seg, maxp %d %s bds %d (cnt %d) "
			"dma 0x%llx len %u %u/%u\n",
			rx->index, maxpacket,
			onepacket
				? (is_rndis ? "rndis" : "onepacket")
				: "multipacket",
			n_bds,
			musb_readl(tibase,
				DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
					& 0xffff,
			(unsigned long long)addr, length,
			rx->channel.actual_len, rx->buf_len);

	/* only queue one segment at a time, since the hardware prevents
	 * correct queue shutdown after unexpected short packets
	 */
	bd = cppi_bd_alloc(rx);
	rx->head = bd;

	/* Build BDs for all packets in this segment */
	for (i = 0, tail = NULL; bd && i < n_bds; i++, tail = bd) {
		u32	bd_len;

		if (i) {
			bd = cppi_bd_alloc(rx);
			if (!bd)
				break;
			tail->next = bd;
			tail->hw_next = bd->dma;
		}
		bd->hw_next = 0;

		/* all but the last packet will be maxpacket size */
		if (maxpacket < length)
			bd_len = maxpacket;
		else
			bd_len = length;

		bd->hw_bufp = addr;
		addr += bd_len;
		rx->offset += bd_len;

		bd->hw_off_len = (0 /*offset*/ << 16) + bd_len;
		bd->buflen = bd_len;

		bd->hw_options = CPPI_OWN_SET | (i == 0 ? length : 0);
		length -= bd_len;
	}

	/* we always expect at least one reusable BD! */
	if (!tail) {
		WARNING("rx dma%d -- no BDs? need %d\n", rx->index, n_bds);
		return;
	} else if (i < n_bds)
		WARNING("rx dma%d -- only %d of %d BDs\n", rx->index, i, n_bds);

	tail->next = NULL;
	tail->hw_next = 0;

	bd = rx->head;
	rx->tail = tail;

	/* short reads and other faults should terminate this entire
	 * dma segment.  we want one "dma packet" per dma segment, not
	 * one per USB packet, terminating the whole queue at once...
	 * NOTE that current hardware seems to ignore SOP and EOP.
	 */
	bd->hw_options |= CPPI_SOP_SET;
	tail->hw_options |= CPPI_EOP_SET;

#ifdef CONFIG_USB_MUSB_DEBUG
	if (_dbg_level(5)) {
		struct cppi_descriptor	*d;

		for (d = rx->head; d; d = d->next)
			cppi_dump_rxbd("S", d);
	}
#endif

	/* in case the preceding transfer left some state... */
	tail = rx->last_processed;
	if (tail) {
		tail->next = bd;
		tail->hw_next = bd->dma;
	}

	core_rxirq_enable(tibase, rx->index + 1);

	/* BDs live in DMA-coherent memory, but writes might be pending */
	cpu_drain_writebuffer();

	/* REVISIT specs say to write this AFTER the BUFCNT register
	 * below ... but that loses badly.
	 */
	musb_writel(&rx_ram->rx_head, 0, bd->dma);

	/* bufferCount must be at least 3, and zeroes on completion
	 * unless it underflows below zero, or stops at two, or keeps
	 * growing ... grr.
	 */
	i = musb_readl(tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
			& 0xffff;

	if (!i)
		musb_writel(tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
			n_bds + 2);
	else if (n_bds > (i - 3))
		musb_writel(tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
			n_bds - (i - 3));

	i = musb_readl(tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
			& 0xffff;
	if (i < (2 + n_bds)) {
		dev_dbg(musb->controller, "bufcnt%d underrun - %d (for %d)\n",
					rx->index, i, n_bds);
		musb_writel(tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
			n_bds + 2);
	}

	cppi_dump_rx(4, rx, "/S");
}
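
/* Note on the BUFCNT juggling above (observed behavior; the comment
 * there admits the documented rules don't hold): the goal is a
 * free-buffer count of at least n_bds + 2 before the segment starts.
 * A counter reading 0 gets n_bds + 2 written; a counter reading i with
 * too little headroom gets topped up via n_bds - (i - 3); and the final
 * readback re-writes n_bds + 2 if the counter still looks short.
 */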

/**
 * cppi_channel_program - program channel for data transfer
 * @ch: the channel
 * @maxpacket: max packet size
 * @mode: For RX, 1 unless the usb protocol driver promised to treat
 *	all short reads as errors and kick in high level fault recovery.
 *	For TX, ignored because of RNDIS mode races/glitches.
 * @dma_addr: dma address of buffer
 * @len: length of buffer
 * Context: controller irqlocked
 */
static int cppi_channel_program(struct dma_channel *ch,
		u16 maxpacket, u8 mode,
		dma_addr_t dma_addr, u32 len)
{
	struct cppi_channel	*cppi_ch;
	struct cppi		*controller;
	struct musb		*musb;

	cppi_ch = container_of(ch, struct cppi_channel, channel);
	controller = cppi_ch->controller;
	musb = controller->musb;

	switch (ch->status) {
	case MUSB_DMA_STATUS_BUS_ABORT:
	case MUSB_DMA_STATUS_CORE_ABORT:
		/* fault irq handler should have handled cleanup */
		WARNING("%cX DMA%d not cleaned up after abort!\n",
				cppi_ch->transmit ? 'T' : 'R',
				cppi_ch->index);
		/* WARN_ON(1); */
		break;
	case MUSB_DMA_STATUS_BUSY:
		WARNING("program active channel?  %cX DMA%d\n",
				cppi_ch->transmit ? 'T' : 'R',
				cppi_ch->index);
		/* WARN_ON(1); */
		break;
	case MUSB_DMA_STATUS_UNKNOWN:
		dev_dbg(musb->controller, "%cX DMA%d not allocated!\n",
				cppi_ch->transmit ? 'T' : 'R',
				cppi_ch->index);
		/* FALLTHROUGH */
	case MUSB_DMA_STATUS_FREE:
		break;
	}

	ch->status = MUSB_DMA_STATUS_BUSY;

	/* set transfer parameters, then queue up its first segment */
	cppi_ch->buf_dma = dma_addr;
	cppi_ch->offset = 0;
	cppi_ch->maxpacket = maxpacket;
	cppi_ch->buf_len = len;
	cppi_ch->channel.actual_len = 0;

	/* TX channel?  or RX? */
	if (cppi_ch->transmit)
		cppi_next_tx_segment(musb, cppi_ch);
	else
		cppi_next_rx_segment(musb, cppi_ch, mode);

	return true;
}

static bool cppi_rx_scan(struct cppi *cppi, unsigned ch)
{
	struct cppi_channel		*rx = &cppi->rx[ch];
	struct cppi_rx_stateram __iomem	*state = rx->state_ram;
	struct cppi_descriptor		*bd;
	struct cppi_descriptor		*last = rx->last_processed;
	bool				completed = false;
	bool				acked = false;
	int				i;
	dma_addr_t			safe2ack;
	void __iomem			*regs = rx->hw_ep->regs;
	struct musb			*musb = cppi->musb;

	cppi_dump_rx(6, rx, "/K");

	bd = last ? last->next : rx->head;
	if (!bd)
		return false;

	/* run through all completed BDs */
	for (i = 0, safe2ack = musb_readl(&state->rx_complete, 0);
			(safe2ack || completed) && bd && i < NUM_RXCHAN_BD;
			i++, bd = bd->next) {
		u16	len;

		/* catch latest BD writes from CPPI */
		rmb();
		if (!completed && (bd->hw_options & CPPI_OWN_SET))
			break;

		dev_dbg(musb->controller, "C/RXBD %llx: nxt %08x buf %08x "
			"off.len %08x opt.len %08x (%d)\n",
			(unsigned long long)bd->dma, bd->hw_next, bd->hw_bufp,
			bd->hw_off_len, bd->hw_options,
			rx->channel.actual_len);

		/* actual packet received length */
		if ((bd->hw_options & CPPI_SOP_SET) && !completed)
			len = bd->hw_off_len & CPPI_RECV_PKTLEN_MASK;
		else
			len = 0;

		if (bd->hw_options & CPPI_EOQ_MASK)
			completed = true;

		if (!completed && len < bd->buflen) {
			/* NOTE:  when we get a short packet, RXCSR_H_REQPKT
			 * must have been cleared, and no more DMA packets may
			 * be active in the queue... TI docs didn't say, but
			 * CPPI ignores those BDs even though OWN is still set.
			 */
			completed = true;
			dev_dbg(musb->controller, "rx short %d/%d (%d)\n",
					len, bd->buflen,
					rx->channel.actual_len);
		}

		/* If we got here, we expect to ack at least one BD; meanwhile
		 * CPPI may be completing other BDs while we scan this list...
		 *
		 * RACE: we can notice OWN cleared before CPPI raises the
		 * matching irq by writing that BD as the completion pointer.
		 * In such cases, stop scanning and wait for the irq, avoiding
		 * lost acks and states where BD ownership is unclear.
		 */
		if (bd->dma == safe2ack) {
			musb_writel(&state->rx_complete, 0, safe2ack);
			safe2ack = musb_readl(&state->rx_complete, 0);
			acked = true;
			if (bd->dma == safe2ack)
				safe2ack = 0;
		}

		rx->channel.actual_len += len;

		cppi_bd_free(rx, last);
		last = bd;

		/* stop scanning on end-of-segment */
		if (bd->hw_next == 0)
			completed = true;
	}
	rx->last_processed = last;

	/* dma abort, lost ack, or ... */
	if (!acked && last) {
		int	csr;

		if (safe2ack == 0 || safe2ack == rx->last_processed->dma)
			musb_writel(&state->rx_complete, 0, safe2ack);
		if (safe2ack == 0) {
			cppi_bd_free(rx, last);
			rx->last_processed = NULL;

			/* if we land here on the host side, H_REQPKT will
			 * be clear and we need to restart the queue...
			 */
			WARN_ON(rx->head);
		}
		musb_ep_select(cppi->musb, cppi->mregs, rx->index + 1);
		csr = musb_readw(regs, MUSB_RXCSR);
		if (csr & MUSB_RXCSR_DMAENAB) {
			dev_dbg(musb->controller, "list%d %p/%p, last %llx%s, csr %04x\n",
				rx->index,
				rx->head, rx->tail,
				rx->last_processed
					? (unsigned long long)
						rx->last_processed->dma
					: 0,
				completed ? ", completed" : "",
				csr);
			cppi_dump_rxq(4, "/what?", rx);
		}
	}
	if (!completed) {
		int	csr;

		rx->head = bd;

		/* REVISIT seems like "autoreq all but EOP" doesn't...
		 * setting it here "should" be racey, but seems to work
		 */
		csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR);
		if (is_host_active(cppi->musb)
				&& bd
				&& !(csr & MUSB_RXCSR_H_REQPKT)) {
			csr |= MUSB_RXCSR_H_REQPKT;
			musb_writew(regs, MUSB_RXCSR,
					MUSB_RXCSR_H_WZC_BITS | csr);
			csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR);
		}
	} else {
		rx->head = NULL;
		rx->tail = NULL;
	}

	cppi_dump_rx(6, rx, completed ? "/completed" : "/cleaned");
	return completed;
}

irqreturn_t cppi_interrupt(int irq, void *dev_id)
{
	struct musb		*musb = dev_id;
	struct cppi		*cppi;
	void __iomem		*tibase;
	struct musb_hw_ep	*hw_ep = NULL;
	u32			rx, tx;
	int			i, index;
	unsigned long		uninitialized_var(flags);

	cppi = container_of(musb->dma_controller, struct cppi, controller);
	if (cppi->irq)
		spin_lock_irqsave(&musb->lock, flags);

	tibase = musb->ctrl_base;

	tx = musb_readl(tibase, DAVINCI_TXCPPI_MASKED_REG);
	rx = musb_readl(tibase, DAVINCI_RXCPPI_MASKED_REG);

	if (!tx && !rx) {
		if (cppi->irq)
			spin_unlock_irqrestore(&musb->lock, flags);
		return IRQ_NONE;
	}

	dev_dbg(musb->controller, "CPPI IRQ Tx%x Rx%x\n", tx, rx);

	/* process TX channels */
	for (index = 0; tx; tx = tx >> 1, index++) {
		struct cppi_channel		*tx_ch;
		struct cppi_tx_stateram __iomem	*tx_ram;
		bool				completed = false;
		struct cppi_descriptor		*bd;

		if (!(tx & 1))
			continue;

		tx_ch = cppi->tx + index;
		tx_ram = tx_ch->state_ram;

		/* FIXME  need a cppi_tx_scan() routine, which
		 * can also be called from abort code
		 */

		cppi_dump_tx(5, tx_ch, "/E");

		bd = tx_ch->head;

		/*
		 * If head is null, this could mean an abort interrupt
		 * that still needs to be acknowledged.
		 */
		if (NULL == bd) {
			dev_dbg(musb->controller, "null BD\n");
			musb_writel(&tx_ram->tx_complete, 0, 0);
			continue;
		}

		/* run through all completed BDs */
		for (i = 0; !completed && bd && i < NUM_TXCHAN_BD;
				i++, bd = bd->next) {
			u16	len;

			/* catch latest BD writes from CPPI */
			rmb();
			if (bd->hw_options & CPPI_OWN_SET)
				break;

			dev_dbg(musb->controller, "C/TXBD %p n %x b %x off %x opt %x\n",
					bd, bd->hw_next, bd->hw_bufp,
					bd->hw_off_len, bd->hw_options);

			len = bd->hw_off_len & CPPI_BUFFER_LEN_MASK;
			tx_ch->channel.actual_len += len;

			tx_ch->last_processed = bd;

			/* write completion register to acknowledge
			 * processing of completed BDs, and possibly
			 * release the IRQ; EOQ might not be set ...
			 *
			 * REVISIT use the same ack strategy as rx
			 *
			 * REVISIT have observed bit 18 set; huh??
			 */
			/* if ((bd->hw_options & CPPI_EOQ_MASK)) */
			musb_writel(&tx_ram->tx_complete, 0, bd->dma);

			/* stop scanning on end-of-segment */
			if (bd->hw_next == 0)
				completed = true;
		}

		/* on end of segment, maybe go to next one */
		if (completed) {
			/* cppi_dump_tx(4, tx_ch, "/complete"); */

			/* transfer more, or report completion */
			if (tx_ch->offset >= tx_ch->buf_len) {
				tx_ch->head = NULL;
				tx_ch->tail = NULL;
				tx_ch->channel.status = MUSB_DMA_STATUS_FREE;

				hw_ep = tx_ch->hw_ep;

				musb_dma_completion(musb, index + 1, 1);

			} else {
				/* Bigger transfer than we could fit in
				 * that first batch of descriptors...
				 */
				cppi_next_tx_segment(musb, tx_ch);
			}
		} else
			tx_ch->head = bd;
	}

	/* Start processing the RX block */
	for (index = 0; rx; rx = rx >> 1, index++) {

		if (rx & 1) {
			struct cppi_channel		*rx_ch;

			rx_ch = cppi->rx + index;

			/* let incomplete dma segments finish */
			if (!cppi_rx_scan(cppi, index))
				continue;

			/* start another dma segment if needed */
			if (rx_ch->channel.actual_len != rx_ch->buf_len
					&& rx_ch->channel.actual_len
						== rx_ch->offset) {
				cppi_next_rx_segment(musb, rx_ch, 1);
				continue;
			}

			/* all segments completed! */
			rx_ch->channel.status = MUSB_DMA_STATUS_FREE;

			hw_ep = rx_ch->hw_ep;

			core_rxirq_disable(tibase, index + 1);
			musb_dma_completion(musb, index + 1, 0);
		}
	}

	/* write to CPPI EOI register to re-enable interrupts */
	musb_writel(tibase, DAVINCI_CPPI_EOI_REG, 0);

	if (cppi->irq)
		spin_unlock_irqrestore(&musb->lock, flags);

	return IRQ_HANDLED;
}

/* Instantiate a software object representing a DMA controller. */
struct dma_controller *__devinit
cppi_dma_controller_create(struct musb *musb, void __iomem *mregs)
{
	struct cppi		*controller;
	struct device		*dev = musb->controller;
	struct platform_device	*pdev = to_platform_device(dev);
	int			irq = platform_get_irq_byname(pdev, "dma");

	controller = kzalloc(sizeof *controller, GFP_KERNEL);
	if (!controller)
		return NULL;

	controller->mregs = mregs;
	controller->tibase = mregs - DAVINCI_BASE_OFFSET;

	controller->musb = musb;
	controller->controller.start = cppi_controller_start;
	controller->controller.stop = cppi_controller_stop;
	controller->controller.channel_alloc = cppi_channel_allocate;
	controller->controller.channel_release = cppi_channel_release;
	controller->controller.channel_program = cppi_channel_program;
	controller->controller.channel_abort = cppi_channel_abort;

	/* NOTE: allocating from on-chip SRAM would give the least
	 * contention for memory access, if that ever matters here.
	 */

	/* setup BufferPool */
	controller->pool = dma_pool_create("cppi",
			controller->musb->controller,
			sizeof(struct cppi_descriptor),
			CPPI_DESCRIPTOR_ALIGN, 0);
	if (!controller->pool) {
		kfree(controller);
		return NULL;
	}

	if (irq > 0) {
		if (request_irq(irq, cppi_interrupt, 0, "cppi-dma", musb)) {
			dev_err(dev, "request_irq %d failed!\n", irq);
			cppi_dma_controller_destroy(&controller->controller);
			return NULL;
		}
		controller->irq = irq;
	}

	return &controller->controller;
}
EXPORT_SYMBOL(cppi_dma_controller_create);

/*
 *  Destroy a previously-instantiated DMA controller.
 */
void cppi_dma_controller_destroy(struct dma_controller *c)
{
	struct cppi	*cppi;

	cppi = container_of(c, struct cppi, controller);

	if (cppi->irq)
		free_irq(cppi->irq, cppi->musb);

	/* assert:  caller stopped the controller first */
	dma_pool_destroy(cppi->pool);

	kfree(cppi);
}
EXPORT_SYMBOL(cppi_dma_controller_destroy);
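
/* Example (illustrative only, hypothetical glue code): a platform glue
 * layer would typically pair create/destroy around the controller's
 * lifetime, roughly:
 *
 *	musb->dma_controller =
 *		cppi_dma_controller_create(musb, musb->mregs);
 *	...
 *	cppi_dma_controller_destroy(musb->dma_controller);
 *
 * with the controller stopped first, per the assert in destroy above.
 */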

/*
 * Context: controller irqlocked, endpoint selected
 */
static int cppi_channel_abort(struct dma_channel *channel)
{
	struct cppi_channel	*cppi_ch;
	struct cppi		*controller;
	void __iomem		*mbase;
	void __iomem		*tibase;
	void __iomem		*regs;
	u32			value;
	struct cppi_descriptor	*queue;

	cppi_ch = container_of(channel, struct cppi_channel, channel);

	controller = cppi_ch->controller;

	switch (channel->status) {
	case MUSB_DMA_STATUS_BUS_ABORT:
	case MUSB_DMA_STATUS_CORE_ABORT:
		/* from RX or TX fault irq handler */
	case MUSB_DMA_STATUS_BUSY:
		/* the hardware needs shutting down */
		regs = cppi_ch->hw_ep->regs;
		break;
	case MUSB_DMA_STATUS_UNKNOWN:
	case MUSB_DMA_STATUS_FREE:
		return 0;
	default:
		return -EINVAL;
	}

	if (!cppi_ch->transmit && cppi_ch->head)
		cppi_dump_rxq(3, "/abort", cppi_ch);

	mbase = controller->mregs;
	tibase = controller->tibase;

	queue = cppi_ch->head;
	cppi_ch->head = NULL;
	cppi_ch->tail = NULL;

	/* REVISIT should rely on caller having done this,
	 * and caller should rely on us not changing it.
	 * peripheral code is safe ... check host too.
	 */
	musb_ep_select(controller->musb, mbase, cppi_ch->index + 1);

	if (cppi_ch->transmit) {
		struct cppi_tx_stateram __iomem *tx_ram;
		/* REVISIT put timeouts on these controller handshakes */

		cppi_dump_tx(6, cppi_ch, " (teardown)");

		/* teardown DMA engine then usb core */
		do {
			value = musb_readl(tibase, DAVINCI_TXCPPI_TEAR_REG);
		} while (!(value & CPPI_TEAR_READY));
		musb_writel(tibase, DAVINCI_TXCPPI_TEAR_REG, cppi_ch->index);

		tx_ram = cppi_ch->state_ram;
		do {
			value = musb_readl(&tx_ram->tx_complete, 0);
		} while (0xFFFFFFFC != value);

		/* FIXME clean up the transfer state ... here?
		 * the completion routine should get called with
		 * an appropriate status code.
		 */

		value = musb_readw(regs, MUSB_TXCSR);
		value &= ~MUSB_TXCSR_DMAENAB;
		value |= MUSB_TXCSR_FLUSHFIFO;
		musb_writew(regs, MUSB_TXCSR, value);
		musb_writew(regs, MUSB_TXCSR, value);

		/*
		 * 1. Write to completion Ptr value 0x1(bit 0 set)
		 *    (write back mode)
		 * 2. Wait for abort interrupt and then put the channel in
		 *    compare mode by writing 1 to the tx_complete register.
		 */
		cppi_reset_tx(tx_ram, 1);
		cppi_ch->head = NULL;
		musb_writel(&tx_ram->tx_complete, 0, 1);
		cppi_dump_tx(5, cppi_ch, " (done teardown)");

		/* REVISIT tx side _should_ clean up the same way
		 * as the RX side ... this does no cleanup at all!
		 */

	} else /* RX */ {
		u16	csr;

		/* NOTE: docs don't guarantee any of this works ...  we
		 * expect that if the usb core stops telling the cppi core
		 * to pull more data from it, then it'll be safe to flush
		 * current RX DMA state iff any pending fifo transfer is done.
		 */

		core_rxirq_disable(tibase, cppi_ch->index + 1);

		/* for host, ensure ReqPkt is never set again */
		if (is_host_active(cppi_ch->controller->musb)) {
			value = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
			value &= ~((0x3) << (cppi_ch->index * 2));
			musb_writel(tibase, DAVINCI_AUTOREQ_REG, value);
		}

		csr = musb_readw(regs, MUSB_RXCSR);

		/* for host, clear (just) ReqPkt at end of current packet(s) */
		if (is_host_active(cppi_ch->controller->musb)) {
			csr |= MUSB_RXCSR_H_WZC_BITS;
			csr &= ~MUSB_RXCSR_H_REQPKT;
		} else
			csr |= MUSB_RXCSR_P_WZC_BITS;

		/* clear dma enable */
		csr &= ~(MUSB_RXCSR_DMAENAB);
		musb_writew(regs, MUSB_RXCSR, csr);
		csr = musb_readw(regs, MUSB_RXCSR);

		/* Quiesce: wait for current dma to finish (if not cleanup).
		 * We can't use bit zero of stateram->rx_sop, since that
		 * refers to an entire "DMA packet" not just emptying the
		 * current fifo.  Most segments need multiple usb packets.
		 */
		if (channel->status == MUSB_DMA_STATUS_BUSY)
			udelay(50);

		/* scan the current list, reporting any data that was
		 * transferred and acking any IRQ
		 */
		cppi_rx_scan(controller, cppi_ch->index);

		/* clobber the existing state once it's idle
		 *
		 * NOTE: arguably, we should also wait for all the other
		 * RX channels to quiesce (how??) and then temporarily
		 * disable RXCPPI_CTRL_REG ... but it seems that we can
		 * rely on the controller restarting from state ram, with
		 * only RXCPPI_BUFCNT state being bogus.  BUFCNT will
		 * correct itself after the next DMA transfer though.
		 *
		 * REVISIT does using rndis mode change that?
		 */
		cppi_reset_rx(cppi_ch->state_ram);

		/* next DMA request _should_ load cppi head ptr */

		/* ... we don't "free" that list, only mutate it in place. */
		cppi_dump_rx(5, cppi_ch, " (done abort)");

		/* clean up previously pending bds */
		cppi_bd_free(cppi_ch, cppi_ch->last_processed);
		cppi_ch->last_processed = NULL;

		while (queue) {
			struct cppi_descriptor	*tmp = queue->next;

			cppi_bd_free(cppi_ch, queue);
			queue = tmp;
		}
	}

	channel->status = MUSB_DMA_STATUS_FREE;
	cppi_ch->buf_dma = 0;
	cppi_ch->offset = 0;
	cppi_ch->buf_len = 0;
	cppi_ch->maxpacket = 0;
	return 0;
}

/* TBD Queries:
 *
 * Power Management ... probably turn off cppi during suspend, restart;
 * check state ram?  Clocking is presumably shared with usb core.
 */
MODULE_DESCRIPTION("CPPI dma controller driver for musb");
MODULE_LICENSE("GPL v2");

static int __init cppi_dma_init(void)
{
	return 0;
}
module_init(cppi_dma_init);

static void __exit cppi_dma_exit(void)
{
}
module_exit(cppi_dma_exit);