/*
 * Copyright (C) 2005-2006 by Texas Instruments
 * Copyright (c) 2008, MontaVista Software, Inc. <source@mvista.com>
 *
 * This file implements a DMA interface using TI's CPPI 4.1 DMA.
 *
 * This program is free software; you can distribute it and/or modify it
 * under the terms of the GNU General Public License (Version 2) as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 */

#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/slab.h>		/* for kzalloc()/kfree() */
#include <linux/delay.h>	/* for udelay() */

#include "cppi41.h"

#include "musb_core.h"
#include "musb_dma.h"
#include "cppi41_dma.h"

/* Configuration */
#define USB_CPPI41_DESC_SIZE_SHIFT 6
#define USB_CPPI41_DESC_ALIGN	(1 << USB_CPPI41_DESC_SIZE_SHIFT)
#define USB_CPPI41_CH_NUM_PD	64	/* 4K bulk data at full speed */
#define USB_CPPI41_MAX_PD	(USB_CPPI41_CH_NUM_PD * USB_CPPI41_NUM_CH)

#undef DEBUG_CPPI_TD
#undef USBDRV_DEBUG

#ifdef USBDRV_DEBUG
#define dprintk(x, ...)	printk(x, ## __VA_ARGS__)
#else
#define dprintk(x, ...)
#endif

/*
 * Data structure definitions
 */

/*
 * USB Packet Descriptor
 */
struct usb_pkt_desc;

struct usb_pkt_desc {
	/* Hardware descriptor fields from this point */
	struct cppi41_host_pkt_desc hw_desc;
	/* Protocol specific data */
	dma_addr_t dma_addr;
	struct usb_pkt_desc *next_pd_ptr;
	u8 ch_num;
	u8 ep_num;
};

/**
 * struct cppi41_channel - DMA Channel Control Structure
 *
 * The same structure is used for both Tx and Rx channels.
 */
struct cppi41_channel {
	struct dma_channel channel;

	struct cppi41_dma_ch_obj dma_ch_obj; /* DMA channel object */
	struct cppi41_queue src_queue;	/* Tx queue or Rx free descriptor/
					 * buffer queue */
	struct cppi41_queue_obj queue_obj; /* Tx queue object or Rx free
					    * descriptor/buffer queue object */

	u32 tag_info;			/* Tx PD Tag Information field */

	/* Which direction of which endpoint? */
	struct musb_hw_ep *end_pt;
	u8 transmit;
	u8 ch_num;			/* Channel number of Tx/Rx 0..3 */

	/* DMA mode: "transparent", RNDIS, CDC, or Generic RNDIS */
	u8 dma_mode;
	u8 autoreq;

	/* Bookkeeping for the current transfer request */
	dma_addr_t start_addr;
	u32 length;
	u32 curr_offset;
	u16 pkt_size;
	u8 transfer_mode;
	u8 zlp_queued;
};

/**
 * struct cppi41 - CPPI 4.1 DMA Controller Object
 *
 * Encapsulates all bookkeeping and data structures pertaining to
 * the CPPI 4.1 DMA controller.
 */
struct cppi41 {
	struct dma_controller controller;
	struct musb *musb;

	struct cppi41_channel tx_cppi_ch[USB_CPPI41_NUM_CH];
	struct cppi41_channel rx_cppi_ch[USB_CPPI41_NUM_CH];

	struct usb_pkt_desc *pd_pool_head; /* Free PD pool head */
	dma_addr_t pd_mem_phys;		/* PD memory physical address */
	void *pd_mem;			/* PD memory pointer */
	u8 pd_mem_rgn;			/* PD memory region number */

	u16 teardownQNum;		/* Teardown completion queue number */
	struct cppi41_queue_obj queue_obj; /* Teardown completion queue
					    * object */
	u32 pkt_info;			/* Tx PD Packet Information field */
};

#ifdef DEBUG_CPPI_TD
static void print_pd_list(struct usb_pkt_desc *pd_pool_head)
{
	struct usb_pkt_desc *curr_pd = pd_pool_head;
	int cnt = 0;

	while (curr_pd != NULL) {
		if (cnt % 8 == 0)
			dprintk("\n%02x ", cnt);
		cnt++;
		dprintk(" %p", curr_pd);
		curr_pd = curr_pd->next_pd_ptr;
	}
	dprintk("\n");
}
#endif

static struct usb_pkt_desc *usb_get_free_pd(struct cppi41 *cppi)
{
	struct usb_pkt_desc *free_pd = cppi->pd_pool_head;

	if (free_pd != NULL) {
		cppi->pd_pool_head = free_pd->next_pd_ptr;
		free_pd->next_pd_ptr = NULL;
	}
	return free_pd;
}

static void usb_put_free_pd(struct cppi41 *cppi, struct usb_pkt_desc *free_pd)
{
	free_pd->next_pd_ptr = cppi->pd_pool_head;
	cppi->pd_pool_head = free_pd;
}
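
/*
 * Usage sketch for the free PD pool above (illustrative only; the real
 * call sites follow below). All callers run with the controller IRQ lock
 * held, so this singly-linked LIFO list needs no locking of its own:
 *
 *	struct usb_pkt_desc *pd = usb_get_free_pd(cppi);
 *
 *	if (pd) {
 *		fill pd->hw_desc and push pd->dma_addr onto a queue;
 *		the completion path later recycles it with
 *		usb_put_free_pd(cppi, pd);
 *	}
 */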

/**
 * cppi41_controller_start - start DMA controller
 * @controller: the controller
 *
 * This function initializes the CPPI 4.1 Tx/Rx channels.
 */
static int __devinit cppi41_controller_start(struct dma_controller *controller)
{
	struct cppi41 *cppi;
	struct cppi41_channel *cppi_ch;
	void __iomem *reg_base;
	struct usb_pkt_desc *curr_pd;
	unsigned long pd_addr;
	int i;

	cppi = container_of(controller, struct cppi41, controller);

	/*
	 * TODO: We may need to check USB_CPPI41_MAX_PD here since CPPI 4.1
	 * requires the descriptor count to be a multiple of 2^5 (i.e. 32).
	 * Similarly, the descriptor size should also be a multiple of 32.
	 */
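
	/*
	 * A minimal compile-time sketch of the checks described in the TODO
	 * above (an assumption drawn from that comment rather than from the
	 * CPPI 4.1 spec text itself; also assumes the hardware fields must
	 * sit at the start of each 64-byte descriptor slot):
	 */
	BUILD_BUG_ON(USB_CPPI41_MAX_PD % 32);
	BUILD_BUG_ON(USB_CPPI41_DESC_ALIGN % 32);
	BUILD_BUG_ON(offsetof(struct usb_pkt_desc, hw_desc) != 0);
	BUILD_BUG_ON(sizeof(struct usb_pkt_desc) > USB_CPPI41_DESC_ALIGN);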

	/*
	 * Allocate free packet descriptor pool for all Tx/Rx endpoints --
	 * dma_alloc_coherent() will return a page aligned address, so our
	 * alignment requirement will be honored.
	 */
	cppi->pd_mem = dma_alloc_coherent(cppi->musb->controller,
					  USB_CPPI41_MAX_PD *
					  USB_CPPI41_DESC_ALIGN,
					  &cppi->pd_mem_phys,
					  GFP_KERNEL | GFP_DMA);
	if (cppi->pd_mem == NULL) {
		DBG(1, "ERROR: packet descriptor memory allocation failed\n");
		return 0;
	}

	if (cppi41_mem_rgn_alloc(usb_cppi41_info.q_mgr, cppi->pd_mem_phys,
				 USB_CPPI41_DESC_SIZE_SHIFT,
				 get_count_order(USB_CPPI41_MAX_PD),
				 &cppi->pd_mem_rgn)) {
		DBG(1, "ERROR: queue manager memory region allocation "
		    "failed\n");
		goto free_pds;
	}

	/* Allocate the teardown completion queue */
	if (cppi41_queue_alloc(CPPI41_UNASSIGNED_QUEUE,
			       0, &cppi->teardownQNum)) {
		DBG(1, "ERROR: teardown completion queue allocation failed\n");
		goto free_mem_rgn;
	}
	DBG(4, "Allocated teardown completion queue %d in queue manager 0\n",
	    cppi->teardownQNum);

	if (cppi41_queue_init(&cppi->queue_obj, 0, cppi->teardownQNum)) {
		DBG(1, "ERROR: teardown completion queue initialization "
		    "failed\n");
		goto free_queue;
	}

	/*
	 * "Slice" PDs one-by-one from the big chunk and
	 * add them to the free pool.
	 */
	curr_pd = (struct usb_pkt_desc *)cppi->pd_mem;
	pd_addr = cppi->pd_mem_phys;
	for (i = 0; i < USB_CPPI41_MAX_PD; i++) {
		curr_pd->dma_addr = pd_addr;

		usb_put_free_pd(cppi, curr_pd);
		curr_pd = (struct usb_pkt_desc *)((char *)curr_pd +
						  USB_CPPI41_DESC_ALIGN);
		pd_addr += USB_CPPI41_DESC_ALIGN;
	}

	/* Configure the Tx channels */
	for (i = 0, cppi_ch = cppi->tx_cppi_ch;
	     i < ARRAY_SIZE(cppi->tx_cppi_ch); ++i, ++cppi_ch) {
		const struct cppi41_tx_ch *tx_info;

		memset(cppi_ch, 0, sizeof(struct cppi41_channel));
		cppi_ch->transmit = 1;
		cppi_ch->ch_num = i;
		cppi_ch->channel.private_data = cppi;

		/*
		 * Extract the CPPI 4.1 DMA Tx channel configuration and
		 * construct/store the Tx PD tag info field for later use...
		 */
		tx_info = cppi41_dma_block[usb_cppi41_info.dma_block].tx_ch_info
			  + usb_cppi41_info.ep_dma_ch[i];
		cppi_ch->src_queue = tx_info->tx_queue[0];
		cppi_ch->tag_info = (tx_info->port_num <<
				     CPPI41_SRC_TAG_PORT_NUM_SHIFT) |
				    (tx_info->ch_num <<
				     CPPI41_SRC_TAG_CH_NUM_SHIFT) |
				    (tx_info->sub_ch_num <<
				     CPPI41_SRC_TAG_SUB_CH_NUM_SHIFT);
	}

	/* Configure the Rx channels */
	for (i = 0, cppi_ch = cppi->rx_cppi_ch;
	     i < ARRAY_SIZE(cppi->rx_cppi_ch); ++i, ++cppi_ch) {
		memset(cppi_ch, 0, sizeof(struct cppi41_channel));
		cppi_ch->ch_num = i;
		cppi_ch->channel.private_data = cppi;
	}

	/* Construct/store Tx PD packet info field for later use */
	cppi->pkt_info = (CPPI41_PKT_TYPE_USB << CPPI41_PKT_TYPE_SHIFT) |
			 (CPPI41_RETURN_LINKED << CPPI41_RETURN_POLICY_SHIFT) |
			 (usb_cppi41_info.q_mgr << CPPI41_RETURN_QMGR_SHIFT) |
			 (usb_cppi41_info.tx_comp_q[0] <<
			  CPPI41_RETURN_QNUM_SHIFT);

	/* Do the necessary configuration in hardware to get started */
	reg_base = cppi->musb->ctrl_base;

	/* Disable auto request mode */
	musb_writel(reg_base, USB_AUTOREQ_REG, 0);

	/* Disable the CDC/RNDIS modes */
	musb_writel(reg_base, USB_TX_MODE_REG, 0);
	musb_writel(reg_base, USB_RX_MODE_REG, 0);

	return 1;

free_queue:
	if (cppi41_queue_free(0, cppi->teardownQNum))
		DBG(1, "ERROR: failed to free teardown completion queue\n");

free_mem_rgn:
	if (cppi41_mem_rgn_free(usb_cppi41_info.q_mgr, cppi->pd_mem_rgn))
		DBG(1, "ERROR: failed to free queue manager memory region\n");

free_pds:
	dma_free_coherent(cppi->musb->controller,
			  USB_CPPI41_MAX_PD * USB_CPPI41_DESC_ALIGN,
			  cppi->pd_mem, cppi->pd_mem_phys);

	return 0;
}

/**
 * cppi41_controller_stop - stop DMA controller
 * @controller: the controller
 *
 * De-initialize the DMA Controller as necessary.
 */
static int cppi41_controller_stop(struct dma_controller *controller)
{
	struct cppi41 *cppi;
	void __iomem *reg_base;

	cppi = container_of(controller, struct cppi41, controller);

	/* Free the teardown completion queue */
	if (cppi41_queue_free(usb_cppi41_info.q_mgr, cppi->teardownQNum))
		DBG(1, "ERROR: failed to free teardown completion queue\n");

	/*
	 * Free the packet descriptor region allocated
	 * for all Tx/Rx channels.
	 */
	if (cppi41_mem_rgn_free(usb_cppi41_info.q_mgr, cppi->pd_mem_rgn))
		DBG(1, "ERROR: failed to free queue manager memory region\n");

	dma_free_coherent(cppi->musb->controller,
			  USB_CPPI41_MAX_PD * USB_CPPI41_DESC_ALIGN,
			  cppi->pd_mem, cppi->pd_mem_phys);

	reg_base = cppi->musb->ctrl_base;

	/* Disable auto request mode */
	musb_writel(reg_base, USB_AUTOREQ_REG, 0);

	/* Disable the CDC/RNDIS modes */
	musb_writel(reg_base, USB_TX_MODE_REG, 0);
	musb_writel(reg_base, USB_RX_MODE_REG, 0);

	return 1;
}

/**
 * cppi41_channel_alloc - allocate a CPPI channel for DMA.
 * @controller: the controller
 * @ep: the endpoint
 * @is_tx: 1 for Tx channel, 0 for Rx channel
 *
 * With CPPI, channels are bound to each transfer direction of a non-control
 * endpoint, so allocating (and deallocating) is mostly a way to notice bad
 * housekeeping on the software side. We assume the IRQs are always active.
 */
static struct dma_channel *cppi41_channel_alloc(struct dma_controller
						*controller,
						struct musb_hw_ep *ep, u8 is_tx)
{
	struct cppi41 *cppi;
	struct cppi41_channel *cppi_ch;
	u32 ch_num, ep_num = ep->epnum;

	cppi = container_of(controller, struct cppi41, controller);

	/* Remember, ep_num: 1 .. Max_EP, and CPPI ch_num: 0 .. Max_EP - 1 */
	ch_num = ep_num - 1;

	if (ep_num > USB_CPPI41_NUM_CH) {
		DBG(1, "No %cx DMA channel for EP%d\n",
		    is_tx ? 'T' : 'R', ep_num);
		return NULL;
	}

	cppi_ch = (is_tx ? cppi->tx_cppi_ch : cppi->rx_cppi_ch) + ch_num;

	/* As of now, just return the corresponding CPPI 4.1 channel handle */
	if (is_tx) {
		/* Initialize the CPPI 4.1 Tx DMA channel */
		if (cppi41_tx_ch_init(&cppi_ch->dma_ch_obj,
				      usb_cppi41_info.dma_block,
				      usb_cppi41_info.ep_dma_ch[ch_num])) {
			DBG(1, "ERROR: cppi41_tx_ch_init failed for "
			    "channel %d\n", ch_num);
			return NULL;
		}
		/*
		 * Teardown descriptors will be pushed to the dedicated
		 * completion queue.
		 */
		cppi41_dma_ch_default_queue(&cppi_ch->dma_ch_obj,
					    0, cppi->teardownQNum);
	} else {
		struct cppi41_rx_ch_cfg rx_cfg;
		u8 q_mgr = usb_cppi41_info.q_mgr;
		int i;

		/* Initialize the CPPI 4.1 Rx DMA channel */
		if (cppi41_rx_ch_init(&cppi_ch->dma_ch_obj,
				      usb_cppi41_info.dma_block,
				      usb_cppi41_info.ep_dma_ch[ch_num])) {
			DBG(1, "ERROR: cppi41_rx_ch_init failed\n");
			return NULL;
		}

		if (cppi41_queue_alloc(CPPI41_FREE_DESC_BUF_QUEUE |
				       CPPI41_UNASSIGNED_QUEUE,
				       q_mgr, &cppi_ch->src_queue.q_num)) {
			DBG(1, "ERROR: cppi41_queue_alloc failed for "
			    "free descriptor/buffer queue\n");
			return NULL;
		}
		DBG(4, "Allocated free descriptor/buffer queue %d in "
		    "queue manager %d\n", cppi_ch->src_queue.q_num, q_mgr);

		rx_cfg.default_desc_type = cppi41_rx_host_desc;
		rx_cfg.sop_offset = 0;
		rx_cfg.retry_starved = 1;
		rx_cfg.rx_queue.q_mgr = cppi_ch->src_queue.q_mgr = q_mgr;
		rx_cfg.rx_queue.q_num = usb_cppi41_info.rx_comp_q[0];
		for (i = 0; i < 4; i++)
			rx_cfg.cfg.host_pkt.fdb_queue[i] = cppi_ch->src_queue;
		cppi41_rx_ch_configure(&cppi_ch->dma_ch_obj, &rx_cfg);
	}

	/* Initialize the CPPI 4.1 DMA source queue */
	if (cppi41_queue_init(&cppi_ch->queue_obj, cppi_ch->src_queue.q_mgr,
			      cppi_ch->src_queue.q_num)) {
		DBG(1, "ERROR: cppi41_queue_init failed for %s queue\n",
		    is_tx ? "Tx" : "Rx free descriptor/buffer");
		if (is_tx == 0 &&
		    cppi41_queue_free(cppi_ch->src_queue.q_mgr,
				      cppi_ch->src_queue.q_num))
			DBG(1, "ERROR: failed to free Rx descriptor/buffer "
			    "queue\n");
		return NULL;
	}

	/* Enable the DMA channel */
	cppi41_dma_ch_enable(&cppi_ch->dma_ch_obj);

	if (cppi_ch->end_pt)
		DBG(1, "Re-allocating DMA %cx channel %d (%p)\n",
		    is_tx ? 'T' : 'R', ch_num, cppi_ch);

	cppi_ch->end_pt = ep;
	cppi_ch->ch_num = ch_num;
	cppi_ch->channel.status = MUSB_DMA_STATUS_FREE;
	cppi_ch->channel.max_len = is_tx ?
				   CPPI41_TXDMA_MAXLEN : CPPI41_RXDMA_MAXLEN;

	DBG(4, "Allocated DMA %cx channel %d for EP%d\n", is_tx ? 'T' : 'R',
	    ch_num, ep_num);

	return &cppi_ch->channel;
}

/**
 * cppi41_channel_release - release a CPPI DMA channel
 * @channel: the channel
 */
static void cppi41_channel_release(struct dma_channel *channel)
{
	struct cppi41_channel *cppi_ch;

	/* REVISIT: for paranoia, check state and abort if needed... */
	cppi_ch = container_of(channel, struct cppi41_channel, channel);
	if (cppi_ch->end_pt == NULL)
		DBG(1, "Releasing idle DMA channel %p\n", cppi_ch);

	/* But for now, not its IRQ */
	cppi_ch->end_pt = NULL;
	channel->status = MUSB_DMA_STATUS_UNKNOWN;

	cppi41_dma_ch_disable(&cppi_ch->dma_ch_obj);

	/* De-allocate the Rx free descriptor/buffer queue */
	if (cppi_ch->transmit == 0 &&
	    cppi41_queue_free(cppi_ch->src_queue.q_mgr,
			      cppi_ch->src_queue.q_num))
		DBG(1, "ERROR: failed to free Rx descriptor/buffer queue\n");
}

static void cppi41_mode_update(struct cppi41_channel *cppi_ch, u8 mode)
{
	if (mode != cppi_ch->dma_mode) {
		struct cppi41 *cppi = cppi_ch->channel.private_data;
		void __iomem *reg_base = cppi->musb->ctrl_base;
		u32 reg_val;
		u8 ep_num = cppi_ch->ch_num + 1;

		if (cppi_ch->transmit) {
			reg_val = musb_readl(reg_base, USB_TX_MODE_REG);
			reg_val &= ~USB_TX_MODE_MASK(ep_num);
			reg_val |= mode << USB_TX_MODE_SHIFT(ep_num);
			musb_writel(reg_base, USB_TX_MODE_REG, reg_val);
		} else {
			reg_val = musb_readl(reg_base, USB_RX_MODE_REG);
			reg_val &= ~USB_RX_MODE_MASK(ep_num);
			reg_val |= mode << USB_RX_MODE_SHIFT(ep_num);
			musb_writel(reg_base, USB_RX_MODE_REG, reg_val);
		}
		cppi_ch->dma_mode = mode;
	}
}

/*
 * CPPI 4.1 Tx:
 * ============
 * Tx is a lot more reasonable than Rx: RNDIS mode seems to behave well except
 * for how it handles the exactly-N-packets case. It appears that there's a
 * hiccup in that case (maybe the DMA completes before a ZLP gets written?)
 * boiling down to not being able to rely on the XFER DMA writing any
 * terminating zero length packet before the next transfer is started...
 *
 * The generic RNDIS mode does not have this misfeature, so we prefer using it
 * instead. We then send the terminating ZLP *explicitly* using DMA instead of
 * doing it by PIO after an IRQ.
 */
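
/*
 * Illustrative sketch (not referenced by the driver itself): the predicate
 * below restates the condition cppi41_next_tx_segment() uses when picking
 * generic RNDIS mode -- the USB packet size must be a multiple of 64 bytes
 * and more than one full packet must remain to be sent.
 */
static inline int tx_can_use_generic_rndis(u16 pkt_size, u32 length)
{
	return (pkt_size & 0x3f) == 0 && length > pkt_size;
}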

/**
 * cppi41_next_tx_segment - DMA write for the next chunk of a buffer
 * @tx_ch: Tx channel
 *
 * Context: controller IRQ-locked
 */
static unsigned cppi41_next_tx_segment(struct cppi41_channel *tx_ch)
{
	struct cppi41 *cppi = tx_ch->channel.private_data;
	struct usb_pkt_desc *curr_pd;
	u32 length = tx_ch->length - tx_ch->curr_offset;
	u32 pkt_size = tx_ch->pkt_size;
	unsigned num_pds, n;

	/*
	 * Tx can use the generic RNDIS mode where we can probably fit this
	 * transfer in one PD and one IRQ. The only time we would NOT want
	 * to use it is when the hardware constraints prevent it...
	 */
	if ((pkt_size & 0x3f) == 0 && length > pkt_size) {
		num_pds = 1;
		pkt_size = length;
		cppi41_mode_update(tx_ch, USB_GENERIC_RNDIS_MODE);
	} else {
		num_pds = (length + pkt_size - 1) / pkt_size;
		cppi41_mode_update(tx_ch, USB_TRANSPARENT_MODE);
	}

	/*
	 * If the length of the transmit buffer is 0 or a multiple of the
	 * endpoint size, then send the zero length packet.
	 */
	if (!length || (tx_ch->transfer_mode && length % pkt_size == 0))
		num_pds++;

	DBG(4, "TX DMA%u, %s, maxpkt %u, %u PDs, addr %#x, len %u\n",
	    tx_ch->ch_num, tx_ch->dma_mode ? "accelerated" : "transparent",
	    pkt_size, num_pds, tx_ch->start_addr + tx_ch->curr_offset, length);

	for (n = 0; n < num_pds; n++) {
		struct cppi41_host_pkt_desc *hw_desc;

		/* Get Tx host packet descriptor from the free pool */
		curr_pd = usb_get_free_pd(cppi);
		if (curr_pd == NULL) {
			DBG(1, "No Tx PDs\n");
			break;
		}

		if (length < pkt_size)
			pkt_size = length;

		hw_desc = &curr_pd->hw_desc;
		hw_desc->desc_info = (CPPI41_DESC_TYPE_HOST <<
				      CPPI41_DESC_TYPE_SHIFT) | pkt_size;
		hw_desc->tag_info = tx_ch->tag_info;
		hw_desc->pkt_info = cppi->pkt_info;

		hw_desc->buf_ptr = tx_ch->start_addr + tx_ch->curr_offset;
		hw_desc->buf_len = pkt_size;
		hw_desc->next_desc_ptr = 0;

		curr_pd->ch_num = tx_ch->ch_num;
		curr_pd->ep_num = tx_ch->end_pt->epnum;

		tx_ch->curr_offset += pkt_size;
		length -= pkt_size;

		if (pkt_size == 0)
			tx_ch->zlp_queued = 1;

		DBG(5, "TX PD %p: buf %08x, len %08x, pkt info %08x\n", curr_pd,
		    hw_desc->buf_ptr, hw_desc->buf_len, hw_desc->pkt_info);

		cppi41_queue_push(&tx_ch->queue_obj, curr_pd->dma_addr,
				  USB_CPPI41_DESC_ALIGN, pkt_size);
	}

	return n;
}

static void cppi41_autoreq_update(struct cppi41_channel *rx_ch, u8 autoreq)
{
	struct cppi41 *cppi = rx_ch->channel.private_data;

	if (is_host_active(cppi->musb) &&
	    autoreq != rx_ch->autoreq) {
		void __iomem *reg_base = cppi->musb->ctrl_base;
		u32 reg_val = musb_readl(reg_base, USB_AUTOREQ_REG);
		u8 ep_num = rx_ch->ch_num + 1;

		reg_val &= ~USB_RX_AUTOREQ_MASK(ep_num);
		reg_val |= autoreq << USB_RX_AUTOREQ_SHIFT(ep_num);

		musb_writel(reg_base, USB_AUTOREQ_REG, reg_val);
		rx_ch->autoreq = autoreq;
	}
}

static void cppi41_set_ep_size(struct cppi41_channel *rx_ch, u32 pkt_size)
{
	struct cppi41 *cppi = rx_ch->channel.private_data;
	void __iomem *reg_base = cppi->musb->ctrl_base;
	u8 ep_num = rx_ch->ch_num + 1;

	musb_writel(reg_base, USB_GENERIC_RNDIS_EP_SIZE_REG(ep_num), pkt_size);
}

/*
 * CPPI 4.1 Rx:
 * ============
 * Consider a 1KB bulk Rx buffer in two scenarios: (a) it's fed two 300 byte
 * packets back-to-back, and (b) it's fed two 512 byte packets back-to-back.
 * (Full speed transfers have similar scenarios.)
 *
 * The correct behavior for Linux is that (a) fills the buffer with 300 bytes,
 * and the next packet goes into a buffer that's queued later; while (b) fills
 * the buffer with 1024 bytes. How to do that with accelerated DMA modes?
 *
 * Rx queues in RNDIS mode (one single BD) handle (a) correctly but (b) loses
 * BADLY because nothing (!) happens when that second packet fills the buffer,
 * much less when a third one arrives -- which makes it not a "true" RNDIS
 * mode. In the RNDIS protocol short-packet termination is optional, and it's
 * fine if the peripherals (not hosts!) pad the messages out to the end of the
 * buffer. Standard PCI host controller DMA descriptors implement that mode
 * by default... which is no accident.
 *
 * Generic RNDIS mode is the only way to reliably make both cases work. This
 * mode is identical to the "normal" RNDIS mode except for the case where the
 * last packet of the segment matches the max USB packet size -- in this case,
 * the packet will be closed when a value (0x10000 max) in the Generic RNDIS
 * EP Size register is reached. This mode will work for the network drivers
 * (CDC/RNDIS) as well as for the mass storage drivers where there is no short
 * packet.
 *
 * BUT we can only use non-transparent modes when the USB packet size is a
 * multiple of 64 bytes. Let's see what happens when this is not the case...
 *
 * Rx queues (2 BDs with 512 bytes each) have converse problems to RNDIS mode:
 * (b) is handled right but (a) loses badly. DMA doesn't stop after receiving
 * a short packet and processes both of those PDs; so both packets are loaded
 * into the buffer (with a 212 byte gap between them), and the next buffer
 * queued will NOT get its 300 bytes of data. Even in the case when there
 * should be no short packets (URB_SHORT_NOT_OK is set), queueing several
 * packets in the host mode doesn't win us anything since we have to manually
 * "prod" the Rx process after each packet is received by setting the ReqPkt
 * bit in the endpoint's RXCSR; in the peripheral mode without short packets,
 * queueing could be used BUT we'll have to *teardown* the channel if a short
 * packet still arrives in the peripheral mode, and to "collect" the left-over
 * packet descriptors from the free descriptor/buffer queue in both cases...
 *
 * One BD at a time is the only way to make both cases work reliably, at the
 * significant penalty of needing an IRQ per packet. (The lack of I/O overlap
 * can be slightly ameliorated by enabling double buffering.)
 *
 * There seems to be no way to identify for sure the cases where the CDC mode
 * is appropriate...
 */
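
/*
 * Illustrative sketch of the generic RNDIS EP size computation used by
 * cppi41_next_rx_segment() below (not referenced by the driver itself):
 * close the RNDIS "packet" after the largest whole number of USB packets
 * that fits in the buffer, capped at the 0x10000 maximum value the
 * Generic RNDIS EP Size register can hold.
 */
static inline u32 rx_generic_rndis_ep_size(u32 length, u32 pkt_size)
{
	if (likely(length < 0x10000))
		return length - length % pkt_size;
	return 0x10000;
}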

/**
 * cppi41_next_rx_segment - DMA read for the next chunk of a buffer
 * @rx_ch: Rx channel
 *
 * Context: controller IRQ-locked
 *
 * NOTE: In the transparent mode, we have to queue one packet at a time since:
 * - we must avoid starting reception of another packet after receiving
 *   a short packet;
 * - in host mode we have to set ReqPkt bit in the endpoint's RXCSR after
 *   receiving each packet but the last one... ugly!
 */
static unsigned cppi41_next_rx_segment(struct cppi41_channel *rx_ch)
{
	struct cppi41 *cppi = rx_ch->channel.private_data;
	struct usb_pkt_desc *curr_pd;
	struct cppi41_host_pkt_desc *hw_desc;
	u32 length = rx_ch->length - rx_ch->curr_offset;
	u32 pkt_size = rx_ch->pkt_size;

	/*
	 * Rx can use the generic RNDIS mode where we can probably fit this
	 * transfer in one PD and one IRQ (or two with a short packet).
	 */
	if ((pkt_size & 0x3f) == 0 && length >= 2 * pkt_size) {
		cppi41_mode_update(rx_ch, USB_GENERIC_RNDIS_MODE);
		cppi41_autoreq_update(rx_ch, USB_AUTOREQ_ALL_BUT_EOP);

		if (likely(length < 0x10000))
			pkt_size = length - length % pkt_size;
		else
			pkt_size = 0x10000;
		cppi41_set_ep_size(rx_ch, pkt_size);
	} else {
		cppi41_mode_update(rx_ch, USB_TRANSPARENT_MODE);
		cppi41_autoreq_update(rx_ch, USB_NO_AUTOREQ);
	}

	DBG(4, "RX DMA%u, %s, maxpkt %u, addr %#x, rec'd %u/%u\n",
	    rx_ch->ch_num, rx_ch->dma_mode ? "accelerated" : "transparent",
	    pkt_size, rx_ch->start_addr + rx_ch->curr_offset,
	    rx_ch->curr_offset, rx_ch->length);

	/* Get Rx packet descriptor from the free pool */
	curr_pd = usb_get_free_pd(cppi);
	if (curr_pd == NULL) {
		/* Shouldn't ever happen! */
		DBG(4, "No Rx PDs\n");
		return 0;
	}

	/*
	 * HCD arranged ReqPkt for the first packet.
	 * We arrange it for all but the last one.
	 */
	if (is_host_active(cppi->musb) && rx_ch->channel.actual_len) {
		void __iomem *epio = rx_ch->end_pt->regs;
		u16 csr = musb_readw(epio, MUSB_RXCSR);

		csr |= MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_H_WZC_BITS;
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	if (length < pkt_size)
		pkt_size = length;

	hw_desc = &curr_pd->hw_desc;
	hw_desc->orig_buf_ptr = rx_ch->start_addr + rx_ch->curr_offset;
	hw_desc->orig_buf_len = pkt_size;

	curr_pd->ch_num = rx_ch->ch_num;
	curr_pd->ep_num = rx_ch->end_pt->epnum;

	rx_ch->curr_offset += pkt_size;

	/*
	 * Push the free Rx packet descriptor
	 * to the free descriptor/buffer queue.
	 */
	cppi41_queue_push(&rx_ch->queue_obj, curr_pd->dma_addr,
			  USB_CPPI41_DESC_ALIGN, 0);

	return 1;
}

/**
 * cppi41_channel_program - program channel for data transfer
 * @channel: the channel
 * @maxpacket: max packet size
 * @mode: for Rx, 1 unless the USB protocol driver promised to treat
 *	all short reads as errors and kick in high level fault recovery;
 *	for Tx, 0 unless the protocol driver _requires_ short-packet
 *	termination mode
 * @dma_addr: DMA address of buffer
 * @length: length of buffer
 *
 * Context: controller IRQ-locked
 */
static int cppi41_channel_program(struct dma_channel *channel, u16 maxpacket,
				  u8 mode, dma_addr_t dma_addr, u32 length)
{
	struct cppi41_channel *cppi_ch;
	unsigned queued;

	cppi_ch = container_of(channel, struct cppi41_channel, channel);

	switch (channel->status) {
	case MUSB_DMA_STATUS_BUS_ABORT:
	case MUSB_DMA_STATUS_CORE_ABORT:
		/* Fault IRQ handler should have handled cleanup */
		WARNING("%cx DMA%d not cleaned up after abort!\n",
			cppi_ch->transmit ? 'T' : 'R', cppi_ch->ch_num);
		break;
	case MUSB_DMA_STATUS_BUSY:
		WARNING("Program active channel? %cx DMA%d\n",
			cppi_ch->transmit ? 'T' : 'R', cppi_ch->ch_num);
		break;
	case MUSB_DMA_STATUS_UNKNOWN:
		DBG(1, "%cx DMA%d not allocated!\n",
		    cppi_ch->transmit ? 'T' : 'R', cppi_ch->ch_num);
		return 0;
	case MUSB_DMA_STATUS_FREE:
		break;
	}

	channel->status = MUSB_DMA_STATUS_BUSY;

	/* Set the transfer parameters, then queue up the first segment */
	cppi_ch->start_addr = dma_addr;
	cppi_ch->curr_offset = 0;
	cppi_ch->pkt_size = maxpacket;
	cppi_ch->length = length;
	cppi_ch->transfer_mode = mode;
	cppi_ch->zlp_queued = 0;
	cppi_ch->channel.actual_len = 0;

	/* Tx or Rx channel? */
	if (cppi_ch->transmit)
		queued = cppi41_next_tx_segment(cppi_ch);
	else
		queued = cppi41_next_rx_segment(cppi_ch);

	return queued > 0;
}
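
/*
 * A hypothetical usage sketch (parameter values are examples only, not
 * taken from the MUSB core): programming a 4 KB bulk transfer with
 * 512-byte packets and no forced short-packet termination would look like
 *
 *	cppi41_channel_program(channel, 512, 0, urb->transfer_dma, 4096);
 *
 * after which completion is eventually reported via musb_dma_completion()
 * from the queue processing code below.
 */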

static struct usb_pkt_desc *usb_get_pd_ptr(struct cppi41 *cppi,
					   unsigned long pd_addr)
{
	if (pd_addr >= cppi->pd_mem_phys && pd_addr < cppi->pd_mem_phys +
	    USB_CPPI41_MAX_PD * USB_CPPI41_DESC_ALIGN)
		return pd_addr - cppi->pd_mem_phys + cppi->pd_mem;
	else
		return NULL;
}

static int usb_check_teardown(struct cppi41_channel *cppi_ch,
			      unsigned long pd_addr)
{
	u32 info;

	if (cppi41_get_teardown_info(pd_addr, &info)) {
		DBG(1, "ERROR: not a teardown descriptor\n");
		return 0;
	}

	if ((info & CPPI41_TEARDOWN_TX_RX_MASK) ==
	    (!cppi_ch->transmit << CPPI41_TEARDOWN_TX_RX_SHIFT) &&
	    (info & CPPI41_TEARDOWN_DMA_NUM_MASK) ==
	    (usb_cppi41_info.dma_block << CPPI41_TEARDOWN_DMA_NUM_SHIFT) &&
	    (info & CPPI41_TEARDOWN_CHAN_NUM_MASK) ==
	    (usb_cppi41_info.ep_dma_ch[cppi_ch->ch_num] <<
	     CPPI41_TEARDOWN_CHAN_NUM_SHIFT))
		return 1;

	DBG(1, "ERROR: unexpected values in teardown descriptor\n");
	return 0;
}

/*
 * We can't handle the channel teardown via the default completion queue while
 * the controller is IRQ-locked, so we use the dedicated teardown completion
 * queue, which we can simply poll for a teardown descriptor without
 * interfering with the Tx completion queue processing.
 */
static void usb_tx_ch_teardown(struct cppi41_channel *tx_ch)
{
	struct cppi41 *cppi = tx_ch->channel.private_data;
	unsigned long pd_addr;

	/* Initiate teardown for Tx DMA channel */
	cppi41_dma_ch_teardown(&tx_ch->dma_ch_obj);

	do {
		/* Wait for a descriptor to be queued and pop it... */
		do {
			pd_addr = cppi41_queue_pop(&cppi->queue_obj);
		} while (!pd_addr);

		dprintk("Descriptor (%08lx) popped from teardown completion "
			"queue\n", pd_addr);
	} while (!usb_check_teardown(tx_ch, pd_addr));
}

/*
 * For Rx DMA channels, the situation is more complex: there's only a single
 * completion queue for all our needs, so we have to temporarily redirect the
 * completed descriptors to our teardown completion queue, with a possibility
 * of a completed packet landing there as well...
 */
static void usb_rx_ch_teardown(struct cppi41_channel *rx_ch)
{
	struct cppi41 *cppi = rx_ch->channel.private_data;

	cppi41_dma_ch_default_queue(&rx_ch->dma_ch_obj, 0, cppi->teardownQNum);

	/* Initiate teardown for Rx DMA channel */
	cppi41_dma_ch_teardown(&rx_ch->dma_ch_obj);

	while (1) {
		struct usb_pkt_desc *curr_pd;
		unsigned long pd_addr;

		/* Wait for a descriptor to be queued and pop it... */
		do {
			pd_addr = cppi41_queue_pop(&cppi->queue_obj);
		} while (!pd_addr);

		dprintk("Descriptor (%08lx) popped from teardown completion "
			"queue\n", pd_addr);

		/*
		 * We might have popped a completed Rx PD, so check if the
		 * physical address is within the PD region first. If it's
		 * not the case, it must be a teardown descriptor...
		 */
		curr_pd = usb_get_pd_ptr(cppi, pd_addr);
		if (curr_pd == NULL) {
			if (usb_check_teardown(rx_ch, pd_addr))
				break;
			continue;
		}

		/* Paranoia: check if PD is from the right channel... */
		if (curr_pd->ch_num != rx_ch->ch_num) {
			ERR("Unexpected channel %d in Rx PD\n",
			    curr_pd->ch_num);
			continue;
		}

		/* Extract the buffer length from the completed PD */
		rx_ch->channel.actual_len += curr_pd->hw_desc.buf_len;

		/*
		 * Return Rx PDs to the software list --
		 * this is protected by critical section.
		 */
		usb_put_free_pd(cppi, curr_pd);
	}

	/* Now restore the default Rx completion queue... */
	cppi41_dma_ch_default_queue(&rx_ch->dma_ch_obj, usb_cppi41_info.q_mgr,
				    usb_cppi41_info.rx_comp_q[0]);
}

/*
 * cppi41_channel_abort
 *
 * Context: controller IRQ-locked, endpoint selected.
 */
static int cppi41_channel_abort(struct dma_channel *channel)
{
	struct cppi41 *cppi;
	struct cppi41_channel *cppi_ch;
	struct musb *musb;
	void __iomem *reg_base, *epio;
	unsigned long pd_addr;
	u32 csr, td_reg;
	u8 ch_num, ep_num;

	cppi_ch = container_of(channel, struct cppi41_channel, channel);
	ch_num = cppi_ch->ch_num;

	switch (channel->status) {
	case MUSB_DMA_STATUS_BUS_ABORT:
	case MUSB_DMA_STATUS_CORE_ABORT:
		/* From Rx or Tx fault IRQ handler */
	case MUSB_DMA_STATUS_BUSY:
		/* The hardware needs shutting down... */
		dprintk("%s: DMA busy, status = %x\n",
			__func__, channel->status);
		break;
	case MUSB_DMA_STATUS_UNKNOWN:
		DBG(1, "%cx DMA%d not allocated\n",
		    cppi_ch->transmit ? 'T' : 'R', ch_num);
		/* FALLTHROUGH */
	case MUSB_DMA_STATUS_FREE:
		return 0;
	}

	cppi = cppi_ch->channel.private_data;
	musb = cppi->musb;
	reg_base = musb->ctrl_base;
	epio = cppi_ch->end_pt->regs;
	ep_num = ch_num + 1;

#ifdef DEBUG_CPPI_TD
	printk("Before teardown:");
	print_pd_list(cppi->pd_pool_head);
#endif

	if (cppi_ch->transmit) {
		dprintk("Tx channel teardown, cppi_ch = %p\n", cppi_ch);

		/* Tear down Tx DMA channel */
		usb_tx_ch_teardown(cppi_ch);

		/* Issue CPPI FIFO teardown for Tx channel */
		td_reg = musb_readl(reg_base, USB_TEARDOWN_REG);
		td_reg |= USB_TX_TDOWN_MASK(ep_num);
		musb_writel(reg_base, USB_TEARDOWN_REG, td_reg);

		/* Flush FIFO of the endpoint */
		csr = musb_readw(epio, MUSB_TXCSR);
		csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_H_WZC_BITS;
		musb_writew(epio, MUSB_TXCSR, csr);
	} else { /* Rx */
		dprintk("Rx channel teardown, cppi_ch = %p\n", cppi_ch);

		/* Flush FIFO of the endpoint */
		csr = musb_readw(epio, MUSB_RXCSR);
		csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_H_WZC_BITS;
		musb_writew(epio, MUSB_RXCSR, csr);

		/* Issue CPPI FIFO teardown for Rx channel */
		td_reg = musb_readl(reg_base, USB_TEARDOWN_REG);
		td_reg |= USB_RX_TDOWN_MASK(ep_num);
		musb_writel(reg_base, USB_TEARDOWN_REG, td_reg);

		/* Tear down Rx DMA channel */
		usb_rx_ch_teardown(cppi_ch);

		/*
		 * NOTE: docs don't guarantee any of this works... we expect
		 * that if the USB core stops telling the CPPI core to pull
		 * more data from it, then it'll be safe to flush current Rx
		 * DMA state iff any pending FIFO transfer is done.
		 */

		/* For host, ensure ReqPkt is never set again */
		cppi41_autoreq_update(cppi_ch, USB_NO_AUTOREQ);

		/* For host, clear (just) ReqPkt at end of current packet(s) */
		if (is_host_active(cppi->musb))
			csr &= ~MUSB_RXCSR_H_REQPKT;
		csr |= MUSB_RXCSR_H_WZC_BITS;

		/* Clear DMA enable */
		csr &= ~MUSB_RXCSR_DMAENAB;
		musb_writew(epio, MUSB_RXCSR, csr);

		/* Flush the FIFO of the endpoint once again */
		csr = musb_readw(epio, MUSB_RXCSR);
		csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_H_WZC_BITS;
		musb_writew(epio, MUSB_RXCSR, csr);

		udelay(50);
	}

	/*
	 * There might be PDs in the Rx/Tx source queue that were not consumed
	 * by the DMA controller -- they need to be recycled properly.
	 */
	while ((pd_addr = cppi41_queue_pop(&cppi_ch->queue_obj)) != 0) {
		struct usb_pkt_desc *curr_pd;

		curr_pd = usb_get_pd_ptr(cppi, pd_addr);
		if (curr_pd == NULL) {
			ERR("Invalid PD popped from source queue\n");
			continue;
		}

		/*
		 * Return Rx/Tx PDs to the software list --
		 * this is protected by critical section.
		 */
		dprintk("Returning PD %p to the free PD list\n", curr_pd);
		usb_put_free_pd(cppi, curr_pd);
	}

#ifdef DEBUG_CPPI_TD
	printk("After teardown:");
	print_pd_list(cppi->pd_pool_head);
#endif

	/* Re-enable the DMA channel */
	cppi41_dma_ch_enable(&cppi_ch->dma_ch_obj);

	channel->status = MUSB_DMA_STATUS_FREE;

	return 0;
}

/**
 * cppi41_dma_controller_create - instantiate an object representing
 * the DMA controller
 */
struct dma_controller * __devinit
cppi41_dma_controller_create(struct musb *musb, void __iomem *mregs)
{
	struct cppi41 *cppi;

	cppi = kzalloc(sizeof *cppi, GFP_KERNEL);
	if (!cppi)
		return NULL;

	/* Initialize the CPPI 4.1 DMA controller structure */
	cppi->musb = musb;
	cppi->controller.start = cppi41_controller_start;
	cppi->controller.stop = cppi41_controller_stop;
	cppi->controller.channel_alloc = cppi41_channel_alloc;
	cppi->controller.channel_release = cppi41_channel_release;
	cppi->controller.channel_program = cppi41_channel_program;
	cppi->controller.channel_abort = cppi41_channel_abort;

	return &cppi->controller;
}
EXPORT_SYMBOL(cppi41_dma_controller_create);

/**
 * cppi41_dma_controller_destroy - destroy a previously instantiated
 * DMA controller
 * @controller: the controller
 */
void cppi41_dma_controller_destroy(struct dma_controller *controller)
{
	struct cppi41 *cppi;

	cppi = container_of(controller, struct cppi41, controller);

	/* Free the CPPI object */
	kfree(cppi);
}
EXPORT_SYMBOL(cppi41_dma_controller_destroy);

static void usb_process_tx_queue(struct cppi41 *cppi, unsigned index)
{
	struct cppi41_queue_obj tx_queue_obj;
	unsigned long pd_addr;

	if (cppi41_queue_init(&tx_queue_obj, usb_cppi41_info.q_mgr,
			      usb_cppi41_info.tx_comp_q[index])) {
		DBG(1, "ERROR: cppi41_queue_init failed for "
		    "Tx completion queue\n");
		return;
	}

	while ((pd_addr = cppi41_queue_pop(&tx_queue_obj)) != 0) {
		struct usb_pkt_desc *curr_pd;
		struct cppi41_channel *tx_ch;
		u8 ch_num, ep_num;
		u32 length;

		curr_pd = usb_get_pd_ptr(cppi, pd_addr);
		if (curr_pd == NULL) {
			ERR("Invalid PD popped from Tx completion queue\n");
			continue;
		}

		/* Extract the data from the received packet descriptor */
		ch_num = curr_pd->ch_num;
		ep_num = curr_pd->ep_num;
		length = curr_pd->hw_desc.buf_len;

		tx_ch = &cppi->tx_cppi_ch[ch_num];
		tx_ch->channel.actual_len += length;

		/*
		 * Return Tx PD to the software list --
		 * this is protected by critical section
		 */
		usb_put_free_pd(cppi, curr_pd);

		if ((tx_ch->curr_offset < tx_ch->length) ||
		    (tx_ch->transfer_mode && !tx_ch->zlp_queued))
			cppi41_next_tx_segment(tx_ch);
		else if (tx_ch->channel.actual_len >= tx_ch->length) {
			tx_ch->channel.status = MUSB_DMA_STATUS_FREE;

			/* Tx completion routine callback */
			musb_dma_completion(cppi->musb, ep_num, 1);
		}
	}
}

static void usb_process_rx_queue(struct cppi41 *cppi, unsigned index)
{
	struct cppi41_queue_obj rx_queue_obj;
	unsigned long pd_addr;

	if (cppi41_queue_init(&rx_queue_obj, usb_cppi41_info.q_mgr,
			      usb_cppi41_info.rx_comp_q[index])) {
		DBG(1, "ERROR: cppi41_queue_init failed for Rx queue\n");
		return;
	}

	while ((pd_addr = cppi41_queue_pop(&rx_queue_obj)) != 0) {
		struct usb_pkt_desc *curr_pd;
		struct cppi41_channel *rx_ch;
		u8 ch_num, ep_num;
		u32 length;

		curr_pd = usb_get_pd_ptr(cppi, pd_addr);
		if (curr_pd == NULL) {
			ERR("Invalid PD popped from Rx completion queue\n");
			continue;
		}

		/* Extract the data from the received packet descriptor */
		ch_num = curr_pd->ch_num;
		ep_num = curr_pd->ep_num;
		length = curr_pd->hw_desc.buf_len;

		rx_ch = &cppi->rx_cppi_ch[ch_num];
		rx_ch->channel.actual_len += length;

		/*
		 * Return Rx PD to the software list --
		 * this is protected by critical section
		 */
		usb_put_free_pd(cppi, curr_pd);

		if (unlikely(rx_ch->channel.actual_len >= rx_ch->length ||
			     length < curr_pd->hw_desc.orig_buf_len)) {
			rx_ch->channel.status = MUSB_DMA_STATUS_FREE;

			/* Rx completion routine callback */
			musb_dma_completion(cppi->musb, ep_num, 0);
		} else
			cppi41_next_rx_segment(rx_ch);
	}
}

/*
 * cppi41_completion - handle interrupts from the Tx/Rx completion queues
 *
 * NOTE: since we have to manually prod the Rx process in the transparent mode,
 * we certainly want to handle the Rx queues first.
 */
void cppi41_completion(struct musb *musb, u32 rx, u32 tx)
{
	struct cppi41 *cppi;
	unsigned index;

	cppi = container_of(musb->dma_controller, struct cppi41, controller);

	/* Process packet descriptors from the Rx queues */
	for (index = 0; rx != 0; rx >>= 1, index++)
		if (rx & 1)
			usb_process_rx_queue(cppi, index);

	/* Process packet descriptors from the Tx completion queues */
	for (index = 0; tx != 0; tx >>= 1, index++)
		if (tx & 1)
			usb_process_tx_queue(cppi, index);
}
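
/*
 * Illustrative call pattern (a sketch only -- the register names below are
 * hypothetical, not part of this driver): a glue-layer interrupt handler
 * would read per-queue pending bits and forward them as bitmasks, where
 * bit N selects rx_comp_q[N]/tx_comp_q[N]:
 *
 *	u32 rx = musb_readl(reg_base, GLUE_RX_INTR_STATUS_REG);
 *	u32 tx = musb_readl(reg_base, GLUE_TX_INTR_STATUS_REG);
 *
 *	cppi41_completion(musb, rx, tx);
 *
 * e.g. rx == 0x5 would process Rx completion queues 0 and 2.
 */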

MODULE_DESCRIPTION("CPPI 4.1 DMA controller driver for MUSB");
MODULE_LICENSE("GPL v2");

static int __init cppi41_dma_init(void)
{
	return 0;
}
module_init(cppi41_dma_init);

static void __exit cppi41_dma_exit(void)
{
}
module_exit(cppi41_dma_exit);