/*
 * Copyright (C) 2005-2006 by Texas Instruments
 * Copyright (c) 2008, MontaVista Software, Inc. <source@mvista.com>
 *
 * This file implements a DMA interface using TI's CPPI 4.1 DMA.
 *
 * This program is free software; you can distribute it and/or modify it
 * under the terms of the GNU General Public License (Version 2) as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 */

#include <linux/errno.h>
#include <linux/dma-mapping.h>

#include "cppi41.h"

#include "musb_core.h"
#include "musb_dma.h"
#include "cppi41_dma.h"

/* Configuration */
#define USB_CPPI41_DESC_SIZE_SHIFT 6
#define USB_CPPI41_DESC_ALIGN   (1 << USB_CPPI41_DESC_SIZE_SHIFT)
#define USB_CPPI41_CH_NUM_PD    64      /* 4K bulk data at full speed */
#define USB_CPPI41_MAX_PD       (USB_CPPI41_CH_NUM_PD * USB_CPPI41_NUM_CH)

#undef DEBUG_CPPI_TD
#undef USBDRV_DEBUG

#ifdef USBDRV_DEBUG
#define dprintk(x, ...) printk(x, ## __VA_ARGS__)
#else
#define dprintk(x, ...)
#endif

/*
 * Data structure definitions
 */

/*
 * USB Packet Descriptor
 */
struct usb_pkt_desc;

struct usb_pkt_desc {
        /* Hardware descriptor fields from this point */
        struct cppi41_host_pkt_desc hw_desc;
        /* Protocol specific data */
        dma_addr_t dma_addr;
        struct usb_pkt_desc *next_pd_ptr;
        u8 ch_num;
        u8 ep_num;
};

/**
 * struct cppi41_channel - DMA Channel Control Structure
 *
 * Using the same for Tx/Rx.
 */
struct cppi41_channel {
        struct dma_channel channel;

        struct cppi41_dma_ch_obj dma_ch_obj;    /* DMA channel object */
        struct cppi41_queue src_queue;          /* Tx queue or Rx free descriptor/ */
                                                /* buffer queue */
        struct cppi41_queue_obj queue_obj;      /* Tx queue object or Rx free */
                                                /* descriptor/buffer queue object */

        u32 tag_info;                           /* Tx PD Tag Information field */

        /* Which direction of which endpoint? */
        struct musb_hw_ep *end_pt;
        u8 transmit;
        u8 ch_num;                              /* Channel number of Tx/Rx 0..3 */

        /* DMA mode: "transparent", RNDIS, CDC, or Generic RNDIS */
        u8 dma_mode;
        u8 autoreq;

        /* Book keeping for the current transfer request */
        dma_addr_t start_addr;
        u32 length;
        u32 curr_offset;
        u16 pkt_size;
        u8 transfer_mode;
        u8 zlp_queued;
};

/**
 * struct cppi41 - CPPI 4.1 DMA Controller Object
 *
 * Encapsulates all book keeping and data structures pertaining to
 * the CPPI 4.1 DMA controller.
 */
struct cppi41 {
        struct dma_controller controller;
        struct musb *musb;

        struct cppi41_channel tx_cppi_ch[USB_CPPI41_NUM_CH];
        struct cppi41_channel rx_cppi_ch[USB_CPPI41_NUM_CH];

        struct usb_pkt_desc *pd_pool_head;      /* Free PD pool head */
        dma_addr_t pd_mem_phys;                 /* PD memory physical address */
        void *pd_mem;                           /* PD memory pointer */
        u8 pd_mem_rgn;                          /* PD memory region number */

        u16 teardownQNum;                       /* Teardown completion queue number */
        struct cppi41_queue_obj queue_obj;      /* Teardown completion queue */
                                                /* object */
        u32 pkt_info;                           /* Tx PD Packet Information field */
};

#ifdef DEBUG_CPPI_TD
static void print_pd_list(struct usb_pkt_desc *pd_pool_head)
{
        struct usb_pkt_desc *curr_pd = pd_pool_head;
        int cnt = 0;

        while (curr_pd != NULL) {
                if (cnt % 8 == 0)
                        dprintk("\n%02x ", cnt);
                cnt++;
                dprintk(" %p", curr_pd);
                curr_pd = curr_pd->next_pd_ptr;
        }
        dprintk("\n");
}
#endif
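
/*
 * The PDs are kept on a simple singly-linked free list.  The callers run in
 * the controller's IRQ-locked context (see the "protected by critical
 * section" notes below), so no extra locking is done here.
 */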
static struct usb_pkt_desc *usb_get_free_pd(struct cppi41 *cppi)
{
        struct usb_pkt_desc *free_pd = cppi->pd_pool_head;

        if (free_pd != NULL) {
                cppi->pd_pool_head = free_pd->next_pd_ptr;
                free_pd->next_pd_ptr = NULL;
        }
        return free_pd;
}

static void usb_put_free_pd(struct cppi41 *cppi, struct usb_pkt_desc *free_pd)
{
        free_pd->next_pd_ptr = cppi->pd_pool_head;
        cppi->pd_pool_head = free_pd;
}

/**
 * cppi41_controller_start - start DMA controller
 * @controller: the controller
 *
 * This function initializes the CPPI 4.1 Tx/Rx channels.
 */
static int __init cppi41_controller_start(struct dma_controller *controller)
{
        struct cppi41 *cppi;
        struct cppi41_channel *cppi_ch;
        void __iomem *reg_base;
        struct usb_pkt_desc *curr_pd;
        unsigned long pd_addr;
        int i;

        cppi = container_of(controller, struct cppi41, controller);

        /*
         * TODO: We may need to check USB_CPPI41_MAX_PD here since CPPI 4.1
         * requires the descriptor count to be a multiple of 2 ^ 5 (i.e. 32).
         * Similarly, the descriptor size should also be a multiple of 32.
         */

        /*
         * Allocate free packet descriptor pool for all Tx/Rx endpoints --
         * dma_alloc_coherent() will return a page aligned address, so our
         * alignment requirement will be honored.
         */
        cppi->pd_mem = dma_alloc_coherent(cppi->musb->controller,
                                          USB_CPPI41_MAX_PD *
                                          USB_CPPI41_DESC_ALIGN,
                                          &cppi->pd_mem_phys,
                                          GFP_KERNEL | GFP_DMA);
        if (cppi->pd_mem == NULL) {
                DBG(1, "ERROR: packet descriptor memory allocation failed\n");
                return 0;
        }
        if (cppi41_mem_rgn_alloc(usb_cppi41_info.q_mgr, cppi->pd_mem_phys,
                                 USB_CPPI41_DESC_SIZE_SHIFT,
                                 get_count_order(USB_CPPI41_MAX_PD),
                                 &cppi->pd_mem_rgn)) {
                DBG(1, "ERROR: queue manager memory region allocation "
                    "failed\n");
                goto free_pds;
        }

        /* Allocate the teardown completion queue */
        if (cppi41_queue_alloc(CPPI41_UNASSIGNED_QUEUE,
                               0, &cppi->teardownQNum)) {
                DBG(1, "ERROR: teardown completion queue allocation failed\n");
                goto free_mem_rgn;
        }
        DBG(4, "Allocated teardown completion queue %d in queue manager 0\n",
            cppi->teardownQNum);

        if (cppi41_queue_init(&cppi->queue_obj, 0, cppi->teardownQNum)) {
                DBG(1, "ERROR: teardown completion queue initialization "
                    "failed\n");
                goto free_queue;
        }

        /*
         * "Slice" PDs one-by-one from the big chunk and
         * add them to the free pool.
         */
        curr_pd = (struct usb_pkt_desc *)cppi->pd_mem;
        pd_addr = cppi->pd_mem_phys;
        for (i = 0; i < USB_CPPI41_MAX_PD; i++) {
                curr_pd->dma_addr = pd_addr;

                usb_put_free_pd(cppi, curr_pd);
                curr_pd = (struct usb_pkt_desc *)((char *)curr_pd +
                                                  USB_CPPI41_DESC_ALIGN);
                pd_addr += USB_CPPI41_DESC_ALIGN;
        }

        /* Configure the Tx channels */
        for (i = 0, cppi_ch = cppi->tx_cppi_ch;
             i < ARRAY_SIZE(cppi->tx_cppi_ch); ++i, ++cppi_ch) {
                const struct cppi41_tx_ch *tx_info;

                memset(cppi_ch, 0, sizeof(struct cppi41_channel));
                cppi_ch->transmit = 1;
                cppi_ch->ch_num = i;
                cppi_ch->channel.private_data = cppi;

                /*
                 * Extract the CPPI 4.1 DMA Tx channel configuration and
                 * construct/store the Tx PD tag info field for later use...
                 */
                tx_info = cppi41_dma_block[usb_cppi41_info.dma_block].tx_ch_info
                          + usb_cppi41_info.ep_dma_ch[i];
                cppi_ch->src_queue = tx_info->tx_queue[0];
                cppi_ch->tag_info = (tx_info->port_num <<
                                     CPPI41_SRC_TAG_PORT_NUM_SHIFT) |
                                    (tx_info->ch_num <<
                                     CPPI41_SRC_TAG_CH_NUM_SHIFT) |
                                    (tx_info->sub_ch_num <<
                                     CPPI41_SRC_TAG_SUB_CH_NUM_SHIFT);
        }

        /* Configure the Rx channels */
        for (i = 0, cppi_ch = cppi->rx_cppi_ch;
             i < ARRAY_SIZE(cppi->rx_cppi_ch); ++i, ++cppi_ch) {
                memset(cppi_ch, 0, sizeof(struct cppi41_channel));
                cppi_ch->ch_num = i;
                cppi_ch->channel.private_data = cppi;
        }

        /* Construct/store Tx PD packet info field for later use */
        cppi->pkt_info = (CPPI41_PKT_TYPE_USB << CPPI41_PKT_TYPE_SHIFT) |
                         (CPPI41_RETURN_LINKED << CPPI41_RETURN_POLICY_SHIFT) |
                         (usb_cppi41_info.q_mgr << CPPI41_RETURN_QMGR_SHIFT) |
                         (usb_cppi41_info.tx_comp_q[0] <<
                          CPPI41_RETURN_QNUM_SHIFT);

        /* Do the necessary configuration in hardware to get started */
        reg_base = cppi->musb->ctrl_base;

        /* Disable auto request mode */
        musb_writel(reg_base, USB_AUTOREQ_REG, 0);

        /* Disable the CDC/RNDIS modes */
        musb_writel(reg_base, USB_MODE_REG, 0);

        return 1;

 free_queue:
        if (cppi41_queue_free(0, cppi->teardownQNum))
                DBG(1, "ERROR: failed to free teardown completion queue\n");

 free_mem_rgn:
        if (cppi41_mem_rgn_free(usb_cppi41_info.q_mgr, cppi->pd_mem_rgn))
                DBG(1, "ERROR: failed to free queue manager memory region\n");

 free_pds:
        dma_free_coherent(cppi->musb->controller,
                          USB_CPPI41_MAX_PD * USB_CPPI41_DESC_ALIGN,
                          cppi->pd_mem, cppi->pd_mem_phys);

        return 0;
}

/**
 * cppi41_controller_stop - stop DMA controller
 * @controller: the controller
 *
 * De-initialize the DMA Controller as necessary.
 */
static int cppi41_controller_stop(struct dma_controller *controller)
{
        struct cppi41 *cppi;
        void __iomem *reg_base;

        cppi = container_of(controller, struct cppi41, controller);

        /* Free the teardown completion queue */
        if (cppi41_queue_free(usb_cppi41_info.q_mgr, cppi->teardownQNum))
                DBG(1, "ERROR: failed to free teardown completion queue\n");

        /*
         * Free the packet descriptor region allocated
         * for all Tx/Rx channels.
         */
        if (cppi41_mem_rgn_free(usb_cppi41_info.q_mgr, cppi->pd_mem_rgn))
                DBG(1, "ERROR: failed to free queue manager memory region\n");

        dma_free_coherent(cppi->musb->controller,
                          USB_CPPI41_MAX_PD * USB_CPPI41_DESC_ALIGN,
                          cppi->pd_mem, cppi->pd_mem_phys);

        reg_base = cppi->musb->ctrl_base;

        /* Disable auto request mode */
        musb_writel(reg_base, USB_AUTOREQ_REG, 0);

        /* Disable the CDC/RNDIS modes */
        musb_writel(reg_base, USB_MODE_REG, 0);

        return 1;
}

/**
 * cppi41_channel_alloc - allocate a CPPI channel for DMA.
 * @controller: the controller
 * @ep: the endpoint
 * @is_tx: 1 for Tx channel, 0 for Rx channel
 *
 * With CPPI, channels are bound to each transfer direction of a non-control
 * endpoint, so allocating (and deallocating) is mostly a way to notice bad
 * housekeeping on the software side. We assume the IRQs are always active.
 */
static struct dma_channel *cppi41_channel_alloc(struct dma_controller
                                                *controller,
                                                struct musb_hw_ep *ep, u8 is_tx)
{
        struct cppi41 *cppi;
        struct cppi41_channel *cppi_ch;
        u32 ch_num, ep_num = ep->epnum;

        cppi = container_of(controller, struct cppi41, controller);

        /* Remember, ep_num: 1 .. Max_EP, and CPPI ch_num: 0 .. Max_EP - 1 */
        ch_num = ep_num - 1;

        if (ep_num > USB_CPPI41_NUM_CH) {
                DBG(1, "No %cx DMA channel for EP%d\n",
                    is_tx ? 'T' : 'R', ep_num);
                return NULL;
        }

        cppi_ch = (is_tx ? cppi->tx_cppi_ch : cppi->rx_cppi_ch) + ch_num;

        /* As of now, just return the corresponding CPPI 4.1 channel handle */
        if (is_tx) {
                /* Initialize the CPPI 4.1 Tx DMA channel */
                if (cppi41_tx_ch_init(&cppi_ch->dma_ch_obj,
                                      usb_cppi41_info.dma_block,
                                      usb_cppi41_info.ep_dma_ch[ch_num])) {
                        DBG(1, "ERROR: cppi41_tx_ch_init failed for "
                            "channel %d\n", ch_num);
                        return NULL;
                }
                /*
                 * Teardown descriptors will be pushed to the dedicated
                 * completion queue.
                 */
                cppi41_dma_ch_default_queue(&cppi_ch->dma_ch_obj,
                                            0, cppi->teardownQNum);
        } else {
                struct cppi41_rx_ch_cfg rx_cfg;
                u8 q_mgr = usb_cppi41_info.q_mgr;
                int i;

                /* Initialize the CPPI 4.1 Rx DMA channel */
                if (cppi41_rx_ch_init(&cppi_ch->dma_ch_obj,
                                      usb_cppi41_info.dma_block,
                                      usb_cppi41_info.ep_dma_ch[ch_num])) {
                        DBG(1, "ERROR: cppi41_rx_ch_init failed\n");
                        return NULL;
                }

                if (cppi41_queue_alloc(CPPI41_FREE_DESC_BUF_QUEUE |
                                       CPPI41_UNASSIGNED_QUEUE,
                                       q_mgr, &cppi_ch->src_queue.q_num)) {
                        DBG(1, "ERROR: cppi41_queue_alloc failed for "
                            "free descriptor/buffer queue\n");
                        return NULL;
                }
                DBG(4, "Allocated free descriptor/buffer queue %d in "
                    "queue manager %d\n", cppi_ch->src_queue.q_num, q_mgr);
                rx_cfg.default_desc_type = cppi41_rx_host_desc;
                rx_cfg.sop_offset = 0;
                rx_cfg.retry_starved = 1;
                rx_cfg.rx_queue.q_mgr = cppi_ch->src_queue.q_mgr = q_mgr;
                rx_cfg.rx_queue.q_num = usb_cppi41_info.rx_comp_q[0];
                for (i = 0; i < 4; i++)
                        rx_cfg.cfg.host_pkt.fdb_queue[i] = cppi_ch->src_queue;
                cppi41_rx_ch_configure(&cppi_ch->dma_ch_obj, &rx_cfg);
        }

        /* Initialize the CPPI 4.1 DMA source queue */
        if (cppi41_queue_init(&cppi_ch->queue_obj, cppi_ch->src_queue.q_mgr,
                              cppi_ch->src_queue.q_num)) {
                DBG(1, "ERROR: cppi41_queue_init failed for %s queue",
                    is_tx ? "Tx" : "Rx free descriptor/buffer");
                if (is_tx == 0 &&
                    cppi41_queue_free(cppi_ch->src_queue.q_mgr,
                                      cppi_ch->src_queue.q_num))
                        DBG(1, "ERROR: failed to free Rx descriptor/buffer "
                            "queue\n");
                return NULL;
        }

        /* Enable the DMA channel */
        cppi41_dma_ch_enable(&cppi_ch->dma_ch_obj);

        if (cppi_ch->end_pt)
                DBG(1, "Re-allocating DMA %cx channel %d (%p)\n",
                    is_tx ? 'T' : 'R', ch_num, cppi_ch);

        cppi_ch->end_pt = ep;
        cppi_ch->ch_num = ch_num;
        cppi_ch->channel.status = MUSB_DMA_STATUS_FREE;

        DBG(4, "Allocated DMA %cx channel %d for EP%d\n", is_tx ? 'T' : 'R',
            ch_num, ep_num);

        return &cppi_ch->channel;
}

/**
 * cppi41_channel_release - release a CPPI DMA channel
 * @channel: the channel
 */
static void cppi41_channel_release(struct dma_channel *channel)
{
        struct cppi41_channel *cppi_ch;

        /* REVISIT: for paranoia, check state and abort if needed... */
        cppi_ch = container_of(channel, struct cppi41_channel, channel);
        if (cppi_ch->end_pt == NULL)
                DBG(1, "Releasing idle DMA channel %p\n", cppi_ch);

        /* But for now, not its IRQ */
        cppi_ch->end_pt = NULL;
        channel->status = MUSB_DMA_STATUS_UNKNOWN;

        cppi41_dma_ch_disable(&cppi_ch->dma_ch_obj);

        /* De-allocate Rx free descriptor/buffer queue */
        if (cppi_ch->transmit == 0 &&
            cppi41_queue_free(cppi_ch->src_queue.q_mgr,
                              cppi_ch->src_queue.q_num))
                DBG(1, "ERROR: failed to free Rx descriptor/buffer queue\n");
}
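
/*
 * cppi41_mode_update - program the Tx/Rx DMA mode (transparent, RNDIS, CDC,
 * or generic RNDIS) for one endpoint in USB_MODE_REG, caching the last value
 * written so the register is only touched when the mode actually changes.
 */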
static void cppi41_mode_update(struct cppi41_channel *cppi_ch, u8 mode)
{
        if (mode != cppi_ch->dma_mode) {
                struct cppi41 *cppi = cppi_ch->channel.private_data;
                void __iomem *reg_base = cppi->musb->ctrl_base;
                u32 reg_val = musb_readl(reg_base, USB_MODE_REG);
                u8 ep_num = cppi_ch->ch_num + 1;

                if (cppi_ch->transmit) {
                        reg_val &= ~USB_TX_MODE_MASK(ep_num);
                        reg_val |= mode << USB_TX_MODE_SHIFT(ep_num);
                } else {
                        reg_val &= ~USB_RX_MODE_MASK(ep_num);
                        reg_val |= mode << USB_RX_MODE_SHIFT(ep_num);
                }
                musb_writel(reg_base, USB_MODE_REG, reg_val);
                cppi_ch->dma_mode = mode;
        }
}

/*
 * CPPI 4.1 Tx:
 * ============
 * Tx is a lot more reasonable than Rx: RNDIS mode seems to behave well except
 * for how it handles the exactly-N-packets case.  It appears that there's a
 * hiccup in that case (maybe the DMA completes before a ZLP gets written?)
 * boiling down to not being able to rely on the XFER DMA writing any
 * terminating zero-length packet before the next transfer is started...
 *
 * The generic RNDIS mode does not have this misfeature, so we prefer using it
 * instead.  We then send the terminating ZLP *explicitly* using DMA instead of
 * doing it by PIO after an IRQ.
 */

/**
 * cppi41_next_tx_segment - DMA write for the next chunk of a buffer
 * @tx_ch: Tx channel
 *
 * Context: controller IRQ-locked
 */
static unsigned cppi41_next_tx_segment(struct cppi41_channel *tx_ch)
{
        struct cppi41 *cppi = tx_ch->channel.private_data;
        struct usb_pkt_desc *curr_pd;
        u32 length = tx_ch->length - tx_ch->curr_offset;
        u32 pkt_size = tx_ch->pkt_size;
        unsigned num_pds, n;

        /*
         * Tx can use the generic RNDIS mode where we can probably fit this
         * transfer in one PD and one IRQ.  The only time we would NOT want
         * to use it is when the hardware constraints prevent it...
         */
        if ((pkt_size & 0x3f) == 0 && length > pkt_size) {
                num_pds = 1;
                pkt_size = length;
                cppi41_mode_update(tx_ch, USB_GENERIC_RNDIS_MODE);
        } else {
                num_pds = (length + pkt_size - 1) / pkt_size;
                cppi41_mode_update(tx_ch, USB_TRANSPARENT_MODE);
        }

        /*
         * If length of transmit buffer is 0 or a multiple of the endpoint size,
         * then send the zero length packet.
         */
        if (!length || (tx_ch->transfer_mode && length % pkt_size == 0))
                num_pds++;

        DBG(4, "TX DMA%u, %s, maxpkt %u, %u PDs, addr %#x, len %u\n",
            tx_ch->ch_num, tx_ch->dma_mode ? "accelerated" : "transparent",
            pkt_size, num_pds, tx_ch->start_addr + tx_ch->curr_offset, length);

        for (n = 0; n < num_pds; n++) {
                struct cppi41_host_pkt_desc *hw_desc;

                /* Get Tx host packet descriptor from the free pool */
                curr_pd = usb_get_free_pd(cppi);
                if (curr_pd == NULL) {
                        DBG(1, "No Tx PDs\n");
                        break;
                }

                if (length < pkt_size)
                        pkt_size = length;

                hw_desc = &curr_pd->hw_desc;
                hw_desc->desc_info = (CPPI41_DESC_TYPE_HOST <<
                                      CPPI41_DESC_TYPE_SHIFT) | pkt_size;
                hw_desc->tag_info = tx_ch->tag_info;
                hw_desc->pkt_info = cppi->pkt_info;

                hw_desc->buf_ptr = tx_ch->start_addr + tx_ch->curr_offset;
                hw_desc->buf_len = pkt_size;
                hw_desc->next_desc_ptr = 0;

                curr_pd->ch_num = tx_ch->ch_num;
                curr_pd->ep_num = tx_ch->end_pt->epnum;

                tx_ch->curr_offset += pkt_size;
                length -= pkt_size;
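
                /*
                 * A zero-length PD terminates the transfer; remember that one
                 * has been queued so the completion handler won't queue
                 * another ZLP for this transfer.
                 */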
                if (pkt_size == 0)
                        tx_ch->zlp_queued = 1;

                DBG(5, "TX PD %p: buf %08x, len %08x, pkt info %08x\n", curr_pd,
                    hw_desc->buf_ptr, hw_desc->buf_len, hw_desc->pkt_info);

                cppi41_queue_push(&tx_ch->queue_obj, curr_pd->dma_addr,
                                  USB_CPPI41_DESC_ALIGN, pkt_size);
        }

        return n;
}
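
/*
 * cppi41_autoreq_update - program the host-side auto-request policy for an
 * Rx endpoint in USB_AUTOREQ_REG (e.g. request all packets but the EOP, or
 * none), caching the last value written to avoid redundant register writes.
 */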
static void cppi41_autoreq_update(struct cppi41_channel *rx_ch, u8 autoreq)
{
        struct cppi41 *cppi = rx_ch->channel.private_data;

        if (is_host_active(cppi->musb) &&
            autoreq != rx_ch->autoreq) {
                void __iomem *reg_base = cppi->musb->ctrl_base;
                u32 reg_val = musb_readl(reg_base, USB_AUTOREQ_REG);
                u8 ep_num = rx_ch->ch_num + 1;

                reg_val &= ~USB_RX_AUTOREQ_MASK(ep_num);
                reg_val |= autoreq << USB_RX_AUTOREQ_SHIFT(ep_num);

                musb_writel(reg_base, USB_AUTOREQ_REG, reg_val);
                rx_ch->autoreq = autoreq;
        }
}
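
/*
 * cppi41_set_ep_size - set the transfer size at which the generic RNDIS mode
 * closes a packet for this Rx endpoint, via the per-endpoint Generic RNDIS
 * EP Size register.
 */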
static void cppi41_set_ep_size(struct cppi41_channel *rx_ch, u32 pkt_size)
{
        struct cppi41 *cppi = rx_ch->channel.private_data;
        void __iomem *reg_base = cppi->musb->ctrl_base;
        u8 ep_num = rx_ch->ch_num + 1;

        musb_writel(reg_base, USB_GENERIC_RNDIS_EP_SIZE_REG(ep_num), pkt_size);
}

/*
 * CPPI 4.1 Rx:
 * ============
 * Consider a 1KB bulk Rx buffer in two scenarios: (a) it's fed two 300 byte
 * packets back-to-back, and (b) it's fed two 512 byte packets back-to-back.
 * (Full speed transfers have similar scenarios.)
 *
 * The correct behavior for Linux is that (a) fills the buffer with 300 bytes,
 * and the next packet goes into a buffer that's queued later; while (b) fills
 * the buffer with 1024 bytes.  How to do that with accelerated DMA modes?
 *
 * Rx queues in RNDIS mode (one single BD) handle (a) correctly but (b) loses
 * BADLY because nothing (!) happens when that second packet fills the buffer,
 * much less when a third one arrives -- which makes it not a "true" RNDIS
 * mode.  In the RNDIS protocol short-packet termination is optional, and it's
 * fine if the peripherals (not hosts!) pad the messages out to end of buffer.
 * Standard PCI host controller DMA descriptors implement that mode by
 * default... which is no accident.
 *
 * Generic RNDIS mode is the only way to reliably make both cases work.  This
 * mode is identical to the "normal" RNDIS mode except for the case where the
 * last packet of the segment matches the max USB packet size -- in this case,
 * the packet will be closed when a value (0x10000 max) in the Generic RNDIS
 * EP Size register is reached.  This mode will work for the network drivers
 * (CDC/RNDIS) as well as for the mass storage drivers where there is no short
 * packet.
 *
 * BUT we can only use the non-transparent modes when the USB packet size is
 * a multiple of 64 bytes.  Let's see what happens when this is not the case...
 *
 * Rx queues (2 BDs with 512 bytes each) have converse problems to RNDIS mode:
 * (b) is handled right but (a) loses badly.  DMA doesn't stop after receiving
 * a short packet and processes both of those PDs; so both packets are loaded
 * into the buffer (with a 212 byte gap between them), and the next buffer
 * queued will NOT get its 300 bytes of data.  Even in the case when there
 * should be no short packets (URB_SHORT_NOT_OK is set), queueing several
 * packets in the host mode doesn't win us anything since we have to manually
 * "prod" the Rx process after each packet is received by setting ReqPkt bit
 * in the endpoint's RXCSR; in the peripheral mode without short packets,
 * queueing could be used BUT we'll have to *teardown* the channel if a short
 * packet still arrives in the peripheral mode, and to "collect" the left-over
 * packet descriptors from the free descriptor/buffer queue in both cases...
 *
 * One BD at a time is the only way to make both cases work reliably, with
 * software handling both cases correctly, at the significant penalty of
 * needing an IRQ per packet.  (The lack of I/O overlap can be slightly
 * ameliorated by enabling double buffering.)
 *
 * There seems to be no way to identify for sure the cases where the CDC mode
 * is appropriate...
 */

/**
 * cppi41_next_rx_segment - DMA read for the next chunk of a buffer
 * @rx_ch: Rx channel
 *
 * Context: controller IRQ-locked
 *
 * NOTE: In the transparent mode, we have to queue one packet at a time since:
 *       - we must avoid starting reception of another packet after receiving
 *         a short packet;
 *       - in host mode we have to set ReqPkt bit in the endpoint's RXCSR after
 *         receiving each packet but the last one... ugly!
 */
static unsigned cppi41_next_rx_segment(struct cppi41_channel *rx_ch)
{
        struct cppi41 *cppi = rx_ch->channel.private_data;
        struct usb_pkt_desc *curr_pd;
        struct cppi41_host_pkt_desc *hw_desc;
        u32 length = rx_ch->length - rx_ch->curr_offset;
        u32 pkt_size = rx_ch->pkt_size;

        /*
         * Rx can use the generic RNDIS mode where we can probably fit this
         * transfer in one PD and one IRQ (or two with a short packet).
         */
        if ((pkt_size & 0x3f) == 0 && length >= 2 * pkt_size) {
                cppi41_mode_update(rx_ch, USB_GENERIC_RNDIS_MODE);
                cppi41_autoreq_update(rx_ch, USB_AUTOREQ_ALL_BUT_EOP);
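
                /*
                 * Round the segment down to a whole number of USB packets
                 * and cap it at the largest value the Generic RNDIS EP Size
                 * register accepts (0x10000).
                 */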
                if (likely(length < 0x10000))
                        pkt_size = length - length % pkt_size;
                else
                        pkt_size = 0x10000;
                cppi41_set_ep_size(rx_ch, pkt_size);
        } else {
                cppi41_mode_update(rx_ch, USB_TRANSPARENT_MODE);
                cppi41_autoreq_update(rx_ch, USB_NO_AUTOREQ);
        }

        DBG(4, "RX DMA%u, %s, maxpkt %u, addr %#x, rec'd %u/%u\n",
            rx_ch->ch_num, rx_ch->dma_mode ? "accelerated" : "transparent",
            pkt_size, rx_ch->start_addr + rx_ch->curr_offset,
            rx_ch->curr_offset, rx_ch->length);

        /* Get Rx packet descriptor from the free pool */
        curr_pd = usb_get_free_pd(cppi);
        if (curr_pd == NULL) {
                /* Shouldn't ever happen! */
                DBG(4, "No Rx PDs\n");
                return 0;
        }

        /*
         * HCD arranged ReqPkt for the first packet.
         * We arrange it for all but the last one.
         */
        if (is_host_active(cppi->musb) && rx_ch->channel.actual_len) {
                void __iomem *epio = rx_ch->end_pt->regs;
                u16 csr = musb_readw(epio, MUSB_RXCSR);

                csr |= MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_H_WZC_BITS;
                musb_writew(epio, MUSB_RXCSR, csr);
        }

        if (length < pkt_size)
                pkt_size = length;

        hw_desc = &curr_pd->hw_desc;
        hw_desc->orig_buf_ptr = rx_ch->start_addr + rx_ch->curr_offset;
        hw_desc->orig_buf_len = pkt_size;

        curr_pd->ch_num = rx_ch->ch_num;
        curr_pd->ep_num = rx_ch->end_pt->epnum;

        rx_ch->curr_offset += pkt_size;

        /*
         * Push the free Rx packet descriptor
         * to the free descriptor/buffer queue.
         */
        cppi41_queue_push(&rx_ch->queue_obj, curr_pd->dma_addr,
                          USB_CPPI41_DESC_ALIGN, 0);

        return 1;
}

/**
 * cppi41_channel_program - program channel for data transfer
 * @channel: the channel
 * @maxpacket: max packet size
 * @mode: for Rx, 1 unless the USB protocol driver promised to treat
 *        all short reads as errors and kick in high level fault recovery;
 *        for Tx, 0 unless the protocol driver _requires_ short-packet
 *        termination mode
 * @dma_addr: DMA address of buffer
 * @length: length of buffer
 *
 * Context: controller IRQ-locked
 */
static int cppi41_channel_program(struct dma_channel *channel, u16 maxpacket,
                                  u8 mode, dma_addr_t dma_addr, u32 length)
{
        struct cppi41_channel *cppi_ch;
        unsigned queued;

        cppi_ch = container_of(channel, struct cppi41_channel, channel);

        switch (channel->status) {
        case MUSB_DMA_STATUS_BUS_ABORT:
        case MUSB_DMA_STATUS_CORE_ABORT:
                /* Fault IRQ handler should have handled cleanup */
                WARNING("%cx DMA%d not cleaned up after abort!\n",
                        cppi_ch->transmit ? 'T' : 'R', cppi_ch->ch_num);
                break;
        case MUSB_DMA_STATUS_BUSY:
                WARNING("Program active channel? %cx DMA%d\n",
                        cppi_ch->transmit ? 'T' : 'R', cppi_ch->ch_num);
                break;
        case MUSB_DMA_STATUS_UNKNOWN:
                DBG(1, "%cx DMA%d not allocated!\n",
                    cppi_ch->transmit ? 'T' : 'R', cppi_ch->ch_num);
                return 0;
        case MUSB_DMA_STATUS_FREE:
                break;
        }

        channel->status = MUSB_DMA_STATUS_BUSY;

        /* Set the transfer parameters, then queue up the first segment */
        cppi_ch->start_addr = dma_addr;
        cppi_ch->curr_offset = 0;
        cppi_ch->pkt_size = maxpacket;
        cppi_ch->length = length;
        cppi_ch->transfer_mode = mode;
        cppi_ch->zlp_queued = 0;

        /* Tx or Rx channel? */
        if (cppi_ch->transmit)
                queued = cppi41_next_tx_segment(cppi_ch);
        else
                queued = cppi41_next_rx_segment(cppi_ch);

        return queued > 0;
}
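
/*
 * usb_get_pd_ptr - convert the physical address of a PD popped from a queue
 * back to its virtual pointer within the PD pool, or NULL if the address
 * does not belong to the pool.
 */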
static struct usb_pkt_desc *usb_get_pd_ptr(struct cppi41 *cppi,
                                           unsigned long pd_addr)
{
        if (pd_addr >= cppi->pd_mem_phys && pd_addr < cppi->pd_mem_phys +
            USB_CPPI41_MAX_PD * USB_CPPI41_DESC_ALIGN)
                return pd_addr - cppi->pd_mem_phys + cppi->pd_mem;
        else
                return NULL;
}
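
/*
 * usb_check_teardown - verify that a popped descriptor really is the teardown
 * descriptor for this channel, by checking the direction, DMA block number
 * and DMA channel number encoded in its teardown info word.
 */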
static int usb_check_teardown(struct cppi41_channel *cppi_ch,
                              unsigned long pd_addr)
{
        u32 info;

        if (cppi41_get_teardown_info(pd_addr, &info)) {
                DBG(1, "ERROR: not a teardown descriptor\n");
                return 0;
        }

        if ((info & CPPI41_TEARDOWN_TX_RX_MASK) ==
            (!cppi_ch->transmit << CPPI41_TEARDOWN_TX_RX_SHIFT) &&
            (info & CPPI41_TEARDOWN_DMA_NUM_MASK) ==
            (usb_cppi41_info.dma_block << CPPI41_TEARDOWN_DMA_NUM_SHIFT) &&
            (info & CPPI41_TEARDOWN_CHAN_NUM_MASK) ==
            (usb_cppi41_info.ep_dma_ch[cppi_ch->ch_num] <<
             CPPI41_TEARDOWN_CHAN_NUM_SHIFT))
                return 1;

        DBG(1, "ERROR: unexpected values in teardown descriptor\n");
        return 0;
}

/*
 * We can't handle the channel teardown via the default completion queue in
 * the IRQ-locked controller context, so we use the dedicated teardown
 * completion queue which we can simply poll for a teardown descriptor,
 * without interfering with the Tx completion queue processing.
 */
static void usb_tx_ch_teardown(struct cppi41_channel *tx_ch)
{
        struct cppi41 *cppi = tx_ch->channel.private_data;
        unsigned long pd_addr;

        /* Initiate teardown for Tx DMA channel */
        cppi41_dma_ch_teardown(&tx_ch->dma_ch_obj);

        do {
                /* Wait for a descriptor to be queued and pop it... */
                do {
                        pd_addr = cppi41_queue_pop(&cppi->queue_obj);
                } while (!pd_addr);

                dprintk("Descriptor (%08lx) popped from teardown completion "
                        "queue\n", pd_addr);
        } while (!usb_check_teardown(tx_ch, pd_addr));
}

/*
 * For Rx DMA channels, the situation is more complex: there's only a single
 * completion queue for all our needs, so we have to temporarily redirect the
 * completed descriptors to our teardown completion queue, with a possibility
 * of a completed packet landing there as well...
 */
static void usb_rx_ch_teardown(struct cppi41_channel *rx_ch)
{
        struct cppi41 *cppi = rx_ch->channel.private_data;

        cppi41_dma_ch_default_queue(&rx_ch->dma_ch_obj, 0, cppi->teardownQNum);

        /* Initiate teardown for Rx DMA channel */
        cppi41_dma_ch_teardown(&rx_ch->dma_ch_obj);

        while (1) {
                struct usb_pkt_desc *curr_pd;
                unsigned long pd_addr;

                /* Wait for a descriptor to be queued and pop it... */
                do {
                        pd_addr = cppi41_queue_pop(&cppi->queue_obj);
                } while (!pd_addr);

                dprintk("Descriptor (%08lx) popped from teardown completion "
                        "queue\n", pd_addr);

                /*
                 * We might have popped a completed Rx PD, so check if the
                 * physical address is within the PD region first.  If it's
                 * not the case, it must be a teardown descriptor...
                 */
                curr_pd = usb_get_pd_ptr(cppi, pd_addr);
                if (curr_pd == NULL) {
                        if (usb_check_teardown(rx_ch, pd_addr))
                                break;
                        continue;
                }

                /* Paranoia: check if PD is from the right channel... */
                if (curr_pd->ch_num != rx_ch->ch_num) {
                        ERR("Unexpected channel %d in Rx PD\n",
                            curr_pd->ch_num);
                        continue;
                }

                /* Extract the buffer length from the completed PD */
                rx_ch->channel.actual_len += curr_pd->hw_desc.buf_len;

                /*
                 * Return Rx PDs to the software list --
                 * this is protected by critical section.
                 */
                usb_put_free_pd(cppi, curr_pd);
        }

        /* Now restore the default Rx completion queue... */
        cppi41_dma_ch_default_queue(&rx_ch->dma_ch_obj, usb_cppi41_info.q_mgr,
                                    usb_cppi41_info.rx_comp_q[0]);
}

/*
 * cppi41_channel_abort
 *
 * Context: controller IRQ-locked, endpoint selected.
 */
static int cppi41_channel_abort(struct dma_channel *channel)
{
        struct cppi41 *cppi;
        struct cppi41_channel *cppi_ch;
        struct musb *musb;
        void __iomem *reg_base, *epio;
        unsigned long pd_addr;
        u32 csr, td_reg;
        u8 ch_num, ep_num;

        cppi_ch = container_of(channel, struct cppi41_channel, channel);
        ch_num = cppi_ch->ch_num;

        switch (channel->status) {
        case MUSB_DMA_STATUS_BUS_ABORT:
        case MUSB_DMA_STATUS_CORE_ABORT:
                /* From Rx or Tx fault IRQ handler */
        case MUSB_DMA_STATUS_BUSY:
                /* The hardware needs shutting down... */
                dprintk("%s: DMA busy, status = %x\n",
                        __func__, channel->status);
                break;
        case MUSB_DMA_STATUS_UNKNOWN:
                DBG(1, "%cx DMA%d not allocated\n",
                    cppi_ch->transmit ? 'T' : 'R', ch_num);
                /* FALLTHROUGH */
        case MUSB_DMA_STATUS_FREE:
                return 0;
        }

        cppi = cppi_ch->channel.private_data;
        musb = cppi->musb;
        reg_base = musb->ctrl_base;
        epio = cppi_ch->end_pt->regs;
        ep_num = ch_num + 1;

#ifdef DEBUG_CPPI_TD
        printk("Before teardown:");
        print_pd_list(cppi->pd_pool_head);
#endif

        if (cppi_ch->transmit) {
                dprintk("Tx channel teardown, cppi_ch = %p\n", cppi_ch);

                /* Tear down Tx DMA channel */
                usb_tx_ch_teardown(cppi_ch);

                /* Issue CPPI FIFO teardown for Tx channel */
                td_reg = musb_readl(reg_base, USB_TEARDOWN_REG);
                td_reg |= USB_TX_TDOWN_MASK(ep_num);
                musb_writel(reg_base, USB_TEARDOWN_REG, td_reg);

                /* Flush FIFO of the endpoint */
                csr = musb_readw(epio, MUSB_TXCSR);
                csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_H_WZC_BITS;
                musb_writew(epio, MUSB_TXCSR, csr);
        } else { /* Rx */
                dprintk("Rx channel teardown, cppi_ch = %p\n", cppi_ch);

                /* Flush FIFO of the endpoint */
                csr = musb_readw(epio, MUSB_RXCSR);
                csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_H_WZC_BITS;
                musb_writew(epio, MUSB_RXCSR, csr);

                /* Issue CPPI FIFO teardown for Rx channel */
                td_reg = musb_readl(reg_base, USB_TEARDOWN_REG);
                td_reg |= USB_RX_TDOWN_MASK(ep_num);
                musb_writel(reg_base, USB_TEARDOWN_REG, td_reg);

                /* Tear down Rx DMA channel */
                usb_rx_ch_teardown(cppi_ch);

                /*
                 * NOTE: docs don't guarantee any of this works...  we expect
                 * that if the USB core stops telling the CPPI core to pull
                 * more data from it, then it'll be safe to flush current Rx
                 * DMA state iff any pending FIFO transfer is done.
                 */

                /* For host, ensure ReqPkt is never set again */
                cppi41_autoreq_update(cppi_ch, USB_NO_AUTOREQ);

                /* For host, clear (just) ReqPkt at end of current packet(s) */
                if (is_host_active(cppi->musb))
                        csr &= ~MUSB_RXCSR_H_REQPKT;
                csr |= MUSB_RXCSR_H_WZC_BITS;

                /* Clear DMA enable */
                csr &= ~MUSB_RXCSR_DMAENAB;
                musb_writew(epio, MUSB_RXCSR, csr);

                /* Flush FIFO of the endpoint once again */
                csr = musb_readw(epio, MUSB_RXCSR);
                csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_H_WZC_BITS;
                musb_writew(epio, MUSB_RXCSR, csr);

                udelay(50);
        }

        /*
         * There might be PDs in the Rx/Tx source queue that were not consumed
         * by the DMA controller -- they need to be recycled properly.
         */
        while ((pd_addr = cppi41_queue_pop(&cppi_ch->queue_obj)) != 0) {
                struct usb_pkt_desc *curr_pd;

                curr_pd = usb_get_pd_ptr(cppi, pd_addr);
                if (curr_pd == NULL) {
                        ERR("Invalid PD popped from source queue\n");
                        continue;
                }

                /*
                 * Return Rx/Tx PDs to the software list --
                 * this is protected by critical section.
                 */
                dprintk("Returning PD %p to the free PD list\n", curr_pd);
                usb_put_free_pd(cppi, curr_pd);
        }

#ifdef DEBUG_CPPI_TD
        printk("After teardown:");
        print_pd_list(cppi->pd_pool_head);
#endif

        /* Re-enable the DMA channel */
        cppi41_dma_ch_enable(&cppi_ch->dma_ch_obj);

        channel->status = MUSB_DMA_STATUS_FREE;

        return 0;
}

/**
 * dma_controller_create - instantiate an object representing DMA controller.
 */
struct dma_controller * __init dma_controller_create(struct musb *musb,
                                                     void __iomem *mregs)
{
        struct cppi41 *cppi;

        cppi = kzalloc(sizeof *cppi, GFP_KERNEL);
        if (!cppi)
                return NULL;

        /* Initialize the CPPI 4.1 DMA controller structure */
        cppi->musb = musb;
        cppi->controller.start = cppi41_controller_start;
        cppi->controller.stop = cppi41_controller_stop;
        cppi->controller.channel_alloc = cppi41_channel_alloc;
        cppi->controller.channel_release = cppi41_channel_release;
        cppi->controller.channel_program = cppi41_channel_program;
        cppi->controller.channel_abort = cppi41_channel_abort;

        return &cppi->controller;
}

/**
 * dma_controller_destroy - destroy a previously instantiated DMA controller
 * @controller: the controller
 */
void dma_controller_destroy(struct dma_controller *controller)
{
        struct cppi41 *cppi;

        cppi = container_of(controller, struct cppi41, controller);

        /* Free the CPPI object */
        kfree(cppi);
}
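
/*
 * usb_process_tx_queue - walk one Tx completion queue, recycling each popped
 * PD and either queueing the next segment of the transfer or signalling Tx
 * completion to the MUSB core.
 */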
static void usb_process_tx_queue(struct cppi41 *cppi, unsigned index)
{
        struct cppi41_queue_obj tx_queue_obj;
        unsigned long pd_addr;

        if (cppi41_queue_init(&tx_queue_obj, usb_cppi41_info.q_mgr,
                              usb_cppi41_info.tx_comp_q[index])) {
                DBG(1, "ERROR: cppi41_queue_init failed for "
                    "Tx completion queue");
                return;
        }

        while ((pd_addr = cppi41_queue_pop(&tx_queue_obj)) != 0) {
                struct usb_pkt_desc *curr_pd;
                struct cppi41_channel *tx_ch;
                u8 ch_num, ep_num;
                u32 length;

                curr_pd = usb_get_pd_ptr(cppi, pd_addr);
                if (curr_pd == NULL) {
                        ERR("Invalid PD popped from Tx completion queue\n");
                        continue;
                }

                /* Extract the data from received packet descriptor */
                ch_num = curr_pd->ch_num;
                ep_num = curr_pd->ep_num;
                length = curr_pd->hw_desc.buf_len;

                tx_ch = &cppi->tx_cppi_ch[ch_num];
                tx_ch->channel.actual_len += length;

                /*
                 * Return Tx PD to the software list --
                 * this is protected by critical section
                 */
                usb_put_free_pd(cppi, curr_pd);

                if ((tx_ch->curr_offset < tx_ch->length) ||
                    (tx_ch->transfer_mode && !tx_ch->zlp_queued))
                        cppi41_next_tx_segment(tx_ch);
                else if (tx_ch->channel.actual_len >= tx_ch->length) {
                        tx_ch->channel.status = MUSB_DMA_STATUS_FREE;

                        /* Tx completion routine callback */
                        musb_dma_completion(cppi->musb, ep_num, 1);
                }
        }
}
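
/*
 * usb_process_rx_queue - walk one Rx completion queue, accumulating the
 * received length; a short packet or a completely filled buffer finishes the
 * transfer, otherwise the next Rx segment is queued.
 */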
static void usb_process_rx_queue(struct cppi41 *cppi, unsigned index)
{
        struct cppi41_queue_obj rx_queue_obj;
        unsigned long pd_addr;

        if (cppi41_queue_init(&rx_queue_obj, usb_cppi41_info.q_mgr,
                              usb_cppi41_info.rx_comp_q[index])) {
                DBG(1, "ERROR: cppi41_queue_init failed for Rx queue\n");
                return;
        }

        while ((pd_addr = cppi41_queue_pop(&rx_queue_obj)) != 0) {
                struct usb_pkt_desc *curr_pd;
                struct cppi41_channel *rx_ch;
                u8 ch_num, ep_num;
                u32 length;

                curr_pd = usb_get_pd_ptr(cppi, pd_addr);
                if (curr_pd == NULL) {
                        ERR("Invalid PD popped from Rx completion queue\n");
                        continue;
                }

                /* Extract the data from received packet descriptor */
                ch_num = curr_pd->ch_num;
                ep_num = curr_pd->ep_num;
                length = curr_pd->hw_desc.buf_len;

                rx_ch = &cppi->rx_cppi_ch[ch_num];
                rx_ch->channel.actual_len += length;

                /*
                 * Return Rx PD to the software list --
                 * this is protected by critical section
                 */
                usb_put_free_pd(cppi, curr_pd);

                if (unlikely(rx_ch->channel.actual_len >= rx_ch->length ||
                             length < curr_pd->hw_desc.orig_buf_len)) {
                        rx_ch->channel.status = MUSB_DMA_STATUS_FREE;

                        /* Rx completion routine callback */
                        musb_dma_completion(cppi->musb, ep_num, 0);
                } else
                        cppi41_next_rx_segment(rx_ch);
        }
}

/*
 * cppi41_completion - handle interrupts from the Tx/Rx completion queues
 *
 * NOTE: since we have to manually prod the Rx process in the transparent mode,
 * we certainly want to handle the Rx queues first.
 */
void cppi41_completion(struct musb *musb, u32 rx, u32 tx)
{
        struct cppi41 *cppi;
        unsigned index;

        cppi = container_of(musb->dma_controller, struct cppi41, controller);

        /* Process packet descriptors from the Rx queues */
        for (index = 0; rx != 0; rx >>= 1, index++)
                if (rx & 1)
                        usb_process_rx_queue(cppi, index);

        /* Process packet descriptors from the Tx completion queues */
        for (index = 0; tx != 0; tx >>= 1, index++)
                if (tx & 1)
                        usb_process_tx_queue(cppi, index);
}