1 /*
2 * MUSB OTG driver peripheral support
3 *
4 * Copyright 2005 Mentor Graphics Corporation
5 * Copyright (C) 2005-2006 by Texas Instruments
6 * Copyright (C) 2006-2007 Nokia Corporation
7 * Copyright (C) 2009 MontaVista Software, Inc. <source@mvista.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
21 * 02110-1301 USA
22 *
23 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
24 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
25 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
26 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
29 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
30 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 */
36 #include <linux/kernel.h>
37 #include <linux/list.h>
38 #include <linux/timer.h>
39 #include <linux/module.h>
40 #include <linux/smp.h>
41 #include <linux/spinlock.h>
42 #include <linux/delay.h>
43 #include <linux/dma-mapping.h>
44 #include <linux/slab.h>
46 #include "musb_core.h"
49 /* MUSB PERIPHERAL status 3-mar-2006:
50 *
51 * - EP0 seems solid. It passes both USBCV and usbtest control cases.
52 * Minor glitches:
53 *
54 * + remote wakeup to Linux hosts works, but USBCV failures were
55 * seen in one test run (operator error?)
56 * + endpoint halt tests -- in both usbtest and usbcv -- seem
57 * to break when dma is enabled ... is something wrongly
58 * clearing SENDSTALL?
59 *
60 * - Mass storage behaved ok when last tested. Network traffic patterns
61 * (with lots of short transfers etc) need retesting; they turn up the
62 * worst cases of the DMA, since short packets are typical but are not
63 * required.
64 *
65 * - TX/IN
66 * + both pio and dma behave well with network and g_zero tests
67 * + no cppi throughput issues other than no-hw-queueing
68 * + failed with FLAT_REG (DaVinci)
69 * + seems to behave with double buffering, PIO -and- CPPI
70 * + with gadgetfs + AIO, requests got lost?
71 *
72 * - RX/OUT
73 * + both pio and dma behave well with network and g_zero tests
74 * + dma is slow in typical case (short_not_ok is clear)
75 * + double buffering ok with PIO
76 * + double buffering *FAILS* with CPPI, wrong data bytes sometimes
77 * + request lossage observed with gadgetfs
78 *
79 * - ISO not tested ... might work, but only weakly isochronous
80 *
81 * - Gadget driver disabling of softconnect during bind() is ignored; so
82 * drivers can't hold off host requests until userspace is ready.
83 * (Workaround: they can turn it off later.)
84 *
85 * - PORTABILITY (assumes PIO works):
86 * + DaVinci, basically works with cppi dma
87 * + OMAP 2430, ditto with mentor dma
88 * + TUSB 6010, platform-specific dma in the works
89 */
91 /* ----------------------------------------------------------------------- */
93 #define is_buffer_mapped(req) (is_dma_capable() && \
94 (req->map_state != UN_MAPPED))
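/*
 * Buffer mapping states used by map_dma_buffer()/unmap_dma_buffer():
 * UN_MAPPED   - no DMA mapping is active for this request
 * MUSB_MAPPED - this driver mapped request->request.buf and must unmap it
 * PRE_MAPPED  - the gadget driver supplied an already-mapped buffer
 *               (request.dma was valid); only cache syncs are needed
 */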
96 /* Map the buffer for dma */
98 static inline void map_dma_buffer(struct musb_request *request,
99 struct musb *musb, struct musb_ep *musb_ep)
100 {
101 int compatible = true;
102 struct dma_controller *dma = musb->dma_controller;
104 request->map_state = UN_MAPPED;
106 if (!is_dma_capable() || !musb_ep->dma)
107 return;
109 /* Check if DMA engine can handle this request.
110 * DMA code must reject the USB request explicitly.
111 * Default behaviour is to map the request.
112 */
113 if (dma->is_compatible)
114 compatible = dma->is_compatible(musb_ep->dma,
115 musb_ep->packet_sz, request->request.buf,
116 request->request.length);
117 if (!compatible)
118 return;
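/*
 * If the gadget driver did not map the buffer itself (request.dma is
 * still DMA_ADDR_INVALID), map it here and remember to unmap it later;
 * otherwise the buffer was pre-mapped by the caller and only needs a
 * cache sync towards the device.
 */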
120 if (request->request.dma == DMA_ADDR_INVALID) {
121 request->request.dma = dma_map_single(
122 musb->controller,
123 request->request.buf,
124 request->request.length,
125 request->tx
126 ? DMA_TO_DEVICE
127 : DMA_FROM_DEVICE);
128 request->map_state = MUSB_MAPPED;
129 } else {
130 dma_sync_single_for_device(musb->controller,
131 request->request.dma,
132 request->request.length,
133 request->tx
134 ? DMA_TO_DEVICE
135 : DMA_FROM_DEVICE);
136 request->map_state = PRE_MAPPED;
137 }
138 }
140 /* Unmap the buffer from dma and map it back to the cpu */
141 static inline void unmap_dma_buffer(struct musb_request *request,
142 struct musb *musb)
143 {
144 if (!is_buffer_mapped(request))
145 return;
147 if (request->request.dma == DMA_ADDR_INVALID) {
148 dev_vdbg(musb->controller,
149 "not unmapping a never mapped buffer\n");
150 return;
151 }
152 if (request->map_state == MUSB_MAPPED) {
153 dma_unmap_single(musb->controller,
154 request->request.dma,
155 request->request.length,
156 request->tx
157 ? DMA_TO_DEVICE
158 : DMA_FROM_DEVICE);
159 request->request.dma = DMA_ADDR_INVALID;
160 } else { /* PRE_MAPPED */
161 dma_sync_single_for_cpu(musb->controller,
162 request->request.dma,
163 request->request.length,
164 request->tx
165 ? DMA_TO_DEVICE
166 : DMA_FROM_DEVICE);
167 }
168 request->map_state = UN_MAPPED;
169 }
171 /*
172 * Immediately complete a request.
173 *
174 * @param request the request to complete
175 * @param status the status to complete the request with
176 * Context: controller locked, IRQs blocked.
177 */
178 void musb_g_giveback(
179 struct musb_ep *ep,
180 struct usb_request *request,
181 int status)
182 __releases(ep->musb->lock)
183 __acquires(ep->musb->lock)
184 {
185 struct musb_request *req;
186 struct musb *musb;
187 int busy = ep->busy;
189 req = to_musb_request(request);
191 list_del(&req->list);
192 if (req->request.status == -EINPROGRESS)
193 req->request.status = status;
194 musb = req->musb;
196 ep->busy = 1;
197 spin_unlock(&musb->lock);
198 unmap_dma_buffer(req, musb);
199 if (request->status == 0)
200 dev_dbg(musb->controller, "%s done request %p, %d/%d\n",
201 ep->end_point.name, request,
202 req->request.actual, req->request.length);
203 else
204 dev_dbg(musb->controller, "%s request %p, %d/%d fault %d\n",
205 ep->end_point.name, request,
206 req->request.actual, req->request.length,
207 request->status);
208 req->request.complete(&req->ep->end_point, &req->request);
209 spin_lock(&musb->lock);
210 ep->busy = busy;
211 }
213 /* ----------------------------------------------------------------------- */
215 /*
216 * Abort requests queued to an endpoint, completing each with the given status. Synchronous.
217 * Caller holds the controller lock, has IRQs blocked, and has selected this ep.
218 */
219 static void nuke(struct musb_ep *ep, const int status)
220 {
221 struct musb *musb = ep->musb;
222 struct musb_request *req = NULL;
223 void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs;
225 ep->busy = 1;
227 if (is_dma_capable() && ep->dma) {
228 struct dma_controller *c = ep->musb->dma_controller;
229 int value;
231 if (ep->is_in) {
232 /*
233 * The programming guide says that we must not clear
234 * the DMAMODE bit before DMAENAB, so we only
235 * clear it in the second write...
236 */
237 musb_writew(epio, MUSB_TXCSR,
238 MUSB_TXCSR_DMAMODE | MUSB_TXCSR_FLUSHFIFO);
239 musb_writew(epio, MUSB_TXCSR,
240 0 | MUSB_TXCSR_FLUSHFIFO);
241 } else {
242 musb_writew(epio, MUSB_RXCSR,
243 0 | MUSB_RXCSR_FLUSHFIFO);
244 musb_writew(epio, MUSB_RXCSR,
245 0 | MUSB_RXCSR_FLUSHFIFO);
246 }
248 value = c->channel_abort(ep->dma);
249 dev_dbg(musb->controller, "%s: abort DMA --> %d\n",
250 ep->name, value);
251 c->channel_release(ep->dma);
252 ep->dma = NULL;
253 }
255 while (!list_empty(&ep->req_list)) {
256 req = list_first_entry(&ep->req_list, struct musb_request, list);
257 musb_g_giveback(ep, &req->request, status);
258 }
259 }
261 /* ----------------------------------------------------------------------- */
263 /* Data transfers - pure PIO, pure DMA, or mixed mode */
265 /*
266 * This assumes the separate CPPI engine is responding to DMA requests
267 * from the usb core ... sequenced a bit differently from mentor dma.
268 */
270 static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep)
271 {
272 if (can_bulk_split(musb, ep->type))
273 return ep->hw_ep->max_packet_sz_tx;
274 else
275 return ep->packet_sz;
276 }
279 /* Peripheral tx (IN) using Mentor DMA works as follows:
280 Only mode 0 is used for transfers <= wPktSize,
281 mode 1 is used for larger transfers.
283 One of the following happens:
284 - Host sends IN token which causes an endpoint interrupt
285 -> TxAvail
286 -> if DMA is currently busy, exit.
287 -> if queue is non-empty, txstate().
289 - Request is queued by the gadget driver.
290 -> if queue was previously empty, txstate()
292 txstate()
293 -> start
294 /\ -> setup DMA
295 | (data is transferred to the FIFO, then sent out when
296 | IN token(s) are received from the Host.)
297 | -> DMA interrupt on completion
298 | calls TxAvail.
299 | -> stop DMA, ~DMAENAB,
300 | -> set TxPktRdy for last short pkt or zlp
301 | -> Complete Request
302 | -> Continue next request (call txstate)
303 |___________________________________|
305 * Non-Mentor DMA engines can of course work differently, such as by
306 * upleveling from irq-per-packet to irq-per-buffer.
307 */
309 /*
310 * An endpoint is transmitting data. This can be called either from
311 * the IRQ routine or from ep.queue() to kickstart a request on an
312 * endpoint.
313 *
314 * Context: controller locked, IRQs blocked, endpoint selected
315 */
316 static void txstate(struct musb *musb, struct musb_request *req)
317 {
318 u8 epnum = req->epnum;
319 struct musb_ep *musb_ep;
320 void __iomem *epio = musb->endpoints[epnum].regs;
321 struct usb_request *request;
322 u16 fifo_count = 0, csr;
323 int use_dma = 0;
325 musb_ep = req->ep;
327 /* we shouldn't get here while DMA is active ... but we do ... */
328 if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
329 dev_dbg(musb->controller, "dma pending...\n");
330 return;
331 }
333 /* read TXCSR before */
334 csr = musb_readw(epio, MUSB_TXCSR);
336 request = &req->request;
337 fifo_count = min(max_ep_writesize(musb, musb_ep),
338 (int)(request->length - request->actual));
340 if (csr & MUSB_TXCSR_TXPKTRDY) {
341 dev_dbg(musb->controller, "%s old packet still ready, txcsr %03x\n",
342 musb_ep->end_point.name, csr);
343 return;
344 }
346 if (csr & MUSB_TXCSR_P_SENDSTALL) {
347 dev_dbg(musb->controller, "%s stalling, txcsr %03x\n",
348 musb_ep->end_point.name, csr);
349 return;
350 }
352 dev_dbg(musb->controller, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n",
353 epnum, musb_ep->packet_sz, fifo_count,
354 csr);
356 #ifndef CONFIG_MUSB_PIO_ONLY
357 if (is_buffer_mapped(req)) {
358 struct dma_controller *c = musb->dma_controller;
359 size_t request_size;
361 /* setup DMA, then program endpoint CSR */
362 request_size = min_t(size_t, request->length - request->actual,
363 musb_ep->dma->max_len);
365 use_dma = (request->dma != DMA_ADDR_INVALID);
367 /* MUSB_TXCSR_P_ISO is still set correctly */
369 if (is_inventra_dma(musb) || is_ux500_dma(musb)) {
370 if (request_size < musb_ep->packet_sz)
371 musb_ep->dma->desired_mode = 0;
372 else
373 musb_ep->dma->desired_mode = 1;
375 use_dma = use_dma && c->channel_program(
376 musb_ep->dma, musb_ep->packet_sz,
377 musb_ep->dma->desired_mode,
378 request->dma + request->actual, request_size);
379 if (use_dma) {
380 if (musb_ep->dma->desired_mode == 0) {
381 /*
382 * We must not clear the DMAMODE bit
383 * before the DMAENAB bit -- and the
384 * latter doesn't always get cleared
385 * before we get here...
386 */
387 csr &= ~(MUSB_TXCSR_AUTOSET
388 | MUSB_TXCSR_DMAENAB);
389 musb_writew(epio, MUSB_TXCSR, csr
390 | MUSB_TXCSR_P_WZC_BITS);
391 csr &= ~MUSB_TXCSR_DMAMODE;
392 csr |= (MUSB_TXCSR_DMAENAB |
393 MUSB_TXCSR_MODE);
394 /* against programming guide */
395 } else {
396 csr |= (MUSB_TXCSR_DMAENAB
397 | MUSB_TXCSR_DMAMODE
398 | MUSB_TXCSR_MODE);
399 if (!musb_ep->hb_mult)
400 csr |= MUSB_TXCSR_AUTOSET;
401 }
402 csr &= ~MUSB_TXCSR_P_UNDERRUN;
404 musb_writew(epio, MUSB_TXCSR, csr);
405 }
406 } else if (is_cppi_enabled(musb) || is_cppi41_enabled(musb)) {
407 /* program endpoint CSR first, then setup DMA */
408 csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
410 if (request_size == 0)
411 csr &= ~(MUSB_TXCSR_DMAENAB |
412 MUSB_TXCSR_DMAMODE);
413 else
414 csr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE |
415 MUSB_TXCSR_MODE;
416 musb_writew(epio, MUSB_TXCSR,
417 (MUSB_TXCSR_P_WZC_BITS & ~MUSB_TXCSR_P_UNDERRUN)
418 | csr);
420 /* ensure writebuffer is empty */
421 csr = musb_readw(epio, MUSB_TXCSR);
423 /* NOTE host side sets DMAENAB later than this; both are
424 * OK since the transfer dma glue (between CPPI & Mentor
425 * fifos) just tells CPPI it could start. Data only
426 * moves to the USB TX fifo when both fifos are ready.
427 */
429 /* "mode" is irrelevant here; handle terminating ZLPs
430 * like PIO does, since the hardware RNDIS mode seems
431 * unreliable except for the last-packet-is-already-
432 * short case.
433 */
434 /* for zero byte transfer use pio mode */
435 if (request_size == 0)
436 use_dma = 0;
437 else {
438 use_dma = use_dma && c->channel_program(
439 musb_ep->dma, musb_ep->packet_sz,
440 0,
441 request->dma + request->actual,
442 request_size);
443 if (!use_dma) {
444 c->channel_release(musb_ep->dma);
445 musb_ep->dma = NULL;
446 csr &= ~MUSB_TXCSR_DMAENAB;
447 musb_writew(epio, MUSB_TXCSR, csr);
448 /* invariant: request->buf is non-null */
449 }
450 }
451 } else if (tusb_dma_omap(musb)) {
452 use_dma = use_dma && c->channel_program(
453 musb_ep->dma, musb_ep->packet_sz,
454 request->zero,
455 request->dma + request->actual,
456 request_size);
457 }
458 }
459 #endif
461 if (!use_dma) {
462 /*
463 * Unmap the dma buffer back to cpu if dma channel
464 * programming fails
465 */
466 unmap_dma_buffer(req, musb);
468 musb->ops->write_fifo(musb_ep->hw_ep, fifo_count,
469 (u8 *) (request->buf + request->actual));
470 request->actual += fifo_count;
471 csr |= MUSB_TXCSR_TXPKTRDY;
472 csr &= ~MUSB_TXCSR_P_UNDERRUN;
473 musb_writew(epio, MUSB_TXCSR, csr);
474 }
476 /* host may already have the data when this message shows... */
477 dev_dbg(musb->controller, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n",
478 musb_ep->end_point.name, use_dma ? "dma" : "pio",
479 request->actual, request->length,
480 musb_readw(epio, MUSB_TXCSR),
481 fifo_count,
482 musb_readw(epio, MUSB_TXMAXP));
483 }
485 /*
486 * FIFO state update (e.g. data ready).
487 * Called from IRQ, with controller locked.
488 */
489 void musb_g_tx(struct musb *musb, u8 epnum)
490 {
491 u16 csr;
492 struct musb_request *req;
493 struct usb_request *request;
494 u8 __iomem *mbase = musb->mregs;
495 struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_in;
496 void __iomem *epio = musb->endpoints[epnum].regs;
497 struct dma_channel *dma;
499 musb_ep_select(musb, mbase, epnum);
500 req = next_request(musb_ep);
501 request = &req->request;
503 csr = musb_readw(epio, MUSB_TXCSR);
504 dev_dbg(musb->controller, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr);
506 dma = is_dma_capable() ? musb_ep->dma : NULL;
508 /*
509 * REVISIT: for high bandwidth, MUSB_TXCSR_P_INCOMPTX
510 * probably rates reporting as a host error.
511 */
512 if (csr & MUSB_TXCSR_P_SENTSTALL) {
513 csr |= MUSB_TXCSR_P_WZC_BITS;
514 csr &= ~MUSB_TXCSR_P_SENTSTALL;
515 musb_writew(epio, MUSB_TXCSR, csr);
516 return;
517 }
519 if (csr & MUSB_TXCSR_P_UNDERRUN) {
520 /* We NAKed, no big deal... little reason to care. */
521 csr |= MUSB_TXCSR_P_WZC_BITS;
522 csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
523 musb_writew(epio, MUSB_TXCSR, csr);
524 dev_vdbg(musb->controller, "underrun on ep%d, req %p\n",
525 epnum, request);
526 }
528 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
529 /*
530 * SHOULD NOT HAPPEN... has with CPPI though, after
531 * changing SENDSTALL (and other cases); harmless?
532 */
533 dev_dbg(musb->controller, "%s dma still busy?\n", musb_ep->end_point.name);
534 return;
535 }
537 if (request) {
538 u8 is_dma = 0;
540 if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
541 is_dma = 1;
542 csr |= MUSB_TXCSR_P_WZC_BITS;
543 csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN |
544 MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET);
545 musb_writew(epio, MUSB_TXCSR, csr);
546 /* Ensure writebuffer is empty. */
547 csr = musb_readw(epio, MUSB_TXCSR);
548 request->actual += musb_ep->dma->actual_len;
549 dev_dbg(musb->controller, "TXCSR%d %04x, DMA off, len %zu, req %p\n",
550 epnum, csr, musb_ep->dma->actual_len, request);
551 }
553 /*
554 * First, maybe a terminating short packet. Some DMA
555 * engines might handle this by themselves.
556 */
557 if ((request->zero && request->length
558 && (request->length % musb_ep->packet_sz == 0)
559 && (request->actual == request->length))
560 || ((is_inventra_dma(musb) || is_ux500_dma(musb)) &&
561 is_dma && (!dma->desired_mode || (request->actual &
562 (musb_ep->packet_sz - 1))))
563 ) {
564 /*
565 * On DMA completion, FIFO may not be
566 * available yet...
567 */
568 if (csr & MUSB_TXCSR_TXPKTRDY)
569 return;
571 dev_dbg(musb->controller, "sending zero pkt\n");
572 musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_MODE
573 | MUSB_TXCSR_TXPKTRDY);
574 request->zero = 0;
575 }
577 if (request->actual == request->length) {
578 musb_g_giveback(musb_ep, request, 0);
579 req = musb_ep->desc ? next_request(musb_ep) : NULL;
580 if (!req) {
581 dev_dbg(musb->controller, "%s idle now\n",
582 musb_ep->end_point.name);
583 return;
584 }
585 }
587 txstate(musb, req);
588 }
589 }
591 /* ------------------------------------------------------------ */
593 /* Peripheral rx (OUT) using Mentor DMA works as follows:
594 - Only mode 0 is used.
596 - Request is queued by the gadget class driver.
597 -> if queue was previously empty, rxstate()
599 - Host sends OUT token which causes an endpoint interrupt
600 /\ -> RxReady
601 | -> if request queued, call rxstate
602 | /\ -> setup DMA
603 | | -> DMA interrupt on completion
604 | | -> RxReady
605 | | -> stop DMA
606 | | -> ack the read
607 | | -> if data recd = max expected
608 | | by the request, or host
609 | | sent a short packet,
610 | | complete the request,
611 | | and start the next one.
612 | |_____________________________________|
613 | else just wait for the host
614 | to send the next OUT token.
615 |__________________________________________________|
617 * Non-Mentor DMA engines can of course work differently.
618 */
620 /*
621 * Context: controller locked, IRQs blocked, endpoint selected
622 */
623 static void rxstate(struct musb *musb, struct musb_request *req)
624 {
625 const u8 epnum = req->epnum;
626 struct usb_request *request = &req->request;
627 struct musb_ep *musb_ep;
628 void __iomem *epio = musb->endpoints[epnum].regs;
629 unsigned fifo_count = 0;
630 u16 len;
631 u16 csr = musb_readw(epio, MUSB_RXCSR);
632 struct musb_hw_ep *hw_ep = &musb->endpoints[epnum];
633 u8 use_mode_1;
635 if (hw_ep->is_shared_fifo)
636 musb_ep = &hw_ep->ep_in;
637 else
638 musb_ep = &hw_ep->ep_out;
640 len = musb_ep->packet_sz;
642 /* We shouldn't get here while DMA is active, but we do... */
643 if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
644 dev_dbg(musb->controller, "DMA pending...\n");
645 return;
646 }
648 if (csr & MUSB_RXCSR_P_SENDSTALL) {
649 dev_dbg(musb->controller, "%s stalling, RXCSR %04x\n",
650 musb_ep->end_point.name, csr);
651 return;
652 }
654 if ((is_cppi_enabled(musb) || is_cppi41_enabled(musb)) &&
655 is_buffer_mapped(req)) {
656 struct dma_controller *c = musb->dma_controller;
657 struct dma_channel *channel = musb_ep->dma;
659 /* NOTE: CPPI won't actually stop advancing the DMA
660 * queue after short packet transfers, so this is almost
661 * always going to run as IRQ-per-packet DMA so that
662 * faults will be handled correctly.
663 */
664 if (c->channel_program(channel,
665 musb_ep->packet_sz,
666 !request->short_not_ok,
667 request->dma + request->actual,
668 request->length - request->actual)) {
670 /* make sure that if an rxpkt arrived after the irq,
671 * the cppi engine will be ready to take it as soon
672 * as DMA is enabled
673 */
674 csr &= ~(MUSB_RXCSR_AUTOCLEAR
675 | MUSB_RXCSR_DMAMODE);
676 csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_P_WZC_BITS;
677 musb_writew(epio, MUSB_RXCSR, csr);
678 return;
679 }
680 }
682 if (csr & MUSB_RXCSR_RXPKTRDY) {
683 len = musb_readw(epio, MUSB_RXCOUNT);
685 /*
686 * Enable Mode 1 on RX transfers only when short_not_ok flag
687 * is set. Currently the short_not_ok flag is set only by the
688 * file_storage and f_mass_storage drivers.
689 */
691 if (request->short_not_ok && len == musb_ep->packet_sz)
692 use_mode_1 = 1;
693 else
694 use_mode_1 = 0;
696 if (request->actual < request->length) {
697 if (is_buffer_mapped(req) && is_inventra_dma(musb)) {
698 struct dma_controller *c;
699 struct dma_channel *channel;
700 int use_dma = 0;
702 c = musb->dma_controller;
703 channel = musb_ep->dma;
705 /* We use DMA Req mode 0 in rx_csr, and DMA controller operates in
706 * mode 0 only. So we do not get endpoint interrupts due to DMA
707 * completion. We only get interrupts from DMA controller.
708 *
709 * We could operate in DMA mode 1 if we knew the size of the transfer
710 * in advance. For mass storage class, request->length = what the host
711 * sends, so that'd work. But for pretty much everything else,
712 * request->length is routinely more than what the host sends. For
713 * most of these gadgets, the end is signified either by a short packet,
714 * or by filling the last byte of the buffer. (Sending extra data in
715 * that last packet should trigger an overflow fault.) But in mode 1,
716 * we don't get DMA completion interrupt for short packets.
717 *
718 * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1),
719 * to get endpoint interrupt on every DMA req, but that didn't seem
720 * to work reliably.
721 *
722 * REVISIT an updated g_file_storage can set req->short_not_ok, which
723 * then becomes usable as a runtime "use mode 1" hint...
724 */
726 /* Experimental: Mode1 works with mass storage use cases */
727 if (use_mode_1) {
728 csr |= MUSB_RXCSR_AUTOCLEAR;
729 musb_writew(epio, MUSB_RXCSR, csr);
730 csr |= MUSB_RXCSR_DMAENAB;
731 musb_writew(epio, MUSB_RXCSR, csr);
733 /*
734 * this special sequence (enabling and then
735 * disabling MUSB_RXCSR_DMAMODE) is required
736 * to get DMAReq to activate
737 */
738 musb_writew(epio, MUSB_RXCSR,
739 csr | MUSB_RXCSR_DMAMODE);
740 musb_writew(epio, MUSB_RXCSR, csr);
742 } else {
743 if (!musb_ep->hb_mult &&
744 musb_ep->hw_ep->rx_double_buffered)
745 csr |= MUSB_RXCSR_AUTOCLEAR;
746 csr |= MUSB_RXCSR_DMAENAB;
747 musb_writew(epio, MUSB_RXCSR, csr);
748 }
750 if (request->actual < request->length) {
751 int transfer_size = 0;
752 if (use_mode_1) {
753 transfer_size = min(request->length - request->actual,
754 channel->max_len);
755 musb_ep->dma->desired_mode = 1;
756 } else {
757 transfer_size = min(request->length - request->actual,
758 (unsigned)len);
759 musb_ep->dma->desired_mode = 0;
760 }
762 use_dma = c->channel_program(
763 channel,
764 musb_ep->packet_sz,
765 channel->desired_mode,
766 request->dma
767 + request->actual,
768 transfer_size);
769 }
771 if (use_dma)
772 return;
773 }
774 if (is_ux500_dma(musb) && (is_buffer_mapped(req)) &&
775 (request->actual < request->length)) {
777 struct dma_controller *c;
778 struct dma_channel *channel;
779 int transfer_size = 0;
781 c = musb->dma_controller;
782 channel = musb_ep->dma;
784 /* In case first packet is short */
785 if (len < musb_ep->packet_sz)
786 transfer_size = len;
787 else if (request->short_not_ok)
788 transfer_size = min(request->length -
789 request->actual,
790 channel->max_len);
791 else
792 transfer_size = min(request->length -
793 request->actual,
794 (unsigned)len);
796 csr &= ~MUSB_RXCSR_DMAMODE;
797 csr |= (MUSB_RXCSR_DMAENAB |
798 MUSB_RXCSR_AUTOCLEAR);
800 musb_writew(epio, MUSB_RXCSR, csr);
802 if (transfer_size <= musb_ep->packet_sz) {
803 musb_ep->dma->desired_mode = 0;
804 } else {
805 musb_ep->dma->desired_mode = 1;
806 /* Mode must be set after DMAENAB */
807 csr |= MUSB_RXCSR_DMAMODE;
808 musb_writew(epio, MUSB_RXCSR, csr);
809 }
811 if (c->channel_program(channel,
812 musb_ep->packet_sz,
813 channel->desired_mode,
814 request->dma
815 + request->actual,
816 transfer_size))
818 return;
819 }
821 fifo_count = request->length - request->actual;
822 dev_dbg(musb->controller, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n",
823 musb_ep->end_point.name,
824 len, fifo_count,
825 musb_ep->packet_sz);
827 fifo_count = min_t(unsigned, len, fifo_count);
829 if (tusb_dma_omap(musb) && is_buffer_mapped(req)) {
830 struct dma_controller *c = musb->dma_controller;
831 struct dma_channel *channel = musb_ep->dma;
832 u32 dma_addr = request->dma + request->actual;
833 int ret;
835 ret = c->channel_program(channel,
836 musb_ep->packet_sz,
837 channel->desired_mode,
838 dma_addr,
839 fifo_count);
840 if (ret)
841 return;
842 }
844 /*
845 * Unmap the dma buffer back to cpu if dma channel
846 * programming fails. This buffer is mapped if the
847 * channel allocation is successful
848 */
849 if (is_buffer_mapped(req)) {
850 unmap_dma_buffer(req, musb);
852 /*
853 * Clear DMAENAB and AUTOCLEAR for the
854 * PIO mode transfer
855 */
856 csr &= ~(MUSB_RXCSR_DMAENAB | MUSB_RXCSR_AUTOCLEAR);
857 musb_writew(epio, MUSB_RXCSR, csr);
858 }
860 musb->ops->read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
861 (request->buf + request->actual));
862 request->actual += fifo_count;
864 /* REVISIT if we left anything in the fifo, flush
865 * it and report -EOVERFLOW
866 */
868 /* ack the read! */
869 csr |= MUSB_RXCSR_P_WZC_BITS;
870 csr &= ~MUSB_RXCSR_RXPKTRDY;
871 musb_writew(epio, MUSB_RXCSR, csr);
872 }
873 }
875 /* reach the end or short packet detected */
876 if (request->actual == request->length || len < musb_ep->packet_sz)
877 musb_g_giveback(musb_ep, request, 0);
878 }
880 /*
881 * Data ready for a request; called from IRQ
882 */
883 void musb_g_rx(struct musb *musb, u8 epnum)
884 {
885 u16 csr;
886 struct musb_request *req;
887 struct usb_request *request;
888 void __iomem *mbase = musb->mregs;
889 struct musb_ep *musb_ep;
890 void __iomem *epio = musb->endpoints[epnum].regs;
891 struct dma_channel *dma;
892 struct musb_hw_ep *hw_ep = &musb->endpoints[epnum];
894 if (hw_ep->is_shared_fifo)
895 musb_ep = &hw_ep->ep_in;
896 else
897 musb_ep = &hw_ep->ep_out;
899 musb_ep_select(musb, mbase, epnum);
901 req = next_request(musb_ep);
902 if (!req)
903 return;
905 request = &req->request;
907 csr = musb_readw(epio, MUSB_RXCSR);
908 dma = is_dma_capable() ? musb_ep->dma : NULL;
910 dev_dbg(musb->controller, "<== %s, rxcsr %04x%s %p\n", musb_ep->end_point.name,
911 csr, dma ? " (dma)" : "", request);
913 if (csr & MUSB_RXCSR_P_SENTSTALL) {
914 csr |= MUSB_RXCSR_P_WZC_BITS;
915 csr &= ~MUSB_RXCSR_P_SENTSTALL;
916 musb_writew(epio, MUSB_RXCSR, csr);
917 return;
918 }
920 if (csr & MUSB_RXCSR_P_OVERRUN) {
921 /* csr |= MUSB_RXCSR_P_WZC_BITS; */
922 csr &= ~MUSB_RXCSR_P_OVERRUN;
923 musb_writew(epio, MUSB_RXCSR, csr);
925 dev_dbg(musb->controller, "%s iso overrun on %p\n", musb_ep->name, request);
926 if (request->status == -EINPROGRESS)
927 request->status = -EOVERFLOW;
928 }
929 if (csr & MUSB_RXCSR_INCOMPRX) {
930 /* REVISIT not necessarily an error */
931 dev_dbg(musb->controller, "%s, incomprx\n", musb_ep->end_point.name);
932 }
934 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
935 /* "should not happen"; likely RXPKTRDY pending for DMA */
936 dev_dbg(musb->controller, "%s busy, csr %04x\n",
937 musb_ep->end_point.name, csr);
938 return;
939 }
941 if (dma && (csr & MUSB_RXCSR_DMAENAB)) {
942 csr &= ~(MUSB_RXCSR_AUTOCLEAR
943 | MUSB_RXCSR_DMAENAB
944 | MUSB_RXCSR_DMAMODE);
945 musb_writew(epio, MUSB_RXCSR,
946 MUSB_RXCSR_P_WZC_BITS | csr);
948 request->actual += musb_ep->dma->actual_len;
950 dev_dbg(musb->controller, "RXCSR%d %04x, dma off, %04x, len %zu, req %p\n",
951 epnum, csr,
952 musb_readw(epio, MUSB_RXCSR),
953 musb_ep->dma->actual_len, request);
955 if (is_inventra_dma(musb) || tusb_dma_omap(musb)
956 || is_ux500_dma(musb)) {
957 /* Autoclear doesn't clear RxPktRdy for short packets */
958 if ((dma->desired_mode == 0 && !hw_ep->rx_double_buffered)
959 || (dma->actual_len
960 & (musb_ep->packet_sz - 1))) {
961 /* ack the read! */
962 csr &= ~MUSB_RXCSR_RXPKTRDY;
963 musb_writew(epio, MUSB_RXCSR, csr);
964 }
966 /* incomplete, and not short? wait for next IN packet */
967 if ((request->actual < request->length)
968 && (musb_ep->dma->actual_len
969 == musb_ep->packet_sz)) {
970 /* In the double buffer case, continue to unload
971 * the fifo if there is an Rx packet in the FIFO.
972 */
973 csr = musb_readw(epio, MUSB_RXCSR);
974 if ((csr & MUSB_RXCSR_RXPKTRDY) &&
975 hw_ep->rx_double_buffered)
976 rxstate(musb, to_musb_request(request));
977 return;
978 }
979 }
980 musb_g_giveback(musb_ep, request, 0);
982 req = next_request(musb_ep);
983 if (!req)
984 return;
985 }
986 /* Analyze request */
987 rxstate(musb, req);
988 }
990 /* ------------------------------------------------------------ */
992 static int musb_gadget_enable(struct usb_ep *ep,
993 const struct usb_endpoint_descriptor *desc)
994 {
995 unsigned long flags;
996 struct musb_ep *musb_ep;
997 struct musb_hw_ep *hw_ep;
998 void __iomem *regs;
999 struct musb *musb;
1000 void __iomem *mbase;
1001 u8 epnum;
1002 u16 csr;
1003 unsigned tmp;
1004 int status = -EINVAL;
1006 if (!ep || !desc)
1007 return -EINVAL;
1009 musb_ep = to_musb_ep(ep);
1010 hw_ep = musb_ep->hw_ep;
1011 regs = hw_ep->regs;
1012 musb = musb_ep->musb;
1013 mbase = musb->mregs;
1014 epnum = musb_ep->current_epnum;
1016 spin_lock_irqsave(&musb->lock, flags);
1018 if (musb_ep->desc) {
1019 status = -EBUSY;
1020 goto fail;
1021 }
1022 musb_ep->type = usb_endpoint_type(desc);
1024 /* check direction and (later) maxpacket size against endpoint */
1025 if (usb_endpoint_num(desc) != epnum)
1026 goto fail;
1028 /* REVISIT this rules out high bandwidth periodic transfers */
1029 tmp = usb_endpoint_maxp(desc);
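/*
 * usb_endpoint_maxp() returns the raw wMaxPacketSize value: bits 10:0
 * hold the packet size, bits 12:11 the number of additional (high
 * bandwidth) transactions per microframe, stored below as hb_mult.
 */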
1030 if (tmp & ~0x07ff) {
1031 int ok;
1033 if (usb_endpoint_dir_in(desc))
1034 ok = musb->hb_iso_tx;
1035 else
1036 ok = musb->hb_iso_rx;
1038 if (!ok) {
1039 dev_dbg(musb->controller, "no support for high bandwidth ISO\n");
1040 goto fail;
1041 }
1042 musb_ep->hb_mult = (tmp >> 11) & 3;
1043 } else {
1044 musb_ep->hb_mult = 0;
1045 }
1047 musb_ep->packet_sz = tmp & 0x7ff;
1048 tmp = musb_ep->packet_sz * (musb_ep->hb_mult + 1);
1050 /* enable the interrupts for the endpoint, set the endpoint
1051 * packet size (or fail), set the mode, clear the fifo
1052 */
1053 musb_ep_select(musb, mbase, epnum);
1054 if (usb_endpoint_dir_in(desc)) {
1055 u16 int_txe = musb_readw(mbase, MUSB_INTRTXE);
1057 if (hw_ep->is_shared_fifo)
1058 musb_ep->is_in = 1;
1059 if (!musb_ep->is_in)
1060 goto fail;
1062 if (tmp > hw_ep->max_packet_sz_tx) {
1063 dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n");
1064 goto fail;
1065 }
1067 int_txe |= (1 << epnum);
1068 musb_writew(mbase, MUSB_INTRTXE, int_txe);
1070 /* REVISIT if can_bulk_split(), use by updating "tmp";
1071 * likewise high bandwidth periodic tx
1072 */
1073 /* Set TXMAXP with the FIFO size of the endpoint
1074 * to disable double buffering mode.
1075 */
1076 if (musb->double_buffer_not_ok)
1077 musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx);
1078 else
1079 musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz
1080 | (musb_ep->hb_mult << 11));
1082 csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
1083 if (musb_readw(regs, MUSB_TXCSR)
1084 & MUSB_TXCSR_FIFONOTEMPTY)
1085 csr |= MUSB_TXCSR_FLUSHFIFO;
1086 if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
1087 csr |= MUSB_TXCSR_P_ISO;
1089 /* set twice in case of double buffering */
1090 musb_writew(regs, MUSB_TXCSR, csr);
1091 /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
1092 musb_writew(regs, MUSB_TXCSR, csr);
1094 } else {
1095 u16 int_rxe = musb_readw(mbase, MUSB_INTRRXE);
1097 if (hw_ep->is_shared_fifo)
1098 musb_ep->is_in = 0;
1099 if (musb_ep->is_in)
1100 goto fail;
1102 if (tmp > hw_ep->max_packet_sz_rx) {
1103 dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n");
1104 goto fail;
1105 }
1107 int_rxe |= (1 << epnum);
1108 musb_writew(mbase, MUSB_INTRRXE, int_rxe);
1110 /* REVISIT if can_bulk_combine() use by updating "tmp"
1111 * likewise high bandwidth periodic rx
1112 */
1113 /* Set RXMAXP with the FIFO size of the endpoint
1114 * to disable double buffering mode.
1115 */
1116 if (musb->double_buffer_not_ok)
1117 musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_tx);
1118 else
1119 musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz
1120 | (musb_ep->hb_mult << 11));
1122 /* force shared fifo to OUT-only mode */
1123 if (hw_ep->is_shared_fifo) {
1124 csr = musb_readw(regs, MUSB_TXCSR);
1125 csr &= ~(MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY);
1126 musb_writew(regs, MUSB_TXCSR, csr);
1127 }
1129 csr = MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG;
1130 if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
1131 csr |= MUSB_RXCSR_P_ISO;
1132 else if (musb_ep->type == USB_ENDPOINT_XFER_INT)
1133 csr |= MUSB_RXCSR_DISNYET;
1135 /* set twice in case of double buffering */
1136 musb_writew(regs, MUSB_RXCSR, csr);
1137 musb_writew(regs, MUSB_RXCSR, csr);
1138 }
1140 /* NOTE: all the I/O code _should_ work fine without DMA, in case
1141 * for some reason you run out of channels here.
1142 */
1143 if (is_dma_capable() && musb->dma_controller) {
1144 struct dma_controller *c = musb->dma_controller;
1146 musb_ep->dma = c->channel_alloc(c, hw_ep,
1147 (desc->bEndpointAddress & USB_DIR_IN));
1148 } else
1149 musb_ep->dma = NULL;
1151 musb_ep->desc = desc;
1152 musb_ep->busy = 0;
1153 musb_ep->wedged = 0;
1154 status = 0;
1156 pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n",
1157 musb_driver_name, musb_ep->end_point.name,
1158 ({ char *s; switch (musb_ep->type) {
1159 case USB_ENDPOINT_XFER_BULK: s = "bulk"; break;
1160 case USB_ENDPOINT_XFER_INT: s = "int"; break;
1161 default: s = "iso"; break;
1162 }; s; }),
1163 musb_ep->is_in ? "IN" : "OUT",
1164 musb_ep->dma ? "dma, " : "",
1165 musb_ep->packet_sz);
1167 schedule_work(&musb->irq_work);
1169 fail:
1170 spin_unlock_irqrestore(&musb->lock, flags);
1171 return status;
1172 }
1174 /*
1175 * Disable an endpoint, flushing all queued requests.
1176 */
1177 static int musb_gadget_disable(struct usb_ep *ep)
1178 {
1179 unsigned long flags;
1180 struct musb *musb;
1181 u8 epnum;
1182 struct musb_ep *musb_ep;
1183 void __iomem *epio;
1184 int status = 0;
1186 musb_ep = to_musb_ep(ep);
1187 musb = musb_ep->musb;
1188 epnum = musb_ep->current_epnum;
1189 epio = musb->endpoints[epnum].regs;
1191 spin_lock_irqsave(&musb->lock, flags);
1192 musb_ep_select(musb, musb->mregs, epnum);
1194 /* zero the endpoint sizes */
1195 if (musb_ep->is_in) {
1196 u16 int_txe = musb_readw(musb->mregs, MUSB_INTRTXE);
1197 int_txe &= ~(1 << epnum);
1198 musb_writew(musb->mregs, MUSB_INTRTXE, int_txe);
1199 musb_writew(epio, MUSB_TXMAXP, 0);
1200 } else {
1201 u16 int_rxe = musb_readw(musb->mregs, MUSB_INTRRXE);
1202 int_rxe &= ~(1 << epnum);
1203 musb_writew(musb->mregs, MUSB_INTRRXE, int_rxe);
1204 musb_writew(epio, MUSB_RXMAXP, 0);
1205 }
1207 musb_ep->desc = NULL;
1209 /* abort all pending DMA and requests */
1210 nuke(musb_ep, -ESHUTDOWN);
1212 schedule_work(&musb->irq_work);
1214 spin_unlock_irqrestore(&(musb->lock), flags);
1216 dev_dbg(musb->controller, "%s\n", musb_ep->end_point.name);
1218 return status;
1219 }
1221 /*
1222 * Allocate a request for an endpoint.
1223 * Reused by ep0 code.
1224 */
1225 struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
1226 {
1227 struct musb_ep *musb_ep = to_musb_ep(ep);
1228 struct musb *musb = musb_ep->musb;
1229 struct musb_request *request = NULL;
1231 request = kzalloc(sizeof *request, gfp_flags);
1232 if (!request) {
1233 dev_dbg(musb->controller, "not enough memory\n");
1234 return NULL;
1235 }
1237 request->request.dma = DMA_ADDR_INVALID;
1238 request->epnum = musb_ep->current_epnum;
1239 request->ep = musb_ep;
1241 return &request->request;
1242 }
1244 /*
1245 * Free a request
1246 * Reused by ep0 code.
1247 */
1248 void musb_free_request(struct usb_ep *ep, struct usb_request *req)
1249 {
1250 kfree(to_musb_request(req));
1251 }
1253 static LIST_HEAD(buffers);
1255 struct free_record {
1256 struct list_head list;
1257 struct device *dev;
1258 unsigned bytes;
1259 dma_addr_t dma;
1260 };
1262 /*
1263 * Context: controller locked, IRQs blocked.
1264 */
1265 void musb_ep_restart(struct musb *musb, struct musb_request *req)
1266 {
1267 dev_dbg(musb->controller, "<== %s request %p len %u on hw_ep%d\n",
1268 req->tx ? "TX/IN" : "RX/OUT",
1269 &req->request, req->request.length, req->epnum);
1271 musb_ep_select(musb, musb->mregs, req->epnum);
1272 if (req->tx)
1273 txstate(musb, req);
1274 else
1275 rxstate(musb, req);
1276 }
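/*
 * Illustrative sketch of how a gadget function driver normally reaches
 * musb_gadget_queue(): it uses the generic gadget API, which dispatches
 * through musb_ep_ops below (my_complete(), buf and len are hypothetical):
 *
 *	struct usb_request *r = usb_ep_alloc_request(ep, GFP_ATOMIC);
 *
 *	r->buf = buf;
 *	r->length = len;
 *	r->complete = my_complete;
 *	status = usb_ep_queue(ep, r, GFP_ATOMIC);
 */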
1278 static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
1279 gfp_t gfp_flags)
1280 {
1281 struct musb_ep *musb_ep;
1282 struct musb_request *request;
1283 struct musb *musb;
1284 int status = 0;
1285 unsigned long lockflags;
1287 if (!ep || !req)
1288 return -EINVAL;
1289 if (!req->buf)
1290 return -ENODATA;
1292 musb_ep = to_musb_ep(ep);
1293 musb = musb_ep->musb;
1295 request = to_musb_request(req);
1296 request->musb = musb;
1298 if (request->ep != musb_ep)
1299 return -EINVAL;
1301 dev_dbg(musb->controller, "<== to %s request=%p\n", ep->name, req);
1303 /* request is mine now... */
1304 request->request.actual = 0;
1305 request->request.status = -EINPROGRESS;
1306 request->epnum = musb_ep->current_epnum;
1307 request->tx = musb_ep->is_in;
1309 map_dma_buffer(request, musb, musb_ep);
1311 spin_lock_irqsave(&musb->lock, lockflags);
1313 /* don't queue if the ep is down */
1314 if (!musb_ep->desc) {
1315 dev_dbg(musb->controller, "req %p queued to %s while ep %s\n",
1316 req, ep->name, "disabled");
1317 status = -ESHUTDOWN;
1318 goto cleanup;
1319 }
1321 /* add request to the list */
1322 list_add_tail(&request->list, &musb_ep->req_list);
1324 /* if this is the head of the queue, start i/o ... */
1325 if (!musb_ep->busy && &request->list == musb_ep->req_list.next)
1326 musb_ep_restart(musb, request);
1328 cleanup:
1329 spin_unlock_irqrestore(&musb->lock, lockflags);
1330 return status;
1331 }
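/*
 * Cancel a previously queued request (the usb_ep_dequeue() path); the
 * request is completed with status -ECONNRESET.
 */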
1333 static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
1334 {
1335 struct musb_ep *musb_ep = to_musb_ep(ep);
1336 struct musb_request *req = to_musb_request(request);
1337 struct musb_request *r;
1338 unsigned long flags;
1339 int status = 0;
1340 struct musb *musb = musb_ep->musb;
1342 if (!ep || !request || to_musb_request(request)->ep != musb_ep)
1343 return -EINVAL;
1345 spin_lock_irqsave(&musb->lock, flags);
1347 list_for_each_entry(r, &musb_ep->req_list, list) {
1348 if (r == req)
1349 break;
1350 }
1351 if (r != req) {
1352 dev_dbg(musb->controller, "request %p not queued to %s\n", request, ep->name);
1353 status = -EINVAL;
1354 goto done;
1355 }
1357 /* if the hardware doesn't have the request, easy ... */
1358 if (musb_ep->req_list.next != &req->list || musb_ep->busy)
1359 musb_g_giveback(musb_ep, request, -ECONNRESET);
1361 /* ... else abort the dma transfer ... */
1362 else if (is_dma_capable() && musb_ep->dma) {
1363 struct dma_controller *c = musb->dma_controller;
1365 musb_ep_select(musb, musb->mregs, musb_ep->current_epnum);
1366 if (c->channel_abort)
1367 status = c->channel_abort(musb_ep->dma);
1368 else
1369 status = -EBUSY;
1370 if (status == 0)
1371 musb_g_giveback(musb_ep, request, -ECONNRESET);
1372 } else {
1373 /* NOTE: by sticking to easily tested hardware/driver states,
1374 * we leave counting of in-flight packets imprecise.
1375 */
1376 musb_g_giveback(musb_ep, request, -ECONNRESET);
1377 }
1379 done:
1380 spin_unlock_irqrestore(&musb->lock, flags);
1381 return status;
1382 }
1384 /*
1385 * Set or clear the halt bit of an endpoint. A halted endpoint won't tx/rx any
1386 * data but will queue requests.
1387 *
1388 * exported to ep0 code
1389 */
1390 static int musb_gadget_set_halt(struct usb_ep *ep, int value)
1391 {
1392 struct musb_ep *musb_ep = to_musb_ep(ep);
1393 u8 epnum = musb_ep->current_epnum;
1394 struct musb *musb = musb_ep->musb;
1395 void __iomem *epio = musb->endpoints[epnum].regs;
1396 void __iomem *mbase;
1397 unsigned long flags;
1398 u16 csr;
1399 struct musb_request *request;
1400 int status = 0;
1402 if (!ep)
1403 return -EINVAL;
1404 mbase = musb->mregs;
1406 spin_lock_irqsave(&musb->lock, flags);
1408 if (musb_ep->type == USB_ENDPOINT_XFER_ISOC) {
1409 status = -EINVAL;
1410 goto done;
1411 }
1413 musb_ep_select(musb, mbase, epnum);
1415 request = next_request(musb_ep);
1416 if (value) {
1417 if (request) {
1418 dev_dbg(musb->controller, "request in progress, cannot halt %s\n",
1419 ep->name);
1420 status = -EAGAIN;
1421 goto done;
1422 }
1423 /* Cannot portably stall with non-empty FIFO */
1424 if (musb_ep->is_in) {
1425 csr = musb_readw(epio, MUSB_TXCSR);
1426 if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
1427 dev_dbg(musb->controller, "FIFO busy, cannot halt %s\n", ep->name);
1428 status = -EAGAIN;
1429 goto done;
1430 }
1431 }
1432 } else
1433 musb_ep->wedged = 0;
1435 /* set/clear the stall and toggle bits */
1436 dev_dbg(musb->controller, "%s: %s stall\n", ep->name, value ? "set" : "clear");
1437 if (musb_ep->is_in) {
1438 csr = musb_readw(epio, MUSB_TXCSR);
1439 csr |= MUSB_TXCSR_P_WZC_BITS
1440 | MUSB_TXCSR_CLRDATATOG;
1441 if (value)
1442 csr |= MUSB_TXCSR_P_SENDSTALL;
1443 else
1444 csr &= ~(MUSB_TXCSR_P_SENDSTALL
1445 | MUSB_TXCSR_P_SENTSTALL);
1446 csr &= ~MUSB_TXCSR_TXPKTRDY;
1447 musb_writew(epio, MUSB_TXCSR, csr);
1448 } else {
1449 csr = musb_readw(epio, MUSB_RXCSR);
1450 csr |= MUSB_RXCSR_P_WZC_BITS
1451 | MUSB_RXCSR_FLUSHFIFO
1452 | MUSB_RXCSR_CLRDATATOG;
1453 if (value)
1454 csr |= MUSB_RXCSR_P_SENDSTALL;
1455 else
1456 csr &= ~(MUSB_RXCSR_P_SENDSTALL
1457 | MUSB_RXCSR_P_SENTSTALL);
1458 musb_writew(epio, MUSB_RXCSR, csr);
1459 }
1461 /* maybe start the first request in the queue */
1462 if (!musb_ep->busy && !value && request) {
1463 dev_dbg(musb->controller, "restarting the request\n");
1464 musb_ep_restart(musb, request);
1465 }
1467 done:
1468 spin_unlock_irqrestore(&musb->lock, flags);
1469 return status;
1470 }
1472 /*
1473 * Set the halt feature, ignoring any subsequent clear-halt requests (wedge).
1474 */
1475 static int musb_gadget_set_wedge(struct usb_ep *ep)
1476 {
1477 struct musb_ep *musb_ep = to_musb_ep(ep);
1479 if (!ep)
1480 return -EINVAL;
1482 musb_ep->wedged = 1;
1484 return usb_ep_set_halt(ep);
1485 }
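/*
 * Report how many bytes are pending in an OUT endpoint's FIFO (the
 * usb_ep_fifo_status() path); reads the RXCOUNT register.
 */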
1487 static int musb_gadget_fifo_status(struct usb_ep *ep)
1488 {
1489 struct musb_ep *musb_ep = to_musb_ep(ep);
1490 void __iomem *epio = musb_ep->hw_ep->regs;
1491 int retval = -EINVAL;
1493 if (musb_ep->desc && !musb_ep->is_in) {
1494 struct musb *musb = musb_ep->musb;
1495 int epnum = musb_ep->current_epnum;
1496 void __iomem *mbase = musb->mregs;
1497 unsigned long flags;
1499 spin_lock_irqsave(&musb->lock, flags);
1501 musb_ep_select(musb, mbase, epnum);
1502 /* FIXME return zero unless RXPKTRDY is set */
1503 retval = musb_readw(epio, MUSB_RXCOUNT);
1505 spin_unlock_irqrestore(&musb->lock, flags);
1506 }
1507 return retval;
1508 }
1510 static void musb_gadget_fifo_flush(struct usb_ep *ep)
1511 {
1512 struct musb_ep *musb_ep = to_musb_ep(ep);
1513 struct musb *musb = musb_ep->musb;
1514 u8 epnum = musb_ep->current_epnum;
1515 void __iomem *epio = musb->endpoints[epnum].regs;
1516 void __iomem *mbase;
1517 unsigned long flags;
1518 u16 csr, int_txe;
1520 mbase = musb->mregs;
1522 spin_lock_irqsave(&musb->lock, flags);
1523 musb_ep_select(musb, mbase, (u8) epnum);
1525 /* disable interrupts */
1526 int_txe = musb_readw(mbase, MUSB_INTRTXE);
1527 musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));
1529 if (musb_ep->is_in) {
1530 csr = musb_readw(epio, MUSB_TXCSR);
1531 if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
1532 csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS;
1533 /*
1534 * Setting both TXPKTRDY and FLUSHFIFO makes the controller
1535 * interrupt the current FIFO loading, but not flush
1536 * the already loaded ones.
1537 */
1538 csr &= ~MUSB_TXCSR_TXPKTRDY;
1539 musb_writew(epio, MUSB_TXCSR, csr);
1540 /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
1541 musb_writew(epio, MUSB_TXCSR, csr);
1542 }
1543 } else {
1544 csr = musb_readw(epio, MUSB_RXCSR);
1545 csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS;
1546 musb_writew(epio, MUSB_RXCSR, csr);
1547 musb_writew(epio, MUSB_RXCSR, csr);
1548 }
1550 /* re-enable interrupt */
1551 musb_writew(mbase, MUSB_INTRTXE, int_txe);
1552 spin_unlock_irqrestore(&musb->lock, flags);
1553 }
1555 static const struct usb_ep_ops musb_ep_ops = {
1556 .enable = musb_gadget_enable,
1557 .disable = musb_gadget_disable,
1558 .alloc_request = musb_alloc_request,
1559 .free_request = musb_free_request,
1560 .queue = musb_gadget_queue,
1561 .dequeue = musb_gadget_dequeue,
1562 .set_halt = musb_gadget_set_halt,
1563 .set_wedge = musb_gadget_set_wedge,
1564 .fifo_status = musb_gadget_fifo_status,
1565 .fifo_flush = musb_gadget_fifo_flush
1566 };
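/*
 * These per-endpoint ops are attached to every non-control endpoint in
 * init_peripheral_ep() below; ep0 uses musb_g_ep0_ops instead.
 */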
1568 /* ----------------------------------------------------------------------- */
1570 static int musb_gadget_get_frame(struct usb_gadget *gadget)
1571 {
1572 struct musb *musb = gadget_to_musb(gadget);
1574 return (int)musb_readw(musb->mregs, MUSB_FRAME);
1575 }
1577 static int musb_gadget_wakeup(struct usb_gadget *gadget)
1578 {
1579 struct musb *musb = gadget_to_musb(gadget);
1580 void __iomem *mregs = musb->mregs;
1581 unsigned long flags;
1582 int status = -EINVAL;
1583 u8 power, devctl;
1584 int retries;
1586 spin_lock_irqsave(&musb->lock, flags);
1588 switch (musb->xceiv->state) {
1589 case OTG_STATE_B_PERIPHERAL:
1590 /* NOTE: OTG state machine doesn't include B_SUSPENDED;
1591 * that's part of the standard usb 1.1 state machine, and
1592 * doesn't affect OTG transitions.
1593 */
1594 if (musb->may_wakeup && musb->is_suspended)
1595 break;
1596 goto done;
1597 case OTG_STATE_B_IDLE:
1598 /* Start SRP ... OTG not required. */
1599 devctl = musb_readb(mregs, MUSB_DEVCTL);
1600 dev_dbg(musb->controller, "Sending SRP: devctl: %02x\n", devctl);
1601 devctl |= MUSB_DEVCTL_SESSION;
1602 musb_writeb(mregs, MUSB_DEVCTL, devctl);
1603 devctl = musb_readb(mregs, MUSB_DEVCTL);
1604 retries = 100;
1605 while (!(devctl & MUSB_DEVCTL_SESSION)) {
1606 devctl = musb_readb(mregs, MUSB_DEVCTL);
1607 if (retries-- < 1)
1608 break;
1609 }
1610 retries = 10000;
1611 while (devctl & MUSB_DEVCTL_SESSION) {
1612 devctl = musb_readb(mregs, MUSB_DEVCTL);
1613 if (retries-- < 1)
1614 break;
1615 }
1617 spin_unlock_irqrestore(&musb->lock, flags);
1618 otg_start_srp(musb->xceiv);
1619 spin_lock_irqsave(&musb->lock, flags);
1621 /* Block idling for at least 1s */
1622 musb_platform_try_idle(musb,
1623 jiffies + msecs_to_jiffies(1 * HZ));
1625 status = 0;
1626 goto done;
1627 default:
1628 dev_dbg(musb->controller, "Unhandled wake: %s\n",
1629 otg_state_string(musb->xceiv->state));
1630 goto done;
1631 }
1633 status = 0;
1635 power = musb_readb(mregs, MUSB_POWER);
1636 power |= MUSB_POWER_RESUME;
1637 musb_writeb(mregs, MUSB_POWER, power);
1638 dev_dbg(musb->controller, "issue wakeup\n");
1640 /* FIXME do this next chunk in a timer callback, no udelay */
1641 mdelay(2);
1643 power = musb_readb(mregs, MUSB_POWER);
1644 power &= ~MUSB_POWER_RESUME;
1645 musb_writeb(mregs, MUSB_POWER, power);
1646 done:
1647 spin_unlock_irqrestore(&musb->lock, flags);
1648 return status;
1649 }
1651 static int
1652 musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered)
1653 {
1654 struct musb *musb = gadget_to_musb(gadget);
1656 musb->is_self_powered = !!is_selfpowered;
1657 return 0;
1658 }
1660 static void musb_pullup(struct musb *musb, int is_on)
1661 {
1662 u8 power;
1664 power = musb_readb(musb->mregs, MUSB_POWER);
1665 if (is_on)
1666 power |= MUSB_POWER_SOFTCONN;
1667 else
1668 power &= ~MUSB_POWER_SOFTCONN;
1670 /* FIXME if on, HdrcStart; if off, HdrcStop */
1672 dev_dbg(musb->controller, "gadget D+ pullup %s\n",
1673 is_on ? "on" : "off");
1674 musb_writeb(musb->mregs, MUSB_POWER, power);
1675 }
1677 #if 0
1678 static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active)
1679 {
1680 dev_dbg(musb->controller, "<= %s =>\n", __func__);
1682 /*
1683 * FIXME iff driver's softconnect flag is set (as it is during probe,
1684 * though that can clear it), just musb_pullup().
1685 */
1687 return -EINVAL;
1688 }
1689 #endif
1691 static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
1692 {
1693 struct musb *musb = gadget_to_musb(gadget);
1695 if (!musb->xceiv->set_power)
1696 return -EOPNOTSUPP;
1697 return otg_set_power(musb->xceiv, mA);
1698 }
1700 static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
1701 {
1702 struct musb *musb = gadget_to_musb(gadget);
1703 unsigned long flags;
1705 is_on = !!is_on;
1707 pm_runtime_get_sync(musb->controller);
1709 /* NOTE: this assumes we are sensing vbus; we'd rather
1710 * not pullup unless the B-session is active.
1711 */
1712 spin_lock_irqsave(&musb->lock, flags);
1713 if (is_on != musb->softconnect) {
1714 musb->softconnect = is_on;
1715 musb_pullup(musb, is_on);
1716 }
1717 spin_unlock_irqrestore(&musb->lock, flags);
1719 pm_runtime_put(musb->controller);
1721 return 0;
1722 }
1724 static int musb_gadget_start(struct usb_gadget *g,
1725 struct usb_gadget_driver *driver);
1726 static int musb_gadget_stop(struct usb_gadget *g,
1727 struct usb_gadget_driver *driver);
1729 static const struct usb_gadget_ops musb_gadget_operations = {
1730 .get_frame = musb_gadget_get_frame,
1731 .wakeup = musb_gadget_wakeup,
1732 .set_selfpowered = musb_gadget_set_self_powered,
1733 /* .vbus_session = musb_gadget_vbus_session, */
1734 .vbus_draw = musb_gadget_vbus_draw,
1735 .pullup = musb_gadget_pullup,
1736 .udc_start = musb_gadget_start,
1737 .udc_stop = musb_gadget_stop,
1738 };
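/*
 * The UDC core calls .udc_start/.udc_stop when a gadget driver registers
 * with or unregisters from this controller; see musb_gadget_start() and
 * musb_gadget_stop() below.
 */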
1740 /* ----------------------------------------------------------------------- */
1742 /* Registration */
1744 /* Only this registration code "knows" the rule (from USB standards)
1745 * about there being only one external upstream port. It assumes
1746 * all peripheral ports are external...
1747 */
1749 static void musb_gadget_release(struct device *dev)
1750 {
1751 /* kref_put(WHAT) */
1752 dev_dbg(dev, "%s\n", __func__);
1753 }
1756 static void __devinit
1757 init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in)
1758 {
1759 struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
1761 memset(ep, 0, sizeof *ep);
1763 ep->current_epnum = epnum;
1764 ep->musb = musb;
1765 ep->hw_ep = hw_ep;
1766 ep->is_in = is_in;
1768 INIT_LIST_HEAD(&ep->req_list);
1770 sprintf(ep->name, "ep%d%s", epnum,
1771 (!epnum || hw_ep->is_shared_fifo) ? "" : (
1772 is_in ? "in" : "out"));
1773 ep->end_point.name = ep->name;
1774 INIT_LIST_HEAD(&ep->end_point.ep_list);
1775 if (!epnum) {
1776 ep->end_point.maxpacket = 64;
1777 ep->end_point.ops = &musb_g_ep0_ops;
1778 musb->g.ep0 = &ep->end_point;
1779 } else {
1780 if (is_in)
1781 ep->end_point.maxpacket = hw_ep->max_packet_sz_tx;
1782 else
1783 ep->end_point.maxpacket = hw_ep->max_packet_sz_rx;
1784 ep->end_point.ops = &musb_ep_ops;
1785 list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list);
1786 }
1787 }
1789 /*
1790 * Initialize the endpoints exposed to peripheral drivers, with backlinks
1791 * to the rest of the driver state.
1792 */
1793 static inline void __devinit musb_g_init_endpoints(struct musb *musb)
1794 {
1795 u8 epnum;
1796 struct musb_hw_ep *hw_ep;
1797 unsigned count = 0;
1799 /* initialize endpoint list just once */
1800 INIT_LIST_HEAD(&(musb->g.ep_list));
1802 for (epnum = 0, hw_ep = musb->endpoints;
1803 epnum < musb->nr_endpoints;
1804 epnum++, hw_ep++) {
1805 if (hw_ep->is_shared_fifo /* || !epnum */) {
1806 init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0);
1807 count++;
1808 } else {
1809 if (hw_ep->max_packet_sz_tx) {
1810 init_peripheral_ep(musb, &hw_ep->ep_in,
1811 epnum, 1);
1812 count++;
1813 }
1814 if (hw_ep->max_packet_sz_rx) {
1815 init_peripheral_ep(musb, &hw_ep->ep_out,
1816 epnum, 0);
1817 count++;
1818 }
1819 }
1820 }
1821 }
1823 /* called once during driver setup to initialize and link into
1824 * the driver model; memory is zeroed.
1825 */
1826 int __devinit musb_gadget_setup(struct musb *musb)
1827 {
1828 int status;
1830 /* REVISIT minor race: if (erroneously) setting up two
1831 * musb peripherals at the same time, only the bus lock
1832 * is probably held.
1833 */
1835 musb->g.ops = &musb_gadget_operations;
1836 musb->g.max_speed = USB_SPEED_HIGH;
1837 musb->g.speed = USB_SPEED_UNKNOWN;
1839 /* this "gadget" abstracts/virtualizes the controller */
1840 dev_set_name(&musb->g.dev, "gadget");
1841 musb->g.dev.parent = musb->controller;
1842 musb->g.dev.dma_mask = musb->controller->dma_mask;
1843 musb->g.dev.release = musb_gadget_release;
1844 musb->g.name = musb_driver_name;
1846 if (is_otg_enabled(musb))
1847 musb->g.is_otg = 1;
1849 musb_g_init_endpoints(musb);
1851 musb->is_active = 0;
1852 musb_platform_try_idle(musb, 0);
1854 status = device_register(&musb->g.dev);
1855 if (status != 0) {
1856 put_device(&musb->g.dev);
1857 return status;
1858 }
1859 status = usb_add_gadget_udc(musb->controller, &musb->g);
1860 if (status)
1861 goto err;
1863 return 0;
1864 err:
1865 musb->g.dev.parent = NULL;
1866 device_unregister(&musb->g.dev);
1867 return status;
1868 }
1870 void musb_gadget_cleanup(struct musb *musb)
1871 {
1872 usb_del_gadget_udc(&musb->g);
1873 if (musb->g.dev.parent)
1874 device_unregister(&musb->g.dev);
1875 }
1877 /*
1878 * Register the gadget driver. Used by gadget drivers when
1879 * registering themselves with the controller.
1880 *
1881 * -EINVAL something went wrong (not driver)
1882 * -EBUSY another gadget is already using the controller
1883 * -ENOMEM no memory to perform the operation
1884 *
1885 * @param driver the gadget driver
1886 * @return <0 if error, 0 if everything is fine
1887 */
1888 static int musb_gadget_start(struct usb_gadget *g,
1889 struct usb_gadget_driver *driver)
1890 {
1891 struct musb *musb = gadget_to_musb(g);
1892 unsigned long flags;
1893 int retval = -EINVAL;
1895 if (driver->max_speed < USB_SPEED_HIGH)
1896 goto err0;
1898 pm_runtime_get_sync(musb->controller);
1900 dev_dbg(musb->controller, "registering driver %s\n", driver->function);
1902 musb->softconnect = 0;
1903 musb->gadget_driver = driver;
1905 spin_lock_irqsave(&musb->lock, flags);
1906 musb->is_active = 1;
1908 otg_set_peripheral(musb->xceiv, &musb->g);
1909 musb->xceiv->state = OTG_STATE_B_IDLE;
1911 /*
1912 * FIXME this ignores the softconnect flag. Drivers are
1913 * allowed to hold the peripheral inactive until, for example,
1914 * userspace hooks up printer hardware or DSP codecs, so
1915 * hosts only see fully functional devices.
1916 */
1918 if (!is_otg_enabled(musb))
1919 musb_start(musb);
1921 spin_unlock_irqrestore(&musb->lock, flags);
1923 if (is_otg_enabled(musb)) {
1924 struct usb_hcd *hcd = musb_to_hcd(musb);
1926 dev_dbg(musb->controller, "OTG startup...\n");
1928 /* REVISIT: funcall to other code, which also
1929 * handles power budgeting ... this way also
1930 * ensures HdrcStart is indirectly called.
1931 */
1932 retval = usb_add_hcd(musb_to_hcd(musb), -1, 0);
1933 if (retval < 0) {
1934 dev_dbg(musb->controller, "add_hcd failed, %d\n", retval);
1935 goto err2;
1936 }
1938 if ((musb->xceiv->last_event == USB_EVENT_ID)
1939 && musb->xceiv->set_vbus)
1940 otg_set_vbus(musb->xceiv, 1);
1942 hcd->self.uses_pio_for_control = 1;
1943 }
1944 if (musb->xceiv->last_event == USB_EVENT_NONE)
1945 pm_runtime_put(musb->controller);
1947 return 0;
1949 err2:
1950 if (!is_otg_enabled(musb))
1951 musb_stop(musb);
1952 err0:
1953 return retval;
1954 }
1956 static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver)
1957 {
1958 int i;
1959 struct musb_hw_ep *hw_ep;
1961 /* don't disconnect if it's not connected */
1962 if (musb->g.speed == USB_SPEED_UNKNOWN)
1963 driver = NULL;
1964 else
1965 musb->g.speed = USB_SPEED_UNKNOWN;
1967 /* deactivate the hardware */
1968 if (musb->softconnect) {
1969 musb->softconnect = 0;
1970 musb_pullup(musb, 0);
1971 }
1972 musb_stop(musb);
1974 /* killing any outstanding requests will quiesce the driver;
1975 * then report disconnect
1976 */
1977 if (driver) {
1978 for (i = 0, hw_ep = musb->endpoints;
1979 i < musb->nr_endpoints;
1980 i++, hw_ep++) {
1981 musb_ep_select(musb, musb->mregs, i);
1982 if (hw_ep->is_shared_fifo /* || !epnum */) {
1983 nuke(&hw_ep->ep_in, -ESHUTDOWN);
1984 } else {
1985 if (hw_ep->max_packet_sz_tx)
1986 nuke(&hw_ep->ep_in, -ESHUTDOWN);
1987 if (hw_ep->max_packet_sz_rx)
1988 nuke(&hw_ep->ep_out, -ESHUTDOWN);
1989 }
1990 }
1991 }
1992 }
1994 /*
1995 * Unregister the gadget driver. Used by gadget drivers when
1996 * unregistering themselves from the controller.
1997 *
1998 * @param driver the gadget driver to unregister
1999 */
2000 static int musb_gadget_stop(struct usb_gadget *g,
2001 struct usb_gadget_driver *driver)
2002 {
2003 struct musb *musb = gadget_to_musb(g);
2004 unsigned long flags;
2006 if (musb->xceiv->last_event == USB_EVENT_NONE)
2007 pm_runtime_get_sync(musb->controller);
2009 /*
2010 * REVISIT always use otg_set_peripheral() here too;
2011 * this needs to shut down the OTG engine.
2012 */
2014 spin_lock_irqsave(&musb->lock, flags);
2016 if (is_otg_enabled(musb))
2017 musb_hnp_stop(musb);
2019 (void) musb_gadget_vbus_draw(&musb->g, 0);
2021 musb->xceiv->state = OTG_STATE_UNDEFINED;
2022 stop_activity(musb, driver);
2023 otg_set_peripheral(musb->xceiv, NULL);
2025 dev_dbg(musb->controller, "unregistering driver %s\n", driver->function);
2027 musb->is_active = 0;
2028 musb_platform_try_idle(musb, 0);
2029 spin_unlock_irqrestore(&musb->lock, flags);
2031 if (is_otg_enabled(musb)) {
2032 usb_remove_hcd(musb_to_hcd(musb));
2033 /* FIXME we need to be able to register another
2034 * gadget driver here and have everything work;
2035 * that currently misbehaves.
2036 */
2037 }
2039 if (!is_otg_enabled(musb))
2040 musb_stop(musb);
2042 pm_runtime_put(musb->controller);
2044 return 0;
2045 }
2047 /* ----------------------------------------------------------------------- */
2049 /* lifecycle operations called through plat_uds.c */
2051 void musb_g_resume(struct musb *musb)
2052 {
2053 musb->is_suspended = 0;
2054 switch (musb->xceiv->state) {
2055 case OTG_STATE_B_IDLE:
2056 break;
2057 case OTG_STATE_B_WAIT_ACON:
2058 case OTG_STATE_B_PERIPHERAL:
2059 musb->is_active = 1;
2060 if (musb->gadget_driver && musb->gadget_driver->resume) {
2061 spin_unlock(&musb->lock);
2062 musb->gadget_driver->resume(&musb->g);
2063 spin_lock(&musb->lock);
2064 }
2065 break;
2066 default:
2067 WARNING("unhandled RESUME transition (%s)\n",
2068 otg_state_string(musb->xceiv->state));
2069 }
2070 }
2072 /* called when SOF packets stop for 3+ msec */
2073 void musb_g_suspend(struct musb *musb)
2074 {
2075 u8 devctl;
2077 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
2078 dev_dbg(musb->controller, "devctl %02x\n", devctl);
2080 switch (musb->xceiv->state) {
2081 case OTG_STATE_B_IDLE:
2082 if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
2083 musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
2084 break;
2085 case OTG_STATE_B_PERIPHERAL:
2086 musb->is_suspended = 1;
2087 if (musb->gadget_driver && musb->gadget_driver->suspend) {
2088 spin_unlock(&musb->lock);
2089 musb->gadget_driver->suspend(&musb->g);
2090 spin_lock(&musb->lock);
2091 }
2092 break;
2093 default:
2094 /* REVISIT if B_HOST, clear DEVCTL.HOSTREQ;
2095 * A_PERIPHERAL may need care too
2096 */
2097 WARNING("unhandled SUSPEND transition (%s)\n",
2098 otg_state_string(musb->xceiv->state));
2099 }
2100 }
2102 /* Called during SRP */
2103 void musb_g_wakeup(struct musb *musb)
2104 {
2105 musb_gadget_wakeup(&musb->g);
2106 }
2108 /* called when VBUS drops below session threshold, and in other cases */
2109 void musb_g_disconnect(struct musb *musb)
2110 {
2111 void __iomem *mregs = musb->mregs;
2112 u8 devctl = musb_readb(mregs, MUSB_DEVCTL);
2114 dev_dbg(musb->controller, "devctl %02x\n", devctl);
2116 /* clear HR */
2117 musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION);
2119 /* don't draw vbus until new b-default session */
2120 (void) musb_gadget_vbus_draw(&musb->g, 0);
2122 musb->g.speed = USB_SPEED_UNKNOWN;
2123 if (musb->gadget_driver && musb->gadget_driver->disconnect) {
2124 spin_unlock(&musb->lock);
2125 musb->gadget_driver->disconnect(&musb->g);
2126 spin_lock(&musb->lock);
2127 }
2129 switch (musb->xceiv->state) {
2130 default:
2131 if (is_otg_enabled(musb)) {
2132 dev_dbg(musb->controller, "Unhandled disconnect %s, setting a_idle\n",
2133 otg_state_string(musb->xceiv->state));
2134 musb->xceiv->state = OTG_STATE_A_IDLE;
2135 break;
2136 }
2137 case OTG_STATE_A_PERIPHERAL:
2138 if (is_otg_enabled(musb))
2139 musb->xceiv->state = OTG_STATE_A_WAIT_VFALL;
2140 break;
2141 case OTG_STATE_B_WAIT_ACON:
2142 case OTG_STATE_B_HOST:
2143 if (!is_otg_enabled(musb))
2144 break;
2145 case OTG_STATE_B_PERIPHERAL:
2146 case OTG_STATE_B_IDLE:
2147 musb->xceiv->state = OTG_STATE_B_IDLE;
2148 break;
2149 case OTG_STATE_B_SRP_INIT:
2150 break;
2151 }
2153 musb->is_active = 0;
2154 }
2156 void musb_g_reset(struct musb *musb)
2157 __releases(musb->lock)
2158 __acquires(musb->lock)
2159 {
2160 void __iomem *mbase = musb->mregs;
2161 u8 devctl = musb_readb(mbase, MUSB_DEVCTL);
2162 u8 power;
2164 dev_dbg(musb->controller, "<== %s addr=%x driver '%s'\n",
2165 (devctl & MUSB_DEVCTL_BDEVICE)
2166 ? "B-Device" : "A-Device",
2167 musb_readb(mbase, MUSB_FADDR),
2168 musb->gadget_driver
2169 ? musb->gadget_driver->driver.name
2170 : NULL
2171 );
2173 /* report disconnect, if we didn't already (flushing EP state) */
2174 if (musb->g.speed != USB_SPEED_UNKNOWN)
2175 musb_g_disconnect(musb);
2177 /* clear HR */
2178 else if (devctl & MUSB_DEVCTL_HR)
2179 musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
2182 /* what speed did we negotiate? */
2183 power = musb_readb(mbase, MUSB_POWER);
2184 musb->g.speed = (power & MUSB_POWER_HSMODE)
2185 ? USB_SPEED_HIGH : USB_SPEED_FULL;
2187 /* start in USB_STATE_DEFAULT */
2188 musb->is_active = 1;
2189 musb->is_suspended = 0;
2190 MUSB_DEV_MODE(musb);
2191 musb->address = 0;
2192 musb->ep0_state = MUSB_EP0_STAGE_SETUP;
2194 musb->may_wakeup = 0;
2195 musb->g.b_hnp_enable = 0;
2196 musb->g.a_alt_hnp_support = 0;
2197 musb->g.a_hnp_support = 0;
2199 /* Normal reset, as B-Device;
2200 * or else after HNP, as A-Device
2201 */
2202 if (devctl & MUSB_DEVCTL_BDEVICE) {
2203 musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
2204 musb->g.is_a_peripheral = 0;
2205 } else if (is_otg_enabled(musb)) {
2206 musb->xceiv->state = OTG_STATE_A_PERIPHERAL;
2207 musb->g.is_a_peripheral = 1;
2208 } else
2209 WARN_ON(1);
2211 /* start with default limits on VBUS power draw */
2212 (void) musb_gadget_vbus_draw(&musb->g,
2213 is_otg_enabled(musb) ? 8 : 100);
2214 }