1 /*
2 * MUSB OTG driver peripheral support
3 *
4 * Copyright 2005 Mentor Graphics Corporation
5 * Copyright (C) 2005-2006 by Texas Instruments
6 * Copyright (C) 2006-2007 Nokia Corporation
7 * Copyright (C) 2009 MontaVista Software, Inc. <source@mvista.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
21 * 02110-1301 USA
22 *
23 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
24 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
25 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
26 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
29 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
30 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 */
36 #include <linux/kernel.h>
37 #include <linux/list.h>
38 #include <linux/timer.h>
39 #include <linux/module.h>
40 #include <linux/smp.h>
41 #include <linux/spinlock.h>
42 #include <linux/delay.h>
43 #include <linux/dma-mapping.h>
44 #include <linux/slab.h>
46 #include "musb_core.h"
49 /* MUSB PERIPHERAL status 3-mar-2006:
50 *
51 * - EP0 seems solid. It passes both USBCV and usbtest control cases.
52 * Minor glitches:
53 *
54 *     + remote wakeup to Linux hosts works, but USBCV failures were
55 *       seen in one test run (operator error?)
56 * + endpoint halt tests -- in both usbtest and usbcv -- seem
57 * to break when dma is enabled ... is something wrongly
58 * clearing SENDSTALL?
59 *
60 * - Mass storage behaved ok when last tested. Network traffic patterns
61 * (with lots of short transfers etc) need retesting; they turn up the
62 * worst cases of the DMA, since short packets are typical but are not
63 * required.
64 *
65 * - TX/IN
66 *     + both pio and dma behave well with network and g_zero tests
67 * + no cppi throughput issues other than no-hw-queueing
68 * + failed with FLAT_REG (DaVinci)
69 * + seems to behave with double buffering, PIO -and- CPPI
70 * + with gadgetfs + AIO, requests got lost?
71 *
72 * - RX/OUT
73 *     + both pio and dma behave well with network and g_zero tests
74 * + dma is slow in typical case (short_not_ok is clear)
75 * + double buffering ok with PIO
76 * + double buffering *FAILS* with CPPI, wrong data bytes sometimes
77 * + request lossage observed with gadgetfs
78 *
79 * - ISO not tested ... might work, but only weakly isochronous
80 *
81 * - Gadget driver disabling of softconnect during bind() is ignored; so
82 * drivers can't hold off host requests until userspace is ready.
83 * (Workaround: they can turn it off later.)
84 *
85 * - PORTABILITY (assumes PIO works):
86 * + DaVinci, basically works with cppi dma
87 * + OMAP 2430, ditto with mentor dma
88 * + TUSB 6010, platform-specific dma in the works
89 */
91 /* ----------------------------------------------------------------------- */
93 #define is_buffer_mapped(req) (is_dma_capable() && \
94 (req->map_state != UN_MAPPED))
96 /* Maps the buffer to dma */
98 static inline void map_dma_buffer(struct musb_request *request,
99 struct musb *musb, struct musb_ep *musb_ep)
100 {
101 int compatible = true;
102 struct dma_controller *dma = musb->dma_controller;
104 request->map_state = UN_MAPPED;
106 if (!is_dma_capable() || !musb_ep->dma)
107 return;
109 /* Check if DMA engine can handle this request.
110 * DMA code must reject the USB request explicitly.
111 * Default behaviour is to map the request.
112 */
113 if (dma->is_compatible)
114 compatible = dma->is_compatible(musb_ep->dma,
115 musb_ep->packet_sz, request->request.buf,
116 request->request.length);
117 if (!compatible)
118 return;
120 if (request->request.dma == DMA_ADDR_INVALID) {
121 request->request.dma = dma_map_single(
122 musb->controller,
123 request->request.buf,
124 request->request.length,
125 request->tx
126 ? DMA_TO_DEVICE
127 : DMA_FROM_DEVICE);
128 request->map_state = MUSB_MAPPED;
129 } else {
130 dma_sync_single_for_device(musb->controller,
131 request->request.dma,
132 request->request.length,
133 request->tx
134 ? DMA_TO_DEVICE
135 : DMA_FROM_DEVICE);
136 request->map_state = PRE_MAPPED;
137 }
138 }
140 /* Unmap the buffer from dma and map it back to the cpu */
141 static inline void unmap_dma_buffer(struct musb_request *request,
142 struct musb *musb)
143 {
144 if (!is_buffer_mapped(request))
145 return;
147 if (request->request.dma == DMA_ADDR_INVALID) {
148 dev_vdbg(musb->controller,
149 "not unmapping a never mapped buffer\n");
150 return;
151 }
152 if (request->map_state == MUSB_MAPPED) {
153 dma_unmap_single(musb->controller,
154 request->request.dma,
155 request->request.length,
156 request->tx
157 ? DMA_TO_DEVICE
158 : DMA_FROM_DEVICE);
159 request->request.dma = DMA_ADDR_INVALID;
160 } else { /* PRE_MAPPED */
161 dma_sync_single_for_cpu(musb->controller,
162 request->request.dma,
163 request->request.length,
164 request->tx
165 ? DMA_TO_DEVICE
166 : DMA_FROM_DEVICE);
167 }
168 request->map_state = UN_MAPPED;
169 }
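/*
 * Reader aid, summarizing the map_state lifecycle implemented by the two
 * helpers above (derived from that code, not an additional contract):
 *
 *   UN_MAPPED   - request->buf is used directly by PIO; nothing to undo.
 *   MUSB_MAPPED - map_dma_buffer() called dma_map_single() itself, so
 *                 unmap_dma_buffer() must dma_unmap_single() and reset
 *                 request->request.dma to DMA_ADDR_INVALID.
 *   PRE_MAPPED  - the caller supplied a valid request->dma already, so
 *                 only dma_sync_single_for_{device,cpu}() are used and
 *                 the mapping itself is left alone.
 */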
171 /*
172 * Immediately complete a request.
173 *
174 * @param request the request to complete
175 * @param status the status to complete the request with
176 * Context: controller locked, IRQs blocked.
177 */
178 void musb_g_giveback(
179 struct musb_ep *ep,
180 struct usb_request *request,
181 int status)
182 __releases(ep->musb->lock)
183 __acquires(ep->musb->lock)
184 {
185 struct musb_request *req;
186 struct musb *musb;
187 int busy = ep->busy;
189 req = to_musb_request(request);
191 list_del(&req->list);
192 if (req->request.status == -EINPROGRESS)
193 req->request.status = status;
194 musb = req->musb;
196 ep->busy = 1;
197 spin_unlock(&musb->lock);
198 unmap_dma_buffer(req, musb);
199 if (request->status == 0)
200 dev_dbg(musb->controller, "%s done request %p, %d/%d\n",
201 ep->end_point.name, request,
202 req->request.actual, req->request.length);
203 else
204 dev_dbg(musb->controller, "%s request %p, %d/%d fault %d\n",
205 ep->end_point.name, request,
206 req->request.actual, req->request.length,
207 request->status);
208 req->request.complete(&req->ep->end_point, &req->request);
209 spin_lock(&musb->lock);
210 ep->busy = busy;
211 }
213 /* ----------------------------------------------------------------------- */
215 /*
216 * Abort requests queued to an endpoint, completing each with the given status.
217 * Synchronous. The caller has locked the controller, blocked irqs, and selected this ep.
218 */
219 static void nuke(struct musb_ep *ep, const int status)
220 {
221 struct musb *musb = ep->musb;
222 struct musb_request *req = NULL;
223 void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs;
225 ep->busy = 1;
227 if (is_dma_capable() && ep->dma) {
228 struct dma_controller *c = ep->musb->dma_controller;
229 int value;
231 if (ep->is_in) {
232 /*
233 * The programming guide says that we must not clear
234 * the DMAMODE bit before DMAENAB, so we only
235 * clear it in the second write...
236 */
237 musb_writew(epio, MUSB_TXCSR,
238 MUSB_TXCSR_DMAMODE | MUSB_TXCSR_FLUSHFIFO);
239 musb_writew(epio, MUSB_TXCSR,
240 0 | MUSB_TXCSR_FLUSHFIFO);
241 } else {
242 musb_writew(epio, MUSB_RXCSR,
243 0 | MUSB_RXCSR_FLUSHFIFO);
244 musb_writew(epio, MUSB_RXCSR,
245 0 | MUSB_RXCSR_FLUSHFIFO);
246 }
248 value = c->channel_abort(ep->dma);
249 dev_dbg(musb->controller, "%s: abort DMA --> %d\n",
250 ep->name, value);
251 c->channel_release(ep->dma);
252 ep->dma = NULL;
253 }
255 while (!list_empty(&ep->req_list)) {
256 req = list_first_entry(&ep->req_list, struct musb_request, list);
257 musb_g_giveback(ep, &req->request, status);
258 }
259 }
261 /* ----------------------------------------------------------------------- */
263 /* Data transfers - pure PIO, pure DMA, or mixed mode */
265 /*
266 * This assumes the separate CPPI engine is responding to DMA requests
267 * from the usb core ... sequenced a bit differently from mentor dma.
268 */
270 static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep)
271 {
272 if (can_bulk_split(musb, ep->type))
273 return ep->hw_ep->max_packet_sz_tx;
274 else
275 return ep->packet_sz;
276 }
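/*
 * When can_bulk_split() is true the hardware can split one large FIFO
 * load into multiple wire-sized bulk packets, so writes are sized to the
 * hw_ep TX FIFO (e.g. 4096 bytes) rather than to the 512-byte USB
 * packet_sz.  (Illustrative sizes only; the FIFO layout is board-specific.)
 */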
279 /* Peripheral tx (IN) using Mentor DMA works as follows:
280 Only mode 0 is used for transfers <= wPktSize,
281 mode 1 is used for larger transfers.
283 One of the following happens:
284 - Host sends IN token which causes an endpoint interrupt
285 -> TxAvail
286 -> if DMA is currently busy, exit.
287 -> if queue is non-empty, txstate().
289 - Request is queued by the gadget driver.
290 -> if queue was previously empty, txstate()
292 txstate()
293 -> start
294 /\ -> setup DMA
295 | (data is transferred to the FIFO, then sent out when
296 | IN token(s) are received from the Host.)
297 | -> DMA interrupt on completion
298 | calls TxAvail.
299 | -> stop DMA, ~DMAENAB,
300 | -> set TxPktRdy for last short pkt or zlp
301 | -> Complete Request
302 | -> Continue next request (call txstate)
303 |___________________________________|
305 * Non-Mentor DMA engines can of course work differently, such as by
306 * upleveling from irq-per-packet to irq-per-buffer.
307 */
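/*
 * Worked example (illustrative only): with wPktSize = 512, a 1536-byte
 * request uses mode 1; DMA fills full 512-byte packets and the TxAvail
 * interrupt after DMA completion sets TxPktRdy for any final short packet
 * or ZLP before the request is completed.  A 64-byte request on the same
 * endpoint would use mode 0 instead.
 */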
309 /*
310 * An endpoint is transmitting data. This can be called either from
311 * the IRQ routine or from ep.queue() to kickstart a request on an
312 * endpoint.
313 *
314 * Context: controller locked, IRQs blocked, endpoint selected
315 */
316 static void txstate(struct musb *musb, struct musb_request *req)
317 {
318 u8 epnum = req->epnum;
319 struct musb_ep *musb_ep;
320 void __iomem *epio = musb->endpoints[epnum].regs;
321 struct usb_request *request;
322 u16 fifo_count = 0, csr;
323 int use_dma = 0;
325 musb_ep = req->ep;
327 /* we shouldn't get here while DMA is active ... but we do ... */
328 if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
329 dev_dbg(musb->controller, "dma pending...\n");
330 return;
331 }
333 /* read TXCSR before */
334 csr = musb_readw(epio, MUSB_TXCSR);
336 request = &req->request;
337 fifo_count = min(max_ep_writesize(musb, musb_ep),
338 (int)(request->length - request->actual));
340 if (csr & MUSB_TXCSR_TXPKTRDY) {
341 dev_dbg(musb->controller, "%s old packet still ready, txcsr %03x\n",
342 musb_ep->end_point.name, csr);
343 return;
344 }
346 if (csr & MUSB_TXCSR_P_SENDSTALL) {
347 dev_dbg(musb->controller, "%s stalling, txcsr %03x\n",
348 musb_ep->end_point.name, csr);
349 return;
350 }
352 dev_dbg(musb->controller, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n",
353 epnum, musb_ep->packet_sz, fifo_count,
354 csr);
356 #ifndef CONFIG_MUSB_PIO_ONLY
357 if (is_buffer_mapped(req)) {
358 struct dma_controller *c = musb->dma_controller;
359 size_t request_size;
361 /* setup DMA, then program endpoint CSR */
362 request_size = min_t(size_t, request->length - request->actual,
363 musb_ep->dma->max_len);
365 use_dma = (request->dma != DMA_ADDR_INVALID);
367 /* MUSB_TXCSR_P_ISO is still set correctly */
369 if (is_inventra_dma(musb) || is_ux500_dma(musb)) {
370 if (request_size < musb_ep->packet_sz)
371 musb_ep->dma->desired_mode = 0;
372 else
373 musb_ep->dma->desired_mode = 1;
375 use_dma = use_dma && c->channel_program(
376 musb_ep->dma, musb_ep->packet_sz,
377 musb_ep->dma->desired_mode,
378 request->dma + request->actual, request_size);
379 if (use_dma) {
380 if (musb_ep->dma->desired_mode == 0) {
381 /*
382 * We must not clear the DMAMODE bit
383 * before the DMAENAB bit -- and the
384 * latter doesn't always get cleared
385 * before we get here...
386 */
387 csr &= ~(MUSB_TXCSR_AUTOSET
388 | MUSB_TXCSR_DMAENAB);
389 musb_writew(epio, MUSB_TXCSR, csr
390 | MUSB_TXCSR_P_WZC_BITS);
391 csr &= ~MUSB_TXCSR_DMAMODE;
392 csr |= (MUSB_TXCSR_DMAENAB |
393 MUSB_TXCSR_MODE);
394 /* against programming guide */
395 } else {
396 csr |= (MUSB_TXCSR_DMAENAB
397 | MUSB_TXCSR_DMAMODE
398 | MUSB_TXCSR_MODE);
399 if (!musb_ep->hb_mult)
400 csr |= MUSB_TXCSR_AUTOSET;
401 }
402 csr &= ~MUSB_TXCSR_P_UNDERRUN;
404 musb_writew(epio, MUSB_TXCSR, csr);
405 }
406 } else if (is_cppi_enabled(musb) || is_cppi41_enabled(musb)) {
407 /* program endpoint CSR first, then setup DMA */
408 csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
410 if (request_size == 0)
411 csr &= ~(MUSB_TXCSR_DMAENAB |
412 MUSB_TXCSR_DMAMODE);
413 else
414 csr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE |
415 MUSB_TXCSR_MODE;
416 musb_writew(epio, MUSB_TXCSR,
417 (MUSB_TXCSR_P_WZC_BITS & ~MUSB_TXCSR_P_UNDERRUN)
418 | csr);
420 /* ensure writebuffer is empty */
421 csr = musb_readw(epio, MUSB_TXCSR);
423 /* NOTE host side sets DMAENAB later than this; both are
424 * OK since the transfer dma glue (between CPPI & Mentor
425 * fifos) just tells CPPI it could start. Data only
426 * moves to the USB TX fifo when both fifos are ready.
427 */
429 /* "mode" is irrelevant here; handle terminating ZLPs
430 * like PIO does, since the hardware RNDIS mode seems
431 * unreliable except for the last-packet-is-already-
432 * short case.
433 */
434 /* for zero byte transfer use pio mode */
436 /* Use pio mode for interrupt transfers of size <= 64
437 * bytes. We have seen the TxFiFoEmpty workqueue going into
438 * an infinite loop when a CDC device is connected to
439 * another EVM. */
441 if ((request_size == 0) || (request_size <= 64 &&
442 musb_ep->type == USB_ENDPOINT_XFER_INT)) {
443 use_dma = 0;
444 } else {
445 use_dma = use_dma && c->channel_program(
446 musb_ep->dma, musb_ep->packet_sz,
447 0,
448 request->dma + request->actual,
449 request_size);
450 if (!use_dma) {
451 c->channel_release(musb_ep->dma);
452 musb_ep->dma = NULL;
453 csr &= ~MUSB_TXCSR_DMAENAB;
454 musb_writew(epio, MUSB_TXCSR, csr);
455 /* invariant: request->buf is non-null */
456 }
457 }
458 } else if (tusb_dma_omap(musb)) {
459 use_dma = use_dma && c->channel_program(
460 musb_ep->dma, musb_ep->packet_sz,
461 request->zero,
462 request->dma + request->actual,
463 request_size);
464 }
465 }
466 #endif
468 if (!use_dma) {
469 /*
470 * Unmap the dma buffer back to cpu if dma channel
471 * programming fails
472 */
473 unmap_dma_buffer(req, musb);
475 musb->ops->write_fifo(musb_ep->hw_ep, fifo_count,
476 (u8 *) (request->buf + request->actual));
477 request->actual += fifo_count;
478 csr |= MUSB_TXCSR_TXPKTRDY;
479 csr &= ~MUSB_TXCSR_P_UNDERRUN;
480 musb_writew(epio, MUSB_TXCSR, csr);
481 }
483 /* host may already have the data when this message shows... */
484 dev_dbg(musb->controller, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n",
485 musb_ep->end_point.name, use_dma ? "dma" : "pio",
486 request->actual, request->length,
487 musb_readw(epio, MUSB_TXCSR),
488 fifo_count,
489 musb_readw(epio, MUSB_TXMAXP));
490 }
492 /*
493 * FIFO state update (e.g. data ready).
494 * Called from IRQ, with controller locked.
495 */
496 void musb_g_tx(struct musb *musb, u8 epnum)
497 {
498 u16 csr;
499 struct musb_request *req;
500 struct usb_request *request;
501 u8 __iomem *mbase = musb->mregs;
502 struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_in;
503 void __iomem *epio = musb->endpoints[epnum].regs;
504 struct dma_channel *dma;
506 musb_ep_select(musb, mbase, epnum);
507 req = next_request(musb_ep);
508 request = &req->request;
510 csr = musb_readw(epio, MUSB_TXCSR);
511 dev_dbg(musb->controller, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr);
513 dma = is_dma_capable() ? musb_ep->dma : NULL;
515 /*
516 * REVISIT: for high bandwidth, MUSB_TXCSR_P_INCOMPTX
517 * probably rates reporting as a host error.
518 */
519 if (csr & MUSB_TXCSR_P_SENTSTALL) {
520 csr |= MUSB_TXCSR_P_WZC_BITS;
521 csr &= ~MUSB_TXCSR_P_SENTSTALL;
522 musb_writew(epio, MUSB_TXCSR, csr);
523 return;
524 }
526 if (csr & MUSB_TXCSR_P_UNDERRUN) {
527 /* We NAKed, no big deal... little reason to care. */
528 csr |= MUSB_TXCSR_P_WZC_BITS;
529 csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
530 musb_writew(epio, MUSB_TXCSR, csr);
531 dev_vdbg(musb->controller, "underrun on ep%d, req %p\n",
532 epnum, request);
533 }
535 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
536 /*
537 * SHOULD NOT HAPPEN... but it has with CPPI, after
538 * changing SENDSTALL (and other cases); harmless?
539 */
540 dev_dbg(musb->controller, "%s dma still busy?\n", musb_ep->end_point.name);
541 return;
542 }
544 if (request) {
545 u8 is_dma = 0;
547 if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
548 is_dma = 1;
549 csr |= MUSB_TXCSR_P_WZC_BITS;
550 csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN |
551 MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET);
552 musb_writew(epio, MUSB_TXCSR, csr);
553 /* Ensure writebuffer is empty. */
554 csr = musb_readw(epio, MUSB_TXCSR);
555 request->actual += musb_ep->dma->actual_len;
556 dev_dbg(musb->controller, "TXCSR%d %04x, DMA off, len %zu, req %p\n",
557 epnum, csr, musb_ep->dma->actual_len, request);
558 }
560 /*
561 * First, maybe a terminating short packet. Some DMA
562 * engines might handle this by themselves.
563 */
564 if ((request->zero && request->length
565 && (request->length % musb_ep->packet_sz == 0)
566 && (request->actual == request->length))
567 || ((is_inventra_dma(musb) || is_ux500_dma(musb)) &&
568 is_dma && (!dma->desired_mode || (request->actual &
569 (musb_ep->packet_sz - 1))))
570 ) {
571 /*
572 * On DMA completion, FIFO may not be
573 * available yet...
574 */
575 if (csr & MUSB_TXCSR_TXPKTRDY)
576 return;
578 dev_dbg(musb->controller, "sending zero pkt\n");
579 musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_MODE
580 | MUSB_TXCSR_TXPKTRDY);
581 request->zero = 0;
582 }
584 if (request->actual == request->length) {
585 musb_g_giveback(musb_ep, request, 0);
586 req = musb_ep->desc ? next_request(musb_ep) : NULL;
587 if (!req) {
588 dev_dbg(musb->controller, "%s idle now\n",
589 musb_ep->end_point.name);
590 return;
591 }
592 }
594 txstate(musb, req);
595 }
596 }
598 /* ------------------------------------------------------------ */
600 /* Peripheral rx (OUT) using Mentor DMA works as follows:
601 - Only mode 0 is used.
603 - Request is queued by the gadget class driver.
604 -> if queue was previously empty, rxstate()
606 - Host sends OUT token which causes an endpoint interrupt
607 /\ -> RxReady
608 | -> if request queued, call rxstate
609 | /\ -> setup DMA
610 | | -> DMA interrupt on completion
611 | | -> RxReady
612 | | -> stop DMA
613 | | -> ack the read
614 | | -> if data recd = max expected
615 | | by the request, or host
616 | | sent a short packet,
617 | | complete the request,
618 | | and start the next one.
619 | |_____________________________________|
620 | else just wait for the host
621 | to send the next OUT token.
622 |__________________________________________________|
624 * Non-Mentor DMA engines can of course work differently.
625 */
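/*
 * Worked example (illustrative only): a 4096-byte OUT request on a
 * 512-byte bulk endpoint completes after eight full packets, or earlier
 * if the host terminates the transfer with a short packet; in that case
 * RxReady stops DMA, the remaining bytes are read out, and the request is
 * given back before the next queued request is started.
 */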
627 /*
628 * Context: controller locked, IRQs blocked, endpoint selected
629 */
630 static void rxstate(struct musb *musb, struct musb_request *req)
631 {
632 const u8 epnum = req->epnum;
633 struct usb_request *request = &req->request;
634 struct musb_ep *musb_ep;
635 void __iomem *epio = musb->endpoints[epnum].regs;
636 unsigned fifo_count = 0;
637 u16 len;
638 u16 csr = musb_readw(epio, MUSB_RXCSR);
639 struct musb_hw_ep *hw_ep = &musb->endpoints[epnum];
640 u8 use_mode_1;
642 if (hw_ep->is_shared_fifo)
643 musb_ep = &hw_ep->ep_in;
644 else
645 musb_ep = &hw_ep->ep_out;
647 len = musb_ep->packet_sz;
649 /* We shouldn't get here while DMA is active, but we do... */
650 if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
651 dev_dbg(musb->controller, "DMA pending...\n");
652 return;
653 }
655 if (csr & MUSB_RXCSR_P_SENDSTALL) {
656 dev_dbg(musb->controller, "%s stalling, RXCSR %04x\n",
657 musb_ep->end_point.name, csr);
658 return;
659 }
661 if ((is_cppi_enabled(musb) || is_cppi41_enabled(musb)) &&
662 is_buffer_mapped(req)) {
663 struct dma_controller *c = musb->dma_controller;
664 struct dma_channel *channel = musb_ep->dma;
666 /* NOTE: CPPI won't actually stop advancing the DMA
667 * queue after short packet transfers, so this is almost
668 * always going to run as IRQ-per-packet DMA so that
669 * faults will be handled correctly.
670 */
671 if (c->channel_program(channel,
672 musb_ep->packet_sz,
673 !request->short_not_ok,
674 request->dma + request->actual,
675 request->length - request->actual)) {
677 /* make sure that if an rxpkt arrived after the irq,
678 * the cppi engine will be ready to take it as soon
679 * as DMA is enabled
680 */
681 csr &= ~(MUSB_RXCSR_AUTOCLEAR
682 | MUSB_RXCSR_DMAMODE);
683 csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_P_WZC_BITS;
684 musb_writew(epio, MUSB_RXCSR, csr);
685 return;
686 }
687 }
689 if (csr & MUSB_RXCSR_RXPKTRDY) {
690 len = musb_readw(epio, MUSB_RXCOUNT);
692 /*
693 * Enable Mode 1 on RX transfers only when short_not_ok flag
694 * is set. Currently the short_not_ok flag is set only by the
695 * file_storage and f_mass_storage drivers.
696 */
698 if (request->short_not_ok && len == musb_ep->packet_sz)
699 use_mode_1 = 1;
700 else
701 use_mode_1 = 0;
703 if (request->actual < request->length) {
704 if (is_buffer_mapped(req) && is_inventra_dma(musb)) {
705 struct dma_controller *c;
706 struct dma_channel *channel;
707 int use_dma = 0;
709 c = musb->dma_controller;
710 channel = musb_ep->dma;
712 /* We use DMA Req mode 0 in rx_csr, and DMA controller operates in
713 * mode 0 only. So we do not get endpoint interrupts due to DMA
714 * completion. We only get interrupts from DMA controller.
715 *
716 * We could operate in DMA mode 1 if we knew the size of the transfer
717 * in advance. For mass storage class, request->length = what the host
718 * sends, so that'd work. But for pretty much everything else,
719 * request->length is routinely more than what the host sends. For
720 * most of these gadgets, the end of a transfer is signified either by a
721 * short packet, or by filling the last byte of the buffer. (Sending extra
722 * data in that last packet should trigger an overflow fault.) But in mode 1,
723 * we don't get DMA completion interrupt for short packets.
724 *
725 * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1),
726 * to get endpoint interrupt on every DMA req, but that didn't seem
727 * to work reliably.
728 *
729 * REVISIT an updated g_file_storage can set req->short_not_ok, which
730 * then becomes usable as a runtime "use mode 1" hint...
731 */
733 /* Experimental: Mode1 works with mass storage use cases */
734 if (use_mode_1) {
735 csr |= MUSB_RXCSR_AUTOCLEAR;
736 musb_writew(epio, MUSB_RXCSR, csr);
737 csr |= MUSB_RXCSR_DMAENAB;
738 musb_writew(epio, MUSB_RXCSR, csr);
740 /*
741 * this special sequence (enabling and then
742 * disabling MUSB_RXCSR_DMAMODE) is required
743 * to get DMAReq to activate
744 */
745 musb_writew(epio, MUSB_RXCSR,
746 csr | MUSB_RXCSR_DMAMODE);
747 musb_writew(epio, MUSB_RXCSR, csr);
749 } else {
750 if (!musb_ep->hb_mult &&
751 musb_ep->hw_ep->rx_double_buffered)
752 csr |= MUSB_RXCSR_AUTOCLEAR;
753 csr |= MUSB_RXCSR_DMAENAB;
754 musb_writew(epio, MUSB_RXCSR, csr);
755 }
757 if (request->actual < request->length) {
758 int transfer_size = 0;
759 if (use_mode_1) {
760 transfer_size = min(request->length - request->actual,
761 channel->max_len);
762 musb_ep->dma->desired_mode = 1;
763 } else {
764 transfer_size = min(request->length - request->actual,
765 (unsigned)len);
766 musb_ep->dma->desired_mode = 0;
767 }
769 use_dma = c->channel_program(
770 channel,
771 musb_ep->packet_sz,
772 channel->desired_mode,
773 request->dma
774 + request->actual,
775 transfer_size);
776 }
778 if (use_dma)
779 return;
780 }
781 if (is_ux500_dma(musb) && (is_buffer_mapped(req)) &&
782 (request->actual < request->length)) {
784 struct dma_controller *c;
785 struct dma_channel *channel;
786 int transfer_size = 0;
788 c = musb->dma_controller;
789 channel = musb_ep->dma;
791 /* In case the first packet is short */
792 if (len < musb_ep->packet_sz)
793 transfer_size = len;
794 else if (request->short_not_ok)
795 transfer_size = min(request->length -
796 request->actual,
797 channel->max_len);
798 else
799 transfer_size = min(request->length -
800 request->actual,
801 (unsigned)len);
803 csr &= ~MUSB_RXCSR_DMAMODE;
804 csr |= (MUSB_RXCSR_DMAENAB |
805 MUSB_RXCSR_AUTOCLEAR);
807 musb_writew(epio, MUSB_RXCSR, csr);
809 if (transfer_size <= musb_ep->packet_sz) {
810 musb_ep->dma->desired_mode = 0;
811 } else {
812 musb_ep->dma->desired_mode = 1;
813 /* Mode must be set after DMAENAB */
814 csr |= MUSB_RXCSR_DMAMODE;
815 musb_writew(epio, MUSB_RXCSR, csr);
816 }
818 if (c->channel_program(channel,
819 musb_ep->packet_sz,
820 channel->desired_mode,
821 request->dma
822 + request->actual,
823 transfer_size))
825 return;
826 }
828 fifo_count = request->length - request->actual;
829 dev_dbg(musb->controller, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n",
830 musb_ep->end_point.name,
831 len, fifo_count,
832 musb_ep->packet_sz);
834 fifo_count = min_t(unsigned, len, fifo_count);
836 if (tusb_dma_omap(musb) && is_buffer_mapped(req)) {
837 struct dma_controller *c = musb->dma_controller;
838 struct dma_channel *channel = musb_ep->dma;
839 u32 dma_addr = request->dma + request->actual;
840 int ret;
842 ret = c->channel_program(channel,
843 musb_ep->packet_sz,
844 channel->desired_mode,
845 dma_addr,
846 fifo_count);
847 if (ret)
848 return;
849 }
851 /*
852 * Unmap the dma buffer back to cpu if dma channel
853 * programming fails. This buffer is mapped if the
854 * channel allocation is successful
855 */
856 if (is_buffer_mapped(req)) {
857 unmap_dma_buffer(req, musb);
859 /*
860 * Clear DMAENAB and AUTOCLEAR for the
861 * PIO mode transfer
862 */
863 csr &= ~(MUSB_RXCSR_DMAENAB | MUSB_RXCSR_AUTOCLEAR);
864 musb_writew(epio, MUSB_RXCSR, csr);
865 }
867 musb->ops->read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
868 (request->buf + request->actual));
869 request->actual += fifo_count;
871 /* REVISIT if we left anything in the fifo, flush
872 * it and report -EOVERFLOW
873 */
875 /* ack the read! */
876 csr |= MUSB_RXCSR_P_WZC_BITS;
877 csr &= ~MUSB_RXCSR_RXPKTRDY;
878 musb_writew(epio, MUSB_RXCSR, csr);
879 }
880 }
882 /* reached the end, or a short packet was detected */
883 if (request->actual == request->length || len < musb_ep->packet_sz)
884 musb_g_giveback(musb_ep, request, 0);
885 }
887 /*
888 * Data ready for a request; called from IRQ
889 */
890 void musb_g_rx(struct musb *musb, u8 epnum)
891 {
892 u16 csr;
893 struct musb_request *req;
894 struct usb_request *request;
895 void __iomem *mbase = musb->mregs;
896 struct musb_ep *musb_ep;
897 void __iomem *epio = musb->endpoints[epnum].regs;
898 struct dma_channel *dma;
899 struct musb_hw_ep *hw_ep = &musb->endpoints[epnum];
901 if (hw_ep->is_shared_fifo)
902 musb_ep = &hw_ep->ep_in;
903 else
904 musb_ep = &hw_ep->ep_out;
906 musb_ep_select(musb, mbase, epnum);
908 req = next_request(musb_ep);
909 if (!req)
910 return;
912 request = &req->request;
914 csr = musb_readw(epio, MUSB_RXCSR);
915 dma = is_dma_capable() ? musb_ep->dma : NULL;
917 dev_dbg(musb->controller, "<== %s, rxcsr %04x%s %p\n", musb_ep->end_point.name,
918 csr, dma ? " (dma)" : "", request);
920 if (csr & MUSB_RXCSR_P_SENTSTALL) {
921 csr |= MUSB_RXCSR_P_WZC_BITS;
922 csr &= ~MUSB_RXCSR_P_SENTSTALL;
923 musb_writew(epio, MUSB_RXCSR, csr);
924 return;
925 }
927 if (csr & MUSB_RXCSR_P_OVERRUN) {
928 /* csr |= MUSB_RXCSR_P_WZC_BITS; */
929 csr &= ~MUSB_RXCSR_P_OVERRUN;
930 musb_writew(epio, MUSB_RXCSR, csr);
932 dev_dbg(musb->controller, "%s iso overrun on %p\n", musb_ep->name, request);
933 if (request->status == -EINPROGRESS)
934 request->status = -EOVERFLOW;
935 }
936 if (csr & MUSB_RXCSR_INCOMPRX) {
937 /* REVISIT not necessarily an error */
938 dev_dbg(musb->controller, "%s, incomprx\n", musb_ep->end_point.name);
939 }
941 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
942 /* "should not happen"; likely RXPKTRDY pending for DMA */
943 dev_dbg(musb->controller, "%s busy, csr %04x\n",
944 musb_ep->end_point.name, csr);
945 return;
946 }
948 if (dma && (csr & MUSB_RXCSR_DMAENAB)) {
949 csr &= ~(MUSB_RXCSR_AUTOCLEAR
950 | MUSB_RXCSR_DMAENAB
951 | MUSB_RXCSR_DMAMODE);
952 musb_writew(epio, MUSB_RXCSR,
953 MUSB_RXCSR_P_WZC_BITS | csr);
955 request->actual += musb_ep->dma->actual_len;
957 dev_dbg(musb->controller, "RXCSR%d %04x, dma off, %04x, len %zu, req %p\n",
958 epnum, csr,
959 musb_readw(epio, MUSB_RXCSR),
960 musb_ep->dma->actual_len, request);
962 if (is_inventra_dma(musb) || tusb_dma_omap(musb)
963 || is_ux500_dma(musb)) {
964 /* Autoclear doesn't clear RxPktRdy for short packets */
965 if ((dma->desired_mode == 0 && !hw_ep->rx_double_buffered)
966 || (dma->actual_len
967 & (musb_ep->packet_sz - 1))) {
968 /* ack the read! */
969 csr &= ~MUSB_RXCSR_RXPKTRDY;
970 musb_writew(epio, MUSB_RXCSR, csr);
971 }
973 /* incomplete, and not short? wait for next OUT packet */
974 if ((request->actual < request->length)
975 && (musb_ep->dma->actual_len
976 == musb_ep->packet_sz)) {
977 /* In the double buffer case, continue to unload
978 * the fifo if there is an Rx packet in the FIFO.
979 */
980 csr = musb_readw(epio, MUSB_RXCSR);
981 if ((csr & MUSB_RXCSR_RXPKTRDY) &&
982 hw_ep->rx_double_buffered)
983 rxstate(musb, to_musb_request(request));
984 return;
985 }
986 }
987 musb_g_giveback(musb_ep, request, 0);
989 req = next_request(musb_ep);
990 if (!req)
991 return;
992 }
993 /* Analyze request */
994 rxstate(musb, req);
995 }
997 /* ------------------------------------------------------------ */
999 static int musb_gadget_enable(struct usb_ep *ep,
1000 const struct usb_endpoint_descriptor *desc)
1001 {
1002 unsigned long flags;
1003 struct musb_ep *musb_ep;
1004 struct musb_hw_ep *hw_ep;
1005 void __iomem *regs;
1006 struct musb *musb;
1007 void __iomem *mbase;
1008 u8 epnum;
1009 u16 csr;
1010 unsigned tmp;
1011 int status = -EINVAL;
1013 if (!ep || !desc)
1014 return -EINVAL;
1016 musb_ep = to_musb_ep(ep);
1017 hw_ep = musb_ep->hw_ep;
1018 regs = hw_ep->regs;
1019 musb = musb_ep->musb;
1020 mbase = musb->mregs;
1021 epnum = musb_ep->current_epnum;
1023 spin_lock_irqsave(&musb->lock, flags);
1025 if (musb_ep->desc) {
1026 status = -EBUSY;
1027 goto fail;
1028 }
1029 musb_ep->type = usb_endpoint_type(desc);
1031 /* check direction and (later) maxpacket size against endpoint */
1032 if (usb_endpoint_num(desc) != epnum)
1033 goto fail;
1035 /* REVISIT this rules out high bandwidth periodic transfers */
1036 tmp = usb_endpoint_maxp(desc);
1037 if (tmp & ~0x07ff) {
1038 int ok;
1040 if (usb_endpoint_dir_in(desc))
1041 ok = musb->hb_iso_tx;
1042 else
1043 ok = musb->hb_iso_rx;
1045 if (!ok) {
1046 dev_dbg(musb->controller, "no support for high bandwidth ISO\n");
1047 goto fail;
1048 }
1049 musb_ep->hb_mult = (tmp >> 11) & 3;
1050 } else {
1051 musb_ep->hb_mult = 0;
1052 }
1054 musb_ep->packet_sz = tmp & 0x7ff;
1055 tmp = musb_ep->packet_sz * (musb_ep->hb_mult + 1);
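/*
 * Example of the wMaxPacketSize decode above (illustrative only): a high
 * bandwidth ISO descriptor with wMaxPacketSize = 0x1400 yields
 * packet_sz = 0x400 (1024 bytes) and hb_mult = 2, i.e. up to three
 * transactions per microframe, so "tmp" becomes 3072 and is checked
 * against the hardware FIFO size below.
 */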
1057 /* enable the interrupts for the endpoint, set the endpoint
1058 * packet size (or fail), set the mode, clear the fifo
1059 */
1060 musb_ep_select(musb, mbase, epnum);
1061 if (usb_endpoint_dir_in(desc)) {
1062 u16 int_txe = musb_readw(mbase, MUSB_INTRTXE);
1064 if (hw_ep->is_shared_fifo)
1065 musb_ep->is_in = 1;
1066 if (!musb_ep->is_in)
1067 goto fail;
1069 if (tmp > hw_ep->max_packet_sz_tx) {
1070 dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n");
1071 goto fail;
1072 }
1074 int_txe |= (1 << epnum);
1075 musb_writew(mbase, MUSB_INTRTXE, int_txe);
1077 /* REVISIT if can_bulk_split(), use by updating "tmp";
1078 * likewise high bandwidth periodic tx
1079 */
1080 /* Set TXMAXP with the FIFO size of the endpoint
1081 * to disable double buffering mode.
1082 */
1083 if (musb->double_buffer_not_ok)
1084 musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx);
1085 else
1086 musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz
1087 | (musb_ep->hb_mult << 11));
1089 csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
1090 if (musb_readw(regs, MUSB_TXCSR)
1091 & MUSB_TXCSR_FIFONOTEMPTY)
1092 csr |= MUSB_TXCSR_FLUSHFIFO;
1093 if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
1094 csr |= MUSB_TXCSR_P_ISO;
1096 /* set twice in case of double buffering */
1097 musb_writew(regs, MUSB_TXCSR, csr);
1098 /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
1099 musb_writew(regs, MUSB_TXCSR, csr);
1101 } else {
1102 u16 int_rxe = musb_readw(mbase, MUSB_INTRRXE);
1104 if (hw_ep->is_shared_fifo)
1105 musb_ep->is_in = 0;
1106 if (musb_ep->is_in)
1107 goto fail;
1109 if (tmp > hw_ep->max_packet_sz_rx) {
1110 dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n");
1111 goto fail;
1112 }
1114 int_rxe |= (1 << epnum);
1115 musb_writew(mbase, MUSB_INTRRXE, int_rxe);
1117 /* REVISIT if can_bulk_combine() use by updating "tmp"
1118 * likewise high bandwidth periodic rx
1119 */
1120 /* Set RXMAXP with the FIFO size of the endpoint
1121 * to disable double buffering mode.
1122 */
1123 if (musb->double_buffer_not_ok)
1124 musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_tx);
1125 else
1126 musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz
1127 | (musb_ep->hb_mult << 11));
1129 /* force shared fifo to OUT-only mode */
1130 if (hw_ep->is_shared_fifo) {
1131 csr = musb_readw(regs, MUSB_TXCSR);
1132 csr &= ~(MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY);
1133 musb_writew(regs, MUSB_TXCSR, csr);
1134 }
1136 csr = MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG;
1137 if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
1138 csr |= MUSB_RXCSR_P_ISO;
1139 else if (musb_ep->type == USB_ENDPOINT_XFER_INT)
1140 csr |= MUSB_RXCSR_DISNYET;
1142 /* set twice in case of double buffering */
1143 musb_writew(regs, MUSB_RXCSR, csr);
1144 musb_writew(regs, MUSB_RXCSR, csr);
1145 }
1147 /* NOTE: all the I/O code _should_ work fine without DMA, in case
1148 * for some reason you run out of channels here.
1149 */
1150 if (is_dma_capable() && musb->dma_controller) {
1151 struct dma_controller *c = musb->dma_controller;
1153 musb_ep->dma = c->channel_alloc(c, hw_ep,
1154 (desc->bEndpointAddress & USB_DIR_IN));
1155 } else
1156 musb_ep->dma = NULL;
1158 musb_ep->desc = desc;
1159 musb_ep->busy = 0;
1160 musb_ep->wedged = 0;
1161 status = 0;
1163 pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n",
1164 musb_driver_name, musb_ep->end_point.name,
1165 ({ char *s; switch (musb_ep->type) {
1166 case USB_ENDPOINT_XFER_BULK: s = "bulk"; break;
1167 case USB_ENDPOINT_XFER_INT: s = "int"; break;
1168 default: s = "iso"; break;
1169 }; s; }),
1170 musb_ep->is_in ? "IN" : "OUT",
1171 musb_ep->dma ? "dma, " : "",
1172 musb_ep->packet_sz);
1174 schedule_work(&musb->irq_work);
1176 fail:
1177 spin_unlock_irqrestore(&musb->lock, flags);
1178 return status;
1179 }
1181 /*
1182 * Disable an endpoint, flushing all queued requests.
1183 */
1184 static int musb_gadget_disable(struct usb_ep *ep)
1185 {
1186 unsigned long flags;
1187 struct musb *musb;
1188 u8 epnum;
1189 struct musb_ep *musb_ep;
1190 void __iomem *epio;
1191 int status = 0;
1193 musb_ep = to_musb_ep(ep);
1194 musb = musb_ep->musb;
1195 epnum = musb_ep->current_epnum;
1196 epio = musb->endpoints[epnum].regs;
1198 spin_lock_irqsave(&musb->lock, flags);
1199 musb_ep_select(musb, musb->mregs, epnum);
1201 /* zero the endpoint sizes */
1202 if (musb_ep->is_in) {
1203 u16 int_txe = musb_readw(musb->mregs, MUSB_INTRTXE);
1204 int_txe &= ~(1 << epnum);
1205 musb_writew(musb->mregs, MUSB_INTRTXE, int_txe);
1206 musb_writew(epio, MUSB_TXMAXP, 0);
1207 } else {
1208 u16 int_rxe = musb_readw(musb->mregs, MUSB_INTRRXE);
1209 int_rxe &= ~(1 << epnum);
1210 musb_writew(musb->mregs, MUSB_INTRRXE, int_rxe);
1211 musb_writew(epio, MUSB_RXMAXP, 0);
1212 }
1214 musb_ep->desc = NULL;
1216 /* abort all pending DMA and requests */
1217 nuke(musb_ep, -ESHUTDOWN);
1219 schedule_work(&musb->irq_work);
1221 spin_unlock_irqrestore(&(musb->lock), flags);
1223 dev_dbg(musb->controller, "%s\n", musb_ep->end_point.name);
1225 return status;
1226 }
1228 /*
1229 * Allocate a request for an endpoint.
1230 * Reused by ep0 code.
1231 */
1232 struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
1233 {
1234 struct musb_ep *musb_ep = to_musb_ep(ep);
1235 struct musb *musb = musb_ep->musb;
1236 struct musb_request *request = NULL;
1238 request = kzalloc(sizeof *request, gfp_flags);
1239 if (!request) {
1240 dev_dbg(musb->controller, "not enough memory\n");
1241 return NULL;
1242 }
1244 request->request.dma = DMA_ADDR_INVALID;
1245 request->epnum = musb_ep->current_epnum;
1246 request->ep = musb_ep;
1248 return &request->request;
1249 }
1251 /*
1252 * Free a request
1253 * Reused by ep0 code.
1254 */
1255 void musb_free_request(struct usb_ep *ep, struct usb_request *req)
1256 {
1257 kfree(to_musb_request(req));
1258 }
1260 static LIST_HEAD(buffers);
1262 struct free_record {
1263 struct list_head list;
1264 struct device *dev;
1265 unsigned bytes;
1266 dma_addr_t dma;
1267 };
1269 /*
1270 * Context: controller locked, IRQs blocked.
1271 */
1272 void musb_ep_restart(struct musb *musb, struct musb_request *req)
1273 {
1274 dev_dbg(musb->controller, "<== %s request %p len %u on hw_ep%d\n",
1275 req->tx ? "TX/IN" : "RX/OUT",
1276 &req->request, req->request.length, req->epnum);
1278 musb_ep_select(musb, musb->mregs, req->epnum);
1279 if (req->tx)
1280 txstate(musb, req);
1281 else
1282 rxstate(musb, req);
1283 }
1285 static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
1286 gfp_t gfp_flags)
1287 {
1288 struct musb_ep *musb_ep;
1289 struct musb_request *request;
1290 struct musb *musb;
1291 int status = 0;
1292 unsigned long lockflags;
1294 if (!ep || !req)
1295 return -EINVAL;
1296 if (!req->buf)
1297 return -ENODATA;
1299 musb_ep = to_musb_ep(ep);
1300 musb = musb_ep->musb;
1302 request = to_musb_request(req);
1303 request->musb = musb;
1305 if (request->ep != musb_ep)
1306 return -EINVAL;
1308 dev_dbg(musb->controller, "<== to %s request=%p\n", ep->name, req);
1310 /* request is mine now... */
1311 request->request.actual = 0;
1312 request->request.status = -EINPROGRESS;
1313 request->epnum = musb_ep->current_epnum;
1314 request->tx = musb_ep->is_in;
1316 map_dma_buffer(request, musb, musb_ep);
1318 spin_lock_irqsave(&musb->lock, lockflags);
1320 /* don't queue if the ep is down */
1321 if (!musb_ep->desc) {
1322 dev_dbg(musb->controller, "req %p queued to %s while ep %s\n",
1323 req, ep->name, "disabled");
1324 status = -ESHUTDOWN;
1325 goto cleanup;
1326 }
1328 /* add request to the list */
1329 list_add_tail(&request->list, &musb_ep->req_list);
1331 /* if this is the head of the queue, start i/o ... */
1332 if (!musb_ep->busy && &request->list == musb_ep->req_list.next)
1333 musb_ep_restart(musb, request);
1335 cleanup:
1336 spin_unlock_irqrestore(&musb->lock, lockflags);
1337 return status;
1338 }
1340 static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
1341 {
1342 struct musb_ep *musb_ep = to_musb_ep(ep);
1343 struct musb_request *req = to_musb_request(request);
1344 struct musb_request *r;
1345 unsigned long flags;
1346 int status = 0;
1347 struct musb *musb = musb_ep->musb;
1349 if (!ep || !request || to_musb_request(request)->ep != musb_ep)
1350 return -EINVAL;
1352 spin_lock_irqsave(&musb->lock, flags);
1354 list_for_each_entry(r, &musb_ep->req_list, list) {
1355 if (r == req)
1356 break;
1357 }
1358 if (r != req) {
1359 dev_dbg(musb->controller, "request %p not queued to %s\n", request, ep->name);
1360 status = -EINVAL;
1361 goto done;
1362 }
1364 /* if the hardware doesn't have the request, easy ... */
1365 if (musb_ep->req_list.next != &req->list || musb_ep->busy)
1366 musb_g_giveback(musb_ep, request, -ECONNRESET);
1368 /* ... else abort the dma transfer ... */
1369 else if (is_dma_capable() && musb_ep->dma) {
1370 struct dma_controller *c = musb->dma_controller;
1372 musb_ep_select(musb, musb->mregs, musb_ep->current_epnum);
1373 if (c->channel_abort)
1374 status = c->channel_abort(musb_ep->dma);
1375 else
1376 status = -EBUSY;
1377 if (status == 0)
1378 musb_g_giveback(musb_ep, request, -ECONNRESET);
1379 } else {
1380 /* NOTE: by sticking to easily tested hardware/driver states,
1381 * we leave counting of in-flight packets imprecise.
1382 */
1383 musb_g_giveback(musb_ep, request, -ECONNRESET);
1384 }
1386 done:
1387 spin_unlock_irqrestore(&musb->lock, flags);
1388 return status;
1389 }
1391 /*
1392 * Set or clear the halt bit of an endpoint. A halted endpoint won't tx/rx any
1393 * data but will queue requests.
1394 *
1395 * exported to ep0 code
1396 */
1397 static int musb_gadget_set_halt(struct usb_ep *ep, int value)
1398 {
1399 struct musb_ep *musb_ep = to_musb_ep(ep);
1400 u8 epnum = musb_ep->current_epnum;
1401 struct musb *musb = musb_ep->musb;
1402 void __iomem *epio = musb->endpoints[epnum].regs;
1403 void __iomem *mbase;
1404 unsigned long flags;
1405 u16 csr;
1406 struct musb_request *request;
1407 int status = 0;
1409 if (!ep)
1410 return -EINVAL;
1411 mbase = musb->mregs;
1413 spin_lock_irqsave(&musb->lock, flags);
1415 if (musb_ep->type == USB_ENDPOINT_XFER_ISOC) {
1416 status = -EINVAL;
1417 goto done;
1418 }
1420 musb_ep_select(musb, mbase, epnum);
1422 request = next_request(musb_ep);
1423 if (value) {
1424 if (request) {
1425 dev_dbg(musb->controller, "request in progress, cannot halt %s\n",
1426 ep->name);
1427 status = -EAGAIN;
1428 goto done;
1429 }
1430 /* Cannot portably stall with non-empty FIFO */
1431 if (musb_ep->is_in) {
1432 csr = musb_readw(epio, MUSB_TXCSR);
1433 if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
1434 dev_dbg(musb->controller, "FIFO busy, cannot halt %s\n", ep->name);
1435 status = -EAGAIN;
1436 goto done;
1437 }
1438 }
1439 } else
1440 musb_ep->wedged = 0;
1442 /* set/clear the stall and toggle bits */
1443 dev_dbg(musb->controller, "%s: %s stall\n", ep->name, value ? "set" : "clear");
1444 if (musb_ep->is_in) {
1445 csr = musb_readw(epio, MUSB_TXCSR);
1446 csr |= MUSB_TXCSR_P_WZC_BITS
1447 | MUSB_TXCSR_CLRDATATOG;
1448 if (value)
1449 csr |= MUSB_TXCSR_P_SENDSTALL;
1450 else
1451 csr &= ~(MUSB_TXCSR_P_SENDSTALL
1452 | MUSB_TXCSR_P_SENTSTALL);
1453 csr &= ~MUSB_TXCSR_TXPKTRDY;
1454 musb_writew(epio, MUSB_TXCSR, csr);
1455 } else {
1456 csr = musb_readw(epio, MUSB_RXCSR);
1457 csr |= MUSB_RXCSR_P_WZC_BITS
1458 | MUSB_RXCSR_FLUSHFIFO
1459 | MUSB_RXCSR_CLRDATATOG;
1460 if (value)
1461 csr |= MUSB_RXCSR_P_SENDSTALL;
1462 else
1463 csr &= ~(MUSB_RXCSR_P_SENDSTALL
1464 | MUSB_RXCSR_P_SENTSTALL);
1465 musb_writew(epio, MUSB_RXCSR, csr);
1466 }
1468 /* maybe start the first request in the queue */
1469 if (!musb_ep->busy && !value && request) {
1470 dev_dbg(musb->controller, "restarting the request\n");
1471 musb_ep_restart(musb, request);
1472 }
1474 done:
1475 spin_unlock_irqrestore(&musb->lock, flags);
1476 return status;
1477 }
1479 /*
1480 * Set the halt feature, ignoring any later clear-halt requests (wedge).
1481 */
1482 static int musb_gadget_set_wedge(struct usb_ep *ep)
1483 {
1484 struct musb_ep *musb_ep = to_musb_ep(ep);
1486 if (!ep)
1487 return -EINVAL;
1489 musb_ep->wedged = 1;
1491 return usb_ep_set_halt(ep);
1492 }
1494 static int musb_gadget_fifo_status(struct usb_ep *ep)
1495 {
1496 struct musb_ep *musb_ep = to_musb_ep(ep);
1497 void __iomem *epio = musb_ep->hw_ep->regs;
1498 int retval = -EINVAL;
1500 if (musb_ep->desc && !musb_ep->is_in) {
1501 struct musb *musb = musb_ep->musb;
1502 int epnum = musb_ep->current_epnum;
1503 void __iomem *mbase = musb->mregs;
1504 unsigned long flags;
1506 spin_lock_irqsave(&musb->lock, flags);
1508 musb_ep_select(musb, mbase, epnum);
1509 /* FIXME return zero unless RXPKTRDY is set */
1510 retval = musb_readw(epio, MUSB_RXCOUNT);
1512 spin_unlock_irqrestore(&musb->lock, flags);
1513 }
1514 return retval;
1515 }
1517 static void musb_gadget_fifo_flush(struct usb_ep *ep)
1518 {
1519 struct musb_ep *musb_ep = to_musb_ep(ep);
1520 struct musb *musb = musb_ep->musb;
1521 u8 epnum = musb_ep->current_epnum;
1522 void __iomem *epio = musb->endpoints[epnum].regs;
1523 void __iomem *mbase;
1524 unsigned long flags;
1525 u16 csr, int_txe;
1527 mbase = musb->mregs;
1529 spin_lock_irqsave(&musb->lock, flags);
1530 musb_ep_select(musb, mbase, (u8) epnum);
1532 /* disable interrupts */
1533 int_txe = musb_readw(mbase, MUSB_INTRTXE);
1534 musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));
1536 if (musb_ep->is_in) {
1537 csr = musb_readw(epio, MUSB_TXCSR);
1538 if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
1539 csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS;
1540 /*
1541 * Setting both TXPKTRDY and FLUSHFIFO makes the controller
1542 * interrupt the current FIFO loading, but not flush the
1543 * already loaded ones.
1544 */
1545 csr &= ~MUSB_TXCSR_TXPKTRDY;
1546 musb_writew(epio, MUSB_TXCSR, csr);
1547 /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
1548 musb_writew(epio, MUSB_TXCSR, csr);
1549 }
1550 } else {
1551 csr = musb_readw(epio, MUSB_RXCSR);
1552 csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS;
1553 musb_writew(epio, MUSB_RXCSR, csr);
1554 musb_writew(epio, MUSB_RXCSR, csr);
1555 }
1557 /* re-enable interrupt */
1558 musb_writew(mbase, MUSB_INTRTXE, int_txe);
1559 spin_unlock_irqrestore(&musb->lock, flags);
1560 }
1562 static const struct usb_ep_ops musb_ep_ops = {
1563 .enable = musb_gadget_enable,
1564 .disable = musb_gadget_disable,
1565 .alloc_request = musb_alloc_request,
1566 .free_request = musb_free_request,
1567 .queue = musb_gadget_queue,
1568 .dequeue = musb_gadget_dequeue,
1569 .set_halt = musb_gadget_set_halt,
1570 .set_wedge = musb_gadget_set_wedge,
1571 .fifo_status = musb_gadget_fifo_status,
1572 .fifo_flush = musb_gadget_fifo_flush
1573 };
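/*
 * These ops are never called directly by function drivers; they are
 * reached through the usb_ep_*() wrappers in the gadget core.  A rough
 * sketch of the usual request path as seen from a function driver
 * (the buffer and completion handler names are hypothetical):
 *
 *	req = usb_ep_alloc_request(ep, GFP_KERNEL);	// musb_alloc_request()
 *	req->buf = my_buf;
 *	req->length = my_len;
 *	req->complete = my_complete;
 *	status = usb_ep_queue(ep, req, GFP_ATOMIC);	// musb_gadget_queue()
 *	...
 *	usb_ep_free_request(ep, req);			// musb_free_request()
 */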
1575 /* ----------------------------------------------------------------------- */
1577 static int musb_gadget_get_frame(struct usb_gadget *gadget)
1578 {
1579 struct musb *musb = gadget_to_musb(gadget);
1581 return (int)musb_readw(musb->mregs, MUSB_FRAME);
1582 }
1584 static int musb_gadget_wakeup(struct usb_gadget *gadget)
1585 {
1586 struct musb *musb = gadget_to_musb(gadget);
1587 void __iomem *mregs = musb->mregs;
1588 unsigned long flags;
1589 int status = -EINVAL;
1590 u8 power, devctl;
1591 int retries;
1593 spin_lock_irqsave(&musb->lock, flags);
1595 switch (musb->xceiv->state) {
1596 case OTG_STATE_B_PERIPHERAL:
1597 /* NOTE: OTG state machine doesn't include B_SUSPENDED;
1598 * that's part of the standard usb 1.1 state machine, and
1599 * doesn't affect OTG transitions.
1600 */
1601 if (musb->may_wakeup && musb->is_suspended)
1602 break;
1603 goto done;
1604 case OTG_STATE_B_IDLE:
1605 /* Start SRP ... OTG not required. */
1606 devctl = musb_readb(mregs, MUSB_DEVCTL);
1607 dev_dbg(musb->controller, "Sending SRP: devctl: %02x\n", devctl);
1608 devctl |= MUSB_DEVCTL_SESSION;
1609 musb_writeb(mregs, MUSB_DEVCTL, devctl);
1610 devctl = musb_readb(mregs, MUSB_DEVCTL);
1611 retries = 100;
1612 while (!(devctl & MUSB_DEVCTL_SESSION)) {
1613 devctl = musb_readb(mregs, MUSB_DEVCTL);
1614 if (retries-- < 1)
1615 break;
1616 }
1617 retries = 10000;
1618 while (devctl & MUSB_DEVCTL_SESSION) {
1619 devctl = musb_readb(mregs, MUSB_DEVCTL);
1620 if (retries-- < 1)
1621 break;
1622 }
1624 spin_unlock_irqrestore(&musb->lock, flags);
1625 otg_start_srp(musb->xceiv);
1626 spin_lock_irqsave(&musb->lock, flags);
1628 /* Block idling for at least 1s */
1629 musb_platform_try_idle(musb,
1630 jiffies + msecs_to_jiffies(1 * HZ));
1632 status = 0;
1633 goto done;
1634 default:
1635 dev_dbg(musb->controller, "Unhandled wake: %s\n",
1636 otg_state_string(musb->xceiv->state));
1637 goto done;
1638 }
1640 status = 0;
1642 power = musb_readb(mregs, MUSB_POWER);
1643 power |= MUSB_POWER_RESUME;
1644 musb_writeb(mregs, MUSB_POWER, power);
1645 dev_dbg(musb->controller, "issue wakeup\n");
1647 /* FIXME do this next chunk in a timer callback, no udelay */
1648 mdelay(2);
1650 power = musb_readb(mregs, MUSB_POWER);
1651 power &= ~MUSB_POWER_RESUME;
1652 musb_writeb(mregs, MUSB_POWER, power);
1653 done:
1654 spin_unlock_irqrestore(&musb->lock, flags);
1655 return status;
1656 }
1658 static int
1659 musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered)
1660 {
1661 struct musb *musb = gadget_to_musb(gadget);
1663 musb->is_self_powered = !!is_selfpowered;
1664 return 0;
1665 }
1667 static void musb_pullup(struct musb *musb, int is_on)
1668 {
1669 u8 power;
1671 power = musb_readb(musb->mregs, MUSB_POWER);
1672 if (is_on)
1673 power |= MUSB_POWER_SOFTCONN;
1674 else
1675 power &= ~MUSB_POWER_SOFTCONN;
1677 /* FIXME if on, HdrcStart; if off, HdrcStop */
1679 dev_dbg(musb->controller, "gadget D+ pullup %s\n",
1680 is_on ? "on" : "off");
1681 musb_writeb(musb->mregs, MUSB_POWER, power);
1682 }
1684 #if 0
1685 static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active)
1686 {
1687 dev_dbg(musb->controller, "<= %s =>\n", __func__);
1689 /*
1690 * FIXME iff driver's softconnect flag is set (as it is during probe,
1691 * though that can clear it), just musb_pullup().
1692 */
1694 return -EINVAL;
1695 }
1696 #endif
1698 static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
1699 {
1700 struct musb *musb = gadget_to_musb(gadget);
1702 if (!musb->xceiv->set_power)
1703 return -EOPNOTSUPP;
1704 return otg_set_power(musb->xceiv, mA);
1705 }
1707 static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
1708 {
1709 struct musb *musb = gadget_to_musb(gadget);
1710 unsigned long flags;
1712 is_on = !!is_on;
1714 pm_runtime_get_sync(musb->controller);
1716 /* NOTE: this assumes we are sensing vbus; we'd rather
1717 * not pullup unless the B-session is active.
1718 */
1719 spin_lock_irqsave(&musb->lock, flags);
1720 if (is_on != musb->softconnect) {
1721 musb->softconnect = is_on;
1722 musb_pullup(musb, is_on);
1723 }
1724 spin_unlock_irqrestore(&musb->lock, flags);
1726 pm_runtime_put(musb->controller);
1728 return 0;
1729 }
1731 static int musb_gadget_start(struct usb_gadget *g,
1732 struct usb_gadget_driver *driver);
1733 static int musb_gadget_stop(struct usb_gadget *g,
1734 struct usb_gadget_driver *driver);
1736 static const struct usb_gadget_ops musb_gadget_operations = {
1737 .get_frame = musb_gadget_get_frame,
1738 .wakeup = musb_gadget_wakeup,
1739 .set_selfpowered = musb_gadget_set_self_powered,
1740 /* .vbus_session = musb_gadget_vbus_session, */
1741 .vbus_draw = musb_gadget_vbus_draw,
1742 .pullup = musb_gadget_pullup,
1743 .udc_start = musb_gadget_start,
1744 .udc_stop = musb_gadget_stop,
1745 };
1747 /* ----------------------------------------------------------------------- */
1749 /* Registration */
1751 /* Only this registration code "knows" the rule (from USB standards)
1752 * about there being only one external upstream port. It assumes
1753 * all peripheral ports are external...
1754 */
1756 static void musb_gadget_release(struct device *dev)
1757 {
1758 /* kref_put(WHAT) */
1759 dev_dbg(dev, "%s\n", __func__);
1760 }
1763 static void __devinit
1764 init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in)
1765 {
1766 struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
1768 memset(ep, 0, sizeof *ep);
1770 ep->current_epnum = epnum;
1771 ep->musb = musb;
1772 ep->hw_ep = hw_ep;
1773 ep->is_in = is_in;
1775 INIT_LIST_HEAD(&ep->req_list);
1777 sprintf(ep->name, "ep%d%s", epnum,
1778 (!epnum || hw_ep->is_shared_fifo) ? "" : (
1779 is_in ? "in" : "out"));
1780 ep->end_point.name = ep->name;
1781 INIT_LIST_HEAD(&ep->end_point.ep_list);
1782 if (!epnum) {
1783 ep->end_point.maxpacket = 64;
1784 ep->end_point.ops = &musb_g_ep0_ops;
1785 musb->g.ep0 = &ep->end_point;
1786 } else {
1787 if (is_in)
1788 ep->end_point.maxpacket = hw_ep->max_packet_sz_tx;
1789 else
1790 ep->end_point.maxpacket = hw_ep->max_packet_sz_rx;
1791 ep->end_point.ops = &musb_ep_ops;
1792 list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list);
1793 }
1794 }
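/*
 * The naming above gives, for example, "ep1in"/"ep1out" for a hardware
 * endpoint with separate TX and RX FIFOs, plain "ep1" for a shared-FIFO
 * endpoint, and "ep0" for the control endpoint (illustrative, following
 * the sprintf() format string).
 */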
1796 /*
1797 * Initialize the endpoints exposed to peripheral drivers, with backlinks
1798 * to the rest of the driver state.
1799 */
1800 static inline void __devinit musb_g_init_endpoints(struct musb *musb)
1801 {
1802 u8 epnum;
1803 struct musb_hw_ep *hw_ep;
1804 unsigned count = 0;
1806 /* initialize endpoint list just once */
1807 INIT_LIST_HEAD(&(musb->g.ep_list));
1809 for (epnum = 0, hw_ep = musb->endpoints;
1810 epnum < musb->nr_endpoints;
1811 epnum++, hw_ep++) {
1812 if (hw_ep->is_shared_fifo /* || !epnum */) {
1813 init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0);
1814 count++;
1815 } else {
1816 if (hw_ep->max_packet_sz_tx) {
1817 init_peripheral_ep(musb, &hw_ep->ep_in,
1818 epnum, 1);
1819 count++;
1820 }
1821 if (hw_ep->max_packet_sz_rx) {
1822 init_peripheral_ep(musb, &hw_ep->ep_out,
1823 epnum, 0);
1824 count++;
1825 }
1826 }
1827 }
1828 }
1830 /* called once during driver setup to initialize and link into
1831 * the driver model; memory is zeroed.
1832 */
1833 int __devinit musb_gadget_setup(struct musb *musb)
1834 {
1835 int status;
1837 /* REVISIT minor race: if (erroneously) setting up two
1838 * musb peripherals at the same time, only the bus lock
1839 * is probably held.
1840 */
1842 musb->g.ops = &musb_gadget_operations;
1843 musb->g.max_speed = USB_SPEED_HIGH;
1844 musb->g.speed = USB_SPEED_UNKNOWN;
1846 /* this "gadget" abstracts/virtualizes the controller */
1847 dev_set_name(&musb->g.dev, "gadget");
1848 musb->g.dev.parent = musb->controller;
1849 musb->g.dev.dma_mask = musb->controller->dma_mask;
1850 musb->g.dev.release = musb_gadget_release;
1851 musb->g.name = musb_driver_name;
1853 if (is_otg_enabled(musb))
1854 musb->g.is_otg = 1;
1856 musb_g_init_endpoints(musb);
1858 musb->is_active = 0;
1859 musb_platform_try_idle(musb, 0);
1861 status = device_register(&musb->g.dev);
1862 if (status != 0) {
1863 put_device(&musb->g.dev);
1864 return status;
1865 }
1866 status = usb_add_gadget_udc(musb->controller, &musb->g);
1867 if (status)
1868 goto err;
1870 return 0;
1871 err:
1872 musb->g.dev.parent = NULL;
1873 device_unregister(&musb->g.dev);
1874 return status;
1875 }
1877 void musb_gadget_cleanup(struct musb *musb)
1878 {
1879 usb_del_gadget_udc(&musb->g);
1880 if (musb->g.dev.parent)
1881 device_unregister(&musb->g.dev);
1882 }
1884 /*
1885 * Register the gadget driver. Used by gadget drivers when
1886 * registering themselves with the controller.
1887 *
1888 * -EINVAL something went wrong (not driver)
1889 * -EBUSY another gadget is already using the controller
1890 * -ENOMEM no memory to perform the operation
1891 *
1892 * @param driver the gadget driver
1893 * @return <0 if error, 0 if everything is fine
1894 */
1895 static int musb_gadget_start(struct usb_gadget *g,
1896 struct usb_gadget_driver *driver)
1897 {
1898 struct musb *musb = gadget_to_musb(g);
1899 unsigned long flags;
1900 int retval = -EINVAL;
1902 if (driver->max_speed < USB_SPEED_HIGH)
1903 goto err0;
1905 pm_runtime_get_sync(musb->controller);
1907 dev_dbg(musb->controller, "registering driver %s\n", driver->function);
1909 musb->softconnect = 0;
1910 musb->gadget_driver = driver;
1912 spin_lock_irqsave(&musb->lock, flags);
1913 musb->is_active = 1;
1915 otg_set_peripheral(musb->xceiv, &musb->g);
1916 musb->xceiv->state = OTG_STATE_B_IDLE;
1918 /*
1919 * FIXME this ignores the softconnect flag. Drivers are
1920 * allowed to hold the peripheral inactive until, for example,
1921 * userspace hooks up printer hardware or DSP codecs, so
1922 * hosts only see fully functional devices.
1923 */
1925 if (!is_otg_enabled(musb))
1926 musb_start(musb);
1928 spin_unlock_irqrestore(&musb->lock, flags);
1930 if (is_otg_enabled(musb)) {
1931 struct usb_hcd *hcd = musb_to_hcd(musb);
1933 dev_dbg(musb->controller, "OTG startup...\n");
1935 /* REVISIT: funcall to other code, which also
1936 * handles power budgeting ... this way also
1937 * ensures HdrcStart is indirectly called.
1938 */
1939 retval = usb_add_hcd(musb_to_hcd(musb), -1, 0);
1940 if (retval < 0) {
1941 dev_dbg(musb->controller, "add_hcd failed, %d\n", retval);
1942 goto err2;
1943 }
1945 if ((musb->xceiv->last_event == USB_EVENT_ID)
1946 && musb->xceiv->set_vbus)
1947 otg_set_vbus(musb->xceiv, 1);
1949 hcd->self.uses_pio_for_control = 1;
1950 }
1951 if (musb->xceiv->last_event == USB_EVENT_NONE)
1952 pm_runtime_put(musb->controller);
1954 return 0;
1956 err2:
1957 if (!is_otg_enabled(musb))
1958 musb_stop(musb);
1959 err0:
1960 return retval;
1961 }
1963 static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver)
1964 {
1965 int i;
1966 struct musb_hw_ep *hw_ep;
1968 /* don't disconnect if it's not connected */
1969 if (musb->g.speed == USB_SPEED_UNKNOWN)
1970 driver = NULL;
1971 else
1972 musb->g.speed = USB_SPEED_UNKNOWN;
1974 /* deactivate the hardware */
1975 if (musb->softconnect) {
1976 musb->softconnect = 0;
1977 musb_pullup(musb, 0);
1978 }
1979 musb_stop(musb);
1981 /* killing any outstanding requests will quiesce the driver;
1982 * then report disconnect
1983 */
1984 if (driver) {
1985 for (i = 0, hw_ep = musb->endpoints;
1986 i < musb->nr_endpoints;
1987 i++, hw_ep++) {
1988 musb_ep_select(musb, musb->mregs, i);
1989 if (hw_ep->is_shared_fifo /* || !epnum */) {
1990 nuke(&hw_ep->ep_in, -ESHUTDOWN);
1991 } else {
1992 if (hw_ep->max_packet_sz_tx)
1993 nuke(&hw_ep->ep_in, -ESHUTDOWN);
1994 if (hw_ep->max_packet_sz_rx)
1995 nuke(&hw_ep->ep_out, -ESHUTDOWN);
1996 }
1997 }
1998 }
1999 }
2001 /*
2002 * Unregister the gadget driver. Used by gadget drivers when
2003 * unregistering themselves from the controller.
2004 *
2005 * @param driver the gadget driver to unregister
2006 */
2007 static int musb_gadget_stop(struct usb_gadget *g,
2008 struct usb_gadget_driver *driver)
2009 {
2010 struct musb *musb = gadget_to_musb(g);
2011 unsigned long flags;
2013 if (musb->xceiv->last_event == USB_EVENT_NONE)
2014 pm_runtime_get_sync(musb->controller);
2016 /*
2017 * REVISIT always use otg_set_peripheral() here too;
2018 * this needs to shut down the OTG engine.
2019 */
2021 spin_lock_irqsave(&musb->lock, flags);
2023 if (is_otg_enabled(musb))
2024 musb_hnp_stop(musb);
2026 (void) musb_gadget_vbus_draw(&musb->g, 0);
2028 musb->xceiv->state = OTG_STATE_UNDEFINED;
2029 stop_activity(musb, driver);
2030 otg_set_peripheral(musb->xceiv, NULL);
2032 dev_dbg(musb->controller, "unregistering driver %s\n", driver->function);
2034 musb->is_active = 0;
2035 musb_platform_try_idle(musb, 0);
2036 spin_unlock_irqrestore(&musb->lock, flags);
2038 if (is_otg_enabled(musb)) {
2039 usb_remove_hcd(musb_to_hcd(musb));
2040 /* FIXME we need to be able to register another
2041 * gadget driver here and have everything work;
2042 * that currently misbehaves.
2043 */
2044 }
2046 if (!is_otg_enabled(musb))
2047 musb_stop(musb);
2049 pm_runtime_put(musb->controller);
2051 return 0;
2052 }
2054 /* ----------------------------------------------------------------------- */
2056 /* lifecycle operations called through plat_uds.c */
2058 void musb_g_resume(struct musb *musb)
2059 {
2060 musb->is_suspended = 0;
2061 switch (musb->xceiv->state) {
2062 case OTG_STATE_B_IDLE:
2063 break;
2064 case OTG_STATE_B_WAIT_ACON:
2065 case OTG_STATE_B_PERIPHERAL:
2066 musb->is_active = 1;
2067 if (musb->gadget_driver && musb->gadget_driver->resume) {
2068 spin_unlock(&musb->lock);
2069 musb->gadget_driver->resume(&musb->g);
2070 spin_lock(&musb->lock);
2071 }
2072 break;
2073 default:
2074 WARNING("unhandled RESUME transition (%s)\n",
2075 otg_state_string(musb->xceiv->state));
2076 }
2077 }
2079 /* called when SOF packets stop for 3+ msec */
2080 void musb_g_suspend(struct musb *musb)
2081 {
2082 u8 devctl;
2084 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
2085 dev_dbg(musb->controller, "devctl %02x\n", devctl);
2087 switch (musb->xceiv->state) {
2088 case OTG_STATE_B_IDLE:
2089 if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
2090 musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
2091 break;
2092 case OTG_STATE_B_PERIPHERAL:
2093 musb->is_suspended = 1;
2094 if (musb->gadget_driver && musb->gadget_driver->suspend) {
2095 spin_unlock(&musb->lock);
2096 musb->gadget_driver->suspend(&musb->g);
2097 spin_lock(&musb->lock);
2098 }
2099 break;
2100 default:
2101 /* REVISIT if B_HOST, clear DEVCTL.HOSTREQ;
2102 * A_PERIPHERAL may need care too
2103 */
2104 WARNING("unhandled SUSPEND transition (%s)\n",
2105 otg_state_string(musb->xceiv->state));
2106 }
2107 }
2109 /* Called during SRP */
2110 void musb_g_wakeup(struct musb *musb)
2111 {
2112 musb_gadget_wakeup(&musb->g);
2113 }
2115 /* called when VBUS drops below session threshold, and in other cases */
2116 void musb_g_disconnect(struct musb *musb)
2117 {
2118 void __iomem *mregs = musb->mregs;
2119 u8 devctl = musb_readb(mregs, MUSB_DEVCTL);
2121 dev_dbg(musb->controller, "devctl %02x\n", devctl);
2123 /* clear HR */
2124 musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION);
2126 /* don't draw vbus until new b-default session */
2127 (void) musb_gadget_vbus_draw(&musb->g, 0);
2129 musb->g.speed = USB_SPEED_UNKNOWN;
2130 if (musb->gadget_driver && musb->gadget_driver->disconnect) {
2131 spin_unlock(&musb->lock);
2132 musb->gadget_driver->disconnect(&musb->g);
2133 spin_lock(&musb->lock);
2134 }
2136 switch (musb->xceiv->state) {
2137 default:
2138 if (is_otg_enabled(musb)) {
2139 dev_dbg(musb->controller, "Unhandled disconnect %s, setting a_idle\n",
2140 otg_state_string(musb->xceiv->state));
2141 musb->xceiv->state = OTG_STATE_A_IDLE;
2142 break;
2143 }
2144 case OTG_STATE_A_PERIPHERAL:
2145 if (is_otg_enabled(musb))
2146 musb->xceiv->state = OTG_STATE_A_WAIT_VFALL;
2147 break;
2148 case OTG_STATE_B_WAIT_ACON:
2149 case OTG_STATE_B_HOST:
2150 if (!is_otg_enabled(musb))
2151 break;
2152 case OTG_STATE_B_PERIPHERAL:
2153 case OTG_STATE_B_IDLE:
2154 musb->xceiv->state = OTG_STATE_B_IDLE;
2155 break;
2156 case OTG_STATE_B_SRP_INIT:
2157 break;
2158 }
2160 musb->is_active = 0;
2161 }
2163 void musb_g_reset(struct musb *musb)
2164 __releases(musb->lock)
2165 __acquires(musb->lock)
2166 {
2167 void __iomem *mbase = musb->mregs;
2168 u8 devctl = musb_readb(mbase, MUSB_DEVCTL);
2169 u8 power;
2171 dev_dbg(musb->controller, "<== %s addr=%x driver '%s'\n",
2172 (devctl & MUSB_DEVCTL_BDEVICE)
2173 ? "B-Device" : "A-Device",
2174 musb_readb(mbase, MUSB_FADDR),
2175 musb->gadget_driver
2176 ? musb->gadget_driver->driver.name
2177 : NULL
2178 );
2180 /* report disconnect, if we didn't already (flushing EP state) */
2181 if (musb->g.speed != USB_SPEED_UNKNOWN)
2182 musb_g_disconnect(musb);
2184 /* clear HR */
2185 else if (devctl & MUSB_DEVCTL_HR)
2186 musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
2189 /* what speed did we negotiate? */
2190 power = musb_readb(mbase, MUSB_POWER);
2191 musb->g.speed = (power & MUSB_POWER_HSMODE)
2192 ? USB_SPEED_HIGH : USB_SPEED_FULL;
2194 /* start in USB_STATE_DEFAULT */
2195 musb->is_active = 1;
2196 musb->is_suspended = 0;
2197 MUSB_DEV_MODE(musb);
2198 musb->address = 0;
2199 musb->ep0_state = MUSB_EP0_STAGE_SETUP;
2201 musb->may_wakeup = 0;
2202 musb->g.b_hnp_enable = 0;
2203 musb->g.a_alt_hnp_support = 0;
2204 musb->g.a_hnp_support = 0;
2206 /* Normal reset, as B-Device;
2207 * or else after HNP, as A-Device
2208 */
2209 if (devctl & MUSB_DEVCTL_BDEVICE) {
2210 musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
2211 musb->g.is_a_peripheral = 0;
2212 } else if (is_otg_enabled(musb)) {
2213 musb->xceiv->state = OTG_STATE_A_PERIPHERAL;
2214 musb->g.is_a_peripheral = 1;
2215 } else
2216 WARN_ON(1);
2218 /* start with default limits on VBUS power draw */
2219 (void) musb_gadget_vbus_draw(&musb->g,
2220 is_otg_enabled(musb) ? 8 : 100);
2221 }