summary | shortlog | log | commit | commitdiff | tree
raw | patch | inline | side by side (parent: d67c375)
raw | patch | inline | side by side (parent: d67c375)
author | Ming Lei <tom.leiming@gmail.com> | |
Thu, 23 Dec 2010 15:13:50 +0000 (23:13 +0800) | ||
committer | Vaibhav Hiremath <hvaibhav@ti.com> | |
Mon, 23 Jan 2012 19:14:04 +0000 (00:44 +0530) |
This patch adds the parameter of musb pointer to musb_ep_select
and MUSB_EP_OFFSET, then we can pass musb into these two helpers
and can give corresponding implementation for flat mappings and
indexed mappings of hw endpoint register address using the information
from hw glue driver.
Signed-off-by: Ming Lei <tom.leiming@gmail.com>
Signed-off-by: Felipe Balbi <balbi@ti.com>
Signed-off-by: Ajay Kumar Gupta <ajay.gupta@ti.com>
Signed-off-by: Vaibhav Hiremath <hvaibhav@ti.com>
and MUSB_EP_OFFSET, then we can pass musb into these two helpers
and can give corresponding implementation for flat mappings and
indexed mappings of hw endpoint register address using the information
from hw glue driver.
Signed-off-by: Ming Lei <tom.leiming@gmail.com>
Signed-off-by: Felipe Balbi <balbi@ti.com>
Signed-off-by: Ajay Kumar Gupta <ajay.gupta@ti.com>
Signed-off-by: Vaibhav Hiremath <hvaibhav@ti.com>
index 6815fbaf9d31954c50243316c2d5df1feb4243dd..6b216bb62232c3638495bdd250b8bac4b9cc9415 100644 (file)
{
void __iomem *base = c->controller->mregs;
struct cppi_rx_stateram __iomem *rx = c->state_ram;
+ struct musb *musb = c->controller->musb;
- musb_ep_select(base, c->index + 1);
+ musb_ep_select(musb, base, c->index + 1);
dev_dbg(c->controller->musb->controller,
"RX DMA%d%s: %d left, csr %04x, "
{
void __iomem *base = c->controller->mregs;
struct cppi_tx_stateram __iomem *tx = c->state_ram;
+ struct musb *musb = c->controller->musb;
- musb_ep_select(base, c->index + 1);
+ musb_ep_select(musb, base, c->index + 1);
dev_dbg(c->controller->musb->controller,
"TX DMA%d%s: csr %04x, "
*/
WARN_ON(rx->head);
}
- musb_ep_select(cppi->mregs, rx->index + 1);
+ musb_ep_select(cppi->musb, cppi->mregs, rx->index + 1);
csr = musb_readw(regs, MUSB_RXCSR);
if (csr & MUSB_RXCSR_DMAENAB) {
dev_dbg(musb->controller, "list%d %p/%p, last %llx%s, csr %04x\n",
* and caller should rely on us not changing it.
* peripheral code is safe ... check host too.
*/
- musb_ep_select(mbase, cppi_ch->index + 1);
+ musb_ep_select(controller->musb, mbase, cppi_ch->index + 1);
if (cppi_ch->transmit) {
struct cppi_tx_stateram __iomem *tx_ram;
index 36fa9e4c0a4c3e32aab45aab65e45ed9f87ad354..aa8f23fb590e60192a79213257b42c7f6fdfb969 100644 (file)
{
void __iomem *regs = musb->endpoints[0].regs;
- musb_ep_select(musb->mregs, 0);
+ musb_ep_select(musb, musb->mregs, 0);
musb->ops->write_fifo(musb->control_ep,
sizeof(musb_test_packet), musb_test_packet);
musb_writew(regs, MUSB_CSR0, MUSB_CSR0_TXPKTRDY);
/* FIXME pick up ep0 maxpacket size */
for (epnum = 1; epnum < musb->config->num_eps; epnum++) {
- musb_ep_select(mbase, epnum);
+ musb_ep_select(musb, mbase, epnum);
hw_ep = musb->endpoints + epnum;
ret = musb_read_fifosize(musb, hw_ep, epnum);
hw_ep->fifo = MUSB_FIFO_OFFSET(i) + mbase;
}
- hw_ep->regs = MUSB_EP_OFFSET(i, 0) + mbase;
+ hw_ep->regs = MUSB_EP_OFFSET(musb, i, 0) + mbase;
hw_ep->target_regs = musb_read_target_reg_base(i, mbase);
hw_ep->rx_reinit = 1;
hw_ep->tx_reinit = 1;
ep_num = 1;
while (reg) {
if (reg & 1) {
- /* musb_ep_select(musb->mregs, ep_num); */
+ /* musb_ep_select(musb, musb->mregs, ep_num); */
/* REVISIT just retval = ep->rx_irq(...) */
retval = IRQ_HANDLED;
if (devctl & MUSB_DEVCTL_HM) {
ep_num = 1;
while (reg) {
if (reg & 1) {
- /* musb_ep_select(musb->mregs, ep_num); */
+ /* musb_ep_select(musb, musb->mregs, ep_num); */
/* REVISIT just retval |= ep->tx_irq(...) */
retval = IRQ_HANDLED;
if (devctl & MUSB_DEVCTL_HM) {
index 586af0c5699b23c3f79ebc784f1a535109561079..e070251deb2ae3efe3f7f8946f4de25e38a8dc0a 100644 (file)
/* "flat" mapping: each endpoint has its own i/o address */
#ifdef MUSB_FLAT_REG
-#define musb_ep_select(_mbase, _epnum) (((void)(_mbase)), ((void)(_epnum)))
+#define musb_ep_select(_musb, _mbase, _epnum) \
+ (((void)(_mbase)), ((void)(_epnum)))
#define MUSB_EP_OFFSET MUSB_FLAT_OFFSET
/* "indexed" mapping: INDEX register controls register bank select */
#else
-#define musb_ep_select(_mbase, _epnum) \
+#define musb_ep_select(_musb, _mbase, _epnum) \
musb_writeb((_mbase), MUSB_INDEX, (_epnum))
#define MUSB_EP_OFFSET MUSB_INDEXED_OFFSET
#endif
u8 reg = 0;
/* read from core using indexed model */
- reg = musb_readb(mbase, MUSB_EP_OFFSET(epnum, MUSB_FIFOSIZE));
+ reg = musb_readb(mbase, MUSB_EP_OFFSET(musb, epnum, MUSB_FIFOSIZE));
/* 0's returned when no more endpoints */
if (!reg)
return -ENODEV;
index d7536066eb0c1f0ca78471bf66d684bb8ee590cf..42cbf52256eb43b3aab94c963b6675d779c0d9b4 100644 (file)
void __iomem *epio = musb->endpoints[epnum].regs;
struct dma_channel *dma;
- musb_ep_select(mbase, epnum);
+ musb_ep_select(musb, mbase, epnum);
req = next_request(musb_ep);
request = &req->request;
else
musb_ep = &hw_ep->ep_out;
- musb_ep_select(mbase, epnum);
+ musb_ep_select(musb, mbase, epnum);
req = next_request(musb_ep);
if (!req)
/* enable the interrupts for the endpoint, set the endpoint
* packet size (or fail), set the mode, clear the fifo
*/
- musb_ep_select(mbase, epnum);
+ musb_ep_select(musb, mbase, epnum);
if (usb_endpoint_dir_in(desc)) {
u16 int_txe = musb_readw(mbase, MUSB_INTRTXE);
epio = musb->endpoints[epnum].regs;
spin_lock_irqsave(&musb->lock, flags);
- musb_ep_select(musb->mregs, epnum);
+ musb_ep_select(musb, musb->mregs, epnum);
/* zero the endpoint sizes */
if (musb_ep->is_in) {
req->tx ? "TX/IN" : "RX/OUT",
&req->request, req->request.length, req->epnum);
- musb_ep_select(musb->mregs, req->epnum);
+ musb_ep_select(musb, musb->mregs, req->epnum);
if (req->tx)
txstate(musb, req);
else
@@ -1371,7 +1371,7 @@ static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
else if (is_dma_capable() && musb_ep->dma) {
struct dma_controller *c = musb->dma_controller;
- musb_ep_select(musb->mregs, musb_ep->current_epnum);
+ musb_ep_select(musb, musb->mregs, musb_ep->current_epnum);
if (c->channel_abort)
status = c->channel_abort(musb_ep->dma);
else
goto done;
}
- musb_ep_select(mbase, epnum);
+ musb_ep_select(musb, mbase, epnum);
request = next_request(musb_ep);
if (value) {
spin_lock_irqsave(&musb->lock, flags);
- musb_ep_select(mbase, epnum);
+ musb_ep_select(musb, mbase, epnum);
/* FIXME return zero unless RXPKTRDY is set */
retval = musb_readw(epio, MUSB_RXCOUNT);
mbase = musb->mregs;
spin_lock_irqsave(&musb->lock, flags);
- musb_ep_select(mbase, (u8) epnum);
+ musb_ep_select(musb, mbase, (u8) epnum);
/* disable interrupts */
int_txe = musb_readw(mbase, MUSB_INTRTXE);
@@ -1987,7 +1987,7 @@ static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver)
for (i = 0, hw_ep = musb->endpoints;
i < musb->nr_endpoints;
i++, hw_ep++) {
- musb_ep_select(musb->mregs, i);
+ musb_ep_select(musb, musb->mregs, i);
if (hw_ep->is_shared_fifo /* || !epnum */) {
nuke(&hw_ep->ep_in, -ESHUTDOWN);
} else {
index e5b350d84d17629eb4b75c370b977ba90bab2d5b..32fd0144eb2689978b8eed00f8a36c1b11305e93 100644 (file)
break;
}
- musb_ep_select(mbase, epnum);
+ musb_ep_select(musb, mbase, epnum);
if (is_in)
tmp = musb_readw(regs, MUSB_TXCSR)
& MUSB_TXCSR_P_SENDSTALL;
else
tmp = musb_readw(regs, MUSB_RXCSR)
& MUSB_RXCSR_P_SENDSTALL;
- musb_ep_select(mbase, 0);
+ musb_ep_select(musb, mbase, 0);
result[0] = tmp ? 1 : 0;
} break;
if (musb_ep->wedged)
break;
- musb_ep_select(mbase, epnum);
+ musb_ep_select(musb, mbase, epnum);
if (is_in) {
csr = musb_readw(regs, MUSB_TXCSR);
csr |= MUSB_TXCSR_CLRDATATOG |
}
/* select ep0 again */
- musb_ep_select(mbase, 0);
+ musb_ep_select(musb, mbase, 0);
} break;
default:
/* class, vendor, etc ... delegate */
if (!musb_ep->desc)
break;
- musb_ep_select(mbase, epnum);
+ musb_ep_select(musb, mbase, epnum);
if (is_in) {
csr = musb_readw(regs, MUSB_TXCSR);
if (csr & MUSB_TXCSR_FIFONOTEMPTY)
}
/* select ep0 again */
- musb_ep_select(mbase, 0);
+ musb_ep_select(musb, mbase, 0);
handled = 1;
} break;
return;
musb->ackpend = 0;
}
- musb_ep_select(musb->mregs, 0);
+ musb_ep_select(musb, musb->mregs, 0);
musb_writew(regs, MUSB_CSR0, csr);
}
}
/* send it out, triggering a "txpktrdy cleared" irq */
- musb_ep_select(musb->mregs, 0);
+ musb_ep_select(musb, musb->mregs, 0);
musb_writew(regs, MUSB_CSR0, csr);
}
void __iomem *regs = musb->endpoints[0].regs;
irqreturn_t retval = IRQ_NONE;
- musb_ep_select(mbase, 0); /* select ep0 */
+ musb_ep_select(musb, mbase, 0); /* select ep0 */
csr = musb_readw(regs, MUSB_CSR0);
len = musb_readb(regs, MUSB_COUNT0);
handled = forward_to_driver(musb, &setup);
if (handled < 0) {
- musb_ep_select(mbase, 0);
+ musb_ep_select(musb, mbase, 0);
stall:
dev_dbg(musb->controller, "stall (%d)\n", handled);
musb->ackpend |= MUSB_CSR0_P_SENDSTALL;
ep->name, ep->is_in ? "IN/TX" : "OUT/RX",
req->request.length);
- musb_ep_select(musb->mregs, 0);
+ musb_ep_select(musb, musb->mregs, 0);
/* sequence #1, IN ... start writing the data */
if (musb->ep0_state == MUSB_EP0_STAGE_TX)
goto cleanup;
}
- musb_ep_select(base, 0);
+ musb_ep_select(musb, base, 0);
csr = musb->ackpend;
switch (musb->ep0_state) {
index 9c6b05bb372d73fc6f57551c76e9f89c07901d63..e6826060d8bc7a7ec44e300a125edc9d8be91818 100644 (file)
int pipe = urb->pipe;
void *buffer = urb->transfer_buffer;
- /* musb_ep_select(mbase, epnum); */
+ /* musb_ep_select(musb, mbase, epnum); */
rx_count = musb_readw(epio, MUSB_RXCOUNT);
dev_dbg(musb->controller, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count,
urb->transfer_buffer, qh->offset,
qh->h_addr_reg, qh->h_port_reg,
len);
- musb_ep_select(mbase, epnum);
+ musb_ep_select(musb, mbase, epnum);
/* candidate for DMA? */
dma_controller = musb->dma_controller;
/* ep0 only has one queue, "in" */
urb = next_urb(qh);
- musb_ep_select(mbase, 0);
+ musb_ep_select(musb, mbase, 0);
csr = musb_readw(epio, MUSB_CSR0);
len = (csr & MUSB_CSR0_RXPKTRDY)
? musb_readb(epio, MUSB_COUNT0)
struct dma_channel *dma;
bool transfer_pending = false;
- musb_ep_select(mbase, epnum);
+ musb_ep_select(musb, mbase, epnum);
tx_csr = musb_readw(epio, MUSB_TXCSR);
/* with CPPI, DMA sometimes triggers "extra" irqs */
* if (bulk && qh->ring.next != &musb->out_bulk), then
* we have a candidate... NAKing is *NOT* an error
*/
- musb_ep_select(mbase, epnum);
+ musb_ep_select(musb, mbase, epnum);
musb_writew(epio, MUSB_TXCSR,
MUSB_TXCSR_H_WZC_BITS
| MUSB_TXCSR_TXPKTRDY);
| MUSB_TXCSR_H_NAKTIMEOUT
);
- musb_ep_select(mbase, epnum);
+ musb_ep_select(musb, mbase, epnum);
musb_writew(epio, MUSB_TXCSR, tx_csr);
/* REVISIT may need to clear FLUSHFIFO ... */
musb_writew(epio, MUSB_TXCSR, tx_csr);
musb->ops->write_fifo(hw_ep, length, urb->transfer_buffer + offset);
qh->segsize = length;
- musb_ep_select(mbase, epnum);
+ musb_ep_select(musb, mbase, epnum);
musb_writew(epio, MUSB_TXCSR,
MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
}
@@ -1382,7 +1382,7 @@ static void musb_bulk_rx_nak_timeout(struct musb *musb, struct musb_hw_ep *ep)
struct musb_qh *cur_qh, *next_qh;
u16 rx_csr;
- musb_ep_select(mbase, ep->epnum);
+ musb_ep_select(musb, mbase, ep->epnum);
dma = is_dma_capable() ? ep->rx_channel : NULL;
/* clear nak timeout bit */
u32 status;
struct dma_channel *dma;
- musb_ep_select(mbase, epnum);
+ musb_ep_select(musb, mbase, epnum);
urb = next_urb(qh);
dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
musb_bulk_rx_nak_timeout(musb, hw_ep);
return;
}
- musb_ep_select(mbase, epnum);
+ musb_ep_select(musb, mbase, epnum);
rx_csr |= MUSB_RXCSR_H_WZC_BITS;
rx_csr &= ~MUSB_RXCSR_DATAERROR;
musb_writew(epio, MUSB_RXCSR, rx_csr);
xfer_len, dma ? ", dma" : "");
rx_csr &= ~MUSB_RXCSR_H_REQPKT;
- musb_ep_select(mbase, epnum);
+ musb_ep_select(musb, mbase, epnum);
musb_writew(epio, MUSB_RXCSR,
MUSB_RXCSR_H_WZC_BITS | rx_csr);
}
/* SCRUB (RX) */
/* do the proper sequence to abort the transfer */
- musb_ep_select(mbase, epnum);
+ musb_ep_select(musb, mbase, epnum);
val &= ~MUSB_RXCSR_H_REQPKT;
musb_writew(epio, MUSB_RXCSR, val);
goto finish;
int status = 0;
u16 csr;
- musb_ep_select(regs, hw_end);
+ musb_ep_select(ep->musb, regs, hw_end);
if (is_dma_capable()) {
struct dma_channel *dma;
index 3321baec3a44c644c9ae863ec048b95902e39ca1..1af0ea9a4d063ac2b4146466ee3fde0cb70d1922 100644 (file)
#define MUSB_CONFIGDATA MUSB_FIFOSIZE /* Re-used for EP0 */
/* Offsets to endpoint registers in indexed model (using INDEX register) */
-#define MUSB_INDEXED_OFFSET(_epnum, _offset) \
+#define MUSB_INDEXED_OFFSET(_musb, _epnum, _offset) \
(0x10 + (_offset))
/* Offsets to endpoint registers in flat models */
-#define MUSB_FLAT_OFFSET(_epnum, _offset) \
+#define MUSB_FLAT_OFFSET(_musb, _epnum, _offset) \
(0x100 + (0x10*(_epnum)) + (_offset))
#include "tusb6010.h" /* Needed "only" for TUSB_EP0_CONF */
#define MUSB_TXCOUNT 0x28
/* Offsets to endpoint registers in indexed model (using INDEX register) */
-#define MUSB_INDEXED_OFFSET(_epnum, _offset) \
+#define MUSB_INDEXED_OFFSET(_musb, _epnum, _offset) \
(0x40 + (_offset))
/* Offsets to endpoint registers in flat models */
-#define MUSB_FLAT_OFFSET(_epnum, _offset) \
+#define MUSB_FLAT_OFFSET(_musb, _epnum, _offset) \
(USB_OFFSET(USB_EP_NI0_TXMAXP) + (0x40 * (_epnum)) + (_offset))
/* Not implemented - HW has separate Tx/Rx FIFO */
index 444b9ee06490dcd3a3e42ddda190b504d98f352d..4d7d83bf47db4312977ddb6887e91559d7864d93 100644 (file)
{
struct musb_dma_channel *musb_channel = channel->private_data;
void __iomem *mbase = musb_channel->controller->base;
-
+ struct musb *musb = musb_channel->controller->private_data;
u8 bchannel = musb_channel->idx;
int offset;
u16 csr;
if (channel->status == MUSB_DMA_STATUS_BUSY) {
if (musb_channel->transmit) {
- offset = MUSB_EP_OFFSET(musb_channel->epnum,
+ offset = MUSB_EP_OFFSET(musb, musb_channel->epnum,
MUSB_TXCSR);
/*
csr &= ~MUSB_TXCSR_DMAMODE;
musb_writew(mbase, offset, csr);
} else {
- offset = MUSB_EP_OFFSET(musb_channel->epnum,
+ offset = MUSB_EP_OFFSET(musb, musb_channel->epnum,
MUSB_RXCSR);
csr = musb_readw(mbase, offset);
(musb_channel->max_packet_sz - 1)))
) {
u8 epnum = musb_channel->epnum;
- int offset = MUSB_EP_OFFSET(epnum,
+ int offset = MUSB_EP_OFFSET(musb, epnum,
MUSB_TXCSR);
u16 txcsr;
* The programming guide says that we
* must clear DMAENAB before DMAMODE.
*/
- musb_ep_select(mbase, epnum);
+ musb_ep_select(musb, mbase, epnum);
txcsr = musb_readw(mbase, offset);
txcsr &= ~(MUSB_TXCSR_DMAENAB
| MUSB_TXCSR_AUTOSET);
index 1cfc5d07579198f70b1cf39ef1cfb622bc087dc0..74fa695b0a5c7672d573acc833c7fa502cd7dfcb 100644 (file)
if (chdat->tx) {
dev_dbg(musb->controller, "terminating short tx packet\n");
- musb_ep_select(mbase, chdat->epnum);
+ musb_ep_select(musb, mbase, chdat->epnum);
csr = musb_readw(hw_ep->regs, MUSB_TXCSR);
csr |= MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY
| MUSB_TXCSR_P_WZC_BITS;
* Prepare MUSB for DMA transfer
*/
if (chdat->tx) {
- musb_ep_select(mbase, chdat->epnum);
+ musb_ep_select(musb, mbase, chdat->epnum);
csr = musb_readw(hw_ep->regs, MUSB_TXCSR);
csr |= (MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB
| MUSB_TXCSR_DMAMODE | MUSB_TXCSR_MODE);
csr &= ~MUSB_TXCSR_P_UNDERRUN;
musb_writew(hw_ep->regs, MUSB_TXCSR, csr);
} else {
- musb_ep_select(mbase, chdat->epnum);
+ musb_ep_select(musb, mbase, chdat->epnum);
csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
csr |= MUSB_RXCSR_DMAENAB;
csr &= ~(MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAMODE);