index a6b314d262f0a1476c51455df05acade44a9b61c..b553d49931b1a8a03e946db1cbb1db054ed6332b 100644 (file)
u16 pkt_size;
u8 transfer_mode;
u8 zlp_queued;
+ u8 inf_mode;
+ u8 tx_complete;
};
/**
struct cppi41_channel tx_cppi_ch[USB_CPPI41_NUM_CH];
struct cppi41_channel rx_cppi_ch[USB_CPPI41_NUM_CH];
+ struct work_struct txdma_work;
struct usb_pkt_desc *pd_pool_head; /* Free PD pool head */
dma_addr_t pd_mem_phys; /* PD memory physical address */
u32 automode_reg_offs; /* USB_AUTOREQ_REG offset */
u32 teardown_reg_offs; /* USB_TEARDOWN_REG offset */
u32 bd_size;
+ u8 inf_mode;
};
struct usb_cppi41_info usb_cppi41_info[2];
cppi_info = cppi->cppi_info;
musb = cppi->musb;
- if (cpu_is_ti81xx()) {
+ if (cpu_is_ti816x() || cpu_is_am33xx()) {
cppi->automode_reg_offs = TI81XX_USB_AUTOREQ_REG;
cppi->teardown_reg_offs = TI81XX_USB_TEARDOWN_REG;
} else {
rx_cfg.default_desc_type = cppi41_rx_host_desc;
rx_cfg.sop_offset = 0;
rx_cfg.retry_starved = 1;
+ rx_cfg.rx_max_buf_cnt = 0;
rx_cfg.rx_queue.q_mgr = cppi_ch->src_queue.q_mgr = q_mgr;
rx_cfg.rx_queue.q_num = cppi_info->rx_comp_q[ch_num];
for (i = 0; i < 4; i++)
* transfer in one PD and one IRQ. The only time we would NOT want
* to use it is when the hardware constraints prevent it...
*/
- if ((pkt_size & 0x3f) == 0 && length > pkt_size) {
- num_pds = 1;
- pkt_size = length;
+ if ((pkt_size & 0x3f) == 0) {
+ num_pds = length ? 1 : 0;
cppi41_mode_update(tx_ch, USB_GENERIC_RNDIS_MODE);
} else {
num_pds = (length + pkt_size - 1) / pkt_size;
cppi41_mode_update(tx_ch, USB_TRANSPARENT_MODE);
}
+ pkt_size = length;
/*
* If length of transmit buffer is 0 or a multiple of the endpoint size,
* then send the zero length packet.
struct cppi41 *cppi = rx_ch->channel.private_data;
void *__iomem reg_base = cppi->musb->ctrl_base;
u8 ep_num = rx_ch->ch_num + 1;
+ u32 res = pkt_size % 64;
+
+ /* epsize register must be multiple of 64 */
+ pkt_size += res ? (64 - res) : res;
musb_writel(reg_base, USB_GENERIC_RNDIS_EP_SIZE_REG(ep_num), pkt_size);
}
u32 max_rx_transfer_size = 64 * 1024;
u32 i, n_bd , pkt_len;
struct usb_gadget_driver *gadget_driver;
- u8 en_bd_intr = cppi->en_bd_intr;
+ u8 en_bd_intr = cppi->en_bd_intr, mode;
if (is_peripheral_active(cppi->musb)) {
/* TODO: temporary fix for CDC/RNDIS which needs to be in
* GENERIC_RNDIS mode. Without this RNDIS gadget taking
* more then 2K ms for a 64 byte pings.
*/
-#if defined(CONFIG_USB_GADGET_MUSB_HDRC) || defined(CONFIG_USB_GADGET_MUSB_HDRC_MODULE)
gadget_driver = cppi->musb->gadget_driver;
-#endif
- if (!strcmp(gadget_driver->driver.name, "g_file_storage"))
- max_rx_transfer_size = rx_ch->pkt_size;
- pkt_len = 0;
- if (rx_ch->length < max_rx_transfer_size)
- pkt_len = rx_ch->length;
- cppi41_set_ep_size(rx_ch, pkt_len);
- cppi41_mode_update(rx_ch, USB_GENERIC_RNDIS_MODE);
+
+ pkt_len = rx_ch->pkt_size;
+ mode = USB_GENERIC_RNDIS_MODE;
+ if (!strcmp(gadget_driver->driver.name, "g_file_storage")) {
+ if (cppi->inf_mode && length > pkt_len) {
+ pkt_len = 0;
+ length = length - rx_ch->pkt_size;
+ cppi41_rx_ch_set_maxbufcnt(&rx_ch->dma_ch_obj,
+ DMA_CH_RX_MAX_BUF_CNT_1);
+ rx_ch->inf_mode = 1;
+ } else {
+ max_rx_transfer_size = rx_ch->pkt_size;
+ mode = USB_TRANSPARENT_MODE;
+ }
+ } else
+ if (rx_ch->length < max_rx_transfer_size)
+ pkt_len = rx_ch->length;
+
+ if (mode != USB_TRANSPARENT_MODE)
+ cppi41_set_ep_size(rx_ch, pkt_len);
+ cppi41_mode_update(rx_ch, mode);
} else {
/*
* Rx can use the generic RNDIS mode where we can
* probably fit this transfer in one PD and one IRQ
* (or two with a short packet).
*/
- if ((pkt_size & 0x3f) == 0 && length >= 2 * pkt_size) {
+ if ((pkt_size & 0x3f) == 0) {
cppi41_mode_update(rx_ch, USB_GENERIC_RNDIS_MODE);
cppi41_autoreq_update(rx_ch, USB_AUTOREQ_ALL_BUT_EOP);
- if (likely(length < 0x10000))
- pkt_size = length - length % pkt_size;
- else
- pkt_size = 0x10000;
+ pkt_size = (length > 0x10000) ? 0x10000 : length;
cppi41_set_ep_size(rx_ch, pkt_size);
} else {
cppi41_mode_update(rx_ch, USB_TRANSPARENT_MODE);
cppi41_autoreq_update(rx_ch, USB_NO_AUTOREQ);
+ max_rx_transfer_size = rx_ch->pkt_size;
}
}
if (pd_addr) {
- DBG(1, "Descriptor (%08lx) popped from teardown completion "
+ dev_dbg(musb->controller, "Descriptor (%08lx) popped from teardown completion "
"queue\n", pd_addr);
if (usb_check_teardown(tx_ch, pd_addr)) {
- DBG(1, "Teardown Desc (%lx) rcvd\n", pd_addr);
+ dev_dbg(musb->controller, "Teardown Desc (%lx) rcvd\n", pd_addr);
} else
ERR("Invalid PD(%08lx)popped from TearDn completion"
"queue\n", pd_addr);
continue;
}
- DBG(1, "Tx-PD(%p) popped from completion queue\n", curr_pd);
- DBG(1, "ch(%d)epnum(%d)len(%d)\n", curr_pd->ch_num,
+ dev_dbg(musb->controller, "Tx-PD(%p) popped from completion queue\n", curr_pd);
+ dev_dbg(musb->controller, "ch(%d)epnum(%d)len(%d)\n", curr_pd->ch_num,
curr_pd->ep_num, curr_pd->hw_desc.buf_len);
usb_put_free_pd(cppi, curr_pd);
static void usb_rx_ch_teardown(struct cppi41_channel *rx_ch)
{
struct cppi41 *cppi = rx_ch->channel.private_data;
+ struct musb *musb = cppi->musb;
struct usb_cppi41_info *cppi_info = cppi->cppi_info;
u32 timeout = 0xfffff, pd_addr;
struct cppi41_queue_obj rx_queue_obj;
break;
}
- DBG(1, "Descriptor (%08lx) popped from teardown completion "
+ dev_dbg(musb->controller, "Descriptor (%08lx) popped from teardown completion "
"queue\n", pd_addr);
/*
continue;
}
- DBG(1, "Rx-PD(%p) popped from completion queue\n", curr_pd);
- DBG(1, "ch(%d)epnum(%d)len(%d)\n", curr_pd->ch_num,
+ dev_dbg(musb->controller, "Rx-PD(%p) popped from completion queue\n", curr_pd);
+ dev_dbg(musb->controller, "ch(%d)epnum(%d)len(%d)\n", curr_pd->ch_num,
curr_pd->ep_num, curr_pd->hw_desc.buf_len);
usb_put_free_pd(cppi, curr_pd);
csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_H_WZC_BITS;
musb_writew(epio, MUSB_TXCSR, csr);
musb_writew(epio, MUSB_TXCSR, csr);
+ cppi_ch->tx_complete = 0;
} else { /* Rx */
dprintk("Rx channel teardown, cppi_ch = %p\n", cppi_ch);
return 0;
}
+/*
+ * txdma_completion_work - deferred Tx DMA completion handling.
+ *
+ * Scans every Tx channel flagged by the DMA completion path
+ * (tx_ch->tx_complete) and reports completion to the MUSB core only
+ * once the endpoint FIFO has actually drained (neither TXPKTRDY nor
+ * FIFONOTEMPTY set).  Channels still draining trigger one more pass
+ * over all channels after a cond_resched().
+ */
+void txdma_completion_work(struct work_struct *data)
+{
+	struct cppi41 *cppi = container_of(data, struct cppi41, txdma_work);
+	struct cppi41_channel *tx_ch;
+	struct musb *musb = cppi->musb;
+	unsigned index;
+	u8 resched = 0;
+	unsigned long flags;
+
+	while (1) {
+		for (index = 0; index < USB_CPPI41_NUM_CH; index++) {
+			void __iomem *epio;
+			u16 csr;
+
+			tx_ch = &cppi->tx_cppi_ch[index];
+			spin_lock_irqsave(&musb->lock, flags);
+			if (tx_ch->tx_complete) {
+				/* Sometimes an EP can unregister from a DMA
+				 * channel while the data is still in the FIFO.
+				 * Probable reason: a proper abort was not
+				 * called before taking such a step.
+				 * Protect against such cases.
+				 */
+				if (!tx_ch->end_pt) {
+					tx_ch->tx_complete = 0;
+					/* drop the lock before skipping to the
+					 * next channel, else the next
+					 * spin_lock_irqsave() self-deadlocks
+					 */
+					spin_unlock_irqrestore(&musb->lock,
+							flags);
+					continue;
+				}
+
+				epio = tx_ch->end_pt->regs;
+				csr = musb_readw(epio, MUSB_TXCSR);
+
+				if (csr & (MUSB_TXCSR_TXPKTRDY |
+					MUSB_TXCSR_FIFONOTEMPTY)) {
+					/* FIFO not drained yet; poll again */
+					resched = 1;
+				} else {
+					tx_ch->channel.status =
+						MUSB_DMA_STATUS_FREE;
+					tx_ch->tx_complete = 0;
+					musb_dma_completion(musb, index+1, 1);
+				}
+			}
+			spin_unlock_irqrestore(&musb->lock, flags);
+
+			if (!resched)
+				cond_resched();
+		}
+
+		if (resched) {
+			resched = 0;
+			cond_resched();
+		} else {
+			return;
+		}
+	}
+
+}
+
+
/**
* cppi41_dma_controller_create -
* instantiate an object representing DMA controller.
cppi->controller.channel_abort = cppi41_channel_abort;
cppi->cppi_info = (struct usb_cppi41_info *)&usb_cppi41_info[musb->id];;
cppi->en_bd_intr = cppi->cppi_info->bd_intr_ctrl;
+ INIT_WORK(&cppi->txdma_work, txdma_completion_work);
+
+ /* enable infinite mode only for ti81xx silicon rev2 */
+ if (cpu_is_am33xx() || cpu_is_ti816x()) {
+ dev_dbg(musb->controller, "cppi41dma supports infinite mode\n");
+ cppi->inf_mode = 1;
+ }
return &cppi->controller;
}
(tx_ch->transfer_mode && !tx_ch->zlp_queued))
cppi41_next_tx_segment(tx_ch);
else if (tx_ch->channel.actual_len >= tx_ch->length) {
- tx_ch->channel.status = MUSB_DMA_STATUS_FREE;
-
/*
* We get Tx DMA completion interrupt even when
* data is still in FIFO and not moved out to
* USB functionality. So far, we have obsered
* failure with iperf.
*/
- udelay(20);
- /* Tx completion routine callback */
- musb_dma_completion(cppi->musb, ep_num, 1);
+ tx_ch->tx_complete = 1;
+ schedule_work(&cppi->txdma_work);
}
}
}
struct usb_pkt_desc *curr_pd;
struct cppi41_channel *rx_ch;
u8 ch_num, ep_num;
- u32 length, orig_buf_len, timeout = 50;
+ u32 length = 0, orig_buf_len, timeout = 50;
curr_pd = usb_get_pd_ptr(cppi, pd_addr);
if (curr_pd == NULL) {
ch_num = curr_pd->ch_num;
ep_num = curr_pd->ep_num;
- DBG(4, "Rx complete: dma channel(%d) ep%d len %d timeout %d\n",
+ dev_dbg(musb->controller, "Rx complete: dma channel(%d) ep%d len %d timeout %d\n",
ch_num, ep_num, length, (50-timeout));
rx_ch = &cppi->rx_cppi_ch[ch_num];
if (curr_pd->eop) {
curr_pd->eop = 0;
/* disable the rx dma schedular */
- if (is_peripheral_active(cppi->musb))
+ if (is_peripheral_active(cppi->musb) && !cppi->inf_mode)
cppi41_schedtbl_remove_dma_ch(0, 0, ch_num, 0);
}
if (unlikely(rx_ch->channel.actual_len >= rx_ch->length ||
length < orig_buf_len)) {
-#ifdef CONFIG_SOC_OMAPTI81XX
+#if defined(CONFIG_SOC_OMAPTI81XX) || defined(CONFIG_SOC_OMAPAM33XX)
struct musb_hw_ep *ep;
u8 isoc, next_seg = 0;
/* Workaround for early rx completion of
* cppi41 dma in Generic RNDIS mode for ti81xx
*/
- if (cpu_is_ti81xx() && is_host_enabled(cppi->musb)) {
+ if (is_host_enabled(cppi->musb)) {
u32 pkt_size = rx_ch->pkt_size;
ep = cppi->musb->endpoints + ep_num;
isoc = musb_readb(ep->regs, MUSB_RXTYPE);
{
rx_ch->channel.status = MUSB_DMA_STATUS_FREE;
+ if (rx_ch->inf_mode) {
+ cppi41_rx_ch_set_maxbufcnt(
+ &rx_ch->dma_ch_obj, 0);
+ rx_ch->inf_mode = 0;
+ }
/* Rx completion routine callback */
musb_dma_completion(cppi->musb, ep_num, 0);
}