summary | shortlog | log | commit | commitdiff | tree
raw | patch | inline | side by side (parent: 1f93f27)
raw | patch | inline | side by side (parent: 1f93f27)
author | Ajay Kumar Gupta <ajay.gupta@ti.com> | |
Thu, 25 Nov 2010 12:55:47 +0000 (18:25 +0530) | ||
committer | Vaibhav Hiremath <hvaibhav@ti.com> | |
Mon, 23 Jan 2012 19:14:08 +0000 (00:44 +0530) |
Current musb host driver does the giveback of completed urb first and
then start the next request. This is significantly affecting the streaming
from a USB camera, wherein we observe a huge delay between two IN tokens
from musb host. This is due to the fact that UVC driver is doing decoding
and further processing in giveback context.
The patch tries to defer the giveback part to a workqueue and continues
with the start of new request in completion path.
Since the giveback workqueue has only successfully completed URBs, it is safe to
giveback URBs without taking musb spinlocks.
Signed-off-by: Ajay Kumar Gupta <ajay.gupta@ti.com>
Signed-off-by: Sriramakrishnan A G <srk@ti.com>
Signed-off-by: Vaibhav Hiremath <hvaibhav@ti.com>
then start the next request. This is significantly affecting the streaming
from a USB camera, wherein we observe a huge delay between two IN tokens
from musb host. This is due to the fact that UVC driver is doing decoding
and further processing in giveback context.
The patch tries to defer the giveback part to a workqueue and continues
with the start of new request in completion path.
Since the giveback workqueue has only successfully completed URBs, it is safe to
giveback URBs without taking musb spinlocks.
Signed-off-by: Ajay Kumar Gupta <ajay.gupta@ti.com>
Signed-off-by: Sriramakrishnan A G <srk@ti.com>
Signed-off-by: Vaibhav Hiremath <hvaibhav@ti.com>
drivers/usb/musb/musb_core.c | patch | blob | history | |
drivers/usb/musb/musb_core.h | patch | blob | history | |
drivers/usb/musb/musb_host.c | patch | blob | history | |
include/linux/usb.h | patch | blob | history |
index 88e661ef4e69b6df56e902a8e8d032eb0f75eda4..0712bc97388e2ca3260108da5f2c57a23fa56870 100644 (file)
INIT_LIST_HEAD(&musb->control);
INIT_LIST_HEAD(&musb->in_bulk);
INIT_LIST_HEAD(&musb->out_bulk);
INIT_LIST_HEAD(&musb->control);
INIT_LIST_HEAD(&musb->in_bulk);
INIT_LIST_HEAD(&musb->out_bulk);
+ INIT_LIST_HEAD(&musb->gb_list);
hcd->uses_new_polling = 1;
hcd->has_tt = 1;
hcd->uses_new_polling = 1;
hcd->has_tt = 1;
musb->ops->dma_controller_destroy(c);
}
musb->ops->dma_controller_destroy(c);
}
+ if (musb->gb_queue)
+ destroy_workqueue(musb->gb_queue);
+
kfree(musb);
}
kfree(musb);
}
pm_runtime_enable(musb->controller);
spin_lock_init(&musb->lock);
pm_runtime_enable(musb->controller);
spin_lock_init(&musb->lock);
+ spin_lock_init(&musb->gb_lock);
musb->board_mode = plat->mode;
musb->board_set_power = plat->set_power;
musb->min_power = plat->min_power;
musb->board_mode = plat->mode;
musb->board_set_power = plat->set_power;
musb->min_power = plat->min_power;
if (status == 0)
musb_debug_create("driver/musb_hdrc", musb);
if (status == 0)
musb_debug_create("driver/musb_hdrc", musb);
+ musb->gb_queue = create_singlethread_workqueue(dev_name(dev));
+ if (musb->gb_queue == NULL)
+ goto fail6;
+ /* Init giveback workqueue */
+ INIT_WORK(&musb->gb_work, musb_gb_work);
+
return 0;
return 0;
+fail6:
+ destroy_workqueue(musb->gb_queue);
+
fail5:
musb_exit_debugfs(musb);
fail5:
musb_exit_debugfs(musb);
index 56b5d2dbe6c71874476f16fcce19c541c78866f1..af8823b63043d472e305549a285d1dbc3ed238a0 100644 (file)
struct list_head in_bulk; /* of musb_qh */
struct list_head out_bulk; /* of musb_qh */
struct list_head in_bulk; /* of musb_qh */
struct list_head out_bulk; /* of musb_qh */
+ struct workqueue_struct *gb_queue;
+ struct work_struct gb_work;
+ spinlock_t gb_lock;
+ struct list_head gb_list; /* of urbs */
+
struct notifier_block nb;
struct dma_controller *dma_controller;
struct notifier_block nb;
struct dma_controller *dma_controller;
#endif
}
#endif
}
+extern void musb_gb_work(struct work_struct *data);
/*-------------------------- ProcFS definitions ---------------------*/
struct proc_dir_entry;
/*-------------------------- ProcFS definitions ---------------------*/
struct proc_dir_entry;
index 8b8a845b80b3af383a456a9374357e265de5c62d..592e1904ceb0de27476a7b0e519741a90c5d52b7 100644 (file)
struct urb *urb, int is_out,
u8 *buf, u32 offset, u32 len);
struct urb *urb, int is_out,
u8 *buf, u32 offset, u32 len);
+void push_queue(struct musb *musb, struct urb *urb)
+{
+ spin_lock(&musb->gb_lock);
+ list_add_tail(&urb->giveback_list, &musb->gb_list);
+ spin_unlock(&musb->gb_lock);
+}
+
+struct urb *pop_queue(struct musb *musb)
+{
+ struct urb *urb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&musb->gb_lock, flags);
+ if (list_empty(&musb->gb_list)) {
+ spin_unlock_irqrestore(&musb->gb_lock, flags);
+ return NULL;
+ }
+ urb = list_entry(musb->gb_list.next, struct urb, giveback_list);
+ list_del(&urb->giveback_list);
+ spin_unlock_irqrestore(&musb->gb_lock, flags);
+
+ return urb;
+}
+
/*
* Clear TX fifo. Needed to avoid BABBLE errors.
*/
/*
* Clear TX fifo. Needed to avoid BABBLE errors.
*/
/* Context: caller owns controller lock, IRQs are blocked */
static void musb_giveback(struct musb *musb, struct urb *urb, int status)
/* Context: caller owns controller lock, IRQs are blocked */
static void musb_giveback(struct musb *musb, struct urb *urb, int status)
-__releases(musb->lock)
-__acquires(musb->lock)
{
dev_dbg(musb->controller,
"complete %p %pF (%d), dev%d ep%d%s, %d/%d\n",
{
dev_dbg(musb->controller,
"complete %p %pF (%d), dev%d ep%d%s, %d/%d\n",
urb->actual_length, urb->transfer_buffer_length
);
urb->actual_length, urb->transfer_buffer_length
);
- usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb);
- spin_unlock(&musb->lock);
usb_hcd_giveback_urb(musb_to_hcd(musb), urb, status);
usb_hcd_giveback_urb(musb_to_hcd(musb), urb, status);
- spin_lock(&musb->lock);
}
/* For bulk/interrupt endpoints only */
}
/* For bulk/interrupt endpoints only */
usb_settoggle(urb->dev, qh->epnum, !is_in, csr ? 1 : 0);
}
usb_settoggle(urb->dev, qh->epnum, !is_in, csr ? 1 : 0);
}
+/* Used to complete urb giveback */
+void musb_gb_work(struct work_struct *data)
+{
+ struct musb *musb = container_of(data, struct musb, gb_work);
+ struct urb *urb;
+
+ while ((urb = pop_queue(musb)) != 0)
+ musb_giveback(musb, urb, 0);
+}
/*
* Advance this hardware endpoint's queue, completing the specified URB and
/*
* Advance this hardware endpoint's queue, completing the specified URB and
break;
}
break;
}
- qh->is_ready = 0;
- musb_giveback(musb, urb, status);
- qh->is_ready = ready;
+ usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb);
+ /* If URB completed with error then giveback first */
+ if (status != 0) {
+ qh->is_ready = 0;
+ spin_unlock(&musb->lock);
+ musb_giveback(musb, urb, status);
+ spin_lock(&musb->lock);
+ qh->is_ready = ready;
+ }
/* reclaim resources (and bandwidth) ASAP; deschedule it, and
* invalidate qh as soon as list_empty(&hep->urb_list)
*/
/* reclaim resources (and bandwidth) ASAP; deschedule it, and
* invalidate qh as soon as list_empty(&hep->urb_list)
*/
hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
musb_start_urb(musb, is_in, qh);
}
hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
musb_start_urb(musb, is_in, qh);
}
+
+ /* if URB is successfully completed then giveback in workqueue */
+ if (status == 0) {
+ push_queue(musb, urb);
+ queue_work(musb->gb_queue, &musb->gb_work);
+ }
}
static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
}
static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
qh = ret ? NULL : hep->hcpriv;
if (qh)
urb->hcpriv = qh;
qh = ret ? NULL : hep->hcpriv;
if (qh)
urb->hcpriv = qh;
+
+ INIT_LIST_HEAD(&urb->giveback_list);
spin_unlock_irqrestore(&musb->lock, flags);
/* DMA mapping was already done, if needed, and this urb is on
spin_unlock_irqrestore(&musb->lock, flags);
/* DMA mapping was already done, if needed, and this urb is on
@@ -2161,8 +2203,12 @@ static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
|| musb_ep_get_qh(qh->hw_ep, is_in) != qh) {
int ready = qh->is_ready;
|| musb_ep_get_qh(qh->hw_ep, is_in) != qh) {
int ready = qh->is_ready;
+ usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb);
+
qh->is_ready = 0;
qh->is_ready = 0;
+ spin_unlock(&musb->lock);
musb_giveback(musb, urb, 0);
musb_giveback(musb, urb, 0);
+ spin_lock(&musb->lock);
qh->is_ready = ready;
/* If nothing else (usually musb_giveback) is using it
qh->is_ready = ready;
/* If nothing else (usually musb_giveback) is using it
* other transfers, and since !qh->is_ready nothing
* will activate any of these as it advances.
*/
* other transfers, and since !qh->is_ready nothing
* will activate any of these as it advances.
*/
- while (!list_empty(&hep->urb_list))
- musb_giveback(musb, next_urb(qh), -ESHUTDOWN);
+ while (!list_empty(&hep->urb_list)) {
+ urb = next_urb(qh);
+ usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb);
+ spin_unlock(&musb->lock);
+ musb_giveback(musb, urb, -ESHUTDOWN);
+ spin_lock(&musb->lock);
+ }
hep->hcpriv = NULL;
list_del(&qh->ring);
hep->hcpriv = NULL;
list_del(&qh->ring);
diff --git a/include/linux/usb.h b/include/linux/usb.h
index d3d0c1374334d58c684e5f1b4d13422aa8f57ae8..1cea2070c5df66943f3804f0ee86522395316e42 100644 (file)
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
struct list_head urb_list; /* list head for use by the urb's
* current owner */
struct list_head anchor_list; /* the URB may be anchored */
struct list_head urb_list; /* list head for use by the urb's
* current owner */
struct list_head anchor_list; /* the URB may be anchored */
+ struct list_head giveback_list; /* to postpone the giveback call */
struct usb_anchor *anchor;
struct usb_device *dev; /* (in) pointer to associated device */
struct usb_host_endpoint *ep; /* (internal) pointer to endpoint */
struct usb_anchor *anchor;
struct usb_device *dev; /* (in) pointer to associated device */
struct usb_host_endpoint *ep; /* (internal) pointer to endpoint */