Diffstat (limited to 'drivers/usb/host/xhci.c')
 drivers/usb/host/xhci.c | 126 ++++++++++++++++++++++++++++++++----------------
 1 file changed, 88 insertions(+), 38 deletions(-)
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index b217456b0ac7..f0c557052f2f 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -66,7 +66,7 @@ static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring)
  * handshake done). There are two failure modes: "usec" have passed (major
  * hardware flakeout), or the register reads as all-ones (hardware removed).
  */
-int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec)
+int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us)
 {
 	u32 result;
 	int ret;
@@ -74,7 +74,7 @@ int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec)
 	ret = readl_poll_timeout_atomic(ptr, result,
 					(result & mask) == done ||
 					result == U32_MAX,
-					1, usec);
+					1, timeout_us);
 	if (result == U32_MAX)		/* card removed */
 		return -ENODEV;
 
@@ -163,7 +163,7 @@ int xhci_start(struct xhci_hcd *xhci)
  * Transactions will be terminated immediately, and operational registers
  * will be set to their defaults.
  */
-int xhci_reset(struct xhci_hcd *xhci)
+int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us)
 {
 	u32 command;
 	u32 state;
@@ -196,8 +196,7 @@ int xhci_reset(struct xhci_hcd *xhci)
 	if (xhci->quirks & XHCI_INTEL_HOST)
 		udelay(1000);
 
-	ret = xhci_handshake(&xhci->op_regs->command,
-			CMD_RESET, 0, 10 * 1000 * 1000);
+	ret = xhci_handshake(&xhci->op_regs->command, CMD_RESET, 0, timeout_us);
 	if (ret)
 		return ret;
 
@@ -210,8 +209,7 @@ int xhci_reset(struct xhci_hcd *xhci)
 	 * xHCI cannot write to any doorbells or operational registers other
 	 * than status until the "Controller Not Ready" flag is cleared.
 	 */
-	ret = xhci_handshake(&xhci->op_regs->status,
-			STS_CNR, 0, 10 * 1000 * 1000);
+	ret = xhci_handshake(&xhci->op_regs->status, STS_CNR, 0, timeout_us);
 
 	for (i = 0; i < 2; i++) {
 		xhci->bus_state[i].port_c_suspend = 0;
@@ -227,6 +225,7 @@ static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
 	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
 	int err, i;
 	u64 val;
+	u32 intrs;
 
 	/*
 	 * Some Renesas controllers get into a weird state if they are
@@ -265,7 +264,10 @@ static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
 	if (upper_32_bits(val))
 		xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);
 
-	for (i = 0; i < HCS_MAX_INTRS(xhci->hcs_params1); i++) {
+	intrs = min_t(u32, HCS_MAX_INTRS(xhci->hcs_params1),
+		      ARRAY_SIZE(xhci->run_regs->ir_set));
+
+	for (i = 0; i < intrs; i++) {
 		struct xhci_intr_reg __iomem *ir;
 
 		ir = &xhci->run_regs->ir_set[i];
@@ -727,7 +729,7 @@ static void xhci_stop(struct usb_hcd *hcd)
 	xhci->xhc_state |= XHCI_STATE_HALTED;
 	xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
 	xhci_halt(xhci);
-	xhci_reset(xhci);
+	xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
 	spin_unlock_irq(&xhci->lock);
 
 	xhci_cleanup_msix(xhci);
@@ -776,11 +778,22 @@ void xhci_shutdown(struct usb_hcd *hcd)
 	if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
 		usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev));
 
+	/* Don't poll the roothubs after shutdown. */
+	xhci_dbg(xhci, "%s: stopping usb%d port polling.\n",
+			__func__, hcd->self.busnum);
+	clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+	del_timer_sync(&hcd->rh_timer);
+
+	if (xhci->shared_hcd) {
+		clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
+		del_timer_sync(&xhci->shared_hcd->rh_timer);
+	}
+
 	spin_lock_irq(&xhci->lock);
 	xhci_halt(xhci);
 	/* Workaround for spurious wakeups at shutdown with HSW */
 	if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
-		xhci_reset(xhci);
+		xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
 	spin_unlock_irq(&xhci->lock);
 
 	xhci_cleanup_msix(xhci);
@@ -1078,6 +1091,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
 	struct usb_hcd *secondary_hcd;
 	int retval = 0;
 	bool comp_timer_running = false;
+	bool pending_portevent = false;
+	bool reinit_xhc = false;
 
 	if (!hcd->state)
 		return 0;
@@ -1094,10 +1109,11 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
 	set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
 
 	spin_lock_irq(&xhci->lock);
-	if ((xhci->quirks & XHCI_RESET_ON_RESUME) || xhci->broken_suspend)
-		hibernated = true;
 
-	if (!hibernated) {
+	if (hibernated || xhci->quirks & XHCI_RESET_ON_RESUME || xhci->broken_suspend)
+		reinit_xhc = true;
+
+	if (!reinit_xhc) {
 		/*
 		 * Some controllers might lose power during suspend, so wait
 		 * for controller not ready bit to clear, just as in xHC init.
@@ -1130,12 +1146,17 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
 			spin_unlock_irq(&xhci->lock);
 			return -ETIMEDOUT;
 		}
-		temp = readl(&xhci->op_regs->status);
 	}
 
-	/* If restore operation fails, re-initialize the HC during resume */
-	if ((temp & STS_SRE) || hibernated) {
+	temp = readl(&xhci->op_regs->status);
+
+	/* re-initialize the HC on Restore Error, or Host Controller Error */
+	if (temp & (STS_SRE | STS_HCE)) {
+		reinit_xhc = true;
+		xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp);
+	}
 
+	if (reinit_xhc) {
 		if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
 				!(xhci_all_ports_seen_u0(xhci))) {
 			del_timer_sync(&xhci->comp_mode_recovery_timer);
@@ -1150,7 +1171,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
 		xhci_dbg(xhci, "Stop HCD\n");
 		xhci_halt(xhci);
 		xhci_zero_64b_regs(xhci);
-		retval = xhci_reset(xhci);
+		retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
 		spin_unlock_irq(&xhci->lock);
 		if (retval)
 			return retval;
@@ -1216,13 +1237,22 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
 
  done:
 	if (retval == 0) {
-		/* Resume root hubs only when have pending events. */
-		if (xhci_pending_portevent(xhci)) {
+		/*
+		 * Resume roothubs only if there are pending events.
+		 * USB 3 devices resend U3 LFPS wake after a 100ms delay if
+		 * the first wake signalling failed, give it that chance.
+		 */
+		pending_portevent = xhci_pending_portevent(xhci);
+		if (!pending_portevent) {
+			msleep(120);
+			pending_portevent = xhci_pending_portevent(xhci);
+		}
+
+		if (pending_portevent) {
 			usb_hcd_resume_root_hub(xhci->shared_hcd);
 			usb_hcd_resume_root_hub(hcd);
 		}
 	}
-
 	/*
 	 * If system is subject to the Quirk, Compliance Mode Timer needs to
 	 * be re-initialized Always after a system resume. Ports are subject
@@ -1360,7 +1390,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
  * we need to issue an evaluate context command and wait on it.
  */
 static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
-		unsigned int ep_index, struct urb *urb)
+		unsigned int ep_index, struct urb *urb, gfp_t mem_flags)
 {
 	struct xhci_container_ctx *out_ctx;
 	struct xhci_input_control_ctx *ctrl_ctx;
@@ -1391,7 +1421,7 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
 	 * changes max packet sizes.
 	 */
 
-	command = xhci_alloc_command(xhci, true, GFP_KERNEL);
+	command = xhci_alloc_command(xhci, true, mem_flags);
 	if (!command)
 		return -ENOMEM;
 
@@ -1443,9 +1473,12 @@ static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
 	struct urb_priv *urb_priv;
 	int num_tds;
 
-	if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
-					true, true, __func__) <= 0)
+	if (!urb)
 		return -EINVAL;
+	ret = xhci_check_args(hcd, urb->dev, urb->ep,
+					true, true, __func__);
+	if (ret <= 0)
+		return ret ? ret : -EINVAL;
 
 	slot_id = urb->dev->slot_id;
 	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
@@ -1488,7 +1521,7 @@ static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
 	 */
 	if (urb->dev->speed == USB_SPEED_FULL) {
 		ret = xhci_check_maxpacket(xhci, slot_id,
-				ep_index, urb);
+				ep_index, urb, mem_flags);
 		if (ret < 0) {
 			xhci_urb_free_priv(urb_priv);
 			urb->hcpriv = NULL;
@@ -3079,10 +3112,13 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
 	ep = &vdev->eps[ep_index];
 
 	/* Bail out if toggle is already being cleared by a endpoint reset */
+	spin_lock_irqsave(&xhci->lock, flags);
 	if (ep->ep_state & EP_HARD_CLEAR_TOGGLE) {
 		ep->ep_state &= ~EP_HARD_CLEAR_TOGGLE;
+		spin_unlock_irqrestore(&xhci->lock, flags);
 		return;
 	}
+	spin_unlock_irqrestore(&xhci->lock, flags);
 	/* Only interrupt and bulk ep's use data toggle, USB2 spec 5.5.4-> */
 	if (usb_endpoint_xfer_control(&host_ep->desc) ||
 	    usb_endpoint_xfer_isoc(&host_ep->desc))
@@ -3138,6 +3174,14 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
 
 	/* config ep command clears toggle if add and drop ep flags are set */
 	ctrl_ctx = xhci_get_input_control_ctx(cfg_cmd->in_ctx);
+	if (!ctrl_ctx) {
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		xhci_free_command(xhci, cfg_cmd);
+		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
+				__func__);
+		goto cleanup;
+	}
+
 	xhci_setup_input_ctx_for_config_ep(xhci, cfg_cmd->in_ctx, vdev->out_ctx,
 					   ctrl_ctx, ep_flag, ep_flag);
 	xhci_endpoint_copy(xhci, cfg_cmd->in_ctx, vdev->out_ctx, ep_index);
@@ -3160,8 +3204,10 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
 	xhci_free_command(xhci, cfg_cmd);
 cleanup:
 	xhci_free_command(xhci, stop_cmd);
+	spin_lock_irqsave(&xhci->lock, flags);
 	if (ep->ep_state & EP_SOFT_CLEAR_TOGGLE)
 		ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE;
+	spin_unlock_irqrestore(&xhci->lock, flags);
 }
 
 static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
@@ -3176,7 +3222,7 @@ static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
 		return -EINVAL;
 	ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
 	if (ret <= 0)
-		return -EINVAL;
+		return ret ? ret : -EINVAL;
 	if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) {
 		xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
 				" descriptor for ep 0x%x does not support streams\n",
@@ -3783,7 +3829,6 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
 	struct xhci_slot_ctx *slot_ctx;
 	int i, ret;
 
-#ifndef CONFIG_USB_DEFAULT_PERSIST
 	/*
 	 * We called pm_runtime_get_noresume when the device was attached.
 	 * Decrement the counter here to allow controller to runtime suspend
@@ -3791,7 +3836,6 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
 	 */
 	if (xhci->quirks & XHCI_RESET_ON_RESUME)
 		pm_runtime_put_noidle(hcd->self.controller);
-#endif
 
 	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
 	/* If the host is halted due to driver unload, we still need to free the
@@ -3811,9 +3855,8 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
 	}
 	xhci_debugfs_remove_slot(xhci, udev->slot_id);
 	virt_dev->udev = NULL;
-	ret = xhci_disable_slot(xhci, udev->slot_id);
-	if (ret)
-		xhci_free_virt_device(xhci, udev->slot_id);
+	xhci_disable_slot(xhci, udev->slot_id);
+	xhci_free_virt_device(xhci, udev->slot_id);
 }
 
 int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
@@ -3823,7 +3866,7 @@ int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
 	u32 state;
 	int ret = 0;
 
-	command = xhci_alloc_command(xhci, false, GFP_KERNEL);
+	command = xhci_alloc_command(xhci, true, GFP_KERNEL);
 	if (!command)
 		return -ENOMEM;
 
@@ -3846,6 +3889,15 @@ int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
 	}
 	xhci_ring_cmd_db(xhci);
 	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	wait_for_completion(command->completion);
+
+	if (command->status != COMP_SUCCESS)
+		xhci_warn(xhci, "Unsuccessful disable slot %u command, status %d\n",
+			  slot_id, command->status);
+
+	xhci_free_command(xhci, command);
+
 	return ret;
 }
 
@@ -3942,23 +3994,20 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
 
 	xhci_debugfs_create_slot(xhci, slot_id);
 
-#ifndef CONFIG_USB_DEFAULT_PERSIST
 	/*
 	 * If resetting upon resume, we can't put the controller into runtime
 	 * suspend if there is a device attached.
 	 */
 	if (xhci->quirks & XHCI_RESET_ON_RESUME)
 		pm_runtime_get_noresume(hcd->self.controller);
-#endif
 
 	/* Is this a LS or FS device under a HS hub? */
 	/* Hub or peripherial? */
 	return 1;
 
 disable_slot:
-	ret = xhci_disable_slot(xhci, udev->slot_id);
-	if (ret)
-		xhci_free_virt_device(xhci, udev->slot_id);
+	xhci_disable_slot(xhci, udev->slot_id);
+	xhci_free_virt_device(xhci, udev->slot_id);
 
 	return 0;
 }
@@ -4087,6 +4136,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
 
 		mutex_unlock(&xhci->mutex);
 		ret = xhci_disable_slot(xhci, udev->slot_id);
+		xhci_free_virt_device(xhci, udev->slot_id);
 		if (!ret)
 			xhci_alloc_dev(hcd, udev);
 		kfree(command->completion);
@@ -5149,7 +5199,7 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
 
 	xhci_dbg(xhci, "Resetting HCD\n");
 	/* Reset the internal HC memory state and registers. */
-	retval = xhci_reset(xhci);
+	retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
 	if (retval)
 		return retval;
 	xhci_dbg(xhci, "Reset complete\n");