Diffstat (limited to 'drivers/usb/host/xhci-ring.c')
-rw-r--r--  drivers/usb/host/xhci-ring.c  |  350
1 file changed, 287 insertions(+), 63 deletions(-)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 173c39c76489..ee7bc7ecbc59 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -306,7 +306,7 @@ static void ring_ep_doorbell(struct xhci_hcd *xhci,
/* Don't ring the doorbell for this endpoint if there are pending
* cancellations because we don't want to interrupt processing.
*/
- if (!ep->cancels_pending && !(ep_state & SET_DEQ_PENDING)
+ if (!(ep_state & EP_HALT_PENDING) && !(ep_state & SET_DEQ_PENDING)
&& !(ep_state & EP_HALTED)) {
field = xhci_readl(xhci, db_addr) & DB_MASK;
xhci_writel(xhci, field | EPI_TO_DB(ep_index), db_addr);
@@ -475,6 +475,35 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
ep->ep_state |= SET_DEQ_PENDING;
}
+static inline void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
+ struct xhci_virt_ep *ep)
+{
+ ep->ep_state &= ~EP_HALT_PENDING;
+ /* Can't del_timer_sync in interrupt, so we attempt to cancel. If the
+ * timer is running on another CPU, we don't decrement stop_cmds_pending
+ * (since we didn't successfully stop the watchdog timer).
+ */
+ if (del_timer(&ep->stop_cmd_timer))
+ ep->stop_cmds_pending--;
+}
+
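
As an aside, the del_timer() choice above deserves a standalone illustration. A minimal sketch of the same cancel-from-IRQ pattern in isolation, assuming the classic kernel timer API; the names watchdog, pending_count, and cancel_watchdog_in_irq are hypothetical:

#include <linux/timer.h>

static struct timer_list watchdog;      /* hypothetical watchdog timer */
static int pending_count;               /* timers armed but not yet run */

/* Called in IRQ context with a spinlock held.  del_timer_sync() would
 * risk deadlock here: it waits for a running handler to finish, and
 * that handler may be spinning on the very lock this context holds.
 * del_timer() merely attempts deactivation: it returns 1 if it removed
 * a pending timer, 0 if the handler already started on another CPU.
 */
static void cancel_watchdog_in_irq(void)
{
        if (del_timer(&watchdog))
                pending_count--;        /* timer truly cancelled */
        /* else: the handler is running and accounts for itself */
}
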
+/* Must be called with xhci->lock held in interrupt context */
+static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
+ struct xhci_td *cur_td, int status, char *adjective)
+{
+ struct usb_hcd *hcd = xhci_to_hcd(xhci);
+
+ cur_td->urb->hcpriv = NULL;
+ usb_hcd_unlink_urb_from_ep(hcd, cur_td->urb);
+ xhci_dbg(xhci, "Giveback %s URB %p\n", adjective, cur_td->urb);
+
+ spin_unlock(&xhci->lock);
+ usb_hcd_giveback_urb(hcd, cur_td->urb, status);
+ kfree(cur_td);
+ spin_lock(&xhci->lock);
+ xhci_dbg(xhci, "%s URB given back\n", adjective);
+}
+
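
The unlock/relock around usb_hcd_giveback_urb() is load-bearing: the giveback invokes the URB's completion handler synchronously, and completion handlers routinely resubmit. A hypothetical class-driver handler shows the re-entry (sample_complete is made up for illustration):

#include <linux/usb.h>

/* Hypothetical completion handler.  usb_submit_urb() re-enters the
 * HCD's urb_enqueue method, which takes xhci->lock; if the giveback
 * path still held that lock, this resubmission would deadlock.
 */
static void sample_complete(struct urb *urb)
{
        if (urb->status == 0)
                usb_submit_urb(urb, GFP_ATOMIC);  /* re-enters the HCD */
}
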
/*
* When we get a command completion for a Stop Endpoint Command, we need to
* unlink any cancelled TDs from the ring. There are two ways to do that:
@@ -497,9 +526,6 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
struct xhci_td *last_unlinked_td;
struct xhci_dequeue_state deq_state;
-#ifdef CONFIG_USB_HCD_STAT
- ktime_t stop_time = ktime_get();
-#endif
memset(&deq_state, 0, sizeof(deq_state));
slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
@@ -507,8 +533,11 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
ep = &xhci->devs[slot_id]->eps[ep_index];
ep_ring = ep->ring;
- if (list_empty(&ep->cancelled_td_list))
+ if (list_empty(&ep->cancelled_td_list)) {
+ xhci_stop_watchdog_timer_in_irq(xhci, ep);
+ ring_ep_doorbell(xhci, slot_id, ep_index);
return;
+ }
/* Fix up the ep ring first, so HW stops executing cancelled TDs.
* We have the xHCI lock, so nothing can modify this list until we drop
@@ -535,9 +564,9 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
* the cancelled TD list for URB completion later.
*/
list_del(&cur_td->td_list);
- ep->cancels_pending--;
}
last_unlinked_td = cur_td;
+ xhci_stop_watchdog_timer_in_irq(xhci, ep);
/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
@@ -561,27 +590,136 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
list_del(&cur_td->cancelled_td_list);
/* Clean up the cancelled URB */
-#ifdef CONFIG_USB_HCD_STAT
- hcd_stat_update(xhci->tp_stat, cur_td->urb->actual_length,
- ktime_sub(stop_time, cur_td->start_time));
-#endif
- cur_td->urb->hcpriv = NULL;
- usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), cur_td->urb);
-
- xhci_dbg(xhci, "Giveback cancelled URB %p\n", cur_td->urb);
- spin_unlock(&xhci->lock);
/* Doesn't matter what we pass for status, since the core will
* just overwrite it (because the URB has been unlinked).
*/
- usb_hcd_giveback_urb(xhci_to_hcd(xhci), cur_td->urb, 0);
- kfree(cur_td);
+ xhci_giveback_urb_in_irq(xhci, cur_td, 0, "cancelled");
- spin_lock(&xhci->lock);
+ /* Stop processing the cancelled list if the watchdog timer is
+ * running.
+ */
+ if (xhci->xhc_state & XHCI_STATE_DYING)
+ return;
} while (cur_td != last_unlinked_td);
/* Return to the event handler with xhci->lock re-acquired */
}
+/* Watchdog timer function for when a stop endpoint command fails to complete.
+ * In this case, we assume the host controller is broken or dying or dead. The
+ * host may still be completing some other events, so we have to be careful to
+ * let the event ring handler and the URB dequeueing/enqueueing functions know
+ * through xhci->state.
+ *
+ * The timer may also fire if the host takes a very long time to respond to the
+ * command, and the stop endpoint command completion handler cannot delete the
+ * timer before the timer function is called. Another endpoint cancellation may
+ * sneak in before the timer function can grab the lock, and that may queue
+ * another stop endpoint command and add the timer back. So we cannot use a
+ * simple flag to say whether there is a pending stop endpoint command for a
+ * particular endpoint.
+ *
+ * Instead we use a combination of that flag and a counter for the number of
+ * pending stop endpoint commands. If the timer is the tail end of the last
+ * stop endpoint command, and the endpoint's command is still pending, we assume
+ * the host is dying.
+ */
+void xhci_stop_endpoint_command_watchdog(unsigned long arg)
+{
+ struct xhci_hcd *xhci;
+ struct xhci_virt_ep *ep;
+ struct xhci_virt_ep *temp_ep;
+ struct xhci_ring *ring;
+ struct xhci_td *cur_td;
+ int ret, i, j;
+
+ ep = (struct xhci_virt_ep *) arg;
+ xhci = ep->xhci;
+
+ spin_lock(&xhci->lock);
+
+ ep->stop_cmds_pending--;
+ if (xhci->xhc_state & XHCI_STATE_DYING) {
+ xhci_dbg(xhci, "Stop EP timer ran, but another timer marked "
+ "xHCI as DYING, exiting.\n");
+ spin_unlock(&xhci->lock);
+ return;
+ }
+ if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
+ xhci_dbg(xhci, "Stop EP timer ran, but no command pending, "
+ "exiting.\n");
+ spin_unlock(&xhci->lock);
+ return;
+ }
+
+ xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
+ xhci_warn(xhci, "Assuming host is dying, halting host.\n");
+ /* Oops, HC is dead or dying or at least not responding to the stop
+ * endpoint command.
+ */
+ xhci->xhc_state |= XHCI_STATE_DYING;
+ /* Disable interrupts from the host controller and start halting it */
+ xhci_quiesce(xhci);
+ spin_unlock(&xhci->lock);
+
+ ret = xhci_halt(xhci);
+
+ spin_lock(&xhci->lock);
+ if (ret < 0) {
+ /* This is bad; the host is not responding to commands and it's
+ * not allowing itself to be halted. At least interrupts are
+ * disabled, so we can set HC_STATE_HALT and notify the
+ * USB core. But if we call usb_hc_died(), it will attempt to
+ * disconnect all device drivers under this host. Those
+ * disconnect() methods will wait for all URBs to be unlinked,
+ * so we must complete them.
+ */
+ xhci_warn(xhci, "Non-responsive xHCI host is not halting.\n");
+ xhci_warn(xhci, "Completing active URBs anyway.\n");
+ /* We could turn all TDs on the rings to no-ops. This won't
+ * help if the host has cached part of the ring, and is slow if
+ * we want to preserve the cycle bit. Skip it and hope the host
+ * doesn't touch the memory.
+ */
+ }
+ for (i = 0; i < MAX_HC_SLOTS; i++) {
+ if (!xhci->devs[i])
+ continue;
+ for (j = 0; j < 31; j++) {
+ temp_ep = &xhci->devs[i]->eps[j];
+ ring = temp_ep->ring;
+ if (!ring)
+ continue;
+ xhci_dbg(xhci, "Killing URBs for slot ID %u, "
+ "ep index %u\n", i, j);
+ while (!list_empty(&ring->td_list)) {
+ cur_td = list_first_entry(&ring->td_list,
+ struct xhci_td,
+ td_list);
+ list_del(&cur_td->td_list);
+ if (!list_empty(&cur_td->cancelled_td_list))
+ list_del(&cur_td->cancelled_td_list);
+ xhci_giveback_urb_in_irq(xhci, cur_td,
+ -ESHUTDOWN, "killed");
+ }
+ while (!list_empty(&temp_ep->cancelled_td_list)) {
+ cur_td = list_first_entry(
+ &temp_ep->cancelled_td_list,
+ struct xhci_td,
+ cancelled_td_list);
+ list_del(&cur_td->cancelled_td_list);
+ xhci_giveback_urb_in_irq(xhci, cur_td,
+ -ESHUTDOWN, "killed");
+ }
+ }
+ }
+ spin_unlock(&xhci->lock);
+ xhci_to_hcd(xhci)->state = HC_STATE_HALT;
+ xhci_dbg(xhci, "Calling usb_hc_died()\n");
+ usb_hc_died(xhci_to_hcd(xhci));
+ xhci_dbg(xhci, "xHCI host controller is dead.\n");
+}
+
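
The arming half of this flag-and-counter protocol is not part of this file's diff; it belongs in the URB cancellation path. A sketch of what queueing a Stop Endpoint command with the watchdog armed presumably looks like -- the timeout constant and the exact call site (xhci_urb_dequeue()) are assumptions, not taken from this patch:

/* Sketch: arming the watchdog when URBs are cancelled (illustrative) */
ep->ep_state |= EP_HALT_PENDING;        /* stop command now outstanding */
ep->stop_cmds_pending++;                /* one more timer in flight */
ep->stop_cmd_timer.expires = jiffies +
                XHCI_STOP_EP_CMD_TIMEOUT * HZ;  /* assumed constant */
add_timer(&ep->stop_cmd_timer);
xhci_queue_stop_endpoint(xhci, slot_id, ep_index);
xhci_ring_cmd_db(xhci);
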
/*
* When we get a completion for a Set Transfer Ring Dequeue Pointer command,
* we need to clear the set deq pending flag in the endpoint ring state, so that
@@ -765,28 +903,32 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
virt_dev->in_ctx);
/* Input ctx add_flags are the endpoint index plus one */
ep_index = xhci_last_valid_endpoint(ctrl_ctx->add_flags) - 1;
- ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
- if (!ep_ring) {
- /* This must have been an initial configure endpoint */
- xhci->devs[slot_id]->cmd_status =
- GET_COMP_CODE(event->status);
- complete(&xhci->devs[slot_id]->cmd_completion);
- break;
- }
- ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
- xhci_dbg(xhci, "Completed config ep cmd - last ep index = %d, "
- "state = %d\n", ep_index, ep_state);
+ /* A usb_set_interface() call directly after clearing a halted
+ * condition may race on this quirky hardware.
+ * Not worth worrying about, since this is prototype hardware.
+ */
if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
- ep_state & EP_HALTED) {
+ ep_index != (unsigned int) -1 &&
+ ctrl_ctx->add_flags - SLOT_FLAG ==
+ ctrl_ctx->drop_flags) {
+ ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
+ ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
+ if (!(ep_state & EP_HALTED))
+ goto bandwidth_change;
+ xhci_dbg(xhci, "Completed config ep cmd - "
+ "last ep index = %d, state = %d\n",
+ ep_index, ep_state);
/* Clear our internal halted state and restart ring */
xhci->devs[slot_id]->eps[ep_index].ep_state &=
~EP_HALTED;
ring_ep_doorbell(xhci, slot_id, ep_index);
- } else {
- xhci->devs[slot_id]->cmd_status =
- GET_COMP_CODE(event->status);
- complete(&xhci->devs[slot_id]->cmd_completion);
+ break;
}
+bandwidth_change:
+ xhci_dbg(xhci, "Completed config ep cmd\n");
+ xhci->devs[slot_id]->cmd_status =
+ GET_COMP_CODE(event->status);
+ complete(&xhci->devs[slot_id]->cmd_completion);
break;
case TRB_TYPE(TRB_EVAL_CONTEXT):
virt_dev = xhci->devs[slot_id];
@@ -849,8 +991,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
* TRB in this TD, this function returns that TRB's segment. Otherwise it
* returns 0.
*/
-static struct xhci_segment *trb_in_td(
- struct xhci_segment *start_seg,
+struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
union xhci_trb *start_trb,
union xhci_trb *end_trb,
dma_addr_t suspect_dma)
@@ -864,9 +1005,11 @@ static struct xhci_segment *trb_in_td(
cur_seg = start_seg;
do {
+ if (start_dma == 0)
+ return 0;
/* We may get an event for a Link TRB in the middle of a TD */
end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
- &start_seg->trbs[TRBS_PER_SEGMENT - 1]);
+ &cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
/* If the end TRB isn't in this segment, this is set to 0 */
end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);
@@ -893,8 +1036,48 @@ static struct xhci_segment *trb_in_td(
}
cur_seg = cur_seg->next;
start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
- } while (1);
+ } while (cur_seg != start_seg);
+ return 0;
+}
+
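
Two real bugs are fixed in the hunks above: the end-of-segment DMA address was computed from start_seg instead of cur_seg, and the walk never terminated when suspect_dma fell in none of the segments. Condensed, the corrected loop shape is as follows (the checks for a TD spanning multiple segments are elided for brevity):

/* Condensed shape of the corrected trb_in_td() walk */
cur_seg = start_seg;
do {
        if (start_dma == 0)
                return 0;               /* defensive: unmapped segment */
        end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
                        &cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
        if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
                return cur_seg;         /* span checks elided */
        cur_seg = cur_seg->next;
        start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
} while (cur_seg != start_seg);         /* stop after one full lap */
return 0;
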
+static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
+ unsigned int slot_id, unsigned int ep_index,
+ struct xhci_td *td, union xhci_trb *event_trb)
+{
+ struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
+ ep->ep_state |= EP_HALTED;
+ ep->stopped_td = td;
+ ep->stopped_trb = event_trb;
+ xhci_queue_reset_ep(xhci, slot_id, ep_index);
+ xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
+ xhci_ring_cmd_db(xhci);
+}
+
+/* Check if an error has halted the endpoint ring. The class driver will
+ * cleanup the halt for a non-default control endpoint if we indicate a stall.
+ * However, a babble and other errors also halt the endpoint ring, and the class
+ * driver won't clear the halt in that case, so we need to issue a Set Transfer
+ * Ring Dequeue Pointer command manually.
+ */
+static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
+ struct xhci_ep_ctx *ep_ctx,
+ unsigned int trb_comp_code)
+{
+ /* TRB completion codes that may require a manual halt cleanup */
+ if (trb_comp_code == COMP_TX_ERR ||
+ trb_comp_code == COMP_BABBLE ||
+ trb_comp_code == COMP_SPLIT_ERR)
+ /* The 0.95 spec says a babbling control endpoint
+ * is not halted. The 0.96 spec says it is. Some HW
+ * claims to be 0.95 compliant, but it halts the control
+ * endpoint anyway. Check if a babble halted the
+ * endpoint.
+ */
+ if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_HALTED)
+ return 1;
+
+ return 0;
}
/*
@@ -999,6 +1182,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
xhci_warn(xhci, "WARN: TRB error on endpoint\n");
status = -EILSEQ;
break;
+ case COMP_SPLIT_ERR:
case COMP_TX_ERR:
xhci_warn(xhci, "WARN: transfer error on endpoint\n");
status = -EPROTO;
@@ -1012,6 +1196,16 @@ static int handle_tx_event(struct xhci_hcd *xhci,
status = -ENOSR;
break;
default:
+ if (trb_comp_code >= 224 && trb_comp_code <= 255) {
+ /* Vendor defined "informational" completion code,
+ * treat as not-an-error.
+ */
+ xhci_dbg(xhci, "Vendor defined info completion code %u\n",
+ trb_comp_code);
+ xhci_dbg(xhci, "Treating code as success.\n");
+ status = 0;
+ break;
+ }
xhci_warn(xhci, "ERROR Unknown event condition, HC probably busted\n");
urb = NULL;
goto cleanup;
@@ -1040,15 +1234,14 @@ static int handle_tx_event(struct xhci_hcd *xhci,
else
status = 0;
break;
- case COMP_BABBLE:
- /* The 0.95 spec says a babbling control endpoint
- * is not halted. The 0.96 spec says it is. Some HW
- * claims to be 0.95 compliant, but it halts the control
- * endpoint anyway. Check if a babble halted the
- * endpoint.
- */
- if (ep_ctx->ep_info != EP_STATE_HALTED)
+
+ default:
+ if (!xhci_requires_manual_halt_cleanup(xhci,
+ ep_ctx, trb_comp_code))
break;
+ xhci_dbg(xhci, "TRB error code %u, "
+ "halted endpoint index = %u\n",
+ trb_comp_code, ep_index);
/* else fall through */
case COMP_STALL:
/* Did we transfer part of the data (middle) phase? */
@@ -1060,15 +1253,9 @@ static int handle_tx_event(struct xhci_hcd *xhci,
else
td->urb->actual_length = 0;
- ep->stopped_td = td;
- ep->stopped_trb = event_trb;
- xhci_queue_reset_ep(xhci, slot_id, ep_index);
- xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
- xhci_ring_cmd_db(xhci);
+ xhci_cleanup_halted_endpoint(xhci,
+ slot_id, ep_index, td, event_trb);
goto td_cleanup;
- default:
- /* Others already handled above */
- break;
}
/*
* Did we transfer any data, despite the errors that might have
@@ -1206,16 +1393,25 @@ static int handle_tx_event(struct xhci_hcd *xhci,
ep->stopped_td = td;
ep->stopped_trb = event_trb;
} else {
- if (trb_comp_code == COMP_STALL ||
- trb_comp_code == COMP_BABBLE) {
+ if (trb_comp_code == COMP_STALL) {
/* The transfer is completed from the driver's
* perspective, but we need to issue a set dequeue
* command for this stalled endpoint to move the dequeue
* pointer past the TD. We can't do that here because
- * the halt condition must be cleared first.
+ * the halt condition must be cleared first. Let the
+ * USB class driver clear the stall later.
*/
ep->stopped_td = td;
ep->stopped_trb = event_trb;
+ } else if (xhci_requires_manual_halt_cleanup(xhci,
+ ep_ctx, trb_comp_code)) {
+ /* Other types of errors halt the endpoint, but the
+ * class driver doesn't call usb_reset_endpoint() unless
+ * the error is -EPIPE. Clear the halted status in the
+ * xHCI hardware manually.
+ */
+ xhci_cleanup_halted_endpoint(xhci,
+ slot_id, ep_index, td, event_trb);
} else {
/* Update ring dequeue pointer */
while (ep_ring->dequeue != td->last_trb)
@@ -1246,10 +1442,9 @@ td_cleanup:
}
list_del(&td->td_list);
/* Was this TD slated to be cancelled but completed anyway? */
- if (!list_empty(&td->cancelled_td_list)) {
+ if (!list_empty(&td->cancelled_td_list))
list_del(&td->cancelled_td_list);
- ep->cancels_pending--;
- }
+
/* Leave the TD around for the reset endpoint function to use
* (but only if it's not a control endpoint, since we already
* queued the Set TR dequeue pointer command for stalled
@@ -1328,6 +1523,14 @@ void xhci_handle_event(struct xhci_hcd *xhci)
default:
xhci->error_bitmask |= 1 << 3;
}
+ /* Any of the above functions may drop and re-acquire the lock, so check
+ * to make sure a watchdog timer didn't mark the host as non-responsive.
+ */
+ if (xhci->xhc_state & XHCI_STATE_DYING) {
+ xhci_dbg(xhci, "xHCI host dying, returning from "
+ "event handler.\n");
+ return;
+ }
if (update_ptrs) {
/* Update SW and HC event ring dequeue pointer */
@@ -1552,6 +1755,21 @@ int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
return xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index);
}
+/*
+ * The TD size is the number of bytes remaining in the TD (including this TRB),
+ * right shifted by 10.
+ * It must fit in bits 21:17, so it can't be bigger than 31.
+ */
+static u32 xhci_td_remainder(unsigned int remainder)
+{
+ u32 max = (1 << (21 - 17 + 1)) - 1;
+
+ if ((remainder >> 10) >= max)
+ return max << 17;
+ else
+ return (remainder >> 10) << 17;
+}
+
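
A quick sanity check of the encoding, written as a stand-alone userspace mirror of the helper (illustrative only): 62464 bytes remaining shifts down to 61, which exceeds the 5-bit ceiling and saturates at 31; 8192 bytes encodes as 8; anything under 1024 rounds down to 0.

#include <assert.h>
#include <stdint.h>

/* Userspace mirror of xhci_td_remainder() for illustration */
static uint32_t td_remainder(unsigned int remainder)
{
        uint32_t max = (1 << (21 - 17 + 1)) - 1;        /* 5 bits -> 31 */

        if ((remainder >> 10) >= max)
                return max << 17;
        return (remainder >> 10) << 17;
}

int main(void)
{
        assert(td_remainder(65536 - 3072) == 31u << 17); /* saturates */
        assert(td_remainder(8192) == 8u << 17);          /* 8 KB left */
        assert(td_remainder(512) == 0);                  /* rounds down */
        return 0;
}
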
static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
struct urb *urb, int slot_id, unsigned int ep_index)
{
@@ -1609,6 +1827,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
do {
u32 field = 0;
u32 length_field = 0;
+ u32 remainder = 0;
/* Don't change the cycle bit of the first TRB until later */
if (first_trb)
@@ -1638,8 +1857,10 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
(unsigned int) addr + trb_buff_len);
}
+ remainder = xhci_td_remainder(urb->transfer_buffer_length -
+ running_total);
length_field = TRB_LEN(trb_buff_len) |
- TD_REMAINDER(urb->transfer_buffer_length - running_total) |
+ remainder |
TRB_INTR_TARGET(0);
queue_trb(xhci, ep_ring, false,
lower_32_bits(addr),
@@ -1752,6 +1973,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
/* Queue the first TRB, even if it's zero-length */
do {
+ u32 remainder = 0;
field = 0;
/* Don't change the cycle bit of the first TRB until later */
@@ -1770,8 +1992,10 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
td->last_trb = ep_ring->enqueue;
field |= TRB_IOC;
}
+ remainder = xhci_td_remainder(urb->transfer_buffer_length -
+ running_total);
length_field = TRB_LEN(trb_buff_len) |
- TD_REMAINDER(urb->transfer_buffer_length - running_total) |
+ remainder |
TRB_INTR_TARGET(0);
queue_trb(xhci, ep_ring, false,
lower_32_bits(addr),
@@ -1859,7 +2083,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
/* If there's data, queue data TRBs */
field = 0;
length_field = TRB_LEN(urb->transfer_buffer_length) |
- TD_REMAINDER(urb->transfer_buffer_length) |
+ xhci_td_remainder(urb->transfer_buffer_length) |
TRB_INTR_TARGET(0);
if (urb->transfer_buffer_length > 0) {
if (setup->bRequestType & USB_DIR_IN)