xhci: Update internal dequeue pointers after stalls.
author     Sarah Sharp <sarah.a.sharp@linux.intel.com>
           Wed, 23 Feb 2011 23:46:42 +0000 (15:46 -0800)
committer  Sarah Sharp <sarah.a.sharp@linux.intel.com>
           Mon, 14 Mar 2011 01:23:53 +0000 (18:23 -0700)
When an endpoint stalls, the xHCI driver must move the endpoint ring's
dequeue pointer past the stalled transfer.  To do that, the driver issues
a Set TR Dequeue Pointer command, which will complete some time later.
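For context, a simplified sketch of that recovery path (using only names
visible in this patch where possible; the local variable names, the exact
argument lists, and the surrounding call site are assumptions about the
2.6.38-era driver, and locking/error handling are omitted):

    struct xhci_dequeue_state deq_state;

    /* Pick a new dequeue position just past the stalled TD. */
    xhci_find_new_dequeue_state(xhci, slot_id, ep_index, stream_id,
                                stalled_td, &deq_state);
    /* Ask the controller to move its dequeue pointer there; the command
     * completes asynchronously, some time later.
     */
    queue_set_tr_deq(xhci, slot_id, ep_index, stream_id,
                     deq_state.new_deq_seg, deq_state.new_deq_ptr,
                     deq_state.new_cycle_state);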

Takashi was having issues with USB 1.1 audio devices that stalled, and his
analysis of the code was that the old code would not update the xHCI
driver's ring dequeue pointer after the command completes.  However, the
dequeue pointer is set in xhci_find_new_dequeue_state(), just before the
set command is issued to the hardware.

Setting the dequeue pointer before the Set TR Dequeue Pointer command
completes is a dangerous thing to do, since the xHCI hardware can fail the
command.  Instead, store the new dequeue pointer in the xhci_virt_ep
structure, and update the ring's dequeue pointer when the Set TR dequeue
pointer command completes.
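In outline, the new flow looks roughly like this (a condensed sketch of the
hunks below, not the literal patch; field and variable names are taken from
the diff, everything else is elided):

    /* When queuing the command, remember where we intend to move the ring. */
    ep = &xhci->devs[slot_id]->eps[ep_index];
    ep->queued_deq_seg = deq_seg;
    ep->queued_deq_ptr = deq_ptr;
    /* ... queue_command(..., TRB_TYPE(TRB_SET_DEQ), ...) ... */

    /* Later, in handle_set_deq_completion(), once the command succeeds: */
    if (xhci_trb_virt_to_dma(ep->queued_deq_seg, ep->queued_deq_ptr) ==
                    (ep_ctx->deq & ~EP_CTX_CYCLE_MASK)) {
            /* The controller accepted the new position; commit it to the
             * driver's view of the ring.
             */
            ep_ring->deq_seg = ep->queued_deq_seg;
            ep_ring->dequeue = ep->queued_deq_ptr;
    }
    ep->queued_deq_seg = NULL;
    ep->queued_deq_ptr = NULL;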

While we're at it, make sure we can't queue another Set TR Dequeue Command
while the first one is still being processed.  This just won't work with
the internal xHCI state code.  I'm still not sure if this is the right
thing to do, since we might have a case where a driver queues multiple
URBs to a control ring, one of the URBs stalls, and then the driver tries
to cancel the second URB.  There may be a race condition there where the
xHCI driver might try to issue multiple Set TR Dequeue Pointer commands,
but I would have to think very hard about how the Stop Endpoint and
cancellation code works.  Keep the fix simple until when/if we run into
that case.
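Concretely, the guard is a check of the endpoint's SET_DEQ_PENDING flag
before another command is queued (sketch, mirroring the queue_set_tr_deq()
hunk below):

    ep = &xhci->devs[slot_id]->eps[ep_index];
    if (ep->ep_state & SET_DEQ_PENDING) {
            /* A Set TR Deq Ptr command is already outstanding for this
             * endpoint; refuse to queue a second one.
             */
            xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
            return 0;
    }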

This patch should be queued to kernels all the way back to 2.6.31.

Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Tested-by: Takashi Iwai <tiwai@suse.de>
Cc: stable@kernel.org
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.h

diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index bd0f2343ef9c39310626f850cb8ef3c135ebb431..3577cd663ebc564f918775016924ca9bedb775c1 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -501,9 +501,6 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
        addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
        xhci_dbg(xhci, "New dequeue pointer = 0x%llx (DMA)\n",
                        (unsigned long long) addr);
-       xhci_dbg(xhci, "Setting dequeue pointer in internal ring state.\n");
-       ep_ring->dequeue = state->new_deq_ptr;
-       ep_ring->deq_seg = state->new_deq_seg;
 }
 
 static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
@@ -945,9 +942,26 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
        } else {
                xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n",
                                ep_ctx->deq);
+               if (xhci_trb_virt_to_dma(dev->eps[ep_index].queued_deq_seg,
+                                       dev->eps[ep_index].queued_deq_ptr) ==
+                               (ep_ctx->deq & ~(EP_CTX_CYCLE_MASK))) {
+                       /* Update the ring's dequeue segment and dequeue pointer
+                        * to reflect the new position.
+                        */
+                       ep_ring->deq_seg = dev->eps[ep_index].queued_deq_seg;
+                       ep_ring->dequeue = dev->eps[ep_index].queued_deq_ptr;
+               } else {
+                       xhci_warn(xhci, "Mismatch between completed Set TR Deq "
+                                       "Ptr command & xHCI internal state.\n");
+                       xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
+                                       dev->eps[ep_index].queued_deq_seg,
+                                       dev->eps[ep_index].queued_deq_ptr);
+               }
        }
 
        dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
+       dev->eps[ep_index].queued_deq_seg = NULL;
+       dev->eps[ep_index].queued_deq_ptr = NULL;
        /* Restart any rings with pending URBs */
        ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
 }
@@ -3283,6 +3297,7 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
        u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
        u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id);
        u32 type = TRB_TYPE(TRB_SET_DEQ);
+       struct xhci_virt_ep *ep;
 
        addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr);
        if (addr == 0) {
@@ -3291,6 +3306,14 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
                                deq_seg, deq_ptr);
                return 0;
        }
+       ep = &xhci->devs[slot_id]->eps[ep_index];
+       if ((ep->ep_state & SET_DEQ_PENDING)) {
+               xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
+               xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n");
+               return 0;
+       }
+       ep->queued_deq_seg = deq_seg;
+       ep->queued_deq_ptr = deq_ptr;
        return queue_command(xhci, lower_32_bits(addr) | cycle_state,
                        upper_32_bits(addr), trb_stream_id,
                        trb_slot_id | trb_ep_index | type, false);
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index e69f1cdf4b5bb8b05b8f8942a443afc3dbfa1f01..7aca6b16e98684408f34e20ee1f131fe436c7440 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -644,6 +644,9 @@ struct xhci_ep_ctx {
 #define AVG_TRB_LENGTH_FOR_EP(p)       ((p) & 0xffff)
 #define MAX_ESIT_PAYLOAD_FOR_EP(p)     (((p) & 0xffff) << 16)
 
+/* deq bitmasks */
+#define EP_CTX_CYCLE_MASK              (1 << 0)
+
 
 /**
  * struct xhci_input_control_context
@@ -746,6 +749,12 @@ struct xhci_virt_ep {
        struct timer_list       stop_cmd_timer;
        int                     stop_cmds_pending;
        struct xhci_hcd         *xhci;
+       /* Dequeue pointer and dequeue segment for a submitted Set TR Dequeue
+        * command.  We'll need to update the ring's dequeue segment and dequeue
+        * pointer after the command completes.
+        */
+       struct xhci_segment     *queued_deq_seg;
+       union xhci_trb          *queued_deq_ptr;
        /*
         * Sometimes the xHC can not process isochronous endpoint ring quickly
         * enough, and it will miss some isoc tds on the ring and generate