diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index df558f6f84e30eff6d6df0fc61c3bd67fb183387..3289bf4832c9acdca8d58a6edaa3d6dd7b2d43e1 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -308,11 +308,8 @@ static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
 /* Ring the host controller doorbell after placing a command on the ring */
 void xhci_ring_cmd_db(struct xhci_hcd *xhci)
 {
-       u32 temp;
-
        xhci_dbg(xhci, "// Ding dong!\n");
-       temp = xhci_readl(xhci, &xhci->dba->doorbell[0]) & DB_MASK;
-       xhci_writel(xhci, temp | DB_TARGET_HOST, &xhci->dba->doorbell[0]);
+       xhci_writel(xhci, DB_VALUE_HOST, &xhci->dba->doorbell[0]);
        /* Flush PCI posted writes */
        xhci_readl(xhci, &xhci->dba->doorbell[0]);
 }
@@ -322,26 +319,24 @@ void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
                unsigned int ep_index,
                unsigned int stream_id)
 {
-       struct xhci_virt_ep *ep;
-       unsigned int ep_state;
-       u32 field;
        __u32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
+       struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
+       unsigned int ep_state = ep->ep_state;
 
-       ep = &xhci->devs[slot_id]->eps[ep_index];
-       ep_state = ep->ep_state;
        /* Don't ring the doorbell for this endpoint if there are pending
-        * cancellations because the we don't want to interrupt processing.
+        * cancellations because we don't want to interrupt processing.
         * We don't want to restart any stream rings if there's a set dequeue
         * pointer command pending because the device can choose to start any
         * stream once the endpoint is on the HW schedule.
         * FIXME - check all the stream rings for pending cancellations.
         */
-       if (!(ep_state & EP_HALT_PENDING) && !(ep_state & SET_DEQ_PENDING)
-                       && !(ep_state & EP_HALTED)) {
-               field = xhci_readl(xhci, db_addr) & DB_MASK;
-               field |= EPI_TO_DB(ep_index) | STREAM_ID_TO_DB(stream_id);
-               xhci_writel(xhci, field, db_addr);
-       }
+       if ((ep_state & EP_HALT_PENDING) || (ep_state & SET_DEQ_PENDING) ||
+           (ep_state & EP_HALTED))
+               return;
+       xhci_writel(xhci, DB_VALUE(ep_index, stream_id), db_addr);
+       /* The CPU has better things to do at this point than wait for a
+        * write-posting flush.  It'll get there soon enough.
+        */
 }
 
 /* Ring the doorbell for any rings with pending URBs */
@@ -479,8 +474,11 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
        state->new_deq_seg = find_trb_seg(cur_td->start_seg,
                        dev->eps[ep_index].stopped_trb,
                        &state->new_cycle_state);
-       if (!state->new_deq_seg)
-               BUG();
+       if (!state->new_deq_seg) {
+               WARN_ON(1);
+               return;
+       }
+
        /* Dig out the cycle state saved by the xHC during the stop ep cmd */
        xhci_dbg(xhci, "Finding endpoint context\n");
        ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
@@ -491,8 +489,10 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
        state->new_deq_seg = find_trb_seg(state->new_deq_seg,
                        state->new_deq_ptr,
                        &state->new_cycle_state);
-       if (!state->new_deq_seg)
-               BUG();
+       if (!state->new_deq_seg) {
+               WARN_ON(1);
+               return;
+       }
 
        trb = &state->new_deq_ptr->generic;
        if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) &&
@@ -1188,7 +1188,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
 
        addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS * (port_id - 1);
        temp = xhci_readl(xhci, addr);
-       if ((temp & PORT_CONNECT) && (hcd->state == HC_STATE_SUSPENDED)) {
+       if (hcd->state == HC_STATE_SUSPENDED) {
                xhci_dbg(xhci, "resume root hub\n");
                usb_hcd_resume_root_hub(hcd);
        }
@@ -1710,8 +1710,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
                /* Others already handled above */
                break;
        }
-       dev_dbg(&td->urb->dev->dev,
-                       "ep %#x - asked for %d bytes, "
+       xhci_dbg(xhci, "ep %#x - asked for %d bytes, "
                        "%d bytes untransferred\n",
                        td->urb->ep->desc.bEndpointAddress,
                        td->urb->transfer_buffer_length,
@@ -2369,12 +2368,13 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
 
                /* Scatter gather list entries may cross 64KB boundaries */
                running_total = TRB_MAX_BUFF_SIZE -
-                       (sg_dma_address(sg) & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+                       (sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1));
+               running_total &= TRB_MAX_BUFF_SIZE - 1;
                if (running_total != 0)
                        num_trbs++;
 
                /* How many more 64KB chunks to transfer, how many more TRBs? */
-               while (running_total < sg_dma_len(sg)) {
+               while (running_total < sg_dma_len(sg) && running_total < temp) {
                        num_trbs++;
                        running_total += TRB_MAX_BUFF_SIZE;
                }
@@ -2389,7 +2389,8 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
        }
        xhci_dbg(xhci, "\n");
        if (!in_interrupt())
-               dev_dbg(&urb->dev->dev, "ep %#x - urb len = %d, sglist used, num_trbs = %d\n",
+               xhci_dbg(xhci, "ep %#x - urb len = %d, sglist used, "
+                               "num_trbs = %d\n",
                                urb->ep->desc.bEndpointAddress,
                                urb->transfer_buffer_length,
                                num_trbs);
@@ -2399,11 +2400,11 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
 static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
 {
        if (num_trbs != 0)
-               dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
+               dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
                                "TRBs, %d left\n", __func__,
                                urb->ep->desc.bEndpointAddress, num_trbs);
        if (running_total != urb->transfer_buffer_length)
-               dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
+               dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
                                "queued %#x (%d), asked for %#x (%d)\n",
                                __func__,
                                urb->ep->desc.bEndpointAddress,
@@ -2414,14 +2415,17 @@ static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
 
 static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
                unsigned int ep_index, unsigned int stream_id, int start_cycle,
-               struct xhci_generic_trb *start_trb, struct xhci_td *td)
+               struct xhci_generic_trb *start_trb)
 {
        /*
         * Pass all the TRBs to the hardware at once and make sure this write
         * isn't reordered.
         */
        wmb();
-       start_trb->field[3] |= start_cycle;
+       if (start_cycle)
+               start_trb->field[3] |= start_cycle;
+       else
+               start_trb->field[3] &= ~0x1;
        xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
 }
 
@@ -2449,7 +2453,7 @@ int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
         * to set the polling interval (once the API is added).
         */
        if (xhci_interval != ep_interval) {
-               if (!printk_ratelimit())
+               if (printk_ratelimit())
                        dev_dbg(&urb->dev->dev, "Driver uses different interval"
                                        " (%d microframe%s) than xHCI "
                                        "(%d microframe%s)\n",
@@ -2535,8 +2539,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
        sg = urb->sg;
        addr = (u64) sg_dma_address(sg);
        this_sg_len = sg_dma_len(sg);
-       trb_buff_len = TRB_MAX_BUFF_SIZE -
-               (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+       trb_buff_len = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
        trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
        if (trb_buff_len > urb->transfer_buffer_length)
                trb_buff_len = urb->transfer_buffer_length;
@@ -2551,9 +2554,11 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                u32 remainder = 0;
 
                /* Don't change the cycle bit of the first TRB until later */
-               if (first_trb)
+               if (first_trb) {
                        first_trb = false;
-               else
+                       if (start_cycle == 0)
+                               field |= 0x1;
+               } else
                        field |= ep_ring->cycle_state;
 
                /* Chain all the TRBs together; clear the chain bit in the last
@@ -2572,7 +2577,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                                (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
                                (unsigned int) addr + trb_buff_len);
                if (TRB_MAX_BUFF_SIZE -
-                               (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)) < trb_buff_len) {
+                               (addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) {
                        xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
                        xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
                                        (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
@@ -2616,7 +2621,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                }
 
                trb_buff_len = TRB_MAX_BUFF_SIZE -
-                       (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+                       (addr & (TRB_MAX_BUFF_SIZE - 1));
                trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
                if (running_total + trb_buff_len > urb->transfer_buffer_length)
                        trb_buff_len =
@@ -2625,7 +2630,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 
        check_trb_math(urb, num_trbs, running_total);
        giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
-                       start_cycle, start_trb, td);
+                       start_cycle, start_trb);
        return 0;
 }
 
@@ -2656,7 +2661,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
        num_trbs = 0;
        /* How much data is (potentially) left before the 64KB boundary? */
        running_total = TRB_MAX_BUFF_SIZE -
-               (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+               (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
+       running_total &= TRB_MAX_BUFF_SIZE - 1;
 
        /* If there's some data on this 64KB chunk, or we have to send a
         * zero-length transfer, we need at least one TRB
@@ -2671,7 +2677,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
        /* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */
 
        if (!in_interrupt())
-               dev_dbg(&urb->dev->dev, "ep %#x - urb len = %#x (%d), addr = %#llx, num_trbs = %d\n",
+               xhci_dbg(xhci, "ep %#x - urb len = %#x (%d), "
+                               "addr = %#llx, num_trbs = %d\n",
                                urb->ep->desc.bEndpointAddress,
                                urb->transfer_buffer_length,
                                urb->transfer_buffer_length,
@@ -2699,8 +2706,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
        /* How much data is in the first TRB? */
        addr = (u64) urb->transfer_dma;
        trb_buff_len = TRB_MAX_BUFF_SIZE -
-               (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
-       if (urb->transfer_buffer_length < trb_buff_len)
+               (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
+       if (trb_buff_len > urb->transfer_buffer_length)
                trb_buff_len = urb->transfer_buffer_length;
 
        first_trb = true;
@@ -2711,9 +2718,11 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                field = 0;
 
                /* Don't change the cycle bit of the first TRB until later */
-               if (first_trb)
+               if (first_trb) {
                        first_trb = false;
-               else
+                       if (start_cycle == 0)
+                               field |= 0x1;
+               } else
                        field |= ep_ring->cycle_state;
 
                /* Chain all the TRBs together; clear the chain bit in the last
@@ -2757,7 +2766,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 
        check_trb_math(urb, num_trbs, running_total);
        giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
-                       start_cycle, start_trb, td);
+                       start_cycle, start_trb);
        return 0;
 }
 
@@ -2818,13 +2827,17 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
        /* Queue setup TRB - see section 6.4.1.2.1 */
        /* FIXME better way to translate setup_packet into two u32 fields? */
        setup = (struct usb_ctrlrequest *) urb->setup_packet;
+       field = 0;
+       field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
+       if (start_cycle == 0)
+               field |= 0x1;
        queue_trb(xhci, ep_ring, false, true,
                        /* FIXME endianness is probably going to bite my ass here. */
                        setup->bRequestType | setup->bRequest << 8 | setup->wValue << 16,
                        setup->wIndex | setup->wLength << 16,
                        TRB_LEN(8) | TRB_INTR_TARGET(0),
                        /* Immediate data in pointer */
-                       TRB_IDT | TRB_TYPE(TRB_SETUP));
+                       field);
 
        /* If there's data, queue data TRBs */
        field = 0;
@@ -2859,7 +2872,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                        field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);
 
        giveback_first_trb(xhci, slot_id, ep_index, 0,
-                       start_cycle, start_trb, td);
+                       start_cycle, start_trb);
        return 0;
 }
 
@@ -2872,8 +2885,8 @@ static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
        addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
        td_len = urb->iso_frame_desc[i].length;
 
-       running_total = TRB_MAX_BUFF_SIZE -
-                       (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+       running_total = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
+       running_total &= TRB_MAX_BUFF_SIZE - 1;
        if (running_total != 0)
                num_trbs++;
 
@@ -2900,6 +2913,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
        int running_total, trb_buff_len, td_len, td_remain_len, ret;
        u64 start_addr, addr;
        int i, j;
+       bool more_trbs_coming;
 
        ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
 
@@ -2910,7 +2924,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
        }
 
        if (!in_interrupt())
-               dev_dbg(&urb->dev->dev, "ep %#x - urb len = %#x (%d),"
+               xhci_dbg(xhci, "ep %#x - urb len = %#x (%d),"
                                " addr = %#llx, num_tds = %d\n",
                                urb->ep->desc.bEndpointAddress,
                                urb->transfer_buffer_length,
@@ -2950,7 +2964,10 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                                field |= TRB_TYPE(TRB_ISOC);
                                /* Assume URB_ISO_ASAP is set */
                                field |= TRB_SIA;
-                               if (i > 0)
+                               if (i == 0) {
+                                       if (start_cycle == 0)
+                                               field |= 0x1;
+                               } else
                                        field |= ep_ring->cycle_state;
                                first_trb = false;
                        } else {
@@ -2965,9 +2982,11 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                         */
                        if (j < trbs_per_td - 1) {
                                field |= TRB_CHAIN;
+                               more_trbs_coming = true;
                        } else {
                                td->last_trb = ep_ring->enqueue;
                                field |= TRB_IOC;
+                               more_trbs_coming = false;
                        }
 
                        /* Calculate TRB length */
@@ -2980,7 +2999,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                        length_field = TRB_LEN(trb_buff_len) |
                                remainder |
                                TRB_INTR_TARGET(0);
-                       queue_trb(xhci, ep_ring, false, false,
+                       queue_trb(xhci, ep_ring, false, more_trbs_coming,
                                lower_32_bits(addr),
                                upper_32_bits(addr),
                                length_field,
@@ -3003,10 +3022,8 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                }
        }
 
-       wmb();
-       start_trb->field[3] |= start_cycle;
-
-       xhci_ring_ep_doorbell(xhci, slot_id, ep_index, urb->stream_id);
+       giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
+                       start_cycle, start_trb);
        return 0;
 }
 
@@ -3064,7 +3081,7 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
         * to set the polling interval (once the API is added).
         */
        if (xhci_interval != ep_interval) {
-               if (!printk_ratelimit())
+               if (printk_ratelimit())
                        dev_dbg(&urb->dev->dev, "Driver uses different interval"
                                        " (%d microframe%s) than xHCI "
                                        "(%d microframe%s)\n",