/*
 * Copyright (c) 2001-2004 by David Brownell
 * Copyright (c) 2003 Michal Sojka, for high-speed iso transfers
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/* this file is part of ehci-hcd.c */
/*-------------------------------------------------------------------------*/

/*
 * EHCI scheduled transaction support:  interrupt, iso, split iso
 * These are called "periodic" transactions in the EHCI spec.
 *
 * Note that for interrupt transfers, the QH/QTD manipulation is shared
 * with the "asynchronous" transaction support (control/bulk transfers).
 * The only real difference is in how interrupt transfers are scheduled.
 *
 * For ISO, we make an "iso_stream" head to serve the same role as a QH.
 * It keeps track of every ITD (or SITD) that's linked, and holds enough
 * pre-calculated schedule data to make appending to the queue be quick.
 */

static int ehci_get_frame (struct usb_hcd *hcd);
/*
 * periodic_next_shadow - return "next" pointer on shadow list
 * @periodic: host pointer to qh/itd/sitd
 * @tag: hardware tag for type of this record
 */
static union ehci_shadow *
periodic_next_shadow(struct ehci_hcd *ehci, union ehci_shadow *periodic,
		__hc32 tag)
{
	switch (hc32_to_cpu(ehci, tag)) {
	case Q_TYPE_QH:
		return &periodic->qh->qh_next;
	case Q_TYPE_FSTN:
		return &periodic->fstn->fstn_next;
	case Q_TYPE_ITD:
		return &periodic->itd->itd_next;
	default:	/* Q_TYPE_SITD */
		return &periodic->sitd->sitd_next;
	}
}
static __hc32 *
shadow_next_periodic(struct ehci_hcd *ehci, union ehci_shadow *periodic,
		__hc32 tag)
{
	switch (hc32_to_cpu(ehci, tag)) {
	/* our ehci_shadow.qh is actually software part */
	case Q_TYPE_QH:
		return &periodic->qh->hw->hw_next;
	default:	/* others are hw parts */
		return periodic->hw_next;
	}
}
/* caller must hold ehci->lock */
static void periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr)
{
	union ehci_shadow	*prev_p = &ehci->pshadow[frame];
	__hc32			*hw_p = &ehci->periodic[frame];
	union ehci_shadow	here = *prev_p;

	/* find predecessor of "ptr"; hw and shadow lists are in sync */
	while (here.ptr && here.ptr != ptr) {
		prev_p = periodic_next_shadow(ehci, prev_p,
				Q_NEXT_TYPE(ehci, *hw_p));
		hw_p = shadow_next_periodic(ehci, &here,
				Q_NEXT_TYPE(ehci, *hw_p));
		here = *prev_p;
	}
	/* an interrupt entry (at list end) could have been shared */
	if (here.ptr != ptr)
		return;

	/* update shadow and hardware lists ... the old "next" pointers
	 * from ptr may still be in use, the caller updates them.
	 */
	*prev_p = *periodic_next_shadow(ehci, &here,
			Q_NEXT_TYPE(ehci, *hw_p));

	if (!ehci->use_dummy_qh ||
	    *shadow_next_periodic(ehci, &here, Q_NEXT_TYPE(ehci, *hw_p))
			!= EHCI_LIST_END(ehci))
		*hw_p = *shadow_next_periodic(ehci, &here,
				Q_NEXT_TYPE(ehci, *hw_p));
	else
		*hw_p = ehci->dummy->qh_dma;
}
/* how many of the uframe's 125 usecs are allocated? */
static unsigned short
periodic_usecs (struct ehci_hcd *ehci, unsigned frame, unsigned uframe)
{
	__hc32			*hw_p = &ehci->periodic [frame];
	union ehci_shadow	*q = &ehci->pshadow [frame];
	unsigned		usecs = 0;
	struct ehci_qh_hw	*hw;

	while (q->ptr) {
		switch (hc32_to_cpu(ehci, Q_NEXT_TYPE(ehci, *hw_p))) {
		case Q_TYPE_QH:
			hw = q->qh->hw;
			/* is it in the S-mask? */
			if (hw->hw_info2 & cpu_to_hc32(ehci, 1 << uframe))
				usecs += q->qh->usecs;
			/* ... or C-mask? */
			if (hw->hw_info2 & cpu_to_hc32(ehci,
					1 << (8 + uframe)))
				usecs += q->qh->c_usecs;
			hw_p = &hw->hw_next;
			q = &q->qh->qh_next;
			break;
		/* case Q_TYPE_FSTN: */
		default:
			/* for "save place" FSTNs, count the relevant INTR
			 * bandwidth from the previous frame
			 */
			if (q->fstn->hw_prev != EHCI_LIST_END(ehci)) {
				ehci_dbg (ehci, "ignoring FSTN cost ...\n");
			}
			hw_p = &q->fstn->hw_next;
			q = &q->fstn->fstn_next;
			break;
		case Q_TYPE_ITD:
			if (q->itd->hw_transaction[uframe])
				usecs += q->itd->stream->usecs;
			hw_p = &q->itd->hw_next;
			q = &q->itd->itd_next;
			break;
		case Q_TYPE_SITD:
			/* is it in the S-mask?  (count SPLIT, DATA) */
			if (q->sitd->hw_uframe & cpu_to_hc32(ehci,
					1 << uframe)) {
				if (q->sitd->hw_fullspeed_ep &
						cpu_to_hc32(ehci, 1<<31))
					usecs += q->sitd->stream->usecs;
				else	/* worst case for OUT start-split */
					usecs += HS_USECS_ISO (188);
			}

			/* ... C-mask?  (count CSPLIT, DATA) */
			if (q->sitd->hw_uframe &
					cpu_to_hc32(ehci, 1 << (8 + uframe))) {
				/* worst case for IN complete-split */
				usecs += q->sitd->stream->c_usecs;
			}

			hw_p = &q->sitd->hw_next;
			q = &q->sitd->sitd_next;
			break;
		}
	}

	if (usecs > ehci->uframe_periodic_max)
		ehci_err (ehci, "uframe %d sched overrun: %d usecs\n",
			frame * 8 + uframe, usecs);
	return usecs;
}
/*-------------------------------------------------------------------------*/

static int same_tt (struct usb_device *dev1, struct usb_device *dev2)
{
	if (!dev1->tt || !dev2->tt)
		return 0;
	if (dev1->tt != dev2->tt)
		return 0;
	if (dev1->tt->multi)
		return dev1->ttport == dev2->ttport;
	else
		return 1;
}
#ifdef CONFIG_USB_EHCI_TT_NEWSCHED

/* Which uframe does the low/fullspeed transfer start in?
 *
 * The parameter is the mask of ssplits in "H-frame" terms
 * and this returns the transfer start uframe in "B-frame" terms,
 * which allows both to match, e.g. a ssplit in "H-frame" uframe 0
 * will cause a transfer in "B-frame" uframe 0.  "B-frames" lag
 * "H-frames" by 1 uframe.  See the EHCI spec sec 4.5 and figure 4.7.
 */
static inline unsigned char tt_start_uframe(struct ehci_hcd *ehci, __hc32 mask)
{
	unsigned char smask = QH_SMASK & hc32_to_cpu(ehci, mask);

	if (!smask) {
		ehci_err(ehci, "invalid empty smask!\n");
		/* uframe 7 can't have bw so this will indicate failure */
		return 7;
	}
	return ffs(smask) - 1;
}
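/*
 * Illustrative example (numbers made up): an smask of 0x04 has its lowest
 * set bit at position 2, so ffs(0x04) - 1 yields 2 and the low/fullspeed
 * transfer begins in B-frame uframe 2 on the downstream bus.
 */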
static const unsigned char
max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 30, 0 };

/* carryover low/fullspeed bandwidth that crosses uframe boundaries */
static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8])
{
	int i;

	for (i = 0; i < 7; i++) {
		if (max_tt_usecs[i] < tt_usecs[i]) {
			tt_usecs[i+1] += tt_usecs[i] - max_tt_usecs[i];
			tt_usecs[i] = max_tt_usecs[i];
		}
	}
}
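/*
 * Rough illustration of the carryover: if tt_usecs[0] already holds 200
 * usecs, the loop above clips it to max_tt_usecs[0] (125 usecs) and pushes
 * the remaining 75 usecs into tt_usecs[1], mirroring the way the TT lets a
 * long full/lowspeed transfer spill across a uframe boundary.
 */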
/* How many of the tt's periodic downstream 1000 usecs are allocated?
 *
 * While this measures the bandwidth in terms of usecs/uframe,
 * the low/fullspeed bus has no notion of uframes, so any particular
 * low/fullspeed transfer can "carry over" from one uframe to the next,
 * since the TT just performs downstream transfers in sequence.
 *
 * For example two separate 100 usec transfers can start in the same uframe,
 * and the second one would "carry over" 75 usecs into the next uframe.
 */
static void
periodic_tt_usecs (
	struct ehci_hcd *ehci,
	struct usb_device *dev,
	unsigned frame,
	unsigned short tt_usecs[8]
)
{
	__hc32			*hw_p = &ehci->periodic [frame];
	union ehci_shadow	*q = &ehci->pshadow [frame];
	unsigned char		uf;

	memset(tt_usecs, 0, 16);

	while (q->ptr) {
		switch (hc32_to_cpu(ehci, Q_NEXT_TYPE(ehci, *hw_p))) {
		case Q_TYPE_ITD:
			hw_p = &q->itd->hw_next;
			q = &q->itd->itd_next;
			continue;
		case Q_TYPE_QH:
			if (same_tt(dev, q->qh->dev)) {
				uf = tt_start_uframe(ehci, q->qh->hw->hw_info2);
				tt_usecs[uf] += q->qh->tt_usecs;
			}
			hw_p = &q->qh->hw->hw_next;
			q = &q->qh->qh_next;
			continue;
		case Q_TYPE_SITD:
			if (same_tt(dev, q->sitd->urb->dev)) {
				uf = tt_start_uframe(ehci, q->sitd->hw_uframe);
				tt_usecs[uf] += q->sitd->stream->tt_usecs;
			}
			hw_p = &q->sitd->hw_next;
			q = &q->sitd->sitd_next;
			continue;
		/* case Q_TYPE_FSTN: */
		default:
			ehci_dbg(ehci, "ignoring periodic frame %d FSTN\n",
					frame);
			hw_p = &q->fstn->hw_next;
			q = &q->fstn->fstn_next;
		}
	}

	carryover_tt_bandwidth(tt_usecs);

	if (max_tt_usecs[7] < tt_usecs[7])
		ehci_err(ehci, "frame %d tt sched overrun: %d usecs\n",
			frame, tt_usecs[7] - max_tt_usecs[7]);
}
/*
 * Return true if the device's tt's downstream bus is available for a
 * periodic transfer of the specified length (usecs), starting at the
 * specified frame/uframe.  Note that (as summarized in section 11.19
 * of the usb 2.0 spec) TTs can buffer multiple transactions for each
 * frame.
 *
 * The uframe parameter is when the fullspeed/lowspeed transfer
 * should be executed in "B-frame" terms, which is the same as the
 * highspeed ssplit's uframe (which is in "H-frame" terms).  For example
 * a ssplit in "H-frame" 0 causes a transfer in "B-frame" 0.
 * See the EHCI spec sec 4.5 and fig 4.7.
 *
 * This checks if the full/lowspeed bus, at the specified starting uframe,
 * has the specified bandwidth available, according to rules listed
 * in USB 2.0 spec section 11.18.1 fig 11-60.
 *
 * This does not check if the transfer would exceed the max ssplit
 * limit of 16, specified in USB 2.0 spec section 11.18.4 requirement #4,
 * since proper scheduling limits ssplits to less than 16 per uframe.
 */
313 static int tt_available (
314 struct ehci_hcd *ehci,
316 struct usb_device *dev,
322 if ((period == 0) || (uframe >= 7)) /* error */
325 for (; frame < ehci->periodic_size; frame += period) {
326 unsigned short tt_usecs[8];
328 periodic_tt_usecs (ehci, dev, frame, tt_usecs);
330 ehci_vdbg(ehci, "tt frame %d check %d usecs start uframe %d in"
331 " schedule %d/%d/%d/%d/%d/%d/%d/%d\n",
332 frame, usecs, uframe,
333 tt_usecs[0], tt_usecs[1], tt_usecs[2], tt_usecs[3],
334 tt_usecs[4], tt_usecs[5], tt_usecs[6], tt_usecs[7]);
336 if (max_tt_usecs[uframe] <= tt_usecs[uframe]) {
337 ehci_vdbg(ehci, "frame %d uframe %d fully scheduled\n",
342 /* special case for isoc transfers larger than 125us:
343 * the first and each subsequent fully used uframe
344 * must be empty, so as to not illegally delay
345 * already scheduled transactions
348 int ufs = (usecs / 125);
350 for (i = uframe; i < (uframe + ufs) && i < 8; i++)
351 if (0 < tt_usecs[i]) {
353 "multi-uframe xfer can't fit "
354 "in frame %d uframe %d\n",
360 tt_usecs[uframe] += usecs;
362 carryover_tt_bandwidth(tt_usecs);
364 /* fail if the carryover pushed bw past the last uframe's limit */
365 if (max_tt_usecs[7] < tt_usecs[7]) {
367 "tt unavailable usecs %d frame %d uframe %d\n",
368 usecs, frame, uframe);
378 /* return true iff the device's transaction translator is available
379 * for a periodic transfer starting at the specified frame, using
380 * all the uframes in the mask.
382 static int tt_no_collision (
383 struct ehci_hcd *ehci,
385 struct usb_device *dev,
390 if (period == 0) /* error */
393 /* note bandwidth wastage: split never follows csplit
394 * (different dev or endpoint) until the next uframe.
395 * calling convention doesn't make that distinction.
397 for (; frame < ehci->periodic_size; frame += period) {
398 union ehci_shadow here;
400 struct ehci_qh_hw *hw;
402 here = ehci->pshadow [frame];
403 type = Q_NEXT_TYPE(ehci, ehci->periodic [frame]);
405 switch (hc32_to_cpu(ehci, type)) {
407 type = Q_NEXT_TYPE(ehci, here.itd->hw_next);
408 here = here.itd->itd_next;
412 if (same_tt (dev, here.qh->dev)) {
415 mask = hc32_to_cpu(ehci,
417 /* "knows" no gap is needed */
422 type = Q_NEXT_TYPE(ehci, hw->hw_next);
423 here = here.qh->qh_next;
426 if (same_tt (dev, here.sitd->urb->dev)) {
429 mask = hc32_to_cpu(ehci, here.sitd
431 /* FIXME assumes no gap for IN! */
436 type = Q_NEXT_TYPE(ehci, here.sitd->hw_next);
437 here = here.sitd->sitd_next;
442 "periodic frame %d bogus type %d\n",
446 /* collision or error */
455 #endif /* CONFIG_USB_EHCI_TT_NEWSCHED */
457 /*-------------------------------------------------------------------------*/
459 static void enable_periodic(struct ehci_hcd *ehci)
461 if (ehci->periodic_count++)
464 /* Stop waiting to turn off the periodic schedule */
465 ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_DISABLE_PERIODIC);
467 /* Don't start the schedule until PSS is 0 */
469 turn_on_io_watchdog(ehci);
472 static void disable_periodic(struct ehci_hcd *ehci)
474 if (--ehci->periodic_count)
477 /* Don't turn off the schedule until PSS is 1 */
481 /*-------------------------------------------------------------------------*/
/* periodic schedule slots have iso tds (normal or split) first, then a
 * sparse tree for active interrupt transfers.
 *
 * this just links in a qh; caller guarantees uframe masks are set right.
 * no FSTN support (yet; ehci 0.96+)
 */
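/*
 * A populated slot therefore tends to look roughly like this (sketch only;
 * the exact mix depends on what is currently scheduled):
 *
 *	periodic[i] --> itd --> sitd --> qh (period 8) --> qh (period 4) ...
 *
 * with the interrupt QHs ordered from the slowest polling rate to the
 * fastest, so interior tree nodes can be shared between frames.
 */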
489 static void qh_link_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
492 unsigned period = qh->period;
494 dev_dbg (&qh->dev->dev,
495 "link qh%d-%04x/%p start %d [%d/%d us]\n",
496 period, hc32_to_cpup(ehci, &qh->hw->hw_info2)
497 & (QH_CMASK | QH_SMASK),
498 qh, qh->start, qh->usecs, qh->c_usecs);
500 /* high bandwidth, or otherwise every microframe */
504 for (i = qh->start; i < ehci->periodic_size; i += period) {
505 union ehci_shadow *prev = &ehci->pshadow[i];
506 __hc32 *hw_p = &ehci->periodic[i];
507 union ehci_shadow here = *prev;
510 /* skip the iso nodes at list head */
512 type = Q_NEXT_TYPE(ehci, *hw_p);
513 if (type == cpu_to_hc32(ehci, Q_TYPE_QH))
515 prev = periodic_next_shadow(ehci, prev, type);
516 hw_p = shadow_next_periodic(ehci, &here, type);
520 /* sorting each branch by period (slow-->fast)
521 * enables sharing interior tree nodes
523 while (here.ptr && qh != here.qh) {
524 if (qh->period > here.qh->period)
526 prev = &here.qh->qh_next;
527 hw_p = &here.qh->hw->hw_next;
530 /* link in this qh, unless some earlier pass did that */
534 qh->hw->hw_next = *hw_p;
537 *hw_p = QH_NEXT (ehci, qh->qh_dma);
540 qh->qh_state = QH_STATE_LINKED;
543 /* update per-qh bandwidth for usbfs */
544 ehci_to_hcd(ehci)->self.bandwidth_allocated += qh->period
545 ? ((qh->usecs + qh->c_usecs) / qh->period)
548 list_add(&qh->intr_node, &ehci->intr_qh_list);
550 /* maybe enable periodic schedule processing */
552 enable_periodic(ehci);
555 static void qh_unlink_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
	/*
	 * If qh is for a low/full-speed device, simply unlinking it
	 * could interfere with an ongoing split transaction.  To unlink
	 * it safely would require setting the QH_INACTIVATE bit and
	 * waiting at least one frame, as described in EHCI 4.12.2.5.
	 *
	 * We won't bother with any of this.  Instead, we assume that the
	 * only reason for unlinking an interrupt QH while the current URB
	 * is still active is to dequeue all the URBs (flush the whole
	 * endpoint queue).
	 *
	 * If rebalancing the periodic schedule is ever implemented, this
	 * approach will no longer be valid.
	 */
575 /* high bandwidth, or otherwise part of every microframe */
576 if ((period = qh->period) == 0)
579 for (i = qh->start; i < ehci->periodic_size; i += period)
580 periodic_unlink (ehci, i, qh);
582 /* update per-qh bandwidth for usbfs */
583 ehci_to_hcd(ehci)->self.bandwidth_allocated -= qh->period
584 ? ((qh->usecs + qh->c_usecs) / qh->period)
587 dev_dbg (&qh->dev->dev,
588 "unlink qh%d-%04x/%p start %d [%d/%d us]\n",
590 hc32_to_cpup(ehci, &qh->hw->hw_info2) & (QH_CMASK | QH_SMASK),
591 qh, qh->start, qh->usecs, qh->c_usecs);
593 /* qh->qh_next still "live" to HC */
594 qh->qh_state = QH_STATE_UNLINK;
595 qh->qh_next.ptr = NULL;
597 if (ehci->qh_scan_next == qh)
598 ehci->qh_scan_next = list_entry(qh->intr_node.next,
599 struct ehci_qh, intr_node);
600 list_del(&qh->intr_node);
603 static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
605 /* If the QH isn't linked then there's nothing we can do
606 * unless we were called during a giveback, in which case
607 * qh_completions() has to deal with it.
609 if (qh->qh_state != QH_STATE_LINKED) {
610 if (qh->qh_state == QH_STATE_COMPLETING)
611 qh->needs_rescan = 1;
615 qh_unlink_periodic (ehci, qh);
617 /* Make sure the unlinks are visible before starting the timer */
621 * The EHCI spec doesn't say how long it takes the controller to
622 * stop accessing an unlinked interrupt QH. The timer delay is
623 * 9 uframes; presumably that will be long enough.
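	 * (For reference: 9 uframes is 9 * 125 usec = 1.125 msec.)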
625 qh->unlink_cycle = ehci->intr_unlink_cycle;
627 /* New entries go at the end of the intr_unlink list */
628 if (ehci->intr_unlink)
629 ehci->intr_unlink_last->unlink_next = qh;
631 ehci->intr_unlink = qh;
632 ehci->intr_unlink_last = qh;
634 if (ehci->intr_unlinking)
635 ; /* Avoid recursive calls */
636 else if (ehci->rh_state < EHCI_RH_RUNNING)
637 ehci_handle_intr_unlinks(ehci);
638 else if (ehci->intr_unlink == qh) {
639 ehci_enable_event(ehci, EHCI_HRTIMER_UNLINK_INTR, true);
640 ++ehci->intr_unlink_cycle;
644 static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
646 struct ehci_qh_hw *hw = qh->hw;
649 qh->qh_state = QH_STATE_IDLE;
650 hw->hw_next = EHCI_LIST_END(ehci);
652 qh_completions(ehci, qh);
654 /* reschedule QH iff another request is queued */
655 if (!list_empty(&qh->qtd_list) && ehci->rh_state == EHCI_RH_RUNNING) {
656 rc = qh_schedule(ehci, qh);
658 /* An error here likely indicates handshake failure
659 * or no space left in the schedule. Neither fault
660 * should happen often ...
662 * FIXME kill the now-dysfunctional queued urbs
665 ehci_err(ehci, "can't reschedule qh %p, err %d\n",
669 /* maybe turn off periodic schedule */
671 disable_periodic(ehci);
674 /*-------------------------------------------------------------------------*/
676 static int check_period (
677 struct ehci_hcd *ehci,
685 /* complete split running into next frame?
686 * given FSTN support, we could sometimes check...
691 /* convert "usecs we need" to "max already claimed" */
692 usecs = ehci->uframe_periodic_max - usecs;
694 /* we "know" 2 and 4 uframe intervals were rejected; so
695 * for period 0, check _every_ microframe in the schedule.
697 if (unlikely (period == 0)) {
699 for (uframe = 0; uframe < 7; uframe++) {
700 claimed = periodic_usecs (ehci, frame, uframe);
704 } while ((frame += 1) < ehci->periodic_size);
706 /* just check the specified uframe, at that period */
709 claimed = periodic_usecs (ehci, frame, uframe);
712 } while ((frame += period) < ehci->periodic_size);
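/*
 * Worked example for the check_period() logic above: after the
 * "max already claimed" inversion, a uframe passes only while
 * periodic_usecs() stays at or below uframe_periodic_max - usecs.
 * With a 100 usec budget and a 20 usec request, any probed uframe
 * already carrying more than 80 usecs makes the check fail.
 */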
719 static int check_intr_schedule (
720 struct ehci_hcd *ehci,
723 const struct ehci_qh *qh,
727 int retval = -ENOSPC;
730 if (qh->c_usecs && uframe >= 6) /* FSTN territory? */
733 if (!check_period (ehci, frame, uframe, qh->period, qh->usecs))
741 #ifdef CONFIG_USB_EHCI_TT_NEWSCHED
742 if (tt_available (ehci, qh->period, qh->dev, frame, uframe,
746 /* TODO : this may need FSTN for SSPLIT in uframe 5. */
747 for (i=uframe+1; i<8 && i<uframe+4; i++)
748 if (!check_period (ehci, frame, i,
749 qh->period, qh->c_usecs))
756 *c_maskp = cpu_to_hc32(ehci, mask << 8);
	/* Make sure this tt's buffer is also available for CSPLITs.
	 * We pessimize a bit; probably the typical full speed case
	 * doesn't need the second CSPLIT.
	 *
	 * NOTE:  both SPLIT and CSPLIT could be checked in just
	 * one smart pass...
	 */
766 mask = 0x03 << (uframe + qh->gap_uf);
767 *c_maskp = cpu_to_hc32(ehci, mask << 8);
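	/*
	 * Illustration: with uframe 1 and gap_uf 2, the CSPLIT candidates
	 * are uframes 3 and 4 (mask = 0x03 << 3); shifting by 8 places them
	 * in the QH's C-mask, which lives in bits 15:8 of hw_info2.
	 */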
770 if (tt_no_collision (ehci, qh->period, qh->dev, frame, mask)) {
771 if (!check_period (ehci, frame, uframe + qh->gap_uf + 1,
772 qh->period, qh->c_usecs))
774 if (!check_period (ehci, frame, uframe + qh->gap_uf,
775 qh->period, qh->c_usecs))
784 /* "first fit" scheduling policy used the first time through,
785 * or when the previous schedule slot can't be re-used.
787 static int qh_schedule(struct ehci_hcd *ehci, struct ehci_qh *qh)
792 unsigned frame; /* 0..(qh->period - 1), or NO_FRAME */
793 struct ehci_qh_hw *hw = qh->hw;
795 qh_refresh(ehci, qh);
796 hw->hw_next = EHCI_LIST_END(ehci);
799 /* reuse the previous schedule slots, if we can */
800 if (frame < qh->period) {
801 uframe = ffs(hc32_to_cpup(ehci, &hw->hw_info2) & QH_SMASK);
802 status = check_intr_schedule (ehci, frame, --uframe,
810 /* else scan the schedule to find a group of slots such that all
811 * uframes have enough periodic bandwidth available.
814 /* "normal" case, uframing flexible except with splits */
818 for (i = qh->period; status && i > 0; --i) {
819 frame = ++ehci->random_frame % qh->period;
820 for (uframe = 0; uframe < 8; uframe++) {
821 status = check_intr_schedule (ehci,
829 /* qh->period == 0 means every uframe */
832 status = check_intr_schedule (ehci, 0, 0, qh, &c_mask);
838 /* reset S-frame and (maybe) C-frame masks */
839 hw->hw_info2 &= cpu_to_hc32(ehci, ~(QH_CMASK | QH_SMASK));
840 hw->hw_info2 |= qh->period
841 ? cpu_to_hc32(ehci, 1 << uframe)
842 : cpu_to_hc32(ehci, QH_SMASK);
843 hw->hw_info2 |= c_mask;
845 ehci_dbg (ehci, "reused qh %p schedule\n", qh);
847 /* stuff into the periodic schedule */
848 qh_link_periodic(ehci, qh);
853 static int intr_submit (
854 struct ehci_hcd *ehci,
856 struct list_head *qtd_list,
863 struct list_head empty;
865 /* get endpoint and transfer/schedule data */
866 epnum = urb->ep->desc.bEndpointAddress;
868 spin_lock_irqsave (&ehci->lock, flags);
870 if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
872 goto done_not_linked;
874 status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
875 if (unlikely(status))
876 goto done_not_linked;
878 /* get qh and force any scheduling errors */
879 INIT_LIST_HEAD (&empty);
880 qh = qh_append_tds(ehci, urb, &empty, epnum, &urb->ep->hcpriv);
885 if (qh->qh_state == QH_STATE_IDLE) {
886 if ((status = qh_schedule (ehci, qh)) != 0)
890 /* then queue the urb's tds to the qh */
891 qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv);
894 /* ... update usbfs periodic stats */
895 ehci_to_hcd(ehci)->self.bandwidth_int_reqs++;
898 if (unlikely(status))
899 usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
901 spin_unlock_irqrestore (&ehci->lock, flags);
903 qtd_list_free (ehci, urb, qtd_list);
908 static void scan_intr(struct ehci_hcd *ehci)
912 list_for_each_entry_safe(qh, ehci->qh_scan_next, &ehci->intr_qh_list,
915 /* clean any finished work for this qh */
916 if (!list_empty(&qh->qtd_list)) {
920 * Unlinks could happen here; completion reporting
921 * drops the lock. That's why ehci->qh_scan_next
922 * always holds the next qh to scan; if the next qh
923 * gets unlinked then ehci->qh_scan_next is adjusted
924 * in qh_unlink_periodic().
926 temp = qh_completions(ehci, qh);
927 if (unlikely(qh->needs_rescan ||
928 (list_empty(&qh->qtd_list) &&
929 qh->qh_state == QH_STATE_LINKED)))
930 start_unlink_intr(ehci, qh);
937 /*-------------------------------------------------------------------------*/
939 /* ehci_iso_stream ops work with both ITD and SITD */
941 static struct ehci_iso_stream *
942 iso_stream_alloc (gfp_t mem_flags)
944 struct ehci_iso_stream *stream;
946 stream = kzalloc(sizeof *stream, mem_flags);
947 if (likely (stream != NULL)) {
948 INIT_LIST_HEAD(&stream->td_list);
949 INIT_LIST_HEAD(&stream->free_list);
950 stream->next_uframe = -1;
957 struct ehci_hcd *ehci,
958 struct ehci_iso_stream *stream,
959 struct usb_device *dev,
964 static const u8 smask_out [] = { 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f };
967 unsigned epnum, maxp;
972 * this might be a "high bandwidth" highspeed endpoint,
973 * as encoded in the ep descriptor's wMaxPacket field
975 epnum = usb_pipeendpoint (pipe);
976 is_input = usb_pipein (pipe) ? USB_DIR_IN : 0;
977 maxp = usb_maxpacket(dev, pipe, !is_input);
984 /* knows about ITD vs SITD */
985 if (dev->speed == USB_SPEED_HIGH) {
986 unsigned multi = hb_mult(maxp);
988 stream->highspeed = 1;
990 maxp = max_packet(maxp);
994 stream->buf0 = cpu_to_hc32(ehci, (epnum << 8) | dev->devnum);
995 stream->buf1 = cpu_to_hc32(ehci, buf1);
996 stream->buf2 = cpu_to_hc32(ehci, multi);
998 /* usbfs wants to report the average usecs per frame tied up
999 * when transfers on this endpoint are scheduled ...
1001 stream->usecs = HS_USECS_ISO (maxp);
1002 bandwidth = stream->usecs * 8;
1003 bandwidth /= interval;
1010 addr = dev->ttport << 24;
1011 if (!ehci_is_TDI(ehci)
1013 ehci_to_hcd(ehci)->self.root_hub))
1014 addr |= dev->tt->hub->devnum << 16;
1016 addr |= dev->devnum;
1017 stream->usecs = HS_USECS_ISO (maxp);
1018 think_time = dev->tt ? dev->tt->think_time : 0;
1019 stream->tt_usecs = NS_TO_US (think_time + usb_calc_bus_time (
1020 dev->speed, is_input, 1, maxp));
1021 hs_transfers = max (1u, (maxp + 187) / 188);
1026 stream->c_usecs = stream->usecs;
1027 stream->usecs = HS_USECS_ISO (1);
1028 stream->raw_mask = 1;
1030 /* c-mask as specified in USB 2.0 11.18.4 3.c */
1031 tmp = (1 << (hs_transfers + 2)) - 1;
1032 stream->raw_mask |= tmp << (8 + 2);
1034 stream->raw_mask = smask_out [hs_transfers - 1];
1035 bandwidth = stream->usecs + stream->c_usecs;
1036 bandwidth /= interval << 3;
1038 /* stream->splits gets created from raw_mask later */
1039 stream->address = cpu_to_hc32(ehci, addr);
1041 stream->bandwidth = bandwidth;
1045 stream->bEndpointAddress = is_input | epnum;
1046 stream->interval = interval;
1047 stream->maxp = maxp;
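/*
 * Reading the full-speed split path above (illustration only): an OUT
 * endpoint with maxp 400 needs hs_transfers = (400 + 187) / 188 = 3
 * start-splits, so raw_mask = smask_out[2] = 0x07; an IN endpoint instead
 * gets a single SSPLIT bit plus a complete-split window of hs_transfers + 2
 * uframes beginning two uframes later, per USB 2.0 section 11.18.4.
 */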
1050 static struct ehci_iso_stream *
1051 iso_stream_find (struct ehci_hcd *ehci, struct urb *urb)
1054 struct ehci_iso_stream *stream;
1055 struct usb_host_endpoint *ep;
1056 unsigned long flags;
1058 epnum = usb_pipeendpoint (urb->pipe);
1059 if (usb_pipein(urb->pipe))
1060 ep = urb->dev->ep_in[epnum];
1062 ep = urb->dev->ep_out[epnum];
1064 spin_lock_irqsave (&ehci->lock, flags);
1065 stream = ep->hcpriv;
1067 if (unlikely (stream == NULL)) {
1068 stream = iso_stream_alloc(GFP_ATOMIC);
1069 if (likely (stream != NULL)) {
1070 ep->hcpriv = stream;
1072 iso_stream_init(ehci, stream, urb->dev, urb->pipe,
1076 /* if dev->ep [epnum] is a QH, hw is set */
1077 } else if (unlikely (stream->hw != NULL)) {
1078 ehci_dbg (ehci, "dev %s ep%d%s, not iso??\n",
1079 urb->dev->devpath, epnum,
1080 usb_pipein(urb->pipe) ? "in" : "out");
1084 spin_unlock_irqrestore (&ehci->lock, flags);
1088 /*-------------------------------------------------------------------------*/
1090 /* ehci_iso_sched ops can be ITD-only or SITD-only */
1092 static struct ehci_iso_sched *
1093 iso_sched_alloc (unsigned packets, gfp_t mem_flags)
1095 struct ehci_iso_sched *iso_sched;
1096 int size = sizeof *iso_sched;
1098 size += packets * sizeof (struct ehci_iso_packet);
1099 iso_sched = kzalloc(size, mem_flags);
1100 if (likely (iso_sched != NULL)) {
1101 INIT_LIST_HEAD (&iso_sched->td_list);
1108 struct ehci_hcd *ehci,
1109 struct ehci_iso_sched *iso_sched,
1110 struct ehci_iso_stream *stream,
1115 dma_addr_t dma = urb->transfer_dma;
1117 /* how many uframes are needed for these transfers */
1118 iso_sched->span = urb->number_of_packets * stream->interval;
1120 /* figure out per-uframe itd fields that we'll need later
1121 * when we fit new itds into the schedule.
1123 for (i = 0; i < urb->number_of_packets; i++) {
1124 struct ehci_iso_packet *uframe = &iso_sched->packet [i];
1129 length = urb->iso_frame_desc [i].length;
1130 buf = dma + urb->iso_frame_desc [i].offset;
1132 trans = EHCI_ISOC_ACTIVE;
1133 trans |= buf & 0x0fff;
1134 if (unlikely (((i + 1) == urb->number_of_packets))
1135 && !(urb->transfer_flags & URB_NO_INTERRUPT))
1136 trans |= EHCI_ITD_IOC;
1137 trans |= length << 16;
1138 uframe->transaction = cpu_to_hc32(ehci, trans);
1140 /* might need to cross a buffer page within a uframe */
1141 uframe->bufp = (buf & ~(u64)0x0fff);
1143 if (unlikely ((uframe->bufp != (buf & ~(u64)0x0fff))))
1150 struct ehci_iso_stream *stream,
1151 struct ehci_iso_sched *iso_sched
1156 // caller must hold ehci->lock!
1157 list_splice (&iso_sched->td_list, &stream->free_list);
1162 itd_urb_transaction (
1163 struct ehci_iso_stream *stream,
1164 struct ehci_hcd *ehci,
1169 struct ehci_itd *itd;
1173 struct ehci_iso_sched *sched;
1174 unsigned long flags;
1176 sched = iso_sched_alloc (urb->number_of_packets, mem_flags);
1177 if (unlikely (sched == NULL))
1180 itd_sched_init(ehci, sched, stream, urb);
1182 if (urb->interval < 8)
1183 num_itds = 1 + (sched->span + 7) / 8;
1185 num_itds = urb->number_of_packets;
1187 /* allocate/init ITDs */
1188 spin_lock_irqsave (&ehci->lock, flags);
1189 for (i = 0; i < num_itds; i++) {
1192 * Use iTDs from the free list, but not iTDs that may
1193 * still be in use by the hardware.
1195 if (likely(!list_empty(&stream->free_list))) {
1196 itd = list_first_entry(&stream->free_list,
1197 struct ehci_itd, itd_list);
1198 if (itd->frame == ehci->now_frame)
1200 list_del (&itd->itd_list);
1201 itd_dma = itd->itd_dma;
1204 spin_unlock_irqrestore (&ehci->lock, flags);
1205 itd = dma_pool_alloc (ehci->itd_pool, mem_flags,
1207 spin_lock_irqsave (&ehci->lock, flags);
1209 iso_sched_free(stream, sched);
1210 spin_unlock_irqrestore(&ehci->lock, flags);
1215 memset (itd, 0, sizeof *itd);
1216 itd->itd_dma = itd_dma;
1217 list_add (&itd->itd_list, &sched->td_list);
1219 spin_unlock_irqrestore (&ehci->lock, flags);
1221 /* temporarily store schedule info in hcpriv */
1222 urb->hcpriv = sched;
1223 urb->error_count = 0;
1227 /*-------------------------------------------------------------------------*/
1231 struct ehci_hcd *ehci,
1240 /* can't commit more than uframe_periodic_max usec */
1241 if (periodic_usecs (ehci, uframe >> 3, uframe & 0x7)
1242 > (ehci->uframe_periodic_max - usecs))
1245 /* we know urb->interval is 2^N uframes */
1247 } while (uframe < mod);
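/*
 * Example of the stepping in itd_slot_ok() above: a high speed stream with
 * urb->interval of 8 uframes (one packet per frame) probes start, start + 8,
 * start + 16, ... all the way around the schedule (mod = periodic_size << 3),
 * and the slot is usable only if every probed uframe still has room.
 */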
1253 struct ehci_hcd *ehci,
1255 struct ehci_iso_stream *stream,
1257 struct ehci_iso_sched *sched,
1264 mask = stream->raw_mask << (uframe & 7);
1266 /* for IN, don't wrap CSPLIT into the next frame */
1270 /* check bandwidth */
1271 uframe %= period_uframes;
1272 frame = uframe >> 3;
1274 #ifdef CONFIG_USB_EHCI_TT_NEWSCHED
1275 /* The tt's fullspeed bus bandwidth must be available.
1276 * tt_available scheduling guarantees 10+% for control/bulk.
1279 if (!tt_available(ehci, period_uframes >> 3,
1280 stream->udev, frame, uf, stream->tt_usecs))
1283 /* tt must be idle for start(s), any gap, and csplit.
1284 * assume scheduling slop leaves 10+% for control/bulk.
1286 if (!tt_no_collision(ehci, period_uframes >> 3,
1287 stream->udev, frame, mask))
1291 /* this multi-pass logic is simple, but performance may
1292 * suffer when the schedule data isn't cached.
1297 frame = uframe >> 3;
1300 /* check starts (OUT uses more than one) */
1301 max_used = ehci->uframe_periodic_max - stream->usecs;
1302 for (tmp = stream->raw_mask & 0xff; tmp; tmp >>= 1, uf++) {
1303 if (periodic_usecs (ehci, frame, uf) > max_used)
1307 /* for IN, check CSPLIT */
1308 if (stream->c_usecs) {
1310 max_used = ehci->uframe_periodic_max - stream->c_usecs;
1314 if ((stream->raw_mask & tmp) == 0)
1316 if (periodic_usecs (ehci, frame, uf)
1322 /* we know urb->interval is 2^N uframes */
1323 uframe += period_uframes;
1324 } while (uframe < mod);
1326 stream->splits = cpu_to_hc32(ehci, stream->raw_mask << (uframe & 7));
/*
 * This scheduler plans almost as far into the future as it has actual
 * periodic schedule slots.  (Affected by TUNE_FLS, which defaults to
 * "as small as possible" to be cache-friendlier.)  That limits the size
 * of transfers you can stream reliably; avoid more than 64 msec per urb.
 * Also avoid queue depths of less than ehci's worst irq latency (affected
 * by the per-urb URB_NO_INTERRUPT hint, the log2_irq_thresh module parameter,
 * and other factors); or more than about 230 msec total (for portability,
 * given EHCI_TUNE_FLS and the slop).  Or, write a smarter scheduler!
 */

#define SCHEDULING_DELAY	40	/* microframes */
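/*
 * 40 uframes of delay is 5 msec (40 * 125 usec), comfortably more than the
 * isochronous scheduling threshold and enough slack for slow interrupt
 * handling before the first newly scheduled slot comes around.
 */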
1344 iso_stream_schedule (
1345 struct ehci_hcd *ehci,
1347 struct ehci_iso_stream *stream
1350 u32 now, base, next, start, period, span;
1352 unsigned mod = ehci->periodic_size << 3;
1353 struct ehci_iso_sched *sched = urb->hcpriv;
1355 period = urb->interval;
1357 if (!stream->highspeed) {
1362 now = ehci_read_frame_index(ehci) & (mod - 1);
1364 /* Typical case: reuse current schedule, stream is still active.
1365 * Hopefully there are no gaps from the host falling behind
1366 * (irq delays etc). If there are, the behavior depends on
1367 * whether URB_ISO_ASAP is set.
1369 if (likely (!list_empty (&stream->td_list))) {
1371 /* Take the isochronous scheduling threshold into account */
1373 next = now + ehci->i_thresh; /* uframe cache */
1375 next = (now + 2 + 7) & ~0x07; /* full frame cache */
1378 * Use ehci->last_iso_frame as the base. There can't be any
1379 * TDs scheduled for earlier than that.
1381 base = ehci->last_iso_frame << 3;
1382 next = (next - base) & (mod - 1);
1383 start = (stream->next_uframe - base) & (mod - 1);
1385 /* Is the schedule already full? */
1386 if (unlikely(start < period)) {
1387 ehci_dbg(ehci, "iso sched full %p (%u-%u < %u mod %u)\n",
1388 urb, stream->next_uframe, base,
1394 /* Behind the scheduling threshold? */
1395 if (unlikely(start < next)) {
1397 /* USB_ISO_ASAP: Round up to the first available slot */
1398 if (urb->transfer_flags & URB_ISO_ASAP)
1399 start += (next - start + period - 1) & -period;
1402 * Not ASAP: Use the next slot in the stream. If
1403 * the entire URB falls before the threshold, fail.
1405 else if (start + span - period < next) {
1406 ehci_dbg(ehci, "iso urb late %p (%u+%u < %u)\n",
1408 span - period, next + base);
1417 /* need to schedule; when's the next (u)frame we could start?
1418 * this is bigger than ehci->i_thresh allows; scheduling itself
1419 * isn't free, the delay should handle reasonably slow cpus. it
1420 * can also help high bandwidth if the dma and irq loads don't
1421 * jump until after the queue is primed.
1427 start = base + SCHEDULING_DELAY;
1429 /* find a uframe slot with enough bandwidth.
1430 * Early uframes are more precious because full-speed
1431 * iso IN transfers can't use late uframes,
1432 * and therefore they should be allocated last.
1438 /* check schedule: enough space? */
1439 if (stream->highspeed) {
1440 if (itd_slot_ok(ehci, mod, start,
1441 stream->usecs, period))
1444 if ((start % 8) >= 6)
1446 if (sitd_slot_ok(ehci, mod, stream,
1447 start, sched, period))
1450 } while (start > next && !done);
1452 /* no room in the schedule */
1454 ehci_dbg(ehci, "iso sched full %p", urb);
1460 /* Tried to schedule too far into the future? */
1461 if (unlikely(start - base + span - period >= mod)) {
1462 ehci_dbg(ehci, "request %p would overflow (%u+%u >= %u)\n",
1463 urb, start - base, span - period, mod);
1468 stream->next_uframe = start & (mod - 1);
1470 /* report high speed start in uframes; full speed, in frames */
1471 urb->start_frame = stream->next_uframe;
1472 if (!stream->highspeed)
1473 urb->start_frame >>= 3;
1475 /* Make sure scan_isoc() sees these */
1476 if (ehci->isoc_count == 0)
1477 ehci->last_iso_frame = now >> 3;
1481 iso_sched_free(stream, sched);
1486 /*-------------------------------------------------------------------------*/
1489 itd_init(struct ehci_hcd *ehci, struct ehci_iso_stream *stream,
1490 struct ehci_itd *itd)
1494 /* it's been recently zeroed */
1495 itd->hw_next = EHCI_LIST_END(ehci);
1496 itd->hw_bufp [0] = stream->buf0;
1497 itd->hw_bufp [1] = stream->buf1;
1498 itd->hw_bufp [2] = stream->buf2;
1500 for (i = 0; i < 8; i++)
1503 /* All other fields are filled when scheduling */
1508 struct ehci_hcd *ehci,
1509 struct ehci_itd *itd,
1510 struct ehci_iso_sched *iso_sched,
1515 struct ehci_iso_packet *uf = &iso_sched->packet [index];
1516 unsigned pg = itd->pg;
1518 // BUG_ON (pg == 6 && uf->cross);
1521 itd->index [uframe] = index;
1523 itd->hw_transaction[uframe] = uf->transaction;
1524 itd->hw_transaction[uframe] |= cpu_to_hc32(ehci, pg << 12);
1525 itd->hw_bufp[pg] |= cpu_to_hc32(ehci, uf->bufp & ~(u32)0);
1526 itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(uf->bufp >> 32));
1528 /* iso_frame_desc[].offset must be strictly increasing */
1529 if (unlikely (uf->cross)) {
1530 u64 bufp = uf->bufp + 4096;
1533 itd->hw_bufp[pg] |= cpu_to_hc32(ehci, bufp & ~(u32)0);
1534 itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(bufp >> 32));
1539 itd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
1541 union ehci_shadow *prev = &ehci->pshadow[frame];
1542 __hc32 *hw_p = &ehci->periodic[frame];
1543 union ehci_shadow here = *prev;
1546 /* skip any iso nodes which might belong to previous microframes */
1548 type = Q_NEXT_TYPE(ehci, *hw_p);
1549 if (type == cpu_to_hc32(ehci, Q_TYPE_QH))
1551 prev = periodic_next_shadow(ehci, prev, type);
1552 hw_p = shadow_next_periodic(ehci, &here, type);
1556 itd->itd_next = here;
1557 itd->hw_next = *hw_p;
1561 *hw_p = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD);
1564 /* fit urb's itds into the selected schedule slot; activate as needed */
1565 static void itd_link_urb(
1566 struct ehci_hcd *ehci,
1569 struct ehci_iso_stream *stream
1573 unsigned next_uframe, uframe, frame;
1574 struct ehci_iso_sched *iso_sched = urb->hcpriv;
1575 struct ehci_itd *itd;
1577 next_uframe = stream->next_uframe & (mod - 1);
1579 if (unlikely (list_empty(&stream->td_list))) {
1580 ehci_to_hcd(ehci)->self.bandwidth_allocated
1581 += stream->bandwidth;
1583 "schedule devp %s ep%d%s-iso period %d start %d.%d\n",
1584 urb->dev->devpath, stream->bEndpointAddress & 0x0f,
1585 (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
1587 next_uframe >> 3, next_uframe & 0x7);
1590 if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
1591 if (ehci->amd_pll_fix == 1)
1592 usb_amd_quirk_pll_disable();
1595 ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
1597 /* fill iTDs uframe by uframe */
1598 for (packet = 0, itd = NULL; packet < urb->number_of_packets; ) {
1600 /* ASSERT: we have all necessary itds */
1601 // BUG_ON (list_empty (&iso_sched->td_list));
1603 /* ASSERT: no itds for this endpoint in this uframe */
1605 itd = list_entry (iso_sched->td_list.next,
1606 struct ehci_itd, itd_list);
1607 list_move_tail (&itd->itd_list, &stream->td_list);
1608 itd->stream = stream;
1610 itd_init (ehci, stream, itd);
1613 uframe = next_uframe & 0x07;
1614 frame = next_uframe >> 3;
1616 itd_patch(ehci, itd, iso_sched, packet, uframe);
1618 next_uframe += stream->interval;
1619 next_uframe &= mod - 1;
1622 /* link completed itds into the schedule */
1623 if (((next_uframe >> 3) != frame)
1624 || packet == urb->number_of_packets) {
1625 itd_link(ehci, frame & (ehci->periodic_size - 1), itd);
1629 stream->next_uframe = next_uframe;
1631 /* don't need that schedule data any more */
1632 iso_sched_free (stream, iso_sched);
1633 urb->hcpriv = stream;
1636 enable_periodic(ehci);
1639 #define ISO_ERRS (EHCI_ISOC_BUF_ERR | EHCI_ISOC_BABBLE | EHCI_ISOC_XACTERR)
/* Process and recycle a completed ITD.  Return true iff its urb completed,
 * and hence its completion callback probably added things to the hardware
 * schedule.
 *
 * Note that we carefully avoid recycling this descriptor until after any
 * completion callback runs, so that it won't be reused quickly.  That is,
 * assuming (a) no more than two urbs per frame on this endpoint, and also
 * (b) only this endpoint's completions submit URBs.  It seems some silicon
 * corrupts things if you reuse completed descriptors very quickly...
 */
1651 static bool itd_complete(struct ehci_hcd *ehci, struct ehci_itd *itd)
1653 struct urb *urb = itd->urb;
1654 struct usb_iso_packet_descriptor *desc;
1658 struct ehci_iso_stream *stream = itd->stream;
1659 struct usb_device *dev;
1660 bool retval = false;
1662 /* for each uframe with a packet */
1663 for (uframe = 0; uframe < 8; uframe++) {
1664 if (likely (itd->index[uframe] == -1))
1666 urb_index = itd->index[uframe];
1667 desc = &urb->iso_frame_desc [urb_index];
1669 t = hc32_to_cpup(ehci, &itd->hw_transaction [uframe]);
1670 itd->hw_transaction [uframe] = 0;
1672 /* report transfer status */
1673 if (unlikely (t & ISO_ERRS)) {
1675 if (t & EHCI_ISOC_BUF_ERR)
1676 desc->status = usb_pipein (urb->pipe)
1677 ? -ENOSR /* hc couldn't read */
1678 : -ECOMM; /* hc couldn't write */
1679 else if (t & EHCI_ISOC_BABBLE)
1680 desc->status = -EOVERFLOW;
1681 else /* (t & EHCI_ISOC_XACTERR) */
1682 desc->status = -EPROTO;
1684 /* HC need not update length with this error */
1685 if (!(t & EHCI_ISOC_BABBLE)) {
1686 desc->actual_length = EHCI_ITD_LENGTH(t);
1687 urb->actual_length += desc->actual_length;
1689 } else if (likely ((t & EHCI_ISOC_ACTIVE) == 0)) {
1691 desc->actual_length = EHCI_ITD_LENGTH(t);
1692 urb->actual_length += desc->actual_length;
1694 /* URB was too late */
1699 /* handle completion now? */
1700 if (likely ((urb_index + 1) != urb->number_of_packets))
1703 /* ASSERT: it's really the last itd for this urb
1704 list_for_each_entry (itd, &stream->td_list, itd_list)
1705 BUG_ON (itd->urb == urb);
1708 /* give urb back to the driver; completion often (re)submits */
1710 ehci_urb_done(ehci, urb, 0);
1715 disable_periodic(ehci);
1717 ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
1718 if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
1719 if (ehci->amd_pll_fix == 1)
1720 usb_amd_quirk_pll_enable();
1723 if (unlikely(list_is_singular(&stream->td_list))) {
1724 ehci_to_hcd(ehci)->self.bandwidth_allocated
1725 -= stream->bandwidth;
1727 "deschedule devp %s ep%d%s-iso\n",
1728 dev->devpath, stream->bEndpointAddress & 0x0f,
1729 (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out");
1735 /* Add to the end of the free list for later reuse */
1736 list_move_tail(&itd->itd_list, &stream->free_list);
1738 /* Recycle the iTDs when the pipeline is empty (ep no longer in use) */
1739 if (list_empty(&stream->td_list)) {
1740 list_splice_tail_init(&stream->free_list,
1741 &ehci->cached_itd_list);
1742 start_free_itds(ehci);
1748 /*-------------------------------------------------------------------------*/
1750 static int itd_submit (struct ehci_hcd *ehci, struct urb *urb,
1753 int status = -EINVAL;
1754 unsigned long flags;
1755 struct ehci_iso_stream *stream;
1757 /* Get iso_stream head */
1758 stream = iso_stream_find (ehci, urb);
1759 if (unlikely (stream == NULL)) {
1760 ehci_dbg (ehci, "can't get iso stream\n");
1763 if (unlikely (urb->interval != stream->interval)) {
1764 ehci_dbg (ehci, "can't change iso interval %d --> %d\n",
1765 stream->interval, urb->interval);
1769 #ifdef EHCI_URB_TRACE
1771 "%s %s urb %p ep%d%s len %d, %d pkts %d uframes [%p]\n",
1772 __func__, urb->dev->devpath, urb,
1773 usb_pipeendpoint (urb->pipe),
1774 usb_pipein (urb->pipe) ? "in" : "out",
1775 urb->transfer_buffer_length,
1776 urb->number_of_packets, urb->interval,
1780 /* allocate ITDs w/o locking anything */
1781 status = itd_urb_transaction (stream, ehci, urb, mem_flags);
1782 if (unlikely (status < 0)) {
1783 ehci_dbg (ehci, "can't init itds\n");
1787 /* schedule ... need to lock */
1788 spin_lock_irqsave (&ehci->lock, flags);
1789 if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
1790 status = -ESHUTDOWN;
1791 goto done_not_linked;
1793 status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
1794 if (unlikely(status))
1795 goto done_not_linked;
1796 status = iso_stream_schedule(ehci, urb, stream);
1797 if (likely (status == 0))
1798 itd_link_urb (ehci, urb, ehci->periodic_size << 3, stream);
1800 usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
1802 spin_unlock_irqrestore (&ehci->lock, flags);
1807 /*-------------------------------------------------------------------------*/
/*
 * "Split ISO TDs" ... used for USB 1.1 devices going through the
 * TTs in USB 2.0 hubs.  These need microframe scheduling.
 */
1816 struct ehci_hcd *ehci,
1817 struct ehci_iso_sched *iso_sched,
1818 struct ehci_iso_stream *stream,
1823 dma_addr_t dma = urb->transfer_dma;
1825 /* how many frames are needed for these transfers */
1826 iso_sched->span = urb->number_of_packets * stream->interval;
1828 /* figure out per-frame sitd fields that we'll need later
1829 * when we fit new sitds into the schedule.
1831 for (i = 0; i < urb->number_of_packets; i++) {
1832 struct ehci_iso_packet *packet = &iso_sched->packet [i];
1837 length = urb->iso_frame_desc [i].length & 0x03ff;
1838 buf = dma + urb->iso_frame_desc [i].offset;
1840 trans = SITD_STS_ACTIVE;
1841 if (((i + 1) == urb->number_of_packets)
1842 && !(urb->transfer_flags & URB_NO_INTERRUPT))
1844 trans |= length << 16;
1845 packet->transaction = cpu_to_hc32(ehci, trans);
1847 /* might need to cross a buffer page within a td */
1849 packet->buf1 = (buf + length) & ~0x0fff;
1850 if (packet->buf1 != (buf & ~(u64)0x0fff))
1853 /* OUT uses multiple start-splits */
1854 if (stream->bEndpointAddress & USB_DIR_IN)
1856 length = (length + 187) / 188;
1857 if (length > 1) /* BEGIN vs ALL */
1859 packet->buf1 |= length;
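		/*
		 * e.g. a 400 byte full-speed OUT payload becomes
		 * (400 + 187) / 188 = 3 start-splits, so the count stored
		 * here is 3 and the transaction position is marked BEGIN
		 * rather than ALL.
		 */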
1864 sitd_urb_transaction (
1865 struct ehci_iso_stream *stream,
1866 struct ehci_hcd *ehci,
1871 struct ehci_sitd *sitd;
1872 dma_addr_t sitd_dma;
1874 struct ehci_iso_sched *iso_sched;
1875 unsigned long flags;
1877 iso_sched = iso_sched_alloc (urb->number_of_packets, mem_flags);
1878 if (iso_sched == NULL)
1881 sitd_sched_init(ehci, iso_sched, stream, urb);
1883 /* allocate/init sITDs */
1884 spin_lock_irqsave (&ehci->lock, flags);
1885 for (i = 0; i < urb->number_of_packets; i++) {
1887 /* NOTE: for now, we don't try to handle wraparound cases
1888 * for IN (using sitd->hw_backpointer, like a FSTN), which
1889 * means we never need two sitds for full speed packets.
1893 * Use siTDs from the free list, but not siTDs that may
1894 * still be in use by the hardware.
1896 if (likely(!list_empty(&stream->free_list))) {
1897 sitd = list_first_entry(&stream->free_list,
1898 struct ehci_sitd, sitd_list);
1899 if (sitd->frame == ehci->now_frame)
1901 list_del (&sitd->sitd_list);
1902 sitd_dma = sitd->sitd_dma;
1905 spin_unlock_irqrestore (&ehci->lock, flags);
1906 sitd = dma_pool_alloc (ehci->sitd_pool, mem_flags,
1908 spin_lock_irqsave (&ehci->lock, flags);
1910 iso_sched_free(stream, iso_sched);
1911 spin_unlock_irqrestore(&ehci->lock, flags);
1916 memset (sitd, 0, sizeof *sitd);
1917 sitd->sitd_dma = sitd_dma;
1918 list_add (&sitd->sitd_list, &iso_sched->td_list);
1921 /* temporarily store schedule info in hcpriv */
1922 urb->hcpriv = iso_sched;
1923 urb->error_count = 0;
1925 spin_unlock_irqrestore (&ehci->lock, flags);
1929 /*-------------------------------------------------------------------------*/
1933 struct ehci_hcd *ehci,
1934 struct ehci_iso_stream *stream,
1935 struct ehci_sitd *sitd,
1936 struct ehci_iso_sched *iso_sched,
1940 struct ehci_iso_packet *uf = &iso_sched->packet [index];
1941 u64 bufp = uf->bufp;
1943 sitd->hw_next = EHCI_LIST_END(ehci);
1944 sitd->hw_fullspeed_ep = stream->address;
1945 sitd->hw_uframe = stream->splits;
1946 sitd->hw_results = uf->transaction;
1947 sitd->hw_backpointer = EHCI_LIST_END(ehci);
1950 sitd->hw_buf[0] = cpu_to_hc32(ehci, bufp);
1951 sitd->hw_buf_hi[0] = cpu_to_hc32(ehci, bufp >> 32);
1953 sitd->hw_buf[1] = cpu_to_hc32(ehci, uf->buf1);
1956 sitd->hw_buf_hi[1] = cpu_to_hc32(ehci, bufp >> 32);
1957 sitd->index = index;
1961 sitd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_sitd *sitd)
1963 /* note: sitd ordering could matter (CSPLIT then SSPLIT) */
1964 sitd->sitd_next = ehci->pshadow [frame];
1965 sitd->hw_next = ehci->periodic [frame];
1966 ehci->pshadow [frame].sitd = sitd;
1967 sitd->frame = frame;
1969 ehci->periodic[frame] = cpu_to_hc32(ehci, sitd->sitd_dma | Q_TYPE_SITD);
1972 /* fit urb's sitds into the selected schedule slot; activate as needed */
1973 static void sitd_link_urb(
1974 struct ehci_hcd *ehci,
1977 struct ehci_iso_stream *stream
1981 unsigned next_uframe;
1982 struct ehci_iso_sched *sched = urb->hcpriv;
1983 struct ehci_sitd *sitd;
1985 next_uframe = stream->next_uframe;
1987 if (list_empty(&stream->td_list)) {
1988 /* usbfs ignores TT bandwidth */
1989 ehci_to_hcd(ehci)->self.bandwidth_allocated
1990 += stream->bandwidth;
1992 "sched devp %s ep%d%s-iso [%d] %dms/%04x\n",
1993 urb->dev->devpath, stream->bEndpointAddress & 0x0f,
1994 (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
1995 (next_uframe >> 3) & (ehci->periodic_size - 1),
1996 stream->interval, hc32_to_cpu(ehci, stream->splits));
1999 if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
2000 if (ehci->amd_pll_fix == 1)
2001 usb_amd_quirk_pll_disable();
2004 ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
2006 /* fill sITDs frame by frame */
2007 for (packet = 0, sitd = NULL;
2008 packet < urb->number_of_packets;
2011 /* ASSERT: we have all necessary sitds */
2012 BUG_ON (list_empty (&sched->td_list));
2014 /* ASSERT: no itds for this endpoint in this frame */
2016 sitd = list_entry (sched->td_list.next,
2017 struct ehci_sitd, sitd_list);
2018 list_move_tail (&sitd->sitd_list, &stream->td_list);
2019 sitd->stream = stream;
2022 sitd_patch(ehci, stream, sitd, sched, packet);
2023 sitd_link(ehci, (next_uframe >> 3) & (ehci->periodic_size - 1),
2026 next_uframe += stream->interval << 3;
2028 stream->next_uframe = next_uframe & (mod - 1);
2030 /* don't need that schedule data any more */
2031 iso_sched_free (stream, sched);
2032 urb->hcpriv = stream;
2035 enable_periodic(ehci);
2038 /*-------------------------------------------------------------------------*/
2040 #define SITD_ERRS (SITD_STS_ERR | SITD_STS_DBE | SITD_STS_BABBLE \
2041 | SITD_STS_XACT | SITD_STS_MMF)
/* Process and recycle a completed SITD.  Return true iff its urb completed,
 * and hence its completion callback probably added things to the hardware
 * schedule.
 *
 * Note that we carefully avoid recycling this descriptor until after any
 * completion callback runs, so that it won't be reused quickly.  That is,
 * assuming (a) no more than two urbs per frame on this endpoint, and also
 * (b) only this endpoint's completions submit URBs.  It seems some silicon
 * corrupts things if you reuse completed descriptors very quickly...
 */
2053 static bool sitd_complete(struct ehci_hcd *ehci, struct ehci_sitd *sitd)
2055 struct urb *urb = sitd->urb;
2056 struct usb_iso_packet_descriptor *desc;
2059 struct ehci_iso_stream *stream = sitd->stream;
2060 struct usb_device *dev;
2061 bool retval = false;
2063 urb_index = sitd->index;
2064 desc = &urb->iso_frame_desc [urb_index];
2065 t = hc32_to_cpup(ehci, &sitd->hw_results);
2067 /* report transfer status */
2068 if (unlikely(t & SITD_ERRS)) {
2070 if (t & SITD_STS_DBE)
2071 desc->status = usb_pipein (urb->pipe)
2072 ? -ENOSR /* hc couldn't read */
2073 : -ECOMM; /* hc couldn't write */
2074 else if (t & SITD_STS_BABBLE)
2075 desc->status = -EOVERFLOW;
2076 else /* XACT, MMF, etc */
2077 desc->status = -EPROTO;
2078 } else if (unlikely(t & SITD_STS_ACTIVE)) {
2079 /* URB was too late */
2083 desc->actual_length = desc->length - SITD_LENGTH(t);
2084 urb->actual_length += desc->actual_length;
2087 /* handle completion now? */
2088 if ((urb_index + 1) != urb->number_of_packets)
2091 /* ASSERT: it's really the last sitd for this urb
2092 list_for_each_entry (sitd, &stream->td_list, sitd_list)
2093 BUG_ON (sitd->urb == urb);
2096 /* give urb back to the driver; completion often (re)submits */
2098 ehci_urb_done(ehci, urb, 0);
2103 disable_periodic(ehci);
2105 ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
2106 if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
2107 if (ehci->amd_pll_fix == 1)
2108 usb_amd_quirk_pll_enable();
2111 if (list_is_singular(&stream->td_list)) {
2112 ehci_to_hcd(ehci)->self.bandwidth_allocated
2113 -= stream->bandwidth;
2115 "deschedule devp %s ep%d%s-iso\n",
2116 dev->devpath, stream->bEndpointAddress & 0x0f,
2117 (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out");
2123 /* Add to the end of the free list for later reuse */
2124 list_move_tail(&sitd->sitd_list, &stream->free_list);
2126 /* Recycle the siTDs when the pipeline is empty (ep no longer in use) */
2127 if (list_empty(&stream->td_list)) {
2128 list_splice_tail_init(&stream->free_list,
2129 &ehci->cached_sitd_list);
2130 start_free_itds(ehci);
2137 static int sitd_submit (struct ehci_hcd *ehci, struct urb *urb,
2140 int status = -EINVAL;
2141 unsigned long flags;
2142 struct ehci_iso_stream *stream;
2144 /* Get iso_stream head */
2145 stream = iso_stream_find (ehci, urb);
2146 if (stream == NULL) {
2147 ehci_dbg (ehci, "can't get iso stream\n");
2150 if (urb->interval != stream->interval) {
2151 ehci_dbg (ehci, "can't change iso interval %d --> %d\n",
2152 stream->interval, urb->interval);
2156 #ifdef EHCI_URB_TRACE
2158 "submit %p dev%s ep%d%s-iso len %d\n",
2159 urb, urb->dev->devpath,
2160 usb_pipeendpoint (urb->pipe),
2161 usb_pipein (urb->pipe) ? "in" : "out",
2162 urb->transfer_buffer_length);
2165 /* allocate SITDs */
2166 status = sitd_urb_transaction (stream, ehci, urb, mem_flags);
2168 ehci_dbg (ehci, "can't init sitds\n");
2172 /* schedule ... need to lock */
2173 spin_lock_irqsave (&ehci->lock, flags);
2174 if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
2175 status = -ESHUTDOWN;
2176 goto done_not_linked;
2178 status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
2179 if (unlikely(status))
2180 goto done_not_linked;
2181 status = iso_stream_schedule(ehci, urb, stream);
2183 sitd_link_urb (ehci, urb, ehci->periodic_size << 3, stream);
2185 usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
2187 spin_unlock_irqrestore (&ehci->lock, flags);
2192 /*-------------------------------------------------------------------------*/
2194 static void scan_isoc(struct ehci_hcd *ehci)
2196 unsigned uf, now_frame, frame;
2197 unsigned fmask = ehci->periodic_size - 1;
2198 bool modified, live;
2201 * When running, scan from last scan point up to "now"
2202 * else clean up by scanning everything that's left.
2203 * Touches as few pages as possible: cache-friendly.
2205 if (ehci->rh_state >= EHCI_RH_RUNNING) {
2206 uf = ehci_read_frame_index(ehci);
2207 now_frame = (uf >> 3) & fmask;
2210 now_frame = (ehci->last_iso_frame - 1) & fmask;
2213 ehci->now_frame = now_frame;
2215 frame = ehci->last_iso_frame;
2217 union ehci_shadow q, *q_p;
2221 /* scan each element in frame's queue for completions */
2222 q_p = &ehci->pshadow [frame];
2223 hw_p = &ehci->periodic [frame];
2225 type = Q_NEXT_TYPE(ehci, *hw_p);
2228 while (q.ptr != NULL) {
2229 switch (hc32_to_cpu(ehci, type)) {
2231 /* If this ITD is still active, leave it for
2232 * later processing ... check the next entry.
2233 * No need to check for activity unless the
2236 if (frame == now_frame && live) {
2238 for (uf = 0; uf < 8; uf++) {
2239 if (q.itd->hw_transaction[uf] &
2244 q_p = &q.itd->itd_next;
2245 hw_p = &q.itd->hw_next;
2246 type = Q_NEXT_TYPE(ehci,
2253 /* Take finished ITDs out of the schedule
2254 * and process them: recycle, maybe report
2255 * URB completion. HC won't cache the
2256 * pointer for much longer, if at all.
2258 *q_p = q.itd->itd_next;
2259 if (!ehci->use_dummy_qh ||
2260 q.itd->hw_next != EHCI_LIST_END(ehci))
2261 *hw_p = q.itd->hw_next;
2263 *hw_p = ehci->dummy->qh_dma;
2264 type = Q_NEXT_TYPE(ehci, q.itd->hw_next);
2266 modified = itd_complete (ehci, q.itd);
2270 /* If this SITD is still active, leave it for
2271 * later processing ... check the next entry.
2272 * No need to check for activity unless the
2275 if (((frame == now_frame) ||
2276 (((frame + 1) & fmask) == now_frame))
2278 && (q.sitd->hw_results &
2279 SITD_ACTIVE(ehci))) {
2281 q_p = &q.sitd->sitd_next;
2282 hw_p = &q.sitd->hw_next;
2283 type = Q_NEXT_TYPE(ehci,
2289 /* Take finished SITDs out of the schedule
2290 * and process them: recycle, maybe report
2293 *q_p = q.sitd->sitd_next;
2294 if (!ehci->use_dummy_qh ||
2295 q.sitd->hw_next != EHCI_LIST_END(ehci))
2296 *hw_p = q.sitd->hw_next;
2298 *hw_p = ehci->dummy->qh_dma;
2299 type = Q_NEXT_TYPE(ehci, q.sitd->hw_next);
2301 modified = sitd_complete (ehci, q.sitd);
2305 ehci_dbg(ehci, "corrupt type %d frame %d shadow %p\n",
2306 type, frame, q.ptr);
2311 /* End of the iTDs and siTDs */
2316 /* assume completion callbacks modify the queue */
2317 if (unlikely(modified && ehci->isoc_count > 0))
2321 /* Stop when we have reached the current frame */
2322 if (frame == now_frame)
2325 /* The last frame may still have active siTDs */
2326 ehci->last_iso_frame = frame;
2327 frame = (frame + 1) & fmask;