/*
 * hcd_ddma.c - DesignWare HS OTG Controller descriptor DMA routines
 *
 * Copyright (C) 2004-2013 Synopsys, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation; either version 2 of the License, or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file contains the Descriptor DMA implementation for Host mode
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/usb.h>

#include <linux/usb/hcd.h>
#include <linux/usb/ch11.h>

#include "core.h"
#include "hcd.h"

static u16 dwc2_frame_list_idx(u16 frame)
{
	return frame & (FRLISTEN_64_SIZE - 1);
}

static u16 dwc2_desclist_idx_inc(u16 idx, u16 inc, u8 speed)
{
	return (idx + inc) &
		((speed == USB_SPEED_HIGH ? MAX_DMA_DESC_NUM_HS_ISOC :
		  MAX_DMA_DESC_NUM_GENERIC) - 1);
}

static u16 dwc2_desclist_idx_dec(u16 idx, u16 inc, u8 speed)
{
	return (idx - inc) &
		((speed == USB_SPEED_HIGH ? MAX_DMA_DESC_NUM_HS_ISOC :
		  MAX_DMA_DESC_NUM_GENERIC) - 1);
}

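/*
 * A minimal sketch (kept out of the build) of the wrap-around arithmetic
 * used by the two helpers above: the descriptor list sizes are powers of
 * two, so "& (size - 1)" acts as a cheap modulo. The 64-entry size below is
 * an illustrative assumption matching the generic descriptor list.
 */
#if 0
static unsigned short idx_inc_sketch(unsigned short idx, unsigned short inc)
{
	const unsigned short list_size = 64;	/* assumed, power of two */

	/* e.g. idx = 62, inc = 4: (62 + 4) & 63 = 2, wrapping past the end */
	return (idx + inc) & (list_size - 1);
}

static unsigned short idx_dec_sketch(unsigned short idx, unsigned short dec)
{
	const unsigned short list_size = 64;	/* assumed, power of two */

	/* e.g. idx = 1, dec = 4: (1 - 4) & 63 = 61, wrapping backwards */
	return (idx - dec) & (list_size - 1);
}
#endif
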
static u16 dwc2_max_desc_num(struct dwc2_qh *qh)
{
	return (qh->ep_type == USB_ENDPOINT_XFER_ISOC &&
		qh->dev_speed == USB_SPEED_HIGH) ?
		MAX_DMA_DESC_NUM_HS_ISOC : MAX_DMA_DESC_NUM_GENERIC;
}

static u16 dwc2_frame_incr_val(struct dwc2_qh *qh)
{
	return qh->dev_speed == USB_SPEED_HIGH ?
		(qh->interval + 8 - 1) / 8 : qh->interval;
}

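/*
 * Sketch (not compiled) of the increment returned by dwc2_frame_incr_val():
 * the FrameList advances one entry per full frame, so a high-speed interval
 * expressed in microframes is divided by 8, rounding up. Values are
 * illustrative only.
 */
#if 0
static unsigned short frame_incr_sketch(int is_high_speed,
					unsigned short interval)
{
	if (is_high_speed)
		return (interval + 8 - 1) / 8;	/* e.g. 4 -> 1, 16 -> 2 */
	return interval;			/* FS: already in frames */
}
#endif
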
static int dwc2_desc_list_alloc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
				gfp_t flags)
{
	struct kmem_cache *desc_cache;

	if (qh->ep_type == USB_ENDPOINT_XFER_ISOC
	    && qh->dev_speed == USB_SPEED_HIGH)
		desc_cache = hsotg->desc_hsisoc_cache;
	else
		desc_cache = hsotg->desc_gen_cache;

	qh->desc_list_sz = sizeof(struct dwc2_hcd_dma_desc) *
			   dwc2_max_desc_num(qh);

	qh->desc_list = kmem_cache_zalloc(desc_cache, flags | GFP_DMA);
	if (!qh->desc_list)
		return -ENOMEM;

	qh->desc_list_dma = dma_map_single(hsotg->dev, qh->desc_list,
					   qh->desc_list_sz, DMA_TO_DEVICE);

	qh->n_bytes = kzalloc(sizeof(u32) * dwc2_max_desc_num(qh), flags);
	if (!qh->n_bytes) {
		dma_unmap_single(hsotg->dev, qh->desc_list_dma,
				 qh->desc_list_sz, DMA_FROM_DEVICE);
		kfree(qh->desc_list);
		qh->desc_list = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void dwc2_desc_list_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	struct kmem_cache *desc_cache;

	if (qh->ep_type == USB_ENDPOINT_XFER_ISOC
	    && qh->dev_speed == USB_SPEED_HIGH)
		desc_cache = hsotg->desc_hsisoc_cache;
	else
		desc_cache = hsotg->desc_gen_cache;

	if (qh->desc_list) {
		dma_unmap_single(hsotg->dev, qh->desc_list_dma,
				 qh->desc_list_sz, DMA_FROM_DEVICE);
		kmem_cache_free(desc_cache, qh->desc_list);
		qh->desc_list = NULL;
	}

	kfree(qh->n_bytes);
	qh->n_bytes = NULL;
}

static int dwc2_frame_list_alloc(struct dwc2_hsotg *hsotg, gfp_t mem_flags)
{
	if (hsotg->frame_list)
		return 0;

	hsotg->frame_list_sz = 4 * FRLISTEN_64_SIZE;
	hsotg->frame_list = kzalloc(hsotg->frame_list_sz, GFP_ATOMIC | GFP_DMA);
	if (!hsotg->frame_list)
		return -ENOMEM;

	hsotg->frame_list_dma = dma_map_single(hsotg->dev, hsotg->frame_list,
					       hsotg->frame_list_sz,
					       DMA_TO_DEVICE);

	return 0;
}

static void dwc2_frame_list_free(struct dwc2_hsotg *hsotg)
{
	unsigned long flags;

	spin_lock_irqsave(&hsotg->lock, flags);

	if (!hsotg->frame_list) {
		spin_unlock_irqrestore(&hsotg->lock, flags);
		return;
	}

	dma_unmap_single(hsotg->dev, hsotg->frame_list_dma,
			 hsotg->frame_list_sz, DMA_FROM_DEVICE);

	kfree(hsotg->frame_list);
	hsotg->frame_list = NULL;

	spin_unlock_irqrestore(&hsotg->lock, flags);
}

static void dwc2_per_sched_enable(struct dwc2_hsotg *hsotg, u32 fr_list_en)
{
	u32 hcfg;
	unsigned long flags;

	spin_lock_irqsave(&hsotg->lock, flags);

	hcfg = dwc2_readl(hsotg->regs + HCFG);
	if (hcfg & HCFG_PERSCHEDENA) {
		/* already enabled */
		spin_unlock_irqrestore(&hsotg->lock, flags);
		return;
	}

	dwc2_writel(hsotg->frame_list_dma, hsotg->regs + HFLBADDR);

	hcfg &= ~HCFG_FRLISTEN_MASK;
	hcfg |= fr_list_en | HCFG_PERSCHEDENA;
	dev_vdbg(hsotg->dev, "Enabling Periodic schedule\n");
	dwc2_writel(hcfg, hsotg->regs + HCFG);

	spin_unlock_irqrestore(&hsotg->lock, flags);
}

static void dwc2_per_sched_disable(struct dwc2_hsotg *hsotg)
{
	u32 hcfg;
	unsigned long flags;

	spin_lock_irqsave(&hsotg->lock, flags);

	hcfg = dwc2_readl(hsotg->regs + HCFG);
	if (!(hcfg & HCFG_PERSCHEDENA)) {
		/* already disabled */
		spin_unlock_irqrestore(&hsotg->lock, flags);
		return;
	}

	hcfg &= ~HCFG_PERSCHEDENA;
	dev_vdbg(hsotg->dev, "Disabling Periodic schedule\n");
	dwc2_writel(hcfg, hsotg->regs + HCFG);

	spin_unlock_irqrestore(&hsotg->lock, flags);
}

/*
 * Activates/Deactivates FrameList entries for the channel based on endpoint
 * servicing period
 */
static void dwc2_update_frame_list(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
				   int enable)
{
	struct dwc2_host_chan *chan;
	u16 i, j, inc;

	if (!hsotg) {
		pr_err("hsotg = %p\n", hsotg);
		return;
	}

	if (!qh->channel) {
		dev_err(hsotg->dev, "qh->channel = %p\n", qh->channel);
		return;
	}

	if (!hsotg->frame_list) {
		dev_err(hsotg->dev, "hsotg->frame_list = %p\n",
			hsotg->frame_list);
		return;
	}

	chan = qh->channel;
	inc = dwc2_frame_incr_val(qh);
	if (qh->ep_type == USB_ENDPOINT_XFER_ISOC)
		i = dwc2_frame_list_idx(qh->sched_frame);
	else
		i = 0;

	j = i;
	do {
		if (enable)
			hsotg->frame_list[j] |= 1 << chan->hc_num;
		else
			hsotg->frame_list[j] &= ~(1 << chan->hc_num);
		j = (j + inc) & (FRLISTEN_64_SIZE - 1);
	} while (j != i);

	/*
	 * Sync frame list since controller will access it if periodic
	 * channel is currently enabled.
	 */
	dma_sync_single_for_device(hsotg->dev, hsotg->frame_list_dma,
				   hsotg->frame_list_sz, DMA_TO_DEVICE);

	if (!enable)
		return;

	chan->schinfo = 0;
	if (chan->speed == USB_SPEED_HIGH && qh->interval) {
		j = 1;
		/* TODO - check this */
		inc = (8 + qh->interval - 1) / qh->interval;
		for (i = 0; i < inc; i++) {
			chan->schinfo |= j;
			j = j << qh->interval;
		}
	} else {
		chan->schinfo = 0xff;
	}
}

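/*
 * Sketch (not compiled) of the FrameList update above: starting at the
 * scheduled slot, every "inc"-th of the 64 entries gets this channel's bit
 * set or cleared until the walk wraps back to the start. Channel number,
 * period and start slot below are assumed values.
 */
#if 0
static void frame_list_mark_sketch(unsigned int *frame_list, int enable)
{
	const unsigned short list_size = 64;	/* FRLISTEN_64_SIZE entries */
	const unsigned short inc = 8;		/* servicing period, assumed */
	const unsigned short start = 5;		/* scheduled slot, assumed */
	const unsigned short hc_num = 3;	/* host channel, assumed */
	unsigned short j = start;

	do {
		if (enable)
			frame_list[j] |= 1 << hc_num;
		else
			frame_list[j] &= ~(1 << hc_num);
		j = (j + inc) & (list_size - 1);
	} while (j != start);
	/* With inc = 8 this touches entries 5, 13, 21, ..., 61 and stops. */
}
#endif
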
static void dwc2_release_channel_ddma(struct dwc2_hsotg *hsotg,
				      struct dwc2_qh *qh)
{
	struct dwc2_host_chan *chan = qh->channel;

	if (dwc2_qh_is_non_per(qh)) {
		if (hsotg->core_params->uframe_sched > 0)
			hsotg->available_host_channels++;
		else
			hsotg->non_periodic_channels--;
	} else {
		dwc2_update_frame_list(hsotg, qh, 0);
		hsotg->available_host_channels++;
	}

	/*
	 * The condition is added to prevent double cleanup try in case of
	 * device disconnect. See channel cleanup in dwc2_hcd_disconnect().
	 */
	if (chan->qh) {
		if (!list_empty(&chan->hc_list_entry))
			list_del(&chan->hc_list_entry);
		dwc2_hc_cleanup(hsotg, chan);
		list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);
		chan->qh = NULL;
	}

	qh->channel = NULL;
	qh->ntd = 0;

	if (qh->desc_list)
		memset(qh->desc_list, 0, sizeof(struct dwc2_hcd_dma_desc) *
		       dwc2_max_desc_num(qh));
}

/**
 * dwc2_hcd_qh_init_ddma() - Initializes a QH structure's Descriptor DMA
 * related members
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    The QH to init
 *
 * Return: 0 if successful, negative error code otherwise
 *
 * Allocates memory for the descriptor list. For the first periodic QH,
 * allocates memory for the FrameList and enables periodic scheduling.
 */
int dwc2_hcd_qh_init_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
			  gfp_t mem_flags)
{
	int retval;

	if (qh->do_split) {
		dev_err(hsotg->dev,
			"SPLIT Transfers are not supported in Descriptor DMA mode.\n");
		retval = -EINVAL;
		goto err0;
	}

	retval = dwc2_desc_list_alloc(hsotg, qh, mem_flags);
	if (retval)
		goto err0;

	if (qh->ep_type == USB_ENDPOINT_XFER_ISOC ||
	    qh->ep_type == USB_ENDPOINT_XFER_INT) {
		if (!hsotg->frame_list) {
			retval = dwc2_frame_list_alloc(hsotg, mem_flags);
			if (retval)
				goto err1;
			/* Enable periodic schedule on first periodic QH */
			dwc2_per_sched_enable(hsotg, HCFG_FRLISTEN_64);
		}
	}

	qh->ntd = 0;
	return 0;

err1:
	dwc2_desc_list_free(hsotg, qh);
err0:
	return retval;
}

/**
 * dwc2_hcd_qh_free_ddma() - Frees a QH structure's Descriptor DMA related
 * members
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    The QH to free
 *
 * Frees descriptor list memory associated with the QH. If the QH is periodic
 * and the last one, frees FrameList memory and disables periodic scheduling.
 */
void dwc2_hcd_qh_free_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	unsigned long flags;

	dwc2_desc_list_free(hsotg, qh);

	/*
	 * The channel may still be assigned. This has been seen on Isoc URB
	 * dequeue: the channel was halted but no subsequent ChHalted
	 * interrupt arrived to release it, so when this function is later
	 * reached from the endpoint disable routine the channel remains
	 * assigned.
	 */
	spin_lock_irqsave(&hsotg->lock, flags);
	if (qh->channel)
		dwc2_release_channel_ddma(hsotg, qh);
	spin_unlock_irqrestore(&hsotg->lock, flags);

	if ((qh->ep_type == USB_ENDPOINT_XFER_ISOC ||
	     qh->ep_type == USB_ENDPOINT_XFER_INT) &&
	    (hsotg->core_params->uframe_sched > 0 ||
	     !hsotg->periodic_channels) && hsotg->frame_list) {
		dwc2_per_sched_disable(hsotg);
		dwc2_frame_list_free(hsotg);
	}
}

static u8 dwc2_frame_to_desc_idx(struct dwc2_qh *qh, u16 frame_idx)
{
	if (qh->dev_speed == USB_SPEED_HIGH)
		/* Descriptor set (8 descriptors) index which is 8-aligned */
		return (frame_idx & ((MAX_DMA_DESC_NUM_HS_ISOC / 8) - 1)) * 8;
	else
		return frame_idx & (MAX_DMA_DESC_NUM_GENERIC - 1);
}

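/*
 * Sketch (not compiled) of the mapping above: for FS the descriptor index
 * simply tracks the frame index modulo the list size, while for HS each
 * frame owns a set of 8 descriptors (one per microframe), so the returned
 * index is 8-aligned. The sizes 64 and 256 are illustrative assumptions for
 * the generic and HS-ISOC lists.
 */
#if 0
static unsigned char frame_to_desc_idx_sketch(int is_high_speed,
					      unsigned short frame_idx)
{
	if (is_high_speed)
		/* e.g. frame 33 -> set 1 -> descriptor index 8 */
		return (frame_idx & (256 / 8 - 1)) * 8;
	/* e.g. frame 70 -> descriptor index 6 (70 mod 64) */
	return frame_idx & (64 - 1);
}
#endif
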
/*
 * Determine starting frame for Isochronous transfer.
 * A few frames are skipped to prevent a race condition with the HC.
 */
static u16 dwc2_calc_starting_frame(struct dwc2_hsotg *hsotg,
				    struct dwc2_qh *qh, u16 *skip_frames)
{
	u16 frame;

	hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);

	/* sched_frame is always frame number (not uFrame) both in FS and HS! */

	/*
	 * skip_frames is used to limit the number of activated descriptors,
	 * to avoid the situation where the HC services the last activated
	 * descriptor first.
	 *
	 * Example for FS:
	 * Current frame is 1, scheduled frame is 3. Since the HC always
	 * fetches the descriptor corresponding to curr_frame+1, the descriptor
	 * corresponding to frame 2 will be fetched. If the number of
	 * descriptors is max=64 (or greater) the list will be fully programmed
	 * with Active descriptors and it is possible (although rare) that the
	 * latest descriptor (considering rollback) corresponding to frame 2
	 * will be serviced first. The HS case is more probable because, in
	 * fact, up to 11 uframes (16 in the code) may be skipped.
	 */
	if (qh->dev_speed == USB_SPEED_HIGH) {
		/*
		 * Consider the uframe counter as well, to start the transfer
		 * asap. If half of the frame has elapsed, skip 2 frames,
		 * otherwise just 1 frame. The starting descriptor index must
		 * be 8-aligned, so if the current frame is near completion
		 * the next one is skipped as well.
		 */
		if (dwc2_micro_frame_num(hsotg->frame_number) >= 5) {
			*skip_frames = 2 * 8;
			frame = dwc2_frame_num_inc(hsotg->frame_number,
						   *skip_frames);
		} else {
			*skip_frames = 1 * 8;
			frame = dwc2_frame_num_inc(hsotg->frame_number,
						   *skip_frames);
		}

		frame = dwc2_full_frame_num(frame);
	} else {
		/*
		 * Two frames are skipped for FS - the current and the next.
		 * But for descriptor programming, 1 frame (descriptor) is
		 * enough, see the example above.
		 */
		*skip_frames = 1;
		frame = dwc2_frame_num_inc(hsotg->frame_number, 2);
	}

	return frame;
}

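/*
 * Worked sketch (not compiled) of the high-speed branch above, assuming the
 * controller's frame counter has microframe resolution (frame * 8 + uframe)
 * and ignoring counter wrap-around: late in a frame, two full frames
 * (16 uframes) are skipped, otherwise one. Full speed simply starts at the
 * current frame + 2 with skip_frames = 1.
 */
#if 0
static unsigned short hs_starting_frame_sketch(unsigned short counter)
{
	unsigned short skip = (counter & 0x7) >= 5 ? 2 * 8 : 1 * 8;

	/*
	 * e.g. counter = 10 * 8 + 6: skip 16, start at full frame 12;
	 *      counter = 10 * 8 + 2: skip  8, start at full frame 11.
	 */
	return (counter + skip) >> 3;
}
#endif
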
/*
 * Calculate initial descriptor index for isochronous transfer based on
 * scheduled frame
 */
static u16 dwc2_recalc_initial_desc_idx(struct dwc2_hsotg *hsotg,
					struct dwc2_qh *qh)
{
	u16 frame, fr_idx, fr_idx_tmp, skip_frames;

	/*
	 * With the current ISOC processing algorithm the channel is released
	 * when there are no more QTDs in the list (qh->ntd == 0). Thus this
	 * function is called only when qh->ntd == 0 and qh->channel == NULL.
	 *
	 * So the qh->channel != NULL branch is not used and just not removed
	 * from the source file. It is required for another possible approach,
	 * which is: do not disable and release the channel when an ISOC
	 * session completes, just move the QH to the inactive schedule until
	 * a new QTD arrives. On the new QTD, the QH is moved back to the
	 * 'ready' schedule, and the starting frame and therefore the starting
	 * desc_index are recalculated. In that case the channel is released
	 * only on ep_disable.
	 */

	/*
	 * Calculate starting descriptor index. For INTERRUPT endpoints it is
	 * always 0.
	 */
	if (qh->channel) {
		frame = dwc2_calc_starting_frame(hsotg, qh, &skip_frames);
		/*
		 * Calculate initial descriptor index based on FrameList
		 * current bitmap and servicing period
		 */
		fr_idx_tmp = dwc2_frame_list_idx(frame);
		fr_idx = (FRLISTEN_64_SIZE +
			  dwc2_frame_list_idx(qh->sched_frame) - fr_idx_tmp)
			 % dwc2_frame_incr_val(qh);
		fr_idx = (fr_idx + fr_idx_tmp) % FRLISTEN_64_SIZE;
	} else {
		qh->sched_frame = dwc2_calc_starting_frame(hsotg, qh,
							   &skip_frames);
		fr_idx = dwc2_frame_list_idx(qh->sched_frame);
	}

	qh->td_first = qh->td_last = dwc2_frame_to_desc_idx(qh, fr_idx);

	return skip_frames;
}

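/*
 * Worked sketch (not compiled) of the index snapping in the qh->channel
 * branch above: the start index is moved to the first FrameList slot at or
 * after the newly calculated starting frame that still belongs to the QH's
 * servicing period. Slot, period and list size below are assumed values.
 */
#if 0
static unsigned short initial_fr_idx_sketch(unsigned short start_slot)
{
	const unsigned short sched_slot = 5;	/* original slot, assumed */
	const unsigned short period = 8;	/* servicing period, assumed */
	unsigned short fr_idx;

	/* e.g. start_slot = 20: (64 + 5 - 20) % 8 = 1, (1 + 20) % 64 = 21 */
	fr_idx = (64 + sched_slot - start_slot) % period;
	return (fr_idx + start_slot) % 64;
}
#endif
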
#define ISOC_URB_GIVEBACK_ASAP

#define MAX_ISOC_XFER_SIZE_FS	1023
#define MAX_ISOC_XFER_SIZE_HS	3072
#define DESCNUM_THRESHOLD	4

static void dwc2_fill_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
					 struct dwc2_qtd *qtd,
					 struct dwc2_qh *qh, u32 max_xfer_size,
					 u16 idx)
{
	struct dwc2_hcd_dma_desc *dma_desc = &qh->desc_list[idx];
	struct dwc2_hcd_iso_packet_desc *frame_desc;

	memset(dma_desc, 0, sizeof(*dma_desc));
	frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];

	if (frame_desc->length > max_xfer_size)
		qh->n_bytes[idx] = max_xfer_size;
	else
		qh->n_bytes[idx] = frame_desc->length;

	dma_desc->buf = (u32)(qtd->urb->dma + frame_desc->offset);
	dma_desc->status = qh->n_bytes[idx] << HOST_DMA_ISOC_NBYTES_SHIFT &
			   HOST_DMA_ISOC_NBYTES_MASK;

	/* Set active bit */
	dma_desc->status |= HOST_DMA_A;

	qh->ntd++;
	qtd->isoc_frame_index_last++;

#ifdef ISOC_URB_GIVEBACK_ASAP
	/* Set IOC for each descriptor corresponding to last frame of URB */
	if (qtd->isoc_frame_index_last == qtd->urb->packet_count)
		dma_desc->status |= HOST_DMA_IOC;
#endif

	dma_sync_single_for_device(hsotg->dev,
				   qh->desc_list_dma +
				   (idx * sizeof(struct dwc2_hcd_dma_desc)),
				   sizeof(struct dwc2_hcd_dma_desc),
				   DMA_TO_DEVICE);
}

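/*
 * Sketch (not compiled) of the per-descriptor byte budgeting above: the
 * programmed size is the ISO packet length clamped to the per-speed maximum
 * transfer size, and the clamped value is remembered in qh->n_bytes[] so
 * completion can later derive the actual length from the controller's
 * remaining-bytes count. Values are illustrative only.
 */
#if 0
static unsigned int isoc_desc_bytes_sketch(unsigned int frame_len, int is_hs)
{
	unsigned int max_xfer = is_hs ? 3072 : 1023;	/* per-speed maximum */

	/* e.g. a 4096-byte HS request is programmed as 3072 bytes */
	return frame_len > max_xfer ? max_xfer : frame_len;
}
#endif
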
static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg,
				    struct dwc2_qh *qh, u16 skip_frames)
{
	struct dwc2_qtd *qtd;
	u32 max_xfer_size;
	u16 idx, inc, n_desc = 0, ntd_max = 0;
	u16 cur_idx;
	u16 next_idx;

	idx = qh->td_last;
	inc = qh->interval;
	hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);
	cur_idx = dwc2_frame_list_idx(hsotg->frame_number);
	next_idx = dwc2_desclist_idx_inc(qh->td_last, inc, qh->dev_speed);

	/*
	 * Ensure the current frame number didn't overstep the last scheduled
	 * descriptor. If it happens, the only way to recover is to move
	 * qh->td_last to current frame number + 1, so that the next isoc
	 * descriptor will be scheduled on frame number + 1 and not on a past
	 * frame.
	 */
	if (dwc2_frame_idx_num_gt(cur_idx, next_idx) || (cur_idx == next_idx)) {
		if (inc < 32) {
			dev_vdbg(hsotg->dev,
				 "current frame number overstep last descriptor\n");
			qh->td_last = dwc2_desclist_idx_inc(cur_idx, inc,
							    qh->dev_speed);
			idx = qh->td_last;
		}
	}

	if (qh->interval) {
		ntd_max = (dwc2_max_desc_num(qh) + qh->interval - 1) /
				qh->interval;
		if (skip_frames && !qh->channel)
			ntd_max -= skip_frames / qh->interval;
	}

	max_xfer_size = qh->dev_speed == USB_SPEED_HIGH ?
			MAX_ISOC_XFER_SIZE_HS : MAX_ISOC_XFER_SIZE_FS;

	list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry) {
		if (qtd->in_process &&
		    qtd->isoc_frame_index_last ==
		    qtd->urb->packet_count)
			continue;

		qtd->isoc_td_first = idx;
		while (qh->ntd < ntd_max && qtd->isoc_frame_index_last <
						qtd->urb->packet_count) {
			dwc2_fill_host_isoc_dma_desc(hsotg, qtd, qh,
						     max_xfer_size, idx);
			idx = dwc2_desclist_idx_inc(idx, inc, qh->dev_speed);
			n_desc++;
		}
		qtd->isoc_td_last = idx;
		qtd->in_process = 1;
	}

	qh->td_last = idx;

#ifdef ISOC_URB_GIVEBACK_ASAP
	/* Set IOC for last descriptor if descriptor list is full */
	if (qh->ntd == ntd_max) {
		idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
		qh->desc_list[idx].status |= HOST_DMA_IOC;
		dma_sync_single_for_device(hsotg->dev,
					   qh->desc_list_dma + (idx *
					   sizeof(struct dwc2_hcd_dma_desc)),
					   sizeof(struct dwc2_hcd_dma_desc),
					   DMA_TO_DEVICE);
	}
#else
	/*
	 * Set the IOC bit only for one descriptor. Always try to be ahead of
	 * HW processing, i.e. on IOC generation the driver activates the next
	 * descriptor while the core continues to process descriptors
	 * following the one with IOC set.
	 */

	if (n_desc > DESCNUM_THRESHOLD)
		/*
		 * Move IOC "up". Required even if there is only one QTD
		 * in the list, because QTDs might continue to be queued,
		 * but during the activation only one was queued. Actually
		 * more than one QTD might be in the list if this function is
		 * called from XferCompletion - QTDs were queued during HW
		 * processing of the previous descriptor chunk.
		 */
		idx = dwc2_desclist_idx_dec(idx, inc * ((qh->ntd + 1) / 2),
					    qh->dev_speed);
	else
		/*
		 * Set the IOC for the latest descriptor if either the number
		 * of descriptors is not greater than the threshold or no more
		 * new descriptors were activated
		 */
		idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed);

	qh->desc_list[idx].status |= HOST_DMA_IOC;
	dma_sync_single_for_device(hsotg->dev,
				   qh->desc_list_dma +
				   (idx * sizeof(struct dwc2_hcd_dma_desc)),
				   sizeof(struct dwc2_hcd_dma_desc),
				   DMA_TO_DEVICE);
#endif
}

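/*
 * Sketch (not compiled) of the IOC placement in the non-ASAP branch above:
 * when more than DESCNUM_THRESHOLD descriptors were just activated, the
 * interrupt-on-complete bit is set roughly half a chunk back from the
 * newest descriptor, so the driver is woken while the core still has queued
 * work ahead of it. List size and interval below are assumed values.
 */
#if 0
static unsigned short ioc_idx_sketch(unsigned short last_idx,
				     unsigned short ntd)
{
	const unsigned short inc = 1;		/* qh->interval, assumed */

	/* e.g. last_idx = 20, ntd = 10: IOC lands on descriptor 15 */
	return (last_idx - inc * ((ntd + 1) / 2)) & (64 - 1);
}
#endif
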
static void dwc2_fill_host_dma_desc(struct dwc2_hsotg *hsotg,
				    struct dwc2_host_chan *chan,
				    struct dwc2_qtd *qtd, struct dwc2_qh *qh,
				    int n_desc)
{
	struct dwc2_hcd_dma_desc *dma_desc = &qh->desc_list[n_desc];
	int len = chan->xfer_len;

	if (len > MAX_DMA_DESC_SIZE - (chan->max_packet - 1))
		len = MAX_DMA_DESC_SIZE - (chan->max_packet - 1);

	if (chan->ep_is_in) {
		int num_packets;

		if (len > 0 && chan->max_packet)
			num_packets = (len + chan->max_packet - 1)
					/ chan->max_packet;
		else
			/* Need 1 packet for transfer length of 0 */
			num_packets = 1;

		/* Always program an integral # of packets for IN transfers */
		len = num_packets * chan->max_packet;
	}

	dma_desc->status = len << HOST_DMA_NBYTES_SHIFT & HOST_DMA_NBYTES_MASK;
	qh->n_bytes[n_desc] = len;

	if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL &&
	    qtd->control_phase == DWC2_CONTROL_SETUP)
		dma_desc->status |= HOST_DMA_SUP;

	dma_desc->buf = (u32)chan->xfer_dma;

	dma_sync_single_for_device(hsotg->dev,
				   qh->desc_list_dma +
				   (n_desc * sizeof(struct dwc2_hcd_dma_desc)),
				   sizeof(struct dwc2_hcd_dma_desc),
				   DMA_TO_DEVICE);

	/*
	 * Last (or only) descriptor of IN transfer with actual size less
	 * than MaxPacket
	 */
	if (len > chan->xfer_len) {
		chan->xfer_len = 0;
	} else {
		chan->xfer_dma += len;
		chan->xfer_len -= len;
	}
}

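/*
 * Sketch (not compiled) of the IN-length rounding above: IN descriptors must
 * cover a whole number of max-packet-size packets, so the programmed length
 * is rounded up, and a zero-length transfer still needs one packet. The
 * 512-byte max packet below is an assumed value.
 */
#if 0
static int in_desc_len_sketch(int len)
{
	const int max_packet = 512;	/* assumed */
	int num_packets;

	if (len > 0)
		num_packets = (len + max_packet - 1) / max_packet;
	else
		num_packets = 1;	/* zero-length transfer */

	/* e.g. len = 700: 2 packets, programmed length 1024 */
	return num_packets * max_packet;
}
#endif
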
static void dwc2_init_non_isoc_dma_desc(struct dwc2_hsotg *hsotg,
					struct dwc2_qh *qh)
{
	struct dwc2_qtd *qtd;
	struct dwc2_host_chan *chan = qh->channel;
	int n_desc = 0;

	dev_vdbg(hsotg->dev, "%s(): qh=%p dma=%08lx len=%d\n", __func__, qh,
		 (unsigned long)chan->xfer_dma, chan->xfer_len);

	/*
	 * Start with chan->xfer_dma initialized in assign_and_init_hc(), then
	 * if an SG transfer consists of multiple URBs, this pointer is
	 * re-assigned to the buffer of the currently processed QTD. For a
	 * non-SG request there is always one QTD active.
	 */

	list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry) {
		dev_vdbg(hsotg->dev, "qtd=%p\n", qtd);

		if (n_desc) {
			/* SG request - more than 1 QTD */
			chan->xfer_dma = qtd->urb->dma +
					qtd->urb->actual_length;
			chan->xfer_len = qtd->urb->length -
					qtd->urb->actual_length;
			dev_vdbg(hsotg->dev, "buf=%08lx len=%d\n",
				 (unsigned long)chan->xfer_dma,
				 chan->xfer_len);
		}

		qtd->n_desc = 0;
		do {
			if (n_desc > 1) {
				qh->desc_list[n_desc - 1].status |= HOST_DMA_A;
				dev_vdbg(hsotg->dev,
					 "set A bit in desc %d (%p)\n",
					 n_desc - 1,
					 &qh->desc_list[n_desc - 1]);
				dma_sync_single_for_device(hsotg->dev,
					qh->desc_list_dma +
					((n_desc - 1) *
					sizeof(struct dwc2_hcd_dma_desc)),
					sizeof(struct dwc2_hcd_dma_desc),
					DMA_TO_DEVICE);
			}
			dwc2_fill_host_dma_desc(hsotg, chan, qtd, qh, n_desc);
			dev_vdbg(hsotg->dev,
				 "desc %d (%p) buf=%08x status=%08x\n",
				 n_desc, &qh->desc_list[n_desc],
				 qh->desc_list[n_desc].buf,
				 qh->desc_list[n_desc].status);
			qtd->n_desc++;
			n_desc++;
		} while (chan->xfer_len > 0 &&
			 n_desc != MAX_DMA_DESC_NUM_GENERIC);

		dev_vdbg(hsotg->dev, "n_desc=%d\n", n_desc);
		qtd->in_process = 1;
		if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL)
			break;
		if (n_desc == MAX_DMA_DESC_NUM_GENERIC)
			break;
	}

	if (n_desc) {
		qh->desc_list[n_desc - 1].status |=
				HOST_DMA_IOC | HOST_DMA_EOL | HOST_DMA_A;
		dev_vdbg(hsotg->dev, "set IOC/EOL/A bits in desc %d (%p)\n",
			 n_desc - 1, &qh->desc_list[n_desc - 1]);
		dma_sync_single_for_device(hsotg->dev,
					   qh->desc_list_dma + (n_desc - 1) *
					   sizeof(struct dwc2_hcd_dma_desc),
					   sizeof(struct dwc2_hcd_dma_desc),
					   DMA_TO_DEVICE);
		if (n_desc > 1) {
			qh->desc_list[0].status |= HOST_DMA_A;
			dev_vdbg(hsotg->dev, "set A bit in desc 0 (%p)\n",
				 &qh->desc_list[0]);
			dma_sync_single_for_device(hsotg->dev,
						   qh->desc_list_dma,
						   sizeof(struct dwc2_hcd_dma_desc),
						   DMA_TO_DEVICE);
		}
		chan->ntd = n_desc;
	}
}

/**
 * dwc2_hcd_start_xfer_ddma() - Starts a transfer in Descriptor DMA mode
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    The QH to init
 *
 * Return: 0 if successful, negative error code otherwise
 *
 * For Control and Bulk endpoints, initializes the descriptor list and starts
 * the transfer. For Interrupt and Isochronous endpoints, initializes the
 * descriptor list, then updates the FrameList, marking appropriate entries
 * as active.
 *
 * For Isochronous endpoints the starting descriptor index is calculated based
 * on the scheduled frame, but only for the first transfer descriptor within a
 * session. Then the transfer is started via enabling the channel.
 *
 * For Isochronous endpoints the channel is not halted on the XferComplete
 * interrupt, so it remains assigned to the endpoint (QH) until the session is
 * done.
 */
void dwc2_hcd_start_xfer_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	/* Channel is already assigned */
	struct dwc2_host_chan *chan = qh->channel;
	u16 skip_frames = 0;

	switch (chan->ep_type) {
	case USB_ENDPOINT_XFER_CONTROL:
	case USB_ENDPOINT_XFER_BULK:
		dwc2_init_non_isoc_dma_desc(hsotg, qh);
		dwc2_hc_start_transfer_ddma(hsotg, chan);
		break;
	case USB_ENDPOINT_XFER_INT:
		dwc2_init_non_isoc_dma_desc(hsotg, qh);
		dwc2_update_frame_list(hsotg, qh, 1);
		dwc2_hc_start_transfer_ddma(hsotg, chan);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (!qh->ntd)
			skip_frames = dwc2_recalc_initial_desc_idx(hsotg, qh);
		dwc2_init_isoc_dma_desc(hsotg, qh, skip_frames);

		if (!chan->xfer_started) {
			dwc2_update_frame_list(hsotg, qh, 1);

			/*
			 * Always set to max, instead of actual size. Otherwise
			 * ntd will be changed with channel being enabled. Not
			 * recommended.
			 */
			chan->ntd = dwc2_max_desc_num(qh);

			/* Enable channel only once for ISOC */
			dwc2_hc_start_transfer_ddma(hsotg, chan);
		}

		break;
	default:
		break;
	}
}

#define DWC2_CMPL_DONE		1
#define DWC2_CMPL_STOP		2

static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
					struct dwc2_host_chan *chan,
					struct dwc2_qtd *qtd,
					struct dwc2_qh *qh, u16 idx)
{
	struct dwc2_hcd_dma_desc *dma_desc;
	struct dwc2_hcd_iso_packet_desc *frame_desc;
	u16 remain = 0;
	int rc = 0;

	if (!qtd->urb)
		return -EINVAL;

	dma_sync_single_for_cpu(hsotg->dev, qh->desc_list_dma + (idx *
				sizeof(struct dwc2_hcd_dma_desc)),
				sizeof(struct dwc2_hcd_dma_desc),
				DMA_FROM_DEVICE);

	dma_desc = &qh->desc_list[idx];

	frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];
	dma_desc->buf = (u32)(qtd->urb->dma + frame_desc->offset);
	if (chan->ep_is_in)
		remain = (dma_desc->status & HOST_DMA_ISOC_NBYTES_MASK) >>
			 HOST_DMA_ISOC_NBYTES_SHIFT;

	if ((dma_desc->status & HOST_DMA_STS_MASK) == HOST_DMA_STS_PKTERR) {
		/*
		 * XactError, or unable to complete all the transactions
		 * in the scheduled micro-frame/frame, both indicated by
		 * HOST_DMA_STS_PKTERR
		 */
		qtd->urb->error_count++;
		frame_desc->actual_length = qh->n_bytes[idx] - remain;
		frame_desc->status = -EPROTO;
	} else {
		/* Success */
		frame_desc->actual_length = qh->n_bytes[idx] - remain;
		frame_desc->status = 0;
	}

	if (++qtd->isoc_frame_index == qtd->urb->packet_count) {
		/*
		 * urb->status is not used for isoc transfers here. The
		 * individual frame_desc status are used instead.
		 */
		dwc2_host_complete(hsotg, qtd, 0);
		dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);

		/*
		 * This check is necessary because urb_dequeue can be called
		 * from urb complete callback (sound driver for example). All
		 * pending URBs are dequeued there, so no need for further
		 * processing.
		 */
		if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE)
			return -1;
		rc = DWC2_CMPL_DONE;
	}

	qh->ntd--;

	/* Stop if IOC requested descriptor reached */
	if (dma_desc->status & HOST_DMA_IOC)
		rc = DWC2_CMPL_STOP;

	return rc;
}

static void dwc2_complete_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
					 struct dwc2_host_chan *chan,
					 enum dwc2_halt_status halt_status)
{
	struct dwc2_hcd_iso_packet_desc *frame_desc;
	struct dwc2_qtd *qtd, *qtd_tmp;
	struct dwc2_qh *qh;
	u16 idx;
	int rc;

	qh = chan->qh;
	idx = qh->td_first;

	if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
		list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry)
			qtd->in_process = 0;
		return;
	}

	if (halt_status == DWC2_HC_XFER_AHB_ERR ||
	    halt_status == DWC2_HC_XFER_BABBLE_ERR) {
		/*
		 * The channel is halted in these error cases, which are
		 * considered serious issues.
		 * Complete all URBs marking all frames as failed, irrespective
		 * of whether some of the descriptors (frames) succeeded or
		 * not. Pass the error code to the completion routine as well,
		 * to update urb->status; some class drivers might use it to
		 * stop queuing transfer requests.
		 */
		int err = halt_status == DWC2_HC_XFER_AHB_ERR ?
			  -EIO : -EOVERFLOW;

		list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list,
					 qtd_list_entry) {
			if (qtd->urb) {
				for (idx = 0; idx < qtd->urb->packet_count;
				     idx++) {
					frame_desc = &qtd->urb->iso_descs[idx];
					frame_desc->status = err;
				}

				dwc2_host_complete(hsotg, qtd, err);
			}

			dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
		}

		return;
	}

	list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list, qtd_list_entry) {
		if (!qtd->in_process)
			break;

		/*
		 * Ensure idx corresponds to the descriptor where the first urb
		 * of this qtd was added. In fact, during isoc desc init, dwc2
		 * may skip an index if the current frame number is already
		 * past this index.
		 */
		if (idx != qtd->isoc_td_first) {
			dev_vdbg(hsotg->dev,
				 "try to complete %d instead of %d\n",
				 idx, qtd->isoc_td_first);
			idx = qtd->isoc_td_first;
		}

		do {
			struct dwc2_qtd *qtd_next;
			u16 cur_idx;

			rc = dwc2_cmpl_host_isoc_dma_desc(hsotg, chan, qtd, qh,
							  idx);
			if (rc < 0)
				return;
			idx = dwc2_desclist_idx_inc(idx, qh->interval,
						    chan->speed);
			if (!rc)
				continue;

			if (rc == DWC2_CMPL_DONE)
				break;

			/* rc == DWC2_CMPL_STOP */

			if (qh->interval >= 32)
				goto stop_scan;

			qh->td_first = idx;
			cur_idx = dwc2_frame_list_idx(hsotg->frame_number);
			qtd_next = list_first_entry(&qh->qtd_list,
						    struct dwc2_qtd,
						    qtd_list_entry);
			if (dwc2_frame_idx_num_gt(cur_idx,
						  qtd_next->isoc_td_last))
				break;

			goto stop_scan;

		} while (idx != qh->td_first);
	}

stop_scan:
	qh->td_first = idx;
}

static int dwc2_update_non_isoc_urb_state_ddma(struct dwc2_hsotg *hsotg,
					struct dwc2_host_chan *chan,
					struct dwc2_qtd *qtd,
					struct dwc2_hcd_dma_desc *dma_desc,
					enum dwc2_halt_status halt_status,
					u32 n_bytes, int *xfer_done)
{
	struct dwc2_hcd_urb *urb = qtd->urb;
	u16 remain = 0;

	if (chan->ep_is_in)
		remain = (dma_desc->status & HOST_DMA_NBYTES_MASK) >>
			 HOST_DMA_NBYTES_SHIFT;

	dev_vdbg(hsotg->dev, "remain=%d dwc2_urb=%p\n", remain, urb);

	if (halt_status == DWC2_HC_XFER_AHB_ERR) {
		dev_err(hsotg->dev, "EIO\n");
		urb->status = -EIO;
		return 1;
	}

	if ((dma_desc->status & HOST_DMA_STS_MASK) == HOST_DMA_STS_PKTERR) {
		switch (halt_status) {
		case DWC2_HC_XFER_STALL:
			dev_vdbg(hsotg->dev, "Stall\n");
			urb->status = -EPIPE;
			break;
		case DWC2_HC_XFER_BABBLE_ERR:
			dev_err(hsotg->dev, "Babble\n");
			urb->status = -EOVERFLOW;
			break;
		case DWC2_HC_XFER_XACT_ERR:
			dev_err(hsotg->dev, "XactErr\n");
			urb->status = -EPROTO;
			break;
		default:
			dev_err(hsotg->dev,
				"%s: Unhandled descriptor error status (%d)\n",
				__func__, halt_status);
			break;
		}
		return 1;
	}

	if (dma_desc->status & HOST_DMA_A) {
		dev_vdbg(hsotg->dev,
			 "Active descriptor encountered on channel %d\n",
			 chan->hc_num);
		return 0;
	}

	if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL) {
		if (qtd->control_phase == DWC2_CONTROL_DATA) {
			urb->actual_length += n_bytes - remain;
			if (remain || urb->actual_length >= urb->length) {
				/*
				 * For Control Data stage do not set urb->status
				 * to 0, to prevent URB callback. Set it when
				 * Status phase is done. See below.
				 */
				*xfer_done = 1;
			}
		} else if (qtd->control_phase == DWC2_CONTROL_STATUS) {
			urb->status = 0;
			*xfer_done = 1;
		}
		/* No handling for SETUP stage */
	} else {
		/* BULK and INTR */
		urb->actual_length += n_bytes - remain;
		dev_vdbg(hsotg->dev, "length=%d actual=%d\n", urb->length,
			 urb->actual_length);
		if (remain || urb->actual_length >= urb->length) {
			urb->status = 0;
			*xfer_done = 1;
		}
	}

	return 0;
}

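/*
 * Sketch (not compiled) of the length accounting above: the controller
 * reports how many of the programmed bytes were NOT transferred, so the
 * URB's actual_length grows by the programmed size minus that remainder,
 * and a short packet (non-zero remainder) also completes the transfer.
 */
#if 0
static unsigned int bytes_done_sketch(unsigned int n_bytes,
				      unsigned int remain, int *xfer_done)
{
	/* e.g. 512 bytes programmed, 112 left over: 400 bytes transferred */
	if (remain)
		*xfer_done = 1;	/* short packet terminates the transfer */
	return n_bytes - remain;
}
#endif
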
static int dwc2_process_non_isoc_desc(struct dwc2_hsotg *hsotg,
				      struct dwc2_host_chan *chan,
				      int chnum, struct dwc2_qtd *qtd,
				      int desc_num,
				      enum dwc2_halt_status halt_status,
				      int *xfer_done)
{
	struct dwc2_qh *qh = chan->qh;
	struct dwc2_hcd_urb *urb = qtd->urb;
	struct dwc2_hcd_dma_desc *dma_desc;
	u32 n_bytes;
	int failed;

	dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (!urb)
		return -EINVAL;

	dma_sync_single_for_cpu(hsotg->dev,
				qh->desc_list_dma + (desc_num *
				sizeof(struct dwc2_hcd_dma_desc)),
				sizeof(struct dwc2_hcd_dma_desc),
				DMA_FROM_DEVICE);

	dma_desc = &qh->desc_list[desc_num];
	n_bytes = qh->n_bytes[desc_num];
	dev_vdbg(hsotg->dev,
		 "qtd=%p dwc2_urb=%p desc_num=%d desc=%p n_bytes=%d\n",
		 qtd, urb, desc_num, dma_desc, n_bytes);
	failed = dwc2_update_non_isoc_urb_state_ddma(hsotg, chan, qtd, dma_desc,
						     halt_status, n_bytes,
						     xfer_done);
	if (failed || (*xfer_done && urb->status != -EINPROGRESS)) {
		dwc2_host_complete(hsotg, qtd, urb->status);
		dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
		dev_vdbg(hsotg->dev, "failed=%1x xfer_done=%1x status=%08x\n",
			 failed, *xfer_done, urb->status);
		return failed;
	}

	if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL) {
		switch (qtd->control_phase) {
		case DWC2_CONTROL_SETUP:
			if (urb->length > 0)
				qtd->control_phase = DWC2_CONTROL_DATA;
			else
				qtd->control_phase = DWC2_CONTROL_STATUS;
			dev_vdbg(hsotg->dev,
				 "  Control setup transaction done\n");
			break;
		case DWC2_CONTROL_DATA:
			if (*xfer_done) {
				qtd->control_phase = DWC2_CONTROL_STATUS;
				dev_vdbg(hsotg->dev,
					 "  Control data transfer done\n");
			} else if (desc_num + 1 == qtd->n_desc) {
				/*
				 * Last descriptor for Control data stage which
				 * is not completed yet
				 */
				dwc2_hcd_save_data_toggle(hsotg, chan, chnum,
							  qtd);
			}
			break;
		default:
			break;
		}
	}

	return 0;
}

static void dwc2_complete_non_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
					     struct dwc2_host_chan *chan,
					     int chnum,
					     enum dwc2_halt_status halt_status)
{
	struct list_head *qtd_item, *qtd_tmp;
	struct dwc2_qh *qh = chan->qh;
	struct dwc2_qtd *qtd = NULL;
	int xfer_done;
	int desc_num = 0;

	if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
		list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry)
			qtd->in_process = 0;
		return;
	}

	list_for_each_safe(qtd_item, qtd_tmp, &qh->qtd_list) {
		int i;

		qtd = list_entry(qtd_item, struct dwc2_qtd, qtd_list_entry);
		xfer_done = 0;

		for (i = 0; i < qtd->n_desc; i++) {
			if (dwc2_process_non_isoc_desc(hsotg, chan, chnum, qtd,
						       desc_num, halt_status,
						       &xfer_done))
				goto stop_scan;
			desc_num++;
		}
	}

stop_scan:
	if (qh->ep_type != USB_ENDPOINT_XFER_CONTROL) {
		/*
		 * Resetting the data toggle for bulk and interrupt endpoints
		 * in case of stall. See handle_hc_stall_intr().
		 */
		if (halt_status == DWC2_HC_XFER_STALL)
			qh->data_toggle = DWC2_HC_PID_DATA0;
		else
			dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
	}

	if (halt_status == DWC2_HC_XFER_COMPLETE) {
		if (chan->hcint & HCINTMSK_NYET) {
			/*
			 * Got a NYET on the last transaction of the transfer.
			 * It means that the endpoint should be in the PING
			 * state at the beginning of the next transfer.
			 */
			qh->ping_state = 1;
		}
	}
}

/**
 * dwc2_hcd_complete_xfer_ddma() - Scans the descriptor list, updates the
 * URB's status and calls the completion routine for the URB if it's done.
 * Called from interrupt handlers.
 *
 * @hsotg:       The HCD state structure for the DWC OTG controller
 * @chan:        Host channel the transfer is completed on
 * @chnum:       Index of Host channel registers
 * @halt_status: Reason the channel is being halted or just XferComplete
 *               for isochronous transfers
 *
 * Releases the channel to be used by other transfers.
 * In case of an Isochronous endpoint the channel is not halted until the end
 * of the session, i.e. until the QTD list is empty.
 * If a periodic channel is released, the FrameList is updated accordingly.
 * Calls transaction selection routines to activate pending transfers.
 */
void dwc2_hcd_complete_xfer_ddma(struct dwc2_hsotg *hsotg,
				 struct dwc2_host_chan *chan, int chnum,
				 enum dwc2_halt_status halt_status)
{
	struct dwc2_qh *qh = chan->qh;
	int continue_isoc_xfer = 0;
	enum dwc2_transaction_type tr_type;

	if (chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
		dwc2_complete_isoc_xfer_ddma(hsotg, chan, halt_status);

		/* Release the channel if halted or session completed */
		if (halt_status != DWC2_HC_XFER_COMPLETE ||
		    list_empty(&qh->qtd_list)) {
			struct dwc2_qtd *qtd, *qtd_tmp;

			/*
			 * Kill all remaining QTDs since the channel has been
			 * halted.
			 */
			list_for_each_entry_safe(qtd, qtd_tmp,
						 &qh->qtd_list,
						 qtd_list_entry) {
				dwc2_host_complete(hsotg, qtd,
						   -ECONNRESET);
				dwc2_hcd_qtd_unlink_and_free(hsotg,
							     qtd, qh);
			}

			/* Halt the channel if session completed */
			if (halt_status == DWC2_HC_XFER_COMPLETE)
				dwc2_hc_halt(hsotg, chan, halt_status);
			dwc2_release_channel_ddma(hsotg, qh);
			dwc2_hcd_qh_unlink(hsotg, qh);
		} else {
			/* Keep in assigned schedule to continue transfer */
			list_move(&qh->qh_list_entry,
				  &hsotg->periodic_sched_assigned);
			/*
			 * If the channel has been halted during giveback of
			 * the urb then prevent any new scheduling.
			 */
			if (!chan->halt_status)
				continue_isoc_xfer = 1;
		}
		/*
		 * Todo: Consider the case when period exceeds FrameList size.
		 * Frame Rollover interrupt should be used.
		 */
	} else {
		/*
		 * Scan descriptor list to complete the URB(s), then release
		 * the channel
		 */
		dwc2_complete_non_isoc_xfer_ddma(hsotg, chan, chnum,
						 halt_status);
		dwc2_release_channel_ddma(hsotg, qh);
		dwc2_hcd_qh_unlink(hsotg, qh);

		if (!list_empty(&qh->qtd_list)) {
			/*
			 * Add back to inactive non-periodic schedule on normal
			 * completion
			 */
			dwc2_hcd_qh_add(hsotg, qh);
		}
	}

	tr_type = dwc2_hcd_select_transactions(hsotg);
	if (tr_type != DWC2_TRANSACTION_NONE || continue_isoc_xfer) {
		if (continue_isoc_xfer) {
			if (tr_type == DWC2_TRANSACTION_NONE)
				tr_type = DWC2_TRANSACTION_PERIODIC;
			else if (tr_type == DWC2_TRANSACTION_NON_PERIODIC)
				tr_type = DWC2_TRANSACTION_ALL;
		}
		dwc2_hcd_queue_transactions(hsotg, tr_type);
	}
}