/*
 * MUSB OTG driver host support
 *
 * Copyright 2005 Mentor Graphics Corporation
 * Copyright (C) 2005-2006 by Texas Instruments
 * Copyright (C) 2006-2007 Nokia Corporation
 * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include "musb_core.h"
#include "musb_host.h"
/* MUSB HOST status 22-mar-2006
 *
 * - There's still lots of partial code duplication for fault paths, so
 *   they aren't handled as consistently as they need to be.
 *
 * - PIO mostly behaved when last tested.
 *     + including ep0, with all usbtest cases 9, 10
 *     + usbtest 14 (ep0out) doesn't seem to run at all
 *     + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest
 *       configurations, but otherwise double buffering passes basic tests.
 *     + for 2.6.N, for N > ~10, needs API changes for hcd framework.
 *
 * - DMA (CPPI) ... partially behaves, not currently recommended
 *     + about 1/15 the speed of typical EHCI implementations (PCI)
 *     + RX, all too often reqpkt seems to misbehave after tx
 *     + TX, no known issues (other than evident silicon issue)
 *
 * - DMA (Mentor/OMAP) ...has at least toggle update problems
 *
 * - [23-feb-2009] minimal traffic scheduling to avoid bulk RX packet
 *   starvation ... nothing yet for TX, interrupt, or isochronous.
 *
 * - Not tested with HNP, but some SRP paths seem to behave.
 *
 * NOTE 24-August-2006:
 *
 * - Bulk traffic finally uses both sides of hardware ep1, freeing up an
 *   extra endpoint for periodic use enabling hub + keybd + mouse.  That
 *   mostly works, except that with "usbnet" it's easy to trigger cases
 *   with "ping" where RX loses.  (a) ping to davinci, even "ping -f",
 *   fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses
 *   although ARP RX wins.  (That test was done with a full speed link.)
 */


/*
 * NOTE on endpoint usage:
 *
 * CONTROL transfers all go through ep0.  BULK ones go through dedicated IN
 * and OUT endpoints ... hardware is dedicated for those "async" queue(s).
 * (Yes, bulk _could_ use more of the endpoints than that, and would even
 * benefit from it.)
 *
 * INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
 * So far that scheduling is both dumb and optimistic:  the endpoint will be
 * "claimed" until its software queue is no longer refilled.  No multiplexing
 * of transfers between endpoints, or anything clever.
 */
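
/*
 * Illustrative sketch (not part of the driver): the "claimed" test the
 * note above describes amounts to checking whether a qh is still bound
 * to a hardware endpoint for the given direction; musb_schedule() below
 * skips any hw_ep where this would return true.  Hypothetical helper,
 * shown only for clarity:
 *
 *	static inline bool musb_hw_ep_is_claimed(struct musb_hw_ep *hw_ep,
 *						 int is_in)
 *	{
 *		return (is_in ? hw_ep->in_qh : hw_ep->out_qh) != NULL;
 *	}
 */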

static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, int is_out,
			u8 *buf, u32 offset, u32 len);

/*
 * Clear TX fifo. Needed to avoid BABBLE errors.
 */
static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
{
	struct musb	*musb = ep->musb;
	void __iomem	*epio = ep->regs;
	u16		csr;
	int		retries = 1000;

	csr = musb_readw(epio, MUSB_TXCSR);
	while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
		dev_dbg(musb->controller, "Host TX FIFONOTEMPTY csr: %04x\n", csr);
		csr |= MUSB_TXCSR_FLUSHFIFO;
		musb_writew(epio, MUSB_TXCSR, csr);
		csr = musb_readw(epio, MUSB_TXCSR);
		if (WARN(retries-- < 1,
				"Could not flush host TX%d fifo: csr: %04x\n",
				ep->epnum, csr))
			return;
		mdelay(1);
	}
}

static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep)
{
	void __iomem	*epio = ep->regs;
	u16		csr;
	int		retries = 5;

	/* scrub any data left in the fifo */
	do {
		csr = musb_readw(epio, MUSB_TXCSR);
		if (!(csr & (MUSB_CSR0_TXPKTRDY | MUSB_CSR0_RXPKTRDY)))
			break;
		musb_writew(epio, MUSB_TXCSR, MUSB_CSR0_FLUSHFIFO);
		csr = musb_readw(epio, MUSB_TXCSR);
		udelay(10);
	} while (--retries);

	WARN(!retries, "Could not flush host TX%d fifo: csr: %04x\n",
			ep->epnum, csr);

	/* and reset for the next transfer */
	musb_writew(epio, MUSB_TXCSR, 0);
}
/*
 * Start transmit. Caller is responsible for locking shared resources.
 * musb must be locked.
 */
static inline void musb_h_tx_start(struct musb_hw_ep *ep)
{
	u16	txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	if (ep->epnum) {
		txcsr = musb_readw(ep->regs, MUSB_TXCSR);
		txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS;
		musb_writew(ep->regs, MUSB_TXCSR, txcsr);
	} else {
		txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY;
		musb_writew(ep->regs, MUSB_CSR0, txcsr);
	}
}

static inline void musb_h_tx_dma_start(struct musb_hw_ep *ep)
{
	u16	txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	txcsr = musb_readw(ep->regs, MUSB_TXCSR);
	txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
	if (is_cppi_enabled())
		txcsr |= MUSB_TXCSR_DMAMODE;
	musb_writew(ep->regs, MUSB_TXCSR, txcsr);
}

static void musb_ep_set_qh(struct musb_hw_ep *ep, int is_in, struct musb_qh *qh)
{
	if (is_in != 0 || ep->is_shared_fifo)
		ep->in_qh  = qh;
	if (is_in == 0 || ep->is_shared_fifo)
		ep->out_qh = qh;
}

static struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in)
{
	return is_in ? ep->in_qh : ep->out_qh;
}
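
/*
 * Example of the pair above (illustrative): on a shared-fifo endpoint
 * both conditions in musb_ep_set_qh() hold, so musb_ep_set_qh(ep, 0, qh)
 * sets in_qh *and* out_qh, and a later musb_ep_get_qh() in either
 * direction finds the same qh.  On a split-fifo endpoint only the
 * matching direction is updated.
 */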

/*
 * Start the URB at the front of an endpoint's queue
 * end must be claimed from the caller.
 *
 * Context: controller locked, irqs blocked
 */
static void
musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
{
	u16			frame;
	u32			len;
	void __iomem		*mbase = musb->mregs;
	struct urb		*urb = next_urb(qh);
	void			*buf = urb->transfer_buffer;
	u32			offset = 0;
	struct musb_hw_ep	*hw_ep = qh->hw_ep;
	unsigned		pipe = urb->pipe;
	u8			address = usb_pipedevice(pipe);
	int			epnum = hw_ep->epnum;

	/* initialize software qh state */
	qh->offset = 0;
	qh->segsize = 0;

	/* gather right source of data */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_CONTROL:
		/* control transfers always start with SETUP */
		is_in = 0;
		musb->ep0_stage = MUSB_EP0_START;
		buf = urb->setup_packet;
		len = 8;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		qh->iso_idx = 0;
		qh->frame = 0;
		offset = urb->iso_frame_desc[0].offset;
		len = urb->iso_frame_desc[0].length;
		break;
	default:		/* bulk, interrupt */
		/* actual_length may be nonzero on retry paths */
		buf = urb->transfer_buffer + urb->actual_length;
		len = urb->transfer_buffer_length - urb->actual_length;
	}

	dev_dbg(musb->controller, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n",
			qh, urb, address, qh->epnum,
			is_in ? "in" : "out",
			({char *s; switch (qh->type) {
			case USB_ENDPOINT_XFER_CONTROL:	s = ""; break;
			case USB_ENDPOINT_XFER_BULK:	s = "-bulk"; break;
			case USB_ENDPOINT_XFER_ISOC:	s = "-iso"; break;
			default:			s = "-intr"; break;
			} s; }),
			epnum, buf + offset, len);

	/* Configure endpoint */
	musb_ep_set_qh(hw_ep, is_in, qh);
	musb_ep_program(musb, epnum, urb, !is_in, buf, offset, len);

	/* transmit may have more work: start it when it is time */
	if (is_in)
		return;

	/* determine if the time is right for a periodic transfer */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_ISOC:
	case USB_ENDPOINT_XFER_INT:
		dev_dbg(musb->controller, "check whether there's still time for periodic Tx\n");
		frame = musb_readw(mbase, MUSB_FRAME);
		/* FIXME this doesn't implement that scheduling policy ...
		 * or handle framecounter wrapping
		 */
		if ((urb->transfer_flags & URB_ISO_ASAP)
				|| (frame >= urb->start_frame)) {
			/* REVISIT the SOF irq handler shouldn't duplicate
			 * this code; and we don't init urb->start_frame...
			 */
			qh->frame = 0;
			goto start;
		} else {
			qh->frame = urb->start_frame;
			/* enable SOF interrupt so we can count down */
			dev_dbg(musb->controller, "SOF for %d\n", epnum);
#if 1 /* ifndef	CONFIG_ARCH_DAVINCI */
			musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
#endif
		}
		break;
	default:
start:
		dev_dbg(musb->controller, "Start TX%d %s\n", epnum,
			hw_ep->tx_channel ? "dma" : "pio");

		if (!hw_ep->tx_channel)
			musb_h_tx_start(hw_ep);
		else if (is_cppi_enabled() || tusb_dma_omap())
			musb_h_tx_dma_start(hw_ep);
	}
}

/* Context: caller owns controller lock, IRQs are blocked */
static void musb_giveback(struct musb *musb, struct urb *urb, int status)
__releases(musb->lock)
__acquires(musb->lock)
{
	dev_dbg(musb->controller,
			"complete %p %pF (%d), dev%d ep%d%s, %d/%d\n",
			urb, urb->complete, status,
			usb_pipedevice(urb->pipe),
			usb_pipeendpoint(urb->pipe),
			usb_pipein(urb->pipe) ? "in" : "out",
			urb->actual_length, urb->transfer_buffer_length
			);

	usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb);
	spin_unlock(&musb->lock);
	usb_hcd_giveback_urb(musb_to_hcd(musb), urb, status);
	spin_lock(&musb->lock);
}

/* For bulk/interrupt endpoints only */
static inline void musb_save_toggle(struct musb_qh *qh, int is_in,
				    struct urb *urb)
{
	void __iomem		*epio = qh->hw_ep->regs;
	u16			csr;

	/*
	 * FIXME: the current Mentor DMA code seems to have
	 * problems getting toggle correct.
	 */
	if (is_in)
		csr = musb_readw(epio, MUSB_RXCSR) & MUSB_RXCSR_H_DATATOGGLE;
	else
		csr = musb_readw(epio, MUSB_TXCSR) & MUSB_TXCSR_H_DATATOGGLE;

	usb_settoggle(urb->dev, qh->epnum, !is_in, csr ? 1 : 0);
}

/*
 * Advance this hardware endpoint's queue, completing the specified URB and
 * advancing to either the next URB queued to that qh, or else invalidating
 * that qh and advancing to the next qh scheduled after the current one.
 *
 * Context: caller owns controller lock, IRQs are blocked
 */
static void musb_advance_schedule(struct musb *musb, struct urb *urb,
				  struct musb_hw_ep *hw_ep, int is_in)
{
	struct musb_qh		*qh = musb_ep_get_qh(hw_ep, is_in);
	struct musb_hw_ep	*ep = qh->hw_ep;
	int			ready = qh->is_ready;
	int			status;

	status = (urb->status == -EINPROGRESS) ? 0 : urb->status;

	/* save toggle eagerly, for paranoia */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		musb_save_toggle(qh, is_in, urb);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (status == 0 && urb->error_count)
			status = -EXDEV;
		break;
	}

	qh->is_ready = 0;
	musb_giveback(musb, urb, status);
	qh->is_ready = ready;

	/* reclaim resources (and bandwidth) ASAP; deschedule it, and
	 * invalidate qh as soon as list_empty(&hep->urb_list)
	 */
	if (list_empty(&qh->hep->urb_list)) {
		struct list_head	*head;
		struct dma_controller	*dma = musb->dma_controller;

		if (is_in) {
			ep->rx_reinit = 1;
			if (ep->rx_channel) {
				dma->channel_release(ep->rx_channel);
				ep->rx_channel = NULL;
			}
		} else {
			ep->tx_reinit = 1;
			if (ep->tx_channel) {
				dma->channel_release(ep->tx_channel);
				ep->tx_channel = NULL;
			}
		}

		/* Clobber old pointers to this qh */
		musb_ep_set_qh(ep, is_in, NULL);
		qh->hep->hcpriv = NULL;

		switch (qh->type) {

		case USB_ENDPOINT_XFER_CONTROL:
		case USB_ENDPOINT_XFER_BULK:
			/* fifo policy for these lists, except that NAKing
			 * should rotate a qh to the end (for fairness).
			 */
			if (qh->mux == 1) {
				head = qh->ring.prev;
				list_del(&qh->ring);
				kfree(qh);
				qh = first_qh(head);
				break;
			}

		case USB_ENDPOINT_XFER_ISOC:
		case USB_ENDPOINT_XFER_INT:
			/* this is where periodic bandwidth should be
			 * de-allocated if it's tracked and allocated;
			 * and where we'd update the schedule tree...
			 */
			kfree(qh);
			qh = NULL;
			break;
		}
	}

	if (qh != NULL && qh->is_ready) {
		dev_dbg(musb->controller, "... next ep%d %cX urb %p\n",
		    hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
		musb_start_urb(musb, is_in, qh);
	}
}

static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
{
	/* we don't want fifo to fill itself again;
	 * ignore dma (various models),
	 * leave toggle alone (may not have been saved yet)
	 */
	csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY;
	csr &= ~(MUSB_RXCSR_H_REQPKT
		| MUSB_RXCSR_H_AUTOREQ
		| MUSB_RXCSR_AUTOCLEAR);

	/* write 2x to allow double buffering */
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);

	/* flush writebuffer */
	return musb_readw(hw_ep->regs, MUSB_RXCSR);
}

/*
 * PIO RX for a packet (or part of it).
 */
static bool
musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
{
	u16			rx_count;
	u8			*buf;
	u16			csr;
	bool			done = false;
	u32			length;
	int			do_flush = 0;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	int			pipe = urb->pipe;
	void			*buffer = urb->transfer_buffer;

	/* musb_ep_select(mbase, epnum); */
	rx_count = musb_readw(epio, MUSB_RXCOUNT);
	dev_dbg(musb->controller, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count,
			urb->transfer_buffer, qh->offset,
			urb->transfer_buffer_length);

	/* unload FIFO */
	if (usb_pipeisoc(pipe)) {
		int					status = 0;
		struct usb_iso_packet_descriptor	*d;

		if (iso_err) {
			status = -EILSEQ;
			urb->error_count++;
		}

		d = urb->iso_frame_desc + qh->iso_idx;
		buf = buffer + d->offset;
		length = d->length;
		if (rx_count > length) {
			if (status == 0) {
				status = -EOVERFLOW;
				urb->error_count++;
			}
			dev_dbg(musb->controller, "** OVERFLOW %d into %d\n", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		d->actual_length = length;

		d->status = status;

		/* see if we are done */
		done = (++qh->iso_idx >= urb->number_of_packets);
	} else {
		/* non-isoch */
		buf = buffer + qh->offset;
		length = urb->transfer_buffer_length - qh->offset;
		if (rx_count > length) {
			if (urb->status == -EINPROGRESS)
				urb->status = -EOVERFLOW;
			dev_dbg(musb->controller, "** OVERFLOW %d into %d\n", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		qh->offset += length;

		/* see if we are done */
		done = (urb->actual_length == urb->transfer_buffer_length)
			|| (rx_count < qh->maxpacket)
			|| (urb->status != -EINPROGRESS);
		if (done
				&& (urb->status == -EINPROGRESS)
				&& (urb->transfer_flags & URB_SHORT_NOT_OK)
				&& (urb->actual_length
					< urb->transfer_buffer_length))
			urb->status = -EREMOTEIO;
	}

	musb_read_fifo(hw_ep, length, buf);

	csr = musb_readw(epio, MUSB_RXCSR);
	csr |= MUSB_RXCSR_H_WZC_BITS;
	if (unlikely(do_flush))
		musb_h_flush_rxfifo(hw_ep, csr);
	else {
		/* REVISIT this assumes AUTOCLEAR is never set */
		csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT);
		if (!done)
			csr |= MUSB_RXCSR_H_REQPKT;
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	return done;
}

/* we don't always need to reinit a given side of an endpoint...
 * when we do, use tx/rx reinit routine and then construct a new CSR
 * to address data toggle, NYET, and DMA or PIO.
 *
 * it's possible that driver bugs (especially for DMA) or aborting a
 * transfer might have left the endpoint busier than it should be.
 * the busy/not-empty tests are basically paranoia.
 */
static void
musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
{
	u16	csr;

	/* NOTE:  we know the "rx" fifo reinit never triggers for ep0.
	 * That always uses tx_reinit since ep0 repurposes TX register
	 * offsets; the initial SETUP packet is also a kind of OUT.
	 */

	/* if programmed for Tx, put it in RX mode */
	if (ep->is_shared_fifo) {
		csr = musb_readw(ep->regs, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_MODE) {
			musb_h_tx_flush_fifo(ep);
			csr = musb_readw(ep->regs, MUSB_TXCSR);
			musb_writew(ep->regs, MUSB_TXCSR,
				    csr | MUSB_TXCSR_FRCDATATOG);
		}

		/*
		 * Clear the MODE bit (and everything else) to enable Rx.
		 * NOTE: we mustn't clear the DMAMODE bit before DMAENAB.
		 */
		if (csr & MUSB_TXCSR_DMAMODE)
			musb_writew(ep->regs, MUSB_TXCSR, MUSB_TXCSR_DMAMODE);
		musb_writew(ep->regs, MUSB_TXCSR, 0);

	/* scrub all previous state, clearing toggle */
	} else {
		csr = musb_readw(ep->regs, MUSB_RXCSR);
		if (csr & MUSB_RXCSR_RXPKTRDY)
			WARNING("rx%d, packet/%d ready?\n", ep->epnum,
				musb_readw(ep->regs, MUSB_RXCOUNT));

		musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
	}

	/* target addr and (for multipoint) hub addr/port */
	if (musb->is_multipoint) {
		musb_write_rxfunaddr(ep->target_regs, qh->addr_reg);
		musb_write_rxhubaddr(ep->target_regs, qh->h_addr_reg);
		musb_write_rxhubport(ep->target_regs, qh->h_port_reg);

	} else
		musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);

	/* protocol/endpoint, interval/NAKlimit, i/o size */
	musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
	musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
	/* NOTE: bulk combining rewrites high bits of maxpacket */
	/* Set RXMAXP with the FIFO size of the endpoint
	 * to disable double buffer mode.
	 */
	if (musb->double_buffer_not_ok)
		musb_writew(ep->regs, MUSB_RXMAXP, ep->max_packet_sz_rx);
	else
		musb_writew(ep->regs, MUSB_RXMAXP,
			    qh->maxpacket | ((qh->hb_mult - 1) << 11));

	ep->rx_reinit = 0;
}
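
/*
 * Worked example of the RXMAXP encoding above (illustrative): a
 * high-bandwidth ISO endpoint with maxpacket 1024 and hb_mult 3 is
 * programmed as 1024 | ((3 - 1) << 11) = 0x0400 | 0x1000 = 0x1400,
 * i.e. the packet size in bits 0..10 and "packets per microframe
 * minus one" in bits 11..12.
 */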

static bool musb_tx_dma_program(struct dma_controller *dma,
		struct musb_hw_ep *hw_ep, struct musb_qh *qh,
		struct urb *urb, u32 offset, u32 length)
{
	struct dma_channel	*channel = hw_ep->tx_channel;
	void __iomem		*epio = hw_ep->regs;
	u16			pkt_size = qh->maxpacket;
	u16			csr;
	u8			mode;

#ifdef	CONFIG_USB_INVENTRA_DMA
	if (length > channel->max_len)
		length = channel->max_len;

	csr = musb_readw(epio, MUSB_TXCSR);
	if (length > pkt_size) {
		mode = 1;
		csr |= MUSB_TXCSR_DMAMODE | MUSB_TXCSR_DMAENAB;
		/* autoset shouldn't be set in high bandwidth */
		if (qh->hb_mult == 1)
			csr |= MUSB_TXCSR_AUTOSET;
	} else {
		mode = 0;
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
		csr |= MUSB_TXCSR_DMAENAB; /* against programmer's guide */
	}
	channel->desired_mode = mode;
	musb_writew(epio, MUSB_TXCSR, csr);
#else
	if (!is_cppi_enabled() && !tusb_dma_omap())
		return false;

	channel->actual_len = 0;

	/*
	 * TX uses "RNDIS" mode automatically but needs help
	 * to identify the zero-length-final-packet case.
	 */
	mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0;
#endif

	qh->segsize = length;

	/*
	 * Ensure the data reaches to main memory before starting
	 * DMA transfer
	 */
	wmb();

	if (!dma->channel_program(channel, pkt_size, mode,
			urb->transfer_dma + offset, length)) {
		dma->channel_release(channel);
		hw_ep->tx_channel = NULL;

		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB);
		musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_H_WZC_BITS);
		return false;
	}
	return true;
}

/*
 * Program an HDRC endpoint as per the given URB
 * Context: irqs blocked, controller lock held
 */
static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, int is_out,
			u8 *buf, u32 offset, u32 len)
{
	struct dma_controller	*dma_controller;
	struct dma_channel	*dma_channel;
	u8			dma_ok;
	void __iomem		*mbase = musb->mregs;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = musb_ep_get_qh(hw_ep, !is_out);
	u16			packet_sz = qh->maxpacket;
	u8			use_dma = 1;
	u16			csr;

	dev_dbg(musb->controller, "%s hw%d urb %p spd%d dev%d ep%d%s "
				"h_addr%02x h_port%02x bytes %d\n",
			is_out ? "-->" : "<--",
			epnum, urb, urb->dev->speed,
			qh->addr_reg, qh->epnum, is_out ? "out" : "in",
			qh->h_addr_reg, qh->h_port_reg,
			len);

	musb_ep_select(mbase, epnum);

	if (is_out && !len) {
		use_dma = 0;
		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~MUSB_TXCSR_DMAENAB;
		musb_writew(epio, MUSB_TXCSR, csr);
		hw_ep->tx_channel = NULL;
	}

	/* candidate for DMA? */
	dma_controller = musb->dma_controller;
	if (use_dma && is_dma_capable() && epnum && dma_controller) {
		dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
		if (!dma_channel) {
			dma_channel = dma_controller->channel_alloc(
					dma_controller, hw_ep, is_out);
			if (is_out)
				hw_ep->tx_channel = dma_channel;
			else
				hw_ep->rx_channel = dma_channel;
		}
	} else
		dma_channel = NULL;

	/* make sure we clear DMAEnab, autoSet bits from previous run */

	/* OUT/transmit/EP0 or IN/receive? */
	if (is_out) {
		u16	csr;
		u16	int_txe;
		u16	load_count;

		csr = musb_readw(epio, MUSB_TXCSR);

		/* disable interrupt in case we flush */
		int_txe = musb->intrtxe;
		musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));

		/* general endpoint setup */
		if (epnum) {
			/* flush all old state, set default */
			musb_h_tx_flush_fifo(hw_ep);

			/*
			 * We must not clear the DMAMODE bit before or in
			 * the same cycle with the DMAENAB bit, so we clear
			 * the latter first...
			 */
			csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
					| MUSB_TXCSR_AUTOSET
					| MUSB_TXCSR_DMAENAB
					| MUSB_TXCSR_FRCDATATOG
					| MUSB_TXCSR_H_RXSTALL
					| MUSB_TXCSR_H_ERROR
					| MUSB_TXCSR_TXPKTRDY
					);
			csr |= MUSB_TXCSR_MODE;

			if (usb_gettoggle(urb->dev, qh->epnum, 1))
				csr |= MUSB_TXCSR_H_WR_DATATOGGLE
					| MUSB_TXCSR_H_DATATOGGLE;
			else
				csr |= MUSB_TXCSR_CLRDATATOG;

			musb_writew(epio, MUSB_TXCSR, csr);
			/* REVISIT may need to clear FLUSHFIFO ... */
			csr &= ~MUSB_TXCSR_DMAMODE;
			musb_writew(epio, MUSB_TXCSR, csr);
			csr = musb_readw(epio, MUSB_TXCSR);
		} else {
			/* endpoint 0: just flush */
			musb_h_ep0_flush_fifo(hw_ep);
		}

		/* target addr and (for multipoint) hub addr/port */
		if (musb->is_multipoint) {
			musb_write_txfunaddr(mbase, epnum, qh->addr_reg);
			musb_write_txhubaddr(mbase, epnum, qh->h_addr_reg);
			musb_write_txhubport(mbase, epnum, qh->h_port_reg);
			/* FIXME if !epnum, do the same for RX ... */
		} else
			musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);

		/* protocol/endpoint/interval/NAKlimit */
		if (epnum) {
			musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
			if (musb->double_buffer_not_ok)
				musb_writew(epio, MUSB_TXMAXP,
						hw_ep->max_packet_sz_tx);
			else if (can_bulk_split(musb, qh->type))
				musb_writew(epio, MUSB_TXMAXP, packet_sz
					| ((hw_ep->max_packet_sz_tx /
						packet_sz) - 1) << 11);
			else
				musb_writew(epio, MUSB_TXMAXP,
						qh->maxpacket |
						((qh->hb_mult - 1) << 11));
			musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
		} else {
			musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
			if (musb->is_multipoint)
				musb_writeb(epio, MUSB_TYPE0,
						qh->type_reg);
		}

		if (can_bulk_split(musb, qh->type))
			load_count = min((u32) hw_ep->max_packet_sz_tx,
						len);
		else
			load_count = min((u32) packet_sz, len);

		if (dma_channel && musb_tx_dma_program(dma_controller,
					hw_ep, qh, urb, offset, len))
			load_count = 0;

		if (load_count) {
			/* PIO to load FIFO */
			qh->segsize = load_count;
			if (!buf) {
				sg_miter_start(&qh->sg_miter, urb->sg, 1,
						SG_MITER_ATOMIC
						| SG_MITER_FROM_SG);
				if (!sg_miter_next(&qh->sg_miter)) {
					dev_err(musb->controller,
							"error: sg list empty\n");
					sg_miter_stop(&qh->sg_miter);
					goto finish;
				}
				buf = qh->sg_miter.addr + urb->sg->offset +
					urb->actual_length;
				load_count = min_t(u32, load_count,
						qh->sg_miter.length);
				musb_write_fifo(hw_ep, load_count, buf);
				qh->sg_miter.consumed = load_count;
				sg_miter_stop(&qh->sg_miter);
			} else
				musb_write_fifo(hw_ep, load_count, buf);
		}
finish:
		/* re-enable interrupt */
		musb_writew(mbase, MUSB_INTRTXE, int_txe);

	/* IN/receive */
	} else {
		u16	csr;

		if (hw_ep->rx_reinit) {
			musb_rx_reinit(musb, qh, hw_ep);

			/* init new state: toggle and NYET, maybe DMA later */
			if (usb_gettoggle(urb->dev, qh->epnum, 0))
				csr = MUSB_RXCSR_H_WR_DATATOGGLE
					| MUSB_RXCSR_H_DATATOGGLE;
			else
				csr = 0;
			if (qh->type == USB_ENDPOINT_XFER_INT)
				csr |= MUSB_RXCSR_DISNYET;

		} else {
			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

			if (csr & (MUSB_RXCSR_RXPKTRDY
					| MUSB_RXCSR_DMAENAB
					| MUSB_RXCSR_H_REQPKT))
				ERR("broken !rx_reinit, ep%d csr %04x\n",
						hw_ep->epnum, csr);

			/* scrub any stale state, leaving toggle alone */
			csr &= MUSB_RXCSR_DISNYET;
		}

		/* kick things off */

		if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {
			/* Candidate for DMA */
			dma_channel->actual_len = 0L;
			qh->segsize = len;

			/* AUTOREQ is in a DMA register */
			musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

			/*
			 * Unless caller treats short RX transfers as
			 * errors, we dare not queue multiple transfers.
			 */
			dma_ok = dma_controller->channel_program(dma_channel,
					packet_sz, !(urb->transfer_flags &
						     URB_SHORT_NOT_OK),
					urb->transfer_dma + offset,
					qh->segsize);
			if (!dma_ok) {
				dma_controller->channel_release(dma_channel);
				hw_ep->rx_channel = dma_channel = NULL;
			} else
				csr |= MUSB_RXCSR_DMAENAB;
		}

		csr |= MUSB_RXCSR_H_REQPKT;
		dev_dbg(musb->controller, "RXCSR%d := %04x\n", epnum, csr);
		musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
		csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
	}
}

/* Schedule next QH from musb->in_bulk/out_bulk and move the current qh to
 * the end; avoids starvation for other endpoints.
 */
static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
	int is_in)
{
	struct dma_channel	*dma;
	struct urb		*urb;
	void __iomem		*mbase = musb->mregs;
	void __iomem		*epio = ep->regs;
	struct musb_qh		*cur_qh, *next_qh;
	u16			rx_csr, tx_csr;

	musb_ep_select(mbase, ep->epnum);
	if (is_in) {
		dma = is_dma_capable() ? ep->rx_channel : NULL;

		/* clear nak timeout bit */
		rx_csr = musb_readw(epio, MUSB_RXCSR);
		rx_csr |= MUSB_RXCSR_H_WZC_BITS;
		rx_csr &= ~MUSB_RXCSR_DATAERROR;
		musb_writew(epio, MUSB_RXCSR, rx_csr);

		cur_qh = first_qh(&musb->in_bulk);
	} else {
		dma = is_dma_capable() ? ep->tx_channel : NULL;

		/* clear nak timeout bit */
		tx_csr = musb_readw(epio, MUSB_TXCSR);
		tx_csr |= MUSB_TXCSR_H_WZC_BITS;
		tx_csr &= ~MUSB_TXCSR_H_NAKTIMEOUT;
		musb_writew(epio, MUSB_TXCSR, tx_csr);

		cur_qh = first_qh(&musb->out_bulk);
	}
	if (cur_qh) {
		urb = next_urb(cur_qh);
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			musb->dma_controller->channel_abort(dma);
			urb->actual_length += dma->actual_len;
			dma->actual_len = 0L;
		}
		musb_save_toggle(cur_qh, is_in, urb);

		if (is_in) {
			/* move cur_qh to end of queue */
			list_move_tail(&cur_qh->ring, &musb->in_bulk);

			/* get the next qh from musb->in_bulk */
			next_qh = first_qh(&musb->in_bulk);

			/* set rx_reinit and schedule the next qh */
			ep->rx_reinit = 1;
		} else {
			/* move cur_qh to end of queue */
			list_move_tail(&cur_qh->ring, &musb->out_bulk);

			/* get the next qh from musb->out_bulk */
			next_qh = first_qh(&musb->out_bulk);

			/* set tx_reinit and schedule the next qh */
			ep->tx_reinit = 1;
		}
		musb_start_urb(musb, is_in, next_qh);
	}
}

/*
 * Service the default endpoint (ep0) as host.
 * Return true until it's time to start the status stage.
 */
static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
{
	bool			 more = false;
	u8			*fifo_dest = NULL;
	u16			fifo_count = 0;
	struct musb_hw_ep	*hw_ep = musb->control_ep;
	struct musb_qh		*qh = hw_ep->in_qh;
	struct usb_ctrlrequest	*request;

	switch (musb->ep0_stage) {
	case MUSB_EP0_IN:
		fifo_dest = urb->transfer_buffer + urb->actual_length;
		fifo_count = min_t(size_t, len, urb->transfer_buffer_length -
				   urb->actual_length);
		if (fifo_count < len)
			urb->status = -EOVERFLOW;

		musb_read_fifo(hw_ep, fifo_count, fifo_dest);

		urb->actual_length += fifo_count;
		if (len < qh->maxpacket) {
			/* always terminate on short read; it's
			 * rarely reported as an error.
			 */
		} else if (urb->actual_length <
				urb->transfer_buffer_length)
			more = true;
		break;
	case MUSB_EP0_START:
		request = (struct usb_ctrlrequest *) urb->setup_packet;

		if (!request->wLength) {
			dev_dbg(musb->controller, "start no-DATA\n");
			break;
		} else if (request->bRequestType & USB_DIR_IN) {
			dev_dbg(musb->controller, "start IN-DATA\n");
			musb->ep0_stage = MUSB_EP0_IN;
			more = true;
			break;
		} else {
			dev_dbg(musb->controller, "start OUT-DATA\n");
			musb->ep0_stage = MUSB_EP0_OUT;
			more = true;
		}
		/* FALLTHROUGH */
	case MUSB_EP0_OUT:
		fifo_count = min_t(size_t, qh->maxpacket,
				   urb->transfer_buffer_length -
				   urb->actual_length);
		if (fifo_count) {
			fifo_dest = (u8 *) (urb->transfer_buffer
					+ urb->actual_length);
			dev_dbg(musb->controller, "Sending %d byte%s to ep0 fifo %p\n",
					fifo_count,
					(fifo_count == 1) ? "" : "s",
					fifo_dest);
			musb_write_fifo(hw_ep, fifo_count, fifo_dest);

			urb->actual_length += fifo_count;
			more = true;
		}
		break;
	default:
		ERR("bogus ep0 stage %d\n", musb->ep0_stage);
		break;
	}

	return more;
}

/*
 * Handle default endpoint interrupt as host. Only called in IRQ time
 * from musb_interrupt().
 *
 * called with controller irqlocked
 */
irqreturn_t musb_h_ep0_irq(struct musb *musb)
{
	struct urb		*urb;
	u16			csr, len;
	int			status = 0;
	void __iomem		*mbase = musb->mregs;
	struct musb_hw_ep	*hw_ep = musb->control_ep;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	bool			complete = false;
	irqreturn_t		retval = IRQ_NONE;

	/* ep0 only has one queue, "in" */
	urb = next_urb(qh);

	musb_ep_select(mbase, 0);
	csr = musb_readw(epio, MUSB_CSR0);
	len = (csr & MUSB_CSR0_RXPKTRDY)
			? musb_readb(epio, MUSB_COUNT0)
			: 0;

	dev_dbg(musb->controller, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n",
		csr, qh, len, urb, musb->ep0_stage);

	/* if we just did status stage, we are done */
	if (MUSB_EP0_STATUS == musb->ep0_stage) {
		retval = IRQ_HANDLED;
		complete = true;
	}

	/* prepare status */
	if (csr & MUSB_CSR0_H_RXSTALL) {
		dev_dbg(musb->controller, "STALLING ENDPOINT\n");
		status = -EPIPE;

	} else if (csr & MUSB_CSR0_H_ERROR) {
		dev_dbg(musb->controller, "no response, csr0 %04x\n", csr);
		status = -EPROTO;

	} else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
		dev_dbg(musb->controller, "control NAK timeout\n");

		/* NOTE:  this code path would be a good place to PAUSE a
		 * control transfer, if another one is queued, so that
		 * ep0 is more likely to stay busy.  That's already done
		 * for bulk RX transfers.
		 *
		 * if (qh->ring.next != &musb->control), then
		 * we have a candidate... NAKing is *NOT* an error
		 */
		musb_writew(epio, MUSB_CSR0, 0);
		retval = IRQ_HANDLED;
	}

	if (status) {
		dev_dbg(musb->controller, "aborting\n");
		retval = IRQ_HANDLED;
		if (urb)
			urb->status = status;
		complete = true;

		/* use the proper sequence to abort the transfer */
		if (csr & MUSB_CSR0_H_REQPKT) {
			csr &= ~MUSB_CSR0_H_REQPKT;
			musb_writew(epio, MUSB_CSR0, csr);
			csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
			musb_writew(epio, MUSB_CSR0, csr);
		} else {
			musb_h_ep0_flush_fifo(hw_ep);
		}

		musb_writeb(epio, MUSB_NAKLIMIT0, 0);

		/* clear it */
		musb_writew(epio, MUSB_CSR0, 0);
	}

	if (unlikely(!urb)) {
		/* stop endpoint since we have no place for its data, this
		 * SHOULD NEVER HAPPEN! */
		ERR("no URB for end 0\n");

		musb_h_ep0_flush_fifo(hw_ep);
		goto done;
	}

	if (!complete) {
		/* call common logic and prepare response */
		if (musb_h_ep0_continue(musb, len, urb)) {
			/* more packets required */
			csr = (MUSB_EP0_IN == musb->ep0_stage)
				?  MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY;
		} else {
			/* data transfer complete; perform status phase */
			if (usb_pipeout(urb->pipe)
					|| !urb->transfer_buffer_length)
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_H_REQPKT;
			else
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_TXPKTRDY;

			/* flag status stage */
			musb->ep0_stage = MUSB_EP0_STATUS;

			dev_dbg(musb->controller, "ep0 STATUS, csr %04x\n", csr);

		}
		musb_writew(epio, MUSB_CSR0, csr);
		retval = IRQ_HANDLED;
	} else
		musb->ep0_stage = MUSB_EP0_IDLE;

	/* call completion handler if done */
	if (complete)
		musb_advance_schedule(musb, urb, hw_ep, 1);
done:
	return retval;
}

#ifdef CONFIG_USB_INVENTRA_DMA

/* Host side TX (OUT) using Mentor DMA works as follows:
	submit_urb ->
		- if queue was empty, Program Endpoint
		- ... which starts DMA to fifo in mode 1 or 0

	DMA Isr (transfer complete) -> TxAvail()
		- Stop DMA (~DmaEnab)	(<--- Alert ... currently happens
					only in musb_cleanup_urb)
		- TxPktRdy has to be set in mode 0 or for
			short packets in mode 1.
*/

#endif

/* Service a Tx-Available or dma completion irq for the endpoint */
void musb_host_tx(struct musb *musb, u8 epnum)
{
	int			pipe;
	bool			done = false;
	u16			tx_csr;
	size_t			length = 0;
	size_t			offset = 0;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->out_qh;
	struct urb		*urb = next_urb(qh);
	u32			status = 0;
	void __iomem		*mbase = musb->mregs;
	struct dma_channel	*dma;
	bool			transfer_pending = false;

	musb_ep_select(mbase, epnum);
	tx_csr = musb_readw(epio, MUSB_TXCSR);

	/* with CPPI, DMA sometimes triggers "extra" irqs */
	if (!urb) {
		dev_dbg(musb->controller, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
		return;
	}

	pipe = urb->pipe;
	dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
	dev_dbg(musb->controller, "OUT/TX%d end, csr %04x%s\n", epnum, tx_csr,
			dma ? ", dma" : "");

	/* check for errors */
	if (tx_csr & MUSB_TXCSR_H_RXSTALL) {
		/* dma was disabled, fifo flushed */
		dev_dbg(musb->controller, "TX end %d stall\n", epnum);

		/* stall; record URB status */
		status = -EPIPE;

	} else if (tx_csr & MUSB_TXCSR_H_ERROR) {
		/* (NON-ISO) dma was disabled, fifo flushed */
		dev_dbg(musb->controller, "TX 3strikes on ep=%d\n", epnum);

		status = -ETIMEDOUT;

	} else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
		if (USB_ENDPOINT_XFER_BULK == qh->type && qh->mux == 1
				&& !list_is_singular(&musb->out_bulk)) {
			dev_dbg(musb->controller,
				"NAK timeout on TX%d ep\n", epnum);
			musb_bulk_nak_timeout(musb, hw_ep, 0);
		} else {
			dev_dbg(musb->controller,
				"TX end=%d device not responding\n", epnum);
			/* NOTE:  this code path would be a good place to PAUSE a
			 * transfer, if there's some other (nonperiodic) tx urb
			 * that could use this fifo.  (dma complicates it...)
			 * That's already done for bulk RX transfers.
			 *
			 * if (bulk && qh->ring.next != &musb->out_bulk), then
			 * we have a candidate... NAKing is *NOT* an error
			 */
			musb_ep_select(mbase, epnum);
			musb_writew(epio, MUSB_TXCSR,
					MUSB_TXCSR_H_WZC_BITS
					| MUSB_TXCSR_TXPKTRDY);
		}
		return;
	}

done:
	if (status) {
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
		}

		/* do the proper sequence to abort the transfer in the
		 * usb core; the dma engine should already be stopped.
		 */
		musb_h_tx_flush_fifo(hw_ep);
		tx_csr &= ~(MUSB_TXCSR_AUTOSET
				| MUSB_TXCSR_DMAENAB
				| MUSB_TXCSR_H_ERROR
				| MUSB_TXCSR_H_RXSTALL
				| MUSB_TXCSR_H_NAKTIMEOUT
				);

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_TXCSR, tx_csr);
		/* REVISIT may need to clear FLUSHFIFO ... */
		musb_writew(epio, MUSB_TXCSR, tx_csr);
		musb_writeb(epio, MUSB_TXINTERVAL, 0);

		done = true;
	}

	/* second cppi case */
	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		dev_dbg(musb->controller, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
		return;
	}

	if (is_dma_capable() && dma && !status) {
		/*
		 * DMA has completed.  But if we're using DMA mode 1 (multi
		 * packet DMA), we need a terminal TXPKTRDY interrupt before
		 * we can consider this transfer completed, lest we trash
		 * its last packet when writing the next URB's data.  So we
		 * switch back to mode 0 to get that interrupt; we'll come
		 * back here once it happens.
		 */
		if (tx_csr & MUSB_TXCSR_DMAMODE) {
			/*
			 * We shouldn't clear DMAMODE with DMAENAB set; so
			 * clear them in a safe order.  That should be OK
			 * once TXPKTRDY has been set (and I've never seen
			 * it being 0 at this moment -- DMA interrupt latency
			 * is significant) but if it hasn't been then we have
			 * no choice but to stop being polite and ignore the
			 * programmer's guide... :-)
			 *
			 * Note that we must write TXCSR with TXPKTRDY cleared
			 * in order not to re-trigger the packet send (this bit
			 * can't be cleared by CPU), and there's another caveat:
			 * TXPKTRDY may be set shortly and then cleared in the
			 * double-buffered FIFO mode, so we do an extra TXCSR
			 * read for debouncing...
			 */
			tx_csr &= musb_readw(epio, MUSB_TXCSR);
			if (tx_csr & MUSB_TXCSR_TXPKTRDY) {
				tx_csr &= ~(MUSB_TXCSR_DMAENAB |
					    MUSB_TXCSR_TXPKTRDY);
				musb_writew(epio, MUSB_TXCSR,
					    tx_csr | MUSB_TXCSR_H_WZC_BITS);
			}
			tx_csr &= ~(MUSB_TXCSR_DMAMODE |
				    MUSB_TXCSR_TXPKTRDY);
			musb_writew(epio, MUSB_TXCSR,
				    tx_csr | MUSB_TXCSR_H_WZC_BITS);

			/*
			 * There is no guarantee that we'll get an interrupt
			 * after clearing DMAMODE as we might have done this
			 * too late (after TXPKTRDY was cleared by controller).
			 * Re-read TXCSR as we have spoiled its previous value.
			 */
			tx_csr = musb_readw(epio, MUSB_TXCSR);
		}

		/*
		 * We may get here from a DMA completion or TXPKTRDY interrupt.
		 * In any case, we must check the FIFO status here and bail out
		 * only if the FIFO still has data -- that should prevent the
		 * "missed" TXPKTRDY interrupts and deal with double-buffered
		 * FIFO mode too...
		 */
		if (tx_csr & (MUSB_TXCSR_FIFONOTEMPTY | MUSB_TXCSR_TXPKTRDY)) {
			dev_dbg(musb->controller, "DMA complete but packet still in FIFO, "
			    "CSR %04x\n", tx_csr);
			return;
		}
	}

	if (!status || dma || usb_pipeisoc(pipe)) {
		if (dma)
			length = dma->actual_len;
		else
			length = qh->segsize;
		qh->offset += length;

		if (usb_pipeisoc(pipe)) {
			struct usb_iso_packet_descriptor	*d;

			d = urb->iso_frame_desc + qh->iso_idx;
			d->actual_length = length;
			d->status = status;
			if (++qh->iso_idx >= urb->number_of_packets) {
				done = true;
			} else {
				d++;
				offset = d->offset;
				length = d->length;
			}
		} else if (dma && urb->transfer_buffer_length == qh->offset) {
			done = true;
		} else {
			/* see if we need to send more data, or ZLP */
			if (qh->segsize < qh->maxpacket)
				done = true;
			else if (qh->offset == urb->transfer_buffer_length
					&& !(urb->transfer_flags
						& URB_ZERO_PACKET))
				done = true;
			if (!done) {
				offset = qh->offset;
				length = urb->transfer_buffer_length - offset;
				transfer_pending = true;
			}
		}
	}

	/* urb->status != -EINPROGRESS means request has been faulted,
	 * so we must abort this transfer after cleanup
	 */
	if (urb->status != -EINPROGRESS) {
		done = true;
		if (status == 0)
			status = urb->status;
	}

	if (done) {
		/* set status */
		urb->status = status;
		urb->actual_length = qh->offset;
		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);
		return;
	} else if ((usb_pipeisoc(pipe) || transfer_pending) && dma) {
		if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb,
				offset, length)) {
			if (is_cppi_enabled() || tusb_dma_omap())
				musb_h_tx_dma_start(hw_ep);
			return;
		}
	} else if (tx_csr & MUSB_TXCSR_DMAENAB) {
		dev_dbg(musb->controller, "not complete, but DMA enabled?\n");
		return;
	}

	/*
	 * PIO: start next packet in this URB.
	 *
	 * REVISIT: some docs say that when hw_ep->tx_double_buffered,
	 * (and presumably, FIFO is not half-full) we should write *two*
	 * packets before updating TXCSR; other docs disagree...
	 */
	if (length > qh->maxpacket)
		length = qh->maxpacket;
	/* Unmap the buffer so that CPU can use it */
	usb_hcd_unmap_urb_for_dma(musb_to_hcd(musb), urb);

	/*
	 * We need to map sg if the transfer_buffer is
	 * NULL.
	 */
	if (!urb->transfer_buffer)
		qh->use_sg = true;

	if (qh->use_sg) {
		/* sg_miter_start is already done in musb_ep_program */
		if (!sg_miter_next(&qh->sg_miter)) {
			dev_err(musb->controller, "error: sg list empty\n");
			sg_miter_stop(&qh->sg_miter);
			status = -EINVAL;
			goto done;
		}
		urb->transfer_buffer = qh->sg_miter.addr;
		length = min_t(u32, length, qh->sg_miter.length);
		musb_write_fifo(hw_ep, length, urb->transfer_buffer);
		qh->sg_miter.consumed = length;
		sg_miter_stop(&qh->sg_miter);
	} else {
		musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset);
	}

	qh->segsize = length;

	if (qh->use_sg) {
		if (offset + length >= urb->transfer_buffer_length)
			qh->use_sg = false;
	}

	musb_ep_select(mbase, epnum);
	musb_writew(epio, MUSB_TXCSR,
			MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
}

#ifdef CONFIG_USB_INVENTRA_DMA

/* Host side RX (IN) using Mentor DMA works as follows:
	submit_urb ->
		- if queue was empty, ProgramEndpoint
		- first IN token is sent out (by setting ReqPkt)
	LinuxIsr -> RxReady()
	/\	=> first packet is received
	|	- Set in mode 0 (DmaEnab, ~ReqPkt)
	|		-> DMA Isr (transfer complete) -> RxReady()
	|		    - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab)
	|		    - if urb not complete, send next IN token (ReqPkt)
	|			   |		else complete urb.
	|			   |
	---------------------------
 *
 * Nuances of mode 1:
 *	For short packets, no ack (+RxPktRdy) is sent automatically
 *	(even if AutoClear is ON)
 *	For full packets, ack (~RxPktRdy) and next IN token (+ReqPkt) is sent
 *	automatically => major problem, as collecting the next packet becomes
 *	difficult. Hence mode 1 is not used.
 *
 * REVISIT
 *	All we care about at this driver level is that
 *       (a) all URBs terminate with REQPKT cleared and fifo(s) empty;
 *       (b) termination conditions are: short RX, or buffer full;
 *       (c) fault modes include
 *           - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO.
 *             (and that endpoint's dma queue stops immediately)
 *           - overflow (full, PLUS more bytes in the terminal packet)
 *
 *	So for example, usb-storage sets URB_SHORT_NOT_OK, and would
 *	thus be a great candidate for using mode 1 ... for all but the
 *	last packet of one URB's transfer.
 */

#endif
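
/*
 * Illustrative sketch (hypothetical helper, not in this driver): per the
 * note above, mode 1 would only be safe if everything except the final
 * packet of a transfer were programmed in mode 1, leaving the last,
 * possibly short, packet to a separate mode 0 transfer:
 *
 *	static inline u32 mode1_safe_len(u32 remaining, u16 maxpacket)
 *	{
 *		return (remaining > maxpacket) ? remaining - maxpacket : 0;
 *	}
 */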

/*
 * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
 * and high-bandwidth IN transfer cases.
 */
void musb_host_rx(struct musb *musb, u8 epnum)
{
	struct urb		*urb;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	size_t			xfer_len;
	void __iomem		*mbase = musb->mregs;
	int			pipe;
	u16			rx_csr, val;
	bool			iso_err = false;
	bool			done = false;
	u32			status;
	struct dma_channel	*dma;
	unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;

	musb_ep_select(mbase, epnum);

	urb = next_urb(qh);
	dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
	status = 0;
	xfer_len = 0;

	rx_csr = musb_readw(epio, MUSB_RXCSR);
	val = rx_csr;

	if (unlikely(!urb)) {
		/* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least
		 * usbtest #11 (unlinks) triggers it regularly, sometimes
		 * with fifo full.  (Only with DMA??)
		 */
		dev_dbg(musb->controller, "BOGUS RX%d ready, csr %04x, count %d\n", epnum, val,
			musb_readw(epio, MUSB_RXCOUNT));
		musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
		return;
	}

	pipe = urb->pipe;

	dev_dbg(musb->controller, "<== hw %d rxcsr %04x, urb actual %d (+dma %zu)\n",
		epnum, rx_csr, urb->actual_length,
		dma ? dma->actual_len : 0);

	/* check for errors, concurrent stall & unlink is not really
	 * handled yet! */
	if (rx_csr & MUSB_RXCSR_H_RXSTALL) {
		dev_dbg(musb->controller, "RX end %d STALL\n", epnum);

		/* stall; record URB status */
		status = -EPIPE;

	} else if (rx_csr & MUSB_RXCSR_H_ERROR) {
		dev_dbg(musb->controller, "end %d RX proto error\n", epnum);

		status = -EPROTO;
		musb_writeb(epio, MUSB_RXINTERVAL, 0);

	} else if (rx_csr & MUSB_RXCSR_DATAERROR) {

		if (USB_ENDPOINT_XFER_ISOC != qh->type) {
			dev_dbg(musb->controller, "RX end %d NAK timeout\n", epnum);

			/* NOTE: NAKing is *NOT* an error, so we want to
			 * continue.  Except ... if there's a request for
			 * another QH, use that instead of starving it.
			 *
			 * Devices like Ethernet and serial adapters keep
			 * reads posted at all times, which will starve
			 * other devices without this logic.
			 */
			if (usb_pipebulk(urb->pipe)
					&& qh->mux == 1
					&& !list_is_singular(&musb->in_bulk)) {
				musb_bulk_nak_timeout(musb, hw_ep, 1);
				return;
			}
			musb_ep_select(mbase, epnum);
			rx_csr |= MUSB_RXCSR_H_WZC_BITS;
			rx_csr &= ~MUSB_RXCSR_DATAERROR;
			musb_writew(epio, MUSB_RXCSR, rx_csr);

			goto finish;
		} else {
			dev_dbg(musb->controller, "RX end %d ISO data error\n", epnum);
			/* packet error reported later */
			iso_err = true;
		}
	} else if (rx_csr & MUSB_RXCSR_INCOMPRX) {
		dev_dbg(musb->controller, "end %d high bandwidth incomplete ISO packet RX\n",
				epnum);
		status = -EPROTO;
	}

	/* faults abort the transfer */
	if (status) {
		/* clean up dma and collect transfer count */
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
			xfer_len = dma->actual_len;
		}
		musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
		musb_writeb(epio, MUSB_RXINTERVAL, 0);
		done = true;
		goto finish;
	}

	if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) {
		/* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */
		ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr);
		goto finish;
	}

	/* thorough shutdown for now ... given more precise fault handling
	 * and better queueing support, we might keep a DMA pipeline going
	 * while processing this irq for earlier completions.
	 */

	/* FIXME this is _way_ too much in-line logic for Mentor DMA */

#ifndef CONFIG_USB_INVENTRA_DMA
	if (rx_csr & MUSB_RXCSR_H_REQPKT)  {
		/* REVISIT this happened for a while on some short reads...
		 * the cleanup still needs investigation... looks bad...
		 * and also duplicates dma cleanup code above ... plus,
		 * shouldn't this be the "half full" double buffer case?
		 */
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
			xfer_len = dma->actual_len;
			done = true;
		}

		dev_dbg(musb->controller, "RXCSR%d %04x, reqpkt, len %zu%s\n", epnum, rx_csr,
				xfer_len, dma ? ", dma" : "");
		rx_csr &= ~MUSB_RXCSR_H_REQPKT;

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | rx_csr);
	}
#endif
	if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) {
		xfer_len = dma->actual_len;

		val &= ~(MUSB_RXCSR_DMAENAB
			| MUSB_RXCSR_H_AUTOREQ
			| MUSB_RXCSR_AUTOCLEAR
			| MUSB_RXCSR_RXPKTRDY);
		musb_writew(hw_ep->regs, MUSB_RXCSR, val);

#ifdef CONFIG_USB_INVENTRA_DMA
		if (usb_pipeisoc(pipe)) {
			struct usb_iso_packet_descriptor *d;

			d = urb->iso_frame_desc + qh->iso_idx;
			d->actual_length = xfer_len;

			/* even if there was an error, we did the dma
			 * for iso_frame_desc->length
			 */
			if (d->status != -EILSEQ && d->status != -EOVERFLOW)
				d->status = 0;

			if (++qh->iso_idx >= urb->number_of_packets)
				done = true;
			else
				done = false;

		} else  {
			/* done if urb buffer is full or short packet is recd */
			done = (urb->actual_length + xfer_len >=
					urb->transfer_buffer_length
				|| dma->actual_len < qh->maxpacket);
		}

		/* send IN token for next packet, without AUTOREQ */
		if (!done) {
			val |= MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | val);
		}

		dev_dbg(musb->controller, "ep %d dma %s, rxcsr %04x, rxcount %d\n", epnum,
			done ? "off" : "reset",
			musb_readw(epio, MUSB_RXCSR),
			musb_readw(epio, MUSB_RXCOUNT));
#else
		done = true;
#endif
	} else if (urb->status == -EINPROGRESS) {
		/* if no errors, be sure a packet is ready for unloading */
		if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) {
			status = -EPROTO;
			ERR("Rx interrupt with no errors or packet!\n");

			/* FIXME this is another "SHOULD NEVER HAPPEN" */

/* SCRUB (RX) */
			/* do the proper sequence to abort the transfer */
			musb_ep_select(mbase, epnum);
			val &= ~MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR, val);
			goto finish;
		}

		/* we are expecting IN packets */
#ifdef CONFIG_USB_INVENTRA_DMA
		if (dma) {
			struct dma_controller	*c;
			u16			rx_count;
			int			ret, length;
			dma_addr_t		buf;

			rx_count = musb_readw(epio, MUSB_RXCOUNT);

			dev_dbg(musb->controller, "RX%d count %d, buffer 0x%x len %d/%d\n",
					epnum, rx_count,
					urb->transfer_dma
						+ urb->actual_length,
					qh->offset,
					urb->transfer_buffer_length);

			c = musb->dma_controller;

			if (usb_pipeisoc(pipe)) {
				int d_status = 0;
				struct usb_iso_packet_descriptor *d;

				d = urb->iso_frame_desc + qh->iso_idx;

				if (iso_err) {
					d_status = -EILSEQ;
					urb->error_count++;
				}
				if (rx_count > d->length) {
					if (d_status == 0) {
						d_status = -EOVERFLOW;
						urb->error_count++;
					}
					dev_dbg(musb->controller, "** OVERFLOW %d into %d\n",
						rx_count, d->length);

					length = d->length;
				} else
					length = rx_count;
				d->status = d_status;
				buf = urb->transfer_dma + d->offset;
			} else {
				length = rx_count;
				buf = urb->transfer_dma +
						urb->actual_length;
			}

			dma->desired_mode = 0;
#ifdef USE_MODE1
			/* because of the issue below, mode 1 will
			 * only rarely behave with correct semantics.
			 */
			if ((urb->transfer_flags &
						URB_SHORT_NOT_OK)
				&& (urb->transfer_buffer_length -
						urb->actual_length)
					> qh->maxpacket)
				dma->desired_mode = 1;
			if (rx_count < hw_ep->max_packet_sz_rx) {
				length = rx_count;
				dma->desired_mode = 0;
			} else {
				length = urb->transfer_buffer_length;
			}
#endif

/* Disadvantage of using mode 1:
 *	It's basically usable only for mass storage class; essentially all
 *	other protocols also terminate transfers on short packets.
 *
 * Details:
 *	An extra IN token is sent at the end of the transfer (due to AUTOREQ)
 *	If you try to use mode 1 for (transfer_buffer_length - 512), and try
 *	to use the extra IN token to grab the last packet using mode 0, then
 *	the problem is that you cannot be sure when the device will send the
 *	last packet and RxPktRdy set. Sometimes the packet is recd too soon
 *	such that it gets lost when RxCSR is re-set at the end of the mode 1
 *	transfer, while sometimes it is recd just a little late so that if you
 *	try to configure for mode 0 soon after the mode 1 transfer is
 *	completed, you will find rxcount 0. Okay, so you might think why not
 *	wait for an interrupt when the pkt is recd. Well, you won't get any!
 */

			val = musb_readw(epio, MUSB_RXCSR);
			val &= ~MUSB_RXCSR_H_REQPKT;

			if (dma->desired_mode == 0)
				val &= ~MUSB_RXCSR_H_AUTOREQ;
			else
				val |= MUSB_RXCSR_H_AUTOREQ;
			val |= MUSB_RXCSR_DMAENAB;

			/* autoclear shouldn't be set in high bandwidth */
			if (qh->hb_mult == 1)
				val |= MUSB_RXCSR_AUTOCLEAR;

			musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | val);

			/* REVISIT if when actual_length != 0,
			 * transfer_buffer_length needs to be
			 * adjusted first...
			 */
			ret = c->channel_program(
				dma, qh->maxpacket,
				dma->desired_mode, buf, length);

			if (!ret) {
				c->channel_release(dma);
				hw_ep->rx_channel = NULL;
				dma = NULL;
				val = musb_readw(epio, MUSB_RXCSR);
				val &= ~(MUSB_RXCSR_DMAENAB
					| MUSB_RXCSR_H_AUTOREQ
					| MUSB_RXCSR_AUTOCLEAR);
				musb_writew(epio, MUSB_RXCSR, val);
			}
		}
#endif	/* Mentor DMA */

		if (!dma) {
			unsigned int received_len;

			/* Unmap the buffer so that CPU can use it */
			usb_hcd_unmap_urb_for_dma(musb_to_hcd(musb), urb);

			/*
			 * We need to map sg if the transfer_buffer is
			 * NULL.
			 */
			if (!urb->transfer_buffer) {
				qh->use_sg = true;
				sg_miter_start(&qh->sg_miter, urb->sg, 1,
						sg_flags);
			}

			if (qh->use_sg) {
				if (!sg_miter_next(&qh->sg_miter)) {
					dev_err(musb->controller, "error: sg list empty\n");
					sg_miter_stop(&qh->sg_miter);
					status = -EINVAL;
					done = true;
					goto finish;
				}
				urb->transfer_buffer = qh->sg_miter.addr;
				received_len = urb->actual_length;
				qh->offset = 0x0;
				done = musb_host_packet_rx(musb, urb, epnum,
						iso_err);
				/* Calculate the number of bytes received */
				received_len = urb->actual_length -
					received_len;
				qh->sg_miter.consumed = received_len;
				sg_miter_stop(&qh->sg_miter);
			} else {
				done = musb_host_packet_rx(musb, urb,
						epnum, iso_err);
			}
			dev_dbg(musb->controller, "read %spacket\n", done ? "last " : "");
		}
	}

finish:
	urb->actual_length += xfer_len;
	qh->offset += xfer_len;
	if (done) {
		if (qh->use_sg)
			qh->use_sg = false;

		if (urb->status == -EINPROGRESS)
			urb->status = status;
		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN);
	}
}

/* schedule nodes correspond to peripheral endpoints, like an OHCI QH.
 * the software schedule associates multiple such nodes with a given
 * host side hardware endpoint + direction; scheduling may activate
 * that hardware endpoint.
 */
static int musb_schedule(
	struct musb		*musb,
	struct musb_qh		*qh,
	int			is_in)
{
	int			idle;
	int			best_diff;
	int			best_end, epnum;
	struct musb_hw_ep	*hw_ep = NULL;
	struct list_head	*head = NULL;
	u8			toggle;
	u8			txtype;
	struct urb		*urb = next_urb(qh);

	/* use fixed hardware for control and bulk */
	if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
		head = &musb->control;
		hw_ep = musb->control_ep;
		goto success;
	}

	/* else, periodic transfers get muxed to other endpoints */

	/*
	 * We know this qh hasn't been scheduled, so all we need to do
	 * is choose which hardware endpoint to put it on ...
	 *
	 * REVISIT what we really want here is a regular schedule tree
	 * like e.g. OHCI uses.
	 */
	best_diff = 4096;
	best_end = -1;

	for (epnum = 1, hw_ep = musb->endpoints + 1;
			epnum < musb->nr_endpoints;
			epnum++, hw_ep++) {
		int	diff;

		if (musb_ep_get_qh(hw_ep, is_in) != NULL)
			continue;

		if (hw_ep == musb->bulk_ep)
			continue;

		if (is_in)
			diff = hw_ep->max_packet_sz_rx;
		else
			diff = hw_ep->max_packet_sz_tx;
		diff -= (qh->maxpacket * qh->hb_mult);

		if (diff >= 0 && best_diff > diff) {

			/*
			 * Mentor controller has a bug in that if we schedule
			 * a BULK Tx transfer on an endpoint that had earlier
			 * handled ISOC then the BULK transfer has to start on
			 * a zero toggle.  If the BULK transfer starts on a 1
			 * toggle then this transfer will fail as the mentor
			 * controller starts the Bulk transfer on a 0 toggle
			 * irrespective of the programming of the toggle bits
			 * in the TXCSR register.  Check for this condition
			 * while allocating the EP for a Tx Bulk transfer.  If
			 * so skip this EP.
			 */
			hw_ep = musb->endpoints + epnum;
			toggle = usb_gettoggle(urb->dev, qh->epnum, !is_in);
			txtype = (musb_readb(hw_ep->regs, MUSB_TXTYPE)
					>> 4) & 0x3;
			if (!is_in && (qh->type == USB_ENDPOINT_XFER_BULK) &&
				toggle && (txtype == USB_ENDPOINT_XFER_ISOC))
				continue;

			best_diff = diff;
			best_end = epnum;
		}
	}
	/* use bulk reserved ep1 if no other ep is free */
	if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) {
		hw_ep = musb->bulk_ep;
		if (is_in)
			head = &musb->in_bulk;
		else
			head = &musb->out_bulk;

		/* Enable bulk RX/TX NAK timeout scheme when bulk requests are
		 * multiplexed. This scheme doesn't work in high speed to full
		 * speed scenario as NAK interrupts are not coming from a
		 * full speed device connected to a high speed device.
		 * NAK timeout interval is 8 (128 uframe or 16ms) for HS and
		 * 4 (8 frame or 8ms) for FS device.
		 */
		if (qh->dev)
			qh->intv_reg =
				(USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4;
		goto success;
	} else if (best_end < 0) {
		return -ENOSPC;
	}

	idle = 1;
	qh->mux = 0;
	hw_ep = musb->endpoints + best_end;
	dev_dbg(musb->controller, "qh %p periodic slot %d\n", qh, best_end);
success:
	if (head) {
		idle = list_empty(head);
		list_add_tail(&qh->ring, head);
		qh->mux = 1;
	}
	qh->hw_ep = hw_ep;
	qh->hep->hcpriv = qh;
	if (idle)
		musb_start_urb(musb, is_in, qh);
	return 0;
}
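
/*
 * Aside on the NAK limit values chosen above (illustrative arithmetic,
 * assuming the usual MUSB interval encoding): a register value n means
 * a timeout after 2^(n-1) (micro)frames.  So 8 -> 2^7 = 128 microframes,
 * i.e. 128 * 125us = 16ms at high speed, and 4 -> 2^3 = 8 frames, i.e.
 * 8 * 1ms = 8ms at full speed, matching the comment in musb_schedule().
 */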

static int musb_urb_enqueue(
	struct usb_hcd			*hcd,
	struct urb			*urb,
	gfp_t				mem_flags)
{
	unsigned long			flags;
	struct musb			*musb = hcd_to_musb(hcd);
	struct usb_host_endpoint	*hep = urb->ep;
	struct musb_qh			*qh;
	struct usb_endpoint_descriptor	*epd = &hep->desc;
	int				ret;
	unsigned			type_reg;
	unsigned			interval;

	/* host role must be active */
	if (!is_host_active(musb) || !musb->is_active)
		return -ENODEV;

	spin_lock_irqsave(&musb->lock, flags);
	ret = usb_hcd_link_urb_to_ep(hcd, urb);
	qh = ret ? NULL : hep->hcpriv;
	if (qh)
		urb->hcpriv = qh;
	spin_unlock_irqrestore(&musb->lock, flags);

	/* DMA mapping was already done, if needed, and this urb is on
	 * hep->urb_list now ... so we're done, unless hep wasn't yet
	 * scheduled onto a live qh.
	 *
	 * REVISIT best to keep hep->hcpriv valid until the endpoint gets
	 * disabled, testing for empty qh->ring and avoiding qh setup costs
	 * except for the first urb queued after a config change.
	 */
	if (qh || ret)
		return ret;

	/* Allocate and initialize qh, minimizing the work done each time
	 * hw_ep gets reprogrammed, or with irqs blocked.  Then schedule it.
	 *
	 * REVISIT consider a dedicated qh kmem_cache, so it's harder
	 * for bugs in other kernel code to break this driver...
	 */
	qh = kzalloc(sizeof *qh, mem_flags);
	if (!qh) {
		spin_lock_irqsave(&musb->lock, flags);
		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&musb->lock, flags);
		return -ENOMEM;
	}

	qh->hep = hep;
	qh->dev = urb->dev;
	INIT_LIST_HEAD(&qh->ring);
	qh->is_ready = 1;

	qh->maxpacket = usb_endpoint_maxp(epd);
	qh->type = usb_endpoint_type(epd);

	/* Bits 11 & 12 of wMaxPacketSize encode high bandwidth multiplier.
	 * Some musb cores don't support high bandwidth ISO transfers; and
	 * we don't (yet!) support high bandwidth interrupt transfers.
	 */
	qh->hb_mult = 1 + ((qh->maxpacket >> 11) & 0x03);
	if (qh->hb_mult > 1) {
		int ok = (qh->type == USB_ENDPOINT_XFER_ISOC);

		if (ok)
			ok = (usb_pipein(urb->pipe) && musb->hb_iso_rx)
				|| (usb_pipeout(urb->pipe) && musb->hb_iso_tx);
		if (!ok) {
			ret = -EMSGSIZE;
			goto done;
		}
		qh->maxpacket &= 0x7ff;
	}

	qh->epnum = usb_endpoint_num(epd);

	/* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
	qh->addr_reg = (u8) usb_pipedevice(urb->pipe);

	/* precompute rxtype/txtype/type0 register */
	type_reg = (qh->type << 4) | qh->epnum;
	switch (urb->dev->speed) {
	case USB_SPEED_LOW:
		type_reg |= 0xc0;
		break;
	case USB_SPEED_FULL:
		type_reg |= 0x80;
		break;
	default:
		type_reg |= 0x40;
	}
	qh->type_reg = type_reg;

	/* Precompute RXINTERVAL/TXINTERVAL register */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_INT:
		/*
		 * Full/low speeds use the linear encoding,
		 * high speed uses the logarithmic encoding.
		 */
		if (urb->dev->speed <= USB_SPEED_FULL) {
			interval = max_t(u8, epd->bInterval, 1);
			break;
		}
		/* FALLTHROUGH */
	case USB_ENDPOINT_XFER_ISOC:
		/* ISO always uses logarithmic encoding */
		interval = min_t(u8, epd->bInterval, 16);
		break;
	default:
		/* REVISIT we actually want to use NAK limits, hinting to the
		 * transfer scheduling logic to try some other qh, e.g. try
		 * for 2 msec first:
		 *
		 * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2;
		 *
		 * The downside of disabling this is that transfer scheduling
		 * gets VERY unfair for nonperiodic transfers; a misbehaving
		 * peripheral could make that hurt.  That's perfectly normal
		 * for reads from network or serial adapters ... so we have
		 * partial NAKlimit support for bulk RX.
		 *
		 * The upside of disabling it is simpler transfer scheduling.
		 */
		interval = 0;
	}
	qh->intv_reg = interval;
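
	/*
	 * Worked example of the encodings above (illustrative): a
	 * high-speed interrupt endpoint with bInterval 4 keeps the
	 * logarithmic code 4, i.e. a period of 2^(4-1) = 8 microframes
	 * (1 ms); a full-speed one with bInterval 4 uses the linear
	 * code, i.e. 4 frames (4 ms).
	 */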

	/* precompute addressing for external hub/tt ports */
	if (musb->is_multipoint) {
		struct usb_device	*parent = urb->dev->parent;

		if (parent != hcd->self.root_hub) {
			qh->h_addr_reg = (u8) parent->devnum;

			/* set up tt info if needed */
			if (urb->dev->tt) {
				qh->h_port_reg = (u8) urb->dev->ttport;
				if (urb->dev->tt->hub)
					qh->h_addr_reg =
						(u8) urb->dev->tt->hub->devnum;
				if (urb->dev->tt->multi)
					qh->h_addr_reg |= 0x80;
			}
		}
	}
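
	/*
	 * Example (illustrative): a full-speed device on port 2 of a
	 * multi-TT high-speed hub at address 5 ends up with
	 * h_port_reg = 2 and h_addr_reg = 5 | 0x80 = 0x85, the top bit
	 * flagging the multi-TT case per the code above.
	 */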

	/* invariant: hep->hcpriv is null OR the qh that's already scheduled.
	 * until we get real dma queues (with an entry for each urb/buffer),
	 * we only have work to do in the former case.
	 */
	spin_lock_irqsave(&musb->lock, flags);
	if (hep->hcpriv || !next_urb(qh)) {
		/* some concurrent activity submitted another urb to hep...
		 * odd, rare, error prone, but legal.
		 */
		kfree(qh);
		qh = NULL;
		ret = 0;
	} else
		ret = musb_schedule(musb, qh,
				epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK);

	if (ret == 0) {
		urb->hcpriv = qh;
		/* FIXME set urb->start_frame for iso/intr, it's tested in
		 * musb_start_urb(), but otherwise only konicawc cares ...
		 */
	}
	spin_unlock_irqrestore(&musb->lock, flags);

done:
	if (ret != 0) {
		spin_lock_irqsave(&musb->lock, flags);
		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&musb->lock, flags);
		kfree(qh);
	}
	return ret;
}

/*
 * abort a transfer that's at the head of a hardware queue.
 * called with controller locked, irqs blocked
 * that hardware queue advances to the next transfer, unless prevented
 */
static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
{
	struct musb_hw_ep	*ep = qh->hw_ep;
	struct musb		*musb = ep->musb;
	void __iomem		*epio = ep->regs;
	unsigned		hw_end = ep->epnum;
	void __iomem		*regs = ep->musb->mregs;
	int			is_in = usb_pipein(urb->pipe);
	int			status = 0;
	u16			csr;

	musb_ep_select(regs, hw_end);

	if (is_dma_capable()) {
		struct dma_channel	*dma;

		dma = is_in ? ep->rx_channel : ep->tx_channel;
		if (dma) {
			status = ep->musb->dma_controller->channel_abort(dma);
			dev_dbg(musb->controller,
				"abort %cX%d DMA for urb %p --> %d\n",
				is_in ? 'R' : 'T', ep->epnum,
				urb, status);
			urb->actual_length += dma->actual_len;
		}
	}

	/* turn off DMA requests, discard state, stop polling ... */
	if (ep->epnum && is_in) {
		/* giveback saves bulk toggle */
		csr = musb_h_flush_rxfifo(ep, 0);

		/* REVISIT we still get an irq; should likely clear the
		 * endpoint's irq status here to avoid bogus irqs.
		 * clearing that status is platform-specific...
		 */
	} else if (ep->epnum) {
		musb_h_tx_flush_fifo(ep);
		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~(MUSB_TXCSR_AUTOSET
			| MUSB_TXCSR_DMAENAB
			| MUSB_TXCSR_H_RXSTALL
			| MUSB_TXCSR_H_NAKTIMEOUT
			| MUSB_TXCSR_H_ERROR
			| MUSB_TXCSR_TXPKTRDY);
		musb_writew(epio, MUSB_TXCSR, csr);
		/* REVISIT may need to clear FLUSHFIFO ... */
		musb_writew(epio, MUSB_TXCSR, csr);
		/* flush cpu writebuffer */
		csr = musb_readw(epio, MUSB_TXCSR);
	} else  {
		musb_h_ep0_flush_fifo(ep);
	}
	if (status == 0)
		musb_advance_schedule(ep->musb, urb, ep, is_in);
	return status;
}

static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct musb		*musb = hcd_to_musb(hcd);
	struct musb_qh		*qh;
	unsigned long		flags;
	int			is_in  = usb_pipein(urb->pipe);
	int			ret;

	dev_dbg(musb->controller, "urb=%p, dev%d ep%d%s\n", urb,
			usb_pipedevice(urb->pipe),
			usb_pipeendpoint(urb->pipe),
			is_in ? "in" : "out");

	spin_lock_irqsave(&musb->lock, flags);
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret)
		goto done;

	qh = urb->hcpriv;
	if (!qh)
		goto done;

	/*
	 * Any URB not actively programmed into endpoint hardware can be
	 * immediately given back; that's any URB not at the head of an
	 * endpoint queue, unless someday we get real DMA queues.  And even
	 * if it's at the head, it might not be known to the hardware...
	 *
	 * Otherwise abort current transfer, pending DMA, etc.; urb->status
	 * has already been updated.  This is a synchronous abort; it'd be
	 * OK to hold off until after some IRQ, though.
	 *
	 * NOTE: qh is invalid unless !list_empty(&hep->urb_list)
	 */
	if (!qh->is_ready
			|| urb->urb_list.prev != &qh->hep->urb_list
			|| musb_ep_get_qh(qh->hw_ep, is_in) != qh) {
		int	ready = qh->is_ready;

		qh->is_ready = 0;
		musb_giveback(musb, urb, 0);
		qh->is_ready = ready;

		/* If nothing else (usually musb_giveback) is using it
		 * and its URB list has emptied, recycle this qh.
		 */
		if (ready && list_empty(&qh->hep->urb_list)) {
			qh->hep->hcpriv = NULL;
			list_del(&qh->ring);
			kfree(qh);
		}
	} else
		ret = musb_cleanup_urb(urb, qh);
done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return ret;
}

/* disable an endpoint */
static void
musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
{
	u8			is_in = hep->desc.bEndpointAddress & USB_DIR_IN;
	unsigned long		flags;
	struct musb		*musb = hcd_to_musb(hcd);
	struct musb_qh		*qh;
	struct urb		*urb;

	spin_lock_irqsave(&musb->lock, flags);

	qh = hep->hcpriv;
	if (qh == NULL)
		goto exit;

	/* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */

	/* Kick the first URB off the hardware, if needed */
	qh->is_ready = 0;
	if (musb_ep_get_qh(qh->hw_ep, is_in) == qh) {
		urb = next_urb(qh);

		/* make software (then hardware) stop ASAP */
		if (!urb->unlinked)
			urb->status = -ESHUTDOWN;

		/* cleanup */
		musb_cleanup_urb(urb, qh);

		/* Then nuke all the others ... and advance the
		 * queue on hw_ep (e.g. bulk ring) when we're done.
		 */
		while (!list_empty(&hep->urb_list)) {
			urb = next_urb(qh);
			urb->status = -ESHUTDOWN;
			musb_advance_schedule(musb, urb, qh->hw_ep, is_in);
		}
	} else {
		/* Just empty the queue; the hardware is busy with
		 * other transfers, and since !qh->is_ready nothing
		 * will activate any of these as it advances.
		 */
		while (!list_empty(&hep->urb_list))
			musb_giveback(musb, next_urb(qh), -ESHUTDOWN);

		hep->hcpriv = NULL;
		list_del(&qh->ring);
		kfree(qh);
	}
exit:
	spin_unlock_irqrestore(&musb->lock, flags);
}

static int musb_h_get_frame_number(struct usb_hcd *hcd)
{
	struct musb	*musb = hcd_to_musb(hcd);

	return musb_readw(musb->mregs, MUSB_FRAME);
}

static int musb_h_start(struct usb_hcd *hcd)
{
	struct musb	*musb = hcd_to_musb(hcd);

	/* NOTE: musb_start() is called when the hub driver turns
	 * on port power, or when (OTG) peripheral starts.
	 */
	hcd->state = HC_STATE_RUNNING;
	musb->port1_status = 0;
	return 0;
}

static void musb_h_stop(struct usb_hcd *hcd)
{
	musb_stop(hcd_to_musb(hcd));
	hcd->state = HC_STATE_HALT;
}

static int musb_bus_suspend(struct usb_hcd *hcd)
{
	struct musb	*musb = hcd_to_musb(hcd);
	u8		devctl;

	if (!is_host_active(musb))
		return 0;

	switch (musb->xceiv->state) {
	case OTG_STATE_A_SUSPEND:
		return 0;
	case OTG_STATE_A_WAIT_VRISE:
		/* ID could be grounded even if there's no device
		 * on the other end of the cable.  NOTE that the
		 * A_WAIT_VRISE timers are messy with MUSB...
		 */
		devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
		if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
			musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
		break;
	default:
		break;
	}

	if (musb->is_active) {
		WARNING("trying to suspend as %s while active\n",
				otg_state_string(musb->xceiv->state));
		return -EBUSY;
	} else
		return 0;
}

static int musb_bus_resume(struct usb_hcd *hcd)
{
	/* resuming child port does the work */
	return 0;
}

const struct hc_driver musb_hc_driver = {
	.description		= "musb-hcd",
	.product_desc		= "MUSB HDRC host driver",
	.hcd_priv_size		= sizeof(struct musb),
	.flags			= HCD_USB2 | HCD_MEMORY,

	/* not using irq handler or reset hooks from usbcore, since
	 * those must be shared with peripheral code for OTG configs
	 */

	.start			= musb_h_start,
	.stop			= musb_h_stop,

	.get_frame_number	= musb_h_get_frame_number,

	.urb_enqueue		= musb_urb_enqueue,
	.urb_dequeue		= musb_urb_dequeue,
	.endpoint_disable	= musb_h_disable,

	.hub_status_data	= musb_hub_status_data,
	.hub_control		= musb_hub_control,
	.bus_suspend		= musb_bus_suspend,
	.bus_resume		= musb_bus_resume,
	/* .start_port_reset	= NULL, */
	/* .hub_irq_enable	= NULL, */
};
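
/*
 * For context (illustrative sketch, assumed wiring): the hc_driver above
 * is registered through the generic HCD core by musb_core rather than
 * through usbcore's own irq/reset hooks, roughly:
 *
 *	struct usb_hcd *hcd = usb_create_hcd(&musb_hc_driver,
 *					     dev, dev_name(dev));
 *	...
 *	ret = usb_add_hcd(hcd, 0, 0);
 *
 * with interrupts dispatched from the glue layer's own handler into
 * musb_interrupt(), as the comment inside the struct notes.
 */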