/*
 * WUSB Wire Adapter
 * Data transfer and URB enqueuing
 *
 * Copyright (C) 2005-2006 Intel Corporation
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 * How transfers work: get a buffer, break it up in segments (segment
 * size is a multiple of the maxpacket size). For each segment, issue a
 * segment request (struct wa_xfer_*), then send the data buffer if
 * out or nothing if in (all over the DTO endpoint).
 *
 * For each submitted segment request, a notification will come over
 * the NEP endpoint and a transfer result (struct xfer_result) will
 * arrive in the DTI URB. Read it, get the xfer ID, see if there is
 * data coming (inbound transfer), schedule a read and handle it.
 *
 * Sounds simple; it is a pain to implement.
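 *
 * For example (illustrative numbers only, mirroring the math in
 * __wa_xfer_setup_sizes() below): an rpipe granting 16 blocks of 512
 * bytes gives seg_size = 16 * 512 = 8192, so a 20000 byte outbound
 * URB is split into DIV_ROUND_UP(20000, 8192) = 3 segment requests
 * of 8192, 8192 and 3616 bytes.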
 *
 * LIFE CYCLE / STATE DIAGRAM
 *
 * THIS CODE IS DISGUSTING
 *
 * Warned you are; it's my second try and still not happy with it.
 *
 * - Supports DMA xfers, control, bulk and maybe interrupt
 *
 * - Does not recycle unused rpipes
 *
 *   An rpipe is assigned to an endpoint the first time it is used,
 *   and then it's there, assigned, until the endpoint is disabled
 *   (destroyed [{h,d}wahc_op_ep_disable()]). The assignment of the
 *   rpipe to the endpoint is done under the wa->rpipe_sem semaphore
 *   (should be a mutex).
 *
 *   Two ways it could be done:
 *
 *   (a) set up a timer every time an rpipe's use count drops to 1
 *       (which means unused) or when a transfer ends. Reset the
 *       timer when a xfer is queued. If the timer expires, release
 *       the rpipe [see rpipe_ep_disable()]. (A sketch of this
 *       follows right after this list.)
 *
 *   (b) when looking for free rpipes to attach [rpipe_get_by_ep()],
 *       when none are found, go over the list, check their endpoint
 *       and their activity record; if there was no last-xfer-done-ts
 *       in the last x seconds, take it.
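 *
 *   Sketch of option (a) (illustrative only; assumes a hypothetical
 *   idle_timer field on the rpipe, which this driver does not have):
 *
 *     on last use-count drop / transfer end:
 *       mod_timer(&rpipe->idle_timer, jiffies + RPIPE_IDLE_TIMEOUT);
 *     on xfer queue:
 *       del_timer(&rpipe->idle_timer);
 *     timer handler: release the rpipe as rpipe_ep_disable() does.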
 *
 *   However, because we have a set of limited resources
 *   (max-segments-at-the-same-time per xfer, xfers-per-rpipe,
 *   blocks-per-rpipe, rpipes-per-host), in the end we are going to
 *   have to rebuild all this based on a scheduler, where we keep a
 *   list of transactions to do and, based on the availability of the
 *   different required components (blocks, rpipes, segment slots,
 *   etc.), we go scheduling them. Painful.
 */

#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/ratelimit.h>
#include <linux/export.h>
#include <linux/scatterlist.h>

static void wa_xfer_delayed_run(struct wa_rpipe *);

/*
 * Life cycle governed by 'struct urb' (the refcount of the struct is
 * that of the 'struct urb' and usb_free_urb() would free the whole
 * struct).
 */
struct wa_seg {
        struct urb urb;
        struct urb *dto_urb;            /* for data output? */
        struct list_head list_node;     /* for rpipe->req_list */
        struct wa_xfer *xfer;           /* out xfer */
        u8 index;                       /* which segment we are */
        enum wa_seg_status status;
        ssize_t result;                 /* bytes xfered or error */
        struct wa_xfer_hdr xfer_hdr;
        u8 xfer_extra[];                /* extra space for xfer_hdr_ctl */
};

static void wa_seg_init(struct wa_seg *seg)
{
        /* usb_init_urb() repeats a lot of work, so we do it here */
        kref_init(&seg->urb.kref);
}
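
/*
 * Life-cycle sketch (illustrative, not a call site in this file): the
 * URB is embedded at the head of struct wa_seg, so dropping the URB's
 * last reference frees the whole segment:
 *
 *      struct wa_seg *seg = kzalloc(alloc_size, GFP_ATOMIC);
 *      wa_seg_init(seg);
 *      ...
 *      usb_free_urb(&seg->urb);        (drops the last ref, frees seg)
 */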

/*
 * Protected by xfer->lock
 */
struct wa_xfer {
        struct kref refcnt;
        struct list_head list_node;
        spinlock_t lock;
        u32 id;
        struct wahc *wa;                /* Wire adapter we are plugged to */
        struct usb_host_endpoint *ep;
        struct urb *urb;                /* URB we are transferring for */
        struct wa_seg **seg;            /* transfer segments */
        u8 segs, segs_submitted, segs_done;
        unsigned is_inbound:1;
        unsigned is_dma:1;
        size_t seg_size;
        int result;
        gfp_t gfp;                      /* allocation mask */
        struct wusb_dev *wusb_dev;      /* for activity timestamps */
};

static inline void wa_xfer_init(struct wa_xfer *xfer)
{
        kref_init(&xfer->refcnt);
        INIT_LIST_HEAD(&xfer->list_node);
        spin_lock_init(&xfer->lock);
}

/*
 * Destroy a transfer structure
 *
 * Note that freeing xfer->seg[cnt]->urb will free the containing
 * xfer->seg[cnt] memory that was allocated by __wa_xfer_setup_segs.
 */
static void wa_xfer_destroy(struct kref *_xfer)
{
        struct wa_xfer *xfer = container_of(_xfer, struct wa_xfer, refcnt);
        if (xfer->seg) {
                unsigned cnt;
                for (cnt = 0; cnt < xfer->segs; cnt++) {
                        usb_free_urb(xfer->seg[cnt]->dto_urb);
                        usb_free_urb(&xfer->seg[cnt]->urb);
                }
        }
        kfree(xfer);
}

static void wa_xfer_get(struct wa_xfer *xfer)
{
        kref_get(&xfer->refcnt);
}

static void wa_xfer_put(struct wa_xfer *xfer)
{
        kref_put(&xfer->refcnt, wa_xfer_destroy);
}

/*
 * xfer->lock has to be unlocked
 *
 * We take xfer->lock for setting the result; this is a barrier
 * against drivers/usb/core/hcd.c:unlink1() being called after we call
 * usb_hcd_giveback_urb() and wa_urb_dequeue() trying to get a
 * reference to the transfer.
 */
static void wa_xfer_giveback(struct wa_xfer *xfer)
{
        unsigned long flags;

        spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags);
        list_del_init(&xfer->list_node);
        spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags);
        /* FIXME: segmentation broken -- kills DWA */
        wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result);
        wa_put(xfer->wa);
        wa_xfer_put(xfer);
}

/*
 * xfer->lock has to be unlocked
 */
static void wa_xfer_completion(struct wa_xfer *xfer)
{
        if (xfer->wusb_dev)
                wusb_dev_put(xfer->wusb_dev);
        rpipe_put(xfer->ep->hcpriv);
        wa_xfer_giveback(xfer);
}

/*
 * If transfer is done, wrap it up and return true
 *
 * xfer->lock has to be locked
 */
static unsigned __wa_xfer_is_done(struct wa_xfer *xfer)
{
        struct device *dev = &xfer->wa->usb_iface->dev;
        unsigned result, cnt;
        struct wa_seg *seg;
        struct urb *urb = xfer->urb;
        unsigned found_short = 0;

        result = xfer->segs_done == xfer->segs_submitted;
        if (result == 0)
                goto out;
        urb->actual_length = 0;
        for (cnt = 0; cnt < xfer->segs; cnt++) {
                seg = xfer->seg[cnt];
                switch (seg->status) {
                case WA_SEG_DONE:
                        if (found_short && seg->result > 0) {
                                dev_dbg(dev, "xfer %p#%u: bad short segments (%zu)\n",
                                        xfer, cnt, seg->result);
                                urb->status = -EINVAL;
                                goto out;
                        }
                        urb->actual_length += seg->result;
                        if (seg->result < xfer->seg_size
                            && cnt != xfer->segs-1)
                                found_short = 1;
                        dev_dbg(dev, "xfer %p#%u: DONE short %d "
                                "result %zu urb->actual_length %d\n",
                                xfer, seg->index, found_short, seg->result,
                                urb->actual_length);
                        break;
                case WA_SEG_ERROR:
                        xfer->result = seg->result;
                        dev_dbg(dev, "xfer %p#%u: ERROR result %zu\n",
                                xfer, seg->index, seg->result);
                        goto out;
                case WA_SEG_ABORTED:
                        dev_dbg(dev, "xfer %p#%u ABORTED: result %d\n",
                                xfer, seg->index, urb->status);
                        xfer->result = urb->status;
                        goto out;
                default:
                        dev_warn(dev, "xfer %p#%u: is_done bad state %d\n",
                                 xfer, cnt, seg->status);
                        xfer->result = -EINVAL;
                        goto out;
                }
        }
        xfer->result = 0;
out:
        return result;
}

/*
 * Initialize a transfer's ID
 *
 * We need to use a sequential number; if we use the pointer or the
 * hash of the pointer, it can repeat over sequential transfers and
 * then it will confuse the HWA... wonder why in hell they put a 32
 * bit handle in there then.
 */
static void wa_xfer_id_init(struct wa_xfer *xfer)
{
        xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
}

/* Return the xfer's ID associated with xfer */
static u32 wa_xfer_id(struct wa_xfer *xfer)
{
        return xfer->id;
}

/*
 * Search for a transfer list ID on the HCD's URB list
 *
 * IDs are sequential numbers assigned by wa_xfer_id_init(); they are
 * not pointers or pointer hashes (see the note above it).
 *
 * @returns NULL if not found.
 */
static struct wa_xfer *wa_xfer_get_by_id(struct wahc *wa, u32 id)
{
        unsigned long flags;
        struct wa_xfer *xfer_itr, *xfer = NULL;
        spin_lock_irqsave(&wa->xfer_list_lock, flags);
        list_for_each_entry(xfer_itr, &wa->xfer_list, list_node) {
                if (id == xfer_itr->id) {
                        wa_xfer_get(xfer_itr);
                        xfer = xfer_itr;
                        break;
                }
        }
        spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
        return xfer;
}
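
/*
 * Usage sketch (mirrors wa_xfer_result_cb() below): look up the xfer
 * for an incoming transfer result and drop the reference when done:
 *
 *      xfer = wa_xfer_get_by_id(wa, xfer_result->dwTransferID);
 *      if (xfer == NULL)
 *              return;         (already cancelled/completed)
 *      wa_xfer_result_chew(wa, xfer);
 *      wa_xfer_put(xfer);
 */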

struct wa_xfer_abort_buffer {
        struct urb urb;
        struct wa_xfer_abort cmd;
};

static void __wa_xfer_abort_cb(struct urb *urb)
{
        struct wa_xfer_abort_buffer *b = urb->context;
        usb_put_urb(&b->urb);
}

/*
 * Aborts an ongoing transaction
 *
 * Assumes the transfer is referenced and locked and in a submitted
 * state (mainly that there is an endpoint/rpipe assigned).
 *
 * The callback (see above) does nothing but freeing up the data by
 * putting the URB. Because the URB is allocated at the head of the
 * struct, the whole space we allocated is kfree()d.
 *
 * We'll get an 'aborted transaction' xfer result on DTI, which we'll
 * politely ignore because at this point the transaction has been
 * marked as aborted already.
 */
static void __wa_xfer_abort(struct wa_xfer *xfer)
{
        int result;
        struct device *dev = &xfer->wa->usb_iface->dev;
        struct wa_xfer_abort_buffer *b;
        struct wa_rpipe *rpipe = xfer->ep->hcpriv;

        b = kmalloc(sizeof(*b), GFP_ATOMIC);
        if (b == NULL)
                goto error_kmalloc;
        b->cmd.bLength = sizeof(b->cmd);
        b->cmd.bRequestType = WA_XFER_ABORT;
        b->cmd.wRPipe = rpipe->descr.wRPipeIndex;
        b->cmd.dwTransferID = wa_xfer_id(xfer);

        usb_init_urb(&b->urb);
        usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev,
                usb_sndbulkpipe(xfer->wa->usb_dev,
                                xfer->wa->dto_epd->bEndpointAddress),
                &b->cmd, sizeof(b->cmd), __wa_xfer_abort_cb, b);
        result = usb_submit_urb(&b->urb, GFP_ATOMIC);
        if (result < 0)
                goto error_submit;
        return;                         /* callback frees! */

error_submit:
        if (printk_ratelimit())
                dev_err(dev, "xfer %p: Can't submit abort request: %d\n",
                        xfer, result);
        kfree(b);
error_kmalloc:
        return;
}

/*
 * @returns < 0 on error, transfer segment request size if ok
 */
static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
                                     enum wa_xfer_type *pxfer_type)
{
        ssize_t result;
        struct device *dev = &xfer->wa->usb_iface->dev;
        size_t maxpktsize;
        struct urb *urb = xfer->urb;
        struct wa_rpipe *rpipe = xfer->ep->hcpriv;

        switch (rpipe->descr.bmAttribute & 0x3) {
        case USB_ENDPOINT_XFER_CONTROL:
                *pxfer_type = WA_XFER_TYPE_CTL;
                result = sizeof(struct wa_xfer_ctl);
                break;
        case USB_ENDPOINT_XFER_INT:
        case USB_ENDPOINT_XFER_BULK:
                *pxfer_type = WA_XFER_TYPE_BI;
                result = sizeof(struct wa_xfer_bi);
                break;
        case USB_ENDPOINT_XFER_ISOC:
                dev_err(dev, "FIXME: ISOC not implemented\n");
                result = -ENOSYS;
                goto error;
        default:
                /* never happens */
                BUG();
                result = -EINVAL;       /* shut gcc up */
        }
        xfer->is_inbound = urb->pipe & USB_DIR_IN ? 1 : 0;
        xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0;
        xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks)
                * 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);
        /* Compute the segment size and make sure it is a multiple of
         * the maxpktsize (WUSB1.0[8.3.3.1])...not really too much of
         * a problem. */
        maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize);
        if (xfer->seg_size < maxpktsize) {
                dev_err(dev, "HW BUG? seg_size %zu smaller than maxpktsize "
                        "%zu\n", xfer->seg_size, maxpktsize);
                result = -EINVAL;
                goto error;
        }
        xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
        xfer->segs = DIV_ROUND_UP(urb->transfer_buffer_length, xfer->seg_size);
        if (xfer->segs >= WA_SEGS_MAX) {
                dev_err(dev, "BUG? oops, number of segments %d bigger than %d\n",
                        (int)(urb->transfer_buffer_length / xfer->seg_size),
                        WA_SEGS_MAX);
                result = -EINVAL;
                goto error;
        }
        if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL)
                xfer->segs = 1;
error:
        return result;
}
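
/*
 * Worked example for __wa_xfer_setup_sizes() (illustrative numbers):
 * wBlocks = 16 and bRPipeBlockSize = 10 give a block size of
 * 1 << 9 = 512, so seg_size = 16 * 512 = 8192; with
 * wMaxPacketSize = 512 it is already a multiple, and a 20000 byte
 * URB yields segs = DIV_ROUND_UP(20000, 8192) = 3.
 */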

/* Fill in the common request header and xfer-type specific data. */
static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,
                                 struct wa_xfer_hdr *xfer_hdr0,
                                 enum wa_xfer_type xfer_type,
                                 size_t xfer_hdr_size)
{
        struct wa_rpipe *rpipe = xfer->ep->hcpriv;

        xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
        xfer_hdr0->bLength = xfer_hdr_size;
        xfer_hdr0->bRequestType = xfer_type;
        xfer_hdr0->wRPipe = rpipe->descr.wRPipeIndex;
        xfer_hdr0->dwTransferID = wa_xfer_id(xfer);
        xfer_hdr0->bTransferSegment = 0;
        switch (xfer_type) {
        case WA_XFER_TYPE_CTL: {
                struct wa_xfer_ctl *xfer_ctl =
                        container_of(xfer_hdr0, struct wa_xfer_ctl, hdr);
                xfer_ctl->bmAttribute = xfer->is_inbound ? 1 : 0;
                memcpy(&xfer_ctl->baSetupData, xfer->urb->setup_packet,
                       sizeof(xfer_ctl->baSetupData));
                break;
        }
        case WA_XFER_TYPE_BI:
                break;
        case WA_XFER_TYPE_ISO:
                printk(KERN_ERR "FIXME: ISOC not implemented\n");
        default:
                BUG();
        }
}

/*
 * Callback for the OUT data phase of the segment request
 *
 * Check wa_seg_cb(); most comments also apply here because this
 * function does almost the same thing and they work closely
 * together.
 *
 * If the seg request has failed but this DTO phase has succeeded,
 * wa_seg_cb() has already failed the segment and moved the
 * status to WA_SEG_ERROR, so this will go through 'case 0' and
 * effectively do nothing.
 */
static void wa_seg_dto_cb(struct urb *urb)
{
        struct wa_seg *seg = urb->context;
        struct wa_xfer *xfer = seg->xfer;
        struct wahc *wa;
        struct device *dev;
        struct wa_rpipe *rpipe;
        unsigned long flags;
        unsigned rpipe_ready = 0;
        u8 done = 0;

        switch (urb->status) {
        case 0:
                spin_lock_irqsave(&xfer->lock, flags);
                wa = xfer->wa;
                dev = &wa->usb_iface->dev;
                dev_dbg(dev, "xfer %p#%u: data out done (%d bytes)\n",
                        xfer, seg->index, urb->actual_length);
                if (seg->status < WA_SEG_PENDING)
                        seg->status = WA_SEG_PENDING;
                seg->result = urb->actual_length;
                spin_unlock_irqrestore(&xfer->lock, flags);
                break;
        case -ECONNRESET:       /* URB unlinked; no need to do anything */
        case -ENOENT:           /* as it was done by the who unlinked us */
                break;
        default:                /* Other errors ... */
                spin_lock_irqsave(&xfer->lock, flags);
                wa = xfer->wa;
                dev = &wa->usb_iface->dev;
                rpipe = xfer->ep->hcpriv;
                dev_dbg(dev, "xfer %p#%u: data out error %d\n",
                        xfer, seg->index, urb->status);
                if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
                            EDC_ERROR_TIMEFRAME)) {
                        dev_err(dev, "DTO: URB max acceptable errors "
                                "exceeded, resetting device\n");
                        wa_reset_all(wa);
                }
                if (seg->status != WA_SEG_ERROR) {
                        seg->status = WA_SEG_ERROR;
                        seg->result = urb->status;
                        xfer->segs_done++;
                        __wa_xfer_abort(xfer);
                        rpipe_ready = rpipe_avail_inc(rpipe);
                        done = __wa_xfer_is_done(xfer);
                }
                spin_unlock_irqrestore(&xfer->lock, flags);
                if (done)
                        wa_xfer_completion(xfer);
                if (rpipe_ready)
                        wa_xfer_delayed_run(rpipe);
        }
}

/*
 * Callback for the segment request
 *
 * If successful transition state (unless already transitioned or
 * outbound transfer); otherwise, take a note of the error, mark this
 * segment done and try completion.
 *
 * Note we don't access until we are sure that the transfer hasn't
 * been cancelled (ECONNRESET, ENOENT), which could mean that
 * seg->xfer could be already gone.
 *
 * We have to check before setting the status to WA_SEG_PENDING
 * because sometimes the xfer result callback arrives before this
 * callback (geeeeeeze), so it might happen that we are already in
 * another state. As well, we don't set it if the transfer is inbound,
 * as in that case, wa_seg_dto_cb will do it when the OUT data phase
 * finishes.
 */
static void wa_seg_cb(struct urb *urb)
{
        struct wa_seg *seg = urb->context;
        struct wa_xfer *xfer = seg->xfer;
        struct wahc *wa;
        struct device *dev;
        struct wa_rpipe *rpipe;
        unsigned long flags;
        unsigned rpipe_ready;
        u8 done = 0;

        switch (urb->status) {
        case 0:
                spin_lock_irqsave(&xfer->lock, flags);
                wa = xfer->wa;
                dev = &wa->usb_iface->dev;
                dev_dbg(dev, "xfer %p#%u: request done\n", xfer, seg->index);
                if (xfer->is_inbound && seg->status < WA_SEG_PENDING)
                        seg->status = WA_SEG_PENDING;
                spin_unlock_irqrestore(&xfer->lock, flags);
                break;
        case -ECONNRESET:       /* URB unlinked; no need to do anything */
        case -ENOENT:           /* as it was done by the who unlinked us */
                break;
        default:                /* Other errors ... */
                spin_lock_irqsave(&xfer->lock, flags);
                wa = xfer->wa;
                dev = &wa->usb_iface->dev;
                rpipe = xfer->ep->hcpriv;
                if (printk_ratelimit())
                        dev_err(dev, "xfer %p#%u: request error %d\n",
                                xfer, seg->index, urb->status);
                if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
                            EDC_ERROR_TIMEFRAME)) {
                        dev_err(dev, "DTO: URB max acceptable errors "
                                "exceeded, resetting device\n");
                        wa_reset_all(wa);
                }
                usb_unlink_urb(seg->dto_urb);
                seg->status = WA_SEG_ERROR;
                seg->result = urb->status;
                xfer->segs_done++;
                __wa_xfer_abort(xfer);
                rpipe_ready = rpipe_avail_inc(rpipe);
                done = __wa_xfer_is_done(xfer);
                spin_unlock_irqrestore(&xfer->lock, flags);
                if (done)
                        wa_xfer_completion(xfer);
                if (rpipe_ready)
                        wa_xfer_delayed_run(rpipe);
        }
}

/* allocate an SG list to store bytes_to_transfer bytes and copy the
 * subset of the in_sg that matches the buffer subset
 * we are about to transfer. */
static struct scatterlist *wa_xfer_create_subset_sg(struct scatterlist *in_sg,
        const unsigned int bytes_transferred,
        const unsigned int bytes_to_transfer, unsigned int *out_num_sgs)
{
        struct scatterlist *out_sg;
        unsigned int bytes_processed = 0, offset_into_current_page_data = 0,
                nents;
        struct scatterlist *current_xfer_sg = in_sg;
        struct scatterlist *current_seg_sg, *last_seg_sg;

        /* skip previously transferred pages. */
        while ((current_xfer_sg) &&
                        (bytes_processed < bytes_transferred)) {
                bytes_processed += current_xfer_sg->length;

                /* advance the sg if current segment starts on or past the
                   next page. */
                if (bytes_processed <= bytes_transferred)
                        current_xfer_sg = sg_next(current_xfer_sg);
        }

        /* the data for the current segment starts in current_xfer_sg.
           calculate the offset. */
        if (bytes_processed > bytes_transferred) {
                offset_into_current_page_data = current_xfer_sg->length -
                        (bytes_processed - bytes_transferred);
        }

        /* calculate the number of pages needed by this segment. */
        nents = DIV_ROUND_UP((bytes_to_transfer +
                offset_into_current_page_data +
                current_xfer_sg->offset),
                PAGE_SIZE);

        out_sg = kmalloc((sizeof(struct scatterlist) * nents), GFP_ATOMIC);
        if (out_sg) {
                sg_init_table(out_sg, nents);

                /* copy the portion of the incoming SG that correlates to the
                 * data to be transferred by this segment to the segment SG. */
                last_seg_sg = current_seg_sg = out_sg;
                bytes_processed = 0;

                /* reset nents and calculate the actual number of sg entries
                   needed. */
                nents = 0;
                while ((bytes_processed < bytes_to_transfer) &&
                                current_seg_sg && current_xfer_sg) {
                        unsigned int page_len = min((current_xfer_sg->length -
                                offset_into_current_page_data),
                                (bytes_to_transfer - bytes_processed));

                        sg_set_page(current_seg_sg, sg_page(current_xfer_sg),
                                page_len,
                                current_xfer_sg->offset +
                                offset_into_current_page_data);

                        bytes_processed += page_len;

                        last_seg_sg = current_seg_sg;
                        current_seg_sg = sg_next(current_seg_sg);
                        current_xfer_sg = sg_next(current_xfer_sg);

                        /* only the first page may require additional offset. */
                        offset_into_current_page_data = 0;
                        nents++;
                }

                /* update num_sgs and terminate the list since we may have
                 * concatenated pages. */
                sg_mark_end(last_seg_sg);
                *out_num_sgs = nents;
        }

        return out_sg;
}
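
/*
 * Usage sketch (matches the call in __wa_xfer_setup_segs() below):
 *
 *      seg->dto_urb->sg = wa_xfer_create_subset_sg(
 *              xfer->urb->sg, buf_itr, buf_itr_size,
 *              &(seg->dto_urb->num_sgs));
 */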

/*
 * Allocate the segs array and initialize each of them
 *
 * The segments are freed by wa_xfer_destroy() when the xfer use count
 * drops to zero; however, because each segment is given the same life
 * cycle as the USB URB it contains, it is actually freed by
 * usb_put_urb() on the contained USB URB (twisted, eh?).
 */
static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
{
        int result, cnt;
        size_t alloc_size = sizeof(*xfer->seg[0])
                - sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size;
        struct usb_device *usb_dev = xfer->wa->usb_dev;
        const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd;
        struct wa_seg *seg;
        size_t buf_itr, buf_size, buf_itr_size;

        result = -ENOMEM;
        xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC);
        if (xfer->seg == NULL)
                goto error_segs_kzalloc;
        buf_itr = 0;
        buf_size = xfer->urb->transfer_buffer_length;
        for (cnt = 0; cnt < xfer->segs; cnt++) {
                seg = xfer->seg[cnt] = kzalloc(alloc_size, GFP_ATOMIC);
                if (seg == NULL)
                        goto error_seg_kzalloc;
                wa_seg_init(seg);
                seg->xfer = xfer;
                seg->index = cnt;
                usb_fill_bulk_urb(&seg->urb, usb_dev,
                        usb_sndbulkpipe(usb_dev,
                                        dto_epd->bEndpointAddress),
                        &seg->xfer_hdr, xfer_hdr_size,
                        wa_seg_cb, seg);
                buf_itr_size = min(buf_size, xfer->seg_size);
                if (xfer->is_inbound == 0 && buf_size > 0) {
                        seg->dto_urb = usb_alloc_urb(0, GFP_ATOMIC);
                        if (seg->dto_urb == NULL)
                                goto error_dto_alloc;
                        usb_fill_bulk_urb(
                                seg->dto_urb, usb_dev,
                                usb_sndbulkpipe(usb_dev,
                                                dto_epd->bEndpointAddress),
                                NULL, 0, wa_seg_dto_cb, seg);
                        if (xfer->is_dma) {
                                seg->dto_urb->transfer_dma =
                                        xfer->urb->transfer_dma + buf_itr;
                                seg->dto_urb->transfer_flags |=
                                        URB_NO_TRANSFER_DMA_MAP;
                                seg->dto_urb->transfer_buffer = NULL;
                                seg->dto_urb->sg = NULL;
                                seg->dto_urb->num_sgs = 0;
                        } else {
                                /* do buffer or SG processing. */
                                seg->dto_urb->transfer_flags &=
                                        ~URB_NO_TRANSFER_DMA_MAP;
                                /* this should always be 0 before a resubmit. */
                                seg->dto_urb->num_mapped_sgs = 0;

                                if (xfer->urb->transfer_buffer) {
                                        seg->dto_urb->transfer_buffer =
                                                xfer->urb->transfer_buffer +
                                                buf_itr;
                                        seg->dto_urb->sg = NULL;
                                        seg->dto_urb->num_sgs = 0;
                                } else {
                                        /* allocate an SG list to store
                                           seg_size bytes and copy the subset
                                           of the xfer->urb->sg that matches
                                           the buffer subset we are about to
                                           read. */
                                        seg->dto_urb->sg =
                                                wa_xfer_create_subset_sg(
                                                xfer->urb->sg,
                                                buf_itr, buf_itr_size,
                                                &(seg->dto_urb->num_sgs));

                                        if (!(seg->dto_urb->sg)) {
                                                seg->dto_urb->num_sgs = 0;
                                                goto error_sg_alloc;
                                        }

                                        seg->dto_urb->transfer_buffer = NULL;
                                }
                        }
                        seg->dto_urb->transfer_buffer_length = buf_itr_size;
                }
                seg->status = WA_SEG_READY;
                buf_itr += buf_itr_size;
                buf_size -= buf_itr_size;
        }
        return 0;

error_sg_alloc:
        usb_free_urb(xfer->seg[cnt]->dto_urb);
error_dto_alloc:
        kfree(xfer->seg[cnt]);
        cnt--;
error_seg_kzalloc:
        /* use the fact that cnt is left at where it failed */
        for (; cnt >= 0; cnt--) {
                if (xfer->seg[cnt] && xfer->is_inbound == 0) {
                        /* free the subset SG before its owning URB. */
                        kfree(xfer->seg[cnt]->dto_urb->sg);
                        usb_free_urb(xfer->seg[cnt]->dto_urb);
                }
                kfree(xfer->seg[cnt]);
        }
error_segs_kzalloc:
        return result;
}

/*
 * Allocates all the stuff needed to submit a transfer
 *
 * Breaks the whole data buffer in a list of segments, each one has a
 * structure allocated to it and linked in xfer->seg[index]
 *
 * FIXME: merge setup_segs() and the last part of this function, no
 * need to do two for loops when we could run everything in a
 * single one.
 */
static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb)
{
        int result;
        struct device *dev = &xfer->wa->usb_iface->dev;
        enum wa_xfer_type xfer_type = 0; /* shut up GCC */
        size_t xfer_hdr_size, cnt, transfer_size;
        struct wa_xfer_hdr *xfer_hdr0, *xfer_hdr;

        result = __wa_xfer_setup_sizes(xfer, &xfer_type);
        if (result < 0)
                goto error_setup_sizes;
        xfer_hdr_size = result;
        result = __wa_xfer_setup_segs(xfer, xfer_hdr_size);
        if (result < 0) {
                dev_err(dev, "xfer %p: Failed to allocate %d segments: %d\n",
                        xfer, xfer->segs, result);
                goto error_setup_segs;
        }
        /* Fill the first header */
        xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
        wa_xfer_id_init(xfer);
        __wa_xfer_setup_hdr0(xfer, xfer_hdr0, xfer_type, xfer_hdr_size);

        /* Fill remaining headers */
        xfer_hdr = xfer_hdr0;
        transfer_size = urb->transfer_buffer_length;
        xfer_hdr0->dwTransferLength = transfer_size > xfer->seg_size ?
                cpu_to_le32(xfer->seg_size) : cpu_to_le32(transfer_size);
        transfer_size -= xfer->seg_size;
        for (cnt = 1; cnt < xfer->segs; cnt++) {
                xfer_hdr = &xfer->seg[cnt]->xfer_hdr;
                memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
                xfer_hdr->bTransferSegment = cnt;
                xfer_hdr->dwTransferLength = transfer_size > xfer->seg_size ?
                        cpu_to_le32(xfer->seg_size)
                        : cpu_to_le32(transfer_size);
                xfer->seg[cnt]->status = WA_SEG_READY;
                transfer_size -= xfer->seg_size;
        }
        xfer_hdr->bTransferSegment |= 0x80;     /* this is the last segment */
        result = 0;
error_setup_segs:
error_setup_sizes:
        return result;
}
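
/*
 * Worked example (illustrative numbers): a 20000 byte transfer with
 * seg_size 8192 produces three headers with dwTransferLength 8192,
 * 8192 and 3616; the last one carries bTransferSegment = 2 | 0x80.
 */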

/* rpipe->seg_lock is held! */
static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
                           struct wa_seg *seg)
{
        int result;
        result = usb_submit_urb(&seg->urb, GFP_ATOMIC);
        if (result < 0) {
                printk(KERN_ERR "xfer %p#%u: REQ submit failed: %d\n",
                       xfer, seg->index, result);
                goto error_seg_submit;
        }
        if (seg->dto_urb) {
                result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
                if (result < 0) {
                        printk(KERN_ERR "xfer %p#%u: DTO submit failed: %d\n",
                               xfer, seg->index, result);
                        goto error_dto_submit;
                }
        }
        seg->status = WA_SEG_SUBMITTED;
        rpipe_avail_dec(rpipe);
        return 0;

error_dto_submit:
        usb_unlink_urb(&seg->urb);
error_seg_submit:
        seg->status = WA_SEG_ERROR;
        seg->result = result;
        return result;
}

/*
 * Execute more queued request segments until the maximum concurrent allowed
 *
 * The ugly unlock/lock sequence on the error path is needed as the
 * xfer->lock normally nests the seg_lock and not vice versa.
 */
static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)
{
        int result;
        struct device *dev = &rpipe->wa->usb_iface->dev;
        struct wa_seg *seg;
        struct wa_xfer *xfer;
        unsigned long flags;

        spin_lock_irqsave(&rpipe->seg_lock, flags);
        while (atomic_read(&rpipe->segs_available) > 0
              && !list_empty(&rpipe->seg_list)) {
                seg = list_first_entry(&(rpipe->seg_list), struct wa_seg,
                                 list_node);
                list_del(&seg->list_node);
                xfer = seg->xfer;
                result = __wa_seg_submit(rpipe, xfer, seg);
                dev_dbg(dev, "xfer %p#%u submitted from delayed [%d segments available] %d\n",
                        xfer, seg->index, atomic_read(&rpipe->segs_available), result);
                if (unlikely(result < 0)) {
                        spin_unlock_irqrestore(&rpipe->seg_lock, flags);
                        spin_lock_irqsave(&xfer->lock, flags);
                        __wa_xfer_abort(xfer);
                        xfer->segs_done++;
                        spin_unlock_irqrestore(&xfer->lock, flags);
                        spin_lock_irqsave(&rpipe->seg_lock, flags);
                }
        }
        spin_unlock_irqrestore(&rpipe->seg_lock, flags);
}

/*
 * xfer->lock is taken
 *
 * On failure submitting we just stop submitting and return error;
 * wa_urb_enqueue_b() will execute the completion path
 */
static int __wa_xfer_submit(struct wa_xfer *xfer)
{
        int result;
        struct wahc *wa = xfer->wa;
        struct device *dev = &wa->usb_iface->dev;
        unsigned cnt;
        struct wa_seg *seg;
        unsigned long flags;
        struct wa_rpipe *rpipe = xfer->ep->hcpriv;
        size_t maxrequests = le16_to_cpu(rpipe->descr.wRequests);
        u8 available;
        u8 empty;

        spin_lock_irqsave(&wa->xfer_list_lock, flags);
        list_add_tail(&xfer->list_node, &wa->xfer_list);
        spin_unlock_irqrestore(&wa->xfer_list_lock, flags);

        BUG_ON(atomic_read(&rpipe->segs_available) > maxrequests);
        result = 0;
        spin_lock_irqsave(&rpipe->seg_lock, flags);
        for (cnt = 0; cnt < xfer->segs; cnt++) {
                available = atomic_read(&rpipe->segs_available);
                empty = list_empty(&rpipe->seg_list);
                seg = xfer->seg[cnt];
                dev_dbg(dev, "xfer %p#%u: available %u empty %u (%s)\n",
                        xfer, cnt, available, empty,
                        available == 0 || !empty ? "delayed" : "submitted");
                if (available == 0 || !empty) {
                        dev_dbg(dev, "xfer %p#%u: delayed\n", xfer, cnt);
                        seg->status = WA_SEG_DELAYED;
                        list_add_tail(&seg->list_node, &rpipe->seg_list);
                } else {
                        result = __wa_seg_submit(rpipe, xfer, seg);
                        if (result < 0) {
                                __wa_xfer_abort(xfer);
                                goto error_seg_submit;
                        }
                }
                xfer->segs_submitted++;
        }
error_seg_submit:
        spin_unlock_irqrestore(&rpipe->seg_lock, flags);
        return result;
}

/*
 * Second part of a URB/transfer enqueuement
 *
 * Assumes this comes from wa_urb_enqueue() [maybe through
 * wa_urb_enqueue_run()]. At this point:
 *
 * xfer->wa     filled and refcounted
 * xfer->ep     filled with rpipe refcounted if
 *              delayed == 0
 * xfer->urb    filled and refcounted (this is the case when called
 *              from wa_urb_enqueue() as we come from usb_submit_urb()
 *              and when called by wa_urb_enqueue_run(), as we took an
 *              extra ref dropped by _run() after we return).
 * xfer->gfp    filled
 *
 * If we fail at __wa_xfer_submit(), then we just check if we are done
 * and if so, we run the completion procedure. However, if we are not
 * yet done, we do nothing and wait for the completion handlers from
 * the submitted URBs or from the xfer-result path to kick in. If xfer
 * result never kicks in, the xfer will timeout from the USB code and
 * dequeue() will be called.
 */
static void wa_urb_enqueue_b(struct wa_xfer *xfer)
{
        int result;
        unsigned long flags;
        struct urb *urb = xfer->urb;
        struct wahc *wa = xfer->wa;
        struct wusbhc *wusbhc = wa->wusb;
        struct wusb_dev *wusb_dev;
        unsigned done;

        result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp);
        if (result < 0)
                goto error_rpipe_get;
        result = -ENODEV;
        /* FIXME: segmentation broken -- kills DWA */
        mutex_lock(&wusbhc->mutex);             /* get a WUSB dev */
        if (urb->dev == NULL) {
                mutex_unlock(&wusbhc->mutex);
                goto error_dev_gone;
        }
        wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);
        if (wusb_dev == NULL) {
                mutex_unlock(&wusbhc->mutex);
                goto error_dev_gone;
        }
        mutex_unlock(&wusbhc->mutex);

        spin_lock_irqsave(&xfer->lock, flags);
        xfer->wusb_dev = wusb_dev;
        result = urb->status;
        if (urb->status != -EINPROGRESS)
                goto error_dequeued;

        result = __wa_xfer_setup(xfer, urb);
        if (result < 0)
                goto error_xfer_setup;
        result = __wa_xfer_submit(xfer);
        if (result < 0)
                goto error_xfer_submit;
        spin_unlock_irqrestore(&xfer->lock, flags);
        return;

        /* this is basically wa_xfer_completion() broken up: wa_xfer_giveback()
         * does a wa_xfer_put() that will call wa_xfer_destroy() and clean
         * up. */
error_xfer_setup:
error_dequeued:
        spin_unlock_irqrestore(&xfer->lock, flags);
        /* FIXME: segmentation broken, kills DWA */
        if (wusb_dev)
                wusb_dev_put(wusb_dev);
error_dev_gone:
        rpipe_put(xfer->ep->hcpriv);
error_rpipe_get:
        xfer->result = result;
        wa_xfer_giveback(xfer);
        return;

error_xfer_submit:
        done = __wa_xfer_is_done(xfer);
        xfer->result = result;
        spin_unlock_irqrestore(&xfer->lock, flags);
        if (done)
                wa_xfer_completion(xfer);
}

/*
 * Execute the delayed transfers in the Wire Adapter @wa
 *
 * We need to be careful here, as dequeue() could be called in the
 * middle. That's why we do the whole thing under the
 * wa->xfer_list_lock. If dequeue() jumps in, it first locks xfer->lock
 * and then checks the list -- so as we would be acquiring in inverse
 * order, we move the delayed list to a separate list while locked and then
 * submit them without the list lock held.
 */
void wa_urb_enqueue_run(struct work_struct *ws)
{
        struct wahc *wa = container_of(ws, struct wahc, xfer_enqueue_work);
        struct wa_xfer *xfer, *next;
        struct urb *urb;
        LIST_HEAD(tmp_list);

        /* Create a copy of the wa->xfer_delayed_list while holding the lock */
        spin_lock_irq(&wa->xfer_list_lock);
        list_cut_position(&tmp_list, &wa->xfer_delayed_list,
                        wa->xfer_delayed_list.prev);
        spin_unlock_irq(&wa->xfer_list_lock);

        /*
         * enqueue from temp list without list lock held since wa_urb_enqueue_b
         * can take xfer->lock as well as lock mutexes.
         */
        list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
                list_del_init(&xfer->list_node);

                urb = xfer->urb;
                wa_urb_enqueue_b(xfer);
                usb_put_urb(urb);       /* taken when queuing */
        }
}
EXPORT_SYMBOL_GPL(wa_urb_enqueue_run);

/*
 * Process the errored transfers on the Wire Adapter outside of interrupt.
 */
void wa_process_errored_transfers_run(struct work_struct *ws)
{
        struct wahc *wa = container_of(ws, struct wahc, xfer_error_work);
        struct wa_xfer *xfer, *next;
        LIST_HEAD(tmp_list);

        pr_info("%s: Run delayed STALL processing.\n", __func__);

        /* Create a copy of the wa->xfer_errored_list while holding the lock */
        spin_lock_irq(&wa->xfer_list_lock);
        list_cut_position(&tmp_list, &wa->xfer_errored_list,
                        wa->xfer_errored_list.prev);
        spin_unlock_irq(&wa->xfer_list_lock);

        /*
         * run rpipe_clear_feature_stalled from temp list without list lock
         * held.
         */
        list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
                struct usb_host_endpoint *ep;
                unsigned long flags;
                struct wa_rpipe *rpipe;

                spin_lock_irqsave(&xfer->lock, flags);
                ep = xfer->ep;
                rpipe = ep->hcpriv;
                spin_unlock_irqrestore(&xfer->lock, flags);

                /* clear RPIPE feature stalled without holding a lock. */
                rpipe_clear_feature_stalled(wa, ep);

                /* complete the xfer. This removes it from the tmp list. */
                wa_xfer_completion(xfer);

                /* check for work. */
                wa_xfer_delayed_run(rpipe);
        }
}
EXPORT_SYMBOL_GPL(wa_process_errored_transfers_run);

/*
 * Submit a transfer to the Wire Adapter in a delayed way
 *
 * The process of enqueuing involves possible sleeps() [see
 * wa_urb_enqueue_b(), for the rpipe_get() and the mutex_lock()]. If we
 * are in an atomic section, we defer the enqueue_b() call--else we
 * call it directly.
 *
 * @urb: We own a reference to it done by the HCI Linux USB stack that
 *       will be given up by calling usb_hcd_giveback_urb() or by
 *       returning error from this function -> ergo we don't have to
 *       drop it.
 */
int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
                   struct urb *urb, gfp_t gfp)
{
        int result;
        struct device *dev = &wa->usb_iface->dev;
        struct wa_xfer *xfer;
        unsigned long my_flags;
        unsigned cant_sleep = irqs_disabled() | in_atomic();

        if ((urb->transfer_buffer == NULL)
            && (urb->sg == NULL)
            && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
            && urb->transfer_buffer_length != 0) {
                dev_err(dev, "BUG? urb %p: NULL xfer buffer & NODMA\n", urb);
                dump_stack();
        }

        result = -ENOMEM;
        xfer = kzalloc(sizeof(*xfer), gfp);
        if (xfer == NULL)
                goto error_kmalloc;
        result = -ENOENT;
        if (urb->status != -EINPROGRESS)        /* cancelled */
                goto error_dequeued;            /* before starting? */
        wa_xfer_init(xfer);
        xfer->wa = wa_get(wa);
        xfer->urb = urb;
        xfer->gfp = gfp;
        xfer->ep = ep;
        urb->hcpriv = xfer;

        dev_dbg(dev, "xfer %p urb %p pipe 0x%02x [%d bytes] %s %s %s\n",
                xfer, urb, urb->pipe, urb->transfer_buffer_length,
                urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? "dma" : "nodma",
                urb->pipe & USB_DIR_IN ? "inbound" : "outbound",
                cant_sleep ? "deferred" : "inline");

        if (cant_sleep) {
                usb_get_urb(urb);
                spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
                list_add_tail(&xfer->list_node, &wa->xfer_delayed_list);
                spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
                queue_work(wusbd, &wa->xfer_enqueue_work);
        } else {
                wa_urb_enqueue_b(xfer);
        }
        return 0;

error_dequeued:
        kfree(xfer);
error_kmalloc:
        return result;
}
EXPORT_SYMBOL_GPL(wa_urb_enqueue);
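
/*
 * Caller sketch (assumption: roughly what the HWA host glue does in
 * its urb_enqueue op in hwa-hc.c; simplified):
 *
 *      static int hwahc_op_urb_enqueue(struct usb_hcd *hcd,
 *                                      struct urb *urb, gfp_t gfp)
 *      {
 *              struct wusbhc *wusbhc = usb_hcd_to_wusbhc(hcd);
 *              struct hwahc *hwahc =
 *                      container_of(wusbhc, struct hwahc, wusbhc);
 *
 *              return wa_urb_enqueue(&hwahc->wa, urb->ep, urb, gfp);
 *      }
 */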

/*
 * Dequeue a URB and make sure uwb_hcd_giveback_urb() [completion
 * handler] is called.
 *
 * Until a transfer goes successfully through wa_urb_enqueue() it
 * needs to be dequeued with completion calling; when stuck in delayed
 * or before wa_xfer_setup() is called, we need to do completion.
 *
 *  not setup  If there is no hcpriv yet, that means that the enqueue
 *             still had no time to set the xfer up. Because
 *             urb->status should be other than -EINPROGRESS,
 *             enqueue() will catch that and bail out.
 *
 * If the transfer has gone through setup, we just need to clean it
 * up. If it has gone through submit(), we have to abort it [with an
 * asynch request] and then make sure we cancel each segment.
 */
int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
{
        unsigned long flags, flags2;
        struct wa_xfer *xfer;
        struct wa_seg *seg;
        struct wa_rpipe *rpipe;
        unsigned cnt;
        unsigned rpipe_ready = 0;

        xfer = urb->hcpriv;
        if (xfer == NULL) {
                /*
                 * Nothing setup yet; enqueue will see urb->status !=
                 * -EINPROGRESS (by hcd layer) and bail out with
                 * error, no need to do completion
                 */
                BUG_ON(urb->status == -EINPROGRESS);
                goto out;
        }
        spin_lock_irqsave(&xfer->lock, flags);
        rpipe = xfer->ep->hcpriv;
        /* Check the delayed list -> if there, release and complete */
        spin_lock_irqsave(&wa->xfer_list_lock, flags2);
        if (!list_empty(&xfer->list_node) && xfer->seg == NULL)
                goto dequeue_delayed;
        spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
        if (xfer->seg == NULL)          /* still hasn't reached */
                goto out_unlock;        /* setup(), enqueue_b() completes */
        /* Ok, the xfer is in flight already, it's been setup and submitted.*/
        __wa_xfer_abort(xfer);
        for (cnt = 0; cnt < xfer->segs; cnt++) {
                seg = xfer->seg[cnt];
                switch (seg->status) {
                case WA_SEG_NOTREADY:
                case WA_SEG_READY:
                        printk(KERN_ERR "xfer %p#%u: dequeue bad state %u\n",
                               xfer, cnt, seg->status);
                        WARN_ON(1);
                        break;
                case WA_SEG_DELAYED:
                        seg->status = WA_SEG_ABORTED;
                        spin_lock_irqsave(&rpipe->seg_lock, flags2);
                        list_del(&seg->list_node);
                        xfer->segs_done++;
                        rpipe_ready = rpipe_avail_inc(rpipe);
                        spin_unlock_irqrestore(&rpipe->seg_lock, flags2);
                        break;
                case WA_SEG_SUBMITTED:
                        seg->status = WA_SEG_ABORTED;
                        usb_unlink_urb(&seg->urb);
                        if (xfer->is_inbound == 0)
                                usb_unlink_urb(seg->dto_urb);
                        xfer->segs_done++;
                        rpipe_ready = rpipe_avail_inc(rpipe);
                        break;
                case WA_SEG_PENDING:
                        seg->status = WA_SEG_ABORTED;
                        xfer->segs_done++;
                        rpipe_ready = rpipe_avail_inc(rpipe);
                        break;
                case WA_SEG_DTI_PENDING:
                        usb_unlink_urb(wa->dti_urb);
                        seg->status = WA_SEG_ABORTED;
                        xfer->segs_done++;
                        rpipe_ready = rpipe_avail_inc(rpipe);
                        break;
                case WA_SEG_DONE:
                case WA_SEG_ERROR:
                case WA_SEG_ABORTED:
                        break;
                }
        }
        xfer->result = urb->status;     /* -ENOENT or -ECONNRESET */
        __wa_xfer_is_done(xfer);
        spin_unlock_irqrestore(&xfer->lock, flags);
        wa_xfer_completion(xfer);
        if (rpipe_ready)
                wa_xfer_delayed_run(rpipe);
        return 0;

out_unlock:
        spin_unlock_irqrestore(&xfer->lock, flags);
out:
        return 0;

dequeue_delayed:
        list_del_init(&xfer->list_node);
        spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
        xfer->result = urb->status;
        spin_unlock_irqrestore(&xfer->lock, flags);
        wa_xfer_giveback(xfer);
        usb_put_urb(urb);       /* we got a ref in enqueue() */
        return 0;
}
EXPORT_SYMBOL_GPL(wa_urb_dequeue);
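
/*
 * Caller sketch (assumption: simplified from the HWA host glue's
 * urb_dequeue op in hwa-hc.c):
 *
 *      static int hwahc_op_urb_dequeue(struct usb_hcd *hcd,
 *                                      struct urb *urb, int status)
 *      {
 *              struct wusbhc *wusbhc = usb_hcd_to_wusbhc(hcd);
 *              struct hwahc *hwahc =
 *                      container_of(wusbhc, struct hwahc, wusbhc);
 *
 *              return wa_urb_dequeue(&hwahc->wa, urb);
 *      }
 */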

/*
 * Translation from WA status codes (WUSB1.0 Table 8.15) to errno
 * values.
 *
 * Positive errno values are internal inconsistencies and should be
 * flagged louder. Negative are to be passed up to the user in the
 * normal way.
 *
 * @status: USB WA status code -- high two bits are stripped.
 */
static int wa_xfer_status_to_errno(u8 status)
{
        int errno;
        u8 real_status = status;
        static int xlat[] = {
                [WA_XFER_STATUS_SUCCESS] = 0,
                [WA_XFER_STATUS_HALTED] = -EPIPE,
                [WA_XFER_STATUS_DATA_BUFFER_ERROR] = -ENOBUFS,
                [WA_XFER_STATUS_BABBLE] = -EOVERFLOW,
                [WA_XFER_RESERVED] = EINVAL,
                [WA_XFER_STATUS_NOT_FOUND] = 0,
                [WA_XFER_STATUS_INSUFFICIENT_RESOURCE] = -ENOMEM,
                [WA_XFER_STATUS_TRANSACTION_ERROR] = -EILSEQ,
                [WA_XFER_STATUS_ABORTED] = -EINTR,
                [WA_XFER_STATUS_RPIPE_NOT_READY] = EINVAL,
                [WA_XFER_INVALID_FORMAT] = EINVAL,
                [WA_XFER_UNEXPECTED_SEGMENT_NUMBER] = EINVAL,
                [WA_XFER_STATUS_RPIPE_TYPE_MISMATCH] = EINVAL,
        };
        status &= 0x3f;

        if (status == 0)
                return 0;
        if (status >= ARRAY_SIZE(xlat)) {
                printk_ratelimited(KERN_ERR "%s(): BUG? "
                        "Unknown WA transfer status 0x%02x\n",
                        __func__, real_status);
                return -EINVAL;
        }
        errno = xlat[status];
        if (unlikely(errno > 0)) {
                printk_ratelimited(KERN_ERR "%s(): BUG? "
                        "Inconsistent WA status: 0x%02x\n",
                        __func__, real_status);
                errno = -errno;
        }
        return errno;
}
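
/*
 * Example (illustrative; assumes WA_XFER_STATUS_HALTED == 1 per
 * WUSB1.0 Table 8.15): a bTransferStatus of 0x81 has the error bit
 * (0x80) set and its low bits (0x01, HALTED) translate to -EPIPE;
 * a status of 0x42 only has the warning bit (0x40) set and is
 * treated as success by the caller (see wa_xfer_result_chew()).
 */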

/*
 * Process a xfer result completion message
 *
 * inbound transfers: need to schedule a DTI read
 *
 * FIXME: this function needs to be broken up in parts
 */
static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer)
{
        int result;
        struct device *dev = &wa->usb_iface->dev;
        unsigned long flags;
        u8 seg_idx;
        struct wa_seg *seg;
        struct wa_rpipe *rpipe;
        struct wa_xfer_result *xfer_result = wa->xfer_result;
        u8 done = 0;
        u8 usb_status;
        unsigned rpipe_ready = 0;

        spin_lock_irqsave(&xfer->lock, flags);
        seg_idx = xfer_result->bTransferSegment & 0x7f;
        if (unlikely(seg_idx >= xfer->segs))
                goto error_bad_seg;
        seg = xfer->seg[seg_idx];
        rpipe = xfer->ep->hcpriv;
        usb_status = xfer_result->bTransferStatus;
        dev_dbg(dev, "xfer %p#%u: bTransferStatus 0x%02x (seg status %u)\n",
                xfer, seg_idx, usb_status, seg->status);
        if (seg->status == WA_SEG_ABORTED
            || seg->status == WA_SEG_ERROR)     /* already handled */
                goto segment_aborted;
        if (seg->status == WA_SEG_SUBMITTED)    /* ops, got here */
                seg->status = WA_SEG_PENDING;   /* before wa_seg{_dto}_cb() */
        if (seg->status != WA_SEG_PENDING) {
                if (printk_ratelimit())
                        dev_err(dev, "xfer %p#%u: Bad segment state %u\n",
                                xfer, seg_idx, seg->status);
                seg->status = WA_SEG_PENDING;   /* workaround/"fix" it */
        }
        if (usb_status & 0x80) {
                seg->result = wa_xfer_status_to_errno(usb_status);
                dev_err(dev, "DTI: xfer %p#:%08X:%u failed (0x%02x)\n",
                        xfer, xfer->id, seg->index, usb_status);
                goto error_complete;
        }
        /* FIXME: we ignore warnings, tally them for stats */
        if (usb_status & 0x40)          /* Warning?... */
                usb_status = 0;         /* ... pass */
        if (xfer->is_inbound) { /* IN data phase: read to buffer */
                seg->status = WA_SEG_DTI_PENDING;
                BUG_ON(wa->buf_in_urb->status == -EINPROGRESS);
                /* this should always be 0 before a resubmit. */
                wa->buf_in_urb->num_mapped_sgs = 0;

                if (xfer->is_dma) {
                        wa->buf_in_urb->transfer_dma =
                                xfer->urb->transfer_dma
                                + (seg_idx * xfer->seg_size);
                        wa->buf_in_urb->transfer_flags
                                |= URB_NO_TRANSFER_DMA_MAP;
                        wa->buf_in_urb->transfer_buffer = NULL;
                        wa->buf_in_urb->sg = NULL;
                        wa->buf_in_urb->num_sgs = 0;
                } else {
                        /* do buffer or SG processing. */
                        wa->buf_in_urb->transfer_flags
                                &= ~URB_NO_TRANSFER_DMA_MAP;

                        if (xfer->urb->transfer_buffer) {
                                wa->buf_in_urb->transfer_buffer =
                                        xfer->urb->transfer_buffer
                                        + (seg_idx * xfer->seg_size);
                                wa->buf_in_urb->sg = NULL;
                                wa->buf_in_urb->num_sgs = 0;
                        } else {
                                /* allocate an SG list to store seg_size bytes
                                   and copy the subset of the xfer->urb->sg
                                   that matches the buffer subset we are
                                   about to read. */
                                wa->buf_in_urb->sg = wa_xfer_create_subset_sg(
                                        xfer->urb->sg,
                                        seg_idx * xfer->seg_size,
                                        le32_to_cpu(
                                                xfer_result->dwTransferLength),
                                        &(wa->buf_in_urb->num_sgs));

                                if (!(wa->buf_in_urb->sg)) {
                                        wa->buf_in_urb->num_sgs = 0;
                                        goto error_sg_alloc;
                                }
                                wa->buf_in_urb->transfer_buffer = NULL;
                        }
                }
                wa->buf_in_urb->transfer_buffer_length =
                        le32_to_cpu(xfer_result->dwTransferLength);
                wa->buf_in_urb->context = seg;
                result = usb_submit_urb(wa->buf_in_urb, GFP_ATOMIC);
                if (result < 0)
                        goto error_submit_buf_in;
        } else {
                /* OUT data phase, complete it -- */
                seg->status = WA_SEG_DONE;
                seg->result = le32_to_cpu(xfer_result->dwTransferLength);
                xfer->segs_done++;
                rpipe_ready = rpipe_avail_inc(rpipe);
                done = __wa_xfer_is_done(xfer);
        }
        spin_unlock_irqrestore(&xfer->lock, flags);
        if (done)
                wa_xfer_completion(xfer);
        if (rpipe_ready)
                wa_xfer_delayed_run(rpipe);
        return;

error_submit_buf_in:
        if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
                dev_err(dev, "DTI: URB max acceptable errors "
                        "exceeded, resetting device\n");
                wa_reset_all(wa);
        }
        if (printk_ratelimit())
                dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n",
                        xfer, seg_idx, result);
        seg->result = result;
        kfree(wa->buf_in_urb->sg);
error_sg_alloc:
        __wa_xfer_abort(xfer);
error_complete:
        seg->status = WA_SEG_ERROR;
        xfer->segs_done++;
        rpipe_ready = rpipe_avail_inc(rpipe);
        done = __wa_xfer_is_done(xfer);
        /*
         * queue work item to clear STALL for control endpoints.
         * Otherwise, let endpoint_reset take care of it.
         */
        if (((usb_status & 0x3f) == WA_XFER_STATUS_HALTED) &&
            usb_endpoint_xfer_control(&xfer->ep->desc) &&
            done) {
                dev_info(dev, "Control EP stall. Queue delayed work.\n");
                spin_lock_irq(&wa->xfer_list_lock);
                /* remove xfer from xfer_list. */
                list_del(&xfer->list_node);
                /* add xfer to xfer_errored_list. */
                list_add_tail(&xfer->list_node, &wa->xfer_errored_list);
                spin_unlock_irq(&wa->xfer_list_lock);
                spin_unlock_irqrestore(&xfer->lock, flags);
                queue_work(wusbd, &wa->xfer_error_work);
        } else {
                spin_unlock_irqrestore(&xfer->lock, flags);
                if (done)
                        wa_xfer_completion(xfer);
                if (rpipe_ready)
                        wa_xfer_delayed_run(rpipe);
        }
        return;

error_bad_seg:
        spin_unlock_irqrestore(&xfer->lock, flags);
        wa_urb_dequeue(wa, xfer->urb);
        if (printk_ratelimit())
                dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx);
        if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
                dev_err(dev, "DTI: URB max acceptable errors "
                        "exceeded, resetting device\n");
                wa_reset_all(wa);
        }
        return;

segment_aborted:
        /* nothing to do, as the aborter did the completion */
        spin_unlock_irqrestore(&xfer->lock, flags);
}

/*
 * Callback for the IN data phase
 *
 * If successful transition state; otherwise, take a note of the
 * error, mark this segment done and try completion.
 *
 * Note we don't access until we are sure that the transfer hasn't
 * been cancelled (ECONNRESET, ENOENT), which could mean that
 * seg->xfer could be already gone.
 */
static void wa_buf_in_cb(struct urb *urb)
{
        struct wa_seg *seg = urb->context;
        struct wa_xfer *xfer = seg->xfer;
        struct wahc *wa;
        struct device *dev;
        struct wa_rpipe *rpipe;
        unsigned rpipe_ready;
        unsigned long flags;
        u8 done = 0;

        /* free the sg if it was used. */
        kfree(urb->sg);
        urb->sg = NULL;

        switch (urb->status) {
        case 0:
                spin_lock_irqsave(&xfer->lock, flags);
                wa = xfer->wa;
                dev = &wa->usb_iface->dev;
                rpipe = xfer->ep->hcpriv;
                dev_dbg(dev, "xfer %p#%u: data in done (%zu bytes)\n",
                        xfer, seg->index, (size_t)urb->actual_length);
                seg->status = WA_SEG_DONE;
                seg->result = urb->actual_length;
                xfer->segs_done++;
                rpipe_ready = rpipe_avail_inc(rpipe);
                done = __wa_xfer_is_done(xfer);
                spin_unlock_irqrestore(&xfer->lock, flags);
                if (done)
                        wa_xfer_completion(xfer);
                if (rpipe_ready)
                        wa_xfer_delayed_run(rpipe);
                break;
        case -ECONNRESET:       /* URB unlinked; no need to do anything */
        case -ENOENT:           /* as it was done by the who unlinked us */
                break;
        default:                /* Other errors ... */
                spin_lock_irqsave(&xfer->lock, flags);
                wa = xfer->wa;
                dev = &wa->usb_iface->dev;
                rpipe = xfer->ep->hcpriv;
                if (printk_ratelimit())
                        dev_err(dev, "xfer %p#%u: data in error %d\n",
                                xfer, seg->index, urb->status);
                if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
                            EDC_ERROR_TIMEFRAME)) {
                        dev_err(dev, "DTO: URB max acceptable errors "
                                "exceeded, resetting device\n");
                        wa_reset_all(wa);
                }
                seg->status = WA_SEG_ERROR;
                seg->result = urb->status;
                xfer->segs_done++;
                rpipe_ready = rpipe_avail_inc(rpipe);
                __wa_xfer_abort(xfer);
                done = __wa_xfer_is_done(xfer);
                spin_unlock_irqrestore(&xfer->lock, flags);
                if (done)
                        wa_xfer_completion(xfer);
                if (rpipe_ready)
                        wa_xfer_delayed_run(rpipe);
        }
}

/*
 * Handle an incoming transfer result buffer
 *
 * Given a transfer result buffer, it completes the transfer (possibly
 * scheduling and buffer in read) and then resubmits the DTI URB for a
 * new transfer result read.
 *
 * The xfer_result DTI URB state machine
 *
 * States: OFF | RXR (Read-Xfer-Result) | RBI (Read-Buffer-In)
 *
 * We start in OFF mode, the first xfer_result notification [through
 * wa_handle_notif_xfer()] moves us to RXR by posting the DTI-URB to
 * read.
 *
 * We receive a buffer -- if it is not a xfer_result, we complain and
 * repost the DTI-URB. If it is a xfer_result then do the xfer seg
 * request accounting. If it is an IN segment, we move to RBI and post
 * a BUF-IN-URB to the right buffer. The BUF-IN-URB callback will
 * repost the DTI-URB and move to RXR state. If there was no IN
 * segment, it will repost the DTI-URB.
 *
 * We go back to OFF when we detect a ENOENT or ESHUTDOWN (or too many
 * errors) in the URBs.
 */
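
/*
 * Transition sketch of the above (informal summary):
 *
 *      OFF --first notification--> RXR
 *      RXR --xfer_result, OUT seg--> RXR       (repost DTI-URB)
 *      RXR --xfer_result, IN seg--> RBI        (post BUF-IN-URB)
 *      RBI --buf_in done--> RXR                (repost DTI-URB)
 *      any --ENOENT/ESHUTDOWN/too many errors--> OFF
 */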

static void wa_xfer_result_cb(struct urb *urb)
{
        int result;
        struct wahc *wa = urb->context;
        struct device *dev = &wa->usb_iface->dev;
        struct wa_xfer_result *xfer_result;
        u32 xfer_id;
        struct wa_xfer *xfer;
        u8 usb_status;

        BUG_ON(wa->dti_urb != urb);
        switch (wa->dti_urb->status) {
        case 0:
                /* We have a xfer result buffer; check it */
                dev_dbg(dev, "DTI: xfer result %d bytes at %p\n",
                        urb->actual_length, urb->transfer_buffer);
                if (wa->dti_urb->actual_length != sizeof(*xfer_result)) {
                        dev_err(dev, "DTI Error: xfer result--bad size "
                                "xfer result (%d bytes vs %zu needed)\n",
                                urb->actual_length, sizeof(*xfer_result));
                        break;
                }
                xfer_result = wa->xfer_result;
                if (xfer_result->hdr.bLength != sizeof(*xfer_result)) {
                        dev_err(dev, "DTI Error: xfer result--"
                                "bad header length %u\n",
                                xfer_result->hdr.bLength);
                        break;
                }
                if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) {
                        dev_err(dev, "DTI Error: xfer result--"
                                "bad header type 0x%02x\n",
                                xfer_result->hdr.bNotifyType);
                        break;
                }
                usb_status = xfer_result->bTransferStatus & 0x3f;
                if (usb_status == WA_XFER_STATUS_ABORTED
                    || usb_status == WA_XFER_STATUS_NOT_FOUND)
                        /* taken care of already */
                        break;
                xfer_id = xfer_result->dwTransferID;
                xfer = wa_xfer_get_by_id(wa, xfer_id);
                if (xfer == NULL) {
                        /* FIXME: transaction might have been cancelled */
                        dev_err(dev, "DTI Error: xfer result--"
                                "unknown xfer 0x%08x (status 0x%02x)\n",
                                xfer_id, usb_status);
                        break;
                }
                wa_xfer_result_chew(wa, xfer);
                wa_xfer_put(xfer);
                break;
        case -ENOENT:           /* (we killed the URB)...so, no broadcast */
        case -ESHUTDOWN:        /* going away! */
                dev_dbg(dev, "DTI: going down! %d\n", urb->status);
                goto out;
        default:
                /* Unknown error */
                if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS,
                            EDC_ERROR_TIMEFRAME)) {
                        dev_err(dev, "DTI: URB max acceptable errors "
                                "exceeded, resetting device\n");
                        wa_reset_all(wa);
                        goto out;
                }
                if (printk_ratelimit())
                        dev_err(dev, "DTI: URB error %d\n", urb->status);
                break;
        }
        /* Resubmit the DTI URB */
        result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
        if (result < 0) {
                dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
                        "resetting\n", result);
                wa_reset_all(wa);
        }
out:
        return;
}

/*
 * Transfer complete notification
 *
 * Called from the notif.c code. We get a notification on EP2 saying
 * that some endpoint has some transfer result data available. We are
 * ready to process it.
 *
 * To speed up things, we always have a URB reading the DTI URB; we
 * don't really set it up and start it until the first xfer complete
 * notification arrives, which is what we do here.
 *
 * Follow up in wa_xfer_result_cb(), as that's where the whole state
 * machine starts.
 *
 * So here we just initialize the DTI URB for reading transfer result
 * notifications and also the buffer-in URB, for reading buffers. Then
 * we just submit the DTI URB.
 *
 * @wa shall be referenced
 */
void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)
{
        int result;
        struct device *dev = &wa->usb_iface->dev;
        struct wa_notif_xfer *notif_xfer;
        const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;

        notif_xfer = container_of(notif_hdr, struct wa_notif_xfer, hdr);
        BUG_ON(notif_hdr->bNotifyType != WA_NOTIF_TRANSFER);

        if ((0x80 | notif_xfer->bEndpoint) != dti_epd->bEndpointAddress) {
                /* FIXME: hardcoded limitation, adapt */
                dev_err(dev, "BUG: DTI ep is %u, not %u (hack me)\n",
                        notif_xfer->bEndpoint, dti_epd->bEndpointAddress);
                goto error;
        }
        if (wa->dti_urb != NULL)        /* DTI URB already started */
                goto out;

        wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL);
        if (wa->dti_urb == NULL) {
                dev_err(dev, "Can't allocate DTI URB\n");
                goto error_dti_urb_alloc;
        }
        usb_fill_bulk_urb(
                wa->dti_urb, wa->usb_dev,
                usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
                wa->xfer_result, wa->xfer_result_size,
                wa_xfer_result_cb, wa);

        wa->buf_in_urb = usb_alloc_urb(0, GFP_KERNEL);
        if (wa->buf_in_urb == NULL) {
                dev_err(dev, "Can't allocate BUF-IN URB\n");
                goto error_buf_in_urb_alloc;
        }
        usb_fill_bulk_urb(
                wa->buf_in_urb, wa->usb_dev,
                usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
                NULL, 0, wa_buf_in_cb, wa);
        result = usb_submit_urb(wa->dti_urb, GFP_KERNEL);
        if (result < 0) {
                dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
                        "resetting\n", result);
                goto error_dti_urb_submit;
        }
out:
        return;

error_dti_urb_submit:
        usb_put_urb(wa->buf_in_urb);
        wa->buf_in_urb = NULL;
error_buf_in_urb_alloc:
        usb_put_urb(wa->dti_urb);
        wa->dti_urb = NULL;
error_dti_urb_alloc:
error:
        wa_reset_all(wa);
}