3 * Data transfer and URB enqueuing
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
23 * How transfers work: get a buffer, break it up into segments (segment
24 * size is a multiple of the maxpacket size). For each segment issue a
25 * segment request (struct wa_xfer_*), then send the data buffer if
26 * out or nothing if in (all over the DTO endpoint).
28 * For each submitted segment request, a notification will come over
29 * the NEP endpoint and a transfer result (struct xfer_result) will
30 * arrive in the DTI URB. Read it, get the xfer ID, see if there is
31 * data coming (inbound transfer), schedule a read and handle it.
33 * Sounds simple; it is a pain to implement.
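 *
 * For example (illustrative numbers only): a 100 KiB bulk OUT URB with
 * a 16 KiB segment size becomes DIV_ROUND_UP(100, 16) = 7 segment
 * requests; for each one we send a struct wa_xfer_bi header plus up to
 * 16 KiB of data over DTO, and later get a notification over NEP plus
 * a struct xfer_result over DTI telling us how that segment went. For
 * an IN transfer there is no data to send; instead we schedule a read
 * of the segment data when its xfer_result arrives.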
40 * LIFE CYCLE / STATE DIAGRAM
44 * THIS CODE IS DISGUSTING
46 * Warned you are; it's my second try and still not happy with it.
52 * - Supports DMA xfers, control, bulk and maybe interrupt
54 * - Does not recycle unused rpipes
56 * An rpipe is assigned to an endpoint the first time it is used,
57 * and then it's there, assigned, until the endpoint is disabled
58 * (destroyed [{h,d}wahc_op_ep_disable()]). The assignment of the
59 * rpipe to the endpoint is done under the wa->rpipe_sem semaphore
60 * (should be a mutex).
62 * There are two ways it could be done:
64 * (a) set up a timer every time an rpipe's use count drops to 1
65 * (which means unused) or when a transfer ends. Reset the
66 * timer when a xfer is queued. If the timer expires, release
67 * the rpipe [see rpipe_ep_disable()]; a rough sketch follows the list.
69 * (b) when looking for free rpipes to attach [rpipe_get_by_ep()],
70 * when none are found, go over the list, check their endpoint
71 * and their activity record (if no last-xfer-done-ts in the
72 * last x seconds), take it
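 *
 *     A rough sketch of (a); none of this is implemented and the field
 *     and constant names below are made up:
 *
 *         on use-count drop to 1 or transfer end:
 *             mod_timer(&rpipe->idle_timer, jiffies + RPIPE_IDLE_TIMEOUT);
 *         on queueing a new xfer on the rpipe:
 *             del_timer(&rpipe->idle_timer);
 *         timer handler:
 *             if the rpipe is still unused, release it through the same
 *             path as rpipe_ep_disable().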
74 * However, because we have a set of limited
75 * resources (max-segments-at-the-same-time per xfer,
76 * xfers-per-rpipe, blocks-per-rpipe, rpipes-per-host), in the end
77 * we are going to have to rebuild all this around a scheduler,
78 * where we keep a list of transactions to do and, based on the
79 * availability of the different required components (blocks,
80 * rpipes, segment slots, etc), schedule them. Painful.
82 #include <linux/init.h>
83 #include <linux/spinlock.h>
84 #include <linux/slab.h>
85 #include <linux/hash.h>
86 #include <linux/ratelimit.h>
87 #include <linux/export.h>
88 #include <linux/scatterlist.h>
109 static void wa_xfer_delayed_run(struct wa_rpipe *);
112 * Life cycle governed by 'struct urb' (the refcount of the struct is
113 * that of the 'struct urb' and usb_free_urb() would free the whole struct).
117 struct urb tr_urb; /* transfer request urb. */
118 struct urb *dto_urb; /* for data output. */
119 struct list_head list_node; /* for rpipe->req_list */
120 struct wa_xfer *xfer; /* out xfer */
121 u8 index; /* which segment we are */
122 enum wa_seg_status status;
123 ssize_t result; /* bytes xfered or error */
124 struct wa_xfer_hdr xfer_hdr;
125 u8 xfer_extra[]; /* xtra space for xfer_hdr_ctl */
128 static inline void wa_seg_init(struct wa_seg *seg)
130 usb_init_urb(&seg->tr_urb);
132 /* set the remaining memory to 0. */
133 memset(((void *)seg) + sizeof(seg->tr_urb), 0,
134 sizeof(*seg) - sizeof(seg->tr_urb));
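/*
 * Note on the lifetime trick used here (and by struct wa_xfer_abort_buffer
 * below): the struct urb is deliberately the first member of the
 * containing structure and is initialized with usb_init_urb(), so the
 * final usb_put_urb()/usb_free_urb() kfree()s the urb pointer, which is
 * also the pointer to the whole allocation. A minimal sketch of the
 * pattern (illustrative; 'struct foo' is made up):
 *
 *	struct foo {
 *		struct urb urb;		<- must be the first member
 *		int payload;
 *	} *foo = kmalloc(sizeof(*foo), GFP_KERNEL);
 *	usb_init_urb(&foo->urb);	<- refcount = 1
 *	...
 *	usb_put_urb(&foo->urb);		<- last put frees foo as a whole
 */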
138 * Protected by xfer->lock
143 struct list_head list_node;
147 struct wahc *wa; /* Wire adapter we are plugged to */
148 struct usb_host_endpoint *ep;
149 struct urb *urb; /* URB we are transferring for */
150 struct wa_seg **seg; /* transfer segments */
151 u8 segs, segs_submitted, segs_done;
152 unsigned is_inbound:1;
157 gfp_t gfp; /* allocation mask */
159 struct wusb_dev *wusb_dev; /* for activity timestamps */
162 static inline void wa_xfer_init(struct wa_xfer *xfer)
164 kref_init(&xfer->refcnt);
165 INIT_LIST_HEAD(&xfer->list_node);
166 spin_lock_init(&xfer->lock);
170 * Destroy a transfer structure
172 * Note that freeing xfer->seg[cnt]->tr_urb will free the containing
173 * xfer->seg[cnt] memory that was allocated by __wa_xfer_setup_segs.
175 static void wa_xfer_destroy(struct kref *_xfer)
177 struct wa_xfer *xfer = container_of(_xfer, struct wa_xfer, refcnt);
180 for (cnt = 0; cnt < xfer->segs; cnt++) {
181 if (xfer->seg[cnt]) {
182 if (xfer->seg[cnt]->dto_urb) {
183 kfree(xfer->seg[cnt]->dto_urb->sg);
184 usb_free_urb(xfer->seg[cnt]->dto_urb);
186 usb_free_urb(&xfer->seg[cnt]->tr_urb);
194 static void wa_xfer_get(struct wa_xfer *xfer)
196 kref_get(&xfer->refcnt);
199 static void wa_xfer_put(struct wa_xfer *xfer)
201 kref_put(&xfer->refcnt, wa_xfer_destroy);
207 * xfer->lock has to be unlocked
209 * We take xfer->lock for setting the result; this is a barrier
210 * against drivers/usb/core/hcd.c:unlink1() being called after we call
211 * usb_hcd_giveback_urb() and wa_urb_dequeue() trying to get a
212 * reference to the transfer.
214 static void wa_xfer_giveback(struct wa_xfer *xfer)
218 spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags);
219 list_del_init(&xfer->list_node);
220 spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags);
221 /* FIXME: segmentation broken -- kills DWA */
222 wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result);
230 * xfer->lock has to be unlocked
232 static void wa_xfer_completion(struct wa_xfer *xfer)
235 wusb_dev_put(xfer->wusb_dev);
236 rpipe_put(xfer->ep->hcpriv);
237 wa_xfer_giveback(xfer);
241 * Initialize a transfer's ID
243 * We need to use a sequential number; if we use the pointer or the
244 * hash of the pointer, it can repeat over sequential transfers and
245 * then it will confuse the HWA....wonder why in hell they put a 32
246 * bit handle in there then.
248 static void wa_xfer_id_init(struct wa_xfer *xfer)
250 xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
253 /* Return the xfer's ID. */
254 static inline u32 wa_xfer_id(struct wa_xfer *xfer)
259 /* Return the xfer's ID in transport format (little endian). */
260 static inline __le32 wa_xfer_id_le32(struct wa_xfer *xfer)
262 return cpu_to_le32(xfer->id);
266 * If transfer is done, wrap it up and return true
268 * xfer->lock has to be locked
270 static unsigned __wa_xfer_is_done(struct wa_xfer *xfer)
272 struct device *dev = &xfer->wa->usb_iface->dev;
273 unsigned result, cnt;
275 struct urb *urb = xfer->urb;
276 unsigned found_short = 0;
278 result = xfer->segs_done == xfer->segs_submitted;
281 urb->actual_length = 0;
282 for (cnt = 0; cnt < xfer->segs; cnt++) {
283 seg = xfer->seg[cnt];
284 switch (seg->status) {
286 if (found_short && seg->result > 0) {
287 dev_dbg(dev, "xfer %p ID %08X#%u: bad short segments (%zu)\n",
288 xfer, wa_xfer_id(xfer), cnt,
290 urb->status = -EINVAL;
293 urb->actual_length += seg->result;
294 if (seg->result < xfer->seg_size
295 && cnt != xfer->segs-1)
297 dev_dbg(dev, "xfer %p ID %08X#%u: DONE short %d "
298 "result %zu urb->actual_length %d\n",
299 xfer, wa_xfer_id(xfer), seg->index, found_short,
300 seg->result, urb->actual_length);
303 xfer->result = seg->result;
304 dev_dbg(dev, "xfer %p ID %08X#%u: ERROR result %zu(0x%08X)\n",
305 xfer, wa_xfer_id(xfer), seg->index, seg->result,
309 dev_dbg(dev, "xfer %p ID %08X#%u ABORTED: result %d\n",
310 xfer, wa_xfer_id(xfer), seg->index,
312 xfer->result = urb->status;
315 dev_warn(dev, "xfer %p ID %08X#%u: is_done bad state %d\n",
316 xfer, wa_xfer_id(xfer), cnt, seg->status);
317 xfer->result = -EINVAL;
327 * Search for a transfer with a given ID on the wire adapter's xfer list
329 * The ID is the sequential number assigned by wa_xfer_id_init(), not a
330 * pointer or a hash of the pointer (see the comment above that function).
332 * @returns NULL if not found.
334 static struct wa_xfer *wa_xfer_get_by_id(struct wahc *wa, u32 id)
337 struct wa_xfer *xfer_itr;
338 spin_lock_irqsave(&wa->xfer_list_lock, flags);
339 list_for_each_entry(xfer_itr, &wa->xfer_list, list_node) {
340 if (id == xfer_itr->id) {
341 wa_xfer_get(xfer_itr);
347 spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
351 struct wa_xfer_abort_buffer {
353 struct wa_xfer_abort cmd;
356 static void __wa_xfer_abort_cb(struct urb *urb)
358 struct wa_xfer_abort_buffer *b = urb->context;
359 usb_put_urb(&b->urb);
363 * Aborts an ongoing transaction
365 * Assumes the transfer is referenced and locked and in a submitted
366 * state (mainly that there is an endpoint/rpipe assigned).
368 * The callback (see above) does nothing but free up the data by
369 * putting the URB. Because the URB is allocated at the head of the
370 * struct, the whole space we allocated is kfreed.
372 static int __wa_xfer_abort(struct wa_xfer *xfer)
374 int result = -ENOMEM;
375 struct device *dev = &xfer->wa->usb_iface->dev;
376 struct wa_xfer_abort_buffer *b;
377 struct wa_rpipe *rpipe = xfer->ep->hcpriv;
379 b = kmalloc(sizeof(*b), GFP_ATOMIC);
382 b->cmd.bLength = sizeof(b->cmd);
383 b->cmd.bRequestType = WA_XFER_ABORT;
384 b->cmd.wRPipe = rpipe->descr.wRPipeIndex;
385 b->cmd.dwTransferID = wa_xfer_id_le32(xfer);
387 usb_init_urb(&b->urb);
388 usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev,
389 usb_sndbulkpipe(xfer->wa->usb_dev,
390 xfer->wa->dto_epd->bEndpointAddress),
391 &b->cmd, sizeof(b->cmd), __wa_xfer_abort_cb, b);
392 result = usb_submit_urb(&b->urb, GFP_ATOMIC);
395 return result; /* callback frees! */
399 if (printk_ratelimit())
400 dev_err(dev, "xfer %p: Can't submit abort request: %d\n",
410 * @returns < 0 on error, transfer segment request size if ok
412 static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
413 enum wa_xfer_type *pxfer_type)
416 struct device *dev = &xfer->wa->usb_iface->dev;
418 struct urb *urb = xfer->urb;
419 struct wa_rpipe *rpipe = xfer->ep->hcpriv;
421 switch (rpipe->descr.bmAttribute & 0x3) {
422 case USB_ENDPOINT_XFER_CONTROL:
423 *pxfer_type = WA_XFER_TYPE_CTL;
424 result = sizeof(struct wa_xfer_ctl);
426 case USB_ENDPOINT_XFER_INT:
427 case USB_ENDPOINT_XFER_BULK:
428 *pxfer_type = WA_XFER_TYPE_BI;
429 result = sizeof(struct wa_xfer_bi);
431 case USB_ENDPOINT_XFER_ISOC:
432 dev_err(dev, "FIXME: ISOC not implemented\n");
438 result = -EINVAL; /* shut gcc up */
440 xfer->is_inbound = urb->pipe & USB_DIR_IN ? 1 : 0;
441 xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0;
442 xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks)
443 * 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);
444 /* Compute the segment size and make sure it is a multiple of
445 * the maxpktsize (WUSB1.0[8.3.3.1])...not really too much of a check. */
447 maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize);
448 if (xfer->seg_size < maxpktsize) {
449 dev_err(dev, "HW BUG? seg_size %zu smaller than maxpktsize "
450 "%zu\n", xfer->seg_size, maxpktsize);
454 xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
455 xfer->segs = DIV_ROUND_UP(urb->transfer_buffer_length, xfer->seg_size);
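/*
 * Illustrative numbers (not from any particular device): with
 * wBlocks = 4 and bRPipeBlockSize = 10, seg_size = 4 * (1 << 9) = 2048
 * bytes; a wMaxPacketSize of 512 leaves that unchanged by the rounding
 * above, and a 5000 byte URB then needs DIV_ROUND_UP(5000, 2048) = 3
 * segments.
 */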
456 if (xfer->segs >= WA_SEGS_MAX) {
457 dev_err(dev, "BUG? oops, number of segments %d bigger than %d\n",
458 (int)(urb->transfer_buffer_length / xfer->seg_size),
463 if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL)
469 /* Fill in the common request header and xfer-type specific data. */
470 static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,
471 struct wa_xfer_hdr *xfer_hdr0,
472 enum wa_xfer_type xfer_type,
473 size_t xfer_hdr_size)
475 struct wa_rpipe *rpipe = xfer->ep->hcpriv;
477 xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
478 xfer_hdr0->bLength = xfer_hdr_size;
479 xfer_hdr0->bRequestType = xfer_type;
480 xfer_hdr0->wRPipe = rpipe->descr.wRPipeIndex;
481 xfer_hdr0->dwTransferID = wa_xfer_id_le32(xfer);
482 xfer_hdr0->bTransferSegment = 0;
484 case WA_XFER_TYPE_CTL: {
485 struct wa_xfer_ctl *xfer_ctl =
486 container_of(xfer_hdr0, struct wa_xfer_ctl, hdr);
487 xfer_ctl->bmAttribute = xfer->is_inbound ? 1 : 0;
488 memcpy(&xfer_ctl->baSetupData, xfer->urb->setup_packet,
489 sizeof(xfer_ctl->baSetupData));
492 case WA_XFER_TYPE_BI:
494 case WA_XFER_TYPE_ISO:
495 printk(KERN_ERR "FIXME: ISOC not implemented\n");
502 * Callback for the OUT data phase of the segment request
504 * Check wa_seg_tr_cb(); most comments also apply here because this
505 * function does almost the same thing and they work closely together.
508 * If the seg request has failed but this DTO phase has succeeded,
509 * wa_seg_tr_cb() has already failed the segment and moved the
510 * status to WA_SEG_ERROR, so this will go through 'case 0' and
511 * effectively do nothing.
513 static void wa_seg_dto_cb(struct urb *urb)
515 struct wa_seg *seg = urb->context;
516 struct wa_xfer *xfer = seg->xfer;
519 struct wa_rpipe *rpipe;
521 unsigned rpipe_ready = 0;
524 /* free the sg if it was used. */
528 switch (urb->status) {
530 spin_lock_irqsave(&xfer->lock, flags);
532 dev = &wa->usb_iface->dev;
533 dev_dbg(dev, "xfer %p#%u: data out done (%d bytes)\n",
534 xfer, seg->index, urb->actual_length);
535 if (seg->status < WA_SEG_PENDING)
536 seg->status = WA_SEG_PENDING;
537 seg->result = urb->actual_length;
538 spin_unlock_irqrestore(&xfer->lock, flags);
540 case -ECONNRESET: /* URB unlinked; no need to do anything */
541 case -ENOENT: /* as it was done by whoever unlinked us */
543 default: /* Other errors ... */
544 spin_lock_irqsave(&xfer->lock, flags);
546 dev = &wa->usb_iface->dev;
547 rpipe = xfer->ep->hcpriv;
548 dev_dbg(dev, "xfer %p#%u: data out error %d\n",
549 xfer, seg->index, urb->status);
550 if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
551 EDC_ERROR_TIMEFRAME)){
552 dev_err(dev, "DTO: URB max acceptable errors "
553 "exceeded, resetting device\n");
556 if (seg->status != WA_SEG_ERROR) {
557 seg->status = WA_SEG_ERROR;
558 seg->result = urb->status;
560 __wa_xfer_abort(xfer);
561 rpipe_ready = rpipe_avail_inc(rpipe);
562 done = __wa_xfer_is_done(xfer);
564 spin_unlock_irqrestore(&xfer->lock, flags);
566 wa_xfer_completion(xfer);
568 wa_xfer_delayed_run(rpipe);
573 * Callback for the segment request
575 * If successful, transition state (unless already transitioned or
576 * outbound transfer); otherwise, take note of the error, mark this
577 * segment done and try completion.
579 * Note we don't access the transfer until we are sure it hasn't
580 * been cancelled (ECONNRESET, ENOENT), which could mean that
581 * seg->xfer is already gone.
583 * We have to check before setting the status to WA_SEG_PENDING
584 * because sometimes the xfer result callback arrives before this
585 * callback (geeeeeeze), so it might happen that we are already in
586 * another state. As well, we don't set it if the transfer is outbound,
587 * as in that case, wa_seg_dto_cb will do it when the OUT data phase completes.
590 static void wa_seg_tr_cb(struct urb *urb)
592 struct wa_seg *seg = urb->context;
593 struct wa_xfer *xfer = seg->xfer;
596 struct wa_rpipe *rpipe;
598 unsigned rpipe_ready;
601 switch (urb->status) {
603 spin_lock_irqsave(&xfer->lock, flags);
605 dev = &wa->usb_iface->dev;
606 dev_dbg(dev, "xfer %p#%u: request done\n", xfer, seg->index);
607 if (xfer->is_inbound && seg->status < WA_SEG_PENDING)
608 seg->status = WA_SEG_PENDING;
609 spin_unlock_irqrestore(&xfer->lock, flags);
611 case -ECONNRESET: /* URB unlinked; no need to do anything */
612 case -ENOENT: /* as it was done by whoever unlinked us */
614 default: /* Other errors ... */
615 spin_lock_irqsave(&xfer->lock, flags);
617 dev = &wa->usb_iface->dev;
618 rpipe = xfer->ep->hcpriv;
619 if (printk_ratelimit())
620 dev_err(dev, "xfer %p ID 0x%08X#%u: request error %d\n",
621 xfer, wa_xfer_id(xfer), seg->index,
623 if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
624 EDC_ERROR_TIMEFRAME)){
625 dev_err(dev, "DTO: URB max acceptable errors "
626 "exceeded, resetting device\n");
629 usb_unlink_urb(seg->dto_urb);
630 seg->status = WA_SEG_ERROR;
631 seg->result = urb->status;
633 __wa_xfer_abort(xfer);
634 rpipe_ready = rpipe_avail_inc(rpipe);
635 done = __wa_xfer_is_done(xfer);
636 spin_unlock_irqrestore(&xfer->lock, flags);
638 wa_xfer_completion(xfer);
640 wa_xfer_delayed_run(rpipe);
645 * Allocate an SG list to store bytes_to_transfer bytes and copy the
646 * subset of the in_sg that matches the buffer subset
647 * we are about to transfer.
649 static struct scatterlist *wa_xfer_create_subset_sg(struct scatterlist *in_sg,
650 const unsigned int bytes_transferred,
651 const unsigned int bytes_to_transfer, unsigned int *out_num_sgs)
653 struct scatterlist *out_sg;
654 unsigned int bytes_processed = 0, offset_into_current_page_data = 0,
656 struct scatterlist *current_xfer_sg = in_sg;
657 struct scatterlist *current_seg_sg, *last_seg_sg;
659 /* skip previously transferred pages. */
660 while ((current_xfer_sg) &&
661 (bytes_processed < bytes_transferred)) {
662 bytes_processed += current_xfer_sg->length;
664 /* advance the sg if the current segment starts on or past the end of this entry. */
666 if (bytes_processed <= bytes_transferred)
667 current_xfer_sg = sg_next(current_xfer_sg);
670 /* the data for the current segment starts in current_xfer_sg.
671 calculate the offset. */
672 if (bytes_processed > bytes_transferred) {
673 offset_into_current_page_data = current_xfer_sg->length -
674 (bytes_processed - bytes_transferred);
677 /* calculate the number of pages needed by this segment. */
678 nents = DIV_ROUND_UP((bytes_to_transfer +
679 offset_into_current_page_data +
680 current_xfer_sg->offset),
683 out_sg = kmalloc((sizeof(struct scatterlist) * nents), GFP_ATOMIC);
685 sg_init_table(out_sg, nents);
687 /* copy the portion of the incoming SG that correlates to the
688 * data to be transferred by this segment to the segment SG. */
689 last_seg_sg = current_seg_sg = out_sg;
692 /* reset nents and calculate the actual number of sg entries needed. */
695 while ((bytes_processed < bytes_to_transfer) &&
696 current_seg_sg && current_xfer_sg) {
697 unsigned int page_len = min((current_xfer_sg->length -
698 offset_into_current_page_data),
699 (bytes_to_transfer - bytes_processed));
701 sg_set_page(current_seg_sg, sg_page(current_xfer_sg),
703 current_xfer_sg->offset +
704 offset_into_current_page_data);
706 bytes_processed += page_len;
708 last_seg_sg = current_seg_sg;
709 current_seg_sg = sg_next(current_seg_sg);
710 current_xfer_sg = sg_next(current_xfer_sg);
712 /* only the first page may require additional offset. */
713 offset_into_current_page_data = 0;
717 /* update num_sgs and terminate the list since we may have
718 * concatenated pages. */
719 sg_mark_end(last_seg_sg);
720 *out_num_sgs = nents;
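/*
 * Worked example for wa_xfer_create_subset_sg() (illustrative, assumes
 * 4096 byte pages): with three 4096 byte in_sg entries (offset 0),
 * bytes_transferred = 6144 and bytes_to_transfer = 3072, the skip loop
 * stops on the second entry with offset_into_current_page_data = 2048;
 * nents = DIV_ROUND_UP(3072 + 2048 + 0, 4096) = 2, and the copy loop
 * emits {entry 2, offset 2048, len 2048} and {entry 3, offset 0, len 1024}.
 */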
727 * Populate buffer ptr and size, DMA buffer or SG list for the dto urb.
729 static int __wa_populate_dto_urb(struct wa_xfer *xfer,
730 struct wa_seg *seg, size_t buf_itr_offset, size_t buf_itr_size)
735 seg->dto_urb->transfer_dma =
736 xfer->urb->transfer_dma + buf_itr_offset;
737 seg->dto_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
738 seg->dto_urb->sg = NULL;
739 seg->dto_urb->num_sgs = 0;
741 /* do buffer or SG processing. */
742 seg->dto_urb->transfer_flags &=
743 ~URB_NO_TRANSFER_DMA_MAP;
744 /* this should always be 0 before a resubmit. */
745 seg->dto_urb->num_mapped_sgs = 0;
747 if (xfer->urb->transfer_buffer) {
748 seg->dto_urb->transfer_buffer =
749 xfer->urb->transfer_buffer +
751 seg->dto_urb->sg = NULL;
752 seg->dto_urb->num_sgs = 0;
754 seg->dto_urb->transfer_buffer = NULL;
757 * allocate an SG list to store seg_size bytes
758 * and copy the subset of the xfer->urb->sg that
759 * matches the buffer subset we are about to transfer.
762 seg->dto_urb->sg = wa_xfer_create_subset_sg(
764 buf_itr_offset, buf_itr_size,
765 &(seg->dto_urb->num_sgs));
766 if (!(seg->dto_urb->sg))
770 seg->dto_urb->transfer_buffer_length = buf_itr_size;
776 * Allocate the segs array and initialize each of them
778 * The segments are freed by wa_xfer_destroy() when the xfer use count
779 * drops to zero; however, because each segment is given the same life
780 * cycle as the USB URB it contains, it is actually freed by
781 * usb_put_urb() on the contained USB URB (twisted, eh?).
783 static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
786 size_t alloc_size = sizeof(*xfer->seg[0])
787 - sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size;
788 struct usb_device *usb_dev = xfer->wa->usb_dev;
789 const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd;
791 size_t buf_itr, buf_size, buf_itr_size;
794 xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC);
795 if (xfer->seg == NULL)
796 goto error_segs_kzalloc;
798 buf_size = xfer->urb->transfer_buffer_length;
799 for (cnt = 0; cnt < xfer->segs; cnt++) {
800 seg = xfer->seg[cnt] = kmalloc(alloc_size, GFP_ATOMIC);
802 goto error_seg_kmalloc;
806 usb_fill_bulk_urb(&seg->tr_urb, usb_dev,
807 usb_sndbulkpipe(usb_dev,
808 dto_epd->bEndpointAddress),
809 &seg->xfer_hdr, xfer_hdr_size,
811 buf_itr_size = min(buf_size, xfer->seg_size);
812 if (xfer->is_inbound == 0 && buf_size > 0) {
814 seg->dto_urb = usb_alloc_urb(0, GFP_ATOMIC);
815 if (seg->dto_urb == NULL)
816 goto error_dto_alloc;
818 seg->dto_urb, usb_dev,
819 usb_sndbulkpipe(usb_dev,
820 dto_epd->bEndpointAddress),
821 NULL, 0, wa_seg_dto_cb, seg);
823 /* fill in the xfer buffer information. */
824 result = __wa_populate_dto_urb(xfer, seg,
825 buf_itr, buf_itr_size);
828 goto error_seg_outbound_populate;
830 seg->status = WA_SEG_READY;
831 buf_itr += buf_itr_size;
832 buf_size -= buf_itr_size;
837 * Free the memory for the current segment which failed to init.
838 * Use the fact that cnt is left at where it failed. The remaining
839 * segments will be cleaned up by wa_xfer_destroy.
841 error_seg_outbound_populate:
842 usb_free_urb(xfer->seg[cnt]->dto_urb);
844 kfree(xfer->seg[cnt]);
845 xfer->seg[cnt] = NULL;
852 * Allocates all the stuff needed to submit a transfer
854 * Breaks the whole data buffer into a list of segments, each with a
855 * structure allocated to it and linked in xfer->seg[index]
857 * FIXME: merge setup_segs() and the last part of this function; no
858 * need to do two for loops when we could run everything in a single one.
861 static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb)
864 struct device *dev = &xfer->wa->usb_iface->dev;
865 enum wa_xfer_type xfer_type = 0; /* shut up GCC */
866 size_t xfer_hdr_size, cnt, transfer_size;
867 struct wa_xfer_hdr *xfer_hdr0, *xfer_hdr;
869 result = __wa_xfer_setup_sizes(xfer, &xfer_type);
871 goto error_setup_sizes;
872 xfer_hdr_size = result;
873 result = __wa_xfer_setup_segs(xfer, xfer_hdr_size);
875 dev_err(dev, "xfer %p: Failed to allocate %d segments: %d\n",
876 xfer, xfer->segs, result);
877 goto error_setup_segs;
879 /* Fill the first header */
880 xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
881 wa_xfer_id_init(xfer);
882 __wa_xfer_setup_hdr0(xfer, xfer_hdr0, xfer_type, xfer_hdr_size);
884 /* Fill in the remaining headers */
885 xfer_hdr = xfer_hdr0;
886 transfer_size = urb->transfer_buffer_length;
887 xfer_hdr0->dwTransferLength = cpu_to_le32(transfer_size > xfer->seg_size ?
888 xfer->seg_size : transfer_size);
889 transfer_size -= xfer->seg_size;
890 for (cnt = 1; cnt < xfer->segs; cnt++) {
891 xfer_hdr = &xfer->seg[cnt]->xfer_hdr;
892 memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
893 xfer_hdr->bTransferSegment = cnt;
894 xfer_hdr->dwTransferLength = transfer_size > xfer->seg_size ?
895 cpu_to_le32(xfer->seg_size)
896 : cpu_to_le32(transfer_size);
897 xfer->seg[cnt]->status = WA_SEG_READY;
898 transfer_size -= xfer->seg_size;
900 xfer_hdr->bTransferSegment |= 0x80; /* this is the last segment */
910 * rpipe->seg_lock is held!
912 static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
916 /* submit the transfer request. */
917 result = usb_submit_urb(&seg->tr_urb, GFP_ATOMIC);
919 printk(KERN_ERR "xfer %p#%u: REQ submit failed: %d\n",
920 xfer, seg->index, result);
921 goto error_seg_submit;
923 /* submit the out data if this is an out request. */
925 result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
927 printk(KERN_ERR "xfer %p#%u: DTO submit failed: %d\n",
928 xfer, seg->index, result);
929 goto error_dto_submit;
932 seg->status = WA_SEG_SUBMITTED;
933 rpipe_avail_dec(rpipe);
937 usb_unlink_urb(&seg->tr_urb);
939 seg->status = WA_SEG_ERROR;
940 seg->result = result;
945 * Execute more queued request segments until the maximum concurrent allowed.
947 * The ugly unlock/lock sequence on the error path is needed as the
948 * xfer->lock normally nests the seg_lock and not vice versa.
951 static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)
954 struct device *dev = &rpipe->wa->usb_iface->dev;
956 struct wa_xfer *xfer;
959 spin_lock_irqsave(&rpipe->seg_lock, flags);
960 while (atomic_read(&rpipe->segs_available) > 0
961 && !list_empty(&rpipe->seg_list)) {
962 seg = list_first_entry(&(rpipe->seg_list), struct wa_seg,
964 list_del(&seg->list_node);
966 result = __wa_seg_submit(rpipe, xfer, seg);
967 dev_dbg(dev, "xfer %p ID %08X#%u submitted from delayed [%d segments available] %d\n",
968 xfer, wa_xfer_id(xfer), seg->index,
969 atomic_read(&rpipe->segs_available), result);
970 if (unlikely(result < 0)) {
971 spin_unlock_irqrestore(&rpipe->seg_lock, flags);
972 spin_lock_irqsave(&xfer->lock, flags);
973 __wa_xfer_abort(xfer);
975 spin_unlock_irqrestore(&xfer->lock, flags);
976 spin_lock_irqsave(&rpipe->seg_lock, flags);
979 spin_unlock_irqrestore(&rpipe->seg_lock, flags);
984 * xfer->lock is taken
986 * On submit failure we just stop submitting and return the error;
987 * wa_urb_enqueue_b() will execute the completion path
989 static int __wa_xfer_submit(struct wa_xfer *xfer)
992 struct wahc *wa = xfer->wa;
993 struct device *dev = &wa->usb_iface->dev;
997 struct wa_rpipe *rpipe = xfer->ep->hcpriv;
998 size_t maxrequests = le16_to_cpu(rpipe->descr.wRequests);
1002 spin_lock_irqsave(&wa->xfer_list_lock, flags);
1003 list_add_tail(&xfer->list_node, &wa->xfer_list);
1004 spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
1006 BUG_ON(atomic_read(&rpipe->segs_available) > maxrequests);
1008 spin_lock_irqsave(&rpipe->seg_lock, flags);
1009 for (cnt = 0; cnt < xfer->segs; cnt++) {
1010 available = atomic_read(&rpipe->segs_available);
1011 empty = list_empty(&rpipe->seg_list);
1012 seg = xfer->seg[cnt];
1013 dev_dbg(dev, "xfer %p ID 0x%08X#%u: available %u empty %u (%s)\n",
1014 xfer, wa_xfer_id(xfer), cnt, available, empty,
1015 available == 0 || !empty ? "delayed" : "submitted");
1016 if (available == 0 || !empty) {
1017 seg->status = WA_SEG_DELAYED;
1018 list_add_tail(&seg->list_node, &rpipe->seg_list);
1020 result = __wa_seg_submit(rpipe, xfer, seg);
1022 __wa_xfer_abort(xfer);
1023 goto error_seg_submit;
1026 xfer->segs_submitted++;
1029 spin_unlock_irqrestore(&rpipe->seg_lock, flags);
1034 * Second part of a URB/transfer enqueue operation
1036 * Assumes this comes from wa_urb_enqueue() [maybe through
1037 * wa_urb_enqueue_run()]. At this point:
1039 * xfer->wa filled and refcounted
1040 * xfer->ep filled with rpipe refcounted if
1042 * xfer->urb filled and refcounted (this is the case when called
1043 * from wa_urb_enqueue() as we come from usb_submit_urb()
1044 * and when called by wa_urb_enqueue_run(), as we took an
1045 * extra ref dropped by _run() after we return).
1048 * If we fail at __wa_xfer_submit(), then we just check if we are done
1049 * and if so, we run the completion procedure. However, if we are not
1050 * yet done, we do nothing and wait for the completion handlers from
1051 * the submitted URBs or from the xfer-result path to kick in. If xfer
1052 * result never kicks in, the xfer will timeout from the USB code and
1053 * dequeue() will be called.
1055 static void wa_urb_enqueue_b(struct wa_xfer *xfer)
1058 unsigned long flags;
1059 struct urb *urb = xfer->urb;
1060 struct wahc *wa = xfer->wa;
1061 struct wusbhc *wusbhc = wa->wusb;
1062 struct wusb_dev *wusb_dev;
1065 result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp);
1067 goto error_rpipe_get;
1069 /* FIXME: segmentation broken -- kills DWA */
1070 mutex_lock(&wusbhc->mutex); /* get a WUSB dev */
1071 if (urb->dev == NULL) {
1072 mutex_unlock(&wusbhc->mutex);
1073 goto error_dev_gone;
1075 wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);
1076 if (wusb_dev == NULL) {
1077 mutex_unlock(&wusbhc->mutex);
1078 goto error_dev_gone;
1080 mutex_unlock(&wusbhc->mutex);
1082 spin_lock_irqsave(&xfer->lock, flags);
1083 xfer->wusb_dev = wusb_dev;
1084 result = urb->status;
1085 if (urb->status != -EINPROGRESS)
1086 goto error_dequeued;
1088 result = __wa_xfer_setup(xfer, urb);
1090 goto error_xfer_setup;
1091 result = __wa_xfer_submit(xfer);
1093 goto error_xfer_submit;
1094 spin_unlock_irqrestore(&xfer->lock, flags);
1097 /* this is basically wa_xfer_completion() broken up; wa_xfer_giveback()
1098 * does a wa_xfer_put() that will call wa_xfer_destroy() and clean
1103 spin_unlock_irqrestore(&xfer->lock, flags);
1104 /* FIXME: segmentation broken, kills DWA */
1106 wusb_dev_put(wusb_dev);
1108 rpipe_put(xfer->ep->hcpriv);
1110 xfer->result = result;
1111 wa_xfer_giveback(xfer);
1115 done = __wa_xfer_is_done(xfer);
1116 xfer->result = result;
1117 spin_unlock_irqrestore(&xfer->lock, flags);
1119 wa_xfer_completion(xfer);
1123 * Execute the delayed transfers in the Wire Adapter @wa
1125 * We need to be careful here, as dequeue() could be called in the
1126 * middle. If dequeue() jumps in, it first locks xfer->lock and then
1127 * checks the list -- since we would be acquiring the locks in inverse
1128 * order, we move the delayed list to a separate list while holding
1129 * wa->xfer_list_lock and then submit the transfers without the list
1130 * lock held.
1132 void wa_urb_enqueue_run(struct work_struct *ws)
1134 struct wahc *wa = container_of(ws, struct wahc, xfer_enqueue_work);
1135 struct wa_xfer *xfer, *next;
1137 LIST_HEAD(tmp_list);
1139 /* Create a copy of the wa->xfer_delayed_list while holding the lock */
1140 spin_lock_irq(&wa->xfer_list_lock);
1141 list_cut_position(&tmp_list, &wa->xfer_delayed_list,
1142 wa->xfer_delayed_list.prev);
1143 spin_unlock_irq(&wa->xfer_list_lock);
1146 * enqueue from temp list without list lock held since wa_urb_enqueue_b
1147 * can take xfer->lock as well as lock mutexes.
1149 list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
1150 list_del_init(&xfer->list_node);
1153 wa_urb_enqueue_b(xfer);
1154 usb_put_urb(urb); /* taken when queuing */
1157 EXPORT_SYMBOL_GPL(wa_urb_enqueue_run);
1160 * Process the errored transfers on the Wire Adapter outside of interrupt context.
1162 void wa_process_errored_transfers_run(struct work_struct *ws)
1164 struct wahc *wa = container_of(ws, struct wahc, xfer_error_work);
1165 struct wa_xfer *xfer, *next;
1166 LIST_HEAD(tmp_list);
1168 pr_info("%s: Run delayed STALL processing.\n", __func__);
1170 /* Create a copy of the wa->xfer_errored_list while holding the lock */
1171 spin_lock_irq(&wa->xfer_list_lock);
1172 list_cut_position(&tmp_list, &wa->xfer_errored_list,
1173 wa->xfer_errored_list.prev);
1174 spin_unlock_irq(&wa->xfer_list_lock);
1177 * run rpipe_clear_feature_stalled from temp list without list lock held.
1180 list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
1181 struct usb_host_endpoint *ep;
1182 unsigned long flags;
1183 struct wa_rpipe *rpipe;
1185 spin_lock_irqsave(&xfer->lock, flags);
1188 spin_unlock_irqrestore(&xfer->lock, flags);
1190 /* clear RPIPE feature stalled without holding a lock. */
1191 rpipe_clear_feature_stalled(wa, ep);
1193 /* complete the xfer. This removes it from the tmp list. */
1194 wa_xfer_completion(xfer);
1196 /* check for work. */
1197 wa_xfer_delayed_run(rpipe);
1200 EXPORT_SYMBOL_GPL(wa_process_errored_transfers_run);
1203 * Submit a transfer to the Wire Adapter in a delayed way
1205 * The process of enqueuing involves possible sleeps [see
1206 * wa_urb_enqueue_b(), for the rpipe_get() and the mutex_lock()]. If we are
1207 * in an atomic section, we defer the wa_urb_enqueue_b() call; otherwise we call it directly.
1209 * @urb: We own a reference to it taken by the HCI Linux USB stack that
1210 * will be given up by calling usb_hcd_giveback_urb() or by
1211 * returning an error from this function -> ergo we don't have to refcount it.
1214 int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
1215 struct urb *urb, gfp_t gfp)
1218 struct device *dev = &wa->usb_iface->dev;
1219 struct wa_xfer *xfer;
1220 unsigned long my_flags;
1221 unsigned cant_sleep = irqs_disabled() | in_atomic();
1223 if ((urb->transfer_buffer == NULL)
1224 && (urb->sg == NULL)
1225 && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
1226 && urb->transfer_buffer_length != 0) {
1227 dev_err(dev, "BUG? urb %p: NULL xfer buffer & NODMA\n", urb);
1232 xfer = kzalloc(sizeof(*xfer), gfp);
1237 if (urb->status != -EINPROGRESS) /* cancelled */
1238 goto error_dequeued; /* before starting? */
1240 xfer->wa = wa_get(wa);
1246 dev_dbg(dev, "xfer %p urb %p pipe 0x%02x [%d bytes] %s %s %s\n",
1247 xfer, urb, urb->pipe, urb->transfer_buffer_length,
1248 urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? "dma" : "nodma",
1249 urb->pipe & USB_DIR_IN ? "inbound" : "outbound",
1250 cant_sleep ? "deferred" : "inline");
1254 spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
1255 list_add_tail(&xfer->list_node, &wa->xfer_delayed_list);
1256 spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
1257 queue_work(wusbd, &wa->xfer_enqueue_work);
1259 wa_urb_enqueue_b(xfer);
1268 EXPORT_SYMBOL_GPL(wa_urb_enqueue);
1271 * Dequeue a URB and make sure usb_hcd_giveback_urb() [completion
1272 * handler] is called.
1274 * Until a transfer goes successfully through wa_urb_enqueue(), it
1275 * needs to be dequeued with the completion called; when stuck in the delayed
1276 * list or before __wa_xfer_setup() is called, we need to do the completion ourselves.
1278 * not setup If there is no hcpriv yet, that means that the enqueue
1279 * still had no time to set the xfer up. Because
1280 * urb->status should be other than -EINPROGRESS,
1281 * enqueue() will catch that and bail out.
1283 * If the transfer has gone through setup, we just need to clean it
1284 * up. If it has gone through submit(), we have to abort it [with an
1285 * async request] and then make sure we cancel each segment.
1288 int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
1290 unsigned long flags, flags2;
1291 struct wa_xfer *xfer;
1293 struct wa_rpipe *rpipe;
1294 unsigned cnt, done = 0, xfer_abort_pending;
1295 unsigned rpipe_ready = 0;
1300 * Nothing is set up yet; enqueue will see urb->status !=
1301 * -EINPROGRESS (set by the hcd layer) and bail out with an
1302 * error, so there is no need to do the completion here.
1304 BUG_ON(urb->status == -EINPROGRESS);
1307 spin_lock_irqsave(&xfer->lock, flags);
1308 pr_debug("%s: DEQUEUE xfer id 0x%08X\n", __func__, wa_xfer_id(xfer));
1309 rpipe = xfer->ep->hcpriv;
1310 if (rpipe == NULL) {
1311 pr_debug("%s: xfer id 0x%08X has no RPIPE. %s",
1312 __func__, wa_xfer_id(xfer),
1313 "Probably already aborted.\n" );
1316 /* Check the delayed list -> if there, release and complete */
1317 spin_lock_irqsave(&wa->xfer_list_lock, flags2);
1318 if (!list_empty(&xfer->list_node) && xfer->seg == NULL)
1319 goto dequeue_delayed;
1320 spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
1321 if (xfer->seg == NULL) /* still hasn't reached */
1322 goto out_unlock; /* setup(), enqueue_b() completes */
1323 /* Ok, the xfer is in flight already, it's been setup and submitted.*/
1324 xfer_abort_pending = __wa_xfer_abort(xfer) >= 0;
1325 for (cnt = 0; cnt < xfer->segs; cnt++) {
1326 seg = xfer->seg[cnt];
1327 pr_debug("%s: xfer id 0x%08X#%d status = %d\n",
1328 __func__, wa_xfer_id(xfer), cnt, seg->status);
1329 switch (seg->status) {
1330 case WA_SEG_NOTREADY:
1332 printk(KERN_ERR "xfer %p#%u: dequeue bad state %u\n",
1333 xfer, cnt, seg->status);
1336 case WA_SEG_DELAYED:
1338 * delete from rpipe delayed list. If no segments on
1339 * this xfer have been submitted, __wa_xfer_is_done will
1340 * trigger a giveback below. Otherwise, the submitted
1341 * segments will be completed in the DTI interrupt.
1343 seg->status = WA_SEG_ABORTED;
1344 spin_lock_irqsave(&rpipe->seg_lock, flags2);
1345 list_del(&seg->list_node);
1347 spin_unlock_irqrestore(&rpipe->seg_lock, flags2);
1351 case WA_SEG_ABORTED:
1354 * In the states below, the HWA device already knows
1355 * about the transfer. If an abort request was sent,
1356 * allow the HWA to process it and wait for the
1357 * results. Otherwise, the DTI state and seg completed
1358 * counts can get out of sync.
1360 case WA_SEG_SUBMITTED:
1361 case WA_SEG_PENDING:
1362 case WA_SEG_DTI_PENDING:
1364 * Check if the abort was successfully sent. This could
1365 * be false if the HWA has been removed but we haven't
1366 * gotten the disconnect notification yet.
1368 if (!xfer_abort_pending) {
1369 seg->status = WA_SEG_ABORTED;
1370 rpipe_ready = rpipe_avail_inc(rpipe);
1376 xfer->result = urb->status; /* -ENOENT or -ECONNRESET */
1377 done = __wa_xfer_is_done(xfer);
1378 spin_unlock_irqrestore(&xfer->lock, flags);
1380 wa_xfer_completion(xfer);
1382 wa_xfer_delayed_run(rpipe);
1386 spin_unlock_irqrestore(&xfer->lock, flags);
1391 list_del_init(&xfer->list_node);
1392 spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
1393 xfer->result = urb->status;
1394 spin_unlock_irqrestore(&xfer->lock, flags);
1395 wa_xfer_giveback(xfer);
1396 usb_put_urb(urb); /* we got a ref in enqueue() */
1399 EXPORT_SYMBOL_GPL(wa_urb_dequeue);
1402 * Translation from WA status codes (WUSB1.0 Table 8.15) to errno
1405 * Positive errno values are internal inconsistencies and should be
1406 * flagged louder. Negative are to be passed up to the user in the normal way.
1409 * @status: USB WA status code -- high two bits are stripped.
1411 static int wa_xfer_status_to_errno(u8 status)
1414 u8 real_status = status;
1415 static int xlat[] = {
1416 [WA_XFER_STATUS_SUCCESS] = 0,
1417 [WA_XFER_STATUS_HALTED] = -EPIPE,
1418 [WA_XFER_STATUS_DATA_BUFFER_ERROR] = -ENOBUFS,
1419 [WA_XFER_STATUS_BABBLE] = -EOVERFLOW,
1420 [WA_XFER_RESERVED] = EINVAL,
1421 [WA_XFER_STATUS_NOT_FOUND] = 0,
1422 [WA_XFER_STATUS_INSUFFICIENT_RESOURCE] = -ENOMEM,
1423 [WA_XFER_STATUS_TRANSACTION_ERROR] = -EILSEQ,
1424 [WA_XFER_STATUS_ABORTED] = -EINTR,
1425 [WA_XFER_STATUS_RPIPE_NOT_READY] = EINVAL,
1426 [WA_XFER_INVALID_FORMAT] = EINVAL,
1427 [WA_XFER_UNEXPECTED_SEGMENT_NUMBER] = EINVAL,
1428 [WA_XFER_STATUS_RPIPE_TYPE_MISMATCH] = EINVAL,
1434 if (status >= ARRAY_SIZE(xlat)) {
1435 printk_ratelimited(KERN_ERR "%s(): BUG? "
1436 "Unknown WA transfer status 0x%02x\n",
1437 __func__, real_status);
1440 errno = xlat[status];
1441 if (unlikely(errno > 0)) {
1442 printk_ratelimited(KERN_ERR "%s(): BUG? "
1443 "Inconsistent WA status: 0x%02x\n",
1444 __func__, real_status);
1451 * If a last segment flag and/or a transfer result error is encountered,
1452 * no other segment transfer results will be returned from the device.
1453 * Mark the remaining submitted or pending segments as completed so that
1454 * the xfer will complete cleanly.
1456 static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer,
1457 struct wa_seg *incoming_seg)
1460 struct wa_rpipe *rpipe = xfer->ep->hcpriv;
1462 for (index = incoming_seg->index + 1; index < xfer->segs_submitted;
1464 struct wa_seg *current_seg = xfer->seg[index];
1466 BUG_ON(current_seg == NULL);
1468 switch (current_seg->status) {
1469 case WA_SEG_SUBMITTED:
1470 case WA_SEG_PENDING:
1471 case WA_SEG_DTI_PENDING:
1472 rpipe_avail_inc(rpipe);
1474 * do not increment RPIPE avail for the WA_SEG_DELAYED case
1475 * since it has not been submitted to the RPIPE.
1477 case WA_SEG_DELAYED:
1479 current_seg->status = incoming_seg->status;
1481 case WA_SEG_ABORTED:
1484 WARN(1, "%s: xfer 0x%08X#%d. bad seg status = %d\n",
1485 __func__, wa_xfer_id(xfer), index,
1486 current_seg->status);
1493 * Process a xfer result completion message
1495 * inbound transfers: need to schedule a buf_in_urb read
1497 * FIXME: this function needs to be broken up into parts
1499 static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer,
1500 struct wa_xfer_result *xfer_result)
1503 struct device *dev = &wa->usb_iface->dev;
1504 unsigned long flags;
1507 struct wa_rpipe *rpipe;
1510 unsigned rpipe_ready = 0;
1512 spin_lock_irqsave(&xfer->lock, flags);
1513 seg_idx = xfer_result->bTransferSegment & 0x7f;
1514 if (unlikely(seg_idx >= xfer->segs))
1516 seg = xfer->seg[seg_idx];
1517 rpipe = xfer->ep->hcpriv;
1518 usb_status = xfer_result->bTransferStatus;
1519 dev_dbg(dev, "xfer %p ID 0x%08X#%u: bTransferStatus 0x%02x (seg status %u)\n",
1520 xfer, wa_xfer_id(xfer), seg_idx, usb_status, seg->status);
1521 if (seg->status == WA_SEG_ABORTED
1522 || seg->status == WA_SEG_ERROR) /* already handled */
1523 goto segment_aborted;
1524 if (seg->status == WA_SEG_SUBMITTED) /* oops, got here */
1525 seg->status = WA_SEG_PENDING; /* before wa_seg{_dto}_cb() */
1526 if (seg->status != WA_SEG_PENDING) {
1527 if (printk_ratelimit())
1528 dev_err(dev, "xfer %p#%u: Bad segment state %u\n",
1529 xfer, seg_idx, seg->status);
1530 seg->status = WA_SEG_PENDING; /* workaround/"fix" it */
1532 if (usb_status & 0x80) {
1533 seg->result = wa_xfer_status_to_errno(usb_status);
1534 dev_err(dev, "DTI: xfer %p#:%08X:%u failed (0x%02x)\n",
1535 xfer, xfer->id, seg->index, usb_status);
1536 seg->status = ((usb_status & 0x7F) == WA_XFER_STATUS_ABORTED) ?
1537 WA_SEG_ABORTED : WA_SEG_ERROR;
1538 goto error_complete;
1540 /* FIXME: we ignore warnings, tally them for stats */
1541 if (usb_status & 0x40) /* Warning?... */
1542 usb_status = 0; /* ... pass */
1543 if (xfer->is_inbound) { /* IN data phase: read to buffer */
1544 seg->status = WA_SEG_DTI_PENDING;
1545 BUG_ON(wa->buf_in_urb->status == -EINPROGRESS);
1546 /* this should always be 0 before a resubmit. */
1547 wa->buf_in_urb->num_mapped_sgs = 0;
1550 wa->buf_in_urb->transfer_dma =
1551 xfer->urb->transfer_dma
1552 + (seg_idx * xfer->seg_size);
1553 wa->buf_in_urb->transfer_flags
1554 |= URB_NO_TRANSFER_DMA_MAP;
1555 wa->buf_in_urb->transfer_buffer = NULL;
1556 wa->buf_in_urb->sg = NULL;
1557 wa->buf_in_urb->num_sgs = 0;
1559 /* do buffer or SG processing. */
1560 wa->buf_in_urb->transfer_flags
1561 &= ~URB_NO_TRANSFER_DMA_MAP;
1563 if (xfer->urb->transfer_buffer) {
1564 wa->buf_in_urb->transfer_buffer =
1565 xfer->urb->transfer_buffer
1566 + (seg_idx * xfer->seg_size);
1567 wa->buf_in_urb->sg = NULL;
1568 wa->buf_in_urb->num_sgs = 0;
1570 /* allocate an SG list to store seg_size bytes
1571 and copy the subset of the xfer->urb->sg
1572 that matches the buffer subset we are about to read. */
1574 wa->buf_in_urb->sg = wa_xfer_create_subset_sg(
1576 seg_idx * xfer->seg_size,
1578 xfer_result->dwTransferLength),
1579 &(wa->buf_in_urb->num_sgs));
1581 if (!(wa->buf_in_urb->sg)) {
1582 wa->buf_in_urb->num_sgs = 0;
1583 goto error_sg_alloc;
1585 wa->buf_in_urb->transfer_buffer = NULL;
1588 wa->buf_in_urb->transfer_buffer_length =
1589 le32_to_cpu(xfer_result->dwTransferLength);
1590 wa->buf_in_urb->context = seg;
1591 result = usb_submit_urb(wa->buf_in_urb, GFP_ATOMIC);
1593 goto error_submit_buf_in;
1595 /* OUT data phase, complete it -- */
1596 seg->status = WA_SEG_DONE;
1597 seg->result = le32_to_cpu(xfer_result->dwTransferLength);
1599 rpipe_ready = rpipe_avail_inc(rpipe);
1600 done = __wa_xfer_is_done(xfer);
1602 spin_unlock_irqrestore(&xfer->lock, flags);
1604 wa_xfer_completion(xfer);
1606 wa_xfer_delayed_run(rpipe);
1609 error_submit_buf_in:
1610 if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
1611 dev_err(dev, "DTI: URB max acceptable errors "
1612 "exceeded, resetting device\n");
1615 if (printk_ratelimit())
1616 dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n",
1617 xfer, seg_idx, result);
1618 seg->result = result;
1619 kfree(wa->buf_in_urb->sg);
1620 wa->buf_in_urb->sg = NULL;
1622 __wa_xfer_abort(xfer);
1623 seg->status = WA_SEG_ERROR;
1626 rpipe_ready = rpipe_avail_inc(rpipe);
1627 wa_complete_remaining_xfer_segs(xfer, seg);
1628 done = __wa_xfer_is_done(xfer);
1630 * queue work item to clear STALL for control endpoints.
1631 * Otherwise, let endpoint_reset take care of it.
1633 if (((usb_status & 0x3f) == WA_XFER_STATUS_HALTED) &&
1634 usb_endpoint_xfer_control(&xfer->ep->desc) &&
1637 dev_info(dev, "Control EP stall. Queue delayed work.\n");
1638 spin_lock_irq(&wa->xfer_list_lock);
1639 /* move xfer from xfer_list to xfer_errored_list. */
1640 list_move_tail(&xfer->list_node, &wa->xfer_errored_list);
1641 spin_unlock_irq(&wa->xfer_list_lock);
1642 spin_unlock_irqrestore(&xfer->lock, flags);
1643 queue_work(wusbd, &wa->xfer_error_work);
1645 spin_unlock_irqrestore(&xfer->lock, flags);
1647 wa_xfer_completion(xfer);
1649 wa_xfer_delayed_run(rpipe);
1655 spin_unlock_irqrestore(&xfer->lock, flags);
1656 wa_urb_dequeue(wa, xfer->urb);
1657 if (printk_ratelimit())
1658 dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx);
1659 if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
1660 dev_err(dev, "DTI: URB max acceptable errors "
1661 "exceeded, resetting device\n");
1667 /* nothing to do, as the aborter did the completion */
1668 spin_unlock_irqrestore(&xfer->lock, flags);
1672 * Callback for the IN data phase
1674 * If successful, transition state; otherwise, take note of the
1675 * error, mark this segment done and try completion.
1677 * Note we don't access the transfer until we are sure it hasn't
1678 * been cancelled (ECONNRESET, ENOENT), which could mean that
1679 * seg->xfer is already gone.
1681 static void wa_buf_in_cb(struct urb *urb)
1683 struct wa_seg *seg = urb->context;
1684 struct wa_xfer *xfer = seg->xfer;
1687 struct wa_rpipe *rpipe;
1688 unsigned rpipe_ready;
1689 unsigned long flags;
1692 /* free the sg if it was used. */
1696 switch (urb->status) {
1698 spin_lock_irqsave(&xfer->lock, flags);
1700 dev = &wa->usb_iface->dev;
1701 rpipe = xfer->ep->hcpriv;
1702 dev_dbg(dev, "xfer %p#%u: data in done (%zu bytes)\n",
1703 xfer, seg->index, (size_t)urb->actual_length);
1704 seg->status = WA_SEG_DONE;
1705 seg->result = urb->actual_length;
1707 rpipe_ready = rpipe_avail_inc(rpipe);
1708 done = __wa_xfer_is_done(xfer);
1709 spin_unlock_irqrestore(&xfer->lock, flags);
1711 wa_xfer_completion(xfer);
1713 wa_xfer_delayed_run(rpipe);
1715 case -ECONNRESET: /* URB unlinked; no need to do anything */
1716 case -ENOENT: /* as it was done by whoever unlinked us */
1718 default: /* Other errors ... */
1719 spin_lock_irqsave(&xfer->lock, flags);
1721 dev = &wa->usb_iface->dev;
1722 rpipe = xfer->ep->hcpriv;
1723 if (printk_ratelimit())
1724 dev_err(dev, "xfer %p#%u: data in error %d\n",
1725 xfer, seg->index, urb->status);
1726 if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
1727 EDC_ERROR_TIMEFRAME)){
1728 dev_err(dev, "DTO: URB max acceptable errors "
1729 "exceeded, resetting device\n");
1732 seg->status = WA_SEG_ERROR;
1733 seg->result = urb->status;
1735 rpipe_ready = rpipe_avail_inc(rpipe);
1736 __wa_xfer_abort(xfer);
1737 done = __wa_xfer_is_done(xfer);
1738 spin_unlock_irqrestore(&xfer->lock, flags);
1740 wa_xfer_completion(xfer);
1742 wa_xfer_delayed_run(rpipe);
1747 * Handle an incoming transfer result buffer
1749 * Given a transfer result buffer, it completes the transfer (possibly
1750 * scheduling a buffer-in read) and then resubmits the DTI URB for a
1751 * new transfer result read.
1754 * The xfer_result DTI URB state machine
1756 * States: OFF | RXR (Read-Xfer-Result) | RBI (Read-Buffer-In)
1758 * We start in OFF mode, the first xfer_result notification [through
1759 * wa_handle_notif_xfer()] moves us to RXR by posting the DTI-URB to
1762 * We receive a buffer -- if it is not a xfer_result, we complain and
1763 * repost the DTI-URB. If it is a xfer_result then do the xfer seg
1764 * request accounting. If it is an IN segment, we move to RBI and post
1765 * a BUF-IN-URB to the right buffer. The BUF-IN-URB callback will
1766 * repost the DTI-URB and move to RXR state. If there was no IN
1767 * segment, it will repost the DTI-URB.
1769 * We go back to OFF when we detect a ENOENT or ESHUTDOWN (or too many
1770 * errors) in the URBs.
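 *
 * A compact restatement of the transitions described above:
 *
 *   OFF --first xfer notification----------------> RXR  (DTI URB posted)
 *   RXR --xfer_result for an IN segment----------> RBI  (BUF-IN URB posted)
 *   RBI --buf-in URB completes-------------------> RXR  (DTI URB reposted)
 *   any --ENOENT/ESHUTDOWN/too many URB errors---> OFF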
1772 static void wa_dti_cb(struct urb *urb)
1775 struct wahc *wa = urb->context;
1776 struct device *dev = &wa->usb_iface->dev;
1777 struct wa_xfer_result *xfer_result;
1779 struct wa_xfer *xfer;
1782 BUG_ON(wa->dti_urb != urb);
1783 switch (wa->dti_urb->status) {
1785 /* We have a xfer result buffer; check it */
1786 dev_dbg(dev, "DTI: xfer result %d bytes at %p\n",
1787 urb->actual_length, urb->transfer_buffer);
1788 if (wa->dti_urb->actual_length != sizeof(*xfer_result)) {
1789 dev_err(dev, "DTI Error: xfer result--bad size "
1790 "xfer result (%d bytes vs %zu needed)\n",
1791 urb->actual_length, sizeof(*xfer_result));
1794 xfer_result = (struct wa_xfer_result *)(wa->dti_buf);
1795 if (xfer_result->hdr.bLength != sizeof(*xfer_result)) {
1796 dev_err(dev, "DTI Error: xfer result--"
1797 "bad header length %u\n",
1798 xfer_result->hdr.bLength);
1801 if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) {
1802 dev_err(dev, "DTI Error: xfer result--"
1803 "bad header type 0x%02x\n",
1804 xfer_result->hdr.bNotifyType);
1807 usb_status = xfer_result->bTransferStatus & 0x3f;
1808 if (usb_status == WA_XFER_STATUS_NOT_FOUND)
1809 /* taken care of already */
1811 xfer_id = le32_to_cpu(xfer_result->dwTransferID);
1812 xfer = wa_xfer_get_by_id(wa, xfer_id);
1814 /* FIXME: transaction might have been cancelled */
1815 dev_err(dev, "DTI Error: xfer result--"
1816 "unknown xfer 0x%08x (status 0x%02x)\n",
1817 xfer_id, usb_status);
1820 wa_xfer_result_chew(wa, xfer, xfer_result);
1823 case -ENOENT: /* (we killed the URB)...so, no broadcast */
1824 case -ESHUTDOWN: /* going away! */
1825 dev_dbg(dev, "DTI: going down! %d\n", urb->status);
1829 if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS,
1830 EDC_ERROR_TIMEFRAME)) {
1831 dev_err(dev, "DTI: URB max acceptable errors "
1832 "exceeded, resetting device\n");
1836 if (printk_ratelimit())
1837 dev_err(dev, "DTI: URB error %d\n", urb->status);
1840 /* Resubmit the DTI URB */
1841 result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
1843 dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
1844 "resetting\n", result);
1852 * Transfer complete notification
1854 * Called from the notif.c code. We get a notification on EP2 saying
1855 * that some endpoint has some transfer result data available. We are
1858 * To speed up things, we always have a URB reading the DTI URB; we
1859 * don't really set it up and start it until the first xfer complete
1860 * notification arrives, which is what we do here.
1862 * Follow up in wa_dti_cb(), as that's where the whole state machine is handled.
1865 * So here we just initialize the DTI URB for reading transfer result
1866 * notifications and also the buffer-in URB, for reading buffers. Then
1867 * we just submit the DTI URB.
1869 * @wa shall be referenced
1871 void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)
1874 struct device *dev = &wa->usb_iface->dev;
1875 struct wa_notif_xfer *notif_xfer;
1876 const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;
1878 notif_xfer = container_of(notif_hdr, struct wa_notif_xfer, hdr);
1879 BUG_ON(notif_hdr->bNotifyType != WA_NOTIF_TRANSFER);
1881 if ((0x80 | notif_xfer->bEndpoint) != dti_epd->bEndpointAddress) {
1882 /* FIXME: hardcoded limitation, adapt */
1883 dev_err(dev, "BUG: DTI ep is %u, not %u (hack me)\n",
1884 notif_xfer->bEndpoint, dti_epd->bEndpointAddress);
1887 if (wa->dti_urb != NULL) /* DTI URB already started */
1890 wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL);
1891 if (wa->dti_urb == NULL) {
1892 dev_err(dev, "Can't allocate DTI URB\n");
1893 goto error_dti_urb_alloc;
1896 wa->dti_urb, wa->usb_dev,
1897 usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
1898 wa->dti_buf, wa->dti_buf_size,
1901 wa->buf_in_urb = usb_alloc_urb(0, GFP_KERNEL);
1902 if (wa->buf_in_urb == NULL) {
1903 dev_err(dev, "Can't allocate BUF-IN URB\n");
1904 goto error_buf_in_urb_alloc;
1907 wa->buf_in_urb, wa->usb_dev,
1908 usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
1909 NULL, 0, wa_buf_in_cb, wa);
1910 result = usb_submit_urb(wa->dti_urb, GFP_KERNEL);
1912 dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
1913 "resetting\n", result);
1914 goto error_dti_urb_submit;
1919 error_dti_urb_submit:
1920 usb_put_urb(wa->buf_in_urb);
1921 wa->buf_in_urb = NULL;
1922 error_buf_in_urb_alloc:
1923 usb_put_urb(wa->dti_urb);
1925 error_dti_urb_alloc: