3 * Data transfer and URB enqueuing
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
23 * How transfers work: get a buffer, break it up into segments (segment
24 * size is a multiple of the maxpacket size). For each segment issue a
25 * segment request (struct wa_xfer_*), then send the data buffer if
26 * out or nothing if in (all over the DTO endpoint).
28 * For each submitted segment request, a notification will come over
29 * the NEP endpoint and a transfer result (struct xfer_result) will
30 * arrive in the DTI URB. Read it, get the xfer ID, see if there is
31 * data coming (inbound transfer), schedule a read and handle it.
33 * Sounds simple; it is a pain to implement.
40 * LIFE CYCLE / STATE DIAGRAM
44 * THIS CODE IS DISGUSTING
46 * Warned you are; it's my second try and still not happy with it.
52 * - Supports DMA xfers, control, bulk and maybe interrupt
54 * - Does not recycle unused rpipes
56 * An rpipe is assigned to an endpoint the first time it is used,
57 * and then it's there, assigned, until the endpoint is disabled
58 * (destroyed [{h,d}wahc_op_ep_disable()]). The assignment of the
59 * rpipe to the endpoint is done under the wa->rpipe_sem semaphore
60 * (should be a mutex).
62 * Two ways it could be done:
64 * (a) set up a timer every time an rpipe's use count drops to 1
65 * (which means unused) or when a transfer ends. Reset the
66 * timer when an xfer is queued. If the timer expires, release
67 * the rpipe [see rpipe_ep_disable() and the sketch below].
69 * (b) when looking for free rpipes to attach [rpipe_get_by_ep()],
70 * when none are found, go over the list, check their endpoint
71 * and their activity record (if no last-xfer-done-ts in the
72 * last x seconds), take it.
74 * However, given that we have a set of limited
75 * resources (max-segments-at-the-same-time per xfer,
76 * xfers-per-rpipe, blocks-per-rpipe, rpipes-per-host), in the end
77 * we are going to have to rebuild all this based on a scheduler:
78 * we keep a list of transactions to do and, based on the
79 * availability of the different required components (blocks,
80 * rpipes, segment slots, etc), we schedule them. Painful.
82 #include <linux/init.h>
83 #include <linux/spinlock.h>
84 #include <linux/slab.h>
85 #include <linux/hash.h>
106 static void wa_xfer_delayed_run(struct wa_rpipe *);
109 * Life cycle governed by 'struct urb' (the refcount of the struct is
110 * that of the 'struct urb' and usb_free_urb() would free the whole
115 struct urb *dto_urb; /* for data output? */
116 struct list_head list_node; /* for rpipe->req_list */
117 struct wa_xfer *xfer; /* out xfer */
118 u8 index; /* which segment we are */
119 enum wa_seg_status status;
120 ssize_t result; /* bytes xfered or error */
121 struct wa_xfer_hdr xfer_hdr;
122 u8 xfer_extra[]; /* extra space for xfer_hdr_ctl */
125 static void wa_seg_init(struct wa_seg *seg)
127 /* usb_init_urb() repeats a lot of work, so we do it here */
128 kref_init(&seg->urb.kref);
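/* The enclosing wa_seg comes from kzalloc(), so the rest of the
 * embedded URB is already zeroed; only the kref needs real init. */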
132 * Protected by xfer->lock
137 struct list_head list_node;
141 struct wahc *wa; /* Wire adapter we are plugged to */
142 struct usb_host_endpoint *ep;
143 struct urb *urb; /* URB we are transferring for */
144 struct wa_seg **seg; /* transfer segments */
145 u8 segs, segs_submitted, segs_done;
146 unsigned is_inbound:1;
151 gfp_t gfp; /* allocation mask */
153 struct wusb_dev *wusb_dev; /* for activity timestamps */
156 static inline void wa_xfer_init(struct wa_xfer *xfer)
158 kref_init(&xfer->refcnt);
159 INIT_LIST_HEAD(&xfer->list_node);
160 spin_lock_init(&xfer->lock);
164 * Destroy a transfer structure
166 * Note that the xfer->seg[index] thingies follow the URB life cycle,
167 * so we need to put them, not free them.
169 static void wa_xfer_destroy(struct kref *_xfer)
171 struct wa_xfer *xfer = container_of(_xfer, struct wa_xfer, refcnt);
174 for (cnt = 0; cnt < xfer->segs; cnt++) {
175 if (xfer->is_inbound)
176 usb_put_urb(xfer->seg[cnt]->dto_urb);
177 usb_put_urb(&xfer->seg[cnt]->urb);
183 static void wa_xfer_get(struct wa_xfer *xfer)
185 kref_get(&xfer->refcnt);
188 static void wa_xfer_put(struct wa_xfer *xfer)
190 kref_put(&xfer->refcnt, wa_xfer_destroy);
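/* Note: every wa_xfer_get() [e.g. the one in wa_xfer_get_by_id()] has
 * to be balanced by a wa_xfer_put(), or the xfer is never destroyed. */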
196 * xfer->lock has to be unlocked
198 * We take xfer->lock for setting the result; this is a barrier
199 * against drivers/usb/core/hcd.c:unlink1() being called after we call
200 * usb_hcd_giveback_urb() and wa_urb_dequeue() trying to get a
201 * reference to the transfer.
203 static void wa_xfer_giveback(struct wa_xfer *xfer)
207 spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags);
208 list_del_init(&xfer->list_node);
209 spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags);
210 /* FIXME: segmentation broken -- kills DWA */
211 wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result);
219 * xfer->lock has to be unlocked
221 static void wa_xfer_completion(struct wa_xfer *xfer)
224 wusb_dev_put(xfer->wusb_dev);
225 rpipe_put(xfer->ep->hcpriv);
226 wa_xfer_giveback(xfer);
230 * If transfer is done, wrap it up and return true
232 * xfer->lock has to be locked
234 static unsigned __wa_xfer_is_done(struct wa_xfer *xfer)
236 struct device *dev = &xfer->wa->usb_iface->dev;
237 unsigned result, cnt;
239 struct urb *urb = xfer->urb;
240 unsigned found_short = 0;
242 result = xfer->segs_done == xfer->segs_submitted;
245 urb->actual_length = 0;
246 for (cnt = 0; cnt < xfer->segs; cnt++) {
247 seg = xfer->seg[cnt];
248 switch (seg->status) {
250 if (found_short && seg->result > 0) {
251 dev_dbg(dev, "xfer %p#%u: bad short segments (%zu)\n",
252 xfer, cnt, seg->result);
253 urb->status = -EINVAL;
256 urb->actual_length += seg->result;
257 if (seg->result < xfer->seg_size
258 && cnt != xfer->segs-1)
260 dev_dbg(dev, "xfer %p#%u: DONE short %d "
261 "result %zu urb->actual_length %d\n",
262 xfer, seg->index, found_short, seg->result,
266 xfer->result = seg->result;
267 dev_dbg(dev, "xfer %p#%u: ERROR result %zu\n",
268 xfer, seg->index, seg->result);
271 dev_dbg(dev, "xfer %p#%u ABORTED: result %d\n",
272 xfer, seg->index, urb->status);
273 xfer->result = urb->status;
276 dev_warn(dev, "xfer %p#%u: is_done bad state %d\n",
277 xfer, cnt, seg->status);
278 xfer->result = -EINVAL;
288 * Initialize a transfer's ID
290 * We need to use a sequential number; if we use the pointer or the
291 * hash of the pointer, it can repeat over sequential transfers and
292 * then it will confuse the HWA... wonder why in hell they put a 32
293 * bit handle in there then.
295 static void wa_xfer_id_init(struct wa_xfer *xfer)
297 xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
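/* The counter simply wraps at 2^32; we assume no two transfers in
 * flight at the same time can end up with the same ID. */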
301 * Return the xfer's ID associated with xfer
305 static u32 wa_xfer_id(struct wa_xfer *xfer)
311 * Search for a transfer with the given ID on the WA's transfer list
313 * IDs are assigned sequentially by wa_xfer_id_init(), so we simply
314 * compare against each transfer's ID.
316 * @returns NULL if not found; otherwise the xfer, with a reference taken.
318 static struct wa_xfer *wa_xfer_get_by_id(struct wahc *wa, u32 id)
321 struct wa_xfer *xfer_itr;
322 spin_lock_irqsave(&wa->xfer_list_lock, flags);
323 list_for_each_entry(xfer_itr, &wa->xfer_list, list_node) {
324 if (id == xfer_itr->id) {
325 wa_xfer_get(xfer_itr);
331 spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
335 struct wa_xfer_abort_buffer {
337 struct wa_xfer_abort cmd;
340 static void __wa_xfer_abort_cb(struct urb *urb)
342 struct wa_xfer_abort_buffer *b = urb->context;
343 usb_put_urb(&b->urb);
347 * Aborts an ongoing transaction
349 * Assumes the transfer is referenced and locked and in a submitted
350 * state (mainly that there is an endpoint/rpipe assigned).
352 * The callback (see above) does nothing but freeing up the data by
353 * putting the URB. Because the URB is allocated at the head of the
354 * struct, the whole space we allocated is kfreed.
356 * We'll get an 'aborted transaction' xfer result on DTI, which we'll
357 * politely ignore because at this point the transaction has been
358 * marked as aborted already.
360 static void __wa_xfer_abort(struct wa_xfer *xfer)
363 struct device *dev = &xfer->wa->usb_iface->dev;
364 struct wa_xfer_abort_buffer *b;
365 struct wa_rpipe *rpipe = xfer->ep->hcpriv;
367 b = kmalloc(sizeof(*b), GFP_ATOMIC);
370 b->cmd.bLength = sizeof(b->cmd);
371 b->cmd.bRequestType = WA_XFER_ABORT;
372 b->cmd.wRPipe = rpipe->descr.wRPipeIndex;
373 b->cmd.dwTransferID = wa_xfer_id(xfer);
375 usb_init_urb(&b->urb);
376 usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev,
377 usb_sndbulkpipe(xfer->wa->usb_dev,
378 xfer->wa->dto_epd->bEndpointAddress),
379 &b->cmd, sizeof(b->cmd), __wa_xfer_abort_cb, b);
380 result = usb_submit_urb(&b->urb, GFP_ATOMIC);
383 return; /* callback frees! */
387 if (printk_ratelimit())
388 dev_err(dev, "xfer %p: Can't submit abort request: %d\n",
398 * @returns < 0 on error, transfer segment request size if ok
400 static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
401 enum wa_xfer_type *pxfer_type)
404 struct device *dev = &xfer->wa->usb_iface->dev;
406 struct urb *urb = xfer->urb;
407 struct wa_rpipe *rpipe = xfer->ep->hcpriv;
409 switch (rpipe->descr.bmAttribute & 0x3) {
410 case USB_ENDPOINT_XFER_CONTROL:
411 *pxfer_type = WA_XFER_TYPE_CTL;
412 result = sizeof(struct wa_xfer_ctl);
414 case USB_ENDPOINT_XFER_INT:
415 case USB_ENDPOINT_XFER_BULK:
416 *pxfer_type = WA_XFER_TYPE_BI;
417 result = sizeof(struct wa_xfer_bi);
419 case USB_ENDPOINT_XFER_ISOC:
420 dev_err(dev, "FIXME: ISOC not implemented\n");
426 result = -EINVAL; /* shut gcc up */
428 xfer->is_inbound = urb->pipe & USB_DIR_IN ? 1 : 0;
429 xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0;
430 xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks)
431 * 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);
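/* Precedence note: the expression above parses as (wBlocks * 1) <<
 * (bRPipeBlockSize - 1), i.e. wBlocks blocks of 2^(bRPipeBlockSize-1)
 * bytes each; e.g. wBlocks == 4, bRPipeBlockSize == 7 gives a
 * seg_size of 4 << 6 == 256 bytes. */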
432 /* Compute the segment size and make sure it is a multiple of
433 * the maxpktsize (WUSB1.0[8.3.3.1])...not really too much of
435 maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize);
436 if (xfer->seg_size < maxpktsize) {
437 dev_err(dev, "HW BUG? seg_size %zu smaller than maxpktsize "
438 "%zu\n", xfer->seg_size, maxpktsize);
442 xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
443 xfer->segs = (urb->transfer_buffer_length + xfer->seg_size - 1)
445 if (xfer->segs >= WA_SEGS_MAX) {
446 dev_err(dev, "BUG? oops, number of segments %d bigger than %d\n",
447 (int)(urb->transfer_buffer_length / xfer->seg_size),
452 if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL)
458 /* Fill in the common request header and xfer-type specific data. */
459 static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,
460 struct wa_xfer_hdr *xfer_hdr0,
461 enum wa_xfer_type xfer_type,
462 size_t xfer_hdr_size)
464 struct wa_rpipe *rpipe = xfer->ep->hcpriv;
466 xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
467 xfer_hdr0->bLength = xfer_hdr_size;
468 xfer_hdr0->bRequestType = xfer_type;
469 xfer_hdr0->wRPipe = rpipe->descr.wRPipeIndex;
470 xfer_hdr0->dwTransferID = wa_xfer_id(xfer);
471 xfer_hdr0->bTransferSegment = 0;
473 case WA_XFER_TYPE_CTL: {
474 struct wa_xfer_ctl *xfer_ctl =
475 container_of(xfer_hdr0, struct wa_xfer_ctl, hdr);
476 xfer_ctl->bmAttribute = xfer->is_inbound ? 1 : 0;
477 BUG_ON(xfer->urb->transfer_flags & URB_NO_SETUP_DMA_MAP
478 && xfer->urb->setup_packet == NULL);
479 memcpy(&xfer_ctl->baSetupData, xfer->urb->setup_packet,
480 sizeof(xfer_ctl->baSetupData));
483 case WA_XFER_TYPE_BI:
485 case WA_XFER_TYPE_ISO:
486 printk(KERN_ERR "FIXME: ISOC not implemented\n");
493 * Callback for the OUT data phase of the segment request
495 * Check wa_seg_cb(); most comments also apply here because this
496 * function does almost the same thing and they work closely
499 * If the seg request has failed but this DTO phase has succeeded,
500 * wa_seg_cb() has already failed the segment and moved the
501 * status to WA_SEG_ERROR, so this will go through 'case 0' and
502 * effectively do nothing.
504 static void wa_seg_dto_cb(struct urb *urb)
506 struct wa_seg *seg = urb->context;
507 struct wa_xfer *xfer = seg->xfer;
510 struct wa_rpipe *rpipe;
512 unsigned rpipe_ready = 0;
515 switch (urb->status) {
517 spin_lock_irqsave(&xfer->lock, flags);
519 dev = &wa->usb_iface->dev;
520 dev_dbg(dev, "xfer %p#%u: data out done (%d bytes)\n",
521 xfer, seg->index, urb->actual_length);
522 if (seg->status < WA_SEG_PENDING)
523 seg->status = WA_SEG_PENDING;
524 seg->result = urb->actual_length;
525 spin_unlock_irqrestore(&xfer->lock, flags);
527 case -ECONNRESET: /* URB unlinked; no need to do anything */
528 case -ENOENT: /* as it was done by whoever unlinked us */
530 default: /* Other errors ... */
531 spin_lock_irqsave(&xfer->lock, flags);
533 dev = &wa->usb_iface->dev;
534 rpipe = xfer->ep->hcpriv;
535 dev_dbg(dev, "xfer %p#%u: data out error %d\n",
536 xfer, seg->index, urb->status);
537 if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
538 EDC_ERROR_TIMEFRAME)){
539 dev_err(dev, "DTO: URB max acceptable errors "
540 "exceeded, resetting device\n");
543 if (seg->status != WA_SEG_ERROR) {
544 seg->status = WA_SEG_ERROR;
545 seg->result = urb->status;
547 __wa_xfer_abort(xfer);
548 rpipe_ready = rpipe_avail_inc(rpipe);
549 done = __wa_xfer_is_done(xfer);
551 spin_unlock_irqrestore(&xfer->lock, flags);
553 wa_xfer_completion(xfer);
555 wa_xfer_delayed_run(rpipe);
560 * Callback for the segment request
562 * If successful transition state (unless already transitioned or
563 * outbound transfer); otherwise, take a note of the error, mark this
564 * segment done and try completion.
566 * Note we don't access it until we are sure that the transfer hasn't
567 * been cancelled (ECONNRESET, ENOENT), which could mean that
568 * seg->xfer could be already gone.
570 * We have to check before setting the status to WA_SEG_PENDING
571 * because sometimes the xfer result callback arrives before this
572 * callback (geeeeeeze), so it might happen that we are already in
573 * another state. Likewise, we don't set it if the transfer is inbound,
574 * as in that case, wa_seg_dto_cb will do it when the OUT data phase
577 static void wa_seg_cb(struct urb *urb)
579 struct wa_seg *seg = urb->context;
580 struct wa_xfer *xfer = seg->xfer;
583 struct wa_rpipe *rpipe;
585 unsigned rpipe_ready;
588 switch (urb->status) {
590 spin_lock_irqsave(&xfer->lock, flags);
592 dev = &wa->usb_iface->dev;
593 dev_dbg(dev, "xfer %p#%u: request done\n", xfer, seg->index);
594 if (xfer->is_inbound && seg->status < WA_SEG_PENDING)
595 seg->status = WA_SEG_PENDING;
596 spin_unlock_irqrestore(&xfer->lock, flags);
598 case -ECONNRESET: /* URB unlinked; no need to do anything */
599 case -ENOENT: /* as it was done by whoever unlinked us */
601 default: /* Other errors ... */
602 spin_lock_irqsave(&xfer->lock, flags);
604 dev = &wa->usb_iface->dev;
605 rpipe = xfer->ep->hcpriv;
606 if (printk_ratelimit())
607 dev_err(dev, "xfer %p#%u: request error %d\n",
608 xfer, seg->index, urb->status);
609 if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
610 EDC_ERROR_TIMEFRAME)){
611 dev_err(dev, "DTO: URB max acceptable errors "
612 "exceeded, resetting device\n");
615 usb_unlink_urb(seg->dto_urb);
616 seg->status = WA_SEG_ERROR;
617 seg->result = urb->status;
619 __wa_xfer_abort(xfer);
620 rpipe_ready = rpipe_avail_inc(rpipe);
621 done = __wa_xfer_is_done(xfer);
622 spin_unlock_irqrestore(&xfer->lock, flags);
624 wa_xfer_completion(xfer);
626 wa_xfer_delayed_run(rpipe);
631 * Allocate the segs array and initialize each of them
633 * The segments are freed by wa_xfer_destroy() when the xfer use count
634 * drops to zero; however, because each segment is given the same life
635 * cycle as the USB URB it contains, it is actually freed by
636 * usb_put_urb() on the contained USB URB (twisted, eh?).
638 static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
641 size_t alloc_size = sizeof(*xfer->seg[0])
642 - sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size;
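/* The size juggling above makes each allocation big enough for the
 * xfer_extra[] flexible array at the end of struct wa_seg to hold
 * the type-specific part of the transfer request header. */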
643 struct usb_device *usb_dev = xfer->wa->usb_dev;
644 const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd;
646 size_t buf_itr, buf_size, buf_itr_size;
649 xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC);
650 if (xfer->seg == NULL)
651 goto error_segs_kzalloc;
653 buf_size = xfer->urb->transfer_buffer_length;
654 for (cnt = 0; cnt < xfer->segs; cnt++) {
655 seg = xfer->seg[cnt] = kzalloc(alloc_size, GFP_ATOMIC);
657 goto error_seg_kzalloc;
661 usb_fill_bulk_urb(&seg->urb, usb_dev,
662 usb_sndbulkpipe(usb_dev,
663 dto_epd->bEndpointAddress),
664 &seg->xfer_hdr, xfer_hdr_size,
666 buf_itr_size = buf_size > xfer->seg_size ?
667 xfer->seg_size : buf_size;
668 if (xfer->is_inbound == 0 && buf_size > 0) {
669 seg->dto_urb = usb_alloc_urb(0, GFP_ATOMIC);
670 if (seg->dto_urb == NULL)
671 goto error_dto_alloc;
673 seg->dto_urb, usb_dev,
674 usb_sndbulkpipe(usb_dev,
675 dto_epd->bEndpointAddress),
676 NULL, 0, wa_seg_dto_cb, seg);
678 seg->dto_urb->transfer_dma =
679 xfer->urb->transfer_dma + buf_itr;
680 seg->dto_urb->transfer_flags |=
681 URB_NO_TRANSFER_DMA_MAP;
683 seg->dto_urb->transfer_buffer =
684 xfer->urb->transfer_buffer + buf_itr;
685 seg->dto_urb->transfer_buffer_length = buf_itr_size;
687 seg->status = WA_SEG_READY;
688 buf_itr += buf_itr_size;
689 buf_size -= buf_itr_size;
694 kfree(xfer->seg[cnt]);
697 /* use the fact that cnt is left at where it failed */
698 for (; cnt > 0; cnt--) {
699 if (xfer->is_inbound == 0)
700 kfree(xfer->seg[cnt]->dto_urb);
701 kfree(xfer->seg[cnt]);
708 * Allocates all the stuff needed to submit a transfer
710 * Breaks the whole data buffer into a list of segments; each one has a
711 * structure allocated to it and linked in xfer->seg[index]
713 * FIXME: merge setup_segs() and the last part of this function, no
714 * need to do two for loops when we could run everything in a
717 static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb)
720 struct device *dev = &xfer->wa->usb_iface->dev;
721 enum wa_xfer_type xfer_type = 0; /* shut up GCC */
722 size_t xfer_hdr_size, cnt, transfer_size;
723 struct wa_xfer_hdr *xfer_hdr0, *xfer_hdr;
725 result = __wa_xfer_setup_sizes(xfer, &xfer_type);
727 goto error_setup_sizes;
728 xfer_hdr_size = result;
729 result = __wa_xfer_setup_segs(xfer, xfer_hdr_size);
731 dev_err(dev, "xfer %p: Failed to allocate %d segments: %d\n",
732 xfer, xfer->segs, result);
733 goto error_setup_segs;
735 /* Fill the first header */
736 xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
737 wa_xfer_id_init(xfer);
738 __wa_xfer_setup_hdr0(xfer, xfer_hdr0, xfer_type, xfer_hdr_size);
740 /* Fill the remaining headers */
741 xfer_hdr = xfer_hdr0;
742 transfer_size = urb->transfer_buffer_length;
743 xfer_hdr0->dwTransferLength = transfer_size > xfer->seg_size ?
744 xfer->seg_size : transfer_size;
745 transfer_size -= xfer->seg_size;
746 for (cnt = 1; cnt < xfer->segs; cnt++) {
747 xfer_hdr = &xfer->seg[cnt]->xfer_hdr;
748 memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
749 xfer_hdr->bTransferSegment = cnt;
750 xfer_hdr->dwTransferLength = transfer_size > xfer->seg_size ?
751 cpu_to_le32(xfer->seg_size)
752 : cpu_to_le32(transfer_size);
753 xfer->seg[cnt]->status = WA_SEG_READY;
754 transfer_size -= xfer->seg_size;
756 xfer_hdr->bTransferSegment |= 0x80; /* this is the last segment */
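/* Bit 7 of bTransferSegment marks the last segment of the transfer;
 * the result path recovers the index with bTransferSegment & 0x7f. */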
766 * rpipe->seg_lock is held!
768 static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
772 result = usb_submit_urb(&seg->urb, GFP_ATOMIC);
774 printk(KERN_ERR "xfer %p#%u: REQ submit failed: %d\n",
775 xfer, seg->index, result);
776 goto error_seg_submit;
779 result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
781 printk(KERN_ERR "xfer %p#%u: DTO submit failed: %d\n",
782 xfer, seg->index, result);
783 goto error_dto_submit;
786 seg->status = WA_SEG_SUBMITTED;
787 rpipe_avail_dec(rpipe);
791 usb_unlink_urb(&seg->urb);
793 seg->status = WA_SEG_ERROR;
794 seg->result = result;
799 * Execute more queued request segments until the maximum concurrent allowed is reached
801 * The ugly unlock/lock sequence on the error path is needed as the
802 * xfer->lock normally nests the seg_lock and not vice versa.
805 static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)
808 struct device *dev = &rpipe->wa->usb_iface->dev;
810 struct wa_xfer *xfer;
813 spin_lock_irqsave(&rpipe->seg_lock, flags);
814 while (atomic_read(&rpipe->segs_available) > 0
815 && !list_empty(&rpipe->seg_list)) {
816 seg = list_entry(rpipe->seg_list.next, struct wa_seg,
818 list_del(&seg->list_node);
820 result = __wa_seg_submit(rpipe, xfer, seg);
821 dev_dbg(dev, "xfer %p#%u submitted from delayed [%d segments available] %d\n",
822 xfer, seg->index, atomic_read(&rpipe->segs_available), result);
823 if (unlikely(result < 0)) {
824 spin_unlock_irqrestore(&rpipe->seg_lock, flags);
825 spin_lock_irqsave(&xfer->lock, flags);
826 __wa_xfer_abort(xfer);
828 spin_unlock_irqrestore(&xfer->lock, flags);
829 spin_lock_irqsave(&rpipe->seg_lock, flags);
832 spin_unlock_irqrestore(&rpipe->seg_lock, flags);
837 * xfer->lock is taken
839 * On failure submitting, we just stop submitting and return the error;
840 * wa_urb_enqueue_b() will execute the completion path
842 static int __wa_xfer_submit(struct wa_xfer *xfer)
845 struct wahc *wa = xfer->wa;
846 struct device *dev = &wa->usb_iface->dev;
850 struct wa_rpipe *rpipe = xfer->ep->hcpriv;
851 size_t maxrequests = le16_to_cpu(rpipe->descr.wRequests);
855 spin_lock_irqsave(&wa->xfer_list_lock, flags);
856 list_add_tail(&xfer->list_node, &wa->xfer_list);
857 spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
859 BUG_ON(atomic_read(&rpipe->segs_available) > maxrequests);
861 spin_lock_irqsave(&rpipe->seg_lock, flags);
862 for (cnt = 0; cnt < xfer->segs; cnt++) {
863 available = atomic_read(&rpipe->segs_available);
864 empty = list_empty(&rpipe->seg_list);
865 seg = xfer->seg[cnt];
866 dev_dbg(dev, "xfer %p#%u: available %u empty %u (%s)\n",
867 xfer, cnt, available, empty,
868 available == 0 || !empty ? "delayed" : "submitted");
869 if (available == 0 || !empty) {
870 dev_dbg(dev, "xfer %p#%u: delayed\n", xfer, cnt);
871 seg->status = WA_SEG_DELAYED;
872 list_add_tail(&seg->list_node, &rpipe->seg_list);
874 result = __wa_seg_submit(rpipe, xfer, seg);
876 __wa_xfer_abort(xfer);
877 goto error_seg_submit;
880 xfer->segs_submitted++;
883 spin_unlock_irqrestore(&rpipe->seg_lock, flags);
888 * Second part of a URB/transfer enqueue
890 * Assumes this comes from wa_urb_enqueue() [maybe through
891 * wa_urb_enqueue_run()]. At this point:
893 * xfer->wa filled and refcounted
894 * xfer->ep filled with rpipe refcounted if
896 * xfer->urb filled and refcounted (this is the case when called
897 * from wa_urb_enqueue() as we come from usb_submit_urb()
898 * and when called by wa_urb_enqueue_run(), as we took an
899 * extra ref dropped by _run() after we return).
902 * If we fail at __wa_xfer_submit(), then we just check if we are done
903 * and if so, we run the completion procedure. However, if we are not
904 * yet done, we do nothing and wait for the completion handlers from
905 * the submitted URBs or from the xfer-result path to kick in. If xfer
906 * result never kicks in, the xfer will timeout from the USB code and
907 * dequeue() will be called.
909 static void wa_urb_enqueue_b(struct wa_xfer *xfer)
913 struct urb *urb = xfer->urb;
914 struct wahc *wa = xfer->wa;
915 struct wusbhc *wusbhc = wa->wusb;
916 struct wusb_dev *wusb_dev;
919 result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp);
921 goto error_rpipe_get;
923 /* FIXME: segmentation broken -- kills DWA */
924 mutex_lock(&wusbhc->mutex); /* get a WUSB dev */
925 if (urb->dev == NULL) {
926 mutex_unlock(&wusbhc->mutex);
929 wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);
930 if (wusb_dev == NULL) {
931 mutex_unlock(&wusbhc->mutex);
934 mutex_unlock(&wusbhc->mutex);
936 spin_lock_irqsave(&xfer->lock, flags);
937 xfer->wusb_dev = wusb_dev;
938 result = urb->status;
939 if (urb->status != -EINPROGRESS)
942 result = __wa_xfer_setup(xfer, urb);
944 goto error_xfer_setup;
945 result = __wa_xfer_submit(xfer);
947 goto error_xfer_submit;
948 spin_unlock_irqrestore(&xfer->lock, flags);
951 /* this is basically wa_xfer_completion() broken up; wa_xfer_giveback()
952 * does a wa_xfer_put() that will call wa_xfer_destroy() and clean
957 spin_unlock_irqrestore(&xfer->lock, flags);
958 /* FIXME: segmentation broken, kills DWA */
960 wusb_dev_put(wusb_dev);
962 rpipe_put(xfer->ep->hcpriv);
964 xfer->result = result;
965 wa_xfer_giveback(xfer);
969 done = __wa_xfer_is_done(xfer);
970 xfer->result = result;
971 spin_unlock_irqrestore(&xfer->lock, flags);
973 wa_xfer_completion(xfer);
977 * Execute the delayed transfers in the Wire Adapter @wa
979 * We need to be careful here, as dequeue() could be called in the
980 * middle. That's why we do the whole thing under the
981 * wa->xfer_list_lock. If dequeue() jumps in, it first locks urb->lock
982 * and then checks the list -- so, as we would be acquiring in inverse
983 * order, we just drop the lock once we have the xfer and reacquire it
986 void wa_urb_enqueue_run(struct work_struct *ws)
988 struct wahc *wa = container_of(ws, struct wahc, xfer_work);
989 struct wa_xfer *xfer, *next;
992 spin_lock_irq(&wa->xfer_list_lock);
993 list_for_each_entry_safe(xfer, next, &wa->xfer_delayed_list,
995 list_del_init(&xfer->list_node);
996 spin_unlock_irq(&wa->xfer_list_lock);
999 wa_urb_enqueue_b(xfer);
1000 usb_put_urb(urb); /* taken when queuing */
1002 spin_lock_irq(&wa->xfer_list_lock);
1004 spin_unlock_irq(&wa->xfer_list_lock);
1006 EXPORT_SYMBOL_GPL(wa_urb_enqueue_run);
1009 * Submit a transfer to the Wire Adapter in a delayed way
1011 * The process of enqueuing involves possible sleeps [see
1012 * enqueue_b(), for the rpipe_get() and the mutex_lock()]. If we are
1013 * in an atomic section, we defer the enqueue_b() call; otherwise we call it directly.
1015 * @urb: We own a reference to it, taken by the HCI Linux USB stack, that
1016 * will be given up by calling usb_hcd_giveback_urb() or by
1017 * returning an error from this function -> ergo we don't have to
1020 int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
1021 struct urb *urb, gfp_t gfp)
1024 struct device *dev = &wa->usb_iface->dev;
1025 struct wa_xfer *xfer;
1026 unsigned long my_flags;
1027 unsigned cant_sleep = irqs_disabled() | in_atomic();
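/* rpipe_get_by_ep() and the wusbhc mutex taken in wa_urb_enqueue_b()
 * may sleep, so from atomic context we defer to the workqueue. */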
1029 if (urb->transfer_buffer == NULL
1030 && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
1031 && urb->transfer_buffer_length != 0) {
1032 dev_err(dev, "BUG? urb %p: NULL xfer buffer & NODMA\n", urb);
1037 xfer = kzalloc(sizeof(*xfer), gfp);
1042 if (urb->status != -EINPROGRESS) /* cancelled */
1043 goto error_dequeued; /* before starting? */
1045 xfer->wa = wa_get(wa);
1051 dev_dbg(dev, "xfer %p urb %p pipe 0x%02x [%d bytes] %s %s %s\n",
1052 xfer, urb, urb->pipe, urb->transfer_buffer_length,
1053 urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? "dma" : "nodma",
1054 urb->pipe & USB_DIR_IN ? "inbound" : "outbound",
1055 cant_sleep ? "deferred" : "inline");
1059 spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
1060 list_add_tail(&xfer->list_node, &wa->xfer_delayed_list);
1061 spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
1062 queue_work(wusbd, &wa->xfer_work);
1064 wa_urb_enqueue_b(xfer);
1073 EXPORT_SYMBOL_GPL(wa_urb_enqueue);
1076 * Dequeue a URB and make sure usb_hcd_giveback_urb() [completion
1077 * handler] is called.
1079 * Until a transfer goes successfully through wa_urb_enqueue(), it
1080 * needs to be dequeued with the completion called by hand; when stuck
1081 * in the delayed list or before wa_xfer_setup() is called, we need to do completion.
1083 * not setup: if there is no hcpriv yet, that means that enqueue
1084 * still had no time to set the xfer up. Because
1085 * urb->status should be other than -EINPROGRESS,
1086 * enqueue() will catch that and bail out.
1088 * If the transfer has gone through setup, we just need to clean it
1089 * up. If it has gone through submit(), we have to abort it [with an
1090 * async request] and then make sure we cancel each segment.
1093 int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
1095 unsigned long flags, flags2;
1096 struct wa_xfer *xfer;
1098 struct wa_rpipe *rpipe;
1100 unsigned rpipe_ready = 0;
1104 /* Nothing is set up yet; enqueue() will see urb->status !=
1105 * -EINPROGRESS (set by the hcd layer) and bail out with an
1106 * error, so there is no need to do completion here.
1108 BUG_ON(urb->status == -EINPROGRESS);
1111 spin_lock_irqsave(&xfer->lock, flags);
1112 rpipe = xfer->ep->hcpriv;
1113 /* Check the delayed list -> if there, release and complete */
1114 spin_lock_irqsave(&wa->xfer_list_lock, flags2);
1115 if (!list_empty(&xfer->list_node) && xfer->seg == NULL)
1116 goto dequeue_delayed;
1117 spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
1118 if (xfer->seg == NULL) /* still hasn't reached */
1119 goto out_unlock; /* setup(), enqueue_b() completes */
1120 /* Ok, the xfer is in flight already, it's been setup and submitted.*/
1121 __wa_xfer_abort(xfer);
1122 for (cnt = 0; cnt < xfer->segs; cnt++) {
1123 seg = xfer->seg[cnt];
1124 switch (seg->status) {
1125 case WA_SEG_NOTREADY:
1127 printk(KERN_ERR "xfer %p#%u: dequeue bad state %u\n",
1128 xfer, cnt, seg->status);
1131 case WA_SEG_DELAYED:
1132 seg->status = WA_SEG_ABORTED;
1133 spin_lock_irqsave(&rpipe->seg_lock, flags2);
1134 list_del(&seg->list_node);
1136 rpipe_ready = rpipe_avail_inc(rpipe);
1137 spin_unlock_irqrestore(&rpipe->seg_lock, flags2);
1139 case WA_SEG_SUBMITTED:
1140 seg->status = WA_SEG_ABORTED;
1141 usb_unlink_urb(&seg->urb);
1142 if (xfer->is_inbound == 0)
1143 usb_unlink_urb(seg->dto_urb);
1145 rpipe_ready = rpipe_avail_inc(rpipe);
1147 case WA_SEG_PENDING:
1148 seg->status = WA_SEG_ABORTED;
1150 rpipe_ready = rpipe_avail_inc(rpipe);
1152 case WA_SEG_DTI_PENDING:
1153 usb_unlink_urb(wa->dti_urb);
1154 seg->status = WA_SEG_ABORTED;
1156 rpipe_ready = rpipe_avail_inc(rpipe);
1160 case WA_SEG_ABORTED:
1164 xfer->result = urb->status; /* -ENOENT or -ECONNRESET */
1165 __wa_xfer_is_done(xfer);
1166 spin_unlock_irqrestore(&xfer->lock, flags);
1167 wa_xfer_completion(xfer);
1169 wa_xfer_delayed_run(rpipe);
1173 spin_unlock_irqrestore(&xfer->lock, flags);
1178 list_del_init(&xfer->list_node);
1179 spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
1180 xfer->result = urb->status;
1181 spin_unlock_irqrestore(&xfer->lock, flags);
1182 wa_xfer_giveback(xfer);
1183 usb_put_urb(urb); /* we got a ref in enqueue() */
1186 EXPORT_SYMBOL_GPL(wa_urb_dequeue);
1189 * Translation from WA status codes (WUSB1.0 Table 8.15) to errno
1192 * Positive errno values are internal inconsistencies and should be
1193 * flagged louder. Negative are to be passed up to the user in the
1196 * @status: USB WA status code -- high two bits are stripped.
1198 static int wa_xfer_status_to_errno(u8 status)
1201 u8 real_status = status;
1202 static int xlat[] = {
1203 [WA_XFER_STATUS_SUCCESS] = 0,
1204 [WA_XFER_STATUS_HALTED] = -EPIPE,
1205 [WA_XFER_STATUS_DATA_BUFFER_ERROR] = -ENOBUFS,
1206 [WA_XFER_STATUS_BABBLE] = -EOVERFLOW,
1207 [WA_XFER_RESERVED] = EINVAL,
1208 [WA_XFER_STATUS_NOT_FOUND] = 0,
1209 [WA_XFER_STATUS_INSUFFICIENT_RESOURCE] = -ENOMEM,
1210 [WA_XFER_STATUS_TRANSACTION_ERROR] = -EILSEQ,
1211 [WA_XFER_STATUS_ABORTED] = -EINTR,
1212 [WA_XFER_STATUS_RPIPE_NOT_READY] = EINVAL,
1213 [WA_XFER_INVALID_FORMAT] = EINVAL,
1214 [WA_XFER_UNEXPECTED_SEGMENT_NUMBER] = EINVAL,
1215 [WA_XFER_STATUS_RPIPE_TYPE_MISMATCH] = EINVAL,
1221 if (status >= ARRAY_SIZE(xlat)) {
1222 if (printk_ratelimit())
1223 printk(KERN_ERR "%s(): BUG? "
1224 "Unknown WA transfer status 0x%02x\n",
1225 __func__, real_status);
1228 errno = xlat[status];
1229 if (unlikely(errno > 0)) {
1230 if (printk_ratelimit())
1231 printk(KERN_ERR "%s(): BUG? "
1232 "Inconsistent WA status: 0x%02x\n",
1233 __func__, real_status);
1240 * Process an xfer result completion message
1242 * inbound transfers: need to schedule a DTI read
1244 * FIXME: this function needs to be broken up into parts
1246 static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer)
1249 struct device *dev = &wa->usb_iface->dev;
1250 unsigned long flags;
1253 struct wa_rpipe *rpipe;
1254 struct wa_xfer_result *xfer_result = wa->xfer_result;
1257 unsigned rpipe_ready = 0;
1259 spin_lock_irqsave(&xfer->lock, flags);
1260 seg_idx = xfer_result->bTransferSegment & 0x7f;
1261 if (unlikely(seg_idx >= xfer->segs))
1263 seg = xfer->seg[seg_idx];
1264 rpipe = xfer->ep->hcpriv;
1265 usb_status = xfer_result->bTransferStatus;
1266 dev_dbg(dev, "xfer %p#%u: bTransferStatus 0x%02x (seg %u)\n",
1267 xfer, seg_idx, usb_status, seg->status);
1268 if (seg->status == WA_SEG_ABORTED
1269 || seg->status == WA_SEG_ERROR) /* already handled */
1270 goto segment_aborted;
1271 if (seg->status == WA_SEG_SUBMITTED) /* oops, got here */
1272 seg->status = WA_SEG_PENDING; /* before wa_seg{_dto}_cb() */
1273 if (seg->status != WA_SEG_PENDING) {
1274 if (printk_ratelimit())
1275 dev_err(dev, "xfer %p#%u: Bad segment state %u\n",
1276 xfer, seg_idx, seg->status);
1277 seg->status = WA_SEG_PENDING; /* workaround/"fix" it */
1279 if (usb_status & 0x80) {
1280 seg->result = wa_xfer_status_to_errno(usb_status);
1281 dev_err(dev, "DTI: xfer %p#%u failed (0x%02x)\n",
1282 xfer, seg->index, usb_status);
1283 goto error_complete;
1285 /* FIXME: we ignore warnings, tally them for stats */
1286 if (usb_status & 0x40) /* Warning?... */
1287 usb_status = 0; /* ... pass */
1288 if (xfer->is_inbound) { /* IN data phase: read to buffer */
1289 seg->status = WA_SEG_DTI_PENDING;
1290 BUG_ON(wa->buf_in_urb->status == -EINPROGRESS);
1292 wa->buf_in_urb->transfer_dma =
1293 xfer->urb->transfer_dma
1294 + seg_idx * xfer->seg_size;
1295 wa->buf_in_urb->transfer_flags
1296 |= URB_NO_TRANSFER_DMA_MAP;
1298 wa->buf_in_urb->transfer_buffer =
1299 xfer->urb->transfer_buffer
1300 + seg_idx * xfer->seg_size;
1301 wa->buf_in_urb->transfer_flags
1302 &= ~URB_NO_TRANSFER_DMA_MAP;
1304 wa->buf_in_urb->transfer_buffer_length =
1305 le32_to_cpu(xfer_result->dwTransferLength);
1306 wa->buf_in_urb->context = seg;
1307 result = usb_submit_urb(wa->buf_in_urb, GFP_ATOMIC);
1309 goto error_submit_buf_in;
1311 /* OUT data phase, complete it */
1312 seg->status = WA_SEG_DONE;
1313 seg->result = le32_to_cpu(xfer_result->dwTransferLength);
1315 rpipe_ready = rpipe_avail_inc(rpipe);
1316 done = __wa_xfer_is_done(xfer);
1318 spin_unlock_irqrestore(&xfer->lock, flags);
1320 wa_xfer_completion(xfer);
1322 wa_xfer_delayed_run(rpipe);
1325 error_submit_buf_in:
1326 if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
1327 dev_err(dev, "DTI: URB max acceptable errors "
1328 "exceeded, resetting device\n");
1331 if (printk_ratelimit())
1332 dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n",
1333 xfer, seg_idx, result);
1334 seg->result = result;
1336 seg->status = WA_SEG_ERROR;
1338 rpipe_ready = rpipe_avail_inc(rpipe);
1339 __wa_xfer_abort(xfer);
1340 done = __wa_xfer_is_done(xfer);
1341 spin_unlock_irqrestore(&xfer->lock, flags);
1343 wa_xfer_completion(xfer);
1345 wa_xfer_delayed_run(rpipe);
1349 spin_unlock_irqrestore(&xfer->lock, flags);
1350 wa_urb_dequeue(wa, xfer->urb);
1351 if (printk_ratelimit())
1352 dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx);
1353 if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
1354 dev_err(dev, "DTI: URB max acceptable errors "
1355 "exceeded, resetting device\n");
1361 /* nothing to do, as the aborter did the completion */
1362 spin_unlock_irqrestore(&xfer->lock, flags);
1366 * Callback for the IN data phase
1368 * If successful transition state; otherwise, take a note of the
1369 * error, mark this segment done and try completion.
1371 * Note we don't access it until we are sure that the transfer hasn't
1372 * been cancelled (ECONNRESET, ENOENT), which could mean that
1373 * seg->xfer could be already gone.
1375 static void wa_buf_in_cb(struct urb *urb)
1377 struct wa_seg *seg = urb->context;
1378 struct wa_xfer *xfer = seg->xfer;
1381 struct wa_rpipe *rpipe;
1382 unsigned rpipe_ready;
1383 unsigned long flags;
1386 switch (urb->status) {
1388 spin_lock_irqsave(&xfer->lock, flags);
1390 dev = &wa->usb_iface->dev;
1391 rpipe = xfer->ep->hcpriv;
1392 dev_dbg(dev, "xfer %p#%u: data in done (%zu bytes)\n",
1393 xfer, seg->index, (size_t)urb->actual_length);
1394 seg->status = WA_SEG_DONE;
1395 seg->result = urb->actual_length;
1397 rpipe_ready = rpipe_avail_inc(rpipe);
1398 done = __wa_xfer_is_done(xfer);
1399 spin_unlock_irqrestore(&xfer->lock, flags);
1401 wa_xfer_completion(xfer);
1403 wa_xfer_delayed_run(rpipe);
1405 case -ECONNRESET: /* URB unlinked; no need to do anything */
1406 case -ENOENT: /* as it was done by whoever unlinked us */
1408 default: /* Other errors ... */
1409 spin_lock_irqsave(&xfer->lock, flags);
1411 dev = &wa->usb_iface->dev;
1412 rpipe = xfer->ep->hcpriv;
1413 if (printk_ratelimit())
1414 dev_err(dev, "xfer %p#%u: data in error %d\n",
1415 xfer, seg->index, urb->status);
1416 if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
1417 EDC_ERROR_TIMEFRAME)){
1418 dev_err(dev, "DTO: URB max acceptable errors "
1419 "exceeded, resetting device\n");
1422 seg->status = WA_SEG_ERROR;
1423 seg->result = urb->status;
1425 rpipe_ready = rpipe_avail_inc(rpipe);
1426 __wa_xfer_abort(xfer);
1427 done = __wa_xfer_is_done(xfer);
1428 spin_unlock_irqrestore(&xfer->lock, flags);
1430 wa_xfer_completion(xfer);
1432 wa_xfer_delayed_run(rpipe);
1437 * Handle an incoming transfer result buffer
1439 * Given a transfer result buffer, it completes the transfer (possibly
1440 * scheduling a buffer-in read) and then resubmits the DTI URB for a
1441 * new transfer result read.
1444 * The xfer_result DTI URB state machine
1446 * States: OFF | RXR (Read-Xfer-Result) | RBI (Read-Buffer-In)
1448 * We start in OFF mode, the first xfer_result notification [through
1449 * wa_handle_notif_xfer()] moves us to RXR by posting the DTI-URB to
1452 * We receive a buffer -- if it is not an xfer_result, we complain and
1453 * repost the DTI-URB. If it is an xfer_result, then we do the xfer seg
1454 * request accounting. If it is an IN segment, we move to RBI and post
1455 * a BUF-IN-URB to the right buffer. The BUF-IN-URB callback will
1456 * repost the DTI-URB and move back to RXR state. If there was no IN
1457 * segment, it will repost the DTI-URB.
1459 * We go back to OFF when we detect a ENOENT or ESHUTDOWN (or too many
1460 * errors) in the URBs.
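* As an illustrative sketch (no such enum exists in this file), the
* states could be spelled out as:
*
*	enum wa_dti_state { WA_DTI_OFF, WA_DTI_RXR, WA_DTI_RBI };
*
* with OFF -> RXR on the first xfer_result notification, RXR -> RBI
* when an IN data phase is posted, RBI -> RXR when the BUF-IN URB
* completes, and anything -> OFF on ENOENT/ESHUTDOWN or too many
* errors.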
1462 static void wa_xfer_result_cb(struct urb *urb)
1465 struct wahc *wa = urb->context;
1466 struct device *dev = &wa->usb_iface->dev;
1467 struct wa_xfer_result *xfer_result;
1469 struct wa_xfer *xfer;
1472 BUG_ON(wa->dti_urb != urb);
1473 switch (wa->dti_urb->status) {
1475 /* We have a xfer result buffer; check it */
1476 dev_dbg(dev, "DTI: xfer result %d bytes at %p\n",
1477 urb->actual_length, urb->transfer_buffer);
1478 if (wa->dti_urb->actual_length != sizeof(*xfer_result)) {
1479 dev_err(dev, "DTI Error: xfer result--bad size "
1480 "xfer result (%d bytes vs %zu needed)\n",
1481 urb->actual_length, sizeof(*xfer_result));
1484 xfer_result = wa->xfer_result;
1485 if (xfer_result->hdr.bLength != sizeof(*xfer_result)) {
1486 dev_err(dev, "DTI Error: xfer result--"
1487 "bad header length %u\n",
1488 xfer_result->hdr.bLength);
1491 if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) {
1492 dev_err(dev, "DTI Error: xfer result--"
1493 "bad header type 0x%02x\n",
1494 xfer_result->hdr.bNotifyType);
1497 usb_status = xfer_result->bTransferStatus & 0x3f;
1498 if (usb_status == WA_XFER_STATUS_ABORTED
1499 || usb_status == WA_XFER_STATUS_NOT_FOUND)
1500 /* taken care of already */
1502 xfer_id = xfer_result->dwTransferID;
1503 xfer = wa_xfer_get_by_id(wa, xfer_id);
1505 /* FIXME: transaction might have been cancelled */
1506 dev_err(dev, "DTI Error: xfer result--"
1507 "unknown xfer 0x%08x (status 0x%02x)\n",
1508 xfer_id, usb_status);
1511 wa_xfer_result_chew(wa, xfer);
1514 case -ENOENT: /* (we killed the URB)...so, no broadcast */
1515 case -ESHUTDOWN: /* going away! */
1516 dev_dbg(dev, "DTI: going down! %d\n", urb->status);
1520 if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS,
1521 EDC_ERROR_TIMEFRAME)) {
1522 dev_err(dev, "DTI: URB max acceptable errors "
1523 "exceeded, resetting device\n");
1527 if (printk_ratelimit())
1528 dev_err(dev, "DTI: URB error %d\n", urb->status);
1531 /* Resubmit the DTI URB */
1532 result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
1534 dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
1535 "resetting\n", result);
1543 * Transfer complete notification
1545 * Called from the notif.c code. We get a notification on EP2 saying
1546 * that some endpoint has some transfer result data available. We are
1549 * To speed up things, we always have a URB reading the DTI endpoint; we
1550 * don't really set it up and start it until the first xfer complete
1551 * notification arrives, which is what we do here.
1553 * Follow up in wa_xfer_result_cb(), as that's where the whole state
1556 * So here we just initialize the DTI URB for reading transfer result
1557 * notifications and also the buffer-in URB, for reading buffers. Then
1558 * we just submit the DTI URB.
1560 * @wa shall be referenced
1562 void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)
1565 struct device *dev = &wa->usb_iface->dev;
1566 struct wa_notif_xfer *notif_xfer;
1567 const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;
1569 notif_xfer = container_of(notif_hdr, struct wa_notif_xfer, hdr);
1570 BUG_ON(notif_hdr->bNotifyType != WA_NOTIF_TRANSFER);
1572 if ((0x80 | notif_xfer->bEndpoint) != dti_epd->bEndpointAddress) {
1573 /* FIXME: hardcoded limitation, adapt */
1574 dev_err(dev, "BUG: DTI ep is %u, not %u (hack me)\n",
1575 notif_xfer->bEndpoint, dti_epd->bEndpointAddress);
1578 if (wa->dti_urb != NULL) /* DTI URB already started */
1581 wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL);
1582 if (wa->dti_urb == NULL) {
1583 dev_err(dev, "Can't allocate DTI URB\n");
1584 goto error_dti_urb_alloc;
1587 wa->dti_urb, wa->usb_dev,
1588 usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
1589 wa->xfer_result, wa->xfer_result_size,
1590 wa_xfer_result_cb, wa);
1592 wa->buf_in_urb = usb_alloc_urb(0, GFP_KERNEL);
1593 if (wa->buf_in_urb == NULL) {
1594 dev_err(dev, "Can't allocate BUF-IN URB\n");
1595 goto error_buf_in_urb_alloc;
1598 wa->buf_in_urb, wa->usb_dev,
1599 usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
1600 NULL, 0, wa_buf_in_cb, wa);
1601 result = usb_submit_urb(wa->dti_urb, GFP_KERNEL);
1603 dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
1604 "resetting\n", result);
1605 goto error_dti_urb_submit;
1610 error_dti_urb_submit:
1611 usb_put_urb(wa->buf_in_urb);
1612 error_buf_in_urb_alloc:
1613 usb_put_urb(wa->dti_urb);
1615 error_dti_urb_alloc: