1 /* -----------------------------------------------------------------------------
2 * Copyright (c) 2011 Ozmo Inc
3 * Released under the GNU General Public License Version 2 (GPLv2).
5 * This file provides the implementation of a USB host controller device that
6 * does not have any associated hardware. Instead the virtual device is
7 * connected to the WiFi network and emulates the operation of a USB hcd by
8 * receiving and sending network frames.
10 * We take great pains to reduce the amount of code where interrupts need to be
11 * disabled and in this respect we are different from standard HCD's. In
12 * particular we don't want in_irq() code bleeding over to the protocol side of
14 * The troublesome functions are the urb enqueue and dequeue functions both of
15 * which can be called in_irq(). So for these functions we put the urbs into a
16 * queue and request a tasklet to process them. This means that a spinlock with
17 interrupts disabled must be held for insertion and removal but most code
18 is in tasklet or soft irq context. The lock that protects this list is called
19 * the tasklet lock and serves the purpose of the 'HCD lock' which must be held
20 * when calling the following functions.
21 * usb_hcd_link_urb_to_ep()
22 * usb_hcd_unlink_urb_from_ep()
23 * usb_hcd_flush_endpoint()
24 * usb_hcd_check_unlink_urb()
25 * -----------------------------------------------------------------------------
27 #include <linux/platform_device.h>
28 #include <linux/usb.h>
29 #include <linux/slab.h>
30 #include <linux/export.h>
31 #include "linux/usb/hcd.h"
32 #include <asm/unaligned.h>
35 #include "ozurbparanoia.h"
39 * Number of units of buffering to capture for an isochronous IN endpoint before
40 * allowing data to be indicated up.
42 #define OZ_IN_BUFFERING_UNITS 50
44 /* Name of our platform device.
46 #define OZ_PLAT_DEV_NAME "ozwpan"
48 /* Maximum number of free urb links that can be kept in the pool.
50 #define OZ_MAX_LINK_POOL_SIZE 16
52 /* Get endpoint object from the containing link.
54 #define ep_from_link(__e) container_of((__e), struct oz_endpoint, link)
56 /*EP0 timeout before ep0 request is again added to TX queue. (13*8 = 98mSec)
58 #define EP0_TIMEOUT_COUNTER 13
61 * Used to link urbs together and also store some status information for each
63 * A cache of these are kept in a pool to reduce number of calls to kmalloc.
66 struct list_head link;
71 unsigned submit_counter;
74 /* Holds state information about a USB endpoint.
77 struct list_head urb_list; /* List of oz_urb_link items. */
78 struct list_head link; /* For isoc ep, links in to isoc
80 struct timespec timestamp;
94 /* Bits in the flags field. */
95 #define OZ_F_EP_BUFFERING 0x1
96 #define OZ_F_EP_HAVE_STREAM 0x2
98 /* Holds state information about a USB interface.
100 struct oz_interface {
105 /* Holds state information about an hcd port.
107 #define OZ_NB_ENDPOINTS 16
112 struct oz_hcd *ozhcd;
113 spinlock_t port_lock;
118 struct oz_interface *iface;
119 struct oz_endpoint *out_ep[OZ_NB_ENDPOINTS];
120 struct oz_endpoint *in_ep[OZ_NB_ENDPOINTS];
121 struct list_head isoc_out_ep;
122 struct list_head isoc_in_ep;
125 #define OZ_PORT_F_PRESENT 0x1
126 #define OZ_PORT_F_CHANGED 0x2
127 #define OZ_PORT_F_DYING 0x4
129 /* Data structure in the private context area of struct usb_hcd.
131 #define OZ_NB_PORTS 8
134 struct list_head urb_pending_list;
135 struct list_head urb_cancel_list;
136 struct list_head orphanage;
137 int conn_port; /* Port that is currently connecting, -1 if none.*/
138 struct oz_port ports[OZ_NB_PORTS];
143 /* Bits in flags field.
145 #define OZ_HDC_F_SUSPENDED 0x1
148 * Static function prototypes.
150 static int oz_hcd_start(struct usb_hcd *hcd);
151 static void oz_hcd_stop(struct usb_hcd *hcd);
152 static void oz_hcd_shutdown(struct usb_hcd *hcd);
153 static int oz_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
155 static int oz_hcd_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status);
156 static void oz_hcd_endpoint_disable(struct usb_hcd *hcd,
157 struct usb_host_endpoint *ep);
158 static void oz_hcd_endpoint_reset(struct usb_hcd *hcd,
159 struct usb_host_endpoint *ep);
160 static int oz_hcd_get_frame_number(struct usb_hcd *hcd);
161 static int oz_hcd_hub_status_data(struct usb_hcd *hcd, char *buf);
162 static int oz_hcd_hub_control(struct usb_hcd *hcd, u16 req_type, u16 wvalue,
163 u16 windex, char *buf, u16 wlength);
164 static int oz_hcd_bus_suspend(struct usb_hcd *hcd);
165 static int oz_hcd_bus_resume(struct usb_hcd *hcd);
166 static int oz_plat_probe(struct platform_device *dev);
167 static int oz_plat_remove(struct platform_device *dev);
168 static void oz_plat_shutdown(struct platform_device *dev);
169 static int oz_plat_suspend(struct platform_device *dev, pm_message_t msg);
170 static int oz_plat_resume(struct platform_device *dev);
171 static void oz_urb_process_tasklet(unsigned long unused);
172 static int oz_build_endpoints_for_config(struct usb_hcd *hcd,
173 struct oz_port *port, struct usb_host_config *config,
175 static void oz_clean_endpoints_for_config(struct usb_hcd *hcd,
176 struct oz_port *port);
177 static int oz_build_endpoints_for_interface(struct usb_hcd *hcd,
178 struct oz_port *port,
179 struct usb_host_interface *intf, gfp_t mem_flags);
180 static void oz_clean_endpoints_for_interface(struct usb_hcd *hcd,
181 struct oz_port *port, int if_ix);
182 static void oz_process_ep0_urb(struct oz_hcd *ozhcd, struct urb *urb,
184 static struct oz_urb_link *oz_remove_urb(struct oz_endpoint *ep,
186 static void oz_hcd_clear_orphanage(struct oz_hcd *ozhcd, int status);
189 * Static external variables.
191 static struct platform_device *g_plat_dev;
192 static struct oz_hcd *g_ozhcd;
193 static DEFINE_SPINLOCK(g_hcdlock); /* Guards g_ozhcd. */
194 static const char g_hcd_name[] = "Ozmo WPAN";
195 static struct list_head *g_link_pool;
196 static int g_link_pool_size;
197 static DEFINE_SPINLOCK(g_link_lock);
198 static DEFINE_SPINLOCK(g_tasklet_lock);
199 static struct tasklet_struct g_urb_process_tasklet;
200 static struct tasklet_struct g_urb_cancel_tasklet;
201 static atomic_t g_pending_urbs = ATOMIC_INIT(0);
202 static atomic_t g_usb_frame_number = ATOMIC_INIT(0);
203 static const struct hc_driver g_oz_hc_drv = {
204 .description = g_hcd_name,
205 .product_desc = "Ozmo Devices WPAN",
206 .hcd_priv_size = sizeof(struct oz_hcd),
208 .start = oz_hcd_start,
210 .shutdown = oz_hcd_shutdown,
211 .urb_enqueue = oz_hcd_urb_enqueue,
212 .urb_dequeue = oz_hcd_urb_dequeue,
213 .endpoint_disable = oz_hcd_endpoint_disable,
214 .endpoint_reset = oz_hcd_endpoint_reset,
215 .get_frame_number = oz_hcd_get_frame_number,
216 .hub_status_data = oz_hcd_hub_status_data,
217 .hub_control = oz_hcd_hub_control,
218 .bus_suspend = oz_hcd_bus_suspend,
219 .bus_resume = oz_hcd_bus_resume,
222 static struct platform_driver g_oz_plat_drv = {
223 .probe = oz_plat_probe,
224 .remove = oz_plat_remove,
225 .shutdown = oz_plat_shutdown,
226 .suspend = oz_plat_suspend,
227 .resume = oz_plat_resume,
229 .name = OZ_PLAT_DEV_NAME,
230 .owner = THIS_MODULE,
235 * Gets our private context area (which is of type struct oz_hcd) from the
239 static inline struct oz_hcd *oz_hcd_private(struct usb_hcd *hcd)
241 return (struct oz_hcd *)hcd->hcd_priv;
245 * Searches list of ports to find the index of the one with a specified USB
246 * bus address. If none of the ports has the bus address then the connection
247 * port is returned, if there is one or -1 otherwise.
250 static int oz_get_port_from_addr(struct oz_hcd *ozhcd, u8 bus_addr)
254 for (i = 0; i < OZ_NB_PORTS; i++) {
255 if (ozhcd->ports[i].bus_addr == bus_addr)
258 return ozhcd->conn_port;
262 * Allocates an urb link, first trying the pool but going to heap if empty.
265 static struct oz_urb_link *oz_alloc_urb_link(void)
267 struct oz_urb_link *urbl = NULL;
268 unsigned long irq_state;
270 spin_lock_irqsave(&g_link_lock, irq_state);
272 urbl = container_of(g_link_pool, struct oz_urb_link, link);
273 g_link_pool = urbl->link.next;
276 spin_unlock_irqrestore(&g_link_lock, irq_state);
278 urbl = kmalloc(sizeof(struct oz_urb_link), GFP_ATOMIC);
283 * Frees an urb link by putting it in the pool if there is enough space or
284 * deallocating it to heap otherwise.
287 static void oz_free_urb_link(struct oz_urb_link *urbl)
290 unsigned long irq_state;
291 spin_lock_irqsave(&g_link_lock, irq_state);
292 if (g_link_pool_size < OZ_MAX_LINK_POOL_SIZE) {
293 urbl->link.next = g_link_pool;
294 g_link_pool = &urbl->link;
298 spin_unlock_irqrestore(&g_link_lock, irq_state);
304 * Deallocates all the urb links in the pool.
307 static void oz_empty_link_pool(void)
310 unsigned long irq_state;
312 spin_lock_irqsave(&g_link_lock, irq_state);
315 g_link_pool_size = 0;
316 spin_unlock_irqrestore(&g_link_lock, irq_state);
318 struct oz_urb_link *urbl =
319 container_of(e, struct oz_urb_link, link);
326 * Allocates endpoint structure and optionally a buffer. If a buffer is
327 * allocated it immediately follows the endpoint structure.
330 static struct oz_endpoint *oz_ep_alloc(int buffer_size, gfp_t mem_flags)
332 struct oz_endpoint *ep =
333 kzalloc(sizeof(struct oz_endpoint)+buffer_size, mem_flags);
335 INIT_LIST_HEAD(&ep->urb_list);
336 INIT_LIST_HEAD(&ep->link);
339 ep->buffer_size = buffer_size;
340 ep->buffer = (u8 *)(ep+1);
347 * Pre-condition: Must be called with g_tasklet_lock held and interrupts
349 * Context: softirq or process
351 static struct oz_urb_link *oz_uncancel_urb(struct oz_hcd *ozhcd, struct urb *urb)
353 struct oz_urb_link *urbl;
356 list_for_each(e, &ozhcd->urb_cancel_list) {
357 urbl = container_of(e, struct oz_urb_link, link);
358 if (urb == urbl->urb) {
367 * This is called when we have finished processing an urb. It unlinks it from
368 * the ep and returns it to the core.
369 * Context: softirq or process
371 static void oz_complete_urb(struct usb_hcd *hcd, struct urb *urb,
374 struct oz_hcd *ozhcd = oz_hcd_private(hcd);
375 unsigned long irq_state;
376 struct oz_urb_link *cancel_urbl;
378 spin_lock_irqsave(&g_tasklet_lock, irq_state);
379 usb_hcd_unlink_urb_from_ep(hcd, urb);
380 /* Clear hcpriv which will prevent it being put in the cancel list
381 * in the event that an attempt is made to cancel it.
384 /* Walk the cancel list in case the urb is already sitting there.
385 * Since we process the cancel list in a tasklet rather than in
386 * the dequeue function this could happen.
388 cancel_urbl = oz_uncancel_urb(ozhcd, urb);
389 /* Note: we release lock but do not enable local irqs.
390 * It appears that usb_hcd_giveback_urb() expects irqs to be disabled,
391 * or at least other host controllers disable interrupts at this point
392 * so we do the same. We must, however, release the lock otherwise a
393 * deadlock will occur if an urb is submitted to our driver in the urb
394 * completion function. Because we disable interrupts it is possible
395 * that the urb_enqueue function can be called with them disabled.
397 spin_unlock(&g_tasklet_lock);
398 if (oz_forget_urb(urb)) {
399 oz_dbg(ON, "ERROR Unknown URB %p\n", urb);
401 atomic_dec(&g_pending_urbs);
402 usb_hcd_giveback_urb(hcd, urb, status);
404 spin_lock(&g_tasklet_lock);
405 spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
407 oz_free_urb_link(cancel_urbl);
411 * Deallocates an endpoint including deallocating any associated stream and
412 * returning any queued urbs to the core.
415 static void oz_ep_free(struct oz_port *port, struct oz_endpoint *ep)
418 struct list_head list;
419 struct oz_hcd *ozhcd = port->ozhcd;
420 INIT_LIST_HEAD(&list);
421 if (ep->flags & OZ_F_EP_HAVE_STREAM)
422 oz_usb_stream_delete(port->hpd, ep->ep_num);
423 /* Transfer URBs to the orphanage while we hold the lock. */
424 spin_lock_bh(&ozhcd->hcd_lock);
425 /* Note: this works even if ep->urb_list is empty.*/
426 list_replace_init(&ep->urb_list, &list);
427 /* Put the URBs in the orphanage. */
428 list_splice_tail(&list, &ozhcd->orphanage);
429 spin_unlock_bh(&ozhcd->hcd_lock);
431 oz_dbg(ON, "Freeing endpoint memory\n");
438 static void oz_complete_buffered_urb(struct oz_port *port,
439 struct oz_endpoint *ep,
442 u8 data_len, available_space, copy_len;
444 memcpy(&data_len, &ep->buffer[ep->out_ix], sizeof(u8));
445 if (data_len <= urb->transfer_buffer_length)
446 available_space = data_len;
448 available_space = urb->transfer_buffer_length;
450 if (++ep->out_ix == ep->buffer_size)
452 copy_len = ep->buffer_size - ep->out_ix;
453 if (copy_len >= available_space)
454 copy_len = available_space;
455 memcpy(urb->transfer_buffer, &ep->buffer[ep->out_ix], copy_len);
457 if (copy_len < available_space) {
458 memcpy((urb->transfer_buffer + copy_len), ep->buffer,
459 (available_space - copy_len));
460 ep->out_ix = available_space - copy_len;
462 ep->out_ix += copy_len;
464 urb->actual_length = available_space;
465 if (ep->out_ix == ep->buffer_size)
468 ep->buffered_units--;
469 oz_dbg(ON, "Trying to give back buffered frame of size=%d\n",
471 oz_complete_urb(port->ozhcd->hcd, urb, 0);
477 static int oz_enqueue_ep_urb(struct oz_port *port, u8 ep_addr, int in_dir,
478 struct urb *urb, u8 req_id)
480 struct oz_urb_link *urbl;
481 struct oz_endpoint *ep = NULL;
484 if (ep_addr >= OZ_NB_ENDPOINTS) {
485 oz_dbg(ON, "%s: Invalid endpoint number\n", __func__);
488 urbl = oz_alloc_urb_link();
491 urbl->submit_counter = 0;
493 urbl->req_id = req_id;
494 urbl->ep_num = ep_addr;
495 /* Hold lock while we insert the URB into the list within the
496 * endpoint structure.
498 spin_lock_bh(&port->ozhcd->hcd_lock);
499 /* If the urb has been unlinked while out of any list then
503 spin_unlock_bh(&port->ozhcd->hcd_lock);
504 oz_dbg(ON, "urb %p unlinked so complete immediately\n", urb);
505 oz_complete_urb(port->ozhcd->hcd, urb, 0);
506 oz_free_urb_link(urbl);
511 ep = port->in_ep[ep_addr];
513 ep = port->out_ep[ep_addr];
519 /*For interrupt endpoint check for buffered data
522 if (((ep->attrib & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_INT)
523 && ep->buffered_units > 0) {
524 oz_free_urb_link(urbl);
525 spin_unlock_bh(&port->ozhcd->hcd_lock);
526 oz_complete_buffered_urb(port, ep, urb);
531 list_add_tail(&urbl->link, &ep->urb_list);
532 if (!in_dir && ep_addr && (ep->credit < 0)) {
533 getrawmonotonic(&ep->timestamp);
540 spin_unlock_bh(&port->ozhcd->hcd_lock);
542 oz_free_urb_link(urbl);
547 * Removes an urb from the queue in the endpoint.
548 * Returns 0 if it is found and -EIDRM otherwise.
551 static int oz_dequeue_ep_urb(struct oz_port *port, u8 ep_addr, int in_dir,
554 struct oz_urb_link *urbl = NULL;
555 struct oz_endpoint *ep;
557 spin_lock_bh(&port->ozhcd->hcd_lock);
559 ep = port->in_ep[ep_addr];
561 ep = port->out_ep[ep_addr];
564 list_for_each(e, &ep->urb_list) {
565 urbl = container_of(e, struct oz_urb_link, link);
566 if (urbl->urb == urb) {
573 spin_unlock_bh(&port->ozhcd->hcd_lock);
575 oz_free_urb_link(urbl);
576 return urbl ? 0 : -EIDRM;
580 * Finds an urb given its request id.
583 static struct urb *oz_find_urb_by_id(struct oz_port *port, int ep_ix,
586 struct oz_hcd *ozhcd = port->ozhcd;
587 struct urb *urb = NULL;
588 struct oz_urb_link *urbl;
589 struct oz_endpoint *ep;
591 spin_lock_bh(&ozhcd->hcd_lock);
592 ep = port->out_ep[ep_ix];
595 list_for_each(e, &ep->urb_list) {
596 urbl = container_of(e, struct oz_urb_link, link);
597 if (urbl->req_id == req_id) {
604 spin_unlock_bh(&ozhcd->hcd_lock);
605 /* If urb is non-zero then we we must have an urb link to delete.
608 oz_free_urb_link(urbl);
613 * Pre-condition: Port lock must be held.
616 static void oz_acquire_port(struct oz_port *port, void *hpd)
618 INIT_LIST_HEAD(&port->isoc_out_ep);
619 INIT_LIST_HEAD(&port->isoc_in_ep);
620 port->flags |= OZ_PORT_F_PRESENT | OZ_PORT_F_CHANGED;
621 port->status |= USB_PORT_STAT_CONNECTION |
622 (USB_PORT_STAT_C_CONNECTION << 16);
630 static struct oz_hcd *oz_hcd_claim(void)
632 struct oz_hcd *ozhcd;
634 spin_lock_bh(&g_hcdlock);
637 usb_get_hcd(ozhcd->hcd);
638 spin_unlock_bh(&g_hcdlock);
645 static inline void oz_hcd_put(struct oz_hcd *ozhcd)
648 usb_put_hcd(ozhcd->hcd);
652 * This is called by the protocol handler to notify that a PD has arrived.
653 * We allocate a port to associate with the PD and create a structure for
654 * endpoint 0. This port is made the connection port.
655 * In the event that one of the other port is already a connection port then
657 * TODO We should be able to do better than fail and should be able remember
658 * that this port needs configuring and make it the connection port once the
659 * current connection port has been assigned an address. Collisions here are
660 * probably very rare indeed.
663 struct oz_port *oz_hcd_pd_arrived(void *hpd)
666 struct oz_port *hport;
667 struct oz_hcd *ozhcd;
668 struct oz_endpoint *ep;
670 ozhcd = oz_hcd_claim();
673 /* Allocate an endpoint object in advance (before holding hcd lock) to
674 * use for out endpoint 0.
676 ep = oz_ep_alloc(0, GFP_ATOMIC);
680 spin_lock_bh(&ozhcd->hcd_lock);
681 if (ozhcd->conn_port >= 0)
684 for (i = 0; i < OZ_NB_PORTS; i++) {
685 struct oz_port *port = &ozhcd->ports[i];
687 spin_lock(&port->port_lock);
688 if (!(port->flags & (OZ_PORT_F_PRESENT | OZ_PORT_F_CHANGED))) {
689 oz_acquire_port(port, hpd);
690 spin_unlock(&port->port_lock);
693 spin_unlock(&port->port_lock);
695 if (i == OZ_NB_PORTS)
698 ozhcd->conn_port = i;
699 hport = &ozhcd->ports[i];
700 hport->out_ep[0] = ep;
701 spin_unlock_bh(&ozhcd->hcd_lock);
702 if (ozhcd->flags & OZ_HDC_F_SUSPENDED)
703 usb_hcd_resume_root_hub(ozhcd->hcd);
704 usb_hcd_poll_rh_status(ozhcd->hcd);
710 spin_unlock_bh(&ozhcd->hcd_lock);
711 oz_ep_free(NULL, ep);
718 * This is called by the protocol handler to notify that the PD has gone away.
719 * We need to deallocate all resources and then request that the root hub is
720 * polled. We release the reference we hold on the PD.
723 void oz_hcd_pd_departed(struct oz_port *port)
725 struct oz_hcd *ozhcd;
727 struct oz_endpoint *ep = NULL;
730 oz_dbg(ON, "%s: port = 0\n", __func__);
736 /* Check if this is the connection port - if so clear it.
738 spin_lock_bh(&ozhcd->hcd_lock);
739 if ((ozhcd->conn_port >= 0) &&
740 (port == &ozhcd->ports[ozhcd->conn_port])) {
741 oz_dbg(ON, "Clearing conn_port\n");
742 ozhcd->conn_port = -1;
744 spin_lock(&port->port_lock);
745 port->flags |= OZ_PORT_F_DYING;
746 spin_unlock(&port->port_lock);
747 spin_unlock_bh(&ozhcd->hcd_lock);
749 oz_clean_endpoints_for_config(ozhcd->hcd, port);
750 spin_lock_bh(&port->port_lock);
753 port->bus_addr = 0xff;
754 port->config_num = 0;
755 port->flags &= ~(OZ_PORT_F_PRESENT | OZ_PORT_F_DYING);
756 port->flags |= OZ_PORT_F_CHANGED;
757 port->status &= ~USB_PORT_STAT_CONNECTION;
758 port->status |= (USB_PORT_STAT_C_CONNECTION << 16);
759 /* If there is an endpont 0 then clear the pointer while we hold
760 * the spinlock be we deallocate it after releasing the lock.
762 if (port->out_ep[0]) {
763 ep = port->out_ep[0];
764 port->out_ep[0] = NULL;
766 spin_unlock_bh(&port->port_lock);
768 oz_ep_free(port, ep);
769 usb_hcd_poll_rh_status(ozhcd->hcd);
776 void oz_hcd_pd_reset(void *hpd, void *hport)
778 /* Cleanup the current configuration and report reset to the core.
780 struct oz_port *port = (struct oz_port *)hport;
781 struct oz_hcd *ozhcd = port->ozhcd;
783 oz_dbg(ON, "PD Reset\n");
784 spin_lock_bh(&port->port_lock);
785 port->flags |= OZ_PORT_F_CHANGED;
786 port->status |= USB_PORT_STAT_RESET;
787 port->status |= (USB_PORT_STAT_C_RESET << 16);
788 spin_unlock_bh(&port->port_lock);
789 oz_clean_endpoints_for_config(ozhcd->hcd, port);
790 usb_hcd_poll_rh_status(ozhcd->hcd);
796 void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status, const u8 *desc,
797 int length, int offset, int total_size)
799 struct oz_port *port = (struct oz_port *)hport;
803 oz_dbg(ON, "oz_hcd_get_desc_cnf length = %d offs = %d tot_size = %d\n",
804 length, offset, total_size);
805 urb = oz_find_urb_by_id(port, 0, req_id);
810 int required_size = urb->transfer_buffer_length;
811 if (required_size > total_size)
812 required_size = total_size;
813 copy_len = required_size-offset;
814 if (length <= copy_len)
816 memcpy(urb->transfer_buffer+offset, desc, copy_len);
818 if (offset < required_size) {
819 struct usb_ctrlrequest *setup =
820 (struct usb_ctrlrequest *)urb->setup_packet;
821 unsigned wvalue = le16_to_cpu(setup->wValue);
822 if (oz_enqueue_ep_urb(port, 0, 0, urb, req_id))
824 else if (oz_usb_get_desc_req(port->hpd, req_id,
825 setup->bRequestType, (u8)(wvalue>>8),
826 (u8)wvalue, setup->wIndex, offset,
827 required_size-offset)) {
828 oz_dequeue_ep_urb(port, 0, 0, urb);
835 urb->actual_length = total_size;
836 oz_complete_urb(port->ozhcd->hcd, urb, 0);
842 static void oz_display_conf_type(u8 t)
845 case USB_REQ_GET_STATUS:
846 oz_dbg(ON, "USB_REQ_GET_STATUS - cnf\n");
848 case USB_REQ_CLEAR_FEATURE:
849 oz_dbg(ON, "USB_REQ_CLEAR_FEATURE - cnf\n");
851 case USB_REQ_SET_FEATURE:
852 oz_dbg(ON, "USB_REQ_SET_FEATURE - cnf\n");
854 case USB_REQ_SET_ADDRESS:
855 oz_dbg(ON, "USB_REQ_SET_ADDRESS - cnf\n");
857 case USB_REQ_GET_DESCRIPTOR:
858 oz_dbg(ON, "USB_REQ_GET_DESCRIPTOR - cnf\n");
860 case USB_REQ_SET_DESCRIPTOR:
861 oz_dbg(ON, "USB_REQ_SET_DESCRIPTOR - cnf\n");
863 case USB_REQ_GET_CONFIGURATION:
864 oz_dbg(ON, "USB_REQ_GET_CONFIGURATION - cnf\n");
866 case USB_REQ_SET_CONFIGURATION:
867 oz_dbg(ON, "USB_REQ_SET_CONFIGURATION - cnf\n");
869 case USB_REQ_GET_INTERFACE:
870 oz_dbg(ON, "USB_REQ_GET_INTERFACE - cnf\n");
872 case USB_REQ_SET_INTERFACE:
873 oz_dbg(ON, "USB_REQ_SET_INTERFACE - cnf\n");
875 case USB_REQ_SYNCH_FRAME:
876 oz_dbg(ON, "USB_REQ_SYNCH_FRAME - cnf\n");
884 static void oz_hcd_complete_set_config(struct oz_port *port, struct urb *urb,
885 u8 rcode, u8 config_num)
888 struct usb_hcd *hcd = port->ozhcd->hcd;
891 port->config_num = config_num;
892 oz_clean_endpoints_for_config(hcd, port);
893 if (oz_build_endpoints_for_config(hcd, port,
894 &urb->dev->config[port->config_num-1], GFP_ATOMIC)) {
900 oz_complete_urb(hcd, urb, rc);
906 static void oz_hcd_complete_set_interface(struct oz_port *port, struct urb *urb,
907 u8 rcode, u8 if_num, u8 alt)
909 struct usb_hcd *hcd = port->ozhcd->hcd;
913 struct usb_host_config *config;
914 struct usb_host_interface *intf;
915 oz_dbg(ON, "Set interface %d alt %d\n", if_num, alt);
916 oz_clean_endpoints_for_interface(hcd, port, if_num);
917 config = &urb->dev->config[port->config_num-1];
918 intf = &config->intf_cache[if_num]->altsetting[alt];
919 if (oz_build_endpoints_for_interface(hcd, port, intf,
923 port->iface[if_num].alt = alt;
927 oz_complete_urb(hcd, urb, rc);
933 void oz_hcd_control_cnf(void *hport, u8 req_id, u8 rcode, const u8 *data,
936 struct oz_port *port = (struct oz_port *)hport;
938 struct usb_ctrlrequest *setup;
939 struct usb_hcd *hcd = port->ozhcd->hcd;
943 oz_dbg(ON, "oz_hcd_control_cnf rcode=%u len=%d\n", rcode, data_len);
944 urb = oz_find_urb_by_id(port, 0, req_id);
946 oz_dbg(ON, "URB not found\n");
949 setup = (struct usb_ctrlrequest *)urb->setup_packet;
950 windex = le16_to_cpu(setup->wIndex);
951 wvalue = le16_to_cpu(setup->wValue);
952 if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
953 /* Standard requests */
954 oz_display_conf_type(setup->bRequest);
955 switch (setup->bRequest) {
956 case USB_REQ_SET_CONFIGURATION:
957 oz_hcd_complete_set_config(port, urb, rcode,
960 case USB_REQ_SET_INTERFACE:
961 oz_hcd_complete_set_interface(port, urb, rcode,
962 (u8)windex, (u8)wvalue);
965 oz_complete_urb(hcd, urb, 0);
970 oz_dbg(ON, "VENDOR-CLASS - cnf\n");
972 if (data_len <= urb->transfer_buffer_length)
975 copy_len = urb->transfer_buffer_length;
976 memcpy(urb->transfer_buffer, data, copy_len);
977 urb->actual_length = copy_len;
979 oz_complete_urb(hcd, urb, 0);
984 * Context: softirq-serialized
986 static int oz_hcd_buffer_data(struct oz_endpoint *ep, const u8 *data,
994 space = ep->out_ix-ep->in_ix-1;
996 space += ep->buffer_size;
997 if (space < (data_len+1)) {
998 oz_dbg(ON, "Buffer full\n");
1001 ep->buffer[ep->in_ix] = (u8)data_len;
1002 if (++ep->in_ix == ep->buffer_size)
1004 copy_len = ep->buffer_size - ep->in_ix;
1005 if (copy_len > data_len)
1006 copy_len = data_len;
1007 memcpy(&ep->buffer[ep->in_ix], data, copy_len);
1009 if (copy_len < data_len) {
1010 memcpy(ep->buffer, data+copy_len, data_len-copy_len);
1011 ep->in_ix = data_len-copy_len;
1013 ep->in_ix += copy_len;
1015 if (ep->in_ix == ep->buffer_size)
1017 ep->buffered_units++;
1022 * Context: softirq-serialized
1024 void oz_hcd_data_ind(void *hport, u8 endpoint, const u8 *data, int data_len)
1026 struct oz_port *port = (struct oz_port *)hport;
1027 struct oz_endpoint *ep;
1028 struct oz_hcd *ozhcd = port->ozhcd;
1030 spin_lock_bh(&ozhcd->hcd_lock);
1031 ep = port->in_ep[endpoint & USB_ENDPOINT_NUMBER_MASK];
1034 switch (ep->attrib & USB_ENDPOINT_XFERTYPE_MASK) {
1035 case USB_ENDPOINT_XFER_INT:
1036 case USB_ENDPOINT_XFER_BULK:
1037 if (!list_empty(&ep->urb_list)) {
1038 struct oz_urb_link *urbl =
1039 list_first_entry(&ep->urb_list,
1040 struct oz_urb_link, link);
1043 list_del_init(&urbl->link);
1044 spin_unlock_bh(&ozhcd->hcd_lock);
1046 oz_free_urb_link(urbl);
1047 if (data_len <= urb->transfer_buffer_length)
1048 copy_len = data_len;
1050 copy_len = urb->transfer_buffer_length;
1051 memcpy(urb->transfer_buffer, data, copy_len);
1052 urb->actual_length = copy_len;
1053 oz_complete_urb(port->ozhcd->hcd, urb, 0);
1056 oz_dbg(ON, "buffering frame as URB is not available\n");
1057 oz_hcd_buffer_data(ep, data, data_len);
1060 case USB_ENDPOINT_XFER_ISOC:
1061 oz_hcd_buffer_data(ep, data, data_len);
1065 spin_unlock_bh(&ozhcd->hcd_lock);
1071 static inline int oz_usb_get_frame_number(void)
1073 return atomic_inc_return(&g_usb_frame_number);
1079 int oz_hcd_heartbeat(void *hport)
1082 struct oz_port *port = (struct oz_port *)hport;
1083 struct oz_hcd *ozhcd = port->ozhcd;
1084 struct oz_urb_link *urbl;
1085 struct list_head xfr_list;
1086 struct list_head *e;
1087 struct list_head *n;
1089 struct oz_endpoint *ep;
1090 struct timespec ts, delta;
1092 getrawmonotonic(&ts);
1093 INIT_LIST_HEAD(&xfr_list);
1094 /* Check the OUT isoc endpoints to see if any URB data can be sent.
1096 spin_lock_bh(&ozhcd->hcd_lock);
1097 list_for_each(e, &port->isoc_out_ep) {
1098 ep = ep_from_link(e);
1101 delta = timespec_sub(ts, ep->timestamp);
1102 ep->credit += div_u64(timespec_to_ns(&delta), NSEC_PER_MSEC);
1103 if (ep->credit > ep->credit_ceiling)
1104 ep->credit = ep->credit_ceiling;
1106 while (ep->credit && !list_empty(&ep->urb_list)) {
1107 urbl = list_first_entry(&ep->urb_list,
1108 struct oz_urb_link, link);
1110 if ((ep->credit + 1) < urb->number_of_packets)
1112 ep->credit -= urb->number_of_packets;
1115 list_move_tail(&urbl->link, &xfr_list);
1118 spin_unlock_bh(&ozhcd->hcd_lock);
1119 /* Send to PD and complete URBs.
1121 list_for_each_safe(e, n, &xfr_list) {
1122 urbl = container_of(e, struct oz_urb_link, link);
1125 urb->error_count = 0;
1126 urb->start_frame = oz_usb_get_frame_number();
1127 oz_usb_send_isoc(port->hpd, urbl->ep_num, urb);
1128 oz_free_urb_link(urbl);
1129 oz_complete_urb(port->ozhcd->hcd, urb, 0);
1131 /* Check the IN isoc endpoints to see if any URBs can be completed.
1133 spin_lock_bh(&ozhcd->hcd_lock);
1134 list_for_each(e, &port->isoc_in_ep) {
1135 struct oz_endpoint *ep = ep_from_link(e);
1136 if (ep->flags & OZ_F_EP_BUFFERING) {
1137 if (ep->buffered_units >= OZ_IN_BUFFERING_UNITS) {
1138 ep->flags &= ~OZ_F_EP_BUFFERING;
1141 ep->start_frame = 0;
1145 delta = timespec_sub(ts, ep->timestamp);
1146 ep->credit += div_u64(timespec_to_ns(&delta), NSEC_PER_MSEC);
1148 while (!list_empty(&ep->urb_list)) {
1149 struct oz_urb_link *urbl =
1150 list_first_entry(&ep->urb_list,
1151 struct oz_urb_link, link);
1152 struct urb *urb = urbl->urb;
1156 if (ep->credit < urb->number_of_packets)
1158 if (ep->buffered_units < urb->number_of_packets)
1160 urb->actual_length = 0;
1161 for (i = 0; i < urb->number_of_packets; i++) {
1162 len = ep->buffer[ep->out_ix];
1163 if (++ep->out_ix == ep->buffer_size)
1165 copy_len = ep->buffer_size - ep->out_ix;
1168 memcpy(urb->transfer_buffer,
1169 &ep->buffer[ep->out_ix], copy_len);
1170 if (copy_len < len) {
1171 memcpy(urb->transfer_buffer+copy_len,
1172 ep->buffer, len-copy_len);
1173 ep->out_ix = len-copy_len;
1175 ep->out_ix += copy_len;
1176 if (ep->out_ix == ep->buffer_size)
1178 urb->iso_frame_desc[i].offset =
1180 urb->actual_length += len;
1181 urb->iso_frame_desc[i].actual_length = len;
1182 urb->iso_frame_desc[i].status = 0;
1184 ep->buffered_units -= urb->number_of_packets;
1185 urb->error_count = 0;
1186 urb->start_frame = ep->start_frame;
1187 ep->start_frame += urb->number_of_packets;
1188 list_move_tail(&urbl->link, &xfr_list);
1189 ep->credit -= urb->number_of_packets;
1192 if (!list_empty(&port->isoc_out_ep) || !list_empty(&port->isoc_in_ep))
1194 spin_unlock_bh(&ozhcd->hcd_lock);
1195 /* Complete the filled URBs.
1197 list_for_each_safe(e, n, &xfr_list) {
1198 urbl = container_of(e, struct oz_urb_link, link);
1201 oz_free_urb_link(urbl);
1202 oz_complete_urb(port->ozhcd->hcd, urb, 0);
1204 /* Check if there are any ep0 requests that have timed out.
1205 * If so resent to PD.
1207 ep = port->out_ep[0];
1209 struct list_head *e;
1210 struct list_head *n;
1211 spin_lock_bh(&ozhcd->hcd_lock);
1212 list_for_each_safe(e, n, &ep->urb_list) {
1213 urbl = container_of(e, struct oz_urb_link, link);
1214 if (urbl->submit_counter > EP0_TIMEOUT_COUNTER) {
1215 oz_dbg(ON, "Request 0x%p timeout\n", urbl->urb);
1216 list_move_tail(e, &xfr_list);
1217 urbl->submit_counter = 0;
1219 urbl->submit_counter++;
1222 if (!list_empty(&ep->urb_list))
1224 spin_unlock_bh(&ozhcd->hcd_lock);
1226 while (e != &xfr_list) {
1227 urbl = container_of(e, struct oz_urb_link, link);
1229 oz_dbg(ON, "Resending request to PD\n");
1230 oz_process_ep0_urb(ozhcd, urbl->urb, GFP_ATOMIC);
1231 oz_free_urb_link(urbl);
1240 static int oz_build_endpoints_for_interface(struct usb_hcd *hcd,
1241 struct oz_port *port,
1242 struct usb_host_interface *intf, gfp_t mem_flags)
1244 struct oz_hcd *ozhcd = port->ozhcd;
1246 int if_ix = intf->desc.bInterfaceNumber;
1247 int request_heartbeat = 0;
1249 oz_dbg(ON, "interface[%d] = %p\n", if_ix, intf);
1250 if (if_ix >= port->num_iface || port->iface == NULL)
1252 for (i = 0; i < intf->desc.bNumEndpoints; i++) {
1253 struct usb_host_endpoint *hep = &intf->endpoint[i];
1254 u8 ep_addr = hep->desc.bEndpointAddress;
1255 u8 ep_num = ep_addr & USB_ENDPOINT_NUMBER_MASK;
1256 struct oz_endpoint *ep;
1257 int buffer_size = 0;
1259 oz_dbg(ON, "%d bEndpointAddress = %x\n", i, ep_addr);
1260 if (ep_addr & USB_ENDPOINT_DIR_MASK) {
1261 switch (hep->desc.bmAttributes &
1262 USB_ENDPOINT_XFERTYPE_MASK) {
1263 case USB_ENDPOINT_XFER_ISOC:
1264 buffer_size = 24*1024;
1266 case USB_ENDPOINT_XFER_INT:
1272 ep = oz_ep_alloc(buffer_size, mem_flags);
1274 oz_clean_endpoints_for_interface(hcd, port, if_ix);
1277 ep->attrib = hep->desc.bmAttributes;
1278 ep->ep_num = ep_num;
1279 if ((ep->attrib & USB_ENDPOINT_XFERTYPE_MASK)
1280 == USB_ENDPOINT_XFER_ISOC) {
1281 oz_dbg(ON, "wMaxPacketSize = %d\n",
1282 usb_endpoint_maxp(&hep->desc));
1283 ep->credit_ceiling = 200;
1284 if (ep_addr & USB_ENDPOINT_DIR_MASK) {
1285 ep->flags |= OZ_F_EP_BUFFERING;
1287 ep->flags |= OZ_F_EP_HAVE_STREAM;
1288 if (oz_usb_stream_create(port->hpd, ep_num))
1289 ep->flags &= ~OZ_F_EP_HAVE_STREAM;
1292 spin_lock_bh(&ozhcd->hcd_lock);
1293 if (ep_addr & USB_ENDPOINT_DIR_MASK) {
1294 port->in_ep[ep_num] = ep;
1295 port->iface[if_ix].ep_mask |=
1296 (1<<(ep_num+OZ_NB_ENDPOINTS));
1297 if ((ep->attrib & USB_ENDPOINT_XFERTYPE_MASK)
1298 == USB_ENDPOINT_XFER_ISOC) {
1299 list_add_tail(&ep->link, &port->isoc_in_ep);
1300 request_heartbeat = 1;
1303 port->out_ep[ep_num] = ep;
1304 port->iface[if_ix].ep_mask |= (1<<ep_num);
1305 if ((ep->attrib & USB_ENDPOINT_XFERTYPE_MASK)
1306 == USB_ENDPOINT_XFER_ISOC) {
1307 list_add_tail(&ep->link, &port->isoc_out_ep);
1308 request_heartbeat = 1;
1311 spin_unlock_bh(&ozhcd->hcd_lock);
1312 if (request_heartbeat && port->hpd)
1313 oz_usb_request_heartbeat(port->hpd);
/*
 * Detach and free every endpoint belonging to interface if_ix of the port.
 * The endpoints are unhooked from the port's in/out arrays and the
 * interface ep_mask under hcd_lock, gathered on a local list, and only
 * freed once the lock has been dropped (so oz_ep_free() runs unlocked).
 */
1321 static void oz_clean_endpoints_for_interface(struct usb_hcd *hcd,
1322 struct oz_port *port, int if_ix
1324 struct oz_hcd *ozhcd = port->ozhcd;
1327 struct list_head ep_list;
1329 oz_dbg(ON, "Deleting endpoints for interface %d\n", if_ix);
/* Reject an out-of-range interface index. */
1330 if (if_ix >= port->num_iface)
1332 INIT_LIST_HEAD(&ep_list);
1333 spin_lock_bh(&ozhcd->hcd_lock);
/* Snapshot and clear the interface's endpoint ownership mask. */
1334 mask = port->iface[if_ix].ep_mask;
1335 port->iface[if_ix].ep_mask = 0;
1336 for (i = 0; i < OZ_NB_ENDPOINTS; i++) {
1337 struct list_head *e;
1338 /* Gather OUT endpoints.
/* Low OZ_NB_ENDPOINTS bits of the mask cover OUT endpoints. */
1340 if ((mask & (1<<i)) && port->out_ep[i]) {
1341 e = &port->out_ep[i]->link;
1342 port->out_ep[i] = NULL;
1343 /* Remove from isoc list if present.
/* list_move_tail() also unlinks from the isoc list if queued there. */
1345 list_move_tail(e, &ep_list);
1347 /* Gather IN endpoints.
/* IN endpoints use the upper half of the mask (offset OZ_NB_ENDPOINTS). */
1349 if ((mask & (1<<(i+OZ_NB_ENDPOINTS))) && port->in_ep[i]) {
1350 e = &port->in_ep[i]->link;
1351 port->in_ep[i] = NULL;
1352 list_move_tail(e, &ep_list);
1355 spin_unlock_bh(&ozhcd->hcd_lock);
/* Free the gathered endpoints outside the lock. */
1356 while (!list_empty(&ep_list)) {
1357 struct oz_endpoint *ep =
1358 list_first_entry(&ep_list, struct oz_endpoint, link);
1359 list_del_init(&ep->link);
1360 oz_ep_free(port, ep);
/*
 * Allocate the per-interface state array for a configuration and build
 * endpoints for altsetting 0 of every interface.  On a per-interface
 * build failure the whole configuration is torn down again via
 * oz_clean_endpoints_for_config().
 */
1367 static int oz_build_endpoints_for_config(struct usb_hcd *hcd,
1368 struct oz_port *port, struct usb_host_config *config,
1371 struct oz_hcd *ozhcd = port->ozhcd;
1373 int num_iface = config->desc.bNumInterfaces;
1376 struct oz_interface *iface;
/* Zeroed allocation (kmalloc + __GFP_ZERO is equivalent to kzalloc). */
1378 iface = kmalloc(num_iface*sizeof(struct oz_interface),
1379 mem_flags | __GFP_ZERO);
/* Publish the interface array to the port under hcd_lock. */
1382 spin_lock_bh(&ozhcd->hcd_lock);
1383 port->iface = iface;
1384 port->num_iface = num_iface;
1385 spin_unlock_bh(&ozhcd->hcd_lock);
1387 for (i = 0; i < num_iface; i++) {
/* Only altsetting 0 is considered when building the config. */
1388 struct usb_host_interface *intf =
1389 &config->intf_cache[i]->altsetting[0];
1390 if (oz_build_endpoints_for_interface(hcd, port, intf,
/* Error path: undo everything built so far for this config. */
1396 oz_clean_endpoints_for_config(hcd, port);
/*
 * Tear down all endpoints for the port's current configuration by
 * cleaning each interface in turn, then reset the interface bookkeeping
 * under hcd_lock.
 */
1403 static void oz_clean_endpoints_for_config(struct usb_hcd *hcd,
1404 struct oz_port *port)
1406 struct oz_hcd *ozhcd = port->ozhcd;
1409 oz_dbg(ON, "Deleting endpoints for configuration\n");
1410 for (i = 0; i < port->num_iface; i++)
1411 oz_clean_endpoints_for_interface(hcd, port, i);
1412 spin_lock_bh(&ozhcd->hcd_lock);
1414 oz_dbg(ON, "Freeing interfaces object\n");
/* NOTE(review): the kfree of port->iface presumably happens in the
 * elided lines here — confirm against the full source. */
1418 port->num_iface = 0;
1419 spin_unlock_bh(&ozhcd->hcd_lock);
/*
 * Take a reference on the port's PD (protocol device) handle under
 * hcd_lock and return it; the caller owns the claimed reference.
 */
1425 static void *oz_claim_hpd(struct oz_port *port)
1428 struct oz_hcd *ozhcd = port->ozhcd;
1430 spin_lock_bh(&ozhcd->hcd_lock);
1434 spin_unlock_bh(&ozhcd->hcd_lock);
/*
 * Process a control (endpoint 0) urb.  Standard requests that can be
 * answered from cached state (GET_CONFIGURATION, GET_INTERFACE) are
 * completed locally; everything else is forwarded to the protocol layer
 * with oz_usb_control_req() and queued on EP0 awaiting the reply.
 * SET_ADDRESS is additionally used to bind the bus address to the port
 * that is currently connecting.
 */
1441 static void oz_process_ep0_urb(struct oz_hcd *ozhcd, struct urb *urb,
1444 struct usb_ctrlrequest *setup;
1451 unsigned complete = 0;
1454 struct oz_port *port = NULL;
1456 oz_dbg(URB, "[%s]:(%p)\n", __func__, urb);
/* Map the device address to one of our virtual ports. */
1457 port_ix = oz_get_port_from_addr(ozhcd, urb->dev->devnum);
1462 port = &ozhcd->ports[port_ix];
/* Refuse the urb if no device is present or the port is going away. */
1463 if (((port->flags & OZ_PORT_F_PRESENT) == 0)
1464 || (port->flags & OZ_PORT_F_DYING)) {
1465 oz_dbg(ON, "Refusing URB port_ix = %d devnum = %d\n",
1466 port_ix, urb->dev->devnum);
1470 /* Store port in private context data.
/* Decode the SETUP packet fields (little-endian on the wire). */
1473 setup = (struct usb_ctrlrequest *)urb->setup_packet;
1474 windex = le16_to_cpu(setup->wIndex);
1475 wvalue = le16_to_cpu(setup->wValue);
1476 wlength = le16_to_cpu(setup->wLength);
1477 oz_dbg(CTRL_DETAIL, "bRequestType = %x\n", setup->bRequestType);
1478 oz_dbg(CTRL_DETAIL, "bRequest = %x\n", setup->bRequest);
1479 oz_dbg(CTRL_DETAIL, "wValue = %x\n", wvalue);
1480 oz_dbg(CTRL_DETAIL, "wIndex = %x\n", windex);
1481 oz_dbg(CTRL_DETAIL, "wLength = %x\n", wlength);
/* Allocate a request id so the reply can be matched to this urb. */
1483 req_id = port->next_req_id++;
1484 hpd = oz_claim_hpd(port);
1486 oz_dbg(ON, "Cannot claim port\n");
1491 if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
1492 /* Standard requests
1494 switch (setup->bRequest) {
1495 case USB_REQ_GET_DESCRIPTOR:
1496 oz_dbg(ON, "USB_REQ_GET_DESCRIPTOR - req\n");
1498 case USB_REQ_SET_ADDRESS:
1499 oz_dbg(ON, "USB_REQ_SET_ADDRESS - req\n");
1500 oz_dbg(ON, "Port %d address is 0x%x\n",
1502 (u8)le16_to_cpu(setup->wValue));
/* Record the assigned bus address on the connecting port and
 * clear conn_port so the next connection can use it. */
1503 spin_lock_bh(&ozhcd->hcd_lock);
1504 if (ozhcd->conn_port >= 0) {
1505 ozhcd->ports[ozhcd->conn_port].bus_addr =
1506 (u8)le16_to_cpu(setup->wValue);
1507 oz_dbg(ON, "Clearing conn_port\n");
1508 ozhcd->conn_port = -1;
1510 spin_unlock_bh(&ozhcd->hcd_lock);
1513 case USB_REQ_SET_CONFIGURATION:
1514 oz_dbg(ON, "USB_REQ_SET_CONFIGURATION - req\n");
1516 case USB_REQ_GET_CONFIGURATION:
1517 /* We short circuit this case and reply directly since
1518 * we have the selected configuration number cached.
1520 oz_dbg(ON, "USB_REQ_GET_CONFIGURATION - reply now\n");
1521 if (urb->transfer_buffer_length >= 1) {
1522 urb->actual_length = 1;
1523 *((u8 *)urb->transfer_buffer) =
1530 case USB_REQ_GET_INTERFACE:
1531 /* We short circuit this case and reply directly since
1532 * we have the selected interface alternative cached.
1534 oz_dbg(ON, "USB_REQ_GET_INTERFACE - reply now\n");
1535 if (urb->transfer_buffer_length >= 1) {
1536 urb->actual_length = 1;
1537 *((u8 *)urb->transfer_buffer) =
1538 port->iface[(u8)windex].alt;
1539 oz_dbg(ON, "interface = %d alt = %d\n",
1540 windex, port->iface[(u8)windex].alt);
1546 case USB_REQ_SET_INTERFACE:
1547 oz_dbg(ON, "USB_REQ_SET_INTERFACE - req\n");
/* Not handled locally: forward the request to the protocol side. */
1551 if (!rc && !complete) {
/* OUT transfers carry data_len bytes of payload with the request. */
1553 if ((setup->bRequestType & USB_DIR_IN) == 0)
1555 urb->actual_length = data_len;
1556 if (oz_usb_control_req(port->hpd, req_id, setup,
1557 urb->transfer_buffer, data_len)) {
1560 /* Note: we are queuing the request after we have
1561 * submitted it to be transmitted. If the request were
1562 * to complete before we queued it then it would not
1563 * be found in the queue. It seems impossible for
1564 * this to happen but if it did the request would
1565 * be resubmitted so the problem would hopefully
1566 * resolve itself. Putting the request into the
1567 * queue before it has been sent is worse since the
1568 * urb could be cancelled while we are using it
1569 * to build the request.
1571 if (oz_enqueue_ep_urb(port, 0, 0, urb, req_id))
/* Locally-answered or failed requests complete immediately. */
1577 if (rc || complete) {
1578 oz_dbg(ON, "Completing request locally\n");
1579 oz_complete_urb(ozhcd->hcd, urb, rc);
1581 oz_usb_request_heartbeat(port->hpd);
/*
 * Process one urb taken off the pending list by the tasklet.  EP0
 * (control) urbs go to oz_process_ep0_urb(); all others are queued on
 * their endpoint with oz_enqueue_ep_urb().  Returns 0 on success or a
 * negative error for the caller to complete the urb with.
 */
1588 static int oz_urb_process(struct oz_hcd *ozhcd, struct urb *urb)
1591 struct oz_port *port = urb->hcpriv;
1594 /* When we are paranoid we keep a list of urbs which we check against
1595 * before handing one back. This is just for debugging during
1596 * development and should be turned off in the released driver.
1598 oz_remember_urb(urb);
1599 /* Check buffer is valid.
/* A non-zero length with no buffer is malformed. */
1601 if (!urb->transfer_buffer && urb->transfer_buffer_length)
1603 /* Check if there is a device at the port - refuse if not.
1605 if ((port->flags & OZ_PORT_F_PRESENT) == 0)
1607 ep_addr = usb_pipeendpoint(urb->pipe);
1609 /* If the request is not for EP0 then queue it.
1611 if (oz_enqueue_ep_urb(port, ep_addr, usb_pipein(urb->pipe),
/* EP0 control requests are handled directly. */
1615 oz_process_ep0_urb(ozhcd, urb, GFP_ATOMIC);
/*
 * Tasklet that drains the urb_pending_list filled by the enqueue path.
 * The tasklet lock is dropped while each urb is processed so that
 * oz_urb_process() never runs with the lock held.
 */
1623 static void oz_urb_process_tasklet(unsigned long unused)
1625 unsigned long irq_state;
1627 struct oz_hcd *ozhcd = oz_hcd_claim();
1632 /* This is called from a tasklet so is in softirq context but the urb
1633 * list is filled from any context so we need to lock
1634 * appropriately while removing urbs.
1636 spin_lock_irqsave(&g_tasklet_lock, irq_state);
1637 while (!list_empty(&ozhcd->urb_pending_list)) {
1638 struct oz_urb_link *urbl =
1639 list_first_entry(&ozhcd->urb_pending_list,
1640 struct oz_urb_link, link);
1641 list_del_init(&urbl->link);
/* Drop the lock before processing; the link is freed first. */
1642 spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
1644 oz_free_urb_link(urbl);
1645 rc = oz_urb_process(ozhcd, urb);
/* On failure, complete the urb here with the error code. */
1647 oz_complete_urb(ozhcd->hcd, urb, rc);
1648 spin_lock_irqsave(&g_tasklet_lock, irq_state);
1650 spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
1655 * This function searches for the urb in any of the lists it could be in.
1656 * If it is found it is removed from the list and completed. If the urb is
1657 * being processed then it won't be in a list so won't be found. However, the
1658 * call to usb_hcd_check_unlink_urb() will set the value of the unlinked field
1659 * to a non-zero value. When an attempt is made to put the urb back in a list
1660 * the unlinked field will be checked and the urb will then be completed.
/*
 * Cancel a urb: search the tasklet cancel queue, then the orphanage,
 * then the endpoint queue identified by ep_num (IN vs OUT selected by
 * the USB_DIR_IN bit).  If found, the urb is completed with -EPIPE.
 */
1663 static void oz_urb_cancel(struct oz_port *port, u8 ep_num, struct urb *urb)
1665 struct oz_urb_link *urbl = NULL;
1666 struct list_head *e;
1667 struct oz_hcd *ozhcd;
1668 unsigned long irq_state;
/* Defensive null checks: cancellation can race with port teardown. */
1672 oz_dbg(ON, "%s: ERROR: (%p) port is null\n", __func__, urb);
1675 ozhcd = port->ozhcd;
1676 if (ozhcd == NULL) {
1677 oz_dbg(ON, "%s; ERROR: (%p) ozhcd is null\n", __func__, urb);
1681 /* Look in the tasklet queue.
1683 spin_lock_irqsave(&g_tasklet_lock, irq_state);
1684 list_for_each(e, &ozhcd->urb_cancel_list) {
1685 urbl = container_of(e, struct oz_urb_link, link);
1686 if (urb == urbl->urb) {
1688 spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
1692 spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
1695 /* Look in the orphanage.
1697 spin_lock_irqsave(&ozhcd->hcd_lock, irq_state);
1698 list_for_each(e, &ozhcd->orphanage) {
1699 urbl = container_of(e, struct oz_urb_link, link);
1700 if (urbl->urb == urb) {
1702 oz_dbg(ON, "Found urb in orphanage\n");
/* Finally look in the endpoint queue itself. */
1706 ix = (ep_num & 0xf);
1708 if ((ep_num & USB_DIR_IN) && ix)
1709 urbl = oz_remove_urb(port->in_ep[ix], urb);
1711 urbl = oz_remove_urb(port->out_ep[ix], urb);
1713 spin_unlock_irqrestore(&ozhcd->hcd_lock, irq_state);
/* Complete the cancelled urb with -EPIPE and free its link. */
1716 urb->actual_length = 0;
1717 oz_free_urb_link(urbl);
1718 oz_complete_urb(ozhcd->hcd, urb, -EPIPE);
/*
 * Tasklet that drains the urb_cancel_list, invoking oz_urb_cancel() for
 * each queued cancellation.  The tasklet lock is dropped around the
 * per-urb work, mirroring oz_urb_process_tasklet().
 */
1725 static void oz_urb_cancel_tasklet(unsigned long unused)
1727 unsigned long irq_state;
1729 struct oz_hcd *ozhcd = oz_hcd_claim();
1733 spin_lock_irqsave(&g_tasklet_lock, irq_state);
1734 while (!list_empty(&ozhcd->urb_cancel_list)) {
1735 struct oz_urb_link *urbl =
1736 list_first_entry(&ozhcd->urb_cancel_list,
1737 struct oz_urb_link, link);
1738 list_del_init(&urbl->link);
1739 spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
1742 oz_urb_cancel(urbl->port, urbl->ep_num, urb);
1743 oz_free_urb_link(urbl);
1744 spin_lock_irqsave(&g_tasklet_lock, irq_state);
1746 spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
/*
 * Complete every urb left in the orphanage with the given status and
 * free the links.  Used at teardown (e.g. oz_plat_remove()) so no urb
 * is leaked when the hcd goes away.
 */
1753 static void oz_hcd_clear_orphanage(struct oz_hcd *ozhcd, int status)
1756 struct oz_urb_link *urbl;
1757 while (!list_empty(&ozhcd->orphanage)) {
1758 urbl = list_first_entry(&ozhcd->orphanage,
1759 struct oz_urb_link, link);
1760 list_del(&urbl->link);
1761 oz_complete_urb(ozhcd->hcd, urbl->urb, status);
1762 oz_free_urb_link(urbl);
/*
 * HCD start callback: set a nominal power budget (200 mA), mark the
 * controller running and enable the new root-hub polling scheme.
 */
1770 static int oz_hcd_start(struct usb_hcd *hcd)
1772 hcd->power_budget = 200;
1773 hcd->state = HC_STATE_RUNNING;
1774 hcd->uses_new_polling = 1;
/* HCD stop callback; no hardware to quiesce for this virtual controller. */
1781 static void oz_hcd_stop(struct usb_hcd *hcd)
/* HCD shutdown callback; nothing hardware-specific to do here. */
1788 static void oz_hcd_shutdown(struct usb_hcd *hcd)
1793 * Called to queue an urb for the device.
1794 * This function should return a non-zero error code if it fails the urb but
1795 * should not call usb_hcd_giveback_urb().
/*
 * urb_enqueue callback.  May be called in_irq(), so the urb is not
 * processed here: it is linked to its endpoint and appended to
 * urb_pending_list under the tasklet lock, then the process tasklet is
 * scheduled to do the real work in softirq context (see file header).
 */
1798 static int oz_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
1801 struct oz_hcd *ozhcd = oz_hcd_private(hcd);
1804 struct oz_port *port;
1805 unsigned long irq_state;
1806 struct oz_urb_link *urbl;
1808 oz_dbg(URB, "%s: (%p)\n", __func__, urb);
1809 if (unlikely(ozhcd == NULL)) {
1810 oz_dbg(URB, "Refused urb(%p) not ozhcd\n", urb);
1813 if (unlikely(hcd->state != HC_STATE_RUNNING)) {
1814 oz_dbg(URB, "Refused urb(%p) not running\n", urb);
/* Resolve the target port from the device address. */
1817 port_ix = oz_get_port_from_addr(ozhcd, urb->dev->devnum);
1820 port = &ozhcd->ports[port_ix];
1823 if (!(port->flags & OZ_PORT_F_PRESENT) ||
1824 (port->flags & OZ_PORT_F_CHANGED)) {
1825 oz_dbg(ON, "Refusing URB port_ix = %d devnum = %d\n",
1826 port_ix, urb->dev->devnum);
1830 /* Put request in queue for processing by tasklet.
1832 urbl = oz_alloc_urb_link();
1833 if (unlikely(urbl == NULL))
/* g_tasklet_lock serves as the 'HCD lock' for
 * usb_hcd_link_urb_to_ep() (see file header). */
1836 spin_lock_irqsave(&g_tasklet_lock, irq_state);
1837 rc = usb_hcd_link_urb_to_ep(hcd, urb);
1839 spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
1840 oz_free_urb_link(urbl);
1843 list_add_tail(&urbl->link, &ozhcd->urb_pending_list);
1844 spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
1845 tasklet_schedule(&g_urb_process_tasklet);
1846 atomic_inc(&g_pending_urbs);
/*
 * Search an endpoint's urb_list for the given urb and, if found, unlink
 * and return its oz_urb_link (NULL-tolerant on ep).  For isochronous
 * urbs the endpoint's credit is reduced by the packet count.
 * Caller must hold the appropriate lock for ep->urb_list.
 */
1853 static struct oz_urb_link *oz_remove_urb(struct oz_endpoint *ep,
1856 struct oz_urb_link *urbl;
1857 struct list_head *e;
1859 if (unlikely(ep == NULL))
1861 list_for_each(e, &ep->urb_list) {
1862 urbl = container_of(e, struct oz_urb_link, link);
1863 if (urbl->urb == urb) {
1865 if (usb_pipeisoc(urb->pipe)) {
/* Give back the credit consumed by this isoc urb's packets. */
1866 ep->credit -= urb->number_of_packets;
1877 * Called to dequeue a previously submitted urb for the device.
/*
 * urb_dequeue callback.  May be called in_irq(), so like enqueue the
 * cancellation is deferred: the urb is put on urb_cancel_list and the
 * cancel tasklet is scheduled.  usb_hcd_check_unlink_urb() marks the
 * urb unlinked so an in-flight urb is completed when it is next seen.
 */
1880 static int oz_hcd_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1882 struct oz_hcd *ozhcd = oz_hcd_private(hcd);
1883 struct oz_urb_link *urbl;
1885 unsigned long irq_state;
1887 oz_dbg(URB, "%s: (%p)\n", __func__, urb);
1888 urbl = oz_alloc_urb_link();
1889 if (unlikely(urbl == NULL))
1891 spin_lock_irqsave(&g_tasklet_lock, irq_state);
1892 /* The following function checks the urb is still in the queue
1893 * maintained by the core and that the unlinked field is zero.
1894 * If both are true the function sets the unlinked field and returns
1895 * zero. Otherwise it returns an error.
1897 rc = usb_hcd_check_unlink_urb(hcd, urb, status);
1898 /* We have to check we haven't completed the urb or are about
1899 * to complete it. When we do we set hcpriv to 0 so if this has
1900 * already happened we don't put the urb in the cancel queue.
1902 if ((rc == 0) && urb->hcpriv) {
/* Encode endpoint number plus direction for the cancel tasklet. */
1904 urbl->port = (struct oz_port *)urb->hcpriv;
1905 urbl->ep_num = usb_pipeendpoint(urb->pipe);
1906 if (usb_pipein(urb->pipe))
1907 urbl->ep_num |= USB_DIR_IN;
1908 list_add_tail(&urbl->link, &ozhcd->urb_cancel_list);
1909 spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
1910 tasklet_schedule(&g_urb_cancel_tasklet);
/* Not queued for cancel: release the unused link. */
1912 spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
1913 oz_free_urb_link(urbl);
/* endpoint_disable callback; no per-endpoint hardware state to tear down. */
1921 static void oz_hcd_endpoint_disable(struct usb_hcd *hcd,
1922 struct usb_host_endpoint *ep)
/* endpoint_reset callback; nothing to reset on this virtual controller. */
1929 static void oz_hcd_endpoint_reset(struct usb_hcd *hcd,
1930 struct usb_host_endpoint *ep)
/* get_frame_number callback: delegate to the protocol layer's counter. */
1937 static int oz_hcd_get_frame_number(struct usb_hcd *hcd)
1939 oz_dbg(ON, "oz_hcd_get_frame_number\n");
1940 return oz_usb_get_frame_number();
1945 * This is called as a consequence of us calling usb_hcd_poll_rh_status() and we
1946 * always do that in softirq context.
/*
 * hub_status_data callback: build the root-hub change bitmap.  Port i
 * maps to bit (i+1); ports 7 and above spill into buf[1].  The
 * OZ_PORT_F_CHANGED flag is consumed (cleared) here under hcd_lock.
 */
1948 static int oz_hcd_hub_status_data(struct usb_hcd *hcd, char *buf)
1950 struct oz_hcd *ozhcd = oz_hcd_private(hcd);
1956 spin_lock_bh(&ozhcd->hcd_lock);
1957 for (i = 0; i < OZ_NB_PORTS; i++) {
1958 if (ozhcd->ports[i].flags & OZ_PORT_F_CHANGED) {
1959 oz_dbg(HUB, "Port %d changed\n", i);
1960 ozhcd->ports[i].flags &= ~OZ_PORT_F_CHANGED;
1962 buf[0] |= 1 << (i + 1);
1964 buf[1] |= 1 << (i - 7);
1967 spin_unlock_bh(&ozhcd->hcd_lock);
/* Non-zero return tells the core some port changed. */
1968 if (buf[0] != 0 || buf[1] != 0)
/*
 * Fill in a minimal hub descriptor for the virtual root hub:
 * type 0x29 (USB 2.0 hub), 9-byte length, OZ_NB_PORTS ports,
 * wHubCharacteristics 0x0001 (per-port power switching).
 */
1977 static void oz_get_hub_descriptor(struct usb_hcd *hcd,
1978 struct usb_hub_descriptor *desc)
1980 memset(desc, 0, sizeof(*desc));
1981 desc->bDescriptorType = 0x29;
1982 desc->bDescLength = 9;
1983 desc->wHubCharacteristics = (__force __u16)
1984 __constant_cpu_to_le16(0x0001);
1985 desc->bNbrPorts = OZ_NB_PORTS;
/*
 * Hub class SetPortFeature handler.  Decodes wvalue and applies the
 * resulting set/clear masks to the 32-bit port status word (low 16 bits
 * = status, high 16 bits = change flags) under port_lock.
 */
1991 static int oz_set_port_feature(struct usb_hcd *hcd, u16 wvalue, u16 windex)
1993 struct oz_port *port;
1995 u8 port_id = (u8)windex;
1996 struct oz_hcd *ozhcd = oz_hcd_private(hcd);
1997 unsigned set_bits = 0;
1998 unsigned clear_bits = 0;
/* Port numbers are 1-based on the wire. */
2000 if ((port_id < 1) || (port_id > OZ_NB_PORTS))
2002 port = &ozhcd->ports[port_id-1];
2004 case USB_PORT_FEAT_CONNECTION:
2005 oz_dbg(HUB, "USB_PORT_FEAT_CONNECTION\n");
2007 case USB_PORT_FEAT_ENABLE:
2008 oz_dbg(HUB, "USB_PORT_FEAT_ENABLE\n");
2010 case USB_PORT_FEAT_SUSPEND:
2011 oz_dbg(HUB, "USB_PORT_FEAT_SUSPEND\n");
2013 case USB_PORT_FEAT_OVER_CURRENT:
2014 oz_dbg(HUB, "USB_PORT_FEAT_OVER_CURRENT\n");
2016 case USB_PORT_FEAT_RESET:
2017 oz_dbg(HUB, "USB_PORT_FEAT_RESET\n");
/* Reset completes instantly: enable the port, flag the reset
 * change and forget any previously assigned bus address. */
2018 set_bits = USB_PORT_STAT_ENABLE | (USB_PORT_STAT_C_RESET<<16);
2019 clear_bits = USB_PORT_STAT_RESET;
2020 ozhcd->ports[port_id-1].bus_addr = 0;
2022 case USB_PORT_FEAT_POWER:
2023 oz_dbg(HUB, "USB_PORT_FEAT_POWER\n");
2024 set_bits |= USB_PORT_STAT_POWER;
2026 case USB_PORT_FEAT_LOWSPEED:
2027 oz_dbg(HUB, "USB_PORT_FEAT_LOWSPEED\n");
2029 case USB_PORT_FEAT_C_CONNECTION:
2030 oz_dbg(HUB, "USB_PORT_FEAT_C_CONNECTION\n");
2032 case USB_PORT_FEAT_C_ENABLE:
2033 oz_dbg(HUB, "USB_PORT_FEAT_C_ENABLE\n");
2035 case USB_PORT_FEAT_C_SUSPEND:
2036 oz_dbg(HUB, "USB_PORT_FEAT_C_SUSPEND\n");
2038 case USB_PORT_FEAT_C_OVER_CURRENT:
2039 oz_dbg(HUB, "USB_PORT_FEAT_C_OVER_CURRENT\n");
2041 case USB_PORT_FEAT_C_RESET:
2042 oz_dbg(HUB, "USB_PORT_FEAT_C_RESET\n");
2044 case USB_PORT_FEAT_TEST:
2045 oz_dbg(HUB, "USB_PORT_FEAT_TEST\n");
2047 case USB_PORT_FEAT_INDICATOR:
2048 oz_dbg(HUB, "USB_PORT_FEAT_INDICATOR\n");
2051 oz_dbg(HUB, "Other %d\n", wvalue);
/* Apply the accumulated masks atomically w.r.t. port_lock. */
2054 if (set_bits || clear_bits) {
2055 spin_lock_bh(&port->port_lock);
2056 port->status &= ~clear_bits;
2057 port->status |= set_bits;
2058 spin_unlock_bh(&port->port_lock);
2060 oz_dbg(HUB, "Port[%d] status = 0x%x\n", port_id, port->status);
/*
 * Hub class ClearPortFeature handler.  Decodes wvalue and clears the
 * resulting mask in the 32-bit port status word (low 16 bits = status,
 * high 16 bits = change flags) under port_lock.
 */
2067 static int oz_clear_port_feature(struct usb_hcd *hcd, u16 wvalue, u16 windex)
2069 struct oz_port *port;
2071 u8 port_id = (u8)windex;
2072 struct oz_hcd *ozhcd = oz_hcd_private(hcd);
2073 unsigned clear_bits = 0;
/* Port numbers are 1-based on the wire. */
2075 if ((port_id < 1) || (port_id > OZ_NB_PORTS))
2077 port = &ozhcd->ports[port_id-1];
2079 case USB_PORT_FEAT_CONNECTION:
2080 oz_dbg(HUB, "USB_PORT_FEAT_CONNECTION\n");
2082 case USB_PORT_FEAT_ENABLE:
2083 oz_dbg(HUB, "USB_PORT_FEAT_ENABLE\n");
2084 clear_bits = USB_PORT_STAT_ENABLE;
2086 case USB_PORT_FEAT_SUSPEND:
2087 oz_dbg(HUB, "USB_PORT_FEAT_SUSPEND\n");
2089 case USB_PORT_FEAT_OVER_CURRENT:
2090 oz_dbg(HUB, "USB_PORT_FEAT_OVER_CURRENT\n");
2092 case USB_PORT_FEAT_RESET:
2093 oz_dbg(HUB, "USB_PORT_FEAT_RESET\n");
2095 case USB_PORT_FEAT_POWER:
2096 oz_dbg(HUB, "USB_PORT_FEAT_POWER\n");
2097 clear_bits |= USB_PORT_STAT_POWER;
2099 case USB_PORT_FEAT_LOWSPEED:
2100 oz_dbg(HUB, "USB_PORT_FEAT_LOWSPEED\n");
2102 case USB_PORT_FEAT_C_CONNECTION:
2103 oz_dbg(HUB, "USB_PORT_FEAT_C_CONNECTION\n");
2104 clear_bits = (USB_PORT_STAT_C_CONNECTION << 16);
2106 case USB_PORT_FEAT_C_ENABLE:
2107 oz_dbg(HUB, "USB_PORT_FEAT_C_ENABLE\n");
2108 clear_bits = (USB_PORT_STAT_C_ENABLE << 16);
2110 case USB_PORT_FEAT_C_SUSPEND:
2111 oz_dbg(HUB, "USB_PORT_FEAT_C_SUSPEND\n");
2113 case USB_PORT_FEAT_C_OVER_CURRENT:
2114 oz_dbg(HUB, "USB_PORT_FEAT_C_OVER_CURRENT\n");
2116 case USB_PORT_FEAT_C_RESET:
2117 oz_dbg(HUB, "USB_PORT_FEAT_C_RESET\n");
/* FIX: was USB_PORT_FEAT_C_RESET (a feature selector, value 20),
 * which shifted the wrong bit into the change half of the status
 * word.  The change bits use the USB_PORT_STAT_C_* masks, matching
 * the C_CONNECTION/C_ENABLE cases above. */
2118 clear_bits = (USB_PORT_STAT_C_RESET << 16);
2120 case USB_PORT_FEAT_TEST:
2121 oz_dbg(HUB, "USB_PORT_FEAT_TEST\n");
2123 case USB_PORT_FEAT_INDICATOR:
2124 oz_dbg(HUB, "USB_PORT_FEAT_INDICATOR\n");
2127 oz_dbg(HUB, "Other %d\n", wvalue);
/* Apply the clear mask atomically w.r.t. port_lock. */
2131 spin_lock_bh(&port->port_lock);
2132 port->status &= ~clear_bits;
2133 spin_unlock_bh(&port->port_lock);
2135 oz_dbg(HUB, "Port[%d] status = 0x%x\n",
2136 port_id, ozhcd->ports[port_id-1].status);
/*
 * Hub class GetPortStatus handler: copy the port's 32-bit status word
 * into the caller's buffer as little-endian, using put_unaligned()
 * because the buffer has no alignment guarantee.
 */
2143 static int oz_get_port_status(struct usb_hcd *hcd, u16 windex, char *buf)
2145 struct oz_hcd *ozhcd;
2148 if ((windex < 1) || (windex > OZ_NB_PORTS))
2150 ozhcd = oz_hcd_private(hcd);
2151 oz_dbg(HUB, "GetPortStatus windex = %d\n", windex);
2152 status = ozhcd->ports[windex-1].status;
2153 put_unaligned(cpu_to_le32(status), (__le32 *)buf);
2154 oz_dbg(HUB, "Port[%d] status = %x\n", windex, status);
/*
 * hub_control callback: dispatch root-hub class requests to the
 * per-request helpers.  Hub-level features are accepted but ignored;
 * GetHubStatus always reports zero (no hub-level changes).
 */
2161 static int oz_hcd_hub_control(struct usb_hcd *hcd, u16 req_type, u16 wvalue,
2162 u16 windex, char *buf, u16 wlength)
2167 case ClearHubFeature:
2168 oz_dbg(HUB, "ClearHubFeature: %d\n", req_type)
2170 case ClearPortFeature:
2171 err = oz_clear_port_feature(hcd, wvalue, windex);
2173 case GetHubDescriptor:
2174 oz_get_hub_descriptor(hcd, (struct usb_hub_descriptor *)buf);
2177 oz_dbg(HUB, "GetHubStatus: req_type = 0x%x\n", req_type);
2178 put_unaligned(__constant_cpu_to_le32(0), (__le32 *)buf);
2181 err = oz_get_port_status(hcd, windex, buf);
2184 oz_dbg(HUB, "SetHubFeature: %d\n", req_type);
2186 case SetPortFeature:
2187 err = oz_set_port_feature(hcd, wvalue, windex);
2190 oz_dbg(HUB, "Other: %d\n", req_type);
/*
 * bus_suspend callback: mark the controller suspended both in the core
 * state and our own OZ_HDC_F_SUSPENDED flag, under hcd_lock.
 */
2199 static int oz_hcd_bus_suspend(struct usb_hcd *hcd)
2201 struct oz_hcd *ozhcd;
2203 ozhcd = oz_hcd_private(hcd);
2204 spin_lock_bh(&ozhcd->hcd_lock);
2205 hcd->state = HC_STATE_SUSPENDED;
2206 ozhcd->flags |= OZ_HDC_F_SUSPENDED;
2207 spin_unlock_bh(&ozhcd->hcd_lock);
/*
 * bus_resume callback: inverse of oz_hcd_bus_suspend() — clear the
 * suspended flag and return the controller to HC_STATE_RUNNING.
 */
2214 static int oz_hcd_bus_resume(struct usb_hcd *hcd)
2216 struct oz_hcd *ozhcd;
2218 ozhcd = oz_hcd_private(hcd);
2219 spin_lock_bh(&ozhcd->hcd_lock);
2220 ozhcd->flags &= ~OZ_HDC_F_SUSPENDED;
2221 hcd->state = HC_STATE_RUNNING;
2222 spin_unlock_bh(&ozhcd->hcd_lock);
/* Platform device shutdown hook; nothing to do for the virtual HCD. */
2226 static void oz_plat_shutdown(struct platform_device *dev)
/*
 * Platform probe: create the usb_hcd, initialise the private oz_hcd
 * state (lists, locks, per-port defaults), register with the USB core
 * via usb_add_hcd(), and publish the instance through the global
 * pointer under g_hcdlock.
 */
2233 static int oz_plat_probe(struct platform_device *dev)
2237 struct usb_hcd *hcd;
2238 struct oz_hcd *ozhcd;
2240 hcd = usb_create_hcd(&g_oz_hc_drv, &dev->dev, dev_name(&dev->dev));
2242 oz_dbg(ON, "Failed to created hcd object OK\n");
2245 ozhcd = oz_hcd_private(hcd);
2246 memset(ozhcd, 0, sizeof(*ozhcd));
2247 INIT_LIST_HEAD(&ozhcd->urb_pending_list);
2248 INIT_LIST_HEAD(&ozhcd->urb_cancel_list);
2249 INIT_LIST_HEAD(&ozhcd->orphanage);
/* -1 means no port is currently completing a connection. */
2251 ozhcd->conn_port = -1;
2252 spin_lock_init(&ozhcd->hcd_lock);
2253 for (i = 0; i < OZ_NB_PORTS; i++) {
2254 struct oz_port *port = &ozhcd->ports[i];
2255 port->ozhcd = ozhcd;
/* 0xff marks the port as having no assigned bus address yet. */
2258 port->bus_addr = 0xff;
2259 spin_lock_init(&port->port_lock);
2261 err = usb_add_hcd(hcd, 0, 0);
2263 oz_dbg(ON, "Failed to add hcd object OK\n");
/* Expose the new instance through the module-global pointer. */
2267 spin_lock_bh(&g_hcdlock);
2269 spin_unlock_bh(&g_hcdlock);
/*
 * Platform remove: unpublish the global instance under g_hcdlock,
 * complete any orphaned urbs with -EPIPE, unregister from the USB core
 * and drain the urb-link pool.
 */
2276 static int oz_plat_remove(struct platform_device *dev)
2278 struct usb_hcd *hcd = platform_get_drvdata(dev);
2279 struct oz_hcd *ozhcd;
2283 ozhcd = oz_hcd_private(hcd);
2284 spin_lock_bh(&g_hcdlock);
2285 if (ozhcd == g_ozhcd)
2287 spin_unlock_bh(&g_hcdlock);
2288 oz_dbg(ON, "Clearing orphanage\n");
2289 oz_hcd_clear_orphanage(ozhcd, -EPIPE);
2290 oz_dbg(ON, "Removing hcd\n");
2291 usb_remove_hcd(hcd);
2293 oz_empty_link_pool();
/* Platform PM suspend hook; no device state to save for the virtual HCD. */
2300 static int oz_plat_suspend(struct platform_device *dev, pm_message_t msg)
/* Platform PM resume hook; no device state to restore for the virtual HCD. */
2309 static int oz_plat_resume(struct platform_device *dev)
/*
 * Module init for the HCD side: set up the process/cancel tasklets,
 * register the platform driver and create the single platform device.
 * On any failure the steps already taken are unwound in reverse order.
 */
2317 int oz_hcd_init(void)
2323 tasklet_init(&g_urb_process_tasklet, oz_urb_process_tasklet, 0);
2324 tasklet_init(&g_urb_cancel_tasklet, oz_urb_cancel_tasklet, 0);
2325 err = platform_driver_register(&g_oz_plat_drv);
2326 oz_dbg(ON, "platform_driver_register() returned %d\n", err);
2329 g_plat_dev = platform_device_alloc(OZ_PLAT_DEV_NAME, -1);
2330 if (g_plat_dev == NULL) {
2334 oz_dbg(ON, "platform_device_alloc() succeeded\n");
2335 err = platform_device_add(g_plat_dev);
2338 oz_dbg(ON, "platform_device_add() succeeded\n");
/* Error unwind: drop the device ref, unregister the driver,
 * disable the tasklets. */
2341 platform_device_put(g_plat_dev);
2343 platform_driver_unregister(&g_oz_plat_drv);
2345 tasklet_disable(&g_urb_process_tasklet);
2346 tasklet_disable(&g_urb_cancel_tasklet);
2347 oz_dbg(ON, "oz_hcd_init() failed %d\n", err);
/*
 * Module teardown: kill the tasklets, then unregister the platform
 * device and driver (mirror of oz_hcd_init()).  Logs any urbs still
 * accounted as pending.
 */
2354 void oz_hcd_term(void)
2356 tasklet_kill(&g_urb_process_tasklet);
2357 tasklet_kill(&g_urb_cancel_tasklet);
2358 platform_device_unregister(g_plat_dev);
2359 platform_driver_unregister(&g_oz_plat_drv);
2360 oz_dbg(ON, "Pending urbs:%d\n", atomic_read(&g_pending_urbs));