/*
 * USB Host Controller Driver for IMX21
 *
 * Copyright (C) 2006 Loping Dog Embedded Systems
 * Copyright (C) 2009 Martin Fuzzey
 * Originally written by Jay Monkman <jtm@lopingdog.com>
 * Ported to 2.6.30, debugged and enhanced by Martin Fuzzey
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

 /*
  * The i.MX21 USB hardware contains
  *    * 32 transfer descriptors (called ETDs)
  *    * 4KB of data memory
  *
  * The data memory is shared between the host and function controllers
  * (but this driver only supports the host controller)
  *
  * So setting up a transfer involves:
  *    * Allocating an ETD
  *    * Filling in the ETD with appropriate information
  *    * Allocating data memory (and putting the offset in the ETD)
  *    * Activating the ETD
  *    * Getting an interrupt when done.
  *
  * An ETD is assigned to each active endpoint.
  *
  * Low resource (ETD and data memory) situations are handled differently
  * for isochronous and non-isochronous transactions:
  *
  * Non-ISOC transfers are queued if either ETDs or data memory are
  * unavailable.
  *
  * ISOC transfers use 2 ETDs per endpoint to achieve double buffering.
  * They allocate both ETDs and data memory during URB submission
  * (and fail if either is unavailable).
  */

#include <linux/clk.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>

#include "imx21-hcd.h"
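
/*
 * When DEBUG is set, DEBUG_LOG_FRAME() snapshots the hardware frame
 * counter (USBH_FRMNUB) into the named per-ETD field (for example
 * "activated" records into etd->activated_frame), so the unblock
 * kludge in process_etds() can report transfer timing.
 */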
#ifdef DEBUG
#define DEBUG_LOG_FRAME(imx21, etd, event) \
        (etd)->event##_frame = readl((imx21)->regs + USBH_FRMNUB)
#else
#define DEBUG_LOG_FRAME(imx21, etd, event) do { } while (0)
#endif

static const char hcd_name[] = "imx21-hcd";

static inline struct imx21 *hcd_to_imx21(struct usb_hcd *hcd)
{
        return (struct imx21 *)hcd->hcd_priv;
}

/* =========================================== */
/* Hardware access helpers                     */
/* =========================================== */

static inline void set_register_bits(struct imx21 *imx21, u32 offset, u32 mask)
{
        void __iomem *reg = imx21->regs + offset;
        writel(readl(reg) | mask, reg);
}

static inline void clear_register_bits(struct imx21 *imx21,
        u32 offset, u32 mask)
{
        void __iomem *reg = imx21->regs + offset;
        writel(readl(reg) & ~mask, reg);
}

static inline void clear_toggle_bit(struct imx21 *imx21, u32 offset, u32 mask)
{
        void __iomem *reg = imx21->regs + offset;

        if (readl(reg) & mask)
                writel(mask, reg);
}

static inline void set_toggle_bit(struct imx21 *imx21, u32 offset, u32 mask)
{
        void __iomem *reg = imx21->regs + offset;

        if (!(readl(reg) & mask))
                writel(mask, reg);
}

static void etd_writel(struct imx21 *imx21, int etd_num, int dword, u32 value)
{
        writel(value, imx21->regs + USB_ETD_DWORD(etd_num, dword));
}

static u32 etd_readl(struct imx21 *imx21, int etd_num, int dword)
{
        return readl(imx21->regs + USB_ETD_DWORD(etd_num, dword));
}

static inline int wrap_frame(int counter)
{
        return counter & 0xFFFF;
}
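
/*
 * Compare frame numbers modulo 2^16: returns true if "frame" is later
 * than "after".  Because the difference is evaluated as a signed 16-bit
 * value this works across counter wrap, e.g. frame_after(0x0002, 0xFFFE)
 * is true (4 frames later) while frame_after(0xFFFE, 0x0002) is false.
 */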
static inline int frame_after(int frame, int after)
{
        /* handle wrapping like jiffies time_after */
        return (s16)((s16)after - (s16)frame) < 0;
}

static int imx21_hc_get_frame(struct usb_hcd *hcd)
{
        struct imx21 *imx21 = hcd_to_imx21(hcd);

        return wrap_frame(readl(imx21->regs + USBH_FRMNUB));
}
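
/*
 * Buffers that do not start on a 4-byte boundary cannot be handed to
 * the ETD DMA engine; activate_etd() services such transfers with PIO
 * or a bounce buffer instead.
 */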
static inline bool unsuitable_for_dma(dma_addr_t addr)
{
        return (addr & 3) != 0;
}

#include "imx21-dbg.c"

static void nonisoc_urb_completed_for_etd(
        struct imx21 *imx21, struct etd_priv *etd, int status);
static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb);
static void free_dmem(struct imx21 *imx21, struct etd_priv *etd);

/* =========================================== */
/* ETD management                              */
/* =========================================== */

static int alloc_etd(struct imx21 *imx21)
{
        int i;
        struct etd_priv *etd = imx21->etd;

        for (i = 0; i < USB_NUM_ETD; i++, etd++) {
                if (etd->alloc == 0) {
                        memset(etd, 0, sizeof(imx21->etd[0]));
                        etd->alloc = 1;
                        debug_etd_allocated(imx21);
                        return i;
                }
        }
        return -1;
}

static void disactivate_etd(struct imx21 *imx21, int num)
{
        int etd_mask = (1 << num);
        struct etd_priv *etd = &imx21->etd[num];

        writel(etd_mask, imx21->regs + USBH_ETDENCLR);
        clear_register_bits(imx21, USBH_ETDDONEEN, etd_mask);
        writel(etd_mask, imx21->regs + USB_ETDDMACHANLCLR);
        clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask);

        etd->active_count = 0;

        DEBUG_LOG_FRAME(imx21, etd, disactivated);
}

static void reset_etd(struct imx21 *imx21, int num)
{
        struct etd_priv *etd = imx21->etd + num;
        int i;

        disactivate_etd(imx21, num);

        for (i = 0; i < 4; i++)
                etd_writel(imx21, num, i, 0);
        etd->urb = NULL;
        etd->ep = NULL;
        etd->td = NULL;
        etd->bounce_buffer = NULL;
}

static void free_etd(struct imx21 *imx21, int num)
{
        if (num < 0)
                return;

        if (num >= USB_NUM_ETD) {
                dev_err(imx21->dev, "BAD etd=%d!\n", num);
                return;
        }
        if (imx21->etd[num].alloc == 0) {
                dev_err(imx21->dev, "ETD %d already free!\n", num);
                return;
        }

        debug_etd_freed(imx21);
        reset_etd(imx21, num);
        memset(&imx21->etd[num], 0, sizeof(imx21->etd[0]));
}

static void setup_etd_dword0(struct imx21 *imx21,
        int etd_num, struct urb *urb, u8 dir, u16 maxpacket)
{
        etd_writel(imx21, etd_num, 0,
                ((u32) usb_pipedevice(urb->pipe) << DW0_ADDRESS) |
                ((u32) usb_pipeendpoint(urb->pipe) << DW0_ENDPNT) |
                ((u32) dir << DW0_DIRECT) |
                ((u32) ((urb->dev->speed == USB_SPEED_LOW) ?
                        1 : 0) << DW0_SPEED) |
                ((u32) fmt_urb_to_etd[usb_pipetype(urb->pipe)] << DW0_FORMAT) |
                ((u32) maxpacket << DW0_MAXPKTSIZ));
}

/*
 * Copy a buffer to the data controller's data memory.
 * We cannot use memcpy_toio() because the hardware requires 32-bit writes.
 */
static void copy_to_dmem(
        struct imx21 *imx21, int dmem_offset, void *src, int count)
{
        void __iomem *dmem = imx21->regs + USBOTG_DMEM + dmem_offset;
        u32 word = 0;
        u8 *p = src;
        int byte = 0;
        int i;

        for (i = 0; i < count; i++) {
                byte = i % 4;
                word += (*p++ << (byte * 8));
                if (byte == 3) {
                        writel(word, dmem);
                        dmem += 4;
                        word = 0;
                }
        }

        /* flush any trailing partial word */
        if (count && byte != 3)
                writel(word, dmem);
}
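
/*
 * Kick off a prepared ETD.  If the URB's buffer cannot be used for DMA
 * directly (see unsuitable_for_dma()) there are two fallbacks:
 *   - transfers that fit in the ETD's data memory are done by PIO,
 *     copying to/from dmem around the transfer;
 *   - larger transfers get an aligned bounce buffer that is DMA mapped
 *     for the duration of the transfer.
 */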
static void activate_etd(struct imx21 *imx21, int etd_num, u8 dir)
{
        u32 etd_mask = 1 << etd_num;
        struct etd_priv *etd = &imx21->etd[etd_num];

        if (etd->dma_handle && unsuitable_for_dma(etd->dma_handle)) {
                /* For non-aligned isoc the condition below is always true */
                if (etd->len <= etd->dmem_size) {
                        /* Fits into data memory, use PIO */
                        if (dir != TD_DIR_IN) {
                                copy_to_dmem(imx21,
                                                etd->dmem_offset,
                                                etd->cpu_buffer, etd->len);
                        }
                        etd->dma_handle = 0;

                } else {
                        /* Too big for data memory, use bounce buffer */
                        enum dma_data_direction dmadir;

                        if (dir == TD_DIR_IN) {
                                dmadir = DMA_FROM_DEVICE;
                                etd->bounce_buffer = kmalloc(etd->len,
                                                                GFP_ATOMIC);
                        } else {
                                dmadir = DMA_TO_DEVICE;
                                etd->bounce_buffer = kmemdup(etd->cpu_buffer,
                                                                etd->len,
                                                                GFP_ATOMIC);
                        }
                        if (!etd->bounce_buffer) {
                                dev_err(imx21->dev, "failed bounce alloc\n");
                                goto err_bounce_alloc;
                        }

                        etd->dma_handle =
                                dma_map_single(imx21->dev,
                                                etd->bounce_buffer,
                                                etd->len,
                                                dmadir);
                        if (dma_mapping_error(imx21->dev, etd->dma_handle)) {
                                dev_err(imx21->dev, "failed bounce map\n");
                                goto err_bounce_map;
                        }
                }
        }

        clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask);
        set_register_bits(imx21, USBH_ETDDONEEN, etd_mask);
        clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
        clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);

        if (etd->dma_handle) {
                set_register_bits(imx21, USB_ETDDMACHANLCLR, etd_mask);
                clear_toggle_bit(imx21, USBH_XBUFSTAT, etd_mask);
                clear_toggle_bit(imx21, USBH_YBUFSTAT, etd_mask);
                writel(etd->dma_handle, imx21->regs + USB_ETDSMSA(etd_num));
                set_register_bits(imx21, USB_ETDDMAEN, etd_mask);
        } else {
                if (dir != TD_DIR_IN) {
                        /* need to set for ZLP and PIO */
                        set_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
                        set_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
                }
        }

        DEBUG_LOG_FRAME(imx21, etd, activated);

#ifdef DEBUG
        if (!etd->active_count) {
                int i;
                etd->activated_frame = readl(imx21->regs + USBH_FRMNUB);
                etd->disactivated_frame = -1;
                etd->last_int_frame = -1;
                etd->last_req_frame = -1;

                for (i = 0; i < 4; i++)
                        etd->submitted_dwords[i] = etd_readl(imx21, etd_num, i);
        }
#endif

        etd->active_count = 1;
        writel(etd_mask, imx21->regs + USBH_ETDENSET);
        return;

err_bounce_map:
        kfree(etd->bounce_buffer);

err_bounce_alloc:
        free_dmem(imx21, etd);
        nonisoc_urb_completed_for_etd(imx21, etd, -ENOMEM);
}

/* =========================================== */
/* Data memory management                      */
/* =========================================== */
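
/*
 * Data memory is tracked as a list of allocated areas kept sorted by
 * offset.  alloc_dmem() does a first-fit search for a gap big enough
 * for the (4-byte rounded) request and inserts the new area in order.
 * When no gap is found, non-ISOC callers queue the ETD on
 * queue_for_dmem and are retried from free_dmem().
 */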
static int alloc_dmem(struct imx21 *imx21, unsigned int size,
                      struct usb_host_endpoint *ep)
{
        unsigned int offset = 0;
        struct imx21_dmem_area *area;
        struct imx21_dmem_area *tmp;

        size += (~size + 1) & 0x3; /* Round up to 4 byte multiple */

        if (size > DMEM_SIZE) {
                dev_err(imx21->dev, "size=%d > DMEM_SIZE(%d)\n",
                        size, DMEM_SIZE);
                return -EINVAL;
        }

        /* First-fit search of the gaps between already allocated areas */
        list_for_each_entry(tmp, &imx21->dmem_list, list) {
                if ((size + offset) < offset)
                        goto fail;
                if ((size + offset) <= tmp->offset)
                        break;
                offset = tmp->size + tmp->offset;
                if ((offset + size) > DMEM_SIZE)
                        goto fail;
        }

        area = kmalloc(sizeof(struct imx21_dmem_area), GFP_ATOMIC);
        if (area == NULL)
                return -ENOMEM;

        area->ep = ep;
        area->offset = offset;
        area->size = size;
        list_add_tail(&area->list, &tmp->list);
        debug_dmem_allocated(imx21, size);
        return offset;

fail:
        return -ENOMEM;
}

/* Memory now available for a queued ETD - activate it */
static void activate_queued_etd(struct imx21 *imx21,
        struct etd_priv *etd, u32 dmem_offset)
{
        struct urb_priv *urb_priv = etd->urb->hcpriv;
        int etd_num = etd - &imx21->etd[0];
        u32 maxpacket = etd_readl(imx21, etd_num, 1) >> DW1_YBUFSRTAD;
        u8 dir = (etd_readl(imx21, etd_num, 2) >> DW2_DIRPID) & 0x03;

        dev_dbg(imx21->dev, "activating queued ETD %d now DMEM available\n",
                etd_num);
        etd_writel(imx21, etd_num, 1,
            ((dmem_offset + maxpacket) << DW1_YBUFSRTAD) | dmem_offset);

        etd->dmem_offset = dmem_offset;
        urb_priv->active = 1;
        activate_etd(imx21, etd_num, dir);
}

static void free_dmem(struct imx21 *imx21, struct etd_priv *etd)
{
        struct imx21_dmem_area *area;
        struct etd_priv *tmp;
        int found = 0;
        int offset;

        if (!etd->dmem_size)
                return;
        etd->dmem_size = 0;

        offset = etd->dmem_offset;
        list_for_each_entry(area, &imx21->dmem_list, list) {
                if (area->offset == offset) {
                        debug_dmem_freed(imx21, area->size);
                        list_del(&area->list);
                        kfree(area);
                        found = 1;
                        break;
                }
        }

        if (!found) {
                dev_err(imx21->dev,
                        "Trying to free unallocated DMEM %d\n", offset);
                return;
        }

        /* Try again to allocate memory for anything we've queued */
        list_for_each_entry_safe(etd, tmp, &imx21->queue_for_dmem, queue) {
                offset = alloc_dmem(imx21, etd->dmem_size, etd->ep);
                if (offset >= 0) {
                        list_del(&etd->queue);
                        activate_queued_etd(imx21, etd, (u32)offset);
                }
        }
}

static void free_epdmem(struct imx21 *imx21, struct usb_host_endpoint *ep)
{
        struct imx21_dmem_area *area, *tmp;

        list_for_each_entry_safe(area, tmp, &imx21->dmem_list, list) {
                if (area->ep == ep) {
                        dev_err(imx21->dev,
                                "Active DMEM %d for disabled ep=%p\n",
                                area->offset, ep);
                        list_del(&area->list);
                        kfree(area);
                }
        }
}

/* =========================================== */
/* Endpoint handling                           */
/* =========================================== */

/* Endpoint now idle - release its ETD(s) or assign to queued request */
static void ep_idle(struct imx21 *imx21, struct ep_priv *ep_priv)
{
        int i;

        for (i = 0; i < NUM_ISO_ETDS; i++) {
                int etd_num = ep_priv->etd[i];
                struct etd_priv *etd;
                if (etd_num < 0)
                        continue;

                etd = &imx21->etd[etd_num];
                ep_priv->etd[i] = -1;

                free_dmem(imx21, etd); /* for isoc */

                if (list_empty(&imx21->queue_for_etd)) {
                        free_etd(imx21, etd_num);
                        continue;
                }

                dev_dbg(imx21->dev,
                        "assigning idle etd %d for queued request\n", etd_num);
                ep_priv = list_first_entry(&imx21->queue_for_etd,
                        struct ep_priv, queue);
                list_del(&ep_priv->queue);
                reset_etd(imx21, etd_num);
                ep_priv->waiting_etd = 0;
                ep_priv->etd[i] = etd_num;

                if (list_empty(&ep_priv->ep->urb_list)) {
                        dev_err(imx21->dev, "No urb for queued ep!\n");
                        continue;
                }
                schedule_nonisoc_etd(imx21, list_first_entry(
                        &ep_priv->ep->urb_list, struct urb, urb_list));
        }
}

static void urb_done(struct usb_hcd *hcd, struct urb *urb, int status)
__releases(imx21->lock)
__acquires(imx21->lock)
{
        struct imx21 *imx21 = hcd_to_imx21(hcd);
        struct ep_priv *ep_priv = urb->ep->hcpriv;
        struct urb_priv *urb_priv = urb->hcpriv;

        debug_urb_completed(imx21, urb, status);
        dev_vdbg(imx21->dev, "urb %p done %d\n", urb, status);

        kfree(urb_priv->isoc_td);
        kfree(urb->hcpriv);
        urb->hcpriv = NULL;
        usb_hcd_unlink_urb_from_ep(hcd, urb);
        spin_unlock(&imx21->lock);
        usb_hcd_giveback_urb(hcd, urb, status);
        spin_lock(&imx21->lock);
        if (list_empty(&ep_priv->ep->urb_list))
                ep_idle(imx21, ep_priv);
}

static void nonisoc_urb_completed_for_etd(
        struct imx21 *imx21, struct etd_priv *etd, int status)
{
        struct usb_host_endpoint *ep = etd->ep;

        urb_done(imx21->hcd, etd->urb, status);
        etd->urb = NULL;

        if (!list_empty(&ep->urb_list)) {
                struct urb *urb = list_first_entry(
                                        &ep->urb_list, struct urb, urb_list);

                dev_vdbg(imx21->dev, "next URB %p\n", urb);
                schedule_nonisoc_etd(imx21, urb);
        }
}

/* =========================================== */
/* ISOC Handling ...                           */
/* =========================================== */
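
/*
 * Each isochronous endpoint owns NUM_ISO_ETDS (2) ETDs so that one can
 * be transferring while the other is being set up (double buffering).
 * schedule_isoc_etds() loads every idle ETD with the next pending TD,
 * dropping TDs whose scheduled frame has already passed.
 */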
static void schedule_isoc_etds(struct usb_hcd *hcd,
        struct usb_host_endpoint *ep)
{
        struct imx21 *imx21 = hcd_to_imx21(hcd);
        struct ep_priv *ep_priv = ep->hcpriv;
        struct etd_priv *etd;
        struct urb_priv *urb_priv;
        struct td *td;
        int etd_num;
        int i;
        int cur_frame;
        u8 dir;

        for (i = 0; i < NUM_ISO_ETDS; i++) {
too_late:
                if (list_empty(&ep_priv->td_list))
                        break;

                etd_num = ep_priv->etd[i];
                if (etd_num < 0)
                        break;

                etd = &imx21->etd[etd_num];
                if (etd->urb)
                        continue;

                td = list_entry(ep_priv->td_list.next, struct td, list);
                list_del(&td->list);
                urb_priv = td->urb->hcpriv;

                cur_frame = imx21_hc_get_frame(hcd);
                if (frame_after(cur_frame, td->frame)) {
                        dev_dbg(imx21->dev, "isoc too late frame %d > %d\n",
                                cur_frame, td->frame);
                        urb_priv->isoc_status = -EXDEV;
                        td->urb->iso_frame_desc[
                                td->isoc_index].actual_length = 0;
                        td->urb->iso_frame_desc[td->isoc_index].status = -EXDEV;
                        if (--urb_priv->isoc_remaining == 0)
                                urb_done(hcd, td->urb, urb_priv->isoc_status);
                        goto too_late;
                }

                urb_priv->active = 1;
                etd->td = td;
                etd->ep = td->ep;
                etd->urb = td->urb;
                etd->len = td->len;
                etd->dma_handle = td->dma_handle;
                etd->cpu_buffer = td->cpu_buffer;

                debug_isoc_submitted(imx21, cur_frame, td);

                dir = usb_pipeout(td->urb->pipe) ? TD_DIR_OUT : TD_DIR_IN;
                setup_etd_dword0(imx21, etd_num, td->urb, dir, etd->dmem_size);
                etd_writel(imx21, etd_num, 1, etd->dmem_offset);
                etd_writel(imx21, etd_num, 2,
                        (TD_NOTACCESSED << DW2_COMPCODE) |
                        ((td->frame & 0xFFFF) << DW2_STARTFRM));
                etd_writel(imx21, etd_num, 3,
                        (TD_NOTACCESSED << DW3_COMPCODE0) |
                        (td->len << DW3_PKTLEN0));

                activate_etd(imx21, etd_num, dir);
        }
}

static void isoc_etd_done(struct usb_hcd *hcd, int etd_num)
{
        struct imx21 *imx21 = hcd_to_imx21(hcd);
        int etd_mask = 1 << etd_num;
        struct etd_priv *etd = imx21->etd + etd_num;
        struct urb *urb = etd->urb;
        struct urb_priv *urb_priv = urb->hcpriv;
        struct td *td = etd->td;
        struct usb_host_endpoint *ep = etd->ep;
        int isoc_index = td->isoc_index;
        unsigned int pipe = urb->pipe;
        int dir_in = usb_pipein(pipe);
        int cc;
        int bytes_xfrd;

        disactivate_etd(imx21, etd_num);

        cc = (etd_readl(imx21, etd_num, 3) >> DW3_COMPCODE0) & 0xf;
        bytes_xfrd = etd_readl(imx21, etd_num, 3) & 0x3ff;

        /* Input doesn't always fill the buffer, so don't generate an
         * error when this happens.
         */
        if (dir_in && (cc == TD_DATAUNDERRUN))
                cc = TD_CC_NOERROR;

        if (cc == TD_NOTACCESSED)
                bytes_xfrd = 0;

        debug_isoc_completed(imx21,
                imx21_hc_get_frame(hcd), td, cc, bytes_xfrd);
        if (cc) {
                urb_priv->isoc_status = -EXDEV;
                dev_dbg(imx21->dev,
                        "bad iso cc=0x%X frame=%d sched frame=%d "
                        "cnt=%d len=%d urb=%p etd=%d index=%d\n",
                        cc, imx21_hc_get_frame(hcd), td->frame,
                        bytes_xfrd, td->len, urb, etd_num, isoc_index);
        }

        if (dir_in) {
                clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
                if (!etd->dma_handle)
                        memcpy_fromio(etd->cpu_buffer,
                                imx21->regs + USBOTG_DMEM + etd->dmem_offset,
                                bytes_xfrd);
        }

        urb->actual_length += bytes_xfrd;
        urb->iso_frame_desc[isoc_index].actual_length = bytes_xfrd;
        urb->iso_frame_desc[isoc_index].status = cc_to_error[cc];

        etd->td = NULL;
        etd->urb = NULL;
        etd->ep = NULL;

        if (--urb_priv->isoc_remaining == 0)
                urb_done(hcd, urb, urb_priv->isoc_status);

        schedule_isoc_etds(hcd, ep);
}

static struct ep_priv *alloc_isoc_ep(
        struct imx21 *imx21, struct usb_host_endpoint *ep)
{
        struct ep_priv *ep_priv;
        int i;

        ep_priv = kzalloc(sizeof(struct ep_priv), GFP_ATOMIC);
        if (!ep_priv)
                return NULL;

        for (i = 0; i < NUM_ISO_ETDS; i++)
                ep_priv->etd[i] = -1;

        INIT_LIST_HEAD(&ep_priv->td_list);
        ep_priv->ep = ep;
        ep->hcpriv = ep_priv;
        return ep_priv;
}

static int alloc_isoc_etds(struct imx21 *imx21, struct ep_priv *ep_priv)
{
        int i, j;
        int etd_num;

        /* Allocate the ETDs if required */
        for (i = 0; i < NUM_ISO_ETDS; i++) {
                if (ep_priv->etd[i] < 0) {
                        etd_num = alloc_etd(imx21);
                        if (etd_num < 0)
                                goto alloc_etd_failed;

                        ep_priv->etd[i] = etd_num;
                        imx21->etd[etd_num].ep = ep_priv->ep;
                }
        }
        return 0;

alloc_etd_failed:
        dev_err(imx21->dev, "isoc: Couldn't allocate etd\n");
        for (j = 0; j < i; j++) {
                free_etd(imx21, ep_priv->etd[j]);
                ep_priv->etd[j] = -1;
        }
        return -ENOMEM;
}

static int imx21_hc_urb_enqueue_isoc(struct usb_hcd *hcd,
                                     struct usb_host_endpoint *ep,
                                     struct urb *urb, gfp_t mem_flags)
{
        struct imx21 *imx21 = hcd_to_imx21(hcd);
        struct urb_priv *urb_priv;
        unsigned long flags;
        struct ep_priv *ep_priv;
        struct td *td = NULL;
        int i;
        int ret;
        int cur_frame;
        u16 maxpacket;

        urb_priv = kzalloc(sizeof(struct urb_priv), mem_flags);
        if (urb_priv == NULL)
                return -ENOMEM;

        urb_priv->isoc_td = kzalloc(
                sizeof(struct td) * urb->number_of_packets, mem_flags);
        if (urb_priv->isoc_td == NULL) {
                ret = -ENOMEM;
                goto alloc_td_failed;
        }

        spin_lock_irqsave(&imx21->lock, flags);

        if (ep->hcpriv == NULL) {
                ep_priv = alloc_isoc_ep(imx21, ep);
                if (ep_priv == NULL) {
                        ret = -ENOMEM;
                        goto alloc_ep_failed;
                }
        } else {
                ep_priv = ep->hcpriv;
        }

        ret = alloc_isoc_etds(imx21, ep_priv);
        if (ret)
                goto alloc_etd_failed;

        ret = usb_hcd_link_urb_to_ep(hcd, urb);
        if (ret)
                goto link_failed;

        urb->status = -EINPROGRESS;
        urb->actual_length = 0;
        urb->error_count = 0;
        urb->hcpriv = urb_priv;
        urb_priv->ep = ep;

        /* allocate data memory for largest packets if not already done */
        maxpacket = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
        for (i = 0; i < NUM_ISO_ETDS; i++) {
                struct etd_priv *etd = &imx21->etd[ep_priv->etd[i]];

                if (etd->dmem_size > 0 && etd->dmem_size < maxpacket) {
                        /* not sure if this can really occur.... */
                        dev_err(imx21->dev, "increasing isoc buffer %d->%d\n",
                                etd->dmem_size, maxpacket);
                        ret = -EMSGSIZE;
                        goto alloc_dmem_failed;
                }

                if (etd->dmem_size == 0) {
                        etd->dmem_offset = alloc_dmem(imx21, maxpacket, ep);
                        if (etd->dmem_offset < 0) {
                                dev_dbg(imx21->dev, "failed alloc isoc dmem\n");
                                ret = -EAGAIN;
                                goto alloc_dmem_failed;
                        }
                        etd->dmem_size = maxpacket;
                }
        }

        /*
         * Calculate the start frame: an idle endpoint starts a few
         * frames ahead of the current frame counter; a busy one
         * continues one interval after its last queued TD.  If that
         * frame has already passed, skip the missed packets, or shift
         * the whole schedule forward when URB_ISO_ASAP is set (a
         * complete underrun is treated the same way).
         */
        cur_frame = imx21_hc_get_frame(hcd);
        i = 0;
        if (list_empty(&ep_priv->td_list)) {
                urb->start_frame = wrap_frame(cur_frame + 5);
        } else {
                urb->start_frame = wrap_frame(list_entry(ep_priv->td_list.prev,
                                struct td, list)->frame + urb->interval);

                if (frame_after(cur_frame, urb->start_frame)) {
                        dev_dbg(imx21->dev,
                                "enqueue: adjusting iso start %d (cur=%d) asap=%d\n",
                                urb->start_frame, cur_frame,
                                (urb->transfer_flags & URB_ISO_ASAP) != 0);
                        i = DIV_ROUND_UP(wrap_frame(
                                        cur_frame - urb->start_frame),
                                        urb->interval);

                        /* Treat underruns as if URB_ISO_ASAP was set */
                        if ((urb->transfer_flags & URB_ISO_ASAP) ||
                                        i >= urb->number_of_packets) {
                                urb->start_frame = wrap_frame(urb->start_frame
                                                + i * urb->interval);
                                i = 0;
                        }
                }
        }

        /* set up transfers */
        urb_priv->isoc_remaining = urb->number_of_packets - i;
        td = urb_priv->isoc_td;
        for (; i < urb->number_of_packets; i++, td++) {
                unsigned int offset = urb->iso_frame_desc[i].offset;
                td->ep = ep;
                td->urb = urb;
                td->len = urb->iso_frame_desc[i].length;
                td->isoc_index = i;
                td->frame = wrap_frame(urb->start_frame + urb->interval * i);
                td->dma_handle = urb->transfer_dma + offset;
                td->cpu_buffer = urb->transfer_buffer + offset;
                list_add_tail(&td->list, &ep_priv->td_list);
        }

        dev_vdbg(imx21->dev, "setup %d packets for iso frame %d->%d\n",
                urb->number_of_packets, urb->start_frame, td->frame);

        debug_urb_submitted(imx21, urb);
        schedule_isoc_etds(hcd, ep);

        spin_unlock_irqrestore(&imx21->lock, flags);
        return 0;

alloc_dmem_failed:
        usb_hcd_unlink_urb_from_ep(hcd, urb);

link_failed:
alloc_etd_failed:
alloc_ep_failed:
        spin_unlock_irqrestore(&imx21->lock, flags);
        kfree(urb_priv->isoc_td);

alloc_td_failed:
        kfree(urb_priv);
        return ret;
}

static void dequeue_isoc_urb(struct imx21 *imx21,
        struct urb *urb, struct ep_priv *ep_priv)
{
        struct urb_priv *urb_priv = urb->hcpriv;
        struct td *td, *tmp;
        int i;

        if (urb_priv->active) {
                for (i = 0; i < NUM_ISO_ETDS; i++) {
                        int etd_num = ep_priv->etd[i];
                        if (etd_num != -1 && imx21->etd[etd_num].urb == urb) {
                                struct etd_priv *etd = imx21->etd + etd_num;

                                reset_etd(imx21, etd_num);
                                free_dmem(imx21, etd);
                        }
                }
        }

        list_for_each_entry_safe(td, tmp, &ep_priv->td_list, list) {
                if (td->urb == urb) {
                        dev_vdbg(imx21->dev, "removing td %p\n", td);
                        list_del(&td->list);
                }
        }
}

/* =========================================== */
/* NON ISOC Handling ...                       */
/* =========================================== */
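
/*
 * Non-ISOC endpoints use a single ETD that is rescheduled stage by
 * stage.  Control transfers walk the state machine US_CTRL_SETUP ->
 * US_CTRL_DATA (if there is a data stage) -> US_CTRL_ACK; bulk URBs
 * that need a terminating zero-length packet go US_BULK -> US_BULK0.
 * nonisoc_etd_done() advances the state and reschedules until done.
 */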
static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb)
{
        unsigned int pipe = urb->pipe;
        struct urb_priv *urb_priv = urb->hcpriv;
        struct ep_priv *ep_priv = urb_priv->ep->hcpriv;
        int state = urb_priv->state;
        int etd_num = ep_priv->etd[0];
        struct etd_priv *etd;
        u32 count;
        u16 etd_buf_size;
        u16 maxpacket;
        u8 dir;
        u8 bufround;
        u8 datatoggle;
        u8 interval = 0;
        u8 relpolpos = 0;

        if (etd_num < 0) {
                dev_err(imx21->dev, "No valid ETD\n");
                return;
        }
        if (readl(imx21->regs + USBH_ETDENSET) & (1 << etd_num))
                dev_err(imx21->dev, "submitting to active ETD %d\n", etd_num);

        etd = &imx21->etd[etd_num];
        maxpacket = usb_maxpacket(urb->dev, pipe, usb_pipeout(pipe));
        if (!maxpacket)
                maxpacket = 8;

        if (usb_pipecontrol(pipe) && (state != US_CTRL_DATA)) {
                if (state == US_CTRL_SETUP) {
                        dir = TD_DIR_SETUP;
                        if (unsuitable_for_dma(urb->setup_dma))
                                usb_hcd_unmap_urb_setup_for_dma(imx21->hcd,
                                        urb);
                        etd->dma_handle = urb->setup_dma;
                        etd->cpu_buffer = urb->setup_packet;
                        bufround = 0;
                        count = 8;
                        datatoggle = TD_TOGGLE_DATA0;
                } else {        /* US_CTRL_ACK */
                        dir = usb_pipeout(pipe) ? TD_DIR_IN : TD_DIR_OUT;
                        bufround = 0;
                        count = 0;
                        datatoggle = TD_TOGGLE_DATA1;
                }
        } else {
                dir = usb_pipeout(pipe) ? TD_DIR_OUT : TD_DIR_IN;
                bufround = (dir == TD_DIR_IN) ? 1 : 0;
                if (unsuitable_for_dma(urb->transfer_dma))
                        usb_hcd_unmap_urb_for_dma(imx21->hcd, urb);

                etd->dma_handle = urb->transfer_dma;
                etd->cpu_buffer = urb->transfer_buffer;
                if (usb_pipebulk(pipe) && (state == US_BULK0))
                        count = 0;
                else
                        count = urb->transfer_buffer_length;

                if (usb_pipecontrol(pipe)) {
                        datatoggle = TD_TOGGLE_DATA1;
                } else {
                        if (usb_gettoggle(
                                        urb->dev,
                                        usb_pipeendpoint(urb->pipe),
                                        usb_pipeout(urb->pipe)))
                                datatoggle = TD_TOGGLE_DATA1;
                        else
                                datatoggle = TD_TOGGLE_DATA0;
                }
        }

        etd->urb = urb;
        etd->ep = urb_priv->ep;
        etd->len = count;

        if (usb_pipeint(pipe)) {
                interval = urb->interval;
                relpolpos = (readl(imx21->regs + USBH_FRMNUB) + 1) & 0xff;
        }

        /* Write ETD to device memory */
        setup_etd_dword0(imx21, etd_num, urb, dir, maxpacket);

        etd_writel(imx21, etd_num, 2,
                ((u32) interval << DW2_POLINTERV) |
                ((u32) relpolpos << DW2_RELPOLPOS) |
                ((u32) dir << DW2_DIRPID) |
                ((u32) bufround << DW2_BUFROUND) |
                ((u32) datatoggle << DW2_DATATOG) |
                ((u32) TD_NOTACCESSED << DW2_COMPCODE));

        /*
         * DMA will always transfer buffer size even if TOBYCNT in DWORD3
         * is smaller. Make sure we don't overrun the buffer!
         */
        if (count && count < maxpacket)
                etd_buf_size = count;
        else
                etd_buf_size = maxpacket;

        etd_writel(imx21, etd_num, 3,
                ((u32) (etd_buf_size - 1) << DW3_BUFSIZE) | (u32) count);

        if (!count)
                etd->dma_handle = 0;
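
        /*
         * Transfers longer than one packet are double buffered: the X
         * and Y packet buffers in data memory are filled and drained
         * alternately, e.g. a 512-byte bulk transfer with 64-byte
         * maxpacket gets 128 bytes of dmem.
         */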
        /* allocate x and y buffer space at once */
        etd->dmem_size = (count > maxpacket) ? maxpacket * 2 : maxpacket;
        etd->dmem_offset = alloc_dmem(imx21, etd->dmem_size, urb_priv->ep);
        if (etd->dmem_offset < 0) {
                /* Setup everything we can in HW and update when we get DMEM */
                etd_writel(imx21, etd_num, 1, (u32)maxpacket << 16);

                dev_dbg(imx21->dev, "Queuing etd %d for DMEM\n", etd_num);
                debug_urb_queued_for_dmem(imx21, urb);
                list_add_tail(&etd->queue, &imx21->queue_for_dmem);
                return;
        }

        etd_writel(imx21, etd_num, 1,
                (((u32) etd->dmem_offset + (u32) maxpacket) << DW1_YBUFSRTAD) |
                (u32) etd->dmem_offset);

        urb_priv->active = 1;

        /* enable the ETD to kick off transfer */
        dev_vdbg(imx21->dev, "Activating etd %d for %d bytes %s\n",
                etd_num, count, dir != TD_DIR_IN ? "out" : "in");
        activate_etd(imx21, etd_num, dir);
}

static void nonisoc_etd_done(struct usb_hcd *hcd, int etd_num)
{
        struct imx21 *imx21 = hcd_to_imx21(hcd);
        struct etd_priv *etd = &imx21->etd[etd_num];
        struct urb *urb = etd->urb;
        u32 etd_mask = 1 << etd_num;
        struct urb_priv *urb_priv = urb->hcpriv;
        int dir;
        int cc;
        u32 bytes_xfrd;
        int etd_done;

        disactivate_etd(imx21, etd_num);

        dir = (etd_readl(imx21, etd_num, 0) >> DW0_DIRECT) & 0x3;
        cc = (etd_readl(imx21, etd_num, 2) >> DW2_COMPCODE) & 0xf;
        bytes_xfrd = etd->len - (etd_readl(imx21, etd_num, 3) & 0x1fffff);

        /* save toggle carry */
        usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
                      usb_pipeout(urb->pipe),
                      (etd_readl(imx21, etd_num, 0) >> DW0_TOGCRY) & 0x1);

        if (dir == TD_DIR_IN) {
                clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
                clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);

                if (etd->bounce_buffer) {
                        memcpy(etd->cpu_buffer, etd->bounce_buffer, bytes_xfrd);
                        dma_unmap_single(imx21->dev,
                                etd->dma_handle, etd->len, DMA_FROM_DEVICE);
                } else if (!etd->dma_handle && bytes_xfrd) { /* PIO */
                        memcpy_fromio(etd->cpu_buffer,
                                imx21->regs + USBOTG_DMEM + etd->dmem_offset,
                                bytes_xfrd);
                }
        }

        kfree(etd->bounce_buffer);
        etd->bounce_buffer = NULL;
        free_dmem(imx21, etd);

        urb->error_count = 0;
        if (!(urb->transfer_flags & URB_SHORT_NOT_OK)
                        && (cc == TD_DATAUNDERRUN))
                cc = TD_CC_NOERROR;

        if (cc != 0)
                dev_vdbg(imx21->dev, "cc is 0x%x\n", cc);

        etd_done = (cc_to_error[cc] != 0);      /* stop if error */

        switch (usb_pipetype(urb->pipe)) {
        case PIPE_CONTROL:
                switch (urb_priv->state) {
                case US_CTRL_SETUP:
                        if (urb->transfer_buffer_length > 0)
                                urb_priv->state = US_CTRL_DATA;
                        else
                                urb_priv->state = US_CTRL_ACK;
                        break;
                case US_CTRL_DATA:
                        urb->actual_length += bytes_xfrd;
                        urb_priv->state = US_CTRL_ACK;
                        break;
                case US_CTRL_ACK:
                        etd_done = 1;
                        break;
                default:
                        dev_err(imx21->dev,
                                "Invalid pipe state %d\n", urb_priv->state);
                        etd_done = 1;
                        break;
                }
                break;

        case PIPE_BULK:
                urb->actual_length += bytes_xfrd;
                if ((urb_priv->state == US_BULK)
                    && (urb->transfer_flags & URB_ZERO_PACKET)
                    && urb->transfer_buffer_length > 0
                    && ((urb->transfer_buffer_length %
                         usb_maxpacket(urb->dev, urb->pipe,
                                       usb_pipeout(urb->pipe))) == 0)) {
                        /* need a 0-packet */
                        urb_priv->state = US_BULK0;
                } else {
                        etd_done = 1;
                }
                break;

        case PIPE_INTERRUPT:
                urb->actual_length += bytes_xfrd;
                etd_done = 1;
                break;
        }

        if (etd_done)
                nonisoc_urb_completed_for_etd(imx21, etd, cc_to_error[cc]);
        else {
                dev_vdbg(imx21->dev, "next state=%d\n", urb_priv->state);
                schedule_nonisoc_etd(imx21, urb);
        }
}

static struct ep_priv *alloc_ep(void)
{
        int i;
        struct ep_priv *ep_priv;

        ep_priv = kzalloc(sizeof(struct ep_priv), GFP_ATOMIC);
        if (!ep_priv)
                return NULL;

        for (i = 0; i < NUM_ISO_ETDS; ++i)
                ep_priv->etd[i] = -1;

        return ep_priv;
}

static int imx21_hc_urb_enqueue(struct usb_hcd *hcd,
                                struct urb *urb, gfp_t mem_flags)
{
        struct imx21 *imx21 = hcd_to_imx21(hcd);
        struct usb_host_endpoint *ep = urb->ep;
        struct urb_priv *urb_priv;
        struct ep_priv *ep_priv;
        struct etd_priv *etd;
        int ret;
        unsigned long flags;

        dev_vdbg(imx21->dev,
                "enqueue urb=%p ep=%p len=%d "
                "buffer=%p dma=%08X setupBuf=%p setupDma=%08X\n",
                urb, ep,
                urb->transfer_buffer_length,
                urb->transfer_buffer, urb->transfer_dma,
                urb->setup_packet, urb->setup_dma);

        if (usb_pipeisoc(urb->pipe))
                return imx21_hc_urb_enqueue_isoc(hcd, ep, urb, mem_flags);

        urb_priv = kzalloc(sizeof(struct urb_priv), mem_flags);
        if (!urb_priv)
                return -ENOMEM;

        spin_lock_irqsave(&imx21->lock, flags);

        ep_priv = ep->hcpriv;
        if (ep_priv == NULL) {
                ep_priv = alloc_ep();
                if (!ep_priv) {
                        ret = -ENOMEM;
                        goto failed_alloc_ep;
                }
                ep->hcpriv = ep_priv;
                ep_priv->ep = ep;
        }

        ret = usb_hcd_link_urb_to_ep(hcd, urb);
        if (ret)
                goto failed_link;

        urb->status = -EINPROGRESS;
        urb->actual_length = 0;
        urb->error_count = 0;
        urb->hcpriv = urb_priv;
        urb_priv->ep = ep;

        switch (usb_pipetype(urb->pipe)) {
        case PIPE_CONTROL:
                urb_priv->state = US_CTRL_SETUP;
                break;
        case PIPE_BULK:
                urb_priv->state = US_BULK;
                break;
        }

        debug_urb_submitted(imx21, urb);
        if (ep_priv->etd[0] < 0) {
                if (ep_priv->waiting_etd) {
                        dev_dbg(imx21->dev,
                                "no ETD available already queued %p\n",
                                ep_priv);
                        debug_urb_queued_for_etd(imx21, urb);
                        goto out;
                }
                ep_priv->etd[0] = alloc_etd(imx21);
                if (ep_priv->etd[0] < 0) {
                        dev_dbg(imx21->dev,
                                "no ETD available queueing %p\n", ep_priv);
                        debug_urb_queued_for_etd(imx21, urb);
                        list_add_tail(&ep_priv->queue, &imx21->queue_for_etd);
                        ep_priv->waiting_etd = 1;
                        goto out;
                }
        }

        /* Schedule if no URB already active for this endpoint */
        etd = &imx21->etd[ep_priv->etd[0]];
        if (etd->urb == NULL) {
                DEBUG_LOG_FRAME(imx21, etd, last_req);
                schedule_nonisoc_etd(imx21, urb);
        }

out:
        spin_unlock_irqrestore(&imx21->lock, flags);
        return 0;

failed_link:
failed_alloc_ep:
        spin_unlock_irqrestore(&imx21->lock, flags);
        kfree(urb_priv);
        return ret;
}

static int imx21_hc_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
                                int status)
{
        struct imx21 *imx21 = hcd_to_imx21(hcd);
        unsigned long flags;
        struct usb_host_endpoint *ep;
        struct ep_priv *ep_priv;
        struct urb_priv *urb_priv = urb->hcpriv;
        int ret = -EINVAL;

        dev_vdbg(imx21->dev, "dequeue urb=%p iso=%d status=%d\n",
                urb, usb_pipeisoc(urb->pipe), status);

        spin_lock_irqsave(&imx21->lock, flags);

        ret = usb_hcd_check_unlink_urb(hcd, urb, status);
        if (ret)
                goto fail;
        ep = urb_priv->ep;
        ep_priv = ep->hcpriv;

        debug_urb_unlinked(imx21, urb);

        if (usb_pipeisoc(urb->pipe)) {
                dequeue_isoc_urb(imx21, urb, ep_priv);
                schedule_isoc_etds(hcd, ep);
        } else if (urb_priv->active) {
                int etd_num = ep_priv->etd[0];
                if (etd_num != -1) {
                        struct etd_priv *etd = &imx21->etd[etd_num];

                        disactivate_etd(imx21, etd_num);
                        free_dmem(imx21, etd);
                        etd->urb = NULL;
                        kfree(etd->bounce_buffer);
                        etd->bounce_buffer = NULL;
                }
        }

        urb_done(hcd, urb, status);

        spin_unlock_irqrestore(&imx21->lock, flags);
        return 0;

fail:
        spin_unlock_irqrestore(&imx21->lock, flags);
        return ret;
}

/* =========================================== */
/* Interrupt dispatch                          */
/* =========================================== */

static void process_etds(struct usb_hcd *hcd, struct imx21 *imx21, int sof)
{
        int etd_num;
        int enable_sof_int = 0;
        unsigned long flags;

        spin_lock_irqsave(&imx21->lock, flags);

        for (etd_num = 0; etd_num < USB_NUM_ETD; etd_num++) {
                u32 etd_mask = 1 << etd_num;
                u32 enabled = readl(imx21->regs + USBH_ETDENSET) & etd_mask;
                u32 done = readl(imx21->regs + USBH_ETDDONESTAT) & etd_mask;
                struct etd_priv *etd = &imx21->etd[etd_num];

                if (done) {
                        DEBUG_LOG_FRAME(imx21, etd, last_int);
                } else {
/*
 * Kludge warning!
 *
 * When multiple transfers are using the bus we sometimes get into a state
 * where the transfer has completed (the CC field of the ETD is != 0x0F),
 * the ETD has self disabled but the ETDDONESTAT flag is not set
 * (and hence no interrupt occurs).
 * This causes the transfer in question to hang.
 * The kludge below checks for this condition at each SOF and processes any
 * blocked ETDs (after an arbitrary 10 frame wait).
 *
 * With a single active transfer the usbtest test suite will run for days
 * without the kludge.
 * With other bus activity (e.g. mass storage) even just test1 will hang
 * without the kludge.
 */
                        u32 dword0;
                        int cc;

                        if (etd->active_count && !enabled) /* suspicious... */
                                enable_sof_int = 1;

                        if (!sof || enabled || !etd->active_count)
                                continue;

                        cc = etd_readl(imx21, etd_num, 2) >> DW2_COMPCODE;
                        if (cc == TD_NOTACCESSED)
                                continue;

                        if (++etd->active_count < 10)
                                continue;

                        dword0 = etd_readl(imx21, etd_num, 0);
                        dev_dbg(imx21->dev,
                                "unblock ETD %d dev=0x%X ep=0x%X cc=0x%02X!\n",
                                etd_num, dword0 & 0x7F,
                                (dword0 >> DW0_ENDPNT) & 0x0F,
                                cc);

#ifdef DEBUG
                        dev_dbg(imx21->dev,
                                "frame: act=%d disact=%d"
                                " int=%d req=%d cur=%d\n",
                                etd->activated_frame,
                                etd->disactivated_frame,
                                etd->last_int_frame,
                                etd->last_req_frame,
                                readl(imx21->regs + USBH_FRMNUB));
                        imx21->debug_unblocks++;
#endif
                        etd->active_count = 0;
/* End of kludge */
                }

                if (etd->ep == NULL || etd->urb == NULL) {
                        dev_dbg(imx21->dev,
                                "Interrupt for unexpected etd %d"
                                " ep=%p urb=%p\n",
                                etd_num, etd->ep, etd->urb);
                        disactivate_etd(imx21, etd_num);
                        continue;
                }

                if (usb_pipeisoc(etd->urb->pipe))
                        isoc_etd_done(hcd, etd_num);
                else
                        nonisoc_etd_done(hcd, etd_num);
        }

        /* only enable SOF interrupt if it may be needed for the kludge */
        if (enable_sof_int)
                set_register_bits(imx21, USBH_SYSIEN, USBH_SYSIEN_SOFINT);
        else
                clear_register_bits(imx21, USBH_SYSIEN, USBH_SYSIEN_SOFINT);

        spin_unlock_irqrestore(&imx21->lock, flags);
}

static irqreturn_t imx21_irq(struct usb_hcd *hcd)
{
        struct imx21 *imx21 = hcd_to_imx21(hcd);
        u32 ints = readl(imx21->regs + USBH_SYSISR);

        if (ints & USBH_SYSIEN_HERRINT)
                dev_dbg(imx21->dev, "Scheduling error\n");

        if (ints & USBH_SYSIEN_SORINT)
                dev_dbg(imx21->dev, "Scheduling overrun\n");

        if (ints & (USBH_SYSISR_DONEINT | USBH_SYSISR_SOFINT))
                process_etds(hcd, imx21, ints & USBH_SYSISR_SOFINT);

        writel(ints, imx21->regs + USBH_SYSISR);
        return IRQ_HANDLED;
}

static void imx21_hc_endpoint_disable(struct usb_hcd *hcd,
                                      struct usb_host_endpoint *ep)
{
        struct imx21 *imx21 = hcd_to_imx21(hcd);
        unsigned long flags;
        struct ep_priv *ep_priv;
        int i;

        if (ep == NULL)
                return;

        spin_lock_irqsave(&imx21->lock, flags);
        ep_priv = ep->hcpriv;
        dev_vdbg(imx21->dev, "disable ep=%p, ep->hcpriv=%p\n", ep, ep_priv);

        if (!list_empty(&ep->urb_list))
                dev_dbg(imx21->dev, "ep's URB list is not empty\n");

        if (ep_priv != NULL) {
                for (i = 0; i < NUM_ISO_ETDS; i++) {
                        if (ep_priv->etd[i] > -1)
                                dev_dbg(imx21->dev, "free etd %d for disable\n",
                                        ep_priv->etd[i]);

                        free_etd(imx21, ep_priv->etd[i]);
                }
                kfree(ep_priv);
                ep->hcpriv = NULL;
        }

        for (i = 0; i < USB_NUM_ETD; i++) {
                if (imx21->etd[i].alloc && imx21->etd[i].ep == ep) {
                        dev_err(imx21->dev,
                                "Active etd %d for disabled ep=%p!\n", i, ep);
                        free_etd(imx21, i);
                }
        }
        free_epdmem(imx21, ep);
        spin_unlock_irqrestore(&imx21->lock, flags);
}

/* =========================================== */
/* Hub handling                                */
/* =========================================== */
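
/*
 * The root hub is virtual: hub requests from usbcore are answered here
 * by reading and writing the USBH root hub and port status registers
 * directly, rather than being sent out on the bus.
 */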
1469 static int get_hub_descriptor(struct usb_hcd *hcd,
1470                               struct usb_hub_descriptor *desc)
1471 {
1472         struct imx21 *imx21 = hcd_to_imx21(hcd);
1473         desc->bDescriptorType = 0x29;   /* HUB descriptor */
1474         desc->bHubContrCurrent = 0;
1475
1476         desc->bNbrPorts = readl(imx21->regs + USBH_ROOTHUBA)
1477                 & USBH_ROOTHUBA_NDNSTMPRT_MASK;
1478         desc->bDescLength = 9;
1479         desc->bPwrOn2PwrGood = 0;
1480         desc->wHubCharacteristics = (__force __u16) cpu_to_le16(
1481                 0x0002 |        /* No power switching */
1482                 0x0010 |        /* No over current protection */
1483                 0);
1484
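        /*
         * Per the USB 2.0 hub descriptor layout: bit 1 of DeviceRemovable
         * marks the device on port 1 as non-removable, and the following
         * byte is the legacy PortPwrCtrlMask, which must be all ones.
         */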
        desc->u.hs.DeviceRemovable[0] = 1 << 1;
        desc->u.hs.DeviceRemovable[1] = ~0;
        return 0;
}

static int imx21_hc_hub_status_data(struct usb_hcd *hcd, char *buf)
{
        struct imx21 *imx21 = hcd_to_imx21(hcd);
        int ports;
        int changed = 0;
        int i;
        unsigned long flags;

        spin_lock_irqsave(&imx21->lock, flags);
        ports = readl(imx21->regs + USBH_ROOTHUBA)
                & USBH_ROOTHUBA_NDNSTMPRT_MASK;
        if (ports > 7) {
                dev_err(imx21->dev, "ports %d > 7\n", ports);
                ports = 7;
        }
        for (i = 0; i < ports; i++) {
                if (readl(imx21->regs + USBH_PORTSTAT(i)) &
                        (USBH_PORTSTAT_CONNECTSC |
                        USBH_PORTSTAT_PRTENBLSC |
                        USBH_PORTSTAT_PRTSTATSC |
                        USBH_PORTSTAT_OVRCURIC |
                        USBH_PORTSTAT_PRTRSTSC)) {

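                        /*
                         * The hub status-change bitmap reserves bit 0 for
                         * the hub itself, so port N is reported on bit N.
                         */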
                        changed = 1;
                        buf[0] |= 1 << (i + 1);
                }
        }
        spin_unlock_irqrestore(&imx21->lock, flags);

        if (changed)
                dev_info(imx21->dev, "Hub status changed\n");
        return changed;
}

static int imx21_hc_hub_control(struct usb_hcd *hcd,
                                u16 typeReq,
                                u16 wValue, u16 wIndex, char *buf, u16 wLength)
{
        struct imx21 *imx21 = hcd_to_imx21(hcd);
        int rc = 0;
        u32 status_write = 0;

        switch (typeReq) {
        case ClearHubFeature:
                dev_dbg(imx21->dev, "ClearHubFeature\n");
                switch (wValue) {
                case C_HUB_OVER_CURRENT:
                        dev_dbg(imx21->dev, "    OVER_CURRENT\n");
                        break;
                case C_HUB_LOCAL_POWER:
                        dev_dbg(imx21->dev, "    LOCAL_POWER\n");
                        break;
                default:
                        dev_dbg(imx21->dev, "    unknown\n");
                        rc = -EINVAL;
                        break;
                }
                break;

        case ClearPortFeature:
                dev_dbg(imx21->dev, "ClearPortFeature\n");
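                /*
                 * Like OHCI's HcRhPortStatus register, writes to PORTSTAT
                 * are commands rather than plain stores: writing a 1 to a
                 * given status bit position triggers the corresponding
                 * set/clear action.  That is why the values written below
                 * do not match the feature being cleared; e.g. clearing
                 * SUSPEND is done by writing the over-current bit.
                 */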
                switch (wValue) {
                case USB_PORT_FEAT_ENABLE:
                        dev_dbg(imx21->dev, "    ENABLE\n");
                        status_write = USBH_PORTSTAT_CURCONST;
                        break;
                case USB_PORT_FEAT_SUSPEND:
                        dev_dbg(imx21->dev, "    SUSPEND\n");
                        status_write = USBH_PORTSTAT_PRTOVRCURI;
                        break;
                case USB_PORT_FEAT_POWER:
                        dev_dbg(imx21->dev, "    POWER\n");
                        status_write = USBH_PORTSTAT_LSDEVCON;
                        break;
                case USB_PORT_FEAT_C_ENABLE:
                        dev_dbg(imx21->dev, "    C_ENABLE\n");
                        status_write = USBH_PORTSTAT_PRTENBLSC;
                        break;
                case USB_PORT_FEAT_C_SUSPEND:
                        dev_dbg(imx21->dev, "    C_SUSPEND\n");
                        status_write = USBH_PORTSTAT_PRTSTATSC;
                        break;
                case USB_PORT_FEAT_C_CONNECTION:
                        dev_dbg(imx21->dev, "    C_CONNECTION\n");
                        status_write = USBH_PORTSTAT_CONNECTSC;
                        break;
                case USB_PORT_FEAT_C_OVER_CURRENT:
                        dev_dbg(imx21->dev, "    C_OVER_CURRENT\n");
                        status_write = USBH_PORTSTAT_OVRCURIC;
                        break;
                case USB_PORT_FEAT_C_RESET:
                        dev_dbg(imx21->dev, "    C_RESET\n");
                        status_write = USBH_PORTSTAT_PRTRSTSC;
                        break;
                default:
                        dev_dbg(imx21->dev, "    unknown\n");
                        rc = -EINVAL;
                        break;
                }

                break;

        case GetHubDescriptor:
                dev_dbg(imx21->dev, "GetHubDescriptor\n");
                rc = get_hub_descriptor(hcd, (void *)buf);
                break;

        case GetHubStatus:
                dev_dbg(imx21->dev, "GetHubStatus\n");
                *(__le32 *) buf = 0;
                break;

        case GetPortStatus:
                dev_dbg(imx21->dev, "GetPortStatus: port: %d, 0x%x\n",
                    wIndex, USBH_PORTSTAT(wIndex - 1));
                *(__le32 *) buf = readl(imx21->regs +
                        USBH_PORTSTAT(wIndex - 1));
                break;

        case SetHubFeature:
                dev_dbg(imx21->dev, "SetHubFeature\n");
                switch (wValue) {
                case C_HUB_OVER_CURRENT:
                        dev_dbg(imx21->dev, "    OVER_CURRENT\n");
                        break;

                case C_HUB_LOCAL_POWER:
                        dev_dbg(imx21->dev, "    LOCAL_POWER\n");
                        break;
                default:
                        dev_dbg(imx21->dev, "    unknown\n");
                        rc = -EINVAL;
                        break;
                }

                break;

        case SetPortFeature:
                dev_dbg(imx21->dev, "SetPortFeature\n");
                switch (wValue) {
                case USB_PORT_FEAT_SUSPEND:
                        dev_dbg(imx21->dev, "    SUSPEND\n");
                        status_write = USBH_PORTSTAT_PRTSUSPST;
                        break;
                case USB_PORT_FEAT_POWER:
                        dev_dbg(imx21->dev, "    POWER\n");
                        status_write = USBH_PORTSTAT_PRTPWRST;
                        break;
                case USB_PORT_FEAT_RESET:
                        dev_dbg(imx21->dev, "    RESET\n");
                        status_write = USBH_PORTSTAT_PRTRSTST;
                        break;
                default:
                        dev_dbg(imx21->dev, "    unknown\n");
                        rc = -EINVAL;
                        break;
                }
                break;

        default:
                dev_dbg(imx21->dev, "unknown\n");
                rc = -EINVAL;
                break;
        }

        if (status_write)
                writel(status_write, imx21->regs + USBH_PORTSTAT(wIndex - 1));
        return rc;
}

/* =========================================== */
/* Host controller management                   */
/* =========================================== */

static int imx21_hc_reset(struct usb_hcd *hcd)
{
        struct imx21 *imx21 = hcd_to_imx21(hcd);
        unsigned long timeout;
        unsigned long flags;

        spin_lock_irqsave(&imx21->lock, flags);

        /* Reset the Host controller modules */
        writel(USBOTG_RST_RSTCTRL | USBOTG_RST_RSTRH |
                USBOTG_RST_RSTHSIE | USBOTG_RST_RSTHC,
                imx21->regs + USBOTG_RST_CTRL);

        /* Wait for reset to finish */
        timeout = jiffies + HZ;
        while (readl(imx21->regs + USBOTG_RST_CTRL) != 0) {
                if (time_after(jiffies, timeout)) {
                        spin_unlock_irqrestore(&imx21->lock, flags);
                        dev_err(imx21->dev, "timeout waiting for reset\n");
                        return -ETIMEDOUT;
                }
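                /*
                 * Drop the lock (and unconditionally re-enable IRQs)
                 * while sleeping.  This is safe only because .reset is
                 * invoked from process context with interrupts enabled.
                 */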
                spin_unlock_irq(&imx21->lock);
                schedule_timeout_uninterruptible(1);
                spin_lock_irq(&imx21->lock);
        }
        spin_unlock_irqrestore(&imx21->lock, flags);
        return 0;
}

static int imx21_hc_start(struct usb_hcd *hcd)
{
        struct imx21 *imx21 = hcd_to_imx21(hcd);
        unsigned long flags;
        int i, j;
        u32 hw_mode = USBOTG_HWMODE_CRECFG_HOST;
        u32 usb_control = 0;

        hw_mode |= ((imx21->pdata->host_xcvr << USBOTG_HWMODE_HOSTXCVR_SHIFT) &
                        USBOTG_HWMODE_HOSTXCVR_MASK);
        hw_mode |= ((imx21->pdata->otg_xcvr << USBOTG_HWMODE_OTGXCVR_SHIFT) &
                        USBOTG_HWMODE_OTGXCVR_MASK);

        if (imx21->pdata->host1_txenoe)
                usb_control |= USBCTRL_HOST1_TXEN_OE;

        if (!imx21->pdata->host1_xcverless)
                usb_control |= USBCTRL_HOST1_BYP_TLL;

        if (imx21->pdata->otg_ext_xcvr)
                usb_control |= USBCTRL_OTC_RCV_RXDP;

        spin_lock_irqsave(&imx21->lock, flags);

        writel((USBOTG_CLK_CTRL_HST | USBOTG_CLK_CTRL_MAIN),
                imx21->regs + USBOTG_CLK_CTRL);
        writel(hw_mode, imx21->regs + USBOTG_HWMODE);
        writel(usb_control, imx21->regs + USBCTRL);
        writel(USB_MISCCONTROL_SKPRTRY | USB_MISCCONTROL_ARBMODE,
                imx21->regs + USB_MISCCONTROL);

        /* Clear the ETDs */
        for (i = 0; i < USB_NUM_ETD; i++)
                for (j = 0; j < 4; j++)
                        etd_writel(imx21, i, j, 0);

        /* Take the HC out of reset */
        writel(USBH_HOST_CTRL_HCUSBSTE_OPERATIONAL | USBH_HOST_CTRL_CTLBLKSR_1,
                imx21->regs + USBH_HOST_CTRL);

        /* Enable ports */
        if (imx21->pdata->enable_otg_host)
                writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
                        imx21->regs + USBH_PORTSTAT(0));

        if (imx21->pdata->enable_host1)
                writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
                        imx21->regs + USBH_PORTSTAT(1));

        if (imx21->pdata->enable_host2)
                writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
                        imx21->regs + USBH_PORTSTAT(2));

        hcd->state = HC_STATE_RUNNING;

        /* Enable host controller interrupts */
        set_register_bits(imx21, USBH_SYSIEN,
                USBH_SYSIEN_HERRINT |
                USBH_SYSIEN_DONEINT | USBH_SYSIEN_SORINT);
        set_register_bits(imx21, USBOTG_CINT_STEN, USBOTG_HCINT);

        spin_unlock_irqrestore(&imx21->lock, flags);

        return 0;
}

static void imx21_hc_stop(struct usb_hcd *hcd)
{
        struct imx21 *imx21 = hcd_to_imx21(hcd);
        unsigned long flags;

        spin_lock_irqsave(&imx21->lock, flags);

        writel(0, imx21->regs + USBH_SYSIEN);
        clear_register_bits(imx21, USBOTG_CINT_STEN, USBOTG_HCINT);
        clear_register_bits(imx21, USBOTG_CLK_CTRL,
                        USBOTG_CLK_CTRL_HST | USBOTG_CLK_CTRL_MAIN);
        spin_unlock_irqrestore(&imx21->lock, flags);
}

/* =========================================== */
/* Driver glue                                  */
/* =========================================== */

static struct hc_driver imx21_hc_driver = {
        .description = hcd_name,
        .product_desc = "IMX21 USB Host Controller",
        .hcd_priv_size = sizeof(struct imx21),

        .flags = HCD_USB11,
        .irq = imx21_irq,

        .reset = imx21_hc_reset,
        .start = imx21_hc_start,
        .stop = imx21_hc_stop,

        /* I/O requests */
        .urb_enqueue = imx21_hc_urb_enqueue,
        .urb_dequeue = imx21_hc_urb_dequeue,
        .endpoint_disable = imx21_hc_endpoint_disable,

        /* scheduling support */
        .get_frame_number = imx21_hc_get_frame,

        /* Root hub support */
        .hub_status_data = imx21_hc_hub_status_data,
        .hub_control = imx21_hc_hub_control,
};

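/*
 * Fallback platform data used when the board supplies none:
 * differential transceivers on the host and OTG ports, and all
 * three root-hub ports enabled.
 */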
static struct mx21_usbh_platform_data default_pdata = {
        .host_xcvr = MX21_USBXCVR_TXDIF_RXDIF,
        .otg_xcvr = MX21_USBXCVR_TXDIF_RXDIF,
        .enable_host1 = 1,
        .enable_host2 = 1,
        .enable_otg_host = 1,
};

static int imx21_remove(struct platform_device *pdev)
{
        struct usb_hcd *hcd = platform_get_drvdata(pdev);
        struct imx21 *imx21 = hcd_to_imx21(hcd);
        struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

        remove_debug_files(imx21);
        usb_remove_hcd(hcd);

        if (res != NULL) {
                clk_disable_unprepare(imx21->clk);
                clk_put(imx21->clk);
                iounmap(imx21->regs);
                release_mem_region(res->start, resource_size(res));
        }

        usb_put_hcd(hcd);
        return 0;
}

static int imx21_probe(struct platform_device *pdev)
{
        struct usb_hcd *hcd;
        struct imx21 *imx21;
        struct resource *res;
        int ret;
        int irq;

        printk(KERN_INFO "%s\n", imx21_hc_driver.product_desc);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
                return -ENODEV;
        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        hcd = usb_create_hcd(&imx21_hc_driver,
                &pdev->dev, dev_name(&pdev->dev));
        if (hcd == NULL) {
                dev_err(&pdev->dev, "Cannot create hcd (%s)\n",
                    dev_name(&pdev->dev));
                return -ENOMEM;
        }

        imx21 = hcd_to_imx21(hcd);
        imx21->hcd = hcd;
        imx21->dev = &pdev->dev;
        imx21->pdata = dev_get_platdata(&pdev->dev);
        if (!imx21->pdata)
                imx21->pdata = &default_pdata;

        spin_lock_init(&imx21->lock);
        INIT_LIST_HEAD(&imx21->dmem_list);
        INIT_LIST_HEAD(&imx21->queue_for_etd);
        INIT_LIST_HEAD(&imx21->queue_for_dmem);
        create_debug_files(imx21);

        res = request_mem_region(res->start, resource_size(res), hcd_name);
        if (!res) {
                ret = -EBUSY;
                goto failed_request_mem;
        }

        imx21->regs = ioremap(res->start, resource_size(res));
        if (imx21->regs == NULL) {
                dev_err(imx21->dev, "Cannot map registers\n");
                ret = -ENOMEM;
                goto failed_ioremap;
        }

        /* Enable the clock source */
        imx21->clk = clk_get(imx21->dev, NULL);
        if (IS_ERR(imx21->clk)) {
                dev_err(imx21->dev, "no clock found\n");
                ret = PTR_ERR(imx21->clk);
                goto failed_clock_get;
        }

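        /* USB full-speed operation needs a 48 MHz reference clock */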
        ret = clk_set_rate(imx21->clk, clk_round_rate(imx21->clk, 48000000));
        if (ret)
                goto failed_clock_set;
        ret = clk_prepare_enable(imx21->clk);
        if (ret)
                goto failed_clock_enable;

        dev_info(imx21->dev, "Hardware HC revision: 0x%02X\n",
                (readl(imx21->regs + USBOTG_HWMODE) >> 16) & 0xFF);

        ret = usb_add_hcd(hcd, irq, 0);
        if (ret != 0) {
                dev_err(imx21->dev, "usb_add_hcd() returned %d\n", ret);
                goto failed_add_hcd;
        }

        return 0;

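        /* Error unwinding: undo each successful step in reverse order */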
failed_add_hcd:
        clk_disable_unprepare(imx21->clk);
failed_clock_enable:
failed_clock_set:
        clk_put(imx21->clk);
failed_clock_get:
        iounmap(imx21->regs);
failed_ioremap:
        release_mem_region(res->start, resource_size(res));
failed_request_mem:
        remove_debug_files(imx21);
        usb_put_hcd(hcd);
        return ret;
}

static struct platform_driver imx21_hcd_driver = {
        .driver = {
                   .name = (char *)hcd_name,
                   },
        .probe = imx21_probe,
        .remove = imx21_remove,
        .suspend = NULL,
        .resume = NULL,
};

module_platform_driver(imx21_hcd_driver);

MODULE_DESCRIPTION("i.MX21 USB Host controller");
MODULE_AUTHOR("Martin Fuzzey");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:imx21-hcd");