drivers/usb/chipidea/udc.c
1 /*
2  * udc.c - ChipIdea UDC driver
3  *
4  * Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
5  *
6  * Author: David Lopo
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 as
10  * published by the Free Software Foundation.
11  */
12
13 #include <linux/delay.h>
14 #include <linux/device.h>
15 #include <linux/dmapool.h>
16 #include <linux/err.h>
17 #include <linux/irqreturn.h>
18 #include <linux/kernel.h>
19 #include <linux/slab.h>
20 #include <linux/pm_runtime.h>
21 #include <linux/usb/ch9.h>
22 #include <linux/usb/gadget.h>
23 #include <linux/usb/otg.h>
24 #include <linux/usb/chipidea.h>
25
26 #include "ci.h"
27 #include "udc.h"
28 #include "bits.h"
29 #include "debug.h"
30 #include "otg.h"
31
32 /* control endpoint description */
33 static const struct usb_endpoint_descriptor
34 ctrl_endpt_out_desc = {
35         .bLength         = USB_DT_ENDPOINT_SIZE,
36         .bDescriptorType = USB_DT_ENDPOINT,
37
38         .bEndpointAddress = USB_DIR_OUT,
39         .bmAttributes    = USB_ENDPOINT_XFER_CONTROL,
40         .wMaxPacketSize  = cpu_to_le16(CTRL_PAYLOAD_MAX),
41 };
42
43 static const struct usb_endpoint_descriptor
44 ctrl_endpt_in_desc = {
45         .bLength         = USB_DT_ENDPOINT_SIZE,
46         .bDescriptorType = USB_DT_ENDPOINT,
47
48         .bEndpointAddress = USB_DIR_IN,
49         .bmAttributes    = USB_ENDPOINT_XFER_CONTROL,
50         .wMaxPacketSize  = cpu_to_le16(CTRL_PAYLOAD_MAX),
51 };
52
53 /**
54  * hw_ep_bit: calculates the bit number
55  * @num: endpoint number
56  * @dir: endpoint direction
57  *
58  * This function returns bit number
59  */
60 static inline int hw_ep_bit(int num, int dir)
61 {
62         return num + (dir ? 16 : 0);
63 }
64
65 static inline int ep_to_bit(struct ci_hdrc *ci, int n)
66 {
67         int fill = 16 - ci->hw_ep_max / 2;
68
69         if (n >= ci->hw_ep_max / 2)
70                 n += fill;
71
72         return n;
73 }
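/*
 * Editorial note (illustrative sketch, not part of the original driver):
 * ci_hw_ep[] indexes endpoints as RX 0..hw_ep_max/2-1 followed by TX
 * hw_ep_max/2..hw_ep_max-1, while the ENDPT* registers use bit <num> for
 * RX and bit <num + 16> for TX.  Assuming hw_ep_max == 16:
 *
 *	fill = 16 - 16 / 2 = 8
 *	ep_to_bit(ci, 3)  -> 3			RX ep3, same as hw_ep_bit(3, RX)
 *	ep_to_bit(ci, 11) -> 11 + 8 = 19	TX ep3, same as hw_ep_bit(3, TX)
 */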
74
75 /**
76  * hw_device_state: enables/disables interrupts (execute without interruption)
77  * @dma: 0 => disable, !0 => enable and set dma engine
78  *
79  * This function returns an error code
80  */
81 static int hw_device_state(struct ci_hdrc *ci, u32 dma)
82 {
83         if (dma) {
84                 hw_write(ci, OP_ENDPTLISTADDR, ~0, dma);
85                 /* interrupt, error, port change, reset, sleep/suspend */
86                 hw_write(ci, OP_USBINTR, ~0,
87                              USBi_UI|USBi_UEI|USBi_PCI|USBi_URI|USBi_SLI);
88                 hw_write(ci, OP_USBCMD, USBCMD_RS, USBCMD_RS);
89         } else {
90                 hw_write(ci, OP_USBINTR, ~0, 0);
91                 hw_write(ci, OP_USBCMD, USBCMD_RS, 0);
92         }
93         return 0;
94 }
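/*
 * Editorial note (sketch based on the callers later in this file): the
 * @dma argument is the bus address of the ep0-out queue head, i.e. the
 * base of the dQH array, which lands in ENDPTLISTADDR before the run/stop
 * bit is touched:
 *
 *	hw_device_state(ci, ci->ep0out->qh.dma);  start: point HW at the dQHs,
 *						  unmask irqs, set USBCMD_RS
 *	hw_device_state(ci, 0);			  stop: mask irqs, clear USBCMD_RS
 */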
95
96 /**
97  * hw_ep_flush: flush endpoint fifo (execute without interruption)
98  * @num: endpoint number
99  * @dir: endpoint direction
100  *
101  * This function returns an error code
102  */
103 static int hw_ep_flush(struct ci_hdrc *ci, int num, int dir)
104 {
105         int n = hw_ep_bit(num, dir);
106
107         do {
108                 /* flush any pending transfer */
109                 hw_write(ci, OP_ENDPTFLUSH, BIT(n), BIT(n));
110                 while (hw_read(ci, OP_ENDPTFLUSH, BIT(n)))
111                         cpu_relax();
112         } while (hw_read(ci, OP_ENDPTSTAT, BIT(n)));
113
114         return 0;
115 }
116
117 /**
118  * hw_ep_disable: disables endpoint (execute without interruption)
119  * @num: endpoint number
120  * @dir: endpoint direction
121  *
122  * This function returns an error code
123  */
124 static int hw_ep_disable(struct ci_hdrc *ci, int num, int dir)
125 {
126         hw_ep_flush(ci, num, dir);
127         hw_write(ci, OP_ENDPTCTRL + num,
128                  dir ? ENDPTCTRL_TXE : ENDPTCTRL_RXE, 0);
129         return 0;
130 }
131
132 /**
133  * hw_ep_enable: enables endpoint (execute without interruption)
134  * @num:  endpoint number
135  * @dir:  endpoint direction
136  * @type: endpoint type
137  *
138  * This function returns an error code
139  */
140 static int hw_ep_enable(struct ci_hdrc *ci, int num, int dir, int type)
141 {
142         u32 mask, data;
143
144         if (dir) {
145                 mask  = ENDPTCTRL_TXT;  /* type    */
146                 data  = type << __ffs(mask);
147
148                 mask |= ENDPTCTRL_TXS;  /* unstall */
149                 mask |= ENDPTCTRL_TXR;  /* reset data toggle */
150                 data |= ENDPTCTRL_TXR;
151                 mask |= ENDPTCTRL_TXE;  /* enable  */
152                 data |= ENDPTCTRL_TXE;
153         } else {
154                 mask  = ENDPTCTRL_RXT;  /* type    */
155                 data  = type << __ffs(mask);
156
157                 mask |= ENDPTCTRL_RXS;  /* unstall */
158                 mask |= ENDPTCTRL_RXR;  /* reset data toggle */
159                 data |= ENDPTCTRL_RXR;
160                 mask |= ENDPTCTRL_RXE;  /* enable  */
161                 data |= ENDPTCTRL_RXE;
162         }
163         hw_write(ci, OP_ENDPTCTRL + num, mask, data);
164         return 0;
165 }
166
167 /**
168  * hw_ep_get_halt: return endpoint halt status
169  * @num: endpoint number
170  * @dir: endpoint direction
171  *
172  * This function returns 1 if endpoint halted
173  */
174 static int hw_ep_get_halt(struct ci_hdrc *ci, int num, int dir)
175 {
176         u32 mask = dir ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
177
178         return hw_read(ci, OP_ENDPTCTRL + num, mask) ? 1 : 0;
179 }
180
181 /**
182  * hw_test_and_clear_setup_status: test & clear setup status (execute without
183  *                                 interruption)
184  * @n: endpoint number
185  *
186  * This function returns setup status
187  */
188 static int hw_test_and_clear_setup_status(struct ci_hdrc *ci, int n)
189 {
190         n = ep_to_bit(ci, n);
191         return hw_test_and_clear(ci, OP_ENDPTSETUPSTAT, BIT(n));
192 }
193
194 /**
195  * hw_ep_prime: primes endpoint (execute without interruption)
196  * @num:     endpoint number
197  * @dir:     endpoint direction
198  * @is_ctrl: true if control endpoint
199  *
200  * This function returns an error code
201  */
202 static int hw_ep_prime(struct ci_hdrc *ci, int num, int dir, int is_ctrl)
203 {
204         int n = hw_ep_bit(num, dir);
205
206         if (is_ctrl && dir == RX && hw_read(ci, OP_ENDPTSETUPSTAT, BIT(num)))
207                 return -EAGAIN;
208
209         hw_write(ci, OP_ENDPTPRIME, BIT(n), BIT(n));
210
211         while (hw_read(ci, OP_ENDPTPRIME, BIT(n)))
212                 cpu_relax();
213         if (is_ctrl && dir == RX && hw_read(ci, OP_ENDPTSETUPSTAT, BIT(num)))
214                 return -EAGAIN;
215
216         /* status should be tested according to the manual, but it doesn't work */
217         return 0;
218 }
219
220 /**
221  * hw_ep_set_halt: configures ep halt & resets data toggle after clear (execute
222  *                 without interruption)
223  * @num:   endpoint number
224  * @dir:   endpoint direction
225  * @value: true => stall, false => unstall
226  *
227  * This function returns an error code
228  */
229 static int hw_ep_set_halt(struct ci_hdrc *ci, int num, int dir, int value)
230 {
231         if (value != 0 && value != 1)
232                 return -EINVAL;
233
234         do {
235                 enum ci_hw_regs reg = OP_ENDPTCTRL + num;
236                 u32 mask_xs = dir ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
237                 u32 mask_xr = dir ? ENDPTCTRL_TXR : ENDPTCTRL_RXR;
238
239                 /* data toggle - reserved for EP0 but it's in ESS */
240                 hw_write(ci, reg, mask_xs|mask_xr,
241                           value ? mask_xs : mask_xr);
242         } while (value != hw_ep_get_halt(ci, num, dir));
243
244         return 0;
245 }
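/*
 * Editorial note (illustrative, for a TX endpoint): the write above boils
 * down to
 *
 *	stall:	 hw_write(ci, reg, ENDPTCTRL_TXS | ENDPTCTRL_TXR, ENDPTCTRL_TXS);
 *	unstall: hw_write(ci, reg, ENDPTCTRL_TXS | ENDPTCTRL_TXR, ENDPTCTRL_TXR);
 *
 * i.e. stalling sets the stall bit, unstalling clears it and resets the
 * data toggle.  The loop re-checks hw_ep_get_halt() because the controller
 * may clear the stall bit on its own, e.g. when a new SETUP arrives on a
 * control endpoint (an assumption, not stated in the original code).
 */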
246
247 /**
248  * hw_port_is_high_speed: test if port is high speed
249  *
250  * This function returns true if high speed port
251  */
252 static int hw_port_is_high_speed(struct ci_hdrc *ci)
253 {
254         return ci->hw_bank.lpm ? hw_read(ci, OP_DEVLC, DEVLC_PSPD) :
255                 hw_read(ci, OP_PORTSC, PORTSC_HSP);
256 }
257
258 /**
259  * hw_read_intr_enable: returns interrupt enable register
260  *
261  * This function returns register data
262  */
263 static u32 hw_read_intr_enable(struct ci_hdrc *ci)
264 {
265         return hw_read(ci, OP_USBINTR, ~0);
266 }
267
268 /**
269  * hw_read_intr_status: returns interrupt status register
270  *
271  * This function returns register data
272  */
273 static u32 hw_read_intr_status(struct ci_hdrc *ci)
274 {
275         return hw_read(ci, OP_USBSTS, ~0);
276 }
277
278 /**
279  * hw_test_and_clear_complete: test & clear complete status (execute without
280  *                             interruption)
281  * @n: endpoint number
282  *
283  * This function returns complete status
284  */
285 static int hw_test_and_clear_complete(struct ci_hdrc *ci, int n)
286 {
287         n = ep_to_bit(ci, n);
288         return hw_test_and_clear(ci, OP_ENDPTCOMPLETE, BIT(n));
289 }
290
291 /**
292  * hw_test_and_clear_intr_active: test & clear active interrupts (execute
293  *                                without interruption)
294  *
295  * This function returns active interrupts
296  */
297 static u32 hw_test_and_clear_intr_active(struct ci_hdrc *ci)
298 {
299         u32 reg = hw_read_intr_status(ci) & hw_read_intr_enable(ci);
300
301         hw_write(ci, OP_USBSTS, ~0, reg);
302         return reg;
303 }
304
305 /**
306  * hw_test_and_clear_setup_guard: test & clear setup guard (execute without
307  *                                interruption)
308  *
309  * This function returns guard value
310  */
311 static int hw_test_and_clear_setup_guard(struct ci_hdrc *ci)
312 {
313         return hw_test_and_write(ci, OP_USBCMD, USBCMD_SUTW, 0);
314 }
315
316 /**
317  * hw_test_and_set_setup_guard: test & set setup guard (execute without
318  *                              interruption)
319  *
320  * This function returns guard value
321  */
322 static int hw_test_and_set_setup_guard(struct ci_hdrc *ci)
323 {
324         return hw_test_and_write(ci, OP_USBCMD, USBCMD_SUTW, USBCMD_SUTW);
325 }
326
327 /**
328  * hw_usb_set_address: configures USB address (execute without interruption)
329  * @value: new USB address
330  *
331  * This function explicitly sets the address, without the "USBADRA" (advance)
332  * feature, which is not supported by older versions of the controller.
333  */
334 static void hw_usb_set_address(struct ci_hdrc *ci, u8 value)
335 {
336         hw_write(ci, OP_DEVICEADDR, DEVICEADDR_USBADR,
337                  value << __ffs(DEVICEADDR_USBADR));
338 }
339
340 /**
341  * hw_usb_reset: restart device after a bus reset (execute without
342  *               interruption)
343  *
344  * This function returns an error code
345  */
346 static int hw_usb_reset(struct ci_hdrc *ci)
347 {
348         hw_usb_set_address(ci, 0);
349
350         /* ESS flushes only at end?!? */
351         hw_write(ci, OP_ENDPTFLUSH,    ~0, ~0);
352
353         /* clear setup token semaphores */
354         hw_write(ci, OP_ENDPTSETUPSTAT, 0,  0);
355
356         /* clear complete status */
357         hw_write(ci, OP_ENDPTCOMPLETE,  0,  0);
358
359         /* wait until all bits cleared */
360         while (hw_read(ci, OP_ENDPTPRIME, ~0))
361                 udelay(10);             /* not RTOS friendly */
362
363         /* reset all endpoints ? */
364
365         /* reset internal status and wait for further instructions
366            no need to verify the port reset status (ESS does it) */
367
368         return 0;
369 }
370
371 /******************************************************************************
372  * UTIL block
373  *****************************************************************************/
374
375 static int add_td_to_list(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq,
376                           unsigned length)
377 {
378         int i;
379         u32 temp;
380         struct td_node *lastnode, *node = kzalloc(sizeof(struct td_node),
381                                                   GFP_ATOMIC);
382
383         if (node == NULL)
384                 return -ENOMEM;
385
386         node->ptr = dma_pool_alloc(hwep->td_pool, GFP_ATOMIC,
387                                    &node->dma);
388         if (node->ptr == NULL) {
389                 kfree(node);
390                 return -ENOMEM;
391         }
392
393         memset(node->ptr, 0, sizeof(struct ci_hw_td));
394         node->ptr->token = cpu_to_le32(length << __ffs(TD_TOTAL_BYTES));
395         node->ptr->token &= cpu_to_le32(TD_TOTAL_BYTES);
396         node->ptr->token |= cpu_to_le32(TD_STATUS_ACTIVE);
397
398         temp = (u32) (hwreq->req.dma + hwreq->req.actual);
399         if (length) {
400                 node->ptr->page[0] = cpu_to_le32(temp);
401                 for (i = 1; i < TD_PAGE_COUNT; i++) {
402                         u32 page = temp + i * CI_HDRC_PAGE_SIZE;
403                         page &= ~TD_RESERVED_MASK;
404                         node->ptr->page[i] = cpu_to_le32(page);
405                 }
406         }
407
408         hwreq->req.actual += length;
409
410         if (!list_empty(&hwreq->tds)) {
411                 /* get the last entry */
412                 lastnode = list_entry(hwreq->tds.prev,
413                                 struct td_node, td);
414                 lastnode->ptr->next = cpu_to_le32(node->dma);
415         }
416
417         INIT_LIST_HEAD(&node->td);
418         list_add_tail(&node->td, &hwreq->tds);
419
420         return 0;
421 }
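/*
 * Editorial note (illustrative, assuming CI_HDRC_PAGE_SIZE is 4 KiB and
 * TD_RESERVED_MASK covers the low 12 bits): a single td carries up to
 * TD_PAGE_COUNT page pointers.  page[0] keeps the exact start offset, the
 * following pointers are rounded down to page boundaries, e.g. for a
 * buffer at bus address 0x10000400:
 *
 *	page[0] = 0x10000400
 *	page[1] = (0x10000400 + 0x1000) & ~TD_RESERVED_MASK = 0x10001000
 *	page[2] = (0x10000400 + 0x2000) & ~TD_RESERVED_MASK = 0x10002000
 */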
422
423 /**
424  * _usb_addr: calculates endpoint address from direction & number
425  * @ep:  endpoint
426  */
427 static inline u8 _usb_addr(struct ci_hw_ep *ep)
428 {
429         return ((ep->dir == TX) ? USB_ENDPOINT_DIR_MASK : 0) | ep->num;
430 }
431
432 /**
433  * _hardware_enqueue: configures a request at hardware level
434  * @hwep:  endpoint
435  * @hwreq: request to be enqueued
436  *
437  * This function returns an error code
438  */
439 static int _hardware_enqueue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
440 {
441         struct ci_hdrc *ci = hwep->ci;
442         int ret = 0;
443         unsigned rest = hwreq->req.length;
444         int pages = TD_PAGE_COUNT;
445         struct td_node *firstnode, *lastnode;
446
447         /* don't queue twice */
448         if (hwreq->req.status == -EALREADY)
449                 return -EALREADY;
450
451         hwreq->req.status = -EALREADY;
452
453         ret = usb_gadget_map_request(&ci->gadget, &hwreq->req, hwep->dir);
454         if (ret)
455                 return ret;
456
457         /*
458          * The first buffer might not be page aligned.
459          * In that case the request has to span one extra td.
460          */
461         if (hwreq->req.dma % PAGE_SIZE)
462                 pages--;
463
464         if (rest == 0)
465                 add_td_to_list(hwep, hwreq, 0);
466
467         while (rest > 0) {
468                 unsigned count = min(hwreq->req.length - hwreq->req.actual,
469                                         (unsigned)(pages * CI_HDRC_PAGE_SIZE));
470                 add_td_to_list(hwep, hwreq, count);
471                 rest -= count;
472         }
473
474         if (hwreq->req.zero && hwreq->req.length
475             && (hwreq->req.length % hwep->ep.maxpacket == 0))
476                 add_td_to_list(hwep, hwreq, 0);
477
478         firstnode = list_first_entry(&hwreq->tds, struct td_node, td);
479
480         lastnode = list_entry(hwreq->tds.prev,
481                 struct td_node, td);
482
483         lastnode->ptr->next = cpu_to_le32(TD_TERMINATE);
484         if (!hwreq->req.no_interrupt)
485                 lastnode->ptr->token |= cpu_to_le32(TD_IOC);
486         wmb();
487
488         hwreq->req.actual = 0;
489         if (!list_empty(&hwep->qh.queue)) {
490                 struct ci_hw_req *hwreqprev;
491                 int n = hw_ep_bit(hwep->num, hwep->dir);
492                 int tmp_stat;
493                 struct td_node *prevlastnode;
494                 u32 next = firstnode->dma & TD_ADDR_MASK;
495
496                 hwreqprev = list_entry(hwep->qh.queue.prev,
497                                 struct ci_hw_req, queue);
498                 prevlastnode = list_entry(hwreqprev->tds.prev,
499                                 struct td_node, td);
500
501                 prevlastnode->ptr->next = cpu_to_le32(next);
502                 wmb();
503                 if (hw_read(ci, OP_ENDPTPRIME, BIT(n)))
504                         goto done;
505                 do {
506                         hw_write(ci, OP_USBCMD, USBCMD_ATDTW, USBCMD_ATDTW);
507                         tmp_stat = hw_read(ci, OP_ENDPTSTAT, BIT(n));
508                 } while (!hw_read(ci, OP_USBCMD, USBCMD_ATDTW));
509                 hw_write(ci, OP_USBCMD, USBCMD_ATDTW, 0);
510                 if (tmp_stat)
511                         goto done;
512         }
513
514         /*  QH configuration */
515         hwep->qh.ptr->td.next = cpu_to_le32(firstnode->dma);
516         hwep->qh.ptr->td.token &=
517                 cpu_to_le32(~(TD_STATUS_HALTED|TD_STATUS_ACTIVE));
518
519         if (hwep->type == USB_ENDPOINT_XFER_ISOC) {
520                 u32 mul = hwreq->req.length / hwep->ep.maxpacket;
521
522                 if (hwreq->req.length % hwep->ep.maxpacket)
523                         mul++;
524                 hwep->qh.ptr->cap |= mul << __ffs(QH_MULT);
525         }
526
527         wmb();   /* synchronize before ep prime */
528
529         ret = hw_ep_prime(ci, hwep->num, hwep->dir,
530                            hwep->type == USB_ENDPOINT_XFER_CONTROL);
531 done:
532         return ret;
533 }
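/*
 * Editorial note (worked example, assuming TD_PAGE_COUNT == 5 and
 * CI_HDRC_PAGE_SIZE == 4 KiB): a page-aligned 24 KiB bulk request is split
 * by the loop above into two tds,
 *
 *	td0: count = min(24 KiB, 5 * 4 KiB) = 20 KiB
 *	td1: count = min( 4 KiB, 5 * 4 KiB) =  4 KiB
 *
 * plus an extra zero-length td if req.zero is set and the length is a
 * multiple of maxpacket.  An unaligned buffer loses one page slot per td
 * (pages--), so it may need one more td.
 */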
534
535 /*
536  * free_pending_td: frees the endpoint's pending td
537  * @hwep: endpoint
538  */
539 static void free_pending_td(struct ci_hw_ep *hwep)
540 {
541         struct td_node *pending = hwep->pending_td;
542
543         dma_pool_free(hwep->td_pool, pending->ptr, pending->dma);
544         hwep->pending_td = NULL;
545         kfree(pending);
546 }
547
548 /**
549  * _hardware_dequeue: handles a request at hardware level
550  * @hwep:  endpoint
551  * @hwreq: request being dequeued
552  *
553  * This function returns an error code
554  */
555 static int _hardware_dequeue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
556 {
557         u32 tmptoken;
558         struct td_node *node, *tmpnode;
559         unsigned remaining_length;
560         unsigned actual = hwreq->req.length;
561
562         if (hwreq->req.status != -EALREADY)
563                 return -EINVAL;
564
565         hwreq->req.status = 0;
566
567         list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
568                 tmptoken = le32_to_cpu(node->ptr->token);
569                 if ((TD_STATUS_ACTIVE & tmptoken) != 0) {
570                         hwreq->req.status = -EALREADY;
571                         return -EBUSY;
572                 }
573
574                 remaining_length = (tmptoken & TD_TOTAL_BYTES);
575                 remaining_length >>= __ffs(TD_TOTAL_BYTES);
576                 actual -= remaining_length;
577
578                 hwreq->req.status = tmptoken & TD_STATUS;
579                 if ((TD_STATUS_HALTED & hwreq->req.status)) {
580                         hwreq->req.status = -EPIPE;
581                         break;
582                 } else if ((TD_STATUS_DT_ERR & hwreq->req.status)) {
583                         hwreq->req.status = -EPROTO;
584                         break;
585                 } else if ((TD_STATUS_TR_ERR & hwreq->req.status)) {
586                         hwreq->req.status = -EILSEQ;
587                         break;
588                 }
589
590                 if (remaining_length) {
591                         if (hwep->dir) {
592                                 hwreq->req.status = -EPROTO;
593                                 break;
594                         }
595                 }
596                 /*
597                  * As the hardware could still address the freed td,
598                  * which would render the udc unusable, the cleanup of
599                  * the td has to be delayed by one.
600                  */
601                 if (hwep->pending_td)
602                         free_pending_td(hwep);
603
604                 hwep->pending_td = node;
605                 list_del_init(&node->td);
606         }
607
608         usb_gadget_unmap_request(&hwep->ci->gadget, &hwreq->req, hwep->dir);
609
610         hwreq->req.actual += actual;
611
612         if (hwreq->req.status)
613                 return hwreq->req.status;
614
615         return hwreq->req.actual;
616 }
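/*
 * Editorial note (illustrative): the loop above derives the transferred
 * length from each td's token, e.g. for a 512-byte OUT request of which
 * the host sent only 200 bytes:
 *
 *	remaining = (token & TD_TOTAL_BYTES) >> __ffs(TD_TOTAL_BYTES) = 312
 *	req.actual += 512 - 312 = 200
 *
 * and maps the status bits to errno values: TD_STATUS_HALTED -> -EPIPE,
 * TD_STATUS_DT_ERR -> -EPROTO, TD_STATUS_TR_ERR -> -EILSEQ.
 */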
617
618 /**
619  * _ep_nuke: dequeues all endpoint requests
620  * @hwep: endpoint
621  *
622  * This function returns an error code
623  * Caller must hold lock
624  */
625 static int _ep_nuke(struct ci_hw_ep *hwep)
626 __releases(hwep->lock)
627 __acquires(hwep->lock)
628 {
629         struct td_node *node, *tmpnode;
630         if (hwep == NULL)
631                 return -EINVAL;
632
633         hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
634
635         while (!list_empty(&hwep->qh.queue)) {
636
637                 /* pop oldest request */
638                 struct ci_hw_req *hwreq = list_entry(hwep->qh.queue.next,
639                                                      struct ci_hw_req, queue);
640
641                 list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
642                         dma_pool_free(hwep->td_pool, node->ptr, node->dma);
643                         list_del_init(&node->td);
644                         node->ptr = NULL;
645                         kfree(node);
646                 }
647
648                 list_del_init(&hwreq->queue);
649                 hwreq->req.status = -ESHUTDOWN;
650
651                 if (hwreq->req.complete != NULL) {
652                         spin_unlock(hwep->lock);
653                         hwreq->req.complete(&hwep->ep, &hwreq->req);
654                         spin_lock(hwep->lock);
655                 }
656         }
657
658         if (hwep->pending_td)
659                 free_pending_td(hwep);
660
661         return 0;
662 }
663
664 /**
665  * _gadget_stop_activity: stops all USB activity, flushes & disables all endpts
666  * @gadget: gadget
667  *
668  * This function returns an error code
669  */
670 static int _gadget_stop_activity(struct usb_gadget *gadget)
671 {
672         struct usb_ep *ep;
673         struct ci_hdrc    *ci = container_of(gadget, struct ci_hdrc, gadget);
674         unsigned long flags;
675
676         spin_lock_irqsave(&ci->lock, flags);
677         ci->gadget.speed = USB_SPEED_UNKNOWN;
678         ci->remote_wakeup = 0;
679         ci->suspended = 0;
680         spin_unlock_irqrestore(&ci->lock, flags);
681
682         /* flush all endpoints */
683         gadget_for_each_ep(ep, gadget) {
684                 usb_ep_fifo_flush(ep);
685         }
686         usb_ep_fifo_flush(&ci->ep0out->ep);
687         usb_ep_fifo_flush(&ci->ep0in->ep);
688
689         if (ci->driver)
690                 ci->driver->disconnect(gadget);
691
692         /* make sure to disable all endpoints */
693         gadget_for_each_ep(ep, gadget) {
694                 usb_ep_disable(ep);
695         }
696
697         if (ci->status != NULL) {
698                 usb_ep_free_request(&ci->ep0in->ep, ci->status);
699                 ci->status = NULL;
700         }
701
702         return 0;
703 }
704
705 /******************************************************************************
706  * ISR block
707  *****************************************************************************/
708 /**
709  * isr_reset_handler: USB reset interrupt handler
710  * @ci: UDC device
711  *
712  * This function resets USB engine after a bus reset occurred
713  */
714 static void isr_reset_handler(struct ci_hdrc *ci)
715 __releases(ci->lock)
716 __acquires(ci->lock)
717 {
718         int retval;
719
720         spin_unlock(&ci->lock);
721         retval = _gadget_stop_activity(&ci->gadget);
722         if (retval)
723                 goto done;
724
725         retval = hw_usb_reset(ci);
726         if (retval)
727                 goto done;
728
729         ci->status = usb_ep_alloc_request(&ci->ep0in->ep, GFP_ATOMIC);
730         if (ci->status == NULL)
731                 retval = -ENOMEM;
732
733 done:
734         spin_lock(&ci->lock);
735
736         if (retval)
737                 dev_err(ci->dev, "error: %i\n", retval);
738 }
739
740 /**
741  * isr_get_status_complete: get_status request complete function
742  * @ep:  endpoint
743  * @req: request handled
744  *
745  * Caller must release lock
746  */
747 static void isr_get_status_complete(struct usb_ep *ep, struct usb_request *req)
748 {
749         if (ep == NULL || req == NULL)
750                 return;
751
752         kfree(req->buf);
753         usb_ep_free_request(ep, req);
754 }
755
756 /**
757  * _ep_queue: queues (submits) an I/O request to an endpoint
758  *
759  * Caller must hold lock
760  */
761 static int _ep_queue(struct usb_ep *ep, struct usb_request *req,
762                     gfp_t __maybe_unused gfp_flags)
763 {
764         struct ci_hw_ep  *hwep  = container_of(ep,  struct ci_hw_ep, ep);
765         struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req);
766         struct ci_hdrc *ci = hwep->ci;
767         int retval = 0;
768
769         if (ep == NULL || req == NULL || hwep->ep.desc == NULL)
770                 return -EINVAL;
771
772         if (hwep->type == USB_ENDPOINT_XFER_CONTROL) {
773                 if (req->length)
774                         hwep = (ci->ep0_dir == RX) ?
775                                ci->ep0out : ci->ep0in;
776                 if (!list_empty(&hwep->qh.queue)) {
777                         _ep_nuke(hwep);
778                         retval = -EOVERFLOW;
779                         dev_warn(hwep->ci->dev, "endpoint ctrl %X nuked\n",
780                                  _usb_addr(hwep));
781                 }
782         }
783
784         if (usb_endpoint_xfer_isoc(hwep->ep.desc) &&
785             hwreq->req.length > (1 + hwep->ep.mult) * hwep->ep.maxpacket) {
786                 dev_err(hwep->ci->dev, "request length too big for isochronous\n");
787                 return -EMSGSIZE;
788         }
789
790         /* first nuke then test link, e.g. previous status has not been sent */
791         if (!list_empty(&hwreq->queue)) {
792                 dev_err(hwep->ci->dev, "request already in queue\n");
793                 return -EBUSY;
794         }
795
796         /* push request */
797         hwreq->req.status = -EINPROGRESS;
798         hwreq->req.actual = 0;
799
800         retval = _hardware_enqueue(hwep, hwreq);
801
802         if (retval == -EALREADY)
803                 retval = 0;
804         if (!retval)
805                 list_add_tail(&hwreq->queue, &hwep->qh.queue);
806
807         return retval;
808 }
809
810 /**
811  * isr_get_status_response: get_status request response
812  * @ci: ci struct
813  * @setup: setup request packet
814  *
815  * This function returns an error code
816  */
817 static int isr_get_status_response(struct ci_hdrc *ci,
818                                    struct usb_ctrlrequest *setup)
819 __releases(hwep->lock)
820 __acquires(hwep->lock)
821 {
822         struct ci_hw_ep *hwep = ci->ep0in;
823         struct usb_request *req = NULL;
824         gfp_t gfp_flags = GFP_ATOMIC;
825         int dir, num, retval;
826
827         if (hwep == NULL || setup == NULL)
828                 return -EINVAL;
829
830         spin_unlock(hwep->lock);
831         req = usb_ep_alloc_request(&hwep->ep, gfp_flags);
832         spin_lock(hwep->lock);
833         if (req == NULL)
834                 return -ENOMEM;
835
836         req->complete = isr_get_status_complete;
837         req->length   = 2;
838         req->buf      = kzalloc(req->length, gfp_flags);
839         if (req->buf == NULL) {
840                 retval = -ENOMEM;
841                 goto err_free_req;
842         }
843
844         if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
845                 /* Assume that device is bus powered for now. */
846                 *(u16 *)req->buf = ci->remote_wakeup << 1;
847                 retval = 0;
848         } else if ((setup->bRequestType & USB_RECIP_MASK) \
849                    == USB_RECIP_ENDPOINT) {
850                 dir = (le16_to_cpu(setup->wIndex) & USB_ENDPOINT_DIR_MASK) ?
851                         TX : RX;
852                 num =  le16_to_cpu(setup->wIndex) & USB_ENDPOINT_NUMBER_MASK;
853                 *(u16 *)req->buf = hw_ep_get_halt(ci, num, dir);
854         }
855         /* else do nothing; reserved for future use */
856
857         retval = _ep_queue(&hwep->ep, req, gfp_flags);
858         if (retval)
859                 goto err_free_buf;
860
861         return 0;
862
863  err_free_buf:
864         kfree(req->buf);
865  err_free_req:
866         spin_unlock(hwep->lock);
867         usb_ep_free_request(&hwep->ep, req);
868         spin_lock(hwep->lock);
869         return retval;
870 }
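/*
 * Editorial note (illustrative): the two-byte GET_STATUS payload filled in
 * above, in the order the handler builds it:
 *
 *	device recipient:   *buf = remote_wakeup << 1;	bit 1 = remote wakeup,
 *							bit 0 (self-powered) left 0
 *	endpoint recipient: *buf = hw_ep_get_halt();	bit 0 = halt
 *
 * The value is stored with a plain u16 write, so a big-endian CPU would
 * presumably need a cpu_to_le16() here (an observation, not a change).
 */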
871
872 /**
873  * isr_setup_status_complete: setup_status request complete function
874  * @ep:  endpoint
875  * @req: request handled
876  *
877  * Caller must release lock. Put the port in test mode if test mode
878  * feature is selected.
879  */
880 static void
881 isr_setup_status_complete(struct usb_ep *ep, struct usb_request *req)
882 {
883         struct ci_hdrc *ci = req->context;
884         unsigned long flags;
885
886         if (ci->setaddr) {
887                 hw_usb_set_address(ci, ci->address);
888                 ci->setaddr = false;
889         }
890
891         spin_lock_irqsave(&ci->lock, flags);
892         if (ci->test_mode)
893                 hw_port_test_set(ci, ci->test_mode);
894         spin_unlock_irqrestore(&ci->lock, flags);
895 }
896
897 /**
898  * isr_setup_status_phase: queues the status phase of a setup transaction
899  * @ci: ci struct
900  *
901  * This function returns an error code
902  */
903 static int isr_setup_status_phase(struct ci_hdrc *ci)
904 {
905         int retval;
906         struct ci_hw_ep *hwep;
907
908         hwep = (ci->ep0_dir == TX) ? ci->ep0out : ci->ep0in;
909         ci->status->context = ci;
910         ci->status->complete = isr_setup_status_complete;
911
912         retval = _ep_queue(&hwep->ep, ci->status, GFP_ATOMIC);
913
914         return retval;
915 }
916
917 /**
918  * isr_tr_complete_low: transaction complete low level handler
919  * @hwep: endpoint
920  *
921  * This function returns an error code
922  * Caller must hold lock
923  */
924 static int isr_tr_complete_low(struct ci_hw_ep *hwep)
925 __releases(hwep->lock)
926 __acquires(hwep->lock)
927 {
928         struct ci_hw_req *hwreq, *hwreqtemp;
929         struct ci_hw_ep *hweptemp = hwep;
930         int retval = 0;
931
932         list_for_each_entry_safe(hwreq, hwreqtemp, &hwep->qh.queue,
933                         queue) {
934                 retval = _hardware_dequeue(hwep, hwreq);
935                 if (retval < 0)
936                         break;
937                 list_del_init(&hwreq->queue);
938                 if (hwreq->req.complete != NULL) {
939                         spin_unlock(hwep->lock);
940                         if ((hwep->type == USB_ENDPOINT_XFER_CONTROL) &&
941                                         hwreq->req.length)
942                                 hweptemp = hwep->ci->ep0in;
943                         hwreq->req.complete(&hweptemp->ep, &hwreq->req);
944                         spin_lock(hwep->lock);
945                 }
946         }
947
948         if (retval == -EBUSY)
949                 retval = 0;
950
951         return retval;
952 }
953
954 /**
955  * isr_tr_complete_handler: transaction complete interrupt handler
956  * @ci: UDC descriptor
957  *
958  * This function handles traffic events
959  */
960 static void isr_tr_complete_handler(struct ci_hdrc *ci)
961 __releases(ci->lock)
962 __acquires(ci->lock)
963 {
964         unsigned i;
965         u8 tmode = 0;
966
967         for (i = 0; i < ci->hw_ep_max; i++) {
968                 struct ci_hw_ep *hwep  = &ci->ci_hw_ep[i];
969                 int type, num, dir, err = -EINVAL;
970                 struct usb_ctrlrequest req;
971
972                 if (hwep->ep.desc == NULL)
973                         continue;   /* not configured */
974
975                 if (hw_test_and_clear_complete(ci, i)) {
976                         err = isr_tr_complete_low(hwep);
977                         if (hwep->type == USB_ENDPOINT_XFER_CONTROL) {
978                                 if (err > 0)   /* needs status phase */
979                                         err = isr_setup_status_phase(ci);
980                                 if (err < 0) {
981                                         spin_unlock(&ci->lock);
982                                         if (usb_ep_set_halt(&hwep->ep))
983                                                 dev_err(ci->dev,
984                                                         "error: ep_set_halt\n");
985                                         spin_lock(&ci->lock);
986                                 }
987                         }
988                 }
989
990                 if (hwep->type != USB_ENDPOINT_XFER_CONTROL ||
991                     !hw_test_and_clear_setup_status(ci, i))
992                         continue;
993
994                 if (i != 0) {
995                         dev_warn(ci->dev, "ctrl traffic at endpoint %d\n", i);
996                         continue;
997                 }
998
999                 /*
1000                  * Flush data and handshake transactions of previous
1001                  * setup packet.
1002                  */
1003                 _ep_nuke(ci->ep0out);
1004                 _ep_nuke(ci->ep0in);
1005
1006                 /* read_setup_packet */
1007                 do {
1008                         hw_test_and_set_setup_guard(ci);
1009                         memcpy(&req, &hwep->qh.ptr->setup, sizeof(req));
1010                 } while (!hw_test_and_clear_setup_guard(ci));
1011
1012                 type = req.bRequestType;
1013
1014                 ci->ep0_dir = (type & USB_DIR_IN) ? TX : RX;
1015
1016                 switch (req.bRequest) {
1017                 case USB_REQ_CLEAR_FEATURE:
1018                         if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
1019                                         le16_to_cpu(req.wValue) ==
1020                                         USB_ENDPOINT_HALT) {
1021                                 if (req.wLength != 0)
1022                                         break;
1023                                 num  = le16_to_cpu(req.wIndex);
1024                                 dir = num & USB_ENDPOINT_DIR_MASK;
1025                                 num &= USB_ENDPOINT_NUMBER_MASK;
1026                                 if (dir) /* TX */
1027                                         num += ci->hw_ep_max/2;
1028                                 if (!ci->ci_hw_ep[num].wedge) {
1029                                         spin_unlock(&ci->lock);
1030                                         err = usb_ep_clear_halt(
1031                                                 &ci->ci_hw_ep[num].ep);
1032                                         spin_lock(&ci->lock);
1033                                         if (err)
1034                                                 break;
1035                                 }
1036                                 err = isr_setup_status_phase(ci);
1037                         } else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE) &&
1038                                         le16_to_cpu(req.wValue) ==
1039                                         USB_DEVICE_REMOTE_WAKEUP) {
1040                                 if (req.wLength != 0)
1041                                         break;
1042                                 ci->remote_wakeup = 0;
1043                                 err = isr_setup_status_phase(ci);
1044                         } else {
1045                                 goto delegate;
1046                         }
1047                         break;
1048                 case USB_REQ_GET_STATUS:
1049                         if (type != (USB_DIR_IN|USB_RECIP_DEVICE)   &&
1050                             type != (USB_DIR_IN|USB_RECIP_ENDPOINT) &&
1051                             type != (USB_DIR_IN|USB_RECIP_INTERFACE))
1052                                 goto delegate;
1053                         if (le16_to_cpu(req.wLength) != 2 ||
1054                             le16_to_cpu(req.wValue)  != 0)
1055                                 break;
1056                         err = isr_get_status_response(ci, &req);
1057                         break;
1058                 case USB_REQ_SET_ADDRESS:
1059                         if (type != (USB_DIR_OUT|USB_RECIP_DEVICE))
1060                                 goto delegate;
1061                         if (le16_to_cpu(req.wLength) != 0 ||
1062                             le16_to_cpu(req.wIndex)  != 0)
1063                                 break;
1064                         ci->address = (u8)le16_to_cpu(req.wValue);
1065                         ci->setaddr = true;
1066                         err = isr_setup_status_phase(ci);
1067                         break;
1068                 case USB_REQ_SET_FEATURE:
1069                         if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
1070                                         le16_to_cpu(req.wValue) ==
1071                                         USB_ENDPOINT_HALT) {
1072                                 if (req.wLength != 0)
1073                                         break;
1074                                 num  = le16_to_cpu(req.wIndex);
1075                                 dir = num & USB_ENDPOINT_DIR_MASK;
1076                                 num &= USB_ENDPOINT_NUMBER_MASK;
1077                                 if (dir) /* TX */
1078                                         num += ci->hw_ep_max/2;
1079
1080                                 spin_unlock(&ci->lock);
1081                                 err = usb_ep_set_halt(&ci->ci_hw_ep[num].ep);
1082                                 spin_lock(&ci->lock);
1083                                 if (!err)
1084                                         isr_setup_status_phase(ci);
1085                         } else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE)) {
1086                                 if (req.wLength != 0)
1087                                         break;
1088                                 switch (le16_to_cpu(req.wValue)) {
1089                                 case USB_DEVICE_REMOTE_WAKEUP:
1090                                         ci->remote_wakeup = 1;
1091                                         err = isr_setup_status_phase(ci);
1092                                         break;
1093                                 case USB_DEVICE_TEST_MODE:
1094                                         tmode = le16_to_cpu(req.wIndex) >> 8;
1095                                         switch (tmode) {
1096                                         case TEST_J:
1097                                         case TEST_K:
1098                                         case TEST_SE0_NAK:
1099                                         case TEST_PACKET:
1100                                         case TEST_FORCE_EN:
1101                                                 ci->test_mode = tmode;
1102                                                 err = isr_setup_status_phase(
1103                                                                 ci);
1104                                                 break;
1105                                         default:
1106                                                 break;
1107                                         }
1108                                 default:
1109                                         goto delegate;
1110                                 }
1111                         } else {
1112                                 goto delegate;
1113                         }
1114                         break;
1115                 default:
1116 delegate:
1117                         if (req.wLength == 0)   /* no data phase */
1118                                 ci->ep0_dir = TX;
1119
1120                         spin_unlock(&ci->lock);
1121                         err = ci->driver->setup(&ci->gadget, &req);
1122                         spin_lock(&ci->lock);
1123                         break;
1124                 }
1125
1126                 if (err < 0) {
1127                         spin_unlock(&ci->lock);
1128                         if (usb_ep_set_halt(&hwep->ep))
1129                                 dev_err(ci->dev, "error: ep_set_halt\n");
1130                         spin_lock(&ci->lock);
1131                 }
1132         }
1133 }
1134
1135 /******************************************************************************
1136  * ENDPT block
1137  *****************************************************************************/
1138 /**
1139  * ep_enable: configure endpoint, making it usable
1140  *
1141  * Check usb_ep_enable() at "usb_gadget.h" for details
1142  */
1143 static int ep_enable(struct usb_ep *ep,
1144                      const struct usb_endpoint_descriptor *desc)
1145 {
1146         struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
1147         int retval = 0;
1148         unsigned long flags;
1149         u32 cap = 0;
1150
1151         if (ep == NULL || desc == NULL)
1152                 return -EINVAL;
1153
1154         spin_lock_irqsave(hwep->lock, flags);
1155
1156         /* only internal SW should enable ctrl endpts */
1157
1158         hwep->ep.desc = desc;
1159
1160         if (!list_empty(&hwep->qh.queue))
1161                 dev_warn(hwep->ci->dev, "enabling a non-empty endpoint!\n");
1162
1163         hwep->dir  = usb_endpoint_dir_in(desc) ? TX : RX;
1164         hwep->num  = usb_endpoint_num(desc);
1165         hwep->type = usb_endpoint_type(desc);
1166
1167         hwep->ep.maxpacket = usb_endpoint_maxp(desc) & 0x07ff;
1168         hwep->ep.mult = QH_ISO_MULT(usb_endpoint_maxp(desc));
1169
1170         if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
1171                 cap |= QH_IOS;
1172         if (hwep->num)
1173                 cap |= QH_ZLT;
1174         cap |= (hwep->ep.maxpacket << __ffs(QH_MAX_PKT)) & QH_MAX_PKT;
1175
1176         hwep->qh.ptr->cap = cpu_to_le32(cap);
1177
1178         hwep->qh.ptr->td.next |= cpu_to_le32(TD_TERMINATE);   /* needed? */
1179
1180         /*
1181          * Enable endpoints in the HW other than ep0 as ep0
1182          * is always enabled
1183          */
1184         if (hwep->num)
1185                 retval |= hw_ep_enable(hwep->ci, hwep->num, hwep->dir,
1186                                        hwep->type);
1187
1188         spin_unlock_irqrestore(hwep->lock, flags);
1189         return retval;
1190 }
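/*
 * Editorial note (illustrative): for a 512-byte bulk endpoint the queue
 * head capabilities word assembled above ends up as roughly
 *
 *	cap = QH_ZLT | (512 << __ffs(QH_MAX_PKT));
 *
 * while ep0 instead gets QH_IOS (interrupt on setup) and, being num 0,
 * no QH_ZLT.
 */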
1191
1192 /**
1193  * ep_disable: endpoint is no longer usable
1194  *
1195  * Check usb_ep_disable() at "usb_gadget.h" for details
1196  */
1197 static int ep_disable(struct usb_ep *ep)
1198 {
1199         struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
1200         int direction, retval = 0;
1201         unsigned long flags;
1202
1203         if (ep == NULL)
1204                 return -EINVAL;
1205         else if (hwep->ep.desc == NULL)
1206                 return -EBUSY;
1207
1208         spin_lock_irqsave(hwep->lock, flags);
1209
1210         /* only internal SW should disable ctrl endpts */
1211
1212         direction = hwep->dir;
1213         do {
1214                 retval |= _ep_nuke(hwep);
1215                 retval |= hw_ep_disable(hwep->ci, hwep->num, hwep->dir);
1216
1217                 if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
1218                         hwep->dir = (hwep->dir == TX) ? RX : TX;
1219
1220         } while (hwep->dir != direction);
1221
1222         hwep->ep.desc = NULL;
1223
1224         spin_unlock_irqrestore(hwep->lock, flags);
1225         return retval;
1226 }
1227
1228 /**
1229  * ep_alloc_request: allocate a request object to use with this endpoint
1230  *
1231  * Check usb_ep_alloc_request() at "usb_gadget.h" for details
1232  */
1233 static struct usb_request *ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
1234 {
1235         struct ci_hw_req *hwreq = NULL;
1236
1237         if (ep == NULL)
1238                 return NULL;
1239
1240         hwreq = kzalloc(sizeof(struct ci_hw_req), gfp_flags);
1241         if (hwreq != NULL) {
1242                 INIT_LIST_HEAD(&hwreq->queue);
1243                 INIT_LIST_HEAD(&hwreq->tds);
1244         }
1245
1246         return (hwreq == NULL) ? NULL : &hwreq->req;
1247 }
1248
1249 /**
1250  * ep_free_request: frees a request object
1251  *
1252  * Check usb_ep_free_request() at "usb_gadget.h" for details
1253  */
1254 static void ep_free_request(struct usb_ep *ep, struct usb_request *req)
1255 {
1256         struct ci_hw_ep  *hwep  = container_of(ep,  struct ci_hw_ep, ep);
1257         struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req);
1258         struct td_node *node, *tmpnode;
1259         unsigned long flags;
1260
1261         if (ep == NULL || req == NULL) {
1262                 return;
1263         } else if (!list_empty(&hwreq->queue)) {
1264                 dev_err(hwep->ci->dev, "freeing queued request\n");
1265                 return;
1266         }
1267
1268         spin_lock_irqsave(hwep->lock, flags);
1269
1270         list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
1271                 dma_pool_free(hwep->td_pool, node->ptr, node->dma);
1272                 list_del_init(&node->td);
1273                 node->ptr = NULL;
1274                 kfree(node);
1275         }
1276
1277         kfree(hwreq);
1278
1279         spin_unlock_irqrestore(hwep->lock, flags);
1280 }
1281
1282 /**
1283  * ep_queue: queues (submits) an I/O request to an endpoint
1284  *
1285  * Check usb_ep_queue() at "usb_gadget.h" for details
1286  */
1287 static int ep_queue(struct usb_ep *ep, struct usb_request *req,
1288                     gfp_t __maybe_unused gfp_flags)
1289 {
1290         struct ci_hw_ep  *hwep  = container_of(ep,  struct ci_hw_ep, ep);
1291         int retval = 0;
1292         unsigned long flags;
1293
1294         if (ep == NULL || req == NULL || hwep->ep.desc == NULL)
1295                 return -EINVAL;
1296
1297         spin_lock_irqsave(hwep->lock, flags);
1298         retval = _ep_queue(ep, req, gfp_flags);
1299         spin_unlock_irqrestore(hwep->lock, flags);
1300         return retval;
1301 }
1302
1303 /**
1304  * ep_dequeue: dequeues (cancels, unlinks) an I/O request from an endpoint
1305  *
1306  * Check usb_ep_dequeue() at "usb_gadget.h" for details
1307  */
1308 static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
1309 {
1310         struct ci_hw_ep  *hwep  = container_of(ep,  struct ci_hw_ep, ep);
1311         struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req);
1312         unsigned long flags;
1313
1314         if (ep == NULL || req == NULL || hwreq->req.status != -EALREADY ||
1315                 hwep->ep.desc == NULL || list_empty(&hwreq->queue) ||
1316                 list_empty(&hwep->qh.queue))
1317                 return -EINVAL;
1318
1319         spin_lock_irqsave(hwep->lock, flags);
1320
1321         hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
1322
1323         /* pop request */
1324         list_del_init(&hwreq->queue);
1325
1326         usb_gadget_unmap_request(&hwep->ci->gadget, req, hwep->dir);
1327
1328         req->status = -ECONNRESET;
1329
1330         if (hwreq->req.complete != NULL) {
1331                 spin_unlock(hwep->lock);
1332                 hwreq->req.complete(&hwep->ep, &hwreq->req);
1333                 spin_lock(hwep->lock);
1334         }
1335
1336         spin_unlock_irqrestore(hwep->lock, flags);
1337         return 0;
1338 }
1339
1340 /**
1341  * ep_set_halt: sets the endpoint halt feature
1342  *
1343  * Check usb_ep_set_halt() at "usb_gadget.h" for details
1344  */
1345 static int ep_set_halt(struct usb_ep *ep, int value)
1346 {
1347         struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
1348         int direction, retval = 0;
1349         unsigned long flags;
1350
1351         if (ep == NULL || hwep->ep.desc == NULL)
1352                 return -EINVAL;
1353
1354         if (usb_endpoint_xfer_isoc(hwep->ep.desc))
1355                 return -EOPNOTSUPP;
1356
1357         spin_lock_irqsave(hwep->lock, flags);
1358
1359 #ifndef STALL_IN
1360         /* g_file_storage MS compliant but g_zero fails chapter 9 compliance */
1361         if (value && hwep->type == USB_ENDPOINT_XFER_BULK && hwep->dir == TX &&
1362             !list_empty(&hwep->qh.queue)) {
1363                 spin_unlock_irqrestore(hwep->lock, flags);
1364                 return -EAGAIN;
1365         }
1366 #endif
1367
1368         direction = hwep->dir;
1369         do {
1370                 retval |= hw_ep_set_halt(hwep->ci, hwep->num, hwep->dir, value);
1371
1372                 if (!value)
1373                         hwep->wedge = 0;
1374
1375                 if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
1376                         hwep->dir = (hwep->dir == TX) ? RX : TX;
1377
1378         } while (hwep->dir != direction);
1379
1380         spin_unlock_irqrestore(hwep->lock, flags);
1381         return retval;
1382 }
1383
1384 /**
1385  * ep_set_wedge: sets the halt feature and ignores clear requests
1386  *
1387  * Check usb_ep_set_wedge() at "usb_gadget.h" for details
1388  */
1389 static int ep_set_wedge(struct usb_ep *ep)
1390 {
1391         struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
1392         unsigned long flags;
1393
1394         if (ep == NULL || hwep->ep.desc == NULL)
1395                 return -EINVAL;
1396
1397         spin_lock_irqsave(hwep->lock, flags);
1398         hwep->wedge = 1;
1399         spin_unlock_irqrestore(hwep->lock, flags);
1400
1401         return usb_ep_set_halt(ep);
1402 }
1403
1404 /**
1405  * ep_fifo_flush: flushes contents of a fifo
1406  *
1407  * Check usb_ep_fifo_flush() at "usb_gadget.h" for details
1408  */
1409 static void ep_fifo_flush(struct usb_ep *ep)
1410 {
1411         struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
1412         unsigned long flags;
1413
1414         if (ep == NULL) {
1415                 dev_err(hwep->ci->dev, "%02X: -EINVAL\n", _usb_addr(hwep));
1416                 return;
1417         }
1418
1419         spin_lock_irqsave(hwep->lock, flags);
1420
1421         hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
1422
1423         spin_unlock_irqrestore(hwep->lock, flags);
1424 }
1425
1426 /**
1427  * Endpoint-specific part of the API to the USB controller hardware
1428  * Check "usb_gadget.h" for details
1429  */
1430 static const struct usb_ep_ops usb_ep_ops = {
1431         .enable        = ep_enable,
1432         .disable       = ep_disable,
1433         .alloc_request = ep_alloc_request,
1434         .free_request  = ep_free_request,
1435         .queue         = ep_queue,
1436         .dequeue       = ep_dequeue,
1437         .set_halt      = ep_set_halt,
1438         .set_wedge     = ep_set_wedge,
1439         .fifo_flush    = ep_fifo_flush,
1440 };
1441
1442 /******************************************************************************
1443  * GADGET block
1444  *****************************************************************************/
1445 static int ci_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
1446 {
1447         struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
1448         unsigned long flags;
1449         int gadget_ready = 0;
1450
1451         spin_lock_irqsave(&ci->lock, flags);
1452         ci->vbus_active = is_active;
1453         if (ci->driver)
1454                 gadget_ready = 1;
1455         spin_unlock_irqrestore(&ci->lock, flags);
1456
1457         if (gadget_ready) {
1458                 if (is_active) {
1459                         pm_runtime_get_sync(&_gadget->dev);
1460                         hw_device_reset(ci, USBMODE_CM_DC);
1461                         hw_device_state(ci, ci->ep0out->qh.dma);
1462                         dev_dbg(ci->dev, "Connected to host\n");
1463                 } else {
1464                         hw_device_state(ci, 0);
1465                         if (ci->platdata->notify_event)
1466                                 ci->platdata->notify_event(ci,
1467                                 CI_HDRC_CONTROLLER_STOPPED_EVENT);
1468                         _gadget_stop_activity(&ci->gadget);
1469                         pm_runtime_put_sync(&_gadget->dev);
1470                         dev_dbg(ci->dev, "Disconnected from host\n");
1471                 }
1472         }
1473
1474         return 0;
1475 }
1476
1477 static int ci_udc_wakeup(struct usb_gadget *_gadget)
1478 {
1479         struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
1480         unsigned long flags;
1481         int ret = 0;
1482
1483         spin_lock_irqsave(&ci->lock, flags);
1484         if (!ci->remote_wakeup) {
1485                 ret = -EOPNOTSUPP;
1486                 goto out;
1487         }
1488         if (!hw_read(ci, OP_PORTSC, PORTSC_SUSP)) {
1489                 ret = -EINVAL;
1490                 goto out;
1491         }
1492         hw_write(ci, OP_PORTSC, PORTSC_FPR, PORTSC_FPR);
1493 out:
1494         spin_unlock_irqrestore(&ci->lock, flags);
1495         return ret;
1496 }
1497
1498 static int ci_udc_vbus_draw(struct usb_gadget *_gadget, unsigned ma)
1499 {
1500         struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
1501
1502         if (ci->transceiver)
1503                 return usb_phy_set_power(ci->transceiver, ma);
1504         return -ENOTSUPP;
1505 }
1506
1507 /* Change Data+ pullup status
1508  * this function is used by usb_gadget_connect/disconnect
1509  */
1510 static int ci_udc_pullup(struct usb_gadget *_gadget, int is_on)
1511 {
1512         struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
1513
1514         if (!ci->vbus_active)
1515                 return -EOPNOTSUPP;
1516
1517         if (is_on)
1518                 hw_write(ci, OP_USBCMD, USBCMD_RS, USBCMD_RS);
1519         else
1520                 hw_write(ci, OP_USBCMD, USBCMD_RS, 0);
1521
1522         return 0;
1523 }
1524
1525 static int ci_udc_start(struct usb_gadget *gadget,
1526                          struct usb_gadget_driver *driver);
1527 static int ci_udc_stop(struct usb_gadget *gadget,
1528                         struct usb_gadget_driver *driver);
1529 /**
1530  * Device operations part of the API to the USB controller hardware,
1531  * which don't involve endpoints (or i/o)
1532  * Check  "usb_gadget.h" for details
1533  */
1534 static const struct usb_gadget_ops usb_gadget_ops = {
1535         .vbus_session   = ci_udc_vbus_session,
1536         .wakeup         = ci_udc_wakeup,
1537         .pullup         = ci_udc_pullup,
1538         .vbus_draw      = ci_udc_vbus_draw,
1539         .udc_start      = ci_udc_start,
1540         .udc_stop       = ci_udc_stop,
1541 };
1542
1543 static int init_eps(struct ci_hdrc *ci)
1544 {
1545         int retval = 0, i, j;
1546
1547         for (i = 0; i < ci->hw_ep_max/2; i++)
1548                 for (j = RX; j <= TX; j++) {
1549                         int k = i + j * ci->hw_ep_max/2;
1550                         struct ci_hw_ep *hwep = &ci->ci_hw_ep[k];
1551
1552                         scnprintf(hwep->name, sizeof(hwep->name), "ep%i%s", i,
1553                                         (j == TX)  ? "in" : "out");
1554
1555                         hwep->ci          = ci;
1556                         hwep->lock         = &ci->lock;
1557                         hwep->td_pool      = ci->td_pool;
1558
1559                         hwep->ep.name      = hwep->name;
1560                         hwep->ep.ops       = &usb_ep_ops;
1561                         /*
1562                          * for ep0: maxP defined in desc, for other
1563                          * eps, maxP is set by epautoconfig() called
1564                          * by gadget layer
1565                          */
1566                         hwep->ep.maxpacket = (unsigned short)~0;
1567
1568                         INIT_LIST_HEAD(&hwep->qh.queue);
1569                         hwep->qh.ptr = dma_pool_alloc(ci->qh_pool, GFP_KERNEL,
1570                                                      &hwep->qh.dma);
1571                         if (hwep->qh.ptr == NULL)
1572                                 retval = -ENOMEM;
1573                         else
1574                                 memset(hwep->qh.ptr, 0, sizeof(*hwep->qh.ptr));
1575
1576                         /*
1577                          * set up shorthands for ep0 out and in endpoints,
1578                          * don't add to gadget's ep_list
1579                          */
1580                         if (i == 0) {
1581                                 if (j == RX)
1582                                         ci->ep0out = hwep;
1583                                 else
1584                                         ci->ep0in = hwep;
1585
1586                                 hwep->ep.maxpacket = CTRL_PAYLOAD_MAX;
1587                                 continue;
1588                         }
1589
1590                         list_add_tail(&hwep->ep.ep_list, &ci->gadget.ep_list);
1591                 }
1592
1593         return retval;
1594 }
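/*
 * Resulting layout (illustrative, assuming RX enumerates before TX as the
 * loop above relies on): the first hw_ep_max/2 entries are the OUT (RX)
 * halves and the second half the IN (TX) halves of the same physical
 * endpoints.  For hw_ep_max == 8 that gives:
 *
 *	ci_hw_ep[0] "ep0out"	ci_hw_ep[4] "ep0in"
 *	ci_hw_ep[1] "ep1out"	ci_hw_ep[5] "ep1in"
 *	ci_hw_ep[2] "ep2out"	ci_hw_ep[6] "ep2in"
 *	ci_hw_ep[3] "ep3out"	ci_hw_ep[7] "ep3in"
 *
 * Only the non-control endpoints go on gadget.ep_list; ep0 is reached via
 * the ci->ep0out/ci->ep0in shorthands instead.
 */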
1595
1596 static void destroy_eps(struct ci_hdrc *ci)
1597 {
1598         int i;
1599
1600         for (i = 0; i < ci->hw_ep_max; i++) {
1601                 struct ci_hw_ep *hwep = &ci->ci_hw_ep[i];
1602
1603                 if (hwep->pending_td)
1604                         free_pending_td(hwep);
1605                 dma_pool_free(ci->qh_pool, hwep->qh.ptr, hwep->qh.dma);
1606         }
1607 }
1608
1609 /**
1610  * ci_udc_start: register a gadget driver
1611  * @gadget: our gadget
1612  * @driver: the driver being registered
1613  *
1614  * Interrupts are enabled here.
1615  */
1616 static int ci_udc_start(struct usb_gadget *gadget,
1617                          struct usb_gadget_driver *driver)
1618 {
1619         struct ci_hdrc *ci = container_of(gadget, struct ci_hdrc, gadget);
1620         unsigned long flags;
1621         int retval = -ENOMEM;
1622
1623         if (driver->disconnect == NULL)
1624                 return -EINVAL;
1625
1627         ci->ep0out->ep.desc = &ctrl_endpt_out_desc;
1628         retval = usb_ep_enable(&ci->ep0out->ep);
1629         if (retval)
1630                 return retval;
1631
1632         ci->ep0in->ep.desc = &ctrl_endpt_in_desc;
1633         retval = usb_ep_enable(&ci->ep0in->ep);
1634         if (retval)
1635                 return retval;
1636         spin_lock_irqsave(&ci->lock, flags);
1637
1638         ci->driver = driver;
1639         pm_runtime_get_sync(&ci->gadget.dev);
1640         if (ci->vbus_active) {
1641                 hw_device_reset(ci, USBMODE_CM_DC);
1642         } else {
1643                 pm_runtime_put_sync(&ci->gadget.dev);
1644                 goto done;
1645         }
1646
1647         retval = hw_device_state(ci, ci->ep0out->qh.dma);
1648         if (retval)
1649                 pm_runtime_put_sync(&ci->gadget.dev);
1650
1651  done:
1652         spin_unlock_irqrestore(&ci->lock, flags);
1653         return retval;
1654 }
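/*
 * Note (illustrative): ->udc_start() is invoked by udc-core when a gadget
 * driver binds, for instance via usb_gadget_probe_driver().  Both halves
 * of ep0 are enabled with the fixed control descriptors defined at the top
 * of this file; the controller is reset and run here only if VBUS is
 * already active, otherwise that is left to the VBUS session handling.
 */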
1655
1656 /**
1657  * ci_udc_stop: unregister a gadget driver
1658  */
1659 static int ci_udc_stop(struct usb_gadget *gadget,
1660                         struct usb_gadget_driver *driver)
1661 {
1662         struct ci_hdrc *ci = container_of(gadget, struct ci_hdrc, gadget);
1663         unsigned long flags;
1664
1665         spin_lock_irqsave(&ci->lock, flags);
1666
1667         if (ci->vbus_active) {
1668                 hw_device_state(ci, 0);
1669                 if (ci->platdata->notify_event)
1670                         ci->platdata->notify_event(ci,
1671                                         CI_HDRC_CONTROLLER_STOPPED_EVENT);
1672                 spin_unlock_irqrestore(&ci->lock, flags);
1673                 _gadget_stop_activity(&ci->gadget);
1674                 spin_lock_irqsave(&ci->lock, flags);
1675                 pm_runtime_put(&ci->gadget.dev);
1676         }
1677
1678         ci->driver = NULL;
1679         spin_unlock_irqrestore(&ci->lock, flags);
1680
1681         return 0;
1682 }
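/*
 * Note on the locking above: ci->lock is dropped around
 * _gadget_stop_activity(), presumably because flushing the endpoints and
 * completing outstanding requests calls back into gadget driver code that
 * may re-enter this UDC and take ci->lock again.
 */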
1683
1684 /******************************************************************************
1685  * BUS block
1686  *****************************************************************************/
1687 /**
1688  * udc_irq: ci interrupt handler
1689  *
1690  * This function returns IRQ_HANDLED if the IRQ has been handled.
1691  * It locks access to the registers.
1692  */
1693 static irqreturn_t udc_irq(struct ci_hdrc *ci)
1694 {
1695         irqreturn_t retval;
1696         u32 intr;
1697
1698         if (ci == NULL)
1699                 return IRQ_HANDLED;
1700
1701         spin_lock(&ci->lock);
1702
1703         if (ci->platdata->flags & CI_HDRC_REGS_SHARED) {
1704                 if (hw_read(ci, OP_USBMODE, USBMODE_CM) !=
1705                                 USBMODE_CM_DC) {
1706                         spin_unlock(&ci->lock);
1707                         return IRQ_NONE;
1708                 }
1709         }
1710         intr = hw_test_and_clear_intr_active(ci);
1711
1712         if (intr) {
1713                 /* order defines priority - do NOT change it */
1714                 if (USBi_URI & intr)
1715                         isr_reset_handler(ci);
1716
1717                 if (USBi_PCI & intr) {
1718                         ci->gadget.speed = hw_port_is_high_speed(ci) ?
1719                                 USB_SPEED_HIGH : USB_SPEED_FULL;
1720                         if (ci->suspended && ci->driver->resume) {
1721                                 spin_unlock(&ci->lock);
1722                                 ci->driver->resume(&ci->gadget);
1723                                 spin_lock(&ci->lock);
1724                                 ci->suspended = 0;
1725                         }
1726                 }
1727
1728                 if (USBi_UI  & intr)
1729                         isr_tr_complete_handler(ci);
1730
1731                 if (USBi_SLI & intr) {
1732                         if (ci->gadget.speed != USB_SPEED_UNKNOWN &&
1733                             ci->driver->suspend) {
1734                                 ci->suspended = 1;
1735                                 spin_unlock(&ci->lock);
1736                                 ci->driver->suspend(&ci->gadget);
1737                                 spin_lock(&ci->lock);
1738                         }
1739                 }
1740                 retval = IRQ_HANDLED;
1741         } else {
1742                 retval = IRQ_NONE;
1743         }
1744         spin_unlock(&ci->lock);
1745
1746         return retval;
1747 }
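/*
 * Dispatch summary: the order above is the priority, i.e. USBi_URI (bus
 * reset), then USBi_PCI (port change, used to pick up the negotiated speed
 * and to resume), then USBi_UI (transfer completion), then USBi_SLI
 * (suspend).  ci->lock is released around the gadget driver's resume() and
 * suspend() callbacks, presumably so they can call back into the UDC
 * without deadlocking on ci->lock.
 */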
1748
1749 /**
1750  * udc_start: initialize gadget role
1751  * @ci: chipidea controller
1752  */
1753 static int udc_start(struct ci_hdrc *ci)
1754 {
1755         struct device *dev = ci->dev;
1756         int retval = 0;
1757
1758         spin_lock_init(&ci->lock);
1759
1760         ci->gadget.ops          = &usb_gadget_ops;
1761         ci->gadget.speed        = USB_SPEED_UNKNOWN;
1762         ci->gadget.max_speed    = USB_SPEED_HIGH;
1763         ci->gadget.is_otg       = 0;
1764         ci->gadget.name         = ci->platdata->name;
1765
1766         INIT_LIST_HEAD(&ci->gadget.ep_list);
1767
1768         /* alloc resources */
1769         ci->qh_pool = dma_pool_create("ci_hw_qh", dev,
1770                                        sizeof(struct ci_hw_qh),
1771                                        64, CI_HDRC_PAGE_SIZE);
1772         if (ci->qh_pool == NULL)
1773                 return -ENOMEM;
1774
1775         ci->td_pool = dma_pool_create("ci_hw_td", dev,
1776                                        sizeof(struct ci_hw_td),
1777                                        64, CI_HDRC_PAGE_SIZE);
1778         if (ci->td_pool == NULL) {
1779                 retval = -ENOMEM;
1780                 goto free_qh_pool;
1781         }
1782
1783         retval = init_eps(ci);
1784         if (retval)
1785                 goto free_pools;
1786
1787         ci->gadget.ep0 = &ci->ep0in->ep;
1788
1789         if (ci->global_phy) {
1790                 ci->transceiver = usb_get_phy(USB_PHY_TYPE_USB2);
1791                 if (IS_ERR(ci->transceiver))
1792                         ci->transceiver = NULL;
1793         }
1794
1795         if (ci->platdata->flags & CI_HDRC_REQUIRE_TRANSCEIVER) {
1796                 if (ci->transceiver == NULL) {
1797                         retval = -ENODEV;
1798                         goto destroy_eps;
1799                 }
1800         }
1801
1802         if (ci->transceiver) {
1803                 retval = otg_set_peripheral(ci->transceiver->otg,
1804                                                 &ci->gadget);
1805                 /*
1806                  * If all USB functions are implemented by chipidea drivers
1807                  * there is no need for this call; if only the gadget role is
1808                  * used it does nothing, so -ENOTSUPP is tolerated below.
1809                  */
1810                 if (retval && retval != -ENOTSUPP)
1811                         goto put_transceiver;
1812         }
1813
1814         retval = usb_add_gadget_udc(dev, &ci->gadget);
1815         if (retval)
1816                 goto remove_trans;
1817
1818         pm_runtime_no_callbacks(&ci->gadget.dev);
1819         pm_runtime_enable(&ci->gadget.dev);
1820
1821         /* Update ci->vbus_active */
1822         ci_handle_vbus_change(ci);
1823
1824         return retval;
1825
1826 remove_trans:
1827         /* the PHY reference is dropped once, at put_transceiver below */
1828         if (ci->transceiver)
1829                 otg_set_peripheral(ci->transceiver->otg, NULL);
1832
1833         dev_err(dev, "error = %i\n", retval);
1834 put_transceiver:
1835         if (ci->transceiver && ci->global_phy)
1836                 usb_put_phy(ci->transceiver);
1837 destroy_eps:
1838         destroy_eps(ci);
1839 free_pools:
1840         dma_pool_destroy(ci->td_pool);
1841 free_qh_pool:
1842         dma_pool_destroy(ci->qh_pool);
1843         return retval;
1844 }
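/*
 * Note (assumption about the hardware constraints): both DMA pools above
 * are created with 64-byte alignment and CI_HDRC_PAGE_SIZE as the
 * boundary, so queue heads and transfer descriptors meet the controller's
 * alignment rules and never straddle a controller page.
 */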
1845
1846 /**
1847  * ci_hdrc_gadget_destroy: parent remove must call this to remove UDC
1848  *
1849  * No interrupts are active; the IRQ has already been released.
1850  */
1851 void ci_hdrc_gadget_destroy(struct ci_hdrc *ci)
1852 {
1853         if (!ci->roles[CI_ROLE_GADGET])
1854                 return;
1855
1856         usb_del_gadget_udc(&ci->gadget);
1857
1858         destroy_eps(ci);
1859
1860         dma_pool_destroy(ci->td_pool);
1861         dma_pool_destroy(ci->qh_pool);
1862
1863         if (ci->transceiver) {
1864                 otg_set_peripheral(ci->transceiver->otg, NULL);
1865                 if (ci->global_phy)
1866                         usb_put_phy(ci->transceiver);
1867         }
1868 }
1869
1870 static int udc_id_switch_for_device(struct ci_hdrc *ci)
1871 {
1872         if (ci->is_otg) {
1873                 ci_clear_otg_interrupt(ci, OTGSC_BSVIS);
1874                 ci_enable_otg_interrupt(ci, OTGSC_BSVIE);
1875         }
1876
1877         return 0;
1878 }
1879
1880 static void udc_id_switch_for_host(struct ci_hdrc *ci)
1881 {
1882         if (ci->is_otg) {
1883                 /* the host doesn't care about the B_SESSION_VALID event */
1884                 ci_clear_otg_interrupt(ci, OTGSC_BSVIS);
1885                 ci_disable_otg_interrupt(ci, OTGSC_BSVIE);
1886         }
1887 }
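/*
 * Note: on OTG-capable cores the B-session-valid (BSV) interrupt is how
 * VBUS changes are noticed, so it is armed when switching to the device
 * role (udc_id_switch_for_device) and disarmed again for the host role,
 * where the event carries no useful information.
 */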
1888
1889 /**
1890  * ci_hdrc_gadget_init - initialize device related bits
1891  * @ci: the controller
1892  *
1893  * This function initializes the gadget if the controller is device capable.
1894  */
1895 int ci_hdrc_gadget_init(struct ci_hdrc *ci)
1896 {
1897         struct ci_role_driver *rdrv;
1898
1899         if (!hw_read(ci, CAP_DCCPARAMS, DCCPARAMS_DC))
1900                 return -ENXIO;
1901
1902         rdrv = devm_kzalloc(ci->dev, sizeof(struct ci_role_driver), GFP_KERNEL);
1903         if (!rdrv)
1904                 return -ENOMEM;
1905
1906         rdrv->start     = udc_id_switch_for_device;
1907         rdrv->stop      = udc_id_switch_for_host;
1908         rdrv->irq       = udc_irq;
1909         rdrv->name      = "gadget";
1910         ci->roles[CI_ROLE_GADGET] = rdrv;
1911
1912         return udc_start(ci);
1913 }
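/*
 * Usage sketch (illustrative only, the real dispatch lives in the core
 * code): the role registered above is expected to be driven roughly as
 *
 *	ret = ci->roles[CI_ROLE_GADGET]->start(ci);
 *	...
 *	handled = ci->roles[ci->role]->irq(ci);		from the shared ISR
 *	...
 *	ci->roles[CI_ROLE_GADGET]->stop(ci);
 */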