/*
 * Wireless Host Controller (WHC) qset management.
 *
 * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/uwb/umc.h>
#include <linux/usb.h>

#include "../../wusbcore/wusbhc.h"

#include "whcd.h"

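/**
 * qset_alloc - allocate a zeroed qset from the HC's DMA pool.
 *
 * The static endpoint state is not filled in here; see
 * qset_fill_qh().
 */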
struct whc_qset *qset_alloc(struct whc *whc, gfp_t mem_flags)
{
        struct whc_qset *qset;
        dma_addr_t dma;

        qset = dma_pool_alloc(whc->qset_pool, mem_flags, &dma);
        if (qset == NULL)
                return NULL;
        memset(qset, 0, sizeof(struct whc_qset));

        qset->qset_dma = dma;
        qset->whc = whc;

        INIT_LIST_HEAD(&qset->list_node);
        INIT_LIST_HEAD(&qset->stds);

        return qset;
}

/**
 * qset_fill_qh - fill the static endpoint state in a qset's QHead
 * @qset: the qset whose QH needs initializing with static endpoint
 *        state
 * @urb:  an urb for a transfer to this endpoint
 */
static void qset_fill_qh(struct whc_qset *qset, struct urb *urb)
{
        struct usb_device *usb_dev = urb->dev;
        struct usb_wireless_ep_comp_descriptor *epcd;
        bool is_out;

        is_out = usb_pipeout(urb->pipe);

        epcd = (struct usb_wireless_ep_comp_descriptor *)qset->ep->extra;

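        /*
         * Use the wireless endpoint companion descriptor if the
         * endpoint provides one, otherwise fall back to conservative
         * defaults (a sequence space of 2 and single packet bursts).
         */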
        if (epcd) {
                qset->max_seq = epcd->bMaxSequence;
                qset->max_burst = epcd->bMaxBurst;
        } else {
                qset->max_seq = 2;
                qset->max_burst = 1;
        }

        qset->qh.info1 = cpu_to_le32(
                QH_INFO1_EP(usb_pipeendpoint(urb->pipe))
                | (is_out ? QH_INFO1_DIR_OUT : QH_INFO1_DIR_IN)
                | usb_pipe_to_qh_type(urb->pipe)
                | QH_INFO1_DEV_INFO_IDX(wusb_port_no_to_idx(usb_dev->portnum))
                | QH_INFO1_MAX_PKT_LEN(usb_maxpacket(urb->dev, urb->pipe, is_out))
                );
        qset->qh.info2 = cpu_to_le32(
                QH_INFO2_BURST(qset->max_burst)
                | QH_INFO2_DBP(0)
                | QH_INFO2_MAX_COUNT(3)
                | QH_INFO2_MAX_RETRY(3)
                | QH_INFO2_MAX_SEQ(qset->max_seq - 1)
                );
        /* FIXME: where can we obtain these Tx parameters from?  Why
         * doesn't the chip know what Tx power to use? It knows the Rx
         * strength and can presumably guess the Tx power required
         * from that? */
        qset->qh.info3 = cpu_to_le32(
                QH_INFO3_TX_RATE_53_3
                | QH_INFO3_TX_PWR(0) /* 0 == max power */
                );

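        /* Open the window so an initial burst of max_burst packets is allowed. */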
        qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1);
}

/**
 * qset_clear - clear fields in a qset so it may be reinserted into a
 * schedule.
 *
 * The sequence number and current window are not cleared (see
 * qset_reset()).
 */
void qset_clear(struct whc *whc, struct whc_qset *qset)
{
        qset->td_start = qset->td_end = qset->ntds = 0;
        qset->remove = 0;

        qset->qh.link = cpu_to_le32(QH_LINK_NTDS(8) | QH_LINK_T);
        qset->qh.status &= QH_STATUS_SEQ_MASK;
        qset->qh.err_count = 0;
        qset->qh.scratch[0] = 0;
        qset->qh.scratch[1] = 0;
        qset->qh.scratch[2] = 0;

        memset(&qset->qh.overlay, 0, sizeof(qset->qh.overlay));

        init_completion(&qset->remove_complete);
}

/**
 * qset_reset - reset endpoint state in a qset.
 *
 * Clears the sequence number and current window.  This qset must not
 * be in the ASL or PZL.
 */
void qset_reset(struct whc *whc, struct whc_qset *qset)
{
        wait_for_completion(&qset->remove_complete);

        qset->qh.status &= ~QH_STATUS_SEQ_MASK;
        qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1);
}

/**
 * get_qset - get the qset for an async endpoint
 *
 * A new qset is created if one does not already exist.
 */
struct whc_qset *get_qset(struct whc *whc, struct urb *urb,
                                 gfp_t mem_flags)
{
        struct whc_qset *qset;

        qset = urb->ep->hcpriv;
        if (qset == NULL) {
                qset = qset_alloc(whc, mem_flags);
                if (qset == NULL)
                        return NULL;

                qset->ep = urb->ep;
                urb->ep->hcpriv = qset;
                qset_fill_qh(qset, urb);
        }
        return qset;
}

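/**
 * qset_remove_complete - a qset has been removed from a schedule
 *
 * Unlink the qset and wake any waiters (see qset_reset() and
 * qset_delete()).
 */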
void qset_remove_complete(struct whc *whc, struct whc_qset *qset)
{
        list_del_init(&qset->list_node);
        complete(&qset->remove_complete);
}

/**
 * qset_add_qtds - add qTDs for an URB to a qset
 *
 * Returns WHC_UPDATE_UPDATED if the list (ASL/PZL) must be updated
 * because (for a WHCI 0.95 controller) an activated qTD was pointed
 * to by iCur.
 */
enum whc_update qset_add_qtds(struct whc *whc, struct whc_qset *qset)
{
        struct whc_std *std;
        enum whc_update update = 0;

        list_for_each_entry(std, &qset->stds, list_node) {
                struct whc_qtd *qtd;
                uint32_t status;

                if (qset->ntds >= WHCI_QSET_TD_MAX
                    || (qset->pause_after_urb && std->urb != qset->pause_after_urb))
                        break;

                if (std->qtd)
                        continue; /* already has a qTD */

                qtd = std->qtd = &qset->qtd[qset->td_end];

                /* Fill in setup bytes for control transfers. */
                if (usb_pipecontrol(std->urb->pipe))
                        memcpy(qtd->setup, std->urb->setup_packet, 8);

                status = QTD_STS_ACTIVE | QTD_STS_LEN(std->len);

                if (whc_std_last(std) && usb_pipeout(std->urb->pipe))
                        status |= QTD_STS_LAST_PKT;

                /*
                 * For an IN transfer the iAlt field should be set so
                 * the h/w will automatically advance to the next
                 * transfer. However, if there are 8 or more TDs
                 * remaining in this transfer then iAlt cannot be set
                 * as it could point to somewhere in this transfer.
                 */
                if (std->ntds_remaining < WHCI_QSET_TD_MAX) {
                        int ialt;
                        ialt = (qset->td_end + std->ntds_remaining) % WHCI_QSET_TD_MAX;
                        status |= QTD_STS_IALT(ialt);
                } else if (usb_pipein(std->urb->pipe))
                        qset->pause_after_urb = std->urb;

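                /*
                 * A "small" qTD has no page list: page_list_ptr is
                 * then the buffer address itself (std->dma_addr).
                 */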
                if (std->num_pointers)
                        qtd->options = cpu_to_le32(QTD_OPT_IOC);
                else
                        qtd->options = cpu_to_le32(QTD_OPT_IOC | QTD_OPT_SMALL);
                qtd->page_list_ptr = cpu_to_le64(std->dma_addr);

                qtd->status = cpu_to_le32(status);

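                /*
                 * If the hardware's current qTD pointer (iCur) is
                 * left pointing at the slot just activated, the
                 * schedule must be updated (see the comment above
                 * this function).
                 */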
                if (QH_STATUS_TO_ICUR(qset->qh.status) == qset->td_end)
                        update = WHC_UPDATE_UPDATED;

                if (++qset->td_end >= WHCI_QSET_TD_MAX)
                        qset->td_end = 0;
                qset->ntds++;
        }

        return update;
}

/**
 * qset_remove_qtd - remove the first qTD from a qset.
 *
 * The qTD might still be active (if it's part of an IN URB that
 * resulted in a short read) so ensure it's deactivated.
 */
static void qset_remove_qtd(struct whc *whc, struct whc_qset *qset)
{
        qset->qtd[qset->td_start].status = 0;

        if (++qset->td_start >= WHCI_QSET_TD_MAX)
                qset->td_start = 0;
        qset->ntds--;
}

/**
 * qset_free_std - remove an sTD and free it.
 * @whc: the WHCI host controller
 * @std: the sTD to remove and free.
 */
void qset_free_std(struct whc *whc, struct whc_std *std)
{
        list_del(&std->list_node);
        if (std->num_pointers) {
                dma_unmap_single(whc->wusbhc.dev, std->dma_addr,
                                 std->num_pointers * sizeof(struct whc_page_list_entry),
                                 DMA_TO_DEVICE);
                kfree(std->pl_virt);
        }

        kfree(std);
}

/**
 * qset_remove_qtds - remove an URB's qTDs (and sTDs).
 */
static void qset_remove_qtds(struct whc *whc, struct whc_qset *qset,
                             struct urb *urb)
{
        struct whc_std *std, *t;

        list_for_each_entry_safe(std, t, &qset->stds, list_node) {
                if (std->urb != urb)
                        break;
                if (std->qtd != NULL)
                        qset_remove_qtd(whc, qset);
                qset_free_std(whc, std);
        }
}

/**
 * qset_free_stds - free any remaining sTDs for an URB.
 */
static void qset_free_stds(struct whc_qset *qset, struct urb *urb)
{
        struct whc_std *std, *t;

        list_for_each_entry_safe(std, t, &qset->stds, list_node) {
                if (std->urb == urb)
                        qset_free_std(qset->whc, std);
        }
}

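/**
 * qset_fill_page_list - build the page list for an sTD's buffer
 *
 * The buffer may begin at any offset within its first page; every
 * subsequent page list entry must be page aligned.  The page list
 * itself is mapped for DMA and std->dma_addr is repointed at it.
 */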
static int qset_fill_page_list(struct whc *whc, struct whc_std *std, gfp_t mem_flags)
{
        dma_addr_t dma_addr = std->dma_addr;
        dma_addr_t sp, ep;
        size_t std_len = std->len;
        size_t pl_len;
        int p;

        /* Count every page the buffer touches, including a partial first page. */
        sp = dma_addr & ~(WHCI_PAGE_SIZE-1);
        ep = dma_addr + std_len;
        std->num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE);

        pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
        std->pl_virt = kmalloc(pl_len, mem_flags);
        if (std->pl_virt == NULL)
                return -ENOMEM;
        std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt, pl_len, DMA_TO_DEVICE);
        if (dma_mapping_error(whc->wusbhc.dev, std->dma_addr)) {
                kfree(std->pl_virt);
                return -EFAULT;
        }

        /* The first entry may point into a page; the rest are page aligned. */
        for (p = 0; p < std->num_pointers; p++) {
                std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
                dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(WHCI_PAGE_SIZE-1);
        }

        return 0;
}

/**
 * urb_dequeue_work - execute an ASL/PZL update and give the urb back
 * to the USB subsystem.
 */
static void urb_dequeue_work(struct work_struct *work)
{
        struct whc_urb *wurb = container_of(work, struct whc_urb, dequeue_work);
        struct whc_qset *qset = wurb->qset;
        struct whc *whc = qset->whc;
        unsigned long flags;

        if (wurb->is_async)
                asl_update(whc, WUSBCMD_ASYNC_UPDATED
                           | WUSBCMD_ASYNC_SYNCED_DB
                           | WUSBCMD_ASYNC_QSET_RM);
        else
                pzl_update(whc, WUSBCMD_PERIODIC_UPDATED
                           | WUSBCMD_PERIODIC_SYNCED_DB
                           | WUSBCMD_PERIODIC_QSET_RM);

        spin_lock_irqsave(&whc->lock, flags);
        qset_remove_urb(whc, qset, wurb->urb, wurb->status);
        spin_unlock_irqrestore(&whc->lock, flags);
}

/**
 * qset_add_urb - add an urb to the qset's queue.
 *
 * The URB is chopped into sTDs, one for each qTD that will be
 * required.  At least one qTD (and sTD) is required even if the
 * transfer has no data (e.g., for some control transfers).
 */
int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb,
        gfp_t mem_flags)
{
        struct whc_urb *wurb;
        int remaining = urb->transfer_buffer_length;
        u64 transfer_dma = urb->transfer_dma;
        int ntds_remaining;

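        /* A transfer with no data still needs a single qTD (and sTD). */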
        ntds_remaining = DIV_ROUND_UP(remaining, QTD_MAX_XFER_SIZE);
        if (ntds_remaining == 0)
                ntds_remaining = 1;

        wurb = kzalloc(sizeof(struct whc_urb), mem_flags);
        if (wurb == NULL)
                goto err_no_mem;
        urb->hcpriv = wurb;
        wurb->qset = qset;
        wurb->urb = urb;
        INIT_WORK(&wurb->dequeue_work, urb_dequeue_work);

        while (ntds_remaining) {
                struct whc_std *std;
                size_t std_len;

                std = kmalloc(sizeof(struct whc_std), mem_flags);
                if (std == NULL)
                        goto err_no_mem;

                std_len = remaining;
                if (std_len > QTD_MAX_XFER_SIZE)
                        std_len = QTD_MAX_XFER_SIZE;

                std->urb = urb;
                std->dma_addr = transfer_dma;
                std->len = std_len;
                std->ntds_remaining = ntds_remaining;
                std->qtd = NULL;

                INIT_LIST_HEAD(&std->list_node);
                list_add_tail(&std->list_node, &qset->stds);

                if (std_len > WHCI_PAGE_SIZE) {
                        if (qset_fill_page_list(whc, std, mem_flags) < 0)
                                goto err_no_mem;
                } else
                        std->num_pointers = 0;

                ntds_remaining--;
                remaining -= std_len;
                transfer_dma += std_len;
        }

        return 0;

err_no_mem:
        qset_free_stds(qset, urb);
        return -ENOMEM;
}

/**
 * qset_remove_urb - remove an URB from the urb queue.
 *
 * The URB is returned to the USB subsystem.
 */
void qset_remove_urb(struct whc *whc, struct whc_qset *qset,
                            struct urb *urb, int status)
{
        struct wusbhc *wusbhc = &whc->wusbhc;
        struct whc_urb *wurb = urb->hcpriv;

        usb_hcd_unlink_urb_from_ep(&wusbhc->usb_hcd, urb);
        /* Drop the lock as urb->complete() may enqueue another urb. */
        spin_unlock(&whc->lock);
        wusbhc_giveback_urb(wusbhc, urb, status);
        spin_lock(&whc->lock);

        kfree(wurb);
}

/**
 * get_urb_status_from_qtd - get the completed urb status from qTD status
 * @urb:    completed urb
 * @status: qTD status
 */
static int get_urb_status_from_qtd(struct urb *urb, u32 status)
{
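        /*
         * Map qTD error bits onto errnos: DBE becomes -ENOSR (IN) or
         * -ECOMM (OUT), babble -EOVERFLOW, RCE -ETIME, and any other
         * halt -EPIPE.
         */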
        if (status & QTD_STS_HALTED) {
                if (status & QTD_STS_DBE)
                        return usb_pipein(urb->pipe) ? -ENOSR : -ECOMM;
                else if (status & QTD_STS_BABBLE)
                        return -EOVERFLOW;
                else if (status & QTD_STS_RCE)
                        return -ETIME;
                return -EPIPE;
        }
        if (usb_pipein(urb->pipe)
            && (urb->transfer_flags & URB_SHORT_NOT_OK)
            && urb->actual_length < urb->transfer_buffer_length)
                return -EREMOTEIO;
        return 0;
}

/**
 * process_inactive_qtd - process an inactive (but not halted) qTD.
 *
 * Update the urb with the number of bytes transferred by the qTD.  If
 * the urb is completely transferred, or (for an IN transfer) the last
 * packet flag (LPF) is set, then the transfer is complete and the urb
 * should be returned to the system.
 */
void process_inactive_qtd(struct whc *whc, struct whc_qset *qset,
                                 struct whc_qtd *qtd)
{
        struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node);
        struct urb *urb = std->urb;
        uint32_t status;
        bool complete;

        status = le32_to_cpu(qtd->status);

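        /* The qTD's length field now holds the bytes left untransferred. */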
        urb->actual_length += std->len - QTD_STS_TO_LEN(status);

        if (usb_pipein(urb->pipe) && (status & QTD_STS_LAST_PKT))
                complete = true;
        else
                complete = whc_std_last(std);

        qset_remove_qtd(whc, qset);
        qset_free_std(whc, std);

        /*
         * If all transfers for this URB are complete, return it to
         * the USB subsystem.
         */
        if (complete) {
                qset_remove_qtds(whc, qset, urb);
                qset_remove_urb(whc, qset, urb, get_urb_status_from_qtd(urb, status));

                /*
                 * If iAlt isn't valid then the hardware didn't
                 * advance iCur. Adjust the start and end pointers to
                 * match iCur.
                 */
                if (!(status & QTD_STS_IALT_VALID))
                        qset->td_start = qset->td_end
                                = QH_STATUS_TO_ICUR(le16_to_cpu(qset->qh.status));
                qset->pause_after_urb = NULL;
        }
}

/**
 * process_halted_qtd - process a qset with a halted qtd
 *
 * Remove all the qTDs for the failed URB and return the failed URB to
 * the USB subsystem.  Then remove all other qTDs so the qset can be
 * removed.
 *
 * FIXME: this is the point where rate adaptation can be done.  If a
 * transfer failed because it exceeded the maximum number of retries
 * then it could be reactivated with a slower rate without having to
 * remove the qset.
 */
void process_halted_qtd(struct whc *whc, struct whc_qset *qset,
                               struct whc_qtd *qtd)
{
        struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node);
        struct urb *urb = std->urb;
        int urb_status;

        urb_status = get_urb_status_from_qtd(urb, le32_to_cpu(qtd->status));

        qset_remove_qtds(whc, qset, urb);
        qset_remove_urb(whc, qset, urb, urb_status);

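        /* Deactivate any remaining qTDs so the qset can be removed. */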
        list_for_each_entry(std, &qset->stds, list_node) {
                if (qset->ntds == 0)
                        break;
                qset_remove_qtd(whc, qset);
                std->qtd = NULL;
        }

        qset->remove = 1;
}

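/**
 * qset_free - return a qset to the HC's DMA pool.
 *
 * The qset must no longer be in use by the hardware; use
 * qset_delete() to wait for its removal to complete first.
 */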
void qset_free(struct whc *whc, struct whc_qset *qset)
{
        dma_pool_free(whc->qset_pool, qset, qset->qset_dma);
}

/**
 * qset_delete - wait for a qset to be unused, then free it.
 */
void qset_delete(struct whc *whc, struct whc_qset *qset)
{
        wait_for_completion(&qset->remove_complete);
        qset_free(whc, qset);
}