/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/usb.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/dmapool.h>

#include "xhci.h"
/*
 * Allocates a generic ring segment from the ring pool, sets the dma address,
 * initializes the segment to zero, and sets the private next pointer to NULL.
 *
 * Section 4.11.1.1:
 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
 */
static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flags)
{
	struct xhci_segment *seg;
	dma_addr_t	dma;

	seg = kzalloc(sizeof *seg, flags);
	if (!seg)
		return NULL;
	xhci_dbg(xhci, "Allocating priv segment structure at %p\n", seg);

	seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma);
	if (!seg->trbs) {
		kfree(seg);
		return NULL;
	}
	xhci_dbg(xhci, "// Allocating segment at %p (virtual) 0x%llx (DMA)\n",
			seg->trbs, (unsigned long long)dma);

	memset(seg->trbs, 0, SEGMENT_SIZE);
	seg->dma = dma;
	seg->next = NULL;

	return seg;
}
static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
{
	if (!seg)
		return;
	if (seg->trbs) {
		xhci_dbg(xhci, "Freeing DMA segment at %p (virtual) 0x%llx (DMA)\n",
				seg->trbs, (unsigned long long)seg->dma);
		dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
		seg->trbs = NULL;
	}
	xhci_dbg(xhci, "Freeing priv segment structure at %p\n", seg);
	kfree(seg);
}
/*
 * Make the prev segment point to the next segment.
 *
 * Change the last TRB in the prev segment to be a Link TRB which points to the
 * DMA address of the next segment.  The caller needs to set any Link TRB
 * related flags, such as End TRB, Toggle Cycle, and no snoop.
 */
static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
		struct xhci_segment *next, bool link_trbs)
{
	u32 val;

	if (!prev || !next)
		return;
	prev->next = next;
	if (link_trbs) {
		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr = next->dma;

		/* Set the last TRB in the segment to have a TRB type ID of Link TRB */
		val = prev->trbs[TRBS_PER_SEGMENT-1].link.control;
		val &= ~TRB_TYPE_BITMASK;
		val |= TRB_TYPE(TRB_LINK);
		/* Always set the chain bit with 0.95 hardware */
		if (xhci_link_trb_quirk(xhci))
			val |= TRB_CHAIN;
		prev->trbs[TRBS_PER_SEGMENT-1].link.control = val;
	}
	xhci_dbg(xhci, "Linking segment 0x%llx to segment 0x%llx (DMA)\n",
			(unsigned long long)prev->dma,
			(unsigned long long)next->dma);
}
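/*
 * Illustrative note (not from the xHCI spec): after xhci_link_segments(),
 * only the link TRB's segment pointer and TRB type (plus the 0.95 chain bit)
 * are filled in.  A caller that closes a ring, such as xhci_ring_alloc()
 * below, must still OR in LINK_TOGGLE on the last segment's link TRB so the
 * producer and consumer flip their cycle state on wrap.
 */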
/* XXX: Do we need the hcd structure in all these functions? */
void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	struct xhci_segment *seg;
	struct xhci_segment *first_seg;

	if (!ring || !ring->first_seg)
		return;
	first_seg = ring->first_seg;
	seg = first_seg->next;
	xhci_dbg(xhci, "Freeing ring at %p\n", ring);
	while (seg != first_seg) {
		struct xhci_segment *next = seg->next;
		xhci_segment_free(xhci, seg);
		seg = next;
	}
	xhci_segment_free(xhci, first_seg);
	ring->first_seg = NULL;
	kfree(ring);
}
static void xhci_initialize_ring_info(struct xhci_ring *ring)
{
	/* The ring is empty, so the enqueue pointer == dequeue pointer */
	ring->enqueue = ring->first_seg->trbs;
	ring->enq_seg = ring->first_seg;
	ring->dequeue = ring->enqueue;
	ring->deq_seg = ring->first_seg;
	/* The ring is initialized to 0.  The producer must write 1 to the cycle
	 * bit to handover ownership of the TRB, so PCS = 1.  The consumer must
	 * compare CCS to the cycle bit to check ownership, so CCS = 1.
	 */
	ring->cycle_state = 1;
	/* Not necessary for new rings, but needed for re-initialized rings */
	ring->enq_updates = 0;
	ring->deq_updates = 0;
}
/*
 * Create a new ring with zero or more segments.
 *
 * Link each segment together into a ring.
 * Set the end flag and the cycle toggle bit on the last segment.
 * See section 4.9.1 and figures 15 and 16.
 */
static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
		unsigned int num_segs, bool link_trbs, gfp_t flags)
{
	struct xhci_ring	*ring;
	struct xhci_segment	*prev;

	ring = kzalloc(sizeof *(ring), flags);
	xhci_dbg(xhci, "Allocating ring at %p\n", ring);
	if (!ring)
		return NULL;

	INIT_LIST_HEAD(&ring->td_list);
	if (num_segs == 0)
		return ring;

	ring->first_seg = xhci_segment_alloc(xhci, flags);
	if (!ring->first_seg)
		goto fail;
	num_segs--;

	prev = ring->first_seg;
	while (num_segs > 0) {
		struct xhci_segment	*next;

		next = xhci_segment_alloc(xhci, flags);
		if (!next)
			goto fail;
		xhci_link_segments(xhci, prev, next, link_trbs);

		prev = next;
		num_segs--;
	}
	xhci_link_segments(xhci, prev, ring->first_seg, link_trbs);

	if (link_trbs) {
		/* See section 4.9.2.1 and 6.4.4.1 */
		prev->trbs[TRBS_PER_SEGMENT-1].link.control |= (LINK_TOGGLE);
		xhci_dbg(xhci, "Wrote link toggle flag to"
				" segment %p (virtual), 0x%llx (DMA)\n",
				prev, (unsigned long long)prev->dma);
	}
	xhci_initialize_ring_info(ring);
	return ring;

fail:
	xhci_ring_free(xhci, ring);
	return NULL;
}
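/*
 * Sketch of the layout xhci_ring_alloc(xhci, 2, true, flags) would produce,
 * with two hypothetical segments A and B (illustrative only):
 *
 *	A->trbs[0 .. TRBS_PER_SEGMENT-2]	normal TRBs
 *	A->trbs[TRBS_PER_SEGMENT-1]		link TRB -> B->dma
 *	B->trbs[0 .. TRBS_PER_SEGMENT-2]	normal TRBs
 *	B->trbs[TRBS_PER_SEGMENT-1]		link TRB -> A->dma, LINK_TOGGLE
 *
 * Enqueue and dequeue both start at A->trbs[0] with cycle_state = 1.
 */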
void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		unsigned int ep_index)
{
	int rings_cached;

	rings_cached = virt_dev->num_rings_cached;
	if (rings_cached < XHCI_MAX_RINGS_CACHED) {
		virt_dev->ring_cache[rings_cached] =
			virt_dev->eps[ep_index].ring;
		virt_dev->num_rings_cached++;
		rings_cached = virt_dev->num_rings_cached;
		xhci_dbg(xhci, "Cached old ring, "
				"%d ring%s cached\n",
				rings_cached,
				(rings_cached > 1) ? "s" : "");
	} else {
		xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
		xhci_dbg(xhci, "Ring cache full (%d rings), "
				"freeing ring\n",
				virt_dev->num_rings_cached);
	}
	virt_dev->eps[ep_index].ring = NULL;
}
/* Zero an endpoint ring (except for link TRBs) and move the enqueue and dequeue
 * pointers to the beginning of the ring.
 */
static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
		struct xhci_ring *ring)
{
	struct xhci_segment	*seg = ring->first_seg;
	do {
		memset(seg->trbs, 0,
				sizeof(union xhci_trb)*TRBS_PER_SEGMENT);
		/* All endpoint rings have link TRBs */
		xhci_link_segments(xhci, seg, seg->next, true);
		seg = seg->next;
	} while (seg != ring->first_seg);
	xhci_initialize_ring_info(ring);
	/* td list should be empty since all URBs have been cancelled,
	 * but just in case...
	 */
	INIT_LIST_HEAD(&ring->td_list);
}
#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)

static struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
		int type, gfp_t flags)
{
	struct xhci_container_ctx *ctx = kzalloc(sizeof(*ctx), flags);
	if (!ctx)
		return NULL;

	BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT));
	ctx->type = type;
	ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
	if (type == XHCI_CTX_TYPE_INPUT)
		ctx->size += CTX_SIZE(xhci->hcc_params);

	ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma);
	if (!ctx->bytes) {
		kfree(ctx);
		return NULL;
	}
	memset(ctx->bytes, 0, ctx->size);
	return ctx;
}
static void xhci_free_container_ctx(struct xhci_hcd *xhci,
		struct xhci_container_ctx *ctx)
{
	if (!ctx)
		return;
	dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
	kfree(ctx);
}
struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci,
		struct xhci_container_ctx *ctx)
{
	BUG_ON(ctx->type != XHCI_CTX_TYPE_INPUT);
	return (struct xhci_input_control_ctx *)ctx->bytes;
}
struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci,
		struct xhci_container_ctx *ctx)
{
	if (ctx->type == XHCI_CTX_TYPE_DEVICE)
		return (struct xhci_slot_ctx *)ctx->bytes;

	return (struct xhci_slot_ctx *)
		(ctx->bytes + CTX_SIZE(xhci->hcc_params));
}
struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
		struct xhci_container_ctx *ctx,
		unsigned int ep_index)
{
	/* increment ep index by offset of start of ep ctx array */
	ep_index++;
	if (ctx->type == XHCI_CTX_TYPE_INPUT)
		ep_index++;

	return (struct xhci_ep_ctx *)
		(ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
}
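/*
 * Context layout implied by the accessors above (32-byte contexts shown; the
 * 64-byte case is identical with CTX_SIZE() doubled):
 *
 *	device context:	[slot][ep0][ep1 out][ep1 in]...	ep ctx at (ep_index + 1)
 *	input context:	[input control][slot][ep0]...	ep ctx at (ep_index + 2)
 */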
/***************** Streams structures manipulation *************************/

void xhci_free_stream_ctx(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs,
		struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
{
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
		pci_free_consistent(pdev,
				sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
				stream_ctx, dma);
	else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
		dma_pool_free(xhci->small_streams_pool,
				stream_ctx, dma);
	else
		dma_pool_free(xhci->medium_streams_pool,
				stream_ctx, dma);
}
/*
 * The stream context array for each endpoint with bulk streams enabled can
 * vary in size, based on:
 *  - how many streams the endpoint supports,
 *  - the maximum primary stream array size the host controller supports,
 *  - and how many streams the device driver asks for.
 *
 * The stream context array must be a power of 2, and can be as small as
 * 64 bytes or as large as 1MB.
 */
struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs, dma_addr_t *dma,
		gfp_t mem_flags)
{
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
		return pci_alloc_consistent(pdev,
				sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
				dma);
	else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
		return dma_pool_alloc(xhci->small_streams_pool,
				mem_flags, dma);
	else
		return dma_pool_alloc(xhci->medium_streams_pool,
				mem_flags, dma);
}
struct xhci_ring *xhci_dma_to_transfer_ring(
		struct xhci_virt_ep *ep,
		u64 address)
{
	if (ep->ep_state & EP_HAS_STREAMS)
		return radix_tree_lookup(&ep->stream_info->trb_address_map,
				address >> SEGMENT_SHIFT);
	return ep->ring;
}
/* Only use this when you know stream_info is valid */
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
static struct xhci_ring *dma_to_stream_ring(
		struct xhci_stream_info *stream_info,
		u64 address)
{
	return radix_tree_lookup(&stream_info->trb_address_map,
			address >> SEGMENT_SHIFT);
}
#endif	/* CONFIG_USB_XHCI_HCD_DEBUGGING */
struct xhci_ring *xhci_stream_id_to_ring(
		struct xhci_virt_device *dev,
		unsigned int ep_index,
		unsigned int stream_id)
{
	struct xhci_virt_ep *ep = &dev->eps[ep_index];

	if (stream_id == 0)
		return ep->ring;
	if (!ep->stream_info)
		return NULL;

	/* Valid stream IDs run from 1 to num_streams - 1 */
	if (stream_id >= ep->stream_info->num_streams)
		return NULL;
	return ep->stream_info->stream_rings[stream_id];
}
struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id)
{
	struct xhci_virt_ep *ep;

	ep = &xhci->devs[slot_id]->eps[ep_index];
	/* Common case: no streams */
	if (!(ep->ep_state & EP_HAS_STREAMS))
		return ep->ring;

	if (stream_id == 0) {
		xhci_warn(xhci,
				"WARN: Slot ID %u, ep index %u has streams, "
				"but URB has no stream ID.\n",
				slot_id, ep_index);
		return NULL;
	}

	if (stream_id < ep->stream_info->num_streams)
		return ep->stream_info->stream_rings[stream_id];

	xhci_warn(xhci,
			"WARN: Slot ID %u, ep index %u has "
			"stream IDs 1 to %u allocated, "
			"but stream ID %u is requested.\n",
			slot_id, ep_index,
			ep->stream_info->num_streams - 1,
			stream_id);
	return NULL;
}
/* Get the right ring for the given URB.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
		struct urb *urb)
{
	return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id,
			xhci_get_endpoint_index(&urb->ep->desc), urb->stream_id);
}
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
static int xhci_test_radix_tree(struct xhci_hcd *xhci,
		unsigned int num_streams,
		struct xhci_stream_info *stream_info)
{
	u32 cur_stream;
	struct xhci_ring *cur_ring;
	u64 addr;

	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
		struct xhci_ring *mapped_ring;
		int trb_size = sizeof(union xhci_trb);

		cur_ring = stream_info->stream_rings[cur_stream];
		for (addr = cur_ring->first_seg->dma;
				addr < cur_ring->first_seg->dma + SEGMENT_SIZE;
				addr += trb_size) {
			mapped_ring = dma_to_stream_ring(stream_info, addr);
			if (cur_ring != mapped_ring) {
				xhci_warn(xhci, "WARN: DMA address 0x%08llx "
						"didn't map to stream ID %u; "
						"mapped to ring %p\n",
						(unsigned long long) addr,
						cur_stream,
						mapped_ring);
				return -EINVAL;
			}
		}
		/* One TRB after the end of the ring segment shouldn't return a
		 * pointer to the current ring (although it may be a part of a
		 * different ring).
		 */
		mapped_ring = dma_to_stream_ring(stream_info, addr);
		if (mapped_ring != cur_ring) {
			/* One TRB before should also fail */
			addr = cur_ring->first_seg->dma - trb_size;
			mapped_ring = dma_to_stream_ring(stream_info, addr);
		}
		if (mapped_ring == cur_ring) {
			xhci_warn(xhci, "WARN: Bad DMA address 0x%08llx "
					"mapped to valid stream ID %u; "
					"mapped ring = %p\n",
					(unsigned long long) addr,
					cur_stream,
					mapped_ring);
			return -EINVAL;
		}
	}
	return 0;
}
#endif	/* CONFIG_USB_XHCI_HCD_DEBUGGING */
/*
 * Change an endpoint's internal structure so it supports stream IDs.  The
 * number of requested streams includes stream 0, which cannot be used by
 * device drivers.
 *
 * The number of stream contexts in the stream context array may be bigger than
 * the number of streams the driver wants to use.  This is because the number of
 * stream context array entries must be a power of two.
 *
 * We need a radix tree for mapping physical addresses of TRBs to which stream
 * ID they belong to.  We need to do this because the host controller won't tell
 * us which stream ring the TRB came from.  We could store the stream ID in an
 * event data TRB, but that doesn't help us for the cancellation case, since the
 * endpoint may stop before it reaches that event data TRB.
 *
 * The radix tree maps the upper portion of the TRB DMA address to a ring
 * segment that has the same upper portion of DMA addresses.  For example, say I
 * have segments of size 1KB, that are always 64-byte aligned.  A segment may
 * start at 0x10c91000 and end at 0x10c913f0.  If I shift the DMA address right
 * by 10 bits, the key for this segment is 0x43244.  I can use the DMA address
 * of any TRB to pass the radix tree a key to get the right stream ring:
 *
 *	0x10c90fff >> 10 = 0x43243
 *	0x10c912c0 >> 10 = 0x43244
 *	0x10c91400 >> 10 = 0x43245
 *
 * Obviously, only those TRBs with DMA addresses that are within the segment
 * will make the radix tree return the stream ID for that ring.
 *
 * Caveats for the radix tree:
 *
 * The radix tree uses an unsigned long as a key; on 32-bit systems, an
 * unsigned long will be 32-bits; on a 64-bit system an unsigned long will be
 * 64-bits.  Since we only request 32-bit DMA addresses, we can use that as the
 * key on 32-bit or 64-bit systems (it would also be fine if we asked for 64-bit
 * PCI DMA addresses on a 64-bit system).  There might be a problem on 32-bit
 * extended systems (where the DMA address can be bigger than 32-bits),
 * if we allow the PCI dma mask to be bigger than 32-bits.  So don't do that.
 */
struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs,
		unsigned int num_streams, gfp_t mem_flags)
{
	struct xhci_stream_info *stream_info;
	u32 cur_stream;
	struct xhci_ring *cur_ring;
	unsigned long key;
	u64 addr;
	int ret;

	xhci_dbg(xhci, "Allocating %u streams and %u "
			"stream context array entries.\n",
			num_streams, num_stream_ctxs);
	if (xhci->cmd_ring_reserved_trbs == MAX_RSVD_CMD_TRBS) {
		xhci_dbg(xhci, "Command ring has no reserved TRBs available\n");
		return NULL;
	}
	xhci->cmd_ring_reserved_trbs++;

	stream_info = kzalloc(sizeof(struct xhci_stream_info), mem_flags);
	if (!stream_info)
		goto cleanup_trbs;

	stream_info->num_streams = num_streams;
	stream_info->num_stream_ctxs = num_stream_ctxs;

	/* Initialize the array of virtual pointers to stream rings. */
	stream_info->stream_rings = kzalloc(
			sizeof(struct xhci_ring *)*num_streams,
			mem_flags);
	if (!stream_info->stream_rings)
		goto cleanup_info;

	/* Initialize the array of DMA addresses for stream rings for the HW. */
	stream_info->stream_ctx_array = xhci_alloc_stream_ctx(xhci,
			num_stream_ctxs, &stream_info->ctx_array_dma,
			mem_flags);
	if (!stream_info->stream_ctx_array)
		goto cleanup_ctx;
	memset(stream_info->stream_ctx_array, 0,
			sizeof(struct xhci_stream_ctx)*num_stream_ctxs);

	/* Allocate everything needed to free the stream rings later */
	stream_info->free_streams_command =
		xhci_alloc_command(xhci, true, true, mem_flags);
	if (!stream_info->free_streams_command)
		goto cleanup_ctx;

	INIT_RADIX_TREE(&stream_info->trb_address_map, GFP_ATOMIC);

	/* Allocate rings for all the streams that the driver will use,
	 * and add their segment DMA addresses to the radix tree.
	 * Stream 0 is reserved.
	 */
	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
		stream_info->stream_rings[cur_stream] =
			xhci_ring_alloc(xhci, 1, true, mem_flags);
		cur_ring = stream_info->stream_rings[cur_stream];
		if (!cur_ring)
			goto cleanup_rings;
		cur_ring->stream_id = cur_stream;
		/* Set deq ptr, cycle bit, and stream context type */
		addr = cur_ring->first_seg->dma |
			SCT_FOR_CTX(SCT_PRI_TR) |
			cur_ring->cycle_state;
		stream_info->stream_ctx_array[cur_stream].stream_ring = addr;
		xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n",
				cur_stream, (unsigned long long) addr);

		key = (unsigned long)
			(cur_ring->first_seg->dma >> SEGMENT_SHIFT);
		ret = radix_tree_insert(&stream_info->trb_address_map,
				key, cur_ring);
		if (ret) {
			xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
			goto cleanup_rings;
		}
	}
	/* Leave the other unused stream ring pointers in the stream context
	 * array initialized to zero.  This will cause the xHC to give us an
	 * error if the device asks for a stream ID we don't have setup (if it
	 * was any other way, the host controller would assume the ring is
	 * "empty" and wait forever for data to be queued to that stream ID).
	 */
#if XHCI_DEBUG
	/* Do a little test on the radix tree to make sure it returns the
	 * correct values.
	 */
	if (xhci_test_radix_tree(xhci, num_streams, stream_info))
		goto cleanup_rings;
#endif

	return stream_info;

cleanup_rings:
	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
		cur_ring = stream_info->stream_rings[cur_stream];
		if (cur_ring) {
			addr = cur_ring->first_seg->dma;
			radix_tree_delete(&stream_info->trb_address_map,
					addr >> SEGMENT_SHIFT);
			xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
		}
	}
	xhci_free_command(xhci, stream_info->free_streams_command);
cleanup_ctx:
	kfree(stream_info->stream_rings);
cleanup_info:
	kfree(stream_info);
cleanup_trbs:
	xhci->cmd_ring_reserved_trbs--;
	return NULL;
}
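/*
 * Worked key math, reusing the illustrative addresses from the comment block
 * above xhci_alloc_stream_info(): a 1KB segment starting at DMA 0x10c91000 is
 * inserted under key 0x10c91000 >> SEGMENT_SHIFT == 0x43244, and every TRB
 * address inside that segment (e.g. 0x10c912c0) shifts down to the same key,
 * so a later radix tree lookup returns this stream's ring.
 */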
/*
 * Sets the MaxPStreams field and the Linear Stream Array field.
 * Sets the dequeue pointer to the stream context array.
 */
void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
		struct xhci_ep_ctx *ep_ctx,
		struct xhci_stream_info *stream_info)
{
	u32 max_primary_streams;
	/* MaxPStreams is the number of stream context array entries, not the
	 * number we're actually using.  Must be in 2^(MaxPstreams + 1) format.
	 * fls(0) = 0, fls(1) = 1, fls(2) = 2, fls(4) = 3, etc.
	 */
	max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
	xhci_dbg(xhci, "Setting number of stream ctx array entries to %u\n",
			1 << (max_primary_streams + 1));
	ep_ctx->ep_info &= ~EP_MAXPSTREAMS_MASK;
	ep_ctx->ep_info |= EP_MAXPSTREAMS(max_primary_streams);
	ep_ctx->ep_info |= EP_HAS_LSA;
	ep_ctx->deq = stream_info->ctx_array_dma;
}
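/*
 * Worked example (illustrative): a stream context array with 256 entries has
 * fls(256) == 9, so MaxPStreams == 7, and the xHC decodes that back as
 * 2^(7 + 1) == 256 entries.
 */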
/*
 * Sets the MaxPStreams field and the Linear Stream Array field to 0.
 * Reinstalls the "normal" endpoint ring (at its previous dequeue mark,
 * not at the beginning of the ring).
 */
void xhci_setup_no_streams_ep_input_ctx(struct xhci_hcd *xhci,
		struct xhci_ep_ctx *ep_ctx,
		struct xhci_virt_ep *ep)
{
	dma_addr_t addr;

	ep_ctx->ep_info &= ~EP_MAXPSTREAMS_MASK;
	ep_ctx->ep_info &= ~EP_HAS_LSA;
	addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue);
	ep_ctx->deq = addr | ep->ring->cycle_state;
}
/* Frees all stream contexts associated with the endpoint.
 *
 * Caller should fix the endpoint context streams fields.
 */
void xhci_free_stream_info(struct xhci_hcd *xhci,
		struct xhci_stream_info *stream_info)
{
	int cur_stream;
	struct xhci_ring *cur_ring;
	u64 addr;

	if (!stream_info)
		return;

	for (cur_stream = 1; cur_stream < stream_info->num_streams;
			cur_stream++) {
		cur_ring = stream_info->stream_rings[cur_stream];
		if (cur_ring) {
			addr = cur_ring->first_seg->dma;
			radix_tree_delete(&stream_info->trb_address_map,
					addr >> SEGMENT_SHIFT);
			xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
		}
	}
	xhci_free_command(xhci, stream_info->free_streams_command);
	xhci->cmd_ring_reserved_trbs--;
	if (stream_info->stream_ctx_array)
		xhci_free_stream_ctx(xhci,
				stream_info->num_stream_ctxs,
				stream_info->stream_ctx_array,
				stream_info->ctx_array_dma);

	kfree(stream_info->stream_rings);
	kfree(stream_info);
}
/***************** Device context manipulation *************************/

static void xhci_init_endpoint_timer(struct xhci_hcd *xhci,
		struct xhci_virt_ep *ep)
{
	init_timer(&ep->stop_cmd_timer);
	ep->stop_cmd_timer.data = (unsigned long) ep;
	ep->stop_cmd_timer.function = xhci_stop_endpoint_command_watchdog;
	ep->xhci = xhci;
}
/* All the xhci_tds in the ring's TD list should be freed at this point */
void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *dev;
	int i;

	/* Slot ID 0 is reserved */
	if (slot_id == 0 || !xhci->devs[slot_id])
		return;

	dev = xhci->devs[slot_id];
	xhci->dcbaa->dev_context_ptrs[slot_id] = 0;

	for (i = 0; i < 31; ++i) {
		if (dev->eps[i].ring)
			xhci_ring_free(xhci, dev->eps[i].ring);
		if (dev->eps[i].stream_info)
			xhci_free_stream_info(xhci,
					dev->eps[i].stream_info);
	}

	if (dev->ring_cache) {
		for (i = 0; i < dev->num_rings_cached; i++)
			xhci_ring_free(xhci, dev->ring_cache[i]);
		kfree(dev->ring_cache);
	}

	if (dev->in_ctx)
		xhci_free_container_ctx(xhci, dev->in_ctx);
	if (dev->out_ctx)
		xhci_free_container_ctx(xhci, dev->out_ctx);

	kfree(xhci->devs[slot_id]);
	xhci->devs[slot_id] = NULL;
}
int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
		struct usb_device *udev, gfp_t flags)
{
	struct xhci_virt_device *dev;
	int i;

	/* Slot ID 0 is reserved */
	if (slot_id == 0 || xhci->devs[slot_id]) {
		xhci_warn(xhci, "Bad Slot ID %d\n", slot_id);
		return 0;
	}

	xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags);
	if (!xhci->devs[slot_id])
		return 0;
	dev = xhci->devs[slot_id];

	/* Allocate the (output) device context that will be used in the HC. */
	dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
	if (!dev->out_ctx)
		goto fail;
	xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
			(unsigned long long)dev->out_ctx->dma);

	/* Allocate the (input) device context for address device command */
	dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags);
	if (!dev->in_ctx)
		goto fail;
	xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
			(unsigned long long)dev->in_ctx->dma);

	/* Initialize the cancellation list and watchdog timers for each ep */
	for (i = 0; i < 31; i++) {
		xhci_init_endpoint_timer(xhci, &dev->eps[i]);
		INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
	}

	/* Allocate endpoint 0 ring */
	dev->eps[0].ring = xhci_ring_alloc(xhci, 1, true, flags);
	if (!dev->eps[0].ring)
		goto fail;

	/* Allocate pointers to the ring cache */
	dev->ring_cache = kzalloc(
			sizeof(struct xhci_ring *)*XHCI_MAX_RINGS_CACHED,
			flags);
	if (!dev->ring_cache)
		goto fail;
	dev->num_rings_cached = 0;

	init_completion(&dev->cmd_completion);
	INIT_LIST_HEAD(&dev->cmd_list);

	/* Point to output device context in dcbaa. */
	xhci->dcbaa->dev_context_ptrs[slot_id] = dev->out_ctx->dma;
	xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
			slot_id,
			&xhci->dcbaa->dev_context_ptrs[slot_id],
			(unsigned long long) xhci->dcbaa->dev_context_ptrs[slot_id]);

	return 1;
fail:
	xhci_free_virt_device(xhci, slot_id);
	return 0;
}
void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
		struct usb_device *udev)
{
	struct xhci_virt_device *virt_dev;
	struct xhci_ep_ctx	*ep0_ctx;
	struct xhci_ring	*ep_ring;

	virt_dev = xhci->devs[udev->slot_id];
	ep0_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, 0);
	ep_ring = virt_dev->eps[0].ring;
	/*
	 * FIXME we don't keep track of the dequeue pointer very well after a
	 * Set TR dequeue pointer, so we're setting the dequeue pointer of the
	 * host to our enqueue pointer.  This should only be called after a
	 * configured device has reset, so all control transfers should have
	 * been completed or cancelled before the reset.
	 */
	ep0_ctx->deq = xhci_trb_virt_to_dma(ep_ring->enq_seg, ep_ring->enqueue);
	ep0_ctx->deq |= ep_ring->cycle_state;
}
/* Setup an xHCI virtual device for a Set Address command */
int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
{
	struct xhci_virt_device *dev;
	struct xhci_ep_ctx	*ep0_ctx;
	struct usb_device	*top_dev;
	struct xhci_slot_ctx	*slot_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;

	dev = xhci->devs[udev->slot_id];
	/* Slot ID 0 is reserved */
	if (udev->slot_id == 0 || !dev) {
		xhci_warn(xhci, "Slot ID %d is not assigned to this device\n",
				udev->slot_id);
		return -EINVAL;
	}
	ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
	ctrl_ctx = xhci_get_input_control_ctx(xhci, dev->in_ctx);
	slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);

	/* 2) New slot context and endpoint 0 context are valid */
	ctrl_ctx->add_flags = SLOT_FLAG | EP0_FLAG;

	/* 3) Only the control endpoint is valid - one endpoint context */
	slot_ctx->dev_info |= LAST_CTX(1);

	slot_ctx->dev_info |= (u32) udev->route;
	switch (udev->speed) {
	case USB_SPEED_SUPER:
		slot_ctx->dev_info |= (u32) SLOT_SPEED_SS;
		break;
	case USB_SPEED_HIGH:
		slot_ctx->dev_info |= (u32) SLOT_SPEED_HS;
		break;
	case USB_SPEED_FULL:
		slot_ctx->dev_info |= (u32) SLOT_SPEED_FS;
		break;
	case USB_SPEED_LOW:
		slot_ctx->dev_info |= (u32) SLOT_SPEED_LS;
		break;
	case USB_SPEED_WIRELESS:
		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
		return -EINVAL;
	default:
		/* Speed was set earlier, this shouldn't happen. */
		BUG();
	}
	/* Find the root hub port this device is under */
	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
			top_dev = top_dev->parent)
		/* Found device below root hub */;
	slot_ctx->dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum);
	xhci_dbg(xhci, "Set root hub portnum to %d\n", top_dev->portnum);

	/* Is this a LS/FS device under a HS hub? */
	if ((udev->speed == USB_SPEED_LOW || udev->speed == USB_SPEED_FULL) &&
			udev->tt) {
		slot_ctx->tt_info = udev->tt->hub->slot_id;
		slot_ctx->tt_info |= udev->ttport << 8;
		if (udev->tt->multi)
			slot_ctx->dev_info |= DEV_MTT;
	}
	xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
	xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);

	/* Step 4 - ring already allocated */
	/* Step 5 */
	ep0_ctx->ep_info2 = EP_TYPE(CTRL_EP);
	/*
	 * XXX: Not sure about wireless USB devices.
	 */
	switch (udev->speed) {
	case USB_SPEED_SUPER:
		ep0_ctx->ep_info2 |= MAX_PACKET(512);
		break;
	case USB_SPEED_HIGH:
	/* USB core guesses at a 64-byte max packet first for FS devices */
	case USB_SPEED_FULL:
		ep0_ctx->ep_info2 |= MAX_PACKET(64);
		break;
	case USB_SPEED_LOW:
		ep0_ctx->ep_info2 |= MAX_PACKET(8);
		break;
	case USB_SPEED_WIRELESS:
		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
		return -EINVAL;
	default:
		/* New speed? */
		BUG();
	}
	/* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
	ep0_ctx->ep_info2 |= MAX_BURST(0);
	ep0_ctx->ep_info2 |= ERROR_COUNT(3);

	ep0_ctx->deq =
		dev->eps[0].ring->first_seg->dma;
	ep0_ctx->deq |= dev->eps[0].ring->cycle_state;

	/* Steps 7 and 8 were done in xhci_alloc_virt_device() */

	return 0;
}
/* Return the polling or NAK interval.
 *
 * The polling interval is expressed in "microframes".  If xHCI's Interval field
 * is set to N, it will service the endpoint every 2^(Interval)*125us.
 *
 * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval
 * is set to 0.
 */
static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	unsigned int interval = 0;

	switch (udev->speed) {
	case USB_SPEED_HIGH:
		/* Max NAK rate */
		if (usb_endpoint_xfer_control(&ep->desc) ||
				usb_endpoint_xfer_bulk(&ep->desc))
			interval = ep->desc.bInterval;
		/* Fall through - SS and HS isoc/int have same decoding */
	case USB_SPEED_SUPER:
		if (usb_endpoint_xfer_int(&ep->desc) ||
				usb_endpoint_xfer_isoc(&ep->desc)) {
			if (ep->desc.bInterval == 0)
				interval = 0;
			else
				interval = ep->desc.bInterval - 1;
			if (interval > 15)
				interval = 15;
			if (interval != ep->desc.bInterval - 1)
				dev_warn(&udev->dev,
						"ep %#x - rounding interval to %d microframes\n",
						ep->desc.bEndpointAddress,
						1 << interval);
		}
		break;
	/* Convert bInterval (in 1-255 frames) to microframes and round down to
	 * nearest power of 2.
	 */
	case USB_SPEED_FULL:
	case USB_SPEED_LOW:
		if (usb_endpoint_xfer_int(&ep->desc) ||
				usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = fls(8*ep->desc.bInterval) - 1;
			if (interval > 10)
				interval = 10;
			if (interval < 3)
				interval = 3;
			if ((1 << interval) != 8*ep->desc.bInterval)
				dev_warn(&udev->dev,
						"ep %#x - rounding interval"
						" to %d microframes, "
						"ep desc says %d microframes\n",
						ep->desc.bEndpointAddress,
						1 << interval,
						8*ep->desc.bInterval);
		}
		break;
	default:
		BUG();
	}
	return EP_INTERVAL(interval);
}
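/*
 * Worked examples of the decoding above (illustrative endpoints):
 *  - HS or SS interrupt ep, bInterval = 4: interval = 3, so the xHC services
 *    the endpoint every 2^3 * 125us = 1ms.
 *  - FS interrupt ep, bInterval = 5 frames: fls(8*5) - 1 = 5, within [3,10],
 *    so the service interval is 2^5 * 125us = 4ms, and the dev_warn above
 *    fires because 2^5 != 40 microframes.
 */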
/* The "Mult" field in the endpoint context is only set for SuperSpeed isoc eps.
 * High speed endpoint descriptors can define "the number of additional
 * transaction opportunities per microframe", but that goes in the Max Burst
 * endpoint context field.
 */
static inline u32 xhci_get_endpoint_mult(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	if (udev->speed != USB_SPEED_SUPER ||
			!usb_endpoint_xfer_isoc(&ep->desc))
		return 0;
	return ep->ss_ep_comp.bmAttributes;
}
static inline u32 xhci_get_endpoint_type(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	int in;
	u32 type;

	in = usb_endpoint_dir_in(&ep->desc);
	if (usb_endpoint_xfer_control(&ep->desc)) {
		type = EP_TYPE(CTRL_EP);
	} else if (usb_endpoint_xfer_bulk(&ep->desc)) {
		if (in)
			type = EP_TYPE(BULK_IN_EP);
		else
			type = EP_TYPE(BULK_OUT_EP);
	} else if (usb_endpoint_xfer_isoc(&ep->desc)) {
		if (in)
			type = EP_TYPE(ISOC_IN_EP);
		else
			type = EP_TYPE(ISOC_OUT_EP);
	} else if (usb_endpoint_xfer_int(&ep->desc)) {
		if (in)
			type = EP_TYPE(INT_IN_EP);
		else
			type = EP_TYPE(INT_OUT_EP);
	} else {
		BUG();
	}
	return type;
}
/* Return the maximum endpoint service interval time (ESIT) payload.
 * Basically, this is the maxpacket size, multiplied by the burst size
 * and mult size.
 */
static inline u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	int max_burst;
	int max_packet;

	/* Only applies for interrupt or isochronous endpoints */
	if (usb_endpoint_xfer_control(&ep->desc) ||
			usb_endpoint_xfer_bulk(&ep->desc))
		return 0;

	if (udev->speed == USB_SPEED_SUPER)
		return ep->ss_ep_comp.wBytesPerInterval;

	max_packet = ep->desc.wMaxPacketSize & 0x3ff;
	max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11;
	/* A 0 in max burst means 1 transfer per ESIT */
	return max_packet * (max_burst + 1);
}
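/*
 * Worked example (hypothetical high-bandwidth HS isoc endpoint):
 * wMaxPacketSize = 0x1380 decodes to max_packet = 0x380 (896 bytes) and
 * max_burst = (0x1380 & 0x1800) >> 11 = 2, so the max ESIT payload is
 * 896 * (2 + 1) = 2688 bytes per service interval.
 */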
/* Set up an endpoint with one ring segment.  Do not allocate stream rings.
 * Drivers will have to call usb_alloc_streams() to do that.
 */
int xhci_endpoint_init(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_device *udev,
		struct usb_host_endpoint *ep,
		gfp_t mem_flags)
{
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_ring *ep_ring;
	unsigned int max_packet;
	unsigned int max_burst;
	u32 max_esit_payload;

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

	/* Set up the endpoint ring */
	virt_dev->eps[ep_index].new_ring =
		xhci_ring_alloc(xhci, 1, true, mem_flags);
	if (!virt_dev->eps[ep_index].new_ring) {
		/* Attempt to use the ring cache */
		if (virt_dev->num_rings_cached == 0)
			return -ENOMEM;
		virt_dev->num_rings_cached--;
		virt_dev->eps[ep_index].new_ring =
			virt_dev->ring_cache[virt_dev->num_rings_cached];
		virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
		xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring);
	}
	ep_ring = virt_dev->eps[ep_index].new_ring;
	ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state;

	ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep);
	ep_ctx->ep_info |= EP_MULT(xhci_get_endpoint_mult(udev, ep));

	/* FIXME dig Mult and streams info out of ep companion desc */

	/* Allow 3 retries for everything but isoc;
	 * error count = 0 means infinite retries.
	 */
	if (!usb_endpoint_xfer_isoc(&ep->desc))
		ep_ctx->ep_info2 = ERROR_COUNT(3);
	else
		ep_ctx->ep_info2 = ERROR_COUNT(1);

	ep_ctx->ep_info2 |= xhci_get_endpoint_type(udev, ep);

	/* Set the max packet size and max burst */
	switch (udev->speed) {
	case USB_SPEED_SUPER:
		max_packet = ep->desc.wMaxPacketSize;
		ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
		/* dig out max burst from ep companion desc */
		max_packet = ep->ss_ep_comp.bMaxBurst;
		if (!max_packet)
			xhci_warn(xhci, "WARN no SS endpoint bMaxBurst\n");
		ep_ctx->ep_info2 |= MAX_BURST(max_packet);
		break;
	case USB_SPEED_HIGH:
		/* bits 11:12 specify the number of additional transaction
		 * opportunities per microframe (USB 2.0, section 9.6.6)
		 */
		if (usb_endpoint_xfer_isoc(&ep->desc) ||
				usb_endpoint_xfer_int(&ep->desc)) {
			max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11;
			ep_ctx->ep_info2 |= MAX_BURST(max_burst);
		}
		/* Fall through */
	case USB_SPEED_FULL:
	case USB_SPEED_LOW:
		max_packet = ep->desc.wMaxPacketSize & 0x3ff;
		ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
		break;
	default:
		BUG();
	}
	max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep);
	ep_ctx->tx_info = MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload);

	/*
	 * XXX no idea how to calculate the average TRB buffer length for bulk
	 * endpoints, as the driver gives us no clue how big each scatter gather
	 * list entry (or buffer) is going to be.
	 *
	 * For isochronous and interrupt endpoints, we set it to the max
	 * available, until we have new API in the USB core to allow drivers to
	 * declare how much bandwidth they actually need.
	 *
	 * Normally, it would be calculated by taking the total of the buffer
	 * lengths in the TD and then dividing by the number of TRBs in a TD,
	 * including link TRBs, No-op TRBs, and Event data TRBs.  Since we don't
	 * use Event Data TRBs, and we don't chain in a link TRB on short
	 * transfers, we're basically dividing by 1.
	 */
	ep_ctx->tx_info |= AVG_TRB_LENGTH_FOR_EP(max_esit_payload);

	/* FIXME Debug endpoint context */
	return 0;
}
void xhci_endpoint_zero(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_host_endpoint *ep)
{
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

	ep_ctx->ep_info = 0;
	ep_ctx->ep_info2 = 0;
	ep_ctx->deq = 0;
	ep_ctx->tx_info = 0;
	/* Don't free the endpoint ring until the set interface or configuration
	 * request succeeds.
	 */
}
/* Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
 * Useful when you want to change one particular aspect of the endpoint and then
 * issue a configure endpoint command.
 */
void xhci_endpoint_copy(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx,
		unsigned int ep_index)
{
	struct xhci_ep_ctx *out_ep_ctx;
	struct xhci_ep_ctx *in_ep_ctx;

	out_ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);

	in_ep_ctx->ep_info = out_ep_ctx->ep_info;
	in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
	in_ep_ctx->deq = out_ep_ctx->deq;
	in_ep_ctx->tx_info = out_ep_ctx->tx_info;
}
/* Copy output xhci_slot_ctx to the input xhci_slot_ctx.
 * Useful when you want to change one particular aspect of the endpoint and then
 * issue a configure endpoint command.  Only the context entries field matters,
 * but we'll copy the whole thing anyway.
 */
void xhci_slot_copy(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx)
{
	struct xhci_slot_ctx *in_slot_ctx;
	struct xhci_slot_ctx *out_slot_ctx;

	in_slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	out_slot_ctx = xhci_get_slot_ctx(xhci, out_ctx);

	in_slot_ctx->dev_info = out_slot_ctx->dev_info;
	in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
	in_slot_ctx->tt_info = out_slot_ctx->tt_info;
	in_slot_ctx->dev_state = out_slot_ctx->dev_state;
}
/* Set up the scratchpad buffer array and scratchpad buffers, if needed. */
static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
{
	int i;
	struct device *dev = xhci_to_hcd(xhci)->self.controller;
	int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);

	xhci_dbg(xhci, "Allocating %d scratchpad buffers\n", num_sp);

	if (!num_sp)
		return 0;

	xhci->scratchpad = kzalloc(sizeof(*xhci->scratchpad), flags);
	if (!xhci->scratchpad)
		goto fail_sp;

	xhci->scratchpad->sp_array =
		pci_alloc_consistent(to_pci_dev(dev),
				num_sp * sizeof(u64),
				&xhci->scratchpad->sp_dma);
	if (!xhci->scratchpad->sp_array)
		goto fail_sp2;

	xhci->scratchpad->sp_buffers = kzalloc(sizeof(void *) * num_sp, flags);
	if (!xhci->scratchpad->sp_buffers)
		goto fail_sp3;

	xhci->scratchpad->sp_dma_buffers =
		kzalloc(sizeof(dma_addr_t) * num_sp, flags);
	if (!xhci->scratchpad->sp_dma_buffers)
		goto fail_sp4;

	xhci->dcbaa->dev_context_ptrs[0] = xhci->scratchpad->sp_dma;
	for (i = 0; i < num_sp; i++) {
		dma_addr_t dma;
		void *buf = pci_alloc_consistent(to_pci_dev(dev),
				xhci->page_size, &dma);
		if (!buf)
			goto fail_sp5;

		xhci->scratchpad->sp_array[i] = dma;
		xhci->scratchpad->sp_buffers[i] = buf;
		xhci->scratchpad->sp_dma_buffers[i] = dma;
	}

	return 0;

 fail_sp5:
	for (i = i - 1; i >= 0; i--) {
		pci_free_consistent(to_pci_dev(dev), xhci->page_size,
				xhci->scratchpad->sp_buffers[i],
				xhci->scratchpad->sp_dma_buffers[i]);
	}
	kfree(xhci->scratchpad->sp_dma_buffers);

 fail_sp4:
	kfree(xhci->scratchpad->sp_buffers);

 fail_sp3:
	pci_free_consistent(to_pci_dev(dev), num_sp * sizeof(u64),
			xhci->scratchpad->sp_array,
			xhci->scratchpad->sp_dma);

 fail_sp2:
	kfree(xhci->scratchpad);
	xhci->scratchpad = NULL;

 fail_sp:
	return -ENOMEM;
}
static void scratchpad_free(struct xhci_hcd *xhci)
{
	int num_sp;
	int i;
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	if (!xhci->scratchpad)
		return;

	num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);

	for (i = 0; i < num_sp; i++) {
		pci_free_consistent(pdev, xhci->page_size,
				xhci->scratchpad->sp_buffers[i],
				xhci->scratchpad->sp_dma_buffers[i]);
	}
	kfree(xhci->scratchpad->sp_dma_buffers);
	kfree(xhci->scratchpad->sp_buffers);
	pci_free_consistent(pdev, num_sp * sizeof(u64),
			xhci->scratchpad->sp_array,
			xhci->scratchpad->sp_dma);
	kfree(xhci->scratchpad);
	xhci->scratchpad = NULL;
}
struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
		bool allocate_in_ctx, bool allocate_completion,
		gfp_t mem_flags)
{
	struct xhci_command *command;

	command = kzalloc(sizeof(*command), mem_flags);
	if (!command)
		return NULL;

	if (allocate_in_ctx) {
		command->in_ctx =
			xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT,
					mem_flags);
		if (!command->in_ctx) {
			kfree(command);
			return NULL;
		}
	}

	if (allocate_completion) {
		command->completion =
			kzalloc(sizeof(struct completion), mem_flags);
		if (!command->completion) {
			xhci_free_container_ctx(xhci, command->in_ctx);
			kfree(command);
			return NULL;
		}
		init_completion(command->completion);
	}

	command->status = 0;
	INIT_LIST_HEAD(&command->cmd_list);
	return command;
}
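/*
 * Simplified, hypothetical usage of the command allocator (real callers queue
 * a command TRB and ring the host controller doorbell in between):
 *
 *	struct xhci_command *cmd;
 *
 *	cmd = xhci_alloc_command(xhci, true, true, GFP_KERNEL);
 *	if (!cmd)
 *		return -ENOMEM;
 *	... queue the command, ring the doorbell ...
 *	wait_for_completion(cmd->completion);
 *	xhci_free_command(xhci, cmd);
 */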
void xhci_free_command(struct xhci_hcd *xhci,
		struct xhci_command *command)
{
	xhci_free_container_ctx(xhci,
			command->in_ctx);
	kfree(command->completion);
	kfree(command);
}
void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
	struct pci_dev	*pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	int size;
	int i;

	/* Free the Event Ring Segment Table and the actual Event Ring */
	if (xhci->ir_set) {
		xhci_writel(xhci, 0, &xhci->ir_set->erst_size);
		xhci_write_64(xhci, 0, &xhci->ir_set->erst_base);
		xhci_write_64(xhci, 0, &xhci->ir_set->erst_dequeue);
	}
	size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
	if (xhci->erst.entries)
		pci_free_consistent(pdev, size,
				xhci->erst.entries, xhci->erst.erst_dma_addr);
	xhci->erst.entries = NULL;
	xhci_dbg(xhci, "Freed ERST\n");
	if (xhci->event_ring)
		xhci_ring_free(xhci, xhci->event_ring);
	xhci->event_ring = NULL;
	xhci_dbg(xhci, "Freed event ring\n");

	xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);
	if (xhci->cmd_ring)
		xhci_ring_free(xhci, xhci->cmd_ring);
	xhci->cmd_ring = NULL;
	xhci_dbg(xhci, "Freed command ring\n");

	for (i = 1; i < MAX_HC_SLOTS; ++i)
		xhci_free_virt_device(xhci, i);

	if (xhci->segment_pool)
		dma_pool_destroy(xhci->segment_pool);
	xhci->segment_pool = NULL;
	xhci_dbg(xhci, "Freed segment pool\n");

	if (xhci->device_pool)
		dma_pool_destroy(xhci->device_pool);
	xhci->device_pool = NULL;
	xhci_dbg(xhci, "Freed device context pool\n");

	if (xhci->small_streams_pool)
		dma_pool_destroy(xhci->small_streams_pool);
	xhci->small_streams_pool = NULL;
	xhci_dbg(xhci, "Freed small stream array pool\n");

	if (xhci->medium_streams_pool)
		dma_pool_destroy(xhci->medium_streams_pool);
	xhci->medium_streams_pool = NULL;
	xhci_dbg(xhci, "Freed medium stream array pool\n");

	xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
	if (xhci->dcbaa)
		pci_free_consistent(pdev, sizeof(*xhci->dcbaa),
				xhci->dcbaa, xhci->dcbaa->dma);
	xhci->dcbaa = NULL;

	scratchpad_free(xhci);
	xhci->page_size = 0;
	xhci->page_shift = 0;
}
static int xhci_test_trb_in_td(struct xhci_hcd *xhci,
		struct xhci_segment *input_seg,
		union xhci_trb *start_trb,
		union xhci_trb *end_trb,
		dma_addr_t input_dma,
		struct xhci_segment *result_seg,
		char *test_name, int test_number)
{
	unsigned long long start_dma;
	unsigned long long end_dma;
	struct xhci_segment *seg;

	start_dma = xhci_trb_virt_to_dma(input_seg, start_trb);
	end_dma = xhci_trb_virt_to_dma(input_seg, end_trb);

	seg = trb_in_td(input_seg, start_trb, end_trb, input_dma);
	if (seg != result_seg) {
		xhci_warn(xhci, "WARN: %s TRB math test %d failed!\n",
				test_name, test_number);
		xhci_warn(xhci, "Tested TRB math w/ seg %p and "
				"input DMA 0x%llx\n",
				input_seg,
				(unsigned long long) input_dma);
		xhci_warn(xhci, "starting TRB %p (0x%llx DMA), "
				"ending TRB %p (0x%llx DMA)\n",
				start_trb, start_dma,
				end_trb, end_dma);
		xhci_warn(xhci, "Expected seg %p, got seg %p\n",
				result_seg, seg);
		return -1;
	}
	return 0;
}
/* TRB math checks for xhci_trb_in_td(), using the command and event rings. */
static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags)
{
	struct {
		dma_addr_t		input_dma;
		struct xhci_segment	*result_seg;
	} simple_test_vector [] = {
		/* A zeroed DMA field should fail */
		{ 0, NULL },
		/* One TRB before the ring start should fail */
		{ xhci->event_ring->first_seg->dma - 16, NULL },
		/* One byte before the ring start should fail */
		{ xhci->event_ring->first_seg->dma - 1, NULL },
		/* Starting TRB should succeed */
		{ xhci->event_ring->first_seg->dma, xhci->event_ring->first_seg },
		/* Ending TRB should succeed */
		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16,
			xhci->event_ring->first_seg },
		/* One byte after the ring end should fail */
		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16 + 1, NULL },
		/* One TRB after the ring end should fail */
		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT)*16, NULL },
		/* An address of all ones should fail */
		{ (dma_addr_t) (~0), NULL },
	};
	struct {
		struct xhci_segment	*input_seg;
		union xhci_trb		*start_trb;
		union xhci_trb		*end_trb;
		dma_addr_t		input_dma;
		struct xhci_segment	*result_seg;
	} complex_test_vector [] = {
		/* Test feeding a valid DMA address from a different ring */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = xhci->event_ring->first_seg->trbs,
			.end_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
			.input_dma = xhci->cmd_ring->first_seg->dma,
			.result_seg = NULL,
		},
		/* Test feeding a valid end TRB from a different ring */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = xhci->event_ring->first_seg->trbs,
			.end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
			.input_dma = xhci->cmd_ring->first_seg->dma,
			.result_seg = NULL,
		},
		/* Test feeding a valid start and end TRB from a different ring */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = xhci->cmd_ring->first_seg->trbs,
			.end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
			.input_dma = xhci->cmd_ring->first_seg->dma,
			.result_seg = NULL,
		},
		/* TRB in this ring, but after this TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[0],
			.end_trb = &xhci->event_ring->first_seg->trbs[3],
			.input_dma = xhci->event_ring->first_seg->dma + 4*16,
			.result_seg = NULL,
		},
		/* TRB in this ring, but before this TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[3],
			.end_trb = &xhci->event_ring->first_seg->trbs[6],
			.input_dma = xhci->event_ring->first_seg->dma + 2*16,
			.result_seg = NULL,
		},
		/* TRB in this ring, but after this wrapped TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
			.end_trb = &xhci->event_ring->first_seg->trbs[1],
			.input_dma = xhci->event_ring->first_seg->dma + 2*16,
			.result_seg = NULL,
		},
		/* TRB in this ring, but before this wrapped TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
			.end_trb = &xhci->event_ring->first_seg->trbs[1],
			.input_dma = xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 4)*16,
			.result_seg = NULL,
		},
		/* TRB not in this ring, and we have a wrapped TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
			.end_trb = &xhci->event_ring->first_seg->trbs[1],
			.input_dma = xhci->cmd_ring->first_seg->dma + 2*16,
			.result_seg = NULL,
		},
	};

	unsigned int num_tests;
	int i, ret;

	num_tests = ARRAY_SIZE(simple_test_vector);
	for (i = 0; i < num_tests; i++) {
		ret = xhci_test_trb_in_td(xhci,
				xhci->event_ring->first_seg,
				xhci->event_ring->first_seg->trbs,
				&xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
				simple_test_vector[i].input_dma,
				simple_test_vector[i].result_seg,
				"Simple", i);
		if (ret < 0)
			return ret;
	}

	num_tests = ARRAY_SIZE(complex_test_vector);
	for (i = 0; i < num_tests; i++) {
		ret = xhci_test_trb_in_td(xhci,
				complex_test_vector[i].input_seg,
				complex_test_vector[i].start_trb,
				complex_test_vector[i].end_trb,
				complex_test_vector[i].input_dma,
				complex_test_vector[i].result_seg,
				"Complex", i);
		if (ret < 0)
			return ret;
	}
	xhci_dbg(xhci, "TRB math tests passed.\n");
	return 0;
}
int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
	dma_addr_t	dma;
	struct device	*dev = xhci_to_hcd(xhci)->self.controller;
	unsigned int	val, val2;
	u64		val_64;
	struct xhci_segment	*seg;
	u32 page_size;
	int i;

	page_size = xhci_readl(xhci, &xhci->op_regs->page_size);
	xhci_dbg(xhci, "Supported page size register = 0x%x\n", page_size);
	for (i = 0; i < 16; i++) {
		if ((0x1 & page_size) != 0)
			break;
		page_size = page_size >> 1;
	}
	if (i < 16)
		xhci_dbg(xhci, "Supported page size of %iK\n", (1 << (i+12)) / 1024);
	else
		xhci_warn(xhci, "WARN: no supported page size\n");
	/* Use 4K pages, since that's common and the minimum the HC supports */
	xhci->page_shift = 12;
	xhci->page_size = 1 << xhci->page_shift;
	xhci_dbg(xhci, "HCD page size set to %iK\n", xhci->page_size / 1024);

	/*
	 * Program the Number of Device Slots Enabled field in the CONFIG
	 * register with the max value of slots the HC can handle.
	 */
	val = HCS_MAX_SLOTS(xhci_readl(xhci, &xhci->cap_regs->hcs_params1));
	xhci_dbg(xhci, "// xHC can handle at most %d device slots.\n",
			(unsigned int) val);
	val2 = xhci_readl(xhci, &xhci->op_regs->config_reg);
	val |= (val2 & ~HCS_SLOTS_MASK);
	xhci_dbg(xhci, "// Setting Max device slots reg = 0x%x.\n",
			(unsigned int) val);
	xhci_writel(xhci, val, &xhci->op_regs->config_reg);

	/*
	 * Section 5.4.8 - doorbell array must be
	 * "physically contiguous and 64-byte (cache line) aligned".
	 */
	xhci->dcbaa = pci_alloc_consistent(to_pci_dev(dev),
			sizeof(*xhci->dcbaa), &dma);
	if (!xhci->dcbaa)
		goto fail;
	memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
	xhci->dcbaa->dma = dma;
	xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n",
			(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
	xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);

	/*
	 * Initialize the ring segment pool.  The ring must be a contiguous
	 * structure comprised of TRBs.  The TRBs must be 16 byte aligned,
	 * however, the command ring segment needs 64-byte aligned segments,
	 * so we pick the greater alignment need.
	 */
	xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
			SEGMENT_SIZE, 64, xhci->page_size);

	/* See Table 46 and Note on Figure 55 */
	xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
			2112, 64, xhci->page_size);
	if (!xhci->segment_pool || !xhci->device_pool)
		goto fail;

	/* Linear stream context arrays don't have any boundary restrictions,
	 * and only need to be 16-byte aligned.
	 */
	xhci->small_streams_pool =
		dma_pool_create("xHCI 256 byte stream ctx arrays",
			dev, SMALL_STREAM_ARRAY_SIZE, 16, 0);
	xhci->medium_streams_pool =
		dma_pool_create("xHCI 1KB stream ctx arrays",
			dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0);
	/* Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE
	 * will be allocated with pci_alloc_consistent()
	 */
	if (!xhci->small_streams_pool || !xhci->medium_streams_pool)
		goto fail;

	/* Set up the command ring to have one segment for now. */
	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, flags);
	if (!xhci->cmd_ring)
		goto fail;
	xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
	xhci_dbg(xhci, "First segment DMA is 0x%llx\n",
			(unsigned long long)xhci->cmd_ring->first_seg->dma);

	/* Set the address in the Command Ring Control register */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		(xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
		xhci->cmd_ring->cycle_state;
	xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n",
			(unsigned long long) val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);

	val = xhci_readl(xhci, &xhci->cap_regs->db_off);
	val &= DBOFF_MASK;
	xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x"
			" from cap regs base addr\n", val);
	xhci->dba = (void *) xhci->cap_regs + val;
	xhci_dbg_regs(xhci);
	xhci_print_run_regs(xhci);
	/* Set ir_set to interrupt register set 0 */
	xhci->ir_set = (void *) xhci->run_regs->ir_set;

	/*
	 * Event ring setup: Allocate a normal ring, but also setup
	 * the event ring segment table (ERST).  Section 4.9.3.
	 */
	xhci_dbg(xhci, "// Allocating event ring\n");
	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, flags);
	if (!xhci->event_ring)
		goto fail;
	if (xhci_check_trb_in_td_math(xhci, flags) < 0)
		goto fail;

	xhci->erst.entries = pci_alloc_consistent(to_pci_dev(dev),
			sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS, &dma);
	if (!xhci->erst.entries)
		goto fail;
	xhci_dbg(xhci, "// Allocated event ring segment table at 0x%llx\n",
			(unsigned long long)dma);

	memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
	xhci->erst.num_entries = ERST_NUM_SEGS;
	xhci->erst.erst_dma_addr = dma;
	xhci_dbg(xhci, "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx\n",
			xhci->erst.num_entries,
			xhci->erst.entries,
			(unsigned long long)xhci->erst.erst_dma_addr);

	/* set ring base address and size for each segment table entry */
	for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
		struct xhci_erst_entry *entry = &xhci->erst.entries[val];
		entry->seg_addr = seg->dma;
		entry->seg_size = TRBS_PER_SEGMENT;
		entry->rsvd = 0;
		seg = seg->next;
	}

	/* set ERST count with the number of entries in the segment table */
	val = xhci_readl(xhci, &xhci->ir_set->erst_size);
	val &= ERST_SIZE_MASK;
	val |= ERST_NUM_SEGS;
	xhci_dbg(xhci, "// Write ERST size = %i to ir_set 0 (some bits preserved)\n",
			val);
	xhci_writel(xhci, val, &xhci->ir_set->erst_size);

	xhci_dbg(xhci, "// Set ERST entries to point to event ring.\n");
	/* set the segment table base address */
	xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n",
			(unsigned long long)xhci->erst.erst_dma_addr);
	val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
	val_64 &= ERST_PTR_MASK;
	val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
	xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);

	/* Set the event ring dequeue address */
	xhci_set_hc_event_deq(xhci);
	xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
	xhci_print_ir_set(xhci, xhci->ir_set, 0);

	/*
	 * XXX: Might need to set the Interrupter Moderation Register to
	 * something other than the default (~1ms minimum between interrupts).
	 * See section 5.5.1.2.
	 */
	init_completion(&xhci->addr_dev);
	for (i = 0; i < MAX_HC_SLOTS; ++i)
		xhci->devs[i] = NULL;

	if (scratchpad_alloc(xhci, flags))
		goto fail;

	return 0;

fail:
	xhci_warn(xhci, "Couldn't initialize memory\n");
	xhci_mem_cleanup(xhci);
	return -ENOMEM;
}