/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/usb.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/dmapool.h>

#include "xhci.h"
/*
 * Allocates a generic ring segment from the ring pool, sets the dma address,
 * initializes the segment to zero, and sets the private next pointer to NULL.
 *
 * Section 4.11.1.1:
 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
 */
static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flags)
{
	struct xhci_segment *seg;
	dma_addr_t dma;

	seg = kzalloc(sizeof(*seg), flags);
	if (!seg)
		return NULL;
	xhci_dbg(xhci, "Allocating priv segment structure at %p\n", seg);

	seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma);
	if (!seg->trbs) {
		kfree(seg);
		return NULL;
	}
	xhci_dbg(xhci, "// Allocating segment at %p (virtual) 0x%llx (DMA)\n",
			seg->trbs, (unsigned long long)dma);

	memset(seg->trbs, 0, SEGMENT_SIZE);
	seg->dma = dma;
	seg->next = NULL;

	return seg;
}
static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
{
	if (!seg)
		return;
	if (seg->trbs) {
		xhci_dbg(xhci, "Freeing DMA segment at %p (virtual) 0x%llx (DMA)\n",
				seg->trbs, (unsigned long long)seg->dma);
		dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
		seg->trbs = NULL;
	}
	xhci_dbg(xhci, "Freeing priv segment structure at %p\n", seg);
	kfree(seg);
}
/*
 * Make the prev segment point to the next segment.
 *
 * Change the last TRB in the prev segment to be a Link TRB which points to the
 * DMA address of the next segment.  The caller needs to set any Link TRB
 * related flags, such as End TRB, Toggle Cycle, and no snoop.
 */
static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
		struct xhci_segment *next, bool link_trbs)
{
	u32 val;

	if (!prev || !next)
		return;
	prev->next = next;
	if (link_trbs) {
		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr = next->dma;

		/* Set the last TRB in the segment to have a TRB type ID of Link TRB */
		val = prev->trbs[TRBS_PER_SEGMENT-1].link.control;
		val &= ~TRB_TYPE_BITMASK;
		val |= TRB_TYPE(TRB_LINK);
		/* Always set the chain bit with 0.95 hardware */
		if (xhci_link_trb_quirk(xhci))
			val |= TRB_CHAIN;
		prev->trbs[TRBS_PER_SEGMENT-1].link.control = val;
	}
	xhci_dbg(xhci, "Linking segment 0x%llx to segment 0x%llx (DMA)\n",
			(unsigned long long)prev->dma,
			(unsigned long long)next->dma);
}
/* XXX: Do we need the hcd structure in all these functions? */
void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	struct xhci_segment *seg;
	struct xhci_segment *first_seg;

	if (!ring || !ring->first_seg)
		return;
	first_seg = ring->first_seg;
	seg = first_seg->next;
	xhci_dbg(xhci, "Freeing ring at %p\n", ring);
	while (seg != first_seg) {
		struct xhci_segment *next = seg->next;
		xhci_segment_free(xhci, seg);
		seg = next;
	}
	xhci_segment_free(xhci, first_seg);
	ring->first_seg = NULL;
	kfree(ring);
}
static void xhci_initialize_ring_info(struct xhci_ring *ring)
{
	/* The ring is empty, so the enqueue pointer == dequeue pointer */
	ring->enqueue = ring->first_seg->trbs;
	ring->enq_seg = ring->first_seg;
	ring->dequeue = ring->enqueue;
	ring->deq_seg = ring->first_seg;
	/* The ring is initialized to 0.  The producer must write 1 to the cycle
	 * bit to handover ownership of the TRB, so PCS = 1.  The consumer must
	 * compare CCS to the cycle bit to check ownership, so CCS = 1.
	 */
	ring->cycle_state = 1;
	/* Not necessary for new rings, but needed for re-initialized rings */
	ring->enq_updates = 0;
	ring->deq_updates = 0;
}
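/* Note on the cycle bit protocol: because new segments are zeroed, a consumer
 * scanning with CCS = 1 treats every TRB as stale until the producer writes
 * one with cycle = 1.  Each pass through a Link TRB with Toggle Cycle set
 * flips both sides' sense of "valid", so ring memory is reused on wrap
 * without being re-zeroed.
 */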
/**
 * Create a new ring with zero or more segments.
 *
 * Link each segment together into a ring.
 * Set the end flag and the cycle toggle bit on the last segment.
 * See section 4.9.1 and figures 15 and 16.
 */
static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
		unsigned int num_segs, bool link_trbs, gfp_t flags)
{
	struct xhci_ring *ring;
	struct xhci_segment *prev;

	ring = kzalloc(sizeof *(ring), flags);
	xhci_dbg(xhci, "Allocating ring at %p\n", ring);
	if (!ring)
		return NULL;

	INIT_LIST_HEAD(&ring->td_list);
	if (num_segs == 0)
		return ring;

	ring->first_seg = xhci_segment_alloc(xhci, flags);
	if (!ring->first_seg)
		goto fail;
	num_segs--;

	prev = ring->first_seg;
	while (num_segs > 0) {
		struct xhci_segment *next;

		next = xhci_segment_alloc(xhci, flags);
		if (!next)
			goto fail;
		xhci_link_segments(xhci, prev, next, link_trbs);

		prev = next;
		num_segs--;
	}
	xhci_link_segments(xhci, prev, ring->first_seg, link_trbs);

	if (link_trbs) {
		/* See section 4.9.2.1 and 6.4.4.1 */
		prev->trbs[TRBS_PER_SEGMENT-1].link.control |= (LINK_TOGGLE);
		xhci_dbg(xhci, "Wrote link toggle flag to"
				" segment %p (virtual), 0x%llx (DMA)\n",
				prev, (unsigned long long)prev->dma);
	}
	xhci_initialize_ring_info(ring);
	return ring;

fail:
	xhci_ring_free(xhci, ring);
	return NULL;
}
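/* Instead of freeing an endpoint ring outright, cache it on the virt device
 * so a later Configure Endpoint can reuse it without going back to the
 * segment pool.  At most XHCI_MAX_RINGS_CACHED rings are kept; beyond that
 * the ring is freed immediately.
 */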
void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		unsigned int ep_index)
{
	int rings_cached;

	rings_cached = virt_dev->num_rings_cached;
	if (rings_cached < XHCI_MAX_RINGS_CACHED) {
		virt_dev->ring_cache[rings_cached] =
			virt_dev->eps[ep_index].ring;
		virt_dev->num_rings_cached++;
		xhci_dbg(xhci, "Cached old ring, "
				"%d ring%s cached\n",
				virt_dev->num_rings_cached,
				(virt_dev->num_rings_cached > 1) ? "s" : "");
	} else {
		xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
		xhci_dbg(xhci, "Ring cache full (%d rings), "
				"freeing ring\n",
				virt_dev->num_rings_cached);
	}
	virt_dev->eps[ep_index].ring = NULL;
}
/* Zero an endpoint ring (except for link TRBs) and move the enqueue and dequeue
 * pointers to the beginning of the ring.
 */
static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
		struct xhci_ring *ring)
{
	struct xhci_segment *seg = ring->first_seg;
	do {
		memset(seg->trbs, 0,
				sizeof(union xhci_trb)*TRBS_PER_SEGMENT);
		/* All endpoint rings have link TRBs */
		xhci_link_segments(xhci, seg, seg->next, true);
		seg = seg->next;
	} while (seg != ring->first_seg);
	xhci_initialize_ring_info(ring);
	/* td list should be empty since all URBs have been cancelled,
	 * but just in case...
	 */
	INIT_LIST_HEAD(&ring->td_list);
}
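/* Context container layout: a device context is one slot context followed by
 * 31 endpoint contexts.  Each entry is 32 bytes, or 64 bytes when the HC sets
 * the 64-byte context capability in hcc_params (hence 1024 vs. 2048 bytes
 * below).  An input context prepends one extra entry, the input control
 * context, which is why CTX_SIZE is added for XHCI_CTX_TYPE_INPUT.
 */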
#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)

struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
		int type, gfp_t flags)
{
	struct xhci_container_ctx *ctx = kzalloc(sizeof(*ctx), flags);
	if (!ctx)
		return NULL;

	BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT));
	ctx->type = type;
	ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
	if (type == XHCI_CTX_TYPE_INPUT)
		ctx->size += CTX_SIZE(xhci->hcc_params);

	ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma);
	if (!ctx->bytes) {
		kfree(ctx);
		return NULL;
	}
	memset(ctx->bytes, 0, ctx->size);
	return ctx;
}
void xhci_free_container_ctx(struct xhci_hcd *xhci,
		struct xhci_container_ctx *ctx)
{
	if (!ctx)
		return;
	dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
	kfree(ctx);
}
struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci,
		struct xhci_container_ctx *ctx)
{
	BUG_ON(ctx->type != XHCI_CTX_TYPE_INPUT);
	return (struct xhci_input_control_ctx *)ctx->bytes;
}
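/* The slot context is the first entry of a device context; in an input
 * context it sits one CTX_SIZE in, right after the input control context.
 */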
struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci,
		struct xhci_container_ctx *ctx)
{
	if (ctx->type == XHCI_CTX_TYPE_DEVICE)
		return (struct xhci_slot_ctx *)ctx->bytes;

	return (struct xhci_slot_ctx *)
		(ctx->bytes + CTX_SIZE(xhci->hcc_params));
}
struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
		struct xhci_container_ctx *ctx,
		unsigned int ep_index)
{
	/* increment ep index by offset of start of ep ctx array */
	ep_index++;
	if (ctx->type == XHCI_CTX_TYPE_INPUT)
		ep_index++;

	return (struct xhci_ep_ctx *)
		(ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
}
static void xhci_init_endpoint_timer(struct xhci_hcd *xhci,
		struct xhci_virt_ep *ep)
{
	init_timer(&ep->stop_cmd_timer);
	ep->stop_cmd_timer.data = (unsigned long) ep;
	ep->stop_cmd_timer.function = xhci_stop_endpoint_command_watchdog;
	ep->xhci = xhci;
}
/* All the xhci_tds in the ring's TD list should be freed at this point */
void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *dev;
	int i;

	/* Slot ID 0 is reserved */
	if (slot_id == 0 || !xhci->devs[slot_id])
		return;

	dev = xhci->devs[slot_id];
	xhci->dcbaa->dev_context_ptrs[slot_id] = 0;

	for (i = 0; i < 31; ++i)
		if (dev->eps[i].ring)
			xhci_ring_free(xhci, dev->eps[i].ring);

	if (dev->ring_cache) {
		for (i = 0; i < dev->num_rings_cached; i++)
			xhci_ring_free(xhci, dev->ring_cache[i]);
		kfree(dev->ring_cache);
	}

	if (dev->in_ctx)
		xhci_free_container_ctx(xhci, dev->in_ctx);
	if (dev->out_ctx)
		xhci_free_container_ctx(xhci, dev->out_ctx);

	kfree(xhci->devs[slot_id]);
	xhci->devs[slot_id] = NULL;
}
int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
		struct usb_device *udev, gfp_t flags)
{
	struct xhci_virt_device *dev;
	int i;

	/* Slot ID 0 is reserved */
	if (slot_id == 0 || xhci->devs[slot_id]) {
		xhci_warn(xhci, "Bad Slot ID %d\n", slot_id);
		return 0;
	}

	xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags);
	if (!xhci->devs[slot_id])
		return 0;
	dev = xhci->devs[slot_id];

	/* Allocate the (output) device context that will be used in the HC. */
	dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
	if (!dev->out_ctx)
		goto fail;

	xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
			(unsigned long long)dev->out_ctx->dma);

	/* Allocate the (input) device context for address device command */
	dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags);
	if (!dev->in_ctx)
		goto fail;

	xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
			(unsigned long long)dev->in_ctx->dma);

	/* Initialize the cancellation list and watchdog timers for each ep */
	for (i = 0; i < 31; i++) {
		xhci_init_endpoint_timer(xhci, &dev->eps[i]);
		INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
	}

	/* Allocate endpoint 0 ring */
	dev->eps[0].ring = xhci_ring_alloc(xhci, 1, true, flags);
	if (!dev->eps[0].ring)
		goto fail;

	/* Allocate pointers to the ring cache */
	dev->ring_cache = kzalloc(
			sizeof(struct xhci_ring *)*XHCI_MAX_RINGS_CACHED,
			flags);
	if (!dev->ring_cache)
		goto fail;
	dev->num_rings_cached = 0;

	init_completion(&dev->cmd_completion);
	INIT_LIST_HEAD(&dev->cmd_list);

	/* Point to output device context in dcbaa. */
	xhci->dcbaa->dev_context_ptrs[slot_id] = dev->out_ctx->dma;
	xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
			slot_id,
			&xhci->dcbaa->dev_context_ptrs[slot_id],
			(unsigned long long) xhci->dcbaa->dev_context_ptrs[slot_id]);

	return 1;
fail:
	xhci_free_virt_device(xhci, slot_id);
	return 0;
}
/* Setup an xHCI virtual device for a Set Address command */
int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
{
	struct xhci_virt_device *dev;
	struct xhci_ep_ctx *ep0_ctx;
	struct usb_device *top_dev;
	struct xhci_slot_ctx *slot_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;

	dev = xhci->devs[udev->slot_id];
	/* Slot ID 0 is reserved */
	if (udev->slot_id == 0 || !dev) {
		xhci_warn(xhci, "Slot ID %d is not assigned to this device\n",
				udev->slot_id);
		return -EINVAL;
	}
	ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
	ctrl_ctx = xhci_get_input_control_ctx(xhci, dev->in_ctx);
	slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);

	/* 2) New slot context and endpoint 0 context are valid */
	ctrl_ctx->add_flags = SLOT_FLAG | EP0_FLAG;

	/* 3) Only the control endpoint is valid - one endpoint context */
	slot_ctx->dev_info |= LAST_CTX(1);

	slot_ctx->dev_info |= (u32) udev->route;
	switch (udev->speed) {
	case USB_SPEED_SUPER:
		slot_ctx->dev_info |= (u32) SLOT_SPEED_SS;
		break;
	case USB_SPEED_HIGH:
		slot_ctx->dev_info |= (u32) SLOT_SPEED_HS;
		break;
	case USB_SPEED_FULL:
		slot_ctx->dev_info |= (u32) SLOT_SPEED_FS;
		break;
	case USB_SPEED_LOW:
		slot_ctx->dev_info |= (u32) SLOT_SPEED_LS;
		break;
	case USB_SPEED_WIRELESS:
		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
		return -EINVAL;
	default:
		/* Speed was set earlier, this shouldn't happen. */
		BUG();
	}
	/* Find the root hub port this device is under */
	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
			top_dev = top_dev->parent)
		/* Found device below root hub */;
	slot_ctx->dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum);
	xhci_dbg(xhci, "Set root hub portnum to %d\n", top_dev->portnum);

	/* Is this a LS/FS device under a HS hub? */
	if ((udev->speed == USB_SPEED_LOW || udev->speed == USB_SPEED_FULL) &&
			udev->tt) {
		slot_ctx->tt_info = udev->tt->hub->slot_id;
		slot_ctx->tt_info |= udev->ttport << 8;
		if (udev->tt->multi)
			slot_ctx->dev_info |= DEV_MTT;
	}
	xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
	xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);

	/* Step 4 - ring already allocated */
	/* Step 5 */
	ep0_ctx->ep_info2 = EP_TYPE(CTRL_EP);
	/*
	 * XXX: Not sure about wireless USB devices.
	 */
	switch (udev->speed) {
	case USB_SPEED_SUPER:
		ep0_ctx->ep_info2 |= MAX_PACKET(512);
		break;
	case USB_SPEED_HIGH:
	/* USB core guesses at a 64-byte max packet first for FS devices */
	case USB_SPEED_FULL:
		ep0_ctx->ep_info2 |= MAX_PACKET(64);
		break;
	case USB_SPEED_LOW:
		ep0_ctx->ep_info2 |= MAX_PACKET(8);
		break;
	case USB_SPEED_WIRELESS:
		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
		return -EINVAL;
	default:
		/* New speed? */
		BUG();
	}
	/* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
	ep0_ctx->ep_info2 |= MAX_BURST(0);
	ep0_ctx->ep_info2 |= ERROR_COUNT(3);

	ep0_ctx->deq =
		dev->eps[0].ring->first_seg->dma;
	ep0_ctx->deq |= dev->eps[0].ring->cycle_state;

	/* Steps 7 and 8 were done in xhci_alloc_virt_device() */

	return 0;
}
/* Return the polling or NAK interval.
 *
 * The polling interval is expressed in "microframes".  If xHCI's Interval field
 * is set to N, it will service the endpoint every 2^(Interval)*125us.
 *
 * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval
 * is set to 0.
 */
static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	unsigned int interval = 0;

	switch (udev->speed) {
	case USB_SPEED_HIGH:
		/* Max NAK rate */
		if (usb_endpoint_xfer_control(&ep->desc) ||
				usb_endpoint_xfer_bulk(&ep->desc))
			interval = ep->desc.bInterval;
		/* Fall through - SS and HS isoc/int have same decoding */
	case USB_SPEED_SUPER:
		if (usb_endpoint_xfer_int(&ep->desc) ||
				usb_endpoint_xfer_isoc(&ep->desc)) {
			if (ep->desc.bInterval == 0)
				interval = 0;
			else
				interval = ep->desc.bInterval - 1;
			if (interval > 15)
				interval = 15;
			if (interval != ep->desc.bInterval - 1)
				dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n",
						ep->desc.bEndpointAddress, 1 << interval);
		}
		break;
	/* Convert bInterval (in 1-255 frames) to microframes and round down to
	 * nearest power of 2.
	 */
	case USB_SPEED_FULL:
	case USB_SPEED_LOW:
		if (usb_endpoint_xfer_int(&ep->desc) ||
				usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = fls(8*ep->desc.bInterval) - 1;
			if (interval > 10)
				interval = 10;
			if (interval < 3)
				interval = 3;
			if ((1 << interval) != 8*ep->desc.bInterval)
				dev_warn(&udev->dev,
						"ep %#x - rounding interval"
						" to %d microframes, "
						"ep desc says %d microframes\n",
						ep->desc.bEndpointAddress,
						1 << interval,
						8*ep->desc.bInterval);
		}
		break;
	default:
		BUG();
	}
	return EP_INTERVAL(interval);
}
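/* Worked examples of the decoding above: a high speed interrupt endpoint with
 * bInterval = 4 yields Interval = 3, i.e. service every 2^3 * 125us = 1ms.
 * A full speed interrupt endpoint with bInterval = 32 frames yields
 * fls(8 * 32) - 1 = 8, i.e. 2^8 * 125us = 32ms, an exact match.
 */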
/* The "Mult" field in the endpoint context is only set for SuperSpeed devices.
 * High speed endpoint descriptors can define "the number of additional
 * transaction opportunities per microframe", but that goes in the Max Burst
 * endpoint context field.
 */
static inline u32 xhci_get_endpoint_mult(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	if (udev->speed != USB_SPEED_SUPER || !ep->ss_ep_comp)
		return 0;
	return ep->ss_ep_comp->desc.bmAttributes;
}
static inline u32 xhci_get_endpoint_type(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	int in;
	u32 type;

	in = usb_endpoint_dir_in(&ep->desc);
	if (usb_endpoint_xfer_control(&ep->desc)) {
		type = EP_TYPE(CTRL_EP);
	} else if (usb_endpoint_xfer_bulk(&ep->desc)) {
		if (in)
			type = EP_TYPE(BULK_IN_EP);
		else
			type = EP_TYPE(BULK_OUT_EP);
	} else if (usb_endpoint_xfer_isoc(&ep->desc)) {
		if (in)
			type = EP_TYPE(ISOC_IN_EP);
		else
			type = EP_TYPE(ISOC_OUT_EP);
	} else if (usb_endpoint_xfer_int(&ep->desc)) {
		if (in)
			type = EP_TYPE(INT_IN_EP);
		else
			type = EP_TYPE(INT_OUT_EP);
	} else {
		BUG();
	}
	return type;
}
/* Return the maximum endpoint service interval time (ESIT) payload.
 * Basically, this is the maxpacket size, multiplied by the burst size
 * and mult size.
 */
static inline u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	int max_burst;
	int max_packet;

	/* Only applies for interrupt or isochronous endpoints */
	if (usb_endpoint_xfer_control(&ep->desc) ||
			usb_endpoint_xfer_bulk(&ep->desc))
		return 0;

	if (udev->speed == USB_SPEED_SUPER) {
		if (ep->ss_ep_comp)
			return ep->ss_ep_comp->desc.wBytesPerInterval;
		xhci_warn(xhci, "WARN no SS endpoint companion descriptor.\n");
		/* Assume no bursts, no multiple opportunities to send. */
		return ep->desc.wMaxPacketSize;
	}

	max_packet = ep->desc.wMaxPacketSize & 0x3ff;
	max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11;
	/* A 0 in max burst means 1 transfer per ESIT */
	return max_packet * (max_burst + 1);
}
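/* Worked example: a high speed isoc endpoint with wMaxPacketSize = 0x0b40
 * decodes to a max packet of 0x340 (832 bytes) with one additional
 * transaction opportunity per microframe, so the max ESIT payload is
 * 832 * 2 = 1664 bytes.
 */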
int xhci_endpoint_init(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_device *udev,
		struct usb_host_endpoint *ep,
		gfp_t mem_flags)
{
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_ring *ep_ring;
	unsigned int max_packet;
	unsigned int max_burst;
	u32 max_esit_payload;

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

	/* Set up the endpoint ring */
	virt_dev->eps[ep_index].new_ring =
		xhci_ring_alloc(xhci, 1, true, mem_flags);
	if (!virt_dev->eps[ep_index].new_ring) {
		/* Attempt to use the ring cache */
		if (virt_dev->num_rings_cached == 0)
			return -ENOMEM;
		virt_dev->num_rings_cached--;
		virt_dev->eps[ep_index].new_ring =
			virt_dev->ring_cache[virt_dev->num_rings_cached];
		virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
		xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring);
	}
	ep_ring = virt_dev->eps[ep_index].new_ring;
	ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state;

	ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep);
	ep_ctx->ep_info |= EP_MULT(xhci_get_endpoint_mult(udev, ep));

	/* FIXME dig Mult and streams info out of ep companion desc */

	/* Allow 3 retries for everything but isoc;
	 * error count = 0 means infinite retries.
	 */
	if (!usb_endpoint_xfer_isoc(&ep->desc))
		ep_ctx->ep_info2 = ERROR_COUNT(3);
	else
		ep_ctx->ep_info2 = ERROR_COUNT(1);

	ep_ctx->ep_info2 |= xhci_get_endpoint_type(udev, ep);

	/* Set the max packet size and max burst */
	switch (udev->speed) {
	case USB_SPEED_SUPER:
		max_packet = ep->desc.wMaxPacketSize;
		ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
		/* dig out max burst from ep companion desc */
		if (!ep->ss_ep_comp) {
			xhci_warn(xhci, "WARN no SS endpoint companion descriptor.\n");
			max_burst = 0;
		} else {
			max_burst = ep->ss_ep_comp->desc.bMaxBurst;
		}
		ep_ctx->ep_info2 |= MAX_BURST(max_burst);
		break;
	case USB_SPEED_HIGH:
		/* bits 11:12 specify the number of additional transaction
		 * opportunities per microframe (USB 2.0, section 9.6.6)
		 */
		if (usb_endpoint_xfer_isoc(&ep->desc) ||
				usb_endpoint_xfer_int(&ep->desc)) {
			max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11;
			ep_ctx->ep_info2 |= MAX_BURST(max_burst);
		}
		/* Fall through */
	case USB_SPEED_FULL:
	case USB_SPEED_LOW:
		max_packet = ep->desc.wMaxPacketSize & 0x3ff;
		ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
		break;
	default:
		BUG();
	}
	max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep);
	ep_ctx->tx_info = MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload);

	/*
	 * XXX no idea how to calculate the average TRB buffer length for bulk
	 * endpoints, as the driver gives us no clue how big each scatter gather
	 * list entry (or buffer) is going to be.
	 *
	 * For isochronous and interrupt endpoints, we set it to the max
	 * available, until we have new API in the USB core to allow drivers to
	 * declare how much bandwidth they actually need.
	 *
	 * Normally, it would be calculated by taking the total of the buffer
	 * lengths in the TD and then dividing by the number of TRBs in a TD,
	 * including link TRBs, No-op TRBs, and Event data TRBs.  Since we don't
	 * use Event Data TRBs, and we don't chain in a link TRB on short
	 * transfers, we're basically dividing by 1.
	 */
	ep_ctx->tx_info |= AVG_TRB_LENGTH_FOR_EP(max_esit_payload);

	/* FIXME Debug endpoint context */
	return 0;
}
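/* Clear an endpoint's copy of the input endpoint context before it is
 * reconfigured or dropped; the ring itself is deliberately left alone (see
 * the comment at the end of the function).
 */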
void xhci_endpoint_zero(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_host_endpoint *ep)
{
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

	ep_ctx->ep_info = 0;
	ep_ctx->ep_info2 = 0;
	ep_ctx->deq = 0;
	ep_ctx->tx_info = 0;
	/* Don't free the endpoint ring until the set interface or configuration
	 * request succeeds.
	 */
}
/* Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
 * Useful when you want to change one particular aspect of the endpoint and then
 * issue a configure endpoint command.
 */
void xhci_endpoint_copy(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx,
		unsigned int ep_index)
{
	struct xhci_ep_ctx *out_ep_ctx;
	struct xhci_ep_ctx *in_ep_ctx;

	out_ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);

	in_ep_ctx->ep_info = out_ep_ctx->ep_info;
	in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
	in_ep_ctx->deq = out_ep_ctx->deq;
	in_ep_ctx->tx_info = out_ep_ctx->tx_info;
}
/* Copy output xhci_slot_ctx to the input xhci_slot_ctx.
 * Useful when you want to change one particular aspect of the endpoint and then
 * issue a configure endpoint command.  Only the context entries field matters,
 * but we'll copy the whole thing anyway.
 */
void xhci_slot_copy(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx)
{
	struct xhci_slot_ctx *in_slot_ctx;
	struct xhci_slot_ctx *out_slot_ctx;

	in_slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	out_slot_ctx = xhci_get_slot_ctx(xhci, out_ctx);

	in_slot_ctx->dev_info = out_slot_ctx->dev_info;
	in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
	in_slot_ctx->tt_info = out_slot_ctx->tt_info;
	in_slot_ctx->dev_state = out_slot_ctx->dev_state;
}
/* Set up the scratchpad buffer array and scratchpad buffers, if needed. */
static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
{
	int i;
	struct device *dev = xhci_to_hcd(xhci)->self.controller;
	int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);

	xhci_dbg(xhci, "Allocating %d scratchpad buffers\n", num_sp);

	if (!num_sp)
		return 0;

	xhci->scratchpad = kzalloc(sizeof(*xhci->scratchpad), flags);
	if (!xhci->scratchpad)
		goto fail_sp;

	xhci->scratchpad->sp_array =
		pci_alloc_consistent(to_pci_dev(dev),
				num_sp * sizeof(u64),
				&xhci->scratchpad->sp_dma);
	if (!xhci->scratchpad->sp_array)
		goto fail_sp2;

	xhci->scratchpad->sp_buffers = kzalloc(sizeof(void *) * num_sp, flags);
	if (!xhci->scratchpad->sp_buffers)
		goto fail_sp3;

	xhci->scratchpad->sp_dma_buffers =
		kzalloc(sizeof(dma_addr_t) * num_sp, flags);
	if (!xhci->scratchpad->sp_dma_buffers)
		goto fail_sp4;

	xhci->dcbaa->dev_context_ptrs[0] = xhci->scratchpad->sp_dma;
	for (i = 0; i < num_sp; i++) {
		dma_addr_t dma;
		void *buf = pci_alloc_consistent(to_pci_dev(dev),
				xhci->page_size, &dma);
		if (!buf)
			goto fail_sp5;

		xhci->scratchpad->sp_array[i] = dma;
		xhci->scratchpad->sp_buffers[i] = buf;
		xhci->scratchpad->sp_dma_buffers[i] = dma;
	}

	return 0;

 fail_sp5:
	for (i = i - 1; i >= 0; i--) {
		pci_free_consistent(to_pci_dev(dev), xhci->page_size,
				xhci->scratchpad->sp_buffers[i],
				xhci->scratchpad->sp_dma_buffers[i]);
	}
	kfree(xhci->scratchpad->sp_dma_buffers);

 fail_sp4:
	kfree(xhci->scratchpad->sp_buffers);

 fail_sp3:
	pci_free_consistent(to_pci_dev(dev), num_sp * sizeof(u64),
			xhci->scratchpad->sp_array,
			xhci->scratchpad->sp_dma);

 fail_sp2:
	kfree(xhci->scratchpad);
	xhci->scratchpad = NULL;

 fail_sp:
	return -ENOMEM;
}
static void scratchpad_free(struct xhci_hcd *xhci)
{
	int num_sp;
	int i;
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	if (!xhci->scratchpad)
		return;

	num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);

	for (i = 0; i < num_sp; i++) {
		pci_free_consistent(pdev, xhci->page_size,
				xhci->scratchpad->sp_buffers[i],
				xhci->scratchpad->sp_dma_buffers[i]);
	}
	kfree(xhci->scratchpad->sp_dma_buffers);
	kfree(xhci->scratchpad->sp_buffers);
	pci_free_consistent(pdev, num_sp * sizeof(u64),
			xhci->scratchpad->sp_array,
			xhci->scratchpad->sp_dma);
	kfree(xhci->scratchpad);
	xhci->scratchpad = NULL;
}
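/* Allocate a command structure, optionally with its own input context (for
 * commands such as Configure Endpoint that carry one) and its own completion
 * (for callers that sleep until the command finishes).
 */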
struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
		bool allocate_in_ctx, bool allocate_completion,
		gfp_t mem_flags)
{
	struct xhci_command *command;

	command = kzalloc(sizeof(*command), mem_flags);
	if (!command)
		return NULL;

	if (allocate_in_ctx) {
		command->in_ctx =
			xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT,
					mem_flags);
		if (!command->in_ctx) {
			kfree(command);
			return NULL;
		}
	}

	if (allocate_completion) {
		command->completion =
			kzalloc(sizeof(struct completion), mem_flags);
		if (!command->completion) {
			xhci_free_container_ctx(xhci, command->in_ctx);
			kfree(command);
			return NULL;
		}
		init_completion(command->completion);
	}

	command->status = 0;
	INIT_LIST_HEAD(&command->cmd_list);
	return command;
}
void xhci_free_command(struct xhci_hcd *xhci,
		struct xhci_command *command)
{
	xhci_free_container_ctx(xhci,
			command->in_ctx);
	kfree(command->completion);
	kfree(command);
}
void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	int size;
	int i;

	/* Free the Event Ring Segment Table and the actual Event Ring */
	if (xhci->ir_set) {
		xhci_writel(xhci, 0, &xhci->ir_set->erst_size);
		xhci_write_64(xhci, 0, &xhci->ir_set->erst_base);
		xhci_write_64(xhci, 0, &xhci->ir_set->erst_dequeue);
	}
	size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
	if (xhci->erst.entries)
		pci_free_consistent(pdev, size,
				xhci->erst.entries, xhci->erst.erst_dma_addr);
	xhci->erst.entries = NULL;
	xhci_dbg(xhci, "Freed ERST\n");
	if (xhci->event_ring)
		xhci_ring_free(xhci, xhci->event_ring);
	xhci->event_ring = NULL;
	xhci_dbg(xhci, "Freed event ring\n");

	xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);
	if (xhci->cmd_ring)
		xhci_ring_free(xhci, xhci->cmd_ring);
	xhci->cmd_ring = NULL;
	xhci_dbg(xhci, "Freed command ring\n");

	for (i = 1; i < MAX_HC_SLOTS; ++i)
		xhci_free_virt_device(xhci, i);

	if (xhci->segment_pool)
		dma_pool_destroy(xhci->segment_pool);
	xhci->segment_pool = NULL;
	xhci_dbg(xhci, "Freed segment pool\n");

	if (xhci->device_pool)
		dma_pool_destroy(xhci->device_pool);
	xhci->device_pool = NULL;
	xhci_dbg(xhci, "Freed device context pool\n");

	xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
	if (xhci->dcbaa)
		pci_free_consistent(pdev, sizeof(*xhci->dcbaa),
				xhci->dcbaa, xhci->dcbaa->dma);
	xhci->dcbaa = NULL;

	scratchpad_free(xhci);
	xhci->page_size = 0;
	xhci->page_shift = 0;
}
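/* Run a single TRB math test case: translate the TD's start and end TRBs to
 * DMA addresses, ask trb_in_td() whether input_dma falls within that TD, and
 * warn (and return -1) if the answer differs from the expected segment.
 */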
static int xhci_test_trb_in_td(struct xhci_hcd *xhci,
		struct xhci_segment *input_seg,
		union xhci_trb *start_trb,
		union xhci_trb *end_trb,
		dma_addr_t input_dma,
		struct xhci_segment *result_seg,
		char *test_name, int test_number)
{
	unsigned long long start_dma;
	unsigned long long end_dma;
	struct xhci_segment *seg;

	start_dma = xhci_trb_virt_to_dma(input_seg, start_trb);
	end_dma = xhci_trb_virt_to_dma(input_seg, end_trb);

	seg = trb_in_td(input_seg, start_trb, end_trb, input_dma);
	if (seg != result_seg) {
		xhci_warn(xhci, "WARN: %s TRB math test %d failed!\n",
				test_name, test_number);
		xhci_warn(xhci, "Tested TRB math w/ seg %p and "
				"input DMA 0x%llx\n",
				input_seg,
				(unsigned long long) input_dma);
		xhci_warn(xhci, "starting TRB %p (0x%llx DMA), "
				"ending TRB %p (0x%llx DMA)\n",
				start_trb, start_dma,
				end_trb, end_dma);
		xhci_warn(xhci, "Expected seg %p, got seg %p\n",
				result_seg, seg);
		return -1;
	}
	return 0;
}
/* TRB math checks for xhci_trb_in_td(), using the command and event rings. */
static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags)
{
	struct {
		dma_addr_t input_dma;
		struct xhci_segment *result_seg;
	} simple_test_vector [] = {
		/* A zeroed DMA field should fail */
		{ 0, NULL },
		/* One TRB before the ring start should fail */
		{ xhci->event_ring->first_seg->dma - 16, NULL },
		/* One byte before the ring start should fail */
		{ xhci->event_ring->first_seg->dma - 1, NULL },
		/* Starting TRB should succeed */
		{ xhci->event_ring->first_seg->dma, xhci->event_ring->first_seg },
		/* Ending TRB should succeed */
		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16,
			xhci->event_ring->first_seg },
		/* One byte after the ring end should fail */
		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16 + 1, NULL },
		/* One TRB after the ring end should fail */
		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT)*16, NULL },
		/* An address of all ones should fail */
		{ (dma_addr_t) (~0), NULL },
	};
	struct {
		struct xhci_segment *input_seg;
		union xhci_trb *start_trb;
		union xhci_trb *end_trb;
		dma_addr_t input_dma;
		struct xhci_segment *result_seg;
	} complex_test_vector [] = {
		/* Test feeding a valid DMA address from a different ring */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = xhci->event_ring->first_seg->trbs,
			.end_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
			.input_dma = xhci->cmd_ring->first_seg->dma,
			.result_seg = NULL,
		},
		/* Test feeding a valid end TRB from a different ring */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = xhci->event_ring->first_seg->trbs,
			.end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
			.input_dma = xhci->cmd_ring->first_seg->dma,
			.result_seg = NULL,
		},
		/* Test feeding a valid start and end TRB from a different ring */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = xhci->cmd_ring->first_seg->trbs,
			.end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
			.input_dma = xhci->cmd_ring->first_seg->dma,
			.result_seg = NULL,
		},
		/* TRB in this ring, but after this TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[0],
			.end_trb = &xhci->event_ring->first_seg->trbs[3],
			.input_dma = xhci->event_ring->first_seg->dma + 4*16,
			.result_seg = NULL,
		},
		/* TRB in this ring, but before this TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[3],
			.end_trb = &xhci->event_ring->first_seg->trbs[6],
			.input_dma = xhci->event_ring->first_seg->dma + 2*16,
			.result_seg = NULL,
		},
		/* TRB in this ring, but after this wrapped TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
			.end_trb = &xhci->event_ring->first_seg->trbs[1],
			.input_dma = xhci->event_ring->first_seg->dma + 2*16,
			.result_seg = NULL,
		},
		/* TRB in this ring, but before this wrapped TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
			.end_trb = &xhci->event_ring->first_seg->trbs[1],
			.input_dma = xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 4)*16,
			.result_seg = NULL,
		},
		/* TRB not in this ring, and we have a wrapped TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
			.end_trb = &xhci->event_ring->first_seg->trbs[1],
			.input_dma = xhci->cmd_ring->first_seg->dma + 2*16,
			.result_seg = NULL,
		},
	};

	unsigned int num_tests;
	int i, ret;

	num_tests = ARRAY_SIZE(simple_test_vector);
	for (i = 0; i < num_tests; i++) {
		ret = xhci_test_trb_in_td(xhci,
				xhci->event_ring->first_seg,
				xhci->event_ring->first_seg->trbs,
				&xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
				simple_test_vector[i].input_dma,
				simple_test_vector[i].result_seg,
				"Simple", i);
		if (ret < 0)
			return ret;
	}
	num_tests = ARRAY_SIZE(complex_test_vector);
	for (i = 0; i < num_tests; i++) {
		ret = xhci_test_trb_in_td(xhci,
				complex_test_vector[i].input_seg,
				complex_test_vector[i].start_trb,
				complex_test_vector[i].end_trb,
				complex_test_vector[i].input_dma,
				complex_test_vector[i].result_seg,
				"Complex", i);
		if (ret < 0)
			return ret;
	}
	xhci_dbg(xhci, "TRB math tests passed.\n");
	return 0;
}
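/* xhci_mem_init() builds the host's data structures in dependency order:
 * page size and slot count first, then the DCBAA, the segment and context DMA
 * pools, the command ring, the doorbell array and interrupter register
 * pointers, the event ring with its segment table (ERST), and finally the
 * scratchpad buffers the controller keeps for its own bookkeeping.
 */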
int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
	dma_addr_t dma;
	struct device *dev = xhci_to_hcd(xhci)->self.controller;
	unsigned int val, val2;
	u64 val_64;
	struct xhci_segment *seg;
	u32 page_size;
	int i;

	page_size = xhci_readl(xhci, &xhci->op_regs->page_size);
	xhci_dbg(xhci, "Supported page size register = 0x%x\n", page_size);
	for (i = 0; i < 16; i++) {
		if ((0x1 & page_size) != 0)
			break;
		page_size = page_size >> 1;
	}
	if (i < 16)
		xhci_dbg(xhci, "Supported page size of %iK\n", (1 << (i+12)) / 1024);
	else
		xhci_warn(xhci, "WARN: no supported page size\n");
	/* Use 4K pages, since that's common and the minimum the HC supports */
	xhci->page_shift = 12;
	xhci->page_size = 1 << xhci->page_shift;
	xhci_dbg(xhci, "HCD page size set to %iK\n", xhci->page_size / 1024);

	/*
	 * Program the Number of Device Slots Enabled field in the CONFIG
	 * register with the max value of slots the HC can handle.
	 */
	val = HCS_MAX_SLOTS(xhci_readl(xhci, &xhci->cap_regs->hcs_params1));
	xhci_dbg(xhci, "// xHC can handle at most %d device slots.\n",
			(unsigned int) val);
	val2 = xhci_readl(xhci, &xhci->op_regs->config_reg);
	val |= (val2 & ~HCS_SLOTS_MASK);
	xhci_dbg(xhci, "// Setting Max device slots reg = 0x%x.\n",
			(unsigned int) val);
	xhci_writel(xhci, val, &xhci->op_regs->config_reg);

	/*
	 * Section 5.4.8 - doorbell array must be
	 * "physically contiguous and 64-byte (cache line) aligned".
	 */
	xhci->dcbaa = pci_alloc_consistent(to_pci_dev(dev),
			sizeof(*xhci->dcbaa), &dma);
	if (!xhci->dcbaa)
		goto fail;
	memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
	xhci->dcbaa->dma = dma;
	xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n",
			(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
	xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);

	/*
	 * Initialize the ring segment pool.  The ring must be a contiguous
	 * structure comprised of TRBs.  The TRBs must be 16 byte aligned,
	 * however, the command ring segment needs 64-byte aligned segments,
	 * so we pick the greater alignment need.
	 */
	xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
			SEGMENT_SIZE, 64, xhci->page_size);

	/* See Table 46 and Note on Figure 55 */
	xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
			2112, 64, xhci->page_size);
	if (!xhci->segment_pool || !xhci->device_pool)
		goto fail;

	/* Set up the command ring to have one segment for now. */
	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, flags);
	if (!xhci->cmd_ring)
		goto fail;
	xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
	xhci_dbg(xhci, "First segment DMA is 0x%llx\n",
			(unsigned long long)xhci->cmd_ring->first_seg->dma);

	/* Set the address in the Command Ring Control register */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		(xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
		xhci->cmd_ring->cycle_state;
	xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n",
			(unsigned long long) val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);

	val = xhci_readl(xhci, &xhci->cap_regs->db_off);
	val &= DBOFF_MASK;
	xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x"
			" from cap regs base addr\n", val);
	xhci->dba = (void *) xhci->cap_regs + val;
	xhci_dbg_regs(xhci);
	xhci_print_run_regs(xhci);
	/* Set ir_set to interrupt register set 0 */
	xhci->ir_set = (void *) xhci->run_regs->ir_set;

	/*
	 * Event ring setup: Allocate a normal ring, but also setup
	 * the event ring segment table (ERST).  Section 4.9.3.
	 */
	xhci_dbg(xhci, "// Allocating event ring\n");
	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, flags);
	if (!xhci->event_ring)
		goto fail;
	if (xhci_check_trb_in_td_math(xhci, flags) < 0)
		goto fail;

	xhci->erst.entries = pci_alloc_consistent(to_pci_dev(dev),
			sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS, &dma);
	if (!xhci->erst.entries)
		goto fail;
	xhci_dbg(xhci, "// Allocated event ring segment table at 0x%llx\n",
			(unsigned long long)dma);

	memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
	xhci->erst.num_entries = ERST_NUM_SEGS;
	xhci->erst.erst_dma_addr = dma;
	xhci_dbg(xhci, "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx\n",
			xhci->erst.num_entries,
			xhci->erst.entries,
			(unsigned long long)xhci->erst.erst_dma_addr);

	/* set ring base address and size for each segment table entry */
	for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
		struct xhci_erst_entry *entry = &xhci->erst.entries[val];
		entry->seg_addr = seg->dma;
		entry->seg_size = TRBS_PER_SEGMENT;
		entry->rsvd = 0;
		seg = seg->next;
	}

	/* set ERST count with the number of entries in the segment table */
	val = xhci_readl(xhci, &xhci->ir_set->erst_size);
	val &= ERST_SIZE_MASK;
	val |= ERST_NUM_SEGS;
	xhci_dbg(xhci, "// Write ERST size = %i to ir_set 0 (some bits preserved)\n",
			val);
	xhci_writel(xhci, val, &xhci->ir_set->erst_size);

	xhci_dbg(xhci, "// Set ERST entries to point to event ring.\n");
	/* set the segment table base address */
	xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n",
			(unsigned long long)xhci->erst.erst_dma_addr);
	val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
	val_64 &= ERST_PTR_MASK;
	val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
	xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);

	/* Set the event ring dequeue address */
	xhci_set_hc_event_deq(xhci);
	xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
	xhci_print_ir_set(xhci, xhci->ir_set, 0);

	/*
	 * XXX: Might need to set the Interrupter Moderation Register to
	 * something other than the default (~1ms minimum between interrupts).
	 * See section 5.5.1.2.
	 */
	init_completion(&xhci->addr_dev);
	for (i = 0; i < MAX_HC_SLOTS; ++i)
		xhci->devs[i] = NULL;

	if (scratchpad_alloc(xhci, flags))
		goto fail;

	return 0;

fail:
	xhci_warn(xhci, "Couldn't initialize memory\n");
	xhci_mem_cleanup(xhci);
	return -ENOMEM;
}