drivers/usb/host/xhci-mem.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * xHCI host controller driver
4  *
5  * Copyright (C) 2008 Intel Corp.
6  *
7  * Author: Sarah Sharp
8  * Some code borrowed from the Linux EHCI driver.
9  */
10
11 #include <linux/usb.h>
12 #include <linux/overflow.h>
13 #include <linux/pci.h>
14 #include <linux/slab.h>
15 #include <linux/dmapool.h>
16 #include <linux/dma-mapping.h>
17
18 #include "xhci.h"
19 #include "xhci-trace.h"
20 #include "xhci-debugfs.h"
21
22 /*
23  * Allocates a generic ring segment from the ring pool, sets the dma address,
24  * initializes the segment to zero, and sets the private next pointer to NULL.
25  *
26  * Section 4.11.1.1:
27  * "All components of all Command and Transfer TRBs shall be initialized to '0'"
28  */
29 static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
30                                                unsigned int cycle_state,
31                                                unsigned int max_packet,
32                                                unsigned int num,
33                                                gfp_t flags)
34 {
35         struct xhci_segment *seg;
36         dma_addr_t      dma;
37         int             i;
38         struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
39
40         seg = kzalloc_node(sizeof(*seg), flags, dev_to_node(dev));
41         if (!seg)
42                 return NULL;
43
44         seg->trbs = dma_pool_zalloc(xhci->segment_pool, flags, &dma);
45         if (!seg->trbs) {
46                 kfree(seg);
47                 return NULL;
48         }
49
50         if (max_packet) {
51                 seg->bounce_buf = kzalloc_node(max_packet, flags,
52                                         dev_to_node(dev));
53                 if (!seg->bounce_buf) {
54                         dma_pool_free(xhci->segment_pool, seg->trbs, dma);
55                         kfree(seg);
56                         return NULL;
57                 }
58         }
59         /* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
60         if (cycle_state == 0) {
61                 for (i = 0; i < TRBS_PER_SEGMENT; i++)
62                         seg->trbs[i].link.control = cpu_to_le32(TRB_CYCLE);
63         }
64         seg->num = num;
65         seg->dma = dma;
66         seg->next = NULL;
67
68         return seg;
69 }
70
71 static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
72 {
73         if (seg->trbs) {
74                 dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
75                 seg->trbs = NULL;
76         }
77         kfree(seg->bounce_buf);
78         kfree(seg);
79 }
80
81 static void xhci_free_segments_for_ring(struct xhci_hcd *xhci,
82                                 struct xhci_segment *first)
83 {
84         struct xhci_segment *seg;
85
86         seg = first->next;
87         while (seg != first) {
88                 struct xhci_segment *next = seg->next;
89                 xhci_segment_free(xhci, seg);
90                 seg = next;
91         }
92         xhci_segment_free(xhci, first);
93 }
94
95 /*
96  * Make the prev segment point to the next segment.
97  *
98  * Change the last TRB in the prev segment to be a Link TRB which points to the
99  * DMA address of the next segment.  The caller needs to set any Link TRB
100  * related flags, such as End TRB, Toggle Cycle, and no snoop.
101  */
102 static void xhci_link_segments(struct xhci_segment *prev,
103                                struct xhci_segment *next,
104                                enum xhci_ring_type type, bool chain_links)
105 {
106         u32 val;
107
108         if (!prev || !next)
109                 return;
110         prev->next = next;
111         if (type != TYPE_EVENT) {
112                 prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr =
113                         cpu_to_le64(next->dma);
114
115                 /* Set the last TRB in the segment to have a TRB type ID of Link TRB */
116                 val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
117                 val &= ~TRB_TYPE_BITMASK;
118                 val |= TRB_TYPE(TRB_LINK);
119                 if (chain_links)
120                         val |= TRB_CHAIN;
121                 prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
122         }
123 }
124
125 /*
126  * Link the ring to the new segments.
127  * Set Toggle Cycle for the new ring if needed.
128  */
129 static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring,
130                 struct xhci_segment *first, struct xhci_segment *last,
131                 unsigned int num_segs)
132 {
133         struct xhci_segment *next, *seg;
134         bool chain_links;
135
136         if (!ring || !first || !last)
137                 return;
138
139         /* Set chain bit for 0.95 hosts, and for isoc rings on AMD 0.96 host */
140         chain_links = !!(xhci_link_trb_quirk(xhci) ||
141                          (ring->type == TYPE_ISOC &&
142                           (xhci->quirks & XHCI_AMD_0x96_HOST)));
143
144         next = ring->enq_seg->next;
145         xhci_link_segments(ring->enq_seg, first, ring->type, chain_links);
146         xhci_link_segments(last, next, ring->type, chain_links);
147         ring->num_segs += num_segs;
148
149         if (ring->enq_seg == ring->last_seg) {
150                 if (ring->type != TYPE_EVENT) {
151                         ring->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control
152                                 &= ~cpu_to_le32(LINK_TOGGLE);
153                         last->trbs[TRBS_PER_SEGMENT-1].link.control
154                                 |= cpu_to_le32(LINK_TOGGLE);
155                 }
156                 ring->last_seg = last;
157         }
158
159         for (seg = last; seg != ring->last_seg; seg = seg->next)
160                 seg->next->num = seg->num + 1;
161 }
162
163 /*
164  * We need a radix tree for mapping physical addresses of TRBs to which stream
165  * ID they belong to.  We need to do this because the host controller won't tell
166  * us which stream ring the TRB came from.  We could store the stream ID in an
167  * event data TRB, but that doesn't help us for the cancellation case, since the
168  * endpoint may stop before it reaches that event data TRB.
169  *
170  * The radix tree maps the upper portion of the TRB DMA address to a ring
171  * segment that has the same upper portion of DMA addresses.  For example, say I
172  * have segments of size 1KB, that are always 1KB aligned.  A segment may
173  * start at 0x10c91000 and end at 0x10c913f0.  If I use the upper 10 bits, the
174  * key to the stream ID is 0x43244.  I can use the DMA address of the TRB to
175  * pass the radix tree a key to get the right stream ID:
176  *
177  *      0x10c90fff >> 10 = 0x43243
178  *      0x10c912c0 >> 10 = 0x43244
179  *      0x10c91400 >> 10 = 0x43245
180  *
181  * Obviously, only those TRBs with DMA addresses that are within the segment
182  * will make the radix tree return the stream ID for that ring.
183  *
184  * Caveats for the radix tree:
185  *
186  * The radix tree uses an unsigned long as a key pair.  On 32-bit systems, an
187  * unsigned long will be 32-bits; on a 64-bit system an unsigned long will be
188  * 64-bits.  Since we only request 32-bit DMA addresses, we can use that as the
189  * key on 32-bit or 64-bit systems (it would also be fine if we asked for 64-bit
190  * PCI DMA addresses on a 64-bit system).  There might be a problem on 32-bit
191  * extended systems (where the DMA address can be bigger than 32-bits),
192  * if we allow the PCI dma mask to be bigger than 32-bits.  So don't do that.
193  */
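/*
 * A further worked example of the same keying scheme, assuming the segment
 * size used by mainline xHCI (TRB_SEGMENT_SIZE of 4 KB, i.e. a shift of 12,
 * rather than the illustrative 1 KB / shift-10 numbers above): a segment at
 * DMA address 0x10c91000 maps to
 *
 *      key = 0x10c91000 >> 12 = 0x10c91
 *
 * and every TRB address inside that 4 KB segment yields the same key, so a
 * radix_tree_lookup() on (trb_dma >> TRB_SEGMENT_SHIFT) returns the ring
 * that owns the TRB.
 */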
194 static int xhci_insert_segment_mapping(struct radix_tree_root *trb_address_map,
195                 struct xhci_ring *ring,
196                 struct xhci_segment *seg,
197                 gfp_t mem_flags)
198 {
199         unsigned long key;
200         int ret;
201
202         key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
203         /* Skip any segments that were already added. */
204         if (radix_tree_lookup(trb_address_map, key))
205                 return 0;
206
207         ret = radix_tree_maybe_preload(mem_flags);
208         if (ret)
209                 return ret;
210         ret = radix_tree_insert(trb_address_map,
211                         key, ring);
212         radix_tree_preload_end();
213         return ret;
214 }
215
216 static void xhci_remove_segment_mapping(struct radix_tree_root *trb_address_map,
217                 struct xhci_segment *seg)
218 {
219         unsigned long key;
220
221         key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
222         if (radix_tree_lookup(trb_address_map, key))
223                 radix_tree_delete(trb_address_map, key);
224 }
225
226 static int xhci_update_stream_segment_mapping(
227                 struct radix_tree_root *trb_address_map,
228                 struct xhci_ring *ring,
229                 struct xhci_segment *first_seg,
230                 struct xhci_segment *last_seg,
231                 gfp_t mem_flags)
232 {
233         struct xhci_segment *seg;
234         struct xhci_segment *failed_seg;
235         int ret;
236
237         if (WARN_ON_ONCE(trb_address_map == NULL))
238                 return 0;
239
240         seg = first_seg;
241         do {
242                 ret = xhci_insert_segment_mapping(trb_address_map,
243                                 ring, seg, mem_flags);
244                 if (ret)
245                         goto remove_streams;
246                 if (seg == last_seg)
247                         return 0;
248                 seg = seg->next;
249         } while (seg != first_seg);
250
251         return 0;
252
253 remove_streams:
254         failed_seg = seg;
255         seg = first_seg;
256         do {
257                 xhci_remove_segment_mapping(trb_address_map, seg);
258                 if (seg == failed_seg)
259                         return ret;
260                 seg = seg->next;
261         } while (seg != first_seg);
262
263         return ret;
264 }
265
266 static void xhci_remove_stream_mapping(struct xhci_ring *ring)
267 {
268         struct xhci_segment *seg;
269
270         if (WARN_ON_ONCE(ring->trb_address_map == NULL))
271                 return;
272
273         seg = ring->first_seg;
274         do {
275                 xhci_remove_segment_mapping(ring->trb_address_map, seg);
276                 seg = seg->next;
277         } while (seg != ring->first_seg);
278 }
279
280 static int xhci_update_stream_mapping(struct xhci_ring *ring, gfp_t mem_flags)
281 {
282         return xhci_update_stream_segment_mapping(ring->trb_address_map, ring,
283                         ring->first_seg, ring->last_seg, mem_flags);
284 }
285
286 /* XXX: Do we need the hcd structure in all these functions? */
287 void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
288 {
289         if (!ring)
290                 return;
291
292         trace_xhci_ring_free(ring);
293
294         if (ring->first_seg) {
295                 if (ring->type == TYPE_STREAM)
296                         xhci_remove_stream_mapping(ring);
297                 xhci_free_segments_for_ring(xhci, ring->first_seg);
298         }
299
300         kfree(ring);
301 }
302
303 void xhci_initialize_ring_info(struct xhci_ring *ring,
304                                unsigned int cycle_state)
305 {
306         /* The ring is empty, so the enqueue pointer == dequeue pointer */
307         ring->enqueue = ring->first_seg->trbs;
308         ring->enq_seg = ring->first_seg;
309         ring->dequeue = ring->enqueue;
310         ring->deq_seg = ring->first_seg;
311         /* The ring is initialized to 0. The producer must write 1 to the cycle
312          * bit to hand over ownership of the TRB, so PCS = 1.  The consumer must
313          * compare CCS to the cycle bit to check ownership, so CCS = 1.
314          *
315          * New rings are initialized with cycle state equal to 1; if we are
316          * handling ring expansion, set the cycle state equal to the old ring.
317          */
318         ring->cycle_state = cycle_state;
319
320         /*
321          * Each segment has a link TRB, and we leave an extra TRB for SW
322          * accounting purposes
323          */
324         ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
325 }
326 EXPORT_SYMBOL_GPL(xhci_initialize_ring_info);
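/*
 * A quick worked example of the accounting above, assuming the usual
 * TRBS_PER_SEGMENT of 256: a freshly allocated two-segment ring has
 * 2 * (256 - 1) - 1 = 509 usable TRBs, because the last TRB of each segment
 * is a link TRB and one further TRB is held back for software accounting.
 */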
327
328 /* Allocate segments and link them for a ring */
329 static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
330                 struct xhci_segment **first, struct xhci_segment **last,
331                 unsigned int num_segs, unsigned int num,
332                 unsigned int cycle_state, enum xhci_ring_type type,
333                 unsigned int max_packet, gfp_t flags)
334 {
335         struct xhci_segment *prev;
336         bool chain_links;
337
338         /* Set chain bit for 0.95 hosts, and for isoc rings on AMD 0.96 host */
339         chain_links = !!(xhci_link_trb_quirk(xhci) ||
340                          (type == TYPE_ISOC &&
341                           (xhci->quirks & XHCI_AMD_0x96_HOST)));
342
343         prev = xhci_segment_alloc(xhci, cycle_state, max_packet, num, flags);
344         if (!prev)
345                 return -ENOMEM;
346         num++;
347
348         *first = prev;
349         while (num < num_segs) {
350                 struct xhci_segment     *next;
351
352                 next = xhci_segment_alloc(xhci, cycle_state, max_packet, num,
353                                           flags);
354                 if (!next) {
355                         prev = *first;
356                         while (prev) {
357                                 next = prev->next;
358                                 xhci_segment_free(xhci, prev);
359                                 prev = next;
360                         }
361                         return -ENOMEM;
362                 }
363                 xhci_link_segments(prev, next, type, chain_links);
364
365                 prev = next;
366                 num++;
367         }
368         xhci_link_segments(prev, *first, type, chain_links);
369         *last = prev;
370
371         return 0;
372 }
373
374 /*
375  * Create a new ring with zero or more segments.
376  *
377  * Link each segment together into a ring.
378  * Set the end flag and the cycle toggle bit on the last segment.
379  * See section 4.9.1 and figures 15 and 16.
380  */
381 struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
382                 unsigned int num_segs, unsigned int cycle_state,
383                 enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
384 {
385         struct xhci_ring        *ring;
386         int ret;
387         struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
388
389         ring = kzalloc_node(sizeof(*ring), flags, dev_to_node(dev));
390         if (!ring)
391                 return NULL;
392
393         ring->num_segs = num_segs;
394         ring->bounce_buf_len = max_packet;
395         INIT_LIST_HEAD(&ring->td_list);
396         ring->type = type;
397         if (num_segs == 0)
398                 return ring;
399
400         ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg,
401                         &ring->last_seg, num_segs, 0, cycle_state, type,
402                         max_packet, flags);
403         if (ret)
404                 goto fail;
405
406         /* Only event ring does not use link TRB */
407         if (type != TYPE_EVENT) {
408                 /* See section 4.9.2.1 and 6.4.4.1 */
409                 ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
410                         cpu_to_le32(LINK_TOGGLE);
411         }
412         xhci_initialize_ring_info(ring, cycle_state);
413         trace_xhci_ring_alloc(ring);
414         return ring;
415
416 fail:
417         kfree(ring);
418         return NULL;
419 }
420
421 void xhci_free_endpoint_ring(struct xhci_hcd *xhci,
422                 struct xhci_virt_device *virt_dev,
423                 unsigned int ep_index)
424 {
425         xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
426         virt_dev->eps[ep_index].ring = NULL;
427 }
428
429 /*
430  * Expand an existing ring.
431  * Allocate a new ring with the same number of segments and link the two rings.
432  */
433 int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
434                                 unsigned int num_new_segs, gfp_t flags)
435 {
436         struct xhci_segment     *first;
437         struct xhci_segment     *last;
438         int                     ret;
439
440         ret = xhci_alloc_segments_for_ring(xhci, &first, &last,
441                         num_new_segs, ring->enq_seg->num + 1,
442                         ring->cycle_state, ring->type,
443                         ring->bounce_buf_len, flags);
444         if (ret)
445                 return -ENOMEM;
446
447         if (ring->type == TYPE_STREAM)
448                 ret = xhci_update_stream_segment_mapping(ring->trb_address_map,
449                                                 ring, first, last, flags);
450         if (ret) {
451                 struct xhci_segment *next;
452                 do {
453                         next = first->next;
454                         xhci_segment_free(xhci, first);
455                         if (first == last)
456                                 break;
457                         first = next;
458                 } while (true);
459                 return ret;
460         }
461
462         xhci_link_rings(xhci, ring, first, last, num_new_segs);
463         trace_xhci_ring_expansion(ring);
464         xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
465                         "ring expansion succeeded, now has %d segments",
466                         ring->num_segs);
467
468         return 0;
469 }
470
471 struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
472                                                     int type, gfp_t flags)
473 {
474         struct xhci_container_ctx *ctx;
475         struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
476
477         if ((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT))
478                 return NULL;
479
480         ctx = kzalloc_node(sizeof(*ctx), flags, dev_to_node(dev));
481         if (!ctx)
482                 return NULL;
483
484         ctx->type = type;
485         ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
486         if (type == XHCI_CTX_TYPE_INPUT)
487                 ctx->size += CTX_SIZE(xhci->hcc_params);
488
489         ctx->bytes = dma_pool_zalloc(xhci->device_pool, flags, &ctx->dma);
490         if (!ctx->bytes) {
491                 kfree(ctx);
492                 return NULL;
493         }
494         return ctx;
495 }
496
497 void xhci_free_container_ctx(struct xhci_hcd *xhci,
498                              struct xhci_container_ctx *ctx)
499 {
500         if (!ctx)
501                 return;
502         dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
503         kfree(ctx);
504 }
505
506 struct xhci_input_control_ctx *xhci_get_input_control_ctx(
507                                               struct xhci_container_ctx *ctx)
508 {
509         if (ctx->type != XHCI_CTX_TYPE_INPUT)
510                 return NULL;
511
512         return (struct xhci_input_control_ctx *)ctx->bytes;
513 }
514
515 struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci,
516                                         struct xhci_container_ctx *ctx)
517 {
518         if (ctx->type == XHCI_CTX_TYPE_DEVICE)
519                 return (struct xhci_slot_ctx *)ctx->bytes;
520
521         return (struct xhci_slot_ctx *)
522                 (ctx->bytes + CTX_SIZE(xhci->hcc_params));
523 }
524
525 struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
526                                     struct xhci_container_ctx *ctx,
527                                     unsigned int ep_index)
528 {
529         /* increment ep index by offset of start of ep ctx array */
530         ep_index++;
531         if (ctx->type == XHCI_CTX_TYPE_INPUT)
532                 ep_index++;
533
534         return (struct xhci_ep_ctx *)
535                 (ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
536 }
537 EXPORT_SYMBOL_GPL(xhci_get_ep_ctx);
538
539 /***************** Streams structures manipulation *************************/
540
541 static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
542                 unsigned int num_stream_ctxs,
543                 struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
544 {
545         struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
546         size_t size = sizeof(struct xhci_stream_ctx) * num_stream_ctxs;
547
548         if (size > MEDIUM_STREAM_ARRAY_SIZE)
549                 dma_free_coherent(dev, size, stream_ctx, dma);
550         else if (size > SMALL_STREAM_ARRAY_SIZE)
551                 dma_pool_free(xhci->medium_streams_pool, stream_ctx, dma);
552         else
553                 dma_pool_free(xhci->small_streams_pool, stream_ctx, dma);
554 }
555
556 /*
557  * The stream context array for each endpoint with bulk streams enabled can
558  * vary in size, based on:
559  *  - how many streams the endpoint supports,
560  *  - the maximum primary stream array size the host controller supports,
561  *  - and how many streams the device driver asks for.
562  *
563  * The stream context array must be a power of 2, and can be as small as
564  * 64 bytes or as large as 1MB.
565  */
566 static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
567                 unsigned int num_stream_ctxs, dma_addr_t *dma,
568                 gfp_t mem_flags)
569 {
570         struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
571         size_t size = size_mul(sizeof(struct xhci_stream_ctx), num_stream_ctxs);
572
573         if (size > MEDIUM_STREAM_ARRAY_SIZE)
574                 return dma_alloc_coherent(dev, size, dma, mem_flags);
575         if (size > SMALL_STREAM_ARRAY_SIZE)
576                 return dma_pool_zalloc(xhci->medium_streams_pool, mem_flags, dma);
577         else
578                 return dma_pool_zalloc(xhci->small_streams_pool, mem_flags, dma);
579 }
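/*
 * A rough sizing example for the constraints described above (each
 * struct xhci_stream_ctx is 16 bytes): a request on the order of 100 usable
 * streams needs a power-of-two array of at least 128 entries, i.e. 2 KB of
 * stream contexts; the 64-byte minimum corresponds to 4 entries and the
 * 1 MB maximum to 64K entries.
 */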
580
581 struct xhci_ring *xhci_dma_to_transfer_ring(
582                 struct xhci_virt_ep *ep,
583                 u64 address)
584 {
585         if (ep->ep_state & EP_HAS_STREAMS)
586                 return radix_tree_lookup(&ep->stream_info->trb_address_map,
587                                 address >> TRB_SEGMENT_SHIFT);
588         return ep->ring;
589 }
590
591 /*
592  * Change an endpoint's internal structure so it supports stream IDs.  The
593  * number of requested streams includes stream 0, which cannot be used by device
594  * drivers.
595  *
596  * The number of stream contexts in the stream context array may be bigger than
597  * the number of streams the driver wants to use.  This is because the number of
598  * stream context array entries must be a power of two.
599  */
600 struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
601                 unsigned int num_stream_ctxs,
602                 unsigned int num_streams,
603                 unsigned int max_packet, gfp_t mem_flags)
604 {
605         struct xhci_stream_info *stream_info;
606         u32 cur_stream;
607         struct xhci_ring *cur_ring;
608         u64 addr;
609         int ret;
610         struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
611
612         xhci_dbg(xhci, "Allocating %u streams and %u stream context array entries.\n",
613                         num_streams, num_stream_ctxs);
614         if (xhci->cmd_ring_reserved_trbs == MAX_RSVD_CMD_TRBS) {
615                 xhci_dbg(xhci, "Command ring has no reserved TRBs available\n");
616                 return NULL;
617         }
618         xhci->cmd_ring_reserved_trbs++;
619
620         stream_info = kzalloc_node(sizeof(*stream_info), mem_flags,
621                         dev_to_node(dev));
622         if (!stream_info)
623                 goto cleanup_trbs;
624
625         stream_info->num_streams = num_streams;
626         stream_info->num_stream_ctxs = num_stream_ctxs;
627
628         /* Initialize the array of virtual pointers to stream rings. */
629         stream_info->stream_rings = kcalloc_node(
630                         num_streams, sizeof(struct xhci_ring *), mem_flags,
631                         dev_to_node(dev));
632         if (!stream_info->stream_rings)
633                 goto cleanup_info;
634
635         /* Initialize the array of DMA addresses for stream rings for the HW. */
636         stream_info->stream_ctx_array = xhci_alloc_stream_ctx(xhci,
637                         num_stream_ctxs, &stream_info->ctx_array_dma,
638                         mem_flags);
639         if (!stream_info->stream_ctx_array)
640                 goto cleanup_ring_array;
641
642         /* Allocate everything needed to free the stream rings later */
643         stream_info->free_streams_command =
644                 xhci_alloc_command_with_ctx(xhci, true, mem_flags);
645         if (!stream_info->free_streams_command)
646                 goto cleanup_ctx;
647
648         INIT_RADIX_TREE(&stream_info->trb_address_map, GFP_ATOMIC);
649
650         /* Allocate rings for all the streams that the driver will use,
651          * and add their segment DMA addresses to the radix tree.
652          * Stream 0 is reserved.
653          */
654
655         for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
656                 stream_info->stream_rings[cur_stream] =
657                         xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, max_packet,
658                                         mem_flags);
659                 cur_ring = stream_info->stream_rings[cur_stream];
660                 if (!cur_ring)
661                         goto cleanup_rings;
662                 cur_ring->stream_id = cur_stream;
663                 cur_ring->trb_address_map = &stream_info->trb_address_map;
664                 /* Set deq ptr, cycle bit, and stream context type */
665                 addr = cur_ring->first_seg->dma |
666                         SCT_FOR_CTX(SCT_PRI_TR) |
667                         cur_ring->cycle_state;
668                 stream_info->stream_ctx_array[cur_stream].stream_ring =
669                         cpu_to_le64(addr);
670                 xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n", cur_stream, addr);
671
672                 ret = xhci_update_stream_mapping(cur_ring, mem_flags);
673                 if (ret) {
674                         xhci_ring_free(xhci, cur_ring);
675                         stream_info->stream_rings[cur_stream] = NULL;
676                         goto cleanup_rings;
677                 }
678         }
679         /* Leave the other unused stream ring pointers in the stream context
680          * array initialized to zero.  This will cause the xHC to give us an
681          * error if the device asks for a stream ID we don't have setup (if it
682          * was any other way, the host controller would assume the ring is
683          * "empty" and wait forever for data to be queued to that stream ID).
684          */
685
686         return stream_info;
687
688 cleanup_rings:
689         for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
690                 cur_ring = stream_info->stream_rings[cur_stream];
691                 if (cur_ring) {
692                         xhci_ring_free(xhci, cur_ring);
693                         stream_info->stream_rings[cur_stream] = NULL;
694                 }
695         }
696         xhci_free_command(xhci, stream_info->free_streams_command);
697 cleanup_ctx:
698         xhci_free_stream_ctx(xhci,
699                 stream_info->num_stream_ctxs,
700                 stream_info->stream_ctx_array,
701                 stream_info->ctx_array_dma);
702 cleanup_ring_array:
703         kfree(stream_info->stream_rings);
704 cleanup_info:
705         kfree(stream_info);
706 cleanup_trbs:
707         xhci->cmd_ring_reserved_trbs--;
708         return NULL;
709 }
710 /*
711  * Sets the MaxPStreams field and the Linear Stream Array field.
712  * Sets the dequeue pointer to the stream context array.
713  */
714 void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
715                 struct xhci_ep_ctx *ep_ctx,
716                 struct xhci_stream_info *stream_info)
717 {
718         u32 max_primary_streams;
719         /* MaxPStreams is the number of stream context array entries, not the
720          * number we're actually using.  Must be in 2^(MaxPstreams + 1) format.
721          * fls(0) = 0, fls(0x1) = 1, fls(0x10) = 5, fls(0x100) = 9, etc.
722          */
723         max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
724         xhci_dbg_trace(xhci,  trace_xhci_dbg_context_change,
725                         "Setting number of stream ctx array entries to %u",
726                         1 << (max_primary_streams + 1));
727         ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK);
728         ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams)
729                                        | EP_HAS_LSA);
730         ep_ctx->deq  = cpu_to_le64(stream_info->ctx_array_dma);
731 }
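/*
 * Worked example of the MaxPStreams encoding above: with a 256-entry stream
 * context array, fls(256) = 9, so max_primary_streams = 9 - 2 = 7, and the
 * debug message reports 1 << (7 + 1) = 256 entries, matching the
 * 2^(MaxPStreams + 1) format required by the endpoint context.
 */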
732
733 /*
734  * Sets the MaxPStreams field and the Linear Stream Array field to 0.
735  * Reinstalls the "normal" endpoint ring (at its previous dequeue mark,
736  * not at the beginning of the ring).
737  */
738 void xhci_setup_no_streams_ep_input_ctx(struct xhci_ep_ctx *ep_ctx,
739                 struct xhci_virt_ep *ep)
740 {
741         dma_addr_t addr;
742         ep_ctx->ep_info &= cpu_to_le32(~(EP_MAXPSTREAMS_MASK | EP_HAS_LSA));
743         addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue);
744         ep_ctx->deq  = cpu_to_le64(addr | ep->ring->cycle_state);
745 }
746
747 /* Frees all stream contexts associated with the endpoint,
748  *
749  * Caller should fix the endpoint context streams fields.
750  */
751 void xhci_free_stream_info(struct xhci_hcd *xhci,
752                 struct xhci_stream_info *stream_info)
753 {
754         int cur_stream;
755         struct xhci_ring *cur_ring;
756
757         if (!stream_info)
758                 return;
759
760         for (cur_stream = 1; cur_stream < stream_info->num_streams;
761                         cur_stream++) {
762                 cur_ring = stream_info->stream_rings[cur_stream];
763                 if (cur_ring) {
764                         xhci_ring_free(xhci, cur_ring);
765                         stream_info->stream_rings[cur_stream] = NULL;
766                 }
767         }
768         xhci_free_command(xhci, stream_info->free_streams_command);
769         xhci->cmd_ring_reserved_trbs--;
770         if (stream_info->stream_ctx_array)
771                 xhci_free_stream_ctx(xhci,
772                                 stream_info->num_stream_ctxs,
773                                 stream_info->stream_ctx_array,
774                                 stream_info->ctx_array_dma);
775
776         kfree(stream_info->stream_rings);
777         kfree(stream_info);
778 }
779
780
781 /***************** Device context manipulation *************************/
782
783 static void xhci_free_tt_info(struct xhci_hcd *xhci,
784                 struct xhci_virt_device *virt_dev,
785                 int slot_id)
786 {
787         struct list_head *tt_list_head;
788         struct xhci_tt_bw_info *tt_info, *next;
789         bool slot_found = false;
790
791         /* If the device never made it past the Set Address stage,
792          * it may not have the real_port set correctly.
793          */
794         if (virt_dev->real_port == 0 ||
795                         virt_dev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
796                 xhci_dbg(xhci, "Bad real port.\n");
797                 return;
798         }
799
800         tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts);
801         list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
802                 /* Multi-TT hubs will have more than one entry */
803                 if (tt_info->slot_id == slot_id) {
804                         slot_found = true;
805                         list_del(&tt_info->tt_list);
806                         kfree(tt_info);
807                 } else if (slot_found) {
808                         break;
809                 }
810         }
811 }
812
813 int xhci_alloc_tt_info(struct xhci_hcd *xhci,
814                 struct xhci_virt_device *virt_dev,
815                 struct usb_device *hdev,
816                 struct usb_tt *tt, gfp_t mem_flags)
817 {
818         struct xhci_tt_bw_info          *tt_info;
819         unsigned int                    num_ports;
820         int                             i, j;
821         struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
822
823         if (!tt->multi)
824                 num_ports = 1;
825         else
826                 num_ports = hdev->maxchild;
827
828         for (i = 0; i < num_ports; i++, tt_info++) {
829                 struct xhci_interval_bw_table *bw_table;
830
831                 tt_info = kzalloc_node(sizeof(*tt_info), mem_flags,
832                                 dev_to_node(dev));
833                 if (!tt_info)
834                         goto free_tts;
835                 INIT_LIST_HEAD(&tt_info->tt_list);
836                 list_add(&tt_info->tt_list,
837                                 &xhci->rh_bw[virt_dev->real_port - 1].tts);
838                 tt_info->slot_id = virt_dev->udev->slot_id;
839                 if (tt->multi)
840                         tt_info->ttport = i+1;
841                 bw_table = &tt_info->bw_table;
842                 for (j = 0; j < XHCI_MAX_INTERVAL; j++)
843                         INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
844         }
845         return 0;
846
847 free_tts:
848         xhci_free_tt_info(xhci, virt_dev, virt_dev->udev->slot_id);
849         return -ENOMEM;
850 }
851
852
853 /* All the xhci_tds in the ring's TD list should be freed at this point.
854  * Should be called with xhci->lock held if there is any chance the TT lists
855  * will be manipulated by the configure endpoint, allocate device, or update
856  * hub functions while this function is removing the TT entries from the list.
857  */
858 void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
859 {
860         struct xhci_virt_device *dev;
861         int i;
862         int old_active_eps = 0;
863
864         /* Slot ID 0 is reserved */
865         if (slot_id == 0 || !xhci->devs[slot_id])
866                 return;
867
868         dev = xhci->devs[slot_id];
869
870         xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
871         if (!dev)
872                 return;
873
874         trace_xhci_free_virt_device(dev);
875
876         if (dev->tt_info)
877                 old_active_eps = dev->tt_info->active_eps;
878
879         for (i = 0; i < 31; i++) {
880                 if (dev->eps[i].ring)
881                         xhci_ring_free(xhci, dev->eps[i].ring);
882                 if (dev->eps[i].stream_info)
883                         xhci_free_stream_info(xhci,
884                                         dev->eps[i].stream_info);
885                 /*
886                  * Endpoints are normally deleted from the bandwidth list when
887                  * endpoints are dropped, before device is freed.
888                  * If host is dying or being removed then endpoints aren't
889                  * dropped cleanly, so delete the endpoint from list here.
890                  * Only applicable for hosts with software bandwidth checking.
891                  */
892
893                 if (!list_empty(&dev->eps[i].bw_endpoint_list)) {
894                         list_del_init(&dev->eps[i].bw_endpoint_list);
895                         xhci_dbg(xhci, "Slot %u endpoint %u not removed from BW list!\n",
896                                  slot_id, i);
897                 }
898         }
899         /* If this is a hub, free the TT(s) from the TT list */
900         xhci_free_tt_info(xhci, dev, slot_id);
901         /* If necessary, update the number of active TTs on this root port */
902         xhci_update_tt_active_eps(xhci, dev, old_active_eps);
903
904         if (dev->in_ctx)
905                 xhci_free_container_ctx(xhci, dev->in_ctx);
906         if (dev->out_ctx)
907                 xhci_free_container_ctx(xhci, dev->out_ctx);
908
909         if (dev->udev && dev->udev->slot_id)
910                 dev->udev->slot_id = 0;
911         kfree(xhci->devs[slot_id]);
912         xhci->devs[slot_id] = NULL;
913 }
914
915 /*
916  * Free a virt_device structure.
917  * If the virt_device added a tt_info (a hub) and has children pointing to
918  * that tt_info, then free the child first. Recursive.
919  * We can't rely on udev at this point to find child-parent relationships.
920  */
921 static void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
922 {
923         struct xhci_virt_device *vdev;
924         struct list_head *tt_list_head;
925         struct xhci_tt_bw_info *tt_info, *next;
926         int i;
927
928         vdev = xhci->devs[slot_id];
929         if (!vdev)
930                 return;
931
932         if (vdev->real_port == 0 ||
933                         vdev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
934                 xhci_dbg(xhci, "Bad vdev->real_port.\n");
935                 goto out;
936         }
937
938         tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts);
939         list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
940                 /* is this a hub device that added a tt_info to the tts list */
941                 if (tt_info->slot_id == slot_id) {
942                         /* are any devices using this tt_info? */
943                         for (i = 1; i < HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
944                                 vdev = xhci->devs[i];
945                                 if (vdev && (vdev->tt_info == tt_info))
946                                         xhci_free_virt_devices_depth_first(
947                                                 xhci, i);
948                         }
949                 }
950         }
951 out:
952         /* we are now at a leaf device */
953         xhci_debugfs_remove_slot(xhci, slot_id);
954         xhci_free_virt_device(xhci, slot_id);
955 }
956
957 int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
958                 struct usb_device *udev, gfp_t flags)
959 {
960         struct xhci_virt_device *dev;
961         int i;
962
963         /* Slot ID 0 is reserved */
964         if (slot_id == 0 || xhci->devs[slot_id]) {
965                 xhci_warn(xhci, "Bad Slot ID %d\n", slot_id);
966                 return 0;
967         }
968
969         dev = kzalloc(sizeof(*dev), flags);
970         if (!dev)
971                 return 0;
972
973         dev->slot_id = slot_id;
974
975         /* Allocate the (output) device context that will be used in the HC. */
976         dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
977         if (!dev->out_ctx)
978                 goto fail;
979
980         xhci_dbg(xhci, "Slot %d output ctx = 0x%pad (dma)\n", slot_id, &dev->out_ctx->dma);
981
982         /* Allocate the (input) device context for address device command */
983         dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags);
984         if (!dev->in_ctx)
985                 goto fail;
986
987         xhci_dbg(xhci, "Slot %d input ctx = 0x%pad (dma)\n", slot_id, &dev->in_ctx->dma);
988
989         /* Initialize the cancellation and bandwidth list for each ep */
990         for (i = 0; i < 31; i++) {
991                 dev->eps[i].ep_index = i;
992                 dev->eps[i].vdev = dev;
993                 dev->eps[i].xhci = xhci;
994                 INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
995                 INIT_LIST_HEAD(&dev->eps[i].bw_endpoint_list);
996         }
997
998         /* Allocate endpoint 0 ring */
999         dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, 0, flags);
1000         if (!dev->eps[0].ring)
1001                 goto fail;
1002
1003         dev->udev = udev;
1004
1005         /* Point to output device context in dcbaa. */
1006         xhci->dcbaa->dev_context_ptrs[slot_id] = cpu_to_le64(dev->out_ctx->dma);
1007         xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
1008                  slot_id,
1009                  &xhci->dcbaa->dev_context_ptrs[slot_id],
1010                  le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id]));
1011
1012         trace_xhci_alloc_virt_device(dev);
1013
1014         xhci->devs[slot_id] = dev;
1015
1016         return 1;
1017 fail:
1018
1019         if (dev->in_ctx)
1020                 xhci_free_container_ctx(xhci, dev->in_ctx);
1021         if (dev->out_ctx)
1022                 xhci_free_container_ctx(xhci, dev->out_ctx);
1023         kfree(dev);
1024
1025         return 0;
1026 }
1027
1028 void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
1029                 struct usb_device *udev)
1030 {
1031         struct xhci_virt_device *virt_dev;
1032         struct xhci_ep_ctx      *ep0_ctx;
1033         struct xhci_ring        *ep_ring;
1034
1035         virt_dev = xhci->devs[udev->slot_id];
1036         ep0_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, 0);
1037         ep_ring = virt_dev->eps[0].ring;
1038         /*
1039          * FIXME we don't keep track of the dequeue pointer very well after a
1040          * Set TR dequeue pointer, so we're setting the dequeue pointer of the
1041          * host to our enqueue pointer.  This should only be called after a
1042          * configured device has reset, so all control transfers should have
1043          * been completed or cancelled before the reset.
1044          */
1045         ep0_ctx->deq = cpu_to_le64(xhci_trb_virt_to_dma(ep_ring->enq_seg,
1046                                                         ep_ring->enqueue)
1047                                    | ep_ring->cycle_state);
1048 }
1049
1050 /*
1051  * The xHCI roothub may have ports of differing speeds in any order in the port
1052  * status registers.
1053  *
1054  * The xHCI hardware wants to know the roothub port number that the USB device
1055  * is attached to (or the roothub port its ancestor hub is attached to).  All we
1056  * know is the index of that port under either the USB 2.0 or the USB 3.0
1057  * roothub, but that doesn't give us the real index into the HW port status
1058  * registers. Call xhci_find_raw_port_number() to get real index.
1059  */
1060 static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
1061                 struct usb_device *udev)
1062 {
1063         struct usb_device *top_dev;
1064         struct usb_hcd *hcd;
1065
1066         if (udev->speed >= USB_SPEED_SUPER)
1067                 hcd = xhci_get_usb3_hcd(xhci);
1068         else
1069                 hcd = xhci->main_hcd;
1070
1071         for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
1072                         top_dev = top_dev->parent)
1073                 /* Found device below root hub */;
1074
1075         return  xhci_find_raw_port_number(hcd, top_dev->portnum);
1076 }
1077
1078 /* Setup an xHCI virtual device for a Set Address command */
1079 int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
1080 {
1081         struct xhci_virt_device *dev;
1082         struct xhci_ep_ctx      *ep0_ctx;
1083         struct xhci_slot_ctx    *slot_ctx;
1084         u32                     port_num;
1085         u32                     max_packets;
1086         struct usb_device *top_dev;
1087
1088         dev = xhci->devs[udev->slot_id];
1089         /* Slot ID 0 is reserved */
1090         if (udev->slot_id == 0 || !dev) {
1091                 xhci_warn(xhci, "Slot ID %d is not assigned to this device\n",
1092                                 udev->slot_id);
1093                 return -EINVAL;
1094         }
1095         ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
1096         slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);
1097
1098         /* 3) Only the control endpoint is valid - one endpoint context */
1099         slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route);
1100         switch (udev->speed) {
1101         case USB_SPEED_SUPER_PLUS:
1102                 slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SSP);
1103                 max_packets = MAX_PACKET(512);
1104                 break;
1105         case USB_SPEED_SUPER:
1106                 slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
1107                 max_packets = MAX_PACKET(512);
1108                 break;
1109         case USB_SPEED_HIGH:
1110                 slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
1111                 max_packets = MAX_PACKET(64);
1112                 break;
1113         /* USB core guesses at a 64-byte max packet first for FS devices */
1114         case USB_SPEED_FULL:
1115                 slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
1116                 max_packets = MAX_PACKET(64);
1117                 break;
1118         case USB_SPEED_LOW:
1119                 slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS);
1120                 max_packets = MAX_PACKET(8);
1121                 break;
1122         default:
1123                 /* Speed was set earlier, this shouldn't happen. */
1124                 return -EINVAL;
1125         }
1126         /* Find the root hub port this device is under */
1127         port_num = xhci_find_real_port_number(xhci, udev);
1128         if (!port_num)
1129                 return -EINVAL;
1130         slot_ctx->dev_info2 |= cpu_to_le32(ROOT_HUB_PORT(port_num));
1131         /* Set the port number in the virtual_device to the faked port number */
1132         for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
1133                         top_dev = top_dev->parent)
1134                 /* Found device below root hub */;
1135         dev->fake_port = top_dev->portnum;
1136         dev->real_port = port_num;
1137         xhci_dbg(xhci, "Set root hub portnum to %d\n", port_num);
1138         xhci_dbg(xhci, "Set fake root hub portnum to %d\n", dev->fake_port);
1139
1140         /* Find the right bandwidth table that this device will be a part of.
1141          * If this is a full speed device attached directly to a root port (or a
1142          * descendant of one), it counts as a primary bandwidth domain, not a
1143          * secondary bandwidth domain under a TT.  An xhci_tt_info structure
1144          * will never be created for the HS root hub.
1145          */
1146         if (!udev->tt || !udev->tt->hub->parent) {
1147                 dev->bw_table = &xhci->rh_bw[port_num - 1].bw_table;
1148         } else {
1149                 struct xhci_root_port_bw_info *rh_bw;
1150                 struct xhci_tt_bw_info *tt_bw;
1151
1152                 rh_bw = &xhci->rh_bw[port_num - 1];
1153                 /* Find the right TT. */
1154                 list_for_each_entry(tt_bw, &rh_bw->tts, tt_list) {
1155                         if (tt_bw->slot_id != udev->tt->hub->slot_id)
1156                                 continue;
1157
1158                         if (!dev->udev->tt->multi ||
1159                                         (udev->tt->multi &&
1160                                          tt_bw->ttport == dev->udev->ttport)) {
1161                                 dev->bw_table = &tt_bw->bw_table;
1162                                 dev->tt_info = tt_bw;
1163                                 break;
1164                         }
1165                 }
1166                 if (!dev->tt_info)
1167                         xhci_warn(xhci, "WARN: Didn't find a matching TT\n");
1168         }
1169
1170         /* Is this a LS/FS device under an external HS hub? */
1171         if (udev->tt && udev->tt->hub->parent) {
1172                 slot_ctx->tt_info = cpu_to_le32(udev->tt->hub->slot_id |
1173                                                 (udev->ttport << 8));
1174                 if (udev->tt->multi)
1175                         slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
1176         }
1177         xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
1178         xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);
1179
1180         /* Step 4 - ring already allocated */
1181         /* Step 5 */
1182         ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP));
1183
1184         /* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
1185         ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3) |
1186                                          max_packets);
1187
1188         ep0_ctx->deq = cpu_to_le64(dev->eps[0].ring->first_seg->dma |
1189                                    dev->eps[0].ring->cycle_state);
1190
1191         trace_xhci_setup_addressable_virt_device(dev);
1192
1193         /* Steps 7 and 8 were done in xhci_alloc_virt_device() */
1194
1195         return 0;
1196 }
1197
1198 /*
1199  * Convert interval expressed as 2^(bInterval - 1) == interval into
1200  * straight exponent value 2^n == interval.
1201  *
1202  */
1203 static unsigned int xhci_parse_exponent_interval(struct usb_device *udev,
1204                 struct usb_host_endpoint *ep)
1205 {
1206         unsigned int interval;
1207
1208         interval = clamp_val(ep->desc.bInterval, 1, 16) - 1;
1209         if (interval != ep->desc.bInterval - 1)
1210                 dev_warn(&udev->dev,
1211                          "ep %#x - rounding interval to %d %sframes\n",
1212                          ep->desc.bEndpointAddress,
1213                          1 << interval,
1214                          udev->speed == USB_SPEED_FULL ? "" : "micro");
1215
1216         if (udev->speed == USB_SPEED_FULL) {
1217                 /*
1218                  * Full speed isoc endpoints specify interval in frames,
1219                  * not microframes. We are using microframes everywhere,
1220                  * so adjust accordingly.
1221                  */
1222                 interval += 3;  /* 1 frame = 2^3 uframes */
1223         }
1224
1225         return interval;
1226 }
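/*
 * Worked example of the conversion above, assuming a full-speed isoc
 * endpoint with bInterval = 4 (a period of 2^(4-1) = 8 frames):
 * clamp_val(4, 1, 16) - 1 = 3, the frame-to-microframe adjustment adds 3 to
 * give 6, and 2^6 * 125us = 8 ms = 8 frames, as expected.
 */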
1227
1228 /*
1229  * Convert bInterval expressed in microframes (in 1-255 range) to exponent of
1230  * microframes, rounded down to nearest power of 2.
1231  */
1232 static unsigned int xhci_microframes_to_exponent(struct usb_device *udev,
1233                 struct usb_host_endpoint *ep, unsigned int desc_interval,
1234                 unsigned int min_exponent, unsigned int max_exponent)
1235 {
1236         unsigned int interval;
1237
1238         interval = fls(desc_interval) - 1;
1239         interval = clamp_val(interval, min_exponent, max_exponent);
1240         if ((1 << interval) != desc_interval)
1241                 dev_dbg(&udev->dev,
1242                          "ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n",
1243                          ep->desc.bEndpointAddress,
1244                          1 << interval,
1245                          desc_interval);
1246
1247         return interval;
1248 }
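/*
 * Worked example of the rounding above, assuming a high-speed bulk endpoint
 * with a NAK rate of bInterval = 9 microframes: fls(9) - 1 = 3, so the
 * endpoint context gets Interval = 3, i.e. 2^3 = 8 microframes, and the
 * "rounding interval" debug message fires because 8 != 9.
 */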
1249
1250 static unsigned int xhci_parse_microframe_interval(struct usb_device *udev,
1251                 struct usb_host_endpoint *ep)
1252 {
1253         if (ep->desc.bInterval == 0)
1254                 return 0;
1255         return xhci_microframes_to_exponent(udev, ep,
1256                         ep->desc.bInterval, 0, 15);
1257 }
1258
1259
1260 static unsigned int xhci_parse_frame_interval(struct usb_device *udev,
1261                 struct usb_host_endpoint *ep)
1262 {
1263         return xhci_microframes_to_exponent(udev, ep,
1264                         ep->desc.bInterval * 8, 3, 10);
1265 }
1266
1267 /* Return the polling or NAK interval.
1268  *
1269  * The polling interval is expressed in "microframes".  If xHCI's Interval field
1270  * is set to N, it will service the endpoint every 2^(Interval)*125us.
1271  *
1272  * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval
1273  * is set to 0.
1274  */
1275 static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
1276                 struct usb_host_endpoint *ep)
1277 {
1278         unsigned int interval = 0;
1279
1280         switch (udev->speed) {
1281         case USB_SPEED_HIGH:
1282                 /* Max NAK rate */
1283                 if (usb_endpoint_xfer_control(&ep->desc) ||
1284                     usb_endpoint_xfer_bulk(&ep->desc)) {
1285                         interval = xhci_parse_microframe_interval(udev, ep);
1286                         break;
1287                 }
1288                 fallthrough;    /* SS and HS isoc/int have same decoding */
1289
1290         case USB_SPEED_SUPER_PLUS:
1291         case USB_SPEED_SUPER:
1292                 if (usb_endpoint_xfer_int(&ep->desc) ||
1293                     usb_endpoint_xfer_isoc(&ep->desc)) {
1294                         interval = xhci_parse_exponent_interval(udev, ep);
1295                 }
1296                 break;
1297
1298         case USB_SPEED_FULL:
1299                 if (usb_endpoint_xfer_isoc(&ep->desc)) {
1300                         interval = xhci_parse_exponent_interval(udev, ep);
1301                         break;
1302                 }
1303                 /*
1304                  * Fall through for interrupt endpoint interval decoding
1305                  * since it uses the same rules as low speed interrupt
1306                  * endpoints.
1307                  */
1308                 fallthrough;
1309
1310         case USB_SPEED_LOW:
1311                 if (usb_endpoint_xfer_int(&ep->desc) ||
1312                     usb_endpoint_xfer_isoc(&ep->desc)) {
1313
1314                         interval = xhci_parse_frame_interval(udev, ep);
1315                 }
1316                 break;
1317
1318         default:
1319                 BUG();
1320         }
1321         return interval;
1322 }
1323
1324 /* The "Mult" field in the endpoint context is only set for SuperSpeed isoc eps.
1325  * High speed endpoint descriptors can define "the number of additional
1326  * transaction opportunities per microframe", but that goes in the Max Burst
1327  * endpoint context field.
1328  */
1329 static u32 xhci_get_endpoint_mult(struct usb_device *udev,
1330                 struct usb_host_endpoint *ep)
1331 {
1332         if (udev->speed < USB_SPEED_SUPER ||
1333                         !usb_endpoint_xfer_isoc(&ep->desc))
1334                 return 0;
1335         return ep->ss_ep_comp.bmAttributes;
1336 }
1337
1338 static u32 xhci_get_endpoint_max_burst(struct usb_device *udev,
1339                                        struct usb_host_endpoint *ep)
1340 {
1341         /* Super speed and Plus have max burst in ep companion desc */
1342         if (udev->speed >= USB_SPEED_SUPER)
1343                 return ep->ss_ep_comp.bMaxBurst;
1344
1345         if (udev->speed == USB_SPEED_HIGH &&
1346             (usb_endpoint_xfer_isoc(&ep->desc) ||
1347              usb_endpoint_xfer_int(&ep->desc)))
1348                 return usb_endpoint_maxp_mult(&ep->desc) - 1;
1349
1350         return 0;
1351 }
1352
1353 static u32 xhci_get_endpoint_type(struct usb_host_endpoint *ep)
1354 {
1355         int in;
1356
1357         in = usb_endpoint_dir_in(&ep->desc);
1358
1359         switch (usb_endpoint_type(&ep->desc)) {
1360         case USB_ENDPOINT_XFER_CONTROL:
1361                 return CTRL_EP;
1362         case USB_ENDPOINT_XFER_BULK:
1363                 return in ? BULK_IN_EP : BULK_OUT_EP;
1364         case USB_ENDPOINT_XFER_ISOC:
1365                 return in ? ISOC_IN_EP : ISOC_OUT_EP;
1366         case USB_ENDPOINT_XFER_INT:
1367                 return in ? INT_IN_EP : INT_OUT_EP;
1368         }
1369         return 0;
1370 }
1371
1372 /* Return the maximum endpoint service interval time (ESIT) payload.
1373  * Basically, this is the maxpacket size, multiplied by the burst size
1374  * and mult size.
1375  */
1376 static u32 xhci_get_max_esit_payload(struct usb_device *udev,
1377                 struct usb_host_endpoint *ep)
1378 {
1379         int max_burst;
1380         int max_packet;
1381
1382         /* Only applies for interrupt or isochronous endpoints */
1383         if (usb_endpoint_xfer_control(&ep->desc) ||
1384                         usb_endpoint_xfer_bulk(&ep->desc))
1385                 return 0;
1386
1387         /* SuperSpeedPlus Isoc ep sending over 48k per esit */
1388         if ((udev->speed >= USB_SPEED_SUPER_PLUS) &&
1389             USB_SS_SSP_ISOC_COMP(ep->ss_ep_comp.bmAttributes))
1390                 return le32_to_cpu(ep->ssp_isoc_ep_comp.dwBytesPerInterval);
1391
1392         /* SuperSpeed or SuperSpeedPlus Isoc ep with less than 48k per esit */
1393         if (udev->speed >= USB_SPEED_SUPER)
1394                 return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval);
1395
1396         max_packet = usb_endpoint_maxp(&ep->desc);
1397         max_burst = usb_endpoint_maxp_mult(&ep->desc);
1398         /* A 0 in max burst means 1 transfer per ESIT */
1399         return max_packet * max_burst;
1400 }
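/*
 * Informal example of the HS/FS/LS branch above: an interrupt endpoint with
 * wMaxPacketSize 1024 and three transactions per microframe
 * (usb_endpoint_maxp_mult() == 3) reports a max ESIT payload of
 * 1024 * 3 = 3072 bytes; SuperSpeed(Plus) endpoints take the value straight
 * from their companion descriptors instead.
 */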
1401
1402 /* Set up an endpoint with one ring segment.  Do not allocate stream rings.
1403  * Drivers will have to call usb_alloc_streams() to do that.
1404  */
1405 int xhci_endpoint_init(struct xhci_hcd *xhci,
1406                 struct xhci_virt_device *virt_dev,
1407                 struct usb_device *udev,
1408                 struct usb_host_endpoint *ep,
1409                 gfp_t mem_flags)
1410 {
1411         unsigned int ep_index;
1412         struct xhci_ep_ctx *ep_ctx;
1413         struct xhci_ring *ep_ring;
1414         unsigned int max_packet;
1415         enum xhci_ring_type ring_type;
1416         u32 max_esit_payload;
1417         u32 endpoint_type;
1418         unsigned int max_burst;
1419         unsigned int interval;
1420         unsigned int mult;
1421         unsigned int avg_trb_len;
1422         unsigned int err_count = 0;
1423
1424         ep_index = xhci_get_endpoint_index(&ep->desc);
1425         ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
1426
1427         endpoint_type = xhci_get_endpoint_type(ep);
1428         if (!endpoint_type)
1429                 return -EINVAL;
1430
1431         ring_type = usb_endpoint_type(&ep->desc);
1432
1433         /*
1434          * Get values to fill the endpoint context, mostly from ep descriptor.
1435          * The average TRB buffer length for bulk endpoints is unclear as we
1436          * have no clue on scatter gather list entry size. For Isoc and Int,
1437          * set it to max available. See xHCI 1.1 spec 4.14.1.1 for details.
1438          */
1439         max_esit_payload = xhci_get_max_esit_payload(udev, ep);
1440         interval = xhci_get_endpoint_interval(udev, ep);
1441
1442         /* Periodic endpoint bInterval limit quirk */
1443         if (usb_endpoint_xfer_int(&ep->desc) ||
1444             usb_endpoint_xfer_isoc(&ep->desc)) {
1445                 if ((xhci->quirks & XHCI_LIMIT_ENDPOINT_INTERVAL_7) &&
1446                     udev->speed >= USB_SPEED_HIGH &&
1447                     interval >= 7) {
1448                         interval = 6;
1449                 }
1450         }
1451
1452         mult = xhci_get_endpoint_mult(udev, ep);
1453         max_packet = usb_endpoint_maxp(&ep->desc);
1454         max_burst = xhci_get_endpoint_max_burst(udev, ep);
1455         avg_trb_len = max_esit_payload;
1456
1457         /* FIXME dig Mult and streams info out of ep companion desc */
1458
1459         /* Allow 3 retries for everything but isoc, set CErr = 3 */
1460         if (!usb_endpoint_xfer_isoc(&ep->desc))
1461                 err_count = 3;
1462         /* HS bulk max packet should be 512, FS bulk supports 8, 16, 32 or 64 */
1463         if (usb_endpoint_xfer_bulk(&ep->desc)) {
1464                 if (udev->speed == USB_SPEED_HIGH)
1465                         max_packet = 512;
1466                 if (udev->speed == USB_SPEED_FULL) {
1467                         max_packet = rounddown_pow_of_two(max_packet);
1468                         max_packet = clamp_val(max_packet, 8, 64);
1469                 }
1470         }
1471         /* xHCI 1.0 and 1.1 indicate that ctrl ep avg TRB Length should be 8 */
1472         if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version >= 0x100)
1473                 avg_trb_len = 8;
1474         /* xhci 1.1 with LEC support doesn't use mult field, use RsvdZ */
1475         if ((xhci->hci_version > 0x100) && HCC2_LEC(xhci->hcc_params2))
1476                 mult = 0;
1477
1478         /* Set up the endpoint ring */
1479         virt_dev->eps[ep_index].new_ring =
1480                 xhci_ring_alloc(xhci, 2, 1, ring_type, max_packet, mem_flags);
1481         if (!virt_dev->eps[ep_index].new_ring)
1482                 return -ENOMEM;
1483
1484         virt_dev->eps[ep_index].skip = false;
1485         ep_ring = virt_dev->eps[ep_index].new_ring;
1486
1487         /* Fill the endpoint context */
1488         ep_ctx->ep_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_HI(max_esit_payload) |
1489                                       EP_INTERVAL(interval) |
1490                                       EP_MULT(mult));
1491         ep_ctx->ep_info2 = cpu_to_le32(EP_TYPE(endpoint_type) |
1492                                        MAX_PACKET(max_packet) |
1493                                        MAX_BURST(max_burst) |
1494                                        ERROR_COUNT(err_count));
1495         ep_ctx->deq = cpu_to_le64(ep_ring->first_seg->dma |
1496                                   ep_ring->cycle_state);
1497
1498         ep_ctx->tx_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_LO(max_esit_payload) |
1499                                       EP_AVG_TRB_LENGTH(avg_trb_len));
1500
1501         return 0;
1502 }
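/*
 * Note on usage (summary, nothing new): this is expected to be called from
 * the hcd add_endpoint path (xhci_add_endpoint()); the freshly allocated
 * new_ring is only promoted to eps[ep_index].ring once the following
 * Configure Endpoint command succeeds, and is freed again otherwise.
 */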
1503
1504 void xhci_endpoint_zero(struct xhci_hcd *xhci,
1505                 struct xhci_virt_device *virt_dev,
1506                 struct usb_host_endpoint *ep)
1507 {
1508         unsigned int ep_index;
1509         struct xhci_ep_ctx *ep_ctx;
1510
1511         ep_index = xhci_get_endpoint_index(&ep->desc);
1512         ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
1513
1514         ep_ctx->ep_info = 0;
1515         ep_ctx->ep_info2 = 0;
1516         ep_ctx->deq = 0;
1517         ep_ctx->tx_info = 0;
1518         /* Don't free the endpoint ring until the set interface or configuration
1519          * request succeeds.
1520          */
1521 }
1522
1523 void xhci_clear_endpoint_bw_info(struct xhci_bw_info *bw_info)
1524 {
1525         bw_info->ep_interval = 0;
1526         bw_info->mult = 0;
1527         bw_info->num_packets = 0;
1528         bw_info->max_packet_size = 0;
1529         bw_info->type = 0;
1530         bw_info->max_esit_payload = 0;
1531 }
1532
1533 void xhci_update_bw_info(struct xhci_hcd *xhci,
1534                 struct xhci_container_ctx *in_ctx,
1535                 struct xhci_input_control_ctx *ctrl_ctx,
1536                 struct xhci_virt_device *virt_dev)
1537 {
1538         struct xhci_bw_info *bw_info;
1539         struct xhci_ep_ctx *ep_ctx;
1540         unsigned int ep_type;
1541         int i;
1542
1543         for (i = 1; i < 31; i++) {
1544                 bw_info = &virt_dev->eps[i].bw_info;
1545
1546                 /* We can't tell what endpoint type is being dropped, but
1547                  * unconditionally clearing the bandwidth info for non-periodic
1548                  * endpoints should be harmless because the info will never be
1549                  * set in the first place.
1550                  */
1551                 if (!EP_IS_ADDED(ctrl_ctx, i) && EP_IS_DROPPED(ctrl_ctx, i)) {
1552                         /* Dropped endpoint */
1553                         xhci_clear_endpoint_bw_info(bw_info);
1554                         continue;
1555                 }
1556
1557                 if (EP_IS_ADDED(ctrl_ctx, i)) {
1558                         ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, i);
1559                         ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2));
1560
1561                         /* Ignore non-periodic endpoints */
1562                         if (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
1563                                         ep_type != ISOC_IN_EP &&
1564                                         ep_type != INT_IN_EP)
1565                                 continue;
1566
1567                         /* Added or changed endpoint */
1568                         bw_info->ep_interval = CTX_TO_EP_INTERVAL(
1569                                         le32_to_cpu(ep_ctx->ep_info));
1570                         /* Number of packets and mult are zero-based in the
1571                          * input context, but we want one-based for the
1572                          * interval table.
1573                          */
1574                         bw_info->mult = CTX_TO_EP_MULT(
1575                                         le32_to_cpu(ep_ctx->ep_info)) + 1;
1576                         bw_info->num_packets = CTX_TO_MAX_BURST(
1577                                         le32_to_cpu(ep_ctx->ep_info2)) + 1;
1578                         bw_info->max_packet_size = MAX_PACKET_DECODED(
1579                                         le32_to_cpu(ep_ctx->ep_info2));
1580                         bw_info->type = ep_type;
1581                         bw_info->max_esit_payload = CTX_TO_MAX_ESIT_PAYLOAD(
1582                                         le32_to_cpu(ep_ctx->tx_info));
1583                 }
1584         }
1585 }
1586
1587 /* Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
1588  * Useful when you want to change one particular aspect of the endpoint and then
1589  * issue a configure endpoint command.
1590  */
1591 void xhci_endpoint_copy(struct xhci_hcd *xhci,
1592                 struct xhci_container_ctx *in_ctx,
1593                 struct xhci_container_ctx *out_ctx,
1594                 unsigned int ep_index)
1595 {
1596         struct xhci_ep_ctx *out_ep_ctx;
1597         struct xhci_ep_ctx *in_ep_ctx;
1598
1599         out_ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1600         in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
1601
1602         in_ep_ctx->ep_info = out_ep_ctx->ep_info;
1603         in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
1604         in_ep_ctx->deq = out_ep_ctx->deq;
1605         in_ep_ctx->tx_info = out_ep_ctx->tx_info;
1606         if (xhci->quirks & XHCI_MTK_HOST) {
1607                 in_ep_ctx->reserved[0] = out_ep_ctx->reserved[0];
1608                 in_ep_ctx->reserved[1] = out_ep_ctx->reserved[1];
1609         }
1610 }
1611
1612 /* Copy output xhci_slot_ctx to the input xhci_slot_ctx.
1613  * Useful when you want to change one particular aspect of the device slot and then
1614  * issue a configure endpoint command.  Only the context entries field matters,
1615  * but we'll copy the whole thing anyway.
1616  */
1617 void xhci_slot_copy(struct xhci_hcd *xhci,
1618                 struct xhci_container_ctx *in_ctx,
1619                 struct xhci_container_ctx *out_ctx)
1620 {
1621         struct xhci_slot_ctx *in_slot_ctx;
1622         struct xhci_slot_ctx *out_slot_ctx;
1623
1624         in_slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
1625         out_slot_ctx = xhci_get_slot_ctx(xhci, out_ctx);
1626
1627         in_slot_ctx->dev_info = out_slot_ctx->dev_info;
1628         in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
1629         in_slot_ctx->tt_info = out_slot_ctx->tt_info;
1630         in_slot_ctx->dev_state = out_slot_ctx->dev_state;
1631 }
1632
1633 /* Set up the scratchpad buffer array and scratchpad buffers, if needed. */
1634 static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
1635 {
1636         int i;
1637         struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
1638         int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
1639
1640         xhci_dbg_trace(xhci, trace_xhci_dbg_init,
1641                         "Allocating %d scratchpad buffers", num_sp);
1642
1643         if (!num_sp)
1644                 return 0;
1645
1646         xhci->scratchpad = kzalloc_node(sizeof(*xhci->scratchpad), flags,
1647                                 dev_to_node(dev));
1648         if (!xhci->scratchpad)
1649                 goto fail_sp;
1650
1651         xhci->scratchpad->sp_array = dma_alloc_coherent(dev,
1652                                      size_mul(sizeof(u64), num_sp),
1653                                      &xhci->scratchpad->sp_dma, flags);
1654         if (!xhci->scratchpad->sp_array)
1655                 goto fail_sp2;
1656
1657         xhci->scratchpad->sp_buffers = kcalloc_node(num_sp, sizeof(void *),
1658                                         flags, dev_to_node(dev));
1659         if (!xhci->scratchpad->sp_buffers)
1660                 goto fail_sp3;
1661
1662         xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
1663         for (i = 0; i < num_sp; i++) {
1664                 dma_addr_t dma;
1665                 void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma,
1666                                                flags);
1667                 if (!buf)
1668                         goto fail_sp4;
1669
1670                 xhci->scratchpad->sp_array[i] = dma;
1671                 xhci->scratchpad->sp_buffers[i] = buf;
1672         }
1673
1674         return 0;
1675
1676  fail_sp4:
1677         while (i--)
1678                 dma_free_coherent(dev, xhci->page_size,
1679                                     xhci->scratchpad->sp_buffers[i],
1680                                     xhci->scratchpad->sp_array[i]);
1681
1682         kfree(xhci->scratchpad->sp_buffers);
1683
1684  fail_sp3:
1685         dma_free_coherent(dev, num_sp * sizeof(u64),
1686                             xhci->scratchpad->sp_array,
1687                             xhci->scratchpad->sp_dma);
1688
1689  fail_sp2:
1690         kfree(xhci->scratchpad);
1691         xhci->scratchpad = NULL;
1692
1693  fail_sp:
1694         return -ENOMEM;
1695 }
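/*
 * Informal sizing example: a controller reporting Max Scratchpad Buffers = 4
 * gets a 4-entry array of 64-bit DMA pointers (hooked into DCBAA slot 0
 * above) plus four page_size-sized buffers, i.e. 4 * 4 KiB of scratch memory
 * with the default 4 KiB page size.
 */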
1696
1697 static void scratchpad_free(struct xhci_hcd *xhci)
1698 {
1699         int num_sp;
1700         int i;
1701         struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
1702
1703         if (!xhci->scratchpad)
1704                 return;
1705
1706         num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
1707
1708         for (i = 0; i < num_sp; i++) {
1709                 dma_free_coherent(dev, xhci->page_size,
1710                                     xhci->scratchpad->sp_buffers[i],
1711                                     xhci->scratchpad->sp_array[i]);
1712         }
1713         kfree(xhci->scratchpad->sp_buffers);
1714         dma_free_coherent(dev, num_sp * sizeof(u64),
1715                             xhci->scratchpad->sp_array,
1716                             xhci->scratchpad->sp_dma);
1717         kfree(xhci->scratchpad);
1718         xhci->scratchpad = NULL;
1719 }
1720
1721 struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
1722                 bool allocate_completion, gfp_t mem_flags)
1723 {
1724         struct xhci_command *command;
1725         struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
1726
1727         command = kzalloc_node(sizeof(*command), mem_flags, dev_to_node(dev));
1728         if (!command)
1729                 return NULL;
1730
1731         if (allocate_completion) {
1732                 command->completion =
1733                         kzalloc_node(sizeof(struct completion), mem_flags,
1734                                 dev_to_node(dev));
1735                 if (!command->completion) {
1736                         kfree(command);
1737                         return NULL;
1738                 }
1739                 init_completion(command->completion);
1740         }
1741
1742         command->status = 0;
1743         /* set default timeout to 5000 ms */
1744         command->timeout_ms = XHCI_CMD_DEFAULT_TIMEOUT;
1745         INIT_LIST_HEAD(&command->cmd_list);
1746         return command;
1747 }
1748
1749 struct xhci_command *xhci_alloc_command_with_ctx(struct xhci_hcd *xhci,
1750                 bool allocate_completion, gfp_t mem_flags)
1751 {
1752         struct xhci_command *command;
1753
1754         command = xhci_alloc_command(xhci, allocate_completion, mem_flags);
1755         if (!command)
1756                 return NULL;
1757
1758         command->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT,
1759                                                    mem_flags);
1760         if (!command->in_ctx) {
1761                 kfree(command->completion);
1762                 kfree(command);
1763                 return NULL;
1764         }
1765         return command;
1766 }
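/*
 * Rough usage sketch (assumed pattern, see callers such as
 * xhci_configure_endpoint() for the real thing): allocate a command with an
 * input context and a completion, queue it on the command ring, ring the
 * host controller doorbell, wait for the completion handler to fill in
 * ->status, then release everything with xhci_free_command().
 */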
1767
1768 void xhci_urb_free_priv(struct urb_priv *urb_priv)
1769 {
1770         kfree(urb_priv);
1771 }
1772
1773 void xhci_free_command(struct xhci_hcd *xhci,
1774                 struct xhci_command *command)
1775 {
1776         xhci_free_container_ctx(xhci,
1777                         command->in_ctx);
1778         kfree(command->completion);
1779         kfree(command);
1780 }
1781
1782 static int xhci_alloc_erst(struct xhci_hcd *xhci,
1783                     struct xhci_ring *evt_ring,
1784                     struct xhci_erst *erst,
1785                     gfp_t flags)
1786 {
1787         size_t size;
1788         unsigned int val;
1789         struct xhci_segment *seg;
1790         struct xhci_erst_entry *entry;
1791
1792         size = size_mul(sizeof(struct xhci_erst_entry), evt_ring->num_segs);
1793         erst->entries = dma_alloc_coherent(xhci_to_hcd(xhci)->self.sysdev,
1794                                            size, &erst->erst_dma_addr, flags);
1795         if (!erst->entries)
1796                 return -ENOMEM;
1797
1798         erst->num_entries = evt_ring->num_segs;
1799
1800         seg = evt_ring->first_seg;
1801         for (val = 0; val < evt_ring->num_segs; val++) {
1802                 entry = &erst->entries[val];
1803                 entry->seg_addr = cpu_to_le64(seg->dma);
1804                 entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
1805                 entry->rsvd = 0;
1806                 seg = seg->next;
1807         }
1808
1809         return 0;
1810 }
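/*
 * Layout note (summary of the code above): each ERST entry is 16 bytes and
 * records one event ring segment's DMA base plus its size in TRBs
 * (TRBS_PER_SEGMENT), so an event ring with two segments needs a 32-byte
 * table.
 */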
1811
1812 static void
1813 xhci_remove_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
1814 {
1815         u32 tmp;
1816
1817         if (!ir)
1818                 return;
1819
1820         /*
1821          * Clean out interrupter registers except ERSTBA. Clearing either the
1822          * low or high 32 bits of ERSTBA immediately causes the controller to
1823          * dereference the partially cleared 64 bit address, causing IOMMU error.
1824          */
1825         if (ir->ir_set) {
1826                 tmp = readl(&ir->ir_set->erst_size);
1827                 tmp &= ERST_SIZE_MASK;
1828                 writel(tmp, &ir->ir_set->erst_size);
1829
1830                 xhci_write_64(xhci, ERST_EHB, &ir->ir_set->erst_dequeue);
1831         }
1832 }
1833
1834 static void
1835 xhci_free_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
1836 {
1837         struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
1838         size_t erst_size;
1839
1840         if (!ir)
1841                 return;
1842
1843         erst_size = sizeof(struct xhci_erst_entry) * ir->erst.num_entries;
1844         if (ir->erst.entries)
1845                 dma_free_coherent(dev, erst_size,
1846                                   ir->erst.entries,
1847                                   ir->erst.erst_dma_addr);
1848         ir->erst.entries = NULL;
1849
1850         /* free interrupter event ring */
1851         if (ir->event_ring)
1852                 xhci_ring_free(xhci, ir->event_ring);
1853
1854         ir->event_ring = NULL;
1855
1856         kfree(ir);
1857 }
1858
1859 void xhci_remove_secondary_interrupter(struct usb_hcd *hcd, struct xhci_interrupter *ir)
1860 {
1861         struct xhci_hcd *xhci = hcd_to_xhci(hcd);
1862         unsigned int intr_num;
1863
1864         spin_lock_irq(&xhci->lock);
1865
1866         /* interrupter 0 is primary interrupter, don't touch it */
1867         if (!ir || !ir->intr_num || ir->intr_num >= xhci->max_interrupters) {
1868                 xhci_dbg(xhci, "Invalid secondary interrupter, can't remove\n");
1869                 spin_unlock_irq(&xhci->lock);
1870                 return;
1871         }
1872
1873         intr_num = ir->intr_num;
1874
1875         xhci_remove_interrupter(xhci, ir);
1876         xhci->interrupters[intr_num] = NULL;
1877
1878         spin_unlock_irq(&xhci->lock);
1879
1880         xhci_free_interrupter(xhci, ir);
1881 }
1882 EXPORT_SYMBOL_GPL(xhci_remove_secondary_interrupter);
1883
1884 void xhci_mem_cleanup(struct xhci_hcd *xhci)
1885 {
1886         struct device   *dev = xhci_to_hcd(xhci)->self.sysdev;
1887         int i, j, num_ports;
1888
1889         cancel_delayed_work_sync(&xhci->cmd_timer);
1890
1891         for (i = 0; i < xhci->max_interrupters; i++) {
1892                 if (xhci->interrupters[i]) {
1893                         xhci_remove_interrupter(xhci, xhci->interrupters[i]);
1894                         xhci_free_interrupter(xhci, xhci->interrupters[i]);
1895                         xhci->interrupters[i] = NULL;
1896                 }
1897         }
1898         xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed interrupters");
1899
1900         if (xhci->cmd_ring)
1901                 xhci_ring_free(xhci, xhci->cmd_ring);
1902         xhci->cmd_ring = NULL;
1903         xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed command ring");
1904         xhci_cleanup_command_queue(xhci);
1905
1906         num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
1907         for (i = 0; i < num_ports && xhci->rh_bw; i++) {
1908                 struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
1909                 for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
1910                         struct list_head *ep = &bwt->interval_bw[j].endpoints;
1911                         while (!list_empty(ep))
1912                                 list_del_init(ep->next);
1913                 }
1914         }
1915
1916         for (i = HCS_MAX_SLOTS(xhci->hcs_params1); i > 0; i--)
1917                 xhci_free_virt_devices_depth_first(xhci, i);
1918
1919         dma_pool_destroy(xhci->segment_pool);
1920         xhci->segment_pool = NULL;
1921         xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed segment pool");
1922
1923         dma_pool_destroy(xhci->device_pool);
1924         xhci->device_pool = NULL;
1925         xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed device context pool");
1926
1927         dma_pool_destroy(xhci->small_streams_pool);
1928         xhci->small_streams_pool = NULL;
1929         xhci_dbg_trace(xhci, trace_xhci_dbg_init,
1930                         "Freed small stream array pool");
1931
1932         dma_pool_destroy(xhci->medium_streams_pool);
1933         xhci->medium_streams_pool = NULL;
1934         xhci_dbg_trace(xhci, trace_xhci_dbg_init,
1935                         "Freed medium stream array pool");
1936
1937         if (xhci->dcbaa)
1938                 dma_free_coherent(dev, sizeof(*xhci->dcbaa),
1939                                 xhci->dcbaa, xhci->dcbaa->dma);
1940         xhci->dcbaa = NULL;
1941
1942         scratchpad_free(xhci);
1943
1944         if (!xhci->rh_bw)
1945                 goto no_bw;
1946
1947         for (i = 0; i < num_ports; i++) {
1948                 struct xhci_tt_bw_info *tt, *n;
1949                 list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) {
1950                         list_del(&tt->tt_list);
1951                         kfree(tt);
1952                 }
1953         }
1954
1955 no_bw:
1956         xhci->cmd_ring_reserved_trbs = 0;
1957         xhci->usb2_rhub.num_ports = 0;
1958         xhci->usb3_rhub.num_ports = 0;
1959         xhci->num_active_eps = 0;
1960         kfree(xhci->usb2_rhub.ports);
1961         kfree(xhci->usb3_rhub.ports);
1962         kfree(xhci->hw_ports);
1963         kfree(xhci->rh_bw);
1964         kfree(xhci->ext_caps);
1965         for (i = 0; i < xhci->num_port_caps; i++)
1966                 kfree(xhci->port_caps[i].psi);
1967         kfree(xhci->port_caps);
1968         kfree(xhci->interrupters);
1969         xhci->num_port_caps = 0;
1970
1971         xhci->usb2_rhub.ports = NULL;
1972         xhci->usb3_rhub.ports = NULL;
1973         xhci->hw_ports = NULL;
1974         xhci->rh_bw = NULL;
1975         xhci->ext_caps = NULL;
1976         xhci->port_caps = NULL;
1977         xhci->interrupters = NULL;
1978
1979         xhci->page_size = 0;
1980         xhci->page_shift = 0;
1981         xhci->usb2_rhub.bus_state.bus_suspended = 0;
1982         xhci->usb3_rhub.bus_state.bus_suspended = 0;
1983 }
1984
1985 static void xhci_set_hc_event_deq(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
1986 {
1987         dma_addr_t deq;
1988
1989         deq = xhci_trb_virt_to_dma(ir->event_ring->deq_seg,
1990                         ir->event_ring->dequeue);
1991         if (!deq)
1992                 xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr.\n");
1993         /* Update HC event ring dequeue pointer */
1994         /* Don't clear the EHB bit (which is RW1C) because
1995          * there might be more events to service.
1996          */
1997         xhci_dbg_trace(xhci, trace_xhci_dbg_init,
1998                        "// Write event ring dequeue pointer, preserving EHB bit");
1999         xhci_write_64(xhci, deq & ERST_PTR_MASK, &ir->ir_set->erst_dequeue);
2000 }
2001
2002 static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
2003                 __le32 __iomem *addr, int max_caps)
2004 {
2005         u32 temp, port_offset, port_count;
2006         int i;
2007         u8 major_revision, minor_revision, tmp_minor_revision;
2008         struct xhci_hub *rhub;
2009         struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
2010         struct xhci_port_cap *port_cap;
2011
2012         temp = readl(addr);
2013         major_revision = XHCI_EXT_PORT_MAJOR(temp);
2014         minor_revision = XHCI_EXT_PORT_MINOR(temp);
2015
2016         if (major_revision == 0x03) {
2017                 rhub = &xhci->usb3_rhub;
2018                 /*
2019                  * Some hosts incorrectly use sub-minor version for minor
2020                  * version (i.e. 0x02 instead of 0x20 for bcdUSB 0x320 and 0x01
2021                  * for bcdUSB 0x310). Since there is no USB release with bcdUSB
2022                  * 0x301 to 0x309, we can assume that they are
2023                  * incorrect and fix it here.
2024                  */
2025                 if (minor_revision > 0x00 && minor_revision < 0x10)
2026                         minor_revision <<= 4;
2027                 /*
2028                  * Some Zhaoxin xHCI controllers follow the USB 3.1 spec
2029                  * but only support Gen1 speeds.
2030                  */
2031                 if (xhci->quirks & XHCI_ZHAOXIN_HOST) {
2032                         tmp_minor_revision = minor_revision;
2033                         minor_revision = 0;
2034                 }
2035
2036         } else if (major_revision <= 0x02) {
2037                 rhub = &xhci->usb2_rhub;
2038         } else {
2039                 xhci_warn(xhci, "Ignoring unknown port speed, Ext Cap %p, revision = 0x%x\n",
2040                                 addr, major_revision);
2041                 /* Ignoring port protocol we can't understand. FIXME */
2042                 return;
2043         }
2044
2045         /* Port offset and count in the third dword, see section 7.2 */
2046         temp = readl(addr + 2);
2047         port_offset = XHCI_EXT_PORT_OFF(temp);
2048         port_count = XHCI_EXT_PORT_COUNT(temp);
2049         xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2050                        "Ext Cap %p, port offset = %u, count = %u, revision = 0x%x",
2051                        addr, port_offset, port_count, major_revision);
2052         /* Port count includes the current port offset */
2053         if (port_offset == 0 || (port_offset + port_count - 1) > num_ports)
2054                 /* WTF? "Valid values are '1' to MaxPorts" */
2055                 return;
2056
2057         port_cap = &xhci->port_caps[xhci->num_port_caps++];
2058         if (xhci->num_port_caps > max_caps)
2059                 return;
2060
2061         port_cap->psi_count = XHCI_EXT_PORT_PSIC(temp);
2062
2063         if (port_cap->psi_count) {
2064                 port_cap->psi = kcalloc_node(port_cap->psi_count,
2065                                              sizeof(*port_cap->psi),
2066                                              GFP_KERNEL, dev_to_node(dev));
2067                 if (!port_cap->psi)
2068                         port_cap->psi_count = 0;
2069
2070                 port_cap->psi_uid_count++;
2071                 for (i = 0; i < port_cap->psi_count; i++) {
2072                         port_cap->psi[i] = readl(addr + 4 + i);
2073
2074                         /* count unique ID values, two consecutive entries can
2075                          * have the same ID if link is asymmetric
2076                          */
2077                         if (i && (XHCI_EXT_PORT_PSIV(port_cap->psi[i]) !=
2078                                   XHCI_EXT_PORT_PSIV(port_cap->psi[i - 1])))
2079                                 port_cap->psi_uid_count++;
2080
2081                         if (xhci->quirks & XHCI_ZHAOXIN_HOST &&
2082                             major_revision == 0x03 &&
2083                             XHCI_EXT_PORT_PSIV(port_cap->psi[i]) >= 5)
2084                                 minor_revision = tmp_minor_revision;
2085
2086                         xhci_dbg(xhci, "PSIV:%d PSIE:%d PLT:%d PFD:%d LP:%d PSIM:%d\n",
2087                                   XHCI_EXT_PORT_PSIV(port_cap->psi[i]),
2088                                   XHCI_EXT_PORT_PSIE(port_cap->psi[i]),
2089                                   XHCI_EXT_PORT_PLT(port_cap->psi[i]),
2090                                   XHCI_EXT_PORT_PFD(port_cap->psi[i]),
2091                                   XHCI_EXT_PORT_LP(port_cap->psi[i]),
2092                                   XHCI_EXT_PORT_PSIM(port_cap->psi[i]));
2093                 }
2094         }
2095
2096         rhub->maj_rev = major_revision;
2097
2098         if (rhub->min_rev < minor_revision)
2099                 rhub->min_rev = minor_revision;
2100
2101         port_cap->maj_rev = major_revision;
2102         port_cap->min_rev = minor_revision;
2103
2104         /* cache usb2 port capabilities */
2105         if (major_revision < 0x03 && xhci->num_ext_caps < max_caps)
2106                 xhci->ext_caps[xhci->num_ext_caps++] = temp;
2107
2108         if ((xhci->hci_version >= 0x100) && (major_revision != 0x03) &&
2109                  (temp & XHCI_HLC)) {
2110                 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2111                                "xHCI 1.0: support USB2 hardware lpm");
2112                 xhci->hw_lpm_support = 1;
2113         }
2114
2115         port_offset--;
2116         for (i = port_offset; i < (port_offset + port_count); i++) {
2117                 struct xhci_port *hw_port = &xhci->hw_ports[i];
2118                 /* Duplicate entry.  Ignore the port if the revisions differ. */
2119                 if (hw_port->rhub) {
2120                         xhci_warn(xhci, "Duplicate port entry, Ext Cap %p, port %u\n", addr, i);
2121                         xhci_warn(xhci, "Port was marked as USB %u, duplicated as USB %u\n",
2122                                         hw_port->rhub->maj_rev, major_revision);
2123                         /* Only adjust the roothub port counts if we haven't
2124                          * found a similar duplicate.
2125                          */
2126                         if (hw_port->rhub != rhub &&
2127                                  hw_port->hcd_portnum != DUPLICATE_ENTRY) {
2128                                 hw_port->rhub->num_ports--;
2129                                 hw_port->hcd_portnum = DUPLICATE_ENTRY;
2130                         }
2131                         continue;
2132                 }
2133                 hw_port->rhub = rhub;
2134                 hw_port->port_cap = port_cap;
2135                 rhub->num_ports++;
2136         }
2137         /* FIXME: Should we disable ports not in the Extended Capabilities? */
2138 }
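/*
 * Informal example of the extended capability layout parsed above: dword 0
 * carries the protocol major/minor revision, dword 2 the compatible port
 * offset, port count and PSI count, and any PSI dwords follow.  A USB 3
 * capability with port offset 5 and port count 2 therefore claims ports 5
 * and 6 (1-based) for the USB 3 roothub.
 */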
2139
2140 static void xhci_create_rhub_port_array(struct xhci_hcd *xhci,
2141                                         struct xhci_hub *rhub, gfp_t flags)
2142 {
2143         int port_index = 0;
2144         int i;
2145         struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
2146
2147         if (!rhub->num_ports)
2148                 return;
2149         rhub->ports = kcalloc_node(rhub->num_ports, sizeof(*rhub->ports),
2150                         flags, dev_to_node(dev));
2151         if (!rhub->ports)
2152                 return;
2153
2154         for (i = 0; i < HCS_MAX_PORTS(xhci->hcs_params1); i++) {
2155                 if (xhci->hw_ports[i].rhub != rhub ||
2156                     xhci->hw_ports[i].hcd_portnum == DUPLICATE_ENTRY)
2157                         continue;
2158                 xhci->hw_ports[i].hcd_portnum = port_index;
2159                 rhub->ports[port_index] = &xhci->hw_ports[i];
2160                 port_index++;
2161                 if (port_index == rhub->num_ports)
2162                         break;
2163         }
2164 }
2165
2166 /*
2167  * Scan the Extended Capabilities for the "Supported Protocol Capabilities" that
2168  * specify what speeds each port is supposed to be.  We can't count on the port
2169  * speed bits in the PORTSC register being correct until a device is connected,
2170  * but we need to set up the two fake roothubs with the correct number of USB
2171  * 3.0 and USB 2.0 ports at host controller initialization time.
2172  */
2173 static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
2174 {
2175         void __iomem *base;
2176         u32 offset;
2177         unsigned int num_ports;
2178         int i, j;
2179         int cap_count = 0;
2180         u32 cap_start;
2181         struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
2182
2183         num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
2184         xhci->hw_ports = kcalloc_node(num_ports, sizeof(*xhci->hw_ports),
2185                                 flags, dev_to_node(dev));
2186         if (!xhci->hw_ports)
2187                 return -ENOMEM;
2188
2189         for (i = 0; i < num_ports; i++) {
2190                 xhci->hw_ports[i].addr = &xhci->op_regs->port_status_base +
2191                         NUM_PORT_REGS * i;
2192                 xhci->hw_ports[i].hw_portnum = i;
2193
2194                 init_completion(&xhci->hw_ports[i].rexit_done);
2195                 init_completion(&xhci->hw_ports[i].u3exit_done);
2196         }
2197
2198         xhci->rh_bw = kcalloc_node(num_ports, sizeof(*xhci->rh_bw), flags,
2199                                    dev_to_node(dev));
2200         if (!xhci->rh_bw)
2201                 return -ENOMEM;
2202         for (i = 0; i < num_ports; i++) {
2203                 struct xhci_interval_bw_table *bw_table;
2204
2205                 INIT_LIST_HEAD(&xhci->rh_bw[i].tts);
2206                 bw_table = &xhci->rh_bw[i].bw_table;
2207                 for (j = 0; j < XHCI_MAX_INTERVAL; j++)
2208                         INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
2209         }
2210         base = &xhci->cap_regs->hc_capbase;
2211
2212         cap_start = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_PROTOCOL);
2213         if (!cap_start) {
2214                 xhci_err(xhci, "No Extended Capability registers, unable to set up roothub\n");
2215                 return -ENODEV;
2216         }
2217
2218         offset = cap_start;
2219         /* count extended protocol capability entries for later caching */
2220         while (offset) {
2221                 cap_count++;
2222                 offset = xhci_find_next_ext_cap(base, offset,
2223                                                       XHCI_EXT_CAPS_PROTOCOL);
2224         }
2225
2226         xhci->ext_caps = kcalloc_node(cap_count, sizeof(*xhci->ext_caps),
2227                                 flags, dev_to_node(dev));
2228         if (!xhci->ext_caps)
2229                 return -ENOMEM;
2230
2231         xhci->port_caps = kcalloc_node(cap_count, sizeof(*xhci->port_caps),
2232                                 flags, dev_to_node(dev));
2233         if (!xhci->port_caps)
2234                 return -ENOMEM;
2235
2236         offset = cap_start;
2237
2238         while (offset) {
2239                 xhci_add_in_port(xhci, num_ports, base + offset, cap_count);
2240                 if (xhci->usb2_rhub.num_ports + xhci->usb3_rhub.num_ports ==
2241                     num_ports)
2242                         break;
2243                 offset = xhci_find_next_ext_cap(base, offset,
2244                                                 XHCI_EXT_CAPS_PROTOCOL);
2245         }
2246         if (xhci->usb2_rhub.num_ports == 0 && xhci->usb3_rhub.num_ports == 0) {
2247                 xhci_warn(xhci, "No ports on the roothubs?\n");
2248                 return -ENODEV;
2249         }
2250         xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2251                        "Found %u USB 2.0 ports and %u USB 3.0 ports.",
2252                        xhci->usb2_rhub.num_ports, xhci->usb3_rhub.num_ports);
2253
2254         /* Place limits on the number of roothub ports so that the hub
2255          * descriptors aren't longer than the USB core will allocate.
2256          */
2257         if (xhci->usb3_rhub.num_ports > USB_SS_MAXPORTS) {
2258                 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2259                                 "Limiting USB 3.0 roothub ports to %u.",
2260                                 USB_SS_MAXPORTS);
2261                 xhci->usb3_rhub.num_ports = USB_SS_MAXPORTS;
2262         }
2263         if (xhci->usb2_rhub.num_ports > USB_MAXCHILDREN) {
2264                 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2265                                 "Limiting USB 2.0 roothub ports to %u.",
2266                                 USB_MAXCHILDREN);
2267                 xhci->usb2_rhub.num_ports = USB_MAXCHILDREN;
2268         }
2269
2270         if (!xhci->usb2_rhub.num_ports)
2271                 xhci_info(xhci, "USB2 root hub has no ports\n");
2272
2273         if (!xhci->usb3_rhub.num_ports)
2274                 xhci_info(xhci, "USB3 root hub has no ports\n");
2275
2276         xhci_create_rhub_port_array(xhci, &xhci->usb2_rhub, flags);
2277         xhci_create_rhub_port_array(xhci, &xhci->usb3_rhub, flags);
2278
2279         return 0;
2280 }
2281
2282 static struct xhci_interrupter *
2283 xhci_alloc_interrupter(struct xhci_hcd *xhci, int segs, gfp_t flags)
2284 {
2285         struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
2286         struct xhci_interrupter *ir;
2287         unsigned int num_segs = segs;
2288         int ret;
2289
2290         ir = kzalloc_node(sizeof(*ir), flags, dev_to_node(dev));
2291         if (!ir)
2292                 return NULL;
2293
2294         /* number of ring segments should be greater than 0 */
2295         if (segs <= 0)
2296                 num_segs = min_t(unsigned int, 1 << HCS_ERST_MAX(xhci->hcs_params2),
2297                          ERST_MAX_SEGS);
2298
2299         ir->event_ring = xhci_ring_alloc(xhci, num_segs, 1, TYPE_EVENT, 0,
2300                                          flags);
2301         if (!ir->event_ring) {
2302                 xhci_warn(xhci, "Failed to allocate interrupter event ring\n");
2303                 kfree(ir);
2304                 return NULL;
2305         }
2306
2307         ret = xhci_alloc_erst(xhci, ir->event_ring, &ir->erst, flags);
2308         if (ret) {
2309                 xhci_warn(xhci, "Failed to allocate interrupter erst\n");
2310                 xhci_ring_free(xhci, ir->event_ring);
2311                 kfree(ir);
2312                 return NULL;
2313         }
2314
2315         return ir;
2316 }
2317
2318 static int
2319 xhci_add_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir,
2320                      unsigned int intr_num)
2321 {
2322         u64 erst_base;
2323         u32 erst_size;
2324
2325         if (intr_num >= xhci->max_interrupters) {
2326                 xhci_warn(xhci, "Can't add interrupter %d, max interrupters %d\n",
2327                           intr_num, xhci->max_interrupters);
2328                 return -EINVAL;
2329         }
2330
2331         if (xhci->interrupters[intr_num]) {
2332                 xhci_warn(xhci, "Interrupter %d already set up\n", intr_num);
2333                 return -EINVAL;
2334         }
2335
2336         xhci->interrupters[intr_num] = ir;
2337         ir->intr_num = intr_num;
2338         ir->ir_set = &xhci->run_regs->ir_set[intr_num];
2339
2340         /* set ERST count with the number of entries in the segment table */
2341         erst_size = readl(&ir->ir_set->erst_size);
2342         erst_size &= ERST_SIZE_MASK;
2343         erst_size |= ir->event_ring->num_segs;
2344         writel(erst_size, &ir->ir_set->erst_size);
2345
2346         erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base);
2347         erst_base &= ERST_BASE_RSVDP;
2348         erst_base |= ir->erst.erst_dma_addr & ~ERST_BASE_RSVDP;
2349         xhci_write_64(xhci, erst_base, &ir->ir_set->erst_base);
2350
2351         /* Set the event ring dequeue address of this interrupter */
2352         xhci_set_hc_event_deq(xhci, ir);
2353
2354         return 0;
2355 }
2356
2357 struct xhci_interrupter *
2358 xhci_create_secondary_interrupter(struct usb_hcd *hcd, int num_seg)
2359 {
2360         struct xhci_hcd *xhci = hcd_to_xhci(hcd);
2361         struct xhci_interrupter *ir;
2362         unsigned int i;
2363         int err = -ENOSPC;
2364
2365         if (!xhci->interrupters || xhci->max_interrupters <= 1)
2366                 return NULL;
2367
2368         ir = xhci_alloc_interrupter(xhci, num_seg, GFP_KERNEL);
2369         if (!ir)
2370                 return NULL;
2371
2372         spin_lock_irq(&xhci->lock);
2373
2374         /* Find available secondary interrupter, interrupter 0 is reserved for primary */
2375         for (i = 1; i < xhci->max_interrupters; i++) {
2376                 if (xhci->interrupters[i] == NULL) {
2377                         err = xhci_add_interrupter(xhci, ir, i);
2378                         break;
2379                 }
2380         }
2381
2382         spin_unlock_irq(&xhci->lock);
2383
2384         if (err) {
2385                 xhci_warn(xhci, "Failed to add secondary interrupter, max interrupters %d\n",
2386                           xhci->max_interrupters);
2387                 xhci_free_interrupter(xhci, ir);
2388                 return NULL;
2389         }
2390
2391         xhci_dbg(xhci, "Add secondary interrupter %d, max interrupters %d\n",
2392                  i, xhci->max_interrupters);
2393
2394         return ir;
2395 }
2396 EXPORT_SYMBOL_GPL(xhci_create_secondary_interrupter);
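/*
 * Rough usage sketch (assumed; secondary interrupters target audio-offload
 * style clients that own the usb_hcd): create an interrupter with its own
 * event ring, hand ir->intr_num and ir->event_ring details to the offload
 * hardware, and call xhci_remove_secondary_interrupter(hcd, ir) when done.
 */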
2397
2398 int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
2399 {
2400         struct xhci_interrupter *ir;
2401         struct device   *dev = xhci_to_hcd(xhci)->self.sysdev;
2402         dma_addr_t      dma;
2403         unsigned int    val, val2;
2404         u64             val_64;
2405         u32             page_size, temp;
2406         int             i;
2407
2408         INIT_LIST_HEAD(&xhci->cmd_list);
2409
2410         /* init command timeout work */
2411         INIT_DELAYED_WORK(&xhci->cmd_timer, xhci_handle_command_timeout);
2412         init_completion(&xhci->cmd_ring_stop_completion);
2413
2414         page_size = readl(&xhci->op_regs->page_size);
2415         xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2416                         "Supported page size register = 0x%x", page_size);
2417         i = ffs(page_size);
2418         if (i < 16)
2419                 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2420                         "Supported page size of %iK", (1 << (i+12)) / 1024);
2421         else
2422                 xhci_warn(xhci, "WARN: no supported page size\n");
2423         /* Use 4K pages, since that's common and the minimum the HC supports */
2424         xhci->page_shift = 12;
2425         xhci->page_size = 1 << xhci->page_shift;
2426         xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2427                         "HCD page size set to %iK", xhci->page_size / 1024);
2428
2429         /*
2430          * Program the Number of Device Slots Enabled field in the CONFIG
2431          * register with the max value of slots the HC can handle.
2432          */
2433         val = HCS_MAX_SLOTS(readl(&xhci->cap_regs->hcs_params1));
2434         xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2435                         "// xHC can handle at most %d device slots.", val);
2436         val2 = readl(&xhci->op_regs->config_reg);
2437         val |= (val2 & ~HCS_SLOTS_MASK);
2438         xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2439                         "// Setting Max device slots reg = 0x%x.", val);
2440         writel(val, &xhci->op_regs->config_reg);
2441
2442         /*
2443          * xHCI section 5.4.6 - Device Context array must be
2444          * "physically contiguous and 64-byte (cache line) aligned".
2445          */
2446         xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
2447                         flags);
2448         if (!xhci->dcbaa)
2449                 goto fail;
2450         xhci->dcbaa->dma = dma;
2451         xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2452                         "// Device context base array address = 0x%pad (DMA), %p (virt)",
2453                         &xhci->dcbaa->dma, xhci->dcbaa);
2454         xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
2455
2456         /*
2457          * Initialize the ring segment pool.  The ring must be a contiguous
2458          * structure comprised of TRBs.  The TRBs must be 16 byte aligned,
2459          * however, the command ring segment needs 64-byte aligned segments
2460          * and our use of dma addresses in the trb_address_map radix tree needs
2461          * TRB_SEGMENT_SIZE alignment, so we pick the greater alignment need.
2462          */
2463         if (xhci->quirks & XHCI_ZHAOXIN_TRB_FETCH)
2464                 xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
2465                                 TRB_SEGMENT_SIZE * 2, TRB_SEGMENT_SIZE * 2, xhci->page_size * 2);
2466         else
2467                 xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
2468                                 TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE, xhci->page_size);
2469
2470         /* See Table 46 and Note on Figure 55 */
2471         xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
2472                         2112, 64, xhci->page_size);
2473         if (!xhci->segment_pool || !xhci->device_pool)
2474                 goto fail;
2475
2476         /* Linear stream context arrays don't have any boundary restrictions,
2477          * and only need to be 16-byte aligned.
2478          */
2479         xhci->small_streams_pool =
2480                 dma_pool_create("xHCI 256 byte stream ctx arrays",
2481                         dev, SMALL_STREAM_ARRAY_SIZE, 16, 0);
2482         xhci->medium_streams_pool =
2483                 dma_pool_create("xHCI 1KB stream ctx arrays",
2484                         dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0);
2485         /* Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE
2486          * will be allocated with dma_alloc_coherent()
2487          */
2488
2489         if (!xhci->small_streams_pool || !xhci->medium_streams_pool)
2490                 goto fail;
2491
2492         /* Set up the command ring to have one segment for now. */
2493         xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, 0, flags);
2494         if (!xhci->cmd_ring)
2495                 goto fail;
2496         xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2497                         "Allocated command ring at %p", xhci->cmd_ring);
2498         xhci_dbg_trace(xhci, trace_xhci_dbg_init, "First segment DMA is 0x%pad",
2499                         &xhci->cmd_ring->first_seg->dma);
2500
2501         /* Set the address in the Command Ring Control register */
2502         val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
2503         val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
2504                 (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
2505                 xhci->cmd_ring->cycle_state;
2506         xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2507                         "// Setting command ring address to 0x%016llx", val_64);
2508         xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
2509
2510         /* Reserve one command ring TRB for disabling LPM.
2511          * Since the USB core grabs the shared usb_bus bandwidth mutex before
2512          * disabling LPM, we only need to reserve one TRB for all devices.
2513          */
2514         xhci->cmd_ring_reserved_trbs++;
2515
2516         val = readl(&xhci->cap_regs->db_off);
2517         val &= DBOFF_MASK;
2518         xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2519                        "// Doorbell array is located at offset 0x%x from cap regs base addr",
2520                        val);
2521         xhci->dba = (void __iomem *) xhci->cap_regs + val;
2522
2523         /* Allocate and set up primary interrupter 0 with an event ring. */
2524         xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2525                        "Allocating primary event ring");
2526         xhci->interrupters = kcalloc_node(xhci->max_interrupters, sizeof(*xhci->interrupters),
2527                                           flags, dev_to_node(dev));
2528
2529         ir = xhci_alloc_interrupter(xhci, 0, flags);
2530         if (!ir)
2531                 goto fail;
2532
2533         if (xhci_add_interrupter(xhci, ir, 0))
2534                 goto fail;
2535
2536         xhci->isoc_bei_interval = AVOID_BEI_INTERVAL_MAX;
2537
2538         /*
2539          * XXX: Might need to set the Interrupter Moderation Register to
2540          * something other than the default (~1ms minimum between interrupts).
2541          * See section 5.5.1.2.
2542          */
2543         for (i = 0; i < MAX_HC_SLOTS; i++)
2544                 xhci->devs[i] = NULL;
2545
2546         if (scratchpad_alloc(xhci, flags))
2547                 goto fail;
2548         if (xhci_setup_port_arrays(xhci, flags))
2549                 goto fail;
2550
2551         /* Enable USB 3.0 device notifications for function remote wake, which
2552          * is necessary for allowing USB 3.0 devices to do remote wakeup from
2553          * U3 (device suspend).
2554          */
2555         temp = readl(&xhci->op_regs->dev_notification);
2556         temp &= ~DEV_NOTE_MASK;
2557         temp |= DEV_NOTE_FWAKE;
2558         writel(temp, &xhci->op_regs->dev_notification);
2559
2560         return 0;
2561
2562 fail:
2563         xhci_halt(xhci);
2564         xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
2565         xhci_mem_cleanup(xhci);
2566         return -ENOMEM;
2567 }