2 * Copyright(c) 2015, 2016 Intel Corporation.
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
20 * Redistribution and use in source and binary forms, with or without
21 * modification, are permitted provided that the following conditions
24 * - Redistributions of source code must retain the above copyright
25 * notice, this list of conditions and the following disclaimer.
26 * - Redistributions in binary form must reproduce the above copyright
27 * notice, this list of conditions and the following disclaimer in
28 * the documentation and/or other materials provided with the
30 * - Neither the name of Intel Corporation nor the names of its
31 * contributors may be used to endorse or promote products derived
32 * from this software without specific prior written permission.
34 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
48 #include <linux/delay.h>
53 #define SC_CTXT_PACKET_EGRESS_TIMEOUT 350 /* in chip cycles */
55 #define SC(name) SEND_CTXT_##name
/*
 * Send Context functions
 */
59 static void sc_wait_for_packet_egress(struct send_context *sc, int pause);
/*
 * Set the CM reset bit and wait for it to clear.  Use the provided
 * sendctrl register.  This routine has no locking.
 */
void __cm_reset(struct hfi1_devdata *dd, u64 sendctrl)
{
	write_csr(dd, SEND_CTRL, sendctrl | SEND_CTRL_CM_RESET_SMASK);
	while (1) {
		udelay(1);
		sendctrl = read_csr(dd, SEND_CTRL);
		if ((sendctrl & SEND_CTRL_CM_RESET_SMASK) == 0)
			break;
	}
}
76 /* defined in header release 48 and higher */
77 #ifndef SEND_CTRL_UNSUPPORTED_VL_SHIFT
78 #define SEND_CTRL_UNSUPPORTED_VL_SHIFT 3
79 #define SEND_CTRL_UNSUPPORTED_VL_MASK 0xffull
#define SEND_CTRL_UNSUPPORTED_VL_SMASK (SEND_CTRL_UNSUPPORTED_VL_MASK \
		<< SEND_CTRL_UNSUPPORTED_VL_SHIFT)
#endif
84 /* global control of PIO send */
85 void pio_send_control(struct hfi1_devdata *dd, int op)
89 int write = 1; /* write sendctrl back */
90 int flush = 0; /* re-read sendctrl to make sure it is flushed */
92 spin_lock_irqsave(&dd->sendctrl_lock, flags);
94 reg = read_csr(dd, SEND_CTRL);
	switch (op) {
	case PSC_GLOBAL_ENABLE:
		reg |= SEND_CTRL_SEND_ENABLE_SMASK;
	/* Fall through */
	case PSC_DATA_VL_ENABLE:
		/* Disallow sending on VLs not enabled */
		mask = (((~0ull) << num_vls) & SEND_CTRL_UNSUPPORTED_VL_MASK) <<
				SEND_CTRL_UNSUPPORTED_VL_SHIFT;
		reg = (reg & ~SEND_CTRL_UNSUPPORTED_VL_SMASK) | mask;
		break;
	case PSC_GLOBAL_DISABLE:
		reg &= ~SEND_CTRL_SEND_ENABLE_SMASK;
		break;
	case PSC_GLOBAL_VLARB_ENABLE:
		reg |= SEND_CTRL_VL_ARBITER_ENABLE_SMASK;
		break;
	case PSC_GLOBAL_VLARB_DISABLE:
		reg &= ~SEND_CTRL_VL_ARBITER_ENABLE_SMASK;
		break;
	case PSC_CM_RESET:
		__cm_reset(dd, reg);
		write = 0; /* CSR already written (and flushed) */
		break;
	case PSC_DATA_VL_DISABLE:
		reg |= SEND_CTRL_UNSUPPORTED_VL_SMASK;
		flush = 1;
		break;
	default:
		dd_dev_err(dd, "%s: invalid control %d\n", __func__, op);
		break;
	}

	if (write) {
		write_csr(dd, SEND_CTRL, reg);
		if (flush)
			(void)read_csr(dd, SEND_CTRL); /* flush write */
	}
133 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
136 /* number of send context memory pools */
137 #define NUM_SC_POOLS 2
139 /* Send Context Size (SCS) wildcards */
140 #define SCS_POOL_0 -1
141 #define SCS_POOL_1 -2
142 /* Send Context Count (SCC) wildcards */
143 #define SCC_PER_VL -1
144 #define SCC_PER_CPU -2
146 #define SCC_PER_KRCVQ -3
147 #define SCC_ACK_CREDITS 32
149 #define PIO_WAIT_BATCH_SIZE 5
151 /* default send context sizes */
152 static struct sc_config_sizes sc_config_sizes[SC_MAX] = {
153 [SC_KERNEL] = { .size = SCS_POOL_0, /* even divide, pool 0 */
154 .count = SCC_PER_VL },/* one per NUMA */
155 [SC_ACK] = { .size = SCC_ACK_CREDITS,
156 .count = SCC_PER_KRCVQ },
157 [SC_USER] = { .size = SCS_POOL_0, /* even divide, pool 0 */
			.count = SCC_PER_CPU },	/* one per CPU */
};
162 /* send context memory pool configuration */
163 struct mem_pool_config {
164 int centipercent; /* % of memory, in 100ths of 1% */
	int absolute_blocks;	/* absolute block count */
};
168 /* default memory pool configuration: 100% in pool 0 */
169 static struct mem_pool_config sc_mem_pool_config[NUM_SC_POOLS] = {
170 /* centi%, abs blocks */
171 { 10000, -1 }, /* pool 0 */
	{     0, -1 },		/* pool 1 */
};
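/*
 * Illustrative alternative (not the shipped default): a 75%/25% split
 * between the two pools would be written in centipercent as
 *	{ 7500, -1 },		pool 0
 *	{ 2500, -1 },		pool 1
 * while a purely absolute description would leave centipercent at -1 and
 * fill in absolute_blocks for both pools instead.  Mixing the two styles
 * across pools is rejected by init_sc_pools_and_sizes() below.
 */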
/* memory pool information, used when calculating final sizes */
struct mem_pool_info {
	int centipercent;	/* 100th of 1% of memory to use, -1 if blocks */
	int count;		/* count of contexts in the pool */
	int blocks;		/* block size of the pool */
	int size;		/* context size, in blocks */
};
/*
 * Convert a pool wildcard to a valid pool index.  The wildcards
 * start at -1 and increase negatively: -1 => 0, -2 => 1, etc.
 *
 * Return -1 on non-wildcard input, otherwise convert to a pool number.
 */
static int wildcard_to_pool(int wc)
{
	if (wc >= 0)
		return -1;	/* non-wildcard */
	return -wc - 1;
}
static const char *sc_type_names[SC_MAX] = { "kernel", "ack", "user" };

static const char *sc_type_name(int index)
{
	if (index < 0 || index >= SC_MAX)
		return "unknown";
	return sc_type_names[index];
}
216 * Read the send context memory pool configuration and send context
217 * size configuration. Replace any wildcards and come up with final
218 * counts and sizes for the send context types.
220 int init_sc_pools_and_sizes(struct hfi1_devdata *dd)
222 struct mem_pool_info mem_pool_info[NUM_SC_POOLS] = { { 0 } };
223 int total_blocks = (dd->chip_pio_mem_size / PIO_BLOCK_SIZE) - 1;
224 int total_contexts = 0;
228 int cp_total; /* centipercent total */
229 int ab_total; /* absolute block total */
235 * - copy the centipercents/absolute sizes from the pool config
236 * - sanity check these values
237 * - add up centipercents, then later check for full value
238 * - add up absolute blocks, then later check for over-commit
242 for (i = 0; i < NUM_SC_POOLS; i++) {
243 int cp = sc_mem_pool_config[i].centipercent;
244 int ab = sc_mem_pool_config[i].absolute_blocks;
247 * A negative value is "unused" or "invalid". Both *can*
248 * be valid, but centipercent wins, so check that first
250 if (cp >= 0) { /* centipercent valid */
252 } else if (ab >= 0) { /* absolute blocks valid */
254 } else { /* neither valid */
257 "Send context memory pool %d: both the block count and centipercent are invalid\n",
262 mem_pool_info[i].centipercent = cp;
263 mem_pool_info[i].blocks = ab;
266 /* do not use both % and absolute blocks for different pools */
267 if (cp_total != 0 && ab_total != 0) {
270 "All send context memory pools must be described as either centipercent or blocks, no mixing between pools\n");
274 /* if any percentages are present, they must add up to 100% x 100 */
275 if (cp_total != 0 && cp_total != 10000) {
278 "Send context memory pool centipercent is %d, expecting 10000\n",
283 /* the absolute pool total cannot be more than the mem total */
284 if (ab_total > total_blocks) {
287 "Send context memory pool absolute block count %d is larger than the memory size %d\n",
288 ab_total, total_blocks);
294 * - copy from the context size config
295 * - replace context type wildcard counts with real values
296 * - add up non-memory pool block sizes
297 * - add up memory pool user counts
300 for (i = 0; i < SC_MAX; i++) {
301 int count = sc_config_sizes[i].count;
302 int size = sc_config_sizes[i].size;
306 * Sanity check count: Either a positive value or
307 * one of the expected wildcards is valid. The positive
308 * value is checked later when we compare against total
		if (i == SC_ACK) {
			count = dd->n_krcv_queues;
313 } else if (i == SC_KERNEL) {
314 count = (INIT_SC_PER_VL * num_vls) + 1 /* VL15 */;
315 } else if (count == SCC_PER_CPU) {
316 count = dd->num_rcv_contexts - dd->n_krcv_queues;
317 } else if (count < 0) {
320 "%s send context invalid count wildcard %d\n",
321 sc_type_name(i), count);
324 if (total_contexts + count > dd->chip_send_contexts)
325 count = dd->chip_send_contexts - total_contexts;
327 total_contexts += count;
330 * Sanity check pool: The conversion will return a pool
331 * number or -1 if a fixed (non-negative) value. The fixed
332 * value is checked later when we compare against
333 * total memory available.
335 pool = wildcard_to_pool(size);
336 if (pool == -1) { /* non-wildcard */
337 fixed_blocks += size * count;
338 } else if (pool < NUM_SC_POOLS) { /* valid wildcard */
339 mem_pool_info[pool].count += count;
340 } else { /* invalid wildcard */
343 "%s send context invalid pool wildcard %d\n",
344 sc_type_name(i), size);
348 dd->sc_sizes[i].count = count;
349 dd->sc_sizes[i].size = size;
351 if (fixed_blocks > total_blocks) {
354 "Send context fixed block count, %u, larger than total block count %u\n",
355 fixed_blocks, total_blocks);
359 /* step 3: calculate the blocks in the pools, and pool context sizes */
360 pool_blocks = total_blocks - fixed_blocks;
361 if (ab_total > pool_blocks) {
364 "Send context fixed pool sizes, %u, larger than pool block count %u\n",
365 ab_total, pool_blocks);
368 /* subtract off the fixed pool blocks */
369 pool_blocks -= ab_total;
371 for (i = 0; i < NUM_SC_POOLS; i++) {
372 struct mem_pool_info *pi = &mem_pool_info[i];
374 /* % beats absolute blocks */
375 if (pi->centipercent >= 0)
376 pi->blocks = (pool_blocks * pi->centipercent) / 10000;
378 if (pi->blocks == 0 && pi->count != 0) {
381 "Send context memory pool %d has %u contexts, but no blocks\n",
385 if (pi->count == 0) {
386 /* warn about wasted blocks */
390 "Send context memory pool %d has %u blocks, but zero contexts\n",
394 pi->size = pi->blocks / pi->count;
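		/*
		 * Worked example with illustrative numbers: given
		 * pool_blocks = 2048, the default 100% (10000 centipercent)
		 * pool 0, and 40 contexts drawing from that pool:
		 *	pi->blocks = (2048 * 10000) / 10000 = 2048
		 *	pi->size   = 2048 / 40 = 51 blocks per context
		 * leaving 2048 - (51 * 40) = 8 pool blocks unused.
		 */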
398 /* step 4: fill in the context type sizes from the pool sizes */
400 for (i = 0; i < SC_MAX; i++) {
401 if (dd->sc_sizes[i].size < 0) {
402 unsigned pool = wildcard_to_pool(dd->sc_sizes[i].size);
404 WARN_ON_ONCE(pool >= NUM_SC_POOLS);
405 dd->sc_sizes[i].size = mem_pool_info[pool].size;
407 /* make sure we are not larger than what is allowed by the HW */
408 #define PIO_MAX_BLOCKS 1024
409 if (dd->sc_sizes[i].size > PIO_MAX_BLOCKS)
410 dd->sc_sizes[i].size = PIO_MAX_BLOCKS;
412 /* calculate our total usage */
413 used_blocks += dd->sc_sizes[i].size * dd->sc_sizes[i].count;
415 extra = total_blocks - used_blocks;
417 dd_dev_info(dd, "unused send context blocks: %d\n", extra);
419 return total_contexts;
422 int init_send_contexts(struct hfi1_devdata *dd)
425 int ret, i, j, context;
427 ret = init_credit_return(dd);
431 dd->hw_to_sw = kmalloc_array(TXE_NUM_CONTEXTS, sizeof(u8),
433 dd->send_contexts = kcalloc(dd->num_send_contexts,
434 sizeof(struct send_context_info),
436 if (!dd->send_contexts || !dd->hw_to_sw) {
438 kfree(dd->send_contexts);
439 free_credit_return(dd);
443 /* hardware context map starts with invalid send context indices */
444 for (i = 0; i < TXE_NUM_CONTEXTS; i++)
445 dd->hw_to_sw[i] = INVALID_SCI;
448 * All send contexts have their credit sizes. Allocate credits
449 * for each context one after another from the global space.
453 for (i = 0; i < SC_MAX; i++) {
454 struct sc_config_sizes *scs = &dd->sc_sizes[i];
456 for (j = 0; j < scs->count; j++) {
457 struct send_context_info *sci =
458 &dd->send_contexts[context];
461 sci->credits = scs->size;
472 * Allocate a software index and hardware context of the given type.
474 * Must be called with dd->sc_lock held.
476 static int sc_hw_alloc(struct hfi1_devdata *dd, int type, u32 *sw_index,
479 struct send_context_info *sci;
483 for (index = 0, sci = &dd->send_contexts[0];
484 index < dd->num_send_contexts; index++, sci++) {
485 if (sci->type == type && sci->allocated == 0) {
487 /* use a 1:1 mapping, but make them non-equal */
488 context = dd->chip_send_contexts - index - 1;
489 dd->hw_to_sw[context] = index;
491 *hw_context = context;
492 return 0; /* success */
495 dd_dev_err(dd, "Unable to locate a free type %d send context\n", type);
500 * Free the send context given by its software index.
502 * Must be called with dd->sc_lock held.
504 static void sc_hw_free(struct hfi1_devdata *dd, u32 sw_index, u32 hw_context)
506 struct send_context_info *sci;
508 sci = &dd->send_contexts[sw_index];
509 if (!sci->allocated) {
510 dd_dev_err(dd, "%s: sw_index %u not allocated? hw_context %u\n",
511 __func__, sw_index, hw_context);
514 dd->hw_to_sw[hw_context] = INVALID_SCI;
517 /* return the base context of a context in a group */
518 static inline u32 group_context(u32 context, u32 group)
520 return (context >> group) << group;
523 /* return the size of a group */
static inline u32 group_size(u32 group)
{
	return 1 << group;
}
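/*
 * Example: with group = 3 (a group of 1 << 3 = 8 contexts), hardware
 * context 21 has base context (21 >> 3) << 3 = 16, so contexts 16-23
 * form one group.  With group = 0, the only value used below for now,
 * every context is its own group of size 1.
 */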
530 * Obtain the credit return addresses, kernel virtual and physical, for the
533 * To understand this routine:
534 * o va and pa are arrays of struct credit_return. One for each physical
535 * send context, per NUMA.
536 * o Each send context always looks in its relative location in a struct
537 * credit_return for its credit return.
538 * o Each send context in a group must have its return address CSR programmed
539 * with the same value. Use the address of the first send context in the
542 static void cr_group_addresses(struct send_context *sc, dma_addr_t *pa)
544 u32 gc = group_context(sc->hw_context, sc->group);
545 u32 index = sc->hw_context & 0x7;
547 sc->hw_free = &sc->dd->cr_base[sc->node].va[gc].cr[index];
548 *pa = (unsigned long)
549 &((struct credit_return *)sc->dd->cr_base[sc->node].pa)[gc];
553 * Work queue function triggered in error interrupt routine for
556 static void sc_halted(struct work_struct *work)
558 struct send_context *sc;
560 sc = container_of(work, struct send_context, halt_work);
565 * Calculate PIO block threshold for this send context using the given MTU.
566 * Trigger a return when one MTU plus optional header of credits remain.
568 * Parameter mtu is in bytes.
569 * Parameter hdrqentsize is in DWORDs.
571 * Return value is what to write into the CSR: trigger return when
572 * unreturned credits pass this count.
574 u32 sc_mtu_to_threshold(struct send_context *sc, u32 mtu, u32 hdrqentsize)
579 /* add in the header size, then divide by the PIO block size */
580 mtu += hdrqentsize << 2;
581 release_credits = DIV_ROUND_UP(mtu, PIO_BLOCK_SIZE);
583 /* check against this context's credits */
	if (sc->credits <= release_credits)
		threshold = 1;
	else
		threshold = sc->credits - release_credits;
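	/*
	 * Worked example (illustrative, assuming the 64 byte PIO block):
	 * mtu = 4096 bytes and hdrqentsize = 32 dwords gives
	 *	4096 + (32 << 2) = 4224 bytes -> DIV_ROUND_UP(4224, 64) = 66
	 * blocks, so a context with 160 credits gets a threshold of
	 * 160 - 66 = 94: a return is triggered once fewer than one
	 * MTU-plus-header worth of credits remain unreturned.
	 */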
593 * Calculate credit threshold in terms of percent of the allocated credits.
594 * Trigger when unreturned credits equal or exceed the percentage of the whole.
596 * Return value is what to write into the CSR: trigger return when
597 * unreturned credits pass this count.
599 static u32 sc_percent_to_threshold(struct send_context *sc, u32 percent)
601 return (sc->credits * percent) / 100;
605 * Set the credit return threshold.
607 void sc_set_cr_threshold(struct send_context *sc, u32 new_threshold)
611 int force_return = 0;
613 spin_lock_irqsave(&sc->credit_ctrl_lock, flags);
615 old_threshold = (sc->credit_ctrl >>
616 SC(CREDIT_CTRL_THRESHOLD_SHIFT))
617 & SC(CREDIT_CTRL_THRESHOLD_MASK);
619 if (new_threshold != old_threshold) {
622 & ~SC(CREDIT_CTRL_THRESHOLD_SMASK))
624 & SC(CREDIT_CTRL_THRESHOLD_MASK))
625 << SC(CREDIT_CTRL_THRESHOLD_SHIFT));
626 write_kctxt_csr(sc->dd, sc->hw_context,
627 SC(CREDIT_CTRL), sc->credit_ctrl);
629 /* force a credit return on change to avoid a possible stall */
633 spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags);
636 sc_return_credits(sc);
642 * Set the CHECK_ENABLE register for the send context 'sc'.
644 void set_pio_integrity(struct send_context *sc)
646 struct hfi1_devdata *dd = sc->dd;
648 u32 hw_context = sc->hw_context;
	 * No integrity checks if HFI1_CAP_NO_INTEGRITY is set, or if
	 * the port is in snoop mode (HFI1_PORT_SNOOP_MODE).
	 */
655 if (likely(!HFI1_CAP_IS_KSET(NO_INTEGRITY)) &&
656 dd->hfi1_snoop.mode_flag != HFI1_PORT_SNOOP_MODE)
657 reg = hfi1_pkt_default_send_ctxt_mask(dd, type);
659 write_kctxt_csr(dd, hw_context, SC(CHECK_ENABLE), reg);
662 static u32 get_buffers_allocated(struct send_context *sc)
667 for_each_possible_cpu(cpu)
668 ret += *per_cpu_ptr(sc->buffers_allocated, cpu);
672 static void reset_buffers_allocated(struct send_context *sc)
676 for_each_possible_cpu(cpu)
677 (*per_cpu_ptr(sc->buffers_allocated, cpu)) = 0;
681 * Allocate a NUMA relative send context structure of the given type along
684 struct send_context *sc_alloc(struct hfi1_devdata *dd, int type,
685 uint hdrqentsize, int numa)
687 struct send_context_info *sci;
688 struct send_context *sc = NULL;
698 /* do not allocate while frozen */
699 if (dd->flags & HFI1_FROZEN)
702 sc = kzalloc_node(sizeof(*sc), GFP_KERNEL, numa);
706 sc->buffers_allocated = alloc_percpu(u32);
707 if (!sc->buffers_allocated) {
710 "Cannot allocate buffers_allocated per cpu counters\n"
715 spin_lock_irqsave(&dd->sc_lock, flags);
716 ret = sc_hw_alloc(dd, type, &sw_index, &hw_context);
718 spin_unlock_irqrestore(&dd->sc_lock, flags);
719 free_percpu(sc->buffers_allocated);
724 sci = &dd->send_contexts[sw_index];
730 spin_lock_init(&sc->alloc_lock);
731 spin_lock_init(&sc->release_lock);
732 spin_lock_init(&sc->credit_ctrl_lock);
733 INIT_LIST_HEAD(&sc->piowait);
734 INIT_WORK(&sc->halt_work, sc_halted);
735 init_waitqueue_head(&sc->halt_wait);
737 /* grouping is always single context for now */
740 sc->sw_index = sw_index;
741 sc->hw_context = hw_context;
742 cr_group_addresses(sc, &pa);
743 sc->credits = sci->credits;
745 /* PIO Send Memory Address details */
746 #define PIO_ADDR_CONTEXT_MASK 0xfful
747 #define PIO_ADDR_CONTEXT_SHIFT 16
748 sc->base_addr = dd->piobase + ((hw_context & PIO_ADDR_CONTEXT_MASK)
749 << PIO_ADDR_CONTEXT_SHIFT);
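	/*
	 * Example: each context gets a 64 KB (1 << 16) window in the mapped
	 * PIO send space, so hardware context 5 starts at
	 * dd->piobase + (5 << 16) = dd->piobase + 0x50000.
	 */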
751 /* set base and credits */
752 reg = ((sci->credits & SC(CTRL_CTXT_DEPTH_MASK))
753 << SC(CTRL_CTXT_DEPTH_SHIFT))
754 | ((sci->base & SC(CTRL_CTXT_BASE_MASK))
755 << SC(CTRL_CTXT_BASE_SHIFT));
756 write_kctxt_csr(dd, hw_context, SC(CTRL), reg);
758 set_pio_integrity(sc);
760 /* unmask all errors */
761 write_kctxt_csr(dd, hw_context, SC(ERR_MASK), (u64)-1);
763 /* set the default partition key */
	write_kctxt_csr(dd, hw_context, SC(CHECK_PARTITION_KEY),
			(SC(CHECK_PARTITION_KEY_VALUE_MASK) &
			 DEFAULT_PKEY) <<
			SC(CHECK_PARTITION_KEY_VALUE_SHIFT));
769 /* per context type checks */
770 if (type == SC_USER) {
771 opval = USER_OPCODE_CHECK_VAL;
		opmask = USER_OPCODE_CHECK_MASK;
	} else {
		opval = OPCODE_CHECK_VAL_DISABLED;
		opmask = OPCODE_CHECK_MASK_DISABLED;
	}
778 /* set the send context check opcode mask and value */
779 write_kctxt_csr(dd, hw_context, SC(CHECK_OPCODE),
780 ((u64)opmask << SC(CHECK_OPCODE_MASK_SHIFT)) |
781 ((u64)opval << SC(CHECK_OPCODE_VALUE_SHIFT)));
783 /* set up credit return */
784 reg = pa & SC(CREDIT_RETURN_ADDR_ADDRESS_SMASK);
785 write_kctxt_csr(dd, hw_context, SC(CREDIT_RETURN_ADDR), reg);
788 * Calculate the initial credit return threshold.
790 * For Ack contexts, set a threshold for half the credits.
791 * For User contexts use the given percentage. This has been
792 * sanitized on driver start-up.
793 * For Kernel contexts, use the default MTU plus a header.
795 if (type == SC_ACK) {
796 thresh = sc_percent_to_threshold(sc, 50);
797 } else if (type == SC_USER) {
798 thresh = sc_percent_to_threshold(sc,
799 user_credit_return_threshold);
800 } else { /* kernel */
801 thresh = sc_mtu_to_threshold(sc, hfi1_max_mtu, hdrqentsize);
803 reg = thresh << SC(CREDIT_CTRL_THRESHOLD_SHIFT);
804 /* add in early return */
805 if (type == SC_USER && HFI1_CAP_IS_USET(EARLY_CREDIT_RETURN))
806 reg |= SC(CREDIT_CTRL_EARLY_RETURN_SMASK);
807 else if (HFI1_CAP_IS_KSET(EARLY_CREDIT_RETURN)) /* kernel, ack */
808 reg |= SC(CREDIT_CTRL_EARLY_RETURN_SMASK);
810 /* set up write-through credit_ctrl */
811 sc->credit_ctrl = reg;
812 write_kctxt_csr(dd, hw_context, SC(CREDIT_CTRL), reg);
814 /* User send contexts should not allow sending on VL15 */
	if (type == SC_USER) {
		reg = 1ULL << 15;
		write_kctxt_csr(dd, hw_context, SC(CHECK_VL), reg);
	}
820 spin_unlock_irqrestore(&dd->sc_lock, flags);
823 * Allocate shadow ring to track outstanding PIO buffers _after_
824 * unlocking. We don't know the size until the lock is held and
825 * we can't allocate while the lock is held. No one is using
826 * the context yet, so allocate it now.
828 * User contexts do not get a shadow ring.
830 if (type != SC_USER) {
832 * Size the shadow ring 1 larger than the number of credits
833 * so head == tail can mean empty.
835 sc->sr_size = sci->credits + 1;
836 sc->sr = kzalloc_node(sizeof(union pio_shadow_ring) *
837 sc->sr_size, GFP_KERNEL, numa);
845 "Send context %u(%u) %s group %u credits %u credit_ctrl 0x%llx threshold %u\n",
857 /* free a per-NUMA send context structure */
858 void sc_free(struct send_context *sc)
860 struct hfi1_devdata *dd;
868 sc->flags |= SCF_IN_FREE; /* ensure no restarts */
870 if (!list_empty(&sc->piowait))
871 dd_dev_err(dd, "piowait list not empty!\n");
872 sw_index = sc->sw_index;
873 hw_context = sc->hw_context;
874 sc_disable(sc); /* make sure the HW is disabled */
875 flush_work(&sc->halt_work);
877 spin_lock_irqsave(&dd->sc_lock, flags);
878 dd->send_contexts[sw_index].sc = NULL;
880 /* clear/disable all registers set in sc_alloc */
881 write_kctxt_csr(dd, hw_context, SC(CTRL), 0);
882 write_kctxt_csr(dd, hw_context, SC(CHECK_ENABLE), 0);
883 write_kctxt_csr(dd, hw_context, SC(ERR_MASK), 0);
884 write_kctxt_csr(dd, hw_context, SC(CHECK_PARTITION_KEY), 0);
885 write_kctxt_csr(dd, hw_context, SC(CHECK_OPCODE), 0);
886 write_kctxt_csr(dd, hw_context, SC(CREDIT_RETURN_ADDR), 0);
887 write_kctxt_csr(dd, hw_context, SC(CREDIT_CTRL), 0);
889 /* release the index and context for re-use */
890 sc_hw_free(dd, sw_index, hw_context);
891 spin_unlock_irqrestore(&dd->sc_lock, flags);
894 free_percpu(sc->buffers_allocated);
898 /* disable the context */
899 void sc_disable(struct send_context *sc)
903 struct pio_buf *pbuf;
908 /* do all steps, even if already disabled */
909 spin_lock_irqsave(&sc->alloc_lock, flags);
910 reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL));
911 reg &= ~SC(CTRL_CTXT_ENABLE_SMASK);
912 sc->flags &= ~SCF_ENABLED;
913 sc_wait_for_packet_egress(sc, 1);
914 write_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL), reg);
915 spin_unlock_irqrestore(&sc->alloc_lock, flags);
918 * Flush any waiters. Once the context is disabled,
919 * credit return interrupts are stopped (although there
920 * could be one in-process when the context is disabled).
921 * Wait one microsecond for any lingering interrupts, then
922 * proceed with the flush.
	spin_lock_irqsave(&sc->release_lock, flags);
	if (sc->sr) {	/* this context has a shadow ring */
		while (sc->sr_tail != sc->sr_head) {
			pbuf = &sc->sr[sc->sr_tail].pbuf;
			if (pbuf->cb)
				(*pbuf->cb)(pbuf->arg, PRC_SC_DISABLE);
			sc->sr_tail++;
			if (sc->sr_tail >= sc->sr_size)
				sc->sr_tail = 0;
		}
	}
	spin_unlock_irqrestore(&sc->release_lock, flags);
939 /* return SendEgressCtxtStatus.PacketOccupancy */
940 #define packet_occupancy(r) \
941 (((r) & SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SMASK)\
942 >> SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SHIFT)
944 /* is egress halted on the context? */
945 #define egress_halted(r) \
946 ((r) & SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_HALT_STATUS_SMASK)
948 /* wait for packet egress, optionally pause for credit return */
949 static void sc_wait_for_packet_egress(struct send_context *sc, int pause)
951 struct hfi1_devdata *dd = sc->dd;
958 reg = read_csr(dd, sc->hw_context * 8 +
959 SEND_EGRESS_CTXT_STATUS);
960 /* done if egress is stopped */
961 if (egress_halted(reg))
963 reg = packet_occupancy(reg);
966 /* counter is reset if occupancy count changes */
970 /* timed out - bounce the link */
972 "%s: context %u(%u) timeout waiting for packets to egress, remaining count %u, bouncing link\n",
973 __func__, sc->sw_index,
974 sc->hw_context, (u32)reg);
975 queue_work(dd->pport->hfi1_wq,
976 &dd->pport->link_bounce_work);
984 /* Add additional delay to ensure chip returns all credits */
985 pause_for_credit_return(dd);
988 void sc_wait(struct hfi1_devdata *dd)
992 for (i = 0; i < dd->num_send_contexts; i++) {
993 struct send_context *sc = dd->send_contexts[i].sc;
997 sc_wait_for_packet_egress(sc, 0);
1002 * Restart a context after it has been halted due to error.
1004 * If the first step fails - wait for the halt to be asserted, return early.
1005 * Otherwise complain about timeouts but keep going.
1007 * It is expected that allocations (enabled flag bit) have been shut off
1008 * already (only applies to kernel contexts).
1010 int sc_restart(struct send_context *sc)
1012 struct hfi1_devdata *dd = sc->dd;
1017 /* bounce off if not halted, or being free'd */
1018 if (!(sc->flags & SCF_HALTED) || (sc->flags & SCF_IN_FREE))
1021 dd_dev_info(dd, "restarting send context %u(%u)\n", sc->sw_index,
1025 * Step 1: Wait for the context to actually halt.
1027 * The error interrupt is asynchronous to actually setting halt
1032 reg = read_kctxt_csr(dd, sc->hw_context, SC(STATUS));
1033 if (reg & SC(STATUS_CTXT_HALTED_SMASK))
1036 dd_dev_err(dd, "%s: context %u(%u) not halting, skipping\n",
1037 __func__, sc->sw_index, sc->hw_context);
1045 * Step 2: Ensure no users are still trying to write to PIO.
1047 * For kernel contexts, we have already turned off buffer allocation.
1048 * Now wait for the buffer count to go to zero.
1050 * For user contexts, the user handling code has cut off write access
1051 * to the context's PIO pages before calling this routine and will
1052 * restore write access after this routine returns.
1054 if (sc->type != SC_USER) {
1055 /* kernel context */
1058 count = get_buffers_allocated(sc);
1063 "%s: context %u(%u) timeout waiting for PIO buffers to zero, remaining %d\n",
1064 __func__, sc->sw_index,
1065 sc->hw_context, count);
1073 * Step 3: Wait for all packets to egress.
1074 * This is done while disabling the send context
1076 * Step 4: Disable the context
1078 * This is a superset of the halt. After the disable, the
1079 * errors can be cleared.
1084 * Step 5: Enable the context
1086 * This enable will clear the halted flag and per-send context
1089 return sc_enable(sc);
1093 * PIO freeze processing. To be called after the TXE block is fully frozen.
1094 * Go through all frozen send contexts and disable them. The contexts are
1095 * already stopped by the freeze.
1097 void pio_freeze(struct hfi1_devdata *dd)
1099 struct send_context *sc;
1102 for (i = 0; i < dd->num_send_contexts; i++) {
1103 sc = dd->send_contexts[i].sc;
1105 * Don't disable unallocated, unfrozen, or user send contexts.
1106 * User send contexts will be disabled when the process
1107 * calls into the driver to reset its context.
1109 if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER)
1112 /* only need to disable, the context is already stopped */
1118 * Unfreeze PIO for kernel send contexts. The precondition for calling this
1119 * is that all PIO send contexts have been disabled and the SPC freeze has
1120 * been cleared. Now perform the last step and re-enable each kernel context.
1121 * User (PSM) processing will occur when PSM calls into the kernel to
1122 * acknowledge the freeze.
1124 void pio_kernel_unfreeze(struct hfi1_devdata *dd)
1126 struct send_context *sc;
1129 for (i = 0; i < dd->num_send_contexts; i++) {
1130 sc = dd->send_contexts[i].sc;
1131 if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER)
1134 sc_enable(sc); /* will clear the sc frozen flag */
1139 * Wait for the SendPioInitCtxt.PioInitInProgress bit to clear.
1141 * -ETIMEDOUT - if we wait too long
1142 * -EIO - if there was an error
1144 static int pio_init_wait_progress(struct hfi1_devdata *dd)
1149 /* max is the longest possible HW init time / delay */
1150 max = (dd->icode == ICODE_FPGA_EMULATION) ? 120 : 5;
1152 reg = read_csr(dd, SEND_PIO_INIT_CTXT);
1153 if (!(reg & SEND_PIO_INIT_CTXT_PIO_INIT_IN_PROGRESS_SMASK))
1161 return reg & SEND_PIO_INIT_CTXT_PIO_INIT_ERR_SMASK ? -EIO : 0;
1165 * Reset all of the send contexts to their power-on state. Used
1166 * only during manual init - no lock against sc_enable needed.
1168 void pio_reset_all(struct hfi1_devdata *dd)
1172 /* make sure the init engine is not busy */
1173 ret = pio_init_wait_progress(dd);
1174 /* ignore any timeout */
1176 /* clear the error */
1177 write_csr(dd, SEND_PIO_ERR_CLEAR,
1178 SEND_PIO_ERR_CLEAR_PIO_INIT_SM_IN_ERR_SMASK);
1181 /* reset init all */
1182 write_csr(dd, SEND_PIO_INIT_CTXT,
1183 SEND_PIO_INIT_CTXT_PIO_ALL_CTXT_INIT_SMASK);
1185 ret = pio_init_wait_progress(dd);
1188 "PIO send context init %s while initializing all PIO blocks\n",
1189 ret == -ETIMEDOUT ? "is stuck" : "had an error");
1193 /* enable the context */
1194 int sc_enable(struct send_context *sc)
1196 u64 sc_ctrl, reg, pio;
1197 struct hfi1_devdata *dd;
1198 unsigned long flags;
1206 * Obtain the allocator lock to guard against any allocation
1207 * attempts (which should not happen prior to context being
1208 * enabled). On the release/disable side we don't need to
1209 * worry about locking since the releaser will not do anything
1210 * if the context accounting values have not changed.
1212 spin_lock_irqsave(&sc->alloc_lock, flags);
1213 sc_ctrl = read_kctxt_csr(dd, sc->hw_context, SC(CTRL));
1214 if ((sc_ctrl & SC(CTRL_CTXT_ENABLE_SMASK)))
1215 goto unlock; /* already enabled */
1217 /* IMPORTANT: only clear free and fill if transitioning 0 -> 1 */
1226 /* the alloc lock insures no fast path allocation */
1227 reset_buffers_allocated(sc);
1230 * Clear all per-context errors. Some of these will be set when
1231 * we are re-enabling after a context halt. Now that the context
1232 * is disabled, the halt will not clear until after the PIO init
1233 * engine runs below.
1235 reg = read_kctxt_csr(dd, sc->hw_context, SC(ERR_STATUS));
1237 write_kctxt_csr(dd, sc->hw_context, SC(ERR_CLEAR), reg);
1240 * The HW PIO initialization engine can handle only one init
1241 * request at a time. Serialize access to each device's engine.
1243 spin_lock(&dd->sc_init_lock);
1245 * Since access to this code block is serialized and
1246 * each access waits for the initialization to complete
1247 * before releasing the lock, the PIO initialization engine
1248 * should not be in use, so we don't have to wait for the
1249 * InProgress bit to go down.
1251 pio = ((sc->hw_context & SEND_PIO_INIT_CTXT_PIO_CTXT_NUM_MASK) <<
1252 SEND_PIO_INIT_CTXT_PIO_CTXT_NUM_SHIFT) |
1253 SEND_PIO_INIT_CTXT_PIO_SINGLE_CTXT_INIT_SMASK;
1254 write_csr(dd, SEND_PIO_INIT_CTXT, pio);
1256 * Wait until the engine is done. Give the chip the required time
1257 * so, hopefully, we read the register just once.
1260 ret = pio_init_wait_progress(dd);
1261 spin_unlock(&dd->sc_init_lock);
1264 "sctxt%u(%u): Context not enabled due to init failure %d\n",
1265 sc->sw_index, sc->hw_context, ret);
1270 * All is well. Enable the context.
1272 sc_ctrl |= SC(CTRL_CTXT_ENABLE_SMASK);
1273 write_kctxt_csr(dd, sc->hw_context, SC(CTRL), sc_ctrl);
1275 * Read SendCtxtCtrl to force the write out and prevent a timing
1276 * hazard where a PIO write may reach the context before the enable.
1278 read_kctxt_csr(dd, sc->hw_context, SC(CTRL));
1279 sc->flags |= SCF_ENABLED;
1282 spin_unlock_irqrestore(&sc->alloc_lock, flags);
1287 /* force a credit return on the context */
1288 void sc_return_credits(struct send_context *sc)
1293 /* a 0->1 transition schedules a credit return */
1294 write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE),
1295 SC(CREDIT_FORCE_FORCE_RETURN_SMASK));
1297 * Ensure that the write is flushed and the credit return is
1298 * scheduled. We care more about the 0 -> 1 transition.
1300 read_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE));
1301 /* set back to 0 for next time */
1302 write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE), 0);
1305 /* allow all in-flight packets to drain on the context */
1306 void sc_flush(struct send_context *sc)
1311 sc_wait_for_packet_egress(sc, 1);
1314 /* drop all packets on the context, no waiting until they are sent */
1315 void sc_drop(struct send_context *sc)
1320 dd_dev_info(sc->dd, "%s: context %u(%u) - not implemented\n",
1321 __func__, sc->sw_index, sc->hw_context);
1325 * Start the software reaction to a context halt or SPC freeze:
1326 * - mark the context as halted or frozen
1327 * - stop buffer allocations
1329 * Called from the error interrupt. Other work is deferred until
1330 * out of the interrupt.
1332 void sc_stop(struct send_context *sc, int flag)
1334 unsigned long flags;
1336 /* mark the context */
1339 /* stop buffer allocations */
1340 spin_lock_irqsave(&sc->alloc_lock, flags);
1341 sc->flags &= ~SCF_ENABLED;
1342 spin_unlock_irqrestore(&sc->alloc_lock, flags);
1343 wake_up(&sc->halt_wait);
1346 #define BLOCK_DWORDS (PIO_BLOCK_SIZE / sizeof(u32))
1347 #define dwords_to_blocks(x) DIV_ROUND_UP(x, BLOCK_DWORDS)
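/*
 * A minimal illustrative sketch (not part of the driver): credits needed
 * for a payload of 'payload_dw' dwords once the 2 dword PBC is added.
 * The helper and its name are purely for illustration.
 */
static inline unsigned long example_credits_for_payload(u32 payload_dw)
{
	/* e.g. payload_dw = 32 -> dwords_to_blocks(34) = 3 blocks/credits */
	return dwords_to_blocks(payload_dw + 2);
}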
1350 * The send context buffer "allocator".
1352 * @sc: the PIO send context we are allocating from
 * @dw_len: length of whole packet - including PBC - in dwords
1354 * @cb: optional callback to call when the buffer is finished sending
1355 * @arg: argument for cb
1357 * Return a pointer to a PIO buffer if successful, NULL if not enough room.
1359 struct pio_buf *sc_buffer_alloc(struct send_context *sc, u32 dw_len,
1360 pio_release_cb cb, void *arg)
1362 struct pio_buf *pbuf = NULL;
1363 unsigned long flags;
1364 unsigned long avail;
1365 unsigned long blocks = dwords_to_blocks(dw_len);
1366 unsigned long start_fill;
1370 spin_lock_irqsave(&sc->alloc_lock, flags);
1371 if (!(sc->flags & SCF_ENABLED)) {
1372 spin_unlock_irqrestore(&sc->alloc_lock, flags);
1377 avail = (unsigned long)sc->credits - (sc->fill - sc->alloc_free);
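	/*
	 * Illustration: sc->fill and sc->alloc_free are free-running block
	 * counters, so with credits = 100, fill = 1050 and alloc_free = 980
	 * there are 70 blocks outstanding and avail = 30; a request for 40
	 * blocks would drop into the refresh/retry path below.
	 */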
1378 if (blocks > avail) {
1379 /* not enough room */
1380 if (unlikely(trycount)) { /* already tried to get more room */
1381 spin_unlock_irqrestore(&sc->alloc_lock, flags);
1384 /* copy from receiver cache line and recalculate */
1385 sc->alloc_free = ACCESS_ONCE(sc->free);
1387 (unsigned long)sc->credits -
1388 (sc->fill - sc->alloc_free);
1389 if (blocks > avail) {
1390 /* still no room, actively update */
1391 spin_unlock_irqrestore(&sc->alloc_lock, flags);
1392 sc_release_update(sc);
1393 spin_lock_irqsave(&sc->alloc_lock, flags);
1394 sc->alloc_free = ACCESS_ONCE(sc->free);
1400 /* there is enough room */
1403 this_cpu_inc(*sc->buffers_allocated);
1405 /* read this once */
1408 /* "allocate" the buffer */
1409 start_fill = sc->fill;
1413 * Fill the parts that the releaser looks at before moving the head.
1414 * The only necessary piece is the sent_at field. The credits
1415 * we have just allocated cannot have been returned yet, so the
1416 * cb and arg will not be looked at for a "while". Put them
1417 * on this side of the memory barrier anyway.
1419 pbuf = &sc->sr[head].pbuf;
1420 pbuf->sent_at = sc->fill;
1423 pbuf->sc = sc; /* could be filled in at sc->sr init time */
1424 /* make sure this is in memory before updating the head */
1426 /* calculate next head index, do not store */
1428 if (next >= sc->sr_size)
1431 * update the head - must be last! - the releaser can look at fields
1432 * in pbuf once we move the head
1436 spin_unlock_irqrestore(&sc->alloc_lock, flags);
1438 /* finish filling in the buffer outside the lock */
	pbuf->start = sc->base_addr + ((start_fill % sc->credits)
					* PIO_BLOCK_SIZE);
1441 pbuf->size = sc->credits * PIO_BLOCK_SIZE;
1442 pbuf->end = sc->base_addr + pbuf->size;
1443 pbuf->block_count = blocks;
1444 pbuf->qw_written = 0;
1445 pbuf->carry_bytes = 0;
1446 pbuf->carry.val64 = 0;
1452 * There are at least two entities that can turn on credit return
1453 * interrupts and they can overlap. Avoid problems by implementing
1454 * a count scheme that is enforced by a lock. The lock is needed because
1455 * the count and CSR write must be paired.
1459 * Start credit return interrupts. This is managed by a count. If already
1460 * on, just increment the count.
1462 void sc_add_credit_return_intr(struct send_context *sc)
1464 unsigned long flags;
1466 /* lock must surround both the count change and the CSR update */
1467 spin_lock_irqsave(&sc->credit_ctrl_lock, flags);
1468 if (sc->credit_intr_count == 0) {
1469 sc->credit_ctrl |= SC(CREDIT_CTRL_CREDIT_INTR_SMASK);
1470 write_kctxt_csr(sc->dd, sc->hw_context,
1471 SC(CREDIT_CTRL), sc->credit_ctrl);
1473 sc->credit_intr_count++;
1474 spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags);
1478 * Stop credit return interrupts. This is managed by a count. Decrement the
1479 * count, if the last user, then turn the credit interrupts off.
1481 void sc_del_credit_return_intr(struct send_context *sc)
1483 unsigned long flags;
1485 WARN_ON(sc->credit_intr_count == 0);
1487 /* lock must surround both the count change and the CSR update */
1488 spin_lock_irqsave(&sc->credit_ctrl_lock, flags);
1489 sc->credit_intr_count--;
1490 if (sc->credit_intr_count == 0) {
1491 sc->credit_ctrl &= ~SC(CREDIT_CTRL_CREDIT_INTR_SMASK);
1492 write_kctxt_csr(sc->dd, sc->hw_context,
1493 SC(CREDIT_CTRL), sc->credit_ctrl);
1495 spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags);
1499 * The caller must be careful when calling this. All needint calls
1500 * must be paired with !needint.
1502 void hfi1_sc_wantpiobuf_intr(struct send_context *sc, u32 needint)
	if (needint)
		sc_add_credit_return_intr(sc);
	else
		sc_del_credit_return_intr(sc);
1508 trace_hfi1_wantpiointr(sc, needint, sc->credit_ctrl);
1511 sc_return_credits(sc);
1516 * sc_piobufavail - callback when a PIO buffer is available
1517 * @sc: the send context
1519 * This is called from the interrupt handler when a PIO buffer is
1520 * available after hfi1_verbs_send() returned an error that no buffers were
1521 * available. Disable the interrupt if there are no more QPs waiting.
1523 static void sc_piobufavail(struct send_context *sc)
1525 struct hfi1_devdata *dd = sc->dd;
1526 struct hfi1_ibdev *dev = &dd->verbs_dev;
1527 struct list_head *list;
1528 struct rvt_qp *qps[PIO_WAIT_BATCH_SIZE];
1530 struct hfi1_qp_priv *priv;
1531 unsigned long flags;
1534 if (dd->send_contexts[sc->sw_index].type != SC_KERNEL)
1536 list = &sc->piowait;
1538 * Note: checking that the piowait list is empty and clearing
1539 * the buffer available interrupt needs to be atomic or we
1540 * could end up with QPs on the wait list with the interrupt
1543 write_seqlock_irqsave(&dev->iowait_lock, flags);
1544 while (!list_empty(list)) {
1545 struct iowait *wait;
1547 if (n == ARRAY_SIZE(qps))
1549 wait = list_first_entry(list, struct iowait, list);
1550 qp = iowait_to_qp(wait);
1552 list_del_init(&priv->s_iowait.list);
1553 /* refcount held until actual wake up */
	 * If there had been waiters and there are more,
	 * ensure that we redo the force to avoid a potential hang.
1561 hfi1_sc_wantpiobuf_intr(sc, 0);
1562 if (!list_empty(list))
1563 hfi1_sc_wantpiobuf_intr(sc, 1);
1565 write_sequnlock_irqrestore(&dev->iowait_lock, flags);
1567 for (i = 0; i < n; i++)
1568 hfi1_qp_wakeup(qps[i],
1569 RVT_S_WAIT_PIO | RVT_S_WAIT_PIO_DRAIN);
1572 /* translate a send credit update to a bit code of reasons */
1573 static inline int fill_code(u64 hw_free)
1577 if (hw_free & CR_STATUS_SMASK)
1578 code |= PRC_STATUS_ERR;
	if (hw_free & CR_CREDIT_RETURN_DUE_TO_PBC_SMASK)
		code |= PRC_PBC;
1581 if (hw_free & CR_CREDIT_RETURN_DUE_TO_THRESHOLD_SMASK)
1582 code |= PRC_THRESHOLD;
1583 if (hw_free & CR_CREDIT_RETURN_DUE_TO_ERR_SMASK)
1584 code |= PRC_FILL_ERR;
1585 if (hw_free & CR_CREDIT_RETURN_DUE_TO_FORCE_SMASK)
1586 code |= PRC_SC_DISABLE;
1590 /* use the jiffies compare to get the wrap right */
1591 #define sent_before(a, b) time_before(a, b) /* a < b */
1594 * The send context buffer "releaser".
1596 void sc_release_update(struct send_context *sc)
1598 struct pio_buf *pbuf;
1601 unsigned long old_free;
1603 unsigned long extra;
1604 unsigned long flags;
1610 spin_lock_irqsave(&sc->release_lock, flags);
1612 hw_free = le64_to_cpu(*sc->hw_free); /* volatile read */
1613 old_free = sc->free;
	extra = (((hw_free & CR_COUNTER_SMASK) >> CR_COUNTER_SHIFT)
			- (old_free & CR_COUNTER_MASK))
				& CR_COUNTER_MASK;
	free = old_free + extra;
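	/*
	 * Wrap illustration (using, purely for the numbers, a 16 bit
	 * counter field): a hardware count of 0x0005 against
	 * (old_free & CR_COUNTER_MASK) = 0xfffb yields
	 * extra = (0x0005 - 0xfffb) & 0xffff = 10 newly returned credits,
	 * even though the raw subtraction went negative.
	 */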
1618 trace_hfi1_piofree(sc, extra);
1620 /* call sent buffer callbacks */
1621 code = -1; /* code not yet set */
1622 head = ACCESS_ONCE(sc->sr_head); /* snapshot the head */
	while (head != tail) {
		pbuf = &sc->sr[tail].pbuf;

		if (sent_before(free, pbuf->sent_at)) {
			/* not sent yet */
			break;
		}
		if (pbuf->cb) {
			if (code < 0) /* fill in code on first user */
				code = fill_code(hw_free);
			(*pbuf->cb)(pbuf->arg, code);
		}
		tail++;
		if (tail >= sc->sr_size)
			tail = 0;
	}
	sc->sr_tail = tail;
	/* make sure tail is updated before free */
	smp_wmb();
	sc->free = free;
	spin_unlock_irqrestore(&sc->release_lock, flags);
1650 * Send context group releaser. Argument is the send context that caused
1651 * the interrupt. Called from the send context interrupt handler.
1653 * Call release on all contexts in the group.
1655 * This routine takes the sc_lock without an irqsave because it is only
1656 * called from an interrupt handler. Adjust if that changes.
1658 void sc_group_release_update(struct hfi1_devdata *dd, u32 hw_context)
1660 struct send_context *sc;
1664 spin_lock(&dd->sc_lock);
1665 sw_index = dd->hw_to_sw[hw_context];
1666 if (unlikely(sw_index >= dd->num_send_contexts)) {
1667 dd_dev_err(dd, "%s: invalid hw (%u) to sw (%u) mapping\n",
1668 __func__, hw_context, sw_index);
1671 sc = dd->send_contexts[sw_index].sc;
1675 gc = group_context(hw_context, sc->group);
1676 gc_end = gc + group_size(sc->group);
1677 for (; gc < gc_end; gc++) {
1678 sw_index = dd->hw_to_sw[gc];
1679 if (unlikely(sw_index >= dd->num_send_contexts)) {
1681 "%s: invalid hw (%u) to sw (%u) mapping\n",
1682 __func__, hw_context, sw_index);
1685 sc_release_update(dd->send_contexts[sw_index].sc);
1688 spin_unlock(&dd->sc_lock);
1692 * pio_select_send_context_vl() - select send context
1694 * @selector: a spreading factor
1697 * This function returns a send context based on the selector and a vl.
1698 * The mapping fields are protected by RCU
1700 struct send_context *pio_select_send_context_vl(struct hfi1_devdata *dd,
1701 u32 selector, u8 vl)
1703 struct pio_vl_map *m;
1704 struct pio_map_elem *e;
1705 struct send_context *rval;
1708 * NOTE This should only happen if SC->VL changed after the initial
1709 * checks on the QP/AH
1710 * Default will return VL0's send context below
1712 if (unlikely(vl >= num_vls)) {
1718 m = rcu_dereference(dd->pio_map);
1721 return dd->vld[0].sc;
1723 e = m->map[vl & m->mask];
1724 rval = e->ksc[selector & e->mask];
1728 rval = !rval ? dd->vld[0].sc : rval;
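	/*
	 * Example of the lookup above: with 8 VLs the VL mask is 7, and a VL
	 * backed by 3 kernel send contexts gets a 4-entry ksc[] (mask 3),
	 * its 4th slot filled round robin from the first context.  A selector
	 * of 10 on vl 2 then picks m->map[2 & 7]->ksc[10 & 3], i.e. entry 2.
	 */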
1733 * pio_select_send_context_sc() - select send context
1735 * @selector: a spreading factor
1736 * @sc5: the 5 bit sc
1738 * This function returns an send context based on the selector and an sc
1740 struct send_context *pio_select_send_context_sc(struct hfi1_devdata *dd,
1741 u32 selector, u8 sc5)
1743 u8 vl = sc_to_vlt(dd, sc5);
1745 return pio_select_send_context_vl(dd, selector, vl);
1749 * Free the indicated map struct
1751 static void pio_map_free(struct pio_vl_map *m)
1755 for (i = 0; m && i < m->actual_vls; i++)
1761 * Handle RCU callback
1763 static void pio_map_rcu_callback(struct rcu_head *list)
1765 struct pio_vl_map *m = container_of(list, struct pio_vl_map, list);
1771 * pio_map_init - called when #vls change
1773 * @port: port number
1774 * @num_vls: number of vls
1775 * @vl_scontexts: per vl send context mapping (optional)
1777 * This routine changes the mapping based on the number of vls.
1779 * vl_scontexts is used to specify a non-uniform vl/send context
1780 * loading. NULL implies auto computing the loading and giving each
1781 * VL an uniform distribution of send contexts per VL.
 * The auto algorithm computes the sc_per_vl and the number of extra
1784 * send contexts. Any extra send contexts are added from the last VL
1787 * rcu locking is used here to control access to the mapping fields.
1789 * If either the num_vls or num_send_contexts are non-power of 2, the
1790 * array sizes in the struct pio_vl_map and the struct pio_map_elem are
1791 * rounded up to the next highest power of 2 and the first entry is
1792 * reused in a round robin fashion.
1794 * If an error occurs the map change is not done and the mapping is not
1798 int pio_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_scontexts)
1801 int extra, sc_per_vl;
1803 int num_kernel_send_contexts = 0;
1804 u8 lvl_scontexts[OPA_MAX_VLS];
1805 struct pio_vl_map *oldmap, *newmap;
1807 if (!vl_scontexts) {
1808 /* send context 0 reserved for VL15 */
1809 for (i = 1; i < dd->num_send_contexts; i++)
1810 if (dd->send_contexts[i].type == SC_KERNEL)
1811 num_kernel_send_contexts++;
1812 /* truncate divide */
1813 sc_per_vl = num_kernel_send_contexts / num_vls;
1815 extra = num_kernel_send_contexts % num_vls;
1816 vl_scontexts = lvl_scontexts;
1817 /* add extras from last vl down */
1818 for (i = num_vls - 1; i >= 0; i--, extra--)
1819 vl_scontexts[i] = sc_per_vl + (extra > 0 ? 1 : 0);
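		/*
		 * Example: 17 kernel send contexts (beyond the VL15 context)
		 * spread over 8 VLs gives sc_per_vl = 2 and extra = 1, so
		 * vl_scontexts[] becomes { 2, 2, 2, 2, 2, 2, 2, 3 } - the
		 * leftover context goes to the last VL first.
		 */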
1822 newmap = kzalloc(sizeof(*newmap) +
1823 roundup_pow_of_two(num_vls) *
1824 sizeof(struct pio_map_elem *),
1828 newmap->actual_vls = num_vls;
1829 newmap->vls = roundup_pow_of_two(num_vls);
1830 newmap->mask = (1 << ilog2(newmap->vls)) - 1;
1831 for (i = 0; i < newmap->vls; i++) {
1832 /* save for wrap around */
1833 int first_scontext = scontext;
1835 if (i < newmap->actual_vls) {
1836 int sz = roundup_pow_of_two(vl_scontexts[i]);
1838 /* only allocate once */
			newmap->map[i] = kzalloc(sizeof(*newmap->map[i]) +
						 sz * sizeof(struct send_context *),
						 GFP_KERNEL);
			if (!newmap->map[i])
1845 newmap->map[i]->mask = (1 << ilog2(sz)) - 1;
1846 /* assign send contexts */
1847 for (j = 0; j < sz; j++) {
1848 if (dd->kernel_send_context[scontext])
1849 newmap->map[i]->ksc[j] =
1850 dd->kernel_send_context[scontext];
				if (++scontext >= first_scontext +
						  vl_scontexts[i])
					/* wrap back to first send context */
					scontext = first_scontext;
1857 /* just re-use entry without allocating */
1858 newmap->map[i] = newmap->map[i % num_vls];
1860 scontext = first_scontext + vl_scontexts[i];
1862 /* newmap in hand, save old map */
1863 spin_lock_irq(&dd->pio_map_lock);
1864 oldmap = rcu_dereference_protected(dd->pio_map,
1865 lockdep_is_held(&dd->pio_map_lock));
1867 /* publish newmap */
1868 rcu_assign_pointer(dd->pio_map, newmap);
1870 spin_unlock_irq(&dd->pio_map_lock);
1871 /* success, free any old map after grace period */
1873 call_rcu(&oldmap->list, pio_map_rcu_callback);
1876 /* free any partial allocation */
1877 pio_map_free(newmap);
1881 void free_pio_map(struct hfi1_devdata *dd)
1883 /* Free PIO map if allocated */
1884 if (rcu_access_pointer(dd->pio_map)) {
1885 spin_lock_irq(&dd->pio_map_lock);
1886 pio_map_free(rcu_access_pointer(dd->pio_map));
1887 RCU_INIT_POINTER(dd->pio_map, NULL);
1888 spin_unlock_irq(&dd->pio_map_lock);
1891 kfree(dd->kernel_send_context);
1892 dd->kernel_send_context = NULL;
1895 int init_pervl_scs(struct hfi1_devdata *dd)
1898 u64 mask, all_vl_mask = (u64)0x80ff; /* VLs 0-7, 15 */
1899 u64 data_vls_mask = (u64)0x00ff; /* VLs 0-7 */
1901 struct hfi1_pportdata *ppd = dd->pport;
1903 dd->vld[15].sc = sc_alloc(dd, SC_KERNEL,
1904 dd->rcd[0]->rcvhdrqentsize, dd->node);
1905 if (!dd->vld[15].sc)
1907 hfi1_init_ctxt(dd->vld[15].sc);
1908 dd->vld[15].mtu = enum_to_mtu(OPA_MTU_2048);
1910 dd->kernel_send_context = kmalloc_node(dd->num_send_contexts *
1911 sizeof(struct send_context *),
1912 GFP_KERNEL, dd->node);
1913 dd->kernel_send_context[0] = dd->vld[15].sc;
1915 for (i = 0; i < num_vls; i++) {
1917 * Since this function does not deal with a specific
1918 * receive context but we need the RcvHdrQ entry size,
1919 * use the size from rcd[0]. It is guaranteed to be
1920 * valid at this point and will remain the same for all
1923 dd->vld[i].sc = sc_alloc(dd, SC_KERNEL,
1924 dd->rcd[0]->rcvhdrqentsize, dd->node);
1927 dd->kernel_send_context[i + 1] = dd->vld[i].sc;
1928 hfi1_init_ctxt(dd->vld[i].sc);
1929 /* non VL15 start with the max MTU */
1930 dd->vld[i].mtu = hfi1_max_mtu;
1932 for (i = num_vls; i < INIT_SC_PER_VL * num_vls; i++) {
1933 dd->kernel_send_context[i + 1] =
1934 sc_alloc(dd, SC_KERNEL, dd->rcd[0]->rcvhdrqentsize, dd->node);
1935 if (!dd->kernel_send_context[i + 1])
1937 hfi1_init_ctxt(dd->kernel_send_context[i + 1]);
1940 sc_enable(dd->vld[15].sc);
1941 ctxt = dd->vld[15].sc->hw_context;
1942 mask = all_vl_mask & ~(1LL << 15);
1943 write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask);
1945 "Using send context %u(%u) for VL15\n",
1946 dd->vld[15].sc->sw_index, ctxt);
1948 for (i = 0; i < num_vls; i++) {
1949 sc_enable(dd->vld[i].sc);
1950 ctxt = dd->vld[i].sc->hw_context;
1951 mask = all_vl_mask & ~(data_vls_mask);
1952 write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask);
1954 for (i = num_vls; i < INIT_SC_PER_VL * num_vls; i++) {
1955 sc_enable(dd->kernel_send_context[i + 1]);
1956 ctxt = dd->kernel_send_context[i + 1]->hw_context;
1957 mask = all_vl_mask & ~(data_vls_mask);
1958 write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask);
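		/*
		 * Mask arithmetic for the CHECK_VL writes: all_vl_mask is
		 * 0x80ff (VLs 0-7 plus VL15).  The VL15 context is written
		 * with 0x80ff & ~(1 << 15) = 0x00ff and every data context
		 * with 0x80ff & ~0x00ff = 0x8000 - matching the "should not
		 * allow sending on VL15" usage in sc_alloc(), the set bits
		 * name the VLs the context is not meant to send on.
		 */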
1961 if (pio_map_init(dd, ppd->port - 1, num_vls, NULL))
1965 sc_free(dd->vld[15].sc);
1966 for (i = 0; i < num_vls; i++)
1967 sc_free(dd->vld[i].sc);
1968 for (i = num_vls; i < INIT_SC_PER_VL * num_vls; i++)
1969 sc_free(dd->kernel_send_context[i + 1]);
1973 int init_credit_return(struct hfi1_devdata *dd)
1979 num_numa = num_online_nodes();
1980 /* enforce the expectation that the numas are compact */
1981 for (i = 0; i < num_numa; i++) {
1982 if (!node_online(i)) {
1983 dd_dev_err(dd, "NUMA nodes are not compact\n");
	dd->cr_base = kcalloc(
		num_numa,
		sizeof(struct credit_return_base),
		GFP_KERNEL);
	if (!dd->cr_base) {
		dd_dev_err(dd, "Unable to allocate credit return base\n");
1998 for (i = 0; i < num_numa; i++) {
1999 int bytes = TXE_NUM_CONTEXTS * sizeof(struct credit_return);
2001 set_dev_node(&dd->pcidev->dev, i);
2002 dd->cr_base[i].va = dma_zalloc_coherent(
2007 if (!dd->cr_base[i].va) {
2008 set_dev_node(&dd->pcidev->dev, dd->node);
2010 "Unable to allocate credit return DMA range for NUMA %d\n",
2016 set_dev_node(&dd->pcidev->dev, dd->node);
2023 void free_credit_return(struct hfi1_devdata *dd)
2031 num_numa = num_online_nodes();
2032 for (i = 0; i < num_numa; i++) {
2033 if (dd->cr_base[i].va) {
2034 dma_free_coherent(&dd->pcidev->dev,
2036 sizeof(struct credit_return),