2 * Copyright(c) 2015 - 2017 Intel Corporation.
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
20 * Redistribution and use in source and binary forms, with or without
21 * modification, are permitted provided that the following conditions
24 * - Redistributions of source code must retain the above copyright
25 * notice, this list of conditions and the following disclaimer.
26 * - Redistributions in binary form must reproduce the above copyright
27 * notice, this list of conditions and the following disclaimer in
28 * the documentation and/or other materials provided with the
30 * - Neither the name of Intel Corporation nor the names of its
31 * contributors may be used to endorse or promote products derived
32 * from this software without specific prior written permission.
34 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
48 #include <rdma/ib_mad.h>
49 #include <rdma/ib_user_verbs.h>
51 #include <linux/module.h>
52 #include <linux/utsname.h>
53 #include <linux/rculist.h>
55 #include <linux/vmalloc.h>
62 #include "verbs_txreq.h"
66 static unsigned int hfi1_lkey_table_size = 16;
67 module_param_named(lkey_table_size, hfi1_lkey_table_size, uint,
69 MODULE_PARM_DESC(lkey_table_size,
70 "LKEY table size in bits (2^n, 1 <= n <= 23)");
72 static unsigned int hfi1_max_pds = 0xFFFF;
73 module_param_named(max_pds, hfi1_max_pds, uint, S_IRUGO);
74 MODULE_PARM_DESC(max_pds,
75 "Maximum number of protection domains to support");
77 static unsigned int hfi1_max_ahs = 0xFFFF;
78 module_param_named(max_ahs, hfi1_max_ahs, uint, S_IRUGO);
79 MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");
81 unsigned int hfi1_max_cqes = 0x2FFFFF;
82 module_param_named(max_cqes, hfi1_max_cqes, uint, S_IRUGO);
83 MODULE_PARM_DESC(max_cqes,
84 "Maximum number of completion queue entries to support");
86 unsigned int hfi1_max_cqs = 0x1FFFF;
87 module_param_named(max_cqs, hfi1_max_cqs, uint, S_IRUGO);
88 MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");
90 unsigned int hfi1_max_qp_wrs = 0x3FFF;
91 module_param_named(max_qp_wrs, hfi1_max_qp_wrs, uint, S_IRUGO);
92 MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");
94 unsigned int hfi1_max_qps = 32768;
95 module_param_named(max_qps, hfi1_max_qps, uint, S_IRUGO);
96 MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");
98 unsigned int hfi1_max_sges = 0x60;
99 module_param_named(max_sges, hfi1_max_sges, uint, S_IRUGO);
100 MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");
102 unsigned int hfi1_max_mcast_grps = 16384;
103 module_param_named(max_mcast_grps, hfi1_max_mcast_grps, uint, S_IRUGO);
104 MODULE_PARM_DESC(max_mcast_grps,
105 "Maximum number of multicast groups to support");
107 unsigned int hfi1_max_mcast_qp_attached = 16;
108 module_param_named(max_mcast_qp_attached, hfi1_max_mcast_qp_attached,
110 MODULE_PARM_DESC(max_mcast_qp_attached,
111 "Maximum number of attached QPs to support");
113 unsigned int hfi1_max_srqs = 1024;
114 module_param_named(max_srqs, hfi1_max_srqs, uint, S_IRUGO);
115 MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");
117 unsigned int hfi1_max_srq_sges = 128;
118 module_param_named(max_srq_sges, hfi1_max_srq_sges, uint, S_IRUGO);
119 MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");
121 unsigned int hfi1_max_srq_wrs = 0x1FFFF;
122 module_param_named(max_srq_wrs, hfi1_max_srq_wrs, uint, S_IRUGO);
123 MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs to support");
125 unsigned short piothreshold = 256;
126 module_param(piothreshold, ushort, S_IRUGO);
127 MODULE_PARM_DESC(piothreshold, "size used to determine sdma vs. pio");
129 #define COPY_CACHELESS 1
130 #define COPY_ADAPTIVE 2
131 static unsigned int sge_copy_mode;
132 module_param(sge_copy_mode, uint, S_IRUGO);
133 MODULE_PARM_DESC(sge_copy_mode,
134 "Verbs copy mode: 0 use memcpy, 1 use cacheless copy, 2 adapt based on WSS");
136 static void verbs_sdma_complete(
137 struct sdma_txreq *cookie,
140 static int pio_wait(struct rvt_qp *qp,
141 struct send_context *sc,
142 struct hfi1_pkt_state *ps,
145 /* Length of buffer to create verbs txreq cache name */
146 #define TXREQ_NAME_LEN 24
148 static uint wss_threshold;
149 module_param(wss_threshold, uint, S_IRUGO);
150 MODULE_PARM_DESC(wss_threshold, "Percentage (1-100) of LLC to use as a threshold for a cacheless copy");
151 static uint wss_clean_period = 256;
152 module_param(wss_clean_period, uint, S_IRUGO);
153 MODULE_PARM_DESC(wss_clean_period, "Count of verbs copies before an entry in the page copy table is cleaned");
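/*
 * Illustrative only: all of the knobs above are ordinary module parameters,
 * so a load line such as "modprobe hfi1 sge_copy_mode=2 wss_threshold=80"
 * selects adaptive SGE copies with an 80% LLC working-set threshold.
 */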
155 /* memory working set size */
157 unsigned long *entries;
158 atomic_t total_count;
159 atomic_t clean_counter;
160 atomic_t clean_entry;
167 static struct hfi1_wss wss;
169 int hfi1_wss_init(void)
176 /* check for a valid percent range - default to 80 if none or invalid */
177 if (wss_threshold < 1 || wss_threshold > 100)
179 /* reject a wildly large period */
180 if (wss_clean_period > 1000000)
181 wss_clean_period = 256;
182 /* reject a zero period */
183 if (wss_clean_period == 0)
184 wss_clean_period = 1;
187 * Calculate the table size - the next power of 2 larger than the
188 * LLC size. LLC size is in KiB.
190 llc_size = wss_llc_size() * 1024;
191 table_size = roundup_pow_of_two(llc_size);
193 /* one bit per page in rounded up table */
194 llc_bits = llc_size / PAGE_SIZE;
195 table_bits = table_size / PAGE_SIZE;
196 wss.pages_mask = table_bits - 1;
197 wss.num_entries = table_bits / BITS_PER_LONG;
199 wss.threshold = (llc_bits * wss_threshold) / 100;
200 if (wss.threshold == 0)
203 atomic_set(&wss.clean_counter, wss_clean_period);
205 wss.entries = kcalloc(wss.num_entries, sizeof(*wss.entries),
215 void hfi1_wss_exit(void)
217 /* coded to handle partially initialized and repeat callers */
223 * Advance the clean counter. When the clean period has expired,
226 * This is implemented in atomics to avoid locking. Because multiple
227 * variables are involved, it can be racy which can lead to slightly
228 * inaccurate information. Since this is only a heuristic, this is
229 * OK. Any inaccuracies will clean themselves out as the counter
230 * advances. That said, it is unlikely the entry clean operation will
231 * race - the next possible racer will not start until the next clean
234 * The clean counter is implemented as a decrement to zero. When zero
235 * is reached an entry is cleaned.
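 * For example, with the default wss_clean_period of 256, one table entry is
 * scrubbed per 256 counter advances, so the whole table is swept roughly
 * every 256 * wss.num_entries insertions.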
237 static void wss_advance_clean_counter(void)
243 /* become the cleaner if we decrement the counter to zero */
244 if (atomic_dec_and_test(&wss.clean_counter)) {
246 * Set, not add, the clean period. This avoids an issue
247 * where the counter could decrement below the clean period.
248 * Doing a set can result in lost decrements, slowing the
249 * clean advance. Since this is a heuristic, this possible
252 * An alternative is to loop, advancing the counter by a
253 * clean period until the result is > 0. However, this could
254 * lead to several threads keeping another in the clean loop.
255 * This could be mitigated by limiting the number of times
256 * we stay in the loop.
258 atomic_set(&wss.clean_counter, wss_clean_period);
261 * Uniquely grab the entry to clean and move to next.
262 * The current entry is always the lower bits of
263 * wss.clean_entry. The table size, wss.num_entries,
264 * is always a power-of-2.
266 entry = (atomic_inc_return(&wss.clean_entry) - 1)
267 & (wss.num_entries - 1);
269 /* clear the entry and count the bits */
270 bits = xchg(&wss.entries[entry], 0);
271 weight = hweight64((u64)bits);
272 /* only adjust the contended total count if needed */
274 atomic_sub(weight, &wss.total_count);
279 * Insert the given address into the working set array.
281 static void wss_insert(void *address)
283 u32 page = ((unsigned long)address >> PAGE_SHIFT) & wss.pages_mask;
284 u32 entry = page / BITS_PER_LONG; /* assumes this ends up a shift */
285 u32 nr = page & (BITS_PER_LONG - 1);
287 if (!test_and_set_bit(nr, &wss.entries[entry]))
288 atomic_inc(&wss.total_count);
290 wss_advance_clean_counter();
294 * Is the working set larger than the threshold?
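 * In other words, have recent verbs copies touched at least wss_threshold
 * percent of an LLC's worth of distinct pages? If so, prefer the cacheless
 * copy path to avoid polluting the cache.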
296 static inline bool wss_exceeds_threshold(void)
298 return atomic_read(&wss.total_count) >= wss.threshold;
302 * Translate ib_wr_opcode into ib_wc_opcode.
304 const enum ib_wc_opcode ib_hfi1_wc_opcode[] = {
305 [IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
306 [IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
307 [IB_WR_SEND] = IB_WC_SEND,
308 [IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
309 [IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
310 [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
311 [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD,
312 [IB_WR_SEND_WITH_INV] = IB_WC_SEND,
313 [IB_WR_LOCAL_INV] = IB_WC_LOCAL_INV,
314 [IB_WR_REG_MR] = IB_WC_REG_MR
318 * Length of header by opcode, 0 --> not supported
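 * The byte counts below are LRH (8) + BTH (12) plus any extension headers:
 * RETH 16, AETH 4, AtomicETH 28, AtomicAckETH 8, DETH 8, and 4 bytes for
 * immediate data or an invalidate ETH (standard IBTA header sizes).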
320 const u8 hdr_len_by_opcode[256] = {
322 [IB_OPCODE_RC_SEND_FIRST] = 12 + 8,
323 [IB_OPCODE_RC_SEND_MIDDLE] = 12 + 8,
324 [IB_OPCODE_RC_SEND_LAST] = 12 + 8,
325 [IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE] = 12 + 8 + 4,
326 [IB_OPCODE_RC_SEND_ONLY] = 12 + 8,
327 [IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE] = 12 + 8 + 4,
328 [IB_OPCODE_RC_RDMA_WRITE_FIRST] = 12 + 8 + 16,
329 [IB_OPCODE_RC_RDMA_WRITE_MIDDLE] = 12 + 8,
330 [IB_OPCODE_RC_RDMA_WRITE_LAST] = 12 + 8,
331 [IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = 12 + 8 + 4,
332 [IB_OPCODE_RC_RDMA_WRITE_ONLY] = 12 + 8 + 16,
333 [IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = 12 + 8 + 20,
334 [IB_OPCODE_RC_RDMA_READ_REQUEST] = 12 + 8 + 16,
335 [IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST] = 12 + 8 + 4,
336 [IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE] = 12 + 8,
337 [IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST] = 12 + 8 + 4,
338 [IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY] = 12 + 8 + 4,
339 [IB_OPCODE_RC_ACKNOWLEDGE] = 12 + 8 + 4,
340 [IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE] = 12 + 8 + 4 + 8,
341 [IB_OPCODE_RC_COMPARE_SWAP] = 12 + 8 + 28,
342 [IB_OPCODE_RC_FETCH_ADD] = 12 + 8 + 28,
343 [IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE] = 12 + 8 + 4,
344 [IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE] = 12 + 8 + 4,
346 [IB_OPCODE_UC_SEND_FIRST] = 12 + 8,
347 [IB_OPCODE_UC_SEND_MIDDLE] = 12 + 8,
348 [IB_OPCODE_UC_SEND_LAST] = 12 + 8,
349 [IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE] = 12 + 8 + 4,
350 [IB_OPCODE_UC_SEND_ONLY] = 12 + 8,
351 [IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE] = 12 + 8 + 4,
352 [IB_OPCODE_UC_RDMA_WRITE_FIRST] = 12 + 8 + 16,
353 [IB_OPCODE_UC_RDMA_WRITE_MIDDLE] = 12 + 8,
354 [IB_OPCODE_UC_RDMA_WRITE_LAST] = 12 + 8,
355 [IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = 12 + 8 + 4,
356 [IB_OPCODE_UC_RDMA_WRITE_ONLY] = 12 + 8 + 16,
357 [IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = 12 + 8 + 20,
359 [IB_OPCODE_UD_SEND_ONLY] = 12 + 8 + 8,
360 [IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE] = 12 + 8 + 12
363 static const opcode_handler opcode_handler_tbl[256] = {
365 [IB_OPCODE_RC_SEND_FIRST] = &hfi1_rc_rcv,
366 [IB_OPCODE_RC_SEND_MIDDLE] = &hfi1_rc_rcv,
367 [IB_OPCODE_RC_SEND_LAST] = &hfi1_rc_rcv,
368 [IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE] = &hfi1_rc_rcv,
369 [IB_OPCODE_RC_SEND_ONLY] = &hfi1_rc_rcv,
370 [IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE] = &hfi1_rc_rcv,
371 [IB_OPCODE_RC_RDMA_WRITE_FIRST] = &hfi1_rc_rcv,
372 [IB_OPCODE_RC_RDMA_WRITE_MIDDLE] = &hfi1_rc_rcv,
373 [IB_OPCODE_RC_RDMA_WRITE_LAST] = &hfi1_rc_rcv,
374 [IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = &hfi1_rc_rcv,
375 [IB_OPCODE_RC_RDMA_WRITE_ONLY] = &hfi1_rc_rcv,
376 [IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = &hfi1_rc_rcv,
377 [IB_OPCODE_RC_RDMA_READ_REQUEST] = &hfi1_rc_rcv,
378 [IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST] = &hfi1_rc_rcv,
379 [IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE] = &hfi1_rc_rcv,
380 [IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST] = &hfi1_rc_rcv,
381 [IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY] = &hfi1_rc_rcv,
382 [IB_OPCODE_RC_ACKNOWLEDGE] = &hfi1_rc_rcv,
383 [IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE] = &hfi1_rc_rcv,
384 [IB_OPCODE_RC_COMPARE_SWAP] = &hfi1_rc_rcv,
385 [IB_OPCODE_RC_FETCH_ADD] = &hfi1_rc_rcv,
386 [IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE] = &hfi1_rc_rcv,
387 [IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE] = &hfi1_rc_rcv,
389 [IB_OPCODE_UC_SEND_FIRST] = &hfi1_uc_rcv,
390 [IB_OPCODE_UC_SEND_MIDDLE] = &hfi1_uc_rcv,
391 [IB_OPCODE_UC_SEND_LAST] = &hfi1_uc_rcv,
392 [IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE] = &hfi1_uc_rcv,
393 [IB_OPCODE_UC_SEND_ONLY] = &hfi1_uc_rcv,
394 [IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE] = &hfi1_uc_rcv,
395 [IB_OPCODE_UC_RDMA_WRITE_FIRST] = &hfi1_uc_rcv,
396 [IB_OPCODE_UC_RDMA_WRITE_MIDDLE] = &hfi1_uc_rcv,
397 [IB_OPCODE_UC_RDMA_WRITE_LAST] = &hfi1_uc_rcv,
398 [IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = &hfi1_uc_rcv,
399 [IB_OPCODE_UC_RDMA_WRITE_ONLY] = &hfi1_uc_rcv,
400 [IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = &hfi1_uc_rcv,
402 [IB_OPCODE_UD_SEND_ONLY] = &hfi1_ud_rcv,
403 [IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE] = &hfi1_ud_rcv,
405 [IB_OPCODE_CNP] = &hfi1_cnp_rcv
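/*
 * pio_opmask below is indexed by the transport bits of the opcode
 * (opcode >> 5); each set bit marks an opcode (opcode & OPMASK) that is a
 * candidate for the PIO fast path - single-packet "ONLY" sends/writes plus
 * read requests, acks and atomics.
 */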
410 static const u32 pio_opmask[BIT(3)] = {
412 [IB_OPCODE_RC >> 5] =
413 BIT(RC_OP(SEND_ONLY) & OPMASK) |
414 BIT(RC_OP(SEND_ONLY_WITH_IMMEDIATE) & OPMASK) |
415 BIT(RC_OP(RDMA_WRITE_ONLY) & OPMASK) |
416 BIT(RC_OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE) & OPMASK) |
417 BIT(RC_OP(RDMA_READ_REQUEST) & OPMASK) |
418 BIT(RC_OP(ACKNOWLEDGE) & OPMASK) |
419 BIT(RC_OP(ATOMIC_ACKNOWLEDGE) & OPMASK) |
420 BIT(RC_OP(COMPARE_SWAP) & OPMASK) |
421 BIT(RC_OP(FETCH_ADD) & OPMASK),
423 [IB_OPCODE_UC >> 5] =
424 BIT(UC_OP(SEND_ONLY) & OPMASK) |
425 BIT(UC_OP(SEND_ONLY_WITH_IMMEDIATE) & OPMASK) |
426 BIT(UC_OP(RDMA_WRITE_ONLY) & OPMASK) |
427 BIT(UC_OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE) & OPMASK),
433 __be64 ib_hfi1_sys_image_guid;
436 * hfi1_copy_sge - copy data to SGE memory
438 * @data: the data to copy
439 * @length: the length of the data
440 * @release: boolean to release MR
441 * @copy_last: do a separate copy of the last 8 bytes
444 struct rvt_sge_state *ss,
445 void *data, u32 length,
449 struct rvt_sge *sge = &ss->sge;
451 bool in_last = false;
452 bool cacheless_copy = false;
454 if (sge_copy_mode == COPY_CACHELESS) {
455 cacheless_copy = length >= PAGE_SIZE;
456 } else if (sge_copy_mode == COPY_ADAPTIVE) {
457 if (length >= PAGE_SIZE) {
459 * NOTE: this *assumes*:
460 * o The first vaddr is the dest.
461 * o If multiple pages, then vaddr is sequential.
463 wss_insert(sge->vaddr);
464 if (length >= (2 * PAGE_SIZE))
465 wss_insert(sge->vaddr + PAGE_SIZE);
467 cacheless_copy = wss_exceeds_threshold();
469 wss_advance_clean_counter();
483 u32 len = rvt_get_sge_length(sge, length);
485 WARN_ON_ONCE(len == 0);
486 if (unlikely(in_last)) {
487 /* enforce byte transfer ordering */
488 for (i = 0; i < len; i++)
489 ((u8 *)sge->vaddr)[i] = ((u8 *)data)[i];
490 } else if (cacheless_copy) {
491 cacheless_memcpy(sge->vaddr, data, len);
493 memcpy(sge->vaddr, data, len);
495 rvt_update_sge(ss, len, release);
509 * Make sure the QP is ready and able to accept the given opcode.
511 static inline opcode_handler qp_ok(int opcode, struct hfi1_packet *packet)
513 if (!(ib_rvt_state_ops[packet->qp->state] & RVT_PROCESS_RECV_OK))
515 if (((opcode & RVT_OPCODE_QP_MASK) == packet->qp->allowed_ops) ||
516 (opcode == IB_OPCODE_CNP))
517 return opcode_handler_tbl[opcode];
522 static u64 hfi1_fault_tx(struct rvt_qp *qp, u8 opcode, u64 pbc)
524 #ifdef CONFIG_FAULT_INJECTION
525 if ((opcode & IB_OPCODE_MSP) == IB_OPCODE_MSP)
527 * In order to drop non-IB traffic we
528 * set PbcInsertHrc to NONE (0x2).
529 * The packet will still be delivered
530 * to the receiving node but a
531 * KHdrHCRCErr (KDETH packet with a bad
532 * HCRC) will be triggered and the
533 * packet will not be delivered to the
536 pbc |= (u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT;
539 * In order to drop regular verbs
540 * traffic we set the PbcTestEbp
541 * flag. The packet will still be
542 * delivered to the receiving node but
543 * a 'late ebp error' will be
544 * triggered and the packet will be dropped.
552 * hfi1_ib_rcv - process an incoming packet
553 * @packet: data packet information
555 * This is called to process an incoming packet at interrupt level.
557 * Tlen is the length of the header + data + CRC in bytes.
559 void hfi1_ib_rcv(struct hfi1_packet *packet)
561 struct hfi1_ctxtdata *rcd = packet->rcd;
562 struct ib_header *hdr = packet->hdr;
563 u32 tlen = packet->tlen;
564 struct hfi1_pportdata *ppd = rcd->ppd;
565 struct hfi1_ibport *ibp = rcd_to_iport(rcd);
566 struct rvt_dev_info *rdi = &ppd->dd->verbs_dev.rdi;
567 opcode_handler packet_handler;
575 lnh = ib_get_lnh(hdr);
576 if (lnh == HFI1_LRH_BTH) {
577 packet->ohdr = &hdr->u.oth;
578 } else if (lnh == HFI1_LRH_GRH) {
581 packet->ohdr = &hdr->u.l.oth;
582 if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
584 vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
585 if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
587 packet->rcv_flags |= HFI1_HAS_GRH;
592 trace_input_ibhdr(rcd->dd, hdr);
594 opcode = ib_bth_get_opcode(packet->ohdr);
595 inc_opstats(tlen, &rcd->opstats->stats[opcode]);
597 /* Get the destination QP number. */
598 qp_num = be32_to_cpu(packet->ohdr->bth[1]) & RVT_QPN_MASK;
599 lid = ib_get_dlid(hdr);
600 if (unlikely((lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
601 (lid != be16_to_cpu(IB_LID_PERMISSIVE)))) {
602 struct rvt_mcast *mcast;
603 struct rvt_mcast_qp *p;
605 if (lnh != HFI1_LRH_GRH)
607 mcast = rvt_mcast_find(&ibp->rvp, &hdr->u.l.grh.dgid, lid);
610 list_for_each_entry_rcu(p, &mcast->qp_list, list) {
612 spin_lock_irqsave(&packet->qp->r_lock, flags);
613 packet_handler = qp_ok(opcode, packet);
614 if (likely(packet_handler))
615 packet_handler(packet);
617 ibp->rvp.n_pkt_drops++;
618 spin_unlock_irqrestore(&packet->qp->r_lock, flags);
621 * Notify rvt_multicast_detach() if it is waiting for us
624 if (atomic_dec_return(&mcast->refcount) <= 1)
625 wake_up(&mcast->wait);
628 packet->qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
633 if (unlikely(hfi1_dbg_fault_opcode(packet->qp, opcode,
638 spin_lock_irqsave(&packet->qp->r_lock, flags);
639 packet_handler = qp_ok(opcode, packet);
640 if (likely(packet_handler))
641 packet_handler(packet);
643 ibp->rvp.n_pkt_drops++;
644 spin_unlock_irqrestore(&packet->qp->r_lock, flags);
650 ibp->rvp.n_pkt_drops++;
654 * This is called from a timer to check for QPs
655 * which need kernel memory in order to send a packet.
657 static void mem_timer(unsigned long data)
659 struct hfi1_ibdev *dev = (struct hfi1_ibdev *)data;
660 struct list_head *list = &dev->memwait;
661 struct rvt_qp *qp = NULL;
664 struct hfi1_qp_priv *priv;
666 write_seqlock_irqsave(&dev->iowait_lock, flags);
667 if (!list_empty(list)) {
668 wait = list_first_entry(list, struct iowait, list);
669 qp = iowait_to_qp(wait);
671 list_del_init(&priv->s_iowait.list);
672 priv->s_iowait.lock = NULL;
673 /* refcount held until actual wake up */
674 if (!list_empty(list))
675 mod_timer(&dev->mem_timer, jiffies + 1);
677 write_sequnlock_irqrestore(&dev->iowait_lock, flags);
680 hfi1_qp_wakeup(qp, RVT_S_WAIT_KMEM);
684 * This is called with progress side lock held.
687 static void verbs_sdma_complete(
688 struct sdma_txreq *cookie,
691 struct verbs_txreq *tx =
692 container_of(cookie, struct verbs_txreq, txreq);
693 struct rvt_qp *qp = tx->qp;
695 spin_lock(&qp->s_lock);
697 hfi1_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
698 } else if (qp->ibqp.qp_type == IB_QPT_RC) {
699 struct ib_header *hdr;
702 hfi1_rc_send_complete(qp, hdr);
704 spin_unlock(&qp->s_lock);
709 static int wait_kmem(struct hfi1_ibdev *dev,
711 struct hfi1_pkt_state *ps)
713 struct hfi1_qp_priv *priv = qp->priv;
717 spin_lock_irqsave(&qp->s_lock, flags);
718 if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
719 write_seqlock(&dev->iowait_lock);
720 list_add_tail(&ps->s_txreq->txreq.list,
721 &priv->s_iowait.tx_head);
722 if (list_empty(&priv->s_iowait.list)) {
723 if (list_empty(&dev->memwait))
724 mod_timer(&dev->mem_timer, jiffies + 1);
725 qp->s_flags |= RVT_S_WAIT_KMEM;
726 list_add_tail(&priv->s_iowait.list, &dev->memwait);
727 priv->s_iowait.lock = &dev->iowait_lock;
728 trace_hfi1_qpsleep(qp, RVT_S_WAIT_KMEM);
731 write_sequnlock(&dev->iowait_lock);
732 qp->s_flags &= ~RVT_S_BUSY;
735 spin_unlock_irqrestore(&qp->s_lock, flags);
741 * This routine calls txadds for each sg entry.
743 * Add failures will revert the sge cursor
745 static noinline int build_verbs_ulp_payload(
746 struct sdma_engine *sde,
748 struct verbs_txreq *tx)
750 struct rvt_sge_state *ss = tx->ss;
751 struct rvt_sge *sg_list = ss->sg_list;
752 struct rvt_sge sge = ss->sge;
753 u8 num_sge = ss->num_sge;
758 len = ss->sge.length;
761 if (len > ss->sge.sge_length)
762 len = ss->sge.sge_length;
763 WARN_ON_ONCE(len == 0);
764 ret = sdma_txadd_kvaddr(
771 rvt_update_sge(ss, len, false);
778 ss->num_sge = num_sge;
779 ss->sg_list = sg_list;
784 * Build the number of DMA descriptors needed to send length bytes of data.
786 * NOTE: DMA mapping is held in the tx until completed in the ring or
787 * the tx desc is freed without having been submitted to the ring
789 * This routine ensures all the helper routine calls succeed.
792 static int build_verbs_tx_desc(
793 struct sdma_engine *sde,
795 struct verbs_txreq *tx,
796 struct hfi1_ahg_info *ahg_info,
800 struct hfi1_sdma_header *phdr = &tx->phdr;
801 u16 hdrbytes = tx->hdr_dwords << 2;
803 if (!ahg_info->ahgcount) {
804 ret = sdma_txinit_ahg(
812 verbs_sdma_complete);
815 phdr->pbc = cpu_to_le64(pbc);
816 ret = sdma_txadd_kvaddr(
824 ret = sdma_txinit_ahg(
832 verbs_sdma_complete);
836 /* add the ulp payload - if any. tx->ss can be NULL for acks */
838 ret = build_verbs_ulp_payload(sde, length, tx);
843 int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
846 struct hfi1_qp_priv *priv = qp->priv;
847 struct hfi1_ahg_info *ahg_info = priv->s_ahg;
848 u32 hdrwords = qp->s_hdrwords;
849 u32 len = ps->s_txreq->s_cur_size;
850 u32 plen = hdrwords + ((len + 3) >> 2) + 2; /* includes pbc */
851 struct hfi1_ibdev *dev = ps->dev;
852 struct hfi1_pportdata *ppd = ps->ppd;
853 struct verbs_txreq *tx;
859 if (!sdma_txreq_built(&tx->txreq)) {
860 if (likely(pbc == 0)) {
861 u32 vl = sc_to_vlt(dd_from_ibdev(qp->ibqp.device), sc5);
862 u8 opcode = get_opcode(&tx->phdr.hdr);
865 /* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */
866 pbc |= (!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT;
868 if (unlikely(hfi1_dbg_fault_opcode(qp, opcode, false)))
869 pbc = hfi1_fault_tx(qp, opcode, pbc);
870 pbc = create_pbc(ppd,
877 ret = build_verbs_tx_desc(tx->sde, len, tx, ahg_info, pbc);
881 ret = sdma_send_txreq(tx->sde, &priv->s_iowait, &tx->txreq);
882 if (unlikely(ret < 0)) {
887 trace_sdma_output_ibhdr(dd_from_ibdev(qp->ibqp.device),
888 &ps->s_txreq->phdr.hdr);
892 /* The current one got "sent" */
895 ret = wait_kmem(dev, qp, ps);
897 /* free txreq - bad state */
898 hfi1_put_txreq(ps->s_txreq);
905 * If we are now in the error state, return zero to flush the
908 static int pio_wait(struct rvt_qp *qp,
909 struct send_context *sc,
910 struct hfi1_pkt_state *ps,
913 struct hfi1_qp_priv *priv = qp->priv;
914 struct hfi1_devdata *dd = sc->dd;
915 struct hfi1_ibdev *dev = &dd->verbs_dev;
920 * Note that as soon as want_buffer() is called and
921 * possibly before it returns, sc_piobufavail()
922 * could be called. Therefore, put QP on the I/O wait list before
923 * enabling the PIO avail interrupt.
925 spin_lock_irqsave(&qp->s_lock, flags);
926 if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
927 write_seqlock(&dev->iowait_lock);
928 list_add_tail(&ps->s_txreq->txreq.list,
929 &priv->s_iowait.tx_head);
930 if (list_empty(&priv->s_iowait.list)) {
931 struct hfi1_ibdev *dev = &dd->verbs_dev;
934 dev->n_piowait += !!(flag & RVT_S_WAIT_PIO);
935 dev->n_piodrain += !!(flag & RVT_S_WAIT_PIO_DRAIN);
937 was_empty = list_empty(&sc->piowait);
938 list_add_tail(&priv->s_iowait.list, &sc->piowait);
939 priv->s_iowait.lock = &dev->iowait_lock;
940 trace_hfi1_qpsleep(qp, RVT_S_WAIT_PIO);
942 /* counting: only call wantpiobuf_intr if first user */
944 hfi1_sc_wantpiobuf_intr(sc, 1);
946 write_sequnlock(&dev->iowait_lock);
947 qp->s_flags &= ~RVT_S_BUSY;
950 spin_unlock_irqrestore(&qp->s_lock, flags);
954 static void verbs_pio_complete(void *arg, int code)
956 struct rvt_qp *qp = (struct rvt_qp *)arg;
957 struct hfi1_qp_priv *priv = qp->priv;
959 if (iowait_pio_dec(&priv->s_iowait))
960 iowait_drain_wakeup(&priv->s_iowait);
963 int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
966 struct hfi1_qp_priv *priv = qp->priv;
967 u32 hdrwords = qp->s_hdrwords;
968 struct rvt_sge_state *ss = ps->s_txreq->ss;
969 u32 len = ps->s_txreq->s_cur_size;
970 u32 dwords = (len + 3) >> 2;
971 u32 plen = hdrwords + dwords + 2; /* includes pbc */
972 struct hfi1_pportdata *ppd = ps->ppd;
973 u32 *hdr = (u32 *)&ps->s_txreq->phdr.hdr;
975 unsigned long flags = 0;
976 struct send_context *sc;
977 struct pio_buf *pbuf;
978 int wc_status = IB_WC_SUCCESS;
980 pio_release_cb cb = NULL;
982 /* only RC/UC use complete */
983 switch (qp->ibqp.qp_type) {
986 cb = verbs_pio_complete;
992 /* vl15 special case taken care of in ud.c */
994 sc = ps->s_txreq->psc;
996 if (likely(pbc == 0)) {
997 u8 vl = sc_to_vlt(dd_from_ibdev(qp->ibqp.device), sc5);
998 struct verbs_txreq *tx = ps->s_txreq;
999 u8 opcode = get_opcode(&tx->phdr.hdr);
1001 /* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */
1002 pbc |= (!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT;
1003 if (unlikely(hfi1_dbg_fault_opcode(qp, opcode, false)))
1004 pbc = hfi1_fault_tx(qp, opcode, pbc);
1005 pbc = create_pbc(ppd, pbc, qp->srate_mbps, vl, plen);
1008 iowait_pio_inc(&priv->s_iowait);
1009 pbuf = sc_buffer_alloc(sc, plen, cb, qp);
1010 if (unlikely(!pbuf)) {
1012 verbs_pio_complete(qp, 0);
1013 if (ppd->host_link_state != HLS_UP_ACTIVE) {
1015 * If we have filled the PIO buffers to capacity and are
1016 * not in an active state this request is not going to
1017 * go out, so just complete it with an error or else a
1018 * ULP or the core may be stuck waiting.
1022 "alloc failed. state not active, completing");
1023 wc_status = IB_WC_GENERAL_ERR;
1027 * This is a normal occurrence. The PIO buffers are full,
1028 * but we are still happily sending (or at least we could
1029 * be), so let's continue to queue the request.
1031 hfi1_cdbg(PIO, "alloc failed. state active, queuing");
1032 ret = pio_wait(qp, sc, ps, RVT_S_WAIT_PIO);
1034 /* txreq not queued - free */
1036 /* tx consumed in wait */
1042 pio_copy(ppd->dd, pbuf, pbc, hdr, hdrwords);
1045 seg_pio_copy_start(pbuf, pbc, hdr, hdrwords * 4);
1047 void *addr = ss->sge.vaddr;
1048 u32 slen = ss->sge.length;
1052 rvt_update_sge(ss, slen, false);
1053 seg_pio_copy_mid(pbuf, addr, slen);
1056 seg_pio_copy_end(pbuf);
1060 trace_pio_output_ibhdr(dd_from_ibdev(qp->ibqp.device),
1061 &ps->s_txreq->phdr.hdr);
1065 spin_lock_irqsave(&qp->s_lock, flags);
1066 hfi1_send_complete(qp, qp->s_wqe, wc_status);
1067 spin_unlock_irqrestore(&qp->s_lock, flags);
1068 } else if (qp->ibqp.qp_type == IB_QPT_RC) {
1069 spin_lock_irqsave(&qp->s_lock, flags);
1070 hfi1_rc_send_complete(qp, &ps->s_txreq->phdr.hdr);
1071 spin_unlock_irqrestore(&qp->s_lock, flags);
1077 hfi1_put_txreq(ps->s_txreq);
1082 * egress_pkey_matches_entry - return 1 if the pkey matches ent (ent
1083 * being an entry from the partition key table), return 0
1084 * otherwise. Use the matching criteria for egress partition keys
1085 * specified in the OPAv1 spec, section 9.11.7.
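 * For example, under this rule a full-member pkey (bit 15 set) matches only
 * a full-member table entry, while a limited-member pkey matches an entry
 * with either membership bit.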
1087 static inline int egress_pkey_matches_entry(u16 pkey, u16 ent)
1089 u16 mkey = pkey & PKEY_LOW_15_MASK;
1090 u16 mentry = ent & PKEY_LOW_15_MASK;
1092 if (mkey == mentry) {
1094 * If pkey[15] is set (full partition member),
1095 * is bit 15 in the corresponding table element
1096 * clear (limited member)?
1098 if (pkey & PKEY_MEMBER_MASK)
1099 return !!(ent & PKEY_MEMBER_MASK);
1106 * egress_pkey_check - check P_KEY of a packet
1107 * @ppd: Physical IB port data
1108 * @lrh: Local route header
1109 * @bth: Base transport header
1110 * @sc5: SC for packet
1111 * @s_pkey_index: It will be used for lookup optimization for kernel contexts
1112 * only. If it is a negative value, it means a user context is calling this
1115 * It checks if hdr's pkey is valid.
1117 * Return: 0 on success, otherwise 1
1119 int egress_pkey_check(struct hfi1_pportdata *ppd, __be16 *lrh, __be32 *bth,
1120 u8 sc5, int8_t s_pkey_index)
1122 struct hfi1_devdata *dd;
1125 int is_user_ctxt_mechanism = (s_pkey_index < 0);
1127 if (!(ppd->part_enforce & HFI1_PART_ENFORCE_OUT))
1130 pkey = (u16)be32_to_cpu(bth[0]);
1132 /* If SC15, pkey[0:14] must be 0x7fff */
1133 if ((sc5 == 0xf) && ((pkey & PKEY_LOW_15_MASK) != PKEY_LOW_15_MASK))
1136 /* Is the pkey = 0x0, or 0x8000? */
1137 if ((pkey & PKEY_LOW_15_MASK) == 0)
1141 * For the kernel contexts only, if a qp is passed into the function,
1142 * the most likely matching pkey has index qp->s_pkey_index
1144 if (!is_user_ctxt_mechanism &&
1145 egress_pkey_matches_entry(pkey, ppd->pkeys[s_pkey_index])) {
1149 for (i = 0; i < MAX_PKEY_VALUES; i++) {
1150 if (egress_pkey_matches_entry(pkey, ppd->pkeys[i]))
1155 * For the user-context mechanism, the P_KEY check would only happen
1156 * once per SDMA request, not once per packet. Therefore, there's no
1157 * need to increment the counter for the user-context mechanism.
1159 if (!is_user_ctxt_mechanism) {
1160 incr_cntr64(&ppd->port_xmit_constraint_errors);
1162 if (!(dd->err_info_xmit_constraint.status &
1163 OPA_EI_STATUS_SMASK)) {
1164 u16 slid = be16_to_cpu(lrh[3]);
1166 dd->err_info_xmit_constraint.status |=
1167 OPA_EI_STATUS_SMASK;
1168 dd->err_info_xmit_constraint.slid = slid;
1169 dd->err_info_xmit_constraint.pkey = pkey;
1176 * get_send_routine - choose an egress routine
1178 * Choose an egress routine based on QP type
1181 static inline send_routine get_send_routine(struct rvt_qp *qp,
1182 struct verbs_txreq *tx)
1184 struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
1185 struct hfi1_qp_priv *priv = qp->priv;
1186 struct ib_header *h = &tx->phdr.hdr;
1188 if (unlikely(!(dd->flags & HFI1_HAS_SEND_DMA)))
1189 return dd->process_pio_send;
1190 switch (qp->ibqp.qp_type) {
1192 return dd->process_pio_send;
1198 u8 op = get_opcode(h);
1201 tx->s_cur_size <= min(piothreshold, qp->pmtu) &&
1202 (BIT(op & OPMASK) & pio_opmask[op >> 5]) &&
1203 iowait_sdma_pending(&priv->s_iowait) == 0 &&
1204 !sdma_txreq_built(&tx->txreq))
1205 return dd->process_pio_send;
1211 return dd->process_dma_send;
1215 * hfi1_verbs_send - send a packet
1216 * @qp: the QP to send on
1217 * @ps: the state of the packet to send
1219 * Return zero if packet is sent or queued OK.
1220 * Return non-zero and clear qp->s_flags RVT_S_BUSY otherwise.
1222 int hfi1_verbs_send(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
1224 struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
1225 struct hfi1_qp_priv *priv = qp->priv;
1226 struct ib_other_headers *ohdr;
1227 struct ib_header *hdr;
1232 hdr = &ps->s_txreq->phdr.hdr;
1233 /* locate the pkey within the headers */
1234 lnh = ib_get_lnh(hdr);
1235 if (lnh == HFI1_LRH_GRH)
1236 ohdr = &hdr->u.l.oth;
1240 sr = get_send_routine(qp, ps->s_txreq);
1241 ret = egress_pkey_check(dd->pport,
1246 if (unlikely(ret)) {
1248 * The value we are returning here does not get propagated to
1249 * the verbs caller. Thus we need to complete the request with
1250 * an error, otherwise the caller could be sitting waiting on the
1251 * completion event. Only do this for PIO. SDMA has its own
1252 * mechanism for handling the errors. So for SDMA we can just
1255 if (sr == dd->process_pio_send) {
1256 unsigned long flags;
1258 hfi1_cdbg(PIO, "%s() Failed. Completing with err",
1260 spin_lock_irqsave(&qp->s_lock, flags);
1261 hfi1_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
1262 spin_unlock_irqrestore(&qp->s_lock, flags);
1266 if (sr == dd->process_dma_send && iowait_pio_pending(&priv->s_iowait))
1270 RVT_S_WAIT_PIO_DRAIN);
1271 return sr(qp, ps, 0);
1275 * hfi1_fill_device_attr - Fill in rvt dev info device attributes.
1276 * @dd: the device data structure
1278 static void hfi1_fill_device_attr(struct hfi1_devdata *dd)
1280 struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
1281 u32 ver = dd->dc8051_ver;
1283 memset(&rdi->dparms.props, 0, sizeof(rdi->dparms.props));
1285 rdi->dparms.props.fw_ver = ((u64)(dc8051_ver_maj(ver)) << 32) |
1286 ((u64)(dc8051_ver_min(ver)) << 16) |
1287 (u64)dc8051_ver_patch(ver);
1289 rdi->dparms.props.device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
1290 IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
1291 IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
1292 IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE |
1293 IB_DEVICE_MEM_MGT_EXTENSIONS |
1294 IB_DEVICE_RDMA_NETDEV_OPA_VNIC;
1295 rdi->dparms.props.page_size_cap = PAGE_SIZE;
1296 rdi->dparms.props.vendor_id = dd->oui1 << 16 | dd->oui2 << 8 | dd->oui3;
1297 rdi->dparms.props.vendor_part_id = dd->pcidev->device;
1298 rdi->dparms.props.hw_ver = dd->minrev;
1299 rdi->dparms.props.sys_image_guid = ib_hfi1_sys_image_guid;
1300 rdi->dparms.props.max_mr_size = U64_MAX;
1301 rdi->dparms.props.max_fast_reg_page_list_len = UINT_MAX;
1302 rdi->dparms.props.max_qp = hfi1_max_qps;
1303 rdi->dparms.props.max_qp_wr = hfi1_max_qp_wrs;
1304 rdi->dparms.props.max_sge = hfi1_max_sges;
1305 rdi->dparms.props.max_sge_rd = hfi1_max_sges;
1306 rdi->dparms.props.max_cq = hfi1_max_cqs;
1307 rdi->dparms.props.max_ah = hfi1_max_ahs;
1308 rdi->dparms.props.max_cqe = hfi1_max_cqes;
1309 rdi->dparms.props.max_mr = rdi->lkey_table.max;
1310 rdi->dparms.props.max_fmr = rdi->lkey_table.max;
1311 rdi->dparms.props.max_map_per_fmr = 32767;
1312 rdi->dparms.props.max_pd = hfi1_max_pds;
1313 rdi->dparms.props.max_qp_rd_atom = HFI1_MAX_RDMA_ATOMIC;
1314 rdi->dparms.props.max_qp_init_rd_atom = 255;
1315 rdi->dparms.props.max_srq = hfi1_max_srqs;
1316 rdi->dparms.props.max_srq_wr = hfi1_max_srq_wrs;
1317 rdi->dparms.props.max_srq_sge = hfi1_max_srq_sges;
1318 rdi->dparms.props.atomic_cap = IB_ATOMIC_GLOB;
1319 rdi->dparms.props.max_pkeys = hfi1_get_npkeys(dd);
1320 rdi->dparms.props.max_mcast_grp = hfi1_max_mcast_grps;
1321 rdi->dparms.props.max_mcast_qp_attach = hfi1_max_mcast_qp_attached;
1322 rdi->dparms.props.max_total_mcast_qp_attach =
1323 rdi->dparms.props.max_mcast_qp_attach *
1324 rdi->dparms.props.max_mcast_grp;
1327 static inline u16 opa_speed_to_ib(u16 in)
1331 if (in & OPA_LINK_SPEED_25G)
1332 out |= IB_SPEED_EDR;
1333 if (in & OPA_LINK_SPEED_12_5G)
1334 out |= IB_SPEED_FDR;
1340 * Convert a single OPA link width (no multiple flags) to an IB value.
1341 * A zero OPA link width means link down, which means the IB width value
1344 static inline u16 opa_width_to_ib(u16 in)
1347 case OPA_LINK_WIDTH_1X:
1348 /* map 2x and 3x to 1x as they don't exist in IB */
1349 case OPA_LINK_WIDTH_2X:
1350 case OPA_LINK_WIDTH_3X:
1352 default: /* link down or unknown, return our largest width */
1353 case OPA_LINK_WIDTH_4X:
1358 static int query_port(struct rvt_dev_info *rdi, u8 port_num,
1359 struct ib_port_attr *props)
1361 struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
1362 struct hfi1_devdata *dd = dd_from_dev(verbs_dev);
1363 struct hfi1_pportdata *ppd = &dd->pport[port_num - 1];
1366 /* props being zeroed by the caller, avoid zeroing it here */
1367 props->lid = lid ? lid : 0;
1368 props->lmc = ppd->lmc;
1369 /* OPA logical states match IB logical states */
1370 props->state = driver_lstate(ppd);
1371 props->phys_state = hfi1_ibphys_portstate(ppd);
1372 props->gid_tbl_len = HFI1_GUIDS_PER_PORT;
1373 props->active_width = (u8)opa_width_to_ib(ppd->link_width_active);
1374 /* see rate_show() in ib core/sysfs.c */
1375 props->active_speed = (u8)opa_speed_to_ib(ppd->link_speed_active);
1376 props->max_vl_num = ppd->vls_supported;
1378 /* Once we are a "first class" citizen and have added the OPA MTUs to
1379 * the core we can advertise the larger MTU enum to the ULPs, for now
1380 * advertise only 4K.
1382 * Those applications which are either OPA aware or pass the MTU enum
1383 * from the Path Records to us will get the new 8k MTU. Those that
1384 * attempt to process the MTU enum may fail in various ways.
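 * For example, OPA's 10 KiB MTU is not a valid IB MTU, so the calculation
 * below falls back to reporting IB_MTU_4096 for max_mtu in that case.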
1386 props->max_mtu = mtu_to_enum((!valid_ib_mtu(hfi1_max_mtu) ?
1387 4096 : hfi1_max_mtu), IB_MTU_4096);
1388 props->active_mtu = !valid_ib_mtu(ppd->ibmtu) ? props->max_mtu :
1389 mtu_to_enum(ppd->ibmtu, IB_MTU_2048);
1394 static int modify_device(struct ib_device *device,
1395 int device_modify_mask,
1396 struct ib_device_modify *device_modify)
1398 struct hfi1_devdata *dd = dd_from_ibdev(device);
1402 if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
1403 IB_DEVICE_MODIFY_NODE_DESC)) {
1408 if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) {
1409 memcpy(device->node_desc, device_modify->node_desc,
1410 IB_DEVICE_NODE_DESC_MAX);
1411 for (i = 0; i < dd->num_pports; i++) {
1412 struct hfi1_ibport *ibp = &dd->pport[i].ibport_data;
1414 hfi1_node_desc_chg(ibp);
1418 if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) {
1419 ib_hfi1_sys_image_guid =
1420 cpu_to_be64(device_modify->sys_image_guid);
1421 for (i = 0; i < dd->num_pports; i++) {
1422 struct hfi1_ibport *ibp = &dd->pport[i].ibport_data;
1424 hfi1_sys_guid_chg(ibp);
1434 static int shut_down_port(struct rvt_dev_info *rdi, u8 port_num)
1436 struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
1437 struct hfi1_devdata *dd = dd_from_dev(verbs_dev);
1438 struct hfi1_pportdata *ppd = &dd->pport[port_num - 1];
1441 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_UNKNOWN, 0,
1442 OPA_LINKDOWN_REASON_UNKNOWN);
1443 ret = set_link_state(ppd, HLS_DN_DOWNDEF);
1447 static int hfi1_get_guid_be(struct rvt_dev_info *rdi, struct rvt_ibport *rvp,
1448 int guid_index, __be64 *guid)
1450 struct hfi1_ibport *ibp = container_of(rvp, struct hfi1_ibport, rvp);
1452 if (guid_index >= HFI1_GUIDS_PER_PORT)
1455 *guid = get_sguid(ibp, guid_index);
1460 * convert ah port,sl to sc
1462 u8 ah_to_sc(struct ib_device *ibdev, struct rdma_ah_attr *ah)
1464 struct hfi1_ibport *ibp = to_iport(ibdev, rdma_ah_get_port_num(ah));
1466 return ibp->sl_to_sc[rdma_ah_get_sl(ah)];
1469 static int hfi1_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr)
1471 struct hfi1_ibport *ibp;
1472 struct hfi1_pportdata *ppd;
1473 struct hfi1_devdata *dd;
1476 /* test the mapping for validity */
1477 ibp = to_iport(ibdev, rdma_ah_get_port_num(ah_attr));
1478 ppd = ppd_from_ibp(ibp);
1479 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(ah_attr)];
1480 dd = dd_from_ppd(ppd);
1481 if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf)
1486 static void hfi1_notify_new_ah(struct ib_device *ibdev,
1487 struct rdma_ah_attr *ah_attr,
1490 struct hfi1_ibport *ibp;
1491 struct hfi1_pportdata *ppd;
1492 struct hfi1_devdata *dd;
1496 * Do not trust reading anything from rvt_ah at this point as it is not
1497 * done being set up. We can, however, modify things which we need to set.
1500 ibp = to_iport(ibdev, rdma_ah_get_port_num(ah_attr));
1501 ppd = ppd_from_ibp(ibp);
1502 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(&ah->attr)];
1503 dd = dd_from_ppd(ppd);
1504 ah->vl = sc_to_vlt(dd, sc5);
1505 if (ah->vl < num_vls || ah->vl == 15)
1506 ah->log_pmtu = ilog2(dd->vld[ah->vl].mtu);
1509 struct ib_ah *hfi1_create_qp0_ah(struct hfi1_ibport *ibp, u16 dlid)
1511 struct rdma_ah_attr attr;
1512 struct ib_ah *ah = ERR_PTR(-EINVAL);
1515 memset(&attr, 0, sizeof(attr));
1516 rdma_ah_set_dlid(&attr, dlid);
1517 rdma_ah_set_port_num(&attr, ppd_from_ibp(ibp)->port);
1519 qp0 = rcu_dereference(ibp->rvp.qp[0]);
1521 ah = rdma_create_ah(qp0->ibqp.pd, &attr);
1527 * hfi1_get_npkeys - return the size of the PKEY table for context 0
1528 * @dd: the hfi1_ib device
1530 unsigned hfi1_get_npkeys(struct hfi1_devdata *dd)
1532 return ARRAY_SIZE(dd->pport[0].pkeys);
1535 static void init_ibport(struct hfi1_pportdata *ppd)
1537 struct hfi1_ibport *ibp = &ppd->ibport_data;
1538 size_t sz = ARRAY_SIZE(ibp->sl_to_sc);
1541 for (i = 0; i < sz; i++) {
1542 ibp->sl_to_sc[i] = i;
1543 ibp->sc_to_sl[i] = i;
1546 spin_lock_init(&ibp->rvp.lock);
1547 /* Set the prefix to the default value (see ch. 4.1.1) */
1548 ibp->rvp.gid_prefix = IB_DEFAULT_GID_PREFIX;
1549 ibp->rvp.sm_lid = 0;
1550 /* Below should only set bits defined in OPA PortInfo.CapabilityMask */
1551 ibp->rvp.port_cap_flags = IB_PORT_AUTO_MIGR_SUP |
1552 IB_PORT_CAP_MASK_NOTICE_SUP;
1553 ibp->rvp.pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
1554 ibp->rvp.pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
1555 ibp->rvp.pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
1556 ibp->rvp.pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
1557 ibp->rvp.pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;
1559 RCU_INIT_POINTER(ibp->rvp.qp[0], NULL);
1560 RCU_INIT_POINTER(ibp->rvp.qp[1], NULL);
1563 static void hfi1_get_dev_fw_str(struct ib_device *ibdev, char *str,
1566 struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
1567 struct hfi1_ibdev *dev = dev_from_rdi(rdi);
1568 u32 ver = dd_from_dev(dev)->dc8051_ver;
1570 snprintf(str, str_len, "%u.%u.%u", dc8051_ver_maj(ver),
1571 dc8051_ver_min(ver), dc8051_ver_patch(ver));
1574 static const char * const driver_cntr_names[] = {
1575 /* must be element 0 */
1583 "DRIVER_RcvLen_Errs",
1584 "DRIVER_EgrBufFull",
1588 static DEFINE_MUTEX(cntr_names_lock); /* protects the *_cntr_names buffers */
1589 static const char **dev_cntr_names;
1590 static const char **port_cntr_names;
1591 static int num_driver_cntrs = ARRAY_SIZE(driver_cntr_names);
1592 static int num_dev_cntrs;
1593 static int num_port_cntrs;
1594 static int cntr_names_initialized;
1597 * Convert a list of names separated by '\n' into an array of NULL terminated
1598 * strings. Optionally some entries can be reserved in the array to hold extra
1601 static int init_cntr_names(const char *names_in,
1602 const size_t names_len,
1603 int num_extra_names,
1605 const char ***cntr_names)
1607 char *names_out, *p, **q;
1611 for (i = 0; i < names_len; i++)
1612 if (names_in[i] == '\n')
1615 names_out = kmalloc((n + num_extra_names) * sizeof(char *) + names_len,
1623 p = names_out + (n + num_extra_names) * sizeof(char *);
1624 memcpy(p, names_in, names_len);
1626 q = (char **)names_out;
1627 for (i = 0; i < n; i++) {
1629 p = strchr(p, '\n');
1634 *cntr_names = (const char **)names_out;
1638 static struct rdma_hw_stats *alloc_hw_stats(struct ib_device *ibdev,
1643 mutex_lock(&cntr_names_lock);
1644 if (!cntr_names_initialized) {
1645 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
1647 err = init_cntr_names(dd->cntrnames,
1653 mutex_unlock(&cntr_names_lock);
1657 for (i = 0; i < num_driver_cntrs; i++)
1658 dev_cntr_names[num_dev_cntrs + i] =
1659 driver_cntr_names[i];
1661 err = init_cntr_names(dd->portcntrnames,
1662 dd->portcntrnameslen,
1667 kfree(dev_cntr_names);
1668 dev_cntr_names = NULL;
1669 mutex_unlock(&cntr_names_lock);
1672 cntr_names_initialized = 1;
1674 mutex_unlock(&cntr_names_lock);
1677 return rdma_alloc_hw_stats_struct(
1679 num_dev_cntrs + num_driver_cntrs,
1680 RDMA_HW_STATS_DEFAULT_LIFESPAN);
1682 return rdma_alloc_hw_stats_struct(
1685 RDMA_HW_STATS_DEFAULT_LIFESPAN);
1688 static u64 hfi1_sps_ints(void)
1690 unsigned long flags;
1691 struct hfi1_devdata *dd;
1694 spin_lock_irqsave(&hfi1_devs_lock, flags);
1695 list_for_each_entry(dd, &hfi1_dev_list, list) {
1696 sps_ints += get_all_cpu_total(dd->int_counter);
1698 spin_unlock_irqrestore(&hfi1_devs_lock, flags);
1702 static int get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
1709 u64 *stats = (u64 *)&hfi1_stats;
1712 hfi1_read_cntrs(dd_from_ibdev(ibdev), NULL, &values);
1713 values[num_dev_cntrs] = hfi1_sps_ints();
1714 for (i = 1; i < num_driver_cntrs; i++)
1715 values[num_dev_cntrs + i] = stats[i];
1716 count = num_dev_cntrs + num_driver_cntrs;
1718 struct hfi1_ibport *ibp = to_iport(ibdev, port);
1720 hfi1_read_portcntrs(ppd_from_ibp(ibp), NULL, &values);
1721 count = num_port_cntrs;
1724 memcpy(stats->value, values, count * sizeof(u64));
1729 * hfi1_register_ib_device - register our device with the infiniband core
1730 * @dd: the device data structure
1731 * Return 0 if successful, errno if unsuccessful.
1733 int hfi1_register_ib_device(struct hfi1_devdata *dd)
1735 struct hfi1_ibdev *dev = &dd->verbs_dev;
1736 struct ib_device *ibdev = &dev->rdi.ibdev;
1737 struct hfi1_pportdata *ppd = dd->pport;
1738 struct hfi1_ibport *ibp = &ppd->ibport_data;
1741 size_t lcpysz = IB_DEVICE_NAME_MAX;
1743 for (i = 0; i < dd->num_pports; i++)
1744 init_ibport(ppd + i);
1746 /* Only need to initialize non-zero fields. */
1748 setup_timer(&dev->mem_timer, mem_timer, (unsigned long)dev);
1750 seqlock_init(&dev->iowait_lock);
1751 seqlock_init(&dev->txwait_lock);
1752 INIT_LIST_HEAD(&dev->txwait);
1753 INIT_LIST_HEAD(&dev->memwait);
1755 ret = verbs_txreq_init(dev);
1757 goto err_verbs_txreq;
1759 /* Use first-port GUID as node guid */
1760 ibdev->node_guid = get_sguid(ibp, HFI1_PORT_GUID_INDEX);
1763 * The system image GUID is supposed to be the same for all
1764 * HFIs in a single system but since there can be other
1765 * device types in the system, we can't be sure this is unique.
1767 if (!ib_hfi1_sys_image_guid)
1768 ib_hfi1_sys_image_guid = ibdev->node_guid;
1769 lcpysz = strlcpy(ibdev->name, class_name(), lcpysz);
1770 strlcpy(ibdev->name + lcpysz, "_%d", IB_DEVICE_NAME_MAX - lcpysz);
1771 ibdev->owner = THIS_MODULE;
1772 ibdev->phys_port_cnt = dd->num_pports;
1773 ibdev->dev.parent = &dd->pcidev->dev;
1774 ibdev->modify_device = modify_device;
1775 ibdev->alloc_hw_stats = alloc_hw_stats;
1776 ibdev->get_hw_stats = get_hw_stats;
1777 ibdev->alloc_rdma_netdev = hfi1_vnic_alloc_rn;
1778 ibdev->free_rdma_netdev = hfi1_vnic_free_rn;
1780 /* keep process mad in the driver */
1781 ibdev->process_mad = hfi1_process_mad;
1782 ibdev->get_dev_fw_str = hfi1_get_dev_fw_str;
1784 strncpy(ibdev->node_desc, init_utsname()->nodename,
1785 sizeof(ibdev->node_desc));
1788 * Fill in rvt info object.
1790 dd->verbs_dev.rdi.driver_f.port_callback = hfi1_create_port_files;
1791 dd->verbs_dev.rdi.driver_f.get_card_name = get_card_name;
1792 dd->verbs_dev.rdi.driver_f.get_pci_dev = get_pci_dev;
1793 dd->verbs_dev.rdi.driver_f.check_ah = hfi1_check_ah;
1794 dd->verbs_dev.rdi.driver_f.notify_new_ah = hfi1_notify_new_ah;
1795 dd->verbs_dev.rdi.driver_f.get_guid_be = hfi1_get_guid_be;
1796 dd->verbs_dev.rdi.driver_f.query_port_state = query_port;
1797 dd->verbs_dev.rdi.driver_f.shut_down_port = shut_down_port;
1798 dd->verbs_dev.rdi.driver_f.cap_mask_chg = hfi1_cap_mask_chg;
1800 * Fill in rvt info device attributes.
1802 hfi1_fill_device_attr(dd);
1805 dd->verbs_dev.rdi.dparms.qp_table_size = hfi1_qp_table_size;
1806 dd->verbs_dev.rdi.dparms.qpn_start = 0;
1807 dd->verbs_dev.rdi.dparms.qpn_inc = 1;
1808 dd->verbs_dev.rdi.dparms.qos_shift = dd->qos_shift;
1809 dd->verbs_dev.rdi.dparms.qpn_res_start = kdeth_qp << 16;
1810 dd->verbs_dev.rdi.dparms.qpn_res_end =
1811 dd->verbs_dev.rdi.dparms.qpn_res_start + 65535;
1812 dd->verbs_dev.rdi.dparms.max_rdma_atomic = HFI1_MAX_RDMA_ATOMIC;
1813 dd->verbs_dev.rdi.dparms.psn_mask = PSN_MASK;
1814 dd->verbs_dev.rdi.dparms.psn_shift = PSN_SHIFT;
1815 dd->verbs_dev.rdi.dparms.psn_modify_mask = PSN_MODIFY_MASK;
1816 dd->verbs_dev.rdi.dparms.core_cap_flags = RDMA_CORE_PORT_INTEL_OPA;
1817 dd->verbs_dev.rdi.dparms.max_mad_size = OPA_MGMT_MAD_SIZE;
1819 dd->verbs_dev.rdi.driver_f.qp_priv_alloc = qp_priv_alloc;
1820 dd->verbs_dev.rdi.driver_f.qp_priv_free = qp_priv_free;
1821 dd->verbs_dev.rdi.driver_f.free_all_qps = free_all_qps;
1822 dd->verbs_dev.rdi.driver_f.notify_qp_reset = notify_qp_reset;
1823 dd->verbs_dev.rdi.driver_f.do_send = hfi1_do_send_from_rvt;
1824 dd->verbs_dev.rdi.driver_f.schedule_send = hfi1_schedule_send;
1825 dd->verbs_dev.rdi.driver_f.schedule_send_no_lock = _hfi1_schedule_send;
1826 dd->verbs_dev.rdi.driver_f.get_pmtu_from_attr = get_pmtu_from_attr;
1827 dd->verbs_dev.rdi.driver_f.notify_error_qp = notify_error_qp;
1828 dd->verbs_dev.rdi.driver_f.flush_qp_waiters = flush_qp_waiters;
1829 dd->verbs_dev.rdi.driver_f.stop_send_queue = stop_send_queue;
1830 dd->verbs_dev.rdi.driver_f.quiesce_qp = quiesce_qp;
1831 dd->verbs_dev.rdi.driver_f.notify_error_qp = notify_error_qp;
1832 dd->verbs_dev.rdi.driver_f.mtu_from_qp = mtu_from_qp;
1833 dd->verbs_dev.rdi.driver_f.mtu_to_path_mtu = mtu_to_path_mtu;
1834 dd->verbs_dev.rdi.driver_f.check_modify_qp = hfi1_check_modify_qp;
1835 dd->verbs_dev.rdi.driver_f.modify_qp = hfi1_modify_qp;
1836 dd->verbs_dev.rdi.driver_f.notify_restart_rc = hfi1_restart_rc;
1837 dd->verbs_dev.rdi.driver_f.check_send_wqe = hfi1_check_send_wqe;
1839 /* completion queue */
1840 snprintf(dd->verbs_dev.rdi.dparms.cq_name,
1841 sizeof(dd->verbs_dev.rdi.dparms.cq_name),
1842 "hfi1_cq%d", dd->unit);
1843 dd->verbs_dev.rdi.dparms.node = dd->node;
1846 dd->verbs_dev.rdi.flags = 0; /* Let rdmavt handle it all */
1847 dd->verbs_dev.rdi.dparms.lkey_table_size = hfi1_lkey_table_size;
1848 dd->verbs_dev.rdi.dparms.nports = dd->num_pports;
1849 dd->verbs_dev.rdi.dparms.npkeys = hfi1_get_npkeys(dd);
1851 /* post send table */
1852 dd->verbs_dev.rdi.post_parms = hfi1_post_parms;
1855 for (i = 0; i < dd->num_pports; i++, ppd++)
1856 rvt_init_port(&dd->verbs_dev.rdi,
1857 &ppd->ibport_data.rvp,
1861 ret = rvt_register_device(&dd->verbs_dev.rdi);
1863 goto err_verbs_txreq;
1865 ret = hfi1_verbs_register_sysfs(dd);
1872 rvt_unregister_device(&dd->verbs_dev.rdi);
1874 verbs_txreq_exit(dev);
1875 dd_dev_err(dd, "cannot register verbs: %d!\n", -ret);
1879 void hfi1_unregister_ib_device(struct hfi1_devdata *dd)
1881 struct hfi1_ibdev *dev = &dd->verbs_dev;
1883 hfi1_verbs_unregister_sysfs(dd);
1885 rvt_unregister_device(&dd->verbs_dev.rdi);
1887 if (!list_empty(&dev->txwait))
1888 dd_dev_err(dd, "txwait list not empty!\n");
1889 if (!list_empty(&dev->memwait))
1890 dd_dev_err(dd, "memwait list not empty!\n");
1892 del_timer_sync(&dev->mem_timer);
1893 verbs_txreq_exit(dev);
1895 mutex_lock(&cntr_names_lock);
1896 kfree(dev_cntr_names);
1897 kfree(port_cntr_names);
1898 dev_cntr_names = NULL;
1899 port_cntr_names = NULL;
1900 cntr_names_initialized = 0;
1901 mutex_unlock(&cntr_names_lock);
1904 void hfi1_cnp_rcv(struct hfi1_packet *packet)
1906 struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
1907 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
1908 struct ib_header *hdr = packet->hdr;
1909 struct rvt_qp *qp = packet->qp;
1912 u8 sl, sc5, svc_type;
1914 switch (packet->qp->ibqp.qp_type) {
1916 rlid = rdma_ah_get_dlid(&qp->remote_ah_attr);
1917 rqpn = qp->remote_qpn;
1918 svc_type = IB_CC_SVCTYPE_UC;
1921 rlid = rdma_ah_get_dlid(&qp->remote_ah_attr);
1922 rqpn = qp->remote_qpn;
1923 svc_type = IB_CC_SVCTYPE_RC;
1928 svc_type = IB_CC_SVCTYPE_UD;
1931 ibp->rvp.n_pkt_drops++;
1935 sc5 = hfi1_9B_get_sc5(hdr, packet->rhf);
1936 sl = ibp->sc_to_sl[sc5];
1937 lqpn = qp->ibqp.qp_num;
1939 process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);