/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/net.h>
#include <rdma/ib_smi.h>

#include "hfi.h"
#include "mad.h"
#include "verbs_txreq.h"
#include "qp.h"

/**
 * ud_loopback - handle send on loopback QPs
 * @sqp: the sending QP
 * @swqe: the send work request
 *
 * This is called from hfi1_make_ud_req() to forward a WQE addressed
 * to the same HFI.
 * Note that the receive interrupt handler may be calling hfi1_ud_rcv()
 * while this is being called.
 */
static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
{
	struct hfi1_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
	struct hfi1_pportdata *ppd;
	struct rvt_qp *qp;
	struct rdma_ah_attr *ah_attr;
	unsigned long flags;
	struct rvt_sge_state ssge;
	struct rvt_sge *sge;
	struct ib_wc wc;
	u32 length;
	enum ib_qp_type sqptype, dqptype;

	rcu_read_lock();

	qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), &ibp->rvp,
			    swqe->ud_wr.remote_qpn);
	if (!qp) {
		ibp->rvp.n_pkt_drops++;
		rcu_read_unlock();
		return;
	}
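
	/*
	 * GSI QPs carry UD traffic on the wire; for the type check below,
	 * treat IB_QPT_GSI as IB_QPT_UD on both the send and receive side.
	 */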
	sqptype = sqp->ibqp.qp_type == IB_QPT_GSI ?
			IB_QPT_UD : sqp->ibqp.qp_type;
	dqptype = qp->ibqp.qp_type == IB_QPT_GSI ?
			IB_QPT_UD : qp->ibqp.qp_type;

	if (dqptype != sqptype ||
	    !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
		ibp->rvp.n_pkt_drops++;
		goto drop;
	}

	ah_attr = &ibah_to_rvtah(swqe->ud_wr.ah)->attr;
	ppd = ppd_from_ibp(ibp);

	if (qp->ibqp.qp_num > 1) {
		u16 pkey;
		u16 slid;
		u8 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(ah_attr)];

		pkey = hfi1_get_pkey(ibp, sqp->s_pkey_index);
		slid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
				   ((1 << ppd->lmc) - 1));
		if (unlikely(ingress_pkey_check(ppd, pkey, sc5,
						qp->s_pkey_index, slid))) {
			hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_P_KEY, pkey,
				       rdma_ah_get_sl(ah_attr),
				       sqp->ibqp.qp_num, qp->ibqp.qp_num,
				       slid, rdma_ah_get_dlid(ah_attr));
			goto drop;
		}
	}

	/*
	 * Check that the qkey matches (except for QP0, see 9.6.1.4.1).
	 * Qkeys with the high order bit set mean use the
	 * qkey from the QP context instead of the WR (see 10.2.5).
	 */
	if (qp->ibqp.qp_num) {
		u32 qkey;

		qkey = (int)swqe->ud_wr.remote_qkey < 0 ?
			sqp->qkey : swqe->ud_wr.remote_qkey;
		if (unlikely(qkey != qp->qkey)) {
			u16 lid;

			lid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
					  ((1 << ppd->lmc) - 1));
			hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_Q_KEY, qkey,
				       rdma_ah_get_sl(ah_attr),
				       sqp->ibqp.qp_num, qp->ibqp.qp_num,
				       lid,
				       rdma_ah_get_dlid(ah_attr));
			goto drop;
		}
	}

	/*
	 * A GRH is expected to precede the data even if not
	 * present on the wire.
	 */
	length = swqe->length;
	memset(&wc, 0, sizeof(wc));
	wc.byte_len = length + sizeof(struct ib_grh);

	if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = swqe->wr.ex.imm_data;
	}
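
	/*
	 * Take r_lock to serialize with hfi1_ud_rcv(), which may be
	 * delivering into this QP's receive queue at interrupt level.
	 */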
	spin_lock_irqsave(&qp->r_lock, flags);

	/*
	 * Get the next work request entry to find where to put the data.
	 */
	if (qp->r_flags & RVT_R_REUSE_SGE) {
		qp->r_flags &= ~RVT_R_REUSE_SGE;
	} else {
		int ret;

		ret = hfi1_rvt_get_rwqe(qp, 0);
		if (ret < 0) {
			rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
			goto bail_unlock;
		}
		if (!ret) {
			if (qp->ibqp.qp_num == 0)
				ibp->rvp.n_vl15_dropped++;
			goto bail_unlock;
		}
	}
	/* Silently drop packets which are too big. */
	if (unlikely(wc.byte_len > qp->r_len)) {
		qp->r_flags |= RVT_R_REUSE_SGE;
		ibp->rvp.n_pkt_drops++;
		goto bail_unlock;
	}

	if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
		struct ib_grh grh;
		const struct ib_global_route *grd = rdma_ah_read_grh(ah_attr);

		hfi1_make_grh(ibp, &grh, grd, 0, 0);
		hfi1_copy_sge(&qp->r_sge, &grh,
			      sizeof(grh), true, false);
		wc.wc_flags |= IB_WC_GRH;
	} else {
		rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true);
	}
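
	/*
	 * Copy the payload from the sender's SGE list into the receive
	 * WQE, walking each source SGE (and, for lkey-backed MRs, each
	 * RVT_SEGSZ-entry segment map) until all of length is copied.
	 */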
	ssge.sg_list = swqe->sg_list + 1;
	ssge.sge = *swqe->sg_list;
	ssge.num_sge = swqe->wr.num_sge;
	sge = &ssge.sge;
	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		WARN_ON_ONCE(len == 0);
		hfi1_copy_sge(&qp->r_sge, sge->vaddr, len, true, false);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--ssge.num_sge)
				*sge = *ssge.sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= RVT_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		length -= len;
	}
	rvt_put_ss(&qp->r_sge);
	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
		goto bail_unlock;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.opcode = IB_WC_RECV;
	wc.qp = &qp->ibqp;
	wc.src_qp = sqp->ibqp.qp_num;
	if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_SMI) {
		if (sqp->ibqp.qp_type == IB_QPT_GSI ||
		    sqp->ibqp.qp_type == IB_QPT_SMI)
			wc.pkey_index = swqe->ud_wr.pkey_index;
		else
			wc.pkey_index = sqp->s_pkey_index;
	} else {
		wc.pkey_index = 0;
	}
	wc.slid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
			      ((1 << ppd->lmc) - 1));
	/* Check for loopback when the port lid is not set */
	if (wc.slid == 0 && sqp->ibqp.qp_type == IB_QPT_GSI)
		wc.slid = be16_to_cpu(IB_LID_PERMISSIVE);
	wc.sl = rdma_ah_get_sl(ah_attr);
	wc.dlid_path_bits = rdma_ah_get_dlid(ah_attr) & ((1 << ppd->lmc) - 1);
	wc.port_num = qp->port_num;
	/* Signal completion event if the solicited bit is set. */
	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
		     swqe->wr.send_flags & IB_SEND_SOLICITED);
	ibp->rvp.n_loop_pkts++;
bail_unlock:
	spin_unlock_irqrestore(&qp->r_lock, flags);
drop:
	rcu_read_unlock();
}

/**
 * hfi1_make_ud_req - construct a UD request packet
 * @qp: the QP
 * @ps: the current packet state
 *
 * Assume s_lock is held.
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct ib_other_headers *ohdr;
	struct rdma_ah_attr *ah_attr;
	struct hfi1_pportdata *ppd;
	struct hfi1_ibport *ibp;
	struct rvt_swqe *wqe;
	u32 nwords;
	u32 extra_bytes;
	u32 bth0;
	u16 lrh0;
	u16 lid;
	int next_cur;
	u8 sc5;

	ps->s_txreq = get_txreq(ps->dev, qp);
	if (IS_ERR(ps->s_txreq))
		goto bail_no_tx;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
		if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
			goto bail;
		/* We are in the error state, flush the work request. */
		smp_read_barrier_depends(); /* see post_one_send */
		if (qp->s_last == ACCESS_ONCE(qp->s_head))
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (iowait_sdma_pending(&priv->s_iowait)) {
			qp->s_flags |= RVT_S_WAIT_DMA;
			goto bail;
		}
		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		hfi1_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
		goto done_free_tx;
	}

	/* see post_one_send() */
	smp_read_barrier_depends();
	if (qp->s_cur == ACCESS_ONCE(qp->s_head))
		goto bail;

	wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
	next_cur = qp->s_cur + 1;
	if (next_cur >= qp->s_size)
		next_cur = 0;

	/* Construct the header. */
	ibp = to_iport(qp->ibqp.device, qp->port_num);
	ppd = ppd_from_ibp(ibp);
	ah_attr = &ibah_to_rvtah(wqe->ud_wr.ah)->attr;
	if (rdma_ah_get_dlid(ah_attr) < be16_to_cpu(IB_MULTICAST_LID_BASE) ||
	    rdma_ah_get_dlid(ah_attr) == be16_to_cpu(IB_LID_PERMISSIVE)) {
		lid = rdma_ah_get_dlid(ah_attr) & ~((1 << ppd->lmc) - 1);
		if (unlikely(!loopback &&
			     (lid == ppd->lid ||
			      (lid == be16_to_cpu(IB_LID_PERMISSIVE) &&
			       qp->ibqp.qp_type == IB_QPT_GSI)))) {
			unsigned long tflags = ps->flags;

			/*
			 * If DMAs are in progress, we can't generate
			 * a completion for the loopback packet since
			 * it would be out of order.
			 * Instead of waiting, we could queue a
			 * zero length descriptor so we get a callback.
			 */
			if (iowait_sdma_pending(&priv->s_iowait)) {
				qp->s_flags |= RVT_S_WAIT_DMA;
				goto bail;
			}
			qp->s_cur = next_cur;
			spin_unlock_irqrestore(&qp->s_lock, tflags);
			ud_loopback(qp, wqe);
			spin_lock_irqsave(&qp->s_lock, tflags);
			ps->flags = tflags;
			hfi1_send_complete(qp, wqe, IB_WC_SUCCESS);
			goto done_free_tx;
		}
	}
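
	/*
	 * Advance the ring and compute wire padding: extra_bytes is the
	 * 0-3 byte pad that rounds the payload up to a 4-byte boundary,
	 * and nwords is that padded payload length in 32-bit words.
	 */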
	qp->s_cur = next_cur;
	extra_bytes = -wqe->length & 3;
	nwords = (wqe->length + extra_bytes) >> 2;

	/* header size in 32-bit words LRH+BTH+DETH = (8+12+8)/4. */
	qp->s_hdrwords = 7;
	ps->s_txreq->s_cur_size = wqe->length;
	ps->s_txreq->ss = &qp->s_sge;
	qp->s_srate = rdma_ah_get_static_rate(ah_attr);
	qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
	qp->s_wqe = wqe;
	qp->s_sge.sge = wqe->sg_list[0];
	qp->s_sge.sg_list = wqe->sg_list + 1;
	qp->s_sge.num_sge = wqe->wr.num_sge;
	qp->s_sge.total_len = wqe->length;

	if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
		/* Header size in 32-bit words. */
		qp->s_hdrwords += hfi1_make_grh(ibp,
						&ps->s_txreq->phdr.hdr.u.l.grh,
						rdma_ah_read_grh(ah_attr),
						qp->s_hdrwords, nwords);
		lrh0 = HFI1_LRH_GRH;
		ohdr = &ps->s_txreq->phdr.hdr.u.l.oth;
		/*
		 * Don't worry about sending to locally attached multicast
		 * QPs.  It is unspecified by the spec. what happens.
		 */
	} else {
		/* Header size in 32-bit words. */
		lrh0 = HFI1_LRH_BTH;
		ohdr = &ps->s_txreq->phdr.hdr.u.oth;
	}
	if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
		qp->s_hdrwords++;
		ohdr->u.ud.imm_data = wqe->wr.ex.imm_data;
		bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
	} else {
		bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
	}
	sc5 = ibp->sl_to_sc[rdma_ah_get_sl(ah_attr)];
	lrh0 |= (rdma_ah_get_sl(ah_attr) & 0xf) << 4;
	if (qp->ibqp.qp_type == IB_QPT_SMI) {
		lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
		priv->s_sc = 0xf;
	} else {
		lrh0 |= (sc5 & 0xf) << 12;
		priv->s_sc = sc5;
	}
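
	/*
	 * Pick the SDMA engine and PIO send context that correspond to
	 * the service class chosen above.
	 */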
	priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
	ps->s_txreq->sde = priv->s_sde;
	priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc);
	ps->s_txreq->psc = priv->s_sendcontext;
	ps->s_txreq->phdr.hdr.lrh[0] = cpu_to_be16(lrh0);
	ps->s_txreq->phdr.hdr.lrh[1] =
		cpu_to_be16(rdma_ah_get_dlid(ah_attr));
	ps->s_txreq->phdr.hdr.lrh[2] =
		cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
	if (rdma_ah_get_dlid(ah_attr) == be16_to_cpu(IB_LID_PERMISSIVE)) {
		ps->s_txreq->phdr.hdr.lrh[3] = IB_LID_PERMISSIVE;
	} else {
		lid = ppd->lid;
		if (lid) {
			lid |= rdma_ah_get_path_bits(ah_attr) &
				((1 << ppd->lmc) - 1);
			ps->s_txreq->phdr.hdr.lrh[3] = cpu_to_be16(lid);
		} else {
			ps->s_txreq->phdr.hdr.lrh[3] = IB_LID_PERMISSIVE;
		}
	}
	if (wqe->wr.send_flags & IB_SEND_SOLICITED)
		bth0 |= IB_BTH_SOLICITED;
	bth0 |= extra_bytes << 20;
	if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_SMI)
		bth0 |= hfi1_get_pkey(ibp, wqe->ud_wr.pkey_index);
	else
		bth0 |= hfi1_get_pkey(ibp, qp->s_pkey_index);
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(wqe->ud_wr.remote_qpn);
	ohdr->bth[2] = cpu_to_be32(mask_psn(wqe->psn));
	/*
	 * Qkeys with the high order bit set mean use the
	 * qkey from the QP context instead of the WR (see 10.2.5).
	 */
	ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->ud_wr.remote_qkey < 0 ?
					 qp->qkey : wqe->ud_wr.remote_qkey);
	ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);
	/* disarm any ahg */
	priv->s_ahg->ahgcount = 0;
	priv->s_ahg->ahgidx = 0;
	priv->s_ahg->tx_flags = 0;
	/* pbc */
	ps->s_txreq->hdr_dwords = qp->s_hdrwords + 2;

	return 1;

done_free_tx:
	hfi1_put_txreq(ps->s_txreq);
	ps->s_txreq = NULL;
	return 0;

bail:
	hfi1_put_txreq(ps->s_txreq);

bail_no_tx:
	ps->s_txreq = NULL;
	qp->s_flags &= ~RVT_S_BUSY;
	qp->s_hdrwords = 0;
	return 0;
}

/*
 * Hardware can't check this so we do it here.
 *
 * This is a slightly different algorithm than the standard pkey check.  It
 * special cases the management keys and allows for 0x7fff and 0xffff to be in
 * the table at the same time.
 *
 * @returns the index found or -1 if not found
 */
int hfi1_lookup_pkey_idx(struct hfi1_ibport *ibp, u16 pkey)
{
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	unsigned i;

	if (pkey == FULL_MGMT_P_KEY || pkey == LIM_MGMT_P_KEY) {
		unsigned lim_idx = -1;

		for (i = 0; i < ARRAY_SIZE(ppd->pkeys); ++i) {
			/* here we look for an exact match */
			if (ppd->pkeys[i] == pkey)
				return i;
			if (ppd->pkeys[i] == LIM_MGMT_P_KEY)
				lim_idx = i;
		}

		/* did not find 0xffff return 0x7fff idx if found */
		if (pkey == FULL_MGMT_P_KEY)
			return lim_idx;

		/* no match...  */
		return -1;
	}

	pkey &= 0x7fff; /* remove limited/full membership bit */

	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); ++i)
		if ((ppd->pkeys[i] & 0x7fff) == pkey)
			return i;

	/*
	 * Should not get here, this means hardware failed to validate pkeys.
	 */
	return -1;
}

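/*
 * Illustrative example (not part of the original source): with a pkey
 * table of { 0x8001, 0x7fff }, hfi1_lookup_pkey_idx(ibp, 0xffff)
 * returns 1 (the LIM_MGMT_P_KEY entry stands in for a missing
 * FULL_MGMT_P_KEY), while hfi1_lookup_pkey_idx(ibp, 0x0001) and
 * hfi1_lookup_pkey_idx(ibp, 0x8001) both return 0 once the membership
 * bit is masked off.
 */
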
void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn,
		u32 pkey, u32 slid, u32 dlid, u8 sc5,
		const struct ib_grh *old_grh)
{
	u64 pbc, pbc_flags = 0;
	u32 bth0, plen, vl, hwords = 5;
	u16 lrh0;
	u8 sl = ibp->sc_to_sl[sc5];
	struct ib_header hdr;
	struct ib_other_headers *ohdr;
	struct pio_buf *pbuf;
	struct send_context *ctxt = qp_to_send_context(qp, sc5);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
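
	/*
	 * If the offending packet carried a GRH, reflect it back with
	 * SGID and DGID swapped so the CNP is routed to the source.
	 */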
	if (old_grh) {
		struct ib_grh *grh = &hdr.u.l.grh;

		grh->version_tclass_flow = old_grh->version_tclass_flow;
		grh->paylen = cpu_to_be16((hwords - 2 + SIZE_OF_CRC) << 2);
		grh->hop_limit = 0xff;
		grh->sgid = old_grh->dgid;
		grh->dgid = old_grh->sgid;
		ohdr = &hdr.u.l.oth;
		lrh0 = HFI1_LRH_GRH;
		hwords += sizeof(struct ib_grh) / sizeof(u32);
	} else {
		ohdr = &hdr.u.oth;
		lrh0 = HFI1_LRH_BTH;
	}

	lrh0 |= (sc5 & 0xf) << 12 | sl << 4;
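
	/* CNP opcode, the offender's pkey, BECN set, PSN 0. */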
	bth0 = pkey | (IB_OPCODE_CNP << 24);
	ohdr->bth[0] = cpu_to_be32(bth0);

	ohdr->bth[1] = cpu_to_be32(remote_qpn | (1 << IB_BECN_SHIFT));
	ohdr->bth[2] = 0; /* PSN 0 */

	hdr.lrh[0] = cpu_to_be16(lrh0);
	hdr.lrh[1] = cpu_to_be16(dlid);
	hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
	hdr.lrh[3] = cpu_to_be16(slid);
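
	/*
	 * Build the PBC and send the CNP inline from a PIO buffer; if no
	 * send context or buffer credit is available, the CNP is simply
	 * not sent.
	 */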
	plen = 2 /* PBC */ + hwords;
	pbc_flags |= (!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT;
	vl = sc_to_vlt(ppd->dd, sc5);
	pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
	if (ctxt) {
		pbuf = sc_buffer_alloc(ctxt, plen, NULL, NULL);
		if (pbuf)
			ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc,
						 &hdr, hwords);
	}
}

/*
 * opa_smp_check() - Do the regular pkey checking, and the additional
 * checks for SMPs specified in OPAv1 rev 1.0, 9/19/2016 update, section
 * 9.10.25 ("SMA Packet Checks").
 *
 * Note that:
 *   - Checks are done using the pkey directly from the packet's BTH,
 *     and specifically _not_ the pkey that we attach to the completion,
 *     which may be different.
 *   - These checks are specifically for "non-local" SMPs (i.e., SMPs
 *     which originated on another node). SMPs which are sent from, and
 *     destined to this node are checked in opa_local_smp_check().
 *
 * At the point where opa_smp_check() is called, we know:
 *   - destination QP is QP0
 *
 * opa_smp_check() returns 0 if all checks succeed, 1 otherwise.
 */
static int opa_smp_check(struct hfi1_ibport *ibp, u16 pkey, u8 sc5,
			 struct rvt_qp *qp, u16 slid, struct opa_smp *smp)
{
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);

	/*
	 * I don't think it's possible for us to get here with sc != 0xf,
	 * but check it to be certain.
	 */
	if (sc5 != 0xf)
		return 1;

	if (rcv_pkey_check(ppd, pkey, sc5, slid))
		return 1;

	/*
	 * At this point we know (and so don't need to check again) that
	 * the pkey is either LIM_MGMT_P_KEY, or FULL_MGMT_P_KEY
	 * (see ingress_pkey_check).
	 */
	if (smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE &&
	    smp->mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED) {
		ingress_pkey_table_fail(ppd, pkey, slid);
		return 1;
	}

	/*
	 * SMPs fall into one of four (disjoint) categories:
	 * SMA request, SMA response, SMA trap, or SMA trap repress.
	 * Our response depends, in part, on which type of SMP we're
	 * processing.
	 *
	 * If this is an SMA response, skip the check here.
	 *
	 * If this is an SMA request or SMA trap repress:
	 *   - pkey != FULL_MGMT_P_KEY =>
	 *       increment port recv constraint errors, drop MAD
	 *
	 * Otherwise:
	 *   - accept if the port is running an SM
	 *   - drop MAD if it's an SMA trap
	 *   - pkey == FULL_MGMT_P_KEY =>
	 *       reply with unsupported method
	 *   - pkey != FULL_MGMT_P_KEY =>
	 *       increment port recv constraint errors, drop MAD
	 */
	switch (smp->method) {
	case IB_MGMT_METHOD_GET_RESP:
	case IB_MGMT_METHOD_REPORT_RESP:
		break;
	case IB_MGMT_METHOD_GET:
	case IB_MGMT_METHOD_SET:
	case IB_MGMT_METHOD_REPORT:
	case IB_MGMT_METHOD_TRAP_REPRESS:
		if (pkey != FULL_MGMT_P_KEY) {
			ingress_pkey_table_fail(ppd, pkey, slid);
			return 1;
		}
		break;
	default:
		if (ibp->rvp.port_cap_flags & IB_PORT_SM)
			return 0;
		if (smp->method == IB_MGMT_METHOD_TRAP)
			return 1;
		if (pkey == FULL_MGMT_P_KEY) {
			smp->status |= IB_SMP_UNSUP_METHOD;
			return 0;
		}
		ingress_pkey_table_fail(ppd, pkey, slid);
		return 1;
	}
	return 0;
}

/**
 * hfi1_ud_rcv - receive an incoming UD packet
 * @packet: the packet structure, carrying the header, payload,
 *          receive flags, and the QP the packet arrived on
 *
 * This is called from qp_rcv() to process an incoming UD packet
 * for the given QP.
 * Called at interrupt level.
 */
void hfi1_ud_rcv(struct hfi1_packet *packet)
{
	struct ib_other_headers *ohdr = packet->ohdr;
	struct ib_wc wc;
	u32 hdrsize = packet->hlen;
	u32 qkey;
	u32 src_qp;
	u16 dlid, pkey;
	int mgmt_pkey_idx = -1;
	struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	struct ib_header *hdr = packet->hdr;
	u32 rcv_flags = packet->rcv_flags;
	void *data = packet->ebuf;
	u32 tlen = packet->tlen;
	struct rvt_qp *qp = packet->qp;
	bool has_grh = rcv_flags & HFI1_HAS_GRH;
	u8 sc5 = hfi1_9B_get_sc5(hdr, packet->rhf);
	u32 bth1;
	u8 sl_from_sc, sl;
	u16 slid;
	u8 extra_bytes;
	u8 opcode;

	qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
	src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & RVT_QPN_MASK;
	dlid = ib_get_dlid(hdr);
	bth1 = be32_to_cpu(ohdr->bth[1]);
	slid = ib_get_slid(hdr);
	pkey = ib_bth_get_pkey(ohdr);
	opcode = ib_bth_get_opcode(ohdr);
	sl = ib_get_sl(hdr);
	extra_bytes = ib_bth_get_pad(ohdr);
	extra_bytes += (SIZE_OF_CRC << 2);
	sl_from_sc = ibp->sc_to_sl[sc5];
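
	/*
	 * Handle FECN/BECN marking up front; a CNP may be generated in
	 * response to any marked packet except one that is itself a CNP.
	 */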
	process_ecn(qp, packet, (opcode != IB_OPCODE_CNP));
	/*
	 * Get the number of bytes the message was padded by
	 * and drop incomplete packets.
	 */
	if (unlikely(tlen < (hdrsize + extra_bytes)))
		goto drop;

	tlen -= hdrsize + extra_bytes;

	/*
	 * Check that the permissive LID is only used on QP0
	 * and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1).
	 */
	if (qp->ibqp.qp_num) {
		if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE ||
			     hdr->lrh[3] == IB_LID_PERMISSIVE))
			goto drop;
		if (qp->ibqp.qp_num > 1) {
			if (unlikely(rcv_pkey_check(ppd, pkey, sc5, slid))) {
				/*
				 * Traps will not be sent for packets dropped
				 * by the HW. This is fine, as sending trap
				 * for invalid pkeys is optional according to
				 * IB spec (release 1.3, section 10.9.4)
				 */
				hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_P_KEY,
					       pkey, sl,
					       src_qp, qp->ibqp.qp_num,
					       slid, dlid);
				return;
			}
		} else {
			/* GSI packet */
			mgmt_pkey_idx = hfi1_lookup_pkey_idx(ibp, pkey);
			if (mgmt_pkey_idx < 0)
				goto drop;
		}
		if (unlikely(qkey != qp->qkey)) {
			hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_Q_KEY, qkey, sl,
				       src_qp, qp->ibqp.qp_num,
				       slid, dlid);
			return;
		}
		/* Drop invalid MAD packets (see 13.5.3.1). */
		if (unlikely(qp->ibqp.qp_num == 1 &&
			     (tlen > 2048 || (sc5 == 0xF))))
			goto drop;
	} else {
		/* Received on QP0, and so by definition, this is an SMP */
		struct opa_smp *smp = (struct opa_smp *)data;

		if (opa_smp_check(ibp, pkey, sc5, qp, slid, smp))
			goto drop;

		if (tlen > 2048)
			goto drop;
		if ((hdr->lrh[1] == IB_LID_PERMISSIVE ||
		     hdr->lrh[3] == IB_LID_PERMISSIVE) &&
		    smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
			goto drop;

		/* look up SMI pkey */
		mgmt_pkey_idx = hfi1_lookup_pkey_idx(ibp, pkey);
		if (mgmt_pkey_idx < 0)
			goto drop;
	}
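
	/*
	 * Only SEND_ONLY and SEND_ONLY_WITH_IMMEDIATE are valid UD
	 * opcodes here; anything else is dropped.  The 4-byte immediate
	 * is carried in the header, so it is trimmed from the payload
	 * length.
	 */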
	if (qp->ibqp.qp_num > 1 &&
	    opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
		wc.ex.imm_data = ohdr->u.ud.imm_data;
		wc.wc_flags = IB_WC_WITH_IMM;
		tlen -= sizeof(u32);
	} else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
		wc.ex.imm_data = 0;
		wc.wc_flags = 0;
	} else {
		goto drop;
	}

	/*
	 * A GRH is expected to precede the data even if not
	 * present on the wire.
	 */
	wc.byte_len = tlen + sizeof(struct ib_grh);

	/*
	 * Get the next work request entry to find where to put the data.
	 */
	if (qp->r_flags & RVT_R_REUSE_SGE) {
		qp->r_flags &= ~RVT_R_REUSE_SGE;
	} else {
		int ret;

		ret = hfi1_rvt_get_rwqe(qp, 0);
		if (ret < 0) {
			rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
			return;
		}
		if (!ret) {
			if (qp->ibqp.qp_num == 0)
				ibp->rvp.n_vl15_dropped++;
			return;
		}
	}
	/* Silently drop packets which are too big. */
	if (unlikely(wc.byte_len > qp->r_len)) {
		qp->r_flags |= RVT_R_REUSE_SGE;
		goto drop;
	}
	if (has_grh) {
		hfi1_copy_sge(&qp->r_sge, &hdr->u.l.grh,
			      sizeof(struct ib_grh), true, false);
		wc.wc_flags |= IB_WC_GRH;
	} else {
		rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true);
	}
	hfi1_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh),
		      true, false);
	rvt_put_ss(&qp->r_sge);
	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
		return;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.opcode = IB_WC_RECV;
	wc.vendor_err = 0;
	wc.qp = &qp->ibqp;
	wc.src_qp = src_qp;

	if (qp->ibqp.qp_type == IB_QPT_GSI ||
	    qp->ibqp.qp_type == IB_QPT_SMI) {
		if (mgmt_pkey_idx < 0) {
			if (net_ratelimit()) {
				struct hfi1_devdata *dd = ppd->dd;

				dd_dev_err(dd, "QP type %d mgmt_pkey_idx < 0 and packet not dropped???\n",
					   qp->ibqp.qp_type);
				mgmt_pkey_idx = 0;
			}
		}
		wc.pkey_index = (unsigned)mgmt_pkey_idx;
	} else {
		wc.pkey_index = 0;
	}

	wc.slid = slid;
	wc.sl = sl_from_sc;

	/*
	 * Save the LMC lower bits if the destination LID is a unicast LID.
	 */
	wc.dlid_path_bits = dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE) ? 0 :
		dlid & ((1 << ppd_from_ibp(ibp)->lmc) - 1);
	wc.port_num = qp->port_num;
	/* Signal completion event if the solicited bit is set. */
	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
		     (ohdr->bth[0] &
		      cpu_to_be32(IB_BTH_SOLICITED)) != 0);
	return;

drop:
	ibp->rvp.n_pkt_drops++;
}