/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/hash.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <rdma/rdma_vt.h>
#include <rdma/rdmavt_qp.h>
#include <rdma/ib_verbs.h>

#include "hfi.h"
#include "qp.h"
#include "trace.h"
#include "verbs_txreq.h"
unsigned int hfi1_qp_table_size = 256;
module_param_named(qp_table_size, hfi1_qp_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(qp_table_size, "QP table size");
static void flush_tx_list(struct rvt_qp *qp);
static int iowait_sleep(
	struct sdma_engine *sde,
	struct iowait *wait,
	struct sdma_txreq *stx,
	unsigned int seq);
static void iowait_wakeup(struct iowait *wait, int reason);
static void iowait_sdma_drained(struct iowait *wait);
static void qp_pio_drain(struct rvt_qp *qp);
static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
			      struct rvt_qpn_map *map, unsigned off)
{
	return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}
const struct rvt_operation_params hfi1_post_parms[RVT_OPERATION_MAX] = {
[IB_WR_RDMA_WRITE] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_RDMA_READ] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_ATOMIC,
},

[IB_WR_ATOMIC_CMP_AND_SWP] = {
	.length = sizeof(struct ib_atomic_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
},

[IB_WR_ATOMIC_FETCH_AND_ADD] = {
	.length = sizeof(struct ib_atomic_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
},

[IB_WR_RDMA_WRITE_WITH_IMM] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_SEND] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
		       BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_SEND_WITH_IMM] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
		       BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_REG_MR] = {
	.length = sizeof(struct ib_reg_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_LOCAL,
},

[IB_WR_LOCAL_INV] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_LOCAL,
},

[IB_WR_SEND_WITH_INV] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_RC),
},

};
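
/* Free any verbs tx requests still queued on the QP's iowait tx list. */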
static void flush_tx_list(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	while (!list_empty(&priv->s_iowait.tx_head)) {
		struct sdma_txreq *tx;

		tx = list_first_entry(
			&priv->s_iowait.tx_head,
			struct sdma_txreq,
			list);
		list_del_init(&tx->list);
		hfi1_put_txreq(
			container_of(tx, struct verbs_txreq, txreq));
	}
}
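
/*
 * Remove the QP from the iowait list it is sleeping on (if any) and
 * drop the wait-list reference that was taken when it was queued.
 */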
static void flush_iowait(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	unsigned long flags;
	seqlock_t *lock = priv->s_iowait.lock;

	if (!lock)
		return;
	write_seqlock_irqsave(lock, flags);
	if (!list_empty(&priv->s_iowait.list)) {
		list_del_init(&priv->s_iowait.list);
		priv->s_iowait.lock = NULL;
		rvt_put_qp(qp);
	}
	write_sequnlock_irqrestore(lock, flags);
}
static inline int opa_mtu_enum_to_int(int mtu)
{
	switch (mtu) {
	case OPA_MTU_8192:  return 8192;
	case OPA_MTU_10240: return 10240;
	default:            return -1;
	}
}
/*
 * This function is what we would push to the core layer if we wanted to be a
 * "first class citizen".  Instead we hide this here and rely on Verbs ULPs
 * to blindly pass the MTU enum value from the PathRecord to us.
 */
static inline int verbs_mtu_enum_to_int(struct ib_device *dev, enum ib_mtu mtu)
{
	int val;

	/* Constraining 10KB packets to 8KB packets */
	if (mtu == (enum ib_mtu)OPA_MTU_10240)
		mtu = (enum ib_mtu)OPA_MTU_8192;
	val = opa_mtu_enum_to_int((int)mtu);
	if (val > 0)
		return val;
	return ib_mtu_enum_to_int(mtu);
}
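
/*
 * Driver hook called by rdmavt to validate a modify QP request:
 * the new (or alternate) path's SL must map to a usable SC, SDMA
 * engine, and send context.
 */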
int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
			 int attr_mask, struct ib_udata *udata)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct hfi1_ibdev *dev = to_idev(ibqp->device);
	struct hfi1_devdata *dd = dd_from_dev(dev);
	u8 sc;

	if (attr_mask & IB_QP_AV) {
		sc = ah_to_sc(ibqp->device, &attr->ah_attr);
		if (sc == 0xf)
			return -EINVAL;

		if (!qp_to_sdma_engine(qp, sc) &&
		    dd->flags & HFI1_HAS_SEND_DMA)
			return -EINVAL;

		if (!qp_to_send_context(qp, sc))
			return -EINVAL;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		sc = ah_to_sc(ibqp->device, &attr->alt_ah_attr);
		if (sc == 0xf)
			return -EINVAL;

		if (!qp_to_sdma_engine(qp, sc) &&
		    dd->flags & HFI1_HAS_SEND_DMA)
			return -EINVAL;

		if (!qp_to_send_context(qp, sc))
			return -EINVAL;
	}

	return 0;
}
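
/*
 * Driver hook called by rdmavt once a modify QP request has been
 * accepted: refresh the cached SC, SDMA engine, and send context.
 */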
void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
		    int attr_mask, struct ib_udata *udata)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct hfi1_qp_priv *priv = qp->priv;

	if (attr_mask & IB_QP_AV) {
		priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
		priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
		priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc);
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE &&
	    attr->path_mig_state == IB_MIG_MIGRATED &&
	    qp->s_mig_state == IB_MIG_ARMED) {
		qp->s_flags |= RVT_S_AHG_CLEAR;
		priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
		priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
		priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc);
	}
}
/**
 * hfi1_check_send_wqe - validate wqe
 * @qp - The qp
 * @wqe - The built wqe
 *
 * Validate the wqe.  This is called prior to inserting the wqe into
 * the ring, but after the wqe has been built.
 *
 * Returns 0 on success, -EINVAL on failure
 */
int hfi1_check_send_wqe(struct rvt_qp *qp,
			struct rvt_swqe *wqe)
{
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct rvt_ah *ah;

	switch (qp->ibqp.qp_type) {
	case IB_QPT_RC:
	case IB_QPT_UC:
		if (wqe->length > 0x80000000U)
			return -EINVAL;
		break;
	case IB_QPT_SMI:
		ah = ibah_to_rvtah(wqe->ud_wr.ah);
		if (wqe->length > (1 << ah->log_pmtu))
			return -EINVAL;
		break;
	case IB_QPT_GSI:
	case IB_QPT_UD:
		ah = ibah_to_rvtah(wqe->ud_wr.ah);
		if (wqe->length > (1 << ah->log_pmtu))
			return -EINVAL;
		if (ibp->sl_to_sc[rdma_ah_get_sl(&ah->attr)] == 0xf)
			return -EINVAL;
	default:
		break;
	}
	return wqe->length <= piothreshold;
}
/**
 * _hfi1_schedule_send - schedule progress
 * @qp: the QP
 *
 * This schedules qp progress w/o regard to the s_flags.
 *
 * It is only used in the post send, which doesn't hold
 * the s_lock.
 */
void _hfi1_schedule_send(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ibport *ibp =
		to_iport(qp->ibqp.device, qp->port_num);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);

	iowait_schedule(&priv->s_iowait, ppd->hfi1_wq,
			priv->s_sde ?
			priv->s_sde->cpu :
			cpumask_first(cpumask_of_node(dd->node)));
}
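
/*
 * Wait for any outstanding PIO send credits on this QP to be returned,
 * enabling the send context's credit-return interrupt while waiting.
 */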
static void qp_pio_drain(struct rvt_qp *qp)
{
	struct hfi1_ibdev *dev;
	struct hfi1_qp_priv *priv = qp->priv;

	if (!priv->s_sendcontext)
		return;
	dev = to_idev(qp->ibqp.device);
	while (iowait_pio_pending(&priv->s_iowait)) {
		write_seqlock_irq(&dev->iowait_lock);
		hfi1_sc_wantpiobuf_intr(priv->s_sendcontext, 1);
		write_sequnlock_irq(&dev->iowait_lock);
		iowait_pio_drain(&priv->s_iowait);
		write_seqlock_irq(&dev->iowait_lock);
		hfi1_sc_wantpiobuf_intr(priv->s_sendcontext, 0);
		write_sequnlock_irq(&dev->iowait_lock);
	}
}
/**
 * hfi1_schedule_send - schedule progress
 * @qp: the QP
 *
 * This schedules qp progress; the caller must hold
 * the s_lock.
 */
void hfi1_schedule_send(struct rvt_qp *qp)
{
	lockdep_assert_held(&qp->s_lock);
	if (hfi1_send_ok(qp))
		_hfi1_schedule_send(qp);
}
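
/*
 * Clear the given wait flag and, if it was set, reschedule the QP's
 * send engine.
 */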
void hfi1_qp_wakeup(struct rvt_qp *qp, u32 flag)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (qp->s_flags & flag) {
		qp->s_flags &= ~flag;
		trace_hfi1_qpwakeup(qp, flag);
		hfi1_schedule_send(qp);
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);
	/* Notify hfi1_destroy_qp() if it is waiting. */
	rvt_put_qp(qp);
}
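
/*
 * iowait_sleep - sdma callback invoked when descriptors are exhausted.
 *
 * Queue the txreq on the QP's iowait and put the QP to sleep on the
 * engine's dmawait list; returns -EBUSY after going to sleep, or
 * -EAGAIN if the engine made progress in the meantime.
 */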
static int iowait_sleep(
	struct sdma_engine *sde,
	struct iowait *wait,
	struct sdma_txreq *stx,
	unsigned int seq)
{
	struct verbs_txreq *tx = container_of(stx, struct verbs_txreq, txreq);
	struct rvt_qp *qp;
	struct hfi1_qp_priv *priv;
	unsigned long flags;
	int ret = 0;
	struct hfi1_ibdev *dev;

	qp = tx->qp;
	priv = qp->priv;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
		/*
		 * If we couldn't queue the DMA request, save the info
		 * and try again later rather than destroying the
		 * buffer and undoing the side effects of the copy.
		 */
		/* Make a common routine? */
		dev = &sde->dd->verbs_dev;
		list_add_tail(&stx->list, &wait->tx_head);
		write_seqlock(&dev->iowait_lock);
		if (sdma_progress(sde, seq, stx))
			goto eagain;
		if (list_empty(&priv->s_iowait.list)) {
			struct hfi1_ibport *ibp =
				to_iport(qp->ibqp.device, qp->port_num);

			ibp->rvp.n_dmawait++;
			qp->s_flags |= RVT_S_WAIT_DMA_DESC;
			list_add_tail(&priv->s_iowait.list, &sde->dmawait);
			priv->s_iowait.lock = &dev->iowait_lock;
			trace_hfi1_qpsleep(qp, RVT_S_WAIT_DMA_DESC);
			rvt_get_qp(qp);
		}
		write_sequnlock(&dev->iowait_lock);
		qp->s_flags &= ~RVT_S_BUSY;
		spin_unlock_irqrestore(&qp->s_lock, flags);
		ret = -EBUSY;
	} else {
		spin_unlock_irqrestore(&qp->s_lock, flags);
		hfi1_put_txreq(tx);
	}
	return ret;
eagain:
	write_sequnlock(&dev->iowait_lock);
	spin_unlock_irqrestore(&qp->s_lock, flags);
	list_del_init(&stx->list);
	return -EAGAIN;
}
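
/* sdma callback: descriptors are available again, wake up the QP. */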
static void iowait_wakeup(struct iowait *wait, int reason)
{
	struct rvt_qp *qp = iowait_to_qp(wait);

	WARN_ON(reason != SDMA_AVAIL_REASON);
	hfi1_qp_wakeup(qp, RVT_S_WAIT_DMA_DESC);
}
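
/* sdma callback: all of the QP's queued sdma work has completed. */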
static void iowait_sdma_drained(struct iowait *wait)
{
	struct rvt_qp *qp = iowait_to_qp(wait);
	unsigned long flags;

	/*
	 * This happens when the send engine notes
	 * a QP in the error state and cannot
	 * do the flush work until that QP's
	 * sdma work has finished.
	 */
	spin_lock_irqsave(&qp->s_lock, flags);
	if (qp->s_flags & RVT_S_WAIT_DMA) {
		qp->s_flags &= ~RVT_S_WAIT_DMA;
		hfi1_schedule_send(qp);
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);
}
/**
 * qp_to_sdma_engine - map a qp to a send engine
 * @qp: the QP
 * @sc5: the 5 bit sc
 *
 * Return:
 * A send engine for the qp or NULL for SMI type qp.
 */
struct sdma_engine *qp_to_sdma_engine(struct rvt_qp *qp, u8 sc5)
{
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	struct sdma_engine *sde;

	if (!(dd->flags & HFI1_HAS_SEND_DMA))
		return NULL;
	switch (qp->ibqp.qp_type) {
	case IB_QPT_SMI:
		return NULL;
	default:
		break;
	}
	sde = sdma_select_engine_sc(dd, qp->ibqp.qp_num >> dd->qos_shift, sc5);
	return sde;
}
/*
 * qp_to_send_context - map a qp to a send context
 * @qp: the QP
 * @sc5: the 5 bit sc
 *
 * Return:
 * A send context for the qp
 */
struct send_context *qp_to_send_context(struct rvt_qp *qp, u8 sc5)
{
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);

	switch (qp->ibqp.qp_type) {
	case IB_QPT_SMI:
		/* SMA packets to VL15 */
		return dd->vld[15].sc;
	default:
		break;
	}

	return pio_select_send_context_sc(dd, qp->ibqp.qp_num >> dd->qos_shift,
					  sc5);
}
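
/*
 * debugfs QP state iterator: walks the two special QPs per port and
 * then every bucket of the rdmavt QP hash table.
 */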
struct qp_iter {
	struct hfi1_ibdev *dev;
	struct rvt_qp *qp;
	int specials;
	int n;
};

struct qp_iter *qp_iter_init(struct hfi1_ibdev *dev)
{
	struct qp_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	iter->specials = dev->rdi.ibdev.phys_port_cnt * 2;

	return iter;
}
int qp_iter_next(struct qp_iter *iter)
{
	struct hfi1_ibdev *dev = iter->dev;
	int n = iter->n;
	int ret = 1;
	struct rvt_qp *pqp = iter->qp;
	struct rvt_qp *qp;

	/*
	 * The approach is to consider the special qps
	 * as additional table entries before the
	 * real hash table.  Since the qp code sets
	 * the qp->next hash link to NULL, this works just fine.
	 *
	 * iter->specials is 2 * # ports
	 *
	 * n = 0..iter->specials is the special qp indices
	 *
	 * n = iter->specials..dev->rdi.qp_dev->qp_table_size+iter->specials are
	 * the potential hash bucket entries
	 */
	for (; n < dev->rdi.qp_dev->qp_table_size + iter->specials; n++) {
		if (pqp) {
			qp = rcu_dereference(pqp->next);
		} else {
			if (n < iter->specials) {
				struct hfi1_pportdata *ppd;
				struct hfi1_ibport *ibp;
				int pidx;

				pidx = n % dev->rdi.ibdev.phys_port_cnt;
				ppd = &dd_from_dev(dev)->pport[pidx];
				ibp = &ppd->ibport_data;

				if (!(n & 1))
					qp = rcu_dereference(ibp->rvp.qp[0]);
				else
					qp = rcu_dereference(ibp->rvp.qp[1]);
			} else {
				qp = rcu_dereference(
					dev->rdi.qp_dev->qp_table[
						(n - iter->specials)]);
			}
		}
		pqp = qp;
		if (qp) {
			iter->qp = qp;
			iter->n = n;
			return 0;
		}
	}
	return ret;
}
static const char * const qp_type_str[] = {
	"SMI", "GSI", "RC", "UC", "UD",
};

static int qp_idle(struct rvt_qp *qp)
{
	return
		qp->s_last == qp->s_acked &&
		qp->s_acked == qp->s_cur &&
		qp->s_cur == qp->s_tail &&
		qp->s_tail == qp->s_head;
}
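
/* Dump one line of state for the iterator's current QP to the seq_file. */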
void qp_iter_print(struct seq_file *s, struct qp_iter *iter)
{
	struct rvt_swqe *wqe;
	struct rvt_qp *qp = iter->qp;
	struct hfi1_qp_priv *priv = qp->priv;
	struct sdma_engine *sde;
	struct send_context *send_context;

	sde = qp_to_sdma_engine(qp, priv->s_sc);
	wqe = rvt_get_swqe_ptr(qp, qp->s_last);
	send_context = qp_to_send_context(qp, priv->s_sc);
	seq_printf(s,
		   "N %d %s QP %x R %u %s %u %u %u f=%x %u %u %u %u %u %u SPSN %x %x %x %x %x RPSN %x (%u %u %u %u %u %u %u) RQP %x LID %x SL %u MTU %u %u %u %u %u SDE %p,%u SC %p,%u SCQ %u %u PID %d\n",
		   iter->n,
		   qp_idle(qp) ? "I" : "B",
		   qp->ibqp.qp_num,
		   atomic_read(&qp->refcount),
		   qp_type_str[qp->ibqp.qp_type],
		   qp->state,
		   wqe ? wqe->wr.opcode : 0,
		   qp->s_hdrwords,
		   qp->s_flags,
		   iowait_sdma_pending(&priv->s_iowait),
		   iowait_pio_pending(&priv->s_iowait),
		   !list_empty(&priv->s_iowait.list),
		   qp->timeout,
		   wqe ? wqe->ssn : 0,
		   qp->s_lsn,
		   qp->s_last_psn,
		   qp->s_psn, qp->s_next_psn,
		   qp->s_sending_psn, qp->s_sending_hpsn,
		   qp->r_psn,
		   qp->s_last, qp->s_acked, qp->s_cur,
		   qp->s_tail, qp->s_head, qp->s_size,
		   qp->s_avail,
		   qp->remote_qpn,
		   rdma_ah_get_dlid(&qp->remote_ah_attr),
		   rdma_ah_get_sl(&qp->remote_ah_attr),
		   qp->pmtu,
		   qp->s_retry,
		   qp->s_retry_cnt,
		   qp->s_rnr_retry_cnt,
		   qp->s_rnr_retry,
		   sde,
		   sde ? sde->this_idx : 0,
		   send_context,
		   send_context ? send_context->sw_index : 0,
		   ibcq_to_rvtcq(qp->ibqp.send_cq)->queue->head,
		   ibcq_to_rvtcq(qp->ibqp.send_cq)->queue->tail,
		   qp->pid);
}
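
/*
 * Driver hook called by rdmavt at QP creation: allocate the hfi1
 * private QP state and initialize its iowait callbacks.
 */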
void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp,
		    gfp_t gfp)
{
	struct hfi1_qp_priv *priv;

	priv = kzalloc_node(sizeof(*priv), gfp, rdi->dparms.node);
	if (!priv)
		return ERR_PTR(-ENOMEM);

	priv->owner = qp;

	priv->s_ahg = kzalloc_node(sizeof(*priv->s_ahg), gfp,
				   rdi->dparms.node);
	if (!priv->s_ahg) {
		kfree(priv);
		return ERR_PTR(-ENOMEM);
	}
	iowait_init(
		&priv->s_iowait,
		1,
		_hfi1_do_send,
		iowait_sleep,
		iowait_wakeup,
		iowait_sdma_drained);
	return priv;
}
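
/* Driver hook called by rdmavt at QP destruction: free the private state. */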
void qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	kfree(priv->s_ahg);
	kfree(priv);
}
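
/*
 * Driver hook called by rdmavt at unregister time: return the number of
 * special QPs (QP0/QP1) still in use across all ports.
 */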
unsigned free_all_qps(struct rvt_dev_info *rdi)
{
	struct hfi1_ibdev *verbs_dev = container_of(rdi,
						    struct hfi1_ibdev,
						    rdi);
	struct hfi1_devdata *dd = container_of(verbs_dev,
					       struct hfi1_devdata,
					       verbs_dev);
	int n;
	unsigned qp_inuse = 0;

	for (n = 0; n < dd->num_pports; n++) {
		struct hfi1_ibport *ibp = &dd->pport[n].ibport_data;

		rcu_read_lock();
		if (rcu_dereference(ibp->rvp.qp[0]))
			qp_inuse++;
		if (rcu_dereference(ibp->rvp.qp[1]))
			qp_inuse++;
		rcu_read_unlock();
	}

	return qp_inuse;
}
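
/* Driver hook: discard anything the QP is waiting on; caller holds s_lock. */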
void flush_qp_waiters(struct rvt_qp *qp)
{
	lockdep_assert_held(&qp->s_lock);
	flush_iowait(qp);
}
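
/* Driver hook: cancel any pending send engine work for this QP. */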
void stop_send_queue(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	cancel_work_sync(&priv->s_iowait.iowork);
}
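
/* Driver hook: drain all sdma and PIO traffic before the QP goes away. */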
void quiesce_qp(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	iowait_sdma_drain(&priv->s_iowait);
	qp_pio_drain(qp);
	flush_tx_list(qp);
}
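
/* Driver hook: reset driver private send state when the QP enters RESET. */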
void notify_qp_reset(struct rvt_qp *qp)
{
	clear_ahg(qp);
}
/*
 * Switch to alternate path.
 * The QP s_lock should be held and interrupts disabled.
 */
void hfi1_migrate_qp(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct ib_event ev;

	qp->s_mig_state = IB_MIG_MIGRATED;
	qp->remote_ah_attr = qp->alt_ah_attr;
	qp->port_num = rdma_ah_get_port_num(&qp->alt_ah_attr);
	qp->s_pkey_index = qp->s_alt_pkey_index;
	qp->s_flags |= RVT_S_AHG_CLEAR;
	priv->s_sc = ah_to_sc(qp->ibqp.device, &qp->remote_ah_attr);
	priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);

	ev.device = qp->ibqp.device;
	ev.element.qp = &qp->ibqp;
	ev.event = IB_EVENT_PATH_MIG;
	qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
}
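
/*
 * MTU helpers for rdmavt: OPA extends the IB MTU enum with 8K and 10K
 * values, so conversions go through the OPA-aware helpers above.
 */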
int mtu_to_path_mtu(u32 mtu)
{
	return mtu_to_enum(mtu, OPA_MTU_8192);
}
u32 mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu)
{
	u32 mtu;
	struct hfi1_ibdev *verbs_dev = container_of(rdi,
						    struct hfi1_ibdev,
						    rdi);
	struct hfi1_devdata *dd = container_of(verbs_dev,
					       struct hfi1_devdata,
					       verbs_dev);
	struct hfi1_ibport *ibp;
	u8 sc, vl;

	ibp = &dd->pport[qp->port_num - 1].ibport_data;
	sc = ibp->sl_to_sc[rdma_ah_get_sl(&qp->remote_ah_attr)];
	vl = sc_to_vlt(dd, sc);

	mtu = verbs_mtu_enum_to_int(qp->ibqp.device, pmtu);
	if (vl < PER_VL_SEND_CONTEXTS)
		mtu = min_t(u32, mtu, dd->vld[vl].mtu);
	return mtu;
}
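
/*
 * Driver hook: validate the path MTU in a modify QP request, clamping
 * it to the port's MTU.
 */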
int get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp,
		       struct ib_qp_attr *attr)
{
	int mtu, pidx = qp->port_num - 1;
	struct hfi1_ibdev *verbs_dev = container_of(rdi,
						    struct hfi1_ibdev,
						    rdi);
	struct hfi1_devdata *dd = container_of(verbs_dev,
					       struct hfi1_devdata,
					       verbs_dev);
	mtu = verbs_mtu_enum_to_int(qp->ibqp.device, attr->path_mtu);
	if (mtu == -1)
		return -1; /* values less than 0 are error */

	if (mtu > dd->pport[pidx].ibmtu)
		return mtu_to_enum(dd->pport[pidx].ibmtu, IB_MTU_2048);
	else
		return attr->path_mtu;
}
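
/*
 * Driver hook called when the QP transitions to the error state:
 * remove it from any wait list and release queued send resources.
 */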
void notify_error_qp(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	seqlock_t *lock = priv->s_iowait.lock;

	if (lock) {
		write_seqlock(lock);
		if (!list_empty(&priv->s_iowait.list) &&
		    !(qp->s_flags & RVT_S_BUSY)) {
			qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
			list_del_init(&priv->s_iowait.list);
			priv->s_iowait.lock = NULL;
			rvt_put_qp(qp);
		}
		write_sequnlock(lock);
	}

	if (!(qp->s_flags & RVT_S_BUSY)) {
		qp->s_hdrwords = 0;
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
		flush_tx_list(qp);
	}
}
/**
 * hfi1_error_port_qps - put a port's RC/UC qps into error state
 * @ibp: the ibport.
 * @sl: the service level.
 *
 * This function places all RC/UC qps with a given service level into error
 * state. It is generally called to force upper layer apps to abandon stale
 * qps after an sl->sc mapping change.
 */
void hfi1_error_port_qps(struct hfi1_ibport *ibp, u8 sl)
{
	struct rvt_qp *qp = NULL;
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	struct hfi1_ibdev *dev = &ppd->dd->verbs_dev;
	int n;
	int lastwqe;
	struct ib_event ev;

	rcu_read_lock();

	/* Deal only with RC/UC qps that use the given SL. */
	for (n = 0; n < dev->rdi.qp_dev->qp_table_size; n++) {
		for (qp = rcu_dereference(dev->rdi.qp_dev->qp_table[n]); qp;
		     qp = rcu_dereference(qp->next)) {
			if (qp->port_num == ppd->port &&
			    (qp->ibqp.qp_type == IB_QPT_UC ||
			     qp->ibqp.qp_type == IB_QPT_RC) &&
			    rdma_ah_get_sl(&qp->remote_ah_attr) == sl &&
			    (ib_rvt_state_ops[qp->state] &
			     RVT_POST_SEND_OK)) {
				spin_lock_irq(&qp->r_lock);
				spin_lock(&qp->s_hlock);
				spin_lock(&qp->s_lock);
				lastwqe = rvt_error_qp(qp,
						       IB_WC_WR_FLUSH_ERR);
				spin_unlock(&qp->s_lock);
				spin_unlock(&qp->s_hlock);
				spin_unlock_irq(&qp->r_lock);
				if (lastwqe) {
					ev.device = qp->ibqp.device;
					ev.element.qp = &qp->ibqp;
					ev.event =
						IB_EVENT_QP_LAST_WQE_REACHED;
					qp->ibqp.event_handler(&ev,
						qp->ibqp.qp_context);
				}
			}
		}
	}
	rcu_read_unlock();
}