/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/hash.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/seq_file.h>
#include <rdma/rdma_vt.h>
#include <rdma/rdmavt_qp.h>

#include "hfi.h"
#include "qp.h"
#include "trace.h"
#include "verbs_txreq.h"
unsigned int hfi1_qp_table_size = 256;
module_param_named(qp_table_size, hfi1_qp_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(qp_table_size, "QP table size");
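/*
 * The parameter is read-only at runtime (S_IRUGO), so it must be set at
 * module load, for example:
 *
 *	modprobe hfi1 qp_table_size=1024
 *
 * and can be read back from /sys/module/hfi1/parameters/qp_table_size.
 */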

static void flush_tx_list(struct rvt_qp *qp);
static int iowait_sleep(
	struct sdma_engine *sde,
	struct iowait *wait,
	struct sdma_txreq *stx,
	unsigned seq);
static void iowait_wakeup(struct iowait *wait, int reason);

static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
			      struct rvt_qpn_map *map, unsigned off)
{
	return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}
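/*
 * Worked example: with 4 KiB pages, RVT_BITS_PER_PAGE is 32768, so a free
 * bit at offset 5 in the third map page (map index 2) decodes to
 * QPN 2 * 32768 + 5 = 65541.
 */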

/*
 * Convert the AETH credit code into the number of credits.
 */
static const u16 credit_table[31] = {
	0, 1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
	256, 384, 512, 768, 1024, 1536, 2048, 3072, 4096, 6144, 8192,
	12288, 16384, 24576, 32768
};
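/*
 * The encoding is approximately geometric (alternating x1.5 and x1.33
 * steps after the first few entries), so the 5-bit AETH credit field
 * spans 0 to 32768 RWQEs. For example, code 0x7 advertises 12 credits
 * and code 0xE advertises 128.
 */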

static void flush_tx_list(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	while (!list_empty(&priv->s_iowait.tx_head)) {
		struct sdma_txreq *tx;

		tx = list_first_entry(
			&priv->s_iowait.tx_head,
			struct sdma_txreq,
			list);
		list_del_init(&tx->list);
		hfi1_put_txreq(
			container_of(tx, struct verbs_txreq, txreq));
	}
}

static void flush_iowait(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
	unsigned long flags;

	write_seqlock_irqsave(&dev->iowait_lock, flags);
	if (!list_empty(&priv->s_iowait.list)) {
		list_del_init(&priv->s_iowait.list);
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
	write_sequnlock_irqrestore(&dev->iowait_lock, flags);
}

static inline int opa_mtu_enum_to_int(int mtu)
{
	switch (mtu) {
	case OPA_MTU_8192:  return 8192;
	case OPA_MTU_10240: return 10240;
	default:            return -1;
	}
}

/**
 * This function is what we would push to the core layer if we wanted to be a
 * "first class citizen".  Instead we hide this here and rely on Verbs ULPs
 * to blindly pass the MTU enum value from the PathRecord to us.
 *
 * The actual flag used to determine "8k MTU" will change and is currently
 * unknown.
 */
static inline int verbs_mtu_enum_to_int(struct ib_device *dev, enum ib_mtu mtu)
{
	int val = opa_mtu_enum_to_int((int)mtu);

	if (val > 0)
		return val;
	return ib_mtu_enum_to_int(mtu);
}
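/*
 * For example, the standard IB enums map as usual
 * (verbs_mtu_enum_to_int(dev, IB_MTU_4096) == 4096), while the OPA
 * extensions beyond the IB range resolve through the switch above
 * (OPA_MTU_8192 == 8192, OPA_MTU_10240 == 10240).
 */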

int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
			 int attr_mask, struct ib_udata *udata)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct hfi1_ibdev *dev = to_idev(ibqp->device);
	struct hfi1_devdata *dd = dd_from_dev(dev);
	u8 sc;

	if (attr_mask & IB_QP_AV) {
		sc = ah_to_sc(ibqp->device, &attr->ah_attr);
		if (sc == 0xf)
			return -EINVAL;

		if (!qp_to_sdma_engine(qp, sc) &&
		    dd->flags & HFI1_HAS_SEND_DMA)
			return -EINVAL;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		sc = ah_to_sc(ibqp->device, &attr->alt_ah_attr);
		if (sc == 0xf)
			return -EINVAL;

		if (!qp_to_sdma_engine(qp, sc) &&
		    dd->flags & HFI1_HAS_SEND_DMA)
			return -EINVAL;
	}

	return 0;
}

void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
		    int attr_mask, struct ib_udata *udata)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct hfi1_qp_priv *priv = qp->priv;

	if (attr_mask & IB_QP_AV) {
		priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
		priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE &&
	    attr->path_mig_state == IB_MIG_MIGRATED &&
	    qp->s_mig_state == IB_MIG_ARMED) {
		qp->s_flags |= RVT_S_AHG_CLEAR;
		priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
		priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
	}
}

int hfi1_check_send_wr(struct rvt_qp *qp, struct ib_send_wr *wr)
{
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct rvt_ah *ah = ibah_to_rvtah(ud_wr(wr)->ah);

	if (qp->ibqp.qp_type != IB_QPT_RC &&
	    qp->ibqp.qp_type != IB_QPT_UC &&
	    qp->ibqp.qp_type != IB_QPT_SMI &&
	    ibp->sl_to_sc[ah->attr.sl] == 0xf) {
		return -EINVAL;
	}

	return 0;
}

/**
 * hfi1_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 hfi1_compute_aeth(struct rvt_qp *qp)
{
	u32 aeth = qp->r_msn & HFI1_MSN_MASK;

	if (qp->ibqp.srq) {
		/*
		 * Shared receive queues don't generate credits.
		 * Set the credit field to the invalid value.
		 */
		aeth |= HFI1_AETH_CREDIT_INVAL << HFI1_AETH_CREDIT_SHIFT;
	} else {
		u32 min, max, x;
		u32 credits;
		struct rvt_rwq *wq = qp->r_rq.wq;
		u32 head;
		u32 tail;

		/* sanity check pointers before trusting them */
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		/*
		 * Compute the number of credits available (RWQEs).
		 * There is a small chance that the pair of reads is
		 * not atomic, which is OK, since the fuzziness is
		 * resolved as further ACKs go out.
		 */
		credits = head - tail;
		if ((int)credits < 0)
			credits += qp->r_rq.size;
		/*
		 * Binary search the credit table to find the code to
		 * use.
		 */
		min = 0;
		max = 31;
		for (;;) {
			x = (min + max) / 2;
			if (credit_table[x] == credits)
				break;
			if (credit_table[x] > credits)
				max = x;
			else if (min == x)
				break;
			else
				min = x;
		}
		aeth |= x << HFI1_AETH_CREDIT_SHIFT;
	}
	return cpu_to_be32(aeth);
}
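/*
 * Worked example: with 100 free RWQEs the search above settles on
 * x == 0xD (credit_table[0xD] == 96, the largest entry not exceeding
 * 100), so the receiver slightly under-advertises rather than promise
 * credits it does not have.
 */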

/**
 * hfi1_get_credit - process the credit information in an AETH
 * @qp: the qp whose send credit state to update
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void hfi1_get_credit(struct rvt_qp *qp, u32 aeth)
{
	u32 credit = (aeth >> HFI1_AETH_CREDIT_SHIFT) & HFI1_AETH_CREDIT_MASK;

	/*
	 * If the credit is invalid, we can send
	 * as many packets as we like.  Otherwise, we have to
	 * honor the credit field.
	 */
	if (credit == HFI1_AETH_CREDIT_INVAL) {
		if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
			qp->s_flags |= RVT_S_UNLIMITED_CREDIT;
			if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
				hfi1_schedule_send(qp);
			}
		}
	} else if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
		/* Compute new LSN (i.e., MSN + credit) */
		credit = (aeth + credit_table[credit]) & HFI1_MSN_MASK;
		if (cmp_msn(credit, qp->s_lsn) > 0) {
			qp->s_lsn = credit;
			if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
				hfi1_schedule_send(qp);
			}
		}
	}
}
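/*
 * Example: an AETH whose credit field decodes to 0x8 extends the limit
 * sequence number to MSN + 16 (credit_table[0x8]), and a sender blocked
 * on RVT_S_WAIT_SSN_CREDIT is rescheduled as soon as that limit moves
 * forward.
 */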

void hfi1_qp_wakeup(struct rvt_qp *qp, u32 flag)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (qp->s_flags & flag) {
		qp->s_flags &= ~flag;
		trace_hfi1_qpwakeup(qp, flag);
		hfi1_schedule_send(qp);
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);
	/* Notify hfi1_destroy_qp() if it is waiting. */
	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}

static int iowait_sleep(
	struct sdma_engine *sde,
	struct iowait *wait,
	struct sdma_txreq *stx,
	unsigned seq)
{
	struct verbs_txreq *tx = container_of(stx, struct verbs_txreq, txreq);
	struct rvt_qp *qp;
	struct hfi1_qp_priv *priv;
	unsigned long flags;
	int ret = 0;
	struct hfi1_ibdev *dev;

	qp = tx->qp;
	priv = qp->priv;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
		/*
		 * If we couldn't queue the DMA request, save the info
		 * and try again later rather than destroying the
		 * buffer and undoing the side effects of the copy.
		 */
		/* Make a common routine? */
		dev = &sde->dd->verbs_dev;
		list_add_tail(&stx->list, &wait->tx_head);
		write_seqlock(&dev->iowait_lock);
		if (sdma_progress(sde, seq, stx))
			goto eagain;
		if (list_empty(&priv->s_iowait.list)) {
			struct hfi1_ibport *ibp =
				to_iport(qp->ibqp.device, qp->port_num);

			ibp->rvp.n_dmawait++;
			qp->s_flags |= RVT_S_WAIT_DMA_DESC;
			list_add_tail(&priv->s_iowait.list, &sde->dmawait);
			trace_hfi1_qpsleep(qp, RVT_S_WAIT_DMA_DESC);
			atomic_inc(&qp->refcount);
		}
		write_sequnlock(&dev->iowait_lock);
		qp->s_flags &= ~RVT_S_BUSY;
		spin_unlock_irqrestore(&qp->s_lock, flags);
		ret = -EBUSY;
	} else {
		spin_unlock_irqrestore(&qp->s_lock, flags);
		hfi1_put_txreq(tx);
	}
	return ret;
eagain:
	write_sequnlock(&dev->iowait_lock);
	spin_unlock_irqrestore(&qp->s_lock, flags);
	list_del_init(&stx->list);
	return -EAGAIN;
}
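/*
 * The return value is the contract with the SDMA engine: -EBUSY means the
 * descriptor was parked on wait->tx_head and the QP queued on the engine's
 * dmawait list until iowait_wakeup() fires; -EAGAIN means the ring made
 * progress while the lock was held, so the caller should simply resubmit.
 */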

static void iowait_wakeup(struct iowait *wait, int reason)
{
	struct rvt_qp *qp = iowait_to_qp(wait);

	WARN_ON(reason != SDMA_AVAIL_REASON);
	hfi1_qp_wakeup(qp, RVT_S_WAIT_DMA_DESC);
}

/**
 * qp_to_sdma_engine - map a qp to a send engine
 * @qp: the QP
 * @sc5: the 5 bit sc
 *
 * Return:
 * A send engine for the qp or NULL for SMI type qp.
 */
struct sdma_engine *qp_to_sdma_engine(struct rvt_qp *qp, u8 sc5)
{
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	struct sdma_engine *sde;

	if (!(dd->flags & HFI1_HAS_SEND_DMA))
		return NULL;
	switch (qp->ibqp.qp_type) {
	case IB_QPT_SMI:
		return NULL;
	default:
		break;
	}
	sde = sdma_select_engine_sc(dd, qp->ibqp.qp_num >> dd->qos_shift, sc5);
	return sde;
}
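/*
 * Engine selection keys on the QPN bits above the QoS shift together with
 * the 5-bit service class, so all traffic for a given QP and SC maps to a
 * single engine, keeping a flow's descriptors submitted in order.
 */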

struct qp_iter {
	struct hfi1_ibdev *dev;
	struct rvt_qp *qp;
	int specials;
	int n;
};

struct qp_iter *qp_iter_init(struct hfi1_ibdev *dev)
{
	struct qp_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	iter->specials = dev->rdi.ibdev.phys_port_cnt * 2;
	if (qp_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}

int qp_iter_next(struct qp_iter *iter)
{
	struct hfi1_ibdev *dev = iter->dev;
	int n = iter->n;
	int ret = 1;
	struct rvt_qp *pqp = iter->qp;
	struct rvt_qp *qp;

	/*
	 * The approach is to consider the special qps
	 * as additional table entries before the
	 * real hash table.  Since the qp code sets
	 * the qp->next hash link to NULL, this works just fine.
	 *
	 * iter->specials is 2 * # ports
	 *
	 * n = 0..iter->specials is the special qp indices
	 *
	 * n = iter->specials..dev->rdi.qp_dev->qp_table_size+iter->specials are
	 * the potential hash bucket entries
	 */
	for (; n < dev->rdi.qp_dev->qp_table_size + iter->specials; n++) {
		if (pqp) {
			qp = rcu_dereference(pqp->next);
		} else {
			if (n < iter->specials) {
				struct hfi1_pportdata *ppd;
				struct hfi1_ibport *ibp;
				int pidx;

				pidx = n % dev->rdi.ibdev.phys_port_cnt;
				ppd = &dd_from_dev(dev)->pport[pidx];
				ibp = &ppd->ibport_data;

				if (!(n & 1))
					qp = rcu_dereference(ibp->rvp.qp[0]);
				else
					qp = rcu_dereference(ibp->rvp.qp[1]);
			} else {
				qp = rcu_dereference(
					dev->rdi.qp_dev->qp_table[
						(n - iter->specials)]);
			}
		}
		pqp = qp;
		if (qp) {
			iter->qp = qp;
			iter->n = n;
			return 0;
		}
	}
	return ret;
}
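/*
 * A minimal driver loop for the iterator (sketch; the real consumer is
 * the debugfs seq_file code, and the names s/dev here are illustrative).
 * The walk relies on rcu_dereference(), so it must run inside an RCU
 * read-side critical section:
 *
 *	iter = qp_iter_init(dev);
 *	if (iter) {
 *		do {
 *			qp_iter_print(s, iter);
 *		} while (!qp_iter_next(iter));
 *		kfree(iter);
 *	}
 *
 * qp_iter_init() already positions the cursor on the first live QP, so
 * the loop prints before each advance and stops once qp_iter_next()
 * returns non-zero.
 */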

static const char * const qp_type_str[] = {
	"SMI", "GSI", "RC", "UC", "UD",
};

static int qp_idle(struct rvt_qp *qp)
{
	return
		qp->s_last == qp->s_acked &&
		qp->s_acked == qp->s_cur &&
		qp->s_cur == qp->s_tail &&
		qp->s_tail == qp->s_head;
}

void qp_iter_print(struct seq_file *s, struct qp_iter *iter)
{
	struct rvt_swqe *wqe;
	struct rvt_qp *qp = iter->qp;
	struct hfi1_qp_priv *priv = qp->priv;
	struct sdma_engine *sde;

	sde = qp_to_sdma_engine(qp, priv->s_sc);
	wqe = rvt_get_swqe_ptr(qp, qp->s_last);
	seq_printf(s,
		   "N %d %s QP%u R %u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u) QP%u LID %x SL %u MTU %u %u %u SDE %p,%u\n",
		   iter->n,
		   qp_idle(qp) ? "I" : "B",
		   qp->ibqp.qp_num,
		   atomic_read(&qp->refcount),
		   qp_type_str[qp->ibqp.qp_type],
		   qp->state,
		   wqe ? wqe->wr.opcode : 0,
		   qp->s_hdrwords,
		   qp->s_flags,
		   atomic_read(&priv->s_iowait.sdma_busy),
		   !list_empty(&priv->s_iowait.list),
		   qp->timeout,
		   wqe ? wqe->ssn : 0,
		   qp->s_lsn,
		   qp->s_last_psn,
		   qp->s_psn, qp->s_next_psn,
		   qp->s_sending_psn, qp->s_sending_hpsn,
		   qp->s_last, qp->s_acked, qp->s_cur,
		   qp->s_tail, qp->s_head, qp->s_size,
		   qp->remote_qpn,
		   qp->remote_ah_attr.dlid,
		   qp->remote_ah_attr.sl,
		   qp->pmtu,
		   qp->s_retry_cnt,
		   qp->s_rnr_retry_cnt,
		   sde,
		   sde ? sde->this_idx : 0);
}

void qp_comm_est(struct rvt_qp *qp)
{
	qp->r_flags |= RVT_R_COMM_EST;
	if (qp->ibqp.event_handler) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_COMM_EST;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
}

void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp,
		    gfp_t gfp)
{
	struct hfi1_qp_priv *priv;

	priv = kzalloc_node(sizeof(*priv), gfp, rdi->dparms.node);
	if (!priv)
		return ERR_PTR(-ENOMEM);

	priv->owner = qp;

	priv->s_hdr = kzalloc_node(sizeof(*priv->s_hdr), gfp, rdi->dparms.node);
	if (!priv->s_hdr) {
		kfree(priv);
		return ERR_PTR(-ENOMEM);
	}
	setup_timer(&priv->s_rnr_timer, hfi1_rc_rnr_retry, (unsigned long)qp);
	qp->s_timer.function = hfi1_rc_timeout;
	return priv;
}

void qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	kfree(priv->s_hdr);
	kfree(priv);
}

unsigned free_all_qps(struct rvt_dev_info *rdi)
{
	struct hfi1_ibdev *verbs_dev = container_of(rdi,
						    struct hfi1_ibdev,
						    rdi);
	struct hfi1_devdata *dd = container_of(verbs_dev,
					       struct hfi1_devdata,
					       verbs_dev);
	int n;
	unsigned qp_inuse = 0;

	for (n = 0; n < dd->num_pports; n++) {
		struct hfi1_ibport *ibp = &dd->pport[n].ibport_data;

		rcu_read_lock();
		if (rcu_dereference(ibp->rvp.qp[0]))
			qp_inuse++;
		if (rcu_dereference(ibp->rvp.qp[1]))
			qp_inuse++;
		rcu_read_unlock();
	}

	return qp_inuse;
}

void flush_qp_waiters(struct rvt_qp *qp)
{
	flush_iowait(qp);
	hfi1_stop_rc_timers(qp);
}

void stop_send_queue(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	cancel_work_sync(&priv->s_iowait.iowork);
	hfi1_del_timers_sync(qp);
}

void quiesce_qp(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	iowait_sdma_drain(&priv->s_iowait);
	flush_tx_list(qp);
}

void notify_qp_reset(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	iowait_init(
		&priv->s_iowait,
		1,
		_hfi1_do_send,
		iowait_sleep,
		iowait_wakeup);
	priv->r_adefered = 0;
	clear_ahg(qp);
}

/*
 * Switch to alternate path.
 * The QP s_lock should be held and interrupts disabled.
 */
void hfi1_migrate_qp(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct ib_event ev;

	qp->s_mig_state = IB_MIG_MIGRATED;
	qp->remote_ah_attr = qp->alt_ah_attr;
	qp->port_num = qp->alt_ah_attr.port_num;
	qp->s_pkey_index = qp->s_alt_pkey_index;
	qp->s_flags |= RVT_S_AHG_CLEAR;
	priv->s_sc = ah_to_sc(qp->ibqp.device, &qp->remote_ah_attr);
	priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);

	ev.device = qp->ibqp.device;
	ev.element.qp = &qp->ibqp;
	ev.event = IB_EVENT_PATH_MIG;
	qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
}

int mtu_to_path_mtu(u32 mtu)
{
	return mtu_to_enum(mtu, OPA_MTU_8192);
}
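/*
 * mtu_to_enum() converts an exact byte count to the corresponding OPA
 * enum (e.g. 8192 -> OPA_MTU_8192) and returns the supplied default,
 * here OPA_MTU_8192, for any unrecognized value.
 */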

u32 mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu)
{
	u32 mtu;
	struct hfi1_ibdev *verbs_dev = container_of(rdi,
						    struct hfi1_ibdev,
						    rdi);
	struct hfi1_devdata *dd = container_of(verbs_dev,
					       struct hfi1_devdata,
					       verbs_dev);
	struct hfi1_ibport *ibp;
	u8 sc, vl;

	ibp = &dd->pport[qp->port_num - 1].ibport_data;
	sc = ibp->sl_to_sc[qp->remote_ah_attr.sl];
	vl = sc_to_vlt(dd, sc);

	mtu = verbs_mtu_enum_to_int(qp->ibqp.device, pmtu);
	if (vl < PER_VL_SEND_CONTEXTS)
		mtu = min_t(u32, mtu, dd->vld[vl].mtu);

	return mtu;
}
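/*
 * The effective MTU is therefore the path MTU clamped by the per-VL MTU
 * configured for the QP's service class: e.g. a QP negotiated at 8192
 * bytes, whose SC maps to a VL limited to 4096, sends 4096-byte packets.
 */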

int get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp,
		       struct ib_qp_attr *attr)
{
	int mtu, pidx = qp->port_num - 1;
	struct hfi1_ibdev *verbs_dev = container_of(rdi,
						    struct hfi1_ibdev,
						    rdi);
	struct hfi1_devdata *dd = container_of(verbs_dev,
					       struct hfi1_devdata,
					       verbs_dev);

	mtu = verbs_mtu_enum_to_int(qp->ibqp.device, attr->path_mtu);
	if (mtu == -1)
		return -1; /* values less than 0 are error */

	if (mtu > dd->pport[pidx].ibmtu)
		return mtu_to_enum(dd->pport[pidx].ibmtu, IB_MTU_2048);
	else
		return attr->path_mtu;
}

void notify_error_qp(struct rvt_qp *qp)
{
	struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
	struct hfi1_qp_priv *priv = qp->priv;

	write_seqlock(&dev->iowait_lock);
	if (!list_empty(&priv->s_iowait.list) && !(qp->s_flags & RVT_S_BUSY)) {
		qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
		list_del_init(&priv->s_iowait.list);
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
	write_sequnlock(&dev->iowait_lock);

	if (!(qp->s_flags & RVT_S_BUSY)) {
		qp->s_hdrwords = 0;
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
		flush_tx_list(qp);
	}
}