/*
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/hash.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/seq_file.h>
#include <rdma/rdma_vt.h>
#include <rdma/rdmavt_qp.h>

#include "hfi.h"
#include "qp.h"
#include "trace.h"
#include "sdma.h"

unsigned int hfi1_qp_table_size = 256;
module_param_named(qp_table_size, hfi1_qp_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(qp_table_size, "QP table size");

static void flush_tx_list(struct rvt_qp *qp);
static int iowait_sleep(
        struct sdma_engine *sde,
        struct iowait *wait,
        struct sdma_txreq *stx,
        unsigned seq);
static void iowait_wakeup(struct iowait *wait, int reason);

static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
                              struct rvt_qpn_map *map, unsigned off)
{
        return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}

/*
 * Convert the AETH credit code into the number of credits.
 */
static const u16 credit_table[31] = {
        0,                      /* 0 */
        1,                      /* 1 */
        2,                      /* 2 */
        3,                      /* 3 */
        4,                      /* 4 */
        6,                      /* 5 */
        8,                      /* 6 */
        12,                     /* 7 */
        16,                     /* 8 */
        24,                     /* 9 */
        32,                     /* A */
        48,                     /* B */
        64,                     /* C */
        96,                     /* D */
        128,                    /* E */
        192,                    /* F */
        256,                    /* 10 */
        384,                    /* 11 */
        512,                    /* 12 */
        768,                    /* 13 */
        1024,                   /* 14 */
        1536,                   /* 15 */
        2048,                   /* 16 */
        3072,                   /* 17 */
        4096,                   /* 18 */
        6144,                   /* 19 */
        8192,                   /* 1A */
        12288,                  /* 1B */
        16384,                  /* 1C */
        24576,                  /* 1D */
        32768                   /* 1E */
};
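
/*
 * Example: an AETH credit code of 0x14 indexes credit_table[0x14] = 1024
 * RWQEs.  Beyond the first few entries the values grow roughly
 * geometrically, so the 5-bit code can describe receive queues of up to
 * 32768 entries.
 */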

static void flush_tx_list(struct rvt_qp *qp)
{
        struct hfi1_qp_priv *priv = qp->priv;

        while (!list_empty(&priv->s_iowait.tx_head)) {
                struct sdma_txreq *tx;

                tx = list_first_entry(
                        &priv->s_iowait.tx_head,
                        struct sdma_txreq,
                        list);
                list_del_init(&tx->list);
                hfi1_put_txreq(
                        container_of(tx, struct verbs_txreq, txreq));
        }
}

static void flush_iowait(struct rvt_qp *qp)
{
        struct hfi1_qp_priv *priv = qp->priv;
        struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
        unsigned long flags;

        write_seqlock_irqsave(&dev->iowait_lock, flags);
        if (!list_empty(&priv->s_iowait.list)) {
                list_del_init(&priv->s_iowait.list);
                if (atomic_dec_and_test(&qp->refcount))
                        wake_up(&qp->wait);
        }
        write_sequnlock_irqrestore(&dev->iowait_lock, flags);
}

static inline int opa_mtu_enum_to_int(int mtu)
{
        switch (mtu) {
        case OPA_MTU_8192:  return 8192;
        case OPA_MTU_10240: return 10240;
        default:            return -1;
        }
}

/**
 * verbs_mtu_enum_to_int - convert an MTU enum (including OPA values) to bytes
 * @dev: the ib_device the MTU applies to
 * @mtu: the MTU enum value from the PathRecord
 *
 * This function is what we would push to the core layer if we wanted to be a
 * "first class citizen".  Instead we hide this here and rely on Verbs ULPs
 * to blindly pass the MTU enum value from the PathRecord to us.
 *
 * The actual flag used to determine "8k MTU" will change and is currently
 * unknown.
 */
static inline int verbs_mtu_enum_to_int(struct ib_device *dev, enum ib_mtu mtu)
{
        int val = opa_mtu_enum_to_int((int)mtu);

        if (val > 0)
                return val;
        return ib_mtu_enum_to_int(mtu);
}
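
/*
 * For example, verbs_mtu_enum_to_int(ibdev, OPA_MTU_10240) yields 10240,
 * while a standard value such as IB_MTU_4096 falls through to
 * ib_mtu_enum_to_int() and yields 4096.
 */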

int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
                         int attr_mask, struct ib_udata *udata)
{
        struct ib_qp *ibqp = &qp->ibqp;
        struct hfi1_ibdev *dev = to_idev(ibqp->device);
        struct hfi1_devdata *dd = dd_from_dev(dev);
        u8 sc;

        if (attr_mask & IB_QP_AV) {
                sc = ah_to_sc(ibqp->device, &attr->ah_attr);
                if (sc == 0xf)
                        return -EINVAL;

                if (!qp_to_sdma_engine(qp, sc) &&
                    dd->flags & HFI1_HAS_SEND_DMA)
                        return -EINVAL;
        }

        if (attr_mask & IB_QP_ALT_PATH) {
                sc = ah_to_sc(ibqp->device, &attr->alt_ah_attr);
                if (sc == 0xf)
                        return -EINVAL;

                if (!qp_to_sdma_engine(qp, sc) &&
                    dd->flags & HFI1_HAS_SEND_DMA)
                        return -EINVAL;
        }

        return 0;
}

void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
                    int attr_mask, struct ib_udata *udata)
{
        struct ib_qp *ibqp = &qp->ibqp;
        struct hfi1_qp_priv *priv = qp->priv;

        if (attr_mask & IB_QP_AV) {
                priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
                priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
        }

        if (attr_mask & IB_QP_PATH_MIG_STATE &&
            attr->path_mig_state == IB_MIG_MIGRATED &&
            qp->s_mig_state == IB_MIG_ARMED) {
                qp->s_flags |= RVT_S_AHG_CLEAR;
                priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
                priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
        }
}

int hfi1_check_send_wr(struct rvt_qp *qp, struct ib_send_wr *wr)
{
        struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
        struct rvt_ah *ah = ibah_to_rvtah(ud_wr(wr)->ah);

        if (qp->ibqp.qp_type != IB_QPT_RC &&
            qp->ibqp.qp_type != IB_QPT_UC &&
            qp->ibqp.qp_type != IB_QPT_SMI &&
            ibp->sl_to_sc[ah->attr.sl] == 0xf) {
                return -EINVAL;
        }
        return 0;
}

/**
 * hfi1_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 hfi1_compute_aeth(struct rvt_qp *qp)
{
        u32 aeth = qp->r_msn & HFI1_MSN_MASK;

        if (qp->ibqp.srq) {
                /*
                 * Shared receive queues don't generate credits.
                 * Set the credit field to the invalid value.
                 */
                aeth |= HFI1_AETH_CREDIT_INVAL << HFI1_AETH_CREDIT_SHIFT;
        } else {
                u32 min, max, x;
                u32 credits;
                struct rvt_rwq *wq = qp->r_rq.wq;
                u32 head;
                u32 tail;

                /* sanity check pointers before trusting them */
                head = wq->head;
                if (head >= qp->r_rq.size)
                        head = 0;
                tail = wq->tail;
                if (tail >= qp->r_rq.size)
                        tail = 0;
                /*
                 * Compute the number of credits available (RWQEs).
                 * There is a small chance that the pair of reads are
                 * not atomic, which is OK, since the fuzziness is
                 * resolved as further ACKs go out.
                 */
                credits = head - tail;
                if ((int)credits < 0)
                        credits += qp->r_rq.size;
                /*
                 * Binary search the credit table to find the code to
                 * use.
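                 * The search settles on the largest code whose table value
                 * does not exceed the credit count; e.g. 100 free RWQEs are
                 * advertised as code 0xD (96 credits).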
                 */
                min = 0;
                max = 31;
                for (;;) {
                        x = (min + max) / 2;
                        if (credit_table[x] == credits)
                                break;
                        if (credit_table[x] > credits)
                                max = x;
                        else if (min == x)
                                break;
                        else
                                min = x;
                }
                aeth |= x << HFI1_AETH_CREDIT_SHIFT;
        }
        return cpu_to_be32(aeth);
}

/**
 * hfi1_get_credit - handle the credit field of an incoming AETH
 * @qp: the qp the AETH was received for
 * @aeth: the Acknowledge Extended Transport Header
 *
 * Update the QP's send credit limit (or mark it unlimited) and restart
 * the send engine if it was only waiting for credits.
 *
 * The QP s_lock should be held.
 */
void hfi1_get_credit(struct rvt_qp *qp, u32 aeth)
{
        u32 credit = (aeth >> HFI1_AETH_CREDIT_SHIFT) & HFI1_AETH_CREDIT_MASK;

        /*
         * If the credit is invalid, we can send
         * as many packets as we like.  Otherwise, we have to
         * honor the credit field.
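         * A valid code indexes credit_table[]; e.g. code 0x10 lets the
         * sender go up to 256 SSNs beyond the responder's advertised MSN.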
         */
        if (credit == HFI1_AETH_CREDIT_INVAL) {
                if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
                        qp->s_flags |= RVT_S_UNLIMITED_CREDIT;
                        if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
                                qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
                                hfi1_schedule_send(qp);
                        }
                }
        } else if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
                /* Compute new LSN (i.e., MSN + credit) */
                credit = (aeth + credit_table[credit]) & HFI1_MSN_MASK;
                if (cmp_msn(credit, qp->s_lsn) > 0) {
                        qp->s_lsn = credit;
                        if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
                                qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
                                hfi1_schedule_send(qp);
                        }
                }
        }
}

void hfi1_qp_wakeup(struct rvt_qp *qp, u32 flag)
{
        unsigned long flags;

        spin_lock_irqsave(&qp->s_lock, flags);
        if (qp->s_flags & flag) {
                qp->s_flags &= ~flag;
                trace_hfi1_qpwakeup(qp, flag);
                hfi1_schedule_send(qp);
        }
        spin_unlock_irqrestore(&qp->s_lock, flags);
        /* Notify hfi1_destroy_qp() if it is waiting. */
        if (atomic_dec_and_test(&qp->refcount))
                wake_up(&qp->wait);
}

static int iowait_sleep(
        struct sdma_engine *sde,
        struct iowait *wait,
        struct sdma_txreq *stx,
        unsigned seq)
{
        struct verbs_txreq *tx = container_of(stx, struct verbs_txreq, txreq);
        struct rvt_qp *qp;
        struct hfi1_qp_priv *priv;
        unsigned long flags;
        int ret = 0;
        struct hfi1_ibdev *dev;

        qp = tx->qp;
        priv = qp->priv;

        spin_lock_irqsave(&qp->s_lock, flags);
        if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {

                /*
                 * If we couldn't queue the DMA request, save the info
                 * and try again later rather than destroying the
                 * buffer and undoing the side effects of the copy.
                 */
                /* Make a common routine? */
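                /*
                 * The QP is queued on the engine's dmawait list below;
                 * once descriptors free up, iowait_wakeup() ->
                 * hfi1_qp_wakeup() clears RVT_S_WAIT_DMA_DESC and
                 * reschedules the send engine.
                 */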
                dev = &sde->dd->verbs_dev;
                list_add_tail(&stx->list, &wait->tx_head);
                write_seqlock(&dev->iowait_lock);
                if (sdma_progress(sde, seq, stx))
                        goto eagain;
                if (list_empty(&priv->s_iowait.list)) {
                        struct hfi1_ibport *ibp =
                                to_iport(qp->ibqp.device, qp->port_num);

                        ibp->rvp.n_dmawait++;
                        qp->s_flags |= RVT_S_WAIT_DMA_DESC;
                        list_add_tail(&priv->s_iowait.list, &sde->dmawait);
                        trace_hfi1_qpsleep(qp, RVT_S_WAIT_DMA_DESC);
                        atomic_inc(&qp->refcount);
                }
                write_sequnlock(&dev->iowait_lock);
                qp->s_flags &= ~RVT_S_BUSY;
                spin_unlock_irqrestore(&qp->s_lock, flags);
                ret = -EBUSY;
        } else {
                spin_unlock_irqrestore(&qp->s_lock, flags);
                hfi1_put_txreq(tx);
        }
        return ret;
eagain:
        write_sequnlock(&dev->iowait_lock);
        spin_unlock_irqrestore(&qp->s_lock, flags);
        list_del_init(&stx->list);
        return -EAGAIN;
}

static void iowait_wakeup(struct iowait *wait, int reason)
{
        struct rvt_qp *qp = iowait_to_qp(wait);

        WARN_ON(reason != SDMA_AVAIL_REASON);
        hfi1_qp_wakeup(qp, RVT_S_WAIT_DMA_DESC);
}

/**
 * qp_to_sdma_engine - map a qp to a send engine
 * @qp: the QP
 * @sc5: the 5 bit sc
 *
 * Return:
 * A send engine for the qp or NULL for SMI type qp.
 */
struct sdma_engine *qp_to_sdma_engine(struct rvt_qp *qp, u8 sc5)
{
        struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
        struct sdma_engine *sde;

        if (!(dd->flags & HFI1_HAS_SEND_DMA))
                return NULL;
        switch (qp->ibqp.qp_type) {
        case IB_QPT_SMI:
                return NULL;
        default:
                break;
        }
        sde = sdma_select_engine_sc(dd, qp->ibqp.qp_num >> dd->qos_shift, sc5);
        return sde;
}

struct qp_iter {
        struct hfi1_ibdev *dev;
        struct rvt_qp *qp;
        int specials;
        int n;
};

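/*
 * Rough usage sketch for this iterator (the driver's debugfs QP dump, for
 * instance); RCU read-side protection around the table walk is elided here:
 *
 *      iter = qp_iter_init(ibdev);
 *      while (iter && !qp_iter_next(iter))
 *              qp_iter_print(s, iter);
 *      kfree(iter);
 */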
struct qp_iter *qp_iter_init(struct hfi1_ibdev *dev)
{
        struct qp_iter *iter;

        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return NULL;

        iter->dev = dev;
        iter->specials = dev->rdi.ibdev.phys_port_cnt * 2;
        if (qp_iter_next(iter)) {
                kfree(iter);
                return NULL;
        }

        return iter;
}

int qp_iter_next(struct qp_iter *iter)
{
        struct hfi1_ibdev *dev = iter->dev;
        int n = iter->n;
        int ret = 1;
        struct rvt_qp *pqp = iter->qp;
        struct rvt_qp *qp;

        /*
         * The approach is to consider the special qps
         * as additional table entries before the
         * real hash table.  Since the qp code sets
         * the qp->next hash link to NULL, this works just fine.
         *
         * iter->specials is 2 * # ports
         *
         * n = 0..iter->specials are the special qp indices
         *
         * n = iter->specials..dev->rdi.qp_dev->qp_table_size+iter->specials are
         * the potential hash bucket entries
         */
        for (; n < dev->rdi.qp_dev->qp_table_size + iter->specials; n++) {
                if (pqp) {
                        qp = rcu_dereference(pqp->next);
                } else {
                        if (n < iter->specials) {
                                struct hfi1_pportdata *ppd;
                                struct hfi1_ibport *ibp;
                                int pidx;

                                pidx = n % dev->rdi.ibdev.phys_port_cnt;
                                ppd = &dd_from_dev(dev)->pport[pidx];
                                ibp = &ppd->ibport_data;

                                if (!(n & 1))
                                        qp = rcu_dereference(ibp->rvp.qp[0]);
                                else
                                        qp = rcu_dereference(ibp->rvp.qp[1]);
                        } else {
                                qp = rcu_dereference(
                                        dev->rdi.qp_dev->qp_table[
                                                (n - iter->specials)]);
                        }
                }
                pqp = qp;
                if (qp) {
                        iter->qp = qp;
                        iter->n = n;
                        return 0;
                }
        }
        return ret;
}

static const char * const qp_type_str[] = {
        "SMI", "GSI", "RC", "UC", "UD",
};

static int qp_idle(struct rvt_qp *qp)
{
        return
                qp->s_last == qp->s_acked &&
                qp->s_acked == qp->s_cur &&
                qp->s_cur == qp->s_tail &&
                qp->s_tail == qp->s_head;
}

void qp_iter_print(struct seq_file *s, struct qp_iter *iter)
{
        struct rvt_swqe *wqe;
        struct rvt_qp *qp = iter->qp;
        struct hfi1_qp_priv *priv = qp->priv;
        struct sdma_engine *sde;

        sde = qp_to_sdma_engine(qp, priv->s_sc);
        wqe = rvt_get_swqe_ptr(qp, qp->s_last);
        seq_printf(s,
                   "N %d %s QP%u R %u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u) QP%u LID %x SL %u MTU %u %u %u SDE %p,%u\n",
                   iter->n,
                   qp_idle(qp) ? "I" : "B",
                   qp->ibqp.qp_num,
                   atomic_read(&qp->refcount),
                   qp_type_str[qp->ibqp.qp_type],
                   qp->state,
                   wqe ? wqe->wr.opcode : 0,
                   qp->s_hdrwords,
                   qp->s_flags,
                   atomic_read(&priv->s_iowait.sdma_busy),
                   !list_empty(&priv->s_iowait.list),
                   qp->timeout,
                   wqe ? wqe->ssn : 0,
                   qp->s_lsn,
                   qp->s_last_psn,
                   qp->s_psn, qp->s_next_psn,
                   qp->s_sending_psn, qp->s_sending_hpsn,
                   qp->s_last, qp->s_acked, qp->s_cur,
                   qp->s_tail, qp->s_head, qp->s_size,
                   qp->remote_qpn,
                   qp->remote_ah_attr.dlid,
                   qp->remote_ah_attr.sl,
                   qp->pmtu,
                   qp->s_retry_cnt,
                   qp->s_rnr_retry_cnt,
                   sde,
                   sde ? sde->this_idx : 0);
}

void qp_comm_est(struct rvt_qp *qp)
{
        qp->r_flags |= RVT_R_COMM_EST;
        if (qp->ibqp.event_handler) {
                struct ib_event ev;

                ev.device = qp->ibqp.device;
                ev.element.qp = &qp->ibqp;
                ev.event = IB_EVENT_COMM_EST;
                qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
        }
}

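/*
 * The qp_priv_alloc()/qp_priv_free() pair below, like most of the remaining
 * functions in this file, are driver hooks handed to rdma_vt; rdma_vt calls
 * them while creating and destroying QPs so hfi1 can attach its per-QP
 * private state (header scratch space, iowait, SDMA engine, timers).
 */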
void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp,
                    gfp_t gfp)
{
        struct hfi1_qp_priv *priv;

        priv = kzalloc_node(sizeof(*priv), gfp, rdi->dparms.node);
        if (!priv)
                return ERR_PTR(-ENOMEM);

        priv->owner = qp;

        priv->s_hdr = kzalloc_node(sizeof(*priv->s_hdr), gfp, rdi->dparms.node);
        if (!priv->s_hdr) {
                kfree(priv);
                return ERR_PTR(-ENOMEM);
        }
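        /*
         * RC timers: s_rnr_timer drives RNR NAK retries and s_timer drives
         * the retransmit timeout; the handlers are the RC protocol routines
         * hfi1_rc_rnr_retry() and hfi1_rc_timeout().
         */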
        setup_timer(&priv->s_rnr_timer, hfi1_rc_rnr_retry, (unsigned long)qp);
        qp->s_timer.function = hfi1_rc_timeout;
        return priv;
}

void qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
        struct hfi1_qp_priv *priv = qp->priv;

        kfree(priv->s_hdr);
        kfree(priv);
}

unsigned free_all_qps(struct rvt_dev_info *rdi)
{
        struct hfi1_ibdev *verbs_dev = container_of(rdi,
                                                    struct hfi1_ibdev,
                                                    rdi);
        struct hfi1_devdata *dd = container_of(verbs_dev,
                                               struct hfi1_devdata,
                                               verbs_dev);
        int n;
        unsigned qp_inuse = 0;

        for (n = 0; n < dd->num_pports; n++) {
                struct hfi1_ibport *ibp = &dd->pport[n].ibport_data;

                rcu_read_lock();
                if (rcu_dereference(ibp->rvp.qp[0]))
                        qp_inuse++;
                if (rcu_dereference(ibp->rvp.qp[1]))
                        qp_inuse++;
                rcu_read_unlock();
        }

        return qp_inuse;
}

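/*
 * The helpers below are the teardown/reset side of the rdma_vt hooks:
 * pending waiters are flushed, queued send work is cancelled and any SDMA
 * descriptors still in flight are drained before a QP is reset or destroyed.
 */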
void flush_qp_waiters(struct rvt_qp *qp)
{
        flush_iowait(qp);
        hfi1_stop_rc_timers(qp);
}

void stop_send_queue(struct rvt_qp *qp)
{
        struct hfi1_qp_priv *priv = qp->priv;

        cancel_work_sync(&priv->s_iowait.iowork);
        hfi1_del_timers_sync(qp);
}

void quiesce_qp(struct rvt_qp *qp)
{
        struct hfi1_qp_priv *priv = qp->priv;

        iowait_sdma_drain(&priv->s_iowait);
        flush_tx_list(qp);
}

void notify_qp_reset(struct rvt_qp *qp)
{
        struct hfi1_qp_priv *priv = qp->priv;

        iowait_init(
                &priv->s_iowait,
                1,
                _hfi1_do_send,
                iowait_sleep,
                iowait_wakeup);
        priv->r_adefered = 0;
        clear_ahg(qp);
}

/*
 * Switch to alternate path.
 * The QP s_lock should be held and interrupts disabled.
 */
void hfi1_migrate_qp(struct rvt_qp *qp)
{
        struct hfi1_qp_priv *priv = qp->priv;
        struct ib_event ev;

        qp->s_mig_state = IB_MIG_MIGRATED;
        qp->remote_ah_attr = qp->alt_ah_attr;
        qp->port_num = qp->alt_ah_attr.port_num;
        qp->s_pkey_index = qp->s_alt_pkey_index;
        qp->s_flags |= RVT_S_AHG_CLEAR;
        priv->s_sc = ah_to_sc(qp->ibqp.device, &qp->remote_ah_attr);
        priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);

        ev.device = qp->ibqp.device;
        ev.element.qp = &qp->ibqp;
        ev.event = IB_EVENT_PATH_MIG;
        qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
}

int mtu_to_path_mtu(u32 mtu)
{
        return mtu_to_enum(mtu, OPA_MTU_8192);
}

u32 mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu)
{
        u32 mtu;
        struct hfi1_ibdev *verbs_dev = container_of(rdi,
                                                    struct hfi1_ibdev,
                                                    rdi);
        struct hfi1_devdata *dd = container_of(verbs_dev,
                                               struct hfi1_devdata,
                                               verbs_dev);
        struct hfi1_ibport *ibp;
        u8 sc, vl;

        ibp = &dd->pport[qp->port_num - 1].ibport_data;
        sc = ibp->sl_to_sc[qp->remote_ah_attr.sl];
        vl = sc_to_vlt(dd, sc);

        mtu = verbs_mtu_enum_to_int(qp->ibqp.device, pmtu);
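        /* clamp to the MTU programmed for this QP's VL, if it maps to one */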
        if (vl < PER_VL_SEND_CONTEXTS)
                mtu = min_t(u32, mtu, dd->vld[vl].mtu);
        return mtu;
}

int get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp,
                       struct ib_qp_attr *attr)
{
        int mtu, pidx = qp->port_num - 1;
        struct hfi1_ibdev *verbs_dev = container_of(rdi,
                                                    struct hfi1_ibdev,
                                                    rdi);
        struct hfi1_devdata *dd = container_of(verbs_dev,
                                               struct hfi1_devdata,
                                               verbs_dev);
        mtu = verbs_mtu_enum_to_int(qp->ibqp.device, attr->path_mtu);
        if (mtu == -1)
                return -1; /* values less than 0 are error */

        if (mtu > dd->pport[pidx].ibmtu)
                return mtu_to_enum(dd->pport[pidx].ibmtu, IB_MTU_2048);
        else
                return attr->path_mtu;
}

void notify_error_qp(struct rvt_qp *qp)
{
        struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
        struct hfi1_qp_priv *priv = qp->priv;

        write_seqlock(&dev->iowait_lock);
        if (!list_empty(&priv->s_iowait.list) && !(qp->s_flags & RVT_S_BUSY)) {
                qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
                list_del_init(&priv->s_iowait.list);
                if (atomic_dec_and_test(&qp->refcount))
                        wake_up(&qp->wait);
        }
        write_sequnlock(&dev->iowait_lock);

        if (!(qp->s_flags & RVT_S_BUSY)) {
                qp->s_hdrwords = 0;
                if (qp->s_rdma_mr) {
                        rvt_put_mr(qp->s_rdma_mr);
                        qp->s_rdma_mr = NULL;
                }
                flush_tx_list(qp);
        }
}