/*
 * Copyright (c) 2012, 2013 Intel Corporation.  All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_mad.h>
#include <rdma/ib_user_verbs.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/utsname.h>
#include <linux/rculist.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/vmalloc.h>
#include <rdma/rdma_vt.h>

#include "qib.h"
#include "qib_common.h"

static unsigned int ib_qib_qp_table_size = 256;
module_param_named(qp_table_size, ib_qib_qp_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(qp_table_size, "QP table size");

static unsigned int qib_lkey_table_size = 16;
module_param_named(lkey_table_size, qib_lkey_table_size, uint,
		   S_IRUGO);
MODULE_PARM_DESC(lkey_table_size,
		 "LKEY table size in bits (2^n, 1 <= n <= 23)");

static unsigned int ib_qib_max_pds = 0xFFFF;
module_param_named(max_pds, ib_qib_max_pds, uint, S_IRUGO);
MODULE_PARM_DESC(max_pds,
		 "Maximum number of protection domains to support");

static unsigned int ib_qib_max_ahs = 0xFFFF;
module_param_named(max_ahs, ib_qib_max_ahs, uint, S_IRUGO);
MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");

unsigned int ib_qib_max_cqes = 0x2FFFF;
module_param_named(max_cqes, ib_qib_max_cqes, uint, S_IRUGO);
MODULE_PARM_DESC(max_cqes,
		 "Maximum number of completion queue entries to support");

unsigned int ib_qib_max_cqs = 0x1FFFF;
module_param_named(max_cqs, ib_qib_max_cqs, uint, S_IRUGO);
MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");

unsigned int ib_qib_max_qp_wrs = 0x3FFF;
module_param_named(max_qp_wrs, ib_qib_max_qp_wrs, uint, S_IRUGO);
MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");

unsigned int ib_qib_max_qps = 16384;
module_param_named(max_qps, ib_qib_max_qps, uint, S_IRUGO);
MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");

unsigned int ib_qib_max_sges = 0x60;
module_param_named(max_sges, ib_qib_max_sges, uint, S_IRUGO);
MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");

unsigned int ib_qib_max_mcast_grps = 16384;
module_param_named(max_mcast_grps, ib_qib_max_mcast_grps, uint, S_IRUGO);
MODULE_PARM_DESC(max_mcast_grps,
		 "Maximum number of multicast groups to support");

unsigned int ib_qib_max_mcast_qp_attached = 16;
module_param_named(max_mcast_qp_attached, ib_qib_max_mcast_qp_attached,
		   uint, S_IRUGO);
MODULE_PARM_DESC(max_mcast_qp_attached,
		 "Maximum number of attached QPs to support");

unsigned int ib_qib_max_srqs = 1024;
module_param_named(max_srqs, ib_qib_max_srqs, uint, S_IRUGO);
MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");

unsigned int ib_qib_max_srq_sges = 128;
module_param_named(max_srq_sges, ib_qib_max_srq_sges, uint, S_IRUGO);
MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");

unsigned int ib_qib_max_srq_wrs = 0x1FFFF;
module_param_named(max_srq_wrs, ib_qib_max_srq_wrs, uint, S_IRUGO);
MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs to support");

static unsigned int ib_qib_disable_sma;
module_param_named(disable_sma, ib_qib_disable_sma, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(disable_sma, "Disable the SMA");
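
/*
 * The knobs above are ordinary module parameters. As an illustration
 * (example values only, not recommendations), they could be set at
 * load time with:
 *
 *	modprobe ib_qib lkey_table_size=17 max_qps=32768
 *
 * or persistently with a line such as
 *	options ib_qib qp_table_size=512
 * in a modprobe.d configuration file.
 */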

/*
 * Translate ib_wr_opcode into ib_wc_opcode.
 */
const enum ib_wc_opcode ib_qib_wc_opcode[] = {
	[IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
	[IB_WR_SEND] = IB_WC_SEND,
	[IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
	[IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
	[IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
};
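
/*
 * The table above is indexed by the WR opcode of a completed send WQE
 * to pick the opcode reported in its work completion; e.g. an
 * IB_WR_SEND_WITH_IMM request completes as IB_WC_SEND.
 */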

/*
 * System image GUID.
 */
__be64 ib_qib_sys_image_guid;

/**
 * qib_copy_sge - copy data to SGE memory
 * @ss: the SGE state
 * @data: the data to copy
 * @length: the length of the data
 * @release: boolean to release MR
 */
void qib_copy_sge(struct rvt_sge_state *ss, void *data, u32 length, int release)
{
	struct rvt_sge *sge = &ss->sge;

	while (length) {
		u32 len = rvt_get_sge_length(sge, length);

		WARN_ON_ONCE(len == 0);
		memcpy(sge->vaddr, data, len);
		rvt_update_sge(ss, len, release);
		data += len;
		length -= len;
	}
}

/*
 * Count the number of DMA descriptors needed to send length bytes of data.
 * Don't modify the qib_sge_state to get the count.
 * Return zero if any of the segments is not aligned.
 */
static u32 qib_count_sge(struct rvt_sge_state *ss, u32 length)
{
	struct rvt_sge *sg_list = ss->sg_list;
	struct rvt_sge sge = ss->sge;
	u8 num_sge = ss->num_sge;
	u32 ndesc = 1;	/* count the header */

	while (length) {
		u32 len = sge.length;

		if (len > length)
			len = length;
		if (len > sge.sge_length)
			len = sge.sge_length;
		BUG_ON(len == 0);
		if (((long) sge.vaddr & (sizeof(u32) - 1)) ||
		    (len != length && (len & (sizeof(u32) - 1)))) {
			ndesc = 0;
			break;
		}
		ndesc++;
		sge.vaddr += len;
		sge.length -= len;
		sge.sge_length -= len;
		if (sge.sge_length == 0) {
			if (--num_sge)
				sge = *sg_list++;
		} else if (sge.length == 0 && sge.mr->lkey) {
			if (++sge.n >= RVT_SEGSZ) {
				if (++sge.m >= sge.mr->mapsz)
					break;
				sge.n = 0;
			}
			sge.vaddr =
				sge.mr->map[sge.m]->segs[sge.n].vaddr;
			sge.length =
				sge.mr->map[sge.m]->segs[sge.n].length;
		}
		length -= len;
	}
	return ndesc;
}
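
/*
 * Note: a zero return from qib_count_sge() means some segment is not
 * dword aligned (e.g. an SGE leaving a 3-byte tail mid-payload).
 * qib_verbs_send_dma() below treats that as "cannot DMA directly" and
 * falls back to copying the packet into a kmalloc'ed bounce buffer.
 */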

/*
 * Copy from the SGEs to the data buffer.
 */
static void qib_copy_from_sge(void *data, struct rvt_sge_state *ss, u32 length)
{
	struct rvt_sge *sge = &ss->sge;

	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		memcpy(data, sge->vaddr, len);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= RVT_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		data += len;
		length -= len;
	}
}
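
/*
 * qib_copy_from_sge() is the gather-side mirror of qib_copy_sge(): it
 * walks the SGE list and copies payload out of it. The SDMA path uses
 * it to fill the aligned bounce buffer mentioned above.
 */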

/**
 * qib_qp_rcv - processing an incoming packet on a QP
 * @rcd: the context pointer
 * @hdr: the packet header
 * @has_grh: true if the packet has a GRH
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP the packet came on
 *
 * This is called from qib_ib_rcv() to process an incoming packet
 * for the given QP.
 * Called at interrupt level.
 */
static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct ib_header *hdr,
		       int has_grh, void *data, u32 tlen, struct rvt_qp *qp)
{
	struct qib_ibport *ibp = &rcd->ppd->ibport_data;

	spin_lock(&qp->r_lock);

	/* Check for valid receive state. */
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
		ibp->rvp.n_pkt_drops++;
		goto unlock;
	}

	switch (qp->ibqp.qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		if (ib_qib_disable_sma)
			break;
		/* FALLTHROUGH */
	case IB_QPT_UD:
		qib_ud_rcv(ibp, hdr, has_grh, data, tlen, qp);
		break;

	case IB_QPT_RC:
		qib_rc_rcv(rcd, hdr, has_grh, data, tlen, qp);
		break;

	case IB_QPT_UC:
		qib_uc_rcv(ibp, hdr, has_grh, data, tlen, qp);
		break;

	default:
		break;
	}

unlock:
	spin_unlock(&qp->r_lock);
}

/**
 * qib_ib_rcv - process an incoming packet
 * @rcd: the context pointer
 * @rhdr: the header of the packet
 * @data: the packet payload
 * @tlen: the packet length
 *
 * This is called from qib_kreceive() to process an incoming packet at
 * interrupt level. Tlen is the length of the header + data + CRC in bytes.
 */
void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen)
{
	struct qib_pportdata *ppd = rcd->ppd;
	struct qib_ibport *ibp = &ppd->ibport_data;
	struct ib_header *hdr = rhdr;
	struct qib_devdata *dd = ppd->dd;
	struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
	struct ib_other_headers *ohdr;
	struct rvt_qp *qp;
	u32 qp_num;
	int lnh;
	u8 opcode;
	u16 lid;

	/* 24 == LRH+BTH+CRC */
	if (unlikely(tlen < 24))
		goto drop;

	/* Check for a valid destination LID (see ch. 7.11.1). */
	lid = be16_to_cpu(hdr->lrh[1]);
	if (lid < be16_to_cpu(IB_MULTICAST_LID_BASE)) {
		lid &= ~((1 << ppd->lmc) - 1);
		if (unlikely(lid != ppd->lid))
			goto drop;
	}

	/* Check for GRH */
	lnh = be16_to_cpu(hdr->lrh[0]) & 3;
	if (lnh == QIB_LRH_BTH)
		ohdr = &hdr->u.oth;
	else if (lnh == QIB_LRH_GRH) {
		u32 vtf;

		ohdr = &hdr->u.l.oth;
		if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
			goto drop;
		vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
		if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
			goto drop;
	} else
		goto drop;

	opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0x7f;
#ifdef CONFIG_DEBUG_FS
	rcd->opstats->stats[opcode].n_bytes += tlen;
	rcd->opstats->stats[opcode].n_packets++;
#endif

	/* Get the destination QP number. */
	qp_num = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
	if (qp_num == QIB_MULTICAST_QPN) {
		struct rvt_mcast *mcast;
		struct rvt_mcast_qp *p;

		if (lnh != QIB_LRH_GRH)
			goto drop;
		mcast = rvt_mcast_find(&ibp->rvp, &hdr->u.l.grh.dgid, lid);
		if (mcast == NULL)
			goto drop;
		this_cpu_inc(ibp->pmastats->n_multicast_rcv);
		list_for_each_entry_rcu(p, &mcast->qp_list, list)
			qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp);
		/*
		 * Notify rvt_multicast_detach() if it is waiting for us
		 * to finish.
		 */
		if (atomic_dec_return(&mcast->refcount) <= 1)
			wake_up(&mcast->wait);
	} else {
		rcu_read_lock();
		qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
		if (!qp) {
			rcu_read_unlock();
			goto drop;
		}
		this_cpu_inc(ibp->pmastats->n_unicast_rcv);
		qib_qp_rcv(rcd, hdr, lnh == QIB_LRH_GRH, data, tlen, qp);
		rcu_read_unlock();
	}
	return;

drop:
	ibp->rvp.n_pkt_drops++;
}
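
/*
 * Dispatch summary for qib_ib_rcv(): multicast packets (QPN ==
 * QIB_MULTICAST_QPN) are resolved by GRH DGID and replicated to every
 * attached QP, while unicast packets are resolved by QP number under
 * rcu_read_lock() so the QP cannot disappear while qib_qp_rcv() runs.
 */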

/*
 * This is called from a timer to check for QPs
 * which need kernel memory in order to send a packet.
 */
static void mem_timer(unsigned long data)
{
	struct qib_ibdev *dev = (struct qib_ibdev *) data;
	struct list_head *list = &dev->memwait;
	struct rvt_qp *qp = NULL;
	struct qib_qp_priv *priv = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev->rdi.pending_lock, flags);
	if (!list_empty(list)) {
		priv = list_entry(list->next, struct qib_qp_priv, iowait);
		qp = priv->owner;
		list_del_init(&priv->iowait);
		rvt_get_qp(qp);
		if (!list_empty(list))
			mod_timer(&dev->mem_timer, jiffies + 1);
	}
	spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);

	if (qp) {
		spin_lock_irqsave(&qp->s_lock, flags);
		if (qp->s_flags & RVT_S_WAIT_KMEM) {
			qp->s_flags &= ~RVT_S_WAIT_KMEM;
			qib_schedule_send(qp);
		}
		spin_unlock_irqrestore(&qp->s_lock, flags);
		rvt_put_qp(qp);
	}
}

#ifdef __LITTLE_ENDIAN
static inline u32 get_upper_bits(u32 data, u32 shift)
{
	return data >> shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
	return data << shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
	data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
	data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
	return data;
}
#else
static inline u32 get_upper_bits(u32 data, u32 shift)
{
	return data << shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
	return data >> shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
	data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
	data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
	return data;
}
#endif
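
/*
 * Worked example (little-endian build): with extra = 1 leftover byte
 * already in 'data', an aligned source word v contributes its low
 * three bytes via set_upper_bits(v, 8), completing a dword to write
 * out, and its remaining high byte becomes the new leftover via
 * get_upper_bits(v, 24). This is exactly the aligned-source loop in
 * copy_io() below.
 */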

static void copy_io(u32 __iomem *piobuf, struct rvt_sge_state *ss,
		    u32 length, unsigned flush_wc)
{
	u32 extra = 0;
	u32 data = 0;
	u32 last;

	while (1) {
		u32 len = ss->sge.length;
		u32 off;

		if (len > length)
			len = length;
		if (len > ss->sge.sge_length)
			len = ss->sge.sge_length;
		BUG_ON(len == 0);
		/* If the source address is not aligned, try to align it. */
		off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
		if (off) {
			u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
					    ~(sizeof(u32) - 1));
			u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
			u32 y;

			y = sizeof(u32) - off;
			if (len > y)
				len = y;
			if (len + extra >= sizeof(u32)) {
				data |= set_upper_bits(v, extra *
						       BITS_PER_BYTE);
				len = sizeof(u32) - extra;
				if (len == length) {
					last = data;
					break;
				}
				__raw_writel(data, piobuf);
				piobuf++;
				extra = 0;
				data = 0;
			} else {
				/* Clear unused upper bytes */
				data |= clear_upper_bytes(v, len, extra);
				if (len == length) {
					last = data;
					break;
				}
				extra += len;
			}
		} else if (extra) {
			/* Source address is aligned. */
			u32 *addr = (u32 *) ss->sge.vaddr;
			int shift = extra * BITS_PER_BYTE;
			int ushift = 32 - shift;
			u32 l = len;

			while (l >= sizeof(u32)) {
				u32 v = *addr;

				data |= set_upper_bits(v, shift);
				__raw_writel(data, piobuf);
				data = get_upper_bits(v, ushift);
				piobuf++;
				addr++;
				l -= sizeof(u32);
			}
			/*
			 * We still have 'extra' number of bytes leftover.
			 */
			if (l) {
				u32 v = *addr;

				if (l + extra >= sizeof(u32)) {
					data |= set_upper_bits(v, shift);
					len -= l + extra - sizeof(u32);
					if (len == length) {
						last = data;
						break;
					}
					__raw_writel(data, piobuf);
					piobuf++;
					extra = 0;
					data = 0;
				} else {
					/* Clear unused upper bytes */
					data |= clear_upper_bytes(v, l, extra);
					if (len == length) {
						last = data;
						break;
					}
					extra += l;
				}
			} else if (len == length) {
				last = data;
				break;
			}
		} else if (len == length) {
			u32 w;

			/*
			 * Need to round up for the last dword in the
			 * packet.
			 */
			w = (len + 3) >> 2;
			qib_pio_copy(piobuf, ss->sge.vaddr, w - 1);
			piobuf += w - 1;
			last = ((u32 *) ss->sge.vaddr)[w - 1];
			break;
		} else {
			u32 w = len >> 2;

			qib_pio_copy(piobuf, ss->sge.vaddr, w);
			piobuf += w;

			extra = len & (sizeof(u32) - 1);
			if (extra) {
				u32 v = ((u32 *) ss->sge.vaddr)[w];

				/* Clear unused upper bytes */
				data = clear_upper_bytes(v, extra, 0);
			}
		}
		rvt_update_sge(ss, len, false);
		length -= len;
	}
	/* Update address before sending packet. */
	rvt_update_sge(ss, length, false);
	if (flush_wc) {
		/* must flush early everything before trigger word */
		qib_flush_wc();
		__raw_writel(last, piobuf);
		/* be sure trigger word is written */
		qib_flush_wc();
	} else
		__raw_writel(last, piobuf);
}

static noinline struct qib_verbs_txreq *__get_txreq(struct qib_ibdev *dev,
					   struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_verbs_txreq *tx;
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	spin_lock(&dev->rdi.pending_lock);

	if (!list_empty(&dev->txreq_free)) {
		struct list_head *l = dev->txreq_free.next;

		list_del(l);
		spin_unlock(&dev->rdi.pending_lock);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
	} else {
		if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK &&
		    list_empty(&priv->iowait)) {
			dev->n_txwait++;
			qp->s_flags |= RVT_S_WAIT_TX;
			list_add_tail(&priv->iowait, &dev->txwait);
		}
		qp->s_flags &= ~RVT_S_BUSY;
		spin_unlock(&dev->rdi.pending_lock);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		tx = ERR_PTR(-EBUSY);
	}
	return tx;
}

static inline struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev,
					 struct rvt_qp *qp)
{
	struct qib_verbs_txreq *tx;
	unsigned long flags;

	spin_lock_irqsave(&dev->rdi.pending_lock, flags);
	/* assume the list is non-empty */
	if (likely(!list_empty(&dev->txreq_free))) {
		struct list_head *l = dev->txreq_free.next;

		list_del(l);
		spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
		tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
	} else {
		/* call slow path to get the extra lock */
		spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
		tx = __get_txreq(dev, qp);
	}
	return tx;
}
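
/*
 * Design note: get_txreq() takes only rdi.pending_lock for the common
 * case of a non-empty free list; only when the list is empty does
 * __get_txreq() retry with qp->s_lock held as well, so the QP can be
 * queued on dev->txwait without racing the wakeup in qib_put_txreq().
 */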

void qib_put_txreq(struct qib_verbs_txreq *tx)
{
	struct qib_ibdev *dev;
	struct rvt_qp *qp;
	struct qib_qp_priv *priv;
	unsigned long flags;

	qp = tx->qp;
	dev = to_idev(qp->ibqp.device);

	if (tx->mr) {
		rvt_put_mr(tx->mr);
		tx->mr = NULL;
	}
	if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) {
		tx->txreq.flags &= ~QIB_SDMA_TXREQ_F_FREEBUF;
		dma_unmap_single(&dd_from_dev(dev)->pcidev->dev,
				 tx->txreq.addr, tx->hdr_dwords << 2,
				 DMA_TO_DEVICE);
		kfree(tx->align_buf);
	}

	spin_lock_irqsave(&dev->rdi.pending_lock, flags);

	/* Put struct back on free list */
	list_add(&tx->txreq.list, &dev->txreq_free);

	if (!list_empty(&dev->txwait)) {
		/* Wake up first QP wanting a free struct */
		priv = list_entry(dev->txwait.next, struct qib_qp_priv,
				  iowait);
		qp = priv->owner;
		list_del_init(&priv->iowait);
		rvt_get_qp(qp);
		spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);

		spin_lock_irqsave(&qp->s_lock, flags);
		if (qp->s_flags & RVT_S_WAIT_TX) {
			qp->s_flags &= ~RVT_S_WAIT_TX;
			qib_schedule_send(qp);
		}
		spin_unlock_irqrestore(&qp->s_lock, flags);

		rvt_put_qp(qp);
	} else
		spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
}

/*
 * This is called when there are send DMA descriptors that might be
 * available.
 *
 * This is called with ppd->sdma_lock held.
 */
void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail)
{
	struct rvt_qp *qp, *nqp;
	struct qib_qp_priv *qpp, *nqpp;
	struct rvt_qp *qps[20];
	struct qib_ibdev *dev;
	unsigned i, n;

	n = 0;
	dev = &ppd->dd->verbs_dev;
	spin_lock(&dev->rdi.pending_lock);

	/* Search wait list for first QP wanting DMA descriptors. */
	list_for_each_entry_safe(qpp, nqpp, &dev->dmawait, iowait) {
		qp = qpp->owner;
		nqp = nqpp->owner;
		if (qp->port_num != ppd->port)
			continue;
		if (n == ARRAY_SIZE(qps))
			break;
		if (qpp->s_tx->txreq.sg_count > avail)
			break;
		avail -= qpp->s_tx->txreq.sg_count;
		list_del_init(&qpp->iowait);
		rvt_get_qp(qp);
		qps[n++] = qp;
	}

	spin_unlock(&dev->rdi.pending_lock);

	for (i = 0; i < n; i++) {
		qp = qps[i];
		spin_lock(&qp->s_lock);
		if (qp->s_flags & RVT_S_WAIT_DMA_DESC) {
			qp->s_flags &= ~RVT_S_WAIT_DMA_DESC;
			qib_schedule_send(qp);
		}
		spin_unlock(&qp->s_lock);
		rvt_put_qp(qp);
	}
}
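
/*
 * Note the two-phase wakeup above: candidates are collected under
 * rdi.pending_lock (which guards the wait lists) and only rescheduled
 * under their own s_lock after that lock is dropped, since s_lock is
 * taken before rdi.pending_lock elsewhere in this file.
 */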

/*
 * This is called with ppd->sdma_lock held.
 */
static void sdma_complete(struct qib_sdma_txreq *cookie, int status)
{
	struct qib_verbs_txreq *tx =
		container_of(cookie, struct qib_verbs_txreq, txreq);
	struct rvt_qp *qp = tx->qp;
	struct qib_qp_priv *priv = qp->priv;

	spin_lock(&qp->s_lock);
	if (tx->wqe)
		qib_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
	else if (qp->ibqp.qp_type == IB_QPT_RC) {
		struct ib_header *hdr;

		if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF)
			hdr = &tx->align_buf->hdr;
		else {
			struct qib_ibdev *dev = to_idev(qp->ibqp.device);

			hdr = &dev->pio_hdrs[tx->hdr_inx].hdr;
		}
		qib_rc_send_complete(qp, hdr);
	}
	if (atomic_dec_and_test(&priv->s_dma_busy)) {
		if (qp->state == IB_QPS_RESET)
			wake_up(&priv->wait_dma);
		else if (qp->s_flags & RVT_S_WAIT_DMA) {
			qp->s_flags &= ~RVT_S_WAIT_DMA;
			qib_schedule_send(qp);
		}
	}
	spin_unlock(&qp->s_lock);

	qib_put_txreq(tx);
}

static int wait_kmem(struct qib_ibdev *dev, struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
		spin_lock(&dev->rdi.pending_lock);
		if (list_empty(&priv->iowait)) {
			if (list_empty(&dev->memwait))
				mod_timer(&dev->mem_timer, jiffies + 1);
			qp->s_flags |= RVT_S_WAIT_KMEM;
			list_add_tail(&priv->iowait, &dev->memwait);
		}
		spin_unlock(&dev->rdi.pending_lock);
		qp->s_flags &= ~RVT_S_BUSY;
		ret = -EBUSY;
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);

	return ret;
}

static int qib_verbs_send_dma(struct rvt_qp *qp, struct ib_header *hdr,
			      u32 hdrwords, struct rvt_sge_state *ss, u32 len,
			      u32 plen, u32 dwords)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
	struct qib_devdata *dd = dd_from_dev(dev);
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_verbs_txreq *tx;
	struct qib_pio_header *phdr;
	u32 control;
	u32 ndesc;
	int ret;

	tx = priv->s_tx;
	if (tx) {
		priv->s_tx = NULL;
		/* resend previously constructed packet */
		ret = qib_sdma_verbs_send(ppd, tx->ss, tx->dwords, tx);
		goto bail;
	}

	tx = get_txreq(dev, qp);
	if (IS_ERR(tx))
		goto bail_tx;

	control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
				       be16_to_cpu(hdr->lrh[0]) >> 12);
	tx->qp = qp;
	tx->wqe = qp->s_wqe;
	tx->mr = qp->s_rdma_mr;
	if (qp->s_rdma_mr)
		qp->s_rdma_mr = NULL;
	tx->txreq.callback = sdma_complete;
	if (dd->flags & QIB_HAS_SDMA_TIMEOUT)
		tx->txreq.flags = QIB_SDMA_TXREQ_F_HEADTOHOST;
	else
		tx->txreq.flags = QIB_SDMA_TXREQ_F_INTREQ;
	if (plen + 1 > dd->piosize2kmax_dwords)
		tx->txreq.flags |= QIB_SDMA_TXREQ_F_USELARGEBUF;

	if (len) {
		/*
		 * Don't try to DMA if it takes more descriptors than
		 * are available.
		 */
		ndesc = qib_count_sge(ss, len);
		if (ndesc >= ppd->sdma_descq_cnt)
			ndesc = 0;
	} else
		ndesc = 1;
	if (ndesc) {
		phdr = &dev->pio_hdrs[tx->hdr_inx];
		phdr->pbc[0] = cpu_to_le32(plen);
		phdr->pbc[1] = cpu_to_le32(control);
		memcpy(&phdr->hdr, hdr, hdrwords << 2);
		tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEDESC;
		tx->txreq.sg_count = ndesc;
		tx->txreq.addr = dev->pio_hdrs_phys +
			tx->hdr_inx * sizeof(struct qib_pio_header);
		tx->hdr_dwords = hdrwords + 2; /* add PBC length */
		ret = qib_sdma_verbs_send(ppd, ss, dwords, tx);
		goto bail;
	}

	/* Allocate a buffer and copy the header and payload to it. */
	tx->hdr_dwords = plen + 1;
	phdr = kmalloc(tx->hdr_dwords << 2, GFP_ATOMIC);
	if (!phdr)
		goto err_tx;
	phdr->pbc[0] = cpu_to_le32(plen);
	phdr->pbc[1] = cpu_to_le32(control);
	memcpy(&phdr->hdr, hdr, hdrwords << 2);
	qib_copy_from_sge((u32 *) &phdr->hdr + hdrwords, ss, len);

	tx->txreq.addr = dma_map_single(&dd->pcidev->dev, phdr,
					tx->hdr_dwords << 2, DMA_TO_DEVICE);
	if (dma_mapping_error(&dd->pcidev->dev, tx->txreq.addr))
		goto map_err;
	tx->align_buf = phdr;
	tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEBUF;
	tx->txreq.sg_count = 1;
	ret = qib_sdma_verbs_send(ppd, NULL, 0, tx);
	goto unaligned;

map_err:
	kfree(phdr);
err_tx:
	qib_put_txreq(tx);
	ret = wait_kmem(dev, qp);
unaligned:
	ibp->rvp.n_unaligned++;
bail:
	return ret;
bail_tx:
	ret = PTR_ERR(tx);
	goto bail;
}

/*
 * If we are now in the error state, return zero to flush the
 * send work request.
 */
static int no_bufs_available(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
	struct qib_devdata *dd;
	unsigned long flags;
	int ret = 0;

	/*
	 * Note that as soon as want_buffer() is called and
	 * possibly before it returns, qib_ib_piobufavail()
	 * could be called. Therefore, put QP on the I/O wait list before
	 * enabling the PIO avail interrupt.
	 */
	spin_lock_irqsave(&qp->s_lock, flags);
	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
		spin_lock(&dev->rdi.pending_lock);
		if (list_empty(&priv->iowait)) {
			dev->n_piowait++;
			qp->s_flags |= RVT_S_WAIT_PIO;
			list_add_tail(&priv->iowait, &dev->piowait);
			dd = dd_from_dev(dev);
			dd->f_wantpiobuf_intr(dd, 1);
		}
		spin_unlock(&dev->rdi.pending_lock);
		qp->s_flags &= ~RVT_S_BUSY;
		ret = -EBUSY;
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);
	return ret;
}

static int qib_verbs_send_pio(struct rvt_qp *qp, struct ib_header *ibhdr,
			      u32 hdrwords, struct rvt_sge_state *ss, u32 len,
			      u32 plen, u32 dwords)
{
	struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	struct qib_pportdata *ppd = dd->pport + qp->port_num - 1;
	u32 *hdr = (u32 *) ibhdr;
	u32 __iomem *piobuf_orig;
	u32 __iomem *piobuf;
	u64 pbc;
	unsigned long flags;
	unsigned flush_wc;
	u32 control;
	u32 pbufn;

	control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
		be16_to_cpu(ibhdr->lrh[0]) >> 12);
	pbc = ((u64) control << 32) | plen;
	piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn);
	if (unlikely(piobuf == NULL))
		return no_bufs_available(qp);

	/*
	 * Write the pbc.
	 * We have to flush after the PBC for correctness on some cpus
	 * or WC buffer can be written out of order.
	 */
	writeq(pbc, piobuf);
	piobuf_orig = piobuf;
	piobuf += 2;

	flush_wc = dd->flags & QIB_PIO_FLUSH_WC;
	if (len == 0) {
		/*
		 * If there is just the header portion, must flush before
		 * writing last word of header for correctness, and after
		 * the last header word (trigger word).
		 */
		if (flush_wc) {
			qib_flush_wc();
			qib_pio_copy(piobuf, hdr, hdrwords - 1);
			qib_flush_wc();
			__raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
			qib_flush_wc();
		} else
			qib_pio_copy(piobuf, hdr, hdrwords);
		goto done;
	}

	if (flush_wc)
		qib_flush_wc();
	qib_pio_copy(piobuf, hdr, hdrwords);
	piobuf += hdrwords;

	/* The common case is aligned and contained in one segment. */
	if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
		   !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
		u32 *addr = (u32 *) ss->sge.vaddr;

		/* Update address before sending packet. */
		rvt_update_sge(ss, len, false);
		if (flush_wc) {
			qib_pio_copy(piobuf, addr, dwords - 1);
			/* must flush early everything before trigger word */
			qib_flush_wc();
			__raw_writel(addr[dwords - 1], piobuf + dwords - 1);
			/* be sure trigger word is written */
			qib_flush_wc();
		} else
			qib_pio_copy(piobuf, addr, dwords);
		goto done;
	}
	copy_io(piobuf, ss, len, flush_wc);
done:
	if (dd->flags & QIB_USE_SPCL_TRIG) {
		u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;

		qib_flush_wc();
		__raw_writel(0xaebecede, piobuf_orig + spcl_off);
	}
	qib_sendbuf_done(dd, pbufn);
	if (qp->s_rdma_mr) {
		rvt_put_mr(qp->s_rdma_mr);
		qp->s_rdma_mr = NULL;
	}
	if (qp->s_wqe) {
		spin_lock_irqsave(&qp->s_lock, flags);
		qib_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	} else if (qp->ibqp.qp_type == IB_QPT_RC) {
		spin_lock_irqsave(&qp->s_lock, flags);
		qib_rc_send_complete(qp, ibhdr);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	}
	return 0;
}

/**
 * qib_verbs_send - send a packet
 * @qp: the QP to send on
 * @hdr: the packet header
 * @hdrwords: the number of 32-bit words in the header
 * @ss: the SGE to send
 * @len: the length of the packet in bytes
 *
 * Return zero if packet is sent or queued OK.
 * Return non-zero and clear qp->s_flags RVT_S_BUSY otherwise.
 */
int qib_verbs_send(struct rvt_qp *qp, struct ib_header *hdr,
		   u32 hdrwords, struct rvt_sge_state *ss, u32 len)
{
	struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	u32 plen;
	int ret;
	u32 dwords = (len + 3) >> 2;

	/*
	 * Calculate the send buffer trigger address.
	 * The +1 counts for the pbc control dword following the pbc length.
	 */
	plen = hdrwords + dwords + 1;

	/*
	 * VL15 packets (IB_QPT_SMI) will always use PIO, so we
	 * can defer SDMA restart until link goes ACTIVE without
	 * worrying about just how we got there.
	 */
	if (qp->ibqp.qp_type == IB_QPT_SMI ||
	    !(dd->flags & QIB_HAS_SEND_DMA))
		ret = qib_verbs_send_pio(qp, hdr, hdrwords, ss, len,
					 plen, dwords);
	else
		ret = qib_verbs_send_dma(qp, hdr, hdrwords, ss, len,
					 plen, dwords);

	return ret;
}
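
/*
 * Sizing example for the above: a 40-byte header (hdrwords = 10) with
 * a 57-byte payload gives dwords = (57 + 3) >> 2 = 15, so plen =
 * 10 + 15 + 1 = 26 dwords including the PBC.
 */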

int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
			  u64 *rwords, u64 *spkts, u64 *rpkts,
			  u64 *xmit_wait)
{
	int ret;
	struct qib_devdata *dd = ppd->dd;

	if (!(dd->flags & QIB_PRESENT)) {
		/* no hardware, freeze, etc. */
		ret = -EINVAL;
		goto bail;
	}
	*swords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDSEND);
	*rwords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDRCV);
	*spkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTSEND);
	*rpkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTRCV);
	*xmit_wait = dd->f_portcntr(ppd, QIBPORTCNTR_SENDSTALL);

	ret = 0;

bail:
	return ret;
}

/**
 * qib_get_counters - get various chip counters
 * @ppd: the qlogic_ib port
 * @cntrs: counters are placed here
 *
 * Return the counters needed by recv_pma_get_portcounters().
 */
int qib_get_counters(struct qib_pportdata *ppd,
		     struct qib_verbs_counters *cntrs)
{
	int ret;

	if (!(ppd->dd->flags & QIB_PRESENT)) {
		/* no hardware, freeze, etc. */
		ret = -EINVAL;
		goto bail;
	}
	cntrs->symbol_error_counter =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBSYMBOLERR);
	cntrs->link_error_recovery_counter =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKERRRECOV);
	/*
	 * The link downed counter counts when the other side downs the
	 * connection.  We add in the number of times we downed the link
	 * due to local link integrity errors to compensate.
	 */
	cntrs->link_downed_counter =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKDOWN);
	cntrs->port_rcv_errors =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXDROPPKT) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVOVFL) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERR_RLEN) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_INVALIDRLEN) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLINK) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRICRC) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRVCRC) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLPCRC) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_BADFORMAT);
	cntrs->port_rcv_errors +=
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXLOCALPHYERR);
	cntrs->port_rcv_errors +=
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXVLERR);
	cntrs->port_rcv_remphys_errors =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVEBP);
	cntrs->port_xmit_discards =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_UNSUPVL);
	cntrs->port_xmit_data = ppd->dd->f_portcntr(ppd,
			QIBPORTCNTR_WORDSEND);
	cntrs->port_rcv_data = ppd->dd->f_portcntr(ppd,
			QIBPORTCNTR_WORDRCV);
	cntrs->port_xmit_packets = ppd->dd->f_portcntr(ppd,
			QIBPORTCNTR_PKTSEND);
	cntrs->port_rcv_packets = ppd->dd->f_portcntr(ppd,
			QIBPORTCNTR_PKTRCV);
	cntrs->local_link_integrity_errors =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_LLI);
	cntrs->excessive_buffer_overrun_errors =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_EXCESSBUFOVFL);
	cntrs->vl15_dropped =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_VL15PKTDROP);

	ret = 0;

bail:
	return ret;
}

/**
 * qib_ib_piobufavail - callback when a PIO buffer is available
 * @dd: the device pointer
 *
 * This is called from qib_intr() at interrupt level when a PIO buffer is
 * available after qib_verbs_send() returned an error that no buffers were
 * available. Disable the interrupt if there are no more QPs waiting.
 */
void qib_ib_piobufavail(struct qib_devdata *dd)
{
	struct qib_ibdev *dev = &dd->verbs_dev;
	struct list_head *list;
	struct rvt_qp *qps[5];
	struct rvt_qp *qp;
	unsigned long flags;
	unsigned i, n;
	struct qib_qp_priv *priv;

	list = &dev->piowait;
	n = 0;

	/*
	 * Note: checking that the piowait list is empty and clearing
	 * the buffer available interrupt needs to be atomic or we
	 * could end up with QPs on the wait list with the interrupt
	 * enabled.
	 */
	spin_lock_irqsave(&dev->rdi.pending_lock, flags);
	while (!list_empty(list)) {
		if (n == ARRAY_SIZE(qps))
			goto full;
		priv = list_entry(list->next, struct qib_qp_priv, iowait);
		qp = priv->owner;
		list_del_init(&priv->iowait);
		rvt_get_qp(qp);
		qps[n++] = qp;
	}
	dd->f_wantpiobuf_intr(dd, 0);
full:
	spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);

	for (i = 0; i < n; i++) {
		qp = qps[i];

		spin_lock_irqsave(&qp->s_lock, flags);
		if (qp->s_flags & RVT_S_WAIT_PIO) {
			qp->s_flags &= ~RVT_S_WAIT_PIO;
			qib_schedule_send(qp);
		}
		spin_unlock_irqrestore(&qp->s_lock, flags);

		/* Notify qib_destroy_qp() if it is waiting. */
		rvt_put_qp(qp);
	}
}

static int qib_query_port(struct rvt_dev_info *rdi, u8 port_num,
			  struct ib_port_attr *props)
{
	struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = dd_from_dev(ibdev);
	struct qib_pportdata *ppd = &dd->pport[port_num - 1];
	enum ib_mtu mtu;
	u16 lid = ppd->lid;

	/* props being zeroed by the caller, avoid zeroing it here */
	props->lid = lid ? lid : be16_to_cpu(IB_LID_PERMISSIVE);
	props->lmc = ppd->lmc;
	props->state = dd->f_iblink_state(ppd->lastibcstat);
	props->phys_state = dd->f_ibphys_portstate(ppd->lastibcstat);
	props->gid_tbl_len = QIB_GUIDS_PER_PORT;
	props->active_width = ppd->link_width_active;
	/* See rate_show() */
	props->active_speed = ppd->link_speed_active;
	props->max_vl_num = qib_num_vls(ppd->vls_supported);

	props->max_mtu = qib_ibmtu ? qib_ibmtu : IB_MTU_4096;
	switch (ppd->ibmtu) {
	case 4096:
		mtu = IB_MTU_4096;
		break;
	case 2048:
		mtu = IB_MTU_2048;
		break;
	case 1024:
		mtu = IB_MTU_1024;
		break;
	case 512:
		mtu = IB_MTU_512;
		break;
	case 256:
		mtu = IB_MTU_256;
		break;
	default:
		mtu = IB_MTU_2048;
	}
	props->active_mtu = mtu;

	return 0;
}

static int qib_modify_device(struct ib_device *device,
			     int device_modify_mask,
			     struct ib_device_modify *device_modify)
{
	struct qib_devdata *dd = dd_from_ibdev(device);
	unsigned i;
	int ret;

	if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
				   IB_DEVICE_MODIFY_NODE_DESC)) {
		ret = -EOPNOTSUPP;
		goto bail;
	}

	if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) {
		memcpy(device->node_desc, device_modify->node_desc,
		       IB_DEVICE_NODE_DESC_MAX);
		for (i = 0; i < dd->num_pports; i++) {
			struct qib_ibport *ibp = &dd->pport[i].ibport_data;

			qib_node_desc_chg(ibp);
		}
	}

	if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) {
		ib_qib_sys_image_guid =
			cpu_to_be64(device_modify->sys_image_guid);
		for (i = 0; i < dd->num_pports; i++) {
			struct qib_ibport *ibp = &dd->pport[i].ibport_data;

			qib_sys_guid_chg(ibp);
		}
	}

	ret = 0;

bail:
	return ret;
}

static int qib_shut_down_port(struct rvt_dev_info *rdi, u8 port_num)
{
	struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = dd_from_dev(ibdev);
	struct qib_pportdata *ppd = &dd->pport[port_num - 1];

	qib_set_linkstate(ppd, QIB_IB_LINKDOWN);

	return 0;
}

static int qib_get_guid_be(struct rvt_dev_info *rdi, struct rvt_ibport *rvp,
			   int guid_index, __be64 *guid)
{
	struct qib_ibport *ibp = container_of(rvp, struct qib_ibport, rvp);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);

	if (guid_index == 0)
		*guid = ppd->guid;
	else if (guid_index < QIB_GUIDS_PER_PORT)
		*guid = ibp->guids[guid_index - 1];
	else
		return -EINVAL;

	return 0;
}

int qib_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr)
{
	if (rdma_ah_get_sl(ah_attr) > 15)
		return -EINVAL;

	return 0;
}

static void qib_notify_new_ah(struct ib_device *ibdev,
			      struct rdma_ah_attr *ah_attr,
			      struct rvt_ah *ah)
{
	struct qib_ibport *ibp;
	struct qib_pportdata *ppd;

	/*
	 * Do not trust reading anything from rvt_ah at this point as it is not
	 * done being setup. We can however modify things which we need to set.
	 */

	ibp = to_iport(ibdev, rdma_ah_get_port_num(ah_attr));
	ppd = ppd_from_ibp(ibp);
	ah->vl = ibp->sl_to_vl[rdma_ah_get_sl(&ah->attr)];
	ah->log_pmtu = ilog2(ppd->ibmtu);
}

struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid)
{
	struct rdma_ah_attr attr;
	struct ib_ah *ah = ERR_PTR(-EINVAL);
	struct rvt_qp *qp0;
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_devdata *dd = dd_from_ppd(ppd);
	u8 port_num = ppd->port;

	memset(&attr, 0, sizeof(attr));
	attr.type = rdma_ah_find_type(&dd->verbs_dev.rdi.ibdev, port_num);
	rdma_ah_set_dlid(&attr, dlid);
	rdma_ah_set_port_num(&attr, port_num);
	rcu_read_lock();
	qp0 = rcu_dereference(ibp->rvp.qp[0]);
	if (qp0)
		ah = rdma_create_ah(qp0->ibqp.pd, &attr);
	rcu_read_unlock();
	return ah;
}

/**
 * qib_get_npkeys - return the size of the PKEY table for context 0
 * @dd: the qlogic_ib device
 */
unsigned qib_get_npkeys(struct qib_devdata *dd)
{
	return ARRAY_SIZE(dd->rcd[0]->pkeys);
}

/*
 * Return the indexed PKEY from the port PKEY table.
 * No need to validate rcd[ctxt]; the port is setup if we are here.
 */
unsigned qib_get_pkey(struct qib_ibport *ibp, unsigned index)
{
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_devdata *dd = ppd->dd;
	unsigned ctxt = ppd->hw_pidx;
	unsigned ret;

	/* dd->rcd null if mini_init or some init failures */
	if (!dd->rcd || index >= ARRAY_SIZE(dd->rcd[ctxt]->pkeys))
		ret = 0;
	else
		ret = dd->rcd[ctxt]->pkeys[index];

	return ret;
}

static void init_ibport(struct qib_pportdata *ppd)
{
	struct qib_verbs_counters cntrs;
	struct qib_ibport *ibp = &ppd->ibport_data;

	spin_lock_init(&ibp->rvp.lock);
	/* Set the prefix to the default value (see ch. 4.1.1) */
	ibp->rvp.gid_prefix = IB_DEFAULT_GID_PREFIX;
	ibp->rvp.sm_lid = be16_to_cpu(IB_LID_PERMISSIVE);
	ibp->rvp.port_cap_flags = IB_PORT_SYS_IMAGE_GUID_SUP |
		IB_PORT_CLIENT_REG_SUP | IB_PORT_SL_MAP_SUP |
		IB_PORT_TRAP_SUP | IB_PORT_AUTO_MIGR_SUP |
		IB_PORT_DR_NOTICE_SUP | IB_PORT_CAP_MASK_NOTICE_SUP |
		IB_PORT_OTHER_LOCAL_CHANGES_SUP;
	if (ppd->dd->flags & QIB_HAS_LINK_LATENCY)
		ibp->rvp.port_cap_flags |= IB_PORT_LINK_LATENCY_SUP;
	ibp->rvp.pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
	ibp->rvp.pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
	ibp->rvp.pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
	ibp->rvp.pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
	ibp->rvp.pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;

	/* Snapshot current HW counters to "clear" them. */
	qib_get_counters(ppd, &cntrs);
	ibp->z_symbol_error_counter = cntrs.symbol_error_counter;
	ibp->z_link_error_recovery_counter =
		cntrs.link_error_recovery_counter;
	ibp->z_link_downed_counter = cntrs.link_downed_counter;
	ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
	ibp->z_port_rcv_remphys_errors = cntrs.port_rcv_remphys_errors;
	ibp->z_port_xmit_discards = cntrs.port_xmit_discards;
	ibp->z_port_xmit_data = cntrs.port_xmit_data;
	ibp->z_port_rcv_data = cntrs.port_rcv_data;
	ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
	ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
	ibp->z_local_link_integrity_errors =
		cntrs.local_link_integrity_errors;
	ibp->z_excessive_buffer_overrun_errors =
		cntrs.excessive_buffer_overrun_errors;
	ibp->z_vl15_dropped = cntrs.vl15_dropped;
	RCU_INIT_POINTER(ibp->rvp.qp[0], NULL);
	RCU_INIT_POINTER(ibp->rvp.qp[1], NULL);
}

/**
 * qib_fill_device_attr - Fill in rvt dev info device attributes.
 * @dd: the device data structure
 */
static void qib_fill_device_attr(struct qib_devdata *dd)
{
	struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;

	memset(&rdi->dparms.props, 0, sizeof(rdi->dparms.props));

	rdi->dparms.props.max_pd = ib_qib_max_pds;
	rdi->dparms.props.max_ah = ib_qib_max_ahs;
	rdi->dparms.props.device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
		IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
		IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
		IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE;
	rdi->dparms.props.page_size_cap = PAGE_SIZE;
	rdi->dparms.props.vendor_id =
		QIB_SRC_OUI_1 << 16 | QIB_SRC_OUI_2 << 8 | QIB_SRC_OUI_3;
	rdi->dparms.props.vendor_part_id = dd->deviceid;
	rdi->dparms.props.hw_ver = dd->minrev;
	rdi->dparms.props.sys_image_guid = ib_qib_sys_image_guid;
	rdi->dparms.props.max_mr_size = ~0ULL;
	rdi->dparms.props.max_qp = ib_qib_max_qps;
	rdi->dparms.props.max_qp_wr = ib_qib_max_qp_wrs;
	rdi->dparms.props.max_sge = ib_qib_max_sges;
	rdi->dparms.props.max_sge_rd = ib_qib_max_sges;
	rdi->dparms.props.max_cq = ib_qib_max_cqs;
	rdi->dparms.props.max_cqe = ib_qib_max_cqes;
	rdi->dparms.props.max_ah = ib_qib_max_ahs;
	rdi->dparms.props.max_mr = rdi->lkey_table.max;
	rdi->dparms.props.max_fmr = rdi->lkey_table.max;
	rdi->dparms.props.max_map_per_fmr = 32767;
	rdi->dparms.props.max_qp_rd_atom = QIB_MAX_RDMA_ATOMIC;
	rdi->dparms.props.max_qp_init_rd_atom = 255;
	rdi->dparms.props.max_srq = ib_qib_max_srqs;
	rdi->dparms.props.max_srq_wr = ib_qib_max_srq_wrs;
	rdi->dparms.props.max_srq_sge = ib_qib_max_srq_sges;
	rdi->dparms.props.atomic_cap = IB_ATOMIC_GLOB;
	rdi->dparms.props.max_pkeys = qib_get_npkeys(dd);
	rdi->dparms.props.max_mcast_grp = ib_qib_max_mcast_grps;
	rdi->dparms.props.max_mcast_qp_attach = ib_qib_max_mcast_qp_attached;
	rdi->dparms.props.max_total_mcast_qp_attach =
		rdi->dparms.props.max_mcast_qp_attach *
		rdi->dparms.props.max_mcast_grp;
	/* post send table */
	dd->verbs_dev.rdi.post_parms = qib_post_parms;
}

/**
 * qib_register_ib_device - register our device with the infiniband core
 * @dd: the device data structure
 * Return 0 on success, negative errno on failure.
 */
int qib_register_ib_device(struct qib_devdata *dd)
{
	struct qib_ibdev *dev = &dd->verbs_dev;
	struct ib_device *ibdev = &dev->rdi.ibdev;
	struct qib_pportdata *ppd = dd->pport;
	unsigned i, ctxt;
	int ret;

	get_random_bytes(&dev->qp_rnd, sizeof(dev->qp_rnd));
	for (i = 0; i < dd->num_pports; i++)
		init_ibport(ppd + i);

	/* Only need to initialize non-zero fields. */
	setup_timer(&dev->mem_timer, mem_timer, (unsigned long)dev);

	INIT_LIST_HEAD(&dev->piowait);
	INIT_LIST_HEAD(&dev->dmawait);
	INIT_LIST_HEAD(&dev->txwait);
	INIT_LIST_HEAD(&dev->memwait);
	INIT_LIST_HEAD(&dev->txreq_free);

	if (ppd->sdma_descq_cnt) {
		dev->pio_hdrs = dma_alloc_coherent(&dd->pcidev->dev,
						ppd->sdma_descq_cnt *
						sizeof(struct qib_pio_header),
						&dev->pio_hdrs_phys,
						GFP_KERNEL);
		if (!dev->pio_hdrs) {
			ret = -ENOMEM;
			goto err_hdrs;
		}
	}

	for (i = 0; i < ppd->sdma_descq_cnt; i++) {
		struct qib_verbs_txreq *tx;

		tx = kzalloc(sizeof(*tx), GFP_KERNEL);
		if (!tx) {
			ret = -ENOMEM;
			goto err_tx;
		}
		tx->hdr_inx = i;
		list_add(&tx->txreq.list, &dev->txreq_free);
	}

	/*
	 * The system image GUID is supposed to be the same for all
	 * IB HCAs in a single system but since there can be other
	 * device types in the system, we can't be sure this is unique.
	 */
	if (!ib_qib_sys_image_guid)
		ib_qib_sys_image_guid = ppd->guid;

	strlcpy(ibdev->name, "qib%d", IB_DEVICE_NAME_MAX);
	ibdev->owner = THIS_MODULE;
	ibdev->node_guid = ppd->guid;
	ibdev->phys_port_cnt = dd->num_pports;
	ibdev->dev.parent = &dd->pcidev->dev;
	ibdev->modify_device = qib_modify_device;
	ibdev->process_mad = qib_process_mad;

	snprintf(ibdev->node_desc, sizeof(ibdev->node_desc),
		 "Intel Infiniband HCA %s", init_utsname()->nodename);

	/*
	 * Fill in rvt info object.
	 */
	dd->verbs_dev.rdi.driver_f.port_callback = qib_create_port_files;
	dd->verbs_dev.rdi.driver_f.get_card_name = qib_get_card_name;
	dd->verbs_dev.rdi.driver_f.get_pci_dev = qib_get_pci_dev;
	dd->verbs_dev.rdi.driver_f.check_ah = qib_check_ah;
	dd->verbs_dev.rdi.driver_f.check_send_wqe = qib_check_send_wqe;
	dd->verbs_dev.rdi.driver_f.notify_new_ah = qib_notify_new_ah;
	dd->verbs_dev.rdi.driver_f.alloc_qpn = qib_alloc_qpn;
	dd->verbs_dev.rdi.driver_f.qp_priv_alloc = qib_qp_priv_alloc;
	dd->verbs_dev.rdi.driver_f.qp_priv_free = qib_qp_priv_free;
	dd->verbs_dev.rdi.driver_f.free_all_qps = qib_free_all_qps;
	dd->verbs_dev.rdi.driver_f.notify_qp_reset = qib_notify_qp_reset;
	dd->verbs_dev.rdi.driver_f.do_send = qib_do_send;
	dd->verbs_dev.rdi.driver_f.schedule_send = qib_schedule_send;
	dd->verbs_dev.rdi.driver_f.quiesce_qp = qib_quiesce_qp;
	dd->verbs_dev.rdi.driver_f.stop_send_queue = qib_stop_send_queue;
	dd->verbs_dev.rdi.driver_f.flush_qp_waiters = qib_flush_qp_waiters;
	dd->verbs_dev.rdi.driver_f.notify_error_qp = qib_notify_error_qp;
	dd->verbs_dev.rdi.driver_f.notify_restart_rc = qib_restart_rc;
	dd->verbs_dev.rdi.driver_f.mtu_to_path_mtu = qib_mtu_to_path_mtu;
	dd->verbs_dev.rdi.driver_f.mtu_from_qp = qib_mtu_from_qp;
	dd->verbs_dev.rdi.driver_f.get_pmtu_from_attr = qib_get_pmtu_from_attr;
	dd->verbs_dev.rdi.driver_f.schedule_send_no_lock = _qib_schedule_send;
	dd->verbs_dev.rdi.driver_f.query_port_state = qib_query_port;
	dd->verbs_dev.rdi.driver_f.shut_down_port = qib_shut_down_port;
	dd->verbs_dev.rdi.driver_f.cap_mask_chg = qib_cap_mask_chg;
	dd->verbs_dev.rdi.driver_f.notify_create_mad_agent =
						qib_notify_create_mad_agent;
	dd->verbs_dev.rdi.driver_f.notify_free_mad_agent =
						qib_notify_free_mad_agent;

	dd->verbs_dev.rdi.dparms.max_rdma_atomic = QIB_MAX_RDMA_ATOMIC;
	dd->verbs_dev.rdi.driver_f.get_guid_be = qib_get_guid_be;
	dd->verbs_dev.rdi.dparms.lkey_table_size = qib_lkey_table_size;
	dd->verbs_dev.rdi.dparms.qp_table_size = ib_qib_qp_table_size;
	dd->verbs_dev.rdi.dparms.qpn_start = 1;
	dd->verbs_dev.rdi.dparms.qpn_res_start = QIB_KD_QP;
	dd->verbs_dev.rdi.dparms.qpn_res_end = QIB_KD_QP; /* Reserve one QP */
	dd->verbs_dev.rdi.dparms.qpn_inc = 1;
	dd->verbs_dev.rdi.dparms.qos_shift = 1;
	dd->verbs_dev.rdi.dparms.psn_mask = QIB_PSN_MASK;
	dd->verbs_dev.rdi.dparms.psn_shift = QIB_PSN_SHIFT;
	dd->verbs_dev.rdi.dparms.psn_modify_mask = QIB_PSN_MASK;
	dd->verbs_dev.rdi.dparms.nports = dd->num_pports;
	dd->verbs_dev.rdi.dparms.npkeys = qib_get_npkeys(dd);
	dd->verbs_dev.rdi.dparms.node = dd->assigned_node_id;
	dd->verbs_dev.rdi.dparms.core_cap_flags = RDMA_CORE_PORT_IBA_IB;
	dd->verbs_dev.rdi.dparms.max_mad_size = IB_MGMT_MAD_SIZE;

	snprintf(dd->verbs_dev.rdi.dparms.cq_name,
		 sizeof(dd->verbs_dev.rdi.dparms.cq_name),
		 "qib_cq%d", dd->unit);

	qib_fill_device_attr(dd);

	ppd = dd->pport;
	for (i = 0; i < dd->num_pports; i++, ppd++) {
		ctxt = ppd->hw_pidx;
		rvt_init_port(&dd->verbs_dev.rdi,
			      &ppd->ibport_data.rvp,
			      i,
			      dd->rcd[ctxt]->pkeys);
	}

	ret = rvt_register_device(&dd->verbs_dev.rdi);
	if (ret)
		goto err_tx;

	ret = qib_verbs_register_sysfs(dd);
	if (ret)
		goto err_class;

	return ret;

err_class:
	rvt_unregister_device(&dd->verbs_dev.rdi);
err_tx:
	while (!list_empty(&dev->txreq_free)) {
		struct list_head *l = dev->txreq_free.next;
		struct qib_verbs_txreq *tx;

		list_del(l);
		tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
		kfree(tx);
	}
	if (ppd->sdma_descq_cnt)
		dma_free_coherent(&dd->pcidev->dev,
				  ppd->sdma_descq_cnt *
					  sizeof(struct qib_pio_header),
				  dev->pio_hdrs, dev->pio_hdrs_phys);
err_hdrs:
	qib_dev_err(dd, "cannot register verbs: %d!\n", -ret);
	return ret;
}

void qib_unregister_ib_device(struct qib_devdata *dd)
{
	struct qib_ibdev *dev = &dd->verbs_dev;

	qib_verbs_unregister_sysfs(dd);

	rvt_unregister_device(&dd->verbs_dev.rdi);

	if (!list_empty(&dev->piowait))
		qib_dev_err(dd, "piowait list not empty!\n");
	if (!list_empty(&dev->dmawait))
		qib_dev_err(dd, "dmawait list not empty!\n");
	if (!list_empty(&dev->txwait))
		qib_dev_err(dd, "txwait list not empty!\n");
	if (!list_empty(&dev->memwait))
		qib_dev_err(dd, "memwait list not empty!\n");

	del_timer_sync(&dev->mem_timer);
	while (!list_empty(&dev->txreq_free)) {
		struct list_head *l = dev->txreq_free.next;
		struct qib_verbs_txreq *tx;

		list_del(l);
		tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
		kfree(tx);
	}
	if (dd->pport->sdma_descq_cnt)
		dma_free_coherent(&dd->pcidev->dev,
				  dd->pport->sdma_descq_cnt *
					  sizeof(struct qib_pio_header),
				  dev->pio_hdrs, dev->pio_hdrs_phys);
}

/**
 * _qib_schedule_send - schedule progress
 * @qp: the QP
 *
 * This schedules progress w/o regard to the s_flags.
 *
 * It is only used in post send, which doesn't hold
 * the s_lock.
 */
void _qib_schedule_send(struct rvt_qp *qp)
{
	struct qib_ibport *ibp =
		to_iport(qp->ibqp.device, qp->port_num);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_qp_priv *priv = qp->priv;

	queue_work(ppd->qib_wq, &priv->s_work);
}

/**
 * qib_schedule_send - schedule progress
 * @qp: the QP
 *
 * This schedules qp progress. The s_lock
 * should be held.
 */
void qib_schedule_send(struct rvt_qp *qp)
{
	if (qib_send_ok(qp))
		_qib_schedule_send(qp);
}
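
/*
 * Send-path overview (a sketch, not normative): rdmavt invokes
 * qib_do_send() via the work queued by _qib_schedule_send(); that
 * builds a packet and hands it to qib_verbs_send(), which picks the
 * PIO path (qib_verbs_send_pio()) or the SDMA path
 * (qib_verbs_send_dma()) based on QP type and chip capabilities.
 */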