/*
 * Copyright (c) 2006 Mellanox Technologies. All rights reserved
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_cm.h>
#include <net/dst.h>
#include <net/icmp.h>
#include <linux/icmpv6.h>
#include <linux/delay.h>
#include <linux/vmalloc.h>

#include "ipoib.h"

int ipoib_max_conn_qp = 128;

module_param_named(max_nonsrq_conn_qp, ipoib_max_conn_qp, int, 0444);
MODULE_PARM_DESC(max_nonsrq_conn_qp,
		 "Max number of connected-mode QPs per interface "
		 "(applied only if shared receive queue is not available)");

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param_named(cm_data_debug_level, data_debug_level, int, 0644);
MODULE_PARM_DESC(cm_data_debug_level,
		 "Enable data path debug tracing for connected mode if > 0");
#endif

#define IPOIB_CM_IETF_ID 0x1000000000000000ULL

#define IPOIB_CM_RX_UPDATE_TIME (256 * HZ)
#define IPOIB_CM_RX_TIMEOUT     (2 * 256 * HZ)
#define IPOIB_CM_RX_DELAY       (3 * 256 * HZ)
#define IPOIB_CM_RX_UPDATE_MASK (0x3)
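
/*
 * Passive (RX) connections are kept on an LRU list: a connection's
 * timestamp is refreshed at most once per IPOIB_CM_RX_UPDATE_TIME, and
 * only on receives whose WR ID has (wr_id & IPOIB_CM_RX_UPDATE_MASK) == 0
 * to bound the cost; the stale task retires connections that have been
 * idle for more than IPOIB_CM_RX_TIMEOUT.
 */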

static struct ib_qp_attr ipoib_cm_err_attr = {
	.qp_state = IB_QPS_ERR
};

#define IPOIB_CM_RX_DRAIN_WRID 0xffffffff

static struct ib_send_wr ipoib_cm_rx_drain_wr = {
	.wr_id = IPOIB_CM_RX_DRAIN_WRID,
	.opcode = IB_WR_SEND,
};
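
/*
 * ipoib_cm_rx_drain_wr is posted by ipoib_cm_start_rx_drain() on a
 * receive QP that has already been moved to the error state; its flush
 * completion on the shared CQ indicates that the receive WRs previously
 * posted on the drained QPs have all been reaped.
 */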

static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event);

static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags,
				  u64 mapping[IPOIB_CM_RX_SG])
{
	int i;

	ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);

	for (i = 0; i < frags; ++i)
		ib_dma_unmap_single(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE);
}

static int ipoib_cm_post_receive_srq(struct net_device *dev, int id)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_recv_wr *bad_wr;
	int i, ret;

	priv->cm.rx_wr.wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;

	for (i = 0; i < priv->cm.num_frags; ++i)
		priv->cm.rx_sge[i].addr = priv->cm.srq_ring[id].mapping[i];

	ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, &bad_wr);
	if (unlikely(ret)) {
		ipoib_warn(priv, "post srq failed for buf %d (%d)\n", id, ret);
		ipoib_cm_dma_unmap_rx(priv, priv->cm.num_frags - 1,
				      priv->cm.srq_ring[id].mapping);
		dev_kfree_skb_any(priv->cm.srq_ring[id].skb);
		priv->cm.srq_ring[id].skb = NULL;
	}

	return ret;
}

static int ipoib_cm_post_receive_nonsrq(struct net_device *dev,
					struct ipoib_cm_rx *rx,
					struct ib_recv_wr *wr,
					struct ib_sge *sge, int id)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_recv_wr *bad_wr;
	int i, ret;

	wr->wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;

	for (i = 0; i < IPOIB_CM_RX_SG; ++i)
		sge[i].addr = rx->rx_ring[id].mapping[i];

	ret = ib_post_recv(rx->qp, wr, &bad_wr);
	if (unlikely(ret)) {
		ipoib_warn(priv, "post recv failed for buf %d (%d)\n", id, ret);
		ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
				      rx->rx_ring[id].mapping);
		dev_kfree_skb_any(rx->rx_ring[id].skb);
		rx->rx_ring[id].skb = NULL;
	}

	return ret;
}
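
/*
 * A connected-mode receive buffer is an skb with an IPOIB_CM_HEAD_SIZE
 * linear part plus up to IPOIB_CM_RX_SG - 1 page fragments; mapping[0]
 * holds the DMA address of the head and mapping[1..frags] the addresses
 * of the pages.
 */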

static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
					     struct ipoib_cm_rx_buf *rx_ring,
					     int id, int frags,
					     u64 mapping[IPOIB_CM_RX_SG])
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct sk_buff *skb;
	int i;

	skb = dev_alloc_skb(IPOIB_CM_HEAD_SIZE + 12);
	if (unlikely(!skb))
		return NULL;

	/*
	 * IPoIB adds a 4 byte header. So we need 12 more bytes to align the
	 * IP header to a multiple of 16.
	 */
	skb_reserve(skb, 12);

	mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE,
				       DMA_FROM_DEVICE);
	if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	for (i = 0; i < frags; i++) {
		struct page *page = alloc_page(GFP_ATOMIC);

		if (!page)
			goto partial_error;
		skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);

		mapping[i + 1] = ib_dma_map_page(priv->ca, skb_shinfo(skb)->frags[i].page,
						 0, PAGE_SIZE, DMA_FROM_DEVICE);
		if (unlikely(ib_dma_mapping_error(priv->ca, mapping[i + 1])))
			goto partial_error;
	}

	rx_ring[id].skb = skb;
	return skb;

partial_error:

	ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);

	for (; i > 0; --i)
		ib_dma_unmap_single(priv->ca, mapping[i], PAGE_SIZE, DMA_FROM_DEVICE);

	dev_kfree_skb_any(skb);
	return NULL;
}

static void ipoib_cm_free_rx_ring(struct net_device *dev,
				  struct ipoib_cm_rx_buf *rx_ring)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < ipoib_recvq_size; ++i)
		if (rx_ring[i].skb) {
			ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
					      rx_ring[i].mapping);
			dev_kfree_skb_any(rx_ring[i].skb);
		}

	vfree(rx_ring);
}

static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv *priv)
{
	struct ib_send_wr *bad_wr;
	struct ipoib_cm_rx *p;

	/* We only reserved 1 extra slot in CQ for drain WRs, so
	 * make sure we have at most 1 outstanding WR. */
	if (list_empty(&priv->cm.rx_flush_list) ||
	    !list_empty(&priv->cm.rx_drain_list))
		return;

	/*
	 * QPs on flush list are error state. This way, a "flush
	 * error" WC will be immediately generated for each WR we post.
	 */
	p = list_entry(priv->cm.rx_flush_list.next, typeof(*p), list);
	if (ib_post_send(p->qp, &ipoib_cm_rx_drain_wr, &bad_wr))
		ipoib_warn(priv, "failed to post drain wr\n");

	list_splice_init(&priv->cm.rx_flush_list, &priv->cm.rx_drain_list);
}

static void ipoib_cm_rx_event_handler(struct ib_event *event, void *ctx)
{
	struct ipoib_cm_rx *p = ctx;
	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
	unsigned long flags;

	if (event->event != IB_EVENT_QP_LAST_WQE_REACHED)
		return;

	spin_lock_irqsave(&priv->lock, flags);
	list_move(&p->list, &priv->cm.rx_flush_list);
	p->state = IPOIB_CM_RX_FLUSH;
	ipoib_cm_start_rx_drain(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
}

static struct ib_qp *ipoib_cm_create_rx_qp(struct net_device *dev,
					   struct ipoib_cm_rx *p)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_init_attr attr = {
		.event_handler = ipoib_cm_rx_event_handler,
		.send_cq = priv->recv_cq, /* For drain WR */
		.recv_cq = priv->recv_cq,
		.srq = priv->cm.srq,
		.cap.max_send_wr = 1, /* For drain WR */
		.cap.max_send_sge = 1, /* FIXME: 0 Seems not to work */
		.sq_sig_type = IB_SIGNAL_ALL_WR,
		.qp_type = IB_QPT_RC,
		.qp_context = p,
	};

	if (!ipoib_cm_has_srq(dev)) {
		attr.cap.max_recv_wr  = ipoib_recvq_size;
		attr.cap.max_recv_sge = IPOIB_CM_RX_SG;
	}

	return ib_create_qp(priv->pd, &attr);
}
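
/*
 * Walk the passive side QP through INIT -> RTR -> RTS, using the
 * attribute masks that ib_cm_init_qp_attr() derives from the current CM
 * state for each transition.
 */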

static int ipoib_cm_modify_rx_qp(struct net_device *dev,
				 struct ib_cm_id *cm_id, struct ib_qp *qp,
				 unsigned psn)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for INIT: %d\n", ret);
		return ret;
	}
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to INIT: %d\n", ret);
		return ret;
	}
	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
		return ret;
	}
	qp_attr.rq_psn = psn;
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
		return ret;
	}

	/*
	 * Current Mellanox HCA firmware won't generate completions
	 * with error for drain WRs unless the QP has been moved to
	 * RTS first. This work-around leaves a window where a QP has
	 * moved to error asynchronously, but this will eventually get
	 * fixed in firmware, so let's not error out if modify QP
	 * fails.
	 */
	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
		return 0;
	}
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
		return 0;
	}

	return 0;
}

static void ipoib_cm_init_rx_wr(struct net_device *dev,
				struct ib_recv_wr *wr,
				struct ib_sge *sge)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < priv->cm.num_frags; ++i)
		sge[i].lkey = priv->mr->lkey;

	sge[0].length = IPOIB_CM_HEAD_SIZE;
	for (i = 1; i < priv->cm.num_frags; ++i)
		sge[i].length = PAGE_SIZE;

	wr->next    = NULL;
	wr->sg_list = sge;
	wr->num_sge = priv->cm.num_frags;
}
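
/*
 * Without an SRQ each passive connection gets its own receive ring; the
 * number of such connections is capped by max_nonsrq_conn_qp, and
 * further REQs are rejected with IB_CM_REJ_NO_QP once the cap is hit.
 */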

static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_id,
				   struct ipoib_cm_rx *rx)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct {
		struct ib_recv_wr wr;
		struct ib_sge sge[IPOIB_CM_RX_SG];
	} *t;
	int ret;
	int i;

	rx->rx_ring = vmalloc(ipoib_recvq_size * sizeof *rx->rx_ring);
	if (!rx->rx_ring) {
		printk(KERN_WARNING "%s: failed to allocate CM non-SRQ ring (%d entries)\n",
		       priv->ca->name, ipoib_recvq_size);
		return -ENOMEM;
	}

	memset(rx->rx_ring, 0, ipoib_recvq_size * sizeof *rx->rx_ring);

	t = kmalloc(sizeof *t, GFP_KERNEL);
	if (!t) {
		ret = -ENOMEM;
		goto err_free;
	}

	ipoib_cm_init_rx_wr(dev, &t->wr, t->sge);

	spin_lock_irq(&priv->lock);

	if (priv->cm.nonsrq_conn_qp >= ipoib_max_conn_qp) {
		spin_unlock_irq(&priv->lock);
		ib_send_cm_rej(cm_id, IB_CM_REJ_NO_QP, NULL, 0, NULL, 0);
		ret = -EINVAL;
		goto err_free;
	} else
		++priv->cm.nonsrq_conn_qp;

	spin_unlock_irq(&priv->lock);

	for (i = 0; i < ipoib_recvq_size; ++i) {
		if (!ipoib_cm_alloc_rx_skb(dev, rx->rx_ring, i, IPOIB_CM_RX_SG - 1,
					   rx->rx_ring[i].mapping)) {
			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
			ret = -ENOMEM;
			goto err_count;
		}
		ret = ipoib_cm_post_receive_nonsrq(dev, rx, &t->wr, t->sge, i);
		if (ret) {
			ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq "
				   "failed for buf %d\n", i);
			ret = -EIO;
			goto err_count;
		}
	}

	rx->recv_count = ipoib_recvq_size;

	kfree(t);

	return 0;

err_count:
	spin_lock_irq(&priv->lock);
	--priv->cm.nonsrq_conn_qp;
	spin_unlock_irq(&priv->lock);

err_free:
	kfree(t);
	ipoib_cm_free_rx_ring(dev, rx->rx_ring);

	return ret;
}

static int ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id,
			     struct ib_qp *qp, struct ib_cm_req_event_param *req,
			     unsigned psn)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_data data = {};
	struct ib_cm_rep_param rep = {};

	data.qpn = cpu_to_be32(priv->qp->qp_num);
	data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);

	rep.private_data = &data;
	rep.private_data_len = sizeof data;
	rep.flow_control = 0;
	rep.rnr_retry_count = req->rnr_retry_count;
	rep.srq = ipoib_cm_has_srq(dev);
	rep.qp_num = qp->qp_num;
	rep.starting_psn = psn;
	return ib_send_cm_rep(cm_id, &rep);
}
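
/*
 * Passive side connection setup: on REQ, create and bring up a receive
 * QP, start the stale-connection timer, and answer with a REP carrying
 * our datagram QPN and receive buffer size in the private data.
 */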

static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct net_device *dev = cm_id->context;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_rx *p;
	unsigned psn;
	int ret;

	ipoib_dbg(priv, "REQ arrived\n");
	p = kzalloc(sizeof *p, GFP_KERNEL);
	if (!p)
		return -ENOMEM;
	p->dev = dev;
	p->id = cm_id;
	cm_id->context = p;
	p->state = IPOIB_CM_RX_LIVE;
	p->jiffies = jiffies;
	INIT_LIST_HEAD(&p->list);

	p->qp = ipoib_cm_create_rx_qp(dev, p);
	if (IS_ERR(p->qp)) {
		ret = PTR_ERR(p->qp);
		goto err_qp;
	}

	psn = random32() & 0xffffff;
	ret = ipoib_cm_modify_rx_qp(dev, cm_id, p->qp, psn);
	if (ret)
		goto err_modify;

	if (!ipoib_cm_has_srq(dev)) {
		ret = ipoib_cm_nonsrq_init_rx(dev, cm_id, p);
		if (ret)
			goto err_modify;
	}

	spin_lock_irq(&priv->lock);
	queue_delayed_work(ipoib_workqueue,
			   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
	/* Add this entry to passive ids list head, but do not re-add it
	 * if IB_EVENT_QP_LAST_WQE_REACHED has moved it to flush list. */
	p->jiffies = jiffies;
	if (p->state == IPOIB_CM_RX_LIVE)
		list_move(&p->list, &priv->cm.passive_ids);
	spin_unlock_irq(&priv->lock);

	ret = ipoib_cm_send_rep(dev, cm_id, p->qp, &event->param.req_rcvd, psn);
	if (ret) {
		ipoib_warn(priv, "failed to send REP: %d\n", ret);
		if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
			ipoib_warn(priv, "unable to move qp to error state\n");
	}
	return 0;

err_modify:
	ib_destroy_qp(p->qp);
err_qp:
	kfree(p);
	return ret;
}

static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event)
{
	struct ipoib_cm_rx *p;
	struct ipoib_dev_priv *priv;

	switch (event->event) {
	case IB_CM_REQ_RECEIVED:
		return ipoib_cm_req_handler(cm_id, event);
	case IB_CM_DREQ_RECEIVED:
		p = cm_id->context;
		ib_send_cm_drep(cm_id, NULL, 0);
		/* Fall through */
	case IB_CM_REJ_RECEIVED:
		p = cm_id->context;
		priv = netdev_priv(p->dev);
		if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
			ipoib_warn(priv, "unable to move qp to error state\n");
		/* Fall through */
	default:
		return 0;
	}
}

/* Adjust length of skb with fragments to match received data */
static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
			  unsigned int length, struct sk_buff *toskb)
{
	int i, num_frags;
	unsigned int size;

	/* put header into skb */
	size = min(length, hdr_space);
	skb->tail += size;
	skb->len += size;
	length -= size;

	num_frags = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < num_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		if (length == 0) {
			/* don't need this page */
			skb_fill_page_desc(toskb, i, frag->page, 0, PAGE_SIZE);
			--skb_shinfo(skb)->nr_frags;
		} else {
			size = min(length, (unsigned) PAGE_SIZE);

			frag->size = size;
			skb->data_len += size;
			skb->truesize += size;
			skb->len += size;
			length -= size;
		}
	}
}
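
/*
 * Receive completion handling: drain-WR completions move the drain list
 * to the reap list; small packets (< IPOIB_CM_COPYBREAK) are copied into
 * a freshly allocated skb; larger ones are passed up whole and the ring
 * slot is refilled with a newly allocated buffer, falling back to
 * reusing the old buffer if allocation fails.
 */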

void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_rx_buf *rx_ring;
	unsigned int wr_id = wc->wr_id & ~(IPOIB_OP_CM | IPOIB_OP_RECV);
	struct sk_buff *skb, *newskb;
	struct ipoib_cm_rx *p;
	unsigned long flags;
	u64 mapping[IPOIB_CM_RX_SG];
	int frags;
	int has_srq;
	struct sk_buff *small_skb;

	ipoib_dbg_data(priv, "cm recv completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_recvq_size)) {
		if (wr_id == (IPOIB_CM_RX_DRAIN_WRID & ~(IPOIB_OP_CM | IPOIB_OP_RECV))) {
			spin_lock_irqsave(&priv->lock, flags);
			list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list);
			ipoib_cm_start_rx_drain(priv);
			queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
			spin_unlock_irqrestore(&priv->lock, flags);
		} else
			ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n",
				   wr_id, ipoib_recvq_size);
		return;
	}

	p = wc->qp->qp_context;

	has_srq = ipoib_cm_has_srq(dev);
	rx_ring = has_srq ? priv->cm.srq_ring : p->rx_ring;

	skb = rx_ring[wr_id].skb;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		ipoib_dbg(priv, "cm recv error "
			  "(status=%d, wrid=%d vend_err %x)\n",
			  wc->status, wr_id, wc->vendor_err);
		++dev->stats.rx_dropped;
		if (has_srq)
			goto repost;
		else {
			if (!--p->recv_count) {
				spin_lock_irqsave(&priv->lock, flags);
				list_move(&p->list, &priv->cm.rx_reap_list);
				spin_unlock_irqrestore(&priv->lock, flags);
				queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
			}
			return;
		}
	}

	if (unlikely(!(wr_id & IPOIB_CM_RX_UPDATE_MASK))) {
		if (p && time_after_eq(jiffies, p->jiffies + IPOIB_CM_RX_UPDATE_TIME)) {
			spin_lock_irqsave(&priv->lock, flags);
			p->jiffies = jiffies;
			/* Move this entry to list head, but do not re-add it
			 * if it has been moved out of list. */
			if (p->state == IPOIB_CM_RX_LIVE)
				list_move(&p->list, &priv->cm.passive_ids);
			spin_unlock_irqrestore(&priv->lock, flags);
		}
	}

	if (wc->byte_len < IPOIB_CM_COPYBREAK) {
		int dlen = wc->byte_len;

		small_skb = dev_alloc_skb(dlen + 12);
		if (small_skb) {
			skb_reserve(small_skb, 12);
			ib_dma_sync_single_for_cpu(priv->ca, rx_ring[wr_id].mapping[0],
						   dlen, DMA_FROM_DEVICE);
			skb_copy_from_linear_data(skb, small_skb->data, dlen);
			ib_dma_sync_single_for_device(priv->ca, rx_ring[wr_id].mapping[0],
						      dlen, DMA_FROM_DEVICE);
			skb_put(small_skb, dlen);
			skb = small_skb;
			goto copied;
		}
	}

	frags = PAGE_ALIGN(wc->byte_len - min(wc->byte_len,
					      (unsigned)IPOIB_CM_HEAD_SIZE)) / PAGE_SIZE;

	newskb = ipoib_cm_alloc_rx_skb(dev, rx_ring, wr_id, frags, mapping);
	if (unlikely(!newskb)) {
		/*
		 * If we can't allocate a new RX buffer, dump
		 * this packet and reuse the old buffer.
		 */
		ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
		++dev->stats.rx_dropped;
		goto repost;
	}

	ipoib_cm_dma_unmap_rx(priv, frags, rx_ring[wr_id].mapping);
	memcpy(rx_ring[wr_id].mapping, mapping, (frags + 1) * sizeof *mapping);

	ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
		       wc->byte_len, wc->slid);

	skb_put_frags(skb, IPOIB_CM_HEAD_SIZE, wc->byte_len, newskb);

copied:
	skb->protocol = ((struct ipoib_header *) skb->data)->proto;
	skb_reset_mac_header(skb);
	skb_pull(skb, IPOIB_ENCAP_LEN);

	dev->last_rx = jiffies;
	++dev->stats.rx_packets;
	dev->stats.rx_bytes += skb->len;

	skb->dev = dev;
	/* XXX get correct PACKET_ type here */
	skb->pkt_type = PACKET_HOST;
	netif_receive_skb(skb);

repost:
	if (has_srq) {
		if (unlikely(ipoib_cm_post_receive_srq(dev, wr_id)))
			ipoib_warn(priv, "ipoib_cm_post_receive_srq failed "
				   "for buf %d\n", wr_id);
	} else {
		if (unlikely(ipoib_cm_post_receive_nonsrq(dev, p,
							  &priv->cm.rx_wr,
							  priv->cm.rx_sge,
							  wr_id))) {
			--p->recv_count;
			ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq failed "
				   "for buf %d\n", wr_id);
		}
	}
}

static inline int post_send(struct ipoib_dev_priv *priv,
			    struct ipoib_cm_tx *tx,
			    unsigned int wr_id,
			    u64 addr, int len)
{
	struct ib_send_wr *bad_wr;

	priv->tx_sge[0].addr   = addr;
	priv->tx_sge[0].length = len;

	priv->tx_wr.num_sge = 1;
	priv->tx_wr.wr_id   = wr_id | IPOIB_OP_CM;

	return ib_post_send(tx->qp, &priv->tx_wr, &bad_wr);
}

void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_tx_buf *tx_req;
	u64 addr;

	if (unlikely(skb->len > tx->mtu)) {
		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
			   skb->len, tx->mtu);
		++dev->stats.tx_dropped;
		++dev->stats.tx_errors;
		ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
		return;
	}

	ipoib_dbg_data(priv, "sending packet: head 0x%x length %d connection 0x%x\n",
		       tx->tx_head, skb->len, tx->qp->qp_num);

	/*
	 * We put the skb into the tx_ring _before_ we call post_send()
	 * because it's entirely possible that the completion handler will
	 * run before we execute anything after the post_send().  That
	 * means we have to make sure everything is properly recorded and
	 * our state is consistent before we call post_send().
	 */
	tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)];
	tx_req->skb = skb;
	addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
		++dev->stats.tx_errors;
		dev_kfree_skb_any(skb);
		return;
	}

	tx_req->mapping = addr;

	if (unlikely(post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
			       addr, skb->len))) {
		ipoib_warn(priv, "post_send failed\n");
		++dev->stats.tx_errors;
		ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	} else {
		dev->trans_start = jiffies;
		++tx->tx_head;

		if (++priv->tx_outstanding == ipoib_sendq_size) {
			ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
				  tx->qp->qp_num);
			netif_stop_queue(dev);
		}
	}
}

void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_tx *tx = wc->qp->qp_context;
	unsigned int wr_id = wc->wr_id & ~IPOIB_OP_CM;
	struct ipoib_cm_tx_buf *tx_req;
	unsigned long flags;

	ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_sendq_size)) {
		ipoib_warn(priv, "cm send completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_sendq_size);
		return;
	}

	tx_req = &tx->tx_ring[wr_id];

	ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE);

	/* FIXME: is this right? Shouldn't we only increment on success? */
	++dev->stats.tx_packets;
	dev->stats.tx_bytes += tx_req->skb->len;

	dev_kfree_skb_any(tx_req->skb);

	netif_tx_lock(dev);

	++tx->tx_tail;
	if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
	    netif_queue_stopped(dev) &&
	    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
		netif_wake_queue(dev);

	if (wc->status != IB_WC_SUCCESS &&
	    wc->status != IB_WC_WR_FLUSH_ERR) {
		struct ipoib_neigh *neigh;

		ipoib_dbg(priv, "failed cm send event "
			  "(status=%d, wrid=%d vend_err %x)\n",
			  wc->status, wr_id, wc->vendor_err);

		spin_lock_irqsave(&priv->lock, flags);
		neigh = tx->neigh;

		if (neigh) {
			neigh->cm = NULL;
			list_del(&neigh->list);
			if (neigh->ah)
				ipoib_put_ah(neigh->ah);
			ipoib_neigh_free(dev, neigh);

			tx->neigh = NULL;
		}

		if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
			list_move(&tx->list, &priv->cm.reap_list);
			queue_work(ipoib_workqueue, &priv->cm.reap_task);
		}

		clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);

		spin_unlock_irqrestore(&priv->lock, flags);
	}

	netif_tx_unlock(dev);
}
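
/*
 * Start listening for connected-mode REQs on the IETF-assigned IPoIB
 * service ID prefix combined with our UD QP number.
 */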

int ipoib_cm_dev_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int ret;

	if (!IPOIB_CM_SUPPORTED(dev->dev_addr))
		return 0;

	priv->cm.id = ib_create_cm_id(priv->ca, ipoib_cm_rx_handler, dev);
	if (IS_ERR(priv->cm.id)) {
		printk(KERN_WARNING "%s: failed to create CM ID\n", priv->ca->name);
		ret = PTR_ERR(priv->cm.id);
		goto err_cm;
	}

	ret = ib_cm_listen(priv->cm.id, cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num),
			   0, NULL);
	if (ret) {
		printk(KERN_WARNING "%s: failed to listen on ID 0x%llx\n", priv->ca->name,
		       IPOIB_CM_IETF_ID | priv->qp->qp_num);
		goto err_listen;
	}

	return 0;

err_listen:
	ib_destroy_cm_id(priv->cm.id);

err_cm:
	priv->cm.id = NULL;
	return ret;
}

static void ipoib_cm_free_rx_reap_list(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_rx *rx, *n;
	LIST_HEAD(list);

	spin_lock_irq(&priv->lock);
	list_splice_init(&priv->cm.rx_reap_list, &list);
	spin_unlock_irq(&priv->lock);

	list_for_each_entry_safe(rx, n, &list, list) {
		ib_destroy_cm_id(rx->id);
		ib_destroy_qp(rx->qp);
		if (!ipoib_cm_has_srq(dev)) {
			ipoib_cm_free_rx_ring(priv->dev, rx->rx_ring);
			spin_lock_irq(&priv->lock);
			--priv->cm.nonsrq_conn_qp;
			spin_unlock_irq(&priv->lock);
		}
		kfree(rx);
	}
}
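
/*
 * Tear down all passive connections: move them to the error state, wait
 * (up to five seconds) for their receive queues to drain, then reap the
 * CM IDs, QPs and rings.
 */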

void ipoib_cm_dev_stop(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_rx *p;
	unsigned long begin;
	int ret;

	if (!IPOIB_CM_SUPPORTED(dev->dev_addr) || !priv->cm.id)
		return;

	ib_destroy_cm_id(priv->cm.id);
	priv->cm.id = NULL;

	spin_lock_irq(&priv->lock);
	while (!list_empty(&priv->cm.passive_ids)) {
		p = list_entry(priv->cm.passive_ids.next, typeof(*p), list);
		list_move(&p->list, &priv->cm.rx_error_list);
		p->state = IPOIB_CM_RX_ERROR;
		spin_unlock_irq(&priv->lock);
		ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
		if (ret)
			ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
		spin_lock_irq(&priv->lock);
	}

	/* Wait for all RX to be drained */
	begin = jiffies;

	while (!list_empty(&priv->cm.rx_error_list) ||
	       !list_empty(&priv->cm.rx_flush_list) ||
	       !list_empty(&priv->cm.rx_drain_list)) {
		if (time_after(jiffies, begin + 5 * HZ)) {
			ipoib_warn(priv, "RX drain timing out\n");

			/*
			 * assume the HW is wedged and just free up everything.
			 */
			list_splice_init(&priv->cm.rx_flush_list,
					 &priv->cm.rx_reap_list);
			list_splice_init(&priv->cm.rx_error_list,
					 &priv->cm.rx_reap_list);
			list_splice_init(&priv->cm.rx_drain_list,
					 &priv->cm.rx_reap_list);
			break;
		}
		spin_unlock_irq(&priv->lock);
		msleep(1);
		ipoib_drain_cq(dev);
		spin_lock_irq(&priv->lock);
	}

	spin_unlock_irq(&priv->lock);

	ipoib_cm_free_rx_reap_list(dev);

	cancel_delayed_work(&priv->cm.stale_task);
}
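
/*
 * Active side: the peer's REP carries its receive buffer size in the
 * private data, which becomes the connection MTU; the TX QP is moved to
 * RTR/RTS, packets queued on the neighbour while the connection was
 * being set up are retransmitted, and an RTU completes the handshake.
 */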

static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct ipoib_cm_tx *p = cm_id->context;
	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
	struct ipoib_cm_data *data = event->private_data;
	struct sk_buff_head skqueue;
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;
	struct sk_buff *skb;

	p->mtu = be32_to_cpu(data->mtu);

	if (p->mtu <= IPOIB_ENCAP_LEN) {
		ipoib_warn(priv, "Rejecting connection: mtu %d <= %d\n",
			   p->mtu, IPOIB_ENCAP_LEN);
		return -EINVAL;
	}

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
		return ret;
	}

	qp_attr.rq_psn = 0 /* FIXME */;
	ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
		return ret;
	}

	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
		return ret;
	}
	ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
		return ret;
	}

	skb_queue_head_init(&skqueue);

	spin_lock_irq(&priv->lock);
	set_bit(IPOIB_FLAG_OPER_UP, &p->flags);
	if (p->neigh)
		while ((skb = __skb_dequeue(&p->neigh->queue)))
			__skb_queue_tail(&skqueue, skb);
	spin_unlock_irq(&priv->lock);

	while ((skb = __skb_dequeue(&skqueue))) {
		skb->dev = p->dev;
		if (dev_queue_xmit(skb))
			ipoib_warn(priv, "dev_queue_xmit failed "
				   "to requeue packet\n");
	}

	ret = ib_send_cm_rtu(cm_id, NULL, 0);
	if (ret) {
		ipoib_warn(priv, "failed to send RTU: %d\n", ret);
		return ret;
	}
	return 0;
}

static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_cm_tx *tx)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_init_attr attr = {
		.send_cq		= priv->recv_cq,
		.recv_cq		= priv->recv_cq,
		.srq			= priv->cm.srq,
		.cap.max_send_wr	= ipoib_sendq_size,
		.cap.max_send_sge	= 1,
		.sq_sig_type		= IB_SIGNAL_ALL_WR,
		.qp_type		= IB_QPT_RC,
		.qp_context		= tx
	};

	return ib_create_qp(priv->pd, &attr);
}

static int ipoib_cm_send_req(struct net_device *dev,
			     struct ib_cm_id *id, struct ib_qp *qp,
			     u32 qpn,
			     struct ib_sa_path_rec *pathrec)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_data data = {};
	struct ib_cm_req_param req = {};

	data.qpn = cpu_to_be32(priv->qp->qp_num);
	data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);

	req.primary_path		= pathrec;
	req.alternate_path		= NULL;
	req.service_id			= cpu_to_be64(IPOIB_CM_IETF_ID | qpn);
	req.qp_num			= qp->qp_num;
	req.qp_type			= qp->qp_type;
	req.private_data		= &data;
	req.private_data_len		= sizeof data;
	req.flow_control		= 0;

	req.starting_psn		= 0; /* FIXME */

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req.responder_resources		= 4;
	req.remote_cm_response_timeout	= 20;
	req.local_cm_response_timeout	= 20;
	req.retry_count			= 0; /* RFC draft warns against retries */
	req.rnr_retry_count		= 0; /* RFC draft warns against retries */
	req.max_cm_retries		= 15;
	req.srq				= ipoib_cm_has_srq(dev);
	return ib_send_cm_req(id, &req);
}
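
/*
 * Bring the active side QP to INIT: look up our pkey index and set the
 * port and access flags before the CM REP moves it on to RTR/RTS.
 */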

static int ipoib_cm_modify_tx_init(struct net_device *dev,
				   struct ib_cm_id *cm_id, struct ib_qp *qp)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;
	ret = ib_find_pkey(priv->ca, priv->port, priv->pkey, &qp_attr.pkey_index);
	if (ret) {
		ipoib_warn(priv, "pkey 0x%x not found: %d\n", priv->pkey, ret);
		return ret;
	}

	qp_attr.qp_state = IB_QPS_INIT;
	qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE;
	qp_attr.port_num = priv->port;
	qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT;

	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify tx QP to INIT: %d\n", ret);
		return ret;
	}
	return 0;
}

static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
			    struct ib_sa_path_rec *pathrec)
{
	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
	int ret;

	p->tx_ring = vmalloc(ipoib_sendq_size * sizeof *p->tx_ring);
	if (!p->tx_ring) {
		ipoib_warn(priv, "failed to allocate tx ring\n");
		ret = -ENOMEM;
		goto err_tx;
	}
	memset(p->tx_ring, 0, ipoib_sendq_size * sizeof *p->tx_ring);

	p->qp = ipoib_cm_create_tx_qp(p->dev, p);
	if (IS_ERR(p->qp)) {
		ret = PTR_ERR(p->qp);
		ipoib_warn(priv, "failed to allocate tx qp: %d\n", ret);
		goto err_qp;
	}

	p->id = ib_create_cm_id(priv->ca, ipoib_cm_tx_handler, p);
	if (IS_ERR(p->id)) {
		ret = PTR_ERR(p->id);
		ipoib_warn(priv, "failed to create tx cm id: %d\n", ret);
		goto err_id;
	}

	ret = ipoib_cm_modify_tx_init(p->dev, p->id, p->qp);
	if (ret) {
		ipoib_warn(priv, "failed to modify tx qp to rtr: %d\n", ret);
		goto err_modify;
	}

	ret = ipoib_cm_send_req(p->dev, p->id, p->qp, qpn, pathrec);
	if (ret) {
		ipoib_warn(priv, "failed to send cm req: %d\n", ret);
		goto err_send_cm;
	}

	ipoib_dbg(priv, "Request connection 0x%x for gid %pI6 qpn 0x%x\n",
		  p->qp->qp_num, pathrec->dgid.raw, qpn);

	return 0;

err_send_cm:
err_modify:
	ib_destroy_cm_id(p->id);
err_id:
	p->id = NULL;
	ib_destroy_qp(p->qp);
err_qp:
	p->qp = NULL;
	vfree(p->tx_ring);
err_tx:
	return ret;
}

static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
{
	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
	struct ipoib_cm_tx_buf *tx_req;
	unsigned long begin;

	ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n",
		  p->qp ? p->qp->qp_num : 0, p->tx_head, p->tx_tail);

	if (p->id)
		ib_destroy_cm_id(p->id);

	if (p->tx_ring) {
		/* Wait for all sends to complete */
		begin = jiffies;
		while ((int) p->tx_tail - (int) p->tx_head < 0) {
			if (time_after(jiffies, begin + 5 * HZ)) {
				ipoib_warn(priv, "timing out; %d sends not completed\n",
					   p->tx_head - p->tx_tail);
				goto timeout;
			}

			msleep(1);
		}
	}

timeout:

	while ((int) p->tx_tail - (int) p->tx_head < 0) {
		tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
		ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len,
				    DMA_TO_DEVICE);
		dev_kfree_skb_any(tx_req->skb);
		++p->tx_tail;
		netif_tx_lock_bh(p->dev);
		if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
		    netif_queue_stopped(p->dev) &&
		    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
			netif_wake_queue(p->dev);
		netif_tx_unlock_bh(p->dev);
	}

	if (p->qp)
		ib_destroy_qp(p->qp);

	vfree(p->tx_ring);
	kfree(p);
}
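
/*
 * CM event handler for active connections: a failed REP is answered
 * with a consumer reject, and REQ errors, rejects and timewait exit all
 * drop the neighbour and queue the connection for reaping.
 */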

static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event)
{
	struct ipoib_cm_tx *tx = cm_id->context;
	struct ipoib_dev_priv *priv = netdev_priv(tx->dev);
	struct net_device *dev = priv->dev;
	struct ipoib_neigh *neigh;
	unsigned long flags;
	int ret;

	switch (event->event) {
	case IB_CM_DREQ_RECEIVED:
		ipoib_dbg(priv, "DREQ received.\n");
		ib_send_cm_drep(cm_id, NULL, 0);
		break;
	case IB_CM_REP_RECEIVED:
		ipoib_dbg(priv, "REP received.\n");
		ret = ipoib_cm_rep_handler(cm_id, event);
		if (ret)
			ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
				       NULL, 0, NULL, 0);
		break;
	case IB_CM_REQ_ERROR:
	case IB_CM_REJ_RECEIVED:
	case IB_CM_TIMEWAIT_EXIT:
		ipoib_dbg(priv, "CM error %d.\n", event->event);
		netif_tx_lock_bh(dev);
		spin_lock_irqsave(&priv->lock, flags);
		neigh = tx->neigh;

		if (neigh) {
			neigh->cm = NULL;
			list_del(&neigh->list);
			if (neigh->ah)
				ipoib_put_ah(neigh->ah);
			ipoib_neigh_free(dev, neigh);

			tx->neigh = NULL;
		}

		if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
			list_move(&tx->list, &priv->cm.reap_list);
			queue_work(ipoib_workqueue, &priv->cm.reap_task);
		}

		spin_unlock_irqrestore(&priv->lock, flags);
		netif_tx_unlock_bh(dev);
		break;
	default:
		break;
	}

	return 0;
}

struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path *path,
				       struct ipoib_neigh *neigh)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_tx *tx;

	tx = kzalloc(sizeof *tx, GFP_ATOMIC);
	if (!tx)
		return NULL;

	neigh->cm = tx;
	tx->neigh = neigh;
	tx->path = path;
	tx->dev = dev;
	list_add(&tx->list, &priv->cm.start_list);
	set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
	queue_work(ipoib_workqueue, &priv->cm.start_task);
	return tx;
}

void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
{
	struct ipoib_dev_priv *priv = netdev_priv(tx->dev);
	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
		list_move(&tx->list, &priv->cm.reap_list);
		queue_work(ipoib_workqueue, &priv->cm.reap_task);
		ipoib_dbg(priv, "Reap connection for gid %pI6\n",
			  tx->neigh->dgid.raw);
		tx->neigh = NULL;
	}
}

static void ipoib_cm_tx_start(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.start_task);
	struct net_device *dev = priv->dev;
	struct ipoib_neigh *neigh;
	struct ipoib_cm_tx *p;
	unsigned long flags;
	int ret;

	struct ib_sa_path_rec pathrec;
	u32 qpn;

	netif_tx_lock_bh(dev);
	spin_lock_irqsave(&priv->lock, flags);

	while (!list_empty(&priv->cm.start_list)) {
		p = list_entry(priv->cm.start_list.next, typeof(*p), list);
		list_del_init(&p->list);
		neigh = p->neigh;
		qpn = IPOIB_QPN(neigh->neighbour->ha);
		memcpy(&pathrec, &p->path->pathrec, sizeof pathrec);

		spin_unlock_irqrestore(&priv->lock, flags);
		netif_tx_unlock_bh(dev);

		ret = ipoib_cm_tx_init(p, qpn, &pathrec);

		netif_tx_lock_bh(dev);
		spin_lock_irqsave(&priv->lock, flags);

		if (ret) {
			neigh = p->neigh;
			if (neigh) {
				neigh->cm = NULL;
				list_del(&neigh->list);
				if (neigh->ah)
					ipoib_put_ah(neigh->ah);
				ipoib_neigh_free(dev, neigh);
			}
			list_del(&p->list);
			kfree(p);
		}
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	netif_tx_unlock_bh(dev);
}

static void ipoib_cm_tx_reap(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.reap_task);
	struct net_device *dev = priv->dev;
	struct ipoib_cm_tx *p;
	unsigned long flags;

	netif_tx_lock_bh(dev);
	spin_lock_irqsave(&priv->lock, flags);

	while (!list_empty(&priv->cm.reap_list)) {
		p = list_entry(priv->cm.reap_list.next, typeof(*p), list);
		list_del(&p->list);
		spin_unlock_irqrestore(&priv->lock, flags);
		netif_tx_unlock_bh(dev);
		ipoib_cm_tx_destroy(p);
		netif_tx_lock_bh(dev);
		spin_lock_irqsave(&priv->lock, flags);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	netif_tx_unlock_bh(dev);
}
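
/*
 * Deferred path-MTU signalling: packets that were too long for a
 * connection are queued by ipoib_cm_skb_too_long() and answered here
 * with ICMP fragmentation-needed (IPv4) or packet-too-big (IPv6).
 */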

static void ipoib_cm_skb_reap(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.skb_task);
	struct net_device *dev = priv->dev;
	struct sk_buff *skb;
	unsigned long flags;
	unsigned mtu = priv->mcast_mtu;

	netif_tx_lock_bh(dev);
	spin_lock_irqsave(&priv->lock, flags);

	while ((skb = skb_dequeue(&priv->cm.skb_queue))) {
		spin_unlock_irqrestore(&priv->lock, flags);
		netif_tx_unlock_bh(dev);

		if (skb->protocol == htons(ETH_P_IP))
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		else if (skb->protocol == htons(ETH_P_IPV6))
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, priv->dev);
#endif
		dev_kfree_skb_any(skb);

		netif_tx_lock_bh(dev);
		spin_lock_irqsave(&priv->lock, flags);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	netif_tx_unlock_bh(dev);
}

void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
			   unsigned int mtu)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int e = skb_queue_empty(&priv->cm.skb_queue);

	if (skb_dst(skb))
		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);

	skb_queue_tail(&priv->cm.skb_queue, skb);
	if (e)
		queue_work(ipoib_workqueue, &priv->cm.skb_task);
}

static void ipoib_cm_rx_reap(struct work_struct *work)
{
	ipoib_cm_free_rx_reap_list(container_of(work, struct ipoib_dev_priv,
						cm.rx_reap_task)->dev);
}

static void ipoib_cm_stale_task(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.stale_task.work);
	struct ipoib_cm_rx *p;
	int ret;

	spin_lock_irq(&priv->lock);
	while (!list_empty(&priv->cm.passive_ids)) {
		/* List is sorted by LRU, start from tail,
		 * stop when we see a recently used entry */
		p = list_entry(priv->cm.passive_ids.prev, typeof(*p), list);
		if (time_before_eq(jiffies, p->jiffies + IPOIB_CM_RX_TIMEOUT))
			break;
		list_move(&p->list, &priv->cm.rx_error_list);
		p->state = IPOIB_CM_RX_ERROR;
		spin_unlock_irq(&priv->lock);
		ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
		if (ret)
			ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
		spin_lock_irq(&priv->lock);
	}

	if (!list_empty(&priv->cm.passive_ids))
		queue_delayed_work(ipoib_workqueue,
				   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
	spin_unlock_irq(&priv->lock);
}
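
/*
 * The per-interface "mode" sysfs attribute switches between datagram
 * and connected mode; switching flushes existing paths so connections
 * are re-established in the new mode.
 */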

static ssize_t show_mode(struct device *d, struct device_attribute *attr,
			 char *buf)
{
	struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(d));

	if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
		return sprintf(buf, "connected\n");
	else
		return sprintf(buf, "datagram\n");
}

static ssize_t set_mode(struct device *d, struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct net_device *dev = to_net_dev(d);
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	if (!rtnl_trylock())
		return restart_syscall();

	/* flush paths if we switch modes so that connections are restarted */
	if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) {
		set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
		ipoib_warn(priv, "enabling connected mode "
			   "will cause multicast packet drops\n");

		dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO);
		rtnl_unlock();
		priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;

		ipoib_flush_paths(dev);
		return count;
	}

	if (!strcmp(buf, "datagram\n")) {
		clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);

		if (test_bit(IPOIB_FLAG_CSUM, &priv->flags)) {
			dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
			if (priv->hca_caps & IB_DEVICE_UD_TSO)
				dev->features |= NETIF_F_TSO;
		}
		dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
		rtnl_unlock();
		ipoib_flush_paths(dev);

		return count;
	}
	rtnl_unlock();

	return -EINVAL;
}

static DEVICE_ATTR(mode, S_IWUSR | S_IRUGO, show_mode, set_mode);

int ipoib_cm_add_mode_attr(struct net_device *dev)
{
	return device_create_file(&dev->dev, &dev_attr_mode);
}
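
/*
 * Try to create an SRQ shared by all passive connections; if the HCA
 * does not support SRQs (-ENOSYS) fall back silently to per-connection
 * receive rings.
 */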

static void ipoib_cm_create_srq(struct net_device *dev, int max_sge)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_srq_init_attr srq_init_attr = {
		.attr = {
			.max_wr  = ipoib_recvq_size,
			.max_sge = max_sge
		}
	};

	priv->cm.srq = ib_create_srq(priv->pd, &srq_init_attr);
	if (IS_ERR(priv->cm.srq)) {
		if (PTR_ERR(priv->cm.srq) != -ENOSYS)
			printk(KERN_WARNING "%s: failed to allocate SRQ, error %ld\n",
			       priv->ca->name, PTR_ERR(priv->cm.srq));
		priv->cm.srq = NULL;
		return;
	}

	priv->cm.srq_ring = vmalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring);
	if (!priv->cm.srq_ring) {
		printk(KERN_WARNING "%s: failed to allocate CM SRQ ring (%d entries)\n",
		       priv->ca->name, ipoib_recvq_size);
		ib_destroy_srq(priv->cm.srq);
		priv->cm.srq = NULL;
		return;
	}

	memset(priv->cm.srq_ring, 0, ipoib_recvq_size * sizeof *priv->cm.srq_ring);
}
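
/*
 * Per-device connected-mode setup: size the receive S/G list from the
 * device's max_srq_sge when an SRQ is available (which also determines
 * max_cm_mtu), otherwise use the IPOIB_CM_MTU / IPOIB_CM_RX_SG defaults,
 * and pre-post the SRQ receive ring.
 */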

int ipoib_cm_dev_init(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int i, ret;
	struct ib_device_attr attr;

	INIT_LIST_HEAD(&priv->cm.passive_ids);
	INIT_LIST_HEAD(&priv->cm.reap_list);
	INIT_LIST_HEAD(&priv->cm.start_list);
	INIT_LIST_HEAD(&priv->cm.rx_error_list);
	INIT_LIST_HEAD(&priv->cm.rx_flush_list);
	INIT_LIST_HEAD(&priv->cm.rx_drain_list);
	INIT_LIST_HEAD(&priv->cm.rx_reap_list);
	INIT_WORK(&priv->cm.start_task, ipoib_cm_tx_start);
	INIT_WORK(&priv->cm.reap_task, ipoib_cm_tx_reap);
	INIT_WORK(&priv->cm.skb_task, ipoib_cm_skb_reap);
	INIT_WORK(&priv->cm.rx_reap_task, ipoib_cm_rx_reap);
	INIT_DELAYED_WORK(&priv->cm.stale_task, ipoib_cm_stale_task);

	skb_queue_head_init(&priv->cm.skb_queue);

	ret = ib_query_device(priv->ca, &attr);
	if (ret) {
		printk(KERN_WARNING "ib_query_device() failed with %d\n", ret);
		return ret;
	}

	ipoib_dbg(priv, "max_srq_sge=%d\n", attr.max_srq_sge);

	attr.max_srq_sge = min_t(int, IPOIB_CM_RX_SG, attr.max_srq_sge);
	ipoib_cm_create_srq(dev, attr.max_srq_sge);
	if (ipoib_cm_has_srq(dev)) {
		priv->cm.max_cm_mtu = attr.max_srq_sge * PAGE_SIZE - 0x10;
		priv->cm.num_frags  = attr.max_srq_sge;
		ipoib_dbg(priv, "max_cm_mtu = 0x%x, num_frags=%d\n",
			  priv->cm.max_cm_mtu, priv->cm.num_frags);
	} else {
		priv->cm.max_cm_mtu = IPOIB_CM_MTU;
		priv->cm.num_frags  = IPOIB_CM_RX_SG;
	}

	ipoib_cm_init_rx_wr(dev, &priv->cm.rx_wr, priv->cm.rx_sge);

	if (ipoib_cm_has_srq(dev)) {
		for (i = 0; i < ipoib_recvq_size; ++i) {
			if (!ipoib_cm_alloc_rx_skb(dev, priv->cm.srq_ring, i,
						   priv->cm.num_frags - 1,
						   priv->cm.srq_ring[i].mapping)) {
				ipoib_warn(priv, "failed to allocate "
					   "receive buffer %d\n", i);
				ipoib_cm_dev_cleanup(dev);
				return -ENOMEM;
			}

			if (ipoib_cm_post_receive_srq(dev, i)) {
				ipoib_warn(priv, "ipoib_cm_post_receive_srq "
					   "failed for buf %d\n", i);
				ipoib_cm_dev_cleanup(dev);
				return -EIO;
			}
		}
	}

	priv->dev->dev_addr[0] = IPOIB_FLAGS_RC;
	return 0;
}

void ipoib_cm_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int ret;

	if (!priv->cm.srq)
		return;

	ipoib_dbg(priv, "Cleanup ipoib connected mode.\n");

	ret = ib_destroy_srq(priv->cm.srq);
	if (ret)
		ipoib_warn(priv, "ib_destroy_srq failed: %d\n", ret);

	priv->cm.srq = NULL;
	if (!priv->cm.srq_ring)
		return;

	ipoib_cm_free_rx_ring(dev, priv->cm.srq_ring);
	priv->cm.srq_ring = NULL;
}