/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

#include <linux/ip.h>
#include <linux/tcp.h>

#include "ipoib.h"

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param(data_debug_level, int, 0644);
MODULE_PARM_DESC(data_debug_level,
		 "Enable data path debug tracing if > 0");
#endif

struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
				 struct ib_pd *pd, struct rdma_ah_attr *attr)
{
	struct ipoib_ah *ah;
	struct ib_ah *vah;

	ah = kmalloc(sizeof *ah, GFP_KERNEL);
	if (!ah)
		return ERR_PTR(-ENOMEM);

	ah->dev       = dev;
	ah->last_send = 0;
	kref_init(&ah->ref);

	vah = rdma_create_ah(pd, attr);
	if (IS_ERR(vah)) {
		kfree(ah);
		ah = (struct ipoib_ah *)vah;
	} else {
		ah->ah = vah;
		ipoib_dbg(ipoib_priv(dev), "Created ah %p\n", ah->ah);
	}

	return ah;
}

void ipoib_free_ah(struct kref *kref)
{
	struct ipoib_ah *ah = container_of(kref, struct ipoib_ah, ref);
	struct ipoib_dev_priv *priv = ipoib_priv(ah->dev);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	list_add_tail(&ah->list, &priv->dead_ahs);
	spin_unlock_irqrestore(&priv->lock, flags);
}

static void ipoib_ud_dma_unmap_rx(struct ipoib_dev_priv *priv,
				  u64 mapping[IPOIB_UD_RX_SG])
{
	ib_dma_unmap_single(priv->ca, mapping[0],
			    IPOIB_UD_BUF_SIZE(priv->max_ib_mtu),
			    DMA_FROM_DEVICE);
}

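/*
 * Receive work requests carry their ring index in the low bits of
 * wr_id, with IPOIB_OP_RECV or'ed in so receive completions can be
 * told apart from send completions when both are drained together
 * (see ipoib_poll() and ipoib_drain_cq()).  The shared rx_wr/rx_sge
 * template in priv is repointed at the ring slot's DMA mapping before
 * each post.
 */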
static int ipoib_ib_post_receive(struct net_device *dev, int id)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ib_recv_wr *bad_wr;
	int ret;

	priv->rx_wr.wr_id    = id | IPOIB_OP_RECV;
	priv->rx_sge[0].addr = priv->rx_ring[id].mapping[0];
	priv->rx_sge[1].addr = priv->rx_ring[id].mapping[1];

	ret = ib_post_recv(priv->qp, &priv->rx_wr, &bad_wr);
	if (unlikely(ret)) {
		ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
		ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[id].mapping);
		dev_kfree_skb_any(priv->rx_ring[id].skb);
		priv->rx_ring[id].skb = NULL;
	}

	return ret;
}

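/*
 * RX buffer layout: each skb is sized for the GRH plus the largest UD
 * payload (IPOIB_UD_BUF_SIZE of the port MTU) with IPOIB_HARD_LEN of
 * headroom; reserving sizeof(struct ipoib_pseudo_header) up front is
 * what gives the eventual IP header its 64-byte alignment.
 */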
static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct sk_buff *skb;
	int buf_size;
	u64 *mapping;

	buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);

	skb = dev_alloc_skb(buf_size + IPOIB_HARD_LEN);
	if (unlikely(!skb))
		return NULL;

	/*
	 * The IP header will be at IPOIB_HARD_LEN + IB_GRH_BYTES, which
	 * is 64-byte aligned.
	 */
	skb_reserve(skb, sizeof(struct ipoib_pseudo_header));

	mapping = priv->rx_ring[id].mapping;
	mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size,
				       DMA_FROM_DEVICE);
	if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0])))
		goto error;

	priv->rx_ring[id].skb = skb;
	return skb;
error:
	dev_kfree_skb_any(skb);
	return NULL;
}

static int ipoib_ib_post_receives(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	int i;

	for (i = 0; i < ipoib_recvq_size; ++i) {
		if (!ipoib_alloc_rx_skb(dev, i)) {
			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
			return -ENOMEM;
		}
		if (ipoib_ib_post_receive(dev, i)) {
			ipoib_warn(priv, "ipoib_ib_post_receive failed for buf %d\n", i);
			return -EIO;
		}
	}

	return 0;
}

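/*
 * Per-completion receive path.  On success the filled skb is handed to
 * the stack via GRO and a fresh buffer takes over its ring slot; on
 * any failure the slot's buffer is recycled, so a receive is always
 * reposted for this wr_id.
 */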
static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
	struct sk_buff *skb;
	u64 mapping[IPOIB_UD_RX_SG];
	union ib_gid *dgid;
	union ib_gid *sgid;

	ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_recvq_size)) {
		ipoib_warn(priv, "recv completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_recvq_size);
		return;
	}

	skb = priv->rx_ring[wr_id].skb;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			ipoib_warn(priv,
				   "failed recv event (status=%d, wrid=%d vend_err %x)\n",
				   wc->status, wr_id, wc->vendor_err);
		ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping);
		dev_kfree_skb_any(skb);
		priv->rx_ring[wr_id].skb = NULL;
		return;
	}

	memcpy(mapping, priv->rx_ring[wr_id].mapping,
	       IPOIB_UD_RX_SG * sizeof *mapping);

	/*
	 * If we can't allocate a new RX buffer, dump
	 * this packet and reuse the old buffer.
	 */
	if (unlikely(!ipoib_alloc_rx_skb(dev, wr_id))) {
		++dev->stats.rx_dropped;
		goto repost;
	}

	ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
		       wc->byte_len, wc->slid);

	ipoib_ud_dma_unmap_rx(priv, mapping);

	skb_put(skb, wc->byte_len);

	/* First byte of dgid signals multicast when 0xff */
	dgid = &((struct ib_grh *)skb->data)->dgid;

	if (!(wc->wc_flags & IB_WC_GRH) || dgid->raw[0] != 0xff)
		skb->pkt_type = PACKET_HOST;
	else if (memcmp(dgid, dev->broadcast + 4, sizeof(union ib_gid)) == 0)
		skb->pkt_type = PACKET_BROADCAST;
	else
		skb->pkt_type = PACKET_MULTICAST;

	sgid = &((struct ib_grh *)skb->data)->sgid;

	/*
	 * Drop packets that this interface sent, i.e. multicast packets
	 * that the HCA has replicated.
	 */
	if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num) {
		int need_repost = 1;

		if ((wc->wc_flags & IB_WC_GRH) &&
		    sgid->global.interface_id != priv->local_gid.global.interface_id)
			need_repost = 0;

		if (need_repost) {
			dev_kfree_skb_any(skb);
			goto repost;
		}
	}

	skb_pull(skb, IB_GRH_BYTES);

	skb->protocol = ((struct ipoib_header *) skb->data)->proto;
	skb_add_pseudo_hdr(skb);

	++dev->stats.rx_packets;
	dev->stats.rx_bytes += skb->len;

	skb->dev = dev;
	if ((dev->features & NETIF_F_RXCSUM) &&
	    likely(wc->wc_flags & IB_WC_IP_CSUM_OK))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	napi_gro_receive(&priv->napi, skb);

repost:
	if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
		ipoib_warn(priv, "ipoib_ib_post_receive failed for buf %d\n",
			   wr_id);
}

int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req)
{
	struct sk_buff *skb = tx_req->skb;
	u64 *mapping = tx_req->mapping;
	int i;
	int off;

	if (skb_headlen(skb)) {
		mapping[0] = ib_dma_map_single(ca, skb->data, skb_headlen(skb),
					       DMA_TO_DEVICE);
		if (unlikely(ib_dma_mapping_error(ca, mapping[0])))
			return -EIO;

		off = 1;
	} else
		off = 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		mapping[i + off] = ib_dma_map_page(ca,
						   skb_frag_page(frag),
						   frag->page_offset, skb_frag_size(frag),
						   DMA_TO_DEVICE);
		if (unlikely(ib_dma_mapping_error(ca, mapping[i + off])))
			goto partial_error;
	}
	return 0;

partial_error:
	for (; i > 0; --i) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];

		ib_dma_unmap_page(ca, mapping[i - !off], skb_frag_size(frag),
				  DMA_TO_DEVICE);
	}

	if (off)
		ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb),
				    DMA_TO_DEVICE);

	return -EIO;
}

void ipoib_dma_unmap_tx(struct ipoib_dev_priv *priv,
			struct ipoib_tx_buf *tx_req)
{
	struct sk_buff *skb = tx_req->skb;
	u64 *mapping = tx_req->mapping;
	int i;
	int off;

	if (skb_headlen(skb)) {
		ib_dma_unmap_single(priv->ca, mapping[0], skb_headlen(skb),
				    DMA_TO_DEVICE);
		off = 1;
	} else
		off = 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		ib_dma_unmap_page(priv->ca, mapping[i + off],
				  skb_frag_size(frag), DMA_TO_DEVICE);
	}
}

/*
 * As a result of a completion error, the QP can transition to the SQE
 * state.  This function checks whether the (send) QP is in the SQE
 * state and, if so, moves it back to RTS so that it is functional
 * again.
 */
static void ipoib_qp_state_validate_work(struct work_struct *work)
{
	struct ipoib_qp_state_validate *qp_work =
		container_of(work, struct ipoib_qp_state_validate, work);
	struct ipoib_dev_priv *priv = qp_work->priv;
	struct ib_qp_attr qp_attr;
	struct ib_qp_init_attr query_init_attr;
	int ret;

	ret = ib_query_qp(priv->qp, &qp_attr, IB_QP_STATE, &query_init_attr);
	if (ret) {
		ipoib_warn(priv, "%s: Failed to query QP ret: %d\n",
			   __func__, ret);
		goto free_res;
	}
	pr_info("%s: QP: 0x%x is in state: %d\n",
		__func__, priv->qp->qp_num, qp_attr.qp_state);

	/* Currently we only support the SQE->RTS transition */
	if (qp_attr.qp_state == IB_QPS_SQE) {
		qp_attr.qp_state = IB_QPS_RTS;

		ret = ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE);
		if (ret) {
			pr_warn("failed(%d) modify QP:0x%x SQE->RTS\n",
				ret, priv->qp->qp_num);
			goto free_res;
		}
		pr_info("%s: QP: 0x%x moved from IB_QPS_SQE to IB_QPS_RTS\n",
			__func__, priv->qp->qp_num);
	} else {
		pr_warn("QP (%d) will stay in state: %d\n",
			priv->qp->qp_num, qp_attr.qp_state);
	}

free_res:
	kfree(qp_work);
}

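/*
 * Send completions retire ring entries in order: unmap the DMA
 * addresses, bump the stats, free the skb and advance tx_tail.  The
 * queue is woken once the ring drains to half full, which pairs with
 * the stop condition in ipoib_send().  A "real" error (anything other
 * than success or a flush) schedules ipoib_qp_state_validate_work()
 * above to recover the QP from the SQE state.
 */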
static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	unsigned int wr_id = wc->wr_id;
	struct ipoib_tx_buf *tx_req;

	ipoib_dbg_data(priv, "send completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_sendq_size)) {
		ipoib_warn(priv, "send completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_sendq_size);
		return;
	}

	tx_req = &priv->tx_ring[wr_id];

	ipoib_dma_unmap_tx(priv, tx_req);

	++dev->stats.tx_packets;
	dev->stats.tx_bytes += tx_req->skb->len;

	dev_kfree_skb_any(tx_req->skb);

	++priv->tx_tail;
	if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
	    netif_queue_stopped(dev) &&
	    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
		netif_wake_queue(dev);

	if (wc->status != IB_WC_SUCCESS &&
	    wc->status != IB_WC_WR_FLUSH_ERR) {
		struct ipoib_qp_state_validate *qp_work;

		ipoib_warn(priv,
			   "failed send event (status=%d, wrid=%d vend_err %x)\n",
			   wc->status, wr_id, wc->vendor_err);
		qp_work = kzalloc(sizeof(*qp_work), GFP_ATOMIC);
		if (!qp_work)
			return;

		INIT_WORK(&qp_work->work, ipoib_qp_state_validate_work);
		qp_work->priv = priv;
		queue_work(priv->wq, &qp_work->work);
	}
}

static int poll_tx(struct ipoib_dev_priv *priv)
{
	int n, i;

	n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);
	for (i = 0; i < n; ++i)
		ipoib_ib_handle_tx_wc(priv->dev, priv->send_wc + i);

	return n == MAX_SEND_CQE;
}

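/*
 * poll_tx() returning true means a full batch of MAX_SEND_CQE entries
 * was reaped, i.e. the send CQ may still be non-empty and should be
 * polled again.  ipoib_poll() below is the NAPI handler for the
 * receive CQ; it re-arms the CQ with IB_CQ_REPORT_MISSED_EVENTS and
 * loops back if completions slipped in during the re-arm window.
 */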
int ipoib_poll(struct napi_struct *napi, int budget)
{
	struct ipoib_dev_priv *priv = container_of(napi, struct ipoib_dev_priv, napi);
	struct net_device *dev = priv->dev;
	int done;
	int t;
	int n, i;

	done = 0;

poll_more:
	while (done < budget) {
		int max = (budget - done);

		t = min(IPOIB_NUM_WC, max);
		n = ib_poll_cq(priv->recv_cq, t, priv->ibwc);

		for (i = 0; i < n; i++) {
			struct ib_wc *wc = priv->ibwc + i;

			if (wc->wr_id & IPOIB_OP_RECV) {
				++done;
				if (wc->wr_id & IPOIB_OP_CM)
					ipoib_cm_handle_rx_wc(dev, wc);
				else
					ipoib_ib_handle_rx_wc(dev, wc);
			} else
				ipoib_cm_handle_tx_wc(priv->dev, wc);
		}

		if (n != t)
			break;
	}

	if (done < budget) {
		napi_complete(napi);
		if (unlikely(ib_req_notify_cq(priv->recv_cq,
					      IB_CQ_NEXT_COMP |
					      IB_CQ_REPORT_MISSED_EVENTS)) &&
		    napi_reschedule(napi))
			goto poll_more;
	}

	return done;
}

void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
{
	struct net_device *dev = dev_ptr;
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	napi_schedule(&priv->napi);
}

static void drain_tx_cq(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	netif_tx_lock(dev);
	while (poll_tx(priv))
		; /* nothing */

	if (netif_queue_stopped(dev))
		mod_timer(&priv->poll_timer, jiffies + 1);

	netif_tx_unlock(dev);
}

void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev_ptr);

	mod_timer(&priv->poll_timer, jiffies);
}

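/*
 * post_send() fills the shared priv->tx_wr template.  When 'head' is
 * non-NULL the skb is a GSO frame: the transport headers are handed to
 * the hardware separately via IB_WR_LSO, with gso_size as the MSS, so
 * the HCA can replicate them per segment.
 */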
static inline int post_send(struct ipoib_dev_priv *priv,
			    unsigned int wr_id,
			    struct ib_ah *address, u32 dqpn,
			    struct ipoib_tx_buf *tx_req,
			    void *head, int hlen)
{
	struct ib_send_wr *bad_wr;
	struct sk_buff *skb = tx_req->skb;

	ipoib_build_sge(priv, tx_req);

	priv->tx_wr.wr.wr_id	= wr_id;
	priv->tx_wr.remote_qpn	= dqpn;
	priv->tx_wr.ah		= address;

	if (head) {
		priv->tx_wr.mss		= skb_shinfo(skb)->gso_size;
		priv->tx_wr.header	= head;
		priv->tx_wr.hlen	= hlen;
		priv->tx_wr.wr.opcode	= IB_WR_LSO;
	} else
		priv->tx_wr.wr.opcode	= IB_WR_SEND;

	return ib_post_send(priv->qp, &priv->tx_wr.wr, &bad_wr);
}

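/*
 * Main UD transmit path.  The skb is recorded in the ring and DMA
 * mapped before posting, the queue is stopped when the ring fills, and
 * tx_head is only advanced after a successful post.  The return value
 * is the ring producer index on success, 0 when the post itself failed
 * (the skb is dropped), and -1 for packets rejected up front.
 */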
int ipoib_send(struct net_device *dev, struct sk_buff *skb,
	       struct ib_ah *address, u32 dqpn)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_tx_buf *tx_req;
	int hlen, rc;
	void *phead;
	unsigned usable_sge = priv->max_send_sge - !!skb_headlen(skb);

	if (skb_is_gso(skb)) {
		hlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
		phead = skb->data;
		if (unlikely(!skb_pull(skb, hlen))) {
			ipoib_warn(priv, "linear data too small\n");
			++dev->stats.tx_dropped;
			++dev->stats.tx_errors;
			dev_kfree_skb_any(skb);
			return -1;
		}
	} else {
		if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) {
			ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
				   skb->len, priv->mcast_mtu + IPOIB_ENCAP_LEN);
			++dev->stats.tx_dropped;
			++dev->stats.tx_errors;
			ipoib_cm_skb_too_long(dev, skb, priv->mcast_mtu);
			return -1;
		}
		phead = NULL;
		hlen  = 0;
	}
	if (skb_shinfo(skb)->nr_frags > usable_sge) {
		if (skb_linearize(skb) < 0) {
			ipoib_warn(priv, "skb could not be linearized\n");
			++dev->stats.tx_dropped;
			++dev->stats.tx_errors;
			dev_kfree_skb_any(skb);
			return -1;
		}
		/* Did skb_linearize() succeed without reducing nr_frags? */
		if (skb_shinfo(skb)->nr_frags > usable_sge) {
			ipoib_warn(priv, "too many frags after skb linearize\n");
			++dev->stats.tx_dropped;
			++dev->stats.tx_errors;
			dev_kfree_skb_any(skb);
			return -1;
		}
	}

	ipoib_dbg_data(priv,
		       "sending packet, length=%d address=%p dqpn=0x%06x\n",
		       skb->len, address, dqpn);

	/*
	 * We put the skb into the tx_ring _before_ we call post_send()
	 * because it's entirely possible that the completion handler will
	 * run before we execute anything after the post_send().  That
	 * means we have to make sure everything is properly recorded and
	 * our state is consistent before we call post_send().
	 */
	tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)];
	tx_req->skb = skb;
	if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
		++dev->stats.tx_errors;
		dev_kfree_skb_any(skb);
		return -1;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		priv->tx_wr.wr.send_flags |= IB_SEND_IP_CSUM;
	else
		priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;

	if (++priv->tx_outstanding == ipoib_sendq_size) {
		ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
		if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
			ipoib_warn(priv, "request notify on send CQ failed\n");
		netif_stop_queue(dev);
	}

	skb_orphan(skb);
	skb_dst_drop(skb);

	rc = post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
		       address, dqpn, tx_req, phead, hlen);
	if (unlikely(rc)) {
		ipoib_warn(priv, "post_send failed, error %d\n", rc);
		++dev->stats.tx_errors;
		--priv->tx_outstanding;
		ipoib_dma_unmap_tx(priv, tx_req);
		dev_kfree_skb_any(skb);
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
		rc = 0;
	} else {
		netif_trans_update(dev);

		rc = priv->tx_head;
		++priv->tx_head;
	}

	if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))
		while (poll_tx(priv))
			; /* nothing */

	return rc;
}

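/*
 * AH garbage collection: an AH on dead_ahs may only be destroyed once
 * tx_tail proves the last send that referenced it has completed, hence
 * the signed comparison against ah->last_send.  ipoib_reap_ah() runs
 * this periodically from priv->wq until IPOIB_STOP_REAPER is set.
 */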
static void __ipoib_reap_ah(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_ah *ah, *tah;
	LIST_HEAD(remove_list);
	unsigned long flags;

	netif_tx_lock_bh(dev);
	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
		if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
			list_del(&ah->list);
			rdma_destroy_ah(ah->ah);
			kfree(ah);
		}

	spin_unlock_irqrestore(&priv->lock, flags);
	netif_tx_unlock_bh(dev);
}

void ipoib_reap_ah(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, ah_reap_task.work);
	struct net_device *dev = priv->dev;

	__ipoib_reap_ah(dev);

	if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
		queue_delayed_work(priv->wq, &priv->ah_reap_task,
				   round_jiffies_relative(HZ));
}

static void ipoib_flush_ah(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	cancel_delayed_work(&priv->ah_reap_task);
	flush_workqueue(priv->wq);
	ipoib_reap_ah(&priv->ah_reap_task.work);
}

static void ipoib_stop_ah(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	set_bit(IPOIB_STOP_REAPER, &priv->flags);
	ipoib_flush_ah(dev);
}

static int recvs_pending(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	int pending = 0;
	int i;

	for (i = 0; i < ipoib_recvq_size; ++i)
		if (priv->rx_ring[i].skb)
			++pending;

	return pending;
}

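/*
 * Default stop path: move the QP to the ERR state so outstanding work
 * requests complete as flushes, drain both CQs, and give the hardware
 * up to five seconds before assuming it is wedged and reclaiming the
 * rings by hand.
 */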
int ipoib_ib_dev_stop_default(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ib_qp_attr qp_attr;
	unsigned long begin;
	struct ipoib_tx_buf *tx_req;
	int i;

	if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
		napi_disable(&priv->napi);

	ipoib_cm_dev_stop(dev);

	/*
	 * Move our QP to the error state and then reinitialize when
	 * all work requests have completed or have been flushed.
	 */
	qp_attr.qp_state = IB_QPS_ERR;
	if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
		ipoib_warn(priv, "Failed to modify QP to ERROR state\n");

	/* Wait for all sends and receives to complete */
	begin = jiffies;

	while (priv->tx_head != priv->tx_tail || recvs_pending(dev)) {
		if (time_after(jiffies, begin + 5 * HZ)) {
			ipoib_warn(priv,
				   "timing out; %d sends %d receives not completed\n",
				   priv->tx_head - priv->tx_tail,
				   recvs_pending(dev));

			/*
			 * Assume the HW is wedged and just free up
			 * all our pending work requests.
			 */
			while ((int)priv->tx_tail - (int)priv->tx_head < 0) {
				tx_req = &priv->tx_ring[priv->tx_tail &
							(ipoib_sendq_size - 1)];
				ipoib_dma_unmap_tx(priv, tx_req);
				dev_kfree_skb_any(tx_req->skb);
				++priv->tx_tail;
				--priv->tx_outstanding;
			}

			for (i = 0; i < ipoib_recvq_size; ++i) {
				struct ipoib_rx_buf *rx_req;

				rx_req = &priv->rx_ring[i];
				if (!rx_req->skb)
					continue;
				ipoib_ud_dma_unmap_rx(priv,
						      priv->rx_ring[i].mapping);
				dev_kfree_skb_any(rx_req->skb);
				rx_req->skb = NULL;
			}

			goto timeout;
		}

		ipoib_drain_cq(dev);

		msleep(1);
	}

	ipoib_dbg(priv, "All sends and receives done.\n");

timeout:
	del_timer_sync(&priv->poll_timer);
	qp_attr.qp_state = IB_QPS_RESET;
	if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
		ipoib_warn(priv, "Failed to modify QP to RESET state\n");

	ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);

	return 0;
}

int ipoib_ib_dev_stop(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	priv->rn_ops->ndo_stop(dev);

	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
	ipoib_flush_ah(dev);

	return 0;
}

void ipoib_ib_tx_timer_func(unsigned long ctx)
{
	drain_tx_cq((struct net_device *)ctx);
}

int ipoib_ib_dev_open_default(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	int ret;

	ret = ipoib_init_qp(dev);
	if (ret) {
		ipoib_warn(priv, "ipoib_init_qp returned %d\n", ret);
		return -1;
	}

	ret = ipoib_ib_post_receives(dev);
	if (ret) {
		ipoib_warn(priv, "ipoib_ib_post_receives returned %d\n", ret);
		goto out;
	}

	ret = ipoib_cm_dev_open(dev);
	if (ret) {
		ipoib_warn(priv, "ipoib_cm_dev_open returned %d\n", ret);
		goto out;
	}

	if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
		napi_enable(&priv->napi);

	return 0;
out:
	return -1;
}

int ipoib_ib_dev_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	ipoib_pkey_dev_check_presence(dev);

	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
		ipoib_warn(priv, "P_Key 0x%04x is %s\n", priv->pkey,
			   (!(priv->pkey & 0x7fff) ? "Invalid" : "not found"));
		return -1;
	}

	clear_bit(IPOIB_STOP_REAPER, &priv->flags);
	queue_delayed_work(priv->wq, &priv->ah_reap_task,
			   round_jiffies_relative(HZ));

	if (priv->rn_ops->ndo_open(dev)) {
		pr_warn("%s: Failed to open dev\n", dev->name);
		goto dev_stop;
	}

	set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);

	return 0;

dev_stop:
	set_bit(IPOIB_STOP_REAPER, &priv->flags);
	cancel_delayed_work(&priv->ah_reap_task);
	set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
	napi_enable(&priv->napi);
	ipoib_ib_dev_stop(dev);
	return -1;
}

void ipoib_pkey_dev_check_presence(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	if (!(priv->pkey & 0x7fff) ||
	    ib_find_pkey(priv->ca, priv->port, priv->pkey,
			 &priv->pkey_index))
		clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
	else
		set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
}

void ipoib_ib_dev_up(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	ipoib_pkey_dev_check_presence(dev);

	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
		ipoib_dbg(priv, "PKEY is not assigned.\n");
		return;
	}

	set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);

	ipoib_mcast_start_thread(dev);
}

void ipoib_ib_dev_down(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	ipoib_dbg(priv, "downing ib_dev\n");

	clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
	netif_carrier_off(dev);

	ipoib_mcast_stop_thread(dev);
	ipoib_mcast_dev_flush(dev);

	ipoib_flush_paths(dev);
}

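/*
 * Drains the receive CQ (and then the send CQ via poll_tx()) outside
 * of NAPI, downgrading successful completions to flush errors so that
 * no packets are delivered up the stack while the device is going
 * down.
 */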
void ipoib_drain_cq(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	int i, n;

	/*
	 * We call completion handling routines that expect to be
	 * called from the BH-disabled NAPI poll context, so disable
	 * BHs here too.
	 */
	local_bh_disable();

	do {
		n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc);
		for (i = 0; i < n; ++i) {
			/*
			 * Convert any successful completions to flush
			 * errors to avoid passing packets up the
			 * stack after bringing the device down.
			 */
			if (priv->ibwc[i].status == IB_WC_SUCCESS)
				priv->ibwc[i].status = IB_WC_WR_FLUSH_ERR;

			if (priv->ibwc[i].wr_id & IPOIB_OP_RECV) {
				if (priv->ibwc[i].wr_id & IPOIB_OP_CM)
					ipoib_cm_handle_rx_wc(dev, priv->ibwc + i);
				else
					ipoib_ib_handle_rx_wc(dev, priv->ibwc + i);
			} else
				ipoib_cm_handle_tx_wc(dev, priv->ibwc + i);
		}
	} while (n == IPOIB_NUM_WC);

	while (poll_tx(priv))
		; /* nothing */

	local_bh_enable();
}

/*
 * Takes whatever value is in P_Key index 0 and updates priv->pkey.
 * Returns 0 if the P_Key value was changed.
 */
static inline int update_parent_pkey(struct ipoib_dev_priv *priv)
{
	int result;
	u16 prev_pkey;

	prev_pkey = priv->pkey;
	result = ib_query_pkey(priv->ca, priv->port, 0, &priv->pkey);
	if (result) {
		ipoib_warn(priv, "ib_query_pkey port %d failed (ret = %d)\n",
			   priv->port, result);
		return result;
	}

	priv->pkey |= 0x8000;

	if (prev_pkey != priv->pkey) {
		ipoib_dbg(priv, "pkey changed from 0x%x to 0x%x\n",
			  prev_pkey, priv->pkey);
		/*
		 * Update the pkey in the broadcast address, while making sure to set
		 * the full membership bit, so that we join the right broadcast group.
		 */
		priv->dev->broadcast[8] = priv->pkey >> 8;
		priv->dev->broadcast[9] = priv->pkey & 0xff;

		/*
		 * Update the broadcast address in the priv->broadcast object,
		 * in case it already exists, otherwise no one will do that.
		 */
		if (priv->broadcast) {
			spin_lock_irq(&priv->lock);
			memcpy(priv->broadcast->mcmember.mgid.raw,
			       priv->dev->broadcast + 4,
			       sizeof(union ib_gid));
			spin_unlock_irq(&priv->lock);
		}

		return 0;
	}

	return 1;
}

/*
 * Returns 0 if the P_Key value was found in a different slot.
 */
static inline int update_child_pkey(struct ipoib_dev_priv *priv)
{
	u16 old_index = priv->pkey_index;

	priv->pkey_index = 0;
	ipoib_pkey_dev_check_presence(priv->dev);

	if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) &&
	    (old_index == priv->pkey_index))
		return 1;
	return 0;
}

/*
 * Returns true if the device address of the ipoib interface has changed
 * and the new address is a valid one (i.e. it is in the GID table);
 * returns false otherwise.
 */
static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv)
{
	union ib_gid search_gid;
	union ib_gid gid0;
	union ib_gid *netdev_gid;
	int err;
	u16 index;
	u8 port;
	bool ret = false;

	netdev_gid = (union ib_gid *)(priv->dev->dev_addr + 4);
	if (ib_query_gid(priv->ca, priv->port, 0, &gid0, NULL))
		return false;

	netif_addr_lock_bh(priv->dev);

	/* The subnet prefix may have changed, update it now so we won't have
	 * to do it later
	 */
	priv->local_gid.global.subnet_prefix = gid0.global.subnet_prefix;
	netdev_gid->global.subnet_prefix = gid0.global.subnet_prefix;
	search_gid.global.subnet_prefix = gid0.global.subnet_prefix;

	search_gid.global.interface_id = priv->local_gid.global.interface_id;

	netif_addr_unlock_bh(priv->dev);

	err = ib_find_gid(priv->ca, &search_gid, IB_GID_TYPE_IB,
			  priv->dev, &port, &index);

	netif_addr_lock_bh(priv->dev);

	if (search_gid.global.interface_id !=
	    priv->local_gid.global.interface_id)
		/* There was a change while we were looking up the gid, bail
		 * here and let the next work sort this out
		 */
		goto out;

	/* The next section of code needs some background:
	 * Per the IB spec the port GUID can't change if the HCA is powered
	 * on.  The port GUID is the basis for the GID at index 0, which is
	 * the basis for the default device address of an ipoib interface.
	 *
	 * So it seems the flow should be:
	 * if user_changed_dev_addr && gid in gid tbl
	 *	set bit dev_addr_set
	 *	return true
	 * else
	 *	return false
	 *
	 * The issue is that there are devices that don't follow the spec,
	 * they change the port GUID when the HCA is powered, so in order
	 * not to break userspace applications, we need to check if the
	 * user wanted to control the device address, and we assume that
	 * if he sets the device address back to be based on GID index 0,
	 * he no longer wishes to control it.
	 *
	 * If the user doesn't control the device address,
	 * IPOIB_FLAG_DEV_ADDR_SET is set and ib_find_gid failed, it means
	 * the port GUID has changed and the GID at index 0 has changed, so
	 * we need to change priv->local_gid and priv->dev->dev_addr to
	 * reflect the new GID.
	 */
	if (!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) {
		if (!err && port == priv->port) {
			set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
			if (index == 0)
				clear_bit(IPOIB_FLAG_DEV_ADDR_CTRL,
					  &priv->flags);
			else
				set_bit(IPOIB_FLAG_DEV_ADDR_CTRL, &priv->flags);
			ret = true;
		} else {
			ret = false;
		}
	} else {
		if (!err && port == priv->port) {
			ret = true;
		} else {
			if (!test_bit(IPOIB_FLAG_DEV_ADDR_CTRL, &priv->flags)) {
				memcpy(&priv->local_gid, &gid0,
				       sizeof(priv->local_gid));
				memcpy(priv->dev->dev_addr + 4, &gid0,
				       sizeof(priv->local_gid));
				ret = true;
			}
		}
	}

out:
	netif_addr_unlock_bh(priv->dev);

	return ret;
}

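/*
 * Flush levels build on one another: LIGHT invalidates paths and
 * flushes multicast state, NORMAL additionally takes the IB device
 * down, and HEAVY restarts the QP entirely (stop + open), re-checking
 * the P_Key first so the QP is only bounced when it actually changed.
 */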
static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
				 enum ipoib_flush_level level,
				 int nesting)
{
	struct ipoib_dev_priv *cpriv;
	struct net_device *dev = priv->dev;
	int result;

	down_read_nested(&priv->vlan_rwsem, nesting);

	/*
	 * Flush any child interfaces too -- they might be up even if
	 * the parent is down.
	 */
	list_for_each_entry(cpriv, &priv->child_intfs, list)
		__ipoib_ib_dev_flush(cpriv, level, nesting + 1);

	up_read(&priv->vlan_rwsem);

	if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) &&
	    level != IPOIB_FLUSH_HEAVY) {
		/* Make sure the dev_addr is set even if not flushing */
		if (level == IPOIB_FLUSH_LIGHT)
			ipoib_dev_addr_changed_valid(priv);
		ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
		return;
	}

	if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
		/* interface is down. update pkey and leave. */
		if (level == IPOIB_FLUSH_HEAVY) {
			if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
				update_parent_pkey(priv);
			else
				update_child_pkey(priv);
		} else if (level == IPOIB_FLUSH_LIGHT)
			ipoib_dev_addr_changed_valid(priv);
		ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n");
		return;
	}

	if (level == IPOIB_FLUSH_HEAVY) {
		/* Child devices chase their origin pkey value, while non-child
		 * (parent) devices should always take what is present in
		 * pkey index 0.
		 */
		if (test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
			result = update_child_pkey(priv);
			if (result) {
				/* restart QP only if P_Key index is changed */
				ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n");
				return;
			}
		} else {
			result = update_parent_pkey(priv);
			/* restart QP only if P_Key value changed */
			if (result) {
				ipoib_dbg(priv, "Not flushing - P_Key value not changed.\n");
				return;
			}
		}
	}

	if (level == IPOIB_FLUSH_LIGHT) {
		int oper_up;

		ipoib_mark_paths_invalid(dev);
		/* Set IPoIB operation as down to prevent races between:
		 * the flush flow which leaves MCG and on-the-fly joins
		 * which can happen during that time. The mcast restart task
		 * should deal with join requests we missed.
		 */
		oper_up = test_and_clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
		ipoib_mcast_dev_flush(dev);
		if (oper_up)
			set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
		ipoib_flush_ah(dev);
	}

	if (level >= IPOIB_FLUSH_NORMAL)
		ipoib_ib_dev_down(dev);

	if (level == IPOIB_FLUSH_HEAVY) {
		if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
			ipoib_ib_dev_stop(dev);
		if (ipoib_ib_dev_open(dev) != 0)
			return;
		if (netif_queue_stopped(dev))
			netif_start_queue(dev);
	}

	/*
	 * The device could have been brought down between the start and when
	 * we get here, so don't bring it back up if it's not configured up.
	 */
	if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
		if (level >= IPOIB_FLUSH_NORMAL)
			ipoib_ib_dev_up(dev);
		if (ipoib_dev_addr_changed_valid(priv))
			ipoib_mcast_restart_task(&priv->restart_task);
	}
}

void ipoib_ib_dev_flush_light(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, flush_light);

	__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT, 0);
}

void ipoib_ib_dev_flush_normal(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, flush_normal);

	__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL, 0);
}

void ipoib_ib_dev_flush_heavy(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, flush_heavy);

	__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY, 0);
}

void ipoib_ib_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	ipoib_dbg(priv, "cleaning up ib_dev\n");
	/*
	 * We must make sure there are no more (path) completions
	 * that may wish to touch priv fields that are no longer valid.
	 */
	ipoib_flush_paths(dev);

	ipoib_mcast_stop_thread(dev);
	ipoib_mcast_dev_flush(dev);

	/*
	 * None of our AH references are freed until after
	 * ipoib_mcast_dev_flush(), ipoib_flush_paths() and the neighbor
	 * garbage collection have stopped and been reaped.
	 * That should all be done now, so make a final AH flush.
	 */
	ipoib_stop_ah(dev);

	clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);

	priv->rn_ops->ndo_uninit(dev);

	if (priv->pd) {
		ib_dealloc_pd(priv->pd);
		priv->pd = NULL;
	}
}