 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE

#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/moduleparam.h>
#include <linux/igmp.h>
#include <linux/inetdevice.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/slab.h>

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
static int mcast_debug_level;
module_param(mcast_debug_level, int, 0644);
MODULE_PARM_DESC(mcast_debug_level,
		 "Enable multicast debug tracing if > 0");

struct ipoib_mcast_iter {
	struct net_device *dev;
	unsigned long created;
	unsigned int queuelen;
	unsigned int complete;
	unsigned int send_only;

/* Join state that allows creating an MCG with a send-only member request */
#define SENDONLY_FULLMEMBER_JOIN	8

 * This should be called with the priv->lock held
static void __ipoib_mcast_schedule_join_thread(struct ipoib_dev_priv *priv,
					       struct ipoib_mcast *mcast,
	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
	 * We will be scheduling *something*, so cancel whatever is
	 * currently scheduled first
	cancel_delayed_work(&priv->mcast_task);
		 * We had a failure and want to schedule a retry later
		if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
			mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;
		mcast->delay_until = jiffies + (mcast->backoff * HZ);
		 * Mark this mcast for its delay, but restart the
		 * task immediately. The join task will make sure to
		 * clear out all entries without delays, and then
		 * schedule itself to run again when the earliest delay expires
		queue_delayed_work(priv->wq, &priv->mcast_task, 0);
		 * Special case of retrying after a failure to
		 * allocate the broadcast multicast group, wait
		 * 1 second and try again
		queue_delayed_work(priv->wq, &priv->mcast_task, HZ);
		queue_delayed_work(priv->wq, &priv->mcast_task, 0);
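
/*
 * Tear down a multicast group entry: remove any neighbours pointing at it,
 * release its address handle, and drop (and account) any queued packets.
 */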
static void ipoib_mcast_free(struct ipoib_mcast *mcast)
	struct net_device *dev = mcast->dev;
	ipoib_dbg_mcast(ipoib_priv(dev), "deleting multicast group %pI6\n",
			mcast->mcmember.mgid.raw);
	/* remove all neigh connected to this mcast */
	ipoib_del_neighs_by_gid(dev, mcast->mcmember.mgid.raw);
		ipoib_put_ah(mcast->ah);
	while (!skb_queue_empty(&mcast->pkt_queue)) {
		dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
	netif_tx_lock_bh(dev);
	dev->stats.tx_dropped += tx_dropped;
	netif_tx_unlock_bh(dev);
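
/*
 * Allocate and minimally initialise a multicast group entry; the GFP flags
 * depend on whether the caller can sleep.
 */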
static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev,
	struct ipoib_mcast *mcast;
	mcast = kzalloc(sizeof *mcast, can_sleep ? GFP_KERNEL : GFP_ATOMIC);
	mcast->created = jiffies;
	mcast->delay_until = jiffies;
	INIT_LIST_HEAD(&mcast->list);
	INIT_LIST_HEAD(&mcast->neigh_list);
	skb_queue_head_init(&mcast->pkt_queue);
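
/* Look up a multicast group by MGID in the per-device red-black tree */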
static struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid)
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct rb_node *n = priv->multicast_tree.rb_node;
		struct ipoib_mcast *mcast;
		mcast = rb_entry(n, struct ipoib_mcast, rb_node);
		ret = memcmp(mgid, mcast->mcmember.mgid.raw,
			     sizeof (union ib_gid));
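
/* Insert a multicast group into the per-device red-black tree, keyed by MGID */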
static int __ipoib_mcast_add(struct net_device *dev, struct ipoib_mcast *mcast)
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct rb_node **n = &priv->multicast_tree.rb_node, *pn = NULL;
		struct ipoib_mcast *tmcast;
		tmcast = rb_entry(pn, struct ipoib_mcast, rb_node);
		ret = memcmp(mcast->mcmember.mgid.raw, tmcast->mcmember.mgid.raw,
			     sizeof (union ib_gid));
	rb_link_node(&mcast->rb_node, pn, n);
	rb_insert_color(&mcast->rb_node, &priv->multicast_tree);
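
/*
 * Complete a successful SA join: copy the mcmember record, update the
 * broadcast-derived parameters (MTU, Q_Key, etc.), attach the QP to the
 * group if needed, create an address handle and flush any queued packets.
 */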
static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
				   struct ib_sa_mcmember_rec *mcmember)
	struct net_device *dev = mcast->dev;
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct rdma_netdev *rn = netdev_priv(dev);
	mcast->mcmember = *mcmember;
	/* Set the multicast MTU and cached Q_Key before we attach if it's
	 * the broadcast group.
	if (!memcmp(mcast->mcmember.mgid.raw, priv->dev->broadcast + 4,
		    sizeof (union ib_gid))) {
		spin_lock_irq(&priv->lock);
		if (!priv->broadcast) {
			spin_unlock_irq(&priv->lock);
		/* update priv members according to the new mcast */
		priv->broadcast->mcmember.qkey = mcmember->qkey;
		priv->broadcast->mcmember.mtu = mcmember->mtu;
		priv->broadcast->mcmember.traffic_class = mcmember->traffic_class;
		priv->broadcast->mcmember.rate = mcmember->rate;
		priv->broadcast->mcmember.sl = mcmember->sl;
		priv->broadcast->mcmember.flow_label = mcmember->flow_label;
		priv->broadcast->mcmember.hop_limit = mcmember->hop_limit;
		/* assume that if the admin and mcast MTUs are the same, both can be changed */
		if (priv->mcast_mtu == priv->admin_mtu)
			IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu));
			IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu));
		priv->qkey = be32_to_cpu(priv->broadcast->mcmember.qkey);
		spin_unlock_irq(&priv->lock);
		priv->tx_wr.remote_qkey = priv->qkey;
	if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
		if (test_and_set_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
			ipoib_warn(priv, "multicast group %pI6 already attached\n",
				   mcast->mcmember.mgid.raw);
		ret = rn->attach_mcast(dev, priv->ca, &mcast->mcmember.mgid,
				       be16_to_cpu(mcast->mcmember.mlid),
				       set_qkey, priv->qkey);
			ipoib_warn(priv, "couldn't attach QP to multicast group %pI6\n",
				   mcast->mcmember.mgid.raw);
			clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags);
		struct ib_ah_attr av = {
			.dlid = be16_to_cpu(mcast->mcmember.mlid),
			.port_num = priv->port,
			.sl = mcast->mcmember.sl,
			.ah_flags = IB_AH_GRH,
			.static_rate = mcast->mcmember.rate,
				.flow_label = be32_to_cpu(mcast->mcmember.flow_label),
				.hop_limit = mcast->mcmember.hop_limit,
				.traffic_class = mcast->mcmember.traffic_class
		av.grh.dgid = mcast->mcmember.mgid;
		ah = ipoib_create_ah(dev, priv->pd, &av);
			ipoib_warn(priv, "ib_address_create failed %ld\n",
			/* use original error */
		spin_lock_irq(&priv->lock);
		spin_unlock_irq(&priv->lock);
		ipoib_dbg_mcast(priv, "MGID %pI6 AV %p, LID 0x%04x, SL %d\n",
				mcast->mcmember.mgid.raw,
				be16_to_cpu(mcast->mcmember.mlid),
	/* actually send any queued packets */
	netif_tx_lock_bh(dev);
	while (!skb_queue_empty(&mcast->pkt_queue)) {
		struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue);
		netif_tx_unlock_bh(dev);
		ret = dev_queue_xmit(skb);
			ipoib_warn(priv, "%s:dev_queue_xmit failed to re-queue packet, ret:%d\n",
		netif_tx_lock_bh(dev);
	netif_tx_unlock_bh(dev);
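
/*
 * Deferred work that turns the carrier on once the IB port is ACTIVE and
 * the broadcast group has been joined; also probes the SM for
 * sendonly-fullmember support.
 */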
void ipoib_mcast_carrier_on_task(struct work_struct *work)
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
	struct ib_port_attr attr;
	if (ib_query_port(priv->ca, priv->port, &attr) ||
	    attr.state != IB_PORT_ACTIVE) {
		ipoib_dbg(priv, "Keeping carrier off until IB port is active\n");
	 * Check if we can send send-only MCGs with the sendonly-fullmember join state.
	 * This is done after the broadcast group has been successfully joined,
	 * because the broadcast group must always be joined first and is always
	 * re-joined if the SM changes substantially.
	ret = ipoib_check_sm_sendonly_fullmember_support(priv);
		pr_debug("%s failed query sm support for sendonly-fullmember (ret: %d)\n",
			 priv->dev->name, ret);
	 * Take rtnl_lock to avoid racing with ipoib_stop() and
	 * turning the carrier back on while a device is being
	 * removed. However, ipoib_stop() will attempt to flush
	 * the workqueue while holding the rtnl lock, so loop
	 * on trylock until either we get the lock or we see
	 * FLAG_OPER_UP go away as that signals that we are bailing
	 * and can safely ignore the carrier on work.
	while (!rtnl_trylock()) {
		if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
	if (!ipoib_cm_admin_enabled(priv->dev))
		dev_set_mtu(priv->dev, min(priv->mcast_mtu, priv->admin_mtu));
	netif_carrier_on(priv->dev);
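
/*
 * SA multicast join callback: on success finish the join and kick the
 * carrier-on and join tasks; on failure log, apply backoff and reschedule.
 */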
static int ipoib_mcast_join_complete(int status,
				     struct ib_sa_multicast *multicast)
	struct ipoib_mcast *mcast = multicast->context;
	struct net_device *dev = mcast->dev;
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	ipoib_dbg_mcast(priv, "%sjoin completion for %pI6 (status %d)\n",
			test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) ?
			mcast->mcmember.mgid.raw, status);
	/* We trap for port events ourselves. */
	if (status == -ENETRESET) {
	status = ipoib_mcast_join_finish(mcast, &multicast->rec);
		mcast->delay_until = jiffies;
		 * Defer carrier on work to priv->wq to avoid a
		 * deadlock on rtnl_lock here. Requeue our multicast
		 * work too, which will end up happening right after
		 * our carrier on task work and will allow us to
		 * send out all of the non-broadcast joins
		if (mcast == priv->broadcast) {
			spin_lock_irq(&priv->lock);
			queue_work(priv->wq, &priv->carrier_on_task);
			__ipoib_mcast_schedule_join_thread(priv, NULL, 0);
			test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) &&
		if (mcast->logcount < 20) {
			if (status == -ETIMEDOUT || status == -EAGAIN ||
				ipoib_dbg_mcast(priv, "%smulticast join failed for %pI6, status %d\n",
						test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) ? "sendonly " : "",
						mcast->mcmember.mgid.raw, status);
				ipoib_warn(priv, "%smulticast join failed for %pI6, status %d\n",
					   test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) ? "sendonly " : "",
					   mcast->mcmember.mgid.raw, status);
		if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) &&
		    mcast->backoff >= 2) {
			 * We only retry sendonly joins once before we drop
			 * the packet and quit trying to deal with the
			 * group. However, we leave the group in the
			 * mcast list as an unjoined group. If we want to
			 * try joining again, we simply queue up a packet
			 * and restart the join thread. The empty queue
			 * is why the join thread ignores this group.
			netif_tx_lock_bh(dev);
			while (!skb_queue_empty(&mcast->pkt_queue)) {
				++dev->stats.tx_dropped;
				dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
			netif_tx_unlock_bh(dev);
			spin_lock_irq(&priv->lock);
			/* Requeue this join task with a backoff delay */
			__ipoib_mcast_schedule_join_thread(priv, mcast, 1);
	spin_lock_irq(&priv->lock);
	 * Make sure to set mcast->mc before we clear the busy flag to avoid
	 * racing with code that checks for BUSY before checking mcast->mc
	mcast->mc = multicast;
	clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
	spin_unlock_irq(&priv->lock);
	complete(&mcast->done);

 * Caller must hold 'priv->lock'
static int ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ib_sa_multicast *multicast;
	struct ib_sa_mcmember_rec rec = {
	ib_sa_comp_mask comp_mask;
	if (!priv->broadcast ||
	    !test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
	ipoib_dbg_mcast(priv, "joining MGID %pI6\n", mcast->mcmember.mgid.raw);
	rec.mgid = mcast->mcmember.mgid;
	rec.port_gid = priv->local_gid;
	rec.pkey = cpu_to_be16(priv->pkey);
		IB_SA_MCMEMBER_REC_MGID |
		IB_SA_MCMEMBER_REC_PORT_GID |
		IB_SA_MCMEMBER_REC_PKEY |
		IB_SA_MCMEMBER_REC_JOIN_STATE;
	if (mcast != priv->broadcast) {
		 * The MGID MUST use the same P_Key, Q_Key, SL, MTU,
		 * and HopLimit as those used in the broadcast-GID. The rest
		 * of the attributes SHOULD follow the values used in the
		 * broadcast-GID as well.
			IB_SA_MCMEMBER_REC_QKEY |
			IB_SA_MCMEMBER_REC_MTU_SELECTOR |
			IB_SA_MCMEMBER_REC_MTU |
			IB_SA_MCMEMBER_REC_TRAFFIC_CLASS |
			IB_SA_MCMEMBER_REC_RATE_SELECTOR |
			IB_SA_MCMEMBER_REC_RATE |
			IB_SA_MCMEMBER_REC_SL |
			IB_SA_MCMEMBER_REC_FLOW_LABEL |
			IB_SA_MCMEMBER_REC_HOP_LIMIT;
		rec.qkey = priv->broadcast->mcmember.qkey;
		rec.mtu_selector = IB_SA_EQ;
		rec.mtu = priv->broadcast->mcmember.mtu;
		rec.traffic_class = priv->broadcast->mcmember.traffic_class;
		rec.rate_selector = IB_SA_EQ;
		rec.rate = priv->broadcast->mcmember.rate;
		rec.sl = priv->broadcast->mcmember.sl;
		rec.flow_label = priv->broadcast->mcmember.flow_label;
		rec.hop_limit = priv->broadcast->mcmember.hop_limit;
		 * Send-only IB Multicast joins work at the core IB layer but
		 * require specific SM support; we can use such joins here only
		 * if the current SM supports that feature. Otherwise, we
		 * emulate an Ethernet multicast send, which does not require a
		 * multicast subscription and will still send properly. The
		 * most appropriate thing to do is to create the group if it
		 * doesn't exist, as that most closely emulates the behavior,
		 * from a user space application perspective, of Ethernet
		 * multicast operation.
	if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) &&
	    priv->sm_fullmember_sendonly_support)
		/* SM supports sendonly-fullmember, otherwise fall back to full-member */
		rec.join_state = SENDONLY_FULLMEMBER_JOIN;
	spin_unlock_irq(&priv->lock);
	multicast = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port,
					 &rec, comp_mask, GFP_KERNEL,
					 ipoib_mcast_join_complete, mcast);
	spin_lock_irq(&priv->lock);
	if (IS_ERR(multicast)) {
		ret = PTR_ERR(multicast);
		ipoib_warn(priv, "ib_sa_join_multicast failed, status %d\n", ret);
		/* Requeue this join task with a backoff delay */
		__ipoib_mcast_schedule_join_thread(priv, mcast, 1);
		clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
		spin_unlock_irq(&priv->lock);
		complete(&mcast->done);
		spin_lock_irq(&priv->lock);
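
/*
 * Multicast join thread: (re)creates and joins the broadcast group first,
 * then walks the multicast list and starts joins for any unjoined groups,
 * rescheduling itself for entries that are still in backoff.
 */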
void ipoib_mcast_join_task(struct work_struct *work)
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, mcast_task.work);
	struct net_device *dev = priv->dev;
	struct ib_port_attr port_attr;
	unsigned long delay_until = 0;
	struct ipoib_mcast *mcast = NULL;
	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
	if (ib_query_port(priv->ca, priv->port, &port_attr)) {
		ipoib_dbg(priv, "ib_query_port() failed\n");
	if (port_attr.state != IB_PORT_ACTIVE) {
		ipoib_dbg(priv, "port state is not ACTIVE (state = %d) suspending join task\n",
	priv->local_lid = port_attr.lid;
	netif_addr_lock_bh(dev);
	if (!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) {
		netif_addr_unlock_bh(dev);
	netif_addr_unlock_bh(dev);
	spin_lock_irq(&priv->lock);
	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
	if (!priv->broadcast) {
		struct ipoib_mcast *broadcast;
		broadcast = ipoib_mcast_alloc(dev, 0);
			ipoib_warn(priv, "failed to allocate broadcast group\n");
			 * Restart us after a 1 second delay to retry
			 * creating our broadcast group and attaching to
			 * it. Until this succeeds, this ipoib dev is
			 * completely stalled (multicast wise).
			__ipoib_mcast_schedule_join_thread(priv, NULL, 1);
		memcpy(broadcast->mcmember.mgid.raw, priv->dev->broadcast + 4,
		       sizeof (union ib_gid));
		priv->broadcast = broadcast;
		__ipoib_mcast_add(dev, priv->broadcast);
	if (!test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
		if (IS_ERR_OR_NULL(priv->broadcast->mc) &&
		    !test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags)) {
			mcast = priv->broadcast;
			if (mcast->backoff > 1 &&
			    time_before(jiffies, mcast->delay_until)) {
				delay_until = mcast->delay_until;
	 * We'll never get here until the broadcast group is both allocated and attached
	list_for_each_entry(mcast, &priv->multicast_list, list) {
		if (IS_ERR_OR_NULL(mcast->mc) &&
		    !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags) &&
		    (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) ||
		     !skb_queue_empty(&mcast->pkt_queue))) {
			if (mcast->backoff == 1 ||
			    time_after_eq(jiffies, mcast->delay_until)) {
				/* Found the next unjoined group */
				init_completion(&mcast->done);
				set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
				if (ipoib_mcast_join(dev, mcast)) {
					spin_unlock_irq(&priv->lock);
			} else if (!delay_until ||
				   time_before(mcast->delay_until, delay_until))
				delay_until = mcast->delay_until;
	ipoib_dbg_mcast(priv, "successfully started all multicast joins\n");
		cancel_delayed_work(&priv->mcast_task);
		queue_delayed_work(priv->wq, &priv->mcast_task,
				   delay_until - jiffies);
		init_completion(&mcast->done);
		set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
		ipoib_mcast_join(dev, mcast);
	spin_unlock_irq(&priv->lock);

void ipoib_mcast_start_thread(struct net_device *dev)
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	ipoib_dbg_mcast(priv, "starting multicast thread\n");
	spin_lock_irqsave(&priv->lock, flags);
	__ipoib_mcast_schedule_join_thread(priv, NULL, 0);
	spin_unlock_irqrestore(&priv->lock, flags);

int ipoib_mcast_stop_thread(struct net_device *dev)
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	ipoib_dbg_mcast(priv, "stopping multicast thread\n");
	spin_lock_irqsave(&priv->lock, flags);
	cancel_delayed_work(&priv->mcast_task);
	spin_unlock_irqrestore(&priv->lock, flags);
	flush_workqueue(priv->wq);
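
/*
 * Leave a multicast group: free the SA multicast handle and, if the QP was
 * attached to the group, detach it.
 */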
static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct rdma_netdev *rn = netdev_priv(dev);
	if (test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
		ipoib_warn(priv, "ipoib_mcast_leave on an in-flight join\n");
	if (!IS_ERR_OR_NULL(mcast->mc))
		ib_sa_free_multicast(mcast->mc);
	if (test_and_clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
		ipoib_dbg_mcast(priv, "leaving MGID %pI6\n",
				mcast->mcmember.mgid.raw);
		/* Remove ourselves from the multicast group */
		ret = rn->detach_mcast(dev, priv->ca, &mcast->mcmember.mgid,
				       be16_to_cpu(mcast->mcmember.mlid));
			ipoib_warn(priv, "ib_detach_mcast failed (result = %d)\n", ret);
	} else if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
		ipoib_dbg(priv, "leaving with no mcmember but not a sendonly join\n");

 * Check if the multicast group is send-only. If so, remove it from the maps
 * and add it to the remove list.
void ipoib_check_and_add_mcast_sendonly(struct ipoib_dev_priv *priv, u8 *mgid,
					struct list_head *remove_list)
	/* Is this a multicast group? */
		struct ipoib_mcast *mcast = __ipoib_mcast_find(priv->dev, mgid);
		if (mcast && test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
			list_del(&mcast->list);
			rb_erase(&mcast->rb_node, &priv->multicast_tree);
			list_add_tail(&mcast->list, remove_list);

void ipoib_mcast_remove_list(struct list_head *remove_list)
	struct ipoib_mcast *mcast, *tmcast;
	list_for_each_entry_safe(mcast, tmcast, remove_list, list) {
		ipoib_mcast_leave(mcast->dev, mcast);
		ipoib_mcast_free(mcast);
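
/*
 * Transmit path for multicast: look up (or create a send-only entry for) the
 * destination group. If the group is not joined yet, queue the packet and
 * kick the join thread; otherwise send it using the group's address handle.
 */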
void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct rdma_netdev *rn = netdev_priv(dev);
	struct ipoib_mcast *mcast;
	void *mgid = daddr + 4;
	spin_lock_irqsave(&priv->lock, flags);
	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags) ||
	    !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	mcast = __ipoib_mcast_find(dev, mgid);
	if (!mcast || !mcast->ah) {
		/* Let's create a new send only group now */
		ipoib_dbg_mcast(priv, "setting up send only multicast group for %pI6\n",
		mcast = ipoib_mcast_alloc(dev, 0);
			ipoib_warn(priv, "unable to allocate memory for multicast structure\n");
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb);
		set_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags);
		memcpy(mcast->mcmember.mgid.raw, mgid,
		       sizeof (union ib_gid));
		__ipoib_mcast_add(dev, mcast);
		list_add_tail(&mcast->list, &priv->multicast_list);
	if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE) {
		/* put pseudoheader back on for next time */
		skb_push(skb, sizeof(struct ipoib_pseudo_header));
		skb_queue_tail(&mcast->pkt_queue, skb);
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	if (!test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) {
		__ipoib_mcast_schedule_join_thread(priv, NULL, 0);
		struct ipoib_neigh *neigh;
		spin_unlock_irqrestore(&priv->lock, flags);
		neigh = ipoib_neigh_get(dev, daddr);
		spin_lock_irqsave(&priv->lock, flags);
			neigh = ipoib_neigh_alloc(daddr, dev);
				kref_get(&mcast->ah->ref);
				neigh->ah = mcast->ah;
				list_add_tail(&neigh->list, &mcast->neigh_list);
		spin_unlock_irqrestore(&priv->lock, flags);
		mcast->ah->last_send = rn->send(dev, skb, mcast->ah->ah,
			ipoib_neigh_put(neigh);
	spin_unlock_irqrestore(&priv->lock, flags);
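
/*
 * Drop every multicast group (including broadcast), waiting for any
 * in-flight joins to complete before the groups are left and freed.
 */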
void ipoib_mcast_dev_flush(struct net_device *dev)
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	LIST_HEAD(remove_list);
	struct ipoib_mcast *mcast, *tmcast;
	ipoib_dbg_mcast(priv, "flushing multicast list\n");
	spin_lock_irqsave(&priv->lock, flags);
	list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) {
		list_del(&mcast->list);
		rb_erase(&mcast->rb_node, &priv->multicast_tree);
		list_add_tail(&mcast->list, &remove_list);
	if (priv->broadcast) {
		rb_erase(&priv->broadcast->rb_node, &priv->multicast_tree);
		list_add_tail(&priv->broadcast->list, &remove_list);
		priv->broadcast = NULL;
	spin_unlock_irqrestore(&priv->lock, flags);
	 * make sure the in-flight joins have finished before we attempt to leave
	list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
		if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
			wait_for_completion(&mcast->done);
	ipoib_mcast_remove_list(&remove_list);

static int ipoib_mcast_addr_is_valid(const u8 *addr, const u8 *broadcast)
	/* reserved QPN, prefix, scope */
	if (memcmp(addr, broadcast, 6))
	/* signature lower, pkey */
	if (memcmp(addr + 7, broadcast + 7, 3))
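
/*
 * Re-sync the driver's multicast list with the net_device's list of
 * multicast hardware addresses: add newly requested groups, and move
 * groups that are no longer requested onto a remove list.
 */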
void ipoib_mcast_restart_task(struct work_struct *work)
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, restart_task);
	struct net_device *dev = priv->dev;
	struct netdev_hw_addr *ha;
	struct ipoib_mcast *mcast, *tmcast;
	LIST_HEAD(remove_list);
	struct ib_sa_mcmember_rec rec;
	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
		 * shortcut...on shutdown flush is called next, just
		 * let it do all the work
	ipoib_dbg_mcast(priv, "restarting multicast task\n");
	local_irq_save(flags);
	netif_addr_lock(dev);
	spin_lock(&priv->lock);
	 * Unfortunately, the networking core only gives us a list of all of
	 * the multicast hardware addresses. We need to figure out which ones
	 * are new and which ones have been removed
	/* Clear out the found flag */
	list_for_each_entry(mcast, &priv->multicast_list, list)
		clear_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags);
	/* Mark the entries that are already present, and create entries for those that don't exist */
	netdev_for_each_mc_addr(ha, dev) {
		if (!ipoib_mcast_addr_is_valid(ha->addr, dev->broadcast))
		memcpy(mgid.raw, ha->addr + 4, sizeof mgid);
		mcast = __ipoib_mcast_find(dev, &mgid);
		if (!mcast || test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
			struct ipoib_mcast *nmcast;
			/* ignore groups that were directly joined from userspace */
			if (test_bit(IPOIB_FLAG_UMCAST, &priv->flags) &&
			    !ib_sa_get_mcmember_rec(priv->ca, priv->port, &mgid, &rec)) {
				ipoib_dbg_mcast(priv, "ignoring multicast entry for mgid %pI6\n",
			/* Not found or send-only group, let's add a new entry */
			ipoib_dbg_mcast(priv, "adding multicast entry for mgid %pI6\n",
			nmcast = ipoib_mcast_alloc(dev, 0);
				ipoib_warn(priv, "unable to allocate memory for multicast structure\n");
			set_bit(IPOIB_MCAST_FLAG_FOUND, &nmcast->flags);
			nmcast->mcmember.mgid = mgid;
				/* Destroy the send only entry */
				list_move_tail(&mcast->list, &remove_list);
				rb_replace_node(&mcast->rb_node,
						&priv->multicast_tree);
				__ipoib_mcast_add(dev, nmcast);
			list_add_tail(&nmcast->list, &priv->multicast_list);
			set_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags);
	/* Remove all of the entries that don't exist anymore */
	list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) {
		if (!test_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags) &&
		    !test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
			ipoib_dbg_mcast(priv, "deleting multicast group %pI6\n",
					mcast->mcmember.mgid.raw);
			rb_erase(&mcast->rb_node, &priv->multicast_tree);
			/* Move to the remove list */
			list_move_tail(&mcast->list, &remove_list);
	spin_unlock(&priv->lock);
	netif_addr_unlock(dev);
	local_irq_restore(flags);
	 * make sure the in-flight joins have finished before we attempt to leave
	list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
		if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
			wait_for_completion(&mcast->done);
	ipoib_mcast_remove_list(&remove_list);
	 * Double check that we are still up
	if (test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
		spin_lock_irqsave(&priv->lock, flags);
		__ipoib_mcast_schedule_join_thread(priv, NULL, 0);
		spin_unlock_irqrestore(&priv->lock, flags);

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG

struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev)
	struct ipoib_mcast_iter *iter;
	iter = kmalloc(sizeof *iter, GFP_KERNEL);
	memset(iter->mgid.raw, 0, 16);
	if (ipoib_mcast_iter_next(iter)) {
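
/*
 * Advance the debugfs iterator to the next multicast group (in MGID order)
 * and snapshot its state; returns non-zero when there are no more entries.
 */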
int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter)
	struct ipoib_dev_priv *priv = ipoib_priv(iter->dev);
	struct ipoib_mcast *mcast;
	spin_lock_irq(&priv->lock);
	n = rb_first(&priv->multicast_tree);
		mcast = rb_entry(n, struct ipoib_mcast, rb_node);
		if (memcmp(iter->mgid.raw, mcast->mcmember.mgid.raw,
			   sizeof (union ib_gid)) < 0) {
			iter->mgid = mcast->mcmember.mgid;
			iter->created = mcast->created;
			iter->queuelen = skb_queue_len(&mcast->pkt_queue);
			iter->complete = !!mcast->ah;
			iter->send_only = !!(mcast->flags & (1 << IPOIB_MCAST_FLAG_SENDONLY));
	spin_unlock_irq(&priv->lock);

void ipoib_mcast_iter_read(struct ipoib_mcast_iter *iter,
			   unsigned long *created,
			   unsigned int *queuelen,
			   unsigned int *complete,
			   unsigned int *send_only)
	*created = iter->created;
	*queuelen = iter->queuelen;
	*complete = iter->complete;
	*send_only = iter->send_only;

#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */