/*
 * Copyright (c) 2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/random.h>

#include <rdma/ib_cache.h>
#include "sa.h"

static void mcast_add_one(struct ib_device *device);
static void mcast_remove_one(struct ib_device *device);

static struct ib_client mcast_client = {
	.name   = "ib_multicast",
	.add    = mcast_add_one,
	.remove = mcast_remove_one
};

static struct ib_sa_client	sa_client;
static struct workqueue_struct	*mcast_wq;
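/* An all-zero MGID; joining with it asks the SA to assign the MGID. */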
static union ib_gid mgid0;

struct mcast_device;

struct mcast_port {
	struct mcast_device	*dev;
	spinlock_t		lock;
	struct rb_root		table;
	atomic_t		refcount;
	struct completion	comp;
	u8			port_num;
};

struct mcast_device {
	struct ib_device	*device;
	struct ib_event_handler	event_handler;
	int			start_port;
	int			end_port;
	struct mcast_port	port[0];
};

enum mcast_state {
	MCAST_JOINING,
	MCAST_MEMBER,
	MCAST_ERROR
};

enum mcast_group_state {
	MCAST_IDLE,
	MCAST_BUSY,
	MCAST_GROUP_ERROR,
	MCAST_PKEY_EVENT
};

enum {
	MCAST_INVALID_PKEY_INDEX = 0xFFFF
};

struct mcast_member;

struct mcast_group {
	struct ib_sa_mcmember_rec rec;
	struct rb_node		node;
	struct mcast_port	*port;
	spinlock_t		lock;
	struct work_struct	work;
	struct list_head	pending_list;
	struct list_head	active_list;
	struct mcast_member	*last_join;
	int			members[3];
	atomic_t		refcount;
	enum mcast_group_state	state;
	struct ib_sa_query	*query;
	int			query_id;
	u16			pkey_index;
	u8			leave_state;
	int			retries;
};

struct mcast_member {
	struct ib_sa_multicast	multicast;
	struct ib_sa_client	*client;
	struct mcast_group	*group;
	struct list_head	list;
	enum mcast_state	state;
	atomic_t		refcount;
	struct completion	comp;
};

static void join_handler(int status, struct ib_sa_mcmember_rec *rec,
			 void *context);
static void leave_handler(int status, struct ib_sa_mcmember_rec *rec,
			  void *context);
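
/*
 * Look up a group by MGID in the port's red-black tree.
 * Caller must hold port->lock.
 */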
static struct mcast_group *mcast_find(struct mcast_port *port,
				      union ib_gid *mgid)
{
	struct rb_node *node = port->table.rb_node;
	struct mcast_group *group;
	int ret;

	while (node) {
		group = rb_entry(node, struct mcast_group, node);
		ret = memcmp(mgid->raw, group->rec.mgid.raw, sizeof *mgid);
		if (!ret)
			return group;

		if (ret < 0)
			node = node->rb_left;
		else
			node = node->rb_right;
	}
	return NULL;
}
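
/*
 * Insert a group into the port's tree, keyed on MGID.  Returns the existing
 * group if the MGID is already present and duplicates are not allowed,
 * otherwise NULL.  Caller must hold port->lock.
 */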
static struct mcast_group *mcast_insert(struct mcast_port *port,
					struct mcast_group *group,
					int allow_duplicates)
{
	struct rb_node **link = &port->table.rb_node;
	struct rb_node *parent = NULL;
	struct mcast_group *cur_group;
	int ret;

	while (*link) {
		parent = *link;
		cur_group = rb_entry(parent, struct mcast_group, node);

		ret = memcmp(group->rec.mgid.raw, cur_group->rec.mgid.raw,
			     sizeof group->rec.mgid);
		if (ret < 0)
			link = &(*link)->rb_left;
		else if (ret > 0)
			link = &(*link)->rb_right;
		else if (allow_duplicates)
			link = &(*link)->rb_left;
		else
			return cur_group;
	}
	rb_link_node(&group->node, parent, link);
	rb_insert_color(&group->node, &port->table);
	return NULL;
}

static void deref_port(struct mcast_port *port)
{
	if (atomic_dec_and_test(&port->refcount))
		complete(&port->comp);
}

static void release_group(struct mcast_group *group)
{
	struct mcast_port *port = group->port;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	if (atomic_dec_and_test(&group->refcount)) {
		rb_erase(&group->node, &port->table);
		spin_unlock_irqrestore(&port->lock, flags);
		kfree(group);
		deref_port(port);
	} else
		spin_unlock_irqrestore(&port->lock, flags);
}

static void deref_member(struct mcast_member *member)
{
	if (atomic_dec_and_test(&member->refcount))
		complete(&member->comp);
}
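
/*
 * Add a member to the group's pending list and kick the work handler if the
 * group is idle; the queued work holds a reference on the group.
 */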
static void queue_join(struct mcast_member *member)
{
	struct mcast_group *group = member->group;
	unsigned long flags;

	spin_lock_irqsave(&group->lock, flags);
	list_add_tail(&member->list, &group->pending_list);
	if (group->state == MCAST_IDLE) {
		group->state = MCAST_BUSY;
		atomic_inc(&group->refcount);
		queue_work(mcast_wq, &group->work);
	}
	spin_unlock_irqrestore(&group->lock, flags);
}

/*
 * A multicast group has three types of members: full member, non member, and
 * send only member.  We need to keep track of the number of members of each
 * type based on their join state.  Adjust the number of members that belong
 * to the specified join states.
 */
static void adjust_membership(struct mcast_group *group, u8 join_state, int inc)
{
	int i;

	for (i = 0; i < 3; i++, join_state >>= 1)
		if (join_state & 0x1)
			group->members[i] += inc;
}

/*
 * If a multicast group has zero members left for a particular join state, but
 * the group is still a member with the SA, we need to leave that join state.
 * Determine which join states we still belong to, but that do not have any
 * active members.
 */
static u8 get_leave_state(struct mcast_group *group)
{
	u8 leave_state = 0;
	int i;

	for (i = 0; i < 3; i++)
		if (!group->members[i])
			leave_state |= (0x1 << i);

	return leave_state & group->rec.join_state;
}
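
/*
 * Check one selector-qualified field: verify that src_value is greater than
 * (IB_SA_GT), less than (IB_SA_LT), or equal to (IB_SA_EQ) dst_value, as the
 * request's selector demands.  Returns non-zero if the constraint is not met.
 */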
static int check_selector(ib_sa_comp_mask comp_mask,
			  ib_sa_comp_mask selector_mask,
			  ib_sa_comp_mask value_mask,
			  u8 selector, u8 src_value, u8 dst_value)
{
	int err;

	if (!(comp_mask & selector_mask) || !(comp_mask & value_mask))
		return 0;

	switch (selector) {
	case IB_SA_GT:
		err = (src_value <= dst_value);
		break;
	case IB_SA_LT:
		err = (src_value >= dst_value);
		break;
	case IB_SA_EQ:
		err = (src_value != dst_value);
		break;
	default:
		err = 0;
		break;
	}

	return err;
}
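
/*
 * Compare the group's current member record (src) against a join request
 * (dst) for every field named in the request's comp_mask.  Returns -EINVAL
 * on the first mismatch.
 */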
static int cmp_rec(struct ib_sa_mcmember_rec *src,
		   struct ib_sa_mcmember_rec *dst, ib_sa_comp_mask comp_mask)
{
	/* MGID must already match */

	if (comp_mask & IB_SA_MCMEMBER_REC_PORT_GID &&
	    memcmp(&src->port_gid, &dst->port_gid, sizeof src->port_gid))
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_QKEY && src->qkey != dst->qkey)
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_MLID && src->mlid != dst->mlid)
		return -EINVAL;
	if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_MTU_SELECTOR,
			   IB_SA_MCMEMBER_REC_MTU, dst->mtu_selector,
			   src->mtu, dst->mtu))
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_TRAFFIC_CLASS &&
	    src->traffic_class != dst->traffic_class)
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_PKEY && src->pkey != dst->pkey)
		return -EINVAL;
	if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_RATE_SELECTOR,
			   IB_SA_MCMEMBER_REC_RATE, dst->rate_selector,
			   src->rate, dst->rate))
		return -EINVAL;
	if (check_selector(comp_mask,
			   IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME_SELECTOR,
			   IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME,
			   dst->packet_life_time_selector,
			   src->packet_life_time, dst->packet_life_time))
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_SL && src->sl != dst->sl)
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_FLOW_LABEL &&
	    src->flow_label != dst->flow_label)
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_HOP_LIMIT &&
	    src->hop_limit != dst->hop_limit)
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_SCOPE && src->scope != dst->scope)
		return -EINVAL;

	/* join_state checked separately, proxy_join ignored */

	return 0;
}
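
/*
 * Issue an SA MCMemberRecord SET for the joining member.  On success the
 * query id is saved so the response can be matched in join_handler().
 */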
static int send_join(struct mcast_group *group, struct mcast_member *member)
{
	struct mcast_port *port = group->port;
	int ret;

	group->last_join = member;
	ret = ib_sa_mcmember_rec_query(&sa_client, port->dev->device,
				       port->port_num, IB_MGMT_METHOD_SET,
				       &member->multicast.rec,
				       member->multicast.comp_mask,
				       3000, GFP_KERNEL, join_handler, group,
				       &group->query);
	if (ret >= 0) {
		group->query_id = ret;
		ret = 0;
	}
	return ret;
}

static int send_leave(struct mcast_group *group, u8 leave_state)
{
	struct mcast_port *port = group->port;
	struct ib_sa_mcmember_rec rec;
	int ret;

	rec = group->rec;
	rec.join_state = leave_state;
	group->leave_state = leave_state;

	ret = ib_sa_mcmember_rec_query(&sa_client, port->dev->device,
				       port->port_num, IB_SA_METHOD_DELETE, &rec,
				       IB_SA_MCMEMBER_REC_MGID |
				       IB_SA_MCMEMBER_REC_PORT_GID |
				       IB_SA_MCMEMBER_REC_JOIN_STATE,
				       3000, GFP_KERNEL, leave_handler,
				       group, &group->query);
	if (ret >= 0) {
		group->query_id = ret;
		ret = 0;
	}
	return ret;
}

static void join_group(struct mcast_group *group, struct mcast_member *member,
		       u8 join_state)
{
	member->state = MCAST_MEMBER;
	adjust_membership(group, join_state, 1);
	group->rec.join_state |= join_state;
	member->multicast.rec = group->rec;
	member->multicast.rec.join_state = join_state;
	list_move(&member->list, &group->active_list);
}

static int fail_join(struct mcast_group *group, struct mcast_member *member,
		     int status)
{
	spin_lock_irq(&group->lock);
	list_del_init(&member->list);
	spin_unlock_irq(&group->lock);
	return member->multicast.callback(status, &member->multicast);
}
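
/*
 * Report -ENETRESET to every active member and reset the group, unless a
 * P_Key change turned out to leave the group's P_Key index intact.
 */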
static void process_group_error(struct mcast_group *group)
{
	struct mcast_member *member;
	int ret = 0;
	u16 pkey_index;

	if (group->state == MCAST_PKEY_EVENT)
		ret = ib_find_pkey(group->port->dev->device,
				   group->port->port_num,
				   be16_to_cpu(group->rec.pkey), &pkey_index);

	spin_lock_irq(&group->lock);
	if (group->state == MCAST_PKEY_EVENT && !ret &&
	    group->pkey_index == pkey_index)
		goto out;

	while (!list_empty(&group->active_list)) {
		member = list_entry(group->active_list.next,
				    struct mcast_member, list);
		atomic_inc(&member->refcount);
		list_del_init(&member->list);
		adjust_membership(group, member->multicast.rec.join_state, -1);
		member->state = MCAST_ERROR;
		spin_unlock_irq(&group->lock);

		ret = member->multicast.callback(-ENETRESET,
						 &member->multicast);
		deref_member(member);
		if (ret)
			ib_sa_free_multicast(&member->multicast);
		spin_lock_irq(&group->lock);
	}

	group->rec.join_state = 0;
out:
	group->state = MCAST_BUSY;
	spin_unlock_irq(&group->lock);
}
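
/*
 * Serialized per-group state machine: handle error events, process pending
 * joins one at a time, then leave any join states that no longer have
 * members.
 */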
static void mcast_work_handler(struct work_struct *work)
{
	struct mcast_group *group;
	struct mcast_member *member;
	struct ib_sa_multicast *multicast;
	int status, ret;
	u8 join_state;

	group = container_of(work, typeof(*group), work);
retest:
	spin_lock_irq(&group->lock);
	while (!list_empty(&group->pending_list) ||
	       (group->state != MCAST_BUSY)) {

		if (group->state != MCAST_BUSY) {
			spin_unlock_irq(&group->lock);
			process_group_error(group);
			goto retest;
		}

		member = list_entry(group->pending_list.next,
				    struct mcast_member, list);
		multicast = &member->multicast;
		join_state = multicast->rec.join_state;
		atomic_inc(&member->refcount);

		if (join_state == (group->rec.join_state & join_state)) {
			status = cmp_rec(&group->rec, &multicast->rec,
					 multicast->comp_mask);
			if (!status)
				join_group(group, member, join_state);
			else
				list_del_init(&member->list);
			spin_unlock_irq(&group->lock);
			ret = multicast->callback(status, multicast);
		} else {
			spin_unlock_irq(&group->lock);
			status = send_join(group, member);
			if (!status) {
				deref_member(member);
				return;
			}
			ret = fail_join(group, member, status);
		}

		deref_member(member);
		if (ret)
			ib_sa_free_multicast(&member->multicast);
		spin_lock_irq(&group->lock);
	}

	join_state = get_leave_state(group);
	if (join_state) {
		group->rec.join_state &= ~join_state;
		spin_unlock_irq(&group->lock);
		if (send_leave(group, join_state))
			goto retest;
	} else {
		group->state = MCAST_IDLE;
		spin_unlock_irq(&group->lock);
		release_group(group);
	}
}

/*
 * Fail a join request if it is still active - at the head of the pending queue.
 */
static void process_join_error(struct mcast_group *group, int status)
{
	struct mcast_member *member;
	int ret;

	spin_lock_irq(&group->lock);
	member = list_entry(group->pending_list.next,
			    struct mcast_member, list);
	if (group->last_join == member) {
		atomic_inc(&member->refcount);
		list_del_init(&member->list);
		spin_unlock_irq(&group->lock);
		ret = member->multicast.callback(status, &member->multicast);
		deref_member(member);
		if (ret)
			ib_sa_free_multicast(&member->multicast);
	} else
		spin_unlock_irq(&group->lock);
}

static void join_handler(int status, struct ib_sa_mcmember_rec *rec,
			 void *context)
{
	struct mcast_group *group = context;
	u16 pkey_index = MCAST_INVALID_PKEY_INDEX;

	if (status)
		process_join_error(group, status);
	else {
		ib_find_pkey(group->port->dev->device, group->port->port_num,
			     be16_to_cpu(rec->pkey), &pkey_index);

		spin_lock_irq(&group->port->lock);
		group->rec = *rec;
		if (group->state == MCAST_BUSY &&
		    group->pkey_index == MCAST_INVALID_PKEY_INDEX)
			group->pkey_index = pkey_index;
		if (!memcmp(&mgid0, &group->rec.mgid, sizeof mgid0)) {
			rb_erase(&group->node, &group->port->table);
			mcast_insert(group->port, group, 1);
		}
		spin_unlock_irq(&group->port->lock);
	}
	mcast_work_handler(&group->work);
}

static void leave_handler(int status, struct ib_sa_mcmember_rec *rec,
			  void *context)
{
	struct mcast_group *group = context;

	if (status && group->retries > 0 &&
	    !send_leave(group, group->leave_state))
		group->retries--;
	else
		mcast_work_handler(&group->work);
}
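
/*
 * Find or create the group for the given MGID and take a reference on it.
 * A zero MGID (mgid0) always creates a new group, since the SA will assign
 * the actual MGID when the join completes.
 */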
static struct mcast_group *acquire_group(struct mcast_port *port,
					 union ib_gid *mgid, gfp_t gfp_mask)
{
	struct mcast_group *group, *cur_group;
	unsigned long flags;
	int is_mgid0;

	is_mgid0 = !memcmp(&mgid0, mgid, sizeof mgid0);
	if (!is_mgid0) {
		spin_lock_irqsave(&port->lock, flags);
		group = mcast_find(port, mgid);
		if (group)
			goto found;
		spin_unlock_irqrestore(&port->lock, flags);
	}

	group = kzalloc(sizeof *group, gfp_mask);
	if (!group)
		return NULL;

	group->retries = 3;
	group->port = port;
	group->rec.mgid = *mgid;
	group->pkey_index = MCAST_INVALID_PKEY_INDEX;
	INIT_LIST_HEAD(&group->pending_list);
	INIT_LIST_HEAD(&group->active_list);
	INIT_WORK(&group->work, mcast_work_handler);
	spin_lock_init(&group->lock);

	spin_lock_irqsave(&port->lock, flags);
	cur_group = mcast_insert(port, group, is_mgid0);
	if (cur_group) {
		kfree(group);
		group = cur_group;
	} else
		atomic_inc(&port->refcount);
found:
	atomic_inc(&group->refcount);
	spin_unlock_irqrestore(&port->lock, flags);
	return group;
}

/*
 * We serialize all join requests to a single group to make our lives much
 * easier.  Otherwise, two users could try to join the same group
 * simultaneously, with different configurations, one could leave while the
 * join is in progress, etc., which makes locking around error recovery
 * difficult.
 */
struct ib_sa_multicast *
ib_sa_join_multicast(struct ib_sa_client *client,
		     struct ib_device *device, u8 port_num,
		     struct ib_sa_mcmember_rec *rec,
		     ib_sa_comp_mask comp_mask, gfp_t gfp_mask,
		     int (*callback)(int status,
				     struct ib_sa_multicast *multicast),
		     void *context)
{
	struct mcast_device *dev;
	struct mcast_member *member;
	struct ib_sa_multicast *multicast;
	int ret;

	dev = ib_get_client_data(device, &mcast_client);
	if (!dev)
		return ERR_PTR(-ENODEV);

	member = kmalloc(sizeof *member, gfp_mask);
	if (!member)
		return ERR_PTR(-ENOMEM);

	ib_sa_client_get(client);
	member->client = client;
	member->multicast.rec = *rec;
	member->multicast.comp_mask = comp_mask;
	member->multicast.callback = callback;
	member->multicast.context = context;
	init_completion(&member->comp);
	atomic_set(&member->refcount, 1);
	member->state = MCAST_JOINING;

	member->group = acquire_group(&dev->port[port_num - dev->start_port],
				      &rec->mgid, gfp_mask);
	if (!member->group) {
		ret = -ENOMEM;
		goto err;
	}

	/*
	 * The user will get the multicast structure in their callback.  They
	 * could then free the multicast structure before we can return from
	 * this routine.  So we save the pointer to return before queuing
	 * any callback.
	 */
	multicast = &member->multicast;
	queue_join(member);
	return multicast;

err:
	ib_sa_client_put(client);
	kfree(member);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_sa_join_multicast);
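
/*
 * Illustrative usage sketch, not from this file: the names my_sa_client,
 * my_join_cb, and ctx are hypothetical, and error handling is elided.
 *
 *	static int my_join_cb(int status, struct ib_sa_multicast *mc)
 *	{
 *		if (status)
 *			pr_err("multicast join failed: %d\n", status);
 *		return 0;	(a non-zero return frees the multicast)
 *	}
 *
 *	mc = ib_sa_join_multicast(&my_sa_client, device, port_num, &rec,
 *				  comp_mask, GFP_KERNEL, my_join_cb, ctx);
 *	if (IS_ERR(mc))
 *		return PTR_ERR(mc);
 *	...
 *	ib_sa_free_multicast(mc);
 */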

void ib_sa_free_multicast(struct ib_sa_multicast *multicast)
{
	struct mcast_member *member;
	struct mcast_group *group;

	member = container_of(multicast, struct mcast_member, multicast);
	group = member->group;

	spin_lock_irq(&group->lock);
	if (member->state == MCAST_MEMBER)
		adjust_membership(group, multicast->rec.join_state, -1);

	list_del_init(&member->list);

	if (group->state == MCAST_IDLE) {
		group->state = MCAST_BUSY;
		spin_unlock_irq(&group->lock);
		/* Continue to hold reference on group until callback */
		queue_work(mcast_wq, &group->work);
	} else {
		spin_unlock_irq(&group->lock);
		release_group(group);
	}

	deref_member(member);
	wait_for_completion(&member->comp);
	ib_sa_client_put(member->client);
	kfree(member);
}
EXPORT_SYMBOL(ib_sa_free_multicast);

int ib_sa_get_mcmember_rec(struct ib_device *device, u8 port_num,
			   union ib_gid *mgid, struct ib_sa_mcmember_rec *rec)
{
	struct mcast_device *dev;
	struct mcast_port *port;
	struct mcast_group *group;
	unsigned long flags;
	int ret = 0;

	dev = ib_get_client_data(device, &mcast_client);
	if (!dev)
		return -ENODEV;

	port = &dev->port[port_num - dev->start_port];
	spin_lock_irqsave(&port->lock, flags);
	group = mcast_find(port, mgid);
	if (group)
		*rec = group->rec;
	else
		ret = -EADDRNOTAVAIL;
	spin_unlock_irqrestore(&port->lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_sa_get_mcmember_rec);

int ib_init_ah_from_mcmember(struct ib_device *device, u8 port_num,
			     struct ib_sa_mcmember_rec *rec,
			     struct ib_ah_attr *ah_attr)
{
	int ret;
	u16 gid_index;
	u8 p;

	ret = ib_find_cached_gid(device, &rec->port_gid, &p, &gid_index);
	if (ret)
		return ret;

	memset(ah_attr, 0, sizeof *ah_attr);
	ah_attr->dlid = be16_to_cpu(rec->mlid);
	ah_attr->sl = rec->sl;
	ah_attr->port_num = port_num;
	ah_attr->static_rate = rec->rate;

	ah_attr->ah_flags = IB_AH_GRH;
	ah_attr->grh.dgid = rec->mgid;

	ah_attr->grh.sgid_index = (u8) gid_index;
	ah_attr->grh.flow_label = be32_to_cpu(rec->flow_label);
	ah_attr->grh.hop_limit = rec->hop_limit;
	ah_attr->grh.traffic_class = rec->traffic_class;

	return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_mcmember);
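
/*
 * Flag every group on a port after a device event, queuing work for idle
 * groups; groups already in MCAST_GROUP_ERROR keep that state.
 */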
static void mcast_groups_event(struct mcast_port *port,
			       enum mcast_group_state state)
{
	struct mcast_group *group;
	struct rb_node *node;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	for (node = rb_first(&port->table); node; node = rb_next(node)) {
		group = rb_entry(node, struct mcast_group, node);
		spin_lock(&group->lock);
		if (group->state == MCAST_IDLE) {
			atomic_inc(&group->refcount);
			queue_work(mcast_wq, &group->work);
		}
		if (group->state != MCAST_GROUP_ERROR)
			group->state = state;
		spin_unlock(&group->lock);
	}
	spin_unlock_irqrestore(&port->lock, flags);
}

static void mcast_event_handler(struct ib_event_handler *handler,
				struct ib_event *event)
{
	struct mcast_device *dev;
	int index;

	dev = container_of(handler, struct mcast_device, event_handler);
	if (rdma_port_get_link_layer(dev->device, event->element.port_num) !=
	    IB_LINK_LAYER_INFINIBAND)
		return;

	index = event->element.port_num - dev->start_port;

	switch (event->event) {
	case IB_EVENT_PORT_ERR:
	case IB_EVENT_LID_CHANGE:
	case IB_EVENT_SM_CHANGE:
	case IB_EVENT_CLIENT_REREGISTER:
		mcast_groups_event(&dev->port[index], MCAST_GROUP_ERROR);
		break;
	case IB_EVENT_PKEY_CHANGE:
		mcast_groups_event(&dev->port[index], MCAST_PKEY_EVENT);
		break;
	default:
		break;
	}
}

static void mcast_add_one(struct ib_device *device)
{
	struct mcast_device *dev;
	struct mcast_port *port;
	int i;
	int count = 0;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	dev = kmalloc(sizeof *dev + device->phys_port_cnt * sizeof *port,
		      GFP_KERNEL);
	if (!dev)
		return;

	if (device->node_type == RDMA_NODE_IB_SWITCH)
		dev->start_port = dev->end_port = 0;
	else {
		dev->start_port = 1;
		dev->end_port = device->phys_port_cnt;
	}

	for (i = 0; i <= dev->end_port - dev->start_port; i++) {
		if (rdma_port_get_link_layer(device, dev->start_port + i) !=
		    IB_LINK_LAYER_INFINIBAND)
			continue;
		port = &dev->port[i];
		port->dev = dev;
		port->port_num = dev->start_port + i;
		spin_lock_init(&port->lock);
		port->table = RB_ROOT;
		init_completion(&port->comp);
		atomic_set(&port->refcount, 1);
		++count;
	}

	if (!count) {
		kfree(dev);
		return;
	}

	dev->device = device;
	ib_set_client_data(device, &mcast_client, dev);

	INIT_IB_EVENT_HANDLER(&dev->event_handler, device, mcast_event_handler);
	ib_register_event_handler(&dev->event_handler);
}

static void mcast_remove_one(struct ib_device *device)
{
	struct mcast_device *dev;
	struct mcast_port *port;
	int i;

	dev = ib_get_client_data(device, &mcast_client);
	if (!dev)
		return;

	ib_unregister_event_handler(&dev->event_handler);
	flush_workqueue(mcast_wq);

	for (i = 0; i <= dev->end_port - dev->start_port; i++) {
		if (rdma_port_get_link_layer(device, dev->start_port + i) ==
		    IB_LINK_LAYER_INFINIBAND) {
			port = &dev->port[i];
			deref_port(port);
			wait_for_completion(&port->comp);
		}
	}

	kfree(dev);
}

int mcast_init(void)
{
	int ret;

	mcast_wq = create_singlethread_workqueue("ib_mcast");
	if (!mcast_wq)
		return -ENOMEM;

	ib_sa_register_client(&sa_client);

	ret = ib_register_client(&mcast_client);
	if (ret)
		goto err;
	return 0;

err:
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(mcast_wq);
	return ret;
}

void mcast_cleanup(void)
{
	ib_unregister_client(&mcast_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(mcast_wq);
}