/*
 * Copyright (c) 2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/random.h>

#include <rdma/ib_cache.h>
#include "sa.h"

static void mcast_add_one(struct ib_device *device);
static void mcast_remove_one(struct ib_device *device);

static struct ib_client mcast_client = {
        .name   = "ib_multicast",
        .add    = mcast_add_one,
        .remove = mcast_remove_one
};

static struct ib_sa_client      sa_client;
static struct workqueue_struct  *mcast_wq;
static union ib_gid mgid0;

struct mcast_device;

struct mcast_port {
        struct mcast_device     *dev;
        spinlock_t              lock;
        struct rb_root          table;
        atomic_t                refcount;
        struct completion       comp;
        u8                      port_num;
};

struct mcast_device {
        struct ib_device        *device;
        struct ib_event_handler event_handler;
        int                     start_port;
        int                     end_port;
        struct mcast_port       port[0];
};

enum mcast_state {
        MCAST_JOINING,
        MCAST_MEMBER,
        MCAST_ERROR,
};

enum mcast_group_state {
        MCAST_IDLE,
        MCAST_BUSY,
        MCAST_GROUP_ERROR,
        MCAST_PKEY_EVENT
};

enum {
        MCAST_INVALID_PKEY_INDEX = 0xFFFF
};

struct mcast_member;

struct mcast_group {
        struct ib_sa_mcmember_rec rec;
        struct rb_node          node;
        struct mcast_port       *port;
        spinlock_t              lock;
        struct work_struct      work;
        struct list_head        pending_list;
        struct list_head        active_list;
        struct mcast_member     *last_join;
        int                     members[3];
        atomic_t                refcount;
        enum mcast_group_state  state;
        struct ib_sa_query      *query;
        int                     query_id;
        u16                     pkey_index;
        u8                      leave_state;
        int                     retries;
};

struct mcast_member {
        struct ib_sa_multicast  multicast;
        struct ib_sa_client     *client;
        struct mcast_group      *group;
        struct list_head        list;
        enum mcast_state        state;
        atomic_t                refcount;
        struct completion       comp;
};

static void join_handler(int status, struct ib_sa_mcmember_rec *rec,
                         void *context);
static void leave_handler(int status, struct ib_sa_mcmember_rec *rec,
                          void *context);

static struct mcast_group *mcast_find(struct mcast_port *port,
                                      union ib_gid *mgid)
{
        struct rb_node *node = port->table.rb_node;
        struct mcast_group *group;
        int ret;

        while (node) {
                group = rb_entry(node, struct mcast_group, node);
                ret = memcmp(mgid->raw, group->rec.mgid.raw, sizeof *mgid);
                if (!ret)
                        return group;

                if (ret < 0)
                        node = node->rb_left;
                else
                        node = node->rb_right;
        }
        return NULL;
}

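/*
 * Insert a group into the port's rb tree, keyed by MGID.  Joins to MGID 0
 * ask the SA to assign the MGID, so several such groups may share the
 * all-zero key until join_handler() re-inserts them under their assigned
 * MGIDs; allow_duplicates permits that (see acquire_group()).
 */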
static struct mcast_group *mcast_insert(struct mcast_port *port,
                                        struct mcast_group *group,
                                        int allow_duplicates)
{
        struct rb_node **link = &port->table.rb_node;
        struct rb_node *parent = NULL;
        struct mcast_group *cur_group;
        int ret;

        while (*link) {
                parent = *link;
                cur_group = rb_entry(parent, struct mcast_group, node);

                ret = memcmp(group->rec.mgid.raw, cur_group->rec.mgid.raw,
                             sizeof group->rec.mgid);
                if (ret < 0)
                        link = &(*link)->rb_left;
                else if (ret > 0)
                        link = &(*link)->rb_right;
                else if (allow_duplicates)
                        link = &(*link)->rb_left;
                else
                        return cur_group;
        }
        rb_link_node(&group->node, parent, link);
        rb_insert_color(&group->node, &port->table);
        return NULL;
}

static void deref_port(struct mcast_port *port)
{
        if (atomic_dec_and_test(&port->refcount))
                complete(&port->comp);
}

static void release_group(struct mcast_group *group)
{
        struct mcast_port *port = group->port;
        unsigned long flags;

        spin_lock_irqsave(&port->lock, flags);
        if (atomic_dec_and_test(&group->refcount)) {
                rb_erase(&group->node, &port->table);
                spin_unlock_irqrestore(&port->lock, flags);
                kfree(group);
                deref_port(port);
        } else
                spin_unlock_irqrestore(&port->lock, flags);
}

static void deref_member(struct mcast_member *member)
{
        if (atomic_dec_and_test(&member->refcount))
                complete(&member->comp);
}

static void queue_join(struct mcast_member *member)
{
        struct mcast_group *group = member->group;
        unsigned long flags;

        spin_lock_irqsave(&group->lock, flags);
        list_add_tail(&member->list, &group->pending_list);
        if (group->state == MCAST_IDLE) {
                group->state = MCAST_BUSY;
                atomic_inc(&group->refcount);
                queue_work(mcast_wq, &group->work);
        }
        spin_unlock_irqrestore(&group->lock, flags);
}

/*
 * A multicast group has three types of members: full member, non member, and
 * send only member.  We need to keep track of the number of members of each
 * type based on their join state.  Adjust the number of members that belong
 * to the specified join states.
 */
static void adjust_membership(struct mcast_group *group, u8 join_state, int inc)
{
        int i;

        for (i = 0; i < 3; i++, join_state >>= 1)
                if (join_state & 0x1)
                        group->members[i] += inc;
}
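
/*
 * Worked example (illustrative only): the low three join_state bits select
 * full member (0x1), non member (0x2), and send only member (0x4), matching
 * members[0..2] above.  A request with join_state = 0x5 is therefore
 * counted under both the full member and send only member totals:
 *
 *      adjust_membership(group, 0x5, 1);
 *              group->members[0] += 1;         full member
 *              group->members[2] += 1;         send only member
 */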

/*
 * If a multicast group has zero members left for a particular join state, but
 * the group is still a member with the SA, we need to leave that join state.
 * Determine which join states we still belong to that no longer have any
 * active members.
 */
static u8 get_leave_state(struct mcast_group *group)
{
        u8 leave_state = 0;
        int i;

        for (i = 0; i < 3; i++)
                if (!group->members[i])
                        leave_state |= (0x1 << i);

        return leave_state & group->rec.join_state;
}

static int check_selector(ib_sa_comp_mask comp_mask,
                          ib_sa_comp_mask selector_mask,
                          ib_sa_comp_mask value_mask,
                          u8 selector, u8 src_value, u8 dst_value)
{
        int err;

        if (!(comp_mask & selector_mask) || !(comp_mask & value_mask))
                return 0;

        switch (selector) {
        case IB_SA_GT:
                err = (src_value <= dst_value);
                break;
        case IB_SA_LT:
                err = (src_value >= dst_value);
                break;
        case IB_SA_EQ:
                err = (src_value != dst_value);
                break;
        default:
                err = 0;
                break;
        }

        return err;
}
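
/*
 * Illustrative use (not compiled): a join request asking for an MTU greater
 * than some value sets dst->mtu_selector = IB_SA_GT along with both comp_mask
 * bits.  The request is then compatible only with an existing group whose MTU
 * is strictly greater than the requested value:
 *
 *      err = check_selector(comp_mask, IB_SA_MCMEMBER_REC_MTU_SELECTOR,
 *                           IB_SA_MCMEMBER_REC_MTU, dst->mtu_selector,
 *                           src->mtu, dst->mtu);
 *
 * err is 0 when src->mtu > dst->mtu (or when either comp_mask bit is clear),
 * nonzero otherwise.
 */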

static int cmp_rec(struct ib_sa_mcmember_rec *src,
                   struct ib_sa_mcmember_rec *dst, ib_sa_comp_mask comp_mask)
{
        /* MGID must already match */

        if (comp_mask & IB_SA_MCMEMBER_REC_PORT_GID &&
            memcmp(&src->port_gid, &dst->port_gid, sizeof src->port_gid))
                return -EINVAL;
        if (comp_mask & IB_SA_MCMEMBER_REC_QKEY && src->qkey != dst->qkey)
                return -EINVAL;
        if (comp_mask & IB_SA_MCMEMBER_REC_MLID && src->mlid != dst->mlid)
                return -EINVAL;
        if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_MTU_SELECTOR,
                           IB_SA_MCMEMBER_REC_MTU, dst->mtu_selector,
                           src->mtu, dst->mtu))
                return -EINVAL;
        if (comp_mask & IB_SA_MCMEMBER_REC_TRAFFIC_CLASS &&
            src->traffic_class != dst->traffic_class)
                return -EINVAL;
        if (comp_mask & IB_SA_MCMEMBER_REC_PKEY && src->pkey != dst->pkey)
                return -EINVAL;
        if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_RATE_SELECTOR,
                           IB_SA_MCMEMBER_REC_RATE, dst->rate_selector,
                           src->rate, dst->rate))
                return -EINVAL;
        if (check_selector(comp_mask,
                           IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME_SELECTOR,
                           IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME,
                           dst->packet_life_time_selector,
                           src->packet_life_time, dst->packet_life_time))
                return -EINVAL;
        if (comp_mask & IB_SA_MCMEMBER_REC_SL && src->sl != dst->sl)
                return -EINVAL;
        if (comp_mask & IB_SA_MCMEMBER_REC_FLOW_LABEL &&
            src->flow_label != dst->flow_label)
                return -EINVAL;
        if (comp_mask & IB_SA_MCMEMBER_REC_HOP_LIMIT &&
            src->hop_limit != dst->hop_limit)
                return -EINVAL;
        if (comp_mask & IB_SA_MCMEMBER_REC_SCOPE && src->scope != dst->scope)
                return -EINVAL;

        /* join_state checked separately, proxy_join ignored */

        return 0;
}

static int send_join(struct mcast_group *group, struct mcast_member *member)
{
        struct mcast_port *port = group->port;
        int ret;

        group->last_join = member;
        ret = ib_sa_mcmember_rec_query(&sa_client, port->dev->device,
                                       port->port_num, IB_MGMT_METHOD_SET,
                                       &member->multicast.rec,
                                       member->multicast.comp_mask,
                                       3000, GFP_KERNEL, join_handler, group,
                                       &group->query);
        if (ret >= 0) {
                group->query_id = ret;
                ret = 0;
        }
        return ret;
}

static int send_leave(struct mcast_group *group, u8 leave_state)
{
        struct mcast_port *port = group->port;
        struct ib_sa_mcmember_rec rec;
        int ret;

        rec = group->rec;
        rec.join_state = leave_state;
        group->leave_state = leave_state;

        ret = ib_sa_mcmember_rec_query(&sa_client, port->dev->device,
                                       port->port_num, IB_SA_METHOD_DELETE, &rec,
                                       IB_SA_MCMEMBER_REC_MGID     |
                                       IB_SA_MCMEMBER_REC_PORT_GID |
                                       IB_SA_MCMEMBER_REC_JOIN_STATE,
                                       3000, GFP_KERNEL, leave_handler,
                                       group, &group->query);
        if (ret >= 0) {
                group->query_id = ret;
                ret = 0;
        }
        return ret;
}

static void join_group(struct mcast_group *group, struct mcast_member *member,
                       u8 join_state)
{
        member->state = MCAST_MEMBER;
        adjust_membership(group, join_state, 1);
        group->rec.join_state |= join_state;
        member->multicast.rec = group->rec;
        member->multicast.rec.join_state = join_state;
        list_move(&member->list, &group->active_list);
}

static int fail_join(struct mcast_group *group, struct mcast_member *member,
                     int status)
{
        spin_lock_irq(&group->lock);
        list_del_init(&member->list);
        spin_unlock_irq(&group->lock);
        return member->multicast.callback(status, &member->multicast);
}

static void process_group_error(struct mcast_group *group)
{
        struct mcast_member *member;
        int ret = 0;
        u16 pkey_index;

        if (group->state == MCAST_PKEY_EVENT)
                ret = ib_find_pkey(group->port->dev->device,
                                   group->port->port_num,
                                   be16_to_cpu(group->rec.pkey), &pkey_index);

        spin_lock_irq(&group->lock);
        if (group->state == MCAST_PKEY_EVENT && !ret &&
            group->pkey_index == pkey_index)
                goto out;

        while (!list_empty(&group->active_list)) {
                member = list_entry(group->active_list.next,
                                    struct mcast_member, list);
                atomic_inc(&member->refcount);
                list_del_init(&member->list);
                adjust_membership(group, member->multicast.rec.join_state, -1);
                member->state = MCAST_ERROR;
                spin_unlock_irq(&group->lock);

                ret = member->multicast.callback(-ENETRESET,
                                                 &member->multicast);
                deref_member(member);
                if (ret)
                        ib_sa_free_multicast(&member->multicast);
                spin_lock_irq(&group->lock);
        }

        group->rec.join_state = 0;
out:
        group->state = MCAST_BUSY;
        spin_unlock_irq(&group->lock);
}

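/*
 * Group state machine, in brief: a group sits in MCAST_IDLE until work is
 * queued, at which point it moves to MCAST_BUSY.  Port events mark it
 * MCAST_GROUP_ERROR or MCAST_PKEY_EVENT; mcast_work_handler() routes those
 * states through process_group_error(), which returns the group to
 * MCAST_BUSY before normal join/leave processing resumes.
 */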
static void mcast_work_handler(struct work_struct *work)
{
        struct mcast_group *group;
        struct mcast_member *member;
        struct ib_sa_multicast *multicast;
        int status, ret;
        u8 join_state;

        group = container_of(work, typeof(*group), work);
retest:
        spin_lock_irq(&group->lock);
        while (!list_empty(&group->pending_list) ||
               (group->state != MCAST_BUSY)) {

                if (group->state != MCAST_BUSY) {
                        spin_unlock_irq(&group->lock);
                        process_group_error(group);
                        goto retest;
                }

                member = list_entry(group->pending_list.next,
                                    struct mcast_member, list);
                multicast = &member->multicast;
                join_state = multicast->rec.join_state;
                atomic_inc(&member->refcount);

                if (join_state == (group->rec.join_state & join_state)) {
                        status = cmp_rec(&group->rec, &multicast->rec,
                                         multicast->comp_mask);
                        if (!status)
                                join_group(group, member, join_state);
                        else
                                list_del_init(&member->list);
                        spin_unlock_irq(&group->lock);
                        ret = multicast->callback(status, multicast);
                } else {
                        spin_unlock_irq(&group->lock);
                        status = send_join(group, member);
                        if (!status) {
                                deref_member(member);
                                return;
                        }
                        ret = fail_join(group, member, status);
                }

                deref_member(member);
                if (ret)
                        ib_sa_free_multicast(&member->multicast);
                spin_lock_irq(&group->lock);
        }

        join_state = get_leave_state(group);
        if (join_state) {
                group->rec.join_state &= ~join_state;
                spin_unlock_irq(&group->lock);
                if (send_leave(group, join_state))
                        goto retest;
        } else {
                group->state = MCAST_IDLE;
                spin_unlock_irq(&group->lock);
                release_group(group);
        }
}

/*
 * Fail a join request if it is still active, i.e. at the head of the
 * pending queue.
 */
static void process_join_error(struct mcast_group *group, int status)
{
        struct mcast_member *member;
        int ret;

        spin_lock_irq(&group->lock);
        member = list_entry(group->pending_list.next,
                            struct mcast_member, list);
        if (group->last_join == member) {
                atomic_inc(&member->refcount);
                list_del_init(&member->list);
                spin_unlock_irq(&group->lock);
                ret = member->multicast.callback(status, &member->multicast);
                deref_member(member);
                if (ret)
                        ib_sa_free_multicast(&member->multicast);
        } else
                spin_unlock_irq(&group->lock);
}

static void join_handler(int status, struct ib_sa_mcmember_rec *rec,
                         void *context)
{
        struct mcast_group *group = context;
        u16 pkey_index = MCAST_INVALID_PKEY_INDEX;

        if (status)
                process_join_error(group, status);
        else {
                ib_find_pkey(group->port->dev->device, group->port->port_num,
                             be16_to_cpu(rec->pkey), &pkey_index);

                spin_lock_irq(&group->port->lock);
                group->rec = *rec;
                if (group->state == MCAST_BUSY &&
                    group->pkey_index == MCAST_INVALID_PKEY_INDEX)
                        group->pkey_index = pkey_index;
                if (!memcmp(&mgid0, &group->rec.mgid, sizeof mgid0)) {
                        rb_erase(&group->node, &group->port->table);
                        mcast_insert(group->port, group, 1);
                }
                spin_unlock_irq(&group->port->lock);
        }
        mcast_work_handler(&group->work);
}

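/*
 * Retry a failed leave request a few times (group->retries, initialized to
 * 3 in acquire_group()) before falling through to the work handler.
 */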
static void leave_handler(int status, struct ib_sa_mcmember_rec *rec,
                          void *context)
{
        struct mcast_group *group = context;

        if (status && group->retries > 0 &&
            !send_leave(group, group->leave_state))
                group->retries--;
        else
                mcast_work_handler(&group->work);
}

static struct mcast_group *acquire_group(struct mcast_port *port,
                                         union ib_gid *mgid, gfp_t gfp_mask)
{
        struct mcast_group *group, *cur_group;
        unsigned long flags;
        int is_mgid0;

        is_mgid0 = !memcmp(&mgid0, mgid, sizeof mgid0);
        if (!is_mgid0) {
                spin_lock_irqsave(&port->lock, flags);
                group = mcast_find(port, mgid);
                if (group)
                        goto found;
                spin_unlock_irqrestore(&port->lock, flags);
        }

        group = kzalloc(sizeof *group, gfp_mask);
        if (!group)
                return NULL;

        group->retries = 3;
        group->port = port;
        group->rec.mgid = *mgid;
        group->pkey_index = MCAST_INVALID_PKEY_INDEX;
        INIT_LIST_HEAD(&group->pending_list);
        INIT_LIST_HEAD(&group->active_list);
        INIT_WORK(&group->work, mcast_work_handler);
        spin_lock_init(&group->lock);

        spin_lock_irqsave(&port->lock, flags);
        cur_group = mcast_insert(port, group, is_mgid0);
        if (cur_group) {
                kfree(group);
                group = cur_group;
        } else
                atomic_inc(&port->refcount);
found:
        atomic_inc(&group->refcount);
        spin_unlock_irqrestore(&port->lock, flags);
        return group;
}

/*
 * We serialize all join requests to a single group to make our lives much
 * easier.  Otherwise, two users could try to join the same group
 * simultaneously, with different configurations, one could leave while the
 * join is in progress, etc., which makes locking around error recovery
 * difficult.
 */
struct ib_sa_multicast *
ib_sa_join_multicast(struct ib_sa_client *client,
                     struct ib_device *device, u8 port_num,
                     struct ib_sa_mcmember_rec *rec,
                     ib_sa_comp_mask comp_mask, gfp_t gfp_mask,
                     int (*callback)(int status,
                                     struct ib_sa_multicast *multicast),
                     void *context)
{
        struct mcast_device *dev;
        struct mcast_member *member;
        struct ib_sa_multicast *multicast;
        int ret;

        dev = ib_get_client_data(device, &mcast_client);
        if (!dev)
                return ERR_PTR(-ENODEV);

        member = kmalloc(sizeof *member, gfp_mask);
        if (!member)
                return ERR_PTR(-ENOMEM);

        ib_sa_client_get(client);
        member->client = client;
        member->multicast.rec = *rec;
        member->multicast.comp_mask = comp_mask;
        member->multicast.callback = callback;
        member->multicast.context = context;
        init_completion(&member->comp);
        atomic_set(&member->refcount, 1);
        member->state = MCAST_JOINING;

        member->group = acquire_group(&dev->port[port_num - dev->start_port],
                                      &rec->mgid, gfp_mask);
        if (!member->group) {
                ret = -ENOMEM;
                goto err;
        }

        /*
         * The user will get the multicast structure in their callback.  They
         * could then free the multicast structure before we can return from
         * this routine.  So we save the pointer to return before queuing
         * any callback.
         */
        multicast = &member->multicast;
        queue_join(member);
        return multicast;

err:
        ib_sa_client_put(client);
        kfree(member);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_sa_join_multicast);
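
/*
 * Minimal usage sketch (hypothetical caller; my_sa_client, my_join_handler,
 * my_context and the comp_mask shown are illustrative, not prescribed by
 * this file).  The callback is invoked from the multicast core, not the
 * caller's context, and returning nonzero from it tells the core to call
 * ib_sa_free_multicast() on the caller's behalf:
 *
 *      static int my_join_handler(int status, struct ib_sa_multicast *mc)
 *      {
 *              if (status)
 *                      pr_err("multicast join failed: %d\n", status);
 *              return status;
 *      }
 *
 *      mc = ib_sa_join_multicast(&my_sa_client, device, port_num, &rec,
 *                                IB_SA_MCMEMBER_REC_MGID |
 *                                IB_SA_MCMEMBER_REC_PORT_GID |
 *                                IB_SA_MCMEMBER_REC_JOIN_STATE,
 *                                GFP_KERNEL, my_join_handler, my_context);
 *      if (IS_ERR(mc))
 *              return PTR_ERR(mc);
 */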

void ib_sa_free_multicast(struct ib_sa_multicast *multicast)
{
        struct mcast_member *member;
        struct mcast_group *group;

        member = container_of(multicast, struct mcast_member, multicast);
        group = member->group;

        spin_lock_irq(&group->lock);
        if (member->state == MCAST_MEMBER)
                adjust_membership(group, multicast->rec.join_state, -1);

        list_del_init(&member->list);

        if (group->state == MCAST_IDLE) {
                group->state = MCAST_BUSY;
                spin_unlock_irq(&group->lock);
                /* Continue to hold reference on group until callback */
                queue_work(mcast_wq, &group->work);
        } else {
                spin_unlock_irq(&group->lock);
                release_group(group);
        }

        deref_member(member);
        wait_for_completion(&member->comp);
        ib_sa_client_put(member->client);
        kfree(member);
}
EXPORT_SYMBOL(ib_sa_free_multicast);

int ib_sa_get_mcmember_rec(struct ib_device *device, u8 port_num,
                           union ib_gid *mgid, struct ib_sa_mcmember_rec *rec)
{
        struct mcast_device *dev;
        struct mcast_port *port;
        struct mcast_group *group;
        unsigned long flags;
        int ret = 0;

        dev = ib_get_client_data(device, &mcast_client);
        if (!dev)
                return -ENODEV;

        port = &dev->port[port_num - dev->start_port];
        spin_lock_irqsave(&port->lock, flags);
        group = mcast_find(port, mgid);
        if (group)
                *rec = group->rec;
        else
                ret = -EADDRNOTAVAIL;
        spin_unlock_irqrestore(&port->lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_sa_get_mcmember_rec);

int ib_init_ah_from_mcmember(struct ib_device *device, u8 port_num,
                             struct ib_sa_mcmember_rec *rec,
                             struct ib_ah_attr *ah_attr)
{
        int ret;
        u16 gid_index;
        u8 p;

        ret = ib_find_cached_gid(device, &rec->port_gid, &p, &gid_index);
        if (ret)
                return ret;

        memset(ah_attr, 0, sizeof *ah_attr);
        ah_attr->dlid = be16_to_cpu(rec->mlid);
        ah_attr->sl = rec->sl;
        ah_attr->port_num = port_num;
        ah_attr->static_rate = rec->rate;

        ah_attr->ah_flags = IB_AH_GRH;
        ah_attr->grh.dgid = rec->mgid;

        ah_attr->grh.sgid_index = (u8) gid_index;
        ah_attr->grh.flow_label = be32_to_cpu(rec->flow_label);
        ah_attr->grh.hop_limit = rec->hop_limit;
        ah_attr->grh.traffic_class = rec->traffic_class;

        return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_mcmember);
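
/*
 * Illustrative follow-up (hypothetical caller): after a successful join, the
 * record reported to the callback can be turned into an address handle for
 * posting sends to the group.  pd here is an assumed protection domain:
 *
 *      struct ib_ah_attr ah_attr;
 *      struct ib_ah *ah;
 *
 *      ret = ib_init_ah_from_mcmember(device, port_num, &multicast->rec,
 *                                     &ah_attr);
 *      if (!ret)
 *              ah = ib_create_ah(pd, &ah_attr);
 */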

static void mcast_groups_event(struct mcast_port *port,
                               enum mcast_group_state state)
{
        struct mcast_group *group;
        struct rb_node *node;
        unsigned long flags;

        spin_lock_irqsave(&port->lock, flags);
        for (node = rb_first(&port->table); node; node = rb_next(node)) {
                group = rb_entry(node, struct mcast_group, node);
                spin_lock(&group->lock);
                if (group->state == MCAST_IDLE) {
                        atomic_inc(&group->refcount);
                        queue_work(mcast_wq, &group->work);
                }
                if (group->state != MCAST_GROUP_ERROR)
                        group->state = state;
                spin_unlock(&group->lock);
        }
        spin_unlock_irqrestore(&port->lock, flags);
}

static void mcast_event_handler(struct ib_event_handler *handler,
                                struct ib_event *event)
{
        struct mcast_device *dev;
        int index;

        dev = container_of(handler, struct mcast_device, event_handler);
        if (rdma_port_get_link_layer(dev->device, event->element.port_num) !=
            IB_LINK_LAYER_INFINIBAND)
                return;

        index = event->element.port_num - dev->start_port;

        switch (event->event) {
        case IB_EVENT_PORT_ERR:
        case IB_EVENT_LID_CHANGE:
        case IB_EVENT_SM_CHANGE:
        case IB_EVENT_CLIENT_REREGISTER:
                mcast_groups_event(&dev->port[index], MCAST_GROUP_ERROR);
                break;
        case IB_EVENT_PKEY_CHANGE:
                mcast_groups_event(&dev->port[index], MCAST_PKEY_EVENT);
                break;
        default:
                break;
        }
}

static void mcast_add_one(struct ib_device *device)
{
        struct mcast_device *dev;
        struct mcast_port *port;
        int i;
        int count = 0;

        if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
                return;

        dev = kmalloc(sizeof *dev + device->phys_port_cnt * sizeof *port,
                      GFP_KERNEL);
        if (!dev)
                return;

        if (device->node_type == RDMA_NODE_IB_SWITCH)
                dev->start_port = dev->end_port = 0;
        else {
                dev->start_port = 1;
                dev->end_port = device->phys_port_cnt;
        }

        for (i = 0; i <= dev->end_port - dev->start_port; i++) {
                if (rdma_port_get_link_layer(device, dev->start_port + i) !=
                    IB_LINK_LAYER_INFINIBAND)
                        continue;
                port = &dev->port[i];
                port->dev = dev;
                port->port_num = dev->start_port + i;
                spin_lock_init(&port->lock);
                port->table = RB_ROOT;
                init_completion(&port->comp);
                atomic_set(&port->refcount, 1);
                ++count;
        }

        if (!count) {
                kfree(dev);
                return;
        }

        dev->device = device;
        ib_set_client_data(device, &mcast_client, dev);

        INIT_IB_EVENT_HANDLER(&dev->event_handler, device, mcast_event_handler);
        ib_register_event_handler(&dev->event_handler);
}

static void mcast_remove_one(struct ib_device *device)
{
        struct mcast_device *dev;
        struct mcast_port *port;
        int i;

        dev = ib_get_client_data(device, &mcast_client);
        if (!dev)
                return;

        ib_unregister_event_handler(&dev->event_handler);
        flush_workqueue(mcast_wq);

        for (i = 0; i <= dev->end_port - dev->start_port; i++) {
                if (rdma_port_get_link_layer(device, dev->start_port + i) ==
                    IB_LINK_LAYER_INFINIBAND) {
                        port = &dev->port[i];
                        deref_port(port);
                        wait_for_completion(&port->comp);
                }
        }

        kfree(dev);
}

int mcast_init(void)
{
        int ret;

        mcast_wq = create_singlethread_workqueue("ib_mcast");
        if (!mcast_wq)
                return -ENOMEM;

        ib_sa_register_client(&sa_client);

        ret = ib_register_client(&mcast_client);
        if (ret)
                goto err;
        return 0;

err:
        ib_sa_unregister_client(&sa_client);
        destroy_workqueue(mcast_wq);
        return ret;
}

void mcast_cleanup(void)
{
        ib_unregister_client(&mcast_client);
        ib_sa_unregister_client(&sa_client);
        destroy_workqueue(mcast_wq);
}