/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <rdma/rdma_netlink.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include "core_priv.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("core kernel InfiniBand API");
MODULE_LICENSE("Dual BSD/GPL");
struct ib_client_data {
        struct list_head  list;
        struct ib_client *client;
        void             *data;
        /* The device or client is going down. Do not call client or device
         * callbacks other than remove(). */
        bool              going_down;
};
struct workqueue_struct *ib_wq;
EXPORT_SYMBOL_GPL(ib_wq);
/* The device_list and client_list contain devices and clients after their
 * registration has completed, and the devices and clients are removed
 * during unregistration. */
static LIST_HEAD(device_list);
static LIST_HEAD(client_list);
/*
 * device_mutex and lists_rwsem protect access to both device_list and
 * client_list.  device_mutex protects writer access by device and client
 * registration / de-registration.  lists_rwsem protects reader access to
 * these lists.  Iterators of these lists must lock lists_rwsem for read,
 * while updates to the lists must be done with a write lock.  A special
 * case is when the device_mutex is held: in that case, locking the lists
 * for read access is not necessary, as holding the device_mutex implies it.
 *
 * lists_rwsem also protects access to the client data list.
 */
static DEFINE_MUTEX(device_mutex);
static DECLARE_RWSEM(lists_rwsem);
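
/*
 * Illustrative sketch (not part of the original file): a reader that walks
 * device_list without holding device_mutex is expected to follow the
 * pattern below.  The variable and helper names are hypothetical.
 *
 *      struct ib_device *dev;
 *
 *      down_read(&lists_rwsem);
 *      list_for_each_entry(dev, &device_list, core_list)
 *              do_something(dev);
 *      up_read(&lists_rwsem);
 */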
static int ib_device_check_mandatory(struct ib_device *device)
{
#define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device, x), #x }
        static const struct {
                size_t offset;
                char  *name;
        } mandatory_table[] = {
                IB_MANDATORY_FUNC(query_device),
                IB_MANDATORY_FUNC(query_port),
                IB_MANDATORY_FUNC(query_pkey),
                IB_MANDATORY_FUNC(query_gid),
                IB_MANDATORY_FUNC(alloc_pd),
                IB_MANDATORY_FUNC(dealloc_pd),
                IB_MANDATORY_FUNC(create_ah),
                IB_MANDATORY_FUNC(destroy_ah),
                IB_MANDATORY_FUNC(create_qp),
                IB_MANDATORY_FUNC(modify_qp),
                IB_MANDATORY_FUNC(destroy_qp),
                IB_MANDATORY_FUNC(post_send),
                IB_MANDATORY_FUNC(post_recv),
                IB_MANDATORY_FUNC(create_cq),
                IB_MANDATORY_FUNC(destroy_cq),
                IB_MANDATORY_FUNC(poll_cq),
                IB_MANDATORY_FUNC(req_notify_cq),
                IB_MANDATORY_FUNC(get_dma_mr),
                IB_MANDATORY_FUNC(dereg_mr),
                IB_MANDATORY_FUNC(get_port_immutable)
        };
        int i;

        for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) {
                if (!*(void **) ((void *) device + mandatory_table[i].offset)) {
                        printk(KERN_WARNING "Device %s is missing mandatory function %s\n",
                               device->name, mandatory_table[i].name);
                        return -ENOSYS;
                }
        }

        return 0;
}
static struct ib_device *__ib_device_get_by_name(const char *name)
{
        struct ib_device *device;

        list_for_each_entry(device, &device_list, core_list)
                if (!strncmp(name, device->name, IB_DEVICE_NAME_MAX))
                        return device;

        return NULL;
}
static int alloc_name(char *name)
{
        unsigned long *inuse;
        char buf[IB_DEVICE_NAME_MAX];
        struct ib_device *device;
        int i;

        inuse = (unsigned long *) get_zeroed_page(GFP_KERNEL);
        if (!inuse)
                return -ENOMEM;

        list_for_each_entry(device, &device_list, core_list) {
                if (!sscanf(device->name, name, &i))
                        continue;
                if (i < 0 || i >= PAGE_SIZE * 8)
                        continue;
                snprintf(buf, sizeof buf, name, i);
                if (!strncmp(buf, device->name, IB_DEVICE_NAME_MAX))
                        set_bit(i, inuse);
        }

        i = find_first_zero_bit(inuse, PAGE_SIZE * 8);
        free_page((unsigned long) inuse);
        snprintf(buf, sizeof buf, name, i);

        if (__ib_device_get_by_name(buf))
                return -ENFILE;

        strlcpy(name, buf, IB_DEVICE_NAME_MAX);
        return 0;
}
static void ib_device_release(struct device *device)
{
        struct ib_device *dev = container_of(device, struct ib_device, dev);

        ib_cache_release_one(dev);
        kfree(dev->port_immutable);
        kfree(dev);
}
static int ib_device_uevent(struct device *device,
                            struct kobj_uevent_env *env)
{
        struct ib_device *dev = container_of(device, struct ib_device, dev);

        if (add_uevent_var(env, "NAME=%s", dev->name))
                return -ENOMEM;

        /*
         * It would be nice to pass the node GUID with the event...
         */

        return 0;
}
static struct class ib_class = {
        .name        = "infiniband",
        .dev_release = ib_device_release,
        .dev_uevent  = ib_device_uevent,
};
/**
 * ib_alloc_device - allocate an IB device struct
 * @size:size of structure to allocate
 *
 * Low-level drivers should use ib_alloc_device() to allocate &struct
 * ib_device.  @size is the size of the structure to be allocated,
 * including any private data used by the low-level driver.
 * ib_dealloc_device() must be used to free structures allocated with
 * ib_alloc_device().
 */
struct ib_device *ib_alloc_device(size_t size)
{
        struct ib_device *device;

        if (WARN_ON(size < sizeof(struct ib_device)))
                return NULL;

        device = kzalloc(size, GFP_KERNEL);
        if (!device)
                return NULL;

        device->dev.class = &ib_class;
        device_initialize(&device->dev);

        dev_set_drvdata(&device->dev, device);

        INIT_LIST_HEAD(&device->event_handler_list);
        spin_lock_init(&device->event_handler_lock);
        spin_lock_init(&device->client_data_lock);
        INIT_LIST_HEAD(&device->client_data_list);
        INIT_LIST_HEAD(&device->port_list);

        return device;
}
EXPORT_SYMBOL(ib_alloc_device);
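
/*
 * Illustrative sketch (not part of the original file): a low-level driver
 * conventionally embeds struct ib_device as the first member of its own
 * device structure so the cast below is valid, and allocates everything in
 * one call.  "struct my_hw_dev" and "my_dev" are hypothetical names.
 *
 *      struct my_hw_dev {
 *              struct ib_device ib_dev;        // must come first
 *              int              my_private_state;
 *      };
 *
 *      struct my_hw_dev *my_dev =
 *              (struct my_hw_dev *) ib_alloc_device(sizeof(*my_dev));
 *      if (!my_dev)
 *              return -ENOMEM;
 */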
/**
 * ib_dealloc_device - free an IB device struct
 * @device:structure to free
 *
 * Free a structure allocated with ib_alloc_device().
 */
void ib_dealloc_device(struct ib_device *device)
{
        WARN_ON(device->reg_state != IB_DEV_UNREGISTERED &&
                device->reg_state != IB_DEV_UNINITIALIZED);
        kobject_put(&device->dev.kobj);
}
EXPORT_SYMBOL(ib_dealloc_device);
static int add_client_context(struct ib_device *device, struct ib_client *client)
{
        struct ib_client_data *context;
        unsigned long flags;

        context = kmalloc(sizeof *context, GFP_KERNEL);
        if (!context) {
                printk(KERN_WARNING "Couldn't allocate client context for %s/%s\n",
                       device->name, client->name);
                return -ENOMEM;
        }

        context->client     = client;
        context->data       = NULL;
        context->going_down = false;

        down_write(&lists_rwsem);
        spin_lock_irqsave(&device->client_data_lock, flags);
        list_add(&context->list, &device->client_data_list);
        spin_unlock_irqrestore(&device->client_data_lock, flags);
        up_write(&lists_rwsem);

        return 0;
}
static int verify_immutable(const struct ib_device *dev, u8 port)
{
        return WARN_ON(!rdma_cap_ib_mad(dev, port) &&
                       rdma_max_mad_size(dev, port) != 0);
}
static int read_port_immutable(struct ib_device *device)
{
        int ret;
        u8 start_port = rdma_start_port(device);
        u8 end_port = rdma_end_port(device);
        u8 port;

        /**
         * device->port_immutable is indexed directly by the port number to make
         * access to this data as efficient as possible.
         *
         * Therefore port_immutable is declared as a 1 based array with
         * potential empty slots at the beginning.
         */
        device->port_immutable = kzalloc(sizeof(*device->port_immutable)
                                         * (end_port + 1),
                                         GFP_KERNEL);
        if (!device->port_immutable)
                return -ENOMEM;

        for (port = start_port; port <= end_port; ++port) {
                ret = device->get_port_immutable(device, port,
                                                 &device->port_immutable[port]);
                if (ret)
                        return ret;

                if (verify_immutable(device, port))
                        return -EINVAL;
        }

        return 0;
}
/**
 * ib_register_device - Register an IB device with IB core
 * @device:Device to register
 *
 * Low-level drivers use ib_register_device() to register their
 * devices with the IB core.  All registered clients will receive a
 * callback for each device that is added. @device must be allocated
 * with ib_alloc_device().
 */
int ib_register_device(struct ib_device *device,
                       int (*port_callback)(struct ib_device *,
                                            u8, struct kobject *))
{
        int ret;

        mutex_lock(&device_mutex);

        if (strchr(device->name, '%')) {
                ret = alloc_name(device->name);
                if (ret)
                        goto out;
        }

        if (ib_device_check_mandatory(device)) {
                ret = -EINVAL;
                goto out;
        }

        ret = read_port_immutable(device);
        if (ret) {
                printk(KERN_WARNING "Couldn't create per port immutable data %s\n",
                       device->name);
                goto out;
        }

        ret = ib_cache_setup_one(device);
        if (ret) {
                printk(KERN_WARNING "Couldn't set up InfiniBand P_Key/GID cache\n");
                goto out;
        }

        ret = ib_device_register_sysfs(device, port_callback);
        if (ret) {
                printk(KERN_WARNING "Couldn't register device %s with driver model\n",
                       device->name);
                ib_cache_cleanup_one(device);
                goto out;
        }

        device->reg_state = IB_DEV_REGISTERED;

        {
                struct ib_client *client;

                list_for_each_entry(client, &client_list, list)
                        if (client->add && !add_client_context(device, client))
                                client->add(device);
        }

        down_write(&lists_rwsem);
        list_add_tail(&device->core_list, &device_list);
        up_write(&lists_rwsem);
out:
        mutex_unlock(&device_mutex);
        return ret;
}
EXPORT_SYMBOL(ib_register_device);
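
/*
 * Illustrative sketch (not part of the original file): a driver registers
 * the device it allocated earlier after filling in its verb methods.  The
 * "%d" in the name is expanded by alloc_name() during registration; "my_dev"
 * is a hypothetical variable, and NULL is passed when no per-port sysfs
 * callback is needed.
 *
 *      strlcpy(my_dev->ib_dev.name, "mydrv%d", IB_DEVICE_NAME_MAX);
 *      // ... fill in my_dev->ib_dev.query_device, .alloc_pd, etc. ...
 *
 *      ret = ib_register_device(&my_dev->ib_dev, NULL);
 *      if (ret)
 *              ib_dealloc_device(&my_dev->ib_dev);
 */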
/**
 * ib_unregister_device - Unregister an IB device
 * @device:Device to unregister
 *
 * Unregister an IB device.  All clients will receive a remove callback.
 */
void ib_unregister_device(struct ib_device *device)
{
        struct ib_client_data *context, *tmp;
        unsigned long flags;

        mutex_lock(&device_mutex);

        down_write(&lists_rwsem);
        list_del(&device->core_list);
        spin_lock_irqsave(&device->client_data_lock, flags);
        list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
                context->going_down = true;
        spin_unlock_irqrestore(&device->client_data_lock, flags);
        downgrade_write(&lists_rwsem);

        list_for_each_entry_safe(context, tmp, &device->client_data_list,
                                 list) {
                if (context->client->remove)
                        context->client->remove(device, context->data);
        }
        up_read(&lists_rwsem);

        mutex_unlock(&device_mutex);

        ib_device_unregister_sysfs(device);
        ib_cache_cleanup_one(device);

        down_write(&lists_rwsem);
        spin_lock_irqsave(&device->client_data_lock, flags);
        list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
                kfree(context);
        spin_unlock_irqrestore(&device->client_data_lock, flags);
        up_write(&lists_rwsem);

        device->reg_state = IB_DEV_UNREGISTERED;
}
EXPORT_SYMBOL(ib_unregister_device);
/**
 * ib_register_client - Register an IB client
 * @client:Client to register
 *
 * Upper level users of the IB drivers can use ib_register_client() to
 * register callbacks for IB device addition and removal.  When an IB
 * device is added, each registered client's add method will be called
 * (in the order the clients were registered), and when a device is
 * removed, each client's remove method will be called (in the reverse
 * order that clients were registered).  In addition, when
 * ib_register_client() is called, the client will receive an add
 * callback for all devices already registered.
 */
int ib_register_client(struct ib_client *client)
{
        struct ib_device *device;

        mutex_lock(&device_mutex);

        list_for_each_entry(device, &device_list, core_list)
                if (client->add && !add_client_context(device, client))
                        client->add(device);

        down_write(&lists_rwsem);
        list_add_tail(&client->list, &client_list);
        up_write(&lists_rwsem);

        mutex_unlock(&device_mutex);

        return 0;
}
EXPORT_SYMBOL(ib_register_client);
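
/*
 * Illustrative sketch (not part of the original file): a minimal client
 * supplies add/remove callbacks and is told about every existing and future
 * device.  "my_client", "my_add", "my_remove" and "struct my_state" are
 * hypothetical names.
 *
 *      static struct ib_client my_client;
 *
 *      static void my_add(struct ib_device *device)
 *      {
 *              struct my_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
 *
 *              if (state)
 *                      ib_set_client_data(device, &my_client, state);
 *      }
 *
 *      static void my_remove(struct ib_device *device, void *client_data)
 *      {
 *              kfree(client_data);
 *      }
 *
 *      static struct ib_client my_client = {
 *              .name   = "my_client",
 *              .add    = my_add,
 *              .remove = my_remove,
 *      };
 *
 *      ret = ib_register_client(&my_client);
 */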
/**
 * ib_unregister_client - Unregister an IB client
 * @client:Client to unregister
 *
 * Upper level users use ib_unregister_client() to remove their client
 * registration.  When ib_unregister_client() is called, the client
 * will receive a remove callback for each IB device still registered.
 */
void ib_unregister_client(struct ib_client *client)
{
        struct ib_client_data *context, *tmp;
        struct ib_device *device;
        unsigned long flags;

        mutex_lock(&device_mutex);

        down_write(&lists_rwsem);
        list_del(&client->list);
        up_write(&lists_rwsem);

        list_for_each_entry(device, &device_list, core_list) {
                struct ib_client_data *found_context = NULL;

                down_write(&lists_rwsem);
                spin_lock_irqsave(&device->client_data_lock, flags);
                list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
                        if (context->client == client) {
                                context->going_down = true;
                                found_context = context;
                                break;
                        }
                spin_unlock_irqrestore(&device->client_data_lock, flags);
                up_write(&lists_rwsem);

                if (client->remove)
                        client->remove(device, found_context ?
                                               found_context->data : NULL);

                if (!found_context) {
                        pr_warn("No client context found for %s/%s\n",
                                device->name, client->name);
                        continue;
                }

                down_write(&lists_rwsem);
                spin_lock_irqsave(&device->client_data_lock, flags);
                list_del(&found_context->list);
                kfree(found_context);
                spin_unlock_irqrestore(&device->client_data_lock, flags);
                up_write(&lists_rwsem);
        }

        mutex_unlock(&device_mutex);
}
EXPORT_SYMBOL(ib_unregister_client);
/**
 * ib_get_client_data - Get IB client context
 * @device:Device to get context for
 * @client:Client to get context for
 *
 * ib_get_client_data() returns client context set with
 * ib_set_client_data().
 */
void *ib_get_client_data(struct ib_device *device, struct ib_client *client)
{
        struct ib_client_data *context;
        void *ret = NULL;
        unsigned long flags;

        spin_lock_irqsave(&device->client_data_lock, flags);
        list_for_each_entry(context, &device->client_data_list, list)
                if (context->client == client) {
                        ret = context->data;
                        break;
                }
        spin_unlock_irqrestore(&device->client_data_lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_get_client_data);
/**
 * ib_set_client_data - Set IB client context
 * @device:Device to set context for
 * @client:Client to set context for
 * @data:Context to set
 *
 * ib_set_client_data() sets client context that can be retrieved with
 * ib_get_client_data().
 */
void ib_set_client_data(struct ib_device *device, struct ib_client *client,
                        void *data)
{
        struct ib_client_data *context;
        unsigned long flags;

        spin_lock_irqsave(&device->client_data_lock, flags);
        list_for_each_entry(context, &device->client_data_list, list)
                if (context->client == client) {
                        context->data = data;
                        goto out;
                }

        printk(KERN_WARNING "No client context found for %s/%s\n",
               device->name, client->name);

out:
        spin_unlock_irqrestore(&device->client_data_lock, flags);
}
EXPORT_SYMBOL(ib_set_client_data);
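
/*
 * Illustrative sketch (not part of the original file): a client usually
 * stores its per-device state from its add callback and retrieves it later,
 * for example in its remove callback or in another code path that is handed
 * the same device.  "my_client" and "struct my_state" are hypothetical.
 *
 *      // in the add callback:
 *      ib_set_client_data(device, &my_client, state);
 *
 *      // elsewhere, given the same device:
 *      struct my_state *state = ib_get_client_data(device, &my_client);
 *      if (state)
 *              state->devices_seen++;
 */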
/**
 * ib_register_event_handler - Register an IB event handler
 * @event_handler:Handler to register
 *
 * ib_register_event_handler() registers an event handler that will be
 * called back when asynchronous IB events occur (as defined in
 * chapter 11 of the InfiniBand Architecture Specification).  This
 * callback may occur in interrupt context.
 */
int ib_register_event_handler(struct ib_event_handler *event_handler)
{
        unsigned long flags;

        spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
        list_add_tail(&event_handler->list,
                      &event_handler->device->event_handler_list);
        spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);

        return 0;
}
EXPORT_SYMBOL(ib_register_event_handler);
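
/*
 * Illustrative sketch (not part of the original file): a consumer fills in
 * an ib_event_handler with the INIT_IB_EVENT_HANDLER() macro from
 * <rdma/ib_verbs.h> and registers it.  Because the handler may run in
 * interrupt context, it must not sleep.  "my_event_handler" and "handler"
 * are hypothetical names.
 *
 *      static void my_event_handler(struct ib_event_handler *handler,
 *                                   struct ib_event *event)
 *      {
 *              pr_info("async event %d on %s\n",
 *                      event->event, event->device->name);
 *      }
 *
 *      struct ib_event_handler handler;
 *
 *      INIT_IB_EVENT_HANDLER(&handler, device, my_event_handler);
 *      ib_register_event_handler(&handler);
 */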
/**
 * ib_unregister_event_handler - Unregister an event handler
 * @event_handler:Handler to unregister
 *
 * Unregister an event handler registered with
 * ib_register_event_handler().
 */
int ib_unregister_event_handler(struct ib_event_handler *event_handler)
{
        unsigned long flags;

        spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
        list_del(&event_handler->list);
        spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);

        return 0;
}
EXPORT_SYMBOL(ib_unregister_event_handler);
/**
 * ib_dispatch_event - Dispatch an asynchronous event
 * @event:Event to dispatch
 *
 * Low-level drivers must call ib_dispatch_event() to dispatch the
 * event to all registered event handlers when an asynchronous event
 * occurs.
 */
void ib_dispatch_event(struct ib_event *event)
{
        unsigned long flags;
        struct ib_event_handler *handler;

        spin_lock_irqsave(&event->device->event_handler_lock, flags);

        list_for_each_entry(handler, &event->device->event_handler_list, list)
                handler->handler(handler, event);

        spin_unlock_irqrestore(&event->device->event_handler_lock, flags);
}
EXPORT_SYMBOL(ib_dispatch_event);
/**
 * ib_query_device - Query IB device attributes
 * @device:Device to query
 * @device_attr:Device attributes
 *
 * ib_query_device() returns the attributes of a device through the
 * @device_attr pointer.
 */
int ib_query_device(struct ib_device *device,
                    struct ib_device_attr *device_attr)
{
        struct ib_udata uhw = {.outlen = 0, .inlen = 0};

        memset(device_attr, 0, sizeof(*device_attr));

        return device->query_device(device, device_attr, &uhw);
}
EXPORT_SYMBOL(ib_query_device);
/**
 * ib_query_port - Query IB port attributes
 * @device:Device to query
 * @port_num:Port number to query
 * @port_attr:Port attributes
 *
 * ib_query_port() returns the attributes of a port through the
 * @port_attr pointer.
 */
int ib_query_port(struct ib_device *device,
                  u8 port_num,
                  struct ib_port_attr *port_attr)
{
        if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;

        return device->query_port(device, port_num, port_attr);
}
EXPORT_SYMBOL(ib_query_port);
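
/*
 * Illustrative sketch (not part of the original file): checking whether the
 * first port of a device is active.  "port_attr" and "ret" are hypothetical
 * local variables.
 *
 *      struct ib_port_attr port_attr;
 *      int ret;
 *
 *      ret = ib_query_port(device, 1, &port_attr);
 *      if (!ret && port_attr.state == IB_PORT_ACTIVE)
 *              pr_info("%s port 1 is active, LID 0x%x\n",
 *                      device->name, port_attr.lid);
 */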
/**
 * ib_query_gid - Get GID table entry
 * @device:Device to query
 * @port_num:Port number to query
 * @index:GID table index to query
 * @gid:Returned GID
 *
 * ib_query_gid() fetches the specified GID table entry.
 */
int ib_query_gid(struct ib_device *device,
                 u8 port_num, int index, union ib_gid *gid)
{
        if (rdma_cap_roce_gid_table(device, port_num))
                return ib_get_cached_gid(device, port_num, index, gid);

        return device->query_gid(device, port_num, index, gid);
}
EXPORT_SYMBOL(ib_query_gid);
/**
 * ib_enum_roce_netdev - enumerate all RoCE ports
 * @ib_dev : IB device we want to query
 * @filter: Should we call the callback?
 * @filter_cookie: Cookie passed to filter
 * @cb: Callback to call for each found RoCE port
 * @cookie: Cookie passed back to the callback
 *
 * Enumerates all of the physical RoCE ports of ib_dev
 * which are related to a netdevice and calls the callback() on each
 * port for which the filter() function returns a non-zero value.
 */
void ib_enum_roce_netdev(struct ib_device *ib_dev,
                         roce_netdev_filter filter,
                         void *filter_cookie,
                         roce_netdev_callback cb,
                         void *cookie)
{
        u8 port;

        for (port = rdma_start_port(ib_dev); port <= rdma_end_port(ib_dev);
             port++)
                if (rdma_protocol_roce(ib_dev, port)) {
                        struct net_device *idev = NULL;

                        if (ib_dev->get_netdev)
                                idev = ib_dev->get_netdev(ib_dev, port);

                        if (idev &&
                            idev->reg_state >= NETREG_UNREGISTERED) {
                                dev_put(idev);
                                idev = NULL;
                        }

                        if (filter(ib_dev, port, idev, filter_cookie))
                                cb(ib_dev, port, idev, cookie);

                        if (idev)
                                dev_put(idev);
                }
}
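
/*
 * Illustrative sketch (not part of the original file): a filter that matches
 * ports whose netdevice equals the cookie, and a callback that logs each
 * match.  The function names are hypothetical; the roce_netdev_filter and
 * roce_netdev_callback typedefs come from <rdma/ib_verbs.h>.
 *
 *      static int my_filter(struct ib_device *device, u8 port,
 *                           struct net_device *idev, void *cookie)
 *      {
 *              return idev == cookie;
 *      }
 *
 *      static void my_cb(struct ib_device *device, u8 port,
 *                        struct net_device *idev, void *cookie)
 *      {
 *              pr_info("%s port %u matches\n", device->name, port);
 *      }
 *
 *      ib_enum_roce_netdev(ib_dev, my_filter, ndev, my_cb, NULL);
 */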
/**
 * ib_enum_all_roce_netdevs - enumerate all RoCE devices
 * @filter: Should we call the callback?
 * @filter_cookie: Cookie passed to filter
 * @cb: Callback to call for each found RoCE port
 * @cookie: Cookie passed back to the callback
 *
 * Enumerates all RoCE devices' physical ports which are related
 * to netdevices and calls the callback() on each port for which
 * the filter() function returns a non-zero value.
 */
void ib_enum_all_roce_netdevs(roce_netdev_filter filter,
                              void *filter_cookie,
                              roce_netdev_callback cb,
                              void *cookie)
{
        struct ib_device *dev;

        down_read(&lists_rwsem);
        list_for_each_entry(dev, &device_list, core_list)
                ib_enum_roce_netdev(dev, filter, filter_cookie, cb, cookie);
        up_read(&lists_rwsem);
}
/**
 * ib_query_pkey - Get P_Key table entry
 * @device:Device to query
 * @port_num:Port number to query
 * @index:P_Key table index to query
 * @pkey:Returned P_Key
 *
 * ib_query_pkey() fetches the specified P_Key table entry.
 */
int ib_query_pkey(struct ib_device *device,
                  u8 port_num, u16 index, u16 *pkey)
{
        return device->query_pkey(device, port_num, index, pkey);
}
EXPORT_SYMBOL(ib_query_pkey);
/**
 * ib_modify_device - Change IB device attributes
 * @device:Device to modify
 * @device_modify_mask:Mask of attributes to change
 * @device_modify:New attribute values
 *
 * ib_modify_device() changes a device's attributes as specified by
 * the @device_modify_mask and @device_modify structure.
 */
int ib_modify_device(struct ib_device *device,
                     int device_modify_mask,
                     struct ib_device_modify *device_modify)
{
        if (!device->modify_device)
                return -ENOSYS;

        return device->modify_device(device, device_modify_mask,
                                     device_modify);
}
EXPORT_SYMBOL(ib_modify_device);
/**
 * ib_modify_port - Modifies the attributes for the specified port.
 * @device: The device to modify.
 * @port_num: The number of the port to modify.
 * @port_modify_mask: Mask used to specify which attributes of the port
 *   are to be changed.
 * @port_modify: New attribute values for the port.
 *
 * ib_modify_port() changes a port's attributes as specified by the
 * @port_modify_mask and @port_modify structure.
 */
int ib_modify_port(struct ib_device *device,
                   u8 port_num, int port_modify_mask,
                   struct ib_port_modify *port_modify)
{
        if (!device->modify_port)
                return -ENOSYS;

        if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;

        return device->modify_port(device, port_num, port_modify_mask,
                                   port_modify);
}
EXPORT_SYMBOL(ib_modify_port);
/**
 * ib_find_gid - Returns the port number and GID table index where
 *   a specified GID value occurs.
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @port_num: The port number of the device where the GID value was found.
 * @index: The index into the GID table where the GID was found.  This
 *   parameter may be NULL.
 */
int ib_find_gid(struct ib_device *device, union ib_gid *gid,
                u8 *port_num, u16 *index)
{
        union ib_gid tmp_gid;
        int ret, port, i;

        for (port = rdma_start_port(device); port <= rdma_end_port(device); ++port) {
                if (rdma_cap_roce_gid_table(device, port)) {
                        if (!ib_cache_gid_find_by_port(device, gid, port,
                                                       NULL, index)) {
                                *port_num = port;
                                return 0;
                        }
                }

                for (i = 0; i < device->port_immutable[port].gid_tbl_len; ++i) {
                        ret = ib_query_gid(device, port, i, &tmp_gid);
                        if (ret)
                                return ret;
                        if (!memcmp(&tmp_gid, gid, sizeof *gid)) {
                                *port_num = port;
                                if (index)
                                        *index = i;
                                return 0;
                        }
                }
        }

        return -ENOENT;
}
EXPORT_SYMBOL(ib_find_gid);
/**
 * ib_find_pkey - Returns the PKey table index where a specified
 *   PKey value occurs.
 * @device: The device to query.
 * @port_num: The port number of the device to search for the PKey.
 * @pkey: The PKey value to search for.
 * @index: The index into the PKey table where the PKey was found.
 */
int ib_find_pkey(struct ib_device *device,
                 u8 port_num, u16 pkey, u16 *index)
{
        int ret, i;
        u16 tmp_pkey;
        int partial_ix = -1;

        for (i = 0; i < device->port_immutable[port_num].pkey_tbl_len; ++i) {
                ret = ib_query_pkey(device, port_num, i, &tmp_pkey);
                if (ret)
                        return ret;
                if ((pkey & 0x7fff) == (tmp_pkey & 0x7fff)) {
                        /* If there is a full-member pkey, take it. */
                        if (tmp_pkey & 0x8000) {
                                *index = i;
                                return 0;
                        }
                        if (partial_ix < 0)
                                partial_ix = i;
                }
        }

        /* No full-member pkey found; if a limited-member one exists, take it. */
        if (partial_ix >= 0) {
                *index = partial_ix;
                return 0;
        }
        return -ENOENT;
}
EXPORT_SYMBOL(ib_find_pkey);
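
/*
 * Illustrative sketch (not part of the original file): looking up the table
 * index of the default full-member partition key, 0xffff.  "index" is a
 * hypothetical local variable.
 *
 *      u16 index;
 *
 *      if (!ib_find_pkey(device, port_num, 0xffff, &index))
 *              pr_info("default pkey is at index %u\n", index);
 */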
/**
 * ib_get_net_dev_by_params() - Return the appropriate net_dev
 * for a received CM request
 * @dev:	An RDMA device on which the request has been received.
 * @port:	Port number on the RDMA device.
 * @pkey:	The Pkey the request came on.
 * @gid:	A GID that the net_dev uses to communicate.
 * @addr:	Contains the IP address that the request specified as its
 *		destination.
 */
struct net_device *ib_get_net_dev_by_params(struct ib_device *dev,
                                            u8 port,
                                            u16 pkey,
                                            const union ib_gid *gid,
                                            const struct sockaddr *addr)
{
        struct net_device *net_dev = NULL;
        struct ib_client_data *context;

        if (!rdma_protocol_ib(dev, port))
                return NULL;

        down_read(&lists_rwsem);

        list_for_each_entry(context, &dev->client_data_list, list) {
                struct ib_client *client = context->client;

                if (context->going_down)
                        continue;

                if (client->get_net_dev_by_params) {
                        net_dev = client->get_net_dev_by_params(dev, port, pkey,
                                                                gid, addr,
                                                                context->data);
                        if (net_dev)
                                break;
                }
        }

        up_read(&lists_rwsem);

        return net_dev;
}
EXPORT_SYMBOL(ib_get_net_dev_by_params);
static int __init ib_core_init(void)
{
        int ret;

        ib_wq = alloc_workqueue("infiniband", 0, 0);
        if (!ib_wq)
                return -ENOMEM;

        ret = class_register(&ib_class);
        if (ret) {
                printk(KERN_WARNING "Couldn't create InfiniBand device class\n");
                goto err;
        }

        ret = ibnl_init();
        if (ret) {
                printk(KERN_WARNING "Couldn't init IB netlink interface\n");
                goto err_sysfs;
        }

        ib_cache_setup();

        return 0;

err_sysfs:
        class_unregister(&ib_class);

err:
        destroy_workqueue(ib_wq);
        return ret;
}

static void __exit ib_core_cleanup(void)
{
        ib_cache_cleanup();
        ibnl_cleanup();
        class_unregister(&ib_class);
        /* Make sure that any pending umem accounting work is done. */
        destroy_workqueue(ib_wq);
}

module_init(ib_core_init);
module_exit(ib_core_cleanup);