/*
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2006 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/idr.h>
#include <linux/workqueue.h>
#include <uapi/linux/if_ether.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_cache.h>
#include <rdma/rdma_netlink.h>
#include <net/netlink.h>
#include <uapi/rdma/ib_user_sa.h>
#include <rdma/ib_marshall.h>
#include <rdma/ib_addr.h>
#include "sa.h"
#include "core_priv.h"
#define IB_SA_LOCAL_SVC_TIMEOUT_MIN		100
#define IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT		2000
#define IB_SA_LOCAL_SVC_TIMEOUT_MAX		200000
#define IB_SA_CPI_MAX_RETRY_CNT			3
#define IB_SA_CPI_RETRY_WAIT			1000 /* msecs */
static int sa_local_svc_timeout_ms = IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT;

struct ib_sa_sm_ah {
	struct ib_ah        *ah;
	struct kref          ref;
	u16		     pkey_index;
	u8		     src_path_mask;
};
enum rdma_class_port_info_type {
	RDMA_CLASS_PORT_INFO_IB,
	RDMA_CLASS_PORT_INFO_OPA
};

struct rdma_class_port_info {
	enum rdma_class_port_info_type type;
	union {
		struct ib_class_port_info ib;
		struct opa_class_port_info opa;
	};
};

struct ib_sa_classport_cache {
	bool valid;
	int retry_cnt;
	struct rdma_class_port_info data;
};

struct ib_sa_port {
	struct ib_mad_agent *agent;
	struct ib_sa_sm_ah  *sm_ah;
	struct work_struct   update_task;
	struct ib_sa_classport_cache classport_info;
	struct delayed_work ib_cpi_work;
	spinlock_t           classport_lock; /* protects class port info set */
	spinlock_t           ah_lock;
	u8                   port_num;
};

struct ib_sa_device {
	int                     start_port, end_port;
	struct ib_event_handler event_handler;
	struct ib_sa_port port[0];
};
struct ib_sa_query {
	void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *);
	void (*release)(struct ib_sa_query *);
	struct ib_sa_client    *client;
	struct ib_sa_port      *port;
	struct ib_mad_send_buf *mad_buf;
	struct ib_sa_sm_ah     *sm_ah;
	int			id;
	u32			flags;
	struct list_head	list; /* Local svc request list */
	u32			seq; /* Local svc request sequence number */
	unsigned long		timeout; /* Local svc timeout */
	u8			path_use; /* How will the pathrecord be used */
};

#define IB_SA_ENABLE_LOCAL_SERVICE	0x00000001
#define IB_SA_CANCEL			0x00000002
#define IB_SA_QUERY_OPA			0x00000004
struct ib_sa_service_query {
	void (*callback)(int, struct ib_sa_service_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_path_query {
	void (*callback)(int, struct sa_path_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_guidinfo_query {
	void (*callback)(int, struct ib_sa_guidinfo_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_classport_info_query {
	void (*callback)(void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_mcmember_query {
	void (*callback)(int, struct ib_sa_mcmember_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};
static LIST_HEAD(ib_nl_request_list);
static DEFINE_SPINLOCK(ib_nl_request_lock);
static atomic_t ib_nl_sa_request_seq;
static struct workqueue_struct *ib_nl_wq;
static struct delayed_work ib_nl_timed_work;
static const struct nla_policy ib_nl_policy[LS_NLA_TYPE_MAX] = {
	[LS_NLA_TYPE_PATH_RECORD]	= {.type = NLA_BINARY,
					   .len = sizeof(struct ib_path_rec_data)},
	[LS_NLA_TYPE_TIMEOUT]		= {.type = NLA_U32},
	[LS_NLA_TYPE_SERVICE_ID]	= {.type = NLA_U64},
	[LS_NLA_TYPE_DGID]		= {.type = NLA_BINARY,
					   .len = sizeof(struct rdma_nla_ls_gid)},
	[LS_NLA_TYPE_SGID]		= {.type = NLA_BINARY,
					   .len = sizeof(struct rdma_nla_ls_gid)},
	[LS_NLA_TYPE_TCLASS]		= {.type = NLA_U8},
	[LS_NLA_TYPE_PKEY]		= {.type = NLA_U16},
	[LS_NLA_TYPE_QOS_CLASS]		= {.type = NLA_U16},
};
static void ib_sa_add_one(struct ib_device *device);
static void ib_sa_remove_one(struct ib_device *device, void *client_data);

static struct ib_client sa_client = {
	.name   = "sa",
	.add    = ib_sa_add_one,
	.remove = ib_sa_remove_one
};

static DEFINE_SPINLOCK(idr_lock);
static DEFINE_IDR(query_idr);

static DEFINE_SPINLOCK(tid_lock);
static u32 tid;
#define PATH_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct sa_path_rec, field),	\
	.struct_size_bytes   = sizeof((struct sa_path_rec *)0)->field,	\
	.field_name          = "sa_path_rec:" #field

static const struct ib_field path_rec_table[] = {
	{ PATH_REC_FIELD(service_id),
	{ PATH_REC_FIELD(dgid),
	{ PATH_REC_FIELD(sgid),
	{ PATH_REC_FIELD(dlid),
	{ PATH_REC_FIELD(slid),
	{ PATH_REC_FIELD(raw_traffic),
	{ PATH_REC_FIELD(flow_label),
	{ PATH_REC_FIELD(hop_limit),
	{ PATH_REC_FIELD(traffic_class),
	{ PATH_REC_FIELD(reversible),
	{ PATH_REC_FIELD(numb_path),
	{ PATH_REC_FIELD(pkey),
	{ PATH_REC_FIELD(qos_class),
	{ PATH_REC_FIELD(sl),
	{ PATH_REC_FIELD(mtu_selector),
	{ PATH_REC_FIELD(mtu),
	{ PATH_REC_FIELD(rate_selector),
	{ PATH_REC_FIELD(rate),
	{ PATH_REC_FIELD(packet_life_time_selector),
	{ PATH_REC_FIELD(packet_life_time),
	{ PATH_REC_FIELD(preference),
#define MCMEMBER_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_mcmember_rec, field),	\
	.struct_size_bytes   = sizeof ((struct ib_sa_mcmember_rec *) 0)->field,	\
	.field_name          = "sa_mcmember_rec:" #field

static const struct ib_field mcmember_rec_table[] = {
	{ MCMEMBER_REC_FIELD(mgid),
	{ MCMEMBER_REC_FIELD(port_gid),
	{ MCMEMBER_REC_FIELD(qkey),
	{ MCMEMBER_REC_FIELD(mlid),
	{ MCMEMBER_REC_FIELD(mtu_selector),
	{ MCMEMBER_REC_FIELD(mtu),
	{ MCMEMBER_REC_FIELD(traffic_class),
	{ MCMEMBER_REC_FIELD(pkey),
	{ MCMEMBER_REC_FIELD(rate_selector),
	{ MCMEMBER_REC_FIELD(rate),
	{ MCMEMBER_REC_FIELD(packet_life_time_selector),
	{ MCMEMBER_REC_FIELD(packet_life_time),
	{ MCMEMBER_REC_FIELD(sl),
	{ MCMEMBER_REC_FIELD(flow_label),
	{ MCMEMBER_REC_FIELD(hop_limit),
	{ MCMEMBER_REC_FIELD(scope),
	{ MCMEMBER_REC_FIELD(join_state),
	{ MCMEMBER_REC_FIELD(proxy_join),
#define SERVICE_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_service_rec, field),	\
	.struct_size_bytes   = sizeof ((struct ib_sa_service_rec *) 0)->field,	\
	.field_name          = "sa_service_rec:" #field

static const struct ib_field service_rec_table[] = {
	{ SERVICE_REC_FIELD(id),
	{ SERVICE_REC_FIELD(gid),
	{ SERVICE_REC_FIELD(pkey),
	{ SERVICE_REC_FIELD(lease),
	{ SERVICE_REC_FIELD(key),
	{ SERVICE_REC_FIELD(name),
	{ SERVICE_REC_FIELD(data8),
	{ SERVICE_REC_FIELD(data16),
	{ SERVICE_REC_FIELD(data32),
	{ SERVICE_REC_FIELD(data64),
#define CLASSPORTINFO_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_class_port_info, field),	\
	.struct_size_bytes   = sizeof((struct ib_class_port_info *)0)->field,	\
	.field_name          = "ib_class_port_info:" #field

static const struct ib_field ib_classport_info_rec_table[] = {
	{ CLASSPORTINFO_REC_FIELD(base_version),
	{ CLASSPORTINFO_REC_FIELD(class_version),
	{ CLASSPORTINFO_REC_FIELD(capability_mask),
	{ CLASSPORTINFO_REC_FIELD(cap_mask2_resp_time),
	{ CLASSPORTINFO_REC_FIELD(redirect_gid),
	{ CLASSPORTINFO_REC_FIELD(redirect_tcslfl),
	{ CLASSPORTINFO_REC_FIELD(redirect_lid),
	{ CLASSPORTINFO_REC_FIELD(redirect_pkey),
	{ CLASSPORTINFO_REC_FIELD(redirect_qp),
	{ CLASSPORTINFO_REC_FIELD(redirect_qkey),
	{ CLASSPORTINFO_REC_FIELD(trap_gid),
	{ CLASSPORTINFO_REC_FIELD(trap_tcslfl),
	{ CLASSPORTINFO_REC_FIELD(trap_lid),
	{ CLASSPORTINFO_REC_FIELD(trap_pkey),
	{ CLASSPORTINFO_REC_FIELD(trap_hlqp),
	{ CLASSPORTINFO_REC_FIELD(trap_qkey),
#define OPA_CLASSPORTINFO_REC_FIELD(field) \
	.struct_offset_bytes = \
		offsetof(struct opa_class_port_info, field),	\
	.struct_size_bytes = \
		sizeof((struct opa_class_port_info *)0)->field,	\
	.field_name          = "opa_class_port_info:" #field

static const struct ib_field opa_classport_info_rec_table[] = {
	{ OPA_CLASSPORTINFO_REC_FIELD(base_version),
	{ OPA_CLASSPORTINFO_REC_FIELD(class_version),
	{ OPA_CLASSPORTINFO_REC_FIELD(cap_mask),
	{ OPA_CLASSPORTINFO_REC_FIELD(cap_mask2_resp_time),
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_gid),
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_tc_fl),
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_lid),
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_sl_qp),
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_qkey),
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_gid),
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_tc_fl),
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_lid),
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_hl_qp),
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_qkey),
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_pkey),
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_pkey),
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_sl_rsvd),
#define GUIDINFO_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_guidinfo_rec, field),	\
	.struct_size_bytes   = sizeof((struct ib_sa_guidinfo_rec *) 0)->field,	\
	.field_name          = "sa_guidinfo_rec:" #field

static const struct ib_field guidinfo_rec_table[] = {
	{ GUIDINFO_REC_FIELD(lid),
	{ GUIDINFO_REC_FIELD(block_num),
	{ GUIDINFO_REC_FIELD(res1),
	{ GUIDINFO_REC_FIELD(res2),
	{ GUIDINFO_REC_FIELD(guid_info_list),
static inline void ib_sa_disable_local_svc(struct ib_sa_query *query)
{
	query->flags &= ~IB_SA_ENABLE_LOCAL_SERVICE;
}

static inline int ib_sa_query_cancelled(struct ib_sa_query *query)
{
	return (query->flags & IB_SA_CANCEL);
}
static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,
				     struct ib_sa_query *query)
{
	struct sa_path_rec *sa_rec = query->mad_buf->context[1];
	struct ib_sa_mad *mad = query->mad_buf->mad;
	ib_sa_comp_mask comp_mask = mad->sa_hdr.comp_mask;
	u16 val16;
	u64 val64;
	struct rdma_ls_resolve_header *header;

	query->mad_buf->context[1] = NULL;

	/* Construct the family header first */
	header = (struct rdma_ls_resolve_header *)
		skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
	memcpy(header->device_name, query->port->agent->device->name,
	       LS_DEVICE_NAME_MAX);
	header->port_num = query->port->port_num;

	if ((comp_mask & IB_SA_PATH_REC_REVERSIBLE) &&
	    sa_rec->reversible != 0)
		query->path_use = LS_RESOLVE_PATH_USE_GMP;
	else
		query->path_use = LS_RESOLVE_PATH_USE_UNIDIRECTIONAL;
	header->path_use = query->path_use;

	/* Now build the attributes */
	if (comp_mask & IB_SA_PATH_REC_SERVICE_ID) {
		val64 = be64_to_cpu(sa_rec->service_id);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SERVICE_ID,
			sizeof(val64), &val64);
	}
	if (comp_mask & IB_SA_PATH_REC_DGID)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_DGID,
			sizeof(sa_rec->dgid), &sa_rec->dgid);
	if (comp_mask & IB_SA_PATH_REC_SGID)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SGID,
			sizeof(sa_rec->sgid), &sa_rec->sgid);
	if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_TCLASS,
			sizeof(sa_rec->traffic_class), &sa_rec->traffic_class);

	if (comp_mask & IB_SA_PATH_REC_PKEY) {
		val16 = be16_to_cpu(sa_rec->pkey);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_PKEY,
			sizeof(val16), &val16);
	}
	if (comp_mask & IB_SA_PATH_REC_QOS_CLASS) {
		val16 = be16_to_cpu(sa_rec->qos_class);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_QOS_CLASS,
			sizeof(val16), &val16);
	}
}
static int ib_nl_get_path_rec_attrs_len(ib_sa_comp_mask comp_mask)
{
	int len = 0;

	if (comp_mask & IB_SA_PATH_REC_SERVICE_ID)
		len += nla_total_size(sizeof(u64));
	if (comp_mask & IB_SA_PATH_REC_DGID)
		len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
	if (comp_mask & IB_SA_PATH_REC_SGID)
		len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
	if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
		len += nla_total_size(sizeof(u8));
	if (comp_mask & IB_SA_PATH_REC_PKEY)
		len += nla_total_size(sizeof(u16));
	if (comp_mask & IB_SA_PATH_REC_QOS_CLASS)
		len += nla_total_size(sizeof(u16));

	/*
	 * Make sure that at least some of the required comp_mask bits are
	 * set.
	 */
	if (WARN_ON(len == 0))
		return -EINVAL;

	/* Add the family header */
	len += NLMSG_ALIGN(sizeof(struct rdma_ls_resolve_header));

	return len;
}
static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
{
	struct sk_buff *skb = NULL;
	struct nlmsghdr *nlh;
	void *data;
	int ret = 0;
	struct ib_sa_mad *mad;
	int len;

	mad = query->mad_buf->mad;
	len = ib_nl_get_path_rec_attrs_len(mad->sa_hdr.comp_mask);
	if (len <= 0)
		return -EMSGSIZE;

	skb = nlmsg_new(len, gfp_mask);
	if (!skb)
		return -ENOMEM;

	/* Put nlmsg header only for now */
	data = ibnl_put_msg(skb, &nlh, query->seq, 0, RDMA_NL_LS,
			    RDMA_NL_LS_OP_RESOLVE, NLM_F_REQUEST);
	if (!data) {
		nlmsg_free(skb);
		return -EMSGSIZE;
	}

	/* Add attributes */
	ib_nl_set_path_rec_attrs(skb, query);

	/* Repair the nlmsg header length */
	nlmsg_end(skb, nlh);

	ret = ibnl_multicast(skb, nlh, RDMA_NL_GROUP_LS, gfp_mask);
	if (!ret)
		ret = len;
	else
		ret = 0;

	return ret;
}
static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
{
	unsigned long flags;
	unsigned long delay;
	int ret;

	INIT_LIST_HEAD(&query->list);
	query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);

	/* Put the request on the list first. */
	spin_lock_irqsave(&ib_nl_request_lock, flags);
	delay = msecs_to_jiffies(sa_local_svc_timeout_ms);
	query->timeout = delay + jiffies;
	list_add_tail(&query->list, &ib_nl_request_list);
	/* Start the timeout if this is the only request */
	if (ib_nl_request_list.next == &query->list)
		queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);

	ret = ib_nl_send_msg(query, gfp_mask);
	if (ret <= 0) {
		ret = -EIO;
		/* Remove the request */
		spin_lock_irqsave(&ib_nl_request_lock, flags);
		list_del(&query->list);
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
	} else {
		ret = 0;
	}

	return ret;
}
static int ib_nl_cancel_request(struct ib_sa_query *query)
{
	unsigned long flags;
	struct ib_sa_query *wait_query;
	int found = 0;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	list_for_each_entry(wait_query, &ib_nl_request_list, list) {
		/* Let the timeout take care of the callback */
		if (query == wait_query) {
			query->flags |= IB_SA_CANCEL;
			query->timeout = jiffies;
			list_move(&query->list, &ib_nl_request_list);
			found = 1;
			mod_delayed_work(ib_nl_wq, &ib_nl_timed_work, 1);
			break;
		}
	}
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);

	return found;
}
static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc);

static void ib_nl_process_good_resolve_rsp(struct ib_sa_query *query,
					   const struct nlmsghdr *nlh)
{
	struct ib_mad_send_wc mad_send_wc;
	struct ib_sa_mad *mad = NULL;
	const struct nlattr *head, *curr;
	struct ib_path_rec_data *rec;
	int len, rem;
	u32 mask = 0;
	int status = -EIO;

	if (query->callback) {
		head = (const struct nlattr *) nlmsg_data(nlh);
		len = nlmsg_len(nlh);
		switch (query->path_use) {
		case LS_RESOLVE_PATH_USE_UNIDIRECTIONAL:
			mask = IB_PATH_PRIMARY | IB_PATH_OUTBOUND;
			break;

		case LS_RESOLVE_PATH_USE_ALL:
		case LS_RESOLVE_PATH_USE_GMP:
		default:
			mask = IB_PATH_PRIMARY | IB_PATH_GMP |
				IB_PATH_BIDIRECTIONAL;
			break;
		}
		nla_for_each_attr(curr, head, len, rem) {
			if (curr->nla_type == LS_NLA_TYPE_PATH_RECORD) {
				rec = nla_data(curr);
				/*
				 * Get the first one. In the future, we may
				 * need to get up to 6 pathrecords.
				 */
				if ((rec->flags & mask) == mask) {
					mad = query->mad_buf->mad;
					mad->mad_hdr.method |=
						IB_MGMT_METHOD_RESP;
					memcpy(mad->data, rec->path_rec,
					       sizeof(rec->path_rec));
					status = 0;
					break;
				}
			}
		}
		query->callback(query, status, mad);
	}

	mad_send_wc.send_buf = query->mad_buf;
	mad_send_wc.status = IB_WC_SUCCESS;
	send_handler(query->mad_buf->mad_agent, &mad_send_wc);
}
static void ib_nl_request_timeout(struct work_struct *work)
{
	unsigned long flags;
	struct ib_sa_query *query;
	unsigned long delay;
	struct ib_mad_send_wc mad_send_wc;
	int ret;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	while (!list_empty(&ib_nl_request_list)) {
		query = list_entry(ib_nl_request_list.next,
				   struct ib_sa_query, list);

		if (time_after(query->timeout, jiffies)) {
			delay = query->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
			break;
		}

		list_del(&query->list);
		ib_sa_disable_local_svc(query);
		/* Hold the lock to protect against query cancellation */
		if (ib_sa_query_cancelled(query))
			ret = -1;
		else
			ret = ib_post_send_mad(query->mad_buf, NULL);
		if (ret) {
			mad_send_wc.send_buf = query->mad_buf;
			mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
			spin_unlock_irqrestore(&ib_nl_request_lock, flags);
			send_handler(query->port->agent, &mad_send_wc);
			spin_lock_irqsave(&ib_nl_request_lock, flags);
		}
	}
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);
}
int ib_nl_handle_set_timeout(struct sk_buff *skb,
			     struct netlink_callback *cb)
{
	const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh;
	int timeout, delta, abs_delta;
	const struct nlattr *attr;
	unsigned long flags;
	struct ib_sa_query *query;
	long delay = 0;
	struct nlattr *tb[LS_NLA_TYPE_MAX];
	int ret;

	if (!(nlh->nlmsg_flags & NLM_F_REQUEST) ||
	    !(NETLINK_CB(skb).sk) ||
	    !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
			nlmsg_len(nlh), ib_nl_policy, NULL);
	attr = (const struct nlattr *)tb[LS_NLA_TYPE_TIMEOUT];
	if (ret || !attr)
		goto settimeout_out;

	timeout = *(int *) nla_data(attr);
	if (timeout < IB_SA_LOCAL_SVC_TIMEOUT_MIN)
		timeout = IB_SA_LOCAL_SVC_TIMEOUT_MIN;
	if (timeout > IB_SA_LOCAL_SVC_TIMEOUT_MAX)
		timeout = IB_SA_LOCAL_SVC_TIMEOUT_MAX;

	delta = timeout - sa_local_svc_timeout_ms;
	if (delta < 0)
		abs_delta = -delta;
	else
		abs_delta = delta;

	if (delta != 0) {
		spin_lock_irqsave(&ib_nl_request_lock, flags);
		sa_local_svc_timeout_ms = timeout;
		list_for_each_entry(query, &ib_nl_request_list, list) {
			if (delta < 0 && abs_delta > query->timeout)
				query->timeout = 0;
			else
				query->timeout += delta;

			/* Get the new delay from the first entry */
			if (!delay) {
				delay = query->timeout - jiffies;
				if (delay <= 0)
					delay = 1;
			}
		}
		if (delay)
			mod_delayed_work(ib_nl_wq, &ib_nl_timed_work,
					 (unsigned long)delay);
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
	}

settimeout_out:
	return skb->len;
}
static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh)
{
	struct nlattr *tb[LS_NLA_TYPE_MAX];
	int ret;

	if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR)
		return 0;

	ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
			nlmsg_len(nlh), ib_nl_policy, NULL);
	if (ret)
		return 0;

	return 1;
}
int ib_nl_handle_resolve_resp(struct sk_buff *skb,
			      struct netlink_callback *cb)
{
	const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh;
	unsigned long flags;
	struct ib_sa_query *query;
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_send_wc mad_send_wc;
	int found = 0;
	int ret;

	if ((nlh->nlmsg_flags & NLM_F_REQUEST) ||
	    !(NETLINK_CB(skb).sk) ||
	    !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	list_for_each_entry(query, &ib_nl_request_list, list) {
		/*
		 * If the query is cancelled, let the timeout routine
		 * take care of it.
		 */
		if (nlh->nlmsg_seq == query->seq) {
			found = !ib_sa_query_cancelled(query);
			if (found)
				list_del(&query->list);
			break;
		}
	}

	if (!found) {
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		goto resp_out;
	}

	send_buf = query->mad_buf;

	if (!ib_nl_is_good_resolve_resp(nlh)) {
		/* if the result is a failure, send out the packet via IB */
		ib_sa_disable_local_svc(query);
		ret = ib_post_send_mad(query->mad_buf, NULL);
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		if (ret) {
			mad_send_wc.send_buf = send_buf;
			mad_send_wc.status = IB_WC_GENERAL_ERR;
			send_handler(query->port->agent, &mad_send_wc);
		}
	} else {
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		ib_nl_process_good_resolve_rsp(query, nlh);
	}

resp_out:
	return skb->len;
}
static void free_sm_ah(struct kref *kref)
{
	struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref);

	rdma_destroy_ah(sm_ah->ah);
	kfree(sm_ah);
}

void ib_sa_register_client(struct ib_sa_client *client)
{
	atomic_set(&client->users, 1);
	init_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_register_client);

void ib_sa_unregister_client(struct ib_sa_client *client)
{
	ib_sa_client_put(client);
	wait_for_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_unregister_client);
/**
 * ib_sa_cancel_query - try to cancel an SA query
 * @id: ID of query to cancel
 * @query: query pointer to cancel
 *
 * Try to cancel an SA query.  If the id and query don't match up or
 * the query has already completed, nothing is done.  Otherwise the
 * query is canceled and will complete with a status of -EINTR.
 * A short usage sketch follows the function below.
 */
void ib_sa_cancel_query(int id, struct ib_sa_query *query)
{
	unsigned long flags;
	struct ib_mad_agent *agent;
	struct ib_mad_send_buf *mad_buf;

	spin_lock_irqsave(&idr_lock, flags);
	if (idr_find(&query_idr, id) != query) {
		spin_unlock_irqrestore(&idr_lock, flags);
		return;
	}
	agent = query->port->agent;
	mad_buf = query->mad_buf;
	spin_unlock_irqrestore(&idr_lock, flags);

	/*
	 * If the query is still on the netlink request list, schedule
	 * it to be cancelled by the timeout routine. Otherwise, it has been
	 * sent to the MAD layer and has to be cancelled from there.
	 */
	if (!ib_nl_cancel_request(query))
		ib_cancel_mad(agent, mad_buf);
}
EXPORT_SYMBOL(ib_sa_cancel_query);
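/*
 * Usage sketch (illustrative only, not part of the original file): the ID
 * returned by a query function pairs with the query pointer it filled in,
 * as the kernel-doc above describes. The caller-side names used here
 * ("my_query", "my_id", "need_to_abort") are hypothetical.
 *
 *	struct ib_sa_query *my_query;
 *	int my_id;
 *
 *	my_id = ib_sa_path_rec_get(..., &my_query);
 *	if (my_id >= 0 && need_to_abort)
 *		ib_sa_cancel_query(my_id, my_query);
 *
 * The query's callback still runs, with status -EINTR, once the cancel
 * takes effect.
 */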
static u8 get_src_path_mask(struct ib_device *device, u8 port_num)
{
	struct ib_sa_device *sa_dev;
	struct ib_sa_port   *port;
	unsigned long flags;
	u8 src_path_mask;

	sa_dev = ib_get_client_data(device, &sa_client);
	if (!sa_dev)
		return 0x7f;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	spin_lock_irqsave(&port->ah_lock, flags);
	src_path_mask = port->sm_ah ? port->sm_ah->src_path_mask : 0x7f;
	spin_unlock_irqrestore(&port->ah_lock, flags);

	return src_path_mask;
}
int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
			 struct sa_path_rec *rec,
			 struct rdma_ah_attr *ah_attr)
{
	int ret;
	u16 gid_index;
	int use_roce;
	struct net_device *ndev = NULL;

	memset(ah_attr, 0, sizeof *ah_attr);
	ah_attr->type = rdma_ah_find_type(device, port_num);

	rdma_ah_set_dlid(ah_attr, be16_to_cpu(rec->dlid));
	rdma_ah_set_sl(ah_attr, rec->sl);
	rdma_ah_set_path_bits(ah_attr, be16_to_cpu(rec->slid) &
			      get_src_path_mask(device, port_num));
	rdma_ah_set_port_num(ah_attr, port_num);
	rdma_ah_set_static_rate(ah_attr, rec->rate);
	use_roce = rdma_cap_eth_ah(device, port_num);

	if (use_roce) {
		struct net_device *idev;
		struct net_device *resolved_dev;
		struct rdma_dev_addr dev_addr = {.bound_dev_if = rec->ifindex,
						 .net = rec->net ? rec->net :
							&init_net};
		union {
			struct sockaddr     _sockaddr;
			struct sockaddr_in  _sockaddr_in;
			struct sockaddr_in6 _sockaddr_in6;
		} sgid_addr, dgid_addr;

		if (!device->get_netdev)
			return -EOPNOTSUPP;

		rdma_gid2ip(&sgid_addr._sockaddr, &rec->sgid);
		rdma_gid2ip(&dgid_addr._sockaddr, &rec->dgid);

		/* validate the route */
		ret = rdma_resolve_ip_route(&sgid_addr._sockaddr,
					    &dgid_addr._sockaddr, &dev_addr);
		if (ret)
			return ret;

		if ((dev_addr.network == RDMA_NETWORK_IPV4 ||
		     dev_addr.network == RDMA_NETWORK_IPV6) &&
		    rec->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
			return -EINVAL;

		idev = device->get_netdev(device, port_num);
		if (!idev)
			return -ENODEV;

		resolved_dev = dev_get_by_index(dev_addr.net,
						dev_addr.bound_dev_if);
		if (resolved_dev->flags & IFF_LOOPBACK) {
			dev_put(resolved_dev);
			resolved_dev = idev;
			dev_hold(resolved_dev);
		}
		ndev = ib_get_ndev_from_path(rec);
		rcu_read_lock();
		if ((ndev && ndev != resolved_dev) ||
		    (resolved_dev != idev &&
		     !rdma_is_upper_dev_rcu(idev, resolved_dev)))
			ret = -EHOSTUNREACH;
		rcu_read_unlock();
		dev_put(idev);
		dev_put(resolved_dev);
		if (ret) {
			if (ndev)
				dev_put(ndev);
			return ret;
		}
	}

	if (rec->hop_limit > 0 || use_roce) {
		ret = ib_find_cached_gid_by_port(device, &rec->sgid,
						 rec->gid_type, port_num, ndev,
						 &gid_index);
		if (ret) {
			if (ndev)
				dev_put(ndev);
			return ret;
		}

		rdma_ah_set_grh(ah_attr, &rec->dgid,
				be32_to_cpu(rec->flow_label),
				gid_index, rec->hop_limit,
				rec->traffic_class);
		if (ndev)
			dev_put(ndev);
	}

	if (use_roce)
		memcpy(ah_attr->roce.dmac, rec->dmac, ETH_ALEN);

	return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_path);
static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask)
{
	unsigned long flags;

	spin_lock_irqsave(&query->port->ah_lock, flags);
	if (!query->port->sm_ah) {
		spin_unlock_irqrestore(&query->port->ah_lock, flags);
		return -EAGAIN;
	}
	kref_get(&query->port->sm_ah->ref);
	query->sm_ah = query->port->sm_ah;
	spin_unlock_irqrestore(&query->port->ah_lock, flags);

	query->mad_buf = ib_create_send_mad(query->port->agent, 1,
					    query->sm_ah->pkey_index,
					    0, IB_MGMT_SA_HDR, IB_MGMT_SA_DATA,
					    gfp_mask,
					    ((query->flags & IB_SA_QUERY_OPA) ?
					     OPA_MGMT_BASE_VERSION :
					     IB_MGMT_BASE_VERSION));
	if (IS_ERR(query->mad_buf)) {
		kref_put(&query->sm_ah->ref, free_sm_ah);
		return -ENOMEM;
	}

	query->mad_buf->ah = query->sm_ah->ah;

	return 0;
}

static void free_mad(struct ib_sa_query *query)
{
	ib_free_send_mad(query->mad_buf);
	kref_put(&query->sm_ah->ref, free_sm_ah);
}

static void init_mad(struct ib_sa_query *query, struct ib_mad_agent *agent)
{
	struct ib_sa_mad *mad = query->mad_buf->mad;
	unsigned long flags;

	memset(mad, 0, sizeof *mad);

	if (query->flags & IB_SA_QUERY_OPA) {
		mad->mad_hdr.base_version  = OPA_MGMT_BASE_VERSION;
		mad->mad_hdr.class_version = OPA_SA_CLASS_VERSION;
	} else {
		mad->mad_hdr.base_version  = IB_MGMT_BASE_VERSION;
		mad->mad_hdr.class_version = IB_SA_CLASS_VERSION;
	}
	mad->mad_hdr.mgmt_class    = IB_MGMT_CLASS_SUBN_ADM;
	spin_lock_irqsave(&tid_lock, flags);
	mad->mad_hdr.tid           =
		cpu_to_be64(((u64) agent->hi_tid) << 32 | tid++);
	spin_unlock_irqrestore(&tid_lock, flags);
}
static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
{
	bool preload = gfpflags_allow_blocking(gfp_mask);
	unsigned long flags;
	int ret, id;

	if (preload)
		idr_preload(gfp_mask);
	spin_lock_irqsave(&idr_lock, flags);

	id = idr_alloc(&query_idr, query, 0, 0, GFP_NOWAIT);

	spin_unlock_irqrestore(&idr_lock, flags);
	if (preload)
		idr_preload_end();
	if (id < 0)
		return id;

	query->mad_buf->timeout_ms  = timeout_ms;
	query->mad_buf->context[0] = query;
	query->id = id;

	if (query->flags & IB_SA_ENABLE_LOCAL_SERVICE) {
		if (!ibnl_chk_listeners(RDMA_NL_GROUP_LS)) {
			if (!ib_nl_make_request(query, gfp_mask))
				return id;
		}
		ib_sa_disable_local_svc(query);
	}

	ret = ib_post_send_mad(query->mad_buf, NULL);
	if (ret) {
		spin_lock_irqsave(&idr_lock, flags);
		idr_remove(&query_idr, id);
		spin_unlock_irqrestore(&idr_lock, flags);
	}

	/*
	 * It's not safe to dereference query any more, because the
	 * send may already have completed and freed the query in
	 * another context.
	 */
	return ret ? ret : id;
}

void ib_sa_unpack_path(void *attribute, struct sa_path_rec *rec)
{
	ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table), attribute, rec);
}
EXPORT_SYMBOL(ib_sa_unpack_path);

void ib_sa_pack_path(struct sa_path_rec *rec, void *attribute)
{
	ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, attribute);
}
EXPORT_SYMBOL(ib_sa_pack_path);
static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
				    int status,
				    struct ib_sa_mad *mad)
{
	struct ib_sa_path_query *query =
		container_of(sa_query, struct ib_sa_path_query, sa_query);

	if (mad) {
		struct sa_path_rec rec;

		ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
			  mad->data, &rec);
		rec.net = NULL;
		rec.ifindex = 0;
		rec.gid_type = IB_GID_TYPE_IB;
		eth_zero_addr(rec.dmac);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_path_query, sa_query));
}
/**
 * ib_sa_path_rec_get - Start a Path get query
 * @client: SA client
 * @device: device to send query on
 * @port_num: port number to send query on
 * @rec: Path Record to send in query
 * @comp_mask: component mask to send in query
 * @timeout_ms: time to wait for response
 * @gfp_mask: GFP mask to use for internal allocations
 * @callback: function called when query completes, times out or is
 * canceled
 * @context: opaque user context passed to callback
 * @sa_query: query context, used to cancel query
 *
 * Send a Path Record Get query to the SA to look up a path.  The
 * callback function will be called when the query completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query.  The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_path_rec_get() is negative, it is an
 * error code.  Otherwise it is a query ID that can be used to cancel
 * the query.  A short usage sketch follows the function below.
 */
int ib_sa_path_rec_get(struct ib_sa_client *client,
		       struct ib_device *device, u8 port_num,
		       struct sa_path_rec *rec,
		       ib_sa_comp_mask comp_mask,
		       int timeout_ms, gfp_t gfp_mask,
		       void (*callback)(int status,
					struct sa_path_rec *resp,
					void *context),
		       void *context,
		       struct ib_sa_query **sa_query)
{
	struct ib_sa_path_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port   *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(&query->sa_query, agent);

	query->sa_query.callback = callback ? ib_sa_path_rec_callback : NULL;
	query->sa_query.release  = ib_sa_path_rec_release;
	mad->mad_hdr.method	 = IB_MGMT_METHOD_GET;
	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_PATH_REC);
	mad->sa_hdr.comp_mask	 = comp_mask;

	ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, mad->data);

	*sa_query = &query->sa_query;

	query->sa_query.flags |= IB_SA_ENABLE_LOCAL_SERVICE;
	query->sa_query.mad_buf->context[1] = rec;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_path_rec_get);
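/*
 * Usage sketch (illustrative only, not part of the original file): a
 * minimal asynchronous caller of ib_sa_path_rec_get() as described in the
 * kernel-doc above. The client, record setup, completion and callback
 * shown here are hypothetical caller-side code.
 *
 *	static void my_path_cb(int status, struct sa_path_rec *resp, void *ctx)
 *	{
 *		if (!status)
 *			pr_info("path resolved, sl %u\n", resp->sl);
 *		complete(ctx);
 *	}
 *
 *	struct ib_sa_query *query;
 *	DECLARE_COMPLETION_ONSTACK(done);
 *	int id;
 *
 *	ib_sa_register_client(&my_sa_client);
 *	id = ib_sa_path_rec_get(&my_sa_client, device, port_num, &rec,
 *				IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID,
 *				1000, GFP_KERNEL, my_path_cb, &done, &query);
 *	if (id < 0)
 *		return id;
 *	wait_for_completion(&done);
 */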
static void ib_sa_service_rec_callback(struct ib_sa_query *sa_query,
				       int status,
				       struct ib_sa_mad *mad)
{
	struct ib_sa_service_query *query =
		container_of(sa_query, struct ib_sa_service_query, sa_query);

	if (mad) {
		struct ib_sa_service_rec rec;

		ib_unpack(service_rec_table, ARRAY_SIZE(service_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_service_query, sa_query));
}
/**
 * ib_sa_service_rec_query - Start Service Record operation
 * @client: SA client
 * @device: device to send request on
 * @port_num: port number to send request on
 * @method: SA method - should be get, set, or delete
 * @rec: Service Record to send in request
 * @comp_mask: component mask to send in request
 * @timeout_ms: time to wait for response
 * @gfp_mask: GFP mask to use for internal allocations
 * @callback: function called when request completes, times out or is
 * canceled
 * @context: opaque user context passed to callback
 * @sa_query: request context, used to cancel request
 *
 * Send a Service Record set/get/delete to the SA to register,
 * unregister or query a service record.
 * The callback function will be called when the request completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query.  The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_service_rec_query() is negative, it is an
 * error code.  Otherwise it is a request ID that can be used to cancel
 * the query.  A short usage sketch follows the function below.
 */
int ib_sa_service_rec_query(struct ib_sa_client *client,
			    struct ib_device *device, u8 port_num, u8 method,
			    struct ib_sa_service_rec *rec,
			    ib_sa_comp_mask comp_mask,
			    int timeout_ms, gfp_t gfp_mask,
			    void (*callback)(int status,
					     struct ib_sa_service_rec *resp,
					     void *context),
			    void *context,
			    struct ib_sa_query **sa_query)
{
	struct ib_sa_service_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port   *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	if (method != IB_MGMT_METHOD_GET &&
	    method != IB_MGMT_METHOD_SET &&
	    method != IB_SA_METHOD_DELETE)
		return -EINVAL;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(&query->sa_query, agent);

	query->sa_query.callback = callback ? ib_sa_service_rec_callback : NULL;
	query->sa_query.release  = ib_sa_service_rec_release;
	mad->mad_hdr.method	 = method;
	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_SERVICE_REC);
	mad->sa_hdr.comp_mask	 = comp_mask;

	ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table),
		rec, mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_service_rec_query);
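/*
 * Usage sketch (illustrative only, not part of the original file):
 * registering a service record, per the kernel-doc above, uses
 * IB_MGMT_METHOD_SET; IB_SA_METHOD_DELETE unregisters it. The surrounding
 * variables ("my_sa_client", "svc_rec", "my_svc_cb", etc.) are hypothetical
 * caller-side code.
 *
 *	id = ib_sa_service_rec_query(&my_sa_client, device, port_num,
 *				     IB_MGMT_METHOD_SET, &svc_rec, comp_mask,
 *				     1000, GFP_KERNEL, my_svc_cb, my_ctx,
 *				     &query);
 *	if (id < 0)
 *		return id;
 */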
static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
					int status,
					struct ib_sa_mad *mad)
{
	struct ib_sa_mcmember_query *query =
		container_of(sa_query, struct ib_sa_mcmember_query, sa_query);

	if (mad) {
		struct ib_sa_mcmember_rec rec;

		ib_unpack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query));
}

int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
			     struct ib_device *device, u8 port_num,
			     u8 method,
			     struct ib_sa_mcmember_rec *rec,
			     ib_sa_comp_mask comp_mask,
			     int timeout_ms, gfp_t gfp_mask,
			     void (*callback)(int status,
					      struct ib_sa_mcmember_rec *resp,
					      void *context),
			     void *context,
			     struct ib_sa_query **sa_query)
{
	struct ib_sa_mcmember_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port   *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(&query->sa_query, agent);

	query->sa_query.callback = callback ? ib_sa_mcmember_rec_callback : NULL;
	query->sa_query.release  = ib_sa_mcmember_rec_release;
	mad->mad_hdr.method	 = method;
	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
	mad->sa_hdr.comp_mask	 = comp_mask;

	ib_pack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
		rec, mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}
/* Support GuidInfoRecord */
static void ib_sa_guidinfo_rec_callback(struct ib_sa_query *sa_query,
					int status,
					struct ib_sa_mad *mad)
{
	struct ib_sa_guidinfo_query *query =
		container_of(sa_query, struct ib_sa_guidinfo_query, sa_query);

	if (mad) {
		struct ib_sa_guidinfo_rec rec;

		ib_unpack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_guidinfo_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_guidinfo_query, sa_query));
}

int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
			      struct ib_device *device, u8 port_num,
			      struct ib_sa_guidinfo_rec *rec,
			      ib_sa_comp_mask comp_mask, u8 method,
			      int timeout_ms, gfp_t gfp_mask,
			      void (*callback)(int status,
					       struct ib_sa_guidinfo_rec *resp,
					       void *context),
			      void *context,
			      struct ib_sa_query **sa_query)
{
	struct ib_sa_guidinfo_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	if (method != IB_MGMT_METHOD_GET &&
	    method != IB_MGMT_METHOD_SET &&
	    method != IB_SA_METHOD_DELETE) {
		return -EINVAL;
	}

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(&query->sa_query, agent);

	query->sa_query.callback = callback ? ib_sa_guidinfo_rec_callback : NULL;
	query->sa_query.release  = ib_sa_guidinfo_rec_release;

	mad->mad_hdr.method	 = method;
	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_GUID_INFO_REC);
	mad->sa_hdr.comp_mask	 = comp_mask;

	ib_pack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table), rec,
		mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_guid_info_rec_query);
bool ib_sa_sendonly_fullmem_support(struct ib_sa_client *client,
				    struct ib_device *device,
				    u8 port_num)
{
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	bool ret = false;
	unsigned long flags;

	if (!sa_dev)
		return ret;

	port  = &sa_dev->port[port_num - sa_dev->start_port];

	spin_lock_irqsave(&port->classport_lock, flags);
	if ((port->classport_info.valid) &&
	    (port->classport_info.data.type == RDMA_CLASS_PORT_INFO_IB))
		ret = ib_get_cpi_capmask2(&port->classport_info.data.ib)
			& IB_SA_CAP_MASK2_SENDONLY_FULL_MEM_SUPPORT;
	spin_unlock_irqrestore(&port->classport_lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_sa_sendonly_fullmem_support);

struct ib_classport_info_context {
	struct completion	done;
	struct ib_sa_query	*sa_query;
};

static void ib_classportinfo_cb(void *context)
{
	struct ib_classport_info_context *cb_ctx = context;

	complete(&cb_ctx->done);
}
static void ib_sa_classport_info_rec_callback(struct ib_sa_query *sa_query,
					      int status,
					      struct ib_sa_mad *mad)
{
	unsigned long flags;
	struct ib_sa_classport_info_query *query =
		container_of(sa_query, struct ib_sa_classport_info_query, sa_query);
	struct ib_sa_classport_cache *info = &sa_query->port->classport_info;

	if (mad) {
		if (sa_query->flags & IB_SA_QUERY_OPA) {
			struct opa_class_port_info rec;

			ib_unpack(opa_classport_info_rec_table,
				  ARRAY_SIZE(opa_classport_info_rec_table),
				  mad->data, &rec);

			spin_lock_irqsave(&sa_query->port->classport_lock,
					  flags);
			if (!status && !info->valid) {
				memcpy(&info->data.opa, &rec,
				       sizeof(info->data.opa));

				info->valid = true;
				info->data.type = RDMA_CLASS_PORT_INFO_OPA;
			}
			spin_unlock_irqrestore(&sa_query->port->classport_lock,
					       flags);

		} else {
			struct ib_class_port_info rec;

			ib_unpack(ib_classport_info_rec_table,
				  ARRAY_SIZE(ib_classport_info_rec_table),
				  mad->data, &rec);

			spin_lock_irqsave(&sa_query->port->classport_lock,
					  flags);
			if (!status && !info->valid) {
				memcpy(&info->data.ib, &rec,
				       sizeof(info->data.ib));

				info->valid = true;
				info->data.type = RDMA_CLASS_PORT_INFO_IB;
			}
			spin_unlock_irqrestore(&sa_query->port->classport_lock,
					       flags);
		}
	}
	query->callback(query->context);
}

static void ib_sa_classport_info_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_classport_info_query,
			   sa_query));
}
static int ib_sa_classport_info_rec_query(struct ib_sa_port *port,
					   int timeout_ms,
					   void (*callback)(void *context),
					   void *context,
					   struct ib_sa_query **sa_query)
{
	struct ib_mad_agent *agent;
	struct ib_sa_classport_info_query *query;
	struct ib_sa_mad *mad;
	gfp_t gfp_mask = GFP_KERNEL;
	int ret;

	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	query->sa_query.flags |= rdma_cap_opa_ah(port->agent->device,
						 port->port_num) ?
				 IB_SA_QUERY_OPA : 0;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err_free;

	query->callback = callback;
	query->context = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(&query->sa_query, agent);

	query->sa_query.callback = ib_sa_classport_info_rec_callback;
	query->sa_query.release  = ib_sa_classport_info_rec_release;
	mad->mad_hdr.method	 = IB_MGMT_METHOD_GET;
	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_CLASS_PORTINFO);
	mad->sa_hdr.comp_mask	 = 0;
	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err_free_mad;

	return ret;

err_free_mad:
	*sa_query = NULL;
	free_mad(&query->sa_query);

err_free:
	kfree(query);
	return ret;
}
static void update_ib_cpi(struct work_struct *work)
{
	struct ib_sa_port *port =
		container_of(work, struct ib_sa_port, ib_cpi_work.work);
	struct ib_classport_info_context *cb_context;
	unsigned long flags;
	int ret;

	/* If the classport info is valid, nothing
	 * to do here.
	 */
	spin_lock_irqsave(&port->classport_lock, flags);
	if (port->classport_info.valid) {
		spin_unlock_irqrestore(&port->classport_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&port->classport_lock, flags);

	cb_context = kmalloc(sizeof(*cb_context), GFP_KERNEL);
	if (!cb_context)
		goto err_nomem;

	init_completion(&cb_context->done);

	ret = ib_sa_classport_info_rec_query(port, 3000,
					     ib_classportinfo_cb, cb_context,
					     &cb_context->sa_query);
	if (ret < 0)
		goto free_cb_err;
	wait_for_completion(&cb_context->done);
free_cb_err:
	kfree(cb_context);
	spin_lock_irqsave(&port->classport_lock, flags);

	/* If the classport info is still not valid, the query should have
	 * failed for some reason. Retry issuing the query
	 */
	if (!port->classport_info.valid) {
		port->classport_info.retry_cnt++;
		if (port->classport_info.retry_cnt <=
		    IB_SA_CPI_MAX_RETRY_CNT) {
			unsigned long delay =
				msecs_to_jiffies(IB_SA_CPI_RETRY_WAIT);

			queue_delayed_work(ib_wq, &port->ib_cpi_work, delay);
		}
	}
	spin_unlock_irqrestore(&port->classport_lock, flags);

err_nomem:
	return;
}
static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_sa_query *query = mad_send_wc->send_buf->context[0];
	unsigned long flags;

	if (query->callback)
		switch (mad_send_wc->status) {
		case IB_WC_SUCCESS:
			/* No callback -- already got recv */
			break;
		case IB_WC_RESP_TIMEOUT_ERR:
			query->callback(query, -ETIMEDOUT, NULL);
			break;
		case IB_WC_WR_FLUSH_ERR:
			query->callback(query, -EINTR, NULL);
			break;
		default:
			query->callback(query, -EIO, NULL);
			break;
		}

	spin_lock_irqsave(&idr_lock, flags);
	idr_remove(&query_idr, query->id);
	spin_unlock_irqrestore(&idr_lock, flags);

	free_mad(query);
	if (query->client)
		ib_sa_client_put(query->client);
	query->release(query);
}
static void recv_handler(struct ib_mad_agent *mad_agent,
			 struct ib_mad_send_buf *send_buf,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_sa_query *query;

	if (!send_buf)
		return;

	query = send_buf->context[0];
	if (query->callback) {
		if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
			query->callback(query,
					mad_recv_wc->recv_buf.mad->mad_hdr.status ?
					-EINVAL : 0,
					(struct ib_sa_mad *) mad_recv_wc->recv_buf.mad);
		else
			query->callback(query, -EIO, NULL);
	}

	ib_free_recv_mad(mad_recv_wc);
}
static void update_sm_ah(struct work_struct *work)
{
	struct ib_sa_port *port =
		container_of(work, struct ib_sa_port, update_task);
	struct ib_sa_sm_ah *new_ah;
	struct ib_port_attr port_attr;
	struct rdma_ah_attr   ah_attr;

	if (ib_query_port(port->agent->device, port->port_num, &port_attr)) {
		pr_warn("Couldn't query port\n");
		return;
	}

	new_ah = kmalloc(sizeof(*new_ah), GFP_KERNEL);
	if (!new_ah)
		return;

	kref_init(&new_ah->ref);
	new_ah->src_path_mask = (1 << port_attr.lmc) - 1;

	new_ah->pkey_index = 0;
	if (ib_find_pkey(port->agent->device, port->port_num,
			 IB_DEFAULT_PKEY_FULL, &new_ah->pkey_index))
		pr_err("Couldn't find index for default PKey\n");

	memset(&ah_attr, 0, sizeof(ah_attr));
	ah_attr.type = rdma_ah_find_type(port->agent->device,
					 port->port_num);
	rdma_ah_set_dlid(&ah_attr, port_attr.sm_lid);
	rdma_ah_set_sl(&ah_attr, port_attr.sm_sl);
	rdma_ah_set_port_num(&ah_attr, port->port_num);
	if (port_attr.grh_required) {
		rdma_ah_set_ah_flags(&ah_attr, IB_AH_GRH);

		rdma_ah_set_subnet_prefix(&ah_attr,
					  cpu_to_be64(port_attr.subnet_prefix));
		rdma_ah_set_interface_id(&ah_attr,
					 cpu_to_be64(IB_SA_WELL_KNOWN_GUID));
	}

	new_ah->ah = rdma_create_ah(port->agent->qp->pd, &ah_attr);
	if (IS_ERR(new_ah->ah)) {
		pr_warn("Couldn't create new SM AH\n");
		kfree(new_ah);
		return;
	}

	spin_lock_irq(&port->ah_lock);
	if (port->sm_ah)
		kref_put(&port->sm_ah->ref, free_sm_ah);
	port->sm_ah = new_ah;
	spin_unlock_irq(&port->ah_lock);
}
static void ib_sa_event(struct ib_event_handler *handler,
			struct ib_event *event)
{
	if (event->event == IB_EVENT_PORT_ERR    ||
	    event->event == IB_EVENT_PORT_ACTIVE ||
	    event->event == IB_EVENT_LID_CHANGE  ||
	    event->event == IB_EVENT_PKEY_CHANGE ||
	    event->event == IB_EVENT_SM_CHANGE   ||
	    event->event == IB_EVENT_CLIENT_REREGISTER) {
		unsigned long flags;
		struct ib_sa_device *sa_dev =
			container_of(handler, typeof(*sa_dev), event_handler);
		u8 port_num = event->element.port_num - sa_dev->start_port;
		struct ib_sa_port *port = &sa_dev->port[port_num];

		if (!rdma_cap_ib_sa(handler->device, port->port_num))
			return;

		spin_lock_irqsave(&port->ah_lock, flags);
		if (port->sm_ah)
			kref_put(&port->sm_ah->ref, free_sm_ah);
		port->sm_ah = NULL;
		spin_unlock_irqrestore(&port->ah_lock, flags);

		if (event->event == IB_EVENT_SM_CHANGE ||
		    event->event == IB_EVENT_CLIENT_REREGISTER ||
		    event->event == IB_EVENT_LID_CHANGE ||
		    event->event == IB_EVENT_PORT_ACTIVE) {
			unsigned long delay =
				msecs_to_jiffies(IB_SA_CPI_RETRY_WAIT);

			spin_lock_irqsave(&port->classport_lock, flags);
			port->classport_info.valid = false;
			port->classport_info.retry_cnt = 0;
			spin_unlock_irqrestore(&port->classport_lock, flags);
			queue_delayed_work(ib_wq,
					   &port->ib_cpi_work, delay);
		}
		queue_work(ib_wq, &sa_dev->port[port_num].update_task);
	}
}
static void ib_sa_add_one(struct ib_device *device)
{
	struct ib_sa_device *sa_dev;
	int s, e, i;
	int count = 0;

	s = rdma_start_port(device);
	e = rdma_end_port(device);

	sa_dev = kzalloc(sizeof *sa_dev +
			 (e - s + 1) * sizeof (struct ib_sa_port),
			 GFP_KERNEL);
	if (!sa_dev)
		return;

	sa_dev->start_port = s;
	sa_dev->end_port   = e;

	for (i = 0; i <= e - s; ++i) {
		spin_lock_init(&sa_dev->port[i].ah_lock);
		if (!rdma_cap_ib_sa(device, i + 1))
			continue;

		sa_dev->port[i].sm_ah    = NULL;
		sa_dev->port[i].port_num = i + s;

		spin_lock_init(&sa_dev->port[i].classport_lock);
		sa_dev->port[i].classport_info.valid = false;

		sa_dev->port[i].agent =
			ib_register_mad_agent(device, i + s, IB_QPT_GSI,
					      NULL, 0, send_handler,
					      recv_handler, sa_dev, 0);
		if (IS_ERR(sa_dev->port[i].agent))
			goto err;

		INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);
		INIT_DELAYED_WORK(&sa_dev->port[i].ib_cpi_work,
				  update_ib_cpi);

		count++;
	}

	if (!count)
		goto free;

	ib_set_client_data(device, &sa_client, sa_dev);

	/*
	 * We register our event handler after everything is set up,
	 * and then update our cached info after the event handler is
	 * registered to avoid any problems if a port changes state
	 * during our initialization.
	 */
	INIT_IB_EVENT_HANDLER(&sa_dev->event_handler, device, ib_sa_event);
	if (ib_register_event_handler(&sa_dev->event_handler))
		goto err;

	for (i = 0; i <= e - s; ++i) {
		if (rdma_cap_ib_sa(device, i + 1))
			update_sm_ah(&sa_dev->port[i].update_task);
	}

	return;

err:
	while (--i >= 0) {
		if (rdma_cap_ib_sa(device, i + 1))
			ib_unregister_mad_agent(sa_dev->port[i].agent);
	}
free:
	kfree(sa_dev);
	return;
}
static void ib_sa_remove_one(struct ib_device *device, void *client_data)
{
	struct ib_sa_device *sa_dev = client_data;
	int i;

	if (!sa_dev)
		return;

	ib_unregister_event_handler(&sa_dev->event_handler);
	flush_workqueue(ib_wq);

	for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
		if (rdma_cap_ib_sa(device, i + 1)) {
			cancel_delayed_work_sync(&sa_dev->port[i].ib_cpi_work);
			ib_unregister_mad_agent(sa_dev->port[i].agent);
			if (sa_dev->port[i].sm_ah)
				kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
		}
	}

	kfree(sa_dev);
}
int ib_sa_init(void)
{
	int ret;

	get_random_bytes(&tid, sizeof tid);

	atomic_set(&ib_nl_sa_request_seq, 0);

	ret = ib_register_client(&sa_client);
	if (ret) {
		pr_err("Couldn't register ib_sa client\n");
		goto err1;
	}

	ret = mcast_init();
	if (ret) {
		pr_err("Couldn't initialize multicast handling\n");
		goto err2;
	}

	ib_nl_wq = alloc_ordered_workqueue("ib_nl_sa_wq", WQ_MEM_RECLAIM);
	if (!ib_nl_wq) {
		ret = -ENOMEM;
		goto err3;
	}

	INIT_DELAYED_WORK(&ib_nl_timed_work, ib_nl_request_timeout);

	return 0;

err3:
	mcast_cleanup();
err2:
	ib_unregister_client(&sa_client);
err1:
	return ret;
}
void ib_sa_cleanup(void)
{
	cancel_delayed_work(&ib_nl_timed_work);
	flush_workqueue(ib_nl_wq);
	destroy_workqueue(ib_nl_wq);
	mcast_cleanup();
	ib_unregister_client(&sa_client);
	idr_destroy(&query_idr);
}