/*
 * Copyright (c) 2004-2007 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/random.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/workqueue.h>
#include <linux/kdev_t.h>
#include <linux/etherdevice.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include "cm_msgs.h"

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");

static const char * const ibcm_rej_reason_strs[] = {
	[IB_CM_REJ_NO_QP]			= "no QP",
	[IB_CM_REJ_NO_EEC]			= "no EEC",
	[IB_CM_REJ_NO_RESOURCES]		= "no resources",
	[IB_CM_REJ_TIMEOUT]			= "timeout",
	[IB_CM_REJ_UNSUPPORTED]			= "unsupported",
	[IB_CM_REJ_INVALID_COMM_ID]		= "invalid comm ID",
	[IB_CM_REJ_INVALID_COMM_INSTANCE]	= "invalid comm instance",
	[IB_CM_REJ_INVALID_SERVICE_ID]		= "invalid service ID",
	[IB_CM_REJ_INVALID_TRANSPORT_TYPE]	= "invalid transport type",
	[IB_CM_REJ_STALE_CONN]			= "stale conn",
	[IB_CM_REJ_RDC_NOT_EXIST]		= "RDC not exist",
	[IB_CM_REJ_INVALID_GID]			= "invalid GID",
	[IB_CM_REJ_INVALID_LID]			= "invalid LID",
	[IB_CM_REJ_INVALID_SL]			= "invalid SL",
	[IB_CM_REJ_INVALID_TRAFFIC_CLASS]	= "invalid traffic class",
	[IB_CM_REJ_INVALID_HOP_LIMIT]		= "invalid hop limit",
	[IB_CM_REJ_INVALID_PACKET_RATE]		= "invalid packet rate",
	[IB_CM_REJ_INVALID_ALT_GID]		= "invalid alt GID",
	[IB_CM_REJ_INVALID_ALT_LID]		= "invalid alt LID",
	[IB_CM_REJ_INVALID_ALT_SL]		= "invalid alt SL",
	[IB_CM_REJ_INVALID_ALT_TRAFFIC_CLASS]	= "invalid alt traffic class",
	[IB_CM_REJ_INVALID_ALT_HOP_LIMIT]	= "invalid alt hop limit",
	[IB_CM_REJ_INVALID_ALT_PACKET_RATE]	= "invalid alt packet rate",
	[IB_CM_REJ_PORT_CM_REDIRECT]		= "port CM redirect",
	[IB_CM_REJ_PORT_REDIRECT]		= "port redirect",
	[IB_CM_REJ_INVALID_MTU]			= "invalid MTU",
	[IB_CM_REJ_INSUFFICIENT_RESP_RESOURCES]	= "insufficient resp resources",
	[IB_CM_REJ_CONSUMER_DEFINED]		= "consumer defined",
	[IB_CM_REJ_INVALID_RNR_RETRY]		= "invalid RNR retry",
	[IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID]	= "duplicate local comm ID",
	[IB_CM_REJ_INVALID_CLASS_VERSION]	= "invalid class version",
	[IB_CM_REJ_INVALID_FLOW_LABEL]		= "invalid flow label",
	[IB_CM_REJ_INVALID_ALT_FLOW_LABEL]	= "invalid alt flow label",
};

const char *__attribute_const__ ibcm_reject_msg(int reason)
{
	size_t index = reason;

	if (index < ARRAY_SIZE(ibcm_rej_reason_strs) &&
	    ibcm_rej_reason_strs[index])
		return ibcm_rej_reason_strs[index];

	return "unrecognized reason";
}
EXPORT_SYMBOL(ibcm_reject_msg);
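
/*
 * Usage note (a minimal sketch, not part of this file's control flow):
 * consumers typically feed the REJ reason from a received event into this
 * helper when logging, e.g.:
 *
 *	pr_debug("REJ received: %s\n",
 *		 ibcm_reject_msg(event->param.rej_rcvd.reason));
 *
 * where "event" is the ib_cm_event passed to the consumer's cm_handler.
 */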
static void cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device, void *client_data);

static struct ib_client cm_client = {
	.name   = "cm",
	.add    = cm_add_one,
	.remove = cm_remove_one
};

static struct ib_cm {
	spinlock_t lock;
	struct list_head device_list;
	rwlock_t device_lock;
	struct rb_root listen_service_table;
	u64 listen_service_id;
	/* struct rb_root peer_service_table; todo: fix peer to peer */
	struct rb_root remote_qp_table;
	struct rb_root remote_id_table;
	struct rb_root remote_sidr_table;
	struct idr local_id_table;
	__be32 random_id_operand;
	struct list_head timewait_list;
	struct workqueue_struct *wq;
	/* Synchronizes CM port state changes */
	spinlock_t state_lock;
} cm;

/* Counter indexes ordered by attribute ID */
enum {
	CM_REQ_COUNTER,
	CM_MRA_COUNTER,
	CM_REJ_COUNTER,
	CM_REP_COUNTER,
	CM_RTU_COUNTER,
	CM_DREQ_COUNTER,
	CM_DREP_COUNTER,
	CM_SIDR_REQ_COUNTER,
	CM_SIDR_REP_COUNTER,
	CM_LAP_COUNTER,
	CM_APR_COUNTER,
	CM_ATTR_COUNT,
	CM_ATTR_ID_OFFSET = 0x0010,
};

enum {
	CM_XMIT,
	CM_XMIT_RETRIES,
	CM_RECV,
	CM_RECV_DUPLICATES,
	CM_COUNTER_GROUPS
};

static char const counter_group_names[CM_COUNTER_GROUPS]
				     [sizeof("cm_rx_duplicates")] = {
	"cm_tx_msgs", "cm_tx_retries",
	"cm_rx_msgs", "cm_rx_duplicates"
};

struct cm_counter_group {
	struct kobject obj;
	atomic_long_t counter[CM_ATTR_COUNT];
};

struct cm_counter_attribute {
	struct attribute attr;
	int index;
};

#define CM_COUNTER_ATTR(_name, _index) \
struct cm_counter_attribute cm_##_name##_counter_attr = { \
	.attr = { .name = __stringify(_name), .mode = 0444 }, \
	.index = _index \
}

static CM_COUNTER_ATTR(req, CM_REQ_COUNTER);
static CM_COUNTER_ATTR(mra, CM_MRA_COUNTER);
static CM_COUNTER_ATTR(rej, CM_REJ_COUNTER);
static CM_COUNTER_ATTR(rep, CM_REP_COUNTER);
static CM_COUNTER_ATTR(rtu, CM_RTU_COUNTER);
static CM_COUNTER_ATTR(dreq, CM_DREQ_COUNTER);
static CM_COUNTER_ATTR(drep, CM_DREP_COUNTER);
static CM_COUNTER_ATTR(sidr_req, CM_SIDR_REQ_COUNTER);
static CM_COUNTER_ATTR(sidr_rep, CM_SIDR_REP_COUNTER);
static CM_COUNTER_ATTR(lap, CM_LAP_COUNTER);
static CM_COUNTER_ATTR(apr, CM_APR_COUNTER);

static struct attribute *cm_counter_default_attrs[] = {
	&cm_req_counter_attr.attr,
	&cm_mra_counter_attr.attr,
	&cm_rej_counter_attr.attr,
	&cm_rep_counter_attr.attr,
	&cm_rtu_counter_attr.attr,
	&cm_dreq_counter_attr.attr,
	&cm_drep_counter_attr.attr,
	&cm_sidr_req_counter_attr.attr,
	&cm_sidr_rep_counter_attr.attr,
	&cm_lap_counter_attr.attr,
	&cm_apr_counter_attr.attr,
	NULL
};

struct cm_port {
	struct cm_device *cm_dev;
	struct ib_mad_agent *mad_agent;
	struct kobject port_obj;
	u8 port_num;
	struct list_head cm_priv_prim_list;
	struct list_head cm_priv_altr_list;
	struct cm_counter_group counter_group[CM_COUNTER_GROUPS];
};

struct cm_device {
	struct list_head list;
	struct ib_device *ib_device;
	struct device *device;
	u8 ack_delay;
	int going_down;
	struct cm_port *port[0];
};

struct cm_av {
	struct cm_port *port;
	union ib_gid dgid;
	struct rdma_ah_attr ah_attr;
	u16 pkey_index;
	u8 timeout;
};

struct cm_work {
	struct delayed_work work;
	struct list_head list;
	struct cm_port *port;
	struct ib_mad_recv_wc *mad_recv_wc;	/* Received MADs */
	__be32 local_id;			/* Established / timewait */
	__be32 remote_id;
	struct ib_cm_event cm_event;
	struct sa_path_rec path[0];
};

struct cm_timewait_info {
	struct cm_work work;			/* Must be first. */
	struct list_head list;
	struct rb_node remote_qp_node;
	struct rb_node remote_id_node;
	__be64 remote_ca_guid;
	__be32 remote_qpn;
	u8 inserted_remote_qp;
	u8 inserted_remote_id;
};

struct cm_id_private {
	struct ib_cm_id	id;

	struct rb_node service_node;
	struct rb_node sidr_id_node;
	spinlock_t lock;	/* Do not acquire inside cm.lock */
	struct completion comp;
	atomic_t refcount;
	/* Number of clients sharing this ib_cm_id. Only valid for listeners.
	 * Protected by the cm.lock spinlock. */
	int listen_sharecount;

	struct ib_mad_send_buf *msg;
	struct cm_timewait_info *timewait_info;
	/* todo: use alternate port on send failure */
	struct cm_av av;
	struct cm_av alt_av;

	void *private_data;
	__be64 tid;
	__be32 local_qpn;
	__be32 remote_qpn;
	enum ib_qp_type qp_type;
	__be32 sq_psn;
	__be32 rq_psn;
	int timeout_ms;
	enum ib_mtu path_mtu;
	__be16 pkey;
	u8 private_data_len;
	u8 max_cm_retries;
	u8 peer_to_peer;
	u8 responder_resources;
	u8 initiator_depth;
	u8 retry_count;
	u8 rnr_retry_count;
	u8 service_timeout;
	u8 target_ack_delay;

	struct list_head prim_list;
	struct list_head altr_list;
	/* Indicates that the send port MAD agent is registered and av is set */
	int prim_send_port_not_ready;
	int altr_send_port_not_ready;

	struct list_head work_list;
	atomic_t work_count;
};

static void cm_work_handler(struct work_struct *work);

static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
	if (atomic_dec_and_test(&cm_id_priv->refcount))
		complete(&cm_id_priv->comp);
}

static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
			struct ib_mad_send_buf **msg)
{
	struct ib_mad_agent *mad_agent;
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;
	struct cm_av *av;
	unsigned long flags, flags2;
	int ret = 0;

	/* don't let the port be released till the agent is down */
	spin_lock_irqsave(&cm.state_lock, flags2);
	spin_lock_irqsave(&cm.lock, flags);
	if (!cm_id_priv->prim_send_port_not_ready)
		av = &cm_id_priv->av;
	else if (!cm_id_priv->altr_send_port_not_ready &&
		 (cm_id_priv->alt_av.port))
		av = &cm_id_priv->alt_av;
	else {
		pr_info("%s: not valid CM id\n", __func__);
		ret = -ENODEV;
		spin_unlock_irqrestore(&cm.lock, flags);
		goto out;
	}
	spin_unlock_irqrestore(&cm.lock, flags);
	/* Make sure the port hasn't released the mad yet */
	mad_agent = cm_id_priv->av.port->mad_agent;
	if (!mad_agent) {
		pr_info("%s: not a valid MAD agent\n", __func__);
		ret = -ENODEV;
		goto out;
	}
	ah = rdma_create_ah(mad_agent->qp->pd, &av->ah_attr);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto out;
	}

	m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
			       cm_id_priv->av.pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC,
			       IB_MGMT_BASE_VERSION);
	if (IS_ERR(m)) {
		rdma_destroy_ah(ah);
		ret = PTR_ERR(m);
		goto out;
	}

	/* Timeout set by caller if response is expected. */
	m->ah = ah;
	m->retries = cm_id_priv->max_cm_retries;

	atomic_inc(&cm_id_priv->refcount);
	m->context[0] = cm_id_priv;
	*msg = m;

out:
	spin_unlock_irqrestore(&cm.state_lock, flags2);
	return ret;
}

static int cm_alloc_response_msg(struct cm_port *port,
				 struct ib_mad_recv_wc *mad_recv_wc,
				 struct ib_mad_send_buf **msg)
{
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;

	ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
				  mad_recv_wc->recv_buf.grh, port->port_num);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC,
			       IB_MGMT_BASE_VERSION);
	if (IS_ERR(m)) {
		rdma_destroy_ah(ah);
		return PTR_ERR(m);
	}
	m->ah = ah;
	*msg = m;
	return 0;
}

static void cm_free_msg(struct ib_mad_send_buf *msg)
{
	rdma_destroy_ah(msg->ah);
	if (msg->context[0])
		cm_deref_id(msg->context[0]);
	ib_free_send_mad(msg);
}

static void * cm_copy_private_data(const void *private_data,
				   u8 private_data_len)
{
	void *data;

	if (!private_data || !private_data_len)
		return NULL;

	data = kmemdup(private_data, private_data_len, GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	return data;
}

static void cm_set_private_data(struct cm_id_private *cm_id_priv,
				void *private_data, u8 private_data_len)
{
	if (cm_id_priv->private_data && cm_id_priv->private_data_len)
		kfree(cm_id_priv->private_data);

	cm_id_priv->private_data = private_data;
	cm_id_priv->private_data_len = private_data_len;
}

static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
				    struct ib_grh *grh, struct cm_av *av)
{
	av->port = port;
	av->pkey_index = wc->pkey_index;
	ib_init_ah_from_wc(port->cm_dev->ib_device, port->port_num, wc,
			   grh, &av->ah_attr);
}

static int cm_init_av_by_path(struct sa_path_rec *path, struct cm_av *av,
			      struct cm_id_private *cm_id_priv)
{
	struct cm_device *cm_dev;
	struct cm_port *port = NULL;
	unsigned long flags;
	int ret;
	u8 p;
	struct net_device *ndev = ib_get_ndev_from_path(path);

	read_lock_irqsave(&cm.device_lock, flags);
	list_for_each_entry(cm_dev, &cm.device_list, list) {
		if (!ib_find_cached_gid(cm_dev->ib_device, &path->sgid,
					path->gid_type, ndev, &p, NULL)) {
			port = cm_dev->port[p-1];
			break;
		}
	}
	read_unlock_irqrestore(&cm.device_lock, flags);

	if (ndev)
		dev_put(ndev);

	if (!port)
		return -EINVAL;

	ret = ib_find_cached_pkey(cm_dev->ib_device, port->port_num,
				  be16_to_cpu(path->pkey), &av->pkey_index);
	if (ret)
		return ret;

	av->port = port;
	ib_init_ah_from_path(cm_dev->ib_device, port->port_num, path,
			     &av->ah_attr);
	av->timeout = path->packet_life_time + 1;

	spin_lock_irqsave(&cm.lock, flags);
	if (&cm_id_priv->av == av)
		list_add_tail(&cm_id_priv->prim_list, &port->cm_priv_prim_list);
	else if (&cm_id_priv->alt_av == av)
		list_add_tail(&cm_id_priv->altr_list, &port->cm_priv_altr_list);
	else
		ret = -EINVAL;

	spin_unlock_irqrestore(&cm.lock, flags);

	return ret;
}

static int cm_alloc_id(struct cm_id_private *cm_id_priv)
{
	unsigned long flags;
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_irqsave(&cm.lock, flags);

	id = idr_alloc_cyclic(&cm.local_id_table, cm_id_priv, 0, 0, GFP_NOWAIT);

	spin_unlock_irqrestore(&cm.lock, flags);
	idr_preload_end();

	cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
	return id < 0 ? id : 0;
}

static void cm_free_id(__be32 local_id)
{
	spin_lock_irq(&cm.lock);
	idr_remove(&cm.local_id_table,
		   (__force int) (local_id ^ cm.random_id_operand));
	spin_unlock_irq(&cm.lock);
}

static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;

	cm_id_priv = idr_find(&cm.local_id_table,
			      (__force int) (local_id ^ cm.random_id_operand));
	if (cm_id_priv) {
		if (cm_id_priv->id.remote_id == remote_id)
			atomic_inc(&cm_id_priv->refcount);
		else
			cm_id_priv = NULL;
	}

	return cm_id_priv;
}

static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;

	spin_lock_irq(&cm.lock);
	cm_id_priv = cm_get_id(local_id, remote_id);
	spin_unlock_irq(&cm.lock);

	return cm_id_priv;
}

/*
 * Trivial helpers to strip endian annotation and compare; the
 * endianness doesn't actually matter since we just need a stable
 * order for the RB tree.
 */
static int be32_lt(__be32 a, __be32 b)
{
	return (__force u32) a < (__force u32) b;
}

static int be32_gt(__be32 a, __be32 b)
{
	return (__force u32) a > (__force u32) b;
}

static int be64_lt(__be64 a, __be64 b)
{
	return (__force u64) a < (__force u64) b;
}

static int be64_gt(__be64 a, __be64 b)
{
	return (__force u64) a > (__force u64) b;
}

static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
{
	struct rb_node **link = &cm.listen_service_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	__be64 service_id = cm_id_priv->id.service_id;
	__be64 service_mask = cm_id_priv->id.service_mask;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  service_node);
		if ((cur_cm_id_priv->id.service_mask & service_id) ==
		    (service_mask & cur_cm_id_priv->id.service_id) &&
		    (cm_id_priv->id.device == cur_cm_id_priv->id.device))
			return cur_cm_id_priv;

		if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
			link = &(*link)->rb_left;
		else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
			link = &(*link)->rb_right;
		else if (be64_lt(service_id, cur_cm_id_priv->id.service_id))
			link = &(*link)->rb_left;
		else if (be64_gt(service_id, cur_cm_id_priv->id.service_id))
			link = &(*link)->rb_right;
		else
			link = &(*link)->rb_right;
	}
	rb_link_node(&cm_id_priv->service_node, parent, link);
	rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
	return NULL;
}

static struct cm_id_private * cm_find_listen(struct ib_device *device,
					     __be64 service_id)
{
	struct rb_node *node = cm.listen_service_table.rb_node;
	struct cm_id_private *cm_id_priv;

	while (node) {
		cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
		if ((cm_id_priv->id.service_mask & service_id) ==
		     cm_id_priv->id.service_id &&
		    (cm_id_priv->id.device == device))
			return cm_id_priv;

		if (device < cm_id_priv->id.device)
			node = node->rb_left;
		else if (device > cm_id_priv->id.device)
			node = node->rb_right;
		else if (be64_lt(service_id, cm_id_priv->id.service_id))
			node = node->rb_left;
		else if (be64_gt(service_id, cm_id_priv->id.service_id))
			node = node->rb_right;
		else
			node = node->rb_right;
	}
	return NULL;
}

static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
						     *timewait_info)
{
	struct rb_node **link = &cm.remote_id_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_id = timewait_info->work.remote_id;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_id_node);
		if (be32_lt(remote_id, cur_timewait_info->work.remote_id))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_id, cur_timewait_info->work.remote_id))
			link = &(*link)->rb_right;
		else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_left;
		else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_id = 1;
	rb_link_node(&timewait_info->remote_id_node, parent, link);
	rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
	return NULL;
}

static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
						   __be32 remote_id)
{
	struct rb_node *node = cm.remote_id_table.rb_node;
	struct cm_timewait_info *timewait_info;

	while (node) {
		timewait_info = rb_entry(node, struct cm_timewait_info,
					 remote_id_node);
		if (be32_lt(remote_id, timewait_info->work.remote_id))
			node = node->rb_left;
		else if (be32_gt(remote_id, timewait_info->work.remote_id))
			node = node->rb_right;
		else if (be64_lt(remote_ca_guid, timewait_info->remote_ca_guid))
			node = node->rb_left;
		else if (be64_gt(remote_ca_guid, timewait_info->remote_ca_guid))
			node = node->rb_right;
		else
			return timewait_info;
	}
	return NULL;
}

static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
						      *timewait_info)
{
	struct rb_node **link = &cm.remote_qp_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_qpn = timewait_info->remote_qpn;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_qp_node);
		if (be32_lt(remote_qpn, cur_timewait_info->remote_qpn))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_qpn, cur_timewait_info->remote_qpn))
			link = &(*link)->rb_right;
		else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_left;
		else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_qp = 1;
	rb_link_node(&timewait_info->remote_qp_node, parent, link);
	rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
	return NULL;
}

static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
						    *cm_id_priv)
{
	struct rb_node **link = &cm.remote_sidr_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	union ib_gid *port_gid = &cm_id_priv->av.dgid;
	__be32 remote_id = cm_id_priv->id.remote_id;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  sidr_id_node);
		if (be32_lt(remote_id, cur_cm_id_priv->id.remote_id))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_id, cur_cm_id_priv->id.remote_id))
			link = &(*link)->rb_right;
		else {
			int cmp;
			cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
				     sizeof *port_gid);
			if (cmp < 0)
				link = &(*link)->rb_left;
			else if (cmp > 0)
				link = &(*link)->rb_right;
			else
				return cur_cm_id_priv;
		}
	}
	rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
	rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	return NULL;
}

static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
			       enum ib_cm_sidr_status status)
{
	struct ib_cm_sidr_rep_param param;

	memset(&param, 0, sizeof param);
	param.status = status;
	ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
}

struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
				 ib_cm_handler cm_handler,
				 void *context)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
	if (!cm_id_priv)
		return ERR_PTR(-ENOMEM);

	cm_id_priv->id.state = IB_CM_IDLE;
	cm_id_priv->id.device = device;
	cm_id_priv->id.cm_handler = cm_handler;
	cm_id_priv->id.context = context;
	cm_id_priv->id.remote_cm_qpn = 1;
	ret = cm_alloc_id(cm_id_priv);
	if (ret)
		goto error;

	spin_lock_init(&cm_id_priv->lock);
	init_completion(&cm_id_priv->comp);
	INIT_LIST_HEAD(&cm_id_priv->work_list);
	INIT_LIST_HEAD(&cm_id_priv->prim_list);
	INIT_LIST_HEAD(&cm_id_priv->altr_list);
	atomic_set(&cm_id_priv->work_count, -1);
	atomic_set(&cm_id_priv->refcount, 1);
	return &cm_id_priv->id;

error:
	kfree(cm_id_priv);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ib_create_cm_id);

static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv)
{
	struct cm_work *work;

	if (list_empty(&cm_id_priv->work_list))
		return NULL;

	work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
	list_del(&work->list);
	return work;
}

static void cm_free_work(struct cm_work *work)
{
	if (work->mad_recv_wc)
		ib_free_recv_mad(work->mad_recv_wc);
	kfree(work);
}

static inline int cm_convert_to_ms(int iba_time)
{
	/* approximate conversion to ms from 4.096us x 2^iba_time */
	return 1 << max(iba_time - 8, 0);
}
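
/*
 * Worked example for the approximation above: for iba_time = 20 the exact
 * value is 4.096us * 2^20 ~= 4295 ms, while 1 << (20 - 8) = 4096 ms, so the
 * approximation rounds down by a few percent. For iba_time <= 8 the result
 * clamps to 1 ms.
 */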
/*
 * calculate: 4.096x2^ack_timeout = 4.096x2^ack_delay + 2x4.096x2^life_time
 * Because of how ack_timeout is stored, adding one doubles the timeout.
 * To avoid large timeouts, select the max(ack_delay, life_time + 1), and
 * increment it (round up) only if the other is within 50%.
 */
static u8 cm_ack_timeout(u8 ca_ack_delay, u8 packet_life_time)
{
	int ack_timeout = packet_life_time + 1;

	if (ack_timeout >= ca_ack_delay)
		ack_timeout += (ca_ack_delay >= (ack_timeout - 1));
	else
		ack_timeout = ca_ack_delay +
			      (ack_timeout >= (ca_ack_delay - 1));

	return min(31, ack_timeout);
}
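
/*
 * Worked example: ca_ack_delay = 15, packet_life_time = 14 gives
 * ack_timeout = 15, and since ca_ack_delay (15) >= ack_timeout - 1 (14),
 * i.e. the other term is within 50%, it is rounded up to 16. That case is
 * exact: 4.096us x 2^16 == 4.096us x 2^15 + 2 x 4.096us x 2^14.
 */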
static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
{
	if (timewait_info->inserted_remote_id) {
		rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
		timewait_info->inserted_remote_id = 0;
	}

	if (timewait_info->inserted_remote_qp) {
		rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
		timewait_info->inserted_remote_qp = 0;
	}
}

static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
{
	struct cm_timewait_info *timewait_info;

	timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
	if (!timewait_info)
		return ERR_PTR(-ENOMEM);

	timewait_info->work.local_id = local_id;
	INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
	timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
	return timewait_info;
}

static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
{
	int wait_time;
	unsigned long flags;
	struct cm_device *cm_dev;

	cm_dev = ib_get_client_data(cm_id_priv->id.device, &cm_client);
	if (!cm_dev)
		return;

	spin_lock_irqsave(&cm.lock, flags);
	cm_cleanup_timewait(cm_id_priv->timewait_info);
	list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list);
	spin_unlock_irqrestore(&cm.lock, flags);

	/*
	 * The cm_id could be destroyed by the user before we exit timewait.
	 * To protect against this, we search for the cm_id after exiting
	 * timewait before notifying the user that we've exited timewait.
	 */
	cm_id_priv->id.state = IB_CM_TIMEWAIT;
	wait_time = cm_convert_to_ms(cm_id_priv->av.timeout);

	/* Check if the device started its remove_one */
	spin_lock_irqsave(&cm.lock, flags);
	if (!cm_dev->going_down)
		queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
				   msecs_to_jiffies(wait_time));
	spin_unlock_irqrestore(&cm.lock, flags);

	cm_id_priv->timewait_info = NULL;
}

static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
{
	unsigned long flags;

	cm_id_priv->id.state = IB_CM_IDLE;
	if (cm_id_priv->timewait_info) {
		spin_lock_irqsave(&cm.lock, flags);
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irqrestore(&cm.lock, flags);
		kfree(cm_id_priv->timewait_info);
		cm_id_priv->timewait_info = NULL;
	}
}

static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
retest:
	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id->state) {
	case IB_CM_LISTEN:
		spin_unlock_irq(&cm_id_priv->lock);

		spin_lock_irq(&cm.lock);
		if (--cm_id_priv->listen_sharecount > 0) {
			/* The id is still shared. */
			cm_deref_id(cm_id_priv);
			spin_unlock_irq(&cm.lock);
			return;
		}
		rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
		spin_unlock_irq(&cm.lock);
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id->state = IB_CM_IDLE;
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		spin_unlock_irq(&cm_id_priv->lock);
		break;
	case IB_CM_SIDR_REQ_RCVD:
		spin_unlock_irq(&cm_id_priv->lock);
		cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
		spin_lock_irq(&cm.lock);
		if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node))
			rb_erase(&cm_id_priv->sidr_id_node,
				 &cm.remote_sidr_table);
		spin_unlock_irq(&cm.lock);
		break;
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		spin_unlock_irq(&cm_id_priv->lock);
		ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
			       &cm_id_priv->id.device->node_guid,
			       sizeof cm_id_priv->id.device->node_guid,
			       NULL, 0);
		break;
	case IB_CM_REQ_RCVD:
		if (err == -ENOMEM) {
			/* Do not reject, to allow future retries. */
			cm_reset_to_idle(cm_id_priv);
			spin_unlock_irq(&cm_id_priv->lock);
		} else {
			spin_unlock_irq(&cm_id_priv->lock);
			ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
				       NULL, 0, NULL, 0);
		}
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* Fall through */
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		spin_unlock_irq(&cm_id_priv->lock);
		ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
			       NULL, 0, NULL, 0);
		break;
	case IB_CM_ESTABLISHED:
		spin_unlock_irq(&cm_id_priv->lock);
		if (cm_id_priv->qp_type == IB_QPT_XRC_TGT)
			break;
		ib_send_cm_dreq(cm_id, NULL, 0);
		goto retest;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		cm_enter_timewait(cm_id_priv);
		spin_unlock_irq(&cm_id_priv->lock);
		break;
	case IB_CM_DREQ_RCVD:
		spin_unlock_irq(&cm_id_priv->lock);
		ib_send_cm_drep(cm_id, NULL, 0);
		break;
	default:
		spin_unlock_irq(&cm_id_priv->lock);
		break;
	}

	spin_lock_irq(&cm.lock);
	if (!list_empty(&cm_id_priv->altr_list) &&
	    (!cm_id_priv->altr_send_port_not_ready))
		list_del(&cm_id_priv->altr_list);
	if (!list_empty(&cm_id_priv->prim_list) &&
	    (!cm_id_priv->prim_send_port_not_ready))
		list_del(&cm_id_priv->prim_list);
	spin_unlock_irq(&cm.lock);

	cm_free_id(cm_id->local_id);
	cm_deref_id(cm_id_priv);
	wait_for_completion(&cm_id_priv->comp);
	while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
		cm_free_work(work);
	kfree(cm_id_priv->private_data);
	kfree(cm_id_priv);
}

void ib_destroy_cm_id(struct ib_cm_id *cm_id)
{
	cm_destroy_id(cm_id, 0);
}
EXPORT_SYMBOL(ib_destroy_cm_id);

/**
 * __ib_cm_listen - Initiates listening on the specified service ID for
 *   connection and service ID resolution requests.
 * @cm_id: Connection identifier associated with the listen request.
 * @service_id: Service identifier matched against incoming connection
 *   and service ID resolution requests. The service ID should be specified
 *   in network-byte order. If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
 *   assign a service ID to the caller.
 * @service_mask: Mask applied to the service ID used to listen across a
 *   range of service IDs. If set to 0, the service ID is matched
 *   exactly. This parameter is ignored if %service_id is set to
 *   IB_CM_ASSIGN_SERVICE_ID.
 */
static int __ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id,
			  __be64 service_mask)
{
	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
	int ret = 0;

	service_mask = service_mask ? service_mask : ~cpu_to_be64(0);
	service_id &= service_mask;
	if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
	    (service_id != IB_CM_ASSIGN_SERVICE_ID))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	if (cm_id->state != IB_CM_IDLE)
		return -EINVAL;

	cm_id->state = IB_CM_LISTEN;
	++cm_id_priv->listen_sharecount;

	if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
		cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
		cm_id->service_mask = ~cpu_to_be64(0);
	} else {
		cm_id->service_id = service_id;
		cm_id->service_mask = service_mask;
	}
	cur_cm_id_priv = cm_insert_listen(cm_id_priv);

	if (cur_cm_id_priv) {
		cm_id->state = IB_CM_IDLE;
		--cm_id_priv->listen_sharecount;
		ret = -EBUSY;
	}
	return ret;
}

int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm.lock, flags);
	ret = __ib_cm_listen(cm_id, service_id, service_mask);
	spin_unlock_irqrestore(&cm.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_cm_listen);
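
/*
 * Usage sketch (illustrative only; "my_cm_handler", "my_context" and the
 * service ID value are hypothetical, not part of this file): a passive-side
 * consumer creates an ID and listens on it, e.g.:
 *
 *	struct ib_cm_id *id;
 *	int ret;
 *
 *	id = ib_create_cm_id(device, my_cm_handler, my_context);
 *	if (IS_ERR(id))
 *		return PTR_ERR(id);
 *	ret = ib_cm_listen(id, cpu_to_be64(0x1000ULL), 0);
 *	if (ret)
 *		ib_destroy_cm_id(id);
 *
 * A zero service_mask requests an exact service ID match, per the
 * kernel-doc above.
 */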
/**
 * Create a new listening ib_cm_id and listen on the given service ID.
 *
 * If there's an existing ID listening on that same device and service ID,
 * return it.
 *
 * @device: Device associated with the cm_id. All related communication will
 * be associated with the specified device.
 * @cm_handler: Callback invoked to notify the user of CM events.
 * @service_id: Service identifier matched against incoming connection
 *   and service ID resolution requests. The service ID should be specified
 *   in network-byte order. If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
 *   assign a service ID to the caller.
 *
 * Callers should call ib_destroy_cm_id when done with the listener ID.
 */
struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
				     ib_cm_handler cm_handler,
				     __be64 service_id)
{
	struct cm_id_private *cm_id_priv;
	struct ib_cm_id *cm_id;
	unsigned long flags;
	int err = 0;

	/* Create an ID in advance, since the creation may sleep */
	cm_id = ib_create_cm_id(device, cm_handler, NULL);
	if (IS_ERR(cm_id))
		return cm_id;

	spin_lock_irqsave(&cm.lock, flags);

	if (service_id == IB_CM_ASSIGN_SERVICE_ID)
		goto new_id;

	/* Find an existing ID */
	cm_id_priv = cm_find_listen(device, service_id);
	if (cm_id_priv) {
		if (cm_id->cm_handler != cm_handler || cm_id->context) {
			/* Sharing an ib_cm_id with different handlers is not
			 * supported */
			spin_unlock_irqrestore(&cm.lock, flags);
			return ERR_PTR(-EINVAL);
		}
		atomic_inc(&cm_id_priv->refcount);
		++cm_id_priv->listen_sharecount;
		spin_unlock_irqrestore(&cm.lock, flags);

		ib_destroy_cm_id(cm_id);
		cm_id = &cm_id_priv->id;
		return cm_id;
	}

new_id:
	/* Use newly created ID */
	err = __ib_cm_listen(cm_id, service_id, 0);

	spin_unlock_irqrestore(&cm.lock, flags);

	if (err) {
		ib_destroy_cm_id(cm_id);
		return ERR_PTR(err);
	}
	return cm_id;
}
EXPORT_SYMBOL(ib_cm_insert_listen);

static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
			  enum cm_msg_sequence msg_seq)
{
	u64 hi_tid, low_tid;

	hi_tid  = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
	low_tid = (u64) ((__force u32)cm_id_priv->id.local_id |
			 (msg_seq << 30));
	return cpu_to_be64(hi_tid | low_tid);
}
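
/*
 * TID layout produced above: the MAD agent's hi_tid occupies bits 63..32,
 * and the low 32 bits are the local communication ID with the message
 * sequence (msg_seq) OR'ed into bits 31..30, letting replies be matched
 * back to both the sending agent and the owning cm_id.
 */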
static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
			      __be16 attr_id, __be64 tid)
{
	hdr->base_version  = IB_MGMT_BASE_VERSION;
	hdr->mgmt_class    = IB_MGMT_CLASS_CM;
	hdr->class_version = IB_CM_CLASS_VERSION;
	hdr->method	   = IB_MGMT_METHOD_SEND;
	hdr->attr_id	   = attr_id;
	hdr->tid	   = tid;
}

static void cm_format_req(struct cm_req_msg *req_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_req_param *param)
{
	struct sa_path_rec *pri_path = param->primary_path;
	struct sa_path_rec *alt_path = param->alternate_path;

	cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));

	req_msg->local_comm_id = cm_id_priv->id.local_id;
	req_msg->service_id = param->service_id;
	req_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
	cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
	cm_req_set_init_depth(req_msg, param->initiator_depth);
	cm_req_set_remote_resp_timeout(req_msg,
				       param->remote_cm_response_timeout);
	cm_req_set_qp_type(req_msg, param->qp_type);
	cm_req_set_flow_ctrl(req_msg, param->flow_control);
	cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
	cm_req_set_local_resp_timeout(req_msg,
				      param->local_cm_response_timeout);
	req_msg->pkey = param->primary_path->pkey;
	cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
	cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);

	if (param->qp_type != IB_QPT_XRC_INI) {
		cm_req_set_resp_res(req_msg, param->responder_resources);
		cm_req_set_retry_count(req_msg, param->retry_count);
		cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
		cm_req_set_srq(req_msg, param->srq);
	}

	if (pri_path->hop_limit <= 1) {
		req_msg->primary_local_lid = pri_path->slid;
		req_msg->primary_remote_lid = pri_path->dlid;
	} else {
		/* Work-around until there's a way to obtain remote LID info */
		req_msg->primary_local_lid = IB_LID_PERMISSIVE;
		req_msg->primary_remote_lid = IB_LID_PERMISSIVE;
	}
	req_msg->primary_local_gid = pri_path->sgid;
	req_msg->primary_remote_gid = pri_path->dgid;
	cm_req_set_primary_flow_label(req_msg, pri_path->flow_label);
	cm_req_set_primary_packet_rate(req_msg, pri_path->rate);
	req_msg->primary_traffic_class = pri_path->traffic_class;
	req_msg->primary_hop_limit = pri_path->hop_limit;
	cm_req_set_primary_sl(req_msg, pri_path->sl);
	cm_req_set_primary_subnet_local(req_msg, (pri_path->hop_limit <= 1));
	cm_req_set_primary_local_ack_timeout(req_msg,
		cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
			       pri_path->packet_life_time));

	if (alt_path) {
		if (alt_path->hop_limit <= 1) {
			req_msg->alt_local_lid = alt_path->slid;
			req_msg->alt_remote_lid = alt_path->dlid;
		} else {
			req_msg->alt_local_lid = IB_LID_PERMISSIVE;
			req_msg->alt_remote_lid = IB_LID_PERMISSIVE;
		}
		req_msg->alt_local_gid = alt_path->sgid;
		req_msg->alt_remote_gid = alt_path->dgid;
		cm_req_set_alt_flow_label(req_msg,
					  alt_path->flow_label);
		cm_req_set_alt_packet_rate(req_msg, alt_path->rate);
		req_msg->alt_traffic_class = alt_path->traffic_class;
		req_msg->alt_hop_limit = alt_path->hop_limit;
		cm_req_set_alt_sl(req_msg, alt_path->sl);
		cm_req_set_alt_subnet_local(req_msg, (alt_path->hop_limit <= 1));
		cm_req_set_alt_local_ack_timeout(req_msg,
			cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
				       alt_path->packet_life_time));
	}

	if (param->private_data && param->private_data_len)
		memcpy(req_msg->private_data, param->private_data,
		       param->private_data_len);
}

static int cm_validate_req_param(struct ib_cm_req_param *param)
{
	/* peer-to-peer not supported */
	if (param->peer_to_peer)
		return -EINVAL;

	if (!param->primary_path)
		return -EINVAL;

	if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC &&
	    param->qp_type != IB_QPT_XRC_INI)
		return -EINVAL;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	if (param->alternate_path &&
	    (param->alternate_path->pkey != param->primary_path->pkey ||
	     param->alternate_path->mtu != param->primary_path->mtu))
		return -EINVAL;

	return 0;
}

int ib_send_cm_req(struct ib_cm_id *cm_id,
		   struct ib_cm_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct cm_req_msg *req_msg;
	unsigned long flags;
	int ret;

	ret = cm_validate_req_param(param);
	if (ret)
		return ret;

	/* Verify that we're not in timewait. */
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_IDLE) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto out;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto out;
	}

	ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av,
				 cm_id_priv);
	if (ret)
		goto error1;
	if (param->alternate_path) {
		ret = cm_init_av_by_path(param->alternate_path,
					 &cm_id_priv->alt_av, cm_id_priv);
		if (ret)
			goto error1;
	}
	cm_id->service_id = param->service_id;
	cm_id->service_mask = ~cpu_to_be64(0);
	cm_id_priv->timeout_ms = cm_convert_to_ms(
				    param->primary_path->packet_life_time) * 2 +
				 cm_convert_to_ms(
				    param->remote_cm_response_timeout);
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->retry_count = param->retry_count;
	cm_id_priv->path_mtu = param->primary_path->mtu;
	cm_id_priv->pkey = param->primary_path->pkey;
	cm_id_priv->qp_type = param->qp_type;

	ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
	if (ret)
		goto error1;

	req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
	cm_format_req(req_msg, cm_id_priv, param);
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
	cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;

	cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	ret = ib_post_send_mad(cm_id_priv->msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto error2;
	}
	BUG_ON(cm_id->state != IB_CM_IDLE);
	cm_id->state = IB_CM_REQ_SENT;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error2:	cm_free_msg(cm_id_priv->msg);
error1:	kfree(cm_id_priv->timewait_info);
out:	return ret;
}
EXPORT_SYMBOL(ib_send_cm_req);

static int cm_issue_rej(struct cm_port *port,
			struct ib_mad_recv_wc *mad_recv_wc,
			enum ib_cm_rej_reason reason,
			enum cm_msg_response msg_rejected,
			void *ari, u8 ari_length)
{
	struct ib_mad_send_buf *msg = NULL;
	struct cm_rej_msg *rej_msg, *rcv_msg;
	int ret;

	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
	if (ret)
		return ret;

	/* We just need common CM header information. Cast to any message. */
	rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
	rej_msg = (struct cm_rej_msg *) msg->mad;

	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
	rej_msg->remote_comm_id = rcv_msg->local_comm_id;
	rej_msg->local_comm_id = rcv_msg->remote_comm_id;
	cm_rej_set_msg_rejected(rej_msg, msg_rejected);
	rej_msg->reason = cpu_to_be16(reason);

	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

	return ret;
}

static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
				    __be32 local_qpn, __be32 remote_qpn)
{
	return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
		((local_ca_guid == remote_ca_guid) &&
		 (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
}

static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
				     struct sa_path_rec *primary_path,
				     struct sa_path_rec *alt_path)
{
	memset(primary_path, 0, sizeof(*primary_path));
	primary_path->dgid = req_msg->primary_local_gid;
	primary_path->sgid = req_msg->primary_remote_gid;
	primary_path->dlid = req_msg->primary_local_lid;
	primary_path->slid = req_msg->primary_remote_lid;
	primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
	primary_path->hop_limit = req_msg->primary_hop_limit;
	primary_path->traffic_class = req_msg->primary_traffic_class;
	primary_path->reversible = 1;
	primary_path->pkey = req_msg->pkey;
	primary_path->sl = cm_req_get_primary_sl(req_msg);
	primary_path->mtu_selector = IB_SA_EQ;
	primary_path->mtu = cm_req_get_path_mtu(req_msg);
	primary_path->rate_selector = IB_SA_EQ;
	primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
	primary_path->packet_life_time_selector = IB_SA_EQ;
	primary_path->packet_life_time =
		cm_req_get_primary_local_ack_timeout(req_msg);
	primary_path->packet_life_time -= (primary_path->packet_life_time > 0);
	primary_path->service_id = req_msg->service_id;

	if (req_msg->alt_local_lid) {
		memset(alt_path, 0, sizeof(*alt_path));
		alt_path->dgid = req_msg->alt_local_gid;
		alt_path->sgid = req_msg->alt_remote_gid;
		alt_path->dlid = req_msg->alt_local_lid;
		alt_path->slid = req_msg->alt_remote_lid;
		alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
		alt_path->hop_limit = req_msg->alt_hop_limit;
		alt_path->traffic_class = req_msg->alt_traffic_class;
		alt_path->reversible = 1;
		alt_path->pkey = req_msg->pkey;
		alt_path->sl = cm_req_get_alt_sl(req_msg);
		alt_path->mtu_selector = IB_SA_EQ;
		alt_path->mtu = cm_req_get_path_mtu(req_msg);
		alt_path->rate_selector = IB_SA_EQ;
		alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
		alt_path->packet_life_time_selector = IB_SA_EQ;
		alt_path->packet_life_time =
			cm_req_get_alt_local_ack_timeout(req_msg);
		alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
		alt_path->service_id = req_msg->service_id;
	}
}

static u16 cm_get_bth_pkey(struct cm_work *work)
{
	struct ib_device *ib_dev = work->port->cm_dev->ib_device;
	u8 port_num = work->port->port_num;
	u16 pkey_index = work->mad_recv_wc->wc->pkey_index;
	u16 pkey;
	int ret;

	ret = ib_get_cached_pkey(ib_dev, port_num, pkey_index, &pkey);
	if (ret) {
		dev_warn_ratelimited(&ib_dev->dev, "ib_cm: Couldn't retrieve pkey for incoming request (port %d, pkey index %d). %d\n",
				     port_num, pkey_index, ret);
		return 0;
	}

	return pkey;
}

static void cm_format_req_event(struct cm_work *work,
				struct cm_id_private *cm_id_priv,
				struct ib_cm_id *listen_id)
{
	struct cm_req_msg *req_msg;
	struct ib_cm_req_event_param *param;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.req_rcvd;
	param->listen_id = listen_id;
	param->bth_pkey = cm_get_bth_pkey(work);
	param->port = cm_id_priv->av.port->port_num;
	param->primary_path = &work->path[0];
	if (req_msg->alt_local_lid)
		param->alternate_path = &work->path[1];
	else
		param->alternate_path = NULL;
	param->remote_ca_guid = req_msg->local_ca_guid;
	param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
	param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
	param->qp_type = cm_req_get_qp_type(req_msg);
	param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
	param->responder_resources = cm_req_get_init_depth(req_msg);
	param->initiator_depth = cm_req_get_resp_res(req_msg);
	param->local_cm_response_timeout =
					cm_req_get_remote_resp_timeout(req_msg);
	param->flow_control = cm_req_get_flow_ctrl(req_msg);
	param->remote_cm_response_timeout =
					cm_req_get_local_resp_timeout(req_msg);
	param->retry_count = cm_req_get_retry_count(req_msg);
	param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
	param->srq = cm_req_get_srq(req_msg);
	work->cm_event.private_data = &req_msg->private_data;
}

static void cm_process_work(struct cm_id_private *cm_id_priv,
			    struct cm_work *work)
{
	int ret;

	/* We will typically only have the current event to report. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
	cm_free_work(work);

	while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
		spin_lock_irq(&cm_id_priv->lock);
		work = cm_dequeue_work(cm_id_priv);
		spin_unlock_irq(&cm_id_priv->lock);
		BUG_ON(!work);
		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
						&work->cm_event);
		cm_free_work(work);
	}
	cm_deref_id(cm_id_priv);
	if (ret)
		cm_destroy_id(&cm_id_priv->id, ret);
}

static void cm_format_mra(struct cm_mra_msg *mra_msg,
			  struct cm_id_private *cm_id_priv,
			  enum cm_msg_response msg_mraed, u8 service_timeout,
			  const void *private_data, u8 private_data_len)
{
	cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
	cm_mra_set_msg_mraed(mra_msg, msg_mraed);
	mra_msg->local_comm_id = cm_id_priv->id.local_id;
	mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_mra_set_service_timeout(mra_msg, service_timeout);

	if (private_data && private_data_len)
		memcpy(mra_msg->private_data, private_data, private_data_len);
}

static void cm_format_rej(struct cm_rej_msg *rej_msg,
			  struct cm_id_private *cm_id_priv,
			  enum ib_cm_rej_reason reason,
			  void *ari,
			  u8 ari_length,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
	rej_msg->remote_comm_id = cm_id_priv->id.remote_id;

	switch(cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		rej_msg->local_comm_id = 0;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_MRA_REQ_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
		break;
	default:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
		break;
	}

	rej_msg->reason = cpu_to_be16(reason);
	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	if (private_data && private_data_len)
		memcpy(rej_msg->private_data, private_data, private_data_len);
}

static void cm_dup_req_handler(struct cm_work *work,
			       struct cm_id_private *cm_id_priv)
{
	struct ib_mad_send_buf *msg = NULL;
	int ret;

	atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
			counter[CM_REQ_COUNTER]);

	/* Quick state check to discard duplicate REQs. */
	if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
		return;

	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		return;

	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id_priv->id.state) {
	case IB_CM_MRA_REQ_SENT:
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		break;
	case IB_CM_TIMEWAIT:
		cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
			      IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
		break;
	default:
		goto unlock;
	}
	spin_unlock_irq(&cm_id_priv->lock);

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	return;

unlock:	spin_unlock_irq(&cm_id_priv->lock);
free:	cm_free_msg(msg);
}

static struct cm_id_private * cm_match_req(struct cm_work *work,
					   struct cm_id_private *cm_id_priv)
{
	struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
	struct cm_timewait_info *timewait_info;
	struct cm_req_msg *req_msg;
	struct ib_cm_id *cm_id;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	/* Check for possible duplicate REQ. */
	spin_lock_irq(&cm.lock);
	timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
	if (timewait_info) {
		cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
					   timewait_info->work.remote_id);
		spin_unlock_irq(&cm.lock);
		if (cur_cm_id_priv) {
			cm_dup_req_handler(work, cur_cm_id_priv);
			cm_deref_id(cur_cm_id_priv);
		}
		return NULL;
	}

	/* Check for stale connections. */
	timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
	if (timewait_info) {
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
					   timewait_info->work.remote_id);

		spin_unlock_irq(&cm.lock);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
			     NULL, 0);
		if (cur_cm_id_priv) {
			cm_id = &cur_cm_id_priv->id;
			ib_send_cm_dreq(cm_id, NULL, 0);
			cm_deref_id(cur_cm_id_priv);
		}
		return NULL;
	}

	/* Find matching listen request. */
	listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
					   req_msg->service_id);
	if (!listen_cm_id_priv) {
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irq(&cm.lock);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
			     NULL, 0);
		return NULL;
	}
	atomic_inc(&listen_cm_id_priv->refcount);
	atomic_inc(&cm_id_priv->refcount);
	cm_id_priv->id.state = IB_CM_REQ_RCVD;
	atomic_inc(&cm_id_priv->work_count);
	spin_unlock_irq(&cm.lock);

	return listen_cm_id_priv;
}

/*
 * Work-around for inter-subnet connections. If the LIDs are permissive,
 * we need to override the LID/SL data in the REQ with the LID information
 * in the work completion.
 */
static void cm_process_routed_req(struct cm_req_msg *req_msg, struct ib_wc *wc)
{
	if (!cm_req_get_primary_subnet_local(req_msg)) {
		if (req_msg->primary_local_lid == IB_LID_PERMISSIVE) {
			req_msg->primary_local_lid = cpu_to_be16(wc->slid);
			cm_req_set_primary_sl(req_msg, wc->sl);
		}

		if (req_msg->primary_remote_lid == IB_LID_PERMISSIVE)
			req_msg->primary_remote_lid = cpu_to_be16(wc->dlid_path_bits);
	}

	if (!cm_req_get_alt_subnet_local(req_msg)) {
		if (req_msg->alt_local_lid == IB_LID_PERMISSIVE) {
			req_msg->alt_local_lid = cpu_to_be16(wc->slid);
			cm_req_set_alt_sl(req_msg, wc->sl);
		}

		if (req_msg->alt_remote_lid == IB_LID_PERMISSIVE)
			req_msg->alt_remote_lid = cpu_to_be16(wc->dlid_path_bits);
	}
}

static int cm_req_handler(struct cm_work *work)
{
	struct ib_cm_id *cm_id;
	struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
	struct cm_req_msg *req_msg;
	union ib_gid gid;
	struct ib_gid_attr gid_attr;
	const struct ib_global_route *grh;
	int ret;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	cm_id_priv->id.remote_id = req_msg->local_comm_id;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				work->mad_recv_wc->recv_buf.grh,
				&cm_id_priv->av);
	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto destroy;
	}
	cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
	cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
	cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);

	listen_cm_id_priv = cm_match_req(work, cm_id_priv);
	if (!listen_cm_id_priv) {
		ret = -EINVAL;
		kfree(cm_id_priv->timewait_info);
		goto destroy;
	}

	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = listen_cm_id_priv->id.context;
	cm_id_priv->id.service_id = req_msg->service_id;
	cm_id_priv->id.service_mask = ~cpu_to_be64(0);

	cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
	cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);

	if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE)
		memcpy(work->path[0].dmac, cm_id_priv->av.ah_attr.roce.dmac,
		       ETH_ALEN);
	grh = rdma_ah_read_grh(&cm_id_priv->av.ah_attr);
	work->path[0].hop_limit = grh->hop_limit;
	ret = ib_get_cached_gid(work->port->cm_dev->ib_device,
				work->port->port_num,
				grh->sgid_index,
				&gid, &gid_attr);
	if (!ret) {
		if (gid_attr.ndev) {
			work->path[0].ifindex = gid_attr.ndev->ifindex;
			work->path[0].net = dev_net(gid_attr.ndev);
			dev_put(gid_attr.ndev);
		}
		work->path[0].gid_type = gid_attr.gid_type;
		ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av,
					 cm_id_priv);
	}
	if (ret) {
		int err = ib_get_cached_gid(work->port->cm_dev->ib_device,
					    work->port->port_num, 0,
					    &work->path[0].sgid,
					    &gid_attr);
		if (!err && gid_attr.ndev) {
			work->path[0].ifindex = gid_attr.ndev->ifindex;
			work->path[0].net = dev_net(gid_attr.ndev);
			dev_put(gid_attr.ndev);
		}
		work->path[0].gid_type = gid_attr.gid_type;
		ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
			       &work->path[0].sgid, sizeof work->path[0].sgid,
			       NULL, 0);
		goto rejected;
	}
	if (req_msg->alt_local_lid) {
		ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av,
					 cm_id_priv);
		if (ret) {
			ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
				       &work->path[0].sgid,
				       sizeof work->path[0].sgid, NULL, 0);
			goto rejected;
		}
	}
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->timeout_ms = cm_convert_to_ms(
					cm_req_get_local_resp_timeout(req_msg));
	cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
	cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
	cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
	cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
	cm_id_priv->pkey = req_msg->pkey;
	cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
	cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
	cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
	cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);

	cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
	cm_process_work(cm_id_priv, work);
	cm_deref_id(listen_cm_id_priv);
	return 0;

rejected:
	atomic_dec(&cm_id_priv->refcount);
	cm_deref_id(listen_cm_id_priv);
destroy:
	ib_destroy_cm_id(cm_id);
	return ret;
}

static void cm_format_rep(struct cm_rep_msg *rep_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_rep_param *param)
{
	cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
	rep_msg->local_comm_id = cm_id_priv->id.local_id;
	rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
	rep_msg->resp_resources = param->responder_resources;
	cm_rep_set_target_ack_delay(rep_msg,
				    cm_id_priv->av.port->cm_dev->ack_delay);
	cm_rep_set_failover(rep_msg, param->failover_accepted);
	cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
	rep_msg->local_ca_guid = cm_id_priv->id.device->node_guid;

	if (cm_id_priv->qp_type != IB_QPT_XRC_TGT) {
		rep_msg->initiator_depth = param->initiator_depth;
		cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
		cm_rep_set_srq(rep_msg, param->srq);
		cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
	} else {
		cm_rep_set_srq(rep_msg, 1);
		cm_rep_set_local_eecn(rep_msg, cpu_to_be32(param->qp_num));
	}

	if (param->private_data && param->private_data_len)
		memcpy(rep_msg->private_data, param->private_data,
		       param->private_data_len);
}

int ib_send_cm_rep(struct ib_cm_id *cm_id,
		   struct ib_cm_rep_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct cm_rep_msg *rep_msg;
	unsigned long flags;
	int ret;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REQ_RCVD &&
	    cm_id->state != IB_CM_MRA_REQ_SENT) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	rep_msg = (struct cm_rep_msg *) msg->mad;
	cm_format_rep(rep_msg, cm_id_priv, param);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->state = IB_CM_REP_SENT;
	cm_id_priv->msg = msg;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
	cm_id_priv->local_qpn = cpu_to_be32(param->qp_num & 0xFFFFFF);

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rep);

static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
			  struct cm_id_private *cm_id_priv,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
	rtu_msg->local_comm_id = cm_id_priv->id.local_id;
	rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;

	if (private_data && private_data_len)
		memcpy(rtu_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_rtu(struct ib_cm_id *cm_id,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	void *data;
	int ret;

	if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REP_RCVD &&
	    cm_id->state != IB_CM_MRA_REP_SENT) {
		ret = -EINVAL;
		goto error;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto error;

	cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
		      private_data, private_data_len);

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		kfree(data);
		return ret;
	}

	cm_id->state = IB_CM_ESTABLISHED;
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rtu);
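
/*
 * Taken together, ib_send_cm_req(), ib_send_cm_rep() and ib_send_cm_rtu()
 * implement the three-way connection handshake. A rough sketch of the
 * state transitions driven by the calls and handlers in this file:
 *
 *	active:  ib_send_cm_req()  IDLE     -> REQ_SENT
 *	passive: cm_req_handler()  (new id) -> REQ_RCVD
 *	passive: ib_send_cm_rep()  REQ_RCVD -> REP_SENT
 *	active:  cm_rep_handler()  REQ_SENT -> REP_RCVD
 *	active:  ib_send_cm_rtu()  REP_RCVD -> ESTABLISHED
 *	passive: cm_rtu_handler()  REP_SENT -> ESTABLISHED
 *
 * MRA handling can interpose extra states (e.g. MRA_REQ_SENT) between
 * these steps; cm_rtu_handler() lives later in this file.
 */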
static void cm_format_rep_event(struct cm_work *work, enum ib_qp_type qp_type)
{
	struct cm_rep_msg *rep_msg;
	struct ib_cm_rep_event_param *param;

	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.rep_rcvd;
	param->remote_ca_guid = rep_msg->local_ca_guid;
	param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
	param->remote_qpn = be32_to_cpu(cm_rep_get_qpn(rep_msg, qp_type));
	param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
	param->responder_resources = rep_msg->initiator_depth;
	param->initiator_depth = rep_msg->resp_resources;
	param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
	param->failover_accepted = cm_rep_get_failover(rep_msg);
	param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
	param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
	param->srq = cm_rep_get_srq(rep_msg);
	work->cm_event.private_data = &rep_msg->private_data;
}
static void cm_dup_rep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rep_msg *rep_msg;
	struct ib_mad_send_buf *msg = NULL;
	int ret;

	rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
				   rep_msg->local_comm_id);
	if (!cm_id_priv)
		return;

	atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
			counter[CM_REP_COUNTER]);
	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		goto deref;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
		cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else
		goto unlock;
	spin_unlock_irq(&cm_id_priv->lock);

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	goto deref;

unlock:	spin_unlock_irq(&cm_id_priv->lock);
free:	cm_free_msg(msg);
deref:	cm_deref_id(cm_id_priv);
}
2043 static int cm_rep_handler(struct cm_work *work)
2045 struct cm_id_private *cm_id_priv;
	struct cm_rep_msg *rep_msg;
	int ret;
	struct cm_id_private *cur_cm_id_priv;
2049 struct ib_cm_id *cm_id;
2050 struct cm_timewait_info *timewait_info;
	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
	if (!cm_id_priv) {
		cm_dup_rep_handler(work);
		return -EINVAL;
	}
2059 cm_format_rep_event(work, cm_id_priv->qp_type);
2061 spin_lock_irq(&cm_id_priv->lock);
2062 switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		break;
	default:
		spin_unlock_irq(&cm_id_priv->lock);
		ret = -EINVAL;
		goto error;
	}
	cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
	cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
2074 cm_id_priv->timewait_info->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
2076 spin_lock(&cm.lock);
2077 /* Check for duplicate REP. */
	if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
		spin_unlock(&cm.lock);
		spin_unlock_irq(&cm_id_priv->lock);
		ret = -EINVAL;
		goto error;
	}
2084 /* Check for a stale connection. */
2085 timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
2086 if (timewait_info) {
2087 rb_erase(&cm_id_priv->timewait_info->remote_id_node,
2088 &cm.remote_id_table);
2089 cm_id_priv->timewait_info->inserted_remote_id = 0;
2090 cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
2091 timewait_info->work.remote_id);
2093 spin_unlock(&cm.lock);
2094 spin_unlock_irq(&cm_id_priv->lock);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
			     NULL, 0);
		if (cur_cm_id_priv) {
			cm_id = &cur_cm_id_priv->id;
			ib_send_cm_dreq(cm_id, NULL, 0);
			cm_deref_id(cur_cm_id_priv);
		}
		ret = -EINVAL;
		goto error;
	}
	spin_unlock(&cm.lock);
2109 cm_id_priv->id.state = IB_CM_REP_RCVD;
2110 cm_id_priv->id.remote_id = rep_msg->local_comm_id;
2111 cm_id_priv->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
2112 cm_id_priv->initiator_depth = rep_msg->resp_resources;
2113 cm_id_priv->responder_resources = rep_msg->initiator_depth;
2114 cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
2115 cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
2116 cm_id_priv->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
2117 cm_id_priv->av.timeout =
2118 cm_ack_timeout(cm_id_priv->target_ack_delay,
2119 cm_id_priv->av.timeout - 1);
2120 cm_id_priv->alt_av.timeout =
2121 cm_ack_timeout(cm_id_priv->target_ack_delay,
2122 cm_id_priv->alt_av.timeout - 1);
	/* todo: handle peer_to_peer */

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);
	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

error:	cm_deref_id(cm_id_priv);
	return ret;
}
2143 static int cm_establish_handler(struct cm_work *work)
2145 struct cm_id_private *cm_id_priv;
2148 /* See comment in cm_establish about lookup. */
2149 cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
2153 spin_lock_irq(&cm_id_priv->lock);
2154 if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
2155 spin_unlock_irq(&cm_id_priv->lock);
2159 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2160 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2162 list_add_tail(&work->list, &cm_id_priv->work_list);
2163 spin_unlock_irq(&cm_id_priv->lock);
2166 cm_process_work(cm_id_priv, work);
2168 cm_deref_id(cm_id_priv);
2171 cm_deref_id(cm_id_priv);
2175 static int cm_rtu_handler(struct cm_work *work)
2177 struct cm_id_private *cm_id_priv;
	struct cm_rtu_msg *rtu_msg;
	int ret;
	rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
				   rtu_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &rtu_msg->private_data;
2189 spin_lock_irq(&cm_id_priv->lock);
2190 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
2191 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
2192 spin_unlock_irq(&cm_id_priv->lock);
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_RTU_COUNTER]);
		goto deref;
	}
2197 cm_id_priv->id.state = IB_CM_ESTABLISHED;
2199 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);
	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
deref:	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
			   struct cm_id_private *cm_id_priv,
			   const void *private_data,
			   u8 private_data_len)
{
	cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ));
	dreq_msg->local_comm_id = cm_id_priv->id.local_id;
	dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);

	if (private_data && private_data_len)
		memcpy(dreq_msg->private_data, private_data, private_data_len);
}
int ib_send_cm_dreq(struct ib_cm_id *cm_id,
		    const void *private_data,
		    u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED) {
		ret = -EINVAL;
		goto out;
	}

	if (cm_id->lap_state == IB_CM_LAP_SENT ||
	    cm_id->lap_state == IB_CM_MRA_LAP_RCVD)
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret) {
		cm_enter_timewait(cm_id_priv);
		goto out;
	}

	cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
		       private_data, private_data_len);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		cm_enter_timewait(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->state = IB_CM_DREQ_SENT;
	cm_id_priv->msg = msg;
out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_dreq);
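
/*
 * Editor's illustration (not part of the original source): an established
 * connection is torn down by sending a DREQ; the cm_id then waits for the
 * DREP (or the retry timeout) before entering timewait.  Sketch only:
 */
#if 0
	if (connected)
		ib_send_cm_dreq(cm_id, NULL, 0); /* expect IB_CM_DREP_RECEIVED */
#endif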
static void cm_format_drep(struct cm_drep_msg *drep_msg,
			   struct cm_id_private *cm_id_priv,
			   const void *private_data,
			   u8 private_data_len)
{
	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
	drep_msg->local_comm_id = cm_id_priv->id.local_id;
	drep_msg->remote_comm_id = cm_id_priv->id.remote_id;

	if (private_data && private_data_len)
		memcpy(drep_msg->private_data, private_data, private_data_len);
}
int ib_send_cm_drep(struct ib_cm_id *cm_id,
		    const void *private_data,
		    u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	void *data;
	int ret;

	if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_DREQ_RCVD) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		kfree(data);
		return -EINVAL;
	}

	cm_set_private_data(cm_id_priv, data, private_data_len);
	cm_enter_timewait(cm_id_priv);

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;
	cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
		       private_data, private_data_len);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_drep);
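
/*
 * Editor's illustration (not part of the original source): the passive side
 * of a disconnect answers IB_CM_DREQ_RECEIVED from its cm_handler:
 */
#if 0
	case IB_CM_DREQ_RECEIVED:
		ib_send_cm_drep(cm_id, NULL, 0);
		break;
#endif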
static int cm_issue_drep(struct cm_port *port,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_buf *msg = NULL;
	struct cm_dreq_msg *dreq_msg;
	struct cm_drep_msg *drep_msg;
	int ret;

	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
	if (ret)
		return ret;

	dreq_msg = (struct cm_dreq_msg *) mad_recv_wc->recv_buf.mad;
	drep_msg = (struct cm_drep_msg *) msg->mad;

	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid);
	drep_msg->remote_comm_id = dreq_msg->local_comm_id;
	drep_msg->local_comm_id = dreq_msg->remote_comm_id;

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);
	return ret;
}
2365 static int cm_dreq_handler(struct cm_work *work)
2367 struct cm_id_private *cm_id_priv;
2368 struct cm_dreq_msg *dreq_msg;
	struct ib_mad_send_buf *msg = NULL;
	int ret;
	dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
				   dreq_msg->local_comm_id);
	if (!cm_id_priv) {
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_DREQ_COUNTER]);
		cm_issue_drep(work->port, work->mad_recv_wc);
		return -EINVAL;
	}
2382 work->cm_event.private_data = &dreq_msg->private_data;
2384 spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))
		goto unlock;
	switch (cm_id_priv->id.state) {
	case IB_CM_REP_SENT:
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		break;
	case IB_CM_ESTABLISHED:
		if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
		    cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
			ib_cancel_mad(cm_id_priv->av.port->mad_agent,
				      cm_id_priv->msg);
		break;
	case IB_CM_MRA_REP_RCVD:
		break;
	case IB_CM_TIMEWAIT:
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_DREQ_COUNTER]);
		if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
			goto unlock;
		cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
			       cm_id_priv->private_data,
			       cm_id_priv->private_data_len);
		spin_unlock_irq(&cm_id_priv->lock);
		if (ib_post_send_mad(msg, NULL))
			cm_free_msg(msg);
		goto deref;
	case IB_CM_DREQ_RCVD:
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_DREQ_COUNTER]);
		goto unlock;
	default:
		goto unlock;
	}
2421 cm_id_priv->id.state = IB_CM_DREQ_RCVD;
2422 cm_id_priv->tid = dreq_msg->hdr.tid;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);
	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

unlock:	spin_unlock_irq(&cm_id_priv->lock);
deref:	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
2439 static int cm_drep_handler(struct cm_work *work)
2441 struct cm_id_private *cm_id_priv;
2442 struct cm_drep_msg *drep_msg;
2445 drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
2446 cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id,
2447 drep_msg->local_comm_id);
2451 work->cm_event.private_data = &drep_msg->private_data;
2453 spin_lock_irq(&cm_id_priv->lock);
2454 if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
2455 cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
2456 spin_unlock_irq(&cm_id_priv->lock);
2459 cm_enter_timewait(cm_id_priv);
2461 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2462 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2464 list_add_tail(&work->list, &cm_id_priv->work_list);
2465 spin_unlock_irq(&cm_id_priv->lock);
2468 cm_process_work(cm_id_priv, work);
2470 cm_deref_id(cm_id_priv);
2473 cm_deref_id(cm_id_priv);
int ib_send_cm_rej(struct ib_cm_id *cm_id,
		   enum ib_cm_rej_reason reason,
		   void *ari,
		   u8 ari_length,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
	    (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (!ret)
			cm_format_rej((struct cm_rej_msg *) msg->mad,
				      cm_id_priv, reason, ari, ari_length,
				      private_data, private_data_len);
		cm_reset_to_idle(cm_id_priv);
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (!ret)
			cm_format_rej((struct cm_rej_msg *) msg->mad,
				      cm_id_priv, reason, ari, ari_length,
				      private_data, private_data_len);
		cm_enter_timewait(cm_id_priv);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	if (ret)
		goto out;

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rej);
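
/*
 * Editor's illustration (not part of the original source): a listener that
 * cannot satisfy an incoming REQ rejects it with an appropriate reason
 * code; the ARI and private data are optional and omitted here:
 */
#if 0
	ib_send_cm_rej(cm_id, IB_CM_REJ_NO_RESOURCES, NULL, 0, NULL, 0);
#endif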
static void cm_format_rej_event(struct cm_work *work)
{
	struct cm_rej_msg *rej_msg;
	struct ib_cm_rej_event_param *param;

	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.rej_rcvd;
	param->ari = rej_msg->ari;
	param->ari_length = cm_rej_get_reject_info_len(rej_msg);
	param->reason = __be16_to_cpu(rej_msg->reason);
	work->cm_event.private_data = &rej_msg->private_data;
}
static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
{
	struct cm_timewait_info *timewait_info;
	struct cm_id_private *cm_id_priv;
	__be32 remote_id;

	remote_id = rej_msg->local_comm_id;

	if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
		spin_lock_irq(&cm.lock);
		timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari),
						   remote_id);
		if (!timewait_info) {
			spin_unlock_irq(&cm.lock);
			return NULL;
		}
		cm_id_priv = idr_find(&cm.local_id_table, (__force int)
				      (timewait_info->work.local_id ^
				       cm.random_id_operand));
		if (cm_id_priv) {
			if (cm_id_priv->id.remote_id == remote_id)
				atomic_inc(&cm_id_priv->refcount);
			else
				cm_id_priv = NULL;
		}
		spin_unlock_irq(&cm.lock);
	} else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
		cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
	else
		cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id);

	return cm_id_priv;
}
static int cm_rej_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rej_msg *rej_msg;
	int ret;

	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_rejected_id(rej_msg);
	if (!cm_id_priv)
		return -EINVAL;

	cm_format_rej_event(work);

	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* fall through */
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
		if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN)
			cm_enter_timewait(cm_id_priv);
		else
			cm_reset_to_idle(cm_id_priv);
		break;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* fall through */
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		cm_enter_timewait(cm_id_priv);
		break;
	case IB_CM_ESTABLISHED:
		if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT ||
		    cm_id_priv->id.lap_state == IB_CM_LAP_SENT) {
			if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT)
				ib_cancel_mad(cm_id_priv->av.port->mad_agent,
					      cm_id_priv->msg);
			cm_enter_timewait(cm_id_priv);
			break;
		}
		/* fall through */
	default:
		spin_unlock_irq(&cm_id_priv->lock);
		ret = -EINVAL;
		goto out;
	}

	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return ret;
}
int ib_send_cm_mra(struct ib_cm_id *cm_id,
		   u8 service_timeout,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	enum ib_cm_state cm_state;
	enum ib_cm_lap_state lap_state;
	enum cm_msg_response msg_response;
	void *data;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch(cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		cm_state = IB_CM_MRA_REQ_SENT;
		lap_state = cm_id->lap_state;
		msg_response = CM_MSG_RESPONSE_REQ;
		break;
	case IB_CM_REP_RCVD:
		cm_state = IB_CM_MRA_REP_SENT;
		lap_state = cm_id->lap_state;
		msg_response = CM_MSG_RESPONSE_REP;
		break;
	case IB_CM_ESTABLISHED:
		if (cm_id->lap_state == IB_CM_LAP_RCVD) {
			cm_state = cm_id->state;
			lap_state = IB_CM_MRA_LAP_SENT;
			msg_response = CM_MSG_RESPONSE_OTHER;
			break;
		}
		/* fall through */
	default:
		ret = -EINVAL;
		goto error1;
	}

	if (!(service_timeout & IB_CM_MRA_FLAG_DELAY)) {
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			goto error1;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      msg_response, service_timeout,
			      private_data, private_data_len);
		ret = ib_post_send_mad(msg, NULL);
		if (ret)
			goto error2;
	}

	cm_id->state = cm_state;
	cm_id->lap_state = lap_state;
	cm_id_priv->service_timeout = service_timeout;
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error1:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	return ret;

error2:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	cm_free_msg(msg);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_mra);
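
/*
 * Editor's illustration (not part of the original source): a receiver that
 * needs more time than the sender's timeout allows answers with an MRA.
 * The low bits of service_timeout encode the extra delay; OR-ing in
 * IB_CM_MRA_FLAG_DELAY records the timeout but defers actually sending the
 * MRA until a duplicate message arrives.  Sketch:
 */
#if 0
	ib_send_cm_mra(cm_id, 24 | IB_CM_MRA_FLAG_DELAY, NULL, 0);
#endif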
static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
{
	switch (cm_mra_get_msg_mraed(mra_msg)) {
	case CM_MSG_RESPONSE_REQ:
		return cm_acquire_id(mra_msg->remote_comm_id, 0);
	case CM_MSG_RESPONSE_REP:
	case CM_MSG_RESPONSE_OTHER:
		return cm_acquire_id(mra_msg->remote_comm_id,
				     mra_msg->local_comm_id);
	default:
		return NULL;
	}
}
static int cm_mra_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_mra_msg *mra_msg;
	int timeout, ret;

	mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_mraed_id(mra_msg);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &mra_msg->private_data;
	work->cm_event.param.mra_rcvd.service_timeout =
					cm_mra_get_service_timeout(mra_msg);
	timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
		  cm_convert_to_ms(cm_id_priv->av.timeout);

	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
		break;
	case IB_CM_REP_SENT:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
		break;
	case IB_CM_ESTABLISHED:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
		    cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout)) {
			if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
				atomic_long_inc(&work->port->
						counter_group[CM_RECV_DUPLICATES].
						counter[CM_MRA_COUNTER]);
			goto out;
		}
		cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
		break;
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_MRA_REP_RCVD:
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_MRA_COUNTER]);
		/* fall through */
	default:
		goto out;
	}

	cm_id_priv->msg->context[1] = (void *) (unsigned long)
				      cm_id_priv->id.state;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	spin_unlock_irq(&cm_id_priv->lock);
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
static void cm_format_lap(struct cm_lap_msg *lap_msg,
			  struct cm_id_private *cm_id_priv,
			  struct sa_path_rec *alternate_path,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP));
	lap_msg->local_comm_id = cm_id_priv->id.local_id;
	lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
	/* todo: need remote CM response timeout */
	cm_lap_set_remote_resp_timeout(lap_msg, 0x1F);
	lap_msg->alt_local_lid = alternate_path->slid;
	lap_msg->alt_remote_lid = alternate_path->dlid;
	lap_msg->alt_local_gid = alternate_path->sgid;
	lap_msg->alt_remote_gid = alternate_path->dgid;
	cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
	cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
	lap_msg->alt_hop_limit = alternate_path->hop_limit;
	cm_lap_set_packet_rate(lap_msg, alternate_path->rate);
	cm_lap_set_sl(lap_msg, alternate_path->sl);
	cm_lap_set_subnet_local(lap_msg, 1); /* local only... */
	cm_lap_set_local_ack_timeout(lap_msg,
		cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
			       alternate_path->packet_life_time));

	if (private_data && private_data_len)
		memcpy(lap_msg->private_data, private_data, private_data_len);
}
int ib_send_cm_lap(struct ib_cm_id *cm_id,
		   struct sa_path_rec *alternate_path,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED ||
	    (cm_id->lap_state != IB_CM_LAP_UNINIT &&
	     cm_id->lap_state != IB_CM_LAP_IDLE)) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av,
				 cm_id_priv);
	if (ret)
		goto out;
	cm_id_priv->alt_av.timeout =
			cm_ack_timeout(cm_id_priv->target_ack_delay,
				       cm_id_priv->alt_av.timeout - 1);

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv,
		      alternate_path, private_data, private_data_len);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->lap_state = IB_CM_LAP_SENT;
	cm_id_priv->msg = msg;

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_lap);
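
/*
 * Editor's illustration (not part of the original source): after resolving
 * an alternate path (for example via an SA query), the active side loads it
 * with a LAP.  alt_path is a hypothetical, already-resolved sa_path_rec:
 */
#if 0
	ret = ib_send_cm_lap(cm_id, &alt_path, NULL, 0);
	/* expect IB_CM_APR_RECEIVED; then arm migration via ib_cm_init_qp_attr() */
#endif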
static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
				    struct sa_path_rec *path,
				    struct cm_lap_msg *lap_msg)
{
	memset(path, 0, sizeof *path);
	path->dgid = lap_msg->alt_local_gid;
	path->sgid = lap_msg->alt_remote_gid;
	path->dlid = lap_msg->alt_local_lid;
	path->slid = lap_msg->alt_remote_lid;
	path->flow_label = cm_lap_get_flow_label(lap_msg);
	path->hop_limit = lap_msg->alt_hop_limit;
	path->traffic_class = cm_lap_get_traffic_class(lap_msg);
	path->reversible = 1;
	path->pkey = cm_id_priv->pkey;
	path->sl = cm_lap_get_sl(lap_msg);
	path->mtu_selector = IB_SA_EQ;
	path->mtu = cm_id_priv->path_mtu;
	path->rate_selector = IB_SA_EQ;
	path->rate = cm_lap_get_packet_rate(lap_msg);
	path->packet_life_time_selector = IB_SA_EQ;
	path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
	path->packet_life_time -= (path->packet_life_time > 0);
}
2925 static int cm_lap_handler(struct cm_work *work)
2927 struct cm_id_private *cm_id_priv;
2928 struct cm_lap_msg *lap_msg;
2929 struct ib_cm_lap_event_param *param;
2930 struct ib_mad_send_buf *msg = NULL;
2933 /* todo: verify LAP request and send reject APR if invalid. */
2934 lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
2935 cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id,
2936 lap_msg->local_comm_id);
2940 param = &work->cm_event.param.lap_rcvd;
2941 param->alternate_path = &work->path[0];
2942 cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
2943 work->cm_event.private_data = &lap_msg->private_data;
2945 spin_lock_irq(&cm_id_priv->lock);
2946 if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
2949 switch (cm_id_priv->id.lap_state) {
2950 case IB_CM_LAP_UNINIT:
2951 case IB_CM_LAP_IDLE:
2953 case IB_CM_MRA_LAP_SENT:
2954 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2955 counter[CM_LAP_COUNTER]);
2956 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
2959 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2960 CM_MSG_RESPONSE_OTHER,
2961 cm_id_priv->service_timeout,
2962 cm_id_priv->private_data,
2963 cm_id_priv->private_data_len);
2964 spin_unlock_irq(&cm_id_priv->lock);
2966 if (ib_post_send_mad(msg, NULL))
2969 case IB_CM_LAP_RCVD:
2970 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2971 counter[CM_LAP_COUNTER]);
2977 cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
2978 cm_id_priv->tid = lap_msg->hdr.tid;
2979 cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
2980 work->mad_recv_wc->recv_buf.grh,
2982 cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av,
2984 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2986 list_add_tail(&work->list, &cm_id_priv->work_list);
2987 spin_unlock_irq(&cm_id_priv->lock);
2990 cm_process_work(cm_id_priv, work);
2992 cm_deref_id(cm_id_priv);
2995 unlock: spin_unlock_irq(&cm_id_priv->lock);
2996 deref: cm_deref_id(cm_id_priv);
static void cm_format_apr(struct cm_apr_msg *apr_msg,
			  struct cm_id_private *cm_id_priv,
			  enum ib_cm_apr_status status,
			  void *info,
			  u8 info_length,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid);
	apr_msg->local_comm_id = cm_id_priv->id.local_id;
	apr_msg->remote_comm_id = cm_id_priv->id.remote_id;
	apr_msg->ap_status = (u8) status;

	if (info && info_length) {
		apr_msg->info_length = info_length;
		memcpy(apr_msg->info, info, info_length);
	}

	if (private_data && private_data_len)
		memcpy(apr_msg->private_data, private_data, private_data_len);
}
int ib_send_cm_apr(struct ib_cm_id *cm_id,
		   enum ib_cm_apr_status status,
		   void *info,
		   u8 info_length,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) ||
	    (info && info_length > IB_CM_APR_INFO_LENGTH))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED ||
	    (cm_id->lap_state != IB_CM_LAP_RCVD &&
	     cm_id->lap_state != IB_CM_MRA_LAP_SENT)) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status,
		      info, info_length, private_data, private_data_len);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->lap_state = IB_CM_LAP_IDLE;
out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_apr);
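
/*
 * Editor's illustration (not part of the original source): the passive side
 * accepts (or rejects) the proposed alternate path from its
 * IB_CM_LAP_RECEIVED handler:
 */
#if 0
	case IB_CM_LAP_RECEIVED:
		ib_send_cm_apr(cm_id, IB_CM_APR_SUCCESS, NULL, 0, NULL, 0);
		break;
#endif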
3066 static int cm_apr_handler(struct cm_work *work)
3068 struct cm_id_private *cm_id_priv;
	struct cm_apr_msg *apr_msg;
	int ret;
3072 apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
3073 cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id,
3074 apr_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL; /* Unmatched reply. */
3078 work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
3079 work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
3080 work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
3081 work->cm_event.private_data = &apr_msg->private_data;
3083 spin_lock_irq(&cm_id_priv->lock);
3084 if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
3085 (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
	     cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
3090 cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
3091 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
3092 cm_id_priv->msg = NULL;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);
	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
3109 static int cm_timewait_handler(struct cm_work *work)
3111 struct cm_timewait_info *timewait_info;
3112 struct cm_id_private *cm_id_priv;
3115 timewait_info = (struct cm_timewait_info *)work;
3116 spin_lock_irq(&cm.lock);
3117 list_del(&timewait_info->list);
3118 spin_unlock_irq(&cm.lock);
3120 cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
3121 timewait_info->work.remote_id);
3125 spin_lock_irq(&cm_id_priv->lock);
3126 if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
3127 cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
3128 spin_unlock_irq(&cm_id_priv->lock);
3131 cm_id_priv->id.state = IB_CM_IDLE;
3132 ret = atomic_inc_and_test(&cm_id_priv->work_count);
3134 list_add_tail(&work->list, &cm_id_priv->work_list);
3135 spin_unlock_irq(&cm_id_priv->lock);
3138 cm_process_work(cm_id_priv, work);
3140 cm_deref_id(cm_id_priv);
3143 cm_deref_id(cm_id_priv);
static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
			       struct cm_id_private *cm_id_priv,
			       struct ib_cm_sidr_req_param *param)
{
	cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
	sidr_req_msg->request_id = cm_id_priv->id.local_id;
	sidr_req_msg->pkey = param->path->pkey;
	sidr_req_msg->service_id = param->service_id;

	if (param->private_data && param->private_data_len)
		memcpy(sidr_req_msg->private_data, param->private_data,
		       param->private_data_len);
}
int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if (!param->path || (param->private_data &&
	     param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	ret = cm_init_av_by_path(param->path, &cm_id_priv->av, cm_id_priv);
	if (ret)
		goto out;

	cm_id->service_id = param->service_id;
	cm_id->service_mask = ~cpu_to_be64(0);
	cm_id_priv->timeout_ms = param->timeout_ms;
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
			   param);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state == IB_CM_IDLE)
		ret = ib_post_send_mad(msg, NULL);
	else
		ret = -EINVAL;

	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		goto out;
	}
	cm_id->state = IB_CM_SIDR_REQ_SENT;
	cm_id_priv->msg = msg;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
out:
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_req);
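
/*
 * Editor's illustration (not part of the original source): SIDR resolves a
 * service ID to a UD QPN/Q_Key without building a connection.  A client
 * might issue the following, where path is a hypothetical, already-resolved
 * sa_path_rec:
 */
#if 0
	struct ib_cm_sidr_req_param req = {
		.path		= &path,
		.service_id	= cpu_to_be64(0x1234ULL),
		.timeout_ms	= 2000,
		.max_cm_retries	= 3,
	};

	ret = ib_send_cm_sidr_req(cm_id, &req); /* expect IB_CM_SIDR_REP_RECEIVED */
#endif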
static void cm_format_sidr_req_event(struct cm_work *work,
				     struct ib_cm_id *listen_id)
{
	struct cm_sidr_req_msg *sidr_req_msg;
	struct ib_cm_sidr_req_event_param *param;

	sidr_req_msg = (struct cm_sidr_req_msg *)
				work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.sidr_req_rcvd;
	param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
	param->listen_id = listen_id;
	param->service_id = sidr_req_msg->service_id;
	param->bth_pkey = cm_get_bth_pkey(work);
	param->port = work->port->port_num;
	work->cm_event.private_data = &sidr_req_msg->private_data;
}
3228 static int cm_sidr_req_handler(struct cm_work *work)
3230 struct ib_cm_id *cm_id;
3231 struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
	struct cm_sidr_req_msg *sidr_req_msg;
	struct ib_wc *wc;
3235 cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);
3238 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3240 /* Record SGID/SLID and request ID for lookup. */
3241 sidr_req_msg = (struct cm_sidr_req_msg *)
3242 work->mad_recv_wc->recv_buf.mad;
3243 wc = work->mad_recv_wc->wc;
3244 cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
3245 cm_id_priv->av.dgid.global.interface_id = 0;
3246 cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
3247 work->mad_recv_wc->recv_buf.grh,
3249 cm_id_priv->id.remote_id = sidr_req_msg->request_id;
3250 cm_id_priv->tid = sidr_req_msg->hdr.tid;
3251 atomic_inc(&cm_id_priv->work_count);
3253 spin_lock_irq(&cm.lock);
3254 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
3255 if (cur_cm_id_priv) {
3256 spin_unlock_irq(&cm.lock);
3257 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
3258 counter[CM_SIDR_REQ_COUNTER]);
3259 goto out; /* Duplicate message. */
3261 cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
3262 cur_cm_id_priv = cm_find_listen(cm_id->device,
3263 sidr_req_msg->service_id);
3264 if (!cur_cm_id_priv) {
3265 spin_unlock_irq(&cm.lock);
3266 cm_reject_sidr_req(cm_id_priv, IB_SIDR_UNSUPPORTED);
3267 goto out; /* No match. */
3269 atomic_inc(&cur_cm_id_priv->refcount);
3270 atomic_inc(&cm_id_priv->refcount);
3271 spin_unlock_irq(&cm.lock);
3273 cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
3274 cm_id_priv->id.context = cur_cm_id_priv->id.context;
3275 cm_id_priv->id.service_id = sidr_req_msg->service_id;
3276 cm_id_priv->id.service_mask = ~cpu_to_be64(0);
3278 cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
3279 cm_process_work(cm_id_priv, work);
3280 cm_deref_id(cur_cm_id_priv);
3283 ib_destroy_cm_id(&cm_id_priv->id);
static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
			       struct cm_id_private *cm_id_priv,
			       struct ib_cm_sidr_rep_param *param)
{
	cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
			  cm_id_priv->tid);
	sidr_rep_msg->request_id = cm_id_priv->id.remote_id;
	sidr_rep_msg->status = param->status;
	cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num));
	sidr_rep_msg->service_id = cm_id_priv->id.service_id;
	sidr_rep_msg->qkey = cpu_to_be32(param->qkey);

	if (param->info && param->info_length)
		memcpy(sidr_rep_msg->info, param->info, param->info_length);

	if (param->private_data && param->private_data_len)
		memcpy(sidr_rep_msg->private_data, param->private_data,
		       param->private_data_len);
}
int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_rep_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
	    (param->private_data &&
	     param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_SIDR_REQ_RCVD) {
		ret = -EINVAL;
		goto error;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto error;

	cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
			   param);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}
	cm_id->state = IB_CM_IDLE;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	spin_lock_irqsave(&cm.lock, flags);
	if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) {
		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
		RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
	}
	spin_unlock_irqrestore(&cm.lock, flags);
	return 0;

error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_rep);
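
/*
 * Editor's illustration (not part of the original source): the listener's
 * IB_CM_SIDR_REQ_RECEIVED handler answers with the UD QPN and Q_Key of the
 * service.  ud_qp and MY_QKEY are hypothetical:
 */
#if 0
	struct ib_cm_sidr_rep_param rep = {
		.qp_num	= ud_qp->qp_num,
		.qkey	= MY_QKEY,
		.status	= IB_SIDR_SUCCESS,
	};

	ib_send_cm_sidr_rep(cm_id, &rep);
#endif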
static void cm_format_sidr_rep_event(struct cm_work *work)
{
	struct cm_sidr_rep_msg *sidr_rep_msg;
	struct ib_cm_sidr_rep_event_param *param;

	sidr_rep_msg = (struct cm_sidr_rep_msg *)
				work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.sidr_rep_rcvd;
	param->status = sidr_rep_msg->status;
	param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
	param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
	param->info = &sidr_rep_msg->info;
	param->info_len = sidr_rep_msg->info_length;
	work->cm_event.private_data = &sidr_rep_msg->private_data;
}
3371 static int cm_sidr_rep_handler(struct cm_work *work)
3373 struct cm_sidr_rep_msg *sidr_rep_msg;
3374 struct cm_id_private *cm_id_priv;
3376 sidr_rep_msg = (struct cm_sidr_rep_msg *)
3377 work->mad_recv_wc->recv_buf.mad;
3378 cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0);
	if (!cm_id_priv)
		return -EINVAL; /* Unmatched reply. */
3382 spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
3387 cm_id_priv->id.state = IB_CM_IDLE;
3388 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
3389 spin_unlock_irq(&cm_id_priv->lock);
	cm_format_sidr_rep_event(work);
	cm_process_work(cm_id_priv, work);
	return 0;
out:	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
3399 static void cm_process_send_error(struct ib_mad_send_buf *msg,
3400 enum ib_wc_status wc_status)
3402 struct cm_id_private *cm_id_priv;
3403 struct ib_cm_event cm_event;
3404 enum ib_cm_state state;
3407 memset(&cm_event, 0, sizeof cm_event);
3408 cm_id_priv = msg->context[0];
3410 /* Discard old sends or ones without a response. */
3411 spin_lock_irq(&cm_id_priv->lock);
3412 state = (enum ib_cm_state) (unsigned long) msg->context[1];
3413 if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
3416 pr_debug_ratelimited("CM: failed sending MAD in state %d. (%s)\n",
3417 state, ib_wc_status_msg(wc_status));
3419 case IB_CM_REQ_SENT:
3420 case IB_CM_MRA_REQ_RCVD:
3421 cm_reset_to_idle(cm_id_priv);
3422 cm_event.event = IB_CM_REQ_ERROR;
3424 case IB_CM_REP_SENT:
3425 case IB_CM_MRA_REP_RCVD:
3426 cm_reset_to_idle(cm_id_priv);
3427 cm_event.event = IB_CM_REP_ERROR;
3429 case IB_CM_DREQ_SENT:
3430 cm_enter_timewait(cm_id_priv);
3431 cm_event.event = IB_CM_DREQ_ERROR;
3433 case IB_CM_SIDR_REQ_SENT:
3434 cm_id_priv->id.state = IB_CM_IDLE;
3435 cm_event.event = IB_CM_SIDR_REQ_ERROR;
3440 spin_unlock_irq(&cm_id_priv->lock);
3441 cm_event.param.send_status = wc_status;
3443 /* No other events can occur on the cm_id at this point. */
3444 ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
3447 ib_destroy_cm_id(&cm_id_priv->id);
3450 spin_unlock_irq(&cm_id_priv->lock);
3454 static void cm_send_handler(struct ib_mad_agent *mad_agent,
3455 struct ib_mad_send_wc *mad_send_wc)
3457 struct ib_mad_send_buf *msg = mad_send_wc->send_buf;
3458 struct cm_port *port;
3461 port = mad_agent->context;
3462 attr_index = be16_to_cpu(((struct ib_mad_hdr *)
3463 msg->mad)->attr_id) - CM_ATTR_ID_OFFSET;
3466 * If the send was in response to a received message (context[0] is not
3467 * set to a cm_id), and is not a REJ, then it is a send that was
3470 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
3473 atomic_long_add(1 + msg->retries,
3474 &port->counter_group[CM_XMIT].counter[attr_index]);
3476 atomic_long_add(msg->retries,
3477 &port->counter_group[CM_XMIT_RETRIES].
3478 counter[attr_index]);
3480 switch (mad_send_wc->status) {
3482 case IB_WC_WR_FLUSH_ERR:
3486 if (msg->context[0] && msg->context[1])
3487 cm_process_send_error(msg, mad_send_wc->status);
3494 static void cm_work_handler(struct work_struct *_work)
3496 struct cm_work *work = container_of(_work, struct cm_work, work.work);
3499 switch (work->cm_event.event) {
3500 case IB_CM_REQ_RECEIVED:
3501 ret = cm_req_handler(work);
3503 case IB_CM_MRA_RECEIVED:
3504 ret = cm_mra_handler(work);
3506 case IB_CM_REJ_RECEIVED:
3507 ret = cm_rej_handler(work);
3509 case IB_CM_REP_RECEIVED:
3510 ret = cm_rep_handler(work);
3512 case IB_CM_RTU_RECEIVED:
3513 ret = cm_rtu_handler(work);
3515 case IB_CM_USER_ESTABLISHED:
3516 ret = cm_establish_handler(work);
3518 case IB_CM_DREQ_RECEIVED:
3519 ret = cm_dreq_handler(work);
3521 case IB_CM_DREP_RECEIVED:
3522 ret = cm_drep_handler(work);
3524 case IB_CM_SIDR_REQ_RECEIVED:
3525 ret = cm_sidr_req_handler(work);
3527 case IB_CM_SIDR_REP_RECEIVED:
3528 ret = cm_sidr_rep_handler(work);
3530 case IB_CM_LAP_RECEIVED:
3531 ret = cm_lap_handler(work);
3533 case IB_CM_APR_RECEIVED:
3534 ret = cm_apr_handler(work);
3536 case IB_CM_TIMEWAIT_EXIT:
3537 ret = cm_timewait_handler(work);
3547 static int cm_establish(struct ib_cm_id *cm_id)
3549 struct cm_id_private *cm_id_priv;
3550 struct cm_work *work;
3551 unsigned long flags;
3553 struct cm_device *cm_dev;
3555 cm_dev = ib_get_client_data(cm_id->device, &cm_client);
3559 work = kmalloc(sizeof *work, GFP_ATOMIC);
3563 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3564 spin_lock_irqsave(&cm_id_priv->lock, flags);
3565 switch (cm_id->state)
3567 case IB_CM_REP_SENT:
3568 case IB_CM_MRA_REP_RCVD:
3569 cm_id->state = IB_CM_ESTABLISHED;
3571 case IB_CM_ESTABLISHED:
3578 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3586 * The CM worker thread may try to destroy the cm_id before it
3587 * can execute this work item. To prevent potential deadlock,
3588 * we need to find the cm_id once we're in the context of the
3589 * worker thread, rather than holding a reference on it.
3591 INIT_DELAYED_WORK(&work->work, cm_work_handler);
3592 work->local_id = cm_id->local_id;
3593 work->remote_id = cm_id->remote_id;
3594 work->mad_recv_wc = NULL;
3595 work->cm_event.event = IB_CM_USER_ESTABLISHED;
3597 /* Check if the device started its remove_one */
3598 spin_lock_irqsave(&cm.lock, flags);
3599 if (!cm_dev->going_down) {
3600 queue_delayed_work(cm.wq, &work->work, 0);
3605 spin_unlock_irqrestore(&cm.lock, flags);
3611 static int cm_migrate(struct ib_cm_id *cm_id)
3613 struct cm_id_private *cm_id_priv;
3614 struct cm_av tmp_av;
3615 unsigned long flags;
3616 int tmp_send_port_not_ready;
3619 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3620 spin_lock_irqsave(&cm_id_priv->lock, flags);
3621 if (cm_id->state == IB_CM_ESTABLISHED &&
3622 (cm_id->lap_state == IB_CM_LAP_UNINIT ||
3623 cm_id->lap_state == IB_CM_LAP_IDLE)) {
3624 cm_id->lap_state = IB_CM_LAP_IDLE;
3625 /* Swap address vector */
3626 tmp_av = cm_id_priv->av;
3627 cm_id_priv->av = cm_id_priv->alt_av;
3628 cm_id_priv->alt_av = tmp_av;
3629 /* Swap port send ready state */
3630 tmp_send_port_not_ready = cm_id_priv->prim_send_port_not_ready;
3631 cm_id_priv->prim_send_port_not_ready = cm_id_priv->altr_send_port_not_ready;
3632 cm_id_priv->altr_send_port_not_ready = tmp_send_port_not_ready;
3635 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event)
{
	switch (event) {
	case IB_EVENT_COMM_EST:
		return cm_establish(cm_id);
	case IB_EVENT_PATH_MIG:
		return cm_migrate(cm_id);
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL(ib_cm_notify);
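
/*
 * Editor's illustration (not part of the original source): a ULP forwards
 * the relevant QP async events so the CM can stop retrying a REP or swap to
 * the alternate path.  Hypothetical QP event handler:
 */
#if 0
static void example_qp_event(struct ib_event *event, void *context)
{
	struct ib_cm_id *cm_id = context;

	if (event->event == IB_EVENT_COMM_EST ||
	    event->event == IB_EVENT_PATH_MIG)
		ib_cm_notify(cm_id, event->event);
}
#endif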
3658 static void cm_recv_handler(struct ib_mad_agent *mad_agent,
3659 struct ib_mad_send_buf *send_buf,
3660 struct ib_mad_recv_wc *mad_recv_wc)
3662 struct cm_port *port = mad_agent->context;
3663 struct cm_work *work;
3664 enum ib_cm_event_type event;
3669 switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
3670 case CM_REQ_ATTR_ID:
3671 paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)->
3672 alt_local_lid != 0);
3673 event = IB_CM_REQ_RECEIVED;
3675 case CM_MRA_ATTR_ID:
3676 event = IB_CM_MRA_RECEIVED;
3678 case CM_REJ_ATTR_ID:
3679 event = IB_CM_REJ_RECEIVED;
3681 case CM_REP_ATTR_ID:
3682 event = IB_CM_REP_RECEIVED;
3684 case CM_RTU_ATTR_ID:
3685 event = IB_CM_RTU_RECEIVED;
3687 case CM_DREQ_ATTR_ID:
3688 event = IB_CM_DREQ_RECEIVED;
3690 case CM_DREP_ATTR_ID:
3691 event = IB_CM_DREP_RECEIVED;
3693 case CM_SIDR_REQ_ATTR_ID:
3694 event = IB_CM_SIDR_REQ_RECEIVED;
3696 case CM_SIDR_REP_ATTR_ID:
3697 event = IB_CM_SIDR_REP_RECEIVED;
3699 case CM_LAP_ATTR_ID:
3701 event = IB_CM_LAP_RECEIVED;
3703 case CM_APR_ATTR_ID:
3704 event = IB_CM_APR_RECEIVED;
3707 ib_free_recv_mad(mad_recv_wc);
3711 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
3712 atomic_long_inc(&port->counter_group[CM_RECV].
3713 counter[attr_id - CM_ATTR_ID_OFFSET]);
3715 work = kmalloc(sizeof(*work) + sizeof(struct sa_path_rec) * paths,
3718 ib_free_recv_mad(mad_recv_wc);
3722 INIT_DELAYED_WORK(&work->work, cm_work_handler);
3723 work->cm_event.event = event;
3724 work->mad_recv_wc = mad_recv_wc;
3727 /* Check if the device started its remove_one */
3728 spin_lock_irq(&cm.lock);
3729 if (!port->cm_dev->going_down)
3730 queue_delayed_work(cm.wq, &work->work, 0);
3733 spin_unlock_irq(&cm.lock);
3737 ib_free_recv_mad(mad_recv_wc);
3741 static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
3742 struct ib_qp_attr *qp_attr,
3745 unsigned long flags;
3748 spin_lock_irqsave(&cm_id_priv->lock, flags);
3749 switch (cm_id_priv->id.state) {
3750 case IB_CM_REQ_SENT:
3751 case IB_CM_MRA_REQ_RCVD:
3752 case IB_CM_REQ_RCVD:
3753 case IB_CM_MRA_REQ_SENT:
3754 case IB_CM_REP_RCVD:
3755 case IB_CM_MRA_REP_SENT:
3756 case IB_CM_REP_SENT:
3757 case IB_CM_MRA_REP_RCVD:
3758 case IB_CM_ESTABLISHED:
3759 *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
3760 IB_QP_PKEY_INDEX | IB_QP_PORT;
3761 qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
3762 if (cm_id_priv->responder_resources)
3763 qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
3764 IB_ACCESS_REMOTE_ATOMIC;
3765 qp_attr->pkey_index = cm_id_priv->av.pkey_index;
3766 qp_attr->port_num = cm_id_priv->av.port->port_num;
3773 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3777 static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
3778 struct ib_qp_attr *qp_attr,
3781 unsigned long flags;
3784 spin_lock_irqsave(&cm_id_priv->lock, flags);
3785 switch (cm_id_priv->id.state) {
3786 case IB_CM_REQ_RCVD:
3787 case IB_CM_MRA_REQ_SENT:
3788 case IB_CM_REP_RCVD:
3789 case IB_CM_MRA_REP_SENT:
3790 case IB_CM_REP_SENT:
3791 case IB_CM_MRA_REP_RCVD:
3792 case IB_CM_ESTABLISHED:
3793 *qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
3794 IB_QP_DEST_QPN | IB_QP_RQ_PSN;
3795 qp_attr->ah_attr = cm_id_priv->av.ah_attr;
3796 qp_attr->path_mtu = cm_id_priv->path_mtu;
3797 qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
3798 qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
3799 if (cm_id_priv->qp_type == IB_QPT_RC ||
3800 cm_id_priv->qp_type == IB_QPT_XRC_TGT) {
3801 *qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
3802 IB_QP_MIN_RNR_TIMER;
3803 qp_attr->max_dest_rd_atomic =
3804 cm_id_priv->responder_resources;
3805 qp_attr->min_rnr_timer = 0;
3807 if (rdma_ah_get_dlid(&cm_id_priv->alt_av.ah_attr)) {
3808 *qp_attr_mask |= IB_QP_ALT_PATH;
3809 qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
3810 qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
3811 qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
3812 qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
3820 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3824 static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
3825 struct ib_qp_attr *qp_attr,
3828 unsigned long flags;
3831 spin_lock_irqsave(&cm_id_priv->lock, flags);
3832 switch (cm_id_priv->id.state) {
3833 /* Allow transition to RTS before sending REP */
3834 case IB_CM_REQ_RCVD:
3835 case IB_CM_MRA_REQ_SENT:
3837 case IB_CM_REP_RCVD:
3838 case IB_CM_MRA_REP_SENT:
3839 case IB_CM_REP_SENT:
3840 case IB_CM_MRA_REP_RCVD:
3841 case IB_CM_ESTABLISHED:
3842 if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
3843 *qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
3844 qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
3845 switch (cm_id_priv->qp_type) {
3847 case IB_QPT_XRC_INI:
3848 *qp_attr_mask |= IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
3849 IB_QP_MAX_QP_RD_ATOMIC;
3850 qp_attr->retry_cnt = cm_id_priv->retry_count;
3851 qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
3852 qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
3854 case IB_QPT_XRC_TGT:
3855 *qp_attr_mask |= IB_QP_TIMEOUT;
3856 qp_attr->timeout = cm_id_priv->av.timeout;
3861 if (rdma_ah_get_dlid(&cm_id_priv->alt_av.ah_attr)) {
3862 *qp_attr_mask |= IB_QP_PATH_MIG_STATE;
3863 qp_attr->path_mig_state = IB_MIG_REARM;
3866 *qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE;
3867 qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
3868 qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
3869 qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
3870 qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
3871 qp_attr->path_mig_state = IB_MIG_REARM;
3879 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
		       struct ib_qp_attr *qp_attr,
		       int *qp_attr_mask)
{
	struct cm_id_private *cm_id_priv;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	switch (qp_attr->qp_state) {
	case IB_QPS_INIT:
		return cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
	case IB_QPS_RTR:
		return cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
	case IB_QPS_RTS:
		return cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL(ib_cm_init_qp_attr);
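
/*
 * Editor's illustration (not part of the original source): the usual way to
 * consume ib_cm_init_qp_attr() is to walk the QP through INIT, RTR and RTS,
 * letting the CM fill in the path-dependent attributes.  Sketch with a
 * hypothetical example_modify_qp(); error handling kept minimal:
 */
#if 0
static int example_modify_qp(struct ib_cm_id *cm_id, struct ib_qp *qp)
{
	static const enum ib_qp_state states[] = {
		IB_QPS_INIT, IB_QPS_RTR, IB_QPS_RTS
	};
	struct ib_qp_attr attr = {};
	int i, mask, ret;

	for (i = 0; i < ARRAY_SIZE(states); i++) {
		attr.qp_state = states[i];
		ret = ib_cm_init_qp_attr(cm_id, &attr, &mask);
		if (!ret)
			ret = ib_modify_qp(qp, &attr, mask);
		if (ret)
			return ret;
	}
	return 0;
}
#endif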
static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
			       char *buf)
{
	struct cm_counter_group *group;
	struct cm_counter_attribute *cm_attr;

	group = container_of(obj, struct cm_counter_group, obj);
	cm_attr = container_of(attr, struct cm_counter_attribute, attr);

	return sprintf(buf, "%ld\n",
		       atomic_long_read(&group->counter[cm_attr->index]));
}
3922 static const struct sysfs_ops cm_counter_ops = {
3923 .show = cm_show_counter
3926 static struct kobj_type cm_counter_obj_type = {
3927 .sysfs_ops = &cm_counter_ops,
3928 .default_attrs = cm_counter_default_attrs
3931 static void cm_release_port_obj(struct kobject *obj)
3933 struct cm_port *cm_port;
3935 cm_port = container_of(obj, struct cm_port, port_obj);
3939 static struct kobj_type cm_port_obj_type = {
3940 .release = cm_release_port_obj
3943 static char *cm_devnode(struct device *dev, umode_t *mode)
3947 return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
3950 struct class cm_class = {
3951 .owner = THIS_MODULE,
3952 .name = "infiniband_cm",
3953 .devnode = cm_devnode,
3955 EXPORT_SYMBOL(cm_class);
3957 static int cm_create_port_fs(struct cm_port *port)
3961 ret = kobject_init_and_add(&port->port_obj, &cm_port_obj_type,
3962 &port->cm_dev->device->kobj,
3963 "%d", port->port_num);
3969 for (i = 0; i < CM_COUNTER_GROUPS; i++) {
3970 ret = kobject_init_and_add(&port->counter_group[i].obj,
3971 &cm_counter_obj_type,
3973 "%s", counter_group_names[i]);
3982 kobject_put(&port->counter_group[i].obj);
3983 kobject_put(&port->port_obj);
3988 static void cm_remove_port_fs(struct cm_port *port)
3992 for (i = 0; i < CM_COUNTER_GROUPS; i++)
3993 kobject_put(&port->counter_group[i].obj);
3995 kobject_put(&port->port_obj);
3998 static void cm_add_one(struct ib_device *ib_device)
4000 struct cm_device *cm_dev;
4001 struct cm_port *port;
4002 struct ib_mad_reg_req reg_req = {
4003 .mgmt_class = IB_MGMT_CLASS_CM,
4004 .mgmt_class_version = IB_CM_CLASS_VERSION,
4006 struct ib_port_modify port_modify = {
4007 .set_port_cap_mask = IB_PORT_CM_SUP
4009 unsigned long flags;
4014 cm_dev = kzalloc(sizeof(*cm_dev) + sizeof(*port) *
4015 ib_device->phys_port_cnt, GFP_KERNEL);
4019 cm_dev->ib_device = ib_device;
4020 cm_dev->ack_delay = ib_device->attrs.local_ca_ack_delay;
4021 cm_dev->going_down = 0;
4022 cm_dev->device = device_create(&cm_class, &ib_device->dev,
4024 "%s", ib_device->name);
4025 if (IS_ERR(cm_dev->device)) {
4030 set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
4031 for (i = 1; i <= ib_device->phys_port_cnt; i++) {
4032 if (!rdma_cap_ib_cm(ib_device, i))
4035 port = kzalloc(sizeof *port, GFP_KERNEL);
4039 cm_dev->port[i-1] = port;
4040 port->cm_dev = cm_dev;
4043 INIT_LIST_HEAD(&port->cm_priv_prim_list);
4044 INIT_LIST_HEAD(&port->cm_priv_altr_list);
4046 ret = cm_create_port_fs(port);
4050 port->mad_agent = ib_register_mad_agent(ib_device, i,
4058 if (IS_ERR(port->mad_agent))
4061 ret = ib_modify_port(ib_device, i, 0, &port_modify);
4071 ib_set_client_data(ib_device, &cm_client, cm_dev);
4073 write_lock_irqsave(&cm.device_lock, flags);
4074 list_add_tail(&cm_dev->list, &cm.device_list);
4075 write_unlock_irqrestore(&cm.device_lock, flags);
4079 ib_unregister_mad_agent(port->mad_agent);
4081 cm_remove_port_fs(port);
4083 port_modify.set_port_cap_mask = 0;
4084 port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
4086 if (!rdma_cap_ib_cm(ib_device, i))
4089 port = cm_dev->port[i-1];
4090 ib_modify_port(ib_device, port->port_num, 0, &port_modify);
4091 ib_unregister_mad_agent(port->mad_agent);
4092 cm_remove_port_fs(port);
4095 device_unregister(cm_dev->device);
4099 static void cm_remove_one(struct ib_device *ib_device, void *client_data)
4101 struct cm_device *cm_dev = client_data;
4102 struct cm_port *port;
4103 struct cm_id_private *cm_id_priv;
4104 struct ib_mad_agent *cur_mad_agent;
4105 struct ib_port_modify port_modify = {
4106 .clr_port_cap_mask = IB_PORT_CM_SUP
4108 unsigned long flags;
4114 write_lock_irqsave(&cm.device_lock, flags);
4115 list_del(&cm_dev->list);
4116 write_unlock_irqrestore(&cm.device_lock, flags);
4118 spin_lock_irq(&cm.lock);
4119 cm_dev->going_down = 1;
4120 spin_unlock_irq(&cm.lock);
4122 for (i = 1; i <= ib_device->phys_port_cnt; i++) {
4123 if (!rdma_cap_ib_cm(ib_device, i))
4126 port = cm_dev->port[i-1];
4127 ib_modify_port(ib_device, port->port_num, 0, &port_modify);
4128 /* Mark all the cm_id's as not valid */
4129 spin_lock_irq(&cm.lock);
4130 list_for_each_entry(cm_id_priv, &port->cm_priv_altr_list, altr_list)
4131 cm_id_priv->altr_send_port_not_ready = 1;
4132 list_for_each_entry(cm_id_priv, &port->cm_priv_prim_list, prim_list)
4133 cm_id_priv->prim_send_port_not_ready = 1;
4134 spin_unlock_irq(&cm.lock);
4136 * We flush the queue here after the going_down set, this
4137 * verify that no new works will be queued in the recv handler,
4138 * after that we can call the unregister_mad_agent
4140 flush_workqueue(cm.wq);
4141 spin_lock_irq(&cm.state_lock);
4142 cur_mad_agent = port->mad_agent;
4143 port->mad_agent = NULL;
4144 spin_unlock_irq(&cm.state_lock);
4145 ib_unregister_mad_agent(cur_mad_agent);
4146 cm_remove_port_fs(port);
4149 device_unregister(cm_dev->device);
4153 static int __init ib_cm_init(void)
4157 memset(&cm, 0, sizeof cm);
4158 INIT_LIST_HEAD(&cm.device_list);
4159 rwlock_init(&cm.device_lock);
4160 spin_lock_init(&cm.lock);
4161 spin_lock_init(&cm.state_lock);
4162 cm.listen_service_table = RB_ROOT;
4163 cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
4164 cm.remote_id_table = RB_ROOT;
4165 cm.remote_qp_table = RB_ROOT;
4166 cm.remote_sidr_table = RB_ROOT;
4167 idr_init(&cm.local_id_table);
4168 get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
4169 INIT_LIST_HEAD(&cm.timewait_list);
4171 ret = class_register(&cm_class);
4177 cm.wq = create_workqueue("ib_cm");
4183 ret = ib_register_client(&cm_client);
4189 destroy_workqueue(cm.wq);
4191 class_unregister(&cm_class);
4193 idr_destroy(&cm.local_id_table);
4197 static void __exit ib_cm_cleanup(void)
4199 struct cm_timewait_info *timewait_info, *tmp;
4201 spin_lock_irq(&cm.lock);
4202 list_for_each_entry(timewait_info, &cm.timewait_list, list)
4203 cancel_delayed_work(&timewait_info->work.work);
4204 spin_unlock_irq(&cm.lock);
4206 ib_unregister_client(&cm_client);
4207 destroy_workqueue(cm.wq);
4209 list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) {
4210 list_del(&timewait_info->list);
4211 kfree(timewait_info);
4214 class_unregister(&cm_class);
4215 idr_destroy(&cm.local_id_table);
4218 module_init(ib_cm_init);
4219 module_exit(ib_cm_cleanup);